hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
bf093691496f4a2e27c59c915a7b0daafbaa751b.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <math.h>
#include <time.h>
#include <conio.h>
#include "hip/hip_runtime.h" //headers for the cuda methods
#include "device_launch_parameters.h"
#include "Functions.cuh"
#define TMAE 0.15
#define trainspeed 0.05
#define totalRows 100
#define trRow 90 //number of rows in the training set
#define tsRow 10 //number of rows in the test set
#define col 10 //columns of data including desired value "result"
int trainingDataCount = trRow * (col - 1);
int testDataCount = tsRow * (col - 1);
//datasets
float TrainSetData[trRow][col - 1];
float TestSetData[tsRow][col - 1];
float TrainSetDiag[trRow]; //training result set
float TestSetDiag[tsRow]; //testing result set
double trainz[trRow]; //store training set z value of each patient
double testz[tsRow]; //store testing set z value of each patient
double trainsig[trRow]; //store training set sigmoid y cap of each patient
double testsig[tsRow]; //store testing set sigmoid y cap of each patient
//pointers to the datasets
//training data set
//data is r1c1 r1c2 r1c9 r2c1 r2c2
//1 row is 1 patient
float* pTrainSetData;
//testing data set
//data is r1c1 r1c2 r1c9 r2c1 r2c2
//1 row is 1 patient
float* pTestSetData;
float* pTrainSetDiag;
float* pTestSetDiag;
double* ptrainz;
double* ptestz;
//original data/weights/bias for printing at end
double weight[9];
double bias;
double utrmmse, utsmmse, ttrmmse, ttsmmse;
double* putrmmse = &utrmmse;
double* putsmmse = &utsmmse;
double* pttrmmse = &ttrmmse;
double* pttsmmse = &ttsmmse;
void readFile(float* traindata, float* testdata, float* trainDiag, float* testDiag);
double random();
void matrix();
int main(void) {
clock_t tstart = clock(); //start clock
srand(time(NULL));
//cuda memory allocation
hipMallocManaged(&pTrainSetData, trRow * (col - 1) * sizeof(float));
hipMallocManaged(&pTestSetData, tsRow * (col - 1) * sizeof(float));
hipMallocManaged(&pTrainSetDiag, trRow * sizeof(float));
hipMallocManaged(&pTestSetDiag, tsRow * sizeof(float));
hipMallocManaged(&ptrainz, trRow * sizeof(double));
hipMallocManaged(&ptestz, trRow * sizeof(double));
readFile(pTrainSetData, pTestSetData, pTrainSetDiag, pTestSetDiag);
int numBlocks = (trainingDataCount + 256 - 1) / 256;
memset(ptrainz, 0, trRow * sizeof(double)); // set the z arr to 0 so the threads can assign values
hipLaunchKernelGGL(( linearRegress) , dim3(numBlocks), dim3(256) , 0, 0, trRow, pTrainSetData, ptrainz, col);
}
void readFile(float *traindata, float *testdata, float *trainDiag, float *testDiag) {
int x, y;
int a=0, b=0, c=0, d=0;
FILE* fertfile_ptr = fopen("fertility_Diagnosis_Data_Group1_4.txt", "r");
// error handling
if (fertfile_ptr == NULL)
{
fprintf(stderr, "Error opening file: ");
exit(EXIT_FAILURE);
}
for (x = 0; x < totalRows; x++) {
for (y = 0; y < col; y++) {
if (y == (col - 1)) { //result of diagnosis
if (x < trRow) {
fscanf(fertfile_ptr, "%f, ", trainDiag);
trainDiag++,a++;
}
else {
fscanf(fertfile_ptr, "%f, ", testDiag);
testDiag++,b++;
}
}
else { //data to determine diagnosis
if (x < trRow) {
fscanf(fertfile_ptr, "%f, ", traindata);
traindata++,c++;
}
else {
fscanf(fertfile_ptr, "%f, ", testdata);
testdata++,d++;
}
}
}
}
fclose(fertfile_ptr);
printf("%d training data read.\n", c);
printf("%d training diag read.\n", a);
printf("%d testing data read.\n", d);
printf("%d testing diag read.\n", b);
}
//generate a number between -1 and 1
double random()
{
int w;
double resultrand;
w = (rand() % 3) - 1; //random between int -1, 0 , 1
if (w > 1 || w < -1)
{
w = (rand() % 3) - 1; //random between int -1, 0 , 1
//printf("%d", w);
}
if (w == 0)
w = 1;
//to improve the random result for double -1.00 to 1.00 by using w
resultrand = (1.0 * rand() / RAND_MAX - w);
if (resultrand > 1.00)
{
resultrand = resultrand - 1;
}
//printf("\nweight = %lf", resultrand);
return resultrand;
}
// to display the confusion matrix
void matrix() {
int tp = 0, fp = 0, tn = 0, fn = 0, i, y;
for (i = 0; i < trRow; i++) {
y = round(trainsig[i]);
if (y == 1)
{
if (TrainSetDiag[i] == y)
tp++;
else
fp++;
}
else
{
if (TrainSetDiag[i] == y)
tn++;
else
fn++;
}
}
printf("\n-------------------------------------------\n\n");
printf("Training Set Confusion Matrix\n True False\n");
printf("Predicted Positive %d %d\n", tp, fp);
printf("Predicted Negative %d %d\n", tn, fn);
printf("\n-------------------------------------------\n\n");
tp = 0, fp = 0, tn = 0, fn = 0;
for (i = 0; i < tsRow; i++) {
y = round(testsig[i]);
if (y == 1)
{
if (TestSetDiag[i] == y)
tp++;
else
fp++;
}
else
{
if (TestSetDiag[i] == y)
tn++;
else
fn++;
}
}
printf("Testing Set Confusion Matrix\n True False\n");
printf("Predicted Positive %d %d\n", tp, fp);
printf("Predicted Negative %d %d", tn, fn);
printf("\n\n-------------------------------------------\n\n");
} | bf093691496f4a2e27c59c915a7b0daafbaa751b.cu | #include <iostream>
#include <math.h>
#include <time.h>
#include <conio.h>
#include "cuda_runtime.h" //headers for the cuda methods
#include "device_launch_parameters.h"
#include "Functions.cuh"
#define TMAE 0.15
#define trainspeed 0.05
#define totalRows 100
#define trRow 90 //number of rows in the training set
#define tsRow 10 //number of rows in the test set
#define col 10 //columns of data including desired value "result"
int trainingDataCount = trRow * (col - 1);
int testDataCount = tsRow * (col - 1);
//datasets
float TrainSetData[trRow][col - 1];
float TestSetData[tsRow][col - 1];
float TrainSetDiag[trRow]; //training result set
float TestSetDiag[tsRow]; //testing result set
double trainz[trRow]; //store training set z value of each patient
double testz[tsRow]; //store testing set z value of each patient
double trainsig[trRow]; //store training set sigmoid y cap of each patient
double testsig[tsRow]; //store testing set sigmoid y cap of each patient
//pointers to the datasets
//training data set
//data is r1c1 r1c2 r1c9 r2c1 r2c2
//1 row is 1 patient
float* pTrainSetData;
//testing data set
//data is r1c1 r1c2 r1c9 r2c1 r2c2
//1 row is 1 patient
float* pTestSetData;
float* pTrainSetDiag;
float* pTestSetDiag;
double* ptrainz;
double* ptestz;
//original data/weights/bias for printing at end
double weight[9];
double bias;
double utrmmse, utsmmse, ttrmmse, ttsmmse;
double* putrmmse = &utrmmse;
double* putsmmse = &utsmmse;
double* pttrmmse = &ttrmmse;
double* pttsmmse = &ttsmmse;
void readFile(float* traindata, float* testdata, float* trainDiag, float* testDiag);
double random();
void matrix();
int main(void) {
clock_t tstart = clock(); //start clock
srand(time(NULL));
//cuda memory allocation
cudaMallocManaged(&pTrainSetData, trRow * (col - 1) * sizeof(float));
cudaMallocManaged(&pTestSetData, tsRow * (col - 1) * sizeof(float));
cudaMallocManaged(&pTrainSetDiag, trRow * sizeof(float));
cudaMallocManaged(&pTestSetDiag, tsRow * sizeof(float));
cudaMallocManaged(&ptrainz, trRow * sizeof(double));
cudaMallocManaged(&ptestz, trRow * sizeof(double));
readFile(pTrainSetData, pTestSetData, pTrainSetDiag, pTestSetDiag);
int numBlocks = (trainingDataCount + 256 - 1) / 256;
memset(ptrainz, 0, trRow * sizeof(double)); // set the z arr to 0 so the threads can assign values
linearRegress <<<numBlocks, 256 >>> (trRow, pTrainSetData, ptrainz, col);
}
void readFile(float *traindata, float *testdata, float *trainDiag, float *testDiag) {
int x, y;
int a=0, b=0, c=0, d=0;
FILE* fertfile_ptr = fopen("fertility_Diagnosis_Data_Group1_4.txt", "r");
// error handling
if (fertfile_ptr == NULL)
{
fprintf(stderr, "Error opening file: ");
exit(EXIT_FAILURE);
}
for (x = 0; x < totalRows; x++) {
for (y = 0; y < col; y++) {
if (y == (col - 1)) { //result of diagnosis
if (x < trRow) {
fscanf(fertfile_ptr, "%f, ", trainDiag);
trainDiag++,a++;
}
else {
fscanf(fertfile_ptr, "%f, ", testDiag);
testDiag++,b++;
}
}
else { //data to determine diagnosis
if (x < trRow) {
fscanf(fertfile_ptr, "%f, ", traindata);
traindata++,c++;
}
else {
fscanf(fertfile_ptr, "%f, ", testdata);
testdata++,d++;
}
}
}
}
fclose(fertfile_ptr);
printf("%d training data read.\n", c);
printf("%d training diag read.\n", a);
printf("%d testing data read.\n", d);
printf("%d testing diag read.\n", b);
}
//generate a number between -1 and 1
double random()
{
int w;
double resultrand;
w = (rand() % 3) - 1; //random between int -1, 0 , 1
if (w > 1 || w < -1)
{
w = (rand() % 3) - 1; //random between int -1, 0 , 1
//printf("%d", w);
}
if (w == 0)
w = 1;
//to improve the random result for double -1.00 to 1.00 by using w
resultrand = (1.0 * rand() / RAND_MAX - w);
if (resultrand > 1.00)
{
resultrand = resultrand - 1;
}
//printf("\nweight = %lf", resultrand);
return resultrand;
}
// to display the confusion matrix
void matrix() {
int tp = 0, fp = 0, tn = 0, fn = 0, i, y;
for (i = 0; i < trRow; i++) {
y = round(trainsig[i]);
if (y == 1)
{
if (TrainSetDiag[i] == y)
tp++;
else
fp++;
}
else
{
if (TrainSetDiag[i] == y)
tn++;
else
fn++;
}
}
printf("\n-------------------------------------------\n\n");
printf("Training Set Confusion Matrix\n True False\n");
printf("Predicted Positive %d %d\n", tp, fp);
printf("Predicted Negative %d %d\n", tn, fn);
printf("\n-------------------------------------------\n\n");
tp = 0, fp = 0, tn = 0, fn = 0;
for (i = 0; i < tsRow; i++) {
y = round(testsig[i]);
if (y == 1)
{
if (TestSetDiag[i] == y)
tp++;
else
fp++;
}
else
{
if (TestSetDiag[i] == y)
tn++;
else
fn++;
}
}
printf("Testing Set Confusion Matrix\n True False\n");
printf("Predicted Positive %d %d\n", tp, fp);
printf("Predicted Negative %d %d", tn, fn);
printf("\n\n-------------------------------------------\n\n");
} |
7688b2f438995101c432c0b957db2fe74cd9c51a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include "f_eval.cuh"
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/transform_reduce.h>
#include <thrust/host_vector.h>
struct func
{
double m,n,h,epsilon;
func(int m, int n, double h, double epsilon) : m(m),n(n),h(h),epsilon(epsilon) {}
__host__ __device__
double operator()(double& input)
{
for(int j = 0; j <m; j++){
double val1 = f_eval(&input, m, j, h);
double val2 = f_eval(&input, m, j, -h);
double result = (val1 - val2) / (2*epsilon);
return result;
}
}
};
struct func2
{
double m,n,h,epsilon;
func2(int m, int n, double h, double epsilon) : m(m),n(n),h(h),epsilon(epsilon) {}
__host__ __device__
double operator()(double& input)
{
input += h;
double val1 = f_eval(&input, m);
input -= 2*h;
double val2 = f_eval(&input, m);
double result = (val1 - val2) / (2*epsilon);
return result;
// input[i*m + j] += h;
}
};
struct func3
{
double m,n,h,epsilon;
func3(int m, int start_addr, double h, double epsilon) : m(m),start_addr(start_addr),h(h),epsilon(epsilon) {}
__host__ __device__
double operator()(double& input)
{
input += h;
double val1 = f_eval(start_addr, m);
input -= 2*h;
double val2 = f_eval(start_addr, m);
double result = (val1 - val2) / (2*epsilon);
return result;
// input[i*m + j] += h;
}
};
struct func4
{
__host__ __device__
double operator(double& input)()
{
return -1.7;
}
};
int main(int argc, char* argv[]) {
if(argc == 4){
FILE* fpIn = fopen(argv[1], "r");
FILE* fpOut = fopen(argv[2], "w");
double epsilon = atof(argv[3]);
const double h = 1e-2;
// n different input points, m variables ( x1,x2,.....xm) at each point
int n, m;
fscanf(fpIn, "%d", &n);
fscanf(fpIn, "%d", &m);
double *input = (double*) malloc(m * n * sizeof(double));
double *output = (double*) malloc(m * n * sizeof(double));
thrust::host_vector<double> hX(n*m);
// Read all the input points from the file
for (int i = 0; i < n; i++) {
for(int j = 0; j < m; j++) {
fscanf(fpIn, "%lf,", &input[i*m+j]);
hX[i*m+j] = input[i*m+j];
}
}
// Start the timer
// double start = omp_get_wtime();
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, NULL);
thrust::device_vector<double> dX = hX;
thrust::device_vector<double> dout(n*m);
// thrust::transform(dX.begin(),dX.end(),dout.begin(), func(m,n,h,epsilon));
// for(int i = 0; i < n; i++){
// int start_addr = i*m;
// thrust::transform(&dX[start_addr],&dX[start_addr+m],&dout[start_addr], func3(m,&dX[start_addr],h,epsilon));
// }
thrust::transform(dX.begin(),dX.end(),dout.begin(),func4())
thrust::copy(dout.begin(), dout.end(), &output[0]);
// Stop the timer
// double end = omp_get_wtime();
hipEventRecord(stop,NULL);
hipEventSynchronize(stop);
float msecTotal = 0.0f;
hipEventElapsedTime(&msecTotal, start, stop);
// Write the result into the file
for(int i = 0; i < n; i++) {
for(int j = 0; j < m; j++) {
if(j != m-1)
fprintf(fpOut, "%.4lf,", output[i*m + j] );
else
fprintf(fpOut, "%.4lf\n", output[i*m + j] );
}
}
fclose(fpIn);
fclose(fpOut);
double time = msecTotal; // time in ms
// Create a new file to log the execution time
FILE* fpLog = fopen("sequentialLog", "a");
fprintf(fpLog, "%d\t%d\t%lf\n", n, m, time);
fclose(fpLog);
free(input);
free(output);
}
else{
printf("Insufficient arguments");
}
return 0;
}
| 7688b2f438995101c432c0b957db2fe74cd9c51a.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include "f_eval.cuh"
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/transform_reduce.h>
#include <thrust/host_vector.h>
struct func
{
double m,n,h,epsilon;
func(int m, int n, double h, double epsilon) : m(m),n(n),h(h),epsilon(epsilon) {}
__host__ __device__
double operator()(double& input)
{
for(int j = 0; j <m; j++){
double val1 = f_eval(&input, m, j, h);
double val2 = f_eval(&input, m, j, -h);
double result = (val1 - val2) / (2*epsilon);
return result;
}
}
};
struct func2
{
double m,n,h,epsilon;
func2(int m, int n, double h, double epsilon) : m(m),n(n),h(h),epsilon(epsilon) {}
__host__ __device__
double operator()(double& input)
{
input += h;
double val1 = f_eval(&input, m);
input -= 2*h;
double val2 = f_eval(&input, m);
double result = (val1 - val2) / (2*epsilon);
return result;
// input[i*m + j] += h;
}
};
struct func3
{
double m,n,h,epsilon;
func3(int m, int start_addr, double h, double epsilon) : m(m),start_addr(start_addr),h(h),epsilon(epsilon) {}
__host__ __device__
double operator()(double& input)
{
input += h;
double val1 = f_eval(start_addr, m);
input -= 2*h;
double val2 = f_eval(start_addr, m);
double result = (val1 - val2) / (2*epsilon);
return result;
// input[i*m + j] += h;
}
};
struct func4
{
__host__ __device__
double operator(double& input)()
{
return -1.7;
}
};
int main(int argc, char* argv[]) {
if(argc == 4){
FILE* fpIn = fopen(argv[1], "r");
FILE* fpOut = fopen(argv[2], "w");
double epsilon = atof(argv[3]);
const double h = 1e-2;
// n different input points, m variables ( x1,x2,.....xm) at each point
int n, m;
fscanf(fpIn, "%d", &n);
fscanf(fpIn, "%d", &m);
double *input = (double*) malloc(m * n * sizeof(double));
double *output = (double*) malloc(m * n * sizeof(double));
thrust::host_vector<double> hX(n*m);
// Read all the input points from the file
for (int i = 0; i < n; i++) {
for(int j = 0; j < m; j++) {
fscanf(fpIn, "%lf,", &input[i*m+j]);
hX[i*m+j] = input[i*m+j];
}
}
// Start the timer
// double start = omp_get_wtime();
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, NULL);
thrust::device_vector<double> dX = hX;
thrust::device_vector<double> dout(n*m);
// thrust::transform(dX.begin(),dX.end(),dout.begin(), func(m,n,h,epsilon));
// for(int i = 0; i < n; i++){
// int start_addr = i*m;
// thrust::transform(&dX[start_addr],&dX[start_addr+m],&dout[start_addr], func3(m,&dX[start_addr],h,epsilon));
// }
thrust::transform(dX.begin(),dX.end(),dout.begin(),func4())
thrust::copy(dout.begin(), dout.end(), &output[0]);
// Stop the timer
// double end = omp_get_wtime();
cudaEventRecord(stop,NULL);
cudaEventSynchronize(stop);
float msecTotal = 0.0f;
cudaEventElapsedTime(&msecTotal, start, stop);
// Write the result into the file
for(int i = 0; i < n; i++) {
for(int j = 0; j < m; j++) {
if(j != m-1)
fprintf(fpOut, "%.4lf,", output[i*m + j] );
else
fprintf(fpOut, "%.4lf\n", output[i*m + j] );
}
}
fclose(fpIn);
fclose(fpOut);
double time = msecTotal; // time in ms
// Create a new file to log the execution time
FILE* fpLog = fopen("sequentialLog", "a");
fprintf(fpLog, "%d\t%d\t%lf\n", n, m, time);
fclose(fpLog);
free(input);
free(output);
}
else{
printf("Insufficient arguments");
}
return 0;
}
|
99db06deba4379a257340568fec68a0b363bcd95.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/scalar/scalar_device_view.cuh>
#include <cudf/table/table_view.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include "binary_ops.hpp"
namespace cudf {
namespace binops {
namespace compiled {
namespace {
template <typename Lhs, typename Rhs, typename Out>
struct apply_binop {
binary_operator op;
apply_binop(binary_operator op) : op(op) {}
CUDA_DEVICE_CALLABLE Out operator()(Lhs const& x, Rhs const& y) const
{
switch (op) {
case binary_operator::EQUAL: return this->equal(x, y);
case binary_operator::NOT_EQUAL: return this->not_equal(x, y);
case binary_operator::LESS: return this->less(x, y);
case binary_operator::GREATER: return this->greater(x, y);
case binary_operator::LESS_EQUAL: return this->less_equal(x, y);
case binary_operator::GREATER_EQUAL: return this->greater_equal(x, y);
default: return Out{};
}
}
CUDA_DEVICE_CALLABLE Out equal(Lhs const& x, Rhs const& y) const
{
return static_cast<Out>(x == y);
}
CUDA_DEVICE_CALLABLE Out not_equal(Lhs const& x, Rhs const& y) const
{
return static_cast<Out>(x != y);
}
CUDA_DEVICE_CALLABLE Out less(Lhs const& x, Rhs const& y) const
{
return static_cast<Out>(x < y);
}
CUDA_DEVICE_CALLABLE Out greater(Lhs const& x, Rhs const& y) const
{
return static_cast<Out>(x > y);
}
CUDA_DEVICE_CALLABLE Out less_equal(Lhs const& x, Rhs const& y) const
{
return static_cast<Out>(x <= y);
}
CUDA_DEVICE_CALLABLE Out greater_equal(Lhs const& x, Rhs const& y) const
{
return static_cast<Out>(x >= y);
}
};
template <typename Lhs, typename Rhs, typename Out>
struct apply_binop_scalar_lhs_rhs : apply_binop<Lhs, Rhs, Out> {
cudf::scalar_device_type_t<Rhs> scalar;
apply_binop_scalar_lhs_rhs(binary_operator op, cudf::scalar_device_type_t<Rhs> scalar)
: apply_binop<Lhs, Rhs, Out>(op), scalar(scalar)
{
}
CUDA_DEVICE_CALLABLE Out operator()(Lhs const& x) const
{
return apply_binop<Lhs, Rhs, Out>::operator()(x, scalar.value());
}
};
template <typename Lhs, typename Rhs, typename Out>
struct apply_binop_scalar_rhs_lhs : apply_binop<Rhs, Lhs, Out> {
cudf::scalar_device_type_t<Rhs> scalar;
apply_binop_scalar_rhs_lhs(binary_operator op, cudf::scalar_device_type_t<Rhs> scalar)
: apply_binop<Rhs, Lhs, Out>(op), scalar(scalar)
{
}
CUDA_DEVICE_CALLABLE Out operator()(Lhs const& x) const
{
return apply_binop<Rhs, Lhs, Out>::operator()(scalar.value(), x);
}
};
template <typename Lhs, typename Rhs, typename Out>
struct binary_op {
std::unique_ptr<column> operator()(column_view const& lhs,
scalar const& rhs,
binary_operator op,
data_type out_type,
bool const reversed,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
auto new_mask = binops::detail::scalar_col_valid_mask_and(lhs, rhs, stream, mr);
auto out = make_fixed_width_column(out_type,
lhs.size(),
std::move(new_mask),
rhs.is_valid(stream) ? cudf::UNKNOWN_NULL_COUNT : lhs.size(),
stream,
mr);
if (lhs.size() > 0 && rhs.is_valid(stream)) {
auto out_view = out->mutable_view();
auto out_itr = out_view.begin<Out>();
auto lhs_device_view = column_device_view::create(lhs, stream);
auto rhs_scalar = static_cast<cudf::scalar_type_t<Rhs> const&>(rhs);
auto rhs_scalar_view = get_scalar_device_view(rhs_scalar);
if (lhs.has_nulls()) {
auto lhs_itr = cudf::detail::make_null_replacement_iterator(*lhs_device_view, Lhs{});
reversed
? thrust::transform(rmm::exec_policy(stream)->on(stream),
lhs_itr,
lhs_itr + lhs.size(),
out_itr,
apply_binop_scalar_rhs_lhs<Lhs, Rhs, Out>{op, rhs_scalar_view})
: thrust::transform(rmm::exec_policy(stream)->on(stream),
lhs_itr,
lhs_itr + lhs.size(),
out_itr,
apply_binop_scalar_lhs_rhs<Lhs, Rhs, Out>{op, rhs_scalar_view});
} else {
auto lhs_itr = thrust::make_transform_iterator(
thrust::make_counting_iterator(size_type{0}),
[col = *lhs_device_view] __device__(size_type i) { return col.element<Lhs>(i); });
reversed
? thrust::transform(rmm::exec_policy(stream)->on(stream),
lhs_itr,
lhs_itr + lhs.size(),
out_itr,
apply_binop_scalar_rhs_lhs<Lhs, Rhs, Out>{op, rhs_scalar_view})
: thrust::transform(rmm::exec_policy(stream)->on(stream),
lhs_itr,
lhs_itr + lhs.size(),
out_itr,
apply_binop_scalar_lhs_rhs<Lhs, Rhs, Out>{op, rhs_scalar_view});
}
}
CHECK_CUDA(stream);
return out;
}
std::unique_ptr<column> operator()(column_view const& lhs,
column_view const& rhs,
binary_operator op,
data_type out_type,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
auto new_mask = bitmask_and(table_view({lhs, rhs}), mr, stream);
auto out = make_fixed_width_column(
out_type, lhs.size(), std::move(new_mask), cudf::UNKNOWN_NULL_COUNT, stream, mr);
if (lhs.size() > 0) {
auto out_view = out->mutable_view();
auto out_itr = out_view.begin<Out>();
auto lhs_device_view = column_device_view::create(lhs, stream);
auto rhs_device_view = column_device_view::create(rhs, stream);
if (lhs.has_nulls() && rhs.has_nulls()) {
auto lhs_itr = cudf::detail::make_null_replacement_iterator(*lhs_device_view, Lhs{});
auto rhs_itr = cudf::detail::make_null_replacement_iterator(*rhs_device_view, Rhs{});
thrust::transform(rmm::exec_policy(stream)->on(stream),
lhs_itr,
lhs_itr + lhs.size(),
rhs_itr,
out_itr,
apply_binop<Lhs, Rhs, Out>{op});
} else if (lhs.has_nulls()) {
auto lhs_itr = cudf::detail::make_null_replacement_iterator(*lhs_device_view, Lhs{});
auto rhs_itr = thrust::make_transform_iterator(
thrust::make_counting_iterator(size_type{0}),
[col = *rhs_device_view] __device__(size_type i) { return col.element<Rhs>(i); });
thrust::transform(rmm::exec_policy(stream)->on(stream),
lhs_itr,
lhs_itr + lhs.size(),
rhs_itr,
out_itr,
apply_binop<Lhs, Rhs, Out>{op});
} else if (rhs.has_nulls()) {
auto lhs_itr = thrust::make_transform_iterator(
thrust::make_counting_iterator(size_type{0}),
[col = *lhs_device_view] __device__(size_type i) { return col.element<Lhs>(i); });
auto rhs_itr = cudf::detail::make_null_replacement_iterator(*rhs_device_view, Rhs{});
thrust::transform(rmm::exec_policy(stream)->on(stream),
lhs_itr,
lhs_itr + lhs.size(),
rhs_itr,
out_itr,
apply_binop<Lhs, Rhs, Out>{op});
} else {
auto lhs_itr = thrust::make_transform_iterator(
thrust::make_counting_iterator(size_type{0}),
[col = *lhs_device_view] __device__(size_type i) { return col.element<Lhs>(i); });
auto rhs_itr = thrust::make_transform_iterator(
thrust::make_counting_iterator(size_type{0}),
[col = *rhs_device_view] __device__(size_type i) { return col.element<Rhs>(i); });
thrust::transform(rmm::exec_policy(stream)->on(stream),
lhs_itr,
lhs_itr + lhs.size(),
rhs_itr,
out_itr,
apply_binop<Lhs, Rhs, Out>{op});
}
}
CHECK_CUDA(stream);
return out;
}
};
// This functor does the actual comparison between string column value and a scalar string
// or between two string column values using a comparator
template <typename LhsDeviceViewT, typename RhsDeviceViewT, typename OutT, typename CompareFunc>
struct compare_functor {
LhsDeviceViewT const lhs_dev_view_; // Scalar or a column device view - lhs
RhsDeviceViewT const rhs_dev_view_; // Scalar or a column device view - rhs
CompareFunc const cfunc_; // Comparison function
compare_functor(LhsDeviceViewT const& lhs_dev_view,
RhsDeviceViewT const& rhs_dev_view,
CompareFunc cf)
: lhs_dev_view_(lhs_dev_view), rhs_dev_view_(rhs_dev_view), cfunc_(cf)
{
}
// This is used to compare a scalar and a column value
template <typename LhsViewT = LhsDeviceViewT, typename RhsViewT = RhsDeviceViewT>
CUDA_DEVICE_CALLABLE
typename std::enable_if_t<std::is_same<LhsViewT, column_device_view>::value &&
!std::is_same<RhsViewT, column_device_view>::value,
OutT>
operator()(cudf::size_type i) const
{
return cfunc_(lhs_dev_view_.is_valid(i),
rhs_dev_view_.is_valid(),
lhs_dev_view_.is_valid(i) ? lhs_dev_view_.template element<cudf::string_view>(i)
: cudf::string_view{},
rhs_dev_view_.is_valid() ? rhs_dev_view_.value() : cudf::string_view{});
}
// This is used to compare a scalar and a column value
template <typename LhsViewT = LhsDeviceViewT, typename RhsViewT = RhsDeviceViewT>
CUDA_DEVICE_CALLABLE
typename std::enable_if_t<!std::is_same<LhsViewT, column_device_view>::value &&
std::is_same<RhsViewT, column_device_view>::value,
OutT>
operator()(cudf::size_type i) const
{
return cfunc_(lhs_dev_view_.is_valid(),
rhs_dev_view_.is_valid(i),
lhs_dev_view_.is_valid() ? lhs_dev_view_.value() : cudf::string_view{},
rhs_dev_view_.is_valid(i) ? rhs_dev_view_.template element<cudf::string_view>(i)
: cudf::string_view{});
}
// This is used to compare 2 column values
template <typename LhsViewT = LhsDeviceViewT, typename RhsViewT = RhsDeviceViewT>
CUDA_DEVICE_CALLABLE
typename std::enable_if_t<std::is_same<LhsViewT, column_device_view>::value &&
std::is_same<RhsViewT, column_device_view>::value,
OutT>
operator()(cudf::size_type i) const
{
return cfunc_(lhs_dev_view_.is_valid(i),
rhs_dev_view_.is_valid(i),
lhs_dev_view_.is_valid(i) ? lhs_dev_view_.template element<cudf::string_view>(i)
: cudf::string_view{},
rhs_dev_view_.is_valid(i) ? rhs_dev_view_.template element<cudf::string_view>(i)
: cudf::string_view{});
}
};
// This functor performs null aware binop between two columns or a column and a scalar by
// iterating over them on the device
struct null_considering_binop {
auto get_device_view(cudf::scalar const& scalar_item) const
{
return get_scalar_device_view(
static_cast<cudf::scalar_type_t<cudf::string_view>&>(const_cast<scalar&>(scalar_item)));
}
auto get_device_view(column_device_view const& col_item) const { return col_item; }
template <typename LhsViewT, typename RhsViewT, typename OutT, typename CompareFunc>
void populate_out_col(LhsViewT const& lhsv,
RhsViewT const& rhsv,
cudf::size_type col_size,
hipStream_t stream,
CompareFunc cfunc,
OutT* out_col) const
{
// Create binop functor instance
compare_functor<LhsViewT, RhsViewT, OutT, CompareFunc> binop_func{lhsv, rhsv, cfunc};
// Execute it on every element
thrust::transform(rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(col_size),
out_col,
binop_func);
}
// This is invoked to perform comparison between cudf string types
template <typename LhsT, typename RhsT>
std::unique_ptr<column> operator()(LhsT const& lhs,
RhsT const& rhs,
binary_operator op,
data_type output_type,
cudf::size_type col_size,
rmm::mr::device_memory_resource* mr,
hipStream_t stream) const
{
std::unique_ptr<column> out;
// Create device views for inputs
auto const lhs_dev_view = get_device_view(lhs);
auto const rhs_dev_view = get_device_view(rhs);
switch (op) {
case binary_operator::NULL_EQUALS: {
// Validate input
CUDF_EXPECTS(output_type.id() == type_id::BOOL8, "Output column type has to be bool");
// Make a bool8 numeric output column
out = make_numeric_column(
data_type{type_id::BOOL8}, col_size, mask_state::ALL_VALID, stream, mr);
// Create a compare function lambda
auto equal_func = [] __device__(bool lhs_valid,
bool rhs_valid,
cudf::string_view lhs_value,
cudf::string_view rhs_value) {
if (!lhs_valid && !rhs_valid) return true;
if (lhs_valid && rhs_valid) return (lhs_value == rhs_value);
return false;
};
// Populate output column
populate_out_col(lhs_dev_view,
rhs_dev_view,
col_size,
stream,
equal_func,
mutable_column_view{*out}.begin<bool>());
break;
}
case binary_operator::NULL_MAX:
case binary_operator::NULL_MIN: {
// Validate input
CUDF_EXPECTS(output_type.id() == lhs.type().id(),
"Output column type should match input column type");
// Shallow copy of the resultant strings
rmm::device_vector<cudf::string_view> out_col_strings(col_size);
// Invalid output column strings - null rows
cudf::string_view const invalid_str{nullptr, 0};
// Create a compare function lambda
auto minmax_func = [op, invalid_str] __device__(bool lhs_valid,
bool rhs_valid,
cudf::string_view lhs_value,
cudf::string_view rhs_value) {
if (!lhs_valid && !rhs_valid)
return invalid_str;
else if (lhs_valid && rhs_valid) {
return (op == binary_operator::NULL_MAX)
? thrust::maximum<cudf::string_view>()(lhs_value, rhs_value)
: thrust::minimum<cudf::string_view>()(lhs_value, rhs_value);
} else if (lhs_valid)
return lhs_value;
else
return rhs_value;
};
// Populate output column
populate_out_col(
lhs_dev_view, rhs_dev_view, col_size, stream, minmax_func, out_col_strings.data().get());
// Create an output column with the resultant strings
out = make_strings_column(out_col_strings, invalid_str, stream, mr);
break;
}
default: {
CUDF_FAIL("Null aware binop not supported");
}
}
return out;
}
};
} // namespace
std::unique_ptr<column> binary_operation(scalar const& lhs,
column_view const& rhs,
binary_operator op,
data_type output_type,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
// hard-coded to only work with cudf::string_view so we don't explode compile times
CUDF_EXPECTS(lhs.type().id() == cudf::type_id::STRING, "Invalid/Unsupported lhs datatype");
CUDF_EXPECTS(rhs.type().id() == cudf::type_id::STRING, "Invalid/Unsupported rhs datatype");
if (is_null_dependent(op)) {
if (rhs.size() == 0) return cudf::make_empty_column(output_type);
auto rhs_device_view = cudf::column_device_view::create(rhs, stream);
return null_considering_binop{}(lhs, *rhs_device_view, op, output_type, rhs.size(), mr, stream);
} else {
CUDF_EXPECTS(is_boolean(output_type), "Invalid/Unsupported output datatype");
// Should pass the right type of scalar and column_view when specializing binary_op
return binary_op<cudf::string_view, cudf::string_view, bool>{}(
rhs, lhs, op, output_type, true, mr, stream);
}
}
std::unique_ptr<column> binary_operation(column_view const& lhs,
scalar const& rhs,
binary_operator op,
data_type output_type,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
// hard-coded to only work with cudf::string_view so we don't explode compile times
CUDF_EXPECTS(lhs.type().id() == cudf::type_id::STRING, "Invalid/Unsupported lhs datatype");
CUDF_EXPECTS(rhs.type().id() == cudf::type_id::STRING, "Invalid/Unsupported rhs datatype");
if (is_null_dependent(op)) {
if (lhs.size() == 0) return cudf::make_empty_column(output_type);
auto lhs_device_view = cudf::column_device_view::create(lhs, stream);
return null_considering_binop{}(*lhs_device_view, rhs, op, output_type, lhs.size(), mr, stream);
} else {
CUDF_EXPECTS(is_boolean(output_type), "Invalid/Unsupported output datatype");
return binary_op<cudf::string_view, cudf::string_view, bool>{}(
lhs, rhs, op, output_type, false, mr, stream);
}
}
std::unique_ptr<column> binary_operation(column_view const& lhs,
column_view const& rhs,
binary_operator op,
data_type output_type,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
// hard-coded to only work with cudf::string_view so we don't explode compile times
CUDF_EXPECTS(lhs.type().id() == cudf::type_id::STRING, "Invalid/Unsupported lhs datatype");
CUDF_EXPECTS(rhs.type().id() == cudf::type_id::STRING, "Invalid/Unsupported rhs datatype");
if (is_null_dependent(op)) {
CUDF_EXPECTS(lhs.size() == rhs.size(), "Column sizes do not match");
if (lhs.size() == 0) return cudf::make_empty_column(output_type);
auto lhs_device_view = cudf::column_device_view::create(lhs, stream);
auto rhs_device_view = cudf::column_device_view::create(rhs, stream);
return null_considering_binop{}(
*lhs_device_view, *rhs_device_view, op, output_type, lhs.size(), mr, stream);
} else {
CUDF_EXPECTS(is_boolean(output_type), "Invalid/Unsupported output datatype");
return binary_op<cudf::string_view, cudf::string_view, bool>{}(
lhs, rhs, op, output_type, mr, stream);
}
}
} // namespace compiled
} // namespace binops
} // namespace cudf
| 99db06deba4379a257340568fec68a0b363bcd95.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/scalar/scalar_device_view.cuh>
#include <cudf/table/table_view.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include "binary_ops.hpp"
namespace cudf {
namespace binops {
namespace compiled {
namespace {
template <typename Lhs, typename Rhs, typename Out>
struct apply_binop {
binary_operator op;
apply_binop(binary_operator op) : op(op) {}
CUDA_DEVICE_CALLABLE Out operator()(Lhs const& x, Rhs const& y) const
{
switch (op) {
case binary_operator::EQUAL: return this->equal(x, y);
case binary_operator::NOT_EQUAL: return this->not_equal(x, y);
case binary_operator::LESS: return this->less(x, y);
case binary_operator::GREATER: return this->greater(x, y);
case binary_operator::LESS_EQUAL: return this->less_equal(x, y);
case binary_operator::GREATER_EQUAL: return this->greater_equal(x, y);
default: return Out{};
}
}
CUDA_DEVICE_CALLABLE Out equal(Lhs const& x, Rhs const& y) const
{
return static_cast<Out>(x == y);
}
CUDA_DEVICE_CALLABLE Out not_equal(Lhs const& x, Rhs const& y) const
{
return static_cast<Out>(x != y);
}
CUDA_DEVICE_CALLABLE Out less(Lhs const& x, Rhs const& y) const
{
return static_cast<Out>(x < y);
}
CUDA_DEVICE_CALLABLE Out greater(Lhs const& x, Rhs const& y) const
{
return static_cast<Out>(x > y);
}
CUDA_DEVICE_CALLABLE Out less_equal(Lhs const& x, Rhs const& y) const
{
return static_cast<Out>(x <= y);
}
CUDA_DEVICE_CALLABLE Out greater_equal(Lhs const& x, Rhs const& y) const
{
return static_cast<Out>(x >= y);
}
};
template <typename Lhs, typename Rhs, typename Out>
struct apply_binop_scalar_lhs_rhs : apply_binop<Lhs, Rhs, Out> {
cudf::scalar_device_type_t<Rhs> scalar;
apply_binop_scalar_lhs_rhs(binary_operator op, cudf::scalar_device_type_t<Rhs> scalar)
: apply_binop<Lhs, Rhs, Out>(op), scalar(scalar)
{
}
CUDA_DEVICE_CALLABLE Out operator()(Lhs const& x) const
{
return apply_binop<Lhs, Rhs, Out>::operator()(x, scalar.value());
}
};
template <typename Lhs, typename Rhs, typename Out>
struct apply_binop_scalar_rhs_lhs : apply_binop<Rhs, Lhs, Out> {
cudf::scalar_device_type_t<Rhs> scalar;
apply_binop_scalar_rhs_lhs(binary_operator op, cudf::scalar_device_type_t<Rhs> scalar)
: apply_binop<Rhs, Lhs, Out>(op), scalar(scalar)
{
}
CUDA_DEVICE_CALLABLE Out operator()(Lhs const& x) const
{
return apply_binop<Rhs, Lhs, Out>::operator()(scalar.value(), x);
}
};
template <typename Lhs, typename Rhs, typename Out>
struct binary_op {
std::unique_ptr<column> operator()(column_view const& lhs,
scalar const& rhs,
binary_operator op,
data_type out_type,
bool const reversed,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
auto new_mask = binops::detail::scalar_col_valid_mask_and(lhs, rhs, stream, mr);
auto out = make_fixed_width_column(out_type,
lhs.size(),
std::move(new_mask),
rhs.is_valid(stream) ? cudf::UNKNOWN_NULL_COUNT : lhs.size(),
stream,
mr);
if (lhs.size() > 0 && rhs.is_valid(stream)) {
auto out_view = out->mutable_view();
auto out_itr = out_view.begin<Out>();
auto lhs_device_view = column_device_view::create(lhs, stream);
auto rhs_scalar = static_cast<cudf::scalar_type_t<Rhs> const&>(rhs);
auto rhs_scalar_view = get_scalar_device_view(rhs_scalar);
if (lhs.has_nulls()) {
auto lhs_itr = cudf::detail::make_null_replacement_iterator(*lhs_device_view, Lhs{});
reversed
? thrust::transform(rmm::exec_policy(stream)->on(stream),
lhs_itr,
lhs_itr + lhs.size(),
out_itr,
apply_binop_scalar_rhs_lhs<Lhs, Rhs, Out>{op, rhs_scalar_view})
: thrust::transform(rmm::exec_policy(stream)->on(stream),
lhs_itr,
lhs_itr + lhs.size(),
out_itr,
apply_binop_scalar_lhs_rhs<Lhs, Rhs, Out>{op, rhs_scalar_view});
} else {
auto lhs_itr = thrust::make_transform_iterator(
thrust::make_counting_iterator(size_type{0}),
[col = *lhs_device_view] __device__(size_type i) { return col.element<Lhs>(i); });
reversed
? thrust::transform(rmm::exec_policy(stream)->on(stream),
lhs_itr,
lhs_itr + lhs.size(),
out_itr,
apply_binop_scalar_rhs_lhs<Lhs, Rhs, Out>{op, rhs_scalar_view})
: thrust::transform(rmm::exec_policy(stream)->on(stream),
lhs_itr,
lhs_itr + lhs.size(),
out_itr,
apply_binop_scalar_lhs_rhs<Lhs, Rhs, Out>{op, rhs_scalar_view});
}
}
CHECK_CUDA(stream);
return out;
}
std::unique_ptr<column> operator()(column_view const& lhs,
column_view const& rhs,
binary_operator op,
data_type out_type,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
auto new_mask = bitmask_and(table_view({lhs, rhs}), mr, stream);
auto out = make_fixed_width_column(
out_type, lhs.size(), std::move(new_mask), cudf::UNKNOWN_NULL_COUNT, stream, mr);
if (lhs.size() > 0) {
auto out_view = out->mutable_view();
auto out_itr = out_view.begin<Out>();
auto lhs_device_view = column_device_view::create(lhs, stream);
auto rhs_device_view = column_device_view::create(rhs, stream);
if (lhs.has_nulls() && rhs.has_nulls()) {
auto lhs_itr = cudf::detail::make_null_replacement_iterator(*lhs_device_view, Lhs{});
auto rhs_itr = cudf::detail::make_null_replacement_iterator(*rhs_device_view, Rhs{});
thrust::transform(rmm::exec_policy(stream)->on(stream),
lhs_itr,
lhs_itr + lhs.size(),
rhs_itr,
out_itr,
apply_binop<Lhs, Rhs, Out>{op});
} else if (lhs.has_nulls()) {
auto lhs_itr = cudf::detail::make_null_replacement_iterator(*lhs_device_view, Lhs{});
auto rhs_itr = thrust::make_transform_iterator(
thrust::make_counting_iterator(size_type{0}),
[col = *rhs_device_view] __device__(size_type i) { return col.element<Rhs>(i); });
thrust::transform(rmm::exec_policy(stream)->on(stream),
lhs_itr,
lhs_itr + lhs.size(),
rhs_itr,
out_itr,
apply_binop<Lhs, Rhs, Out>{op});
} else if (rhs.has_nulls()) {
auto lhs_itr = thrust::make_transform_iterator(
thrust::make_counting_iterator(size_type{0}),
[col = *lhs_device_view] __device__(size_type i) { return col.element<Lhs>(i); });
auto rhs_itr = cudf::detail::make_null_replacement_iterator(*rhs_device_view, Rhs{});
thrust::transform(rmm::exec_policy(stream)->on(stream),
lhs_itr,
lhs_itr + lhs.size(),
rhs_itr,
out_itr,
apply_binop<Lhs, Rhs, Out>{op});
} else {
auto lhs_itr = thrust::make_transform_iterator(
thrust::make_counting_iterator(size_type{0}),
[col = *lhs_device_view] __device__(size_type i) { return col.element<Lhs>(i); });
auto rhs_itr = thrust::make_transform_iterator(
thrust::make_counting_iterator(size_type{0}),
[col = *rhs_device_view] __device__(size_type i) { return col.element<Rhs>(i); });
thrust::transform(rmm::exec_policy(stream)->on(stream),
lhs_itr,
lhs_itr + lhs.size(),
rhs_itr,
out_itr,
apply_binop<Lhs, Rhs, Out>{op});
}
}
CHECK_CUDA(stream);
return out;
}
};
// This functor does the actual comparison between string column value and a scalar string
// or between two string column values using a comparator
template <typename LhsDeviceViewT, typename RhsDeviceViewT, typename OutT, typename CompareFunc>
struct compare_functor {
LhsDeviceViewT const lhs_dev_view_; // Scalar or a column device view - lhs
RhsDeviceViewT const rhs_dev_view_; // Scalar or a column device view - rhs
CompareFunc const cfunc_; // Comparison function
compare_functor(LhsDeviceViewT const& lhs_dev_view,
RhsDeviceViewT const& rhs_dev_view,
CompareFunc cf)
: lhs_dev_view_(lhs_dev_view), rhs_dev_view_(rhs_dev_view), cfunc_(cf)
{
}
// This is used to compare a scalar and a column value
template <typename LhsViewT = LhsDeviceViewT, typename RhsViewT = RhsDeviceViewT>
CUDA_DEVICE_CALLABLE
typename std::enable_if_t<std::is_same<LhsViewT, column_device_view>::value &&
!std::is_same<RhsViewT, column_device_view>::value,
OutT>
operator()(cudf::size_type i) const
{
return cfunc_(lhs_dev_view_.is_valid(i),
rhs_dev_view_.is_valid(),
lhs_dev_view_.is_valid(i) ? lhs_dev_view_.template element<cudf::string_view>(i)
: cudf::string_view{},
rhs_dev_view_.is_valid() ? rhs_dev_view_.value() : cudf::string_view{});
}
// This is used to compare a scalar and a column value
template <typename LhsViewT = LhsDeviceViewT, typename RhsViewT = RhsDeviceViewT>
CUDA_DEVICE_CALLABLE
typename std::enable_if_t<!std::is_same<LhsViewT, column_device_view>::value &&
std::is_same<RhsViewT, column_device_view>::value,
OutT>
operator()(cudf::size_type i) const
{
return cfunc_(lhs_dev_view_.is_valid(),
rhs_dev_view_.is_valid(i),
lhs_dev_view_.is_valid() ? lhs_dev_view_.value() : cudf::string_view{},
rhs_dev_view_.is_valid(i) ? rhs_dev_view_.template element<cudf::string_view>(i)
: cudf::string_view{});
}
// This is used to compare 2 column values
template <typename LhsViewT = LhsDeviceViewT, typename RhsViewT = RhsDeviceViewT>
CUDA_DEVICE_CALLABLE
typename std::enable_if_t<std::is_same<LhsViewT, column_device_view>::value &&
std::is_same<RhsViewT, column_device_view>::value,
OutT>
operator()(cudf::size_type i) const
{
return cfunc_(lhs_dev_view_.is_valid(i),
rhs_dev_view_.is_valid(i),
lhs_dev_view_.is_valid(i) ? lhs_dev_view_.template element<cudf::string_view>(i)
: cudf::string_view{},
rhs_dev_view_.is_valid(i) ? rhs_dev_view_.template element<cudf::string_view>(i)
: cudf::string_view{});
}
};
// This functor performs null aware binop between two columns or a column and a scalar by
// iterating over them on the device
struct null_considering_binop {
auto get_device_view(cudf::scalar const& scalar_item) const
{
return get_scalar_device_view(
static_cast<cudf::scalar_type_t<cudf::string_view>&>(const_cast<scalar&>(scalar_item)));
}
auto get_device_view(column_device_view const& col_item) const { return col_item; }
template <typename LhsViewT, typename RhsViewT, typename OutT, typename CompareFunc>
void populate_out_col(LhsViewT const& lhsv,
RhsViewT const& rhsv,
cudf::size_type col_size,
cudaStream_t stream,
CompareFunc cfunc,
OutT* out_col) const
{
// Create binop functor instance
compare_functor<LhsViewT, RhsViewT, OutT, CompareFunc> binop_func{lhsv, rhsv, cfunc};
// Execute it on every element
thrust::transform(rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(col_size),
out_col,
binop_func);
}
// This is invoked to perform comparison between cudf string types
template <typename LhsT, typename RhsT>
std::unique_ptr<column> operator()(LhsT const& lhs,
RhsT const& rhs,
binary_operator op,
data_type output_type,
cudf::size_type col_size,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream) const
{
std::unique_ptr<column> out;
// Create device views for inputs
auto const lhs_dev_view = get_device_view(lhs);
auto const rhs_dev_view = get_device_view(rhs);
switch (op) {
case binary_operator::NULL_EQUALS: {
// Validate input
CUDF_EXPECTS(output_type.id() == type_id::BOOL8, "Output column type has to be bool");
// Make a bool8 numeric output column
out = make_numeric_column(
data_type{type_id::BOOL8}, col_size, mask_state::ALL_VALID, stream, mr);
// Create a compare function lambda
auto equal_func = [] __device__(bool lhs_valid,
bool rhs_valid,
cudf::string_view lhs_value,
cudf::string_view rhs_value) {
if (!lhs_valid && !rhs_valid) return true;
if (lhs_valid && rhs_valid) return (lhs_value == rhs_value);
return false;
};
// Populate output column
populate_out_col(lhs_dev_view,
rhs_dev_view,
col_size,
stream,
equal_func,
mutable_column_view{*out}.begin<bool>());
break;
}
case binary_operator::NULL_MAX:
case binary_operator::NULL_MIN: {
// Validate input
CUDF_EXPECTS(output_type.id() == lhs.type().id(),
"Output column type should match input column type");
// Shallow copy of the resultant strings
rmm::device_vector<cudf::string_view> out_col_strings(col_size);
// Invalid output column strings - null rows
cudf::string_view const invalid_str{nullptr, 0};
// Create a compare function lambda
auto minmax_func = [op, invalid_str] __device__(bool lhs_valid,
bool rhs_valid,
cudf::string_view lhs_value,
cudf::string_view rhs_value) {
if (!lhs_valid && !rhs_valid)
return invalid_str;
else if (lhs_valid && rhs_valid) {
return (op == binary_operator::NULL_MAX)
? thrust::maximum<cudf::string_view>()(lhs_value, rhs_value)
: thrust::minimum<cudf::string_view>()(lhs_value, rhs_value);
} else if (lhs_valid)
return lhs_value;
else
return rhs_value;
};
// Populate output column
populate_out_col(
lhs_dev_view, rhs_dev_view, col_size, stream, minmax_func, out_col_strings.data().get());
// Create an output column with the resultant strings
out = make_strings_column(out_col_strings, invalid_str, stream, mr);
break;
}
default: {
CUDF_FAIL("Null aware binop not supported");
}
}
return out;
}
};
} // namespace
std::unique_ptr<column> binary_operation(scalar const& lhs,
column_view const& rhs,
binary_operator op,
data_type output_type,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
// hard-coded to only work with cudf::string_view so we don't explode compile times
CUDF_EXPECTS(lhs.type().id() == cudf::type_id::STRING, "Invalid/Unsupported lhs datatype");
CUDF_EXPECTS(rhs.type().id() == cudf::type_id::STRING, "Invalid/Unsupported rhs datatype");
if (is_null_dependent(op)) {
if (rhs.size() == 0) return cudf::make_empty_column(output_type);
auto rhs_device_view = cudf::column_device_view::create(rhs, stream);
return null_considering_binop{}(lhs, *rhs_device_view, op, output_type, rhs.size(), mr, stream);
} else {
CUDF_EXPECTS(is_boolean(output_type), "Invalid/Unsupported output datatype");
// Should pass the right type of scalar and column_view when specializing binary_op
return binary_op<cudf::string_view, cudf::string_view, bool>{}(
rhs, lhs, op, output_type, true, mr, stream);
}
}
std::unique_ptr<column> binary_operation(column_view const& lhs,
scalar const& rhs,
binary_operator op,
data_type output_type,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
// hard-coded to only work with cudf::string_view so we don't explode compile times
CUDF_EXPECTS(lhs.type().id() == cudf::type_id::STRING, "Invalid/Unsupported lhs datatype");
CUDF_EXPECTS(rhs.type().id() == cudf::type_id::STRING, "Invalid/Unsupported rhs datatype");
if (is_null_dependent(op)) {
if (lhs.size() == 0) return cudf::make_empty_column(output_type);
auto lhs_device_view = cudf::column_device_view::create(lhs, stream);
return null_considering_binop{}(*lhs_device_view, rhs, op, output_type, lhs.size(), mr, stream);
} else {
CUDF_EXPECTS(is_boolean(output_type), "Invalid/Unsupported output datatype");
return binary_op<cudf::string_view, cudf::string_view, bool>{}(
lhs, rhs, op, output_type, false, mr, stream);
}
}
std::unique_ptr<column> binary_operation(column_view const& lhs,
column_view const& rhs,
binary_operator op,
data_type output_type,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
// hard-coded to only work with cudf::string_view so we don't explode compile times
CUDF_EXPECTS(lhs.type().id() == cudf::type_id::STRING, "Invalid/Unsupported lhs datatype");
CUDF_EXPECTS(rhs.type().id() == cudf::type_id::STRING, "Invalid/Unsupported rhs datatype");
if (is_null_dependent(op)) {
CUDF_EXPECTS(lhs.size() == rhs.size(), "Column sizes do not match");
if (lhs.size() == 0) return cudf::make_empty_column(output_type);
auto lhs_device_view = cudf::column_device_view::create(lhs, stream);
auto rhs_device_view = cudf::column_device_view::create(rhs, stream);
return null_considering_binop{}(
*lhs_device_view, *rhs_device_view, op, output_type, lhs.size(), mr, stream);
} else {
CUDF_EXPECTS(is_boolean(output_type), "Invalid/Unsupported output datatype");
return binary_op<cudf::string_view, cudf::string_view, bool>{}(
lhs, rhs, op, output_type, mr, stream);
}
}
} // namespace compiled
} // namespace binops
} // namespace cudf
|
52da5de216f09f53566978d1d81f5f5928b2f57e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../common/common.h"
#include <stdio.h>
#include <stdlib.h>
/**
* This example illustrates the effect on numerical accuracy of fusing a
* multiply-add into a single MAD instruction.
**/
__global__ void fmad_kernel(double x, double y, double *out)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid == 0)
{
*out = x * x + y;
}
}
double host_fmad_kernel(double x, double y)
{
return x * x + y;
}
int main(int argc, char **argv)
{
double *d_out, h_out;
double x = 2.891903;
double y = -3.980364;
double host_value = host_fmad_kernel(x, y);
CHECK(hipMalloc((void **)&d_out, sizeof(double)));
hipLaunchKernelGGL(( fmad_kernel), dim3(1), dim3(32), 0, 0, x, y, d_out);
CHECK(hipMemcpy(&h_out, d_out, sizeof(double),
hipMemcpyDeviceToHost));
if (host_value == h_out)
{
printf("The device output the same value as the host.\n");
}
else
{
printf("The device output a different value than the host, diff=%e.\n",
fabs(host_value - h_out));
}
return 0;
}
| 52da5de216f09f53566978d1d81f5f5928b2f57e.cu | #include "../common/common.h"
#include <stdio.h>
#include <stdlib.h>
/**
* This example illustrates the effect on numerical accuracy of fusing a
* multiply-add into a single MAD instruction.
**/
__global__ void fmad_kernel(double x, double y, double *out)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid == 0)
{
*out = x * x + y;
}
}
double host_fmad_kernel(double x, double y)
{
return x * x + y;
}
int main(int argc, char **argv)
{
double *d_out, h_out;
double x = 2.891903;
double y = -3.980364;
double host_value = host_fmad_kernel(x, y);
CHECK(cudaMalloc((void **)&d_out, sizeof(double)));
fmad_kernel<<<1, 32>>>(x, y, d_out);
CHECK(cudaMemcpy(&h_out, d_out, sizeof(double),
cudaMemcpyDeviceToHost));
if (host_value == h_out)
{
printf("The device output the same value as the host.\n");
}
else
{
printf("The device output a different value than the host, diff=%e.\n",
fabs(host_value - h_out));
}
return 0;
}
|
5a697253764cb8264c0948dbfc18907db2497647.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @file
* Copyright (c) 2011-2020, CESNET z.s.p.o
* Copyright (c) 2011, Silicon Genome, LLC.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "gpujpeg_huffman_gpu_decoder.h"
#include "gpujpeg_util.h"
/**
* Entry of pre-built Huffman fast-decoding table.
*/
struct gpujpeg_table_huffman_decoder_entry {
int value_nbits;
};
/** Number of code bits to be checked first (with high chance for the code to fit into this number of bits). */
#define QUICK_CHECK_BITS 10
#define QUICK_TABLE_ITEMS (4 * (1 << QUICK_CHECK_BITS))
// TODO: try to tweak QUICK table size and memory space
struct gpujpeg_huffman_gpu_decoder {
/**
* 4 pre-built tables for faster Huffman decoding (codewords up-to 16 bit length):
* - 0x00000 to 0x0ffff: luminance DC table
* - 0x10000 to 0x1ffff: luminance AC table
* - 0x20000 to 0x2ffff: chrominance DC table
* - 0x30000 to 0x3ffff: chrominance AC table
*
* Each entry consists of:
* - Number of bits of code corresponding to this entry (0 - 16, both inclusive) - bits 4 to 8
* - Number of run-length coded zeros before currently decoded coefficient + 1 (1 - 64, both inclusive) - bits 9 to 15
* - Number of bits representing the value of currently decoded coefficient (0 - 15, both inclusive) - bits 0 to 3
* @code
* bit #: 15 9 8 4 3 0
* +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
* value: | RLE zero count | code bit size | value bit size|
* +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
* @endcode
*/
uint16_t *d_tables_full;
/** Table with same format as the full table, except that all-zero-entry means that the full table should be consulted. */
uint16_t *d_tables_quick;
/** Natural order */
int *d_order_natural;
};
#ifdef HUFFMAN_GPU_CONST_TABLES
/** Same table as above, but copied into constant memory */
__constant__ uint16_t gpujpeg_huffman_gpu_decoder_tables_quick_const[QUICK_TABLE_ITEMS];
/** Natural order in constant memory */
__constant__ int gpujpeg_huffman_gpu_decoder_order_natural[GPUJPEG_ORDER_NATURAL_SIZE];
#endif
// /**
// * Fill more bit to current get buffer
// *
// * @param get_bits
// * @param get_buff
// * @param data
// * @param data_size
// * @return void
// */
// __device__ inline void
// gpujpeg_huffman_gpu_decoder_decode_fill_bit_buffer(int & get_bits, int & get_buff, uint8_t* & data, int & data_size)
// {
// while ( get_bits < 25 ) {
// //Are there some data?
// if( data_size > 0 ) {
// // Attempt to read a byte
// //printf("read byte %X 0x%X\n", (int)data, (unsigned char)*data);
// unsigned char uc = *data++;
// data_size--;
//
// // If it's 0xFF, check and discard stuffed zero byte
// if ( uc == 0xFF ) {
// while ( uc == 0xFF ) {
// //printf("read byte %X 0x%X\n", (int)data, (unsigned char)*data);
// uc = *data++;
// data_size--;
// }
//
// if ( uc == 0 ) {
// // Found FF/00, which represents an FF data byte
// uc = 0xFF;
// } else {
// // There should be enough bits still left in the data segment;
// // if so, just break out of the outer while loop.
// //if (m_nGetBits >= nbits)
// if ( get_bits >= 0 )
// break;
// }
// }
//
// get_buff = (get_buff << 8) | ((int) uc);
// get_bits += 8;
// }
// else
// break;
// }
// }
/**
* Loads at least specified number of bits into the register
*/
__device__ inline void
gpujpeg_huffman_gpu_decoder_load_bits(
const unsigned int required_bit_count, unsigned int & r_bit,
unsigned int & r_bit_count, uint4 * const s_byte, unsigned int & s_byte_idx
) {
// Add bytes until have enough
while(r_bit_count < required_bit_count) {
// Load byte value and posibly skip next stuffed byte if loaded byte's value is 0xFF
const uint8_t byte_value = ((const uint8_t*)s_byte)[s_byte_idx++];
if((uint8_t)0xFF == byte_value) {
s_byte_idx++;
}
// Add newly loaded byte to the buffer, updating bit count
r_bit = (r_bit << 8) + byte_value;
r_bit_count += 8;
}
}
/**
* Get bits
*
* @param nbits Number of bits to get
* @param get_bits
* @param get_buff
* @param data
* @param data_size
* @return bits
*/
__device__ inline unsigned int
gpujpeg_huffman_gpu_decoder_get_bits(
const unsigned int nbits, unsigned int & r_bit, unsigned int & r_bit_count,
uint4 * const s_byte, unsigned int & s_byte_idx)
{
// load bits into the register if haven't got enough
gpujpeg_huffman_gpu_decoder_load_bits(nbits, r_bit, r_bit_count, s_byte, s_byte_idx);
// update remaining bit count
r_bit_count -= nbits;
// return bits
return (r_bit >> r_bit_count) & ((1 << nbits) - 1);
}
/**
* Gets bits without removing them from the buffer.
*/
__device__ inline unsigned int
gpujpeg_huffman_gpu_decoder_peek_bits(
const unsigned int nbits, unsigned int & r_bit, unsigned int & r_bit_count,
uint4 * const s_byte, unsigned int & s_byte_idx)
{
// load bits into the register if haven't got enough
gpujpeg_huffman_gpu_decoder_load_bits(nbits, r_bit, r_bit_count, s_byte, s_byte_idx);
// return bits
return (r_bit >> (r_bit_count - nbits)) & ((1 << nbits) - 1);
}
/**
* Removes some bits from the buffer (assumes that they are there)
*/
__device__ inline void
gpujpeg_huffman_gpu_decoder_discard_bits(const unsigned int nb, unsigned int, unsigned int & r_bit_count) {
r_bit_count -= nb;
}
/**
* Special Huffman decode:
* (1) For codes with length > 8
* (2) For codes with length < 8 while data is finished
*
* @param table
* @param min_bits
* @param get_bits
* @param get_buff
* @param data
* @param data_size
* @return int
*/
__device__ inline int
gpujpeg_huffman_gpu_decoder_decode_special_decode(
const struct gpujpeg_table_huffman_decoder* const table, int min_bits, unsigned int & r_bit,
unsigned int & r_bit_count, uint4 * const s_byte, unsigned int & s_byte_idx)
{
// HUFF_DECODE has determined that the code is at least min_bits
// bits long, so fetch that many bits in one swoop.
int code = gpujpeg_huffman_gpu_decoder_get_bits(min_bits, r_bit, r_bit_count, s_byte, s_byte_idx);
// Collect the rest of the Huffman code one bit at a time.
// This is per Figure F.16 in the JPEG spec.
int l = min_bits;
while ( code > table->maxcode[l] ) {
code <<= 1;
code |= gpujpeg_huffman_gpu_decoder_get_bits(1, r_bit, r_bit_count, s_byte, s_byte_idx);
l++;
}
// With garbage input we may reach the sentinel value l = 17.
if ( l > 16 ) {
// Fake a zero as the safest result
return 0;
}
return table->huffval[table->valptr[l] + (int)(code - table->mincode[l])];
}
/**
* To find dc or ac value according to code and its bit length s
*/
__device__ inline int
gpujpeg_huffman_gpu_decoder_value_from_category(int nbits, int code)
{
// TODO: try to replace with __constant__ table lookup
return code < ((1 << nbits) >> 1) ? (code + ((-1) << nbits) + 1) : code;
// // Method 1:
// // On some machines, a shift and add will be faster than a table lookup.
// // #define HUFF_EXTEND(x,s) \
// // ((x)< (1<<((s)-1)) ? (x) + (((-1)<<(s)) + 1) : (x))
//
// // Method 2: Table lookup
// // If (offset < half[category]), then value is below zero
// // Otherwise, value is above zero, and just the offset
// // entry n is 2**(n-1)
// const int half[16] = {
// 0x0000, 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040,
// 0x0080, 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000
// };
//
// //start[i] is the starting value in this category; surely it is below zero
// // entry n is (-1 << n) + 1
// const int start[16] = {
// 0, ((-1)<<1) + 1, ((-1)<<2) + 1, ((-1)<<3) + 1, ((-1)<<4) + 1,
// ((-1)<<5) + 1, ((-1)<<6) + 1, ((-1)<<7) + 1, ((-1)<<8) + 1,
// ((-1)<<9) + 1, ((-1)<<10) + 1, ((-1)<<11) + 1, ((-1)<<12) + 1,
// ((-1)<<13) + 1, ((-1)<<14) + 1, ((-1)<<15) + 1
// };
//
// return (code < half[nbits]) ? (code + start[nbits]) : code;
}
/**
* Decodes next coefficient, updating its output index
*
* @param table
* @param get_bits
* @param get_buff
* @param data
* @param data_size
* @return int
*/
__device__ inline int
gpujpeg_huffman_gpu_decoder_get_coefficient(struct gpujpeg_huffman_gpu_decoder huffman_gpu_decoder,
unsigned int & r_bit, unsigned int & r_bit_count, uint4* const s_byte,
unsigned int & s_byte_idx, const unsigned int table_offset, unsigned int & coefficient_idx)
{
// Peek next 16 bits and use them as an index into decoder table to find all the info.
const unsigned int table_idx = table_offset + gpujpeg_huffman_gpu_decoder_peek_bits(16, r_bit, r_bit_count, s_byte, s_byte_idx);
// Try the quick table first (use the full table only if not succeded with the quick table)
#ifdef HUFFMAN_GPU_CONST_TABLES
unsigned int packed_info = gpujpeg_huffman_gpu_decoder_tables_quick_const[table_idx >> (16 - QUICK_CHECK_BITS)];
#else
unsigned int packed_info = huffman_gpu_decoder.d_tables_quick[table_idx >> (16 - QUICK_CHECK_BITS)];
#endif
if(0 == packed_info) {
packed_info = huffman_gpu_decoder.d_tables_full[table_idx];
}
// remove the right number of bits from the bit buffer
gpujpeg_huffman_gpu_decoder_discard_bits((packed_info >> 4) & 0x1F, r_bit, r_bit_count);
// update coefficient index by skipping run-length encoded zeros
coefficient_idx += packed_info >> 9;
// read coefficient bits and decode the coefficient from them
const unsigned int value_nbits = packed_info & 0xF;
const unsigned int value_code = gpujpeg_huffman_gpu_decoder_get_bits(value_nbits, r_bit, r_bit_count, s_byte, s_byte_idx);
// return deocded coefficient
return gpujpeg_huffman_gpu_decoder_value_from_category(value_nbits, value_code);
}
/**
* Decode one 8x8 block
*
* @return 0 if succeeds, otherwise nonzero
*/
__device__ inline int
gpujpeg_huffman_gpu_decoder_decode_block(
struct gpujpeg_huffman_gpu_decoder huffman_gpu_decoder,
int & dc, int16_t* const data_output, const unsigned int dc_table_offset, const unsigned int ac_table_offset,
unsigned int & r_bit, unsigned int & r_bit_count, uint4* const s_byte,
unsigned int & s_byte_idx, const uint4* & d_byte, unsigned int & d_byte_chunk_count)
{
// TODO: try unified decoding of DC/AC coefficients
// Index of next coefficient to be decoded (in ZIG-ZAG order)
unsigned int coefficient_idx = 0;
// Section F.2.2.1: decode the DC coefficient difference
// Get the coefficient value (using DC coding table)
int dc_coefficient_value = gpujpeg_huffman_gpu_decoder_get_coefficient(huffman_gpu_decoder, r_bit, r_bit_count, s_byte, s_byte_idx, dc_table_offset, coefficient_idx);
// Convert DC difference to actual value, update last_dc_val
dc = dc_coefficient_value += dc;
// Output the DC coefficient (assumes gpujpeg_natural_order[0] = 0)
// TODO: try to skip saving of zero coefficients
data_output[0] = dc_coefficient_value;
// TODO: error check: coefficient_idx must still be 0 in valid codestreams
coefficient_idx = 1;
// Section F.2.2.2: decode the AC coefficients
// Since zeroes are skipped, output area must be cleared beforehand
do {
// Possibly load more bytes into shared buffer from global memory
if(s_byte_idx >= 16) {
// Move remaining bytes to begin and update index of next byte
s_byte[0] = s_byte[1];
s_byte_idx -= 16;
// Load another byte chunk from global memory only if there is one
if(d_byte_chunk_count) {
s_byte[1] = *(d_byte++);
d_byte_chunk_count--;
}
}
// decode next coefficient, updating its destination index
const int coefficient_value = gpujpeg_huffman_gpu_decoder_get_coefficient(huffman_gpu_decoder, r_bit, r_bit_count, s_byte, s_byte_idx, ac_table_offset, coefficient_idx);
// stop with this block if have all coefficients
if(coefficient_idx > 64) {
break;
}
// save the coefficient TODO: try to ommit saving 0 coefficients
#ifdef HUFFMAN_GPU_CONST_TABLES
data_output[gpujpeg_huffman_gpu_decoder_order_natural[coefficient_idx - 1]] = coefficient_value;
#else
data_output[huffman_gpu_decoder.d_order_natural[coefficient_idx - 1]] = coefficient_value;
#endif
} while(coefficient_idx < 64);
return 0;
}
/**
 * Huffman decoder kernel - each thread decodes one whole segment of the
 * compressed stream (restart intervals make segments independent).
 *
 * Launch: 1D grid, THREADS_PER_TBLOCK threads per block, one thread per segment.
 * Shared memory: 32 bytes per thread (two 16-byte chunks of compressed data).
 *
 * @tparam SINGLE_COMP  true for non-interleaved scans (one component per scan),
 *                      false for interleaved scans (driven by d_block_list)
 * @tparam THREADS_PER_TBLOCK  threadblock size used for the launch
 * @return void
 */
template <bool SINGLE_COMP, int THREADS_PER_TBLOCK>
__global__ void
#if __CUDA_ARCH__ < 200
__launch_bounds__(THREADS_PER_TBLOCK, 2)
#else
__launch_bounds__(THREADS_PER_TBLOCK, 6)
#endif
gpujpeg_huffman_decoder_decode_kernel(
    struct gpujpeg_huffman_gpu_decoder huffman_gpu_decoder,
    struct gpujpeg_component* d_component,
    struct gpujpeg_segment* d_segment,
    int comp_count,
    int segment_count,
    uint8_t* d_data_compressed,
    const uint64_t* d_block_list,
    int16_t* d_data_quantized
) {
    // One thread per segment; surplus threads of the last block exit early
    int segment_index = blockIdx.x * THREADS_PER_TBLOCK + threadIdx.x;
    if ( segment_index >= segment_count )
        return;
    struct gpujpeg_segment* segment = &d_segment[segment_index];
    // Byte buffers in shared memory
    __shared__ uint4 s_byte_all[2 * THREADS_PER_TBLOCK]; // 32 bytes per thread
    uint4 * const s_byte = s_byte_all + 2 * threadIdx.x;
    // Last DC coefficient values TODO: try to move into shared memory
    int dc[GPUJPEG_MAX_COMPONENT_COUNT];
    for ( int comp = 0; comp < GPUJPEG_MAX_COMPONENT_COUNT; comp++ )
        dc[comp] = 0;
    // Get aligned compressed data chunk pointer and load first 2 chunks of the data
    const unsigned int d_byte_begin_idx = segment->data_compressed_index;
    const unsigned int d_byte_begin_idx_aligned = d_byte_begin_idx & ~15; // loading 16byte chunks
    const uint4* d_byte = (uint4*)(d_data_compressed + d_byte_begin_idx_aligned);
    // Get number of remaining global memory byte chunks (not to read bytes out of buffer)
    const unsigned int d_byte_end_idx_aligned = (d_byte_begin_idx + segment->data_compressed_size + 15) & ~15;
    unsigned int d_byte_chunk_count = (d_byte_end_idx_aligned - d_byte_begin_idx_aligned) / 16;
    // Load first 2 chunks of compressed data into the shared memory buffer and remember index of first code byte (skipping bytes read due to alignment)
    s_byte[0] = d_byte[0];
    s_byte[1] = d_byte[1];
    d_byte += 2;
    // clamp-then-subtract keeps the unsigned count from wrapping below zero
    d_byte_chunk_count = max(d_byte_chunk_count, 2) - 2;
    unsigned int s_byte_idx = d_byte_begin_idx - d_byte_begin_idx_aligned;
    // bits loaded into the register and their count
    unsigned int r_bit_count = 0;
    unsigned int r_bit = 0; // LSB-aligned
    // Non-interleaving mode
    if ( SINGLE_COMP ) {
        // Get component for current scan
        const struct gpujpeg_component* const component = d_component + segment->scan_index;
        // Get huffman tables offset
        const unsigned int dc_table_offset = component->dc_huff_idx * 0x20000;
        const unsigned int ac_table_offset = component->ac_huff_idx * 0x20000 + 0x10000;
        // Size of MCUs in this segment's component
        const int component_mcu_size = component->mcu_size;
        // Pointer to first MCU's output block
        int16_t* block = component->d_data_quantized + segment->scan_segment_index * component->segment_mcu_count * component_mcu_size;
        // Decode MCUs in segment (one 8x8 block per MCU in this mode)
        for ( int mcu_index = 0; mcu_index < segment->mcu_count; mcu_index++ ) {
            // Decode 8x8 block; a nonzero result aborts the rest of the segment
            if ( gpujpeg_huffman_gpu_decoder_decode_block(huffman_gpu_decoder, dc[0], block, dc_table_offset, ac_table_offset, r_bit, r_bit_count, s_byte, s_byte_idx, d_byte, d_byte_chunk_count) != 0 )
                break;
            // advance to next block
            block += component_mcu_size;
        }
    }
    // Interleaving mode
    else {
        // Pointer to segment's list of 8x8 blocks and their count
        const uint64_t* packed_block_info_ptr = d_block_list + segment->block_index_list_begin;
        // Decode all blocks listed for this segment
        for(int block_count = segment->block_count; block_count--;) {
            // Get pointer to next block input data and info about its color type
            const uint64_t packed_block_info = *(packed_block_info_ptr++);
            // Low 7 bits select the component (and thus its DC predictor)
            const int last_dc_idx = packed_block_info & 0x7f;
            // Get offset to right part of huffman table
            const unsigned int dc_huffman_table_offset = d_component[last_dc_idx].dc_huff_idx * 0x20000;
            const unsigned int ac_huffman_table_offset = d_component[last_dc_idx].ac_huff_idx * 0x20000 + 0x10000;
            // Remaining bits encode the output offset of the block
            int16_t* block = d_data_quantized + (packed_block_info >> 8);
            // Decode 8x8 block
            gpujpeg_huffman_gpu_decoder_decode_block(huffman_gpu_decoder, dc[last_dc_idx], block, dc_huffman_table_offset, ac_huffman_table_offset, r_bit, r_bit_count, s_byte, s_byte_idx, d_byte, d_byte_chunk_count);
        }
        // // Encode MCUs in segment
        // for ( int mcu_index = 0; mcu_index < segment->mcu_count; mcu_index++ ) {
        //
        //
        //
        //
        //
        //
        //
        //
        //     //assert(segment->scan_index == 0);
        //     for ( int comp = 0; comp < comp_count; comp++ ) {
        //         struct gpujpeg_component* component = &d_component[comp];
        //
        //         // Prepare mcu indexes
        //         int mcu_index_x = (segment_index * component->segment_mcu_count + mcu_index) % component->mcu_count_x;
        //         int mcu_index_y = (segment_index * component->segment_mcu_count + mcu_index) / component->mcu_count_x;
        //         // Compute base data index
        //         int data_index_base = mcu_index_y * (component->mcu_size * component->mcu_count_x) + mcu_index_x * (component->mcu_size_x * GPUJPEG_BLOCK_SIZE);
        //
        //         // For all vertical 8x8 blocks
        //         for ( int y = 0; y < component->sampling_factor.vertical; y++ ) {
        //             // Compute base row data index
        //             int data_index_row = data_index_base + y * (component->mcu_count_x * component->mcu_size_x * GPUJPEG_BLOCK_SIZE);
        //             // For all horizontal 8x8 blocks
        //             for ( int x = 0; x < component->sampling_factor.horizontal; x++ ) {
        //                 // Compute 8x8 block data index
        //                 int data_index = data_index_row + x * GPUJPEG_BLOCK_SIZE * GPUJPEG_BLOCK_SIZE;
        //
        //                 // Get component data for MCU
        //                 int16_t* block = &component->d_data_quantized[data_index];
        //
        //                 // Get coder parameters
        //                 int & component_dc = dc[comp];
        //
        //                 // Get huffman tables offset
        //                 const unsigned int table_offset = component->type == GPUJPEG_COMPONENT_LUMINANCE ? 0x00000 : 0x20000;
        //
        //                 // Encode 8x8 block
        //                 gpujpeg_huffman_gpu_decoder_decode_block(component_dc, block, table_offset, r_bit, r_bit_count, s_byte, s_byte_idx, d_byte, d_byte_chunk_count);
        //             }
        //         }
        //     }
        // }
    }
}
/**
 * Setup of one Huffman fast-decoding table entry.
 * Each call "decodes" one codeword from the given 16-bit prefix and packs the
 * result (RLE zero count, codeword bit length, value bit length) into the
 * full table; threads whose bits fall on a quick-table boundary also fill
 * the quick table.
 *
 * @param bits bits to extract one codeword from (first bit is bit #15, then #14, ... last is #0)
 * @param d_table_src source (slow-decoding) table pointer
 * @param table_idx index of the destination table (0..3: Y-DC, Y-AC, CbCr-DC, CbCr-AC)
 */
__device__ void
gpujpeg_huffman_gpu_decoder_table_setup(
    struct gpujpeg_huffman_gpu_decoder huffman_gpu_decoder,
    const int bits,
    const struct gpujpeg_table_huffman_decoder* const d_table_src,
    const int table_idx
) {
    // Decode one codeword from given bits to get following:
    //  - minimal number of bits actually needed to decode the codeword (up to 16 bits, 0 for invalid ones)
    //  - category ID represented by the codeword, consisting from:
    //      - number of run-length-coded preceding zeros (up to 16, or 63 for both special end-of block symbol or invalid codewords)
    //      - bit-size of the actual value of coefficient (up to 16, 0 for invalid ones)
    int code_nbits = 1, category_id = 0;
    // First, decode codeword length (This is per Figure F.16 in the JPEG spec.)
    int code_value = bits >> 15; // only single bit initially
    while ( code_value > d_table_src->maxcode[code_nbits] ) {
        code_value = bits >> (16 - ++code_nbits); // not enough to decide => try more bits
    }
    // With garbage input we may reach the sentinel value l = 17.
    if ( code_nbits > 16 ) {
        code_nbits = 0;
        // category ID remains 0 for invalid symbols from garbage input
    } else {
        category_id = d_table_src->huffval[d_table_src->valptr[code_nbits] + code_value - d_table_src->mincode[code_nbits]];
    }
    // decompose category number into 1 + number of run-length coded zeros and length of the value
    // (special category #0 contains all invalid codes and special end-of-block code -- all of those codes
    // should terminate block decoding => use 64 run-length zeros and 0 value bits for such symbols)
    const int value_nbits = 0xF & category_id;
    const int rle_zero_count = category_id ? min(1 + (category_id >> 4), 64) : 64;
    // save all the info into the right place in the destination table
    // (layout: RLE zeros in bits 15..9, code length in bits 8..4, value length in bits 3..0)
    const int packed_info = (rle_zero_count << 9) + (code_nbits << 4) + value_nbits;
    huffman_gpu_decoder.d_tables_full[(table_idx << 16) + bits] = packed_info;
    // some threads also save entries into the quick table
    const int dest_idx_quick = bits >> (16 - QUICK_CHECK_BITS);
    if(bits == (dest_idx_quick << (16 - QUICK_CHECK_BITS))) {
        // save info also into the quick table if number of required bits is less than quick
        // check bit count, otherwise put 0 there to indicate that full table lookup consultation is needed
        huffman_gpu_decoder.d_tables_quick[(table_idx << QUICK_CHECK_BITS) + dest_idx_quick] = code_nbits <= QUICK_CHECK_BITS ? packed_info : 0;
    }
}
/**
 * Huffman decoder table setup kernel.
 * Builds the fast-decoding tables from the four slow-decoding source tables;
 * each thread processes one unique 16-bit prefix in each of the 4 tables.
 */
__global__ void
gpujpeg_huffman_decoder_table_kernel(
    struct gpujpeg_huffman_gpu_decoder huffman_gpu_decoder,
    const struct gpujpeg_table_huffman_decoder* const d_table_y_dc,
    const struct gpujpeg_table_huffman_decoder* const d_table_y_ac,
    const struct gpujpeg_table_huffman_decoder* const d_table_cbcr_dc,
    const struct gpujpeg_table_huffman_decoder* const d_table_cbcr_ac
) {
    // Source tables in destination-index order (0..3)
    const struct gpujpeg_table_huffman_decoder* const sources[4] = {
        d_table_y_dc, d_table_y_ac, d_table_cbcr_dc, d_table_cbcr_ac
    };
    // This thread's unique 16-bit prefix
    const int bits = threadIdx.x + blockIdx.x * blockDim.x;
    // Fill the corresponding entry of each of the 4 fast tables
    for ( int table = 0; table < 4; table++ ) {
        gpujpeg_huffman_gpu_decoder_table_setup(huffman_gpu_decoder, bits, sources[table], table);
    }
}
/* Documented at declaration */
struct gpujpeg_huffman_gpu_decoder *
gpujpeg_huffman_gpu_decoder_init()
{
    // Zero-initialized host structure: NULL device pointers make partial
    // cleanup via gpujpeg_huffman_gpu_decoder_destroy safe on any error path.
    struct gpujpeg_huffman_gpu_decoder *huffman_gpu_decoder = (struct gpujpeg_huffman_gpu_decoder *) calloc(1, sizeof(struct gpujpeg_huffman_gpu_decoder));
    // BUGFIX: calloc result was used without a NULL check
    if ( huffman_gpu_decoder == NULL ) {
        return NULL;
    }
#ifdef HUFFMAN_GPU_CONST_TABLES
    // Copy natural order to constant device memory
    hipMemcpyToSymbol(
        gpujpeg_huffman_gpu_decoder_order_natural,
        gpujpeg_order_natural,
        GPUJPEG_ORDER_NATURAL_SIZE * sizeof(int),
        0,
        hipMemcpyHostToDevice
    );
    gpujpeg_cuda_check_error("Huffman decoder init", gpujpeg_huffman_gpu_decoder_destroy(huffman_gpu_decoder); return NULL);
#else
    // Natural order table lives in global memory when constant tables are disabled
    hipMalloc((void**)&huffman_gpu_decoder->d_order_natural, GPUJPEG_ORDER_NATURAL_SIZE * sizeof(int));
    gpujpeg_cuda_check_error("Huffman GPU decoder natural order table allocation", gpujpeg_huffman_gpu_decoder_destroy(huffman_gpu_decoder); return NULL);
    hipMemcpy(
        huffman_gpu_decoder->d_order_natural,
        gpujpeg_order_natural,
        GPUJPEG_ORDER_NATURAL_SIZE * sizeof(int),
        hipMemcpyHostToDevice
    );
    gpujpeg_cuda_check_error("Huffman GPU decoder natural order table copy", gpujpeg_huffman_gpu_decoder_destroy(huffman_gpu_decoder); return NULL);
#endif
    // Full fast-decoding table: 4 tables x 65536 16-bit entries
    hipMalloc((void**)&huffman_gpu_decoder->d_tables_full, 4 * (1 << 16) * sizeof(uint16_t));
    gpujpeg_cuda_check_error("Huffman GPU decoder full table allocation", gpujpeg_huffman_gpu_decoder_destroy(huffman_gpu_decoder); return NULL);
    // Quick table for codewords short enough to be resolved without the full table
    hipMalloc((void**)&huffman_gpu_decoder->d_tables_quick, QUICK_TABLE_ITEMS * sizeof(uint16_t));
    gpujpeg_cuda_check_error("Huffman GPU decoder quick table allocation", gpujpeg_huffman_gpu_decoder_destroy(huffman_gpu_decoder); return NULL);
    return huffman_gpu_decoder;
}
/** Releases all device and host memory owned by the decoder; NULL is a harmless no-op. */
void
gpujpeg_huffman_gpu_decoder_destroy(struct gpujpeg_huffman_gpu_decoder *huffman_gpu_decoder)
{
    if ( huffman_gpu_decoder != NULL ) {
        // Free device-side lookup tables first, then the host structure itself
        hipFree(huffman_gpu_decoder->d_order_natural);
        hipFree(huffman_gpu_decoder->d_tables_full);
        hipFree(huffman_gpu_decoder->d_tables_quick);
        free(huffman_gpu_decoder);
    }
}
/* Documented at declaration */
/* Returns 0 on success, -1 on any failure. */
int
gpujpeg_huffman_gpu_decoder_decode(struct gpujpeg_decoder* decoder)
{
    // Get coder
    struct gpujpeg_coder* coder = &decoder->coder;
    assert(coder->param.restart_interval > 0);
    // Number of components per MCU: 1 unless the scan is interleaved
    int comp_count = 1;
    if (coder->param.interleaved == 1) {
        comp_count = coder->param_image.comp_count;
    }
    assert(comp_count >= 1 && comp_count <= GPUJPEG_MAX_COMPONENT_COUNT);
    // Number of decoder kernel threads per each threadblock
    enum { THREADS_PER_TBLOCK = 192 };
    // Configure more shared memory for both kernels
    hipFuncSetCacheConfig(gpujpeg_huffman_decoder_table_kernel, hipFuncCachePreferShared);
    hipFuncSetCacheConfig(gpujpeg_huffman_decoder_decode_kernel<true, THREADS_PER_TBLOCK>, hipFuncCachePreferShared);
    hipFuncSetCacheConfig(gpujpeg_huffman_decoder_decode_kernel<false, THREADS_PER_TBLOCK>, hipFuncCachePreferShared);
    // Setup GPU tables (one thread for each of 65536 entries)
    hipLaunchKernelGGL(( gpujpeg_huffman_decoder_table_kernel), dim3(256), dim3(256), 0, decoder->stream,
        *decoder->huffman_gpu_decoder,
        decoder->d_table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_DC],
        decoder->d_table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_AC],
        decoder->d_table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_DC],
        decoder->d_table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_AC]
    );
    gpujpeg_cuda_check_error("Huffman decoder table setup failed", return -1);
#ifdef HUFFMAN_GPU_CONST_TABLES
    // Copy quick decoding table into constant memory
    hipMemcpyToSymbolAsync(
        gpujpeg_huffman_gpu_decoder_tables_quick_const,
        decoder->huffman_gpu_decoder->d_tables_quick,
        sizeof(*decoder->huffman_gpu_decoder->d_tables_quick) * QUICK_TABLE_ITEMS,
        0,
        hipMemcpyDeviceToDevice,
        decoder->stream
    );
    gpujpeg_cuda_check_error("Huffman decoder table copy failed", return -1);
#endif
    // Refresh per-component Huffman table indices before uploading components
    for (int comp = 0; comp < coder->param_image.comp_count; comp++) {
        coder->component[comp].dc_huff_idx = decoder->comp_table_huffman_map[comp][GPUJPEG_HUFFMAN_DC];
        coder->component[comp].ac_huff_idx = decoder->comp_table_huffman_map[comp][GPUJPEG_HUFFMAN_AC];
    }
    // Copy updated components to device memory
    hipMemcpyAsync(coder->d_component, coder->component, coder->param_image.comp_count * sizeof(struct gpujpeg_component), hipMemcpyHostToDevice, decoder->stream);
    // BUGFIX: this error path previously did "return 0" (success); report failure
    // with -1 like every other error path in this function
    gpujpeg_cuda_check_error("Coder component copy", return -1);
    // Run decoding kernel (one thread per segment)
    dim3 thread(THREADS_PER_TBLOCK);
    dim3 grid(gpujpeg_div_and_round_up(decoder->segment_count, THREADS_PER_TBLOCK));
    if(comp_count == 1) {
        hipLaunchKernelGGL(( gpujpeg_huffman_decoder_decode_kernel<true, THREADS_PER_TBLOCK>), dim3(grid), dim3(thread), 0, decoder->stream,
            *decoder->huffman_gpu_decoder,
            coder->d_component,
            coder->d_segment,
            comp_count,
            decoder->segment_count,
            coder->d_data_compressed,
            coder->d_block_list,
            coder->d_data_quantized
        );
    } else {
        hipLaunchKernelGGL(( gpujpeg_huffman_decoder_decode_kernel<false, THREADS_PER_TBLOCK>), dim3(grid), dim3(thread), 0, decoder->stream,
            *decoder->huffman_gpu_decoder,
            coder->d_component,
            coder->d_segment,
            comp_count,
            decoder->segment_count,
            coder->d_data_compressed,
            coder->d_block_list,
            coder->d_data_quantized
        );
    }
    gpujpeg_cuda_check_error("Huffman decoding failed", return -1);
    return 0;
}
/* vi: set expandtab sw=4 : */
| 5a697253764cb8264c0948dbfc18907db2497647.cu | /**
* @file
* Copyright (c) 2011-2020, CESNET z.s.p.o
* Copyright (c) 2011, Silicon Genome, LLC.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "gpujpeg_huffman_gpu_decoder.h"
#include "gpujpeg_util.h"
/**
 * Entry of pre-built Huffman fast-decoding table.
 * NOTE(review): this type appears unused by the decoder below, which packs its
 * table entries directly into uint16_t values instead - confirm before removal.
 */
struct gpujpeg_table_huffman_decoder_entry {
    // bit-size of the coefficient value encoded by this entry
    int value_nbits;
};
/** Number of code bits to be checked first (with high chance for the code to fit into this number of bits). */
#define QUICK_CHECK_BITS 10
#define QUICK_TABLE_ITEMS (4 * (1 << QUICK_CHECK_BITS))
// TODO: try to tweak QUICK table size and memory space
/** Device-side state of the Huffman GPU decoder (passed to kernels by value). */
struct gpujpeg_huffman_gpu_decoder {
    /**
     * 4 pre-built tables for faster Huffman decoding (codewords up-to 16 bit length):
     *   - 0x00000 to 0x0ffff: luminance DC table
     *   - 0x10000 to 0x1ffff: luminance AC table
     *   - 0x20000 to 0x2ffff: chrominance DC table
     *   - 0x30000 to 0x3ffff: chrominance AC table
     *
     * Each entry consists of:
     *   - Number of bits of code corresponding to this entry (0 - 16, both inclusive) - bits 4 to 8
     *   - Number of run-length coded zeros before currently decoded coefficient + 1 (1 - 64, both inclusive) - bits 9 to 15
     *   - Number of bits representing the value of currently decoded coefficient (0 - 15, both inclusive) - bits 0 to 3
     * @code
     * bit #:    15                      9 8               4 3             0
     *         +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
     * value:  |      RLE zero count     |  code bit size  | value bit size|
     *         +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
     * @endcode
     */
    uint16_t *d_tables_full;
    /** Table with same format as the full table, except that all-zero-entry means that the full table should be consulted. */
    uint16_t *d_tables_quick;
    /** Zig-zag to natural order mapping (global-memory copy, used when constant tables are disabled). */
    int *d_order_natural;
};
#ifdef HUFFMAN_GPU_CONST_TABLES
/** Constant-memory copy of the quick decoding table (same packed format as d_tables_quick). */
__constant__ uint16_t gpujpeg_huffman_gpu_decoder_tables_quick_const[QUICK_TABLE_ITEMS];
/** Constant-memory copy of the zig-zag to natural order mapping. */
__constant__ int gpujpeg_huffman_gpu_decoder_order_natural[GPUJPEG_ORDER_NATURAL_SIZE];
#endif
// /**
// * Fill more bit to current get buffer
// *
// * @param get_bits
// * @param get_buff
// * @param data
// * @param data_size
// * @return void
// */
// __device__ inline void
// gpujpeg_huffman_gpu_decoder_decode_fill_bit_buffer(int & get_bits, int & get_buff, uint8_t* & data, int & data_size)
// {
// while ( get_bits < 25 ) {
// //Are there some data?
// if( data_size > 0 ) {
// // Attempt to read a byte
// //printf("read byte %X 0x%X\n", (int)data, (unsigned char)*data);
// unsigned char uc = *data++;
// data_size--;
//
// // If it's 0xFF, check and discard stuffed zero byte
// if ( uc == 0xFF ) {
// while ( uc == 0xFF ) {
// //printf("read byte %X 0x%X\n", (int)data, (unsigned char)*data);
// uc = *data++;
// data_size--;
// }
//
// if ( uc == 0 ) {
// // Found FF/00, which represents an FF data byte
// uc = 0xFF;
// } else {
// // There should be enough bits still left in the data segment;
// // if so, just break out of the outer while loop.
// //if (m_nGetBits >= nbits)
// if ( get_bits >= 0 )
// break;
// }
// }
//
// get_buff = (get_buff << 8) | ((int) uc);
// get_bits += 8;
// }
// else
// break;
// }
// }
/**
 * Ensures that the bit register holds at least the requested number of bits,
 * refilling it byte by byte from the shared-memory buffer. JPEG byte stuffing
 * is undone on the fly: the byte following a 0xFF data byte is skipped.
 */
__device__ inline void
gpujpeg_huffman_gpu_decoder_load_bits(
        const unsigned int required_bit_count, unsigned int & r_bit,
        unsigned int & r_bit_count, uint4 * const s_byte, unsigned int & s_byte_idx
) {
    // View the shared chunk buffer as a plain byte array
    const uint8_t* const bytes = (const uint8_t*)s_byte;
    // Append one byte per iteration until enough bits are available
    for ( ; r_bit_count < required_bit_count; r_bit_count += 8 ) {
        const uint8_t next_byte = bytes[s_byte_idx++];
        // Skip the stuffed byte that follows every 0xFF data byte
        if ( next_byte == (uint8_t)0xFF ) {
            s_byte_idx++;
        }
        // Shift the new byte into the low end of the register
        r_bit = (r_bit << 8) | next_byte;
    }
}
/**
 * Reads and consumes the next nbits bits from the bit stream.
 *
 * @param nbits number of bits to read (removed from the stream)
 * @return the bits, LSB-aligned
 */
__device__ inline unsigned int
gpujpeg_huffman_gpu_decoder_get_bits(
    const unsigned int nbits, unsigned int & r_bit, unsigned int & r_bit_count,
    uint4 * const s_byte, unsigned int & s_byte_idx)
{
    // Refill the register if it does not hold nbits yet
    gpujpeg_huffman_gpu_decoder_load_bits(nbits, r_bit, r_bit_count, s_byte, s_byte_idx);
    // Consume the bits by shrinking the count, then mask them out of the register
    const unsigned int mask = (1 << nbits) - 1;
    r_bit_count -= nbits;
    return (r_bit >> r_bit_count) & mask;
}
/**
 * Reads the next nbits bits WITHOUT consuming them from the bit stream.
 * (May still refill the register from the shared buffer as a side effect.)
 */
__device__ inline unsigned int
gpujpeg_huffman_gpu_decoder_peek_bits(
    const unsigned int nbits, unsigned int & r_bit, unsigned int & r_bit_count,
    uint4 * const s_byte, unsigned int & s_byte_idx)
{
    // Refill the register if it does not hold nbits yet
    gpujpeg_huffman_gpu_decoder_load_bits(nbits, r_bit, r_bit_count, s_byte, s_byte_idx);
    // Extract the top nbits bits, leaving the count (and thus the stream) untouched
    const unsigned int shift = r_bit_count - nbits;
    return (r_bit >> shift) & ((1 << nbits) - 1);
}
/**
 * Removes nb bits from the bit buffer.
 * Precondition: the register already holds at least nb bits (e.g. after a peek).
 */
__device__ inline void
gpujpeg_huffman_gpu_decoder_discard_bits(const unsigned int nb, unsigned int, unsigned int & r_bit_count) {
    // Dropping bits only shrinks the count; the register contents are left as-is
    r_bit_count = r_bit_count - nb;
}
/**
 * Special (slow-path) Huffman decode:
 *  (1) For codes with length > 8
 *  (2) For codes with length < 8 while data is finished
 *
 * @param table slow-decoding Huffman table
 * @param min_bits guaranteed minimum codeword length, fetched in one go
 * @return decoded symbol, or 0 for garbage input
 */
__device__ inline int
gpujpeg_huffman_gpu_decoder_decode_special_decode(
    const struct gpujpeg_table_huffman_decoder* const table, int min_bits, unsigned int & r_bit,
    unsigned int & r_bit_count, uint4 * const s_byte, unsigned int & s_byte_idx)
{
    // Fetch the guaranteed minimum number of codeword bits in one swoop
    int codeword = gpujpeg_huffman_gpu_decoder_get_bits(min_bits, r_bit, r_bit_count, s_byte, s_byte_idx);
    // Extend the codeword one bit at a time until it fits some code length
    // (per Figure F.16 in the JPEG spec)
    int length;
    for ( length = min_bits; codeword > table->maxcode[length]; length++ ) {
        codeword = (codeword << 1) | gpujpeg_huffman_gpu_decoder_get_bits(1, r_bit, r_bit_count, s_byte, s_byte_idx);
    }
    // Garbage input may push the length to the sentinel value 17
    if ( length > 16 ) {
        // Fake a zero as the safest result
        return 0;
    }
    // Look up the symbol that the codeword represents
    return table->huffval[table->valptr[length] + (int)(codeword - table->mincode[length])];
}
/**
 * Converts the raw value bits (the "code") of bit-size nbits into the signed
 * DC/AC coefficient value, per the EXTEND procedure of the JPEG spec (F.2.2.1).
 *
 * @param nbits bit-size of the coefficient value (0 to 15)
 * @param code  raw LSB-aligned value bits read from the stream
 * @return decoded signed coefficient value
 */
__device__ inline int
gpujpeg_huffman_gpu_decoder_value_from_category(int nbits, int code)
{
    // Codes in the lower half of the category encode negative values.
    // BUGFIX: the original used ((-1) << nbits), a left shift of a negative
    // value (undefined behavior before C++20); "code - 2^nbits + 1" is the
    // well-defined arithmetic equivalent.
    return code < ((1 << nbits) >> 1) ? (code - (1 << nbits) + 1) : code;
}
/**
 * Decodes next coefficient from the bit stream, updating its output index.
 * Peeks 16 bits and resolves them through the pre-built tables: the small
 * quick table first, falling back to the full 64K-entry table when the quick
 * entry is 0 (codeword longer than QUICK_CHECK_BITS).
 *
 * @param table_offset offset of the DC or AC table to use (multiple of 0x10000)
 * @param coefficient_idx in/out: advanced by 1 + number of run-length coded zeros
 *        (advanced past 64 by the end-of-block symbol)
 * @return decoded signed coefficient value
 */
__device__ inline int
gpujpeg_huffman_gpu_decoder_get_coefficient(struct gpujpeg_huffman_gpu_decoder huffman_gpu_decoder,
    unsigned int & r_bit, unsigned int & r_bit_count, uint4* const s_byte,
    unsigned int & s_byte_idx, const unsigned int table_offset, unsigned int & coefficient_idx)
{
    // Peek next 16 bits and use them as an index into decoder table to find all the info.
    const unsigned int table_idx = table_offset + gpujpeg_huffman_gpu_decoder_peek_bits(16, r_bit, r_bit_count, s_byte, s_byte_idx);
    // Try the quick table first (use the full table only if not succeeded with the quick table)
#ifdef HUFFMAN_GPU_CONST_TABLES
    unsigned int packed_info = gpujpeg_huffman_gpu_decoder_tables_quick_const[table_idx >> (16 - QUICK_CHECK_BITS)];
#else
    unsigned int packed_info = huffman_gpu_decoder.d_tables_quick[table_idx >> (16 - QUICK_CHECK_BITS)];
#endif
    if(0 == packed_info) {
        packed_info = huffman_gpu_decoder.d_tables_full[table_idx];
    }
    // remove the right number of bits from the bit buffer (codeword length is in bits 8..4)
    gpujpeg_huffman_gpu_decoder_discard_bits((packed_info >> 4) & 0x1F, r_bit, r_bit_count);
    // update coefficient index by skipping run-length encoded zeros (bits 15..9)
    coefficient_idx += packed_info >> 9;
    // read coefficient bits and decode the coefficient from them (value bit-size is in bits 3..0)
    const unsigned int value_nbits = packed_info & 0xF;
    const unsigned int value_code = gpujpeg_huffman_gpu_decoder_get_bits(value_nbits, r_bit, r_bit_count, s_byte, s_byte_idx);
    // return decoded coefficient
    return gpujpeg_huffman_gpu_decoder_value_from_category(value_nbits, value_code);
}
/**
 * Decode one 8x8 block: the DC coefficient difference first, then AC
 * coefficients until the end-of-block symbol or 64 coefficients are decoded.
 * Zero coefficients are skipped, so the output area must be cleared beforehand.
 *
 * @param dc in/out: running DC predictor for this block's component
 * @param data_output 64-coefficient output block (natural order)
 * @return 0 if succeeds, otherwise nonzero
 */
__device__ inline int
gpujpeg_huffman_gpu_decoder_decode_block(
    struct gpujpeg_huffman_gpu_decoder huffman_gpu_decoder,
    int & dc, int16_t* const data_output, const unsigned int dc_table_offset, const unsigned int ac_table_offset,
    unsigned int & r_bit, unsigned int & r_bit_count, uint4* const s_byte,
    unsigned int & s_byte_idx, const uint4* & d_byte, unsigned int & d_byte_chunk_count)
{
    // TODO: try unified decoding of DC/AC coefficients
    // Index of next coefficient to be decoded (in ZIG-ZAG order)
    unsigned int coefficient_idx = 0;
    // Section F.2.2.1: decode the DC coefficient difference
    // Get the coefficient value (using DC coding table)
    int dc_coefficient_value = gpujpeg_huffman_gpu_decoder_get_coefficient(huffman_gpu_decoder, r_bit, r_bit_count, s_byte, s_byte_idx, dc_table_offset, coefficient_idx);
    // Convert DC difference to actual value, update last_dc_val
    dc = dc_coefficient_value += dc;
    // Output the DC coefficient (assumes gpujpeg_natural_order[0] = 0)
    // TODO: try to skip saving of zero coefficients
    data_output[0] = dc_coefficient_value;
    // TODO: error check: coefficient_idx must still be 0 in valid codestreams
    coefficient_idx = 1;
    // Section F.2.2.2: decode the AC coefficients
    // Since zeroes are skipped, output area must be cleared beforehand
    do {
        // Possibly load more bytes into shared buffer from global memory
        if(s_byte_idx >= 16) {
            // Move remaining bytes to begin and update index of next byte
            s_byte[0] = s_byte[1];
            s_byte_idx -= 16;
            // Load another byte chunk from global memory only if there is one
            if(d_byte_chunk_count) {
                s_byte[1] = *(d_byte++);
                d_byte_chunk_count--;
            }
        }
        // decode next coefficient, updating its destination index
        const int coefficient_value = gpujpeg_huffman_gpu_decoder_get_coefficient(huffman_gpu_decoder, r_bit, r_bit_count, s_byte, s_byte_idx, ac_table_offset, coefficient_idx);
        // stop with this block if have all coefficients (end-of-block advances the index past 64)
        if(coefficient_idx > 64) {
            break;
        }
        // save the coefficient, mapping zig-zag index to natural order; TODO: try to omit saving 0 coefficients
        #ifdef HUFFMAN_GPU_CONST_TABLES
        data_output[gpujpeg_huffman_gpu_decoder_order_natural[coefficient_idx - 1]] = coefficient_value;
        #else
        data_output[huffman_gpu_decoder.d_order_natural[coefficient_idx - 1]] = coefficient_value;
        #endif
    } while(coefficient_idx < 64);
    return 0;
}
/**
 * Huffman decoder kernel - each thread decodes one whole segment of the
 * compressed stream (restart intervals make segments independent).
 *
 * Launch: 1D grid, THREADS_PER_TBLOCK threads per block, one thread per segment.
 * Shared memory: 32 bytes per thread (two 16-byte chunks of compressed data).
 *
 * @tparam SINGLE_COMP  true for non-interleaved scans (one component per scan),
 *                      false for interleaved scans (driven by d_block_list)
 * @tparam THREADS_PER_TBLOCK  threadblock size used for the launch
 * @return void
 */
template <bool SINGLE_COMP, int THREADS_PER_TBLOCK>
__global__ void
#if __CUDA_ARCH__ < 200
__launch_bounds__(THREADS_PER_TBLOCK, 2)
#else
__launch_bounds__(THREADS_PER_TBLOCK, 6)
#endif
gpujpeg_huffman_decoder_decode_kernel(
    struct gpujpeg_huffman_gpu_decoder huffman_gpu_decoder,
    struct gpujpeg_component* d_component,
    struct gpujpeg_segment* d_segment,
    int comp_count,
    int segment_count,
    uint8_t* d_data_compressed,
    const uint64_t* d_block_list,
    int16_t* d_data_quantized
) {
    // One thread per segment; surplus threads of the last block exit early
    int segment_index = blockIdx.x * THREADS_PER_TBLOCK + threadIdx.x;
    if ( segment_index >= segment_count )
        return;
    struct gpujpeg_segment* segment = &d_segment[segment_index];
    // Byte buffers in shared memory
    __shared__ uint4 s_byte_all[2 * THREADS_PER_TBLOCK]; // 32 bytes per thread
    uint4 * const s_byte = s_byte_all + 2 * threadIdx.x;
    // Last DC coefficient values TODO: try to move into shared memory
    int dc[GPUJPEG_MAX_COMPONENT_COUNT];
    for ( int comp = 0; comp < GPUJPEG_MAX_COMPONENT_COUNT; comp++ )
        dc[comp] = 0;
    // Get aligned compressed data chunk pointer and load first 2 chunks of the data
    const unsigned int d_byte_begin_idx = segment->data_compressed_index;
    const unsigned int d_byte_begin_idx_aligned = d_byte_begin_idx & ~15; // loading 16byte chunks
    const uint4* d_byte = (uint4*)(d_data_compressed + d_byte_begin_idx_aligned);
    // Get number of remaining global memory byte chunks (not to read bytes out of buffer)
    const unsigned int d_byte_end_idx_aligned = (d_byte_begin_idx + segment->data_compressed_size + 15) & ~15;
    unsigned int d_byte_chunk_count = (d_byte_end_idx_aligned - d_byte_begin_idx_aligned) / 16;
    // Load first 2 chunks of compressed data into the shared memory buffer and remember index of first code byte (skipping bytes read due to alignment)
    s_byte[0] = d_byte[0];
    s_byte[1] = d_byte[1];
    d_byte += 2;
    // clamp-then-subtract keeps the unsigned count from wrapping below zero
    d_byte_chunk_count = max(d_byte_chunk_count, 2) - 2;
    unsigned int s_byte_idx = d_byte_begin_idx - d_byte_begin_idx_aligned;
    // bits loaded into the register and their count
    unsigned int r_bit_count = 0;
    unsigned int r_bit = 0; // LSB-aligned
    // Non-interleaving mode
    if ( SINGLE_COMP ) {
        // Get component for current scan
        const struct gpujpeg_component* const component = d_component + segment->scan_index;
        // Get huffman tables offset
        const unsigned int dc_table_offset = component->dc_huff_idx * 0x20000;
        const unsigned int ac_table_offset = component->ac_huff_idx * 0x20000 + 0x10000;
        // Size of MCUs in this segment's component
        const int component_mcu_size = component->mcu_size;
        // Pointer to first MCU's output block
        int16_t* block = component->d_data_quantized + segment->scan_segment_index * component->segment_mcu_count * component_mcu_size;
        // Decode MCUs in segment (one 8x8 block per MCU in this mode)
        for ( int mcu_index = 0; mcu_index < segment->mcu_count; mcu_index++ ) {
            // Decode 8x8 block; a nonzero result aborts the rest of the segment
            if ( gpujpeg_huffman_gpu_decoder_decode_block(huffman_gpu_decoder, dc[0], block, dc_table_offset, ac_table_offset, r_bit, r_bit_count, s_byte, s_byte_idx, d_byte, d_byte_chunk_count) != 0 )
                break;
            // advance to next block
            block += component_mcu_size;
        }
    }
    // Interleaving mode
    else {
        // Pointer to segment's list of 8x8 blocks and their count
        const uint64_t* packed_block_info_ptr = d_block_list + segment->block_index_list_begin;
        // Decode all blocks listed for this segment
        for(int block_count = segment->block_count; block_count--;) {
            // Get pointer to next block input data and info about its color type
            const uint64_t packed_block_info = *(packed_block_info_ptr++);
            // Low 7 bits select the component (and thus its DC predictor)
            const int last_dc_idx = packed_block_info & 0x7f;
            // Get offset to right part of huffman table
            const unsigned int dc_huffman_table_offset = d_component[last_dc_idx].dc_huff_idx * 0x20000;
            const unsigned int ac_huffman_table_offset = d_component[last_dc_idx].ac_huff_idx * 0x20000 + 0x10000;
            // Remaining bits encode the output offset of the block
            int16_t* block = d_data_quantized + (packed_block_info >> 8);
            // Decode 8x8 block
            gpujpeg_huffman_gpu_decoder_decode_block(huffman_gpu_decoder, dc[last_dc_idx], block, dc_huffman_table_offset, ac_huffman_table_offset, r_bit, r_bit_count, s_byte, s_byte_idx, d_byte, d_byte_chunk_count);
        }
        // // Encode MCUs in segment
        // for ( int mcu_index = 0; mcu_index < segment->mcu_count; mcu_index++ ) {
        //
        //
        //
        //
        //
        //
        //
        //
        //     //assert(segment->scan_index == 0);
        //     for ( int comp = 0; comp < comp_count; comp++ ) {
        //         struct gpujpeg_component* component = &d_component[comp];
        //
        //         // Prepare mcu indexes
        //         int mcu_index_x = (segment_index * component->segment_mcu_count + mcu_index) % component->mcu_count_x;
        //         int mcu_index_y = (segment_index * component->segment_mcu_count + mcu_index) / component->mcu_count_x;
        //         // Compute base data index
        //         int data_index_base = mcu_index_y * (component->mcu_size * component->mcu_count_x) + mcu_index_x * (component->mcu_size_x * GPUJPEG_BLOCK_SIZE);
        //
        //         // For all vertical 8x8 blocks
        //         for ( int y = 0; y < component->sampling_factor.vertical; y++ ) {
        //             // Compute base row data index
        //             int data_index_row = data_index_base + y * (component->mcu_count_x * component->mcu_size_x * GPUJPEG_BLOCK_SIZE);
        //             // For all horizontal 8x8 blocks
        //             for ( int x = 0; x < component->sampling_factor.horizontal; x++ ) {
        //                 // Compute 8x8 block data index
        //                 int data_index = data_index_row + x * GPUJPEG_BLOCK_SIZE * GPUJPEG_BLOCK_SIZE;
        //
        //                 // Get component data for MCU
        //                 int16_t* block = &component->d_data_quantized[data_index];
        //
        //                 // Get coder parameters
        //                 int & component_dc = dc[comp];
        //
        //                 // Get huffman tables offset
        //                 const unsigned int table_offset = component->type == GPUJPEG_COMPONENT_LUMINANCE ? 0x00000 : 0x20000;
        //
        //                 // Encode 8x8 block
        //                 gpujpeg_huffman_gpu_decoder_decode_block(component_dc, block, table_offset, r_bit, r_bit_count, s_byte, s_byte_idx, d_byte, d_byte_chunk_count);
        //             }
        //         }
        //     }
        // }
    }
}
/**
 * Setup of one Huffman table entry for fast decoding.
 *
 * Decodes the single codeword starting at bit #15 of the 16bit input and
 * packs the decoding info as (rle_zero_count << 9) | (code_nbits << 4) |
 * value_nbits into the full lookup table (and, for aligned inputs, into the
 * quick lookup table as well).
 *
 * @param huffman_gpu_decoder decoder instance holding destination table pointers
 *        (passed by value; only the device memory it points to is written)
 * @param bits bits to extract one codeword from (first bit is bit #15, then #14, ... last is #0)
 * @param d_table_src source (slow-decoding) table pointer
 * @param table_idx index (0..3) of the destination table region to fill
 */
__device__ void
gpujpeg_huffman_gpu_decoder_table_setup(
struct gpujpeg_huffman_gpu_decoder huffman_gpu_decoder,
const int bits,
const struct gpujpeg_table_huffman_decoder* const d_table_src,
const int table_idx
) {
// Decode one codeword from given bits to get following:
// - minimal number of bits actually needed to decode the codeword (up to 16 bits, 0 for invalid ones)
// - category ID represented by the codeword, consisting from:
// - number of run-length-coded preceding zeros (up to 16, or 63 for both special end-of block symbol or invalid codewords)
// - bit-size of the actual value of coefficient (up to 16, 0 for invalid ones)
int code_nbits = 1, category_id = 0;
// First, decode codeword length (This is per Figure F.16 in the JPEG spec.)
int code_value = bits >> 15; // only single bit initially
while ( code_value > d_table_src->maxcode[code_nbits] ) {
code_value = bits >> (16 - ++code_nbits); // not enough to decide => try more bits
}
// With garbage input we may reach the sentinel value l = 17.
if ( code_nbits > 16 ) {
code_nbits = 0;
// category ID remains 0 for invalid symbols from garbage input
} else {
category_id = d_table_src->huffval[d_table_src->valptr[code_nbits] + code_value - d_table_src->mincode[code_nbits]];
}
// decompose category number into 1 + number of run-length coded zeros and length of the value
// (special category #0 contains all invalid codes and special end-of-block code -- all of those codes
// should terminate block decoding => use 64 run-length zeros and 0 value bits for such symbols)
const int value_nbits = 0xF & category_id;
const int rle_zero_count = category_id ? min(1 + (category_id >> 4), 64) : 64;
// save all the info into the right place in the destination table
// (each of the 4 tables occupies its own 65536-entry region selected by table_idx)
const int packed_info = (rle_zero_count << 9) + (code_nbits << 4) + value_nbits;
huffman_gpu_decoder.d_tables_full[(table_idx << 16) + bits] = packed_info;
// some threads also save entries into the quick table
// (only the thread whose low (16 - QUICK_CHECK_BITS) bits are all zero writes
// the single quick-table entry shared by its QUICK_CHECK_BITS-bit prefix)
const int dest_idx_quick = bits >> (16 - QUICK_CHECK_BITS);
if(bits == (dest_idx_quick << (16 - QUICK_CHECK_BITS))) {
// save info also into the quick table if number of required bits is less than quick
// check bit count, otherwise put 0 there to indicate that full table lookup consultation is needed
huffman_gpu_decoder.d_tables_quick[(table_idx << QUICK_CHECK_BITS) + dest_idx_quick] = code_nbits <= QUICK_CHECK_BITS ? packed_info : 0;
}
}
/**
 * Huffman decoder table setup kernel.
 * (Based on the original tables, prepares tables more suitable for fast
 * decoding.) Each thread handles its unique 16bit input against all four
 * Huffman tables; the launch grid must cover all 65536 possible values.
 */
__global__ void
gpujpeg_huffman_decoder_table_kernel(
        struct gpujpeg_huffman_gpu_decoder huffman_gpu_decoder,
        const struct gpujpeg_table_huffman_decoder* const d_table_y_dc,
        const struct gpujpeg_table_huffman_decoder* const d_table_y_ac,
        const struct gpujpeg_table_huffman_decoder* const d_table_cbcr_dc,
        const struct gpujpeg_table_huffman_decoder* const d_table_cbcr_ac
) {
    // This thread's unique 16 bits to "decode".
    const int bits = threadIdx.x + blockIdx.x * blockDim.x;
    // Destination slots 0..3 hold Y-DC, Y-AC, CbCr-DC and CbCr-AC tables.
    const struct gpujpeg_table_huffman_decoder* const src_tables[4] = {
        d_table_y_dc, d_table_y_ac, d_table_cbcr_dc, d_table_cbcr_ac
    };
    for (int table_idx = 0; table_idx < 4; table_idx++) {
        gpujpeg_huffman_gpu_decoder_table_setup(huffman_gpu_decoder, bits, src_tables[table_idx], table_idx);
    }
}
/* Documented at declaration */
struct gpujpeg_huffman_gpu_decoder *
gpujpeg_huffman_gpu_decoder_init()
{
    // calloc zero-fills the struct, so gpujpeg_huffman_gpu_decoder_destroy()
    // is safe to call for cleanup even when only some members were allocated.
    struct gpujpeg_huffman_gpu_decoder *huffman_gpu_decoder = (struct gpujpeg_huffman_gpu_decoder *) calloc(1, sizeof(struct gpujpeg_huffman_gpu_decoder));
    // BUGFIX: the allocation result was previously dereferenced without a check.
    if ( huffman_gpu_decoder == NULL ) {
        return NULL;
    }
#ifdef HUFFMAN_GPU_CONST_TABLES
    // Copy natural order to constant device memory
    cudaMemcpyToSymbol(
        gpujpeg_huffman_gpu_decoder_order_natural,
        gpujpeg_order_natural,
        GPUJPEG_ORDER_NATURAL_SIZE * sizeof(int),
        0,
        cudaMemcpyHostToDevice
    );
    gpujpeg_cuda_check_error("Huffman decoder init", gpujpeg_huffman_gpu_decoder_destroy(huffman_gpu_decoder); return NULL);
#else
    cudaMalloc((void**)&huffman_gpu_decoder->d_order_natural, GPUJPEG_ORDER_NATURAL_SIZE * sizeof(int));
    gpujpeg_cuda_check_error("Huffman GPU decoder natural order table allocation", gpujpeg_huffman_gpu_decoder_destroy(huffman_gpu_decoder); return NULL);
    cudaMemcpy(
        huffman_gpu_decoder->d_order_natural,
        gpujpeg_order_natural,
        GPUJPEG_ORDER_NATURAL_SIZE * sizeof(int),
        cudaMemcpyHostToDevice
    );
    gpujpeg_cuda_check_error("Huffman GPU decoder natural order table copy", gpujpeg_huffman_gpu_decoder_destroy(huffman_gpu_decoder); return NULL);
#endif
    // Full table: 4 tables x 65536 entries, one per possible 16bit input.
    cudaMalloc((void**)&huffman_gpu_decoder->d_tables_full, 4 * (1 << 16) * sizeof(uint16_t));
    gpujpeg_cuda_check_error("Huffman GPU decoder full table allocation", gpujpeg_huffman_gpu_decoder_destroy(huffman_gpu_decoder); return NULL);
    // Quick table for short codewords (consulted before the full table).
    cudaMalloc((void**)&huffman_gpu_decoder->d_tables_quick, QUICK_TABLE_ITEMS * sizeof(uint16_t));
    gpujpeg_cuda_check_error("Huffman GPU decoder quick table allocation", gpujpeg_huffman_gpu_decoder_destroy(huffman_gpu_decoder); return NULL);
    return huffman_gpu_decoder;
}
// Releases all device memory owned by the decoder and the decoder struct
// itself. Accepts NULL so that error paths may call it unconditionally.
void
gpujpeg_huffman_gpu_decoder_destroy(struct gpujpeg_huffman_gpu_decoder *huffman_gpu_decoder)
{
    if ( huffman_gpu_decoder != NULL ) {
        // cudaFree() tolerates NULL pointers, so partially-initialized
        // (zero-filled) instances are handled correctly here.
        cudaFree(huffman_gpu_decoder->d_order_natural);
        cudaFree(huffman_gpu_decoder->d_tables_full);
        cudaFree(huffman_gpu_decoder->d_tables_quick);
        free(huffman_gpu_decoder);
    }
}
/* Documented at declaration */
int
gpujpeg_huffman_gpu_decoder_decode(struct gpujpeg_decoder* decoder)
{
    // Get coder
    struct gpujpeg_coder* coder = &decoder->coder;
    // This decoder requires restart markers (independently decodable segments).
    assert(coder->param.restart_interval > 0);
    // One component per MCU unless the scan is interleaved.
    int comp_count = 1;
    if (coder->param.interleaved == 1) {
        comp_count = coder->param_image.comp_count;
    }
    assert(comp_count >= 1 && comp_count <= GPUJPEG_MAX_COMPONENT_COUNT);
    // Number of decoder kernel threads per each threadblock
    enum { THREADS_PER_TBLOCK = 192 };
    // Configure more Shared memory for both kernels
    cudaFuncSetCacheConfig(gpujpeg_huffman_decoder_table_kernel, cudaFuncCachePreferShared);
    cudaFuncSetCacheConfig(gpujpeg_huffman_decoder_decode_kernel<true, THREADS_PER_TBLOCK>, cudaFuncCachePreferShared);
    cudaFuncSetCacheConfig(gpujpeg_huffman_decoder_decode_kernel<false, THREADS_PER_TBLOCK>, cudaFuncCachePreferShared);
    // Setup GPU tables (one thread for each of 65536 entries)
    gpujpeg_huffman_decoder_table_kernel<<<256, 256, 0, decoder->stream>>>(
        *decoder->huffman_gpu_decoder,
        decoder->d_table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_DC],
        decoder->d_table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_AC],
        decoder->d_table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_DC],
        decoder->d_table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_AC]
    );
    gpujpeg_cuda_check_error("Huffman decoder table setup failed", return -1);
#ifdef HUFFMAN_GPU_CONST_TABLES
    // Copy quick decoding table into constant memory
    cudaMemcpyToSymbolAsync(
        gpujpeg_huffman_gpu_decoder_tables_quick_const,
        decoder->huffman_gpu_decoder->d_tables_quick,
        sizeof(*decoder->huffman_gpu_decoder->d_tables_quick) * QUICK_TABLE_ITEMS,
        0,
        cudaMemcpyDeviceToDevice,
        decoder->stream
    );
    gpujpeg_cuda_check_error("Huffman decoder table copy failed", return -1);
#endif
    // Record each component's Huffman table indices before uploading components.
    for (int comp = 0; comp < coder->param_image.comp_count; comp++) {
        coder->component[comp].dc_huff_idx = decoder->comp_table_huffman_map[comp][GPUJPEG_HUFFMAN_DC];
        coder->component[comp].ac_huff_idx = decoder->comp_table_huffman_map[comp][GPUJPEG_HUFFMAN_AC];
    }
    // Copy updated components to device memory
    cudaMemcpyAsync(coder->d_component, coder->component, coder->param_image.comp_count * sizeof(struct gpujpeg_component), cudaMemcpyHostToDevice, decoder->stream);
    // BUGFIX: this path previously did "return 0" (success) on failure;
    // report the error like every other failure path in this function.
    gpujpeg_cuda_check_error("Coder component copy", return -1);
    // Run decoding kernel: one thread per segment, single-component variant
    // selected at compile time for the non-interleaved case.
    dim3 thread(THREADS_PER_TBLOCK);
    dim3 grid(gpujpeg_div_and_round_up(decoder->segment_count, THREADS_PER_TBLOCK));
    if(comp_count == 1) {
        gpujpeg_huffman_decoder_decode_kernel<true, THREADS_PER_TBLOCK><<<grid, thread, 0, decoder->stream>>>(
            *decoder->huffman_gpu_decoder,
            coder->d_component,
            coder->d_segment,
            comp_count,
            decoder->segment_count,
            coder->d_data_compressed,
            coder->d_block_list,
            coder->d_data_quantized
        );
    } else {
        gpujpeg_huffman_decoder_decode_kernel<false, THREADS_PER_TBLOCK><<<grid, thread, 0, decoder->stream>>>(
            *decoder->huffman_gpu_decoder,
            coder->d_component,
            coder->d_segment,
            comp_count,
            decoder->segment_count,
            coder->d_data_compressed,
            coder->d_block_list,
            coder->d_data_quantized
        );
    }
    gpujpeg_cuda_check_error("Huffman decoding failed", return -1);
    return 0;
}
/* vi: set expandtab sw=4 : */
|
636d5701891090a8a1909273aaeec80fed7b0ce0.hip | // !!! This is a file automatically generated by hipify!!!
#include "./common/book.h"
#define SIZE (64*1024*1024)
// Times 100 copies of `size` ints between a pageable (malloc'ed) host buffer
// and a device buffer. Direction: host-to-device when `up`, device-to-host
// otherwise. Returns elapsed milliseconds measured with HIP events.
float cuda_malloc_test(int size, bool up) {
    hipEvent_t start, stop;
    float elapsedTime;
    const size_t bytes = size * sizeof(int);

    HANDLE_ERROR( hipEventCreate( &start ) );
    HANDLE_ERROR( hipEventCreate( &stop ) );

    // Pageable host buffer plus matching device buffer.
    int *a = (int*)malloc( bytes );
    HANDLE_NULL( a );
    int *dev_a;
    HANDLE_ERROR( hipMalloc( (void**)&dev_a, bytes ) );

    HANDLE_ERROR( hipEventRecord( start, 0 ) );
    for (int i = 0; i < 100; i++) {
        if (up)
            HANDLE_ERROR( hipMemcpy( dev_a, a, bytes, hipMemcpyHostToDevice ) );
        else
            HANDLE_ERROR( hipMemcpy( a, dev_a, bytes, hipMemcpyDeviceToHost ) );
    }
    HANDLE_ERROR( hipEventRecord( stop, 0 ) );
    // Event timing is asynchronous; wait for `stop` before reading it.
    HANDLE_ERROR( hipEventSynchronize( stop ) );
    HANDLE_ERROR( hipEventElapsedTime( &elapsedTime, start, stop ) );

    free( a );
    HANDLE_ERROR( hipFree( dev_a ) );
    HANDLE_ERROR( hipEventDestroy( start ) );
    HANDLE_ERROR( hipEventDestroy( stop ) );
    return elapsedTime;
}
// Same benchmark as cuda_malloc_test, but the host buffer is pinned
// (hipHostMalloc), which enables faster DMA transfers. Returns elapsed
// milliseconds for 100 copies of `size` ints in the requested direction.
float cuda_host_alloc_test(int size, bool up) {
    hipEvent_t start, stop;
    float elapsedTime;
    const size_t bytes = size * sizeof(int);

    HANDLE_ERROR( hipEventCreate( &start ) );
    HANDLE_ERROR( hipEventCreate( &stop ) );

    // Pinned (page-locked) host buffer plus matching device buffer.
    int *a, *dev_a;
    HANDLE_ERROR( hipHostMalloc((void**)&a,size * sizeof(*a), hipHostMallocDefault) );
    HANDLE_ERROR( hipMalloc( (void**)&dev_a, bytes ) );

    HANDLE_ERROR( hipEventRecord( start, 0 ) );
    for (int i = 0; i < 100; i++) {
        if (up)
            HANDLE_ERROR( hipMemcpy( dev_a, a, bytes, hipMemcpyHostToDevice ) );
        else
            HANDLE_ERROR( hipMemcpy( a, dev_a, bytes, hipMemcpyDeviceToHost ) );
    }
    HANDLE_ERROR( hipEventRecord( stop, 0 ) );
    // Event timing is asynchronous; wait for `stop` before reading it.
    HANDLE_ERROR( hipEventSynchronize( stop ) );
    HANDLE_ERROR( hipEventElapsedTime( &elapsedTime, start, stop ) );

    // Pinned memory must be released with hipHostFree, not free().
    HANDLE_ERROR( hipHostFree( a ) );
    HANDLE_ERROR( hipFree( dev_a ) );
    HANDLE_ERROR( hipEventDestroy( start ) );
    HANDLE_ERROR( hipEventDestroy( stop ) );
    return elapsedTime;
}
// Benchmark driver: compares pageable (hipMalloc path) vs pinned
// (hipHostMalloc path) host memory bandwidth in both copy directions.
int main(void) {
    // Total megabytes moved per test: 100 copies of SIZE ints.
    const float MB = (float)100*SIZE*sizeof(int)/1024/1024;
    float elapsedTime;

    // try it with hipMalloc (pageable host memory)
    elapsedTime = cuda_malloc_test( SIZE, true );
    printf( "Time using hipMalloc: %3.1f ms\n", elapsedTime );
    printf( "\tMB/s during copy up: %3.1f\n", MB/(elapsedTime/1000) );

    elapsedTime = cuda_malloc_test( SIZE, false );
    printf( "Time using hipMalloc: %3.1f ms\n", elapsedTime );
    printf( "\tMB/s during copy down: %3.1f\n", MB/(elapsedTime/1000) );

    // now try it with hipHostMalloc (pinned host memory)
    elapsedTime = cuda_host_alloc_test( SIZE, true );
    printf( "Time using hipHostMalloc: %3.1f ms\n", elapsedTime );
    printf( "\tMB/s during copy up: %3.1f\n", MB/(elapsedTime/1000) );

    elapsedTime = cuda_host_alloc_test( SIZE, false );
    printf( "Time using hipHostMalloc: %3.1f ms\n", elapsedTime );
    printf( "\tMB/s during copy down: %3.1f\n", MB/(elapsedTime/1000) );
    return 0;
}
| 636d5701891090a8a1909273aaeec80fed7b0ce0.cu |
#include "./common/book.h"
#define SIZE (64*1024*1024)
// Times 100 copies of `size` ints between a pageable (malloc'ed) host buffer
// and a device buffer. Direction: host-to-device when `up`, device-to-host
// otherwise. Returns elapsed milliseconds measured with CUDA events.
float cuda_malloc_test(int size, bool up) {
    cudaEvent_t start, stop;
    float elapsedTime;
    const size_t bytes = size * sizeof(int);

    HANDLE_ERROR( cudaEventCreate( &start ) );
    HANDLE_ERROR( cudaEventCreate( &stop ) );

    // Pageable host buffer plus matching device buffer.
    int *a = (int*)malloc( bytes );
    HANDLE_NULL( a );
    int *dev_a;
    HANDLE_ERROR( cudaMalloc( (void**)&dev_a, bytes ) );

    HANDLE_ERROR( cudaEventRecord( start, 0 ) );
    for (int i = 0; i < 100; i++) {
        if (up)
            HANDLE_ERROR( cudaMemcpy( dev_a, a, bytes, cudaMemcpyHostToDevice ) );
        else
            HANDLE_ERROR( cudaMemcpy( a, dev_a, bytes, cudaMemcpyDeviceToHost ) );
    }
    HANDLE_ERROR( cudaEventRecord( stop, 0 ) );
    // Event timing is asynchronous; wait for `stop` before reading it.
    HANDLE_ERROR( cudaEventSynchronize( stop ) );
    HANDLE_ERROR( cudaEventElapsedTime( &elapsedTime, start, stop ) );

    free( a );
    HANDLE_ERROR( cudaFree( dev_a ) );
    HANDLE_ERROR( cudaEventDestroy( start ) );
    HANDLE_ERROR( cudaEventDestroy( stop ) );
    return elapsedTime;
}
// Same benchmark as cuda_malloc_test, but the host buffer is pinned
// (cudaHostAlloc), which enables faster DMA transfers. Returns elapsed
// milliseconds for 100 copies of `size` ints in the requested direction.
float cuda_host_alloc_test(int size, bool up) {
    cudaEvent_t start, stop;
    float elapsedTime;
    const size_t bytes = size * sizeof(int);

    HANDLE_ERROR( cudaEventCreate( &start ) );
    HANDLE_ERROR( cudaEventCreate( &stop ) );

    // Pinned (page-locked) host buffer plus matching device buffer.
    int *a, *dev_a;
    HANDLE_ERROR( cudaHostAlloc((void**)&a,size * sizeof(*a), cudaHostAllocDefault) );
    HANDLE_ERROR( cudaMalloc( (void**)&dev_a, bytes ) );

    HANDLE_ERROR( cudaEventRecord( start, 0 ) );
    for (int i = 0; i < 100; i++) {
        if (up)
            HANDLE_ERROR( cudaMemcpy( dev_a, a, bytes, cudaMemcpyHostToDevice ) );
        else
            HANDLE_ERROR( cudaMemcpy( a, dev_a, bytes, cudaMemcpyDeviceToHost ) );
    }
    HANDLE_ERROR( cudaEventRecord( stop, 0 ) );
    // Event timing is asynchronous; wait for `stop` before reading it.
    HANDLE_ERROR( cudaEventSynchronize( stop ) );
    HANDLE_ERROR( cudaEventElapsedTime( &elapsedTime, start, stop ) );

    // Pinned memory must be released with cudaFreeHost, not free().
    HANDLE_ERROR( cudaFreeHost( a ) );
    HANDLE_ERROR( cudaFree( dev_a ) );
    HANDLE_ERROR( cudaEventDestroy( start ) );
    HANDLE_ERROR( cudaEventDestroy( stop ) );
    return elapsedTime;
}
// Benchmark driver: compares pageable (cudaMalloc path) vs pinned
// (cudaHostAlloc path) host memory bandwidth in both copy directions.
int main(void) {
    // Total megabytes moved per test: 100 copies of SIZE ints.
    const float MB = (float)100*SIZE*sizeof(int)/1024/1024;
    float elapsedTime;

    // try it with cudaMalloc (pageable host memory)
    elapsedTime = cuda_malloc_test( SIZE, true );
    printf( "Time using cudaMalloc: %3.1f ms\n", elapsedTime );
    printf( "\tMB/s during copy up: %3.1f\n", MB/(elapsedTime/1000) );

    elapsedTime = cuda_malloc_test( SIZE, false );
    printf( "Time using cudaMalloc: %3.1f ms\n", elapsedTime );
    printf( "\tMB/s during copy down: %3.1f\n", MB/(elapsedTime/1000) );

    // now try it with cudaHostAlloc (pinned host memory)
    elapsedTime = cuda_host_alloc_test( SIZE, true );
    printf( "Time using cudaHostAlloc: %3.1f ms\n", elapsedTime );
    printf( "\tMB/s during copy up: %3.1f\n", MB/(elapsedTime/1000) );

    elapsedTime = cuda_host_alloc_test( SIZE, false );
    printf( "Time using cudaHostAlloc: %3.1f ms\n", elapsedTime );
    printf( "\tMB/s during copy down: %3.1f\n", MB/(elapsedTime/1000) );
    return 0;
}
|
3bde408f86150e8f2acfb3534d6053d0f6899c63.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/hip/HIPContext.h>
#include <THH/THHAtomics.cuh>
#include "util.cuh"
#include "operator.cuh"
#include "spmm.h"
// Memory & time efficient implementation of generalized spmm
// Much of the code is inspired by GE-SpMM
// https://github.com/hgyhungry/ge-spmm
namespace at {
namespace {
const int kCoarseningFactor = 2;
const int kThreadPerBlock = 256;
} // namespace anonymous
// Generalized SpMM forward kernel: for each CSR row computes
//   output[row][d] = NaryOp-reduction over nonzeros (row, col) of
//                    BinaryOp(value_of_nonzero, input[col][d]).
// Work distribution (implied by the indexing below):
// - blockDim.x == warpSize (asserted): one warp per CSR row, blockDim.y
//   rows per block;
// - gridDim.y tiles the feature dimension in chunks of
//   warpSize * kCoarseningFactor; each lane accumulates kCoarseningFactor
//   outputs spaced warpSize apart;
// - dynamic shared memory stages warpSize (col_ind, value) pairs per warp.
template <class scalar_t, class NaryOp, class BinaryOp>
__global__
void spmm_forward_out_cuda(const int64_t *row_ptr, const int64_t *col_ind, const scalar_t *value,
const scalar_t *input, scalar_t *output,
int64_t num_row, int64_t nnz, int64_t dim) {
// for best optimization, the following code is compiled with constant warpSize
assert(blockDim.x == warpSize);
// Shared buffer layout: blockDim.y * warpSize column indices followed by the
// same number of values; each warp uses its own warpSize-entry slice.
extern __shared__ int64_t buffer[];
int64_t *col_ind_buf = buffer;
scalar_t *value_buf = reinterpret_cast<scalar_t *>(col_ind_buf + blockDim.y * warpSize);
col_ind_buf += threadIdx.y * warpSize;
value_buf += threadIdx.y * warpSize;
// Row processed by this warp; whole warp exits together on overflow.
int64_t row = blockIdx.x * blockDim.y + threadIdx.y;
if (row >= num_row)
return;
// First feature column of this lane (it also handles d_start + i * warpSize).
int64_t d_start = blockIdx.y * warpSize * kCoarseningFactor + threadIdx.x;
// Nonzero range of the row; row_ptr has num_row entries here, so the last
// row ends at nnz.
int64_t ptr_start = row_ptr[row];
int64_t ptr_end = row + 1 < num_row ? row_ptr[row + 1] : nnz;
// Per-lane accumulators, seeded with NaryOp's identity value.
scalar_t out[kCoarseningFactor];
#pragma unroll
for (int64_t i = 0; i < kCoarseningFactor; i++)
out[i] = NaryOp::zero;
// Consume the row's nonzeros in chunks of warpSize.
for (int64_t block_ptr = ptr_start; block_ptr < ptr_end; block_ptr += warpSize) {
// Each lane stages one (column, value) pair of the chunk.
int64_t ptr = block_ptr + threadIdx.x;
if (ptr < ptr_end) {
col_ind_buf[threadIdx.x] = col_ind[ptr];
value_buf[threadIdx.x] = value[ptr];
}
// Make the staged chunk visible to the whole warp before reading it.
__syncwarp();
// Fold every staged nonzero into this lane's accumulators.
int64_t max_offset = warpSize < ptr_end - block_ptr ? warpSize : ptr_end - block_ptr;
for (int64_t offset_ptr = 0; offset_ptr < max_offset; offset_ptr++) {
int64_t col = col_ind_buf[offset_ptr];
scalar_t val = value_buf[offset_ptr];
#pragma unroll
for (int64_t i = 0; i < kCoarseningFactor; i++) {
int64_t d = d_start + i * warpSize;
if (d >= dim)
break;
scalar_t x = BinaryOp::forward(val, input[col * dim + d]);
out[i] = NaryOp::forward(out[i], x);
}
}
// All lanes must finish reading before the buffer is overwritten.
__syncwarp();
}
// Write back the accumulated outputs (guard the tail of the feature dim).
#pragma unroll
for (int64_t i = 0; i < kCoarseningFactor; i++) {
int64_t d = d_start + i * warpSize;
if (d >= dim)
break;
output[row * dim + d] = out[i];
}
}
// Backward kernel used when both sparse values and the dense input require
// gradients. Same warp-per-row / shared-memory staging layout as the forward
// kernel; recomputes the forward intermediates and applies the chain rule:
//   d(out)/d(val) = NaryOp::backward(out, x) * BinaryOp::backward_lhs(val, in)
//   d(out)/d(in)  = NaryOp::backward(out, x) * BinaryOp::backward_rhs(val, in)
// input_grad and value_grad are accumulated with atomicAdd, so both must be
// zero-initialized by the caller.
template <class scalar_t, class NaryOp, class BinaryOp>
__global__
void spmm_backward_out_cuda(const int64_t *row_ptr, const int64_t *col_ind, const scalar_t *value,
const scalar_t *input, const scalar_t *output, const scalar_t *output_grad,
scalar_t *value_grad, scalar_t *input_grad,
int64_t num_row, int64_t nnz, int64_t dim) {
// for best optimization, the following code is compiled with constant warpSize
assert(blockDim.x == warpSize);
// Shared buffer layout: blockDim.y * warpSize column indices followed by the
// same number of values; each warp uses its own warpSize-entry slice.
extern __shared__ int64_t buffer[];
int64_t *col_ind_buf = buffer;
scalar_t *value_buf = reinterpret_cast<scalar_t *>(col_ind_buf + blockDim.y * warpSize);
col_ind_buf += threadIdx.y * warpSize;
value_buf += threadIdx.y * warpSize;
// Row processed by this warp.
int64_t row = blockIdx.x * blockDim.y + threadIdx.y;
if (row >= num_row)
return;
// First feature column of this lane (it also handles d_start + i * warpSize).
int64_t d_start = blockIdx.y * warpSize * kCoarseningFactor + threadIdx.x;
int64_t ptr_start = row_ptr[row];
int64_t ptr_end = row + 1 < num_row ? row_ptr[row + 1] : nnz;
for (int64_t block_ptr = ptr_start; block_ptr < ptr_end; block_ptr += warpSize) {
// Stage up to warpSize nonzeros of this row into shared memory.
int64_t ptr = block_ptr + threadIdx.x;
if (ptr < ptr_end) {
col_ind_buf[threadIdx.x] = col_ind[ptr];
value_buf[threadIdx.x] = value[ptr];
}
__syncwarp();
int64_t max_offset = warpSize < ptr_end - block_ptr ? warpSize : ptr_end - block_ptr;
for (int64_t offset_ptr = 0; offset_ptr < max_offset; offset_ptr++) {
int64_t col = col_ind_buf[offset_ptr];
scalar_t val = value_buf[offset_ptr];
// This lane's partial gradient of the nonzero's value.
scalar_t val_grad = 0;
#pragma unroll
for (int64_t i = 0; i < kCoarseningFactor; i++) {
int64_t d = d_start + i * warpSize;
if (d >= dim)
break;
scalar_t in = input[col * dim + d];
scalar_t out = output[row * dim + d];
scalar_t out_grad = output_grad[row * dim + d];
scalar_t x = BinaryOp::forward(val, in);
scalar_t dx_dval = BinaryOp::backward_lhs(val, in);
scalar_t dx_din = BinaryOp::backward_rhs(val, in);
scalar_t dout_dx = NaryOp::backward(out, x);
val_grad += out_grad * dout_dx * dx_dval;
// Several rows may reference the same column => atomic accumulation.
atomicAdd(&input_grad[col * dim + d], out_grad * dout_dx * dx_din);
}
// warp_reduce (util.cuh) presumably combines val_grad across the warp's
// lanes with the total valid in lane 0 -- TODO confirm against util.cuh.
val_grad = warp_reduce(val_grad);
if (threadIdx.x == 0)
atomicAdd(&value_grad[block_ptr + offset_ptr], val_grad);
}
__syncwarp();
}
}
// Backward kernel overload used when only the dense input requires gradients
// (no value_grad output). Same layout and chain rule as the full-backward
// overload above, minus the per-nonzero value gradient reduction.
// input_grad is accumulated with atomicAdd and must be zero-initialized.
template <class scalar_t, class NaryOp, class BinaryOp>
__global__
void spmm_backward_out_cuda(const int64_t *row_ptr, const int64_t *col_ind, const scalar_t *value,
const scalar_t *input, const scalar_t *output, const scalar_t *output_grad,
scalar_t *input_grad,
int64_t num_row, int64_t nnz, int64_t dim) {
// for best optimization, the following code is compiled with constant warpSize
assert(blockDim.x == warpSize);
// Shared buffer layout: blockDim.y * warpSize column indices followed by the
// same number of values; each warp uses its own warpSize-entry slice.
extern __shared__ int64_t buffer[];
int64_t *col_ind_buf = buffer;
scalar_t *value_buf = reinterpret_cast<scalar_t *>(col_ind_buf + blockDim.y * warpSize);
col_ind_buf += threadIdx.y * warpSize;
value_buf += threadIdx.y * warpSize;
// Row processed by this warp.
int64_t row = blockIdx.x * blockDim.y + threadIdx.y;
if (row >= num_row)
return;
// First feature column of this lane (it also handles d_start + i * warpSize).
int64_t d_start = blockIdx.y * warpSize * kCoarseningFactor + threadIdx.x;
int64_t ptr_start = row_ptr[row];
int64_t ptr_end = row + 1 < num_row ? row_ptr[row + 1] : nnz;
for (int64_t block_ptr = ptr_start; block_ptr < ptr_end; block_ptr += warpSize) {
// Stage up to warpSize nonzeros of this row into shared memory.
int64_t ptr = block_ptr + threadIdx.x;
if (ptr < ptr_end) {
col_ind_buf[threadIdx.x] = col_ind[ptr];
value_buf[threadIdx.x] = value[ptr];
}
__syncwarp();
int64_t max_offset = warpSize < ptr_end - block_ptr ? warpSize : ptr_end - block_ptr;
for (int64_t offset_ptr = 0; offset_ptr < max_offset; offset_ptr++) {
int64_t col = col_ind_buf[offset_ptr];
scalar_t val = value_buf[offset_ptr];
#pragma unroll
for (int64_t i = 0; i < kCoarseningFactor; i++) {
int64_t d = d_start + i * warpSize;
if (d >= dim)
break;
scalar_t in = input[col * dim + d];
scalar_t out = output[row * dim + d];
scalar_t out_grad = output_grad[row * dim + d];
scalar_t x = BinaryOp::forward(val, in);
scalar_t dx_din = BinaryOp::backward_rhs(val, in);
scalar_t dout_dx = NaryOp::backward(out, x);
// Several rows may reference the same column => atomic accumulation.
atomicAdd(&input_grad[col * dim + d], out_grad * dout_dx * dx_din);
}
}
__syncwarp();
}
}
// Host-side driver for the SpMM forward pass: validates inputs, converts the
// COO sparse matrix to CSR, and launches spmm_forward_out_cuda.
// Returns a newly allocated (num_row, dim) dense tensor.
template <template<class> class NaryOp, template<class> class BinaryOp>
Tensor spmm_forward_cuda(const SparseTensor &sparse, const Tensor &input_) {
constexpr const char *fn_name = "spmm_forward_cuda";
TensorArg sparse_arg(sparse, "sparse", 1), input_arg(input_, "input", 2);
spmm_forward_check(fn_name, sparse_arg, input_arg);
checkAllSameGPU(fn_name, {sparse_arg, input_arg});
// The kernel indexes input as input[col * dim + d] => needs contiguous data.
const Tensor input = input_.contiguous();
int64_t nnz = sparse._nnz();
int64_t dim = input.size(1);
int64_t num_row = sparse.size(0);
Tensor output = at::empty({num_row, dim}, input.options());
// CSR representation as (row_ptr, col_ind, value).
auto csr = coo2csr(sparse);
Tensor row_ptr = std::get<0>(csr);
Tensor col_ind = std::get<1>(csr);
Tensor value = std::get<2>(csr);
// NOTE(review): hipSetDevice's return value is not checked here.
hipSetDevice(input.get_device());
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// Launch geometry (must match kernel expectations): one 32-lane warp per
// row, kThreadPerBlock/32 rows per block, and the feature dimension tiled
// in chunks of 32 * kCoarseningFactor columns along gridDim.y.
const int dim_per_block = 32; // warpSize
const int num_dim_block = (dim + dim_per_block * kCoarseningFactor - 1) / (dim_per_block * kCoarseningFactor);
const int row_per_block = kThreadPerBlock / dim_per_block;
const int num_row_block = (num_row + row_per_block - 1) / row_per_block;
AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), fn_name, [&] {
// Dynamic shared memory: one int64 column index plus one scalar per thread.
const int memory_size = kThreadPerBlock * (sizeof(int64_t) + sizeof(scalar_t));
hipLaunchKernelGGL(( spmm_forward_out_cuda<scalar_t, NaryOp<scalar_t>, BinaryOp<scalar_t>>)
, dim3(dim3(num_row_block, num_dim_block)), dim3(dim3(dim_per_block, row_per_block)), memory_size, stream,
row_ptr.data_ptr<int64_t>(),
col_ind.data_ptr<int64_t>(),
value.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
output.data_ptr<scalar_t>(),
num_row, nnz, dim
);
});
return output;
}
// Host-side driver for the SpMM backward pass. Returns (sparse_grad,
// input_grad); sparse_grad reuses the input sparse tensor's indices and holds
// the gradient of its values. When the sparse tensor does not require
// gradients, a cheaper kernel overload is launched and value_grad stays zero.
template <template<class> class NaryOp, template<class> class BinaryOp>
std::tuple<SparseTensor, Tensor> spmm_backward_cuda(
const SparseTensor &sparse, const Tensor &input_, const Tensor &output_, const Tensor &output_grad_) {
constexpr const char *fn_name = "spmm_backward_cuda";
TensorArg sparse_arg(sparse, "sparse", 1), input_arg(input_, "input", 2), output_arg(output_, "output", 3),
output_grad_arg(output_grad_, "output_grad", 4);
spmm_backward_check(fn_name, sparse_arg, input_arg, output_arg, output_grad_arg);
checkAllSameGPU(fn_name, {sparse_arg, input_arg, output_arg, output_grad_arg});
// Kernels index these buffers linearly => contiguous layout required.
const Tensor input = input_.contiguous();
const Tensor output = output_.contiguous();
const Tensor output_grad = output_grad_.contiguous();
int64_t nnz = sparse._nnz();
int64_t dim = input.size(1);
int64_t num_row = sparse.size(0);
// Zero-initialized because the kernels accumulate gradients with atomicAdd.
Tensor value_grad = at::zeros_like(sparse.values());
Tensor input_grad = at::zeros_like(input);
SparseTensor sparse_grad = at::_sparse_coo_tensor_unsafe(sparse.indices(), value_grad, sparse.sizes());
auto csr = coo2csr(sparse);
Tensor row_ptr = std::get<0>(csr).contiguous();
Tensor col_ind = std::get<1>(csr).contiguous();
Tensor value = std::get<2>(csr).contiguous();
// NOTE(review): hipSetDevice's return value is not checked here.
hipSetDevice(input.get_device());
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// Same launch geometry as the forward pass (see spmm_forward_cuda).
const int dim_per_block = 32; // warpSize
const int num_dim_block = (dim + dim_per_block * kCoarseningFactor - 1) / (dim_per_block * kCoarseningFactor);
const int row_per_block = kThreadPerBlock / dim_per_block;
const int num_row_block = (num_row + row_per_block - 1) / row_per_block;
// Full backward (value_grad + input_grad) only when the sparse side needs it.
if (sparse.requires_grad())
AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), fn_name, [&] {
const int memory_size = kThreadPerBlock * (sizeof(int64_t) + sizeof(scalar_t));
hipLaunchKernelGGL(( spmm_backward_out_cuda<scalar_t, NaryOp<scalar_t>, BinaryOp<scalar_t>>)
, dim3(dim3(num_row_block, num_dim_block)), dim3(dim3(dim_per_block, row_per_block)), memory_size, stream,
row_ptr.data_ptr<int64_t>(),
col_ind.data_ptr<int64_t>(),
value.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
output.data_ptr<scalar_t>(),
output_grad.data_ptr<scalar_t>(),
value_grad.data_ptr<scalar_t>(),
input_grad.data_ptr<scalar_t>(),
num_row, nnz, dim
);
});
else
AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), fn_name, [&] {
const int memory_size = kThreadPerBlock * (sizeof(int64_t) + sizeof(scalar_t));
hipLaunchKernelGGL(( spmm_backward_out_cuda<scalar_t, NaryOp<scalar_t>, BinaryOp<scalar_t>>)
, dim3(dim3(num_row_block, num_dim_block)), dim3(dim3(dim_per_block, row_per_block)), memory_size, stream,
row_ptr.data_ptr<int64_t>(),
col_ind.data_ptr<int64_t>(),
value.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
output.data_ptr<scalar_t>(),
output_grad.data_ptr<scalar_t>(),
input_grad.data_ptr<scalar_t>(),
num_row, nnz, dim
);
});
return std::make_tuple(sparse_grad, input_grad);
}
// Generates a concrete host entry point `spmm_<ADD>_<MUL>_forward_cuda`
// instantiating spmm_forward_cuda with the given reduction (NARYOP) and
// combination (BINARYOP) operator templates.
#define DECLARE_FORWARD_IMPL(ADD, MUL, NARYOP, BINARYOP) \
Tensor spmm_##ADD##_##MUL##_forward_cuda(const SparseTensor &sparse, const Tensor &input) { \
return spmm_forward_cuda<NARYOP, BINARYOP>(sparse, input); \
}
// Same for the backward pass: `spmm_<ADD>_<MUL>_backward_cuda`.
#define DECLARE_BACKWARD_IMPL(ADD, MUL, NARYOP, BINARYOP) \
std::tuple<SparseTensor, Tensor> spmm_##ADD##_##MUL##_backward_cuda( \
const SparseTensor &sparse, const Tensor &input, const Tensor &output, const Tensor &output_grad) { \
return spmm_backward_cuda<NARYOP, BINARYOP>(sparse, input, output, output_grad); \
}
// Concrete semiring instantiations (operator types come from operator.cuh):
// sum-product, min/max-product and min/max-plus variants.
DECLARE_FORWARD_IMPL(add, mul, NaryAdd, BinaryMul)
DECLARE_BACKWARD_IMPL(add, mul, NaryAdd, BinaryMul)
DECLARE_FORWARD_IMPL(min, mul, NaryMin, BinaryMul)
DECLARE_BACKWARD_IMPL(min, mul, NaryMin, BinaryMul)
DECLARE_FORWARD_IMPL(max, mul, NaryMax, BinaryMul)
DECLARE_BACKWARD_IMPL(max, mul, NaryMax, BinaryMul)
DECLARE_FORWARD_IMPL(min, add, NaryMin, BinaryAdd)
DECLARE_BACKWARD_IMPL(min, add, NaryMin, BinaryAdd)
DECLARE_FORWARD_IMPL(max, add, NaryMax, BinaryAdd)
DECLARE_BACKWARD_IMPL(max, add, NaryMax, BinaryAdd)
} // namespace at | 3bde408f86150e8f2acfb3534d6053d0f6899c63.cu | #include <ATen/cuda/CUDAContext.h>
#include <THC/THCAtomics.cuh>
#include "util.cuh"
#include "operator.cuh"
#include "spmm.h"
// Memory & time efficient implementation of generalized spmm
// Much of the code is inspired by GE-SpMM
// https://github.com/hgyhungry/ge-spmm
namespace at {
namespace {
const int kCoarseningFactor = 2;
const int kThreadPerBlock = 256;
} // namespace anonymous
// Generalized SpMM forward kernel: for each CSR row computes
//   output[row][d] = NaryOp-reduction over nonzeros (row, col) of
//                    BinaryOp(value_of_nonzero, input[col][d]).
// Work distribution (implied by the indexing below):
// - blockDim.x == warpSize (asserted): one warp per CSR row, blockDim.y
//   rows per block;
// - gridDim.y tiles the feature dimension in chunks of
//   warpSize * kCoarseningFactor; each lane accumulates kCoarseningFactor
//   outputs spaced warpSize apart;
// - dynamic shared memory stages warpSize (col_ind, value) pairs per warp.
template <class scalar_t, class NaryOp, class BinaryOp>
__global__
void spmm_forward_out_cuda(const int64_t *row_ptr, const int64_t *col_ind, const scalar_t *value,
const scalar_t *input, scalar_t *output,
int64_t num_row, int64_t nnz, int64_t dim) {
// for best optimization, the following code is compiled with constant warpSize
assert(blockDim.x == warpSize);
// Shared buffer layout: blockDim.y * warpSize column indices followed by the
// same number of values; each warp uses its own warpSize-entry slice.
extern __shared__ int64_t buffer[];
int64_t *col_ind_buf = buffer;
scalar_t *value_buf = reinterpret_cast<scalar_t *>(col_ind_buf + blockDim.y * warpSize);
col_ind_buf += threadIdx.y * warpSize;
value_buf += threadIdx.y * warpSize;
// Row processed by this warp; whole warp exits together on overflow.
int64_t row = blockIdx.x * blockDim.y + threadIdx.y;
if (row >= num_row)
return;
// First feature column of this lane (it also handles d_start + i * warpSize).
int64_t d_start = blockIdx.y * warpSize * kCoarseningFactor + threadIdx.x;
// Nonzero range of the row; row_ptr has num_row entries here, so the last
// row ends at nnz.
int64_t ptr_start = row_ptr[row];
int64_t ptr_end = row + 1 < num_row ? row_ptr[row + 1] : nnz;
// Per-lane accumulators, seeded with NaryOp's identity value.
scalar_t out[kCoarseningFactor];
#pragma unroll
for (int64_t i = 0; i < kCoarseningFactor; i++)
out[i] = NaryOp::zero;
// Consume the row's nonzeros in chunks of warpSize.
for (int64_t block_ptr = ptr_start; block_ptr < ptr_end; block_ptr += warpSize) {
// Each lane stages one (column, value) pair of the chunk.
int64_t ptr = block_ptr + threadIdx.x;
if (ptr < ptr_end) {
col_ind_buf[threadIdx.x] = col_ind[ptr];
value_buf[threadIdx.x] = value[ptr];
}
// Make the staged chunk visible to the whole warp before reading it.
__syncwarp();
// Fold every staged nonzero into this lane's accumulators.
int64_t max_offset = warpSize < ptr_end - block_ptr ? warpSize : ptr_end - block_ptr;
for (int64_t offset_ptr = 0; offset_ptr < max_offset; offset_ptr++) {
int64_t col = col_ind_buf[offset_ptr];
scalar_t val = value_buf[offset_ptr];
#pragma unroll
for (int64_t i = 0; i < kCoarseningFactor; i++) {
int64_t d = d_start + i * warpSize;
if (d >= dim)
break;
scalar_t x = BinaryOp::forward(val, input[col * dim + d]);
out[i] = NaryOp::forward(out[i], x);
}
}
// All lanes must finish reading before the buffer is overwritten.
__syncwarp();
}
// Write back the accumulated outputs (guard the tail of the feature dim).
#pragma unroll
for (int64_t i = 0; i < kCoarseningFactor; i++) {
int64_t d = d_start + i * warpSize;
if (d >= dim)
break;
output[row * dim + d] = out[i];
}
}
// Backward kernel used when both sparse values and the dense input require
// gradients. Same warp-per-row / shared-memory staging layout as the forward
// kernel; recomputes the forward intermediates and applies the chain rule:
//   d(out)/d(val) = NaryOp::backward(out, x) * BinaryOp::backward_lhs(val, in)
//   d(out)/d(in)  = NaryOp::backward(out, x) * BinaryOp::backward_rhs(val, in)
// input_grad and value_grad are accumulated with atomicAdd, so both must be
// zero-initialized by the caller.
template <class scalar_t, class NaryOp, class BinaryOp>
__global__
void spmm_backward_out_cuda(const int64_t *row_ptr, const int64_t *col_ind, const scalar_t *value,
const scalar_t *input, const scalar_t *output, const scalar_t *output_grad,
scalar_t *value_grad, scalar_t *input_grad,
int64_t num_row, int64_t nnz, int64_t dim) {
// for best optimization, the following code is compiled with constant warpSize
assert(blockDim.x == warpSize);
// Shared buffer layout: blockDim.y * warpSize column indices followed by the
// same number of values; each warp uses its own warpSize-entry slice.
extern __shared__ int64_t buffer[];
int64_t *col_ind_buf = buffer;
scalar_t *value_buf = reinterpret_cast<scalar_t *>(col_ind_buf + blockDim.y * warpSize);
col_ind_buf += threadIdx.y * warpSize;
value_buf += threadIdx.y * warpSize;
// Row processed by this warp.
int64_t row = blockIdx.x * blockDim.y + threadIdx.y;
if (row >= num_row)
return;
// First feature column of this lane (it also handles d_start + i * warpSize).
int64_t d_start = blockIdx.y * warpSize * kCoarseningFactor + threadIdx.x;
int64_t ptr_start = row_ptr[row];
int64_t ptr_end = row + 1 < num_row ? row_ptr[row + 1] : nnz;
for (int64_t block_ptr = ptr_start; block_ptr < ptr_end; block_ptr += warpSize) {
// Stage up to warpSize nonzeros of this row into shared memory.
int64_t ptr = block_ptr + threadIdx.x;
if (ptr < ptr_end) {
col_ind_buf[threadIdx.x] = col_ind[ptr];
value_buf[threadIdx.x] = value[ptr];
}
__syncwarp();
int64_t max_offset = warpSize < ptr_end - block_ptr ? warpSize : ptr_end - block_ptr;
for (int64_t offset_ptr = 0; offset_ptr < max_offset; offset_ptr++) {
int64_t col = col_ind_buf[offset_ptr];
scalar_t val = value_buf[offset_ptr];
// This lane's partial gradient of the nonzero's value.
scalar_t val_grad = 0;
#pragma unroll
for (int64_t i = 0; i < kCoarseningFactor; i++) {
int64_t d = d_start + i * warpSize;
if (d >= dim)
break;
scalar_t in = input[col * dim + d];
scalar_t out = output[row * dim + d];
scalar_t out_grad = output_grad[row * dim + d];
scalar_t x = BinaryOp::forward(val, in);
scalar_t dx_dval = BinaryOp::backward_lhs(val, in);
scalar_t dx_din = BinaryOp::backward_rhs(val, in);
scalar_t dout_dx = NaryOp::backward(out, x);
val_grad += out_grad * dout_dx * dx_dval;
// Several rows may reference the same column => atomic accumulation.
atomicAdd(&input_grad[col * dim + d], out_grad * dout_dx * dx_din);
}
// warp_reduce (util.cuh) presumably combines val_grad across the warp's
// lanes with the total valid in lane 0 -- TODO confirm against util.cuh.
val_grad = warp_reduce(val_grad);
if (threadIdx.x == 0)
atomicAdd(&value_grad[block_ptr + offset_ptr], val_grad);
}
__syncwarp();
}
}
// Variant of the backward kernel for when only the dense input requires
// gradients: skips the per-nonzero value-gradient reduction entirely.
template <class scalar_t, class NaryOp, class BinaryOp>
__global__
void spmm_backward_out_cuda(const int64_t *row_ptr, const int64_t *col_ind, const scalar_t *value,
                            const scalar_t *input, const scalar_t *output, const scalar_t *output_grad,
                            scalar_t *input_grad,
                            int64_t num_row, int64_t nnz, int64_t dim) {
    // for best optimization, the following code is compiled with constant warpSize
    assert(blockDim.x == warpSize);
    // Dynamic shared memory: per-warp staging buffers of column indices / values.
    extern __shared__ int64_t buffer[];
    int64_t *col_ind_buf = buffer;
    scalar_t *value_buf = reinterpret_cast<scalar_t *>(col_ind_buf + blockDim.y * warpSize);
    col_ind_buf += threadIdx.y * warpSize;
    value_buf += threadIdx.y * warpSize;
    // One warp per sparse row; blockIdx.y tiles the dense dimension.
    int64_t row = blockIdx.x * blockDim.y + threadIdx.y;
    if (row >= num_row)
        return;
    int64_t d_start = blockIdx.y * warpSize * kCoarseningFactor + threadIdx.x;
    int64_t ptr_start = row_ptr[row];
    // row_ptr holds num_row entries; the last row ends at nnz.
    int64_t ptr_end = row + 1 < num_row ? row_ptr[row + 1] : nnz;
    for (int64_t block_ptr = ptr_start; block_ptr < ptr_end; block_ptr += warpSize) {
        int64_t ptr = block_ptr + threadIdx.x;
        if (ptr < ptr_end) {
            // cooperative load of up to warpSize nonzeros into shared memory
            col_ind_buf[threadIdx.x] = col_ind[ptr];
            value_buf[threadIdx.x] = value[ptr];
        }
        __syncwarp();
        int64_t max_offset = warpSize < ptr_end - block_ptr ? warpSize : ptr_end - block_ptr;
        for (int64_t offset_ptr = 0; offset_ptr < max_offset; offset_ptr++) {
            int64_t col = col_ind_buf[offset_ptr];
            scalar_t val = value_buf[offset_ptr];
            #pragma unroll
            for (int64_t i = 0; i < kCoarseningFactor; i++) {
                int64_t d = d_start + i * warpSize;
                if (d >= dim)
                    break;
                scalar_t in = input[col * dim + d];
                scalar_t out = output[row * dim + d];
                scalar_t out_grad = output_grad[row * dim + d];
                // chain rule: d(loss)/d(in) = out_grad * dNary/dx * dBinary/din
                scalar_t x = BinaryOp::forward(val, in);
                scalar_t dx_din = BinaryOp::backward_rhs(val, in);
                scalar_t dout_dx = NaryOp::backward(out, x);
                atomicAdd(&input_grad[col * dim + d], out_grad * dout_dx * dx_din);
            }
        }
        __syncwarp();
    }
}
// Generalized sparse-dense matrix multiply over a (NaryOp, BinaryOp) semiring
// (e.g. add/mul for classic SpMM). Converts the COO sparse tensor to CSR and
// launches spmm_forward_out_cuda with one warp per sparse row.
//
// Fix: make the CSR components contiguous before taking raw data pointers,
// matching spmm_backward_cuda; a no-op when coo2csr already returns
// contiguous tensors, but guarantees the kernel's flat indexing is valid.
template <template<class> class NaryOp, template<class> class BinaryOp>
Tensor spmm_forward_cuda(const SparseTensor &sparse, const Tensor &input_) {
    constexpr const char *fn_name = "spmm_forward_cuda";
    TensorArg sparse_arg(sparse, "sparse", 1), input_arg(input_, "input", 2);
    spmm_forward_check(fn_name, sparse_arg, input_arg);
    checkAllSameGPU(fn_name, {sparse_arg, input_arg});
    const Tensor input = input_.contiguous();
    int64_t nnz = sparse._nnz();
    int64_t dim = input.size(1);
    int64_t num_row = sparse.size(0);
    Tensor output = at::empty({num_row, dim}, input.options());
    auto csr = coo2csr(sparse);
    Tensor row_ptr = std::get<0>(csr).contiguous();
    Tensor col_ind = std::get<1>(csr).contiguous();
    Tensor value = std::get<2>(csr).contiguous();
    cudaSetDevice(input.get_device());
    auto stream = at::cuda::getCurrentCUDAStream();
    // Launch geometry: warps (32 lanes) along x handle rows; blockIdx.y tiles
    // the dense dimension in chunks of warpSize * kCoarseningFactor.
    const int dim_per_block = 32; // warpSize
    const int num_dim_block = (dim + dim_per_block * kCoarseningFactor - 1) / (dim_per_block * kCoarseningFactor);
    const int row_per_block = kThreadPerBlock / dim_per_block;
    const int num_row_block = (num_row + row_per_block - 1) / row_per_block;
    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), fn_name, [&] {
        // shared memory: per-thread slot for a column index and a value
        const int memory_size = kThreadPerBlock * (sizeof(int64_t) + sizeof(scalar_t));
        spmm_forward_out_cuda<scalar_t, NaryOp<scalar_t>, BinaryOp<scalar_t>>
            <<<dim3(num_row_block, num_dim_block), dim3(dim_per_block, row_per_block), memory_size, stream>>>(
                row_ptr.data_ptr<int64_t>(),
                col_ind.data_ptr<int64_t>(),
                value.data_ptr<scalar_t>(),
                input.data_ptr<scalar_t>(),
                output.data_ptr<scalar_t>(),
                num_row, nnz, dim
        );
    });
    return output;
}
// Backward of spmm_forward_cuda: given the saved forward output and its
// gradient, returns (d_sparse, d_input). The sparse value gradient is only
// computed when sparse.requires_grad(); otherwise the returned sparse
// gradient keeps its zero-initialized values.
template <template<class> class NaryOp, template<class> class BinaryOp>
std::tuple<SparseTensor, Tensor> spmm_backward_cuda(
        const SparseTensor &sparse, const Tensor &input_, const Tensor &output_, const Tensor &output_grad_) {
    constexpr const char *fn_name = "spmm_backward_cuda";
    TensorArg sparse_arg(sparse, "sparse", 1), input_arg(input_, "input", 2), output_arg(output_, "output", 3),
              output_grad_arg(output_grad_, "output_grad", 4);
    spmm_backward_check(fn_name, sparse_arg, input_arg, output_arg, output_grad_arg);
    checkAllSameGPU(fn_name, {sparse_arg, input_arg, output_arg, output_grad_arg});
    const Tensor input = input_.contiguous();
    const Tensor output = output_.contiguous();
    const Tensor output_grad = output_grad_.contiguous();
    int64_t nnz = sparse._nnz();
    int64_t dim = input.size(1);
    int64_t num_row = sparse.size(0);
    // Zero-initialized gradient buffers; kernels accumulate into them atomically.
    Tensor value_grad = at::zeros_like(sparse.values());
    Tensor input_grad = at::zeros_like(input);
    SparseTensor sparse_grad = at::_sparse_coo_tensor_unsafe(sparse.indices(), value_grad, sparse.sizes());
    auto csr = coo2csr(sparse);
    Tensor row_ptr = std::get<0>(csr).contiguous();
    Tensor col_ind = std::get<1>(csr).contiguous();
    Tensor value = std::get<2>(csr).contiguous();
    cudaSetDevice(input.get_device());
    auto stream = at::cuda::getCurrentCUDAStream();
    // Launch geometry mirrors the forward pass: one warp per sparse row along
    // x, dense-dimension tiles along y.
    const int dim_per_block = 32; // warpSize
    const int num_dim_block = (dim + dim_per_block * kCoarseningFactor - 1) / (dim_per_block * kCoarseningFactor);
    const int row_per_block = kThreadPerBlock / dim_per_block;
    const int num_row_block = (num_row + row_per_block - 1) / row_per_block;
    if (!sparse.requires_grad()) {
        // Only the dense input needs gradients: launch the lighter overload.
        AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), fn_name, [&] {
            const int memory_size = kThreadPerBlock * (sizeof(int64_t) + sizeof(scalar_t));
            spmm_backward_out_cuda<scalar_t, NaryOp<scalar_t>, BinaryOp<scalar_t>>
                <<<dim3(num_row_block, num_dim_block), dim3(dim_per_block, row_per_block), memory_size, stream>>>(
                    row_ptr.data_ptr<int64_t>(),
                    col_ind.data_ptr<int64_t>(),
                    value.data_ptr<scalar_t>(),
                    input.data_ptr<scalar_t>(),
                    output.data_ptr<scalar_t>(),
                    output_grad.data_ptr<scalar_t>(),
                    input_grad.data_ptr<scalar_t>(),
                    num_row, nnz, dim
            );
        });
    } else {
        // Both the sparse values and the dense input need gradients.
        AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), fn_name, [&] {
            const int memory_size = kThreadPerBlock * (sizeof(int64_t) + sizeof(scalar_t));
            spmm_backward_out_cuda<scalar_t, NaryOp<scalar_t>, BinaryOp<scalar_t>>
                <<<dim3(num_row_block, num_dim_block), dim3(dim_per_block, row_per_block), memory_size, stream>>>(
                    row_ptr.data_ptr<int64_t>(),
                    col_ind.data_ptr<int64_t>(),
                    value.data_ptr<scalar_t>(),
                    input.data_ptr<scalar_t>(),
                    output.data_ptr<scalar_t>(),
                    output_grad.data_ptr<scalar_t>(),
                    value_grad.data_ptr<scalar_t>(),
                    input_grad.data_ptr<scalar_t>(),
                    num_row, nnz, dim
            );
        });
    }
    return std::make_tuple(sparse_grad, input_grad);
}
// Stamps out the concrete forward entry point spmm_<ADD>_<MUL>_forward_cuda,
// forwarding to the templated implementation with the given semiring ops.
#define DECLARE_FORWARD_IMPL(ADD, MUL, NARYOP, BINARYOP) \
    Tensor spmm_##ADD##_##MUL##_forward_cuda(const SparseTensor &sparse, const Tensor &input) { \
        return spmm_forward_cuda<NARYOP, BINARYOP>(sparse, input); \
    }

// Stamps out the matching backward entry point spmm_<ADD>_<MUL>_backward_cuda.
#define DECLARE_BACKWARD_IMPL(ADD, MUL, NARYOP, BINARYOP) \
    std::tuple<SparseTensor, Tensor> spmm_##ADD##_##MUL##_backward_cuda( \
        const SparseTensor &sparse, const Tensor &input, const Tensor &output, const Tensor &output_grad) { \
        return spmm_backward_cuda<NARYOP, BINARYOP>(sparse, input, output, output_grad); \
    }

// Supported (reduction, combination) semiring pairs; add/mul is classic SpMM.
DECLARE_FORWARD_IMPL(add, mul, NaryAdd, BinaryMul)
DECLARE_BACKWARD_IMPL(add, mul, NaryAdd, BinaryMul)
DECLARE_FORWARD_IMPL(min, mul, NaryMin, BinaryMul)
DECLARE_BACKWARD_IMPL(min, mul, NaryMin, BinaryMul)
DECLARE_FORWARD_IMPL(max, mul, NaryMax, BinaryMul)
DECLARE_BACKWARD_IMPL(max, mul, NaryMax, BinaryMul)
DECLARE_FORWARD_IMPL(min, add, NaryMin, BinaryAdd)
DECLARE_BACKWARD_IMPL(min, add, NaryMin, BinaryAdd)
DECLARE_FORWARD_IMPL(max, add, NaryMax, BinaryAdd)
DECLARE_BACKWARD_IMPL(max, add, NaryMax, BinaryAdd)

} // namespace at
ad30bd97514963ac86fb8ddd8ea4ad1077342851.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef __NVCC__
#include "hipcub/hipcub.hpp"
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#endif
#include "paddle/fluid/operators/amp/fp16_type_traits.h"
#include "paddle/fluid/operators/math/cross_entropy.h"
#include "paddle/fluid/operators/softmax_with_cross_entropy_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_device_function.h"
#include "paddle/fluid/platform/device/gpu/gpu_dnn.h"
#include "paddle/fluid/platform/for_range.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/gpudnn/softmax_gpudnn.h"
namespace paddle {
namespace operators {
#define ALIGN_BYTES 16
using ScopedTensorDescriptor = platform::ScopedTensorDescriptor;
using DataLayout = platform::DataLayout;
using Tensor = framework::Tensor;
namespace kps = phi::kps;
// Numerically-guarded log wrapper: promotes x to the accumulation type
// (float for float16) before calling ::log, then passes the result through
// math::TolerableValue<T>.
template <typename T>
static __device__ __forceinline__ T Log(T x) {
  using AccT = typename details::MPTypeTrait<T>::Type;
  const AccT promoted = static_cast<AccT>(x);
  return math::TolerableValue<T>()(static_cast<T>(::log(promoted)));
}
// Numerically-guarded exp wrapper: promotes x to the accumulation type
// (float for float16) before calling ::exp, then passes the result through
// math::TolerableValue<T>.
template <typename T>
static __device__ __forceinline__ T Exp(T x) {
  using AccT = typename details::MPTypeTrait<T>::Type;
  const AccT promoted = static_cast<AccT>(x);
  return math::TolerableValue<T>()(static_cast<T>(::exp(promoted)));
}
// Accumulator functor for the softmax denominator: folds exp(x - max_value)
// into a running sum, where max_value is the row maximum supplied at
// construction (subtracted for numerical stability).
template <typename Tx, typename Ty = Tx>
struct ExpAddFunctor {
  HOSTDEVICE inline ExpAddFunctor(Tx max) : max_value(max) {}

  HOSTDEVICE inline Ty operator()(const Tx& sum, const Tx& x) const {
    return static_cast<Ty>(sum + ::exp(x - max_value));
  }

 private:
  Tx max_value;  // row maximum used to shift the exponent
};
// ceil(log2(value)): the smallest k with (1 << k) >= value.
// Returns 0 for value <= 1.
static inline int Log2Ceil(int value) {
  int bits = 0;
  while ((1 << bits) < value) {
    ++bits;
  }
  return bits;
}
// Which fused computation WarpSoftmaxForward performs per row.
enum class SoftmaxMode { kSoftmax, kLogSoftmax, kCrossEntropy };
/*
  Hard-label cross entropy over precomputed softmax probabilities.
  One thread handles one (sample, inner-dim) pair `ids`:
      loss[ids] = -log(softmax[idx_n, label, idx_d])
  A negative label — and, when IgnoreIndex is set, the ignore index — yields
  a zero loss.
*/
template <typename T, typename LabelT, bool IgnoreIndex>
__global__ void CrossEntropyHardLabel(T* loss, const T* softmax,
                                      const LabelT* labels, const int n,
                                      const int dim, const int d,
                                      const int ignore_idx) {
  int64_t ids = blockIdx.x * blockDim.x + threadIdx.x;
  if (ids >= n * d) {
    return;
  }
  int64_t idx_n = ids / d;
  int64_t idx_d = ids % d;
  auto lbl = static_cast<int64_t>(labels[ids]);
  // Negative labels (and the ignored class, when requested) contribute no loss.
  if (lbl < 0 || (IgnoreIndex && lbl == ignore_idx)) {
    loss[ids] = static_cast<T>(0.0);
    return;
  }
  loss[ids] = -Log(softmax[idx_n * dim * d + lbl * d + idx_d]);
}
/*
  Hard-label cross entropy taking log-softmax as input.
  One thread per logit element `idx`; the thread whose class index matches the
  label writes the loss, and every thread converts its stored log-softmax
  value into softmax in place.
*/
template <typename T, typename LabelT, bool IgnoreIndex>
__global__ void CrossEntropyExpHardLabel(T* loss, T* softmax,
                                         const LabelT* labels, const int n,
                                         const int dim, const int d,
                                         const int ignore_idx) {
  int64_t idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= n * dim * d) {
    return;
  }
  int64_t idx_n = idx / (d * dim);
  int64_t idx_dim = (idx / d) % dim;
  int64_t idx_d = idx % d;
  int64_t ids = idx_n * d + idx_d;
  auto lbl = static_cast<int64_t>(labels[ids]);
  if (IgnoreIndex) {
    // Only the labelled class writes; the ignored class stores zero loss.
    if (idx_dim == lbl) {
      loss[ids] = (lbl == ignore_idx) ? static_cast<T>(0.0) : -softmax[idx];
    }
  } else if (lbl >= 0 && lbl < dim) {
    if (lbl == idx_dim) {
      loss[ids] = -softmax[idx];
    }
  } else {
    // Out-of-range label: zero loss (written redundantly by each class thread).
    loss[ids] = static_cast<T>(0.0);
  }
  // Turn the stored log-softmax into softmax in place.
  softmax[idx] = Exp(softmax[idx]);
}
/*
Core function of softmax with cross entropy forward
- softmax, SoftmaxMode=kSoftmax
- log softmax, SoftmaxMode=kLogSoftmax
- softmax with cross entropy hard label, SoftmaxMode=kCrossEntropy
The computation includes
- Compute max value: maxvalue_{i} = max_j src_{i,j}
- Compute sum of exp: s_{i} = sum_{j}{e^{src_{i,j} - maxvalue_{i}}}
- Compute: softmax_{i,j} = e^{src_{i,j} - maxvalue_{i}} / s_{i}
- Compute: logsoftmax_{i,j} = src_{i,j} - maxvalue_{i} - log(s_{i})
- Compute: loss_{i} = -logsoftmax[i,label[i]] (Hard label)
This computation results from following formula:
softmax_{i,j} = e^{src_{i,j}} / sum_{j}{e^{src_{i,j}}}
= e^{src_{i,j} - maxvalue_{i}}
/ sum_{j}{e^{src_{i,j} - maxvalue_{i}}}
= e^{src_{i,j} - maxvalue_{i}} / s_{i}
logsoftmax_{i,j} = log(softmax_{i,j})
= src_{i,j} - maxvalue_{i} - log(s_{i})
One warp (32 threads) is used to compute 1 or 2 batch (kBatchSize).
For reduction max (sum), firstly compute max (sum) to one warp, then use
shuffle api to compute max (sum) in one warp.
*/
template <typename T, typename LabelT, typename VecT, typename AccT,
          int Log2Elements, SoftmaxMode mode, bool IgnoreIndex>
__global__ void WarpSoftmaxForward(T* loss, T* softmax, const T* src,
                                   const LabelT* label, const int batch_size,
                                   const int stride, const int element_count,
                                   const int ignore_index) {
  // See the comment block above: one warp handles kBatchSize rows entirely in
  // registers; kIterationsV vectorized loads of VecT cover the kDimCeil
  // (padded) class dimension.
  constexpr int kDimCeil = 1 << Log2Elements;
  constexpr int kWarpSize = (kDimCeil < 32) ? kDimCeil : 32;
  constexpr int kVSize = sizeof(VecT) / sizeof(T);
  constexpr int kIterations = kDimCeil / kWarpSize;
  constexpr int kIterationsV =
      (kIterations >= kVSize) ? (kIterations / kVSize) : 1;
  constexpr int kBatchSize = (kDimCeil <= 128) ? 2 : 1;
  int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * kBatchSize;
  // max index to read (0 for rows past batch_size, i.e. fully padded rows)
  int idx_max_v[kBatchSize];
#pragma unroll
  for (int i = 0; i < kBatchSize; i++) {
    int idx_max = ((i + first_batch) < batch_size) ? element_count : 0;
    idx_max_v[i] = idx_max / kVSize;
  }
  // read data from global memory; out-of-range lanes are padded with -inf so
  // they are neutral for both the max and the exp-sum below
  AccT srcdata[kBatchSize][kIterationsV][kVSize];
#pragma unroll
  for (int i = 0; i < kBatchSize; ++i) {
// read data to srcdata: - KVSize==1, - KVSize>1
#pragma unroll
    for (int it = 0; it < kIterationsV; ++it) {
      int src_idx = threadIdx.x + it * kWarpSize;
      if (kVSize == 1) {
        if (src_idx < idx_max_v[i]) {
          srcdata[i][it][0] =
              static_cast<AccT>(src[(first_batch + i) * stride + src_idx]);
        } else {
          srcdata[i][it][0] = -std::numeric_limits<AccT>::infinity();
        }
      } else {
        const VecT* src_v =
            reinterpret_cast<const VecT*>(&src[(first_batch + i) * stride]);
        if (src_idx < idx_max_v[i]) {
          VecT srctmp = src_v[src_idx];
          const T* srcinptr = reinterpret_cast<const T*>(&srctmp);
#pragma unroll
          for (int s = 0; s < kVSize; s++) {
            srcdata[i][it][s] = static_cast<AccT>(srcinptr[s]);
          }
        } else {
#pragma unroll
          for (int s = 0; s < kVSize; s++) {
            srcdata[i][it][s] = -std::numeric_limits<AccT>::infinity();
          }
        }
      }
    }
  }
  // compute max value: maxvalue_{i} = max_j src_{i,j}
  AccT max_value[kBatchSize];
#pragma unroll
  for (int i = 0; i < kBatchSize; ++i) {
    // it = 0
    AccT valmax = srcdata[i][0][0];
#pragma unroll
    for (int s = 1; s < kVSize; ++s) {
      valmax = (valmax > srcdata[i][0][s]) ? valmax : srcdata[i][0][s];
    }
    max_value[i] = valmax;
    // it = 1, 2, ...
#pragma unroll
    for (int it = 1; it < kIterationsV; ++it) {
      AccT valmax = srcdata[i][it][0];
#pragma unroll
      for (int s = 1; s < kVSize; ++s) {
        valmax = (valmax > srcdata[i][it][s]) ? valmax : srcdata[i][it][s];
      }
      max_value[i] = (max_value[i] > valmax) ? max_value[i] : valmax;
    }
  }
  // combine per-lane maxima across the warp (shuffle-based reduction)
  phi::WarpReduceMax<AccT, kBatchSize, kWarpSize>(max_value);
  // compute sum: s_{i} = sum_{j}{ exp(src_{i,j} - maxvalue_{i} }
  // (for the plain-softmax mode, exp(x - max) is also cached back in srcdata)
  AccT sum[kBatchSize];
#pragma unroll
  for (int i = 0; i < kBatchSize; ++i) {
    // it = 0
    if (mode == SoftmaxMode::kLogSoftmax ||
        mode == SoftmaxMode::kCrossEntropy) {
      sum[i] = ::exp(srcdata[i][0][0] - max_value[i]);
    } else {
      srcdata[i][0][0] = ::exp(srcdata[i][0][0] - max_value[i]);
      sum[i] = srcdata[i][0][0];
    }
#pragma unroll
    for (int s = 1; s < kVSize; ++s) {
      if (mode == SoftmaxMode::kLogSoftmax ||
          mode == SoftmaxMode::kCrossEntropy) {
        sum[i] += ::exp(srcdata[i][0][s] - max_value[i]);
      } else {
        srcdata[i][0][s] = ::exp(srcdata[i][0][s] - max_value[i]);
        sum[i] += srcdata[i][0][s];
      }
    }
    // it = 1, 2, ...
#pragma unroll
    for (int it = 1; it < kIterationsV; ++it) {
#pragma unroll
      for (int s = 0; s < kVSize; ++s) {
        if (mode == SoftmaxMode::kLogSoftmax ||
            mode == SoftmaxMode::kCrossEntropy) {
          sum[i] += ::exp(srcdata[i][it][s] - max_value[i]);
        } else {
          srcdata[i][it][s] = ::exp(srcdata[i][it][s] - max_value[i]);
          sum[i] += srcdata[i][it][s];
        }
      }
    }
  }
  phi::WarpReduceSum<AccT, kBatchSize, kWarpSize>(sum);
  // write data
#pragma unroll
  for (int i = 0; i < kBatchSize; ++i) {
    if (mode == SoftmaxMode::kLogSoftmax ||
        mode == SoftmaxMode::kCrossEntropy) {
      // store log(sum) so that logsoftmax = x - max - sum[i] below
      sum[i] = ::log(sum[i]);
    }
#pragma unroll
    for (int it = 0; it < kIterationsV; ++it) {
      int idx = threadIdx.x + it * kWarpSize;
      if (kVSize == 1) {  // kVSize==1
        if (idx < idx_max_v[i]) {
          if (mode == SoftmaxMode::kLogSoftmax) {  // log softmax
            softmax[(first_batch + i) * stride + idx] =
                srcdata[i][it][0] - max_value[i] - sum[i];
            // softmax with cross entropy hard label
          } else if (mode == SoftmaxMode::kCrossEntropy) {
            AccT logsoftmax = srcdata[i][it][0] - max_value[i] - sum[i];
            // softmax
            softmax[(first_batch + i) * stride + idx] = ::exp(logsoftmax);
            // label
            int loss_idx = (threadIdx.x + it * kWarpSize) * kVSize;
            auto lbl = static_cast<int64_t>(label[first_batch + i]);
            if (IgnoreIndex == true) {
              // IgnoreIndex is true
              if (lbl == loss_idx) {
                if (lbl != ignore_index) {
                  loss[first_batch + i] = -logsoftmax;
                } else {
                  loss[first_batch + i] = static_cast<T>(0.0);
                }
              }
            } else {
              // IgnoreIndex is false
              if (lbl >= 0 && lbl < element_count) {
                if (lbl == loss_idx) {
                  loss[first_batch + i] = -logsoftmax;
                }
              } else {
                loss[first_batch + i] = static_cast<T>(0.0);
              }
            }
          } else {  // softmax
            softmax[(first_batch + i) * stride + idx] =
                srcdata[i][it][0] / sum[i];
          }
        } else {
          break;
        }
      } else {  // KVSize>1
        VecT* softmax_v =
            reinterpret_cast<VecT*>(&softmax[(first_batch + i) * stride]);
        VecT tmpdata;
        T* tmpptr = reinterpret_cast<T*>(&tmpdata);
#pragma unroll
        for (int s = 0; s < kVSize; ++s) {
          if (mode == SoftmaxMode::kLogSoftmax) {  // log softmax
            tmpptr[s] = srcdata[i][it][s] - max_value[i] - sum[i];
            // softmax with cross entropy hard label
          } else if (mode == SoftmaxMode::kCrossEntropy) {
            AccT logsoftmax = srcdata[i][it][s] - max_value[i] - sum[i];
            // softmax
            tmpptr[s] = ::exp(logsoftmax);
            // label
            int loss_idx = (threadIdx.x + it * kWarpSize) * kVSize + s;
            auto lbl = static_cast<int64_t>(label[first_batch + i]);
            if (IgnoreIndex == true) {
              // IgnoreIndex is true
              if (lbl == loss_idx && lbl != ignore_index) {
                loss[first_batch + i] = -logsoftmax;
              }
            } else {
              // IgnoreIndex is false
              if (lbl >= 0 && lbl < element_count) {
                if (lbl == loss_idx) {
                  loss[first_batch + i] = -logsoftmax;
                }
              } else {
                loss[first_batch + i] = static_cast<T>(0.0);
              }
            }
          } else {  // softmax
            tmpptr[s] = srcdata[i][it][s] / sum[i];
          }
        }
        if (idx < idx_max_v[i]) {
          softmax_v[idx] = tmpdata;
        } else {
          break;
        }
      }
    }
  }
}
// Switch-case body instantiating WarpSoftmaxForward for a compile-time
// Log2Elements and launching it on `stream`.
#define SOFTMAX_WARP_FORWARD_CASE(Log2Elements, LabelT, VecT, AccT)           \
  case Log2Elements:                                                          \
    hipLaunchKernelGGL(( WarpSoftmaxForward<T, LabelT, VecT, AccT, Log2Elements, mode,         \
                        IgnoreIndex>), dim3(blocks), dim3(threads), 0, stream, \
        loss, softmax, src, label, batch_size, stride, element_count,         \
        ignore_index);                                                        \
    break;
/*
  Wrapper of softmax with cross entropy forward hard label.
  Selects the WarpSoftmaxForward instantiation whose compile-time Log2Elements
  equals ceil(log2(element_count)). Dimensions above 2^9 fall through the
  switch and launch nothing; callers route larger sizes elsewhere.
*/
template <typename T, typename LabelT, SoftmaxMode mode, bool IgnoreIndex>
void SwitchWarpSoftmaxForward(T* loss, T* softmax, const T* src,
                              const LabelT* label, const int batch_size,
                              const int stride, const int element_count,
                              const int ignore_index, gpuStream_t stream) {
  using AccT = typename details::MPTypeTrait<T>::Type;
  // use 128 threads per block to maximize gpu utilization
  const int log2_elements = static_cast<int>(Log2Ceil(element_count));
  const int kDimCeil = 1 << log2_elements;
  int kWarpSize = (kDimCeil < 32) ? kDimCeil : 32;
  // small rows (<=128 padded classes) let each warp process two rows
  int batches_per_warp = (kDimCeil <= 128) ? 2 : 1;
  constexpr int threads_per_block = 128;
  int warps_per_block = (threads_per_block / kWarpSize);
  int batches_per_block = warps_per_block * batches_per_warp;
  int blocks = (batch_size + batches_per_block - 1) / batches_per_block;
  dim3 threads(kWarpSize, warps_per_block, 1);
  switch (log2_elements) {
    SOFTMAX_WARP_FORWARD_CASE(0, LabelT, T, AccT);
    SOFTMAX_WARP_FORWARD_CASE(1, LabelT, T, AccT);
    SOFTMAX_WARP_FORWARD_CASE(2, LabelT, T, AccT);
    SOFTMAX_WARP_FORWARD_CASE(3, LabelT, T, AccT);
    SOFTMAX_WARP_FORWARD_CASE(4, LabelT, T, AccT);
    SOFTMAX_WARP_FORWARD_CASE(5, LabelT, T, AccT);
    SOFTMAX_WARP_FORWARD_CASE(6, LabelT, T, AccT);
    SOFTMAX_WARP_FORWARD_CASE(7, LabelT, T, AccT);
    SOFTMAX_WARP_FORWARD_CASE(8, LabelT, T, AccT);
    SOFTMAX_WARP_FORWARD_CASE(9, LabelT, T, AccT);
    default:
      break;
  }
}
// Writes loss[label_id] when this thread's flattened class index
// (vec_size * tid + offset) equals the label. With IgnoreIndex set, the
// ignored label stores a zero loss instead of loss_value.
template <typename T, bool IgnoreIndex>
__device__ __forceinline__ void ComputeLoss(T* loss, const T loss_value,
                                            const int label_id,
                                            const int64_t label_value,
                                            const int tid, const int vec_size,
                                            const int offset,
                                            const int ignore_index) {
  const int loss_id = vec_size * tid + offset;
  if (label_value != loss_id) {
    return;  // a different thread owns the labelled class
  }
  if (IgnoreIndex && label_value == ignore_index) {
    loss[label_id] = static_cast<T>(0.0f);
  } else {
    loss[label_id] = loss_value;
  }
}
// Per-thread strided reduction of input[0, size) using `reducer`, returning
// this thread's partial (callers combine partials with a block reduce).
// `offset` is the element misalignment of `input` relative to ALIGN_BYTES
// (as computed by the caller); the unaligned head is peeled off scalar-wise
// so the main loop can use aligned VecT loads.
template <typename T, typename AccT, int VecSize, class ReduceFunctor>
__device__ __forceinline__ AccT ThreadReduce(const T* input, int size,
                                             const int offset, AccT init,
                                             ReduceFunctor reducer) {
  using VecT = kps::details::VectorType<T, VecSize>;
  int tid = threadIdx.x;
  AccT val = init;
  if (offset > 0) {
    // shift the pointer back to the aligned boundary; lanes below `offset`
    // would fall before the real data, so only tid >= offset consumes here
    input -= offset;
    size += offset;
    if (tid >= offset) {
      val = reducer(val, input[tid]);
    }
    size -= blockDim.x;
    input += blockDim.x;
  }
  int remain = size % (VecSize * blockDim.x);
  T ins[VecSize];
  VecT* ins_vec = reinterpret_cast<VecT*>(&ins);
  // vector part
  for (; VecSize * tid < (size - remain); tid += blockDim.x) {
    *ins_vec = reinterpret_cast<const VecT*>(input)[tid];
#pragma unroll
    for (int i = 0; i < VecSize; ++i) {
      val = reducer(val, ins[i]);
    }
  }
  // scalar part (tail smaller than one full vectorized sweep)
  tid = size - remain + threadIdx.x;
  for (; tid < size; tid += blockDim.x) {
    val = reducer(val, input[tid]);
  }
  return val;
}
// Fused softmax + hard-label cross entropy for one batch row (blockIdx.x),
// vectorized path. Used when logits and softmax share the same alignment
// offset: the head is handled scalar-wise, then aligned VecT loads/stores
// cover the bulk, then a scalar tail. Writes exp(log_softmax) into `softmax`
// and lets ComputeLoss store the row loss.
template <typename T, typename AccT, typename LabelT, int VecSize,
          bool IgnoreIndex>
__device__ __forceinline__ void VectorizedSoftmaxForwardImpl(
    T* loss, T* softmax, const T* logits, const LabelT* label, int size,
    const int offset, const phi::LogSoftmaxForwardFunctor<AccT>& func,
    const int ignore_index) {
  using VecT = kps::details::VectorType<T, VecSize>;
  int tid = threadIdx.x;
  int label_id = blockIdx.x;
  auto label_value = static_cast<int64_t>(label[label_id]);
  const bool label_valid = label_value >= 0 && label_value < size;
  // loss_id_offset tracks how far the shifted pointers are from element 0 so
  // ComputeLoss can reconstruct the true class index.
  int loss_id_offset = 0;
  if (offset > 0) {
    // peel the unaligned head (same pointer-shifting scheme as ThreadReduce)
    logits -= offset;
    softmax -= offset;
    size += offset;
    loss_id_offset -= offset;
    if (tid >= offset) {
      AccT log_softmax = func(static_cast<AccT>(logits[tid]));
      softmax[tid] = static_cast<T>(::exp(log_softmax));
      // loss
      if (label_valid) {
        ComputeLoss<T, IgnoreIndex>(loss, static_cast<T>(-log_softmax),
                                    label_id, label_value, tid, 1,
                                    loss_id_offset, ignore_index);
      }
    }
    size -= blockDim.x;
    logits += blockDim.x;
    softmax += blockDim.x;
    loss_id_offset += blockDim.x;
  }
  int remain = size % (VecSize * blockDim.x);
  T ins[VecSize];
  T outs[VecSize];
  VecT* ins_vec = reinterpret_cast<VecT*>(&ins);
  VecT* outs_vec = reinterpret_cast<VecT*>(&outs);
  // vector part
  for (; VecSize * tid < (size - remain); tid += blockDim.x) {
    // read
    *ins_vec = reinterpret_cast<const VecT*>(logits)[tid];
#pragma unroll
    // compute
    for (int i = 0; i < VecSize; ++i) {
      AccT log_softmax = func(static_cast<AccT>(ins[i]));
      outs[i] = static_cast<T>(::exp(log_softmax));
      // loss
      if (label_valid) {
        ComputeLoss<T, IgnoreIndex>(loss, static_cast<T>(-log_softmax),
                                    label_id, label_value, tid, VecSize,
                                    loss_id_offset + i, ignore_index);
      }
    }
    // write
    reinterpret_cast<VecT*>(softmax)[tid] = *outs_vec;
  }
  // scalar part
  tid = size - remain + threadIdx.x;
  for (; tid < size; tid += blockDim.x) {
    AccT log_softmax = func(static_cast<AccT>(logits[tid]));
    softmax[tid] = static_cast<T>(::exp(log_softmax));
    // loss
    if (label_valid) {
      ComputeLoss<T, IgnoreIndex>(loss, static_cast<T>(-log_softmax), label_id,
                                  label_value, tid, 1, loss_id_offset,
                                  ignore_index);
    }
  }
  // invalid label, write once
  if (!label_valid && threadIdx.x == 0) {
    loss[label_id] = static_cast<T>(0.0f);
  }
}
// Scalar fallback of the fused softmax + hard-label cross entropy for one
// batch row; used when logits and softmax have different alignment offsets
// so vectorized loads/stores cannot be shared.
template <typename T, typename AccT, typename LabelT, int VecSize,
          bool IgnoreIndex>
__device__ __forceinline__ void ScalarSoftmaxForwardImpl(
    T* loss, T* softmax, const T* logits, const LabelT* label, const int size,
    const phi::LogSoftmaxForwardFunctor<AccT>& func, const int ignore_index) {
  int tid = threadIdx.x;
  int remain = size % (VecSize * blockDim.x);
  int label_id = blockIdx.x;
  auto label_value = static_cast<int64_t>(label[label_id]);
  const bool label_valid = label_value >= 0 && label_value < size;
  // main part
  for (; tid < (size - remain); tid += VecSize * blockDim.x) {
    T ins[VecSize];
#pragma unroll
    for (int i = 0; i < VecSize; ++i) {
      ins[i] = logits[tid + i * blockDim.x];
    }
#pragma unroll
    for (int i = 0; i < VecSize; ++i) {
      AccT log_softmax = func(static_cast<AccT>(ins[i]));
      softmax[tid + i * blockDim.x] = static_cast<T>(::exp(log_softmax));
      // loss
      // NOTE(review): ComputeLoss derives loss_id = VecSize * tid + i, which
      // differs from the element index tid + i * blockDim.x written above —
      // verify against upstream whether this mismatch is intended.
      if (label_valid) {
        ComputeLoss<T, IgnoreIndex>(loss, static_cast<T>(-log_softmax),
                                    label_id, label_value, tid, VecSize, i,
                                    ignore_index);
      }
    }
  }
  // tail part
  for (; tid < size; tid += blockDim.x) {
    AccT log_softmax = func(static_cast<AccT>(logits[tid]));
    softmax[tid] = static_cast<T>(::exp(log_softmax));
    // loss
    if (label_valid) {
      ComputeLoss<T, IgnoreIndex>(loss, static_cast<T>(-log_softmax), label_id,
                                  label_value, tid, 1, 0, ignore_index);
    }
  }
  // invalid label, write once
  if (!label_valid && threadIdx.x == 0) {
    loss[label_id] = static_cast<T>(0.0f);
  }
}
// One block per batch row: (1) block-reduce the row maximum, (2) block-reduce
// the shifted exp-sum, (3) write softmax and the hard-label loss through the
// vectorized or scalar impl depending on whether the input and output rows
// share the same ALIGN_BYTES misalignment.
template <typename T, typename AccT, typename LabelT, int VecSize,
          bool IgnoreIndex>
__global__ void VectorizedSoftmaxForward(T* loss, T* softmax, const T* logits,
                                         const LabelT* label,
                                         const int high_dim, const int mid_dim,
                                         const int ignore_index) {
  using VecT = kps::details::VectorType<T, VecSize>;
  // each block deal with one batch
  logits += blockIdx.x * mid_dim;
  softmax += blockIdx.x * mid_dim;
  // element offsets of each row pointer from the nearest ALIGN_BYTES boundary
  const int input_offset = ((uint64_t)logits) % ALIGN_BYTES / sizeof(T);
  const int output_offset = ((uint64_t)softmax) % ALIGN_BYTES / sizeof(T);
  // 1. reduce max
  AccT max = ThreadReduce<T, AccT, VecSize, kps::MaxFunctor<AccT>>(
      logits, mid_dim, input_offset, -std::numeric_limits<AccT>::infinity(),
      kps::MaxFunctor<AccT>());
  max = kps::details::BlockXReduce<AccT, kps::MaxFunctor<AccT>>(
      max, kps::MaxFunctor<AccT>());
  // 2. reduce sum of exp(x - max)
  AccT sum = ThreadReduce<T, AccT, VecSize, ExpAddFunctor<AccT>>(
      logits, mid_dim, input_offset, static_cast<AccT>(0),
      ExpAddFunctor<AccT>(max));
  sum = kps::details::BlockXReduce<AccT, kps::AddFunctor<AccT>>(
      sum, kps::AddFunctor<AccT>());
  // 3. softmax
  phi::LogSoftmaxForwardFunctor<AccT> func(max, sum);
  if (input_offset == output_offset) {
    VectorizedSoftmaxForwardImpl<T, AccT, LabelT, VecSize, IgnoreIndex>(
        loss, softmax, logits, label, mid_dim, input_offset, func,
        ignore_index);
  } else {
    ScalarSoftmaxForwardImpl<T, AccT, LabelT, VecSize, IgnoreIndex>(
        loss, softmax, logits, label, mid_dim, func, ignore_index);
  }
}
// Launches VectorizedSoftmaxForward with one block per batch row. The block
// size is the smallest power of two covering mid_dim / vec_size (halved first
// when vectorized loads are in play), never below a full warp and capped at
// 1024 threads.
template <typename T, typename LabelT, bool IgnoreIndex>
void LaunchVectorizedSoftmaxForward(T* loss, T* softmax, const T* logits,
                                    const LabelT* label, const int high_dim,
                                    const int mid_dim, const int ignore_index,
                                    gpuStream_t stream) {
  using AccT = typename details::MPTypeTrait<T>::Type;
  constexpr int vec_size = sizeof(float4) / sizeof(T);
  const int max_num_threads = 1024;
  int thread_cap = ::min(mid_dim / vec_size, max_num_threads);
  if (vec_size > 1) {
    thread_cap /= 2;
  }
  // round up to the next power of two
  int num_threads = 1;
  while (num_threads < thread_cap) {
    num_threads *= 2;
  }
  num_threads = ::max(num_threads, kps::details::kWarpSize);
  dim3 grids(high_dim);
  dim3 blocks(num_threads);
  hipLaunchKernelGGL(( VectorizedSoftmaxForward<T, AccT, LabelT, vec_size,
                           IgnoreIndex>), dim3(grids), dim3(blocks), 0, stream,
      loss, softmax, logits, label, high_dim, mid_dim, ignore_index);
}
/*
  Wrapper of softmax with cross entropy hard label.
  - SwitchWarpSoftmaxForward for small size (dim <= 320) when axis == -1
  - LaunchVectorizedSoftmaxForward for large size when axis == -1
  - cudnn/miopen log-softmax + CrossEntropyExpHardLabel for axis != -1
*/
template <typename T, typename LabelT, bool IgnoreIndex>
static void SoftmaxWithCrossEntropyHardLabel(
    const platform::CUDADeviceContext& ctx, int rank, int axis,
    const T* logits_data, const LabelT* labels_data, T* loss_data,
    T* softmax_data, int N, int dim, int D, const int ignore_index) {
  auto stream = ctx.stream();
  constexpr int max_dim = 320;
  if (D == 1) {
    if (dim <= max_dim) {  // small size
      const SoftmaxMode mode = SoftmaxMode::kCrossEntropy;
      SwitchWarpSoftmaxForward<T, LabelT, mode, IgnoreIndex>(
          loss_data, softmax_data, logits_data, labels_data, N, dim, dim,
          ignore_index, stream);
    } else {  // large size
      LaunchVectorizedSoftmaxForward<T, LabelT, IgnoreIndex>(
          loss_data, softmax_data, logits_data, labels_data, N, dim,
          ignore_index, stream);
    }
  } else {
    // D > 1: delegate the log-softmax to cuDNN/MIOpen, then a separate kernel
    // extracts the loss and converts log-softmax to softmax in place.
    ScopedTensorDescriptor desc;
    std::vector<int> tensor_dims = {N, dim, D, 1};
    DataLayout layout = DataLayout::kNCHW;
#ifdef PADDLE_WITH_HIP
    miopenTensorDescriptor_t descp = desc.descriptor<T>(layout, tensor_dims);
#else
    cudnnTensorDescriptor_t descp = desc.descriptor<T>(layout, tensor_dims);
#endif
    auto handle = ctx.cudnn_handle();
#ifdef PADDLE_WITH_HIP
    auto mode = axis == rank - 1 ? MIOPEN_SOFTMAX_MODE_INSTANCE
                                 : MIOPEN_SOFTMAX_MODE_CHANNEL;
    PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenSoftmaxForward_V2(
        handle, platform::CudnnDataType<T>::kOne(), descp, logits_data,
        platform::CudnnDataType<T>::kZero(), descp, softmax_data,
        MIOPEN_SOFTMAX_LOG, mode));
#else
    auto mode = axis == rank - 1 ? CUDNN_SOFTMAX_MODE_INSTANCE
                                 : CUDNN_SOFTMAX_MODE_CHANNEL;
    PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSoftmaxForward(
        handle, CUDNN_SOFTMAX_LOG, mode, platform::CudnnDataType<T>::kOne(),
        descp, logits_data, platform::CudnnDataType<T>::kZero(), descp,
        softmax_data));
#endif
    int threads = 128;
    int blocks = (N * dim * D + threads - 1) / threads;
    // compute cross entropy, input is log softmax
    hipLaunchKernelGGL(( CrossEntropyExpHardLabel<T, LabelT,
                            IgnoreIndex>), dim3(blocks), dim3(threads), 0, stream,
        loss_data, softmax_data, labels_data, N, dim, D, ignore_index);
  }
}
/*
  Gradient of softmax-with-cross-entropy for hard labels:
      dlogits = (softmax - one_hot(label)) * dloss
  with the gradient zeroed wherever the label equals ignore_index.
  One thread per logit element.
*/
template <typename T, typename LabelT>
__global__ void SoftmaxWithCrossEntropyGradHardLabel(
    T* logits_grad, const T* loss_grad, const T* softmax, const LabelT* labels,
    const int64_t n, const int64_t dim, const int64_t d,
    const int ignore_index) {
  int64_t idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= n * dim * d) {
    return;
  }
  int64_t idx_n = idx / (d * dim);
  int64_t idx_dim = (idx / d) % dim;
  int64_t idx_d = idx % d;
  int64_t ids = idx_n * d + idx_d;
  auto lbl = static_cast<int64_t>(labels[ids]);
  if (lbl == ignore_index) {
    logits_grad[idx] = static_cast<T>(0.0);
  } else {
    // subtract 1 only on the labelled class (the one-hot term)
    logits_grad[idx] = (lbl == idx_dim)
                           ? (softmax[idx] - static_cast<T>(1.0)) * loss_grad[ids]
                           : softmax[idx] * loss_grad[ids];
  }
}
/*
  Cross entropy soft label with dynamic size on axis (log2_elements is
  variable).
  - if the input is softmax, compute loss with softmax
  - if the input is log_softmax, compute loss with log_softmax and update
    softmax in place
*/
template <typename T, typename VecT, bool InLogMode = false>
__global__ void CrossEntropySoftLabel(T* loss, T* softmaxwrt, const T* softmax,
                                      const T* labels, const int n,
                                      const int dim, const int d,
                                      int log2_elements) {
  const int kDimCeil = 1 << log2_elements;
  const int kVSize = sizeof(VecT) / sizeof(T);
#ifdef __HIPCC__
  const int kThreadPerBlock = 256;
#else
  const int kThreadPerBlock = 512;
#endif
  const int kBatchPerBlock = 1;
  const int kWarpSize = 32;  // (dim < 32) ? dim : 32;
  const int kBatchSize = 1;
  const int kThreadPerBatch = kThreadPerBlock / kBatchPerBlock;
  const int kWarpPerBatch = kThreadPerBatch / kWarpSize;
  const int kIterations = (dim + kThreadPerBatch - 1) / kThreadPerBatch;
  const int kIterationsV = (kIterations >= kVSize) ? (kIterations / kVSize) : 1;
  const int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * kBatchSize;
  // per-thread partial of -sum(label * log(softmax)) for each handled row
  T sum[kBatchSize]{static_cast<T>(0.0)};
#pragma unroll
  for (int i = 0; i < kBatchSize; ++i) {
    int ids = first_batch + i;
    if (ids >= n * d) break;
    int idx_n = ids / d;
    int idx_d = ids % d;
#pragma unroll
    for (int it = 0; it < kIterations; ++it) {
      int idx_dim = it * kThreadPerBatch + threadIdx.x;
      int idx = idx_n * dim * d + idx_dim * d + idx_d;
      if (idx_n < n && idx_dim < dim) {
        VecT softmaxdata;
        if (InLogMode) {
          softmaxdata = reinterpret_cast<VecT*>(&softmaxwrt[idx])[0];
        } else {
          softmaxdata = reinterpret_cast<const VecT*>(&softmax[idx])[0];
        }
        VecT labelsdata = reinterpret_cast<const VecT*>(&labels[idx])[0];
        T* softmaxptr = reinterpret_cast<T*>(&softmaxdata);
        T* labelsptr = reinterpret_cast<T*>(&labelsdata);
#pragma unroll
        for (int s = 0; s < kVSize; s++) {
          if (InLogMode) {
            // input already log-softmax: accumulate loss directly, and
            // convert the stored value to softmax for the write-back below
            sum[i] -= softmaxptr[s] * labelsptr[s];
            softmaxptr[s] = Exp(softmaxptr[s]);
          } else {
            sum[i] -= Log(softmaxptr[s]) * labelsptr[s];
          }
        }
        if (InLogMode) {
          reinterpret_cast<VecT*>(&softmaxwrt[idx])[0] = softmaxdata;
        }
      }
    }
  }
  // combine lane partials within each warp, then across warps via shared memory
  phi::WarpReduceSum<T, kBatchSize, kWarpSize>(sum);
  __syncthreads();
  __shared__ T sumshare[kWarpPerBatch][kBatchPerBlock][kBatchSize];
  if (threadIdx.x % kWarpSize == 0) {
#pragma unroll
    for (int i = 0; i < kBatchSize; i++) {
      sumshare[threadIdx.x / kWarpSize][threadIdx.y][i] = sum[i];
    }
  }
  __syncthreads();
  // write: thread 0 sums the per-warp partials into the final loss
  if (threadIdx.x == 0) {
    for (int i = 0; i < kBatchSize; i++) {
      int ids = first_batch + i;
      if (ids < n * d) {
        loss[ids] = sumshare[0][threadIdx.y][i];
        for (int s = 1; s < kWarpPerBatch; s++) {
          loss[ids] += sumshare[s][threadIdx.y][i];
        }
      }
    }
  }
}
/*
Core function of softmax with cross entropy forward soft label.
The computation includes
- Compute maximum of batch: maxvalue_{i} = max_j src_{i,j}
- Compute sum of exp batch: s_{i} = sum_{j}{ exp(src_{i,j} - maxvalue_{i} }
- Compute: sum of - sum_{j}{ label_{i,j} * (src_{i,j} - maxvalue_{i} -
log(sum[i]))}
One warp (32 threads) is used to compute 1 or 2 batch (kBatchSize).
For reduction max (sum), firstly compute max (sum) to one warp, then use shuffle
api to compute max (sum) in one warp.
*/
template <typename T, typename VecT, typename AccT, int Log2Elements>
__global__ void WarpSoftmaxForwardSoftLabel(T* loss, T* softmax, const T* src,
const T* label,
const int batch_size,
const int stride,
const int element_count) {
const bool LogMode = true;
constexpr int kDimCeil = 1 << Log2Elements;
constexpr int kWarpSize = (kDimCeil < 32) ? kDimCeil : 32;
constexpr int kVSize = sizeof(VecT) / sizeof(T);
constexpr int kIterations = kDimCeil / kWarpSize;
constexpr int kIterationsV =
(kIterations >= kVSize) ? (kIterations / kVSize) : 1;
constexpr int kBatchSize = (kDimCeil <= 128) ? 2 : 1;
int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * kBatchSize;
int local_batches = batch_size - first_batch;
if (local_batches > kBatchSize) {
local_batches = kBatchSize;
}
// read data from global memory
VecT srcdata[kBatchSize][kIterationsV];
VecT labeldata[kBatchSize][kIterationsV];
for (int i = 0; i < kBatchSize; ++i) {
const VecT* src_v =
reinterpret_cast<const VecT*>(&src[(first_batch + i) * stride]);
const VecT* label_v =
reinterpret_cast<const VecT*>(&label[(first_batch + i) * stride]);
// max index to read
int idx_max = (i < local_batches) ? element_count : 0;
int idx_max_v = idx_max / kVSize;
// read data
for (int it = 0; it < kIterationsV; ++it) {
int src_idx = threadIdx.x + it * kWarpSize;
if (src_idx < idx_max_v) {
srcdata[i][it] = src_v[src_idx];
labeldata[i][it] = label_v[src_idx];
} else {
#pragma unroll
for (int s = 0; s < kVSize; s++) {
reinterpret_cast<T*>(&srcdata[i][it])[s] =
-std::numeric_limits<AccT>::max();
reinterpret_cast<T*>(&labeldata[i][it])[s] = 0.0;
}
}
}
}
// compute max value
AccT max_value[kBatchSize];
#pragma unroll
for (int i = 0; i < kBatchSize; ++i) {
max_value[i] = -std::numeric_limits<AccT>::infinity();
#pragma unroll
for (int it = 0; it < kIterationsV; ++it) {
T* srcptr_v = reinterpret_cast<T*>(&srcdata[i][it]);
T valmax = srcptr_v[0];
#pragma unroll
for (int s = 1; s < kVSize; ++s) {
valmax = (valmax > srcptr_v[s]) ? valmax : srcptr_v[s];
}
max_value[i] = (max_value[i] > static_cast<AccT>(valmax))
? max_value[i]
: static_cast<AccT>(valmax);
}
}
phi::WarpReduceMax<AccT, kBatchSize, kWarpSize>(max_value);
// compute sum
AccT sum[kBatchSize]{0.0};
#pragma unroll
for (int i = 0; i < kBatchSize; ++i) {
#pragma unroll
for (int it = 0; it < kIterationsV; ++it) {
T* srcptr_v = reinterpret_cast<T*>(&srcdata[i][it]);
#pragma unroll
for (int s = 0; s < kVSize; ++s) {
if (LogMode) {
sum[i] += ::exp(static_cast<AccT>(srcptr_v[s]) - max_value[i]);
} else {
srcptr_v[s] = ::exp(static_cast<AccT>(srcptr_v[s]) - max_value[i]);
sum[i] += static_cast<AccT>(srcptr_v[s]);
}
}
}
}
phi::WarpReduceSum<AccT, kBatchSize, kWarpSize>(sum);
// log_softmax and loss
AccT sumloss[kBatchSize]{0.0};
#pragma unroll
for (int i = 0; i < kBatchSize; ++i) {
if (i >= local_batches) break;
VecT* softmax_v =
reinterpret_cast<VecT*>(&softmax[(first_batch + i) * stride]);
// max index to write
int idx_max = (i < local_batches) ? element_count : 0;
int idx_max_v = idx_max / kVSize;
if (LogMode) {
sum[i] = ::log(sum[i]);
}
#pragma unroll
for (int it = 0; it < kIterationsV; ++it) {
T* srcvp = reinterpret_cast<T*>(&srcdata[i][it]);
T* labelvp = reinterpret_cast<T*>(&labeldata[i][it]);
VecT tmpv;
T* tmpvp = reinterpret_cast<T*>(&tmpv);
#pragma unroll
for (int s = 0; s < kVSize; ++s) {
if (LogMode) {
AccT logsoftmax = static_cast<AccT>(srcvp[s]) - max_value[i] - sum[i];
sumloss[i] -= logsoftmax * static_cast<AccT>(labelvp[s]);
tmpvp[s] = ::exp(logsoftmax);
} else {
tmpvp[s] = static_cast<AccT>(srcvp[s]) / sum[i];
}
}
int idx = threadIdx.x + it * kWarpSize;
if (idx < idx_max_v) {
softmax_v[idx] = tmpv;
}
}
}
// loss
phi::WarpReduceSum<AccT, kBatchSize, kWarpSize>(sumloss);
for (int i = 0; i < kBatchSize; i++) {
if (i >= local_batches) break;
loss[first_batch + i] = sumloss[i];
}
}
#define SOFTMAX_WARP_FORWARD_SOFT_CASE(Log2Elements, VecT, AccT) \
case Log2Elements: \
hipLaunchKernelGGL(( WarpSoftmaxForwardSoftLabel<T, VecT, AccT, \
Log2Elements>), dim3(blocks), dim3(threads), 0, stream, \
loss, softmax, src, label, batch_size, stride, element_count); \
break;
/*
Wrapper of softmax with cross entropy forward soft label.
*/
template <typename T>
void SwitchWarpSoftmaxForwardSoftLabel(const int blocks, const dim3 threads,
gpuStream_t stream, T* loss, T* softmax,
const T* src, const T* label,
const int batch_size, const int stride,
const int element_count,
const int log2_elements) {
using AccT = typename details::MPTypeTrait<T>::Type;
switch (log2_elements) {
SOFTMAX_WARP_FORWARD_SOFT_CASE(0, T, AccT);
SOFTMAX_WARP_FORWARD_SOFT_CASE(1, T, AccT);
SOFTMAX_WARP_FORWARD_SOFT_CASE(2, T, AccT);
SOFTMAX_WARP_FORWARD_SOFT_CASE(3, T, AccT);
SOFTMAX_WARP_FORWARD_SOFT_CASE(4, T, AccT);
SOFTMAX_WARP_FORWARD_SOFT_CASE(5, T, AccT);
SOFTMAX_WARP_FORWARD_SOFT_CASE(6, T, AccT);
SOFTMAX_WARP_FORWARD_SOFT_CASE(7, T, AccT);
SOFTMAX_WARP_FORWARD_SOFT_CASE(8, T, AccT);
SOFTMAX_WARP_FORWARD_SOFT_CASE(9, T, AccT);
default:
break;
}
}
template <typename T>
static void SoftmaxWithCrossEntropySoftLabel(
const platform::CUDADeviceContext& ctx, const int rank, const int axis,
const T* logits_data, const T* labels_data, T* softmax_data, T* loss_data,
int N, int dim, int D) {
#ifdef __HIPCC__
constexpr int kMaxBlockDim = 256;
#else
constexpr int kMaxBlockDim = 512;
#endif
int64_t block_dim = dim >= kMaxBlockDim
? kMaxBlockDim
: (1 << static_cast<int>(std::log2(dim)));
int64_t grid_dim = N * D;
constexpr int max_dim = 320;
const int kDimLog2 = static_cast<int>(Log2Ceil(dim));
const int kDimCeil = 1 << kDimLog2;
auto stream = ctx.stream();
if (D == 1 && dim <= max_dim) {
int kWarpSize = (kDimCeil < 32) ? kDimCeil : 32;
int batches_per_warp = (kDimCeil <= 128) ? 2 : 1;
// use 128 threads per block to maximimize gpu utilization
constexpr int threads_per_block = 128;
int warps_per_block = (threads_per_block / kWarpSize);
int batches_per_block = warps_per_block * batches_per_warp;
int blocks = (N + batches_per_block - 1) / batches_per_block;
dim3 threads(kWarpSize, warps_per_block, 1);
SwitchWarpSoftmaxForwardSoftLabel<T>(blocks, threads, stream, loss_data,
softmax_data, logits_data, labels_data,
N, dim, dim, kDimLog2);
} else {
ScopedTensorDescriptor desc;
std::vector<int> tensor_dims = {N, dim, D, 1};
DataLayout layout = DataLayout::kNCHW;
#ifdef PADDLE_WITH_HIP
miopenTensorDescriptor_t descp = desc.descriptor<T>(layout, tensor_dims);
#else
cudnnTensorDescriptor_t descp = desc.descriptor<T>(layout, tensor_dims);
#endif
auto handle = ctx.cudnn_handle();
#ifdef PADDLE_WITH_HIP
auto mode = axis == rank - 1 ? MIOPEN_SOFTMAX_MODE_INSTANCE
: MIOPEN_SOFTMAX_MODE_CHANNEL;
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenSoftmaxForward_V2(
handle, platform::CudnnDataType<T>::kOne(), descp, logits_data,
platform::CudnnDataType<T>::kZero(), descp, softmax_data,
MIOPEN_SOFTMAX_LOG, mode));
#else
auto mode = axis == rank - 1 ? CUDNN_SOFTMAX_MODE_INSTANCE
: CUDNN_SOFTMAX_MODE_CHANNEL;
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSoftmaxForward(
handle, CUDNN_SOFTMAX_LOG, mode, platform::CudnnDataType<T>::kOne(),
descp, logits_data, platform::CudnnDataType<T>::kZero(), descp,
softmax_data));
#endif
const int kDimLog2 = static_cast<int>(Log2Ceil(dim));
const int kDimCeil = 1 << kDimLog2;
#ifdef __HIPCC__
int kThreadPerBlock = 256;
#else
int kThreadPerBlock = 512;
#endif
int kBatchPerBlock = 1;
int blocks = (N * D + kBatchPerBlock - 1) / kBatchPerBlock;
dim3 threads(kThreadPerBlock / kBatchPerBlock, kBatchPerBlock, 1);
hipLaunchKernelGGL(( CrossEntropySoftLabel<T, T, true>), dim3(blocks), dim3(threads), 0, stream,
loss_data, softmax_data, NULL, labels_data, N, dim, D, kDimLog2);
}
}
template <typename T>
__global__ void SoftCrossEntropyGradientKernel(T* logit_grad,
const T* loss_grad,
const T* labels, const int64_t n,
const int64_t d,
const int64_t remain) {
int64_t ids = blockIdx.x * blockDim.x + threadIdx.x;
if (ids < n * d) {
int64_t idx_n = ids / d;
int64_t idx_remain = ids % remain;
int64_t idx_loss = idx_n * remain + idx_remain;
logit_grad[ids] = loss_grad[idx_loss] * (logit_grad[ids] - labels[ids]);
}
}
template <typename T>
__global__ void SoftLabelCrossEntropyGradientKernel(T* logit_grad,
const T* loss_grad,
const T* labels,
const int n, const int d,
const int remain) {
int ids = blockIdx.x * blockDim.x + threadIdx.x;
if (ids < n * d) {
int idx_n = ids / d;
int idx_remain = ids % remain;
int idx_loss = idx_n * remain + idx_remain;
logit_grad[ids] = loss_grad[idx_loss] * (-labels[ids] / logit_grad[ids]);
}
}
template <typename T, typename LabelT>
__global__ void HardLabelCrossEntropyGradientKernel(T* logit_grad,
const LabelT* labels,
const int n, const int d,
const int remain,
const int ignore_index) {
CUDA_KERNEL_LOOP(index, n * remain) {
int idx_n = index / remain;
int idx_remain = index % remain;
int tmp = static_cast<int>(labels[index]);
int idx = idx_n * d + tmp * remain + idx_remain;
if (ignore_index != tmp) {
logit_grad[idx] = -static_cast<T>(1.) / logit_grad[idx];
}
}
}
template <typename T, typename LabelT>
__global__ void ScaleCrossEntropyGradient(T* logit_grad, const T* loss_grad,
const int num, const int d,
const int remain,
const LabelT* labels,
const int ignore_index) {
CUDA_KERNEL_LOOP(index, num) {
int idx_n = index / d;
int idx_remain = index % remain;
int idx_lbl = idx_n * remain + idx_remain;
int k = (index % d) / remain;
auto lbl = static_cast<int64_t>(labels[idx_lbl]);
if (lbl == ignore_index || lbl != k) {
logit_grad[index] = static_cast<T>(0.);
} else {
logit_grad[index] *= loss_grad[idx_lbl];
}
}
}
template <typename T>
class SoftmaxWithCrossEntropyCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
RunSoftmaxWithCrossEntropyFunctor<T>(context, *this);
}
template <typename LabelT>
static void Apply(const framework::ExecutionContext& context,
const framework::Tensor& labels, const bool soft_label) {
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(context.GetPlace()), true,
platform::errors::Unavailable("softmax_with_cross_entropy operator's "
"CUDA kernel only runs on GPU device."));
const bool use_softmax = context.Attr<bool>("use_softmax");
// do not with softmax op, and input is softmax
if (!use_softmax) {
const Tensor* softmax = context.Input<Tensor>("Logits");
Tensor* softmax_out = context.Output<Tensor>("Softmax");
Tensor* loss = context.Output<Tensor>("Loss");
const int rank = softmax->dims().size();
const int axis =
phi::funcs::CanonicalAxis(context.Attr<int>("axis"), rank);
const int axis_dim = softmax->dims()[axis];
const int n = phi::funcs::SizeToAxis(axis, softmax->dims());
const int d = phi::funcs::SizeFromAxis(axis, softmax->dims());
auto* softmax_out_data =
softmax_out->template mutable_data<T>(context.GetPlace());
auto* loss_data = loss->template mutable_data<T>(context.GetPlace());
phi::funcs::SetConstant<platform::CUDADeviceContext, T> set_constant;
set_constant(context.cuda_device_context(), loss, static_cast<T>(0));
if (axis_dim == 1) {
set_constant(context.cuda_device_context(), softmax_out,
static_cast<T>(1));
return;
}
auto ignore_index = context.Attr<int>("ignore_index");
Tensor softmax_2d, labels_2d, loss_2d, softmax_out_2d;
softmax_2d.ShareDataWith(*softmax).Resize({n, d});
labels_2d.ShareDataWith(labels).Resize({n, labels.numel() / n});
loss_2d.ShareDataWith(*loss).Resize({n, 1});
softmax_out_2d.ShareDataWith(*softmax_out).Resize({n, d});
// math::CrossEntropyFunctor support axis is the last
if (axis == -1) {
math::CrossEntropyFunctor<platform::CUDADeviceContext, T>()(
context.cuda_device_context(), &loss_2d, &softmax_2d, &labels_2d,
soft_label, ignore_index, axis_dim);
return;
}
// if axis is not the last, we need a new impliment
if (soft_label) {
auto* logits_data = softmax->template data<T>();
auto* labels_data = labels.template data<T>();
const int kDimLog2 = static_cast<int>(Log2Ceil(axis_dim));
const int kDimCeil = 1 << kDimLog2;
#ifdef __HIPCC__
int kThreadPerBlock = 256;
#else
int kThreadPerBlock = 512;
#endif
int kBatchPerBlock = 1;
int blocks = (n * d + kBatchPerBlock - 1) / kBatchPerBlock;
dim3 threads(kThreadPerBlock / kBatchPerBlock, kBatchPerBlock, 1);
hipLaunchKernelGGL(( CrossEntropySoftLabel<T, T, false>),
dim3(blocks), dim3(threads), 0, context.cuda_device_context().stream(),
loss_data, NULL, logits_data, labels_data, n, axis_dim,
d / axis_dim, kDimLog2);
} else { // HardLabel
auto* logits_data = softmax->template data<T>();
auto* labels_data = labels.template data<LabelT>();
int threads = 128;
int blocks = (n * d / axis_dim + threads - 1) / threads;
if (ignore_index >= 0 && ignore_index < axis_dim) {
hipLaunchKernelGGL(( CrossEntropyHardLabel<T, LabelT, true>),
dim3(blocks), dim3(threads), 0, context.cuda_device_context().stream(),
loss_data, logits_data, labels_data, n, axis_dim, d / axis_dim,
ignore_index);
} else {
hipLaunchKernelGGL(( CrossEntropyHardLabel<T, LabelT, false>),
dim3(blocks), dim3(threads), 0, context.cuda_device_context().stream(),
loss_data, logits_data, labels_data, n, axis_dim, d / axis_dim,
ignore_index);
}
}
// cause of input is softmax
// copy to output softmax, directly
framework::TensorCopy(*softmax, context.GetPlace(),
context.device_context(), softmax_out);
return;
}
const Tensor* logits = context.Input<Tensor>("Logits");
Tensor* softmax = context.Output<Tensor>("Softmax");
Tensor* loss = context.Output<Tensor>("Loss");
const int rank = logits->dims().size();
const int axis = phi::funcs::CanonicalAxis(context.Attr<int>("axis"), rank);
int axis_dim = logits->dims()[axis];
const int64_t n = phi::funcs::SizeToAxis(axis, logits->dims());
const int64_t d = phi::funcs::SizeFromAxis(axis, logits->dims());
auto* softmax_data = softmax->template mutable_data<T>(context.GetPlace());
auto* loss_data = loss->template mutable_data<T>(context.GetPlace());
if (axis_dim == 1) {
phi::funcs::SetConstant<platform::CUDADeviceContext, T> set_constant;
set_constant(context.cuda_device_context(), softmax, static_cast<T>(1));
set_constant(context.cuda_device_context(), loss, static_cast<T>(0));
return;
}
auto ignore_index = context.Attr<int>("ignore_index");
if (soft_label) {
auto* logits_data = logits->template data<T>();
auto* labels_data = labels.template data<T>();
SoftmaxWithCrossEntropySoftLabel<T>(
context.cuda_device_context(), rank, axis, logits_data, labels_data,
softmax_data, loss_data, n, axis_dim, d / axis_dim);
} else {
if (!context.Attr<bool>("numeric_stable_mode")) {
// CUDNN kernel only suppoer 2-D tensor and perfome softmax on last dim
Tensor logits_2d, softmax_2d, labels_2d, loss_2d;
logits_2d.ShareDataWith(*logits).Resize({n, d});
softmax_2d.ShareDataWith(*softmax).Resize({n, d});
labels_2d.ShareDataWith(labels).Resize({n, labels.numel() / n});
loss_2d.ShareDataWith(*loss).Resize({n, 1});
math::SoftmaxCUDNNFunctor<T>()(context.cuda_device_context(),
&logits_2d, &softmax_2d);
math::CrossEntropyFunctor<platform::CUDADeviceContext, T>()(
context.cuda_device_context(), &loss_2d, &softmax_2d, &labels_2d,
false, ignore_index, axis_dim);
} else {
auto* logits_data = logits->template data<T>();
auto* labels_data = labels.template data<LabelT>();
if (ignore_index >= 0 && ignore_index < axis_dim) {
SoftmaxWithCrossEntropyHardLabel<T, LabelT, true>(
context.cuda_device_context(), rank, axis, logits_data,
labels_data, loss_data, softmax_data, n, axis_dim, d / axis_dim,
ignore_index);
} else {
SoftmaxWithCrossEntropyHardLabel<T, LabelT, false>(
context.cuda_device_context(), rank, axis, logits_data,
labels_data, loss_data, softmax_data, n, axis_dim, d / axis_dim,
ignore_index);
}
}
}
}
};
template <typename T>
class SoftmaxWithCrossEntropyGradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
RunSoftmaxWithCrossEntropyFunctor<T>(context, *this);
}
template <typename LabelT>
static void Apply(const framework::ExecutionContext& context,
const framework::Tensor& labels, const bool soft_label) {
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(context.GetPlace()), true,
platform::errors::Unavailable("softmax_with_cross_entropy operator's "
"CUDA kernel only runs on GPU device."));
const T* loss_grad_data =
context.Input<Tensor>(framework::GradVarName("Loss"))
->template data<T>();
Tensor* logit_grad =
context.Output<Tensor>(framework::GradVarName("Logits"));
const Tensor* softmax = context.Input<Tensor>("Softmax");
auto stream = context.cuda_device_context().stream();
auto ignore_index = context.Attr<int>("ignore_index");
auto use_softmax = context.Attr<bool>("use_softmax");
T* logit_grad_data = nullptr;
bool copy_flag = (logit_grad != softmax && (!use_softmax || soft_label));
if (copy_flag) {
framework::TensorCopy(*softmax, context.GetPlace(),
context.device_context(), logit_grad);
logit_grad_data = logit_grad->template data<T>();
} else {
logit_grad_data =
logit_grad->template mutable_data<T>(context.GetPlace());
}
const int rank = logit_grad->dims().size();
const int axis = phi::funcs::CanonicalAxis(context.Attr<int>("axis"), rank);
int axis_dim = logit_grad->dims()[axis];
const int64_t n = phi::funcs::SizeToAxis(axis, logit_grad->dims());
const int64_t d = phi::funcs::SizeFromAxis(axis, logit_grad->dims());
const int64_t remain = d / axis_dim;
#ifdef __HIPCC__
int block = 256;
#else
int block = 512;
#endif
// do not with softmax op, and input is softmax
if (!use_softmax) {
if (soft_label) {
int grid = (n * d + block - 1) / block;
const T* label_data = labels.template data<T>();
hipLaunchKernelGGL(( SoftLabelCrossEntropyGradientKernel<T>), dim3(grid), dim3(block), 0, stream,
logit_grad_data, loss_grad_data, label_data, n, d, remain);
} else {
Tensor logits_grad_2d;
logits_grad_2d.ShareDataWith(*logit_grad).Resize({n, d});
int grid = (n * remain + block - 1) / block;
const auto* label_data = labels.template data<LabelT>();
hipLaunchKernelGGL(( HardLabelCrossEntropyGradientKernel<T,
LabelT>), dim3(grid), dim3(block), 0, stream,
logit_grad_data, label_data, n, d, remain, ignore_index);
int num = n * d;
grid = (num + block - 1) / block;
hipLaunchKernelGGL(( ScaleCrossEntropyGradient<T, LabelT>), dim3(grid), dim3(block), 0, stream,
logit_grad_data, loss_grad_data, num, d, remain, label_data,
ignore_index);
}
return;
}
// with softmax, continue
if (soft_label) {
int64_t grid = (n * d + block - 1) / block;
const T* label_data = labels.template data<T>();
hipLaunchKernelGGL(( SoftCrossEntropyGradientKernel<T>), dim3(grid), dim3(block), 0, stream,
logit_grad_data, loss_grad_data, label_data, n, d, remain);
} else {
const T* softmax_data = softmax->template data<T>();
const auto* label_data = labels.template data<LabelT>();
int grid = (n * d + block - 1) / block;
hipLaunchKernelGGL(( SoftmaxWithCrossEntropyGradHardLabel<T>), dim3(grid), dim3(block), 0, stream,
logit_grad_data, loss_grad_data, softmax_data, label_data, n,
d / remain, remain, ignore_index);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
#ifdef PADDLE_WITH_HIP
// MIOPEN do not support double
REGISTER_OP_CUDA_KERNEL(
softmax_with_cross_entropy, ops::SoftmaxWithCrossEntropyCUDAKernel<float>,
ops::SoftmaxWithCrossEntropyCUDAKernel<paddle::platform::float16>);
REGISTER_OP_CUDA_KERNEL(
softmax_with_cross_entropy_grad,
ops::SoftmaxWithCrossEntropyGradCUDAKernel<float>,
ops::SoftmaxWithCrossEntropyGradCUDAKernel<paddle::platform::float16>);
#else
REGISTER_OP_CUDA_KERNEL(
softmax_with_cross_entropy, ops::SoftmaxWithCrossEntropyCUDAKernel<float>,
ops::SoftmaxWithCrossEntropyCUDAKernel<paddle::platform::float16>,
ops::SoftmaxWithCrossEntropyCUDAKernel<double>);
REGISTER_OP_CUDA_KERNEL(
softmax_with_cross_entropy_grad,
ops::SoftmaxWithCrossEntropyGradCUDAKernel<float>,
ops::SoftmaxWithCrossEntropyGradCUDAKernel<paddle::platform::float16>,
ops::SoftmaxWithCrossEntropyGradCUDAKernel<double>);
#endif
| ad30bd97514963ac86fb8ddd8ea4ad1077342851.cu | /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef __NVCC__
#include "cub/cub.cuh"
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#endif
#include "paddle/fluid/operators/amp/fp16_type_traits.h"
#include "paddle/fluid/operators/math/cross_entropy.h"
#include "paddle/fluid/operators/softmax_with_cross_entropy_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_device_function.h"
#include "paddle/fluid/platform/device/gpu/gpu_dnn.h"
#include "paddle/fluid/platform/for_range.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/gpudnn/softmax_gpudnn.h"
namespace paddle {
namespace operators {
#define ALIGN_BYTES 16
using ScopedTensorDescriptor = platform::ScopedTensorDescriptor;
using DataLayout = platform::DataLayout;
using Tensor = framework::Tensor;
namespace kps = phi::kps;
// Wrapper of log function. Use log(float32) for float16
template <typename T>
static __device__ __forceinline__ T Log(T x) {
using AccT = typename details::MPTypeTrait<T>::Type;
AccT logx = std::log(static_cast<AccT>(x));
return math::TolerableValue<T>()(static_cast<T>(logx));
}
// Wrapper of exp function. Use exp(float32) for float16
template <typename T>
static __device__ __forceinline__ T Exp(T x) {
using AccT = typename details::MPTypeTrait<T>::Type;
AccT expx = std::exp(static_cast<AccT>(x));
return math::TolerableValue<T>()(static_cast<T>(expx));
}
template <typename Tx, typename Ty = Tx>
struct ExpAddFunctor {
HOSTDEVICE inline ExpAddFunctor(Tx max) : max(max) {}
HOSTDEVICE inline Ty operator()(const Tx& sum, const Tx& x) const {
return static_cast<Ty>(sum + std::exp(x - max));
}
private:
Tx max;
};
// log2(value)
static inline int Log2Ceil(int value) {
int log2_value = 0;
while ((1 << log2_value) < value) ++log2_value;
return log2_value;
}
enum class SoftmaxMode { kSoftmax, kLogSoftmax, kCrossEntropy };
/*
Hard label cross entropy.
*/
template <typename T, typename LabelT, bool IgnoreIndex>
__global__ void CrossEntropyHardLabel(T* loss, const T* softmax,
const LabelT* labels, const int n,
const int dim, const int d,
const int ignore_idx) {
int64_t ids = blockIdx.x * blockDim.x + threadIdx.x;
int64_t idx_n = ids / d;
int64_t idx_d = ids % d;
// thread ids compute loss[ids] using softmax[idx]
if (ids < n * d) {
auto lbl = static_cast<int64_t>(labels[ids]);
if (lbl < 0) { // label is negative
loss[ids] = static_cast<T>(0.0);
} else { // label is positive of zero
int64_t idx = idx_n * dim * d + lbl * d + idx_d;
if (IgnoreIndex == true) {
// IgnoreIndex is true
if (lbl == ignore_idx) {
loss[ids] = static_cast<T>(0.0);
} else {
loss[ids] = -Log(softmax[idx]);
}
} else {
// IgnoreIndex is false
loss[ids] = -Log(softmax[idx]);
}
}
}
}
/*
Hard label cross entropy with exp.
Input: log softmax
Output: loss and exp(input)
*/
template <typename T, typename LabelT, bool IgnoreIndex>
__global__ void CrossEntropyExpHardLabel(T* loss, T* softmax,
const LabelT* labels, const int n,
const int dim, const int d,
const int ignore_idx) {
int64_t idx = blockIdx.x * blockDim.x + threadIdx.x;
int64_t idx_n = idx / (d * dim);
int64_t idx_dim = (idx / d) % dim;
int64_t idx_d = idx % d;
int64_t ids = idx_n * d + idx_d;
if (idx < n * dim * d) {
auto lbl = static_cast<int64_t>(labels[ids]);
if (IgnoreIndex == true) {
// IgnoreIndex is true
if (idx_dim == lbl) {
if (lbl == ignore_idx) {
loss[ids] = static_cast<T>(0.0);
} else {
loss[ids] = -softmax[idx];
}
}
} else {
// IgnoreIndex is false
if (lbl >= 0 && lbl < dim) {
if (lbl == idx_dim) {
loss[ids] = -softmax[idx];
}
} else {
loss[ids] = static_cast<T>(0.0);
}
}
softmax[idx] = Exp(softmax[idx]);
}
}
/*
Core function of softmax with cross entropy forward
- softmax, SoftmaxMode=kSoftmax
- log softmax, SoftmaxMode=kLogSoftmax
- softmax with cross entropy hard label, SoftmaxMode=kCrossEntropy
The computation includes
- Compute max value: maxvalue_{i} = max_j src_{i,j}
- Compute sum of exp: s_{i} = sum_{j}{e^{src_{i,j} - maxvalue_{i}}}
- Compute: softmax_{i,j} = e^{src_{i,j} - maxvalue_{i}} / s_{i}
- Compute: logsoftmax_{i,j} = src_{i,j} - maxvalue_{i} - log(s_{i})
- Compute: loss_{i} = -logsoftmax[i,label[i]] (Hard label)
This computation results from following formula:
softmax_{i,j} = e^{src_{i,j}} / sum_{j}{e^{src_{i,j}}}
= e^{src_{i,j} - maxvalue_{i}}
/ sum_{j}{e^{src_{i,j} - maxvalue_{i}}}
= e^{src_{i,j} - maxvalue_{i}} / s_{i}
logsoftmax_{i,j} = log(softmax_{i,j})
= src_{i,j} - maxvalue_{i} - log(s_{i})
One warp (32 threads) is used to compute 1 or 2 batch (kBatchSize).
For reduction max (sum), firstly compute max (sum) to one warp, then use
shuffle api to compute max (sum) in one warp.
*/
template <typename T, typename LabelT, typename VecT, typename AccT,
int Log2Elements, SoftmaxMode mode, bool IgnoreIndex>
__global__ void WarpSoftmaxForward(T* loss, T* softmax, const T* src,
const LabelT* label, const int batch_size,
const int stride, const int element_count,
const int ignore_index) {
constexpr int kDimCeil = 1 << Log2Elements;
constexpr int kWarpSize = (kDimCeil < 32) ? kDimCeil : 32;
constexpr int kVSize = sizeof(VecT) / sizeof(T);
constexpr int kIterations = kDimCeil / kWarpSize;
constexpr int kIterationsV =
(kIterations >= kVSize) ? (kIterations / kVSize) : 1;
constexpr int kBatchSize = (kDimCeil <= 128) ? 2 : 1;
int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * kBatchSize;
// max index to read
int idx_max_v[kBatchSize];
#pragma unroll
for (int i = 0; i < kBatchSize; i++) {
int idx_max = ((i + first_batch) < batch_size) ? element_count : 0;
idx_max_v[i] = idx_max / kVSize;
}
// read data from global memory
AccT srcdata[kBatchSize][kIterationsV][kVSize];
#pragma unroll
for (int i = 0; i < kBatchSize; ++i) {
// read data to srcdata: - KVSize==1, - KVSize>1
#pragma unroll
for (int it = 0; it < kIterationsV; ++it) {
int src_idx = threadIdx.x + it * kWarpSize;
if (kVSize == 1) {
if (src_idx < idx_max_v[i]) {
srcdata[i][it][0] =
static_cast<AccT>(src[(first_batch + i) * stride + src_idx]);
} else {
srcdata[i][it][0] = -std::numeric_limits<AccT>::infinity();
}
} else {
const VecT* src_v =
reinterpret_cast<const VecT*>(&src[(first_batch + i) * stride]);
if (src_idx < idx_max_v[i]) {
VecT srctmp = src_v[src_idx];
const T* srcinptr = reinterpret_cast<const T*>(&srctmp);
#pragma unroll
for (int s = 0; s < kVSize; s++) {
srcdata[i][it][s] = static_cast<AccT>(srcinptr[s]);
}
} else {
#pragma unroll
for (int s = 0; s < kVSize; s++) {
srcdata[i][it][s] = -std::numeric_limits<AccT>::infinity();
}
}
}
}
}
// compute max value: maxvalue_{i} = max_j src_{i,j}
AccT max_value[kBatchSize];
#pragma unroll
for (int i = 0; i < kBatchSize; ++i) {
// it = 0
AccT valmax = srcdata[i][0][0];
#pragma unroll
for (int s = 1; s < kVSize; ++s) {
valmax = (valmax > srcdata[i][0][s]) ? valmax : srcdata[i][0][s];
}
max_value[i] = valmax;
// it = 1, 2, ...
#pragma unroll
for (int it = 1; it < kIterationsV; ++it) {
AccT valmax = srcdata[i][it][0];
#pragma unroll
for (int s = 1; s < kVSize; ++s) {
valmax = (valmax > srcdata[i][it][s]) ? valmax : srcdata[i][it][s];
}
max_value[i] = (max_value[i] > valmax) ? max_value[i] : valmax;
}
}
phi::WarpReduceMax<AccT, kBatchSize, kWarpSize>(max_value);
// compute sum: s_{i} = sum_{j}{ exp(src_{i,j} - maxvalue_{i} }
AccT sum[kBatchSize];
#pragma unroll
for (int i = 0; i < kBatchSize; ++i) {
// it = 0
if (mode == SoftmaxMode::kLogSoftmax ||
mode == SoftmaxMode::kCrossEntropy) {
sum[i] = std::exp(srcdata[i][0][0] - max_value[i]);
} else {
srcdata[i][0][0] = std::exp(srcdata[i][0][0] - max_value[i]);
sum[i] = srcdata[i][0][0];
}
#pragma unroll
for (int s = 1; s < kVSize; ++s) {
if (mode == SoftmaxMode::kLogSoftmax ||
mode == SoftmaxMode::kCrossEntropy) {
sum[i] += std::exp(srcdata[i][0][s] - max_value[i]);
} else {
srcdata[i][0][s] = std::exp(srcdata[i][0][s] - max_value[i]);
sum[i] += srcdata[i][0][s];
}
}
// it = 1, 2, ...
#pragma unroll
for (int it = 1; it < kIterationsV; ++it) {
#pragma unroll
for (int s = 0; s < kVSize; ++s) {
if (mode == SoftmaxMode::kLogSoftmax ||
mode == SoftmaxMode::kCrossEntropy) {
sum[i] += std::exp(srcdata[i][it][s] - max_value[i]);
} else {
srcdata[i][it][s] = std::exp(srcdata[i][it][s] - max_value[i]);
sum[i] += srcdata[i][it][s];
}
}
}
}
phi::WarpReduceSum<AccT, kBatchSize, kWarpSize>(sum);
// write data
#pragma unroll
for (int i = 0; i < kBatchSize; ++i) {
if (mode == SoftmaxMode::kLogSoftmax ||
mode == SoftmaxMode::kCrossEntropy) {
sum[i] = std::log(sum[i]);
}
#pragma unroll
for (int it = 0; it < kIterationsV; ++it) {
int idx = threadIdx.x + it * kWarpSize;
if (kVSize == 1) { // kVSize==1
if (idx < idx_max_v[i]) {
if (mode == SoftmaxMode::kLogSoftmax) { // log softmax
softmax[(first_batch + i) * stride + idx] =
srcdata[i][it][0] - max_value[i] - sum[i];
// softmax with cross entropy hard label
} else if (mode == SoftmaxMode::kCrossEntropy) {
AccT logsoftmax = srcdata[i][it][0] - max_value[i] - sum[i];
// softmax
softmax[(first_batch + i) * stride + idx] = std::exp(logsoftmax);
// label
int loss_idx = (threadIdx.x + it * kWarpSize) * kVSize;
auto lbl = static_cast<int64_t>(label[first_batch + i]);
if (IgnoreIndex == true) {
// IgnoreIndex is true
if (lbl == loss_idx) {
if (lbl != ignore_index) {
loss[first_batch + i] = -logsoftmax;
} else {
loss[first_batch + i] = static_cast<T>(0.0);
}
}
} else {
// IgnoreIndex is false
if (lbl >= 0 && lbl < element_count) {
if (lbl == loss_idx) {
loss[first_batch + i] = -logsoftmax;
}
} else {
loss[first_batch + i] = static_cast<T>(0.0);
}
}
} else { // softmax
softmax[(first_batch + i) * stride + idx] =
srcdata[i][it][0] / sum[i];
}
} else {
break;
}
} else { // KVSize>1
VecT* softmax_v =
reinterpret_cast<VecT*>(&softmax[(first_batch + i) * stride]);
VecT tmpdata;
T* tmpptr = reinterpret_cast<T*>(&tmpdata);
#pragma unroll
for (int s = 0; s < kVSize; ++s) {
if (mode == SoftmaxMode::kLogSoftmax) { // log softmax
tmpptr[s] = srcdata[i][it][s] - max_value[i] - sum[i];
// softmax with cross entropy hard label
} else if (mode == SoftmaxMode::kCrossEntropy) {
AccT logsoftmax = srcdata[i][it][s] - max_value[i] - sum[i];
// softmax
tmpptr[s] = std::exp(logsoftmax);
// label
int loss_idx = (threadIdx.x + it * kWarpSize) * kVSize + s;
auto lbl = static_cast<int64_t>(label[first_batch + i]);
if (IgnoreIndex == true) {
// IgnoreIndex is true
if (lbl == loss_idx && lbl != ignore_index) {
loss[first_batch + i] = -logsoftmax;
}
} else {
// IgnoreIndex is false
if (lbl >= 0 && lbl < element_count) {
if (lbl == loss_idx) {
loss[first_batch + i] = -logsoftmax;
}
} else {
loss[first_batch + i] = static_cast<T>(0.0);
}
}
} else { // softmax
tmpptr[s] = srcdata[i][it][s] / sum[i];
}
}
if (idx < idx_max_v[i]) {
softmax_v[idx] = tmpdata;
} else {
break;
}
}
}
}
}
// Dispatch helper: instantiates WarpSoftmaxForward for one compile-time
// Log2Elements value and launches it. Expands inside a switch and refers to
// `blocks`, `threads`, `stream`, `mode`, `IgnoreIndex` and the kernel
// arguments by name, so it is only usable from SwitchWarpSoftmaxForward.
#define SOFTMAX_WARP_FORWARD_CASE(Log2Elements, LabelT, VecT, AccT) \
  case Log2Elements: \
    WarpSoftmaxForward<T, LabelT, VecT, AccT, Log2Elements, mode, \
                       IgnoreIndex><<<blocks, threads, 0, stream>>>( \
        loss, softmax, src, label, batch_size, stride, element_count, \
        ignore_index); \
    break;
/*
  Wrapper of softmax with cross entropy forward hard label.
  Dispatches to the WarpSoftmaxForward instantiation whose compile-time
  Log2Elements equals ceil(log2(element_count)). Row sizes above 2^9 fall
  through the switch and launch nothing (callers route those to the
  vectorized path instead).
*/
template <typename T, typename LabelT, SoftmaxMode mode, bool IgnoreIndex>
void SwitchWarpSoftmaxForward(T* loss, T* softmax, const T* src,
                              const LabelT* label, const int batch_size,
                              const int stride, const int element_count,
                              const int ignore_index, gpuStream_t stream) {
  using AccT = typename details::MPTypeTrait<T>::Type;
  // use 128 threads per block to maximize gpu utilization
  const int log2_elements = static_cast<int>(Log2Ceil(element_count));
  const int kDimCeil = 1 << log2_elements;
  // Rows shorter than a warp use a narrower logical "warp".
  int kWarpSize = (kDimCeil < 32) ? kDimCeil : 32;
  // Short rows let each warp process two batch rows per launch.
  int batches_per_warp = (kDimCeil <= 128) ? 2 : 1;
  constexpr int threads_per_block = 128;
  int warps_per_block = (threads_per_block / kWarpSize);
  int batches_per_block = warps_per_block * batches_per_warp;
  int blocks = (batch_size + batches_per_block - 1) / batches_per_block;
  dim3 threads(kWarpSize, warps_per_block, 1);
  // NOTE: the macro cases below reference `blocks`, `threads`, `stream`
  // and this function's parameters by name.
  switch (log2_elements) {
    SOFTMAX_WARP_FORWARD_CASE(0, LabelT, T, AccT);
    SOFTMAX_WARP_FORWARD_CASE(1, LabelT, T, AccT);
    SOFTMAX_WARP_FORWARD_CASE(2, LabelT, T, AccT);
    SOFTMAX_WARP_FORWARD_CASE(3, LabelT, T, AccT);
    SOFTMAX_WARP_FORWARD_CASE(4, LabelT, T, AccT);
    SOFTMAX_WARP_FORWARD_CASE(5, LabelT, T, AccT);
    SOFTMAX_WARP_FORWARD_CASE(6, LabelT, T, AccT);
    SOFTMAX_WARP_FORWARD_CASE(7, LabelT, T, AccT);
    SOFTMAX_WARP_FORWARD_CASE(8, LabelT, T, AccT);
    SOFTMAX_WARP_FORWARD_CASE(9, LabelT, T, AccT);
    default:
      break;
  }
}
// Writes the per-sample loss when this thread owns the labeled element.
// The thread whose flat element index `vec_size * tid + offset` equals the
// label is responsible for sample `label_id`. With IgnoreIndex enabled, a
// label equal to `ignore_index` yields a zero loss instead of `loss_value`.
template <typename T, bool IgnoreIndex>
__device__ __forceinline__ void ComputeLoss(T* loss, const T loss_value,
                                            const int label_id,
                                            const int64_t label_value,
                                            const int tid, const int vec_size,
                                            const int offset,
                                            const int ignore_index) {
  const int64_t owned_idx = vec_size * tid + offset;
  if (label_value != owned_idx) {
    return;  // another thread owns this label's element
  }
  if (IgnoreIndex && label_value == ignore_index) {
    loss[label_id] = static_cast<T>(0.0f);
  } else {
    loss[label_id] = loss_value;
  }
}
// Per-thread strided reduction over `input` (length `size`) with vectorized
// VecT loads; returns this thread's partial result (callers combine the
// partials with a separate block-level reduction). `offset` is the element
// misalignment of `input` from ALIGN_BYTES, as computed by the caller; a
// scalar prologue consumes it so the vector loop reads aligned chunks.
template <typename T, typename AccT, int VecSize, class ReduceFunctor>
__device__ __forceinline__ AccT ThreadReduce(const T* input, int size,
                                             const int offset, AccT init,
                                             ReduceFunctor reducer) {
  using VecT = kps::details::VectorType<T, VecSize>;
  int tid = threadIdx.x;
  AccT val = init;
  // Misaligned prologue: step back to the aligned base and let the threads
  // with tid >= offset pick up the leading elements.
  if (offset > 0) {
    input -= offset;
    size += offset;
    if (tid >= offset) {
      val = reducer(val, input[tid]);
    }
    size -= blockDim.x;
    input += blockDim.x;
  }
  // Elements that do not fill a whole (VecSize * blockDim.x) tile.
  int remain = size % (VecSize * blockDim.x);
  T ins[VecSize];
  VecT* ins_vec = reinterpret_cast<VecT*>(&ins);
  // vector part: aligned VecT loads, VecSize elements per iteration
  for (; VecSize * tid < (size - remain); tid += blockDim.x) {
    *ins_vec = reinterpret_cast<const VecT*>(input)[tid];
#pragma unroll
    for (int i = 0; i < VecSize; ++i) {
      val = reducer(val, ins[i]);
    }
  }
  // scalar part: the trailing `remain` elements
  tid = size - remain + threadIdx.x;
  for (; tid < size; tid += blockDim.x) {
    val = reducer(val, input[tid]);
  }
  return val;
}
// Fused log-softmax + hard-label loss for one batch row (one block per row),
// used when `logits` and `softmax` share the same alignment offset. `func`
// holds the precomputed row max and sum; `offset` is the element
// misalignment from ALIGN_BYTES. Writes exp(log_softmax) to `softmax` and,
// via ComputeLoss, -log_softmax of the labeled element to `loss`.
template <typename T, typename AccT, typename LabelT, int VecSize,
          bool IgnoreIndex>
__device__ __forceinline__ void VectorizedSoftmaxForwardImpl(
    T* loss, T* softmax, const T* logits, const LabelT* label, int size,
    const int offset, const phi::LogSoftmaxForwardFunctor<AccT>& func,
    const int ignore_index) {
  using VecT = kps::details::VectorType<T, VecSize>;
  int tid = threadIdx.x;
  int label_id = blockIdx.x;
  auto label_value = static_cast<int64_t>(label[label_id]);
  const bool label_valid = label_value >= 0 && label_value < size;
  // Tracks the mapping from the shifted pointers back to original element
  // indices so the labeled element can still be identified after the shifts.
  int loss_id_offset = 0;
  // Misaligned prologue (mirrors ThreadReduce): shift to the aligned base
  // and let threads with tid >= offset handle the leading elements.
  if (offset > 0) {
    logits -= offset;
    softmax -= offset;
    size += offset;
    loss_id_offset -= offset;
    if (tid >= offset) {
      AccT log_softmax = func(static_cast<AccT>(logits[tid]));
      softmax[tid] = static_cast<T>(std::exp(log_softmax));
      // loss
      if (label_valid) {
        ComputeLoss<T, IgnoreIndex>(loss, static_cast<T>(-log_softmax),
                                    label_id, label_value, tid, 1,
                                    loss_id_offset, ignore_index);
      }
    }
    size -= blockDim.x;
    logits += blockDim.x;
    softmax += blockDim.x;
    loss_id_offset += blockDim.x;
  }
  // Elements that do not fill a whole (VecSize * blockDim.x) tile.
  int remain = size % (VecSize * blockDim.x);
  T ins[VecSize];
  T outs[VecSize];
  VecT* ins_vec = reinterpret_cast<VecT*>(&ins);
  VecT* outs_vec = reinterpret_cast<VecT*>(&outs);
  // vector part
  for (; VecSize * tid < (size - remain); tid += blockDim.x) {
    // read
    *ins_vec = reinterpret_cast<const VecT*>(logits)[tid];
#pragma unroll
    // compute
    for (int i = 0; i < VecSize; ++i) {
      AccT log_softmax = func(static_cast<AccT>(ins[i]));
      outs[i] = static_cast<T>(std::exp(log_softmax));
      // loss
      if (label_valid) {
        ComputeLoss<T, IgnoreIndex>(loss, static_cast<T>(-log_softmax),
                                    label_id, label_value, tid, VecSize,
                                    loss_id_offset + i, ignore_index);
      }
    }
    // write
    reinterpret_cast<VecT*>(softmax)[tid] = *outs_vec;
  }
  // scalar part: the trailing `remain` elements
  tid = size - remain + threadIdx.x;
  for (; tid < size; tid += blockDim.x) {
    AccT log_softmax = func(static_cast<AccT>(logits[tid]));
    softmax[tid] = static_cast<T>(std::exp(log_softmax));
    // loss
    if (label_valid) {
      ComputeLoss<T, IgnoreIndex>(loss, static_cast<T>(-log_softmax), label_id,
                                  label_value, tid, 1, loss_id_offset,
                                  ignore_index);
    }
  }
  // invalid label (outside [0, size)): a single thread zeroes the loss
  if (!label_valid && threadIdx.x == 0) {
    loss[label_id] = static_cast<T>(0.0f);
  }
}
// Scalar fallback of VectorizedSoftmaxForwardImpl, used when input and
// output alignment offsets differ so vector loads/stores cannot be paired.
// Same contract: writes exp(log_softmax) to `softmax` and the labeled
// element's -log_softmax to `loss`.
template <typename T, typename AccT, typename LabelT, int VecSize,
          bool IgnoreIndex>
__device__ __forceinline__ void ScalarSoftmaxForwardImpl(
    T* loss, T* softmax, const T* logits, const LabelT* label, const int size,
    const phi::LogSoftmaxForwardFunctor<AccT>& func, const int ignore_index) {
  int tid = threadIdx.x;
  int remain = size % (VecSize * blockDim.x);
  int label_id = blockIdx.x;
  auto label_value = static_cast<int64_t>(label[label_id]);
  const bool label_valid = label_value >= 0 && label_value < size;
  // main part
  for (; tid < (size - remain); tid += VecSize * blockDim.x) {
    T ins[VecSize];
#pragma unroll
    for (int i = 0; i < VecSize; ++i) {
      ins[i] = logits[tid + i * blockDim.x];
    }
#pragma unroll
    for (int i = 0; i < VecSize; ++i) {
      AccT log_softmax = func(static_cast<AccT>(ins[i]));
      softmax[tid + i * blockDim.x] = static_cast<T>(std::exp(log_softmax));
      // loss
      // NOTE(review): the element written here is `tid + i * blockDim.x`,
      // but ComputeLoss matches the label against `VecSize * tid + i` —
      // these disagree for VecSize > 1; confirm intended index mapping.
      if (label_valid) {
        ComputeLoss<T, IgnoreIndex>(loss, static_cast<T>(-log_softmax),
                                    label_id, label_value, tid, VecSize, i,
                                    ignore_index);
      }
    }
  }
  // tail part
  for (; tid < size; tid += blockDim.x) {
    AccT log_softmax = func(static_cast<AccT>(logits[tid]));
    softmax[tid] = static_cast<T>(std::exp(log_softmax));
    // loss
    if (label_valid) {
      ComputeLoss<T, IgnoreIndex>(loss, static_cast<T>(-log_softmax), label_id,
                                  label_value, tid, 1, 0, ignore_index);
    }
  }
  // invalid label (outside [0, size)): a single thread zeroes the loss
  if (!label_valid && threadIdx.x == 0) {
    loss[label_id] = static_cast<T>(0.0f);
  }
}
// Fused softmax + cross-entropy (hard label) for large rows: one block per
// batch row (`high_dim` blocks of `mid_dim` elements). Three phases:
// block-wide max reduction, block-wide sum of exp(x - max), then an
// elementwise log-softmax/loss pass.
template <typename T, typename AccT, typename LabelT, int VecSize,
          bool IgnoreIndex>
__global__ void VectorizedSoftmaxForward(T* loss, T* softmax, const T* logits,
                                         const LabelT* label,
                                         const int high_dim, const int mid_dim,
                                         const int ignore_index) {
  using VecT = kps::details::VectorType<T, VecSize>;
  // each block deal with one batch
  logits += blockIdx.x * mid_dim;
  softmax += blockIdx.x * mid_dim;
  // Element misalignment of each pointer from ALIGN_BYTES; determines
  // whether the vectorized or the scalar write path can be used.
  const int input_offset = ((uint64_t)logits) % ALIGN_BYTES / sizeof(T);
  const int output_offset = ((uint64_t)softmax) % ALIGN_BYTES / sizeof(T);
  // 1. reduce max
  AccT max = ThreadReduce<T, AccT, VecSize, kps::MaxFunctor<AccT>>(
      logits, mid_dim, input_offset, -std::numeric_limits<AccT>::infinity(),
      kps::MaxFunctor<AccT>());
  max = kps::details::BlockXReduce<AccT, kps::MaxFunctor<AccT>>(
      max, kps::MaxFunctor<AccT>());
  // 2. reduce sum of exp(x - max)
  AccT sum = ThreadReduce<T, AccT, VecSize, ExpAddFunctor<AccT>>(
      logits, mid_dim, input_offset, static_cast<AccT>(0),
      ExpAddFunctor<AccT>(max));
  sum = kps::details::BlockXReduce<AccT, kps::AddFunctor<AccT>>(
      sum, kps::AddFunctor<AccT>());
  // 3. softmax + loss: vectorized path only when in/out offsets match
  phi::LogSoftmaxForwardFunctor<AccT> func(max, sum);
  if (input_offset == output_offset) {
    VectorizedSoftmaxForwardImpl<T, AccT, LabelT, VecSize, IgnoreIndex>(
        loss, softmax, logits, label, mid_dim, input_offset, func,
        ignore_index);
  } else {
    ScalarSoftmaxForwardImpl<T, AccT, LabelT, VecSize, IgnoreIndex>(
        loss, softmax, logits, label, mid_dim, func, ignore_index);
  }
}
// Host-side launcher for VectorizedSoftmaxForward: one block per batch row
// (`high_dim` blocks), with a power-of-two block size derived from
// `mid_dim`, halved when loads are vectorized and clamped to at least one
// warp and at most 1024 threads.
template <typename T, typename LabelT, bool IgnoreIndex>
void LaunchVectorizedSoftmaxForward(T* loss, T* softmax, const T* logits,
                                    const LabelT* label, const int high_dim,
                                    const int mid_dim, const int ignore_index,
                                    gpuStream_t stream) {
  using AccT = typename details::MPTypeTrait<T>::Type;
  constexpr int vec_size = sizeof(float4) / sizeof(T);
  // Upper bound on threads: one vectorized element per thread, capped at
  // the hardware maximum of 1024 threads per block.
  int thread_cap = std::min(mid_dim / vec_size, 1024);
  if (vec_size > 1) {
    thread_cap /= 2;  // leave headroom when each thread loads a vector
  }
  // Smallest power of two >= thread_cap, but never below one warp.
  int num_threads = 1;
  while (num_threads < thread_cap) {
    num_threads <<= 1;
  }
  num_threads = std::max(num_threads, kps::details::kWarpSize);
  dim3 grid_dim(high_dim);
  dim3 block_dim(num_threads);
  VectorizedSoftmaxForward<T, AccT, LabelT, vec_size,
                           IgnoreIndex><<<grid_dim, block_dim, 0, stream>>>(
      loss, softmax, logits, label, high_dim, mid_dim, ignore_index);
}
/*
  Wrapper of softmax with cross entropy hard label.
  - SwitchWarpSoftmaxForward for small sizes (dim <= 320) when D == 1
  - LaunchVectorizedSoftmaxForward for large sizes when D == 1
  - cudnn/miopen log-softmax + CrossEntropyExpHardLabel otherwise
*/
template <typename T, typename LabelT, bool IgnoreIndex>
static void SoftmaxWithCrossEntropyHardLabel(
    const platform::CUDADeviceContext& ctx, int rank, int axis,
    const T* logits_data, const LabelT* labels_data, T* loss_data,
    T* softmax_data, int N, int dim, int D, const int ignore_index) {
  auto stream = ctx.stream();
  // Threshold between the warp kernel and the vectorized block kernel.
  constexpr int max_dim = 320;
  if (D == 1) {
    if (dim <= max_dim) {  // small size
      const SoftmaxMode mode = SoftmaxMode::kCrossEntropy;
      SwitchWarpSoftmaxForward<T, LabelT, mode, IgnoreIndex>(
          loss_data, softmax_data, logits_data, labels_data, N, dim, dim,
          ignore_index, stream);
    } else {  // large size
      LaunchVectorizedSoftmaxForward<T, LabelT, IgnoreIndex>(
          loss_data, softmax_data, logits_data, labels_data, N, dim,
          ignore_index, stream);
    }
  } else {
    // Library path: view the data as NCHW {N, dim, D, 1} and run a
    // log-softmax over the channel (or instance) dimension.
    ScopedTensorDescriptor desc;
    std::vector<int> tensor_dims = {N, dim, D, 1};
    DataLayout layout = DataLayout::kNCHW;
#ifdef PADDLE_WITH_HIP
    miopenTensorDescriptor_t descp = desc.descriptor<T>(layout, tensor_dims);
#else
    cudnnTensorDescriptor_t descp = desc.descriptor<T>(layout, tensor_dims);
#endif
    auto handle = ctx.cudnn_handle();
#ifdef PADDLE_WITH_HIP
    auto mode = axis == rank - 1 ? MIOPEN_SOFTMAX_MODE_INSTANCE
                                 : MIOPEN_SOFTMAX_MODE_CHANNEL;
    PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenSoftmaxForward_V2(
        handle, platform::CudnnDataType<T>::kOne(), descp, logits_data,
        platform::CudnnDataType<T>::kZero(), descp, softmax_data,
        MIOPEN_SOFTMAX_LOG, mode));
#else
    auto mode = axis == rank - 1 ? CUDNN_SOFTMAX_MODE_INSTANCE
                                 : CUDNN_SOFTMAX_MODE_CHANNEL;
    PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSoftmaxForward(
        handle, CUDNN_SOFTMAX_LOG, mode, platform::CudnnDataType<T>::kOne(),
        descp, logits_data, platform::CudnnDataType<T>::kZero(), descp,
        softmax_data));
#endif
    int threads = 128;
    int blocks = (N * dim * D + threads - 1) / threads;
    // compute cross entropy, input is log softmax
    CrossEntropyExpHardLabel<T, LabelT,
                             IgnoreIndex><<<blocks, threads, 0, stream>>>(
        loss_data, softmax_data, labels_data, N, dim, D, ignore_index);
  }
}
/*
  Backward kernel of softmax with cross entropy for hard labels.
*/
// Hard-label backward: for each logit element,
//   grad = (softmax - 1) * dLoss  at the labeled class,
//   grad =  softmax      * dLoss  elsewhere,
//   grad =  0                     when the label equals ignore_index.
template <typename T, typename LabelT>
__global__ void SoftmaxWithCrossEntropyGradHardLabel(
    T* logits_grad, const T* loss_grad, const T* softmax, const LabelT* labels,
    const int64_t n, const int64_t dim, const int64_t d,
    const int ignore_index) {
  const int64_t tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid >= n * dim * d) return;
  // Decompose the flat index into (batch, class, inner) coordinates.
  const int64_t batch = tid / (d * dim);
  const int64_t cls = (tid / d) % dim;
  const int64_t inner = tid % d;
  const int64_t sample = batch * d + inner;  // index into labels/loss_grad
  const auto lbl = static_cast<int64_t>(labels[sample]);
  if (lbl == ignore_index) {
    logits_grad[tid] = static_cast<T>(0.0);
  } else if (lbl == cls) {
    logits_grad[tid] = (softmax[tid] - static_cast<T>(1.0)) * loss_grad[sample];
  } else {
    logits_grad[tid] = softmax[tid] * loss_grad[sample];
  }
}
/*
  Cross entropy with soft labels and a dynamic size along the reduced axis
  (log2_elements is a runtime variable).
  - if the input is softmax, compute loss from softmax directly
  - if the input is log_softmax (InLogMode), compute loss from log_softmax
    and overwrite `softmaxwrt` in place with exp(log_softmax)
*/
template <typename T, typename VecT, bool InLogMode = false>
__global__ void CrossEntropySoftLabel(T* loss, T* softmaxwrt, const T* softmax,
                                      const T* labels, const int n,
                                      const int dim, const int d,
                                      int log2_elements) {
  const int kDimCeil = 1 << log2_elements;
  const int kVSize = sizeof(VecT) / sizeof(T);
#ifdef __HIPCC__
  const int kThreadPerBlock = 256;
#else
  const int kThreadPerBlock = 512;
#endif
  const int kBatchPerBlock = 1;
  const int kWarpSize = 32;  // (dim < 32) ? dim : 32;
  const int kBatchSize = 1;
  const int kThreadPerBatch = kThreadPerBlock / kBatchPerBlock;
  const int kWarpPerBatch = kThreadPerBatch / kWarpSize;
  // Each thread covers kIterations elements of the dim axis.
  const int kIterations = (dim + kThreadPerBatch - 1) / kThreadPerBatch;
  const int kIterationsV = (kIterations >= kVSize) ? (kIterations / kVSize) : 1;
  const int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * kBatchSize;
  // Per-thread partial of -sum(label * log(prob)) for each handled sample.
  T sum[kBatchSize]{static_cast<T>(0.0)};
#pragma unroll
  for (int i = 0; i < kBatchSize; ++i) {
    int ids = first_batch + i;
    if (ids >= n * d) break;
    int idx_n = ids / d;
    int idx_d = ids % d;
#pragma unroll
    for (int it = 0; it < kIterations; ++it) {
      int idx_dim = it * kThreadPerBatch + threadIdx.x;
      int idx = idx_n * dim * d + idx_dim * d + idx_d;
      if (idx_n < n && idx_dim < dim) {
        VecT softmaxdata;
        if (InLogMode) {
          softmaxdata = reinterpret_cast<VecT*>(&softmaxwrt[idx])[0];
        } else {
          softmaxdata = reinterpret_cast<const VecT*>(&softmax[idx])[0];
        }
        VecT labelsdata = reinterpret_cast<const VecT*>(&labels[idx])[0];
        T* softmaxptr = reinterpret_cast<T*>(&softmaxdata);
        T* labelsptr = reinterpret_cast<T*>(&labelsdata);
#pragma unroll
        for (int s = 0; s < kVSize; s++) {
          if (InLogMode) {
            // input already holds log(prob): accumulate loss, then convert
            // the stored value to prob for the in-place write-back below.
            sum[i] -= softmaxptr[s] * labelsptr[s];
            softmaxptr[s] = Exp(softmaxptr[s]);
          } else {
            sum[i] -= Log(softmaxptr[s]) * labelsptr[s];
          }
        }
        if (InLogMode) {
          reinterpret_cast<VecT*>(&softmaxwrt[idx])[0] = softmaxdata;
        }
      }
    }
  }
  // Reduce partials within each warp, then across warps via shared memory.
  phi::WarpReduceSum<T, kBatchSize, kWarpSize>(sum);
  __syncthreads();
  __shared__ T sumshare[kWarpPerBatch][kBatchPerBlock][kBatchSize];
  if (threadIdx.x % kWarpSize == 0) {
#pragma unroll
    for (int i = 0; i < kBatchSize; i++) {
      sumshare[threadIdx.x / kWarpSize][threadIdx.y][i] = sum[i];
    }
  }
  __syncthreads();
  // write: lane 0 combines the per-warp partials into the final loss
  if (threadIdx.x == 0) {
    for (int i = 0; i < kBatchSize; i++) {
      int ids = first_batch + i;
      if (ids < n * d) {
        loss[ids] = sumshare[0][threadIdx.y][i];
        for (int s = 1; s < kWarpPerBatch; s++) {
          loss[ids] += sumshare[s][threadIdx.y][i];
        }
      }
    }
  }
}
/*
  Core function of softmax with cross entropy forward soft label.
  The computation includes
  - Compute maximum of batch: maxvalue_{i} = max_j src_{i,j}
  - Compute sum of exp batch: s_{i} = sum_{j}{ exp(src_{i,j} - maxvalue_{i}) }
  - Compute: sum of - sum_{j}{ label_{i,j} * (src_{i,j} - maxvalue_{i} -
  log(sum[i]))}
  One warp (32 threads) is used to compute 1 or 2 batch (kBatchSize).
  For reduction max (sum), firstly compute max (sum) to one warp, then use
  shuffle api to compute max (sum) in one warp.
*/
template <typename T, typename VecT, typename AccT, int Log2Elements>
__global__ void WarpSoftmaxForwardSoftLabel(T* loss, T* softmax, const T* src,
                                            const T* label,
                                            const int batch_size,
                                            const int stride,
                                            const int element_count) {
  const bool LogMode = true;
  constexpr int kDimCeil = 1 << Log2Elements;
  constexpr int kWarpSize = (kDimCeil < 32) ? kDimCeil : 32;
  constexpr int kVSize = sizeof(VecT) / sizeof(T);
  constexpr int kIterations = kDimCeil / kWarpSize;
  constexpr int kIterationsV =
      (kIterations >= kVSize) ? (kIterations / kVSize) : 1;
  constexpr int kBatchSize = (kDimCeil <= 128) ? 2 : 1;
  int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * kBatchSize;
  int local_batches = batch_size - first_batch;
  if (local_batches > kBatchSize) {
    local_batches = kBatchSize;
  }
  // read data from global memory into registers; out-of-range lanes are
  // filled with -max (src) and 0 (label) so they cannot win the reductions
  VecT srcdata[kBatchSize][kIterationsV];
  VecT labeldata[kBatchSize][kIterationsV];
  for (int i = 0; i < kBatchSize; ++i) {
    const VecT* src_v =
        reinterpret_cast<const VecT*>(&src[(first_batch + i) * stride]);
    const VecT* label_v =
        reinterpret_cast<const VecT*>(&label[(first_batch + i) * stride]);
    // max index to read
    int idx_max = (i < local_batches) ? element_count : 0;
    int idx_max_v = idx_max / kVSize;
    // read data
    for (int it = 0; it < kIterationsV; ++it) {
      int src_idx = threadIdx.x + it * kWarpSize;
      if (src_idx < idx_max_v) {
        srcdata[i][it] = src_v[src_idx];
        labeldata[i][it] = label_v[src_idx];
      } else {
#pragma unroll
        for (int s = 0; s < kVSize; s++) {
          reinterpret_cast<T*>(&srcdata[i][it])[s] =
              -std::numeric_limits<AccT>::max();
          reinterpret_cast<T*>(&labeldata[i][it])[s] = 0.0;
        }
      }
    }
  }
  // compute max value per batch row (lane-local, then warp shuffle reduce)
  AccT max_value[kBatchSize];
#pragma unroll
  for (int i = 0; i < kBatchSize; ++i) {
    max_value[i] = -std::numeric_limits<AccT>::infinity();
#pragma unroll
    for (int it = 0; it < kIterationsV; ++it) {
      T* srcptr_v = reinterpret_cast<T*>(&srcdata[i][it]);
      T valmax = srcptr_v[0];
#pragma unroll
      for (int s = 1; s < kVSize; ++s) {
        valmax = (valmax > srcptr_v[s]) ? valmax : srcptr_v[s];
      }
      max_value[i] = (max_value[i] > static_cast<AccT>(valmax))
                         ? max_value[i]
                         : static_cast<AccT>(valmax);
    }
  }
  phi::WarpReduceMax<AccT, kBatchSize, kWarpSize>(max_value);
  // compute sum of exp(x - max); in non-log mode the registers are
  // overwritten with exp(x - max) for reuse in the write phase
  AccT sum[kBatchSize]{0.0};
#pragma unroll
  for (int i = 0; i < kBatchSize; ++i) {
#pragma unroll
    for (int it = 0; it < kIterationsV; ++it) {
      T* srcptr_v = reinterpret_cast<T*>(&srcdata[i][it]);
#pragma unroll
      for (int s = 0; s < kVSize; ++s) {
        if (LogMode) {
          sum[i] += std::exp(static_cast<AccT>(srcptr_v[s]) - max_value[i]);
        } else {
          srcptr_v[s] = std::exp(static_cast<AccT>(srcptr_v[s]) - max_value[i]);
          sum[i] += static_cast<AccT>(srcptr_v[s]);
        }
      }
    }
  }
  phi::WarpReduceSum<AccT, kBatchSize, kWarpSize>(sum);
  // log_softmax and loss: write softmax out, accumulate -label*log_softmax
  AccT sumloss[kBatchSize]{0.0};
#pragma unroll
  for (int i = 0; i < kBatchSize; ++i) {
    if (i >= local_batches) break;
    VecT* softmax_v =
        reinterpret_cast<VecT*>(&softmax[(first_batch + i) * stride]);
    // max index to write
    int idx_max = (i < local_batches) ? element_count : 0;
    int idx_max_v = idx_max / kVSize;
    if (LogMode) {
      sum[i] = std::log(sum[i]);
    }
#pragma unroll
    for (int it = 0; it < kIterationsV; ++it) {
      T* srcvp = reinterpret_cast<T*>(&srcdata[i][it]);
      T* labelvp = reinterpret_cast<T*>(&labeldata[i][it]);
      VecT tmpv;
      T* tmpvp = reinterpret_cast<T*>(&tmpv);
#pragma unroll
      for (int s = 0; s < kVSize; ++s) {
        if (LogMode) {
          AccT logsoftmax = static_cast<AccT>(srcvp[s]) - max_value[i] - sum[i];
          sumloss[i] -= logsoftmax * static_cast<AccT>(labelvp[s]);
          tmpvp[s] = std::exp(logsoftmax);
        } else {
          tmpvp[s] = static_cast<AccT>(srcvp[s]) / sum[i];
        }
      }
      int idx = threadIdx.x + it * kWarpSize;
      if (idx < idx_max_v) {
        softmax_v[idx] = tmpv;
      }
    }
  }
  // loss: warp-reduce the per-lane partial losses, write one value per row
  phi::WarpReduceSum<AccT, kBatchSize, kWarpSize>(sumloss);
  for (int i = 0; i < kBatchSize; i++) {
    if (i >= local_batches) break;
    loss[first_batch + i] = sumloss[i];
  }
}
// Dispatch helper: instantiates WarpSoftmaxForwardSoftLabel for one
// compile-time Log2Elements value. Refers to `blocks`, `threads`, `stream`
// and the kernel arguments by name at the expansion site.
#define SOFTMAX_WARP_FORWARD_SOFT_CASE(Log2Elements, VecT, AccT)           \
  case Log2Elements:                                                       \
    WarpSoftmaxForwardSoftLabel<T, VecT, AccT,                             \
                                Log2Elements><<<blocks, threads, 0, stream>>>( \
        loss, softmax, src, label, batch_size, stride, element_count);     \
    break;
/*
  Wrapper of softmax with cross entropy forward soft label.
  Dispatches to the WarpSoftmaxForwardSoftLabel instantiation whose
  compile-time Log2Elements equals the runtime log2_elements; values above
  9 fall through and launch nothing.
*/
template <typename T>
void SwitchWarpSoftmaxForwardSoftLabel(const int blocks, const dim3 threads,
                                       gpuStream_t stream, T* loss, T* softmax,
                                       const T* src, const T* label,
                                       const int batch_size, const int stride,
                                       const int element_count,
                                       const int log2_elements) {
  using AccT = typename details::MPTypeTrait<T>::Type;
  // NOTE: the macro cases reference this function's parameters by name.
  switch (log2_elements) {
    SOFTMAX_WARP_FORWARD_SOFT_CASE(0, T, AccT);
    SOFTMAX_WARP_FORWARD_SOFT_CASE(1, T, AccT);
    SOFTMAX_WARP_FORWARD_SOFT_CASE(2, T, AccT);
    SOFTMAX_WARP_FORWARD_SOFT_CASE(3, T, AccT);
    SOFTMAX_WARP_FORWARD_SOFT_CASE(4, T, AccT);
    SOFTMAX_WARP_FORWARD_SOFT_CASE(5, T, AccT);
    SOFTMAX_WARP_FORWARD_SOFT_CASE(6, T, AccT);
    SOFTMAX_WARP_FORWARD_SOFT_CASE(7, T, AccT);
    SOFTMAX_WARP_FORWARD_SOFT_CASE(8, T, AccT);
    SOFTMAX_WARP_FORWARD_SOFT_CASE(9, T, AccT);
    default:
      break;
  }
}
/*
  Forward of softmax with cross entropy for soft (probability) labels.
  - Last-axis case (D == 1) with dim <= 320: a fused warp-level kernel
    computes softmax and loss in one pass.
  - Otherwise: cuDNN/MIOpen computes log-softmax, then CrossEntropySoftLabel
    (InLogMode) derives the loss and rewrites softmax_data to softmax.
  Cleanup vs. previous revision: removed dead locals (block_dim/grid_dim and
  the kMaxBlockDim used only by them) and the shadowing re-declarations of
  kDimLog2/kDimCeil in the else branch (inner kDimCeil was unused).
*/
template <typename T>
static void SoftmaxWithCrossEntropySoftLabel(
    const platform::CUDADeviceContext& ctx, const int rank, const int axis,
    const T* logits_data, const T* labels_data, T* softmax_data, T* loss_data,
    int N, int dim, int D) {
  // Threshold below which the fused warp kernel handles the whole row.
  constexpr int max_dim = 320;
  const int kDimLog2 = static_cast<int>(Log2Ceil(dim));
  const int kDimCeil = 1 << kDimLog2;
  auto stream = ctx.stream();
  if (D == 1 && dim <= max_dim) {
    // Fused warp softmax path: same launch geometry as the hard-label case.
    int kWarpSize = (kDimCeil < 32) ? kDimCeil : 32;
    int batches_per_warp = (kDimCeil <= 128) ? 2 : 1;
    // use 128 threads per block to maximize gpu utilization
    constexpr int threads_per_block = 128;
    int warps_per_block = (threads_per_block / kWarpSize);
    int batches_per_block = warps_per_block * batches_per_warp;
    int blocks = (N + batches_per_block - 1) / batches_per_block;
    dim3 threads(kWarpSize, warps_per_block, 1);
    SwitchWarpSoftmaxForwardSoftLabel<T>(blocks, threads, stream, loss_data,
                                         softmax_data, logits_data, labels_data,
                                         N, dim, dim, kDimLog2);
  } else {
    // Library path: log-softmax over an NCHW {N, dim, D, 1} view.
    ScopedTensorDescriptor desc;
    std::vector<int> tensor_dims = {N, dim, D, 1};
    DataLayout layout = DataLayout::kNCHW;
#ifdef PADDLE_WITH_HIP
    miopenTensorDescriptor_t descp = desc.descriptor<T>(layout, tensor_dims);
#else
    cudnnTensorDescriptor_t descp = desc.descriptor<T>(layout, tensor_dims);
#endif
    auto handle = ctx.cudnn_handle();
#ifdef PADDLE_WITH_HIP
    auto mode = axis == rank - 1 ? MIOPEN_SOFTMAX_MODE_INSTANCE
                                 : MIOPEN_SOFTMAX_MODE_CHANNEL;
    PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenSoftmaxForward_V2(
        handle, platform::CudnnDataType<T>::kOne(), descp, logits_data,
        platform::CudnnDataType<T>::kZero(), descp, softmax_data,
        MIOPEN_SOFTMAX_LOG, mode));
#else
    auto mode = axis == rank - 1 ? CUDNN_SOFTMAX_MODE_INSTANCE
                                 : CUDNN_SOFTMAX_MODE_CHANNEL;
    PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSoftmaxForward(
        handle, CUDNN_SOFTMAX_LOG, mode, platform::CudnnDataType<T>::kOne(),
        descp, logits_data, platform::CudnnDataType<T>::kZero(), descp,
        softmax_data));
#endif
    // Derive the loss from log-softmax; InLogMode=true also converts
    // softmax_data from log-softmax to softmax in place.
#ifdef __HIPCC__
    int kThreadPerBlock = 256;
#else
    int kThreadPerBlock = 512;
#endif
    int kBatchPerBlock = 1;
    int blocks = (N * D + kBatchPerBlock - 1) / kBatchPerBlock;
    dim3 threads(kThreadPerBlock / kBatchPerBlock, kBatchPerBlock, 1);
    CrossEntropySoftLabel<T, T, true><<<blocks, threads, 0, stream>>>(
        loss_data, softmax_data, NULL, labels_data, N, dim, D, kDimLog2);
  }
}
// Soft-label backward: dLogit = dLoss * (logit_grad - label), elementwise.
// `logit_grad` is updated in place; `remain` maps each element to its
// sample's slot in `loss_grad`.
template <typename T>
__global__ void SoftCrossEntropyGradientKernel(T* logit_grad,
                                               const T* loss_grad,
                                               const T* labels, const int64_t n,
                                               const int64_t d,
                                               const int64_t remain) {
  const int64_t gid = blockIdx.x * blockDim.x + threadIdx.x;
  if (gid >= n * d) return;
  // Map this element to its sample's loss-gradient slot.
  const int64_t row = gid / d;
  const int64_t inner = gid % remain;
  const int64_t loss_pos = row * remain + inner;
  logit_grad[gid] = loss_grad[loss_pos] * (logit_grad[gid] - labels[gid]);
}
// Soft-label cross-entropy backward (no softmax):
// dInput = dLoss * (-label / input), elementwise, updating logit_grad in
// place. `remain` maps each element to its sample's slot in loss_grad.
template <typename T>
__global__ void SoftLabelCrossEntropyGradientKernel(T* logit_grad,
                                                    const T* loss_grad,
                                                    const T* labels,
                                                    const int n, const int d,
                                                    const int remain) {
  const int gid = blockIdx.x * blockDim.x + threadIdx.x;
  if (gid >= n * d) return;
  const int row = gid / d;
  const int inner = gid % remain;
  const int loss_pos = row * remain + inner;
  logit_grad[gid] = loss_grad[loss_pos] * (-labels[gid] / logit_grad[gid]);
}
// Hard-label cross-entropy backward (no softmax): at each sample's labeled
// class, dInput = -1 / input (in place); samples whose label equals
// ignore_index are left untouched.
template <typename T, typename LabelT>
__global__ void HardLabelCrossEntropyGradientKernel(T* logit_grad,
                                                    const LabelT* labels,
                                                    const int n, const int d,
                                                    const int remain,
                                                    const int ignore_index) {
  CUDA_KERNEL_LOOP(i, n * remain) {
    const int lbl = static_cast<int>(labels[i]);
    if (lbl == ignore_index) continue;  // skip ignored samples
    const int row = i / remain;
    const int inner = i % remain;
    const int pos = row * d + lbl * remain + inner;
    logit_grad[pos] = -static_cast<T>(1.) / logit_grad[pos];
  }
}
// Scales the hard-label gradient by the upstream loss gradient: elements at
// the labeled class are multiplied by dLoss; all other elements (and every
// element of an ignored sample) are zeroed.
template <typename T, typename LabelT>
__global__ void ScaleCrossEntropyGradient(T* logit_grad, const T* loss_grad,
                                          const int num, const int d,
                                          const int remain,
                                          const LabelT* labels,
                                          const int ignore_index) {
  CUDA_KERNEL_LOOP(i, num) {
    const int row = i / d;
    const int inner = i % remain;
    const int lbl_pos = row * remain + inner;  // slot in labels/loss_grad
    const int cls = (i % d) / remain;          // class index of this element
    const auto lbl = static_cast<int64_t>(labels[lbl_pos]);
    const bool is_target = (lbl == cls) && (lbl != ignore_index);
    if (is_target) {
      logit_grad[i] *= loss_grad[lbl_pos];
    } else {
      logit_grad[i] = static_cast<T>(0.);
    }
  }
}
// Forward op kernel: computes Softmax and Loss outputs from Logits and
// labels, choosing among fused CUDA kernels, cuDNN, and the generic
// CrossEntropyFunctor depending on attributes and the reduction axis.
template <typename T>
class SoftmaxWithCrossEntropyCUDAKernel : public framework::OpKernel<T> {
 public:
  // Dispatches to Apply<LabelT> with the runtime label dtype.
  void Compute(const framework::ExecutionContext& context) const override {
    RunSoftmaxWithCrossEntropyFunctor<T>(context, *this);
  }
  // Forward implementation for a concrete label type.
  // soft_label: labels are probability distributions (same dtype as T);
  // otherwise labels are class indices of type LabelT.
  template <typename LabelT>
  static void Apply(const framework::ExecutionContext& context,
                    const framework::Tensor& labels, const bool soft_label) {
    PADDLE_ENFORCE_EQ(
        platform::is_gpu_place(context.GetPlace()), true,
        platform::errors::Unavailable("softmax_with_cross_entropy operator's "
                                      "CUDA kernel only runs on GPU device."));
    const bool use_softmax = context.Attr<bool>("use_softmax");
    // use_softmax == false: the "Logits" input already holds softmax
    // probabilities; only the cross-entropy loss needs computing.
    if (!use_softmax) {
      const Tensor* softmax = context.Input<Tensor>("Logits");
      Tensor* softmax_out = context.Output<Tensor>("Softmax");
      Tensor* loss = context.Output<Tensor>("Loss");
      const int rank = softmax->dims().size();
      const int axis =
          phi::funcs::CanonicalAxis(context.Attr<int>("axis"), rank);
      const int axis_dim = softmax->dims()[axis];
      // n: product of dims before axis; d: product of dims from axis on.
      const int n = phi::funcs::SizeToAxis(axis, softmax->dims());
      const int d = phi::funcs::SizeFromAxis(axis, softmax->dims());
      auto* softmax_out_data =
          softmax_out->template mutable_data<T>(context.GetPlace());
      auto* loss_data = loss->template mutable_data<T>(context.GetPlace());
      phi::funcs::SetConstant<platform::CUDADeviceContext, T> set_constant;
      set_constant(context.cuda_device_context(), loss, static_cast<T>(0));
      // A single class: softmax is identically 1 and the loss is 0.
      if (axis_dim == 1) {
        set_constant(context.cuda_device_context(), softmax_out,
                     static_cast<T>(1));
        return;
      }
      auto ignore_index = context.Attr<int>("ignore_index");
      Tensor softmax_2d, labels_2d, loss_2d, softmax_out_2d;
      softmax_2d.ShareDataWith(*softmax).Resize({n, d});
      labels_2d.ShareDataWith(labels).Resize({n, labels.numel() / n});
      loss_2d.ShareDataWith(*loss).Resize({n, 1});
      softmax_out_2d.ShareDataWith(*softmax_out).Resize({n, d});
      // math::CrossEntropyFunctor support axis is the last
      if (axis == -1) {
        math::CrossEntropyFunctor<platform::CUDADeviceContext, T>()(
            context.cuda_device_context(), &loss_2d, &softmax_2d, &labels_2d,
            soft_label, ignore_index, axis_dim);
        return;
      }
      // if axis is not the last dim, we need a different implementation
      if (soft_label) {
        auto* logits_data = softmax->template data<T>();
        auto* labels_data = labels.template data<T>();
        const int kDimLog2 = static_cast<int>(Log2Ceil(axis_dim));
        const int kDimCeil = 1 << kDimLog2;
#ifdef __HIPCC__
        int kThreadPerBlock = 256;
#else
        int kThreadPerBlock = 512;
#endif
        int kBatchPerBlock = 1;
        int blocks = (n * d + kBatchPerBlock - 1) / kBatchPerBlock;
        dim3 threads(kThreadPerBlock / kBatchPerBlock, kBatchPerBlock, 1);
        // InLogMode=false: input is softmax, loss uses log(softmax).
        CrossEntropySoftLabel<T, T, false><<<
            blocks, threads, 0, context.cuda_device_context().stream()>>>(
            loss_data, NULL, logits_data, labels_data, n, axis_dim,
            d / axis_dim, kDimLog2);
      } else {  // HardLabel
        auto* logits_data = softmax->template data<T>();
        auto* labels_data = labels.template data<LabelT>();
        int threads = 128;
        int blocks = (n * d / axis_dim + threads - 1) / threads;
        // Only honor ignore_index when it is a valid class index.
        if (ignore_index >= 0 && ignore_index < axis_dim) {
          CrossEntropyHardLabel<T, LabelT, true><<<
              blocks, threads, 0, context.cuda_device_context().stream()>>>(
              loss_data, logits_data, labels_data, n, axis_dim, d / axis_dim,
              ignore_index);
        } else {
          CrossEntropyHardLabel<T, LabelT, false><<<
              blocks, threads, 0, context.cuda_device_context().stream()>>>(
              loss_data, logits_data, labels_data, n, axis_dim, d / axis_dim,
              ignore_index);
        }
      }
      // the input is already softmax, so copy it to the output directly
      framework::TensorCopy(*softmax, context.GetPlace(),
                            context.device_context(), softmax_out);
      return;
    }
    // use_softmax == true: compute softmax and loss from raw logits.
    const Tensor* logits = context.Input<Tensor>("Logits");
    Tensor* softmax = context.Output<Tensor>("Softmax");
    Tensor* loss = context.Output<Tensor>("Loss");
    const int rank = logits->dims().size();
    const int axis = phi::funcs::CanonicalAxis(context.Attr<int>("axis"), rank);
    int axis_dim = logits->dims()[axis];
    const int64_t n = phi::funcs::SizeToAxis(axis, logits->dims());
    const int64_t d = phi::funcs::SizeFromAxis(axis, logits->dims());
    auto* softmax_data = softmax->template mutable_data<T>(context.GetPlace());
    auto* loss_data = loss->template mutable_data<T>(context.GetPlace());
    // A single class: softmax is identically 1 and the loss is 0.
    if (axis_dim == 1) {
      phi::funcs::SetConstant<platform::CUDADeviceContext, T> set_constant;
      set_constant(context.cuda_device_context(), softmax, static_cast<T>(1));
      set_constant(context.cuda_device_context(), loss, static_cast<T>(0));
      return;
    }
    auto ignore_index = context.Attr<int>("ignore_index");
    if (soft_label) {
      auto* logits_data = logits->template data<T>();
      auto* labels_data = labels.template data<T>();
      SoftmaxWithCrossEntropySoftLabel<T>(
          context.cuda_device_context(), rank, axis, logits_data, labels_data,
          softmax_data, loss_data, n, axis_dim, d / axis_dim);
    } else {
      if (!context.Attr<bool>("numeric_stable_mode")) {
        // CUDNN kernel only supports 2-D tensors and performs softmax on the
        // last dim
        Tensor logits_2d, softmax_2d, labels_2d, loss_2d;
        logits_2d.ShareDataWith(*logits).Resize({n, d});
        softmax_2d.ShareDataWith(*softmax).Resize({n, d});
        labels_2d.ShareDataWith(labels).Resize({n, labels.numel() / n});
        loss_2d.ShareDataWith(*loss).Resize({n, 1});
        math::SoftmaxCUDNNFunctor<T>()(context.cuda_device_context(),
                                       &logits_2d, &softmax_2d);
        math::CrossEntropyFunctor<platform::CUDADeviceContext, T>()(
            context.cuda_device_context(), &loss_2d, &softmax_2d, &labels_2d,
            false, ignore_index, axis_dim);
      } else {
        auto* logits_data = logits->template data<T>();
        auto* labels_data = labels.template data<LabelT>();
        // Only honor ignore_index when it is a valid class index.
        if (ignore_index >= 0 && ignore_index < axis_dim) {
          SoftmaxWithCrossEntropyHardLabel<T, LabelT, true>(
              context.cuda_device_context(), rank, axis, logits_data,
              labels_data, loss_data, softmax_data, n, axis_dim, d / axis_dim,
              ignore_index);
        } else {
          SoftmaxWithCrossEntropyHardLabel<T, LabelT, false>(
              context.cuda_device_context(), rank, axis, logits_data,
              labels_data, loss_data, softmax_data, n, axis_dim, d / axis_dim,
              ignore_index);
        }
      }
    }
  }
};
// Backward (gradient) CUDA kernel for softmax_with_cross_entropy:
// computes dLogits from dLoss and the forward-pass Softmax output.
template <typename T>
class SoftmaxWithCrossEntropyGradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
// Dispatches on the label tensor's dtype and invokes Apply<LabelT>() below.
RunSoftmaxWithCrossEntropyFunctor<T>(context, *this);
}
template <typename LabelT>
static void Apply(const framework::ExecutionContext& context,
const framework::Tensor& labels, const bool soft_label) {
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(context.GetPlace()), true,
platform::errors::Unavailable("softmax_with_cross_entropy operator's "
"CUDA kernel only runs on GPU device."));
const T* loss_grad_data =
context.Input<Tensor>(framework::GradVarName("Loss"))
->template data<T>();
Tensor* logit_grad =
context.Output<Tensor>(framework::GradVarName("Logits"))</template>;
const Tensor* softmax = context.Input<Tensor>("Softmax");
auto stream = context.cuda_device_context().stream();
auto ignore_index = context.Attr<int>("ignore_index");
auto use_softmax = context.Attr<bool>("use_softmax");
T* logit_grad_data = nullptr;
// Seed the gradient buffer with the forward softmax when the kernels
// below update it in place (soft labels, or when the op consumed a
// precomputed softmax); otherwise just allocate the output buffer.
bool copy_flag = (logit_grad != softmax && (!use_softmax || soft_label));
if (copy_flag) {
framework::TensorCopy(*softmax, context.GetPlace(),
context.device_context(), logit_grad);
logit_grad_data = logit_grad->template data<T>();
} else {
logit_grad_data =
logit_grad->template mutable_data<T>(context.GetPlace());
}
// n = product of dims before `axis`, d = product of dims from `axis` on,
// remain = d / axis_dim = elements trailing the softmax axis.
const int rank = logit_grad->dims().size();
const int axis = phi::funcs::CanonicalAxis(context.Attr<int>("axis"), rank);
int axis_dim = logit_grad->dims()[axis];
const int64_t n = phi::funcs::SizeToAxis(axis, logit_grad->dims());
const int64_t d = phi::funcs::SizeFromAxis(axis, logit_grad->dims());
const int64_t remain = d / axis_dim;
#ifdef __HIPCC__
// ROCm builds use a smaller thread block.
int block = 256;
#else
int block = 512;
#endif
// do not with softmax op, and input is softmax
if (!use_softmax) {
if (soft_label) {
int grid = (n * d + block - 1) / block;
const T* label_data = labels.template data<T>();
SoftLabelCrossEntropyGradientKernel<T><<<grid, block, 0, stream>>>(
logit_grad_data, loss_grad_data, label_data, n, d, remain);
} else {
Tensor logits_grad_2d;
logits_grad_2d.ShareDataWith(*logit_grad).Resize({n, d});
int grid = (n * remain + block - 1) / block;
const auto* label_data = labels.template data<LabelT>();
// First scatter -1 into the labeled positions, then scale every
// element by the incoming loss gradient.
HardLabelCrossEntropyGradientKernel<T,
LabelT><<<grid, block, 0, stream>>>(
logit_grad_data, label_data, n, d, remain, ignore_index);
int num = n * d;
grid = (num + block - 1) / block;
ScaleCrossEntropyGradient<T, LabelT><<<grid, block, 0, stream>>>(
logit_grad_data, loss_grad_data, num, d, remain, label_data,
ignore_index);
}
return;
}
// with softmax, continue
if (soft_label) {
int64_t grid = (n * d + block - 1) / block;
const T* label_data = labels.template data<T>();
SoftCrossEntropyGradientKernel<T><<<grid, block, 0, stream>>>(
logit_grad_data, loss_grad_data, label_data, n, d, remain);
} else {
// Hard labels with softmax: grad = softmax - one_hot(label).
const T* softmax_data = softmax->template data<T>();
const auto* label_data = labels.template data<LabelT>();
int grid = (n * d + block - 1) / block;
SoftmaxWithCrossEntropyGradHardLabel<T><<<grid, block, 0, stream>>>(
logit_grad_data, loss_grad_data, softmax_data, label_data, n,
d / remain, remain, ignore_index);
}
}
};
} // namespace operators
} // namespace paddle
// Operator registration for the forward and backward kernels.
namespace ops = paddle::operators;
#ifdef PADDLE_WITH_HIP
// MIOPEN do not support double
REGISTER_OP_CUDA_KERNEL(
softmax_with_cross_entropy, ops::SoftmaxWithCrossEntropyCUDAKernel<float>,
ops::SoftmaxWithCrossEntropyCUDAKernel<paddle::platform::float16>);
REGISTER_OP_CUDA_KERNEL(
softmax_with_cross_entropy_grad,
ops::SoftmaxWithCrossEntropyGradCUDAKernel<float>,
ops::SoftmaxWithCrossEntropyGradCUDAKernel<paddle::platform::float16>);
#else
// CUDA builds additionally register a double-precision kernel.
REGISTER_OP_CUDA_KERNEL(
softmax_with_cross_entropy, ops::SoftmaxWithCrossEntropyCUDAKernel<float>,
ops::SoftmaxWithCrossEntropyCUDAKernel<paddle::platform::float16>,
ops::SoftmaxWithCrossEntropyCUDAKernel<double>);
REGISTER_OP_CUDA_KERNEL(
softmax_with_cross_entropy_grad,
ops::SoftmaxWithCrossEntropyGradCUDAKernel<float>,
ops::SoftmaxWithCrossEntropyGradCUDAKernel<paddle::platform::float16>,
ops::SoftmaxWithCrossEntropyGradCUDAKernel<double>);
#endif
|
4204cd2082cc8118226b516b974e2dfa6b3025bb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <iostream>
#include <hip/hip_complex.h>
#include <getopt.h>
#include <cmath>
#include <sys/time.h>
#include "offset.h"
#include "bitmap.h"
#include "png_writer.h"
/* Given "value" and "max", the maximum value which we expect "value"
to take, this returns an integer between 0 and 255 proportional to
"value" divided by "max". */
/* Map value/max linearly onto the displayable color range and clamp the
   result into [0, 255].  The original expression 256 * (value / max)
   returned 256 for value == max (overflowing an 8-bit channel) and went
   negative for value < 0 — the caller passes e.g. 500 - iter and
   200 - iter, which are negative for large iteration counts. */
__device__
int rgb_value(int value, int max) {
    int v = (int)(256 * (value / (double)max));
    if (v < 0) return 0;
    if (v > 255) return 255;
    return v;
}
/* Complex exponential for hipDoubleComplex:
   exp(x + iy) = e^x * (cos y + i sin y). */
__device__
hipDoubleComplex cuCexp(hipDoubleComplex z) {
    double magnitude = exp(cuCreal(z));
    double im_sin, im_cos;
    sincos(cuCimag(z), &im_sin, &im_cos);
    return make_cuDoubleComplex(im_cos * magnitude, im_sin * magnitude);
}
/* Escape-time iteration for the map z_{n+1} = c * exp(-z_n) + z_n^2,
   starting from z_0 = 0.  Returns the number of steps performed before
   |z|^2 reached 4, or `limit` if the orbit never escaped. */
__device__
int iteration(hipDoubleComplex c, int limit = 1000) {
    hipDoubleComplex z = make_cuDoubleComplex(0, 0);
    int steps = 0;
    while (steps < limit) {
        // |z|^2 computed as Re(conj(z) * z); escape radius is 2.
        if (cuCreal(cuCmul(cuConj(z), z)) >= 4)
            break;
        z = cuCadd(cuCmul(c, cuCexp(cuCsub(make_cuDoubleComplex(0, 0), z))), cuCmul(z, z));
        ++steps;
    }
    return steps;
}
// Kernel: compute one fractal pixel per (x, y) lattice point using a 2-D
// grid-stride loop, so any grid/block geometry covers the whole
// width x height bitmap.  bitmap.pixels must point to device memory
// holding width * height Pixel entries.
__global__
void calc(Offset offset, Bitmap bitmap, bool quiet) {
// Per-thread debug trace (serialized device printf — slow; debug only).
if (!quiet)
printf("Thread-%d:%d on block %d:%d started.\n", threadIdx.x, threadIdx.y, blockIdx.x, blockIdx.y);
double lowerX = offset.lowerX, upperX = offset.upperX;
double lowerY = offset.lowerY, upperY = offset.upperY;
// Stride of the whole launch grid in each dimension.
int xStride = gridDim.x * blockDim.x;
int yStride = gridDim.y * blockDim.y;
hipDoubleComplex c;
int iter;
Pixel* pixel;
size_t width = bitmap.width;
size_t height = bitmap.height;
for (size_t y = blockIdx.y * blockDim.y + threadIdx.y; y < height; y += yStride) {
for (size_t x = blockIdx.x * blockDim.x + threadIdx.x; x < width; x += xStride) {
// Map the pixel onto the complex rectangle [lowerX,upperX] x [lowerY,upperY].
// NOTE(review): divides by (width - 1) / (height - 1) — a 1-pixel-wide or
// 1-pixel-high image would divide by zero; confirm callers never request that.
c = make_cuDoubleComplex(lowerX + (upperX - lowerX) * x / (width - 1),
lowerY + (upperY - lowerY) * y / (height - 1));
iter = iteration(c);
pixel = bitmap.pixels + width * y + x;
// Channel values derived from the escape count; rgb_value may produce
// out-of-[0,255] numbers for small counts (see rgb_value).
pixel->red = rgb_value(1000 - iter, 1000);
pixel->green = rgb_value(500 - iter, 1000);
pixel->blue = rgb_value(200 - iter, 1000);
}
}
if (!quiet)
printf("Thread-%d:%d on block %d:%d finished.\n", threadIdx.x, threadIdx.y, blockIdx.x, blockIdx.y);
}
// Derive the launch geometry from the requested per-block thread counts:
// the grid is ceil(width / threadsX) x ceil(height / threadsY) blocks.
// The `quiet` flag is currently not consulted for the summary line
// (matches the original behavior).
void threadConfigCalc(int threadsX, int threadsY, int width, int height, int &threadsPerBlock1dim1, int &threadsPerBlock1dim2, int &numBlocksX, int &numBlocksY, bool quiet = false) {
    threadsPerBlock1dim1 = threadsX;
    threadsPerBlock1dim2 = threadsY;
    // Ceiling division so the grid always covers the full image.
    numBlocksX = (width + threadsX - 1) / threadsX;
    numBlocksY = (height + threadsY - 1) / threadsY;
    printf("Threads used in current run: %d, threadsPerBlock: %dx%d, numBlocksXxnumBlocksY: %dx%d\n",
        threadsPerBlock1dim1 * threadsPerBlock1dim2 * numBlocksX * numBlocksY, threadsPerBlock1dim1, threadsPerBlock1dim2, numBlocksX, numBlocksY);
}
// Monotonic wall-clock timestamp in seconds (CLOCK_MONOTONIC, ns resolution).
double cpuSecondMonolitic() {
    struct timespec now;
    clock_gettime(CLOCK_MONOTONIC, &now);
    return (double)now.tv_sec + (double)now.tv_nsec * 1.e-9;
}
// Render the fractal for the given complex-plane window into a PNG file:
// allocate the pixel buffer on the device, launch `calc` with a grid
// derived from (threadsX, threadsY), copy the pixels back, write the PNG,
// and report the kernel time measured with device events.
// BUGFIX: the two timing events were previously leaked on every call;
// they are now released with hipEventDestroy.
void generateImage(int width, int height, Offset offset, int threadsX, int threadsY, const char* filename, bool quiet) {
    Bitmap bitmap(width, height);
    size_t pixelsCount = bitmap.width * bitmap.height;
    size_t pixelsSize = pixelsCount * sizeof(Pixel);
    // Device-side pixel buffer; the kernel receives `bitmap` by value.
    hipMalloc(&bitmap.pixels, pixelsSize);
    int threadsPerBlock1dim1, threadsPerBlock1dim2, numBlocksX, numBlocksY;
    threadConfigCalc(threadsX, threadsY, bitmap.width, bitmap.height, threadsPerBlock1dim1, threadsPerBlock1dim2, numBlocksX, numBlocksY, quiet);
    dim3 threadsPerBlock(threadsPerBlock1dim1, threadsPerBlock1dim2);
    dim3 numBlocks(numBlocksX, numBlocksY);
    // Time the kernel with device events.
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start);
    hipLaunchKernelGGL(( calc), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, offset, bitmap, quiet);
    hipEventRecord(stop);
    hipDeviceSynchronize();
    // Swap the device buffer out of the bitmap so writePNG sees host pixels.
    Pixel* devicePixels = bitmap.pixels;
    bitmap.pixels = new Pixel[pixelsCount];
    hipMemcpy(bitmap.pixels, devicePixels, pixelsSize, hipMemcpyDeviceToHost);
    hipEventSynchronize(stop);
    float milliseconds = 0;
    hipEventElapsedTime(&milliseconds, start, stop);
    printf("Execution time on gpu: %f\n", milliseconds / 1000);
    writePNG(bitmap, filename);
    hipFree(devicePixels);
    delete[] bitmap.pixels;
    hipEventDestroy(start);
    hipEventDestroy(stop);
    hipError_t error = hipGetLastError();
    if (!quiet)
        printf("%s\n", hipGetErrorString(error));
}
// Entry point: parse command-line options, render the fractal, and report
// the total wall-clock time.
// Options: -s WxH (image size), -r x0:x1:y0:y1 (complex rectangle),
// -t XxY (threads per block), -o FILE (output PNG), -q (quiet).
int main(int argc, char** argv) {
double iStart = cpuSecondMonolitic();
// Defaults used when the corresponding option is absent.
int width = 640, height = 480;
double lowerX = -2, upperX = 2;
double lowerY = -2, upperY = 2;
int threadsX = 1, threadsY = 1;
char filename[100] = "fractal.png";
bool quiet = false;
// Raw option strings; parsed with sscanf after the getopt loop.
char *svalue = NULL, *rvalue = NULL, *tvalue = NULL, *filenameArg = NULL;
int c;
static struct option long_options[] =
{
{"quiet", no_argument, 0, 'q'},
{"size", required_argument, 0, 's'},
{"rect", required_argument, 0, 'r'},
{"tasks", required_argument, 0, 't'},
{"output", required_argument, 0, 'o'},
{0, 0, 0, 0}
};
while ((c = getopt_long_only(argc, argv, "s:r:t:o:q", long_options, NULL)) != -1)
switch (c) {
case 's':
svalue = optarg;
break;
case 'r':
rvalue = optarg;
break;
case 't':
tvalue = optarg;
break;
case 'o':
filenameArg = optarg;
break;
case 'q':
quiet = true;
break;
case '?':
if (optopt == 's')
fprintf(stderr, "Option -%c requires an argument.\n", optopt);
else if (isprint(optopt))
fprintf(stderr, "Unknown option `-%c'.\n", optopt);
else
fprintf(stderr, "Unknown option character `\\x%x'.\n", optopt);
return 1;
default:
abort();
}
// Decode the collected option strings into their typed values.
if (svalue != NULL)
sscanf(svalue, "%dx%d", &width, &height);
if (rvalue != NULL)
sscanf(rvalue, "%lf:%lf:%lf:%lf", &lowerX, &upperX, &lowerY, &upperY);
if (tvalue != NULL)
sscanf(tvalue, "%dx%d", &threadsX, &threadsY);
if (filenameArg != NULL)
sscanf(filenameArg, "%s", filename);
if (!quiet) {
printf("svalue = %s;%dx%d\nrvalue = %s; %lf, %lf, %lf, %lf\n", svalue, width, height, rvalue, lowerX, upperX, lowerY, upperY);
printf("tvalue = %s; %dx%d\n", tvalue, threadsX, threadsY);
printf("filenameArg = %s; %s\n", filenameArg, filename);
printf("quiet = %d\n", quiet);
}
//////////////////////////////////
Offset offset(lowerX, upperX, lowerY, upperY);
generateImage(width, height, offset, threadsX, threadsY, filename, quiet);
double iElaps = cpuSecondMonolitic() - iStart;
// if (!quiet)
printf("Total execution time for this run: %lf\n", iElaps);
return 0;
}
| 4204cd2082cc8118226b516b974e2dfa6b3025bb.cu | #include <stdio.h>
#include <iostream>
#include <cuComplex.h>
#include <getopt.h>
#include <cmath>
#include <sys/time.h>
#include "offset.h"
#include "bitmap.h"
#include "png_writer.h"
/* Given "value" and "max", the maximum value which we expect "value"
to take, this returns an integer between 0 and 255 proportional to
"value" divided by "max". */
/* Map value/max linearly onto the displayable color range and clamp the
   result into [0, 255].  The original expression 256 * (value / max)
   returned 256 for value == max (overflowing an 8-bit channel) and went
   negative for value < 0 — the caller passes e.g. 500 - iter and
   200 - iter, which are negative for large iteration counts. */
__device__
int rgb_value(int value, int max) {
    int v = (int)(256 * (value / (double)max));
    if (v < 0) return 0;
    if (v > 255) return 255;
    return v;
}
/* Complex exponential for cuDoubleComplex:
   exp(x + iy) = e^x * (cos y + i sin y). */
__device__
cuDoubleComplex cuCexp(cuDoubleComplex z) {
    double magnitude = exp(cuCreal(z));
    double im_sin, im_cos;
    sincos(cuCimag(z), &im_sin, &im_cos);
    return make_cuDoubleComplex(im_cos * magnitude, im_sin * magnitude);
}
/* Escape-time iteration for the map z_{n+1} = c * exp(-z_n) + z_n^2,
   starting from z_0 = 0.  Returns the number of steps performed before
   |z|^2 reached 4, or `limit` if the orbit never escaped. */
__device__
int iteration(cuDoubleComplex c, int limit = 1000) {
    cuDoubleComplex z = make_cuDoubleComplex(0, 0);
    int steps = 0;
    while (steps < limit) {
        // |z|^2 computed as Re(conj(z) * z); escape radius is 2.
        if (cuCreal(cuCmul(cuConj(z), z)) >= 4)
            break;
        z = cuCadd(cuCmul(c, cuCexp(cuCsub(make_cuDoubleComplex(0, 0), z))), cuCmul(z, z));
        ++steps;
    }
    return steps;
}
// Kernel: compute one fractal pixel per (x, y) lattice point using a 2-D
// grid-stride loop, so any grid/block geometry covers the whole
// width x height bitmap.  bitmap.pixels must point to device memory
// holding width * height Pixel entries.
__global__
void calc(Offset offset, Bitmap bitmap, bool quiet) {
// Per-thread debug trace (serialized device printf — slow; debug only).
if (!quiet)
printf("Thread-%d:%d on block %d:%d started.\n", threadIdx.x, threadIdx.y, blockIdx.x, blockIdx.y);
double lowerX = offset.lowerX, upperX = offset.upperX;
double lowerY = offset.lowerY, upperY = offset.upperY;
// Stride of the whole launch grid in each dimension.
int xStride = gridDim.x * blockDim.x;
int yStride = gridDim.y * blockDim.y;
cuDoubleComplex c;
int iter;
Pixel* pixel;
size_t width = bitmap.width;
size_t height = bitmap.height;
for (size_t y = blockIdx.y * blockDim.y + threadIdx.y; y < height; y += yStride) {
for (size_t x = blockIdx.x * blockDim.x + threadIdx.x; x < width; x += xStride) {
// Map the pixel onto the complex rectangle [lowerX,upperX] x [lowerY,upperY].
// NOTE(review): divides by (width - 1) / (height - 1) — a 1-pixel-wide or
// 1-pixel-high image would divide by zero; confirm callers never request that.
c = make_cuDoubleComplex(lowerX + (upperX - lowerX) * x / (width - 1),
lowerY + (upperY - lowerY) * y / (height - 1));
iter = iteration(c);
pixel = bitmap.pixels + width * y + x;
// Channel values derived from the escape count; rgb_value may produce
// out-of-[0,255] numbers for small counts (see rgb_value).
pixel->red = rgb_value(1000 - iter, 1000);
pixel->green = rgb_value(500 - iter, 1000);
pixel->blue = rgb_value(200 - iter, 1000);
}
}
if (!quiet)
printf("Thread-%d:%d on block %d:%d finished.\n", threadIdx.x, threadIdx.y, blockIdx.x, blockIdx.y);
}
// Derive the launch geometry from the requested per-block thread counts:
// the grid is ceil(width / threadsX) x ceil(height / threadsY) blocks.
// The `quiet` flag is currently not consulted for the summary line
// (matches the original behavior).
void threadConfigCalc(int threadsX, int threadsY, int width, int height, int &threadsPerBlock1dim1, int &threadsPerBlock1dim2, int &numBlocksX, int &numBlocksY, bool quiet = false) {
    threadsPerBlock1dim1 = threadsX;
    threadsPerBlock1dim2 = threadsY;
    // Ceiling division so the grid always covers the full image.
    numBlocksX = (width + threadsX - 1) / threadsX;
    numBlocksY = (height + threadsY - 1) / threadsY;
    printf("Threads used in current run: %d, threadsPerBlock: %dx%d, numBlocksXxnumBlocksY: %dx%d\n",
        threadsPerBlock1dim1 * threadsPerBlock1dim2 * numBlocksX * numBlocksY, threadsPerBlock1dim1, threadsPerBlock1dim2, numBlocksX, numBlocksY);
}
// Monotonic wall-clock timestamp in seconds (CLOCK_MONOTONIC, ns resolution).
double cpuSecondMonolitic() {
    struct timespec now;
    clock_gettime(CLOCK_MONOTONIC, &now);
    return (double)now.tv_sec + (double)now.tv_nsec * 1.e-9;
}
// Render the fractal for the given complex-plane window into a PNG file:
// allocate the pixel buffer on the device, launch `calc` with a grid
// derived from (threadsX, threadsY), copy the pixels back, write the PNG,
// and report the kernel time measured with CUDA events.
// BUGFIX: the two timing events were previously leaked on every call;
// they are now released with cudaEventDestroy.
void generateImage(int width, int height, Offset offset, int threadsX, int threadsY, const char* filename, bool quiet) {
    Bitmap bitmap(width, height);
    size_t pixelsCount = bitmap.width * bitmap.height;
    size_t pixelsSize = pixelsCount * sizeof(Pixel);
    // Device-side pixel buffer; the kernel receives `bitmap` by value.
    cudaMalloc(&bitmap.pixels, pixelsSize);
    int threadsPerBlock1dim1, threadsPerBlock1dim2, numBlocksX, numBlocksY;
    threadConfigCalc(threadsX, threadsY, bitmap.width, bitmap.height, threadsPerBlock1dim1, threadsPerBlock1dim2, numBlocksX, numBlocksY, quiet);
    dim3 threadsPerBlock(threadsPerBlock1dim1, threadsPerBlock1dim2);
    dim3 numBlocks(numBlocksX, numBlocksY);
    // Time the kernel with device events.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    calc<<<numBlocks, threadsPerBlock>>>(offset, bitmap, quiet);
    cudaEventRecord(stop);
    cudaDeviceSynchronize();
    // Swap the device buffer out of the bitmap so writePNG sees host pixels.
    Pixel* devicePixels = bitmap.pixels;
    bitmap.pixels = new Pixel[pixelsCount];
    cudaMemcpy(bitmap.pixels, devicePixels, pixelsSize, cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("Execution time on gpu: %f\n", milliseconds / 1000);
    writePNG(bitmap, filename);
    cudaFree(devicePixels);
    delete[] bitmap.pixels;
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaError error = cudaGetLastError();
    if (!quiet)
        printf("%s\n", cudaGetErrorString(error));
}
// Entry point: parse command-line options, render the fractal, and report
// the total wall-clock time.
// Options: -s WxH (image size), -r x0:x1:y0:y1 (complex rectangle),
// -t XxY (threads per block), -o FILE (output PNG), -q (quiet).
int main(int argc, char** argv) {
double iStart = cpuSecondMonolitic();
// Defaults used when the corresponding option is absent.
int width = 640, height = 480;
double lowerX = -2, upperX = 2;
double lowerY = -2, upperY = 2;
int threadsX = 1, threadsY = 1;
char filename[100] = "fractal.png";
bool quiet = false;
// Raw option strings; parsed with sscanf after the getopt loop.
char *svalue = NULL, *rvalue = NULL, *tvalue = NULL, *filenameArg = NULL;
int c;
static struct option long_options[] =
{
{"quiet", no_argument, 0, 'q'},
{"size", required_argument, 0, 's'},
{"rect", required_argument, 0, 'r'},
{"tasks", required_argument, 0, 't'},
{"output", required_argument, 0, 'o'},
{0, 0, 0, 0}
};
while ((c = getopt_long_only(argc, argv, "s:r:t:o:q", long_options, NULL)) != -1)
switch (c) {
case 's':
svalue = optarg;
break;
case 'r':
rvalue = optarg;
break;
case 't':
tvalue = optarg;
break;
case 'o':
filenameArg = optarg;
break;
case 'q':
quiet = true;
break;
case '?':
if (optopt == 's')
fprintf(stderr, "Option -%c requires an argument.\n", optopt);
else if (isprint(optopt))
fprintf(stderr, "Unknown option `-%c'.\n", optopt);
else
fprintf(stderr, "Unknown option character `\\x%x'.\n", optopt);
return 1;
default:
abort();
}
// Decode the collected option strings into their typed values.
if (svalue != NULL)
sscanf(svalue, "%dx%d", &width, &height);
if (rvalue != NULL)
sscanf(rvalue, "%lf:%lf:%lf:%lf", &lowerX, &upperX, &lowerY, &upperY);
if (tvalue != NULL)
sscanf(tvalue, "%dx%d", &threadsX, &threadsY);
if (filenameArg != NULL)
sscanf(filenameArg, "%s", filename);
if (!quiet) {
printf("svalue = %s;%dx%d\nrvalue = %s; %lf, %lf, %lf, %lf\n", svalue, width, height, rvalue, lowerX, upperX, lowerY, upperY);
printf("tvalue = %s; %dx%d\n", tvalue, threadsX, threadsY);
printf("filenameArg = %s; %s\n", filenameArg, filename);
printf("quiet = %d\n", quiet);
}
//////////////////////////////////
Offset offset(lowerX, upperX, lowerY, upperY);
generateImage(width, height, offset, threadsX, threadsY, filename, quiet);
double iElaps = cpuSecondMonolitic() - iStart;
// if (!quiet)
printf("Total execution time for this run: %lf\n", iElaps);
return 0;
}
|
ff3c99d5f61603b27c2ac2fa823948aa7bcb022b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
% Function: generate_dmrs_pusch
% Description: Generates LTE demodulation reference signal for PUSCH
% Inputs: N_subfr - Subframe number within a radio frame
% N_id_cell - Physical layer cell identity
% delta_ss - Configurable portion of the sequence-shift pattern for PUSCH (sib2 groupAssignmentPUSCH)
% group_hopping_enabled - Boolean value determining if group hopping is enabled (sib2 groupHoppingEnabled)
% sequence_hopping_enabled - Boolean value determining if sequence hopping is enabled (sib2 sequenceHoppingEnabled)
% cyclic_shift - Broadcast cyclic shift to apply to base reference signal (sib2 cyclicShift)
% cyclic_shift_dci - Scheduled cyclic shift to apply to base reference signal
% w_config - fixed or table
% N_prbs - Number of PRBs used for the uplink grant
% layer - Which diversity layer to generate reference signals for
% Outputs: *dmrs1_h - Demodulation reference signal for PUSCH
*dmrs2_h - Demodulation reference signal for PUSCH
By: Mohammed Mostafa
*/
#include "generate_dmrs_pusch_hip.cuh"
#include "generate_ul_rs.cuh"
#include "generate_psuedo_random_seq.cuh"
// Scale every sample of the second DMRS vector by the orthogonal-cover
// weight w_vector.  One thread per complex sample; surplus threads fall
// through the bounds guard.
__global__ void generate_reference_signal(hipfftComplex* dmrs2_d, int w_vector, int M_sc_rb) {
    int sample = blockIdx.x * blockDim.x + threadIdx.x;
    if (sample < M_sc_rb)
        dmrs2_d[sample] = w_vector * dmrs2_d[sample];
}
// Generates the LTE PUSCH demodulation reference signal (DMRS) for one
// subframe; see the header comment at the top of this file for the full
// parameter description.  *dmrs1_h and *dmrs2_h are malloc'ed here
// (M_sc_rb complex samples each) and owned by the caller.
// BUGFIX: the pseudo-random sequence buffer `c` was previously leaked;
// it is now freed once the per-slot hopping values have been extracted.
// The scaling-kernel grid is now derived from M_sc_rb instead of being
// hard-coded to <<<2, 1024>>> (identical results for M_sc_rb <= 2048,
// correct for larger allocations too).
void generate_dmrs_pusch(int N_subfr, int N_id_cell, int delta_ss, bool group_hopping_enabled, bool sequence_hopping_enabled, int cyclic_shift, int cyclic_shift_dci, char* w_config, int N_prbs, int layer, hipfftComplex** dmrs1_h, hipfftComplex** dmrs2_h)
{
    // Number of reference-signal subcarriers for this allocation.
    int M_sc_rb = N_prbs*N_sc_rb;
    // Slot number within the radio frame (two slots per subframe).
    int N_s = N_subfr * 2;
    int lambda = layer;
    // Sequence-shift pattern for PUSCH.
    int f_ss_pusch = ((N_id_cell % 30) + delta_ss) % 30;
    // Pseudo-random sequence c driving the cyclic-shift hopping pattern.
    Byte* c = (Byte*)malloc(sizeof(Byte)* 8 * N_ul_symb * 20);
    int c_init = floor(N_id_cell / 30) * 32 + f_ss_pusch;
    generate_psuedo_random_seq(&c, 8 * N_ul_symb * 20, 0, 0, c_init); //added c_init in N_id_cell according to ahmed nour
    // n_PN(ns): eight bits of c folded into one integer per slot.
    int n_pn_ns_1 = c[8 * N_ul_symb*N_s + 0] + c[8 * N_ul_symb*N_s + 1] * 2 + c[8 * N_ul_symb*N_s + 2] * 4 + c[8 * N_ul_symb*N_s + 3] * 8 + c[8 * N_ul_symb*N_s + 4] * 16 + c[8 * N_ul_symb*N_s + 5] * 32 + c[8 * N_ul_symb*N_s + 6] * 64 + c[8 * N_ul_symb*N_s + 7] * 128;
    int n_pn_ns_2 = c[8 * N_ul_symb*(N_s + 1) + 0] + c[8 * N_ul_symb*(N_s + 1) + 1]*2 + c[8 * N_ul_symb*(N_s + 1) + 2]*4 + c[8 * N_ul_symb*(N_s + 1) + 3]*8 + c[8 * N_ul_symb*(N_s + 1) + 4]*16 + c[8 * N_ul_symb*(N_s + 1) + 5]*32 + c[8 * N_ul_symb*(N_s + 1) + 6]*64 + c[8 * N_ul_symb*(N_s + 1) + 7]*128;
    // The sequence is no longer needed; release it (was leaked before).
    free(c);
    // Broadcast (n_1_dmrs) and DCI-scheduled (n_2_dmrs_lambda) shifts.
    int n_1_dmrs = N_1_DMRS[cyclic_shift];
    int n_2_dmrs_lambda = N_2_DMRS_LAMBDA[cyclic_shift_dci][lambda];
    // Per-slot cyclic shift and resulting phase-rotation alpha.
    int n_cs_lambda_1 = (n_1_dmrs + n_2_dmrs_lambda + n_pn_ns_1) % 12;
    int n_cs_lambda_2 = (n_1_dmrs + n_2_dmrs_lambda + n_pn_ns_2) % 12;
    float alpha_lambda_1 = 2 * PI *n_cs_lambda_1 / (float)12;
    float alpha_lambda_2 = 2 * PI *n_cs_lambda_2 / (float)12;
    // Base reference signal for each slot of the subframe.
    *dmrs1_h = (hipfftComplex *)malloc(sizeof(hipfftComplex)*M_sc_rb);
    *dmrs2_h = (hipfftComplex *)malloc(sizeof(hipfftComplex)*M_sc_rb);
    generate_ul_rs(N_s, N_id_cell, "pusch", delta_ss, group_hopping_enabled, sequence_hopping_enabled, alpha_lambda_1, N_prbs, &*dmrs1_h);
    generate_ul_rs(N_s+1, N_id_cell, "pusch", delta_ss, group_hopping_enabled, sequence_hopping_enabled, alpha_lambda_2, N_prbs, &*dmrs2_h);
    // Orthogonal cover code weight for the second slot.
    int w_vector;
    if (!strcmp(w_config, "fixed"))
    {
        w_vector = 1;
    }
    else
    {
        w_vector = W_VECTOR[cyclic_shift_dci*4 + lambda];
    }
    // Apply the cover weight to the second DMRS on the GPU.
    float elapsed = 0; //For time calc.
    hipEvent_t start, stop;
    hipfftComplex* dmrs2_d;
    startTimer();
    hipMalloc((void **)&dmrs2_d, sizeof(hipfftComplex)*M_sc_rb);
    stopTimer("hipMalloc Time= %.6f ms\n", elapsed);
    startTimer();
    hipMemcpy(dmrs2_d, *dmrs2_h, sizeof(hipfftComplex)*M_sc_rb, hipMemcpyHostToDevice);
    stopTimer("hipMemcpy Host->Device Time= %.6f ms\n", elapsed);
    // Grid sized to cover all M_sc_rb samples with 1024-thread blocks.
    int numBlocks = (M_sc_rb + 1023) / 1024;
    generate_reference_signal << < numBlocks, 1024 >> >(dmrs2_d, w_vector, M_sc_rb);
    startTimer();
    hipMemcpy(*dmrs2_h, dmrs2_d, sizeof(hipfftComplex)*M_sc_rb, hipMemcpyDeviceToHost);
    stopTimer("hipMemcpy Device->Host Time= %.6f ms\n", elapsed);
    // Cleanup
    hipFree(dmrs2_d);
    destroyTimers();
}
| ff3c99d5f61603b27c2ac2fa823948aa7bcb022b.cu | /*
% Function: generate_dmrs_pusch
% Description: Generates LTE demodulation reference signal for PUSCH
% Inputs: N_subfr - Subframe number within a radio frame
% N_id_cell - Physical layer cell identity
% delta_ss - Configurable portion of the sequence-shift pattern for PUSCH (sib2 groupAssignmentPUSCH)
% group_hopping_enabled - Boolean value determining if group hopping is enabled (sib2 groupHoppingEnabled)
% sequence_hopping_enabled - Boolean value determining if sequence hopping is enabled (sib2 sequenceHoppingEnabled)
% cyclic_shift - Broadcast cyclic shift to apply to base reference signal (sib2 cyclicShift)
% cyclic_shift_dci - Scheduled cyclic shift to apply to base reference signal
% w_config - fixed or table
% N_prbs - Number of PRBs used for the uplink grant
% layer - Which diversity layer to generate reference signals for
% Outputs: *dmrs1_h - Demodulation reference signal for PUSCH
*dmrs2_h - Demodulation reference signal for PUSCH
By: Mohammed Mostafa
*/
#include "generate_dmrs_pusch.cuh"
#include "generate_ul_rs.cuh"
#include "generate_psuedo_random_seq.cuh"
// Scale every sample of the second DMRS vector by the orthogonal-cover
// weight w_vector.  One thread per complex sample; surplus threads fall
// through the bounds guard.
__global__ void generate_reference_signal(cufftComplex* dmrs2_d, int w_vector, int M_sc_rb) {
    int sample = blockIdx.x * blockDim.x + threadIdx.x;
    if (sample < M_sc_rb)
        dmrs2_d[sample] = w_vector * dmrs2_d[sample];
}
// Generates the LTE PUSCH demodulation reference signal (DMRS) for one
// subframe; see the header comment at the top of this file for the full
// parameter description.  *dmrs1_h and *dmrs2_h are malloc'ed here
// (M_sc_rb complex samples each) and owned by the caller.
// BUGFIX: the pseudo-random sequence buffer `c` was previously leaked;
// it is now freed once the per-slot hopping values have been extracted.
// The scaling-kernel grid is now derived from M_sc_rb instead of being
// hard-coded to <<<2, 1024>>> (identical results for M_sc_rb <= 2048,
// correct for larger allocations too).
void generate_dmrs_pusch(int N_subfr, int N_id_cell, int delta_ss, bool group_hopping_enabled, bool sequence_hopping_enabled, int cyclic_shift, int cyclic_shift_dci, char* w_config, int N_prbs, int layer, cufftComplex** dmrs1_h, cufftComplex** dmrs2_h)
{
    // Number of reference-signal subcarriers for this allocation.
    int M_sc_rb = N_prbs*N_sc_rb;
    // Slot number within the radio frame (two slots per subframe).
    int N_s = N_subfr * 2;
    int lambda = layer;
    // Sequence-shift pattern for PUSCH.
    int f_ss_pusch = ((N_id_cell % 30) + delta_ss) % 30;
    // Pseudo-random sequence c driving the cyclic-shift hopping pattern.
    Byte* c = (Byte*)malloc(sizeof(Byte)* 8 * N_ul_symb * 20);
    int c_init = floor(N_id_cell / 30) * 32 + f_ss_pusch;
    generate_psuedo_random_seq(&c, 8 * N_ul_symb * 20, 0, 0, c_init); //added c_init in N_id_cell according to ahmed nour
    // n_PN(ns): eight bits of c folded into one integer per slot.
    int n_pn_ns_1 = c[8 * N_ul_symb*N_s + 0] + c[8 * N_ul_symb*N_s + 1] * 2 + c[8 * N_ul_symb*N_s + 2] * 4 + c[8 * N_ul_symb*N_s + 3] * 8 + c[8 * N_ul_symb*N_s + 4] * 16 + c[8 * N_ul_symb*N_s + 5] * 32 + c[8 * N_ul_symb*N_s + 6] * 64 + c[8 * N_ul_symb*N_s + 7] * 128;
    int n_pn_ns_2 = c[8 * N_ul_symb*(N_s + 1) + 0] + c[8 * N_ul_symb*(N_s + 1) + 1]*2 + c[8 * N_ul_symb*(N_s + 1) + 2]*4 + c[8 * N_ul_symb*(N_s + 1) + 3]*8 + c[8 * N_ul_symb*(N_s + 1) + 4]*16 + c[8 * N_ul_symb*(N_s + 1) + 5]*32 + c[8 * N_ul_symb*(N_s + 1) + 6]*64 + c[8 * N_ul_symb*(N_s + 1) + 7]*128;
    // The sequence is no longer needed; release it (was leaked before).
    free(c);
    // Broadcast (n_1_dmrs) and DCI-scheduled (n_2_dmrs_lambda) shifts.
    int n_1_dmrs = N_1_DMRS[cyclic_shift];
    int n_2_dmrs_lambda = N_2_DMRS_LAMBDA[cyclic_shift_dci][lambda];
    // Per-slot cyclic shift and resulting phase-rotation alpha.
    int n_cs_lambda_1 = (n_1_dmrs + n_2_dmrs_lambda + n_pn_ns_1) % 12;
    int n_cs_lambda_2 = (n_1_dmrs + n_2_dmrs_lambda + n_pn_ns_2) % 12;
    float alpha_lambda_1 = 2 * PI *n_cs_lambda_1 / (float)12;
    float alpha_lambda_2 = 2 * PI *n_cs_lambda_2 / (float)12;
    // Base reference signal for each slot of the subframe.
    *dmrs1_h = (cufftComplex *)malloc(sizeof(cufftComplex)*M_sc_rb);
    *dmrs2_h = (cufftComplex *)malloc(sizeof(cufftComplex)*M_sc_rb);
    generate_ul_rs(N_s, N_id_cell, "pusch", delta_ss, group_hopping_enabled, sequence_hopping_enabled, alpha_lambda_1, N_prbs, &*dmrs1_h);
    generate_ul_rs(N_s+1, N_id_cell, "pusch", delta_ss, group_hopping_enabled, sequence_hopping_enabled, alpha_lambda_2, N_prbs, &*dmrs2_h);
    // Orthogonal cover code weight for the second slot.
    int w_vector;
    if (!strcmp(w_config, "fixed"))
    {
        w_vector = 1;
    }
    else
    {
        w_vector = W_VECTOR[cyclic_shift_dci*4 + lambda];
    }
    // Apply the cover weight to the second DMRS on the GPU.
    float elapsed = 0; //For time calc.
    cudaEvent_t start, stop;
    cufftComplex* dmrs2_d;
    startTimer();
    cudaMalloc((void **)&dmrs2_d, sizeof(cufftComplex)*M_sc_rb);
    stopTimer("cudaMalloc Time= %.6f ms\n", elapsed);
    startTimer();
    cudaMemcpy(dmrs2_d, *dmrs2_h, sizeof(cufftComplex)*M_sc_rb, cudaMemcpyHostToDevice);
    stopTimer("cudaMemcpy Host->Device Time= %.6f ms\n", elapsed);
    // Grid sized to cover all M_sc_rb samples with 1024-thread blocks.
    int numBlocks = (M_sc_rb + 1023) / 1024;
    generate_reference_signal << < numBlocks, 1024 >> >(dmrs2_d, w_vector, M_sc_rb);
    startTimer();
    cudaMemcpy(*dmrs2_h, dmrs2_d, sizeof(cufftComplex)*M_sc_rb, cudaMemcpyDeviceToHost);
    stopTimer("cudaMemcpy Device->Host Time= %.6f ms\n", elapsed);
    // Cleanup
    cudaFree(dmrs2_d);
    destroyTimers();
}
|
892cbceeacd910fb47cef933199a233abde38c23.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "layers/operator.h"
#include <string.h>
// --------------------------------------------------------------------------
// kernel code
// conv_winograd_cpu
// convert_bottom_{gpu, cpu}
// --------------------------------------------------------------------------
#ifndef GPU
// CPU 3x3 convolution via a fast transform — the name and the 4x4
// tile / 2x2 output structure match Winograd F(2x2, 3x3).
// bottom3d: input (bottom_C x H x W), weight4d: filters
// (top_C x bottom_C x 3 x 3), top3d: output (top_C x H x W, stride 1,
// implicit 1-pixel zero padding).  temp_data must hold the transformed
// weights (top_C*bottom_C*16), transformed input (bottom_C*H2*W2*16)
// and the product (top_C*H2*W2*16), where H2/W2 = ceil(H/2), ceil(W/2).
static
void conv_winograd_cpu(const real bottom3d[],
const real weight4d[],
real temp_data[],
real top3d[],
const int top_C, const int bottom_C,
const int H, const int W)
{
const int H2 = DIV_THEN_CEIL(H, 2);
const int W2 = DIV_THEN_CEIL(W, 2);
// Partition the scratch buffer into the three transform workspaces.
real* const p_weight4x4 = temp_data;
real* const p_bottom4x4 = temp_data + top_C * bottom_C * 4 * 4;
real* const p_temp4x4 = p_bottom4x4 + bottom_C * H2 * W2 * 4 * 4;
real d[4][4];
real uv[16];
// Stage 1: transform each 3x3 filter g into a 4x4 tile U, stored with
// the 16 tile positions strided by top_C*bottom_C so stage 3 can treat
// each position as one matrix.
{
const int stride = top_C * bottom_C;
for (int k = 0; k < top_C; ++k) {
for (int c = 0; c < bottom_C; ++c) {
const real* const g = weight4d + (k * bottom_C + c) * 3 * 3;
real* const u = p_weight4x4 + k * bottom_C + c;
const real g_sum = (g[0] + g[1] + g[2] +
g[3] + g[4] + g[5] +
g[6] + g[7] + g[8]) / 4;
u[0 * stride] = g[0];
u[1 * stride] = (g[0] + g[1] + g[2]) / 2;
u[2 * stride] = (g[0] - g[1] + g[2]) / 2;
u[3 * stride] = g[2];
u[4 * stride] = (g[0] + g[3] + g[6]) / 2;
u[5 * stride] = g_sum;
u[6 * stride] = g_sum - (g[1] + g[4] + g[7]) / 2;
u[7 * stride] = (g[2] + g[5] + g[8]) / 2;
u[8 * stride] = (g[0] - g[3] + g[6]) / 2;
u[9 * stride] = g_sum - (g[3] + g[4] + g[5]) / 2;
u[10 * stride] = g_sum - (g[1] + g[3] + g[5] + g[7]) / 2;
u[11 * stride] = (g[2] - g[5] + g[8]) / 2;
u[12 * stride] = g[6];
u[13 * stride] = (g[6] + g[7] + g[8]) / 2;
u[14 * stride] = (g[6] - g[7] + g[8]) / 2;
u[15 * stride] = g[8];
} // endfor c
} // endfor k
}
// Stage 2: gather each (zero-padded) 4x4 input patch d and transform it
// into V, again strided by tile position.
{
const int stride = bottom_C * H2 * W2;
for (int c = 0; c < bottom_C; ++c) {
for (int h = 0; h < H; h += 2) {
for (int w = 0; w < W; w += 2) {
const real* const p_patch = bottom3d + (c * H + h - 1) * W + w - 1;
//real* const v = p_bottom4x4 + (h / 2 * W2 + w / 2) * bottom_C + c;
real* const v = p_bottom4x4 + (c * H2 + h / 2) * W2 + w / 2;
// Out-of-bounds patch entries read as 0 (1-pixel padding).
for (int j = 0; j < 4; ++j) {
for (int i = 0; i < 4; ++i) {
const int hh = h - 1 + j;
const int ww = w - 1 + i;
d[j][i] = (hh >= 0 && hh < H && ww >= 0 && ww < W) ?
p_patch[j * W + i] : 0;
}
}
v[0 * stride] = d[0][0] - d[0][2] - d[2][0] + d[2][2];
v[1 * stride] = d[0][1] + d[0][2] - d[2][1] - d[2][2];
v[2 * stride] = -d[0][1] + d[0][2] + d[2][1] - d[2][2];
v[3 * stride] = d[0][1] - d[0][3] - d[2][1] + d[2][3];
v[4 * stride] = d[1][0] - d[1][2] + d[2][0] - d[2][2];
v[5 * stride] = d[1][1] + d[1][2] + d[2][1] + d[2][2];
v[6 * stride] = -d[1][1] + d[1][2] - d[2][1] + d[2][2];
v[7 * stride] = d[1][1] - d[1][3] + d[2][1] - d[2][3];
v[8 * stride] = -d[1][0] + d[1][2] + d[2][0] - d[2][2];
v[9 * stride] = -d[1][1] - d[1][2] + d[2][1] + d[2][2];
v[10 * stride] = d[1][1] - d[1][2] - d[2][1] + d[2][2];
v[11 * stride] = -d[1][1] + d[1][3] + d[2][1] - d[2][3];
v[12 * stride] = d[1][0] - d[1][2] - d[3][0] + d[3][2];
v[13 * stride] = d[1][1] + d[1][2] - d[3][1] - d[3][2];
v[14 * stride] = -d[1][1] + d[1][2] + d[3][1] - d[3][2];
v[15 * stride] = d[1][1] - d[1][3] - d[3][1] + d[3][3];
}}} // endfor chw
}
// Stage 3: for each of the 16 tile positions, one GEMM over channels:
// (top_C x bottom_C) * (bottom_C x H2*W2) -> (top_C x H2*W2).
{
const int top_area = H2 * W2;
for (int i = 0; i < 16; ++i) {
const real* const u = p_weight4x4 + i * top_C * bottom_C;
const real* const v = p_bottom4x4 + i * bottom_C * top_area;
real* const uv_ = p_temp4x4 + i * top_C * top_area;
cblas_sgemm(CblasRowMajor,
CblasNoTrans, CblasNoTrans,
top_C, top_area, bottom_C,
1,
u, bottom_C,
v, top_area,
0,
uv_, top_area);
}
}
// Stage 4: inverse transform — collapse each 4x4 product tile into the
// 2x2 output pixels, guarding the right/bottom edges for odd H or W.
{
const int stride = top_C * H2 * W2;
for (int k = 0; k < top_C; ++k) {
for (int h = 0; h < H; h += 2) {
for (int w = 0; w < W; w += 2) {
const real* const uv_ = p_temp4x4 + k * H2 * W2 + h / 2 * W2 + w / 2;
real* const y = top3d + (k * H + h) * W + w;
for (int i = 0; i < 16; ++i) {
uv[i] = uv_[i * stride];
}
y[0] = uv[0] + uv[1] + uv[2] +
uv[4] + uv[5] + uv[6] +
uv[8] + uv[9] + uv[10];
if (w + 1 < W) {
y[1] = uv[1] - uv[2] - uv[3] +
uv[5] - uv[6] - uv[7] +
uv[9] - uv[10] - uv[11];
}
if (h + 1 < H) {
y[W] = uv[4] + uv[5] + uv[6]
- uv[8] - uv[9] - uv[10]
- uv[12] - uv[13] - uv[14];
if (w + 1 < W) {
y[W + 1] = uv[5] - uv[6] - uv[7]
- uv[9] + uv[10] + uv[11]
- uv[13] + uv[14] + uv[15];
}
}
}}} // endfor khw
}
}
#endif
// convert bottom3d (C x H x W)
// -> bottom5d (C x kernel_h x kernel_w x H5 x W5)
// given (c, h5, w5),
// bottom5d[c][kh][kw][h5][w5] = bottom3d[c][h][w]
// h = (-pad_h + stride_h * h5) + kh, kh = { 0, 1, ..., kernel_h - 1 }
// w = (-pad_w + stride_w * w5) + kw, kw = { 0, 1, ..., kernel_w - 1 }
// if !(0 <= h < H) or !(0 <= w < W), assign 0
#ifdef GPU
// GPU im2col-style expansion (see the shared comment above): one thread
// per (c, h5, w5) output position, each copying its kernel_h x kernel_w
// patch (zero-filled outside the image) into bottom5d.
__global__
static
void convert_bottom_gpu(const real bottom3d[],
real bottom5d[],
const int C, const int H, const int W,
const int H5, const int W5,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w)
{
// thread index: (c, h5, w5) = c*H5*W5 + h5*W5 + w5
const int index = blockIdx.x * blockDim.x + threadIdx.x;
const int H5W5 = H5 * W5;
// Bounds guard: the grid may be larger than C * H5 * W5.
if (index < C * H5W5) {
// parse thread index -> (c, h5, w5)
const int c = index / H5W5;
const int h5 = (index / W5) % H5;
const int w5 = index % W5;
// p_bottom5d initially points to bottom5d[c][kh = 0][kw = 0][h5][w5]
real* p_bottom5d = bottom5d + index +
(c * H5W5) * (kernel_h * kernel_w - 1);
// (h_start, w_start): upper-left corner of bottom3d's kernel patch
const int h_start = h5 * stride_h - pad_h;
const int w_start = w5 * stride_w - pad_w;
const real* p_bottom3d = bottom3d + (c * H + h_start) * W + w_start;
// bottom5d[c][kh][kw][h5][w5] = bottom3d[c][h][w]
// h = h_start + kh, kh = {0, 1, ..., kernel_h - 1}
// w = w_start + kw, kw = {0, 1, ..., kernel_w - 1}
// if (h, w) is in a zero-padded region, assign 0
for (int kh = 0; kh < kernel_h; ++kh) {
for (int kw = 0; kw < kernel_w; ++kw) {
const int h = h_start + kh;
const int w = w_start + kw;
p_bottom5d[(kh * kernel_w + kw) * H5W5] =
(h >= 0 && h < H && w >= 0 && w < W) ? p_bottom3d[kh * W + kw] : 0;
}
}
}
}
#else
static
void convert_bottom_cpu(const real bottom3d[],
real bottom5d[],
const int C, const int H, const int W,
const int H5, const int W5,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w)
{
for (int c = 0; c < C; ++c) {
for (int kh = 0; kh < kernel_h; ++kh) {
for (int kw = 0; kw < kernel_w; ++kw) {
// pointer to bottom5d[c][kh][kw][h5 = 0][w5 = 0]
real* const p_bottom5d = bottom5d +
((c * kernel_h + kh) * kernel_w + kw) * H5 * W5;
int h = -pad_h + kh;
int h5 = 0;
// for h < 0 (zero-padded region): bottom5d[c][kh][kw][h5][:] = 0
for (; h < 0; h += stride_h, ++h5) {
for (int w5 = 0; w5 < W5; ++w5) {
p_bottom5d[h5 * W5 + w5] = 0;
}
}
// for 0 <= h < H (data region)
for (; h < H && h5 < H5; h += stride_h, ++h5) {
// pointer to bottom3d[c][h][w = 0]
int w = -pad_w + kw;
int w5 = 0;
// for w < 0 (zero-padded region): bottom5d[c][kh][kw][h5][w5] = 0
for (; w < 0; w += stride_w, ++w5) {
p_bottom5d[h5 * W5 + w5] = 0;
}
// for 0 <= w < W (data region):
// bottom5d[c][kh][kw][h5][w5] = bottom3d[c][h][w]
for (; w < W && w5 < W5; w += stride_w, ++w5) {
p_bottom5d[h5 * W5 + w5] = bottom3d[(c * H + h) * W + w];
}
// for w >= W (zero-padded region): bottom5d[c][kh][kw][h5][w5] = 0
for (; w5 < W5; ++w5) {
p_bottom5d[h5 * W5 + w5] = 0;
}
}
// for h >= H (zero-padded region): bottom5d[c][kh][kw][h5][:] = 0
for (; h5 < H5; ++h5) {
for (int w5 = 0; w5 < W5; ++w5) {
p_bottom5d[h5 * W5 + w5] = 0;
}
}
} // endfor kw
} // endfor kh
} // endfor c
}
#endif
// --------------------------------------------------------------------------
// layer-wise operator code
// --------------------------------------------------------------------------
// convolution: bottom -> top
// G: number of groups
// bottom: (G * C) x H x W
// top: (G * C') x H' x W'
// weight: G x C' x C x kernel_h x kernel_w
// bias: (G * C') x 1
// temp: (G * C * kernel_h * kernel_w) x (H' * W') array
// const: 1 x (H' * W') array, const[i] = 1 for all i
static
void conv_forward(const Tensor* const bottom3d,
Tensor* const top3d,
const Tensor* const weight5d,
const Tensor* const bias1d,
real temp_data[],
const real const_data[],
const LayerOption* const option)
{
// weight shape: G x C' x C x kernel_h x kernel_w
const int G = weight5d->shape[0][0];
const int top_C = weight5d->shape[0][1]; // C'
const int bottom_C = weight5d->shape[0][2]; // C
const int kernel_h = weight5d->shape[0][3];
const int kernel_w = weight5d->shape[0][4];
// padding size & stride size
const int pad_h = option->pad_h;
const int pad_w = option->pad_w;
const int stride_h = option->stride_h;
const int stride_w = option->stride_w;
// do forward-pass for each item in the batch
const real* p_bottom_item = bottom3d->data;
real* p_top_item = top3d->data;
real* p_temp_data = temp_data;
for (int n = 0; n < bottom3d->num_items; ++n) {
// bottom shape: (G * C) x H x W
const int bottom_H = bottom3d->shape[n][1]; // H
const int bottom_W = bottom3d->shape[n][2]; // W
// set top shape: (G * C') x H' x W'
// H' = 1 + (H + 2*pad_h - kernel_h) / stride_h
// W' = 1 + (W + 2*pad_w - kernel_w) / stride_w
const int top_H = 1 + (bottom_H + 2 * pad_h - kernel_h) / stride_h;
const int top_W = 1 + (bottom_W + 2 * pad_w - kernel_w) / stride_w;
top3d->shape[n][0] = G * top_C;
top3d->shape[n][1] = top_H;
top3d->shape[n][2] = top_W;
#ifndef GPU
if (top_C >= 48 &&
kernel_h == 3 && kernel_w == 3 && stride_h == 1 && stride_w == 1)
{
conv_winograd_cpu(
p_bottom_item, weight5d->data, temp_data, p_top_item,
top_C, bottom_C, bottom_H, bottom_W);
#ifdef DEBUG
printf("%s -> %s: Winograd conv\n", bottom3d->name, top3d->name);
#endif
}
else {
#endif
// convert bottom shape
// (G * C) x H x W -> (G * C * kernel_h * kernel_w) x (H' * W')
if (kernel_h != 1 || kernel_w != 1 ||
bottom_H != top_H || bottom_W != top_W)
{
#ifdef GPU
// one thread computes "kernel_h * kernel_w" entries in top
const int num_threads = G * bottom_C * top_H * top_W;
const int threads_per_block = 512;
const int num_blocks = DIV_THEN_CEIL(num_threads, threads_per_block);
hipLaunchKernelGGL(( convert_bottom_gpu), dim3(num_blocks), dim3(threads_per_block), 0, 0,
p_bottom_item, p_temp_data,
G * bottom_C, bottom_H, bottom_W,
top_H, top_W,
kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w);
#else
convert_bottom_cpu(
p_bottom_item, p_temp_data,
G * bottom_C, bottom_H, bottom_W,
top_H, top_W,
kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w);
#endif
}
else {
// if 1x1 convolution, skip convert_bottom
p_temp_data = (real*)p_bottom_item;
}
// compute top[g] = dot(weight[g], bottom[g])
// weight[g]: C' x (C * kernel_h * kernel_w)
// bottom[g]: (C * kernel_h * kernel_w) x (H' * W')
// top[g]: C' x H' x W'
for (int g = 0; g < G; ++g) {
const int kernel_size = bottom_C * kernel_h * kernel_w;
const int top_area = top_H * top_W;
const real* const p_temp_g = p_temp_data +
g * kernel_size * top_area;
const real* const p_weight_g = weight5d->data +
g * top_C * kernel_size;
real* const p_top_g = p_top_item + g * top_C * top_area;
// compute Z = alpha * dot(X, Y) + beta * Z
// X (= weight): m x p, Y (= bottom): p x n, Z (= top): m x n
// X, Y, Z: row-major order (e.g., Z[i][j] = Z[i * n + j])
#ifdef GPU
// input arguments:
// cublas handle,
// do_transpose_Y (= false), do_transpose_X (= false),
// n (= H' * W'), m (= C'), p (= C * kernel_h * kernel_w),
// &alpha (= 1),
// &Y, number of columns in Y (= n),
// &X, number of columns in X (= p),
// &beta (= 0),
// &Z, number of columns in Z (= n)
const real one = 1.0f, zero = 0.0f;
hipblasSgemm(*((hipblasHandle_t*)option->handle),
HIPBLAS_OP_N, HIPBLAS_OP_N,
top_area, top_C, kernel_size,
&one,
p_temp_g, top_area,
p_weight_g, kernel_size,
&zero,
p_top_g, top_area);
#else
// input arguments:
// is_row_major_order (= true),
// do_transpose_X (= false), do_transpose_Y (= false),
// m (= C'), n (= H' * W'), p (= C * kernel_h * kernel_w),
// alpha (= 1),
// &X, number of columns in X (= p),
// &Y, number of columns in Y (= n),
// beta (= 0),
// &Z, number of columns in Z (= n)
cblas_sgemm(CblasRowMajor,
CblasNoTrans, CblasNoTrans,
top_C, top_area, kernel_size,
1,
p_weight_g, kernel_size,
p_temp_g, top_area,
0,
p_top_g, top_area);
#endif
} // endfor g
#ifndef GPU
}
#endif
// compute top[i][j] = top[i][j] + bias[i]
// top: (G * C') x (H' * W')
// bias: (G * C') x 1
if (option->bias) {
const int top_channels = G * top_C;
const int top_area = top_H * top_W;
// the computation is equivalent to...
// top = top + dot(bias, constant)
// constant: 1 x (H' * W'), constant[i] = 1 for all i
#ifdef GPU
// thus, input arguments:
// do_transpose_Y (= false), do_transpose_X (= false),
// n = H' * W', m = G * C', p = 1
// alpha = 1, beta = 1
const real one = 1.0f;
hipblasSgemm(*((hipblasHandle_t*)option->handle),
HIPBLAS_OP_N, HIPBLAS_OP_N,
top_area, top_channels, 1,
&one,
const_data, top_area,
bias1d->data, 1,
&one,
p_top_item, top_area);
#else
// input arguments:
// do_transpose_X (= false), do_transpose_Y (= false),
// m = G * C', n = H' * W', p = 1
// alpha = 1, beta = 1
cblas_sger(CblasRowMajor,
top_channels, top_area,
1,
bias1d->data, 1,
const_data, 1,
p_top_item, top_area);
#endif
}
// locate next item
{
const int bottom_size = G * bottom_C * bottom_H * bottom_W;
const int top_size = G * top_C * top_H * top_W;
p_bottom_item += bottom_size;
p_top_item += top_size;
}
} // endfor batch
top3d->ndim = 3;
top3d->num_items = bottom3d->num_items;
}
// --------------------------------------------------------------------------
// output & parameter shape calculator code
// --------------------------------------------------------------------------
static
void conv_shape(const Tensor* const bottom3d,
Tensor* const top3d,
Tensor* const weight5d,
Tensor* const bias1d,
long int* const p_temp_space,
long int* const p_const_space,
const LayerOption* const option)
{
const int G = option->group;
const int top_C = option->num_output / option->group; // C'
const int bottom_C = bottom3d->shape[0][0] / option->group; // C
const int kernel_h = option->kernel_h;
const int kernel_w = option->kernel_w;
const int pad_h = option->pad_h;
const int pad_w = option->pad_w;
const int stride_h = option->stride_h;
const int stride_w = option->stride_w;
// calculate shape for each item in the batch
int total_size = 0, max_top_area = 0;
for (int n = 0; n < bottom3d->num_items; ++n) {
// bottom shape: (G * C) x H x W
const int bottom_H = bottom3d->shape[n][1]; // H
const int bottom_W = bottom3d->shape[n][2]; // W
// top shape: (G * C') x H' x W'
// H' = 1 + (H + 2*pad_h - kernel_h) / stride_h
// W' = 1 + (W + 2*pad_w - kernel_w) / stride_w
const int top_H = 1 + (bottom_H + 2 * pad_h - kernel_h) / stride_h;
const int top_W = 1 + (bottom_W + 2 * pad_w - kernel_w) / stride_w;
const int top_area = top_H * top_W;
top3d->shape[n][0] = G * top_C;
top3d->shape[n][1] = top_H;
top3d->shape[n][2] = top_W;
// start position for n-th item in top3d->data
top3d->start[n] = total_size;
total_size += G * top_C * top_H * top_W;
// max(H' * W') in the batch
max_top_area = MAX(max_top_area, top_area);
}
top3d->ndim = 3;
top3d->num_items = bottom3d->num_items;
// weight shape: G x C' x C x kernel_h x kernel_w
weight5d->num_items = 1;
weight5d->ndim = 5;
weight5d->shape[0][0] = G;
weight5d->shape[0][1] = top_C;
weight5d->shape[0][2] = bottom_C;
weight5d->shape[0][3] = kernel_h;
weight5d->shape[0][4] = kernel_w;
weight5d->start[0] = 0;
// bias shape: (G * C') x 1
if (option->bias) {
bias1d->num_items = 1;
bias1d->ndim = 1;
bias1d->shape[0][0] = G * top_C;
bias1d->start[0] = 0;
}
else if (bias1d) {
bias1d->num_items = 0;
bias1d->ndim = 0;
bias1d->shape[0][0] = 0;
bias1d->start[0] = 0;
}
// temporary data size: G * C * kernel_h * kernel_w * max(H' * W')
// + additional space for Winograd convolution
*p_temp_space = sizeof(real) * (
G * bottom_C * kernel_h * kernel_w * max_top_area
+ G * top_C * max_top_area * 4
+ G * top_C * bottom_C * 4 * 4);
// constant data size: max(H' * W')
*p_const_space = max_top_area * sizeof(real);
}
// --------------------------------------------------------------------------
// functions for layer instance
// --------------------------------------------------------------------------
void forward_conv_layer(void* const net_, void* const layer_)
{
Net* const net = (Net*)net_;
Layer* const layer = (Layer*)layer_;
Tensor* const p_bias = (layer->option.bias) ? get_param(layer, 1) : NULL;
conv_forward(get_bottom(layer, 0), get_top(layer, 0),
get_param(layer, 0), p_bias,
net->temp_data, net->const_data, &layer->option);
}
void shape_conv_layer(void* const net_, void* const layer_)
{
Net* const net = (Net*)net_;
Layer* const layer = (Layer*)layer_;
Tensor* const p_bias = (layer->option.bias) ? get_param(layer, 1) : NULL;
long int temp_space, const_space;
conv_shape(get_bottom(layer, 0), get_top(layer, 0),
get_param(layer, 0), p_bias,
&temp_space, &const_space, &layer->option);
update_temp_space(net, temp_space);
update_const_space(net, const_space);
}
void init_conv_layer(void* const net_, void* const layer_)
{
return;
}
void free_conv_layer(void* const net_, void* const layer_)
{
return;
}
| 892cbceeacd910fb47cef933199a233abde38c23.cu | #include "layers/operator.h"
#include <string.h>
// --------------------------------------------------------------------------
// kernel code
// conv_winograd_cpu
// convert_bottom_{gpu, cpu}
// --------------------------------------------------------------------------
#ifndef GPU
static
void conv_winograd_cpu(const real bottom3d[],
const real weight4d[],
real temp_data[],
real top3d[],
const int top_C, const int bottom_C,
const int H, const int W)
{
const int H2 = DIV_THEN_CEIL(H, 2);
const int W2 = DIV_THEN_CEIL(W, 2);
real* const p_weight4x4 = temp_data;
real* const p_bottom4x4 = temp_data + top_C * bottom_C * 4 * 4;
real* const p_temp4x4 = p_bottom4x4 + bottom_C * H2 * W2 * 4 * 4;
real d[4][4];
real uv[16];
{
const int stride = top_C * bottom_C;
for (int k = 0; k < top_C; ++k) {
for (int c = 0; c < bottom_C; ++c) {
const real* const g = weight4d + (k * bottom_C + c) * 3 * 3;
real* const u = p_weight4x4 + k * bottom_C + c;
const real g_sum = (g[0] + g[1] + g[2] +
g[3] + g[4] + g[5] +
g[6] + g[7] + g[8]) / 4;
u[0 * stride] = g[0];
u[1 * stride] = (g[0] + g[1] + g[2]) / 2;
u[2 * stride] = (g[0] - g[1] + g[2]) / 2;
u[3 * stride] = g[2];
u[4 * stride] = (g[0] + g[3] + g[6]) / 2;
u[5 * stride] = g_sum;
u[6 * stride] = g_sum - (g[1] + g[4] + g[7]) / 2;
u[7 * stride] = (g[2] + g[5] + g[8]) / 2;
u[8 * stride] = (g[0] - g[3] + g[6]) / 2;
u[9 * stride] = g_sum - (g[3] + g[4] + g[5]) / 2;
u[10 * stride] = g_sum - (g[1] + g[3] + g[5] + g[7]) / 2;
u[11 * stride] = (g[2] - g[5] + g[8]) / 2;
u[12 * stride] = g[6];
u[13 * stride] = (g[6] + g[7] + g[8]) / 2;
u[14 * stride] = (g[6] - g[7] + g[8]) / 2;
u[15 * stride] = g[8];
} // endfor c
} // endfor k
}
{
const int stride = bottom_C * H2 * W2;
for (int c = 0; c < bottom_C; ++c) {
for (int h = 0; h < H; h += 2) {
for (int w = 0; w < W; w += 2) {
const real* const p_patch = bottom3d + (c * H + h - 1) * W + w - 1;
//real* const v = p_bottom4x4 + (h / 2 * W2 + w / 2) * bottom_C + c;
real* const v = p_bottom4x4 + (c * H2 + h / 2) * W2 + w / 2;
for (int j = 0; j < 4; ++j) {
for (int i = 0; i < 4; ++i) {
const int hh = h - 1 + j;
const int ww = w - 1 + i;
d[j][i] = (hh >= 0 && hh < H && ww >= 0 && ww < W) ?
p_patch[j * W + i] : 0;
}
}
v[0 * stride] = d[0][0] - d[0][2] - d[2][0] + d[2][2];
v[1 * stride] = d[0][1] + d[0][2] - d[2][1] - d[2][2];
v[2 * stride] = -d[0][1] + d[0][2] + d[2][1] - d[2][2];
v[3 * stride] = d[0][1] - d[0][3] - d[2][1] + d[2][3];
v[4 * stride] = d[1][0] - d[1][2] + d[2][0] - d[2][2];
v[5 * stride] = d[1][1] + d[1][2] + d[2][1] + d[2][2];
v[6 * stride] = -d[1][1] + d[1][2] - d[2][1] + d[2][2];
v[7 * stride] = d[1][1] - d[1][3] + d[2][1] - d[2][3];
v[8 * stride] = -d[1][0] + d[1][2] + d[2][0] - d[2][2];
v[9 * stride] = -d[1][1] - d[1][2] + d[2][1] + d[2][2];
v[10 * stride] = d[1][1] - d[1][2] - d[2][1] + d[2][2];
v[11 * stride] = -d[1][1] + d[1][3] + d[2][1] - d[2][3];
v[12 * stride] = d[1][0] - d[1][2] - d[3][0] + d[3][2];
v[13 * stride] = d[1][1] + d[1][2] - d[3][1] - d[3][2];
v[14 * stride] = -d[1][1] + d[1][2] + d[3][1] - d[3][2];
v[15 * stride] = d[1][1] - d[1][3] - d[3][1] + d[3][3];
}}} // endfor chw
}
{
const int top_area = H2 * W2;
for (int i = 0; i < 16; ++i) {
const real* const u = p_weight4x4 + i * top_C * bottom_C;
const real* const v = p_bottom4x4 + i * bottom_C * top_area;
real* const uv_ = p_temp4x4 + i * top_C * top_area;
cblas_sgemm(CblasRowMajor,
CblasNoTrans, CblasNoTrans,
top_C, top_area, bottom_C,
1,
u, bottom_C,
v, top_area,
0,
uv_, top_area);
}
}
{
const int stride = top_C * H2 * W2;
for (int k = 0; k < top_C; ++k) {
for (int h = 0; h < H; h += 2) {
for (int w = 0; w < W; w += 2) {
const real* const uv_ = p_temp4x4 + k * H2 * W2 + h / 2 * W2 + w / 2;
real* const y = top3d + (k * H + h) * W + w;
for (int i = 0; i < 16; ++i) {
uv[i] = uv_[i * stride];
}
y[0] = uv[0] + uv[1] + uv[2] +
uv[4] + uv[5] + uv[6] +
uv[8] + uv[9] + uv[10];
if (w + 1 < W) {
y[1] = uv[1] - uv[2] - uv[3] +
uv[5] - uv[6] - uv[7] +
uv[9] - uv[10] - uv[11];
}
if (h + 1 < H) {
y[W] = uv[4] + uv[5] + uv[6]
- uv[8] - uv[9] - uv[10]
- uv[12] - uv[13] - uv[14];
if (w + 1 < W) {
y[W + 1] = uv[5] - uv[6] - uv[7]
- uv[9] + uv[10] + uv[11]
- uv[13] + uv[14] + uv[15];
}
}
}}} // endfor khw
}
}
#endif
// convert bottom3d (C x H x W)
// -> bottom5d (C x kernel_h x kernel_w x H5 x W5)
// given (c, h5, w5),
// bottom5d[c][kh][kw][h5][w5] = bottom3d[c][h][w]
// h = (-pad_h + stride_h * h5) + kh, kh = { 0, 1, ..., kernel_h - 1 }
// w = (-pad_w + stride_w * w5) + kw, kw = { 0, 1, ..., kernel_w - 1 }
// if !(0 <= h < H) or !(0 <= w < W), assign 0
#ifdef GPU
__global__
static
void convert_bottom_gpu(const real bottom3d[],
real bottom5d[],
const int C, const int H, const int W,
const int H5, const int W5,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w)
{
// thread index: (c, h5, w5) = c*H5*W5 + h5*W5 + w5
const int index = blockIdx.x * blockDim.x + threadIdx.x;
const int H5W5 = H5 * W5;
if (index < C * H5W5) {
// parse thread index -> (c, h5, w5)
const int c = index / H5W5;
const int h5 = (index / W5) % H5;
const int w5 = index % W5;
// p_bottom5d initially points to bottom5d[c][kh = 0][kw = 0][h5][w5]
real* p_bottom5d = bottom5d + index +
(c * H5W5) * (kernel_h * kernel_w - 1);
// (h_start, w_start): upper-left corner of bottom3d's kernel patch
const int h_start = h5 * stride_h - pad_h;
const int w_start = w5 * stride_w - pad_w;
const real* p_bottom3d = bottom3d + (c * H + h_start) * W + w_start;
// bottom5d[c][kh][kw][h5][w5] = bottom3d[c][h][w]
// h = h_start + kh, kh = {0, 1, ..., kernel_h - 1}
// w = w_start + kw, kw = {0, 1, ..., kernel_w - 1}
// if (h, w) is in a zero-padded region, assign 0
for (int kh = 0; kh < kernel_h; ++kh) {
for (int kw = 0; kw < kernel_w; ++kw) {
const int h = h_start + kh;
const int w = w_start + kw;
p_bottom5d[(kh * kernel_w + kw) * H5W5] =
(h >= 0 && h < H && w >= 0 && w < W) ? p_bottom3d[kh * W + kw] : 0;
}
}
}
}
#else
static
void convert_bottom_cpu(const real bottom3d[],
real bottom5d[],
const int C, const int H, const int W,
const int H5, const int W5,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w)
{
for (int c = 0; c < C; ++c) {
for (int kh = 0; kh < kernel_h; ++kh) {
for (int kw = 0; kw < kernel_w; ++kw) {
// pointer to bottom5d[c][kh][kw][h5 = 0][w5 = 0]
real* const p_bottom5d = bottom5d +
((c * kernel_h + kh) * kernel_w + kw) * H5 * W5;
int h = -pad_h + kh;
int h5 = 0;
// for h < 0 (zero-padded region): bottom5d[c][kh][kw][h5][:] = 0
for (; h < 0; h += stride_h, ++h5) {
for (int w5 = 0; w5 < W5; ++w5) {
p_bottom5d[h5 * W5 + w5] = 0;
}
}
// for 0 <= h < H (data region)
for (; h < H && h5 < H5; h += stride_h, ++h5) {
// pointer to bottom3d[c][h][w = 0]
int w = -pad_w + kw;
int w5 = 0;
// for w < 0 (zero-padded region): bottom5d[c][kh][kw][h5][w5] = 0
for (; w < 0; w += stride_w, ++w5) {
p_bottom5d[h5 * W5 + w5] = 0;
}
// for 0 <= w < W (data region):
// bottom5d[c][kh][kw][h5][w5] = bottom3d[c][h][w]
for (; w < W && w5 < W5; w += stride_w, ++w5) {
p_bottom5d[h5 * W5 + w5] = bottom3d[(c * H + h) * W + w];
}
// for w >= W (zero-padded region): bottom5d[c][kh][kw][h5][w5] = 0
for (; w5 < W5; ++w5) {
p_bottom5d[h5 * W5 + w5] = 0;
}
}
// for h >= H (zero-padded region): bottom5d[c][kh][kw][h5][:] = 0
for (; h5 < H5; ++h5) {
for (int w5 = 0; w5 < W5; ++w5) {
p_bottom5d[h5 * W5 + w5] = 0;
}
}
} // endfor kw
} // endfor kh
} // endfor c
}
#endif
// --------------------------------------------------------------------------
// layer-wise operator code
// --------------------------------------------------------------------------
// convolution: bottom -> top
// G: number of groups
// bottom: (G * C) x H x W
// top: (G * C') x H' x W'
// weight: G x C' x C x kernel_h x kernel_w
// bias: (G * C') x 1
// temp: (G * C * kernel_h * kernel_w) x (H' * W') array
// const: 1 x (H' * W') array, const[i] = 1 for all i
static
void conv_forward(const Tensor* const bottom3d,
Tensor* const top3d,
const Tensor* const weight5d,
const Tensor* const bias1d,
real temp_data[],
const real const_data[],
const LayerOption* const option)
{
// weight shape: G x C' x C x kernel_h x kernel_w
const int G = weight5d->shape[0][0];
const int top_C = weight5d->shape[0][1]; // C'
const int bottom_C = weight5d->shape[0][2]; // C
const int kernel_h = weight5d->shape[0][3];
const int kernel_w = weight5d->shape[0][4];
// padding size & stride size
const int pad_h = option->pad_h;
const int pad_w = option->pad_w;
const int stride_h = option->stride_h;
const int stride_w = option->stride_w;
// do forward-pass for each item in the batch
const real* p_bottom_item = bottom3d->data;
real* p_top_item = top3d->data;
real* p_temp_data = temp_data;
for (int n = 0; n < bottom3d->num_items; ++n) {
// bottom shape: (G * C) x H x W
const int bottom_H = bottom3d->shape[n][1]; // H
const int bottom_W = bottom3d->shape[n][2]; // W
// set top shape: (G * C') x H' x W'
// H' = 1 + (H + 2*pad_h - kernel_h) / stride_h
// W' = 1 + (W + 2*pad_w - kernel_w) / stride_w
const int top_H = 1 + (bottom_H + 2 * pad_h - kernel_h) / stride_h;
const int top_W = 1 + (bottom_W + 2 * pad_w - kernel_w) / stride_w;
top3d->shape[n][0] = G * top_C;
top3d->shape[n][1] = top_H;
top3d->shape[n][2] = top_W;
#ifndef GPU
if (top_C >= 48 &&
kernel_h == 3 && kernel_w == 3 && stride_h == 1 && stride_w == 1)
{
conv_winograd_cpu(
p_bottom_item, weight5d->data, temp_data, p_top_item,
top_C, bottom_C, bottom_H, bottom_W);
#ifdef DEBUG
printf("%s -> %s: Winograd conv\n", bottom3d->name, top3d->name);
#endif
}
else {
#endif
// convert bottom shape
// (G * C) x H x W -> (G * C * kernel_h * kernel_w) x (H' * W')
if (kernel_h != 1 || kernel_w != 1 ||
bottom_H != top_H || bottom_W != top_W)
{
#ifdef GPU
// one thread computes "kernel_h * kernel_w" entries in top
const int num_threads = G * bottom_C * top_H * top_W;
const int threads_per_block = 512;
const int num_blocks = DIV_THEN_CEIL(num_threads, threads_per_block);
convert_bottom_gpu<<<num_blocks, threads_per_block>>>(
p_bottom_item, p_temp_data,
G * bottom_C, bottom_H, bottom_W,
top_H, top_W,
kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w);
#else
convert_bottom_cpu(
p_bottom_item, p_temp_data,
G * bottom_C, bottom_H, bottom_W,
top_H, top_W,
kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w);
#endif
}
else {
// if 1x1 convolution, skip convert_bottom
p_temp_data = (real*)p_bottom_item;
}
// compute top[g] = dot(weight[g], bottom[g])
// weight[g]: C' x (C * kernel_h * kernel_w)
// bottom[g]: (C * kernel_h * kernel_w) x (H' * W')
// top[g]: C' x H' x W'
for (int g = 0; g < G; ++g) {
const int kernel_size = bottom_C * kernel_h * kernel_w;
const int top_area = top_H * top_W;
const real* const p_temp_g = p_temp_data +
g * kernel_size * top_area;
const real* const p_weight_g = weight5d->data +
g * top_C * kernel_size;
real* const p_top_g = p_top_item + g * top_C * top_area;
// compute Z = alpha * dot(X, Y) + beta * Z
// X (= weight): m x p, Y (= bottom): p x n, Z (= top): m x n
// X, Y, Z: row-major order (e.g., Z[i][j] = Z[i * n + j])
#ifdef GPU
// input arguments:
// cublas handle,
// do_transpose_Y (= false), do_transpose_X (= false),
// n (= H' * W'), m (= C'), p (= C * kernel_h * kernel_w),
// &alpha (= 1),
// &Y, number of columns in Y (= n),
// &X, number of columns in X (= p),
// &beta (= 0),
// &Z, number of columns in Z (= n)
const real one = 1.0f, zero = 0.0f;
cublasSgemm(*((cublasHandle_t*)option->handle),
CUBLAS_OP_N, CUBLAS_OP_N,
top_area, top_C, kernel_size,
&one,
p_temp_g, top_area,
p_weight_g, kernel_size,
&zero,
p_top_g, top_area);
#else
// input arguments:
// is_row_major_order (= true),
// do_transpose_X (= false), do_transpose_Y (= false),
// m (= C'), n (= H' * W'), p (= C * kernel_h * kernel_w),
// alpha (= 1),
// &X, number of columns in X (= p),
// &Y, number of columns in Y (= n),
// beta (= 0),
// &Z, number of columns in Z (= n)
cblas_sgemm(CblasRowMajor,
CblasNoTrans, CblasNoTrans,
top_C, top_area, kernel_size,
1,
p_weight_g, kernel_size,
p_temp_g, top_area,
0,
p_top_g, top_area);
#endif
} // endfor g
#ifndef GPU
}
#endif
// compute top[i][j] = top[i][j] + bias[i]
// top: (G * C') x (H' * W')
// bias: (G * C') x 1
if (option->bias) {
const int top_channels = G * top_C;
const int top_area = top_H * top_W;
// the computation is equivalent to...
// top = top + dot(bias, constant)
// constant: 1 x (H' * W'), constant[i] = 1 for all i
#ifdef GPU
// thus, input arguments:
// do_transpose_Y (= false), do_transpose_X (= false),
// n = H' * W', m = G * C', p = 1
// alpha = 1, beta = 1
const real one = 1.0f;
cublasSgemm(*((cublasHandle_t*)option->handle),
CUBLAS_OP_N, CUBLAS_OP_N,
top_area, top_channels, 1,
&one,
const_data, top_area,
bias1d->data, 1,
&one,
p_top_item, top_area);
#else
// input arguments:
// do_transpose_X (= false), do_transpose_Y (= false),
// m = G * C', n = H' * W', p = 1
// alpha = 1, beta = 1
cblas_sger(CblasRowMajor,
top_channels, top_area,
1,
bias1d->data, 1,
const_data, 1,
p_top_item, top_area);
#endif
}
// locate next item
{
const int bottom_size = G * bottom_C * bottom_H * bottom_W;
const int top_size = G * top_C * top_H * top_W;
p_bottom_item += bottom_size;
p_top_item += top_size;
}
} // endfor batch
top3d->ndim = 3;
top3d->num_items = bottom3d->num_items;
}
// --------------------------------------------------------------------------
// output & parameter shape calculator code
// --------------------------------------------------------------------------
static
void conv_shape(const Tensor* const bottom3d,
Tensor* const top3d,
Tensor* const weight5d,
Tensor* const bias1d,
long int* const p_temp_space,
long int* const p_const_space,
const LayerOption* const option)
{
const int G = option->group;
const int top_C = option->num_output / option->group; // C'
const int bottom_C = bottom3d->shape[0][0] / option->group; // C
const int kernel_h = option->kernel_h;
const int kernel_w = option->kernel_w;
const int pad_h = option->pad_h;
const int pad_w = option->pad_w;
const int stride_h = option->stride_h;
const int stride_w = option->stride_w;
// calculate shape for each item in the batch
int total_size = 0, max_top_area = 0;
for (int n = 0; n < bottom3d->num_items; ++n) {
// bottom shape: (G * C) x H x W
const int bottom_H = bottom3d->shape[n][1]; // H
const int bottom_W = bottom3d->shape[n][2]; // W
// top shape: (G * C') x H' x W'
// H' = 1 + (H + 2*pad_h - kernel_h) / stride_h
// W' = 1 + (W + 2*pad_w - kernel_w) / stride_w
const int top_H = 1 + (bottom_H + 2 * pad_h - kernel_h) / stride_h;
const int top_W = 1 + (bottom_W + 2 * pad_w - kernel_w) / stride_w;
const int top_area = top_H * top_W;
top3d->shape[n][0] = G * top_C;
top3d->shape[n][1] = top_H;
top3d->shape[n][2] = top_W;
// start position for n-th item in top3d->data
top3d->start[n] = total_size;
total_size += G * top_C * top_H * top_W;
// max(H' * W') in the batch
max_top_area = MAX(max_top_area, top_area);
}
top3d->ndim = 3;
top3d->num_items = bottom3d->num_items;
// weight shape: G x C' x C x kernel_h x kernel_w
weight5d->num_items = 1;
weight5d->ndim = 5;
weight5d->shape[0][0] = G;
weight5d->shape[0][1] = top_C;
weight5d->shape[0][2] = bottom_C;
weight5d->shape[0][3] = kernel_h;
weight5d->shape[0][4] = kernel_w;
weight5d->start[0] = 0;
// bias shape: (G * C') x 1
if (option->bias) {
bias1d->num_items = 1;
bias1d->ndim = 1;
bias1d->shape[0][0] = G * top_C;
bias1d->start[0] = 0;
}
else if (bias1d) {
bias1d->num_items = 0;
bias1d->ndim = 0;
bias1d->shape[0][0] = 0;
bias1d->start[0] = 0;
}
// temporary data size: G * C * kernel_h * kernel_w * max(H' * W')
// + additional space for Winograd convolution
*p_temp_space = sizeof(real) * (
G * bottom_C * kernel_h * kernel_w * max_top_area
+ G * top_C * max_top_area * 4
+ G * top_C * bottom_C * 4 * 4);
// constant data size: max(H' * W')
*p_const_space = max_top_area * sizeof(real);
}
// --------------------------------------------------------------------------
// functions for layer instance
// --------------------------------------------------------------------------
void forward_conv_layer(void* const net_, void* const layer_)
{
Net* const net = (Net*)net_;
Layer* const layer = (Layer*)layer_;
Tensor* const p_bias = (layer->option.bias) ? get_param(layer, 1) : NULL;
conv_forward(get_bottom(layer, 0), get_top(layer, 0),
get_param(layer, 0), p_bias,
net->temp_data, net->const_data, &layer->option);
}
void shape_conv_layer(void* const net_, void* const layer_)
{
Net* const net = (Net*)net_;
Layer* const layer = (Layer*)layer_;
Tensor* const p_bias = (layer->option.bias) ? get_param(layer, 1) : NULL;
long int temp_space, const_space;
conv_shape(get_bottom(layer, 0), get_top(layer, 0),
get_param(layer, 0), p_bias,
&temp_space, &const_space, &layer->option);
update_temp_space(net, temp_space);
update_const_space(net, const_space);
}
void init_conv_layer(void* const net_, void* const layer_)
{
return;
}
void free_conv_layer(void* const net_, void* const layer_)
{
return;
}
|
eaaf3fab8a5be65fd11e30b9b3b0f451132f0d83.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/hip/jit_utils.h>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at { namespace native {
// See note [Jiterator]
const char gcd_name[] = "gcd";
void gcd_kernel_cuda(TensorIteratorBase& iter) {
#ifdef USE_JITERATOR
AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), "gcd_cuda", [&]() {
jitted_gpu_kernel</*name=*/gcd_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 2>(iter, gcd_string);
});
#else
AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), "gcd_cuda", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA (scalar_t a, scalar_t b) -> scalar_t {
return calc_gcd(a, b);
});
});
#endif // USE_JITERATOR
}
void lcm_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), "lcm_cuda", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA (scalar_t a, scalar_t b) -> scalar_t {
scalar_t g = calc_gcd(a, b);
return (g == 0) ? 0 : ::abs(a / g * b);
});
});
}
REGISTER_DISPATCH(gcd_stub, &gcd_kernel_cuda);
REGISTER_DISPATCH(lcm_stub, &lcm_kernel_cuda);
}} // namespace at::native
| eaaf3fab8a5be65fd11e30b9b3b0f451132f0d83.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/cuda/jit_utils.h>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at { namespace native {
// See note [Jiterator]
const char gcd_name[] = "gcd";
void gcd_kernel_cuda(TensorIteratorBase& iter) {
#ifdef USE_JITERATOR
AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), "gcd_cuda", [&]() {
jitted_gpu_kernel</*name=*/gcd_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 2>(iter, gcd_string);
});
#else
AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), "gcd_cuda", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA (scalar_t a, scalar_t b) -> scalar_t {
return calc_gcd(a, b);
});
});
#endif // USE_JITERATOR
}
void lcm_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), "lcm_cuda", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA (scalar_t a, scalar_t b) -> scalar_t {
scalar_t g = calc_gcd(a, b);
return (g == 0) ? 0 : ::abs(a / g * b);
});
});
}
REGISTER_DISPATCH(gcd_stub, &gcd_kernel_cuda);
REGISTER_DISPATCH(lcm_stub, &lcm_kernel_cuda);
}} // namespace at::native
|
fbaea20ab1b8c278ee1d98cf2e9c4b9f39dccfd3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**********************************
When updating a kernel or adding a new one,
please compile the ptx file and commit it:
nvcc -ptx -arch=sm_30 SystemML.cu
***********************************/
/**
* Does a copy of upper to lower triangle of the given matrix
* @param ret the input and output array allocated on the GPU
* @param dim the number of rows of the square matrix ret
* @param N total number of elements of the matrix
*/
// One thread per element; threads past `size` exit via the bounds check.
// llround rounds half away from zero; the result is stored back as double.
extern "C"
__global__ void matrix_round(double *A, double *C, unsigned int size) {
// unsigned index: avoids the signed/unsigned comparison with `size` and
// the int overflow for grids addressing more than 2^31 elements.
unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
C[index] = (double)llround(A[index]);
}
} | fbaea20ab1b8c278ee1d98cf2e9c4b9f39dccfd3.cu | #include "includes.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**********************************
When updating a kernel or adding a new one,
please compile the ptx file and commit it:
nvcc -ptx -arch=sm_30 SystemML.cu
***********************************/
/**
 * Rounds each element of the input array to the nearest integer
 * @param A the input array allocated on the GPU
 * @param C the output array allocated on the GPU
 * @param size total number of elements of the matrix
 */
// One thread per element; threads past `size` exit via the bounds check.
// llround rounds half away from zero; the result is stored back as double.
extern "C"
__global__ void matrix_round(double *A, double *C, unsigned int size) {
// unsigned index: avoids the signed/unsigned comparison with `size` and
// the int overflow for grids addressing more than 2^31 elements.
unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
C[index] = (double)llround(A[index]);
}
}
8c3fe0f9fc7cb8a5b5df5e790cd4e4333e4f03a3.hip | // !!! This is a file automatically generated by hipify!!!
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include <iostream>
#include "Renderer.h"
#include "VertexBuffer.h"
#include "IndexBuffer.h"
#include "VertexArray.h"
#include "VertexBufferLayout.h"
#include "Shader.h"
#include "Texture.h"
#include "PixelBuffer.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <cuda_gl_interop.h>
#define window_width 640
#define window_height 480
// Per-pixel kernel: paints a green interference pattern into an RGBA8
// pixel buffer. Launched by drawpic() with 16x16 blocks covering a
// window_width x window_height grid; there is no bounds check, so the
// grid must tile the window exactly.
__global__ void kernel(uchar4* ptr, int frequency)
{
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
// Row-major flat index of this thread's pixel.
int offset = x + y * blockDim.x * gridDim.x;
// Normalize pixel coordinates to [-0.5, 0.5].
float fx = x / (float)window_width - 0.5f;
float fy = y / (float)window_height - 0.5f;
// Maps the sine's [-1, 1] range to a green intensity in [1, 255].
// NOTE(review): sin/abs here rely on C++ overload resolution; sinf/fabsf
// would keep the math explicitly single precision -- confirm intent.
unsigned char green = 128 - 127 * sin(abs(fx * frequency) - abs(fy * frequency));
ptr[offset].x = 0;
ptr[offset].y = green;
ptr[offset].z = 0;
ptr[offset].w = 255;
}
/**
 * Fills the GL pixel buffer with one frame of the GPU-generated pattern.
 * Called once per frame from the render loop.
 * @param pixelbuffer OpenGL pixel buffer object the kernel writes into
 * @param resource    interop handle (re-registered on every call)
 * @param frequency   spatial frequency of the sine pattern
 */
void drawpic(PixelBuffer& pixelbuffer, cudaGraphicsResource*& resource, int frequency)
{
    // Register the GL buffer for this frame's interop access.
    hipGraphicsGLRegisterBuffer(&resource, pixelbuffer.GetID(), hipGraphicsMapFlagsNone); // Register buffer as a graphic source
    uchar4* devPtr;
    size_t size;
    hipGraphicsMapResources(1, &resource, 0);
    hipGraphicsResourceGetMappedPointer((void**)&devPtr, &size, resource);
    // 16x16 blocks tiling the window exactly (dimensions are multiples of 16).
    dim3 grids(window_width / 16, window_height / 16);
    dim3 threads(16, 16);
    kernel << <grids, threads >> > (devPtr, frequency);
    hipDeviceSynchronize();
    hipGraphicsUnmapResources(1, &resource, 0);
    // Balance the per-frame registration above; without this the function
    // leaked one graphics-resource registration on every frame.
    hipGraphicsUnregisterResource(resource);
}
// Demo entry point: creates a GLFW window, initializes GLEW and a
// GL-interop-capable device, builds a textured full-screen quad, and
// renders a GPU-generated animated pattern into it every frame.
int main(void)
{
GLFWwindow* window;
// NOTE(review): hipify kept the CUDA type name here; the HIP-native
// hipGraphicsResource type may be intended -- confirm.
cudaGraphicsResource* resource;
hipDeviceProp_t prop;
int dev;
// Pick any device with compute capability >= 1.0 and bind it for GL interop.
memset(&prop, 0, sizeof(hipDeviceProp_t));
prop.major = 1;
prop.minor = 0;
hipChooseDevice(&dev, &prop);
hipGLSetGLDevice(dev);
/* Initialize the library */
if (!glfwInit())
return -1;
/* Create a windowed mode window and its OpenGL context */
window = glfwCreateWindow(window_width, window_height, "Hello World", NULL, NULL);
if (!window)
{
glfwTerminate();
return -1;
}
/* Make the window's context current */
glfwMakeContextCurrent(window);
glfwSwapInterval(1);
if (glewInit() != GLEW_OK)
std::cout << "GLEW ini failed!" << std::endl;
std::cout << glGetString(GL_VERSION) << std::endl;
// Inner scope so the GL wrapper objects run their destructors before
// glfwTerminate() tears down the context below.
{float positions[] =
{
-1.0f, -1.0f, 0.0f, 0.0f,
1.0f, -1.0f, 1.0f, 0.0f,
1.0f, 1.0f, 1.0f, 1.0f,
-1.0f, 1.0f, 0.0f, 1.0f,
};
unsigned int indices[] =
{
0, 1, 2,
2, 3, 0
};
GLCall(glEnable(GL_BLEND));
GLCall(glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA));
Renderer renderer;
Shader shader("res/shaders/Basic.shader");
VertexArray va;
// Quad vertices: 2 floats position + 2 floats texture coordinate each.
VertexBuffer vb(positions, 4 * 4 * sizeof(float));
VertexBufferLayout layout;
layout.Push<float>(2);
layout.Push<float>(2);
va.AddBuffer(vb, layout);
Texture texture("res/textures/Minato_Aqua_Portrait.png");
Texture tex;
IndexBuffer ib(indices, 6);
// RGBA pixel buffer that the GPU kernel writes into each frame.
PixelBuffer pb(nullptr, window_width, window_height, 4);
va.Unbind();
vb.Unbind();
ib.Unbind();
shader.Unbind();
// Pattern frequency ping-pongs between 5 and 100 to animate the image.
int frequency = 5;
int increment = 1;
/* Loop until the user closes the window */
while (!glfwWindowShouldClose(window))
{
/* Render here */
renderer.Clear();
shader.Bind();
// Fill the pixel buffer on the GPU, then bind it as texture unit 0.
drawpic(pb, resource, frequency);
tex.ReadRGBA(pb, 0);
shader.SetUniform1i("u_Texture", 0);
renderer.Draw(va, ib, shader);
if (frequency > 100)
increment = -1;
else if (frequency < 5)
increment = 1;
frequency += increment;
glfwSwapBuffers(window);
/* Poll for and process events */
glfwPollEvents();
}
}
glfwTerminate();
return 0;
} | 8c3fe0f9fc7cb8a5b5df5e790cd4e4333e4f03a3.cu | #include <GL/glew.h>
#include <GLFW/glfw3.h>
#include <iostream>
#include "Renderer.h"
#include "VertexBuffer.h"
#include "IndexBuffer.h"
#include "VertexArray.h"
#include "VertexBufferLayout.h"
#include "Shader.h"
#include "Texture.h"
#include "PixelBuffer.h"
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <cuda_gl_interop.h>
#define window_width 640
#define window_height 480
// Per-pixel kernel: paints a green interference pattern into an RGBA8
// pixel buffer. Launched by drawpic() with 16x16 blocks covering a
// window_width x window_height grid; there is no bounds check, so the
// grid must tile the window exactly.
__global__ void kernel(uchar4* ptr, int frequency)
{
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
// Row-major flat index of this thread's pixel.
int offset = x + y * blockDim.x * gridDim.x;
// Normalize pixel coordinates to [-0.5, 0.5].
float fx = x / (float)window_width - 0.5f;
float fy = y / (float)window_height - 0.5f;
// Maps the sine's [-1, 1] range to a green intensity in [1, 255].
// NOTE(review): sin/abs here rely on C++ overload resolution; sinf/fabsf
// would keep the math explicitly single precision -- confirm intent.
unsigned char green = 128 - 127 * sin(abs(fx * frequency) - abs(fy * frequency));
ptr[offset].x = 0;
ptr[offset].y = green;
ptr[offset].z = 0;
ptr[offset].w = 255;
}
/**
 * Fills the GL pixel buffer with one frame of the GPU-generated pattern.
 * Called once per frame from the render loop.
 * @param pixelbuffer OpenGL pixel buffer object the kernel writes into
 * @param resource    interop handle (re-registered on every call)
 * @param frequency   spatial frequency of the sine pattern
 */
void drawpic(PixelBuffer& pixelbuffer, cudaGraphicsResource*& resource, int frequency)
{
    // Register the GL buffer for this frame's interop access.
    cudaGraphicsGLRegisterBuffer(&resource, pixelbuffer.GetID(), cudaGraphicsMapFlagsNone); // Register buffer as a graphic source
    uchar4* devPtr;
    size_t size;
    cudaGraphicsMapResources(1, &resource, 0);
    cudaGraphicsResourceGetMappedPointer((void**)&devPtr, &size, resource);
    // 16x16 blocks tiling the window exactly (dimensions are multiples of 16).
    dim3 grids(window_width / 16, window_height / 16);
    dim3 threads(16, 16);
    kernel << <grids, threads >> > (devPtr, frequency);
    cudaDeviceSynchronize();
    cudaGraphicsUnmapResources(1, &resource, 0);
    // Balance the per-frame registration above; without this the function
    // leaked one graphics-resource registration on every frame.
    cudaGraphicsUnregisterResource(resource);
}
// Demo entry point: creates a GLFW window, initializes GLEW and a
// GL-interop-capable CUDA device, builds a textured full-screen quad, and
// renders a GPU-generated animated pattern into it every frame.
int main(void)
{
GLFWwindow* window;
cudaGraphicsResource* resource;
cudaDeviceProp prop;
int dev;
// Pick any device with compute capability >= 1.0 and bind it for GL interop.
memset(&prop, 0, sizeof(cudaDeviceProp));
prop.major = 1;
prop.minor = 0;
cudaChooseDevice(&dev, &prop);
cudaGLSetGLDevice(dev);
/* Initialize the library */
if (!glfwInit())
return -1;
/* Create a windowed mode window and its OpenGL context */
window = glfwCreateWindow(window_width, window_height, "Hello World", NULL, NULL);
if (!window)
{
glfwTerminate();
return -1;
}
/* Make the window's context current */
glfwMakeContextCurrent(window);
glfwSwapInterval(1);
if (glewInit() != GLEW_OK)
std::cout << "GLEW ini failed!" << std::endl;
std::cout << glGetString(GL_VERSION) << std::endl;
// Inner scope so the GL wrapper objects run their destructors before
// glfwTerminate() tears down the context below.
{float positions[] =
{
-1.0f, -1.0f, 0.0f, 0.0f,
1.0f, -1.0f, 1.0f, 0.0f,
1.0f, 1.0f, 1.0f, 1.0f,
-1.0f, 1.0f, 0.0f, 1.0f,
};
unsigned int indices[] =
{
0, 1, 2,
2, 3, 0
};
GLCall(glEnable(GL_BLEND));
GLCall(glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA));
Renderer renderer;
Shader shader("res/shaders/Basic.shader");
VertexArray va;
// Quad vertices: 2 floats position + 2 floats texture coordinate each.
VertexBuffer vb(positions, 4 * 4 * sizeof(float));
VertexBufferLayout layout;
layout.Push<float>(2);
layout.Push<float>(2);
va.AddBuffer(vb, layout);
Texture texture("res/textures/Minato_Aqua_Portrait.png");
Texture tex;
IndexBuffer ib(indices, 6);
// RGBA pixel buffer that the GPU kernel writes into each frame.
PixelBuffer pb(nullptr, window_width, window_height, 4);
va.Unbind();
vb.Unbind();
ib.Unbind();
shader.Unbind();
// Pattern frequency ping-pongs between 5 and 100 to animate the image.
int frequency = 5;
int increment = 1;
/* Loop until the user closes the window */
while (!glfwWindowShouldClose(window))
{
/* Render here */
renderer.Clear();
shader.Bind();
// Fill the pixel buffer on the GPU, then bind it as texture unit 0.
drawpic(pb, resource, frequency);
tex.ReadRGBA(pb, 0);
shader.SetUniform1i("u_Texture", 0);
renderer.Draw(va, ib, shader);
if (frequency > 100)
increment = -1;
else if (frequency < 5)
increment = 1;
frequency += increment;
glfwSwapBuffers(window);
/* Poll for and process events */
glfwPollEvents();
}
}
glfwTerminate();
return 0;
}
2279c4b5365e00cf0b6909ea63b619e9d023f117.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdlib.h>
#include <algorithm>
#include "radixselect.cuh"
//#include "radixselectNormalInplaceWorking.cuh"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <fstream>
#include <random>
// #define readBinary 1
//#include <random>
// #define Enabletest 1
using namespace std;
typedef unsigned int data_t;
typedef int index_t;
/**
 * qsort-style comparator for int values, ascending order.
 * Returns <0, 0, >0 as *a is less than, equal to, or greater than *b.
 * The original subtraction idiom overflows (UB) when the operands have
 * opposite signs and large magnitude; the three-way form cannot overflow.
 */
int compare (const void * a, const void * b)
{
    int x = *(const int*)a;
    int y = *(const int*)b;
    return (x > y) - (x < y); // in ascending order
}
// Computes x raised to the n-th power by repeated multiplication (n >= 0).
// data_t is unused; it is kept so existing call sites keep compiling.
template<typename data_t,typename index_t>
index_t power(index_t x,index_t n)
{
    index_t result = 1;
    for (index_t remaining = n; remaining > 0; --remaining)
    {
        result *= x;
    }
    return result;
}
// Widens the caller-seeded running max/min with elements arr[1..n-1].
// The caller is expected to have initialized max and min (e.g. to arr[0])
// before calling, since the scan deliberately starts at index 1.
void getminmax(data_t* arr,index_t n,data_t& max,data_t& min)
{
    for (index_t idx = 1; idx < n; ++idx)
    {
        data_t v = arr[idx];
        if (v > max)
        {
            max = v;
        }
        if (v < min)
        {
            min = v;
        }
    }
}
// True iff x has exactly one set bit; zero is not considered a power of two.
// data_t is unused; it is kept so existing call sites keep compiling.
template<typename data_t,typename index_t>
bool IsPowerof2(index_t x)
{
    if (x == 0)
    {
        return false;
    }
    return (x & (x - 1)) == 0;
}
// Benchmark driver for GPU radix-based top-k selection.
// Usage: ./exe num_element_pow k NBitsPerDigit beta scoreFile output_file
// Loads 2^num_element_pow scores from scoreFile, runs either plain radix
// select (alpha == 0) or the subrange-sampling variant, then appends the
// timing breakdown to output_file.
int main(int argc,char** argv)
{
cout<<"./exe num_element k NBitsPerDigit beta scoreFile output_file"<<endl;
cout<<"Size of unsigned int"<<sizeof(unsigned int)<<endl;
if (argc != 7) {cout<<"wrong input"<<endl;exit(-1);}
index_t num_pow = atol(argv[1]);
index_t base=2;
index_t num_element = power<data_t,index_t>(base,num_pow);
cout<<"num_element: "<<num_element<<endl;
index_t k= atol(argv[2]);
index_t NBits=atol(argv[3]);//atol(argv[3]);
int sd[]={10,100000,1000000,100,100000000};
int beta=atoi(argv[4]);//SampleBeta function is designed for Beta=3 only. So we need to update the SampleBetafunction in radix select if we want to change Beta value
// H_ERR(hipSetDevice(1));
data_t* vec= (data_t*)malloc(sizeof(data_t)*num_element);//new data_t[num_element];
data_t* vec1= (data_t*)malloc(sizeof(data_t)*num_element);//new data_t[num_element];
std::random_device rd;
std::mt19937 gen(rd());
unsigned int value;
int over;
int minvalue=2147483643;
bool test=false;
// Heuristic subrange exponent: subranges have size 2^alpha, derived from
// the problem size and k.
index_t alpha=0.5*(num_pow-log(k)/log(2)+3);
cout<<"Calculated alpha: "<<alpha<<endl;
bool defaultContribution=true;
if (alpha <=5) defaultContribution=false;
index_t SubRangesize=pow(2,alpha);
// for (int dis=3;dis<4;dis++)
{
// std::uniform_int_distribution <unsigned int> d(0, 2147483643);
int minvalue=2147483643;
// std::normal_distribution<float> d(100000000, 10);//Mean =100 mill , sd=100
//std::normal_distribution<float> d(100000000, 10000000);//Mean =100 mill , sd=100
//Read the SCOREFILE
// unsigned int* score = (int*) malloc (num_element*sizeof(unsigned int));
// Load scores into vec; vec1 keeps a second copy used for verification.
const char *scr_file=argv[5];
// std::fstream;
// std::fin.open(scr_file, std::ios_base::binary|std::ios_base::in);
#ifdef readBinary
std::fstream bin_in(scr_file,std::ios_base::binary|std::ios_base::in);
int value;
for (unsigned int i=0;i<num_element;i++)
{
bin_in.read((char*)&value,sizeof(int));
vec[i]=value;
vec1[i]=vec[i];
}
for (int i=0;i<10;i++)
{
cout<<vec1[i]<<" ";
}
cout<<endl;
#else
// Text mode: read the first 2^26 scores, then tile them to fill vec.
FILE* fptr;
if ((fptr = fopen(scr_file,"r")) == NULL)
{
printf("Error! opening SCORE file");
exit(1);
}
char* str;
// fscanf(fptr,"%s", str);
for (index_t i=0;i<67108864;i++)
{
// NOTE(review): vec is unsigned int*, but "%f" writes a float into that
// storage before the integer scaling below reads it back -- this
// type-puns through fscanf; a float temporary was likely intended.
fscanf(fptr,"%f", &vec[i]);
if (i<10)
printf("%f\n",vec[i]);
vec[i] = (1-vec[i])*1000000000;
vec1[i]=vec[i];
}
int offset_j =0;
for (index_t i=67108864;i<num_element;i++)
{
vec[i] = vec[offset_j%67108864];
vec1[i]=vec[i];
offset_j++;
}
#endif
// const char * ${arr[1]}
// FILE* fptr;
// if ((fptr = fopen(scr_file,"r")) == NULL)
// {
// printf("Error! opening SCORE file");
// exit(1);
// }
// for (index_t i=0;i<num_element;i++)
// {
// fscanf(fptr,"%d", &vec[i]);
// vec1[i]=vec[i];
// }
//~Read the SCOREFILE
// std::uniform_int_distribution <unsigned int> d(0, 4294967295);
// for (int dis=3;dis<4;dis++)
// {
// for (index_t i=0;i<num_element;i++)
// {
// // vec[i]=rand()%2147483648;//2^31 -1
// value=d(gen);
// if (minvalue > value)
// {
// minvalue=value;
// }
// // if (value > 2147483650) test=true;
// if (value > 4294967295)
// {
// cout<<"Overflow of unsigned int detected"<<endl;
// return -1;
// }
// vec[i]=value;
// vec1[i]=vec[i];
// }
// if (minvalue < 0)
// {
// cout<<"-ve value detected:"<<minvalue<<endl;
// return -1;
// }
// cout<<"Minimum value:"<<minvalue<<endl;
// if (test) cout<<"Data generated Ok"<<endl;
// else
// cout<<"Data generated not Ok"<<endl;
// sort(vec, vec + num_element);
// for (int Kiteration=atol(argv[2]);Kiteration<536870912;Kiteration=Kiteration*2)
{
// k=Kiteration;
// index_t alpha=atol(argv[4]);
// int beta=3;//SampleBeta function is designed for Beta=3 only. So we need to update the SampleBetafunction in radix select if we want to change Beta value
// Radix-select configuration: buckets per digit and the starting
// (most significant) digit index.
index_t num_bucket=1<<NBits;
int CurrentDigit=(sizeof(data_t)*8/NBits)-1;
index_t NSubranges=num_element/SubRangesize;
int NthreadstoworkInreduction=32;
if (SubRangesize<32)
{
NthreadstoworkInreduction=SubRangesize;
}
cout<<"Number of Subranges:"<<NSubranges<<endl;
if (NSubranges<k)
{
cout<<"Small number of subranges!. Decrease the value of alpha!"<<endl;
// exit(-1);
}
if ((!IsPowerof2<data_t,index_t>(NBits)) || (NBits > sizeof(data_t)*8))
{
cout<<"Enter correct number of bits per digit"<<endl;
return -1;
}
// data_t* vec= (data_t*)malloc(sizeof(data_t)*num_element);//new data_t[num_element];
// data_t* vec1= (data_t*)malloc(sizeof(data_t)*num_element);//new data_t[num_element];
// std::random_device rd;
// std::mt19937 gen(rd());
// float value;
// float minvalue=10000000;
// for (index_t i=0;i<num_element;i++)
// {
// std::normal_distribution<float> d(10000000, sd[d]);//Mean =100 mill , sd=100
// // vec[i]=rand()%2147483648;//2^31 -1
// value=d(gen);
// if (minvalue > value)
// {
// minvalue=value;
// }
// if (value > 4294967295)
// {
// cout<<"Overflow of unsigned int detected"<<endl;
// }
// vec[i]=value;
// vec1[i]=vec[i];
// }
// cout<<endl;
// if (minvalue < 0)
// {
// cout<<"-ve value detected"<<endl;
// }
cout<<"Starting TopK with Npow:"<<num_pow<<" K:"<<k<<" alpha:"<<alpha<<"DistributionU(0,2^31-1)"<<endl;
std::fstream statusLog;
// timeLog.open("timeRadixSampleOCT11_N_K_alphaVaried.csv",std::fstream::out | std::fstream::app);
cout<<vec[0];
cout<<endl;
// Device-side buffers: input vector, per-subrange maxima, and the
// bookkeeping arrays used by the sampling variant.
data_t* TopArray=new data_t[k];
data_t TopKElement=0;
data_t* vec_d;
H_ERR(hipMalloc((void**) &vec_d,sizeof(data_t)*num_element));
H_ERR(hipMemcpy(vec_d,vec,sizeof(data_t)*num_element,hipMemcpyHostToDevice));
// raelse dix_select_inplace<data_t,index_t>(vec_d,num_element,k,num_bucket,TopKElement,NBits,CurrentDigit,0);
double timeforMaxsample=0;double timeforFirstTopk=0;double timeforSecondTopk=0;double timeforNormalRadixSelect=0;double timeforConcatenation=0;
data_t* Max_d;
H_ERR(hipMalloc((void**) &Max_d,sizeof(data_t)*NSubranges*beta));// updated for Beta
index_t* SubrangeId_d;
H_ERR(hipMalloc((void**) &SubrangeId_d,sizeof(index_t)*NSubranges*beta));//updated for beta
int NThreadsPerBlock=256;//only shared memory
// int NThreadsPerBlock=1024;//Shared memory with subwarp
int SizeOfSubWarp=8;
int pow_size_Subwarp=3;
// int NSharedMemoryElements=NThreadsPerBlock<<alpha;//only shared Memory
int NSharedMemoryElements=NThreadsPerBlock<<5;//3 is giving best result in different values of SubWarp size //Each thread responsible for 32 elements and contribute to 8 Subranges from a group of 4 elements
int SizeOfAllocation=NSharedMemoryElements+(NSharedMemoryElements >> 5);
//sampleMax_multirange<data_t,index_t><<<4096,512>>>(A_d,Max_d,N,NSubranges,SubRangesize,alpha,SubrangeId_d,Nthreadstowork, NSubrangesperWarp, SubWarpSize,NThreadsPerSubRange);
int NumberOfSpace_WithPadding=NSharedMemoryElements+(NSharedMemoryElements >>5);
int NSubRangesPerBlock=NSharedMemoryElements/SizeOfSubWarp;//can be in CPU
int NSubWarps_InBlock=NThreadsPerBlock >> pow_size_Subwarp;// Can be in CPU
//Note NTotalVirtualSubWarpsInBlock=NSubrangesDealtBy1Block as 1 subwarp is responsible for 1 Subrange
int NElementsPerBlock_ReadFromGlobal=NSubRangesPerBlock*SubRangesize;//1 Subwarp works for 1 subrange --> Can be in CPU
int TotalBlocksrequired=num_element/NElementsPerBlock_ReadFromGlobal;
if (TotalBlocksrequired<1)
{
cout<<"reduce blockDim or sizeofSubrange(alpha), for the kernel to work"<<endl;
exit(-1);
}
cout<<"Size of shared memory per block:"<<SizeOfAllocation*sizeof(data_t)/1024.0 <<"KB"<<endl;
// statusLog.open("Status_alpha_0_3_4_5_TotalSOK_Radix.csv",std::fstream::out | std::fstream::app);
statusLog.open("StatusFile.csv",std::fstream::out | std::fstream::app);
statusLog<<endl<<endl<<"Started Radix select with:2^"<<num_pow<<" elements "<<k<<" as Kth element and "<<alpha<<"as alpha!."<<"Distribution:U(0,2^31-1)"<<endl;
index_t* SelectedSubrangeId_d;
// H_ERR(hipMalloc((void**) &SelectedSubrangeId_d,sizeof(index_t)*k*beta));//updated *3 for beta
// H_ERR(hipMalloc((void**) &SelectedSubrangeId_d,sizeof(index_t)*k*beta));//updated *3 for beta
H_ERR(hipMalloc((void**) &SelectedSubrangeId_d,sizeof(index_t)*num_element));//When digit skip is enabled in first topk
index_t* CountSelectedSubrange_d;
index_t* CountLonelyElements_d;
H_ERR(hipMalloc((void**) &CountSelectedSubrange_d,sizeof(index_t)));
H_ERR(hipMalloc((void**) &CountLonelyElements_d,sizeof(index_t)));
H_ERR(hipMemset(CountSelectedSubrange_d, 0, sizeof(index_t)));
H_ERR(hipMemset(CountLonelyElements_d, 0, sizeof(index_t)));
data_t* ConcatenatedRange_d;
index_t* write_pos_d;
// H_ERR(hipMalloc((void**) &ConcatenatedRange_d,sizeof(data_t)*k*SubRangesize));
H_ERR(hipMalloc((void**) &ConcatenatedRange_d,sizeof(data_t)*num_element));// for skipping digits in first top-k
H_ERR(hipMalloc((void**) &write_pos_d,sizeof(index_t)));
// Run and time the chosen algorithm: plain radix select when alpha == 0,
// otherwise the subrange-sampling variant.
double start=wtime();
if (alpha==0)
{
timeforNormalRadixSelect=wtime();
radix_select<data_t,index_t>(vec_d,num_element,k,num_bucket,TopKElement,NBits,CurrentDigit);
timeforNormalRadixSelect=wtime()-timeforNormalRadixSelect;
}
else// if(NSubranges > k)
{
sample_radix_select<data_t,index_t>(vec_d,num_element,k,num_bucket,TopKElement,NBits,CurrentDigit,NSubranges,SubRangesize,alpha,timeforMaxsample,timeforFirstTopk,timeforSecondTopk,timeforConcatenation,Max_d,SubrangeId_d,NSharedMemoryElements,SizeOfSubWarp,pow_size_Subwarp,NSubWarps_InBlock,NSubRangesPerBlock,NElementsPerBlock_ReadFromGlobal,TotalBlocksrequired,SizeOfAllocation,NThreadsPerBlock,beta,defaultContribution,NthreadstoworkInreduction,SelectedSubrangeId_d,CountLonelyElements_d,write_pos_d,ConcatenatedRange_d,CountSelectedSubrange_d);
}
// else
// {
// timeforNormalRadixSelect=wtime();
// radix_select<data_t,index_t>(vec_d,num_element,k,num_bucket,TopKElement,NBits,CurrentDigit);
// timeforNormalRadixSelect=wtime()-timeforNormalRadixSelect;
// }
double totalTime=wtime()-start;
cout<<"The kth element from top is:"<<TopKElement<<endl;
statusLog<<"Successfully Finished Radix select with:2^"<<num_pow<<" elements "<<k<<" as Kth element and "<<alpha<<"as alpha!."<<endl;
statusLog.close();
cout<<"Sampling Time:"<<timeforMaxsample*1000<<" ms"<<endl;
cout<<"Time for First TopK:"<<timeforFirstTopk*1000<<" ms"<<endl;
cout<<"Time for Concatenation:"<<timeforConcatenation*1000<<" ms"<<endl;
cout<<"Time for Second TopK:"<<timeforSecondTopk*1000<<" ms"<<endl;
cout<<"Time for Normal Radix Select:"<<timeforNormalRadixSelect*1000<<" ms"<<endl;
cout<<"Total Time:"<<totalTime*1000<<" ms"<<endl;
// Optional CPU verification: sort the untouched copy and compare the
// (n-k)-th element against the GPU result.
#ifdef Enabletest
sort(vec1, vec1 + num_element);
cout<<endl;
if (vec1[num_element-k]==TopKElement)
{
cout<<"Success!"<<endl;
}
else
{
cout<<"Not Success!"<<endl;
}
cout<<"Required value"<<vec1[num_element-k]<<endl;
assert(vec1[num_element-k]==TopKElement);
#endif
// Append a semicolon-separated timing record to the user-supplied log.
std::fstream timeLog;
// timeLog.open("N_29UniformDistributedAutoTuneAdaptive22Feb_TitanSorted_Unsorted.csv",std::fstream::out | std::fstream::app);
// timeLog.open("N_29TestingFor2^32.csv",std::fstream::out | std::fstream::app);
// timeLog.open("U_VectorSize_VaryingK2^29.csv",std::fstream::out | std::fstream::app);
// timeLog.open("FirstTopK3DigitsSkipped_NORMAL_ALL_K.csv",std::fstream::out | std::fstream::app);
timeLog.open(argv[6],std::fstream::out | std::fstream::app);
// timeLog.open("FirstTopK3DigitsSkipped_Uniform.csv",std::fstream::out | std::fstream::app);
// timeLog.open("U_beta_tuning.csv",std::fstream::out | std::fstream::app);
if (defaultContribution)
{
timeLog<<"D"<<";";
}
else
{
timeLog<<"B"<<";";
}
timeLog<<" "<<num_pow<<";"<<k<<";"<<alpha<<";"<<beta<<";"<<timeforMaxsample*1000<<";"<<timeforFirstTopk*1000<<";"<<timeforConcatenation*1000<<";"<<timeforSecondTopk*1000<<";"<<timeforNormalRadixSelect*1000<<";"<<totalTime*1000<<endl;
timeLog.close();
// H_ERR(hipFree(vec_d));H_ERR(hipFree(Max_d));H_ERR(hipFree(SubrangeId_d));
}
}
// free(vec);free(vec1);
return 0;
}
| 2279c4b5365e00cf0b6909ea63b619e9d023f117.cu | #include <iostream>
#include <stdlib.h>
#include <algorithm>
#include "radixselect.cuh"
//#include "radixselectNormalInplaceWorking.cuh"
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <fstream>
#include <random>
// #define readBinary 1
//#include <random>
// #define Enabletest 1
using namespace std;
typedef unsigned int data_t;
typedef int index_t;
/**
 * qsort-style comparator for int values, ascending order.
 * Returns <0, 0, >0 as *a is less than, equal to, or greater than *b.
 * The original subtraction idiom overflows (UB) when the operands have
 * opposite signs and large magnitude; the three-way form cannot overflow.
 */
int compare (const void * a, const void * b)
{
    int x = *(const int*)a;
    int y = *(const int*)b;
    return (x > y) - (x < y); // in ascending order
}
// Computes x raised to the n-th power by repeated multiplication (n >= 0).
// data_t is unused; it is kept so existing call sites keep compiling.
template<typename data_t,typename index_t>
index_t power(index_t x,index_t n)
{
    index_t result = 1;
    for (index_t remaining = n; remaining > 0; --remaining)
    {
        result *= x;
    }
    return result;
}
// Widens the caller-seeded running max/min with elements arr[1..n-1].
// The caller is expected to have initialized max and min (e.g. to arr[0])
// before calling, since the scan deliberately starts at index 1.
void getminmax(data_t* arr,index_t n,data_t& max,data_t& min)
{
    for (index_t idx = 1; idx < n; ++idx)
    {
        data_t v = arr[idx];
        if (v > max)
        {
            max = v;
        }
        if (v < min)
        {
            min = v;
        }
    }
}
// True iff x has exactly one set bit; zero is not considered a power of two.
// data_t is unused; it is kept so existing call sites keep compiling.
template<typename data_t,typename index_t>
bool IsPowerof2(index_t x)
{
    if (x == 0)
    {
        return false;
    }
    return (x & (x - 1)) == 0;
}
// Benchmark driver for GPU radix-based top-k selection (CUDA version).
// Usage: ./exe num_element_pow k NBitsPerDigit beta scoreFile output_file
// Loads 2^num_element_pow scores from scoreFile, runs either plain radix
// select (alpha == 0) or the subrange-sampling variant, then appends the
// timing breakdown to output_file.
int main(int argc,char** argv)
{
cout<<"./exe num_element k NBitsPerDigit beta scoreFile output_file"<<endl;
cout<<"Size of unsigned int"<<sizeof(unsigned int)<<endl;
if (argc != 7) {cout<<"wrong input"<<endl;exit(-1);}
index_t num_pow = atol(argv[1]);
index_t base=2;
index_t num_element = power<data_t,index_t>(base,num_pow);
cout<<"num_element: "<<num_element<<endl;
index_t k= atol(argv[2]);
index_t NBits=atol(argv[3]);//atol(argv[3]);
int sd[]={10,100000,1000000,100,100000000};
int beta=atoi(argv[4]);//SampleBeta function is designed for Beta=3 only. So we need to update the SampleBetafunction in radix select if we want to change Beta value
// H_ERR(cudaSetDevice(1));
data_t* vec= (data_t*)malloc(sizeof(data_t)*num_element);//new data_t[num_element];
data_t* vec1= (data_t*)malloc(sizeof(data_t)*num_element);//new data_t[num_element];
std::random_device rd;
std::mt19937 gen(rd());
unsigned int value;
int over;
int minvalue=2147483643;
bool test=false;
// Heuristic subrange exponent: subranges have size 2^alpha, derived from
// the problem size and k.
index_t alpha=0.5*(num_pow-log(k)/log(2)+3);
cout<<"Calculated alpha: "<<alpha<<endl;
bool defaultContribution=true;
if (alpha <=5) defaultContribution=false;
index_t SubRangesize=pow(2,alpha);
// for (int dis=3;dis<4;dis++)
{
// std::uniform_int_distribution <unsigned int> d(0, 2147483643);
int minvalue=2147483643;
// std::normal_distribution<float> d(100000000, 10);//Mean =100 mill , sd=100
//std::normal_distribution<float> d(100000000, 10000000);//Mean =100 mill , sd=100
//Read the SCOREFILE
// unsigned int* score = (int*) malloc (num_element*sizeof(unsigned int));
// Load scores into vec; vec1 keeps a second copy used for verification.
const char *scr_file=argv[5];
// std::fstream;
// std::fin.open(scr_file, std::ios_base::binary|std::ios_base::in);
#ifdef readBinary
std::fstream bin_in(scr_file,std::ios_base::binary|std::ios_base::in);
int value;
for (unsigned int i=0;i<num_element;i++)
{
bin_in.read((char*)&value,sizeof(int));
vec[i]=value;
vec1[i]=vec[i];
}
for (int i=0;i<10;i++)
{
cout<<vec1[i]<<" ";
}
cout<<endl;
#else
// Text mode: read the first 2^26 scores, then tile them to fill vec.
FILE* fptr;
if ((fptr = fopen(scr_file,"r")) == NULL)
{
printf("Error! opening SCORE file");
exit(1);
}
char* str;
// fscanf(fptr,"%s", str);
for (index_t i=0;i<67108864;i++)
{
// NOTE(review): vec is unsigned int*, but "%f" writes a float into that
// storage before the integer scaling below reads it back -- this
// type-puns through fscanf; a float temporary was likely intended.
fscanf(fptr,"%f", &vec[i]);
if (i<10)
printf("%f\n",vec[i]);
vec[i] = (1-vec[i])*1000000000;
vec1[i]=vec[i];
}
int offset_j =0;
for (index_t i=67108864;i<num_element;i++)
{
vec[i] = vec[offset_j%67108864];
vec1[i]=vec[i];
offset_j++;
}
#endif
// const char * ${arr[1]}
// FILE* fptr;
// if ((fptr = fopen(scr_file,"r")) == NULL)
// {
// printf("Error! opening SCORE file");
// exit(1);
// }
// for (index_t i=0;i<num_element;i++)
// {
// fscanf(fptr,"%d", &vec[i]);
// vec1[i]=vec[i];
// }
//~Read the SCOREFILE
// std::uniform_int_distribution <unsigned int> d(0, 4294967295);
// for (int dis=3;dis<4;dis++)
// {
// for (index_t i=0;i<num_element;i++)
// {
// // vec[i]=rand()%2147483648;//2^31 -1
// value=d(gen);
// if (minvalue > value)
// {
// minvalue=value;
// }
// // if (value > 2147483650) test=true;
// if (value > 4294967295)
// {
// cout<<"Overflow of unsigned int detected"<<endl;
// return -1;
// }
// vec[i]=value;
// vec1[i]=vec[i];
// }
// if (minvalue < 0)
// {
// cout<<"-ve value detected:"<<minvalue<<endl;
// return -1;
// }
// cout<<"Minimum value:"<<minvalue<<endl;
// if (test) cout<<"Data generated Ok"<<endl;
// else
// cout<<"Data generated not Ok"<<endl;
// sort(vec, vec + num_element);
// for (int Kiteration=atol(argv[2]);Kiteration<536870912;Kiteration=Kiteration*2)
{
// k=Kiteration;
// index_t alpha=atol(argv[4]);
// int beta=3;//SampleBeta function is designed for Beta=3 only. So we need to update the SampleBetafunction in radix select if we want to change Beta value
// Radix-select configuration: buckets per digit and the starting
// (most significant) digit index.
index_t num_bucket=1<<NBits;
int CurrentDigit=(sizeof(data_t)*8/NBits)-1;
index_t NSubranges=num_element/SubRangesize;
int NthreadstoworkInreduction=32;
if (SubRangesize<32)
{
NthreadstoworkInreduction=SubRangesize;
}
cout<<"Number of Subranges:"<<NSubranges<<endl;
if (NSubranges<k)
{
cout<<"Small number of subranges!. Decrease the value of alpha!"<<endl;
// exit(-1);
}
if ((!IsPowerof2<data_t,index_t>(NBits)) || (NBits > sizeof(data_t)*8))
{
cout<<"Enter correct number of bits per digit"<<endl;
return -1;
}
// data_t* vec= (data_t*)malloc(sizeof(data_t)*num_element);//new data_t[num_element];
// data_t* vec1= (data_t*)malloc(sizeof(data_t)*num_element);//new data_t[num_element];
// std::random_device rd;
// std::mt19937 gen(rd());
// float value;
// float minvalue=10000000;
// for (index_t i=0;i<num_element;i++)
// {
// std::normal_distribution<float> d(10000000, sd[d]);//Mean =100 mill , sd=100
// // vec[i]=rand()%2147483648;//2^31 -1
// value=d(gen);
// if (minvalue > value)
// {
// minvalue=value;
// }
// if (value > 4294967295)
// {
// cout<<"Overflow of unsigned int detected"<<endl;
// }
// vec[i]=value;
// vec1[i]=vec[i];
// }
// cout<<endl;
// if (minvalue < 0)
// {
// cout<<"-ve value detected"<<endl;
// }
cout<<"Starting TopK with Npow:"<<num_pow<<" K:"<<k<<" alpha:"<<alpha<<"DistributionU(0,2^31-1)"<<endl;
std::fstream statusLog;
// timeLog.open("timeRadixSampleOCT11_N_K_alphaVaried.csv",std::fstream::out | std::fstream::app);
cout<<vec[0];
cout<<endl;
// Device-side buffers: input vector, per-subrange maxima, and the
// bookkeeping arrays used by the sampling variant.
data_t* TopArray=new data_t[k];
data_t TopKElement=0;
data_t* vec_d;
H_ERR(cudaMalloc((void**) &vec_d,sizeof(data_t)*num_element));
H_ERR(cudaMemcpy(vec_d,vec,sizeof(data_t)*num_element,cudaMemcpyHostToDevice));
// raelse dix_select_inplace<data_t,index_t>(vec_d,num_element,k,num_bucket,TopKElement,NBits,CurrentDigit,0);
double timeforMaxsample=0;double timeforFirstTopk=0;double timeforSecondTopk=0;double timeforNormalRadixSelect=0;double timeforConcatenation=0;
data_t* Max_d;
H_ERR(cudaMalloc((void**) &Max_d,sizeof(data_t)*NSubranges*beta));// updated for Beta
index_t* SubrangeId_d;
H_ERR(cudaMalloc((void**) &SubrangeId_d,sizeof(index_t)*NSubranges*beta));//updated for beta
int NThreadsPerBlock=256;//only shared memory
// int NThreadsPerBlock=1024;//Shared memory with subwarp
int SizeOfSubWarp=8;
int pow_size_Subwarp=3;
// int NSharedMemoryElements=NThreadsPerBlock<<alpha;//only shared Memory
int NSharedMemoryElements=NThreadsPerBlock<<5;//3 is giving best result in different values of SubWarp size //Each thread responsible for 32 elements and contribute to 8 Subranges from a group of 4 elements
int SizeOfAllocation=NSharedMemoryElements+(NSharedMemoryElements >> 5);
//sampleMax_multirange<data_t,index_t><<<4096,512>>>(A_d,Max_d,N,NSubranges,SubRangesize,alpha,SubrangeId_d,Nthreadstowork, NSubrangesperWarp, SubWarpSize,NThreadsPerSubRange);
int NumberOfSpace_WithPadding=NSharedMemoryElements+(NSharedMemoryElements >>5);
int NSubRangesPerBlock=NSharedMemoryElements/SizeOfSubWarp;//can be in CPU
int NSubWarps_InBlock=NThreadsPerBlock >> pow_size_Subwarp;// Can be in CPU
//Note NTotalVirtualSubWarpsInBlock=NSubrangesDealtBy1Block as 1 subwarp is responsible for 1 Subrange
int NElementsPerBlock_ReadFromGlobal=NSubRangesPerBlock*SubRangesize;//1 Subwarp works for 1 subrange --> Can be in CPU
int TotalBlocksrequired=num_element/NElementsPerBlock_ReadFromGlobal;
if (TotalBlocksrequired<1)
{
cout<<"reduce blockDim or sizeofSubrange(alpha), for the kernel to work"<<endl;
exit(-1);
}
cout<<"Size of shared memory per block:"<<SizeOfAllocation*sizeof(data_t)/1024.0 <<"KB"<<endl;
// statusLog.open("Status_alpha_0_3_4_5_TotalSOK_Radix.csv",std::fstream::out | std::fstream::app);
statusLog.open("StatusFile.csv",std::fstream::out | std::fstream::app);
statusLog<<endl<<endl<<"Started Radix select with:2^"<<num_pow<<" elements "<<k<<" as Kth element and "<<alpha<<"as alpha!."<<"Distribution:U(0,2^31-1)"<<endl;
index_t* SelectedSubrangeId_d;
// H_ERR(cudaMalloc((void**) &SelectedSubrangeId_d,sizeof(index_t)*k*beta));//updated *3 for beta
// H_ERR(cudaMalloc((void**) &SelectedSubrangeId_d,sizeof(index_t)*k*beta));//updated *3 for beta
H_ERR(cudaMalloc((void**) &SelectedSubrangeId_d,sizeof(index_t)*num_element));//When digit skip is enabled in first topk
index_t* CountSelectedSubrange_d;
index_t* CountLonelyElements_d;
H_ERR(cudaMalloc((void**) &CountSelectedSubrange_d,sizeof(index_t)));
H_ERR(cudaMalloc((void**) &CountLonelyElements_d,sizeof(index_t)));
H_ERR(cudaMemset(CountSelectedSubrange_d, 0, sizeof(index_t)));
H_ERR(cudaMemset(CountLonelyElements_d, 0, sizeof(index_t)));
data_t* ConcatenatedRange_d;
index_t* write_pos_d;
// H_ERR(cudaMalloc((void**) &ConcatenatedRange_d,sizeof(data_t)*k*SubRangesize));
H_ERR(cudaMalloc((void**) &ConcatenatedRange_d,sizeof(data_t)*num_element));// for skipping digits in first top-k
H_ERR(cudaMalloc((void**) &write_pos_d,sizeof(index_t)));
// Run and time the chosen algorithm: plain radix select when alpha == 0,
// otherwise the subrange-sampling variant.
double start=wtime();
if (alpha==0)
{
timeforNormalRadixSelect=wtime();
radix_select<data_t,index_t>(vec_d,num_element,k,num_bucket,TopKElement,NBits,CurrentDigit);
timeforNormalRadixSelect=wtime()-timeforNormalRadixSelect;
}
else// if(NSubranges > k)
{
sample_radix_select<data_t,index_t>(vec_d,num_element,k,num_bucket,TopKElement,NBits,CurrentDigit,NSubranges,SubRangesize,alpha,timeforMaxsample,timeforFirstTopk,timeforSecondTopk,timeforConcatenation,Max_d,SubrangeId_d,NSharedMemoryElements,SizeOfSubWarp,pow_size_Subwarp,NSubWarps_InBlock,NSubRangesPerBlock,NElementsPerBlock_ReadFromGlobal,TotalBlocksrequired,SizeOfAllocation,NThreadsPerBlock,beta,defaultContribution,NthreadstoworkInreduction,SelectedSubrangeId_d,CountLonelyElements_d,write_pos_d,ConcatenatedRange_d,CountSelectedSubrange_d);
}
// else
// {
// timeforNormalRadixSelect=wtime();
// radix_select<data_t,index_t>(vec_d,num_element,k,num_bucket,TopKElement,NBits,CurrentDigit);
// timeforNormalRadixSelect=wtime()-timeforNormalRadixSelect;
// }
double totalTime=wtime()-start;
cout<<"The kth element from top is:"<<TopKElement<<endl;
statusLog<<"Successfully Finished Radix select with:2^"<<num_pow<<" elements "<<k<<" as Kth element and "<<alpha<<"as alpha!."<<endl;
statusLog.close();
cout<<"Sampling Time:"<<timeforMaxsample*1000<<" ms"<<endl;
cout<<"Time for First TopK:"<<timeforFirstTopk*1000<<" ms"<<endl;
cout<<"Time for Concatenation:"<<timeforConcatenation*1000<<" ms"<<endl;
cout<<"Time for Second TopK:"<<timeforSecondTopk*1000<<" ms"<<endl;
cout<<"Time for Normal Radix Select:"<<timeforNormalRadixSelect*1000<<" ms"<<endl;
cout<<"Total Time:"<<totalTime*1000<<" ms"<<endl;
// Optional CPU verification: sort the untouched copy and compare the
// (n-k)-th element against the GPU result.
#ifdef Enabletest
sort(vec1, vec1 + num_element);
cout<<endl;
if (vec1[num_element-k]==TopKElement)
{
cout<<"Success!"<<endl;
}
else
{
cout<<"Not Success!"<<endl;
}
cout<<"Required value"<<vec1[num_element-k]<<endl;
assert(vec1[num_element-k]==TopKElement);
#endif
// Append a semicolon-separated timing record to the user-supplied log.
std::fstream timeLog;
// timeLog.open("N_29UniformDistributedAutoTuneAdaptive22Feb_TitanSorted_Unsorted.csv",std::fstream::out | std::fstream::app);
// timeLog.open("N_29TestingFor2^32.csv",std::fstream::out | std::fstream::app);
// timeLog.open("U_VectorSize_VaryingK2^29.csv",std::fstream::out | std::fstream::app);
// timeLog.open("FirstTopK3DigitsSkipped_NORMAL_ALL_K.csv",std::fstream::out | std::fstream::app);
timeLog.open(argv[6],std::fstream::out | std::fstream::app);
// timeLog.open("FirstTopK3DigitsSkipped_Uniform.csv",std::fstream::out | std::fstream::app);
// timeLog.open("U_beta_tuning.csv",std::fstream::out | std::fstream::app);
if (defaultContribution)
{
timeLog<<"D"<<";";
}
else
{
timeLog<<"B"<<";";
}
timeLog<<" "<<num_pow<<";"<<k<<";"<<alpha<<";"<<beta<<";"<<timeforMaxsample*1000<<";"<<timeforFirstTopk*1000<<";"<<timeforConcatenation*1000<<";"<<timeforSecondTopk*1000<<";"<<timeforNormalRadixSelect*1000<<";"<<totalTime*1000<<endl;
timeLog.close();
// H_ERR(cudaFree(vec_d));H_ERR(cudaFree(Max_d));H_ERR(cudaFree(SubrangeId_d));
}
}
// free(vec);free(vec1);
return 0;
}
|
9e8d990fcebc3a952efab680e322ad5dcf36f1c1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// This file is distributed under the MIT license.
// See the LICENSE file for details.
#include <cstdint>
#include "Arithmetic_cuda.hpp"
#include "StructuredVolumeView.hpp"
namespace vkt
{
template <typename Func>
__global__ void ArithmeticOp_kernel(
StructuredVolumeView dest,
StructuredVolumeView source1,
StructuredVolumeView source2,
Vec3i first,
Vec3i last,
Vec3i dstOffset,
Func func
)
{
int nx = last.x - first.x;
int ny = last.y - first.y;
int nz = last.z - first.z;
int x = (blockIdx.x * blockDim.x + threadIdx.x) - first.x;
int y = (blockIdx.y * blockDim.y + threadIdx.y) - first.y;
int z = (blockIdx.z * blockDim.z + threadIdx.z) - first.z;
if (x < nx && y < ny && z < nz)
{
float val1;
float val2;
source1.getValue(x, y, z, val1);
source2.getValue(x, y, z, val2);
float val3 = func(val1, val2);
dest.setValue(
x + dstOffset.x,
y + dstOffset.y,
z + dstOffset.z,
val3
);
}
}
template <typename Func>
void ArithmeticOp(
StructuredVolume& dest,
StructuredVolume& source1,
StructuredVolume& source2,
Vec3i first,
Vec3i last,
Vec3i dstOffset,
Func func
)
{
unsigned nx = last.x - first.x;
unsigned ny = last.y - first.y;
unsigned nz = last.z - first.z;
dim3 blockSize(8, 8, 8);
dim3 gridSize(
div_up(nx, blockSize.x),
div_up(ny, blockSize.y),
div_up(nz, blockSize.z)
);
hipLaunchKernelGGL(( ArithmeticOp_kernel), dim3(gridSize), dim3(blockSize), 0, 0,
StructuredVolumeView(dest),
StructuredVolumeView(source1),
StructuredVolumeView(source2),
first,
last,
dstOffset,
func
);
}
void SumRange_cuda(
StructuredVolume& dest,
StructuredVolume& source1,
StructuredVolume& source2,
Vec3i first,
Vec3i last,
Vec3i dstOffset
)
{
ArithmeticOp(
dest,
source1,
source2,
first,
last,
dstOffset,
[] __device__ (float f1, float f2) { return f1 + f2; }
);
}
void DiffRange_cuda(
StructuredVolume& dest,
StructuredVolume& source1,
StructuredVolume& source2,
Vec3i first,
Vec3i last,
Vec3i dstOffset
)
{
ArithmeticOp(
dest,
source1,
source2,
first,
last,
dstOffset,
[] __device__ (float f1, float f2) { return f1 - f2; }
);
}
void ProdRange_cuda(
StructuredVolume& dest,
StructuredVolume& source1,
StructuredVolume& source2,
Vec3i first,
Vec3i last,
Vec3i dstOffset
)
{
ArithmeticOp(
dest,
source1,
source2,
first,
last,
dstOffset,
[] __device__ (float f1, float f2) { return f1 * f2; }
);
}
void QuotRange_cuda(
StructuredVolume& dest,
StructuredVolume& source1,
StructuredVolume& source2,
Vec3i first,
Vec3i last,
Vec3i dstOffset
)
{
ArithmeticOp(
dest,
source1,
source2,
first,
last,
dstOffset,
[] __device__ (float f1, float f2) { return f1 / f2; }
);
}
void AbsDiffRange_cuda(
StructuredVolume& dest,
StructuredVolume& source1,
StructuredVolume& source2,
Vec3i first,
Vec3i last,
Vec3i dstOffset
)
{
ArithmeticOp(
dest,
source1,
source2,
first,
last,
dstOffset,
[] __device__ (float f1, float f2) { return fabsf(f1 - f2); }
);
}
void SafeSumRange_cuda(
StructuredVolume& dest,
StructuredVolume& source1,
StructuredVolume& source2,
Vec3i first,
Vec3i last,
Vec3i dstOffset
)
{
float lo = dest.getVoxelMapping().x;
float hi = dest.getVoxelMapping().y;
ArithmeticOp(
dest,
source1,
source2,
first,
last,
dstOffset,
[lo, hi] __device__ (float f1, float f2) { return clamp(f1 + f2, lo, hi); }
);
}
void SafeDiffRange_cuda(
StructuredVolume& dest,
StructuredVolume& source1,
StructuredVolume& source2,
Vec3i first,
Vec3i last,
Vec3i dstOffset
)
{
float lo = dest.getVoxelMapping().x;
float hi = dest.getVoxelMapping().y;
ArithmeticOp(
dest,
source1,
source2,
first,
last,
dstOffset,
[lo, hi] __device__ (float f1, float f2) { return clamp(f1 - f2, lo, hi); }
);
}
void SafeProdRange_cuda(
StructuredVolume& dest,
StructuredVolume& source1,
StructuredVolume& source2,
Vec3i first,
Vec3i last,
Vec3i dstOffset
)
{
float lo = dest.getVoxelMapping().x;
float hi = dest.getVoxelMapping().y;
ArithmeticOp(
dest,
source1,
source2,
first,
last,
dstOffset,
[lo, hi] __device__ (float f1, float f2) { return clamp(f1 * f2, lo, hi); }
);
}
void SafeQuotRange_cuda(
StructuredVolume& dest,
StructuredVolume& source1,
StructuredVolume& source2,
Vec3i first,
Vec3i last,
Vec3i dstOffset
)
{
float lo = dest.getVoxelMapping().x;
float hi = dest.getVoxelMapping().y;
ArithmeticOp(
dest,
source1,
source2,
first,
last,
dstOffset,
[lo, hi] __device__ (float f1, float f2) { return clamp(f1 / f2, lo, hi); }
);
}
void SafeAbsDiffRange_cuda(
StructuredVolume& dest,
StructuredVolume& source1,
StructuredVolume& source2,
Vec3i first,
Vec3i last,
Vec3i dstOffset
)
{
float lo = dest.getVoxelMapping().x;
float hi = dest.getVoxelMapping().y;
ArithmeticOp(
dest,
source1,
source2,
first,
last,
dstOffset,
[lo, hi] __device__ (float f1, float f2) { return clamp(fabsf(f1 - f2), lo, hi); }
);
}
} // vkt
| 9e8d990fcebc3a952efab680e322ad5dcf36f1c1.cu | // This file is distributed under the MIT license.
// See the LICENSE file for details.
#include <cstdint>
#include "Arithmetic_cuda.hpp"
#include "StructuredVolumeView.hpp"
namespace vkt
{
template <typename Func>
__global__ void ArithmeticOp_kernel(
StructuredVolumeView dest,
StructuredVolumeView source1,
StructuredVolumeView source2,
Vec3i first,
Vec3i last,
Vec3i dstOffset,
Func func
)
{
int nx = last.x - first.x;
int ny = last.y - first.y;
int nz = last.z - first.z;
int x = (blockIdx.x * blockDim.x + threadIdx.x) - first.x;
int y = (blockIdx.y * blockDim.y + threadIdx.y) - first.y;
int z = (blockIdx.z * blockDim.z + threadIdx.z) - first.z;
if (x < nx && y < ny && z < nz)
{
float val1;
float val2;
source1.getValue(x, y, z, val1);
source2.getValue(x, y, z, val2);
float val3 = func(val1, val2);
dest.setValue(
x + dstOffset.x,
y + dstOffset.y,
z + dstOffset.z,
val3
);
}
}
template <typename Func>
void ArithmeticOp(
StructuredVolume& dest,
StructuredVolume& source1,
StructuredVolume& source2,
Vec3i first,
Vec3i last,
Vec3i dstOffset,
Func func
)
{
unsigned nx = last.x - first.x;
unsigned ny = last.y - first.y;
unsigned nz = last.z - first.z;
dim3 blockSize(8, 8, 8);
dim3 gridSize(
div_up(nx, blockSize.x),
div_up(ny, blockSize.y),
div_up(nz, blockSize.z)
);
ArithmeticOp_kernel<<<gridSize, blockSize>>>(
StructuredVolumeView(dest),
StructuredVolumeView(source1),
StructuredVolumeView(source2),
first,
last,
dstOffset,
func
);
}
void SumRange_cuda(
StructuredVolume& dest,
StructuredVolume& source1,
StructuredVolume& source2,
Vec3i first,
Vec3i last,
Vec3i dstOffset
)
{
ArithmeticOp(
dest,
source1,
source2,
first,
last,
dstOffset,
[] __device__ (float f1, float f2) { return f1 + f2; }
);
}
void DiffRange_cuda(
StructuredVolume& dest,
StructuredVolume& source1,
StructuredVolume& source2,
Vec3i first,
Vec3i last,
Vec3i dstOffset
)
{
ArithmeticOp(
dest,
source1,
source2,
first,
last,
dstOffset,
[] __device__ (float f1, float f2) { return f1 - f2; }
);
}
void ProdRange_cuda(
StructuredVolume& dest,
StructuredVolume& source1,
StructuredVolume& source2,
Vec3i first,
Vec3i last,
Vec3i dstOffset
)
{
ArithmeticOp(
dest,
source1,
source2,
first,
last,
dstOffset,
[] __device__ (float f1, float f2) { return f1 * f2; }
);
}
void QuotRange_cuda(
StructuredVolume& dest,
StructuredVolume& source1,
StructuredVolume& source2,
Vec3i first,
Vec3i last,
Vec3i dstOffset
)
{
ArithmeticOp(
dest,
source1,
source2,
first,
last,
dstOffset,
[] __device__ (float f1, float f2) { return f1 / f2; }
);
}
void AbsDiffRange_cuda(
StructuredVolume& dest,
StructuredVolume& source1,
StructuredVolume& source2,
Vec3i first,
Vec3i last,
Vec3i dstOffset
)
{
ArithmeticOp(
dest,
source1,
source2,
first,
last,
dstOffset,
[] __device__ (float f1, float f2) { return fabsf(f1 - f2); }
);
}
void SafeSumRange_cuda(
StructuredVolume& dest,
StructuredVolume& source1,
StructuredVolume& source2,
Vec3i first,
Vec3i last,
Vec3i dstOffset
)
{
float lo = dest.getVoxelMapping().x;
float hi = dest.getVoxelMapping().y;
ArithmeticOp(
dest,
source1,
source2,
first,
last,
dstOffset,
[lo, hi] __device__ (float f1, float f2) { return clamp(f1 + f2, lo, hi); }
);
}
void SafeDiffRange_cuda(
StructuredVolume& dest,
StructuredVolume& source1,
StructuredVolume& source2,
Vec3i first,
Vec3i last,
Vec3i dstOffset
)
{
float lo = dest.getVoxelMapping().x;
float hi = dest.getVoxelMapping().y;
ArithmeticOp(
dest,
source1,
source2,
first,
last,
dstOffset,
[lo, hi] __device__ (float f1, float f2) { return clamp(f1 - f2, lo, hi); }
);
}
void SafeProdRange_cuda(
StructuredVolume& dest,
StructuredVolume& source1,
StructuredVolume& source2,
Vec3i first,
Vec3i last,
Vec3i dstOffset
)
{
float lo = dest.getVoxelMapping().x;
float hi = dest.getVoxelMapping().y;
ArithmeticOp(
dest,
source1,
source2,
first,
last,
dstOffset,
[lo, hi] __device__ (float f1, float f2) { return clamp(f1 * f2, lo, hi); }
);
}
void SafeQuotRange_cuda(
StructuredVolume& dest,
StructuredVolume& source1,
StructuredVolume& source2,
Vec3i first,
Vec3i last,
Vec3i dstOffset
)
{
float lo = dest.getVoxelMapping().x;
float hi = dest.getVoxelMapping().y;
ArithmeticOp(
dest,
source1,
source2,
first,
last,
dstOffset,
[lo, hi] __device__ (float f1, float f2) { return clamp(f1 / f2, lo, hi); }
);
}
void SafeAbsDiffRange_cuda(
StructuredVolume& dest,
StructuredVolume& source1,
StructuredVolume& source2,
Vec3i first,
Vec3i last,
Vec3i dstOffset
)
{
float lo = dest.getVoxelMapping().x;
float hi = dest.getVoxelMapping().y;
ArithmeticOp(
dest,
source1,
source2,
first,
last,
dstOffset,
[lo, hi] __device__ (float f1, float f2) { return clamp(fabsf(f1 - f2), lo, hi); }
);
}
} // vkt
|
0c3adba26aaf8321e0ad0e3ca520dfc646036482.hip | // !!! This is a file automatically generated by hipify!!!
///sta programa calcula la versin paralelizada del algoritmo FFT_DIF_DIT_TD
///(29/12/2016)
///sta versin sirve para graficar en matlab los tiempos de ejecucin, considerando N = (2^5 x 3^4 x 5^4), Li = 800,000 y Lo = vara
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hipfft.h>
#include <cufftw.h>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_complex.h>
#include <math.h>
#include <math_constants.h>
#include <iostream>
#include <time.h>
//////////////////////////////////////////////////////////////////////////
///////////////////////DECLARACIN DE FUNCIONES///////////////////////////
//////////////////////////////////////////////////////////////////////////
void vector_entrada_xn(int Li, int N);
void arreglo_W(int N);
void asign_rap(int N,int Li,int Lo);
void factor(int N);
void product(int vector_1[500],int vector_2[500],int valor);
void etapa_entrada(void);
__global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,cuFloatComplex *x,cuFloatComplex *W,cuFloatComplex *y);
void etapa_intermedia(void);
void etapa_salida(void);
__global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,cuFloatComplex *z,cuFloatComplex *W,cuFloatComplex *X);
//////////////////////////////////////////////////////////////////////////
/////////////////////DECLARACIN DE VARIABLES GLOBALES////////////////////
//////////////////////////////////////////////////////////////////////////
cuFloatComplex *x_host;
cuFloatComplex *W_host;
//cuFloatComplex *y_host;
//cuFloatComplex *z_host;
cuFloatComplex *X_host;
cuFloatComplex *x_device;
cuFloatComplex *W_device;
cuFloatComplex *y_device;
cuFloatComplex *z_device;
cuFloatComplex *X_device;
hipfftComplex *in,*out;
FILE *db_open,*dc_open;
int Dip,Dop,P,N,Li,Lo;
int vF[500]; //Almacena los factores de N
int svF; //Almacena el numero de factores de N
int Prod[500];
int a;
#define inf 99999
//////////////////////////////////////////////////////////////////////////
//////////////////////////DATOS DE ENTRADA////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// N >>> Nmero de elementos del vector de entrada
/// Li >>> Nmero de elementos de entrada diferentes de cero
/// Lo >>> Nmero de elementos de salida requeridos
/// loop >>> Nmero de iteraciones
/// muestras >>> Nmero de muestras
//////////////////////////////////////////////////////////////////////////
///////////////////////////DATOS DE SALIDA////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// X >>> Vector de salida
//////////////////////////////////////////////////////////////////////////
/////////////////// SE INGRESAN LOS DATOS DE ENTRADA /////////////////////
//////////////////////////////////////////////////////////////////////////
///Ingrese el nmero de iteraciones requeridas
const int loop = 300;
///Ingrese el valor de n_max, m_max y l_max (N = (2^n_max x 3^m_max x 5^l_max))
const int n_max = 5;
const int m_max = 4;
const int l_max = 4;
///Ingrese el valor de Li_max
const int Li_max = 800000;
//////////////////////////////////////////////////////////////////////////
//////////////////////////FUNCION PRINCIPAL///////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Funcin principal
int main()
{
//////////////////////////////////////////////////////////////////////////
//////////////////////////SELECCIN DEL DEVICE////////////////////////////
//////////////////////////////////////////////////////////////////////////
int device;
FILE *da;
hipSetDevice(1);
hipGetDevice(&device);
if(device == 0)
{
printf("\n\n---DEVICE = GeForce GTX 970---\n\n");
da = fopen("Tiempos_NCompuesta_Li850000_LoVARIA_CUDA_GTX970.bin","a+b"); //Crea o sobre escribe archivo
}
if(device == 1)
{
printf("\n\n---DEVICE = TESLA K20---\n\n");
da = fopen("Tiempos_NCompuesta_Li850000_LoVARIA_CUDA_TESLAK20c.bin","a+b"); //Crea o sobre escribe archivo
}
//////////////////////////////////////////////////////////////////////////
int i,j,i_N,j_res,k_res,cont,i_prom;
float suma;
float promedio[13];
int cont_1,n_1,m_1,l_1,m_ant,l_ant;
cont_1 = 0;
m_ant = 0;
l_ant = 0;
//Pausa
printf("\n---PRESIONA UNA TECLA PARA CONTINUAR---\n\n");
getchar();
for(i_N = 1;i_N <= 1;i_N++)
{
N = (int )((pow(2,n_max))*(pow(3,m_max))*(pow(5,l_max)));
printf("\n N = %d \n",N);
for(j_res=Li_max;j_res <= Li_max;j_res++)
{
Li=j_res;
for(n_1 = 1;n_1 <= n_max;n_1++)
{
for(m_1 = m_ant; m_1 <= n_1;m_1++)
{
m_ant = m_1;
for(l_1 = l_ant;l_1 <= m_1;l_1++)
{
l_ant = l_1;
if((m_1 <= m_max) && (l_1 <= l_max))
{
Lo = (int )((pow(2,n_1))*(pow(3,m_1))*(pow(5,l_1)));
cont_1++;
printf("\n Li = %d Lo = %d",Li,Lo);
///Se abre el archivo binario
db_open = fopen("Entrada_real_NCompuesta_C.bin","rb");
dc_open = fopen("Entrada_imag_NCompuesta_C.bin","rb");
suma=0.0;
for(j=0;j<loop;j++)
{
//Comandos necesarios para medir el tiempo
float elapsedTime_app;
hipEvent_t start_app, stop_app;
hipEventCreate(&start_app);
hipEventCreate(&stop_app);
//Se generan en el host los valores del vector de entrada x[n]
vector_entrada_xn(Li,N);
///Se genera el arreglo W[N]
arreglo_W(N);
//---------------------------------------------------------------------------------------------
//Se empieza a medir el tiempo de ejecucion de la aplicacion
hipEventRecord(start_app,0);
//Se generan en el host los factores Dip y Dop
asign_rap(N,Li,Lo);
//Clculo en el host del factor P
P = N/(Dip*Dop);
//printf("\n\n FACTOR P:\n\n");
//printf("\n Dip = %d Dop = %d P = %d ",Dip,Dop,P);
//Funcin auxiliar del host para ejecutar la etapa de entrada
etapa_entrada();
//Funcin auxiliar del host para ejecutar la etapa intermedia
etapa_intermedia();
//Funcin auxiliar del host para ejecutar la etapa de salida
etapa_salida();
//---------------------------------------------------------------------------------------------
//Comandos necesarios para medir el tiempo de la aplicacion (app)
hipEventRecord(stop_app,0);
hipEventSynchronize(stop_app);
hipEventElapsedTime(&elapsedTime_app,start_app,stop_app);
//Suma de todos los tiempos
suma = suma + elapsedTime_app;
//Se destruyen los eventos que miden el tiempo de la aplicacion
hipEventDestroy(start_app);
hipEventDestroy(stop_app);
//Se liberan memorias del Host y Device
free(x_host);
free(W_host);
free(X_host);
hipFree(x_device);
hipFree(W_device);
hipFree(y_device);
hipFree(z_device);
hipFree(X_device);
}
promedio[cont_1-1] = suma/(float)loop;
fclose(db_open);
fclose(dc_open);
}
}
}
}
}
}
fwrite(promedio,sizeof(float),13,da);
printf("\n\nTIEMPOS:\n\n");
int time_print;
for(time_print = 0;time_print < 13;time_print++)
{
printf("\nTime (%d)= %f ms",time_print,promedio[time_print]);
}
fclose(da);
}
//////////////////////////////////////////////////////////////////////////
/////////////////////////FUNCIONES SECUNDARIAS////////////////////////////
//////////////////////////////////////////////////////////////////////////
//sta funcin genera el vector de entrada x[n]
void vector_entrada_xn(int Li, int N)
{
//Declaracin de variables locales
int k;
float *buffer_real,*buffer_imag;
//Se reserva memoria para xn_host en el host
x_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*Li);
buffer_real = (float*)malloc(sizeof(float)*N);
buffer_imag = (float*)malloc(sizeof(float)*N);
///Se lee el vector de entrada del archivo binario
fread(buffer_real,sizeof(float),N,db_open);
fread(buffer_imag,sizeof(float),N,dc_open);
//Se dan valores a x[n]
for(k = 0;k < Li; k++)
{
//x_host[k] = make_cuFloatComplex((float)(rand()%11),(float)(rand()%11));
//x_host[k] = make_cuFloatComplex((float)(k + 1),(float)(0.0));
x_host[k] = make_cuFloatComplex(buffer_real[k],buffer_imag[k]);
}
/*
//Se imprimen los valores de entrada x[n]
printf("\n---ELEMENTOS DE ENTRADA x[n]---\n\n");
for(k=0;k<Li;k++)
{
printf(" %d-> (%f) + (%f)\n",k+1,cuCrealf(x_host[k]),cuCimagf(x_host[k]));
}
*/
free(buffer_real);
free(buffer_imag);
}
//sta funcin genera el arreglo W
void arreglo_W(int N)
{
//Declaracin de variables locales
int n;
//Se reserva memoria para W_host en el host
W_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*N);
//Se genera el arreglo W
for(n = 1;n <= N;n++)
{
W_host[n-1] = make_cuFloatComplex((float)cos((2*CUDART_PI*n)/N),(float)(-1)*sin((2*CUDART_PI*n)/N));
}
/*
//Se imprimen los valores del arreglo W[N]
printf("\n---ARREGLO W[N]---\n\n");
for(n = 0;n < N; n++)
{
printf(" W[%d]-> (%f) + (%f)\n",n+1,cuCrealf(W_host[n]),cuCimagf(W_host[n]));
}
*/
}
//sta funcin genera los factores Dip y Dop
void asign_rap(int N,int Li,int Lo)
{
//Declaracin de variables locales
float NLi,NLo,Diprapt,Doprapt;
int Nh[500];
int k[500];
int G;
int g,i,t,ta;
int Dipt[500],Dopt[500];
float distrapt,distrap;
int Pos,h,Poss;
int nk[500];
int r;
//Inicializaciones
G = 0;
svF = 0;
//Factores Dip y Dop ideales
NLi=(float)N/(float)Li;
NLo=(float)N/(float)Lo;
Diprapt=NLi;
Doprapt=NLo;
//Se encuentran los factores de "N"
//vF almacena los factores de "N"
//svF almacena el nmero de factores de "N"
factor(N);
//printf("\n ERROR \n");
/*
Almacena en el vector Nh los factores que son diferentes de del vector vF
En el vector k se almacena la cantidad de veces que se repite cada
elemento almacenado en el vector Nh.
*/
Nh[0] = vF[0];
k[0]=1;
for(g=1;g<=svF-1;g=g+1)
{
if(vF[g]!=vF[g-1])
{
G=G+1;
Nh[G]=vF[g];
k[G]=1;
}
else
{
k[G]=k[G]+1;
}
}
/*
Almacena en el vector Nh todas las posibles combinaciones que den como
producto a N. t almacena el numero de elementos del vector Nh.
*/
product(Nh,k,G);
t = a;
for(i=0;i<t;i=i+1)
{
Dipt[i]=Prod[i];
}
distrapt=inf;
for(g=1;g<=t;g=g+1)
{
if(Dipt[g-1]<=NLi)
{
Pos=g-1;
for(h=0;h<=G;h=h+1)
{
Poss=floor(Pos/(k[h]+1));
nk[h]=k[h]+Poss*(k[h]+1)-Pos;
Pos=Poss;
}
product(Nh,nk,G);
ta=a;
for(i=0;i<ta;i=i+1)
{
Dopt[i]=Prod[i];
}
////////////////////////////////////////////
//int j;
//for(j=0;j<ta;j++)
//{
// printf(" %d ",Dopt[j]);
//}
//printf("\n\n ta=%d\n\n",ta);
///////////////////////////////////////////
for(r=0;r<ta;r=r+1)
{
distrap=sqrt(pow(Diprapt-(Dipt[g-1]),2)+pow(Doprapt-(Dopt[r]),2));
if(distrap<distrapt)
{
distrapt=distrap;
Dip=Dipt[g-1];
Dop=Dopt[r];
}
}
}
}
/*
printf("\n\n FACTOR Dip :\n\n");
printf(" %d ",Dip);
printf("\n\n FACTOR Dop:\n\n");
printf(" %d ",Dop);
*/
}
//sta funcin encuentra los factores de "N"
void factor(int N)
{
//Se empieza a verificar los factores desde 2
int i=2;
long N_factor;
N_factor = N;
while(i<=N_factor)
{
while((N_factor%i)==0)
{
vF[svF]=i;
N_factor=N_factor/i;
// printf("Factores: %d ",vF[svF]);
svF++;
}
i++;
}
}
//sta funcin encuentra todas las posibles combinaciones de factores que den como resultado "N"
void product(int vector_1[500],int vector_2[500],int valor)
{
int d,e,s,pNh,i;
int cont=0;
Prod[0]=1;
a=1;
for(d=0;d<=valor;d=d+1)
{
s=a;
pNh=1;
for(e=1;e<=vector_2[d];e=e+1)
{
pNh=pNh*vector_1[d];
for(i=(s*e+1);i<=(s*e+s);i=i+1)
{
Prod[i-1]=pNh*Prod[cont];
cont=cont+1;
}
a=a+s;
cont=0;
}
}
}
//Funcin auxiliar del host para calcular la etapa de entrada en el device
void etapa_entrada(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA DE ENTRADA//////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Declaracin de variables locales
int k1,n1,n2;
//Asignacin de memoria en el device para el arreglo "x_device"
hipMalloc((void**)&x_device,Li*sizeof(cuFloatComplex));
//Se reserva memoria en el device para el arreglo "W_device"
hipMalloc((void**)&W_device,N*sizeof(cuFloatComplex));
//Asignacin de memoria en el device para el arreglo "y"
hipMalloc((void**)&y_device,P*Dip*Dop*sizeof(cuFloatComplex));
//Se pasa el arreglo x_host a x_device
hipMemcpy(x_device,x_host,Li*sizeof(cuFloatComplex),hipMemcpyHostToDevice);
//Envo de los arreglos W hacia la memoria global del device
hipMemcpy(W_device,W_host,N*sizeof(cuFloatComplex),hipMemcpyHostToDevice);
//Asignacin de memoria en el host para "y"
//y_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*P*Dip*Dop);
//Dimensionamiento del grid para la funcin kernel "inputStage"
//Dimensionamiento del Grid
dim3 gridDim(1,1,1);
//Dimensionamiento del block
dim3 blockDim(1,1,1);
if((P*Dop) < 32 && (Dip) < 32)
{
blockDim.x = (P*Dop);
blockDim.y = (Dip);
gridDim.x = 1;
gridDim.y = 1;
}
else
{
blockDim.x = 32;
blockDim.y = 32;
gridDim.x = (unsigned int) (ceilf((float)(P*Dop)/(float)blockDim.x));
gridDim.y = (unsigned int) (ceilf((float)Dip/(float)blockDim.y));
}
//Lanzamiento del kernel "inputStage_kernel"
hipLaunchKernelGGL(( inputStage_kernel), dim3(gridDim),dim3(blockDim), 0, 0, N,Li,Dip,Dop,P,x_device,W_device,y_device);
//Esperar que el kernel termine de ejecutarse totalmente
hipDeviceSynchronize();
/*
//Copia del arreglo "y" del device hacia el host
hipMemcpy(y_host,y_device,sizeof(cuFloatComplex)*P*Dip*Dop,hipMemcpyDeviceToHost);
//Se imprimen los valores de "y"
printf("\n\n--- ARREGLO y(n1,n2,k1) ---\n\n");
for(k1 = 0;k1 < Dip;k1++)
{
for(n1 = 0;n1 < Dop;n1++)
{
for(n2 = 0;n2 < P;n2++)
{
printf(" (%f) + (%f) ",cuCrealf(y_host[(k1*Dop*P)+(n1*P)+n2]),cuCimagf(y_host[(k1*Dop*P)+(n1*P)+n2]));
}
printf("\n");
}
printf("\n\n");
}
printf("\n");
*/
}
//funcin kernel que ejecuta la etapa de entrada en el device
__global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,cuFloatComplex *x,cuFloatComplex *W,cuFloatComplex *y)
{
int n1,n2;
cuFloatComplex t1;
//Threads
int n = blockDim.x *blockIdx.x + threadIdx.x;
int k1 = blockDim.y *blockIdx.y + threadIdx.y;
//Se resetean las flags
//flag_inputstage_1_d[0] = 0;
//flag_inputstage_2_d[0] = 0;
//flag_inputstage_3_d[0] = 0;
//printf("\n n = %d k1 = %d",n,k1);
if( (n < (P*Dop)) && (k1 < Dip))
{
n2 = floorf(n/Dop);
n1 = n - (Dop*n2);
//Generacin de los elementos que dependen de x[0]
if(n == 0)
{
y[(k1*Dop*P)+(0*P)+ 0] = x[0];
///Flag
//flag_inputstage_1_d[0] = 1;
}
//Mapeo de x[n] a las entradas del primer conjunto de Dop DFT's
if((n >= 1) && (n <= (Li-1)))
{
t1 = x[n];
if(k1 == 0)
{
y[(0*Dop*P)+(n1*P)+ n2] = t1;
}
if(k1 >= 1)
{
y[(k1*Dop*P)+(n1*P)+ n2] = cuCmulf(W[((n*k1)%N)-1],t1);
}
///Flag
//flag_inputstage_2_d[0] = 1;
}
//Rellenado de ceros para los elementos de "y" para Li <= n <= (P*Dop)-1
if((n >= Li) && (n <= (P*Dop)-1))
{
y[(k1*Dop*P)+(n1*P)+ n2] = make_cuFloatComplex(0.0,0.0);
///Flag
//flag_inputstage_3_d[0] = 1;
}
//printf("\n (%f) + (%f)\n ",cuCrealf(y[(k1*Dop*P)+(n1*P)+ n2]),cuCimagf(y[(k1*Dop*P)+(n1*P)+ n2]));
}
}
//Funcin auxiliar del host para calcular la etapa intermedia en el device
void etapa_intermedia(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA INTERMEDIA//////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Declaracin de variables locales
int k1,k2,n1;
int n[1] = {P};
int inembed[1] = {P};
int onembed[1] = {P};
//Asignacin de memoria en el device para "z"
hipMalloc((void**)&z_device,P*Dip*Dop*sizeof(cuFloatComplex));
//Asignacin de memoria en el host para "z"
//z_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*P*Dip*Dop);
//Asignacin de memoria en el device para "in" y "out"
hipMalloc((void**)&in,sizeof(hipfftComplex)*P*Dip*Dop);
hipMalloc((void**)&out,sizeof(hipfftComplex)*P*Dip*Dop);
//Se copia el arreglo "y" al arreglo "in"
hipMemcpy(in,y_device,sizeof(cuFloatComplex)*P*Dip*Dop,hipMemcpyDeviceToDevice);
//Se crea un plan
hipfftHandle plan;
hipfftPlanMany(&plan,1,n,inembed,1,P,onembed,1,P,HIPFFT_C2C,Dip*Dop);
//Ejecucin del plan
hipfftExecC2C(plan,in,out,HIPFFT_FORWARD);
//Esperar que el kernel termine de ejecutarse totalmente
hipDeviceSynchronize();
//Se copian los datos del arreglo "out" al arreglo "z_device"
hipMemcpy(z_device,out,sizeof(hipfftComplex)*P*Dip*Dop,hipMemcpyDeviceToDevice);
//Se destruye el plan
hipfftDestroy(plan);
//Se liberan los arreglos "in" y "out"
hipFree(in);
hipFree(out);
/*
//Se copian los datos del arreglo "z_device" al arreglo "z_host"
hipMemcpy(z_host,z_device,sizeof(cuFloatComplex)*P*Dip*Dop,hipMemcpyDeviceToHost);
///Se imprimen los valores de z(n1,k2,k1)
printf("\n\n--- ARREGLO z(n1,k2,k1) ---\n\n");
for(k1 = 0;k1 < Dip;k1++)
{
for(n1 = 0;n1 < Dop;n1++)
{
for(k2 = 0;k2 < P;k2++)
{
printf(" (%f) + (%f) ",cuCrealf(z_host[(k1*Dop*P)+(n1*P)+k2]),cuCimagf(z_host[(k1*Dop*P)+(n1*P)+k2]));
}
printf("\n");
}
printf("\n\n");
}
printf("\n");
*/
}
//Funcin auxiliar del host para calcular la etapa de salida en el device
void etapa_salida(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA DE SALIDA///////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Declaracin de variables locales
int m;
//Asignacin de memoria en el device para "X"
hipMalloc((void**)&X_device,Lo*sizeof(cuFloatComplex));
//Asignacin de memoria en el host para "X"
X_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*Lo);
//Dimensionamiento del grid para la funcin kernel "outputStage"
//Dimensionamiento del Grid
dim3 gridDim(1,1,1);
//Dimensionamiento del block
dim3 blockDim(1,1,1);
if((Lo) < 1024)
{
blockDim.x = Lo;
gridDim.x = 1;
}
else
{
blockDim.x = 1024;
gridDim.x = (unsigned int) (ceilf((float)Lo/(float)blockDim.x));
}
//Lanzamiento del kernel "outputStage_kernel"
hipLaunchKernelGGL(( outputStage_kernel), dim3(gridDim),dim3(blockDim), 0, 0, N,Lo,Dip,Dop,P,z_device,W_device,X_device);
//Esperar que el kernel termine de ejecutarse totalmente
hipDeviceSynchronize();
//Copia del arreglo "X" del device hacia el host
hipMemcpy(X_host,X_device,sizeof(cuFloatComplex)*Lo,hipMemcpyDeviceToHost);
/*
//Se imprimen los valores de "X_host"
///Imprimir X[k]
printf("\n\n--- ARREGLO X[k] ---\n\n");
for(m=0;m<=Lo-1;m++)
{
printf("\n X[%d] = %.4f + (%.4f)",m,cuCrealf(X_host[m]),cuCimagf(X_host[m]));
//fprintf(da,"%.4f %.4f\n",creal(X[i]),cimag(X[i]));
}
*/
}
//funcin kernel que ejecuta la etapa de salida en el device
__global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,cuFloatComplex *z,cuFloatComplex *W,cuFloatComplex *X)
{
//Declaracin de variables locales
int n1,k_aux,k1,k2,a,b;
cuFloatComplex t1,t2,t3,t4,t5;
//Threads
int k = blockDim.x *blockIdx.x + threadIdx.x;
//Se resetean las flags
//flag_outputstage_1_d[0] = 0;
//flag_outputstage_2_d[0] = 0;
//flag_outputstage_3_d[0] = 0;
if(k < Lo)
{
for(n1 = 0; n1 <= (Dop-1); n1 = n1+1)
{
if(Lo <= Dip)
{
//Clculo de X(k) para 0<=k<=Lo-1.
//printf("\n--- Caso (Lo <= Dip) ---\n");
//En la descomposicin k = k1 + Dipk2; k2 = 0, y por lo tanto, k = k1
if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
///Flag
//flag_outputstage_1_d[0] = 1;
}
else
{
if(n1 == 1)
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
X[k] = cuCaddf(z[(k*Dop*P)+(n1*P) + 0],X[k]);
///Flag
//flag_outputstage_1_d[0] = 1;
}
}
else
{
if((k >= 0) && (k <= (Dip-1)))
{
//Clculo de X(k) para 0<=k<=Dip-1.
//En la descomposicin k = k1 + Dipk2; k2 = 0, y por lo tanto, k = k1
if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
else
{
if(n1 == 1)
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
X[k] = cuCaddf(z[(k*Dop*P)+(n1*P) + 0],X[k]);
}
}
else
{
if(Dop <= 4)
{
//Usando el mtodo directo
//printf("\n--- Caso (Metodo directo) ---\n");
if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
///Flag
//flag_outputstage_2_d[0] = 1;
}
else
{
if(n1 == 1)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
}
a = floorf(k/(Dip*P));
X[k] = cuCaddf(X[k],cuCmulf(z[(k1*Dop*P)+(n1*P)+ (k2%P)],W[((n1*(k2+P*(a))*Dip)%N)-1]));
///Flag
//flag_outputstage_2_d[0] = 1;
}
}
else
{
//Usando el mtodo filtering 2BF
//printf("\n--- Caso (Filtro 2BF) ---\n");
if((Dop-2) >= 1)
{
if(n1 == 0)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
b = floorf(k/(Dip*P));
t4 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
///Flag
//flag_outputstage_3_d[0] = 1;
}
if((n1 >= 1) && (n1 <= (Dop-2)))
{
t2 = t1;
t1 = cuCaddf(z[(k1*Dop*P)+((-(n1-(Dop-1)))*P)+ (k2%P)],t4);
t3 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
t4 = cuCsubf(t3,t2);
}
if(n1 == (Dop-1))
{
t5 = cuCaddf(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4);
X[k] = cuCsubf(t5,cuCmulf(t1,cuConjf(W[(((k2+P*(b))*Dip)%N)-1])));
}
}
else
{
if(Dop == 1)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
X[k] = t1;
///Flag
//flag_outputstage_3_d[0] = 1;
}
else
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
b = floorf(k/(Dip*P));
t4 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
t5 = cuCaddf(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4);
X[k] = cuCsubf(t5,cuCmulf(t1,cuConjf(W[(((k2+P*(b))*Dip)%N)-1])));
///Flag
//flag_outputstage_3_d[0] = 1;
}
}
}
}
}
}
}
} | 0c3adba26aaf8321e0ad0e3ca520dfc646036482.cu | ///Ésta programa calcula la versión paralelizada del algoritmo FFT_DIF_DIT_TD
///(29/12/2016)
///Ésta versión sirve para graficar en matlab los tiempos de ejecución, considerando N = (2^5 x 3^4 x 5^4), Li = 800,000 y Lo = varía
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cufft.h>
#include <cufftw.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuComplex.h>
#include <math.h>
#include <math_constants.h>
#include <iostream>
#include <time.h>
//////////////////////////////////////////////////////////////////////////
///////////////////////DECLARACIÓN DE FUNCIONES///////////////////////////
//////////////////////////////////////////////////////////////////////////
void vector_entrada_xn(int Li, int N);
void arreglo_W(int N);
void asign_rap(int N,int Li,int Lo);
void factor(int N);
void product(int vector_1[500],int vector_2[500],int valor);
void etapa_entrada(void);
__global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,cuFloatComplex *x,cuFloatComplex *W,cuFloatComplex *y);
void etapa_intermedia(void);
void etapa_salida(void);
__global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,cuFloatComplex *z,cuFloatComplex *W,cuFloatComplex *X);
//////////////////////////////////////////////////////////////////////////
/////////////////////DECLARACIÓN DE VARIABLES GLOBALES////////////////////
//////////////////////////////////////////////////////////////////////////
cuFloatComplex *x_host;
cuFloatComplex *W_host;
//cuFloatComplex *y_host;
//cuFloatComplex *z_host;
cuFloatComplex *X_host;
cuFloatComplex *x_device;
cuFloatComplex *W_device;
cuFloatComplex *y_device;
cuFloatComplex *z_device;
cuFloatComplex *X_device;
cufftComplex *in,*out;
FILE *db_open,*dc_open;
int Dip,Dop,P,N,Li,Lo;
int vF[500]; //Almacena los factores de N
int svF; //Almacena el numero de factores de N
int Prod[500];
int a;
#define inf 99999
//////////////////////////////////////////////////////////////////////////
//////////////////////////DATOS DE ENTRADA////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// N >>> Número de elementos del vector de entrada
/// Li >>> Número de elementos de entrada diferentes de cero
/// Lo >>> Número de elementos de salida requeridos
/// loop >>> Número de iteraciones
/// muestras >>> Número de muestras
//////////////////////////////////////////////////////////////////////////
///////////////////////////DATOS DE SALIDA////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// X >>> Vector de salida
//////////////////////////////////////////////////////////////////////////
/////////////////// SE INGRESAN LOS DATOS DE ENTRADA /////////////////////
//////////////////////////////////////////////////////////////////////////
///Ingrese el número de iteraciones requeridas
const int loop = 300;
///Ingrese el valor de n_max, m_max y l_max (N = (2^n_max x 3^m_max x 5^l_max))
const int n_max = 5;
const int m_max = 4;
const int l_max = 4;
///Ingrese el valor de Li_max
const int Li_max = 800000;
//////////////////////////////////////////////////////////////////////////
//////////////////////////FUNCION PRINCIPAL///////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Main entry point: benchmarks the FFT_DIF_DIT_TD pipeline for a fixed
//N = 2^n_max * 3^m_max * 5^l_max and Li = Li_max, while Lo sweeps over the
//divisors 2^n_1 * 3^m_1 * 5^l_1 produced by the nested loops below. Each
//(Li,Lo) pair is timed "loop" times and the average is written to a binary
//file selected by the active CUDA device.
int main()
{
    //////////////////////////////////////////////////////////////////////////
    //////////////////////////DEVICE SELECTION////////////////////////////////
    //////////////////////////////////////////////////////////////////////////
    int device;
    FILE *da = NULL;   //Output file for the averaged timings
    cudaSetDevice(1);  //NOTE: device 1 is hard-coded (TESLA K20 on the test rig)
    cudaGetDevice(&device);
    if(device == 0)
    {
        printf("\n\n---DEVICE = GeForce GTX 970---\n\n");
        da = fopen("Tiempos_NCompuesta_Li850000_LoVARIA_CUDA_GTX970.bin","a+b"); //Creates or appends to the file
    }
    if(device == 1)
    {
        printf("\n\n---DEVICE = TESLA K20---\n\n");
        da = fopen("Tiempos_NCompuesta_Li850000_LoVARIA_CUDA_TESLAK20c.bin","a+b"); //Creates or appends to the file
    }
    //Fix: "da" was previously used uninitialized when the active device was
    //neither 0 nor 1, and a failed fopen() was never detected. Bail out early.
    if(da == NULL)
    {
        fprintf(stderr,"\nError: unsupported device id (%d) or timing file could not be opened.\n",device);
        return 1;
    }
    //////////////////////////////////////////////////////////////////////////
    int j,i_N,j_res;
    float suma;                 //Accumulated elapsed time over "loop" runs
    float promedio[13];         //Average time per (Li,Lo) pair; the loop nest
                                //below is expected to yield exactly 13 pairs
    int cont_1,n_1,m_1,l_1,m_ant,l_ant;
    cont_1 = 0;
    m_ant = 0;
    l_ant = 0;
    //Pause so the operator can confirm the configuration before running
    printf("\n---PRESIONA UNA TECLA PARA CONTINUAR---\n\n");
    getchar();
    for(i_N = 1;i_N <= 1;i_N++)
    {
        //N is fixed: 2^n_max * 3^m_max * 5^l_max
        N = (int )((pow(2,n_max))*(pow(3,m_max))*(pow(5,l_max)));
        printf("\n N = %d \n",N);
        for(j_res=Li_max;j_res <= Li_max;j_res++)
        {
            Li=j_res;
            //Sweep Lo = 2^n_1 * 3^m_1 * 5^l_1 with n_1 >= m_1 >= l_1;
            //m_ant and l_ant make the inner loops resume where the
            //previous pass stopped, so each (n_1,m_1,l_1) triple that
            //passes the bounds check is visited exactly once.
            for(n_1 = 1;n_1 <= n_max;n_1++)
            {
                for(m_1 = m_ant; m_1 <= n_1;m_1++)
                {
                    m_ant = m_1;
                    for(l_1 = l_ant;l_1 <= m_1;l_1++)
                    {
                        l_ant = l_1;
                        if((m_1 <= m_max) && (l_1 <= l_max))
                        {
                            Lo = (int )((pow(2,n_1))*(pow(3,m_1))*(pow(5,l_1)));
                            cont_1++;
                            printf("\n Li = %d Lo = %d",Li,Lo);
                            //Open the binary input files (real / imaginary parts)
                            db_open = fopen("Entrada_real_NCompuesta_C.bin","rb");
                            dc_open = fopen("Entrada_imag_NCompuesta_C.bin","rb");
                            //Fix: previously a missing input file led to fread() on NULL
                            if(db_open == NULL || dc_open == NULL)
                            {
                                fprintf(stderr,"\nError: input data files could not be opened.\n");
                                fclose(da);
                                return 1;
                            }
                            suma=0.0;
                            for(j=0;j<loop;j++)
                            {
                                //Events used to time one full application run
                                float elapsedTime_app;
                                cudaEvent_t start_app, stop_app;
                                cudaEventCreate(&start_app);
                                cudaEventCreate(&stop_app);
                                //Generate the host-side input vector x[n]
                                vector_entrada_xn(Li,N);
                                //Generate the twiddle-factor table W[N]
                                arreglo_W(N);
                                //---------------------------------------------------------------------------------------------
                                //Start timing the application
                                cudaEventRecord(start_app,0);
                                //Compute the factors Dip and Dop on the host
                                asign_rap(N,Li,Lo);
                                //Compute the factor P on the host
                                P = N/(Dip*Dop);
                                //Run the three pipeline stages
                                etapa_entrada();
                                etapa_intermedia();
                                etapa_salida();
                                //---------------------------------------------------------------------------------------------
                                //Stop timing and accumulate the elapsed time
                                cudaEventRecord(stop_app,0);
                                cudaEventSynchronize(stop_app);
                                cudaEventElapsedTime(&elapsedTime_app,start_app,stop_app);
                                suma = suma + elapsedTime_app;
                                cudaEventDestroy(start_app);
                                cudaEventDestroy(stop_app);
                                //Release host and device memory for the next run
                                free(x_host);
                                free(W_host);
                                free(X_host);
                                cudaFree(x_device);
                                cudaFree(W_device);
                                cudaFree(y_device);
                                cudaFree(z_device);
                                cudaFree(X_device);
                            }
                            //NOTE(review): promedio has room for exactly 13 entries; the
                            //loop bounds above are assumed to keep cont_1 <= 13 - confirm
                            //before changing n_max/m_max/l_max.
                            promedio[cont_1-1] = suma/(float)loop;
                            fclose(db_open);
                            fclose(dc_open);
                        }
                    }
                }
            }
        }
    }
    //Dump the 13 averaged timings to the binary file and echo them
    fwrite(promedio,sizeof(float),13,da);
    printf("\n\nTIEMPOS:\n\n");
    int time_print;
    for(time_print = 0;time_print < 13;time_print++)
    {
        printf("\nTime (%d)= %f ms",time_print,promedio[time_print]);
    }
    fclose(da);
    return 0;
}
//////////////////////////////////////////////////////////////////////////
/////////////////////////FUNCIONES SECUNDARIAS////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Reads Li complex input samples into the global x_host.
//The real and imaginary parts come from the already-open binary files
//db_open / dc_open, each expected to hold N floats.
void vector_entrada_xn(int Li, int N)
{
    //Local variables
    int k;
    float *buffer_real,*buffer_imag;
    size_t leidos_real,leidos_imag;
    //Host allocations: x_host keeps the Li complex samples actually used,
    //the buffers stage the full N-float records read from disk.
    x_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*Li);
    buffer_real = (float*)malloc(sizeof(float)*N);
    buffer_imag = (float*)malloc(sizeof(float)*N);
    //Fix: malloc() results were previously not checked
    if(x_host == NULL || buffer_real == NULL || buffer_imag == NULL)
    {
        fprintf(stderr,"\nError: host memory allocation failed in vector_entrada_xn().\n");
        exit(EXIT_FAILURE);
    }
    //Read the input vector from the binary files.
    //Fix: the fread() return values were previously ignored, so a short or
    //failed read silently produced garbage samples.
    leidos_real = fread(buffer_real,sizeof(float),N,db_open);
    leidos_imag = fread(buffer_imag,sizeof(float),N,dc_open);
    if(leidos_real != (size_t)N || leidos_imag != (size_t)N)
    {
        fprintf(stderr,"\nWarning: short read on input files (%lu/%lu of %d floats).\n",
                (unsigned long)leidos_real,(unsigned long)leidos_imag,N);
    }
    //Build x[k] = real + i*imag for the first Li samples
    for(k = 0;k < Li; k++)
    {
        x_host[k] = make_cuFloatComplex(buffer_real[k],buffer_imag[k]);
    }
    free(buffer_real);
    free(buffer_imag);
}
//Builds the twiddle-factor table in the global W_host:
//W_host[m-1] = exp(-j*2*pi*m/N) for m = 1..N.
void arreglo_W(int N)
{
    int idx;
    //Host allocation for the N twiddle factors
    W_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*N);
    //Fill the table; entry idx corresponds to exponent m = idx + 1
    for(idx = 0; idx < N; idx++)
    {
        double theta = (2*CUDART_PI*(idx+1))/N;
        W_host[idx] = make_cuFloatComplex((float)cos(theta),(float)(-1)*sin(theta));
    }
}
//Chooses the factors Dip and Dop (globals) for the decomposition of N,
//given the input/output sizes Li and Lo. The "ideal" factors are N/Li and
//N/Lo; the function enumerates divisors of N and keeps the pair closest
//(in Euclidean distance) to that ideal point.
void asign_rap(int N,int Li,int Lo)
{
	//Local variables
	float NLi,NLo,Diprapt,Doprapt;
	int Nh[500];
	int k[500];
	int G;
	int g,i,t,ta;
	int Dipt[500],Dopt[500];
	float distrapt,distrap;
	int Pos,h,Poss;
	int nk[500];
	int r;
	//Initializations
	G = 0;
	svF = 0;
	//Ideal (real-valued) Dip and Dop factors
	NLi=(float)N/(float)Li;
	NLo=(float)N/(float)Lo;
	Diprapt=NLi;
	Doprapt=NLo;
	//Find the prime factors of "N":
	//the global vF receives the factors, the global svF their count.
	factor(N);
	/*
	Nh keeps the distinct prime factors found in vF; k keeps the
	multiplicity of each distinct factor stored in Nh. G ends up as the
	index of the last distinct factor.
	*/
	Nh[0] = vF[0];
	k[0]=1;
	for(g=1;g<=svF-1;g=g+1)
	{
		if(vF[g]!=vF[g-1])
		{
			G=G+1;
			Nh[G]=vF[g];
			k[G]=1;
		}
		else
		{
			k[G]=k[G]+1;
		}
	}
	/*
	product() writes into the global Prod[] every product that can be
	formed from the factors in Nh (i.e. all divisors of N); the global
	"a" returns how many there are.
	*/
	product(Nh,k,G);
	t = a;
	for(i=0;i<t;i=i+1)
	{
		Dipt[i]=Prod[i];
	}
	distrapt=inf;
	//For every candidate Dip not exceeding N/Li, enumerate the divisors of
	//N/Dip and keep the (Dip,Dop) pair closest to the ideal point.
	for(g=1;g<=t;g=g+1)
	{
		if(Dipt[g-1]<=NLi)
		{
			Pos=g-1;
			//Decode the mixed-radix position Pos of Dipt[g-1] to obtain
			//the complementary multiplicities nk[] of N/Dipt[g-1].
			for(h=0;h<=G;h=h+1)
			{
				Poss=floor(Pos/(k[h]+1));
				nk[h]=k[h]+Poss*(k[h]+1)-Pos;
				Pos=Poss;
			}
			product(Nh,nk,G);
			ta=a;
			for(i=0;i<ta;i=i+1)
			{
				Dopt[i]=Prod[i];
			}
			//Keep the candidate pair with the smallest distance to
			//(Diprapt, Doprapt).
			for(r=0;r<ta;r=r+1)
			{
				distrap=sqrt(pow(Diprapt-(Dipt[g-1]),2)+pow(Doprapt-(Dopt[r]),2));
				if(distrap<distrapt)
				{
					distrapt=distrap;
					Dip=Dipt[g-1];
					Dop=Dopt[r];
				}
			}
		}
	}
	//Results are returned through the globals Dip and Dop.
}
//Trial-division prime factorization of N. The factors are appended to the
//global vF starting at index svF (the caller resets svF beforehand), and
//svF is advanced past the last factor written.
void factor(int N)
{
    long remaining = N;
    //Check candidate divisors starting from 2
    for (int divisor = 2; divisor <= remaining; divisor++)
    {
        //Peel off every occurrence of the current divisor
        while ((remaining % divisor) == 0)
        {
            vF[svF] = divisor;
            svF++;
            remaining = remaining / divisor;
        }
    }
}
//Builds in the global Prod[] every product that can be formed from the
//distinct factors vector_1[0..valor], each taken with multiplicity
//0..vector_2[d] - i.e. all divisors of the number those factors describe.
//The global "a" ends up holding the number of entries written.
void product(int vector_1[500],int vector_2[500],int valor)
{
	int d,e,s,pNh,i;
	int cont=0;
	Prod[0]=1; //Seed with the empty product
	a=1;
	for(d=0;d<=valor;d=d+1)
	{
		s=a;   //Number of products built before this factor
		pNh=1; //vector_1[d] raised to the e-th power
		for(e=1;e<=vector_2[d];e=e+1)
		{
			pNh=pNh*vector_1[d];
			//Append pNh times each of the first s stored products
			for(i=(s*e+1);i<=(s*e+s);i=i+1)
			{
				Prod[i-1]=pNh*Prod[cont];
				cont=cont+1;
			}
			a=a+s;
			cont=0;
		}
	}
}
//Host-side helper that runs the input stage on the device: allocates the
//device buffers, uploads x[n] and the twiddle table W, and launches
//inputStage_kernel over a 2-D grid of (P*Dop) x Dip threads.
void etapa_entrada(void)
{
	//////////////////////////////////////////////////////////////////////////
	////////////////////////////INPUT STAGE///////////////////////////////////
	//////////////////////////////////////////////////////////////////////////
	//Local variables (only referenced by the disabled debug dump below)
	int k1,n1,n2;
	//Device allocation for the input vector "x_device"
	cudaMalloc((void**)&x_device,Li*sizeof(cuFloatComplex));
	//Device allocation for the twiddle table "W_device"
	cudaMalloc((void**)&W_device,N*sizeof(cuFloatComplex));
	//Device allocation for the stage output "y"
	cudaMalloc((void**)&y_device,P*Dip*Dop*sizeof(cuFloatComplex));
	//Upload x_host to x_device
	cudaMemcpy(x_device,x_host,Li*sizeof(cuFloatComplex),cudaMemcpyHostToDevice);
	//Upload the W table to device global memory
	cudaMemcpy(W_device,W_host,N*sizeof(cuFloatComplex),cudaMemcpyHostToDevice);
	//Host allocation for "y" (only needed by the debug dump)
	//y_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*P*Dip*Dop);
	//Launch configuration for "inputStage_kernel":
	//x-dimension covers the (P*Dop) columns, y-dimension the Dip rows.
	dim3 gridDim(1,1,1);
	dim3 blockDim(1,1,1);
	if((P*Dop) < 32 && (Dip) < 32)
	{
		//Small problem: a single block holds everything
		blockDim.x = (P*Dop);
		blockDim.y = (Dip);
		gridDim.x = 1;
		gridDim.y = 1;
	}
	else
	{
		//32x32 blocks with enough blocks to cover the whole index space
		blockDim.x = 32;
		blockDim.y = 32;
		gridDim.x = (unsigned int) (ceilf((float)(P*Dop)/(float)blockDim.x));
		gridDim.y = (unsigned int) (ceilf((float)Dip/(float)blockDim.y));
	}
	//Launch "inputStage_kernel"
	inputStage_kernel<<<gridDim,blockDim>>>(N,Li,Dip,Dop,P,x_device,W_device,y_device);
	//Wait until the kernel has fully completed
	cudaDeviceSynchronize();
	/*
	//Debug dump: copy "y" back to the host and print y(n1,n2,k1)
	cudaMemcpy(y_host,y_device,sizeof(cuFloatComplex)*P*Dip*Dop,cudaMemcpyDeviceToHost);
	printf("\n\n--- ARREGLO y(n1,n2,k1) ---\n\n");
	for(k1 = 0;k1 < Dip;k1++)
	{
		for(n1 = 0;n1 < Dop;n1++)
		{
			for(n2 = 0;n2 < P;n2++)
			{
				printf(" (%f) + (%f) ",cuCrealf(y_host[(k1*Dop*P)+(n1*P)+n2]),cuCimagf(y_host[(k1*Dop*P)+(n1*P)+n2]));
			}
			printf("\n");
		}
		printf("\n\n");
	}
	printf("\n");
	*/
}
/**
 * Kernel for the input stage: maps x[n] onto the Dip x (Dop*P) work array
 * "y", multiplying each row k1 >= 1 by the twiddle factor W^(n*k1) and
 * zero-padding the columns beyond Li.
 *
 * Thread layout: n (column, 0..P*Dop-1) along x, k1 (row, 0..Dip-1) along y.
 */
__global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,cuFloatComplex *x,cuFloatComplex *W,cuFloatComplex *y)
{
    int n1,n2;
    cuFloatComplex t1;
    //Global thread coordinates
    int n = blockDim.x *blockIdx.x + threadIdx.x;
    int k1 = blockDim.y *blockIdx.y + threadIdx.y;
    if( (n < (P*Dop)) && (k1 < Dip))
    {
        //Decompose the column index as n = n1 + Dop*n2
        n2 = floorf(n/Dop);
        n1 = n - (Dop*n2);
        //Elements that depend on x[0]: W^0 = 1, so no twiddle is needed
        if(n == 0)
        {
            y[(k1*Dop*P)+(0*P)+ 0] = x[0];
        }
        //Map x[n] onto the inputs of the first set of Dop DFT's
        if((n >= 1) && (n <= (Li-1)))
        {
            t1 = x[n];
            if(k1 == 0)
            {
                y[(0*Dop*P)+(n1*P)+ n2] = t1;
            }
            if(k1 >= 1)
            {
                /* W[m-1] holds W^m = exp(-2*pi*i*m/N) for m = 1..N, so the
                 * exponent e = (n*k1) mod N maps to index e-1, and e == 0
                 * maps to index N-1 (W^N == W^0 == 1).
                 * Fix: the original index ((n*k1)%N)-1 read W[-1] (out of
                 * bounds) whenever n*k1 was a multiple of N; folding the
                 * exponent with +N-1 mod N handles that case correctly. */
                y[(k1*Dop*P)+(n1*P)+ n2] = cuCmulf(W[(((n*k1)%N) + N - 1) % N],t1);
            }
        }
        //Zero-fill the remaining "y" slots for Li <= n <= (P*Dop)-1
        if((n >= Li) && (n <= (P*Dop)-1))
        {
            y[(k1*Dop*P)+(n1*P)+ n2] = make_cuFloatComplex(0.0,0.0);
        }
    }
}
//Host-side helper that runs the intermediate stage on the device: performs
//Dip*Dop batched P-point forward FFTs over the "y" array using cuFFT and
//stores the result in "z_device".
void etapa_intermedia(void)
{
	//////////////////////////////////////////////////////////////////////////
	////////////////////////////INTERMEDIATE STAGE////////////////////////////
	//////////////////////////////////////////////////////////////////////////
	//Local variables (k1,k2,n1 only referenced by the disabled debug dump)
	int k1,k2,n1;
	//cuFFT layout descriptors: batched 1-D transforms of length P
	int n[1] = {P};
	int inembed[1] = {P};
	int onembed[1] = {P};
	//Device allocation for "z"
	cudaMalloc((void**)&z_device,P*Dip*Dop*sizeof(cuFloatComplex));
	//Host allocation for "z" (only needed by the debug dump)
	//z_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*P*Dip*Dop);
	//Device allocations for the cuFFT input/output buffers
	cudaMalloc((void**)&in,sizeof(cufftComplex)*P*Dip*Dop);
	cudaMalloc((void**)&out,sizeof(cufftComplex)*P*Dip*Dop);
	//Copy the "y" array into "in" (device-to-device)
	cudaMemcpy(in,y_device,sizeof(cuFloatComplex)*P*Dip*Dop,cudaMemcpyDeviceToDevice);
	//Create a plan: Dip*Dop batched C2C FFTs of length P, stride 1,
	//consecutive batches P elements apart.
	cufftHandle plan;
	cufftPlanMany(&plan,1,n,inembed,1,P,onembed,1,P,CUFFT_C2C,Dip*Dop);
	//Execute the plan (forward transform)
	cufftExecC2C(plan,in,out,CUFFT_FORWARD);
	//Wait until the transform has fully completed
	cudaDeviceSynchronize();
	//Copy the "out" buffer into "z_device" (device-to-device)
	cudaMemcpy(z_device,out,sizeof(cufftComplex)*P*Dip*Dop,cudaMemcpyDeviceToDevice);
	//Destroy the plan
	cufftDestroy(plan);
	//Release the "in" and "out" buffers
	cudaFree(in);
	cudaFree(out);
	/*
	//Debug dump: copy "z_device" to the host and print z(n1,k2,k1)
	cudaMemcpy(z_host,z_device,sizeof(cuFloatComplex)*P*Dip*Dop,cudaMemcpyDeviceToHost);
	printf("\n\n--- ARREGLO z(n1,k2,k1) ---\n\n");
	for(k1 = 0;k1 < Dip;k1++)
	{
		for(n1 = 0;n1 < Dop;n1++)
		{
			for(k2 = 0;k2 < P;k2++)
			{
				printf(" (%f) + (%f) ",cuCrealf(z_host[(k1*Dop*P)+(n1*P)+k2]),cuCimagf(z_host[(k1*Dop*P)+(n1*P)+k2]));
			}
			printf("\n");
		}
		printf("\n\n");
	}
	printf("\n");
	*/
}
//Host-side helper that runs the output stage on the device: allocates the
//result buffer X[k] on both sides, launches outputStage_kernel over a 1-D
//grid of Lo threads, and copies the final spectrum back to the host.
void etapa_salida(void)
{
    //Device allocation for the output spectrum "X"
    cudaMalloc((void**)&X_device,Lo*sizeof(cuFloatComplex));
    //Host allocation for the copy-back of "X"
    X_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*Lo);
    //One-dimensional launch configuration: at most 1024 threads per block,
    //with enough blocks to cover all Lo output samples.
    dim3 grid(1,1,1);
    dim3 block(1,1,1);
    if(Lo < 1024)
    {
        block.x = Lo;
    }
    else
    {
        block.x = 1024;
        grid.x = (unsigned int)(ceilf((float)Lo/(float)block.x));
    }
    //Launch "outputStage_kernel"
    outputStage_kernel<<<grid,block>>>(N,Lo,Dip,Dop,P,z_device,W_device,X_device);
    //Wait until the kernel has fully completed
    cudaDeviceSynchronize();
    //Copy the "X" array from the device back to the host
    cudaMemcpy(X_host,X_device,sizeof(cuFloatComplex)*Lo,cudaMemcpyDeviceToHost);
}
//Kernel that executes the output stage on the device: one thread per output
//sample k, accumulating X[k] over the n1 loop. Depending on the sizes it
//uses direct summation (small Dop) or the 2BF (second-order Goertzel-like)
//filtering recurrence (large Dop).
__global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,cuFloatComplex *z,cuFloatComplex *W,cuFloatComplex *X)
{
	//Local variables. NOTE: k1, k2, b and t1..t5 are written on an early
	//iteration of the n1 loop (n1 == 0 or n1 == 1) and deliberately re-used
	//on later iterations as loop-carried state.
	int n1,k_aux,k1,k2,a,b;
	cuFloatComplex t1,t2,t3,t4,t5;
	//Thread index: one thread per output sample k
	int k = blockDim.x *blockIdx.x + threadIdx.x;
	//Reset the debug flags (instrumentation, disabled)
	//flag_outputstage_1_d[0] = 0;
	//flag_outputstage_2_d[0] = 0;
	//flag_outputstage_3_d[0] = 0;
	if(k < Lo)
	{
		for(n1 = 0; n1 <= (Dop-1); n1 = n1+1)
		{
			if(Lo <= Dip)
			{
				//Compute X(k) for 0 <= k <= Lo-1.
				//In the decomposition k = k1 + Dip*k2: k2 = 0, hence k = k1.
				if(n1 == 0) //Seed the accumulator at least once
				{
					X[k] = z[(k*Dop*P)+(0*P) + 0];
					///Flag
					//flag_outputstage_1_d[0] = 1;
				}
				else
				{
					if(n1 == 1)
					{
						X[k] = z[(k*Dop*P)+(0*P) + 0];
					}
					X[k] = cuCaddf(z[(k*Dop*P)+(n1*P) + 0],X[k]);
					///Flag
					//flag_outputstage_1_d[0] = 1;
				}
			}
			else
			{
				if((k >= 0) && (k <= (Dip-1)))
				{
					//Compute X(k) for 0 <= k <= Dip-1.
					//In the decomposition k = k1 + Dip*k2: k2 = 0, hence k = k1.
					if(n1 == 0) //Seed the accumulator at least once
					{
						X[k] = z[(k*Dop*P)+(0*P) + 0];
					}
					else
					{
						if(n1 == 1)
						{
							X[k] = z[(k*Dop*P)+(0*P) + 0];
						}
						X[k] = cuCaddf(z[(k*Dop*P)+(n1*P) + 0],X[k]);
					}
				}
				else
				{
					if(Dop <= 4)
					{
						//Direct-summation method.
						//NOTE(review): the twiddle index ((...)%N)-1 below
						//evaluates to -1 (out-of-bounds read of W) whenever the
						//exponent is a multiple of N; the correct index for
						//exponent 0 would be N-1 since W[N-1] = W^N = 1.
						if(n1 == 0) //Seed the accumulator and decompose k
						{
							k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
							k2 = floorf(k_aux/Dip);
							k1 = k_aux-(Dip*k2);
							X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
							///Flag
							//flag_outputstage_2_d[0] = 1;
						}
						else
						{
							if(n1 == 1)
							{
								k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
								k2 = floorf(k_aux/Dip);
								k1 = k_aux-(Dip*k2);
								X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
							}
							//k1 and k2 computed at n1 == 1 are re-used for n1 >= 2
							a = floorf(k/(Dip*P));
							X[k] = cuCaddf(X[k],cuCmulf(z[(k1*Dop*P)+(n1*P)+ (k2%P)],W[((n1*(k2+P*(a))*Dip)%N)-1]));
							///Flag
							//flag_outputstage_2_d[0] = 1;
						}
					}
					else
					{
						//2BF filtering method (second-order recurrence).
						//NOTE(review): same ((...)%N)-1 twiddle-index hazard as
						//in the direct method above.
						if((Dop-2) >= 1)
						{
							if(n1 == 0)
							{
								//Initialize the recurrence state t1/t4 and the
								//decomposition k1,k2,b (carried across iterations)
								k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
								k2 = floorf(k_aux/Dip);
								k1 = k_aux-(Dip*k2);
								t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
								b = floorf(k/(Dip*P));
								t4 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
								///Flag
								//flag_outputstage_3_d[0] = 1;
							}
							if((n1 >= 1) && (n1 <= (Dop-2)))
							{
								//Middle steps: t(n) = z[...] + 2*Re(W)*t(n-1) - t(n-2)
								t2 = t1;
								t1 = cuCaddf(z[(k1*Dop*P)+((-(n1-(Dop-1)))*P)+ (k2%P)],t4);
								t3 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
								t4 = cuCsubf(t3,t2);
							}
							if(n1 == (Dop-1))
							{
								//Final step: X[k] = z[0] + t4 - conj(W)*t1
								t5 = cuCaddf(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4);
								X[k] = cuCsubf(t5,cuCmulf(t1,cuConjf(W[(((k2+P*(b))*Dip)%N)-1])));
							}
						}
						else
						{
							if(Dop == 1)
							{
								//Degenerate case: a single term, no twiddle
								k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
								k2 = floorf(k_aux/Dip);
								k1 = k_aux-(Dip*k2);
								t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
								X[k] = t1;
								///Flag
								//flag_outputstage_3_d[0] = 1;
							}
							else
							{
								//Dop == 2: one recurrence step folded inline
								k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
								k2 = floorf(k_aux/Dip);
								k1 = k_aux-(Dip*k2);
								t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
								b = floorf(k/(Dip*P));
								t4 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
								t5 = cuCaddf(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4);
								X[k] = cuCsubf(t5,cuCmulf(t1,cuConjf(W[(((k2+P*(b))*Dip)%N)-1])));
								///Flag
								//flag_outputstage_3_d[0] = 1;
							}
						}
					}
				}
			}
		}
	}
}
c2102a08c5b960a580fed2e9d0f68a7d5b8abee3.hip | // !!! This is a file automatically generated by hipify!!!
/*
Cryptohaze Multiforcer & Wordyforcer - low performance GPU password cracking
Copyright (C) 2011 Bitweasil (http://www.cryptohaze.com/)
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
* @section DESCRIPTION
*
* This file implements 16HEX multihash cracking.
*/
#include <stdint.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
//#include "CUDA_Common/cuPrintf.cu"
#include "MFN_CUDA_device/MFN_CUDA_Common.h"
#include "MFN_CUDA_device/MFN_CUDA_incrementors.h"
#include "MFN_CUDA_device/MFN_CUDA_MD5.h"
#include "MFN_CUDA_device/MFN_CUDA_MD4.h"
#if !defined(__HIPCC__)
// define the keywords, so that the IDE does not complain about them
#define __global__
#define __device__
#define __shared__
#define __constant__
#define blockIdx.x 1
#define blockDim.x 1
#define threadIdx.x 1
#define __align__() /**/
#endif
/**
* The maximum password length supported by this hash type.
*/
#define MFN_HASH_TYPE_PLAIN_CUDA_16HEX_MAX_PASSLEN 48
/**
* The maximum charset length supported by this hash type.
*/
#define MFN_HASH_TYPE_PLAIN_CUDA_16HEX_MAX_CHARSET_LENGTH 128
// Define the constant types used by the kernels here.
__device__ __constant__ __align__(16) uint8_t constantBitmapAPlain16HEX[8192];
__device__ __constant__ __align__(16) uint8_t deviceCharsetPlain16HEX[MFN_HASH_TYPE_PLAIN_CUDA_16HEX_MAX_CHARSET_LENGTH * \
MFN_HASH_TYPE_PLAIN_CUDA_16HEX_MAX_PASSLEN];
__device__ __constant__ __align__(16) uint8_t deviceReverseCharsetPlain16HEX[MFN_HASH_TYPE_PLAIN_CUDA_16HEX_MAX_CHARSET_LENGTH * \
MFN_HASH_TYPE_PLAIN_CUDA_16HEX_MAX_PASSLEN];
__device__ __constant__ uint8_t charsetLengthsPlain16HEX[MFN_HASH_TYPE_PLAIN_CUDA_16HEX_MAX_PASSLEN];
/**
* Constant parameters go here instead of getting passed as kernel arguments.
* This allows for faster accesses (as they are cached, and all threads will
* be accessing the same element), and also reduces the shared memory usage,
* which may allow for better occupancy in the future. The kernels will load
* these as needed, and theoretically will not need registers for some of them,
* which will help reduce the register pressure on kernels. Hopefully.
*/
// Password length. Needed for some offset calculations.
__device__ __constant__ uint8_t passwordLengthPlain16HEX;
// Number of hashes present in memory.
__device__ __constant__ uint64_t numberOfHashesPlain16HEX;
// Address of the hashlist in global memory.
__device__ __constant__ uint8_t *deviceGlobalHashlistAddressPlain16HEX;
// Addresses of the various global bitmaps.
__device__ __constant__ uint8_t *deviceGlobalBitmapAPlain16HEX;
__device__ __constant__ uint8_t *deviceGlobalBitmapBPlain16HEX;
__device__ __constant__ uint8_t *deviceGlobalBitmapCPlain16HEX;
__device__ __constant__ uint8_t *deviceGlobalBitmapDPlain16HEX;
__device__ __constant__ uint8_t *deviceGlobalBitmap256kPlain16HEX;
// Addresses of the arrays for found passwords & success flags
__device__ __constant__ uint8_t *deviceGlobalFoundPasswordsPlain16HEX;
__device__ __constant__ uint8_t *deviceGlobalFoundPasswordFlagsPlain16HEX;
__device__ __constant__ uint8_t *deviceGlobalStartPointsPlain16HEX;
__device__ __constant__ uint32_t *deviceGlobalStartPasswords32Plain16HEX;
__device__ __constant__ uint32_t deviceNumberStepsToRunPlain16HEX;
__device__ __constant__ uint64_t deviceNumberThreadsPlain16HEX;
__constant__ char hexLookupValues16HEX[16] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'};
extern __shared__ uint32_t plainStorage16HEX[];
/**
 * Generates the __global__ search kernel
 * MFNHashTypePlainCUDA_16HEX_GeneratedKernel_<pass_len> for one fixed
 * password length. Each expansion: stages the charsets/bitmap/hex lookup
 * tables into shared memory (thread 0 only, then a barrier), loads the
 * per-thread start passwords, and for deviceNumberStepsToRunPlain16HEX
 * iterations checks each candidate against four hash variants - single MD5,
 * double MD5 (MD5 of the lowercase-hex MD5 string), plain MD4 and NTLM -
 * before advancing the candidate via the single- or multi-charset
 * incrementors. Final candidate state is written back to global memory.
 *
 * The a/b/c/d registers receive the hash words; the nested if-chains are
 * the successive bitmap filters that reject non-matching hashes cheaply
 * before the full hashlist check.
 *
 * NOTE(review): `syncthreads()` (no leading underscores) is presumably a
 * wrapper defined in MFN_CUDA_Common.h - confirm it maps to __syncthreads().
 */
#define MAKE_MFN_16HEX_KERNEL1_8LENGTH(pass_len) \
__global__ void MFNHashTypePlainCUDA_16HEX_GeneratedKernel_##pass_len () { \
    uint32_t b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15, a, b, c, d; \
    uint32_t password_count = 0, passOffset; \
    __shared__ uint8_t __align__(16) sharedCharsetPlain16HEX[MFN_HASH_TYPE_PLAIN_CUDA_16HEX_MAX_CHARSET_LENGTH * pass_len]; \
    __shared__ uint8_t __align__(16) sharedReverseCharsetPlain16HEX[MFN_HASH_TYPE_PLAIN_CUDA_16HEX_MAX_CHARSET_LENGTH * pass_len]; \
    __shared__ uint8_t sharedCharsetLengthsPlain16HEX[pass_len]; \
    __shared__ uint8_t __align__(16) sharedBitmap[8192]; \
    __shared__ uint8_t hashLookup[256][2]; \
    if (threadIdx.x == 0) { \
        uint64_t *sharedCharset64 = (uint64_t *)sharedCharsetPlain16HEX; \
        uint64_t *deviceCharset64 = (uint64_t *)deviceCharsetPlain16HEX; \
        uint64_t *sharedReverseCharset64 = (uint64_t *)sharedReverseCharsetPlain16HEX; \
        uint64_t *deviceReverseCharset64 = (uint64_t *)deviceReverseCharsetPlain16HEX; \
        uint64_t *constantBitmap64 = (uint64_t *)constantBitmapAPlain16HEX; \
        uint64_t *sharedBitmap64 = (uint64_t *)sharedBitmap; \
        for (a = 0; a < ((MFN_HASH_TYPE_PLAIN_CUDA_16HEX_MAX_CHARSET_LENGTH * pass_len) / 8); a++) { \
            sharedCharset64[a] = deviceCharset64[a]; \
            sharedReverseCharset64[a] = deviceReverseCharset64[a]; \
        } \
        for (a = 0; a < pass_len; a++) { \
            sharedCharsetLengthsPlain16HEX[a] = charsetLengthsPlain16HEX[a]; \
        } \
        for (a = 0; a < 8192 / 8; a++) { \
            sharedBitmap64[a] = constantBitmap64[a]; \
        } \
        for (a = 0; a < 256; a++) { \
            hashLookup[a][0] = hexLookupValues16HEX[a / 16]; \
            hashLookup[a][1] = hexLookupValues16HEX[a % 16]; \
        } \
    } \
    syncthreads(); \
    b0 = b1 = b2 = b3 = b4 = b5 = b6 = b7 = b8 = b9 = b10 = b11 = b12 = b13 = b14 = b15 = 0; \
    loadPasswords32(deviceGlobalStartPasswords32Plain16HEX, deviceNumberThreadsPlain16HEX, pass_len); \
    while (password_count < deviceNumberStepsToRunPlain16HEX) { \
        /* Store the plains in the allocated space so they are available if an
         * algorithm such as SHA1 destroys them - or if we need to load them for
         * NTLM or something else like that. */ \
        StoreNormalPasswordInShared(plainStorage16HEX, pass_len); \
        b14 = pass_len * 8; \
        MD5_FULL_HASH(); \
        if ((sharedBitmap[(a & 0x0000ffff) >> 3] >> (a & 0x00000007)) & 0x00000001) { \
            if (!(deviceGlobalBitmap256kPlain16HEX) || ((deviceGlobalBitmap256kPlain16HEX[(a >> 3) & 0x0003FFFF] >> (a & 0x7)) & 0x1)) { \
                if (!(deviceGlobalBitmapAPlain16HEX) || ((deviceGlobalBitmapAPlain16HEX[(a >> 3) & 0x07FFFFFF] >> (a & 0x7)) & 0x1)) { \
                    if (!deviceGlobalBitmapDPlain16HEX || ((deviceGlobalBitmapDPlain16HEX[(d >> 3) & 0x07FFFFFF] >> (d & 0x7)) & 0x1)) { \
                        if (!deviceGlobalBitmapCPlain16HEX || ((deviceGlobalBitmapCPlain16HEX[(c >> 3) & 0x07FFFFFF] >> (c & 0x7)) & 0x1)) { \
                            if (!deviceGlobalBitmapBPlain16HEX || ((deviceGlobalBitmapBPlain16HEX[(b >> 3) & 0x07FFFFFF] >> (b & 0x7)) & 0x1)) { \
                                checkHashList128LE(a, b, c, d, b0, b1, b2, b3, \
                                    b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, \
                                    deviceGlobalFoundPasswordsPlain16HEX, deviceGlobalFoundPasswordFlagsPlain16HEX, \
                                    deviceGlobalHashlistAddressPlain16HEX, numberOfHashesPlain16HEX, \
                                    passwordLengthPlain16HEX, MFN_PASSWORD_SINGLE_MD5); \
    } } } } } } \
        LoadHash16AsLEString(hashLookup); \
        b8 = 0x00000080; \
        b14 = 32 * 8; \
        MD5_FULL_HASH(); \
        LoadNormalPasswordFromShared(plainStorage16HEX, pass_len); \
        if ((sharedBitmap[(a & 0x0000ffff) >> 3] >> (a & 0x00000007)) & 0x00000001) { \
            if (!(deviceGlobalBitmap256kPlain16HEX) || ((deviceGlobalBitmap256kPlain16HEX[(a >> 3) & 0x0003FFFF] >> (a & 0x7)) & 0x1)) { \
                if (!(deviceGlobalBitmapAPlain16HEX) || ((deviceGlobalBitmapAPlain16HEX[(a >> 3) & 0x07FFFFFF] >> (a & 0x7)) & 0x1)) { \
                    if (!deviceGlobalBitmapDPlain16HEX || ((deviceGlobalBitmapDPlain16HEX[(d >> 3) & 0x07FFFFFF] >> (d & 0x7)) & 0x1)) { \
                        if (!deviceGlobalBitmapCPlain16HEX || ((deviceGlobalBitmapCPlain16HEX[(c >> 3) & 0x07FFFFFF] >> (c & 0x7)) & 0x1)) { \
                            if (!deviceGlobalBitmapBPlain16HEX || ((deviceGlobalBitmapBPlain16HEX[(b >> 3) & 0x07FFFFFF] >> (b & 0x7)) & 0x1)) { \
                                checkHashList128LE(a, b, c, d, b0, b1, b2, b3, \
                                    b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, \
                                    deviceGlobalFoundPasswordsPlain16HEX, deviceGlobalFoundPasswordFlagsPlain16HEX, \
                                    deviceGlobalHashlistAddressPlain16HEX, numberOfHashesPlain16HEX, \
                                    passwordLengthPlain16HEX, MFN_PASSWORD_DOUBLE_MD5); \
    } } } } } } \
        b0 = b1 = b2 = b3 = b4 = b5 = b6 = b7 = b8 = b9 = b10 = b11 = b12 = b13 = b14 = b15 = 0; \
        LoadNormalPasswordFromShared(plainStorage16HEX, pass_len); \
        b14 = pass_len * 8; \
        /* MD4 uses the same length setup as MD5 - this is plain MD4, not NTLM. */ \
        MD4_FULL_HASH(); \
        if ((sharedBitmap[(a & 0x0000ffff) >> 3] >> (a & 0x00000007)) & 0x00000001) { \
            if (!(deviceGlobalBitmap256kPlain16HEX) || ((deviceGlobalBitmap256kPlain16HEX[(a >> 3) & 0x0003FFFF] >> (a & 0x7)) & 0x1)) { \
                if (!(deviceGlobalBitmapAPlain16HEX) || ((deviceGlobalBitmapAPlain16HEX[(a >> 3) & 0x07FFFFFF] >> (a & 0x7)) & 0x1)) { \
                    if (!deviceGlobalBitmapDPlain16HEX || ((deviceGlobalBitmapDPlain16HEX[(d >> 3) & 0x07FFFFFF] >> (d & 0x7)) & 0x1)) { \
                        if (!deviceGlobalBitmapCPlain16HEX || ((deviceGlobalBitmapCPlain16HEX[(c >> 3) & 0x07FFFFFF] >> (c & 0x7)) & 0x1)) { \
                            if (!deviceGlobalBitmapBPlain16HEX || ((deviceGlobalBitmapBPlain16HEX[(b >> 3) & 0x07FFFFFF] >> (b & 0x7)) & 0x1)) { \
                                checkHashList128LE(a, b, c, d, b0, b1, b2, b3, \
                                    b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, \
                                    deviceGlobalFoundPasswordsPlain16HEX, deviceGlobalFoundPasswordFlagsPlain16HEX, \
                                    deviceGlobalHashlistAddressPlain16HEX, numberOfHashesPlain16HEX, \
                                    passwordLengthPlain16HEX, MFN_PASSWORD_MD4); \
    } } } } } } \
        ExpandNTLMPasswordsFromShared(plainStorage16HEX, pass_len); \
        b14 = pass_len * 16; \
        MD4_FULL_HASH(); \
        checkHash128LENTLM(a, b, c, d, b0, b1, b2, b3, \
            b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, \
            sharedBitmap, \
            deviceGlobalBitmapAPlain16HEX, deviceGlobalBitmapBPlain16HEX, \
            deviceGlobalBitmapCPlain16HEX, deviceGlobalBitmapDPlain16HEX, \
            deviceGlobalFoundPasswordsPlain16HEX, deviceGlobalFoundPasswordFlagsPlain16HEX, \
            deviceGlobalHashlistAddressPlain16HEX, numberOfHashesPlain16HEX, \
            passwordLengthPlain16HEX); \
        /* Load the normal passwords back for the incrementors */ \
        b0 = b1 = b2 = b3 = b4 = b5 = b6 = b7 = b8 = b9 = b10 = b11 = b12 = b13 = b14 = b15 = 0; \
        LoadNormalPasswordFromShared(plainStorage16HEX, pass_len); \
        if (charsetLengthsPlain16HEX[1] == 0) { \
            makeMFNSingleIncrementors##pass_len (sharedCharsetPlain16HEX, sharedReverseCharsetPlain16HEX, sharedCharsetLengthsPlain16HEX); \
        } else { \
            makeMFNMultipleIncrementors##pass_len (sharedCharsetPlain16HEX, sharedReverseCharsetPlain16HEX, sharedCharsetLengthsPlain16HEX); \
        } \
        password_count++; \
    } \
    storePasswords32(deviceGlobalStartPasswords32Plain16HEX, deviceNumberThreadsPlain16HEX, pass_len); \
}
// Instantiate one kernel per supported password length (1..16); each line
// expands to MFNHashTypePlainCUDA_16HEX_GeneratedKernel_<len>.
MAKE_MFN_16HEX_KERNEL1_8LENGTH(1);
MAKE_MFN_16HEX_KERNEL1_8LENGTH(2);
MAKE_MFN_16HEX_KERNEL1_8LENGTH(3);
MAKE_MFN_16HEX_KERNEL1_8LENGTH(4);
MAKE_MFN_16HEX_KERNEL1_8LENGTH(5);
MAKE_MFN_16HEX_KERNEL1_8LENGTH(6);
MAKE_MFN_16HEX_KERNEL1_8LENGTH(7);
MAKE_MFN_16HEX_KERNEL1_8LENGTH(8);
MAKE_MFN_16HEX_KERNEL1_8LENGTH(9);
MAKE_MFN_16HEX_KERNEL1_8LENGTH(10);
MAKE_MFN_16HEX_KERNEL1_8LENGTH(11);
MAKE_MFN_16HEX_KERNEL1_8LENGTH(12);
MAKE_MFN_16HEX_KERNEL1_8LENGTH(13);
MAKE_MFN_16HEX_KERNEL1_8LENGTH(14);
MAKE_MFN_16HEX_KERNEL1_8LENGTH(15);
MAKE_MFN_16HEX_KERNEL1_8LENGTH(16);
/**
 * Copies bytesToCopy bytes from hostDataAddress into the device __constant__
 * variable identified by symbolName.
 *
 * NOTE(review): this forwards the symbol *name string* to hipMemcpyToSymbol.
 * Lookup-by-name was removed in CUDA 5.0 (the symbol address is expected
 * instead, and HIP follows the same convention) - confirm this resolves on
 * the targeted toolkit; otherwise callers must pass the symbol itself.
 */
extern "C" hipError_t MFNHashTypePlainCUDA_16HEX_CopyValueToConstant(
        const char *symbolName, void *hostDataAddress, size_t bytesToCopy) {
    return hipMemcpyToSymbol(symbolName, hostDataAddress, bytesToCopy);
}
extern "C" hipError_t MFNHashTypePlainCUDA_16HEX_LaunchKernel(uint32_t passwordLength, uint32_t Blocks, uint32_t Threads) {
//printf("MFNHashTypePlainCUDA_16HEX_LaunchKernel()\n");
// Calculate the amount of shared memory needed for the SHA1 kernels.
// This is used to store the passwords between operations.
int sharedMemoryBytesRequired = (((passwordLength + 1) / 4) + 1) * 4 * Threads;
//cudaPrintfInit();
switch (passwordLength) {
case 1:
hipLaunchKernelGGL(( MFNHashTypePlainCUDA_16HEX_GeneratedKernel_1) , dim3(Blocks), dim3(Threads), sharedMemoryBytesRequired , 0, );
break;
case 2:
hipLaunchKernelGGL(( MFNHashTypePlainCUDA_16HEX_GeneratedKernel_2) , dim3(Blocks), dim3(Threads), sharedMemoryBytesRequired , 0, );
break;
case 3:
hipLaunchKernelGGL(( MFNHashTypePlainCUDA_16HEX_GeneratedKernel_3) , dim3(Blocks), dim3(Threads), sharedMemoryBytesRequired , 0, );
break;
case 4:
hipLaunchKernelGGL(( MFNHashTypePlainCUDA_16HEX_GeneratedKernel_4) , dim3(Blocks), dim3(Threads), sharedMemoryBytesRequired , 0, );
break;
case 5:
hipLaunchKernelGGL(( MFNHashTypePlainCUDA_16HEX_GeneratedKernel_5) , dim3(Blocks), dim3(Threads), sharedMemoryBytesRequired , 0, );
break;
case 6:
hipLaunchKernelGGL(( MFNHashTypePlainCUDA_16HEX_GeneratedKernel_6) , dim3(Blocks), dim3(Threads), sharedMemoryBytesRequired , 0, );
break;
case 7:
hipLaunchKernelGGL(( MFNHashTypePlainCUDA_16HEX_GeneratedKernel_7) , dim3(Blocks), dim3(Threads), sharedMemoryBytesRequired , 0, );
break;
case 8:
hipLaunchKernelGGL(( MFNHashTypePlainCUDA_16HEX_GeneratedKernel_8) , dim3(Blocks), dim3(Threads), sharedMemoryBytesRequired , 0, );
break;
case 9:
hipLaunchKernelGGL(( MFNHashTypePlainCUDA_16HEX_GeneratedKernel_9) , dim3(Blocks), dim3(Threads), sharedMemoryBytesRequired , 0, );
break;
case 10:
hipLaunchKernelGGL(( MFNHashTypePlainCUDA_16HEX_GeneratedKernel_10) , dim3(Blocks), dim3(Threads), sharedMemoryBytesRequired , 0, );
break;
case 11:
hipLaunchKernelGGL(( MFNHashTypePlainCUDA_16HEX_GeneratedKernel_11) , dim3(Blocks), dim3(Threads), sharedMemoryBytesRequired , 0, );
break;
case 12:
hipLaunchKernelGGL(( MFNHashTypePlainCUDA_16HEX_GeneratedKernel_12) , dim3(Blocks), dim3(Threads), sharedMemoryBytesRequired , 0, );
break;
case 13:
hipLaunchKernelGGL(( MFNHashTypePlainCUDA_16HEX_GeneratedKernel_13) , dim3(Blocks), dim3(Threads), sharedMemoryBytesRequired , 0, );
break;
case 14:
hipLaunchKernelGGL(( MFNHashTypePlainCUDA_16HEX_GeneratedKernel_14) , dim3(Blocks), dim3(Threads), sharedMemoryBytesRequired , 0, );
break;
case 15:
hipLaunchKernelGGL(( MFNHashTypePlainCUDA_16HEX_GeneratedKernel_15) , dim3(Blocks), dim3(Threads), sharedMemoryBytesRequired , 0, );
break;
case 16:
hipLaunchKernelGGL(( MFNHashTypePlainCUDA_16HEX_GeneratedKernel_16) , dim3(Blocks), dim3(Threads), sharedMemoryBytesRequired , 0, );
break;
default:
printf("Password length %d unsupported!\n", passwordLength);
exit(1);
break;
}
//cudaPrintfDisplay(stdout, true);
//cudaPrintfEnd();
return hipGetLastError();
}
| c2102a08c5b960a580fed2e9d0f68a7d5b8abee3.cu | /*
Cryptohaze Multiforcer & Wordyforcer - low performance GPU password cracking
Copyright (C) 2011 Bitweasil (http://www.cryptohaze.com/)
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
* @section DESCRIPTION
*
* This file implements 16HEX multihash cracking.
*/
#include <stdint.h>
#include <stdio.h>
#include <cuda.h>
//#include "CUDA_Common/cuPrintf.cu"
#include "MFN_CUDA_device/MFN_CUDA_Common.h"
#include "MFN_CUDA_device/MFN_CUDA_incrementors.h"
#include "MFN_CUDA_device/MFN_CUDA_MD5.h"
#include "MFN_CUDA_device/MFN_CUDA_MD4.h"
#if !defined(__CUDACC__)
// define the keywords, so that the IDE does not complain about them
#define __global__
#define __device__
#define __shared__
#define __constant__
#define blockIdx.x 1
#define blockDim.x 1
#define threadIdx.x 1
#define __align__() /**/
#endif
/**
* The maximum password length supported by this hash type.
*/
#define MFN_HASH_TYPE_PLAIN_CUDA_16HEX_MAX_PASSLEN 48
/**
* The maximum charset length supported by this hash type.
*/
#define MFN_HASH_TYPE_PLAIN_CUDA_16HEX_MAX_CHARSET_LENGTH 128
// Define the constant types used by the kernels here.
__device__ __constant__ __align__(16) uint8_t constantBitmapAPlain16HEX[8192];
__device__ __constant__ __align__(16) uint8_t deviceCharsetPlain16HEX[MFN_HASH_TYPE_PLAIN_CUDA_16HEX_MAX_CHARSET_LENGTH * \
MFN_HASH_TYPE_PLAIN_CUDA_16HEX_MAX_PASSLEN];
__device__ __constant__ __align__(16) uint8_t deviceReverseCharsetPlain16HEX[MFN_HASH_TYPE_PLAIN_CUDA_16HEX_MAX_CHARSET_LENGTH * \
MFN_HASH_TYPE_PLAIN_CUDA_16HEX_MAX_PASSLEN];
__device__ __constant__ uint8_t charsetLengthsPlain16HEX[MFN_HASH_TYPE_PLAIN_CUDA_16HEX_MAX_PASSLEN];
/**
* Constant parameters go here instead of getting passed as kernel arguments.
* This allows for faster accesses (as they are cached, and all threads will
* be accessing the same element), and also reduces the shared memory usage,
* which may allow for better occupancy in the future. The kernels will load
* these as needed, and theoretically will not need registers for some of them,
* which will help reduce the register pressure on kernels. Hopefully.
*/
// Password length. Needed for some offset calculations.
__device__ __constant__ uint8_t passwordLengthPlain16HEX;
// Number of hashes present in memory.
__device__ __constant__ uint64_t numberOfHashesPlain16HEX;
// Address of the hashlist in global memory.
__device__ __constant__ uint8_t *deviceGlobalHashlistAddressPlain16HEX;
// Addresses of the various global bitmaps.
__device__ __constant__ uint8_t *deviceGlobalBitmapAPlain16HEX;
__device__ __constant__ uint8_t *deviceGlobalBitmapBPlain16HEX;
__device__ __constant__ uint8_t *deviceGlobalBitmapCPlain16HEX;
__device__ __constant__ uint8_t *deviceGlobalBitmapDPlain16HEX;
__device__ __constant__ uint8_t *deviceGlobalBitmap256kPlain16HEX;
// Addresses of the arrays for found passwords & success flags
__device__ __constant__ uint8_t *deviceGlobalFoundPasswordsPlain16HEX;
__device__ __constant__ uint8_t *deviceGlobalFoundPasswordFlagsPlain16HEX;
__device__ __constant__ uint8_t *deviceGlobalStartPointsPlain16HEX;
__device__ __constant__ uint32_t *deviceGlobalStartPasswords32Plain16HEX;
__device__ __constant__ uint32_t deviceNumberStepsToRunPlain16HEX;
__device__ __constant__ uint64_t deviceNumberThreadsPlain16HEX;
__constant__ char hexLookupValues16HEX[16] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'};
extern __shared__ uint32_t plainStorage16HEX[];
#define MAKE_MFN_16HEX_KERNEL1_8LENGTH(pass_len) \
__global__ void MFNHashTypePlainCUDA_16HEX_GeneratedKernel_##pass_len () { \
uint32_t b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15, a, b, c, d; \
uint32_t password_count = 0, passOffset; \
__shared__ uint8_t __align__(16) sharedCharsetPlain16HEX[MFN_HASH_TYPE_PLAIN_CUDA_16HEX_MAX_CHARSET_LENGTH * pass_len]; \
__shared__ uint8_t __align__(16) sharedReverseCharsetPlain16HEX[MFN_HASH_TYPE_PLAIN_CUDA_16HEX_MAX_CHARSET_LENGTH * pass_len]; \
__shared__ uint8_t sharedCharsetLengthsPlain16HEX[pass_len]; \
__shared__ uint8_t __align__(16) sharedBitmap[8192]; \
__shared__ uint8_t hashLookup[256][2]; \
if (threadIdx.x == 0) { \
uint64_t *sharedCharset64 = (uint64_t *)sharedCharsetPlain16HEX; \
uint64_t *deviceCharset64 = (uint64_t *)deviceCharsetPlain16HEX; \
uint64_t *sharedReverseCharset64 = (uint64_t *)sharedReverseCharsetPlain16HEX; \
uint64_t *deviceReverseCharset64 = (uint64_t *)deviceReverseCharsetPlain16HEX; \
uint64_t *constantBitmap64 = (uint64_t *)constantBitmapAPlain16HEX; \
uint64_t *sharedBitmap64 = (uint64_t *)sharedBitmap; \
for (a = 0; a < ((MFN_HASH_TYPE_PLAIN_CUDA_16HEX_MAX_CHARSET_LENGTH * pass_len) / 8); a++) { \
sharedCharset64[a] = deviceCharset64[a]; \
sharedReverseCharset64[a] = deviceReverseCharset64[a]; \
} \
for (a = 0; a < pass_len; a++) { \
sharedCharsetLengthsPlain16HEX[a] = charsetLengthsPlain16HEX[a]; \
} \
for (a = 0; a < 8192 / 8; a++) { \
sharedBitmap64[a] = constantBitmap64[a]; \
} \
for (a = 0; a < 256; a++) { \
hashLookup[a][0] = hexLookupValues16HEX[a / 16]; \
hashLookup[a][1] = hexLookupValues16HEX[a % 16]; \
} \
} \
syncthreads(); \
b0 = b1 = b2 = b3 = b4 = b5 = b6 = b7 = b8 = b9 = b10 = b11 = b12 = b13 = b14 = b15 = 0; \
loadPasswords32(deviceGlobalStartPasswords32Plain16HEX, deviceNumberThreadsPlain16HEX, pass_len); \
while (password_count < deviceNumberStepsToRunPlain16HEX) { \
/* Store the plains in the allocated space so they are available if an
* algorithm such as SHA1 destroys them - or if we need to load them for
* NTLM or something else like that. */ \
StoreNormalPasswordInShared(plainStorage16HEX, pass_len); \
b14 = pass_len * 8; \
MD5_FULL_HASH(); \
if ((sharedBitmap[(a & 0x0000ffff) >> 3] >> (a & 0x00000007)) & 0x00000001) { \
if (!(deviceGlobalBitmap256kPlain16HEX) || ((deviceGlobalBitmap256kPlain16HEX[(a >> 3) & 0x0003FFFF] >> (a & 0x7)) & 0x1)) { \
if (!(deviceGlobalBitmapAPlain16HEX) || ((deviceGlobalBitmapAPlain16HEX[(a >> 3) & 0x07FFFFFF] >> (a & 0x7)) & 0x1)) { \
if (!deviceGlobalBitmapDPlain16HEX || ((deviceGlobalBitmapDPlain16HEX[(d >> 3) & 0x07FFFFFF] >> (d & 0x7)) & 0x1)) { \
if (!deviceGlobalBitmapCPlain16HEX || ((deviceGlobalBitmapCPlain16HEX[(c >> 3) & 0x07FFFFFF] >> (c & 0x7)) & 0x1)) { \
if (!deviceGlobalBitmapBPlain16HEX || ((deviceGlobalBitmapBPlain16HEX[(b >> 3) & 0x07FFFFFF] >> (b & 0x7)) & 0x1)) { \
checkHashList128LE(a, b, c, d, b0, b1, b2, b3, \
b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, \
deviceGlobalFoundPasswordsPlain16HEX, deviceGlobalFoundPasswordFlagsPlain16HEX, \
deviceGlobalHashlistAddressPlain16HEX, numberOfHashesPlain16HEX, \
passwordLengthPlain16HEX, MFN_PASSWORD_SINGLE_MD5); \
} } } } } } \
LoadHash16AsLEString(hashLookup); \
b8 = 0x00000080; \
b14 = 32 * 8; \
MD5_FULL_HASH(); \
LoadNormalPasswordFromShared(plainStorage16HEX, pass_len); \
if ((sharedBitmap[(a & 0x0000ffff) >> 3] >> (a & 0x00000007)) & 0x00000001) { \
if (!(deviceGlobalBitmap256kPlain16HEX) || ((deviceGlobalBitmap256kPlain16HEX[(a >> 3) & 0x0003FFFF] >> (a & 0x7)) & 0x1)) { \
if (!(deviceGlobalBitmapAPlain16HEX) || ((deviceGlobalBitmapAPlain16HEX[(a >> 3) & 0x07FFFFFF] >> (a & 0x7)) & 0x1)) { \
if (!deviceGlobalBitmapDPlain16HEX || ((deviceGlobalBitmapDPlain16HEX[(d >> 3) & 0x07FFFFFF] >> (d & 0x7)) & 0x1)) { \
if (!deviceGlobalBitmapCPlain16HEX || ((deviceGlobalBitmapCPlain16HEX[(c >> 3) & 0x07FFFFFF] >> (c & 0x7)) & 0x1)) { \
if (!deviceGlobalBitmapBPlain16HEX || ((deviceGlobalBitmapBPlain16HEX[(b >> 3) & 0x07FFFFFF] >> (b & 0x7)) & 0x1)) { \
checkHashList128LE(a, b, c, d, b0, b1, b2, b3, \
b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, \
deviceGlobalFoundPasswordsPlain16HEX, deviceGlobalFoundPasswordFlagsPlain16HEX, \
deviceGlobalHashlistAddressPlain16HEX, numberOfHashesPlain16HEX, \
passwordLengthPlain16HEX, MFN_PASSWORD_DOUBLE_MD5); \
} } } } } } \
b0 = b1 = b2 = b3 = b4 = b5 = b6 = b7 = b8 = b9 = b10 = b11 = b12 = b13 = b14 = b15 = 0; \
LoadNormalPasswordFromShared(plainStorage16HEX, pass_len); \
b14 = pass_len * 8; \
/* MD4 uses the same length setup as MD5 - this is plain MD4, not NTLM. */ \
MD4_FULL_HASH(); \
if ((sharedBitmap[(a & 0x0000ffff) >> 3] >> (a & 0x00000007)) & 0x00000001) { \
if (!(deviceGlobalBitmap256kPlain16HEX) || ((deviceGlobalBitmap256kPlain16HEX[(a >> 3) & 0x0003FFFF] >> (a & 0x7)) & 0x1)) { \
if (!(deviceGlobalBitmapAPlain16HEX) || ((deviceGlobalBitmapAPlain16HEX[(a >> 3) & 0x07FFFFFF] >> (a & 0x7)) & 0x1)) { \
if (!deviceGlobalBitmapDPlain16HEX || ((deviceGlobalBitmapDPlain16HEX[(d >> 3) & 0x07FFFFFF] >> (d & 0x7)) & 0x1)) { \
if (!deviceGlobalBitmapCPlain16HEX || ((deviceGlobalBitmapCPlain16HEX[(c >> 3) & 0x07FFFFFF] >> (c & 0x7)) & 0x1)) { \
if (!deviceGlobalBitmapBPlain16HEX || ((deviceGlobalBitmapBPlain16HEX[(b >> 3) & 0x07FFFFFF] >> (b & 0x7)) & 0x1)) { \
checkHashList128LE(a, b, c, d, b0, b1, b2, b3, \
b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, \
deviceGlobalFoundPasswordsPlain16HEX, deviceGlobalFoundPasswordFlagsPlain16HEX, \
deviceGlobalHashlistAddressPlain16HEX, numberOfHashesPlain16HEX, \
passwordLengthPlain16HEX, MFN_PASSWORD_MD4); \
} } } } } } \
ExpandNTLMPasswordsFromShared(plainStorage16HEX, pass_len); \
b14 = pass_len * 16; \
MD4_FULL_HASH(); \
checkHash128LENTLM(a, b, c, d, b0, b1, b2, b3, \
b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, \
sharedBitmap, \
deviceGlobalBitmapAPlain16HEX, deviceGlobalBitmapBPlain16HEX, \
deviceGlobalBitmapCPlain16HEX, deviceGlobalBitmapDPlain16HEX, \
deviceGlobalFoundPasswordsPlain16HEX, deviceGlobalFoundPasswordFlagsPlain16HEX, \
deviceGlobalHashlistAddressPlain16HEX, numberOfHashesPlain16HEX, \
passwordLengthPlain16HEX); \
/* Load the normal passwords back for the incrementors */ \
b0 = b1 = b2 = b3 = b4 = b5 = b6 = b7 = b8 = b9 = b10 = b11 = b12 = b13 = b14 = b15 = 0; \
LoadNormalPasswordFromShared(plainStorage16HEX, pass_len); \
if (charsetLengthsPlain16HEX[1] == 0) { \
makeMFNSingleIncrementors##pass_len (sharedCharsetPlain16HEX, sharedReverseCharsetPlain16HEX, sharedCharsetLengthsPlain16HEX); \
} else { \
makeMFNMultipleIncrementors##pass_len (sharedCharsetPlain16HEX, sharedReverseCharsetPlain16HEX, sharedCharsetLengthsPlain16HEX); \
} \
password_count++; \
} \
storePasswords32(deviceGlobalStartPasswords32Plain16HEX, deviceNumberThreadsPlain16HEX, pass_len); \
}
MAKE_MFN_16HEX_KERNEL1_8LENGTH(1);
MAKE_MFN_16HEX_KERNEL1_8LENGTH(2);
MAKE_MFN_16HEX_KERNEL1_8LENGTH(3);
MAKE_MFN_16HEX_KERNEL1_8LENGTH(4);
MAKE_MFN_16HEX_KERNEL1_8LENGTH(5);
MAKE_MFN_16HEX_KERNEL1_8LENGTH(6);
MAKE_MFN_16HEX_KERNEL1_8LENGTH(7);
MAKE_MFN_16HEX_KERNEL1_8LENGTH(8);
MAKE_MFN_16HEX_KERNEL1_8LENGTH(9);
MAKE_MFN_16HEX_KERNEL1_8LENGTH(10);
MAKE_MFN_16HEX_KERNEL1_8LENGTH(11);
MAKE_MFN_16HEX_KERNEL1_8LENGTH(12);
MAKE_MFN_16HEX_KERNEL1_8LENGTH(13);
MAKE_MFN_16HEX_KERNEL1_8LENGTH(14);
MAKE_MFN_16HEX_KERNEL1_8LENGTH(15);
MAKE_MFN_16HEX_KERNEL1_8LENGTH(16);
extern "C" cudaError_t MFNHashTypePlainCUDA_16HEX_CopyValueToConstant(
const char *symbolName, void *hostDataAddress, size_t bytesToCopy) {
return cudaMemcpyToSymbol(symbolName, hostDataAddress, bytesToCopy);
}
extern "C" cudaError_t MFNHashTypePlainCUDA_16HEX_LaunchKernel(uint32_t passwordLength, uint32_t Blocks, uint32_t Threads) {
//printf("MFNHashTypePlainCUDA_16HEX_LaunchKernel()\n");
// Calculate the amount of shared memory needed for the SHA1 kernels.
// This is used to store the passwords between operations.
int sharedMemoryBytesRequired = (((passwordLength + 1) / 4) + 1) * 4 * Threads;
//cudaPrintfInit();
switch (passwordLength) {
case 1:
MFNHashTypePlainCUDA_16HEX_GeneratedKernel_1 <<< Blocks, Threads, sharedMemoryBytesRequired >>> ();
break;
case 2:
MFNHashTypePlainCUDA_16HEX_GeneratedKernel_2 <<< Blocks, Threads, sharedMemoryBytesRequired >>> ();
break;
case 3:
MFNHashTypePlainCUDA_16HEX_GeneratedKernel_3 <<< Blocks, Threads, sharedMemoryBytesRequired >>> ();
break;
case 4:
MFNHashTypePlainCUDA_16HEX_GeneratedKernel_4 <<< Blocks, Threads, sharedMemoryBytesRequired >>> ();
break;
case 5:
MFNHashTypePlainCUDA_16HEX_GeneratedKernel_5 <<< Blocks, Threads, sharedMemoryBytesRequired >>> ();
break;
case 6:
MFNHashTypePlainCUDA_16HEX_GeneratedKernel_6 <<< Blocks, Threads, sharedMemoryBytesRequired >>> ();
break;
case 7:
MFNHashTypePlainCUDA_16HEX_GeneratedKernel_7 <<< Blocks, Threads, sharedMemoryBytesRequired >>> ();
break;
case 8:
MFNHashTypePlainCUDA_16HEX_GeneratedKernel_8 <<< Blocks, Threads, sharedMemoryBytesRequired >>> ();
break;
case 9:
MFNHashTypePlainCUDA_16HEX_GeneratedKernel_9 <<< Blocks, Threads, sharedMemoryBytesRequired >>> ();
break;
case 10:
MFNHashTypePlainCUDA_16HEX_GeneratedKernel_10 <<< Blocks, Threads, sharedMemoryBytesRequired >>> ();
break;
case 11:
MFNHashTypePlainCUDA_16HEX_GeneratedKernel_11 <<< Blocks, Threads, sharedMemoryBytesRequired >>> ();
break;
case 12:
MFNHashTypePlainCUDA_16HEX_GeneratedKernel_12 <<< Blocks, Threads, sharedMemoryBytesRequired >>> ();
break;
case 13:
MFNHashTypePlainCUDA_16HEX_GeneratedKernel_13 <<< Blocks, Threads, sharedMemoryBytesRequired >>> ();
break;
case 14:
MFNHashTypePlainCUDA_16HEX_GeneratedKernel_14 <<< Blocks, Threads, sharedMemoryBytesRequired >>> ();
break;
case 15:
MFNHashTypePlainCUDA_16HEX_GeneratedKernel_15 <<< Blocks, Threads, sharedMemoryBytesRequired >>> ();
break;
case 16:
MFNHashTypePlainCUDA_16HEX_GeneratedKernel_16 <<< Blocks, Threads, sharedMemoryBytesRequired >>> ();
break;
default:
printf("Password length %d unsupported!\n", passwordLength);
exit(1);
break;
}
//cudaPrintfDisplay(stdout, true);
//cudaPrintfEnd();
return cudaGetLastError();
}
|
cc0a9fe5187bbe228bf8bada5535a22f6e1b0c92.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
@precisions normal d
*/
#include "common_magma.h"
#include "commonblas_d.h"
static __device__ void daxpy(double a,double *b, double *c) {
c[0] += a * b[0];
c[1] += a * b[1];
c[2] += a * b[2];
c[3] += a * b[3];
c[4] += a * b[4];
c[5] += a * b[5];
c[6] += a * b[6];
c[7] += a * b[7];
c[8] += a * b[8];
c[9] += a * b[9];
c[10] += a * b[10];
c[11] += a * b[11];
c[12] += a * b[12];
c[13] += a * b[13];
c[14] += a * b[14];
c[15] += a * b[15];
}
__global__ void
dgemm_kernel_N_N_64_16_16_16_4_special(double *C, const double *A, const double *B,
int m, int n, int k,
int lda, int ldb, int ldc,
double alpha, double beta)
{
/* -- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
Purpose:
========
This routine computes
C = alpha* A*B + beta * C
B is put into shared memory
Parameters Used:
blk_M=64 blk_N=16 blk_K=16 nthd_x=16 nthd_y=4
This kernel is for matrices devisible by the corresponding
blocking sizes.
=============================================================== */
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x * 64;
const int iby = blockIdx.y *16;
const int idt = ty * 16 + tx;
B+=tx+__mul24(iby+ty,ldb);
A += ibx + idt;
C += ibx +idt +__mul24( iby,ldc);
const double *Bend = B + k;
double Cb[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
m = 2*lda ;
n = 3*lda ;
do {
//double Ab[4] = {A[0], A[lda], A[2*lda], A[3*lda]};
double Ab[4] = {A[0], A[lda], A[m], A[n]};
__shared__ double Bb[16][17];
Bb[tx][ty+0] = B[0];
Bb[tx][ty+4] = B[4*ldb];
Bb[tx][ty+8] = B[8*ldb];
Bb[tx][ty+12] = B[12*ldb];
__syncthreads();
A += 4 * lda;
daxpy(Ab[0], &Bb[0][0], Cb); Ab[0] = A[0];
daxpy(Ab[1], &Bb[1][0], Cb); Ab[1] = A[lda];
daxpy(Ab[2], &Bb[2][0], Cb); Ab[2] = A[m];
daxpy(Ab[3], &Bb[3][0], Cb); Ab[3] = A[n];
A += 4 * lda;
daxpy(Ab[0], &Bb[4][0], Cb); Ab[0] = A[0];
daxpy(Ab[1], &Bb[5][0], Cb); Ab[1] = A[lda];
daxpy(Ab[2], &Bb[6][0], Cb); Ab[2] = A[m];
daxpy(Ab[3], &Bb[7][0], Cb); Ab[3] = A[n];
A += 4 * lda;
daxpy(Ab[0], &Bb[8][0], Cb); Ab[0] = A[0];
daxpy(Ab[1], &Bb[9][0], Cb); Ab[1] = A[lda];
daxpy(Ab[2], &Bb[10][0], Cb); Ab[2] = A[m];
daxpy(Ab[3], &Bb[11][0], Cb); Ab[3] = A[n];
A += 4 * lda;
daxpy(Ab[0], &Bb[12][0], Cb);
daxpy(Ab[1], &Bb[13][0], Cb);
daxpy(Ab[2], &Bb[14][0], Cb);
daxpy(Ab[3], &Bb[15][0], Cb);
B += 16;
__syncthreads();
} while (B < Bend);
#pragma unroll 16
for (int i = 0; i < 16; i++, C += ldc) {
C[0] =alpha*Cb[i] + beta * C[0];
}
}
extern "C" void
magmablas_dgemm_kernel_N_N_64_16_16_16_4_special(double *C,
const double *A,
const double *B,
magma_int_t m, magma_int_t n, magma_int_t k,
magma_int_t lda, magma_int_t ldb, magma_int_t ldc,
double alpha, double beta)
{
dim3 threads( 16, 4 );
dim3 grid(m/64,n/16);
hipLaunchKernelGGL(( dgemm_kernel_N_N_64_16_16_16_4_special), dim3(grid), dim3(threads), 0, magma_stream , C, A, B,
m, n, k,
lda, ldb, ldc,
alpha, beta);
}
| cc0a9fe5187bbe228bf8bada5535a22f6e1b0c92.cu | /*
-- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
@precisions normal d
*/
#include "common_magma.h"
#include "commonblas_d.h"
static __device__ void daxpy(double a,double *b, double *c) {
c[0] += a * b[0];
c[1] += a * b[1];
c[2] += a * b[2];
c[3] += a * b[3];
c[4] += a * b[4];
c[5] += a * b[5];
c[6] += a * b[6];
c[7] += a * b[7];
c[8] += a * b[8];
c[9] += a * b[9];
c[10] += a * b[10];
c[11] += a * b[11];
c[12] += a * b[12];
c[13] += a * b[13];
c[14] += a * b[14];
c[15] += a * b[15];
}
__global__ void
dgemm_kernel_N_N_64_16_16_16_4_special(double *C, const double *A, const double *B,
int m, int n, int k,
int lda, int ldb, int ldc,
double alpha, double beta)
{
/* -- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
Purpose:
========
This routine computes
C = alpha* A*B + beta * C
B is put into shared memory
Parameters Used:
blk_M=64 blk_N=16 blk_K=16 nthd_x=16 nthd_y=4
This kernel is for matrices devisible by the corresponding
blocking sizes.
=============================================================== */
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x * 64;
const int iby = blockIdx.y *16;
const int idt = ty * 16 + tx;
B+=tx+__mul24(iby+ty,ldb);
A += ibx + idt;
C += ibx +idt +__mul24( iby,ldc);
const double *Bend = B + k;
double Cb[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
m = 2*lda ;
n = 3*lda ;
do {
//double Ab[4] = {A[0], A[lda], A[2*lda], A[3*lda]};
double Ab[4] = {A[0], A[lda], A[m], A[n]};
__shared__ double Bb[16][17];
Bb[tx][ty+0] = B[0];
Bb[tx][ty+4] = B[4*ldb];
Bb[tx][ty+8] = B[8*ldb];
Bb[tx][ty+12] = B[12*ldb];
__syncthreads();
A += 4 * lda;
daxpy(Ab[0], &Bb[0][0], Cb); Ab[0] = A[0];
daxpy(Ab[1], &Bb[1][0], Cb); Ab[1] = A[lda];
daxpy(Ab[2], &Bb[2][0], Cb); Ab[2] = A[m];
daxpy(Ab[3], &Bb[3][0], Cb); Ab[3] = A[n];
A += 4 * lda;
daxpy(Ab[0], &Bb[4][0], Cb); Ab[0] = A[0];
daxpy(Ab[1], &Bb[5][0], Cb); Ab[1] = A[lda];
daxpy(Ab[2], &Bb[6][0], Cb); Ab[2] = A[m];
daxpy(Ab[3], &Bb[7][0], Cb); Ab[3] = A[n];
A += 4 * lda;
daxpy(Ab[0], &Bb[8][0], Cb); Ab[0] = A[0];
daxpy(Ab[1], &Bb[9][0], Cb); Ab[1] = A[lda];
daxpy(Ab[2], &Bb[10][0], Cb); Ab[2] = A[m];
daxpy(Ab[3], &Bb[11][0], Cb); Ab[3] = A[n];
A += 4 * lda;
daxpy(Ab[0], &Bb[12][0], Cb);
daxpy(Ab[1], &Bb[13][0], Cb);
daxpy(Ab[2], &Bb[14][0], Cb);
daxpy(Ab[3], &Bb[15][0], Cb);
B += 16;
__syncthreads();
} while (B < Bend);
#pragma unroll 16
for (int i = 0; i < 16; i++, C += ldc) {
C[0] =alpha*Cb[i] + beta * C[0];
}
}
extern "C" void
magmablas_dgemm_kernel_N_N_64_16_16_16_4_special(double *C,
const double *A,
const double *B,
magma_int_t m, magma_int_t n, magma_int_t k,
magma_int_t lda, magma_int_t ldb, magma_int_t ldc,
double alpha, double beta)
{
dim3 threads( 16, 4 );
dim3 grid(m/64,n/16);
dgemm_kernel_N_N_64_16_16_16_4_special<<< grid, threads, 0, magma_stream >>>(C, A, B,
m, n, k,
lda, ldb, ldc,
alpha, beta);
}
|
2f4bdb875ecc11aa50a5734276236575139094f4.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include <stdlib.h>
#include <stdio.h>
#define CUDA_CHECK(call) \
{ \
const hipError_t error = call; \
if (error != hipSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
hipGetErrorString(error)); \
exit(1); \
} \
}
double cpuSecond()
/*< return cpu time>*/
{
struct timeval tp;
gettimeofday(&tp, NULL);
return ((double)(tp.tv_sec)+(double)tp.tv_usec*1.e-6);
}
__device__ int getGlobalIdx_1D_1D()
/*< device get GlobalIdx with 1D grid 1D block >*/
{
return blockIdx.x * blockDim.x + threadIdx.x;
}
__device__ int getGlobalIdx_1D_2D()
/*< device get GlobalIdx with 1D grid 2D block >*/
{
return blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
}
__device__ int getGlobalIdx_1D_3D()
/*< device get GlobalIdx with 1D grid 3D block >*/
{
return blockIdx.x * blockDim.x * blockDim.y * blockDim.z \
+ threadIdx.z * blockDim.x * blockDim.y \
+ threadIdx.y * blockDim.x + threadIdx.x;
}
__device__ int getGlobalIdx_2D_1D()
/*< device get GlobalIdx with 2D grid 1D block >*/
{
int blockId = blockIdx.y * gridDim.x + blockIdx.x;
return blockId * blockDim.x + threadIdx.x;
}
__device__ int getGlobalIdx_2D_2D()
/*< device get GlobalIdx with 2D grid 2D block >*/
{
int blockId = blockIdx.y * gridDim.x + blockIdx.x;
return blockId * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
}
__device__ int getGlobalIdx_2D_3D()
/*< device get GlobalIdx with 2D grid 3D block >*/
{
int blockId = blockIdx.y * gridDim.x + blockIdx.x;
int threadId = blockId * blockDim.x * blockDim.y * blockDim.z \
+ threadIdx.z * blockDim.x * blockDim.y \
+ threadIdx.y * blockDim.x + threadIdx.x;
return threadId;
}
__device__ int getGlobalIdx_3D_1D()
/*< device get GlobalIdx with 3D grid 1D block >*/
{
int blockId = blockIdx.z * gridDim.x * gridDim.y + blockIdx.y * gridDim.x + blockIdx.x;
int threadId = blockId * blockDim.x + threadIdx.x;
return threadId;
}
__device__ int getGlobalIdx_3D_2D()
/*< device get GlobalIdx with 3D grid 2D block >*/
{
int blockId = blockIdx.z * gridDim.x * gridDim.y + blockIdx.y * gridDim.x + blockIdx.x;
return blockId * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
}
__device__ int getGlobalIdx_3D_3D()
/*< device get GlobalIdx with 3D grid 3D block >*/
{
int blockId = blockIdx.z * gridDim.x * gridDim.y + blockIdx.y * gridDim.x + blockIdx.x;
int threadId = blockId * blockDim.x * blockDim.y * blockDim.z \
+ threadIdx.z * blockDim.x * blockDim.y \
+ threadIdx.y * blockDim.x + threadIdx.x ;
return threadId;
}
| 2f4bdb875ecc11aa50a5734276236575139094f4.cu | #include <cuda_runtime.h>
#include <sys/time.h>
#include <stdlib.h>
#include <stdio.h>
#define CUDA_CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
cudaGetErrorString(error)); \
exit(1); \
} \
}
double cpuSecond()
/*< return cpu time>*/
{
struct timeval tp;
gettimeofday(&tp, NULL);
return ((double)(tp.tv_sec)+(double)tp.tv_usec*1.e-6);
}
__device__ int getGlobalIdx_1D_1D()
/*< device get GlobalIdx with 1D grid 1D block >*/
{
return blockIdx.x * blockDim.x + threadIdx.x;
}
__device__ int getGlobalIdx_1D_2D()
/*< device get GlobalIdx with 1D grid 2D block >*/
{
return blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
}
__device__ int getGlobalIdx_1D_3D()
/*< device get GlobalIdx with 1D grid 3D block >*/
{
return blockIdx.x * blockDim.x * blockDim.y * blockDim.z \
+ threadIdx.z * blockDim.x * blockDim.y \
+ threadIdx.y * blockDim.x + threadIdx.x;
}
__device__ int getGlobalIdx_2D_1D()
/*< device get GlobalIdx with 2D grid 1D block >*/
{
int blockId = blockIdx.y * gridDim.x + blockIdx.x;
return blockId * blockDim.x + threadIdx.x;
}
__device__ int getGlobalIdx_2D_2D()
/*< device get GlobalIdx with 2D grid 2D block >*/
{
int blockId = blockIdx.y * gridDim.x + blockIdx.x;
return blockId * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
}
__device__ int getGlobalIdx_2D_3D()
/*< device get GlobalIdx with 2D grid 3D block >*/
{
int blockId = blockIdx.y * gridDim.x + blockIdx.x;
int threadId = blockId * blockDim.x * blockDim.y * blockDim.z \
+ threadIdx.z * blockDim.x * blockDim.y \
+ threadIdx.y * blockDim.x + threadIdx.x;
return threadId;
}
__device__ int getGlobalIdx_3D_1D()
/*< device get GlobalIdx with 3D grid 1D block >*/
{
int blockId = blockIdx.z * gridDim.x * gridDim.y + blockIdx.y * gridDim.x + blockIdx.x;
int threadId = blockId * blockDim.x + threadIdx.x;
return threadId;
}
__device__ int getGlobalIdx_3D_2D()
/*< device get GlobalIdx with 3D grid 2D block >*/
{
int blockId = blockIdx.z * gridDim.x * gridDim.y + blockIdx.y * gridDim.x + blockIdx.x;
return blockId * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
}
__device__ int getGlobalIdx_3D_3D()
/*< device get GlobalIdx with 3D grid 3D block >*/
{
int blockId = blockIdx.z * gridDim.x * gridDim.y + blockIdx.y * gridDim.x + blockIdx.x;
int threadId = blockId * blockDim.x * blockDim.y * blockDim.z \
+ threadIdx.z * blockDim.x * blockDim.y \
+ threadIdx.y * blockDim.x + threadIdx.x ;
return threadId;
}
|
12b7e6576f7488844ee9d6944af4d0f537121c19.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CudaCommon.h"
__global__
void computeVoxelIdxKernel(
float *pts,
float *min_xyz,
unsigned int *voxel_idxs,
int pt_num,
int pt_stride,
float voxel_len
)
{
int pt_index = threadIdx.y + blockIdx.y*blockDim.y;
if(pt_index>=pt_num)
return;
float x=pts[pt_index*pt_stride];
float y=pts[pt_index*pt_stride+1];
float z=pts[pt_index*pt_stride+2];
float eps=1e-3;
float min_x=min_xyz[0]+eps;
float min_y=min_xyz[1]+eps;
float min_z=min_xyz[2]+eps;
voxel_idxs[pt_index*3] = floor((x-min_x)/voxel_len);
voxel_idxs[pt_index*3+1] = floor((y-min_y)/voxel_len);
voxel_idxs[pt_index*3+2] = floor((z-min_z)/voxel_len);
}
void computeVoxelIdxImpl(
float* pts,
float* min_xyz,
unsigned int* voxel_idxs,
int pt_num,
int pt_stride,
float voxel_len
)
{
int block_num=pt_num/1024;
if(pt_num%1024>0) block_num++;
dim3 block_dim(1,block_num);
dim3 thread_dim(1,1024);
hipLaunchKernelGGL(( computeVoxelIdxKernel), dim3(block_dim),dim3(thread_dim), 0, 0,
pts,min_xyz,voxel_idxs,pt_num,pt_stride,voxel_len
);
gpuErrchk(hipGetLastError())
} | 12b7e6576f7488844ee9d6944af4d0f537121c19.cu | #include "CudaCommon.h"
__global__
void computeVoxelIdxKernel(
float *pts,
float *min_xyz,
unsigned int *voxel_idxs,
int pt_num,
int pt_stride,
float voxel_len
)
{
int pt_index = threadIdx.y + blockIdx.y*blockDim.y;
if(pt_index>=pt_num)
return;
float x=pts[pt_index*pt_stride];
float y=pts[pt_index*pt_stride+1];
float z=pts[pt_index*pt_stride+2];
float eps=1e-3;
float min_x=min_xyz[0]+eps;
float min_y=min_xyz[1]+eps;
float min_z=min_xyz[2]+eps;
voxel_idxs[pt_index*3] = floor((x-min_x)/voxel_len);
voxel_idxs[pt_index*3+1] = floor((y-min_y)/voxel_len);
voxel_idxs[pt_index*3+2] = floor((z-min_z)/voxel_len);
}
void computeVoxelIdxImpl(
float* pts,
float* min_xyz,
unsigned int* voxel_idxs,
int pt_num,
int pt_stride,
float voxel_len
)
{
int block_num=pt_num/1024;
if(pt_num%1024>0) block_num++;
dim3 block_dim(1,block_num);
dim3 thread_dim(1,1024);
computeVoxelIdxKernel<<<block_dim,thread_dim>>>(
pts,min_xyz,voxel_idxs,pt_num,pt_stride,voxel_len
);
gpuErrchk(cudaGetLastError())
} |
e96506c4771648abde2091ed34bf30d7410e85a4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
STREAM benchmark implementation in CUDA.
COPY: a(i) = b(i)
SCALE: a(i) = q*b(i)
SUM: a(i) = b(i) + c(i)
TRIAD: a(i) = b(i) + q*c(i)
It measures the memory system on the device.
The implementation is in double precision.
Code based on the code developed by John D. McCalpin
http://www.cs.virginia.edu/stream/FTP/Code/stream.c
Written by: Massimiliano Fatica, NVIDIA Corporation
Further modifications by: Ben Cumming, CSCS; Andreas Herten (JSC/FZJ)
*/
#define NTIMES 20
#include <string>
#include <vector>
#include <iostream>
#include <sstream>
#include <stdio.h>
#include <float.h>
#include <limits.h>
#include <unistd.h>
#include <sys/time.h>
#include <sys/time.h>
#define CUDA_SAFE_CALL(expr) \
do { \
hipError_t err = (expr); \
if (err != hipSuccess) { \
std::cerr << "[Error] '" << expr << "' failed : " \
<< hipGetErrorString(err) \
<< "(error code: " << err << ")" \
<< " at " << __FILE__ << ":" << __LINE__ \
<< std::endl; \
exit(err); \
} \
} while(0)
# ifndef MIN
# define MIN(x,y) ((x)<(y)?(x):(y))
# endif
# ifndef MAX
# define MAX(x,y) ((x)>(y)?(x):(y))
# endif
typedef double real;
static double avgtime[4] = {0}, maxtime[4] = {0},
mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX};
// Print command-line usage to stdout (used for -h and on bad options).
void print_help()
{
    const char* usage =
        "Usage: stream [-s] [-n <elements>] [-b <blocksize>]\n\n"
        " -s\n"
        " Print results in SI units (by default IEC units are used)\n\n"
        " -n <elements>\n"
        " Put <elements> values in the arrays\n"
        " (defaults to 1<<26)\n\n"
        " -b <blocksize>\n"
        " Use <blocksize> as the number of threads in each block\n"
        " (defaults to 192)\n";
    printf("%s", usage);
}
/**
 * Parse command-line options into the output references.
 *
 *   -s         : report in SI units (SI = true)
 *   -n <elems> : number of array elements (default 1<<26)
 *   -b <block> : threads per block (default 192)
 *   -h         : print help and exit(0); any unknown option prints help
 *                and exit(1).
 *
 * Fix: the original reused one std::stringstream across getopt iterations;
 * after the first extraction its eof/fail bits stayed set, so a repeated
 * -n argument was silently ignored. A fresh stream per occurrence avoids
 * the stale stream state.
 */
void parse_options(int argc, char** argv, bool& SI, size_t& N, int& blockSize)
{
    // Default values
    SI = false;
    N = 1<<26;
    blockSize = 192;

    int c;
    while ((c = getopt (argc, argv, "sn:b:h")) != -1)
        switch (c)
        {
            case 's':
                SI = true;
                break;
            case 'n':
            {
                // Fresh stream per -n so state from a previous parse
                // cannot leak into this one.
                std::stringstream ss(optarg);
                ss >> N;
                break;
            }
            case 'b':
                blockSize = std::atoi(optarg);
                break;
            case 'h':
                print_help();
                std::exit(0);
                break;
            default:
                print_help();
                std::exit(1);
        }
}
/* A gettimeofday routine to give access to the wall
clock timer on most UNIX-like systems. */
/**
 * Wall-clock time in seconds with microsecond resolution (gettimeofday).
 *
 * Fix: the original stored gettimeofday's return code in an unused local
 * `int i` and passed an obsolete struct timezone; POSIX specifies NULL for
 * the timezone argument.
 */
double mysecond()
{
    struct timeval tp;
    gettimeofday(&tp, NULL);
    return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
}
// Fill a[0..len) with `value`; one element per thread, tail-guarded.
template <typename T>
__global__ void set_array(T * __restrict__ const a, T value, size_t len)
{
    const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= len)
        return;
    a[i] = value;
}
// COPY kernel: b(i) = a(i); one element per thread, tail-guarded.
template <typename T>
__global__ void STREAM_Copy(T const * __restrict__ const a, T * __restrict__ const b, size_t len)
{
    const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= len)
        return;
    b[i] = a[i];
}
// SCALE kernel: b(i) = scale * a(i); one element per thread, tail-guarded.
template <typename T>
__global__ void STREAM_Scale(T const * __restrict__ const a, T * __restrict__ const b, T scale, size_t len)
{
    const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= len)
        return;
    b[i] = scale * a[i];
}
// ADD kernel: c(i) = a(i) + b(i); one element per thread, tail-guarded.
template <typename T>
__global__ void STREAM_Add(T const * __restrict__ const a, T const * __restrict__ const b, T * __restrict__ const c, size_t len)
{
    const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= len)
        return;
    c[i] = a[i] + b[i];
}
// TRIAD kernel: c(i) = a(i) + scalar * b(i); one element per thread, tail-guarded.
template <typename T>
__global__ void STREAM_Triad(T const * __restrict__ a, T const * __restrict__ b, T * __restrict__ const c, T scalar, size_t len)
{
    const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= len)
        return;
    c[i] = a[i] + scalar * b[i];
}
// STREAM benchmark driver (HIP build): allocates three device arrays, runs
// the four kernels NTIMES times, and reports bandwidth from the minimum
// observed time per kernel. The first iteration is discarded as warm-up.
int main(int argc, char** argv)
{
    real *d_a, *d_b, *d_c;          // device arrays
    int j,k;
    double times[4][NTIMES];        // wall time per kernel per iteration
    real scalar;
    std::vector<std::string> label{"Copy: ", "Scale: ", "Add: ", "Triad: "};
    // Parse arguments
    bool SI;
    size_t N;
    int blockSize;
    parse_options(argc, argv, SI, N, blockSize);
    printf(" STREAM Benchmark implementation in CUDA\n");
    printf(" Array size (%s precision) =%7.2f MB\n", sizeof(double)==sizeof(real)?"double":"single", double(N)*double(sizeof(real))/1.e6);
    /* Allocate memory on device */
    CUDA_SAFE_CALL(hipMalloc((void**)&d_a, sizeof(real)*N));
    CUDA_SAFE_CALL(hipMalloc((void**)&d_b, sizeof(real)*N));
    CUDA_SAFE_CALL(hipMalloc((void**)&d_c, sizeof(real)*N));
    /* Compute execution configuration: ceil(N / blockSize) blocks */
    dim3 dimBlock(blockSize);
    dim3 dimGrid(N / dimBlock.x);
    if( N % dimBlock.x != 0 ) {
        dimGrid.x += 1;
    }
    printf(" using %d threads per block, %d blocks\n",dimBlock.x,dimGrid.x);
    if (SI)
        printf(" output in SI units (KB = 1000 B)\n");
    else
        printf(" output in IEC units (KiB = 1024 B)\n");
    /* Initialize memory on the device */
    hipLaunchKernelGGL(( set_array<real>), dim3(dimGrid),dim3(dimBlock), 0, 0, d_a, 2.f, N);
    CUDA_SAFE_CALL(hipDeviceSynchronize());
    hipLaunchKernelGGL(( set_array<real>), dim3(dimGrid),dim3(dimBlock), 0, 0, d_b, .5f, N);
    CUDA_SAFE_CALL(hipDeviceSynchronize());
    hipLaunchKernelGGL(( set_array<real>), dim3(dimGrid),dim3(dimBlock), 0, 0, d_c, .5f, N);
    CUDA_SAFE_CALL(hipDeviceSynchronize());
    /* --- MAIN LOOP --- repeat test cases NTIMES times --- */
    scalar=3.0f;
    for (k=0; k<NTIMES; k++)
    {
        // Each section is timed around a blocking synchronize so the wall
        // time covers the full kernel execution, not just the launch.
        times[0][k]= mysecond();
        hipLaunchKernelGGL(( STREAM_Copy<real>), dim3(dimGrid),dim3(dimBlock), 0, 0, d_a, d_c, N);
        CUDA_SAFE_CALL(hipDeviceSynchronize());
        times[0][k]= mysecond() - times[0][k];
        times[1][k]= mysecond();
        hipLaunchKernelGGL(( STREAM_Scale<real>), dim3(dimGrid),dim3(dimBlock), 0, 0, d_b, d_c, scalar, N);
        CUDA_SAFE_CALL(hipDeviceSynchronize());
        times[1][k]= mysecond() - times[1][k];
        times[2][k]= mysecond();
        hipLaunchKernelGGL(( STREAM_Add<real>), dim3(dimGrid),dim3(dimBlock), 0, 0, d_a, d_b, d_c, N);
        CUDA_SAFE_CALL(hipDeviceSynchronize());
        times[2][k]= mysecond() - times[2][k];
        times[3][k]= mysecond();
        hipLaunchKernelGGL(( STREAM_Triad<real>), dim3(dimGrid),dim3(dimBlock), 0, 0, d_b, d_c, d_a, scalar, N);
        CUDA_SAFE_CALL(hipDeviceSynchronize());
        times[3][k]= mysecond() - times[3][k];
    }
    /* --- SUMMARY --- */
    for (k=1; k<NTIMES; k++) /* note -- skip first iteration */
    {
        for (j=0; j<4; j++)
        {
            avgtime[j] = avgtime[j] + times[j][k];
            mintime[j] = MIN(mintime[j], times[j][k]);
            maxtime[j] = MAX(maxtime[j], times[j][k]);
        }
    }
    // Bytes moved per kernel: copy/scale touch 2 arrays, add/triad touch 3.
    double bytes[4] = {
        2 * sizeof(real) * (double)N,
        2 * sizeof(real) * (double)N,
        3 * sizeof(real) * (double)N,
        3 * sizeof(real) * (double)N
    };
    // Use right units
    const double G = SI ? 1.e9 : static_cast<double>(1<<30);
    printf("\nFunction Rate %s Avg time(s) Min time(s) Max time(s)\n",
           SI ? "(GB/s) " : "(GiB/s)" );
    printf("-----------------------------------------------------------------\n");
    for (j=0; j<4; j++) {
        avgtime[j] = avgtime[j]/(double)(NTIMES-1);
        printf("%s%11.4f %11.8f %11.8f %11.8f\n", label[j].c_str(),
               bytes[j]/mintime[j] / G,
               avgtime[j],
               mintime[j],
               maxtime[j]);
    }
    /* Free memory on device */
    // NOTE(review): hipFree return values are not checked here, unlike the
    // allocations above -- consider wrapping in CUDA_SAFE_CALL.
    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);
}
| e96506c4771648abde2091ed34bf30d7410e85a4.cu | /*
STREAM benchmark implementation in CUDA.
COPY: a(i) = b(i)
SCALE: a(i) = q*b(i)
SUM: a(i) = b(i) + c(i)
TRIAD: a(i) = b(i) + q*c(i)
It measures the memory system on the device.
The implementation is in double precision.
Code based on the code developed by John D. McCalpin
http://www.cs.virginia.edu/stream/FTP/Code/stream.c
Written by: Massimiliano Fatica, NVIDIA Corporation
Further modifications by: Ben Cumming, CSCS; Andreas Herten (JSC/FZJ)
*/
#define NTIMES 20
#include <string>
#include <vector>
#include <iostream>
#include <sstream>
#include <stdio.h>
#include <float.h>
#include <limits.h>
#include <unistd.h>
#include <sys/time.h>
#include <sys/time.h>
#define CUDA_SAFE_CALL(expr) \
do { \
cudaError_t err = (expr); \
if (err != cudaSuccess) { \
std::cerr << "[Error] '" << expr << "' failed : " \
<< cudaGetErrorString(err) \
<< "(error code: " << err << ")" \
<< " at " << __FILE__ << ":" << __LINE__ \
<< std::endl; \
exit(err); \
} \
} while(0)
# ifndef MIN
# define MIN(x,y) ((x)<(y)?(x):(y))
# endif
# ifndef MAX
# define MAX(x,y) ((x)>(y)?(x):(y))
# endif
typedef double real;
static double avgtime[4] = {0}, maxtime[4] = {0},
mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX};
// Print command-line usage to stdout (used for -h and on bad options).
void print_help()
{
    const char* usage =
        "Usage: stream [-s] [-n <elements>] [-b <blocksize>]\n\n"
        " -s\n"
        " Print results in SI units (by default IEC units are used)\n\n"
        " -n <elements>\n"
        " Put <elements> values in the arrays\n"
        " (defaults to 1<<26)\n\n"
        " -b <blocksize>\n"
        " Use <blocksize> as the number of threads in each block\n"
        " (defaults to 192)\n";
    printf("%s", usage);
}
/**
 * Parse command-line options into the output references.
 *
 *   -s         : report in SI units (SI = true)
 *   -n <elems> : number of array elements (default 1<<26)
 *   -b <block> : threads per block (default 192)
 *   -h         : print help and exit(0); any unknown option prints help
 *                and exit(1).
 *
 * Fix: the original reused one std::stringstream across getopt iterations;
 * after the first extraction its eof/fail bits stayed set, so a repeated
 * -n argument was silently ignored. A fresh stream per occurrence avoids
 * the stale stream state.
 */
void parse_options(int argc, char** argv, bool& SI, size_t& N, int& blockSize)
{
    // Default values
    SI = false;
    N = 1<<26;
    blockSize = 192;

    int c;
    while ((c = getopt (argc, argv, "sn:b:h")) != -1)
        switch (c)
        {
            case 's':
                SI = true;
                break;
            case 'n':
            {
                // Fresh stream per -n so state from a previous parse
                // cannot leak into this one.
                std::stringstream ss(optarg);
                ss >> N;
                break;
            }
            case 'b':
                blockSize = std::atoi(optarg);
                break;
            case 'h':
                print_help();
                std::exit(0);
                break;
            default:
                print_help();
                std::exit(1);
        }
}
/* A gettimeofday routine to give access to the wall
clock timer on most UNIX-like systems. */
/**
 * Wall-clock time in seconds with microsecond resolution (gettimeofday).
 *
 * Fix: the original stored gettimeofday's return code in an unused local
 * `int i` and passed an obsolete struct timezone; POSIX specifies NULL for
 * the timezone argument.
 */
double mysecond()
{
    struct timeval tp;
    gettimeofday(&tp, NULL);
    return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
}
// Fill a[0..len) with `value`; one element per thread, tail-guarded.
template <typename T>
__global__ void set_array(T * __restrict__ const a, T value, size_t len)
{
    const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= len)
        return;
    a[i] = value;
}
// COPY kernel: b(i) = a(i); one element per thread, tail-guarded.
template <typename T>
__global__ void STREAM_Copy(T const * __restrict__ const a, T * __restrict__ const b, size_t len)
{
    const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= len)
        return;
    b[i] = a[i];
}
// SCALE kernel: b(i) = scale * a(i); one element per thread, tail-guarded.
template <typename T>
__global__ void STREAM_Scale(T const * __restrict__ const a, T * __restrict__ const b, T scale, size_t len)
{
    const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= len)
        return;
    b[i] = scale * a[i];
}
// ADD kernel: c(i) = a(i) + b(i); one element per thread, tail-guarded.
template <typename T>
__global__ void STREAM_Add(T const * __restrict__ const a, T const * __restrict__ const b, T * __restrict__ const c, size_t len)
{
    const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= len)
        return;
    c[i] = a[i] + b[i];
}
// TRIAD kernel: c(i) = a(i) + scalar * b(i); one element per thread, tail-guarded.
template <typename T>
__global__ void STREAM_Triad(T const * __restrict__ a, T const * __restrict__ b, T * __restrict__ const c, T scalar, size_t len)
{
    const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= len)
        return;
    c[i] = a[i] + scalar * b[i];
}
/**
 * STREAM benchmark driver (CUDA build): allocates three device arrays, runs
 * the four kernels NTIMES times, and reports bandwidth from the minimum
 * observed time per kernel. The first iteration is discarded as warm-up.
 *
 * Fixes:
 *  - cudaThreadSynchronize() is deprecated (and removed in recent CUDA
 *    toolkits); replaced with the supported cudaDeviceSynchronize(). This
 *    also matches the HIP build, which already uses hipDeviceSynchronize().
 *  - cudaFree results are now checked via CUDA_SAFE_CALL, consistent with
 *    the allocation error handling above.
 */
int main(int argc, char** argv)
{
    real *d_a, *d_b, *d_c;          // device arrays
    int j,k;
    double times[4][NTIMES];        // wall time per kernel per iteration
    real scalar;
    std::vector<std::string> label{"Copy: ", "Scale: ", "Add: ", "Triad: "};
    // Parse arguments
    bool SI;
    size_t N;
    int blockSize;
    parse_options(argc, argv, SI, N, blockSize);
    printf(" STREAM Benchmark implementation in CUDA\n");
    printf(" Array size (%s precision) =%7.2f MB\n", sizeof(double)==sizeof(real)?"double":"single", double(N)*double(sizeof(real))/1.e6);
    /* Allocate memory on device */
    CUDA_SAFE_CALL(cudaMalloc((void**)&d_a, sizeof(real)*N));
    CUDA_SAFE_CALL(cudaMalloc((void**)&d_b, sizeof(real)*N));
    CUDA_SAFE_CALL(cudaMalloc((void**)&d_c, sizeof(real)*N));
    /* Compute execution configuration: ceil(N / blockSize) blocks */
    dim3 dimBlock(blockSize);
    dim3 dimGrid(N / dimBlock.x);
    if( N % dimBlock.x != 0 ) {
        dimGrid.x += 1;
    }
    printf(" using %d threads per block, %d blocks\n",dimBlock.x,dimGrid.x);
    if (SI)
        printf(" output in SI units (KB = 1000 B)\n");
    else
        printf(" output in IEC units (KiB = 1024 B)\n");
    /* Initialize memory on the device */
    set_array<real><<<dimGrid,dimBlock>>>(d_a, 2.f, N);
    CUDA_SAFE_CALL(cudaDeviceSynchronize());
    set_array<real><<<dimGrid,dimBlock>>>(d_b, .5f, N);
    CUDA_SAFE_CALL(cudaDeviceSynchronize());
    set_array<real><<<dimGrid,dimBlock>>>(d_c, .5f, N);
    CUDA_SAFE_CALL(cudaDeviceSynchronize());
    /* --- MAIN LOOP --- repeat test cases NTIMES times --- */
    scalar=3.0f;
    for (k=0; k<NTIMES; k++)
    {
        // Each section is timed around a blocking synchronize so the wall
        // time covers the full kernel execution, not just the launch.
        times[0][k]= mysecond();
        STREAM_Copy<real><<<dimGrid,dimBlock>>>(d_a, d_c, N);
        CUDA_SAFE_CALL(cudaDeviceSynchronize());
        times[0][k]= mysecond() - times[0][k];
        times[1][k]= mysecond();
        STREAM_Scale<real><<<dimGrid,dimBlock>>>(d_b, d_c, scalar, N);
        CUDA_SAFE_CALL(cudaDeviceSynchronize());
        times[1][k]= mysecond() - times[1][k];
        times[2][k]= mysecond();
        STREAM_Add<real><<<dimGrid,dimBlock>>>(d_a, d_b, d_c, N);
        CUDA_SAFE_CALL(cudaDeviceSynchronize());
        times[2][k]= mysecond() - times[2][k];
        times[3][k]= mysecond();
        STREAM_Triad<real><<<dimGrid,dimBlock>>>(d_b, d_c, d_a, scalar, N);
        CUDA_SAFE_CALL(cudaDeviceSynchronize());
        times[3][k]= mysecond() - times[3][k];
    }
    /* --- SUMMARY --- */
    for (k=1; k<NTIMES; k++) /* note -- skip first iteration */
    {
        for (j=0; j<4; j++)
        {
            avgtime[j] = avgtime[j] + times[j][k];
            mintime[j] = MIN(mintime[j], times[j][k]);
            maxtime[j] = MAX(maxtime[j], times[j][k]);
        }
    }
    // Bytes moved per kernel: copy/scale touch 2 arrays, add/triad touch 3.
    double bytes[4] = {
        2 * sizeof(real) * (double)N,
        2 * sizeof(real) * (double)N,
        3 * sizeof(real) * (double)N,
        3 * sizeof(real) * (double)N
    };
    // Use right units
    const double G = SI ? 1.e9 : static_cast<double>(1<<30);
    printf("\nFunction Rate %s Avg time(s) Min time(s) Max time(s)\n",
           SI ? "(GB/s) " : "(GiB/s)" );
    printf("-----------------------------------------------------------------\n");
    for (j=0; j<4; j++) {
        avgtime[j] = avgtime[j]/(double)(NTIMES-1);
        printf("%s%11.4f %11.8f %11.8f %11.8f\n", label[j].c_str(),
               bytes[j]/mintime[j] / G,
               avgtime[j],
               mintime[j],
               maxtime[j]);
    }
    /* Free memory on device */
    CUDA_SAFE_CALL(cudaFree(d_a));
    CUDA_SAFE_CALL(cudaFree(d_b));
    CUDA_SAFE_CALL(cudaFree(d_c));
}
|
14f3e1e2a3fbeabaf16b442721cadc86a4b7aa4a.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <thrust/complex.h>
#include "header_hip.cuh"
#include <vector>
#include <time.h>
#include <hip/hip_runtime.h>
#include <math.h>
using namespace std;
// Copy one image column of the tile rows [i_start, i_end) from global memory
// into the shared-memory tile. Each warp handles every 4th row starting at
// its warp id (threadIdx.x/32) -- assumes a 4-warp (128-thread) block;
// TODO confirm against the launch configuration.
__device__ void read_blockint_to_shareint(uchar4 *simg, uchar4 *img, int i_start, int i_end, int col_index, int block_col_index, int width, int channels, int window_w, int pad){
    const int rows = i_end - i_start;
    const int warp_id = threadIdx.x / 32;
    for (int row = warp_id; row < rows; row += 4) {
        simg[row * (window_w + pad) + block_col_index] =
            img[(i_start + row) * width + col_index];
    }
}
// Inverse of read_blockint_to_shareint: write one column of the shared tile
// back to the global image for rows [i_start, i_end), strided 4 per warp.
__device__ void write_shareint_to_block( uchar4 *simg, uchar4 *img, int i_start, int i_end, int col_index, int block_col_index, int width, int channels, int window_w,int pad){
    const int rows = i_end - i_start;
    const int warp_id = threadIdx.x / 32;
    for (int row = warp_id; row < rows; row += 4) {
        img[(i_start + row) * width + col_index] =
            simg[row * (window_w + pad) + block_col_index];
    }
}
// Horizontal anti-causal (right-to-left) pass of a recursive Gaussian filter.
// The recurrence uses two complex poles whose per-pixel step length `delta`
// grows with the local colour difference, i.e. an edge-aware/geodesic
// spacing -- appears to be a Deriche/van-Vliet-style approximation; TODO
// confirm against the paper this implements.
//
// Inferred launch layout (verify against the caller): threadIdx.x is the
// image row within a band of window_h rows selected by blockIdx.x;
// blockIdx.y selects one of line_count independent horizontal strips.
// Dynamic shared memory: (window_w + sharedpad) * window_h uchar4 entries.
// `constant` holds precomputed complex coefficients (indices 1..12); their
// derivation lives outside this file.
__global__
void gaussian_filter_kernel_horizontal_anticausal(uchar4 *outputimage,float sigma_h,float s_quotient,thrust:: complex <float> *constant, uchar4 *img,int width, int height,int channels, int window_w, int window_h, int sharedpad, float kappa,int line_count){
    int i,j,k,j_start=0,j_end = window_w,i_start,i_end;
    uchar4 buffer;
    int block_j;                      // column index inside the shared tile
    int j_min;                        // left boundary (inclusive) of this strip
    float dist;
    float f[3],f_prev[3],delta;       // current/previous pixel (3 channels) and local step
    extern __shared__ uchar4 simg[]; //shared image of size window_w*window_h*channels
    thrust::complex<float> prevg0_acausal[3],prevg1_acausal[3],g0[3],g1[3];  // per-channel recursive state
    thrust::complex<float> b_delta,aux;
    i = threadIdx.x + window_h*blockIdx.x;          // image row handled by this thread
    j_end = width - (width/line_count)*blockIdx.y;  // right edge (exclusive) of this strip
    j_start = j_end - window_w;
    j_min = j_end - (width/line_count);
    if(j_min<0)
        j_min = 0;
    aux.real(0);
    aux.imag(0);
    //APPROXIMATE
    if(kappa>=0){
        //GET APPROXIMATION FOR INITIAL CONDITIONS
        //compute extended length
        // Walk right from the strip edge, accumulating the edge-aware
        // distance until it exceeds sigma_h*kappa; this fixes how much
        // right-hand context is used to warm up the filter state.
        dist=0;
        j=j_end-1;
        buffer = img[i*width+j_end-1];
        f_prev[0] = buffer.x;
        f_prev[1] = buffer.y;
        f_prev[2] = buffer.z;
        while(j<width-1 && (dist<sigma_h*kappa)){
            j=j+1;
            delta=0;
            //get current values of the image and store in shared memory
            buffer = img[i*width +j];
            f[0] = buffer.x;
            f[1] = buffer.y;
            f[2] = buffer.z;
            for(k=0;k<channels;k++){
                // NOTE(review): the 1.00 literal promotes to double here.
                delta = delta + 1.00*((f[k]-f_prev[k])*(f[k]-f_prev[k]));
            }
            delta = delta*s_quotient +1;
            delta = sqrt(delta);
            if(j==0){
                delta=1;
            }
            //
            dist = dist + delta;
            //update previous values
            for(k=0;k<channels;k++){
                f_prev[k] = f[k];
            }
        }
        // COMPUTE INITIAL CONDITIONS
        // Run the anti-causal recurrence from the extension point back to
        // j_end so the state is warmed up at the strip boundary.
        dist=j;
        for(j = j; j>= j_end; j= j-1){
            delta=0;
            buffer = img[i*width +j];
            f[0] = buffer.x;
            f[1] = buffer.y;
            f[2] = buffer.z;
            if(j == width-1){
                // Boundary: assume a constant image beyond the right edge,
                // giving the steady-state response of each pole.
                for(k=0;k<channels;k++){
                    f_prev[k] = f[k];
                    prevg0_acausal[k] = constant[1]*constant[3]/(float(1.00)-constant[3])*f[k];
                    prevg1_acausal[k] = constant[2]*constant[4]/(float(1.00)-constant[4])*f[k];
                }
            }
            for(k=0;k<channels;k++){
                delta = delta + float(1.00)*((f[k]-f_prev[k])*(f[k]-f_prev[k]));
            }
            delta = delta*s_quotient +float(1.00);
            delta = sqrt(delta);
            //b_delta = pow(constant[3],delta);
            // Complex power constant[3]^delta in polar form: constant[9]
            // holds the phase, constant[11] the magnitude.
            b_delta.real(__cosf(delta*constant[9].real()));
            b_delta.imag(__sinf(delta*constant[9].real()));
            b_delta = b_delta*__powf(constant[11].real(),delta);
            aux = (b_delta-float(1.00))/(constant[5]*delta);
            for(k = 0;k <channels; k++){
                g0[k] = constant[1]*b_delta*f_prev[k]+ b_delta*prevg0_acausal[k];
                g0[k] = g0[k] + (aux - constant[6]*constant[3])*f[k] - (aux - constant[6]*b_delta)*f_prev[k];
            }
            //b_delta = pow(constant[4],delta);
            b_delta.real(__cosf(delta*constant[10].real()));
            b_delta.imag(__sinf(delta*constant[10].real()));
            b_delta = b_delta*__powf(constant[12].real(),delta);
            aux = (b_delta-float(1.00))/(constant[7]*delta);
            for(k = 0;k <channels; k++){
                g1[k] = constant[2]*b_delta*f_prev[k]+ b_delta*prevg1_acausal[k];
                g1[k] = g1[k] + (aux - constant[8]*constant[4])*f[k] - (aux - constant[8]*b_delta)*f_prev[k];
            }
            for(k=0;k<channels;k++){
                f_prev[k] = f[k];
                prevg0_acausal[k] = g0[k];
                prevg1_acausal[k] = g1[k];
            }
        }
    }
    //Read image to shared
    // Main pass: process one window_w-wide tile at a time, moving the
    // [j_start, j_end) window right-to-left down to j_min.
    while(j_end > j_min){ //travels horizontally ->
        i_start = window_h*blockIdx.x;
        i_end = i_start + window_h;
        if(i_end> height){
            i_end = height;
        }
        //column = threadidx.x%32
        if(threadIdx.x%32<j_end-j_start)
            read_blockint_to_shareint(simg, img,i_start,i_end, j_start+threadIdx.x%32, threadIdx.x%32, width, channels, window_w, sharedpad);
        __syncthreads();
        if(1==1 && i<height){ //backward anticausal left
            block_j = (j_end-j_start)-1;
            for(j = j_end-1; j>= j_start; j= j-1){
                delta=0;
                buffer = simg[threadIdx.x*(window_w + sharedpad) + block_j];
                f[0] = buffer.x;
                f[1] = buffer.y;
                f[2] = buffer.z;
                if(j == width-1){
                    // Same constant-extension boundary as in the warm-up.
                    for(k=0;k<channels;k++){
                        f_prev[k] = f[k];
                        prevg0_acausal[k] = constant[1]*constant[3]/(float(1.00)-constant[3])*f[k];
                        prevg1_acausal[k] = constant[2]*constant[4]/(float(1.00)-constant[4])*f[k];
                    }
                }
                for(k=0;k<channels;k++){
                    delta = delta + float(1.00)*((f[k]-f_prev[k])*(f[k]-f_prev[k]));
                }
                delta = delta*s_quotient +float(1.00);
                delta = sqrt(delta);
                //b_delta = pow(constant[3],delta);
                b_delta.real(__cosf(delta*constant[9].real()));
                b_delta.imag(__sinf(delta*constant[9].real()));
                b_delta = b_delta*__powf(constant[11].real(),delta);
                aux = (b_delta-float(1.00))/(constant[5]*delta);
                for(k = 0;k <channels; k++){
                    g0[k] = constant[1]*b_delta*f_prev[k]+ b_delta*prevg0_acausal[k];
                    g0[k] = g0[k] + (aux - constant[6]*constant[3])*f[k] - (aux - constant[6]*b_delta)*f_prev[k];
                }
                //b_delta = pow(constant[4],delta);
                b_delta.real(__cosf(delta*constant[10].real()));
                b_delta.imag(__sinf(delta*constant[10].real()));
                b_delta = b_delta*__powf(constant[12].real(),delta);
                aux = (b_delta-float(1.00))/(constant[7]*delta);
                for(k = 0;k <channels; k++){
                    g1[k] = constant[2]*b_delta*f_prev[k]+ b_delta*prevg1_acausal[k];
                    g1[k] = g1[k] + (aux - constant[8]*constant[4])*f[k] - (aux - constant[8]*b_delta)*f_prev[k];
                }
                for(k=0;k<channels;k++){
                    f_prev[k] = f[k];
                    prevg0_acausal[k] = g0[k];
                    prevg1_acausal[k] = g1[k];
                }
                // Clamp |Re(g0+g1)| to [0,255] per channel and store back
                // into the shared tile.
                aux.real(int(abs((g0[0]+g1[0]).real())));
                if(aux.real()>255){
                    buffer.x = 255;
                }
                else{ buffer.x = aux.real();}
                aux.real(int(abs((g0[1]+g1[1]).real())));
                if(aux.real()>255){
                    buffer.y = 255;
                }
                else{ buffer.y = aux.real();}
                aux.real(int(abs((g0[2]+g1[2]).real())));
                if(aux.real()>255){
                    buffer.z = 255;
                }
                else{ buffer.z = aux.real();}
                simg[threadIdx.x*(window_w + sharedpad) + block_j] = buffer;
                block_j = block_j -1;
            }
        }
        //output to device
        __syncthreads();
        if(threadIdx.x%32<j_end-j_start)
            write_shareint_to_block(simg, outputimage,i_start,i_end, j_start+threadIdx.x%32, threadIdx.x%32, width, channels, window_w, sharedpad);
        __syncthreads();
        j_end = j_start;
        j_start = j_start - window_w;
        if(j_start<0){
            j_start = 0;
        }
    }
}
// Horizontal causal (left-to-right) pass of the edge-aware recursive
// Gaussian filter; mirror of the anti-causal kernel above. Same inferred
// launch layout: threadIdx.x = row within a window_h band (blockIdx.x),
// blockIdx.y = horizontal strip index, dynamic shared memory of
// (window_w + sharedpad) * window_h uchar4 entries.
// NOTE(review): prevg0_acausal/prevg1_acausal are declared but unused here.
__global__
void gaussian_filter_kernel_horizontal_causal(uchar4 *outputimage,float sigma_h,float s_quotient,thrust:: complex <float> *constant, uchar4 *img,int width, int height,int channels, int window_w, int window_h, int sharedpad, float kappa,int line_count){
    int i,j,k,i_start,i_end;
    int j_start = (width/line_count)*blockIdx.y,j_end = window_w + j_start;  // strip bounds
    int j_max = j_start + (width/line_count);
    float dist;
    if(j_max>width)
        j_max = width;
    uchar4 buffer;
    int block_j;                     // column index inside the shared tile
    float f[3],f_prev[3],delta;      // current/previous pixel (3 channels) and local step
    extern __shared__ uchar4 simg[];
    thrust::complex<float> prevg0_causal[3],prevg0_acausal[3],prevg1_acausal[3],prevg1_causal[3],g0[3],g1[3];
    thrust::complex<float> b_delta,aux;
    i = threadIdx.x + window_h*blockIdx.x;  // image row handled by this thread
    aux.real(0);
    aux.imag(0);
    if(kappa>=0){
        //GET APPROXIMATION FOR INITIAL CONDITIONS
        //compute extended length
        // Walk left from the strip start until the accumulated edge-aware
        // distance exceeds sigma_h*kappa.
        dist=0;
        j=j_start;
        while(j>0 && (dist<sigma_h*kappa)){
            j=j-1;
            delta=0;
            buffer = img[i*width+j_start];
            f_prev[0] = buffer.x;
            f_prev[1] = buffer.y;
            f_prev[2] = buffer.z;
            buffer = img[i*width +j];
            f[0] = buffer.x;
            f[1] = buffer.y;
            f[2] = buffer.z;
            for(k=0;k<channels;k++){
                // NOTE(review): the 1.00 literal promotes to double here.
                delta = delta + 1.00*((f[k]-f_prev[k])*(f[k]-f_prev[k]));
            }
            delta = delta*s_quotient +1;
            delta = sqrt(delta);
            if(j==0){
                delta=1;
            }
            //
            dist = dist + delta;
            for(k=0;k<channels;k++){
                f_prev[k] = f[k];
            }
        }
        // COMPUTE INITIAL CONDITIONS
        // Run the causal recurrence from the extension point forward to
        // j_start so the state is warmed up at the strip boundary.
        dist = j;
        for(j=j;j<= j_start;j++){
            delta=0;
            buffer = img[i*width +j];
            f[0] = buffer.x;
            f[1] = buffer.y;
            f[2] = buffer.z;
            if(j==dist){
                // First extended pixel: steady-state response of each pole.
                for(k=0;k<channels;k++){
                    f_prev[k] = f[k];
                    prevg0_causal[k] = constant[1]/(float(1.00)-constant[3])*f[k];
                    prevg1_causal[k] = constant[2]/(float(1.00)-constant[4])*f[k];
                }
            }
            for(k=0;k<channels;k++){
                delta = delta + float(1.00)*((f[k]-f_prev[k])*(f[k]-f_prev[k]));
            }
            delta = delta*s_quotient +1;
            delta = sqrt(delta);
            // Complex power constant[3]^delta in polar form: constant[9]
            // holds the phase, constant[11] the magnitude.
            b_delta.real(__cosf(delta*constant[9].real()));
            b_delta.imag(__sinf(delta*constant[9].real()));
            b_delta = b_delta*__powf(constant[11].real(),delta);
            aux = (b_delta-float(1.00))/(constant[5]*delta);
            for(k = 0;k <channels; k++){
                g0[k] = constant[1]*f[k]+ b_delta*prevg0_causal[k];
                g0[k] = g0[k] + (aux - constant[6]*constant[3])*f[k] - (aux - constant[6]*b_delta)*f_prev[k];
            }
            // b_delta = pow(constant[4],delta);
            b_delta.real(__cosf(delta*constant[10].real()));
            b_delta.imag(__sinf(delta*constant[10].real()));
            b_delta = b_delta*__powf(constant[12].real(),delta);
            aux= (b_delta-float(1.00))/(constant[7]*delta);
            for(k = 0;k <channels; k++){
                g1[k] = constant[2]*f[k]+ b_delta*prevg1_causal[k];
                g1[k] = g1[k] + (aux - constant[8]*constant[4])*f[k] - (aux - constant[8]*b_delta)*f_prev[k];
            }
            for(k=0;k<channels;k++){
                f_prev[k] = f[k];
                prevg0_causal[k] = g0[k];
                prevg1_causal[k] = g1[k];
            }
        }
    }
    //Read image to shared
    // Main pass: process one window_w-wide tile at a time, moving the
    // [j_start, j_end) window left-to-right up to j_max.
    while(j_start < j_max){
        j_end = j_start +window_w;
        if(j_end> width){
            j_end = width;
        }
        i_start = window_h*blockIdx.x;
        i_end = i_start + window_h;
        if(i_end> height){
            i_end = height;
        }
        //column = threadidx.x%32
        if(threadIdx.x%32<j_end-j_start)
            read_blockint_to_shareint(simg, img,i_start,i_end, j_start+threadIdx.x%32, threadIdx.x%32, width, channels, window_w, sharedpad);
        __syncthreads();
        if(i<height){
            block_j = 0;
            for(j = j_start; j< j_end; j++){
                delta=0;
                buffer = simg[threadIdx.x*(window_w + sharedpad)+block_j];
                f[0] = buffer.x;
                f[1] = buffer.y;
                f[2] = buffer.z;
                if(j==0){
                    // Image left edge: steady-state initialization.
                    for(k=0;k<channels;k++){
                        f_prev[k] = f[k];
                        prevg0_causal[k] = constant[1]/(float(1.00)-constant[3])*f[k];
                        prevg1_causal[k] = constant[2]/(float(1.00)-constant[4])*f[k];
                    }
                }
                for(k=0;k<channels;k++){
                    delta = delta + float(1.00)*((f[k]-f_prev[k])*(f[k]-f_prev[k]));
                }
                delta = delta*s_quotient +1;
                delta = sqrt(delta);
                b_delta.real(__cosf(delta*constant[9].real()));
                b_delta.imag(__sinf(delta*constant[9].real()));
                b_delta = b_delta*__powf(constant[11].real(),delta);
                aux = (b_delta-float(1.00))/(constant[5]*delta);
                for(k = 0;k <channels; k++){
                    g0[k] = constant[1]*f[k]+ b_delta*prevg0_causal[k];
                    g0[k] = g0[k] + (aux - constant[6]*constant[3])*f[k] - (aux - constant[6]*b_delta)*f_prev[k];
                }
                // b_delta = pow(constant[4],delta);
                b_delta.real(__cosf(delta*constant[10].real()));
                b_delta.imag(__sinf(delta*constant[10].real()));
                b_delta = b_delta*__powf(constant[12].real(),delta);
                aux= (b_delta-float(1.00))/(constant[7]*delta);
                for(k = 0;k <channels; k++){
                    g1[k] = constant[2]*f[k]+ b_delta*prevg1_causal[k];
                    g1[k] = g1[k] + (aux - constant[8]*constant[4])*f[k] - (aux - constant[8]*b_delta)*f_prev[k];
                }
                //update state vectors
                for(k=0;k<channels;k++){
                    f_prev[k] = f[k];
                    prevg0_causal[k] = g0[k];
                    prevg1_causal[k] = g1[k];
                }
                // Clamp |Re(g0+g1)| to [0,255] per channel and store back
                // into the shared tile.
                aux.real(int(abs((g0[0]+g1[0]).real())));
                if(aux.real()>255){
                    buffer.x = 255;
                }
                else{ buffer.x = aux.real();}
                aux.real(int(abs((g0[1]+g1[1]).real())));
                if(aux.real()>255){
                    buffer.y = 255;
                }
                else{ buffer.y = aux.real();}
                aux.real(int(abs((g0[2]+g1[2]).real())));
                if(aux.real()>255){
                    buffer.z = 255;
                }
                else{ buffer.z = aux.real();}
                simg[threadIdx.x*(window_w + sharedpad) + block_j] = buffer;
                block_j++;
            }
        }
        __syncthreads();
        if(threadIdx.x%32<j_end-j_start)
            write_shareint_to_block(simg, outputimage,i_start,i_end, j_start+threadIdx.x%32, threadIdx.x%32, width, channels, window_w, sharedpad);
        __syncthreads();
        j_start = j_end;
    }
}
| 14f3e1e2a3fbeabaf16b442721cadc86a4b7aa4a.cu | #include <iostream>
#include <thrust/complex.h>
#include "header.cuh"
#include <vector>
#include <time.h>
#include <cuda_runtime.h>
#include <math.h>
using namespace std;
// Copy one image column of the tile rows [i_start, i_end) from global memory
// into the shared-memory tile. Each warp handles every 4th row starting at
// its warp id (threadIdx.x/32) -- assumes a 4-warp (128-thread) block;
// TODO confirm against the launch configuration.
__device__ void read_blockint_to_shareint(uchar4 *simg, uchar4 *img, int i_start, int i_end, int col_index, int block_col_index, int width, int channels, int window_w, int pad){
    const int rows = i_end - i_start;
    const int warp_id = threadIdx.x / 32;
    for (int row = warp_id; row < rows; row += 4) {
        simg[row * (window_w + pad) + block_col_index] =
            img[(i_start + row) * width + col_index];
    }
}
// Inverse of read_blockint_to_shareint: write one column of the shared tile
// back to the global image for rows [i_start, i_end), strided 4 per warp.
__device__ void write_shareint_to_block( uchar4 *simg, uchar4 *img, int i_start, int i_end, int col_index, int block_col_index, int width, int channels, int window_w,int pad){
    const int rows = i_end - i_start;
    const int warp_id = threadIdx.x / 32;
    for (int row = warp_id; row < rows; row += 4) {
        img[(i_start + row) * width + col_index] =
            simg[row * (window_w + pad) + block_col_index];
    }
}
// Horizontal anti-causal (right-to-left) pass of a recursive Gaussian filter.
// The recurrence uses two complex poles whose per-pixel step length `delta`
// grows with the local colour difference, i.e. an edge-aware/geodesic
// spacing -- appears to be a Deriche/van-Vliet-style approximation; TODO
// confirm against the paper this implements.
//
// Inferred launch layout (verify against the caller): threadIdx.x is the
// image row within a band of window_h rows selected by blockIdx.x;
// blockIdx.y selects one of line_count independent horizontal strips.
// Dynamic shared memory: (window_w + sharedpad) * window_h uchar4 entries.
// `constant` holds precomputed complex coefficients (indices 1..12); their
// derivation lives outside this file.
__global__
void gaussian_filter_kernel_horizontal_anticausal(uchar4 *outputimage,float sigma_h,float s_quotient,thrust:: complex <float> *constant, uchar4 *img,int width, int height,int channels, int window_w, int window_h, int sharedpad, float kappa,int line_count){
    int i,j,k,j_start=0,j_end = window_w,i_start,i_end;
    uchar4 buffer;
    int block_j;                      // column index inside the shared tile
    int j_min;                        // left boundary (inclusive) of this strip
    float dist;
    float f[3],f_prev[3],delta;       // current/previous pixel (3 channels) and local step
    extern __shared__ uchar4 simg[]; //shared image of size window_w*window_h*channels
    thrust::complex<float> prevg0_acausal[3],prevg1_acausal[3],g0[3],g1[3];  // per-channel recursive state
    thrust::complex<float> b_delta,aux;
    i = threadIdx.x + window_h*blockIdx.x;          // image row handled by this thread
    j_end = width - (width/line_count)*blockIdx.y;  // right edge (exclusive) of this strip
    j_start = j_end - window_w;
    j_min = j_end - (width/line_count);
    if(j_min<0)
        j_min = 0;
    aux.real(0);
    aux.imag(0);
    //APPROXIMATE
    if(kappa>=0){
        //GET APPROXIMATION FOR INITIAL CONDITIONS
        //compute extended length
        // Walk right from the strip edge, accumulating the edge-aware
        // distance until it exceeds sigma_h*kappa; this fixes how much
        // right-hand context is used to warm up the filter state.
        dist=0;
        j=j_end-1;
        buffer = img[i*width+j_end-1];
        f_prev[0] = buffer.x;
        f_prev[1] = buffer.y;
        f_prev[2] = buffer.z;
        while(j<width-1 && (dist<sigma_h*kappa)){
            j=j+1;
            delta=0;
            //get current values of the image and store in shared memory
            buffer = img[i*width +j];
            f[0] = buffer.x;
            f[1] = buffer.y;
            f[2] = buffer.z;
            for(k=0;k<channels;k++){
                // NOTE(review): the 1.00 literal promotes to double here.
                delta = delta + 1.00*((f[k]-f_prev[k])*(f[k]-f_prev[k]));
            }
            delta = delta*s_quotient +1;
            delta = sqrt(delta);
            if(j==0){
                delta=1;
            }
            //
            dist = dist + delta;
            //update previous values
            for(k=0;k<channels;k++){
                f_prev[k] = f[k];
            }
        }
        // COMPUTE INITIAL CONDITIONS
        // Run the anti-causal recurrence from the extension point back to
        // j_end so the state is warmed up at the strip boundary.
        dist=j;
        for(j = j; j>= j_end; j= j-1){
            delta=0;
            buffer = img[i*width +j];
            f[0] = buffer.x;
            f[1] = buffer.y;
            f[2] = buffer.z;
            if(j == width-1){
                // Boundary: assume a constant image beyond the right edge,
                // giving the steady-state response of each pole.
                for(k=0;k<channels;k++){
                    f_prev[k] = f[k];
                    prevg0_acausal[k] = constant[1]*constant[3]/(float(1.00)-constant[3])*f[k];
                    prevg1_acausal[k] = constant[2]*constant[4]/(float(1.00)-constant[4])*f[k];
                }
            }
            for(k=0;k<channels;k++){
                delta = delta + float(1.00)*((f[k]-f_prev[k])*(f[k]-f_prev[k]));
            }
            delta = delta*s_quotient +float(1.00);
            delta = sqrt(delta);
            //b_delta = pow(constant[3],delta);
            // Complex power constant[3]^delta in polar form: constant[9]
            // holds the phase, constant[11] the magnitude.
            b_delta.real(__cosf(delta*constant[9].real()));
            b_delta.imag(__sinf(delta*constant[9].real()));
            b_delta = b_delta*__powf(constant[11].real(),delta);
            aux = (b_delta-float(1.00))/(constant[5]*delta);
            for(k = 0;k <channels; k++){
                g0[k] = constant[1]*b_delta*f_prev[k]+ b_delta*prevg0_acausal[k];
                g0[k] = g0[k] + (aux - constant[6]*constant[3])*f[k] - (aux - constant[6]*b_delta)*f_prev[k];
            }
            //b_delta = pow(constant[4],delta);
            b_delta.real(__cosf(delta*constant[10].real()));
            b_delta.imag(__sinf(delta*constant[10].real()));
            b_delta = b_delta*__powf(constant[12].real(),delta);
            aux = (b_delta-float(1.00))/(constant[7]*delta);
            for(k = 0;k <channels; k++){
                g1[k] = constant[2]*b_delta*f_prev[k]+ b_delta*prevg1_acausal[k];
                g1[k] = g1[k] + (aux - constant[8]*constant[4])*f[k] - (aux - constant[8]*b_delta)*f_prev[k];
            }
            for(k=0;k<channels;k++){
                f_prev[k] = f[k];
                prevg0_acausal[k] = g0[k];
                prevg1_acausal[k] = g1[k];
            }
        }
    }
    //Read image to shared
    // Main pass: process one window_w-wide tile at a time, moving the
    // [j_start, j_end) window right-to-left down to j_min.
    while(j_end > j_min){ //travels horizontally ->
        i_start = window_h*blockIdx.x;
        i_end = i_start + window_h;
        if(i_end> height){
            i_end = height;
        }
        //column = threadidx.x%32
        if(threadIdx.x%32<j_end-j_start)
            read_blockint_to_shareint(simg, img,i_start,i_end, j_start+threadIdx.x%32, threadIdx.x%32, width, channels, window_w, sharedpad);
        __syncthreads();
        if(1==1 && i<height){ //backward anticausal left
            block_j = (j_end-j_start)-1;
            for(j = j_end-1; j>= j_start; j= j-1){
                delta=0;
                buffer = simg[threadIdx.x*(window_w + sharedpad) + block_j];
                f[0] = buffer.x;
                f[1] = buffer.y;
                f[2] = buffer.z;
                if(j == width-1){
                    // Same constant-extension boundary as in the warm-up.
                    for(k=0;k<channels;k++){
                        f_prev[k] = f[k];
                        prevg0_acausal[k] = constant[1]*constant[3]/(float(1.00)-constant[3])*f[k];
                        prevg1_acausal[k] = constant[2]*constant[4]/(float(1.00)-constant[4])*f[k];
                    }
                }
                for(k=0;k<channels;k++){
                    delta = delta + float(1.00)*((f[k]-f_prev[k])*(f[k]-f_prev[k]));
                }
                delta = delta*s_quotient +float(1.00);
                delta = sqrt(delta);
                //b_delta = pow(constant[3],delta);
                b_delta.real(__cosf(delta*constant[9].real()));
                b_delta.imag(__sinf(delta*constant[9].real()));
                b_delta = b_delta*__powf(constant[11].real(),delta);
                aux = (b_delta-float(1.00))/(constant[5]*delta);
                for(k = 0;k <channels; k++){
                    g0[k] = constant[1]*b_delta*f_prev[k]+ b_delta*prevg0_acausal[k];
                    g0[k] = g0[k] + (aux - constant[6]*constant[3])*f[k] - (aux - constant[6]*b_delta)*f_prev[k];
                }
                //b_delta = pow(constant[4],delta);
                b_delta.real(__cosf(delta*constant[10].real()));
                b_delta.imag(__sinf(delta*constant[10].real()));
                b_delta = b_delta*__powf(constant[12].real(),delta);
                aux = (b_delta-float(1.00))/(constant[7]*delta);
                for(k = 0;k <channels; k++){
                    g1[k] = constant[2]*b_delta*f_prev[k]+ b_delta*prevg1_acausal[k];
                    g1[k] = g1[k] + (aux - constant[8]*constant[4])*f[k] - (aux - constant[8]*b_delta)*f_prev[k];
                }
                for(k=0;k<channels;k++){
                    f_prev[k] = f[k];
                    prevg0_acausal[k] = g0[k];
                    prevg1_acausal[k] = g1[k];
                }
                // Clamp |Re(g0+g1)| to [0,255] per channel and store back
                // into the shared tile.
                aux.real(int(abs((g0[0]+g1[0]).real())));
                if(aux.real()>255){
                    buffer.x = 255;
                }
                else{ buffer.x = aux.real();}
                aux.real(int(abs((g0[1]+g1[1]).real())));
                if(aux.real()>255){
                    buffer.y = 255;
                }
                else{ buffer.y = aux.real();}
                aux.real(int(abs((g0[2]+g1[2]).real())));
                if(aux.real()>255){
                    buffer.z = 255;
                }
                else{ buffer.z = aux.real();}
                simg[threadIdx.x*(window_w + sharedpad) + block_j] = buffer;
                block_j = block_j -1;
            }
        }
        //output to device
        __syncthreads();
        if(threadIdx.x%32<j_end-j_start)
            write_shareint_to_block(simg, outputimage,i_start,i_end, j_start+threadIdx.x%32, threadIdx.x%32, width, channels, window_w, sharedpad);
        __syncthreads();
        j_end = j_start;
        j_start = j_start - window_w;
        if(j_start<0){
            j_start = 0;
        }
    }
}
// Causal (left-to-right) horizontal pass of a recursive complex-exponential
// Gaussian filter (Deriche-style IIR approximation) over a uchar4 image.
// NOTE(review): the inter-pixel step `delta` is scaled by the local color
// difference via s_quotient, so this looks like a geodesic / edge-aware
// variant of the filter — confirm against the paper this implements.
//
// Launch layout (inferred from the indexing): blockIdx.x selects a band of
// window_h image rows (threadIdx.x = row inside the band); blockIdx.y selects
// one of line_count column strips, which is processed left to right in
// window_w-wide tiles staged through dynamic shared memory simg[] (each row
// padded by `sharedpad` elements).
//
// constant[] packs the filter coefficients; indices inferred from usage:
// [1],[2] direct gains, [3],[4] recursive poles, [5]..[8] interpolation
// terms, [9],[10] pole phases, [11],[12] pole magnitudes.
//
// kappa >= 0 enables a boundary "warm-up": walk left of the strip start for
// roughly sigma_h*kappa geodesic distance and run the recursion there so the
// filter state (prevg0_causal/prevg1_causal) is a good approximation of the
// true initial conditions at column j_start.
__global__
void gaussian_filter_kernel_horizontal_causal(uchar4 *outputimage,float sigma_h,float s_quotient,thrust:: complex <float> *constant, uchar4 *img,int width, int height,int channels, int window_w, int window_h, int sharedpad, float kappa,int line_count){
int i,j,k,i_start,i_end;
int j_start = (width/line_count)*blockIdx.y,j_end = window_w + j_start;
int j_max = j_start + (width/line_count);
float dist;
if(j_max>width)
j_max = width;
uchar4 buffer;
int block_j;
float f[3],f_prev[3],delta;
extern __shared__ uchar4 simg[];
thrust::complex<float> prevg0_causal[3],prevg0_acausal[3],prevg1_acausal[3],prevg1_causal[3],g0[3],g1[3];
thrust::complex<float> b_delta,aux;
// i = absolute image row handled by this thread
i = threadIdx.x + window_h*blockIdx.x;
aux.real(0);
aux.imag(0);
if(kappa>=0){
//GET APPROXIMATION FOR INITIAL CONDITIONS
//compute extended length
// Walk left from j_start until the accumulated geodesic distance covers
// the filter support (sigma_h*kappa) or we hit the image border.
dist=0;
j=j_start;
while(j>0 && (dist<sigma_h*kappa)){
j=j-1;
delta=0;
buffer = img[i*width+j_start];
f_prev[0] = buffer.x;
f_prev[1] = buffer.y;
f_prev[2] = buffer.z;
buffer = img[i*width +j];
f[0] = buffer.x;
f[1] = buffer.y;
f[2] = buffer.z;
for(k=0;k<channels;k++){
delta = delta + 1.00*((f[k]-f_prev[k])*(f[k]-f_prev[k]));
}
delta = delta*s_quotient +1;
delta = sqrt(delta);
if(j==0){
delta=1;
}
//
dist = dist + delta;
for(k=0;k<channels;k++){
f_prev[k] = f[k];
}
}
// COMPUTE INITIAL CONDITIONS
// `dist` now reuses the float as the starting column index of the warm-up.
dist = j;
for(j=j;j<= j_start;j++){
delta=0;
buffer = img[i*width +j];
f[0] = buffer.x;
f[1] = buffer.y;
f[2] = buffer.z;
if(j==dist){
// Steady-state initialization at the warm-up origin.
for(k=0;k<channels;k++){
f_prev[k] = f[k];
prevg0_causal[k] = constant[1]/(float(1.00)-constant[3])*f[k];
prevg1_causal[k] = constant[2]/(float(1.00)-constant[4])*f[k];
}
}
for(k=0;k<channels;k++){
delta = delta + float(1.00)*((f[k]-f_prev[k])*(f[k]-f_prev[k]));
}
delta = delta*s_quotient +1;
delta = sqrt(delta);
// b_delta = pole^delta expressed as magnitude^delta * e^(i*phase*delta)
b_delta.real(__cosf(delta*constant[9].real()));
b_delta.imag(__sinf(delta*constant[9].real()));
b_delta = b_delta*__powf(constant[11].real(),delta);
aux = (b_delta-float(1.00))/(constant[5]*delta);
for(k = 0;k <channels; k++){
g0[k] = constant[1]*f[k]+ b_delta*prevg0_causal[k];
g0[k] = g0[k] + (aux - constant[6]*constant[3])*f[k] - (aux - constant[6]*b_delta)*f_prev[k];
}
// b_delta = pow(constant[4],delta);
b_delta.real(__cosf(delta*constant[10].real()));
b_delta.imag(__sinf(delta*constant[10].real()));
b_delta = b_delta*__powf(constant[12].real(),delta);
aux= (b_delta-float(1.00))/(constant[7]*delta);
for(k = 0;k <channels; k++){
g1[k] = constant[2]*f[k]+ b_delta*prevg1_causal[k];
g1[k] = g1[k] + (aux - constant[8]*constant[4])*f[k] - (aux - constant[8]*b_delta)*f_prev[k];
}
for(k=0;k<channels;k++){
f_prev[k] = f[k];
prevg0_causal[k] = g0[k];
prevg1_causal[k] = g1[k];
}
}
}
//Read image to shared
// Main tile loop: stage one window_w-wide tile in shared memory, run the
// causal recursion across it, write the tile back, advance to the next tile.
while(j_start < j_max){
j_end = j_start +window_w;
if(j_end> width){
j_end = width;
}
i_start = window_h*blockIdx.x;
i_end = i_start + window_h;
if(i_end> height){
i_end = height;
}
// column within the tile = threadIdx.x%32 (warp loads columns cooperatively)
if(threadIdx.x%32<j_end-j_start)
read_blockint_to_shareint(simg, img,i_start,i_end, j_start+threadIdx.x%32, threadIdx.x%32, width, channels, window_w, sharedpad);
__syncthreads();
if(i<height){
block_j = 0;
for(j = j_start; j< j_end; j++){
delta=0;
buffer = simg[threadIdx.x*(window_w + sharedpad)+block_j];
f[0] = buffer.x;
f[1] = buffer.y;
f[2] = buffer.z;
if(j==0){
// Image border: (re)initialize recursion state from the first pixel.
for(k=0;k<channels;k++){
f_prev[k] = f[k];
prevg0_causal[k] = constant[1]/(float(1.00)-constant[3])*f[k];
prevg1_causal[k] = constant[2]/(float(1.00)-constant[4])*f[k];
}
}
for(k=0;k<channels;k++){
delta = delta + float(1.00)*((f[k]-f_prev[k])*(f[k]-f_prev[k]));
}
delta = delta*s_quotient +1;
delta = sqrt(delta);
b_delta.real(__cosf(delta*constant[9].real()));
b_delta.imag(__sinf(delta*constant[9].real()));
b_delta = b_delta*__powf(constant[11].real(),delta);
aux = (b_delta-float(1.00))/(constant[5]*delta);
for(k = 0;k <channels; k++){
g0[k] = constant[1]*f[k]+ b_delta*prevg0_causal[k];
g0[k] = g0[k] + (aux - constant[6]*constant[3])*f[k] - (aux - constant[6]*b_delta)*f_prev[k];
}
// b_delta = pow(constant[4],delta);
b_delta.real(__cosf(delta*constant[10].real()));
b_delta.imag(__sinf(delta*constant[10].real()));
b_delta = b_delta*__powf(constant[12].real(),delta);
aux= (b_delta-float(1.00))/(constant[7]*delta);
for(k = 0;k <channels; k++){
g1[k] = constant[2]*f[k]+ b_delta*prevg1_causal[k];
g1[k] = g1[k] + (aux - constant[8]*constant[4])*f[k] - (aux - constant[8]*b_delta)*f_prev[k];
}
// update recursion state for the next pixel
for(k=0;k<channels;k++){
f_prev[k] = f[k];
prevg0_causal[k] = g0[k];
prevg1_causal[k] = g1[k];
}
// |real part of g0+g1| is the filtered value; clamp each channel to 255.
aux.real(int(abs((g0[0]+g1[0]).real())));
if(aux.real()>255){
buffer.x = 255;
}
else{ buffer.x = aux.real();}
aux.real(int(abs((g0[1]+g1[1]).real())));
if(aux.real()>255){
buffer.y = 255;
}
else{ buffer.y = aux.real();}
aux.real(int(abs((g0[2]+g1[2]).real())));
if(aux.real()>255){
buffer.z = 255;
}
else{ buffer.z = aux.real();}
simg[threadIdx.x*(window_w + sharedpad) + block_j] = buffer;
block_j++;
}
}
__syncthreads();
if(threadIdx.x%32<j_end-j_start)
write_shareint_to_block(simg, outputimage,i_start,i_end, j_start+threadIdx.x%32, threadIdx.x%32, width, channels, window_w, sharedpad);
__syncthreads();
j_start = j_end;
}
}
|
5cdb3f53d4fd4b8119eace8e7749578e3dad4a4f.hip | // !!! This is a file automatically generated by hipify!!!
/*************************************************************************
* Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
*
* See LICENSE.txt for license information
************************************************************************/
#include "hip/hip_runtime.h"
#include "common.h"
// Prints the three-line column header of the benchmark result table
// (size/count/type, then out-of-place and in-place time/algbw/busbw/error
// columns). PRINT is the harness-provided output macro.
void print_header() {
PRINT("# %10s %12s %6s out-of-place in-place \n", "", "", "");
PRINT("# %10s %12s %6s %7s %6s %6s %5s %7s %6s %6s %5s\n", "size", "count", "type",
"time", "algbw", "busbw", "error", "time", "algbw", "busbw", "error");
PRINT("# %10s %12s %6s %7s %6s %6s %5s %7s %6s %6s %5s\n", "(B)", "(elements)", "",
"(us)", "(GB/s)", "(GB/s)", "", "(us)", "(GB/s)", "(GB/s)", "");
}
// Prints the per-measurement row prefix (byte size, element count, type name).
// opName/root are unused: AllGather has no reduction op or root rank.
void print_line_header (size_t size, size_t count, const char *typeName, const char *opName, int root) {
PRINT("%12li %12li %6s", size, count, typeName);
}
// Derives per-rank element counts for AllGather from the total element count.
// Each rank sends count/nranks elements; the gathered result is that share
// times nranks. In-place sends start at this rank's slot inside the receive
// buffer (offset count/nranks); the receive buffer itself has no offset.
void AllGatherGetCollByteCount(size_t *sendcount, size_t *recvcount, size_t *paramcount, size_t *sendInplaceOffset, size_t *recvInplaceOffset, size_t count, int nranks) {
    const size_t perRank = count / nranks;
    *sendcount = perRank;
    *recvcount = perRank * nranks;
    *sendInplaceOffset = perRank;
    *recvInplaceOffset = 0;
    // The count passed to ncclAllGather is the per-rank send count.
    *paramcount = perRank;
}
// Initializes buffers on every GPU managed by this thread for repetition
// `rep`: zeroes the receive buffer, fills the send data deterministically per
// global rank, and builds the expected gathered result (each rank's block in
// rank order) for later verification. In-place mode writes the send data
// directly into this rank's slot of the receive buffer, per the NCCL in-place
// AllGather convention. `op` and `root` are unused (no reduction/root).
testResult_t AllGatherInitData(struct threadArgs* args, ncclDataType_t type, ncclRedOp_t op, int root, int rep, int in_place) {
size_t sendcount = args->sendBytes / wordSize(type);
size_t recvcount = args->expectedBytes / wordSize(type);
int nranks = args->nProcs*args->nThreads*args->nGpus;
for (int i=0; i<args->nGpus; i++) {
// Local device ordinal for GPU i of this thread.
int gpuid = args->localRank*args->nThreads*args->nGpus + args->thread*args->nGpus + i;
CUDACHECK(hipSetDevice(gpuid));
// Global rank that owns GPU i of this thread.
int rank = ((args->proc*args->nThreads + args->thread)*args->nGpus + i);
CUDACHECK(hipMemset(args->recvbuffs[i], 0, args->expectedBytes));
void* data = in_place ? ((char*)args->recvbuffs[i])+rank*args->sendBytes : args->sendbuffs[i];
TESTCHECK(InitData(data, sendcount, type, rep, rank));
// Expected result: rank j's deterministic block at offset j*sendBytes.
for (int j=0; j<nranks; j++) {
TESTCHECK(InitData(((char*)args->expected[i])+args->sendBytes*j, sendcount, type, rep, j));
}
CUDACHECK(hipDeviceSynchronize());
}
return testSuccess;
}
// Converts a measured time into AllGather bandwidth figures.
// - algBw: total gathered payload (count elements of typesize bytes from each
//   of nranks ranks) divided by the elapsed seconds, in GB/s.
// - busBw: algBw scaled by (nranks-1)/nranks — each rank already holds its
//   own block, so only that fraction actually crosses the bus (standard
//   nccl-tests bus-bandwidth model).
// Fix: removed a leftover debug PRINT("factor: %f", factor) that had no
// trailing newline and corrupted the formatted result table.
void AllGatherGetBw(size_t count, int typesize, double sec, double* algBw, double* busBw, int nranks) {
    double baseBw = (double)(count * typesize * nranks) / 1.0E9 / sec;
    *algBw = baseBw;
    double factor = ((double)(nranks - 1))/((double)nranks);
    *busBw = baseBw * factor;
}
// Issues one ncclAllGather on the given stream. `op` and `root` are ignored:
// AllGather has no reduction operator or root rank.
testResult_t AllGatherRunColl(void* sendbuff, void* recvbuff, size_t count, ncclDataType_t type, ncclRedOp_t op, int root, ncclComm_t comm, hipStream_t stream) {
NCCLCHECK(ncclAllGather(sendbuff, recvbuff, count, type, comm, stream));
return testSuccess;
}
// Collective descriptor wired into the generic test harness: name plus the
// callbacks for sizing, data initialization, bandwidth math, and the launch.
struct testColl allGatherTest = {
"AllGather",
AllGatherGetCollByteCount,
AllGatherInitData,
AllGatherGetBw,
AllGatherRunColl
};
// Thin wrapper used by the harness to size the send/receive buffers; the
// remaining outputs of AllGatherGetCollByteCount are computed and discarded.
void AllGatherGetBuffSize(size_t *sendcount, size_t *recvcount, size_t count, int nranks) {
    size_t paramcount = 0;
    size_t sendInplaceOffset = 0;
    size_t recvInplaceOffset = 0;
    AllGatherGetCollByteCount(sendcount, recvcount, &paramcount, &sendInplaceOffset, &recvInplaceOffset, count, nranks);
}
// Harness entry: installs the AllGather descriptor and times it. If a
// concrete data type was requested ((int)type != -1) only that type runs;
// otherwise every type in the harness-provided test_types table is swept.
// The reduction op argument to TimeTest is a dummy (AllGather has none).
testResult_t AllGatherRunTest(struct threadArgs* args, int root, ncclDataType_t type, const char* typeName, ncclRedOp_t op, const char* opName) {
args->collTest = &allGatherTest;
ncclDataType_t *run_types;
const char **run_typenames;
int type_count;
if ((int)type != -1) {
type_count = 1;
run_types = &type;
run_typenames = &typeName;
} else {
type_count = ncclNumTypes;
run_types = test_types;
run_typenames = test_typenames;
}
for (int i=0; i<type_count; i++) {
TESTCHECK(TimeTest(args, run_types[i], run_typenames[i], (ncclRedOp_t)0, "", -1));
}
return testSuccess;
}
// Engine descriptor exported to the harness; the weak-symbol alias lets the
// common driver resolve ncclTestEngine to this collective's implementation.
struct testEngine allGatherEngine = {
AllGatherGetBuffSize,
AllGatherRunTest
};
#pragma weak ncclTestEngine=allGatherEngine
| 5cdb3f53d4fd4b8119eace8e7749578e3dad4a4f.cu | /*************************************************************************
* Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
*
* See LICENSE.txt for license information
************************************************************************/
#include "cuda_runtime.h"
#include "common.h"
// Prints the three-line column header of the benchmark result table
// (size/count/type, then out-of-place and in-place time/algbw/busbw/error
// columns). PRINT is the harness-provided output macro.
void print_header() {
PRINT("# %10s %12s %6s out-of-place in-place \n", "", "", "");
PRINT("# %10s %12s %6s %7s %6s %6s %5s %7s %6s %6s %5s\n", "size", "count", "type",
"time", "algbw", "busbw", "error", "time", "algbw", "busbw", "error");
PRINT("# %10s %12s %6s %7s %6s %6s %5s %7s %6s %6s %5s\n", "(B)", "(elements)", "",
"(us)", "(GB/s)", "(GB/s)", "", "(us)", "(GB/s)", "(GB/s)", "");
}
// Prints the per-measurement row prefix (byte size, element count, type name).
// opName/root are unused: AllGather has no reduction op or root rank.
void print_line_header (size_t size, size_t count, const char *typeName, const char *opName, int root) {
PRINT("%12li %12li %6s", size, count, typeName);
}
// Derives per-rank element counts for AllGather from the total element count.
// Each rank sends count/nranks elements; the gathered result is that share
// times nranks. In-place sends start at this rank's slot inside the receive
// buffer (offset count/nranks); the receive buffer itself has no offset.
void AllGatherGetCollByteCount(size_t *sendcount, size_t *recvcount, size_t *paramcount, size_t *sendInplaceOffset, size_t *recvInplaceOffset, size_t count, int nranks) {
    const size_t perRank = count / nranks;
    *sendcount = perRank;
    *recvcount = perRank * nranks;
    *sendInplaceOffset = perRank;
    *recvInplaceOffset = 0;
    // The count passed to ncclAllGather is the per-rank send count.
    *paramcount = perRank;
}
// Initializes buffers on every GPU managed by this thread for repetition
// `rep`: zeroes the receive buffer, fills the send data deterministically per
// global rank, and builds the expected gathered result (each rank's block in
// rank order) for later verification. In-place mode writes the send data
// directly into this rank's slot of the receive buffer, per the NCCL in-place
// AllGather convention. `op` and `root` are unused (no reduction/root).
testResult_t AllGatherInitData(struct threadArgs* args, ncclDataType_t type, ncclRedOp_t op, int root, int rep, int in_place) {
size_t sendcount = args->sendBytes / wordSize(type);
size_t recvcount = args->expectedBytes / wordSize(type);
int nranks = args->nProcs*args->nThreads*args->nGpus;
for (int i=0; i<args->nGpus; i++) {
// Local device ordinal for GPU i of this thread.
int gpuid = args->localRank*args->nThreads*args->nGpus + args->thread*args->nGpus + i;
CUDACHECK(cudaSetDevice(gpuid));
// Global rank that owns GPU i of this thread.
int rank = ((args->proc*args->nThreads + args->thread)*args->nGpus + i);
CUDACHECK(cudaMemset(args->recvbuffs[i], 0, args->expectedBytes));
void* data = in_place ? ((char*)args->recvbuffs[i])+rank*args->sendBytes : args->sendbuffs[i];
TESTCHECK(InitData(data, sendcount, type, rep, rank));
// Expected result: rank j's deterministic block at offset j*sendBytes.
for (int j=0; j<nranks; j++) {
TESTCHECK(InitData(((char*)args->expected[i])+args->sendBytes*j, sendcount, type, rep, j));
}
CUDACHECK(cudaDeviceSynchronize());
}
return testSuccess;
}
// Converts a measured time into AllGather bandwidth figures.
// - algBw: total gathered payload (count elements of typesize bytes from each
//   of nranks ranks) divided by the elapsed seconds, in GB/s.
// - busBw: algBw scaled by (nranks-1)/nranks — each rank already holds its
//   own block, so only that fraction actually crosses the bus (standard
//   nccl-tests bus-bandwidth model).
// Fix: removed a leftover debug PRINT("factor: %f", factor) that had no
// trailing newline and corrupted the formatted result table.
void AllGatherGetBw(size_t count, int typesize, double sec, double* algBw, double* busBw, int nranks) {
    double baseBw = (double)(count * typesize * nranks) / 1.0E9 / sec;
    *algBw = baseBw;
    double factor = ((double)(nranks - 1))/((double)nranks);
    *busBw = baseBw * factor;
}
// Issues one ncclAllGather on the given stream. `op` and `root` are ignored:
// AllGather has no reduction operator or root rank.
testResult_t AllGatherRunColl(void* sendbuff, void* recvbuff, size_t count, ncclDataType_t type, ncclRedOp_t op, int root, ncclComm_t comm, cudaStream_t stream) {
NCCLCHECK(ncclAllGather(sendbuff, recvbuff, count, type, comm, stream));
return testSuccess;
}
// Collective descriptor wired into the generic test harness: name plus the
// callbacks for sizing, data initialization, bandwidth math, and the launch.
struct testColl allGatherTest = {
"AllGather",
AllGatherGetCollByteCount,
AllGatherInitData,
AllGatherGetBw,
AllGatherRunColl
};
// Thin wrapper used by the harness to size the send/receive buffers; the
// remaining outputs of AllGatherGetCollByteCount are computed and discarded.
void AllGatherGetBuffSize(size_t *sendcount, size_t *recvcount, size_t count, int nranks) {
    size_t paramcount = 0;
    size_t sendInplaceOffset = 0;
    size_t recvInplaceOffset = 0;
    AllGatherGetCollByteCount(sendcount, recvcount, &paramcount, &sendInplaceOffset, &recvInplaceOffset, count, nranks);
}
// Harness entry: installs the AllGather descriptor and times it. If a
// concrete data type was requested ((int)type != -1) only that type runs;
// otherwise every type in the harness-provided test_types table is swept.
// The reduction op argument to TimeTest is a dummy (AllGather has none).
testResult_t AllGatherRunTest(struct threadArgs* args, int root, ncclDataType_t type, const char* typeName, ncclRedOp_t op, const char* opName) {
args->collTest = &allGatherTest;
ncclDataType_t *run_types;
const char **run_typenames;
int type_count;
if ((int)type != -1) {
type_count = 1;
run_types = &type;
run_typenames = &typeName;
} else {
type_count = ncclNumTypes;
run_types = test_types;
run_typenames = test_typenames;
}
for (int i=0; i<type_count; i++) {
TESTCHECK(TimeTest(args, run_types[i], run_typenames[i], (ncclRedOp_t)0, "", -1));
}
return testSuccess;
}
// Engine descriptor exported to the harness; the weak-symbol alias lets the
// common driver resolve ncclTestEngine to this collective's implementation.
struct testEngine allGatherEngine = {
AllGatherGetBuffSize,
AllGatherRunTest
};
#pragma weak ncclTestEngine=allGatherEngine
|
74a4db547a4546623623a06f36105c1b56fc6d21.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/emulation.hpp"
namespace cv { namespace cuda { namespace device
{
namespace hough
{
// Compacts the coordinates of all non-zero pixels of `src` into `list`,
// packed as (y << 16) | x, using per-row shared-memory queues to batch the
// global atomic traffic: each of the blockDim.y (= 4) thread rows fills its
// own queue, one thread reserves the block's slice of the global list with a
// single atomicAdd, then the queues are flushed cooperatively.
// Expects blockDim = (32, 4); each thread scans up to PIXELS_PER_THREAD
// pixels of one image row with a stride of blockDim.x. `counterPtr` must be
// zeroed before launch. NOTE: x is limited to 16 bits by the packing.
template <int PIXELS_PER_THREAD>
__global__ void buildPointList(const PtrStepSzb src, unsigned int* list, int* counterPtr)
{
__shared__ unsigned int s_queues[4][32 * PIXELS_PER_THREAD];
__shared__ int s_qsize[4];
__shared__ int s_globStart[4];
const int x = blockIdx.x * blockDim.x * PIXELS_PER_THREAD + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (threadIdx.x == 0)
s_qsize[threadIdx.y] = 0;
__syncthreads();
if (y < src.rows)
{
// fill the queue
const uchar* srcRow = src.ptr(y);
for (int i = 0, xx = x; i < PIXELS_PER_THREAD && xx < src.cols; ++i, xx += blockDim.x)
{
if (srcRow[xx])
{
const unsigned int val = (y << 16) | xx;
const int qidx = Emulation::smem::atomicAdd(&s_qsize[threadIdx.y], 1);
s_queues[threadIdx.y][qidx] = val;
}
}
}
__syncthreads();
// let one thread reserve the space required in the global list
if (threadIdx.x == 0 && threadIdx.y == 0)
{
// find how many items are stored in each list
int totalSize = 0;
for (int i = 0; i < blockDim.y; ++i)
{
s_globStart[i] = totalSize;
totalSize += s_qsize[i];
}
// calculate the offset in the global list
const int globalOffset = atomicAdd(counterPtr, totalSize);
for (int i = 0; i < blockDim.y; ++i)
s_globStart[i] += globalOffset;
}
__syncthreads();
// copy local queues to global queue
const int qsize = s_qsize[threadIdx.y];
int gidx = s_globStart[threadIdx.y] + threadIdx.x;
for(int i = threadIdx.x; i < qsize; i += blockDim.x, gidx += blockDim.x)
list[gidx] = s_queues[threadIdx.y][i];
}
// Host launcher: zeroes the device-side counter, runs buildPointList over the
// whole image on `stream`, then synchronizes the stream so the async copy of
// the counter is complete, and returns the number of points found.
int buildPointList_gpu(PtrStepSzb src, unsigned int* list, int* counterPtr, hipStream_t stream)
{
const int PIXELS_PER_THREAD = 16;
cudaSafeCall( hipMemsetAsync(counterPtr, 0, sizeof(int), stream) );
const dim3 block(32, 4);
const dim3 grid(divUp(src.cols, block.x * PIXELS_PER_THREAD), divUp(src.rows, block.y));
cudaSafeCall( hipFuncSetCacheConfig(buildPointList<PIXELS_PER_THREAD>, hipFuncCachePreferShared) );
hipLaunchKernelGGL(( buildPointList<PIXELS_PER_THREAD>), dim3(grid), dim3(block), 0, stream, src, list, counterPtr);
cudaSafeCall( hipGetLastError() );
int totalCount;
cudaSafeCall( hipMemcpyAsync(&totalCount, counterPtr, sizeof(int), hipMemcpyDeviceToHost, stream) );
cudaSafeCall( hipStreamSynchronize(stream) );
return totalCount;
}
}
}}}
#endif /* CUDA_DISABLER */
| 74a4db547a4546623623a06f36105c1b56fc6d21.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/emulation.hpp"
namespace cv { namespace cuda { namespace device
{
namespace hough
{
// Compacts the coordinates of all non-zero pixels of `src` into `list`,
// packed as (y << 16) | x, using per-row shared-memory queues to batch the
// global atomic traffic: each of the blockDim.y (= 4) thread rows fills its
// own queue, one thread reserves the block's slice of the global list with a
// single atomicAdd, then the queues are flushed cooperatively.
// Expects blockDim = (32, 4); each thread scans up to PIXELS_PER_THREAD
// pixels of one image row with a stride of blockDim.x. `counterPtr` must be
// zeroed before launch. NOTE: x is limited to 16 bits by the packing.
template <int PIXELS_PER_THREAD>
__global__ void buildPointList(const PtrStepSzb src, unsigned int* list, int* counterPtr)
{
__shared__ unsigned int s_queues[4][32 * PIXELS_PER_THREAD];
__shared__ int s_qsize[4];
__shared__ int s_globStart[4];
const int x = blockIdx.x * blockDim.x * PIXELS_PER_THREAD + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (threadIdx.x == 0)
s_qsize[threadIdx.y] = 0;
__syncthreads();
if (y < src.rows)
{
// fill the queue
const uchar* srcRow = src.ptr(y);
for (int i = 0, xx = x; i < PIXELS_PER_THREAD && xx < src.cols; ++i, xx += blockDim.x)
{
if (srcRow[xx])
{
const unsigned int val = (y << 16) | xx;
const int qidx = Emulation::smem::atomicAdd(&s_qsize[threadIdx.y], 1);
s_queues[threadIdx.y][qidx] = val;
}
}
}
__syncthreads();
// let one thread reserve the space required in the global list
if (threadIdx.x == 0 && threadIdx.y == 0)
{
// find how many items are stored in each list
int totalSize = 0;
for (int i = 0; i < blockDim.y; ++i)
{
s_globStart[i] = totalSize;
totalSize += s_qsize[i];
}
// calculate the offset in the global list
const int globalOffset = atomicAdd(counterPtr, totalSize);
for (int i = 0; i < blockDim.y; ++i)
s_globStart[i] += globalOffset;
}
__syncthreads();
// copy local queues to global queue
const int qsize = s_qsize[threadIdx.y];
int gidx = s_globStart[threadIdx.y] + threadIdx.x;
for(int i = threadIdx.x; i < qsize; i += blockDim.x, gidx += blockDim.x)
list[gidx] = s_queues[threadIdx.y][i];
}
// Host launcher: zeroes the device-side counter, runs buildPointList over the
// whole image on `stream`, then synchronizes the stream so the async copy of
// the counter is complete, and returns the number of points found.
int buildPointList_gpu(PtrStepSzb src, unsigned int* list, int* counterPtr, cudaStream_t stream)
{
const int PIXELS_PER_THREAD = 16;
cudaSafeCall( cudaMemsetAsync(counterPtr, 0, sizeof(int), stream) );
const dim3 block(32, 4);
const dim3 grid(divUp(src.cols, block.x * PIXELS_PER_THREAD), divUp(src.rows, block.y));
cudaSafeCall( cudaFuncSetCacheConfig(buildPointList<PIXELS_PER_THREAD>, cudaFuncCachePreferShared) );
buildPointList<PIXELS_PER_THREAD><<<grid, block, 0, stream>>>(src, list, counterPtr);
cudaSafeCall( cudaGetLastError() );
int totalCount;
cudaSafeCall( cudaMemcpyAsync(&totalCount, counterPtr, sizeof(int), cudaMemcpyDeviceToHost, stream) );
cudaSafeCall( cudaStreamSynchronize(stream) );
return totalCount;
}
}
}}}
#endif /* CUDA_DISABLER */
|
cf49e0d50baef786f408afaaf8157310e62d634a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Babak Poursartip
// 02/14/2021
// CUDA
//topic: scan
#include <cstdio>
#include <ctime>
#include <iostream>
// ==============================
// In-place inclusive prefix sum (Hillis-Steele scan) over blockDim.x + 1
// ints in a single block. Thread t owns element t+1; element 0 is already
// its own prefix sum, so the caller launches n-1 threads for n elements
// (matching the existing <<<1, count-1>>> launch in main).
// Fix: the original loop raced on d[] — no barrier between reading d[tid]
// and writing d[tid+stepSize] — and mixed two inconsistent loop updates
// (tc /= 2 AND tc -= stepSize), so its output was nondeterministic.
__global__ void sum(int *d)
{
    int n = blockDim.x + 1;   // total number of array elements
    int i = threadIdx.x + 1;  // element this thread updates
    for (int stepSize = 1; stepSize < n; stepSize *= 2)
    {
        // Read the partner value before anyone overwrites it...
        int v = (i >= stepSize) ? d[i - stepSize] : 0;
        __syncthreads();      // ...wait until every thread has read,
        d[i] += v;            // then apply the update,
        __syncthreads();      // and publish it before the next round.
    }
}
// ==============================
// Host driver for the scan demo: build 1..16 on the host, scan it on the
// device with one block, print the result.
// NOTE(review): HIP API return codes are not checked here.
int main()
{
    printf(" starts \n");
    const int count = 16;
    const int size = count * sizeof(int);
    // Host data: 1, 2, ..., count.
    int h[count];
    for (int i = 0; i < count; ++i)
    {
        h[i] = i + 1;
    }
    // Stage on the device, run the kernel with count-1 threads, copy back.
    int *d = NULL;
    hipMalloc(&d, size);
    hipMemcpy(d, h, size, hipMemcpyHostToDevice);
    hipLaunchKernelGGL(sum, dim3(1), dim3(count - 1), 0, 0, d);
    hipMemcpy(h, d, size, hipMemcpyDeviceToHost);
    // Print the scanned array.
    int idx = 0;
    while (idx < count)
    {
        std::cout << h[idx] << " ";
        ++idx;
    }
    std::cout << std::endl;
    hipFree(d);
    printf(" done \n");
    return 0;
}
| cf49e0d50baef786f408afaaf8157310e62d634a.cu |
// Babak Poursartip
// 02/14/2021
// CUDA
//topic: scan
#include <cstdio>
#include <ctime>
#include <iostream>
// ==============================
// In-place inclusive prefix sum (Hillis-Steele scan) over blockDim.x + 1
// ints in a single block. Thread t owns element t+1; element 0 is already
// its own prefix sum, so the caller launches n-1 threads for n elements
// (matching the existing <<<1, count-1>>> launch in main).
// Fix: the original loop raced on d[] — no barrier between reading d[tid]
// and writing d[tid+stepSize] — and mixed two inconsistent loop updates
// (tc /= 2 AND tc -= stepSize), so its output was nondeterministic.
__global__ void sum(int *d)
{
    int n = blockDim.x + 1;   // total number of array elements
    int i = threadIdx.x + 1;  // element this thread updates
    for (int stepSize = 1; stepSize < n; stepSize *= 2)
    {
        // Read the partner value before anyone overwrites it...
        int v = (i >= stepSize) ? d[i - stepSize] : 0;
        __syncthreads();      // ...wait until every thread has read,
        d[i] += v;            // then apply the update,
        __syncthreads();      // and publish it before the next round.
    }
}
// ==============================
// Host driver for the scan demo: build 1..16 on the host, scan it on the
// device with one block, print the result.
// NOTE(review): CUDA API return codes are not checked here.
int main()
{
    printf(" starts \n");
    const int count = 16;
    const int size = count * sizeof(int);
    // Host data: 1, 2, ..., count.
    int h[count];
    for (int i = 0; i < count; ++i)
    {
        h[i] = i + 1;
    }
    // Stage on the device, run the kernel with count-1 threads, copy back.
    int *d = NULL;
    cudaMalloc(&d, size);
    cudaMemcpy(d, h, size, cudaMemcpyHostToDevice);
    sum<<<1, count - 1>>>(d);
    cudaMemcpy(h, d, size, cudaMemcpyDeviceToHost);
    // Print the scanned array.
    int idx = 0;
    while (idx < count)
    {
        std::cout << h[idx] << " ";
        ++idx;
    }
    std::cout << std::endl;
    cudaFree(d);
    printf(" done \n");
    return 0;
}
}
|
a72400ec723fdcda385d75906dde9b1375e439de.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This sample demonstrates Inter Process Communication
* features new to SDK 4.1 and uses one process per GPU for computation.
* Note: Multiple processes per single device are possible but not recommended.
* In such cases, one should use IPC events for hardware synchronization.
*/
// Includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime includes
#include <hip/hip_runtime_api.h>
// CUDA utilities and system includes
#include <helper_cuda.h>
int *pArgc = NULL;
char **pArgv = NULL;
#define MAX_DEVICES 8
#define PROCESSES_PER_DEVICE 1
#define DATA_BUF_SIZE 4096
#ifdef __linux
#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <linux/version.h>
typedef struct ipcCUDA_st
{
int device;
pid_t pid;
hipIpcEventHandle_t eventHandle;
hipIpcMemHandle_t memHandle;
} ipcCUDA_t;
typedef struct ipcDevices_st
{
int count;
int ordinals[MAX_DEVICES];
} ipcDevices_t;
typedef struct ipcBarrier_st
{
int count;
bool sense;
bool allExit;
} ipcBarrier_t;
ipcBarrier_t *g_barrier = NULL;
bool g_procSense;
int g_processCount;
// Sense-reversing barrier across all participating processes, built on the
// mmap'd shared ipcBarrier_t. The last arriver resets the shared count and
// flips the shared sense; every other process spins (yielding the CPU) until
// the shared sense differs from its process-local sense. If any process has
// flagged allExit (a failed data check), waiters exit instead of spinning
// forever on a dead peer.
void procBarrier()
{
// Atomic arrival count in process-shared memory.
int newCount = __sync_add_and_fetch(&g_barrier->count, 1);
if (newCount == g_processCount)
{
// Last arriver: release everyone by flipping the shared sense.
g_barrier->count = 0;
g_barrier->sense = !g_procSense;
}
else
{
while (g_barrier->sense == g_procSense)
{
if (!g_barrier->allExit)
{
sched_yield();
}
else
{
exit(EXIT_FAILURE);
}
}
}
// Flip the local sense so the barrier can be reused immediately.
g_procSense = !g_procSense;
}
// Device kernel: element-wise integer division, dst[i] = src[i] / num.
// NOTE(review): no bounds guard — the launch must cover exactly the buffer
// length (gridDim.x * blockDim.x elements).
__global__ void simpleKernel(int *dst, int *src, int num)
{
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    dst[tid] = src[tid] / num;
}
// Enumerates UVA-capable GPUs that also have two-way peer access with the
// first one, writing the results into the mmap'd, process-shared `devices`.
// All CUDA work happens in a forked child (CUDA must not be initialized in
// the parent before the later fork()s); the child exits when done and the
// parent just reaps it. Exits early (leaving devices->count possibly < 2)
// when fewer than two UVA devices exist or a device is not in Default
// compute mode.
void getDeviceCount(ipcDevices_t *devices)
{
// We can't initialize CUDA before fork() so we need to spawn a new process
pid_t pid = fork();
if (0 == pid)
{
int i;
int count, uvaCount = 0;
int uvaOrdinals[MAX_DEVICES];
printf("\nChecking for multiple GPUs...\n");
checkCudaErrors(hipGetDeviceCount(&count));
printf("CUDA-capable device count: %i\n", count);
printf("\nSearching for UVA capable devices...\n");
for (i = 0; i < count; i++)
{
hipDeviceProp_t prop;
checkCudaErrors(hipGetDeviceProperties(&prop, i));
if (prop.unifiedAddressing)
{
uvaOrdinals[uvaCount] = i;
printf("> GPU%d = \"%15s\" IS capable of UVA\n", i, prop.name);
uvaCount += 1;
}
if (prop.computeMode != hipComputeModeDefault)
{
printf("> GPU device must be in Compute Mode Default to run\n");
printf("> Please use nvidia-smi to change the Compute Mode to Default\n");
exit(EXIT_SUCCESS);
}
}
devices->ordinals[0] = uvaOrdinals[0];
if (uvaCount < 2)
{
devices->count = uvaCount;
exit(EXIT_SUCCESS);
}
// Check possibility for peer accesses, relevant to our tests
printf("\nChecking GPU(s) for support of peer to peer memory access...\n");
devices->count = 1;
int canAccessPeer_0i, canAccessPeer_i0;
for (i = 1; i < uvaCount; i++)
{
checkCudaErrors(hipDeviceCanAccessPeer(&canAccessPeer_0i, uvaOrdinals[0], uvaOrdinals[i]));
checkCudaErrors(hipDeviceCanAccessPeer(&canAccessPeer_i0, uvaOrdinals[i], uvaOrdinals[0]));
if (canAccessPeer_0i*canAccessPeer_i0)
{
devices->ordinals[devices->count] = uvaOrdinals[i];
printf("> Two-way peer access between GPU%d and GPU%d: YES\n", devices->ordinals[0], devices->ordinals[devices->count]);
devices->count += 1;
}
}
exit(EXIT_SUCCESS);
}
else
{
// Parent: wait for the probe child and require a clean exit.
int status;
waitpid(pid, &status, 0);
assert(!status);
}
}
// True when this binary was compiled for a 64-bit x86 target — the sample's
// IPC path is only supported on 64-bit Linux builds.
inline bool IsAppBuiltAs64()
{
#if defined(__x86_64) || defined(AMD64) || defined(_M_AMD64)
    return true;
#else
    return false;
#endif
}
// IPC/P2P test body executed by every process. Process 0 allocates one large
// buffer on its GPU, publishes it via an IPC memory handle in shared memory,
// and seeds the first DATA_BUF_SIZE ints with reference data. Every other
// process maps that buffer through the handle, runs simpleKernel to write
// refData/(index+1) into its own DATA_BUF_SIZE slot, and signals completion
// via an IPC event. Process 0 then copies the slots back and verifies them.
// The procBarrier() calls (b.1-b.3) keep the handle-exchange, launch, and
// teardown phases in lock step across processes; rand() is unseeded so every
// process generates the identical reference sequence.
void runTestMultiKernel(ipcCUDA_t *s_mem, int index)
{
/*
* a) Process 0 loads a reference buffer into GPU0 memory
* b) Other processes launch a kernel on the GPU0 memory using P2P
* c) Process 0 checks the resulting buffer
*/
// memory buffer in gpu
int *d_ptr;
// reference buffer in host memory (do in all processes for rand() consistency)
int h_refData[DATA_BUF_SIZE];
for (int i = 0; i < DATA_BUF_SIZE; i++)
{
h_refData[i] = rand();
}
checkCudaErrors(hipSetDevice(s_mem[index].device));
if (index == 0)
{
printf("\nLaunching kernels...\n");
// host memory buffer for checking results
int h_results[DATA_BUF_SIZE * MAX_DEVICES * PROCESSES_PER_DEVICE];
hipEvent_t event[MAX_DEVICES * PROCESSES_PER_DEVICE];
checkCudaErrors(hipMalloc((void **) &d_ptr, DATA_BUF_SIZE * g_processCount * sizeof(int)));
checkCudaErrors(hipIpcGetMemHandle((hipIpcMemHandle_t *) &s_mem[0].memHandle, (void *) d_ptr));
checkCudaErrors(hipMemcpy((void *) d_ptr, (void *) h_refData, DATA_BUF_SIZE * sizeof(int), hipMemcpyHostToDevice));
// b.1: wait until all event handles are created in other processes
procBarrier();
for (int i = 1; i < g_processCount; i++)
{
checkCudaErrors(hipIpcOpenEventHandle(&event[i], s_mem[i].eventHandle));
}
// b.2: wait until all kernels launched and events recorded
procBarrier();
for (int i = 1; i < g_processCount; i++)
{
checkCudaErrors(hipEventSynchronize(event[i]));
}
// b.3
procBarrier();
// Slot 0 holds the reference data; worker results start at slot 1.
checkCudaErrors(hipMemcpy(h_results, d_ptr + DATA_BUF_SIZE,
DATA_BUF_SIZE * (g_processCount - 1) * sizeof(int), hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(d_ptr));
printf("Checking test results...\n");
for (int n = 1; n < g_processCount; n++)
{
for (int i = 0; i < DATA_BUF_SIZE; i++)
{
if (h_refData[i]/(n + 1) != h_results[(n-1) * DATA_BUF_SIZE + i])
{
fprintf(stderr, "Data check error at index %d in process %d!: %i, %i\n",i,
n, h_refData[i], h_results[(n-1) * DATA_BUF_SIZE + i]);
// Tell waiters at the barrier to abort instead of spinning forever.
g_barrier->allExit = true;
exit(EXIT_FAILURE);
}
}
}
}
else
{
hipEvent_t event;
checkCudaErrors(hipEventCreate(&event, hipEventDisableTiming | hipEventInterprocess));
checkCudaErrors(hipIpcGetEventHandle((hipIpcEventHandle_t *) &s_mem[index].eventHandle, event));
// b.1: wait until proc 0 initializes device memory
procBarrier();
checkCudaErrors(hipIpcOpenMemHandle((void **) &d_ptr, s_mem[0].memHandle,
hipIpcMemLazyEnablePeerAccess));
printf("> Process %3d: Run kernel on GPU%d, taking source data from and writing results to process %d, GPU%d...\n",
index, s_mem[index].device, 0, s_mem[0].device);
const dim3 threads(512, 1);
const dim3 blocks(DATA_BUF_SIZE / threads.x, 1);
hipLaunchKernelGGL(( simpleKernel), dim3(blocks), dim3(threads), 0, 0, d_ptr + index *DATA_BUF_SIZE, d_ptr, index + 1);
checkCudaErrors(hipEventRecord(event));
// b.2
procBarrier();
checkCudaErrors(hipIpcCloseMemHandle(d_ptr));
// b.3: wait till all the events are used up by proc g_processCount - 1
procBarrier();
checkCudaErrors(hipEventDestroy(event));
}
}
#endif
// Entry point. Verifies the 64-bit Linux + kernel >= 2.6.18 requirements,
// discovers UVA/P2P-capable devices in a probe child (CUDA must not be
// initialized before the later fork()s), sets up mmap'd process-shared state
// (device list, barrier, per-process IPC handles), forks one worker per GPU
// (or two workers on a single GPU), runs runTestMultiKernel in every process,
// then process 0 reaps the children and resets the devices.
int main(int argc, char **argv)
{
pArgc = &argc;
pArgv = argv;
#if CUDART_VERSION >= 4010 && defined(__linux)
if (!IsAppBuiltAs64())
{
printf("%s is only supported on 64-bit Linux OS and the application must be built as a 64-bit target. Test is being waived.\n", argv[0]);
exit(EXIT_WAIVED);
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
printf("%s is only supported with Linux OS kernel version 2.6.18 and higher. Test is being waived.\n", argv[0]);
exit(EXIT_WAIVED);
#endif
// Anonymous shared mapping so the probe child can report devices back.
ipcDevices_t *s_devices = (ipcDevices_t *) mmap(NULL, sizeof(*s_devices),
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, 0, 0);
assert(MAP_FAILED != s_devices);
// We can't initialize CUDA before fork() so we need to spawn a new process
getDeviceCount(s_devices);
if (s_devices->count < 1)
{
printf("One or more (SM 2.0) class GPUs are required for %s.\n", argv[0]);
printf("Waiving test.\n");
exit(EXIT_SUCCESS);
}
// initialize our process and barrier data
// if there is more than one device, 1 process per device
if (s_devices->count > 1)
{
g_processCount = PROCESSES_PER_DEVICE * s_devices->count;
}
else
{
g_processCount = 2; // two processes per single device
}
g_barrier = (ipcBarrier_t *) mmap(NULL, sizeof(*g_barrier),
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, 0, 0);
assert(MAP_FAILED != g_barrier);
memset((void *) g_barrier, 0, sizeof(*g_barrier));
// set local barrier sense flag
g_procSense = 0;
// shared memory for the CUDA IPC memory and event handles
ipcCUDA_t *s_mem = (ipcCUDA_t *) mmap(NULL, g_processCount * sizeof(*s_mem),
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, 0, 0);
assert(MAP_FAILED != s_mem);
// initialize shared memory
memset((void *) s_mem, 0, g_processCount * sizeof(*s_mem));
printf("\nSpawning processes and assigning GPUs...\n");
// index = 0,.., g_processCount - 1
int index = 0;
// spawn "g_processCount - 1" additional processes
for (int i = 1; i < g_processCount; i++)
{
int pid = fork();
if (!pid)
{
// Child: remember which worker slot we are and stop forking.
index = i;
break;
}
else
{
s_mem[i].pid = pid;
}
}
// distribute UVA capable devices among processes (1 device per PROCESSES_PER_DEVICE processes)
// if there is only one device, have 1 extra process
if (s_devices->count > 1)
{
s_mem[index].device = s_devices->ordinals[ index / PROCESSES_PER_DEVICE ];
}
else
{
s_mem[0].device = s_mem[1].device = s_devices->ordinals[ 0 ];
}
printf("> Process %3d -> GPU%d\n", index, s_mem[index].device);
// launch our test
runTestMultiKernel(s_mem, index);
// Cleanup and shutdown
if (index == 0)
{
// wait for processes to complete
for (int i = 1; i < g_processCount; i++)
{
int status;
waitpid(s_mem[i].pid, &status, 0);
assert(WIFEXITED(status));
}
printf("\nShutting down...\n");
for (int i = 0; i < s_devices->count; i++)
{
checkCudaErrors(hipSetDevice(s_devices->ordinals[i]));
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
hipDeviceReset();
}
exit(EXIT_SUCCESS);
}
#else // Using CUDA 4.0 and older or non Linux OS
printf("simpleIPC requires CUDA 4.1 and Linux to build and run, waiving testing\n\n");
exit(EXIT_WAIVED);
#endif
}
| a72400ec723fdcda385d75906dde9b1375e439de.cu | /*
* Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This sample demonstrates Inter Process Communication
* features new to SDK 4.1 and uses one process per GPU for computation.
* Note: Multiple processes per single device are possible but not recommended.
* In such cases, one should use IPC events for hardware synchronization.
*/
// Includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime includes
#include <cuda_runtime_api.h>
// CUDA utilities and system includes
#include <helper_cuda.h>
int *pArgc = NULL;
char **pArgv = NULL;
#define MAX_DEVICES 8
#define PROCESSES_PER_DEVICE 1
#define DATA_BUF_SIZE 4096
#ifdef __linux
#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <linux/version.h>
typedef struct ipcCUDA_st
{
int device;
pid_t pid;
cudaIpcEventHandle_t eventHandle;
cudaIpcMemHandle_t memHandle;
} ipcCUDA_t;
typedef struct ipcDevices_st
{
int count;
int ordinals[MAX_DEVICES];
} ipcDevices_t;
typedef struct ipcBarrier_st
{
int count;
bool sense;
bool allExit;
} ipcBarrier_t;
ipcBarrier_t *g_barrier = NULL;
bool g_procSense;
int g_processCount;
void procBarrier()
{
int newCount = __sync_add_and_fetch(&g_barrier->count, 1);
if (newCount == g_processCount)
{
g_barrier->count = 0;
g_barrier->sense = !g_procSense;
}
else
{
while (g_barrier->sense == g_procSense)
{
if (!g_barrier->allExit)
{
sched_yield();
}
else
{
exit(EXIT_FAILURE);
}
}
}
g_procSense = !g_procSense;
}
// CUDA Kernel
__global__ void simpleKernel(int *dst, int *src, int num)
{
// Dummy kernel
int idx = blockIdx.x * blockDim.x + threadIdx.x;
dst[idx] = src[idx] / num;
}
void getDeviceCount(ipcDevices_t *devices)
{
// We can't initialize CUDA before fork() so we need to spawn a new process
pid_t pid = fork();
if (0 == pid)
{
int i;
int count, uvaCount = 0;
int uvaOrdinals[MAX_DEVICES];
printf("\nChecking for multiple GPUs...\n");
checkCudaErrors(cudaGetDeviceCount(&count));
printf("CUDA-capable device count: %i\n", count);
printf("\nSearching for UVA capable devices...\n");
for (i = 0; i < count; i++)
{
cudaDeviceProp prop;
checkCudaErrors(cudaGetDeviceProperties(&prop, i));
if (prop.unifiedAddressing)
{
uvaOrdinals[uvaCount] = i;
printf("> GPU%d = \"%15s\" IS capable of UVA\n", i, prop.name);
uvaCount += 1;
}
if (prop.computeMode != cudaComputeModeDefault)
{
printf("> GPU device must be in Compute Mode Default to run\n");
printf("> Please use nvidia-smi to change the Compute Mode to Default\n");
exit(EXIT_SUCCESS);
}
}
devices->ordinals[0] = uvaOrdinals[0];
if (uvaCount < 2)
{
devices->count = uvaCount;
exit(EXIT_SUCCESS);
}
// Check possibility for peer accesses, relevant to our tests
printf("\nChecking GPU(s) for support of peer to peer memory access...\n");
devices->count = 1;
int canAccessPeer_0i, canAccessPeer_i0;
for (i = 1; i < uvaCount; i++)
{
checkCudaErrors(cudaDeviceCanAccessPeer(&canAccessPeer_0i, uvaOrdinals[0], uvaOrdinals[i]));
checkCudaErrors(cudaDeviceCanAccessPeer(&canAccessPeer_i0, uvaOrdinals[i], uvaOrdinals[0]));
if (canAccessPeer_0i*canAccessPeer_i0)
{
devices->ordinals[devices->count] = uvaOrdinals[i];
printf("> Two-way peer access between GPU%d and GPU%d: YES\n", devices->ordinals[0], devices->ordinals[devices->count]);
devices->count += 1;
}
}
exit(EXIT_SUCCESS);
}
else
{
int status;
waitpid(pid, &status, 0);
assert(!status);
}
}
inline bool IsAppBuiltAs64()
{
#if defined(__x86_64) || defined(AMD64) || defined(_M_AMD64)
return 1;
#else
return 0;
#endif
}
void runTestMultiKernel(ipcCUDA_t *s_mem, int index)
{
/*
* a) Process 0 loads a reference buffer into GPU0 memory
* b) Other processes launch a kernel on the GPU0 memory using P2P
* c) Process 0 checks the resulting buffer
*/
// memory buffer in gpu
int *d_ptr;
// reference buffer in host memory (do in all processes for rand() consistency)
int h_refData[DATA_BUF_SIZE];
for (int i = 0; i < DATA_BUF_SIZE; i++)
{
h_refData[i] = rand();
}
checkCudaErrors(cudaSetDevice(s_mem[index].device));
if (index == 0)
{
printf("\nLaunching kernels...\n");
// host memory buffer for checking results
int h_results[DATA_BUF_SIZE * MAX_DEVICES * PROCESSES_PER_DEVICE];
cudaEvent_t event[MAX_DEVICES * PROCESSES_PER_DEVICE];
checkCudaErrors(cudaMalloc((void **) &d_ptr, DATA_BUF_SIZE * g_processCount * sizeof(int)));
checkCudaErrors(cudaIpcGetMemHandle((cudaIpcMemHandle_t *) &s_mem[0].memHandle, (void *) d_ptr));
checkCudaErrors(cudaMemcpy((void *) d_ptr, (void *) h_refData, DATA_BUF_SIZE * sizeof(int), cudaMemcpyHostToDevice));
// b.1: wait until all event handles are created in other processes
procBarrier();
for (int i = 1; i < g_processCount; i++)
{
checkCudaErrors(cudaIpcOpenEventHandle(&event[i], s_mem[i].eventHandle));
}
// b.2: wait until all kernels launched and events recorded
procBarrier();
for (int i = 1; i < g_processCount; i++)
{
checkCudaErrors(cudaEventSynchronize(event[i]));
}
// b.3
procBarrier();
checkCudaErrors(cudaMemcpy(h_results, d_ptr + DATA_BUF_SIZE,
DATA_BUF_SIZE * (g_processCount - 1) * sizeof(int), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(d_ptr));
printf("Checking test results...\n");
for (int n = 1; n < g_processCount; n++)
{
for (int i = 0; i < DATA_BUF_SIZE; i++)
{
if (h_refData[i]/(n + 1) != h_results[(n-1) * DATA_BUF_SIZE + i])
{
fprintf(stderr, "Data check error at index %d in process %d!: %i, %i\n",i,
n, h_refData[i], h_results[(n-1) * DATA_BUF_SIZE + i]);
g_barrier->allExit = true;
exit(EXIT_FAILURE);
}
}
}
}
else
{
cudaEvent_t event;
checkCudaErrors(cudaEventCreate(&event, cudaEventDisableTiming | cudaEventInterprocess));
checkCudaErrors(cudaIpcGetEventHandle((cudaIpcEventHandle_t *) &s_mem[index].eventHandle, event));
// b.1: wait until proc 0 initializes device memory
procBarrier();
checkCudaErrors(cudaIpcOpenMemHandle((void **) &d_ptr, s_mem[0].memHandle,
cudaIpcMemLazyEnablePeerAccess));
printf("> Process %3d: Run kernel on GPU%d, taking source data from and writing results to process %d, GPU%d...\n",
index, s_mem[index].device, 0, s_mem[0].device);
const dim3 threads(512, 1);
const dim3 blocks(DATA_BUF_SIZE / threads.x, 1);
simpleKernel<<<blocks, threads>>> (d_ptr + index *DATA_BUF_SIZE, d_ptr, index + 1);
checkCudaErrors(cudaEventRecord(event));
// b.2
procBarrier();
checkCudaErrors(cudaIpcCloseMemHandle(d_ptr));
// b.3: wait till all the events are used up by proc g_processCount - 1
procBarrier();
checkCudaErrors(cudaEventDestroy(event));
}
}
#endif
int main(int argc, char **argv)
{
pArgc = &argc;
pArgv = argv;
#if CUDART_VERSION >= 4010 && defined(__linux)
if (!IsAppBuiltAs64())
{
printf("%s is only supported on 64-bit Linux OS and the application must be built as a 64-bit target. Test is being waived.\n", argv[0]);
exit(EXIT_WAIVED);
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
printf("%s is only supported with Linux OS kernel version 2.6.18 and higher. Test is being waived.\n", argv[0]);
exit(EXIT_WAIVED);
#endif
ipcDevices_t *s_devices = (ipcDevices_t *) mmap(NULL, sizeof(*s_devices),
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, 0, 0);
assert(MAP_FAILED != s_devices);
// We can't initialize CUDA before fork() so we need to spawn a new process
getDeviceCount(s_devices);
if (s_devices->count < 1)
{
printf("One or more (SM 2.0) class GPUs are required for %s.\n", argv[0]);
printf("Waiving test.\n");
exit(EXIT_SUCCESS);
}
// initialize our process and barrier data
// if there is more than one device, 1 process per device
if (s_devices->count > 1)
{
g_processCount = PROCESSES_PER_DEVICE * s_devices->count;
}
else
{
g_processCount = 2; // two processes per single device
}
g_barrier = (ipcBarrier_t *) mmap(NULL, sizeof(*g_barrier),
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, 0, 0);
assert(MAP_FAILED != g_barrier);
memset((void *) g_barrier, 0, sizeof(*g_barrier));
// set local barrier sense flag
g_procSense = 0;
// shared memory for CUDA memory an event handlers
ipcCUDA_t *s_mem = (ipcCUDA_t *) mmap(NULL, g_processCount * sizeof(*s_mem),
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, 0, 0);
assert(MAP_FAILED != s_mem);
// initialize shared memory
memset((void *) s_mem, 0, g_processCount * sizeof(*s_mem));
printf("\nSpawning processes and assigning GPUs...\n");
// index = 0,.., g_processCount - 1
int index = 0;
// spawn "g_processCount - 1" additional processes
for (int i = 1; i < g_processCount; i++)
{
int pid = fork();
if (!pid)
{
index = i;
break;
}
else
{
s_mem[i].pid = pid;
}
}
// distribute UVA capable devices among processes (1 device per PROCESSES_PER_DEVICE processes)
// if there is only one device, have 1 extra process
if (s_devices->count > 1)
{
s_mem[index].device = s_devices->ordinals[ index / PROCESSES_PER_DEVICE ];
}
else
{
s_mem[0].device = s_mem[1].device = s_devices->ordinals[ 0 ];
}
printf("> Process %3d -> GPU%d\n", index, s_mem[index].device);
// launch our test
runTestMultiKernel(s_mem, index);
// Cleanup and shutdown
if (index == 0)
{
// wait for processes to complete
for (int i = 1; i < g_processCount; i++)
{
int status;
waitpid(s_mem[i].pid, &status, 0);
assert(WIFEXITED(status));
}
printf("\nShutting down...\n");
for (int i = 0; i < s_devices->count; i++)
{
checkCudaErrors(cudaSetDevice(s_devices->ordinals[i]));
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
cudaDeviceReset();
}
exit(EXIT_SUCCESS);
}
#else // Using CUDA 4.0 and older or non Linux OS
printf("simpleIPC requires CUDA 4.1 and Linux to build and run, waiving testing\n\n");
exit(EXIT_WAIVED);
#endif
}
|
b4cff3581310242db32bd217cc65f99a71a783ca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
using namespace std;
__global__ void sum(int *input,int *n)
{
int tid=threadIdx.x;
int num_of_threads=blockDim.x;
float tsize=(float)num_of_threads;
int step_size=1;
while(num_of_threads>0)
{
if(tid<num_of_threads)
{
int fst=tid*step_size*2;
int snd=fst+step_size;
if(snd<*n)
{
printf("fst = %d %d snd = %d %d\n",fst,input[fst],snd,input[snd]);
input[fst]+=input[snd];
}
}
step_size*=2;
if(num_of_threads!=1)
{
tsize/=2;
num_of_threads=(int)ceil(tsize);
}
else
{
num_of_threads=0;
}
}
}
int main()
{
int count=5;
//cout<<"Enter number of elements\n";
//cin>>count;
int *c=&count;
const int size=count*sizeof(int);
int a[count];
for(int i=0;i<count;i++)
{
a[i]=rand()%100;
}
for(int i=0;i<count;i++)
{
cout<<a[i]<<endl;
}
int *d;
int *n;
hipMalloc(&d,size);
hipMalloc(&n,sizeof(int));
hipMemcpy(d,a,size,hipMemcpyHostToDevice);
hipMemcpy(n,c,sizeof(int),hipMemcpyHostToDevice);
if(count%2==0)
{
hipLaunchKernelGGL(( sum), dim3(1),dim3(count/2), 0, 0, d,n);
}else
{
hipLaunchKernelGGL(( sum), dim3(1),dim3((count/2)+1), 0, 0, d,n);
}
int result;
hipMemcpy(&result,d,sizeof(int),hipMemcpyDeviceToHost);
cout<<"sum = "<<result<<endl;
double ans = (double)result/count;
cout << " Average is: " << ans << endl;
hipFree(d);
return 0;
}
| b4cff3581310242db32bd217cc65f99a71a783ca.cu | using namespace std;
__global__ void sum(int *input,int *n)
{
int tid=threadIdx.x;
int num_of_threads=blockDim.x;
float tsize=(float)num_of_threads;
int step_size=1;
while(num_of_threads>0)
{
if(tid<num_of_threads)
{
int fst=tid*step_size*2;
int snd=fst+step_size;
if(snd<*n)
{
printf("fst = %d %d snd = %d %d\n",fst,input[fst],snd,input[snd]);
input[fst]+=input[snd];
}
}
step_size*=2;
if(num_of_threads!=1)
{
tsize/=2;
num_of_threads=(int)ceil(tsize);
}
else
{
num_of_threads=0;
}
}
}
int main()
{
int count=5;
//cout<<"Enter number of elements\n";
//cin>>count;
int *c=&count;
const int size=count*sizeof(int);
int a[count];
for(int i=0;i<count;i++)
{
a[i]=rand()%100;
}
for(int i=0;i<count;i++)
{
cout<<a[i]<<endl;
}
int *d;
int *n;
cudaMalloc(&d,size);
cudaMalloc(&n,sizeof(int));
cudaMemcpy(d,a,size,cudaMemcpyHostToDevice);
cudaMemcpy(n,c,sizeof(int),cudaMemcpyHostToDevice);
if(count%2==0)
{
sum<<<1,count/2>>>(d,n);
}else
{
sum<<<1,(count/2)+1>>>(d,n);
}
int result;
cudaMemcpy(&result,d,sizeof(int),cudaMemcpyDeviceToHost);
cout<<"sum = "<<result<<endl;
double ans = (double)result/count;
cout << " Average is: " << ans << endl;
cudaFree(d);
return 0;
}
|
d910196ca9d83bb37a4a10c342ba2aae0a7c57e2.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kern_FindLeafSinkPotential.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *sinkBuffer = NULL;
hipMalloc(&sinkBuffer, XSIZE*YSIZE);
float *incBuffer = NULL;
hipMalloc(&incBuffer, XSIZE*YSIZE);
float *divBuffer = NULL;
hipMalloc(&divBuffer, XSIZE*YSIZE);
float *labelBuffer = NULL;
hipMalloc(&labelBuffer, XSIZE*YSIZE);
float iCC = 1;
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
kern_FindLeafSinkPotential), dim3(gridBlock),dim3(threadBlock), 0, 0, sinkBuffer,incBuffer,divBuffer,labelBuffer,iCC,size);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
kern_FindLeafSinkPotential), dim3(gridBlock),dim3(threadBlock), 0, 0, sinkBuffer,incBuffer,divBuffer,labelBuffer,iCC,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
kern_FindLeafSinkPotential), dim3(gridBlock),dim3(threadBlock), 0, 0, sinkBuffer,incBuffer,divBuffer,labelBuffer,iCC,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | d910196ca9d83bb37a4a10c342ba2aae0a7c57e2.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kern_FindLeafSinkPotential.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *sinkBuffer = NULL;
cudaMalloc(&sinkBuffer, XSIZE*YSIZE);
float *incBuffer = NULL;
cudaMalloc(&incBuffer, XSIZE*YSIZE);
float *divBuffer = NULL;
cudaMalloc(&divBuffer, XSIZE*YSIZE);
float *labelBuffer = NULL;
cudaMalloc(&labelBuffer, XSIZE*YSIZE);
float iCC = 1;
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kern_FindLeafSinkPotential<<<gridBlock,threadBlock>>>(sinkBuffer,incBuffer,divBuffer,labelBuffer,iCC,size);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kern_FindLeafSinkPotential<<<gridBlock,threadBlock>>>(sinkBuffer,incBuffer,divBuffer,labelBuffer,iCC,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kern_FindLeafSinkPotential<<<gridBlock,threadBlock>>>(sinkBuffer,incBuffer,divBuffer,labelBuffer,iCC,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
499652068928330f12c76efd472728c059451715.hip | // !!! This is a file automatically generated by hipify!!!
/*
* phase_oscillator_ensemble.cu
*
* The example how the phase_oscillator ensemble can be implemented using CUDA and thrust
*
* Created on: July 15, 2011
* Author: karsten
*/
#include <iostream>
#include <cmath>
#include <utility>
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <boost/numeric/odeint.hpp>
#include <boost/numeric/odeint/external/thrust/thrust_algebra.hpp>
#include <boost/numeric/odeint/external/thrust/thrust_operations.hpp>
#include <boost/numeric/odeint/external/thrust/thrust_resize.hpp>
#include <boost/random/mersenne_twister.hpp>
#include <boost/random/uniform_real.hpp>
#include <boost/random/variate_generator.hpp>
using namespace std;
using namespace boost::numeric::odeint;
//change this to float if your device does not support double computation
typedef double value_type;
//change this to host_vector< ... > of you want to run on CPU
typedef thrust::device_vector< value_type > state_type;
typedef thrust::device_vector< size_t > index_vector_type;
// typedef thrust::host_vector< value_type > state_type;
// typedef thrust::host_vector< size_t > index_vector_type;
const value_type sigma = 10.0;
const value_type b = 8.0 / 3.0;
//[ thrust_lorenz_parameters_define_simple_system
struct lorenz_system
{
struct lorenz_functor
{
template< class T >
__host__ __device__
void operator()( T t ) const
{
// unpack the parameter we want to vary and the Lorenz variables
value_type R = thrust::get< 3 >( t );
value_type x = thrust::get< 0 >( t );
value_type y = thrust::get< 1 >( t );
value_type z = thrust::get< 2 >( t );
thrust::get< 4 >( t ) = sigma * ( y - x );
thrust::get< 5 >( t ) = R * x - y - x * z;
thrust::get< 6 >( t ) = -b * z + x * y ;
}
};
lorenz_system( size_t N , const state_type &beta )
: m_N( N ) , m_beta( beta ) { }
template< class State , class Deriv >
void operator()( const State &x , Deriv &dxdt , value_type t ) const
{
thrust::for_each(
thrust::make_zip_iterator( thrust::make_tuple(
boost::begin( x ) ,
boost::begin( x ) + m_N ,
boost::begin( x ) + 2 * m_N ,
m_beta.begin() ,
boost::begin( dxdt ) ,
boost::begin( dxdt ) + m_N ,
boost::begin( dxdt ) + 2 * m_N ) ) ,
thrust::make_zip_iterator( thrust::make_tuple(
boost::begin( x ) + m_N ,
boost::begin( x ) + 2 * m_N ,
boost::begin( x ) + 3 * m_N ,
m_beta.begin() ,
boost::begin( dxdt ) + m_N ,
boost::begin( dxdt ) + 2 * m_N ,
boost::begin( dxdt ) + 3 * m_N ) ) ,
lorenz_functor() );
}
size_t m_N;
const state_type &m_beta;
};
//]
struct lorenz_perturbation_system
{
struct lorenz_perturbation_functor
{
template< class T >
__host__ __device__
void operator()( T t ) const
{
value_type R = thrust::get< 1 >( t );
value_type x = thrust::get< 0 >( thrust::get< 0 >( t ) );
value_type y = thrust::get< 1 >( thrust::get< 0 >( t ) );
value_type z = thrust::get< 2 >( thrust::get< 0 >( t ) );
value_type dx = thrust::get< 3 >( thrust::get< 0 >( t ) );
value_type dy = thrust::get< 4 >( thrust::get< 0 >( t ) );
value_type dz = thrust::get< 5 >( thrust::get< 0 >( t ) );
thrust::get< 0 >( thrust::get< 2 >( t ) ) = sigma * ( y - x );
thrust::get< 1 >( thrust::get< 2 >( t ) ) = R * x - y - x * z;
thrust::get< 2 >( thrust::get< 2 >( t ) ) = -b * z + x * y ;
thrust::get< 3 >( thrust::get< 2 >( t ) ) = sigma * ( dy - dx );
thrust::get< 4 >( thrust::get< 2 >( t ) ) = ( R - z ) * dx - dy - x * dz;
thrust::get< 5 >( thrust::get< 2 >( t ) ) = y * dx + x * dy - b * dz;
}
};
lorenz_perturbation_system( size_t N , const state_type &beta )
: m_N( N ) , m_beta( beta ) { }
template< class State , class Deriv >
void operator()( const State &x , Deriv &dxdt , value_type t ) const
{
thrust::for_each(
thrust::make_zip_iterator( thrust::make_tuple(
thrust::make_zip_iterator( thrust::make_tuple(
boost::begin( x ) ,
boost::begin( x ) + m_N ,
boost::begin( x ) + 2 * m_N ,
boost::begin( x ) + 3 * m_N ,
boost::begin( x ) + 4 * m_N ,
boost::begin( x ) + 5 * m_N ) ) ,
m_beta.begin() ,
thrust::make_zip_iterator( thrust::make_tuple(
boost::begin( dxdt ) ,
boost::begin( dxdt ) + m_N ,
boost::begin( dxdt ) + 2 * m_N ,
boost::begin( dxdt ) + 3 * m_N ,
boost::begin( dxdt ) + 4 * m_N ,
boost::begin( dxdt ) + 5 * m_N ) )
) ) ,
thrust::make_zip_iterator( thrust::make_tuple(
thrust::make_zip_iterator( thrust::make_tuple(
boost::begin( x ) + m_N ,
boost::begin( x ) + 2 * m_N ,
boost::begin( x ) + 3 * m_N ,
boost::begin( x ) + 4 * m_N ,
boost::begin( x ) + 5 * m_N ,
boost::begin( x ) + 6 * m_N ) ) ,
m_beta.begin() ,
thrust::make_zip_iterator( thrust::make_tuple(
boost::begin( dxdt ) + m_N ,
boost::begin( dxdt ) + 2 * m_N ,
boost::begin( dxdt ) + 3 * m_N ,
boost::begin( dxdt ) + 4 * m_N ,
boost::begin( dxdt ) + 5 * m_N ,
boost::begin( dxdt ) + 6 * m_N ) )
) ) ,
lorenz_perturbation_functor() );
}
size_t m_N;
const state_type &m_beta;
};
struct lyap_observer
{
//[thrust_lorenz_parameters_observer_functor
struct lyap_functor
{
template< class T >
__host__ __device__
void operator()( T t ) const
{
value_type &dx = thrust::get< 0 >( t );
value_type &dy = thrust::get< 1 >( t );
value_type &dz = thrust::get< 2 >( t );
value_type norm = sqrt( dx * dx + dy * dy + dz * dz );
dx /= norm;
dy /= norm;
dz /= norm;
thrust::get< 3 >( t ) += log( norm );
}
};
//]
lyap_observer( size_t N , size_t every = 100 )
: m_N( N ) , m_lyap( N ) , m_every( every ) , m_count( 0 )
{
thrust::fill( m_lyap.begin() , m_lyap.end() , 0.0 );
}
template< class Lyap >
void fill_lyap( Lyap &lyap )
{
thrust::copy( m_lyap.begin() , m_lyap.end() , lyap.begin() );
for( size_t i=0 ; i<lyap.size() ; ++i )
lyap[i] /= m_t_overall;
}
template< class State >
void operator()( State &x , value_type t )
{
if( ( m_count != 0 ) && ( ( m_count % m_every ) == 0 ) )
{
thrust::for_each(
thrust::make_zip_iterator( thrust::make_tuple(
boost::begin( x ) + 3 * m_N ,
boost::begin( x ) + 4 * m_N ,
boost::begin( x ) + 5 * m_N ,
m_lyap.begin() ) ) ,
thrust::make_zip_iterator( thrust::make_tuple(
boost::begin( x ) + 4 * m_N ,
boost::begin( x ) + 5 * m_N ,
boost::begin( x ) + 6 * m_N ,
m_lyap.end() ) ) ,
lyap_functor() );
clog << t << "\n";
}
++m_count;
m_t_overall = t;
}
size_t m_N;
state_type m_lyap;
size_t m_every;
size_t m_count;
value_type m_t_overall;
};
const size_t N = 1024*2;
const value_type dt = 0.01;
int main( int arc , char* argv[] )
{
int driver_version , runtime_version;
hipDriverGetVersion( &driver_version );
hipRuntimeGetVersion ( &runtime_version );
cout << driver_version << "\t" << runtime_version << endl;
//[ thrust_lorenz_parameters_define_beta
vector< value_type > beta_host( N );
const value_type beta_min = 0.0 , beta_max = 56.0;
for( size_t i=0 ; i<N ; ++i )
beta_host[i] = beta_min + value_type( i ) * ( beta_max - beta_min ) / value_type( N - 1 );
state_type beta = beta_host;
//]
//[ thrust_lorenz_parameters_integration
state_type x( 6 * N );
// initialize x,y,z
thrust::fill( x.begin() , x.begin() + 3 * N , 10.0 );
// initial dx
thrust::fill( x.begin() + 3 * N , x.begin() + 4 * N , 1.0 );
// initialize dy,dz
thrust::fill( x.begin() + 4 * N , x.end() , 0.0 );
// create error stepper, can be used with make_controlled or make_dense_output
typedef runge_kutta_dopri5< state_type , value_type , state_type , value_type , thrust_algebra , thrust_operations > stepper_type;
lorenz_system lorenz( N , beta );
lorenz_perturbation_system lorenz_perturbation( N , beta );
lyap_observer obs( N , 1 );
// calculate transients
integrate_adaptive( make_controlled( 1.0e-6 , 1.0e-6 , stepper_type() ) , lorenz , std::make_pair( x.begin() , x.begin() + 3 * N ) , 0.0 , 10.0 , dt );
// calculate the Lyapunov exponents -- the main loop
double t = 0.0;
while( t < 10000.0 )
{
integrate_adaptive( make_controlled( 1.0e-6 , 1.0e-6 , stepper_type() ) , lorenz_perturbation , x , t , t + 1.0 , 0.1 );
t += 1.0;
obs( x , t );
}
vector< value_type > lyap( N );
obs.fill_lyap( lyap );
for( size_t i=0 ; i<N ; ++i )
cout << beta_host[i] << "\t" << lyap[i] << "\n";
//]
return 0;
}
| 499652068928330f12c76efd472728c059451715.cu | /*
* phase_oscillator_ensemble.cu
*
* The example how the phase_oscillator ensemble can be implemented using CUDA and thrust
*
* Created on: July 15, 2011
* Author: karsten
*/
#include <iostream>
#include <cmath>
#include <utility>
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <boost/numeric/odeint.hpp>
#include <boost/numeric/odeint/external/thrust/thrust_algebra.hpp>
#include <boost/numeric/odeint/external/thrust/thrust_operations.hpp>
#include <boost/numeric/odeint/external/thrust/thrust_resize.hpp>
#include <boost/random/mersenne_twister.hpp>
#include <boost/random/uniform_real.hpp>
#include <boost/random/variate_generator.hpp>
using namespace std;
using namespace boost::numeric::odeint;
//change this to float if your device does not support double computation
typedef double value_type;
//change this to host_vector< ... > of you want to run on CPU
typedef thrust::device_vector< value_type > state_type;
typedef thrust::device_vector< size_t > index_vector_type;
// typedef thrust::host_vector< value_type > state_type;
// typedef thrust::host_vector< size_t > index_vector_type;
const value_type sigma = 10.0;
const value_type b = 8.0 / 3.0;
//[ thrust_lorenz_parameters_define_simple_system
struct lorenz_system
{
struct lorenz_functor
{
template< class T >
__host__ __device__
void operator()( T t ) const
{
// unpack the parameter we want to vary and the Lorenz variables
value_type R = thrust::get< 3 >( t );
value_type x = thrust::get< 0 >( t );
value_type y = thrust::get< 1 >( t );
value_type z = thrust::get< 2 >( t );
thrust::get< 4 >( t ) = sigma * ( y - x );
thrust::get< 5 >( t ) = R * x - y - x * z;
thrust::get< 6 >( t ) = -b * z + x * y ;
}
};
lorenz_system( size_t N , const state_type &beta )
: m_N( N ) , m_beta( beta ) { }
template< class State , class Deriv >
void operator()( const State &x , Deriv &dxdt , value_type t ) const
{
thrust::for_each(
thrust::make_zip_iterator( thrust::make_tuple(
boost::begin( x ) ,
boost::begin( x ) + m_N ,
boost::begin( x ) + 2 * m_N ,
m_beta.begin() ,
boost::begin( dxdt ) ,
boost::begin( dxdt ) + m_N ,
boost::begin( dxdt ) + 2 * m_N ) ) ,
thrust::make_zip_iterator( thrust::make_tuple(
boost::begin( x ) + m_N ,
boost::begin( x ) + 2 * m_N ,
boost::begin( x ) + 3 * m_N ,
m_beta.begin() ,
boost::begin( dxdt ) + m_N ,
boost::begin( dxdt ) + 2 * m_N ,
boost::begin( dxdt ) + 3 * m_N ) ) ,
lorenz_functor() );
}
size_t m_N;
const state_type &m_beta;
};
//]
struct lorenz_perturbation_system
{
struct lorenz_perturbation_functor
{
template< class T >
__host__ __device__
void operator()( T t ) const
{
value_type R = thrust::get< 1 >( t );
value_type x = thrust::get< 0 >( thrust::get< 0 >( t ) );
value_type y = thrust::get< 1 >( thrust::get< 0 >( t ) );
value_type z = thrust::get< 2 >( thrust::get< 0 >( t ) );
value_type dx = thrust::get< 3 >( thrust::get< 0 >( t ) );
value_type dy = thrust::get< 4 >( thrust::get< 0 >( t ) );
value_type dz = thrust::get< 5 >( thrust::get< 0 >( t ) );
thrust::get< 0 >( thrust::get< 2 >( t ) ) = sigma * ( y - x );
thrust::get< 1 >( thrust::get< 2 >( t ) ) = R * x - y - x * z;
thrust::get< 2 >( thrust::get< 2 >( t ) ) = -b * z + x * y ;
thrust::get< 3 >( thrust::get< 2 >( t ) ) = sigma * ( dy - dx );
thrust::get< 4 >( thrust::get< 2 >( t ) ) = ( R - z ) * dx - dy - x * dz;
thrust::get< 5 >( thrust::get< 2 >( t ) ) = y * dx + x * dy - b * dz;
}
};
lorenz_perturbation_system( size_t N , const state_type &beta )
: m_N( N ) , m_beta( beta ) { }
template< class State , class Deriv >
void operator()( const State &x , Deriv &dxdt , value_type t ) const
{
thrust::for_each(
thrust::make_zip_iterator( thrust::make_tuple(
thrust::make_zip_iterator( thrust::make_tuple(
boost::begin( x ) ,
boost::begin( x ) + m_N ,
boost::begin( x ) + 2 * m_N ,
boost::begin( x ) + 3 * m_N ,
boost::begin( x ) + 4 * m_N ,
boost::begin( x ) + 5 * m_N ) ) ,
m_beta.begin() ,
thrust::make_zip_iterator( thrust::make_tuple(
boost::begin( dxdt ) ,
boost::begin( dxdt ) + m_N ,
boost::begin( dxdt ) + 2 * m_N ,
boost::begin( dxdt ) + 3 * m_N ,
boost::begin( dxdt ) + 4 * m_N ,
boost::begin( dxdt ) + 5 * m_N ) )
) ) ,
thrust::make_zip_iterator( thrust::make_tuple(
thrust::make_zip_iterator( thrust::make_tuple(
boost::begin( x ) + m_N ,
boost::begin( x ) + 2 * m_N ,
boost::begin( x ) + 3 * m_N ,
boost::begin( x ) + 4 * m_N ,
boost::begin( x ) + 5 * m_N ,
boost::begin( x ) + 6 * m_N ) ) ,
m_beta.begin() ,
thrust::make_zip_iterator( thrust::make_tuple(
boost::begin( dxdt ) + m_N ,
boost::begin( dxdt ) + 2 * m_N ,
boost::begin( dxdt ) + 3 * m_N ,
boost::begin( dxdt ) + 4 * m_N ,
boost::begin( dxdt ) + 5 * m_N ,
boost::begin( dxdt ) + 6 * m_N ) )
) ) ,
lorenz_perturbation_functor() );
}
size_t m_N;
const state_type &m_beta;
};
// Observer that periodically renormalizes the perturbation vectors and
// accumulates log-norms, yielding the largest Lyapunov exponent per system.
struct lyap_observer
{
//[thrust_lorenz_parameters_observer_functor
// Normalizes one perturbation vector (dx,dy,dz) and adds log(norm) to the
// running Lyapunov sum stored in the 4th tuple slot.
struct lyap_functor
{
template< class T >
__host__ __device__
void operator()( T t ) const
{
value_type &dx = thrust::get< 0 >( t );
value_type &dy = thrust::get< 1 >( t );
value_type &dz = thrust::get< 2 >( t );
value_type norm = sqrt( dx * dx + dy * dy + dz * dz );
dx /= norm;
dy /= norm;
dz /= norm;
thrust::get< 3 >( t ) += log( norm );
}
};
//]
// N systems; renormalize every `every`-th observer call.
lyap_observer( size_t N , size_t every = 100 )
: m_N( N ) , m_lyap( N ) , m_every( every ) , m_count( 0 )
{
thrust::fill( m_lyap.begin() , m_lyap.end() , 0.0 );
}
// Copies the accumulated log-norm sums into `lyap` and divides by the total
// integration time, producing the exponent estimates.
template< class Lyap >
void fill_lyap( Lyap &lyap )
{
thrust::copy( m_lyap.begin() , m_lyap.end() , lyap.begin() );
for( size_t i=0 ; i<lyap.size() ; ++i )
lyap[i] /= m_t_overall;
}
// Called after each integration step: renormalizes the dx/dy/dz blocks
// (offsets 3N..6N of the state) every m_every calls and records t.
template< class State >
void operator()( State &x , value_type t )
{
if( ( m_count != 0 ) && ( ( m_count % m_every ) == 0 ) )
{
thrust::for_each(
thrust::make_zip_iterator( thrust::make_tuple(
boost::begin( x ) + 3 * m_N ,
boost::begin( x ) + 4 * m_N ,
boost::begin( x ) + 5 * m_N ,
m_lyap.begin() ) ) ,
thrust::make_zip_iterator( thrust::make_tuple(
boost::begin( x ) + 4 * m_N ,
boost::begin( x ) + 5 * m_N ,
boost::begin( x ) + 6 * m_N ,
m_lyap.end() ) ) ,
lyap_functor() );
clog << t << "\n";
}
++m_count;
m_t_overall = t;
}
size_t m_N;
state_type m_lyap;   // per-system accumulated log-norms
size_t m_every;
size_t m_count;
value_type m_t_overall;  // last observed time, used as divisor in fill_lyap
};
// Number of independent Lorenz systems integrated in parallel.
const size_t N = 1024*2;
// Step-size hint handed to the adaptive integrator.
const value_type dt = 0.01;
// Sweeps beta over [0, 56] across N Lorenz systems, integrates out the
// transient, then estimates the largest Lyapunov exponent for each beta and
// prints "beta<TAB>lyapunov" lines.
// NOTE(review): parameter is spelled `arc` (unused) -- presumably `argc`.
int main( int arc , char* argv[] )
{
int driver_version , runtime_version;
cudaDriverGetVersion( &driver_version );
cudaRuntimeGetVersion ( &runtime_version );
cout << driver_version << "\t" << runtime_version << endl;
//[ thrust_lorenz_parameters_define_beta
vector< value_type > beta_host( N );
const value_type beta_min = 0.0 , beta_max = 56.0;
for( size_t i=0 ; i<N ; ++i )
beta_host[i] = beta_min + value_type( i ) * ( beta_max - beta_min ) / value_type( N - 1 );
state_type beta = beta_host;
//]
//[ thrust_lorenz_parameters_integration
state_type x( 6 * N );
// initialize x,y,z
thrust::fill( x.begin() , x.begin() + 3 * N , 10.0 );
// initial dx
thrust::fill( x.begin() + 3 * N , x.begin() + 4 * N , 1.0 );
// initialize dy,dz
thrust::fill( x.begin() + 4 * N , x.end() , 0.0 );
// create error stepper, can be used with make_controlled or make_dense_output
typedef runge_kutta_dopri5< state_type , value_type , state_type , value_type , thrust_algebra , thrust_operations > stepper_type;
lorenz_system lorenz( N , beta );
lorenz_perturbation_system lorenz_perturbation( N , beta );
lyap_observer obs( N , 1 );
// calculate transients
integrate_adaptive( make_controlled( 1.0e-6 , 1.0e-6 , stepper_type() ) , lorenz , std::make_pair( x.begin() , x.begin() + 3 * N ) , 0.0 , 10.0 , dt );
// calculate the Lyapunov exponents -- the main loop
double t = 0.0;
while( t < 10000.0 )
{
integrate_adaptive( make_controlled( 1.0e-6 , 1.0e-6 , stepper_type() ) , lorenz_perturbation , x , t , t + 1.0 , 0.1 );
t += 1.0;
obs( x , t );
}
vector< value_type > lyap( N );
obs.fill_lyap( lyap );
for( size_t i=0 ; i<N ; ++i )
cout << beta_host[i] << "\t" << lyap[i] << "\n";
//]
return 0;
}
|
7205e1f25a4891314873f65cec42f44879d773c1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifdef PADDLE_WITH_HIP
#include <hiprand.h>
#include <hiprand_kernel.h>
#include <hipcub/hipcub.hpp>
typedef hiprandState hiprandState_t;
namespace cub = hipcub;
#else
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <hipcub/hipcub.hpp>
#endif
#include <iterator>
#include <random>
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/phi/core/enforce.h"
#include "paddle/phi/core/tensor_utils.h"
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
#include "paddle/fluid/distributed/collective/process_group.h"
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/device/gpu/nccl_helper.h"
#endif
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
namespace phi {
// Grid-stride loop: covers all n elements regardless of launch configuration.
#define CUDA_KERNEL_LOOP(i, n) \
for (int32_t i = blockIdx.x * blockDim.x + threadIdx.x, \
step = blockDim.x * gridDim.x; \
i < (n); \
i += step)
// Threads per block used by every kernel launch in this file.
static constexpr int kNumCUDAThreads = 512;
// Upper bound on the number of blocks per launch.
static constexpr int kNumMaxinumNumBlocks = 4096;
// Ceil-divide n by the block size, clamped to the maximum block count.
// Uses an explicit clamp instead of the hipify-generated `::min`, which
// depends on a global-namespace min only provided by HIP headers.
inline int32_t NumBlocks(const int32_t n) {
  const int32_t blocks = (n + kNumCUDAThreads - 1) / kNumCUDAThreads;
  return blocks < kNumMaxinumNumBlocks ? blocks : kNumMaxinumNumBlocks;
}
// Fills buffer[0..n) with uniform random integers in [0, max_val), one RNG
// state per thread, seeded by a per-thread mix of `seed` and the thread id.
// NOTE(review): both preprocessor branches are byte-identical here -- an
// artifact of hipify translating the CUDA/HIP split; kept as-is.
template <typename T>
__global__ void RandomSampleClassCenter(const int64_t n,
int64_t seed,
int64_t increment,
const int64_t max_val,
T* buffer) {
const int id = blockIdx.x * blockDim.x + threadIdx.x;
hiprandState_t localState;
size_t local_seed =
(static_cast<size_t>(seed) + 0x9E3779B9U +
(static_cast<size_t>(id) << 6U) + (static_cast<size_t>(id) >> 2U));
#ifdef PADDLE_WITH_HIP
hiprand_init(local_seed, id, increment, &localState);
CUDA_KERNEL_LOOP(i, n) {
buffer[i] = static_cast<T>(hiprand(&localState) % max_val);
}
#else
hiprand_init(local_seed, id, increment, &localState);
CUDA_KERNEL_LOOP(i, n) {
buffer[i] = static_cast<T>(hiprand(&localState) % max_val);
}
#endif
}
// Writes the identity sequence 0..n-1 into out (used to seed sort values).
template <typename T>
__global__ void Range(const int64_t n, T* out) {
CUDA_KERNEL_LOOP(i, n) { out[i] = static_cast<T>(i); }
}
// For every input label owned by this rank (i.e. falling inside
// class_interval_ptr[rank]'s interval), overwrite the corresponding slot of
// `out` (the random sample buffer) with the negative value
// (label - num_classes), so positive class centers sort to the front of an
// ascending sort.
template <typename T>
__global__ void MarkPositiveClassCenter(const int64_t n,
const int64_t rank,
const T* class_interval_ptr,
const int num_classes,
const T* labels,
T* out) {
CUDA_KERNEL_LOOP(i, n) {
T label = labels[i] - class_interval_ptr[rank];
if (label >= 0 && label < num_classes) {
out[label] = label - num_classes;
}
}
}
// Binary-searches the ascending class-interval array (nranks+1 entries) for
// the interval containing `value` and writes its index to *find_index.
template <typename T>
__device__ void FindIntervalIndex(const T* class_interval_ptr,
const int64_t nranks,
const T value,
int64_t* find_index) {
int64_t start = 0;
int64_t end = nranks;
int64_t mid = ((end - start) >> 1) + start + 1;
while (start < end) {
if (class_interval_ptr[mid] == value) break;
if (class_interval_ptr[mid] > value)
end = mid - 1;
else
start = mid;
mid = ((end - start) >> 1) + start + 1;
}
// Clamp in case the loop exited with mid past the narrowed range.
*find_index = min(mid, end);
}
// For each rank r, computes the range of positions in the ascending-sorted
// label array (key_ptr) belonging to r's class interval:
//   bound_index[r] = first sorted position whose label falls in rank r,
//   bound_value[r] = the scanned unique-id (value_ptr) at that position.
// The second loop patches ranks that own no labels at the head/tail.
template <typename T>
__global__ void GetClassCenterBound(const int64_t n,
const int64_t nranks,
const T* class_interval_ptr,
const T* key_ptr,
const T* value_ptr,
T* bound_index,
T* bound_value) {
CUDA_KERNEL_LOOP(i, n) {
if (i != 0) {
int64_t cur_index, pre_index;
FindIntervalIndex(class_interval_ptr, nranks, key_ptr[i], &cur_index);
FindIntervalIndex(class_interval_ptr, nranks, key_ptr[i - 1], &pre_index);
if (cur_index > pre_index) {
assert(cur_index < nranks);
#pragma unroll
for (int32_t j = pre_index + 1; j <= cur_index; ++j) {
bound_index[j] = static_cast<T>(i);
bound_value[j] = value_ptr[i];
}
}
}
}
CUDA_KERNEL_LOOP(i, nranks + 1) {
int64_t first_index, last_index;
FindIntervalIndex(class_interval_ptr, nranks, key_ptr[0], &first_index);
FindIntervalIndex(class_interval_ptr, nranks, key_ptr[n - 1], &last_index);
if (i <= first_index) {
bound_index[i] = 0;
bound_value[i] = value_ptr[0];
} else if (i > last_index) {
bound_index[i] = n;
bound_value[i] = value_ptr[n - 1] + 1;
}
}
}
// Rewrites each sorted label into its index among the sampled class centers:
// the per-rank unique id (label_map_value) is shifted from the original class
// interval into the sampled class interval, then scattered back to the
// original batch position via label_map_key.
template <typename T>
__global__ void GetRemappedLabel(const int64_t n,
const int64_t nranks,
const T* sampled_class_interval_ptr,
const T* bound_index,
const T* bound_value,
const T* label_map_key,
T* label_map_value,
T* mapped_label) {
CUDA_KERNEL_LOOP(i, n) {
#pragma unroll
for (int64_t j = 0; j < nranks; j++) {
if (i >= bound_index[j] && i < bound_index[j + 1]) {
label_map_value[i] =
label_map_value[i] - bound_value[j] + sampled_class_interval_ptr[j];
}
}
mapped_label[label_map_key[i]] = label_map_value[i];
}
}
// aligned vector generates vectorized load/store on CUDA
// Alignment equals the full vector size, so one aligned memory transaction
// can move all Size elements at once.
template <typename T, int Size>
struct alignas(sizeof(T) * Size) AlignedVector {
T val[Size];
};
// Reports the widest usable vector width for loads from `pointer`: 4 when the
// address satisfies the alignment of AlignedVector<T, 4>, otherwise 1.
template <typename T>
inline int VectorizedSize(const T* pointer) {
  const uint64_t addr = reinterpret_cast<uint64_t>(pointer);
  constexpr int kVec4Align =
      std::alignment_of<AlignedVector<T, 4>>::value;  // NOLINT
  return (addr % kVec4Align == 0) ? 4 : 1;
}
#undef CUDA_KERNEL_LOOP
// Read-only input-iterator adapter over a sorted array: element i reads as 1
// when arr[i] differs from arr[i-1] and 0 otherwise (always 0 at i == 0).
// Feeding it to cub::DeviceScan::InclusiveSum assigns consecutive unique ids
// to runs of equal keys.
template <typename T>
class NotEqualToPreviousAdjacentIterator {
public:
using self_type = NotEqualToPreviousAdjacentIterator;
using value_type = T;
using difference_type = std::ptrdiff_t;
using pointer = T*;
using reference = T;
using iterator_category = std::input_iterator_tag;
public:
__host__ __device__ __forceinline__
NotEqualToPreviousAdjacentIterator(const T* arr, int64_t offset)
: arr_(arr), offset_(offset) {}
// 1 iff the element differs from its predecessor; 0 at the first element.
__host__ __device__ __forceinline__ reference operator*() const {
return offset_ == 0 ? 0 : (arr_[offset_] == arr_[offset_ - 1] ? 0 : 1);
}
template <typename Distance>
__host__ __device__ __forceinline__ self_type operator+(Distance n) const {
self_type ret(arr_, offset_ + n);
return ret;
}
template <typename Distance>
__host__ __device__ __forceinline__ self_type operator-(Distance n) const {
self_type ret(arr_, offset_ - n);
return ret;
}
template <typename Distance>
__host__ __device__ __forceinline__ reference operator[](Distance n) const {
return *(*this + n);
}
private:
const T* arr_;     // underlying sorted array (not owned)
int64_t offset_;   // current position
};
// Scan operator used over the per-rank class-center bounds: each application
// yields max(num_samples, b - a), i.e. the number of class centers actually
// sampled for a rank -- all positive centers are kept even when they exceed
// num_samples.
template <typename T>
struct ActualNumSampledFunctor {
__host__ __device__ __forceinline__ T operator()(const T& a,
const T& b) const {
return max(num_samples, (b - a));
}
T num_samples;
explicit ActualNumSampledFunctor(const T num) : num_samples(num) {}
};
// One device allocation sliced into every scratch buffer the sampling kernel
// needs: four num_buffer_ele-length arrays (radix-sort keys/values, in/out),
// three (nranks+1)-length arrays (bound index/value, class intervals), plus
// num_temp_ele elements of cub temporary storage at the end.
template <typename T, typename Context>
class MemoryBuffer {
public:
MemoryBuffer(const int num_buffer_ele,
const int num_temp_ele,
const int nranks,
const Context& dev_ctx) {
offset1 = 0;
offset2 = offset1 + num_buffer_ele;
offset3 = offset2 + num_buffer_ele;
offset4 = offset3 + num_buffer_ele;
offset5 = offset4 + num_buffer_ele;
offset6 = offset5 + (nranks + 1);
offset7 = offset6 + (nranks + 1);
offset8 = offset7 + (nranks + 1);
// offset9 only documents the end of the buffer; it is never read back.
offset9 = offset8 + num_temp_ele;
buffer.Resize({4 * num_buffer_ele + 3 * (nranks + 1) + num_temp_ele});
buffer_ptr = dev_ctx.template Alloc<T>(&buffer);
}
T* cub_sort_keys_ptr() { return buffer_ptr + offset1; }
T* cub_sort_keys_out_ptr() { return buffer_ptr + offset2; }
T* cub_sort_values_ptr() { return buffer_ptr + offset3; }
T* cub_sort_values_out_ptr() { return buffer_ptr + offset4; }
T* bound_index_ptr() { return buffer_ptr + offset5; }
T* bound_value_ptr() { return buffer_ptr + offset6; }
T* class_interval_ptr() { return buffer_ptr + offset7; }
void* cub_temp_storage_ptr() {
return reinterpret_cast<void*>(buffer_ptr + offset8);
}
private:
DenseTensor buffer;   // owning tensor backing all the slices above
T* buffer_ptr;
int offset1;
int offset2;
int offset3;
int offset4;
int offset5;
int offset6;
int offset7;
int offset8;
int offset9;
};
// Samples class centers for distributed margin-softmax style training: every
// positive class center appearing in `label` is kept, and negative centers
// are randomly sampled so each rank contributes (at least) num_samples
// centers. Outputs the remapped labels and the locally sampled center ids.
// NOTE(review): `seed` is only consumed when fix_seed is true; otherwise the
// device generator supplies seed/offset -- confirm against op docs.
template <typename T, typename Context>
void ClassCenterSampleKernel(const Context& dev_ctx,
const DenseTensor& label,
int num_classes,
int num_samples,
int ring_id,
int rank,
int nranks,
bool fix_seed,
int seed,
DenseTensor* remapped_label,
DenseTensor* sampled_local_class_center) {
PADDLE_ENFORCE_GT(num_classes,
0,
errors::InvalidArgument(
"The value 'num_classes' for Op(class_center_sample) "
"must be greater than 0, "
"but the value given is %d.",
num_classes));
PADDLE_ENFORCE_GT(num_samples,
0,
errors::InvalidArgument(
"The value 'num_samples' for Op(class_center_sample) "
"must be greater than 0, "
"but the value given is %d.",
num_samples));
PADDLE_ENFORCE_LE(num_samples,
num_classes,
errors::InvalidArgument(
"The value 'num_samples' for Op(class_center_sample) "
"must be less than or equal to %d, "
"but the value given is %d.",
num_classes,
num_samples));
auto place = dev_ctx.GetPlace();
int batch_size = label.numel();
// Algorithm:
// We first randomly generate a value in [0, num_classes) on each position
// in a array(shape[num_classes]). Then, we mark the element as negative
// value in the array according input label. Now, we can sort the array
// by ascending to ensure that the positive class center always in the
// front of the sorted array. So, we can get the sampled class center
// index by sorted keys. Finally, we can get the rempped label by remap
// the input label according sampled class center.
// step 1: Calculate num classes per device using nccl all reduce
std::vector<T> shard_dim_vec(nranks + 1, 0);
shard_dim_vec[rank + 1] = num_classes;
DenseTensor num_classes_per_device;
phi::TensorFromVector(shard_dim_vec, dev_ctx, &num_classes_per_device);
T* num_classes_per_device_ptr = num_classes_per_device.data<T>();
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
if (nranks > 1) {
auto map = paddle::distributed::ProcessGroupMapFromGid::getInstance();
if (map->has(ring_id)) {
// Use ProcessGroup
paddle::distributed::ProcessGroup* pg = map->get(ring_id);
std::vector<phi::DenseTensor> in_tensor;
std::vector<phi::DenseTensor> out_tensor;
in_tensor.push_back(num_classes_per_device);
out_tensor.push_back(num_classes_per_device);
paddle::distributed::AllreduceOptions opts;
opts.reduce_op = paddle::distributed::ReduceOp::SUM;
auto task = pg->AllReduce(in_tensor, out_tensor, opts);
task->Wait();
} else {
const auto& comm = paddle::platform::NCCLCommContext::Instance().Get(
ring_id, dev_ctx.GetPlace());
// use global calculate stream
const auto calcu_stream =
static_cast<GPUContext*>(
paddle::platform::DeviceContextPool::Instance().Get(
dev_ctx.GetPlace()))
->stream();
PADDLE_ENFORCE_GPU_SUCCESS(paddle::platform::dynload::ncclAllReduce(
num_classes_per_device_ptr,
num_classes_per_device_ptr,
num_classes_per_device.numel(),
paddle::platform::ToNCCLDataType(
paddle::framework::TransToProtoVarType(
num_classes_per_device.dtype())),
ncclSum,
comm->comm(),
calcu_stream));
}
}
#endif
// step 2: Determine temporary device storage requirements
int num_buffer_ele = ::max(batch_size, num_classes);
size_t cub_sort_temp_store_size = 0;
PADDLE_ENFORCE_GPU_SUCCESS(
(hipcub::DeviceRadixSort::SortPairs<T, T>(nullptr,
cub_sort_temp_store_size,
nullptr,
nullptr,
nullptr,
nullptr,
num_buffer_ele,
0,
sizeof(T) * 8,
dev_ctx.stream())));
size_t cub_sum_temp_store_size = 0;
NotEqualToPreviousAdjacentIterator<T> unique_counting_iter_temp(nullptr, 0);
PADDLE_ENFORCE_GPU_SUCCESS(
(hipcub::DeviceScan::InclusiveSum<NotEqualToPreviousAdjacentIterator<T>, T*>(
nullptr,
cub_sum_temp_store_size,
unique_counting_iter_temp,
nullptr,
batch_size,
dev_ctx.stream())));
size_t cub_scan_temp_store_size = 0;
ActualNumSampledFunctor<T> actual_num_sampled_op_temp(num_samples);
PADDLE_ENFORCE_GPU_SUCCESS(
(hipcub::DeviceScan::InclusiveScan(nullptr,
cub_scan_temp_store_size,
num_classes_per_device_ptr,
num_classes_per_device_ptr,
actual_num_sampled_op_temp,
nranks + 1,
dev_ctx.stream())));
size_t cub_temp_storage_bytes =
::max(::max(cub_sort_temp_store_size, cub_scan_temp_store_size),
cub_sum_temp_store_size);
int num_temp_ele = cub_temp_storage_bytes / sizeof(T) + 1;
// step 3: Alloc buffer memory so that we can reuse allocated memory
MemoryBuffer<T, Context> memory_buffer =
MemoryBuffer<T, Context>(num_buffer_ele, num_temp_ele, nranks, dev_ctx);
T* cub_sort_keys_ptr = memory_buffer.cub_sort_keys_ptr();
T* cub_sort_keys_out_ptr = memory_buffer.cub_sort_keys_out_ptr();
T* cub_sort_values_ptr = memory_buffer.cub_sort_values_ptr();
T* cub_sort_values_out_ptr = memory_buffer.cub_sort_values_out_ptr();
T* bound_index_ptr = memory_buffer.bound_index_ptr();
T* bound_value_ptr = memory_buffer.bound_value_ptr();
T* class_interval_ptr = memory_buffer.class_interval_ptr();
void* cub_temp_storage_ptr = memory_buffer.cub_temp_storage_ptr();
// step 4: Calculate class interval among nranks
PADDLE_ENFORCE_GPU_SUCCESS(
(hipcub::DeviceScan::InclusiveSum(cub_temp_storage_ptr,
cub_temp_storage_bytes,
num_classes_per_device_ptr,
class_interval_ptr,
nranks + 1,
dev_ctx.stream())));
// step 5: random sample negative class center
uint64_t seed_data;
uint64_t increment;
int vec_size = VectorizedSize<T>(cub_sort_keys_ptr);
auto offset = ((num_classes - 1) /
(NumBlocks(num_classes) * kNumCUDAThreads * vec_size) +
1) *
vec_size;
// auto gen_cuda = paddle::framework::DefaultCUDAGenerator(device_id);
auto gen_cuda = dev_ctx.GetGenerator();
if (!fix_seed) {
auto seed_offset = gen_cuda->IncrementOffset(offset);
seed_data = seed_offset.first;
increment = seed_offset.second;
} else {
seed_data = seed + rank;
increment = offset;
}
hipLaunchKernelGGL(( RandomSampleClassCenter<T>)
, dim3(NumBlocks(num_classes)), dim3(kNumCUDAThreads), 0, dev_ctx.stream(),
num_classes, seed_data, increment, num_classes, cub_sort_keys_ptr);
// step 6: mark positive class center as negative value
// fill the sort values to index 0, 1, ..., batch_size-1
hipLaunchKernelGGL(( MarkPositiveClassCenter<T>)
, dim3(NumBlocks(batch_size)), dim3(kNumCUDAThreads), 0, dev_ctx.stream(),
batch_size,
rank,
class_interval_ptr,
num_classes,
label.data<T>(),
cub_sort_keys_ptr);
hipLaunchKernelGGL(( Range<T>), dim3(NumBlocks(num_buffer_ele)), dim3(kNumCUDAThreads), 0, dev_ctx.stream(),
num_buffer_ele, cub_sort_values_ptr);
// step 7: sort class center by ascending, so that positive class center
// always be sampled.
PADDLE_ENFORCE_GPU_SUCCESS(
(hipcub::DeviceRadixSort::SortPairs<T, T>(cub_temp_storage_ptr,
cub_temp_storage_bytes,
cub_sort_keys_ptr,
cub_sort_keys_out_ptr,
cub_sort_values_ptr,
cub_sort_values_out_ptr,
num_classes,
0,
sizeof(T) * 8,
dev_ctx.stream())));
// step 8: sort input label ascending
PADDLE_ENFORCE_GPU_SUCCESS(
(hipcub::DeviceRadixSort::SortPairs<T, T>(cub_temp_storage_ptr,
cub_temp_storage_bytes,
label.data<T>(),
cub_sort_keys_out_ptr,
cub_sort_values_ptr,
cub_sort_keys_ptr,
batch_size,
0,
sizeof(T) * 8,
dev_ctx.stream())));
// step 9: Calculate new index using InclusiveSum on ascending sorted input
// label
NotEqualToPreviousAdjacentIterator<T> unique_counting_iter(
cub_sort_keys_out_ptr, 0);
PADDLE_ENFORCE_GPU_SUCCESS(
(hipcub::DeviceScan::InclusiveSum<NotEqualToPreviousAdjacentIterator<T>, T*>(
cub_temp_storage_ptr,
cub_temp_storage_bytes,
unique_counting_iter,
cub_sort_values_ptr,
batch_size,
dev_ctx.stream())));
// step 10: Calculate new class center bound among ranks
hipLaunchKernelGGL(( GetClassCenterBound<T>)
, dim3(NumBlocks(batch_size)), dim3(kNumCUDAThreads), 0, dev_ctx.stream(),
batch_size,
nranks,
class_interval_ptr,
cub_sort_keys_out_ptr,
cub_sort_values_ptr,
bound_index_ptr,
bound_value_ptr);
// step 11: Calculate actual number of sampled class per device.
// Since maybe num_positive_class_center > num_samples,
// we need to ensure all positive class center per device are sampled.
ActualNumSampledFunctor<T> actual_num_sampled_op(num_samples);
PADDLE_ENFORCE_GPU_SUCCESS(
(hipcub::DeviceScan::InclusiveScan(cub_temp_storage_ptr,
cub_temp_storage_bytes,
bound_value_ptr,
num_classes_per_device_ptr,
actual_num_sampled_op,
nranks + 1,
dev_ctx.stream())));
// step 12: Calculate actual sampled class interval among nranks
PADDLE_ENFORCE_GPU_SUCCESS(
(hipcub::DeviceScan::InclusiveSum(cub_temp_storage_ptr,
cub_temp_storage_bytes,
num_classes_per_device_ptr,
class_interval_ptr,
nranks + 1,
dev_ctx.stream())));
// step 13: Get remapped label for output
hipLaunchKernelGGL(( GetRemappedLabel<T>)
, dim3(NumBlocks(batch_size)), dim3(kNumCUDAThreads), 0, dev_ctx.stream(),
batch_size,
nranks,
class_interval_ptr,
bound_index_ptr,
bound_value_ptr,
cub_sort_keys_ptr,
cub_sort_values_ptr,
dev_ctx.template Alloc<T>(remapped_label));
// step 14: Get sampled class center for output
// Blocking copy to host so actual_num_samples can be read below.
phi::Copy<Context>(dev_ctx,
num_classes_per_device,
phi::CPUPlace(),
true,
&num_classes_per_device);
T actual_num_samples = num_classes_per_device.data<T>()[rank + 1];
sampled_local_class_center->Resize(phi::make_ddim({actual_num_samples}));
T* sampled_local_class_center_ptr =
dev_ctx.template Alloc<T>(sampled_local_class_center);
// NOTE(review): nullptr stream presumably selects a synchronous/default-stream
// copy; the preceding blocking Copy already synchronized -- confirm.
paddle::memory::Copy(dev_ctx.GetPlace(),
sampled_local_class_center_ptr,
dev_ctx.GetPlace(),
cub_sort_values_out_ptr,
actual_num_samples * sizeof(T),
nullptr);
}
} // namespace phi
// Register the GPU kernel for int64 and int32 label dtypes.
PD_REGISTER_KERNEL(class_center_sample,
GPU,
ALL_LAYOUT,
phi::ClassCenterSampleKernel,
int64_t,
int) {}
| 7205e1f25a4891314873f65cec42f44879d773c1.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifdef PADDLE_WITH_HIP
#include <hiprand.h>
#include <hiprand_kernel.h>
#include <hipcub/hipcub.hpp>
typedef hiprandState curandState;
namespace cub = hipcub;
#else
#include <curand.h>
#include <curand_kernel.h>
#include <cub/cub.cuh>
#endif
#include <iterator>
#include <random>
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/phi/core/enforce.h"
#include "paddle/phi/core/tensor_utils.h"
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
#include "paddle/fluid/distributed/collective/process_group.h"
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/device/gpu/nccl_helper.h"
#endif
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
namespace phi {
// Grid-stride loop: covers all n elements regardless of launch configuration.
#define CUDA_KERNEL_LOOP(i, n) \
for (int32_t i = blockIdx.x * blockDim.x + threadIdx.x, \
step = blockDim.x * gridDim.x; \
i < (n); \
i += step)
// Threads per block used by every kernel launch in this file.
static constexpr int kNumCUDAThreads = 512;
// Upper bound on the number of blocks per launch.
static constexpr int kNumMaxinumNumBlocks = 4096;
// Ceil-divide n by the block size, clamped to the maximum block count.
inline int32_t NumBlocks(const int32_t n) {
  const int32_t blocks = (n + kNumCUDAThreads - 1) / kNumCUDAThreads;
  return blocks < kNumMaxinumNumBlocks ? blocks : kNumMaxinumNumBlocks;
}
// Fills buffer[0..n) with uniform random integers in [0, max_val), one RNG
// state per thread, seeded by a per-thread mix of `seed` and the thread id.
template <typename T>
__global__ void RandomSampleClassCenter(const int64_t n,
int64_t seed,
int64_t increment,
const int64_t max_val,
T* buffer) {
const int id = blockIdx.x * blockDim.x + threadIdx.x;
curandState localState;
size_t local_seed =
(static_cast<size_t>(seed) + 0x9E3779B9U +
(static_cast<size_t>(id) << 6U) + (static_cast<size_t>(id) >> 2U));
#ifdef PADDLE_WITH_HIP
hiprand_init(local_seed, id, increment, &localState);
CUDA_KERNEL_LOOP(i, n) {
buffer[i] = static_cast<T>(hiprand(&localState) % max_val);
}
#else
curand_init(local_seed, id, increment, &localState);
CUDA_KERNEL_LOOP(i, n) {
buffer[i] = static_cast<T>(curand(&localState) % max_val);
}
#endif
}
// Writes the identity sequence 0..n-1 into out (used to seed sort values).
template <typename T>
__global__ void Range(const int64_t n, T* out) {
CUDA_KERNEL_LOOP(i, n) { out[i] = static_cast<T>(i); }
}
// For every input label owned by this rank (i.e. falling inside
// class_interval_ptr[rank]'s interval), overwrite the corresponding slot of
// `out` (the random sample buffer) with the negative value
// (label - num_classes), so positive class centers sort to the front of an
// ascending sort.
template <typename T>
__global__ void MarkPositiveClassCenter(const int64_t n,
const int64_t rank,
const T* class_interval_ptr,
const int num_classes,
const T* labels,
T* out) {
CUDA_KERNEL_LOOP(i, n) {
T label = labels[i] - class_interval_ptr[rank];
if (label >= 0 && label < num_classes) {
out[label] = label - num_classes;
}
}
}
// Binary-searches the ascending class-interval array (nranks+1 entries) for
// the interval containing `value` and writes its index to *find_index.
template <typename T>
__device__ void FindIntervalIndex(const T* class_interval_ptr,
const int64_t nranks,
const T value,
int64_t* find_index) {
int64_t start = 0;
int64_t end = nranks;
int64_t mid = ((end - start) >> 1) + start + 1;
while (start < end) {
if (class_interval_ptr[mid] == value) break;
if (class_interval_ptr[mid] > value)
end = mid - 1;
else
start = mid;
mid = ((end - start) >> 1) + start + 1;
}
// Clamp in case the loop exited with mid past the narrowed range.
*find_index = min(mid, end);
}
// For each rank r, computes the range of positions in the ascending-sorted
// label array (key_ptr) belonging to r's class interval:
//   bound_index[r] = first sorted position whose label falls in rank r,
//   bound_value[r] = the scanned unique-id (value_ptr) at that position.
// The second loop patches ranks that own no labels at the head/tail.
template <typename T>
__global__ void GetClassCenterBound(const int64_t n,
const int64_t nranks,
const T* class_interval_ptr,
const T* key_ptr,
const T* value_ptr,
T* bound_index,
T* bound_value) {
CUDA_KERNEL_LOOP(i, n) {
if (i != 0) {
int64_t cur_index, pre_index;
FindIntervalIndex(class_interval_ptr, nranks, key_ptr[i], &cur_index);
FindIntervalIndex(class_interval_ptr, nranks, key_ptr[i - 1], &pre_index);
if (cur_index > pre_index) {
assert(cur_index < nranks);
#pragma unroll
for (int32_t j = pre_index + 1; j <= cur_index; ++j) {
bound_index[j] = static_cast<T>(i);
bound_value[j] = value_ptr[i];
}
}
}
}
CUDA_KERNEL_LOOP(i, nranks + 1) {
int64_t first_index, last_index;
FindIntervalIndex(class_interval_ptr, nranks, key_ptr[0], &first_index);
FindIntervalIndex(class_interval_ptr, nranks, key_ptr[n - 1], &last_index);
if (i <= first_index) {
bound_index[i] = 0;
bound_value[i] = value_ptr[0];
} else if (i > last_index) {
bound_index[i] = n;
bound_value[i] = value_ptr[n - 1] + 1;
}
}
}
// Rewrites each sorted label into its index among the sampled class centers:
// the per-rank unique id (label_map_value) is shifted from the original class
// interval into the sampled class interval, then scattered back to the
// original batch position via label_map_key.
template <typename T>
__global__ void GetRemappedLabel(const int64_t n,
const int64_t nranks,
const T* sampled_class_interval_ptr,
const T* bound_index,
const T* bound_value,
const T* label_map_key,
T* label_map_value,
T* mapped_label) {
CUDA_KERNEL_LOOP(i, n) {
#pragma unroll
for (int64_t j = 0; j < nranks; j++) {
if (i >= bound_index[j] && i < bound_index[j + 1]) {
label_map_value[i] =
label_map_value[i] - bound_value[j] + sampled_class_interval_ptr[j];
}
}
mapped_label[label_map_key[i]] = label_map_value[i];
}
}
// aligned vector generates vectorized load/store on CUDA
// Alignment equals the full vector size, so one aligned memory transaction
// can move all Size elements at once.
template <typename T, int Size>
struct alignas(sizeof(T) * Size) AlignedVector {
T val[Size];
};
// Reports the widest usable vector width for loads from `pointer`: 4 when the
// address satisfies the alignment of AlignedVector<T, 4>, otherwise 1.
template <typename T>
inline int VectorizedSize(const T* pointer) {
  const uint64_t addr = reinterpret_cast<uint64_t>(pointer);
  constexpr int kVec4Align =
      std::alignment_of<AlignedVector<T, 4>>::value;  // NOLINT
  return (addr % kVec4Align == 0) ? 4 : 1;
}
#undef CUDA_KERNEL_LOOP
// Read-only input-iterator adapter over a sorted array: element i reads as 1
// when arr[i] differs from arr[i-1] and 0 otherwise (always 0 at i == 0).
// Feeding it to cub::DeviceScan::InclusiveSum assigns consecutive unique ids
// to runs of equal keys.
template <typename T>
class NotEqualToPreviousAdjacentIterator {
public:
using self_type = NotEqualToPreviousAdjacentIterator;
using value_type = T;
using difference_type = std::ptrdiff_t;
using pointer = T*;
using reference = T;
using iterator_category = std::input_iterator_tag;
public:
__host__ __device__ __forceinline__
NotEqualToPreviousAdjacentIterator(const T* arr, int64_t offset)
: arr_(arr), offset_(offset) {}
// 1 iff the element differs from its predecessor; 0 at the first element.
__host__ __device__ __forceinline__ reference operator*() const {
return offset_ == 0 ? 0 : (arr_[offset_] == arr_[offset_ - 1] ? 0 : 1);
}
template <typename Distance>
__host__ __device__ __forceinline__ self_type operator+(Distance n) const {
self_type ret(arr_, offset_ + n);
return ret;
}
template <typename Distance>
__host__ __device__ __forceinline__ self_type operator-(Distance n) const {
self_type ret(arr_, offset_ - n);
return ret;
}
template <typename Distance>
__host__ __device__ __forceinline__ reference operator[](Distance n) const {
return *(*this + n);
}
private:
const T* arr_;     // underlying sorted array (not owned)
int64_t offset_;   // current position
};
// Scan operator used over the per-rank class-center bounds: each application
// yields max(num_samples, b - a), i.e. the number of class centers actually
// sampled for a rank -- all positive centers are kept even when they exceed
// num_samples.
template <typename T>
struct ActualNumSampledFunctor {
__host__ __device__ __forceinline__ T operator()(const T& a,
const T& b) const {
return max(num_samples, (b - a));
}
T num_samples;
explicit ActualNumSampledFunctor(const T num) : num_samples(num) {}
};
// One device allocation sliced into every scratch buffer the sampling kernel
// needs: four num_buffer_ele-length arrays (radix-sort keys/values, in/out),
// three (nranks+1)-length arrays (bound index/value, class intervals), plus
// num_temp_ele elements of cub temporary storage at the end.
template <typename T, typename Context>
class MemoryBuffer {
public:
MemoryBuffer(const int num_buffer_ele,
const int num_temp_ele,
const int nranks,
const Context& dev_ctx) {
offset1 = 0;
offset2 = offset1 + num_buffer_ele;
offset3 = offset2 + num_buffer_ele;
offset4 = offset3 + num_buffer_ele;
offset5 = offset4 + num_buffer_ele;
offset6 = offset5 + (nranks + 1);
offset7 = offset6 + (nranks + 1);
offset8 = offset7 + (nranks + 1);
// offset9 only documents the end of the buffer; it is never read back.
offset9 = offset8 + num_temp_ele;
buffer.Resize({4 * num_buffer_ele + 3 * (nranks + 1) + num_temp_ele});
buffer_ptr = dev_ctx.template Alloc<T>(&buffer);
}
T* cub_sort_keys_ptr() { return buffer_ptr + offset1; }
T* cub_sort_keys_out_ptr() { return buffer_ptr + offset2; }
T* cub_sort_values_ptr() { return buffer_ptr + offset3; }
T* cub_sort_values_out_ptr() { return buffer_ptr + offset4; }
T* bound_index_ptr() { return buffer_ptr + offset5; }
T* bound_value_ptr() { return buffer_ptr + offset6; }
T* class_interval_ptr() { return buffer_ptr + offset7; }
void* cub_temp_storage_ptr() {
return reinterpret_cast<void*>(buffer_ptr + offset8);
}
private:
DenseTensor buffer;   // owning tensor backing all the slices above
T* buffer_ptr;
int offset1;
int offset2;
int offset3;
int offset4;
int offset5;
int offset6;
int offset7;
int offset8;
int offset9;
};
template <typename T, typename Context>
void ClassCenterSampleKernel(const Context& dev_ctx,
const DenseTensor& label,
int num_classes,
int num_samples,
int ring_id,
int rank,
int nranks,
bool fix_seed,
int seed,
DenseTensor* remapped_label,
DenseTensor* sampled_local_class_center) {
PADDLE_ENFORCE_GT(num_classes,
0,
errors::InvalidArgument(
"The value 'num_classes' for Op(class_center_sample) "
"must be greater than 0, "
"but the value given is %d.",
num_classes));
PADDLE_ENFORCE_GT(num_samples,
0,
errors::InvalidArgument(
"The value 'num_samples' for Op(class_center_sample) "
"must be greater than 0, "
"but the value given is %d.",
num_samples));
PADDLE_ENFORCE_LE(num_samples,
num_classes,
errors::InvalidArgument(
"The value 'num_samples' for Op(class_center_sample) "
"must be less than or equal to %d, "
"but the value given is %d.",
num_classes,
num_samples));
auto place = dev_ctx.GetPlace();
int batch_size = label.numel();
// Algorithm:
// We first randomly generate a value in [0, num_classes) on each position
// in a array(shape[num_classes]). Then, we mark the element as negative
// value in the array according input label. Now, we can sort the array
// by ascending to ensure that the positive class center always in the
// front of the sorted array. So, we can get the sampled class center
// index by sorted keys. Finally, we can get the rempped label by remap
// the input label according sampled class center.
// step 1: Calculate num classes per device using nccl all reduce
std::vector<T> shard_dim_vec(nranks + 1, 0);
shard_dim_vec[rank + 1] = num_classes;
DenseTensor num_classes_per_device;
phi::TensorFromVector(shard_dim_vec, dev_ctx, &num_classes_per_device);
T* num_classes_per_device_ptr = num_classes_per_device.data<T>();
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
if (nranks > 1) {
auto map = paddle::distributed::ProcessGroupMapFromGid::getInstance();
if (map->has(ring_id)) {
// Use ProcessGroup
paddle::distributed::ProcessGroup* pg = map->get(ring_id);
std::vector<phi::DenseTensor> in_tensor;
std::vector<phi::DenseTensor> out_tensor;
in_tensor.push_back(num_classes_per_device);
out_tensor.push_back(num_classes_per_device);
paddle::distributed::AllreduceOptions opts;
opts.reduce_op = paddle::distributed::ReduceOp::SUM;
auto task = pg->AllReduce(in_tensor, out_tensor, opts);
task->Wait();
} else {
const auto& comm = paddle::platform::NCCLCommContext::Instance().Get(
ring_id, dev_ctx.GetPlace());
// use global calculate stream
const auto calcu_stream =
static_cast<GPUContext*>(
paddle::platform::DeviceContextPool::Instance().Get(
dev_ctx.GetPlace()))
->stream();
PADDLE_ENFORCE_GPU_SUCCESS(paddle::platform::dynload::ncclAllReduce(
num_classes_per_device_ptr,
num_classes_per_device_ptr,
num_classes_per_device.numel(),
paddle::platform::ToNCCLDataType(
paddle::framework::TransToProtoVarType(
num_classes_per_device.dtype())),
ncclSum,
comm->comm(),
calcu_stream));
}
}
#endif
// step 2: Determine temporary device storage requirements
int num_buffer_ele = std::max(batch_size, num_classes);
size_t cub_sort_temp_store_size = 0;
PADDLE_ENFORCE_GPU_SUCCESS(
(cub::DeviceRadixSort::SortPairs<T, T>(nullptr,
cub_sort_temp_store_size,
nullptr,
nullptr,
nullptr,
nullptr,
num_buffer_ele,
0,
sizeof(T) * 8,
dev_ctx.stream())));
size_t cub_sum_temp_store_size = 0;
NotEqualToPreviousAdjacentIterator<T> unique_counting_iter_temp(nullptr, 0);
PADDLE_ENFORCE_GPU_SUCCESS(
(cub::DeviceScan::InclusiveSum<NotEqualToPreviousAdjacentIterator<T>, T*>(
nullptr,
cub_sum_temp_store_size,
unique_counting_iter_temp,
nullptr,
batch_size,
dev_ctx.stream())));
size_t cub_scan_temp_store_size = 0;
ActualNumSampledFunctor<T> actual_num_sampled_op_temp(num_samples);
PADDLE_ENFORCE_GPU_SUCCESS(
(cub::DeviceScan::InclusiveScan(nullptr,
cub_scan_temp_store_size,
num_classes_per_device_ptr,
num_classes_per_device_ptr,
actual_num_sampled_op_temp,
nranks + 1,
dev_ctx.stream())));
size_t cub_temp_storage_bytes =
std::max(std::max(cub_sort_temp_store_size, cub_scan_temp_store_size),
cub_sum_temp_store_size);
int num_temp_ele = cub_temp_storage_bytes / sizeof(T) + 1;
// step 3: Alloc buffer memory so that we can reuse allocated memory
MemoryBuffer<T, Context> memory_buffer =
MemoryBuffer<T, Context>(num_buffer_ele, num_temp_ele, nranks, dev_ctx);
T* cub_sort_keys_ptr = memory_buffer.cub_sort_keys_ptr();
T* cub_sort_keys_out_ptr = memory_buffer.cub_sort_keys_out_ptr();
T* cub_sort_values_ptr = memory_buffer.cub_sort_values_ptr();
T* cub_sort_values_out_ptr = memory_buffer.cub_sort_values_out_ptr();
T* bound_index_ptr = memory_buffer.bound_index_ptr();
T* bound_value_ptr = memory_buffer.bound_value_ptr();
T* class_interval_ptr = memory_buffer.class_interval_ptr();
void* cub_temp_storage_ptr = memory_buffer.cub_temp_storage_ptr();
// step 4: Calculate class interval among nranks
PADDLE_ENFORCE_GPU_SUCCESS(
(cub::DeviceScan::InclusiveSum(cub_temp_storage_ptr,
cub_temp_storage_bytes,
num_classes_per_device_ptr,
class_interval_ptr,
nranks + 1,
dev_ctx.stream())));
// step 5: random sample negative class center
uint64_t seed_data;
uint64_t increment;
int vec_size = VectorizedSize<T>(cub_sort_keys_ptr);
auto offset = ((num_classes - 1) /
(NumBlocks(num_classes) * kNumCUDAThreads * vec_size) +
1) *
vec_size;
// auto gen_cuda = paddle::framework::DefaultCUDAGenerator(device_id);
auto gen_cuda = dev_ctx.GetGenerator();
if (!fix_seed) {
auto seed_offset = gen_cuda->IncrementOffset(offset);
seed_data = seed_offset.first;
increment = seed_offset.second;
} else {
seed_data = seed + rank;
increment = offset;
}
RandomSampleClassCenter<T>
<<<NumBlocks(num_classes), kNumCUDAThreads, 0, dev_ctx.stream()>>>(
num_classes, seed_data, increment, num_classes, cub_sort_keys_ptr);
// step 6: mark positive class center as negative value
// fill the sort values to index 0, 1, ..., batch_size-1
MarkPositiveClassCenter<T>
<<<NumBlocks(batch_size), kNumCUDAThreads, 0, dev_ctx.stream()>>>(
batch_size,
rank,
class_interval_ptr,
num_classes,
label.data<T>(),
cub_sort_keys_ptr);
Range<T><<<NumBlocks(num_buffer_ele), kNumCUDAThreads, 0, dev_ctx.stream()>>>(
num_buffer_ele, cub_sort_values_ptr);
// step 7: sort class center by ascending, so that positive class center
// always be sampled.
PADDLE_ENFORCE_GPU_SUCCESS(
(cub::DeviceRadixSort::SortPairs<T, T>(cub_temp_storage_ptr,
cub_temp_storage_bytes,
cub_sort_keys_ptr,
cub_sort_keys_out_ptr,
cub_sort_values_ptr,
cub_sort_values_out_ptr,
num_classes,
0,
sizeof(T) * 8,
dev_ctx.stream())));
// step 8: sort input label ascending
PADDLE_ENFORCE_GPU_SUCCESS(
(cub::DeviceRadixSort::SortPairs<T, T>(cub_temp_storage_ptr,
cub_temp_storage_bytes,
label.data<T>(),
cub_sort_keys_out_ptr,
cub_sort_values_ptr,
cub_sort_keys_ptr,
batch_size,
0,
sizeof(T) * 8,
dev_ctx.stream())));
// step 9: Calculate new index using InclusiveSum on ascending sorted input
// label
NotEqualToPreviousAdjacentIterator<T> unique_counting_iter(
cub_sort_keys_out_ptr, 0);
PADDLE_ENFORCE_GPU_SUCCESS(
(cub::DeviceScan::InclusiveSum<NotEqualToPreviousAdjacentIterator<T>, T*>(
cub_temp_storage_ptr,
cub_temp_storage_bytes,
unique_counting_iter,
cub_sort_values_ptr,
batch_size,
dev_ctx.stream())));
// step 10: Calculate new class center bound among ranks
GetClassCenterBound<T>
<<<NumBlocks(batch_size), kNumCUDAThreads, 0, dev_ctx.stream()>>>(
batch_size,
nranks,
class_interval_ptr,
cub_sort_keys_out_ptr,
cub_sort_values_ptr,
bound_index_ptr,
bound_value_ptr);
// step 11: Calculate actual number of sampled class per device.
// Since maybe num_positive_class_center > num_samples,
// we need to ensure all positive class center per device are sampled.
ActualNumSampledFunctor<T> actual_num_sampled_op(num_samples);
PADDLE_ENFORCE_GPU_SUCCESS(
(cub::DeviceScan::InclusiveScan(cub_temp_storage_ptr,
cub_temp_storage_bytes,
bound_value_ptr,
num_classes_per_device_ptr,
actual_num_sampled_op,
nranks + 1,
dev_ctx.stream())));
// step 12: Calculate actual sampled class interval among nranks
PADDLE_ENFORCE_GPU_SUCCESS(
(cub::DeviceScan::InclusiveSum(cub_temp_storage_ptr,
cub_temp_storage_bytes,
num_classes_per_device_ptr,
class_interval_ptr,
nranks + 1,
dev_ctx.stream())));
// step 13: Get remapped label for output
GetRemappedLabel<T>
<<<NumBlocks(batch_size), kNumCUDAThreads, 0, dev_ctx.stream()>>>(
batch_size,
nranks,
class_interval_ptr,
bound_index_ptr,
bound_value_ptr,
cub_sort_keys_ptr,
cub_sort_values_ptr,
dev_ctx.template Alloc<T>(remapped_label));
// step 14: Get sampled class center for output
phi::Copy<Context>(dev_ctx,
num_classes_per_device,
phi::CPUPlace(),
true,
&num_classes_per_device);
T actual_num_samples = num_classes_per_device.data<T>()[rank + 1];
sampled_local_class_center->Resize(phi::make_ddim({actual_num_samples}));
T* sampled_local_class_center_ptr =
dev_ctx.template Alloc<T>(sampled_local_class_center);
paddle::memory::Copy(dev_ctx.GetPlace(),
sampled_local_class_center_ptr,
dev_ctx.GetPlace(),
cub_sort_values_out_ptr,
actual_num_samples * sizeof(T),
nullptr);
}
} // namespace phi
// Register the GPU implementation of class_center_sample for the two label
// dtypes the kernel supports (int64 and int32); layout-agnostic.
PD_REGISTER_KERNEL(class_center_sample,
GPU,
ALL_LAYOUT,
phi::ClassCenterSampleKernel,
int64_t,
int) {}
|
29eda343c0f91290bf46a798f4f7fb7af58f5afc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"
#define CUDA_MAX_THREADS 1024 // this is safe, in reality 256 is our limit
/*
 * Description:
 * this function avg-pools an input 3D tensor along dimensions 1 and 2
 * 3D input, 3D output
 *
 * Grid mapping: blockIdx.x selects one (plane [, batch]) slice; the thread
 * block tiles output pixels, with blockIdx.y/gridDim.y striding extra rows.
 * The host launchers below size gridDim.x to nInputPlane (times nbatch).
 * NOTE(review): input_n is unused here; the plane count is implied by
 * gridDim.x — confirm before removing the parameter.
 */
__global__ void subsample(float *input, float *output,
int input_n, int input_h, int input_w,
int kH, int kW, int dH, int dW)
{
// iterators
int xx, yy;
// output size (valid-mode pooling: no padding)
int output_w = (input_w - kW) / dW + 1;
int output_h = (input_h - kH) / dH + 1;
// compute offsets based on thread/block ID
int o = blockIdx.x;
int i = o;
int xx_start = threadIdx.x;
int xx_end = output_w;
int xx_step = blockDim.x;
int yy_start = blockDim.y*blockIdx.y + threadIdx.y;
int yy_end = output_h;
int yy_step = blockDim.y*gridDim.y;
// select input/output plane (advance base pointers to this slice)
output = output + o*output_w*output_h;
input = input + i*input_w*input_h;
// For all output pixels... (grid-stride over rows, block-stride over cols)
for(yy = yy_start; yy < yy_end; yy+=yy_step) {
for(xx = xx_start; xx < xx_end; xx+=xx_step) {
// Compute the mean of the kHxkW input window anchored at (yy*dH, xx*dW)
float *ptr_input = input + yy*dH*input_w + xx*dW;
float *ptr_output = output + yy*output_w + xx;
float sum = 0;
int kx, ky;
for(ky = 0; ky < kH; ky++) {
for(kx = 0; kx < kW; kx++)
sum += ptr_input[kx];
ptr_input += input_w; // next input line
}
// Update output: average = window sum / window area
*ptr_output = sum/float(kW*kH);
}
}
}
// Lua binding for the forward pass of SpatialAveragePooling.
// Lua stack: arg 1 = module table (fields kW, kH, dW, dH, output),
//            arg 2 = input CudaTensor (3D CxHxW or 4D NxCxHxW).
// Resizes self.output, launches the subsample kernel, returns 1 (output
// is left in the module's "output" field).
static int cunn_SpatialAveragePooling_updateOutput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
int kW = luaT_getfieldcheckint(L, 1, "kW");
int kH = luaT_getfieldcheckint(L, 1, "kH");
int dW = luaT_getfieldcheckint(L, 1, "dW");
int dH = luaT_getfieldcheckint(L, 1, "dH");
THCudaTensor *output = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor");
THAssert(THCudaTensor_checkGPU(state, 2, input, output));
float *output_data;
float *input_data;
luaL_argcheck(L, input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch) tensor expected");
if (input->nDimension == 3) {
// --- 3D (single sample) path ---
long nInputCols = input->size[2];
long nInputRows = input->size[1];
long nOutputCols = (nInputCols - kW) / dW + 1;
long nOutputRows = (nInputRows - kH) / dH + 1;
long nInputPlane = input->size[0];
luaL_argcheck(L, nInputCols >= kW && nInputRows >= kH, 2, "input image smaller than kernel size");
// kernel indexes with flat plane strides, so force contiguous layout
input = THCudaTensor_newContiguous(state, input);
input_data = THCudaTensor_data(state, input);
THCudaTensor_resize3d(state, output, nInputPlane, nOutputRows, nOutputCols);
output_data = THCudaTensor_data(state, output);
// cuda blocks & threads: one block.x per plane; pad grid.y so at least
// ~16 blocks are in flight when there are few planes
int yblocks = (int)(16L / nInputPlane);
yblocks = yblocks < 1 ? 1 : yblocks;
dim3 blocks(nInputPlane,yblocks);
dim3 threads(32,8);
// run subsample kernel
hipLaunchKernelGGL(( subsample) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), input_data, output_data,
nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
} else {
// --- 4D (batch) path: fold batch into grid.x ---
long nInputCols = input->size[3];
long nInputRows = input->size[2];
long nbatch = input->size[0];
long nOutputCols = (nInputCols - kW) / dW + 1;
long nOutputRows = (nInputRows - kH) / dH + 1;
long nInputPlane = input->size[1];
luaL_argcheck(L, nInputCols >= kW && nInputRows >= kH, 2, "input image smaller than kernel size");
input = THCudaTensor_newContiguous(state, input);
input_data = THCudaTensor_data(state, input);
THCudaTensor_resize4d(state, output, nbatch, nInputPlane, nOutputRows, nOutputCols);
output_data = THCudaTensor_data(state, output);
// cuda blocks & threads:
int yblocks = (int)(16L / nInputPlane);
yblocks = yblocks < 1 ? 1 : yblocks;
dim3 blocks(nInputPlane*nbatch,yblocks);
dim3 threads(32,8);
// run subsample kernel
hipLaunchKernelGGL(( subsample) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), input_data, output_data,
nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
}
// clean: release the contiguous copy (or extra reference) taken above
THCudaTensor_free(state, input);
// check for errors: kernel launches are async, so fetch the last error
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in SpatialAveragePooling.updateOutput: %s\n", hipGetErrorString(err));
THError("aborting");
}
return 1;
}
/*
 * Description:
 * this function computes the gradInput from gradOutput
 *
 * Non-atomic variant: the host wrapper only launches it when kH == dH and
 * kW == dW (see updateGradInput), so pooling windows never overlap and each
 * gradInput element is written by exactly one thread — plain += is safe.
 * gradInput must be zero-filled before launch because it is accumulated into.
 */
__global__ void subgradinput(float *gradInput, float *gradOutput,
int input_n, int input_h, int input_w,
int kH, int kW, int dH, int dW)
{
// iterators
int xx, yy;
// output size
int output_w = (input_w - kW) / dW + 1;
int output_h = (input_h - kH) / dH + 1;
// compute offsets based on thread/block ID
int o = blockIdx.x;
int i = o;
int xx_start = threadIdx.x;
int xx_end = output_w;
int xx_step = blockDim.x;
int yy_start = blockDim.y*blockIdx.y + threadIdx.y;
int yy_end = output_h;
int yy_step = blockDim.y*gridDim.y;
// select input/output plane
gradOutput = gradOutput + o*output_w*output_h;
gradInput = gradInput + i*input_w*input_h;
// compute gradInput: spread each output gradient evenly over its window
for(yy = yy_start; yy < yy_end; yy+=yy_step) {
for(xx = xx_start; xx < xx_end; xx+=xx_step) {
float *ptr_gradInput = gradInput + yy*dH*input_w + xx*dW;
float *ptr_gradOutput = gradOutput + yy*output_w + xx;
float z = *ptr_gradOutput;
int kx, ky;
for(ky = 0; ky < kH; ky++) {
for(kx = 0; kx < kW; kx++)
ptr_gradInput[kx] += z / float(kW*kH);
ptr_gradInput += input_w;
}
}
}
}
/*
 * Description:
 * this function computes the gradInput from gradOutput
 * but with an atomic accumulation. It is needed to be done so
 * for cases of kH != dH and kW != dW
 *
 * When windows overlap, several output pixels contribute to the same
 * gradInput element from different threads, so atomicAdd is required.
 */
__global__ void subgradinputAtomic(float *gradInput, float *gradOutput,
int input_n, int input_h, int input_w,
int kH, int kW, int dH, int dW)
{
// iterators
int xx, yy;
// output size
int output_w = (input_w - kW) / dW + 1;
int output_h = (input_h - kH) / dH + 1;
// compute offsets based on thread/block ID
int o = blockIdx.x;
int i = o;
int xx_start = threadIdx.x;
int xx_end = output_w;
int xx_step = blockDim.x;
int yy_start = blockDim.y*blockIdx.y + threadIdx.y;
int yy_end = output_h;
int yy_step = blockDim.y*gridDim.y;
// select input/output plane
gradOutput = gradOutput + o*output_w*output_h;
gradInput = gradInput + i*input_w*input_h;
// compute gradInput (race-free via atomicAdd on overlapping windows)
for(yy = yy_start; yy < yy_end; yy+=yy_step) {
for(xx = xx_start; xx < xx_end; xx+=xx_step) {
float *ptr_gradInput = gradInput + yy*dH*input_w + xx*dW;
float *ptr_gradOutput = gradOutput + yy*output_w + xx;
float z = *ptr_gradOutput;
int kx, ky;
for(ky = 0; ky < kH; ky++) {
for(kx = 0; kx < kW; kx++) {
atomicAdd(&(ptr_gradInput[kx]), z / float(kW*kH));
}
ptr_gradInput += input_w;
}
}
}
}
// Lua binding for the backward pass of SpatialAveragePooling.
// Lua stack: arg 1 = module table (kW, kH, dW, dH, gradInput),
//            arg 2 = input, arg 3 = gradOutput (both CudaTensors).
// gradInput is resized to match input and zeroed, then one of two kernels
// accumulates into it: the plain variant when windows don't overlap
// (kH==dH && kW==dW), else the atomic variant. Returns 1.
// NOTE(review): gradOutput is used without a newContiguous() call — this
// appears to assume the caller always passes a contiguous tensor; confirm.
static int cunn_SpatialAveragePooling_updateGradInput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
int kW = luaT_getfieldcheckint(L, 1, "kW");
int kH = luaT_getfieldcheckint(L, 1, "kH");
int dW = luaT_getfieldcheckint(L, 1, "dW");
int dH = luaT_getfieldcheckint(L, 1, "dH");
THCudaTensor *gradInput = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor");
THAssert(THCudaTensor_checkGPU(state, 3, input, gradInput, gradOutput));
if (input->nDimension == 3) {
// --- 3D (single sample) path ---
long nInputCols = input->size[2];
long nInputRows = input->size[1];
long nInputPlane = input->size[0];
float *gradOutput_data = THCudaTensor_data(state, gradOutput);
float *gradInput_data;
THCudaTensor_resizeAs(state, gradInput, input);
// zero first: both kernels accumulate (+=/atomicAdd) into gradInput
THCudaTensor_zero(state, gradInput);
gradInput_data = THCudaTensor_data(state, gradInput);
// cuda blocks & threads: same layout as the forward pass
int yblocks = (int)(16L / nInputPlane);
yblocks = yblocks < 1 ? 1 : yblocks;
dim3 blocks(nInputPlane,yblocks);
dim3 threads(32,8);
// run updateGradInput kernel
if (kH == dH && kW == dW) {
hipLaunchKernelGGL(( subgradinput) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), gradInput_data, gradOutput_data,
nInputPlane, nInputRows, nInputCols,
kH, kW, dH, dW);
} else {
hipLaunchKernelGGL(( subgradinputAtomic) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), gradInput_data, gradOutput_data,
nInputPlane, nInputRows, nInputCols,
kH, kW, dH, dW);
}
} else {
// --- 4D (batch) path: fold batch into grid.x ---
long nInputCols = input->size[3];
long nInputRows = input->size[2];
long nInputPlane = input->size[1];
long nbatch = input->size[0];
float *gradOutput_data = THCudaTensor_data(state, gradOutput);
float *gradInput_data;
THCudaTensor_resizeAs(state, gradInput, input);
THCudaTensor_zero(state, gradInput);
gradInput_data = THCudaTensor_data(state, gradInput);
// cuda blocks & threads:
int yblocks = (int)(16L / nInputPlane);
yblocks = yblocks < 1 ? 1 : yblocks;
dim3 blocks(nInputPlane*nbatch,yblocks);
dim3 threads(32,8);
// run updateGradInput kernel
if (kH == dH && kW == dW) {
hipLaunchKernelGGL(( subgradinput) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), gradInput_data, gradOutput_data,
nInputPlane, nInputRows, nInputCols,
kH, kW, dH, dW);
} else {
hipLaunchKernelGGL(( subgradinputAtomic) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), gradInput_data, gradOutput_data,
nInputPlane, nInputRows, nInputCols,
kH, kW, dH, dW);
}
}
// check for errors: launches are async, fetch and report the last error
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in SpatialAveragePooling.updateGradInput: %s\n", hipGetErrorString(err));
THError("aborting");
}
return 1;
}
// Method table exposed to Lua; NULL sentinel terminates the list.
static const struct luaL_Reg cunn_SpatialAveragePooling__ [] = {
{"SpatialAveragePooling_updateOutput", cunn_SpatialAveragePooling_updateOutput},
{"SpatialAveragePooling_updateGradInput", cunn_SpatialAveragePooling_updateGradInput},
{NULL, NULL}
};
// Register the methods above under torch.CudaTensor's "nn" namespace.
static void cunn_SpatialAveragePooling_init(lua_State *L)
{
luaT_pushmetatable(L, "torch.CudaTensor");
luaT_registeratname(L, cunn_SpatialAveragePooling__, "nn");
lua_pop(L,1);
}
#undef CUDA_MAX_THREADS
| 29eda343c0f91290bf46a798f4f7fb7af58f5afc.cu | #include "utils.h"
#define CUDA_MAX_THREADS 1024 // this is safe, in reality 256 is our limit
/*
 * Description:
 * this function avg-pools an input 3D tensor along dimensions 1 and 2
 * 3D input, 3D output
 *
 * Grid mapping: blockIdx.x selects one (plane [, batch]) slice; the thread
 * block tiles output pixels, with blockIdx.y/gridDim.y striding extra rows.
 * The host launchers below size gridDim.x to nInputPlane (times nbatch).
 * NOTE(review): input_n is unused here; the plane count is implied by
 * gridDim.x — confirm before removing the parameter.
 */
__global__ void subsample(float *input, float *output,
int input_n, int input_h, int input_w,
int kH, int kW, int dH, int dW)
{
// iterators
int xx, yy;
// output size (valid-mode pooling: no padding)
int output_w = (input_w - kW) / dW + 1;
int output_h = (input_h - kH) / dH + 1;
// compute offsets based on thread/block ID
int o = blockIdx.x;
int i = o;
int xx_start = threadIdx.x;
int xx_end = output_w;
int xx_step = blockDim.x;
int yy_start = blockDim.y*blockIdx.y + threadIdx.y;
int yy_end = output_h;
int yy_step = blockDim.y*gridDim.y;
// select input/output plane (advance base pointers to this slice)
output = output + o*output_w*output_h;
input = input + i*input_w*input_h;
// For all output pixels... (grid-stride over rows, block-stride over cols)
for(yy = yy_start; yy < yy_end; yy+=yy_step) {
for(xx = xx_start; xx < xx_end; xx+=xx_step) {
// Compute the mean of the kHxkW input window anchored at (yy*dH, xx*dW)
float *ptr_input = input + yy*dH*input_w + xx*dW;
float *ptr_output = output + yy*output_w + xx;
float sum = 0;
int kx, ky;
for(ky = 0; ky < kH; ky++) {
for(kx = 0; kx < kW; kx++)
sum += ptr_input[kx];
ptr_input += input_w; // next input line
}
// Update output: average = window sum / window area
*ptr_output = sum/float(kW*kH);
}
}
}
// Lua binding for the forward pass of SpatialAveragePooling.
// Lua stack: arg 1 = module table (fields kW, kH, dW, dH, output),
//            arg 2 = input CudaTensor (3D CxHxW or 4D NxCxHxW).
// Resizes self.output, launches the subsample kernel, returns 1 (output
// is left in the module's "output" field).
static int cunn_SpatialAveragePooling_updateOutput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
int kW = luaT_getfieldcheckint(L, 1, "kW");
int kH = luaT_getfieldcheckint(L, 1, "kH");
int dW = luaT_getfieldcheckint(L, 1, "dW");
int dH = luaT_getfieldcheckint(L, 1, "dH");
THCudaTensor *output = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor");
THAssert(THCudaTensor_checkGPU(state, 2, input, output));
float *output_data;
float *input_data;
luaL_argcheck(L, input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch) tensor expected");
if (input->nDimension == 3) {
// --- 3D (single sample) path ---
long nInputCols = input->size[2];
long nInputRows = input->size[1];
long nOutputCols = (nInputCols - kW) / dW + 1;
long nOutputRows = (nInputRows - kH) / dH + 1;
long nInputPlane = input->size[0];
luaL_argcheck(L, nInputCols >= kW && nInputRows >= kH, 2, "input image smaller than kernel size");
// kernel indexes with flat plane strides, so force contiguous layout
input = THCudaTensor_newContiguous(state, input);
input_data = THCudaTensor_data(state, input);
THCudaTensor_resize3d(state, output, nInputPlane, nOutputRows, nOutputCols);
output_data = THCudaTensor_data(state, output);
// cuda blocks & threads: one block.x per plane; pad grid.y so at least
// ~16 blocks are in flight when there are few planes
int yblocks = (int)(16L / nInputPlane);
yblocks = yblocks < 1 ? 1 : yblocks;
dim3 blocks(nInputPlane,yblocks);
dim3 threads(32,8);
// run subsample kernel
subsample <<<blocks, threads, 0, THCState_getCurrentStream(state)>>> (input_data, output_data,
nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
} else {
// --- 4D (batch) path: fold batch into grid.x ---
long nInputCols = input->size[3];
long nInputRows = input->size[2];
long nbatch = input->size[0];
long nOutputCols = (nInputCols - kW) / dW + 1;
long nOutputRows = (nInputRows - kH) / dH + 1;
long nInputPlane = input->size[1];
luaL_argcheck(L, nInputCols >= kW && nInputRows >= kH, 2, "input image smaller than kernel size");
input = THCudaTensor_newContiguous(state, input);
input_data = THCudaTensor_data(state, input);
THCudaTensor_resize4d(state, output, nbatch, nInputPlane, nOutputRows, nOutputCols);
output_data = THCudaTensor_data(state, output);
// cuda blocks & threads:
int yblocks = (int)(16L / nInputPlane);
yblocks = yblocks < 1 ? 1 : yblocks;
dim3 blocks(nInputPlane*nbatch,yblocks);
dim3 threads(32,8);
// run subsample kernel
subsample <<<blocks, threads, 0, THCState_getCurrentStream(state)>>> (input_data, output_data,
nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
}
// clean: release the contiguous copy (or extra reference) taken above
THCudaTensor_free(state, input);
// check for errors: kernel launches are async, so fetch the last error
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in SpatialAveragePooling.updateOutput: %s\n", cudaGetErrorString(err));
THError("aborting");
}
return 1;
}
/*
 * Description:
 * this function computes the gradInput from gradOutput
 *
 * Non-atomic variant: the host wrapper only launches it when kH == dH and
 * kW == dW (see updateGradInput), so pooling windows never overlap and each
 * gradInput element is written by exactly one thread — plain += is safe.
 * gradInput must be zero-filled before launch because it is accumulated into.
 */
__global__ void subgradinput(float *gradInput, float *gradOutput,
int input_n, int input_h, int input_w,
int kH, int kW, int dH, int dW)
{
// iterators
int xx, yy;
// output size
int output_w = (input_w - kW) / dW + 1;
int output_h = (input_h - kH) / dH + 1;
// compute offsets based on thread/block ID
int o = blockIdx.x;
int i = o;
int xx_start = threadIdx.x;
int xx_end = output_w;
int xx_step = blockDim.x;
int yy_start = blockDim.y*blockIdx.y + threadIdx.y;
int yy_end = output_h;
int yy_step = blockDim.y*gridDim.y;
// select input/output plane
gradOutput = gradOutput + o*output_w*output_h;
gradInput = gradInput + i*input_w*input_h;
// compute gradInput: spread each output gradient evenly over its window
for(yy = yy_start; yy < yy_end; yy+=yy_step) {
for(xx = xx_start; xx < xx_end; xx+=xx_step) {
float *ptr_gradInput = gradInput + yy*dH*input_w + xx*dW;
float *ptr_gradOutput = gradOutput + yy*output_w + xx;
float z = *ptr_gradOutput;
int kx, ky;
for(ky = 0; ky < kH; ky++) {
for(kx = 0; kx < kW; kx++)
ptr_gradInput[kx] += z / float(kW*kH);
ptr_gradInput += input_w;
}
}
}
}
/*
 * Description:
 * this function computes the gradInput from gradOutput
 * but with an atomic accumulation. It is needed to be done so
 * for cases of kH != dH and kW != dW
 *
 * When windows overlap, several output pixels contribute to the same
 * gradInput element from different threads, so atomicAdd is required.
 */
__global__ void subgradinputAtomic(float *gradInput, float *gradOutput,
int input_n, int input_h, int input_w,
int kH, int kW, int dH, int dW)
{
// iterators
int xx, yy;
// output size
int output_w = (input_w - kW) / dW + 1;
int output_h = (input_h - kH) / dH + 1;
// compute offsets based on thread/block ID
int o = blockIdx.x;
int i = o;
int xx_start = threadIdx.x;
int xx_end = output_w;
int xx_step = blockDim.x;
int yy_start = blockDim.y*blockIdx.y + threadIdx.y;
int yy_end = output_h;
int yy_step = blockDim.y*gridDim.y;
// select input/output plane
gradOutput = gradOutput + o*output_w*output_h;
gradInput = gradInput + i*input_w*input_h;
// compute gradInput (race-free via atomicAdd on overlapping windows)
for(yy = yy_start; yy < yy_end; yy+=yy_step) {
for(xx = xx_start; xx < xx_end; xx+=xx_step) {
float *ptr_gradInput = gradInput + yy*dH*input_w + xx*dW;
float *ptr_gradOutput = gradOutput + yy*output_w + xx;
float z = *ptr_gradOutput;
int kx, ky;
for(ky = 0; ky < kH; ky++) {
for(kx = 0; kx < kW; kx++) {
atomicAdd(&(ptr_gradInput[kx]), z / float(kW*kH));
}
ptr_gradInput += input_w;
}
}
}
}
// Lua binding for the backward pass of SpatialAveragePooling.
// Lua stack: arg 1 = module table (kW, kH, dW, dH, gradInput),
//            arg 2 = input, arg 3 = gradOutput (both CudaTensors).
// gradInput is resized to match input and zeroed, then one of two kernels
// accumulates into it: the plain variant when windows don't overlap
// (kH==dH && kW==dW), else the atomic variant. Returns 1.
// NOTE(review): gradOutput is used without a newContiguous() call — this
// appears to assume the caller always passes a contiguous tensor; confirm.
static int cunn_SpatialAveragePooling_updateGradInput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
int kW = luaT_getfieldcheckint(L, 1, "kW");
int kH = luaT_getfieldcheckint(L, 1, "kH");
int dW = luaT_getfieldcheckint(L, 1, "dW");
int dH = luaT_getfieldcheckint(L, 1, "dH");
THCudaTensor *gradInput = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor");
THAssert(THCudaTensor_checkGPU(state, 3, input, gradInput, gradOutput));
if (input->nDimension == 3) {
// --- 3D (single sample) path ---
long nInputCols = input->size[2];
long nInputRows = input->size[1];
long nInputPlane = input->size[0];
float *gradOutput_data = THCudaTensor_data(state, gradOutput);
float *gradInput_data;
THCudaTensor_resizeAs(state, gradInput, input);
// zero first: both kernels accumulate (+=/atomicAdd) into gradInput
THCudaTensor_zero(state, gradInput);
gradInput_data = THCudaTensor_data(state, gradInput);
// cuda blocks & threads: same layout as the forward pass
int yblocks = (int)(16L / nInputPlane);
yblocks = yblocks < 1 ? 1 : yblocks;
dim3 blocks(nInputPlane,yblocks);
dim3 threads(32,8);
// run updateGradInput kernel
if (kH == dH && kW == dW) {
subgradinput <<<blocks, threads, 0, THCState_getCurrentStream(state)>>> (gradInput_data, gradOutput_data,
nInputPlane, nInputRows, nInputCols,
kH, kW, dH, dW);
} else {
subgradinputAtomic <<<blocks, threads, 0, THCState_getCurrentStream(state)>>> (gradInput_data, gradOutput_data,
nInputPlane, nInputRows, nInputCols,
kH, kW, dH, dW);
}
} else {
// --- 4D (batch) path: fold batch into grid.x ---
long nInputCols = input->size[3];
long nInputRows = input->size[2];
long nInputPlane = input->size[1];
long nbatch = input->size[0];
float *gradOutput_data = THCudaTensor_data(state, gradOutput);
float *gradInput_data;
THCudaTensor_resizeAs(state, gradInput, input);
THCudaTensor_zero(state, gradInput);
gradInput_data = THCudaTensor_data(state, gradInput);
// cuda blocks & threads:
int yblocks = (int)(16L / nInputPlane);
yblocks = yblocks < 1 ? 1 : yblocks;
dim3 blocks(nInputPlane*nbatch,yblocks);
dim3 threads(32,8);
// run updateGradInput kernel
if (kH == dH && kW == dW) {
subgradinput <<<blocks, threads, 0, THCState_getCurrentStream(state)>>> (gradInput_data, gradOutput_data,
nInputPlane, nInputRows, nInputCols,
kH, kW, dH, dW);
} else {
subgradinputAtomic <<<blocks, threads, 0, THCState_getCurrentStream(state)>>> (gradInput_data, gradOutput_data,
nInputPlane, nInputRows, nInputCols,
kH, kW, dH, dW);
}
}
// check for errors: launches are async, fetch and report the last error
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in SpatialAveragePooling.updateGradInput: %s\n", cudaGetErrorString(err));
THError("aborting");
}
return 1;
}
// Method table exposed to Lua; NULL sentinel terminates the list.
static const struct luaL_Reg cunn_SpatialAveragePooling__ [] = {
{"SpatialAveragePooling_updateOutput", cunn_SpatialAveragePooling_updateOutput},
{"SpatialAveragePooling_updateGradInput", cunn_SpatialAveragePooling_updateGradInput},
{NULL, NULL}
};
// Register the methods above under torch.CudaTensor's "nn" namespace.
static void cunn_SpatialAveragePooling_init(lua_State *L)
{
luaT_pushmetatable(L, "torch.CudaTensor");
luaT_registeratname(L, cunn_SpatialAveragePooling__, "nn");
lua_pop(L,1);
}
#undef CUDA_MAX_THREADS
|
85d30cfacf12363ef3f09d4804639393c604de04.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2017 XGBoost contributors
*/
#include "./host_device_vector.h"
#include <thrust/fill.h>
#include <xgboost/data.h>
#include <algorithm>
#include <cstdint>
#include <mutex>
#include "device_helpers_hip.cuh"
namespace xgboost {
// the handler to call instead of hipSetDevice; only used for testing
static void (*cudaSetDeviceHandler)(int) = nullptr; // NOLINT
// Install (or, with nullptr, remove) the test hook that replaces the real
// hipSetDevice call inside HostDeviceVectorImpl::SetDevice().
void SetCudaSetDeviceHandler(void (*handler)(int)) {
cudaSetDeviceHandler = handler;
}
// Backing implementation for HostDeviceVector<T>: a vector whose contents
// are lazily mirrored between a host std::vector and a device vector.
// gpu_access_ tracks which side currently owns the data:
//   kNone  -> host may read/write; device must sync before use
//   kRead  -> both sides may read; neither has exclusive write access
//   kWrite -> device owns the data; host must sync (LazySyncHost) to read
// HostCanAccess tests gpu_access_ <= access; DeviceCanAccess tests >=.
// NOTE(review): the GPUAccess enum itself is declared in the header, not
// visible here — the ordering above is inferred from those comparisons.
template <typename T>
class HostDeviceVectorImpl {
public:
// Construct with `size` copies of v, directly on the device when device >= 0,
// otherwise on the host.
HostDeviceVectorImpl(size_t size, T v, int device) : device_(device) {
if (device >= 0) {
gpu_access_ = GPUAccess::kWrite;
SetDevice();
data_d_.resize(size, v);
} else {
data_h_.resize(size, v);
}
}
// Initializer can be std::vector<T> or std::initializer_list<T>
template <class Initializer>
HostDeviceVectorImpl(const Initializer& init, int device) : device_(device) {
if (device >= 0) {
gpu_access_ = GPUAccess::kWrite;
LazyResizeDevice(init.size());
Copy(init);
} else {
data_h_ = init;
}
}
// Ensure the right device is current so data_d_'s destructor frees on it.
~HostDeviceVectorImpl() {
if (device_ >= 0) {
SetDevice();
}
}
size_t Size() const { return HostCanRead() ? data_h_.size() : data_d_.size(); }
int DeviceIdx() const { return device_; }
// Mutable device pointer; syncs host->device if needed and locks out host reads.
T* DevicePointer() {
LazySyncDevice(GPUAccess::kWrite);
return data_d_.data().get();
}
// Read-only device pointer; leaves host data readable too.
const T* ConstDevicePointer() {
LazySyncDevice(GPUAccess::kRead);
return data_d_.data().get();
}
common::Span<T> DeviceSpan() {
LazySyncDevice(GPUAccess::kWrite);
return {data_d_.data().get(), static_cast<typename common::Span<T>::index_type>(Size())};
}
common::Span<const T> ConstDeviceSpan() {
LazySyncDevice(GPUAccess::kRead);
using SpanInd = typename common::Span<const T>::index_type;
return {data_d_.data().get(), static_cast<SpanInd>(Size())};
}
thrust::device_ptr<T> tbegin() {  // NOLINT
return thrust::device_ptr<T>(DevicePointer());
}
thrust::device_ptr<const T> tcbegin() {  // NOLINT
return thrust::device_ptr<const T>(ConstDevicePointer());
}
thrust::device_ptr<T> tend() {  // NOLINT
return tbegin() + Size();
}
thrust::device_ptr<const T> tcend() {  // NOLINT
return tcbegin() + Size();
}
// Fill every element with v on whichever side currently owns the data.
void Fill(T v) {  // NOLINT
if (HostCanWrite()) {
std::fill(data_h_.begin(), data_h_.end(), v);
} else {
gpu_access_ = GPUAccess::kWrite;
SetDevice();
thrust::fill(data_d_.begin(), data_d_.end(), v);
}
}
// Element-wise copy from another vector of identical size; stays on the
// host only when both sides are host-resident, otherwise copies on device.
void Copy(HostDeviceVectorImpl<T>* other) {
CHECK_EQ(Size(), other->Size());
// Data is on host.
if (HostCanWrite() && other->HostCanWrite()) {
std::copy(other->data_h_.begin(), other->data_h_.end(), data_h_.begin());
return;
}
CopyToDevice(other);
}
void Copy(const std::vector<T>& other) {
CHECK_EQ(Size(), other.size());
if (HostCanWrite()) {
std::copy(other.begin(), other.end(), data_h_.begin());
} else {
CopyToDevice(other.data());
}
}
void Copy(std::initializer_list<T> other) {
CHECK_EQ(Size(), other.size());
if (HostCanWrite()) {
std::copy(other.begin(), other.end(), data_h_.begin());
} else {
CopyToDevice(other.begin());
}
}
// Mutable host view: syncs device->host and revokes GPU access.
std::vector<T>& HostVector() {
LazySyncHost(GPUAccess::kNone);
return data_h_;
}
// Read-only host view: syncs device->host but keeps device data readable.
const std::vector<T>& ConstHostVector() {
LazySyncHost(GPUAccess::kRead);
return data_h_;
}
// Move the vector to another device (or to host with device == -1);
// data is staged through the host when leaving a device.
void SetDevice(int device) {
if (device_ == device) { return; }
if (device_ >= 0) {
LazySyncHost(GPUAccess::kNone);
}
device_ = device;
if (device_ >= 0) {
LazyResizeDevice(data_h_.size());
}
}
void Resize(size_t new_size, T v) {
if (new_size == Size()) { return; }
if (Size() == 0 && device_ >= 0) {
// fast on-device resize
gpu_access_ = GPUAccess::kWrite;
SetDevice();
data_d_.resize(new_size, v);
} else {
// resize on host
LazySyncHost(GPUAccess::kNone);
data_h_.resize(new_size, v);
}
}
// Ensure host data is current and the device holds at most `access` rights.
void LazySyncHost(GPUAccess access) {
if (HostCanAccess(access)) { return; }
if (HostCanRead()) {
// data is present, just need to deny access to the device
gpu_access_ = access;
return;
}
gpu_access_ = access;
if (data_h_.size() != data_d_.size()) { data_h_.resize(data_d_.size()); }
SetDevice();
dh::safe_cuda(hipMemcpy(data_h_.data(),
data_d_.data().get(),
data_d_.size() * sizeof(T),
hipMemcpyDeviceToHost));
}
// Ensure device data is current and holds at least `access` rights.
void LazySyncDevice(GPUAccess access) {
if (DeviceCanAccess(access)) { return; }
if (DeviceCanRead()) {
// deny read to the host
gpu_access_ = access;
return;
}
// data is on the host
LazyResizeDevice(data_h_.size());
SetDevice();
dh::safe_cuda(hipMemcpy(data_d_.data().get(),
data_h_.data(),
data_d_.size() * sizeof(T),
hipMemcpyHostToDevice));
gpu_access_ = access;
}
bool HostCanAccess(GPUAccess access) const { return gpu_access_ <= access; }
bool HostCanRead() const { return HostCanAccess(GPUAccess::kRead); }
bool HostCanWrite() const { return HostCanAccess(GPUAccess::kNone); }
bool DeviceCanAccess(GPUAccess access) const { return gpu_access_ >= access; }
bool DeviceCanRead() const { return DeviceCanAccess(GPUAccess::kRead); }
bool DeviceCanWrite() const { return DeviceCanAccess(GPUAccess::kWrite); }
private:
int device_{-1};            // -1 means host-only; otherwise the ordinal of the owning GPU
std::vector<T> data_h_{};   // host copy
dh::device_vector<T> data_d_{};  // device copy
GPUAccess gpu_access_{GPUAccess::kNone};  // see state-machine comment above
// Device-side copy from another impl (host source falls back to pointer copy).
// NOTE(review): uses hipMemcpyAsync with the default (null) stream; callers
// appear to rely on later synchronizing calls — confirm stream semantics.
void CopyToDevice(HostDeviceVectorImpl* other) {
if (other->HostCanWrite()) {
CopyToDevice(other->data_h_.data());
} else {
LazyResizeDevice(Size());
gpu_access_ = GPUAccess::kWrite;
SetDevice();
dh::safe_cuda(hipMemcpyAsync(data_d_.data().get(), other->data_d_.data().get(),
data_d_.size() * sizeof(T), hipMemcpyDefault));
}
}
// Copy Size() elements starting at `begin` (host or device, hipMemcpyDefault
// infers) into the device buffer.
void CopyToDevice(const T* begin) {
LazyResizeDevice(Size());
gpu_access_ = GPUAccess::kWrite;
SetDevice();
dh::safe_cuda(hipMemcpyAsync(data_d_.data().get(), begin,
data_d_.size() * sizeof(T), hipMemcpyDefault));
}
void LazyResizeDevice(size_t new_size) {
if (new_size == data_d_.size()) { return; }
SetDevice();
data_d_.resize(new_size);
}
// Make device_ current, via the test hook when one is installed.
void SetDevice() {
CHECK_GE(device_, 0);
if (cudaSetDeviceHandler == nullptr) {
dh::safe_cuda(hipSetDevice(device_));
} else {
(*cudaSetDeviceHandler)(device_);
}
}
};
// ---------------------------------------------------------------------------
// Public HostDeviceVector<T> API: thin pimpl forwarders onto
// HostDeviceVectorImpl<T>. Each method simply delegates; see the impl class
// above for semantics of the lazy host/device synchronization.
// ---------------------------------------------------------------------------
template<typename T>
HostDeviceVector<T>::HostDeviceVector(size_t size, T v, int device)
: impl_(new HostDeviceVectorImpl<T>(size, v, device)) {}
template <typename T>
HostDeviceVector<T>::HostDeviceVector(std::initializer_list<T> init, int device)
: impl_(new HostDeviceVectorImpl<T>(init, device)) {}
template <typename T>
HostDeviceVector<T>::HostDeviceVector(const std::vector<T>& init, int device)
: impl_(new HostDeviceVectorImpl<T>(init, device)) {}
template <typename T>
HostDeviceVector<T>::HostDeviceVector(const HostDeviceVector<T>& other)
: impl_(new HostDeviceVectorImpl<T>(*other.impl_)) {}
// Copy assignment: build the new impl first so a throwing copy leaves *this intact.
template <typename T>
HostDeviceVector<T>& HostDeviceVector<T>::operator=(const HostDeviceVector<T>& other) {
if (this == &other) { return *this; }
std::unique_ptr<HostDeviceVectorImpl<T>> newImpl(new HostDeviceVectorImpl<T>(*other.impl_));
delete impl_;
impl_ = newImpl.release();
return *this;
}
template <typename T>
HostDeviceVector<T>::~HostDeviceVector() {
delete impl_;
impl_ = nullptr;
}
template <typename T>
size_t HostDeviceVector<T>::Size() const { return impl_->Size(); }
template <typename T>
int HostDeviceVector<T>::DeviceIdx() const { return impl_->DeviceIdx(); }
template <typename T>
T* HostDeviceVector<T>::DevicePointer() {
return impl_->DevicePointer();
}
template <typename T>
const T* HostDeviceVector<T>::ConstDevicePointer() const {
return impl_->ConstDevicePointer();
}
template <typename T>
common::Span<T> HostDeviceVector<T>::DeviceSpan() {
return impl_->DeviceSpan();
}
template <typename T>
common::Span<const T> HostDeviceVector<T>::ConstDeviceSpan() const {
return impl_->ConstDeviceSpan();
}
template <typename T>
thrust::device_ptr<T> HostDeviceVector<T>::tbegin() {  // NOLINT
return impl_->tbegin();
}
template <typename T>
thrust::device_ptr<const T> HostDeviceVector<T>::tcbegin() const {  // NOLINT
return impl_->tcbegin();
}
template <typename T>
thrust::device_ptr<T> HostDeviceVector<T>::tend() {  // NOLINT
return impl_->tend();
}
template <typename T>
thrust::device_ptr<const T> HostDeviceVector<T>::tcend() const {  // NOLINT
return impl_->tcend();
}
template <typename T>
void HostDeviceVector<T>::Fill(T v) {
impl_->Fill(v);
}
template <typename T>
void HostDeviceVector<T>::Copy(const HostDeviceVector<T>& other) {
impl_->Copy(other.impl_);
}
template <typename T>
void HostDeviceVector<T>::Copy(const std::vector<T>& other) {
impl_->Copy(other);
}
template <typename T>
void HostDeviceVector<T>::Copy(std::initializer_list<T> other) {
impl_->Copy(other);
}
template <typename T>
std::vector<T>& HostDeviceVector<T>::HostVector() { return impl_->HostVector(); }
template <typename T>
const std::vector<T>& HostDeviceVector<T>::ConstHostVector() const {
return impl_->ConstHostVector();
}
template <typename T>
bool HostDeviceVector<T>::HostCanRead() const {
return impl_->HostCanRead();
}
template <typename T>
bool HostDeviceVector<T>::HostCanWrite() const {
return impl_->HostCanWrite();
}
template <typename T>
bool HostDeviceVector<T>::DeviceCanRead() const {
return impl_->DeviceCanRead();
}
template <typename T>
bool HostDeviceVector<T>::DeviceCanWrite() const {
return impl_->DeviceCanWrite();
}
template <typename T>
void HostDeviceVector<T>::SetDevice(int device) const {
impl_->SetDevice(device);
}
template <typename T>
void HostDeviceVector<T>::Resize(size_t new_size, T v) {
impl_->Resize(new_size, v);
}
// explicit instantiations are required, as HostDeviceVector isn't header-only
template class HostDeviceVector<bst_float>;
template class HostDeviceVector<GradientPair>;
template class HostDeviceVector<int>;
template class HostDeviceVector<Entry>;
template class HostDeviceVector<size_t>;
}  // namespace xgboost
| 85d30cfacf12363ef3f09d4804639393c604de04.cu | /*!
* Copyright 2017 XGBoost contributors
*/
#include "./host_device_vector.h"
#include <thrust/fill.h>
#include <xgboost/data.h>
#include <algorithm>
#include <cstdint>
#include <mutex>
#include "./device_helpers.cuh"
namespace xgboost {
// the handler to call instead of cudaSetDevice; only used for testing
static void (*cudaSetDeviceHandler)(int) = nullptr; // NOLINT
void SetCudaSetDeviceHandler(void (*handler)(int)) {
cudaSetDeviceHandler = handler;
}
template <typename T>
class HostDeviceVectorImpl {
public:
HostDeviceVectorImpl(size_t size, T v, int device) : device_(device) {
if (device >= 0) {
gpu_access_ = GPUAccess::kWrite;
SetDevice();
data_d_.resize(size, v);
} else {
data_h_.resize(size, v);
}
}
// Initializer can be std::vector<T> or std::initializer_list<T>
template <class Initializer>
HostDeviceVectorImpl(const Initializer& init, int device) : device_(device) {
if (device >= 0) {
gpu_access_ = GPUAccess::kWrite;
LazyResizeDevice(init.size());
Copy(init);
} else {
data_h_ = init;
}
}
~HostDeviceVectorImpl() {
if (device_ >= 0) {
SetDevice();
}
}
size_t Size() const { return HostCanRead() ? data_h_.size() : data_d_.size(); }
int DeviceIdx() const { return device_; }
T* DevicePointer() {
LazySyncDevice(GPUAccess::kWrite);
return data_d_.data().get();
}
const T* ConstDevicePointer() {
LazySyncDevice(GPUAccess::kRead);
return data_d_.data().get();
}
common::Span<T> DeviceSpan() {
LazySyncDevice(GPUAccess::kWrite);
return {data_d_.data().get(), static_cast<typename common::Span<T>::index_type>(Size())};
}
common::Span<const T> ConstDeviceSpan() {
LazySyncDevice(GPUAccess::kRead);
using SpanInd = typename common::Span<const T>::index_type;
return {data_d_.data().get(), static_cast<SpanInd>(Size())};
}
thrust::device_ptr<T> tbegin() { // NOLINT
return thrust::device_ptr<T>(DevicePointer());
}
thrust::device_ptr<const T> tcbegin() { // NOLINT
return thrust::device_ptr<const T>(ConstDevicePointer());
}
thrust::device_ptr<T> tend() { // NOLINT
return tbegin() + Size();
}
thrust::device_ptr<const T> tcend() { // NOLINT
return tcbegin() + Size();
}
void Fill(T v) { // NOLINT
if (HostCanWrite()) {
std::fill(data_h_.begin(), data_h_.end(), v);
} else {
gpu_access_ = GPUAccess::kWrite;
SetDevice();
thrust::fill(data_d_.begin(), data_d_.end(), v);
}
}
void Copy(HostDeviceVectorImpl<T>* other) {
CHECK_EQ(Size(), other->Size());
// Data is on host.
if (HostCanWrite() && other->HostCanWrite()) {
std::copy(other->data_h_.begin(), other->data_h_.end(), data_h_.begin());
return;
}
CopyToDevice(other);
}
void Copy(const std::vector<T>& other) {
CHECK_EQ(Size(), other.size());
if (HostCanWrite()) {
std::copy(other.begin(), other.end(), data_h_.begin());
} else {
CopyToDevice(other.data());
}
}
void Copy(std::initializer_list<T> other) {
CHECK_EQ(Size(), other.size());
if (HostCanWrite()) {
std::copy(other.begin(), other.end(), data_h_.begin());
} else {
CopyToDevice(other.begin());
}
}
std::vector<T>& HostVector() {
LazySyncHost(GPUAccess::kNone);
return data_h_;
}
const std::vector<T>& ConstHostVector() {
LazySyncHost(GPUAccess::kRead);
return data_h_;
}
void SetDevice(int device) {
if (device_ == device) { return; }
if (device_ >= 0) {
LazySyncHost(GPUAccess::kNone);
}
device_ = device;
if (device_ >= 0) {
LazyResizeDevice(data_h_.size());
}
}
void Resize(size_t new_size, T v) {
if (new_size == Size()) { return; }
if (Size() == 0 && device_ >= 0) {
// fast on-device resize
gpu_access_ = GPUAccess::kWrite;
SetDevice();
data_d_.resize(new_size, v);
} else {
// resize on host
LazySyncHost(GPUAccess::kNone);
data_h_.resize(new_size, v);
}
}
void LazySyncHost(GPUAccess access) {
if (HostCanAccess(access)) { return; }
if (HostCanRead()) {
// data is present, just need to deny access to the device
gpu_access_ = access;
return;
}
gpu_access_ = access;
if (data_h_.size() != data_d_.size()) { data_h_.resize(data_d_.size()); }
SetDevice();
dh::safe_cuda(cudaMemcpy(data_h_.data(),
data_d_.data().get(),
data_d_.size() * sizeof(T),
cudaMemcpyDeviceToHost));
}
void LazySyncDevice(GPUAccess access) {
if (DeviceCanAccess(access)) { return; }
if (DeviceCanRead()) {
// deny read to the host
gpu_access_ = access;
return;
}
// data is on the host
LazyResizeDevice(data_h_.size());
SetDevice();
dh::safe_cuda(cudaMemcpy(data_d_.data().get(),
data_h_.data(),
data_d_.size() * sizeof(T),
cudaMemcpyHostToDevice));
gpu_access_ = access;
}
bool HostCanAccess(GPUAccess access) const { return gpu_access_ <= access; }
bool HostCanRead() const { return HostCanAccess(GPUAccess::kRead); }
bool HostCanWrite() const { return HostCanAccess(GPUAccess::kNone); }
bool DeviceCanAccess(GPUAccess access) const { return gpu_access_ >= access; }
bool DeviceCanRead() const { return DeviceCanAccess(GPUAccess::kRead); }
bool DeviceCanWrite() const { return DeviceCanAccess(GPUAccess::kWrite); }
private:
int device_{-1};
std::vector<T> data_h_{};
dh::device_vector<T> data_d_{};
GPUAccess gpu_access_{GPUAccess::kNone};
void CopyToDevice(HostDeviceVectorImpl* other) {
if (other->HostCanWrite()) {
CopyToDevice(other->data_h_.data());
} else {
LazyResizeDevice(Size());
gpu_access_ = GPUAccess::kWrite;
SetDevice();
dh::safe_cuda(cudaMemcpyAsync(data_d_.data().get(), other->data_d_.data().get(),
data_d_.size() * sizeof(T), cudaMemcpyDefault));
}
}
void CopyToDevice(const T* begin) {
LazyResizeDevice(Size());
gpu_access_ = GPUAccess::kWrite;
SetDevice();
dh::safe_cuda(cudaMemcpyAsync(data_d_.data().get(), begin,
data_d_.size() * sizeof(T), cudaMemcpyDefault));
}
void LazyResizeDevice(size_t new_size) {
if (new_size == data_d_.size()) { return; }
SetDevice();
data_d_.resize(new_size);
}
void SetDevice() {
CHECK_GE(device_, 0);
if (cudaSetDeviceHandler == nullptr) {
dh::safe_cuda(cudaSetDevice(device_));
} else {
(*cudaSetDeviceHandler)(device_);
}
}
};
template<typename T>
HostDeviceVector<T>::HostDeviceVector(size_t size, T v, int device)
: impl_(new HostDeviceVectorImpl<T>(size, v, device)) {}
template <typename T>
HostDeviceVector<T>::HostDeviceVector(std::initializer_list<T> init, int device)
: impl_(new HostDeviceVectorImpl<T>(init, device)) {}
template <typename T>
HostDeviceVector<T>::HostDeviceVector(const std::vector<T>& init, int device)
: impl_(new HostDeviceVectorImpl<T>(init, device)) {}
template <typename T>
HostDeviceVector<T>::HostDeviceVector(const HostDeviceVector<T>& other)
: impl_(new HostDeviceVectorImpl<T>(*other.impl_)) {}
template <typename T>
HostDeviceVector<T>& HostDeviceVector<T>::operator=(const HostDeviceVector<T>& other) {
if (this == &other) { return *this; }
std::unique_ptr<HostDeviceVectorImpl<T>> newImpl(new HostDeviceVectorImpl<T>(*other.impl_));
delete impl_;
impl_ = newImpl.release();
return *this;
}
template <typename T>
HostDeviceVector<T>::~HostDeviceVector() {
delete impl_;
impl_ = nullptr;
}
template <typename T>
size_t HostDeviceVector<T>::Size() const { return impl_->Size(); }
template <typename T>
int HostDeviceVector<T>::DeviceIdx() const { return impl_->DeviceIdx(); }
template <typename T>
T* HostDeviceVector<T>::DevicePointer() {
return impl_->DevicePointer();
}
template <typename T>
const T* HostDeviceVector<T>::ConstDevicePointer() const {
return impl_->ConstDevicePointer();
}
template <typename T>
common::Span<T> HostDeviceVector<T>::DeviceSpan() {
return impl_->DeviceSpan();
}
template <typename T>
common::Span<const T> HostDeviceVector<T>::ConstDeviceSpan() const {
return impl_->ConstDeviceSpan();
}
template <typename T>
thrust::device_ptr<T> HostDeviceVector<T>::tbegin() { // NOLINT
return impl_->tbegin();
}
template <typename T>
thrust::device_ptr<const T> HostDeviceVector<T>::tcbegin() const { // NOLINT
return impl_->tcbegin();
}
template <typename T>
thrust::device_ptr<T> HostDeviceVector<T>::tend() { // NOLINT
return impl_->tend();
}
template <typename T>
thrust::device_ptr<const T> HostDeviceVector<T>::tcend() const { // NOLINT
return impl_->tcend();
}
template <typename T>
void HostDeviceVector<T>::Fill(T v) {
impl_->Fill(v);
}
template <typename T>
void HostDeviceVector<T>::Copy(const HostDeviceVector<T>& other) {
impl_->Copy(other.impl_);
}
template <typename T>
void HostDeviceVector<T>::Copy(const std::vector<T>& other) {
impl_->Copy(other);
}
template <typename T>
void HostDeviceVector<T>::Copy(std::initializer_list<T> other) {
impl_->Copy(other);
}
template <typename T>
std::vector<T>& HostDeviceVector<T>::HostVector() { return impl_->HostVector(); }
template <typename T>
const std::vector<T>& HostDeviceVector<T>::ConstHostVector() const {
return impl_->ConstHostVector();
}
template <typename T>
bool HostDeviceVector<T>::HostCanRead() const {
return impl_->HostCanRead();
}
template <typename T>
bool HostDeviceVector<T>::HostCanWrite() const {
return impl_->HostCanWrite();
}
template <typename T>
bool HostDeviceVector<T>::DeviceCanRead() const {
return impl_->DeviceCanRead();
}
template <typename T>
bool HostDeviceVector<T>::DeviceCanWrite() const {
return impl_->DeviceCanWrite();
}
template <typename T>
void HostDeviceVector<T>::SetDevice(int device) const {
impl_->SetDevice(device);
}
template <typename T>
void HostDeviceVector<T>::Resize(size_t new_size, T v) {
impl_->Resize(new_size, v);
}
// explicit instantiations are required, as HostDeviceVector isn't header-only
template class HostDeviceVector<bst_float>;
template class HostDeviceVector<GradientPair>;
template class HostDeviceVector<int>;
template class HostDeviceVector<Entry>;
template class HostDeviceVector<size_t>;
} // namespace xgboost
|
550f95d5c3513a9cc5f4c3b27eaa80546202d8b2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//---------------------------------*-CUDA-*----------------------------------//
// Copyright 2020 UT-Battelle, LLC, and other Celeritas developers.
// See the top-level COPYRIGHT file for details.
// SPDX-License-Identifier: (Apache-2.0 OR MIT)
//---------------------------------------------------------------------------//
//! \file Material.test.cu
//---------------------------------------------------------------------------//
#include "Material.test.hh"
#include <thrust/device_vector.h>
#include "base/Range.hh"
#include "base/KernelParamCalculator.cuda.hh"
#include "physics/material/MaterialTrackView.hh"
using thrust::raw_pointer_cast;
namespace celeritas_test
{
namespace
{
//---------------------------------------------------------------------------//
// KERNELS
//---------------------------------------------------------------------------//
__global__ void m_test_kernel(unsigned int const size,
MTestInput::MaterialParamsPointers const params,
MTestInput::MaterialStatePointers const states,
const MaterialTrackState* const init,
real_type* temperatures,
real_type* rad_len,
real_type* tot_z)
{
auto tid = celeritas::KernelParamCalculator::thread_id();
if (tid.get() >= size)
return;
MaterialTrackView mat_track(params, states, tid);
// Initialize state
mat_track = init[tid.get()];
CELER_ASSERT(mat_track.material_id() == init[tid.get()].material_id);
// Get material properties
const auto& mat = mat_track.material_view();
temperatures[tid.get()] = mat.temperature();
rad_len[tid.get()] = mat.radiation_length();
// Fill elements with finctional cross sections
celeritas::Span<real_type> scratch = mat_track.element_scratch();
for (auto ec : celeritas::range(mat.num_elements()))
{
// Pretend to calculate cross section for the ec'th element
const auto& element = mat.element_view(ElementComponentId{ec});
scratch[ec] = static_cast<real_type>(element.atomic_number());
}
real_type tz = 0.0;
for (auto ec : celeritas::range(mat.num_elements()))
{
// Get its atomic number weighted by its fractional number density
tz += scratch[ec] * mat.get_element_density(ElementComponentId{ec});
}
tot_z[tid.get()] = tz;
}
} // namespace
//---------------------------------------------------------------------------//
// TESTING INTERFACE
//---------------------------------------------------------------------------//
//! Run on device and return results
MTestOutput m_test(const MTestInput& input)
{
thrust::device_vector<MaterialTrackState> init = input.init;
thrust::device_vector<real_type> temperatures(input.size());
thrust::device_vector<real_type> rad_len(input.size());
thrust::device_vector<real_type> tot_z(input.size());
static const celeritas::KernelParamCalculator calc_launch_params(
m_test_kernel, "m_test");
auto params = calc_launch_params(init.size());
hipLaunchKernelGGL(( m_test_kernel), dim3(params.grid_size), dim3(params.block_size), 0, 0,
init.size(),
input.params,
input.states,
raw_pointer_cast(init.data()),
raw_pointer_cast(temperatures.data()),
raw_pointer_cast(rad_len.data()),
raw_pointer_cast(tot_z.data()));
CELER_CUDA_CHECK_ERROR();
CELER_CUDA_CALL(hipDeviceSynchronize());
MTestOutput result;
result.temperatures.resize(init.size());
result.rad_len.resize(init.size());
result.tot_z.resize(init.size());
thrust::copy(
temperatures.begin(), temperatures.end(), result.temperatures.begin());
thrust::copy(rad_len.begin(), rad_len.end(), result.rad_len.begin());
thrust::copy(tot_z.begin(), tot_z.end(), result.tot_z.begin());
return result;
}
//---------------------------------------------------------------------------//
} // namespace celeritas_test
| 550f95d5c3513a9cc5f4c3b27eaa80546202d8b2.cu | //---------------------------------*-CUDA-*----------------------------------//
// Copyright 2020 UT-Battelle, LLC, and other Celeritas developers.
// See the top-level COPYRIGHT file for details.
// SPDX-License-Identifier: (Apache-2.0 OR MIT)
//---------------------------------------------------------------------------//
//! \file Material.test.cu
//---------------------------------------------------------------------------//
#include "Material.test.hh"
#include <thrust/device_vector.h>
#include "base/Range.hh"
#include "base/KernelParamCalculator.cuda.hh"
#include "physics/material/MaterialTrackView.hh"
using thrust::raw_pointer_cast;
namespace celeritas_test
{
namespace
{
//---------------------------------------------------------------------------//
// KERNELS
//---------------------------------------------------------------------------//
__global__ void m_test_kernel(unsigned int const size,
MTestInput::MaterialParamsPointers const params,
MTestInput::MaterialStatePointers const states,
const MaterialTrackState* const init,
real_type* temperatures,
real_type* rad_len,
real_type* tot_z)
{
auto tid = celeritas::KernelParamCalculator::thread_id();
if (tid.get() >= size)
return;
MaterialTrackView mat_track(params, states, tid);
// Initialize state
mat_track = init[tid.get()];
CELER_ASSERT(mat_track.material_id() == init[tid.get()].material_id);
// Get material properties
const auto& mat = mat_track.material_view();
temperatures[tid.get()] = mat.temperature();
rad_len[tid.get()] = mat.radiation_length();
// Fill elements with finctional cross sections
celeritas::Span<real_type> scratch = mat_track.element_scratch();
for (auto ec : celeritas::range(mat.num_elements()))
{
// Pretend to calculate cross section for the ec'th element
const auto& element = mat.element_view(ElementComponentId{ec});
scratch[ec] = static_cast<real_type>(element.atomic_number());
}
real_type tz = 0.0;
for (auto ec : celeritas::range(mat.num_elements()))
{
// Get its atomic number weighted by its fractional number density
tz += scratch[ec] * mat.get_element_density(ElementComponentId{ec});
}
tot_z[tid.get()] = tz;
}
} // namespace
//---------------------------------------------------------------------------//
// TESTING INTERFACE
//---------------------------------------------------------------------------//
//! Run on device and return results
MTestOutput m_test(const MTestInput& input)
{
thrust::device_vector<MaterialTrackState> init = input.init;
thrust::device_vector<real_type> temperatures(input.size());
thrust::device_vector<real_type> rad_len(input.size());
thrust::device_vector<real_type> tot_z(input.size());
static const celeritas::KernelParamCalculator calc_launch_params(
m_test_kernel, "m_test");
auto params = calc_launch_params(init.size());
m_test_kernel<<<params.grid_size, params.block_size>>>(
init.size(),
input.params,
input.states,
raw_pointer_cast(init.data()),
raw_pointer_cast(temperatures.data()),
raw_pointer_cast(rad_len.data()),
raw_pointer_cast(tot_z.data()));
CELER_CUDA_CHECK_ERROR();
CELER_CUDA_CALL(cudaDeviceSynchronize());
MTestOutput result;
result.temperatures.resize(init.size());
result.rad_len.resize(init.size());
result.tot_z.resize(init.size());
thrust::copy(
temperatures.begin(), temperatures.end(), result.temperatures.begin());
thrust::copy(rad_len.begin(), rad_len.end(), result.rad_len.begin());
thrust::copy(tot_z.begin(), tot_z.end(), result.tot_z.begin());
return result;
}
//---------------------------------------------------------------------------//
} // namespace celeritas_test
|
51590b23ef303fa65f73453a52226879314e1899.hip | // !!! This is a file automatically generated by hipify!!!
#include "relulayer.h"
#include "hip/hip_runtime.h"
#include "math.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdexcept>
__global__ void PoolLayer_Forward_reference_cu(double *previousLayerForward, double *out, int* backwardData, int width, int height, int depth, int stride, int previousLayerWidth, int previousLayerHeight, int previousLayerDepth)
{
for (int d = 0;d < depth;d++)
{
for (int y = 0;y < height;y++)
{
for (int x = 0;x < width;x++)
{
int index = x + (y * width) + (d * width * height);
for (int ys = 0;ys < stride;ys++)
{
for (int xs = 0;xs < stride;xs++)
{
int previousLayerIndex = xs + (x * stride) + (((y * stride) + ys) * previousLayerWidth) + (d * previousLayerWidth * previousLayerHeight);
double val = previousLayerForward[previousLayerIndex];
if (val > out[index])
{
out[index] = val;
backwardData[index] = previousLayerIndex;
}
}
}
}
}
}
}
__global__ void PoolLayer_Backward_reference_cu(double* nextlayerBackward, double *out, int* backwardData, int nodeCount)
{
for (int i = 0;i < nodeCount;i++)
{
int index = backwardData[i];
out[index] += nextlayerBackward[i];
}
}
void PoolLayer_Forward_reference(double *previousLayerForward, double *output, int* backwardData, int nodeCount, int width, int height, int depth, int stride, int previousLayerWidth, int previousLayerHeight, int previousLayerDepth)
{
PoolLayer_Forward_reference_cu << <1, 1 >> >(previousLayerForward, output, backwardData, width, height, depth, stride, previousLayerWidth, previousLayerHeight, previousLayerDepth);
LayerSynchronize();
}
void PoolLayer_Backward_reference(double* nextlayerBackward, double *output, int* backwardData, int nodeCount)
{
PoolLayer_Backward_reference_cu << <1, 1 >> >(nextlayerBackward, output, backwardData, nodeCount);
LayerSynchronize();
} | 51590b23ef303fa65f73453a52226879314e1899.cu |
#include "relulayer.h"
#include "cuda_runtime.h"
#include "math.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdexcept>
__global__ void PoolLayer_Forward_reference_cu(double *previousLayerForward, double *out, int* backwardData, int width, int height, int depth, int stride, int previousLayerWidth, int previousLayerHeight, int previousLayerDepth)
{
for (int d = 0;d < depth;d++)
{
for (int y = 0;y < height;y++)
{
for (int x = 0;x < width;x++)
{
int index = x + (y * width) + (d * width * height);
for (int ys = 0;ys < stride;ys++)
{
for (int xs = 0;xs < stride;xs++)
{
int previousLayerIndex = xs + (x * stride) + (((y * stride) + ys) * previousLayerWidth) + (d * previousLayerWidth * previousLayerHeight);
double val = previousLayerForward[previousLayerIndex];
if (val > out[index])
{
out[index] = val;
backwardData[index] = previousLayerIndex;
}
}
}
}
}
}
}
__global__ void PoolLayer_Backward_reference_cu(double* nextlayerBackward, double *out, int* backwardData, int nodeCount)
{
for (int i = 0;i < nodeCount;i++)
{
int index = backwardData[i];
out[index] += nextlayerBackward[i];
}
}
void PoolLayer_Forward_reference(double *previousLayerForward, double *output, int* backwardData, int nodeCount, int width, int height, int depth, int stride, int previousLayerWidth, int previousLayerHeight, int previousLayerDepth)
{
PoolLayer_Forward_reference_cu << <1, 1 >> >(previousLayerForward, output, backwardData, width, height, depth, stride, previousLayerWidth, previousLayerHeight, previousLayerDepth);
LayerSynchronize();
}
void PoolLayer_Backward_reference(double* nextlayerBackward, double *output, int* backwardData, int nodeCount)
{
PoolLayer_Backward_reference_cu << <1, 1 >> >(nextlayerBackward, output, backwardData, nodeCount);
LayerSynchronize();
} |
74bfbf1a49b5fde0e0460ab584af1c43d3ad254e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include "custom_cuda_layers.h"
#include "memory_access_utils.h"
namespace cg = cooperative_groups;
__global__ void quantize_kernel(__half* vals, int group_size, int num_bits)
{
#if __CUDA_ARCH__ >= 700 || defined(__HIP_PLATFORM_HCC__)
cg::thread_block b = cg::this_thread_block(); // tb
cg::thread_block_tile<32> g =
cg::tiled_partition<32>(b); // warp, 32 not optimal for AMD which should be 64.
int gid = threadIdx.x >> 5;
int lane = threadIdx.x & 0x1f;
int warp_num = blockDim.x >> 5;
int id = threadIdx.x;
constexpr int granularity = 16;
constexpr int vals_per_access = granularity / sizeof(__half);
__half data[vals_per_access];
int group_id = blockIdx.x;
int thread_index = id * vals_per_access;
int reg_count = 0;
int offset = group_id * group_size;
float max = -10000.0;
for (int thread_index = id * vals_per_access; thread_index < group_size;
thread_index += blockDim.x * vals_per_access) {
mem_access::load_global<granularity>(data, vals + offset + thread_index);
#pragma unroll
for (int i = 0; i < vals_per_access; i++) {
if (abs((float)data[i]) > max) max = abs((float)data[i]);
}
}
#pragma unroll
for (int i = 1; i < WARP_SIZE; i <<= 1) {
auto temp = g.shfl_xor(max, i);
if (max < temp) max = temp;
}
__shared__ float partialMax[WARP_SIZE];
if (lane == 0) partialMax[gid] = max;
b.sync();
if (lane < warp_num) max = partialMax[lane];
#pragma unroll
for (int i = 1; i < WARP_SIZE; i <<= 1) {
auto temp = g.shfl_down(max, i);
if (max < temp) max = temp;
}
max = g.shfl(max, 0);
float q_scale = (float)(1 << num_bits) / (2 * max + 1e-5);
float q_scale_inv = 1 / q_scale;
int q_range_max = (1 << (num_bits - 1)) - 1;
int q_range_min = -(1 << (num_bits - 1));
for (int thread_index = id * vals_per_access; thread_index < group_size;
thread_index += blockDim.x * vals_per_access) {
mem_access::load_global<granularity>(data, vals + offset + thread_index);
#pragma unroll
for (int j = 0; j < vals_per_access; j++) {
float q_data;
q_data = __half2float(data[j]);
q_data = __float2int_rn(q_data * q_scale);
q_data = q_data > (q_range_max) ? (q_range_max)
: (q_data < (q_range_min) ? (q_range_min) : q_data);
data[j] = __float2half_rn(q_data * q_scale_inv);
}
mem_access::store_global<granularity>(vals + offset + thread_index, data);
}
#endif
}
__global__ void quantize_kernel(float* vals, int group_size, int num_bits)
{
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
int gid = threadIdx.x >> 5;
int lane = threadIdx.x & 0x1f;
int warp_num = blockDim.x >> 5;
int id = threadIdx.x;
constexpr int granularity = 16;
constexpr int vals_per_access = granularity / sizeof(float);
float data[vals_per_access];
int bid = blockIdx.x;
int thread_index = id * vals_per_access;
int reg_count = 0;
int offset = bid * group_size;
float max = -10000.0;
for (int thread_index = id * vals_per_access; thread_index < group_size;
thread_index += blockDim.x * vals_per_access) {
mem_access::load_global<granularity>(data, vals + offset + thread_index);
#pragma unroll
for (int i = 0; i < vals_per_access; i++) {
if (abs(data[i]) > max) max = abs(data[i]);
}
}
#pragma unroll
for (int i = 1; i < WARP_SIZE; i <<= 1) {
auto temp = g.shfl_xor(max, i);
if (max < temp) max = temp;
}
__shared__ float partialMax[WARP_SIZE];
if (lane == 0) partialMax[gid] = max;
b.sync();
if (lane < warp_num) max = partialMax[lane];
b.sync();
#pragma unroll
for (int i = 1; i < warp_num; i <<= 1) {
auto temp = g.shfl_down(max, i);
if (max < temp) max = temp;
}
max = g.shfl(max, 0);
float q_scale = (1 << num_bits) / (2 * max + 1e-5);
float q_scale_inv = 1 / q_scale;
int q_range_max = (1 << (num_bits - 1)) - 1;
int q_range_min = -(1 << (num_bits - 1));
for (int thread_index = id * vals_per_access; thread_index < group_size;
thread_index += blockDim.x * vals_per_access) {
mem_access::load_global<granularity>(data, vals + offset + thread_index);
#pragma unroll
for (int j = 0; j < vals_per_access; j++) {
float q_data;
q_data = __float2int_rn(data[j] * q_scale);
q_data = q_data > (q_range_max) ? (q_range_max)
: (q_data < (q_range_min) ? (q_range_min) : q_data);
data[j] = roundf(q_data * q_scale_inv);
}
mem_access::store_global<granularity>(vals + offset + thread_index, data);
}
}
template <typename T>
void launch_quantize_kernel(T* vals,
int total_count,
int group_num,
int num_bits,
hipStream_t stream)
{
dim3 grid_dim(group_num);
dim3 block_dim(1024);
hipLaunchKernelGGL(( quantize_kernel), dim3(grid_dim), dim3(block_dim), 0, stream, vals, total_count / group_num, num_bits);
}
template void launch_quantize_kernel(float* vals,
int total_count,
int group_num,
int num_bits,
hipStream_t stream);
template void launch_quantize_kernel(__half* vals,
int total_count,
int group_num,
int num_bits,
hipStream_t stream);
__global__ void sr_quantize_kernel(__half* vals,
int token_size,
int token_num,
int num_bits,
std::pair<uint64_t, uint64_t> seed)
{
#if __CUDA_ARCH__ >= 700 || defined(__HIP_PLATFORM_HCC__)
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
int gid = threadIdx.x >> 5;
int lane = threadIdx.x & 0x1f;
int warp_num = blockDim.x >> 5;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
float2* vals_cast = reinterpret_cast<float2*>(vals);
__half2 data_low[128];
__half2 data_high[128];
int bid = blockIdx.x;
hiprandStatePhilox4_32_10_t state;
hiprand_init(seed.first, idx, seed.second, &state);
unsigned int tid = threadIdx.x;
int reg_count = 0;
int offset = bid * token_size;
int group_index = bid * token_size + tid;
int total_count = token_size * token_num;
if (group_index < total_count) {
// float min = 10000.0;
float max = -10000.0;
while (tid < token_size) {
float2 data = vals_cast[offset + tid];
__half2* data_h = reinterpret_cast<__half2*>(&data);
data_low[reg_count] = data_h[0];
data_high[reg_count] = data_h[1];
float2 data_f[2];
data_f[0] = __half22float2(data_h[0]);
data_f[1] = __half22float2(data_h[1]);
if (abs((float)data_f[0].x) > max) max = abs((float)data_f[0].x);
if (abs((float)data_f[0].y) > max) max = abs((float)data_f[0].y);
if (abs((float)data_f[1].x) > max) max = abs((float)data_f[1].x);
if (abs((float)data_f[1].y) > max) max = abs((float)data_f[1].y);
tid += blockDim.x;
reg_count++;
}
#pragma unroll
for (int i = 1; i < WARP_SIZE; i <<= 1) {
auto temp = g.shfl_xor(max, i);
if (max < temp) max = temp;
}
__shared__ float partialMax[WARP_SIZE];
if (lane == 0) partialMax[gid] = max;
b.sync();
if (lane < warp_num) max = partialMax[lane];
#pragma unroll
for (int i = 1; i < warp_num; i <<= 1) {
auto temp = g.shfl_down(max, i);
if (max < temp) max = temp;
}
max = g.shfl(max, 0);
float q_scale_val = (float)(1 << num_bits) / (max * 2 + 1e-5);
float high_q = (float)((1 << (num_bits - 1)) - 1);
float low_q = (float)(-((1 << (num_bits - 1))));
for (int i = 0; i < reg_count; i++) {
int token_index = i * blockDim.x + threadIdx.x;
if (token_index < token_size) {
float2 data_f[2];
data_f[0] = __half22float2(data_low[i]);
data_f[1] = __half22float2(data_high[i]);
float2 q_data_int[2];
q_data_int[0].x = (float)((int)(data_f[0].x * q_scale_val));
q_data_int[0].y = (float)((int)(data_f[0].y * q_scale_val));
q_data_int[1].x = (float)((int)(data_f[1].x * q_scale_val));
q_data_int[1].y = (float)((int)(data_f[1].y * q_scale_val));
// Stochastic rounding
float4 rand = hiprand_uniform4(&state);
float q_error[4];
q_error[0] = abs(data_f[0].x - (q_data_int[0].x / q_scale_val)) * q_scale_val;
q_error[1] = abs(data_f[0].y - (q_data_int[0].y / q_scale_val)) * q_scale_val;
q_error[2] = abs(data_f[1].x - (q_data_int[1].x / q_scale_val)) * q_scale_val;
q_error[3] = abs(data_f[1].y - (q_data_int[1].y / q_scale_val)) * q_scale_val;
q_data_int[0].x =
(rand.x < q_error[0] && q_data_int[0].x > low_q && q_data_int[0].x < high_q)
? (q_data_int[0].x + (data_f[0].x > 0 ? 1 : -1))
: q_data_int[0].x;
q_data_int[0].y =
(rand.y < q_error[1] && q_data_int[0].y > low_q && q_data_int[0].y < high_q)
? (q_data_int[0].y + (data_f[0].y > 0 ? 1 : -1))
: q_data_int[0].y;
q_data_int[1].x =
(rand.w < q_error[2] && q_data_int[1].x > low_q && q_data_int[1].x < high_q)
? (q_data_int[1].x + (data_f[1].x > 0 ? 1 : -1))
: q_data_int[1].x;
q_data_int[1].y =
(rand.z < q_error[3] && q_data_int[1].y > low_q && q_data_int[1].y < high_q)
? (q_data_int[1].y + (data_f[1].y > 0 ? 1 : -1))
: q_data_int[1].y;
data_f[0].x = q_data_int[0].x / q_scale_val;
data_f[0].y = q_data_int[0].y / q_scale_val;
data_f[1].x = q_data_int[1].x / q_scale_val;
data_f[1].y = q_data_int[1].y / q_scale_val;
float2 result;
__half2* result_h = reinterpret_cast<__half2*>(&result);
result_h[0] = __float22half2_rn(data_f[0]);
result_h[1] = __float22half2_rn(data_f[1]);
vals_cast[offset + token_index] = result;
}
}
}
#endif
}
// Symmetric stochastic-rounding fake-quantization of float data (HIP path).
// Grid mapping: one thread block per token (row); `token_size` is the row
// length in float4 units, `token_num` the number of rows. Each block
//   1) caches its row in registers while tracking max |value|,
//   2) reduces the max block-wide (warp butterfly + shared memory),
//   3) truncates each value toward zero onto the signed `num_bits` grid,
//      stochastically rounds away from zero with probability equal to the
//      truncation error, and writes the dequantized result back in place.
// NOTE(review): the register cache is a fixed float4[128] and the load loop
// does not bound reg_count -- appears to assume token_size <= 128 * blockDim.x;
// confirm against callers.
__global__ void sr_quantize_kernel(float* vals,
                                   int token_size,
                                   int token_num,
                                   int num_bits,
                                   std::pair<uint64_t, uint64_t> seed)
{
    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
    int gid = threadIdx.x >> 5;      // warp index within the block
    int lane = threadIdx.x & 0x1f;   // lane index within the warp
    int warp_num = blockDim.x >> 5;  // warps per block
    int id = threadIdx.x;
    int idx = blockIdx.x * blockDim.x + id;  // globally unique thread id (RNG subsequence)
    float4* vals_cast = reinterpret_cast<float4*>(vals);
    float4 data[128];  // per-thread register cache of this token's float4 chunks
    int bid = blockIdx.x;
    int tid = threadIdx.x;
    hiprandStatePhilox4_32_10_t state;
    hiprand_init(seed.first, idx, seed.second, &state);
    int group_index = bid * token_size + threadIdx.x;
    int reg_count = 0;
    int total_count = token_size * token_num;
    if (group_index < total_count) {
        // float min = 10000.0;
        float max = -10000.0;
        // Pass 1: cache the token and track the running max |value|.
        while (tid < token_size) {
            data[reg_count] = vals_cast[group_index];
            if (abs(data[reg_count].x) > max) max = abs(data[reg_count].x);
            if (abs(data[reg_count].y) > max) max = abs(data[reg_count].y);
            if (abs(data[reg_count].z) > max) max = abs(data[reg_count].z);
            if (abs(data[reg_count].w) > max) max = abs(data[reg_count].w);
            group_index += blockDim.x;
            tid += blockDim.x;
            reg_count++;
        }
        // Intra-warp butterfly max-reduction.
#pragma unroll
        for (int i = 1; i < WARP_SIZE; i <<= 1) {
            auto temp = g.shfl_xor(max, i);
            if (max < temp) max = temp;
        }
        // Cross-warp reduction: lane 0 of each warp publishes its max, the
        // partials are reduced, and the result is broadcast from lane 0.
        __shared__ float partialMax[WARP_SIZE];
        if (lane == 0) partialMax[gid] = max;
        b.sync();
        if (lane < warp_num) max = partialMax[lane];
#pragma unroll
        for (int i = 1; i < warp_num; i <<= 1) {
            auto temp = g.shfl_down(max, i);
            if (max < temp) max = temp;
        }
        max = g.shfl(max, 0);
        // Scale maps [-max, max] onto the signed num_bits integer range; the
        // 1e-5 guards against a zero divisor for an all-zero token.
        float q_scale_val = (float)(1 << num_bits) / (max * 2 + 1e-5);
        float high_q = (float)((1 << (num_bits - 1)) - 1);
        float low_q = (float)(-((1 << (num_bits - 1))));
        int offset = (bid)*token_size;
        for (int i = 0; i < reg_count; i++) {
            group_index = i * blockDim.x + threadIdx.x;
            if (group_index < token_size) {
                float4 q_data = data[i];
                float4 q_data_int;
                // Truncate toward zero onto the integer grid (elements are
                // handled in x, y, w, z order throughout this loop body).
                q_data_int.x = (float)((int)(q_data.x * q_scale_val));
                q_data_int.y = (float)((int)(q_data.y * q_scale_val));
                q_data_int.w = (float)((int)(q_data.w * q_scale_val));
                q_data_int.z = (float)((int)(q_data.z * q_scale_val));
                // Stochastic rounding
                float4 rand = hiprand_uniform4(&state);
                // Truncation error rescaled into [0, 1) -- used as the
                // round-up probability below.
                float q_error[4];
                q_error[0] = abs(q_data.x - (q_data_int.x / q_scale_val)) * q_scale_val;
                q_error[1] = abs(q_data.y - (q_data_int.y / q_scale_val)) * q_scale_val;
                q_error[2] = abs(q_data.w - (q_data_int.w / q_scale_val)) * q_scale_val;
                q_error[3] = abs(q_data.z - (q_data_int.z / q_scale_val)) * q_scale_val;
                // Round away from zero with probability q_error, staying
                // strictly inside (low_q, high_q).
                q_data_int.x =
                    (rand.x < q_error[0] && q_data_int.x > low_q && q_data_int.x < high_q)
                        ? (q_data_int.x + (q_data.x > 0 ? 1 : -1))
                        : q_data_int.x;
                q_data_int.y =
                    (rand.y < q_error[1] && q_data_int.y > low_q && q_data_int.y < high_q)
                        ? (q_data_int.y + (q_data.y > 0 ? 1 : -1))
                        : q_data_int.y;
                q_data_int.w =
                    (rand.w < q_error[2] && q_data_int.w > low_q && q_data_int.w < high_q)
                        ? (q_data_int.w + (q_data.w > 0 ? 1 : -1))
                        : q_data_int.w;
                q_data_int.z =
                    (rand.z < q_error[3] && q_data_int.z > low_q && q_data_int.z < high_q)
                        ? (q_data_int.z + (q_data.z > 0 ? 1 : -1))
                        : q_data_int.z;
                // Dequantize in place.
                q_data_int.x /= q_scale_val;
                q_data_int.y /= q_scale_val;
                q_data_int.w /= q_scale_val;
                q_data_int.z /= q_scale_val;
                vals_cast[group_index + offset] = q_data_int;
            }
        }
    }
}
// Host-side launcher for the symmetric stochastic-rounding quantizer (HIP).
// One 1024-thread block per group; the kernel consumes the buffer in
// float4/float2 vector units, hence the /4 on the per-group element count.
template <typename T>
void launch_sr_quantize_kernel(T* vals,
                               int total_count,
                               int group_num,
                               int num_bits,
                               hipStream_t stream)
{
    dim3 grid_dim(group_num);
    dim3 block_dim(1024);
    // Each thread consumes this many random draws; advance the shared Philox
    // offset so subsequent launches see fresh randomness.
    const uint64_t offset_inc = total_count / grid_dim.x / block_dim.x;
    const std::pair<uint64_t, uint64_t> rng_seed =
        Context::Instance().IncrementOffset(offset_inc);
    const int vec_group_size = (total_count / group_num) / 4;
    hipLaunchKernelGGL((sr_quantize_kernel),
                       dim3(grid_dim),
                       dim3(block_dim),
                       0,
                       stream,
                       vals,
                       vec_group_size,
                       group_num,
                       num_bits,
                       rng_seed);
}
template void launch_sr_quantize_kernel(float* vals,
                                        int total_count,
                                        int group_num,
                                        int num_bits,
                                        hipStream_t stream);
template void launch_sr_quantize_kernel(__half* vals,
                                        int total_count,
                                        int group_num,
                                        int num_bits,
                                        hipStream_t stream);
// Asymmetric round-to-nearest fake-quantization of __half data (requires
// SM70+ or HIP; guarded below). One thread block per group: find the group's
// [min, max], map each value onto the unsigned `num_bits` grid relative to
// min with round-to-nearest, then dequantize (x_q * scale + min) and store
// back in place. Data is cached in a bounded float2[MAX_REG] register array
// (four halves per entry).
__global__ void quantize_kernel_asym(__half* vals, int group_size, int num_bits)
{
#if __CUDA_ARCH__ >= 700 || defined(__HIP_PLATFORM_HCC__)
    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
    int gid = threadIdx.x >> 5;      // warp index within the block
    int lane = threadIdx.x & 0x1f;   // lane index within the warp
    int warp_num = blockDim.x >> 5;  // warps per block
    int id = threadIdx.x;
    float2* vals_cast = reinterpret_cast<float2*>(vals);
    float2 data[MAX_REG];  // register cache: one float2 == four __half values
    int group_id = blockIdx.x;
    {
        int group_index = id;
        int reg_count = 0;
        int offset = group_id * group_size;
        float max = -10000.0;
        float min = 10000.0;
        // Pass 1: cache the group (bounded by MAX_REG) and track min/max.
        while (group_index < group_size && reg_count < MAX_REG) {
            data[reg_count] = vals_cast[offset + group_index];
            __half* data_h = reinterpret_cast<__half*>(&data[reg_count]);
            if (((float)data_h[0]) > max) max = (float)data_h[0];
            if (((float)data_h[1]) > max) max = (float)data_h[1];
            if (((float)data_h[2]) > max) max = (float)data_h[2];
            if (((float)data_h[3]) > max) max = (float)data_h[3];
            if (((float)data_h[0]) < min) min = (float)data_h[0];
            if (((float)data_h[1]) < min) min = (float)data_h[1];
            if (((float)data_h[2]) < min) min = (float)data_h[2];
            if (((float)data_h[3]) < min) min = (float)data_h[3];
            group_index += blockDim.x;
            reg_count++;
        }
        // Intra-warp butterfly reductions for max and min.
#pragma unroll
        for (int i = 1; i < WARP_SIZE; i <<= 1) {
            auto temp = g.shfl_xor(max, i);
            if (max < temp) max = temp;
        }
#pragma unroll
        for (int i = 1; i < WARP_SIZE; i <<= 1) {
            auto temp = g.shfl_xor(min, i);
            if (min > temp) min = temp;
        }
        // Cross-warp reductions through shared memory; results broadcast from
        // lane 0 below.
        __shared__ float partialMax[WARP_SIZE];
        __shared__ float partialMin[WARP_SIZE];
        if (lane == 0) partialMax[gid] = max;
        if (lane == 0) partialMin[gid] = min;
        b.sync();
        if (lane < warp_num) max = partialMax[lane];
        if (lane < warp_num) min = partialMin[lane];
#pragma unroll
        for (int i = 1; i < warp_num; i <<= 1) {
            auto temp = g.shfl_down(max, i);
            if (max < temp) max = temp;
        }
#pragma unroll
        for (int i = 1; i < warp_num; i <<= 1) {
            auto temp = g.shfl_down(min, i);
            if (min > temp) min = temp;
        }
        max = g.shfl(max, 0);
        min = g.shfl(min, 0);
        // Step size over [min, max] for 2^num_bits levels; 1e-5 guards
        // against a zero range.
        float q_scale = ((max - min) + 1e-5) / (float)(1 << num_bits);
        float q_scale_inv = 1 / q_scale;
        // Pass 2: quantize (round-to-nearest against min), dequantize, and
        // store back in place.
        for (int i = 0; i < reg_count; i++) {
            group_index = i * blockDim.x + id;
            if (group_index < group_size) {
                __half2* data_h = reinterpret_cast<__half2*>(&data[i]);
                float2 q_data[2];
                q_data[0] = __half22float2(data_h[0]);
                q_data[1] = __half22float2(data_h[1]);
                float2 q_data_int[2];
                q_data_int[0].x = roundf((q_data[0].x - min) * q_scale_inv);
                q_data_int[0].y = roundf((q_data[0].y - min) * q_scale_inv);
                q_data_int[1].x = roundf((q_data[1].x - min) * q_scale_inv);
                q_data_int[1].y = roundf((q_data[1].y - min) * q_scale_inv);
                q_data_int[0].x = q_data_int[0].x * q_scale + min;
                q_data_int[0].y = q_data_int[0].y * q_scale + min;
                q_data_int[1].x = q_data_int[1].x * q_scale + min;
                q_data_int[1].y = q_data_int[1].y * q_scale + min;
                data_h[0] = __float22half2_rn(q_data_int[0]);
                data_h[1] = __float22half2_rn(q_data_int[1]);
                vals_cast[offset + group_index] = data[i];
            }
        }
    }
#endif
}
// Asymmetric round-to-nearest fake-quantization of float data (HIP path).
// One thread block per group; `group_size` is in float4 units. Finds the
// group's [min, max], maps each value onto the unsigned `num_bits` grid
// relative to min with round-to-nearest, then dequantizes and stores in
// place. The register cache is bounded by MAX_REG.
__global__ void quantize_kernel_asym(float* vals, int group_size, int num_bits)
{
    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
    int gid = threadIdx.x >> 5;      // warp index within the block
    int lane = threadIdx.x & 0x1f;   // lane index within the warp
    int warp_num = blockDim.x >> 5;  // warps per block
    int id = threadIdx.x;
    float4* vals_cast = reinterpret_cast<float4*>(vals);
    float4 data[MAX_REG];  // per-thread register cache
    int bid = blockIdx.x;
    int group_index = bid * group_size + id;
    int reg_count = 0;
    float max = -10000.0;
    float min = 10000.0;
    // Pass 1: cache the group (bounded by MAX_REG) and track min/max.
    // `id` doubles as the in-group position here and is reset afterwards.
    while (id < group_size && reg_count < MAX_REG) {
        float4 data_reg = vals_cast[group_index];
        data[reg_count] = data_reg;
        if (data_reg.x > max) max = data_reg.x;
        if (data_reg.y > max) max = data_reg.y;
        if (data_reg.w > max) max = data_reg.w;
        if (data_reg.z > max) max = data_reg.z;
        if (data_reg.x < min) min = data_reg.x;
        if (data_reg.y < min) min = data_reg.y;
        if (data_reg.w < min) min = data_reg.w;
        if (data_reg.z < min) min = data_reg.z;
        group_index += blockDim.x;
        id += blockDim.x;
        reg_count++;
    }
    id = threadIdx.x;  // restore the thread-local index consumed by the loop
    // Intra-warp butterfly reductions for max and min.
#pragma unroll
    for (int i = 1; i < WARP_SIZE; i <<= 1) {
        auto temp = g.shfl_xor(max, i);
        if (max < temp) max = temp;
    }
#pragma unroll
    for (int i = 1; i < WARP_SIZE; i <<= 1) {
        auto temp = g.shfl_xor(min, i);
        if (min > temp) min = temp;
    }
    // Cross-warp reductions through shared memory.
    __shared__ float partialMax[WARP_SIZE];
    __shared__ float partialMin[WARP_SIZE];
    if (lane == 0) partialMax[gid] = max;
    if (lane == 0) partialMin[gid] = min;
    b.sync();
    if (lane < warp_num) max = partialMax[lane];
    if (lane < warp_num) min = partialMin[lane];
#pragma unroll
    for (int i = 1; i < warp_num; i <<= 1) {
        auto temp = g.shfl_down(max, i);
        if (max < temp) max = temp;
    }
#pragma unroll
    for (int i = 1; i < warp_num; i <<= 1) {
        auto temp = g.shfl_down(min, i);
        if (min > temp) min = temp;
    }
    max = g.shfl(max, 0);
    min = g.shfl(min, 0);
    // Step size over [min, max] for 2^num_bits levels; 1e-5 guards against a
    // zero range.
    float q_scale = ((max - min) + 1e-5) / (float)(1 << num_bits);
    float q_scale_inv = 1 / q_scale;
    // Pass 2: quantize (round-to-nearest against min), dequantize, store.
    for (int i = 0; i < reg_count; i++) {
        group_index = i * blockDim.x + id;
        if (group_index < group_size) {
            float4 q_data;
            q_data = data[i];
            float4 q_data_int;
            q_data_int.x = roundf((q_data.x - min) * q_scale_inv);
            q_data_int.y = roundf((q_data.y - min) * q_scale_inv);
            q_data_int.w = roundf((q_data.w - min) * q_scale_inv);
            q_data_int.z = roundf((q_data.z - min) * q_scale_inv);
            q_data.x = q_data_int.x * q_scale + min;
            q_data.y = q_data_int.y * q_scale + min;
            q_data.w = q_data_int.w * q_scale + min;
            q_data.z = q_data_int.z * q_scale + min;
            vals_cast[group_index + bid * group_size] = q_data;
        }
    }
}
// Host-side launcher for the asymmetric round-to-nearest quantizer (HIP).
// One 1024-thread block per group; the kernel reads float4/float2 vectors,
// hence the /4 on the per-group element count.
template <typename T>
void launch_quantize_kernel_asym(T* vals,
                                 int total_count,
                                 int group_num,
                                 int num_bits,
                                 hipStream_t stream)
{
    const int vec_group_size = (total_count / group_num) / 4;
    dim3 grid_dim(group_num);
    dim3 block_dim(1024);
    hipLaunchKernelGGL((quantize_kernel_asym),
                       dim3(grid_dim),
                       dim3(block_dim),
                       0,
                       stream,
                       vals,
                       vec_group_size,
                       num_bits);
}
template void launch_quantize_kernel_asym(float* vals,
                                          int total_count,
                                          int group_num,
                                          int num_bits,
                                          hipStream_t stream);
template void launch_quantize_kernel_asym(__half* vals,
                                          int total_count,
                                          int group_num,
                                          int num_bits,
                                          hipStream_t stream);
// Asymmetric stochastic-rounding fake-quantization of __half data (requires
// SM70+ or HIP; guarded below). One thread block per token; `token_size` is
// the row length in float2 units (four halves per load). Each block finds
// its row's [min, max], truncates (v - min) / step onto the unsigned
// num_bits grid, stochastically rounds UP with probability equal to the
// truncation error (capped at high_q), then dequantizes and stores in place.
// NOTE(review): the __half2[128] register caches are not bounded by the load
// loop -- appears to assume token_size <= 128 * blockDim.x; confirm against
// callers.
__global__ void sr_quantize_kernel_asym(__half* vals,
                                        int token_size,
                                        int token_num,
                                        int num_bits,
                                        std::pair<uint64_t, uint64_t> seed)
{
#if __CUDA_ARCH__ >= 700 || defined(__HIP_PLATFORM_HCC__)
    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
    int gid = threadIdx.x >> 5;      // warp index within the block
    int lane = threadIdx.x & 0x1f;   // lane index within the warp
    int warp_num = blockDim.x >> 5;  // warps per block
    int idx = blockIdx.x * blockDim.x + threadIdx.x;  // RNG subsequence id
    float2* vals_cast = reinterpret_cast<float2*>(vals);
    __half2 data_low[128];   // first half2 of each cached float2
    __half2 data_high[128];  // second half2 of each cached float2
    int bid = blockIdx.x;
    hiprandStatePhilox4_32_10_t state;
    hiprand_init(seed.first, idx, seed.second, &state);
    unsigned int tid = threadIdx.x;
    int reg_count = 0;
    int offset = bid * token_size;
    int group_index = bid * token_size + tid;
    int total_count = token_size * token_num;
    if (group_index < total_count) {
        float min = 10000.0;
        float max = -10000.0;
        // Pass 1: cache the token and track min/max in fp32.
        while (tid < token_size) {
            float2 data = vals_cast[offset + tid];
            __half2* data_h = reinterpret_cast<__half2*>(&data);
            data_low[reg_count] = data_h[0];
            data_high[reg_count] = data_h[1];
            float2 data_f[2];
            data_f[0] = __half22float2(data_h[0]);
            data_f[1] = __half22float2(data_h[1]);
            if (((float)data_f[0].x) > max) max = (float)data_f[0].x;
            if (((float)data_f[0].y) > max) max = (float)data_f[0].y;
            if (((float)data_f[1].x) > max) max = (float)data_f[1].x;
            if (((float)data_f[1].y) > max) max = (float)data_f[1].y;
            if (((float)data_f[0].x) < min) min = (float)data_f[0].x;
            if (((float)data_f[0].y) < min) min = (float)data_f[0].y;
            if (((float)data_f[1].x) < min) min = (float)data_f[1].x;
            if (((float)data_f[1].y) < min) min = (float)data_f[1].y;
            tid += blockDim.x;
            reg_count++;
        }
        // Intra-warp butterfly reductions for max and min.
#pragma unroll
        for (int i = 1; i < WARP_SIZE; i <<= 1) {
            auto temp = g.shfl_xor(max, i);
            if (max < temp) max = temp;
        }
#pragma unroll
        for (int i = 1; i < WARP_SIZE; i <<= 1) {
            auto temp = g.shfl_xor(min, i);
            if (min > temp) min = temp;
        }
        // Cross-warp reductions through shared memory; results broadcast
        // from lane 0 below.
        __shared__ float partialMax[WARP_SIZE];
        __shared__ float partialMin[WARP_SIZE];
        if (lane == 0) partialMax[gid] = max;
        if (lane == 0) partialMin[gid] = min;
        b.sync();
        if (lane < warp_num) max = partialMax[lane];
        if (lane < warp_num) min = partialMin[lane];
#pragma unroll
        for (int i = 1; i < warp_num; i <<= 1) {
            auto temp = g.shfl_down(max, i);
            if (max < temp) max = temp;
        }
#pragma unroll
        for (int i = 1; i < warp_num; i <<= 1) {
            auto temp = g.shfl_down(min, i);
            if (min > temp) min = temp;
        }
        max = g.shfl(max, 0);
        min = g.shfl(min, 0);
        // Step size over [min, max]; 1e-5 guards against a zero range.
        float q_scale_val = ((max - min) + 1e-5) / (float)(1 << num_bits);
        float q_scale_val_inv = 1 / q_scale_val;
        float high_q = (float)((1 << num_bits) - 1);
        for (int i = 0; i < reg_count; i++) {
            int token_index = i * blockDim.x + threadIdx.x;
            if (token_index < token_size) {
                float2 data_f[2];
                data_f[0] = __half22float2(data_low[i]);
                data_f[1] = __half22float2(data_high[i]);
                float2 q_data_int[2];
                // Truncate (v - min) / step onto the unsigned grid.
                q_data_int[0].x = (float)((unsigned int)((data_f[0].x - min) * q_scale_val_inv));
                q_data_int[0].y = (float)((unsigned int)((data_f[0].y - min) * q_scale_val_inv));
                q_data_int[1].x = (float)((unsigned int)((data_f[1].x - min) * q_scale_val_inv));
                q_data_int[1].y = (float)((unsigned int)((data_f[1].y - min) * q_scale_val_inv));
                // Stochastic rounding
                float4 rand = hiprand_uniform4(&state);
                // Truncation error rescaled into [0, 1); note rand.w pairs
                // with element [1].x and rand.z with [1].y (matches the
                // checks below).
                float q_error[4];
                q_error[0] =
                    abs(data_f[0].x - ((q_data_int[0].x * q_scale_val) + min)) * q_scale_val_inv;
                q_error[1] =
                    abs(data_f[0].y - ((q_data_int[0].y * q_scale_val) + min)) * q_scale_val_inv;
                q_error[2] =
                    abs(data_f[1].x - ((q_data_int[1].x * q_scale_val) + min)) * q_scale_val_inv;
                q_error[3] =
                    abs(data_f[1].y - ((q_data_int[1].y * q_scale_val) + min)) * q_scale_val_inv;
                // Round up with probability q_error, capped at high_q.
                q_data_int[0].x = (rand.x < q_error[0] && q_data_int[0].x < high_q)
                                      ? (q_data_int[0].x + 1)
                                      : q_data_int[0].x;
                q_data_int[0].y = (rand.y < q_error[1] && q_data_int[0].y < high_q)
                                      ? (q_data_int[0].y + 1)
                                      : q_data_int[0].y;
                q_data_int[1].x = (rand.w < q_error[2] && q_data_int[1].x < high_q)
                                      ? (q_data_int[1].x + 1)
                                      : q_data_int[1].x;
                q_data_int[1].y = (rand.z < q_error[3] && q_data_int[1].y < high_q)
                                      ? (q_data_int[1].y + 1)
                                      : q_data_int[1].y;
                // Dequantize and repack to half2 for the in-place store.
                data_f[0].x = q_data_int[0].x * q_scale_val + min;
                data_f[0].y = q_data_int[0].y * q_scale_val + min;
                data_f[1].x = q_data_int[1].x * q_scale_val + min;
                data_f[1].y = q_data_int[1].y * q_scale_val + min;
                float2 result;
                __half2* result_h = reinterpret_cast<__half2*>(&result);
                result_h[0] = __float22half2_rn(data_f[0]);
                result_h[1] = __float22half2_rn(data_f[1]);
                vals_cast[offset + token_index] = result;
            }
        }
    }
#endif
}
// Asymmetric stochastic-rounding fake-quantization of float data (HIP path).
// One thread block per token; `token_size` is the row length in float4
// units. Each block finds its row's [min, max], truncates (v - min) / step
// onto the unsigned num_bits grid, stochastically rounds UP with probability
// equal to the truncation error (capped at high_q), then dequantizes and
// stores in place.
// NOTE(review): the float4[128] register cache is not bounded by the load
// loop -- appears to assume token_size <= 128 * blockDim.x; confirm against
// callers.
__global__ void sr_quantize_kernel_asym(float* vals,
                                        int token_size,
                                        int token_num,
                                        int num_bits,
                                        std::pair<uint64_t, uint64_t> seed)
{
    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
    int gid = threadIdx.x >> 5;      // warp index within the block
    int lane = threadIdx.x & 0x1f;   // lane index within the warp
    int warp_num = blockDim.x >> 5;  // warps per block
    int id = threadIdx.x;
    int idx = blockIdx.x * blockDim.x + id;  // RNG subsequence id
    float4* vals_cast = reinterpret_cast<float4*>(vals);
    float4 data[128];  // per-thread register cache
    int bid = blockIdx.x;
    int tid = threadIdx.x;
    hiprandStatePhilox4_32_10_t state;
    hiprand_init(seed.first, idx, seed.second, &state);
    int group_index = bid * token_size + threadIdx.x;
    int reg_count = 0;
    int total_count = token_size * token_num;
    if (group_index < total_count) {
        float min = 10000.0;
        float max = -10000.0;
        // Pass 1: cache the token and track min/max.
        while (tid < token_size) {
            float4 data_reg = vals_cast[group_index];
            data[reg_count] = data_reg;
            if (data_reg.x > max) max = data_reg.x;
            if (data_reg.y > max) max = data_reg.y;
            if (data_reg.w > max) max = data_reg.w;
            if (data_reg.z > max) max = data_reg.z;
            if (data_reg.x < min) min = data_reg.x;
            if (data_reg.y < min) min = data_reg.y;
            if (data_reg.w < min) min = data_reg.w;
            if (data_reg.z < min) min = data_reg.z;
            group_index += blockDim.x;
            tid += blockDim.x;
            reg_count++;
        }
        // Intra-warp butterfly reductions for max and min.
#pragma unroll
        for (int i = 1; i < WARP_SIZE; i <<= 1) {
            auto temp = g.shfl_xor(max, i);
            if (max < temp) max = temp;
        }
#pragma unroll
        for (int i = 1; i < WARP_SIZE; i <<= 1) {
            auto temp = g.shfl_xor(min, i);
            if (min > temp) min = temp;
        }
        // Cross-warp reductions through shared memory; results broadcast
        // from lane 0 below.
        __shared__ float partialMax[WARP_SIZE];
        __shared__ float partialMin[WARP_SIZE];
        if (lane == 0) partialMax[gid] = max;
        if (lane == 0) partialMin[gid] = min;
        b.sync();
        if (lane < warp_num) max = partialMax[lane];
        if (lane < warp_num) min = partialMin[lane];
#pragma unroll
        for (int i = 1; i < warp_num; i <<= 1) {
            auto temp = g.shfl_down(max, i);
            if (max < temp) max = temp;
        }
#pragma unroll
        for (int i = 1; i < warp_num; i <<= 1) {
            auto temp = g.shfl_down(min, i);
            if (min > temp) min = temp;
        }
        max = g.shfl(max, 0);
        min = g.shfl(min, 0);
        // Step size over [min, max]; 1e-5 guards against a zero range.
        float q_scale_val = ((max - min) + 1e-5) / (float)(1 << num_bits);
        float high_q = (float)((1 << num_bits) - 1);
        int offset = (bid)*token_size;
        for (int i = 0; i < reg_count; i++) {
            group_index = i * blockDim.x + threadIdx.x;
            if (group_index < token_size) {
                float4 q_data = data[i];
                float4 q_data_int;
                // Truncate (v - min) / step onto the unsigned grid
                // (elements handled in x, y, w, z order).
                q_data_int.x = (float)((int)((q_data.x - min) / q_scale_val));
                q_data_int.y = (float)((int)((q_data.y - min) / q_scale_val));
                q_data_int.w = (float)((int)((q_data.w - min) / q_scale_val));
                q_data_int.z = (float)((int)((q_data.z - min) / q_scale_val));
                // Stochastic rounding
                float4 rand = hiprand_uniform4(&state);
                // Truncation error rescaled into [0, 1).
                float q_error[4];
                q_error[0] = abs(q_data.x - ((q_data_int.x * q_scale_val) + min)) / q_scale_val;
                q_error[1] = abs(q_data.y - ((q_data_int.y * q_scale_val) + min)) / q_scale_val;
                q_error[2] = abs(q_data.w - ((q_data_int.w * q_scale_val) + min)) / q_scale_val;
                q_error[3] = abs(q_data.z - ((q_data_int.z * q_scale_val) + min)) / q_scale_val;
                // Round up with probability q_error, capped at high_q.
                q_data_int.x = (rand.x < q_error[0] && q_data_int.x < high_q) ? (q_data_int.x + 1)
                                                                              : q_data_int.x;
                q_data_int.y = (rand.y < q_error[1] && q_data_int.y < high_q) ? (q_data_int.y + 1)
                                                                              : q_data_int.y;
                q_data_int.w = (rand.w < q_error[2] && q_data_int.w < high_q) ? (q_data_int.w + 1)
                                                                              : q_data_int.w;
                q_data_int.z = (rand.z < q_error[3] && q_data_int.z < high_q) ? (q_data_int.z + 1)
                                                                              : q_data_int.z;
                // Dequantize in place.
                q_data_int.x = q_data_int.x * q_scale_val + min;
                q_data_int.y = q_data_int.y * q_scale_val + min;
                q_data_int.w = q_data_int.w * q_scale_val + min;
                q_data_int.z = q_data_int.z * q_scale_val + min;
                vals_cast[group_index + offset] = q_data_int;
            }
        }
    }
}
// Host-side launcher for the ASYMMETRIC stochastic-rounding quantizer (HIP).
// One 1024-thread block per group; the kernel consumes the buffer in
// float4/float2 vector units, hence the /4 on the per-group element count.
template <typename T>
void launch_sr_quantize_kernel_asym(T* vals,
                                    int total_count,
                                    int group_num,
                                    int num_bits,
                                    hipStream_t stream)
{
    dim3 block_dim(1024);
    dim3 grid_dim(group_num);
    // Advance the shared Philox offset by the per-thread element count so
    // consecutive launches draw fresh random numbers.
    uint64_t inc = total_count / grid_dim.x / block_dim.x;
    std::pair<uint64_t, uint64_t> seed = Context::Instance().IncrementOffset(inc);
    // BUGFIX: this launcher previously dispatched the *symmetric*
    // sr_quantize_kernel, so the asymmetric entry point silently ran the
    // wrong quantization scheme. Dispatch sr_quantize_kernel_asym instead.
    hipLaunchKernelGGL((sr_quantize_kernel_asym), dim3(grid_dim), dim3(block_dim), 0, stream,
        vals, (total_count / group_num) / 4, group_num, num_bits, seed);
}
template void launch_sr_quantize_kernel_asym(float* vals,
                                             int total_count,
                                             int group_num,
                                             int num_bits,
                                             hipStream_t stream);
template void launch_sr_quantize_kernel_asym(__half* vals,
                                             int total_count,
                                             int group_num,
                                             int num_bits,
                                             hipStream_t stream);
| 74bfbf1a49b5fde0e0460ab584af1c43d3ad254e.cu | #include <math.h>
#include "custom_cuda_layers.h"
#include "memory_access_utils.h"
namespace cg = cooperative_groups;
// Symmetric round-to-nearest fake-quantization of __half data (requires
// SM70+ or HIP; guarded below). One thread block per group: find the
// group's max |value|, map each value to a signed `num_bits` integer with
// round-to-nearest, clamp to the representable range, then dequantize and
// store in place via 16-byte vectorized accesses.
__global__ void quantize_kernel(__half* vals, int group_size, int num_bits)
{
#if __CUDA_ARCH__ >= 700 || defined(__HIP_PLATFORM_HCC__)
    cg::thread_block b = cg::this_thread_block();  // tb
    cg::thread_block_tile<32> g =
        cg::tiled_partition<32>(b);  // warp, 32 not optimal for AMD which should be 64.
    int gid = threadIdx.x >> 5;      // warp index within the block
    int lane = threadIdx.x & 0x1f;   // lane index within the warp
    int warp_num = blockDim.x >> 5;  // warps per block
    int id = threadIdx.x;
    constexpr int granularity = 16;  // bytes per vectorized access
    constexpr int vals_per_access = granularity / sizeof(__half);
    __half data[vals_per_access];
    int group_id = blockIdx.x;
    int thread_index = id * vals_per_access;  // (shadowed by the loops below)
    int reg_count = 0;                        // (unused)
    int offset = group_id * group_size;
    float max = -10000.0;
    // Pass 1: find the group's max |value|.
    for (int thread_index = id * vals_per_access; thread_index < group_size;
         thread_index += blockDim.x * vals_per_access) {
        mem_access::load_global<granularity>(data, vals + offset + thread_index);
#pragma unroll
        for (int i = 0; i < vals_per_access; i++) {
            if (abs((float)data[i]) > max) max = abs((float)data[i]);
        }
    }
    // Intra-warp butterfly max-reduction.
#pragma unroll
    for (int i = 1; i < WARP_SIZE; i <<= 1) {
        auto temp = g.shfl_xor(max, i);
        if (max < temp) max = temp;
    }
    // Cross-warp reduction through shared memory; result broadcast from
    // lane 0 below.
    __shared__ float partialMax[WARP_SIZE];
    if (lane == 0) partialMax[gid] = max;
    b.sync();
    if (lane < warp_num) max = partialMax[lane];
#pragma unroll
    for (int i = 1; i < WARP_SIZE; i <<= 1) {
        auto temp = g.shfl_down(max, i);
        if (max < temp) max = temp;
    }
    max = g.shfl(max, 0);
    // Scale maps [-max, max] onto the signed num_bits range; 1e-5 guards
    // against a zero divisor for an all-zero group.
    float q_scale = (float)(1 << num_bits) / (2 * max + 1e-5);
    float q_scale_inv = 1 / q_scale;
    int q_range_max = (1 << (num_bits - 1)) - 1;
    int q_range_min = -(1 << (num_bits - 1));
    // Pass 2: quantize (round-to-nearest, clamp) and dequantize in place.
    for (int thread_index = id * vals_per_access; thread_index < group_size;
         thread_index += blockDim.x * vals_per_access) {
        mem_access::load_global<granularity>(data, vals + offset + thread_index);
#pragma unroll
        for (int j = 0; j < vals_per_access; j++) {
            float q_data;
            q_data = __half2float(data[j]);
            q_data = __float2int_rn(q_data * q_scale);
            q_data = q_data > (q_range_max) ? (q_range_max)
                    : (q_data < (q_range_min) ? (q_range_min) : q_data);
            data[j] = __float2half_rn(q_data * q_scale_inv);
        }
        mem_access::store_global<granularity>(vals + offset + thread_index, data);
    }
#endif
}
// Symmetric round-to-nearest fake-quantization of float data. One thread
// block per group: find the group's max |value|, map each value to a signed
// `num_bits` integer with round-to-nearest, clamp to the representable
// range, then dequantize and store in place via 16-byte vectorized accesses.
__global__ void quantize_kernel(float* vals, int group_size, int num_bits)
{
    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
    int gid = threadIdx.x >> 5;      // warp index within the block
    int lane = threadIdx.x & 0x1f;   // lane index within the warp
    int warp_num = blockDim.x >> 5;  // warps per block
    int id = threadIdx.x;
    constexpr int granularity = 16;  // bytes per vectorized access
    constexpr int vals_per_access = granularity / sizeof(float);
    float data[vals_per_access];
    int bid = blockIdx.x;
    int offset = bid * group_size;
    float max = -10000.0;
    // Pass 1: find the group's max |value|.
    for (int thread_index = id * vals_per_access; thread_index < group_size;
         thread_index += blockDim.x * vals_per_access) {
        mem_access::load_global<granularity>(data, vals + offset + thread_index);
#pragma unroll
        for (int i = 0; i < vals_per_access; i++) {
            if (abs(data[i]) > max) max = abs(data[i]);
        }
    }
    // Intra-warp butterfly max-reduction.
#pragma unroll
    for (int i = 1; i < WARP_SIZE; i <<= 1) {
        auto temp = g.shfl_xor(max, i);
        if (max < temp) max = temp;
    }
    // Cross-warp reduction through shared memory; result broadcast from
    // lane 0 below.
    __shared__ float partialMax[WARP_SIZE];
    if (lane == 0) partialMax[gid] = max;
    b.sync();
    if (lane < warp_num) max = partialMax[lane];
#pragma unroll
    for (int i = 1; i < warp_num; i <<= 1) {
        auto temp = g.shfl_down(max, i);
        if (max < temp) max = temp;
    }
    max = g.shfl(max, 0);
    // Scale maps [-max, max] onto the signed num_bits range; 1e-5 guards
    // against a zero divisor for an all-zero group.
    float q_scale = (1 << num_bits) / (2 * max + 1e-5);
    float q_scale_inv = 1 / q_scale;
    int q_range_max = (1 << (num_bits - 1)) - 1;
    int q_range_min = -(1 << (num_bits - 1));
    // Pass 2: quantize (round-to-nearest, clamp) and dequantize in place.
    for (int thread_index = id * vals_per_access; thread_index < group_size;
         thread_index += blockDim.x * vals_per_access) {
        mem_access::load_global<granularity>(data, vals + offset + thread_index);
#pragma unroll
        for (int j = 0; j < vals_per_access; j++) {
            float q_data;
            q_data = __float2int_rn(data[j] * q_scale);
            q_data = q_data > (q_range_max) ? (q_range_max)
                    : (q_data < (q_range_min) ? (q_range_min) : q_data);
            // BUGFIX: the dequantized value was previously passed through
            // roundf(), which collapsed every quantization level to a whole
            // number; the __half overload correctly stores the unrounded
            // q_data * q_scale_inv, and this overload now matches it.
            data[j] = q_data * q_scale_inv;
        }
        mem_access::store_global<granularity>(vals + offset + thread_index, data);
    }
}
// Host-side launcher for the symmetric round-to-nearest quantizer (CUDA).
// One 1024-thread block per group; each block quantizes the
// `total_count / group_num` contiguous elements of its group.
template <typename T>
void launch_quantize_kernel(T* vals,
                            int total_count,
                            int group_num,
                            int num_bits,
                            cudaStream_t stream)
{
    const int group_size = total_count / group_num;
    const dim3 grid_dim(group_num);
    const dim3 block_dim(1024);
    quantize_kernel<<<grid_dim, block_dim, 0, stream>>>(vals, group_size, num_bits);
}
template void launch_quantize_kernel(float* vals,
                                     int total_count,
                                     int group_num,
                                     int num_bits,
                                     cudaStream_t stream);
template void launch_quantize_kernel(__half* vals,
                                     int total_count,
                                     int group_num,
                                     int num_bits,
                                     cudaStream_t stream);
// Symmetric stochastic-rounding fake-quantization of __half data (requires
// SM70+ or HIP; guarded below). One thread block per token; `token_size` is
// the row length in float2 units (four halves per load). Each block reduces
// its row's max |value|, truncates each value toward zero onto the signed
// `num_bits` grid, stochastically rounds away from zero with probability
// equal to the truncation error, then dequantizes and stores in place.
// NOTE(review): the __half2[128] register caches are not bounded by the load
// loop -- appears to assume token_size <= 128 * blockDim.x; confirm against
// callers.
__global__ void sr_quantize_kernel(__half* vals,
                                   int token_size,
                                   int token_num,
                                   int num_bits,
                                   std::pair<uint64_t, uint64_t> seed)
{
#if __CUDA_ARCH__ >= 700 || defined(__HIP_PLATFORM_HCC__)
    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
    int gid = threadIdx.x >> 5;      // warp index within the block
    int lane = threadIdx.x & 0x1f;   // lane index within the warp
    int warp_num = blockDim.x >> 5;  // warps per block
    int idx = blockIdx.x * blockDim.x + threadIdx.x;  // RNG subsequence id
    float2* vals_cast = reinterpret_cast<float2*>(vals);
    __half2 data_low[128];   // first half2 of each cached float2
    __half2 data_high[128];  // second half2 of each cached float2
    int bid = blockIdx.x;
    curandStatePhilox4_32_10_t state;
    curand_init(seed.first, idx, seed.second, &state);
    unsigned int tid = threadIdx.x;
    int reg_count = 0;
    int offset = bid * token_size;
    int group_index = bid * token_size + tid;
    int total_count = token_size * token_num;
    if (group_index < total_count) {
        // float min = 10000.0;
        float max = -10000.0;
        // Pass 1: cache the token and track the running max |value| in fp32.
        while (tid < token_size) {
            float2 data = vals_cast[offset + tid];
            __half2* data_h = reinterpret_cast<__half2*>(&data);
            data_low[reg_count] = data_h[0];
            data_high[reg_count] = data_h[1];
            float2 data_f[2];
            data_f[0] = __half22float2(data_h[0]);
            data_f[1] = __half22float2(data_h[1]);
            if (abs((float)data_f[0].x) > max) max = abs((float)data_f[0].x);
            if (abs((float)data_f[0].y) > max) max = abs((float)data_f[0].y);
            if (abs((float)data_f[1].x) > max) max = abs((float)data_f[1].x);
            if (abs((float)data_f[1].y) > max) max = abs((float)data_f[1].y);
            tid += blockDim.x;
            reg_count++;
        }
        // Intra-warp butterfly max-reduction.
#pragma unroll
        for (int i = 1; i < WARP_SIZE; i <<= 1) {
            auto temp = g.shfl_xor(max, i);
            if (max < temp) max = temp;
        }
        // Cross-warp reduction through shared memory; result broadcast from
        // lane 0 below.
        __shared__ float partialMax[WARP_SIZE];
        if (lane == 0) partialMax[gid] = max;
        b.sync();
        if (lane < warp_num) max = partialMax[lane];
#pragma unroll
        for (int i = 1; i < warp_num; i <<= 1) {
            auto temp = g.shfl_down(max, i);
            if (max < temp) max = temp;
        }
        max = g.shfl(max, 0);
        // Scale maps [-max, max] onto the signed num_bits range; 1e-5 guards
        // against a zero divisor for an all-zero token.
        float q_scale_val = (float)(1 << num_bits) / (max * 2 + 1e-5);
        float high_q = (float)((1 << (num_bits - 1)) - 1);
        float low_q = (float)(-((1 << (num_bits - 1))));
        for (int i = 0; i < reg_count; i++) {
            int token_index = i * blockDim.x + threadIdx.x;
            if (token_index < token_size) {
                float2 data_f[2];
                data_f[0] = __half22float2(data_low[i]);
                data_f[1] = __half22float2(data_high[i]);
                float2 q_data_int[2];
                // Truncate toward zero onto the integer grid.
                q_data_int[0].x = (float)((int)(data_f[0].x * q_scale_val));
                q_data_int[0].y = (float)((int)(data_f[0].y * q_scale_val));
                q_data_int[1].x = (float)((int)(data_f[1].x * q_scale_val));
                q_data_int[1].y = (float)((int)(data_f[1].y * q_scale_val));
                // Stochastic rounding
                float4 rand = curand_uniform4(&state);
                // Truncation error rescaled into [0, 1); note rand.w pairs
                // with element [1].x and rand.z with [1].y (matches the
                // checks below).
                float q_error[4];
                q_error[0] = abs(data_f[0].x - (q_data_int[0].x / q_scale_val)) * q_scale_val;
                q_error[1] = abs(data_f[0].y - (q_data_int[0].y / q_scale_val)) * q_scale_val;
                q_error[2] = abs(data_f[1].x - (q_data_int[1].x / q_scale_val)) * q_scale_val;
                q_error[3] = abs(data_f[1].y - (q_data_int[1].y / q_scale_val)) * q_scale_val;
                // Round away from zero with probability q_error, staying
                // strictly inside (low_q, high_q).
                q_data_int[0].x =
                    (rand.x < q_error[0] && q_data_int[0].x > low_q && q_data_int[0].x < high_q)
                        ? (q_data_int[0].x + (data_f[0].x > 0 ? 1 : -1))
                        : q_data_int[0].x;
                q_data_int[0].y =
                    (rand.y < q_error[1] && q_data_int[0].y > low_q && q_data_int[0].y < high_q)
                        ? (q_data_int[0].y + (data_f[0].y > 0 ? 1 : -1))
                        : q_data_int[0].y;
                q_data_int[1].x =
                    (rand.w < q_error[2] && q_data_int[1].x > low_q && q_data_int[1].x < high_q)
                        ? (q_data_int[1].x + (data_f[1].x > 0 ? 1 : -1))
                        : q_data_int[1].x;
                q_data_int[1].y =
                    (rand.z < q_error[3] && q_data_int[1].y > low_q && q_data_int[1].y < high_q)
                        ? (q_data_int[1].y + (data_f[1].y > 0 ? 1 : -1))
                        : q_data_int[1].y;
                // Dequantize and repack to half2 for the in-place store.
                data_f[0].x = q_data_int[0].x / q_scale_val;
                data_f[0].y = q_data_int[0].y / q_scale_val;
                data_f[1].x = q_data_int[1].x / q_scale_val;
                data_f[1].y = q_data_int[1].y / q_scale_val;
                float2 result;
                __half2* result_h = reinterpret_cast<__half2*>(&result);
                result_h[0] = __float22half2_rn(data_f[0]);
                result_h[1] = __float22half2_rn(data_f[1]);
                vals_cast[offset + token_index] = result;
            }
        }
    }
#endif
}
// Symmetric stochastic-rounding fake-quantization of float data (CUDA path).
// One thread block per token; `token_size` is the row length in float4
// units. Each block reduces its row's max |value|, truncates each value
// toward zero onto the signed `num_bits` grid, stochastically rounds away
// from zero with probability equal to the truncation error, then dequantizes
// and stores in place.
// NOTE(review): the float4[128] register cache is not bounded by the load
// loop -- appears to assume token_size <= 128 * blockDim.x; confirm against
// callers.
__global__ void sr_quantize_kernel(float* vals,
                                   int token_size,
                                   int token_num,
                                   int num_bits,
                                   std::pair<uint64_t, uint64_t> seed)
{
    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
    int gid = threadIdx.x >> 5;      // warp index within the block
    int lane = threadIdx.x & 0x1f;   // lane index within the warp
    int warp_num = blockDim.x >> 5;  // warps per block
    int id = threadIdx.x;
    int idx = blockIdx.x * blockDim.x + id;  // RNG subsequence id
    float4* vals_cast = reinterpret_cast<float4*>(vals);
    float4 data[128];  // per-thread register cache
    int bid = blockIdx.x;
    int tid = threadIdx.x;
    curandStatePhilox4_32_10_t state;
    curand_init(seed.first, idx, seed.second, &state);
    int group_index = bid * token_size + threadIdx.x;
    int reg_count = 0;
    int total_count = token_size * token_num;
    if (group_index < total_count) {
        // float min = 10000.0;
        float max = -10000.0;
        // Pass 1: cache the token and track the running max |value|.
        while (tid < token_size) {
            data[reg_count] = vals_cast[group_index];
            if (abs(data[reg_count].x) > max) max = abs(data[reg_count].x);
            if (abs(data[reg_count].y) > max) max = abs(data[reg_count].y);
            if (abs(data[reg_count].z) > max) max = abs(data[reg_count].z);
            if (abs(data[reg_count].w) > max) max = abs(data[reg_count].w);
            group_index += blockDim.x;
            tid += blockDim.x;
            reg_count++;
        }
        // Intra-warp butterfly max-reduction.
#pragma unroll
        for (int i = 1; i < WARP_SIZE; i <<= 1) {
            auto temp = g.shfl_xor(max, i);
            if (max < temp) max = temp;
        }
        // Cross-warp reduction through shared memory; result broadcast from
        // lane 0 below.
        __shared__ float partialMax[WARP_SIZE];
        if (lane == 0) partialMax[gid] = max;
        b.sync();
        if (lane < warp_num) max = partialMax[lane];
#pragma unroll
        for (int i = 1; i < warp_num; i <<= 1) {
            auto temp = g.shfl_down(max, i);
            if (max < temp) max = temp;
        }
        max = g.shfl(max, 0);
        // Scale maps [-max, max] onto the signed num_bits range; 1e-5 guards
        // against a zero divisor for an all-zero token.
        float q_scale_val = (float)(1 << num_bits) / (max * 2 + 1e-5);
        float high_q = (float)((1 << (num_bits - 1)) - 1);
        float low_q = (float)(-((1 << (num_bits - 1))));
        int offset = (bid)*token_size;
        for (int i = 0; i < reg_count; i++) {
            group_index = i * blockDim.x + threadIdx.x;
            if (group_index < token_size) {
                float4 q_data = data[i];
                float4 q_data_int;
                // Truncate toward zero onto the integer grid (elements are
                // handled in x, y, w, z order throughout this loop body).
                q_data_int.x = (float)((int)(q_data.x * q_scale_val));
                q_data_int.y = (float)((int)(q_data.y * q_scale_val));
                q_data_int.w = (float)((int)(q_data.w * q_scale_val));
                q_data_int.z = (float)((int)(q_data.z * q_scale_val));
                // Stochastic rounding
                float4 rand = curand_uniform4(&state);
                // Truncation error rescaled into [0, 1).
                float q_error[4];
                q_error[0] = abs(q_data.x - (q_data_int.x / q_scale_val)) * q_scale_val;
                q_error[1] = abs(q_data.y - (q_data_int.y / q_scale_val)) * q_scale_val;
                q_error[2] = abs(q_data.w - (q_data_int.w / q_scale_val)) * q_scale_val;
                q_error[3] = abs(q_data.z - (q_data_int.z / q_scale_val)) * q_scale_val;
                // Round away from zero with probability q_error, staying
                // strictly inside (low_q, high_q).
                q_data_int.x =
                    (rand.x < q_error[0] && q_data_int.x > low_q && q_data_int.x < high_q)
                        ? (q_data_int.x + (q_data.x > 0 ? 1 : -1))
                        : q_data_int.x;
                q_data_int.y =
                    (rand.y < q_error[1] && q_data_int.y > low_q && q_data_int.y < high_q)
                        ? (q_data_int.y + (q_data.y > 0 ? 1 : -1))
                        : q_data_int.y;
                q_data_int.w =
                    (rand.w < q_error[2] && q_data_int.w > low_q && q_data_int.w < high_q)
                        ? (q_data_int.w + (q_data.w > 0 ? 1 : -1))
                        : q_data_int.w;
                q_data_int.z =
                    (rand.z < q_error[3] && q_data_int.z > low_q && q_data_int.z < high_q)
                        ? (q_data_int.z + (q_data.z > 0 ? 1 : -1))
                        : q_data_int.z;
                // Dequantize in place.
                q_data_int.x /= q_scale_val;
                q_data_int.y /= q_scale_val;
                q_data_int.w /= q_scale_val;
                q_data_int.z /= q_scale_val;
                vals_cast[group_index + offset] = q_data_int;
            }
        }
    }
}
// Host-side launcher for the symmetric stochastic-rounding quantizer (CUDA).
// One 1024-thread block per group; the kernel consumes the buffer in
// float4/float2 vector units, hence the /4 on the per-group element count.
template <typename T>
void launch_sr_quantize_kernel(T* vals,
                               int total_count,
                               int group_num,
                               int num_bits,
                               cudaStream_t stream)
{
    dim3 grid_dim(group_num);
    dim3 block_dim(1024);
    // Each thread consumes this many random draws; advance the shared Philox
    // offset so subsequent launches see fresh randomness.
    const uint64_t offset_inc = total_count / grid_dim.x / block_dim.x;
    const std::pair<uint64_t, uint64_t> rng_seed =
        Context::Instance().IncrementOffset(offset_inc);
    const int vec_group_size = (total_count / group_num) / 4;
    sr_quantize_kernel<<<grid_dim, block_dim, 0, stream>>>(
        vals, vec_group_size, group_num, num_bits, rng_seed);
}
template void launch_sr_quantize_kernel(float* vals,
                                        int total_count,
                                        int group_num,
                                        int num_bits,
                                        cudaStream_t stream);
template void launch_sr_quantize_kernel(__half* vals,
                                        int total_count,
                                        int group_num,
                                        int num_bits,
                                        cudaStream_t stream);
// Asymmetric round-to-nearest fake-quantization of __half data (CUDA path;
// requires SM70+ or HIP, guarded below). One thread block per group: find
// the group's [min, max], map each value onto the unsigned `num_bits` grid
// relative to min with round-to-nearest, then dequantize (x_q * scale + min)
// and store back in place. Data is cached in a bounded float2[MAX_REG]
// register array (four halves per entry).
__global__ void quantize_kernel_asym(__half* vals, int group_size, int num_bits)
{
#if __CUDA_ARCH__ >= 700 || defined(__HIP_PLATFORM_HCC__)
    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
    int gid = threadIdx.x >> 5;      // warp index within the block
    int lane = threadIdx.x & 0x1f;   // lane index within the warp
    int warp_num = blockDim.x >> 5;  // warps per block
    int id = threadIdx.x;
    float2* vals_cast = reinterpret_cast<float2*>(vals);
    float2 data[MAX_REG];  // register cache: one float2 == four __half values
    int group_id = blockIdx.x;
    {
        int group_index = id;
        int reg_count = 0;
        int offset = group_id * group_size;
        float max = -10000.0;
        float min = 10000.0;
        // Pass 1: cache the group (bounded by MAX_REG) and track min/max.
        while (group_index < group_size && reg_count < MAX_REG) {
            data[reg_count] = vals_cast[offset + group_index];
            __half* data_h = reinterpret_cast<__half*>(&data[reg_count]);
            if (((float)data_h[0]) > max) max = (float)data_h[0];
            if (((float)data_h[1]) > max) max = (float)data_h[1];
            if (((float)data_h[2]) > max) max = (float)data_h[2];
            if (((float)data_h[3]) > max) max = (float)data_h[3];
            if (((float)data_h[0]) < min) min = (float)data_h[0];
            if (((float)data_h[1]) < min) min = (float)data_h[1];
            if (((float)data_h[2]) < min) min = (float)data_h[2];
            if (((float)data_h[3]) < min) min = (float)data_h[3];
            group_index += blockDim.x;
            reg_count++;
        }
        // Intra-warp butterfly reductions for max and min.
#pragma unroll
        for (int i = 1; i < WARP_SIZE; i <<= 1) {
            auto temp = g.shfl_xor(max, i);
            if (max < temp) max = temp;
        }
#pragma unroll
        for (int i = 1; i < WARP_SIZE; i <<= 1) {
            auto temp = g.shfl_xor(min, i);
            if (min > temp) min = temp;
        }
        // Cross-warp reductions through shared memory; results broadcast
        // from lane 0 below.
        __shared__ float partialMax[WARP_SIZE];
        __shared__ float partialMin[WARP_SIZE];
        if (lane == 0) partialMax[gid] = max;
        if (lane == 0) partialMin[gid] = min;
        b.sync();
        if (lane < warp_num) max = partialMax[lane];
        if (lane < warp_num) min = partialMin[lane];
#pragma unroll
        for (int i = 1; i < warp_num; i <<= 1) {
            auto temp = g.shfl_down(max, i);
            if (max < temp) max = temp;
        }
#pragma unroll
        for (int i = 1; i < warp_num; i <<= 1) {
            auto temp = g.shfl_down(min, i);
            if (min > temp) min = temp;
        }
        max = g.shfl(max, 0);
        min = g.shfl(min, 0);
        // Step size over [min, max] for 2^num_bits levels; 1e-5 guards
        // against a zero range.
        float q_scale = ((max - min) + 1e-5) / (float)(1 << num_bits);
        float q_scale_inv = 1 / q_scale;
        // Pass 2: quantize (round-to-nearest against min), dequantize, and
        // store back in place.
        for (int i = 0; i < reg_count; i++) {
            group_index = i * blockDim.x + id;
            if (group_index < group_size) {
                __half2* data_h = reinterpret_cast<__half2*>(&data[i]);
                float2 q_data[2];
                q_data[0] = __half22float2(data_h[0]);
                q_data[1] = __half22float2(data_h[1]);
                float2 q_data_int[2];
                q_data_int[0].x = roundf((q_data[0].x - min) * q_scale_inv);
                q_data_int[0].y = roundf((q_data[0].y - min) * q_scale_inv);
                q_data_int[1].x = roundf((q_data[1].x - min) * q_scale_inv);
                q_data_int[1].y = roundf((q_data[1].y - min) * q_scale_inv);
                q_data_int[0].x = q_data_int[0].x * q_scale + min;
                q_data_int[0].y = q_data_int[0].y * q_scale + min;
                q_data_int[1].x = q_data_int[1].x * q_scale + min;
                q_data_int[1].y = q_data_int[1].y * q_scale + min;
                data_h[0] = __float22half2_rn(q_data_int[0]);
                data_h[1] = __float22half2_rn(q_data_int[1]);
                vals_cast[offset + group_index] = data[i];
            }
        }
    }
#endif
}
// Asymmetric fake-quantization of a float buffer, one thread block per group.
// group_size counts float4 units (4 floats each); vals is quantized in place
// to (1 << num_bits) uniform levels spanning the group's observed [min, max].
__global__ void quantize_kernel_asym(float* vals, int group_size, int num_bits)
{
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
int gid = threadIdx.x >> 5;  // warp index within the block
int lane = threadIdx.x & 0x1f;  // lane index within the warp
int warp_num = blockDim.x >> 5;  // warps per block
int id = threadIdx.x;
// Vectorized view: each float4 load moves 4 floats at once.
float4* vals_cast = reinterpret_cast<float4*>(vals);
float4 data[MAX_REG];  // per-thread register cache of loaded values
int bid = blockIdx.x;
int group_index = bid * group_size + id;
int reg_count = 0;
float max = -10000.0;
float min = 10000.0;
// Phase 1: strided load into registers while tracking thread-local min/max.
while (id < group_size && reg_count < MAX_REG) {
float4 data_reg = vals_cast[group_index];
data[reg_count] = data_reg;
if (data_reg.x > max) max = data_reg.x;
if (data_reg.y > max) max = data_reg.y;
if (data_reg.w > max) max = data_reg.w;
if (data_reg.z > max) max = data_reg.z;
if (data_reg.x < min) min = data_reg.x;
if (data_reg.y < min) min = data_reg.y;
if (data_reg.w < min) min = data_reg.w;
if (data_reg.z < min) min = data_reg.z;
group_index += blockDim.x;
id += blockDim.x;
reg_count++;
}
// Restore id; it was advanced as the loop cursor above.
id = threadIdx.x;
// Phase 2: butterfly (xor-shuffle) reduction of max and min within each warp.
#pragma unroll
for (int i = 1; i < WARP_SIZE; i <<= 1) {
auto temp = g.shfl_xor(max, i);
if (max < temp) max = temp;
}
#pragma unroll
for (int i = 1; i < WARP_SIZE; i <<= 1) {
auto temp = g.shfl_xor(min, i);
if (min > temp) min = temp;
}
// Phase 3: combine per-warp partials via shared memory, then shfl_down so
// lane 0 holds the block-wide extrema; broadcast back to all lanes.
__shared__ float partialMax[WARP_SIZE];
__shared__ float partialMin[WARP_SIZE];
if (lane == 0) partialMax[gid] = max;
if (lane == 0) partialMin[gid] = min;
b.sync();
if (lane < warp_num) max = partialMax[lane];
if (lane < warp_num) min = partialMin[lane];
#pragma unroll
for (int i = 1; i < warp_num; i <<= 1) {
auto temp = g.shfl_down(max, i);
if (max < temp) max = temp;
}
#pragma unroll
for (int i = 1; i < warp_num; i <<= 1) {
auto temp = g.shfl_down(min, i);
if (min > temp) min = temp;
}
max = g.shfl(max, 0);
min = g.shfl(min, 0);
// Asymmetric step size covering [min, max] with 2^num_bits levels
// (the 1e-5 keeps the scale nonzero for constant-valued groups).
float q_scale = ((max - min) + 1e-5) / (float)(1 << num_bits);
float q_scale_inv = 1 / q_scale;
// Phase 4: quantize (round to nearest level) then dequantize in place.
for (int i = 0; i < reg_count; i++) {
group_index = i * blockDim.x + id;
if (group_index < group_size) {
float4 q_data;
q_data = data[i];
float4 q_data_int;
q_data_int.x = roundf((q_data.x - min) * q_scale_inv);
q_data_int.y = roundf((q_data.y - min) * q_scale_inv);
q_data_int.w = roundf((q_data.w - min) * q_scale_inv);
q_data_int.z = roundf((q_data.z - min) * q_scale_inv);
q_data.x = q_data_int.x * q_scale + min;
q_data.y = q_data_int.y * q_scale + min;
q_data.w = q_data_int.w * q_scale + min;
q_data.z = q_data_int.z * q_scale + min;
vals_cast[group_index + bid * group_size] = q_data;
}
}
}
// Host launcher for asymmetric (min/max) fake-quantization.
// Grid layout: one 1024-thread block per group; the kernel's group_size
// argument is in vector-width-4 units, hence the divide by 4.
template <typename T>
void launch_quantize_kernel_asym(T* vals,
                                 int total_count,
                                 int group_num,
                                 int num_bits,
                                 cudaStream_t stream)
{
    dim3 grid_dim(group_num);
    dim3 block_dim(1024);
    const int vec_elems_per_group = (total_count / group_num) / 4;
    quantize_kernel_asym<<<grid_dim, block_dim, 0, stream>>>(
        vals, vec_elems_per_group, num_bits);
}
template void launch_quantize_kernel_asym(float* vals,
                                          int total_count,
                                          int group_num,
                                          int num_bits,
                                          cudaStream_t stream);
template void launch_quantize_kernel_asym(__half* vals,
                                          int total_count,
                                          int group_num,
                                          int num_bits,
                                          cudaStream_t stream);
// Asymmetric quantization of a __half buffer with stochastic rounding:
// values are truncated to the lower level, then bumped up one level with
// probability equal to the truncation error (Philox-based uniforms).
// token_size counts float2 units (4 halves each); one block per token.
// requires SM70+ (or HIP) for the cooperative-groups tile intrinsics.
// NOTE(review): the load loop bounds reg_count only by `tid < token_size`;
// data_low/data_high hold 128 entries, so this appears to assume
// token_size <= 128 * blockDim.x — confirm with callers.
__global__ void sr_quantize_kernel_asym(__half* vals,
int token_size,
int token_num,
int num_bits,
std::pair<uint64_t, uint64_t> seed)
{
#if __CUDA_ARCH__ >= 700 || defined(__HIP_PLATFORM_HCC__)
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
int gid = threadIdx.x >> 5;  // warp index within the block
int lane = threadIdx.x & 0x1f;  // lane index within the warp
int warp_num = blockDim.x >> 5;  // warps per block
int idx = blockIdx.x * blockDim.x + threadIdx.x;  // unique RNG subsequence id
float2* vals_cast = reinterpret_cast<float2*>(vals);
// Register caches for the two __half2 halves of each float2 load.
__half2 data_low[128];
__half2 data_high[128];
int bid = blockIdx.x;
curandStatePhilox4_32_10_t state;
curand_init(seed.first, idx, seed.second, &state);
unsigned int tid = threadIdx.x;
int reg_count = 0;
int offset = bid * token_size;
int group_index = bid * token_size + tid;
int total_count = token_size * token_num;
if (group_index < total_count) {
float min = 10000.0;
float max = -10000.0;
// Phase 1: strided load of this token's data while tracking thread-local
// min/max over all 4 halves of each load.
while (tid < token_size) {
float2 data = vals_cast[offset + tid];
__half2* data_h = reinterpret_cast<__half2*>(&data);
data_low[reg_count] = data_h[0];
data_high[reg_count] = data_h[1];
float2 data_f[2];
data_f[0] = __half22float2(data_h[0]);
data_f[1] = __half22float2(data_h[1]);
if (((float)data_f[0].x) > max) max = (float)data_f[0].x;
if (((float)data_f[0].y) > max) max = (float)data_f[0].y;
if (((float)data_f[1].x) > max) max = (float)data_f[1].x;
if (((float)data_f[1].y) > max) max = (float)data_f[1].y;
if (((float)data_f[0].x) < min) min = (float)data_f[0].x;
if (((float)data_f[0].y) < min) min = (float)data_f[0].y;
if (((float)data_f[1].x) < min) min = (float)data_f[1].x;
if (((float)data_f[1].y) < min) min = (float)data_f[1].y;
tid += blockDim.x;
reg_count++;
}
// Phase 2: butterfly (xor-shuffle) reduction of max and min within each warp.
#pragma unroll
for (int i = 1; i < WARP_SIZE; i <<= 1) {
auto temp = g.shfl_xor(max, i);
if (max < temp) max = temp;
}
#pragma unroll
for (int i = 1; i < WARP_SIZE; i <<= 1) {
auto temp = g.shfl_xor(min, i);
if (min > temp) min = temp;
}
// Phase 3: combine per-warp partials via shared memory; shfl_down leaves the
// block-wide extrema on lane 0, which are then broadcast to every lane.
__shared__ float partialMax[WARP_SIZE];
__shared__ float partialMin[WARP_SIZE];
if (lane == 0) partialMax[gid] = max;
if (lane == 0) partialMin[gid] = min;
b.sync();
if (lane < warp_num) max = partialMax[lane];
if (lane < warp_num) min = partialMin[lane];
#pragma unroll
for (int i = 1; i < warp_num; i <<= 1) {
auto temp = g.shfl_down(max, i);
if (max < temp) max = temp;
}
#pragma unroll
for (int i = 1; i < warp_num; i <<= 1) {
auto temp = g.shfl_down(min, i);
if (min > temp) min = temp;
}
max = g.shfl(max, 0);
min = g.shfl(min, 0);
// Asymmetric step size over [min, max]; 1e-5 avoids a zero scale.
float q_scale_val = ((max - min) + 1e-5) / (float)(1 << num_bits);
float q_scale_val_inv = 1 / q_scale_val;
float high_q = (float)((1 << num_bits) - 1);  // top representable level
// Phase 4: truncate to the lower level, stochastically round up, dequantize.
for (int i = 0; i < reg_count; i++) {
int token_index = i * blockDim.x + threadIdx.x;
if (token_index < token_size) {
float2 data_f[2];
data_f[0] = __half22float2(data_low[i]);
data_f[1] = __half22float2(data_high[i]);
float2 q_data_int[2];
// Truncating cast (not round-to-nearest): SR supplies the rounding below.
q_data_int[0].x = (float)((unsigned int)((data_f[0].x - min) * q_scale_val_inv));
q_data_int[0].y = (float)((unsigned int)((data_f[0].y - min) * q_scale_val_inv));
q_data_int[1].x = (float)((unsigned int)((data_f[1].x - min) * q_scale_val_inv));
q_data_int[1].y = (float)((unsigned int)((data_f[1].y - min) * q_scale_val_inv));
// Stochastic rounding
float4 rand = curand_uniform4(&state);
// q_error is the fractional distance to the truncated level, in [0, 1).
float q_error[4];
q_error[0] =
abs(data_f[0].x - ((q_data_int[0].x * q_scale_val) + min)) * q_scale_val_inv;
q_error[1] =
abs(data_f[0].y - ((q_data_int[0].y * q_scale_val) + min)) * q_scale_val_inv;
q_error[2] =
abs(data_f[1].x - ((q_data_int[1].x * q_scale_val) + min)) * q_scale_val_inv;
q_error[3] =
abs(data_f[1].y - ((q_data_int[1].y * q_scale_val) + min)) * q_scale_val_inv;
// Bump to the next level with probability q_error, clamped to high_q.
// (rand.w/rand.z are consumed in swapped order; harmless since the four
// uniforms are i.i.d.)
q_data_int[0].x = (rand.x < q_error[0] && q_data_int[0].x < high_q)
? (q_data_int[0].x + 1)
: q_data_int[0].x;
q_data_int[0].y = (rand.y < q_error[1] && q_data_int[0].y < high_q)
? (q_data_int[0].y + 1)
: q_data_int[0].y;
q_data_int[1].x = (rand.w < q_error[2] && q_data_int[1].x < high_q)
? (q_data_int[1].x + 1)
: q_data_int[1].x;
q_data_int[1].y = (rand.z < q_error[3] && q_data_int[1].y < high_q)
? (q_data_int[1].y + 1)
: q_data_int[1].y;
// Dequantize and write back in place.
data_f[0].x = q_data_int[0].x * q_scale_val + min;
data_f[0].y = q_data_int[0].y * q_scale_val + min;
data_f[1].x = q_data_int[1].x * q_scale_val + min;
data_f[1].y = q_data_int[1].y * q_scale_val + min;
float2 result;
__half2* result_h = reinterpret_cast<__half2*>(&result);
result_h[0] = __float22half2_rn(data_f[0]);
result_h[1] = __float22half2_rn(data_f[1]);
vals_cast[offset + token_index] = result;
}
}
}
#endif
}
// Asymmetric quantization of a float buffer with stochastic rounding:
// values are truncated to the lower level, then bumped up one level with
// probability equal to the truncation error (Philox-based uniforms).
// token_size counts float4 units (4 floats each); one block per token.
// NOTE(review): the load loop bounds reg_count only by `tid < token_size`;
// data[] holds 128 entries, so this appears to assume
// token_size <= 128 * blockDim.x — confirm with callers.
__global__ void sr_quantize_kernel_asym(float* vals,
int token_size,
int token_num,
int num_bits,
std::pair<uint64_t, uint64_t> seed)
{
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
int gid = threadIdx.x >> 5;  // warp index within the block
int lane = threadIdx.x & 0x1f;  // lane index within the warp
int warp_num = blockDim.x >> 5;  // warps per block
int id = threadIdx.x;
int idx = blockIdx.x * blockDim.x + id;  // unique RNG subsequence id
float4* vals_cast = reinterpret_cast<float4*>(vals);
float4 data[128];  // per-thread register cache of loaded values
int bid = blockIdx.x;
int tid = threadIdx.x;
curandStatePhilox4_32_10_t state;
curand_init(seed.first, idx, seed.second, &state);
int group_index = bid * token_size + threadIdx.x;
int reg_count = 0;
int total_count = token_size * token_num;
if (group_index < total_count) {
float min = 10000.0;
float max = -10000.0;
// Phase 1: strided load of this token's data while tracking thread-local
// min/max over all 4 components of each float4.
while (tid < token_size) {
float4 data_reg = vals_cast[group_index];
data[reg_count] = data_reg;
if (data_reg.x > max) max = data_reg.x;
if (data_reg.y > max) max = data_reg.y;
if (data_reg.w > max) max = data_reg.w;
if (data_reg.z > max) max = data_reg.z;
if (data_reg.x < min) min = data_reg.x;
if (data_reg.y < min) min = data_reg.y;
if (data_reg.w < min) min = data_reg.w;
if (data_reg.z < min) min = data_reg.z;
group_index += blockDim.x;
tid += blockDim.x;
reg_count++;
}
// Phase 2: butterfly (xor-shuffle) reduction of max and min within each warp.
#pragma unroll
for (int i = 1; i < WARP_SIZE; i <<= 1) {
auto temp = g.shfl_xor(max, i);
if (max < temp) max = temp;
}
#pragma unroll
for (int i = 1; i < WARP_SIZE; i <<= 1) {
auto temp = g.shfl_xor(min, i);
if (min > temp) min = temp;
}
// Phase 3: combine per-warp partials via shared memory; shfl_down leaves the
// block-wide extrema on lane 0, which are then broadcast to every lane.
__shared__ float partialMax[WARP_SIZE];
__shared__ float partialMin[WARP_SIZE];
if (lane == 0) partialMax[gid] = max;
if (lane == 0) partialMin[gid] = min;
b.sync();
if (lane < warp_num) max = partialMax[lane];
if (lane < warp_num) min = partialMin[lane];
#pragma unroll
for (int i = 1; i < warp_num; i <<= 1) {
auto temp = g.shfl_down(max, i);
if (max < temp) max = temp;
}
#pragma unroll
for (int i = 1; i < warp_num; i <<= 1) {
auto temp = g.shfl_down(min, i);
if (min > temp) min = temp;
}
max = g.shfl(max, 0);
min = g.shfl(min, 0);
// Asymmetric step size over [min, max]; 1e-5 avoids a zero scale.
float q_scale_val = ((max - min) + 1e-5) / (float)(1 << num_bits);
float high_q = (float)((1 << num_bits) - 1);  // top representable level
int offset = (bid)*token_size;
// Phase 4: truncate to the lower level, stochastically round up, dequantize.
for (int i = 0; i < reg_count; i++) {
group_index = i * blockDim.x + threadIdx.x;
if (group_index < token_size) {
float4 q_data = data[i];
float4 q_data_int;
// Truncating cast (not round-to-nearest): SR supplies the rounding below.
q_data_int.x = (float)((int)((q_data.x - min) / q_scale_val));
q_data_int.y = (float)((int)((q_data.y - min) / q_scale_val));
q_data_int.w = (float)((int)((q_data.w - min) / q_scale_val));
q_data_int.z = (float)((int)((q_data.z - min) / q_scale_val));
// Stochastic rounding
float4 rand = curand_uniform4(&state);
// q_error is the fractional distance to the truncated level, in [0, 1).
float q_error[4];
q_error[0] = abs(q_data.x - ((q_data_int.x * q_scale_val) + min)) / q_scale_val;
q_error[1] = abs(q_data.y - ((q_data_int.y * q_scale_val) + min)) / q_scale_val;
q_error[2] = abs(q_data.w - ((q_data_int.w * q_scale_val) + min)) / q_scale_val;
q_error[3] = abs(q_data.z - ((q_data_int.z * q_scale_val) + min)) / q_scale_val;
// Bump to the next level with probability q_error, clamped to high_q.
q_data_int.x = (rand.x < q_error[0] && q_data_int.x < high_q) ? (q_data_int.x + 1)
: q_data_int.x;
q_data_int.y = (rand.y < q_error[1] && q_data_int.y < high_q) ? (q_data_int.y + 1)
: q_data_int.y;
q_data_int.w = (rand.w < q_error[2] && q_data_int.w < high_q) ? (q_data_int.w + 1)
: q_data_int.w;
q_data_int.z = (rand.z < q_error[3] && q_data_int.z < high_q) ? (q_data_int.z + 1)
: q_data_int.z;
// Dequantize and write back in place.
q_data_int.x = q_data_int.x * q_scale_val + min;
q_data_int.y = q_data_int.y * q_scale_val + min;
q_data_int.w = q_data_int.w * q_scale_val + min;
q_data_int.z = q_data_int.z * q_scale_val + min;
vals_cast[group_index + offset] = q_data_int;
}
}
}
}
// Host launcher for stochastic-rounding *asymmetric* quantization.
// Grid layout: one 1024-thread block per group; each thread handles
// (total_count / group_num) / 4 vector-width-4 elements.
//
// Parameters:
//   vals        - device buffer quantized in place (float or __half)
//   total_count - total number of scalar elements in vals
//   group_num   - number of independent quantization groups (== grid size)
//   num_bits    - quantization bit-width
//   stream      - CUDA stream the kernel launch is enqueued on
template <typename T>
void launch_sr_quantize_kernel_asym(T* vals,
                                    int total_count,
                                    int group_num,
                                    int num_bits,
                                    cudaStream_t stream)
{
    dim3 block_dim(1024);
    dim3 grid_dim(group_num);
    // Advance the global Philox offset by the per-thread draw count so
    // successive launches consume fresh random numbers.
    uint64_t inc = total_count / grid_dim.x / block_dim.x;
    std::pair<uint64_t, uint64_t> seed = Context::Instance().IncrementOffset(inc);
    // BUG FIX: this previously launched the symmetric `sr_quantize_kernel`,
    // so the _asym entry point silently performed symmetric quantization.
    sr_quantize_kernel_asym<<<grid_dim, block_dim, 0, stream>>>(
        vals, (total_count / group_num) / 4, group_num, num_bits, seed);
}
template void launch_sr_quantize_kernel_asym(float* vals,
                                             int total_count,
                                             int group_num,
                                             int num_bits,
                                             cudaStream_t stream);
template void launch_sr_quantize_kernel_asym(__half* vals,
                                             int total_count,
                                             int group_num,
                                             int num_bits,
                                             cudaStream_t stream);
|
7ca508f8f7813d538e900a3b862b5bcba5a331db.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THHNumerics.cuh>
#include "THH/THH.h"
#include "batch_norm.h"
#include <hip/hip_runtime.h>
#include "compat.h"
// Abort the process with a diagnostic if any prior (possibly asynchronous)
// HIP call left a sticky error; `msg` identifies the call site.
// NOTE: hipGetLastError() also clears the error state as a side effect.
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, hipGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
// Smallest multiple of `multiple` that is >= x (assumes multiple > 0).
static size_t round_up_to_multiple(size_t x, int multiple) {
    const size_t m = static_cast<size_t>(multiple);
    const size_t bumped = x + m - 1;
    return bumped - (bumped % m);
}
// TODO: Stop manually allocating CUDA memory; allocate an ATen byte
// tensor instead.
// RAII owner of a raw THC CUDA allocation used as kernel scratch space.
// BUG FIX: the move constructor/assignment were `= default`, which copies the
// raw pointer without nulling the source — the destructor then frees the same
// allocation twice (and defaulted move-assign also leaked the destination's
// old buffer). Ownership is now transferred explicitly.
struct Workspace {
    Workspace(size_t size) : size(size), data(NULL) {
        data = THCudaMalloc(at::globalContext().lazyInitCUDA(), size);
    }
    Workspace(const Workspace&) = delete;
    Workspace& operator=(const Workspace&) = delete;
    // Steal the allocation and leave `other` empty so its destructor is a no-op.
    Workspace(Workspace&& other) noexcept : size(other.size), data(other.data) {
        other.data = NULL;
        other.size = 0;
    }
    Workspace& operator=(Workspace&& other) noexcept {
        if (this != &other) {
            // Release any buffer we currently own before taking other's.
            if (data) {
                THCudaFree(at::globalContext().lazyInitCUDA(), data);
            }
            size = other.size;
            data = other.data;
            other.data = NULL;
            other.size = 0;
        }
        return *this;
    }
    ~Workspace() {
        if (data) {
            THCudaFree(at::globalContext().lazyInitCUDA(), data);
        }
    }
    size_t size;
    void* data;
};
// Return {y}
// Training-mode NHWC batch-norm forward pass (half-precision activations,
// float statistics/parameters). Returns the normalized output tensor y with
// the same NHWC shape as x. The minibatch_* tensors receive this batch's
// statistics; running_* are updated via the NhwcBatchNorm wrapper.
// NOTE(review): `bn` is heap-allocated and never deleted — leaks one
// NhwcBatchNorm per call; consider std::unique_ptr.
at::Tensor nhwc_bn_fwd_train(
const at::Tensor& x,
const at::Tensor& scale,
const at::Tensor& bias,
const at::Tensor& running_mean,
const at::Tensor& running_inv_var,
const at::Tensor& minibatch_mean,
const at::Tensor& minibatch_inv_var,
const at::Tensor& ret_cta,
const float momentum,
const float epsilon,
const bool fuse_relu,
void * my_data,
void * pair_data,
void * pair_data2,
void * pair_data3,
const int bn_group,
const at::Tensor& magic_tensor,
const int occupancy,
const int grid_dim_x,
const bool coop) {
// Input is assumed NHWC (channels-last) layout.
const int N = x.size(0);
const int H = x.size(1);
const int W = x.size(2);
const int C = x.size(3);
// generating new magic number and use that for sync
// (8-bit counter used by the multi-GPU synchronization protocol)
int* magic = magic_tensor.DATA_PTR<int>();
*magic = (*magic + 1) & 0xff;
// Allocate output tensor
at::Tensor y = at::empty({N, H, W, C}, x.options());
// Create wrapper
NhwcBatchNorm *bn = new NhwcBatchNorm();
bn->setInputDescriptor(CUDNN_TENSOR_NHWC, CUDNN_DATA_HALF, N, C, H, W, bn_group);
bn->setOutputDescriptor(CUDNN_TENSOR_NHWC, CUDNN_DATA_HALF, N, C, H, W);
bn->setConstants(momentum, epsilon);
// set pointers within the wrapper
bn->setInputOutputPointers(x.DATA_PTR<at::Half>(),
nullptr,
y.DATA_PTR<at::Half>(),
nullptr);
bn->setWeightPointers({scale.DATA_PTR<float>(), bias.DATA_PTR<float>()}, {nullptr, nullptr});
bn->setParameterPointers({running_mean.DATA_PTR<float>(), running_inv_var.DATA_PTR<float>()});
// deal with workspace(s)
auto workspace_bytes = bn->numWorkspaceBytes();
// We'll create explicit tensors for the first 2 workspace ptrs, then allocate & offset
// an allocated workspace for the others
size_t total_workspace_bytes = 0;
std::vector<size_t> workspace_offsets;
// Workspaces 3..N are packed into one allocation, each 512-byte aligned.
for (auto index = 3; index < workspace_bytes.size(); ++index) {
total_workspace_bytes = round_up_to_multiple(total_workspace_bytes, 512);
workspace_offsets.push_back(total_workspace_bytes);
auto alloc_bytes = workspace_bytes[index];
total_workspace_bytes += alloc_bytes;
}
// Allocate the workspace
Workspace ws(total_workspace_bytes);
// Slots 0/1 are the per-minibatch statistics buffers supplied by the caller.
std::vector<void *> workspace;
workspace.push_back(minibatch_mean.DATA_PTR<float>());
workspace.push_back(minibatch_inv_var.DATA_PTR<float>());
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream();
// Slot 2 is the retired-CTA counter buffer, provided via ret_cta.
const int retired_cta_bytes = workspace_bytes[2];
void* retired_ctas = ret_cta.DATA_PTR<uint8_t>();
assert(ret_cta.size(0)>=retired_cta_bytes);
workspace.push_back(retired_ctas);
for (auto index = 3; index < workspace_bytes.size(); ++index) {
void *ptr = reinterpret_cast<uint8_t*>(ws.data) + workspace_offsets[index-3];
workspace.push_back(ptr);
}
bn->setWorkspacePointers(workspace, workspace_bytes);
// Don't fuse in ReLU for now at least
bn->fwd(stream, fuse_relu, my_data, pair_data, pair_data2, pair_data3, bn_group, *magic, occupancy, grid_dim_x, coop);
return y;
}
// Inference-mode NHWC batch-norm forward pass: normalizes x with the
// provided running statistics (no statistics are updated). Returns y with
// the same NHWC shape as x.
// NOTE(review): `bn` is heap-allocated and never deleted — leaks per call.
at::Tensor nhwc_bn_fwd_eval(
const at::Tensor& x,
const at::Tensor& scale,
const at::Tensor& bias,
const at::Tensor& running_mean,
const at::Tensor& running_inv_var,
const at::Tensor& ret_cta,
const int bn_group,
const float momentum,
const float epsilon,
const bool fuse_relu) {
// Input is assumed NHWC (channels-last) layout.
const int N = x.size(0);
const int H = x.size(1);
const int W = x.size(2);
const int C = x.size(3);
// Allocate output tensor
at::Tensor y = at::empty({N, H, W, C}, x.options());
// Create wrapper
NhwcBatchNorm *bn = new NhwcBatchNorm();
bn->setInputDescriptor(CUDNN_TENSOR_NHWC, CUDNN_DATA_HALF, N, C, H, W, bn_group);
bn->setOutputDescriptor(CUDNN_TENSOR_NHWC, CUDNN_DATA_HALF, N, C, H, W);
bn->setConstants(momentum, epsilon);
// set pointers within the wrapper
bn->setInputOutputPointers(x.DATA_PTR<at::Half>(),
nullptr,
y.DATA_PTR<at::Half>(),
nullptr);
bn->setWeightPointers({scale.DATA_PTR<float>(), bias.DATA_PTR<float>()}, {nullptr, nullptr});
bn->setParameterPointers({running_mean.DATA_PTR<float>(), running_inv_var.DATA_PTR<float>()});
// deal with workspace(s)
auto workspace_bytes = bn->numWorkspaceBytes();
// We'll create explicit tensors for the first 2 workspace ptrs, then allocate & offset
// an allocated workspace for the others
size_t total_workspace_bytes = 0;
std::vector<size_t> workspace_offsets;
// Workspaces 3..N are packed into one allocation, each 512-byte aligned.
for (auto index = 3; index < workspace_bytes.size(); ++index) {
total_workspace_bytes = round_up_to_multiple(total_workspace_bytes, 512);
workspace_offsets.push_back(total_workspace_bytes);
auto alloc_bytes = workspace_bytes[index];
total_workspace_bytes += alloc_bytes;
}
// Allocate the workspace
Workspace ws(total_workspace_bytes);
// Inference needs no minibatch statistics buffers, hence the two nullptrs.
std::vector<void *> workspace;
workspace.push_back(nullptr);
workspace.push_back(nullptr);
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream();
// Slot 2 is the retired-CTA counter buffer, provided via ret_cta.
const int retired_cta_bytes = workspace_bytes[2];
void* retired_ctas = ret_cta.DATA_PTR<uint8_t>();
assert(ret_cta.size(0)>=retired_cta_bytes);
workspace.push_back(retired_ctas);
for (auto index = 3; index < workspace_bytes.size(); ++index) {
void *ptr = reinterpret_cast<uint8_t*>(ws.data) + workspace_offsets[index-3];
workspace.push_back(ptr);
}
bn->setWorkspacePointers(workspace, workspace_bytes);
// Don't fuse in ReLU for now at least
bn->fwdInference(stream, fuse_relu);
return y;
}
// NHWC batch-norm backward pass: given input x and upstream gradient dy,
// returns {x_grad, scale_grad, bias_grad}. Uses the minibatch statistics
// produced by the matching forward pass.
// NOTE(review): `bn` is heap-allocated and never deleted — leaks per call.
std::vector<at::Tensor> nhwc_bn_bwd(
const at::Tensor& x,
const at::Tensor& dy,
const at::Tensor& scale,
const at::Tensor& bias,
const at::Tensor& running_mean,
const at::Tensor& running_inv_var,
const at::Tensor& minibatch_mean,
const at::Tensor& minibatch_inv_var,
const at::Tensor& ret_cta,
const float momentum,
const float epsilon,
const bool fuse_relu,
void * my_data,
void * pair_data,
void * pair_data2,
void * pair_data3,
const int bn_group,
const at::Tensor& magic_tensor,
const int occupancy,
const int grid_dim_x,
const bool coop) {
// shape
// Input is assumed NHWC (channels-last) layout.
const int N = x.size(0);
const int H = x.size(1);
const int W = x.size(2);
const int C = x.size(3);
// generating new magic number and use that for sync
// (8-bit counter used by the multi-GPU synchronization protocol)
int* magic = magic_tensor.DATA_PTR<int>();
*magic = (*magic + 1) & 0xff;
// outputs
at::Tensor x_grad, scale_grad, bias_grad;
// Allocate outputs
x_grad = at::empty_like(x);
scale_grad = at::empty_like(scale);
bias_grad = at::empty_like(bias);
// Create wrapper
NhwcBatchNorm *bn = new NhwcBatchNorm();
bn->setInputDescriptor(CUDNN_TENSOR_NHWC, CUDNN_DATA_HALF, N, C, H, W, bn_group);
bn->setOutputDescriptor(CUDNN_TENSOR_NHWC, CUDNN_DATA_HALF, N, C, H, W);
bn->setConstants(momentum, epsilon);
// set pointers within the wrapper
bn->setInputOutputPointers(x.DATA_PTR<at::Half>(),
x_grad.DATA_PTR<at::Half>(),
nullptr,
dy.DATA_PTR<at::Half>());
bn->setWeightPointers({scale.DATA_PTR<float>(), bias.DATA_PTR<float>()}, {scale_grad.DATA_PTR<float>(), bias_grad.DATA_PTR<float>()});
bn->setParameterPointers({running_mean.DATA_PTR<float>(), running_inv_var.DATA_PTR<float>()});
// deal with workspace(s)
auto workspace_bytes = bn->numWorkspaceBytes();
// We'll create explicit tensors for the first 2 workspace ptrs, then allocate & offset
// an allocated workspace for the others
size_t total_workspace_bytes = 0;
std::vector<size_t> workspace_offsets;
// Workspaces 3..N are packed into one allocation, each 512-byte aligned.
for (auto index = 3; index < workspace_bytes.size(); ++index) {
total_workspace_bytes = round_up_to_multiple(total_workspace_bytes, 512);
workspace_offsets.push_back(total_workspace_bytes);
auto alloc_bytes = workspace_bytes[index];
total_workspace_bytes += alloc_bytes;
}
// Allocate the workspace
Workspace ws(total_workspace_bytes);
// Slots 0/1 are the per-minibatch statistics saved by the forward pass.
std::vector<void *> workspace;
workspace.push_back(minibatch_mean.DATA_PTR<float>());
workspace.push_back(minibatch_inv_var.DATA_PTR<float>());
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream();
// Slot 2 is the retired-CTA counter buffer, provided via ret_cta.
const int retired_cta_bytes = workspace_bytes[2];
void* retired_ctas = ret_cta.DATA_PTR<uint8_t>();
assert(ret_cta.size(0)>=retired_cta_bytes);
workspace.push_back(retired_ctas);
for (auto index = 3; index < workspace_bytes.size(); ++index) {
void *ptr = reinterpret_cast<uint8_t*>(ws.data) + workspace_offsets[index-3];
workspace.push_back(ptr);
}
bn->setWorkspacePointers(workspace, workspace_bytes);
bn->dgrad(stream, fuse_relu, my_data, pair_data, pair_data2, pair_data3, bn_group, *magic, occupancy, grid_dim_x, coop);
return std::vector<at::Tensor>{x_grad, scale_grad, bias_grad};
}
// Shared-memory-driven forward occupancy for the active device
// (the kernels support at most 2 blocks per SM).
int nhwc_bn_fwd_occupancy() {
    int dev = -1;
    hipGetDevice(&dev);
    return NhwcBatchNorm::smem_driven_fwd_occupancy(dev, /*max supported*/ 2);
}
// Shared-memory-driven backward occupancy for the active device
// (the kernels support at most 2 blocks per SM).
int nhwc_bn_bwd_occupancy() {
    int dev = -1;
    hipGetDevice(&dev);
    return NhwcBatchNorm::smem_driven_bwd_occupancy(dev, /*max supported*/ 2);
}
| 7ca508f8f7813d538e900a3b862b5bcba5a331db.cu | #include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THCNumerics.cuh>
#include "THC/THC.h"
#include "batch_norm.h"
#include <cuda.h>
#include "compat.h"
// Abort the process with a diagnostic if any prior (possibly asynchronous)
// CUDA call left a sticky error; `msg` identifies the call site.
// NOTE: cudaGetLastError() also clears the error state as a side effect.
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
// Smallest multiple of `multiple` that is >= x (assumes multiple > 0).
static size_t round_up_to_multiple(size_t x, int multiple) {
    const size_t m = static_cast<size_t>(multiple);
    const size_t bumped = x + m - 1;
    return bumped - (bumped % m);
}
// TODO: Stop manually allocating CUDA memory; allocate an ATen byte
// tensor instead.
// RAII owner of a raw THC CUDA allocation used as kernel scratch space.
// BUG FIX: the move constructor/assignment were `= default`, which copies the
// raw pointer without nulling the source — the destructor then frees the same
// allocation twice (and defaulted move-assign also leaked the destination's
// old buffer). Ownership is now transferred explicitly.
struct Workspace {
    Workspace(size_t size) : size(size), data(NULL) {
        data = THCudaMalloc(at::globalContext().lazyInitCUDA(), size);
    }
    Workspace(const Workspace&) = delete;
    Workspace& operator=(const Workspace&) = delete;
    // Steal the allocation and leave `other` empty so its destructor is a no-op.
    Workspace(Workspace&& other) noexcept : size(other.size), data(other.data) {
        other.data = NULL;
        other.size = 0;
    }
    Workspace& operator=(Workspace&& other) noexcept {
        if (this != &other) {
            // Release any buffer we currently own before taking other's.
            if (data) {
                THCudaFree(at::globalContext().lazyInitCUDA(), data);
            }
            size = other.size;
            data = other.data;
            other.data = NULL;
            other.size = 0;
        }
        return *this;
    }
    ~Workspace() {
        if (data) {
            THCudaFree(at::globalContext().lazyInitCUDA(), data);
        }
    }
    size_t size;
    void* data;
};
// Return {y}
// Training-mode NHWC batch-norm forward pass (half-precision activations,
// float statistics/parameters). Returns the normalized output tensor y with
// the same NHWC shape as x. The minibatch_* tensors receive this batch's
// statistics; running_* are updated via the NhwcBatchNorm wrapper.
// NOTE(review): `bn` is heap-allocated and never deleted — leaks one
// NhwcBatchNorm per call; consider std::unique_ptr.
at::Tensor nhwc_bn_fwd_train(
const at::Tensor& x,
const at::Tensor& scale,
const at::Tensor& bias,
const at::Tensor& running_mean,
const at::Tensor& running_inv_var,
const at::Tensor& minibatch_mean,
const at::Tensor& minibatch_inv_var,
const at::Tensor& ret_cta,
const float momentum,
const float epsilon,
const bool fuse_relu,
void * my_data,
void * pair_data,
void * pair_data2,
void * pair_data3,
const int bn_group,
const at::Tensor& magic_tensor,
const int occupancy,
const int grid_dim_x,
const bool coop) {
// Input is assumed NHWC (channels-last) layout.
const int N = x.size(0);
const int H = x.size(1);
const int W = x.size(2);
const int C = x.size(3);
// generating new magic number and use that for sync
// (8-bit counter used by the multi-GPU synchronization protocol)
int* magic = magic_tensor.DATA_PTR<int>();
*magic = (*magic + 1) & 0xff;
// Allocate output tensor
at::Tensor y = at::empty({N, H, W, C}, x.options());
// Create wrapper
NhwcBatchNorm *bn = new NhwcBatchNorm();
bn->setInputDescriptor(CUDNN_TENSOR_NHWC, CUDNN_DATA_HALF, N, C, H, W, bn_group);
bn->setOutputDescriptor(CUDNN_TENSOR_NHWC, CUDNN_DATA_HALF, N, C, H, W);
bn->setConstants(momentum, epsilon);
// set pointers within the wrapper
bn->setInputOutputPointers(x.DATA_PTR<at::Half>(),
nullptr,
y.DATA_PTR<at::Half>(),
nullptr);
bn->setWeightPointers({scale.DATA_PTR<float>(), bias.DATA_PTR<float>()}, {nullptr, nullptr});
bn->setParameterPointers({running_mean.DATA_PTR<float>(), running_inv_var.DATA_PTR<float>()});
// deal with workspace(s)
auto workspace_bytes = bn->numWorkspaceBytes();
// We'll create explicit tensors for the first 2 workspace ptrs, then allocate & offset
// an allocated workspace for the others
size_t total_workspace_bytes = 0;
std::vector<size_t> workspace_offsets;
// Workspaces 3..N are packed into one allocation, each 512-byte aligned.
for (auto index = 3; index < workspace_bytes.size(); ++index) {
total_workspace_bytes = round_up_to_multiple(total_workspace_bytes, 512);
workspace_offsets.push_back(total_workspace_bytes);
auto alloc_bytes = workspace_bytes[index];
total_workspace_bytes += alloc_bytes;
}
// Allocate the workspace
Workspace ws(total_workspace_bytes);
// Slots 0/1 are the per-minibatch statistics buffers supplied by the caller.
std::vector<void *> workspace;
workspace.push_back(minibatch_mean.DATA_PTR<float>());
workspace.push_back(minibatch_inv_var.DATA_PTR<float>());
auto stream = at::cuda::getCurrentCUDAStream().stream();
// Slot 2 is the retired-CTA counter buffer, provided via ret_cta.
const int retired_cta_bytes = workspace_bytes[2];
void* retired_ctas = ret_cta.DATA_PTR<uint8_t>();
assert(ret_cta.size(0)>=retired_cta_bytes);
workspace.push_back(retired_ctas);
for (auto index = 3; index < workspace_bytes.size(); ++index) {
void *ptr = reinterpret_cast<uint8_t*>(ws.data) + workspace_offsets[index-3];
workspace.push_back(ptr);
}
bn->setWorkspacePointers(workspace, workspace_bytes);
// Don't fuse in ReLU for now at least
bn->fwd(stream, fuse_relu, my_data, pair_data, pair_data2, pair_data3, bn_group, *magic, occupancy, grid_dim_x, coop);
return y;
}
// Inference-mode NHWC batch-norm forward pass: normalizes x with the
// provided running statistics (no statistics are updated). Returns y with
// the same NHWC shape as x.
// NOTE(review): `bn` is heap-allocated and never deleted — leaks per call.
at::Tensor nhwc_bn_fwd_eval(
const at::Tensor& x,
const at::Tensor& scale,
const at::Tensor& bias,
const at::Tensor& running_mean,
const at::Tensor& running_inv_var,
const at::Tensor& ret_cta,
const int bn_group,
const float momentum,
const float epsilon,
const bool fuse_relu) {
// Input is assumed NHWC (channels-last) layout.
const int N = x.size(0);
const int H = x.size(1);
const int W = x.size(2);
const int C = x.size(3);
// Allocate output tensor
at::Tensor y = at::empty({N, H, W, C}, x.options());
// Create wrapper
NhwcBatchNorm *bn = new NhwcBatchNorm();
bn->setInputDescriptor(CUDNN_TENSOR_NHWC, CUDNN_DATA_HALF, N, C, H, W, bn_group);
bn->setOutputDescriptor(CUDNN_TENSOR_NHWC, CUDNN_DATA_HALF, N, C, H, W);
bn->setConstants(momentum, epsilon);
// set pointers within the wrapper
bn->setInputOutputPointers(x.DATA_PTR<at::Half>(),
nullptr,
y.DATA_PTR<at::Half>(),
nullptr);
bn->setWeightPointers({scale.DATA_PTR<float>(), bias.DATA_PTR<float>()}, {nullptr, nullptr});
bn->setParameterPointers({running_mean.DATA_PTR<float>(), running_inv_var.DATA_PTR<float>()});
// deal with workspace(s)
auto workspace_bytes = bn->numWorkspaceBytes();
// We'll create explicit tensors for the first 2 workspace ptrs, then allocate & offset
// an allocated workspace for the others
size_t total_workspace_bytes = 0;
std::vector<size_t> workspace_offsets;
// Workspaces 3..N are packed into one allocation, each 512-byte aligned.
for (auto index = 3; index < workspace_bytes.size(); ++index) {
total_workspace_bytes = round_up_to_multiple(total_workspace_bytes, 512);
workspace_offsets.push_back(total_workspace_bytes);
auto alloc_bytes = workspace_bytes[index];
total_workspace_bytes += alloc_bytes;
}
// Allocate the workspace
Workspace ws(total_workspace_bytes);
// Inference needs no minibatch statistics buffers, hence the two nullptrs.
std::vector<void *> workspace;
workspace.push_back(nullptr);
workspace.push_back(nullptr);
auto stream = at::cuda::getCurrentCUDAStream().stream();
// Slot 2 is the retired-CTA counter buffer, provided via ret_cta.
const int retired_cta_bytes = workspace_bytes[2];
void* retired_ctas = ret_cta.DATA_PTR<uint8_t>();
assert(ret_cta.size(0)>=retired_cta_bytes);
workspace.push_back(retired_ctas);
for (auto index = 3; index < workspace_bytes.size(); ++index) {
void *ptr = reinterpret_cast<uint8_t*>(ws.data) + workspace_offsets[index-3];
workspace.push_back(ptr);
}
bn->setWorkspacePointers(workspace, workspace_bytes);
// Don't fuse in ReLU for now at least
bn->fwdInference(stream, fuse_relu);
return y;
}
// NHWC batch-norm backward pass: given input x and upstream gradient dy,
// returns {x_grad, scale_grad, bias_grad}. Uses the minibatch statistics
// produced by the matching forward pass.
// NOTE(review): `bn` is heap-allocated and never deleted — leaks per call.
std::vector<at::Tensor> nhwc_bn_bwd(
const at::Tensor& x,
const at::Tensor& dy,
const at::Tensor& scale,
const at::Tensor& bias,
const at::Tensor& running_mean,
const at::Tensor& running_inv_var,
const at::Tensor& minibatch_mean,
const at::Tensor& minibatch_inv_var,
const at::Tensor& ret_cta,
const float momentum,
const float epsilon,
const bool fuse_relu,
void * my_data,
void * pair_data,
void * pair_data2,
void * pair_data3,
const int bn_group,
const at::Tensor& magic_tensor,
const int occupancy,
const int grid_dim_x,
const bool coop) {
// shape
// Input is assumed NHWC (channels-last) layout.
const int N = x.size(0);
const int H = x.size(1);
const int W = x.size(2);
const int C = x.size(3);
// generating new magic number and use that for sync
// (8-bit counter used by the multi-GPU synchronization protocol)
int* magic = magic_tensor.DATA_PTR<int>();
*magic = (*magic + 1) & 0xff;
// outputs
at::Tensor x_grad, scale_grad, bias_grad;
// Allocate outputs
x_grad = at::empty_like(x);
scale_grad = at::empty_like(scale);
bias_grad = at::empty_like(bias);
// Create wrapper
NhwcBatchNorm *bn = new NhwcBatchNorm();
bn->setInputDescriptor(CUDNN_TENSOR_NHWC, CUDNN_DATA_HALF, N, C, H, W, bn_group);
bn->setOutputDescriptor(CUDNN_TENSOR_NHWC, CUDNN_DATA_HALF, N, C, H, W);
bn->setConstants(momentum, epsilon);
// set pointers within the wrapper
bn->setInputOutputPointers(x.DATA_PTR<at::Half>(),
x_grad.DATA_PTR<at::Half>(),
nullptr,
dy.DATA_PTR<at::Half>());
bn->setWeightPointers({scale.DATA_PTR<float>(), bias.DATA_PTR<float>()}, {scale_grad.DATA_PTR<float>(), bias_grad.DATA_PTR<float>()});
bn->setParameterPointers({running_mean.DATA_PTR<float>(), running_inv_var.DATA_PTR<float>()});
// deal with workspace(s)
auto workspace_bytes = bn->numWorkspaceBytes();
// We'll create explicit tensors for the first 2 workspace ptrs, then allocate & offset
// an allocated workspace for the others
size_t total_workspace_bytes = 0;
std::vector<size_t> workspace_offsets;
// Workspaces 3..N are packed into one allocation, each 512-byte aligned.
for (auto index = 3; index < workspace_bytes.size(); ++index) {
total_workspace_bytes = round_up_to_multiple(total_workspace_bytes, 512);
workspace_offsets.push_back(total_workspace_bytes);
auto alloc_bytes = workspace_bytes[index];
total_workspace_bytes += alloc_bytes;
}
// Allocate the workspace
Workspace ws(total_workspace_bytes);
// Slots 0/1 are the per-minibatch statistics saved by the forward pass.
std::vector<void *> workspace;
workspace.push_back(minibatch_mean.DATA_PTR<float>());
workspace.push_back(minibatch_inv_var.DATA_PTR<float>());
auto stream = at::cuda::getCurrentCUDAStream().stream();
// Slot 2 is the retired-CTA counter buffer, provided via ret_cta.
const int retired_cta_bytes = workspace_bytes[2];
void* retired_ctas = ret_cta.DATA_PTR<uint8_t>();
assert(ret_cta.size(0)>=retired_cta_bytes);
workspace.push_back(retired_ctas);
for (auto index = 3; index < workspace_bytes.size(); ++index) {
void *ptr = reinterpret_cast<uint8_t*>(ws.data) + workspace_offsets[index-3];
workspace.push_back(ptr);
}
bn->setWorkspacePointers(workspace, workspace_bytes);
bn->dgrad(stream, fuse_relu, my_data, pair_data, pair_data2, pair_data3, bn_group, *magic, occupancy, grid_dim_x, coop);
return std::vector<at::Tensor>{x_grad, scale_grad, bias_grad};
}
// Returns the achievable occupancy for the NHWC batch-norm forward kernel on
// the currently active device, as computed by
// NhwcBatchNorm::smem_driven_fwd_occupancy. The result is capped at 2, the
// maximum occupancy the kernel code supports.
int nhwc_bn_fwd_occupancy() {
  int dev = -1;
  cudaGetDevice(&dev);

  // The kernel implementation cannot exploit more than 2 blocks per SM.
  const int max_supported_occupancy = 2;
  return NhwcBatchNorm::smem_driven_fwd_occupancy(dev, max_supported_occupancy);
}
// Returns the achievable occupancy for the NHWC batch-norm backward kernel on
// the currently active device, as computed by
// NhwcBatchNorm::smem_driven_bwd_occupancy. The result is capped at 2, the
// maximum occupancy the kernel code supports.
int nhwc_bn_bwd_occupancy() {
  int dev = -1;
  cudaGetDevice(&dev);

  // The kernel implementation cannot exploit more than 2 blocks per SM.
  const int max_supported_occupancy = 2;
  return NhwcBatchNorm::smem_driven_bwd_occupancy(dev, max_supported_occupancy);
}
|
43096910d248d660dd7c9dd611049b9974b95aca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/common/balanced_splitter.h"
#include "oneflow/core/kernel/kernel_util.h"
#include "oneflow/user/kernels/math_unary_elementwise_func.h"
namespace oneflow {
namespace {
// Forward kernel for the combined margin loss (ArcFace/CosFace-style margins).
// For every logit, out[i] = in[i], except at the column matching the sample's
// label, where the margin transform cos(m1 * acos(x) + m2) - m3 is applied
// (or simply x - m3 in the is_cosine_loss specialization).
//
// n            total element count (rows * num_classes)
// num_classes  number of classes handled by this shard
// lower_bound  first class index owned by this shard (model-parallel split)
// theta        per-row cache of acos(in) at the labeled column, consumed by
//              GpuBackward (not written in the cosine specialization)
template<typename T, typename K, bool is_cosine_loss>
__global__ void GpuForward(const int64_t n, const int64_t num_classes, const int64_t lower_bound,
                           const T m1, const T m2, const T m3, const T* in, const K* labels, T* out,
                           T* theta) {
  CUDA_1D_KERNEL_LOOP(i, n) {
    const int32_t row_id = i / num_classes;
    const int32_t col_id = i - row_id * num_classes;
    const T in_data = in[i];
    T out_data = in_data;
    // Shift the global label into this shard's local class range.
    K label = labels[row_id] - lower_bound;
    if (is_cosine_loss) {
      // Additive cosine margin: subtract m3 from the target logit only.
      if (label == col_id) { out_data = in_data - m3; }
    } else {
      if (label == col_id) {
        const T theta_data = AcosFunctor<T>::Forward(in_data);
        out_data = CosFunctor<T>::Forward(theta_data * m1 + m2) - m3;
        theta[row_id] = theta_data;  // cached for the backward pass
      } else if ((label < 0 || label >= num_classes) && col_id == 0) {
        // Label belongs to another shard: write a defined value exactly once
        // per row so the backward kernel never reads uninitialized theta.
        theta[row_id] = 0;
      }
    }
    out[i] = out_data;
  }
}
// Backward kernel: dx[i] = dy[i] everywhere except at the labeled column of
// the angular-margin form, where the chain rule through cos(m1*theta + m2)
// contributes the factor sin(m1*theta + m2) * m1 / sin(theta), using the
// theta cached by GpuForward. In the is_cosine_loss specialization the
// margin's derivative is 1, so dy passes through unchanged.
template<typename T, typename K, bool is_cosine_loss>
__global__ void GpuBackward(const int64_t n, const int64_t num_classes, const int64_t lower_bound,
                            const T m1, const T m2, const T m3, const T* dy, const K* labels,
                            const T* theta, T* dx) {
  CUDA_1D_KERNEL_LOOP(i, n) {
    const int32_t row_id = i / num_classes;
    const int32_t col_id = i - row_id * num_classes;
    K label = labels[row_id] - lower_bound;  // shard-local label index
    const T dy_data = dy[i];
    const T theta_data = theta[row_id];
    T dx_data = dy_data;
    if (label == col_id && !is_cosine_loss) {
      dx_data = dy_data * SinFunctor<T>::Forward(theta_data * m1 + m2) * m1
                / SinFunctor<T>::Forward(theta_data);
    }
    dx[i] = dx_data;
  }
}
// Immutable kernel state recording the half-open range [lower, upper) of
// class indices owned by this rank when the class dimension is split for
// model parallelism.
class CombinedMarginLossOpKernelState final : public user_op::OpKernelState {
 public:
  CombinedMarginLossOpKernelState(int64_t lower, int64_t upper)
      : lower_bound_(lower), upper_bound_(upper) {}
  ~CombinedMarginLossOpKernelState() override = default;

  // Inclusive start of the owned class range.
  int64_t lower() const { return lower_bound_; }
  // Exclusive end of the owned class range.
  int64_t upper() const { return upper_bound_; }

 private:
  const int64_t lower_bound_;
  const int64_t upper_bound_;
};
// Builds kernel state describing the slice of the class dimension this rank
// owns. A populated state is returned only when the input named
// `in_arg_name` is split along axis 1 across more than one device; otherwise
// nullptr is returned and the kernels use lower_bound == 0.
std::shared_ptr<user_op::OpKernelState> CreateCombinedMarginLossOpKernelState(
    user_op::KernelInitContext* ctx, const std::string& in_arg_name) {
  const SbpParallel& in_sbp = ctx->SbpParallel4ArgNameAndIndex(in_arg_name, 0);
  if (in_sbp.has_split_parallel() && in_sbp.split_parallel().axis() == 1
      && ctx->parallel_ctx().parallel_num() > 1) {
    // Labels must be replicated on every rank when classes are sharded.
    CHECK(ctx->SbpParallel4ArgNameAndIndex("label", 0).has_broadcast_parallel());
    const user_op::TensorDesc* in_logical_desc =
        ctx->LogicalTensorDesc4ArgNameAndIndex(in_arg_name, 0);
    const auto depth = ctx->Attr<int64_t>("depth");
    CHECK_EQ(depth, in_logical_desc->shape().At(1));
    // Split `depth` classes as evenly as possible across all ranks, then
    // record this rank's [begin, end) range.
    BalancedSplitter bs(depth, ctx->parallel_ctx().parallel_num());
    return std::make_shared<CombinedMarginLossOpKernelState>(
        bs.At(ctx->parallel_ctx().parallel_id()).begin(),
        bs.At(ctx->parallel_ctx().parallel_id()).end());
  } else {
    return std::shared_ptr<user_op::OpKernelState>(nullptr);
  }
}
} // namespace
// GPU forward kernel wrapper for the combined margin loss. Reads logits "x"
// and integer labels "label"; writes margined logits "y" and the per-row
// angle cache "theta" consumed by the backward op. When m1 == 1 and m2 == 0
// the margin reduces to the additive-cosine form and the cheaper
// is_cosine_loss=true kernel (no acos/cos) is dispatched.
template<typename T, typename K>
class CombinedMarginLossGpuKernel final : public user_op::OpKernel {
 public:
  CombinedMarginLossGpuKernel() = default;
  ~CombinedMarginLossGpuKernel() override = default;

  // Captures this rank's class range when the class axis is split.
  std::shared_ptr<user_op::OpKernelState> CreateOpKernelState(
      user_op::KernelInitContext* ctx) const override {
    return CreateCombinedMarginLossOpKernelState(ctx, "x");
  }

 private:
  void Compute(user_op::KernelComputeContext* ctx, user_op::OpKernelState* state) const override {
    const user_op::Tensor* x = ctx->Tensor4ArgNameAndIndex("x", 0);
    const user_op::Tensor* label = ctx->Tensor4ArgNameAndIndex("label", 0);
    user_op::Tensor* y = ctx->Tensor4ArgNameAndIndex("y", 0);
    user_op::Tensor* theta = ctx->Tensor4ArgNameAndIndex("theta", 0);
    const float m1 = ctx->Attr<float>("m1");
    const float m2 = ctx->Attr<float>("m2");
    const float m3 = ctx->Attr<float>("m3");
    int64_t lower_bound = 0;
    if (state != nullptr) {
      // Model-parallel split: this rank owns only a slice of the classes.
      auto* kernel_state = dynamic_cast<CombinedMarginLossOpKernelState*>(state);
      CHECK_NOTNULL(kernel_state);
      CHECK_EQ(x->shape().Count(1), kernel_state->upper() - kernel_state->lower());
      lower_bound = kernel_state->lower();
    }
    if (m1 == 1.0 && m2 == 0.0) {
      // Additive cosine margin fast path.
      hipLaunchKernelGGL(( GpuForward<T, K, true>), dim3(BlocksNum4ThreadsNum(x->shape().elem_cnt())), dim3(kCudaThreadsNumPerBlock),
          0, ctx->device_ctx()->cuda_stream(),
          x->shape().elem_cnt(), x->shape().Count(1), lower_bound, static_cast<T>(m1),
          static_cast<T>(m2), static_cast<T>(m3), x->dptr<T>(), label->dptr<K>(), y->mut_dptr<T>(),
          theta->mut_dptr<T>());
    } else {
      hipLaunchKernelGGL(( GpuForward<T, K, false>), dim3(BlocksNum4ThreadsNum(x->shape().elem_cnt())),
          dim3(kCudaThreadsNumPerBlock), 0, ctx->device_ctx()->cuda_stream(),
          x->shape().elem_cnt(), x->shape().Count(1), lower_bound, static_cast<T>(m1),
          static_cast<T>(m2), static_cast<T>(m3), x->dptr<T>(), label->dptr<K>(), y->mut_dptr<T>(),
          theta->mut_dptr<T>());
    }
  }
  bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
// Registers the forward kernel for every (floating dtype, index dtype)
// combination on the "gpu" device tag.
#define REGISTER_COMBINED_MARGIN_LOSS_GPU_KERNEL(in_type, indices_type)               \
  REGISTER_USER_KERNEL("combined_margin_loss")                                        \
      .SetCreateFn<CombinedMarginLossGpuKernel<OF_PP_PAIR_FIRST(in_type),             \
                                               OF_PP_PAIR_FIRST(indices_type)>>()     \
      .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu")                             \
                       & (user_op::HobDataType("x", 0) == OF_PP_PAIR_SECOND(in_type)) \
                       & (user_op::HobDataType("label", 0) == OF_PP_PAIR_SECOND(indices_type)));

OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_COMBINED_MARGIN_LOSS_GPU_KERNEL, FLOATING_DATA_TYPE_SEQ,
                                 INDEX_DATA_TYPE_SEQ)
// GPU backward kernel wrapper for the combined margin loss. Reads upstream
// gradients "dy", labels "label" and the forward pass's "theta" cache;
// writes input gradients "dx". Dispatches the is_cosine_loss=true kernel
// when m1 == 1 and m2 == 0 (the margin's derivative is 1 in that case).
template<typename T, typename K>
class CombinedMarginLossGradGpuKernel final : public user_op::OpKernel {
 public:
  CombinedMarginLossGradGpuKernel() = default;
  ~CombinedMarginLossGradGpuKernel() override = default;

  // Captures this rank's class range when the class axis is split.
  std::shared_ptr<user_op::OpKernelState> CreateOpKernelState(
      user_op::KernelInitContext* ctx) const override {
    return CreateCombinedMarginLossOpKernelState(ctx, "dy");
  }

 private:
  void Compute(user_op::KernelComputeContext* ctx, user_op::OpKernelState* state) const override {
    const user_op::Tensor* dy = ctx->Tensor4ArgNameAndIndex("dy", 0);
    const user_op::Tensor* label = ctx->Tensor4ArgNameAndIndex("label", 0);
    const user_op::Tensor* theta = ctx->Tensor4ArgNameAndIndex("theta", 0);
    user_op::Tensor* dx = ctx->Tensor4ArgNameAndIndex("dx", 0);
    const float m1 = ctx->Attr<float>("m1");
    const float m2 = ctx->Attr<float>("m2");
    const float m3 = ctx->Attr<float>("m3");
    int64_t lower_bound = 0;
    if (state != nullptr) {
      // Model-parallel split: this rank owns only a slice of the classes.
      auto* kernel_state = dynamic_cast<CombinedMarginLossOpKernelState*>(state);
      CHECK_NOTNULL(kernel_state);
      CHECK_EQ(dy->shape().Count(1), kernel_state->upper() - kernel_state->lower());
      lower_bound = kernel_state->lower();
    }
    if (m1 == 1.0 && m2 == 0.0) {
      // Additive cosine margin fast path: gradient passes through.
      hipLaunchKernelGGL(( GpuBackward<T, K, true>), dim3(BlocksNum4ThreadsNum(dy->shape().elem_cnt())),
          dim3(kCudaThreadsNumPerBlock), 0, ctx->device_ctx()->cuda_stream(),
          dy->shape().elem_cnt(), dy->shape().Count(1), lower_bound, static_cast<T>(m1),
          static_cast<T>(m2), static_cast<T>(m3), dy->dptr<T>(), label->dptr<K>(), theta->dptr<T>(),
          dx->mut_dptr<T>());
    } else {
      hipLaunchKernelGGL(( GpuBackward<T, K, false>), dim3(BlocksNum4ThreadsNum(dy->shape().elem_cnt())),
          dim3(kCudaThreadsNumPerBlock), 0, ctx->device_ctx()->cuda_stream(),
          dy->shape().elem_cnt(), dy->shape().Count(1), lower_bound, static_cast<T>(m1),
          static_cast<T>(m2), static_cast<T>(m3), dy->dptr<T>(), label->dptr<K>(), theta->dptr<T>(),
          dx->mut_dptr<T>());
    }
  }
  bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
// Registers the backward kernel for every (floating dtype, index dtype)
// combination on the "gpu" device tag.
#define REGISTER_COMBINED_MARGIN_LOSS_GRAD_GPU_KERNEL(dy_type, indices_type)           \
  REGISTER_USER_KERNEL("combined_margin_loss_grad")                                    \
      .SetCreateFn<CombinedMarginLossGradGpuKernel<OF_PP_PAIR_FIRST(dy_type),          \
                                                   OF_PP_PAIR_FIRST(indices_type)>>()  \
      .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu")                              \
                       & (user_op::HobDataType("dy", 0) == OF_PP_PAIR_SECOND(dy_type)) \
                       & (user_op::HobDataType("label", 0) == OF_PP_PAIR_SECOND(indices_type)));

OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_COMBINED_MARGIN_LOSS_GRAD_GPU_KERNEL,
                                 FLOATING_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
} // namespace oneflow
| 43096910d248d660dd7c9dd611049b9974b95aca.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/common/balanced_splitter.h"
#include "oneflow/core/kernel/kernel_util.h"
#include "oneflow/user/kernels/math_unary_elementwise_func.h"
namespace oneflow {
namespace {
// CUDA twin of the HIP forward kernel above. out[i] = in[i] except at the
// labeled column, which receives cos(m1 * acos(x) + m2) - m3 (or x - m3 in
// the is_cosine_loss specialization). theta caches acos(in) per row for the
// backward pass; lower_bound shifts global labels into this shard's range.
template<typename T, typename K, bool is_cosine_loss>
__global__ void GpuForward(const int64_t n, const int64_t num_classes, const int64_t lower_bound,
                           const T m1, const T m2, const T m3, const T* in, const K* labels, T* out,
                           T* theta) {
  CUDA_1D_KERNEL_LOOP(i, n) {
    const int32_t row_id = i / num_classes;
    const int32_t col_id = i - row_id * num_classes;
    const T in_data = in[i];
    T out_data = in_data;
    // Shard-local label index.
    K label = labels[row_id] - lower_bound;
    if (is_cosine_loss) {
      if (label == col_id) { out_data = in_data - m3; }
    } else {
      if (label == col_id) {
        const T theta_data = AcosFunctor<T>::Forward(in_data);
        out_data = CosFunctor<T>::Forward(theta_data * m1 + m2) - m3;
        theta[row_id] = theta_data;  // cached for GpuBackward
      } else if ((label < 0 || label >= num_classes) && col_id == 0) {
        // Label lives on another shard; give theta a defined value once per row.
        theta[row_id] = 0;
      }
    }
    out[i] = out_data;
  }
}
// CUDA twin of the HIP backward kernel: dx[i] = dy[i], except at the labeled
// column of the angular-margin form where the factor
// sin(m1*theta + m2) * m1 / sin(theta) is applied using the cached theta.
template<typename T, typename K, bool is_cosine_loss>
__global__ void GpuBackward(const int64_t n, const int64_t num_classes, const int64_t lower_bound,
                            const T m1, const T m2, const T m3, const T* dy, const K* labels,
                            const T* theta, T* dx) {
  CUDA_1D_KERNEL_LOOP(i, n) {
    const int32_t row_id = i / num_classes;
    const int32_t col_id = i - row_id * num_classes;
    K label = labels[row_id] - lower_bound;  // shard-local label index
    const T dy_data = dy[i];
    const T theta_data = theta[row_id];
    T dx_data = dy_data;
    if (label == col_id && !is_cosine_loss) {
      dx_data = dy_data * SinFunctor<T>::Forward(theta_data * m1 + m2) * m1
                / SinFunctor<T>::Forward(theta_data);
    }
    dx[i] = dx_data;
  }
}
// Immutable kernel state holding the [lower, upper) class-index range owned
// by this rank under a model-parallel split of the class dimension.
class CombinedMarginLossOpKernelState final : public user_op::OpKernelState {
 public:
  CombinedMarginLossOpKernelState(int64_t lower, int64_t upper) : lower_(lower), upper_(upper) {}
  ~CombinedMarginLossOpKernelState() override = default;

  // Inclusive start of the owned class range.
  int64_t lower() const { return lower_; }
  // Exclusive end of the owned class range.
  int64_t upper() const { return upper_; }

 private:
  const int64_t lower_;
  const int64_t upper_;
};
// Returns populated state only when input `in_arg_name` is split along
// axis 1 across more than one device; otherwise nullptr (lower_bound == 0).
std::shared_ptr<user_op::OpKernelState> CreateCombinedMarginLossOpKernelState(
    user_op::KernelInitContext* ctx, const std::string& in_arg_name) {
  const SbpParallel& in_sbp = ctx->SbpParallel4ArgNameAndIndex(in_arg_name, 0);
  if (in_sbp.has_split_parallel() && in_sbp.split_parallel().axis() == 1
      && ctx->parallel_ctx().parallel_num() > 1) {
    // Labels must be broadcast when classes are sharded.
    CHECK(ctx->SbpParallel4ArgNameAndIndex("label", 0).has_broadcast_parallel());
    const user_op::TensorDesc* in_logical_desc =
        ctx->LogicalTensorDesc4ArgNameAndIndex(in_arg_name, 0);
    const auto depth = ctx->Attr<int64_t>("depth");
    CHECK_EQ(depth, in_logical_desc->shape().At(1));
    // Even split of `depth` classes; record this rank's [begin, end).
    BalancedSplitter bs(depth, ctx->parallel_ctx().parallel_num());
    return std::make_shared<CombinedMarginLossOpKernelState>(
        bs.At(ctx->parallel_ctx().parallel_id()).begin(),
        bs.At(ctx->parallel_ctx().parallel_id()).end());
  } else {
    return std::shared_ptr<user_op::OpKernelState>(nullptr);
  }
}
} // namespace
// Forward kernel wrapper (CUDA twin). Reads "x"/"label", writes "y"/"theta".
// m1 == 1 && m2 == 0 selects the cheaper cosine-margin specialization.
template<typename T, typename K>
class CombinedMarginLossGpuKernel final : public user_op::OpKernel {
 public:
  CombinedMarginLossGpuKernel() = default;
  ~CombinedMarginLossGpuKernel() override = default;

  // Captures this rank's class range when the class axis is split.
  std::shared_ptr<user_op::OpKernelState> CreateOpKernelState(
      user_op::KernelInitContext* ctx) const override {
    return CreateCombinedMarginLossOpKernelState(ctx, "x");
  }

 private:
  void Compute(user_op::KernelComputeContext* ctx, user_op::OpKernelState* state) const override {
    const user_op::Tensor* x = ctx->Tensor4ArgNameAndIndex("x", 0);
    const user_op::Tensor* label = ctx->Tensor4ArgNameAndIndex("label", 0);
    user_op::Tensor* y = ctx->Tensor4ArgNameAndIndex("y", 0);
    user_op::Tensor* theta = ctx->Tensor4ArgNameAndIndex("theta", 0);
    const float m1 = ctx->Attr<float>("m1");
    const float m2 = ctx->Attr<float>("m2");
    const float m3 = ctx->Attr<float>("m3");
    int64_t lower_bound = 0;
    if (state != nullptr) {
      // Model-parallel split: only a slice of the classes is local.
      auto* kernel_state = dynamic_cast<CombinedMarginLossOpKernelState*>(state);
      CHECK_NOTNULL(kernel_state);
      CHECK_EQ(x->shape().Count(1), kernel_state->upper() - kernel_state->lower());
      lower_bound = kernel_state->lower();
    }
    if (m1 == 1.0 && m2 == 0.0) {
      // Additive cosine margin fast path.
      GpuForward<T, K, true><<<BlocksNum4ThreadsNum(x->shape().elem_cnt()), kCudaThreadsNumPerBlock,
                               0, ctx->device_ctx()->cuda_stream()>>>(
          x->shape().elem_cnt(), x->shape().Count(1), lower_bound, static_cast<T>(m1),
          static_cast<T>(m2), static_cast<T>(m3), x->dptr<T>(), label->dptr<K>(), y->mut_dptr<T>(),
          theta->mut_dptr<T>());
    } else {
      GpuForward<T, K, false><<<BlocksNum4ThreadsNum(x->shape().elem_cnt()),
                                kCudaThreadsNumPerBlock, 0, ctx->device_ctx()->cuda_stream()>>>(
          x->shape().elem_cnt(), x->shape().Count(1), lower_bound, static_cast<T>(m1),
          static_cast<T>(m2), static_cast<T>(m3), x->dptr<T>(), label->dptr<K>(), y->mut_dptr<T>(),
          theta->mut_dptr<T>());
    }
  }
  bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
// Registers the forward kernel for every (floating dtype, index dtype) pair.
#define REGISTER_COMBINED_MARGIN_LOSS_GPU_KERNEL(in_type, indices_type)               \
  REGISTER_USER_KERNEL("combined_margin_loss")                                        \
      .SetCreateFn<CombinedMarginLossGpuKernel<OF_PP_PAIR_FIRST(in_type),             \
                                               OF_PP_PAIR_FIRST(indices_type)>>()     \
      .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu")                             \
                       & (user_op::HobDataType("x", 0) == OF_PP_PAIR_SECOND(in_type)) \
                       & (user_op::HobDataType("label", 0) == OF_PP_PAIR_SECOND(indices_type)));

OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_COMBINED_MARGIN_LOSS_GPU_KERNEL, FLOATING_DATA_TYPE_SEQ,
                                 INDEX_DATA_TYPE_SEQ)
// Backward kernel wrapper (CUDA twin). Reads "dy"/"label"/"theta", writes
// "dx". m1 == 1 && m2 == 0 selects the pass-through cosine specialization.
template<typename T, typename K>
class CombinedMarginLossGradGpuKernel final : public user_op::OpKernel {
 public:
  CombinedMarginLossGradGpuKernel() = default;
  ~CombinedMarginLossGradGpuKernel() override = default;

  // Captures this rank's class range when the class axis is split.
  std::shared_ptr<user_op::OpKernelState> CreateOpKernelState(
      user_op::KernelInitContext* ctx) const override {
    return CreateCombinedMarginLossOpKernelState(ctx, "dy");
  }

 private:
  void Compute(user_op::KernelComputeContext* ctx, user_op::OpKernelState* state) const override {
    const user_op::Tensor* dy = ctx->Tensor4ArgNameAndIndex("dy", 0);
    const user_op::Tensor* label = ctx->Tensor4ArgNameAndIndex("label", 0);
    const user_op::Tensor* theta = ctx->Tensor4ArgNameAndIndex("theta", 0);
    user_op::Tensor* dx = ctx->Tensor4ArgNameAndIndex("dx", 0);
    const float m1 = ctx->Attr<float>("m1");
    const float m2 = ctx->Attr<float>("m2");
    const float m3 = ctx->Attr<float>("m3");
    int64_t lower_bound = 0;
    if (state != nullptr) {
      // Model-parallel split: only a slice of the classes is local.
      auto* kernel_state = dynamic_cast<CombinedMarginLossOpKernelState*>(state);
      CHECK_NOTNULL(kernel_state);
      CHECK_EQ(dy->shape().Count(1), kernel_state->upper() - kernel_state->lower());
      lower_bound = kernel_state->lower();
    }
    if (m1 == 1.0 && m2 == 0.0) {
      GpuBackward<T, K, true><<<BlocksNum4ThreadsNum(dy->shape().elem_cnt()),
                                kCudaThreadsNumPerBlock, 0, ctx->device_ctx()->cuda_stream()>>>(
          dy->shape().elem_cnt(), dy->shape().Count(1), lower_bound, static_cast<T>(m1),
          static_cast<T>(m2), static_cast<T>(m3), dy->dptr<T>(), label->dptr<K>(), theta->dptr<T>(),
          dx->mut_dptr<T>());
    } else {
      GpuBackward<T, K, false><<<BlocksNum4ThreadsNum(dy->shape().elem_cnt()),
                                 kCudaThreadsNumPerBlock, 0, ctx->device_ctx()->cuda_stream()>>>(
          dy->shape().elem_cnt(), dy->shape().Count(1), lower_bound, static_cast<T>(m1),
          static_cast<T>(m2), static_cast<T>(m3), dy->dptr<T>(), label->dptr<K>(), theta->dptr<T>(),
          dx->mut_dptr<T>());
    }
  }
  bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
// Registers the backward kernel for every (floating dtype, index dtype) pair.
#define REGISTER_COMBINED_MARGIN_LOSS_GRAD_GPU_KERNEL(dy_type, indices_type)           \
  REGISTER_USER_KERNEL("combined_margin_loss_grad")                                    \
      .SetCreateFn<CombinedMarginLossGradGpuKernel<OF_PP_PAIR_FIRST(dy_type),          \
                                                   OF_PP_PAIR_FIRST(indices_type)>>()  \
      .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu")                              \
                       & (user_op::HobDataType("dy", 0) == OF_PP_PAIR_SECOND(dy_type)) \
                       & (user_op::HobDataType("label", 0) == OF_PP_PAIR_SECOND(indices_type)));

OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_COMBINED_MARGIN_LOSS_GRAD_GPU_KERNEL,
                                 FLOATING_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
} // namespace oneflow
|
702ec29d60f7e04912503d4cce9327bb060055ef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "opencv2/cudev.hpp"
namespace cv { namespace cuda { namespace device
{
namespace hough_segments
{
// Probabilistic Hough transform: extract line segments from the vote
// accumulator. One thread handles one interior (rho, theta) accumulator
// cell. A cell qualifies when it holds at least lineLength votes and is a
// strict maximum of its 3x3 neighbourhood; the thread then walks the
// corresponding line across the binary mask texture `src`, collecting runs
// of set pixels (tolerating up to lineGap consecutive misses inside a run)
// and appending each sufficiently long run to `out` through an atomically
// incremented counter. counterPtr must be zeroed before launch; it may end
// up larger than maxSize (the host clamps it) - only the first maxSize
// segments are actually stored.
__global__ void houghLinesProbabilistic(cv::cudev::Texture<uchar> src, const PtrStepSzi accum,
                                        int4* out, const int maxSize,
                                        const float rho, const float theta,
                                        const int lineGap, const int lineLength,
                                        const int rows, const int cols,
                                        int* counterPtr)
{
    const int r = blockIdx.x * blockDim.x + threadIdx.x;
    const int n = blockIdx.y * blockDim.y + threadIdx.y;

    // Stay inside the accumulator interior (1-cell border excluded).
    if (r >= accum.cols - 2 || n >= accum.rows - 2)
        return;

    const int curVotes = accum(n + 1, r + 1);

    // Candidate test: enough votes and a strict 3x3 local maximum.
    if (curVotes >= lineLength &&
        curVotes > accum(n, r) &&
        curVotes > accum(n, r + 1) &&
        curVotes > accum(n, r + 2) &&
        curVotes > accum(n + 1, r) &&
        curVotes > accum(n + 1, r + 2) &&
        curVotes > accum(n + 2, r) &&
        curVotes > accum(n + 2, r + 1) &&
        curVotes > accum(n + 2, r + 2))
    {
        // Reconstruct the line: p0 = closest point to the origin, dir = unit
        // direction along the line.
        const float radius = (r - (accum.cols - 2 - 1) * 0.5f) * rho;
        const float angle = n * theta;

        float cosa;
        float sina;
        sincosf(angle, &sina, &cosa);

        float2 p0 = make_float2(cosa * radius, sina * radius);
        float2 dir = make_float2(-sina, cosa);

        // Intersections of the line with the four image borders.
        float2 pb[4] = {make_float2(-1, -1), make_float2(-1, -1), make_float2(-1, -1), make_float2(-1, -1)};
        float a;

        if (dir.x != 0)
        {
            a = -p0.x / dir.x;
            pb[0].x = 0;
            pb[0].y = p0.y + a * dir.y;

            a = (cols - 1 - p0.x) / dir.x;
            pb[1].x = cols - 1;
            pb[1].y = p0.y + a * dir.y;
        }

        if (dir.y != 0)
        {
            a = -p0.y / dir.y;
            pb[2].x = p0.x + a * dir.x;
            pb[2].y = 0;

            a = (rows - 1 - p0.y) / dir.y;
            pb[3].x = p0.x + a * dir.x;
            pb[3].y = rows - 1;
        }

        // Start tracing from a border intersection that lies inside the
        // image, flipping dir so the walk moves into the image.
        if (pb[0].x == 0 && (pb[0].y >= 0 && pb[0].y < rows))
        {
            p0 = pb[0];
            if (dir.x < 0)
                dir = -dir;
        }
        else if (pb[1].x == cols - 1 && (pb[1].y >= 0 && pb[1].y < rows))
        {
            p0 = pb[1];
            if (dir.x > 0)
                dir = -dir;
        }
        else if (pb[2].y == 0 && (pb[2].x >= 0 && pb[2].x < cols))
        {
            p0 = pb[2];
            if (dir.y < 0)
                dir = -dir;
        }
        else if (pb[3].y == rows - 1 && (pb[3].x >= 0 && pb[3].x < cols))
        {
            p0 = pb[3];
            if (dir.y > 0)
                dir = -dir;
        }

        // Per-step increment: exactly one pixel along the dominant axis.
        float2 d;
        if (::fabsf(dir.x) > ::fabsf(dir.y))
        {
            d.x = dir.x > 0 ? 1 : -1;
            d.y = dir.y / ::fabsf(dir.x);
        }
        else
        {
            d.x = dir.x / ::fabsf(dir.y);
            d.y = dir.y > 0 ? 1 : -1;
        }

        float2 line_end[2];      // endpoints of the run currently being grown
        int gap;                 // consecutive empty pixels seen inside a run
        bool inLine = false;     // currently inside a run of set pixels

        float2 p1 = p0;
        if (p1.x < 0 || p1.x >= cols || p1.y < 0 || p1.y >= rows)
            return;

        for (;;)
        {
            if (src(p1.y, p1.x))
            {
                gap = 0;

                if (!inLine)
                {
                    // Open a new segment.
                    line_end[0] = p1;
                    line_end[1] = p1;

                    inLine = true;
                }
                else
                {
                    // Extend the current segment.
                    line_end[1] = p1;
                }
            }
            else if (inLine)
            {
                // Close the segment once the gap budget is exhausted.
                if (++gap > lineGap)
                {
                    bool good_line = ::abs(line_end[1].x - line_end[0].x) >= lineLength ||
                                     ::abs(line_end[1].y - line_end[0].y) >= lineLength;

                    if (good_line)
                    {
                        const int ind = ::atomicAdd(counterPtr, 1);
                        if (ind < maxSize)
                            out[ind] = make_int4(line_end[0].x, line_end[0].y, line_end[1].x, line_end[1].y);
                    }

                    gap = 0;
                    inLine = false;
                }
            }

            p1 = p1 + d;
            if (p1.x < 0 || p1.x >= cols || p1.y < 0 || p1.y >= rows)
            {
                // Walked off the image: flush any still-open segment.
                if (inLine)
                {
                    bool good_line = ::abs(line_end[1].x - line_end[0].x) >= lineLength ||
                                     ::abs(line_end[1].y - line_end[0].y) >= lineLength;

                    if (good_line)
                    {
                        const int ind = ::atomicAdd(counterPtr, 1);
                        if (ind < maxSize)
                            out[ind] = make_int4(line_end[0].x, line_end[0].y, line_end[1].x, line_end[1].y);
                    }
                }

                break;
            }
        }
    }
}
// Host driver for the probabilistic Hough segment kernel (HIP build).
// Zeroes the device segment counter, wraps `mask` in a point-sampled clamped
// texture, launches the kernel over the accumulator interior and reads the
// counter back. Synchronizes on `stream` before returning, so on return
// `out` holds the returned number of int4 segments (clamped to maxSize).
int houghLinesProbabilistic_gpu(GpuMat &mask, PtrStepSzi accum, int4* out, int maxSize, float rho, float theta, int lineGap, int lineLength, int* counterPtr, hipStream_t stream)
{
    cudaSafeCall( hipMemsetAsync(counterPtr, 0, sizeof(int), stream) );

    // One thread per interior accumulator cell.
    const dim3 block(32, 8);
    const dim3 grid(divUp(accum.cols - 2, block.x), divUp(accum.rows - 2, block.y));

    cv::cudev::GpuMat_<uchar> src_(mask);
    cv::cudev::Texture<uchar> tex(src_, false, hipFilterModePoint, hipAddressModeClamp);
    hipLaunchKernelGGL(( houghLinesProbabilistic), dim3(grid), dim3(block), 0, stream, tex, accum,
                                                   out, maxSize,
                                                   rho, theta,
                                                   lineGap, lineLength,
                                                   mask.rows, mask.cols,
                                                   counterPtr);
    cudaSafeCall( hipGetLastError() );

    int totalCount;
    cudaSafeCall( hipMemcpyAsync(&totalCount, counterPtr, sizeof(int), hipMemcpyDeviceToHost, stream) );

    cudaSafeCall( hipStreamSynchronize(stream) );

    // The kernel may count more segments than it could store.
    totalCount = ::min(totalCount, maxSize);

    return totalCount;
}
}
}}}
#endif /* CUDA_DISABLER */
| 702ec29d60f7e04912503d4cce9327bb060055ef.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "opencv2/cudev.hpp"
namespace cv { namespace cuda { namespace device
{
namespace hough_segments
{
// CUDA build of the probabilistic Hough segment-extraction kernel. One
// thread per interior (rho, theta) accumulator cell: cells that hold at
// least lineLength votes and are a strict 3x3 local maximum are rasterised
// across the mask texture, collecting pixel runs (up to lineGap consecutive
// misses allowed) and appending each long-enough run to `out` via an atomic
// counter. counterPtr must be pre-zeroed; only the first maxSize segments
// are stored (the host clamps the returned count).
__global__ void houghLinesProbabilistic(cv::cudev::Texture<uchar> src, const PtrStepSzi accum,
                                        int4* out, const int maxSize,
                                        const float rho, const float theta,
                                        const int lineGap, const int lineLength,
                                        const int rows, const int cols,
                                        int* counterPtr)
{
    const int r = blockIdx.x * blockDim.x + threadIdx.x;
    const int n = blockIdx.y * blockDim.y + threadIdx.y;

    // Stay inside the accumulator interior (1-cell border excluded).
    if (r >= accum.cols - 2 || n >= accum.rows - 2)
        return;

    const int curVotes = accum(n + 1, r + 1);

    // Candidate test: enough votes and a strict 3x3 local maximum.
    if (curVotes >= lineLength &&
        curVotes > accum(n, r) &&
        curVotes > accum(n, r + 1) &&
        curVotes > accum(n, r + 2) &&
        curVotes > accum(n + 1, r) &&
        curVotes > accum(n + 1, r + 2) &&
        curVotes > accum(n + 2, r) &&
        curVotes > accum(n + 2, r + 1) &&
        curVotes > accum(n + 2, r + 2))
    {
        // Reconstruct the line: p0 = closest point to the origin, dir = unit
        // direction along the line.
        const float radius = (r - (accum.cols - 2 - 1) * 0.5f) * rho;
        const float angle = n * theta;

        float cosa;
        float sina;
        sincosf(angle, &sina, &cosa);

        float2 p0 = make_float2(cosa * radius, sina * radius);
        float2 dir = make_float2(-sina, cosa);

        // Intersections of the line with the four image borders.
        float2 pb[4] = {make_float2(-1, -1), make_float2(-1, -1), make_float2(-1, -1), make_float2(-1, -1)};
        float a;

        if (dir.x != 0)
        {
            a = -p0.x / dir.x;
            pb[0].x = 0;
            pb[0].y = p0.y + a * dir.y;

            a = (cols - 1 - p0.x) / dir.x;
            pb[1].x = cols - 1;
            pb[1].y = p0.y + a * dir.y;
        }

        if (dir.y != 0)
        {
            a = -p0.y / dir.y;
            pb[2].x = p0.x + a * dir.x;
            pb[2].y = 0;

            a = (rows - 1 - p0.y) / dir.y;
            pb[3].x = p0.x + a * dir.x;
            pb[3].y = rows - 1;
        }

        // Start tracing from a border intersection inside the image,
        // flipping dir so the walk moves into the image.
        if (pb[0].x == 0 && (pb[0].y >= 0 && pb[0].y < rows))
        {
            p0 = pb[0];
            if (dir.x < 0)
                dir = -dir;
        }
        else if (pb[1].x == cols - 1 && (pb[1].y >= 0 && pb[1].y < rows))
        {
            p0 = pb[1];
            if (dir.x > 0)
                dir = -dir;
        }
        else if (pb[2].y == 0 && (pb[2].x >= 0 && pb[2].x < cols))
        {
            p0 = pb[2];
            if (dir.y < 0)
                dir = -dir;
        }
        else if (pb[3].y == rows - 1 && (pb[3].x >= 0 && pb[3].x < cols))
        {
            p0 = pb[3];
            if (dir.y > 0)
                dir = -dir;
        }

        // Per-step increment: exactly one pixel along the dominant axis.
        float2 d;
        if (::fabsf(dir.x) > ::fabsf(dir.y))
        {
            d.x = dir.x > 0 ? 1 : -1;
            d.y = dir.y / ::fabsf(dir.x);
        }
        else
        {
            d.x = dir.x / ::fabsf(dir.y);
            d.y = dir.y > 0 ? 1 : -1;
        }

        float2 line_end[2];      // endpoints of the run currently being grown
        int gap;                 // consecutive empty pixels seen inside a run
        bool inLine = false;     // currently inside a run of set pixels

        float2 p1 = p0;
        if (p1.x < 0 || p1.x >= cols || p1.y < 0 || p1.y >= rows)
            return;

        for (;;)
        {
            if (src(p1.y, p1.x))
            {
                gap = 0;

                if (!inLine)
                {
                    // Open a new segment.
                    line_end[0] = p1;
                    line_end[1] = p1;

                    inLine = true;
                }
                else
                {
                    // Extend the current segment.
                    line_end[1] = p1;
                }
            }
            else if (inLine)
            {
                // Close the segment once the gap budget is exhausted.
                if (++gap > lineGap)
                {
                    bool good_line = ::abs(line_end[1].x - line_end[0].x) >= lineLength ||
                                     ::abs(line_end[1].y - line_end[0].y) >= lineLength;

                    if (good_line)
                    {
                        const int ind = ::atomicAdd(counterPtr, 1);
                        if (ind < maxSize)
                            out[ind] = make_int4(line_end[0].x, line_end[0].y, line_end[1].x, line_end[1].y);
                    }

                    gap = 0;
                    inLine = false;
                }
            }

            p1 = p1 + d;
            if (p1.x < 0 || p1.x >= cols || p1.y < 0 || p1.y >= rows)
            {
                // Walked off the image: flush any still-open segment.
                if (inLine)
                {
                    bool good_line = ::abs(line_end[1].x - line_end[0].x) >= lineLength ||
                                     ::abs(line_end[1].y - line_end[0].y) >= lineLength;

                    if (good_line)
                    {
                        const int ind = ::atomicAdd(counterPtr, 1);
                        if (ind < maxSize)
                            out[ind] = make_int4(line_end[0].x, line_end[0].y, line_end[1].x, line_end[1].y);
                    }
                }

                break;
            }
        }
    }
}
// Host driver for the probabilistic Hough segment kernel (CUDA build).
// Zeroes the device segment counter, wraps `mask` in a point-sampled clamped
// texture, launches the kernel over the accumulator interior and reads the
// counter back. Synchronizes on `stream` before returning, so on return
// `out` holds the returned number of int4 segments (clamped to maxSize).
int houghLinesProbabilistic_gpu(GpuMat &mask, PtrStepSzi accum, int4* out, int maxSize, float rho, float theta, int lineGap, int lineLength, int* counterPtr, cudaStream_t stream)
{
    cudaSafeCall( cudaMemsetAsync(counterPtr, 0, sizeof(int), stream) );

    // One thread per interior accumulator cell.
    const dim3 block(32, 8);
    const dim3 grid(divUp(accum.cols - 2, block.x), divUp(accum.rows - 2, block.y));

    cv::cudev::GpuMat_<uchar> src_(mask);
    cv::cudev::Texture<uchar> tex(src_, false, cudaFilterModePoint, cudaAddressModeClamp);
    houghLinesProbabilistic<<<grid, block, 0, stream>>>(tex, accum,
                                                        out, maxSize,
                                                        rho, theta,
                                                        lineGap, lineLength,
                                                        mask.rows, mask.cols,
                                                        counterPtr);
    cudaSafeCall( cudaGetLastError() );

    int totalCount;
    cudaSafeCall( cudaMemcpyAsync(&totalCount, counterPtr, sizeof(int), cudaMemcpyDeviceToHost, stream) );

    cudaSafeCall( cudaStreamSynchronize(stream) );

    // The kernel may count more segments than it could store.
    totalCount = ::min(totalCount, maxSize);

    return totalCount;
}
}
}}}
#endif /* CUDA_DISABLER */
|
429e59aa83f72c247bb802d72fe70182fca56303.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include <algorithm>
#include "cifar10_reader.cuh"
#include "matrix_hip.cuh"
int trainLen = 50000;
int testLen = 10000;
int cnt, loss;
const int batch = 200;
double epsilon = 0.0001;
double regLambda = 0.00;
int numExamples, nbsPerEpoch, inputDim, nnHdim[5] = {0, 2048, 1024, 512, 10};
Matrix *Xb, *Yb, *Xt, *Yt, *Y_pred, *W[4], *B[4], *a[4], *delta[4], *dW[4], *dB[4];
Matrix *softMaxSum;
Matrix X_train, Y_train, X_test, Y_test;
void generate();
void predict();
void calLoss();
void forwardPropagation();
void backPropagation();
void trainModel(int numPasses, bool printLoss);
// Entry point: load the CIFAR-10 split, size the network from the data,
// allocate device-side parameters/activations and run mini-batch training.
int main() {
  cout.precision(16);
  // readData() supplies train/test images and labels as Matrix objects.
  unordered_map<string, Matrix> dataMap = readData();
  X_train = dataMap["trainImages"];
  Y_train = dataMap["trainLabels"];
  X_test = dataMap["testImages"];
  Y_test = dataMap["testLabels"];
  printf("(%d, %d) (%d, %d)\n", X_train.height, X_train.width, Y_train.height, Y_train.width);
  printf("(%d, %d) (%d, %d)\n", X_test.height, X_test.width, Y_test.height, Y_test.width);
  numExamples = X_train.height;
  inputDim = X_train.width;
  nbsPerEpoch = (int)(numExamples / batch);  // mini-batches per epoch
  generate();                  // allocate unified-memory weights/buffers
  trainModel(10000, true);     // 10000 passes, printLoss enabled
  return 0;
}
/*
 * Allocate unified (managed) memory for the whole network: mini-batch
 * input/label buffers, and for each of the four fully-connected layers the
 * weights W, biases B, activations a, error terms delta and gradients
 * dW/dB, plus the softmax row-sum scratch matrix.  Layer widths come from
 * nnHdim; nnHdim[0] is patched to the flattened input dimension first.
 * Weights are initialized via initialize().
 */
void generate() {
    printf("input dim: %d\n", inputDim);
    nnHdim[0] = inputDim;
    // Mini-batch matrices: training batch (Xb/Yb), test batch (Xt/Yt) and
    // the prediction buffer.  Each Matrix header and its element storage
    // are separate managed allocations.
    hipMallocManaged((void **)&(Xb), sizeof(Matrix));
    Xb->height = batch; Xb->width = inputDim;
    hipMallocManaged((void **)&(Yb), sizeof(Matrix));
    Yb->height = batch; Yb->width = 10;
    hipMallocManaged((void **)&(Xt), sizeof(Matrix));
    Xt->height = batch; Xt->width = inputDim;
    hipMallocManaged((void **)&(Yt), sizeof(Matrix));
    Yt->height = batch; Yt->width = 10;
    hipMallocManaged((void **)&(Y_pred), sizeof(Matrix));
    Y_pred->height = batch; Y_pred->width = 10;
    hipMallocManaged((void **)&(Xb->elements), batch * inputDim * sizeof(double));
    hipMallocManaged((void **)&(Yb->elements), batch * 10 * sizeof(double));
    hipMallocManaged((void **)&(Xt->elements), batch * inputDim * sizeof(double));
    hipMallocManaged((void **)&(Yt->elements), batch * 10 * sizeof(double));
    hipMallocManaged((void **)&(Y_pred->elements), batch * 10 * sizeof(double));
    // One iteration per fully-connected layer: row = fan-in, col = fan-out.
    for (int i = 0; i < 4; i++) {
        int row = nnHdim[i], col = nnHdim[i + 1];
        // Spread parameter handed to initialize(); presumably a weight-init
        // scale (sqrt of fan-out) -- confirm against matrix_hip.cuh.
        // Renamed from `std` to avoid shadowing the std namespace name.
        double stdDev = sqrt(col);
        hipMallocManaged((void **)&(W[i]), sizeof(Matrix));
        W[i]->width = col; W[i]->height = row;
        hipMallocManaged((void **)&(B[i]), sizeof(Matrix));
        B[i]->width = col; B[i]->height = 1;
        hipMallocManaged((void **)&(a[i]), sizeof(Matrix));
        a[i]->width = col; a[i]->height = batch;
        hipMallocManaged((void **)&(delta[i]), sizeof(Matrix));
        delta[i]->width = col; delta[i]->height = batch;
        hipMallocManaged((void **)&(dW[i]), sizeof(Matrix));
        dW[i]->width = col; dW[i]->height = row;
        hipMallocManaged((void **)&(dB[i]), sizeof(Matrix));
        dB[i]->width = col; dB[i]->height = 1;
        hipMallocManaged((void **)&(W[i]->elements), row * col * sizeof(double));
        hipMallocManaged((void **)&(B[i]->elements), 1 * col * sizeof(double));
        hipMallocManaged((void **)&(a[i]->elements), batch * col * sizeof(double));
        hipMallocManaged((void **)&(delta[i]->elements), batch * col * sizeof(double));
        hipMallocManaged((void **)&(dW[i]->elements), row * col * sizeof(double));
        // FIX: dB[i] is declared 1 x col, but the original allocated
        // row * col doubles for it; allocate exactly what the matrix holds.
        hipMallocManaged((void **)&(dB[i]->elements), 1 * col * sizeof(double));
        printf("fc: %d -> %d\n", row, col);
        initialize(W[i], stdDev);
        initialize(B[i], 0);
    }
    // Per-row softmax normalizer (batch x 1).
    hipMallocManaged((void **)&(softMaxSum), sizeof(Matrix));
    softMaxSum->width = 1; softMaxSum->height = batch;
    hipMallocManaged((void **)&(softMaxSum->elements), batch * 1 * sizeof(double));
    // Ensure all host-side writes to managed memory are settled before use.
    hipDeviceSynchronize();
}
// Forward pass on the current test batch (Xt); adds the number of correctly
// classified rows to the global counter `cnt`.  The raw output scores in
// a[3] suffice for argmax, so no softmax is applied here.
void predict() {
dim3 blockSize(32, 32);
dim3 gridSize(32, 32);
// Input layer: Xt; hidden activations: a[0], a[1], a[2]; output: a[3].
// Each layer is dot(W) + bias, with ReLU on the three hidden layers.
hipLaunchKernelGGL(( matDotKernel) , dim3(gridSize), dim3(blockSize), 0, 0, Xt, W[0], a[0]);
hipLaunchKernelGGL(( matPlusKernel) , dim3(gridSize), dim3(blockSize), 0, 0, a[0], B[0], a[0]);
hipLaunchKernelGGL(( matReLUKernel) , dim3(gridSize), dim3(blockSize), 0, 0, a[0]);
hipLaunchKernelGGL(( matDotKernel) , dim3(gridSize), dim3(blockSize), 0, 0, a[0], W[1], a[1]);
hipLaunchKernelGGL(( matPlusKernel) , dim3(gridSize), dim3(blockSize), 0, 0, a[1], B[1], a[1]);
hipLaunchKernelGGL(( matReLUKernel) , dim3(gridSize), dim3(blockSize), 0, 0, a[1]);
hipLaunchKernelGGL(( matDotKernel) , dim3(gridSize), dim3(blockSize), 0, 0, a[1], W[2], a[2]);
hipLaunchKernelGGL(( matPlusKernel) , dim3(gridSize), dim3(blockSize), 0, 0, a[2], B[2], a[2]);
hipLaunchKernelGGL(( matReLUKernel) , dim3(gridSize), dim3(blockSize), 0, 0, a[2]);
hipLaunchKernelGGL(( matDotKernel) , dim3(gridSize), dim3(blockSize), 0, 0, a[2], W[3], a[3]);
hipLaunchKernelGGL(( matPlusKernel) , dim3(gridSize), dim3(blockSize), 0, 0, a[3], B[3], a[3]);
// Wait for the forward pass before reading a[3] on the host.
hipDeviceSynchronize();
// Host-side argmax over the 10 class scores of each row.  Yt is one-hot
// (set by dataCopy with oneHot=true), so a non-zero entry at the argmax
// column means the prediction is correct.
for (int i = 0; i < a[3]->height; i++) {
int maxIndex = 0;
double maxValue = a[3]->elements[i * a[3]->width];
for (int j = 0; j < a[3]->width; j++)
if (a[3]->elements[i * a[3]->width + j] > maxValue) {
maxIndex = j;
maxValue = a[3]->elements[i * a[3]->width + j];
}
if (Yt->elements[i * a[3]->width + maxIndex])
cnt++;
}
hipDeviceSynchronize();
}
// Forward pass on the current training batch (Xb) and count the correctly
// classified rows into the global `loss`.  NOTE(review): despite its name,
// `loss` accumulates correct predictions (an accuracy numerator), not a
// loss value -- trainModel prints it as a percentage of X_train.height.
void calLoss() {
dim3 blockSize(32, 32);
dim3 gridSize(32, 32);
// Input layer: Xb; hidden activations: a[0], a[1], a[2]; output: a[3].
hipLaunchKernelGGL(( matDotKernel) , dim3(gridSize), dim3(blockSize), 0, 0, Xb, W[0], a[0]);
hipLaunchKernelGGL(( matPlusKernel) , dim3(gridSize), dim3(blockSize), 0, 0, a[0], B[0], a[0]);
hipLaunchKernelGGL(( matReLUKernel) , dim3(gridSize), dim3(blockSize), 0, 0, a[0]);
hipLaunchKernelGGL(( matDotKernel) , dim3(gridSize), dim3(blockSize), 0, 0, a[0], W[1], a[1]);
hipLaunchKernelGGL(( matPlusKernel) , dim3(gridSize), dim3(blockSize), 0, 0, a[1], B[1], a[1]);
hipLaunchKernelGGL(( matReLUKernel) , dim3(gridSize), dim3(blockSize), 0, 0, a[1]);
hipLaunchKernelGGL(( matDotKernel) , dim3(gridSize), dim3(blockSize), 0, 0, a[1], W[2], a[2]);
hipLaunchKernelGGL(( matPlusKernel) , dim3(gridSize), dim3(blockSize), 0, 0, a[2], B[2], a[2]);
hipLaunchKernelGGL(( matReLUKernel) , dim3(gridSize), dim3(blockSize), 0, 0, a[2]);
hipLaunchKernelGGL(( matDotKernel) , dim3(gridSize), dim3(blockSize), 0, 0, a[2], W[3], a[3]);
hipLaunchKernelGGL(( matPlusKernel) , dim3(gridSize), dim3(blockSize), 0, 0, a[3], B[3], a[3]);
// Wait for the forward pass before reading a[3] on the host.
hipDeviceSynchronize();
// Argmax per row; Yb is one-hot, so a non-zero at the argmax column
// means the prediction matched the label.
for (int i = 0; i < a[3]->height; i++) {
int maxIndex = 0;
double maxValue = a[3]->elements[i * a[3]->width];
for (int j = 0; j < a[3]->width; j++)
if (a[3]->elements[i * a[3]->width + j] > maxValue) {
maxIndex = j;
maxValue = a[3]->elements[i * a[3]->width + j];
}
if (Yb->elements[i * a[3]->width + maxIndex])
loss++;
}
hipDeviceSynchronize();
}
// Forward pass on the training batch Xb through the four FC layers,
// finishing with a row-wise softmax stored in place in a[3].
void forwardPropagation() {
dim3 blockSize(32, 32);
dim3 gridSize(32, 32);
// Input layer: Xb; hidden activations: a[0], a[1], a[2]; output: a[3].
hipLaunchKernelGGL(( matDotKernel) , dim3(gridSize), dim3(blockSize), 0, 0, Xb, W[0], a[0]);
hipLaunchKernelGGL(( matPlusKernel) , dim3(gridSize), dim3(blockSize), 0, 0, a[0], B[0], a[0]);
hipLaunchKernelGGL(( matReLUKernel) , dim3(gridSize), dim3(blockSize), 0, 0, a[0]);
hipLaunchKernelGGL(( matDotKernel) , dim3(gridSize), dim3(blockSize), 0, 0, a[0], W[1], a[1]);
hipLaunchKernelGGL(( matPlusKernel) , dim3(gridSize), dim3(blockSize), 0, 0, a[1], B[1], a[1]);
hipLaunchKernelGGL(( matReLUKernel) , dim3(gridSize), dim3(blockSize), 0, 0, a[1]);
hipLaunchKernelGGL(( matDotKernel) , dim3(gridSize), dim3(blockSize), 0, 0, a[1], W[2], a[2]);
hipLaunchKernelGGL(( matPlusKernel) , dim3(gridSize), dim3(blockSize), 0, 0, a[2], B[2], a[2]);
hipLaunchKernelGGL(( matReLUKernel) , dim3(gridSize), dim3(blockSize), 0, 0, a[2]);
hipLaunchKernelGGL(( matDotKernel) , dim3(gridSize), dim3(blockSize), 0, 0, a[2], W[3], a[3]);
hipLaunchKernelGGL(( matPlusKernel) , dim3(gridSize), dim3(blockSize), 0, 0, a[3], B[3], a[3]);
// Softmax: exponentiate, reduce each row into softMaxSum, then divide.
// matSumKernel's last argument presumably selects the reduction axis
// (1 = per-row here) -- confirm against matrix_hip.cuh.
hipLaunchKernelGGL(( matExpKernel) , dim3(gridSize), dim3(blockSize), 0, 0, a[3]);
hipLaunchKernelGGL(( matSumKernel) , dim3(gridSize), dim3(blockSize), 0, 0, a[3], softMaxSum, 1);
hipLaunchKernelGGL(( matDivKernel) , dim3(gridSize), dim3(blockSize), 0, 0, a[3], softMaxSum);
hipDeviceSynchronize();
}
// Backward pass through the four FC layers followed by a vanilla SGD
// update with learning rate `epsilon`.  The trailing bool arguments of
// matDotKernel presumably request transposition of the first/second
// operand respectively -- confirm against matrix_hip.cuh.
void backPropagation() {
dim3 blockSize(32, 32);
dim3 gridSize(32, 32);
// Output-layer error: delta[3] = softmax output - one-hot labels.
hipLaunchKernelGGL(( matSubKernel) , dim3(gridSize), dim3(blockSize), 0, 0, a[3], Yb, delta[3]);
hipDeviceSynchronize();
// Layer 3 gradients: dW[3] = a[2]^T . delta[3]; dB[3] = column sums.
hipLaunchKernelGGL(( matDotKernel) , dim3(gridSize), dim3(blockSize), 0, 0, a[2], delta[3], dW[3], true);
hipLaunchKernelGGL(( matSumKernel) , dim3(gridSize), dim3(blockSize), 0, 0, delta[3], dB[3], 0);
hipDeviceSynchronize();
// Propagate to layer 2: delta[2] = (delta[3] . W[3]^T) * ReLU'(a[2]).
hipLaunchKernelGGL(( matDerReLUKernel) , dim3(gridSize), dim3(blockSize), 0, 0, a[2]);
hipLaunchKernelGGL(( matDotKernel) , dim3(gridSize), dim3(blockSize), 0, 0, delta[3], W[3], delta[2], false, true);
hipLaunchKernelGGL(( matMulKernel) , dim3(gridSize), dim3(blockSize), 0, 0, delta[2], a[2], delta[2]);
hipLaunchKernelGGL(( matDotKernel) , dim3(gridSize), dim3(blockSize), 0, 0, a[1], delta[2], dW[2], true);
hipLaunchKernelGGL(( matSumKernel) , dim3(gridSize), dim3(blockSize), 0, 0, delta[2], dB[2], 0);
hipDeviceSynchronize();
// Propagate to layer 1.
hipLaunchKernelGGL(( matDerReLUKernel) , dim3(gridSize), dim3(blockSize), 0, 0, a[1]);
hipLaunchKernelGGL(( matDotKernel) , dim3(gridSize), dim3(blockSize), 0, 0, delta[2], W[2], delta[1], false, true);
hipLaunchKernelGGL(( matMulKernel) , dim3(gridSize), dim3(blockSize), 0, 0, delta[1], a[1], delta[1]);
hipLaunchKernelGGL(( matDotKernel) , dim3(gridSize), dim3(blockSize), 0, 0, a[0], delta[1], dW[1], true);
hipLaunchKernelGGL(( matSumKernel) , dim3(gridSize), dim3(blockSize), 0, 0, delta[1], dB[1], 0);
hipDeviceSynchronize();
// Propagate to layer 0 (gradients w.r.t. the input weights use Xb).
hipLaunchKernelGGL(( matDerReLUKernel) , dim3(gridSize), dim3(blockSize), 0, 0, a[0]);
hipLaunchKernelGGL(( matDotKernel) , dim3(gridSize), dim3(blockSize), 0, 0, delta[1], W[1], delta[0], false, true);
hipLaunchKernelGGL(( matMulKernel) , dim3(gridSize), dim3(blockSize), 0, 0, delta[0], a[0], delta[0]);
hipLaunchKernelGGL(( matDotKernel) , dim3(gridSize), dim3(blockSize), 0, 0, Xb, delta[0], dW[0], true);
hipLaunchKernelGGL(( matSumKernel) , dim3(gridSize), dim3(blockSize), 0, 0, delta[0], dB[0], 0);
hipDeviceSynchronize();
// SGD step for every layer: W -= epsilon * dW, B -= epsilon * dB.
for (int i = 0; i < 4; i++) {
hipLaunchKernelGGL(( matMulKernel) , dim3(gridSize), dim3(blockSize), 0, 0, dW[i], epsilon);
hipLaunchKernelGGL(( matMulKernel) , dim3(gridSize), dim3(blockSize), 0, 0, dB[i], epsilon);
hipLaunchKernelGGL(( matSubKernel) , dim3(gridSize), dim3(blockSize), 0, 0, W[i], dW[i], W[i]);
hipLaunchKernelGGL(( matSubKernel) , dim3(gridSize), dim3(blockSize), 0, 0, B[i], dB[i], B[i]);
hipDeviceSynchronize();
}
}
// Mini-batch SGD training loop.
// numPasses: number of batch iterations to run (inclusive of iteration 0).
// printLoss: every 100 iterations, decay epsilon by 1% and print test-set
// accuracy; once per epoch print the accumulated training statistic.
void trainModel(int numPasses, bool printLoss) {
int i;
for (i = 0; i <= numPasses; i++) {
int j = i % nbsPerEpoch;
// Reshuffle the training set at the start of every epoch.
// (random_shuffle was removed in C++17; fine for older toolchains.)
if (j == 0) {
vector<int> ridx(numExamples);
int k;
for (k = 0; k < numExamples; k++)
ridx[k] = k;
random_shuffle(ridx.begin(), ridx.end());
X_train.shuffle(ridx);
Y_train.shuffle(ridx);
}
// Copy the next mini-batch; the final `true` presumably requests
// one-hot expansion of the labels -- confirm in matrix_hip.cuh.
dataCopy(Xb, X_train, j * batch, (j + 1) * batch);
dataCopy(Yb, Y_train, j * batch, (j + 1) * batch, true);
forwardPropagation();
backPropagation();
if (printLoss && (i % 100 == 0)) {
epsilon *= 0.99;
cnt = 0;
// Evaluate the whole test set batch by batch (predict bumps cnt).
for (int k = 0; k < (int)(X_test.height / batch); k++) {
dataCopy(Xt, X_test, k * batch, (k + 1) * batch);
dataCopy(Yt, Y_test, k * batch, (k + 1) * batch, true);
predict();
hipDeviceSynchronize();
}
double accuracy = (cnt * 1.0 / X_test.height);
struct tm *p;
time_t t = time(0);
p = localtime(&t);
printf("%02d:%02d:%02d testing accuracy after iteration %d: %.2lf%%\n", p->tm_hour, p->tm_min, p->tm_sec, i, accuracy * 100);
}
// Once per epoch, print the accumulated training statistic.  NOTE:
// `loss` counts correct predictions (see calLoss), so this is really
// a training accuracy percentage despite the label "train loss".
if (printLoss && (j == 0) && (i != 0)) {
double accuracy = (loss * 1.0 / X_train.height);
struct tm *p;
time_t t = time(0);
p = localtime(&t);
printf("\n%02d:%02d:%02d train loss after iteration %d: %.2lf%%\n\n", p->tm_hour, p->tm_min, p->tm_sec, i, accuracy * 100);
loss = 0;
}
calLoss();
}
} | 429e59aa83f72c247bb802d72fe70182fca56303.cu | #include <vector>
#include <algorithm>
#include "cifar10_reader.cuh"
#include "matrix.cuh"
int trainLen = 50000;
int testLen = 10000;
int cnt, loss;
const int batch = 200;
double epsilon = 0.0001;
double regLambda = 0.00;
int numExamples, nbsPerEpoch, inputDim, nnHdim[5] = {0, 2048, 1024, 512, 10};
Matrix *Xb, *Yb, *Xt, *Yt, *Y_pred, *W[4], *B[4], *a[4], *delta[4], *dW[4], *dB[4];
Matrix *softMaxSum;
Matrix X_train, Y_train, X_test, Y_test;
void generate();
void predict();
void calLoss();
void forwardPropagation();
void backPropagation();
void trainModel(int numPasses, bool printLoss);
// Entry point: load CIFAR-10, size the network from the data, allocate all
// buffers (generate) and run mini-batch SGD training (trainModel).
int main() {
cout.precision(16);
// readData() returns managed Matrix objects keyed by split name.
unordered_map<string, Matrix> dataMap = readData();
X_train = dataMap["trainImages"];
Y_train = dataMap["trainLabels"];
X_test = dataMap["testImages"];
Y_test = dataMap["testLabels"];
printf("(%d, %d) (%d, %d)\n", X_train.height, X_train.width, Y_train.height, Y_train.width);
printf("(%d, %d) (%d, %d)\n", X_test.height, X_test.width, Y_test.height, Y_test.width);
numExamples = X_train.height;
inputDim = X_train.width;
// Number of mini-batches per full pass over the training set.
nbsPerEpoch = (int)(numExamples / batch);
generate();
trainModel(10000, true);
return 0;
}
/*
 * Allocate unified (managed) memory for the whole network: mini-batch
 * input/label buffers, and for each of the four fully-connected layers the
 * weights W, biases B, activations a, error terms delta and gradients
 * dW/dB, plus the softmax row-sum scratch matrix.  Layer widths come from
 * nnHdim; nnHdim[0] is patched to the flattened input dimension first.
 * Weights are initialized via initialize().
 */
void generate() {
    printf("input dim: %d\n", inputDim);
    nnHdim[0] = inputDim;
    // Mini-batch matrices: training batch (Xb/Yb), test batch (Xt/Yt) and
    // the prediction buffer.  Each Matrix header and its element storage
    // are separate managed allocations.
    cudaMallocManaged((void **)&(Xb), sizeof(Matrix));
    Xb->height = batch; Xb->width = inputDim;
    cudaMallocManaged((void **)&(Yb), sizeof(Matrix));
    Yb->height = batch; Yb->width = 10;
    cudaMallocManaged((void **)&(Xt), sizeof(Matrix));
    Xt->height = batch; Xt->width = inputDim;
    cudaMallocManaged((void **)&(Yt), sizeof(Matrix));
    Yt->height = batch; Yt->width = 10;
    cudaMallocManaged((void **)&(Y_pred), sizeof(Matrix));
    Y_pred->height = batch; Y_pred->width = 10;
    cudaMallocManaged((void **)&(Xb->elements), batch * inputDim * sizeof(double));
    cudaMallocManaged((void **)&(Yb->elements), batch * 10 * sizeof(double));
    cudaMallocManaged((void **)&(Xt->elements), batch * inputDim * sizeof(double));
    cudaMallocManaged((void **)&(Yt->elements), batch * 10 * sizeof(double));
    cudaMallocManaged((void **)&(Y_pred->elements), batch * 10 * sizeof(double));
    // One iteration per fully-connected layer: row = fan-in, col = fan-out.
    for (int i = 0; i < 4; i++) {
        int row = nnHdim[i], col = nnHdim[i + 1];
        // Spread parameter handed to initialize(); presumably a weight-init
        // scale (sqrt of fan-out) -- confirm against matrix.cuh.
        // Renamed from `std` to avoid shadowing the std namespace name.
        double stdDev = sqrt(col);
        cudaMallocManaged((void **)&(W[i]), sizeof(Matrix));
        W[i]->width = col; W[i]->height = row;
        cudaMallocManaged((void **)&(B[i]), sizeof(Matrix));
        B[i]->width = col; B[i]->height = 1;
        cudaMallocManaged((void **)&(a[i]), sizeof(Matrix));
        a[i]->width = col; a[i]->height = batch;
        cudaMallocManaged((void **)&(delta[i]), sizeof(Matrix));
        delta[i]->width = col; delta[i]->height = batch;
        cudaMallocManaged((void **)&(dW[i]), sizeof(Matrix));
        dW[i]->width = col; dW[i]->height = row;
        cudaMallocManaged((void **)&(dB[i]), sizeof(Matrix));
        dB[i]->width = col; dB[i]->height = 1;
        cudaMallocManaged((void **)&(W[i]->elements), row * col * sizeof(double));
        cudaMallocManaged((void **)&(B[i]->elements), 1 * col * sizeof(double));
        cudaMallocManaged((void **)&(a[i]->elements), batch * col * sizeof(double));
        cudaMallocManaged((void **)&(delta[i]->elements), batch * col * sizeof(double));
        cudaMallocManaged((void **)&(dW[i]->elements), row * col * sizeof(double));
        // FIX: dB[i] is declared 1 x col, but the original allocated
        // row * col doubles for it; allocate exactly what the matrix holds.
        cudaMallocManaged((void **)&(dB[i]->elements), 1 * col * sizeof(double));
        printf("fc: %d -> %d\n", row, col);
        initialize(W[i], stdDev);
        initialize(B[i], 0);
    }
    // Per-row softmax normalizer (batch x 1).
    cudaMallocManaged((void **)&(softMaxSum), sizeof(Matrix));
    softMaxSum->width = 1; softMaxSum->height = batch;
    cudaMallocManaged((void **)&(softMaxSum->elements), batch * 1 * sizeof(double));
    // Ensure all host-side writes to managed memory are settled before use.
    cudaDeviceSynchronize();
}
// Forward pass on the current test batch (Xt); adds the number of correctly
// classified rows to the global counter `cnt`.  The raw output scores in
// a[3] suffice for argmax, so no softmax is applied here.
void predict() {
dim3 blockSize(32, 32);
dim3 gridSize(32, 32);
// Input layer: Xt; hidden activations: a[0], a[1], a[2]; output: a[3].
// Each layer is dot(W) + bias, with ReLU on the three hidden layers.
matDotKernel <<<gridSize, blockSize>>> (Xt, W[0], a[0]);
matPlusKernel <<<gridSize, blockSize>>> (a[0], B[0], a[0]);
matReLUKernel <<<gridSize, blockSize>>> (a[0]);
matDotKernel <<<gridSize, blockSize>>> (a[0], W[1], a[1]);
matPlusKernel <<<gridSize, blockSize>>> (a[1], B[1], a[1]);
matReLUKernel <<<gridSize, blockSize>>> (a[1]);
matDotKernel <<<gridSize, blockSize>>> (a[1], W[2], a[2]);
matPlusKernel <<<gridSize, blockSize>>> (a[2], B[2], a[2]);
matReLUKernel <<<gridSize, blockSize>>> (a[2]);
matDotKernel <<<gridSize, blockSize>>> (a[2], W[3], a[3]);
matPlusKernel <<<gridSize, blockSize>>> (a[3], B[3], a[3]);
// Wait for the forward pass before reading a[3] on the host.
cudaDeviceSynchronize();
// Host-side argmax over the 10 class scores of each row.  Yt is one-hot,
// so a non-zero entry at the argmax column means a correct prediction.
for (int i = 0; i < a[3]->height; i++) {
int maxIndex = 0;
double maxValue = a[3]->elements[i * a[3]->width];
for (int j = 0; j < a[3]->width; j++)
if (a[3]->elements[i * a[3]->width + j] > maxValue) {
maxIndex = j;
maxValue = a[3]->elements[i * a[3]->width + j];
}
if (Yt->elements[i * a[3]->width + maxIndex])
cnt++;
}
cudaDeviceSynchronize();
}
// Forward pass on the current training batch (Xb) and count the correctly
// classified rows into the global `loss`.  NOTE(review): despite its name,
// `loss` accumulates correct predictions (an accuracy numerator), not a
// loss value -- trainModel prints it as a percentage of X_train.height.
void calLoss() {
dim3 blockSize(32, 32);
dim3 gridSize(32, 32);
// Input layer: Xb; hidden activations: a[0], a[1], a[2]; output: a[3].
matDotKernel <<<gridSize, blockSize>>> (Xb, W[0], a[0]);
matPlusKernel <<<gridSize, blockSize>>> (a[0], B[0], a[0]);
matReLUKernel <<<gridSize, blockSize>>> (a[0]);
matDotKernel <<<gridSize, blockSize>>> (a[0], W[1], a[1]);
matPlusKernel <<<gridSize, blockSize>>> (a[1], B[1], a[1]);
matReLUKernel <<<gridSize, blockSize>>> (a[1]);
matDotKernel <<<gridSize, blockSize>>> (a[1], W[2], a[2]);
matPlusKernel <<<gridSize, blockSize>>> (a[2], B[2], a[2]);
matReLUKernel <<<gridSize, blockSize>>> (a[2]);
matDotKernel <<<gridSize, blockSize>>> (a[2], W[3], a[3]);
matPlusKernel <<<gridSize, blockSize>>> (a[3], B[3], a[3]);
// Wait for the forward pass before reading a[3] on the host.
cudaDeviceSynchronize();
// Argmax per row; Yb is one-hot, so a non-zero at the argmax column
// means the prediction matched the label.
for (int i = 0; i < a[3]->height; i++) {
int maxIndex = 0;
double maxValue = a[3]->elements[i * a[3]->width];
for (int j = 0; j < a[3]->width; j++)
if (a[3]->elements[i * a[3]->width + j] > maxValue) {
maxIndex = j;
maxValue = a[3]->elements[i * a[3]->width + j];
}
if (Yb->elements[i * a[3]->width + maxIndex])
loss++;
}
cudaDeviceSynchronize();
}
// Forward pass on the training batch Xb through the four FC layers,
// finishing with a row-wise softmax stored in place in a[3].
void forwardPropagation() {
dim3 blockSize(32, 32);
dim3 gridSize(32, 32);
// Input layer: Xb; hidden activations: a[0], a[1], a[2]; output: a[3].
matDotKernel <<<gridSize, blockSize>>> (Xb, W[0], a[0]);
matPlusKernel <<<gridSize, blockSize>>> (a[0], B[0], a[0]);
matReLUKernel <<<gridSize, blockSize>>> (a[0]);
matDotKernel <<<gridSize, blockSize>>> (a[0], W[1], a[1]);
matPlusKernel <<<gridSize, blockSize>>> (a[1], B[1], a[1]);
matReLUKernel <<<gridSize, blockSize>>> (a[1]);
matDotKernel <<<gridSize, blockSize>>> (a[1], W[2], a[2]);
matPlusKernel <<<gridSize, blockSize>>> (a[2], B[2], a[2]);
matReLUKernel <<<gridSize, blockSize>>> (a[2]);
matDotKernel <<<gridSize, blockSize>>> (a[2], W[3], a[3]);
matPlusKernel <<<gridSize, blockSize>>> (a[3], B[3], a[3]);
// Softmax: exponentiate, reduce each row into softMaxSum, then divide.
// matSumKernel's last argument presumably selects the reduction axis
// (1 = per-row here) -- confirm against matrix.cuh.
matExpKernel <<<gridSize, blockSize>>> (a[3]);
matSumKernel <<<gridSize, blockSize>>> (a[3], softMaxSum, 1);
matDivKernel <<<gridSize, blockSize>>> (a[3], softMaxSum);
cudaDeviceSynchronize();
}
// Backward pass through the four FC layers followed by a vanilla SGD
// update with learning rate `epsilon`.  The trailing bool arguments of
// matDotKernel presumably request transposition of the first/second
// operand respectively -- confirm against matrix.cuh.
void backPropagation() {
dim3 blockSize(32, 32);
dim3 gridSize(32, 32);
// Output-layer error: delta[3] = softmax output - one-hot labels.
matSubKernel <<<gridSize, blockSize>>> (a[3], Yb, delta[3]);
cudaDeviceSynchronize();
// Layer 3 gradients: dW[3] = a[2]^T . delta[3]; dB[3] = column sums.
matDotKernel <<<gridSize, blockSize>>> (a[2], delta[3], dW[3], true);
matSumKernel <<<gridSize, blockSize>>> (delta[3], dB[3], 0);
cudaDeviceSynchronize();
// Propagate to layer 2: delta[2] = (delta[3] . W[3]^T) * ReLU'(a[2]).
matDerReLUKernel <<<gridSize, blockSize>>> (a[2]);
matDotKernel <<<gridSize, blockSize>>> (delta[3], W[3], delta[2], false, true);
matMulKernel <<<gridSize, blockSize>>> (delta[2], a[2], delta[2]);
matDotKernel <<<gridSize, blockSize>>> (a[1], delta[2], dW[2], true);
matSumKernel <<<gridSize, blockSize>>> (delta[2], dB[2], 0);
cudaDeviceSynchronize();
// Propagate to layer 1.
matDerReLUKernel <<<gridSize, blockSize>>> (a[1]);
matDotKernel <<<gridSize, blockSize>>> (delta[2], W[2], delta[1], false, true);
matMulKernel <<<gridSize, blockSize>>> (delta[1], a[1], delta[1]);
matDotKernel <<<gridSize, blockSize>>> (a[0], delta[1], dW[1], true);
matSumKernel <<<gridSize, blockSize>>> (delta[1], dB[1], 0);
cudaDeviceSynchronize();
// Propagate to layer 0 (gradients w.r.t. the input weights use Xb).
matDerReLUKernel <<<gridSize, blockSize>>> (a[0]);
matDotKernel <<<gridSize, blockSize>>> (delta[1], W[1], delta[0], false, true);
matMulKernel <<<gridSize, blockSize>>> (delta[0], a[0], delta[0]);
matDotKernel <<<gridSize, blockSize>>> (Xb, delta[0], dW[0], true);
matSumKernel <<<gridSize, blockSize>>> (delta[0], dB[0], 0);
cudaDeviceSynchronize();
// SGD step for every layer: W -= epsilon * dW, B -= epsilon * dB.
for (int i = 0; i < 4; i++) {
matMulKernel <<<gridSize, blockSize>>> (dW[i], epsilon);
matMulKernel <<<gridSize, blockSize>>> (dB[i], epsilon);
matSubKernel <<<gridSize, blockSize>>> (W[i], dW[i], W[i]);
matSubKernel <<<gridSize, blockSize>>> (B[i], dB[i], B[i]);
cudaDeviceSynchronize();
}
}
// Mini-batch SGD training loop.
// numPasses: number of batch iterations to run (inclusive of iteration 0).
// printLoss: every 100 iterations, decay epsilon by 1% and print test-set
// accuracy; once per epoch print the accumulated training statistic.
void trainModel(int numPasses, bool printLoss) {
int i;
for (i = 0; i <= numPasses; i++) {
int j = i % nbsPerEpoch;
// Reshuffle the training set at the start of every epoch.
// (random_shuffle was removed in C++17; fine for older toolchains.)
if (j == 0) {
vector<int> ridx(numExamples);
int k;
for (k = 0; k < numExamples; k++)
ridx[k] = k;
random_shuffle(ridx.begin(), ridx.end());
X_train.shuffle(ridx);
Y_train.shuffle(ridx);
}
// Copy the next mini-batch; the final `true` presumably requests
// one-hot expansion of the labels -- confirm in matrix.cuh.
dataCopy(Xb, X_train, j * batch, (j + 1) * batch);
dataCopy(Yb, Y_train, j * batch, (j + 1) * batch, true);
forwardPropagation();
backPropagation();
if (printLoss && (i % 100 == 0)) {
epsilon *= 0.99;
cnt = 0;
// Evaluate the whole test set batch by batch (predict bumps cnt).
for (int k = 0; k < (int)(X_test.height / batch); k++) {
dataCopy(Xt, X_test, k * batch, (k + 1) * batch);
dataCopy(Yt, Y_test, k * batch, (k + 1) * batch, true);
predict();
cudaDeviceSynchronize();
}
double accuracy = (cnt * 1.0 / X_test.height);
struct tm *p;
time_t t = time(0);
p = localtime(&t);
printf("%02d:%02d:%02d testing accuracy after iteration %d: %.2lf%%\n", p->tm_hour, p->tm_min, p->tm_sec, i, accuracy * 100);
}
// Once per epoch, print the accumulated training statistic.  NOTE:
// `loss` counts correct predictions (see calLoss), so this is really
// a training accuracy percentage despite the label "train loss".
if (printLoss && (j == 0) && (i != 0)) {
double accuracy = (loss * 1.0 / X_train.height);
struct tm *p;
time_t t = time(0);
p = localtime(&t);
printf("\n%02d:%02d:%02d train loss after iteration %d: %.2lf%%\n\n", p->tm_hour, p->tm_min, p->tm_sec, i, accuracy * 100);
loss = 0;
}
calLoss();
}
} |
64805dfd78931867ff58127ce957305548a84a0f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Minimal demo kernel: assigns a local variable and exits.  It has no
// observable effect (`id` is unused and will be optimized away).
__global__ void kernel( void ) {
int id = 1;
} | 64805dfd78931867ff58127ce957305548a84a0f.cu | __global__ void kernel( void ) {
int id = 1;
} |
ddbbfb907fbeb443768b85278db8bd8268484e65.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "naive.h"
namespace StreamCompaction {
namespace Naive {
const int BlockSize = StreamCompaction::Common::BlockSize;
using StreamCompaction::Common::PerformanceTimer;
// Lazily constructed singleton timer shared by this namespace.
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
// One step of the naive (Hillis-Steele) scan: elements at k >= offset get
// output[k] = input[k - offset] + input[k]; earlier elements are copied
// through.  Separate input/output buffers avoid intra-step read/write
// races; the host ping-pongs the buffers between steps.
__global__ void scan(int n, int offset, int *input, int *output)
{
int k = threadIdx.x + (blockDim.x * blockIdx.x);
if (k >= n)
return;
if (k >= offset)
{
output[k] = input[k - offset] + input[k];
}
else
{
output[k] = input[k];
}
}
/**
 * Performs prefix-sum (aka scan) on idata, storing the result into odata.
 * Computes an inclusive scan on the GPU in ilog2ceil(n) passes, then
 * shifts by one on copy-back to produce the exclusive scan.
 */
void scan(int n, int *odata, const int *idata) {
int *dev_in, *dev_out;
hipMalloc((void**)&dev_in, n * sizeof(int));
checkCUDAError("hipMalloc dev1 failed");
hipMalloc((void**)&dev_out, n * sizeof(int));
checkCUDAError("hipMalloc dev2 failed");
hipMemcpy(dev_in, idata, n*sizeof(int), hipMemcpyHostToDevice);
timer().startGpuTimer();
dim3 fulllBlocksPerGrid((n + BlockSize - 1)/BlockSize);
int maxD = ilog2ceil(n);
int offset;
// Doubling offsets 1, 2, 4, ... ; buffers swap roles each pass.
for (int d = 1; d <= maxD; ++d)
{
//2^(d-1)
offset = (1 << (d - 1));
scan << < fulllBlocksPerGrid, BlockSize >> >(n, offset, dev_in, dev_out);
int *tmp = dev_in;
dev_in = dev_out;
dev_out = tmp;
}
// last swap back in and out buffer (final result ends up in dev_out)
int *tmp = dev_in;
dev_in = dev_out;
dev_out = tmp;
timer().endGpuTimer();
// exclusive scan: shift the inclusive result right by one, identity first
hipMemcpy(odata + 1, dev_out, (n-1)*sizeof(int), hipMemcpyDeviceToHost);
odata[0] = 0;
hipFree(dev_in);
hipFree(dev_out);
checkCUDAError("naive scan error return");
}
}
}
| ddbbfb907fbeb443768b85278db8bd8268484e65.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "naive.h"
namespace StreamCompaction {
namespace Naive {
const int BlockSize = StreamCompaction::Common::BlockSize;
using StreamCompaction::Common::PerformanceTimer;
// Lazily constructed singleton timer shared by this namespace.
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
// One step of the naive (Hillis-Steele) scan: elements at k >= offset get
// output[k] = input[k - offset] + input[k]; earlier elements are copied
// through.  Separate input/output buffers avoid intra-step read/write
// races; the host ping-pongs the buffers between steps.
__global__ void scan(int n, int offset, int *input, int *output)
{
int k = threadIdx.x + (blockDim.x * blockIdx.x);
if (k >= n)
return;
if (k >= offset)
{
output[k] = input[k - offset] + input[k];
}
else
{
output[k] = input[k];
}
}
/**
 * Performs prefix-sum (aka scan) on idata, storing the result into odata.
 * Computes an inclusive scan on the GPU in ilog2ceil(n) passes, then
 * shifts by one on copy-back to produce the exclusive scan.
 */
void scan(int n, int *odata, const int *idata) {
int *dev_in, *dev_out;
cudaMalloc((void**)&dev_in, n * sizeof(int));
checkCUDAError("cudaMalloc dev1 failed");
cudaMalloc((void**)&dev_out, n * sizeof(int));
checkCUDAError("cudaMalloc dev2 failed");
cudaMemcpy(dev_in, idata, n*sizeof(int), cudaMemcpyHostToDevice);
timer().startGpuTimer();
dim3 fulllBlocksPerGrid((n + BlockSize - 1)/BlockSize);
int maxD = ilog2ceil(n);
int offset;
// Doubling offsets 1, 2, 4, ... ; buffers swap roles each pass.
for (int d = 1; d <= maxD; ++d)
{
//2^(d-1)
offset = (1 << (d - 1));
scan << < fulllBlocksPerGrid, BlockSize >> >(n, offset, dev_in, dev_out);
int *tmp = dev_in;
dev_in = dev_out;
dev_out = tmp;
}
// last swap back in and out buffer (final result ends up in dev_out)
int *tmp = dev_in;
dev_in = dev_out;
dev_out = tmp;
timer().endGpuTimer();
// exclusive scan: shift the inclusive result right by one, identity first
cudaMemcpy(odata + 1, dev_out, (n-1)*sizeof(int), cudaMemcpyDeviceToHost);
odata[0] = 0;
cudaFree(dev_in);
cudaFree(dev_out);
checkCUDAError("naive scan error return");
}
}
}
|
d1660f843f9b33f408321c5d6d2d3bb0351aaf98.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Elementwise natural logarithm: dst[i] = log(src[i]) for i in [0, n).
// Uses the fast-math intrinsic __logf (reduced precision) and a
// grid-stride loop, so any launch configuration covers all n elements.
__global__ void cu_log(const float* src, float* dst, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
dst[tid] = __logf(src[tid]);
tid += stride;
}
} | d1660f843f9b33f408321c5d6d2d3bb0351aaf98.cu | #include "includes.h"
// Elementwise natural logarithm: dst[i] = log(src[i]) for i in [0, n).
// Uses the fast-math intrinsic __logf (reduced precision) and a
// grid-stride loop, so any launch configuration covers all n elements.
__global__ void cu_log(const float* src, float* dst, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
dst[tid] = __logf(src[tid]);
tid += stride;
}
} |
a36e8265059d95d309089783345a0a0d2596a279.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#define WINDOW_WIDTH 9
#define WINDOW_HEIGHT 7
#define IMAGE_WIDTH 1280
#define IMAGE_HEIGHT 720
#define BLOCK_SIZE 128
#define LINES_PER_BLOCK 16
// Debug sketch of a census-transform tiling scheme.  Each block covers
// (BLOCK_SIZE - WINDOW_WIDTH + 1) image columns (overlapping neighbours by
// the half-window) and LINES_PER_BLOCK rows; each thread walks
// WINDOW_HEIGHT rows of its column and records the (row, col) of in-bounds
// pixels into ret[0..WINDOW_HEIGHT) / ret[WINDOW_HEIGHT..2*WINDOW_HEIGHT).
// NOTE(review): the `width`/`height` parameters are unused (the IMAGE_*
// macros are used instead), every thread prints, and all threads race on
// the shared ret[] slots -- clearly debug-only code.
__global__ static void censusTransform(int width, int height, int * ret)
{
int half_kw = WINDOW_WIDTH /2;
int half_kh = WINDOW_HEIGHT /2 ;
int tid = threadIdx.x;
// Top-left of this block's tile, shifted left by the half-window.
int x0 = blockIdx.x * (BLOCK_SIZE - WINDOW_WIDTH + 1) - half_kw;
int y0 = blockIdx.y * LINES_PER_BLOCK;
printf("block Dim (%d %d) (%d %d) \n", blockDim.x, blockDim.y, blockIdx.x, blockIdx.y);
//printf("block id (%d %d) \n", blockIdx.x, blockIdx.y);
for (int i = 0; i < WINDOW_HEIGHT; ++i) {
int x = x0 + tid;
int y = y0 - half_kh + i;
int value = 0;
if (0 <= x && x < IMAGE_WIDTH && 0 <= y && y < IMAGE_HEIGHT) {
value = x+y*IMAGE_WIDTH;
// Decompose the linear pixel index back into (row, col) for inspection.
ret[i] = value / IMAGE_WIDTH;
ret[WINDOW_HEIGHT + i] = value % IMAGE_WIDTH;
//printf("row %d col %d \n", value / IMAGE_WIDTH, value % IMAGE_WIDTH);
}
}
// No shared memory is used in this kernel, so this barrier pairs with
// nothing; presumably left over from a fuller implementation.
__syncthreads();
}
// Host driver for the censusTransform debug kernel: allocates a
// 2*WINDOW_HEIGHT int scratch buffer on the device, launches one block per
// tile over the whole image, copies the buffer back and prints the recorded
// (row, col) pairs.
// NOTE(review): no error checking; the blocking hipMemcpy is the only
// synchronization after the launch -- adequate for a demo only.
void censusDemo(void)
{
printf("censusDemo +++++\n");
int * ret = 0;
int host_ret[WINDOW_HEIGHT*2] = {0};
hipMalloc((void**) &ret, sizeof(int)*WINDOW_HEIGHT*2);
// Effective (non-overlapping) tile size covered by each block.
int width_per_block = BLOCK_SIZE - WINDOW_WIDTH + 1;
int height_per_block = LINES_PER_BLOCK;
dim3 grid((IMAGE_WIDTH + width_per_block - 1) / width_per_block, (IMAGE_HEIGHT + height_per_block - 1) / height_per_block);
//dim3 grid(1,1);
dim3 block(BLOCK_SIZE);
hipLaunchKernelGGL(( censusTransform), dim3(grid),dim3(block), 0, 0, IMAGE_WIDTH, IMAGE_HEIGHT, ret);
hipMemcpy(host_ret, ret, sizeof(int)*WINDOW_HEIGHT*2, hipMemcpyDeviceToHost);
for (int i = 0; i < WINDOW_HEIGHT; i++) {
printf("(%d %d)", host_ret[i], host_ret[WINDOW_HEIGHT+i]);
}
hipFree(ret);
}
| a36e8265059d95d309089783345a0a0d2596a279.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#define WINDOW_WIDTH 9
#define WINDOW_HEIGHT 7
#define IMAGE_WIDTH 1280
#define IMAGE_HEIGHT 720
#define BLOCK_SIZE 128
#define LINES_PER_BLOCK 16
// Debug sketch of a census-transform tiling scheme.  Each block covers
// (BLOCK_SIZE - WINDOW_WIDTH + 1) image columns (overlapping neighbours by
// the half-window) and LINES_PER_BLOCK rows; each thread walks
// WINDOW_HEIGHT rows of its column and records the (row, col) of in-bounds
// pixels into ret[0..WINDOW_HEIGHT) / ret[WINDOW_HEIGHT..2*WINDOW_HEIGHT).
// NOTE(review): the `width`/`height` parameters are unused (the IMAGE_*
// macros are used instead), every thread prints, and all threads race on
// the shared ret[] slots -- clearly debug-only code.
__global__ static void censusTransform(int width, int height, int * ret)
{
int half_kw = WINDOW_WIDTH /2;
int half_kh = WINDOW_HEIGHT /2 ;
int tid = threadIdx.x;
// Top-left of this block's tile, shifted left by the half-window.
int x0 = blockIdx.x * (BLOCK_SIZE - WINDOW_WIDTH + 1) - half_kw;
int y0 = blockIdx.y * LINES_PER_BLOCK;
printf("block Dim (%d %d) (%d %d) \n", blockDim.x, blockDim.y, blockIdx.x, blockIdx.y);
//printf("block id (%d %d) \n", blockIdx.x, blockIdx.y);
for (int i = 0; i < WINDOW_HEIGHT; ++i) {
int x = x0 + tid;
int y = y0 - half_kh + i;
int value = 0;
if (0 <= x && x < IMAGE_WIDTH && 0 <= y && y < IMAGE_HEIGHT) {
value = x+y*IMAGE_WIDTH;
// Decompose the linear pixel index back into (row, col) for inspection.
ret[i] = value / IMAGE_WIDTH;
ret[WINDOW_HEIGHT + i] = value % IMAGE_WIDTH;
//printf("row %d col %d \n", value / IMAGE_WIDTH, value % IMAGE_WIDTH);
}
}
// No shared memory is used in this kernel, so this barrier pairs with
// nothing; presumably left over from a fuller implementation.
__syncthreads();
}
{
printf("censusDemo +++++\n");
int * ret = 0;
int host_ret[WINDOW_HEIGHT*2] = {0};
cudaMalloc((void**) &ret, sizeof(int)*WINDOW_HEIGHT*2);
int width_per_block = BLOCK_SIZE - WINDOW_WIDTH + 1;
int height_per_block = LINES_PER_BLOCK;
dim3 grid((IMAGE_WIDTH + width_per_block - 1) / width_per_block, (IMAGE_HEIGHT + height_per_block - 1) / height_per_block);
//dim3 grid(1,1);
dim3 block(BLOCK_SIZE);
censusTransform<<<grid,block>>>(IMAGE_WIDTH, IMAGE_HEIGHT, ret);
cudaMemcpy(host_ret, ret, sizeof(int)*WINDOW_HEIGHT*2, cudaMemcpyDeviceToHost);
for (int i = 0; i < WINDOW_HEIGHT; i++) {
printf("(%d %d)", host_ret[i], host_ret[WINDOW_HEIGHT+i]);
}
cudaFree(ret);
}
|
13c89525cd1ccbd943a169cce9ae28f5ef1fcc3a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdlib>
#include <cstdio>
#include <iomanip>
#include <fstream>
#include <sstream>
#include <iostream>
#include <cmath>
#include <time.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <string.h>
#include <map>
#include <cassert>
#include <vector>
#include <limits>
using namespace std;
#include "contraintes_gpu.h"
// Per-sphere local stress tensor from contact forces: one thread per
// sphere.  For sphere `numcor`, accumulates coefij * R * n_a * F_b over its
// NBCONTCO[numcor] contacts, symmetrizes the off-diagonal terms and writes
// the six independent components into the PSIG* arrays.
// Layouts visible here: NOCONT is sphere-major with NMAXZ contact slots per
// sphere; NCONT packs 9 reals per contact (first three = contact normal,
// negated below); FCJI packs 3 force components per contact; LIST_IND holds
// the per-sphere weighting coefficient.
__global__
void slocal(int NB_SPH,R * LIST_IND, int * NBCONTCO, unsigned int * NOCONT, R * LIST_R, R * NCONT, R * FCJI, R * PSIG11, R * PSIG12, R * PSIG13, R * PSIG22, R * PSIG23, R * PSIG33, int NMAXZ) {
int numcor = (blockIdx.x*blockDim.x)+threadIdx.x;
if (numcor<NB_SPH) {
// Accumulate the full 3x3 tensor; it is symmetrized before storing.
R sig11=0.;
R sig12=0.;
R sig13=0.;
R sig21=0.;
R sig22=0.;
R sig23=0.;
R sig31=0.;
R sig32=0.;
R sig33=0.;
R coefij=LIST_IND[numcor];
for(int kt=0;kt<NBCONTCO[numcor];kt++){
unsigned int numc=NOCONT[numcor*NMAXZ+kt];
// if(numcor==0){ printf("NUMC: (%i,%i)\n",kt,numc);}
R ray=LIST_R[numcor];
// Contact normal, flipped to point from j to i (sign convention).
R n1=-NCONT[numc*9+0];
R n2=-NCONT[numc*9+1];
R n3=-NCONT[numc*9+2];
/*
sig11=numc;
sig12=numc;
sig13=numc;
sig21=numc;
sig22=numc;
sig23=numc;
sig31=numc;
sig32=numc;
sig33=numc;*/
// Outer product contribution: sig_ab += coef * R * n_a * F_b.
sig11=sig11+coefij*ray*n1*FCJI[numc*3+0];
sig12=sig12+coefij*ray*n1*FCJI[numc*3+1];
sig13=sig13+coefij*ray*n1*FCJI[numc*3+2];
sig21=sig21+coefij*ray*n2*FCJI[numc*3+0];
sig22=sig22+coefij*ray*n2*FCJI[numc*3+1];
sig23=sig23+coefij*ray*n2*FCJI[numc*3+2];
sig31=sig31+coefij*ray*n3*FCJI[numc*3+0];
sig32=sig32+coefij*ray*n3*FCJI[numc*3+1];
sig33=sig33+coefij*ray*n3*FCJI[numc*3+2];
}
// Symmetrize the off-diagonal components before storing.
sig12=(sig12+sig21)/2.;
sig13=(sig13+sig31)/2.;
sig23=(sig23+sig32)/2.;
PSIG11[numcor]=sig11;
PSIG12[numcor]=sig12;
PSIG13[numcor]=sig13;
PSIG22[numcor]=sig22;
PSIG23[numcor]=sig23;
PSIG33[numcor]=sig33;
}
}
// Halo-averaged stress per sphere (one thread per sphere): sums the
// per-sphere tensors (PSIG*) over the sphere's halo neighbourhood,
// normalizes by the halo volume, then derives invariants -- Von Mises
// stress, trace and the three principal stresses, the latter as roots of
// the characteristic cubic solved in depressed form via the trigonometric
// (Cardano) method.  The many unused min/max parameters feed the
// commented-out extrema tracking at the bottom.
__global__
void shalo(int NB_SPH, int * NBHALO, unsigned int * NOHALO, R * VOLHALO, R * PSIG11, R * PSIG12, R * PSIG13, R * PSIG22, R * PSIG23, R * PSIG33, R * SIG11, R * SIG12, R * SIG13, R * SIG22, R * SIG23, R * SIG33, R * VONMIS, R * TRACE, R * SIG1, R *SIG2, R * SIG3, R minsig11, R maxsig11, R minsig12, R maxsig12, R minsig13, R maxsig13, R minsig22, R maxsig22, R minsig23, R maxsig23, R minsig33, R maxsig33, R minvm, R maxvm, R mintrac, R maxtrac, R minsig1, R maxsig1, R minsig2, R maxsig2, R minsig3, R maxsig3, int NMAXHALO) {
int jt = (blockIdx.x*blockDim.x)+threadIdx.x;
if (jt<NB_SPH) {
// Start from this sphere's own contribution...
R sig11=PSIG11[jt];
R sig12=PSIG12[jt];
R sig13=PSIG13[jt];
R sig22=PSIG22[jt];
R sig23=PSIG23[jt];
R sig33=PSIG33[jt];
// ...then add the neighbours.  kt starts at 1: slot 0 of NOHALO is
// presumably the sphere itself, already counted above -- confirm.
for(int kt=1;kt<NBHALO[jt];kt++){
//printf("NBHALOav: %i\n",long(700000)*NMAXHALO+kt);
// long cast avoids 32-bit overflow of jt*NMAXHALO for large systems.
unsigned int lt=NOHALO[long(jt)*NMAXHALO+kt];
//printf("NBHALOap: %i\n",lt);
sig11=sig11+PSIG11[lt];
sig12=sig12+PSIG12[lt];
sig13=sig13+PSIG13[lt];
sig22=sig22+PSIG22[lt];
sig23=sig23+PSIG23[lt];
sig33=sig33+PSIG33[lt];
}
// Normalize by the halo volume to get an averaged stress tensor.
sig11/=VOLHALO[jt];
sig12/=VOLHALO[jt];
sig13/=VOLHALO[jt];
sig22/=VOLHALO[jt];
sig23/=VOLHALO[jt];
sig33/=VOLHALO[jt];
SIG11[jt]=sig11;
SIG12[jt]=sig12;
SIG13[jt]=sig13;
SIG22[jt]=sig22;
SIG23[jt]=sig23;
SIG33[jt]=sig33;
// Tensor invariants i1..i3; characteristic polynomial is
// s^3 + b s^2 + c s + d with b=-i1, c=i2, d=-i3.
R i1=sig11+sig22+sig33;
R i2=sig11*sig22+sig22*sig33+sig33*sig11-sig12*sig12-sig23*sig23-sig13*sig13;
R i3=sig11*(sig22*sig33-sig23*sig23)-sig12*(sig12*sig33-sig13*sig23)+sig13*(sig12*sig23-sig22*sig13);
R b=-i1;
R c=i2;
R d=-i3;
// Depressed cubic t^3 + p t + q (substitute s = t - b/3).
R p=c-b*b/3.;
R q=d-b*c/3.+2*b*b*b/27.;
// Discriminant-like quantity; ~0 means a repeated root.
R detd=4*c*c*c+27*d*d+4*d*b*b*b-b*b*c*c-18*b*c*d;
R s1=0.;
R s2=0.;
R s3=0.;
if (fabs(detd)<=1e-30){
// Repeated-root branch: double root plus a simple root.
// NOTE(review): pow(t, 1./3) is NaN for negative t; a signed cube
// root (cbrt-style) is presumably intended here -- confirm.
double t=-q/2.;
s1=2.*pow(t,1./3)-b/3.;
s2=-pow(t,1./3)-b/3.;
s3=s2;
}
else{
// Three distinct real roots: trigonometric (Cardano) formula.
R r=sqrt(-p*p*p/27.);
R theta=acos(-q/(2*r));
s1=2.*sqrt(-p/3.)*cos(theta/3.)-b/3.;
s2=2.*sqrt(-p/3.)*cos((theta+2.*3.14159265358979323846)/3.)-b/3.;
s3=2.*sqrt(-p/3.)*cos((theta+4.*3.14159265358979323846)/3.)-b/3.;
}
// Order the roots so SIG1 >= SIG2 >= SIG3.
R smax=max(max(s1,s2),s3);
R smin=min(min(s1,s2),s3);
if(s1==smax){s2=max(s2,s3);}
else if(s2==smax){s2=max(s1,s3);}
else if(s3==smax){s2=max(s1,s2);}
s1=smax;
s3=smin;
R trac=s1+s2+s3;
// Von Mises equivalent stress from the six tensor components;
// the self-comparison guards against NaN (NaN != NaN) and zeroes it.
R vmis=sqrt((sig11-sig22)*(sig11-sig22)+(sig33-sig22)*(sig33-sig22)+(sig11-sig33)*(sig11-sig33)+6.*(sig12*sig12+sig13*sig13+sig23*sig23))/sqrt(2.);
if(vmis!=vmis) {vmis=0.;}
VONMIS[jt]=vmis;
TRACE[jt]=trac;
SIG1[jt]=s1;
SIG2[jt]=s2;
SIG3[jt]=s3;
/*
minvm=fmin(vmis,minvm);
maxvm=fmax(vmis,maxvm);
mintrac=fmin(trac,mintrac);
maxtrac=fmax(trac,maxtrac);
minsig11=fmin(sig11,minsig11);
maxsig11=fmax(sig11,maxsig11);
minsig12=fmin(sig12,minsig12);
maxsig12=fmax(sig12,maxsig12);
minsig13=fmin(sig13,minsig13);
maxsig13=fmax(sig13,maxsig13);
minsig22=fmin(sig22,minsig22);
maxsig22=fmax(sig22,maxsig22);
minsig23=fmin(sig23,minsig23);
maxsig23=fmax(sig23,maxsig23);
minsig33=fmin(sig33,minsig33);
maxsig33=fmax(sig33,maxsig33);
minsig1=fmin(s1,minsig1);
maxsig1=fmax(s1,maxsig1);
minsig2=fmin(s2,minsig2);
maxsig2=fmax(s2,maxsig2);
minsig3=fmin(s3,minsig3);
maxsig3=fmax(s3,maxsig3); */
}
}
/**
 * Host driver for the stress post-processing pass:
 *   1) slocal: per-particle stress tensors from contact forces,
 *   2) shalo:  halo-averaged tensors + invariants (principal stresses,
 *              trace, von Mises),
 *   3) host-side min/max reduction of every output field,
 *   4) console summary every NBENREG iterations.
 * Results are written through SIG11..SIG33 / VONMIS / TRACE / SIG1..SIG3 and
 * the min/max reference parameters.
 */
void contrainteshalo_gpu(R Pi, R coef1, int ite,int NBENREG, int NB_SPH, int NBCO, int NMAXZ,int NMAXHALO, int NMAXCONT, R H_TOT, R V_TOT, R Z_TOT,R * LIST_R, R ** FCJI, R ** NCONT, unsigned int ** NOCONT, int * NBCONTCO, R * VONMIS, R * TRACE, R * SIG11, R * SIG12, R * SIG13, R * SIG22, R * SIG23, R * SIG33, R * SIG1, R * SIG2, R * SIG3,R &minvm, R &maxvm,R &mintrac, R &maxtrac, R &minsig11, R &maxsig11, R &minsig12, R &maxsig12, R &minsig13, R &maxsig13, R &minsig22, R &maxsig22, R &minsig23, R &maxsig23, R &minsig33, R &maxsig33, R &minsig1, R &maxsig1, R &minsig2, R &maxsig2, R &minsig3, R &maxsig3, bool * EDGE, unsigned int ** NOHALO, int * NBHALO, R * LIST_V, R * VOLHALO,R * LIST_IND) {
  int it;
  // Reset the global extrema; they are rebuilt by the reduction at the end.
  mintrac = 1e12;   maxtrac = -1e12;
  minvm = 1e12;     maxvm = -1e12;
  minsig11 = 1e12;  maxsig11 = -1e12;
  minsig12 = 1e12;  maxsig12 = -1e12;
  minsig13 = 1e12;  maxsig13 = -1e12;
  minsig22 = 1e12;  maxsig22 = -1e12;
  minsig23 = 1e12;  maxsig23 = -1e12;
  minsig33 = 1e12;  maxsig33 = -1e12;
  minsig1 = 1e12;   maxsig1 = -1e12;
  minsig2 = 1e12;   maxsig2 = -1e12;
  minsig3 = 1e12;   maxsig3 = -1e12;
  /////////////////////////////////
  // Device buffers
  R * dLIST_IND;
  R * dLIST_R;
  R * dPSIG11;
  R * dPSIG12;
  R * dPSIG13;
  R * dPSIG22;
  R * dPSIG23;
  R * dPSIG33;
  R * dSIG11;
  R * dSIG12;
  R * dSIG13;
  R * dSIG22;
  R * dSIG23;
  R * dSIG33;
  R * dVONMIS;
  R * dTRACE;
  R * dSIG1;
  R * dSIG2;
  R * dSIG3;
  R * dVOLHALO;
  int * dNBHALO;
  int * dNBCONTCO;
  unsigned int * dNOCONT;
  R * dNCONT;
  R * dFCJI;
  unsigned int * dNOHALO;
  /////////////////////////////////
  // Upload the contact data needed by slocal (the 2D host arrays are stored
  // contiguously, hence the X[0] base pointers).
  hipMalloc((void **)&dLIST_IND, NB_SPH*sizeof(R));
  hipMemcpy(dLIST_IND, LIST_IND, NB_SPH*sizeof(R), hipMemcpyHostToDevice);
  hipMalloc((void **)&dLIST_R, NB_SPH*sizeof(R));
  hipMemcpy(dLIST_R, LIST_R, NB_SPH*sizeof(R), hipMemcpyHostToDevice);
  hipMalloc((void **)&dPSIG11, NB_SPH*sizeof(R));
  hipMalloc((void **)&dPSIG12, NB_SPH*sizeof(R));
  hipMalloc((void **)&dPSIG13, NB_SPH*sizeof(R));
  hipMalloc((void **)&dPSIG22, NB_SPH*sizeof(R));
  hipMalloc((void **)&dPSIG23, NB_SPH*sizeof(R));
  hipMalloc((void **)&dPSIG33, NB_SPH*sizeof(R));
  hipMalloc((void **)&dNBCONTCO, NB_SPH*sizeof(int));
  hipMemcpy(dNBCONTCO, NBCONTCO, NB_SPH*sizeof(int), hipMemcpyHostToDevice);
  hipMalloc((void **)&dNOCONT, NB_SPH*NMAXZ*sizeof(unsigned int));
  hipMemcpy(dNOCONT, NOCONT[0], NB_SPH*NMAXZ*sizeof(unsigned int), hipMemcpyHostToDevice);
  hipMalloc((void **)&dNCONT, NMAXCONT*9*sizeof(R));
  hipMemcpy(dNCONT, NCONT[0], NMAXCONT*9*sizeof(R), hipMemcpyHostToDevice);
  hipMalloc((void **)&dFCJI, NMAXCONT*3*sizeof(R));
  hipMemcpy(dFCJI, FCJI[0], NMAXCONT*3*sizeof(R), hipMemcpyHostToDevice);
  /////////////////////////////////
  // Per-particle stresses (one thread per particle, 256 threads per block).
  dim3 DimGrid ((NB_SPH-1)/256+1,1,1);
  dim3 DimBlock (256,1,1);
  hipLaunchKernelGGL((slocal), dim3(DimGrid), dim3(DimBlock), 0, 0, NB_SPH,dLIST_IND,dNBCONTCO,dNOCONT,dLIST_R,dNCONT,dFCJI,dPSIG11,dPSIG12, dPSIG13, dPSIG22,dPSIG23,dPSIG33,NMAXZ);
  /////////////////////////////////
  // The contact data is no longer needed; free it before the (large) halo
  // upload below to limit peak device-memory usage.
  hipFree(dLIST_IND);
  hipFree(dLIST_R);
  hipFree(dNBCONTCO);
  hipFree(dNOCONT);
  hipFree(dNCONT);
  hipFree(dFCJI);
  /////////////////////////////////
  // Output buffers + halo topology.
  hipMalloc((void **)&dSIG11, NB_SPH*sizeof(R));
  hipMalloc((void **)&dSIG12, NB_SPH*sizeof(R));
  hipMalloc((void **)&dSIG13, NB_SPH*sizeof(R));
  hipMalloc((void **)&dSIG22, NB_SPH*sizeof(R));
  hipMalloc((void **)&dSIG23, NB_SPH*sizeof(R));
  hipMalloc((void **)&dSIG33, NB_SPH*sizeof(R));
  hipMalloc((void **)&dVONMIS, NB_SPH*sizeof(R));
  hipMalloc((void **)&dTRACE, NB_SPH*sizeof(R));
  hipMalloc((void **)&dSIG1, NB_SPH*sizeof(R));
  hipMalloc((void **)&dSIG2, NB_SPH*sizeof(R));
  hipMalloc((void **)&dSIG3, NB_SPH*sizeof(R));
  hipMalloc((void **)&dVOLHALO, NB_SPH*sizeof(R));
  hipMemcpy(dVOLHALO, VOLHALO, NB_SPH*sizeof(R), hipMemcpyHostToDevice);
  hipMalloc((void **)&dNBHALO, NB_SPH*sizeof(int));
  hipMemcpy(dNBHALO, NBHALO, NB_SPH*sizeof(int), hipMemcpyHostToDevice);
  // long() promotion: NB_SPH*NMAXHALO can overflow 32-bit arithmetic.
  hipMalloc((void **)&dNOHALO, long(NB_SPH)*NMAXHALO*sizeof(unsigned int));
  hipMemcpy(dNOHALO, NOHALO[0], long(NB_SPH)*NMAXHALO*sizeof(unsigned int), hipMemcpyHostToDevice);
  // Report remaining device memory. FIX: hipMemGetInfo yields size_t, so the
  // values must be printed with %zu ("%d" was undefined behaviour on 64-bit);
  // also renamed from free/total to avoid shadowing ::free.
  size_t freeBytes, totalBytes;
  printf("\n");
  hipMemGetInfo(&freeBytes,&totalBytes);
  printf("%zu KB free of total %zu KB\n",freeBytes/1024,totalBytes/1024);
  /////////////////////////////////
  // Halo-averaged stresses + invariants.
  hipLaunchKernelGGL((shalo), dim3(DimGrid), dim3(DimBlock), 0, 0, NB_SPH, dNBHALO,dNOHALO,dVOLHALO,dPSIG11,dPSIG12,dPSIG13,dPSIG22,dPSIG23,dPSIG33,dSIG11,dSIG12,dSIG13,dSIG22,dSIG23,dSIG33,dVONMIS,dTRACE,dSIG1,dSIG2,dSIG3,minsig11, maxsig11,minsig12,maxsig12,minsig13,maxsig13,minsig22,maxsig22,minsig23,maxsig23,minsig33,maxsig33,minvm,maxvm,mintrac,maxtrac,minsig1,maxsig1,minsig2,maxsig2,minsig3,maxsig3,NMAXHALO);
  /////////////////////////////////
  // Download the results (hipMemcpy synchronises with the kernel).
  hipMemcpy(SIG11, dSIG11, NB_SPH*sizeof(R), hipMemcpyDeviceToHost);
  hipMemcpy(SIG12, dSIG12, NB_SPH*sizeof(R), hipMemcpyDeviceToHost);
  hipMemcpy(SIG13, dSIG13, NB_SPH*sizeof(R), hipMemcpyDeviceToHost);
  hipMemcpy(SIG22, dSIG22, NB_SPH*sizeof(R), hipMemcpyDeviceToHost);
  hipMemcpy(SIG23, dSIG23, NB_SPH*sizeof(R), hipMemcpyDeviceToHost);
  hipMemcpy(SIG33, dSIG33, NB_SPH*sizeof(R), hipMemcpyDeviceToHost);
  hipMemcpy(VONMIS, dVONMIS, NB_SPH*sizeof(R), hipMemcpyDeviceToHost);
  hipMemcpy(TRACE, dTRACE, NB_SPH*sizeof(R), hipMemcpyDeviceToHost);
  hipMemcpy(SIG1, dSIG1, NB_SPH*sizeof(R), hipMemcpyDeviceToHost);
  hipMemcpy(SIG2, dSIG2, NB_SPH*sizeof(R), hipMemcpyDeviceToHost);
  hipMemcpy(SIG3, dSIG3, NB_SPH*sizeof(R), hipMemcpyDeviceToHost);
  /////////////////////////////////
  // Release the remaining device buffers.
  hipFree(dPSIG11);
  hipFree(dPSIG12);
  hipFree(dPSIG13);
  hipFree(dPSIG22);
  hipFree(dPSIG23);
  hipFree(dPSIG33);
  hipFree(dSIG11);
  hipFree(dSIG12);
  hipFree(dSIG13);
  hipFree(dSIG22);
  hipFree(dSIG23);
  hipFree(dSIG33);
  hipFree(dVONMIS);
  hipFree(dTRACE);
  hipFree(dSIG1);
  hipFree(dSIG2);
  hipFree(dSIG3);
  hipFree(dNBHALO);
  hipFree(dVOLHALO);
  hipFree(dNOHALO);
  /////////////////////////////////
  // Host-side min/max reduction over every output field.
  for(it=0;it<NB_SPH;it++){
    minvm=min(VONMIS[it],minvm);
    maxvm=max(VONMIS[it],maxvm);
    mintrac=min(TRACE[it],mintrac);
    maxtrac=max(TRACE[it],maxtrac);
    minsig11=min(SIG11[it],minsig11);
    maxsig11=max(SIG11[it],maxsig11);
    minsig12=min(SIG12[it],minsig12);
    maxsig12=max(SIG12[it],maxsig12);
    minsig13=min(SIG13[it],minsig13);
    maxsig13=max(SIG13[it],maxsig13);
    minsig22=min(SIG22[it],minsig22);
    maxsig22=max(SIG22[it],maxsig22);
    minsig23=min(SIG23[it],minsig23);
    maxsig23=max(SIG23[it],maxsig23);
    minsig33=min(SIG33[it],minsig33);
    maxsig33=max(SIG33[it],maxsig33);
    minsig1=min(SIG1[it],minsig1);
    maxsig1=max(SIG1[it],maxsig1);
    minsig2=min(SIG2[it],minsig2);
    maxsig2=max(SIG2[it],maxsig2);
    minsig3=min(SIG3[it],minsig3);
    maxsig3=max(SIG3[it],maxsig3);
  }
  // Periodic console summary.
  if(ite%NBENREG==0){
    cout<<"Maxsig11:"<<maxsig11<<endl;
    cout<<"Minsig11:"<<minsig11<<endl;
    cout<<"Maxsig12:"<<maxsig12<<endl;
    cout<<"Minsig12:"<<minsig12<<endl;
    cout<<"Maxsig13:"<<maxsig13<<endl;
    cout<<"Minsig13:"<<minsig13<<endl;
    cout<<"Maxsig22:"<<maxsig22<<endl;
    cout<<"Minsig22:"<<minsig22<<endl;
    cout<<"Maxsig23:"<<maxsig23<<endl;
    cout<<"Minsig23:"<<minsig23<<endl;
    cout<<"Maxsig33:"<<maxsig33<<endl;
    cout<<"Minsig33:"<<minsig33<<endl;
    cout<<"Maxsig1:"<<maxsig1<<endl;
    cout<<"Minsig1:"<<minsig1<<endl;
    cout<<"Maxsig2:"<<maxsig2<<endl;
    cout<<"Minsig2:"<<minsig2<<endl;
    cout<<"Maxsig3:"<<maxsig3<<endl;
    cout<<"Minsig3:"<<minsig3<<endl;
    cout<<"Maxvm:"<<maxvm<<endl;
    cout<<"Minvm:"<<minvm<<endl;
    cout<<"Maxtrac:"<<maxtrac<<endl;
    cout<<"Mintrac:"<<mintrac<<endl;
  }
}
| 13c89525cd1ccbd943a169cce9ae28f5ef1fcc3a.cu | #include <cstdlib>
#include <cstdio>
#include <iomanip>
#include <fstream>
#include <sstream>
#include <iostream>
#include <cmath>
#include <time.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <string.h>
#include <map>
#include <cassert>
#include <vector>
#include <limits>
using namespace std;
#include "contraintes_gpu.h"
__global__
void slocal(int NB_SPH,R * LIST_IND, int * NBCONTCO, unsigned int * NOCONT, R * LIST_R, R * NCONT, R * FCJI, R * PSIG11, R * PSIG12, R * PSIG13, R * PSIG22, R * PSIG23, R * PSIG33, int NMAXZ) {
    // One thread per particle: accumulate the particle's stress tensor from
    // all of its contacts, symmetrise the shear terms, and store the six
    // independent components.
    int p = (blockIdx.x*blockDim.x)+threadIdx.x;
    if (p >= NB_SPH) return;   // guard the grid tail

    // Running 3x3 tensor; only the upper triangle survives after symmetrisation.
    R s[3][3];
    for (int j = 0; j < 3; j++)
        for (int k = 0; k < 3; k++)
            s[j][k] = 0.;

    R coefij = LIST_IND[p];    // per-particle weighting coefficient
    R ray = LIST_R[p];         // particle radius
    for (int c = 0; c < NBCONTCO[p]; c++) {
        unsigned int numc = NOCONT[p*NMAXZ+c];   // global contact index
        for (int j = 0; j < 3; j++) {
            // Inward-pointing contact normal component.
            R nj = -NCONT[numc*9+j];
            for (int k = 0; k < 3; k++)
                s[j][k] = s[j][k] + coefij*ray*nj*FCJI[numc*3+k];
        }
    }

    // Keep only the symmetric part of the off-diagonal terms.
    s[0][1] = (s[0][1]+s[1][0])/2.;
    s[0][2] = (s[0][2]+s[2][0])/2.;
    s[1][2] = (s[1][2]+s[2][1])/2.;

    PSIG11[p] = s[0][0];
    PSIG12[p] = s[0][1];
    PSIG13[p] = s[0][2];
    PSIG22[p] = s[1][1];
    PSIG23[p] = s[1][2];
    PSIG33[p] = s[2][2];
}
__global__
void shalo(int NB_SPH, int * NBHALO, unsigned int * NOHALO, R * VOLHALO, R * PSIG11, R * PSIG12, R * PSIG13, R * PSIG22, R * PSIG23, R * PSIG33, R * SIG11, R * SIG12, R * SIG13, R * SIG22, R * SIG23, R * SIG33, R * VONMIS, R * TRACE, R * SIG1, R *SIG2, R * SIG3, R minsig11, R maxsig11, R minsig12, R maxsig12, R minsig13, R maxsig13, R minsig22, R maxsig22, R minsig23, R maxsig23, R minsig33, R maxsig33, R minvm, R maxvm, R mintrac, R maxtrac, R minsig1, R maxsig1, R minsig2, R maxsig2, R minsig3, R maxsig3, int NMAXHALO) {
  // One thread per particle: volume-average the per-particle stress tensors
  // over the particle's halo, then derive the trace, the von Mises stress and
  // the three (sorted) principal stresses for output.
  // NOTE(review): the min/max scalar parameters are pass-by-value remnants of
  // a former in-kernel reduction; they have no effect here.
  int jt = (blockIdx.x*blockDim.x)+threadIdx.x;
  if (jt<NB_SPH) {
    // Seed with this particle's own tensor.
    R sig11=PSIG11[jt];
    R sig12=PSIG12[jt];
    R sig13=PSIG13[jt];
    R sig22=PSIG22[jt];
    R sig23=PSIG23[jt];
    R sig33=PSIG33[jt];
    // Accumulate the halo neighbours (kt starts at 1: slot 0 presumably holds
    // the particle itself — TODO confirm against the NOHALO builder).
    for(int kt=1;kt<NBHALO[jt];kt++){
      // long() promotion: jt*NMAXHALO can overflow 32-bit arithmetic.
      unsigned int lt=NOHALO[long(jt)*NMAXHALO+kt];
      sig11=sig11+PSIG11[lt];
      sig12=sig12+PSIG12[lt];
      sig13=sig13+PSIG13[lt];
      sig22=sig22+PSIG22[lt];
      sig23=sig23+PSIG23[lt];
      sig33=sig33+PSIG33[lt];
    }
    // Volume-average over the halo.
    sig11/=VOLHALO[jt];
    sig12/=VOLHALO[jt];
    sig13/=VOLHALO[jt];
    sig22/=VOLHALO[jt];
    sig23/=VOLHALO[jt];
    sig33/=VOLHALO[jt];
    SIG11[jt]=sig11;
    SIG12[jt]=sig12;
    SIG13[jt]=sig13;
    SIG22[jt]=sig22;
    SIG23[jt]=sig23;
    SIG33[jt]=sig33;
    // Invariants of the symmetric stress tensor.
    R i1=sig11+sig22+sig33;
    R i2=sig11*sig22+sig22*sig33+sig33*sig11-sig12*sig12-sig23*sig23-sig13*sig13;
    R i3=sig11*(sig22*sig33-sig23*sig23)-sig12*(sig12*sig33-sig13*sig23)+sig13*(sig12*sig23-sig22*sig13);
    // Characteristic polynomial s^3 + b*s^2 + c*s + d = 0, and its depressed
    // form y^3 + p*y + q = 0 with s = y - b/3.
    R b=-i1;
    R c=i2;
    R d=-i3;
    R p=c-b*b/3.;
    R q=d-b*c/3.+2*b*b*b/27.;
    // Discriminant: ~0 means (nearly) repeated roots.
    R detd=4*c*c*c+27*d*d+4*d*b*b*b-b*b*c*c-18*b*c*d;
    R s1=0.;
    R s2=0.;
    R s3=0.;
    if (fabs(detd)<=1e-30){
      // Repeated roots: the real cube root of t yields all three solutions.
      // FIX: pow(t, 1./3) returns NaN for t < 0; cbrt handles negative input.
      double t=-q/2.;
      double u=cbrt(t);
      s1=2.*u-b/3.;
      s2=-u-b/3.;
      s3=s2;
    }
    else{
      // Three distinct real roots: trigonometric (Viete) solution.
      R r=sqrt(-p*p*p/27.);
      R theta=acos(-q/(2*r));
      s1=2.*sqrt(-p/3.)*cos(theta/3.)-b/3.;
      s2=2.*sqrt(-p/3.)*cos((theta+2.*3.14159265358979323846)/3.)-b/3.;
      s3=2.*sqrt(-p/3.)*cos((theta+4.*3.14159265358979323846)/3.)-b/3.;
    }
    // Sort so that s1 >= s2 >= s3.
    R smax=max(max(s1,s2),s3);
    R smin=min(min(s1,s2),s3);
    if(s1==smax){s2=max(s2,s3);}
    else if(s2==smax){s2=max(s1,s3);}
    else if(s3==smax){s2=max(s1,s2);}
    s1=smax;
    s3=smin;
    R trac=s1+s2+s3;
    // Von Mises equivalent stress from the averaged tensor components.
    R vmis=sqrt((sig11-sig22)*(sig11-sig22)+(sig33-sig22)*(sig33-sig22)+(sig11-sig33)*(sig11-sig33)+6.*(sig12*sig12+sig13*sig13+sig23*sig23))/sqrt(2.);
    // NaN guard (degenerate tensors / acos domain edge cases).
    if(vmis!=vmis) {vmis=0.;}
    VONMIS[jt]=vmis;
    TRACE[jt]=trac;
    SIG1[jt]=s1;
    SIG2[jt]=s2;
    SIG3[jt]=s3;
  }
}
/**
 * Host driver for the stress post-processing pass:
 *   1) slocal: per-particle stress tensors from contact forces,
 *   2) shalo:  halo-averaged tensors + invariants (principal stresses,
 *              trace, von Mises),
 *   3) host-side min/max reduction of every output field,
 *   4) console summary every NBENREG iterations.
 * Results are written through SIG11..SIG33 / VONMIS / TRACE / SIG1..SIG3 and
 * the min/max reference parameters.
 */
void contrainteshalo_gpu(R Pi, R coef1, int ite,int NBENREG, int NB_SPH, int NBCO, int NMAXZ,int NMAXHALO, int NMAXCONT, R H_TOT, R V_TOT, R Z_TOT,R * LIST_R, R ** FCJI, R ** NCONT, unsigned int ** NOCONT, int * NBCONTCO, R * VONMIS, R * TRACE, R * SIG11, R * SIG12, R * SIG13, R * SIG22, R * SIG23, R * SIG33, R * SIG1, R * SIG2, R * SIG3,R &minvm, R &maxvm,R &mintrac, R &maxtrac, R &minsig11, R &maxsig11, R &minsig12, R &maxsig12, R &minsig13, R &maxsig13, R &minsig22, R &maxsig22, R &minsig23, R &maxsig23, R &minsig33, R &maxsig33, R &minsig1, R &maxsig1, R &minsig2, R &maxsig2, R &minsig3, R &maxsig3, bool * EDGE, unsigned int ** NOHALO, int * NBHALO, R * LIST_V, R * VOLHALO,R * LIST_IND) {
  int it;
  // Reset the global extrema; they are rebuilt by the reduction at the end.
  mintrac = 1e12;   maxtrac = -1e12;
  minvm = 1e12;     maxvm = -1e12;
  minsig11 = 1e12;  maxsig11 = -1e12;
  minsig12 = 1e12;  maxsig12 = -1e12;
  minsig13 = 1e12;  maxsig13 = -1e12;
  minsig22 = 1e12;  maxsig22 = -1e12;
  minsig23 = 1e12;  maxsig23 = -1e12;
  minsig33 = 1e12;  maxsig33 = -1e12;
  minsig1 = 1e12;   maxsig1 = -1e12;
  minsig2 = 1e12;   maxsig2 = -1e12;
  minsig3 = 1e12;   maxsig3 = -1e12;
  /////////////////////////////////
  // Device buffers
  R * dLIST_IND;
  R * dLIST_R;
  R * dPSIG11;
  R * dPSIG12;
  R * dPSIG13;
  R * dPSIG22;
  R * dPSIG23;
  R * dPSIG33;
  R * dSIG11;
  R * dSIG12;
  R * dSIG13;
  R * dSIG22;
  R * dSIG23;
  R * dSIG33;
  R * dVONMIS;
  R * dTRACE;
  R * dSIG1;
  R * dSIG2;
  R * dSIG3;
  R * dVOLHALO;
  int * dNBHALO;
  int * dNBCONTCO;
  unsigned int * dNOCONT;
  R * dNCONT;
  R * dFCJI;
  unsigned int * dNOHALO;
  /////////////////////////////////
  // Upload the contact data needed by slocal (the 2D host arrays are stored
  // contiguously, hence the X[0] base pointers).
  cudaMalloc((void **)&dLIST_IND, NB_SPH*sizeof(R));
  cudaMemcpy(dLIST_IND, LIST_IND, NB_SPH*sizeof(R), cudaMemcpyHostToDevice);
  cudaMalloc((void **)&dLIST_R, NB_SPH*sizeof(R));
  cudaMemcpy(dLIST_R, LIST_R, NB_SPH*sizeof(R), cudaMemcpyHostToDevice);
  cudaMalloc((void **)&dPSIG11, NB_SPH*sizeof(R));
  cudaMalloc((void **)&dPSIG12, NB_SPH*sizeof(R));
  cudaMalloc((void **)&dPSIG13, NB_SPH*sizeof(R));
  cudaMalloc((void **)&dPSIG22, NB_SPH*sizeof(R));
  cudaMalloc((void **)&dPSIG23, NB_SPH*sizeof(R));
  cudaMalloc((void **)&dPSIG33, NB_SPH*sizeof(R));
  cudaMalloc((void **)&dNBCONTCO, NB_SPH*sizeof(int));
  cudaMemcpy(dNBCONTCO, NBCONTCO, NB_SPH*sizeof(int), cudaMemcpyHostToDevice);
  cudaMalloc((void **)&dNOCONT, NB_SPH*NMAXZ*sizeof(unsigned int));
  cudaMemcpy(dNOCONT, NOCONT[0], NB_SPH*NMAXZ*sizeof(unsigned int), cudaMemcpyHostToDevice);
  cudaMalloc((void **)&dNCONT, NMAXCONT*9*sizeof(R));
  cudaMemcpy(dNCONT, NCONT[0], NMAXCONT*9*sizeof(R), cudaMemcpyHostToDevice);
  cudaMalloc((void **)&dFCJI, NMAXCONT*3*sizeof(R));
  cudaMemcpy(dFCJI, FCJI[0], NMAXCONT*3*sizeof(R), cudaMemcpyHostToDevice);
  /////////////////////////////////
  // Per-particle stresses (one thread per particle, 256 threads per block).
  dim3 DimGrid ((NB_SPH-1)/256+1,1,1);
  dim3 DimBlock (256,1,1);
  slocal<<<DimGrid, DimBlock>>>(NB_SPH,dLIST_IND,dNBCONTCO,dNOCONT,dLIST_R,dNCONT,dFCJI,dPSIG11,dPSIG12, dPSIG13, dPSIG22,dPSIG23,dPSIG33,NMAXZ);
  /////////////////////////////////
  // The contact data is no longer needed; free it before the (large) halo
  // upload below to limit peak device-memory usage.
  cudaFree(dLIST_IND);
  cudaFree(dLIST_R);
  cudaFree(dNBCONTCO);
  cudaFree(dNOCONT);
  cudaFree(dNCONT);
  cudaFree(dFCJI);
  /////////////////////////////////
  // Output buffers + halo topology.
  cudaMalloc((void **)&dSIG11, NB_SPH*sizeof(R));
  cudaMalloc((void **)&dSIG12, NB_SPH*sizeof(R));
  cudaMalloc((void **)&dSIG13, NB_SPH*sizeof(R));
  cudaMalloc((void **)&dSIG22, NB_SPH*sizeof(R));
  cudaMalloc((void **)&dSIG23, NB_SPH*sizeof(R));
  cudaMalloc((void **)&dSIG33, NB_SPH*sizeof(R));
  cudaMalloc((void **)&dVONMIS, NB_SPH*sizeof(R));
  cudaMalloc((void **)&dTRACE, NB_SPH*sizeof(R));
  cudaMalloc((void **)&dSIG1, NB_SPH*sizeof(R));
  cudaMalloc((void **)&dSIG2, NB_SPH*sizeof(R));
  cudaMalloc((void **)&dSIG3, NB_SPH*sizeof(R));
  cudaMalloc((void **)&dVOLHALO, NB_SPH*sizeof(R));
  cudaMemcpy(dVOLHALO, VOLHALO, NB_SPH*sizeof(R), cudaMemcpyHostToDevice);
  cudaMalloc((void **)&dNBHALO, NB_SPH*sizeof(int));
  cudaMemcpy(dNBHALO, NBHALO, NB_SPH*sizeof(int), cudaMemcpyHostToDevice);
  // long() promotion: NB_SPH*NMAXHALO can overflow 32-bit arithmetic.
  cudaMalloc((void **)&dNOHALO, long(NB_SPH)*NMAXHALO*sizeof(unsigned int));
  cudaMemcpy(dNOHALO, NOHALO[0], long(NB_SPH)*NMAXHALO*sizeof(unsigned int), cudaMemcpyHostToDevice);
  // Report remaining device memory. FIX: cudaMemGetInfo yields size_t, so the
  // values must be printed with %zu ("%d" was undefined behaviour on 64-bit);
  // also renamed from free/total to avoid shadowing ::free.
  size_t freeBytes, totalBytes;
  printf("\n");
  cudaMemGetInfo(&freeBytes,&totalBytes);
  printf("%zu KB free of total %zu KB\n",freeBytes/1024,totalBytes/1024);
  /////////////////////////////////
  // Halo-averaged stresses + invariants.
  shalo<<<DimGrid, DimBlock>>>(NB_SPH, dNBHALO,dNOHALO,dVOLHALO,dPSIG11,dPSIG12,dPSIG13,dPSIG22,dPSIG23,dPSIG33,dSIG11,dSIG12,dSIG13,dSIG22,dSIG23,dSIG33,dVONMIS,dTRACE,dSIG1,dSIG2,dSIG3,minsig11, maxsig11,minsig12,maxsig12,minsig13,maxsig13,minsig22,maxsig22,minsig23,maxsig23,minsig33,maxsig33,minvm,maxvm,mintrac,maxtrac,minsig1,maxsig1,minsig2,maxsig2,minsig3,maxsig3,NMAXHALO);
  /////////////////////////////////
  // Download the results (cudaMemcpy synchronises with the kernel).
  cudaMemcpy(SIG11, dSIG11, NB_SPH*sizeof(R), cudaMemcpyDeviceToHost);
  cudaMemcpy(SIG12, dSIG12, NB_SPH*sizeof(R), cudaMemcpyDeviceToHost);
  cudaMemcpy(SIG13, dSIG13, NB_SPH*sizeof(R), cudaMemcpyDeviceToHost);
  cudaMemcpy(SIG22, dSIG22, NB_SPH*sizeof(R), cudaMemcpyDeviceToHost);
  cudaMemcpy(SIG23, dSIG23, NB_SPH*sizeof(R), cudaMemcpyDeviceToHost);
  cudaMemcpy(SIG33, dSIG33, NB_SPH*sizeof(R), cudaMemcpyDeviceToHost);
  cudaMemcpy(VONMIS, dVONMIS, NB_SPH*sizeof(R), cudaMemcpyDeviceToHost);
  cudaMemcpy(TRACE, dTRACE, NB_SPH*sizeof(R), cudaMemcpyDeviceToHost);
  cudaMemcpy(SIG1, dSIG1, NB_SPH*sizeof(R), cudaMemcpyDeviceToHost);
  cudaMemcpy(SIG2, dSIG2, NB_SPH*sizeof(R), cudaMemcpyDeviceToHost);
  cudaMemcpy(SIG3, dSIG3, NB_SPH*sizeof(R), cudaMemcpyDeviceToHost);
  /////////////////////////////////
  // Release the remaining device buffers.
  cudaFree(dPSIG11);
  cudaFree(dPSIG12);
  cudaFree(dPSIG13);
  cudaFree(dPSIG22);
  cudaFree(dPSIG23);
  cudaFree(dPSIG33);
  cudaFree(dSIG11);
  cudaFree(dSIG12);
  cudaFree(dSIG13);
  cudaFree(dSIG22);
  cudaFree(dSIG23);
  cudaFree(dSIG33);
  cudaFree(dVONMIS);
  cudaFree(dTRACE);
  cudaFree(dSIG1);
  cudaFree(dSIG2);
  cudaFree(dSIG3);
  cudaFree(dNBHALO);
  cudaFree(dVOLHALO);
  cudaFree(dNOHALO);
  /////////////////////////////////
  // Host-side min/max reduction over every output field.
  for(it=0;it<NB_SPH;it++){
    minvm=min(VONMIS[it],minvm);
    maxvm=max(VONMIS[it],maxvm);
    mintrac=min(TRACE[it],mintrac);
    maxtrac=max(TRACE[it],maxtrac);
    minsig11=min(SIG11[it],minsig11);
    maxsig11=max(SIG11[it],maxsig11);
    minsig12=min(SIG12[it],minsig12);
    maxsig12=max(SIG12[it],maxsig12);
    minsig13=min(SIG13[it],minsig13);
    maxsig13=max(SIG13[it],maxsig13);
    minsig22=min(SIG22[it],minsig22);
    maxsig22=max(SIG22[it],maxsig22);
    minsig23=min(SIG23[it],minsig23);
    maxsig23=max(SIG23[it],maxsig23);
    minsig33=min(SIG33[it],minsig33);
    maxsig33=max(SIG33[it],maxsig33);
    minsig1=min(SIG1[it],minsig1);
    maxsig1=max(SIG1[it],maxsig1);
    minsig2=min(SIG2[it],minsig2);
    maxsig2=max(SIG2[it],maxsig2);
    minsig3=min(SIG3[it],minsig3);
    maxsig3=max(SIG3[it],maxsig3);
  }
  // Periodic console summary.
  if(ite%NBENREG==0){
    cout<<"Maxsig11:"<<maxsig11<<endl;
    cout<<"Minsig11:"<<minsig11<<endl;
    cout<<"Maxsig12:"<<maxsig12<<endl;
    cout<<"Minsig12:"<<minsig12<<endl;
    cout<<"Maxsig13:"<<maxsig13<<endl;
    cout<<"Minsig13:"<<minsig13<<endl;
    cout<<"Maxsig22:"<<maxsig22<<endl;
    cout<<"Minsig22:"<<minsig22<<endl;
    cout<<"Maxsig23:"<<maxsig23<<endl;
    cout<<"Minsig23:"<<minsig23<<endl;
    cout<<"Maxsig33:"<<maxsig33<<endl;
    cout<<"Minsig33:"<<minsig33<<endl;
    cout<<"Maxsig1:"<<maxsig1<<endl;
    cout<<"Minsig1:"<<minsig1<<endl;
    cout<<"Maxsig2:"<<maxsig2<<endl;
    cout<<"Minsig2:"<<minsig2<<endl;
    cout<<"Maxsig3:"<<maxsig3<<endl;
    cout<<"Minsig3:"<<minsig3<<endl;
    cout<<"Maxvm:"<<maxvm<<endl;
    cout<<"Minvm:"<<minvm<<endl;
    cout<<"Maxtrac:"<<maxtrac<<endl;
    cout<<"Mintrac:"<<mintrac<<endl;
  }
}
|
c270999aa1fc85280eb372388df66b7bca773e7b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
    // Nothing to report on success.
    if (code == hipSuccess) return;
    // Emit the runtime's error string with the call site, then optionally die
    // with the error code as the process exit status.
    fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
    if (abort) exit(code);
}
const int blocksize = 1024;
const int factor = 16;
const int arraysize = blocksize / factor;
template <typename T>
__global__ void addition_test_kernel(T * a, T * sum) {
    // `factor` (= blocksize / arraysize) threads all map onto each element
    // index, so every sum[idx] is updated by several threads concurrently.
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int idx = (tid) % arraysize;
    // Plain += is an unsynchronised read-modify-write race between the
    // threads sharing idx (updates may be lost) — presumably intentional,
    // to contrast with the atomicAdd below; TODO confirm.
    sum[idx] += a[idx];
    atomicAdd(&sum[idx], a[idx]);
    // Debug trace for one element; device printf ordering is unspecified.
    // NOTE(review): "%d" assumes T is a 32-bit integer type.
    if (idx == 2)
        printf("%d %d %d %d -> ", idx, tid, sum[idx], a[idx]);
}
template <typename T>
// Drives addition_test_kernel: one block of `blocksize` threads, `factor`
// threads per element, then prints the resulting sums.
void test_atomic_int() {
    dim3 dimBlock(blocksize, 1);
    dim3 dimGrid(1, 1);
    // Host buffers. (The original also declared b/bd/answerd, which were
    // never used — removed.)
    T *a = (T*)malloc(arraysize * sizeof(T));
    T *sum = (T*)malloc(arraysize * sizeof(T));
    T *answer = (T*)malloc(arraysize * sizeof(T)); // expected values (currently unchecked)
    for (int i = 0; i < arraysize; ++i) {
        a[i] = 1;
        sum[i] = 0;
        answer[i] = i + i;
    }
    // Device buffers + upload.
    T *ad, *sumd;
    hipMalloc((void**)&ad, arraysize * sizeof(T));
    gpuErrchk(hipPeekAtLastError());
    hipMalloc((void**)&sumd, arraysize * sizeof(T));
    gpuErrchk(hipPeekAtLastError());
    hipMemcpy(ad, a, arraysize * sizeof(T), hipMemcpyHostToDevice);
    gpuErrchk(hipGetLastError());
    hipMemcpy(sumd, sum, arraysize * sizeof(T), hipMemcpyHostToDevice);
    gpuErrchk(hipPeekAtLastError());
    printf("addition kernel;\n");
    hipLaunchKernelGGL(( addition_test_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, ad, sumd);
    gpuErrchk(hipPeekAtLastError());
    // Blocking copy back, then an explicit sync before reading on the host.
    hipMemcpy(sum, sumd, arraysize * sizeof(T), hipMemcpyDeviceToHost);
    hipDeviceSynchronize();
    for (int i = 0; i < arraysize; ++i) {
        printf("sum[%d]: %d\n", i, sum[i]);
    }
    // FIX: release device and host memory (the original leaked everything).
    hipFree(ad);
    hipFree(sumd);
    free(a);
    free(sum);
    free(answer);
}
int main(int argc, char *argv[])
{
    // Run the atomic-vs-racy-addition experiment for 32-bit integers.
    test_atomic_int<int32_t>();
    return 0;
}
| c270999aa1fc85280eb372388df66b7bca773e7b.cu | #include <stdio.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    // Nothing to report on success.
    if (code == cudaSuccess) return;
    // Emit the runtime's error string with the call site, then optionally die
    // with the error code as the process exit status.
    fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort) exit(code);
}
const int blocksize = 1024;
const int factor = 16;
const int arraysize = blocksize / factor;
template <typename T>
__global__ void addition_test_kernel(T * a, T * sum) {
    // `factor` (= blocksize / arraysize) threads all map onto each element
    // index, so every sum[idx] is updated by several threads concurrently.
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int idx = (tid) % arraysize;
    // Plain += is an unsynchronised read-modify-write race between the
    // threads sharing idx (updates may be lost) — presumably intentional,
    // to contrast with the atomicAdd below; TODO confirm.
    sum[idx] += a[idx];
    atomicAdd(&sum[idx], a[idx]);
    // Debug trace for one element; device printf ordering is unspecified.
    // NOTE(review): "%d" assumes T is a 32-bit integer type.
    if (idx == 2)
        printf("%d %d %d %d -> ", idx, tid, sum[idx], a[idx]);
}
template <typename T>
// Drives addition_test_kernel: one block of `blocksize` threads, `factor`
// threads per element, then prints the resulting sums.
void test_atomic_int() {
    dim3 dimBlock(blocksize, 1);
    dim3 dimGrid(1, 1);
    // Host buffers. (The original also declared b/bd/answerd, which were
    // never used — removed.)
    T *a = (T*)malloc(arraysize * sizeof(T));
    T *sum = (T*)malloc(arraysize * sizeof(T));
    T *answer = (T*)malloc(arraysize * sizeof(T)); // expected values (currently unchecked)
    for (int i = 0; i < arraysize; ++i) {
        a[i] = 1;
        sum[i] = 0;
        answer[i] = i + i;
    }
    // Device buffers + upload.
    T *ad, *sumd;
    cudaMalloc((void**)&ad, arraysize * sizeof(T));
    gpuErrchk(cudaPeekAtLastError());
    cudaMalloc((void**)&sumd, arraysize * sizeof(T));
    gpuErrchk(cudaPeekAtLastError());
    cudaMemcpy(ad, a, arraysize * sizeof(T), cudaMemcpyHostToDevice);
    gpuErrchk(cudaGetLastError());
    cudaMemcpy(sumd, sum, arraysize * sizeof(T), cudaMemcpyHostToDevice);
    gpuErrchk(cudaPeekAtLastError());
    printf("addition kernel;\n");
    addition_test_kernel<<<dimGrid, dimBlock>>>(ad, sumd);
    gpuErrchk(cudaPeekAtLastError());
    // Blocking copy back, then an explicit sync before reading on the host.
    cudaMemcpy(sum, sumd, arraysize * sizeof(T), cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    for (int i = 0; i < arraysize; ++i) {
        printf("sum[%d]: %d\n", i, sum[i]);
    }
    // FIX: release device and host memory (the original leaked everything).
    cudaFree(ad);
    cudaFree(sumd);
    free(a);
    free(sum);
    free(answer);
}
int main(int argc, char *argv[])
{
    // Run the atomic-vs-racy-addition experiment for 32-bit integers.
    test_atomic_int<int32_t>();
    return 0;
}
|
da18a2b055509d24f46e058169b576a86735e6b7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__
void kernel2D(float *d_out, int w, int h, ...)
{
    // Template/snippet kernel for a 2D launch over a w x h image.
    // NOTE(review): the trailing "..." is snippet notation — CUDA/HIP kernels
    // cannot take a C variadic argument list.
    // Compute this thread's column/row and the flattened pixel index.
    const int c = blockIdx.x*blockDim.x + threadIdx.x;
    const int r = blockIdx.y*blockDim.y + threadIdx.y;
    const int i = r * w + c;
    // Guard the grid tail. FIX: a void kernel cannot return a value and
    // `error` was never declared — a plain `return;` is the correct guard.
    if((c>=w) || (r >= h))
        return;
    d_out[i] = INSERT_CODE_HERE; // placeholder: compute/store the pixel value.
}
| da18a2b055509d24f46e058169b576a86735e6b7.cu | __global__
void kernel2D(float *d_out, int w, int h, ...)
{
    // Template/snippet kernel for a 2D launch over a w x h image.
    // NOTE(review): the trailing "..." is snippet notation — CUDA kernels
    // cannot take a C variadic argument list.
    // Compute this thread's column/row and the flattened pixel index.
    const int c = blockIdx.x*blockDim.x + threadIdx.x;
    const int r = blockIdx.y*blockDim.y + threadIdx.y;
    const int i = r * w + c;
    // Guard the grid tail. FIX: a void kernel cannot return a value and
    // `error` was never declared — a plain `return;` is the correct guard.
    if((c>=w) || (r >= h))
        return;
    d_out[i] = INSERT_CODE_HERE; // placeholder: compute/store the pixel value.
}
|
616bd2e2bfc3bedd32319eb517b82c560d4c60d4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
RoI-aware point cloud feature pooling
Written by Shaoshuai Shi
All Rights Reserved 2019-2020.
*/
#include <math.h>
#include <stdio.h>
#define THREADS_PER_BLOCK 256
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
// #define DEBUG
__device__ inline void lidar_to_local_coords(float shift_x, float shift_y, float rot_angle, float &local_x, float &local_y){
    // Rotate the (already box-centred) offset by -rot_angle, i.e. express the
    // point in the box's local, heading-aligned frame.
    const float c = cos(-rot_angle);
    const float s = sin(-rot_angle);
    local_x = shift_x * c - shift_y * s;
    local_y = shift_x * s + shift_y * c;
}
__device__ inline int check_pt_in_box3d(const float *pt, const float *box3d, float &local_x, float &local_y){
    // box3d layout: [cx, cy, cz, dx, dy, dz, heading]; (cx, cy, cz) is the
    // box centre. Returns 1 when `pt` lies inside the (MARGIN-expanded) box
    // and writes the point's in-plane local coordinates.
    const float MARGIN = 1e-5;
    // Cheap vertical rejection first.
    if (fabsf(pt[2] - box3d[2]) > box3d[5] / 2.0) return 0;
    // Rotate the horizontal offset into the box frame, then test both axes.
    lidar_to_local_coords(pt[0] - box3d[0], pt[1] - box3d[1], box3d[6], local_x, local_y);
    return (fabs(local_x) < box3d[3] / 2.0 + MARGIN) & (fabs(local_y) < box3d[4] / 2.0 + MARGIN);
}
__global__ void generate_pts_mask_for_box3d(int boxes_num, int pts_num, int out_x, int out_y, int out_z,
    const float *rois, const float *pts, int *pts_mask){
    // params rois: (N, 7) [x, y, z, dx, dy, dz, heading]; (x, y, z) = box center
    // params pts: (npoints, 3) [x, y, z]
    // params pts_mask: (N, npoints); -1 = point not inside this box, otherwise
    //   the voxel coordinates packed as (x_idx << 16) | (y_idx << 8) | z_idx.
    int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
    int box_idx = blockIdx.y;
    if (pt_idx >= pts_num || box_idx >= boxes_num) return;

    pts += pt_idx * 3;
    rois += box_idx * 7;
    pts_mask += box_idx * pts_num + pt_idx;

    float local_x = 0, local_y = 0;
    int cur_in_flag = check_pt_in_box3d(pts, rois, local_x, local_y);
    pts_mask[0] = -1;
    if (cur_in_flag > 0){
        float local_z = pts[2] - rois[2];
        float dx = rois[3], dy = rois[4], dz = rois[5];
        float x_res = dx / out_x;
        float y_res = dy / out_y;
        float z_res = dz / out_z;
        // FIX: clamp in *signed* arithmetic. The original cast straight to
        // unsigned, so a coordinate a hair below the box edge (int value -1,
        // possible because of the MARGIN in check_pt_in_box3d) wrapped to
        // 4294967295 and min() clamped it to the FAR voxel instead of voxel 0;
        // max(x_idx, 0) on an unsigned was a no-op.
        int x_idx = int((local_x + dx / 2) / x_res);
        int y_idx = int((local_y + dy / 2) / y_res);
        int z_idx = int((local_z + dz / 2) / z_res);
        x_idx = min(max(x_idx, 0), out_x - 1);
        y_idx = min(max(y_idx, 0), out_y - 1);
        z_idx = min(max(z_idx, 0), out_z - 1);
        // Pack the voxel coordinates; out_* must fit the bit fields decoded
        // by collect_inside_pts_for_box3d.
        unsigned int idx_encoding = ((unsigned int)x_idx << 16) + ((unsigned int)y_idx << 8) + (unsigned int)z_idx;
        pts_mask[0] = idx_encoding;
    }
}
__global__ void collect_inside_pts_for_box3d(int boxes_num, int pts_num, int max_pts_each_voxel,
    int out_x, int out_y, int out_z, const int *pts_mask, int *pts_idx_of_voxels){
    // One thread per box: serially converts the per-point voxel codes in
    // pts_mask into per-voxel point-index lists.
    // params pts_mask: (N, npoints); -1 or a packed (x<<16 | y<<8 | z) code
    // params pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel);
    //   slot 0 of each voxel holds the running point counter.
    int box_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (box_idx >= boxes_num) return;

    int max_num_pts = max_pts_each_voxel - 1; // index 0 is the counter
    pts_idx_of_voxels += box_idx * out_x * out_y * out_z * max_pts_each_voxel;
    for (int k = 0; k < pts_num; k++){
        if (pts_mask[box_idx * pts_num + k] != -1){
            unsigned int idx_encoding = pts_mask[box_idx * pts_num + k];
            // Unpack the voxel coordinates. NOTE(review): x is decoded with an
            // 8-bit mask although the encoder shifts it by 16 — only safe while
            // out_x <= 255; confirm against generate_pts_mask_for_box3d.
            unsigned int x_idx = (idx_encoding >> 16) & 0xFF;
            unsigned int y_idx = (idx_encoding >> 8) & 0xFF;
            unsigned int z_idx = idx_encoding & 0xFF;
            unsigned int base_offset = x_idx * out_y * out_z * max_pts_each_voxel + y_idx * out_z * max_pts_each_voxel + z_idx * max_pts_each_voxel;
            unsigned int cnt = pts_idx_of_voxels[base_offset];
            // Append the point while the voxel has room; overflow points are
            // silently dropped.
            if (cnt < max_num_pts){
                pts_idx_of_voxels[base_offset + cnt + 1] = k;
                pts_idx_of_voxels[base_offset]++;
            }
#ifdef DEBUG
            printf("collect: pts_%d, idx(%d, %d, %d), idx_encoding=%x\n",
                k, x_idx, y_idx, z_idx, idx_encoding);
#endif
        }
    }
}
__global__ void roiaware_maxpool3d(int boxes_num, int pts_num, int channels, int max_pts_each_voxel, int out_x,
    int out_y, int out_z, const float *pts_feature, const int *pts_idx_of_voxels, float *pooled_features, int *argmax){
    // Max-pool the features of the points falling into each ROI voxel,
    // remembering the winning point index (argmax) for the backward pass.
    // params pts_feature: (npoints, C)
    // params pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel); slot 0 = counter
    // params pooled_features / argmax: (N, out_x, out_y, out_z, C)
    // Launch layout: grid.z = box, grid.y = channel, grid.x*block.x = flat voxel.
    int box_idx = blockIdx.z;
    int channel_idx = blockIdx.y;
    int voxel_idx_flat = blockIdx.x * blockDim.x + threadIdx.x;
    int x_idx = voxel_idx_flat / (out_y * out_z);
    int y_idx = (voxel_idx_flat - x_idx * (out_y * out_z)) / out_z;
    int z_idx = voxel_idx_flat % out_z;
    if (box_idx >= boxes_num || channel_idx >= channels || x_idx >= out_x || y_idx >= out_y || z_idx >= out_z) return;

    int offset_base = x_idx * out_y * out_z + y_idx * out_z + z_idx;
    pts_idx_of_voxels += box_idx * out_x * out_y * out_z * max_pts_each_voxel + offset_base * max_pts_each_voxel;
    pooled_features += box_idx * out_x * out_y * out_z * channels + offset_base * channels + channel_idx;
    argmax += box_idx * out_x * out_y * out_z * channels + offset_base * channels + channel_idx;

    int argmax_idx = -1;
    // FIX: -1e50 is outside float range (the old initialiser silently became
    // -inf); use the smallest finite float (-FLT_MAX) instead.
    float max_val = -3.402823466e+38f;
    int total_pts = pts_idx_of_voxels[0];
    for (int k = 1; k <= total_pts; k++){
        float v = pts_feature[pts_idx_of_voxels[k] * channels + channel_idx];
        if (v > max_val){
            max_val = v;
            argmax_idx = pts_idx_of_voxels[k];
        }
    }
    if (argmax_idx != -1){
        // Empty voxels leave pooled_features untouched — presumably the
        // caller zero-initialises the output; TODO confirm.
        pooled_features[0] = max_val;
    }
    argmax[0] = argmax_idx;
}
__global__ void roiaware_avgpool3d(int boxes_num, int pts_num, int channels, int max_pts_each_voxel, int out_x,
    int out_y, int out_z, const float *pts_feature, const int *pts_idx_of_voxels, float *pooled_features){
    // Average-pool the features of the points falling into each ROI voxel.
    // params pts_feature: (npoints, C)
    // params pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel); slot 0 = counter
    // params pooled_features: (N, out_x, out_y, out_z, C)
    // Launch layout: grid.z = box, grid.y = channel, grid.x*block.x = flat voxel.
    const int box = blockIdx.z;
    const int ch = blockIdx.y;
    const int flat = blockIdx.x * blockDim.x + threadIdx.x;
    const int xi = flat / (out_y * out_z);
    const int yi = (flat - xi * (out_y * out_z)) / out_z;
    const int zi = flat % out_z;
    if (box >= boxes_num || ch >= channels || xi >= out_x || yi >= out_y || zi >= out_z)
        return;

    const int voxel = xi * out_y * out_z + yi * out_z + zi;
    const int *idx = pts_idx_of_voxels + (box * out_x * out_y * out_z + voxel) * max_pts_each_voxel;
    float *out = pooled_features + (box * out_x * out_y * out_z + voxel) * channels + ch;

    const int n = idx[0];          // slot 0 is the per-voxel point counter
    float acc = 0;
    for (int k = 1; k <= n; k++)
        acc += pts_feature[idx[k] * channels + ch];
    if (n > 0)
        out[0] = acc / n;          // empty voxels are left untouched
}
void roiaware_pool3d_launcher(int boxes_num, int pts_num, int channels, int max_pts_each_voxel, int out_x, int out_y, int out_z,
const float *rois, const float *pts, const float *pts_feature, int *argmax, int *pts_idx_of_voxels, float *pooled_features, int pool_method){
// Forward RoI-aware pooling: mask points per box, bucket them into voxels,
// then max- or average-pool their features.
// params rois: (N, 7) [x, y, z, dx, dy, dz, heading] (x, y, z) is the box center
// params pts: (npoints, 3) [x, y, z]
// params pts_feature: (npoints, C)
// params argmax: (N, out_x, out_y, out_z, C), written only for max pooling
// params pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel)
// params pooled_features: (N, out_x, out_y, out_z, C)
// params pool_method: 0: max_pool 1: avg_pool
hipError_t err;
int *pts_mask = NULL;
// temporary (N, npoints) buffer: packed voxel coords per point, -1 = outside the box
err = hipMalloc(&pts_mask, boxes_num * pts_num * sizeof(int)); // (N, M)
if (hipSuccess != err) {
fprintf(stderr, "CUDA malloc failed : %s\n", hipGetErrorString(err));
exit(-1);
}
hipMemset(pts_mask, -1, boxes_num * pts_num * sizeof(int));
dim3 blocks_mask(DIVUP(pts_num, THREADS_PER_BLOCK), boxes_num);
dim3 threads(THREADS_PER_BLOCK);
hipLaunchKernelGGL(( generate_pts_mask_for_box3d), dim3(blocks_mask), dim3(threads), 0, 0, boxes_num, pts_num, out_x, out_y, out_z, rois, pts, pts_mask);
// TODO: Merge the collect and pool functions, SS
dim3 blocks_collect(DIVUP(boxes_num, THREADS_PER_BLOCK));
hipLaunchKernelGGL(( collect_inside_pts_for_box3d), dim3(blocks_collect), dim3(threads), 0, 0, boxes_num, pts_num, max_pts_each_voxel,
out_x, out_y, out_z, pts_mask, pts_idx_of_voxels);
dim3 blocks_pool(DIVUP(out_x * out_y * out_z, THREADS_PER_BLOCK), channels, boxes_num);
if (pool_method == 0){
hipLaunchKernelGGL(( roiaware_maxpool3d), dim3(blocks_pool), dim3(threads), 0, 0, boxes_num, pts_num, channels, max_pts_each_voxel, out_x, out_y, out_z,
pts_feature, pts_idx_of_voxels, pooled_features, argmax);
}
else if (pool_method == 1){
hipLaunchKernelGGL(( roiaware_avgpool3d), dim3(blocks_pool), dim3(threads), 0, 0, boxes_num, pts_num, channels, max_pts_each_voxel, out_x, out_y, out_z,
pts_feature, pts_idx_of_voxels, pooled_features);
}
hipFree(pts_mask);
// surface launch/configuration errors; matches the error handling already used
// by points_in_boxes_launcher in this file
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
#ifdef DEBUG
hipDeviceSynchronize(); // for using printf in kernel function
#endif
}
__global__ void roiaware_maxpool3d_backward(int boxes_num, int channels, int out_x, int out_y, int out_z,
const int *argmax, const float *grad_out, float *grad_in){
// Scatter the max-pool gradient back to the single point that produced each max.
// argmax:   (N, out_x, out_y, out_z, C) point index per pooled cell, -1 for empty voxels
// grad_out: (N, out_x, out_y, out_z, C) incoming gradient
// grad_in:  (npoints, C) accumulated output gradient
// Launch layout: gridDim = (ceil(out_x*out_y*out_z / blockDim.x), C, N)
int box = blockIdx.z;
int ch = blockIdx.y;
int flat = blockIdx.x * blockDim.x + threadIdx.x;
int vx = flat / (out_y * out_z);
int vy = (flat / out_z) % out_y;
int vz = flat % out_z;
if (box >= boxes_num || ch >= channels || vx >= out_x || vy >= out_y || vz >= out_z) return;
int voxel = (vx * out_y + vy) * out_z + vz;
int cell = (box * out_x * out_y * out_z + voxel) * channels + ch;
int src_pt = argmax[cell];
if (src_pt == -1) return; // empty voxel: nothing contributed to the max
// several cells may have selected the same point, hence the atomic accumulation
atomicAdd(grad_in + src_pt * channels + ch, grad_out[cell]);
}
__global__ void roiaware_avgpool3d_backward(int boxes_num, int channels, int out_x, int out_y, int out_z,
int max_pts_each_voxel, const int *pts_idx_of_voxels, const float *grad_out, float *grad_in){
// Backward of average pooling: distribute each pooled cell's gradient equally
// over the points that contributed to the average.
// params pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel)
// params grad_out: (N, out_x, out_y, out_z, C)
// params grad_in: (npoints, C), return value
// Launch layout: gridDim = (ceil(out_x*out_y*out_z / blockDim.x), C, N)
int box_idx = blockIdx.z;
int channel_idx = blockIdx.y;
int voxel_idx_flat = blockIdx.x * blockDim.x + threadIdx.x;
int x_idx = voxel_idx_flat / (out_y * out_z);
int y_idx = (voxel_idx_flat - x_idx * (out_y * out_z)) / out_z;
int z_idx = voxel_idx_flat % out_z;
if (box_idx >= boxes_num || channel_idx >= channels|| x_idx >= out_x || y_idx >= out_y || z_idx >= out_z) return;
int offset_base = x_idx * out_y * out_z + y_idx * out_z + z_idx;
pts_idx_of_voxels += box_idx * out_x * out_y * out_z * max_pts_each_voxel + offset_base * max_pts_each_voxel;
grad_out += box_idx * out_x * out_y * out_z * channels + offset_base * channels + channel_idx;
int total_pts = pts_idx_of_voxels[0]; // slot 0 stores the per-voxel point count
// fmaxf guards the division for empty voxels (total_pts == 0); the loop then runs 0 times
float cur_grad = 1 / fmaxf(float(total_pts), 1.0);
for (int k = 1; k <= total_pts; k++){
// points may belong to several voxels/boxes, hence the atomic accumulation
atomicAdd(grad_in + pts_idx_of_voxels[k] * channels + channel_idx, grad_out[0] * cur_grad);
}
}
void roiaware_pool3d_backward_launcher(int boxes_num, int out_x, int out_y, int out_z, int channels, int max_pts_each_voxel,
const int *pts_idx_of_voxels, const int *argmax, const float *grad_out, float *grad_in, int pool_method){
// Dispatch the backward pooling kernel matching the forward pool_method.
// pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel)
// argmax: (N, out_x, out_y, out_z, C)
// grad_out: (N, out_x, out_y, out_z, C); grad_in: (npoints, C), accumulated output
// pool_method: 0 = max pool, 1 = avg pool; any other value is a no-op
dim3 grid(DIVUP(out_x * out_y * out_z, THREADS_PER_BLOCK), channels, boxes_num);
dim3 block(THREADS_PER_BLOCK);
switch (pool_method){
case 0: // gradient flows only to the argmax point of each cell
hipLaunchKernelGGL(( roiaware_maxpool3d_backward), dim3(grid), dim3(block), 0, 0,
boxes_num, channels, out_x, out_y, out_z, argmax, grad_out, grad_in
);
break;
case 1: // gradient is shared equally by all points in the voxel
hipLaunchKernelGGL(( roiaware_avgpool3d_backward), dim3(grid), dim3(block), 0, 0,
boxes_num, channels, out_x, out_y, out_z, max_pts_each_voxel, pts_idx_of_voxels, grad_out, grad_in
);
break;
default: // unknown method: silently do nothing, same as the original if/else chain
break;
}
}
__global__ void points_in_boxes_kernel(int batch_size, int boxes_num, int pts_num, const float *boxes,
const float *pts, int *box_idx_of_points){
// Assign each point the index of the first (lowest-index) box that contains it.
// boxes: (B, N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
// pts: (B, npoints, 3) [x, y, z] in LiDAR coordinates
// box_idx_of_points: (B, npoints); default -1, left untouched when no box matches
// Launch layout: gridDim = (ceil(pts_num / blockDim.x), B)
int batch = blockIdx.y;
int pt = blockIdx.x * blockDim.x + threadIdx.x;
if (batch >= batch_size || pt >= pts_num) return;
const float *batch_boxes = boxes + batch * boxes_num * 7;
const float *cur_pt = pts + (batch * pts_num + pt) * 3;
int *out_slot = box_idx_of_points + batch * pts_num + pt;
float local_x = 0, local_y = 0;
for (int b = 0; b < boxes_num; b++){
if (check_pt_in_box3d(cur_pt, batch_boxes + b * 7, local_x, local_y)){
*out_slot = b;
break;
}
}
}
void points_in_boxes_launcher(int batch_size, int boxes_num, int pts_num, const float *boxes,
const float *pts, int *box_idx_of_points){
// Host-side wrapper: launches one thread per (batch, point) to find the first
// box containing each point.
// params boxes: (B, N, 7) [x, y, z, dx, dy, dz, heading] (x, y, z) is the box center
// params pts: (B, npoints, 3) [x, y, z]
// params boxes_idx_of_points: (B, npoints), default -1
hipError_t err;
dim3 blocks(DIVUP(pts_num, THREADS_PER_BLOCK), batch_size);
dim3 threads(THREADS_PER_BLOCK);
hipLaunchKernelGGL(( points_in_boxes_kernel), dim3(blocks), dim3(threads), 0, 0, batch_size, boxes_num, pts_num, boxes, pts, box_idx_of_points);
// hipGetLastError reports launch/configuration errors only; execution errors
// surface at the next synchronizing call
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
#ifdef DEBUG
hipDeviceSynchronize(); // for using printf in kernel function
#endif
}
| 616bd2e2bfc3bedd32319eb517b82c560d4c60d4.cu | /*
RoI-aware point cloud feature pooling
Written by Shaoshuai Shi
All Rights Reserved 2019-2020.
*/
#include <math.h>
#include <stdio.h>
#define THREADS_PER_BLOCK 256
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
// #define DEBUG
__device__ inline void lidar_to_local_coords(float shift_x, float shift_y, float rot_angle, float &local_x, float &local_y){
// Rotate the LiDAR-frame offset (shift_x, shift_y) by -rot_angle into the box's
// local frame; results are written to local_x / local_y.
// Use the explicit single-precision cosf/sinf so the float arguments are never
// promoted through the double-precision overloads.
float cosa = cosf(-rot_angle), sina = sinf(-rot_angle);
local_x = shift_x * cosa - shift_y * sina;
local_y = shift_x * sina + shift_y * cosa;
}
__device__ inline int check_pt_in_box3d(const float *pt, const float *box3d, float &local_x, float &local_y){
// Return 1 if pt lies inside the rotated 3D box, else 0. Also writes the point's
// rotated in-plane coordinates to local_x / local_y (valid even when returning 0
// on the x/y test; not written when the z test rejects first).
// param pt: (x, y, z)
// param box3d: [x, y, z, dx, dy, dz, heading] (x, y, z) is the box center
const float MARGIN = 1e-5f; // small tolerance on the x/y extent test
float x = pt[0], y = pt[1], z = pt[2];
float cx = box3d[0], cy = box3d[1], cz = box3d[2];
float dx = box3d[3], dy = box3d[4], dz = box3d[5], rz = box3d[6];
// cheap z rejection first (division by 2 is exact in binary, so using the float
// literal does not change the comparison)
if (fabsf(z - cz) > dz / 2.0f) return 0;
lidar_to_local_coords(x - cx, y - cy, rz, local_x, local_y);
// single-precision fabsf throughout and logical && instead of bitwise & on bools;
// keep the flag as int rather than round-tripping a boolean through float
int in_flag = (fabsf(local_x) < dx / 2.0f + MARGIN) && (fabsf(local_y) < dy / 2.0f + MARGIN);
return in_flag;
}
__global__ void generate_pts_mask_for_box3d(int boxes_num, int pts_num, int out_x, int out_y, int out_z,
const float *rois, const float *pts, int *pts_mask){
// For every (box, point) pair, test whether the point lies inside the box and,
// if so, record which cell of the box's (out_x, out_y, out_z) voxel grid it falls in.
// params rois: [x, y, z, dx, dy, dz, heading] (x, y, z) is the box center
// params pts: (npoints, 3) [x, y, z]
// params pts_mask: (N, npoints): -1 means point doesnot in this box, otherwise: encode (x_idxs, y_idxs, z_idxs) by binary bit
// Launch layout: gridDim = (ceil(pts_num / blockDim.x), boxes_num)
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
int box_idx = blockIdx.y;
if (pt_idx >= pts_num || box_idx >= boxes_num) return;
pts += pt_idx * 3;
rois += box_idx * 7;
pts_mask += box_idx * pts_num + pt_idx;
float local_x = 0, local_y = 0;
// check_pt_in_box3d also rotates the point into the box frame (local_x, local_y)
int cur_in_flag = check_pt_in_box3d(pts, rois, local_x, local_y);
pts_mask[0] = -1;
if (cur_in_flag > 0){
float local_z = pts[2] - rois[2];
float dx = rois[3], dy = rois[4], dz = rois[5];
// voxel edge lengths along each axis
float x_res = dx / out_x;
float y_res = dy / out_y;
float z_res = dz / out_z;
unsigned int x_idx = int((local_x + dx / 2) / x_res);
unsigned int y_idx = int((local_y + dy / 2) / y_res);
unsigned int z_idx = int((local_z + dz / 2) / z_res);
// NOTE(review): x_idx/y_idx/z_idx are unsigned, so max(idx, 0) is a no-op and a
// negative int would wrap before the clamp. In practice the MARGIN tolerance in
// check_pt_in_box3d keeps the pre-truncation value > -1, so int() yields 0 --
// verify this invariant if the MARGIN semantics ever change.
x_idx = min(max(x_idx, 0), out_x - 1);
y_idx = min(max(y_idx, 0), out_y - 1);
z_idx = min(max(z_idx, 0), out_z - 1);
// pack the three voxel coordinates into 8-bit fields (decoded with & 0xFF by
// collect_inside_pts_for_box3d, so each out_* dimension must fit in 8 bits)
unsigned int idx_encoding = (x_idx << 16) + (y_idx << 8) + z_idx;
pts_mask[0] = idx_encoding;
}
}
__global__ void collect_inside_pts_for_box3d(int boxes_num, int pts_num, int max_pts_each_voxel,
int out_x, int out_y, int out_z, const int *pts_mask, int *pts_idx_of_voxels){
// One thread per box: scan all points and gather, per voxel, the indices of the
// points that generate_pts_mask_for_box3d assigned to that voxel.
// params pts_mask: (N, npoints): -1 for "outside", otherwise packed voxel coords
// params pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel)
int box_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (box_idx >= boxes_num) return;
int max_num_pts = max_pts_each_voxel - 1; // index 0 is the counter
pts_idx_of_voxels += box_idx * out_x * out_y * out_z * max_pts_each_voxel;
for (int k = 0; k < pts_num; k++){
if (pts_mask[box_idx * pts_num + k] != -1){
// unpack the voxel coordinates encoded as (x << 16) + (y << 8) + z
unsigned int idx_encoding = pts_mask[box_idx * pts_num + k];
unsigned int x_idx = (idx_encoding >> 16) & 0xFF;
unsigned int y_idx = (idx_encoding >> 8) & 0xFF;
unsigned int z_idx = idx_encoding & 0xFF;
unsigned int base_offset = x_idx * out_y * out_z * max_pts_each_voxel + y_idx * out_z * max_pts_each_voxel + z_idx * max_pts_each_voxel;
unsigned int cnt = pts_idx_of_voxels[base_offset];
// slot 0 holds the per-voxel counter; points beyond the voxel capacity are dropped
if (cnt < max_num_pts){
pts_idx_of_voxels[base_offset + cnt + 1] = k;
pts_idx_of_voxels[base_offset]++;
}
#ifdef DEBUG
printf("collect: pts_%d, idx(%d, %d, %d), idx_encoding=%x\n",
k, x_idx, y_idx, z_idx, idx_encoding);
#endif
}
}
}
__global__ void roiaware_maxpool3d(int boxes_num, int pts_num, int channels, int max_pts_each_voxel, int out_x,
int out_y, int out_z, const float *pts_feature, const int *pts_idx_of_voxels, float *pooled_features, int *argmax){
// Max-pool the features of the points collected in each RoI voxel, recording the
// winning point index in argmax (-1 for empty voxels).
// params pts_feature: (npoints, C)
// params pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel), index 0 is the counter
// params pooled_features: (N, out_x, out_y, out_z, C)
// params argmax: (N, out_x, out_y, out_z, C)
// Launch layout: gridDim = (ceil(out_x*out_y*out_z / blockDim.x), C, N)
int box_idx = blockIdx.z;
int channel_idx = blockIdx.y;
int voxel_idx_flat = blockIdx.x * blockDim.x + threadIdx.x;
int x_idx = voxel_idx_flat / (out_y * out_z);
int y_idx = (voxel_idx_flat - x_idx * (out_y * out_z)) / out_z;
int z_idx = voxel_idx_flat % out_z;
if (box_idx >= boxes_num || channel_idx >= channels || x_idx >= out_x || y_idx >= out_y || z_idx >= out_z) return;
#ifdef DEBUG
printf("src pts_idx_of_voxels: (%p, ), argmax: %p\n", pts_idx_of_voxels, argmax);
#endif
int offset_base = x_idx * out_y * out_z + y_idx * out_z + z_idx;
pts_idx_of_voxels += box_idx * out_x * out_y * out_z * max_pts_each_voxel + offset_base * max_pts_each_voxel;
pooled_features += box_idx * out_x * out_y * out_z * channels + offset_base * channels + channel_idx;
argmax += box_idx * out_x * out_y * out_z * channels + offset_base * channels + channel_idx;
int argmax_idx = -1;
// BUGFIX: the previous sentinel -1e50 is a double literal outside the float range,
// so converting it to float is not well-defined (yields -inf in practice). Use a
// large but finite single-precision sentinel instead.
float max_val = -1e30f;
int total_pts = pts_idx_of_voxels[0]; // slot 0 stores the per-voxel point count
for (int k = 1; k <= total_pts; k++){
if (pts_feature[pts_idx_of_voxels[k] * channels + channel_idx] > max_val){
max_val = pts_feature[pts_idx_of_voxels[k] * channels + channel_idx];
argmax_idx = pts_idx_of_voxels[k];
}
}
if (argmax_idx != -1){
pooled_features[0] = max_val; // empty voxels leave the output untouched
}
argmax[0] = argmax_idx;
#ifdef DEBUG
printf("channel_%d idx(%d, %d, %d), argmax_idx=(%d, %.3f), total=%d, after pts_idx: %p, argmax: (%p, %d)\n",
channel_idx, x_idx, y_idx, z_idx, argmax_idx, max_val, total_pts, pts_idx_of_voxels, argmax, argmax_idx);
#endif
}
__global__ void roiaware_avgpool3d(int boxes_num, int pts_num, int channels, int max_pts_each_voxel, int out_x,
int out_y, int out_z, const float *pts_feature, const int *pts_idx_of_voxels, float *pooled_features){
// Average-pool the features of the points collected in each RoI voxel.
// pts_feature: (npoints, C)
// pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel), slot 0 is the counter
// pooled_features: (N, out_x, out_y, out_z, C); empty voxels are left untouched
// Launch layout: gridDim = (ceil(out_x*out_y*out_z / blockDim.x), C, N)
int box_idx = blockIdx.z;
int ch = blockIdx.y;
int flat = blockIdx.x * blockDim.x + threadIdx.x;
int vx = flat / (out_y * out_z);
int vy = (flat / out_z) % out_y;
int vz = flat % out_z;
if (box_idx >= boxes_num || ch >= channels || vx >= out_x || vy >= out_y || vz >= out_z) return;
int voxel_offset = (vx * out_y + vy) * out_z + vz;
int voxel_base = box_idx * out_x * out_y * out_z + voxel_offset;
const int *voxel_pts = pts_idx_of_voxels + voxel_base * max_pts_each_voxel;
int num_pts = voxel_pts[0];
if (num_pts <= 0) return; // empty voxel: leave the output cell as-is
float total = 0;
for (int n = 1; n <= num_pts; n++){
total += pts_feature[voxel_pts[n] * channels + ch];
}
pooled_features[voxel_base * channels + ch] = total / num_pts;
}
void roiaware_pool3d_launcher(int boxes_num, int pts_num, int channels, int max_pts_each_voxel, int out_x, int out_y, int out_z,
const float *rois, const float *pts, const float *pts_feature, int *argmax, int *pts_idx_of_voxels, float *pooled_features, int pool_method){
// Forward RoI-aware pooling: mask points per box, bucket them into voxels,
// then max- or average-pool their features.
// params rois: (N, 7) [x, y, z, dx, dy, dz, heading] (x, y, z) is the box center
// params pts: (npoints, 3) [x, y, z]
// params pts_feature: (npoints, C)
// params argmax: (N, out_x, out_y, out_z, C), written only for max pooling
// params pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel)
// params pooled_features: (N, out_x, out_y, out_z, C)
// params pool_method: 0: max_pool 1: avg_pool
cudaError_t err;
int *pts_mask = NULL;
// temporary (N, npoints) buffer: packed voxel coords per point, -1 = outside the box
err = cudaMalloc(&pts_mask, boxes_num * pts_num * sizeof(int)); // (N, M)
if (cudaSuccess != err) {
fprintf(stderr, "CUDA malloc failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
cudaMemset(pts_mask, -1, boxes_num * pts_num * sizeof(int));
dim3 blocks_mask(DIVUP(pts_num, THREADS_PER_BLOCK), boxes_num);
dim3 threads(THREADS_PER_BLOCK);
generate_pts_mask_for_box3d<<<blocks_mask, threads>>>(boxes_num, pts_num, out_x, out_y, out_z, rois, pts, pts_mask);
// TODO: Merge the collect and pool functions, SS
dim3 blocks_collect(DIVUP(boxes_num, THREADS_PER_BLOCK));
collect_inside_pts_for_box3d<<<blocks_collect, threads>>>(boxes_num, pts_num, max_pts_each_voxel,
out_x, out_y, out_z, pts_mask, pts_idx_of_voxels);
dim3 blocks_pool(DIVUP(out_x * out_y * out_z, THREADS_PER_BLOCK), channels, boxes_num);
if (pool_method == 0){
roiaware_maxpool3d<<<blocks_pool, threads>>>(boxes_num, pts_num, channels, max_pts_each_voxel, out_x, out_y, out_z,
pts_feature, pts_idx_of_voxels, pooled_features, argmax);
}
else if (pool_method == 1){
roiaware_avgpool3d<<<blocks_pool, threads>>>(boxes_num, pts_num, channels, max_pts_each_voxel, out_x, out_y, out_z,
pts_feature, pts_idx_of_voxels, pooled_features);
}
cudaFree(pts_mask);
// surface launch/configuration errors; matches the error handling already used
// by points_in_boxes_launcher in this file
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
#ifdef DEBUG
cudaDeviceSynchronize(); // for using printf in kernel function
#endif
}
__global__ void roiaware_maxpool3d_backward(int boxes_num, int channels, int out_x, int out_y, int out_z,
const int *argmax, const float *grad_out, float *grad_in){
// Scatter the max-pool gradient back to the single point that produced each max.
// argmax:   (N, out_x, out_y, out_z, C) point index per pooled cell, -1 for empty voxels
// grad_out: (N, out_x, out_y, out_z, C) incoming gradient
// grad_in:  (npoints, C) accumulated output gradient
// Launch layout: gridDim = (ceil(out_x*out_y*out_z / blockDim.x), C, N)
int box = blockIdx.z;
int ch = blockIdx.y;
int flat = blockIdx.x * blockDim.x + threadIdx.x;
int vx = flat / (out_y * out_z);
int vy = (flat / out_z) % out_y;
int vz = flat % out_z;
if (box >= boxes_num || ch >= channels || vx >= out_x || vy >= out_y || vz >= out_z) return;
int voxel = (vx * out_y + vy) * out_z + vz;
int cell = (box * out_x * out_y * out_z + voxel) * channels + ch;
int src_pt = argmax[cell];
if (src_pt == -1) return; // empty voxel: nothing contributed to the max
// several cells may have selected the same point, hence the atomic accumulation
atomicAdd(grad_in + src_pt * channels + ch, grad_out[cell]);
}
__global__ void roiaware_avgpool3d_backward(int boxes_num, int channels, int out_x, int out_y, int out_z,
int max_pts_each_voxel, const int *pts_idx_of_voxels, const float *grad_out, float *grad_in){
// Backward of average pooling: distribute each pooled cell's gradient equally
// over the points that contributed to the average.
// params pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel)
// params grad_out: (N, out_x, out_y, out_z, C)
// params grad_in: (npoints, C), return value
// Launch layout: gridDim = (ceil(out_x*out_y*out_z / blockDim.x), C, N)
int box_idx = blockIdx.z;
int channel_idx = blockIdx.y;
int voxel_idx_flat = blockIdx.x * blockDim.x + threadIdx.x;
int x_idx = voxel_idx_flat / (out_y * out_z);
int y_idx = (voxel_idx_flat - x_idx * (out_y * out_z)) / out_z;
int z_idx = voxel_idx_flat % out_z;
if (box_idx >= boxes_num || channel_idx >= channels|| x_idx >= out_x || y_idx >= out_y || z_idx >= out_z) return;
int offset_base = x_idx * out_y * out_z + y_idx * out_z + z_idx;
pts_idx_of_voxels += box_idx * out_x * out_y * out_z * max_pts_each_voxel + offset_base * max_pts_each_voxel;
grad_out += box_idx * out_x * out_y * out_z * channels + offset_base * channels + channel_idx;
int total_pts = pts_idx_of_voxels[0]; // slot 0 stores the per-voxel point count
// fmaxf guards the division for empty voxels (total_pts == 0); the loop then runs 0 times
float cur_grad = 1 / fmaxf(float(total_pts), 1.0);
for (int k = 1; k <= total_pts; k++){
// points may belong to several voxels/boxes, hence the atomic accumulation
atomicAdd(grad_in + pts_idx_of_voxels[k] * channels + channel_idx, grad_out[0] * cur_grad);
}
}
void roiaware_pool3d_backward_launcher(int boxes_num, int out_x, int out_y, int out_z, int channels, int max_pts_each_voxel,
const int *pts_idx_of_voxels, const int *argmax, const float *grad_out, float *grad_in, int pool_method){
// Dispatch the backward pooling kernel matching the forward pool_method.
// pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel)
// argmax: (N, out_x, out_y, out_z, C)
// grad_out: (N, out_x, out_y, out_z, C); grad_in: (npoints, C), accumulated output
// pool_method: 0 = max pool, 1 = avg pool; any other value is a no-op
dim3 grid(DIVUP(out_x * out_y * out_z, THREADS_PER_BLOCK), channels, boxes_num);
dim3 block(THREADS_PER_BLOCK);
switch (pool_method){
case 0: // gradient flows only to the argmax point of each cell
roiaware_maxpool3d_backward<<<grid, block>>>(boxes_num, channels, out_x, out_y, out_z, argmax, grad_out, grad_in);
break;
case 1: // gradient is shared equally by all points in the voxel
roiaware_avgpool3d_backward<<<grid, block>>>(boxes_num, channels, out_x, out_y, out_z, max_pts_each_voxel, pts_idx_of_voxels, grad_out, grad_in);
break;
default: // unknown method: silently do nothing, same as the original if/else chain
break;
}
}
__global__ void points_in_boxes_kernel(int batch_size, int boxes_num, int pts_num, const float *boxes,
const float *pts, int *box_idx_of_points){
// Assign each point the index of the first (lowest-index) box that contains it.
// boxes: (B, N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
// pts: (B, npoints, 3) [x, y, z] in LiDAR coordinates
// box_idx_of_points: (B, npoints); default -1, left untouched when no box matches
// Launch layout: gridDim = (ceil(pts_num / blockDim.x), B)
int batch = blockIdx.y;
int pt = blockIdx.x * blockDim.x + threadIdx.x;
if (batch >= batch_size || pt >= pts_num) return;
const float *batch_boxes = boxes + batch * boxes_num * 7;
const float *cur_pt = pts + (batch * pts_num + pt) * 3;
int *out_slot = box_idx_of_points + batch * pts_num + pt;
float local_x = 0, local_y = 0;
for (int b = 0; b < boxes_num; b++){
if (check_pt_in_box3d(cur_pt, batch_boxes + b * 7, local_x, local_y)){
*out_slot = b;
break;
}
}
}
void points_in_boxes_launcher(int batch_size, int boxes_num, int pts_num, const float *boxes,
const float *pts, int *box_idx_of_points){
// Host-side wrapper: one thread per (batch, point) finds the first box
// containing each point.
// boxes: (B, N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
// pts: (B, npoints, 3) [x, y, z]
// box_idx_of_points: (B, npoints), expected pre-filled with -1
dim3 grid(DIVUP(pts_num, THREADS_PER_BLOCK), batch_size);
dim3 block(THREADS_PER_BLOCK);
points_in_boxes_kernel<<<grid, block>>>(batch_size, boxes_num, pts_num, boxes, pts, box_idx_of_points);
// cudaGetLastError reports launch/configuration failures; execution errors
// surface at the next synchronizing call
cudaError_t status = cudaGetLastError();
if (status != cudaSuccess) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(status));
exit(-1);
}
#ifdef DEBUG
cudaDeviceSynchronize(); // for using printf in kernel function
#endif
}
|
f0f057602bbf9d39619ef99f61cad101af4df57b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include "iterator.h"
#include <stdio.h>
#include <omp.h>
__global__ void jacobiIteration(double *u, double *uold, double *f, int N) {
    // One Jacobi sweep: update every interior cell of the (N+2)x(N+2) grid `u`
    // from the previous iterate `uold` and the N x N source term `f`.
    // BUGFIX: the original body ignored the thread/block indices, so every thread
    // serially recomputed the whole N x N sweep (all parallelism wasted, redundant
    // global writes). A grid-stride loop partitions the cells across threads and
    // produces the identical result for any launch configuration (u and uold are
    // distinct buffers, so iterations are independent).
    int total = N * N;
    int stride = gridDim.x * blockDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < total; idx += stride) {
        int i = idx / N + 1; // interior row, 1..N
        int j = idx % N + 1; // interior column, 1..N
        u[i*(N+2)+j] = 0.25 * (uold[i*(N+2)+j-1] + uold[i*(N+2)+j+1] + uold[(i-1)*(N+2)+j] + uold[(i+1)*(N+2)+j] + f[(i-1)*(N)+j-1]);
    }
}
| f0f057602bbf9d39619ef99f61cad101af4df57b.cu | #include <math.h>
#include "iterator.h"
#include <stdio.h>
#include <omp.h>
__global__ void jacobiIteration(double *u, double *uold, double *f, int N) {
    // One Jacobi sweep: update every interior cell of the (N+2)x(N+2) grid `u`
    // from the previous iterate `uold` and the N x N source term `f`.
    // BUGFIX: the original body ignored the thread/block indices, so every thread
    // serially recomputed the whole N x N sweep (all parallelism wasted, redundant
    // global writes). A grid-stride loop partitions the cells across threads and
    // produces the identical result for any launch configuration (u and uold are
    // distinct buffers, so iterations are independent).
    int total = N * N;
    int stride = gridDim.x * blockDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < total; idx += stride) {
        int i = idx / N + 1; // interior row, 1..N
        int j = idx % N + 1; // interior column, 1..N
        u[i*(N+2)+j] = 0.25 * (uold[i*(N+2)+j-1] + uold[i*(N+2)+j+1] + uold[(i-1)*(N+2)+j] + uold[(i+1)*(N+2)+j] + f[(i-1)*(N)+j-1]);
    }
}
|
98cc22744ad2035e6fc9b37c251cb26172a5732b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "getInducedGraphNeighborCountsKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark driver: sweeps matrix sizes x block shapes and times
// 1000 launches of getInducedGraphNeighborCountsKernel per configuration.
// argv[1] = number of matrices_ rows to sweep (no argc check: crashes if absent).
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int size = XSIZE*YSIZE;
// NOTE(review): the allocation sizes below are XSIZE*YSIZE *bytes*, not
// XSIZE*YSIZE*sizeof(int); if the kernel indexes these as int[size] the buffers
// are 4x too small -- verify against the kernel's access pattern.
// NOTE(review): none of these buffers is ever hipFree'd, and they are
// re-allocated on every loop iteration (device memory leak).
int *aggregateIdx = NULL;
hipMalloc(&aggregateIdx, XSIZE*YSIZE);
int *adjIndexesOut = NULL;
hipMalloc(&adjIndexesOut, XSIZE*YSIZE);
int *permutedAdjIndexes = NULL;
hipMalloc(&permutedAdjIndexes, XSIZE*YSIZE);
int *permutedAdjacencyIn = NULL;
hipMalloc(&permutedAdjacencyIn, XSIZE*YSIZE);
// round the launch extent up to a multiple of the block shape
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// hipFree(0) forces context creation; the next launch plus the 10-iteration
// loop below act as warm-up before timing
hipFree(0);hipLaunchKernelGGL((
getInducedGraphNeighborCountsKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, size,aggregateIdx,adjIndexesOut,permutedAdjIndexes,permutedAdjacencyIn);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
getInducedGraphNeighborCountsKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, size,aggregateIdx,adjIndexesOut,permutedAdjIndexes,permutedAdjacencyIn);
}
// NOTE(review): no hipDeviceSynchronize() before `end` is taken, so this times
// asynchronous kernel *launches*, not kernel execution -- verify intent.
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
getInducedGraphNeighborCountsKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, size,aggregateIdx,adjIndexesOut,permutedAdjIndexes,permutedAdjacencyIn);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 98cc22744ad2035e6fc9b37c251cb26172a5732b.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "getInducedGraphNeighborCountsKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark driver: sweeps matrix sizes x block shapes and times
// 1000 launches of getInducedGraphNeighborCountsKernel per configuration.
// argv[1] = number of matrices_ rows to sweep (no argc check: crashes if absent).
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int size = XSIZE*YSIZE;
// NOTE(review): the allocation sizes below are XSIZE*YSIZE *bytes*, not
// XSIZE*YSIZE*sizeof(int); if the kernel indexes these as int[size] the buffers
// are 4x too small -- verify against the kernel's access pattern.
// NOTE(review): none of these buffers is ever cudaFree'd, and they are
// re-allocated on every loop iteration (device memory leak).
int *aggregateIdx = NULL;
cudaMalloc(&aggregateIdx, XSIZE*YSIZE);
int *adjIndexesOut = NULL;
cudaMalloc(&adjIndexesOut, XSIZE*YSIZE);
int *permutedAdjIndexes = NULL;
cudaMalloc(&permutedAdjIndexes, XSIZE*YSIZE);
int *permutedAdjacencyIn = NULL;
cudaMalloc(&permutedAdjacencyIn, XSIZE*YSIZE);
// round the launch extent up to a multiple of the block shape
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// cudaFree(0) forces context creation; the next launch plus the 10-iteration
// loop below act as warm-up before timing
cudaFree(0);
getInducedGraphNeighborCountsKernel<<<gridBlock,threadBlock>>>(size,aggregateIdx,adjIndexesOut,permutedAdjIndexes,permutedAdjacencyIn);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
getInducedGraphNeighborCountsKernel<<<gridBlock,threadBlock>>>(size,aggregateIdx,adjIndexesOut,permutedAdjIndexes,permutedAdjacencyIn);
}
// NOTE(review): no cudaDeviceSynchronize() before `end` is taken, so this times
// asynchronous kernel *launches*, not kernel execution -- verify intent.
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
getInducedGraphNeighborCountsKernel<<<gridBlock,threadBlock>>>(size,aggregateIdx,adjIndexesOut,permutedAdjIndexes,permutedAdjacencyIn);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
8c592c812fb4e533bc454626d066096c4dfaa3a3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
[LinkSymbol] 69 updates in 0.01 seconds
[AnnotParser] begin
Token #
Token pragma
Token FCUDA
Token GRID
Token x_dim=16
Token y_dim=16
Token #
Token pragma
Token FCUDA
Token COREINFO
Token num_cores=1
Token pipeline=no
----addStatementBefore----index is:0
----addStatementBefore----index is:1
----addStatementBefore----index is:2
[AnnotParser] end in 0.05 seconds
WARNING: Fcuda flag is set but mcuda_nolib is not set
WARNING: turning on mcuda_nolib
WARNING: mcuda_nolib flag is set but serialThr is not set
WARNING: turning on serialThr
... Setting: FLOW = 3
TASK DECOMP = 2
CODE MOTION = 1
*** Before Any Passes ***
#include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx = blockIdx.x;
int by = blockIdx.y;
int aBegin = ((wA*16)*by);
int aEnd = ((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
int a = 0, b = 0, k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
===========================================
[SingleDeclarator] begin
-------------working on eliminating: signed char x, y
------eliminating: signed char x, y its child is: signed char x, y its parent is: signed char x, y;
------parent is instance of declaration statement, child was: signed char x, y parent was: signed char x, y; outer was: null
------now child becomes: signed char x, y; parent becomes: struct char2
{
signed char x, y;
};
outer becomes: struct char2
{
signed char x, y;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct char2
{
signed char x, y;
;
};
------after parent.removeChild(child), parent is: struct char2
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct char2
{
;
signed char y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct char2
{
;
signed char y;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct char2
{
;
signed char x;
signed char y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct char2
{
;
signed char x;
signed char y;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct char2
{
signed char x;
signed char y;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct char2
{
signed char x;
signed char y;
};
------Done with eliminateMultipleDeclarators, d now is: signed char x, y
-------------working on eliminating: unsigned char x, y
------eliminating: unsigned char x, y its child is: unsigned char x, y its parent is: unsigned char x, y;
------parent is instance of declaration statement, child was: unsigned char x, y parent was: unsigned char x, y; outer was: null
------now child becomes: unsigned char x, y; parent becomes: struct uchar2
{
unsigned char x, y;
};
outer becomes: struct uchar2
{
unsigned char x, y;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct uchar2
{
unsigned char x, y;
;
};
------after parent.removeChild(child), parent is: struct uchar2
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct uchar2
{
;
unsigned char y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct uchar2
{
;
unsigned char y;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct uchar2
{
;
unsigned char x;
unsigned char y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct uchar2
{
;
unsigned char x;
unsigned char y;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct uchar2
{
unsigned char x;
unsigned char y;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct uchar2
{
unsigned char x;
unsigned char y;
};
------Done with eliminateMultipleDeclarators, d now is: unsigned char x, y
-------------working on eliminating: signed char x, y, z
------eliminating: signed char x, y, z its child is: signed char x, y, z its parent is: signed char x, y, z;
------parent is instance of declaration statement, child was: signed char x, y, z parent was: signed char x, y, z; outer was: null
------now child becomes: signed char x, y, z; parent becomes: struct char3
{
signed char x, y, z;
};
outer becomes: struct char3
{
signed char x, y, z;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct char3
{
signed char x, y, z;
;
};
------after parent.removeChild(child), parent is: struct char3
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 2
------after, Declarator d = decl.getDeclarator(i), d is: z
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct char3
{
;
signed char z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct char3
{
;
signed char z;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct char3
{
;
signed char y;
signed char z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct char3
{
;
signed char y;
signed char z;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct char3
{
;
signed char x;
signed char y;
signed char z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct char3
{
;
signed char x;
signed char y;
signed char z;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct char3
{
signed char x;
signed char y;
signed char z;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct char3
{
signed char x;
signed char y;
signed char z;
};
------Done with eliminateMultipleDeclarators, d now is: signed char x, y, z
-------------working on eliminating: unsigned char x, y, z
------eliminating: unsigned char x, y, z its child is: unsigned char x, y, z its parent is: unsigned char x, y, z;
------parent is instance of declaration statement, child was: unsigned char x, y, z parent was: unsigned char x, y, z; outer was: null
------now child becomes: unsigned char x, y, z; parent becomes: struct uchar3
{
unsigned char x, y, z;
};
outer becomes: struct uchar3
{
unsigned char x, y, z;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct uchar3
{
unsigned char x, y, z;
;
};
------after parent.removeChild(child), parent is: struct uchar3
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 2
------after, Declarator d = decl.getDeclarator(i), d is: z
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct uchar3
{
;
unsigned char z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct uchar3
{
;
unsigned char z;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct uchar3
{
;
unsigned char y;
unsigned char z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct uchar3
{
;
unsigned char y;
unsigned char z;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct uchar3
{
;
unsigned char x;
unsigned char y;
unsigned char z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct uchar3
{
;
unsigned char x;
unsigned char y;
unsigned char z;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct uchar3
{
unsigned char x;
unsigned char y;
unsigned char z;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct uchar3
{
unsigned char x;
unsigned char y;
unsigned char z;
};
------Done with eliminateMultipleDeclarators, d now is: unsigned char x, y, z
-------------working on eliminating: signed char x, y, z, w
------eliminating: signed char x, y, z, w its child is: signed char x, y, z, w its parent is: signed char x, y, z, w;
------parent is instance of declaration statement, child was: signed char x, y, z, w parent was: signed char x, y, z, w; outer was: null
------now child becomes: signed char x, y, z, w; parent becomes: struct char4
{
signed char x, y, z, w;
};
outer becomes: struct char4
{
signed char x, y, z, w;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct char4
{
signed char x, y, z, w;
;
};
------after parent.removeChild(child), parent is: struct char4
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 3
------after, Declarator d = decl.getDeclarator(i), d is: w
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct char4
{
;
signed char w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct char4
{
;
signed char w;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 2
------after, Declarator d = decl.getDeclarator(i), d is: z
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct char4
{
;
signed char z;
signed char w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct char4
{
;
signed char z;
signed char w;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct char4
{
;
signed char y;
signed char z;
signed char w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct char4
{
;
signed char y;
signed char z;
signed char w;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct char4
{
;
signed char x;
signed char y;
signed char z;
signed char w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct char4
{
;
signed char x;
signed char y;
signed char z;
signed char w;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct char4
{
signed char x;
signed char y;
signed char z;
signed char w;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct char4
{
signed char x;
signed char y;
signed char z;
signed char w;
};
------Done with eliminateMultipleDeclarators, d now is: signed char x, y, z, w
-------------working on eliminating: unsigned char x, y, z, w
------eliminating: unsigned char x, y, z, w its child is: unsigned char x, y, z, w its parent is: unsigned char x, y, z, w;
------parent is instance of declaration statement, child was: unsigned char x, y, z, w parent was: unsigned char x, y, z, w; outer was: null
------now child becomes: unsigned char x, y, z, w; parent becomes: struct uchar4
{
unsigned char x, y, z, w;
};
outer becomes: struct uchar4
{
unsigned char x, y, z, w;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct uchar4
{
unsigned char x, y, z, w;
;
};
------after parent.removeChild(child), parent is: struct uchar4
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 3
------after, Declarator d = decl.getDeclarator(i), d is: w
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct uchar4
{
;
unsigned char w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct uchar4
{
;
unsigned char w;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 2
------after, Declarator d = decl.getDeclarator(i), d is: z
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct uchar4
{
;
unsigned char z;
unsigned char w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct uchar4
{
;
unsigned char z;
unsigned char w;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct uchar4
{
;
unsigned char y;
unsigned char z;
unsigned char w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct uchar4
{
;
unsigned char y;
unsigned char z;
unsigned char w;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct uchar4
{
;
unsigned char x;
unsigned char y;
unsigned char z;
unsigned char w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct uchar4
{
;
unsigned char x;
unsigned char y;
unsigned char z;
unsigned char w;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct uchar4
{
unsigned char x;
unsigned char y;
unsigned char z;
unsigned char w;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct uchar4
{
unsigned char x;
unsigned char y;
unsigned char z;
unsigned char w;
};
------Done with eliminateMultipleDeclarators, d now is: unsigned char x, y, z, w
-------------working on eliminating: short x, y
------eliminating: short x, y its child is: short x, y its parent is: short x, y;
------parent is instance of declaration statement, child was: short x, y parent was: short x, y; outer was: null
------now child becomes: short x, y; parent becomes: struct short2
{
short x, y;
};
outer becomes: struct short2
{
short x, y;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct short2
{
short x, y;
;
};
------after parent.removeChild(child), parent is: struct short2
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct short2
{
;
short y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct short2
{
;
short y;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct short2
{
;
short x;
short y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct short2
{
;
short x;
short y;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct short2
{
short x;
short y;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct short2
{
short x;
short y;
};
------Done with eliminateMultipleDeclarators, d now is: short x, y
-------------working on eliminating: unsigned short x, y
------eliminating: unsigned short x, y its child is: unsigned short x, y its parent is: unsigned short x, y;
------parent is instance of declaration statement, child was: unsigned short x, y parent was: unsigned short x, y; outer was: null
------now child becomes: unsigned short x, y; parent becomes: struct ushort2
{
unsigned short x, y;
};
outer becomes: struct ushort2
{
unsigned short x, y;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct ushort2
{
unsigned short x, y;
;
};
------after parent.removeChild(child), parent is: struct ushort2
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct ushort2
{
;
unsigned short y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct ushort2
{
;
unsigned short y;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct ushort2
{
;
unsigned short x;
unsigned short y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct ushort2
{
;
unsigned short x;
unsigned short y;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct ushort2
{
unsigned short x;
unsigned short y;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct ushort2
{
unsigned short x;
unsigned short y;
};
------Done with eliminateMultipleDeclarators, d now is: unsigned short x, y
-------------working on eliminating: short x, y, z
------eliminating: short x, y, z its child is: short x, y, z its parent is: short x, y, z;
------parent is instance of declaration statement, child was: short x, y, z parent was: short x, y, z; outer was: null
------now child becomes: short x, y, z; parent becomes: struct short3
{
short x, y, z;
};
outer becomes: struct short3
{
short x, y, z;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct short3
{
short x, y, z;
;
};
------after parent.removeChild(child), parent is: struct short3
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 2
------after, Declarator d = decl.getDeclarator(i), d is: z
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct short3
{
;
short z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct short3
{
;
short z;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct short3
{
;
short y;
short z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct short3
{
;
short y;
short z;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct short3
{
;
short x;
short y;
short z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct short3
{
;
short x;
short y;
short z;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct short3
{
short x;
short y;
short z;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct short3
{
short x;
short y;
short z;
};
------Done with eliminateMultipleDeclarators, d now is: short x, y, z
-------------working on eliminating: unsigned short x, y, z
------eliminating: unsigned short x, y, z its child is: unsigned short x, y, z its parent is: unsigned short x, y, z;
------parent is instance of declaration statement, child was: unsigned short x, y, z parent was: unsigned short x, y, z; outer was: null
------now child becomes: unsigned short x, y, z; parent becomes: struct ushort3
{
unsigned short x, y, z;
};
outer becomes: struct ushort3
{
unsigned short x, y, z;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct ushort3
{
unsigned short x, y, z;
;
};
------after parent.removeChild(child), parent is: struct ushort3
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 2
------after, Declarator d = decl.getDeclarator(i), d is: z
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct ushort3
{
;
unsigned short z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct ushort3
{
;
unsigned short z;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct ushort3
{
;
unsigned short y;
unsigned short z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct ushort3
{
;
unsigned short y;
unsigned short z;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct ushort3
{
;
unsigned short x;
unsigned short y;
unsigned short z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct ushort3
{
;
unsigned short x;
unsigned short y;
unsigned short z;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct ushort3
{
unsigned short x;
unsigned short y;
unsigned short z;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct ushort3
{
unsigned short x;
unsigned short y;
unsigned short z;
};
------Done with eliminateMultipleDeclarators, d now is: unsigned short x, y, z
-------------working on eliminating: short x, y, z, w
------eliminating: short x, y, z, w its child is: short x, y, z, w its parent is: short x, y, z, w;
------parent is instance of declaration statement, child was: short x, y, z, w parent was: short x, y, z, w; outer was: null
------now child becomes: short x, y, z, w; parent becomes: struct short4
{
short x, y, z, w;
};
outer becomes: struct short4
{
short x, y, z, w;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct short4
{
short x, y, z, w;
;
};
------after parent.removeChild(child), parent is: struct short4
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 3
------after, Declarator d = decl.getDeclarator(i), d is: w
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct short4
{
;
short w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct short4
{
;
short w;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 2
------after, Declarator d = decl.getDeclarator(i), d is: z
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct short4
{
;
short z;
short w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct short4
{
;
short z;
short w;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct short4
{
;
short y;
short z;
short w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct short4
{
;
short y;
short z;
short w;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct short4
{
;
short x;
short y;
short z;
short w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct short4
{
;
short x;
short y;
short z;
short w;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct short4
{
short x;
short y;
short z;
short w;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct short4
{
short x;
short y;
short z;
short w;
};
------Done with eliminateMultipleDeclarators, d now is: short x, y, z, w
-------------working on eliminating: unsigned short x, y, z, w
------eliminating: unsigned short x, y, z, w its child is: unsigned short x, y, z, w its parent is: unsigned short x, y, z, w;
------parent is instance of declaration statement, child was: unsigned short x, y, z, w parent was: unsigned short x, y, z, w; outer was: null
------now child becomes: unsigned short x, y, z, w; parent becomes: struct ushort4
{
unsigned short x, y, z, w;
};
outer becomes: struct ushort4
{
unsigned short x, y, z, w;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct ushort4
{
unsigned short x, y, z, w;
;
};
------after parent.removeChild(child), parent is: struct ushort4
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 3
------after, Declarator d = decl.getDeclarator(i), d is: w
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct ushort4
{
;
unsigned short w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct ushort4
{
;
unsigned short w;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 2
------after, Declarator d = decl.getDeclarator(i), d is: z
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct ushort4
{
;
unsigned short z;
unsigned short w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct ushort4
{
;
unsigned short z;
unsigned short w;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct ushort4
{
;
unsigned short y;
unsigned short z;
unsigned short w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct ushort4
{
;
unsigned short y;
unsigned short z;
unsigned short w;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct ushort4
{
;
unsigned short x;
unsigned short y;
unsigned short z;
unsigned short w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct ushort4
{
;
unsigned short x;
unsigned short y;
unsigned short z;
unsigned short w;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct ushort4
{
unsigned short x;
unsigned short y;
unsigned short z;
unsigned short w;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct ushort4
{
unsigned short x;
unsigned short y;
unsigned short z;
unsigned short w;
};
------Done with eliminateMultipleDeclarators, d now is: unsigned short x, y, z, w
-------------working on eliminating: int x, y
------eliminating: int x, y its child is: int x, y its parent is: int x, y;
------parent is instance of declaration statement, child was: int x, y parent was: int x, y; outer was: null
------now child becomes: int x, y; parent becomes: struct int2
{
int x, y;
};
outer becomes: struct int2
{
int x, y;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct int2
{
int x, y;
;
};
------after parent.removeChild(child), parent is: struct int2
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct int2
{
;
int y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct int2
{
;
int y;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct int2
{
;
int x;
int y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct int2
{
;
int x;
int y;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct int2
{
int x;
int y;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct int2
{
int x;
int y;
};
------Done with eliminateMultipleDeclarators, d now is: int x, y
-------------working on eliminating: unsigned int x, y
------eliminating: unsigned int x, y its child is: unsigned int x, y its parent is: unsigned int x, y;
------parent is instance of declaration statement, child was: unsigned int x, y parent was: unsigned int x, y; outer was: null
------now child becomes: unsigned int x, y; parent becomes: struct uint2
{
unsigned int x, y;
};
outer becomes: struct uint2
{
unsigned int x, y;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct uint2
{
unsigned int x, y;
;
};
------after parent.removeChild(child), parent is: struct uint2
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct uint2
{
;
unsigned int y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct uint2
{
;
unsigned int y;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct uint2
{
;
unsigned int x;
unsigned int y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct uint2
{
;
unsigned int x;
unsigned int y;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct uint2
{
unsigned int x;
unsigned int y;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct uint2
{
unsigned int x;
unsigned int y;
};
------Done with eliminateMultipleDeclarators, d now is: unsigned int x, y
-------------working on eliminating: int x, y, z
------eliminating: int x, y, z its child is: int x, y, z its parent is: int x, y, z;
------parent is instance of declaration statement, child was: int x, y, z parent was: int x, y, z; outer was: null
------now child becomes: int x, y, z; parent becomes: struct int3
{
int x, y, z;
};
outer becomes: struct int3
{
int x, y, z;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct int3
{
int x, y, z;
;
};
------after parent.removeChild(child), parent is: struct int3
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 2
------after, Declarator d = decl.getDeclarator(i), d is: z
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct int3
{
;
int z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct int3
{
;
int z;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct int3
{
;
int y;
int z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct int3
{
;
int y;
int z;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct int3
{
;
int x;
int y;
int z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct int3
{
;
int x;
int y;
int z;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct int3
{
int x;
int y;
int z;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct int3
{
int x;
int y;
int z;
};
------Done with eliminateMultipleDeclarators, d now is: int x, y, z
-------------working on eliminating: unsigned int x, y, z
------eliminating: unsigned int x, y, z its child is: unsigned int x, y, z its parent is: unsigned int x, y, z;
------parent is instance of declaration statement, child was: unsigned int x, y, z parent was: unsigned int x, y, z; outer was: null
------now child becomes: unsigned int x, y, z; parent becomes: struct uint3
{
unsigned int x, y, z;
};
outer becomes: struct uint3
{
unsigned int x, y, z;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct uint3
{
unsigned int x, y, z;
;
};
------after parent.removeChild(child), parent is: struct uint3
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 2
------after, Declarator d = decl.getDeclarator(i), d is: z
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct uint3
{
;
unsigned int z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct uint3
{
;
unsigned int z;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct uint3
{
;
unsigned int y;
unsigned int z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct uint3
{
;
unsigned int y;
unsigned int z;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct uint3
{
;
unsigned int x;
unsigned int y;
unsigned int z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct uint3
{
;
unsigned int x;
unsigned int y;
unsigned int z;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct uint3
{
unsigned int x;
unsigned int y;
unsigned int z;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct uint3
{
unsigned int x;
unsigned int y;
unsigned int z;
};
------Done with eliminateMultipleDeclarators, d now is: unsigned int x, y, z
-------------working on eliminating: int x, y, z, w
------eliminating: int x, y, z, w its child is: int x, y, z, w its parent is: int x, y, z, w;
------parent is instance of declaration statement, child was: int x, y, z, w parent was: int x, y, z, w; outer was: null
------now child becomes: int x, y, z, w; parent becomes: struct int4
{
int x, y, z, w;
};
outer becomes: struct int4
{
int x, y, z, w;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct int4
{
int x, y, z, w;
;
};
------after parent.removeChild(child), parent is: struct int4
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 3
------after, Declarator d = decl.getDeclarator(i), d is: w
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct int4
{
;
int w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct int4
{
;
int w;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 2
------after, Declarator d = decl.getDeclarator(i), d is: z
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct int4
{
;
int z;
int w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct int4
{
;
int z;
int w;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct int4
{
;
int y;
int z;
int w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct int4
{
;
int y;
int z;
int w;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct int4
{
;
int x;
int y;
int z;
int w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct int4
{
;
int x;
int y;
int z;
int w;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct int4
{
int x;
int y;
int z;
int w;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct int4
{
int x;
int y;
int z;
int w;
};
------Done with eliminateMultipleDeclarators, d now is: int x, y, z, w
-------------working on eliminating: unsigned int x, y, z, w
------eliminating: unsigned int x, y, z, w its child is: unsigned int x, y, z, w its parent is: unsigned int x, y, z, w;
------parent is instance of declaration statement, child was: unsigned int x, y, z, w parent was: unsigned int x, y, z, w; outer was: null
------now child becomes: unsigned int x, y, z, w; parent becomes: struct uint4
{
unsigned int x, y, z, w;
};
outer becomes: struct uint4
{
unsigned int x, y, z, w;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct uint4
{
unsigned int x, y, z, w;
;
};
------after parent.removeChild(child), parent is: struct uint4
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 3
------after, Declarator d = decl.getDeclarator(i), d is: w
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct uint4
{
;
unsigned int w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct uint4
{
;
unsigned int w;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 2
------after, Declarator d = decl.getDeclarator(i), d is: z
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct uint4
{
;
unsigned int z;
unsigned int w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct uint4
{
;
unsigned int z;
unsigned int w;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct uint4
{
;
unsigned int y;
unsigned int z;
unsigned int w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct uint4
{
;
unsigned int y;
unsigned int z;
unsigned int w;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct uint4
{
;
unsigned int x;
unsigned int y;
unsigned int z;
unsigned int w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct uint4
{
;
unsigned int x;
unsigned int y;
unsigned int z;
unsigned int w;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct uint4
{
unsigned int x;
unsigned int y;
unsigned int z;
unsigned int w;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct uint4
{
unsigned int x;
unsigned int y;
unsigned int z;
unsigned int w;
};
------Done with eliminateMultipleDeclarators, d now is: unsigned int x, y, z, w
-------------working on eliminating: long int x, y
------eliminating: long int x, y its child is: long int x, y its parent is: long int x, y;
------parent is instance of declaration statement, child was: long int x, y parent was: long int x, y; outer was: null
------now child becomes: long int x, y; parent becomes: struct long2
{
long int x, y;
};
outer becomes: struct long2
{
long int x, y;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct long2
{
long int x, y;
;
};
------after parent.removeChild(child), parent is: struct long2
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct long2
{
;
long int y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct long2
{
;
long int y;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct long2
{
;
long int x;
long int y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct long2
{
;
long int x;
long int y;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct long2
{
long int x;
long int y;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct long2
{
long int x;
long int y;
};
------Done with eliminateMultipleDeclarators, d now is: long int x, y
-------------working on eliminating: unsigned long int x, y
------eliminating: unsigned long int x, y its child is: unsigned long int x, y its parent is: unsigned long int x, y;
------parent is instance of declaration statement, child was: unsigned long int x, y parent was: unsigned long int x, y; outer was: null
------now child becomes: unsigned long int x, y; parent becomes: struct ulong2
{
unsigned long int x, y;
};
outer becomes: struct ulong2
{
unsigned long int x, y;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct ulong2
{
unsigned long int x, y;
;
};
------after parent.removeChild(child), parent is: struct ulong2
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct ulong2
{
;
unsigned long int y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct ulong2
{
;
unsigned long int y;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct ulong2
{
;
unsigned long int x;
unsigned long int y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct ulong2
{
;
unsigned long int x;
unsigned long int y;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct ulong2
{
unsigned long int x;
unsigned long int y;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct ulong2
{
unsigned long int x;
unsigned long int y;
};
------Done with eliminateMultipleDeclarators, d now is: unsigned long int x, y
-------------working on eliminating: float x, y
------eliminating: float x, y its child is: float x, y its parent is: float x, y;
------parent is instance of declaration statement, child was: float x, y parent was: float x, y; outer was: null
------now child becomes: float x, y; parent becomes: struct float2
{
float x, y;
};
outer becomes: struct float2
{
float x, y;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct float2
{
float x, y;
;
};
------after parent.removeChild(child), parent is: struct float2
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct float2
{
;
float y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct float2
{
;
float y;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct float2
{
;
float x;
float y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct float2
{
;
float x;
float y;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct float2
{
float x;
float y;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct float2
{
float x;
float y;
};
------Done with eliminateMultipleDeclarators, d now is: float x, y
-------------working on eliminating: float x, y, z
------eliminating: float x, y, z its child is: float x, y, z its parent is: float x, y, z;
------parent is instance of declaration statement, child was: float x, y, z parent was: float x, y, z; outer was: null
------now child becomes: float x, y, z; parent becomes: struct float3
{
float x, y, z;
};
outer becomes: struct float3
{
float x, y, z;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct float3
{
float x, y, z;
;
};
------after parent.removeChild(child), parent is: struct float3
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 2
------after, Declarator d = decl.getDeclarator(i), d is: z
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct float3
{
;
float z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct float3
{
;
float z;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct float3
{
;
float y;
float z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct float3
{
;
float y;
float z;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct float3
{
;
float x;
float y;
float z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct float3
{
;
float x;
float y;
float z;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct float3
{
float x;
float y;
float z;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct float3
{
float x;
float y;
float z;
};
------Done with eliminateMultipleDeclarators, d now is: float x, y, z
-------------working on eliminating: float x, y, z, w
------eliminating: float x, y, z, w its child is: float x, y, z, w its parent is: float x, y, z, w;
------parent is instance of declaration statement, child was: float x, y, z, w parent was: float x, y, z, w; outer was: null
------now child becomes: float x, y, z, w; parent becomes: struct float4
{
float x, y, z, w;
};
outer becomes: struct float4
{
float x, y, z, w;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct float4
{
float x, y, z, w;
;
};
------after parent.removeChild(child), parent is: struct float4
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 3
------after, Declarator d = decl.getDeclarator(i), d is: w
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct float4
{
;
float w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct float4
{
;
float w;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 2
------after, Declarator d = decl.getDeclarator(i), d is: z
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct float4
{
;
float z;
float w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct float4
{
;
float z;
float w;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct float4
{
;
float y;
float z;
float w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct float4
{
;
float y;
float z;
float w;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct float4
{
;
float x;
float y;
float z;
float w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct float4
{
;
float x;
float y;
float z;
float w;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct float4
{
float x;
float y;
float z;
float w;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct float4
{
float x;
float y;
float z;
float w;
};
------Done with eliminateMultipleDeclarators, d now is: float x, y, z, w
-------------working on eliminating: long long int x, y
------eliminating: long long int x, y its child is: long long int x, y its parent is: long long int x, y;
------parent is instance of declaration statement, child was: long long int x, y parent was: long long int x, y; outer was: null
------now child becomes: long long int x, y; parent becomes: struct longlong2
{
long long int x, y;
};
outer becomes: struct longlong2
{
long long int x, y;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct longlong2
{
long long int x, y;
;
};
------after parent.removeChild(child), parent is: struct longlong2
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct longlong2
{
;
long long int y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct longlong2
{
;
long long int y;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct longlong2
{
;
long long int x;
long long int y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct longlong2
{
;
long long int x;
long long int y;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct longlong2
{
long long int x;
long long int y;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct longlong2
{
long long int x;
long long int y;
};
------Done with eliminateMultipleDeclarators, d now is: long long int x, y
-------------working on eliminating: unsigned long long int x, y
------eliminating: unsigned long long int x, y its child is: unsigned long long int x, y its parent is: unsigned long long int x, y;
------parent is instance of declaration statement, child was: unsigned long long int x, y parent was: unsigned long long int x, y; outer was: null
------now child becomes: unsigned long long int x, y; parent becomes: struct ulonglong2
{
unsigned long long int x, y;
};
outer becomes: struct ulonglong2
{
unsigned long long int x, y;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct ulonglong2
{
unsigned long long int x, y;
;
};
------after parent.removeChild(child), parent is: struct ulonglong2
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct ulonglong2
{
;
unsigned long long int y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct ulonglong2
{
;
unsigned long long int y;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct ulonglong2
{
;
unsigned long long int x;
unsigned long long int y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct ulonglong2
{
;
unsigned long long int x;
unsigned long long int y;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct ulonglong2
{
unsigned long long int x;
unsigned long long int y;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct ulonglong2
{
unsigned long long int x;
unsigned long long int y;
};
------Done with eliminateMultipleDeclarators, d now is: unsigned long long int x, y
-------------working on eliminating: double x, y
------eliminating: double x, y its child is: double x, y its parent is: double x, y;
------parent is instance of declaration statement, child was: double x, y parent was: double x, y; outer was: null
------now child becomes: double x, y; parent becomes: struct double2
{
double x, y;
};
outer becomes: struct double2
{
double x, y;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct double2
{
double x, y;
;
};
------after parent.removeChild(child), parent is: struct double2
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct double2
{
;
double y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct double2
{
;
double y;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct double2
{
;
double x;
double y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct double2
{
;
double x;
double y;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct double2
{
double x;
double y;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct double2
{
double x;
double y;
};
------Done with eliminateMultipleDeclarators, d now is: double x, y
-------------working on eliminating: double x, y, z
------eliminating: double x, y, z its child is: double x, y, z its parent is: double x, y, z;
------parent is instance of declaration statement, child was: double x, y, z parent was: double x, y, z; outer was: null
------now child becomes: double x, y, z; parent becomes: struct double3
{
double x, y, z;
};
outer becomes: struct double3
{
double x, y, z;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct double3
{
double x, y, z;
;
};
------after parent.removeChild(child), parent is: struct double3
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 2
------after, Declarator d = decl.getDeclarator(i), d is: z
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct double3
{
;
double z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct double3
{
;
double z;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct double3
{
;
double y;
double z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct double3
{
;
double y;
double z;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct double3
{
;
double x;
double y;
double z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct double3
{
;
double x;
double y;
double z;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct double3
{
double x;
double y;
double z;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct double3
{
double x;
double y;
double z;
};
------Done with eliminateMultipleDeclarators, d now is: double x, y, z
-------------working on eliminating: unsigned int x, y, z
------eliminating: unsigned int x, y, z its child is: unsigned int x, y, z its parent is: unsigned int x, y, z;
------parent is instance of declaration statement, child was: unsigned int x, y, z parent was: unsigned int x, y, z; outer was: null
------now child becomes: unsigned int x, y, z; parent becomes: struct dim3
{
unsigned int x, y, z;
};
outer becomes: struct dim3
{
unsigned int x, y, z;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct dim3
{
unsigned int x, y, z;
;
};
------after parent.removeChild(child), parent is: struct dim3
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 2
------after, Declarator d = decl.getDeclarator(i), d is: z
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct dim3
{
;
unsigned int z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct dim3
{
;
unsigned int z;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct dim3
{
;
unsigned int y;
unsigned int z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct dim3
{
;
unsigned int y;
unsigned int z;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct dim3
{
;
unsigned int x;
unsigned int y;
unsigned int z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct dim3
{
;
unsigned int x;
unsigned int y;
unsigned int z;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct dim3
{
unsigned int x;
unsigned int y;
unsigned int z;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct dim3
{
unsigned int x;
unsigned int y;
unsigned int z;
};
------Done with eliminateMultipleDeclarators, d now is: unsigned int x, y, z
-------------d is instance of procedure: void __syncthreads()
{
;
}
-------------d is instance of procedure: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx = blockIdx.x;
int by = blockIdx.y;
int aBegin = ((wA*16)*by);
int aEnd = ((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
int a = 0, b = 0, k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
-------------working on eliminating: int a = 0, b = 0, k = 0
------eliminating: int a = 0, b = 0, k = 0 its child is: int a = 0, b = 0, k = 0 its parent is: int a = 0, b = 0, k = 0;
------parent is instance of declaration statement, child was: int a = 0, b = 0, k = 0 parent was: int a = 0, b = 0, k = 0; outer was: null
------now child becomes: int a = 0, b = 0, k = 0; parent becomes: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx = blockIdx.x;
int by = blockIdx.y;
int aBegin = ((wA*16)*by);
int aEnd = ((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
int a = 0, b = 0, k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
} outer becomes: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx = blockIdx.x;
int by = blockIdx.y;
int aBegin = ((wA*16)*by);
int aEnd = ((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
int a = 0, b = 0, k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
....CompoundStmt...addDecAfter...01
....CompoundStmt...addDecAfter...02
------after outer.addDeclarationAfter(decl, placeholder), outer is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx = blockIdx.x;
int by = blockIdx.y;
int aBegin = ((wA*16)*by);
int aEnd = ((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
int a = 0, b = 0, k = 0;
;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
------after parent.removeChild(child), parent is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx = blockIdx.x;
int by = blockIdx.y;
int aBegin = ((wA*16)*by);
int aEnd = ((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 2
------after, Declarator d = decl.getDeclarator(i), d is: k = 0
....CompoundStmt...addDecAfter...01
....CompoundStmt...addDecAfter...02
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx = blockIdx.x;
int by = blockIdx.y;
int aBegin = ((wA*16)*by);
int aEnd = ((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx = blockIdx.x;
int by = blockIdx.y;
int aBegin = ((wA*16)*by);
int aEnd = ((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: b = 0
....CompoundStmt...addDecAfter...01
....CompoundStmt...addDecAfter...02
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx = blockIdx.x;
int by = blockIdx.y;
int aBegin = ((wA*16)*by);
int aEnd = ((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx = blockIdx.x;
int by = blockIdx.y;
int aBegin = ((wA*16)*by);
int aEnd = ((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: a = 0
....CompoundStmt...addDecAfter...01
....CompoundStmt...addDecAfter...02
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx = blockIdx.x;
int by = blockIdx.y;
int aBegin = ((wA*16)*by);
int aEnd = ((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
;
int a = 0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx = blockIdx.x;
int by = blockIdx.y;
int aBegin = ((wA*16)*by);
int aEnd = ((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
;
int a = 0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx = blockIdx.x;
int by = blockIdx.y;
int aBegin = ((wA*16)*by);
int aEnd = ((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
int a = 0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
------after parent.removeChild(placeholder.getParent()), outer is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx = blockIdx.x;
int by = blockIdx.y;
int aBegin = ((wA*16)*by);
int aEnd = ((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
int a = 0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
------Done with eliminateMultipleDeclarators, d now is: int a = 0, b = 0, k = 0
[SingleDeclarator] end in 0.05 seconds
[LinkSymbol] 69 updates in 0.00 seconds
*** After SingleDeclarator ***
#include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx = blockIdx.x;
int by = blockIdx.y;
int aBegin = ((wA*16)*by);
int aEnd = ((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
int a = 0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
===========================================
[SeparateInitializers] begin
[SeparateInitializers] examining procedure matrixMul
00-----if Driver.getoptionvvalue fcuda != null-----
00-----after List<Procedure> tskLst = FCUDAutils.getTaskMapping(proc.getSymbolName()); tskLst is: null-----
1111-----If tskLst == null-----
00-----in splitInitialization, proc is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx = blockIdx.x;
int by = blockIdx.y;
int aBegin = ((wA*16)*by);
int aEnd = ((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
int a = 0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
-----
1111-----enter while loop, declStmt is: int bx = blockIdx.x;-----
1111-----vDecl = (VariableDeclaration)declStmt.getDeclaration(), vDecl is: int bx = blockIdx.x-----
1111-----variable = (VariableDeclarator)vDecl.getDeclarator(0), variable is: bx = blockIdx.x-----
1111-----after Initializer vInit = variable.getInitializer(), vInit is: = blockIdx.x-----
1111-----after variable.setInitializer(null), variable is: bx-----
1111-----vDecl.getSpecifiers().remove(Specifier.CONST), vDecl is: int bx-----
1111-----variable.getSpecifiers().remove(Specifier.CONST), variable is: bx-----
1111-----List<Traversable> children = vInit.getChildren(), children is: [blockIdx.x]-----
1111-----Expression initExpr = (Expression)((Expression)children.get(0)).clone(), initExpr is: blockIdx.x-----
1111-----IDExpression vID = new Identifier(variable), vID is: bx-----
1111-----AssignmentExpression vAssign = new AssignmentExpression(vID, AssignmentOperator.NORMAL, initExpr), vAssign is: (bx=blockIdx.x)-----
1111-----ExpressionStatement vStmt = new ExpressionStatement(vAssign), vStmt is: bx=blockIdx.x;-----
1111-----CompoundStatement scope = (CompoundStatement)declStmt.getParent(), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
int by = blockIdx.y;
int aBegin = ((wA*16)*by);
int aEnd = ((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
int a = 0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----scope.addStatementAfter(declStmt, vStmt), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by = blockIdx.y;
int aBegin = ((wA*16)*by);
int aEnd = ((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
int a = 0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----enter while loop, declStmt is: int by = blockIdx.y;-----
1111-----vDecl = (VariableDeclaration)declStmt.getDeclaration(), vDecl is: int by = blockIdx.y-----
1111-----variable = (VariableDeclarator)vDecl.getDeclarator(0), variable is: by = blockIdx.y-----
1111-----after Initializer vInit = variable.getInitializer(), vInit is: = blockIdx.y-----
1111-----after variable.setInitializer(null), variable is: by-----
1111-----vDecl.getSpecifiers().remove(Specifier.CONST), vDecl is: int by-----
1111-----variable.getSpecifiers().remove(Specifier.CONST), variable is: by-----
1111-----List<Traversable> children = vInit.getChildren(), children is: [blockIdx.y]-----
1111-----Expression initExpr = (Expression)((Expression)children.get(0)).clone(), initExpr is: blockIdx.y-----
1111-----IDExpression vID = new Identifier(variable), vID is: by-----
1111-----AssignmentExpression vAssign = new AssignmentExpression(vID, AssignmentOperator.NORMAL, initExpr), vAssign is: (by=blockIdx.y)-----
1111-----ExpressionStatement vStmt = new ExpressionStatement(vAssign), vStmt is: by=blockIdx.y;-----
1111-----CompoundStatement scope = (CompoundStatement)declStmt.getParent(), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
int aBegin = ((wA*16)*by);
int aEnd = ((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
int a = 0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----scope.addStatementAfter(declStmt, vStmt), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin = ((wA*16)*by);
int aEnd = ((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
int a = 0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----enter while loop, declStmt is: int aBegin = ((wA*16)*by);-----
1111-----vDecl = (VariableDeclaration)declStmt.getDeclaration(), vDecl is: int aBegin = ((wA*16)*by)-----
1111-----variable = (VariableDeclarator)vDecl.getDeclarator(0), variable is: aBegin = ((wA*16)*by)-----
1111-----after Initializer vInit = variable.getInitializer(), vInit is: = ((wA*16)*by)-----
1111-----after variable.setInitializer(null), variable is: aBegin-----
1111-----vDecl.getSpecifiers().remove(Specifier.CONST), vDecl is: int aBegin-----
1111-----variable.getSpecifiers().remove(Specifier.CONST), variable is: aBegin-----
1111-----List<Traversable> children = vInit.getChildren(), children is: [((wA*16)*by)]-----
1111-----Expression initExpr = (Expression)((Expression)children.get(0)).clone(), initExpr is: ((wA*16)*by)-----
1111-----IDExpression vID = new Identifier(variable), vID is: aBegin-----
1111-----AssignmentExpression vAssign = new AssignmentExpression(vID, AssignmentOperator.NORMAL, initExpr), vAssign is: (aBegin=((wA*16)*by))-----
1111-----ExpressionStatement vStmt = new ExpressionStatement(vAssign), vStmt is: aBegin=((wA*16)*by);-----
1111-----CompoundStatement scope = (CompoundStatement)declStmt.getParent(), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
int aEnd = ((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
int a = 0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----scope.addStatementAfter(declStmt, vStmt), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd = ((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
int a = 0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----enter while loop, declStmt is: int aEnd = ((aBegin+wA)-1);-----
1111-----vDecl = (VariableDeclaration)declStmt.getDeclaration(), vDecl is: int aEnd = ((aBegin+wA)-1)-----
1111-----variable = (VariableDeclarator)vDecl.getDeclarator(0), variable is: aEnd = ((aBegin+wA)-1)-----
1111-----after Initializer vInit = variable.getInitializer(), vInit is: = ((aBegin+wA)-1)-----
1111-----after variable.setInitializer(null), variable is: aEnd-----
1111-----vDecl.getSpecifiers().remove(Specifier.CONST), vDecl is: int aEnd-----
1111-----variable.getSpecifiers().remove(Specifier.CONST), variable is: aEnd-----
1111-----List<Traversable> children = vInit.getChildren(), children is: [((aBegin+wA)-1)]-----
1111-----Expression initExpr = (Expression)((Expression)children.get(0)).clone(), initExpr is: ((aBegin+wA)-1)-----
1111-----IDExpression vID = new Identifier(variable), vID is: aEnd-----
1111-----AssignmentExpression vAssign = new AssignmentExpression(vID, AssignmentOperator.NORMAL, initExpr), vAssign is: (aEnd=((aBegin+wA)-1))-----
1111-----ExpressionStatement vStmt = new ExpressionStatement(vAssign), vStmt is: aEnd=((aBegin+wA)-1);-----
1111-----CompoundStatement scope = (CompoundStatement)declStmt.getParent(), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
int a = 0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----scope.addStatementAfter(declStmt, vStmt), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
int a = 0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----enter while loop, declStmt is: int aStep = 16;-----
1111-----vDecl = (VariableDeclaration)declStmt.getDeclaration(), vDecl is: int aStep = 16-----
1111-----variable = (VariableDeclarator)vDecl.getDeclarator(0), variable is: aStep = 16-----
1111-----after Initializer vInit = variable.getInitializer(), vInit is: = 16-----
1111-----after variable.setInitializer(null), variable is: aStep-----
1111-----vDecl.getSpecifiers().remove(Specifier.CONST), vDecl is: int aStep-----
1111-----variable.getSpecifiers().remove(Specifier.CONST), variable is: aStep-----
1111-----List<Traversable> children = vInit.getChildren(), children is: [16]-----
1111-----Expression initExpr = (Expression)((Expression)children.get(0)).clone(), initExpr is: 16-----
1111-----IDExpression vID = new Identifier(variable), vID is: aStep-----
1111-----AssignmentExpression vAssign = new AssignmentExpression(vID, AssignmentOperator.NORMAL, initExpr), vAssign is: (aStep=16)-----
1111-----ExpressionStatement vStmt = new ExpressionStatement(vAssign), vStmt is: aStep=16;-----
1111-----CompoundStatement scope = (CompoundStatement)declStmt.getParent(), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
int a = 0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----scope.addStatementAfter(declStmt, vStmt), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
int a = 0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----enter while loop, declStmt is: int bBegin = (16*bx);-----
1111-----vDecl = (VariableDeclaration)declStmt.getDeclaration(), vDecl is: int bBegin = (16*bx)-----
1111-----variable = (VariableDeclarator)vDecl.getDeclarator(0), variable is: bBegin = (16*bx)-----
1111-----after Initializer vInit = variable.getInitializer(), vInit is: = (16*bx)-----
1111-----after variable.setInitializer(null), variable is: bBegin-----
1111-----vDecl.getSpecifiers().remove(Specifier.CONST), vDecl is: int bBegin-----
1111-----variable.getSpecifiers().remove(Specifier.CONST), variable is: bBegin-----
1111-----List<Traversable> children = vInit.getChildren(), children is: [(16*bx)]-----
1111-----Expression initExpr = (Expression)((Expression)children.get(0)).clone(), initExpr is: (16*bx)-----
1111-----IDExpression vID = new Identifier(variable), vID is: bBegin-----
1111-----AssignmentExpression vAssign = new AssignmentExpression(vID, AssignmentOperator.NORMAL, initExpr), vAssign is: (bBegin=(16*bx))-----
1111-----ExpressionStatement vStmt = new ExpressionStatement(vAssign), vStmt is: bBegin=(16*bx);-----
1111-----CompoundStatement scope = (CompoundStatement)declStmt.getParent(), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin;
int bStep = (16*wB);
DATATYPE Csub = 0;
int a = 0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----scope.addStatementAfter(declStmt, vStmt), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin;
bBegin=(16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
int a = 0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----enter while loop, declStmt is: int bStep = (16*wB);-----
1111-----vDecl = (VariableDeclaration)declStmt.getDeclaration(), vDecl is: int bStep = (16*wB)-----
1111-----variable = (VariableDeclarator)vDecl.getDeclarator(0), variable is: bStep = (16*wB)-----
1111-----after Initializer vInit = variable.getInitializer(), vInit is: = (16*wB)-----
1111-----after variable.setInitializer(null), variable is: bStep-----
1111-----vDecl.getSpecifiers().remove(Specifier.CONST), vDecl is: int bStep-----
1111-----variable.getSpecifiers().remove(Specifier.CONST), variable is: bStep-----
1111-----List<Traversable> children = vInit.getChildren(), children is: [(16*wB)]-----
1111-----Expression initExpr = (Expression)((Expression)children.get(0)).clone(), initExpr is: (16*wB)-----
1111-----IDExpression vID = new Identifier(variable), vID is: bStep-----
1111-----AssignmentExpression vAssign = new AssignmentExpression(vID, AssignmentOperator.NORMAL, initExpr), vAssign is: (bStep=(16*wB))-----
1111-----ExpressionStatement vStmt = new ExpressionStatement(vAssign), vStmt is: bStep=(16*wB);-----
1111-----CompoundStatement scope = (CompoundStatement)declStmt.getParent(), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin;
bBegin=(16*bx);
int bStep;
DATATYPE Csub = 0;
int a = 0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----scope.addStatementAfter(declStmt, vStmt), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin;
bBegin=(16*bx);
int bStep;
bStep=(16*wB);
DATATYPE Csub = 0;
int a = 0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----enter while loop, declStmt is: DATATYPE Csub = 0;-----
1111-----vDecl = (VariableDeclaration)declStmt.getDeclaration(), vDecl is: DATATYPE Csub = 0-----
1111-----variable = (VariableDeclarator)vDecl.getDeclarator(0), variable is: Csub = 0-----
1111-----after Initializer vInit = variable.getInitializer(), vInit is: = 0-----
1111-----after variable.setInitializer(null), variable is: Csub-----
1111-----vDecl.getSpecifiers().remove(Specifier.CONST), vDecl is: DATATYPE Csub-----
1111-----variable.getSpecifiers().remove(Specifier.CONST), variable is: Csub-----
1111-----List<Traversable> children = vInit.getChildren(), children is: [0]-----
1111-----Expression initExpr = (Expression)((Expression)children.get(0)).clone(), initExpr is: 0-----
1111-----IDExpression vID = new Identifier(variable), vID is: Csub-----
1111-----AssignmentExpression vAssign = new AssignmentExpression(vID, AssignmentOperator.NORMAL, initExpr), vAssign is: (Csub=0)-----
1111-----ExpressionStatement vStmt = new ExpressionStatement(vAssign), vStmt is: Csub=0;-----
1111-----CompoundStatement scope = (CompoundStatement)declStmt.getParent(), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin;
bBegin=(16*bx);
int bStep;
bStep=(16*wB);
DATATYPE Csub;
int a = 0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----scope.addStatementAfter(declStmt, vStmt), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin;
bBegin=(16*bx);
int bStep;
bStep=(16*wB);
DATATYPE Csub;
Csub=0;
int a = 0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----enter while loop, declStmt is: int a = 0;-----
1111-----vDecl = (VariableDeclaration)declStmt.getDeclaration(), vDecl is: int a = 0-----
1111-----variable = (VariableDeclarator)vDecl.getDeclarator(0), variable is: a = 0-----
1111-----after Initializer vInit = variable.getInitializer(), vInit is: = 0-----
1111-----after variable.setInitializer(null), variable is: a-----
1111-----vDecl.getSpecifiers().remove(Specifier.CONST), vDecl is: int a-----
1111-----variable.getSpecifiers().remove(Specifier.CONST), variable is: a-----
1111-----List<Traversable> children = vInit.getChildren(), children is: [0]-----
1111-----Expression initExpr = (Expression)((Expression)children.get(0)).clone(), initExpr is: 0-----
1111-----IDExpression vID = new Identifier(variable), vID is: a-----
1111-----AssignmentExpression vAssign = new AssignmentExpression(vID, AssignmentOperator.NORMAL, initExpr), vAssign is: (a=0)-----
1111-----ExpressionStatement vStmt = new ExpressionStatement(vAssign), vStmt is: a=0;-----
1111-----CompoundStatement scope = (CompoundStatement)declStmt.getParent(), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin;
bBegin=(16*bx);
int bStep;
bStep=(16*wB);
DATATYPE Csub;
Csub=0;
int a;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----scope.addStatementAfter(declStmt, vStmt), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin;
bBegin=(16*bx);
int bStep;
bStep=(16*wB);
DATATYPE Csub;
Csub=0;
int a;
a=0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----enter while loop, declStmt is: int b = 0;-----
1111-----vDecl = (VariableDeclaration)declStmt.getDeclaration(), vDecl is: int b = 0-----
1111-----variable = (VariableDeclarator)vDecl.getDeclarator(0), variable is: b = 0-----
1111-----after Initializer vInit = variable.getInitializer(), vInit is: = 0-----
1111-----after variable.setInitializer(null), variable is: b-----
1111-----vDecl.getSpecifiers().remove(Specifier.CONST), vDecl is: int b-----
1111-----variable.getSpecifiers().remove(Specifier.CONST), variable is: b-----
1111-----List<Traversable> children = vInit.getChildren(), children is: [0]-----
1111-----Expression initExpr = (Expression)((Expression)children.get(0)).clone(), initExpr is: 0-----
1111-----IDExpression vID = new Identifier(variable), vID is: b-----
1111-----AssignmentExpression vAssign = new AssignmentExpression(vID, AssignmentOperator.NORMAL, initExpr), vAssign is: (b=0)-----
1111-----ExpressionStatement vStmt = new ExpressionStatement(vAssign), vStmt is: b=0;-----
1111-----CompoundStatement scope = (CompoundStatement)declStmt.getParent(), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin;
bBegin=(16*bx);
int bStep;
bStep=(16*wB);
DATATYPE Csub;
Csub=0;
int a;
a=0;
int b;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----scope.addStatementAfter(declStmt, vStmt), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin;
bBegin=(16*bx);
int bStep;
bStep=(16*wB);
DATATYPE Csub;
Csub=0;
int a;
a=0;
int b;
b=0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----enter while loop, declStmt is: int k = 0;-----
1111-----vDecl = (VariableDeclaration)declStmt.getDeclaration(), vDecl is: int k = 0-----
1111-----variable = (VariableDeclarator)vDecl.getDeclarator(0), variable is: k = 0-----
1111-----after Initializer vInit = variable.getInitializer(), vInit is: = 0-----
1111-----after variable.setInitializer(null), variable is: k-----
1111-----vDecl.getSpecifiers().remove(Specifier.CONST), vDecl is: int k-----
1111-----variable.getSpecifiers().remove(Specifier.CONST), variable is: k-----
1111-----List<Traversable> children = vInit.getChildren(), children is: [0]-----
1111-----Expression initExpr = (Expression)((Expression)children.get(0)).clone(), initExpr is: 0-----
1111-----IDExpression vID = new Identifier(variable), vID is: k-----
1111-----AssignmentExpression vAssign = new AssignmentExpression(vID, AssignmentOperator.NORMAL, initExpr), vAssign is: (k=0)-----
1111-----ExpressionStatement vStmt = new ExpressionStatement(vAssign), vStmt is: k=0;-----
1111-----CompoundStatement scope = (CompoundStatement)declStmt.getParent(), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin;
bBegin=(16*bx);
int bStep;
bStep=(16*wB);
DATATYPE Csub;
Csub=0;
int a;
a=0;
int b;
b=0;
int k;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----scope.addStatementAfter(declStmt, vStmt), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin;
bBegin=(16*bx);
int bStep;
bStep=(16*wB);
DATATYPE Csub;
Csub=0;
int a;
a=0;
int b;
b=0;
int k;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----enter while loop, declStmt is: __shared__ DATATYPE As[16][16];-----
1111-----vDecl = (VariableDeclaration)declStmt.getDeclaration(), vDecl is: __shared__ DATATYPE As[16][16]-----
1111-----variable = (VariableDeclarator)vDecl.getDeclarator(0), variable is: As[16][16]-----
1111-----after Initializer vInit = variable.getInitializer(), vInit is: null-----
1111-----after variable.setInitializer(null), variable is: As[16][16]-----
222222-----if(vInit == null)
1111-----enter while loop, declStmt is: __shared__ DATATYPE Bs[16][16];-----
1111-----vDecl = (VariableDeclaration)declStmt.getDeclaration(), vDecl is: __shared__ DATATYPE Bs[16][16]-----
1111-----variable = (VariableDeclarator)vDecl.getDeclarator(0), variable is: Bs[16][16]-----
1111-----after Initializer vInit = variable.getInitializer(), vInit is: null-----
1111-----after variable.setInitializer(null), variable is: Bs[16][16]-----
222222-----if(vInit == null)
1111-----enter while loop, declStmt is: int c = (((wB*16)*by)+(16*bx));-----
1111-----vDecl = (VariableDeclaration)declStmt.getDeclaration(), vDecl is: int c = (((wB*16)*by)+(16*bx))-----
1111-----variable = (VariableDeclarator)vDecl.getDeclarator(0), variable is: c = (((wB*16)*by)+(16*bx))-----
1111-----after Initializer vInit = variable.getInitializer(), vInit is: = (((wB*16)*by)+(16*bx))-----
1111-----after variable.setInitializer(null), variable is: c-----
1111-----vDecl.getSpecifiers().remove(Specifier.CONST), vDecl is: int c-----
1111-----variable.getSpecifiers().remove(Specifier.CONST), variable is: c-----
1111-----List<Traversable> children = vInit.getChildren(), children is: [(((wB*16)*by)+(16*bx))]-----
1111-----Expression initExpr = (Expression)((Expression)children.get(0)).clone(), initExpr is: (((wB*16)*by)+(16*bx))-----
1111-----IDExpression vID = new Identifier(variable), vID is: c-----
1111-----AssignmentExpression vAssign = new AssignmentExpression(vID, AssignmentOperator.NORMAL, initExpr), vAssign is: (c=(((wB*16)*by)+(16*bx)))-----
1111-----ExpressionStatement vStmt = new ExpressionStatement(vAssign), vStmt is: c=(((wB*16)*by)+(16*bx));-----
1111-----CompoundStatement scope = (CompoundStatement)declStmt.getParent(), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin;
bBegin=(16*bx);
int bStep;
bStep=(16*wB);
DATATYPE Csub;
Csub=0;
int a;
a=0;
int b;
b=0;
int k;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c;
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----scope.addStatementAfter(declStmt, vStmt), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin;
bBegin=(16*bx);
int bStep;
bStep=(16*wB);
DATATYPE Csub;
Csub=0;
int a;
a=0;
int b;
b=0;
int k;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c;
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
-----kernelTransformPass, finish transformProcedure-----, proc now is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin;
bBegin=(16*bx);
int bStep;
bStep=(16*wB);
DATATYPE Csub;
Csub=0;
int a;
a=0;
int b;
b=0;
int k;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c;
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
[SeparateInitializers] end in 0.03 seconds
[LinkSymbol] 81 updates in 0.00 seconds
*** After SeparateInitializers ***
#include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin;
bBegin=(16*bx);
int bStep;
bStep=(16*wB);
DATATYPE Csub;
Csub=0;
int a;
a=0;
int b;
b=0;
int k;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c;
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
===========================================
[AnsiDeclarations] begin
00-----List<Traversable> statements = scope.getChildren()
00-----statements is: [;]-----
00-----List<Statement> nonDeclarations = new LinkedList<Statement>()
00-----statements is: [;]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: ;-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: ;-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [;]-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: ;-----
222222-----d.detach()
222222-----d is: ;-----
222222-----scope.addStatement(d)
222222-----scope is: {
;
}-----
00-----List<Traversable> statements = scope.getChildren()
00-----statements is: [#pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 , int bx;, bx=blockIdx.x;, int by;, by=blockIdx.y;, int aBegin;, aBegin=((wA*16)*by);, int aEnd;, aEnd=((aBegin+wA)-1);, int aStep;, aStep=16;, int bBegin;, bBegin=(16*bx);, int bStep;, bStep=(16*wB);, DATATYPE Csub;, Csub=0;, int a;, a=0;, int b;, b=0;, int k;, k=0;, for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}, int c;, c=(((wB*16)*by)+(16*bx));, C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;]-----
00-----List<Statement> nonDeclarations = new LinkedList<Statement>()
00-----statements is: [#pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 , int bx;, bx=blockIdx.x;, int by;, by=blockIdx.y;, int aBegin;, aBegin=((wA*16)*by);, int aEnd;, aEnd=((aBegin+wA)-1);, int aStep;, aStep=16;, int bBegin;, bBegin=(16*bx);, int bStep;, bStep=(16*wB);, DATATYPE Csub;, Csub=0;, int a;, a=0;, int b;, b=0;, int k;, k=0;, for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}, int c;, c=(((wB*16)*by)+(16*bx));, C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: #pragma HLS INTERFACE ap_bus port=A depth=3840 -----
1111-----Statement stmt = (Statement)i
1111-----stmt is: #pragma HLS INTERFACE ap_bus port=A depth=3840 -----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [#pragma HLS INTERFACE ap_bus port=A depth=3840 ]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: #pragma HLS INTERFACE ap_bus port=B depth=6144 -----
1111-----Statement stmt = (Statement)i
1111-----stmt is: #pragma HLS INTERFACE ap_bus port=B depth=6144 -----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [#pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 ]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: #pragma HLS INTERFACE ap_bus port=C depth=10240 -----
1111-----Statement stmt = (Statement)i
1111-----stmt is: #pragma HLS INTERFACE ap_bus port=C depth=10240 -----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [#pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 ]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: int bx;-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: int bx;-----
222222-----nonDeclarations.add(stmt)
222222-----i is: bx=blockIdx.x;-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: bx=blockIdx.x;-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [#pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 , bx=blockIdx.x;]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: int by;-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: int by;-----
222222-----nonDeclarations.add(stmt)
222222-----i is: by=blockIdx.y;-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: by=blockIdx.y;-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [#pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 , bx=blockIdx.x;, by=blockIdx.y;]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: int aBegin;-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: int aBegin;-----
222222-----nonDeclarations.add(stmt)
222222-----i is: aBegin=((wA*16)*by);-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: aBegin=((wA*16)*by);-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [#pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 , bx=blockIdx.x;, by=blockIdx.y;, aBegin=((wA*16)*by);]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: int aEnd;-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: int aEnd;-----
222222-----nonDeclarations.add(stmt)
222222-----i is: aEnd=((aBegin+wA)-1);-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: aEnd=((aBegin+wA)-1);-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [#pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 , bx=blockIdx.x;, by=blockIdx.y;, aBegin=((wA*16)*by);, aEnd=((aBegin+wA)-1);]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: int aStep;-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: int aStep;-----
222222-----nonDeclarations.add(stmt)
222222-----i is: aStep=16;-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: aStep=16;-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [#pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 , bx=blockIdx.x;, by=blockIdx.y;, aBegin=((wA*16)*by);, aEnd=((aBegin+wA)-1);, aStep=16;]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: int bBegin;-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: int bBegin;-----
222222-----nonDeclarations.add(stmt)
222222-----i is: bBegin=(16*bx);-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: bBegin=(16*bx);-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [#pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 , bx=blockIdx.x;, by=blockIdx.y;, aBegin=((wA*16)*by);, aEnd=((aBegin+wA)-1);, aStep=16;, bBegin=(16*bx);]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: int bStep;-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: int bStep;-----
222222-----nonDeclarations.add(stmt)
222222-----i is: bStep=(16*wB);-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: bStep=(16*wB);-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [#pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 , bx=blockIdx.x;, by=blockIdx.y;, aBegin=((wA*16)*by);, aEnd=((aBegin+wA)-1);, aStep=16;, bBegin=(16*bx);, bStep=(16*wB);]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: DATATYPE Csub;-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: DATATYPE Csub;-----
222222-----nonDeclarations.add(stmt)
222222-----i is: Csub=0;-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: Csub=0;-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [#pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 , bx=blockIdx.x;, by=blockIdx.y;, aBegin=((wA*16)*by);, aEnd=((aBegin+wA)-1);, aStep=16;, bBegin=(16*bx);, bStep=(16*wB);, Csub=0;]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: int a;-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: int a;-----
222222-----nonDeclarations.add(stmt)
222222-----i is: a=0;-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: a=0;-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [#pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 , bx=blockIdx.x;, by=blockIdx.y;, aBegin=((wA*16)*by);, aEnd=((aBegin+wA)-1);, aStep=16;, bBegin=(16*bx);, bStep=(16*wB);, Csub=0;, a=0;]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: int b;-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: int b;-----
222222-----nonDeclarations.add(stmt)
222222-----i is: b=0;-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: b=0;-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [#pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 , bx=blockIdx.x;, by=blockIdx.y;, aBegin=((wA*16)*by);, aEnd=((aBegin+wA)-1);, aStep=16;, bBegin=(16*bx);, bStep=(16*wB);, Csub=0;, a=0;, b=0;]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: int k;-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: int k;-----
222222-----nonDeclarations.add(stmt)
222222-----i is: k=0;-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: k=0;-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [#pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 , bx=blockIdx.x;, by=blockIdx.y;, aBegin=((wA*16)*by);, aEnd=((aBegin+wA)-1);, aStep=16;, bBegin=(16*bx);, bStep=(16*wB);, Csub=0;, a=0;, b=0;, k=0;]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [#pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 , bx=blockIdx.x;, by=blockIdx.y;, aBegin=((wA*16)*by);, aEnd=((aBegin+wA)-1);, aStep=16;, bBegin=(16*bx);, bStep=(16*wB);, Csub=0;, a=0;, b=0;, k=0;, for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: int c;-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: int c;-----
222222-----nonDeclarations.add(stmt)
222222-----i is: c=(((wB*16)*by)+(16*bx));-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: c=(((wB*16)*by)+(16*bx));-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [#pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 , bx=blockIdx.x;, by=blockIdx.y;, aBegin=((wA*16)*by);, aEnd=((aBegin+wA)-1);, aStep=16;, bBegin=(16*bx);, bStep=(16*wB);, Csub=0;, a=0;, b=0;, k=0;, for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}, c=(((wB*16)*by)+(16*bx));]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [#pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 , bx=blockIdx.x;, by=blockIdx.y;, aBegin=((wA*16)*by);, aEnd=((aBegin+wA)-1);, aStep=16;, bBegin=(16*bx);, bStep=(16*wB);, Csub=0;, a=0;, b=0;, k=0;, for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}, c=(((wB*16)*by)+(16*bx));, C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;]-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: #pragma HLS INTERFACE ap_bus port=A depth=3840 -----
222222-----d.detach()
222222-----d is: #pragma HLS INTERFACE ap_bus port=A depth=3840 -----
222222-----scope.addStatement(d)
222222-----scope is: {
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin;
bBegin=(16*bx);
int bStep;
bStep=(16*wB);
DATATYPE Csub;
Csub=0;
int a;
a=0;
int b;
b=0;
int k;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c;
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
#pragma HLS INTERFACE ap_bus port=A depth=3840
}-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: #pragma HLS INTERFACE ap_bus port=B depth=6144 -----
222222-----d.detach()
222222-----d is: #pragma HLS INTERFACE ap_bus port=B depth=6144 -----
222222-----scope.addStatement(d)
222222-----scope is: {
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin;
bBegin=(16*bx);
int bStep;
bStep=(16*wB);
DATATYPE Csub;
Csub=0;
int a;
a=0;
int b;
b=0;
int k;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c;
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
}-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: #pragma HLS INTERFACE ap_bus port=C depth=10240 -----
222222-----d.detach()
222222-----d is: #pragma HLS INTERFACE ap_bus port=C depth=10240 -----
222222-----scope.addStatement(d)
222222-----scope is: {
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin;
bBegin=(16*bx);
int bStep;
bStep=(16*wB);
DATATYPE Csub;
Csub=0;
int a;
a=0;
int b;
b=0;
int k;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c;
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
}-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: bx=blockIdx.x;-----
222222-----d.detach()
222222-----d is: bx=blockIdx.x;-----
222222-----scope.addStatement(d)
222222-----scope is: {
int bx;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin;
bBegin=(16*bx);
int bStep;
bStep=(16*wB);
DATATYPE Csub;
Csub=0;
int a;
a=0;
int b;
b=0;
int k;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c;
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
}-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: by=blockIdx.y;-----
222222-----d.detach()
222222-----d is: by=blockIdx.y;-----
222222-----scope.addStatement(d)
222222-----scope is: {
int bx;
int by;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin;
bBegin=(16*bx);
int bStep;
bStep=(16*wB);
DATATYPE Csub;
Csub=0;
int a;
a=0;
int b;
b=0;
int k;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c;
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
}-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: aBegin=((wA*16)*by);-----
222222-----d.detach()
222222-----d is: aBegin=((wA*16)*by);-----
222222-----scope.addStatement(d)
222222-----scope is: {
int bx;
int by;
int aBegin;
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin;
bBegin=(16*bx);
int bStep;
bStep=(16*wB);
DATATYPE Csub;
Csub=0;
int a;
a=0;
int b;
b=0;
int k;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c;
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
}-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: aEnd=((aBegin+wA)-1);-----
222222-----d.detach()
222222-----d is: aEnd=((aBegin+wA)-1);-----
222222-----scope.addStatement(d)
222222-----scope is: {
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
aStep=16;
int bBegin;
bBegin=(16*bx);
int bStep;
bStep=(16*wB);
DATATYPE Csub;
Csub=0;
int a;
a=0;
int b;
b=0;
int k;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c;
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
}-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: aStep=16;-----
222222-----d.detach()
222222-----d is: aStep=16;-----
222222-----scope.addStatement(d)
222222-----scope is: {
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
bBegin=(16*bx);
int bStep;
bStep=(16*wB);
DATATYPE Csub;
Csub=0;
int a;
a=0;
int b;
b=0;
int k;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c;
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
}-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: bBegin=(16*bx);-----
222222-----d.detach()
222222-----d is: bBegin=(16*bx);-----
222222-----scope.addStatement(d)
222222-----scope is: {
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
bStep=(16*wB);
DATATYPE Csub;
Csub=0;
int a;
a=0;
int b;
b=0;
int k;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c;
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
}-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: bStep=(16*wB);-----
222222-----d.detach()
222222-----d is: bStep=(16*wB);-----
222222-----scope.addStatement(d)
222222-----scope is: {
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
Csub=0;
int a;
a=0;
int b;
b=0;
int k;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c;
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
}-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: Csub=0;-----
222222-----d.detach()
222222-----d is: Csub=0;-----
222222-----scope.addStatement(d)
222222-----scope is: {
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
a=0;
int b;
b=0;
int k;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c;
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
}-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: a=0;-----
222222-----d.detach()
222222-----d is: a=0;-----
222222-----scope.addStatement(d)
222222-----scope is: {
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
b=0;
int k;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c;
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
}-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: b=0;-----
222222-----d.detach()
222222-----d is: b=0;-----
222222-----scope.addStatement(d)
222222-----scope is: {
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c;
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
}-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: k=0;-----
222222-----d.detach()
222222-----d is: k=0;-----
222222-----scope.addStatement(d)
222222-----scope is: {
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c;
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
}-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}-----
222222-----d.detach()
222222-----d is: for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}-----
222222-----scope.addStatement(d)
222222-----scope is: {
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
}-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: c=(((wB*16)*by)+(16*bx));-----
222222-----d.detach()
222222-----d is: c=(((wB*16)*by)+(16*bx));-----
222222-----scope.addStatement(d)
222222-----scope is: {
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
}-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;-----
222222-----d.detach()
222222-----d is: C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;-----
222222-----scope.addStatement(d)
222222-----scope is: {
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
00-----List<Traversable> statements = scope.getChildren()
00-----statements is: [__shared__ DATATYPE As[16][16];, __shared__ DATATYPE Bs[16][16];, As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];, Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];, __syncthreads();, {
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}, __syncthreads();]-----
00-----List<Statement> nonDeclarations = new LinkedList<Statement>()
00-----statements is: [__shared__ DATATYPE As[16][16];, __shared__ DATATYPE Bs[16][16];, As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];, Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];, __syncthreads();, {
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}, __syncthreads();]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: __shared__ DATATYPE As[16][16];-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: __shared__ DATATYPE As[16][16];-----
222222-----nonDeclarations.add(stmt)
222222-----i is: __shared__ DATATYPE Bs[16][16];-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: __shared__ DATATYPE Bs[16][16];-----
222222-----nonDeclarations.add(stmt)
222222-----i is: As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];, Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: __syncthreads();-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: __syncthreads();-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];, Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];, __syncthreads();]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: {
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: {
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];, Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];, __syncthreads();, {
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: __syncthreads();-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: __syncthreads();-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];, Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];, __syncthreads();, {
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}, __syncthreads();]-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];-----
222222-----d.detach()
222222-----d is: As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];-----
222222-----scope.addStatement(d)
222222-----scope is: {
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
}-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];-----
222222-----d.detach()
222222-----d is: Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];-----
222222-----scope.addStatement(d)
222222-----scope is: {
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
}-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: __syncthreads();-----
222222-----d.detach()
222222-----d is: __syncthreads();-----
222222-----scope.addStatement(d)
222222-----scope is: {
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
}-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: {
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}-----
222222-----d.detach()
222222-----d is: {
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}-----
222222-----scope.addStatement(d)
222222-----scope is: {
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
__syncthreads();
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
}-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: __syncthreads();-----
222222-----d.detach()
222222-----d is: __syncthreads();-----
222222-----scope.addStatement(d)
222222-----scope is: {
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}-----
00-----List<Traversable> statements = scope.getChildren()
00-----statements is: [lp1:, for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}]-----
00-----List<Statement> nonDeclarations = new LinkedList<Statement>()
00-----statements is: [lp1:, for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: lp1:-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: lp1:-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [lp1:]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [lp1:, for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}]-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: lp1:-----
222222-----d.detach()
222222-----d is: lp1:-----
222222-----scope.addStatement(d)
222222-----scope is: {
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
lp1:
}-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}-----
222222-----d.detach()
222222-----d is: for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}-----
222222-----scope.addStatement(d)
222222-----scope is: {
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}-----
00-----List<Traversable> statements = scope.getChildren()
00-----statements is: [Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);]-----
00-----List<Statement> nonDeclarations = new LinkedList<Statement>()
00-----statements is: [Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);]-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);-----
222222-----d.detach()
222222-----d is: Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);-----
222222-----scope.addStatement(d)
222222-----scope is: {
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}-----
[AnsiDeclarations] end in 0.02 seconds
[LinkSymbol] 81 updates in 0.00 seconds
*** After AnsiDeclarations ***
#include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
===========================================
[StreamInsertion-FCUDA] begin
[StreamInsertion-FCUDA] examining procedure matrixMul
-----kernelTransformPass, finish transformProcedure-----, proc now is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
[StreamInsertion-FCUDA] end in 0.01 seconds
[LinkSymbol] 81 updates in 0.00 seconds
*** After StreamInsertion ***
#include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
===========================================
[CreateTaskRegions-FCUDA] begin
[CreateTaskRegions-FCUDA] examining procedure matrixMul
001
002
003---------------------------------------------------------------------------------------------------------------------------------------------------
----Collect global-memory arrays/pointers symbols (includes __constant__ symbols): glMemArraySet = GlobalMemUtils.getGlobMemSymbols(mProcedure)-------
------------------------------------------------------------------------------------------------------------------------------------------------------
----[* A, * C, * B]----
----1.1----mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
Global-Mem Arrays: [* A, * C, * B]
----now identifyGlmemAccs----
PointerSet: [* A, * C, * B]
findGMrefs for: A
---before continue, GM refs in ArrayAccess format already----
findGMrefs for: B
---before continue, GM refs in ArrayAccess format already----
findGMrefs for: C
---before continue, GM refs in ArrayAccess format already----
AliasSet: []
derefAccs: []
004---------------------------------------------------------------------------------------------------------------------------------------------------
----Convert dereference-based global-mem accesses to array-accesses and find global-mem aliases: identifyGlMemAccs();-------
------------------------------------------------------------------------------------------------------------------------------------------------------
----1.2----mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
----now handleMixStmts----
getMixdStmts Symbols: [* A, * C, * B]
getMixdStmts Global Symbols: [* A, * C, * B]
getMixdStmts Alias Symbols: []
symUses: [A]
Contained GM Ref: A[((a+(wA*threadIdx.y))+threadIdx.x)]
Candidate MIXED stmt: As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
symUses: [B]
Contained GM Ref: B[((b+(wB*threadIdx.y))+threadIdx.x)]
Candidate MIXED stmt: Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
symUses: [C]
Contained GM Ref: C[((c+(wB*threadIdx.y))+threadIdx.x)]
Candidate MIXED stmt: C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
mixdStmts: []
--------------------------------
005---------------------------------------------------------------------------------------------------------------------------------------------------
----Eliminate MIXED statements (i.e. statements that contain both COMPUTE & TRANSFER parts): handleMixStmts();-------
------------------------------------------------------------------------------------------------------------------------------------------------------
----1.3----mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
----FcudaGlobalData2.java start----
-----cetus application IPChainAnalysis start----
-----cetus application IPChainAnalysis 1----
------IPChainAnalysis.java PerformAliasAnalysis start----
-------IPPointsToAnalysis.java IPPointsToAnalysis start super----
--------IPAnalysis.java IPAnalysis start 1----
--------IPAnalysis.java IPAnalysis start 2----
[NormalizeReturn] begin
*AP* procedure: __syncthreads
*AP* is_void=true
*AP* ret_expr= null
*AP* procedure: matrixMul
*AP* is_void=true
*AP* ret_expr= null
[NormalizeReturn] end in 0.00 seconds
[LinkSymbol] 81 updates in 0.00 seconds
--------IPAnalysis.java IPAnalysis start 3 enter IPA Graph----
---------IPAGraph.java finish super()----
---------IPAGraph.java start new Arraylist <IPANode>----
---------IPAGraph.java enter buildgraph<prog>
---------IPAGraph.java enter identifyCloneableNodes
---------IPAGraph.java enter buildTopOrder
---------IPAGraph.java finish new IPA Graph
[IPA] Stops due to no flow entry
-------IPPointsToAnalysis.java IPPointsToAnalysis finished super----
-------IPPointsToAnalysis.java IPPointsToAnalysis finished 2----
-------IPPointsToAnalysis.java IPPointsToAnalysis end----
------PerformAliasAnalysis 1----
0000IPpointsToAnalysis
[IPA:PointsTo] Stops due to no flow entry
------PerformAliasAnalysis end----
-----cetus application IPChainAnalysis 2----
-----cetus application IPChainAnalysis 3----
----------------------------------------------
----start to generate CF graph with thread----
----------------------------------------------
proc now is: void __syncthreads()
{
;
return ;
}
=====
proc now is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
return ;
}
=====
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
-----cetus application IPChainAnalysis 4----
--------------------------------------------------------
----start to generate generate Program Summary Graph----
--------------------------------------------------------
############### PSG Summary Detail[__syncthreads()] #################
## Entry Node ##
# Ref Info (psg_entry_ref, use)
psg_entry_ref is null
# Global Info (psg_entry_global, UseOutSet)
psg_entry_global is null
## Exit Node ##
# Ref Info (psg_exit_ref, def)
psg_exit_ref is null
# Global Info (psg_exit_global, DefInSet)
psg_exit_global is null
############### PSG Summary Detail[matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)] #################
## Entry Node ##
# Ref Info (psg_entry_ref, use)
# Global Info (psg_entry_global, UseOutSet)
## Call Node ## IR: __syncthreads();
# Ref Info (psg_call_ref, def)
psg_call_ref is null
# Global Info (psg_call_global, DefInSet)
psg_call_global is null
## Return Node ## IR: __syncthreads();
# Ref Info (psg_return_ref, use)
psg_return_ref is null
# Global Info (psg_return_global, UseOutSet)
psg_return_global is null
## Call Node ## IR: __syncthreads();
# Ref Info (psg_call_ref, def)
psg_call_ref is null
# Global Info (psg_call_global, DefInSet)
psg_call_global is null
## Return Node ## IR: __syncthreads();
# Ref Info (psg_return_ref, use)
psg_return_ref is null
# Global Info (psg_return_global, UseOutSet)
psg_return_global is null
## Exit Node ##
# Ref Info (psg_exit_ref, def)
-DEF: C, NODE: C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
-DEF: A, NODE: * A
-DEF: B, NODE: * B
# Global Info (psg_exit_global, DefInSet)
############### PSG Propagated Detail[__syncthreads()] #################
## Entry Node ##
# Ref Info (psg_entry_ref)
psg_entry_ref is null
# Global Info (psg_entry_global)
psg_entry_global is null
## Exit Node ##
# Ref Info (psg_exit_ref)
psg_exit_ref is null
# Global Info (psg_exit_global)
psg_exit_global is null
############### PSG Propagated Detail[matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)] #################
## Entry Node ##
# Ref Info (psg_entry_ref)
# parameter: B
# parameter: C
# parameter: A
# Global Info (psg_entry_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
## Call Node ## IR: __syncthreads();
# Ref Info (psg_call_ref)
psg_call_ref is null
# Global Info (psg_call_global)
psg_call_global is null
## Return Node ## IR: __syncthreads();
# Ref Info (psg_return_ref)
psg_return_ref is null
# Global Info (psg_return_global)
psg_return_global is null
## Call Node ## IR: __syncthreads();
# Ref Info (psg_call_ref)
psg_call_ref is null
# Global Info (psg_call_global)
psg_call_global is null
## Return Node ## IR: __syncthreads();
# Ref Info (psg_return_ref)
psg_return_ref is null
# Global Info (psg_return_global)
psg_return_global is null
## Exit Node ##
# Ref Info (psg_exit_ref)
-OUTdef: C, NODE: C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
-OUTdef: A, NODE: * A
-OUTdef: B, NODE: * B
# Global Info (psg_exit_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
-----cetus application IPChainAnalysis 5----
-----cetus application IPChainAnalysis 6----
-----cetus application IPChainAnalysis 7----
-----cetus application IPChainAnalysis end----
----FcudaGlobalData2.java end----
----1.4----mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
return ;
}
006
Instances of tidx: [threadIdx, threadIdx, threadIdx, threadIdx, threadIdx, threadIdx, threadIdx, threadIdx, threadIdx, threadIdx, threadIdx, threadIdx]
----c.1---- idx is: threadIdx
----c.1---- parStmt is: As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
----c.1---- idx is: threadIdx
----c.1---- parStmt is: As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
----c.1---- idx is: threadIdx
----c.1---- parStmt is: As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
----c.1---- idx is: threadIdx
----c.1---- parStmt is: As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
----c.1---- idx is: threadIdx
----c.1---- parStmt is: Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
----c.1---- idx is: threadIdx
----c.1---- parStmt is: Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
----c.1---- idx is: threadIdx
----c.1---- parStmt is: Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
----c.1---- idx is: threadIdx
----c.1---- parStmt is: Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
----c.1---- idx is: threadIdx
----c.1---- parStmt is: Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
----c.1---- idx is: threadIdx
----c.1---- parStmt is: Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
----c.1---- idx is: threadIdx
----c.1---- parStmt is: C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
----c.1---- idx is: threadIdx
----c.1---- parStmt is: C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
defIDs: [As, As, As, As, Bs, Bs, Bs, Bs, Csub, Csub, C, C]
Looking for uses of: As
... in: Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
... useID: As
Looking for uses of: As
... in: #pragma fcuda stmt tdep_vars=[As]
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
Looking for uses of: As
... in: #pragma fcuda stmt tdep_vars=[As]
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
Looking for uses of: As
... in: #pragma fcuda stmt tdep_vars=[As]
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
Looking for uses of: Bs
... in: #pragma fcuda stmt tdep_vars=[As]
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
... useID: Bs
Looking for uses of: Bs
... in: #pragma fcuda stmt tdep_vars=[Bs, As]
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
Looking for uses of: Bs
... in: #pragma fcuda stmt tdep_vars=[Bs, As]
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
Looking for uses of: Bs
... in: #pragma fcuda stmt tdep_vars=[Bs, As]
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
Looking for uses of: Csub
... in: #pragma fcuda stmt tdep_vars=[Bs, As]
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
... useID: Csub
... in: C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
... useID: Csub
Looking for uses of: Csub
... in: #pragma fcuda stmt tdep_vars=[Bs, As, Csub]
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
... in: #pragma fcuda stmt tdep_vars=[Csub]
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
Looking for uses of: C
Looking for uses of: C
defIDs: [Csub, Csub, Csub, C]
Looking for uses of: Csub
... in: #pragma fcuda stmt tdep_vars=[Bs, As, Csub]
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
... in: #pragma fcuda stmt tdep_vars=[Csub]
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
Looking for uses of: Csub
... in: #pragma fcuda stmt tdep_vars=[Bs, As, Csub]
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
... in: #pragma fcuda stmt tdep_vars=[Csub]
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
Looking for uses of: Csub
... in: #pragma fcuda stmt tdep_vars=[Bs, As, Csub]
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
... in: #pragma fcuda stmt tdep_vars=[Csub]
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
Looking for uses of: C
----c.2---- tDepStmt is: #pragma fcuda stmt tdep_vars=[Csub]
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
----c.2---- tDepStmt is: As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
----c.21---- tDepStmt is: #pragma fcuda stmt tdep=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
----c.2---- tDepStmt is: Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
----c.21---- tDepStmt is: #pragma fcuda stmt tdep=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
----c.2---- tDepStmt is: #pragma fcuda stmt tdep_vars=[Bs, As, Csub]
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
----1.5----mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt tdep=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt tdep=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt tdep_vars=[Bs, As, Csub] tdep=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda stmt tdep_vars=[Csub] tdep=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
return ;
}
007
findTransferStmts Symbols: [* A, * C, * B]
findTransferStmts non-Const Symbols: [* A, * C, * B]
findTransferStmts for: A
findTransferStmts for: B
findTransferStmts for: C
INFO - findTransferStmts: 12 address index IDs
....IPChainAnalysis getDefList....
INFO - findTransferStmts: 2 defStmts for addrID: a
INFO - findTransferStmts: Corner case forloop definition: ((a=aBegin), (b=bBegin));
WARNING - findTransferStmts: Definition of class: class cetus.hir.CommaExpression
((a+=aStep), (b+=bStep))
....IPChainAnalysis getDefList....
INFO - findTransferStmts: 1 defStmts for addrID: wA
WARNING - findTransferStmts: Definition of class: class cetus.hir.VariableDeclarator
wA
....IPChainAnalysis getDefList....
INFO - findTransferStmts: 2 defStmts for addrID: b
INFO - findTransferStmts: Corner case forloop definition: ((a=aBegin), (b=bBegin));
WARNING - findTransferStmts: Definition of class: class cetus.hir.CommaExpression
((a+=aStep), (b+=bStep))
....IPChainAnalysis getDefList....
INFO - findTransferStmts: 1 defStmts for addrID: wB
WARNING - findTransferStmts: Definition of class: class cetus.hir.VariableDeclarator
wB
....IPChainAnalysis getDefList....
INFO - findTransferStmts: 1 defStmts for addrID: c
....IPChainAnalysis getDefList....
INFO - findTransferStmts: 1 defStmts for addrID: wB
WARNING - findTransferStmts: Definition of class: class cetus.hir.VariableDeclarator
wB
----1.6----mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A tdep=true TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B tdep=true TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt tdep_vars=[Bs, As, Csub] tdep=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda stmt rdNwrt=false GLBpntr=C tdep_vars=[Csub] tdep=true TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
return ;
}
008
----1.7----mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_3 tdep=true seqID=3
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
return ;
}
009_finish
-----kernelTransformPass, finish transformProcedure-----, proc now is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_3 tdep=true seqID=3
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
return ;
}
[CreateTaskRegions-FCUDA] end in 0.33 seconds
[LinkSymbol] 81 updates in 0.00 seconds
*** After CreateTaskRegions ***
#include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_3 tdep=true seqID=3
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
return ;
}
===========================================
[UniformCFstructs-FCUDA] begin
[UniformCFstructs-FCUDA] examining procedure matrixMul
----001----enter UniformCFstructs
----002----defUseData = FCUDAGlobalData2.getDataDepAnalysis(program)
-----001----- fcudaGlobalData2.java enter UpdateHTG
-----001----- proc is
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_3 tdep=true seqID=3
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
return ;
}
-----001----- proc.getSymbolName() is
matrixMul
------001------ HTGraph.java super()
------002------ HTGraph.java curTask = new String()
------003------ HTGraph.java HTGName = new String(name)
------004------ HTGraph.java if(trv instanceof Procedure)
------005------ HTGraph.java mProcedure = (Procedure) trv
------007------ HTGraph.java before build graph
------0007------ t is procedure now
------0007------ t is procedure now, getbody
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_3 tdep=true seqID=3
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
return ;
}
------0007------ t is CompoundStatement now
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_3 tdep=true seqID=3
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
return ;
}
------0007------ buildCompound cStmt getChildren
[int bx;, int by;, int aBegin;, int aEnd;, int aStep;, int bBegin;, int bStep;, DATATYPE Csub;, int a;, int b;, int k;, int c;, #pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 , bx=blockIdx.x;, by=blockIdx.y;, aBegin=((wA*16)*by);, aEnd=((aBegin+wA)-1);, aStep=16;, bBegin=(16*bx);, bStep=(16*wB);, Csub=0;, a=0;, b=0;, k=0;, for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_3 tdep=true seqID=3
__syncthreads();
}, c=(((wB*16)*by)+(16*bx));, #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;, return ;]
------0007------ buildCompound for(Traversable trv : children), current trv is
int bx;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int bx;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int by;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int by;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int aBegin;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int aBegin;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int aEnd;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int aEnd;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int aStep;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int aStep;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int bBegin;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int bBegin;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int bStep;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int bStep;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
DATATYPE Csub;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for DATATYPE Csub;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int a;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int a;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int b;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int b;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int k;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int k;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int c;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int c;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma HLS INTERFACE ap_bus port=A depth=3840
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for #pragma HLS INTERFACE ap_bus port=A depth=3840
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma HLS INTERFACE ap_bus port=B depth=6144
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for #pragma HLS INTERFACE ap_bus port=B depth=6144
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma HLS INTERFACE ap_bus port=C depth=10240
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for #pragma HLS INTERFACE ap_bus port=C depth=10240
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
bx=blockIdx.x;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for bx=blockIdx.x;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
by=blockIdx.y;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for by=blockIdx.y;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
aBegin=((wA*16)*by);
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for aBegin=((wA*16)*by);
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
aEnd=((aBegin+wA)-1);
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for aEnd=((aBegin+wA)-1);
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
aStep=16;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for aStep=16;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
bBegin=(16*bx);
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for bBegin=(16*bx);
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
bStep=(16*wB);
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for bStep=(16*wB);
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
Csub=0;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for Csub=0;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
a=0;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for a=0;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
b=0;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for b=0;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
k=0;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for k=0;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_3 tdep=true seqID=3
__syncthreads();
}
------ for loop current trv is not compound
------0007------ t is ForLoop now
------0007------ t is CompoundStatement now
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_3 tdep=true seqID=3
__syncthreads();
}
------0007------ buildCompound cStmt getChildren
[__shared__ DATATYPE As[16][16];, __shared__ DATATYPE Bs[16][16];, #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];, #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];, #pragma fcuda stmt SNCtask=true name=SNC_1 tdep=true seqID=1
__syncthreads();, {
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}, #pragma fcuda stmt SNCtask=true name=SNC_3 tdep=true seqID=3
__syncthreads();]
------0007------ buildCompound for(Traversable trv : children), current trv is
__shared__ DATATYPE As[16][16];
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for __shared__ DATATYPE As[16][16];
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
__shared__ DATATYPE Bs[16][16];
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for __shared__ DATATYPE Bs[16][16];
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular tdep stmt, TRN task----
-----added node: TRN_0
------done curr build graph
--- DFAGraph for #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular tdep stmt, TRN task----
-----added node: TRN_0
------done curr build graph
--- DFAGraph for #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma fcuda stmt SNCtask=true name=SNC_1 tdep=true seqID=1
__syncthreads();
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular tdep stmt, SYNC task----
-----added node: SNC_1
------done curr build graph
--- DFAGraph for #pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ buildCompound for(Traversable trv : children), current trv is
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
------ for loop current trv is compound
------0007------ t is CompoundStatement now
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
------0007------ buildCompound cStmt getChildren
[lp1:, for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}]
------0007------ buildCompound for(Traversable trv : children), current trv is
lp1:
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for lp1:
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
------ for loop current trv is not compound
------0007------ t is ForLoop now
------0007------ t is CompoundStatement now
{
#pragma fcuda stmt name=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
------0007------ buildCompound cStmt getChildren
[#pragma fcuda stmt name=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);]
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma fcuda stmt name=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular tdep stmt, COM task----
-----added node: CMP_2
------done curr build graph
--- DFAGraph for #pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ exit buildCompound for loop
------ ret.size > 0------
------ wrap enabled------
====== DFAGraph.java enter getFirst()
------ exit buildCompound with wrap enabled, and ret > 0, return superDFA------
------ exit buildCompound with wrap enabled, and ret == 0, return superDFA------
------done curr build graph
------ for loop ret has node------
====== DFAGraph.java enter getFirst()
------done curr build graph
--- DFAGraph for #pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ exit buildCompound for loop
------ ret.size > 0------
------ exit buildCompound with NO wrap, return ret------
------done curr build graph
--- DFAGraph for {
lp1:
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma fcuda stmt SNCtask=true name=SNC_3 tdep=true seqID=3
__syncthreads();
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular tdep stmt, SYNC task----
-----added node: SNC_3
------done curr build graph
--- DFAGraph for #pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ exit buildCompound for loop
------ ret.size > 0------
------ wrap enabled------
====== DFAGraph.java enter getFirst()
------ exit buildCompound with wrap enabled, and ret > 0, return superDFA------
------ exit buildCompound with wrap enabled, and ret == 0, return superDFA------
------done curr build graph
------ for loop ret has node------
====== DFAGraph.java enter getFirst()
------done curr build graph
--- DFAGraph for #pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
}
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ buildCompound for(Traversable trv : children), current trv is
c=(((wB*16)*by)+(16*bx));
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for c=(((wB*16)*by)+(16*bx));
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular tdep stmt, TRN task----
-----added node: TRN_4
------done curr build graph
--- DFAGraph for #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 HTGNode=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ buildCompound for(Traversable trv : children), current trv is
return ;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for return ;
has 0 nodes
------0007------ exit buildCompound for loop
------ ret.size > 0------
------ exit buildCompound with NO wrap, return ret------
------done curr build graph
------done curr build graph
------008------ HTGraph.java DFAGraph dfag = buildGraph(trv)
-----002----- fcudaGlobalData2.java HTGraph htg = new HTGraph(proc, proc.getSymbolName())
-----003----- fcudaGlobalData2.java mKern2HTG.put(proc, htg)
----003----defUseData = FCUDAGlobalData2.getDataDepAnalysis(program)
HTG PRINT-OUT
====================
| Graph: matrixMul L1
|
V
--------------------
| FOR_HTG_TRN_0
| type: FOR
| tdep: false
--------------------
====================
| Graph: HTG_TRN_0 L2
|
V
--------------------
| TRN_0
| ParentNode: FOR_HTG_TRN_0
| type: TRN
--------------------
|
V
--------------------
| SNC_1
| ParentNode: FOR_HTG_TRN_0
| type: SNC
--------------------
|
V
--------------------
| FOR_HTG_CMP_2
| ParentNode: FOR_HTG_TRN_0
| type: FOR
| tdep: false
--------------------
====================
| Graph: HTG_CMP_2 L3
|
V
--------------------
| CMP_2
| ParentNode: FOR_HTG_CMP_2
| type: CMP
| Addr-Use: false
--------------------
\===== HTG_CMP_2 =====/
|
V
--------------------
| SNC_3
| ParentNode: FOR_HTG_TRN_0
| type: SNC
--------------------
\===== HTG_TRN_0 =====/
|
V
--------------------
| TRN_4
| type: TRN
--------------------
\===== matrixMul =====/
----004----procHTG.printGraph_1_0
----005----after if rgnBounds.add_none_or_TRN
----006----List<DFANode> nonUniformCFs = new LinkedList_DFANode
----007----identifyCFs(nonUniformCFs, rgnBounds, procHTG.getFirst(), false)
----007----before 008, nonUniformCFs is: []
-----kernelTransformPass, finish transformProcedure-----, proc now is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 HTGNode=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
return ;
}
[UniformCFstructs-FCUDA] end in 0.02 seconds
[LinkSymbol] 81 updates in 0.00 seconds
*** After UniformCFstructs ***
#include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 HTGNode=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
return ;
}
===========================================
[AnnotateTasks-FCUDA] begin
[AnnotateTasks-FCUDA] examining procedure matrixMul
------001------ HTGraph.java super()
------002------ HTGraph.java curTask = new String()
------003------ HTGraph.java HTGName = new String(name)
------004------ HTGraph.java if(trv instanceof Procedure)
------005------ HTGraph.java mProcedure = (Procedure) trv
------007------ HTGraph.java before build graph
------0007------ t is procedure now
------0007------ t is procedure now, getbody
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 HTGNode=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
return ;
}
------0007------ t is CompoundStatement now
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 HTGNode=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
return ;
}
------0007------ buildCompound cStmt getChildren
[int bx;, int by;, int aBegin;, int aEnd;, int aStep;, int bBegin;, int bStep;, DATATYPE Csub;, int a;, int b;, int k;, int c;, #pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 , bx=blockIdx.x;, by=blockIdx.y;, aBegin=((wA*16)*by);, aEnd=((aBegin+wA)-1);, aStep=16;, bBegin=(16*bx);, bStep=(16*wB);, Csub=0;, a=0;, b=0;, k=0;, #pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
}, c=(((wB*16)*by)+(16*bx));, #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 HTGNode=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;, return ;]
------0007------ buildCompound for(Traversable trv : children), current trv is
int bx;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int bx;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int by;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int by;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int aBegin;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int aBegin;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int aEnd;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int aEnd;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int aStep;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int aStep;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int bBegin;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int bBegin;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int bStep;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int bStep;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
DATATYPE Csub;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for DATATYPE Csub;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int a;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int a;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int b;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int b;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int k;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int k;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int c;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int c;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma HLS INTERFACE ap_bus port=A depth=3840
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for #pragma HLS INTERFACE ap_bus port=A depth=3840
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma HLS INTERFACE ap_bus port=B depth=6144
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for #pragma HLS INTERFACE ap_bus port=B depth=6144
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma HLS INTERFACE ap_bus port=C depth=10240
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for #pragma HLS INTERFACE ap_bus port=C depth=10240
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
bx=blockIdx.x;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for bx=blockIdx.x;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
by=blockIdx.y;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for by=blockIdx.y;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
aBegin=((wA*16)*by);
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for aBegin=((wA*16)*by);
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
aEnd=((aBegin+wA)-1);
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for aEnd=((aBegin+wA)-1);
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
aStep=16;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for aStep=16;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
bBegin=(16*bx);
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for bBegin=(16*bx);
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
bStep=(16*wB);
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for bStep=(16*wB);
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
Csub=0;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for Csub=0;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
a=0;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for a=0;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
b=0;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for b=0;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
k=0;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for k=0;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
}
------ for loop current trv is not compound
------0007------ t is ForLoop now
------0007------ t is CompoundStatement now
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
}
------0007------ buildCompound cStmt getChildren
[__shared__ DATATYPE As[16][16];, __shared__ DATATYPE Bs[16][16];, #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];, #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];, #pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();, {
lp1:
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}, #pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();]
------0007------ buildCompound for(Traversable trv : children), current trv is
__shared__ DATATYPE As[16][16];
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for __shared__ DATATYPE As[16][16];
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
__shared__ DATATYPE Bs[16][16];
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for __shared__ DATATYPE Bs[16][16];
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular tdep stmt, TRN task----
-----added node: TRN_0
------done curr build graph
--- DFAGraph for #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular tdep stmt, TRN task----
-----added node: TRN_0
------done curr build graph
--- DFAGraph for #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular tdep stmt, SYNC task----
-----added node: SNC_1
------done curr build graph
--- DFAGraph for #pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ buildCompound for(Traversable trv : children), current trv is
{
lp1:
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
------ for loop current trv is compound
------0007------ t is CompoundStatement now
{
lp1:
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
------0007------ buildCompound cStmt getChildren
[lp1:, #pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}]
------0007------ buildCompound for(Traversable trv : children), current trv is
lp1:
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for lp1:
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
------ for loop current trv is not compound
------0007------ t is ForLoop now
------0007------ t is CompoundStatement now
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
------0007------ buildCompound cStmt getChildren
[#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);]
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular tdep stmt, COM task----
-----added node: CMP_2
------done curr build graph
--- DFAGraph for #pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ exit buildCompound for loop
------ ret.size > 0------
------ wrap enabled------
====== DFAGraph.java enter getFirst()
------ exit buildCompound with wrap enabled, and ret > 0, return superDFA------
------ exit buildCompound with wrap enabled, and ret == 0, return superDFA------
------done curr build graph
------ for loop ret has node------
====== DFAGraph.java enter getFirst()
------done curr build graph
--- DFAGraph for #pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ exit buildCompound for loop
------ ret.size > 0------
------ exit buildCompound with NO wrap, return ret------
------done curr build graph
--- DFAGraph for {
lp1:
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular tdep stmt, SYNC task----
-----added node: SNC_3
------done curr build graph
--- DFAGraph for #pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ exit buildCompound for loop
------ ret.size > 0------
------ wrap enabled------
====== DFAGraph.java enter getFirst()
------ exit buildCompound with wrap enabled, and ret > 0, return superDFA------
------ exit buildCompound with wrap enabled, and ret == 0, return superDFA------
------done curr build graph
------ for loop ret has node------
====== DFAGraph.java enter getFirst()
------done curr build graph
--- DFAGraph for #pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
}
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ buildCompound for(Traversable trv : children), current trv is
c=(((wB*16)*by)+(16*bx));
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for c=(((wB*16)*by)+(16*bx));
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 HTGNode=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular tdep stmt, TRN task----
-----added node: TRN_4
------done curr build graph
--- DFAGraph for #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 HTGNode=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ buildCompound for(Traversable trv : children), current trv is
return ;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for return ;
has 0 nodes
------0007------ exit buildCompound for loop
------ ret.size > 0------
------ exit buildCompound with NO wrap, return ret------
------done curr build graph
------done curr build graph
------008------ HTGraph.java DFAGraph dfag = buildGraph(trv)
PRINT-GRAPH OUTPUT
====================
| Graph: matrixMul L1
|
V
--------------------
| FOR_HTG_TRN_0
| type: FOR
| tdep: false
--------------------
====================
| Graph: HTG_TRN_0 L2
|
V
--------------------
| TRN_0
| ParentNode: FOR_HTG_TRN_0
| type: TRN
--------------------
|
V
--------------------
| SNC_1
| ParentNode: FOR_HTG_TRN_0
| type: SNC
--------------------
|
V
--------------------
| FOR_HTG_CMP_2
| ParentNode: FOR_HTG_TRN_0
| type: FOR
| tdep: false
--------------------
====================
| Graph: HTG_CMP_2 L3
|
V
--------------------
| CMP_2
| ParentNode: FOR_HTG_CMP_2
| type: CMP
| Addr-Use: false
--------------------
\===== HTG_CMP_2 =====/
|
V
--------------------
| SNC_3
| ParentNode: FOR_HTG_TRN_0
| type: SNC
--------------------
\===== HTG_TRN_0 =====/
|
V
--------------------
| TRN_4
| type: TRN
--------------------
\===== matrixMul =====/
*** CF Node Info Begin ***
FOR_HTG_TRN_0 info:
- uniform: false - SubTypes: [TRN, CMP, SNC]
*** CF Node Info End ***
*** Number of Tasks in matrixMul :3
---- Target: TRN_0 ----
nodes #: 1
TRN_0
Node Types: [TRN]
uniform: true
**** TLOOPS(1) ****
TLOOP 0
---- Target: TRN_0 ----
nodes #: 1
TRN_0
Node Types: [TRN]
uniform: true
----addStatementBefore----index is:2
----addStatementBefore----index is:3
---- Target: SNC_1 ----
nodes #: 3
SNC_1
FOR_HTG_CMP_2
SNC_3
Node Types: [CMP, SNC]
uniform: true
**** TLOOPS(1) ****
TLOOP 0
---- Target: FOR_HTG_CMP_2 ----
nodes #: 1
FOR_HTG_CMP_2
Node Types: [CMP]
uniform: true
----addStatementBefore----index is:8
----addStatementBefore----index is:1
---- Target: TRN_4 ----
nodes #: 1
TRN_4
Node Types: [TRN]
uniform: true
**** TLOOPS(1) ****
TLOOP 0
---- Target: TRN_4 ----
nodes #: 1
TRN_4
Node Types: [TRN]
uniform: true
----addStatementBefore----index is:28
----addStatementBefore----index is:29
-----kernelTransformPass, finish transformProcedure-----, proc now is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_0 end=false begin=true
#pragma fcuda tloop name=TRN_0 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 tlpName=TRN_0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 tlpName=TRN_0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_0 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_0 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_1 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_2 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2 tlpName=FOR_HTG_CMP_2
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_2
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_2
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 tlpName=FOR_HTG_CMP_2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_2 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_1 end=true begin=false
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_4 end=false begin=true
#pragma fcuda tloop name=TRN_4 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 HTGNode=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 tlpName=TRN_4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
#pragma fcuda tloop name=TRN_4 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_4 end=true begin=false
return ;
}
[AnnotateTasks-FCUDA] end in 0.02 seconds
[LinkSymbol] 81 updates in 0.00 seconds
*** After AnnotateTasks ***
#include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_0 end=false begin=true
#pragma fcuda tloop name=TRN_0 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 tlpName=TRN_0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 tlpName=TRN_0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_0 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_0 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_1 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_2 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2 tlpName=FOR_HTG_CMP_2
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_2
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_2
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 tlpName=FOR_HTG_CMP_2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_2 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_1 end=true begin=false
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_4 end=false begin=true
#pragma fcuda tloop name=TRN_4 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 HTGNode=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 tlpName=TRN_4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
#pragma fcuda tloop name=TRN_4 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_4 end=true begin=false
return ;
}
===========================================
[PrivatizeScalars-FCUDA] begin
[PrivatizeScalars-FCUDA] examining procedure matrixMul
----FcudaGlobalData2.java start----
-----cetus application IPChainAnalysis start----
-----cetus application IPChainAnalysis 1----
------IPChainAnalysis.java PerformAliasAnalysis start----
-------IPPointsToAnalysis.java IPPointsToAnalysis start super----
--------IPAnalysis.java IPAnalysis start 1----
--------IPAnalysis.java IPAnalysis start 2----
[NormalizeReturn] begin
*AP* procedure: __syncthreads
*AP* is_void=true
*AP* ret_expr= null
*AP* procedure: matrixMul
*AP* is_void=true
*AP* ret_expr= null
[NormalizeReturn] end in 0.00 seconds
[LinkSymbol] 81 updates in 0.00 seconds
--------IPAnalysis.java IPAnalysis start 3 enter IPA Graph----
---------IPAGraph.java finish super()----
---------IPAGraph.java start new Arraylist <IPANode>----
---------IPAGraph.java enter buildgraph<prog>
---------IPAGraph.java enter identifyCloneableNodes
---------IPAGraph.java enter buildTopOrder
---------IPAGraph.java finish new IPA Graph
[IPA] Stops due to no flow entry
-------IPPointsToAnalysis.java IPPointsToAnalysis finished super----
-------IPPointsToAnalysis.java IPPointsToAnalysis finished 2----
-------IPPointsToAnalysis.java IPPointsToAnalysis end----
------PerformAliasAnalysis 1----
0000IPpointsToAnalysis
[IPA:PointsTo] Stops due to no flow entry
------PerformAliasAnalysis end----
-----cetus application IPChainAnalysis 2----
-----cetus application IPChainAnalysis 3----
----------------------------------------------
----start to generate CF graph with thread----
----------------------------------------------
proc now is: void __syncthreads()
{
;
return ;
}
=====
proc now is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_0 end=false begin=true
#pragma fcuda tloop name=TRN_0 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 tlpName=TRN_0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 tlpName=TRN_0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_0 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_0 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_1 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_2 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2 tlpName=FOR_HTG_CMP_2
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_2
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_2
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 tlpName=FOR_HTG_CMP_2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_2 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_1 end=true begin=false
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_4 end=false begin=true
#pragma fcuda tloop name=TRN_4 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 HTGNode=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 tlpName=TRN_4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
#pragma fcuda tloop name=TRN_4 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_4 end=true begin=false
return ;
}
=====
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
-----cetus application IPChainAnalysis 4----
--------------------------------------------------------
----start to generate generate Program Summary Graph----
--------------------------------------------------------
############### PSG Summary Detail[__syncthreads()] #################
## Entry Node ##
# Ref Info (psg_entry_ref, use)
psg_entry_ref is null
# Global Info (psg_entry_global, UseOutSet)
psg_entry_global is null
## Exit Node ##
# Ref Info (psg_exit_ref, def)
psg_exit_ref is null
# Global Info (psg_exit_global, DefInSet)
psg_exit_global is null
############### PSG Summary Detail[matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)] #################
## Entry Node ##
# Ref Info (psg_entry_ref, use)
# Global Info (psg_entry_global, UseOutSet)
## Call Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
# Ref Info (psg_call_ref, def)
psg_call_ref is null
# Global Info (psg_call_global, DefInSet)
psg_call_global is null
## Return Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
# Ref Info (psg_return_ref, use)
psg_return_ref is null
# Global Info (psg_return_global, UseOutSet)
psg_return_global is null
## Call Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
# Ref Info (psg_call_ref, def)
psg_call_ref is null
# Global Info (psg_call_global, DefInSet)
psg_call_global is null
## Return Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
# Ref Info (psg_return_ref, use)
psg_return_ref is null
# Global Info (psg_return_global, UseOutSet)
psg_return_global is null
## Exit Node ##
# Ref Info (psg_exit_ref, def)
-DEF: B, NODE: * B
-DEF: C, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 HTGNode=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 tlpName=TRN_4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
-DEF: A, NODE: * A
# Global Info (psg_exit_global, DefInSet)
############### PSG Propagated Detail[__syncthreads()] #################
## Entry Node ##
# Ref Info (psg_entry_ref)
psg_entry_ref is null
# Global Info (psg_entry_global)
psg_entry_global is null
## Exit Node ##
# Ref Info (psg_exit_ref)
psg_exit_ref is null
# Global Info (psg_exit_global)
psg_exit_global is null
############### PSG Propagated Detail[matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)] #################
## Entry Node ##
# Ref Info (psg_entry_ref)
# parameter: B
# parameter: C
# parameter: A
# Global Info (psg_entry_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
## Call Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
# Ref Info (psg_call_ref)
psg_call_ref is null
# Global Info (psg_call_global)
psg_call_global is null
## Return Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
# Ref Info (psg_return_ref)
psg_return_ref is null
# Global Info (psg_return_global)
psg_return_global is null
## Call Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
# Ref Info (psg_call_ref)
psg_call_ref is null
# Global Info (psg_call_global)
psg_call_global is null
## Return Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
# Ref Info (psg_return_ref)
psg_return_ref is null
# Global Info (psg_return_global)
psg_return_global is null
## Exit Node ##
# Ref Info (psg_exit_ref)
-OUTdef: B, NODE: * B
-OUTdef: C, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 HTGNode=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 tlpName=TRN_4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
-OUTdef: A, NODE: * A
# Global Info (psg_exit_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
-----cetus application IPChainAnalysis 5----
-----cetus application IPChainAnalysis 6----
-----cetus application IPChainAnalysis 7----
-----cetus application IPChainAnalysis end----
----FcudaGlobalData2.java end----
...ps...001...before set of candidate variables to be privatized...
*** bfi: {
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_0 end=false begin=true
#pragma fcuda tloop name=TRN_0 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 tlpName=TRN_0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 tlpName=TRN_0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_0 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_0 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_1 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_2 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2 tlpName=FOR_HTG_CMP_2
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_2
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_2
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 tlpName=FOR_HTG_CMP_2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_2 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_1 end=true begin=false
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_4 end=false begin=true
#pragma fcuda tloop name=TRN_4 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 HTGNode=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 tlpName=TRN_4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
#pragma fcuda tloop name=TRN_4 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_4 end=true begin=false
return ;
}
*** bfi: int bx;
*** bfi: int by;
*** bfi: int aBegin;
*** bfi: int aEnd;
*** bfi: int aStep;
*** bfi: int bBegin;
*** bfi: int bStep;
*** bfi: DATATYPE Csub;
*** bfi: int a;
*** bfi: int b;
*** bfi: int k;
*** bfi: int c;
*** bfi: #pragma HLS INTERFACE ap_bus port=A depth=3840
*** bfi: #pragma HLS INTERFACE ap_bus port=B depth=6144
*** bfi: #pragma HLS INTERFACE ap_bus port=C depth=10240
*** bfi: bx=blockIdx.x;
*** bfi: by=blockIdx.y;
*** bfi: aBegin=((wA*16)*by);
*** bfi: aEnd=((aBegin+wA)-1);
*** bfi: aStep=16;
*** bfi: bBegin=(16*bx);
*** bfi: bStep=(16*wB);
*** bfi: Csub=0;
*** bfi: a=0;
*** bfi: b=0;
*** bfi: k=0;
*** bfi: #pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_0 end=false begin=true
#pragma fcuda tloop name=TRN_0 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 tlpName=TRN_0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 tlpName=TRN_0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_0 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_0 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_1 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_2 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2 tlpName=FOR_HTG_CMP_2
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_2
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_2
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 tlpName=FOR_HTG_CMP_2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_2 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_1 end=true begin=false
}
*** bfi: c=(((wB*16)*by)+(16*bx));
*** bfi: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_4 end=false begin=true
*** bfi: #pragma fcuda tloop name=TRN_4 end=false begin=true
*** tloop pragma: #pragma fcuda tloop name=TRN_4 end=false begin=true
*** entering: TRN_4
*** bfi: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 HTGNode=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 tlpName=TRN_4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
...ps...scan...001...curTloop is: TRN_4
...ps...scan...0001...fcAnnot is: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 HTGNode=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 tlpName=TRN_4 TRNtask=true
- defs: [C[((c+(wB*threadIdx.y))+threadIdx.x)]]
...ps...scan...00001...defExp is: C[((c+(wB*threadIdx.y))+threadIdx.x)]
- Is DefID:C candidate?
- Non-Candidate defId: C
*** bfi: #pragma fcuda tloop name=TRN_4 end=true begin=false
...ps...scan...001...curTloop is: TRN_4
...ps...scan...0001...fcAnnot is: #pragma fcuda tloop name=TRN_4 end=true begin=false
*** tloop pragma: #pragma fcuda tloop name=TRN_4 end=true begin=false
*** exiting: TRN_4
*** bfi: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_4 end=true begin=false
*** bfi: return ;
*** bfi: ((a=aBegin), (b=bBegin));
*** bfi: {
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_0 end=false begin=true
#pragma fcuda tloop name=TRN_0 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 tlpName=TRN_0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 tlpName=TRN_0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_0 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_0 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_1 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_2 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2 tlpName=FOR_HTG_CMP_2
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_2
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_2
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 tlpName=FOR_HTG_CMP_2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_2 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_1 end=true begin=false
}
*** bfi: __shared__ DATATYPE As[16][16];
*** bfi: __shared__ DATATYPE Bs[16][16];
*** bfi: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_0 end=false begin=true
*** bfi: #pragma fcuda tloop name=TRN_0 end=false begin=true
*** tloop pragma: #pragma fcuda tloop name=TRN_0 end=false begin=true
*** entering: TRN_0
*** bfi: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 tlpName=TRN_0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
...ps...scan...001...curTloop is: TRN_0
...ps...scan...0001...fcAnnot is: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 tlpName=TRN_0 TRNtask=true
- defs: [As[threadIdx.y][threadIdx.x]]
...ps...scan...00001...defExp is: As[threadIdx.y][threadIdx.x]
- Is DefID:As candidate?
- Non-Candidate defId: As
*** bfi: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 tlpName=TRN_0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
...ps...scan...001...curTloop is: TRN_0
...ps...scan...0001...fcAnnot is: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 tlpName=TRN_0 TRNtask=true
- defs: [Bs[threadIdx.y][threadIdx.x]]
...ps...scan...00001...defExp is: Bs[threadIdx.y][threadIdx.x]
- Is DefID:Bs candidate?
- Non-Candidate defId: Bs
*** bfi: #pragma fcuda tloop name=TRN_0 end=true begin=false
...ps...scan...001...curTloop is: TRN_0
...ps...scan...0001...fcAnnot is: #pragma fcuda tloop name=TRN_0 end=true begin=false
*** tloop pragma: #pragma fcuda tloop name=TRN_0 end=true begin=false
*** exiting: TRN_0
*** bfi: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_0 end=true begin=false
*** bfi: #pragma fcuda compute cores=1 name=SNC_1 end=false begin=true
*** bfi: #pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
*** bfi: {
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_2 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2 tlpName=FOR_HTG_CMP_2
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_2
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_2
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 tlpName=FOR_HTG_CMP_2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_2 end=true begin=false
}
*** bfi: #pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
*** bfi: #pragma fcuda compute cores=1 name=SNC_1 end=true begin=false
*** bfi: lp1:
*** bfi: #pragma fcuda tloop name=FOR_HTG_CMP_2 end=false begin=true
*** tloop pragma: #pragma fcuda tloop name=FOR_HTG_CMP_2 end=false begin=true
*** entering: FOR_HTG_CMP_2
*** bfi: #pragma fcuda stmt HTGNode=FOR_HTG_CMP_2 tlpName=FOR_HTG_CMP_2
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_2
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_2
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 tlpName=FOR_HTG_CMP_2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
...ps...scan...001...curTloop is: FOR_HTG_CMP_2
...ps...scan...0001...fcAnnot is: #pragma fcuda stmt HTGNode=FOR_HTG_CMP_2 tlpName=FOR_HTG_CMP_2
- defs: [k, k, Csub]
...ps...scan...00001...defExp is: k
- Is DefID:k candidate?
- with TRV USE:k<16
- with TRV USE:#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 tlpName=FOR_HTG_CMP_2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
- with TRV USE:#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 tlpName=FOR_HTG_CMP_2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
- with TRV USE: ++ k
- Non-Candidate defId: k
...ps...scan...00001...defExp is: k
- Is DefID:k candidate?
- with TRV USE:k<16
- with TRV USE:#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 tlpName=FOR_HTG_CMP_2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
- with TRV USE:#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 tlpName=FOR_HTG_CMP_2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
- with TRV USE: ++ k
- Non-Candidate defId: k
...ps...scan...00001...defExp is: Csub
- Is DefID:Csub candidate?
- with TRV USE:#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 tlpName=FOR_HTG_CMP_2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
- with TRV USE:#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 HTGNode=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 tlpName=TRN_4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
- Candidate defId: Csub
*** bfi: #pragma fcuda tloop name=FOR_HTG_CMP_2 end=true begin=false
...ps...scan...001...curTloop is: FOR_HTG_CMP_2
...ps...scan...0001...fcAnnot is: #pragma fcuda tloop name=FOR_HTG_CMP_2 end=true begin=false
*** tloop pragma: #pragma fcuda tloop name=FOR_HTG_CMP_2 end=true begin=false
*** exiting: FOR_HTG_CMP_2
*** bfi: #pragma fcuda stmt tlpName=FOR_HTG_CMP_2
k=0;
*** bfi: #pragma fcuda stmt tlpName=FOR_HTG_CMP_2
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 tlpName=FOR_HTG_CMP_2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
*** bfi: #pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 tlpName=FOR_HTG_CMP_2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
...ps...002...after scanTLoops_candidateVars...
...ps...002...candidateVars is: [Csub]
...ps...002...end of candidateVars print...
Privatization candidates for matrixMul: [Csub]
....ps....09....candVars is: [Csub]
....ps....09.01....for loop curr idExp is: Csub
....ps....09.02....idExp.getSymbol, origDeclor is: Csub
....ps....09.04....defSyms.containsKey(origDeclor) is: false
....ps....09.04....defSyms before put is: {}
....ps....09.04....curr origDeclor is: Csub
....ps....09.04....curr symtarg is: fcuda.analysis.SymTarget@27f723
....ps....09.05....defSyms after put is: {Csub=fcuda.analysis.SymTarget@27f723}
....ps....10.....defSyms.keySet is: [Csub]
....ps....10.01....for loop curr defSym is: Csub
- Privatizing SymTarget:
---- Symbol: Csub ----
*** mDefUses:
{71194203=[2006034581, 916419490], 457357179=[2006034581, 916419490]}
*** Candidate Defs: 1 [71194203]
- 71194203 : #pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 tlpName=FOR_HTG_CMP_2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
*** non-Candidate Defs: 1
- 457357179 : Csub=0;
*** Symbol Uses:
[#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 tlpName=FOR_HTG_CMP_2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);, #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 HTGNode=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 tlpName=TRN_4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;]
....ps....10.02....defSym is: Csub
....ps....10.02....defSym.getDeclaration, origDecl is: DATATYPE Csub
*** Using clone for tIdx.y ***
*** Using clone for tIdx.x ***
Replaced 1 candidate defs
Replaced 2 candidate uses
Replaced 1 non-candidate defs with cand uses
Replaced 0 non-candidate uses
....privatize....10.02....origDecl is: DATATYPE Csub
....privatize....10.02....origDecl.getParent is: DATATYPE Csub;
....privatize....10.02....blockDecl is: __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
....privatize....10.02....blockDecl.parent is: null
....privatize....10.02....compoundStatement.class is: class cetus.hir.CompoundStatement
....IRTools....getAncestorOfType....first argu t is not null....
....IRTools....t.parent, ret is: DATATYPE Csub;
....IRTools....type.isInstance(ret) is: false
....IRTools....ret = ret.parent, ret is: {
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub_block[threadIdx.y][threadIdx.x]=0;
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_0 end=false begin=true
#pragma fcuda tloop name=TRN_0 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 tlpName=TRN_0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 tlpName=TRN_0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_0 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_0 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_1 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_2 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2 tlpName=FOR_HTG_CMP_2
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_2
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_2
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 tlpName=FOR_HTG_CMP_2 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_2 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_1 end=true begin=false
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_4 end=false begin=true
#pragma fcuda tloop name=TRN_4 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 HTGNode=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 tlpName=TRN_4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_4 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_4 end=true begin=false
return ;
}
....IRTools....type.isInstance(ret) is: true
....privatize....10.03....parCmpd is: {
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub_block[threadIdx.y][threadIdx.x]=0;
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_0 end=false begin=true
#pragma fcuda tloop name=TRN_0 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 tlpName=TRN_0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 tlpName=TRN_0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_0 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_0 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_1 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_2 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2 tlpName=FOR_HTG_CMP_2
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_2
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_2
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 tlpName=FOR_HTG_CMP_2 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_2 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_1 end=true begin=false
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_4 end=false begin=true
#pragma fcuda tloop name=TRN_4 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 HTGNode=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 tlpName=TRN_4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_4 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_4 end=true begin=false
return ;
}
....CompoundStmt...addDecAfter...01
....CompoundStmt...addDecAfter...02
001
002
003---------------------------------------------------------------------------------------------------------------------------------------------------
----Collect global-memory arrays/pointers symbols (includes __constant__ symbols): glMemArraySet = GlobalMemUtils.getGlobMemSymbols(mProcedure)-------
------------------------------------------------------------------------------------------------------------------------------------------------------
----[* A, * C, * B]----
----1.1----mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub_block[threadIdx.y][threadIdx.x]=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
return ;
}
Global-Mem Arrays: [* A, * C, * B]
----now identifyGlmemAccs----
PointerSet: [* A, * C, * B]
findGMrefs for: A
---before continue, GM refs in ArrayAccess format already----
findGMrefs for: B
---before continue, GM refs in ArrayAccess format already----
findGMrefs for: C
---before continue, GM refs in ArrayAccess format already----
AliasSet: []
derefAccs: []
004---------------------------------------------------------------------------------------------------------------------------------------------------
----Convert dereference-based global-mem accesses to array-accesses and find global-mem aliases: identifyGlMemAccs();-------
------------------------------------------------------------------------------------------------------------------------------------------------------
----1.2----mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub_block[threadIdx.y][threadIdx.x]=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
return ;
}
----now handleMixStmts----
getMixdStmts Symbols: [* A, * C, * B]
getMixdStmts Global Symbols: [* A, * C, * B]
getMixdStmts Alias Symbols: []
symUses: [A]
Contained GM Ref: A[((a+(wA*threadIdx.y))+threadIdx.x)]
Candidate MIXED stmt: As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
symUses: [B]
Contained GM Ref: B[((b+(wB*threadIdx.y))+threadIdx.x)]
Candidate MIXED stmt: Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
symUses: [C]
Contained GM Ref: C[((c+(wB*threadIdx.y))+threadIdx.x)]
Candidate MIXED stmt: C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
mixdStmts: []
--------------------------------
005---------------------------------------------------------------------------------------------------------------------------------------------------
----Eliminate MIXED statements (i.e. statements that contain both COMPUTE & TRANSFER parts): handleMixStmts();-------
------------------------------------------------------------------------------------------------------------------------------------------------------
----1.3----mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub_block[threadIdx.y][threadIdx.x]=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
return ;
}
----FcudaGlobalData2.java start----
-----cetus application IPChainAnalysis start----
-----cetus application IPChainAnalysis 1----
------IPChainAnalysis.java PerformAliasAnalysis start----
-------IPPointsToAnalysis.java IPPointsToAnalysis start super----
--------IPAnalysis.java IPAnalysis start 1----
--------IPAnalysis.java IPAnalysis start 2----
[NormalizeReturn] begin
*AP* procedure: __syncthreads
*AP* is_void=true
*AP* ret_expr= null
*AP* procedure: matrixMul
*AP* is_void=true
*AP* ret_expr= null
[NormalizeReturn] end in 0.00 seconds
[LinkSymbol] 93 updates in 0.00 seconds
--------IPAnalysis.java IPAnalysis start 3 enter IPA Graph----
---------IPAGraph.java finish super()----
---------IPAGraph.java start new Arraylist <IPANode>----
---------IPAGraph.java enter buildgraph<prog>
---------IPAGraph.java enter identifyCloneableNodes
---------IPAGraph.java enter buildTopOrder
---------IPAGraph.java finish new IPA Graph
[IPA] Stops due to no flow entry
-------IPPointsToAnalysis.java IPPointsToAnalysis finished super----
-------IPPointsToAnalysis.java IPPointsToAnalysis finished 2----
-------IPPointsToAnalysis.java IPPointsToAnalysis end----
------PerformAliasAnalysis 1----
0000IPpointsToAnalysis
[IPA:PointsTo] Stops due to no flow entry
------PerformAliasAnalysis end----
-----cetus application IPChainAnalysis 2----
-----cetus application IPChainAnalysis 3----
----------------------------------------------
----start to generate CF graph with thread----
----------------------------------------------
proc now is: void __syncthreads()
{
;
return ;
}
=====
proc now is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub_block[threadIdx.y][threadIdx.x]=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
return ;
}
=====
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
-----cetus application IPChainAnalysis 4----
--------------------------------------------------------
----start to generate generate Program Summary Graph----
--------------------------------------------------------
############### PSG Summary Detail[__syncthreads()] #################
## Entry Node ##
# Ref Info (psg_entry_ref, use)
psg_entry_ref is null
# Global Info (psg_entry_global, UseOutSet)
psg_entry_global is null
## Exit Node ##
# Ref Info (psg_exit_ref, def)
psg_exit_ref is null
# Global Info (psg_exit_global, DefInSet)
psg_exit_global is null
############### PSG Summary Detail[matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)] #################
## Entry Node ##
# Ref Info (psg_entry_ref, use)
# Global Info (psg_entry_global, UseOutSet)
## Call Node ## IR: __syncthreads();
# Ref Info (psg_call_ref, def)
psg_call_ref is null
# Global Info (psg_call_global, DefInSet)
psg_call_global is null
## Return Node ## IR: __syncthreads();
# Ref Info (psg_return_ref, use)
psg_return_ref is null
# Global Info (psg_return_global, UseOutSet)
psg_return_global is null
## Call Node ## IR: __syncthreads();
# Ref Info (psg_call_ref, def)
psg_call_ref is null
# Global Info (psg_call_global, DefInSet)
psg_call_global is null
## Return Node ## IR: __syncthreads();
# Ref Info (psg_return_ref, use)
psg_return_ref is null
# Global Info (psg_return_global, UseOutSet)
psg_return_global is null
## Exit Node ##
# Ref Info (psg_exit_ref, def)
-DEF: C, NODE: C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
-DEF: B, NODE: * B
-DEF: A, NODE: * A
# Global Info (psg_exit_global, DefInSet)
############### PSG Propagated Detail[__syncthreads()] #################
## Entry Node ##
# Ref Info (psg_entry_ref)
psg_entry_ref is null
# Global Info (psg_entry_global)
psg_entry_global is null
## Exit Node ##
# Ref Info (psg_exit_ref)
psg_exit_ref is null
# Global Info (psg_exit_global)
psg_exit_global is null
############### PSG Propagated Detail[matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)] #################
## Entry Node ##
# Ref Info (psg_entry_ref)
# parameter: C
# parameter: B
# parameter: A
# Global Info (psg_entry_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
## Call Node ## IR: __syncthreads();
# Ref Info (psg_call_ref)
psg_call_ref is null
# Global Info (psg_call_global)
psg_call_global is null
## Return Node ## IR: __syncthreads();
# Ref Info (psg_return_ref)
psg_return_ref is null
# Global Info (psg_return_global)
psg_return_global is null
## Call Node ## IR: __syncthreads();
# Ref Info (psg_call_ref)
psg_call_ref is null
# Global Info (psg_call_global)
psg_call_global is null
## Return Node ## IR: __syncthreads();
# Ref Info (psg_return_ref)
psg_return_ref is null
# Global Info (psg_return_global)
psg_return_global is null
## Exit Node ##
# Ref Info (psg_exit_ref)
-OUTdef: C, NODE: C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
-OUTdef: B, NODE: * B
-OUTdef: A, NODE: * A
# Global Info (psg_exit_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
-----cetus application IPChainAnalysis 5----
-----cetus application IPChainAnalysis 6----
-----cetus application IPChainAnalysis 7----
-----cetus application IPChainAnalysis end----
----FcudaGlobalData2.java end----
----1.4----mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub_block[threadIdx.y][threadIdx.x]=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
return ;
}
006
Instances of tidx: [threadIdx, threadIdx, threadIdx, threadIdx, threadIdx, threadIdx, threadIdx, threadIdx, threadIdx, threadIdx, threadIdx, threadIdx, threadIdx, threadIdx, threadIdx, threadIdx, threadIdx, threadIdx]
----c.1---- idx is: threadIdx
----c.1---- parStmt is: Csub_block[threadIdx.y][threadIdx.x]=0;
----c.1---- idx is: threadIdx
----c.1---- parStmt is: Csub_block[threadIdx.y][threadIdx.x]=0;
----c.1---- idx is: threadIdx
----c.1---- parStmt is: As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
----c.1---- idx is: threadIdx
----c.1---- parStmt is: As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
----c.1---- idx is: threadIdx
----c.1---- parStmt is: As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
----c.1---- idx is: threadIdx
----c.1---- parStmt is: As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
----c.1---- idx is: threadIdx
----c.1---- parStmt is: Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
----c.1---- idx is: threadIdx
----c.1---- parStmt is: Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
----c.1---- idx is: threadIdx
----c.1---- parStmt is: Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
----c.1---- idx is: threadIdx
----c.1---- parStmt is: Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
----c.1---- idx is: threadIdx
----c.1---- parStmt is: Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
----c.1---- idx is: threadIdx
----c.1---- parStmt is: Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
----c.1---- idx is: threadIdx
----c.1---- parStmt is: Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
----c.1---- idx is: threadIdx
----c.1---- parStmt is: Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
----c.1---- idx is: threadIdx
----c.1---- parStmt is: C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
----c.1---- idx is: threadIdx
----c.1---- parStmt is: C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
----c.1---- idx is: threadIdx
----c.1---- parStmt is: C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
----c.1---- idx is: threadIdx
----c.1---- parStmt is: C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
defIDs: [Csub_block, Csub_block, As, As, As, As, Bs, Bs, Bs, Bs, Csub_block, Csub_block, Csub_block, Csub_block, C, C, C, C]
Looking for uses of: Csub_block
... in: Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
... useID: Csub_block
... in: C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
... useID: Csub_block
Looking for uses of: Csub_block
... in: #pragma fcuda stmt tdep_vars=[Csub_block]
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
... in: #pragma fcuda stmt tdep_vars=[Csub_block]
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
Looking for uses of: As
... in: #pragma fcuda stmt tdep_vars=[Csub_block]
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
... useID: As
Looking for uses of: As
... in: #pragma fcuda stmt tdep_vars=[As, Csub_block]
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
Looking for uses of: As
... in: #pragma fcuda stmt tdep_vars=[As, Csub_block]
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
Looking for uses of: As
... in: #pragma fcuda stmt tdep_vars=[As, Csub_block]
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
Looking for uses of: Bs
... in: #pragma fcuda stmt tdep_vars=[As, Csub_block]
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
... useID: Bs
Looking for uses of: Bs
... in: #pragma fcuda stmt tdep_vars=[Bs, As, Csub_block]
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
Looking for uses of: Bs
... in: #pragma fcuda stmt tdep_vars=[Bs, As, Csub_block]
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
Looking for uses of: Bs
... in: #pragma fcuda stmt tdep_vars=[Bs, As, Csub_block]
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
Looking for uses of: Csub_block
... in: #pragma fcuda stmt tdep_vars=[Bs, As, Csub_block]
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
... in: #pragma fcuda stmt tdep_vars=[Csub_block]
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
Looking for uses of: Csub_block
... in: #pragma fcuda stmt tdep_vars=[Bs, As, Csub_block]
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
... in: #pragma fcuda stmt tdep_vars=[Csub_block]
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
Looking for uses of: Csub_block
... in: #pragma fcuda stmt tdep_vars=[Bs, As, Csub_block]
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
... in: #pragma fcuda stmt tdep_vars=[Csub_block]
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
Looking for uses of: Csub_block
... in: #pragma fcuda stmt tdep_vars=[Bs, As, Csub_block]
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
... in: #pragma fcuda stmt tdep_vars=[Csub_block]
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
Looking for uses of: C
Looking for uses of: C
Looking for uses of: C
Looking for uses of: C
defIDs: [Csub_block, C, Csub_block, Csub_block]
Looking for uses of: Csub_block
... in: #pragma fcuda stmt tdep_vars=[Bs, As, Csub_block]
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
... in: #pragma fcuda stmt tdep_vars=[Csub_block]
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
Looking for uses of: C
Looking for uses of: Csub_block
... in: #pragma fcuda stmt tdep_vars=[Bs, As, Csub_block]
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
... in: #pragma fcuda stmt tdep_vars=[Csub_block]
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
Looking for uses of: Csub_block
... in: #pragma fcuda stmt tdep_vars=[Bs, As, Csub_block]
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
... in: #pragma fcuda stmt tdep_vars=[Csub_block]
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
----c.2---- tDepStmt is: #pragma fcuda stmt tdep_vars=[Csub_block]
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
----c.2---- tDepStmt is: As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
----c.21---- tDepStmt is: #pragma fcuda stmt tdep=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
----c.2---- tDepStmt is: Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
----c.21---- tDepStmt is: #pragma fcuda stmt tdep=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
----c.2---- tDepStmt is: Csub_block[threadIdx.y][threadIdx.x]=0;
----c.21---- tDepStmt is: #pragma fcuda stmt tdep=true
Csub_block[threadIdx.y][threadIdx.x]=0;
----c.2---- tDepStmt is: #pragma fcuda stmt tdep_vars=[Bs, As, Csub_block]
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
----1.5----mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
#pragma fcuda stmt tdep=true
Csub_block[threadIdx.y][threadIdx.x]=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt tdep=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt tdep=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt tdep_vars=[Bs, As, Csub_block] tdep=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda stmt tdep_vars=[Csub_block] tdep=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
return ;
}
007
findTransferStmts Symbols: [* A, * C, * B]
findTransferStmts non-Const Symbols: [* A, * C, * B]
findTransferStmts for: A
findTransferStmts for: B
findTransferStmts for: C
INFO - findTransferStmts: 12 address index IDs
....IPChainAnalysis getDefList....
INFO - findTransferStmts: 2 defStmts for addrID: a
INFO - findTransferStmts: Corner case forloop definition: ((a=aBegin), (b=bBegin));
WARNING - findTransferStmts: Definition of class: class cetus.hir.CommaExpression
((a+=aStep), (b+=bStep))
....IPChainAnalysis getDefList....
INFO - findTransferStmts: 1 defStmts for addrID: wA
WARNING - findTransferStmts: Definition of class: class cetus.hir.VariableDeclarator
wA
....IPChainAnalysis getDefList....
INFO - findTransferStmts: 2 defStmts for addrID: b
INFO - findTransferStmts: Corner case forloop definition: ((a=aBegin), (b=bBegin));
WARNING - findTransferStmts: Definition of class: class cetus.hir.CommaExpression
((a+=aStep), (b+=bStep))
....IPChainAnalysis getDefList....
INFO - findTransferStmts: 1 defStmts for addrID: wB
WARNING - findTransferStmts: Definition of class: class cetus.hir.VariableDeclarator
wB
....IPChainAnalysis getDefList....
INFO - findTransferStmts: 1 defStmts for addrID: c
....IPChainAnalysis getDefList....
INFO - findTransferStmts: 1 defStmts for addrID: wB
WARNING - findTransferStmts: Definition of class: class cetus.hir.VariableDeclarator
wB
----1.6----mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
#pragma fcuda stmt tdep=true
Csub_block[threadIdx.y][threadIdx.x]=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A tdep=true TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B tdep=true TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt tdep_vars=[Bs, As, Csub_block] tdep=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda stmt rdNwrt=false GLBpntr=C tdep_vars=[Csub_block] tdep=true TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
return ;
}
008
----1.7----mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
#pragma fcuda stmt name=CMP_5 tdep=true seqID=5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 tdep=true seqID=6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 tdep=true seqID=6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_9 tdep=true seqID=9
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
return ;
}
009_finish
------001------ HTGraph.java super()
------002------ HTGraph.java curTask = new String()
------003------ HTGraph.java HTGName = new String(name)
------004------ HTGraph.java if(trv instanceof Procedure)
------005------ HTGraph.java mProcedure = (Procedure) trv
------007------ HTGraph.java before build graph
------0007------ t is procedure now
------0007------ t is procedure now, getbody
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
#pragma fcuda stmt name=CMP_5 tdep=true seqID=5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 tdep=true seqID=6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 tdep=true seqID=6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_9 tdep=true seqID=9
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
return ;
}
------0007------ t is CompoundStatement now
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
#pragma fcuda stmt name=CMP_5 tdep=true seqID=5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 tdep=true seqID=6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 tdep=true seqID=6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_9 tdep=true seqID=9
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
return ;
}
------0007------ buildCompound cStmt getChildren
[int bx;, int by;, int aBegin;, int aEnd;, int aStep;, int bBegin;, int bStep;, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];, int a;, int b;, int k;, int c;, #pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 , bx=blockIdx.x;, by=blockIdx.y;, aBegin=((wA*16)*by);, aEnd=((aBegin+wA)-1);, aStep=16;, bBegin=(16*bx);, bStep=(16*wB);, #pragma fcuda stmt name=CMP_5 tdep=true seqID=5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;, a=0;, b=0;, k=0;, for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 tdep=true seqID=6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 tdep=true seqID=6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_9 tdep=true seqID=9
__syncthreads();
}, c=(((wB*16)*by)+(16*bx));, , , #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];, , , return ;]
------0007------ buildCompound for(Traversable trv : children), current trv is
int bx;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int bx;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int by;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int by;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int aBegin;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int aBegin;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int aEnd;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int aEnd;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int aStep;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int aStep;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int bBegin;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int bBegin;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int bStep;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int bStep;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int a;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int a;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int b;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int b;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int k;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int k;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int c;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int c;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma HLS INTERFACE ap_bus port=A depth=3840
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for #pragma HLS INTERFACE ap_bus port=A depth=3840
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma HLS INTERFACE ap_bus port=B depth=6144
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for #pragma HLS INTERFACE ap_bus port=B depth=6144
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma HLS INTERFACE ap_bus port=C depth=10240
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for #pragma HLS INTERFACE ap_bus port=C depth=10240
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
bx=blockIdx.x;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for bx=blockIdx.x;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
by=blockIdx.y;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for by=blockIdx.y;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
aBegin=((wA*16)*by);
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for aBegin=((wA*16)*by);
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
aEnd=((aBegin+wA)-1);
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for aEnd=((aBegin+wA)-1);
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
aStep=16;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for aStep=16;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
bBegin=(16*bx);
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for bBegin=(16*bx);
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
bStep=(16*wB);
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for bStep=(16*wB);
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma fcuda stmt name=CMP_5 tdep=true seqID=5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular tdep stmt, COM task----
-----added node: CMP_5
------done curr build graph
--- DFAGraph for #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ buildCompound for(Traversable trv : children), current trv is
a=0;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for a=0;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
b=0;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for b=0;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
k=0;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for k=0;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 tdep=true seqID=6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 tdep=true seqID=6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_9 tdep=true seqID=9
__syncthreads();
}
------ for loop current trv is not compound
------0007------ t is ForLoop now
------0007------ t is CompoundStatement now
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 tdep=true seqID=6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 tdep=true seqID=6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_9 tdep=true seqID=9
__syncthreads();
}
------0007------ buildCompound cStmt getChildren
[__shared__ DATATYPE As[16][16];, __shared__ DATATYPE Bs[16][16];, , , #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 tdep=true seqID=6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];, #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 tdep=true seqID=6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];, , , , #pragma fcuda stmt SNCtask=true name=SNC_7 tdep=true seqID=7
__syncthreads();, {
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}, #pragma fcuda stmt SNCtask=true name=SNC_9 tdep=true seqID=9
__syncthreads();, ]
------0007------ buildCompound for(Traversable trv : children), current trv is
__shared__ DATATYPE As[16][16];
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for __shared__ DATATYPE As[16][16];
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
__shared__ DATATYPE Bs[16][16];
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for __shared__ DATATYPE Bs[16][16];
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 tdep=true seqID=6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular tdep stmt, TRN task----
-----added node: TRN_6
------done curr build graph
--- DFAGraph for #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 tdep=true seqID=6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular tdep stmt, TRN task----
-----added node: TRN_6
------done curr build graph
--- DFAGraph for #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ buildCompound for(Traversable trv : children), current trv is
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma fcuda stmt SNCtask=true name=SNC_7 tdep=true seqID=7
__syncthreads();
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular tdep stmt, SYNC task----
-----added node: SNC_7
------done curr build graph
--- DFAGraph for #pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ buildCompound for(Traversable trv : children), current trv is
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
------ for loop current trv is compound
------0007------ t is CompoundStatement now
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
------0007------ buildCompound cStmt getChildren
[lp1:, , for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}, ]
------0007------ buildCompound for(Traversable trv : children), current trv is
lp1:
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for lp1:
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
------ for loop current trv is not compound
------0007------ t is ForLoop now
------0007------ t is CompoundStatement now
{
#pragma fcuda stmt name=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
------0007------ buildCompound cStmt getChildren
[#pragma fcuda stmt name=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);]
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma fcuda stmt name=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular tdep stmt, COM task----
-----added node: CMP_8
------done curr build graph
--- DFAGraph for #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ exit buildCompound for loop
------ ret.size > 0------
------ wrap enabled------
====== DFAGraph.java enter getFirst()
------ exit buildCompound with wrap enabled, and ret > 0, return superDFA------
------ exit buildCompound with wrap enabled, and ret == 0, return superDFA------
------done curr build graph
------ for loop ret has node------
====== DFAGraph.java enter getFirst()
------done curr build graph
--- DFAGraph for #pragma fcuda stmt HTGNode=FOR_HTG_CMP_8
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ buildCompound for(Traversable trv : children), current trv is
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for
has 0 nodes
------0007------ exit buildCompound for loop
------ ret.size > 0------
------ exit buildCompound with NO wrap, return ret------
------done curr build graph
--- DFAGraph for {
lp1:
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma fcuda stmt SNCtask=true name=SNC_9 tdep=true seqID=9
__syncthreads();
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular tdep stmt, SYNC task----
-----added node: SNC_9
------done curr build graph
--- DFAGraph for #pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ buildCompound for(Traversable trv : children), current trv is
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for
has 0 nodes
------0007------ exit buildCompound for loop
------ ret.size > 0------
------ wrap enabled------
====== DFAGraph.java enter getFirst()
------ exit buildCompound with wrap enabled, and ret > 0, return superDFA------
------ exit buildCompound with wrap enabled, and ret == 0, return superDFA------
------done curr build graph
------ for loop ret has node------
====== DFAGraph.java enter getFirst()
------done curr build graph
--- DFAGraph for #pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
}
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ buildCompound for(Traversable trv : children), current trv is
c=(((wB*16)*by)+(16*bx));
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for c=(((wB*16)*by)+(16*bx));
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular tdep stmt, TRN task----
-----added node: TRN_10
------done curr build graph
--- DFAGraph for #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ buildCompound for(Traversable trv : children), current trv is
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
return ;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for return ;
has 0 nodes
------0007------ exit buildCompound for loop
------ ret.size > 0------
------ exit buildCompound with NO wrap, return ret------
------done curr build graph
------done curr build graph
------008------ HTGraph.java DFAGraph dfag = buildGraph(trv)
PRINT-GRAPH OUTPUT
====================
| Graph: matrixMul L1
|
V
--------------------
| CMP_5
| type: CMP
| Addr-Use: false
--------------------
|
V
--------------------
| FOR_HTG_TRN_6
| type: FOR
| tdep: false
--------------------
====================
| Graph: HTG_TRN_6 L2
|
V
--------------------
| TRN_6
| ParentNode: FOR_HTG_TRN_6
| type: TRN
--------------------
|
V
--------------------
| SNC_7
| ParentNode: FOR_HTG_TRN_6
| type: SNC
--------------------
|
V
--------------------
| FOR_HTG_CMP_8
| ParentNode: FOR_HTG_TRN_6
| type: FOR
| tdep: false
--------------------
====================
| Graph: HTG_CMP_8 L3
|
V
--------------------
| CMP_8
| ParentNode: FOR_HTG_CMP_8
| type: CMP
| Addr-Use: false
--------------------
\===== HTG_CMP_8 =====/
|
V
--------------------
| SNC_9
| ParentNode: FOR_HTG_TRN_6
| type: SNC
--------------------
\===== HTG_TRN_6 =====/
|
V
--------------------
| TRN_10
| type: TRN
--------------------
\===== matrixMul =====/
*** CF Node Info Begin ***
FOR_HTG_TRN_6 info:
- uniform: false - SubTypes: [TRN, CMP, SNC]
*** CF Node Info End ***
*** Number of Tasks in matrixMul :4
---- Target: CMP_5 ----
nodes #: 1
CMP_5
Node Types: [CMP]
uniform: true
**** TLOOPS(1) ****
TLOOP 0
---- Target: CMP_5 ----
nodes #: 1
CMP_5
Node Types: [CMP]
uniform: true
----addStatementBefore----index is:22
----addStatementBefore----index is:23
---- Target: TRN_6 ----
nodes #: 1
TRN_6
Node Types: [TRN]
uniform: true
**** TLOOPS(1) ****
TLOOP 0
---- Target: TRN_6 ----
nodes #: 1
TRN_6
Node Types: [TRN]
uniform: true
----addStatementBefore----index is:4
----addStatementBefore----index is:5
---- Target: SNC_7 ----
nodes #: 3
SNC_7
FOR_HTG_CMP_8
SNC_9
Node Types: [CMP, SNC]
uniform: true
**** TLOOPS(1) ****
TLOOP 0
---- Target: FOR_HTG_CMP_8 ----
nodes #: 1
FOR_HTG_CMP_8
Node Types: [CMP]
uniform: true
----addStatementBefore----index is:13
----addStatementBefore----index is:2
---- Target: TRN_10 ----
nodes #: 1
TRN_10
Node Types: [TRN]
uniform: true
**** TLOOPS(1) ****
TLOOP 0
---- Target: TRN_10 ----
nodes #: 1
TRN_10
Node Types: [TRN]
uniform: true
----addStatementBefore----index is:34
----addStatementBefore----index is:35
-----kernelTransformPass, finish transformProcedure-----, proc now is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
[PrivatizeScalars-FCUDA] end in 0.36 seconds
[LinkSymbol] 93 updates in 0.00 seconds
*** After PrivatizeScalars ***
#include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
const int BLOCKDIM_X = 16, BLOCKDIM_Y = 16;
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
===========================================
----FcudaGlobalData2.java start----
-----cetus application IPChainAnalysis start----
-----cetus application IPChainAnalysis 1----
------IPChainAnalysis.java PerformAliasAnalysis start----
-------IPPointsToAnalysis.java IPPointsToAnalysis start super----
--------IPAnalysis.java IPAnalysis start 1----
--------IPAnalysis.java IPAnalysis start 2----
[NormalizeReturn] begin
*AP* procedure: __syncthreads
*AP* is_void=true
*AP* ret_expr= null
*AP* procedure: matrixMul
*AP* is_void=true
*AP* ret_expr= null
[NormalizeReturn] end in 0.00 seconds
[LinkSymbol] 93 updates in 0.00 seconds
--------IPAnalysis.java IPAnalysis start 3 enter IPA Graph----
---------IPAGraph.java finish super()----
---------IPAGraph.java start new Arraylist <IPANode>----
---------IPAGraph.java enter buildgraph<prog>
---------IPAGraph.java enter identifyCloneableNodes
---------IPAGraph.java enter buildTopOrder
---------IPAGraph.java finish new IPA Graph
[IPA] Stops due to no flow entry
-------IPPointsToAnalysis.java IPPointsToAnalysis finished super----
-------IPPointsToAnalysis.java IPPointsToAnalysis finished 2----
-------IPPointsToAnalysis.java IPPointsToAnalysis end----
------PerformAliasAnalysis 1----
0000IPpointsToAnalysis
[IPA:PointsTo] Stops due to no flow entry
------PerformAliasAnalysis end----
-----cetus application IPChainAnalysis 2----
-----cetus application IPChainAnalysis 3----
----------------------------------------------
----start to generate CF graph with thread----
----------------------------------------------
proc now is: void __syncthreads()
{
;
return ;
}
=====
====== DFAGraph.java enter getFirst()
proc now is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
=====
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
-----cetus application IPChainAnalysis 4----
--------------------------------------------------------
----start to generate generate Program Summary Graph----
--------------------------------------------------------
############### PSG Summary Detail[__syncthreads()] #################
## Entry Node ##
# Ref Info (psg_entry_ref, use)
psg_entry_ref is null
# Global Info (psg_entry_global, UseOutSet)
psg_entry_global is null
## Exit Node ##
# Ref Info (psg_exit_ref, def)
psg_exit_ref is null
# Global Info (psg_exit_global, DefInSet)
psg_exit_global is null
############### PSG Summary Detail[matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)] #################
## Entry Node ##
# Ref Info (psg_entry_ref, use)
# Global Info (psg_entry_global, UseOutSet)
## Call Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
# Ref Info (psg_call_ref, def)
psg_call_ref is null
# Global Info (psg_call_global, DefInSet)
psg_call_global is null
## Return Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
# Ref Info (psg_return_ref, use)
psg_return_ref is null
# Global Info (psg_return_global, UseOutSet)
psg_return_global is null
## Call Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
# Ref Info (psg_call_ref, def)
psg_call_ref is null
# Global Info (psg_call_global, DefInSet)
psg_call_global is null
## Return Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
# Ref Info (psg_return_ref, use)
psg_return_ref is null
# Global Info (psg_return_global, UseOutSet)
psg_return_global is null
## Exit Node ##
# Ref Info (psg_exit_ref, def)
-DEF: C, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
-DEF: B, NODE: * B
-DEF: A, NODE: * A
# Global Info (psg_exit_global, DefInSet)
############### PSG Propagated Detail[__syncthreads()] #################
## Entry Node ##
# Ref Info (psg_entry_ref)
psg_entry_ref is null
# Global Info (psg_entry_global)
psg_entry_global is null
## Exit Node ##
# Ref Info (psg_exit_ref)
psg_exit_ref is null
# Global Info (psg_exit_global)
psg_exit_global is null
############### PSG Propagated Detail[matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)] #################
## Entry Node ##
# Ref Info (psg_entry_ref)
# parameter: C
# parameter: B
# parameter: A
# Global Info (psg_entry_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
## Call Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
# Ref Info (psg_call_ref)
psg_call_ref is null
# Global Info (psg_call_global)
psg_call_global is null
## Return Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
# Ref Info (psg_return_ref)
psg_return_ref is null
# Global Info (psg_return_global)
psg_return_global is null
## Call Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
# Ref Info (psg_call_ref)
psg_call_ref is null
# Global Info (psg_call_global)
psg_call_global is null
## Return Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
# Ref Info (psg_return_ref)
psg_return_ref is null
# Global Info (psg_return_global)
psg_return_global is null
## Exit Node ##
# Ref Info (psg_exit_ref)
-OUTdef: C, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
-OUTdef: B, NODE: * B
-OUTdef: A, NODE: * A
# Global Info (psg_exit_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
-----cetus application IPChainAnalysis 5----
-----cetus application IPChainAnalysis 6----
-----cetus application IPChainAnalysis 7----
-----cetus application IPChainAnalysis end----
----FcudaGlobalData2.java end----
DEF::
########################## (printDefUseChain) Procedure: __syncthreads
#######################################################################
########################## (printDefUseChain) Procedure: matrixMul
Def[0]: C, IR: * C
Def[1]: A, IR: * A
--> Use: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];, proc: matrixMul
Def[2]: B, IR: * B
--> Use: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];, proc: matrixMul
Def[3]: wA, IR: wA
--> Use: aBegin=((wA*16)*by);, proc: matrixMul
--> Use: aEnd=((aBegin+wA)-1);, proc: matrixMul
--> Use: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];, proc: matrixMul
Def[4]: wB, IR: wB
--> Use: bStep=(16*wB);, proc: matrixMul
--> Use: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];, proc: matrixMul
--> Use: c=(((wB*16)*by)+(16*bx));, proc: matrixMul
--> Use: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];, proc: matrixMul
Def[5]: bx, IR: bx=blockIdx.x;
--> Use: bBegin=(16*bx);, proc: matrixMul
--> Use: c=(((wB*16)*by)+(16*bx));, proc: matrixMul
Def[6]: by, IR: by=blockIdx.y;
--> Use: aBegin=((wA*16)*by);, proc: matrixMul
--> Use: c=(((wB*16)*by)+(16*bx));, proc: matrixMul
Def[7]: aBegin, IR: aBegin=((wA*16)*by);
--> Use: aEnd=((aBegin+wA)-1);, proc: matrixMul
--> Use: ((a=aBegin), (b=bBegin));, proc: matrixMul
Def[8]: aEnd, IR: aEnd=((aBegin+wA)-1);
--> Use: a<=aEnd, proc: matrixMul
Def[9]: aStep, IR: aStep=16;
--> Use: ((a+=aStep), (b+=bStep)), proc: matrixMul
Def[10]: bBegin, IR: bBegin=(16*bx);
--> Use: ((a=aBegin), (b=bBegin));, proc: matrixMul
Def[11]: bStep, IR: bStep=(16*wB);
--> Use: ((a+=aStep), (b+=bStep)), proc: matrixMul
Def[12]: Csub_block, IR: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
--> Use: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);, proc: matrixMul
--> Use: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];, proc: matrixMul
Def[13]: a, IR: a=0;
Def[14]: b, IR: b=0;
Def[15]: k, IR: k=0;
Def[16]: a, IR: ((a=aBegin), (b=bBegin));
--> Use: a<=aEnd, proc: matrixMul
--> Use: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];, proc: matrixMul
--> Use: ((a+=aStep), (b+=bStep)), proc: matrixMul
Def[17]: b, IR: ((a=aBegin), (b=bBegin));
--> Use: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];, proc: matrixMul
--> Use: ((a+=aStep), (b+=bStep)), proc: matrixMul
Def[18]: As, IR: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
--> Use: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);, proc: matrixMul
Def[19]: Bs, IR: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
--> Use: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);, proc: matrixMul
Def[20]: k, IR: #pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0;
--> Use: k<16, proc: matrixMul
--> Use: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);, proc: matrixMul
--> Use: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);, proc: matrixMul
--> Use: ++ k, proc: matrixMul
Def[21]: Csub_block, IR: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
--> Use: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);, proc: matrixMul
--> Use: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];, proc: matrixMul
Def[22]: k, IR: ++ k
--> Use: k<16, proc: matrixMul
--> Use: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);, proc: matrixMul
--> Use: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);, proc: matrixMul
--> Use: ++ k, proc: matrixMul
Def[23]: a, IR: ((a+=aStep), (b+=bStep))
--> Use: a<=aEnd, proc: matrixMul
--> Use: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];, proc: matrixMul
--> Use: ((a+=aStep), (b+=bStep)), proc: matrixMul
Def[24]: b, IR: ((a+=aStep), (b+=bStep))
--> Use: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];, proc: matrixMul
--> Use: ((a+=aStep), (b+=bStep)), proc: matrixMul
Def[25]: c, IR: c=(((wB*16)*by)+(16*bx));
--> Use: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];, proc: matrixMul
Def[26]: C, IR: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#######################################################################
USE::
########################## (printUseDefChain) Procedure: __syncthreads
#######################################################################
########################## (printUseDefChain) Procedure: matrixMul
Use[0]: wA, IR: aBegin=((wA*16)*by);
....IPChainAnalysis getDefList....
--> Def: wA, proc: matrixMul
Use[1]: by, IR: aBegin=((wA*16)*by);
....IPChainAnalysis getDefList....
--> Def: by=blockIdx.y;, proc: matrixMul
Use[2]: aBegin, IR: aEnd=((aBegin+wA)-1);
....IPChainAnalysis getDefList....
--> Def: aBegin=((wA*16)*by);, proc: matrixMul
Use[3]: wA, IR: aEnd=((aBegin+wA)-1);
....IPChainAnalysis getDefList....
--> Def: wA, proc: matrixMul
Use[4]: bx, IR: bBegin=(16*bx);
....IPChainAnalysis getDefList....
--> Def: bx=blockIdx.x;, proc: matrixMul
Use[5]: wB, IR: bStep=(16*wB);
....IPChainAnalysis getDefList....
--> Def: wB, proc: matrixMul
Use[6]: aBegin, IR: ((a=aBegin), (b=bBegin));
....IPChainAnalysis getDefList....
--> Def: aBegin=((wA*16)*by);, proc: matrixMul
Use[7]: bBegin, IR: ((a=aBegin), (b=bBegin));
....IPChainAnalysis getDefList....
--> Def: bBegin=(16*bx);, proc: matrixMul
Use[8]: a, IR: a<=aEnd
....IPChainAnalysis getDefList....
--> Def: ((a=aBegin), (b=bBegin));, proc: matrixMul
--> Def: ((a+=aStep), (b+=bStep)), proc: matrixMul
Use[9]: aEnd, IR: a<=aEnd
....IPChainAnalysis getDefList....
--> Def: aEnd=((aBegin+wA)-1);, proc: matrixMul
Use[10]: a, IR: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
....IPChainAnalysis getDefList....
--> Def: ((a=aBegin), (b=bBegin));, proc: matrixMul
--> Def: ((a+=aStep), (b+=bStep)), proc: matrixMul
Use[11]: wA, IR: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
....IPChainAnalysis getDefList....
--> Def: wA, proc: matrixMul
Use[12]: A, IR: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
....IPChainAnalysis getDefList....
--> Def: * A, proc: matrixMul
Use[13]: b, IR: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
....IPChainAnalysis getDefList....
--> Def: ((a=aBegin), (b=bBegin));, proc: matrixMul
--> Def: ((a+=aStep), (b+=bStep)), proc: matrixMul
Use[14]: wB, IR: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
....IPChainAnalysis getDefList....
--> Def: wB, proc: matrixMul
Use[15]: B, IR: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
....IPChainAnalysis getDefList....
--> Def: * B, proc: matrixMul
Use[16]: k, IR: k<16
....IPChainAnalysis getDefList....
--> Def: #pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0;, proc: matrixMul
--> Def: ++ k, proc: matrixMul
Use[17]: k, IR: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
....IPChainAnalysis getDefList....
--> Def: #pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0;, proc: matrixMul
--> Def: ++ k, proc: matrixMul
Use[18]: As, IR: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
....IPChainAnalysis getDefList....
--> Def: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];, proc: matrixMul
Use[19]: k, IR: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
....IPChainAnalysis getDefList....
--> Def: #pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0;, proc: matrixMul
--> Def: ++ k, proc: matrixMul
Use[20]: Bs, IR: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
....IPChainAnalysis getDefList....
--> Def: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];, proc: matrixMul
Use[21]: Csub_block, IR: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
....IPChainAnalysis getDefList....
--> Def: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;, proc: matrixMul
--> Def: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);, proc: matrixMul
Use[22]: k, IR: ++ k
....IPChainAnalysis getDefList....
--> Def: #pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0;, proc: matrixMul
--> Def: ++ k, proc: matrixMul
Use[23]: aStep, IR: ((a+=aStep), (b+=bStep))
....IPChainAnalysis getDefList....
--> Def: aStep=16;, proc: matrixMul
Use[24]: a, IR: ((a+=aStep), (b+=bStep))
....IPChainAnalysis getDefList....
--> Def: ((a=aBegin), (b=bBegin));, proc: matrixMul
--> Def: ((a+=aStep), (b+=bStep)), proc: matrixMul
Use[25]: bStep, IR: ((a+=aStep), (b+=bStep))
....IPChainAnalysis getDefList....
--> Def: bStep=(16*wB);, proc: matrixMul
Use[26]: b, IR: ((a+=aStep), (b+=bStep))
....IPChainAnalysis getDefList....
--> Def: ((a=aBegin), (b=bBegin));, proc: matrixMul
--> Def: ((a+=aStep), (b+=bStep)), proc: matrixMul
Use[27]: wB, IR: c=(((wB*16)*by)+(16*bx));
....IPChainAnalysis getDefList....
--> Def: wB, proc: matrixMul
Use[28]: by, IR: c=(((wB*16)*by)+(16*bx));
....IPChainAnalysis getDefList....
--> Def: by=blockIdx.y;, proc: matrixMul
Use[29]: bx, IR: c=(((wB*16)*by)+(16*bx));
....IPChainAnalysis getDefList....
--> Def: bx=blockIdx.x;, proc: matrixMul
Use[30]: Csub_block, IR: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
....IPChainAnalysis getDefList....
--> Def: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;, proc: matrixMul
--> Def: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);, proc: matrixMul
Use[31]: c, IR: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
....IPChainAnalysis getDefList....
--> Def: c=(((wB*16)*by)+(16*bx));, proc: matrixMul
Use[32]: wB, IR: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
....IPChainAnalysis getDefList....
--> Def: wB, proc: matrixMul
#######################################################################
[SplitFcudaTasks2-FCUDA] begin
[SplitFcudaTasks2-FCUDA] examining procedure matrixMul
mVar2Var:
{a=[a, b, bStep, blockIdx, aStep, wA, bBegin, wB, aBegin], A=[], bStep=[wB], b=[a, b, bStep, blockIdx, aStep, wA, bBegin, wB, aBegin], B=[], c=[blockIdx, wB], C=[blockIdx, threadIdx, c, wB], wA=[], bBegin=[blockIdx], k=[k], wB=[], Bs=[a, b, bStep, blockIdx, threadIdx, aStep, wA, bBegin, wB, aBegin], blockIdx=[], threadIdx=[], As=[a, b, bStep, blockIdx, threadIdx, aStep, wA, bBegin, wB, aBegin], aStep=[], aEnd=[blockIdx, wA, aBegin], Csub_block=[threadIdx, k], aBegin=[blockIdx, wA]}
----SFT2_entered transformProcedure----
-----SFT2 enter new flow-----
-----SFT2 finished addAllSharedToBRAMSet(proc)-----
-----SFT2 finished FCUDAGlobalData2.setBRAMSet(mBRAMSet);-----
-----SFT2 finished proc.getBody().addANSIDeclaration(MCUDAUtils.Bidx.getDecl().get(0));-----
-----proc after proc.getbody addAnsideclaration-----
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
-----start to decomposeKernel-----
----001----dck BFI new----
----002----annotLst bfitr getlist----
----003----annotLst bfitr getlist----
----004----asNum = annotLst size, asNum is: 31
----0041----enter for_1, curr asCnt is: 0
----0042----annotStmt is: #pragma HLS INTERFACE ap_bus port=A depth=3840
----0043----list fcAnnots is: []
----0041----enter for_1, curr asCnt is: 1
----0042----annotStmt is: #pragma HLS INTERFACE ap_bus port=B depth=6144
----0043----list fcAnnots is: []
----0041----enter for_1, curr asCnt is: 2
----0042----annotStmt is: #pragma HLS INTERFACE ap_bus port=C depth=10240
----0043----list fcAnnots is: []
----0041----enter for_1, curr asCnt is: 3
----0042----annotStmt is: #pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
----0043----list fcAnnots is: [#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true ]
----00431----enter for_2 curr fcannot is: #pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
----00432----curr annotType is: compute
----00433----curr bgn is: true
----00434----curr annotType is COM or TRN
----00435----curr bgn is true
----SFT2-decompose-before sanity check, inCMP is: false, inTRN is: false, tskName is: null
----0041----enter for_1, curr asCnt is: 4
----0042----annotStmt is: #pragma fcuda tloop name=CMP_5 end=false begin=true
----0043----list fcAnnots is: [#pragma fcuda tloop name=CMP_5 end=false begin=true ]
----00431----enter for_2 curr fcannot is: #pragma fcuda tloop name=CMP_5 end=false begin=true
----00432----curr annotType is: tloop
----00433----curr bgn is: true
----0043A----curr annotType is not COM nor TRN
----0041----enter for_1, curr asCnt is: 5
----0042----annotStmt is: #pragma fcuda tloop name=CMP_5 end=true begin=false
----0043----list fcAnnots is: [#pragma fcuda tloop name=CMP_5 end=true begin=false ]
----00431----enter for_2 curr fcannot is: #pragma fcuda tloop name=CMP_5 end=true begin=false
----00432----curr annotType is: tloop
----00433----curr bgn is: false
----0043A----curr annotType is not COM nor TRN
----0041----enter for_1, curr asCnt is: 6
----0042----annotStmt is: #pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
----0043----list fcAnnots is: [#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false ]
----00431----enter for_2 curr fcannot is: #pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
----00432----curr annotType is: compute
----00433----curr bgn is: false
----00434----curr annotType is COM or TRN
----00436----curr bgn is false
----00438----enter inCMP decomposeCompute
----00438----bgnstmt is: #pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
----00438----endStmt is: #pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
----00438----tskNmae is: CMP_5
... Prelim task handling: matrixMul_CMP_5
...tskName is: matrixMul_CMP_5
...mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
...print current fcudaGlobalData2 tasks...
Tasks: [matrixMul_CMP_5]
....00......................................
....01....enableSignal is: guard_matrixMul_CMP_5
....02....enDeclor is: guard_matrixMul_CMP_5
....03....enableDecl is: int guard_matrixMul_CMP_5
....04....after getbody addAnsideclaration, mprocedure_body is: {
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
----addStatementBefore----index is:12
....05....after addAfterLastDeclaration, mprocedure_body is: {
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
....06....parCStmt is: {
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
....07....idxDif is: 2
....08....taskCall is: matrixMul_CMP_5()
....09....after addTaskCall, tskName is: matrixMul_CMP_5
....09....after addTaskCall, taskCall is: matrixMul_CMP_5()
....10....after addFcudaCore, taskCall is: matrixMul_CMP_5()
....10....after addFcudaCore, mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
Creating new FcudaCoreData for core: matrixMul_CMP_5()
....11....after setCoreName, tskName is: matrixMul_CMP_5
....11....after setCoreName, taskCall is: matrixMul_CMP_5()
....11....after setCoreName, mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
....12....sftTaskArgs size is: 0
....12....sftTaskArgs is: []
....13....sftTaskArgs size is: 1
....13....sftTaskArgs is: [guard_matrixMul_CMP_5]
....14....sftTaskArgSyms is: [guard_matrixMul_CMP_5]
....15....sftTaskDecls size is: 1
....15....sftTaskDecls is: [int guard_matrixMul_CMP_5]
....16....sftTaskArgs size is: 2
....16....sftTaskArgs is: [guard_matrixMul_CMP_5, blockDim]
....16....sftTaskDecls size is: 4
....16....sftTaskDecls is: [int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx]
....17....sftCommonArgsIndex size is: 1
....17....sftCommonArgsIndex is: [1]
....18....sftTaskArgs size is: 3
....18....sftTaskArgs is: [guard_matrixMul_CMP_5, blockDim, gridDim]
....18....sftCommonArgsIndex size is: 2
....18....sftCommonArgsIndex is: [1, 2]
....19....sftTaskArgs size is: 4
....19....sftTaskArgs is: [guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx]
....20....enableStmt is: if (guard_matrixMul_CMP_5)
{
}
....21....fcTask is: {
if (guard_matrixMul_CMP_5)
{
}
}
....22....tskStmts size is: 41
....22....tskStmts is: [int aBegin;, int aEnd;, int aStep;, int bBegin;, int bStep;, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];, int a;, int b;, int k;, int c;, dim3 blockIdx;, int guard_matrixMul_CMP_5;, guard_matrixMul_CMP_5=1;, #pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 , aBegin=((wA*16)*blockIdx.y);, aEnd=((aBegin+wA)-1);, aStep=16;, bBegin=(16*blockIdx.x);, bStep=(16*wB);, #pragma fcuda compute cores=1 name=CMP_5 end=false begin=true , #pragma fcuda tloop name=CMP_5 end=false begin=true , #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;, #pragma fcuda tloop name=CMP_5 end=true begin=false , #pragma fcuda compute cores=1 name=CMP_5 end=true begin=false , a=0;, b=0;, k=0;, #pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}, c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));, , , #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true , #pragma fcuda tloop name=TRN_10 end=false begin=true , #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];, #pragma fcuda tloop name=TRN_10 end=true begin=false , #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false , , , return ;]
Marking Statements 19 - 23 for task: matrixMul_CMP_5
....23....curr sIdx is: 19 , curr tskStmts to be added is: #pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
....23....after adding, tskName is: matrixMul_CMP_5
....23....curr sIdx is: 20 , curr tskStmts to be added is: #pragma fcuda tloop name=CMP_5 end=false begin=true
....23....after adding, tskName is: matrixMul_CMP_5
....23....curr sIdx is: 21 , curr tskStmts to be added is: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
....23....after adding, tskName is: matrixMul_CMP_5
....23....curr sIdx is: 22 , curr tskStmts to be added is: #pragma fcuda tloop name=CMP_5 end=true begin=false
....23....after adding, tskName is: matrixMul_CMP_5
....23....curr sIdx is: 23 , curr tskStmts to be added is: #pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
....23....after adding, tskName is: matrixMul_CMP_5
........declList is: []
....24....ProcedureDeclarator tskProcDecl is: matrixMul_CMP_5()
....25....Procedure tskProc is: void matrixMul_CMP_5()
{
if (guard_matrixMul_CMP_5)
{
}
}
....26.... TranslationUnit kernUnit is: #include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
const int BLOCKDIM_X = 16, BLOCKDIM_Y = 16;
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
....27.... after kernUnit.addDeclaratuibBefore, kernUnit is: #include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
const int BLOCKDIM_X = 16, BLOCKDIM_Y = 16;
void matrixMul_CMP_5()
{
if (guard_matrixMul_CMP_5)
{
}
}
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
....28.... before FCUDAutils.addTaskMapping, mProcedure.getSymbolName is: matrixMul
....28.... before FCUDAutils.addTaskMapping, tskProc is: void matrixMul_CMP_5()
{
if (guard_matrixMul_CMP_5)
{
}
}
....29.... after FCUDAutils.addTaskMapping, mProcedure.getSymbolName is: matrixMul
....29.... after FCUDAutils.addTaskMapping, tskProc is: void matrixMul_CMP_5()
{
if (guard_matrixMul_CMP_5)
{
}
}
....30.... after FcudaAnnotation fannot, fannot is: #pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
....31.... fannot is not null
....32.... done taskCreationPrelims
....32.... final taskCall is: matrixMul_CMP_5()
----addStatementBefore----index is:21
Starting to collect parameters for procedure: matrixMul_CMP_5
Task Statement: #pragma fcuda compute cores=1 name=CMP_5 end=false begin=true of task: matrixMul_CMP_5
Task Statement: #pragma fcuda tloop name=CMP_5 end=false begin=true of task: matrixMul_CMP_5
Task Statement: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0; of task: matrixMul_CMP_5
Task defExp: Csub_block[threadIdx.y][threadIdx.x] of task: matrixMul_CMP_5
Task def: Csub_block of task: matrixMul_CMP_5
... has # of chain uses: 2
Check Uses: [#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);, #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];]
... has Use out of task
> #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
Task useExp: threadIdx.x of task: matrixMul_CMP_5
Task useExp: threadIdx.y of task: matrixMul_CMP_5
Task Statement: #pragma fcuda tloop name=CMP_5 end=true begin=false of task: matrixMul_CMP_5
Task Statement: #pragma fcuda compute cores=1 name=CMP_5 end=true begin=false of task: matrixMul_CMP_5
----0041----enter for_1, curr asCnt is: 7
----0042----annotStmt is:
----0043----list fcAnnots is: []
----0041----enter for_1, curr asCnt is: 8
----0042----annotStmt is:
----0043----list fcAnnots is: []
----0041----enter for_1, curr asCnt is: 9
----0042----annotStmt is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
----0043----list fcAnnots is: [#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true ]
----00431----enter for_2 curr fcannot is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
----00432----curr annotType is: transfer
----00433----curr bgn is: true
----00434----curr annotType is COM or TRN
----00435----curr bgn is true
----SFT2-decompose-before sanity check, inCMP is: false, inTRN is: false, tskName is: null
----0041----enter for_1, curr asCnt is: 10
----0042----annotStmt is: #pragma fcuda tloop name=TRN_10 end=false begin=true
----0043----list fcAnnots is: [#pragma fcuda tloop name=TRN_10 end=false begin=true ]
----00431----enter for_2 curr fcannot is: #pragma fcuda tloop name=TRN_10 end=false begin=true
----00432----curr annotType is: tloop
----00433----curr bgn is: true
----0043A----curr annotType is not COM nor TRN
----0041----enter for_1, curr asCnt is: 11
----0042----annotStmt is: #pragma fcuda tloop name=TRN_10 end=true begin=false
----0043----list fcAnnots is: [#pragma fcuda tloop name=TRN_10 end=true begin=false ]
----00431----enter for_2 curr fcannot is: #pragma fcuda tloop name=TRN_10 end=true begin=false
----00432----curr annotType is: tloop
----00433----curr bgn is: false
----0043A----curr annotType is not COM nor TRN
----0041----enter for_1, curr asCnt is: 12
----0042----annotStmt is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
----0043----list fcAnnots is: [#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false ]
----00431----enter for_2 curr fcannot is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
----00432----curr annotType is: transfer
----00433----curr bgn is: false
----00434----curr annotType is COM or TRN
----00436----curr bgn is false
----00439----enter inTRN decomposeCompute
----00439----bgnstmt is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
----00439----endStmt is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
----00439----tskNmae is: TRN_10
----4.0----trnTaskName is: matrixMul_TRN_10
----4.0----bgnIdx is: 34
----4.0----endIdx is: 38
... Prelim task handling: matrixMul_TRN_10
...tskName is: matrixMul_TRN_10
...mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
...print current fcudaGlobalData2 tasks...
Tasks: [matrixMul_TRN_10, matrixMul_CMP_5]
....00......................................
....01....enableSignal is: guard_matrixMul_TRN_10
....02....enDeclor is: guard_matrixMul_TRN_10
....03....enableDecl is: int guard_matrixMul_TRN_10
....04....after getbody addAnsideclaration, mprocedure_body is: {
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
----addStatementBefore----index is:13
....05....after addAfterLastDeclaration, mprocedure_body is: {
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
....06....parCStmt is: {
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
....07....idxDif is: 2
....08....taskCall is: matrixMul_TRN_10()
....09....after addTaskCall, tskName is: matrixMul_TRN_10
....09....after addTaskCall, taskCall is: matrixMul_TRN_10()
....10....after addFcudaCore, taskCall is: matrixMul_TRN_10()
....10....after addFcudaCore, mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
Creating new FcudaCoreData for core: matrixMul_TRN_10()
....11....after setCoreName, tskName is: matrixMul_TRN_10
....11....after setCoreName, taskCall is: matrixMul_TRN_10()
....11....after setCoreName, mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
....12....sftTaskArgs size is: 0
....12....sftTaskArgs is: []
....13....sftTaskArgs size is: 1
....13....sftTaskArgs is: [guard_matrixMul_TRN_10]
....14....sftTaskArgSyms is: [guard_matrixMul_TRN_10]
....15....sftTaskDecls size is: 1
....15....sftTaskDecls is: [int guard_matrixMul_TRN_10]
....16....sftTaskArgs size is: 2
....16....sftTaskArgs is: [guard_matrixMul_TRN_10, blockDim]
....16....sftTaskDecls size is: 4
....16....sftTaskDecls is: [int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx]
....17....sftCommonArgsIndex size is: 1
....17....sftCommonArgsIndex is: [1]
....18....sftTaskArgs size is: 3
....18....sftTaskArgs is: [guard_matrixMul_TRN_10, blockDim, gridDim]
....18....sftCommonArgsIndex size is: 2
....18....sftCommonArgsIndex is: [1, 2]
....19....sftTaskArgs size is: 4
....19....sftTaskArgs is: [guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx]
....20....enableStmt is: if (guard_matrixMul_TRN_10)
{
}
....21....fcTask is: {
if (guard_matrixMul_TRN_10)
{
}
}
....22....tskStmts size is: 44
....22....tskStmts is: [int aBegin;, int aEnd;, int aStep;, int bBegin;, int bStep;, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];, int a;, int b;, int k;, int c;, dim3 blockIdx;, int guard_matrixMul_CMP_5;, int guard_matrixMul_TRN_10;, guard_matrixMul_TRN_10=1;, guard_matrixMul_CMP_5=1;, #pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 , aBegin=((wA*16)*blockIdx.y);, aEnd=((aBegin+wA)-1);, aStep=16;, bBegin=(16*blockIdx.x);, bStep=(16*wB);, matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);, #pragma fcuda compute cores=1 name=CMP_5 end=false begin=true , #pragma fcuda tloop name=CMP_5 end=false begin=true , #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;, #pragma fcuda tloop name=CMP_5 end=true begin=false , #pragma fcuda compute cores=1 name=CMP_5 end=true begin=false , a=0;, b=0;, k=0;, #pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}, c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));, , , #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true , #pragma fcuda tloop name=TRN_10 end=false begin=true , #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];, #pragma fcuda tloop name=TRN_10 end=true begin=false , #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false , , , return ;]
Marking Statements 34 - 38 for task: matrixMul_TRN_10
....23....curr sIdx is: 34 , curr tskStmts to be added is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
....23....after adding, tskName is: matrixMul_TRN_10
....23....curr sIdx is: 35 , curr tskStmts to be added is: #pragma fcuda tloop name=TRN_10 end=false begin=true
....23....after adding, tskName is: matrixMul_TRN_10
....23....curr sIdx is: 36 , curr tskStmts to be added is: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
....23....after adding, tskName is: matrixMul_TRN_10
....23....curr sIdx is: 37 , curr tskStmts to be added is: #pragma fcuda tloop name=TRN_10 end=true begin=false
....23....after adding, tskName is: matrixMul_TRN_10
....23....curr sIdx is: 38 , curr tskStmts to be added is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
....23....after adding, tskName is: matrixMul_TRN_10
........declList is: []
....24....ProcedureDeclarator tskProcDecl is: matrixMul_TRN_10()
....25....Procedure tskProc is: void matrixMul_TRN_10()
{
if (guard_matrixMul_TRN_10)
{
}
}
....26.... TranslationUnit kernUnit is: #include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
const int BLOCKDIM_X = 16, BLOCKDIM_Y = 16;
#pragma fcuda compute name=CMP_5 end=false cores=1 begin=true
void matrixMul_CMP_5(int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X])
{
if (guard_matrixMul_CMP_5)
{
}
}
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
....27.... after kernUnit.addDeclaratuibBefore, kernUnit is: #include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
const int BLOCKDIM_X = 16, BLOCKDIM_Y = 16;
#pragma fcuda compute name=CMP_5 end=false cores=1 begin=true
void matrixMul_CMP_5(int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X])
{
if (guard_matrixMul_CMP_5)
{
}
}
void matrixMul_TRN_10()
{
if (guard_matrixMul_TRN_10)
{
}
}
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
....28.... before FCUDAutils.addTaskMapping, mProcedure.getSymbolName is: matrixMul
....28.... before FCUDAutils.addTaskMapping, tskProc is: void matrixMul_TRN_10()
{
if (guard_matrixMul_TRN_10)
{
}
}
....29.... after FCUDAutils.addTaskMapping, mProcedure.getSymbolName is: matrixMul
....29.... after FCUDAutils.addTaskMapping, tskProc is: void matrixMul_TRN_10()
{
if (guard_matrixMul_TRN_10)
{
}
}
....30.... after FcudaAnnotation fannot, fannot is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
....31.... fannot is not null
....32.... done taskCreationPrelims
....32.... final taskCall is: matrixMul_TRN_10()
----4.0----taskCall is: matrixMul_TRN_10()
----4.0----taskCallStmt is: matrixMul_TRN_10();
----addStatementBefore----index is:36
----4.0----cStmt after addStatementBefore is: {
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
matrixMul_TRN_10();
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
----4.1----Starting to collect parameters for procedure: matrixMul_TRN_10
----4.1----trnData is: fcuda.common.TaskData@61ca2dfa
----4.1----tskStmts is: [#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true , #pragma fcuda tloop name=TRN_10 end=false begin=true , #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];, #pragma fcuda tloop name=TRN_10 end=true begin=false , #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false ]
----4.1----allTskSyms is: []
----4.2----curr Task Statement: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
----7.11----test_children is: []
----7.12----test_annot is: [#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true ]
----7.13---coresList is: [1]
----7.13---sizeList is: [BLOCKDIM_X]
----7.13---taskName is: TRN_10
----7.13---transferType is: null
----4.21---- AnnotationStatement continue
----4.2----curr Task Statement: #pragma fcuda tloop name=TRN_10 end=false begin=true
----7.11----test_children is: []
----7.12----test_annot is: [#pragma fcuda tloop name=TRN_10 end=false begin=true ]
----4.21---- AnnotationStatement continue
----4.2----curr Task Statement: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
----7.11----test_children is: [C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x]]
----7.12----test_annot is: [#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true ]
----7.14----stmtpragmas: []
----7.14----test_annot: [#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true ]
----7.14----real_stmt: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
----7.14---mOffchipPtrNameList is: [C]
----7.14---dirList is: false
----7.15----1.collecting parameters----
-------GLBpntr is: [C]
-------core: [1]
-------size: [BLOCKDIM_X]
-------rdNwrt: false
-------name: TRN_10
-------type: null
----4.3----in for loop DEFS, defExp: C[((c+(wB*threadIdx.y))+threadIdx.x)]
----4.3----Task def: C
----4.3----defSym: * C
----4.3----scalarDef: true
----4.3----constArrAcc: false
----4.3----allTskSyms: [* C]
----4.4----defDecl: DATATYPE * C
----4.4----defUses: []
----4.4---- ... has # of chain uses: 0
Task useExp: Csub_block[threadIdx.y][threadIdx.x] of task: matrixMul_TRN_10
Task use: Csub_block of task: matrixMul_TRN_10
... has # of chain defs: 2
... has Def out of task
> #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
Task useExp: c of task: matrixMul_TRN_10
Task use: c of task: matrixMul_TRN_10
... has # of chain defs: 1
... has Def out of task
> c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
Task useExp: threadIdx.x of task: matrixMul_TRN_10
Task useExp: threadIdx.y of task: matrixMul_TRN_10
Task useExp: wB of task: matrixMul_TRN_10
Task use: wB of task: matrixMul_TRN_10
... has # of chain defs: 1
... has Def out of task
> wB
----4.2----curr Task Statement: #pragma fcuda tloop name=TRN_10 end=true begin=false
----7.11----test_children is: []
----7.12----test_annot is: [#pragma fcuda tloop name=TRN_10 end=true begin=false ]
----4.21---- AnnotationStatement continue
----4.2----curr Task Statement: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
----7.11----test_children is: []
----7.12----test_annot is: [#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false ]
----4.21---- AnnotationStatement continue
----5.0----end of task statement, cStmt is: {
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
matrixMul_TRN_10();
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
----8.0----trnTaskName: matrixMul_TRN_10
--------sftTaskArgSyms: [wB, Csub_block[BLOCKDIM_Y][BLOCKDIM_X], c, guard_matrixMul_TRN_10, * C]
--------sftTaskArgs: [guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB]
--------sftTaskDecls: [int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB]
--------taskCall: matrixMul_TRN_10()
--------sftCommonArgsIndex: [1, 2, 4, 7]
----8.00----tskSym is: wB
----8.01----decl is: int wB
----8.02----inTaskDecl is: false
----8.03----sftTaskArgSyms contains tskSym
----8.00----tskSym is: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
----8.01----decl is: __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
----8.02----inTaskDecl is: false
----8.03----sftTaskArgSyms contains tskSym
----8.00----tskSym is: c
----8.01----decl is: int c
----8.02----inTaskDecl is: false
----8.03----sftTaskArgSyms contains tskSym
----8.00----tskSym is: * C
----8.01----decl is: DATATYPE * C
----8.02----inTaskDecl is: false
----8.03----sftTaskArgSyms contains tskSym
----8.07----trnTaskName: matrixMul_TRN_10
--------sftTaskArgSyms: [wB, Csub_block[BLOCKDIM_Y][BLOCKDIM_X], c, guard_matrixMul_TRN_10, * C]
--------sftTaskArgs: [guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB]
--------sftTaskDecls: [int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB]
--------taskCall: matrixMul_TRN_10()
--------sftCommonArgsIndex: [1, 2, 4, 7]
----8.08----trnTaskName: matrixMul_TRN_10
--------sftTaskArgSyms: [wB, Csub_block[BLOCKDIM_Y][BLOCKDIM_X], c, guard_matrixMul_TRN_10, * C]
--------sftTaskArgs: [guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB]
--------sftTaskDecls: [int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB]
--------taskCall: matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB)
--------sftCommonArgsIndex: [1, 2, 4, 7]
----0041----enter for_1, curr asCnt is: 13
----0042----annotStmt is:
----0043----list fcAnnots is: []
----0041----enter for_1, curr asCnt is: 14
----0042----annotStmt is:
----0043----list fcAnnots is: []
----0041----enter for_1, curr asCnt is: 15
----0042----annotStmt is:
----0043----list fcAnnots is: []
----0041----enter for_1, curr asCnt is: 16
----0042----annotStmt is:
----0043----list fcAnnots is: []
----0041----enter for_1, curr asCnt is: 17
----0042----annotStmt is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
----0043----list fcAnnots is: [#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true ]
----00431----enter for_2 curr fcannot is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
----00432----curr annotType is: transfer
----00433----curr bgn is: true
----00434----curr annotType is COM or TRN
----00435----curr bgn is true
----SFT2-decompose-before sanity check, inCMP is: false, inTRN is: false, tskName is: null
----0041----enter for_1, curr asCnt is: 18
----0042----annotStmt is: #pragma fcuda tloop name=TRN_6 end=false begin=true
----0043----list fcAnnots is: [#pragma fcuda tloop name=TRN_6 end=false begin=true ]
----00431----enter for_2 curr fcannot is: #pragma fcuda tloop name=TRN_6 end=false begin=true
----00432----curr annotType is: tloop
----00433----curr bgn is: true
----0043A----curr annotType is not COM nor TRN
----0041----enter for_1, curr asCnt is: 19
----0042----annotStmt is: #pragma fcuda tloop name=TRN_6 end=true begin=false
----0043----list fcAnnots is: [#pragma fcuda tloop name=TRN_6 end=true begin=false ]
----00431----enter for_2 curr fcannot is: #pragma fcuda tloop name=TRN_6 end=true begin=false
----00432----curr annotType is: tloop
----00433----curr bgn is: false
----0043A----curr annotType is not COM nor TRN
----0041----enter for_1, curr asCnt is: 20
----0042----annotStmt is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
----0043----list fcAnnots is: [#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false ]
----00431----enter for_2 curr fcannot is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
----00432----curr annotType is: transfer
----00433----curr bgn is: false
----00434----curr annotType is COM or TRN
----00436----curr bgn is false
----00439----enter inTRN decomposeCompute
----00439----bgnstmt is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
----00439----endStmt is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
----00439----tskNmae is: TRN_6
----4.0----trnTaskName is: matrixMul_TRN_6
----4.0----bgnIdx is: 4
----4.0----endIdx is: 9
... Prelim task handling: matrixMul_TRN_6
...tskName is: matrixMul_TRN_6
...mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
...print current fcudaGlobalData2 tasks...
Tasks: [matrixMul_TRN_6, matrixMul_TRN_10, matrixMul_CMP_5]
....00......................................
....01....enableSignal is: guard_matrixMul_TRN_6
....02....enDeclor is: guard_matrixMul_TRN_6
....03....enableDecl is: int guard_matrixMul_TRN_6
....04....after getbody addAnsideclaration, mprocedure_body is: {
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
----addStatementBefore----index is:14
....05....after addAfterLastDeclaration, mprocedure_body is: {
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
....06....parCStmt is: {
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
....07....idxDif is: 0
....08....taskCall is: matrixMul_TRN_6()
....09....after addTaskCall, tskName is: matrixMul_TRN_6
....09....after addTaskCall, taskCall is: matrixMul_TRN_6()
....10....after addFcudaCore, taskCall is: matrixMul_TRN_6()
....10....after addFcudaCore, mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
Creating new FcudaCoreData for core: matrixMul_TRN_6()
....11....after setCoreName, tskName is: matrixMul_TRN_6
....11....after setCoreName, taskCall is: matrixMul_TRN_6()
....11....after setCoreName, mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
....12....sftTaskArgs size is: 0
....12....sftTaskArgs is: []
....13....sftTaskArgs size is: 1
....13....sftTaskArgs is: [guard_matrixMul_TRN_6]
....14....sftTaskArgSyms is: [guard_matrixMul_TRN_6]
....15....sftTaskDecls size is: 1
....15....sftTaskDecls is: [int guard_matrixMul_TRN_6]
....16....sftTaskArgs size is: 2
....16....sftTaskArgs is: [guard_matrixMul_TRN_6, blockDim]
....16....sftTaskDecls size is: 4
....16....sftTaskDecls is: [int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx]
....17....sftCommonArgsIndex size is: 1
....17....sftCommonArgsIndex is: [1]
....18....sftTaskArgs size is: 3
....18....sftTaskArgs is: [guard_matrixMul_TRN_6, blockDim, gridDim]
....18....sftCommonArgsIndex size is: 2
....18....sftCommonArgsIndex is: [1, 2]
....19....sftTaskArgs size is: 4
....19....sftTaskArgs is: [guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx]
....20....enableStmt is: if (guard_matrixMul_TRN_6)
{
}
....21....fcTask is: {
if (guard_matrixMul_TRN_6)
{
}
}
....22....tskStmts size is: 19
....22....tskStmts is: [__shared__ DATATYPE As[16][16];, __shared__ DATATYPE Bs[16][16];, , , #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true , #pragma fcuda tloop name=TRN_6 end=false begin=true , #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];, #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];, #pragma fcuda tloop name=TRN_6 end=true begin=false , #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false , , , , #pragma fcuda compute cores=1 name=SNC_7 end=false begin=true , #pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();, {
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}, #pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();, #pragma fcuda compute cores=1 name=SNC_7 end=true begin=false , ]
Marking Statements 4 - 9 for task: matrixMul_TRN_6
....23....curr sIdx is: 4 , curr tskStmts to be added is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
....23....after adding, tskName is: matrixMul_TRN_6
....23....curr sIdx is: 5 , curr tskStmts to be added is: #pragma fcuda tloop name=TRN_6 end=false begin=true
....23....after adding, tskName is: matrixMul_TRN_6
....23....curr sIdx is: 6 , curr tskStmts to be added is: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
....23....after adding, tskName is: matrixMul_TRN_6
....23....curr sIdx is: 7 , curr tskStmts to be added is: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
....23....after adding, tskName is: matrixMul_TRN_6
....23....curr sIdx is: 8 , curr tskStmts to be added is: #pragma fcuda tloop name=TRN_6 end=true begin=false
....23....after adding, tskName is: matrixMul_TRN_6
....23....curr sIdx is: 9 , curr tskStmts to be added is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
....23....after adding, tskName is: matrixMul_TRN_6
........declList is: []
....24....ProcedureDeclarator tskProcDecl is: matrixMul_TRN_6()
....25....Procedure tskProc is: void matrixMul_TRN_6()
{
if (guard_matrixMul_TRN_6)
{
}
}
....26.... TranslationUnit kernUnit is: #include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
const int BLOCKDIM_X = 16, BLOCKDIM_Y = 16;
#pragma fcuda compute name=CMP_5 end=false cores=1 begin=true
void matrixMul_CMP_5(int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X])
{
if (guard_matrixMul_CMP_5)
{
}
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
void matrixMul_TRN_10(int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB)
{
if (guard_matrixMul_TRN_10)
{
}
}
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
....27.... after kernUnit.addDeclaratuibBefore, kernUnit is: #include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
const int BLOCKDIM_X = 16, BLOCKDIM_Y = 16;
#pragma fcuda compute name=CMP_5 end=false cores=1 begin=true
void matrixMul_CMP_5(int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X])
{
if (guard_matrixMul_CMP_5)
{
}
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
void matrixMul_TRN_10(int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB)
{
if (guard_matrixMul_TRN_10)
{
}
}
void matrixMul_TRN_6()
{
if (guard_matrixMul_TRN_6)
{
}
}
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
....28.... before FCUDAutils.addTaskMapping, mProcedure.getSymbolName is: matrixMul
....28.... before FCUDAutils.addTaskMapping, tskProc is: void matrixMul_TRN_6()
{
if (guard_matrixMul_TRN_6)
{
}
}
....29.... after FCUDAutils.addTaskMapping, mProcedure.getSymbolName is: matrixMul
....29.... after FCUDAutils.addTaskMapping, tskProc is: void matrixMul_TRN_6()
{
if (guard_matrixMul_TRN_6)
{
}
}
....30.... after FcudaAnnotation fannot, fannot is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
....31.... fannot is not null
....32.... done taskCreationPrelims
....32.... final taskCall is: matrixMul_TRN_6()
----4.0----taskCall is: matrixMul_TRN_6()
----4.0----taskCallStmt is: matrixMul_TRN_6();
----addStatementBefore----index is:4
----4.0----cStmt after addStatementBefore is: {
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
matrixMul_TRN_6();
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
----4.1----Starting to collect parameters for procedure: matrixMul_TRN_6
----4.1----trnData is: fcuda.common.TaskData@7ba18f1b
----4.1----tskStmts is: [#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true , #pragma fcuda tloop name=TRN_6 end=false begin=true , #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];, #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];, #pragma fcuda tloop name=TRN_6 end=true begin=false , #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false ]
----4.1----allTskSyms is: []
----4.2----curr Task Statement: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
----7.11----test_children is: []
----7.12----test_annot is: [#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true ]
----7.13---coresList is: [1]
----7.13---sizeList is: [BLOCKDIM_X]
----7.13---taskName is: TRN_6
----7.13---transferType is: null
----4.21---- AnnotationStatement continue
----4.2----curr Task Statement: #pragma fcuda tloop name=TRN_6 end=false begin=true
----7.11----test_children is: []
----7.12----test_annot is: [#pragma fcuda tloop name=TRN_6 end=false begin=true ]
----4.21---- AnnotationStatement continue
----4.2----curr Task Statement: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
----7.11----test_children is: [As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)]]
----7.12----test_annot is: [#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true ]
----7.14----stmtpragmas: []
----7.14----test_annot: [#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true ]
----7.14----real_stmt: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
----7.14---mOffchipPtrNameList is: [A]
----7.14---dirList is: true
----7.15----1.collecting parameters----
-------GLBpntr is: [A]
-------core: [1]
-------size: [BLOCKDIM_X]
-------rdNwrt: true
-------name: TRN_6
-------type: null
----4.3----in for loop DEFS, defExp: As[threadIdx.y][threadIdx.x]
----4.3----Task def: As
----4.3----defSym: As[16][16]
----4.3----scalarDef: false
----4.3----constArrAcc: false
----4.3----allTskSyms: [As[16][16]]
----4.4----defDecl: __shared__ DATATYPE As[16][16]
----4.4----defUses: [#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);]
----4.4---- ... has # of chain uses: 1
----4.5----curr useTrv: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
----4.51----!(taskContains(useTrv, tskStmts))
----4.51----sftTaskArgs: [guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As]
----4.51----sftTaskArgSyms: [As[16][16], guard_matrixMul_TRN_6]
----4.51----sftTaskDecls: [int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE As[16][16]]
... has Use out of task
> #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
----4.52----gonna break
Task useExp: A[((a+(wA*threadIdx.y))+threadIdx.x)] of task: matrixMul_TRN_6
Task use: A of task: matrixMul_TRN_6
... has # of chain defs: 1
... has Def out of task
> * A
Task useExp: a of task: matrixMul_TRN_6
Task use: a of task: matrixMul_TRN_6
... has # of chain defs: 2
... has Def out of task
> ((a=aBegin), (b=bBegin));
Task useExp: threadIdx.x of task: matrixMul_TRN_6
Task useExp: threadIdx.y of task: matrixMul_TRN_6
Task useExp: wA of task: matrixMul_TRN_6
Task use: wA of task: matrixMul_TRN_6
... has # of chain defs: 1
... has Def out of task
> wA
----4.2----curr Task Statement: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
----7.11----test_children is: [Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)]]
----7.12----test_annot is: [#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true ]
----7.14----stmtpragmas: []
----7.14----test_annot: [#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true ]
----7.14----real_stmt: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
----7.14---mOffchipPtrNameList is: [A, B]
----7.14---dirList is: true
----7.15----1.collecting parameters----
-------GLBpntr is: [A, B]
-------core: [1]
-------size: [BLOCKDIM_X]
-------rdNwrt: true
-------name: TRN_6
-------type: null
----4.3----in for loop DEFS, defExp: Bs[threadIdx.y][threadIdx.x]
----4.3----Task def: Bs
----4.3----defSym: Bs[16][16]
----4.3----scalarDef: false
----4.3----constArrAcc: false
----4.3----allTskSyms: [* A, Bs[16][16], wA, a, As[16][16]]
----4.4----defDecl: __shared__ DATATYPE Bs[16][16]
----4.4----defUses: [#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);]
----4.4---- ... has # of chain uses: 1
----4.5----curr useTrv: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
----4.51----!(taskContains(useTrv, tskStmts))
----4.51----sftTaskArgs: [guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs]
----4.51----sftTaskArgSyms: [* A, Bs[16][16], wA, a, As[16][16], guard_matrixMul_TRN_6]
----4.51----sftTaskDecls: [int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE As[16][16], DATATYPE * A, int a, int wA, __shared__ DATATYPE Bs[16][16]]
... has Use out of task
> #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
----4.52----gonna break
Task useExp: B[((b+(wB*threadIdx.y))+threadIdx.x)] of task: matrixMul_TRN_6
Task use: B of task: matrixMul_TRN_6
... has # of chain defs: 1
... has Def out of task
> * B
Task useExp: b of task: matrixMul_TRN_6
Task use: b of task: matrixMul_TRN_6
... has # of chain defs: 2
... has Def out of task
> ((a=aBegin), (b=bBegin));
Task useExp: threadIdx.x of task: matrixMul_TRN_6
Task useExp: threadIdx.y of task: matrixMul_TRN_6
Task useExp: wB of task: matrixMul_TRN_6
Task use: wB of task: matrixMul_TRN_6
... has # of chain defs: 1
... has Def out of task
> wB
----4.2----curr Task Statement: #pragma fcuda tloop name=TRN_6 end=true begin=false
----7.11----test_children is: []
----7.12----test_annot is: [#pragma fcuda tloop name=TRN_6 end=true begin=false ]
----4.21---- AnnotationStatement continue
----4.2----curr Task Statement: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
----7.11----test_children is: []
----7.12----test_annot is: [#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false ]
----4.21---- AnnotationStatement continue
----5.0----end of task statement, cStmt is: {
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
matrixMul_TRN_6();
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
----8.0----trnTaskName: matrixMul_TRN_6
--------sftTaskArgSyms: [wB, * A, Bs[16][16], wA, a, As[16][16], guard_matrixMul_TRN_6, b, * B]
--------sftTaskArgs: [guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB]
--------sftTaskDecls: [int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE As[16][16], DATATYPE * A, int a, int wA, __shared__ DATATYPE Bs[16][16], DATATYPE * B, int b, int wB]
--------taskCall: matrixMul_TRN_6()
--------sftCommonArgsIndex: [1, 2, 5, 7, 9, 11]
----8.00----tskSym is: wB
----8.01----decl is: int wB
----8.02----inTaskDecl is: false
----8.03----sftTaskArgSyms contains tskSym
----8.00----tskSym is: * A
----8.01----decl is: DATATYPE * A
----8.02----inTaskDecl is: false
----8.03----sftTaskArgSyms contains tskSym
----8.00----tskSym is: Bs[16][16]
----8.01----decl is: __shared__ DATATYPE Bs[16][16]
----8.02----inTaskDecl is: false
----8.03----sftTaskArgSyms contains tskSym
----8.00----tskSym is: wA
----8.01----decl is: int wA
----8.02----inTaskDecl is: false
----8.03----sftTaskArgSyms contains tskSym
----8.00----tskSym is: a
----8.01----decl is: int a
----8.02----inTaskDecl is: false
----8.03----sftTaskArgSyms contains tskSym
----8.00----tskSym is: As[16][16]
----8.01----decl is: __shared__ DATATYPE As[16][16]
----8.02----inTaskDecl is: false
----8.03----sftTaskArgSyms contains tskSym
----8.00----tskSym is: b
----8.01----decl is: int b
----8.02----inTaskDecl is: false
----8.03----sftTaskArgSyms contains tskSym
----8.00----tskSym is: * B
----8.01----decl is: DATATYPE * B
----8.02----inTaskDecl is: false
----8.03----sftTaskArgSyms contains tskSym
----8.07----trnTaskName: matrixMul_TRN_6
--------sftTaskArgSyms: [wB, * A, Bs[16][16], wA, a, As[16][16], guard_matrixMul_TRN_6, b, * B]
--------sftTaskArgs: [guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB]
--------sftTaskDecls: [int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE As[16][16], DATATYPE * A, int a, int wA, __shared__ DATATYPE Bs[16][16], DATATYPE * B, int b, int wB]
--------taskCall: matrixMul_TRN_6()
--------sftCommonArgsIndex: [1, 2, 5, 7, 9, 11]
----8.08----trnTaskName: matrixMul_TRN_6
--------sftTaskArgSyms: [wB, * A, Bs[16][16], wA, a, As[16][16], guard_matrixMul_TRN_6, b, * B]
--------sftTaskArgs: [guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB]
--------sftTaskDecls: [int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE As[16][16], DATATYPE * A, int a, int wA, __shared__ DATATYPE Bs[16][16], DATATYPE * B, int b, int wB]
--------taskCall: matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB)
--------sftCommonArgsIndex: [1, 2, 5, 7, 9, 11]
----0041----enter for_1, curr asCnt is: 21
----0042----annotStmt is:
----0043----list fcAnnots is: []
----0041----enter for_1, curr asCnt is: 22
----0042----annotStmt is:
----0043----list fcAnnots is: []
----0041----enter for_1, curr asCnt is: 23
----0042----annotStmt is:
----0043----list fcAnnots is: []
----0041----enter for_1, curr asCnt is: 24
----0042----annotStmt is: #pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
----0043----list fcAnnots is: [#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true ]
----00431----enter for_2 curr fcannot is: #pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
----00432----curr annotType is: compute
----00433----curr bgn is: true
----00434----curr annotType is COM or TRN
----00435----curr bgn is true
----SFT2-decompose-before sanity check, inCMP is: false, inTRN is: false, tskName is: null
----0041----enter for_1, curr asCnt is: 25
----0042----annotStmt is: #pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
----0043----list fcAnnots is: [#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false ]
----00431----enter for_2 curr fcannot is: #pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
----00432----curr annotType is: compute
----00433----curr bgn is: false
----00434----curr annotType is COM or TRN
----00436----curr bgn is false
----00438----enter inCMP decomposeCompute
----00438----bgnstmt is: #pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
----00438----endStmt is: #pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
----00438----tskNmae is: SNC_7
... Prelim task handling: matrixMul_SNC_7
...tskName is: matrixMul_SNC_7
...mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
...print current fcudaGlobalData2 tasks...
Tasks: [matrixMul_TRN_6, matrixMul_SNC_7, matrixMul_TRN_10, matrixMul_CMP_5]
....00......................................
....01....enableSignal is: guard_matrixMul_SNC_7
....02....enDeclor is: guard_matrixMul_SNC_7
....03....enableDecl is: int guard_matrixMul_SNC_7
....04....after getbody addAnsideclaration, mprocedure_body is: {
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
----addStatementBefore----index is:15
....05....after addAfterLastDeclaration, mprocedure_body is: {
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
....06....parCStmt is: {
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
....07....idxDif is: 0
....08....taskCall is: matrixMul_SNC_7()
....09....after addTaskCall, tskName is: matrixMul_SNC_7
....09....after addTaskCall, taskCall is: matrixMul_SNC_7()
....10....after addFcudaCore, taskCall is: matrixMul_SNC_7()
....10....after addFcudaCore, mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
Creating new FcudaCoreData for core: matrixMul_SNC_7()
....11....after setCoreName, tskName is: matrixMul_SNC_7
....11....after setCoreName, taskCall is: matrixMul_SNC_7()
....11....after setCoreName, mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
....12....sftTaskArgs size is: 0
....12....sftTaskArgs is: []
....13....sftTaskArgs size is: 1
....13....sftTaskArgs is: [guard_matrixMul_SNC_7]
....14....sftTaskArgSyms is: [guard_matrixMul_SNC_7]
....15....sftTaskDecls size is: 1
....15....sftTaskDecls is: [int guard_matrixMul_SNC_7]
....16....sftTaskArgs size is: 2
....16....sftTaskArgs is: [guard_matrixMul_SNC_7, blockDim]
....16....sftTaskDecls size is: 4
....16....sftTaskDecls is: [int guard_matrixMul_SNC_7, dim3 blockDim, dim3 gridDim, dim3 blockIdx]
....17....sftCommonArgsIndex size is: 1
....17....sftCommonArgsIndex is: [1]
....18....sftTaskArgs size is: 3
....18....sftTaskArgs is: [guard_matrixMul_SNC_7, blockDim, gridDim]
....18....sftCommonArgsIndex size is: 2
....18....sftCommonArgsIndex is: [1, 2]
....19....sftTaskArgs size is: 4
....19....sftTaskArgs is: [guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx]
....20....enableStmt is: if (guard_matrixMul_SNC_7)
{
}
....21....fcTask is: {
if (guard_matrixMul_SNC_7)
{
}
}
....22....tskStmts size is: 20
....22....tskStmts is: [__shared__ DATATYPE As[16][16];, __shared__ DATATYPE Bs[16][16];, , , matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);, #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true , #pragma fcuda tloop name=TRN_6 end=false begin=true , #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];, #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];, #pragma fcuda tloop name=TRN_6 end=true begin=false , #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false , , , , #pragma fcuda compute cores=1 name=SNC_7 end=false begin=true , #pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();, {
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}, #pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();, #pragma fcuda compute cores=1 name=SNC_7 end=true begin=false , ]
Marking Statements 14 - 18 for task: matrixMul_SNC_7
....23....curr sIdx is: 14 , curr tskStmts to be added is: #pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
....23....after adding, tskName is: matrixMul_SNC_7
....23....curr sIdx is: 15 , curr tskStmts to be added is: #pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
....23....after adding, tskName is: matrixMul_SNC_7
....23....curr sIdx is: 16 , curr tskStmts to be added is: {
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
....23....after adding, tskName is: matrixMul_SNC_7
....23....curr sIdx is: 17 , curr tskStmts to be added is: #pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
....23....after adding, tskName is: matrixMul_SNC_7
....23....curr sIdx is: 18 , curr tskStmts to be added is: #pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
....23....after adding, tskName is: matrixMul_SNC_7
........declList is: []
....24....ProcedureDeclarator tskProcDecl is: matrixMul_SNC_7()
....25....Procedure tskProc is: void matrixMul_SNC_7()
{
if (guard_matrixMul_SNC_7)
{
}
}
....26.... TranslationUnit kernUnit is: #include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
const int BLOCKDIM_X = 16, BLOCKDIM_Y = 16;
#pragma fcuda compute name=CMP_5 end=false cores=1 begin=true
void matrixMul_CMP_5(int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X])
{
if (guard_matrixMul_CMP_5)
{
}
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
void matrixMul_TRN_10(int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB)
{
if (guard_matrixMul_TRN_10)
{
}
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
void matrixMul_TRN_6(int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE As[16][16], DATATYPE * A, int a, int wA, __shared__ DATATYPE Bs[16][16], DATATYPE * B, int b, int wB)
{
if (guard_matrixMul_TRN_6)
{
}
}
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
....27.... after kernUnit.addDeclaratuibBefore, kernUnit is: #include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
const int BLOCKDIM_X = 16, BLOCKDIM_Y = 16;
#pragma fcuda compute name=CMP_5 end=false cores=1 begin=true
void matrixMul_CMP_5(int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X])
{
if (guard_matrixMul_CMP_5)
{
}
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
void matrixMul_TRN_10(int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB)
{
if (guard_matrixMul_TRN_10)
{
}
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
void matrixMul_TRN_6(int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE As[16][16], DATATYPE * A, int a, int wA, __shared__ DATATYPE Bs[16][16], DATATYPE * B, int b, int wB)
{
if (guard_matrixMul_TRN_6)
{
}
}
void matrixMul_SNC_7()
{
if (guard_matrixMul_SNC_7)
{
}
}
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
....28.... before FCUDAutils.addTaskMapping, mProcedure.getSymbolName is: matrixMul
....28.... before FCUDAutils.addTaskMapping, tskProc is: void matrixMul_SNC_7()
{
if (guard_matrixMul_SNC_7)
{
}
}
....29.... after FCUDAutils.addTaskMapping, mProcedure.getSymbolName is: matrixMul
....29.... after FCUDAutils.addTaskMapping, tskProc is: void matrixMul_SNC_7()
{
if (guard_matrixMul_SNC_7)
{
}
}
....30.... after FcudaAnnotation fannot, fannot is: #pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
....31.... fannot is not null
....32.... done taskCreationPrelims
....32.... final taskCall is: matrixMul_SNC_7()
----addStatementBefore----index is:14
Starting to collect parameters for procedure: matrixMul_SNC_7
Task Statement: #pragma fcuda compute cores=1 name=SNC_7 end=false begin=true of task: matrixMul_SNC_7
Task Statement: #pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads(); of task: matrixMul_SNC_7
Task Statement: {
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
} of task: matrixMul_SNC_7
Task defExp: Csub_block[threadIdx.y][threadIdx.x] of task: matrixMul_SNC_7
Task def: Csub_block of task: matrixMul_SNC_7
... has # of chain uses: 2
Check Uses: [#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);, #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];]
... has Use out of task
> #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
Task defExp: k of task: matrixMul_SNC_7
Task def: k of task: matrixMul_SNC_7
... has # of chain uses: 4
Check Uses: [k<16, #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);, #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);, ++ k]
Task useExp: As[threadIdx.y][k] of task: matrixMul_SNC_7
Task use: As of task: matrixMul_SNC_7
... has # of chain defs: 1
Check Defs: [#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];]
... has Def out of task
> #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Task useExp: Bs[k][threadIdx.x] of task: matrixMul_SNC_7
Task use: Bs of task: matrixMul_SNC_7
... has # of chain defs: 1
Check Defs: [#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];]
... has Def out of task
> #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
Task useExp: Csub_block[threadIdx.y][threadIdx.x] of task: matrixMul_SNC_7
Task use: Csub_block of task: matrixMul_SNC_7
Task useExp: k of task: matrixMul_SNC_7
Task use: k of task: matrixMul_SNC_7
... has # of chain defs: 2
Check Defs: [#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0;, ++ k]
Task useExp: threadIdx.x of task: matrixMul_SNC_7
Task useExp: threadIdx.y of task: matrixMul_SNC_7
Task Statement: #pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads(); of task: matrixMul_SNC_7
Task Statement: #pragma fcuda compute cores=1 name=SNC_7 end=true begin=false of task: matrixMul_SNC_7
Shift Decl: int k to proc: matrixMul_SNC_7
----0041----enter for_1, curr asCnt is: 26
----0042----annotStmt is:
----0043----list fcAnnots is: []
----0041----enter for_1, curr asCnt is: 27
----0042----annotStmt is:
----0043----list fcAnnots is: []
----0041----enter for_1, curr asCnt is: 28
----0042----annotStmt is: #pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
----0043----list fcAnnots is: [#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true ]
----00431----enter for_2 curr fcannot is: #pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
----00432----curr annotType is: tloop
----00433----curr bgn is: true
----0043A----curr annotType is not COM nor TRN
----0041----enter for_1, curr asCnt is: 29
----0042----annotStmt is: #pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
----0043----list fcAnnots is: [#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false ]
----00431----enter for_2 curr fcannot is: #pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
----00432----curr annotType is: tloop
----00433----curr bgn is: false
----0043A----curr annotType is not COM nor TRN
----0041----enter for_1, curr asCnt is: 30
----0042----annotStmt is:
----0043----list fcAnnots is: []
-----SFT2 finished decomposeKernel();-----
-----proc after decomposeKernel-----
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
-----6.0----generateMemcpy starts-----
----6.1----stmt is: int aBegin;
----6.11----test_children is: [int aBegin]
----6.12----test_annot is: null
----6.1----stmt is: int aEnd;
----6.11----test_children is: [int aEnd]
----6.12----test_annot is: null
----6.1----stmt is: int aStep;
----6.11----test_children is: [int aStep]
----6.12----test_annot is: null
----6.1----stmt is: int bBegin;
----6.11----test_children is: [int bBegin]
----6.12----test_annot is: null
----6.1----stmt is: int bStep;
----6.11----test_children is: [int bStep]
----6.12----test_annot is: null
----6.1----stmt is: __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
----6.11----test_children is: [__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X]]
----6.12----test_annot is: null
----6.1----stmt is: int a;
----6.11----test_children is: [int a]
----6.12----test_annot is: null
----6.1----stmt is: int b;
----6.11----test_children is: [int b]
----6.12----test_annot is: null
----6.1----stmt is: int k;
----6.11----test_children is: [int k]
----6.12----test_annot is: null
----6.1----stmt is: int c;
----6.11----test_children is: [int c]
----6.12----test_annot is: null
----6.1----stmt is: dim3 blockIdx;
----6.11----test_children is: [dim3 blockIdx]
----6.12----test_annot is: null
----6.1----stmt is: int guard_matrixMul_CMP_5;
----6.11----test_children is: [int guard_matrixMul_CMP_5]
----6.12----test_annot is: null
----6.1----stmt is: int guard_matrixMul_TRN_10;
----6.11----test_children is: [int guard_matrixMul_TRN_10]
----6.12----test_annot is: null
----6.1----stmt is: int guard_matrixMul_TRN_6;
----6.11----test_children is: [int guard_matrixMul_TRN_6]
----6.12----test_annot is: null
----6.1----stmt is: int guard_matrixMul_SNC_7;
----6.11----test_children is: [int guard_matrixMul_SNC_7]
----6.12----test_annot is: null
----6.1----stmt is: guard_matrixMul_SNC_7=1;
----6.11----test_children is: [guard_matrixMul_SNC_7=1]
----6.12----test_annot is: null
----6.1----stmt is: guard_matrixMul_TRN_6=1;
----6.11----test_children is: [guard_matrixMul_TRN_6=1]
----6.12----test_annot is: null
----6.1----stmt is: guard_matrixMul_TRN_10=1;
----6.11----test_children is: [guard_matrixMul_TRN_10=1]
----6.12----test_annot is: null
----6.1----stmt is: guard_matrixMul_CMP_5=1;
----6.11----test_children is: [guard_matrixMul_CMP_5=1]
----6.12----test_annot is: null
----6.1----stmt is: #pragma HLS INTERFACE ap_bus port=A depth=3840
----6.11----test_children is: []
----6.12----test_annot is: [#pragma HLS INTERFACE ap_bus port=A depth=3840 ]
----6.1----stmt is: #pragma HLS INTERFACE ap_bus port=B depth=6144
----6.11----test_children is: []
----6.12----test_annot is: [#pragma HLS INTERFACE ap_bus port=B depth=6144 ]
----6.1----stmt is: #pragma HLS INTERFACE ap_bus port=C depth=10240
----6.11----test_children is: []
----6.12----test_annot is: [#pragma HLS INTERFACE ap_bus port=C depth=10240 ]
----6.1----stmt is: aBegin=((wA*16)*blockIdx.y);
----6.11----test_children is: [aBegin=((wA*16)*blockIdx.y)]
----6.12----test_annot is: null
----6.1----stmt is: aEnd=((aBegin+wA)-1);
----6.11----test_children is: [aEnd=((aBegin+wA)-1)]
----6.12----test_annot is: null
----6.1----stmt is: aStep=16;
----6.11----test_children is: [aStep=16]
----6.12----test_annot is: null
----6.1----stmt is: bBegin=(16*blockIdx.x);
----6.11----test_children is: [bBegin=(16*blockIdx.x)]
----6.12----test_annot is: null
----6.1----stmt is: bStep=(16*wB);
----6.11----test_children is: [bStep=(16*wB)]
----6.12----test_annot is: null
----6.1----stmt is: matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
----6.11----test_children is: [matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block)]
----6.12----test_annot is: null
----6.1----stmt is: #pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
----6.11----test_children is: []
----6.12----test_annot is: [#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true ]
... Preprocessing pragma:
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
----6.1----stmt is: #pragma fcuda tloop name=CMP_5 end=false begin=true
----6.11----test_children is: []
----6.12----test_annot is: [#pragma fcuda tloop name=CMP_5 end=false begin=true ]
... Preprocessing pragma:
#pragma fcuda tloop name=CMP_5 end=false begin=true
----6.15----find tloop
----6.1----stmt is: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
----6.11----test_children is: [Csub_block[threadIdx.y][threadIdx.x]=0]
----6.12----test_annot is: [#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true ]
----6.13----find stmt annot
... Preprocessing pragma:
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
----6.1----stmt is: #pragma fcuda tloop name=CMP_5 end=true begin=false
----6.11----test_children is: []
----6.12----test_annot is: [#pragma fcuda tloop name=CMP_5 end=true begin=false ]
... Preprocessing pragma:
#pragma fcuda tloop name=CMP_5 end=true begin=false
----6.15----find tloop
----6.1----stmt is: #pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
----6.11----test_children is: []
----6.12----test_annot is: [#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false ]
... Preprocessing pragma:
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
----6.1----stmt is: a=0;
----6.11----test_children is: [a=0]
----6.12----test_annot is: null
----6.1----stmt is: b=0;
----6.11----test_children is: [b=0]
----6.12----test_annot is: null
----6.1----stmt is: k=0;
----6.11----test_children is: [k=0]
----6.12----test_annot is: null
----6.1----stmt is: ((a=aBegin), (b=bBegin));
----6.11----test_children is: [((a=aBegin), (b=bBegin))]
----6.12----test_annot is: null
----6.1----stmt is: __shared__ DATATYPE As[16][16];
----6.11----test_children is: [__shared__ DATATYPE As[16][16]]
----6.12----test_annot is: null
----6.1----stmt is: __shared__ DATATYPE Bs[16][16];
----6.11----test_children is: [__shared__ DATATYPE Bs[16][16]]
----6.12----test_annot is: null
----6.1----stmt is:
----6.11----test_children is: []
----6.12----test_annot is: []
----6.1----stmt is:
----6.11----test_children is: []
----6.12----test_annot is: []
----6.1----stmt is: matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
----6.11----test_children is: [matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB)]
----6.12----test_annot is: null
----6.1----stmt is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
----6.11----test_children is: []
----6.12----test_annot is: [#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true ]
... Preprocessing pragma:
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
----6.14----find transfer begin
----6.1----stmt is: #pragma fcuda tloop name=TRN_6 end=false begin=true
----6.11----test_children is: []
----6.12----test_annot is: [#pragma fcuda tloop name=TRN_6 end=false begin=true ]
... Preprocessing pragma:
#pragma fcuda tloop name=TRN_6 end=false begin=true
----6.15----find tloop
----6.1----stmt is: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
----6.11----test_children is: [As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)]]
----6.12----test_annot is: [#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true ]
----6.13----find stmt annot
... Preprocessing pragma:
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
----6.16----find stmt
----6.16----children size: 1
----6.160----stmtpragmas: []
----6.161----test_annot: [#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true ]
----6.162----real_stmt: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
----6.1----stmt is: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
----6.11----test_children is: [Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)]]
----6.12----test_annot is: [#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true ]
----6.13----find stmt annot
... Preprocessing pragma:
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
----6.16----find stmt
----6.16----children size: 1
----6.160----stmtpragmas: []
----6.161----test_annot: [#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true ]
----6.162----real_stmt: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
----6.1----stmt is: #pragma fcuda tloop name=TRN_6 end=true begin=false
----6.11----test_children is: []
----6.12----test_annot is: [#pragma fcuda tloop name=TRN_6 end=true begin=false ]
... Preprocessing pragma:
#pragma fcuda tloop name=TRN_6 end=true begin=false
----6.15----find tloop
----6.1----stmt is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
----6.11----test_children is: []
----6.12----test_annot is: [#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false ]
... Preprocessing pragma:
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
----6.17----find transfer end
----6.18----ready to enter addTransferParameters
----transferAnnot is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
----transferStmt is: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
----real_trn is: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
----taskDecls is: [dim3 blockDim, dim3 gridDim]
----taskArgs is: [blockDim, gridDim]
----taskArgSet is: [blockDim, gridDim]
----commonArgsIndex is: [0, 1]
----6.19----enter addTransferParameters2
... Handling transfer params for
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
----6.23----1.collecting parameters----
-------GLBpntr is: [B]
-------core: [1]
-------size: [BLOCKDIM_X]
-------rdNwrt: true
-------name: TRN_6
-------type: null
-------base: []
----6.24----transferAnnot.getAnnotatable: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
----6.25--------
----annotStmt: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
----cStmt: {
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
----transferStmtf2: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
---------------------------------
----6.26------ptrName is: B
----6.26------ptrId is: B
----6.27------ptrDecl is: DATATYPE * B
----6.28------ptrDeclor is: * B
----6.28------ptrDeclorSpecs is: [* ]
----6.29----ptrDecl.getSpecifiers(): [DATATYPE]
----6.30--volatPtrDecl: DATATYPE * B
----6.300----taskArgSet not contain ptrId, enter if
----6.31--transferStmtf2: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
transferStmtf2: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
----6.31--real_trn: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
[findBRAM]: Annotated Statement --> #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
---Assignments--- [Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)]]
----6.32--bramId: Bs
----6.33--bramDecl: __shared__ DATATYPE Bs[16][16]
------6.331---in try block now------
------6.331---in catch block now------
------6.332---taskArgSet not contain bramId, enter if------
BRAM: Bs Dim: 2
Array access, but dimension greater than 1 Bs[threadIdx.y][threadIdx.x]
------6.34----onChipOffset is: null
------6.34----prefixOffset is: TRN_6_Bs_offset
------6.34----coeffOffset is: TRN_6_Bs_offset
------6.34----offsetDeclor is: TRN_6_Bs_offset
------6.34----offsetDeclion is: int TRN_6_Bs_offset
----addStatementBefore----index is:4
------6.341----before taskArgs: [blockDim, gridDim, B, Bs]
------6.341----before taskDecls: [dim3 blockDim, dim3 gridDim, DATATYPE * B, __shared__ DATATYPE Bs[16][16]]
------6.341----after taskArgs: [blockDim, gridDim, B, Bs, TRN_6_Bs_offset]
------6.341----after taskDecls: [dim3 blockDim, dim3 gridDim, DATATYPE * B, __shared__ DATATYPE Bs[16][16], int TRN_6_Bs_offset]
------6.35----after offset added, cStmt is: {
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
------6.36----after memcpyArgs added, memcpyArgs is: [(TRN_6_Bs_offset+Bs[threadIdx.y])]
Parsing ((b+(wB*threadIdx.y))+threadIdx.x)
Terms [b, (wB*threadIdx.y), threadIdx.x]
Terms [threadIdx.y]
Base expr b
c1 = wB
c2 = null
c3 = 1
c4 = null
c5 = null
------6.37----baseAddrForBurst is: b
------6.370----j is: 0
------6.371----prefix is: TRN_6_Bs_X
------6.372----coeffVar is: TRN_6_Bs_X_0
------6.373----cDeclor is: TRN_6_Bs_X_0
------6.374----cDeclion is: int TRN_6_Bs_X_0
----addStatementBefore----index is:5
------6.38----taskArgs is: [blockDim, gridDim, B, Bs, TRN_6_Bs_offset, TRN_6_Bs_X_0]
------6.38----taskDecls is: [dim3 blockDim, dim3 gridDim, DATATYPE * B, __shared__ DATATYPE Bs[16][16], int TRN_6_Bs_offset, int TRN_6_Bs_X_0]
------6.38----cStmt is: {
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
------6.370----j is: 1
------6.371----prefix is: TRN_6_Bs_c
------6.372----coeffVar is: TRN_6_Bs_c_1
------6.373----cDeclor is: TRN_6_Bs_c_1
------6.374----cDeclion is: int TRN_6_Bs_c_1
----addStatementBefore----index is:6
------6.38----taskArgs is: [blockDim, gridDim, B, Bs, TRN_6_Bs_offset, TRN_6_Bs_X_0, TRN_6_Bs_c_1]
------6.38----taskDecls is: [dim3 blockDim, dim3 gridDim, DATATYPE * B, __shared__ DATATYPE Bs[16][16], int TRN_6_Bs_offset, int TRN_6_Bs_X_0, int TRN_6_Bs_c_1]
------6.38----cStmt is: {
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
------6.39----memcpyArgs is: [(TRN_6_Bs_offset+Bs[threadIdx.y]), ((B+TRN_6_Bs_X_0)+(threadIdx.y*TRN_6_Bs_c_1))]
------6.40----after memsize, memcpyArgs is: [(TRN_6_Bs_offset+Bs[threadIdx.y]), ((B+TRN_6_Bs_X_0)+(threadIdx.y*TRN_6_Bs_c_1)), (BLOCKDIM_X*sizeof (DATATYPE))]
------6.40----memcpyCall is: memcpy((TRN_6_Bs_offset+Bs[threadIdx.y]), ((B+TRN_6_Bs_X_0)+(threadIdx.y*TRN_6_Bs_c_1)), (BLOCKDIM_X*sizeof (DATATYPE)))
------6.42----after add memcpy before real_trn: {
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
--------sftTaskArgSyms: [Bs[16][16], Csub_block[BLOCKDIM_Y][BLOCKDIM_X], As[16][16], guard_matrixMul_SNC_7]
--------sftTaskArgs: [guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs]
--------sftTaskDecls: [int guard_matrixMul_SNC_7, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], __shared__ DATATYPE As[16][16], __shared__ DATATYPE Bs[16][16]]
--------sftCommonArgsIndex: [1, 2]
------6.44----before leave addTrans, cstmt is: {
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
----6.1----stmt is:
----6.11----test_children is: []
----6.12----test_annot is: []
----6.1----stmt is:
----6.11----test_children is: []
----6.12----test_annot is: []
----6.1----stmt is:
----6.11----test_children is: []
----6.12----test_annot is: []
----6.1----stmt is: matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
----6.11----test_children is: [matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs)]
----6.12----test_annot is: null
----6.1----stmt is: #pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
----6.11----test_children is: []
----6.12----test_annot is: [#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true ]
... Preprocessing pragma:
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
----6.1----stmt is: #pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
----6.11----test_children is: [__syncthreads()]
----6.12----test_annot is: [#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7 ]
----6.13----find stmt annot
... Preprocessing pragma:
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
----6.1----stmt is: lp1:
----6.11----test_children is: [lp1]
----6.12----test_annot is: null
----6.1----stmt is:
----6.11----test_children is: []
----6.12----test_annot is: []
----6.1----stmt is: #pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
----6.11----test_children is: []
----6.12----test_annot is: [#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true ]
... Preprocessing pragma:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
----6.15----find tloop
----6.1----stmt is: #pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0;
----6.11----test_children is: [k=0]
----6.12----test_annot is: [#pragma fcuda stmt tlpName=FOR_HTG_CMP_8 ]
----6.13----find stmt annot
... Preprocessing pragma:
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
----6.1----stmt is: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
----6.11----test_children is: [Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x])]
----6.12----test_annot is: [#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true ]
----6.13----find stmt annot
... Preprocessing pragma:
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
----6.1----stmt is: #pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
----6.11----test_children is: [#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);]
----6.12----test_annot is: [#pragma fcuda stmt tlpName=FOR_HTG_CMP_8 ]
----6.13----find stmt annot
... Preprocessing pragma:
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
----6.1----stmt is: #pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
----6.11----test_children is: [#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0;, k<16, ++ k, #pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}]
----6.12----test_annot is: [#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8 ]
----6.13----find stmt annot
... Preprocessing pragma:
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
----6.1----stmt is: #pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
----6.11----test_children is: []
----6.12----test_annot is: [#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false ]
... Preprocessing pragma:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
----6.15----find tloop
----6.1----stmt is:
----6.11----test_children is: []
----6.12----test_annot is: []
----6.1----stmt is: {
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
----6.11----test_children is: [lp1:, , #pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true , #pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}, #pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false , ]
----6.12----test_annot is: null
----6.1----stmt is: #pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
----6.11----test_children is: [__syncthreads()]
----6.12----test_annot is: [#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9 ]
----6.13----find stmt annot
... Preprocessing pragma:
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
----6.1----stmt is: #pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
----6.11----test_children is: []
----6.12----test_annot is: [#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false ]
... Preprocessing pragma:
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
----6.1----stmt is:
----6.11----test_children is: []
----6.12----test_annot is: []
----6.1----stmt is: {
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
----6.11----test_children is: [__shared__ DATATYPE As[16][16];, __shared__ DATATYPE Bs[16][16];, , , TRN_6_Bs_offset=0;, TRN_6_Bs_X_0=b;, TRN_6_Bs_c_1=wB;, matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);, #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true , #pragma fcuda tloop name=TRN_6 end=false begin=true , #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];, #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];, #pragma fcuda tloop name=TRN_6 end=true begin=false , #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false , , , , matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);, #pragma fcuda compute cores=1 name=SNC_7 end=false begin=true , #pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();, {
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}, #pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();, #pragma fcuda compute cores=1 name=SNC_7 end=true begin=false , ]
----6.12----test_annot is: null
----6.1----stmt is: #pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
----6.11----test_children is: [((a=aBegin), (b=bBegin));, a<=aEnd, ((a+=aStep), (b+=bStep)), {
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}]
----6.12----test_annot is: [#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6 ]
----6.13----find stmt annot
... Preprocessing pragma:
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
----6.1----stmt is: c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
----6.11----test_children is: [c=(((wB*16)*blockIdx.y)+(16*blockIdx.x))]
----6.12----test_annot is: null
----6.1----stmt is:
----6.11----test_children is: []
----6.12----test_annot is: []
----6.1----stmt is:
----6.11----test_children is: []
----6.12----test_annot is: []
----6.1----stmt is: matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
----6.11----test_children is: [matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB)]
----6.12----test_annot is: null
----6.1----stmt is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
----6.11----test_children is: []
----6.12----test_annot is: [#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true ]
... Preprocessing pragma:
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
----6.14----find transfer begin
----6.1----stmt is: #pragma fcuda tloop name=TRN_10 end=false begin=true
----6.11----test_children is: []
----6.12----test_annot is: [#pragma fcuda tloop name=TRN_10 end=false begin=true ]
... Preprocessing pragma:
#pragma fcuda tloop name=TRN_10 end=false begin=true
----6.15----find tloop
----6.1----stmt is: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
----6.11----test_children is: [C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x]]
----6.12----test_annot is: [#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true ]
----6.13----find stmt annot
... Preprocessing pragma:
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
----6.16----find stmt
----6.16----children size: 1
----6.160----stmtpragmas: []
----6.161----test_annot: [#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true ]
----6.162----real_stmt: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
----6.1----stmt is: #pragma fcuda tloop name=TRN_10 end=true begin=false
----6.11----test_children is: []
----6.12----test_annot is: [#pragma fcuda tloop name=TRN_10 end=true begin=false ]
... Preprocessing pragma:
#pragma fcuda tloop name=TRN_10 end=true begin=false
----6.15----find tloop
----6.1----stmt is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
----6.11----test_children is: []
----6.12----test_annot is: [#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false ]
... Preprocessing pragma:
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
----6.17----find transfer end
----6.18----ready to enter addTransferParameters
----transferAnnot is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
----transferStmt is: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
----real_trn is: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
----taskDecls is: [dim3 blockDim, dim3 gridDim]
----taskArgs is: [blockDim, gridDim]
----taskArgSet is: [blockDim, gridDim]
----commonArgsIndex is: [0, 1]
----6.19----enter addTransferParameters2
... Handling transfer params for
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
----6.23----1.collecting parameters----
-------GLBpntr is: [C]
-------core: [1]
-------size: [BLOCKDIM_X]
-------rdNwrt: false
-------name: TRN_10
-------type: null
-------base: []
----6.24----transferAnnot.getAnnotatable: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
----6.25--------
----annotStmt: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
----cStmt: {
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
----transferStmtf2: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
---------------------------------
----6.26------ptrName is: C
----6.26------ptrId is: C
----6.27------ptrDecl is: DATATYPE * C
----6.28------ptrDeclor is: * C
----6.28------ptrDeclorSpecs is: [* ]
----6.29----ptrDecl.getSpecifiers(): [DATATYPE]
----6.30--volatPtrDecl: DATATYPE * C
----6.300----taskArgSet not contain ptrId, enter if
----6.31--transferStmtf2: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
transferStmtf2: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
----6.31--real_trn: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
[findBRAM]: Annotated Statement --> #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
---Assignments--- [C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x]]
----6.32--bramId: Csub_block
----6.33--bramDecl: __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
------6.331---in try block now------
------6.331---in catch block now------
------6.332---taskArgSet not contain bramId, enter if------
BRAM: Csub_block Dim: 2
Array access, but dimension greater than 1 Csub_block[threadIdx.y][threadIdx.x]
------6.34----onChipOffset is: null
------6.34----prefixOffset is: TRN_10_Csub_block_offset
------6.34----coeffOffset is: TRN_10_Csub_block_offset
------6.34----offsetDeclor is: TRN_10_Csub_block_offset
------6.34----offsetDeclion is: int TRN_10_Csub_block_offset
----addStatementBefore----index is:44
------6.341----before taskArgs: [blockDim, gridDim, C, Csub_block]
------6.341----before taskDecls: [dim3 blockDim, dim3 gridDim, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X]]
------6.341----after taskArgs: [blockDim, gridDim, C, Csub_block, TRN_10_Csub_block_offset]
------6.341----after taskDecls: [dim3 blockDim, dim3 gridDim, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int TRN_10_Csub_block_offset]
------6.35----after offset added, cStmt is: {
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
------6.36----after memcpyArgs added, memcpyArgs is: [(TRN_10_Csub_block_offset+Csub_block[threadIdx.y])]
Parsing ((c+(wB*threadIdx.y))+threadIdx.x)
Terms [c, (wB*threadIdx.y), threadIdx.x]
Terms [threadIdx.y]
Base expr c
c1 = wB
c2 = null
c3 = 1
c4 = null
c5 = null
------6.37----baseAddrForBurst is: c
------6.370----j is: 0
------6.371----prefix is: TRN_10_Csub_block_X
------6.372----coeffVar is: TRN_10_Csub_block_X_0
------6.373----cDeclor is: TRN_10_Csub_block_X_0
------6.374----cDeclion is: int TRN_10_Csub_block_X_0
----addStatementBefore----index is:46
------6.38----taskArgs is: [blockDim, gridDim, C, Csub_block, TRN_10_Csub_block_offset, TRN_10_Csub_block_X_0]
------6.38----taskDecls is: [dim3 blockDim, dim3 gridDim, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int TRN_10_Csub_block_offset, int TRN_10_Csub_block_X_0]
------6.38----cStmt is: {
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
------6.370----j is: 1
------6.371----prefix is: TRN_10_Csub_block_c
------6.372----coeffVar is: TRN_10_Csub_block_c_1
------6.373----cDeclor is: TRN_10_Csub_block_c_1
------6.374----cDeclion is: int TRN_10_Csub_block_c_1
----addStatementBefore----index is:48
------6.38----taskArgs is: [blockDim, gridDim, C, Csub_block, TRN_10_Csub_block_offset, TRN_10_Csub_block_X_0, TRN_10_Csub_block_c_1]
------6.38----taskDecls is: [dim3 blockDim, dim3 gridDim, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int TRN_10_Csub_block_offset, int TRN_10_Csub_block_X_0, int TRN_10_Csub_block_c_1]
------6.38----cStmt is: {
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
------6.39----memcpyArgs is: [(TRN_10_Csub_block_offset+Csub_block[threadIdx.y]), ((C+TRN_10_Csub_block_X_0)+(threadIdx.y*TRN_10_Csub_block_c_1))]
------6.40----after memsize, memcpyArgs is: [((C+TRN_10_Csub_block_X_0)+(threadIdx.y*TRN_10_Csub_block_c_1)), (TRN_10_Csub_block_offset+Csub_block[threadIdx.y]), (BLOCKDIM_X*sizeof (DATATYPE))]
------6.40----memcpyCall is: memcpy(((C+TRN_10_Csub_block_X_0)+(threadIdx.y*TRN_10_Csub_block_c_1)), (TRN_10_Csub_block_offset+Csub_block[threadIdx.y]), (BLOCKDIM_X*sizeof (DATATYPE)))
------6.42----after add memcpy before real_trn: {
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
--------sftTaskArgSyms: [Bs[16][16], Csub_block[BLOCKDIM_Y][BLOCKDIM_X], As[16][16], guard_matrixMul_SNC_7]
--------sftTaskArgs: [guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs]
--------sftTaskDecls: [int guard_matrixMul_SNC_7, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], __shared__ DATATYPE As[16][16], __shared__ DATATYPE Bs[16][16]]
--------sftCommonArgsIndex: [1, 2]
------6.44----before leave addTrans, cstmt is: {
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
----6.1----stmt is:
----6.11----test_children is: []
----6.12----test_annot is: []
----6.1----stmt is:
----6.11----test_children is: []
----6.12----test_annot is: []
----6.1----stmt is: return ;
----6.11----test_children is: []
----6.12----test_annot is: null
----6.1----stmt is: {
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
----6.11----test_children is: [int aBegin;, int aEnd;, int aStep;, int bBegin;, int bStep;, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];, int a;, int b;, int k;, int c;, dim3 blockIdx;, int guard_matrixMul_CMP_5;, int guard_matrixMul_TRN_10;, int guard_matrixMul_TRN_6;, int guard_matrixMul_SNC_7;, int TRN_6_Bs_offset;, int TRN_6_Bs_X_0;, int TRN_6_Bs_c_1;, int TRN_10_Csub_block_offset;, int TRN_10_Csub_block_X_0;, int TRN_10_Csub_block_c_1;, guard_matrixMul_SNC_7=1;, guard_matrixMul_TRN_6=1;, guard_matrixMul_TRN_10=1;, guard_matrixMul_CMP_5=1;, #pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 , aBegin=((wA*16)*blockIdx.y);, aEnd=((aBegin+wA)-1);, aStep=16;, bBegin=(16*blockIdx.x);, bStep=(16*wB);, matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);, #pragma fcuda compute cores=1 name=CMP_5 end=false begin=true , #pragma fcuda tloop name=CMP_5 end=false begin=true , #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;, #pragma fcuda tloop name=CMP_5 end=true begin=false , #pragma fcuda compute cores=1 name=CMP_5 end=true begin=false , a=0;, b=0;, k=0;, #pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}, c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));, , , TRN_10_Csub_block_offset=0;, TRN_10_Csub_block_X_0=c;, TRN_10_Csub_block_c_1=wB;, matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);, #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true , #pragma fcuda tloop name=TRN_10 end=false begin=true , #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];, #pragma fcuda tloop name=TRN_10 end=true begin=false , #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false , , , return ;]
----6.12----test_annot is: null
-----6.9----generateMemcpy ends-----
-----6.9----proc is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
-----start to fillDecomposedTasks-----
Moving task statement: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
Moving task statement: #pragma fcuda tloop name=TRN_6 end=false begin=true
Moving task statement: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Moving task statement: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
Moving task statement: #pragma fcuda tloop name=TRN_6 end=true begin=false
Moving task statement: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
Moving task statement: #pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
Moving task statement: #pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
Moving task statement: {
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
Moving task statement: #pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
Moving task statement: #pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
Moving task statement: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
Moving task statement: #pragma fcuda tloop name=TRN_10 end=false begin=true
Moving task statement: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
Moving task statement: #pragma fcuda tloop name=TRN_10 end=true begin=false
Moving task statement: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
Moving task statement: #pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
Moving task statement: #pragma fcuda tloop name=CMP_5 end=false begin=true
Moving task statement: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
Moving task statement: #pragma fcuda tloop name=CMP_5 end=true begin=false
Moving task statement: #pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
-----SFT2 finished fillDecomposedTasks();-----
-----proc after filldecomposedTasks-----
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
return ;
}
-----KernUnit after filldecomposedTasks-----
#include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
const int BLOCKDIM_X = 16, BLOCKDIM_Y = 16;
#pragma fcuda compute name=CMP_5 end=false cores=1 begin=true
void matrixMul_CMP_5(int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X])
{
if (guard_matrixMul_CMP_5)
{
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
}
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
void matrixMul_TRN_10(int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB)
{
if (guard_matrixMul_TRN_10)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
}
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
void matrixMul_TRN_6(int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE As[16][16], DATATYPE * A, int a, int wA, __shared__ DATATYPE Bs[16][16], DATATYPE * B, int b, int wB)
{
if (guard_matrixMul_TRN_6)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
}
}
#pragma fcuda compute name=SNC_7 end=false cores=1 begin=true
void matrixMul_SNC_7(int guard_matrixMul_SNC_7, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], __shared__ DATATYPE As[16][16], __shared__ DATATYPE Bs[16][16])
{
if (guard_matrixMul_SNC_7)
{
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
}
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
return ;
}
-----start to shiftDeclarations-----
----FcudaGlobalData2.java start----
-----cetus application IPChainAnalysis start----
-----cetus application IPChainAnalysis 1----
------IPChainAnalysis.java PerformAliasAnalysis start----
-------IPPointsToAnalysis.java IPPointsToAnalysis start super----
--------IPAnalysis.java IPAnalysis start 1----
--------IPAnalysis.java IPAnalysis start 2----
[NormalizeReturn] begin
*AP* procedure: __syncthreads
*AP* is_void=true
*AP* ret_expr= null
*AP* procedure: matrixMul_CMP_5
*AP* is_void=true
*AP* ret_expr= null
*AP* procedure: matrixMul_TRN_10
*AP* is_void=true
*AP* ret_expr= null
*AP* procedure: matrixMul_TRN_6
*AP* is_void=true
*AP* ret_expr= null
*AP* procedure: matrixMul_SNC_7
*AP* is_void=true
*AP* ret_expr= null
*AP* procedure: matrixMul
*AP* is_void=true
*AP* ret_expr= null
[NormalizeReturn] end in 0.00 seconds
[WARNING] Undeclared symbol k from k=0
[WARNING] Undeclared symbol k from k<16
[WARNING] Undeclared symbol k from ++ k
[WARNING] Undeclared symbol k from As[threadIdx.y][k]
[WARNING] Undeclared symbol k from Bs[k][threadIdx.x]
[LinkSymbol] 124 updates in 0.00 seconds
--------IPAnalysis.java IPAnalysis start 3 enter IPA Graph----
---------IPAGraph.java finish super()----
---------IPAGraph.java start new Arraylist <IPANode>----
---------IPAGraph.java enter buildgraph<prog>
---------IPAGraph.java enter identifyCloneableNodes
---------IPAGraph.java enter buildTopOrder
---------IPAGraph.java finish new IPA Graph
[IPA] Stops due to no flow entry
-------IPPointsToAnalysis.java IPPointsToAnalysis finished super----
-------IPPointsToAnalysis.java IPPointsToAnalysis finished 2----
-------IPPointsToAnalysis.java IPPointsToAnalysis end----
------PerformAliasAnalysis 1----
0000IPpointsToAnalysis
[IPA:PointsTo] Stops due to no flow entry
------PerformAliasAnalysis end----
-----cetus application IPChainAnalysis 2----
-----cetus application IPChainAnalysis 3----
----------------------------------------------
----start to generate CF graph with thread----
----------------------------------------------
proc now is: void __syncthreads()
{
;
return ;
}
=====
proc now is: #pragma fcuda compute name=CMP_5 end=false cores=1 begin=true
void matrixMul_CMP_5(int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X])
{
if (guard_matrixMul_CMP_5)
{
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
}
return ;
}
=====
proc now is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
void matrixMul_TRN_10(int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB)
{
if (guard_matrixMul_TRN_10)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
}
return ;
}
=====
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
proc now is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
void matrixMul_TRN_6(int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE As[16][16], DATATYPE * A, int a, int wA, __shared__ DATATYPE Bs[16][16], DATATYPE * B, int b, int wB)
{
if (guard_matrixMul_TRN_6)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
}
return ;
}
=====
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
proc now is: #pragma fcuda compute name=SNC_7 end=false cores=1 begin=true
void matrixMul_SNC_7(int guard_matrixMul_SNC_7, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], __shared__ DATATYPE As[16][16], __shared__ DATATYPE Bs[16][16])
{
if (guard_matrixMul_SNC_7)
{
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
return ;
}
=====
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
proc now is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
return ;
}
=====
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
-----cetus application IPChainAnalysis 4----
--------------------------------------------------------
----start to generate generate Program Summary Graph----
--------------------------------------------------------
############### PSG Summary Detail[__syncthreads()] #################
## Entry Node ##
# Ref Info (psg_entry_ref, use)
psg_entry_ref is null
# Global Info (psg_entry_global, UseOutSet)
psg_entry_global is null
## Exit Node ##
# Ref Info (psg_exit_ref, def)
psg_exit_ref is null
# Global Info (psg_exit_global, DefInSet)
psg_exit_global is null
############### PSG Summary Detail[matrixMul_CMP_5(int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X])] #################
## Entry Node ##
# Ref Info (psg_entry_ref, use)
# Global Info (psg_entry_global, UseOutSet)
## Exit Node ##
# Ref Info (psg_exit_ref, def)
-DEF: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-DEF: Csub_block, NODE: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
# Global Info (psg_exit_global, DefInSet)
############### PSG Summary Detail[matrixMul_TRN_10(int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB)] #################
## Entry Node ##
# Ref Info (psg_entry_ref, use)
-USE: Csub_block, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
# Global Info (psg_entry_global, UseOutSet)
## Exit Node ##
# Ref Info (psg_exit_ref, def)
-DEF: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-DEF: C, NODE: * C
-DEF: C, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
# Global Info (psg_exit_global, DefInSet)
############### PSG Summary Detail[matrixMul_TRN_6(int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE As[16][16], DATATYPE * A, int a, int wA, __shared__ DATATYPE Bs[16][16], DATATYPE * B, int b, int wB)] #################
## Entry Node ##
# Ref Info (psg_entry_ref, use)
# Global Info (psg_entry_global, UseOutSet)
## Exit Node ##
# Ref Info (psg_exit_ref, def)
-DEF: B, NODE: * B
-DEF: As, NODE: As[16][16]
-DEF: As, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
-DEF: A, NODE: * A
-DEF: Bs, NODE: Bs[16][16]
-DEF: Bs, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
# Global Info (psg_exit_global, DefInSet)
############### PSG Summary Detail[matrixMul_SNC_7(int guard_matrixMul_SNC_7, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], __shared__ DATATYPE As[16][16], __shared__ DATATYPE Bs[16][16])] #################
## Entry Node ##
# Ref Info (psg_entry_ref, use)
-USE: As, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-USE: Bs, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-USE: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
# Global Info (psg_entry_global, UseOutSet)
## Call Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
# Ref Info (psg_call_ref, def)
psg_call_ref is null
# Global Info (psg_call_global, DefInSet)
psg_call_global is null
## Return Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
# Ref Info (psg_return_ref, use)
psg_return_ref is null
# Global Info (psg_return_global, UseOutSet)
psg_return_global is null
## Call Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
# Ref Info (psg_call_ref, def)
psg_call_ref is null
# Global Info (psg_call_global, DefInSet)
psg_call_global is null
## Return Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
# Ref Info (psg_return_ref, use)
psg_return_ref is null
# Global Info (psg_return_global, UseOutSet)
psg_return_global is null
## Exit Node ##
# Ref Info (psg_exit_ref, def)
-DEF: Bs, NODE: Bs[16][16]
-DEF: As, NODE: As[16][16]
-DEF: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-DEF: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
# Global Info (psg_exit_global, DefInSet)
############### PSG Summary Detail[matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)] #################
## Entry Node ##
# Ref Info (psg_entry_ref, use)
-USE: B, NODE: matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
-USE: C, NODE: matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
-USE: A, NODE: matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
# Global Info (psg_entry_global, UseOutSet)
## Call Node ## IR: matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
# Ref Info (psg_call_ref, def)
# Global Info (psg_call_global, DefInSet)
## Return Node ## IR: matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
# Ref Info (psg_return_ref, use)
# Global Info (psg_return_global, UseOutSet)
## Call Node ## IR: matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
# Ref Info (psg_call_ref, def)
# Global Info (psg_call_global, DefInSet)
## Return Node ## IR: matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
# Ref Info (psg_return_ref, use)
# Global Info (psg_return_global, UseOutSet)
## Call Node ## IR: matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
# Ref Info (psg_call_ref, def)
# Global Info (psg_call_global, DefInSet)
## Return Node ## IR: matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
# Ref Info (psg_return_ref, use)
# Global Info (psg_return_global, UseOutSet)
## Call Node ## IR: matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
# Ref Info (psg_call_ref, def)
# Global Info (psg_call_global, DefInSet)
## Return Node ## IR: matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
# Ref Info (psg_return_ref, use)
# Global Info (psg_return_global, UseOutSet)
## Exit Node ##
# Ref Info (psg_exit_ref, def)
-DEF: B, NODE: * B
-DEF: B, NODE: matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
-DEF: C, NODE: matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
-DEF: A, NODE: * A
-DEF: A, NODE: matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
# Global Info (psg_exit_global, DefInSet)
############### PSG Propagated Detail[__syncthreads()] #################
## Entry Node ##
# Ref Info (psg_entry_ref)
psg_entry_ref is null
# Global Info (psg_entry_global)
psg_entry_global is null
## Exit Node ##
# Ref Info (psg_exit_ref)
psg_exit_ref is null
# Global Info (psg_exit_global)
psg_exit_global is null
############### PSG Propagated Detail[matrixMul_CMP_5(int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X])] #################
## Entry Node ##
# Ref Info (psg_entry_ref)
# parameter: Csub_block
-INuse: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-INuse: Csub_block, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
-OUTuse: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: Csub_block, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
# Global Info (psg_entry_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
## Exit Node ##
# Ref Info (psg_exit_ref)
-OUTdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-OUTdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
-INuse: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-INuse: Csub_block, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
-OUTuse: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: Csub_block, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
# Global Info (psg_exit_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
############### PSG Propagated Detail[matrixMul_TRN_10(int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB)] #################
## Entry Node ##
# Ref Info (psg_entry_ref)
# parameter: Csub_block
-INdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-INdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
-INdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-INdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-OUTdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
-OUTdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-OUTdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-INuse: Csub_block, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
# parameter: C
# Global Info (psg_entry_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
## Exit Node ##
# Ref Info (psg_exit_ref)
-INdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-INdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
-INdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-INdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-OUTdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-OUTdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
-OUTdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-OUTdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTdef: C, NODE: * C
-OUTdef: C, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
# Global Info (psg_exit_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
############### PSG Propagated Detail[matrixMul_TRN_6(int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE As[16][16], DATATYPE * A, int a, int wA, __shared__ DATATYPE Bs[16][16], DATATYPE * B, int b, int wB)] #################
## Entry Node ##
# Ref Info (psg_entry_ref)
# parameter: Bs
-INdef: Bs, NODE: Bs[16][16]
-INdef: Bs, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
-INdef: Bs, NODE: Bs[16][16]
-OUTdef: Bs, NODE: Bs[16][16]
-OUTdef: Bs, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
-OUTdef: Bs, NODE: Bs[16][16]
-INuse: Bs, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: Bs, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
# parameter: As
-INdef: As, NODE: As[16][16]
-INdef: As, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
-INdef: As, NODE: As[16][16]
-OUTdef: As, NODE: As[16][16]
-OUTdef: As, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
-OUTdef: As, NODE: As[16][16]
-INuse: As, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: As, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
# parameter: A
-INdef: A, NODE: * A
-OUTdef: A, NODE: * A
# parameter: B
-INdef: B, NODE: * B
-OUTdef: B, NODE: * B
# Global Info (psg_entry_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
## Exit Node ##
# Ref Info (psg_exit_ref)
-INdef: B, NODE: * B
-OUTdef: B, NODE: * B
-INdef: As, NODE: As[16][16]
-INdef: As, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
-INdef: As, NODE: As[16][16]
-OUTdef: As, NODE: As[16][16]
-OUTdef: As, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
-OUTdef: As, NODE: As[16][16]
-INuse: As, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: As, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-INdef: A, NODE: * A
-OUTdef: A, NODE: * A
-INdef: Bs, NODE: Bs[16][16]
-INdef: Bs, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
-INdef: Bs, NODE: Bs[16][16]
-OUTdef: Bs, NODE: Bs[16][16]
-OUTdef: Bs, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
-OUTdef: Bs, NODE: Bs[16][16]
-INuse: Bs, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: Bs, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
# Global Info (psg_exit_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
############### PSG Propagated Detail[matrixMul_SNC_7(int guard_matrixMul_SNC_7, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], __shared__ DATATYPE As[16][16], __shared__ DATATYPE Bs[16][16])] #################
## Entry Node ##
# Ref Info (psg_entry_ref)
# parameter: As
-INdef: As, NODE: As[16][16]
-INdef: As, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
-INdef: As, NODE: As[16][16]
-OUTdef: As, NODE: As[16][16]
-OUTdef: As, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
-OUTdef: As, NODE: As[16][16]
-INuse: As, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: As, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
# parameter: Bs
-INdef: Bs, NODE: Bs[16][16]
-INdef: Bs, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
-INdef: Bs, NODE: Bs[16][16]
-OUTdef: Bs, NODE: Bs[16][16]
-OUTdef: Bs, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
-OUTdef: Bs, NODE: Bs[16][16]
-INuse: Bs, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: Bs, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
# parameter: Csub_block
-INdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-INdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
-INdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-INdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-OUTdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
-OUTdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-OUTdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-INuse: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-INuse: Csub_block, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
-OUTuse: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: Csub_block, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
# Global Info (psg_entry_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
## Call Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
# Ref Info (psg_call_ref)
psg_call_ref is null
# Global Info (psg_call_global)
psg_call_global is null
## Return Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
# Ref Info (psg_return_ref)
psg_return_ref is null
# Global Info (psg_return_global)
psg_return_global is null
## Call Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
# Ref Info (psg_call_ref)
psg_call_ref is null
# Global Info (psg_call_global)
psg_call_global is null
## Return Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
# Ref Info (psg_return_ref)
psg_return_ref is null
# Global Info (psg_return_global)
psg_return_global is null
## Exit Node ##
# Ref Info (psg_exit_ref)
-INdef: Bs, NODE: Bs[16][16]
-INdef: Bs, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
-INdef: Bs, NODE: Bs[16][16]
-OUTdef: Bs, NODE: Bs[16][16]
-OUTdef: Bs, NODE: Bs[16][16]
-OUTdef: Bs, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
-INuse: Bs, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: Bs, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-INdef: As, NODE: As[16][16]
-INdef: As, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
-INdef: As, NODE: As[16][16]
-OUTdef: As, NODE: As[16][16]
-OUTdef: As, NODE: As[16][16]
-OUTdef: As, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
-INuse: As, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: As, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-INdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-INdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
-INdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-INdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-OUTdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-OUTdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
-INuse: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-INuse: Csub_block, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
-OUTuse: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: Csub_block, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
# Global Info (psg_exit_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
############### PSG Propagated Detail[matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)] #################
## Entry Node ##
# Ref Info (psg_entry_ref)
# parameter: B
-INuse: B, NODE: matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
# parameter: C
-INuse: C, NODE: matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
# parameter: A
-INuse: A, NODE: matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
# Global Info (psg_entry_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
## Call Node ## IR: matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
# Ref Info (psg_call_ref)
-INuse: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-INuse: Csub_block, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
-OUTuse: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: Csub_block, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
# Global Info (psg_call_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
## Return Node ## IR: matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
# Ref Info (psg_return_ref)
-INdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-INdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
-OUTdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-OUTdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
-INuse: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-INuse: Csub_block, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
-OUTuse: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: Csub_block, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
# Global Info (psg_return_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
## Call Node ## IR: matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
# Ref Info (psg_call_ref)
-INdef: Bs, NODE: Bs[16][16]
-INdef: Bs, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
-INdef: Bs, NODE: Bs[16][16]
-OUTdef: Bs, NODE: Bs[16][16]
-OUTdef: Bs, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
-OUTdef: Bs, NODE: Bs[16][16]
-INuse: Bs, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: Bs, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-INdef: A, NODE: * A
-OUTdef: A, NODE: * A
-INdef: As, NODE: As[16][16]
-INdef: As, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
-INdef: As, NODE: As[16][16]
-OUTdef: As, NODE: As[16][16]
-OUTdef: As, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
-OUTdef: As, NODE: As[16][16]
-INuse: As, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: As, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-INdef: B, NODE: * B
-OUTdef: B, NODE: * B
# Global Info (psg_call_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
## Return Node ## IR: matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
# Ref Info (psg_return_ref)
-INdef: B, NODE: * B
-OUTdef: B, NODE: * B
-INdef: A, NODE: * A
-OUTdef: A, NODE: * A
-INdef: As, NODE: As[16][16]
-INdef: As, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
-INdef: As, NODE: As[16][16]
-OUTdef: As, NODE: As[16][16]
-OUTdef: As, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
-OUTdef: As, NODE: As[16][16]
-INuse: As, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: As, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-INdef: Bs, NODE: Bs[16][16]
-INdef: Bs, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
-INdef: Bs, NODE: Bs[16][16]
-OUTdef: Bs, NODE: Bs[16][16]
-OUTdef: Bs, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
-OUTdef: Bs, NODE: Bs[16][16]
-INuse: Bs, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: Bs, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
# Global Info (psg_return_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
## Call Node ## IR: matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
# Ref Info (psg_call_ref)
-INdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-INdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
-INdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-INdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-OUTdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
-OUTdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-OUTdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-INuse: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-INuse: Csub_block, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
-OUTuse: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: Csub_block, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
-INdef: As, NODE: As[16][16]
-INdef: As, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
-INdef: As, NODE: As[16][16]
-OUTdef: As, NODE: As[16][16]
-OUTdef: As, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
-OUTdef: As, NODE: As[16][16]
-INuse: As, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: As, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-INdef: Bs, NODE: Bs[16][16]
-INdef: Bs, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
-INdef: Bs, NODE: Bs[16][16]
-OUTdef: Bs, NODE: Bs[16][16]
-OUTdef: Bs, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
-OUTdef: Bs, NODE: Bs[16][16]
-INuse: Bs, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: Bs, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
# Global Info (psg_call_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
## Return Node ## IR: matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
# Ref Info (psg_return_ref)
-INdef: As, NODE: As[16][16]
-INdef: As, NODE: As[16][16]
-INdef: As, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
-OUTdef: As, NODE: As[16][16]
-OUTdef: As, NODE: As[16][16]
-OUTdef: As, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
-INuse: As, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: As, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-INdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-INdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-INdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-INdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
-OUTdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-OUTdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-OUTdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
-INuse: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-INuse: Csub_block, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
-OUTuse: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: Csub_block, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
-INdef: Bs, NODE: Bs[16][16]
-INdef: Bs, NODE: Bs[16][16]
-INdef: Bs, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
-OUTdef: Bs, NODE: Bs[16][16]
-OUTdef: Bs, NODE: Bs[16][16]
-OUTdef: Bs, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
-INuse: Bs, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: Bs, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
# Global Info (psg_return_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
## Call Node ## IR: matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
# Ref Info (psg_call_ref)
-INdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-INdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
-INdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-INdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-OUTdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
-OUTdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-OUTdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-INuse: Csub_block, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
-OUTuse: Csub_block, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
# Global Info (psg_call_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
## Return Node ## IR: matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
# Ref Info (psg_return_ref)
-INdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-INdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-INdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
-INdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-INdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-OUTdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-OUTdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
-OUTdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-OUTdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-INdef: C, NODE: * C
-INdef: C, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
-OUTdef: C, NODE: * C
-OUTdef: C, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
# Global Info (psg_return_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
## Exit Node ##
# Ref Info (psg_exit_ref)
-INdef: B, NODE: * B
-OUTdef: B, NODE: * B
-OUTdef: B, NODE: matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
-OUTdef: B, NODE: * B
-INdef: C, NODE: * C
-INdef: C, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
-OUTdef: C, NODE: matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
-OUTdef: C, NODE: * C
-OUTdef: C, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
-INdef: A, NODE: * A
-OUTdef: A, NODE: * A
-OUTdef: A, NODE: matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
-OUTdef: A, NODE: * A
# Global Info (psg_exit_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
-----cetus application IPChainAnalysis 5----
-----cetus application IPChainAnalysis 6----
-----cetus application IPChainAnalysis 7----
-----cetus application IPChainAnalysis end----
----FcudaGlobalData2.java end----
Found def k with IR: k=0;
isExist? true
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
return ;
}
-----proc after shiftDeclarations();-----
-----kernelTransformPass, finish transformProcedure-----, proc now is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
return ;
}
[SplitFcudaTasks2-FCUDA] end in 0.29 seconds
[LinkSymbol] 129 updates in 0.00 seconds
*** After SplitFcudaTasks2 ***
#include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
const int BLOCKDIM_X = 16, BLOCKDIM_Y = 16;
#pragma fcuda compute name=CMP_5 end=false cores=1 begin=true
void matrixMul_CMP_5(int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X])
{
if (guard_matrixMul_CMP_5)
{
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
}
return ;
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
void matrixMul_TRN_10(int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB)
{
if (guard_matrixMul_TRN_10)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
}
return ;
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
void matrixMul_TRN_6(int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE As[16][16], DATATYPE * A, int a, int wA, __shared__ DATATYPE Bs[16][16], DATATYPE * B, int b, int wB)
{
if (guard_matrixMul_TRN_6)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
}
return ;
}
#pragma fcuda compute name=SNC_7 end=false cores=1 begin=true
void matrixMul_SNC_7(int guard_matrixMul_SNC_7, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], __shared__ DATATYPE As[16][16], __shared__ DATATYPE Bs[16][16])
{
if (guard_matrixMul_SNC_7)
{
int k;
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
return ;
}
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
return ;
}
===========================================
[CleanKernelDecls-FCUDA] begin
[CleanKernelDecls-FCUDA] examining procedure matrixMul
cur_level:0
Defs+Uses:[guard_matrixMul_SNC_7]
Defs+Uses:[guard_matrixMul_TRN_6]
Defs+Uses:[guard_matrixMul_TRN_10]
Defs+Uses:[guard_matrixMul_CMP_5]
Defs+Uses:[]
Defs+Uses:[]
Defs+Uses:[]
Defs+Uses:[aBegin, blockIdx, blockIdx.y, wA]
Defs+Uses:[aBegin, aEnd, wA]
Defs+Uses:[aStep]
Defs+Uses:[bBegin, blockIdx, blockIdx.x]
Defs+Uses:[bStep, wB]
Defs+Uses:[Csub_block, blockDim, blockIdx, gridDim, guard_matrixMul_CMP_5, matrixMul_CMP_5]
Defs+Uses:[a]
Defs+Uses:[b]
Defs+Uses:[k]
cur_level:1
Defs+Uses:[]
Defs+Uses:[]
Defs+Uses:[TRN_6_Bs_offset]
Defs+Uses:[TRN_6_Bs_X_0, b]
Defs+Uses:[TRN_6_Bs_c_1, wB]
Defs+Uses:[A, As, B, Bs, a, b, blockDim, blockIdx, gridDim, guard_matrixMul_TRN_6, matrixMul_TRN_6, wA, wB]
Defs+Uses:[]
Defs+Uses:[]
Defs+Uses:[]
Defs+Uses:[As, Bs, Csub_block, blockDim, blockIdx, gridDim, guard_matrixMul_SNC_7, matrixMul_SNC_7]
Defs+Uses:[]
cur_level:1
var2freqMap{As=2, Bs=2}
funcCallParams[A, As, B, Bs, Csub_block, a, b, blockDim, blockIdx, gridDim, guard_matrixMul_CMP_5, guard_matrixMul_SNC_7, guard_matrixMul_TRN_6, wA, wB]
Defs+Uses:[blockIdx, blockIdx.x, blockIdx.y, c, wB]
Defs+Uses:[]
Defs+Uses:[]
Defs+Uses:[TRN_10_Csub_block_offset]
Defs+Uses:[TRN_10_Csub_block_X_0, c]
Defs+Uses:[TRN_10_Csub_block_c_1, wB]
Defs+Uses:[C, Csub_block, blockDim, blockIdx, c, gridDim, guard_matrixMul_TRN_10, matrixMul_TRN_10, wB]
Defs+Uses:[]
Defs+Uses:[]
Defs+Uses:[]
cur_level:0
var2freqMap{Csub_block=3, TRN_10_Csub_block_X_0=1, TRN_10_Csub_block_c_1=1, TRN_10_Csub_block_offset=1, TRN_6_Bs_X_0=1, TRN_6_Bs_c_1=1, TRN_6_Bs_offset=1, a=5, aBegin=3, aEnd=2, aStep=2, b=5, bBegin=2, bStep=2, c=3, guard_matrixMul_CMP_5=2, guard_matrixMul_SNC_7=2, guard_matrixMul_TRN_10=2, guard_matrixMul_TRN_6=2, k=1}
funcCallParams[A, As, B, Bs, C, Csub_block, a, b, blockDim, blockIdx, c, gridDim, guard_matrixMul_CMP_5, guard_matrixMul_SNC_7, guard_matrixMul_TRN_10, guard_matrixMul_TRN_6, wA, wB]
-----kernelTransformPass, finish transformProcedure-----, proc now is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
return ;
}
[CleanKernelDecls-FCUDA] end in 0.01 seconds
[LinkSymbol] 129 updates in 0.00 seconds
*** After CleanKernelDecls ***
#include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
const int BLOCKDIM_X = 16, BLOCKDIM_Y = 16;
#pragma fcuda compute name=CMP_5 end=false cores=1 begin=true
void matrixMul_CMP_5(int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X])
{
if (guard_matrixMul_CMP_5)
{
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
}
return ;
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
void matrixMul_TRN_10(int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB)
{
if (guard_matrixMul_TRN_10)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
}
return ;
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
void matrixMul_TRN_6(int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE As[16][16], DATATYPE * A, int a, int wA, __shared__ DATATYPE Bs[16][16], DATATYPE * B, int b, int wB)
{
if (guard_matrixMul_TRN_6)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
}
return ;
}
#pragma fcuda compute name=SNC_7 end=false cores=1 begin=true
void matrixMul_SNC_7(int guard_matrixMul_SNC_7, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], __shared__ DATATYPE As[16][16], __shared__ DATATYPE Bs[16][16])
{
if (guard_matrixMul_SNC_7)
{
int k;
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
return ;
}
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
return ;
}
===========================================
[SerializeThreads2-MCUDA] begin
[SerializeThreads2-MCUDA] examining procedure matrixMul
----ST2-enter ST2 and if (Driver.getOptionValue(Fcuda) != null)
-----ST2-List<Procedure> tskLst = FCUDAutils.getTaskMapping(proc.getSymbolName());
------ST2-if(tskLst != null)
-------ST2-for( Procedure task : tskLst ), task is: #pragma fcuda compute name=CMP_5 end=false cores=1 begin=true
void matrixMul_CMP_5(int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X])
{
if (guard_matrixMul_CMP_5)
{
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
}
return ;
}
------InsertTLoops flag 1------
------InsertTLoops flag 2------
------InsertTLoops flag 2------
------InsertTLoops flag 3------
------InsertTLoops flag 2------
------InsertTLoops flag 4------
------InsertTLoops flag 5------
------InsertTLoops flag 6------
------InsertTLoops flag 6.1------
------InsertTLoops flag 6.2------
------InsertTLoops flag 6.2------tloopstmts.get(idx) is: #pragma fcuda tloop name=CMP_5 end=false begin=true
------InsertTLoops flag 6.3------
------InsertTLoops flag 6.1------
------InsertTLoops flag 6.2------
------InsertTLoops flag 6.2------tloopstmts.get(idx) is: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
------InsertTLoops flag 6.3------
------InsertTLoops flag 6.4------
------InsertTLoops flag 6.5------
------InsertTLoops flag 7------
------InsertTLoops flag 2------
-------ST2-for( Procedure task : tskLst ), finish current insertTLoops
-------ST2-for( Procedure task : tskLst ), task is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
void matrixMul_TRN_10(int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB)
{
if (guard_matrixMul_TRN_10)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
}
return ;
}
------InsertTLoops flag 1------
------InsertTLoops flag 2------
------InsertTLoops flag 2------
------InsertTLoops flag 3------
------InsertTLoops flag 2------
------InsertTLoops flag 4------
------InsertTLoops flag 5------
------InsertTLoops flag 6------
------InsertTLoops flag 6.1------
------InsertTLoops flag 6.2------
------InsertTLoops flag 6.2------tloopstmts.get(idx) is: #pragma fcuda tloop name=TRN_10 end=false begin=true
------InsertTLoops flag 6.3------
------InsertTLoops flag 6.1------
------InsertTLoops flag 6.2------
------InsertTLoops flag 6.2------tloopstmts.get(idx) is: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
------InsertTLoops flag 6.3------
------InsertTLoops flag 6.4------
------InsertTLoops flag 6.5------
------InsertTLoops flag 7------
------InsertTLoops flag 2------
-------ST2-for( Procedure task : tskLst ), finish current insertTLoops
-------ST2-for( Procedure task : tskLst ), task is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
void matrixMul_TRN_6(int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE As[16][16], DATATYPE * A, int a, int wA, __shared__ DATATYPE Bs[16][16], DATATYPE * B, int b, int wB)
{
if (guard_matrixMul_TRN_6)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
}
return ;
}
------InsertTLoops flag 1------
------InsertTLoops flag 2------
------InsertTLoops flag 2------
------InsertTLoops flag 3------
------InsertTLoops flag 2------
------InsertTLoops flag 4------
------InsertTLoops flag 5------
------InsertTLoops flag 6------
------InsertTLoops flag 6.1------
------InsertTLoops flag 6.2------
------InsertTLoops flag 6.2------tloopstmts.get(idx) is: #pragma fcuda tloop name=TRN_6 end=false begin=true
------InsertTLoops flag 6.3------
------InsertTLoops flag 6.1------
------InsertTLoops flag 6.2------
------InsertTLoops flag 6.2------tloopstmts.get(idx) is: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
------InsertTLoops flag 6.3------
------InsertTLoops flag 6.1------
------InsertTLoops flag 6.2------
------InsertTLoops flag 6.2------tloopstmts.get(idx) is: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
------InsertTLoops flag 6.3------
------InsertTLoops flag 6.4------
------InsertTLoops flag 6.5------
------InsertTLoops flag 7------
------InsertTLoops flag 2------
-------ST2-for( Procedure task : tskLst ), finish current insertTLoops
-------ST2-for( Procedure task : tskLst ), task is: #pragma fcuda compute name=SNC_7 end=false cores=1 begin=true
void matrixMul_SNC_7(int guard_matrixMul_SNC_7, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], __shared__ DATATYPE As[16][16], __shared__ DATATYPE Bs[16][16])
{
if (guard_matrixMul_SNC_7)
{
int k;
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
return ;
}
------InsertTLoops flag 1------
------InsertTLoops flag 2------
------InsertTLoops flag 2------
------InsertTLoops flag 2------
------InsertTLoops flag 2------
------InsertTLoops flag 3------
------InsertTLoops flag 2------
------InsertTLoops flag 4------
------InsertTLoops flag 5------
------InsertTLoops flag 6------
------InsertTLoops flag 6.1------
------InsertTLoops flag 6.2------
------InsertTLoops flag 6.2------tloopstmts.get(idx) is: #pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
------InsertTLoops flag 6.3------
------InsertTLoops flag 6.1------
------InsertTLoops flag 6.2------
------InsertTLoops flag 6.2------tloopstmts.get(idx) is: #pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
------InsertTLoops flag 6.3------
------InsertTLoops flag 6.4------
------InsertTLoops flag 6.5------
------InsertTLoops flag 7------
------InsertTLoops flag 2------
-------ST2-for( Procedure task : tskLst ), finish current insertTLoops
-----kernelTransformPass, finish transformProcedure-----, proc now is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
return ;
}
[SerializeThreads2-MCUDA] end in 0.01 seconds
[LinkSymbol] 129 updates in 0.00 seconds
*** After SerializeThreads2 ***
#include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
const int BLOCKDIM_X = 16, BLOCKDIM_Y = 16;
#pragma fcuda compute name=CMP_5 end=false cores=1 begin=true
void matrixMul_CMP_5(int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X])
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_CMP_5)
{
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
}
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
}
return ;
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
void matrixMul_TRN_10(int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB)
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_TRN_10)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
}
return ;
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
void matrixMul_TRN_6(int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE As[16][16], DATATYPE * A, int a, int wA, __shared__ DATATYPE Bs[16][16], DATATYPE * B, int b, int wB)
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_TRN_6)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
}
return ;
}
#pragma fcuda compute name=SNC_7 end=false cores=1 begin=true
void matrixMul_SNC_7(int guard_matrixMul_SNC_7, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], __shared__ DATATYPE As[16][16], __shared__ DATATYPE Bs[16][16])
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_SNC_7)
{
int k;
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
return ;
}
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
return ;
}
===========================================
[CleanThreadLoops2-FCUDA] begin
[CleanThreadLoops2-FCUDA] examining procedure matrixMul
mVar2Var:
{threadIdx=[], guard_matrixMul_CMP_5=[], blockDim=[], Csub_block=[threadIdx]}
TLP Thread-Indep Stmts: []
mVar2Var:
{threadIdx=[], C=[threadIdx, c, wB], c=[], guard_matrixMul_TRN_10=[], blockDim=[], wB=[], Csub_block=[]}
TLP Thread-Indep Stmts: []
mVar2Var:
{guard_matrixMul_TRN_6=[], A=[], a=[], Bs=[threadIdx, b, wB], threadIdx=[], As=[a, threadIdx, wA], B=[], b=[], blockDim=[], wA=[], wB=[]}
TLP Thread-Indep Stmts: []
mVar2Var:
{Bs=[], threadIdx=[], As=[], blockDim=[], guard_matrixMul_SNC_7=[], k=[k], Csub_block=[threadIdx, k]}
TLP Thread-Indep Stmts: []
-----kernelTransformPass, finish transformProcedure-----, proc now is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
return ;
}
[CleanThreadLoops2-FCUDA] end in 0.02 seconds
[LinkSymbol] 129 updates in 0.00 seconds
*** After CleanThreadLoops2 ***
#include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
const int BLOCKDIM_X = 16, BLOCKDIM_Y = 16;
#pragma fcuda compute name=CMP_5 end=false cores=1 begin=true
void matrixMul_CMP_5(int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X])
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_CMP_5)
{
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
}
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
}
return ;
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
void matrixMul_TRN_10(int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB)
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_TRN_10)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
}
return ;
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
void matrixMul_TRN_6(int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE As[16][16], DATATYPE * A, int a, int wA, __shared__ DATATYPE Bs[16][16], DATATYPE * B, int b, int wB)
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_TRN_6)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
}
return ;
}
#pragma fcuda compute name=SNC_7 end=false cores=1 begin=true
void matrixMul_SNC_7(int guard_matrixMul_SNC_7, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], __shared__ DATATYPE As[16][16], __shared__ DATATYPE Bs[16][16])
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_SNC_7)
{
int k;
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
return ;
}
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
return ;
}
===========================================
[UnrollThreadLoops2-MCUDA] begin
[UnrollThreadLoops2-MCUDA] examining procedure matrixMul
[Unrolling] : matrixMul_CMP_5
[unrollFactor] 1
[Unrolling] : matrixMul_SNC_7
[unrollFactor] 1
mUnrolledIDs:
{}
-----kernelTransformPass, finish transformProcedure-----, proc now is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
return ;
}
[UnrollThreadLoops2-MCUDA] end in 0.00 seconds
[LinkSymbol] 129 updates in 0.00 seconds
*** After UnrollThreadLoops2 ***
#include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
const int BLOCKDIM_X = 16, BLOCKDIM_Y = 16;
#pragma fcuda compute name=CMP_5 end=false cores=1 begin=true
void matrixMul_CMP_5(int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X])
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_CMP_5)
{
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
}
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
}
return ;
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
void matrixMul_TRN_10(int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB)
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_TRN_10)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
}
return ;
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
void matrixMul_TRN_6(int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE As[16][16], DATATYPE * A, int a, int wA, __shared__ DATATYPE Bs[16][16], DATATYPE * B, int b, int wB)
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_TRN_6)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
}
return ;
}
#pragma fcuda compute name=SNC_7 end=false cores=1 begin=true
void matrixMul_SNC_7(int guard_matrixMul_SNC_7, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], __shared__ DATATYPE As[16][16], __shared__ DATATYPE Bs[16][16])
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_SNC_7)
{
int k;
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
return ;
}
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
return ;
}
===========================================
[PartitionArrays2-MCUDA] begin
[PartitionArrays2-MCUDA] examining procedure matrixMul
[Memory partition] : matrixMul_CMP_5
[mempartFactor]1
[Memory partition] : matrixMul_TRN_10
[mempartFactor]1
[Memory partition] : matrixMul_TRN_6
[mempartFactor]1
[Memory partition] : matrixMul_SNC_7
[mempartFactor]1
[Memory partition] : matrixMul
-----kernelTransformPass, finish transformProcedure-----, proc now is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
return ;
}
[PartitionArrays2-MCUDA] end in 0.00 seconds
[LinkSymbol] 129 updates in 0.00 seconds
*** After PartitionArrays2 ***
#include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
const int BLOCKDIM_X = 16, BLOCKDIM_Y = 16;
#pragma fcuda compute name=CMP_5 end=false cores=1 begin=true
void matrixMul_CMP_5(int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X])
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_CMP_5)
{
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
}
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
}
return ;
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
void matrixMul_TRN_10(int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB)
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_TRN_10)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
}
return ;
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
void matrixMul_TRN_6(int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE As[16][16], DATATYPE * A, int a, int wA, __shared__ DATATYPE Bs[16][16], DATATYPE * B, int b, int wB)
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_TRN_6)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
}
return ;
}
#pragma fcuda compute name=SNC_7 end=false cores=1 begin=true
void matrixMul_SNC_7(int guard_matrixMul_SNC_7, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], __shared__ DATATYPE As[16][16], __shared__ DATATYPE Bs[16][16])
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_SNC_7)
{
int k;
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
return ;
}
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
return ;
}
===========================================
[WrapBlockIdxLoop2-FCUDA] begin
[WrapBlockIdxLoop2-FCUDA] examining procedure matrixMul
-----kernelTransformPass, finish transformProcedure-----, proc now is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB, dim3 gridDim, dim3 blockDim)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
dim3 blockIdx_loop;
for (blockIdx_loop.y=0; (gridDim.y+(-1*blockIdx_loop.y))>0; blockIdx_loop.y+=1)
{
blockIdx.y=blockIdx_loop.y;
for (blockIdx_loop.x=0; (gridDim.x+(-1*blockIdx_loop.x))>0; blockIdx_loop.x+=1)
{
blockIdx.x=blockIdx_loop.x;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
}
}
}
[WrapBlockIdxLoop2-FCUDA] end in 0.01 seconds
[LinkSymbol] 137 updates in 0.00 seconds
*** After WrapBlockIdxLoop2 ***
#include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
const int BLOCKDIM_X = 16, BLOCKDIM_Y = 16;
#pragma fcuda compute name=CMP_5 end=false cores=1 begin=true
void matrixMul_CMP_5(int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X])
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_CMP_5)
{
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
}
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
}
return ;
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
void matrixMul_TRN_10(int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB)
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_TRN_10)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
}
return ;
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
void matrixMul_TRN_6(int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE As[16][16], DATATYPE * A, int a, int wA, __shared__ DATATYPE Bs[16][16], DATATYPE * B, int b, int wB)
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_TRN_6)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
}
return ;
}
#pragma fcuda compute name=SNC_7 end=false cores=1 begin=true
void matrixMul_SNC_7(int guard_matrixMul_SNC_7, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], __shared__ DATATYPE As[16][16], __shared__ DATATYPE Bs[16][16])
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_SNC_7)
{
int k;
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
return ;
}
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB, dim3 gridDim, dim3 blockDim)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
dim3 blockIdx_loop;
for (blockIdx_loop.y=0; (gridDim.y+(-1*blockIdx_loop.y))>0; blockIdx_loop.y+=1)
{
blockIdx.y=blockIdx_loop.y;
for (blockIdx_loop.x=0; (gridDim.x+(-1*blockIdx_loop.x))>0; blockIdx_loop.x+=1)
{
blockIdx.x=blockIdx_loop.x;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
}
}
}
===========================================
[DuplicateForFCUDA2-FCUDA] begin
[DuplicateForFCUDA2-FCUDA] examining procedure matrixMul
matrixMul is inStreamProc: false
fcudaCores:
[matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block), matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB), matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB), matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs)]
coreNames:
[matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block), matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs), matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB), matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB)]
------------------------
mVarsToDuplicate: []
mVarsToDuplicate: [a, guard_matrixMul_TRN_6, bStep, b, c, guard_matrixMul_TRN_10, TRN_10_Csub_block_offset, guard_matrixMul_SNC_7, bBegin, k, TRN_10_Csub_block_X_0, blockIdx, TRN_6_Bs_X_0, aStep, aEnd, guard_matrixMul_CMP_5, TRN_6_Bs_offset, TRN_10_Csub_block_c_1, Csub_block, TRN_6_Bs_c_1, aBegin]
mId2sym: {Csub_block_block0=Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], TRN_10_Csub_block_X_0_block0=TRN_10_Csub_block_X_0_block0, TRN_10_Csub_block_c_1_block0=TRN_10_Csub_block_c_1_block0, TRN_10_Csub_block_offset_block0=TRN_10_Csub_block_offset_block0, TRN_6_Bs_X_0_block0=TRN_6_Bs_X_0_block0, TRN_6_Bs_c_1_block0=TRN_6_Bs_c_1_block0, TRN_6_Bs_offset_block0=TRN_6_Bs_offset_block0, aBegin_block0=aBegin_block0, aEnd_block0=aEnd_block0, aStep_block0=aStep_block0, a_block0=a_block0, bBegin_block0=bBegin_block0, bStep_block0=bStep_block0, b_block0=b_block0, blockIdx_block0=blockIdx_block0, c_block0=c_block0, guard_matrixMul_CMP_5_block0=guard_matrixMul_CMP_5_block0, guard_matrixMul_SNC_7_block0=guard_matrixMul_SNC_7_block0, guard_matrixMul_TRN_10_block0=guard_matrixMul_TRN_10_block0, guard_matrixMul_TRN_6_block0=guard_matrixMul_TRN_6_block0, k_block0=k_block0}
mVarsToDuplicate: [a, guard_matrixMul_TRN_6, bStep, b, c, guard_matrixMul_TRN_10, TRN_10_Csub_block_offset, guard_matrixMul_SNC_7, bBegin, k, TRN_10_Csub_block_X_0, blockIdx, TRN_6_Bs_X_0, aStep, aEnd, guard_matrixMul_CMP_5, TRN_6_Bs_offset, TRN_10_Csub_block_c_1, Csub_block, TRN_6_Bs_c_1, aBegin]
mId2sym: {Csub_block_block0=Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], TRN_10_Csub_block_X_0_block0=TRN_10_Csub_block_X_0_block0, TRN_10_Csub_block_c_1_block0=TRN_10_Csub_block_c_1_block0, TRN_10_Csub_block_offset_block0=TRN_10_Csub_block_offset_block0, TRN_6_Bs_X_0_block0=TRN_6_Bs_X_0_block0, TRN_6_Bs_c_1_block0=TRN_6_Bs_c_1_block0, TRN_6_Bs_offset_block0=TRN_6_Bs_offset_block0, aBegin_block0=aBegin_block0, aEnd_block0=aEnd_block0, aStep_block0=aStep_block0, a_block0=a_block0, bBegin_block0=bBegin_block0, bStep_block0=bStep_block0, b_block0=b_block0, blockIdx_block0=blockIdx_block0, c_block0=c_block0, guard_matrixMul_CMP_5_block0=guard_matrixMul_CMP_5_block0, guard_matrixMul_SNC_7_block0=guard_matrixMul_SNC_7_block0, guard_matrixMul_TRN_10_block0=guard_matrixMul_TRN_10_block0, guard_matrixMul_TRN_6_block0=guard_matrixMul_TRN_6_block0, k_block0=k_block0}
mId2sym: {Csub_block_block0=Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], TRN_10_Csub_block_X_0_block0=TRN_10_Csub_block_X_0_block0, TRN_10_Csub_block_c_1_block0=TRN_10_Csub_block_c_1_block0, TRN_10_Csub_block_offset_block0=TRN_10_Csub_block_offset_block0, TRN_6_Bs_X_0_block0=TRN_6_Bs_X_0_block0, TRN_6_Bs_c_1_block0=TRN_6_Bs_c_1_block0, TRN_6_Bs_offset_block0=TRN_6_Bs_offset_block0, aBegin_block0=aBegin_block0, aEnd_block0=aEnd_block0, aStep_block0=aStep_block0, a_block0=a_block0, bBegin_block0=bBegin_block0, bStep_block0=bStep_block0, b_block0=b_block0, blockIdx_block0=blockIdx_block0, c_block0=c_block0, guard_matrixMul_CMP_5_block0=guard_matrixMul_CMP_5_block0, guard_matrixMul_SNC_7_block0=guard_matrixMul_SNC_7_block0, guard_matrixMul_TRN_10_block0=guard_matrixMul_TRN_10_block0, guard_matrixMul_TRN_6_block0=guard_matrixMul_TRN_6_block0, k_block0=k_block0}
mId2sym: {Csub_block_block0=Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], TRN_10_Csub_block_X_0_block0=TRN_10_Csub_block_X_0_block0, TRN_10_Csub_block_c_1_block0=TRN_10_Csub_block_c_1_block0, TRN_10_Csub_block_offset_block0=TRN_10_Csub_block_offset_block0, TRN_6_Bs_X_0_block0=TRN_6_Bs_X_0_block0, TRN_6_Bs_c_1_block0=TRN_6_Bs_c_1_block0, TRN_6_Bs_offset_block0=TRN_6_Bs_offset_block0, aBegin_block0=aBegin_block0, aEnd_block0=aEnd_block0, aStep_block0=aStep_block0, a_block0=a_block0, bBegin_block0=bBegin_block0, bStep_block0=bStep_block0, b_block0=b_block0, blockIdx_block0=blockIdx_block0, c_block0=c_block0, guard_matrixMul_CMP_5_block0=guard_matrixMul_CMP_5_block0, guard_matrixMul_SNC_7_block0=guard_matrixMul_SNC_7_block0, guard_matrixMul_TRN_10_block0=guard_matrixMul_TRN_10_block0, guard_matrixMul_TRN_6_block0=guard_matrixMul_TRN_6_block0, k_block0=k_block0}
mId2sym: {Csub_block_block0=Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], TRN_10_Csub_block_X_0_block0=TRN_10_Csub_block_X_0_block0, TRN_10_Csub_block_c_1_block0=TRN_10_Csub_block_c_1_block0, TRN_10_Csub_block_offset_block0=TRN_10_Csub_block_offset_block0, TRN_6_Bs_X_0_block0=TRN_6_Bs_X_0_block0, TRN_6_Bs_c_1_block0=TRN_6_Bs_c_1_block0, TRN_6_Bs_offset_block0=TRN_6_Bs_offset_block0, aBegin_block0=aBegin_block0, aEnd_block0=aEnd_block0, aStep_block0=aStep_block0, a_block0=a_block0, bBegin_block0=bBegin_block0, bStep_block0=bStep_block0, b_block0=b_block0, blockIdx_block0=blockIdx_block0, c_block0=c_block0, guard_matrixMul_CMP_5_block0=guard_matrixMul_CMP_5_block0, guard_matrixMul_SNC_7_block0=guard_matrixMul_SNC_7_block0, guard_matrixMul_TRN_10_block0=guard_matrixMul_TRN_10_block0, guard_matrixMul_TRN_6_block0=guard_matrixMul_TRN_6_block0, k_block0=k_block0}
mId2sym: {Csub_block_block0=Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], TRN_10_Csub_block_X_0_block0=TRN_10_Csub_block_X_0_block0, TRN_10_Csub_block_c_1_block0=TRN_10_Csub_block_c_1_block0, TRN_10_Csub_block_offset_block0=TRN_10_Csub_block_offset_block0, TRN_6_Bs_X_0_block0=TRN_6_Bs_X_0_block0, TRN_6_Bs_c_1_block0=TRN_6_Bs_c_1_block0, TRN_6_Bs_offset_block0=TRN_6_Bs_offset_block0, aBegin_block0=aBegin_block0, aEnd_block0=aEnd_block0, aStep_block0=aStep_block0, a_block0=a_block0, bBegin_block0=bBegin_block0, bStep_block0=bStep_block0, b_block0=b_block0, blockIdx_block0=blockIdx_block0, c_block0=c_block0, guard_matrixMul_CMP_5_block0=guard_matrixMul_CMP_5_block0, guard_matrixMul_SNC_7_block0=guard_matrixMul_SNC_7_block0, guard_matrixMul_TRN_10_block0=guard_matrixMul_TRN_10_block0, guard_matrixMul_TRN_6_block0=guard_matrixMul_TRN_6_block0, k_block0=k_block0}
mId2sym: {Csub_block_block0=Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], TRN_10_Csub_block_X_0_block0=TRN_10_Csub_block_X_0_block0, TRN_10_Csub_block_c_1_block0=TRN_10_Csub_block_c_1_block0, TRN_10_Csub_block_offset_block0=TRN_10_Csub_block_offset_block0, TRN_6_Bs_X_0_block0=TRN_6_Bs_X_0_block0, TRN_6_Bs_c_1_block0=TRN_6_Bs_c_1_block0, TRN_6_Bs_offset_block0=TRN_6_Bs_offset_block0, aBegin_block0=aBegin_block0, aEnd_block0=aEnd_block0, aStep_block0=aStep_block0, a_block0=a_block0, bBegin_block0=bBegin_block0, bStep_block0=bStep_block0, b_block0=b_block0, blockIdx_block0=blockIdx_block0, c_block0=c_block0, guard_matrixMul_CMP_5_block0=guard_matrixMul_CMP_5_block0, guard_matrixMul_SNC_7_block0=guard_matrixMul_SNC_7_block0, guard_matrixMul_TRN_10_block0=guard_matrixMul_TRN_10_block0, guard_matrixMul_TRN_6_block0=guard_matrixMul_TRN_6_block0, k_block0=k_block0}
mId2sym: {Csub_block_block0=Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], TRN_10_Csub_block_X_0_block0=TRN_10_Csub_block_X_0_block0, TRN_10_Csub_block_c_1_block0=TRN_10_Csub_block_c_1_block0, TRN_10_Csub_block_offset_block0=TRN_10_Csub_block_offset_block0, TRN_6_Bs_X_0_block0=TRN_6_Bs_X_0_block0, TRN_6_Bs_c_1_block0=TRN_6_Bs_c_1_block0, TRN_6_Bs_offset_block0=TRN_6_Bs_offset_block0, aBegin_block0=aBegin_block0, aEnd_block0=aEnd_block0, aStep_block0=aStep_block0, a_block0=a_block0, bBegin_block0=bBegin_block0, bStep_block0=bStep_block0, b_block0=b_block0, blockIdx_block0=blockIdx_block0, c_block0=c_block0, guard_matrixMul_CMP_5_block0=guard_matrixMul_CMP_5_block0, guard_matrixMul_SNC_7_block0=guard_matrixMul_SNC_7_block0, guard_matrixMul_TRN_10_block0=guard_matrixMul_TRN_10_block0, guard_matrixMul_TRN_6_block0=guard_matrixMul_TRN_6_block0, k_block0=k_block0}
mId2sym: {Csub_block_block0=Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], TRN_10_Csub_block_X_0_block0=TRN_10_Csub_block_X_0_block0, TRN_10_Csub_block_c_1_block0=TRN_10_Csub_block_c_1_block0, TRN_10_Csub_block_offset_block0=TRN_10_Csub_block_offset_block0, TRN_6_Bs_X_0_block0=TRN_6_Bs_X_0_block0, TRN_6_Bs_c_1_block0=TRN_6_Bs_c_1_block0, TRN_6_Bs_offset_block0=TRN_6_Bs_offset_block0, aBegin_block0=aBegin_block0, aEnd_block0=aEnd_block0, aStep_block0=aStep_block0, a_block0=a_block0, bBegin_block0=bBegin_block0, bStep_block0=bStep_block0, b_block0=b_block0, blockIdx_block0=blockIdx_block0, c_block0=c_block0, guard_matrixMul_CMP_5_block0=guard_matrixMul_CMP_5_block0, guard_matrixMul_SNC_7_block0=guard_matrixMul_SNC_7_block0, guard_matrixMul_TRN_10_block0=guard_matrixMul_TRN_10_block0, guard_matrixMul_TRN_6_block0=guard_matrixMul_TRN_6_block0, k_block0=k_block0}
mId2sym: {Csub_block_block0=Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], TRN_10_Csub_block_X_0_block0=TRN_10_Csub_block_X_0_block0, TRN_10_Csub_block_c_1_block0=TRN_10_Csub_block_c_1_block0, TRN_10_Csub_block_offset_block0=TRN_10_Csub_block_offset_block0, TRN_6_Bs_X_0_block0=TRN_6_Bs_X_0_block0, TRN_6_Bs_c_1_block0=TRN_6_Bs_c_1_block0, TRN_6_Bs_offset_block0=TRN_6_Bs_offset_block0, aBegin_block0=aBegin_block0, aEnd_block0=aEnd_block0, aStep_block0=aStep_block0, a_block0=a_block0, bBegin_block0=bBegin_block0, bStep_block0=bStep_block0, b_block0=b_block0, blockIdx_block0=blockIdx_block0, c_block0=c_block0, guard_matrixMul_CMP_5_block0=guard_matrixMul_CMP_5_block0, guard_matrixMul_SNC_7_block0=guard_matrixMul_SNC_7_block0, guard_matrixMul_TRN_10_block0=guard_matrixMul_TRN_10_block0, guard_matrixMul_TRN_6_block0=guard_matrixMul_TRN_6_block0, k_block0=k_block0}
--- handleFcudaCore: matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block)
getCoreType for matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block)
--- of type COMPUTE:
mId2sym: {Csub_block_block0=Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], TRN_10_Csub_block_X_0_block0=TRN_10_Csub_block_X_0_block0, TRN_10_Csub_block_c_1_block0=TRN_10_Csub_block_c_1_block0, TRN_10_Csub_block_offset_block0=TRN_10_Csub_block_offset_block0, TRN_6_Bs_X_0_block0=TRN_6_Bs_X_0_block0, TRN_6_Bs_c_1_block0=TRN_6_Bs_c_1_block0, TRN_6_Bs_offset_block0=TRN_6_Bs_offset_block0, aBegin_block0=aBegin_block0, aEnd_block0=aEnd_block0, aStep_block0=aStep_block0, a_block0=a_block0, bBegin_block0=bBegin_block0, bStep_block0=bStep_block0, b_block0=b_block0, blockIdx_block0=blockIdx_block0, c_block0=c_block0, guard_matrixMul_CMP_5_block0=guard_matrixMul_CMP_5_block0, guard_matrixMul_SNC_7_block0=guard_matrixMul_SNC_7_block0, guard_matrixMul_TRN_10_block0=guard_matrixMul_TRN_10_block0, guard_matrixMul_TRN_6_block0=guard_matrixMul_TRN_6_block0, k_block0=k_block0}
mId2sym: {Csub_block_block0=Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], TRN_10_Csub_block_X_0_block0=TRN_10_Csub_block_X_0_block0, TRN_10_Csub_block_c_1_block0=TRN_10_Csub_block_c_1_block0, TRN_10_Csub_block_offset_block0=TRN_10_Csub_block_offset_block0, TRN_6_Bs_X_0_block0=TRN_6_Bs_X_0_block0, TRN_6_Bs_c_1_block0=TRN_6_Bs_c_1_block0, TRN_6_Bs_offset_block0=TRN_6_Bs_offset_block0, aBegin_block0=aBegin_block0, aEnd_block0=aEnd_block0, aStep_block0=aStep_block0, a_block0=a_block0, bBegin_block0=bBegin_block0, bStep_block0=bStep_block0, b_block0=b_block0, blockIdx_block0=blockIdx_block0, c_block0=c_block0, guard_matrixMul_CMP_5_block0=guard_matrixMul_CMP_5_block0, guard_matrixMul_SNC_7_block0=guard_matrixMul_SNC_7_block0, guard_matrixMul_TRN_10_block0=guard_matrixMul_TRN_10_block0, guard_matrixMul_TRN_6_block0=guard_matrixMul_TRN_6_block0, k_block0=k_block0}
mId2sym: {Csub_block_block0=Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], TRN_10_Csub_block_X_0_block0=TRN_10_Csub_block_X_0_block0, TRN_10_Csub_block_c_1_block0=TRN_10_Csub_block_c_1_block0, TRN_10_Csub_block_offset_block0=TRN_10_Csub_block_offset_block0, TRN_6_Bs_X_0_block0=TRN_6_Bs_X_0_block0, TRN_6_Bs_c_1_block0=TRN_6_Bs_c_1_block0, TRN_6_Bs_offset_block0=TRN_6_Bs_offset_block0, aBegin_block0=aBegin_block0, aEnd_block0=aEnd_block0, aStep_block0=aStep_block0, a_block0=a_block0, bBegin_block0=bBegin_block0, bStep_block0=bStep_block0, b_block0=b_block0, blockIdx_block0=blockIdx_block0, c_block0=c_block0, guard_matrixMul_CMP_5_block0=guard_matrixMul_CMP_5_block0, guard_matrixMul_SNC_7_block0=guard_matrixMul_SNC_7_block0, guard_matrixMul_TRN_10_block0=guard_matrixMul_TRN_10_block0, guard_matrixMul_TRN_6_block0=guard_matrixMul_TRN_6_block0, k_block0=k_block0}
mId2sym: {Csub_block_block0=Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], TRN_10_Csub_block_X_0_block0=TRN_10_Csub_block_X_0_block0, TRN_10_Csub_block_c_1_block0=TRN_10_Csub_block_c_1_block0, TRN_10_Csub_block_offset_block0=TRN_10_Csub_block_offset_block0, TRN_6_Bs_X_0_block0=TRN_6_Bs_X_0_block0, TRN_6_Bs_c_1_block0=TRN_6_Bs_c_1_block0, TRN_6_Bs_offset_block0=TRN_6_Bs_offset_block0, aBegin_block0=aBegin_block0, aEnd_block0=aEnd_block0, aStep_block0=aStep_block0, a_block0=a_block0, bBegin_block0=bBegin_block0, bStep_block0=bStep_block0, b_block0=b_block0, blockIdx_block0=blockIdx_block0, c_block0=c_block0, guard_matrixMul_CMP_5_block0=guard_matrixMul_CMP_5_block0, guard_matrixMul_SNC_7_block0=guard_matrixMul_SNC_7_block0, guard_matrixMul_TRN_10_block0=guard_matrixMul_TRN_10_block0, guard_matrixMul_TRN_6_block0=guard_matrixMul_TRN_6_block0, k_block0=k_block0}
mId2sym: {Csub_block_block0=Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], TRN_10_Csub_block_X_0_block0=TRN_10_Csub_block_X_0_block0, TRN_10_Csub_block_c_1_block0=TRN_10_Csub_block_c_1_block0, TRN_10_Csub_block_offset_block0=TRN_10_Csub_block_offset_block0, TRN_6_Bs_X_0_block0=TRN_6_Bs_X_0_block0, TRN_6_Bs_c_1_block0=TRN_6_Bs_c_1_block0, TRN_6_Bs_offset_block0=TRN_6_Bs_offset_block0, aBegin_block0=aBegin_block0, aEnd_block0=aEnd_block0, aStep_block0=aStep_block0, a_block0=a_block0, bBegin_block0=bBegin_block0, bStep_block0=bStep_block0, b_block0=b_block0, blockIdx_block0=blockIdx_block0, c_block0=c_block0, guard_matrixMul_CMP_5_block0=guard_matrixMul_CMP_5_block0, guard_matrixMul_SNC_7_block0=guard_matrixMul_SNC_7_block0, guard_matrixMul_TRN_10_block0=guard_matrixMul_TRN_10_block0, guard_matrixMul_TRN_6_block0=guard_matrixMul_TRN_6_block0, k_block0=k_block0}
mId2sym: {Csub_block_block0=Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], TRN_10_Csub_block_X_0_block0=TRN_10_Csub_block_X_0_block0, TRN_10_Csub_block_c_1_block0=TRN_10_Csub_block_c_1_block0, TRN_10_Csub_block_offset_block0=TRN_10_Csub_block_offset_block0, TRN_6_Bs_X_0_block0=TRN_6_Bs_X_0_block0, TRN_6_Bs_c_1_block0=TRN_6_Bs_c_1_block0, TRN_6_Bs_offset_block0=TRN_6_Bs_offset_block0, aBegin_block0=aBegin_block0, aEnd_block0=aEnd_block0, aStep_block0=aStep_block0, a_block0=a_block0, bBegin_block0=bBegin_block0, bStep_block0=bStep_block0, b_block0=b_block0, blockIdx_block0=blockIdx_block0, c_block0=c_block0, guard_matrixMul_CMP_5_block0=guard_matrixMul_CMP_5_block0, guard_matrixMul_SNC_7_block0=guard_matrixMul_SNC_7_block0, guard_matrixMul_TRN_10_block0=guard_matrixMul_TRN_10_block0, guard_matrixMul_TRN_6_block0=guard_matrixMul_TRN_6_block0, k_block0=k_block0}
mId2sym: {Csub_block_block0=Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], TRN_10_Csub_block_X_0_block0=TRN_10_Csub_block_X_0_block0, TRN_10_Csub_block_c_1_block0=TRN_10_Csub_block_c_1_block0, TRN_10_Csub_block_offset_block0=TRN_10_Csub_block_offset_block0, TRN_6_Bs_X_0_block0=TRN_6_Bs_X_0_block0, TRN_6_Bs_c_1_block0=TRN_6_Bs_c_1_block0, TRN_6_Bs_offset_block0=TRN_6_Bs_offset_block0, aBegin_block0=aBegin_block0, aEnd_block0=aEnd_block0, aStep_block0=aStep_block0, a_block0=a_block0, bBegin_block0=bBegin_block0, bStep_block0=bStep_block0, b_block0=b_block0, blockIdx_block0=blockIdx_block0, c_block0=c_block0, guard_matrixMul_CMP_5_block0=guard_matrixMul_CMP_5_block0, guard_matrixMul_SNC_7_block0=guard_matrixMul_SNC_7_block0, guard_matrixMul_TRN_10_block0=guard_matrixMul_TRN_10_block0, guard_matrixMul_TRN_6_block0=guard_matrixMul_TRN_6_block0, k_block0=k_block0}
mId2sym: {Csub_block_block0=Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], TRN_10_Csub_block_X_0_block0=TRN_10_Csub_block_X_0_block0, TRN_10_Csub_block_c_1_block0=TRN_10_Csub_block_c_1_block0, TRN_10_Csub_block_offset_block0=TRN_10_Csub_block_offset_block0, TRN_6_Bs_X_0_block0=TRN_6_Bs_X_0_block0, TRN_6_Bs_c_1_block0=TRN_6_Bs_c_1_block0, TRN_6_Bs_offset_block0=TRN_6_Bs_offset_block0, aBegin_block0=aBegin_block0, aEnd_block0=aEnd_block0, aStep_block0=aStep_block0, a_block0=a_block0, bBegin_block0=bBegin_block0, bStep_block0=bStep_block0, b_block0=b_block0, blockIdx_block0=blockIdx_block0, c_block0=c_block0, guard_matrixMul_CMP_5_block0=guard_matrixMul_CMP_5_block0, guard_matrixMul_SNC_7_block0=guard_matrixMul_SNC_7_block0, guard_matrixMul_TRN_10_block0=guard_matrixMul_TRN_10_block0, guard_matrixMul_TRN_6_block0=guard_matrixMul_TRN_6_block0, k_block0=k_block0}
mId2sym: {Csub_block_block0=Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], TRN_10_Csub_block_X_0_block0=TRN_10_Csub_block_X_0_block0, TRN_10_Csub_block_c_1_block0=TRN_10_Csub_block_c_1_block0, TRN_10_Csub_block_offset_block0=TRN_10_Csub_block_offset_block0, TRN_6_Bs_X_0_block0=TRN_6_Bs_X_0_block0, TRN_6_Bs_c_1_block0=TRN_6_Bs_c_1_block0, TRN_6_Bs_offset_block0=TRN_6_Bs_offset_block0, aBegin_block0=aBegin_block0, aEnd_block0=aEnd_block0, aStep_block0=aStep_block0, a_block0=a_block0, bBegin_block0=bBegin_block0, bStep_block0=bStep_block0, b_block0=b_block0, blockIdx_block0=blockIdx_block0, c_block0=c_block0, guard_matrixMul_CMP_5_block0=guard_matrixMul_CMP_5_block0, guard_matrixMul_SNC_7_block0=guard_matrixMul_SNC_7_block0, guard_matrixMul_TRN_10_block0=guard_matrixMul_TRN_10_block0, guard_matrixMul_TRN_6_block0=guard_matrixMul_TRN_6_block0, k_block0=k_block0}
--- handleFcudaCore: matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB)
getCoreType for matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB)
getCoreType for matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB)
--- of typee TRANSFER:
mId2sym: {Csub_block_block0=Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], TRN_10_Csub_block_X_0_block0=TRN_10_Csub_block_X_0_block0, TRN_10_Csub_block_c_1_block0=TRN_10_Csub_block_c_1_block0, TRN_10_Csub_block_offset_block0=TRN_10_Csub_block_offset_block0, TRN_6_Bs_X_0_block0=TRN_6_Bs_X_0_block0, TRN_6_Bs_c_1_block0=TRN_6_Bs_c_1_block0, TRN_6_Bs_offset_block0=TRN_6_Bs_offset_block0, aBegin_block0=aBegin_block0, aEnd_block0=aEnd_block0, aStep_block0=aStep_block0, a_block0=a_block0, bBegin_block0=bBegin_block0, bStep_block0=bStep_block0, b_block0=b_block0, blockIdx_block0=blockIdx_block0, c_block0=c_block0, guard_matrixMul_CMP_5_block0=guard_matrixMul_CMP_5_block0, guard_matrixMul_SNC_7_block0=guard_matrixMul_SNC_7_block0, guard_matrixMul_TRN_10_block0=guard_matrixMul_TRN_10_block0, guard_matrixMul_TRN_6_block0=guard_matrixMul_TRN_6_block0, k_block0=k_block0}
Treating arguments of call: matrixMul_TRN_10(guard_matrixMul_TRN_10_block0, blockDim, gridDim, blockIdx_block0, C, Csub_block_block0, c_block0, wB)
Arg #0: guard_matrixMul_TRN_10_block0
getCoreName for matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB)
getCoreName for matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB)
Arg #1: blockDim
Arg #2: gridDim
Arg #3: blockIdx_block0
getCoreName for matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB)
getCoreName for matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB)
Arg #4: C
Arg #5: Csub_block_block0
Arg #6: c_block0
Arg #7: wB
getCommonArgsIndex for matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB)
----addStatementBefore----index is:16
----addStatementBefore----index is:12
----addStatementBefore----index is:17
----addStatementBefore----index is:3
----addStatementBefore----index is:15
----addStatementBefore----index is:24
----addStatementBefore----index is:2
----addStatementBefore----index is:13
----addStatementBefore----index is:1
----addStatementBefore----index is:22
----addStatementBefore----index is:9
----addStatementBefore----index is:14
----addStatementBefore----index is:10
----addStatementBefore----index is:23
----addStatementBefore----index is:4
----addStatementBefore----index is:18
----addStatementBefore----index is:21
----addStatementBefore----index is:8
----addStatementBefore----index is:11
----addStatementBefore----index is:0
----addStatementBefore----index is:0
... handleCompoundStatement finished!
-----kernelTransformPass, finish transformProcedure-----, proc now is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB, dim3 gridDim, dim3 blockDim)
{
int aBegin_block0;
int aEnd_block0;
int aStep_block0;
int bBegin_block0;
int bStep_block0;
__shared__ DATATYPE Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X];
int a_block0;
int b_block0;
int k_block0;
int c_block0;
dim3 blockIdx_block0;
int guard_matrixMul_CMP_5_block0;
int guard_matrixMul_TRN_10_block0;
int guard_matrixMul_TRN_6_block0;
int guard_matrixMul_SNC_7_block0;
int TRN_6_Bs_offset_block0;
int TRN_6_Bs_X_0_block0;
int TRN_6_Bs_c_1_block0;
int TRN_10_Csub_block_offset_block0;
int TRN_10_Csub_block_X_0_block0;
int TRN_10_Csub_block_c_1_block0;
dim3 blockIdx_loop;
for (blockIdx_loop.y=0; (gridDim.y+(-1*blockIdx_loop.y))>0; blockIdx_loop.y+=1)
{
blockIdx_block0.y=blockIdx_loop.y;
for (blockIdx_loop.x=0; (gridDim.x+(-1*blockIdx_loop.x))>0; blockIdx_loop.x+=1)
{
blockIdx_block0.x=(blockIdx_loop.x+0);
guard_matrixMul_SNC_7_block0=1;
guard_matrixMul_TRN_6_block0=1;
guard_matrixMul_TRN_10_block0=1;
guard_matrixMul_CMP_5_block0=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin_block0=((wA*16)*blockIdx_block0.y);
aEnd_block0=((aBegin_block0+wA)-1);
aStep_block0=16;
bBegin_block0=(16*blockIdx_block0.x);
bStep_block0=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5_block0, blockDim, gridDim, blockIdx_block0, Csub_block_block0);
a_block0=0;
b_block0=0;
k_block0=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a_block0=aBegin_block0), (b_block0=bBegin_block0)); a_block0<=aEnd_block0; ((a_block0+=aStep_block0), (b_block0+=bStep_block0)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset_block0=0;
TRN_6_Bs_X_0_block0=b_block0;
TRN_6_Bs_c_1_block0=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6_block0, blockDim, gridDim, blockIdx_block0, As, A, a_block0, wA, Bs, B, b_block0, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7_block0, blockDim, gridDim, blockIdx_block0, Csub_block_block0, As, Bs);
}
c_block0=(((wB*16)*blockIdx_block0.y)+(16*blockIdx_block0.x));
TRN_10_Csub_block_offset_block0=0;
TRN_10_Csub_block_X_0_block0=c_block0;
TRN_10_Csub_block_c_1_block0=wB;
matrixMul_TRN_10_wrapper(guard_matrixMul_TRN_10_block0, blockDim, gridDim, blockIdx_block0, C, Csub_block_block0, c_block0, wB);
}
}
}
[DuplicateForFCUDA2-FCUDA] end in 0.01 seconds
[LinkSymbol] 146 updates in 0.00 seconds
*** After DuplicateForFCUDA2 ***
#include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
const int BLOCKDIM_X = 16, BLOCKDIM_Y = 16;
#pragma fcuda compute name=CMP_5 end=false cores=1 begin=true
void matrixMul_CMP_5(int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X])
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_CMP_5)
{
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
}
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
}
return ;
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
void matrixMul_TRN_10(int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB)
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_TRN_10)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
}
return ;
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
void matrixMul_TRN_6(int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE As[16][16], DATATYPE * A, int a, int wA, __shared__ DATATYPE Bs[16][16], DATATYPE * B, int b, int wB)
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_TRN_6)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
}
return ;
}
#pragma fcuda compute name=SNC_7 end=false cores=1 begin=true
void matrixMul_SNC_7(int guard_matrixMul_SNC_7, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], __shared__ DATATYPE As[16][16], __shared__ DATATYPE Bs[16][16])
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_SNC_7)
{
int k;
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
return ;
}
void matrixMul_TRN_10_wrapper(int guard_matrixMul_TRN_10_block0, dim3 blockDim, dim3 gridDim, dim3 blockIdx_matrixMul_TRN_10_block0, DATATYPE * C, __shared__ DATATYPE Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], int c_block0, int wB)
{
matrixMul_TRN_10(guard_matrixMul_TRN_10_block0, blockDim, gridDim, blockIdx_matrixMul_TRN_10_block0, C, Csub_block_block0, c_block0, wB);
}
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB, dim3 gridDim, dim3 blockDim)
{
int aBegin_block0;
int aEnd_block0;
int aStep_block0;
int bBegin_block0;
int bStep_block0;
__shared__ DATATYPE Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X];
int a_block0;
int b_block0;
int k_block0;
int c_block0;
dim3 blockIdx_block0;
int guard_matrixMul_CMP_5_block0;
int guard_matrixMul_TRN_10_block0;
int guard_matrixMul_TRN_6_block0;
int guard_matrixMul_SNC_7_block0;
int TRN_6_Bs_offset_block0;
int TRN_6_Bs_X_0_block0;
int TRN_6_Bs_c_1_block0;
int TRN_10_Csub_block_offset_block0;
int TRN_10_Csub_block_X_0_block0;
int TRN_10_Csub_block_c_1_block0;
dim3 blockIdx_loop;
for (blockIdx_loop.y=0; (gridDim.y+(-1*blockIdx_loop.y))>0; blockIdx_loop.y+=1)
{
blockIdx_block0.y=blockIdx_loop.y;
for (blockIdx_loop.x=0; (gridDim.x+(-1*blockIdx_loop.x))>0; blockIdx_loop.x+=1)
{
blockIdx_block0.x=(blockIdx_loop.x+0);
guard_matrixMul_SNC_7_block0=1;
guard_matrixMul_TRN_6_block0=1;
guard_matrixMul_TRN_10_block0=1;
guard_matrixMul_CMP_5_block0=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin_block0=((wA*16)*blockIdx_block0.y);
aEnd_block0=((aBegin_block0+wA)-1);
aStep_block0=16;
bBegin_block0=(16*blockIdx_block0.x);
bStep_block0=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5_block0, blockDim, gridDim, blockIdx_block0, Csub_block_block0);
a_block0=0;
b_block0=0;
k_block0=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a_block0=aBegin_block0), (b_block0=bBegin_block0)); a_block0<=aEnd_block0; ((a_block0+=aStep_block0), (b_block0+=bStep_block0)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset_block0=0;
TRN_6_Bs_X_0_block0=b_block0;
TRN_6_Bs_c_1_block0=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6_block0, blockDim, gridDim, blockIdx_block0, As, A, a_block0, wA, Bs, B, b_block0, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7_block0, blockDim, gridDim, blockIdx_block0, Csub_block_block0, As, Bs);
}
c_block0=(((wB*16)*blockIdx_block0.y)+(16*blockIdx_block0.x));
TRN_10_Csub_block_offset_block0=0;
TRN_10_Csub_block_X_0_block0=c_block0;
TRN_10_Csub_block_c_1_block0=wB;
matrixMul_TRN_10_wrapper(guard_matrixMul_TRN_10_block0, blockDim, gridDim, blockIdx_block0, C, Csub_block_block0, c_block0, wB);
}
}
}
===========================================
[CleanSyncFunc-MCUDA] begin
[CleanSyncFunc-MCUDA] examining procedure matrixMul
-----kernelTransformPass, finish transformProcedure-----, proc now is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB, dim3 gridDim, dim3 blockDim)
{
int aBegin_block0;
int aEnd_block0;
int aStep_block0;
int bBegin_block0;
int bStep_block0;
__shared__ DATATYPE Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X];
int a_block0;
int b_block0;
int k_block0;
int c_block0;
dim3 blockIdx_block0;
int guard_matrixMul_CMP_5_block0;
int guard_matrixMul_TRN_10_block0;
int guard_matrixMul_TRN_6_block0;
int guard_matrixMul_SNC_7_block0;
int TRN_6_Bs_offset_block0;
int TRN_6_Bs_X_0_block0;
int TRN_6_Bs_c_1_block0;
int TRN_10_Csub_block_offset_block0;
int TRN_10_Csub_block_X_0_block0;
int TRN_10_Csub_block_c_1_block0;
dim3 blockIdx_loop;
for (blockIdx_loop.y=0; (gridDim.y+(-1*blockIdx_loop.y))>0; blockIdx_loop.y+=1)
{
blockIdx_block0.y=blockIdx_loop.y;
for (blockIdx_loop.x=0; (gridDim.x+(-1*blockIdx_loop.x))>0; blockIdx_loop.x+=1)
{
blockIdx_block0.x=(blockIdx_loop.x+0);
guard_matrixMul_SNC_7_block0=1;
guard_matrixMul_TRN_6_block0=1;
guard_matrixMul_TRN_10_block0=1;
guard_matrixMul_CMP_5_block0=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin_block0=((wA*16)*blockIdx_block0.y);
aEnd_block0=((aBegin_block0+wA)-1);
aStep_block0=16;
bBegin_block0=(16*blockIdx_block0.x);
bStep_block0=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5_block0, blockDim, gridDim, blockIdx_block0, Csub_block_block0);
a_block0=0;
b_block0=0;
k_block0=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a_block0=aBegin_block0), (b_block0=bBegin_block0)); a_block0<=aEnd_block0; ((a_block0+=aStep_block0), (b_block0+=bStep_block0)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset_block0=0;
TRN_6_Bs_X_0_block0=b_block0;
TRN_6_Bs_c_1_block0=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6_block0, blockDim, gridDim, blockIdx_block0, As, A, a_block0, wA, Bs, B, b_block0, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7_block0, blockDim, gridDim, blockIdx_block0, Csub_block_block0, As, Bs);
}
c_block0=(((wB*16)*blockIdx_block0.y)+(16*blockIdx_block0.x));
TRN_10_Csub_block_offset_block0=0;
TRN_10_Csub_block_X_0_block0=c_block0;
TRN_10_Csub_block_c_1_block0=wB;
matrixMul_TRN_10_wrapper(guard_matrixMul_TRN_10_block0, blockDim, gridDim, blockIdx_block0, C, Csub_block_block0, c_block0, wB);
}
}
}
[CleanSyncFunc-MCUDA] end in 0.00 seconds
[LinkSymbol] 144 updates in 0.00 seconds
*** After CleanSyncFunc ***
#include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
const int BLOCKDIM_X = 16, BLOCKDIM_Y = 16;
#pragma fcuda compute name=CMP_5 end=false cores=1 begin=true
void matrixMul_CMP_5(int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X])
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_CMP_5)
{
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
}
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
}
return ;
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
void matrixMul_TRN_10(int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB)
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_TRN_10)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
}
return ;
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
void matrixMul_TRN_6(int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE As[16][16], DATATYPE * A, int a, int wA, __shared__ DATATYPE Bs[16][16], DATATYPE * B, int b, int wB)
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_TRN_6)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
}
return ;
}
#pragma fcuda compute name=SNC_7 end=false cores=1 begin=true
void matrixMul_SNC_7(int guard_matrixMul_SNC_7, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], __shared__ DATATYPE As[16][16], __shared__ DATATYPE Bs[16][16])
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_SNC_7)
{
int k;
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
{
lp1:
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
}
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
return ;
}
void matrixMul_TRN_10_wrapper(int guard_matrixMul_TRN_10_block0, dim3 blockDim, dim3 gridDim, dim3 blockIdx_matrixMul_TRN_10_block0, DATATYPE * C, __shared__ DATATYPE Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], int c_block0, int wB)
{
matrixMul_TRN_10(guard_matrixMul_TRN_10_block0, blockDim, gridDim, blockIdx_matrixMul_TRN_10_block0, C, Csub_block_block0, c_block0, wB);
}
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB, dim3 gridDim, dim3 blockDim)
{
int aBegin_block0;
int aEnd_block0;
int aStep_block0;
int bBegin_block0;
int bStep_block0;
__shared__ DATATYPE Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X];
int a_block0;
int b_block0;
int k_block0;
int c_block0;
dim3 blockIdx_block0;
int guard_matrixMul_CMP_5_block0;
int guard_matrixMul_TRN_10_block0;
int guard_matrixMul_TRN_6_block0;
int guard_matrixMul_SNC_7_block0;
int TRN_6_Bs_offset_block0;
int TRN_6_Bs_X_0_block0;
int TRN_6_Bs_c_1_block0;
int TRN_10_Csub_block_offset_block0;
int TRN_10_Csub_block_X_0_block0;
int TRN_10_Csub_block_c_1_block0;
dim3 blockIdx_loop;
for (blockIdx_loop.y=0; (gridDim.y+(-1*blockIdx_loop.y))>0; blockIdx_loop.y+=1)
{
blockIdx_block0.y=blockIdx_loop.y;
for (blockIdx_loop.x=0; (gridDim.x+(-1*blockIdx_loop.x))>0; blockIdx_loop.x+=1)
{
blockIdx_block0.x=(blockIdx_loop.x+0);
guard_matrixMul_SNC_7_block0=1;
guard_matrixMul_TRN_6_block0=1;
guard_matrixMul_TRN_10_block0=1;
guard_matrixMul_CMP_5_block0=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin_block0=((wA*16)*blockIdx_block0.y);
aEnd_block0=((aBegin_block0+wA)-1);
aStep_block0=16;
bBegin_block0=(16*blockIdx_block0.x);
bStep_block0=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5_block0, blockDim, gridDim, blockIdx_block0, Csub_block_block0);
a_block0=0;
b_block0=0;
k_block0=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a_block0=aBegin_block0), (b_block0=bBegin_block0)); a_block0<=aEnd_block0; ((a_block0+=aStep_block0), (b_block0+=bStep_block0)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset_block0=0;
TRN_6_Bs_X_0_block0=b_block0;
TRN_6_Bs_c_1_block0=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6_block0, blockDim, gridDim, blockIdx_block0, As, A, a_block0, wA, Bs, B, b_block0, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7_block0, blockDim, gridDim, blockIdx_block0, Csub_block_block0, As, Bs);
}
c_block0=(((wB*16)*blockIdx_block0.y)+(16*blockIdx_block0.x));
TRN_10_Csub_block_offset_block0=0;
TRN_10_Csub_block_X_0_block0=c_block0;
TRN_10_Csub_block_c_1_block0=wB;
matrixMul_TRN_10_wrapper(guard_matrixMul_TRN_10_block0, blockDim, gridDim, blockIdx_block0, C, Csub_block_block0, c_block0, wB);
}
}
}
===========================================
*** After CleanLaunches ***
#include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
const int BLOCKDIM_X = 16, BLOCKDIM_Y = 16;
#pragma fcuda compute name=CMP_5 end=false cores=1 begin=true
void matrixMul_CMP_5(int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X])
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_CMP_5)
{
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
}
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
}
return ;
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
void matrixMul_TRN_10(int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB)
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_TRN_10)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
}
return ;
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
void matrixMul_TRN_6(int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE As[16][16], DATATYPE * A, int a, int wA, __shared__ DATATYPE Bs[16][16], DATATYPE * B, int b, int wB)
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_TRN_6)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
}
return ;
}
#pragma fcuda compute name=SNC_7 end=false cores=1 begin=true
void matrixMul_SNC_7(int guard_matrixMul_SNC_7, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], __shared__ DATATYPE As[16][16], __shared__ DATATYPE Bs[16][16])
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_SNC_7)
{
int k;
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
{
lp1:
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
}
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
return ;
}
void matrixMul_TRN_10_wrapper(int guard_matrixMul_TRN_10_block0, dim3 blockDim, dim3 gridDim, dim3 blockIdx_matrixMul_TRN_10_block0, DATATYPE * C, __shared__ DATATYPE Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], int c_block0, int wB)
{
matrixMul_TRN_10(guard_matrixMul_TRN_10_block0, blockDim, gridDim, blockIdx_matrixMul_TRN_10_block0, C, Csub_block_block0, c_block0, wB);
}
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB, dim3 gridDim, dim3 blockDim)
{
int aBegin_block0;
int aEnd_block0;
int aStep_block0;
int bBegin_block0;
int bStep_block0;
__shared__ DATATYPE Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X];
int a_block0;
int b_block0;
int k_block0;
int c_block0;
dim3 blockIdx_block0;
int guard_matrixMul_CMP_5_block0;
int guard_matrixMul_TRN_10_block0;
int guard_matrixMul_TRN_6_block0;
int guard_matrixMul_SNC_7_block0;
int TRN_6_Bs_offset_block0;
int TRN_6_Bs_X_0_block0;
int TRN_6_Bs_c_1_block0;
int TRN_10_Csub_block_offset_block0;
int TRN_10_Csub_block_X_0_block0;
int TRN_10_Csub_block_c_1_block0;
dim3 blockIdx_loop;
for (blockIdx_loop.y=0; (gridDim.y+(-1*blockIdx_loop.y))>0; blockIdx_loop.y+=1)
{
blockIdx_block0.y=blockIdx_loop.y;
for (blockIdx_loop.x=0; (gridDim.x+(-1*blockIdx_loop.x))>0; blockIdx_loop.x+=1)
{
blockIdx_block0.x=(blockIdx_loop.x+0);
guard_matrixMul_SNC_7_block0=1;
guard_matrixMul_TRN_6_block0=1;
guard_matrixMul_TRN_10_block0=1;
guard_matrixMul_CMP_5_block0=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin_block0=((wA*16)*blockIdx_block0.y);
aEnd_block0=((aBegin_block0+wA)-1);
aStep_block0=16;
bBegin_block0=(16*blockIdx_block0.x);
bStep_block0=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5_block0, blockDim, gridDim, blockIdx_block0, Csub_block_block0);
a_block0=0;
b_block0=0;
k_block0=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a_block0=aBegin_block0), (b_block0=bBegin_block0)); a_block0<=aEnd_block0; ((a_block0+=aStep_block0), (b_block0+=bStep_block0)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset_block0=0;
TRN_6_Bs_X_0_block0=b_block0;
TRN_6_Bs_c_1_block0=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6_block0, blockDim, gridDim, blockIdx_block0, As, A, a_block0, wA, Bs, B, b_block0, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7_block0, blockDim, gridDim, blockIdx_block0, Csub_block_block0, As, Bs);
}
c_block0=(((wB*16)*blockIdx_block0.y)+(16*blockIdx_block0.x));
TRN_10_Csub_block_offset_block0=0;
TRN_10_Csub_block_X_0_block0=c_block0;
TRN_10_Csub_block_c_1_block0=wB;
matrixMul_TRN_10_wrapper(guard_matrixMul_TRN_10_block0, blockDim, gridDim, blockIdx_block0, C, Csub_block_block0, c_block0, wB);
}
}
}
*** After ClearCUDASpecs ***
#include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
const int BLOCKDIM_X = 16, BLOCKDIM_Y = 16;
#pragma fcuda compute name=CMP_5 end=false cores=1 begin=true
void matrixMul_CMP_5(int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X])
{
dim3 threadIdx;
if (guard_matrixMul_CMP_5)
{
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
Csub_block[threadIdx.y][threadIdx.x]=0;
}
}
return ;
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
void matrixMul_TRN_10(int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB)
{
dim3 threadIdx;
if (guard_matrixMul_TRN_10)
{
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
}
}
return ;
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
void matrixMul_TRN_6(int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE As[16][16], DATATYPE * A, int a, int wA, DATATYPE Bs[16][16], DATATYPE * B, int b, int wB)
{
dim3 threadIdx;
if (guard_matrixMul_TRN_6)
{
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
}
}
return ;
}
#pragma fcuda compute name=SNC_7 end=false cores=1 begin=true
void matrixMul_SNC_7(int guard_matrixMul_SNC_7, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], DATATYPE As[16][16], DATATYPE Bs[16][16])
{
dim3 threadIdx;
if (guard_matrixMul_SNC_7)
{
int k;
{
lp1:
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
for (k=0; k<16; ++ k)
{
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
}
}
return ;
}
void matrixMul_TRN_10_wrapper(int guard_matrixMul_TRN_10_block0, dim3 blockDim, dim3 gridDim, dim3 blockIdx_matrixMul_TRN_10_block0, DATATYPE * C, DATATYPE Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], int c_block0, int wB)
{
matrixMul_TRN_10(guard_matrixMul_TRN_10_block0, blockDim, gridDim, blockIdx_matrixMul_TRN_10_block0, C, Csub_block_block0, c_block0, wB);
}
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB, dim3 gridDim, dim3 blockDim)
{
int aBegin_block0;
int aEnd_block0;
int aStep_block0;
int bBegin_block0;
int bStep_block0;
DATATYPE Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X];
int a_block0;
int b_block0;
int k_block0;
int c_block0;
dim3 blockIdx_block0;
int guard_matrixMul_CMP_5_block0;
int guard_matrixMul_TRN_10_block0;
int guard_matrixMul_TRN_6_block0;
int guard_matrixMul_SNC_7_block0;
int TRN_6_Bs_offset_block0;
int TRN_6_Bs_X_0_block0;
int TRN_6_Bs_c_1_block0;
int TRN_10_Csub_block_offset_block0;
int TRN_10_Csub_block_X_0_block0;
int TRN_10_Csub_block_c_1_block0;
dim3 blockIdx_loop;
for (blockIdx_loop.y=0; (gridDim.y+(-1*blockIdx_loop.y))>0; blockIdx_loop.y+=1)
{
blockIdx_block0.y=blockIdx_loop.y;
for (blockIdx_loop.x=0; (gridDim.x+(-1*blockIdx_loop.x))>0; blockIdx_loop.x+=1)
{
blockIdx_block0.x=(blockIdx_loop.x+0);
guard_matrixMul_SNC_7_block0=1;
guard_matrixMul_TRN_6_block0=1;
guard_matrixMul_TRN_10_block0=1;
guard_matrixMul_CMP_5_block0=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin_block0=((wA*16)*blockIdx_block0.y);
aEnd_block0=((aBegin_block0+wA)-1);
aStep_block0=16;
bBegin_block0=(16*blockIdx_block0.x);
bStep_block0=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5_block0, blockDim, gridDim, blockIdx_block0, Csub_block_block0);
a_block0=0;
b_block0=0;
k_block0=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a_block0=aBegin_block0), (b_block0=bBegin_block0)); a_block0<=aEnd_block0; ((a_block0+=aStep_block0), (b_block0+=bStep_block0)))
{
DATATYPE As[16][16];
DATATYPE Bs[16][16];
TRN_6_Bs_offset_block0=0;
TRN_6_Bs_X_0_block0=b_block0;
TRN_6_Bs_c_1_block0=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6_block0, blockDim, gridDim, blockIdx_block0, As, A, a_block0, wA, Bs, B, b_block0, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7_block0, blockDim, gridDim, blockIdx_block0, Csub_block_block0, As, Bs);
}
c_block0=(((wB*16)*blockIdx_block0.y)+(16*blockIdx_block0.x));
TRN_10_Csub_block_offset_block0=0;
TRN_10_Csub_block_X_0_block0=c_block0;
TRN_10_Csub_block_c_1_block0=wB;
matrixMul_TRN_10_wrapper(guard_matrixMul_TRN_10_block0, blockDim, gridDim, blockIdx_block0, C, Csub_block_block0, c_block0, wB);
}
}
}
| 8c592c812fb4e533bc454626d066096c4dfaa3a3.cu | [LinkSymbol] 69 updates in 0.01 seconds
[AnnotParser] begin
Token #
Token pragma
Token FCUDA
Token GRID
Token x_dim=16
Token y_dim=16
Token #
Token pragma
Token FCUDA
Token COREINFO
Token num_cores=1
Token pipeline=no
----addStatementBefore----index is:0
----addStatementBefore----index is:1
----addStatementBefore----index is:2
[AnnotParser] end in 0.05 seconds
WARNING: Fcuda flag is set but mcuda_nolib is not set
WARNING: turning on mcuda_nolib
WARNING: mcuda_nolib flag is set but serialThr is not set
WARNING: turning on serialThr
... Setting: FLOW = 3
TASK DECOMP = 2
CODE MOTION = 1
*** Before Any Passes ***
#include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx = blockIdx.x;
int by = blockIdx.y;
int aBegin = ((wA*16)*by);
int aEnd = ((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
int a = 0, b = 0, k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
===========================================
[SingleDeclarator] begin
-------------working on eliminating: signed char x, y
------eliminating: signed char x, y its child is: signed char x, y its parent is: signed char x, y;
------parent is instance of declaration statement, child was: signed char x, y parent was: signed char x, y; outer was: null
------now child becomes: signed char x, y; parent becomes: struct char2
{
signed char x, y;
};
outer becomes: struct char2
{
signed char x, y;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct char2
{
signed char x, y;
;
};
------after parent.removeChild(child), parent is: struct char2
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct char2
{
;
signed char y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct char2
{
;
signed char y;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct char2
{
;
signed char x;
signed char y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct char2
{
;
signed char x;
signed char y;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct char2
{
signed char x;
signed char y;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct char2
{
signed char x;
signed char y;
};
------Done with eliminateMultipleDeclarators, d now is: signed char x, y
-------------working on eliminating: unsigned char x, y
------eliminating: unsigned char x, y its child is: unsigned char x, y its parent is: unsigned char x, y;
------parent is instance of declaration statement, child was: unsigned char x, y parent was: unsigned char x, y; outer was: null
------now child becomes: unsigned char x, y; parent becomes: struct uchar2
{
unsigned char x, y;
};
outer becomes: struct uchar2
{
unsigned char x, y;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct uchar2
{
unsigned char x, y;
;
};
------after parent.removeChild(child), parent is: struct uchar2
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct uchar2
{
;
unsigned char y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct uchar2
{
;
unsigned char y;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct uchar2
{
;
unsigned char x;
unsigned char y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct uchar2
{
;
unsigned char x;
unsigned char y;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct uchar2
{
unsigned char x;
unsigned char y;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct uchar2
{
unsigned char x;
unsigned char y;
};
------Done with eliminateMultipleDeclarators, d now is: unsigned char x, y
-------------working on eliminating: signed char x, y, z
------eliminating: signed char x, y, z its child is: signed char x, y, z its parent is: signed char x, y, z;
------parent is instance of declaration statement, child was: signed char x, y, z parent was: signed char x, y, z; outer was: null
------now child becomes: signed char x, y, z; parent becomes: struct char3
{
signed char x, y, z;
};
outer becomes: struct char3
{
signed char x, y, z;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct char3
{
signed char x, y, z;
;
};
------after parent.removeChild(child), parent is: struct char3
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 2
------after, Declarator d = decl.getDeclarator(i), d is: z
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct char3
{
;
signed char z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct char3
{
;
signed char z;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct char3
{
;
signed char y;
signed char z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct char3
{
;
signed char y;
signed char z;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct char3
{
;
signed char x;
signed char y;
signed char z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct char3
{
;
signed char x;
signed char y;
signed char z;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct char3
{
signed char x;
signed char y;
signed char z;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct char3
{
signed char x;
signed char y;
signed char z;
};
------Done with eliminateMultipleDeclarators, d now is: signed char x, y, z
-------------working on eliminating: unsigned char x, y, z
------eliminating: unsigned char x, y, z its child is: unsigned char x, y, z its parent is: unsigned char x, y, z;
------parent is instance of declaration statement, child was: unsigned char x, y, z parent was: unsigned char x, y, z; outer was: null
------now child becomes: unsigned char x, y, z; parent becomes: struct uchar3
{
unsigned char x, y, z;
};
outer becomes: struct uchar3
{
unsigned char x, y, z;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct uchar3
{
unsigned char x, y, z;
;
};
------after parent.removeChild(child), parent is: struct uchar3
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 2
------after, Declarator d = decl.getDeclarator(i), d is: z
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct uchar3
{
;
unsigned char z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct uchar3
{
;
unsigned char z;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct uchar3
{
;
unsigned char y;
unsigned char z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct uchar3
{
;
unsigned char y;
unsigned char z;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct uchar3
{
;
unsigned char x;
unsigned char y;
unsigned char z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct uchar3
{
;
unsigned char x;
unsigned char y;
unsigned char z;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct uchar3
{
unsigned char x;
unsigned char y;
unsigned char z;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct uchar3
{
unsigned char x;
unsigned char y;
unsigned char z;
};
------Done with eliminateMultipleDeclarators, d now is: unsigned char x, y, z
-------------working on eliminating: signed char x, y, z, w
------eliminating: signed char x, y, z, w its child is: signed char x, y, z, w its parent is: signed char x, y, z, w;
------parent is instance of declaration statement, child was: signed char x, y, z, w parent was: signed char x, y, z, w; outer was: null
------now child becomes: signed char x, y, z, w; parent becomes: struct char4
{
signed char x, y, z, w;
};
outer becomes: struct char4
{
signed char x, y, z, w;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct char4
{
signed char x, y, z, w;
;
};
------after parent.removeChild(child), parent is: struct char4
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 3
------after, Declarator d = decl.getDeclarator(i), d is: w
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct char4
{
;
signed char w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct char4
{
;
signed char w;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 2
------after, Declarator d = decl.getDeclarator(i), d is: z
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct char4
{
;
signed char z;
signed char w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct char4
{
;
signed char z;
signed char w;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct char4
{
;
signed char y;
signed char z;
signed char w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct char4
{
;
signed char y;
signed char z;
signed char w;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct char4
{
;
signed char x;
signed char y;
signed char z;
signed char w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct char4
{
;
signed char x;
signed char y;
signed char z;
signed char w;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct char4
{
signed char x;
signed char y;
signed char z;
signed char w;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct char4
{
signed char x;
signed char y;
signed char z;
signed char w;
};
------Done with eliminateMultipleDeclarators, d now is: signed char x, y, z, w
-------------working on eliminating: unsigned char x, y, z, w
------eliminating: unsigned char x, y, z, w its child is: unsigned char x, y, z, w its parent is: unsigned char x, y, z, w;
------parent is instance of declaration statement, child was: unsigned char x, y, z, w parent was: unsigned char x, y, z, w; outer was: null
------now child becomes: unsigned char x, y, z, w; parent becomes: struct uchar4
{
unsigned char x, y, z, w;
};
outer becomes: struct uchar4
{
unsigned char x, y, z, w;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct uchar4
{
unsigned char x, y, z, w;
;
};
------after parent.removeChild(child), parent is: struct uchar4
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 3
------after, Declarator d = decl.getDeclarator(i), d is: w
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct uchar4
{
;
unsigned char w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct uchar4
{
;
unsigned char w;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 2
------after, Declarator d = decl.getDeclarator(i), d is: z
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct uchar4
{
;
unsigned char z;
unsigned char w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct uchar4
{
;
unsigned char z;
unsigned char w;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct uchar4
{
;
unsigned char y;
unsigned char z;
unsigned char w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct uchar4
{
;
unsigned char y;
unsigned char z;
unsigned char w;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct uchar4
{
;
unsigned char x;
unsigned char y;
unsigned char z;
unsigned char w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct uchar4
{
;
unsigned char x;
unsigned char y;
unsigned char z;
unsigned char w;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct uchar4
{
unsigned char x;
unsigned char y;
unsigned char z;
unsigned char w;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct uchar4
{
unsigned char x;
unsigned char y;
unsigned char z;
unsigned char w;
};
------Done with eliminateMultipleDeclarators, d now is: unsigned char x, y, z, w
-------------working on eliminating: short x, y
------eliminating: short x, y its child is: short x, y its parent is: short x, y;
------parent is instance of declaration statement, child was: short x, y parent was: short x, y; outer was: null
------now child becomes: short x, y; parent becomes: struct short2
{
short x, y;
};
outer becomes: struct short2
{
short x, y;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct short2
{
short x, y;
;
};
------after parent.removeChild(child), parent is: struct short2
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct short2
{
;
short y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct short2
{
;
short y;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct short2
{
;
short x;
short y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct short2
{
;
short x;
short y;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct short2
{
short x;
short y;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct short2
{
short x;
short y;
};
------Done with eliminateMultipleDeclarators, d now is: short x, y
-------------working on eliminating: unsigned short x, y
------eliminating: unsigned short x, y its child is: unsigned short x, y its parent is: unsigned short x, y;
------parent is instance of declaration statement, child was: unsigned short x, y parent was: unsigned short x, y; outer was: null
------now child becomes: unsigned short x, y; parent becomes: struct ushort2
{
unsigned short x, y;
};
outer becomes: struct ushort2
{
unsigned short x, y;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct ushort2
{
unsigned short x, y;
;
};
------after parent.removeChild(child), parent is: struct ushort2
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct ushort2
{
;
unsigned short y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct ushort2
{
;
unsigned short y;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct ushort2
{
;
unsigned short x;
unsigned short y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct ushort2
{
;
unsigned short x;
unsigned short y;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct ushort2
{
unsigned short x;
unsigned short y;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct ushort2
{
unsigned short x;
unsigned short y;
};
------Done with eliminateMultipleDeclarators, d now is: unsigned short x, y
-------------working on eliminating: short x, y, z
------eliminating: short x, y, z its child is: short x, y, z its parent is: short x, y, z;
------parent is instance of declaration statement, child was: short x, y, z parent was: short x, y, z; outer was: null
------now child becomes: short x, y, z; parent becomes: struct short3
{
short x, y, z;
};
outer becomes: struct short3
{
short x, y, z;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct short3
{
short x, y, z;
;
};
------after parent.removeChild(child), parent is: struct short3
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 2
------after, Declarator d = decl.getDeclarator(i), d is: z
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct short3
{
;
short z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct short3
{
;
short z;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct short3
{
;
short y;
short z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct short3
{
;
short y;
short z;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct short3
{
;
short x;
short y;
short z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct short3
{
;
short x;
short y;
short z;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct short3
{
short x;
short y;
short z;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct short3
{
short x;
short y;
short z;
};
------Done with eliminateMultipleDeclarators, d now is: short x, y, z
-------------working on eliminating: unsigned short x, y, z
------eliminating: unsigned short x, y, z its child is: unsigned short x, y, z its parent is: unsigned short x, y, z;
------parent is instance of declaration statement, child was: unsigned short x, y, z parent was: unsigned short x, y, z; outer was: null
------now child becomes: unsigned short x, y, z; parent becomes: struct ushort3
{
unsigned short x, y, z;
};
outer becomes: struct ushort3
{
unsigned short x, y, z;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct ushort3
{
unsigned short x, y, z;
;
};
------after parent.removeChild(child), parent is: struct ushort3
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 2
------after, Declarator d = decl.getDeclarator(i), d is: z
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct ushort3
{
;
unsigned short z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct ushort3
{
;
unsigned short z;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct ushort3
{
;
unsigned short y;
unsigned short z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct ushort3
{
;
unsigned short y;
unsigned short z;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct ushort3
{
;
unsigned short x;
unsigned short y;
unsigned short z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct ushort3
{
;
unsigned short x;
unsigned short y;
unsigned short z;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct ushort3
{
unsigned short x;
unsigned short y;
unsigned short z;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct ushort3
{
unsigned short x;
unsigned short y;
unsigned short z;
};
------Done with eliminateMultipleDeclarators, d now is: unsigned short x, y, z
-------------working on eliminating: short x, y, z, w
------eliminating: short x, y, z, w its child is: short x, y, z, w its parent is: short x, y, z, w;
------parent is instance of declaration statement, child was: short x, y, z, w parent was: short x, y, z, w; outer was: null
------now child becomes: short x, y, z, w; parent becomes: struct short4
{
short x, y, z, w;
};
outer becomes: struct short4
{
short x, y, z, w;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct short4
{
short x, y, z, w;
;
};
------after parent.removeChild(child), parent is: struct short4
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 3
------after, Declarator d = decl.getDeclarator(i), d is: w
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct short4
{
;
short w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct short4
{
;
short w;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 2
------after, Declarator d = decl.getDeclarator(i), d is: z
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct short4
{
;
short z;
short w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct short4
{
;
short z;
short w;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct short4
{
;
short y;
short z;
short w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct short4
{
;
short y;
short z;
short w;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct short4
{
;
short x;
short y;
short z;
short w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct short4
{
;
short x;
short y;
short z;
short w;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct short4
{
short x;
short y;
short z;
short w;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct short4
{
short x;
short y;
short z;
short w;
};
------Done with eliminateMultipleDeclarators, d now is: short x, y, z, w
-------------working on eliminating: unsigned short x, y, z, w
------eliminating: unsigned short x, y, z, w its child is: unsigned short x, y, z, w its parent is: unsigned short x, y, z, w;
------parent is instance of declaration statement, child was: unsigned short x, y, z, w parent was: unsigned short x, y, z, w; outer was: null
------now child becomes: unsigned short x, y, z, w; parent becomes: struct ushort4
{
unsigned short x, y, z, w;
};
outer becomes: struct ushort4
{
unsigned short x, y, z, w;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct ushort4
{
unsigned short x, y, z, w;
;
};
------after parent.removeChild(child), parent is: struct ushort4
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 3
------after, Declarator d = decl.getDeclarator(i), d is: w
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct ushort4
{
;
unsigned short w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct ushort4
{
;
unsigned short w;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 2
------after, Declarator d = decl.getDeclarator(i), d is: z
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct ushort4
{
;
unsigned short z;
unsigned short w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct ushort4
{
;
unsigned short z;
unsigned short w;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct ushort4
{
;
unsigned short y;
unsigned short z;
unsigned short w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct ushort4
{
;
unsigned short y;
unsigned short z;
unsigned short w;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct ushort4
{
;
unsigned short x;
unsigned short y;
unsigned short z;
unsigned short w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct ushort4
{
;
unsigned short x;
unsigned short y;
unsigned short z;
unsigned short w;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct ushort4
{
unsigned short x;
unsigned short y;
unsigned short z;
unsigned short w;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct ushort4
{
unsigned short x;
unsigned short y;
unsigned short z;
unsigned short w;
};
------Done with eliminateMultipleDeclarators, d now is: unsigned short x, y, z, w
-------------working on eliminating: int x, y
------eliminating: int x, y its child is: int x, y its parent is: int x, y;
------parent is instance of declaration statement, child was: int x, y parent was: int x, y; outer was: null
------now child becomes: int x, y; parent becomes: struct int2
{
int x, y;
};
outer becomes: struct int2
{
int x, y;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct int2
{
int x, y;
;
};
------after parent.removeChild(child), parent is: struct int2
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct int2
{
;
int y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct int2
{
;
int y;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct int2
{
;
int x;
int y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct int2
{
;
int x;
int y;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct int2
{
int x;
int y;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct int2
{
int x;
int y;
};
------Done with eliminateMultipleDeclarators, d now is: int x, y
-------------working on eliminating: unsigned int x, y
------eliminating: unsigned int x, y its child is: unsigned int x, y its parent is: unsigned int x, y;
------parent is instance of declaration statement, child was: unsigned int x, y parent was: unsigned int x, y; outer was: null
------now child becomes: unsigned int x, y; parent becomes: struct uint2
{
unsigned int x, y;
};
outer becomes: struct uint2
{
unsigned int x, y;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct uint2
{
unsigned int x, y;
;
};
------after parent.removeChild(child), parent is: struct uint2
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct uint2
{
;
unsigned int y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct uint2
{
;
unsigned int y;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct uint2
{
;
unsigned int x;
unsigned int y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct uint2
{
;
unsigned int x;
unsigned int y;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct uint2
{
unsigned int x;
unsigned int y;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct uint2
{
unsigned int x;
unsigned int y;
};
------Done with eliminateMultipleDeclarators, d now is: unsigned int x, y
-------------working on eliminating: int x, y, z
------eliminating: int x, y, z its child is: int x, y, z its parent is: int x, y, z;
------parent is instance of declaration statement, child was: int x, y, z parent was: int x, y, z; outer was: null
------now child becomes: int x, y, z; parent becomes: struct int3
{
int x, y, z;
};
outer becomes: struct int3
{
int x, y, z;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct int3
{
int x, y, z;
;
};
------after parent.removeChild(child), parent is: struct int3
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 2
------after, Declarator d = decl.getDeclarator(i), d is: z
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct int3
{
;
int z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct int3
{
;
int z;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct int3
{
;
int y;
int z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct int3
{
;
int y;
int z;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct int3
{
;
int x;
int y;
int z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct int3
{
;
int x;
int y;
int z;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct int3
{
int x;
int y;
int z;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct int3
{
int x;
int y;
int z;
};
------Done with eliminateMultipleDeclarators, d now is: int x, y, z
-------------working on eliminating: unsigned int x, y, z
------eliminating: unsigned int x, y, z its child is: unsigned int x, y, z its parent is: unsigned int x, y, z;
------parent is instance of declaration statement, child was: unsigned int x, y, z parent was: unsigned int x, y, z; outer was: null
------now child becomes: unsigned int x, y, z; parent becomes: struct uint3
{
unsigned int x, y, z;
};
outer becomes: struct uint3
{
unsigned int x, y, z;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct uint3
{
unsigned int x, y, z;
;
};
------after parent.removeChild(child), parent is: struct uint3
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 2
------after, Declarator d = decl.getDeclarator(i), d is: z
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct uint3
{
;
unsigned int z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct uint3
{
;
unsigned int z;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct uint3
{
;
unsigned int y;
unsigned int z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct uint3
{
;
unsigned int y;
unsigned int z;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct uint3
{
;
unsigned int x;
unsigned int y;
unsigned int z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct uint3
{
;
unsigned int x;
unsigned int y;
unsigned int z;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct uint3
{
unsigned int x;
unsigned int y;
unsigned int z;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct uint3
{
unsigned int x;
unsigned int y;
unsigned int z;
};
------Done with eliminateMultipleDeclarators, d now is: unsigned int x, y, z
-------------working on eliminating: int x, y, z, w
------eliminating: int x, y, z, w its child is: int x, y, z, w its parent is: int x, y, z, w;
------parent is instance of declaration statement, child was: int x, y, z, w parent was: int x, y, z, w; outer was: null
------now child becomes: int x, y, z, w; parent becomes: struct int4
{
int x, y, z, w;
};
outer becomes: struct int4
{
int x, y, z, w;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct int4
{
int x, y, z, w;
;
};
------after parent.removeChild(child), parent is: struct int4
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 3
------after, Declarator d = decl.getDeclarator(i), d is: w
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct int4
{
;
int w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct int4
{
;
int w;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 2
------after, Declarator d = decl.getDeclarator(i), d is: z
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct int4
{
;
int z;
int w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct int4
{
;
int z;
int w;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct int4
{
;
int y;
int z;
int w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct int4
{
;
int y;
int z;
int w;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct int4
{
;
int x;
int y;
int z;
int w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct int4
{
;
int x;
int y;
int z;
int w;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct int4
{
int x;
int y;
int z;
int w;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct int4
{
int x;
int y;
int z;
int w;
};
------Done with eliminateMultipleDeclarators, d now is: int x, y, z, w
-------------working on eliminating: unsigned int x, y, z, w
------eliminating: unsigned int x, y, z, w its child is: unsigned int x, y, z, w its parent is: unsigned int x, y, z, w;
------parent is instance of declaration statement, child was: unsigned int x, y, z, w parent was: unsigned int x, y, z, w; outer was: null
------now child becomes: unsigned int x, y, z, w; parent becomes: struct uint4
{
unsigned int x, y, z, w;
};
outer becomes: struct uint4
{
unsigned int x, y, z, w;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct uint4
{
unsigned int x, y, z, w;
;
};
------after parent.removeChild(child), parent is: struct uint4
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 3
------after, Declarator d = decl.getDeclarator(i), d is: w
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct uint4
{
;
unsigned int w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct uint4
{
;
unsigned int w;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 2
------after, Declarator d = decl.getDeclarator(i), d is: z
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct uint4
{
;
unsigned int z;
unsigned int w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct uint4
{
;
unsigned int z;
unsigned int w;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct uint4
{
;
unsigned int y;
unsigned int z;
unsigned int w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct uint4
{
;
unsigned int y;
unsigned int z;
unsigned int w;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct uint4
{
;
unsigned int x;
unsigned int y;
unsigned int z;
unsigned int w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct uint4
{
;
unsigned int x;
unsigned int y;
unsigned int z;
unsigned int w;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct uint4
{
unsigned int x;
unsigned int y;
unsigned int z;
unsigned int w;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct uint4
{
unsigned int x;
unsigned int y;
unsigned int z;
unsigned int w;
};
------Done with eliminateMultipleDeclarators, d now is: unsigned int x, y, z, w
-------------working on eliminating: long int x, y
------eliminating: long int x, y its child is: long int x, y its parent is: long int x, y;
------parent is instance of declaration statement, child was: long int x, y parent was: long int x, y; outer was: null
------now child becomes: long int x, y; parent becomes: struct long2
{
long int x, y;
};
outer becomes: struct long2
{
long int x, y;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct long2
{
long int x, y;
;
};
------after parent.removeChild(child), parent is: struct long2
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct long2
{
;
long int y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct long2
{
;
long int y;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct long2
{
;
long int x;
long int y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct long2
{
;
long int x;
long int y;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct long2
{
long int x;
long int y;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct long2
{
long int x;
long int y;
};
------Done with eliminateMultipleDeclarators, d now is: long int x, y
-------------working on eliminating: unsigned long int x, y
------eliminating: unsigned long int x, y its child is: unsigned long int x, y its parent is: unsigned long int x, y;
------parent is instance of declaration statement, child was: unsigned long int x, y parent was: unsigned long int x, y; outer was: null
------now child becomes: unsigned long int x, y; parent becomes: struct ulong2
{
unsigned long int x, y;
};
outer becomes: struct ulong2
{
unsigned long int x, y;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct ulong2
{
unsigned long int x, y;
;
};
------after parent.removeChild(child), parent is: struct ulong2
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct ulong2
{
;
unsigned long int y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct ulong2
{
;
unsigned long int y;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct ulong2
{
;
unsigned long int x;
unsigned long int y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct ulong2
{
;
unsigned long int x;
unsigned long int y;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct ulong2
{
unsigned long int x;
unsigned long int y;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct ulong2
{
unsigned long int x;
unsigned long int y;
};
------Done with eliminateMultipleDeclarators, d now is: unsigned long int x, y
-------------working on eliminating: float x, y
------eliminating: float x, y its child is: float x, y its parent is: float x, y;
------parent is instance of declaration statement, child was: float x, y parent was: float x, y; outer was: null
------now child becomes: float x, y; parent becomes: struct float2
{
float x, y;
};
outer becomes: struct float2
{
float x, y;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct float2
{
float x, y;
;
};
------after parent.removeChild(child), parent is: struct float2
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct float2
{
;
float y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct float2
{
;
float y;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct float2
{
;
float x;
float y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct float2
{
;
float x;
float y;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct float2
{
float x;
float y;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct float2
{
float x;
float y;
};
------Done with eliminateMultipleDeclarators, d now is: float x, y
-------------working on eliminating: float x, y, z
------eliminating: float x, y, z its child is: float x, y, z its parent is: float x, y, z;
------parent is instance of declaration statement, child was: float x, y, z parent was: float x, y, z; outer was: null
------now child becomes: float x, y, z; parent becomes: struct float3
{
float x, y, z;
};
outer becomes: struct float3
{
float x, y, z;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct float3
{
float x, y, z;
;
};
------after parent.removeChild(child), parent is: struct float3
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 2
------after, Declarator d = decl.getDeclarator(i), d is: z
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct float3
{
;
float z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct float3
{
;
float z;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct float3
{
;
float y;
float z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct float3
{
;
float y;
float z;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct float3
{
;
float x;
float y;
float z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct float3
{
;
float x;
float y;
float z;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct float3
{
float x;
float y;
float z;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct float3
{
float x;
float y;
float z;
};
------Done with eliminateMultipleDeclarators, d now is: float x, y, z
-------------working on eliminating: float x, y, z, w
------eliminating: float x, y, z, w its child is: float x, y, z, w its parent is: float x, y, z, w;
------parent is instance of declaration statement, child was: float x, y, z, w parent was: float x, y, z, w; outer was: null
------now child becomes: float x, y, z, w; parent becomes: struct float4
{
float x, y, z, w;
};
outer becomes: struct float4
{
float x, y, z, w;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct float4
{
float x, y, z, w;
;
};
------after parent.removeChild(child), parent is: struct float4
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 3
------after, Declarator d = decl.getDeclarator(i), d is: w
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct float4
{
;
float w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct float4
{
;
float w;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 2
------after, Declarator d = decl.getDeclarator(i), d is: z
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct float4
{
;
float z;
float w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct float4
{
;
float z;
float w;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct float4
{
;
float y;
float z;
float w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct float4
{
;
float y;
float z;
float w;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct float4
{
;
float x;
float y;
float z;
float w;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct float4
{
;
float x;
float y;
float z;
float w;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct float4
{
float x;
float y;
float z;
float w;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct float4
{
float x;
float y;
float z;
float w;
};
------Done with eliminateMultipleDeclarators, d now is: float x, y, z, w
-------------working on eliminating: long long int x, y
------eliminating: long long int x, y its child is: long long int x, y its parent is: long long int x, y;
------parent is instance of declaration statement, child was: long long int x, y parent was: long long int x, y; outer was: null
------now child becomes: long long int x, y; parent becomes: struct longlong2
{
long long int x, y;
};
outer becomes: struct longlong2
{
long long int x, y;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct longlong2
{
long long int x, y;
;
};
------after parent.removeChild(child), parent is: struct longlong2
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct longlong2
{
;
long long int y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct longlong2
{
;
long long int y;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct longlong2
{
;
long long int x;
long long int y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct longlong2
{
;
long long int x;
long long int y;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct longlong2
{
long long int x;
long long int y;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct longlong2
{
long long int x;
long long int y;
};
------Done with eliminateMultipleDeclarators, d now is: long long int x, y
-------------working on eliminating: unsigned long long int x, y
------eliminating: unsigned long long int x, y its child is: unsigned long long int x, y its parent is: unsigned long long int x, y;
------parent is instance of declaration statement, child was: unsigned long long int x, y parent was: unsigned long long int x, y; outer was: null
------now child becomes: unsigned long long int x, y; parent becomes: struct ulonglong2
{
unsigned long long int x, y;
};
outer becomes: struct ulonglong2
{
unsigned long long int x, y;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct ulonglong2
{
unsigned long long int x, y;
;
};
------after parent.removeChild(child), parent is: struct ulonglong2
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct ulonglong2
{
;
unsigned long long int y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct ulonglong2
{
;
unsigned long long int y;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct ulonglong2
{
;
unsigned long long int x;
unsigned long long int y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct ulonglong2
{
;
unsigned long long int x;
unsigned long long int y;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct ulonglong2
{
unsigned long long int x;
unsigned long long int y;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct ulonglong2
{
unsigned long long int x;
unsigned long long int y;
};
------Done with eliminateMultipleDeclarators, d now is: unsigned long long int x, y
-------------working on eliminating: double x, y
------eliminating: double x, y its child is: double x, y its parent is: double x, y;
------parent is instance of declaration statement, child was: double x, y parent was: double x, y; outer was: null
------now child becomes: double x, y; parent becomes: struct double2
{
double x, y;
};
outer becomes: struct double2
{
double x, y;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct double2
{
double x, y;
;
};
------after parent.removeChild(child), parent is: struct double2
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct double2
{
;
double y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct double2
{
;
double y;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct double2
{
;
double x;
double y;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct double2
{
;
double x;
double y;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct double2
{
double x;
double y;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct double2
{
double x;
double y;
};
------Done with eliminateMultipleDeclarators, d now is: double x, y
-------------working on eliminating: double x, y, z
------eliminating: double x, y, z its child is: double x, y, z its parent is: double x, y, z;
------parent is instance of declaration statement, child was: double x, y, z parent was: double x, y, z; outer was: null
------now child becomes: double x, y, z; parent becomes: struct double3
{
double x, y, z;
};
outer becomes: struct double3
{
double x, y, z;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct double3
{
double x, y, z;
;
};
------after parent.removeChild(child), parent is: struct double3
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 2
------after, Declarator d = decl.getDeclarator(i), d is: z
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct double3
{
;
double z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct double3
{
;
double z;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct double3
{
;
double y;
double z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct double3
{
;
double y;
double z;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct double3
{
;
double x;
double y;
double z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct double3
{
;
double x;
double y;
double z;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct double3
{
double x;
double y;
double z;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct double3
{
double x;
double y;
double z;
};
------Done with eliminateMultipleDeclarators, d now is: double x, y, z
-------------working on eliminating: unsigned int x, y, z
------eliminating: unsigned int x, y, z its child is: unsigned int x, y, z its parent is: unsigned int x, y, z;
------parent is instance of declaration statement, child was: unsigned int x, y, z parent was: unsigned int x, y, z; outer was: null
------now child becomes: unsigned int x, y, z; parent becomes: struct dim3
{
unsigned int x, y, z;
};
outer becomes: struct dim3
{
unsigned int x, y, z;
};
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
------after outer.addDeclarationAfter(decl, placeholder), outer is: struct dim3
{
unsigned int x, y, z;
;
};
------after parent.removeChild(child), parent is: struct dim3
{
;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 2
------after, Declarator d = decl.getDeclarator(i), d is: z
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct dim3
{
;
unsigned int z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct dim3
{
;
unsigned int z;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: y
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct dim3
{
;
unsigned int y;
unsigned int z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct dim3
{
;
unsigned int y;
unsigned int z;
};
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: x
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: struct dim3
{
;
unsigned int x;
unsigned int y;
unsigned int z;
};
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: struct dim3
{
;
unsigned int x;
unsigned int y;
unsigned int z;
};
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: struct dim3
{
unsigned int x;
unsigned int y;
unsigned int z;
};
------after parent.removeChild(placeholder.getParent()), outer is: struct dim3
{
unsigned int x;
unsigned int y;
unsigned int z;
};
------Done with eliminateMultipleDeclarators, d now is: unsigned int x, y, z
-------------d is instance of procedure: void __syncthreads()
{
;
}
-------------d is instance of procedure: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx = blockIdx.x;
int by = blockIdx.y;
int aBegin = ((wA*16)*by);
int aEnd = ((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
int a = 0, b = 0, k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
-------------working on eliminating: int a = 0, b = 0, k = 0
------eliminating: int a = 0, b = 0, k = 0 its child is: int a = 0, b = 0, k = 0 its parent is: int a = 0, b = 0, k = 0;
------parent is instance of declaration statement, child was: int a = 0, b = 0, k = 0 parent was: int a = 0, b = 0, k = 0; outer was: null
------now child becomes: int a = 0, b = 0, k = 0; parent becomes: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx = blockIdx.x;
int by = blockIdx.y;
int aBegin = ((wA*16)*by);
int aEnd = ((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
int a = 0, b = 0, k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
} outer becomes: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx = blockIdx.x;
int by = blockIdx.y;
int aBegin = ((wA*16)*by);
int aEnd = ((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
int a = 0, b = 0, k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
------Start to eliminate---------------------------------------------
------placeholder is----------------------------
....CompoundStmt...addDecAfter...01
....CompoundStmt...addDecAfter...02
------after outer.addDeclarationAfter(decl, placeholder), outer is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx = blockIdx.x;
int by = blockIdx.y;
int aBegin = ((wA*16)*by);
int aEnd = ((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
int a = 0, b = 0, k = 0;
;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
------after parent.removeChild(child), parent is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx = blockIdx.x;
int by = blockIdx.y;
int aBegin = ((wA*16)*by);
int aEnd = ((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 2
------after, Declarator d = decl.getDeclarator(i), d is: k = 0
....CompoundStmt...addDecAfter...01
....CompoundStmt...addDecAfter...02
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx = blockIdx.x;
int by = blockIdx.y;
int aBegin = ((wA*16)*by);
int aEnd = ((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx = blockIdx.x;
int by = blockIdx.y;
int aBegin = ((wA*16)*by);
int aEnd = ((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 1
------after, Declarator d = decl.getDeclarator(i), d is: b = 0
....CompoundStmt...addDecAfter...01
....CompoundStmt...addDecAfter...02
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx = blockIdx.x;
int by = blockIdx.y;
int aBegin = ((wA*16)*by);
int aEnd = ((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx = blockIdx.x;
int by = blockIdx.y;
int aBegin = ((wA*16)*by);
int aEnd = ((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
------enter for loop, int i = decl.getNumDeclarators() - 1, i is: 0
------after, Declarator d = decl.getDeclarator(i), d is: a = 0
....CompoundStmt...addDecAfter...01
....CompoundStmt...addDecAfter...02
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, outer is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx = blockIdx.x;
int by = blockIdx.y;
int aBegin = ((wA*16)*by);
int aEnd = ((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
;
int a = 0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
------after, outer.addDeclarationAfter(placeholder, new VariableDeclaration(decl.getSpecifiers(), d.clone()));, parent is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx = blockIdx.x;
int by = blockIdx.y;
int aBegin = ((wA*16)*by);
int aEnd = ((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
;
int a = 0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
------if (placeholder.getParent() instanceof DeclarationStatement), placeholder.getParent() is: ;
------after parent.removeChild(placeholder.getParent()), parent is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx = blockIdx.x;
int by = blockIdx.y;
int aBegin = ((wA*16)*by);
int aEnd = ((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
int a = 0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
------after parent.removeChild(placeholder.getParent()), outer is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx = blockIdx.x;
int by = blockIdx.y;
int aBegin = ((wA*16)*by);
int aEnd = ((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
int a = 0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
------Done with eliminateMultipleDeclarators, d now is: int a = 0, b = 0, k = 0
[SingleDeclarator] end in 0.05 seconds
[LinkSymbol] 69 updates in 0.00 seconds
*** After SingleDeclarator ***
#include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx = blockIdx.x;
int by = blockIdx.y;
int aBegin = ((wA*16)*by);
int aEnd = ((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
int a = 0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
===========================================
[SeparateInitializers] begin
[SeparateInitializers] examining procedure matrixMul
00-----if Driver.getoptionvvalue fcuda != null-----
00-----after List<Procedure> tskLst = FCUDAutils.getTaskMapping(proc.getSymbolName()); tskLst is: null-----
1111-----If tskLst == null-----
00-----in splitInitialization, proc is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx = blockIdx.x;
int by = blockIdx.y;
int aBegin = ((wA*16)*by);
int aEnd = ((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
int a = 0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
-----
1111-----enter while loop, declStmt is: int bx = blockIdx.x;-----
1111-----vDecl = (VariableDeclaration)declStmt.getDeclaration(), vDecl is: int bx = blockIdx.x-----
1111-----variable = (VariableDeclarator)vDecl.getDeclarator(0), variable is: bx = blockIdx.x-----
1111-----after Initializer vInit = variable.getInitializer(), vInit is: = blockIdx.x-----
1111-----after variable.setInitializer(null), variable is: bx-----
1111-----vDecl.getSpecifiers().remove(Specifier.CONST), vDecl is: int bx-----
1111-----variable.getSpecifiers().remove(Specifier.CONST), variable is: bx-----
1111-----List<Traversable> children = vInit.getChildren(), children is: [blockIdx.x]-----
1111-----Expression initExpr = (Expression)((Expression)children.get(0)).clone(), initExpr is: blockIdx.x-----
1111-----IDExpression vID = new Identifier(variable), vID is: bx-----
1111-----AssignmentExpression vAssign = new AssignmentExpression(vID, AssignmentOperator.NORMAL, initExpr), vAssign is: (bx=blockIdx.x)-----
1111-----ExpressionStatement vStmt = new ExpressionStatement(vAssign), vStmt is: bx=blockIdx.x;-----
1111-----CompoundStatement scope = (CompoundStatement)declStmt.getParent(), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
int by = blockIdx.y;
int aBegin = ((wA*16)*by);
int aEnd = ((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
int a = 0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----scope.addStatementAfter(declStmt, vStmt), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by = blockIdx.y;
int aBegin = ((wA*16)*by);
int aEnd = ((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
int a = 0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----enter while loop, declStmt is: int by = blockIdx.y;-----
1111-----vDecl = (VariableDeclaration)declStmt.getDeclaration(), vDecl is: int by = blockIdx.y-----
1111-----variable = (VariableDeclarator)vDecl.getDeclarator(0), variable is: by = blockIdx.y-----
1111-----after Initializer vInit = variable.getInitializer(), vInit is: = blockIdx.y-----
1111-----after variable.setInitializer(null), variable is: by-----
1111-----vDecl.getSpecifiers().remove(Specifier.CONST), vDecl is: int by-----
1111-----variable.getSpecifiers().remove(Specifier.CONST), variable is: by-----
1111-----List<Traversable> children = vInit.getChildren(), children is: [blockIdx.y]-----
1111-----Expression initExpr = (Expression)((Expression)children.get(0)).clone(), initExpr is: blockIdx.y-----
1111-----IDExpression vID = new Identifier(variable), vID is: by-----
1111-----AssignmentExpression vAssign = new AssignmentExpression(vID, AssignmentOperator.NORMAL, initExpr), vAssign is: (by=blockIdx.y)-----
1111-----ExpressionStatement vStmt = new ExpressionStatement(vAssign), vStmt is: by=blockIdx.y;-----
1111-----CompoundStatement scope = (CompoundStatement)declStmt.getParent(), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
int aBegin = ((wA*16)*by);
int aEnd = ((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
int a = 0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----scope.addStatementAfter(declStmt, vStmt), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin = ((wA*16)*by);
int aEnd = ((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
int a = 0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----enter while loop, declStmt is: int aBegin = ((wA*16)*by);-----
1111-----vDecl = (VariableDeclaration)declStmt.getDeclaration(), vDecl is: int aBegin = ((wA*16)*by)-----
1111-----variable = (VariableDeclarator)vDecl.getDeclarator(0), variable is: aBegin = ((wA*16)*by)-----
1111-----after Initializer vInit = variable.getInitializer(), vInit is: = ((wA*16)*by)-----
1111-----after variable.setInitializer(null), variable is: aBegin-----
1111-----vDecl.getSpecifiers().remove(Specifier.CONST), vDecl is: int aBegin-----
1111-----variable.getSpecifiers().remove(Specifier.CONST), variable is: aBegin-----
1111-----List<Traversable> children = vInit.getChildren(), children is: [((wA*16)*by)]-----
1111-----Expression initExpr = (Expression)((Expression)children.get(0)).clone(), initExpr is: ((wA*16)*by)-----
1111-----IDExpression vID = new Identifier(variable), vID is: aBegin-----
1111-----AssignmentExpression vAssign = new AssignmentExpression(vID, AssignmentOperator.NORMAL, initExpr), vAssign is: (aBegin=((wA*16)*by))-----
1111-----ExpressionStatement vStmt = new ExpressionStatement(vAssign), vStmt is: aBegin=((wA*16)*by);-----
1111-----CompoundStatement scope = (CompoundStatement)declStmt.getParent(), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
int aEnd = ((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
int a = 0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----scope.addStatementAfter(declStmt, vStmt), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd = ((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
int a = 0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----enter while loop, declStmt is: int aEnd = ((aBegin+wA)-1);-----
1111-----vDecl = (VariableDeclaration)declStmt.getDeclaration(), vDecl is: int aEnd = ((aBegin+wA)-1)-----
1111-----variable = (VariableDeclarator)vDecl.getDeclarator(0), variable is: aEnd = ((aBegin+wA)-1)-----
1111-----after Initializer vInit = variable.getInitializer(), vInit is: = ((aBegin+wA)-1)-----
1111-----after variable.setInitializer(null), variable is: aEnd-----
1111-----vDecl.getSpecifiers().remove(Specifier.CONST), vDecl is: int aEnd-----
1111-----variable.getSpecifiers().remove(Specifier.CONST), variable is: aEnd-----
1111-----List<Traversable> children = vInit.getChildren(), children is: [((aBegin+wA)-1)]-----
1111-----Expression initExpr = (Expression)((Expression)children.get(0)).clone(), initExpr is: ((aBegin+wA)-1)-----
1111-----IDExpression vID = new Identifier(variable), vID is: aEnd-----
1111-----AssignmentExpression vAssign = new AssignmentExpression(vID, AssignmentOperator.NORMAL, initExpr), vAssign is: (aEnd=((aBegin+wA)-1))-----
1111-----ExpressionStatement vStmt = new ExpressionStatement(vAssign), vStmt is: aEnd=((aBegin+wA)-1);-----
1111-----CompoundStatement scope = (CompoundStatement)declStmt.getParent(), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
int a = 0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----scope.addStatementAfter(declStmt, vStmt), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep = 16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
int a = 0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----enter while loop, declStmt is: int aStep = 16;-----
1111-----vDecl = (VariableDeclaration)declStmt.getDeclaration(), vDecl is: int aStep = 16-----
1111-----variable = (VariableDeclarator)vDecl.getDeclarator(0), variable is: aStep = 16-----
1111-----after Initializer vInit = variable.getInitializer(), vInit is: = 16-----
1111-----after variable.setInitializer(null), variable is: aStep-----
1111-----vDecl.getSpecifiers().remove(Specifier.CONST), vDecl is: int aStep-----
1111-----variable.getSpecifiers().remove(Specifier.CONST), variable is: aStep-----
1111-----List<Traversable> children = vInit.getChildren(), children is: [16]-----
1111-----Expression initExpr = (Expression)((Expression)children.get(0)).clone(), initExpr is: 16-----
1111-----IDExpression vID = new Identifier(variable), vID is: aStep-----
1111-----AssignmentExpression vAssign = new AssignmentExpression(vID, AssignmentOperator.NORMAL, initExpr), vAssign is: (aStep=16)-----
1111-----ExpressionStatement vStmt = new ExpressionStatement(vAssign), vStmt is: aStep=16;-----
1111-----CompoundStatement scope = (CompoundStatement)declStmt.getParent(), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
int a = 0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----scope.addStatementAfter(declStmt, vStmt), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin = (16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
int a = 0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----enter while loop, declStmt is: int bBegin = (16*bx);-----
1111-----vDecl = (VariableDeclaration)declStmt.getDeclaration(), vDecl is: int bBegin = (16*bx)-----
1111-----variable = (VariableDeclarator)vDecl.getDeclarator(0), variable is: bBegin = (16*bx)-----
1111-----after Initializer vInit = variable.getInitializer(), vInit is: = (16*bx)-----
1111-----after variable.setInitializer(null), variable is: bBegin-----
1111-----vDecl.getSpecifiers().remove(Specifier.CONST), vDecl is: int bBegin-----
1111-----variable.getSpecifiers().remove(Specifier.CONST), variable is: bBegin-----
1111-----List<Traversable> children = vInit.getChildren(), children is: [(16*bx)]-----
1111-----Expression initExpr = (Expression)((Expression)children.get(0)).clone(), initExpr is: (16*bx)-----
1111-----IDExpression vID = new Identifier(variable), vID is: bBegin-----
1111-----AssignmentExpression vAssign = new AssignmentExpression(vID, AssignmentOperator.NORMAL, initExpr), vAssign is: (bBegin=(16*bx))-----
1111-----ExpressionStatement vStmt = new ExpressionStatement(vAssign), vStmt is: bBegin=(16*bx);-----
1111-----CompoundStatement scope = (CompoundStatement)declStmt.getParent(), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin;
int bStep = (16*wB);
DATATYPE Csub = 0;
int a = 0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----scope.addStatementAfter(declStmt, vStmt), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin;
bBegin=(16*bx);
int bStep = (16*wB);
DATATYPE Csub = 0;
int a = 0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----enter while loop, declStmt is: int bStep = (16*wB);-----
1111-----vDecl = (VariableDeclaration)declStmt.getDeclaration(), vDecl is: int bStep = (16*wB)-----
1111-----variable = (VariableDeclarator)vDecl.getDeclarator(0), variable is: bStep = (16*wB)-----
1111-----after Initializer vInit = variable.getInitializer(), vInit is: = (16*wB)-----
1111-----after variable.setInitializer(null), variable is: bStep-----
1111-----vDecl.getSpecifiers().remove(Specifier.CONST), vDecl is: int bStep-----
1111-----variable.getSpecifiers().remove(Specifier.CONST), variable is: bStep-----
1111-----List<Traversable> children = vInit.getChildren(), children is: [(16*wB)]-----
1111-----Expression initExpr = (Expression)((Expression)children.get(0)).clone(), initExpr is: (16*wB)-----
1111-----IDExpression vID = new Identifier(variable), vID is: bStep-----
1111-----AssignmentExpression vAssign = new AssignmentExpression(vID, AssignmentOperator.NORMAL, initExpr), vAssign is: (bStep=(16*wB))-----
1111-----ExpressionStatement vStmt = new ExpressionStatement(vAssign), vStmt is: bStep=(16*wB);-----
1111-----CompoundStatement scope = (CompoundStatement)declStmt.getParent(), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin;
bBegin=(16*bx);
int bStep;
DATATYPE Csub = 0;
int a = 0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----scope.addStatementAfter(declStmt, vStmt), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin;
bBegin=(16*bx);
int bStep;
bStep=(16*wB);
DATATYPE Csub = 0;
int a = 0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----enter while loop, declStmt is: DATATYPE Csub = 0;-----
1111-----vDecl = (VariableDeclaration)declStmt.getDeclaration(), vDecl is: DATATYPE Csub = 0-----
1111-----variable = (VariableDeclarator)vDecl.getDeclarator(0), variable is: Csub = 0-----
1111-----after Initializer vInit = variable.getInitializer(), vInit is: = 0-----
1111-----after variable.setInitializer(null), variable is: Csub-----
1111-----vDecl.getSpecifiers().remove(Specifier.CONST), vDecl is: DATATYPE Csub-----
1111-----variable.getSpecifiers().remove(Specifier.CONST), variable is: Csub-----
1111-----List<Traversable> children = vInit.getChildren(), children is: [0]-----
1111-----Expression initExpr = (Expression)((Expression)children.get(0)).clone(), initExpr is: 0-----
1111-----IDExpression vID = new Identifier(variable), vID is: Csub-----
1111-----AssignmentExpression vAssign = new AssignmentExpression(vID, AssignmentOperator.NORMAL, initExpr), vAssign is: (Csub=0)-----
1111-----ExpressionStatement vStmt = new ExpressionStatement(vAssign), vStmt is: Csub=0;-----
1111-----CompoundStatement scope = (CompoundStatement)declStmt.getParent(), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin;
bBegin=(16*bx);
int bStep;
bStep=(16*wB);
DATATYPE Csub;
int a = 0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----scope.addStatementAfter(declStmt, vStmt), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin;
bBegin=(16*bx);
int bStep;
bStep=(16*wB);
DATATYPE Csub;
Csub=0;
int a = 0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----enter while loop, declStmt is: int a = 0;-----
1111-----vDecl = (VariableDeclaration)declStmt.getDeclaration(), vDecl is: int a = 0-----
1111-----variable = (VariableDeclarator)vDecl.getDeclarator(0), variable is: a = 0-----
1111-----after Initializer vInit = variable.getInitializer(), vInit is: = 0-----
1111-----after variable.setInitializer(null), variable is: a-----
1111-----vDecl.getSpecifiers().remove(Specifier.CONST), vDecl is: int a-----
1111-----variable.getSpecifiers().remove(Specifier.CONST), variable is: a-----
1111-----List<Traversable> children = vInit.getChildren(), children is: [0]-----
1111-----Expression initExpr = (Expression)((Expression)children.get(0)).clone(), initExpr is: 0-----
1111-----IDExpression vID = new Identifier(variable), vID is: a-----
1111-----AssignmentExpression vAssign = new AssignmentExpression(vID, AssignmentOperator.NORMAL, initExpr), vAssign is: (a=0)-----
1111-----ExpressionStatement vStmt = new ExpressionStatement(vAssign), vStmt is: a=0;-----
1111-----CompoundStatement scope = (CompoundStatement)declStmt.getParent(), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin;
bBegin=(16*bx);
int bStep;
bStep=(16*wB);
DATATYPE Csub;
Csub=0;
int a;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----scope.addStatementAfter(declStmt, vStmt), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin;
bBegin=(16*bx);
int bStep;
bStep=(16*wB);
DATATYPE Csub;
Csub=0;
int a;
a=0;
int b = 0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----enter while loop, declStmt is: int b = 0;-----
1111-----vDecl = (VariableDeclaration)declStmt.getDeclaration(), vDecl is: int b = 0-----
1111-----variable = (VariableDeclarator)vDecl.getDeclarator(0), variable is: b = 0-----
1111-----after Initializer vInit = variable.getInitializer(), vInit is: = 0-----
1111-----after variable.setInitializer(null), variable is: b-----
1111-----vDecl.getSpecifiers().remove(Specifier.CONST), vDecl is: int b-----
1111-----variable.getSpecifiers().remove(Specifier.CONST), variable is: b-----
1111-----List<Traversable> children = vInit.getChildren(), children is: [0]-----
1111-----Expression initExpr = (Expression)((Expression)children.get(0)).clone(), initExpr is: 0-----
1111-----IDExpression vID = new Identifier(variable), vID is: b-----
1111-----AssignmentExpression vAssign = new AssignmentExpression(vID, AssignmentOperator.NORMAL, initExpr), vAssign is: (b=0)-----
1111-----ExpressionStatement vStmt = new ExpressionStatement(vAssign), vStmt is: b=0;-----
1111-----CompoundStatement scope = (CompoundStatement)declStmt.getParent(), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin;
bBegin=(16*bx);
int bStep;
bStep=(16*wB);
DATATYPE Csub;
Csub=0;
int a;
a=0;
int b;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----scope.addStatementAfter(declStmt, vStmt), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin;
bBegin=(16*bx);
int bStep;
bStep=(16*wB);
DATATYPE Csub;
Csub=0;
int a;
a=0;
int b;
b=0;
int k = 0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----enter while loop, declStmt is: int k = 0;-----
1111-----vDecl = (VariableDeclaration)declStmt.getDeclaration(), vDecl is: int k = 0-----
1111-----variable = (VariableDeclarator)vDecl.getDeclarator(0), variable is: k = 0-----
1111-----after Initializer vInit = variable.getInitializer(), vInit is: = 0-----
1111-----after variable.setInitializer(null), variable is: k-----
1111-----vDecl.getSpecifiers().remove(Specifier.CONST), vDecl is: int k-----
1111-----variable.getSpecifiers().remove(Specifier.CONST), variable is: k-----
1111-----List<Traversable> children = vInit.getChildren(), children is: [0]-----
1111-----Expression initExpr = (Expression)((Expression)children.get(0)).clone(), initExpr is: 0-----
1111-----IDExpression vID = new Identifier(variable), vID is: k-----
1111-----AssignmentExpression vAssign = new AssignmentExpression(vID, AssignmentOperator.NORMAL, initExpr), vAssign is: (k=0)-----
1111-----ExpressionStatement vStmt = new ExpressionStatement(vAssign), vStmt is: k=0;-----
1111-----CompoundStatement scope = (CompoundStatement)declStmt.getParent(), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin;
bBegin=(16*bx);
int bStep;
bStep=(16*wB);
DATATYPE Csub;
Csub=0;
int a;
a=0;
int b;
b=0;
int k;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----scope.addStatementAfter(declStmt, vStmt), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin;
bBegin=(16*bx);
int bStep;
bStep=(16*wB);
DATATYPE Csub;
Csub=0;
int a;
a=0;
int b;
b=0;
int k;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c = (((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----enter while loop, declStmt is: __shared__ DATATYPE As[16][16];-----
1111-----vDecl = (VariableDeclaration)declStmt.getDeclaration(), vDecl is: __shared__ DATATYPE As[16][16]-----
1111-----variable = (VariableDeclarator)vDecl.getDeclarator(0), variable is: As[16][16]-----
1111-----after Initializer vInit = variable.getInitializer(), vInit is: null-----
1111-----after variable.setInitializer(null), variable is: As[16][16]-----
222222-----if(vInit == null)
1111-----enter while loop, declStmt is: __shared__ DATATYPE Bs[16][16];-----
1111-----vDecl = (VariableDeclaration)declStmt.getDeclaration(), vDecl is: __shared__ DATATYPE Bs[16][16]-----
1111-----variable = (VariableDeclarator)vDecl.getDeclarator(0), variable is: Bs[16][16]-----
1111-----after Initializer vInit = variable.getInitializer(), vInit is: null-----
1111-----after variable.setInitializer(null), variable is: Bs[16][16]-----
222222-----if(vInit == null)
1111-----enter while loop, declStmt is: int c = (((wB*16)*by)+(16*bx));-----
1111-----vDecl = (VariableDeclaration)declStmt.getDeclaration(), vDecl is: int c = (((wB*16)*by)+(16*bx))-----
1111-----variable = (VariableDeclarator)vDecl.getDeclarator(0), variable is: c = (((wB*16)*by)+(16*bx))-----
1111-----after Initializer vInit = variable.getInitializer(), vInit is: = (((wB*16)*by)+(16*bx))-----
1111-----after variable.setInitializer(null), variable is: c-----
1111-----vDecl.getSpecifiers().remove(Specifier.CONST), vDecl is: int c-----
1111-----variable.getSpecifiers().remove(Specifier.CONST), variable is: c-----
1111-----List<Traversable> children = vInit.getChildren(), children is: [(((wB*16)*by)+(16*bx))]-----
1111-----Expression initExpr = (Expression)((Expression)children.get(0)).clone(), initExpr is: (((wB*16)*by)+(16*bx))-----
1111-----IDExpression vID = new Identifier(variable), vID is: c-----
1111-----AssignmentExpression vAssign = new AssignmentExpression(vID, AssignmentOperator.NORMAL, initExpr), vAssign is: (c=(((wB*16)*by)+(16*bx)))-----
1111-----ExpressionStatement vStmt = new ExpressionStatement(vAssign), vStmt is: c=(((wB*16)*by)+(16*bx));-----
1111-----CompoundStatement scope = (CompoundStatement)declStmt.getParent(), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin;
bBegin=(16*bx);
int bStep;
bStep=(16*wB);
DATATYPE Csub;
Csub=0;
int a;
a=0;
int b;
b=0;
int k;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c;
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
1111-----scope.addStatementAfter(declStmt, vStmt), scope is: {
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin;
bBegin=(16*bx);
int bStep;
bStep=(16*wB);
DATATYPE Csub;
Csub=0;
int a;
a=0;
int b;
b=0;
int k;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c;
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
-----kernelTransformPass, finish transformProcedure-----, proc now is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin;
bBegin=(16*bx);
int bStep;
bStep=(16*wB);
DATATYPE Csub;
Csub=0;
int a;
a=0;
int b;
b=0;
int k;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c;
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
[SeparateInitializers] end in 0.03 seconds
[LinkSymbol] 81 updates in 0.00 seconds
*** After SeparateInitializers ***
#include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin;
bBegin=(16*bx);
int bStep;
bStep=(16*wB);
DATATYPE Csub;
Csub=0;
int a;
a=0;
int b;
b=0;
int k;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c;
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
===========================================
[AnsiDeclarations] begin
00-----List<Traversable> statements = scope.getChildren()
00-----statements is: [;]-----
00-----List<Statement> nonDeclarations = new LinkedList<Statement>()
00-----statements is: [;]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: ;-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: ;-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [;]-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: ;-----
222222-----d.detach()
222222-----d is: ;-----
222222-----scope.addStatement(d)
222222-----scope is: {
;
}-----
00-----List<Traversable> statements = scope.getChildren()
00-----statements is: [#pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 , int bx;, bx=blockIdx.x;, int by;, by=blockIdx.y;, int aBegin;, aBegin=((wA*16)*by);, int aEnd;, aEnd=((aBegin+wA)-1);, int aStep;, aStep=16;, int bBegin;, bBegin=(16*bx);, int bStep;, bStep=(16*wB);, DATATYPE Csub;, Csub=0;, int a;, a=0;, int b;, b=0;, int k;, k=0;, for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}, int c;, c=(((wB*16)*by)+(16*bx));, C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;]-----
00-----List<Statement> nonDeclarations = new LinkedList<Statement>()
00-----statements is: [#pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 , int bx;, bx=blockIdx.x;, int by;, by=blockIdx.y;, int aBegin;, aBegin=((wA*16)*by);, int aEnd;, aEnd=((aBegin+wA)-1);, int aStep;, aStep=16;, int bBegin;, bBegin=(16*bx);, int bStep;, bStep=(16*wB);, DATATYPE Csub;, Csub=0;, int a;, a=0;, int b;, b=0;, int k;, k=0;, for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}, int c;, c=(((wB*16)*by)+(16*bx));, C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: #pragma HLS INTERFACE ap_bus port=A depth=3840 -----
1111-----Statement stmt = (Statement)i
1111-----stmt is: #pragma HLS INTERFACE ap_bus port=A depth=3840 -----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [#pragma HLS INTERFACE ap_bus port=A depth=3840 ]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: #pragma HLS INTERFACE ap_bus port=B depth=6144 -----
1111-----Statement stmt = (Statement)i
1111-----stmt is: #pragma HLS INTERFACE ap_bus port=B depth=6144 -----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [#pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 ]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: #pragma HLS INTERFACE ap_bus port=C depth=10240 -----
1111-----Statement stmt = (Statement)i
1111-----stmt is: #pragma HLS INTERFACE ap_bus port=C depth=10240 -----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [#pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 ]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: int bx;-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: int bx;-----
222222-----nonDeclarations.add(stmt)
222222-----i is: bx=blockIdx.x;-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: bx=blockIdx.x;-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [#pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 , bx=blockIdx.x;]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: int by;-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: int by;-----
222222-----nonDeclarations.add(stmt)
222222-----i is: by=blockIdx.y;-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: by=blockIdx.y;-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [#pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 , bx=blockIdx.x;, by=blockIdx.y;]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: int aBegin;-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: int aBegin;-----
222222-----nonDeclarations.add(stmt)
222222-----i is: aBegin=((wA*16)*by);-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: aBegin=((wA*16)*by);-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [#pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 , bx=blockIdx.x;, by=blockIdx.y;, aBegin=((wA*16)*by);]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: int aEnd;-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: int aEnd;-----
222222-----nonDeclarations.add(stmt)
222222-----i is: aEnd=((aBegin+wA)-1);-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: aEnd=((aBegin+wA)-1);-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [#pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 , bx=blockIdx.x;, by=blockIdx.y;, aBegin=((wA*16)*by);, aEnd=((aBegin+wA)-1);]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: int aStep;-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: int aStep;-----
222222-----nonDeclarations.add(stmt)
222222-----i is: aStep=16;-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: aStep=16;-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [#pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 , bx=blockIdx.x;, by=blockIdx.y;, aBegin=((wA*16)*by);, aEnd=((aBegin+wA)-1);, aStep=16;]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: int bBegin;-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: int bBegin;-----
222222-----nonDeclarations.add(stmt)
222222-----i is: bBegin=(16*bx);-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: bBegin=(16*bx);-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [#pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 , bx=blockIdx.x;, by=blockIdx.y;, aBegin=((wA*16)*by);, aEnd=((aBegin+wA)-1);, aStep=16;, bBegin=(16*bx);]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: int bStep;-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: int bStep;-----
222222-----nonDeclarations.add(stmt)
222222-----i is: bStep=(16*wB);-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: bStep=(16*wB);-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [#pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 , bx=blockIdx.x;, by=blockIdx.y;, aBegin=((wA*16)*by);, aEnd=((aBegin+wA)-1);, aStep=16;, bBegin=(16*bx);, bStep=(16*wB);]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: DATATYPE Csub;-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: DATATYPE Csub;-----
222222-----nonDeclarations.add(stmt)
222222-----i is: Csub=0;-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: Csub=0;-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [#pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 , bx=blockIdx.x;, by=blockIdx.y;, aBegin=((wA*16)*by);, aEnd=((aBegin+wA)-1);, aStep=16;, bBegin=(16*bx);, bStep=(16*wB);, Csub=0;]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: int a;-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: int a;-----
222222-----nonDeclarations.add(stmt)
222222-----i is: a=0;-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: a=0;-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [#pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 , bx=blockIdx.x;, by=blockIdx.y;, aBegin=((wA*16)*by);, aEnd=((aBegin+wA)-1);, aStep=16;, bBegin=(16*bx);, bStep=(16*wB);, Csub=0;, a=0;]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: int b;-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: int b;-----
222222-----nonDeclarations.add(stmt)
222222-----i is: b=0;-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: b=0;-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [#pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 , bx=blockIdx.x;, by=blockIdx.y;, aBegin=((wA*16)*by);, aEnd=((aBegin+wA)-1);, aStep=16;, bBegin=(16*bx);, bStep=(16*wB);, Csub=0;, a=0;, b=0;]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: int k;-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: int k;-----
222222-----nonDeclarations.add(stmt)
222222-----i is: k=0;-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: k=0;-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [#pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 , bx=blockIdx.x;, by=blockIdx.y;, aBegin=((wA*16)*by);, aEnd=((aBegin+wA)-1);, aStep=16;, bBegin=(16*bx);, bStep=(16*wB);, Csub=0;, a=0;, b=0;, k=0;]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [#pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 , bx=blockIdx.x;, by=blockIdx.y;, aBegin=((wA*16)*by);, aEnd=((aBegin+wA)-1);, aStep=16;, bBegin=(16*bx);, bStep=(16*wB);, Csub=0;, a=0;, b=0;, k=0;, for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: int c;-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: int c;-----
222222-----nonDeclarations.add(stmt)
222222-----i is: c=(((wB*16)*by)+(16*bx));-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: c=(((wB*16)*by)+(16*bx));-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [#pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 , bx=blockIdx.x;, by=blockIdx.y;, aBegin=((wA*16)*by);, aEnd=((aBegin+wA)-1);, aStep=16;, bBegin=(16*bx);, bStep=(16*wB);, Csub=0;, a=0;, b=0;, k=0;, for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}, c=(((wB*16)*by)+(16*bx));]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [#pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 , bx=blockIdx.x;, by=blockIdx.y;, aBegin=((wA*16)*by);, aEnd=((aBegin+wA)-1);, aStep=16;, bBegin=(16*bx);, bStep=(16*wB);, Csub=0;, a=0;, b=0;, k=0;, for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}, c=(((wB*16)*by)+(16*bx));, C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;]-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: #pragma HLS INTERFACE ap_bus port=A depth=3840 -----
222222-----d.detach()
222222-----d is: #pragma HLS INTERFACE ap_bus port=A depth=3840 -----
222222-----scope.addStatement(d)
222222-----scope is: {
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin;
bBegin=(16*bx);
int bStep;
bStep=(16*wB);
DATATYPE Csub;
Csub=0;
int a;
a=0;
int b;
b=0;
int k;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c;
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
#pragma HLS INTERFACE ap_bus port=A depth=3840
}-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: #pragma HLS INTERFACE ap_bus port=B depth=6144 -----
222222-----d.detach()
222222-----d is: #pragma HLS INTERFACE ap_bus port=B depth=6144 -----
222222-----scope.addStatement(d)
222222-----scope is: {
#pragma HLS INTERFACE ap_bus port=C depth=10240
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin;
bBegin=(16*bx);
int bStep;
bStep=(16*wB);
DATATYPE Csub;
Csub=0;
int a;
a=0;
int b;
b=0;
int k;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c;
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
}-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: #pragma HLS INTERFACE ap_bus port=C depth=10240 -----
222222-----d.detach()
222222-----d is: #pragma HLS INTERFACE ap_bus port=C depth=10240 -----
222222-----scope.addStatement(d)
222222-----scope is: {
int bx;
bx=blockIdx.x;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin;
bBegin=(16*bx);
int bStep;
bStep=(16*wB);
DATATYPE Csub;
Csub=0;
int a;
a=0;
int b;
b=0;
int k;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c;
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
}-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: bx=blockIdx.x;-----
222222-----d.detach()
222222-----d is: bx=blockIdx.x;-----
222222-----scope.addStatement(d)
222222-----scope is: {
int bx;
int by;
by=blockIdx.y;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin;
bBegin=(16*bx);
int bStep;
bStep=(16*wB);
DATATYPE Csub;
Csub=0;
int a;
a=0;
int b;
b=0;
int k;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c;
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
}-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: by=blockIdx.y;-----
222222-----d.detach()
222222-----d is: by=blockIdx.y;-----
222222-----scope.addStatement(d)
222222-----scope is: {
int bx;
int by;
int aBegin;
aBegin=((wA*16)*by);
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin;
bBegin=(16*bx);
int bStep;
bStep=(16*wB);
DATATYPE Csub;
Csub=0;
int a;
a=0;
int b;
b=0;
int k;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c;
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
}-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: aBegin=((wA*16)*by);-----
222222-----d.detach()
222222-----d is: aBegin=((wA*16)*by);-----
222222-----scope.addStatement(d)
222222-----scope is: {
int bx;
int by;
int aBegin;
int aEnd;
aEnd=((aBegin+wA)-1);
int aStep;
aStep=16;
int bBegin;
bBegin=(16*bx);
int bStep;
bStep=(16*wB);
DATATYPE Csub;
Csub=0;
int a;
a=0;
int b;
b=0;
int k;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c;
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
}-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: aEnd=((aBegin+wA)-1);-----
222222-----d.detach()
222222-----d is: aEnd=((aBegin+wA)-1);-----
222222-----scope.addStatement(d)
222222-----scope is: {
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
aStep=16;
int bBegin;
bBegin=(16*bx);
int bStep;
bStep=(16*wB);
DATATYPE Csub;
Csub=0;
int a;
a=0;
int b;
b=0;
int k;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c;
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
}-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: aStep=16;-----
222222-----d.detach()
222222-----d is: aStep=16;-----
222222-----scope.addStatement(d)
222222-----scope is: {
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
bBegin=(16*bx);
int bStep;
bStep=(16*wB);
DATATYPE Csub;
Csub=0;
int a;
a=0;
int b;
b=0;
int k;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c;
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
}-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: bBegin=(16*bx);-----
222222-----d.detach()
222222-----d is: bBegin=(16*bx);-----
222222-----scope.addStatement(d)
222222-----scope is: {
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
bStep=(16*wB);
DATATYPE Csub;
Csub=0;
int a;
a=0;
int b;
b=0;
int k;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c;
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
}-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: bStep=(16*wB);-----
222222-----d.detach()
222222-----d is: bStep=(16*wB);-----
222222-----scope.addStatement(d)
222222-----scope is: {
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
Csub=0;
int a;
a=0;
int b;
b=0;
int k;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c;
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
}-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: Csub=0;-----
222222-----d.detach()
222222-----d is: Csub=0;-----
222222-----scope.addStatement(d)
222222-----scope is: {
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
a=0;
int b;
b=0;
int k;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c;
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
}-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: a=0;-----
222222-----d.detach()
222222-----d is: a=0;-----
222222-----scope.addStatement(d)
222222-----scope is: {
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
b=0;
int k;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c;
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
}-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: b=0;-----
222222-----d.detach()
222222-----d is: b=0;-----
222222-----scope.addStatement(d)
222222-----scope is: {
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c;
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
}-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: k=0;-----
222222-----d.detach()
222222-----d is: k=0;-----
222222-----scope.addStatement(d)
222222-----scope is: {
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
int c;
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
}-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}-----
222222-----d.detach()
222222-----d is: for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}-----
222222-----scope.addStatement(d)
222222-----scope is: {
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
}-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: c=(((wB*16)*by)+(16*bx));-----
222222-----d.detach()
222222-----d is: c=(((wB*16)*by)+(16*bx));-----
222222-----scope.addStatement(d)
222222-----scope is: {
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
}-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;-----
222222-----d.detach()
222222-----d is: C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;-----
222222-----scope.addStatement(d)
222222-----scope is: {
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}-----
00-----List<Traversable> statements = scope.getChildren()
00-----statements is: [__shared__ DATATYPE As[16][16];, __shared__ DATATYPE Bs[16][16];, As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];, Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];, __syncthreads();, {
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}, __syncthreads();]-----
00-----List<Statement> nonDeclarations = new LinkedList<Statement>()
00-----statements is: [__shared__ DATATYPE As[16][16];, __shared__ DATATYPE Bs[16][16];, As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];, Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];, __syncthreads();, {
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}, __syncthreads();]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: __shared__ DATATYPE As[16][16];-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: __shared__ DATATYPE As[16][16];-----
222222-----nonDeclarations.add(stmt)
222222-----i is: __shared__ DATATYPE Bs[16][16];-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: __shared__ DATATYPE Bs[16][16];-----
222222-----nonDeclarations.add(stmt)
222222-----i is: As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];, Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: __syncthreads();-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: __syncthreads();-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];, Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];, __syncthreads();]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: {
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: {
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];, Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];, __syncthreads();, {
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: __syncthreads();-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: __syncthreads();-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];, Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];, __syncthreads();, {
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}, __syncthreads();]-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];-----
222222-----d.detach()
222222-----d is: As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];-----
222222-----scope.addStatement(d)
222222-----scope is: {
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
}-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];-----
222222-----d.detach()
222222-----d is: Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];-----
222222-----scope.addStatement(d)
222222-----scope is: {
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
}-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: __syncthreads();-----
222222-----d.detach()
222222-----d is: __syncthreads();-----
222222-----scope.addStatement(d)
222222-----scope is: {
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
}-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: {
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}-----
222222-----d.detach()
222222-----d is: {
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}-----
222222-----scope.addStatement(d)
222222-----scope is: {
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
__syncthreads();
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
}-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: __syncthreads();-----
222222-----d.detach()
222222-----d is: __syncthreads();-----
222222-----scope.addStatement(d)
222222-----scope is: {
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}-----
00-----List<Traversable> statements = scope.getChildren()
00-----statements is: [lp1:, for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}]-----
00-----List<Statement> nonDeclarations = new LinkedList<Statement>()
00-----statements is: [lp1:, for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: lp1:-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: lp1:-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [lp1:]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [lp1:, for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}]-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: lp1:-----
222222-----d.detach()
222222-----d is: lp1:-----
222222-----scope.addStatement(d)
222222-----scope is: {
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
lp1:
}-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}-----
222222-----d.detach()
222222-----d is: for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}-----
222222-----scope.addStatement(d)
222222-----scope is: {
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}-----
00-----List<Traversable> statements = scope.getChildren()
00-----statements is: [Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);]-----
00-----List<Statement> nonDeclarations = new LinkedList<Statement>()
00-----statements is: [Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);]-----
222222-----nonDeclarations.add(stmt)
222222-----i is: Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);-----
1111-----Statement stmt = (Statement)i
1111-----stmt is: Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);-----
222222-----if(!(stmt instanceof DeclarationStatement))
222222-----nonDeclarations.add(stmt)
222222-----nonDeclarations is: [Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);]-----
222222-----enter for(Statement d : nonDeclarations)
222222-----d is: Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);-----
222222-----d.detach()
222222-----d is: Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);-----
222222-----scope.addStatement(d)
222222-----scope is: {
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}-----
[AnsiDeclarations] end in 0.02 seconds
[LinkSymbol] 81 updates in 0.00 seconds
*** After AnsiDeclarations ***
#include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
===========================================
[StreamInsertion-FCUDA] begin
[StreamInsertion-FCUDA] examining procedure matrixMul
-----kernelTransformPass, finish transformProcedure-----, proc now is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
[StreamInsertion-FCUDA] end in 0.01 seconds
[LinkSymbol] 81 updates in 0.00 seconds
*** After StreamInsertion ***
#include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
===========================================
[CreateTaskRegions-FCUDA] begin
[CreateTaskRegions-FCUDA] examining procedure matrixMul
001
002
003---------------------------------------------------------------------------------------------------------------------------------------------------
----Collect global-memory arrays/pointers symbols (includes __constant__ symbols): glMemArraySet = GlobalMemUtils.getGlobMemSymbols(mProcedure)-------
------------------------------------------------------------------------------------------------------------------------------------------------------
----[* A, * C, * B]----
----1.1----mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
Global-Mem Arrays: [* A, * C, * B]
----now identifyGlmemAccs----
PointerSet: [* A, * C, * B]
findGMrefs for: A
---before continue, GM refs in ArrayAccess format already----
findGMrefs for: B
---before continue, GM refs in ArrayAccess format already----
findGMrefs for: C
---before continue, GM refs in ArrayAccess format already----
AliasSet: []
derefAccs: []
004---------------------------------------------------------------------------------------------------------------------------------------------------
----Convert dereference-based global-mem accesses to array-accesses and find global-mem aliases: identifyGlMemAccs();-------
------------------------------------------------------------------------------------------------------------------------------------------------------
----1.2----mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
----now handleMixStmts----
getMixdStmts Symbols: [* A, * C, * B]
getMixdStmts Global Symbols: [* A, * C, * B]
getMixdStmts Alias Symbols: []
symUses: [A]
Contained GM Ref: A[((a+(wA*threadIdx.y))+threadIdx.x)]
Candidate MIXED stmt: As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
symUses: [B]
Contained GM Ref: B[((b+(wB*threadIdx.y))+threadIdx.x)]
Candidate MIXED stmt: Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
symUses: [C]
Contained GM Ref: C[((c+(wB*threadIdx.y))+threadIdx.x)]
Candidate MIXED stmt: C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
mixdStmts: []
--------------------------------
005---------------------------------------------------------------------------------------------------------------------------------------------------
----Eliminate MIXED statements (i.e. statements that contain both COMPUTE & TRANSFER parts): handleMixStmts();-------
------------------------------------------------------------------------------------------------------------------------------------------------------
----1.3----mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
}
----FcudaGlobalData2.java start----
-----cetus application IPChainAnalysis start----
-----cetus application IPChainAnalysis 1----
------IPChainAnalysis.java PerformAliasAnalysis start----
-------IPPointsToAnalysis.java IPPointsToAnalysis start super----
--------IPAnalysis.java IPAnalysis start 1----
--------IPAnalysis.java IPAnalysis start 2----
[NormalizeReturn] begin
*AP* procedure: __syncthreads
*AP* is_void=true
*AP* ret_expr= null
*AP* procedure: matrixMul
*AP* is_void=true
*AP* ret_expr= null
[NormalizeReturn] end in 0.00 seconds
[LinkSymbol] 81 updates in 0.00 seconds
--------IPAnalysis.java IPAnalysis start 3 enter IPA Graph----
---------IPAGraph.java finish super()----
---------IPAGraph.java start new Arraylist <IPANode>----
---------IPAGraph.java enter buildgraph<prog>
---------IPAGraph.java enter identifyCloneableNodes
---------IPAGraph.java enter buildTopOrder
---------IPAGraph.java finish new IPA Graph
[IPA] Stops due to no flow entry
-------IPPointsToAnalysis.java IPPointsToAnalysis finished super----
-------IPPointsToAnalysis.java IPPointsToAnalysis finished 2----
-------IPPointsToAnalysis.java IPPointsToAnalysis end----
------PerformAliasAnalysis 1----
0000IPpointsToAnalysis
[IPA:PointsTo] Stops due to no flow entry
------PerformAliasAnalysis end----
-----cetus application IPChainAnalysis 2----
-----cetus application IPChainAnalysis 3----
----------------------------------------------
----start to generate CF graph with thread----
----------------------------------------------
proc now is: void __syncthreads()
{
;
return ;
}
=====
proc now is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
return ;
}
=====
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
-----cetus application IPChainAnalysis 4----
--------------------------------------------------------
----start to generate generate Program Summary Graph----
--------------------------------------------------------
############### PSG Summary Detail[__syncthreads()] #################
## Entry Node ##
# Ref Info (psg_entry_ref, use)
psg_entry_ref is null
# Global Info (psg_entry_global, UseOutSet)
psg_entry_global is null
## Exit Node ##
# Ref Info (psg_exit_ref, def)
psg_exit_ref is null
# Global Info (psg_exit_global, DefInSet)
psg_exit_global is null
############### PSG Summary Detail[matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)] #################
## Entry Node ##
# Ref Info (psg_entry_ref, use)
# Global Info (psg_entry_global, UseOutSet)
## Call Node ## IR: __syncthreads();
# Ref Info (psg_call_ref, def)
psg_call_ref is null
# Global Info (psg_call_global, DefInSet)
psg_call_global is null
## Return Node ## IR: __syncthreads();
# Ref Info (psg_return_ref, use)
psg_return_ref is null
# Global Info (psg_return_global, UseOutSet)
psg_return_global is null
## Call Node ## IR: __syncthreads();
# Ref Info (psg_call_ref, def)
psg_call_ref is null
# Global Info (psg_call_global, DefInSet)
psg_call_global is null
## Return Node ## IR: __syncthreads();
# Ref Info (psg_return_ref, use)
psg_return_ref is null
# Global Info (psg_return_global, UseOutSet)
psg_return_global is null
## Exit Node ##
# Ref Info (psg_exit_ref, def)
-DEF: C, NODE: C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
-DEF: A, NODE: * A
-DEF: B, NODE: * B
# Global Info (psg_exit_global, DefInSet)
############### PSG Propagated Detail[__syncthreads()] #################
## Entry Node ##
# Ref Info (psg_entry_ref)
psg_entry_ref is null
# Global Info (psg_entry_global)
psg_entry_global is null
## Exit Node ##
# Ref Info (psg_exit_ref)
psg_exit_ref is null
# Global Info (psg_exit_global)
psg_exit_global is null
############### PSG Propagated Detail[matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)] #################
## Entry Node ##
# Ref Info (psg_entry_ref)
# parameter: B
# parameter: C
# parameter: A
# Global Info (psg_entry_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
## Call Node ## IR: __syncthreads();
# Ref Info (psg_call_ref)
psg_call_ref is null
# Global Info (psg_call_global)
psg_call_global is null
## Return Node ## IR: __syncthreads();
# Ref Info (psg_return_ref)
psg_return_ref is null
# Global Info (psg_return_global)
psg_return_global is null
## Call Node ## IR: __syncthreads();
# Ref Info (psg_call_ref)
psg_call_ref is null
# Global Info (psg_call_global)
psg_call_global is null
## Return Node ## IR: __syncthreads();
# Ref Info (psg_return_ref)
psg_return_ref is null
# Global Info (psg_return_global)
psg_return_global is null
## Exit Node ##
# Ref Info (psg_exit_ref)
-OUTdef: C, NODE: C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
-OUTdef: A, NODE: * A
-OUTdef: B, NODE: * B
# Global Info (psg_exit_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
-----cetus application IPChainAnalysis 5----
-----cetus application IPChainAnalysis 6----
-----cetus application IPChainAnalysis 7----
-----cetus application IPChainAnalysis end----
----FcudaGlobalData2.java end----
----1.4----mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
return ;
}
006
Instances of tidx: [threadIdx, threadIdx, threadIdx, threadIdx, threadIdx, threadIdx, threadIdx, threadIdx, threadIdx, threadIdx, threadIdx, threadIdx]
----c.1---- idx is: threadIdx
----c.1---- parStmt is: As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
----c.1---- idx is: threadIdx
----c.1---- parStmt is: As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
----c.1---- idx is: threadIdx
----c.1---- parStmt is: As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
----c.1---- idx is: threadIdx
----c.1---- parStmt is: As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
----c.1---- idx is: threadIdx
----c.1---- parStmt is: Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
----c.1---- idx is: threadIdx
----c.1---- parStmt is: Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
----c.1---- idx is: threadIdx
----c.1---- parStmt is: Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
----c.1---- idx is: threadIdx
----c.1---- parStmt is: Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
----c.1---- idx is: threadIdx
----c.1---- parStmt is: Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
----c.1---- idx is: threadIdx
----c.1---- parStmt is: Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
----c.1---- idx is: threadIdx
----c.1---- parStmt is: C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
----c.1---- idx is: threadIdx
----c.1---- parStmt is: C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
defIDs: [As, As, As, As, Bs, Bs, Bs, Bs, Csub, Csub, C, C]
Looking for uses of: As
... in: Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
... useID: As
Looking for uses of: As
... in: #pragma fcuda stmt tdep_vars=[As]
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
Looking for uses of: As
... in: #pragma fcuda stmt tdep_vars=[As]
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
Looking for uses of: As
... in: #pragma fcuda stmt tdep_vars=[As]
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
Looking for uses of: Bs
... in: #pragma fcuda stmt tdep_vars=[As]
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
... useID: Bs
Looking for uses of: Bs
... in: #pragma fcuda stmt tdep_vars=[Bs, As]
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
Looking for uses of: Bs
... in: #pragma fcuda stmt tdep_vars=[Bs, As]
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
Looking for uses of: Bs
... in: #pragma fcuda stmt tdep_vars=[Bs, As]
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
Looking for uses of: Csub
... in: #pragma fcuda stmt tdep_vars=[Bs, As]
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
... useID: Csub
... in: C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
... useID: Csub
Looking for uses of: Csub
... in: #pragma fcuda stmt tdep_vars=[Bs, As, Csub]
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
... in: #pragma fcuda stmt tdep_vars=[Csub]
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
Looking for uses of: C
Looking for uses of: C
defIDs: [Csub, Csub, Csub, C]
Looking for uses of: Csub
... in: #pragma fcuda stmt tdep_vars=[Bs, As, Csub]
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
... in: #pragma fcuda stmt tdep_vars=[Csub]
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
Looking for uses of: Csub
... in: #pragma fcuda stmt tdep_vars=[Bs, As, Csub]
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
... in: #pragma fcuda stmt tdep_vars=[Csub]
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
Looking for uses of: Csub
... in: #pragma fcuda stmt tdep_vars=[Bs, As, Csub]
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
... in: #pragma fcuda stmt tdep_vars=[Csub]
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
Looking for uses of: C
----c.2---- tDepStmt is: #pragma fcuda stmt tdep_vars=[Csub]
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
----c.2---- tDepStmt is: As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
----c.21---- tDepStmt is: #pragma fcuda stmt tdep=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
----c.2---- tDepStmt is: Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
----c.21---- tDepStmt is: #pragma fcuda stmt tdep=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
----c.2---- tDepStmt is: #pragma fcuda stmt tdep_vars=[Bs, As, Csub]
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
----1.5----mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt tdep=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt tdep=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt tdep_vars=[Bs, As, Csub] tdep=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda stmt tdep_vars=[Csub] tdep=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
return ;
}
007
findTransferStmts Symbols: [* A, * C, * B]
findTransferStmts non-Const Symbols: [* A, * C, * B]
findTransferStmts for: A
findTransferStmts for: B
findTransferStmts for: C
INFO - findTransferStmts: 12 address index IDs
....IPChainAnalysis getDefList....
INFO - findTransferStmts: 2 defStmts for addrID: a
INFO - findTransferStmts: Corner case forloop definition: ((a=aBegin), (b=bBegin));
WARNING - findTransferStmts: Definition of class: class cetus.hir.CommaExpression
((a+=aStep), (b+=bStep))
....IPChainAnalysis getDefList....
INFO - findTransferStmts: 1 defStmts for addrID: wA
WARNING - findTransferStmts: Definition of class: class cetus.hir.VariableDeclarator
wA
....IPChainAnalysis getDefList....
INFO - findTransferStmts: 2 defStmts for addrID: b
INFO - findTransferStmts: Corner case forloop definition: ((a=aBegin), (b=bBegin));
WARNING - findTransferStmts: Definition of class: class cetus.hir.CommaExpression
((a+=aStep), (b+=bStep))
....IPChainAnalysis getDefList....
INFO - findTransferStmts: 1 defStmts for addrID: wB
WARNING - findTransferStmts: Definition of class: class cetus.hir.VariableDeclarator
wB
....IPChainAnalysis getDefList....
INFO - findTransferStmts: 1 defStmts for addrID: c
....IPChainAnalysis getDefList....
INFO - findTransferStmts: 1 defStmts for addrID: wB
WARNING - findTransferStmts: Definition of class: class cetus.hir.VariableDeclarator
wB
----1.6----mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A tdep=true TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B tdep=true TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt tdep_vars=[Bs, As, Csub] tdep=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda stmt rdNwrt=false GLBpntr=C tdep_vars=[Csub] tdep=true TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
return ;
}
008
----1.7----mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_3 tdep=true seqID=3
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
return ;
}
009_finish
-----kernelTransformPass, finish transformProcedure-----, proc now is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_3 tdep=true seqID=3
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
return ;
}
[CreateTaskRegions-FCUDA] end in 0.33 seconds
[LinkSymbol] 81 updates in 0.00 seconds
*** After CreateTaskRegions ***
#include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_3 tdep=true seqID=3
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
return ;
}
===========================================
[UniformCFstructs-FCUDA] begin
[UniformCFstructs-FCUDA] examining procedure matrixMul
----001----enter UniformCFstructs
----002----defUseData = FCUDAGlobalData2.getDataDepAnalysis(program)
-----001----- fcudaGlobalData2.java enter UpdateHTG
-----001----- proc is
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_3 tdep=true seqID=3
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
return ;
}
-----001----- proc.getSymbolName() is
matrixMul
------001------ HTGraph.java super()
------002------ HTGraph.java curTask = new String()
------003------ HTGraph.java HTGName = new String(name)
------004------ HTGraph.java if(trv instanceof Procedure)
------005------ HTGraph.java mProcedure = (Procedure) trv
------007------ HTGraph.java before build graph
------0007------ t is procedure now
------0007------ t is procedure now, getbody
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_3 tdep=true seqID=3
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
return ;
}
------0007------ t is CompoundStatement now
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_3 tdep=true seqID=3
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
return ;
}
------0007------ buildCompound cStmt getChildren
[int bx;, int by;, int aBegin;, int aEnd;, int aStep;, int bBegin;, int bStep;, DATATYPE Csub;, int a;, int b;, int k;, int c;, #pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 , bx=blockIdx.x;, by=blockIdx.y;, aBegin=((wA*16)*by);, aEnd=((aBegin+wA)-1);, aStep=16;, bBegin=(16*bx);, bStep=(16*wB);, Csub=0;, a=0;, b=0;, k=0;, for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_3 tdep=true seqID=3
__syncthreads();
}, c=(((wB*16)*by)+(16*bx));, #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;, return ;]
------0007------ buildCompound for(Traversable trv : children), current trv is
int bx;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int bx;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int by;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int by;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int aBegin;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int aBegin;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int aEnd;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int aEnd;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int aStep;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int aStep;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int bBegin;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int bBegin;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int bStep;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int bStep;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
DATATYPE Csub;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for DATATYPE Csub;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int a;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int a;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int b;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int b;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int k;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int k;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int c;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int c;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma HLS INTERFACE ap_bus port=A depth=3840
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for #pragma HLS INTERFACE ap_bus port=A depth=3840
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma HLS INTERFACE ap_bus port=B depth=6144
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for #pragma HLS INTERFACE ap_bus port=B depth=6144
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma HLS INTERFACE ap_bus port=C depth=10240
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for #pragma HLS INTERFACE ap_bus port=C depth=10240
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
bx=blockIdx.x;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for bx=blockIdx.x;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
by=blockIdx.y;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for by=blockIdx.y;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
aBegin=((wA*16)*by);
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for aBegin=((wA*16)*by);
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
aEnd=((aBegin+wA)-1);
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for aEnd=((aBegin+wA)-1);
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
aStep=16;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for aStep=16;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
bBegin=(16*bx);
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for bBegin=(16*bx);
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
bStep=(16*wB);
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for bStep=(16*wB);
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
Csub=0;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for Csub=0;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
a=0;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for a=0;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
b=0;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for b=0;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
k=0;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for k=0;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_3 tdep=true seqID=3
__syncthreads();
}
------ for loop current trv is not compound
------0007------ t is ForLoop now
------0007------ t is CompoundStatement now
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_3 tdep=true seqID=3
__syncthreads();
}
------0007------ buildCompound cStmt getChildren
[__shared__ DATATYPE As[16][16];, __shared__ DATATYPE Bs[16][16];, #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];, #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];, #pragma fcuda stmt SNCtask=true name=SNC_1 tdep=true seqID=1
__syncthreads();, {
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}, #pragma fcuda stmt SNCtask=true name=SNC_3 tdep=true seqID=3
__syncthreads();]
------0007------ buildCompound for(Traversable trv : children), current trv is
__shared__ DATATYPE As[16][16];
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for __shared__ DATATYPE As[16][16];
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
__shared__ DATATYPE Bs[16][16];
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for __shared__ DATATYPE Bs[16][16];
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular tdep stmt, TRN task----
-----added node: TRN_0
------done curr build graph
--- DFAGraph for #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular tdep stmt, TRN task----
-----added node: TRN_0
------done curr build graph
--- DFAGraph for #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma fcuda stmt SNCtask=true name=SNC_1 tdep=true seqID=1
__syncthreads();
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular tdep stmt, SYNC task----
-----added node: SNC_1
------done curr build graph
--- DFAGraph for #pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ buildCompound for(Traversable trv : children), current trv is
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
------ for loop current trv is compound
------0007------ t is CompoundStatement now
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
------0007------ buildCompound cStmt getChildren
[lp1:, for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}]
------0007------ buildCompound for(Traversable trv : children), current trv is
lp1:
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for lp1:
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
------ for loop current trv is not compound
------0007------ t is ForLoop now
------0007------ t is CompoundStatement now
{
#pragma fcuda stmt name=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
------0007------ buildCompound cStmt getChildren
[#pragma fcuda stmt name=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);]
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma fcuda stmt name=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular tdep stmt, COM task----
-----added node: CMP_2
------done curr build graph
--- DFAGraph for #pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ exit buildCompound for loop
------ ret.size > 0------
------ wrap enabled------
====== DFAGraph.java enter getFirst()
------ exit buildCompound with wrap enabled, and ret > 0, return superDFA------
------ exit buildCompound with wrap enabled, and ret == 0, return superDFA------
------done curr build graph
------ for loop ret has node------
====== DFAGraph.java enter getFirst()
------done curr build graph
--- DFAGraph for #pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ exit buildCompound for loop
------ ret.size > 0------
------ exit buildCompound with NO wrap, return ret------
------done curr build graph
--- DFAGraph for {
lp1:
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma fcuda stmt SNCtask=true name=SNC_3 tdep=true seqID=3
__syncthreads();
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular tdep stmt, SYNC task----
-----added node: SNC_3
------done curr build graph
--- DFAGraph for #pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ exit buildCompound for loop
------ ret.size > 0------
------ wrap enabled------
====== DFAGraph.java enter getFirst()
------ exit buildCompound with wrap enabled, and ret > 0, return superDFA------
------ exit buildCompound with wrap enabled, and ret == 0, return superDFA------
------done curr build graph
------ for loop ret has node------
====== DFAGraph.java enter getFirst()
------done curr build graph
--- DFAGraph for #pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
}
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ buildCompound for(Traversable trv : children), current trv is
c=(((wB*16)*by)+(16*bx));
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for c=(((wB*16)*by)+(16*bx));
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular tdep stmt, TRN task----
-----added node: TRN_4
------done curr build graph
--- DFAGraph for #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 HTGNode=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ buildCompound for(Traversable trv : children), current trv is
return ;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for return ;
has 0 nodes
------0007------ exit buildCompound for loop
------ ret.size > 0------
------ exit buildCompound with NO wrap, return ret------
------done curr build graph
------done curr build graph
------008------ HTGraph.java DFAGraph dfag = buildGraph(trv)
-----002----- fcudaGlobalData2.java HTGraph htg = new HTGraph(proc, proc.getSymbolName())
-----003----- fcudaGlobalData2.java mKern2HTG.put(proc, htg)
----003----defUseData = FCUDAGlobalData2.getDataDepAnalysis(program)
HTG PRINT-OUT
====================
| Graph: matrixMul L1
|
V
--------------------
| FOR_HTG_TRN_0
| type: FOR
| tdep: false
--------------------
====================
| Graph: HTG_TRN_0 L2
|
V
--------------------
| TRN_0
| ParentNode: FOR_HTG_TRN_0
| type: TRN
--------------------
|
V
--------------------
| SNC_1
| ParentNode: FOR_HTG_TRN_0
| type: SNC
--------------------
|
V
--------------------
| FOR_HTG_CMP_2
| ParentNode: FOR_HTG_TRN_0
| type: FOR
| tdep: false
--------------------
====================
| Graph: HTG_CMP_2 L3
|
V
--------------------
| CMP_2
| ParentNode: FOR_HTG_CMP_2
| type: CMP
| Addr-Use: false
--------------------
\===== HTG_CMP_2 =====/
|
V
--------------------
| SNC_3
| ParentNode: FOR_HTG_TRN_0
| type: SNC
--------------------
\===== HTG_TRN_0 =====/
|
V
--------------------
| TRN_4
| type: TRN
--------------------
\===== matrixMul =====/
----004----procHTG.printGraph_1_0
----005----after if rgnBounds.add_none_or_TRN
----006----List<DFANode> nonUniformCFs = new LinkedList_DFANode
----007----identifyCFs(nonUniformCFs, rgnBounds, procHTG.getFirst(), false)
----007----before 008, nonUniformCFs is: []
-----kernelTransformPass, finish transformProcedure-----, proc now is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 HTGNode=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
return ;
}
[UniformCFstructs-FCUDA] end in 0.02 seconds
[LinkSymbol] 81 updates in 0.00 seconds
*** After UniformCFstructs ***
#include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 HTGNode=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
return ;
}
===========================================
[AnnotateTasks-FCUDA] begin
[AnnotateTasks-FCUDA] examining procedure matrixMul
------001------ HTGraph.java super()
------002------ HTGraph.java curTask = new String()
------003------ HTGraph.java HTGName = new String(name)
------004------ HTGraph.java if(trv instanceof Procedure)
------005------ HTGraph.java mProcedure = (Procedure) trv
------007------ HTGraph.java before build graph
------0007------ t is procedure now
------0007------ t is procedure now, getbody
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 HTGNode=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
return ;
}
------0007------ t is CompoundStatement now
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 HTGNode=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
return ;
}
------0007------ buildCompound cStmt getChildren
[int bx;, int by;, int aBegin;, int aEnd;, int aStep;, int bBegin;, int bStep;, DATATYPE Csub;, int a;, int b;, int k;, int c;, #pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 , bx=blockIdx.x;, by=blockIdx.y;, aBegin=((wA*16)*by);, aEnd=((aBegin+wA)-1);, aStep=16;, bBegin=(16*bx);, bStep=(16*wB);, Csub=0;, a=0;, b=0;, k=0;, #pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
}, c=(((wB*16)*by)+(16*bx));, #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 HTGNode=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;, return ;]
------0007------ buildCompound for(Traversable trv : children), current trv is
int bx;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int bx;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int by;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int by;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int aBegin;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int aBegin;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int aEnd;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int aEnd;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int aStep;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int aStep;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int bBegin;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int bBegin;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int bStep;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int bStep;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
DATATYPE Csub;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for DATATYPE Csub;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int a;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int a;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int b;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int b;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int k;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int k;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int c;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int c;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma HLS INTERFACE ap_bus port=A depth=3840
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for #pragma HLS INTERFACE ap_bus port=A depth=3840
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma HLS INTERFACE ap_bus port=B depth=6144
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for #pragma HLS INTERFACE ap_bus port=B depth=6144
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma HLS INTERFACE ap_bus port=C depth=10240
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for #pragma HLS INTERFACE ap_bus port=C depth=10240
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
bx=blockIdx.x;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for bx=blockIdx.x;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
by=blockIdx.y;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for by=blockIdx.y;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
aBegin=((wA*16)*by);
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for aBegin=((wA*16)*by);
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
aEnd=((aBegin+wA)-1);
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for aEnd=((aBegin+wA)-1);
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
aStep=16;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for aStep=16;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
bBegin=(16*bx);
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for bBegin=(16*bx);
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
bStep=(16*wB);
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for bStep=(16*wB);
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
Csub=0;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for Csub=0;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
a=0;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for a=0;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
b=0;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for b=0;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
k=0;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for k=0;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
}
------ for loop current trv is not compound
------0007------ t is ForLoop now
------0007------ t is CompoundStatement now
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
}
------0007------ buildCompound cStmt getChildren
[__shared__ DATATYPE As[16][16];, __shared__ DATATYPE Bs[16][16];, #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];, #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];, #pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();, {
lp1:
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}, #pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();]
------0007------ buildCompound for(Traversable trv : children), current trv is
__shared__ DATATYPE As[16][16];
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for __shared__ DATATYPE As[16][16];
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
__shared__ DATATYPE Bs[16][16];
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for __shared__ DATATYPE Bs[16][16];
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular tdep stmt, TRN task----
-----added node: TRN_0
------done curr build graph
--- DFAGraph for #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular tdep stmt, TRN task----
-----added node: TRN_0
------done curr build graph
--- DFAGraph for #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular tdep stmt, SYNC task----
-----added node: SNC_1
------done curr build graph
--- DFAGraph for #pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ buildCompound for(Traversable trv : children), current trv is
{
lp1:
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
------ for loop current trv is compound
------0007------ t is CompoundStatement now
{
lp1:
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
------0007------ buildCompound cStmt getChildren
[lp1:, #pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}]
------0007------ buildCompound for(Traversable trv : children), current trv is
lp1:
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for lp1:
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
------ for loop current trv is not compound
------0007------ t is ForLoop now
------0007------ t is CompoundStatement now
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
------0007------ buildCompound cStmt getChildren
[#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);]
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular tdep stmt, COM task----
-----added node: CMP_2
------done curr build graph
--- DFAGraph for #pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ exit buildCompound for loop
------ ret.size > 0------
------ wrap enabled------
====== DFAGraph.java enter getFirst()
------ exit buildCompound with wrap enabled, and ret > 0, return superDFA------
------ exit buildCompound with wrap enabled, and ret == 0, return superDFA------
------done curr build graph
------ for loop ret has node------
====== DFAGraph.java enter getFirst()
------done curr build graph
--- DFAGraph for #pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ exit buildCompound for loop
------ ret.size > 0------
------ exit buildCompound with NO wrap, return ret------
------done curr build graph
--- DFAGraph for {
lp1:
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular tdep stmt, SYNC task----
-----added node: SNC_3
------done curr build graph
--- DFAGraph for #pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ exit buildCompound for loop
------ ret.size > 0------
------ wrap enabled------
====== DFAGraph.java enter getFirst()
------ exit buildCompound with wrap enabled, and ret > 0, return superDFA------
------ exit buildCompound with wrap enabled, and ret == 0, return superDFA------
------done curr build graph
------ for loop ret has node------
====== DFAGraph.java enter getFirst()
------done curr build graph
--- DFAGraph for #pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
}
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ buildCompound for(Traversable trv : children), current trv is
c=(((wB*16)*by)+(16*bx));
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for c=(((wB*16)*by)+(16*bx));
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 HTGNode=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular tdep stmt, TRN task----
-----added node: TRN_4
------done curr build graph
--- DFAGraph for #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 HTGNode=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ buildCompound for(Traversable trv : children), current trv is
return ;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for return ;
has 0 nodes
------0007------ exit buildCompound for loop
------ ret.size > 0------
------ exit buildCompound with NO wrap, return ret------
------done curr build graph
------done curr build graph
------008------ HTGraph.java DFAGraph dfag = buildGraph(trv)
PRINT-GRAPH OUTPUT
====================
| Graph: matrixMul L1
|
V
--------------------
| FOR_HTG_TRN_0
| type: FOR
| tdep: false
--------------------
====================
| Graph: HTG_TRN_0 L2
|
V
--------------------
| TRN_0
| ParentNode: FOR_HTG_TRN_0
| type: TRN
--------------------
|
V
--------------------
| SNC_1
| ParentNode: FOR_HTG_TRN_0
| type: SNC
--------------------
|
V
--------------------
| FOR_HTG_CMP_2
| ParentNode: FOR_HTG_TRN_0
| type: FOR
| tdep: false
--------------------
====================
| Graph: HTG_CMP_2 L3
|
V
--------------------
| CMP_2
| ParentNode: FOR_HTG_CMP_2
| type: CMP
| Addr-Use: false
--------------------
\===== HTG_CMP_2 =====/
|
V
--------------------
| SNC_3
| ParentNode: FOR_HTG_TRN_0
| type: SNC
--------------------
\===== HTG_TRN_0 =====/
|
V
--------------------
| TRN_4
| type: TRN
--------------------
\===== matrixMul =====/
*** CF Node Info Begin ***
FOR_HTG_TRN_0 info:
- uniform: false - SubTypes: [TRN, CMP, SNC]
*** CF Node Info End ***
*** Number of Tasks in matrixMul :3
---- Target: TRN_0 ----
nodes #: 1
TRN_0
Node Types: [TRN]
uniform: true
**** TLOOPS(1) ****
TLOOP 0
---- Target: TRN_0 ----
nodes #: 1
TRN_0
Node Types: [TRN]
uniform: true
----addStatementBefore----index is:2
----addStatementBefore----index is:3
---- Target: SNC_1 ----
nodes #: 3
SNC_1
FOR_HTG_CMP_2
SNC_3
Node Types: [CMP, SNC]
uniform: true
**** TLOOPS(1) ****
TLOOP 0
---- Target: FOR_HTG_CMP_2 ----
nodes #: 1
FOR_HTG_CMP_2
Node Types: [CMP]
uniform: true
----addStatementBefore----index is:8
----addStatementBefore----index is:1
---- Target: TRN_4 ----
nodes #: 1
TRN_4
Node Types: [TRN]
uniform: true
**** TLOOPS(1) ****
TLOOP 0
---- Target: TRN_4 ----
nodes #: 1
TRN_4
Node Types: [TRN]
uniform: true
----addStatementBefore----index is:28
----addStatementBefore----index is:29
-----kernelTransformPass, finish transformProcedure-----, proc now is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_0 end=false begin=true
#pragma fcuda tloop name=TRN_0 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 tlpName=TRN_0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 tlpName=TRN_0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_0 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_0 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_1 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_2 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2 tlpName=FOR_HTG_CMP_2
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_2
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_2
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 tlpName=FOR_HTG_CMP_2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_2 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_1 end=true begin=false
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_4 end=false begin=true
#pragma fcuda tloop name=TRN_4 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 HTGNode=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 tlpName=TRN_4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
#pragma fcuda tloop name=TRN_4 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_4 end=true begin=false
return ;
}
[AnnotateTasks-FCUDA] end in 0.02 seconds
[LinkSymbol] 81 updates in 0.00 seconds
*** After AnnotateTasks ***
#include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_0 end=false begin=true
#pragma fcuda tloop name=TRN_0 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 tlpName=TRN_0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 tlpName=TRN_0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_0 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_0 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_1 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_2 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2 tlpName=FOR_HTG_CMP_2
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_2
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_2
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 tlpName=FOR_HTG_CMP_2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_2 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_1 end=true begin=false
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_4 end=false begin=true
#pragma fcuda tloop name=TRN_4 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 HTGNode=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 tlpName=TRN_4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
#pragma fcuda tloop name=TRN_4 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_4 end=true begin=false
return ;
}
===========================================
[PrivatizeScalars-FCUDA] begin
[PrivatizeScalars-FCUDA] examining procedure matrixMul
----FcudaGlobalData2.java start----
-----cetus application IPChainAnalysis start----
-----cetus application IPChainAnalysis 1----
------IPChainAnalysis.java PerformAliasAnalysis start----
-------IPPointsToAnalysis.java IPPointsToAnalysis start super----
--------IPAnalysis.java IPAnalysis start 1----
--------IPAnalysis.java IPAnalysis start 2----
[NormalizeReturn] begin
*AP* procedure: __syncthreads
*AP* is_void=true
*AP* ret_expr= null
*AP* procedure: matrixMul
*AP* is_void=true
*AP* ret_expr= null
[NormalizeReturn] end in 0.00 seconds
[LinkSymbol] 81 updates in 0.00 seconds
--------IPAnalysis.java IPAnalysis start 3 enter IPA Graph----
---------IPAGraph.java finish super()----
---------IPAGraph.java start new Arraylist <IPANode>----
---------IPAGraph.java enter buildgraph<prog>
---------IPAGraph.java enter identifyCloneableNodes
---------IPAGraph.java enter buildTopOrder
---------IPAGraph.java finish new IPA Graph
[IPA] Stops due to no flow entry
-------IPPointsToAnalysis.java IPPointsToAnalysis finished super----
-------IPPointsToAnalysis.java IPPointsToAnalysis finished 2----
-------IPPointsToAnalysis.java IPPointsToAnalysis end----
------PerformAliasAnalysis 1----
0000IPpointsToAnalysis
[IPA:PointsTo] Stops due to no flow entry
------PerformAliasAnalysis end----
-----cetus application IPChainAnalysis 2----
-----cetus application IPChainAnalysis 3----
----------------------------------------------
----start to generate CF graph with thread----
----------------------------------------------
proc now is: void __syncthreads()
{
;
return ;
}
=====
proc now is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_0 end=false begin=true
#pragma fcuda tloop name=TRN_0 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 tlpName=TRN_0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 tlpName=TRN_0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_0 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_0 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_1 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_2 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2 tlpName=FOR_HTG_CMP_2
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_2
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_2
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 tlpName=FOR_HTG_CMP_2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_2 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_1 end=true begin=false
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_4 end=false begin=true
#pragma fcuda tloop name=TRN_4 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 HTGNode=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 tlpName=TRN_4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
#pragma fcuda tloop name=TRN_4 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_4 end=true begin=false
return ;
}
=====
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
-----cetus application IPChainAnalysis 4----
--------------------------------------------------------
----start to generate generate Program Summary Graph----
--------------------------------------------------------
############### PSG Summary Detail[__syncthreads()] #################
## Entry Node ##
# Ref Info (psg_entry_ref, use)
psg_entry_ref is null
# Global Info (psg_entry_global, UseOutSet)
psg_entry_global is null
## Exit Node ##
# Ref Info (psg_exit_ref, def)
psg_exit_ref is null
# Global Info (psg_exit_global, DefInSet)
psg_exit_global is null
############### PSG Summary Detail[matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)] #################
## Entry Node ##
# Ref Info (psg_entry_ref, use)
# Global Info (psg_entry_global, UseOutSet)
## Call Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
# Ref Info (psg_call_ref, def)
psg_call_ref is null
# Global Info (psg_call_global, DefInSet)
psg_call_global is null
## Return Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
# Ref Info (psg_return_ref, use)
psg_return_ref is null
# Global Info (psg_return_global, UseOutSet)
psg_return_global is null
## Call Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
# Ref Info (psg_call_ref, def)
psg_call_ref is null
# Global Info (psg_call_global, DefInSet)
psg_call_global is null
## Return Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
# Ref Info (psg_return_ref, use)
psg_return_ref is null
# Global Info (psg_return_global, UseOutSet)
psg_return_global is null
## Exit Node ##
# Ref Info (psg_exit_ref, def)
-DEF: B, NODE: * B
-DEF: C, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 HTGNode=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 tlpName=TRN_4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
-DEF: A, NODE: * A
# Global Info (psg_exit_global, DefInSet)
############### PSG Propagated Detail[__syncthreads()] #################
## Entry Node ##
# Ref Info (psg_entry_ref)
psg_entry_ref is null
# Global Info (psg_entry_global)
psg_entry_global is null
## Exit Node ##
# Ref Info (psg_exit_ref)
psg_exit_ref is null
# Global Info (psg_exit_global)
psg_exit_global is null
############### PSG Propagated Detail[matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)] #################
## Entry Node ##
# Ref Info (psg_entry_ref)
# parameter: B
# parameter: C
# parameter: A
# Global Info (psg_entry_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
## Call Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
# Ref Info (psg_call_ref)
psg_call_ref is null
# Global Info (psg_call_global)
psg_call_global is null
## Return Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
# Ref Info (psg_return_ref)
psg_return_ref is null
# Global Info (psg_return_global)
psg_return_global is null
## Call Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
# Ref Info (psg_call_ref)
psg_call_ref is null
# Global Info (psg_call_global)
psg_call_global is null
## Return Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
# Ref Info (psg_return_ref)
psg_return_ref is null
# Global Info (psg_return_global)
psg_return_global is null
## Exit Node ##
# Ref Info (psg_exit_ref)
-OUTdef: B, NODE: * B
-OUTdef: C, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 HTGNode=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 tlpName=TRN_4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
-OUTdef: A, NODE: * A
# Global Info (psg_exit_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
-----cetus application IPChainAnalysis 5----
-----cetus application IPChainAnalysis 6----
-----cetus application IPChainAnalysis 7----
-----cetus application IPChainAnalysis end----
----FcudaGlobalData2.java end----
...ps...001...before set of candidate variables to be privatized...
*** bfi: {
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub=0;
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_0 end=false begin=true
#pragma fcuda tloop name=TRN_0 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 tlpName=TRN_0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 tlpName=TRN_0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_0 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_0 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_1 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_2 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2 tlpName=FOR_HTG_CMP_2
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_2
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_2
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 tlpName=FOR_HTG_CMP_2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_2 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_1 end=true begin=false
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_4 end=false begin=true
#pragma fcuda tloop name=TRN_4 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 HTGNode=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 tlpName=TRN_4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
#pragma fcuda tloop name=TRN_4 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_4 end=true begin=false
return ;
}
*** bfi: int bx;
*** bfi: int by;
*** bfi: int aBegin;
*** bfi: int aEnd;
*** bfi: int aStep;
*** bfi: int bBegin;
*** bfi: int bStep;
*** bfi: DATATYPE Csub;
*** bfi: int a;
*** bfi: int b;
*** bfi: int k;
*** bfi: int c;
*** bfi: #pragma HLS INTERFACE ap_bus port=A depth=3840
*** bfi: #pragma HLS INTERFACE ap_bus port=B depth=6144
*** bfi: #pragma HLS INTERFACE ap_bus port=C depth=10240
*** bfi: bx=blockIdx.x;
*** bfi: by=blockIdx.y;
*** bfi: aBegin=((wA*16)*by);
*** bfi: aEnd=((aBegin+wA)-1);
*** bfi: aStep=16;
*** bfi: bBegin=(16*bx);
*** bfi: bStep=(16*wB);
*** bfi: Csub=0;
*** bfi: a=0;
*** bfi: b=0;
*** bfi: k=0;
*** bfi: #pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_0 end=false begin=true
#pragma fcuda tloop name=TRN_0 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 tlpName=TRN_0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 tlpName=TRN_0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_0 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_0 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_1 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_2 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2 tlpName=FOR_HTG_CMP_2
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_2
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_2
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 tlpName=FOR_HTG_CMP_2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_2 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_1 end=true begin=false
}
*** bfi: c=(((wB*16)*by)+(16*bx));
*** bfi: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_4 end=false begin=true
*** bfi: #pragma fcuda tloop name=TRN_4 end=false begin=true
*** tloop pragma: #pragma fcuda tloop name=TRN_4 end=false begin=true
*** entering: TRN_4
*** bfi: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 HTGNode=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 tlpName=TRN_4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
...ps...scan...001...curTloop is: TRN_4
...ps...scan...0001...fcAnnot is: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 HTGNode=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 tlpName=TRN_4 TRNtask=true
- defs: [C[((c+(wB*threadIdx.y))+threadIdx.x)]]
...ps...scan...00001...defExp is: C[((c+(wB*threadIdx.y))+threadIdx.x)]
- Is DefID:C candidate?
- Non-Candidate defId: C
*** bfi: #pragma fcuda tloop name=TRN_4 end=true begin=false
...ps...scan...001...curTloop is: TRN_4
...ps...scan...0001...fcAnnot is: #pragma fcuda tloop name=TRN_4 end=true begin=false
*** tloop pragma: #pragma fcuda tloop name=TRN_4 end=true begin=false
*** exiting: TRN_4
*** bfi: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_4 end=true begin=false
*** bfi: return ;
*** bfi: ((a=aBegin), (b=bBegin));
*** bfi: {
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_0 end=false begin=true
#pragma fcuda tloop name=TRN_0 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 tlpName=TRN_0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 tlpName=TRN_0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_0 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_0 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_1 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_2 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2 tlpName=FOR_HTG_CMP_2
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_2
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_2
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 tlpName=FOR_HTG_CMP_2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_2 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_1 end=true begin=false
}
*** bfi: __shared__ DATATYPE As[16][16];
*** bfi: __shared__ DATATYPE Bs[16][16];
*** bfi: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_0 end=false begin=true
*** bfi: #pragma fcuda tloop name=TRN_0 end=false begin=true
*** tloop pragma: #pragma fcuda tloop name=TRN_0 end=false begin=true
*** entering: TRN_0
*** bfi: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 tlpName=TRN_0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
...ps...scan...001...curTloop is: TRN_0
...ps...scan...0001...fcAnnot is: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 tlpName=TRN_0 TRNtask=true
- defs: [As[threadIdx.y][threadIdx.x]]
...ps...scan...00001...defExp is: As[threadIdx.y][threadIdx.x]
- Is DefID:As candidate?
- Non-Candidate defId: As
*** bfi: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 tlpName=TRN_0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
...ps...scan...001...curTloop is: TRN_0
...ps...scan...0001...fcAnnot is: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 tlpName=TRN_0 TRNtask=true
- defs: [Bs[threadIdx.y][threadIdx.x]]
...ps...scan...00001...defExp is: Bs[threadIdx.y][threadIdx.x]
- Is DefID:Bs candidate?
- Non-Candidate defId: Bs
*** bfi: #pragma fcuda tloop name=TRN_0 end=true begin=false
...ps...scan...001...curTloop is: TRN_0
...ps...scan...0001...fcAnnot is: #pragma fcuda tloop name=TRN_0 end=true begin=false
*** tloop pragma: #pragma fcuda tloop name=TRN_0 end=true begin=false
*** exiting: TRN_0
*** bfi: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_0 end=true begin=false
*** bfi: #pragma fcuda compute cores=1 name=SNC_1 end=false begin=true
*** bfi: #pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
*** bfi: {
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_2 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2 tlpName=FOR_HTG_CMP_2
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_2
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_2
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 tlpName=FOR_HTG_CMP_2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_2 end=true begin=false
}
*** bfi: #pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
*** bfi: #pragma fcuda compute cores=1 name=SNC_1 end=true begin=false
*** bfi: lp1:
*** bfi: #pragma fcuda tloop name=FOR_HTG_CMP_2 end=false begin=true
*** tloop pragma: #pragma fcuda tloop name=FOR_HTG_CMP_2 end=false begin=true
*** entering: FOR_HTG_CMP_2
*** bfi: #pragma fcuda stmt HTGNode=FOR_HTG_CMP_2 tlpName=FOR_HTG_CMP_2
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_2
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_2
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 tlpName=FOR_HTG_CMP_2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
...ps...scan...001...curTloop is: FOR_HTG_CMP_2
...ps...scan...0001...fcAnnot is: #pragma fcuda stmt HTGNode=FOR_HTG_CMP_2 tlpName=FOR_HTG_CMP_2
- defs: [k, k, Csub]
...ps...scan...00001...defExp is: k
- Is DefID:k candidate?
- with TRV USE:k<16
- with TRV USE:#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 tlpName=FOR_HTG_CMP_2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
- with TRV USE:#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 tlpName=FOR_HTG_CMP_2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
- with TRV USE: ++ k
- Non-Candidate defId: k
...ps...scan...00001...defExp is: k
- Is DefID:k candidate?
- with TRV USE:k<16
- with TRV USE:#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 tlpName=FOR_HTG_CMP_2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
- with TRV USE:#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 tlpName=FOR_HTG_CMP_2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
- with TRV USE: ++ k
- Non-Candidate defId: k
...ps...scan...00001...defExp is: Csub
- Is DefID:Csub candidate?
- with TRV USE:#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 tlpName=FOR_HTG_CMP_2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
- with TRV USE:#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 HTGNode=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 tlpName=TRN_4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;
- Candidate defId: Csub
*** bfi: #pragma fcuda tloop name=FOR_HTG_CMP_2 end=true begin=false
...ps...scan...001...curTloop is: FOR_HTG_CMP_2
...ps...scan...0001...fcAnnot is: #pragma fcuda tloop name=FOR_HTG_CMP_2 end=true begin=false
*** tloop pragma: #pragma fcuda tloop name=FOR_HTG_CMP_2 end=true begin=false
*** exiting: FOR_HTG_CMP_2
*** bfi: #pragma fcuda stmt tlpName=FOR_HTG_CMP_2
k=0;
*** bfi: #pragma fcuda stmt tlpName=FOR_HTG_CMP_2
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 tlpName=FOR_HTG_CMP_2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
*** bfi: #pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 tlpName=FOR_HTG_CMP_2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
...ps...002...after scanTLoops_candidateVars...
...ps...002...candidateVars is: [Csub]
...ps...002...end of candidateVars print...
Privatization candidates for matrixMul: [Csub]
....ps....09....candVars is: [Csub]
....ps....09.01....for loop curr idExp is: Csub
....ps....09.02....idExp.getSymbol, origDeclor is: Csub
....ps....09.04....defSyms.containsKey(origDeclor) is: false
....ps....09.04....defSyms before put is: {}
....ps....09.04....curr origDeclor is: Csub
....ps....09.04....curr symtarg is: fcuda.analysis.SymTarget@27f723
....ps....09.05....defSyms after put is: {Csub=fcuda.analysis.SymTarget@27f723}
....ps....10.....defSyms.keySet is: [Csub]
....ps....10.01....for loop curr defSym is: Csub
- Privatizing SymTarget:
---- Symbol: Csub ----
*** mDefUses:
{71194203=[2006034581, 916419490], 457357179=[2006034581, 916419490]}
*** Candidate Defs: 1 [71194203]
- 71194203 : #pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 tlpName=FOR_HTG_CMP_2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
*** non-Candidate Defs: 1
- 457357179 : Csub=0;
*** Symbol Uses:
[#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 tlpName=FOR_HTG_CMP_2 CMPtask=true
Csub+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);, #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 HTGNode=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 tlpName=TRN_4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub;]
....ps....10.02....defSym is: Csub
....ps....10.02....defSym.getDeclaration, origDecl is: DATATYPE Csub
*** Using clone for tIdx.y ***
*** Using clone for tIdx.x ***
Replaced 1 candidate defs
Replaced 2 candidate uses
Replaced 1 non-candidate defs with cand uses
Replaced 0 non-candidate uses
....privatize....10.02....origDecl is: DATATYPE Csub
....privatize....10.02....origDecl.getParent is: DATATYPE Csub;
....privatize....10.02....blockDecl is: __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
....privatize....10.02....blockDecl.parent is: null
....privatize....10.02....compoundStatement.class is: class cetus.hir.CompoundStatement
....IRTools....getAncestorOfType....first argu t is not null....
....IRTools....t.parent, ret is: DATATYPE Csub;
....IRTools....type.isInstance(ret) is: false
....IRTools....ret = ret.parent, ret is: {
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub_block[threadIdx.y][threadIdx.x]=0;
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_0 end=false begin=true
#pragma fcuda tloop name=TRN_0 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 tlpName=TRN_0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 tlpName=TRN_0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_0 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_0 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_1 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_2 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2 tlpName=FOR_HTG_CMP_2
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_2
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_2
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 tlpName=FOR_HTG_CMP_2 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_2 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_1 end=true begin=false
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_4 end=false begin=true
#pragma fcuda tloop name=TRN_4 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 HTGNode=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 tlpName=TRN_4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_4 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_4 end=true begin=false
return ;
}
....IRTools....type.isInstance(ret) is: true
....privatize....10.03....parCmpd is: {
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
DATATYPE Csub;
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub_block[threadIdx.y][threadIdx.x]=0;
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_0
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_0 end=false begin=true
#pragma fcuda tloop name=TRN_0 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 tlpName=TRN_0 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_0 HTGNode=TRN_0 tdep=true seqID=0 tlpName=TRN_0 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_0 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_0 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_1 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_1 HTGNode=SNC_1 tdep=true seqID=1
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_2 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2 tlpName=FOR_HTG_CMP_2
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_2
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_2
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_2
{
#pragma fcuda stmt name=CMP_2 HTGNode=CMP_2 tdep_vars=[Bs, As, Csub] tdep=true seqID=2 tlpName=FOR_HTG_CMP_2 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_2 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_3 HTGNode=SNC_3 tdep=true seqID=3
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_1 end=true begin=false
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_4 end=false begin=true
#pragma fcuda tloop name=TRN_4 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_4 HTGNode=TRN_4 tdep_vars=[Csub] tdep=true seqID=4 tlpName=TRN_4 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_4 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_4 end=true begin=false
return ;
}
....CompoundStmt...addDecAfter...01
....CompoundStmt...addDecAfter...02
001
002
003---------------------------------------------------------------------------------------------------------------------------------------------------
----Collect global-memory arrays/pointers symbols (includes __constant__ symbols): glMemArraySet = GlobalMemUtils.getGlobMemSymbols(mProcedure)-------
------------------------------------------------------------------------------------------------------------------------------------------------------
----[* A, * C, * B]----
----1.1----mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub_block[threadIdx.y][threadIdx.x]=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
return ;
}
Global-Mem Arrays: [* A, * C, * B]
----now identifyGlmemAccs----
PointerSet: [* A, * C, * B]
findGMrefs for: A
---before continue, GM refs in ArrayAccess format already----
findGMrefs for: B
---before continue, GM refs in ArrayAccess format already----
findGMrefs for: C
---before continue, GM refs in ArrayAccess format already----
AliasSet: []
derefAccs: []
004---------------------------------------------------------------------------------------------------------------------------------------------------
----Convert dereference-based global-mem accesses to array-accesses and find global-mem aliases: identifyGlMemAccs();-------
------------------------------------------------------------------------------------------------------------------------------------------------------
----1.2----mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub_block[threadIdx.y][threadIdx.x]=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
return ;
}
----now handleMixStmts----
getMixdStmts Symbols: [* A, * C, * B]
getMixdStmts Global Symbols: [* A, * C, * B]
getMixdStmts Alias Symbols: []
symUses: [A]
Contained GM Ref: A[((a+(wA*threadIdx.y))+threadIdx.x)]
Candidate MIXED stmt: As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
symUses: [B]
Contained GM Ref: B[((b+(wB*threadIdx.y))+threadIdx.x)]
Candidate MIXED stmt: Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
symUses: [C]
Contained GM Ref: C[((c+(wB*threadIdx.y))+threadIdx.x)]
Candidate MIXED stmt: C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
mixdStmts: []
--------------------------------
005---------------------------------------------------------------------------------------------------------------------------------------------------
----Eliminate MIXED statements (i.e. statements that contain both COMPUTE & TRANSFER parts): handleMixStmts();-------
------------------------------------------------------------------------------------------------------------------------------------------------------
----1.3----mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub_block[threadIdx.y][threadIdx.x]=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
return ;
}
----FcudaGlobalData2.java start----
-----cetus application IPChainAnalysis start----
-----cetus application IPChainAnalysis 1----
------IPChainAnalysis.java PerformAliasAnalysis start----
-------IPPointsToAnalysis.java IPPointsToAnalysis start super----
--------IPAnalysis.java IPAnalysis start 1----
--------IPAnalysis.java IPAnalysis start 2----
[NormalizeReturn] begin
*AP* procedure: __syncthreads
*AP* is_void=true
*AP* ret_expr= null
*AP* procedure: matrixMul
*AP* is_void=true
*AP* ret_expr= null
[NormalizeReturn] end in 0.00 seconds
[LinkSymbol] 93 updates in 0.00 seconds
--------IPAnalysis.java IPAnalysis start 3 enter IPA Graph----
---------IPAGraph.java finish super()----
---------IPAGraph.java start new Arraylist <IPANode>----
---------IPAGraph.java enter buildgraph<prog>
---------IPAGraph.java enter identifyCloneableNodes
---------IPAGraph.java enter buildTopOrder
---------IPAGraph.java finish new IPA Graph
[IPA] Stops due to no flow entry
-------IPPointsToAnalysis.java IPPointsToAnalysis finished super----
-------IPPointsToAnalysis.java IPPointsToAnalysis finished 2----
-------IPPointsToAnalysis.java IPPointsToAnalysis end----
------PerformAliasAnalysis 1----
0000IPpointsToAnalysis
[IPA:PointsTo] Stops due to no flow entry
------PerformAliasAnalysis end----
-----cetus application IPChainAnalysis 2----
-----cetus application IPChainAnalysis 3----
----------------------------------------------
----start to generate CF graph with thread----
----------------------------------------------
proc now is: void __syncthreads()
{
;
return ;
}
=====
proc now is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub_block[threadIdx.y][threadIdx.x]=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
return ;
}
=====
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
-----cetus application IPChainAnalysis 4----
--------------------------------------------------------
----start to generate generate Program Summary Graph----
--------------------------------------------------------
############### PSG Summary Detail[__syncthreads()] #################
## Entry Node ##
# Ref Info (psg_entry_ref, use)
psg_entry_ref is null
# Global Info (psg_entry_global, UseOutSet)
psg_entry_global is null
## Exit Node ##
# Ref Info (psg_exit_ref, def)
psg_exit_ref is null
# Global Info (psg_exit_global, DefInSet)
psg_exit_global is null
############### PSG Summary Detail[matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)] #################
## Entry Node ##
# Ref Info (psg_entry_ref, use)
# Global Info (psg_entry_global, UseOutSet)
## Call Node ## IR: __syncthreads();
# Ref Info (psg_call_ref, def)
psg_call_ref is null
# Global Info (psg_call_global, DefInSet)
psg_call_global is null
## Return Node ## IR: __syncthreads();
# Ref Info (psg_return_ref, use)
psg_return_ref is null
# Global Info (psg_return_global, UseOutSet)
psg_return_global is null
## Call Node ## IR: __syncthreads();
# Ref Info (psg_call_ref, def)
psg_call_ref is null
# Global Info (psg_call_global, DefInSet)
psg_call_global is null
## Return Node ## IR: __syncthreads();
# Ref Info (psg_return_ref, use)
psg_return_ref is null
# Global Info (psg_return_global, UseOutSet)
psg_return_global is null
## Exit Node ##
# Ref Info (psg_exit_ref, def)
-DEF: C, NODE: C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
-DEF: B, NODE: * B
-DEF: A, NODE: * A
# Global Info (psg_exit_global, DefInSet)
############### PSG Propagated Detail[__syncthreads()] #################
## Entry Node ##
# Ref Info (psg_entry_ref)
psg_entry_ref is null
# Global Info (psg_entry_global)
psg_entry_global is null
## Exit Node ##
# Ref Info (psg_exit_ref)
psg_exit_ref is null
# Global Info (psg_exit_global)
psg_exit_global is null
############### PSG Propagated Detail[matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)] #################
## Entry Node ##
# Ref Info (psg_entry_ref)
# parameter: C
# parameter: B
# parameter: A
# Global Info (psg_entry_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
## Call Node ## IR: __syncthreads();
# Ref Info (psg_call_ref)
psg_call_ref is null
# Global Info (psg_call_global)
psg_call_global is null
## Return Node ## IR: __syncthreads();
# Ref Info (psg_return_ref)
psg_return_ref is null
# Global Info (psg_return_global)
psg_return_global is null
## Call Node ## IR: __syncthreads();
# Ref Info (psg_call_ref)
psg_call_ref is null
# Global Info (psg_call_global)
psg_call_global is null
## Return Node ## IR: __syncthreads();
# Ref Info (psg_return_ref)
psg_return_ref is null
# Global Info (psg_return_global)
psg_return_global is null
## Exit Node ##
# Ref Info (psg_exit_ref)
-OUTdef: C, NODE: C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
-OUTdef: B, NODE: * B
-OUTdef: A, NODE: * A
# Global Info (psg_exit_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
-----cetus application IPChainAnalysis 5----
-----cetus application IPChainAnalysis 6----
-----cetus application IPChainAnalysis 7----
-----cetus application IPChainAnalysis end----
----FcudaGlobalData2.java end----
----1.4----mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
Csub_block[threadIdx.y][threadIdx.x]=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
return ;
}
006
Instances of tidx: [threadIdx, threadIdx, threadIdx, threadIdx, threadIdx, threadIdx, threadIdx, threadIdx, threadIdx, threadIdx, threadIdx, threadIdx, threadIdx, threadIdx, threadIdx, threadIdx, threadIdx, threadIdx]
----c.1---- idx is: threadIdx
----c.1---- parStmt is: Csub_block[threadIdx.y][threadIdx.x]=0;
----c.1---- idx is: threadIdx
----c.1---- parStmt is: Csub_block[threadIdx.y][threadIdx.x]=0;
----c.1---- idx is: threadIdx
----c.1---- parStmt is: As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
----c.1---- idx is: threadIdx
----c.1---- parStmt is: As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
----c.1---- idx is: threadIdx
----c.1---- parStmt is: As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
----c.1---- idx is: threadIdx
----c.1---- parStmt is: As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
----c.1---- idx is: threadIdx
----c.1---- parStmt is: Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
----c.1---- idx is: threadIdx
----c.1---- parStmt is: Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
----c.1---- idx is: threadIdx
----c.1---- parStmt is: Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
----c.1---- idx is: threadIdx
----c.1---- parStmt is: Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
----c.1---- idx is: threadIdx
----c.1---- parStmt is: Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
----c.1---- idx is: threadIdx
----c.1---- parStmt is: Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
----c.1---- idx is: threadIdx
----c.1---- parStmt is: Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
----c.1---- idx is: threadIdx
----c.1---- parStmt is: Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
----c.1---- idx is: threadIdx
----c.1---- parStmt is: C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
----c.1---- idx is: threadIdx
----c.1---- parStmt is: C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
----c.1---- idx is: threadIdx
----c.1---- parStmt is: C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
----c.1---- idx is: threadIdx
----c.1---- parStmt is: C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
defIDs: [Csub_block, Csub_block, As, As, As, As, Bs, Bs, Bs, Bs, Csub_block, Csub_block, Csub_block, Csub_block, C, C, C, C]
Looking for uses of: Csub_block
... in: Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
... useID: Csub_block
... in: C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
... useID: Csub_block
Looking for uses of: Csub_block
... in: #pragma fcuda stmt tdep_vars=[Csub_block]
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
... in: #pragma fcuda stmt tdep_vars=[Csub_block]
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
Looking for uses of: As
... in: #pragma fcuda stmt tdep_vars=[Csub_block]
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
... useID: As
Looking for uses of: As
... in: #pragma fcuda stmt tdep_vars=[As, Csub_block]
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
Looking for uses of: As
... in: #pragma fcuda stmt tdep_vars=[As, Csub_block]
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
Looking for uses of: As
... in: #pragma fcuda stmt tdep_vars=[As, Csub_block]
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
Looking for uses of: Bs
... in: #pragma fcuda stmt tdep_vars=[As, Csub_block]
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
... useID: Bs
Looking for uses of: Bs
... in: #pragma fcuda stmt tdep_vars=[Bs, As, Csub_block]
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
Looking for uses of: Bs
... in: #pragma fcuda stmt tdep_vars=[Bs, As, Csub_block]
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
Looking for uses of: Bs
... in: #pragma fcuda stmt tdep_vars=[Bs, As, Csub_block]
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
Looking for uses of: Csub_block
... in: #pragma fcuda stmt tdep_vars=[Bs, As, Csub_block]
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
... in: #pragma fcuda stmt tdep_vars=[Csub_block]
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
Looking for uses of: Csub_block
... in: #pragma fcuda stmt tdep_vars=[Bs, As, Csub_block]
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
... in: #pragma fcuda stmt tdep_vars=[Csub_block]
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
Looking for uses of: Csub_block
... in: #pragma fcuda stmt tdep_vars=[Bs, As, Csub_block]
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
... in: #pragma fcuda stmt tdep_vars=[Csub_block]
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
Looking for uses of: Csub_block
... in: #pragma fcuda stmt tdep_vars=[Bs, As, Csub_block]
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
... in: #pragma fcuda stmt tdep_vars=[Csub_block]
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
Looking for uses of: C
Looking for uses of: C
Looking for uses of: C
Looking for uses of: C
defIDs: [Csub_block, C, Csub_block, Csub_block]
Looking for uses of: Csub_block
... in: #pragma fcuda stmt tdep_vars=[Bs, As, Csub_block]
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
... in: #pragma fcuda stmt tdep_vars=[Csub_block]
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
Looking for uses of: C
Looking for uses of: Csub_block
... in: #pragma fcuda stmt tdep_vars=[Bs, As, Csub_block]
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
... in: #pragma fcuda stmt tdep_vars=[Csub_block]
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
Looking for uses of: Csub_block
... in: #pragma fcuda stmt tdep_vars=[Bs, As, Csub_block]
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
... in: #pragma fcuda stmt tdep_vars=[Csub_block]
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
----c.2---- tDepStmt is: #pragma fcuda stmt tdep_vars=[Csub_block]
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
----c.2---- tDepStmt is: As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
----c.21---- tDepStmt is: #pragma fcuda stmt tdep=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
----c.2---- tDepStmt is: Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
----c.21---- tDepStmt is: #pragma fcuda stmt tdep=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
----c.2---- tDepStmt is: Csub_block[threadIdx.y][threadIdx.x]=0;
----c.21---- tDepStmt is: #pragma fcuda stmt tdep=true
Csub_block[threadIdx.y][threadIdx.x]=0;
----c.2---- tDepStmt is: #pragma fcuda stmt tdep_vars=[Bs, As, Csub_block]
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
----1.5----mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
#pragma fcuda stmt tdep=true
Csub_block[threadIdx.y][threadIdx.x]=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt tdep=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt tdep=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt tdep_vars=[Bs, As, Csub_block] tdep=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda stmt tdep_vars=[Csub_block] tdep=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
return ;
}
007
findTransferStmts Symbols: [* A, * C, * B]
findTransferStmts non-Const Symbols: [* A, * C, * B]
findTransferStmts for: A
findTransferStmts for: B
findTransferStmts for: C
INFO - findTransferStmts: 12 address index IDs
....IPChainAnalysis getDefList....
INFO - findTransferStmts: 2 defStmts for addrID: a
INFO - findTransferStmts: Corner case forloop definition: ((a=aBegin), (b=bBegin));
WARNING - findTransferStmts: Definition of class: class cetus.hir.CommaExpression
((a+=aStep), (b+=bStep))
....IPChainAnalysis getDefList....
INFO - findTransferStmts: 1 defStmts for addrID: wA
WARNING - findTransferStmts: Definition of class: class cetus.hir.VariableDeclarator
wA
....IPChainAnalysis getDefList....
INFO - findTransferStmts: 2 defStmts for addrID: b
INFO - findTransferStmts: Corner case forloop definition: ((a=aBegin), (b=bBegin));
WARNING - findTransferStmts: Definition of class: class cetus.hir.CommaExpression
((a+=aStep), (b+=bStep))
....IPChainAnalysis getDefList....
INFO - findTransferStmts: 1 defStmts for addrID: wB
WARNING - findTransferStmts: Definition of class: class cetus.hir.VariableDeclarator
wB
....IPChainAnalysis getDefList....
INFO - findTransferStmts: 1 defStmts for addrID: c
....IPChainAnalysis getDefList....
INFO - findTransferStmts: 1 defStmts for addrID: wB
WARNING - findTransferStmts: Definition of class: class cetus.hir.VariableDeclarator
wB
----1.6----mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
#pragma fcuda stmt tdep=true
Csub_block[threadIdx.y][threadIdx.x]=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A tdep=true TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B tdep=true TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt tdep_vars=[Bs, As, Csub_block] tdep=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda stmt rdNwrt=false GLBpntr=C tdep_vars=[Csub_block] tdep=true TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
return ;
}
008
----1.7----mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
#pragma fcuda stmt name=CMP_5 tdep=true seqID=5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 tdep=true seqID=6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 tdep=true seqID=6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_9 tdep=true seqID=9
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
return ;
}
009_finish
------001------ HTGraph.java super()
------002------ HTGraph.java curTask = new String()
------003------ HTGraph.java HTGName = new String(name)
------004------ HTGraph.java if(trv instanceof Procedure)
------005------ HTGraph.java mProcedure = (Procedure) trv
------007------ HTGraph.java before build graph
------0007------ t is procedure now
------0007------ t is procedure now, getbody
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
#pragma fcuda stmt name=CMP_5 tdep=true seqID=5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 tdep=true seqID=6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 tdep=true seqID=6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_9 tdep=true seqID=9
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
return ;
}
------0007------ t is CompoundStatement now
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
#pragma fcuda stmt name=CMP_5 tdep=true seqID=5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
a=0;
b=0;
k=0;
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 tdep=true seqID=6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 tdep=true seqID=6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_9 tdep=true seqID=9
__syncthreads();
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
return ;
}
------0007------ buildCompound cStmt getChildren
[int bx;, int by;, int aBegin;, int aEnd;, int aStep;, int bBegin;, int bStep;, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];, int a;, int b;, int k;, int c;, #pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 , bx=blockIdx.x;, by=blockIdx.y;, aBegin=((wA*16)*by);, aEnd=((aBegin+wA)-1);, aStep=16;, bBegin=(16*bx);, bStep=(16*wB);, #pragma fcuda stmt name=CMP_5 tdep=true seqID=5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;, a=0;, b=0;, k=0;, for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 tdep=true seqID=6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 tdep=true seqID=6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_9 tdep=true seqID=9
__syncthreads();
}, c=(((wB*16)*by)+(16*bx));, , , #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];, , , return ;]
------0007------ buildCompound for(Traversable trv : children), current trv is
int bx;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int bx;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int by;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int by;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int aBegin;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int aBegin;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int aEnd;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int aEnd;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int aStep;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int aStep;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int bBegin;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int bBegin;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int bStep;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int bStep;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int a;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int a;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int b;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int b;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int k;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int k;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
int c;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for int c;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma HLS INTERFACE ap_bus port=A depth=3840
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for #pragma HLS INTERFACE ap_bus port=A depth=3840
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma HLS INTERFACE ap_bus port=B depth=6144
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for #pragma HLS INTERFACE ap_bus port=B depth=6144
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma HLS INTERFACE ap_bus port=C depth=10240
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for #pragma HLS INTERFACE ap_bus port=C depth=10240
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
bx=blockIdx.x;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for bx=blockIdx.x;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
by=blockIdx.y;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for by=blockIdx.y;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
aBegin=((wA*16)*by);
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for aBegin=((wA*16)*by);
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
aEnd=((aBegin+wA)-1);
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for aEnd=((aBegin+wA)-1);
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
aStep=16;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for aStep=16;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
bBegin=(16*bx);
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for bBegin=(16*bx);
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
bStep=(16*wB);
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for bStep=(16*wB);
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma fcuda stmt name=CMP_5 tdep=true seqID=5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular tdep stmt, COM task----
-----added node: CMP_5
------done curr build graph
--- DFAGraph for #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ buildCompound for(Traversable trv : children), current trv is
a=0;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for a=0;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
b=0;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for b=0;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
k=0;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for k=0;
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 tdep=true seqID=6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 tdep=true seqID=6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_9 tdep=true seqID=9
__syncthreads();
}
------ for loop current trv is not compound
------0007------ t is ForLoop now
------0007------ t is CompoundStatement now
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 tdep=true seqID=6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 tdep=true seqID=6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_9 tdep=true seqID=9
__syncthreads();
}
------0007------ buildCompound cStmt getChildren
[__shared__ DATATYPE As[16][16];, __shared__ DATATYPE Bs[16][16];, , , #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 tdep=true seqID=6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];, #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 tdep=true seqID=6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];, , , , #pragma fcuda stmt SNCtask=true name=SNC_7 tdep=true seqID=7
__syncthreads();, {
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}, #pragma fcuda stmt SNCtask=true name=SNC_9 tdep=true seqID=9
__syncthreads();, ]
------0007------ buildCompound for(Traversable trv : children), current trv is
__shared__ DATATYPE As[16][16];
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for __shared__ DATATYPE As[16][16];
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
__shared__ DATATYPE Bs[16][16];
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for __shared__ DATATYPE Bs[16][16];
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 tdep=true seqID=6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular tdep stmt, TRN task----
-----added node: TRN_6
------done curr build graph
--- DFAGraph for #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 tdep=true seqID=6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular tdep stmt, TRN task----
-----added node: TRN_6
------done curr build graph
--- DFAGraph for #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ buildCompound for(Traversable trv : children), current trv is
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma fcuda stmt SNCtask=true name=SNC_7 tdep=true seqID=7
__syncthreads();
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular tdep stmt, SYNC task----
-----added node: SNC_7
------done curr build graph
--- DFAGraph for #pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ buildCompound for(Traversable trv : children), current trv is
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
------ for loop current trv is compound
------0007------ t is CompoundStatement now
{
lp1:
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
------0007------ buildCompound cStmt getChildren
[lp1:, , for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}, ]
------0007------ buildCompound for(Traversable trv : children), current trv is
lp1:
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for lp1:
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
------ for loop current trv is not compound
------0007------ t is ForLoop now
------0007------ t is CompoundStatement now
{
#pragma fcuda stmt name=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
------0007------ buildCompound cStmt getChildren
[#pragma fcuda stmt name=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);]
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma fcuda stmt name=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular tdep stmt, COM task----
-----added node: CMP_8
------done curr build graph
--- DFAGraph for #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ exit buildCompound for loop
------ ret.size > 0------
------ wrap enabled------
====== DFAGraph.java enter getFirst()
------ exit buildCompound with wrap enabled, and ret > 0, return superDFA------
------ exit buildCompound with wrap enabled, and ret == 0, return superDFA------
------done curr build graph
------ for loop ret has node------
====== DFAGraph.java enter getFirst()
------done curr build graph
--- DFAGraph for #pragma fcuda stmt HTGNode=FOR_HTG_CMP_8
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ buildCompound for(Traversable trv : children), current trv is
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for
has 0 nodes
------0007------ exit buildCompound for loop
------ ret.size > 0------
------ exit buildCompound with NO wrap, return ret------
------done curr build graph
--- DFAGraph for {
lp1:
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma fcuda stmt SNCtask=true name=SNC_9 tdep=true seqID=9
__syncthreads();
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular tdep stmt, SYNC task----
-----added node: SNC_9
------done curr build graph
--- DFAGraph for #pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ buildCompound for(Traversable trv : children), current trv is
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for
has 0 nodes
------0007------ exit buildCompound for loop
------ ret.size > 0------
------ wrap enabled------
====== DFAGraph.java enter getFirst()
------ exit buildCompound with wrap enabled, and ret > 0, return superDFA------
------ exit buildCompound with wrap enabled, and ret == 0, return superDFA------
------done curr build graph
------ for loop ret has node------
====== DFAGraph.java enter getFirst()
------done curr build graph
--- DFAGraph for #pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8
for (k=0; k<16; ++ k)
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
}
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ buildCompound for(Traversable trv : children), current trv is
c=(((wB*16)*by)+(16*bx));
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for c=(((wB*16)*by)+(16*bx));
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular tdep stmt, TRN task----
-----added node: TRN_10
------done curr build graph
--- DFAGraph for #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
has 1 nodes
====== DFAGraph.java enter getFirst()
------ done node = 1
------0007------ buildCompound for(Traversable trv : children), current trv is
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for
has 0 nodes
------0007------ buildCompound for(Traversable trv : children), current trv is
return ;
------ for loop current trv is not compound
------0007------ t is regular Statement now
------regular non-tdep stmt----
------regular non-tdep stmt----NOT TRNtask----
------done curr build graph
--- DFAGraph for return ;
has 0 nodes
------0007------ exit buildCompound for loop
------ ret.size > 0------
------ exit buildCompound with NO wrap, return ret------
------done curr build graph
------done curr build graph
------008------ HTGraph.java DFAGraph dfag = buildGraph(trv)
PRINT-GRAPH OUTPUT
====================
| Graph: matrixMul L1
|
V
--------------------
| CMP_5
| type: CMP
| Addr-Use: false
--------------------
|
V
--------------------
| FOR_HTG_TRN_6
| type: FOR
| tdep: false
--------------------
====================
| Graph: HTG_TRN_6 L2
|
V
--------------------
| TRN_6
| ParentNode: FOR_HTG_TRN_6
| type: TRN
--------------------
|
V
--------------------
| SNC_7
| ParentNode: FOR_HTG_TRN_6
| type: SNC
--------------------
|
V
--------------------
| FOR_HTG_CMP_8
| ParentNode: FOR_HTG_TRN_6
| type: FOR
| tdep: false
--------------------
====================
| Graph: HTG_CMP_8 L3
|
V
--------------------
| CMP_8
| ParentNode: FOR_HTG_CMP_8
| type: CMP
| Addr-Use: false
--------------------
\===== HTG_CMP_8 =====/
|
V
--------------------
| SNC_9
| ParentNode: FOR_HTG_TRN_6
| type: SNC
--------------------
\===== HTG_TRN_6 =====/
|
V
--------------------
| TRN_10
| type: TRN
--------------------
\===== matrixMul =====/
*** CF Node Info Begin ***
FOR_HTG_TRN_6 info:
- uniform: false - SubTypes: [TRN, CMP, SNC]
*** CF Node Info End ***
*** Number of Tasks in matrixMul :4
---- Target: CMP_5 ----
nodes #: 1
CMP_5
Node Types: [CMP]
uniform: true
**** TLOOPS(1) ****
TLOOP 0
---- Target: CMP_5 ----
nodes #: 1
CMP_5
Node Types: [CMP]
uniform: true
----addStatementBefore----index is:22
----addStatementBefore----index is:23
---- Target: TRN_6 ----
nodes #: 1
TRN_6
Node Types: [TRN]
uniform: true
**** TLOOPS(1) ****
TLOOP 0
---- Target: TRN_6 ----
nodes #: 1
TRN_6
Node Types: [TRN]
uniform: true
----addStatementBefore----index is:4
----addStatementBefore----index is:5
---- Target: SNC_7 ----
nodes #: 3
SNC_7
FOR_HTG_CMP_8
SNC_9
Node Types: [CMP, SNC]
uniform: true
**** TLOOPS(1) ****
TLOOP 0
---- Target: FOR_HTG_CMP_8 ----
nodes #: 1
FOR_HTG_CMP_8
Node Types: [CMP]
uniform: true
----addStatementBefore----index is:13
----addStatementBefore----index is:2
---- Target: TRN_10 ----
nodes #: 1
TRN_10
Node Types: [TRN]
uniform: true
**** TLOOPS(1) ****
TLOOP 0
---- Target: TRN_10 ----
nodes #: 1
TRN_10
Node Types: [TRN]
uniform: true
----addStatementBefore----index is:34
----addStatementBefore----index is:35
-----kernelTransformPass, finish transformProcedure-----, proc now is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
[PrivatizeScalars-FCUDA] end in 0.36 seconds
[LinkSymbol] 93 updates in 0.00 seconds
*** After PrivatizeScalars ***
#include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
const int BLOCKDIM_X = 16, BLOCKDIM_Y = 16;
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
===========================================
----FcudaGlobalData2.java start----
-----cetus application IPChainAnalysis start----
-----cetus application IPChainAnalysis 1----
------IPChainAnalysis.java PerformAliasAnalysis start----
-------IPPointsToAnalysis.java IPPointsToAnalysis start super----
--------IPAnalysis.java IPAnalysis start 1----
--------IPAnalysis.java IPAnalysis start 2----
[NormalizeReturn] begin
*AP* procedure: __syncthreads
*AP* is_void=true
*AP* ret_expr= null
*AP* procedure: matrixMul
*AP* is_void=true
*AP* ret_expr= null
[NormalizeReturn] end in 0.00 seconds
[LinkSymbol] 93 updates in 0.00 seconds
--------IPAnalysis.java IPAnalysis start 3 enter IPA Graph----
---------IPAGraph.java finish super()----
---------IPAGraph.java start new Arraylist <IPANode>----
---------IPAGraph.java enter buildgraph<prog>
---------IPAGraph.java enter identifyCloneableNodes
---------IPAGraph.java enter buildTopOrder
---------IPAGraph.java finish new IPA Graph
[IPA] Stops due to no flow entry
-------IPPointsToAnalysis.java IPPointsToAnalysis finished super----
-------IPPointsToAnalysis.java IPPointsToAnalysis finished 2----
-------IPPointsToAnalysis.java IPPointsToAnalysis end----
------PerformAliasAnalysis 1----
0000IPpointsToAnalysis
[IPA:PointsTo] Stops due to no flow entry
------PerformAliasAnalysis end----
-----cetus application IPChainAnalysis 2----
-----cetus application IPChainAnalysis 3----
----------------------------------------------
----start to generate CF graph with thread----
----------------------------------------------
proc now is: void __syncthreads()
{
;
return ;
}
=====
====== DFAGraph.java enter getFirst()
proc now is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int bx;
int by;
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
bx=blockIdx.x;
by=blockIdx.y;
aBegin=((wA*16)*by);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*bx);
bStep=(16*wB);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*by)+(16*bx));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
=====
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
-----cetus application IPChainAnalysis 4----
--------------------------------------------------------
----start to generate generate Program Summary Graph----
--------------------------------------------------------
############### PSG Summary Detail[__syncthreads()] #################
## Entry Node ##
# Ref Info (psg_entry_ref, use)
psg_entry_ref is null
# Global Info (psg_entry_global, UseOutSet)
psg_entry_global is null
## Exit Node ##
# Ref Info (psg_exit_ref, def)
psg_exit_ref is null
# Global Info (psg_exit_global, DefInSet)
psg_exit_global is null
############### PSG Summary Detail[matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)] #################
## Entry Node ##
# Ref Info (psg_entry_ref, use)
# Global Info (psg_entry_global, UseOutSet)
## Call Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
# Ref Info (psg_call_ref, def)
psg_call_ref is null
# Global Info (psg_call_global, DefInSet)
psg_call_global is null
## Return Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
# Ref Info (psg_return_ref, use)
psg_return_ref is null
# Global Info (psg_return_global, UseOutSet)
psg_return_global is null
## Call Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
# Ref Info (psg_call_ref, def)
psg_call_ref is null
# Global Info (psg_call_global, DefInSet)
psg_call_global is null
## Return Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
# Ref Info (psg_return_ref, use)
psg_return_ref is null
# Global Info (psg_return_global, UseOutSet)
psg_return_global is null
## Exit Node ##
# Ref Info (psg_exit_ref, def)
-DEF: C, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
-DEF: B, NODE: * B
-DEF: A, NODE: * A
# Global Info (psg_exit_global, DefInSet)
############### PSG Propagated Detail[__syncthreads()] #################
## Entry Node ##
# Ref Info (psg_entry_ref)
psg_entry_ref is null
# Global Info (psg_entry_global)
psg_entry_global is null
## Exit Node ##
# Ref Info (psg_exit_ref)
psg_exit_ref is null
# Global Info (psg_exit_global)
psg_exit_global is null
############### PSG Propagated Detail[matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)] #################
## Entry Node ##
# Ref Info (psg_entry_ref)
# parameter: C
# parameter: B
# parameter: A
# Global Info (psg_entry_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
## Call Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
# Ref Info (psg_call_ref)
psg_call_ref is null
# Global Info (psg_call_global)
psg_call_global is null
## Return Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
# Ref Info (psg_return_ref)
psg_return_ref is null
# Global Info (psg_return_global)
psg_return_global is null
## Call Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
# Ref Info (psg_call_ref)
psg_call_ref is null
# Global Info (psg_call_global)
psg_call_global is null
## Return Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
# Ref Info (psg_return_ref)
psg_return_ref is null
# Global Info (psg_return_global)
psg_return_global is null
## Exit Node ##
# Ref Info (psg_exit_ref)
-OUTdef: C, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
-OUTdef: B, NODE: * B
-OUTdef: A, NODE: * A
# Global Info (psg_exit_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
-----cetus application IPChainAnalysis 5----
-----cetus application IPChainAnalysis 6----
-----cetus application IPChainAnalysis 7----
-----cetus application IPChainAnalysis end----
----FcudaGlobalData2.java end----
DEF::
########################## (printDefUseChain) Procedure: __syncthreads
#######################################################################
########################## (printDefUseChain) Procedure: matrixMul
Def[0]: C, IR: * C
Def[1]: A, IR: * A
--> Use: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];, proc: matrixMul
Def[2]: B, IR: * B
--> Use: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];, proc: matrixMul
Def[3]: wA, IR: wA
--> Use: aBegin=((wA*16)*by);, proc: matrixMul
--> Use: aEnd=((aBegin+wA)-1);, proc: matrixMul
--> Use: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];, proc: matrixMul
Def[4]: wB, IR: wB
--> Use: bStep=(16*wB);, proc: matrixMul
--> Use: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];, proc: matrixMul
--> Use: c=(((wB*16)*by)+(16*bx));, proc: matrixMul
--> Use: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];, proc: matrixMul
Def[5]: bx, IR: bx=blockIdx.x;
--> Use: bBegin=(16*bx);, proc: matrixMul
--> Use: c=(((wB*16)*by)+(16*bx));, proc: matrixMul
Def[6]: by, IR: by=blockIdx.y;
--> Use: aBegin=((wA*16)*by);, proc: matrixMul
--> Use: c=(((wB*16)*by)+(16*bx));, proc: matrixMul
Def[7]: aBegin, IR: aBegin=((wA*16)*by);
--> Use: aEnd=((aBegin+wA)-1);, proc: matrixMul
--> Use: ((a=aBegin), (b=bBegin));, proc: matrixMul
Def[8]: aEnd, IR: aEnd=((aBegin+wA)-1);
--> Use: a<=aEnd, proc: matrixMul
Def[9]: aStep, IR: aStep=16;
--> Use: ((a+=aStep), (b+=bStep)), proc: matrixMul
Def[10]: bBegin, IR: bBegin=(16*bx);
--> Use: ((a=aBegin), (b=bBegin));, proc: matrixMul
Def[11]: bStep, IR: bStep=(16*wB);
--> Use: ((a+=aStep), (b+=bStep)), proc: matrixMul
Def[12]: Csub_block, IR: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
--> Use: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);, proc: matrixMul
--> Use: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];, proc: matrixMul
Def[13]: a, IR: a=0;
Def[14]: b, IR: b=0;
Def[15]: k, IR: k=0;
Def[16]: a, IR: ((a=aBegin), (b=bBegin));
--> Use: a<=aEnd, proc: matrixMul
--> Use: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];, proc: matrixMul
--> Use: ((a+=aStep), (b+=bStep)), proc: matrixMul
Def[17]: b, IR: ((a=aBegin), (b=bBegin));
--> Use: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];, proc: matrixMul
--> Use: ((a+=aStep), (b+=bStep)), proc: matrixMul
Def[18]: As, IR: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
--> Use: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);, proc: matrixMul
Def[19]: Bs, IR: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
--> Use: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);, proc: matrixMul
Def[20]: k, IR: #pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0;
--> Use: k<16, proc: matrixMul
--> Use: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);, proc: matrixMul
--> Use: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);, proc: matrixMul
--> Use: ++ k, proc: matrixMul
Def[21]: Csub_block, IR: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
--> Use: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);, proc: matrixMul
--> Use: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];, proc: matrixMul
Def[22]: k, IR: ++ k
--> Use: k<16, proc: matrixMul
--> Use: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);, proc: matrixMul
--> Use: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);, proc: matrixMul
--> Use: ++ k, proc: matrixMul
Def[23]: a, IR: ((a+=aStep), (b+=bStep))
--> Use: a<=aEnd, proc: matrixMul
--> Use: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];, proc: matrixMul
--> Use: ((a+=aStep), (b+=bStep)), proc: matrixMul
Def[24]: b, IR: ((a+=aStep), (b+=bStep))
--> Use: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];, proc: matrixMul
--> Use: ((a+=aStep), (b+=bStep)), proc: matrixMul
Def[25]: c, IR: c=(((wB*16)*by)+(16*bx));
--> Use: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];, proc: matrixMul
Def[26]: C, IR: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#######################################################################
USE::
########################## (printUseDefChain) Procedure: __syncthreads
#######################################################################
########################## (printUseDefChain) Procedure: matrixMul
Use[0]: wA, IR: aBegin=((wA*16)*by);
....IPChainAnalysis getDefList....
--> Def: wA, proc: matrixMul
Use[1]: by, IR: aBegin=((wA*16)*by);
....IPChainAnalysis getDefList....
--> Def: by=blockIdx.y;, proc: matrixMul
Use[2]: aBegin, IR: aEnd=((aBegin+wA)-1);
....IPChainAnalysis getDefList....
--> Def: aBegin=((wA*16)*by);, proc: matrixMul
Use[3]: wA, IR: aEnd=((aBegin+wA)-1);
....IPChainAnalysis getDefList....
--> Def: wA, proc: matrixMul
Use[4]: bx, IR: bBegin=(16*bx);
....IPChainAnalysis getDefList....
--> Def: bx=blockIdx.x;, proc: matrixMul
Use[5]: wB, IR: bStep=(16*wB);
....IPChainAnalysis getDefList....
--> Def: wB, proc: matrixMul
Use[6]: aBegin, IR: ((a=aBegin), (b=bBegin));
....IPChainAnalysis getDefList....
--> Def: aBegin=((wA*16)*by);, proc: matrixMul
Use[7]: bBegin, IR: ((a=aBegin), (b=bBegin));
....IPChainAnalysis getDefList....
--> Def: bBegin=(16*bx);, proc: matrixMul
Use[8]: a, IR: a<=aEnd
....IPChainAnalysis getDefList....
--> Def: ((a=aBegin), (b=bBegin));, proc: matrixMul
--> Def: ((a+=aStep), (b+=bStep)), proc: matrixMul
Use[9]: aEnd, IR: a<=aEnd
....IPChainAnalysis getDefList....
--> Def: aEnd=((aBegin+wA)-1);, proc: matrixMul
Use[10]: a, IR: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
....IPChainAnalysis getDefList....
--> Def: ((a=aBegin), (b=bBegin));, proc: matrixMul
--> Def: ((a+=aStep), (b+=bStep)), proc: matrixMul
Use[11]: wA, IR: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
....IPChainAnalysis getDefList....
--> Def: wA, proc: matrixMul
Use[12]: A, IR: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
....IPChainAnalysis getDefList....
--> Def: * A, proc: matrixMul
Use[13]: b, IR: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
....IPChainAnalysis getDefList....
--> Def: ((a=aBegin), (b=bBegin));, proc: matrixMul
--> Def: ((a+=aStep), (b+=bStep)), proc: matrixMul
Use[14]: wB, IR: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
....IPChainAnalysis getDefList....
--> Def: wB, proc: matrixMul
Use[15]: B, IR: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
....IPChainAnalysis getDefList....
--> Def: * B, proc: matrixMul
Use[16]: k, IR: k<16
....IPChainAnalysis getDefList....
--> Def: #pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0;, proc: matrixMul
--> Def: ++ k, proc: matrixMul
Use[17]: k, IR: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
....IPChainAnalysis getDefList....
--> Def: #pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0;, proc: matrixMul
--> Def: ++ k, proc: matrixMul
Use[18]: As, IR: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
....IPChainAnalysis getDefList....
--> Def: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];, proc: matrixMul
Use[19]: k, IR: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
....IPChainAnalysis getDefList....
--> Def: #pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0;, proc: matrixMul
--> Def: ++ k, proc: matrixMul
Use[20]: Bs, IR: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
....IPChainAnalysis getDefList....
--> Def: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];, proc: matrixMul
Use[21]: Csub_block, IR: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
....IPChainAnalysis getDefList....
--> Def: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;, proc: matrixMul
--> Def: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);, proc: matrixMul
Use[22]: k, IR: ++ k
....IPChainAnalysis getDefList....
--> Def: #pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0;, proc: matrixMul
--> Def: ++ k, proc: matrixMul
Use[23]: aStep, IR: ((a+=aStep), (b+=bStep))
....IPChainAnalysis getDefList....
--> Def: aStep=16;, proc: matrixMul
Use[24]: a, IR: ((a+=aStep), (b+=bStep))
....IPChainAnalysis getDefList....
--> Def: ((a=aBegin), (b=bBegin));, proc: matrixMul
--> Def: ((a+=aStep), (b+=bStep)), proc: matrixMul
Use[25]: bStep, IR: ((a+=aStep), (b+=bStep))
....IPChainAnalysis getDefList....
--> Def: bStep=(16*wB);, proc: matrixMul
Use[26]: b, IR: ((a+=aStep), (b+=bStep))
....IPChainAnalysis getDefList....
--> Def: ((a=aBegin), (b=bBegin));, proc: matrixMul
--> Def: ((a+=aStep), (b+=bStep)), proc: matrixMul
Use[27]: wB, IR: c=(((wB*16)*by)+(16*bx));
....IPChainAnalysis getDefList....
--> Def: wB, proc: matrixMul
Use[28]: by, IR: c=(((wB*16)*by)+(16*bx));
....IPChainAnalysis getDefList....
--> Def: by=blockIdx.y;, proc: matrixMul
Use[29]: bx, IR: c=(((wB*16)*by)+(16*bx));
....IPChainAnalysis getDefList....
--> Def: bx=blockIdx.x;, proc: matrixMul
Use[30]: Csub_block, IR: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
....IPChainAnalysis getDefList....
--> Def: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;, proc: matrixMul
--> Def: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);, proc: matrixMul
Use[31]: c, IR: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
....IPChainAnalysis getDefList....
--> Def: c=(((wB*16)*by)+(16*bx));, proc: matrixMul
Use[32]: wB, IR: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
....IPChainAnalysis getDefList....
--> Def: wB, proc: matrixMul
#######################################################################
[SplitFcudaTasks2-FCUDA] begin
[SplitFcudaTasks2-FCUDA] examining procedure matrixMul
mVar2Var:
{a=[a, b, bStep, blockIdx, aStep, wA, bBegin, wB, aBegin], A=[], bStep=[wB], b=[a, b, bStep, blockIdx, aStep, wA, bBegin, wB, aBegin], B=[], c=[blockIdx, wB], C=[blockIdx, threadIdx, c, wB], wA=[], bBegin=[blockIdx], k=[k], wB=[], Bs=[a, b, bStep, blockIdx, threadIdx, aStep, wA, bBegin, wB, aBegin], blockIdx=[], threadIdx=[], As=[a, b, bStep, blockIdx, threadIdx, aStep, wA, bBegin, wB, aBegin], aStep=[], aEnd=[blockIdx, wA, aBegin], Csub_block=[threadIdx, k], aBegin=[blockIdx, wA]}
----SFT2_entered transformProcedure----
-----SFT2 enter new flow-----
-----SFT2 finished addAllSharedToBRAMSet(proc)-----
-----SFT2 finished FCUDAGlobalData2.setBRAMSet(mBRAMSet);-----
-----SFT2 finished proc.getBody().addANSIDeclaration(MCUDAUtils.Bidx.getDecl().get(0));-----
-----proc after proc.getbody addAnsideclaration-----
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
-----start to decomposeKernel-----
----001----dck BFI new----
----002----annotLst bfitr getlist----
----003----annotLst bfitr getlist----
----004----asNum = annotLst size, asNum is: 31
----0041----enter for_1, curr asCnt is: 0
----0042----annotStmt is: #pragma HLS INTERFACE ap_bus port=A depth=3840
----0043----list fcAnnots is: []
----0041----enter for_1, curr asCnt is: 1
----0042----annotStmt is: #pragma HLS INTERFACE ap_bus port=B depth=6144
----0043----list fcAnnots is: []
----0041----enter for_1, curr asCnt is: 2
----0042----annotStmt is: #pragma HLS INTERFACE ap_bus port=C depth=10240
----0043----list fcAnnots is: []
----0041----enter for_1, curr asCnt is: 3
----0042----annotStmt is: #pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
----0043----list fcAnnots is: [#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true ]
----00431----enter for_2 curr fcannot is: #pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
----00432----curr annotType is: compute
----00433----curr bgn is: true
----00434----curr annotType is COM or TRN
----00435----curr bgn is true
----SFT2-decompose-before sanity check, inCMP is: false, inTRN is: false, tskName is: null
----0041----enter for_1, curr asCnt is: 4
----0042----annotStmt is: #pragma fcuda tloop name=CMP_5 end=false begin=true
----0043----list fcAnnots is: [#pragma fcuda tloop name=CMP_5 end=false begin=true ]
----00431----enter for_2 curr fcannot is: #pragma fcuda tloop name=CMP_5 end=false begin=true
----00432----curr annotType is: tloop
----00433----curr bgn is: true
----0043A----curr annotType is not COM nor TRN
----0041----enter for_1, curr asCnt is: 5
----0042----annotStmt is: #pragma fcuda tloop name=CMP_5 end=true begin=false
----0043----list fcAnnots is: [#pragma fcuda tloop name=CMP_5 end=true begin=false ]
----00431----enter for_2 curr fcannot is: #pragma fcuda tloop name=CMP_5 end=true begin=false
----00432----curr annotType is: tloop
----00433----curr bgn is: false
----0043A----curr annotType is not COM nor TRN
----0041----enter for_1, curr asCnt is: 6
----0042----annotStmt is: #pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
----0043----list fcAnnots is: [#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false ]
----00431----enter for_2 curr fcannot is: #pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
----00432----curr annotType is: compute
----00433----curr bgn is: false
----00434----curr annotType is COM or TRN
----00436----curr bgn is false
----00438----enter inCMP decomposeCompute
----00438----bgnstmt is: #pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
----00438----endStmt is: #pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
----00438----tskNmae is: CMP_5
... Prelim task handling: matrixMul_CMP_5
...tskName is: matrixMul_CMP_5
...mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
...print current fcudaGlobalData2 tasks...
Tasks: [matrixMul_CMP_5]
....00......................................
....01....enableSignal is: guard_matrixMul_CMP_5
....02....enDeclor is: guard_matrixMul_CMP_5
....03....enableDecl is: int guard_matrixMul_CMP_5
....04....after getbody addAnsideclaration, mprocedure_body is: {
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
----addStatementBefore----index is:12
....05....after addAfterLastDeclaration, mprocedure_body is: {
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
....06....parCStmt is: {
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
....07....idxDif is: 2
....08....taskCall is: matrixMul_CMP_5()
....09....after addTaskCall, tskName is: matrixMul_CMP_5
....09....after addTaskCall, taskCall is: matrixMul_CMP_5()
....10....after addFcudaCore, taskCall is: matrixMul_CMP_5()
....10....after addFcudaCore, mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
Creating new FcudaCoreData for core: matrixMul_CMP_5()
....11....after setCoreName, tskName is: matrixMul_CMP_5
....11....after setCoreName, taskCall is: matrixMul_CMP_5()
....11....after setCoreName, mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
....12....sftTaskArgs size is: 0
....12....sftTaskArgs is: []
....13....sftTaskArgs size is: 1
....13....sftTaskArgs is: [guard_matrixMul_CMP_5]
....14....sftTaskArgSyms is: [guard_matrixMul_CMP_5]
....15....sftTaskDecls size is: 1
....15....sftTaskDecls is: [int guard_matrixMul_CMP_5]
....16....sftTaskArgs size is: 2
....16....sftTaskArgs is: [guard_matrixMul_CMP_5, blockDim]
....16....sftTaskDecls size is: 4
....16....sftTaskDecls is: [int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx]
....17....sftCommonArgsIndex size is: 1
....17....sftCommonArgsIndex is: [1]
....18....sftTaskArgs size is: 3
....18....sftTaskArgs is: [guard_matrixMul_CMP_5, blockDim, gridDim]
....18....sftCommonArgsIndex size is: 2
....18....sftCommonArgsIndex is: [1, 2]
....19....sftTaskArgs size is: 4
....19....sftTaskArgs is: [guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx]
....20....enableStmt is: if (guard_matrixMul_CMP_5)
{
}
....21....fcTask is: {
if (guard_matrixMul_CMP_5)
{
}
}
....22....tskStmts size is: 41
....22....tskStmts is: [int aBegin;, int aEnd;, int aStep;, int bBegin;, int bStep;, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];, int a;, int b;, int k;, int c;, dim3 blockIdx;, int guard_matrixMul_CMP_5;, guard_matrixMul_CMP_5=1;, #pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 , aBegin=((wA*16)*blockIdx.y);, aEnd=((aBegin+wA)-1);, aStep=16;, bBegin=(16*blockIdx.x);, bStep=(16*wB);, #pragma fcuda compute cores=1 name=CMP_5 end=false begin=true , #pragma fcuda tloop name=CMP_5 end=false begin=true , #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;, #pragma fcuda tloop name=CMP_5 end=true begin=false , #pragma fcuda compute cores=1 name=CMP_5 end=true begin=false , a=0;, b=0;, k=0;, #pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}, c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));, , , #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true , #pragma fcuda tloop name=TRN_10 end=false begin=true , #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];, #pragma fcuda tloop name=TRN_10 end=true begin=false , #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false , , , return ;]
Marking Statements 19 - 23 for task: matrixMul_CMP_5
....23....curr sIdx is: 19 , curr tskStmts to be added is: #pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
....23....after adding, tskName is: matrixMul_CMP_5
....23....curr sIdx is: 20 , curr tskStmts to be added is: #pragma fcuda tloop name=CMP_5 end=false begin=true
....23....after adding, tskName is: matrixMul_CMP_5
....23....curr sIdx is: 21 , curr tskStmts to be added is: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
....23....after adding, tskName is: matrixMul_CMP_5
....23....curr sIdx is: 22 , curr tskStmts to be added is: #pragma fcuda tloop name=CMP_5 end=true begin=false
....23....after adding, tskName is: matrixMul_CMP_5
....23....curr sIdx is: 23 , curr tskStmts to be added is: #pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
....23....after adding, tskName is: matrixMul_CMP_5
........declList is: []
....24....ProcedureDeclarator tskProcDecl is: matrixMul_CMP_5()
....25....Procedure tskProc is: void matrixMul_CMP_5()
{
if (guard_matrixMul_CMP_5)
{
}
}
....26.... TranslationUnit kernUnit is: #include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
const int BLOCKDIM_X = 16, BLOCKDIM_Y = 16;
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
....27.... after kernUnit.addDeclaratuibBefore, kernUnit is: #include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
const int BLOCKDIM_X = 16, BLOCKDIM_Y = 16;
void matrixMul_CMP_5()
{
if (guard_matrixMul_CMP_5)
{
}
}
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
....28.... before FCUDAutils.addTaskMapping, mProcedure.getSymbolName is: matrixMul
....28.... before FCUDAutils.addTaskMapping, tskProc is: void matrixMul_CMP_5()
{
if (guard_matrixMul_CMP_5)
{
}
}
....29.... after FCUDAutils.addTaskMapping, mProcedure.getSymbolName is: matrixMul
....29.... after FCUDAutils.addTaskMapping, tskProc is: void matrixMul_CMP_5()
{
if (guard_matrixMul_CMP_5)
{
}
}
....30.... after FcudaAnnotation fannot, fannot is: #pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
....31.... fannot is not null
....32.... done taskCreationPrelims
....32.... final taskCall is: matrixMul_CMP_5()
----addStatementBefore----index is:21
Starting to collect parameters for procedure: matrixMul_CMP_5
Task Statement: #pragma fcuda compute cores=1 name=CMP_5 end=false begin=true of task: matrixMul_CMP_5
Task Statement: #pragma fcuda tloop name=CMP_5 end=false begin=true of task: matrixMul_CMP_5
Task Statement: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0; of task: matrixMul_CMP_5
Task defExp: Csub_block[threadIdx.y][threadIdx.x] of task: matrixMul_CMP_5
Task def: Csub_block of task: matrixMul_CMP_5
... has # of chain uses: 2
Check Uses: [#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);, #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];]
... has Use out of task
> #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
Task useExp: threadIdx.x of task: matrixMul_CMP_5
Task useExp: threadIdx.y of task: matrixMul_CMP_5
Task Statement: #pragma fcuda tloop name=CMP_5 end=true begin=false of task: matrixMul_CMP_5
Task Statement: #pragma fcuda compute cores=1 name=CMP_5 end=true begin=false of task: matrixMul_CMP_5
----0041----enter for_1, curr asCnt is: 7
----0042----annotStmt is:
----0043----list fcAnnots is: []
----0041----enter for_1, curr asCnt is: 8
----0042----annotStmt is:
----0043----list fcAnnots is: []
----0041----enter for_1, curr asCnt is: 9
----0042----annotStmt is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
----0043----list fcAnnots is: [#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true ]
----00431----enter for_2 curr fcannot is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
----00432----curr annotType is: transfer
----00433----curr bgn is: true
----00434----curr annotType is COM or TRN
----00435----curr bgn is true
----SFT2-decompose-before sanity check, inCMP is: false, inTRN is: false, tskName is: null
----0041----enter for_1, curr asCnt is: 10
----0042----annotStmt is: #pragma fcuda tloop name=TRN_10 end=false begin=true
----0043----list fcAnnots is: [#pragma fcuda tloop name=TRN_10 end=false begin=true ]
----00431----enter for_2 curr fcannot is: #pragma fcuda tloop name=TRN_10 end=false begin=true
----00432----curr annotType is: tloop
----00433----curr bgn is: true
----0043A----curr annotType is not COM nor TRN
----0041----enter for_1, curr asCnt is: 11
----0042----annotStmt is: #pragma fcuda tloop name=TRN_10 end=true begin=false
----0043----list fcAnnots is: [#pragma fcuda tloop name=TRN_10 end=true begin=false ]
----00431----enter for_2 curr fcannot is: #pragma fcuda tloop name=TRN_10 end=true begin=false
----00432----curr annotType is: tloop
----00433----curr bgn is: false
----0043A----curr annotType is not COM nor TRN
----0041----enter for_1, curr asCnt is: 12
----0042----annotStmt is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
----0043----list fcAnnots is: [#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false ]
----00431----enter for_2 curr fcannot is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
----00432----curr annotType is: transfer
----00433----curr bgn is: false
----00434----curr annotType is COM or TRN
----00436----curr bgn is false
----00439----enter inTRN decomposeCompute
----00439----bgnstmt is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
----00439----endStmt is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
----00439----tskNmae is: TRN_10
----4.0----trnTaskName is: matrixMul_TRN_10
----4.0----bgnIdx is: 34
----4.0----endIdx is: 38
... Prelim task handling: matrixMul_TRN_10
...tskName is: matrixMul_TRN_10
...mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
...print current fcudaGlobalData2 tasks...
Tasks: [matrixMul_TRN_10, matrixMul_CMP_5]
....00......................................
....01....enableSignal is: guard_matrixMul_TRN_10
....02....enDeclor is: guard_matrixMul_TRN_10
....03....enableDecl is: int guard_matrixMul_TRN_10
....04....after getbody addAnsideclaration, mprocedure_body is: {
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
----addStatementBefore----index is:13
....05....after addAfterLastDeclaration, mprocedure_body is: {
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
....06....parCStmt is: {
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
....07....idxDif is: 2
....08....taskCall is: matrixMul_TRN_10()
....09....after addTaskCall, tskName is: matrixMul_TRN_10
....09....after addTaskCall, taskCall is: matrixMul_TRN_10()
....10....after addFcudaCore, taskCall is: matrixMul_TRN_10()
....10....after addFcudaCore, mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
Creating new FcudaCoreData for core: matrixMul_TRN_10()
....11....after setCoreName, tskName is: matrixMul_TRN_10
....11....after setCoreName, taskCall is: matrixMul_TRN_10()
....11....after setCoreName, mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
....12....sftTaskArgs size is: 0
....12....sftTaskArgs is: []
....13....sftTaskArgs size is: 1
....13....sftTaskArgs is: [guard_matrixMul_TRN_10]
....14....sftTaskArgSyms is: [guard_matrixMul_TRN_10]
....15....sftTaskDecls size is: 1
....15....sftTaskDecls is: [int guard_matrixMul_TRN_10]
....16....sftTaskArgs size is: 2
....16....sftTaskArgs is: [guard_matrixMul_TRN_10, blockDim]
....16....sftTaskDecls size is: 4
....16....sftTaskDecls is: [int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx]
....17....sftCommonArgsIndex size is: 1
....17....sftCommonArgsIndex is: [1]
....18....sftTaskArgs size is: 3
....18....sftTaskArgs is: [guard_matrixMul_TRN_10, blockDim, gridDim]
....18....sftCommonArgsIndex size is: 2
....18....sftCommonArgsIndex is: [1, 2]
....19....sftTaskArgs size is: 4
....19....sftTaskArgs is: [guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx]
....20....enableStmt is: if (guard_matrixMul_TRN_10)
{
}
....21....fcTask is: {
if (guard_matrixMul_TRN_10)
{
}
}
....22....tskStmts size is: 44
....22....tskStmts is: [int aBegin;, int aEnd;, int aStep;, int bBegin;, int bStep;, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];, int a;, int b;, int k;, int c;, dim3 blockIdx;, int guard_matrixMul_CMP_5;, int guard_matrixMul_TRN_10;, guard_matrixMul_TRN_10=1;, guard_matrixMul_CMP_5=1;, #pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 , aBegin=((wA*16)*blockIdx.y);, aEnd=((aBegin+wA)-1);, aStep=16;, bBegin=(16*blockIdx.x);, bStep=(16*wB);, matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);, #pragma fcuda compute cores=1 name=CMP_5 end=false begin=true , #pragma fcuda tloop name=CMP_5 end=false begin=true , #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;, #pragma fcuda tloop name=CMP_5 end=true begin=false , #pragma fcuda compute cores=1 name=CMP_5 end=true begin=false , a=0;, b=0;, k=0;, #pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}, c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));, , , #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true , #pragma fcuda tloop name=TRN_10 end=false begin=true , #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];, #pragma fcuda tloop name=TRN_10 end=true begin=false , #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false , , , return ;]
Marking Statements 34 - 38 for task: matrixMul_TRN_10
....23....curr sIdx is: 34 , curr tskStmts to be added is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
....23....after adding, tskName is: matrixMul_TRN_10
....23....curr sIdx is: 35 , curr tskStmts to be added is: #pragma fcuda tloop name=TRN_10 end=false begin=true
....23....after adding, tskName is: matrixMul_TRN_10
....23....curr sIdx is: 36 , curr tskStmts to be added is: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
....23....after adding, tskName is: matrixMul_TRN_10
....23....curr sIdx is: 37 , curr tskStmts to be added is: #pragma fcuda tloop name=TRN_10 end=true begin=false
....23....after adding, tskName is: matrixMul_TRN_10
....23....curr sIdx is: 38 , curr tskStmts to be added is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
....23....after adding, tskName is: matrixMul_TRN_10
........declList is: []
....24....ProcedureDeclarator tskProcDecl is: matrixMul_TRN_10()
....25....Procedure tskProc is: void matrixMul_TRN_10()
{
if (guard_matrixMul_TRN_10)
{
}
}
....26.... TranslationUnit kernUnit is: #include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
const int BLOCKDIM_X = 16, BLOCKDIM_Y = 16;
#pragma fcuda compute name=CMP_5 end=false cores=1 begin=true
void matrixMul_CMP_5(int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X])
{
if (guard_matrixMul_CMP_5)
{
}
}
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
....27.... after kernUnit.addDeclaratuibBefore, kernUnit is: #include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
const int BLOCKDIM_X = 16, BLOCKDIM_Y = 16;
#pragma fcuda compute name=CMP_5 end=false cores=1 begin=true
void matrixMul_CMP_5(int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X])
{
if (guard_matrixMul_CMP_5)
{
}
}
void matrixMul_TRN_10()
{
if (guard_matrixMul_TRN_10)
{
}
}
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
....28.... before FCUDAutils.addTaskMapping, mProcedure.getSymbolName is: matrixMul
....28.... before FCUDAutils.addTaskMapping, tskProc is: void matrixMul_TRN_10()
{
if (guard_matrixMul_TRN_10)
{
}
}
....29.... after FCUDAutils.addTaskMapping, mProcedure.getSymbolName is: matrixMul
....29.... after FCUDAutils.addTaskMapping, tskProc is: void matrixMul_TRN_10()
{
if (guard_matrixMul_TRN_10)
{
}
}
....30.... after FcudaAnnotation fannot, fannot is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
....31.... fannot is not null
....32.... done taskCreationPrelims
....32.... final taskCall is: matrixMul_TRN_10()
----4.0----taskCall is: matrixMul_TRN_10()
----4.0----taskCallStmt is: matrixMul_TRN_10();
----addStatementBefore----index is:36
----4.0----cStmt after addStatementBefore is: {
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
matrixMul_TRN_10();
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
----4.1----Starting to collect parameters for procedure: matrixMul_TRN_10
----4.1----trnData is: fcuda.common.TaskData@61ca2dfa
----4.1----tskStmts is: [#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true , #pragma fcuda tloop name=TRN_10 end=false begin=true , #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];, #pragma fcuda tloop name=TRN_10 end=true begin=false , #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false ]
----4.1----allTskSyms is: []
----4.2----curr Task Statement: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
----7.11----test_children is: []
----7.12----test_annot is: [#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true ]
----7.13---coresList is: [1]
----7.13---sizeList is: [BLOCKDIM_X]
----7.13---taskName is: TRN_10
----7.13---transferType is: null
----4.21---- AnnotationStatement continue
----4.2----curr Task Statement: #pragma fcuda tloop name=TRN_10 end=false begin=true
----7.11----test_children is: []
----7.12----test_annot is: [#pragma fcuda tloop name=TRN_10 end=false begin=true ]
----4.21---- AnnotationStatement continue
----4.2----curr Task Statement: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
----7.11----test_children is: [C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x]]
----7.12----test_annot is: [#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true ]
----7.14----stmtpragmas: []
----7.14----test_annot: [#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true ]
----7.14----real_stmt: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
----7.14---mOffchipPtrNameList is: [C]
----7.14---dirList is: false
----7.15----1.collecting parameters----
-------GLBpntr is: [C]
-------core: [1]
-------size: [BLOCKDIM_X]
-------rdNwrt: false
-------name: TRN_10
-------type: null
----4.3----in for loop DEFS, defExp: C[((c+(wB*threadIdx.y))+threadIdx.x)]
----4.3----Task def: C
----4.3----defSym: * C
----4.3----scalarDef: true
----4.3----constArrAcc: false
----4.3----allTskSyms: [* C]
----4.4----defDecl: DATATYPE * C
----4.4----defUses: []
----4.4---- ... has # of chain uses: 0
Task useExp: Csub_block[threadIdx.y][threadIdx.x] of task: matrixMul_TRN_10
Task use: Csub_block of task: matrixMul_TRN_10
... has # of chain defs: 2
... has Def out of task
> #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
Task useExp: c of task: matrixMul_TRN_10
Task use: c of task: matrixMul_TRN_10
... has # of chain defs: 1
... has Def out of task
> c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
Task useExp: threadIdx.x of task: matrixMul_TRN_10
Task useExp: threadIdx.y of task: matrixMul_TRN_10
Task useExp: wB of task: matrixMul_TRN_10
Task use: wB of task: matrixMul_TRN_10
... has # of chain defs: 1
... has Def out of task
> wB
----4.2----curr Task Statement: #pragma fcuda tloop name=TRN_10 end=true begin=false
----7.11----test_children is: []
----7.12----test_annot is: [#pragma fcuda tloop name=TRN_10 end=true begin=false ]
----4.21---- AnnotationStatement continue
----4.2----curr Task Statement: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
----7.11----test_children is: []
----7.12----test_annot is: [#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false ]
----4.21---- AnnotationStatement continue
----5.0----end of task statement, cStmt is: {
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
matrixMul_TRN_10();
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
----8.0----trnTaskName: matrixMul_TRN_10
--------sftTaskArgSyms: [wB, Csub_block[BLOCKDIM_Y][BLOCKDIM_X], c, guard_matrixMul_TRN_10, * C]
--------sftTaskArgs: [guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB]
--------sftTaskDecls: [int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB]
--------taskCall: matrixMul_TRN_10()
--------sftCommonArgsIndex: [1, 2, 4, 7]
----8.00----tskSym is: wB
----8.01----decl is: int wB
----8.02----inTaskDecl is: false
----8.03----sftTaskArgSyms contains tskSym
----8.00----tskSym is: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
----8.01----decl is: __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
----8.02----inTaskDecl is: false
----8.03----sftTaskArgSyms contains tskSym
----8.00----tskSym is: c
----8.01----decl is: int c
----8.02----inTaskDecl is: false
----8.03----sftTaskArgSyms contains tskSym
----8.00----tskSym is: * C
----8.01----decl is: DATATYPE * C
----8.02----inTaskDecl is: false
----8.03----sftTaskArgSyms contains tskSym
----8.07----trnTaskName: matrixMul_TRN_10
--------sftTaskArgSyms: [wB, Csub_block[BLOCKDIM_Y][BLOCKDIM_X], c, guard_matrixMul_TRN_10, * C]
--------sftTaskArgs: [guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB]
--------sftTaskDecls: [int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB]
--------taskCall: matrixMul_TRN_10()
--------sftCommonArgsIndex: [1, 2, 4, 7]
----8.08----trnTaskName: matrixMul_TRN_10
--------sftTaskArgSyms: [wB, Csub_block[BLOCKDIM_Y][BLOCKDIM_X], c, guard_matrixMul_TRN_10, * C]
--------sftTaskArgs: [guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB]
--------sftTaskDecls: [int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB]
--------taskCall: matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB)
--------sftCommonArgsIndex: [1, 2, 4, 7]
----0041----enter for_1, curr asCnt is: 13
----0042----annotStmt is:
----0043----list fcAnnots is: []
----0041----enter for_1, curr asCnt is: 14
----0042----annotStmt is:
----0043----list fcAnnots is: []
----0041----enter for_1, curr asCnt is: 15
----0042----annotStmt is:
----0043----list fcAnnots is: []
----0041----enter for_1, curr asCnt is: 16
----0042----annotStmt is:
----0043----list fcAnnots is: []
----0041----enter for_1, curr asCnt is: 17
----0042----annotStmt is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
----0043----list fcAnnots is: [#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true ]
----00431----enter for_2 curr fcannot is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
----00432----curr annotType is: transfer
----00433----curr bgn is: true
----00434----curr annotType is COM or TRN
----00435----curr bgn is true
----SFT2-decompose-before sanity check, inCMP is: false, inTRN is: false, tskName is: null
----0041----enter for_1, curr asCnt is: 18
----0042----annotStmt is: #pragma fcuda tloop name=TRN_6 end=false begin=true
----0043----list fcAnnots is: [#pragma fcuda tloop name=TRN_6 end=false begin=true ]
----00431----enter for_2 curr fcannot is: #pragma fcuda tloop name=TRN_6 end=false begin=true
----00432----curr annotType is: tloop
----00433----curr bgn is: true
----0043A----curr annotType is not COM nor TRN
----0041----enter for_1, curr asCnt is: 19
----0042----annotStmt is: #pragma fcuda tloop name=TRN_6 end=true begin=false
----0043----list fcAnnots is: [#pragma fcuda tloop name=TRN_6 end=true begin=false ]
----00431----enter for_2 curr fcannot is: #pragma fcuda tloop name=TRN_6 end=true begin=false
----00432----curr annotType is: tloop
----00433----curr bgn is: false
----0043A----curr annotType is not COM nor TRN
----0041----enter for_1, curr asCnt is: 20
----0042----annotStmt is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
----0043----list fcAnnots is: [#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false ]
----00431----enter for_2 curr fcannot is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
----00432----curr annotType is: transfer
----00433----curr bgn is: false
----00434----curr annotType is COM or TRN
----00436----curr bgn is false
----00439----enter inTRN decomposeCompute
----00439----bgnstmt is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
----00439----endStmt is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
----00439----tskNmae is: TRN_6
----4.0----trnTaskName is: matrixMul_TRN_6
----4.0----bgnIdx is: 4
----4.0----endIdx is: 9
... Prelim task handling: matrixMul_TRN_6
...tskName is: matrixMul_TRN_6
...mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
...print current fcudaGlobalData2 tasks...
Tasks: [matrixMul_TRN_6, matrixMul_TRN_10, matrixMul_CMP_5]
....00......................................
....01....enableSignal is: guard_matrixMul_TRN_6
....02....enDeclor is: guard_matrixMul_TRN_6
....03....enableDecl is: int guard_matrixMul_TRN_6
....04....after getbody addAnsideclaration, mprocedure_body is: {
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
----addStatementBefore----index is:14
....05....after addAfterLastDeclaration, mprocedure_body is: {
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
....06....parCStmt is: {
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
....07....idxDif is: 0
....08....taskCall is: matrixMul_TRN_6()
....09....after addTaskCall, tskName is: matrixMul_TRN_6
....09....after addTaskCall, taskCall is: matrixMul_TRN_6()
....10....after addFcudaCore, taskCall is: matrixMul_TRN_6()
....10....after addFcudaCore, mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
Creating new FcudaCoreData for core: matrixMul_TRN_6()
....11....after setCoreName, tskName is: matrixMul_TRN_6
....11....after setCoreName, taskCall is: matrixMul_TRN_6()
....11....after setCoreName, mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
....12....sftTaskArgs size is: 0
....12....sftTaskArgs is: []
....13....sftTaskArgs size is: 1
....13....sftTaskArgs is: [guard_matrixMul_TRN_6]
....14....sftTaskArgSyms is: [guard_matrixMul_TRN_6]
....15....sftTaskDecls size is: 1
....15....sftTaskDecls is: [int guard_matrixMul_TRN_6]
....16....sftTaskArgs size is: 2
....16....sftTaskArgs is: [guard_matrixMul_TRN_6, blockDim]
....16....sftTaskDecls size is: 4
....16....sftTaskDecls is: [int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx]
....17....sftCommonArgsIndex size is: 1
....17....sftCommonArgsIndex is: [1]
....18....sftTaskArgs size is: 3
....18....sftTaskArgs is: [guard_matrixMul_TRN_6, blockDim, gridDim]
....18....sftCommonArgsIndex size is: 2
....18....sftCommonArgsIndex is: [1, 2]
....19....sftTaskArgs size is: 4
....19....sftTaskArgs is: [guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx]
....20....enableStmt is: if (guard_matrixMul_TRN_6)
{
}
....21....fcTask is: {
if (guard_matrixMul_TRN_6)
{
}
}
....22....tskStmts size is: 19
....22....tskStmts is: [__shared__ DATATYPE As[16][16];, __shared__ DATATYPE Bs[16][16];, , , #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true , #pragma fcuda tloop name=TRN_6 end=false begin=true , #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];, #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];, #pragma fcuda tloop name=TRN_6 end=true begin=false , #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false , , , , #pragma fcuda compute cores=1 name=SNC_7 end=false begin=true , #pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();, {
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}, #pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();, #pragma fcuda compute cores=1 name=SNC_7 end=true begin=false , ]
Marking Statements 4 - 9 for task: matrixMul_TRN_6
....23....curr sIdx is: 4 , curr tskStmts to be added is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
....23....after adding, tskName is: matrixMul_TRN_6
....23....curr sIdx is: 5 , curr tskStmts to be added is: #pragma fcuda tloop name=TRN_6 end=false begin=true
....23....after adding, tskName is: matrixMul_TRN_6
....23....curr sIdx is: 6 , curr tskStmts to be added is: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
....23....after adding, tskName is: matrixMul_TRN_6
....23....curr sIdx is: 7 , curr tskStmts to be added is: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
....23....after adding, tskName is: matrixMul_TRN_6
....23....curr sIdx is: 8 , curr tskStmts to be added is: #pragma fcuda tloop name=TRN_6 end=true begin=false
....23....after adding, tskName is: matrixMul_TRN_6
....23....curr sIdx is: 9 , curr tskStmts to be added is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
....23....after adding, tskName is: matrixMul_TRN_6
........declList is: []
....24....ProcedureDeclarator tskProcDecl is: matrixMul_TRN_6()
....25....Procedure tskProc is: void matrixMul_TRN_6()
{
if (guard_matrixMul_TRN_6)
{
}
}
....26.... TranslationUnit kernUnit is: #include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
const int BLOCKDIM_X = 16, BLOCKDIM_Y = 16;
#pragma fcuda compute name=CMP_5 end=false cores=1 begin=true
void matrixMul_CMP_5(int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X])
{
if (guard_matrixMul_CMP_5)
{
}
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
void matrixMul_TRN_10(int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB)
{
if (guard_matrixMul_TRN_10)
{
}
}
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
....27.... after kernUnit.addDeclaratuibBefore, kernUnit is: #include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
const int BLOCKDIM_X = 16, BLOCKDIM_Y = 16;
#pragma fcuda compute name=CMP_5 end=false cores=1 begin=true
void matrixMul_CMP_5(int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X])
{
if (guard_matrixMul_CMP_5)
{
}
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
void matrixMul_TRN_10(int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB)
{
if (guard_matrixMul_TRN_10)
{
}
}
void matrixMul_TRN_6()
{
if (guard_matrixMul_TRN_6)
{
}
}
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
....28.... before FCUDAutils.addTaskMapping, mProcedure.getSymbolName is: matrixMul
....28.... before FCUDAutils.addTaskMapping, tskProc is: void matrixMul_TRN_6()
{
if (guard_matrixMul_TRN_6)
{
}
}
....29.... after FCUDAutils.addTaskMapping, mProcedure.getSymbolName is: matrixMul
....29.... after FCUDAutils.addTaskMapping, tskProc is: void matrixMul_TRN_6()
{
if (guard_matrixMul_TRN_6)
{
}
}
....30.... after FcudaAnnotation fannot, fannot is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
....31.... fannot is not null
....32.... done taskCreationPrelims
....32.... final taskCall is: matrixMul_TRN_6()
----4.0----taskCall is: matrixMul_TRN_6()
----4.0----taskCallStmt is: matrixMul_TRN_6();
----addStatementBefore----index is:4
----4.0----cStmt after addStatementBefore is: {
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
matrixMul_TRN_6();
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
----4.1----Starting to collect parameters for procedure: matrixMul_TRN_6
----4.1----trnData is: fcuda.common.TaskData@7ba18f1b
----4.1----tskStmts is: [#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true , #pragma fcuda tloop name=TRN_6 end=false begin=true , #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];, #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];, #pragma fcuda tloop name=TRN_6 end=true begin=false , #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false ]
----4.1----allTskSyms is: []
----4.2----curr Task Statement: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
----7.11----test_children is: []
----7.12----test_annot is: [#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true ]
----7.13---coresList is: [1]
----7.13---sizeList is: [BLOCKDIM_X]
----7.13---taskName is: TRN_6
----7.13---transferType is: null
----4.21---- AnnotationStatement continue
----4.2----curr Task Statement: #pragma fcuda tloop name=TRN_6 end=false begin=true
----7.11----test_children is: []
----7.12----test_annot is: [#pragma fcuda tloop name=TRN_6 end=false begin=true ]
----4.21---- AnnotationStatement continue
----4.2----curr Task Statement: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
----7.11----test_children is: [As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)]]
----7.12----test_annot is: [#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true ]
----7.14----stmtpragmas: []
----7.14----test_annot: [#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true ]
----7.14----real_stmt: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
----7.14---mOffchipPtrNameList is: [A]
----7.14---dirList is: true
----7.15----1.collecting parameters----
-------GLBpntr is: [A]
-------core: [1]
-------size: [BLOCKDIM_X]
-------rdNwrt: true
-------name: TRN_6
-------type: null
----4.3----in for loop DEFS, defExp: As[threadIdx.y][threadIdx.x]
----4.3----Task def: As
----4.3----defSym: As[16][16]
----4.3----scalarDef: false
----4.3----constArrAcc: false
----4.3----allTskSyms: [As[16][16]]
----4.4----defDecl: __shared__ DATATYPE As[16][16]
----4.4----defUses: [#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);]
----4.4---- ... has # of chain uses: 1
----4.5----curr useTrv: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
----4.51----!(taskContains(useTrv, tskStmts))
----4.51----sftTaskArgs: [guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As]
----4.51----sftTaskArgSyms: [As[16][16], guard_matrixMul_TRN_6]
----4.51----sftTaskDecls: [int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE As[16][16]]
... has Use out of task
> #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
----4.52----gonna break
Task useExp: A[((a+(wA*threadIdx.y))+threadIdx.x)] of task: matrixMul_TRN_6
Task use: A of task: matrixMul_TRN_6
... has # of chain defs: 1
... has Def out of task
> * A
Task useExp: a of task: matrixMul_TRN_6
Task use: a of task: matrixMul_TRN_6
... has # of chain defs: 2
... has Def out of task
> ((a=aBegin), (b=bBegin));
Task useExp: threadIdx.x of task: matrixMul_TRN_6
Task useExp: threadIdx.y of task: matrixMul_TRN_6
Task useExp: wA of task: matrixMul_TRN_6
Task use: wA of task: matrixMul_TRN_6
... has # of chain defs: 1
... has Def out of task
> wA
----4.2----curr Task Statement: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
----7.11----test_children is: [Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)]]
----7.12----test_annot is: [#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true ]
----7.14----stmtpragmas: []
----7.14----test_annot: [#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true ]
----7.14----real_stmt: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
----7.14---mOffchipPtrNameList is: [A, B]
----7.14---dirList is: true
----7.15----1.collecting parameters----
-------GLBpntr is: [A, B]
-------core: [1]
-------size: [BLOCKDIM_X]
-------rdNwrt: true
-------name: TRN_6
-------type: null
----4.3----in for loop DEFS, defExp: Bs[threadIdx.y][threadIdx.x]
----4.3----Task def: Bs
----4.3----defSym: Bs[16][16]
----4.3----scalarDef: false
----4.3----constArrAcc: false
----4.3----allTskSyms: [* A, Bs[16][16], wA, a, As[16][16]]
----4.4----defDecl: __shared__ DATATYPE Bs[16][16]
----4.4----defUses: [#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);]
----4.4---- ... has # of chain uses: 1
----4.5----curr useTrv: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
----4.51----!(taskContains(useTrv, tskStmts))
----4.51----sftTaskArgs: [guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs]
----4.51----sftTaskArgSyms: [* A, Bs[16][16], wA, a, As[16][16], guard_matrixMul_TRN_6]
----4.51----sftTaskDecls: [int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE As[16][16], DATATYPE * A, int a, int wA, __shared__ DATATYPE Bs[16][16]]
... has Use out of task
> #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
----4.52----gonna break
Task useExp: B[((b+(wB*threadIdx.y))+threadIdx.x)] of task: matrixMul_TRN_6
Task use: B of task: matrixMul_TRN_6
... has # of chain defs: 1
... has Def out of task
> * B
Task useExp: b of task: matrixMul_TRN_6
Task use: b of task: matrixMul_TRN_6
... has # of chain defs: 2
... has Def out of task
> ((a=aBegin), (b=bBegin));
Task useExp: threadIdx.x of task: matrixMul_TRN_6
Task useExp: threadIdx.y of task: matrixMul_TRN_6
Task useExp: wB of task: matrixMul_TRN_6
Task use: wB of task: matrixMul_TRN_6
... has # of chain defs: 1
... has Def out of task
> wB
----4.2----curr Task Statement: #pragma fcuda tloop name=TRN_6 end=true begin=false
----7.11----test_children is: []
----7.12----test_annot is: [#pragma fcuda tloop name=TRN_6 end=true begin=false ]
----4.21---- AnnotationStatement continue
----4.2----curr Task Statement: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
----7.11----test_children is: []
----7.12----test_annot is: [#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false ]
----4.21---- AnnotationStatement continue
----5.0----end of task statement, cStmt is: {
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
matrixMul_TRN_6();
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
----8.0----trnTaskName: matrixMul_TRN_6
--------sftTaskArgSyms: [wB, * A, Bs[16][16], wA, a, As[16][16], guard_matrixMul_TRN_6, b, * B]
--------sftTaskArgs: [guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB]
--------sftTaskDecls: [int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE As[16][16], DATATYPE * A, int a, int wA, __shared__ DATATYPE Bs[16][16], DATATYPE * B, int b, int wB]
--------taskCall: matrixMul_TRN_6()
--------sftCommonArgsIndex: [1, 2, 5, 7, 9, 11]
----8.00----tskSym is: wB
----8.01----decl is: int wB
----8.02----inTaskDecl is: false
----8.03----sftTaskArgSyms contains tskSym
----8.00----tskSym is: * A
----8.01----decl is: DATATYPE * A
----8.02----inTaskDecl is: false
----8.03----sftTaskArgSyms contains tskSym
----8.00----tskSym is: Bs[16][16]
----8.01----decl is: __shared__ DATATYPE Bs[16][16]
----8.02----inTaskDecl is: false
----8.03----sftTaskArgSyms contains tskSym
----8.00----tskSym is: wA
----8.01----decl is: int wA
----8.02----inTaskDecl is: false
----8.03----sftTaskArgSyms contains tskSym
----8.00----tskSym is: a
----8.01----decl is: int a
----8.02----inTaskDecl is: false
----8.03----sftTaskArgSyms contains tskSym
----8.00----tskSym is: As[16][16]
----8.01----decl is: __shared__ DATATYPE As[16][16]
----8.02----inTaskDecl is: false
----8.03----sftTaskArgSyms contains tskSym
----8.00----tskSym is: b
----8.01----decl is: int b
----8.02----inTaskDecl is: false
----8.03----sftTaskArgSyms contains tskSym
----8.00----tskSym is: * B
----8.01----decl is: DATATYPE * B
----8.02----inTaskDecl is: false
----8.03----sftTaskArgSyms contains tskSym
----8.07----trnTaskName: matrixMul_TRN_6
--------sftTaskArgSyms: [wB, * A, Bs[16][16], wA, a, As[16][16], guard_matrixMul_TRN_6, b, * B]
--------sftTaskArgs: [guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB]
--------sftTaskDecls: [int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE As[16][16], DATATYPE * A, int a, int wA, __shared__ DATATYPE Bs[16][16], DATATYPE * B, int b, int wB]
--------taskCall: matrixMul_TRN_6()
--------sftCommonArgsIndex: [1, 2, 5, 7, 9, 11]
----8.08----trnTaskName: matrixMul_TRN_6
--------sftTaskArgSyms: [wB, * A, Bs[16][16], wA, a, As[16][16], guard_matrixMul_TRN_6, b, * B]
--------sftTaskArgs: [guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB]
--------sftTaskDecls: [int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE As[16][16], DATATYPE * A, int a, int wA, __shared__ DATATYPE Bs[16][16], DATATYPE * B, int b, int wB]
--------taskCall: matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB)
--------sftCommonArgsIndex: [1, 2, 5, 7, 9, 11]
----0041----enter for_1, curr asCnt is: 21
----0042----annotStmt is:
----0043----list fcAnnots is: []
----0041----enter for_1, curr asCnt is: 22
----0042----annotStmt is:
----0043----list fcAnnots is: []
----0041----enter for_1, curr asCnt is: 23
----0042----annotStmt is:
----0043----list fcAnnots is: []
----0041----enter for_1, curr asCnt is: 24
----0042----annotStmt is: #pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
----0043----list fcAnnots is: [#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true ]
----00431----enter for_2 curr fcannot is: #pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
----00432----curr annotType is: compute
----00433----curr bgn is: true
----00434----curr annotType is COM or TRN
----00435----curr bgn is true
----SFT2-decompose-before sanity check, inCMP is: false, inTRN is: false, tskName is: null
----0041----enter for_1, curr asCnt is: 25
----0042----annotStmt is: #pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
----0043----list fcAnnots is: [#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false ]
----00431----enter for_2 curr fcannot is: #pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
----00432----curr annotType is: compute
----00433----curr bgn is: false
----00434----curr annotType is COM or TRN
----00436----curr bgn is false
----00438----enter inCMP decomposeCompute
----00438----bgnstmt is: #pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
----00438----endStmt is: #pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
----00438----tskNmae is: SNC_7
... Prelim task handling: matrixMul_SNC_7
...tskName is: matrixMul_SNC_7
...mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
...print current fcudaGlobalData2 tasks...
Tasks: [matrixMul_TRN_6, matrixMul_SNC_7, matrixMul_TRN_10, matrixMul_CMP_5]
....00......................................
....01....enableSignal is: guard_matrixMul_SNC_7
....02....enDeclor is: guard_matrixMul_SNC_7
....03....enableDecl is: int guard_matrixMul_SNC_7
....04....after getbody addAnsideclaration, mprocedure_body is: {
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
----addStatementBefore----index is:15
....05....after addAfterLastDeclaration, mprocedure_body is: {
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
....06....parCStmt is: {
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
....07....idxDif is: 0
....08....taskCall is: matrixMul_SNC_7()
....09....after addTaskCall, tskName is: matrixMul_SNC_7
....09....after addTaskCall, taskCall is: matrixMul_SNC_7()
....10....after addFcudaCore, taskCall is: matrixMul_SNC_7()
....10....after addFcudaCore, mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
Creating new FcudaCoreData for core: matrixMul_SNC_7()
....11....after setCoreName, tskName is: matrixMul_SNC_7
....11....after setCoreName, taskCall is: matrixMul_SNC_7()
....11....after setCoreName, mProcedure is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
....12....sftTaskArgs size is: 0
....12....sftTaskArgs is: []
....13....sftTaskArgs size is: 1
....13....sftTaskArgs is: [guard_matrixMul_SNC_7]
....14....sftTaskArgSyms is: [guard_matrixMul_SNC_7]
....15....sftTaskDecls size is: 1
....15....sftTaskDecls is: [int guard_matrixMul_SNC_7]
....16....sftTaskArgs size is: 2
....16....sftTaskArgs is: [guard_matrixMul_SNC_7, blockDim]
....16....sftTaskDecls size is: 4
....16....sftTaskDecls is: [int guard_matrixMul_SNC_7, dim3 blockDim, dim3 gridDim, dim3 blockIdx]
....17....sftCommonArgsIndex size is: 1
....17....sftCommonArgsIndex is: [1]
....18....sftTaskArgs size is: 3
....18....sftTaskArgs is: [guard_matrixMul_SNC_7, blockDim, gridDim]
....18....sftCommonArgsIndex size is: 2
....18....sftCommonArgsIndex is: [1, 2]
....19....sftTaskArgs size is: 4
....19....sftTaskArgs is: [guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx]
....20....enableStmt is: if (guard_matrixMul_SNC_7)
{
}
....21....fcTask is: {
if (guard_matrixMul_SNC_7)
{
}
}
....22....tskStmts size is: 20
....22....tskStmts is: [__shared__ DATATYPE As[16][16];, __shared__ DATATYPE Bs[16][16];, , , matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);, #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true , #pragma fcuda tloop name=TRN_6 end=false begin=true , #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];, #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];, #pragma fcuda tloop name=TRN_6 end=true begin=false , #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false , , , , #pragma fcuda compute cores=1 name=SNC_7 end=false begin=true , #pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();, {
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}, #pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();, #pragma fcuda compute cores=1 name=SNC_7 end=true begin=false , ]
Marking Statements 14 - 18 for task: matrixMul_SNC_7
....23....curr sIdx is: 14 , curr tskStmts to be added is: #pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
....23....after adding, tskName is: matrixMul_SNC_7
....23....curr sIdx is: 15 , curr tskStmts to be added is: #pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
....23....after adding, tskName is: matrixMul_SNC_7
....23....curr sIdx is: 16 , curr tskStmts to be added is: {
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
....23....after adding, tskName is: matrixMul_SNC_7
....23....curr sIdx is: 17 , curr tskStmts to be added is: #pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
....23....after adding, tskName is: matrixMul_SNC_7
....23....curr sIdx is: 18 , curr tskStmts to be added is: #pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
....23....after adding, tskName is: matrixMul_SNC_7
........declList is: []
....24....ProcedureDeclarator tskProcDecl is: matrixMul_SNC_7()
....25....Procedure tskProc is: void matrixMul_SNC_7()
{
if (guard_matrixMul_SNC_7)
{
}
}
....26.... TranslationUnit kernUnit is: #include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
const int BLOCKDIM_X = 16, BLOCKDIM_Y = 16;
#pragma fcuda compute name=CMP_5 end=false cores=1 begin=true
void matrixMul_CMP_5(int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X])
{
if (guard_matrixMul_CMP_5)
{
}
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
void matrixMul_TRN_10(int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB)
{
if (guard_matrixMul_TRN_10)
{
}
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
void matrixMul_TRN_6(int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE As[16][16], DATATYPE * A, int a, int wA, __shared__ DATATYPE Bs[16][16], DATATYPE * B, int b, int wB)
{
if (guard_matrixMul_TRN_6)
{
}
}
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
....27.... after kernUnit.addDeclaratuibBefore, kernUnit is: #include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
const int BLOCKDIM_X = 16, BLOCKDIM_Y = 16;
#pragma fcuda compute name=CMP_5 end=false cores=1 begin=true
void matrixMul_CMP_5(int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X])
{
if (guard_matrixMul_CMP_5)
{
}
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
void matrixMul_TRN_10(int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB)
{
if (guard_matrixMul_TRN_10)
{
}
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
void matrixMul_TRN_6(int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE As[16][16], DATATYPE * A, int a, int wA, __shared__ DATATYPE Bs[16][16], DATATYPE * B, int b, int wB)
{
if (guard_matrixMul_TRN_6)
{
}
}
void matrixMul_SNC_7()
{
if (guard_matrixMul_SNC_7)
{
}
}
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
....28.... before FCUDAutils.addTaskMapping, mProcedure.getSymbolName is: matrixMul
....28.... before FCUDAutils.addTaskMapping, tskProc is: void matrixMul_SNC_7()
{
if (guard_matrixMul_SNC_7)
{
}
}
....29.... after FCUDAutils.addTaskMapping, mProcedure.getSymbolName is: matrixMul
....29.... after FCUDAutils.addTaskMapping, tskProc is: void matrixMul_SNC_7()
{
if (guard_matrixMul_SNC_7)
{
}
}
....30.... after FcudaAnnotation fannot, fannot is: #pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
....31.... fannot is not null
....32.... done taskCreationPrelims
....32.... final taskCall is: matrixMul_SNC_7()
----addStatementBefore----index is:14
Starting to collect parameters for procedure: matrixMul_SNC_7
Task Statement: #pragma fcuda compute cores=1 name=SNC_7 end=false begin=true of task: matrixMul_SNC_7
Task Statement: #pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads(); of task: matrixMul_SNC_7
Task Statement: {
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
} of task: matrixMul_SNC_7
Task defExp: Csub_block[threadIdx.y][threadIdx.x] of task: matrixMul_SNC_7
Task def: Csub_block of task: matrixMul_SNC_7
... has # of chain uses: 2
Check Uses: [#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);, #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];]
... has Use out of task
> #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
Task defExp: k of task: matrixMul_SNC_7
Task def: k of task: matrixMul_SNC_7
... has # of chain uses: 4
Check Uses: [k<16, #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);, #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);, ++ k]
Task useExp: As[threadIdx.y][k] of task: matrixMul_SNC_7
Task use: As of task: matrixMul_SNC_7
... has # of chain defs: 1
Check Defs: [#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];]
... has Def out of task
> #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Task useExp: Bs[k][threadIdx.x] of task: matrixMul_SNC_7
Task use: Bs of task: matrixMul_SNC_7
... has # of chain defs: 1
Check Defs: [#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];]
... has Def out of task
> #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
Task useExp: Csub_block[threadIdx.y][threadIdx.x] of task: matrixMul_SNC_7
Task use: Csub_block of task: matrixMul_SNC_7
Task useExp: k of task: matrixMul_SNC_7
Task use: k of task: matrixMul_SNC_7
... has # of chain defs: 2
Check Defs: [#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0;, ++ k]
Task useExp: threadIdx.x of task: matrixMul_SNC_7
Task useExp: threadIdx.y of task: matrixMul_SNC_7
Task Statement: #pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads(); of task: matrixMul_SNC_7
Task Statement: #pragma fcuda compute cores=1 name=SNC_7 end=true begin=false of task: matrixMul_SNC_7
Shift Decl: int k to proc: matrixMul_SNC_7
----0041----enter for_1, curr asCnt is: 26
----0042----annotStmt is:
----0043----list fcAnnots is: []
----0041----enter for_1, curr asCnt is: 27
----0042----annotStmt is:
----0043----list fcAnnots is: []
----0041----enter for_1, curr asCnt is: 28
----0042----annotStmt is: #pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
----0043----list fcAnnots is: [#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true ]
----00431----enter for_2 curr fcannot is: #pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
----00432----curr annotType is: tloop
----00433----curr bgn is: true
----0043A----curr annotType is not COM nor TRN
----0041----enter for_1, curr asCnt is: 29
----0042----annotStmt is: #pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
----0043----list fcAnnots is: [#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false ]
----00431----enter for_2 curr fcannot is: #pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
----00432----curr annotType is: tloop
----00433----curr bgn is: false
----0043A----curr annotType is not COM nor TRN
----0041----enter for_1, curr asCnt is: 30
----0042----annotStmt is:
----0043----list fcAnnots is: []
-----SFT2 finished decomposeKernel();-----
-----proc after decomposeKernel-----
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
-----6.0----generateMemcpy starts-----
----6.1----stmt is: int aBegin;
----6.11----test_children is: [int aBegin]
----6.12----test_annot is: null
----6.1----stmt is: int aEnd;
----6.11----test_children is: [int aEnd]
----6.12----test_annot is: null
----6.1----stmt is: int aStep;
----6.11----test_children is: [int aStep]
----6.12----test_annot is: null
----6.1----stmt is: int bBegin;
----6.11----test_children is: [int bBegin]
----6.12----test_annot is: null
----6.1----stmt is: int bStep;
----6.11----test_children is: [int bStep]
----6.12----test_annot is: null
----6.1----stmt is: __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
----6.11----test_children is: [__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X]]
----6.12----test_annot is: null
----6.1----stmt is: int a;
----6.11----test_children is: [int a]
----6.12----test_annot is: null
----6.1----stmt is: int b;
----6.11----test_children is: [int b]
----6.12----test_annot is: null
----6.1----stmt is: int k;
----6.11----test_children is: [int k]
----6.12----test_annot is: null
----6.1----stmt is: int c;
----6.11----test_children is: [int c]
----6.12----test_annot is: null
----6.1----stmt is: dim3 blockIdx;
----6.11----test_children is: [dim3 blockIdx]
----6.12----test_annot is: null
----6.1----stmt is: int guard_matrixMul_CMP_5;
----6.11----test_children is: [int guard_matrixMul_CMP_5]
----6.12----test_annot is: null
----6.1----stmt is: int guard_matrixMul_TRN_10;
----6.11----test_children is: [int guard_matrixMul_TRN_10]
----6.12----test_annot is: null
----6.1----stmt is: int guard_matrixMul_TRN_6;
----6.11----test_children is: [int guard_matrixMul_TRN_6]
----6.12----test_annot is: null
----6.1----stmt is: int guard_matrixMul_SNC_7;
----6.11----test_children is: [int guard_matrixMul_SNC_7]
----6.12----test_annot is: null
----6.1----stmt is: guard_matrixMul_SNC_7=1;
----6.11----test_children is: [guard_matrixMul_SNC_7=1]
----6.12----test_annot is: null
----6.1----stmt is: guard_matrixMul_TRN_6=1;
----6.11----test_children is: [guard_matrixMul_TRN_6=1]
----6.12----test_annot is: null
----6.1----stmt is: guard_matrixMul_TRN_10=1;
----6.11----test_children is: [guard_matrixMul_TRN_10=1]
----6.12----test_annot is: null
----6.1----stmt is: guard_matrixMul_CMP_5=1;
----6.11----test_children is: [guard_matrixMul_CMP_5=1]
----6.12----test_annot is: null
----6.1----stmt is: #pragma HLS INTERFACE ap_bus port=A depth=3840
----6.11----test_children is: []
----6.12----test_annot is: [#pragma HLS INTERFACE ap_bus port=A depth=3840 ]
----6.1----stmt is: #pragma HLS INTERFACE ap_bus port=B depth=6144
----6.11----test_children is: []
----6.12----test_annot is: [#pragma HLS INTERFACE ap_bus port=B depth=6144 ]
----6.1----stmt is: #pragma HLS INTERFACE ap_bus port=C depth=10240
----6.11----test_children is: []
----6.12----test_annot is: [#pragma HLS INTERFACE ap_bus port=C depth=10240 ]
----6.1----stmt is: aBegin=((wA*16)*blockIdx.y);
----6.11----test_children is: [aBegin=((wA*16)*blockIdx.y)]
----6.12----test_annot is: null
----6.1----stmt is: aEnd=((aBegin+wA)-1);
----6.11----test_children is: [aEnd=((aBegin+wA)-1)]
----6.12----test_annot is: null
----6.1----stmt is: aStep=16;
----6.11----test_children is: [aStep=16]
----6.12----test_annot is: null
----6.1----stmt is: bBegin=(16*blockIdx.x);
----6.11----test_children is: [bBegin=(16*blockIdx.x)]
----6.12----test_annot is: null
----6.1----stmt is: bStep=(16*wB);
----6.11----test_children is: [bStep=(16*wB)]
----6.12----test_annot is: null
----6.1----stmt is: matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
----6.11----test_children is: [matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block)]
----6.12----test_annot is: null
----6.1----stmt is: #pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
----6.11----test_children is: []
----6.12----test_annot is: [#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true ]
... Preprocessing pragma:
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
----6.1----stmt is: #pragma fcuda tloop name=CMP_5 end=false begin=true
----6.11----test_children is: []
----6.12----test_annot is: [#pragma fcuda tloop name=CMP_5 end=false begin=true ]
... Preprocessing pragma:
#pragma fcuda tloop name=CMP_5 end=false begin=true
----6.15----find tloop
----6.1----stmt is: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
----6.11----test_children is: [Csub_block[threadIdx.y][threadIdx.x]=0]
----6.12----test_annot is: [#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true ]
----6.13----find stmt annot
... Preprocessing pragma:
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
----6.1----stmt is: #pragma fcuda tloop name=CMP_5 end=true begin=false
----6.11----test_children is: []
----6.12----test_annot is: [#pragma fcuda tloop name=CMP_5 end=true begin=false ]
... Preprocessing pragma:
#pragma fcuda tloop name=CMP_5 end=true begin=false
----6.15----find tloop
----6.1----stmt is: #pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
----6.11----test_children is: []
----6.12----test_annot is: [#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false ]
... Preprocessing pragma:
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
----6.1----stmt is: a=0;
----6.11----test_children is: [a=0]
----6.12----test_annot is: null
----6.1----stmt is: b=0;
----6.11----test_children is: [b=0]
----6.12----test_annot is: null
----6.1----stmt is: k=0;
----6.11----test_children is: [k=0]
----6.12----test_annot is: null
----6.1----stmt is: ((a=aBegin), (b=bBegin));
----6.11----test_children is: [((a=aBegin), (b=bBegin))]
----6.12----test_annot is: null
----6.1----stmt is: __shared__ DATATYPE As[16][16];
----6.11----test_children is: [__shared__ DATATYPE As[16][16]]
----6.12----test_annot is: null
----6.1----stmt is: __shared__ DATATYPE Bs[16][16];
----6.11----test_children is: [__shared__ DATATYPE Bs[16][16]]
----6.12----test_annot is: null
----6.1----stmt is:
----6.11----test_children is: []
----6.12----test_annot is: []
----6.1----stmt is:
----6.11----test_children is: []
----6.12----test_annot is: []
----6.1----stmt is: matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
----6.11----test_children is: [matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB)]
----6.12----test_annot is: null
----6.1----stmt is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
----6.11----test_children is: []
----6.12----test_annot is: [#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true ]
... Preprocessing pragma:
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
----6.14----find transfer begin
----6.1----stmt is: #pragma fcuda tloop name=TRN_6 end=false begin=true
----6.11----test_children is: []
----6.12----test_annot is: [#pragma fcuda tloop name=TRN_6 end=false begin=true ]
... Preprocessing pragma:
#pragma fcuda tloop name=TRN_6 end=false begin=true
----6.15----find tloop
----6.1----stmt is: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
----6.11----test_children is: [As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)]]
----6.12----test_annot is: [#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true ]
----6.13----find stmt annot
... Preprocessing pragma:
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
----6.16----find stmt
----6.16----children size: 1
----6.160----stmtpragmas: []
----6.161----test_annot: [#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true ]
----6.162----real_stmt: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
----6.1----stmt is: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
----6.11----test_children is: [Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)]]
----6.12----test_annot is: [#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true ]
----6.13----find stmt annot
... Preprocessing pragma:
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
----6.16----find stmt
----6.16----children size: 1
----6.160----stmtpragmas: []
----6.161----test_annot: [#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true ]
----6.162----real_stmt: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
----6.1----stmt is: #pragma fcuda tloop name=TRN_6 end=true begin=false
----6.11----test_children is: []
----6.12----test_annot is: [#pragma fcuda tloop name=TRN_6 end=true begin=false ]
... Preprocessing pragma:
#pragma fcuda tloop name=TRN_6 end=true begin=false
----6.15----find tloop
----6.1----stmt is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
----6.11----test_children is: []
----6.12----test_annot is: [#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false ]
... Preprocessing pragma:
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
----6.17----find transfer end
----6.18----ready to enter addTransferParameters
----transferAnnot is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
----transferStmt is: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
----real_trn is: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
----taskDecls is: [dim3 blockDim, dim3 gridDim]
----taskArgs is: [blockDim, gridDim]
----taskArgSet is: [blockDim, gridDim]
----commonArgsIndex is: [0, 1]
----6.19----enter addTransferParameters2
... Handling transfer params for
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
----6.23----1.collecting parameters----
-------GLBpntr is: [B]
-------core: [1]
-------size: [BLOCKDIM_X]
-------rdNwrt: true
-------name: TRN_6
-------type: null
-------base: []
----6.24----transferAnnot.getAnnotatable: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
----6.25--------
----annotStmt: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
----cStmt: {
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
----transferStmtf2: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
---------------------------------
----6.26------ptrName is: B
----6.26------ptrId is: B
----6.27------ptrDecl is: DATATYPE * B
----6.28------ptrDeclor is: * B
----6.28------ptrDeclorSpecs is: [* ]
----6.29----ptrDecl.getSpecifiers(): [DATATYPE]
----6.30--volatPtrDecl: DATATYPE * B
----6.300----taskArgSet not contain ptrId, enter if
----6.31--transferStmtf2: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
transferStmtf2: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
----6.31--real_trn: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
[findBRAM]: Annotated Statement --> #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
---Assignments--- [Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)]]
----6.32--bramId: Bs
----6.33--bramDecl: __shared__ DATATYPE Bs[16][16]
------6.331---in try block now------
------6.331---in catch block now------
------6.332---taskArgSet not contain bramId, enter if------
BRAM: Bs Dim: 2
Array access, but dimension greater than 1 Bs[threadIdx.y][threadIdx.x]
------6.34----onChipOffset is: null
------6.34----prefixOffset is: TRN_6_Bs_offset
------6.34----coeffOffset is: TRN_6_Bs_offset
------6.34----offsetDeclor is: TRN_6_Bs_offset
------6.34----offsetDeclion is: int TRN_6_Bs_offset
----addStatementBefore----index is:4
------6.341----before taskArgs: [blockDim, gridDim, B, Bs]
------6.341----before taskDecls: [dim3 blockDim, dim3 gridDim, DATATYPE * B, __shared__ DATATYPE Bs[16][16]]
------6.341----after taskArgs: [blockDim, gridDim, B, Bs, TRN_6_Bs_offset]
------6.341----after taskDecls: [dim3 blockDim, dim3 gridDim, DATATYPE * B, __shared__ DATATYPE Bs[16][16], int TRN_6_Bs_offset]
------6.35----after offset added, cStmt is: {
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
------6.36----after memcpyArgs added, memcpyArgs is: [(TRN_6_Bs_offset+Bs[threadIdx.y])]
Parsing ((b+(wB*threadIdx.y))+threadIdx.x)
Terms [b, (wB*threadIdx.y), threadIdx.x]
Terms [threadIdx.y]
Base expr b
c1 = wB
c2 = null
c3 = 1
c4 = null
c5 = null
------6.37----baseAddrForBurst is: b
------6.370----j is: 0
------6.371----prefix is: TRN_6_Bs_X
------6.372----coeffVar is: TRN_6_Bs_X_0
------6.373----cDeclor is: TRN_6_Bs_X_0
------6.374----cDeclion is: int TRN_6_Bs_X_0
----addStatementBefore----index is:5
------6.38----taskArgs is: [blockDim, gridDim, B, Bs, TRN_6_Bs_offset, TRN_6_Bs_X_0]
------6.38----taskDecls is: [dim3 blockDim, dim3 gridDim, DATATYPE * B, __shared__ DATATYPE Bs[16][16], int TRN_6_Bs_offset, int TRN_6_Bs_X_0]
------6.38----cStmt is: {
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
------6.370----j is: 1
------6.371----prefix is: TRN_6_Bs_c
------6.372----coeffVar is: TRN_6_Bs_c_1
------6.373----cDeclor is: TRN_6_Bs_c_1
------6.374----cDeclion is: int TRN_6_Bs_c_1
----addStatementBefore----index is:6
------6.38----taskArgs is: [blockDim, gridDim, B, Bs, TRN_6_Bs_offset, TRN_6_Bs_X_0, TRN_6_Bs_c_1]
------6.38----taskDecls is: [dim3 blockDim, dim3 gridDim, DATATYPE * B, __shared__ DATATYPE Bs[16][16], int TRN_6_Bs_offset, int TRN_6_Bs_X_0, int TRN_6_Bs_c_1]
------6.38----cStmt is: {
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
------6.39----memcpyArgs is: [(TRN_6_Bs_offset+Bs[threadIdx.y]), ((B+TRN_6_Bs_X_0)+(threadIdx.y*TRN_6_Bs_c_1))]
------6.40----after memsize, memcpyArgs is: [(TRN_6_Bs_offset+Bs[threadIdx.y]), ((B+TRN_6_Bs_X_0)+(threadIdx.y*TRN_6_Bs_c_1)), (BLOCKDIM_X*sizeof (DATATYPE))]
------6.40----memcpyCall is: memcpy((TRN_6_Bs_offset+Bs[threadIdx.y]), ((B+TRN_6_Bs_X_0)+(threadIdx.y*TRN_6_Bs_c_1)), (BLOCKDIM_X*sizeof (DATATYPE)))
------6.42----after add memcpy before real_trn: {
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
--------sftTaskArgSyms: [Bs[16][16], Csub_block[BLOCKDIM_Y][BLOCKDIM_X], As[16][16], guard_matrixMul_SNC_7]
--------sftTaskArgs: [guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs]
--------sftTaskDecls: [int guard_matrixMul_SNC_7, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], __shared__ DATATYPE As[16][16], __shared__ DATATYPE Bs[16][16]]
--------sftCommonArgsIndex: [1, 2]
------6.44----before leave addTrans, cstmt is: {
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
----6.1----stmt is:
----6.11----test_children is: []
----6.12----test_annot is: []
----6.1----stmt is:
----6.11----test_children is: []
----6.12----test_annot is: []
----6.1----stmt is:
----6.11----test_children is: []
----6.12----test_annot is: []
----6.1----stmt is: matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
----6.11----test_children is: [matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs)]
----6.12----test_annot is: null
----6.1----stmt is: #pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
----6.11----test_children is: []
----6.12----test_annot is: [#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true ]
... Preprocessing pragma:
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
----6.1----stmt is: #pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
----6.11----test_children is: [__syncthreads()]
----6.12----test_annot is: [#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7 ]
----6.13----find stmt annot
... Preprocessing pragma:
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
----6.1----stmt is: lp1:
----6.11----test_children is: [lp1]
----6.12----test_annot is: null
----6.1----stmt is:
----6.11----test_children is: []
----6.12----test_annot is: []
----6.1----stmt is: #pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
----6.11----test_children is: []
----6.12----test_annot is: [#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true ]
... Preprocessing pragma:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
----6.15----find tloop
----6.1----stmt is: #pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0;
----6.11----test_children is: [k=0]
----6.12----test_annot is: [#pragma fcuda stmt tlpName=FOR_HTG_CMP_8 ]
----6.13----find stmt annot
... Preprocessing pragma:
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
----6.1----stmt is: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
----6.11----test_children is: [Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x])]
----6.12----test_annot is: [#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true ]
----6.13----find stmt annot
... Preprocessing pragma:
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
----6.1----stmt is: #pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
----6.11----test_children is: [#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);]
----6.12----test_annot is: [#pragma fcuda stmt tlpName=FOR_HTG_CMP_8 ]
----6.13----find stmt annot
... Preprocessing pragma:
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
----6.1----stmt is: #pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
----6.11----test_children is: [#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0;, k<16, ++ k, #pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}]
----6.12----test_annot is: [#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8 ]
----6.13----find stmt annot
... Preprocessing pragma:
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
----6.1----stmt is: #pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
----6.11----test_children is: []
----6.12----test_annot is: [#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false ]
... Preprocessing pragma:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
----6.15----find tloop
----6.1----stmt is:
----6.11----test_children is: []
----6.12----test_annot is: []
----6.1----stmt is: {
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
----6.11----test_children is: [lp1:, , #pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true , #pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}, #pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false , ]
----6.12----test_annot is: null
----6.1----stmt is: #pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
----6.11----test_children is: [__syncthreads()]
----6.12----test_annot is: [#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9 ]
----6.13----find stmt annot
... Preprocessing pragma:
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
----6.1----stmt is: #pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
----6.11----test_children is: []
----6.12----test_annot is: [#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false ]
... Preprocessing pragma:
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
----6.1----stmt is:
----6.11----test_children is: []
----6.12----test_annot is: []
----6.1----stmt is: {
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
----6.11----test_children is: [__shared__ DATATYPE As[16][16];, __shared__ DATATYPE Bs[16][16];, , , TRN_6_Bs_offset=0;, TRN_6_Bs_X_0=b;, TRN_6_Bs_c_1=wB;, matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);, #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true , #pragma fcuda tloop name=TRN_6 end=false begin=true , #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];, #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];, #pragma fcuda tloop name=TRN_6 end=true begin=false , #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false , , , , matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);, #pragma fcuda compute cores=1 name=SNC_7 end=false begin=true , #pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();, {
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}, #pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();, #pragma fcuda compute cores=1 name=SNC_7 end=true begin=false , ]
----6.12----test_annot is: null
----6.1----stmt is: #pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
----6.11----test_children is: [((a=aBegin), (b=bBegin));, a<=aEnd, ((a+=aStep), (b+=bStep)), {
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}]
----6.12----test_annot is: [#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6 ]
----6.13----find stmt annot
... Preprocessing pragma:
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
----6.1----stmt is: c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
----6.11----test_children is: [c=(((wB*16)*blockIdx.y)+(16*blockIdx.x))]
----6.12----test_annot is: null
----6.1----stmt is:
----6.11----test_children is: []
----6.12----test_annot is: []
----6.1----stmt is:
----6.11----test_children is: []
----6.12----test_annot is: []
----6.1----stmt is: matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
----6.11----test_children is: [matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB)]
----6.12----test_annot is: null
----6.1----stmt is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
----6.11----test_children is: []
----6.12----test_annot is: [#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true ]
... Preprocessing pragma:
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
----6.14----find transfer begin
----6.1----stmt is: #pragma fcuda tloop name=TRN_10 end=false begin=true
----6.11----test_children is: []
----6.12----test_annot is: [#pragma fcuda tloop name=TRN_10 end=false begin=true ]
... Preprocessing pragma:
#pragma fcuda tloop name=TRN_10 end=false begin=true
----6.15----find tloop
----6.1----stmt is: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
----6.11----test_children is: [C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x]]
----6.12----test_annot is: [#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true ]
----6.13----find stmt annot
... Preprocessing pragma:
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
----6.16----find stmt
----6.16----children size: 1
----6.160----stmtpragmas: []
----6.161----test_annot: [#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true ]
----6.162----real_stmt: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
----6.1----stmt is: #pragma fcuda tloop name=TRN_10 end=true begin=false
----6.11----test_children is: []
----6.12----test_annot is: [#pragma fcuda tloop name=TRN_10 end=true begin=false ]
... Preprocessing pragma:
#pragma fcuda tloop name=TRN_10 end=true begin=false
----6.15----find tloop
----6.1----stmt is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
----6.11----test_children is: []
----6.12----test_annot is: [#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false ]
... Preprocessing pragma:
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
----6.17----find transfer end
----6.18----ready to enter addTransferParameters
----transferAnnot is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
----transferStmt is: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
----real_trn is: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
----taskDecls is: [dim3 blockDim, dim3 gridDim]
----taskArgs is: [blockDim, gridDim]
----taskArgSet is: [blockDim, gridDim]
----commonArgsIndex is: [0, 1]
----6.19----enter addTransferParameters2
... Handling transfer params for
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
----6.23----1.collecting parameters----
-------GLBpntr is: [C]
-------core: [1]
-------size: [BLOCKDIM_X]
-------rdNwrt: false
-------name: TRN_10
-------type: null
-------base: []
----6.24----transferAnnot.getAnnotatable: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
----6.25--------
----annotStmt: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
----cStmt: {
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
----transferStmtf2: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
---------------------------------
----6.26------ptrName is: C
----6.26------ptrId is: C
----6.27------ptrDecl is: DATATYPE * C
----6.28------ptrDeclor is: * C
----6.28------ptrDeclorSpecs is: [* ]
----6.29----ptrDecl.getSpecifiers(): [DATATYPE]
----6.30--volatPtrDecl: DATATYPE * C
----6.300----taskArgSet not contain ptrId, enter if
----6.31--transferStmtf2: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
transferStmtf2: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
----6.31--real_trn: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
[findBRAM]: Annotated Statement --> #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
---Assignments--- [C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x]]
----6.32--bramId: Csub_block
----6.33--bramDecl: __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
------6.331---in try block now------
------6.331---in catch block now------
------6.332---taskArgSet not contain bramId, enter if------
BRAM: Csub_block Dim: 2
Array access, but dimension greater than 1 Csub_block[threadIdx.y][threadIdx.x]
------6.34----onChipOffset is: null
------6.34----prefixOffset is: TRN_10_Csub_block_offset
------6.34----coeffOffset is: TRN_10_Csub_block_offset
------6.34----offsetDeclor is: TRN_10_Csub_block_offset
------6.34----offsetDeclion is: int TRN_10_Csub_block_offset
----addStatementBefore----index is:44
------6.341----before taskArgs: [blockDim, gridDim, C, Csub_block]
------6.341----before taskDecls: [dim3 blockDim, dim3 gridDim, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X]]
------6.341----after taskArgs: [blockDim, gridDim, C, Csub_block, TRN_10_Csub_block_offset]
------6.341----after taskDecls: [dim3 blockDim, dim3 gridDim, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int TRN_10_Csub_block_offset]
------6.35----after offset added, cStmt is: {
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
------6.36----after memcpyArgs added, memcpyArgs is: [(TRN_10_Csub_block_offset+Csub_block[threadIdx.y])]
Parsing ((c+(wB*threadIdx.y))+threadIdx.x)
Terms [c, (wB*threadIdx.y), threadIdx.x]
Terms [threadIdx.y]
Base expr c
c1 = wB
c2 = null
c3 = 1
c4 = null
c5 = null
------6.37----baseAddrForBurst is: c
------6.370----j is: 0
------6.371----prefix is: TRN_10_Csub_block_X
------6.372----coeffVar is: TRN_10_Csub_block_X_0
------6.373----cDeclor is: TRN_10_Csub_block_X_0
------6.374----cDeclion is: int TRN_10_Csub_block_X_0
----addStatementBefore----index is:46
------6.38----taskArgs is: [blockDim, gridDim, C, Csub_block, TRN_10_Csub_block_offset, TRN_10_Csub_block_X_0]
------6.38----taskDecls is: [dim3 blockDim, dim3 gridDim, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int TRN_10_Csub_block_offset, int TRN_10_Csub_block_X_0]
------6.38----cStmt is: {
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
------6.370----j is: 1
------6.371----prefix is: TRN_10_Csub_block_c
------6.372----coeffVar is: TRN_10_Csub_block_c_1
------6.373----cDeclor is: TRN_10_Csub_block_c_1
------6.374----cDeclion is: int TRN_10_Csub_block_c_1
----addStatementBefore----index is:48
------6.38----taskArgs is: [blockDim, gridDim, C, Csub_block, TRN_10_Csub_block_offset, TRN_10_Csub_block_X_0, TRN_10_Csub_block_c_1]
------6.38----taskDecls is: [dim3 blockDim, dim3 gridDim, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int TRN_10_Csub_block_offset, int TRN_10_Csub_block_X_0, int TRN_10_Csub_block_c_1]
------6.38----cStmt is: {
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
------6.39----memcpyArgs is: [(TRN_10_Csub_block_offset+Csub_block[threadIdx.y]), ((C+TRN_10_Csub_block_X_0)+(threadIdx.y*TRN_10_Csub_block_c_1))]
------6.40----after memsize, memcpyArgs is: [((C+TRN_10_Csub_block_X_0)+(threadIdx.y*TRN_10_Csub_block_c_1)), (TRN_10_Csub_block_offset+Csub_block[threadIdx.y]), (BLOCKDIM_X*sizeof (DATATYPE))]
------6.40----memcpyCall is: memcpy(((C+TRN_10_Csub_block_X_0)+(threadIdx.y*TRN_10_Csub_block_c_1)), (TRN_10_Csub_block_offset+Csub_block[threadIdx.y]), (BLOCKDIM_X*sizeof (DATATYPE)))
------6.42----after add memcpy before real_trn: {
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
--------sftTaskArgSyms: [Bs[16][16], Csub_block[BLOCKDIM_Y][BLOCKDIM_X], As[16][16], guard_matrixMul_SNC_7]
--------sftTaskArgs: [guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs]
--------sftTaskDecls: [int guard_matrixMul_SNC_7, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], __shared__ DATATYPE As[16][16], __shared__ DATATYPE Bs[16][16]]
--------sftCommonArgsIndex: [1, 2]
------6.44----before leave addTrans, cstmt is: {
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
----6.1----stmt is:
----6.11----test_children is: []
----6.12----test_annot is: []
----6.1----stmt is:
----6.11----test_children is: []
----6.12----test_annot is: []
----6.1----stmt is: return ;
----6.11----test_children is: []
----6.12----test_annot is: null
----6.1----stmt is: {
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
----6.11----test_children is: [int aBegin;, int aEnd;, int aStep;, int bBegin;, int bStep;, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];, int a;, int b;, int k;, int c;, dim3 blockIdx;, int guard_matrixMul_CMP_5;, int guard_matrixMul_TRN_10;, int guard_matrixMul_TRN_6;, int guard_matrixMul_SNC_7;, int TRN_6_Bs_offset;, int TRN_6_Bs_X_0;, int TRN_6_Bs_c_1;, int TRN_10_Csub_block_offset;, int TRN_10_Csub_block_X_0;, int TRN_10_Csub_block_c_1;, guard_matrixMul_SNC_7=1;, guard_matrixMul_TRN_6=1;, guard_matrixMul_TRN_10=1;, guard_matrixMul_CMP_5=1;, #pragma HLS INTERFACE ap_bus port=A depth=3840 , #pragma HLS INTERFACE ap_bus port=B depth=6144 , #pragma HLS INTERFACE ap_bus port=C depth=10240 , aBegin=((wA*16)*blockIdx.y);, aEnd=((aBegin+wA)-1);, aStep=16;, bBegin=(16*blockIdx.x);, bStep=(16*wB);, matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);, #pragma fcuda compute cores=1 name=CMP_5 end=false begin=true , #pragma fcuda tloop name=CMP_5 end=false begin=true , #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;, #pragma fcuda tloop name=CMP_5 end=true begin=false , #pragma fcuda compute cores=1 name=CMP_5 end=true begin=false , a=0;, b=0;, k=0;, #pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}, c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));, , , TRN_10_Csub_block_offset=0;, TRN_10_Csub_block_X_0=c;, TRN_10_Csub_block_c_1=wB;, matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);, #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true , #pragma fcuda tloop name=TRN_10 end=false begin=true , #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];, #pragma fcuda tloop name=TRN_10 end=true begin=false , #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false , , , return ;]
----6.12----test_annot is: null
-----6.9----generateMemcpy ends-----
-----6.9----proc is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
return ;
}
-----start to fillDecomposedTasks-----
Moving task statement: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
Moving task statement: #pragma fcuda tloop name=TRN_6 end=false begin=true
Moving task statement: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Moving task statement: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
Moving task statement: #pragma fcuda tloop name=TRN_6 end=true begin=false
Moving task statement: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
Moving task statement: #pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
Moving task statement: #pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
Moving task statement: {
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
Moving task statement: #pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
Moving task statement: #pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
Moving task statement: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
Moving task statement: #pragma fcuda tloop name=TRN_10 end=false begin=true
Moving task statement: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
Moving task statement: #pragma fcuda tloop name=TRN_10 end=true begin=false
Moving task statement: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
Moving task statement: #pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
Moving task statement: #pragma fcuda tloop name=CMP_5 end=false begin=true
Moving task statement: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
Moving task statement: #pragma fcuda tloop name=CMP_5 end=true begin=false
Moving task statement: #pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
-----SFT2 finished fillDecomposedTasks();-----
-----proc after filldecomposedTasks-----
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
return ;
}
-----KernUnit after filldecomposedTasks-----
#include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
const int BLOCKDIM_X = 16, BLOCKDIM_Y = 16;
#pragma fcuda compute name=CMP_5 end=false cores=1 begin=true
void matrixMul_CMP_5(int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X])
{
if (guard_matrixMul_CMP_5)
{
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
}
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
void matrixMul_TRN_10(int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB)
{
if (guard_matrixMul_TRN_10)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
}
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
void matrixMul_TRN_6(int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE As[16][16], DATATYPE * A, int a, int wA, __shared__ DATATYPE Bs[16][16], DATATYPE * B, int b, int wB)
{
if (guard_matrixMul_TRN_6)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
}
}
#pragma fcuda compute name=SNC_7 end=false cores=1 begin=true
void matrixMul_SNC_7(int guard_matrixMul_SNC_7, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], __shared__ DATATYPE As[16][16], __shared__ DATATYPE Bs[16][16])
{
if (guard_matrixMul_SNC_7)
{
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
}
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
return ;
}
-----start to shiftDeclarations-----
----FcudaGlobalData2.java start----
-----cetus application IPChainAnalysis start----
-----cetus application IPChainAnalysis 1----
------IPChainAnalysis.java PerformAliasAnalysis start----
-------IPPointsToAnalysis.java IPPointsToAnalysis start super----
--------IPAnalysis.java IPAnalysis start 1----
--------IPAnalysis.java IPAnalysis start 2----
[NormalizeReturn] begin
*AP* procedure: __syncthreads
*AP* is_void=true
*AP* ret_expr= null
*AP* procedure: matrixMul_CMP_5
*AP* is_void=true
*AP* ret_expr= null
*AP* procedure: matrixMul_TRN_10
*AP* is_void=true
*AP* ret_expr= null
*AP* procedure: matrixMul_TRN_6
*AP* is_void=true
*AP* ret_expr= null
*AP* procedure: matrixMul_SNC_7
*AP* is_void=true
*AP* ret_expr= null
*AP* procedure: matrixMul
*AP* is_void=true
*AP* ret_expr= null
[NormalizeReturn] end in 0.00 seconds
[WARNING] Undeclared symbol k from k=0
[WARNING] Undeclared symbol k from k<16
[WARNING] Undeclared symbol k from ++ k
[WARNING] Undeclared symbol k from As[threadIdx.y][k]
[WARNING] Undeclared symbol k from Bs[k][threadIdx.x]
[LinkSymbol] 124 updates in 0.00 seconds
--------IPAnalysis.java IPAnalysis start 3 enter IPA Graph----
---------IPAGraph.java finish super()----
---------IPAGraph.java start new Arraylist <IPANode>----
---------IPAGraph.java enter buildgraph<prog>
---------IPAGraph.java enter identifyCloneableNodes
---------IPAGraph.java enter buildTopOrder
---------IPAGraph.java finish new IPA Graph
[IPA] Stops due to no flow entry
-------IPPointsToAnalysis.java IPPointsToAnalysis finished super----
-------IPPointsToAnalysis.java IPPointsToAnalysis finished 2----
-------IPPointsToAnalysis.java IPPointsToAnalysis end----
------PerformAliasAnalysis 1----
0000IPpointsToAnalysis
[IPA:PointsTo] Stops due to no flow entry
------PerformAliasAnalysis end----
-----cetus application IPChainAnalysis 2----
-----cetus application IPChainAnalysis 3----
----------------------------------------------
----start to generate CF graph with thread----
----------------------------------------------
proc now is: void __syncthreads()
{
;
return ;
}
=====
proc now is: #pragma fcuda compute name=CMP_5 end=false cores=1 begin=true
void matrixMul_CMP_5(int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X])
{
if (guard_matrixMul_CMP_5)
{
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
}
return ;
}
=====
proc now is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
void matrixMul_TRN_10(int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB)
{
if (guard_matrixMul_TRN_10)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
}
return ;
}
=====
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
proc now is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
void matrixMul_TRN_6(int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE As[16][16], DATATYPE * A, int a, int wA, __shared__ DATATYPE Bs[16][16], DATATYPE * B, int b, int wB)
{
if (guard_matrixMul_TRN_6)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
}
return ;
}
=====
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
proc now is: #pragma fcuda compute name=SNC_7 end=false cores=1 begin=true
void matrixMul_SNC_7(int guard_matrixMul_SNC_7, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], __shared__ DATATYPE As[16][16], __shared__ DATATYPE Bs[16][16])
{
if (guard_matrixMul_SNC_7)
{
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
return ;
}
=====
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
proc now is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
return ;
}
=====
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
====== DFAGraph.java enter getFirst()
-----cetus application IPChainAnalysis 4----
--------------------------------------------------------
----start to generate generate Program Summary Graph----
--------------------------------------------------------
############### PSG Summary Detail[__syncthreads()] #################
## Entry Node ##
# Ref Info (psg_entry_ref, use)
psg_entry_ref is null
# Global Info (psg_entry_global, UseOutSet)
psg_entry_global is null
## Exit Node ##
# Ref Info (psg_exit_ref, def)
psg_exit_ref is null
# Global Info (psg_exit_global, DefInSet)
psg_exit_global is null
############### PSG Summary Detail[matrixMul_CMP_5(int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X])] #################
## Entry Node ##
# Ref Info (psg_entry_ref, use)
# Global Info (psg_entry_global, UseOutSet)
## Exit Node ##
# Ref Info (psg_exit_ref, def)
-DEF: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-DEF: Csub_block, NODE: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
# Global Info (psg_exit_global, DefInSet)
############### PSG Summary Detail[matrixMul_TRN_10(int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB)] #################
## Entry Node ##
# Ref Info (psg_entry_ref, use)
-USE: Csub_block, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
# Global Info (psg_entry_global, UseOutSet)
## Exit Node ##
# Ref Info (psg_exit_ref, def)
-DEF: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-DEF: C, NODE: * C
-DEF: C, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
# Global Info (psg_exit_global, DefInSet)
############### PSG Summary Detail[matrixMul_TRN_6(int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE As[16][16], DATATYPE * A, int a, int wA, __shared__ DATATYPE Bs[16][16], DATATYPE * B, int b, int wB)] #################
## Entry Node ##
# Ref Info (psg_entry_ref, use)
# Global Info (psg_entry_global, UseOutSet)
## Exit Node ##
# Ref Info (psg_exit_ref, def)
-DEF: B, NODE: * B
-DEF: As, NODE: As[16][16]
-DEF: As, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
-DEF: A, NODE: * A
-DEF: Bs, NODE: Bs[16][16]
-DEF: Bs, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
# Global Info (psg_exit_global, DefInSet)
############### PSG Summary Detail[matrixMul_SNC_7(int guard_matrixMul_SNC_7, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], __shared__ DATATYPE As[16][16], __shared__ DATATYPE Bs[16][16])] #################
## Entry Node ##
# Ref Info (psg_entry_ref, use)
-USE: As, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-USE: Bs, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-USE: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
# Global Info (psg_entry_global, UseOutSet)
## Call Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
# Ref Info (psg_call_ref, def)
psg_call_ref is null
# Global Info (psg_call_global, DefInSet)
psg_call_global is null
## Return Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
# Ref Info (psg_return_ref, use)
psg_return_ref is null
# Global Info (psg_return_global, UseOutSet)
psg_return_global is null
## Call Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
# Ref Info (psg_call_ref, def)
psg_call_ref is null
# Global Info (psg_call_global, DefInSet)
psg_call_global is null
## Return Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
# Ref Info (psg_return_ref, use)
psg_return_ref is null
# Global Info (psg_return_global, UseOutSet)
psg_return_global is null
## Exit Node ##
# Ref Info (psg_exit_ref, def)
-DEF: Bs, NODE: Bs[16][16]
-DEF: As, NODE: As[16][16]
-DEF: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-DEF: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
# Global Info (psg_exit_global, DefInSet)
############### PSG Summary Detail[matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)] #################
## Entry Node ##
# Ref Info (psg_entry_ref, use)
-USE: B, NODE: matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
-USE: C, NODE: matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
-USE: A, NODE: matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
# Global Info (psg_entry_global, UseOutSet)
## Call Node ## IR: matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
# Ref Info (psg_call_ref, def)
# Global Info (psg_call_global, DefInSet)
## Return Node ## IR: matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
# Ref Info (psg_return_ref, use)
# Global Info (psg_return_global, UseOutSet)
## Call Node ## IR: matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
# Ref Info (psg_call_ref, def)
# Global Info (psg_call_global, DefInSet)
## Return Node ## IR: matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
# Ref Info (psg_return_ref, use)
# Global Info (psg_return_global, UseOutSet)
## Call Node ## IR: matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
# Ref Info (psg_call_ref, def)
# Global Info (psg_call_global, DefInSet)
## Return Node ## IR: matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
# Ref Info (psg_return_ref, use)
# Global Info (psg_return_global, UseOutSet)
## Call Node ## IR: matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
# Ref Info (psg_call_ref, def)
# Global Info (psg_call_global, DefInSet)
## Return Node ## IR: matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
# Ref Info (psg_return_ref, use)
# Global Info (psg_return_global, UseOutSet)
## Exit Node ##
# Ref Info (psg_exit_ref, def)
-DEF: B, NODE: * B
-DEF: B, NODE: matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
-DEF: C, NODE: matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
-DEF: A, NODE: * A
-DEF: A, NODE: matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
# Global Info (psg_exit_global, DefInSet)
############### PSG Propagated Detail[__syncthreads()] #################
## Entry Node ##
# Ref Info (psg_entry_ref)
psg_entry_ref is null
# Global Info (psg_entry_global)
psg_entry_global is null
## Exit Node ##
# Ref Info (psg_exit_ref)
psg_exit_ref is null
# Global Info (psg_exit_global)
psg_exit_global is null
############### PSG Propagated Detail[matrixMul_CMP_5(int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X])] #################
## Entry Node ##
# Ref Info (psg_entry_ref)
# parameter: Csub_block
-INuse: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-INuse: Csub_block, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
-OUTuse: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: Csub_block, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
# Global Info (psg_entry_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
## Exit Node ##
# Ref Info (psg_exit_ref)
-OUTdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-OUTdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
-INuse: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-INuse: Csub_block, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
-OUTuse: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: Csub_block, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
# Global Info (psg_exit_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
############### PSG Propagated Detail[matrixMul_TRN_10(int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB)] #################
## Entry Node ##
# Ref Info (psg_entry_ref)
# parameter: Csub_block
-INdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-INdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
-INdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-INdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-OUTdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
-OUTdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-OUTdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-INuse: Csub_block, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
# parameter: C
# Global Info (psg_entry_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
## Exit Node ##
# Ref Info (psg_exit_ref)
-INdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-INdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
-INdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-INdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-OUTdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-OUTdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
-OUTdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-OUTdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTdef: C, NODE: * C
-OUTdef: C, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
# Global Info (psg_exit_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
############### PSG Propagated Detail[matrixMul_TRN_6(int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE As[16][16], DATATYPE * A, int a, int wA, __shared__ DATATYPE Bs[16][16], DATATYPE * B, int b, int wB)] #################
## Entry Node ##
# Ref Info (psg_entry_ref)
# parameter: Bs
-INdef: Bs, NODE: Bs[16][16]
-INdef: Bs, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
-INdef: Bs, NODE: Bs[16][16]
-OUTdef: Bs, NODE: Bs[16][16]
-OUTdef: Bs, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
-OUTdef: Bs, NODE: Bs[16][16]
-INuse: Bs, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: Bs, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
# parameter: As
-INdef: As, NODE: As[16][16]
-INdef: As, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
-INdef: As, NODE: As[16][16]
-OUTdef: As, NODE: As[16][16]
-OUTdef: As, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
-OUTdef: As, NODE: As[16][16]
-INuse: As, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: As, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
# parameter: A
-INdef: A, NODE: * A
-OUTdef: A, NODE: * A
# parameter: B
-INdef: B, NODE: * B
-OUTdef: B, NODE: * B
# Global Info (psg_entry_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
## Exit Node ##
# Ref Info (psg_exit_ref)
-INdef: B, NODE: * B
-OUTdef: B, NODE: * B
-INdef: As, NODE: As[16][16]
-INdef: As, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
-INdef: As, NODE: As[16][16]
-OUTdef: As, NODE: As[16][16]
-OUTdef: As, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
-OUTdef: As, NODE: As[16][16]
-INuse: As, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: As, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-INdef: A, NODE: * A
-OUTdef: A, NODE: * A
-INdef: Bs, NODE: Bs[16][16]
-INdef: Bs, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
-INdef: Bs, NODE: Bs[16][16]
-OUTdef: Bs, NODE: Bs[16][16]
-OUTdef: Bs, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
-OUTdef: Bs, NODE: Bs[16][16]
-INuse: Bs, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: Bs, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
# Global Info (psg_exit_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
############### PSG Propagated Detail[matrixMul_SNC_7(int guard_matrixMul_SNC_7, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], __shared__ DATATYPE As[16][16], __shared__ DATATYPE Bs[16][16])] #################
## Entry Node ##
# Ref Info (psg_entry_ref)
# parameter: As
-INdef: As, NODE: As[16][16]
-INdef: As, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
-INdef: As, NODE: As[16][16]
-OUTdef: As, NODE: As[16][16]
-OUTdef: As, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
-OUTdef: As, NODE: As[16][16]
-INuse: As, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: As, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
# parameter: Bs
-INdef: Bs, NODE: Bs[16][16]
-INdef: Bs, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
-INdef: Bs, NODE: Bs[16][16]
-OUTdef: Bs, NODE: Bs[16][16]
-OUTdef: Bs, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
-OUTdef: Bs, NODE: Bs[16][16]
-INuse: Bs, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: Bs, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
# parameter: Csub_block
-INdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-INdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
-INdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-INdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-OUTdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
-OUTdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-OUTdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-INuse: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-INuse: Csub_block, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
-OUTuse: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: Csub_block, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
# Global Info (psg_entry_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
## Call Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
# Ref Info (psg_call_ref)
psg_call_ref is null
# Global Info (psg_call_global)
psg_call_global is null
## Return Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
# Ref Info (psg_return_ref)
psg_return_ref is null
# Global Info (psg_return_global)
psg_return_global is null
## Call Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
# Ref Info (psg_call_ref)
psg_call_ref is null
# Global Info (psg_call_global)
psg_call_global is null
## Return Node ## IR: #pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
# Ref Info (psg_return_ref)
psg_return_ref is null
# Global Info (psg_return_global)
psg_return_global is null
## Exit Node ##
# Ref Info (psg_exit_ref)
-INdef: Bs, NODE: Bs[16][16]
-INdef: Bs, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
-INdef: Bs, NODE: Bs[16][16]
-OUTdef: Bs, NODE: Bs[16][16]
-OUTdef: Bs, NODE: Bs[16][16]
-OUTdef: Bs, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
-INuse: Bs, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: Bs, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-INdef: As, NODE: As[16][16]
-INdef: As, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
-INdef: As, NODE: As[16][16]
-OUTdef: As, NODE: As[16][16]
-OUTdef: As, NODE: As[16][16]
-OUTdef: As, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
-INuse: As, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: As, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-INdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-INdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
-INdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-INdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-OUTdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-OUTdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
-INuse: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-INuse: Csub_block, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
-OUTuse: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: Csub_block, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
# Global Info (psg_exit_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
############### PSG Propagated Detail[matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)] #################
## Entry Node ##
# Ref Info (psg_entry_ref)
# parameter: B
-INuse: B, NODE: matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
# parameter: C
-INuse: C, NODE: matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
# parameter: A
-INuse: A, NODE: matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
# Global Info (psg_entry_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
## Call Node ## IR: matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
# Ref Info (psg_call_ref)
-INuse: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-INuse: Csub_block, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
-OUTuse: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: Csub_block, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
# Global Info (psg_call_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
## Return Node ## IR: matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
# Ref Info (psg_return_ref)
-INdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-INdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
-OUTdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-OUTdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
-INuse: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-INuse: Csub_block, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
-OUTuse: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: Csub_block, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
# Global Info (psg_return_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
## Call Node ## IR: matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
# Ref Info (psg_call_ref)
-INdef: Bs, NODE: Bs[16][16]
-INdef: Bs, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
-INdef: Bs, NODE: Bs[16][16]
-OUTdef: Bs, NODE: Bs[16][16]
-OUTdef: Bs, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
-OUTdef: Bs, NODE: Bs[16][16]
-INuse: Bs, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: Bs, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-INdef: A, NODE: * A
-OUTdef: A, NODE: * A
-INdef: As, NODE: As[16][16]
-INdef: As, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
-INdef: As, NODE: As[16][16]
-OUTdef: As, NODE: As[16][16]
-OUTdef: As, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
-OUTdef: As, NODE: As[16][16]
-INuse: As, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: As, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-INdef: B, NODE: * B
-OUTdef: B, NODE: * B
# Global Info (psg_call_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
## Return Node ## IR: matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
# Ref Info (psg_return_ref)
-INdef: B, NODE: * B
-OUTdef: B, NODE: * B
-INdef: A, NODE: * A
-OUTdef: A, NODE: * A
-INdef: As, NODE: As[16][16]
-INdef: As, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
-INdef: As, NODE: As[16][16]
-OUTdef: As, NODE: As[16][16]
-OUTdef: As, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
-OUTdef: As, NODE: As[16][16]
-INuse: As, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: As, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-INdef: Bs, NODE: Bs[16][16]
-INdef: Bs, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
-INdef: Bs, NODE: Bs[16][16]
-OUTdef: Bs, NODE: Bs[16][16]
-OUTdef: Bs, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
-OUTdef: Bs, NODE: Bs[16][16]
-INuse: Bs, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: Bs, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
# Global Info (psg_return_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
## Call Node ## IR: matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
# Ref Info (psg_call_ref)
-INdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-INdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
-INdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-INdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-OUTdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
-OUTdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-OUTdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-INuse: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-INuse: Csub_block, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
-OUTuse: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: Csub_block, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
-INdef: As, NODE: As[16][16]
-INdef: As, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
-INdef: As, NODE: As[16][16]
-OUTdef: As, NODE: As[16][16]
-OUTdef: As, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
-OUTdef: As, NODE: As[16][16]
-INuse: As, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: As, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-INdef: Bs, NODE: Bs[16][16]
-INdef: Bs, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
-INdef: Bs, NODE: Bs[16][16]
-OUTdef: Bs, NODE: Bs[16][16]
-OUTdef: Bs, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
-OUTdef: Bs, NODE: Bs[16][16]
-INuse: Bs, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: Bs, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
# Global Info (psg_call_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
## Return Node ## IR: matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
# Ref Info (psg_return_ref)
-INdef: As, NODE: As[16][16]
-INdef: As, NODE: As[16][16]
-INdef: As, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
-OUTdef: As, NODE: As[16][16]
-OUTdef: As, NODE: As[16][16]
-OUTdef: As, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
-INuse: As, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: As, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-INdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-INdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-INdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-INdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
-OUTdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-OUTdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-OUTdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
-INuse: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-INuse: Csub_block, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
-OUTuse: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: Csub_block, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
-INdef: Bs, NODE: Bs[16][16]
-INdef: Bs, NODE: Bs[16][16]
-INdef: Bs, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
-OUTdef: Bs, NODE: Bs[16][16]
-OUTdef: Bs, NODE: Bs[16][16]
-OUTdef: Bs, NODE: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
-INuse: Bs, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTuse: Bs, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
# Global Info (psg_return_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
## Call Node ## IR: matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
# Ref Info (psg_call_ref)
-INdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-INdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
-INdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-INdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-OUTdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
-OUTdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-OUTdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-INuse: Csub_block, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
-OUTuse: Csub_block, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
# Global Info (psg_call_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
## Return Node ## IR: matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
# Ref Info (psg_return_ref)
-INdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-INdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-INdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
-INdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-INdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-OUTdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-OUTdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-OUTdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
-OUTdef: Csub_block, NODE: Csub_block[BLOCKDIM_Y][BLOCKDIM_X]
-OUTdef: Csub_block, NODE: #pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
-INdef: C, NODE: * C
-INdef: C, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
-OUTdef: C, NODE: * C
-OUTdef: C, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
# Global Info (psg_return_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
## Exit Node ##
# Ref Info (psg_exit_ref)
-INdef: B, NODE: * B
-OUTdef: B, NODE: * B
-OUTdef: B, NODE: matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
-OUTdef: B, NODE: * B
-INdef: C, NODE: * C
-INdef: C, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
-OUTdef: C, NODE: matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
-OUTdef: C, NODE: * C
-OUTdef: C, NODE: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
-INdef: A, NODE: * A
-OUTdef: A, NODE: * A
-OUTdef: A, NODE: matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
-OUTdef: A, NODE: * A
# Global Info (psg_exit_global)
-INdef: {}
-OUTdef: {}
-INuse: {}
-OUTuse: {}
-----cetus application IPChainAnalysis 5----
-----cetus application IPChainAnalysis 6----
-----cetus application IPChainAnalysis 7----
-----cetus application IPChainAnalysis end----
----FcudaGlobalData2.java end----
Found def k with IR: k=0;
isExist? true
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
return ;
}
-----proc after shiftDeclarations();-----
-----kernelTransformPass, finish transformProcedure-----, proc now is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
return ;
}
[SplitFcudaTasks2-FCUDA] end in 0.29 seconds
[LinkSymbol] 129 updates in 0.00 seconds
*** After SplitFcudaTasks2 ***
#include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
const int BLOCKDIM_X = 16, BLOCKDIM_Y = 16;
#pragma fcuda compute name=CMP_5 end=false cores=1 begin=true
void matrixMul_CMP_5(int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X])
{
if (guard_matrixMul_CMP_5)
{
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
}
return ;
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
void matrixMul_TRN_10(int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB)
{
if (guard_matrixMul_TRN_10)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
}
return ;
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
void matrixMul_TRN_6(int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE As[16][16], DATATYPE * A, int a, int wA, __shared__ DATATYPE Bs[16][16], DATATYPE * B, int b, int wB)
{
if (guard_matrixMul_TRN_6)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
}
return ;
}
#pragma fcuda compute name=SNC_7 end=false cores=1 begin=true
void matrixMul_SNC_7(int guard_matrixMul_SNC_7, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], __shared__ DATATYPE As[16][16], __shared__ DATATYPE Bs[16][16])
{
if (guard_matrixMul_SNC_7)
{
int k;
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
return ;
}
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
return ;
}
===========================================
[CleanKernelDecls-FCUDA] begin
[CleanKernelDecls-FCUDA] examining procedure matrixMul
cur_level:0
Defs+Uses:[guard_matrixMul_SNC_7]
Defs+Uses:[guard_matrixMul_TRN_6]
Defs+Uses:[guard_matrixMul_TRN_10]
Defs+Uses:[guard_matrixMul_CMP_5]
Defs+Uses:[]
Defs+Uses:[]
Defs+Uses:[]
Defs+Uses:[aBegin, blockIdx, blockIdx.y, wA]
Defs+Uses:[aBegin, aEnd, wA]
Defs+Uses:[aStep]
Defs+Uses:[bBegin, blockIdx, blockIdx.x]
Defs+Uses:[bStep, wB]
Defs+Uses:[Csub_block, blockDim, blockIdx, gridDim, guard_matrixMul_CMP_5, matrixMul_CMP_5]
Defs+Uses:[a]
Defs+Uses:[b]
Defs+Uses:[k]
cur_level:1
Defs+Uses:[]
Defs+Uses:[]
Defs+Uses:[TRN_6_Bs_offset]
Defs+Uses:[TRN_6_Bs_X_0, b]
Defs+Uses:[TRN_6_Bs_c_1, wB]
Defs+Uses:[A, As, B, Bs, a, b, blockDim, blockIdx, gridDim, guard_matrixMul_TRN_6, matrixMul_TRN_6, wA, wB]
Defs+Uses:[]
Defs+Uses:[]
Defs+Uses:[]
Defs+Uses:[As, Bs, Csub_block, blockDim, blockIdx, gridDim, guard_matrixMul_SNC_7, matrixMul_SNC_7]
Defs+Uses:[]
cur_level:1
var2freqMap{As=2, Bs=2}
funcCallParams[A, As, B, Bs, Csub_block, a, b, blockDim, blockIdx, gridDim, guard_matrixMul_CMP_5, guard_matrixMul_SNC_7, guard_matrixMul_TRN_6, wA, wB]
Defs+Uses:[blockIdx, blockIdx.x, blockIdx.y, c, wB]
Defs+Uses:[]
Defs+Uses:[]
Defs+Uses:[TRN_10_Csub_block_offset]
Defs+Uses:[TRN_10_Csub_block_X_0, c]
Defs+Uses:[TRN_10_Csub_block_c_1, wB]
Defs+Uses:[C, Csub_block, blockDim, blockIdx, c, gridDim, guard_matrixMul_TRN_10, matrixMul_TRN_10, wB]
Defs+Uses:[]
Defs+Uses:[]
Defs+Uses:[]
cur_level:0
var2freqMap{Csub_block=3, TRN_10_Csub_block_X_0=1, TRN_10_Csub_block_c_1=1, TRN_10_Csub_block_offset=1, TRN_6_Bs_X_0=1, TRN_6_Bs_c_1=1, TRN_6_Bs_offset=1, a=5, aBegin=3, aEnd=2, aStep=2, b=5, bBegin=2, bStep=2, c=3, guard_matrixMul_CMP_5=2, guard_matrixMul_SNC_7=2, guard_matrixMul_TRN_10=2, guard_matrixMul_TRN_6=2, k=1}
funcCallParams[A, As, B, Bs, C, Csub_block, a, b, blockDim, blockIdx, c, gridDim, guard_matrixMul_CMP_5, guard_matrixMul_SNC_7, guard_matrixMul_TRN_10, guard_matrixMul_TRN_6, wA, wB]
-----kernelTransformPass, finish transformProcedure-----, proc now is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
return ;
}
[CleanKernelDecls-FCUDA] end in 0.01 seconds
[LinkSymbol] 129 updates in 0.00 seconds
*** After CleanKernelDecls ***
#include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
const int BLOCKDIM_X = 16, BLOCKDIM_Y = 16;
#pragma fcuda compute name=CMP_5 end=false cores=1 begin=true
void matrixMul_CMP_5(int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X])
{
if (guard_matrixMul_CMP_5)
{
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
}
return ;
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
void matrixMul_TRN_10(int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB)
{
if (guard_matrixMul_TRN_10)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
}
return ;
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
void matrixMul_TRN_6(int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE As[16][16], DATATYPE * A, int a, int wA, __shared__ DATATYPE Bs[16][16], DATATYPE * B, int b, int wB)
{
if (guard_matrixMul_TRN_6)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
}
return ;
}
#pragma fcuda compute name=SNC_7 end=false cores=1 begin=true
void matrixMul_SNC_7(int guard_matrixMul_SNC_7, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], __shared__ DATATYPE As[16][16], __shared__ DATATYPE Bs[16][16])
{
if (guard_matrixMul_SNC_7)
{
int k;
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
return ;
}
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
return ;
}
===========================================
[SerializeThreads2-MCUDA] begin
[SerializeThreads2-MCUDA] examining procedure matrixMul
----ST2-enter ST2 and if (Driver.getOptionValue(Fcuda) != null)
-----ST2-List<Procedure> tskLst = FCUDAutils.getTaskMapping(proc.getSymbolName());
------ST2-if(tskLst != null)
-------ST2-for( Procedure task : tskLst ), task is: #pragma fcuda compute name=CMP_5 end=false cores=1 begin=true
void matrixMul_CMP_5(int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X])
{
if (guard_matrixMul_CMP_5)
{
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
}
return ;
}
------InsertTLoops flag 1------
------InsertTLoops flag 2------
------InsertTLoops flag 2------
------InsertTLoops flag 3------
------InsertTLoops flag 2------
------InsertTLoops flag 4------
------InsertTLoops flag 5------
------InsertTLoops flag 6------
------InsertTLoops flag 6.1------
------InsertTLoops flag 6.2------
------InsertTLoops flag 6.2------tloopstmts.get(idx) is: #pragma fcuda tloop name=CMP_5 end=false begin=true
------InsertTLoops flag 6.3------
------InsertTLoops flag 6.1------
------InsertTLoops flag 6.2------
------InsertTLoops flag 6.2------tloopstmts.get(idx) is: #pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
------InsertTLoops flag 6.3------
------InsertTLoops flag 6.4------
------InsertTLoops flag 6.5------
------InsertTLoops flag 7------
------InsertTLoops flag 2------
-------ST2-for( Procedure task : tskLst ), finish current insertTLoops
-------ST2-for( Procedure task : tskLst ), task is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
void matrixMul_TRN_10(int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB)
{
if (guard_matrixMul_TRN_10)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
}
return ;
}
------InsertTLoops flag 1------
------InsertTLoops flag 2------
------InsertTLoops flag 2------
------InsertTLoops flag 3------
------InsertTLoops flag 2------
------InsertTLoops flag 4------
------InsertTLoops flag 5------
------InsertTLoops flag 6------
------InsertTLoops flag 6.1------
------InsertTLoops flag 6.2------
------InsertTLoops flag 6.2------tloopstmts.get(idx) is: #pragma fcuda tloop name=TRN_10 end=false begin=true
------InsertTLoops flag 6.3------
------InsertTLoops flag 6.1------
------InsertTLoops flag 6.2------
------InsertTLoops flag 6.2------tloopstmts.get(idx) is: #pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
------InsertTLoops flag 6.3------
------InsertTLoops flag 6.4------
------InsertTLoops flag 6.5------
------InsertTLoops flag 7------
------InsertTLoops flag 2------
-------ST2-for( Procedure task : tskLst ), finish current insertTLoops
-------ST2-for( Procedure task : tskLst ), task is: #pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
void matrixMul_TRN_6(int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE As[16][16], DATATYPE * A, int a, int wA, __shared__ DATATYPE Bs[16][16], DATATYPE * B, int b, int wB)
{
if (guard_matrixMul_TRN_6)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
}
return ;
}
------InsertTLoops flag 1------
------InsertTLoops flag 2------
------InsertTLoops flag 2------
------InsertTLoops flag 3------
------InsertTLoops flag 2------
------InsertTLoops flag 4------
------InsertTLoops flag 5------
------InsertTLoops flag 6------
------InsertTLoops flag 6.1------
------InsertTLoops flag 6.2------
------InsertTLoops flag 6.2------tloopstmts.get(idx) is: #pragma fcuda tloop name=TRN_6 end=false begin=true
------InsertTLoops flag 6.3------
------InsertTLoops flag 6.1------
------InsertTLoops flag 6.2------
------InsertTLoops flag 6.2------tloopstmts.get(idx) is: #pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
------InsertTLoops flag 6.3------
------InsertTLoops flag 6.1------
------InsertTLoops flag 6.2------
------InsertTLoops flag 6.2------tloopstmts.get(idx) is: #pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
------InsertTLoops flag 6.3------
------InsertTLoops flag 6.4------
------InsertTLoops flag 6.5------
------InsertTLoops flag 7------
------InsertTLoops flag 2------
-------ST2-for( Procedure task : tskLst ), finish current insertTLoops
-------ST2-for( Procedure task : tskLst ), task is: #pragma fcuda compute name=SNC_7 end=false cores=1 begin=true
void matrixMul_SNC_7(int guard_matrixMul_SNC_7, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], __shared__ DATATYPE As[16][16], __shared__ DATATYPE Bs[16][16])
{
if (guard_matrixMul_SNC_7)
{
int k;
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
return ;
}
------InsertTLoops flag 1------
------InsertTLoops flag 2------
------InsertTLoops flag 2------
------InsertTLoops flag 2------
------InsertTLoops flag 2------
------InsertTLoops flag 3------
------InsertTLoops flag 2------
------InsertTLoops flag 4------
------InsertTLoops flag 5------
------InsertTLoops flag 6------
------InsertTLoops flag 6.1------
------InsertTLoops flag 6.2------
------InsertTLoops flag 6.2------tloopstmts.get(idx) is: #pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
------InsertTLoops flag 6.3------
------InsertTLoops flag 6.1------
------InsertTLoops flag 6.2------
------InsertTLoops flag 6.2------tloopstmts.get(idx) is: #pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
------InsertTLoops flag 6.3------
------InsertTLoops flag 6.4------
------InsertTLoops flag 6.5------
------InsertTLoops flag 7------
------InsertTLoops flag 2------
-------ST2-for( Procedure task : tskLst ), finish current insertTLoops
-----kernelTransformPass, finish transformProcedure-----, proc now is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
return ;
}
[SerializeThreads2-MCUDA] end in 0.01 seconds
[LinkSymbol] 129 updates in 0.00 seconds
*** After SerializeThreads2 ***
#include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
const int BLOCKDIM_X = 16, BLOCKDIM_Y = 16;
#pragma fcuda compute name=CMP_5 end=false cores=1 begin=true
void matrixMul_CMP_5(int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X])
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_CMP_5)
{
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
}
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
}
return ;
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
void matrixMul_TRN_10(int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB)
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_TRN_10)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
}
return ;
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
void matrixMul_TRN_6(int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE As[16][16], DATATYPE * A, int a, int wA, __shared__ DATATYPE Bs[16][16], DATATYPE * B, int b, int wB)
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_TRN_6)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
}
return ;
}
#pragma fcuda compute name=SNC_7 end=false cores=1 begin=true
void matrixMul_SNC_7(int guard_matrixMul_SNC_7, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], __shared__ DATATYPE As[16][16], __shared__ DATATYPE Bs[16][16])
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_SNC_7)
{
int k;
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
return ;
}
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
return ;
}
===========================================
[CleanThreadLoops2-FCUDA] begin
[CleanThreadLoops2-FCUDA] examining procedure matrixMul
mVar2Var:
{threadIdx=[], guard_matrixMul_CMP_5=[], blockDim=[], Csub_block=[threadIdx]}
TLP Thread-Indep Stmts: []
mVar2Var:
{threadIdx=[], C=[threadIdx, c, wB], c=[], guard_matrixMul_TRN_10=[], blockDim=[], wB=[], Csub_block=[]}
TLP Thread-Indep Stmts: []
mVar2Var:
{guard_matrixMul_TRN_6=[], A=[], a=[], Bs=[threadIdx, b, wB], threadIdx=[], As=[a, threadIdx, wA], B=[], b=[], blockDim=[], wA=[], wB=[]}
TLP Thread-Indep Stmts: []
mVar2Var:
{Bs=[], threadIdx=[], As=[], blockDim=[], guard_matrixMul_SNC_7=[], k=[k], Csub_block=[threadIdx, k]}
TLP Thread-Indep Stmts: []
-----kernelTransformPass, finish transformProcedure-----, proc now is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
return ;
}
[CleanThreadLoops2-FCUDA] end in 0.02 seconds
[LinkSymbol] 129 updates in 0.00 seconds
*** After CleanThreadLoops2 ***
#include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
const int BLOCKDIM_X = 16, BLOCKDIM_Y = 16;
#pragma fcuda compute name=CMP_5 end=false cores=1 begin=true
void matrixMul_CMP_5(int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X])
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_CMP_5)
{
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
}
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
}
return ;
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
void matrixMul_TRN_10(int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB)
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_TRN_10)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
}
return ;
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
void matrixMul_TRN_6(int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE As[16][16], DATATYPE * A, int a, int wA, __shared__ DATATYPE Bs[16][16], DATATYPE * B, int b, int wB)
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_TRN_6)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
}
return ;
}
#pragma fcuda compute name=SNC_7 end=false cores=1 begin=true
void matrixMul_SNC_7(int guard_matrixMul_SNC_7, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], __shared__ DATATYPE As[16][16], __shared__ DATATYPE Bs[16][16])
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_SNC_7)
{
int k;
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
return ;
}
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
return ;
}
===========================================
[UnrollThreadLoops2-MCUDA] begin
[UnrollThreadLoops2-MCUDA] examining procedure matrixMul
[Unrolling] : matrixMul_CMP_5
[unrollFactor] 1
[Unrolling] : matrixMul_SNC_7
[unrollFactor] 1
mUnrolledIDs:
{}
-----kernelTransformPass, finish transformProcedure-----, proc now is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
return ;
}
[UnrollThreadLoops2-MCUDA] end in 0.00 seconds
[LinkSymbol] 129 updates in 0.00 seconds
*** After UnrollThreadLoops2 ***
#include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
const int BLOCKDIM_X = 16, BLOCKDIM_Y = 16;
#pragma fcuda compute name=CMP_5 end=false cores=1 begin=true
void matrixMul_CMP_5(int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X])
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_CMP_5)
{
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
}
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
}
return ;
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
void matrixMul_TRN_10(int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB)
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_TRN_10)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
}
return ;
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
void matrixMul_TRN_6(int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE As[16][16], DATATYPE * A, int a, int wA, __shared__ DATATYPE Bs[16][16], DATATYPE * B, int b, int wB)
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_TRN_6)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
}
return ;
}
#pragma fcuda compute name=SNC_7 end=false cores=1 begin=true
void matrixMul_SNC_7(int guard_matrixMul_SNC_7, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], __shared__ DATATYPE As[16][16], __shared__ DATATYPE Bs[16][16])
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_SNC_7)
{
int k;
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
return ;
}
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
return ;
}
===========================================
[PartitionArrays2-MCUDA] begin
[PartitionArrays2-MCUDA] examining procedure matrixMul
[Memory partition] : matrixMul_CMP_5
[mempartFactor]1
[Memory partition] : matrixMul_TRN_10
[mempartFactor]1
[Memory partition] : matrixMul_TRN_6
[mempartFactor]1
[Memory partition] : matrixMul_SNC_7
[mempartFactor]1
[Memory partition] : matrixMul
-----kernelTransformPass, finish transformProcedure-----, proc now is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
return ;
}
[PartitionArrays2-MCUDA] end in 0.00 seconds
[LinkSymbol] 129 updates in 0.00 seconds
*** After PartitionArrays2 ***
#include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
const int BLOCKDIM_X = 16, BLOCKDIM_Y = 16;
#pragma fcuda compute name=CMP_5 end=false cores=1 begin=true
void matrixMul_CMP_5(int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X])
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_CMP_5)
{
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
}
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
}
return ;
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
void matrixMul_TRN_10(int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB)
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_TRN_10)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
}
return ;
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
void matrixMul_TRN_6(int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE As[16][16], DATATYPE * A, int a, int wA, __shared__ DATATYPE Bs[16][16], DATATYPE * B, int b, int wB)
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_TRN_6)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
}
return ;
}
#pragma fcuda compute name=SNC_7 end=false cores=1 begin=true
void matrixMul_SNC_7(int guard_matrixMul_SNC_7, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], __shared__ DATATYPE As[16][16], __shared__ DATATYPE Bs[16][16])
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_SNC_7)
{
int k;
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
return ;
}
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
return ;
}
===========================================
[WrapBlockIdxLoop2-FCUDA] begin
[WrapBlockIdxLoop2-FCUDA] examining procedure matrixMul
-----kernelTransformPass, finish transformProcedure-----, proc now is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB, dim3 gridDim, dim3 blockDim)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
dim3 blockIdx_loop;
for (blockIdx_loop.y=0; (gridDim.y+(-1*blockIdx_loop.y))>0; blockIdx_loop.y+=1)
{
blockIdx.y=blockIdx_loop.y;
for (blockIdx_loop.x=0; (gridDim.x+(-1*blockIdx_loop.x))>0; blockIdx_loop.x+=1)
{
blockIdx.x=blockIdx_loop.x;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
}
}
}
[WrapBlockIdxLoop2-FCUDA] end in 0.01 seconds
[LinkSymbol] 137 updates in 0.00 seconds
*** After WrapBlockIdxLoop2 ***
#include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
const int BLOCKDIM_X = 16, BLOCKDIM_Y = 16;
#pragma fcuda compute name=CMP_5 end=false cores=1 begin=true
void matrixMul_CMP_5(int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X])
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_CMP_5)
{
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
}
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
}
return ;
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
void matrixMul_TRN_10(int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB)
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_TRN_10)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
}
return ;
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
void matrixMul_TRN_6(int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE As[16][16], DATATYPE * A, int a, int wA, __shared__ DATATYPE Bs[16][16], DATATYPE * B, int b, int wB)
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_TRN_6)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
}
return ;
}
#pragma fcuda compute name=SNC_7 end=false cores=1 begin=true
void matrixMul_SNC_7(int guard_matrixMul_SNC_7, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], __shared__ DATATYPE As[16][16], __shared__ DATATYPE Bs[16][16])
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_SNC_7)
{
int k;
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
return ;
}
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB, dim3 gridDim, dim3 blockDim)
{
int aBegin;
int aEnd;
int aStep;
int bBegin;
int bStep;
__shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X];
int a;
int b;
int k;
int c;
dim3 blockIdx;
int guard_matrixMul_CMP_5;
int guard_matrixMul_TRN_10;
int guard_matrixMul_TRN_6;
int guard_matrixMul_SNC_7;
int TRN_6_Bs_offset;
int TRN_6_Bs_X_0;
int TRN_6_Bs_c_1;
int TRN_10_Csub_block_offset;
int TRN_10_Csub_block_X_0;
int TRN_10_Csub_block_c_1;
dim3 blockIdx_loop;
for (blockIdx_loop.y=0; (gridDim.y+(-1*blockIdx_loop.y))>0; blockIdx_loop.y+=1)
{
blockIdx.y=blockIdx_loop.y;
for (blockIdx_loop.x=0; (gridDim.x+(-1*blockIdx_loop.x))>0; blockIdx_loop.x+=1)
{
blockIdx.x=blockIdx_loop.x;
guard_matrixMul_SNC_7=1;
guard_matrixMul_TRN_6=1;
guard_matrixMul_TRN_10=1;
guard_matrixMul_CMP_5=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin=((wA*16)*blockIdx.y);
aEnd=((aBegin+wA)-1);
aStep=16;
bBegin=(16*blockIdx.x);
bStep=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block);
a=0;
b=0;
k=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a=aBegin), (b=bBegin)); a<=aEnd; ((a+=aStep), (b+=bStep)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset=0;
TRN_6_Bs_X_0=b;
TRN_6_Bs_c_1=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs);
}
c=(((wB*16)*blockIdx.y)+(16*blockIdx.x));
TRN_10_Csub_block_offset=0;
TRN_10_Csub_block_X_0=c;
TRN_10_Csub_block_c_1=wB;
matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB);
}
}
}
===========================================
[DuplicateForFCUDA2-FCUDA] begin
[DuplicateForFCUDA2-FCUDA] examining procedure matrixMul
matrixMul is inStreamProc: false
fcudaCores:
[matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block), matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB), matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB), matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs)]
coreNames:
[matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block), matrixMul_SNC_7(guard_matrixMul_SNC_7, blockDim, gridDim, blockIdx, Csub_block, As, Bs), matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB), matrixMul_TRN_6(guard_matrixMul_TRN_6, blockDim, gridDim, blockIdx, As, A, a, wA, Bs, B, b, wB)]
------------------------
mVarsToDuplicate: []
mVarsToDuplicate: [a, guard_matrixMul_TRN_6, bStep, b, c, guard_matrixMul_TRN_10, TRN_10_Csub_block_offset, guard_matrixMul_SNC_7, bBegin, k, TRN_10_Csub_block_X_0, blockIdx, TRN_6_Bs_X_0, aStep, aEnd, guard_matrixMul_CMP_5, TRN_6_Bs_offset, TRN_10_Csub_block_c_1, Csub_block, TRN_6_Bs_c_1, aBegin]
mId2sym: {Csub_block_block0=Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], TRN_10_Csub_block_X_0_block0=TRN_10_Csub_block_X_0_block0, TRN_10_Csub_block_c_1_block0=TRN_10_Csub_block_c_1_block0, TRN_10_Csub_block_offset_block0=TRN_10_Csub_block_offset_block0, TRN_6_Bs_X_0_block0=TRN_6_Bs_X_0_block0, TRN_6_Bs_c_1_block0=TRN_6_Bs_c_1_block0, TRN_6_Bs_offset_block0=TRN_6_Bs_offset_block0, aBegin_block0=aBegin_block0, aEnd_block0=aEnd_block0, aStep_block0=aStep_block0, a_block0=a_block0, bBegin_block0=bBegin_block0, bStep_block0=bStep_block0, b_block0=b_block0, blockIdx_block0=blockIdx_block0, c_block0=c_block0, guard_matrixMul_CMP_5_block0=guard_matrixMul_CMP_5_block0, guard_matrixMul_SNC_7_block0=guard_matrixMul_SNC_7_block0, guard_matrixMul_TRN_10_block0=guard_matrixMul_TRN_10_block0, guard_matrixMul_TRN_6_block0=guard_matrixMul_TRN_6_block0, k_block0=k_block0}
mVarsToDuplicate: [a, guard_matrixMul_TRN_6, bStep, b, c, guard_matrixMul_TRN_10, TRN_10_Csub_block_offset, guard_matrixMul_SNC_7, bBegin, k, TRN_10_Csub_block_X_0, blockIdx, TRN_6_Bs_X_0, aStep, aEnd, guard_matrixMul_CMP_5, TRN_6_Bs_offset, TRN_10_Csub_block_c_1, Csub_block, TRN_6_Bs_c_1, aBegin]
mId2sym: {Csub_block_block0=Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], TRN_10_Csub_block_X_0_block0=TRN_10_Csub_block_X_0_block0, TRN_10_Csub_block_c_1_block0=TRN_10_Csub_block_c_1_block0, TRN_10_Csub_block_offset_block0=TRN_10_Csub_block_offset_block0, TRN_6_Bs_X_0_block0=TRN_6_Bs_X_0_block0, TRN_6_Bs_c_1_block0=TRN_6_Bs_c_1_block0, TRN_6_Bs_offset_block0=TRN_6_Bs_offset_block0, aBegin_block0=aBegin_block0, aEnd_block0=aEnd_block0, aStep_block0=aStep_block0, a_block0=a_block0, bBegin_block0=bBegin_block0, bStep_block0=bStep_block0, b_block0=b_block0, blockIdx_block0=blockIdx_block0, c_block0=c_block0, guard_matrixMul_CMP_5_block0=guard_matrixMul_CMP_5_block0, guard_matrixMul_SNC_7_block0=guard_matrixMul_SNC_7_block0, guard_matrixMul_TRN_10_block0=guard_matrixMul_TRN_10_block0, guard_matrixMul_TRN_6_block0=guard_matrixMul_TRN_6_block0, k_block0=k_block0}
mId2sym: {Csub_block_block0=Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], TRN_10_Csub_block_X_0_block0=TRN_10_Csub_block_X_0_block0, TRN_10_Csub_block_c_1_block0=TRN_10_Csub_block_c_1_block0, TRN_10_Csub_block_offset_block0=TRN_10_Csub_block_offset_block0, TRN_6_Bs_X_0_block0=TRN_6_Bs_X_0_block0, TRN_6_Bs_c_1_block0=TRN_6_Bs_c_1_block0, TRN_6_Bs_offset_block0=TRN_6_Bs_offset_block0, aBegin_block0=aBegin_block0, aEnd_block0=aEnd_block0, aStep_block0=aStep_block0, a_block0=a_block0, bBegin_block0=bBegin_block0, bStep_block0=bStep_block0, b_block0=b_block0, blockIdx_block0=blockIdx_block0, c_block0=c_block0, guard_matrixMul_CMP_5_block0=guard_matrixMul_CMP_5_block0, guard_matrixMul_SNC_7_block0=guard_matrixMul_SNC_7_block0, guard_matrixMul_TRN_10_block0=guard_matrixMul_TRN_10_block0, guard_matrixMul_TRN_6_block0=guard_matrixMul_TRN_6_block0, k_block0=k_block0}
mId2sym: {Csub_block_block0=Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], TRN_10_Csub_block_X_0_block0=TRN_10_Csub_block_X_0_block0, TRN_10_Csub_block_c_1_block0=TRN_10_Csub_block_c_1_block0, TRN_10_Csub_block_offset_block0=TRN_10_Csub_block_offset_block0, TRN_6_Bs_X_0_block0=TRN_6_Bs_X_0_block0, TRN_6_Bs_c_1_block0=TRN_6_Bs_c_1_block0, TRN_6_Bs_offset_block0=TRN_6_Bs_offset_block0, aBegin_block0=aBegin_block0, aEnd_block0=aEnd_block0, aStep_block0=aStep_block0, a_block0=a_block0, bBegin_block0=bBegin_block0, bStep_block0=bStep_block0, b_block0=b_block0, blockIdx_block0=blockIdx_block0, c_block0=c_block0, guard_matrixMul_CMP_5_block0=guard_matrixMul_CMP_5_block0, guard_matrixMul_SNC_7_block0=guard_matrixMul_SNC_7_block0, guard_matrixMul_TRN_10_block0=guard_matrixMul_TRN_10_block0, guard_matrixMul_TRN_6_block0=guard_matrixMul_TRN_6_block0, k_block0=k_block0}
mId2sym: {Csub_block_block0=Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], TRN_10_Csub_block_X_0_block0=TRN_10_Csub_block_X_0_block0, TRN_10_Csub_block_c_1_block0=TRN_10_Csub_block_c_1_block0, TRN_10_Csub_block_offset_block0=TRN_10_Csub_block_offset_block0, TRN_6_Bs_X_0_block0=TRN_6_Bs_X_0_block0, TRN_6_Bs_c_1_block0=TRN_6_Bs_c_1_block0, TRN_6_Bs_offset_block0=TRN_6_Bs_offset_block0, aBegin_block0=aBegin_block0, aEnd_block0=aEnd_block0, aStep_block0=aStep_block0, a_block0=a_block0, bBegin_block0=bBegin_block0, bStep_block0=bStep_block0, b_block0=b_block0, blockIdx_block0=blockIdx_block0, c_block0=c_block0, guard_matrixMul_CMP_5_block0=guard_matrixMul_CMP_5_block0, guard_matrixMul_SNC_7_block0=guard_matrixMul_SNC_7_block0, guard_matrixMul_TRN_10_block0=guard_matrixMul_TRN_10_block0, guard_matrixMul_TRN_6_block0=guard_matrixMul_TRN_6_block0, k_block0=k_block0}
mId2sym: {Csub_block_block0=Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], TRN_10_Csub_block_X_0_block0=TRN_10_Csub_block_X_0_block0, TRN_10_Csub_block_c_1_block0=TRN_10_Csub_block_c_1_block0, TRN_10_Csub_block_offset_block0=TRN_10_Csub_block_offset_block0, TRN_6_Bs_X_0_block0=TRN_6_Bs_X_0_block0, TRN_6_Bs_c_1_block0=TRN_6_Bs_c_1_block0, TRN_6_Bs_offset_block0=TRN_6_Bs_offset_block0, aBegin_block0=aBegin_block0, aEnd_block0=aEnd_block0, aStep_block0=aStep_block0, a_block0=a_block0, bBegin_block0=bBegin_block0, bStep_block0=bStep_block0, b_block0=b_block0, blockIdx_block0=blockIdx_block0, c_block0=c_block0, guard_matrixMul_CMP_5_block0=guard_matrixMul_CMP_5_block0, guard_matrixMul_SNC_7_block0=guard_matrixMul_SNC_7_block0, guard_matrixMul_TRN_10_block0=guard_matrixMul_TRN_10_block0, guard_matrixMul_TRN_6_block0=guard_matrixMul_TRN_6_block0, k_block0=k_block0}
mId2sym: {Csub_block_block0=Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], TRN_10_Csub_block_X_0_block0=TRN_10_Csub_block_X_0_block0, TRN_10_Csub_block_c_1_block0=TRN_10_Csub_block_c_1_block0, TRN_10_Csub_block_offset_block0=TRN_10_Csub_block_offset_block0, TRN_6_Bs_X_0_block0=TRN_6_Bs_X_0_block0, TRN_6_Bs_c_1_block0=TRN_6_Bs_c_1_block0, TRN_6_Bs_offset_block0=TRN_6_Bs_offset_block0, aBegin_block0=aBegin_block0, aEnd_block0=aEnd_block0, aStep_block0=aStep_block0, a_block0=a_block0, bBegin_block0=bBegin_block0, bStep_block0=bStep_block0, b_block0=b_block0, blockIdx_block0=blockIdx_block0, c_block0=c_block0, guard_matrixMul_CMP_5_block0=guard_matrixMul_CMP_5_block0, guard_matrixMul_SNC_7_block0=guard_matrixMul_SNC_7_block0, guard_matrixMul_TRN_10_block0=guard_matrixMul_TRN_10_block0, guard_matrixMul_TRN_6_block0=guard_matrixMul_TRN_6_block0, k_block0=k_block0}
mId2sym: {Csub_block_block0=Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], TRN_10_Csub_block_X_0_block0=TRN_10_Csub_block_X_0_block0, TRN_10_Csub_block_c_1_block0=TRN_10_Csub_block_c_1_block0, TRN_10_Csub_block_offset_block0=TRN_10_Csub_block_offset_block0, TRN_6_Bs_X_0_block0=TRN_6_Bs_X_0_block0, TRN_6_Bs_c_1_block0=TRN_6_Bs_c_1_block0, TRN_6_Bs_offset_block0=TRN_6_Bs_offset_block0, aBegin_block0=aBegin_block0, aEnd_block0=aEnd_block0, aStep_block0=aStep_block0, a_block0=a_block0, bBegin_block0=bBegin_block0, bStep_block0=bStep_block0, b_block0=b_block0, blockIdx_block0=blockIdx_block0, c_block0=c_block0, guard_matrixMul_CMP_5_block0=guard_matrixMul_CMP_5_block0, guard_matrixMul_SNC_7_block0=guard_matrixMul_SNC_7_block0, guard_matrixMul_TRN_10_block0=guard_matrixMul_TRN_10_block0, guard_matrixMul_TRN_6_block0=guard_matrixMul_TRN_6_block0, k_block0=k_block0}
mId2sym: {Csub_block_block0=Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], TRN_10_Csub_block_X_0_block0=TRN_10_Csub_block_X_0_block0, TRN_10_Csub_block_c_1_block0=TRN_10_Csub_block_c_1_block0, TRN_10_Csub_block_offset_block0=TRN_10_Csub_block_offset_block0, TRN_6_Bs_X_0_block0=TRN_6_Bs_X_0_block0, TRN_6_Bs_c_1_block0=TRN_6_Bs_c_1_block0, TRN_6_Bs_offset_block0=TRN_6_Bs_offset_block0, aBegin_block0=aBegin_block0, aEnd_block0=aEnd_block0, aStep_block0=aStep_block0, a_block0=a_block0, bBegin_block0=bBegin_block0, bStep_block0=bStep_block0, b_block0=b_block0, blockIdx_block0=blockIdx_block0, c_block0=c_block0, guard_matrixMul_CMP_5_block0=guard_matrixMul_CMP_5_block0, guard_matrixMul_SNC_7_block0=guard_matrixMul_SNC_7_block0, guard_matrixMul_TRN_10_block0=guard_matrixMul_TRN_10_block0, guard_matrixMul_TRN_6_block0=guard_matrixMul_TRN_6_block0, k_block0=k_block0}
mId2sym: {Csub_block_block0=Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], TRN_10_Csub_block_X_0_block0=TRN_10_Csub_block_X_0_block0, TRN_10_Csub_block_c_1_block0=TRN_10_Csub_block_c_1_block0, TRN_10_Csub_block_offset_block0=TRN_10_Csub_block_offset_block0, TRN_6_Bs_X_0_block0=TRN_6_Bs_X_0_block0, TRN_6_Bs_c_1_block0=TRN_6_Bs_c_1_block0, TRN_6_Bs_offset_block0=TRN_6_Bs_offset_block0, aBegin_block0=aBegin_block0, aEnd_block0=aEnd_block0, aStep_block0=aStep_block0, a_block0=a_block0, bBegin_block0=bBegin_block0, bStep_block0=bStep_block0, b_block0=b_block0, blockIdx_block0=blockIdx_block0, c_block0=c_block0, guard_matrixMul_CMP_5_block0=guard_matrixMul_CMP_5_block0, guard_matrixMul_SNC_7_block0=guard_matrixMul_SNC_7_block0, guard_matrixMul_TRN_10_block0=guard_matrixMul_TRN_10_block0, guard_matrixMul_TRN_6_block0=guard_matrixMul_TRN_6_block0, k_block0=k_block0}
--- handleFcudaCore: matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block)
getCoreType for matrixMul_CMP_5(guard_matrixMul_CMP_5, blockDim, gridDim, blockIdx, Csub_block)
--- of type COMPUTE:
mId2sym: {Csub_block_block0=Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], TRN_10_Csub_block_X_0_block0=TRN_10_Csub_block_X_0_block0, TRN_10_Csub_block_c_1_block0=TRN_10_Csub_block_c_1_block0, TRN_10_Csub_block_offset_block0=TRN_10_Csub_block_offset_block0, TRN_6_Bs_X_0_block0=TRN_6_Bs_X_0_block0, TRN_6_Bs_c_1_block0=TRN_6_Bs_c_1_block0, TRN_6_Bs_offset_block0=TRN_6_Bs_offset_block0, aBegin_block0=aBegin_block0, aEnd_block0=aEnd_block0, aStep_block0=aStep_block0, a_block0=a_block0, bBegin_block0=bBegin_block0, bStep_block0=bStep_block0, b_block0=b_block0, blockIdx_block0=blockIdx_block0, c_block0=c_block0, guard_matrixMul_CMP_5_block0=guard_matrixMul_CMP_5_block0, guard_matrixMul_SNC_7_block0=guard_matrixMul_SNC_7_block0, guard_matrixMul_TRN_10_block0=guard_matrixMul_TRN_10_block0, guard_matrixMul_TRN_6_block0=guard_matrixMul_TRN_6_block0, k_block0=k_block0}
mId2sym: {Csub_block_block0=Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], TRN_10_Csub_block_X_0_block0=TRN_10_Csub_block_X_0_block0, TRN_10_Csub_block_c_1_block0=TRN_10_Csub_block_c_1_block0, TRN_10_Csub_block_offset_block0=TRN_10_Csub_block_offset_block0, TRN_6_Bs_X_0_block0=TRN_6_Bs_X_0_block0, TRN_6_Bs_c_1_block0=TRN_6_Bs_c_1_block0, TRN_6_Bs_offset_block0=TRN_6_Bs_offset_block0, aBegin_block0=aBegin_block0, aEnd_block0=aEnd_block0, aStep_block0=aStep_block0, a_block0=a_block0, bBegin_block0=bBegin_block0, bStep_block0=bStep_block0, b_block0=b_block0, blockIdx_block0=blockIdx_block0, c_block0=c_block0, guard_matrixMul_CMP_5_block0=guard_matrixMul_CMP_5_block0, guard_matrixMul_SNC_7_block0=guard_matrixMul_SNC_7_block0, guard_matrixMul_TRN_10_block0=guard_matrixMul_TRN_10_block0, guard_matrixMul_TRN_6_block0=guard_matrixMul_TRN_6_block0, k_block0=k_block0}
mId2sym: {Csub_block_block0=Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], TRN_10_Csub_block_X_0_block0=TRN_10_Csub_block_X_0_block0, TRN_10_Csub_block_c_1_block0=TRN_10_Csub_block_c_1_block0, TRN_10_Csub_block_offset_block0=TRN_10_Csub_block_offset_block0, TRN_6_Bs_X_0_block0=TRN_6_Bs_X_0_block0, TRN_6_Bs_c_1_block0=TRN_6_Bs_c_1_block0, TRN_6_Bs_offset_block0=TRN_6_Bs_offset_block0, aBegin_block0=aBegin_block0, aEnd_block0=aEnd_block0, aStep_block0=aStep_block0, a_block0=a_block0, bBegin_block0=bBegin_block0, bStep_block0=bStep_block0, b_block0=b_block0, blockIdx_block0=blockIdx_block0, c_block0=c_block0, guard_matrixMul_CMP_5_block0=guard_matrixMul_CMP_5_block0, guard_matrixMul_SNC_7_block0=guard_matrixMul_SNC_7_block0, guard_matrixMul_TRN_10_block0=guard_matrixMul_TRN_10_block0, guard_matrixMul_TRN_6_block0=guard_matrixMul_TRN_6_block0, k_block0=k_block0}
mId2sym: {Csub_block_block0=Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], TRN_10_Csub_block_X_0_block0=TRN_10_Csub_block_X_0_block0, TRN_10_Csub_block_c_1_block0=TRN_10_Csub_block_c_1_block0, TRN_10_Csub_block_offset_block0=TRN_10_Csub_block_offset_block0, TRN_6_Bs_X_0_block0=TRN_6_Bs_X_0_block0, TRN_6_Bs_c_1_block0=TRN_6_Bs_c_1_block0, TRN_6_Bs_offset_block0=TRN_6_Bs_offset_block0, aBegin_block0=aBegin_block0, aEnd_block0=aEnd_block0, aStep_block0=aStep_block0, a_block0=a_block0, bBegin_block0=bBegin_block0, bStep_block0=bStep_block0, b_block0=b_block0, blockIdx_block0=blockIdx_block0, c_block0=c_block0, guard_matrixMul_CMP_5_block0=guard_matrixMul_CMP_5_block0, guard_matrixMul_SNC_7_block0=guard_matrixMul_SNC_7_block0, guard_matrixMul_TRN_10_block0=guard_matrixMul_TRN_10_block0, guard_matrixMul_TRN_6_block0=guard_matrixMul_TRN_6_block0, k_block0=k_block0}
mId2sym: {Csub_block_block0=Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], TRN_10_Csub_block_X_0_block0=TRN_10_Csub_block_X_0_block0, TRN_10_Csub_block_c_1_block0=TRN_10_Csub_block_c_1_block0, TRN_10_Csub_block_offset_block0=TRN_10_Csub_block_offset_block0, TRN_6_Bs_X_0_block0=TRN_6_Bs_X_0_block0, TRN_6_Bs_c_1_block0=TRN_6_Bs_c_1_block0, TRN_6_Bs_offset_block0=TRN_6_Bs_offset_block0, aBegin_block0=aBegin_block0, aEnd_block0=aEnd_block0, aStep_block0=aStep_block0, a_block0=a_block0, bBegin_block0=bBegin_block0, bStep_block0=bStep_block0, b_block0=b_block0, blockIdx_block0=blockIdx_block0, c_block0=c_block0, guard_matrixMul_CMP_5_block0=guard_matrixMul_CMP_5_block0, guard_matrixMul_SNC_7_block0=guard_matrixMul_SNC_7_block0, guard_matrixMul_TRN_10_block0=guard_matrixMul_TRN_10_block0, guard_matrixMul_TRN_6_block0=guard_matrixMul_TRN_6_block0, k_block0=k_block0}
mId2sym: {Csub_block_block0=Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], TRN_10_Csub_block_X_0_block0=TRN_10_Csub_block_X_0_block0, TRN_10_Csub_block_c_1_block0=TRN_10_Csub_block_c_1_block0, TRN_10_Csub_block_offset_block0=TRN_10_Csub_block_offset_block0, TRN_6_Bs_X_0_block0=TRN_6_Bs_X_0_block0, TRN_6_Bs_c_1_block0=TRN_6_Bs_c_1_block0, TRN_6_Bs_offset_block0=TRN_6_Bs_offset_block0, aBegin_block0=aBegin_block0, aEnd_block0=aEnd_block0, aStep_block0=aStep_block0, a_block0=a_block0, bBegin_block0=bBegin_block0, bStep_block0=bStep_block0, b_block0=b_block0, blockIdx_block0=blockIdx_block0, c_block0=c_block0, guard_matrixMul_CMP_5_block0=guard_matrixMul_CMP_5_block0, guard_matrixMul_SNC_7_block0=guard_matrixMul_SNC_7_block0, guard_matrixMul_TRN_10_block0=guard_matrixMul_TRN_10_block0, guard_matrixMul_TRN_6_block0=guard_matrixMul_TRN_6_block0, k_block0=k_block0}
mId2sym: {Csub_block_block0=Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], TRN_10_Csub_block_X_0_block0=TRN_10_Csub_block_X_0_block0, TRN_10_Csub_block_c_1_block0=TRN_10_Csub_block_c_1_block0, TRN_10_Csub_block_offset_block0=TRN_10_Csub_block_offset_block0, TRN_6_Bs_X_0_block0=TRN_6_Bs_X_0_block0, TRN_6_Bs_c_1_block0=TRN_6_Bs_c_1_block0, TRN_6_Bs_offset_block0=TRN_6_Bs_offset_block0, aBegin_block0=aBegin_block0, aEnd_block0=aEnd_block0, aStep_block0=aStep_block0, a_block0=a_block0, bBegin_block0=bBegin_block0, bStep_block0=bStep_block0, b_block0=b_block0, blockIdx_block0=blockIdx_block0, c_block0=c_block0, guard_matrixMul_CMP_5_block0=guard_matrixMul_CMP_5_block0, guard_matrixMul_SNC_7_block0=guard_matrixMul_SNC_7_block0, guard_matrixMul_TRN_10_block0=guard_matrixMul_TRN_10_block0, guard_matrixMul_TRN_6_block0=guard_matrixMul_TRN_6_block0, k_block0=k_block0}
mId2sym: {Csub_block_block0=Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], TRN_10_Csub_block_X_0_block0=TRN_10_Csub_block_X_0_block0, TRN_10_Csub_block_c_1_block0=TRN_10_Csub_block_c_1_block0, TRN_10_Csub_block_offset_block0=TRN_10_Csub_block_offset_block0, TRN_6_Bs_X_0_block0=TRN_6_Bs_X_0_block0, TRN_6_Bs_c_1_block0=TRN_6_Bs_c_1_block0, TRN_6_Bs_offset_block0=TRN_6_Bs_offset_block0, aBegin_block0=aBegin_block0, aEnd_block0=aEnd_block0, aStep_block0=aStep_block0, a_block0=a_block0, bBegin_block0=bBegin_block0, bStep_block0=bStep_block0, b_block0=b_block0, blockIdx_block0=blockIdx_block0, c_block0=c_block0, guard_matrixMul_CMP_5_block0=guard_matrixMul_CMP_5_block0, guard_matrixMul_SNC_7_block0=guard_matrixMul_SNC_7_block0, guard_matrixMul_TRN_10_block0=guard_matrixMul_TRN_10_block0, guard_matrixMul_TRN_6_block0=guard_matrixMul_TRN_6_block0, k_block0=k_block0}
mId2sym: {Csub_block_block0=Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], TRN_10_Csub_block_X_0_block0=TRN_10_Csub_block_X_0_block0, TRN_10_Csub_block_c_1_block0=TRN_10_Csub_block_c_1_block0, TRN_10_Csub_block_offset_block0=TRN_10_Csub_block_offset_block0, TRN_6_Bs_X_0_block0=TRN_6_Bs_X_0_block0, TRN_6_Bs_c_1_block0=TRN_6_Bs_c_1_block0, TRN_6_Bs_offset_block0=TRN_6_Bs_offset_block0, aBegin_block0=aBegin_block0, aEnd_block0=aEnd_block0, aStep_block0=aStep_block0, a_block0=a_block0, bBegin_block0=bBegin_block0, bStep_block0=bStep_block0, b_block0=b_block0, blockIdx_block0=blockIdx_block0, c_block0=c_block0, guard_matrixMul_CMP_5_block0=guard_matrixMul_CMP_5_block0, guard_matrixMul_SNC_7_block0=guard_matrixMul_SNC_7_block0, guard_matrixMul_TRN_10_block0=guard_matrixMul_TRN_10_block0, guard_matrixMul_TRN_6_block0=guard_matrixMul_TRN_6_block0, k_block0=k_block0}
--- handleFcudaCore: matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB)
getCoreType for matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB)
getCoreType for matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB)
--- of typee TRANSFER:
mId2sym: {Csub_block_block0=Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], TRN_10_Csub_block_X_0_block0=TRN_10_Csub_block_X_0_block0, TRN_10_Csub_block_c_1_block0=TRN_10_Csub_block_c_1_block0, TRN_10_Csub_block_offset_block0=TRN_10_Csub_block_offset_block0, TRN_6_Bs_X_0_block0=TRN_6_Bs_X_0_block0, TRN_6_Bs_c_1_block0=TRN_6_Bs_c_1_block0, TRN_6_Bs_offset_block0=TRN_6_Bs_offset_block0, aBegin_block0=aBegin_block0, aEnd_block0=aEnd_block0, aStep_block0=aStep_block0, a_block0=a_block0, bBegin_block0=bBegin_block0, bStep_block0=bStep_block0, b_block0=b_block0, blockIdx_block0=blockIdx_block0, c_block0=c_block0, guard_matrixMul_CMP_5_block0=guard_matrixMul_CMP_5_block0, guard_matrixMul_SNC_7_block0=guard_matrixMul_SNC_7_block0, guard_matrixMul_TRN_10_block0=guard_matrixMul_TRN_10_block0, guard_matrixMul_TRN_6_block0=guard_matrixMul_TRN_6_block0, k_block0=k_block0}
Treating arguments of call: matrixMul_TRN_10(guard_matrixMul_TRN_10_block0, blockDim, gridDim, blockIdx_block0, C, Csub_block_block0, c_block0, wB)
Arg #0: guard_matrixMul_TRN_10_block0
getCoreName for matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB)
getCoreName for matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB)
Arg #1: blockDim
Arg #2: gridDim
Arg #3: blockIdx_block0
getCoreName for matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB)
getCoreName for matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB)
Arg #4: C
Arg #5: Csub_block_block0
Arg #6: c_block0
Arg #7: wB
getCommonArgsIndex for matrixMul_TRN_10(guard_matrixMul_TRN_10, blockDim, gridDim, blockIdx, C, Csub_block, c, wB)
----addStatementBefore----index is:16
----addStatementBefore----index is:12
----addStatementBefore----index is:17
----addStatementBefore----index is:3
----addStatementBefore----index is:15
----addStatementBefore----index is:24
----addStatementBefore----index is:2
----addStatementBefore----index is:13
----addStatementBefore----index is:1
----addStatementBefore----index is:22
----addStatementBefore----index is:9
----addStatementBefore----index is:14
----addStatementBefore----index is:10
----addStatementBefore----index is:23
----addStatementBefore----index is:4
----addStatementBefore----index is:18
----addStatementBefore----index is:21
----addStatementBefore----index is:8
----addStatementBefore----index is:11
----addStatementBefore----index is:0
----addStatementBefore----index is:0
... handleCompoundStatement finished!
-----kernelTransformPass, finish transformProcedure-----, proc now is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB, dim3 gridDim, dim3 blockDim)
{
int aBegin_block0;
int aEnd_block0;
int aStep_block0;
int bBegin_block0;
int bStep_block0;
__shared__ DATATYPE Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X];
int a_block0;
int b_block0;
int k_block0;
int c_block0;
dim3 blockIdx_block0;
int guard_matrixMul_CMP_5_block0;
int guard_matrixMul_TRN_10_block0;
int guard_matrixMul_TRN_6_block0;
int guard_matrixMul_SNC_7_block0;
int TRN_6_Bs_offset_block0;
int TRN_6_Bs_X_0_block0;
int TRN_6_Bs_c_1_block0;
int TRN_10_Csub_block_offset_block0;
int TRN_10_Csub_block_X_0_block0;
int TRN_10_Csub_block_c_1_block0;
dim3 blockIdx_loop;
for (blockIdx_loop.y=0; (gridDim.y+(-1*blockIdx_loop.y))>0; blockIdx_loop.y+=1)
{
blockIdx_block0.y=blockIdx_loop.y;
for (blockIdx_loop.x=0; (gridDim.x+(-1*blockIdx_loop.x))>0; blockIdx_loop.x+=1)
{
blockIdx_block0.x=(blockIdx_loop.x+0);
guard_matrixMul_SNC_7_block0=1;
guard_matrixMul_TRN_6_block0=1;
guard_matrixMul_TRN_10_block0=1;
guard_matrixMul_CMP_5_block0=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin_block0=((wA*16)*blockIdx_block0.y);
aEnd_block0=((aBegin_block0+wA)-1);
aStep_block0=16;
bBegin_block0=(16*blockIdx_block0.x);
bStep_block0=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5_block0, blockDim, gridDim, blockIdx_block0, Csub_block_block0);
a_block0=0;
b_block0=0;
k_block0=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a_block0=aBegin_block0), (b_block0=bBegin_block0)); a_block0<=aEnd_block0; ((a_block0+=aStep_block0), (b_block0+=bStep_block0)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset_block0=0;
TRN_6_Bs_X_0_block0=b_block0;
TRN_6_Bs_c_1_block0=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6_block0, blockDim, gridDim, blockIdx_block0, As, A, a_block0, wA, Bs, B, b_block0, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7_block0, blockDim, gridDim, blockIdx_block0, Csub_block_block0, As, Bs);
}
c_block0=(((wB*16)*blockIdx_block0.y)+(16*blockIdx_block0.x));
TRN_10_Csub_block_offset_block0=0;
TRN_10_Csub_block_X_0_block0=c_block0;
TRN_10_Csub_block_c_1_block0=wB;
matrixMul_TRN_10_wrapper(guard_matrixMul_TRN_10_block0, blockDim, gridDim, blockIdx_block0, C, Csub_block_block0, c_block0, wB);
}
}
}
[DuplicateForFCUDA2-FCUDA] end in 0.01 seconds
[LinkSymbol] 146 updates in 0.00 seconds
*** After DuplicateForFCUDA2 ***
#include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
const int BLOCKDIM_X = 16, BLOCKDIM_Y = 16;
#pragma fcuda compute name=CMP_5 end=false cores=1 begin=true
void matrixMul_CMP_5(int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X])
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_CMP_5)
{
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
}
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
}
return ;
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
void matrixMul_TRN_10(int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB)
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_TRN_10)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
}
return ;
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
void matrixMul_TRN_6(int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE As[16][16], DATATYPE * A, int a, int wA, __shared__ DATATYPE Bs[16][16], DATATYPE * B, int b, int wB)
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_TRN_6)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
}
return ;
}
#pragma fcuda compute name=SNC_7 end=false cores=1 begin=true
void matrixMul_SNC_7(int guard_matrixMul_SNC_7, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], __shared__ DATATYPE As[16][16], __shared__ DATATYPE Bs[16][16])
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_SNC_7)
{
int k;
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
#pragma fcuda stmt SNCtask=true name=SNC_7 HTGNode=SNC_7 tdep=true seqID=7
__syncthreads();
{
lp1:
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
}
#pragma fcuda stmt SNCtask=true name=SNC_9 HTGNode=SNC_9 tdep=true seqID=9
__syncthreads();
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
return ;
}
void matrixMul_TRN_10_wrapper(int guard_matrixMul_TRN_10_block0, dim3 blockDim, dim3 gridDim, dim3 blockIdx_matrixMul_TRN_10_block0, DATATYPE * C, __shared__ DATATYPE Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], int c_block0, int wB)
{
matrixMul_TRN_10(guard_matrixMul_TRN_10_block0, blockDim, gridDim, blockIdx_matrixMul_TRN_10_block0, C, Csub_block_block0, c_block0, wB);
}
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB, dim3 gridDim, dim3 blockDim)
{
int aBegin_block0;
int aEnd_block0;
int aStep_block0;
int bBegin_block0;
int bStep_block0;
__shared__ DATATYPE Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X];
int a_block0;
int b_block0;
int k_block0;
int c_block0;
dim3 blockIdx_block0;
int guard_matrixMul_CMP_5_block0;
int guard_matrixMul_TRN_10_block0;
int guard_matrixMul_TRN_6_block0;
int guard_matrixMul_SNC_7_block0;
int TRN_6_Bs_offset_block0;
int TRN_6_Bs_X_0_block0;
int TRN_6_Bs_c_1_block0;
int TRN_10_Csub_block_offset_block0;
int TRN_10_Csub_block_X_0_block0;
int TRN_10_Csub_block_c_1_block0;
dim3 blockIdx_loop;
for (blockIdx_loop.y=0; (gridDim.y+(-1*blockIdx_loop.y))>0; blockIdx_loop.y+=1)
{
blockIdx_block0.y=blockIdx_loop.y;
for (blockIdx_loop.x=0; (gridDim.x+(-1*blockIdx_loop.x))>0; blockIdx_loop.x+=1)
{
blockIdx_block0.x=(blockIdx_loop.x+0);
guard_matrixMul_SNC_7_block0=1;
guard_matrixMul_TRN_6_block0=1;
guard_matrixMul_TRN_10_block0=1;
guard_matrixMul_CMP_5_block0=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin_block0=((wA*16)*blockIdx_block0.y);
aEnd_block0=((aBegin_block0+wA)-1);
aStep_block0=16;
bBegin_block0=(16*blockIdx_block0.x);
bStep_block0=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5_block0, blockDim, gridDim, blockIdx_block0, Csub_block_block0);
a_block0=0;
b_block0=0;
k_block0=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a_block0=aBegin_block0), (b_block0=bBegin_block0)); a_block0<=aEnd_block0; ((a_block0+=aStep_block0), (b_block0+=bStep_block0)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset_block0=0;
TRN_6_Bs_X_0_block0=b_block0;
TRN_6_Bs_c_1_block0=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6_block0, blockDim, gridDim, blockIdx_block0, As, A, a_block0, wA, Bs, B, b_block0, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7_block0, blockDim, gridDim, blockIdx_block0, Csub_block_block0, As, Bs);
}
c_block0=(((wB*16)*blockIdx_block0.y)+(16*blockIdx_block0.x));
TRN_10_Csub_block_offset_block0=0;
TRN_10_Csub_block_X_0_block0=c_block0;
TRN_10_Csub_block_c_1_block0=wB;
matrixMul_TRN_10_wrapper(guard_matrixMul_TRN_10_block0, blockDim, gridDim, blockIdx_block0, C, Csub_block_block0, c_block0, wB);
}
}
}
===========================================
[CleanSyncFunc-MCUDA] begin
[CleanSyncFunc-MCUDA] examining procedure matrixMul
-----kernelTransformPass, finish transformProcedure-----, proc now is: #pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB, dim3 gridDim, dim3 blockDim)
{
int aBegin_block0;
int aEnd_block0;
int aStep_block0;
int bBegin_block0;
int bStep_block0;
__shared__ DATATYPE Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X];
int a_block0;
int b_block0;
int k_block0;
int c_block0;
dim3 blockIdx_block0;
int guard_matrixMul_CMP_5_block0;
int guard_matrixMul_TRN_10_block0;
int guard_matrixMul_TRN_6_block0;
int guard_matrixMul_SNC_7_block0;
int TRN_6_Bs_offset_block0;
int TRN_6_Bs_X_0_block0;
int TRN_6_Bs_c_1_block0;
int TRN_10_Csub_block_offset_block0;
int TRN_10_Csub_block_X_0_block0;
int TRN_10_Csub_block_c_1_block0;
dim3 blockIdx_loop;
for (blockIdx_loop.y=0; (gridDim.y+(-1*blockIdx_loop.y))>0; blockIdx_loop.y+=1)
{
blockIdx_block0.y=blockIdx_loop.y;
for (blockIdx_loop.x=0; (gridDim.x+(-1*blockIdx_loop.x))>0; blockIdx_loop.x+=1)
{
blockIdx_block0.x=(blockIdx_loop.x+0);
guard_matrixMul_SNC_7_block0=1;
guard_matrixMul_TRN_6_block0=1;
guard_matrixMul_TRN_10_block0=1;
guard_matrixMul_CMP_5_block0=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin_block0=((wA*16)*blockIdx_block0.y);
aEnd_block0=((aBegin_block0+wA)-1);
aStep_block0=16;
bBegin_block0=(16*blockIdx_block0.x);
bStep_block0=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5_block0, blockDim, gridDim, blockIdx_block0, Csub_block_block0);
a_block0=0;
b_block0=0;
k_block0=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a_block0=aBegin_block0), (b_block0=bBegin_block0)); a_block0<=aEnd_block0; ((a_block0+=aStep_block0), (b_block0+=bStep_block0)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset_block0=0;
TRN_6_Bs_X_0_block0=b_block0;
TRN_6_Bs_c_1_block0=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6_block0, blockDim, gridDim, blockIdx_block0, As, A, a_block0, wA, Bs, B, b_block0, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7_block0, blockDim, gridDim, blockIdx_block0, Csub_block_block0, As, Bs);
}
c_block0=(((wB*16)*blockIdx_block0.y)+(16*blockIdx_block0.x));
TRN_10_Csub_block_offset_block0=0;
TRN_10_Csub_block_X_0_block0=c_block0;
TRN_10_Csub_block_c_1_block0=wB;
matrixMul_TRN_10_wrapper(guard_matrixMul_TRN_10_block0, blockDim, gridDim, blockIdx_block0, C, Csub_block_block0, c_block0, wB);
}
}
}
[CleanSyncFunc-MCUDA] end in 0.00 seconds
[LinkSymbol] 144 updates in 0.00 seconds
*** After CleanSyncFunc ***
#include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
const int BLOCKDIM_X = 16, BLOCKDIM_Y = 16;
#pragma fcuda compute name=CMP_5 end=false cores=1 begin=true
void matrixMul_CMP_5(int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X])
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_CMP_5)
{
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
}
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
}
return ;
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
void matrixMul_TRN_10(int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB)
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_TRN_10)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
}
return ;
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
void matrixMul_TRN_6(int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE As[16][16], DATATYPE * A, int a, int wA, __shared__ DATATYPE Bs[16][16], DATATYPE * B, int b, int wB)
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_TRN_6)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
}
return ;
}
#pragma fcuda compute name=SNC_7 end=false cores=1 begin=true
void matrixMul_SNC_7(int guard_matrixMul_SNC_7, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], __shared__ DATATYPE As[16][16], __shared__ DATATYPE Bs[16][16])
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_SNC_7)
{
int k;
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
{
lp1:
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
}
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
return ;
}
void matrixMul_TRN_10_wrapper(int guard_matrixMul_TRN_10_block0, dim3 blockDim, dim3 gridDim, dim3 blockIdx_matrixMul_TRN_10_block0, DATATYPE * C, __shared__ DATATYPE Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], int c_block0, int wB)
{
matrixMul_TRN_10(guard_matrixMul_TRN_10_block0, blockDim, gridDim, blockIdx_matrixMul_TRN_10_block0, C, Csub_block_block0, c_block0, wB);
}
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB, dim3 gridDim, dim3 blockDim)
{
int aBegin_block0;
int aEnd_block0;
int aStep_block0;
int bBegin_block0;
int bStep_block0;
__shared__ DATATYPE Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X];
int a_block0;
int b_block0;
int k_block0;
int c_block0;
dim3 blockIdx_block0;
int guard_matrixMul_CMP_5_block0;
int guard_matrixMul_TRN_10_block0;
int guard_matrixMul_TRN_6_block0;
int guard_matrixMul_SNC_7_block0;
int TRN_6_Bs_offset_block0;
int TRN_6_Bs_X_0_block0;
int TRN_6_Bs_c_1_block0;
int TRN_10_Csub_block_offset_block0;
int TRN_10_Csub_block_X_0_block0;
int TRN_10_Csub_block_c_1_block0;
dim3 blockIdx_loop;
for (blockIdx_loop.y=0; (gridDim.y+(-1*blockIdx_loop.y))>0; blockIdx_loop.y+=1)
{
blockIdx_block0.y=blockIdx_loop.y;
for (blockIdx_loop.x=0; (gridDim.x+(-1*blockIdx_loop.x))>0; blockIdx_loop.x+=1)
{
blockIdx_block0.x=(blockIdx_loop.x+0);
guard_matrixMul_SNC_7_block0=1;
guard_matrixMul_TRN_6_block0=1;
guard_matrixMul_TRN_10_block0=1;
guard_matrixMul_CMP_5_block0=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin_block0=((wA*16)*blockIdx_block0.y);
aEnd_block0=((aBegin_block0+wA)-1);
aStep_block0=16;
bBegin_block0=(16*blockIdx_block0.x);
bStep_block0=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5_block0, blockDim, gridDim, blockIdx_block0, Csub_block_block0);
a_block0=0;
b_block0=0;
k_block0=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a_block0=aBegin_block0), (b_block0=bBegin_block0)); a_block0<=aEnd_block0; ((a_block0+=aStep_block0), (b_block0+=bStep_block0)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset_block0=0;
TRN_6_Bs_X_0_block0=b_block0;
TRN_6_Bs_c_1_block0=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6_block0, blockDim, gridDim, blockIdx_block0, As, A, a_block0, wA, Bs, B, b_block0, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7_block0, blockDim, gridDim, blockIdx_block0, Csub_block_block0, As, Bs);
}
c_block0=(((wB*16)*blockIdx_block0.y)+(16*blockIdx_block0.x));
TRN_10_Csub_block_offset_block0=0;
TRN_10_Csub_block_X_0_block0=c_block0;
TRN_10_Csub_block_c_1_block0=wB;
matrixMul_TRN_10_wrapper(guard_matrixMul_TRN_10_block0, blockDim, gridDim, blockIdx_block0, C, Csub_block_block0, c_block0, wB);
}
}
}
===========================================
*** After CleanLaunches ***
#include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
const int BLOCKDIM_X = 16, BLOCKDIM_Y = 16;
#pragma fcuda compute name=CMP_5 end=false cores=1 begin=true
void matrixMul_CMP_5(int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X])
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_CMP_5)
{
#pragma fcuda compute cores=1 name=CMP_5 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=CMP_5 end=false begin=true
#pragma fcuda stmt name=CMP_5 HTGNode=CMP_5 tdep=true seqID=5 tlpName=CMP_5 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]=0;
#pragma fcuda tloop name=CMP_5 end=true begin=false
}
#pragma fcuda compute cores=1 name=CMP_5 end=true begin=false
}
return ;
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
void matrixMul_TRN_10(int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB)
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_TRN_10)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=TRN_10 end=false begin=true
#pragma fcuda stmt rdNwrt=false GLBpntr=C name=TRN_10 HTGNode=TRN_10 tdep_vars=[Csub_block] tdep=true seqID=10 tlpName=TRN_10 TRNtask=true
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
#pragma fcuda tloop name=TRN_10 end=true begin=false
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=true begin=false
}
return ;
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
void matrixMul_TRN_6(int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE As[16][16], DATATYPE * A, int a, int wA, __shared__ DATATYPE Bs[16][16], DATATYPE * B, int b, int wB)
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_TRN_6)
{
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=TRN_6 end=false begin=true
#pragma fcuda stmt rdNwrt=true GLBpntr=A name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
#pragma fcuda stmt rdNwrt=true GLBpntr=B name=TRN_6 HTGNode=TRN_6 tdep=true seqID=6 tlpName=TRN_6 TRNtask=true
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
#pragma fcuda tloop name=TRN_6 end=true begin=false
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=true begin=false
}
return ;
}
#pragma fcuda compute name=SNC_7 end=false cores=1 begin=true
void matrixMul_SNC_7(int guard_matrixMul_SNC_7, dim3 blockDim, dim3 gridDim, dim3 blockIdx, __shared__ DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], __shared__ DATATYPE As[16][16], __shared__ DATATYPE Bs[16][16])
{
dim3 __shared__ threadIdx;
if (guard_matrixMul_SNC_7)
{
int k;
#pragma fcuda compute cores=1 name=SNC_7 end=false begin=true
{
lp1:
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=false begin=true
#pragma fcuda stmt HTGNode=FOR_HTG_CMP_8 tlpName=FOR_HTG_CMP_8
for (#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
k=0; k<16; ++ k)
#pragma fcuda stmt tlpName=FOR_HTG_CMP_8
{
#pragma fcuda stmt name=CMP_8 HTGNode=CMP_8 tdep_vars=[Bs, As, Csub_block] tdep=true seqID=8 tlpName=FOR_HTG_CMP_8 CMPtask=true
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
#pragma fcuda tloop name=FOR_HTG_CMP_8 end=true begin=false
}
}
#pragma fcuda compute cores=1 name=SNC_7 end=true begin=false
}
return ;
}
void matrixMul_TRN_10_wrapper(int guard_matrixMul_TRN_10_block0, dim3 blockDim, dim3 gridDim, dim3 blockIdx_matrixMul_TRN_10_block0, DATATYPE * C, __shared__ DATATYPE Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], int c_block0, int wB)
{
matrixMul_TRN_10(guard_matrixMul_TRN_10_block0, blockDim, gridDim, blockIdx_matrixMul_TRN_10_block0, C, Csub_block_block0, c_block0, wB);
}
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
__global__ void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB, dim3 gridDim, dim3 blockDim)
{
int aBegin_block0;
int aEnd_block0;
int aStep_block0;
int bBegin_block0;
int bStep_block0;
__shared__ DATATYPE Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X];
int a_block0;
int b_block0;
int k_block0;
int c_block0;
dim3 blockIdx_block0;
int guard_matrixMul_CMP_5_block0;
int guard_matrixMul_TRN_10_block0;
int guard_matrixMul_TRN_6_block0;
int guard_matrixMul_SNC_7_block0;
int TRN_6_Bs_offset_block0;
int TRN_6_Bs_X_0_block0;
int TRN_6_Bs_c_1_block0;
int TRN_10_Csub_block_offset_block0;
int TRN_10_Csub_block_X_0_block0;
int TRN_10_Csub_block_c_1_block0;
dim3 blockIdx_loop;
for (blockIdx_loop.y=0; (gridDim.y+(-1*blockIdx_loop.y))>0; blockIdx_loop.y+=1)
{
blockIdx_block0.y=blockIdx_loop.y;
for (blockIdx_loop.x=0; (gridDim.x+(-1*blockIdx_loop.x))>0; blockIdx_loop.x+=1)
{
blockIdx_block0.x=(blockIdx_loop.x+0);
guard_matrixMul_SNC_7_block0=1;
guard_matrixMul_TRN_6_block0=1;
guard_matrixMul_TRN_10_block0=1;
guard_matrixMul_CMP_5_block0=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin_block0=((wA*16)*blockIdx_block0.y);
aEnd_block0=((aBegin_block0+wA)-1);
aStep_block0=16;
bBegin_block0=(16*blockIdx_block0.x);
bStep_block0=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5_block0, blockDim, gridDim, blockIdx_block0, Csub_block_block0);
a_block0=0;
b_block0=0;
k_block0=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a_block0=aBegin_block0), (b_block0=bBegin_block0)); a_block0<=aEnd_block0; ((a_block0+=aStep_block0), (b_block0+=bStep_block0)))
{
__shared__ DATATYPE As[16][16];
__shared__ DATATYPE Bs[16][16];
TRN_6_Bs_offset_block0=0;
TRN_6_Bs_X_0_block0=b_block0;
TRN_6_Bs_c_1_block0=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6_block0, blockDim, gridDim, blockIdx_block0, As, A, a_block0, wA, Bs, B, b_block0, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7_block0, blockDim, gridDim, blockIdx_block0, Csub_block_block0, As, Bs);
}
c_block0=(((wB*16)*blockIdx_block0.y)+(16*blockIdx_block0.x));
TRN_10_Csub_block_offset_block0=0;
TRN_10_Csub_block_X_0_block0=c_block0;
TRN_10_Csub_block_c_1_block0=wB;
matrixMul_TRN_10_wrapper(guard_matrixMul_TRN_10_block0, blockDim, gridDim, blockIdx_block0, C, Csub_block_block0, c_block0, wB);
}
}
}
*** After ClearCUDASpecs ***
#include <fcuda.h>
#include "../matrixMul.h"
#include <string.h>
const int BLOCKDIM_X = 16, BLOCKDIM_Y = 16;
#pragma fcuda compute name=CMP_5 end=false cores=1 begin=true
void matrixMul_CMP_5(int guard_matrixMul_CMP_5, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X])
{
dim3 threadIdx;
if (guard_matrixMul_CMP_5)
{
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
Csub_block[threadIdx.y][threadIdx.x]=0;
}
}
return ;
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_10 end=false begin=true
void matrixMul_TRN_10(int guard_matrixMul_TRN_10, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE * C, DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], int c, int wB)
{
dim3 threadIdx;
if (guard_matrixMul_TRN_10)
{
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
C[((c+(wB*threadIdx.y))+threadIdx.x)]=Csub_block[threadIdx.y][threadIdx.x];
}
}
return ;
}
#pragma fcuda transfer cores=1 size=[BLOCKDIM_X] name=TRN_6 end=false begin=true
void matrixMul_TRN_6(int guard_matrixMul_TRN_6, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE As[16][16], DATATYPE * A, int a, int wA, DATATYPE Bs[16][16], DATATYPE * B, int b, int wB)
{
dim3 threadIdx;
if (guard_matrixMul_TRN_6)
{
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
As[threadIdx.y][threadIdx.x]=A[((a+(wA*threadIdx.y))+threadIdx.x)];
Bs[threadIdx.y][threadIdx.x]=B[((b+(wB*threadIdx.y))+threadIdx.x)];
}
}
return ;
}
#pragma fcuda compute name=SNC_7 end=false cores=1 begin=true
void matrixMul_SNC_7(int guard_matrixMul_SNC_7, dim3 blockDim, dim3 gridDim, dim3 blockIdx, DATATYPE Csub_block[BLOCKDIM_Y][BLOCKDIM_X], DATATYPE As[16][16], DATATYPE Bs[16][16])
{
dim3 threadIdx;
if (guard_matrixMul_SNC_7)
{
int k;
{
lp1:
for (threadIdx.z=0;threadIdx.z<blockDim.z ; threadIdx.z=threadIdx.z+1)
for (threadIdx.y=0;threadIdx.y<blockDim.y ; threadIdx.y=threadIdx.y+1)
for (threadIdx.x=0;threadIdx.x<blockDim.x ; threadIdx.x=threadIdx.x+1)
{
for (k=0; k<16; ++ k)
{
Csub_block[threadIdx.y][threadIdx.x]+=(As[threadIdx.y][k]*Bs[k][threadIdx.x]);
}
}
}
}
return ;
}
void matrixMul_TRN_10_wrapper(int guard_matrixMul_TRN_10_block0, dim3 blockDim, dim3 gridDim, dim3 blockIdx_matrixMul_TRN_10_block0, DATATYPE * C, DATATYPE Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X], int c_block0, int wB)
{
matrixMul_TRN_10(guard_matrixMul_TRN_10_block0, blockDim, gridDim, blockIdx_matrixMul_TRN_10_block0, C, Csub_block_block0, c_block0, wB);
}
#pragma fcuda grid x_dim=16 y_dim=16
#pragma fcuda coreinfo pipeline=no num_cores=1
void matrixMul(DATATYPE * C, DATATYPE * A, DATATYPE * B, int wA, int wB, dim3 gridDim, dim3 blockDim)
{
int aBegin_block0;
int aEnd_block0;
int aStep_block0;
int bBegin_block0;
int bStep_block0;
DATATYPE Csub_block_block0[BLOCKDIM_Y][BLOCKDIM_X];
int a_block0;
int b_block0;
int k_block0;
int c_block0;
dim3 blockIdx_block0;
int guard_matrixMul_CMP_5_block0;
int guard_matrixMul_TRN_10_block0;
int guard_matrixMul_TRN_6_block0;
int guard_matrixMul_SNC_7_block0;
int TRN_6_Bs_offset_block0;
int TRN_6_Bs_X_0_block0;
int TRN_6_Bs_c_1_block0;
int TRN_10_Csub_block_offset_block0;
int TRN_10_Csub_block_X_0_block0;
int TRN_10_Csub_block_c_1_block0;
dim3 blockIdx_loop;
for (blockIdx_loop.y=0; (gridDim.y+(-1*blockIdx_loop.y))>0; blockIdx_loop.y+=1)
{
blockIdx_block0.y=blockIdx_loop.y;
for (blockIdx_loop.x=0; (gridDim.x+(-1*blockIdx_loop.x))>0; blockIdx_loop.x+=1)
{
blockIdx_block0.x=(blockIdx_loop.x+0);
guard_matrixMul_SNC_7_block0=1;
guard_matrixMul_TRN_6_block0=1;
guard_matrixMul_TRN_10_block0=1;
guard_matrixMul_CMP_5_block0=1;
#pragma HLS INTERFACE ap_bus port=A depth=3840
#pragma HLS INTERFACE ap_bus port=B depth=6144
#pragma HLS INTERFACE ap_bus port=C depth=10240
aBegin_block0=((wA*16)*blockIdx_block0.y);
aEnd_block0=((aBegin_block0+wA)-1);
aStep_block0=16;
bBegin_block0=(16*blockIdx_block0.x);
bStep_block0=(16*wB);
matrixMul_CMP_5(guard_matrixMul_CMP_5_block0, blockDim, gridDim, blockIdx_block0, Csub_block_block0);
a_block0=0;
b_block0=0;
k_block0=0;
#pragma fcuda stmt HTGNode=FOR_HTG_TRN_6
for (((a_block0=aBegin_block0), (b_block0=bBegin_block0)); a_block0<=aEnd_block0; ((a_block0+=aStep_block0), (b_block0+=bStep_block0)))
{
DATATYPE As[16][16];
DATATYPE Bs[16][16];
TRN_6_Bs_offset_block0=0;
TRN_6_Bs_X_0_block0=b_block0;
TRN_6_Bs_c_1_block0=wB;
matrixMul_TRN_6(guard_matrixMul_TRN_6_block0, blockDim, gridDim, blockIdx_block0, As, A, a_block0, wA, Bs, B, b_block0, wB);
matrixMul_SNC_7(guard_matrixMul_SNC_7_block0, blockDim, gridDim, blockIdx_block0, Csub_block_block0, As, Bs);
}
c_block0=(((wB*16)*blockIdx_block0.y)+(16*blockIdx_block0.x));
TRN_10_Csub_block_offset_block0=0;
TRN_10_Csub_block_X_0_block0=c_block0;
TRN_10_Csub_block_c_1_block0=wB;
matrixMul_TRN_10_wrapper(guard_matrixMul_TRN_10_block0, blockDim, gridDim, blockIdx_block0, C, Csub_block_block0, c_block0, wB);
}
}
}
|
a68fbc38b23bce2161b18bfb628fd6a540d43e75.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/utilities/error.hpp>
#include <io/utilities/block_utils.cuh>
#include "parquet_gpu.h"
namespace cudf {
namespace io {
namespace parquet {
namespace gpu {
struct dict_state_s {
uint32_t row_cnt;
PageFragment *cur_fragment;
uint32_t *hashmap;
uint32_t total_dict_entries; //!< Total number of entries in dictionary
uint32_t dictionary_size; //!< Total dictionary size in bytes
uint32_t num_dict_entries; //!< Dictionary entries in current fragment to add
uint32_t frag_dict_size;
EncColumnChunk ck;
EncColumnDesc col;
PageFragment frag;
volatile uint32_t scratch_red[32];
uint16_t frag_dict[MAX_PAGE_FRAGMENT_SIZE];
};
/**
* @brief Computes a 16-bit dictionary hash
**/
inline __device__ uint32_t uint32_hash16(uint32_t v) { return (v + (v >> 16)) & 0xffff; }
inline __device__ uint32_t uint64_hash16(uint64_t v)
{
return uint32_hash16((uint32_t)(v + (v >> 32)));
}
inline __device__ uint32_t nvstr_hash16(const uint8_t *p, uint32_t len)
{
uint32_t hash = len;
if (len > 0) {
uint32_t align_p = 3 & reinterpret_cast<uintptr_t>(p);
const uint32_t *p32 = reinterpret_cast<const uint32_t *>(p - align_p);
uint32_t ofs = align_p * 8;
uint32_t v;
while (len > 4) {
v = *p32++;
if (ofs) { v = __funnelshift_r(v, *p32, ofs); }
hash = __funnelshift_l(hash, hash, 5) + v;
len -= 4;
}
v = *p32;
if (ofs) { v = __funnelshift_r(v, (align_p + len > 4) ? p32[1] : 0, ofs); }
v &= ((2 << (len * 8 - 1)) - 1);
hash = __funnelshift_l(hash, hash, 5) + v;
}
return uint32_hash16(hash);
}
/**
* @brief Fetch a page fragment and its dictionary entries in row-ascending order
*
* @param[in,out] s dictionary state
* @param[in,out] dict_data fragment dictionary data for the current column (zeroed out after
*fetching)
* @param[in] frag_start_row row position of current fragment
* @param[in] t thread id
**/
__device__ void FetchDictionaryFragment(dict_state_s *s,
uint32_t *dict_data,
uint32_t frag_start_row,
uint32_t t)
{
if (t < sizeof(PageFragment) / sizeof(uint32_t)) {
reinterpret_cast<uint32_t *>(&s->frag)[t] =
reinterpret_cast<const uint32_t *>(s->cur_fragment)[t];
}
__syncthreads();
// Store the row values in shared mem and set the corresponding dict_data to zero (end-of-list)
// It's easiest to do this here since we're only dealing with values all within a 5K-row window
for (uint32_t i = t; i < s->frag.num_dict_vals; i += 1024) {
uint32_t r = dict_data[frag_start_row + i] - frag_start_row;
s->frag_dict[i] = r;
}
__syncthreads();
for (uint32_t i = t; i < s->frag.num_dict_vals; i += 1024) {
uint32_t r = s->frag_dict[i];
dict_data[frag_start_row + r] = 0;
}
__syncthreads();
}
/// Generate dictionary indices in ascending row order
__device__ void GenerateDictionaryIndices(dict_state_s *s, uint32_t t)
{
uint32_t *dict_index = s->col.dict_index;
uint32_t *dict_data = s->col.dict_data + s->ck.start_row;
const uint32_t *valid_map = s->col.valid_map_base;
uint32_t num_dict_entries = 0;
for (uint32_t i = 0; i < s->row_cnt; i += 1024) {
uint32_t row = s->ck.start_row + i + t;
uint32_t is_valid = (i + t < s->row_cnt && row < s->col.num_rows)
? (valid_map) ? (valid_map[row >> 5] >> (row & 0x1f)) & 1 : 1
: 0;
uint32_t dict_idx = (is_valid) ? dict_index[row] : 0;
uint32_t is_unique =
(is_valid &&
dict_idx ==
row); // Any value that doesn't have bit31 set should have dict_idx=row at this point
uint32_t umask = BALLOT(is_unique);
uint32_t pos = num_dict_entries + __popc(umask & ((1 << (t & 0x1f)) - 1));
if (!(t & 0x1f)) { s->scratch_red[t >> 5] = __popc(umask); }
num_dict_entries += __syncthreads_count(is_unique);
if (t < 32) { s->scratch_red[t] = WarpReducePos32(s->scratch_red[t], t); }
__syncthreads();
if (t >= 32) { pos += s->scratch_red[(t - 32) >> 5]; }
if (is_valid && is_unique) {
dict_data[pos] = row;
dict_index[row] = pos;
}
__syncthreads();
if (is_valid && !is_unique) {
// NOTE: Should have at most 3 iterations (once for early duplicate elimination, once for
// final dictionary duplicate elimination and once for re-ordering) (If something went wrong
// building the dictionary, it will likely hang or crash right here)
do {
dict_idx = dict_index[dict_idx & 0x7fffffff];
} while (dict_idx > 0x7fffffff);
dict_index[row] = dict_idx;
}
}
}
// blockDim(1024, 1, 1)
__global__ void __launch_bounds__(1024, 1)
gpuBuildChunkDictionaries(EncColumnChunk *chunks, uint32_t *dev_scratch)
{
__shared__ __align__(8) dict_state_s state_g;
dict_state_s *const s = &state_g;
uint32_t t = threadIdx.x;
uint32_t dtype, dtype_len, dtype_len_in;
if (t < sizeof(EncColumnChunk) / sizeof(uint32_t)) {
reinterpret_cast<uint32_t *>(&s->ck)[t] =
reinterpret_cast<const uint32_t *>(&chunks[blockIdx.x])[t];
}
__syncthreads();
if (!s->ck.has_dictionary) { return; }
if (t < sizeof(EncColumnDesc) / sizeof(uint32_t)) {
reinterpret_cast<uint32_t *>(&s->col)[t] =
reinterpret_cast<const uint32_t *>(s->ck.col_desc)[t];
}
__syncthreads();
if (!t) {
s->hashmap = dev_scratch + s->ck.dictionary_id * (size_t)(1 << kDictHashBits);
s->row_cnt = 0;
s->cur_fragment = s->ck.fragments;
s->total_dict_entries = 0;
s->dictionary_size = 0;
s->ck.num_dict_fragments = 0;
}
dtype = s->col.physical_type;
dtype_len = (dtype == INT64 || dtype == DOUBLE) ? 8 : 4;
if (dtype == INT32) {
dtype_len_in = GetDtypeLogicalLen(s->col.converted_type);
} else {
dtype_len_in = (dtype == BYTE_ARRAY) ? sizeof(nvstrdesc_s) : dtype_len;
}
__syncthreads();
while (s->row_cnt < s->ck.num_rows) {
uint32_t frag_start_row = s->ck.start_row + s->row_cnt, num_dict_entries, frag_dict_size;
FetchDictionaryFragment(s, s->col.dict_data, frag_start_row, t);
__syncthreads();
num_dict_entries = s->frag.num_dict_vals;
if (!t) {
s->num_dict_entries = 0;
s->frag_dict_size = 0;
}
for (uint32_t i = 0; i < num_dict_entries; i += 1024) {
bool is_valid = (i + t < num_dict_entries);
uint32_t len = 0;
uint32_t is_dupe = 0;
uint32_t row, hash, next, *next_addr;
uint32_t new_dict_entries;
if (is_valid) {
row = frag_start_row + s->frag_dict[i + t];
len = dtype_len;
if (dtype == BYTE_ARRAY) {
const char *ptr = reinterpret_cast<const nvstrdesc_s *>(s->col.column_data_base)[row].ptr;
uint32_t count =
(uint32_t) reinterpret_cast<const nvstrdesc_s *>(s->col.column_data_base)[row].count;
len += count;
hash = nvstr_hash16(reinterpret_cast<const uint8_t *>(ptr), count);
// Walk the list of rows with the same hash
next_addr = &s->hashmap[hash];
while ((next = atomicCAS(next_addr, 0, row + 1)) != 0) {
const char *ptr2 =
reinterpret_cast<const nvstrdesc_s *>(s->col.column_data_base)[next - 1].ptr;
uint32_t count2 =
reinterpret_cast<const nvstrdesc_s *>(s->col.column_data_base)[next - 1].count;
if (count2 == count && nvstr_is_equal(ptr, count, ptr2, count2)) {
is_dupe = 1;
break;
}
next_addr = &s->col.dict_data[next - 1];
}
} else {
uint64_t val;
if (dtype_len_in == 8) {
val = reinterpret_cast<const uint64_t *>(s->col.column_data_base)[row];
hash = uint64_hash16(val);
} else {
val = (dtype_len_in == 4)
? reinterpret_cast<const uint32_t *>(s->col.column_data_base)[row]
: (dtype_len_in == 2)
? reinterpret_cast<const uint16_t *>(s->col.column_data_base)[row]
: reinterpret_cast<const uint8_t *>(s->col.column_data_base)[row];
hash = uint32_hash16(val);
}
// Walk the list of rows with the same hash
next_addr = &s->hashmap[hash];
while ((next = atomicCAS(next_addr, 0, row + 1)) != 0) {
uint64_t val2 =
(dtype_len_in == 8)
? reinterpret_cast<const uint64_t *>(s->col.column_data_base)[next - 1]
: (dtype_len_in == 4)
? reinterpret_cast<const uint32_t *>(s->col.column_data_base)[next - 1]
: (dtype_len_in == 2)
? reinterpret_cast<const uint16_t *>(s->col.column_data_base)[next - 1]
: reinterpret_cast<const uint8_t *>(s->col.column_data_base)[next - 1];
if (val2 == val) {
is_dupe = 1;
break;
}
next_addr = &s->col.dict_data[next - 1];
}
}
}
// Count the non-duplicate entries
frag_dict_size = WarpReduceSum32((is_valid && !is_dupe) ? len : 0);
if (!(t & 0x1f)) { s->scratch_red[t >> 5] = frag_dict_size; }
new_dict_entries = __syncthreads_count(is_valid && !is_dupe);
if (t < 32) {
frag_dict_size = WarpReduceSum32(s->scratch_red[t]);
if (t == 0) {
s->frag_dict_size += frag_dict_size;
s->num_dict_entries += new_dict_entries;
}
}
if (is_valid) {
if (!is_dupe) {
s->col.dict_index[row] = row;
} else {
s->col.dict_index[row] = (next - 1) | (1u << 31);
}
}
__syncthreads();
// At this point, the dictionary order is non-deterministic, and we want insertion order
// Make sure that the non-duplicate entry corresponds to the lower row number
// (The entry in dict_data (next-1) used for duplicate elimination does not need
// to be the lowest row number)
bool reorder_check = (is_valid && is_dupe && next - 1 > row);
if (reorder_check) {
next = s->col.dict_index[next - 1];
while (next & (1u << 31)) { next = s->col.dict_index[next & 0x7fffffff]; }
}
if (__syncthreads_or(reorder_check)) {
if (reorder_check) { atomicMin(&s->col.dict_index[next], row); }
__syncthreads();
if (reorder_check && s->col.dict_index[next] == row) {
s->col.dict_index[next] = row | (1u << 31);
s->col.dict_index[row] = row;
}
__syncthreads();
}
}
__syncthreads();
num_dict_entries = s->num_dict_entries;
frag_dict_size = s->frag_dict_size;
if (s->total_dict_entries + num_dict_entries > 65536 ||
(s->dictionary_size != 0 && s->dictionary_size + frag_dict_size > 512 * 1024)) {
break;
}
__syncthreads();
if (!t) {
if (num_dict_entries != s->frag.num_dict_vals) {
s->cur_fragment->num_dict_vals = num_dict_entries;
}
if (frag_dict_size != s->frag.dict_data_size) { s->frag.dict_data_size = frag_dict_size; }
s->total_dict_entries += num_dict_entries;
s->dictionary_size += frag_dict_size;
s->row_cnt += s->frag.num_rows;
s->cur_fragment++;
s->ck.num_dict_fragments++;
}
__syncthreads();
}
__syncthreads();
GenerateDictionaryIndices(s, t);
if (!t) {
chunks[blockIdx.x].num_dict_fragments = s->ck.num_dict_fragments;
chunks[blockIdx.x].dictionary_size = s->dictionary_size;
chunks[blockIdx.x].total_dict_entries = s->total_dict_entries;
}
}
/**
* @brief Launches kernel for building chunk dictionaries
*
* @param[in] chunks Column chunks
* @param[in] dev_scratch Device scratch data (kDictScratchSize per dictionary)
* @param[in] num_chunks Number of column chunks
* @param[in] stream CUDA stream to use, default 0
*
* @return hipSuccess if successful, a CUDA error code otherwise
**/
hipError_t BuildChunkDictionaries(EncColumnChunk *chunks,
uint32_t *dev_scratch,
size_t scratch_size,
uint32_t num_chunks,
hipStream_t stream)
{
if (num_chunks > 0 && scratch_size > 0) { // zero scratch size implies no dictionaries
CUDA_TRY(hipMemsetAsync(dev_scratch, 0, scratch_size, stream));
hipLaunchKernelGGL(( gpuBuildChunkDictionaries), dim3(num_chunks), dim3(1024), 0, stream, chunks, dev_scratch);
}
return hipSuccess;
}
} // namespace gpu
} // namespace parquet
} // namespace io
} // namespace cudf
| a68fbc38b23bce2161b18bfb628fd6a540d43e75.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/utilities/error.hpp>
#include <io/utilities/block_utils.cuh>
#include "parquet_gpu.h"
namespace cudf {
namespace io {
namespace parquet {
namespace gpu {
struct dict_state_s {
uint32_t row_cnt;
PageFragment *cur_fragment;
uint32_t *hashmap;
uint32_t total_dict_entries; //!< Total number of entries in dictionary
uint32_t dictionary_size; //!< Total dictionary size in bytes
uint32_t num_dict_entries; //!< Dictionary entries in current fragment to add
uint32_t frag_dict_size;
EncColumnChunk ck;
EncColumnDesc col;
PageFragment frag;
volatile uint32_t scratch_red[32];
uint16_t frag_dict[MAX_PAGE_FRAGMENT_SIZE];
};
/**
* @brief Computes a 16-bit dictionary hash
**/
inline __device__ uint32_t uint32_hash16(uint32_t v) { return (v + (v >> 16)) & 0xffff; }
inline __device__ uint32_t uint64_hash16(uint64_t v)
{
return uint32_hash16((uint32_t)(v + (v >> 32)));
}
inline __device__ uint32_t nvstr_hash16(const uint8_t *p, uint32_t len)
{
uint32_t hash = len;
if (len > 0) {
uint32_t align_p = 3 & reinterpret_cast<uintptr_t>(p);
const uint32_t *p32 = reinterpret_cast<const uint32_t *>(p - align_p);
uint32_t ofs = align_p * 8;
uint32_t v;
while (len > 4) {
v = *p32++;
if (ofs) { v = __funnelshift_r(v, *p32, ofs); }
hash = __funnelshift_l(hash, hash, 5) + v;
len -= 4;
}
v = *p32;
if (ofs) { v = __funnelshift_r(v, (align_p + len > 4) ? p32[1] : 0, ofs); }
v &= ((2 << (len * 8 - 1)) - 1);
hash = __funnelshift_l(hash, hash, 5) + v;
}
return uint32_hash16(hash);
}
/**
* @brief Fetch a page fragment and its dictionary entries in row-ascending order
*
* @param[in,out] s dictionary state
* @param[in,out] dict_data fragment dictionary data for the current column (zeroed out after
*fetching)
* @param[in] frag_start_row row position of current fragment
* @param[in] t thread id
**/
__device__ void FetchDictionaryFragment(dict_state_s *s,
uint32_t *dict_data,
uint32_t frag_start_row,
uint32_t t)
{
if (t < sizeof(PageFragment) / sizeof(uint32_t)) {
reinterpret_cast<uint32_t *>(&s->frag)[t] =
reinterpret_cast<const uint32_t *>(s->cur_fragment)[t];
}
__syncthreads();
// Store the row values in shared mem and set the corresponding dict_data to zero (end-of-list)
// It's easiest to do this here since we're only dealing with values all within a 5K-row window
for (uint32_t i = t; i < s->frag.num_dict_vals; i += 1024) {
uint32_t r = dict_data[frag_start_row + i] - frag_start_row;
s->frag_dict[i] = r;
}
__syncthreads();
for (uint32_t i = t; i < s->frag.num_dict_vals; i += 1024) {
uint32_t r = s->frag_dict[i];
dict_data[frag_start_row + r] = 0;
}
__syncthreads();
}
/// Generate dictionary indices in ascending row order
__device__ void GenerateDictionaryIndices(dict_state_s *s, uint32_t t)
{
uint32_t *dict_index = s->col.dict_index;
uint32_t *dict_data = s->col.dict_data + s->ck.start_row;
const uint32_t *valid_map = s->col.valid_map_base;
uint32_t num_dict_entries = 0;
for (uint32_t i = 0; i < s->row_cnt; i += 1024) {
uint32_t row = s->ck.start_row + i + t;
uint32_t is_valid = (i + t < s->row_cnt && row < s->col.num_rows)
? (valid_map) ? (valid_map[row >> 5] >> (row & 0x1f)) & 1 : 1
: 0;
uint32_t dict_idx = (is_valid) ? dict_index[row] : 0;
uint32_t is_unique =
(is_valid &&
dict_idx ==
row); // Any value that doesn't have bit31 set should have dict_idx=row at this point
uint32_t umask = BALLOT(is_unique);
uint32_t pos = num_dict_entries + __popc(umask & ((1 << (t & 0x1f)) - 1));
if (!(t & 0x1f)) { s->scratch_red[t >> 5] = __popc(umask); }
num_dict_entries += __syncthreads_count(is_unique);
if (t < 32) { s->scratch_red[t] = WarpReducePos32(s->scratch_red[t], t); }
__syncthreads();
if (t >= 32) { pos += s->scratch_red[(t - 32) >> 5]; }
if (is_valid && is_unique) {
dict_data[pos] = row;
dict_index[row] = pos;
}
__syncthreads();
if (is_valid && !is_unique) {
// NOTE: Should have at most 3 iterations (once for early duplicate elimination, once for
// final dictionary duplicate elimination and once for re-ordering) (If something went wrong
// building the dictionary, it will likely hang or crash right here)
do {
dict_idx = dict_index[dict_idx & 0x7fffffff];
} while (dict_idx > 0x7fffffff);
dict_index[row] = dict_idx;
}
}
}
// blockDim(1024, 1, 1)
__global__ void __launch_bounds__(1024, 1)
gpuBuildChunkDictionaries(EncColumnChunk *chunks, uint32_t *dev_scratch)
{
__shared__ __align__(8) dict_state_s state_g;
dict_state_s *const s = &state_g;
uint32_t t = threadIdx.x;
uint32_t dtype, dtype_len, dtype_len_in;
if (t < sizeof(EncColumnChunk) / sizeof(uint32_t)) {
reinterpret_cast<uint32_t *>(&s->ck)[t] =
reinterpret_cast<const uint32_t *>(&chunks[blockIdx.x])[t];
}
__syncthreads();
if (!s->ck.has_dictionary) { return; }
if (t < sizeof(EncColumnDesc) / sizeof(uint32_t)) {
reinterpret_cast<uint32_t *>(&s->col)[t] =
reinterpret_cast<const uint32_t *>(s->ck.col_desc)[t];
}
__syncthreads();
if (!t) {
s->hashmap = dev_scratch + s->ck.dictionary_id * (size_t)(1 << kDictHashBits);
s->row_cnt = 0;
s->cur_fragment = s->ck.fragments;
s->total_dict_entries = 0;
s->dictionary_size = 0;
s->ck.num_dict_fragments = 0;
}
dtype = s->col.physical_type;
dtype_len = (dtype == INT64 || dtype == DOUBLE) ? 8 : 4;
if (dtype == INT32) {
dtype_len_in = GetDtypeLogicalLen(s->col.converted_type);
} else {
dtype_len_in = (dtype == BYTE_ARRAY) ? sizeof(nvstrdesc_s) : dtype_len;
}
__syncthreads();
while (s->row_cnt < s->ck.num_rows) {
uint32_t frag_start_row = s->ck.start_row + s->row_cnt, num_dict_entries, frag_dict_size;
FetchDictionaryFragment(s, s->col.dict_data, frag_start_row, t);
__syncthreads();
num_dict_entries = s->frag.num_dict_vals;
if (!t) {
s->num_dict_entries = 0;
s->frag_dict_size = 0;
}
for (uint32_t i = 0; i < num_dict_entries; i += 1024) {
bool is_valid = (i + t < num_dict_entries);
uint32_t len = 0;
uint32_t is_dupe = 0;
uint32_t row, hash, next, *next_addr;
uint32_t new_dict_entries;
if (is_valid) {
row = frag_start_row + s->frag_dict[i + t];
len = dtype_len;
if (dtype == BYTE_ARRAY) {
const char *ptr = reinterpret_cast<const nvstrdesc_s *>(s->col.column_data_base)[row].ptr;
uint32_t count =
(uint32_t) reinterpret_cast<const nvstrdesc_s *>(s->col.column_data_base)[row].count;
len += count;
hash = nvstr_hash16(reinterpret_cast<const uint8_t *>(ptr), count);
// Walk the list of rows with the same hash
next_addr = &s->hashmap[hash];
while ((next = atomicCAS(next_addr, 0, row + 1)) != 0) {
const char *ptr2 =
reinterpret_cast<const nvstrdesc_s *>(s->col.column_data_base)[next - 1].ptr;
uint32_t count2 =
reinterpret_cast<const nvstrdesc_s *>(s->col.column_data_base)[next - 1].count;
if (count2 == count && nvstr_is_equal(ptr, count, ptr2, count2)) {
is_dupe = 1;
break;
}
next_addr = &s->col.dict_data[next - 1];
}
} else {
uint64_t val;
if (dtype_len_in == 8) {
val = reinterpret_cast<const uint64_t *>(s->col.column_data_base)[row];
hash = uint64_hash16(val);
} else {
val = (dtype_len_in == 4)
? reinterpret_cast<const uint32_t *>(s->col.column_data_base)[row]
: (dtype_len_in == 2)
? reinterpret_cast<const uint16_t *>(s->col.column_data_base)[row]
: reinterpret_cast<const uint8_t *>(s->col.column_data_base)[row];
hash = uint32_hash16(val);
}
// Walk the list of rows with the same hash
next_addr = &s->hashmap[hash];
while ((next = atomicCAS(next_addr, 0, row + 1)) != 0) {
uint64_t val2 =
(dtype_len_in == 8)
? reinterpret_cast<const uint64_t *>(s->col.column_data_base)[next - 1]
: (dtype_len_in == 4)
? reinterpret_cast<const uint32_t *>(s->col.column_data_base)[next - 1]
: (dtype_len_in == 2)
? reinterpret_cast<const uint16_t *>(s->col.column_data_base)[next - 1]
: reinterpret_cast<const uint8_t *>(s->col.column_data_base)[next - 1];
if (val2 == val) {
is_dupe = 1;
break;
}
next_addr = &s->col.dict_data[next - 1];
}
}
}
// Count the non-duplicate entries
frag_dict_size = WarpReduceSum32((is_valid && !is_dupe) ? len : 0);
if (!(t & 0x1f)) { s->scratch_red[t >> 5] = frag_dict_size; }
new_dict_entries = __syncthreads_count(is_valid && !is_dupe);
if (t < 32) {
frag_dict_size = WarpReduceSum32(s->scratch_red[t]);
if (t == 0) {
s->frag_dict_size += frag_dict_size;
s->num_dict_entries += new_dict_entries;
}
}
if (is_valid) {
if (!is_dupe) {
s->col.dict_index[row] = row;
} else {
s->col.dict_index[row] = (next - 1) | (1u << 31);
}
}
__syncthreads();
// At this point, the dictionary order is non-deterministic, and we want insertion order
// Make sure that the non-duplicate entry corresponds to the lower row number
// (The entry in dict_data (next-1) used for duplicate elimination does not need
// to be the lowest row number)
bool reorder_check = (is_valid && is_dupe && next - 1 > row);
if (reorder_check) {
next = s->col.dict_index[next - 1];
while (next & (1u << 31)) { next = s->col.dict_index[next & 0x7fffffff]; }
}
if (__syncthreads_or(reorder_check)) {
if (reorder_check) { atomicMin(&s->col.dict_index[next], row); }
__syncthreads();
if (reorder_check && s->col.dict_index[next] == row) {
s->col.dict_index[next] = row | (1u << 31);
s->col.dict_index[row] = row;
}
__syncthreads();
}
}
__syncthreads();
num_dict_entries = s->num_dict_entries;
frag_dict_size = s->frag_dict_size;
if (s->total_dict_entries + num_dict_entries > 65536 ||
(s->dictionary_size != 0 && s->dictionary_size + frag_dict_size > 512 * 1024)) {
break;
}
__syncthreads();
if (!t) {
if (num_dict_entries != s->frag.num_dict_vals) {
s->cur_fragment->num_dict_vals = num_dict_entries;
}
if (frag_dict_size != s->frag.dict_data_size) { s->frag.dict_data_size = frag_dict_size; }
s->total_dict_entries += num_dict_entries;
s->dictionary_size += frag_dict_size;
s->row_cnt += s->frag.num_rows;
s->cur_fragment++;
s->ck.num_dict_fragments++;
}
__syncthreads();
}
__syncthreads();
GenerateDictionaryIndices(s, t);
if (!t) {
chunks[blockIdx.x].num_dict_fragments = s->ck.num_dict_fragments;
chunks[blockIdx.x].dictionary_size = s->dictionary_size;
chunks[blockIdx.x].total_dict_entries = s->total_dict_entries;
}
}
/**
* @brief Launches kernel for building chunk dictionaries
*
* @param[in] chunks Column chunks
* @param[in] dev_scratch Device scratch data (kDictScratchSize per dictionary)
* @param[in] num_chunks Number of column chunks
* @param[in] stream CUDA stream to use, default 0
*
* @return cudaSuccess if successful, a CUDA error code otherwise
**/
cudaError_t BuildChunkDictionaries(EncColumnChunk *chunks,
uint32_t *dev_scratch,
size_t scratch_size,
uint32_t num_chunks,
cudaStream_t stream)
{
if (num_chunks > 0 && scratch_size > 0) { // zero scratch size implies no dictionaries
CUDA_TRY(cudaMemsetAsync(dev_scratch, 0, scratch_size, stream));
gpuBuildChunkDictionaries<<<num_chunks, 1024, 0, stream>>>(chunks, dev_scratch);
}
return cudaSuccess;
}
} // namespace gpu
} // namespace parquet
} // namespace io
} // namespace cudf
|
1a17a640c14ea7b25880fe9b13fef8543a54a9c8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by on 2019/12/31.
//
#include "deformable_conv2d.h"
#include <cstdlib>
#include <algorithm>
#ifdef GOOGLE_CUDA
#include "tensorflow/core/util/gpu_kernel_helper.h"
#include "tensorflow/core/platform/stream_executor.h"
#endif
namespace tensorflow {
typedef Eigen::GpuDevice GPUDevice;
typedef Eigen::ThreadPoolDevice CPUDevice;
#ifdef GOOGLE_CUDA
template<typename DType>
__global__ void SwapAxisKernel(const int n, const int cuda_mem_size, const int min_unit_size,
DType *input_data, const int dim_num, const int axis_x_dims, const int axis_y_dims,
const int axis_x, const int axis_y) {
CUDA_1D_KERNEL_LOOP(index, n) {
DType *device_data = new DType[cuda_mem_size];
DType *input_data_ptr = input_data + index * cuda_mem_size;
for (int j = 0; j < axis_y_dims; j++) {
for (int i = 0; i < axis_x_dims; i++) {
DType *temp_ptr = input_data_ptr + (i * axis_x_dims + j) * min_unit_size;
DType *device_data_temp_ptr = device_data + (j * axis_y_dims + i) * min_unit_size;
for (int k = 0; k < min_unit_size; k++) {
*(device_data_temp_ptr + k) = *(temp_ptr + k);
}
}
}
for (int i = 0; i < cuda_mem_size; i++) {
*(input_data_ptr + i) = *(device_data + i);
}
delete[]device_data;
}
}
template<typename DType>
__global__ void DeformableConv2DIm2ColKernel(const int n,
const DType *data_im,
const DType *data_offset,
const DType *data_mask,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int pad_h,
const int pad_w,
const int stride_h,
const int stride_w,
const int dilation_h,
const int dilation_w,
const int channel_per_deformable_group,
const int batch_size,
const int num_channels,
const int deformable_group,
const int height_col,
const int width_col,
DType *data_col) {
/*
* channel_per_deformable_group // deformable_group,
* //batch_sizeim2col_step_, 1
*/
CUDA_1D_KERNEL_LOOP(index, n) {
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int b_col = (index / width_col / height_col) % batch_size;
const int c_im = (index / width_col / height_col) / batch_size;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
DType *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
const DType *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
const DType *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 *
kernel_h * kernel_w * height_col * width_col;
const DType *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h *
kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col;
const DType offset_h = data_offset_ptr[data_offset_h_ptr];
const DType offset_w = data_offset_ptr[data_offset_w_ptr];
const DType mask = data_mask_ptr[data_mask_hw_ptr];
DType val = static_cast<DType>(0);
const DType h_im = h_in + i * dilation_h + offset_h;
const DType w_im = w_in + j * dilation_w + offset_w;
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) {
val = DmcnIm2colBilinear(data_im_ptr, width, height, width, h_im, w_im);
}
*data_col_ptr = val * mask;
data_col_ptr += batch_size * height_col * width_col;
}
}
}
}
// Forward kernel of deformable position-sensitive ROI pooling.
// One thread per output element; the output is laid out as (n, ctop, ph, pw).
// top_data receives the average of the in-image samples of each bin and
// top_count the number of such samples (consumed by the backward pass).
template<typename T>
__global__ void DeformablePSROIPoolForwardKernel(const int count, const T *bottom_data,
                                                 const T spatial_scale, const int channels,
                                                 const int height, const int width,
                                                 const int pooled_height, const int pooled_width,
                                                 const T *bottom_rois, const T *bottom_trans,
                                                 const int no_trans, const T trans_std,
                                                 const int sample_per_part, const int output_dim,
                                                 const int group_size, const int part_size,
                                                 const int num_classes, const int channels_each_class,
                                                 T *top_data, T *top_count) {
  CUDA_1D_KERNEL_LOOP(index, count) {
    // The output is in order (n, ctop, ph, pw)
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int ctop = (index / pooled_width / pooled_height) % output_dim;
    int n = index / pooled_width / pooled_height / output_dim;
    // [start, end) interval for spatial sampling.
    // ROI rows are (batch_index, x1, y1, x2, y2) in image coordinates.
    const T *offset_bottom_rois = bottom_rois + n * 5;
    int roi_batch_ind = offset_bottom_rois[0];
    T roi_start_w = (T) (round(offset_bottom_rois[1])) * spatial_scale - 0.5;
    T roi_start_h = (T) (round(offset_bottom_rois[2])) * spatial_scale - 0.5;
    T roi_end_w = (T) (round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5;
    T roi_end_h = (T) (round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5;
    // Force too small ROIs to be 1x1
    T roi_width = ::max(roi_end_w - roi_start_w, static_cast<T>(0.1));  // avoid 0
    T roi_height = ::max(roi_end_h - roi_start_h, static_cast<T>(0.1));
    // Compute bin and sub-sample sizes at the bottom (input) resolution.
    T bin_size_h = roi_height / static_cast<T>(pooled_height);
    T bin_size_w = roi_width / static_cast<T>(pooled_width);
    T sub_bin_size_h = bin_size_h / static_cast<T>(sample_per_part);
    T sub_bin_size_w = bin_size_w / static_cast<T>(sample_per_part);
    // Cell of the (part_size x part_size) translation grid this bin reads.
    int part_h = floor(static_cast<T>(ph) / pooled_height * part_size);
    int part_w = floor(static_cast<T>(pw) / pooled_width * part_size);
    int class_id = ctop / channels_each_class;
    // Learned (normalized) bin translation, scaled by trans_std.
    T trans_x = no_trans ? (T) (0) :
                bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w]
                    * (T) trans_std;
    T trans_y = no_trans ? (T) (0) :
                bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w]
                    * (T) trans_std;
    T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w;
    wstart += trans_x * roi_width;
    T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h;
    hstart += trans_y * roi_height;
    T sum = 0;
    int total = 0;
    // Position-sensitive group of this output channel/bin.
    int gw = floor(static_cast<T>(pw) * group_size / pooled_width);
    int gh = floor(static_cast<T>(ph) * group_size / pooled_height);
    gw = ::min(::max(gw, 0), group_size - 1);
    gh = ::min(::max(gh, 0), group_size - 1);
    const T *offset_bottom_data = bottom_data + (roi_batch_ind * channels) * height * width;
    for (int ih = 0; ih < sample_per_part; ++ih) {
      for (int iw = 0; iw < sample_per_part; ++iw) {
        T w = wstart + iw * sub_bin_size_w;
        T h = hstart + ih * sub_bin_size_h;
        // Skip samples that fall entirely outside the image.
        if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5) {
          continue;
        }
        w = ::min(::max(w, static_cast<T>(0.)), static_cast<T>(width - 1.));
        h = ::min(::max(h, static_cast<T>(0.)), static_cast<T>(height - 1.));
        int c = (ctop * group_size + gh) * group_size + gw;
        // BUG FIX: DmcnIm2colBilinear takes (data, data_width, height, width, h, w)
        // — see the other call sites in this file. The old call passed
        // (data, w, h, w, height, width), i.e. coordinates where the geometry
        // belongs and vice versa, producing garbage samples.
        T val = DmcnIm2colBilinear(offset_bottom_data + c * height * width, width, height, width, h, w);
        sum += val;
        total++;
      }
    }
    // Average over the samples that landed inside the image; record the count.
    top_data[index] = total == 0 ? (T) (0) : sum / total;
    top_count[index] = total;
  }
}
// Backward (gradient accumulation) kernel of deformable PS-ROI pooling.
// For every sample point of every output bin it distributes the averaged
// upstream gradient over the four bilinear taps (atomically, since bins and
// ROIs overlap), and — when no_trans == 0 — also accumulates the gradient
// w.r.t. the learned bin translations into bottom_trans_diff.
template<typename T>
__global__ void DeformablePSROIPoolBackwardAccKernel(const int count,
                                                     const T *top_diff,
                                                     const T *top_count,
                                                     const int num_rois,
                                                     const T spatial_scale,
                                                     const int channels,
                                                     const int height,
                                                     const int width,
                                                     const int pooled_height,
                                                     const int pooled_width,
                                                     const int output_dim,
                                                     T *bottom_data_diff,
                                                     T *bottom_trans_diff,
                                                     const T *bottom_data,
                                                     const T *bottom_rois,
                                                     const T *bottom_trans, const int no_trans,
                                                     const T trans_std, const int sample_per_part,
                                                     const int group_size, const int part_size,
                                                     const int num_classes,
                                                     const int channels_each_class) {
  CUDA_1D_KERNEL_LOOP(index, count) {
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int ctop = (index / pooled_width / pooled_height) % output_dim;
    int n = index / pooled_width / pooled_height / output_dim;
    // [start, end) interval for spatial sampling; must mirror the forward pass.
    const T *offset_bottom_rois = bottom_rois + n * 5;
    int roi_batch_ind = offset_bottom_rois[0];
    T roi_start_w = (T) (round(offset_bottom_rois[1])) * spatial_scale - 0.5;
    T roi_start_h = (T) (round(offset_bottom_rois[2])) * spatial_scale - 0.5;
    T roi_end_w = (T) (round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5;
    T roi_end_h = (T) (round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5;
    // Force too small ROIs to be 1x1
    T roi_width = ::max(roi_end_w - roi_start_w, static_cast<T>(0.1));  // avoid 0
    T roi_height = ::max(roi_end_h - roi_start_h, static_cast<T>(0.1));
    // Compute w and h at bottom
    T bin_size_h = roi_height / (T) (pooled_height);
    T bin_size_w = roi_width / (T) (pooled_width);
    T sub_bin_size_h = bin_size_h / (T) (sample_per_part);
    T sub_bin_size_w = bin_size_w / (T) (sample_per_part);
    int part_h = floor((T) (ph) / pooled_height * part_size);
    int part_w = floor((T) (pw) / pooled_width * part_size);
    int class_id = ctop / channels_each_class;
    T trans_x = no_trans ? (T) (0) :
                bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w]
                    * (T) trans_std;
    T trans_y = no_trans ? (T) (0) :
                bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w]
                    * (T) trans_std;
    T wstart = (T) (pw) * bin_size_w + roi_start_w;
    wstart += trans_x * roi_width;
    T hstart = (T) (ph) * bin_size_h + roi_start_h;
    hstart += trans_y * roi_height;
    // Bins with no in-image samples produced zero output; nothing to propagate.
    if (top_count[index] <= 0) {
      continue;
    }
    // Forward averaged over top_count samples, so each sample gets 1/count.
    T diff_val = top_diff[index] / top_count[index];
    const T *offset_bottom_data = bottom_data + roi_batch_ind * channels * height * width;
    T *offset_bottom_data_diff = bottom_data_diff + roi_batch_ind * channels * height * width;
    int gw = floor((T) (pw) * group_size / pooled_width);
    int gh = floor((T) (ph) * group_size / pooled_height);
    gw = ::min(::max(gw, 0), group_size - 1);
    gh = ::min(::max(gh, 0), group_size - 1);
    for (int ih = 0; ih < sample_per_part; ih++) {
      for (int iw = 0; iw < sample_per_part; iw++) {
        T w = wstart + iw * sub_bin_size_w;
        T h = hstart + ih * sub_bin_size_h;
        // bilinear interpolation
        if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5) {
          continue;
        }
        // FIX: clamp with T-typed bounds (double literals silently promoted the
        // arithmetic to double for T = float), matching the forward kernel.
        w = ::min(::max(w, static_cast<T>(0.)), static_cast<T>(width - 1.));
        h = ::min(::max(h, static_cast<T>(0.)), static_cast<T>(height - 1.));
        int c = (ctop * group_size + gh) * group_size + gw;
        // backward on feature: bilinear weights of the four surrounding pixels.
        int x0 = floor(w);
        int x1 = ceil(w);
        int y0 = floor(h);
        int y1 = ceil(h);
        T dist_x = w - x0, dist_y = h - y0;
        T q00 = (1 - dist_x) * (1 - dist_y);
        T q01 = (1 - dist_x) * dist_y;
        T q10 = dist_x * (1 - dist_y);
        T q11 = dist_x * dist_y;
        int bottom_index_base = c * height * width;
        // Atomic: multiple bins/ROIs may touch the same input pixel.
        CudaAtomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x0, q00 * diff_val);
        CudaAtomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x0, q01 * diff_val);
        CudaAtomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x1, q10 * diff_val);
        CudaAtomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x1, q11 * diff_val);
        if (no_trans) {
          continue;
        }
        // Gradient w.r.t. the translation: derivative of the bilinear sample
        // along x/y, scaled back by trans_std and the ROI extent.
        T U00 = offset_bottom_data[bottom_index_base + y0 * width + x0];
        T U01 = offset_bottom_data[bottom_index_base + y1 * width + x0];
        T U10 = offset_bottom_data[bottom_index_base + y0 * width + x1];
        T U11 = offset_bottom_data[bottom_index_base + y1 * width + x1];
        T diff_x = (U11 * dist_y + U10 * (1 - dist_y) - U01 * dist_y - U00 * (1 - dist_y)) * trans_std * diff_val;
        diff_x *= roi_width;
        T diff_y = (U11 * dist_x + U01 * (1 - dist_x) - U10 * dist_x - U00 * (1 - dist_x)) * trans_std * diff_val;
        diff_y *= roi_height;
        CudaAtomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w,
                      diff_x);
        CudaAtomicAdd(
            bottom_trans_diff + (((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w,
            diff_y);
      }
    }
  }
}
// Scatters gradients from the im2col column buffer back onto the input image
// (modulated deformable col2im). Accumulation is atomic because many column
// entries can map to the same input pixel.
template<typename DType>
__global__ void DeformableConv2DCol2ImKernel(
    const int n,
    const DType *data_col, const DType *data_offset, const DType *data_mask,
    const int channels, const int height, const int width,
    const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w,
    const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w,
    const int channel_per_deformable_group,
    const int batch_size, const int deformable_group,
    const int height_col, const int width_col,
    DType *grad_im) {
  CUDA_1D_KERNEL_LOOP(index, n) {
    // Decompose the flat column index into (c, i, j, b, h_out, w_out).
    const int j = (index / width_col / height_col / batch_size) % kernel_w;
    const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h;
    const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h;
    // compute the start and end of the output
    const int deformable_group_index = c / channel_per_deformable_group;
    int w_out = index % width_col;
    int h_out = (index / width_col) % height_col;
    int b = (index / width_col / height_col) % batch_size;
    int w_in = w_out * stride_w - pad_w;
    int h_in = h_out * stride_h - pad_h;
    const DType *data_offset_ptr = data_offset
        + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
    const DType *data_mask_ptr =
        data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
    const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
    const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
    const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_out) * width_col + w_out;
    const DType offset_h = data_offset_ptr[data_offset_h_ptr];
    const DType offset_w = data_offset_ptr[data_offset_w_ptr];
    const DType mask = data_mask_ptr[data_mask_hw_ptr];
    // Fractional sampling position that was used in the forward pass.
    const DType cur_inv_h_data = h_in + i * dilation_h + offset_h;
    const DType cur_inv_w_data = w_in + j * dilation_w + offset_w;
    // Upstream gradient, modulated by the mask (mirrors the forward pass).
    const DType cur_top_grad = data_col[index] * mask;
    const int cur_h = (int) cur_inv_h_data;
    const int cur_w = (int) cur_inv_w_data;
    // Scan a 5x5 window around the truncated position; the distance-< 1
    // guards restrict the updates to the pixels adjacent to the fractional
    // sample. NOTE(review): DmcnGetGradientWeight presumably returns the
    // bilinear weight of each such pixel — confirm against its definition.
    for (int dy = -2; dy <= 2; dy++) {
      for (int dx = -2; dx <= 2; dx++) {
        if (cur_h + dy >= 0 && cur_h + dy < height &&
            cur_w + dx >= 0 && cur_w + dx < width &&
            abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
            abs(cur_inv_w_data - (cur_w + dx)) < 1
            ) {
          int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
          DType weight =
              DmcnGetGradientWeight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width);
          CudaAtomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
        }
      }
    }
  }
}
// Gradient of the deformable im2col w.r.t. the learned offsets and masks.
// One thread per offset-channel position (b, c, h, w); each thread walks
// every image channel of its deformable group that consumed this offset and
// accumulates the coordinate gradient. The mask gradient is written once per
// (h, w) location, keyed off the even (h-)offset channel.
template<typename DType>
__global__ void DeformableConv2DCol2ImCoordGPUKernel(
    const int n,
    const DType *data_col, const DType *data_im,
    const DType *data_offset, const DType *data_mask,
    const int channels, const int height, const int width, // C, H, W
    const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w,
    const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w,
    const int channel_per_deformable_group,
    const int batch_size, const int offset_channels, const int deformable_group,
    const int height_col, const int width_col,
    DType *grad_offset, DType *grad_mask) {
  CUDA_1D_KERNEL_LOOP(index, n) {
    DType val = 0, mval = 0;
    int w = index % width_col;
    int h = (index / width_col) % height_col;
    int c = (index / width_col / height_col) % offset_channels;
    int b = (index / width_col / height_col) / offset_channels;
    // compute the start and end of the output
    const int deformable_group_index = c / (2 * kernel_h * kernel_w);
    const int col_step = kernel_h * kernel_w;
    int cnt = 0;
    const DType *data_col_ptr =
        data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col;
    const DType *data_im_ptr = data_im
        + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w
            * height * width;
    const DType *data_offset_ptr = data_offset
        + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
    const DType *data_mask_ptr =
        data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
    const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
    for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) {
      const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w;
      // Even offset channels carry the h component, odd ones the w component.
      const int bp_dir = offset_c % 2;
      int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
      int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
      int w_out = col_pos % width_col;
      int h_out = (col_pos / width_col) % height_col;
      int w_in = w_out * stride_w - pad_w;
      int h_in = h_out * stride_h - pad_h;
      const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
      const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out);
      const int data_mask_hw_ptr = (((i * kernel_w + j) * height_col + h_out) * width_col + w_out);
      const DType offset_h = data_offset_ptr[data_offset_h_ptr];
      const DType offset_w = data_offset_ptr[data_offset_w_ptr];
      const DType mask = data_mask_ptr[data_mask_hw_ptr];
      DType inv_h = h_in + i * dilation_h + offset_h;
      DType inv_w = w_in + j * dilation_w + offset_w;
      if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) {
        // Out-of-image sample: contributes nothing to the mask gradient;
        // -2 flags it for DmcnGetCoordinateWeight below.
        inv_h = inv_w = -2;
      } else {
        // Mask gradient: the forward pass multiplied the bilinear sample by
        // the mask, so d(out)/d(mask) is the sample itself.
        mval += data_col_ptr[col_pos]
            * DmcnIm2colBilinear(data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w);
      }
      const DType weight = DmcnGetCoordinateWeight(
          inv_h, inv_w,
          height, width, data_im_ptr + cnt * height * width, width, bp_dir);
      val += weight * data_col_ptr[col_pos] * mask;
      cnt += 1;
    }
    grad_offset[index] = val;
    // KERNEL_ASSIGN(grad_offset[index], offset_req, val);
    if (offset_c % 2 == 0) {
      grad_mask[
          (((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col
              + h) * width_col + w] = mval;
      // KERNEL_ASSIGN(grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w], mask_req, mval);
    }
  }
}
// Element-wise accumulation: result_data[i] += right_data[i] for i in [0, n).
// Uses an atomic add so concurrent accumulations into the same buffer are safe.
template<typename DType>
__global__ void PureAddToKernel(const int n, DType *result_data, const DType *right_data) {
  CUDA_1D_KERNEL_LOOP(i, n) {
    CudaAtomicAdd(&result_data[i], right_data[i]);
  }
}
// Fills result_data[0, n) with DType(0).
template<typename DType>
__global__ void SetZeroKernel(const int n, DType *result_data) {
  CUDA_1D_KERNEL_LOOP(i, n) {
    result_data[i] = DType(0);
  }
}
// Fills result_data[0, n) with DType(1).
template<typename DType>
__global__ void SetOneKernel(const int n, DType *result_data) {
  CUDA_1D_KERNEL_LOOP(i, n) {
    result_data[i] = DType(1);
  }
}
// Writes a single scalar at data[index]. Every launched thread performs the
// same store, which is redundant but harmless; launch with one thread.
template<typename DType>
__global__ void SetNumAtIndexKernel(DType num, int index, DType *data) {
  data[index] = num;
}
// Host-side launcher for the offset/mask gradient kernel of deformable
// conv2d. Only 2-D spatial kernels are supported; anything else aborts.
template<typename DType>
void DeformableConv2DCol2ImCoord<GPUDevice, DType>::operator()(const Eigen::GpuDevice &d,
                                                               const DType *data_col,
                                                               const DType *data_im,
                                                               const DType *data_offset,
                                                               const DType *data_mask,
                                                               const TShape &im_shape,
                                                               const TShape &col_shape,
                                                               const TShape &kernel_shape,
                                                               const TShape &pad,
                                                               const TShape &stride,
                                                               const TShape &dilation,
                                                               const int32_t deformable_group,
                                                               DType *grad_offset,
                                                               DType *grad_mask) {
  const int num_spatial_axes = kernel_shape.size();
  // One thread per offset-channel element of the column buffer.
  const int num_kernels =
      col_shape[1] * col_shape[2] * col_shape[3] * 2 * kernel_shape[0] * kernel_shape[1] * deformable_group;
  const int channel_per_deformable_group = col_shape[0] / deformable_group;
  // num_axes should be smaller than block size
  CudaLaunchConfig config = GetCudaLaunchConfig(num_kernels, d);
  CHECK_LT(num_spatial_axes, config.thread_per_block);
  if (num_spatial_axes == 2) {
    DeformableConv2DCol2ImCoordGPUKernel<DType><<<config.block_count, config.thread_per_block, 0, d.stream()>>>(
        num_kernels, data_col, data_im, data_offset, data_mask, im_shape[1], im_shape[2], im_shape[3],
        kernel_shape[0], kernel_shape[1], pad[0], pad[1], stride[0], stride[1],
        dilation[0], dilation[1], channel_per_deformable_group,
        col_shape[1], 2 * kernel_shape[0] * kernel_shape[1] * deformable_group, deformable_group,
        col_shape[2], col_shape[3], grad_offset, grad_mask);
  } else {
    LOG(FATAL) << "col2im_nd_gpu does not support computation with "
               << num_spatial_axes << " spatial axes";
  }
}
// Host-side launcher for the modulated deformable col2im kernel, which
// scatters column-buffer gradients back onto the input image.
template<typename DType>
void DeformableConv2DCol2Im<GPUDevice, DType>::operator()(
    const GPUDevice &d,
    const DType *data_col, const DType *data_offset, const DType *data_mask,
    const TShape &im_shape, const TShape &col_shape, const TShape &kernel_shape,
    const TShape &pad, const TShape &stride,
    const TShape &dilation, const int32_t deformable_group,
    DType *grad_im) {
  const int num_spatial_axes = kernel_shape.size();
  const int im_size = ProdShape(im_shape, 1, im_shape.size());
  const int channel_per_deformable_group = im_shape[1] / deformable_group;
  // One thread per element of the column buffer.
  const int num_kernels = ProdShape(col_shape, 0, col_shape.size());
  // num_axes should be smaller than block size
  CudaLaunchConfig config = GetCudaLaunchConfig(num_kernels, d);
  CHECK_LT(num_spatial_axes, config.thread_per_block);
  if (num_spatial_axes == 2) {
    DeformableConv2DCol2ImKernel<DType><<<config.block_count, config.thread_per_block, 0, d.stream()>>>(
        num_kernels, data_col, data_offset, data_mask, im_shape[1], im_shape[2], im_shape[3],
        kernel_shape[0], kernel_shape[1], pad[0], pad[1], stride[0], stride[1],
        dilation[0], dilation[1], channel_per_deformable_group,
        col_shape[1], deformable_group, col_shape[2], col_shape[3], grad_im);
  } else {
    LOG(FATAL) << "col2im_nd_gpu does not support computation with "
               << num_spatial_axes << " spatial axes";
  }
}
// Host-side launcher for the modulated deformable im2col kernel: expands the
// input image into column form using the learned offsets and masks.
template<typename DType>
void DeformableConv2DIm2Col<GPUDevice, DType>::operator()(
    const GPUDevice &d,
    const DType *data_im, const DType *data_offset, const DType *data_mask,
    const TShape &im_shape, const TShape &col_shape, const TShape &kernel_shape,
    const TShape &pad, const TShape &stride, const TShape &dilation,
    const int32_t deformable_group, DType *data_col) {
  const int num_spatial_axes = kernel_shape.size();
  // im_shape is NCHW, so im_shape[1] is the channel count.
  const int channel_per_deformable_group = im_shape[1] / deformable_group;
  // One thread per (input channel, column-buffer spatial position) pair;
  // col_shape is [K, im2col_step_, H, W].
  const int num_kernels = im_shape[1] * ProdShape(col_shape, 1, col_shape.size());
  CudaLaunchConfig config = GetCudaLaunchConfig(num_kernels, d);
  CHECK_LT(num_spatial_axes, config.thread_per_block);
  if (num_spatial_axes == 2) {
    DeformableConv2DIm2ColKernel<DType><<<config.block_count, config.thread_per_block, 0, d.stream()>>>(
        num_kernels,
        data_im,
        data_offset,
        data_mask,
        im_shape[2], im_shape[3],
        kernel_shape[0], kernel_shape[1],
        pad[0], pad[1],
        stride[0], stride[1],
        dilation[0], dilation[1],
        channel_per_deformable_group,
        col_shape[1], im_shape[1],
        deformable_group,
        col_shape[2], col_shape[3],
        data_col);
  } else {
    LOG(FATAL) << "im2col_nd_gpu does not support computation with "
               << num_spatial_axes << " spatial axes";
  }
}
// Zero-fills a device buffer of n elements on the given GPU device.
template<typename DType>
void SetZeros<GPUDevice, DType>::operator()(const GPUDevice &d, int n, DType *result_data) {
  const CudaLaunchConfig cfg = GetCudaLaunchConfig(n, d);
  SetZeroKernel<DType><<<cfg.block_count, cfg.thread_per_block, 0, d.stream()>>>(n, result_data);
}
// Accumulates right_data into result_data element-wise on the given device.
template<typename DType>
void PureAddTo<GPUDevice, DType>::operator()(const GPUDevice &d,
                                             const int n,
                                             DType *result_data,
                                             const DType *right_data) {
  const CudaLaunchConfig cfg = GetCudaLaunchConfig(n, d);
  PureAddToKernel<DType><<<cfg.block_count, cfg.thread_per_block, 0, d.stream()>>>(n, result_data, right_data);
}
// One-fills a device buffer of n elements on the given GPU device.
template<typename DType>
void SetOne<GPUDevice, DType>::operator()(const GPUDevice &d, int n, DType *result_data) {
  const CudaLaunchConfig cfg = GetCudaLaunchConfig(n, d);
  SetOneKernel<DType><<<cfg.block_count, cfg.thread_per_block, 0, d.stream()>>>(n, result_data);
}
// Writes a single scalar to data[index] on the device.
template<typename DType>
void SetNumAtIndex<GPUDevice, DType>::operator()(const GPUDevice &d, DType num, int index, DType *data) {
  const CudaLaunchConfig cfg = GetCudaLaunchConfig(1, d);
  SetNumAtIndexKernel<DType><<<cfg.block_count, cfg.thread_per_block, 0, d.stream()>>>(num, index, data);
}
// Explicit instantiations. Without them the shared library fails to load with
// "undefined symbol" errors such as
// _ZN10tensorflow13setNumAtIndexIN5Eigen9GpuDeviceEfEclERKS2_fiPf,
// because the template definitions above only exist in this translation unit.
// Instancing the functor structs below guarantees a concrete instantiation of
// every functor for each element type the op supports (float and double).
template
struct DeformableConv2DIm2Col<GPUDevice, double>;
template
struct DeformableConv2DCol2Im<GPUDevice, double>;
template
struct DeformableConv2DCol2ImCoord<GPUDevice, double>;
template
struct PureAddTo<GPUDevice, double>;
template
struct SetOne<GPUDevice, double>;
template
struct SetZeros<GPUDevice, double>;
template
struct SwapAxis<GPUDevice, double>;
template
struct SetNumAtIndex<GPUDevice, double>;
template
struct DeformableConv2DIm2Col<GPUDevice, float>;
template
struct DeformableConv2DCol2Im<GPUDevice, float>;
template
struct DeformableConv2DCol2ImCoord<GPUDevice, float>;
template
struct PureAddTo<GPUDevice, float>;
template
struct SetOne<GPUDevice, float>;
template
struct SetZeros<GPUDevice, float>;
template
struct SwapAxis<GPUDevice, float>;
template
struct SetNumAtIndex<GPUDevice, float>;
// Wraps a raw device pointer in a typed StreamExecutor DeviceMemory handle.
// The handle is non-owning: the caller keeps ownership of the allocation.
template<typename T>
se::DeviceMemory<T> AsDeviceMemory(const T *cuda_memory) {
  se::DeviceMemoryBase wrapped(const_cast<T *>(cuda_memory));
  return se::DeviceMemory<T>(wrapped);
}
// Scratch allocator that hands cuBLAS temporary buffers out of TensorFlow's
// allocator, keeping every allocated tensor alive until this allocator is
// destroyed (i.e. until the batched GEMM call has completed its enqueue).
class CublasScratchAllocator : public se::ScratchAllocator {
 public:
  using Stream = se::Stream;
  using DeviceMemoryBytes = se::DeviceMemory<uint8>;

  CublasScratchAllocator(OpKernelContext *context) : context_(context) {}

  // No explicit cap; defer to the underlying TF allocator.
  int64 GetMemoryLimitInBytes() override { return -1; }

  se::port::StatusOr<DeviceMemoryBytes> AllocateBytes(int64 byte_size) override {
    Tensor temporary_memory;
    Status allocation_status(context_->allocate_temp(
        DT_UINT8, TensorShape({byte_size}), &temporary_memory));
    if (!allocation_status.ok()) {
      // Signal failure with an empty buffer; the caller handles the fallback.
      return se::port::StatusOr<DeviceMemoryBytes>(
          DeviceMemoryBytes::MakeFromByteSize(nullptr, 0));
    }
    // Hold the reference of the allocated tensors until the end of the
    // allocator.
    allocated_tensors_.push_back(temporary_memory);
    return se::port::StatusOr<DeviceMemoryBytes>(
        DeviceMemoryBytes::MakeFromByteSize(
            temporary_memory.flat<uint8>().data(),
            temporary_memory.flat<uint8>().size()));
  }

  // Stream-taking overload kept for older StreamExecutor interfaces. The
  // stream is irrelevant to how we allocate, so delegate to the overload
  // above instead of duplicating its body (was a verbatim copy-paste).
  se::port::StatusOr<DeviceMemoryBytes> AllocateBytes(
      Stream *stream, int64 byte_size) {
    return AllocateBytes(byte_size);
  }

 private:
  OpKernelContext *context_;          // Borrowed; owns the real allocator.
  std::vector<Tensor> allocated_tensors_;  // Keeps buffers alive until destruction.
};
// Batched matrix multiply out[i] = op(x[i]) * op(y[i]) via StreamExecutor
// BLAS. Shapes are (batch, rows, cols); adj_x/adj_y request the (conjugate)
// transpose of the respective operand. Dispatches to GEMV / single GEMM /
// batched GEMM depending on the problem shape.
template<typename Scalar>
void LaunchBatchMatMul<GPUDevice, Scalar>::launch(OpKernelContext *context,
                                                  const TensorShape &in_x_shape,
                                                  const TensorShape &in_y_shape,
                                                  const Scalar *in_x_ptr,
                                                  const Scalar *in_y_ptr,
                                                  bool adj_x,
                                                  bool adj_y,
                                                  Scalar *out) {
  // For complex types "adjoint" means conjugate transpose.
  constexpr se::blas::Transpose kTranspose =
      is_complex<Scalar>::value ? se::blas::Transpose::kConjugateTranspose
                                : se::blas::Transpose::kTranspose;
  se::blas::Transpose trans[] = {se::blas::Transpose::kNoTranspose,
                                 kTranspose};
  // Logical GEMM dimensions after applying the adjoint flags.
  const uint64 m = in_x_shape.dim_size(adj_x ? 2 : 1);
  const uint64 k = in_x_shape.dim_size(adj_x ? 1 : 2);
  const uint64 n = in_y_shape.dim_size(adj_y ? 1 : 2);
  const uint64 batch_size = in_x_shape.dim_size(0);
  auto blas_transpose_a = trans[adj_x];
  auto blas_transpose_b = trans[adj_y];
  auto *stream = context->op_device_context()->stream();
  OP_REQUIRES(context, stream, errors::Internal("No GPU stream available."));
  // Per-batch device-memory views over the contiguous input/output buffers.
  typedef se::DeviceMemory<Scalar> DeviceMemoryType;
  std::vector<DeviceMemoryType> a_device_memory;
  std::vector<DeviceMemoryType> b_device_memory;
  std::vector<DeviceMemoryType> c_device_memory;
  std::vector<DeviceMemoryType *> a_ptrs;
  std::vector<DeviceMemoryType *> b_ptrs;
  std::vector<DeviceMemoryType *> c_ptrs;
  a_device_memory.reserve(batch_size);
  b_device_memory.reserve(batch_size);
  c_device_memory.reserve(batch_size);
  a_ptrs.reserve(batch_size);
  b_ptrs.reserve(batch_size);
  c_ptrs.reserve(batch_size);
  auto *a_base_ptr = in_x_ptr;
  auto *b_base_ptr = in_y_ptr;
  auto *c_base_ptr = out;
  for (int64 i = 0; i < batch_size; ++i) {
    a_device_memory.push_back(AsDeviceMemory(a_base_ptr + i * m * k));
    b_device_memory.push_back(AsDeviceMemory(b_base_ptr + i * k * n));
    c_device_memory.push_back(AsDeviceMemory(c_base_ptr + i * m * n));
    a_ptrs.push_back(&a_device_memory.back());
    b_ptrs.push_back(&b_device_memory.back());
    c_ptrs.push_back(&c_device_memory.back());
  }
  typedef Scalar Coefficient;
  // Cublas does
  // C = A x B
  // where A, B and C are assumed to be in column major.
  // We want the output to be in row-major, so we can compute
  // C' = B' x A', where ' stands for transpose (not adjoint).
  // TODO(yangzihao): Choose the best of the three strategies using autotune.
  if (batch_size == 1) {
    // This is a regular matrix*matrix or matrix*vector multiply. Avoid the
    // overhead of the scratch allocator and the batch interface.
    if (n == 1 &&
        blas_transpose_b != se::blas::Transpose::kConjugateTranspose &&
        blas_transpose_a != se::blas::Transpose::kConjugateTranspose) {
      // This is a matrix*vector multiply so use GEMV to compute A * b.
      // Here we are multiplying in the natural order, so we have to flip
      // the transposition flag to compensate for the tensor being stored
      // row-major. Since GEMV doesn't provide a way to just conjugate an
      // argument, we have to defer those cases to GEMM below.
      auto gemv_trans_a = blas_transpose_a == se::blas::Transpose::kTranspose
                          ? se::blas::Transpose::kNoTranspose
                          : se::blas::Transpose::kTranspose;
      bool blas_launch_status =
          stream
              ->ThenBlasGemv(gemv_trans_a, adj_x ? m : k, adj_x ? k : m,
                             static_cast<Coefficient>(1.0), *(a_ptrs[0]),
                             adj_x ? m : k, *(b_ptrs[0]), 1,
                             static_cast<Coefficient>(0.0), c_ptrs[0], 1)
              .ok();
      if (!blas_launch_status) {
        context->SetStatus(errors::Internal(
            "Blas xGEMV launch failed : a.shape=", in_x_shape.DebugString(),
            ", b.shape=", in_y_shape.DebugString(), ", m=", m, ", n=", n,
            ", k=", k));
      }
    } else {
      // Single GEMM, with operands swapped to account for row-major storage.
      bool blas_launch_status =
          stream
              ->ThenBlasGemm(blas_transpose_b, blas_transpose_a, n, m, k,
                             static_cast<Coefficient>(1.0), *(b_ptrs[0]),
                             adj_y ? k : n, *(a_ptrs[0]), adj_x ? m : k,
                             static_cast<Coefficient>(0.0), c_ptrs[0], n)
              .ok();
      if (!blas_launch_status) {
        context->SetStatus(errors::Internal(
            "Blas xGEMM launch failed : a.shape=", in_x_shape.DebugString(),
            ", b.shape=", in_y_shape.DebugString(), ", m=", m, ", n=", n,
            ", k=", k));
      }
    }
  } else {
    // True batched case: one batched-GEMM call with temporary scratch space.
    CublasScratchAllocator scratch_allocator(context);
    bool blas_launch_status =
        stream
            ->ThenBlasGemmBatchedWithScratch(
                blas_transpose_b, blas_transpose_a, n, m, k,
                static_cast<Coefficient>(1.0), b_ptrs, adj_y ? k : n, a_ptrs,
                adj_x ? m : k, static_cast<Coefficient>(0.0), c_ptrs, n,
                batch_size, &scratch_allocator)
            .ok();
    if (!blas_launch_status) {
      context->SetStatus(errors::Internal(
          "Blas xGEMMBatched launch failed : a.shape=",
          in_x_shape.DebugString(),
          ", b.shape=", in_y_shape.DebugString(), ", m=", m, ", n=", n,
          ", k=", k, ", batch_size=", batch_size));
    }
  }
}
// Host-side launcher for the deformable PS-ROI pooling forward kernel:
// one thread per output element.
template<typename T>
void DeformablePSROIPoolForward<GPUDevice, T>::operator()(const GPUDevice &d,
                                                          const int count,
                                                          const T *bottom_data,
                                                          const T spatial_scale,
                                                          const int channels,
                                                          const int height,
                                                          const int width,
                                                          const int pooled_height,
                                                          const int pooled_width,
                                                          const T *bottom_rois,
                                                          const T *bottom_trans,
                                                          const int no_trans,
                                                          const T trans_std,
                                                          const int sample_per_part,
                                                          const int output_dim,
                                                          const int group_size,
                                                          const int part_size,
                                                          const int num_classes,
                                                          const int channels_each_class,
                                                          T *top_data,
                                                          T *top_count) {
  const CudaLaunchConfig cfg = GetCudaLaunchConfig(count, d);
  DeformablePSROIPoolForwardKernel<T><<<cfg.block_count, cfg.thread_per_block, 0, d.stream()>>>(
      count, bottom_data, spatial_scale, channels, height, width,
      pooled_height, pooled_width, bottom_rois, bottom_trans,
      no_trans, trans_std, sample_per_part, output_dim,
      group_size, part_size, num_classes, channels_each_class,
      top_data, top_count);
}
// Host-side launcher for the deformable PS-ROI pooling backward kernel.
// Uses the triple-chevron launch form for consistency with every other
// launcher in this file (equivalent to the macro-based launch it replaces).
template<typename T>
void DeformablePSROIPoolBackwardKernel<GPUDevice, T>::operator()(const GPUDevice &d,
                                                                 const int count,
                                                                 const T *top_diff,
                                                                 const T *top_count,
                                                                 const int num_rois,
                                                                 const T spatial_scale,
                                                                 const int channels,
                                                                 const int height,
                                                                 const int width,
                                                                 const int pooled_height,
                                                                 const int pooled_width,
                                                                 const int output_dim,
                                                                 T *bottom_data_diff,
                                                                 T *bottom_trans_diff,
                                                                 const T *bottom_data,
                                                                 const T *bottom_rois,
                                                                 const T *bottom_trans,
                                                                 const int no_trans,
                                                                 const T trans_std,
                                                                 const int sample_per_part,
                                                                 const int group_size,
                                                                 const int part_size,
                                                                 const int num_classes,
                                                                 const int channels_each_class) {
  const auto cfg = GetCudaLaunchConfig(count, d);
  DeformablePSROIPoolBackwardAccKernel<T><<<cfg.block_count, cfg.thread_per_block, 0, d.stream()>>>(
      count, top_diff, top_count, num_rois, spatial_scale, channels, height, width,
      pooled_height, pooled_width, output_dim, bottom_data_diff, bottom_trans_diff,
      bottom_data, bottom_rois, bottom_trans, no_trans, trans_std, sample_per_part,
      group_size, part_size, num_classes, channels_each_class);
}
// Explicit instantiations for the batched-matmul and PS-ROI pooling functors
// (float and double), so their symbols are emitted in this translation unit.
template
struct LaunchBatchMatMul<GPUDevice, float>;
template
struct LaunchBatchMatMul<GPUDevice, double>;
template
struct DeformablePSROIPoolForward<GPUDevice, float>;
template
struct DeformablePSROIPoolForward<GPUDevice, double>;
template
struct DeformablePSROIPoolBackwardKernel<GPUDevice, float>;
template
struct DeformablePSROIPoolBackwardKernel<GPUDevice, double>;
#endif
}
| 1a17a640c14ea7b25880fe9b13fef8543a54a9c8.cu | //
// Created by 孙嘉禾 on 2019/12/31.
//
#include "deformable_conv2d.h"
#include <cstdlib>
#include <algorithm>
#ifdef GOOGLE_CUDA
#include "tensorflow/core/util/gpu_kernel_helper.h"
#include "tensorflow/core/platform/stream_executor.h"
#endif
namespace tensorflow {
typedef Eigen::GpuDevice GPUDevice;
typedef Eigen::ThreadPoolDevice CPUDevice;
#ifdef GOOGLE_CUDA
// Transposes the (axis_x, axis_y) pair of a tensor in place. Each thread
// owns one contiguous chunk of `cuda_mem_size` elements laid out as
// [axis_x_dims][axis_y_dims][min_unit_size], stages the transposed copy in a
// per-thread heap buffer, then writes it back.
// NOTE(review): dim_num, axis_x and axis_y are unused by the body (the
// geometry is fully described by axis_x_dims/axis_y_dims/min_unit_size);
// kept for interface compatibility. Device-side new/delete is slow and can
// return nullptr under memory pressure — consider a scratch buffer.
template<typename DType>
__global__ void SwapAxisKernel(const int n, const int cuda_mem_size, const int min_unit_size,
                               DType *input_data, const int dim_num, const int axis_x_dims, const int axis_y_dims,
                               const int axis_x, const int axis_y) {
  CUDA_1D_KERNEL_LOOP(index, n) {
    DType *device_data = new DType[cuda_mem_size];
    DType *input_data_ptr = input_data + index * cuda_mem_size;
    for (int j = 0; j < axis_y_dims; j++) {
      for (int i = 0; i < axis_x_dims; i++) {
        // BUG FIX: the source stride must be the inner dimension axis_y_dims
        // (element (i, j) of an [axis_x_dims][axis_y_dims] layout), and the
        // destination stride axis_x_dims (element (j, i) of the transposed
        // [axis_y_dims][axis_x_dims] layout). The old code used
        // i * axis_x_dims + j and j * axis_y_dims + i, which is only correct
        // when the two dimensions happen to be equal.
        DType *temp_ptr = input_data_ptr + (i * axis_y_dims + j) * min_unit_size;
        DType *device_data_temp_ptr = device_data + (j * axis_x_dims + i) * min_unit_size;
        for (int k = 0; k < min_unit_size; k++) {
          *(device_data_temp_ptr + k) = *(temp_ptr + k);
        }
      }
    }
    // Copy the transposed chunk back over the original.
    for (int i = 0; i < cuda_mem_size; i++) {
      *(input_data_ptr + i) = *(device_data + i);
    }
    delete[]device_data;
  }
}
// Modulated deformable im2col: expands the input image into the column
// buffer, sampling each kernel tap at a learned fractional offset and
// scaling the sampled value by a learned mask.
template<typename DType>
__global__ void DeformableConv2DIm2ColKernel(const int n,
                                             const DType *data_im,
                                             const DType *data_offset,
                                             const DType *data_mask,
                                             const int height,
                                             const int width,
                                             const int kernel_h,
                                             const int kernel_w,
                                             const int pad_h,
                                             const int pad_w,
                                             const int stride_h,
                                             const int stride_w,
                                             const int dilation_h,
                                             const int dilation_w,
                                             const int channel_per_deformable_group,
                                             const int batch_size,
                                             const int num_channels,
                                             const int deformable_group,
                                             const int height_col,
                                             const int width_col,
                                             DType *data_col) {
  /*
   * channel_per_deformable_group: number of input channels divided by the
   * number of deformable groups.
   * batch_size here is im2col_step_, which is usually set to 1.
   */
  CUDA_1D_KERNEL_LOOP(index, n) {
    // Decompose the flat index into (c_im, b_col, h_col, w_col).
    const int w_col = index % width_col;
    const int h_col = (index / width_col) % height_col;
    const int b_col = (index / width_col / height_col) % batch_size;
    const int c_im = (index / width_col / height_col) / batch_size;
    const int c_col = c_im * kernel_h * kernel_w;
    // compute deformable group index
    const int deformable_group_index = c_im / channel_per_deformable_group;
    const int h_in = h_col * stride_h - pad_h;
    const int w_in = w_col * stride_w - pad_w;
    DType *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
    const DType *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
    const DType *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 *
        kernel_h * kernel_w * height_col * width_col;
    const DType *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h *
        kernel_w * height_col * width_col;
    for (int i = 0; i < kernel_h; ++i) {
      for (int j = 0; j < kernel_w; ++j) {
        const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
        const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
        const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col;
        const DType offset_h = data_offset_ptr[data_offset_h_ptr];
        const DType offset_w = data_offset_ptr[data_offset_w_ptr];
        const DType mask = data_mask_ptr[data_mask_hw_ptr];
        DType val = static_cast<DType>(0);
        const DType h_im = h_in + i * dilation_h + offset_h;
        const DType w_im = w_in + j * dilation_w + offset_w;
        // Sample bilinearly only when the shifted tap lands inside the image;
        // taps outside contribute zero.
        if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) {
          val = DmcnIm2colBilinear(data_im_ptr, width, height, width, h_im, w_im);
        }
        *data_col_ptr = val * mask;
        // Advance to this tap's next slot in the column buffer.
        data_col_ptr += batch_size * height_col * width_col;
      }
    }
  }
}
// Forward pass of deformable position-sensitive ROI pooling.
// One thread per output element, linearized as (n, ctop, ph, pw).
// For each pooling bin, sample_per_part^2 points are sampled (shifted by a
// learned per-part translation when no_trans == 0), bilinearly interpolated
// from the position-sensitive score maps, and averaged.
// Outputs: top_data (pooled averages) and top_count (number of in-bounds
// samples per bin, consumed by the backward pass).
// FIX: the DmcnIm2colBilinear call previously passed (ptr, w, h, w, height,
// width); the convention established by the other two call sites in this file
// is (ptr, data_width, height, width, h, w), so the sample coordinates and
// image dimensions were scrambled. Corrected below.
template<typename T>
__global__ void DeformablePSROIPoolForwardKernel(const int count, const T *bottom_data,
                                                 const T spatial_scale, const int channels,
                                                 const int height, const int width,
                                                 const int pooled_height, const int pooled_width,
                                                 const T *bottom_rois, const T *bottom_trans,
                                                 const int no_trans, const T trans_std,
                                                 const int sample_per_part, const int output_dim,
                                                 const int group_size, const int part_size,
                                                 const int num_classes, const int channels_each_class,
                                                 T *top_data, T *top_count) {
  CUDA_1D_KERNEL_LOOP(index, count) {
    // The output is in order (n, ctop, ph, pw)
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int ctop = (index / pooled_width / pooled_height) % output_dim;
    int n = index / pooled_width / pooled_height / output_dim;
    // [start, end) interval for spatial sampling; ROI rows are
    // (batch_ind, x1, y1, x2, y2).
    const T *offset_bottom_rois = bottom_rois + n * 5;
    int roi_batch_ind = offset_bottom_rois[0];
    T roi_start_w = (T) (round(offset_bottom_rois[1])) * spatial_scale - 0.5;
    T roi_start_h = (T) (round(offset_bottom_rois[2])) * spatial_scale - 0.5;
    T roi_end_w = (T) (round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5;
    T roi_end_h = (T) (round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5;
    // Force too small ROIs to be 1x1
    T roi_width = std::max(roi_end_w - roi_start_w, static_cast<T>(0.1)); // avoid 0
    T roi_height = std::max(roi_end_h - roi_start_h, static_cast<T>(0.1));
    // Compute w and h at bottom
    T bin_size_h = roi_height / static_cast<T>(pooled_height);
    T bin_size_w = roi_width / static_cast<T>(pooled_width);
    T sub_bin_size_h = bin_size_h / static_cast<T>(sample_per_part);
    T sub_bin_size_w = bin_size_w / static_cast<T>(sample_per_part);
    // Part cell that supplies the learned translation for this bin.
    int part_h = floor(static_cast<T>(ph) / pooled_height * part_size);
    int part_w = floor(static_cast<T>(pw) / pooled_width * part_size);
    int class_id = ctop / channels_each_class;
    T trans_x = no_trans ? (T) (0) :
                bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w]
                    * (T) trans_std;
    T trans_y = no_trans ? (T) (0) :
                bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w]
                    * (T) trans_std;
    // Bin origin, shifted by the (scaled) learned translation.
    T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w;
    wstart += trans_x * roi_width;
    T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h;
    hstart += trans_y * roi_height;
    T sum = 0;
    int total = 0;
    // Position-sensitive group cell for this output channel/bin.
    int gw = floor(static_cast<T>(pw) * group_size / pooled_width);
    int gh = floor(static_cast<T>(ph) * group_size / pooled_height);
    gw = std::min(std::max(gw, 0), group_size - 1);
    gh = std::min(std::max(gh, 0), group_size - 1);
    const T *offset_bottom_data = bottom_data + (roi_batch_ind * channels) * height * width;
    for (int ih = 0; ih < sample_per_part; ++ih) {
      for (int iw = 0; iw < sample_per_part; ++iw) {
        T w = wstart + iw * sub_bin_size_w;
        T h = hstart + ih * sub_bin_size_h;
        // bilinear interpolation; skip samples entirely outside the image
        if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5) {
          continue;
        }
        w = std::min(std::max(w, static_cast<T>(0.)), static_cast<T>(width - 1.));
        h = std::min(std::max(h, static_cast<T>(0.)), static_cast<T>(height - 1.));
        int c = (ctop * group_size + gh) * group_size + gw;
        // Argument order: (data, data_width, height, width, h, w) — matches
        // the other DmcnIm2colBilinear call sites in this file.
        T val = DmcnIm2colBilinear(offset_bottom_data + c * height * width, width, height, width, h, w);
        sum += val;
        total++;
      }
    }
    top_data[index] = total == 0 ? (T) (0) : sum / total;
    top_count[index] = total;
  }
}
// Backward pass of deformable position-sensitive ROI pooling.
// One thread per output element (n, ctop, ph, pw). Recomputes the forward
// sampling locations, then scatters top_diff / top_count into
// bottom_data_diff over each sample's four bilinear corners via atomic adds,
// and (when no_trans == 0) accumulates the gradient w.r.t. the learned
// per-part translation into bottom_trans_diff.
// "Acc" = accumulate: callers are expected to zero the diff buffers first.
template<typename T>
__global__ void DeformablePSROIPoolBackwardAccKernel(const int count,
const T *top_diff,
const T *top_count,
const int num_rois,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int output_dim,
T *bottom_data_diff,
T *bottom_trans_diff,
const T *bottom_data,
const T *bottom_rois,
const T *bottom_trans, const int no_trans,
const T trans_std, const int sample_per_part,
const int group_size, const int part_size,
const int num_classes,
const int channels_each_class) {
CUDA_1D_KERNEL_LOOP(index, count) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int ctop = (index / pooled_width / pooled_height) % output_dim;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling (must mirror the forward pass)
const T *offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
T roi_start_w = (T) (round(offset_bottom_rois[1])) * spatial_scale - 0.5;
T roi_start_h = (T) (round(offset_bottom_rois[2])) * spatial_scale - 0.5;
T roi_end_w = (T) (round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5;
T roi_end_h = (T) (round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5;
// Force too small ROIs to be 1x1
T roi_width = std::max(roi_end_w - roi_start_w, static_cast<T>(0.1)); //avoid 0
T roi_height = std::max(roi_end_h - roi_start_h, static_cast<T>(0.1));
// Compute w and h at bottom
T bin_size_h = roi_height / (T) (pooled_height);
T bin_size_w = roi_width / (T) (pooled_width);
T sub_bin_size_h = bin_size_h / (T) (sample_per_part);
T sub_bin_size_w = bin_size_w / (T) (sample_per_part);
int part_h = floor((T) (ph) / pooled_height * part_size);
int part_w = floor((T) (pw) / pooled_width * part_size);
int class_id = ctop / channels_each_class;
T trans_x = no_trans ? (T) (0) :
bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w]
* (T) trans_std;
T trans_y = no_trans ? (T) (0) :
bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w]
* (T) trans_std;
T wstart = (T) (pw) * bin_size_w + roi_start_w;
wstart += trans_x * roi_width;
T hstart = (T) (ph) * bin_size_h + roi_start_h;
hstart += trans_y * roi_height;
// Bins with no valid forward samples produced no output; nothing to propagate.
if (top_count[index] <= 0) {
continue;
}
// Gradient is distributed uniformly over the samples averaged in the forward pass.
T diff_val = top_diff[index] / top_count[index];
const T *offset_bottom_data = bottom_data + roi_batch_ind * channels * height * width;
T *offset_bottom_data_diff = bottom_data_diff + roi_batch_ind * channels * height * width;
int gw = floor((T) (pw) * group_size / pooled_width);
int gh = floor((T) (ph) * group_size / pooled_height);
gw = std::min(std::max(gw, 0), group_size - 1);
gh = std::min(std::max(gh, 0), group_size - 1);
for (int ih = 0; ih < sample_per_part; ih++) {
for (int iw = 0; iw < sample_per_part; iw++) {
T w = wstart + iw * sub_bin_size_w;
T h = hstart + ih * sub_bin_size_h;
// bilinear interpolation
if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5) {
continue;
}
// NOTE(review): 0. / 1. are double literals; for T = float this clamps
// through unqualified min/max on promoted values — confirm intended.
w = min(max(w, 0.), width - 1.);
h = min(max(h, 0.), height - 1.);
int c = (ctop * group_size + gh) * group_size + gw;
// backward on feature: scatter over the 4 bilinear corners
int x0 = floor(w);
int x1 = ceil(w);
int y0 = floor(h);
int y1 = ceil(h);
T dist_x = w - x0, dist_y = h - y0;
T q00 = (1 - dist_x) * (1 - dist_y);
T q01 = (1 - dist_x) * dist_y;
T q10 = dist_x * (1 - dist_y);
T q11 = dist_x * dist_y;
int bottom_index_base = c * height * width;
// Atomic: multiple bins/ROIs may touch the same input pixel.
CudaAtomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x0, q00 * diff_val);
CudaAtomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x0, q01 * diff_val);
CudaAtomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x1, q10 * diff_val);
CudaAtomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x1, q11 * diff_val);
if (no_trans) {
continue;
}
// Gradient w.r.t. the learned translation: derivative of the bilinear
// sample w.r.t. x/y, scaled by trans_std and the ROI extent.
T U00 = offset_bottom_data[bottom_index_base + y0 * width + x0];
T U01 = offset_bottom_data[bottom_index_base + y1 * width + x0];
T U10 = offset_bottom_data[bottom_index_base + y0 * width + x1];
T U11 = offset_bottom_data[bottom_index_base + y1 * width + x1];
T diff_x = (U11 * dist_y + U10 * (1 - dist_y) - U01 * dist_y - U00 * (1 - dist_y)) * trans_std * diff_val;
diff_x *= roi_width;
T diff_y = (U11 * dist_x + U01 * (1 - dist_x) - U10 * dist_x - U00 * (1 - dist_x)) * trans_std * diff_val;
diff_y *= roi_height;
CudaAtomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w,
diff_x);
CudaAtomicAdd(
bottom_trans_diff + (((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w,
diff_y);
}
}
}
}
// col2im for modulated deformable convolution: gradient w.r.t. the input image.
// One thread per column-buffer element; each thread scatters its (mask-scaled)
// column gradient into grad_im over the bilinear footprint of its deformed
// sampling location, using atomic adds (many column entries map to the same
// input pixel).
template<typename DType>
__global__ void DeformableConv2DCol2ImKernel(
const int n,
const DType *data_col, const DType *data_offset, const DType *data_mask,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int deformable_group,
const int height_col, const int width_col,
DType *grad_im) {
CUDA_1D_KERNEL_LOOP(index, n) {
// Decompose the column-buffer index into kernel tap (i, j) and channel c.
const int j = (index / width_col / height_col / batch_size) % kernel_w;
const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h;
const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h;
// compute the start and end of the output
const int deformable_group_index = c / channel_per_deformable_group;
int w_out = index % width_col;
int h_out = (index / width_col) % height_col;
int b = (index / width_col / height_col) % batch_size;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const DType *data_offset_ptr = data_offset
+ (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const DType *data_mask_ptr =
data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_out) * width_col + w_out;
const DType offset_h = data_offset_ptr[data_offset_h_ptr];
const DType offset_w = data_offset_ptr[data_offset_w_ptr];
const DType mask = data_mask_ptr[data_mask_hw_ptr];
// Deformed (fractional) sampling location used in the forward pass.
const DType cur_inv_h_data = h_in + i * dilation_h + offset_h;
const DType cur_inv_w_data = w_in + j * dilation_w + offset_w;
const DType cur_top_grad = data_col[index] * mask;
const int cur_h = (int) cur_inv_h_data;
const int cur_w = (int) cur_inv_w_data;
// Scan a 5x5 window around the truncated location; the |dist| < 1 guard
// keeps only the pixels inside the bilinear footprint (the wide window
// compensates for (int) truncating toward zero for negative coordinates).
for (int dy = -2; dy <= 2; dy++) {
for (int dx = -2; dx <= 2; dx++) {
if (cur_h + dy >= 0 && cur_h + dy < height &&
cur_w + dx >= 0 && cur_w + dx < width &&
abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
abs(cur_inv_w_data - (cur_w + dx)) < 1
) {
int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
DType weight =
DmcnGetGradientWeight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width);
CudaAtomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
}
}
}
}
}
// col2im for modulated deformable convolution: gradients w.r.t. the learned
// offsets (grad_offset) and modulation masks (grad_mask).
// One thread per offset-map element; each thread sums, over the input
// channels of its deformable group, the coordinate-derivative of the bilinear
// sample weighted by the incoming column gradient. The mask gradient is the
// bilinear sample itself times the column gradient, written once per (h, w)
// tap (i.e. only from even offset channels).
template<typename DType>
__global__ void DeformableConv2DCol2ImCoordGPUKernel(
const int n,
const DType *data_col, const DType *data_im,
const DType *data_offset, const DType *data_mask,
const int channels, const int height, const int width, // input C, H, W
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int offset_channels, const int deformable_group,
const int height_col, const int width_col,
DType *grad_offset, DType *grad_mask) {
CUDA_1D_KERNEL_LOOP(index, n) {
DType val = 0, mval = 0;
int w = index % width_col;
int h = (index / width_col) % height_col;
int c = (index / width_col / height_col) % offset_channels;
int b = (index / width_col / height_col) / offset_channels;
// compute the start and end of the output
const int deformable_group_index = c / (2 * kernel_h * kernel_w);
const int col_step = kernel_h * kernel_w;
int cnt = 0;
const DType *data_col_ptr =
data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col;
const DType *data_im_ptr = data_im
+ (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w
* height * width;
const DType *data_offset_ptr = data_offset
+ (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const DType *data_mask_ptr =
data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
// Offset channel within this deformable group; even = h-offset, odd = w-offset.
const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
// Walk every input channel of this group that uses this (i, j) tap.
for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) {
const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w;
const int bp_dir = offset_c % 2;
int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
int w_out = col_pos % width_col;
int h_out = (col_pos / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out);
const int data_mask_hw_ptr = (((i * kernel_w + j) * height_col + h_out) * width_col + w_out);
const DType offset_h = data_offset_ptr[data_offset_h_ptr];
const DType offset_w = data_offset_ptr[data_offset_w_ptr];
const DType mask = data_mask_ptr[data_mask_hw_ptr];
DType inv_h = h_in + i * dilation_h + offset_h;
DType inv_w = w_in + j * dilation_w + offset_w;
// Out-of-bounds samples contribute no mask gradient and are flagged with
// the sentinel -2 so the coordinate weight below evaluates to zero.
if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) {
inv_h = inv_w = -2;
} else {
mval += data_col_ptr[col_pos]
* DmcnIm2colBilinear(data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w);
}
// bp_dir selects d/dh (even channel) vs d/dw (odd channel).
const DType weight = DmcnGetCoordinateWeight(
inv_h, inv_w,
height, width, data_im_ptr + cnt * height * width, width, bp_dir);
val += weight * data_col_ptr[col_pos] * mask;
cnt += 1;
}
grad_offset[index] = val;
// KERNEL_ASSIGN(grad_offset[index], offset_req, val);
// One mask gradient per (i, j) tap: written only from the even (h) channel.
if (offset_c % 2 == 0) {
grad_mask[
(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col
+ h) * width_col + w] = mval;
// KERNEL_ASSIGN(grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w], mask_req, mval);
}
}
}
// Element-wise accumulation: result_data[i] += right_data[i] for i in [0, n).
// Uses atomic adds so it is safe even if launched concurrently with other
// writers of result_data.
template<typename DType>
__global__ void PureAddToKernel(const int n, DType *result_data, const DType *right_data) {
  CUDA_1D_KERNEL_LOOP(index, n) {
    CudaAtomicAdd(&result_data[index], right_data[index]);
  }
}
// Fills result_data[0..n) with DType(0).
template<typename DType>
__global__ void SetZeroKernel(const int n, DType *result_data) {
  CUDA_1D_KERNEL_LOOP(index, n) {
    result_data[index] = DType(0);
  }
}
// Fills result_data[0..n) with DType(1).
template<typename DType>
__global__ void SetOneKernel(const int n, DType *result_data) {
  CUDA_1D_KERNEL_LOOP(index, n) {
    result_data[index] = DType(1);
  }
}
// Writes `num` into data[index]. The host wrapper launches it with a config
// sized for a single element (GetCudaLaunchConfig(1, d)).
// NOTE(review): there is no thread guard, so every launched thread performs
// the same store — a benign race only because all threads write an identical
// value.
template<typename DType>
__global__ void SetNumAtIndexKernel(DType num, int index, DType *data) {
*(data + index) = num;
}
// Host-side launcher for the offset/mask gradient kernel
// (DeformableConv2DCol2ImCoordGPUKernel). Computes one thread per offset-map
// element: col_shape is [K, im2col_step_, H_col, W_col] and there are
// 2 * kernel_h * kernel_w * deformable_group offset channels.
// Only 2 spatial axes are supported.
template<typename DType>
void DeformableConv2DCol2ImCoord<GPUDevice, DType>::operator()(const Eigen::GpuDevice &d,
const DType *data_col,
const DType *data_im,
const DType *data_offset,
const DType *data_mask,
const TShape &im_shape,
const TShape &col_shape,
const TShape &kernel_shape,
const TShape &pad,
const TShape &stride,
const TShape &dilation,
const int32_t deformable_group,
DType *grad_offset,
DType *grad_mask) {
int num_spatial_axes = kernel_shape.size();
int num_kernels =
col_shape[1] * col_shape[2] * col_shape[3] * 2 * kernel_shape[0] * kernel_shape[1] * deformable_group;
int channel_per_deformable_group = col_shape[0] / deformable_group;
// num_axes should be smaller than block size
CudaLaunchConfig config = GetCudaLaunchConfig(num_kernels, d);
CHECK_LT(num_spatial_axes, config.thread_per_block);
switch (num_spatial_axes) {
case 2:
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
DeformableConv2DCol2ImCoordGPUKernel<DType> << < config.block_count, config.thread_per_block,
0, d.stream() >> > (
num_kernels, data_col, data_im, data_offset, data_mask, im_shape[1], im_shape[2], im_shape[3],
kernel_shape[0], kernel_shape[1], pad[0], pad[1], stride[0], stride[1],
dilation[0], dilation[1], channel_per_deformable_group,
col_shape[1], 2 * kernel_shape[0] * kernel_shape[1]
* deformable_group, deformable_group, col_shape[2], col_shape[3],
grad_offset, grad_mask);
// MSHADOW_CUDA_POST_KERNEL_CHECK(DeformableConv2DCol2ImCoordGPUKernel);
break;
default:LOG(FATAL) << "col2im_nd_gpu does not support computation with "
<< num_spatial_axes << " spatial axes";
}
}
// Host-side launcher for the input-image gradient kernel
// (DeformableConv2DCol2ImKernel): one thread per column-buffer element.
// Only 2 spatial axes are supported.
template<typename DType>
void DeformableConv2DCol2Im<GPUDevice, DType>::operator()(
const GPUDevice &d,
const DType *data_col, const DType *data_offset, const DType *data_mask,
const TShape &im_shape, const TShape &col_shape, const TShape &kernel_shape,
const TShape &pad, const TShape &stride,
const TShape &dilation, const int32_t deformable_group,
DType *grad_im) {
int num_spatial_axes = kernel_shape.size();
int im_size = ProdShape(im_shape, 1, im_shape.size());
int channel_per_deformable_group = im_shape[1] / deformable_group;
int num_kernels = ProdShape(col_shape, 0, col_shape.size());
// num_axes should be smaller than block size
CudaLaunchConfig config = GetCudaLaunchConfig(num_kernels, d);
CHECK_LT(num_spatial_axes, config.thread_per_block);
// using namespace mxnet_op;
switch (num_spatial_axes) {
case 2:
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
DeformableConv2DCol2ImKernel<DType> << < config.block_count, config.thread_per_block,
0, d.stream() >> > (
num_kernels, data_col, data_offset, data_mask, im_shape[1], im_shape[2], im_shape[3],
kernel_shape[0], kernel_shape[1], pad[0], pad[1], stride[0], stride[1],
dilation[0], dilation[1], channel_per_deformable_group,
col_shape[1], deformable_group, col_shape[2], col_shape[3], grad_im);
// MSHADOW_CUDA_POST_KERNEL_CHECK(modulated_deformable_col2im_gpu_kernel);
break;
default:LOG(FATAL) << "col2im_nd_gpu does not support computation with "
<< num_spatial_axes << " spatial axes";
}
}
// Host-side launcher for the forward im2col kernel
// (DeformableConv2DIm2ColKernel): one thread per
// (input channel, batch step, output location). Only 2 spatial axes are
// supported.
template<typename DType>
void DeformableConv2DIm2Col<GPUDevice, DType>::operator()(
const GPUDevice &d,
const DType *data_im, const DType *data_offset, const DType *data_mask,
const TShape &im_shape, const TShape &col_shape, const TShape &kernel_shape,
const TShape &pad, const TShape &stride, const TShape &dilation,
const int32_t deformable_group, DType *data_col) {
int num_spatial_axes = kernel_shape.size();
int channel_per_deformable_group = im_shape[1] / deformable_group; // im_shape[1] = number of input channels
int num_kernels = im_shape[1]
* ProdShape(col_shape, 1, col_shape.size()); // K * N / k.Size(), k = filter, col_shape = [K, im2col_step_, H, W]
CudaLaunchConfig config = GetCudaLaunchConfig(num_kernels, d);
CHECK_LT(num_spatial_axes, config.thread_per_block);
switch (num_spatial_axes) {
case 2:
DeformableConv2DIm2ColKernel<DType> // NOLINT_NEXT_LINE(whitespace/operators)
<< < config.block_count, config.thread_per_block, // launch config is sized to cover num_kernels work items
0, d.stream() >> > (
// Device memory is managed via cudaMalloc()/cudaFree()/cudaMemcpy().
// The kernel<<<M, N>>> launch syntax runs device code from host code with
// M blocks of N threads each, i.e. M * N threads in total.
num_kernels,
data_im,
data_offset,
data_mask,
im_shape[2], im_shape[3],
kernel_shape[0], kernel_shape[1],
pad[0], pad[1],
stride[0], stride[1],
dilation[0], dilation[1],
channel_per_deformable_group,
col_shape[1], im_shape[1],
deformable_group,
col_shape[2], col_shape[3],
data_col);
// MSHADOW_CUDA_POST_KERNEL_CHECK(modulated_deformable_im2col_gpu_kernel);
break;
default:LOG(FATAL) << "im2col_nd_gpu does not support computation with "
<< num_spatial_axes << " spatial axes";
}
}
// Zeroes n elements of result_data on device d's stream via SetZeroKernel.
template<typename DType>
void SetZeros<GPUDevice, DType>::operator()(const GPUDevice &d, int n, DType *result_data) {
  CudaLaunchConfig config = GetCudaLaunchConfig(n, d);
  SetZeroKernel<DType><<<config.block_count, config.thread_per_block, 0, d.stream()>>>(n, result_data);
}
// Accumulates right_data into result_data element-wise (n elements) on
// device d's stream via PureAddToKernel.
template<typename DType>
void PureAddTo<GPUDevice, DType>::operator()(const GPUDevice &d,
                                             const int n,
                                             DType *result_data,
                                             const DType *right_data) {
  CudaLaunchConfig config = GetCudaLaunchConfig(n, d);
  PureAddToKernel<DType><<<config.block_count, config.thread_per_block, 0, d.stream()>>>(n, result_data, right_data);
}
// Sets n elements of result_data to one on device d's stream via SetOneKernel.
template<typename DType>
void SetOne<GPUDevice, DType>::operator()(const GPUDevice &d, int n, DType *result_data) {
  CudaLaunchConfig config = GetCudaLaunchConfig(n, d);
  SetOneKernel<DType><<<config.block_count, config.thread_per_block, 0, d.stream()>>>(n, result_data);
}
// Writes a single scalar `num` to data[index] on device d's stream.
template<typename DType>
void SetNumAtIndex<GPUDevice, DType>::operator()(const GPUDevice &d, DType num, int index, DType *data) {
  CudaLaunchConfig config = GetCudaLaunchConfig(1, d);
  SetNumAtIndexKernel<DType><<<config.block_count, config.thread_per_block, 0, d.stream()>>>(num, index, data);
}
// Without the explicit instantiations below, the generated .so fails to load with
// errors like: undefined symbol: _ZN10tensorflow13setNumAtIndexIN5Eigen9GpuDeviceEfEclERKS2_fiPf.
// Instantiating each functor here ensures a concrete definition is emitted in this
// translation unit for every device/type combination the op library links against.
template
struct DeformableConv2DIm2Col<GPUDevice, double>;
template
struct DeformableConv2DCol2Im<GPUDevice, double>;
template
struct DeformableConv2DCol2ImCoord<GPUDevice, double>;
template
struct PureAddTo<GPUDevice, double>;
template
struct SetOne<GPUDevice, double>;
template
struct SetZeros<GPUDevice, double>;
template
struct SwapAxis<GPUDevice, double>;
template
struct SetNumAtIndex<GPUDevice, double>;
template
struct DeformableConv2DIm2Col<GPUDevice, float>;
template
struct DeformableConv2DCol2Im<GPUDevice, float>;
template
struct DeformableConv2DCol2ImCoord<GPUDevice, float>;
template
struct PureAddTo<GPUDevice, float>;
template
struct SetOne<GPUDevice, float>;
template
struct SetZeros<GPUDevice, float>;
template
struct SwapAxis<GPUDevice, float>;
template
struct SetNumAtIndex<GPUDevice, float>;
// Wraps a raw device pointer in a typed StreamExecutor DeviceMemory handle
// (non-owning; const-ness is cast away as required by DeviceMemoryBase).
template<typename T>
se::DeviceMemory<T> AsDeviceMemory(const T *cuda_memory) {
  se::DeviceMemoryBase wrapped(const_cast<T *>(cuda_memory));
  return se::DeviceMemory<T>(wrapped);
}
// Scratch allocator handed to batched cuBLAS calls: satisfies allocation
// requests from TensorFlow temp tensors and keeps them alive until the
// allocator itself is destroyed (after the blas call completes enqueueing).
class CublasScratchAllocator : public se::ScratchAllocator {
public:
using Stream = se::Stream;
using DeviceMemoryBytes = se::DeviceMemory<uint8>;
CublasScratchAllocator(OpKernelContext *context) : context_(context) {}
// -1 means "no limit" to the StreamExecutor scratch-allocation machinery.
int64 GetMemoryLimitInBytes() override { return -1; }
// Allocates byte_size bytes as a DT_UINT8 temp tensor; on failure returns an
// empty (nullptr, 0) region rather than an error status.
se::port::StatusOr<DeviceMemoryBytes> AllocateBytes(int64 byte_size) override {
Tensor temporary_memory;
Status allocation_status(context_->allocate_temp(
DT_UINT8, TensorShape({byte_size}), &temporary_memory));
if (!allocation_status.ok()) {
return se::port::StatusOr<DeviceMemoryBytes>(
DeviceMemoryBytes::MakeFromByteSize(nullptr, 0));
}
// Hold the reference of the allocated tensors until the end of the
// allocator.
allocated_tensors_.push_back(temporary_memory);
return se::port::StatusOr<DeviceMemoryBytes>(
DeviceMemoryBytes::MakeFromByteSize(
temporary_memory.flat<uint8>().data(),
temporary_memory.flat<uint8>().size()));
}
// Stream-taking overload (legacy ScratchAllocator interface); identical
// behavior to the overload above — the stream argument is unused.
se::port::StatusOr<DeviceMemoryBytes> AllocateBytes(
Stream *stream, int64 byte_size) {
Tensor temporary_memory;
Status allocation_status(context_->allocate_temp(
DT_UINT8, TensorShape({byte_size}), &temporary_memory));
if (!allocation_status.ok()) {
return se::port::StatusOr<DeviceMemoryBytes>(
DeviceMemoryBytes::MakeFromByteSize(nullptr, 0));
}
// Hold the reference of the allocated tensors until the end of the
// allocator.
allocated_tensors_.push_back(temporary_memory);
return se::port::StatusOr<DeviceMemoryBytes>(
DeviceMemoryBytes::MakeFromByteSize(
temporary_memory.flat<uint8>().data(),
temporary_memory.flat<uint8>().size()));
}
private:
OpKernelContext *context_;            // borrowed; outlives this allocator
std::vector<Tensor> allocated_tensors_;  // keeps scratch buffers alive
};
// Batched matrix multiply out = op(x) * op(y) via StreamExecutor/cuBLAS.
// x is [batch, m, k] and y is [batch, k, n] after the optional adjoints
// (adj_x / adj_y). Three strategies: batch == 1 with n == 1 uses GEMV,
// batch == 1 otherwise uses GEMM, and batch > 1 uses batched GEMM with a
// scratch allocator. cuBLAS is column-major, so the row-major product is
// computed as C' = B' x A'. Errors are reported through context->SetStatus.
template<typename Scalar>
void LaunchBatchMatMul<GPUDevice, Scalar>::launch(OpKernelContext *context,
const TensorShape &in_x_shape,
const TensorShape &in_y_shape,
const Scalar *in_x_ptr,
const Scalar *in_y_ptr,
bool adj_x,
bool adj_y,
Scalar *out) {
// For complex types "adjoint" means conjugate-transpose.
constexpr se::blas::Transpose kTranspose =
is_complex<Scalar>::value ? se::blas::Transpose::kConjugateTranspose
: se::blas::Transpose::kTranspose;
se::blas::Transpose trans[] = {se::blas::Transpose::kNoTranspose,
kTranspose};
const uint64 m = in_x_shape.dim_size(adj_x ? 2 : 1);
const uint64 k = in_x_shape.dim_size(adj_x ? 1 : 2);
const uint64 n = in_y_shape.dim_size(adj_y ? 1 : 2);
const uint64 batch_size = in_x_shape.dim_size(0);
auto blas_transpose_a = trans[adj_x];
auto blas_transpose_b = trans[adj_y];
auto *stream = context->op_device_context()->stream();
OP_REQUIRES(context, stream, errors::Internal("No GPU stream available."));
// Build per-batch DeviceMemory views over the contiguous input/output slabs.
typedef se::DeviceMemory<Scalar> DeviceMemoryType;
std::vector<DeviceMemoryType> a_device_memory;
std::vector<DeviceMemoryType> b_device_memory;
std::vector<DeviceMemoryType> c_device_memory;
std::vector<DeviceMemoryType *> a_ptrs;
std::vector<DeviceMemoryType *> b_ptrs;
std::vector<DeviceMemoryType *> c_ptrs;
a_device_memory.reserve(batch_size);
b_device_memory.reserve(batch_size);
c_device_memory.reserve(batch_size);
a_ptrs.reserve(batch_size);
b_ptrs.reserve(batch_size);
c_ptrs.reserve(batch_size);
auto *a_base_ptr = in_x_ptr;
auto *b_base_ptr = in_y_ptr;
auto *c_base_ptr = out;
for (int64 i = 0; i < batch_size; ++i) {
a_device_memory.push_back(AsDeviceMemory(a_base_ptr + i * m * k));
b_device_memory.push_back(AsDeviceMemory(b_base_ptr + i * k * n));
c_device_memory.push_back(AsDeviceMemory(c_base_ptr + i * m * n));
a_ptrs.push_back(&a_device_memory.back());
b_ptrs.push_back(&b_device_memory.back());
c_ptrs.push_back(&c_device_memory.back());
}
typedef Scalar Coefficient;
// Cublas does
// C = A x B
// where A, B and C are assumed to be in column major.
// We want the output to be in row-major, so we can compute
// C' = B' x A', where ' stands for transpose (not adjoint).
// TODO(yangzihao): Choose the best of the three strategies using autotune.
if (batch_size == 1) {
// This is a regular matrix*matrix or matrix*vector multiply. Avoid the
// overhead of the scratch allocator and the batch interface.
if (n == 1 &&
blas_transpose_b != se::blas::Transpose::kConjugateTranspose &&
blas_transpose_a != se::blas::Transpose::kConjugateTranspose) {
// This is a matrix*vector multiply so use GEMV to compute A * b.
// Here we are multiplying in the natural order, so we have to flip
// the transposition flag to compensate for the tensor being stored
// row-major. Since GEMV doesn't provide a way to just conjugate an
// argument, we have to defer those cases to GEMM below.
auto gemv_trans_a = blas_transpose_a == se::blas::Transpose::kTranspose
? se::blas::Transpose::kNoTranspose
: se::blas::Transpose::kTranspose;
bool blas_launch_status =
stream
->ThenBlasGemv(gemv_trans_a, adj_x ? m : k, adj_x ? k : m,
static_cast<Coefficient>(1.0), *(a_ptrs[0]),
adj_x ? m : k, *(b_ptrs[0]), 1,
static_cast<Coefficient>(0.0), c_ptrs[0], 1)
.ok();
if (!blas_launch_status) {
context->SetStatus(errors::Internal(
"Blas xGEMV launch failed : a.shape=", in_x_shape.DebugString(),
", b.shape=", in_y_shape.DebugString(), ", m=", m, ", n=", n,
", k=", k));
}
} else {
bool blas_launch_status =
stream
->ThenBlasGemm(blas_transpose_b, blas_transpose_a, n, m, k,
static_cast<Coefficient>(1.0), *(b_ptrs[0]),
adj_y ? k : n, *(a_ptrs[0]), adj_x ? m : k,
static_cast<Coefficient>(0.0), c_ptrs[0], n)
.ok();
if (!blas_launch_status) {
context->SetStatus(errors::Internal(
"Blas xGEMM launch failed : a.shape=", in_x_shape.DebugString(),
", b.shape=", in_y_shape.DebugString(), ", m=", m, ", n=", n,
", k=", k));
}
}
} else {
// Batched path: cuBLAS may need workspace, provided by the TF-backed
// scratch allocator.
CublasScratchAllocator scratch_allocator(context);
bool blas_launch_status =
stream
->ThenBlasGemmBatchedWithScratch(
blas_transpose_b, blas_transpose_a, n, m, k,
static_cast<Coefficient>(1.0), b_ptrs, adj_y ? k : n, a_ptrs,
adj_x ? m : k, static_cast<Coefficient>(0.0), c_ptrs, n,
batch_size, &scratch_allocator)
.ok();
if (!blas_launch_status) {
context->SetStatus(errors::Internal(
"Blas xGEMMBatched launch failed : a.shape=",
in_x_shape.DebugString(),
", b.shape=", in_y_shape.DebugString(), ", m=", m, ", n=", n,
", k=", k, ", batch_size=", batch_size));
}
}
}
// Host-side launcher for DeformablePSROIPoolForwardKernel: one thread per
// output element (`count` total), enqueued on the stream of device `d`.
template<typename T>
void DeformablePSROIPoolForward<GPUDevice, T>::operator()(const GPUDevice &d,
                                                          const int count,
                                                          const T *bottom_data,
                                                          const T spatial_scale,
                                                          const int channels,
                                                          const int height,
                                                          const int width,
                                                          const int pooled_height,
                                                          const int pooled_width,
                                                          const T *bottom_rois,
                                                          const T *bottom_trans,
                                                          const int no_trans,
                                                          const T trans_std,
                                                          const int sample_per_part,
                                                          const int output_dim,
                                                          const int group_size,
                                                          const int part_size,
                                                          const int num_classes,
                                                          const int channels_each_class,
                                                          T *top_data,
                                                          T *top_count) {
  CudaLaunchConfig config = GetCudaLaunchConfig(count, d);
  DeformablePSROIPoolForwardKernel<T><<<config.block_count, config.thread_per_block, 0, d.stream()>>>(
      count, bottom_data, spatial_scale, channels, height, width,
      pooled_height, pooled_width, bottom_rois, bottom_trans, no_trans,
      trans_std, sample_per_part, output_dim, group_size, part_size,
      num_classes, channels_each_class, top_data, top_count);
}
// Host-side launcher for the deformable PS-ROI pooling backward pass:
// forwards all arguments to DeformablePSROIPoolBackwardAccKernel, one thread
// per output element (`count` total), on the stream of device `d`.
template<typename T>
void DeformablePSROIPoolBackwardKernel<GPUDevice, T>::operator()(const GPUDevice &d,
                                                                 const int count,
                                                                 const T *top_diff,
                                                                 const T *top_count,
                                                                 const int num_rois,
                                                                 const T spatial_scale,
                                                                 const int channels,
                                                                 const int height,
                                                                 const int width,
                                                                 const int pooled_height,
                                                                 const int pooled_width,
                                                                 const int output_dim,
                                                                 T *bottom_data_diff,
                                                                 T *bottom_trans_diff,
                                                                 const T *bottom_data,
                                                                 const T *bottom_rois,
                                                                 const T *bottom_trans,
                                                                 const int no_trans,
                                                                 const T trans_std,
                                                                 const int sample_per_part,
                                                                 const int group_size,
                                                                 const int part_size,
                                                                 const int num_classes,
                                                                 const int channels_each_class) {
  CudaLaunchConfig cfg = GetCudaLaunchConfig(count, d);
  DeformablePSROIPoolBackwardAccKernel<T><<<cfg.block_count, cfg.thread_per_block, 0, d.stream()>>>(
      count, top_diff, top_count, num_rois, spatial_scale, channels, height, width,
      pooled_height, pooled_width, output_dim, bottom_data_diff, bottom_trans_diff,
      bottom_data, bottom_rois, bottom_trans, no_trans, trans_std, sample_per_part,
      group_size, part_size, num_classes, channels_each_class);
}
template
struct LaunchBatchMatMul<GPUDevice, float>;
template
struct LaunchBatchMatMul<GPUDevice, double>;
template
struct DeformablePSROIPoolForward<GPUDevice, float>;
template
struct DeformablePSROIPoolForward<GPUDevice, double>;
template
struct DeformablePSROIPoolBackwardKernel<GPUDevice, float>;
template
struct DeformablePSROIPoolBackwardKernel<GPUDevice, double>;
#endif
}
|
4a784ba7a701a698b296771ca9dfd0a3dce46953.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cutil_inline.h>
#include <cstdlib>
#include <cstdio>
#include <string.h>
#include <GL/glew.h>
#include <cuda_gl_interop.h>
#include "particles_kernel.cu"
#include "ParticleSystem.cuh"
extern "C"
{
hipArray *noiseArray;
// Select the fastest available GPU; when OpenGL interop is requested the
// device must be bound via the GL interop path instead of a plain SetDevice.
void initCuda(bool bUseGL)
{
if (bUseGL) {
hipGLSetGLDevice( cutGetMaxGflopsDeviceId() );
} else {
hipSetDevice( cutGetMaxGflopsDeviceId() );
}
}
// Copy the host-side simulation parameters into the device __constant__
// symbol `params` (declared in particles_kernel.cu) so kernels can read them.
void setParameters(SimParams *hostParams)
{
cutilSafeCall( hipMemcpyToSymbol(params, hostParams, sizeof(SimParams)) );
}
// Ceiling division: smallest block count whose total covers `a` elements.
int iDivUp(int a, int b)
{
    int quotient = a / b;
    // A nonzero remainder means one extra block is needed to cover the tail.
    if (a % b != 0) {
        quotient += 1;
    }
    return quotient;
}
// Derive a 1D launch configuration for n elements: clamp the block to the
// element count, then use ceil(n / numThreads) blocks to cover all of n.
void computeGridSize(int n, int blockSize, int &numBlocks, int &numThreads)
{
    numThreads = (n < blockSize) ? n : blockSize;
    numBlocks = iDivUp(n, numThreads);
}
// Uniform pseudo-random float in [0, 1] drawn from the C library generator.
inline float frand()
{
    float sample = (float) rand();
    return sample / (float) RAND_MAX;
}
// Build a w x h x d float4 noise volume on the host (components uniform in
// [-1, 1]), upload it into the module-global `noiseArray`, and bind it to
// the 3D texture reference `noiseTex` with linear filtering and wrap
// addressing. Uses the legacy texture-reference API.
void createNoiseTexture(int w,int h,int d)
{
hipExtent size = make_hipExtent(w, h, d);
uint elements = (uint) size.width*size.height*size.depth;
// NOTE(review): malloc result is not checked; a large volume could return
// NULL and crash in the fill loop below.
float *volumeData = (float *)malloc(elements*4*sizeof(float));
float *ptr = volumeData;
// Four floats (one float4) per voxel, each mapped from [0,1] to [-1,1].
for(uint i=0; i<elements; i++) {
*ptr++ = frand()*2.0f-1.0f;
*ptr++ = frand()*2.0f-1.0f;
*ptr++ = frand()*2.0f-1.0f;
*ptr++ = frand()*2.0f-1.0f;
}
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float4>();
cutilSafeCall( hipMalloc3DArray(&noiseArray, &channelDesc, size) );
hipMemcpy3DParms copyParams = { 0 };
copyParams.srcPtr = make_hipPitchedPtr((void*)volumeData, size.width*sizeof(float4), size.width, size.height);
copyParams.dstArray = noiseArray;
copyParams.extent = size;
copyParams.kind = hipMemcpyHostToDevice;
// NOTE(review): "©Params" looks like an encoding artifact of "&copyParams"
// (the "&copy" sequence was decoded as the © entity) — confirm against the
// upstream source before building.
cutilSafeCall( hipMemcpy3D(©Params) );
free(volumeData);
// set texture parameters
noiseTex.normalized = true; // access with normalized texture coordinates
noiseTex.filterMode = hipFilterModeLinear; // linear interpolation
noiseTex.addressMode[0] = hipAddressModeWrap; // wrap texture coordinates
noiseTex.addressMode[1] = hipAddressModeWrap;
noiseTex.addressMode[2] = hipAddressModeWrap;
// bind array to 3D texture
cutilSafeCall(hipBindTextureToArray(noiseTex, noiseArray, channelDesc));
}
// Advance the particle system by one time step: launch integrateD with a
// 256-thread block size over numParticles, reading old position/velocity
// and writing the new arrays.
void
integrateSystem(float4 *oldPos, float4 *newPos,
float4 *oldVel, float4 *newVel,
float deltaTime,
int numParticles)
{
int numThreads,numBlocks;
computeGridSize(numParticles, 256, numBlocks,numThreads);
hipLaunchKernelGGL(( integrateD), dim3(numBlocks),dim3(numThreads), 0, 0, newPos,newVel,
oldPos,oldVel,
deltaTime,
numParticles);
// Report a launch/execution failure, if any, before continuing.
cutilCheckMsg("Kernel execution failed");
}
// Compute per-particle sort keys: project each position onto sortVector
// (depth along the view direction) and record particle indices, for a
// subsequent depth sort. 256 threads per block over numParticles.
void
calcDepth(float4* pos,
float* keys,
uint* indices,
float3 sortVector,
int numParticles)
{
int numThreads,numBlocks;
computeGridSize(numParticles, 256, numBlocks, numThreads);
hipLaunchKernelGGL(( calcDepthD), dim3(numBlocks),dim3(numThreads), 0, 0, pos,keys,indices,sortVector,numParticles);
cutilCheckMsg("calcDepthD execution failed");
}
} | 4a784ba7a701a698b296771ca9dfd0a3dce46953.cu | #include <cutil_inline.h>
#include <cstdlib>
#include <cstdio>
#include <string.h>
#include <GL/glew.h>
#include <cuda_gl_interop.h>
#include "particles_kernel.cu"
#include "ParticleSystem.cuh"
extern "C"
{
cudaArray *noiseArray;
// Select the fastest available GPU; when OpenGL interop is requested the
// device must be bound via the GL interop path instead of a plain SetDevice.
void initCuda(bool bUseGL)
{
if (bUseGL) {
cudaGLSetGLDevice( cutGetMaxGflopsDeviceId() );
} else {
cudaSetDevice( cutGetMaxGflopsDeviceId() );
}
}
// Copy the host-side simulation parameters into the device __constant__
// symbol `params` (declared in particles_kernel.cu) so kernels can read them.
void setParameters(SimParams *hostParams)
{
cutilSafeCall( cudaMemcpyToSymbol(params, hostParams, sizeof(SimParams)) );
}
// Ceiling division: smallest block count whose total covers `a` elements.
int iDivUp(int a, int b)
{
    int quotient = a / b;
    // A nonzero remainder means one extra block is needed to cover the tail.
    if (a % b != 0) {
        quotient += 1;
    }
    return quotient;
}
// Derive a 1D launch configuration for n elements: clamp the block to the
// element count, then use ceil(n / numThreads) blocks to cover all of n.
void computeGridSize(int n, int blockSize, int &numBlocks, int &numThreads)
{
    numThreads = (n < blockSize) ? n : blockSize;
    numBlocks = iDivUp(n, numThreads);
}
// Uniform pseudo-random float in [0, 1] drawn from the C library generator.
inline float frand()
{
    float sample = (float) rand();
    return sample / (float) RAND_MAX;
}
// Build a w x h x d float4 noise volume on the host (components uniform in
// [-1, 1]), upload it into the module-global `noiseArray`, and bind it to
// the 3D texture reference `noiseTex` with linear filtering and wrap
// addressing. Uses the legacy texture-reference API.
void createNoiseTexture(int w,int h,int d)
{
cudaExtent size = make_cudaExtent(w, h, d);
uint elements = (uint) size.width*size.height*size.depth;
// NOTE(review): malloc result is not checked; a large volume could return
// NULL and crash in the fill loop below.
float *volumeData = (float *)malloc(elements*4*sizeof(float));
float *ptr = volumeData;
// Four floats (one float4) per voxel, each mapped from [0,1] to [-1,1].
for(uint i=0; i<elements; i++) {
*ptr++ = frand()*2.0f-1.0f;
*ptr++ = frand()*2.0f-1.0f;
*ptr++ = frand()*2.0f-1.0f;
*ptr++ = frand()*2.0f-1.0f;
}
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float4>();
cutilSafeCall( cudaMalloc3DArray(&noiseArray, &channelDesc, size) );
cudaMemcpy3DParms copyParams = { 0 };
copyParams.srcPtr = make_cudaPitchedPtr((void*)volumeData, size.width*sizeof(float4), size.width, size.height);
copyParams.dstArray = noiseArray;
copyParams.extent = size;
copyParams.kind = cudaMemcpyHostToDevice;
// NOTE(review): "©Params" looks like an encoding artifact of "&copyParams"
// (the "&copy" sequence was decoded as the © entity) — confirm against the
// upstream source before building.
cutilSafeCall( cudaMemcpy3D(©Params) );
free(volumeData);
// set texture parameters
noiseTex.normalized = true; // access with normalized texture coordinates
noiseTex.filterMode = cudaFilterModeLinear; // linear interpolation
noiseTex.addressMode[0] = cudaAddressModeWrap; // wrap texture coordinates
noiseTex.addressMode[1] = cudaAddressModeWrap;
noiseTex.addressMode[2] = cudaAddressModeWrap;
// bind array to 3D texture
cutilSafeCall(cudaBindTextureToArray(noiseTex, noiseArray, channelDesc));
}
// Advance the particle system by one time step: launch integrateD with a
// 256-thread block size over numParticles, reading old position/velocity
// and writing the new arrays.
void
integrateSystem(float4 *oldPos, float4 *newPos,
float4 *oldVel, float4 *newVel,
float deltaTime,
int numParticles)
{
int numThreads,numBlocks;
computeGridSize(numParticles, 256, numBlocks,numThreads);
integrateD<<<numBlocks,numThreads>>>(newPos,newVel,
oldPos,oldVel,
deltaTime,
numParticles);
// Report a launch/execution failure, if any, before continuing.
cutilCheckMsg("Kernel execution failed");
}
// Compute per-particle sort keys: project each position onto sortVector
// (depth along the view direction) and record particle indices, for a
// subsequent depth sort. 256 threads per block over numParticles.
void
calcDepth(float4* pos,
float* keys,
uint* indices,
float3 sortVector,
int numParticles)
{
int numThreads,numBlocks;
computeGridSize(numParticles, 256, numBlocks, numThreads);
calcDepthD<<<numBlocks,numThreads>>>(pos,keys,indices,sortVector,numParticles);
cutilCheckMsg("calcDepthD execution failed");
}
} |
6124d9cb8d3812911558a7ec6aa2356ab2314297.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright 2022 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "cunumeric/sort/thrust_sort.cuh"
namespace cunumeric {
// Public double-precision entry point: forwards all arguments unchanged to
// the templated detail::thrust_local_sort implementation on the given stream.
void thrust_local_sort(const double* values_in,
double* values_out,
const int64_t* indices_in,
int64_t* indices_out,
const size_t volume,
const size_t sort_dim_size,
const bool stable,
hipStream_t stream)
{
detail::thrust_local_sort(
values_in, values_out, indices_in, indices_out, volume, sort_dim_size, stable, stream);
}
} // namespace cunumeric
| 6124d9cb8d3812911558a7ec6aa2356ab2314297.cu | /* Copyright 2022 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "cunumeric/sort/thrust_sort.cuh"
namespace cunumeric {
// Public double-precision entry point: forwards all arguments unchanged to
// the templated detail::thrust_local_sort implementation on the given stream.
void thrust_local_sort(const double* values_in,
double* values_out,
const int64_t* indices_in,
int64_t* indices_out,
const size_t volume,
const size_t sort_dim_size,
const bool stable,
cudaStream_t stream)
{
detail::thrust_local_sort(
values_in, values_out, indices_in, indices_out, volume, sort_dim_size, stable, stream);
}
} // namespace cunumeric
|
525ebe6347301be06a63edfaa34a5284711a48d0.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <iomanip>
#include <math.h>
#include <helper_cuda.h>
#include <complex>
#include <hipfft.h>
#include <cufinufft.h>
#include "../cuspreadinterp.h"
#include "../cudeconvolve.h"
#include "../memtransfer.h"
using namespace std;
int cufinufft2d1_exec(CUCPX* d_c, CUCPX* d_fk, cufinufft_plan *d_plan)
/*
	2D Type-1 NUFFT

	This function is called in "exec" stage (See ../cufinufft.cu).
	It includes (copied from doc in finufft library)
		Step 1: spread data to oversampled regular mesh using kernel
		Step 2: compute FFT on uniform mesh
		Step 3: deconvolve by division of each Fourier mode independently by the
		Fourier series coefficient of the kernel.

	Returns 0 on success, otherwise the error code from the failing stage.

	Melody Shih 07/25/19
*/
{
	assert(d_plan->spopts.spread_direction == 1);
	hipEvent_t start, stop;
	hipEventCreate(&start);
	hipEventCreate(&stop);
	hipEventRecord(start);
	int blksize;
	// BUGFIX: initialize so an empty batch loop (ntransf <= 0) returns success
	// instead of an indeterminate value.
	int ier = 0;
	CUCPX* d_fkstart;
	CUCPX* d_cstart;
	// Process the ntransf transforms in batches of at most maxbatchsize.
	for(int i=0; i*d_plan->maxbatchsize < d_plan->ntransf; i++){
		blksize = min(d_plan->ntransf - i*d_plan->maxbatchsize,
			d_plan->maxbatchsize);
		d_cstart = d_c + i*d_plan->maxbatchsize*d_plan->M;
		d_fkstart = d_fk + i*d_plan->maxbatchsize*d_plan->ms*d_plan->mt;
		d_plan->c = d_cstart;
		d_plan->fk = d_fkstart;
		// The fine grid must be zeroed before spreading accumulates into it.
		checkCudaErrors(hipMemset(d_plan->fw,0,d_plan->maxbatchsize*
			d_plan->nf1*d_plan->nf2*sizeof(CUCPX)));// this is needed
#ifdef TIME
		float milliseconds = 0;
		hipEventRecord(stop);
		hipEventSynchronize(stop);
		hipEventElapsedTime(&milliseconds, start, stop);
		printf("[time ] \tInitialize fw to 0\t %.3g s\n",
			milliseconds/1000);
#endif
		// Step 1: Spread
		hipEventRecord(start);
		ier = cuspread2d(d_plan,blksize);
		if(ier != 0 ){
			printf("error: cuspread2d, method(%d)\n", d_plan->opts.gpu_method);
			// BUGFIX: propagate the failure (the original returned 0 == success
			// here) and release the timing events before leaving.
			hipEventDestroy(start);
			hipEventDestroy(stop);
			return ier;
		}
#ifdef TIME
		hipEventRecord(stop);
		hipEventSynchronize(stop);
		hipEventElapsedTime(&milliseconds, start, stop);
		printf("[time ] \tSpread (%d)\t\t %.3g s\n", milliseconds/1000,
			d_plan->opts.gpu_method);
#endif
		// Step 2: FFT (in place on the fine grid)
		hipEventRecord(start);
		CUFFT_EX(d_plan->fftplan, d_plan->fw, d_plan->fw, d_plan->iflag);
#ifdef TIME
		hipEventRecord(stop);
		hipEventSynchronize(stop);
		hipEventElapsedTime(&milliseconds, start, stop);
		printf("[time ] \tCUFFT Exec\t\t %.3g s\n", milliseconds/1000);
#endif
		// Step 3: deconvolve and shuffle
		hipEventRecord(start);
		cudeconvolve2d(d_plan,blksize);
#ifdef TIME
		hipEventRecord(stop);
		hipEventSynchronize(stop);
		hipEventElapsedTime(&milliseconds, start, stop);
		printf("[time ] \tDeconvolve\t\t %.3g s\n", milliseconds/1000);
#endif
	}
	// BUGFIX: the timing events were never destroyed (resource leak per call).
	hipEventDestroy(start);
	hipEventDestroy(stop);
	return ier;
}
int cufinufft2d2_exec(CUCPX* d_c, CUCPX* d_fk, cufinufft_plan *d_plan)
/*
	2D Type-2 NUFFT

	This function is called in "exec" stage (See ../cufinufft.cu).
	It includes (copied from doc in finufft library)
		Step 1: deconvolve (amplify) each Fourier mode, dividing by kernel
		Fourier coeff
		Step 2: compute FFT on uniform mesh
		Step 3: interpolate data to regular mesh

	Returns 0 on success, otherwise the error code from the failing stage.

	Melody Shih 07/25/19
*/
{
	assert(d_plan->spopts.spread_direction == 2);
	hipEvent_t start, stop;
	hipEventCreate(&start);
	hipEventCreate(&stop);
	hipEventRecord(start);
	int blksize;
	// BUGFIX: initialize so an empty batch loop (ntransf <= 0) returns success
	// instead of an indeterminate value.
	int ier = 0;
	CUCPX* d_fkstart;
	CUCPX* d_cstart;
	// Process the ntransf transforms in batches of at most maxbatchsize.
	for(int i=0; i*d_plan->maxbatchsize < d_plan->ntransf; i++){
		blksize = min(d_plan->ntransf - i*d_plan->maxbatchsize,
			d_plan->maxbatchsize);
		d_cstart = d_c + i*d_plan->maxbatchsize*d_plan->M;
		d_fkstart = d_fk + i*d_plan->maxbatchsize*d_plan->ms*d_plan->mt;
		d_plan->c = d_cstart;
		d_plan->fk = d_fkstart;
		// Step 1: amplify Fourier coeffs fk and copy into upsampled array fw
		hipEventRecord(start);
		cudeconvolve2d(d_plan,blksize);
#ifdef TIME
		float milliseconds = 0;
		hipEventRecord(stop);
		hipEventSynchronize(stop);
		hipEventElapsedTime(&milliseconds, start, stop);
		printf("[time ] \tAmplify & Copy fktofw\t %.3g s\n", milliseconds/1000);
#endif
		// Step 2: FFT (in place on the fine grid)
		hipDeviceSynchronize();
		hipEventRecord(start);
		CUFFT_EX(d_plan->fftplan, d_plan->fw, d_plan->fw, d_plan->iflag);
#ifdef TIME
		hipEventRecord(stop);
		hipEventSynchronize(stop);
		hipEventElapsedTime(&milliseconds, start, stop);
		printf("[time ] \tCUFFT Exec\t\t %.3g s\n", milliseconds/1000);
#endif
		// Step 3: interpolate from the fine grid to the nonuniform points
		hipEventRecord(start);
		ier = cuinterp2d(d_plan, blksize);
		if(ier != 0 ){
			printf("error: cuinterp2d, method(%d)\n", d_plan->opts.gpu_method);
			// BUGFIX: propagate the failure (the original returned 0 == success
			// here) and release the timing events before leaving.
			hipEventDestroy(start);
			hipEventDestroy(stop);
			return ier;
		}
#ifdef TIME
		hipEventRecord(stop);
		hipEventSynchronize(stop);
		hipEventElapsedTime(&milliseconds, start, stop);
		printf("[time ] \tUnspread (%d)\t\t %.3g s\n", milliseconds/1000,
			d_plan->opts.gpu_method);
#endif
	}
	// BUGFIX: the timing events were never destroyed (resource leak per call).
	hipEventDestroy(start);
	hipEventDestroy(stop);
	return ier;
}
| 525ebe6347301be06a63edfaa34a5284711a48d0.cu | #include <iostream>
#include <iomanip>
#include <math.h>
#include <helper_cuda.h>
#include <complex>
#include <cufft.h>
#include <cufinufft.h>
#include "../cuspreadinterp.h"
#include "../cudeconvolve.h"
#include "../memtransfer.h"
using namespace std;
int cufinufft2d1_exec(CUCPX* d_c, CUCPX* d_fk, cufinufft_plan *d_plan)
/*
	2D Type-1 NUFFT

	This function is called in "exec" stage (See ../cufinufft.cu).
	It includes (copied from doc in finufft library)
		Step 1: spread data to oversampled regular mesh using kernel
		Step 2: compute FFT on uniform mesh
		Step 3: deconvolve by division of each Fourier mode independently by the
		Fourier series coefficient of the kernel.

	Returns 0 on success, otherwise the error code from the failing stage.

	Melody Shih 07/25/19
*/
{
	assert(d_plan->spopts.spread_direction == 1);
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start);
	int blksize;
	// BUGFIX: initialize so an empty batch loop (ntransf <= 0) returns success
	// instead of an indeterminate value.
	int ier = 0;
	CUCPX* d_fkstart;
	CUCPX* d_cstart;
	// Process the ntransf transforms in batches of at most maxbatchsize.
	for(int i=0; i*d_plan->maxbatchsize < d_plan->ntransf; i++){
		blksize = min(d_plan->ntransf - i*d_plan->maxbatchsize,
			d_plan->maxbatchsize);
		d_cstart = d_c + i*d_plan->maxbatchsize*d_plan->M;
		d_fkstart = d_fk + i*d_plan->maxbatchsize*d_plan->ms*d_plan->mt;
		d_plan->c = d_cstart;
		d_plan->fk = d_fkstart;
		// The fine grid must be zeroed before spreading accumulates into it.
		checkCudaErrors(cudaMemset(d_plan->fw,0,d_plan->maxbatchsize*
			d_plan->nf1*d_plan->nf2*sizeof(CUCPX)));// this is needed
#ifdef TIME
		float milliseconds = 0;
		cudaEventRecord(stop);
		cudaEventSynchronize(stop);
		cudaEventElapsedTime(&milliseconds, start, stop);
		printf("[time ] \tInitialize fw to 0\t %.3g s\n",
			milliseconds/1000);
#endif
		// Step 1: Spread
		cudaEventRecord(start);
		ier = cuspread2d(d_plan,blksize);
		if(ier != 0 ){
			printf("error: cuspread2d, method(%d)\n", d_plan->opts.gpu_method);
			// BUGFIX: propagate the failure (the original returned 0 == success
			// here) and release the timing events before leaving.
			cudaEventDestroy(start);
			cudaEventDestroy(stop);
			return ier;
		}
#ifdef TIME
		cudaEventRecord(stop);
		cudaEventSynchronize(stop);
		cudaEventElapsedTime(&milliseconds, start, stop);
		printf("[time ] \tSpread (%d)\t\t %.3g s\n", milliseconds/1000,
			d_plan->opts.gpu_method);
#endif
		// Step 2: FFT (in place on the fine grid)
		cudaEventRecord(start);
		CUFFT_EX(d_plan->fftplan, d_plan->fw, d_plan->fw, d_plan->iflag);
#ifdef TIME
		cudaEventRecord(stop);
		cudaEventSynchronize(stop);
		cudaEventElapsedTime(&milliseconds, start, stop);
		printf("[time ] \tCUFFT Exec\t\t %.3g s\n", milliseconds/1000);
#endif
		// Step 3: deconvolve and shuffle
		cudaEventRecord(start);
		cudeconvolve2d(d_plan,blksize);
#ifdef TIME
		cudaEventRecord(stop);
		cudaEventSynchronize(stop);
		cudaEventElapsedTime(&milliseconds, start, stop);
		printf("[time ] \tDeconvolve\t\t %.3g s\n", milliseconds/1000);
#endif
	}
	// BUGFIX: the timing events were never destroyed (resource leak per call).
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	return ier;
}
int cufinufft2d2_exec(CUCPX* d_c, CUCPX* d_fk, cufinufft_plan *d_plan)
/*
	2D Type-2 NUFFT

	This function is called in "exec" stage (See ../cufinufft.cu).
	It includes (copied from doc in finufft library)
		Step 1: deconvolve (amplify) each Fourier mode, dividing by kernel
		Fourier coeff
		Step 2: compute FFT on uniform mesh
		Step 3: interpolate data to regular mesh

	Returns 0 on success, otherwise the error code from the failing stage.

	Melody Shih 07/25/19
*/
{
	assert(d_plan->spopts.spread_direction == 2);
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start);
	int blksize;
	// BUGFIX: initialize so an empty batch loop (ntransf <= 0) returns success
	// instead of an indeterminate value.
	int ier = 0;
	CUCPX* d_fkstart;
	CUCPX* d_cstart;
	// Process the ntransf transforms in batches of at most maxbatchsize.
	for(int i=0; i*d_plan->maxbatchsize < d_plan->ntransf; i++){
		blksize = min(d_plan->ntransf - i*d_plan->maxbatchsize,
			d_plan->maxbatchsize);
		d_cstart = d_c + i*d_plan->maxbatchsize*d_plan->M;
		d_fkstart = d_fk + i*d_plan->maxbatchsize*d_plan->ms*d_plan->mt;
		d_plan->c = d_cstart;
		d_plan->fk = d_fkstart;
		// Step 1: amplify Fourier coeffs fk and copy into upsampled array fw
		cudaEventRecord(start);
		cudeconvolve2d(d_plan,blksize);
#ifdef TIME
		float milliseconds = 0;
		cudaEventRecord(stop);
		cudaEventSynchronize(stop);
		cudaEventElapsedTime(&milliseconds, start, stop);
		printf("[time ] \tAmplify & Copy fktofw\t %.3g s\n", milliseconds/1000);
#endif
		// Step 2: FFT (in place on the fine grid)
		cudaDeviceSynchronize();
		cudaEventRecord(start);
		CUFFT_EX(d_plan->fftplan, d_plan->fw, d_plan->fw, d_plan->iflag);
#ifdef TIME
		cudaEventRecord(stop);
		cudaEventSynchronize(stop);
		cudaEventElapsedTime(&milliseconds, start, stop);
		printf("[time ] \tCUFFT Exec\t\t %.3g s\n", milliseconds/1000);
#endif
		// Step 3: interpolate from the fine grid to the nonuniform points
		cudaEventRecord(start);
		ier = cuinterp2d(d_plan, blksize);
		if(ier != 0 ){
			printf("error: cuinterp2d, method(%d)\n", d_plan->opts.gpu_method);
			// BUGFIX: propagate the failure (the original returned 0 == success
			// here) and release the timing events before leaving.
			cudaEventDestroy(start);
			cudaEventDestroy(stop);
			return ier;
		}
#ifdef TIME
		cudaEventRecord(stop);
		cudaEventSynchronize(stop);
		cudaEventElapsedTime(&milliseconds, start, stop);
		printf("[time ] \tUnspread (%d)\t\t %.3g s\n", milliseconds/1000,
			d_plan->opts.gpu_method);
#endif
	}
	// BUGFIX: the timing events were never destroyed (resource leak per call).
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	return ier;
}
|
0613428bd5ea6c21422347b351c46c4a9b944fd2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Tiled Jacobi iteration kernel: loads a halo tile of T_old into shared
// memory, performs a 4-point stencil update, and accumulates the squared
// residual into `reduc`.
// NOTE(review): this kernel is not valid / not correct as written:
//   - T2 is read in the stencil before any element of it has been written
//     (uninitialized shared memory); the update presumably meant to read T1.
//   - the four "Bo[] = T2" lines are not legal C++ (look like an unfinished
//     write-back of the tile to Bo) and will not compile.
//   - every thread of every block stores to reduc[0] unsynchronized, so the
//     residual is a race, not a reduction.
__global__ void jacobi(Matrix T_old, Matrix Bo,float* reduc)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
// Output coordinates for this thread's tile element.
int row_o = blockIdx.y*TILE_SIZE + threadIdx.y;
int col_o = blockIdx.x*TILE_SIZE + threadIdx.x;
// Input coordinates shifted back by the halo radius.
int row_i = row_o - FILTER_SIZE/2;
int col_i = col_o - FILTER_SIZE/2;
__shared__ float T1[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float T2[BLOCK_SIZE][BLOCK_SIZE];
int nx= T_old.width;
int ny= T_old.height;
// Load interior points; zero-fill outside the domain boundary.
if ((row_i > 0) && (row_i<ny-1) && (col_i>0) && (col_i<nx-1))
T1[ty][tx]=T_old.elements[row_i*nx + col_i];
else
T1[ty][tx]=0.0;
__syncthreads();
float output = 0.0;
for(int j=1; j<BLOCK_SIZE-1;j++){
for(int i=1; i<BLOCK_SIZE-1;i++){
T2[j][i] = 0.25*(T2[j+1][i] + T2[j-1][i] + T2[j][i+1] +T2[j][i-1]);
output += (T2[j][i] - T1[j][i])*(T2[j][i] - T1[j][i]);
}
}
Bo[] = T2
Bo[] = T2
Bo[] = T2
Bo[] = T2
reduc[0] = output;
}
| 0613428bd5ea6c21422347b351c46c4a9b944fd2.cu | __global__ void jacobi(Matrix T_old, Matrix Bo,float* reduc)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
int row_o = blockIdx.y*TILE_SIZE + threadIdx.y;
int col_o = blockIdx.x*TILE_SIZE + threadIdx.x;
int row_i = row_o - FILTER_SIZE/2;
int col_i = col_o - FILTER_SIZE/2;
__shared__ float T1[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float T2[BLOCK_SIZE][BLOCK_SIZE];
int nx= T_old.width;
int ny= T_old.height;
if ((row_i > 0) && (row_i<ny-1) && (col_i>0) && (col_i<nx-1))
T1[ty][tx]=T_old.elements[row_i*nx + col_i];
else
T1[ty][tx]=0.0;
__syncthreads();
float output = 0.0;
for(int j=1; j<BLOCK_SIZE-1;j++){
for(int i=1; i<BLOCK_SIZE-1;i++){
T2[j][i] = 0.25*(T2[j+1][i] + T2[j-1][i] + T2[j][i+1] +T2[j][i-1]);
output += (T2[j][i] - T1[j][i])*(T2[j][i] - T1[j][i]);
}
}
Bo[] = T2
Bo[] = T2
Bo[] = T2
Bo[] = T2
reduc[0] = output;
}
|
8de9cd4b6eaa451ad6b0bb597c4929772ef55622.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <cstdio>
#include <iostream>
#include <sstream>
// Single-thread demo kernel: store a + b into *c (c must be device memory).
__global__ void add(int a, int b, int* c)
{
*c = a + b;
}
// Abort the program with a diagnostic if a CUDA/HIP API call failed.
// NOTE: because this is a function (not a macro), __FILE__/__LINE__ expand
// to THIS location, not the failing call site; a CHECK macro would be more
// precise, but the signature is kept for existing callers.
inline void HandleError(hipError_t p_cuda_error) {
	if (p_cuda_error != hipSuccess) {
		// BUGFIX: the original printed "...<error>in <file>" with no spacing.
		std::cout << hipGetErrorString(p_cuda_error) << " in " << __FILE__ << " at line " << __LINE__ << std::endl;
		exit(EXIT_FAILURE);
	}
}
// Collect a human-readable report of every CUDA/HIP device's general,
// memory, and multiprocessor properties into a stringstream.
// Returns an empty stream when no device is present.
std::stringstream DeviceParameterInformation() {
	int count;
	hipDeviceProp_t properties;
	HandleError(hipGetDeviceCount(&count));
	std::stringstream ss;
	for (int i = 0; i < count; ++i){
		HandleError(hipGetDeviceProperties(&properties, i));
		ss << "\t---\tGeneral Information for device " << i << std::endl;
		ss << "Name: " << properties.name << std::endl;
		ss << "Compute Capability: " << properties.major << "." << properties.minor << std::endl;
		ss << "Clock Rate: " << properties.clockRate << std::endl;
		ss << "Device copy overlap: ";
		if (properties.deviceOverlap) {
			ss << "Enabled" << std::endl;
		}
		else {
			ss << "Disabled" << std::endl;
		}
		ss << "Kernel Execution timeout: ";
		if (properties.kernelExecTimeoutEnabled) {
			ss << "Enabled" << std::endl;
		}
		else {
			ss << "Disabled" << std::endl;
		}
		ss << "\t---\tMemory Information for device " << i << "---" << std::endl;
		ss << "Total Global Memory: " << properties.totalGlobalMem << std::endl;
		ss << "Total Constant Memory: " << properties.totalConstMem << std::endl;
		ss << "Max Mem Pitch: " << properties.memPitch << std::endl;
		ss << "Texture Alignment: " << properties.textureAlignment << std::endl;
		ss << "\t---\tInformation for device " << i << " ---\t" << std::endl;
		ss << "Multiprocessor count: " << properties.multiProcessorCount << std::endl;
		ss << "Shared Memory per MP: " << properties.sharedMemPerBlock << std::endl;
		ss << "Registers per MP: " << properties.regsPerBlock << std::endl;
		ss << "Threads in Warp: " << properties.warpSize << std::endl;
		ss << "Max Threads per Block: " << properties.maxThreadsPerBlock << std::endl;
		ss << "Max thread dimensions: ( " << properties.maxThreadsDim[0] << "," << properties.maxThreadsDim[1] << ","
			<< properties.maxThreadsDim[2] << ")" << std::endl;
		ss << "Max grid dimensions: ( " << properties.maxGridSize[0] << "," << properties.maxGridSize[1] << ","
			<< properties.maxGridSize[2] << ")" << std::endl;
		ss << std::endl;
	}
	// BUGFIX: the original returned inside the loop, so only device 0 was ever
	// reported, and with count == 0 the function fell off the end (UB for a
	// value-returning function). Returning here covers all devices and the
	// zero-device case.
	return ss;
}
// Demo driver: print device properties, run the `add` kernel on one thread,
// copy the result back, print it, and free the device buffer.
int main()
{
int c;
int* d_c;
std::cout << DeviceParameterInformation().str() << std::endl;
HandleError(hipMalloc((void**)&d_c, sizeof(int)));
// NOTE(review): the launch itself is unchecked — consider a
// HandleError(hipGetLastError()) after it; the following blocking Memcpy
// does synchronize before the result is read.
add << <1, 1 >> >(2, 7, d_c);
HandleError(hipMemcpy(&c, d_c, sizeof(int), hipMemcpyDeviceToHost));
std::cout << " result is " << c << std::endl;
hipFree(d_c);
return 0;
}
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cstdio>
#include <iostream>
#include <sstream>
// Single-thread demo kernel: store a + b into *c (c must be device memory).
__global__ void add(int a, int b, int* c)
{
*c = a + b;
}
// Abort the program with a diagnostic if a CUDA API call failed.
// NOTE: because this is a function (not a macro), __FILE__/__LINE__ expand
// to THIS location, not the failing call site; a CHECK macro would be more
// precise, but the signature is kept for existing callers.
inline void HandleError(cudaError_t p_cuda_error) {
	if (p_cuda_error != cudaSuccess) {
		// BUGFIX: the original printed "...<error>in <file>" with no spacing.
		std::cout << cudaGetErrorString(p_cuda_error) << " in " << __FILE__ << " at line " << __LINE__ << std::endl;
		exit(EXIT_FAILURE);
	}
}
// Collect a human-readable report of every CUDA device's general, memory,
// and multiprocessor properties into a stringstream.
// Returns an empty stream when no device is present.
std::stringstream DeviceParameterInformation() {
	int count;
	cudaDeviceProp properties;
	HandleError(cudaGetDeviceCount(&count));
	std::stringstream ss;
	for (int i = 0; i < count; ++i){
		HandleError(cudaGetDeviceProperties(&properties, i));
		ss << "\t---\tGeneral Information for device " << i << std::endl;
		ss << "Name: " << properties.name << std::endl;
		ss << "Compute Capability: " << properties.major << "." << properties.minor << std::endl;
		ss << "Clock Rate: " << properties.clockRate << std::endl;
		ss << "Device copy overlap: ";
		if (properties.deviceOverlap) {
			ss << "Enabled" << std::endl;
		}
		else {
			ss << "Disabled" << std::endl;
		}
		ss << "Kernel Execution timeout: ";
		if (properties.kernelExecTimeoutEnabled) {
			ss << "Enabled" << std::endl;
		}
		else {
			ss << "Disabled" << std::endl;
		}
		ss << "\t---\tMemory Information for device " << i << "---" << std::endl;
		ss << "Total Global Memory: " << properties.totalGlobalMem << std::endl;
		ss << "Total Constant Memory: " << properties.totalConstMem << std::endl;
		ss << "Max Mem Pitch: " << properties.memPitch << std::endl;
		ss << "Texture Alignment: " << properties.textureAlignment << std::endl;
		ss << "\t---\tInformation for device " << i << " ---\t" << std::endl;
		ss << "Multiprocessor count: " << properties.multiProcessorCount << std::endl;
		ss << "Shared Memory per MP: " << properties.sharedMemPerBlock << std::endl;
		ss << "Registers per MP: " << properties.regsPerBlock << std::endl;
		ss << "Threads in Warp: " << properties.warpSize << std::endl;
		ss << "Max Threads per Block: " << properties.maxThreadsPerBlock << std::endl;
		ss << "Max thread dimensions: ( " << properties.maxThreadsDim[0] << "," << properties.maxThreadsDim[1] << ","
			<< properties.maxThreadsDim[2] << ")" << std::endl;
		ss << "Max grid dimensions: ( " << properties.maxGridSize[0] << "," << properties.maxGridSize[1] << ","
			<< properties.maxGridSize[2] << ")" << std::endl;
		ss << std::endl;
	}
	// BUGFIX: the original returned inside the loop, so only device 0 was ever
	// reported, and with count == 0 the function fell off the end (UB for a
	// value-returning function). Returning here covers all devices and the
	// zero-device case.
	return ss;
}
// Demo driver: print device properties, run the `add` kernel on one thread,
// copy the result back, print it, and free the device buffer.
int main()
{
int c;
int* d_c;
std::cout << DeviceParameterInformation().str() << std::endl;
HandleError(cudaMalloc((void**)&d_c, sizeof(int)));
// NOTE(review): the launch itself is unchecked — consider a
// HandleError(cudaGetLastError()) after it; the following blocking Memcpy
// does synchronize before the result is read.
add << <1, 1 >> >(2, 7, d_c);
HandleError(cudaMemcpy(&c, d_c, sizeof(int), cudaMemcpyDeviceToHost));
std::cout << " result is " << c << std::endl;
cudaFree(d_c);
return 0;
}
3e40001bbccfe880146c5af03b1aacd78f52874a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vecMult.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver: for each matrix size x block shape combination, allocate
// double buffers, warm up vecMult, then time 1000 launches.
// NOTE(review): argv[1] is read unchecked, and the timed loop only measures
// launch enqueue cost (no synchronize before `end`) — kept as-is to preserve
// the benchmark's existing output.
int main(int argc, char **argv) {
	hipSetDevice(0);
	char* p;int matrix_len=strtol(argv[1], &p, 10);
	for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
		for(int block_looper=0;block_looper<20;block_looper++){
			int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
			double *in = NULL;
			// BUGFIX: the original allocated XSIZE*YSIZE BYTES for double
			// buffers — an 8x under-allocation. Allocate element count * size.
			hipMalloc(&in, XSIZE*YSIZE*sizeof(double));
			double *factor = NULL;
			hipMalloc(&factor, XSIZE*YSIZE*sizeof(double));
			double *out = NULL;
			hipMalloc(&out, XSIZE*YSIZE*sizeof(double));
			// Round the launch extent up to a multiple of the block shape.
			int iXSIZE= XSIZE;
			int iYSIZE= YSIZE;
			while(iXSIZE%BLOCKX!=0)
			{
				iXSIZE++;
			}
			while(iYSIZE%BLOCKY!=0)
			{
				iYSIZE++;
			}
			dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
			dim3 threadBlock(BLOCKX, BLOCKY);
			hipFree(0);hipLaunchKernelGGL((
			vecMult), dim3(gridBlock),dim3(threadBlock), 0, 0, in,factor,out);
			hipDeviceSynchronize();
			// Warm-up launches before the timed section.
			for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
				vecMult), dim3(gridBlock),dim3(threadBlock), 0, 0, in,factor,out);
			}
			auto start = steady_clock::now();
			for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
				vecMult), dim3(gridBlock),dim3(threadBlock), 0, 0, in,factor,out);
			}
			auto end = steady_clock::now();
			auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
			cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
			// BUGFIX: free the per-configuration buffers; the original leaked
			// them on every one of the matrix x block iterations.
			hipFree(in);
			hipFree(factor);
			hipFree(out);
		}
	}}
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vecMult.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver: for each matrix size x block shape combination, allocate
// double buffers, warm up vecMult, then time 1000 launches.
// NOTE(review): argv[1] is read unchecked, and the timed loop only measures
// launch enqueue cost (no synchronize before `end`) — kept as-is to preserve
// the benchmark's existing output.
int main(int argc, char **argv) {
	cudaSetDevice(0);
	char* p;int matrix_len=strtol(argv[1], &p, 10);
	for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
		for(int block_looper=0;block_looper<20;block_looper++){
			int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
			double *in = NULL;
			// BUGFIX: the original allocated XSIZE*YSIZE BYTES for double
			// buffers — an 8x under-allocation. Allocate element count * size.
			cudaMalloc(&in, XSIZE*YSIZE*sizeof(double));
			double *factor = NULL;
			cudaMalloc(&factor, XSIZE*YSIZE*sizeof(double));
			double *out = NULL;
			cudaMalloc(&out, XSIZE*YSIZE*sizeof(double));
			// Round the launch extent up to a multiple of the block shape.
			int iXSIZE= XSIZE;
			int iYSIZE= YSIZE;
			while(iXSIZE%BLOCKX!=0)
			{
				iXSIZE++;
			}
			while(iYSIZE%BLOCKY!=0)
			{
				iYSIZE++;
			}
			dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
			dim3 threadBlock(BLOCKX, BLOCKY);
			cudaFree(0);
			vecMult<<<gridBlock,threadBlock>>>(in,factor,out);
			cudaDeviceSynchronize();
			// Warm-up launches before the timed section.
			for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
				vecMult<<<gridBlock,threadBlock>>>(in,factor,out);
			}
			auto start = steady_clock::now();
			for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
				vecMult<<<gridBlock,threadBlock>>>(in,factor,out);
			}
			auto end = steady_clock::now();
			auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
			cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
			// BUGFIX: free the per-configuration buffers; the original leaked
			// them on every one of the matrix x block iterations.
			cudaFree(in);
			cudaFree(factor);
			cudaFree(out);
		}
	}}
9632c74435023e8c6c457531633eb82c46dcdb28.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/Exceptions.h>
// Another possibility:
// #include <torch/all.h>
#include <assert.h>
#include "type_shim.h"
#include "multi_tensor_apply.cuh"
#define BLOCK_SIZE 512
#define ILP 4
// Step 2 reads in 'update' value and per-tensor param_norm and update_norm.
// It computes new parameter value.
// LAMB stage-2 device functor: applies the trust-ratio-scaled update to the
// parameters. Each block processes one chunk of one tensor from the
// multi_tensor_apply metadata; work is unrolled ILP-wide per thread.
// Address list layout: addresses[0] = parameters (T), addresses[1] = update (UPD_T).
template<typename T, typename UPD_T>
struct LAMBStage2Functor
{
__device__ __forceinline__ void operator()(
int chunk_size,
volatile int* noop_gmem,
TensorListMetadata<2>& tl,
const float* per_tensor_param_norm,
const float* per_tensor_update_norm,
const float learning_rate)
{
// I'd like this kernel to propagate infs/nans.
// if(*noop_gmem == 1)
// return;
// Map this block to its (tensor, chunk) assignment.
int tensor_loc = tl.block_to_tensor[blockIdx.x];
int tensor_num = tl.start_tensor_this_launch + tensor_loc;
int chunk_idx = tl.block_to_chunk[blockIdx.x];
int n = tl.sizes[tensor_loc];
float param_norm = per_tensor_param_norm[tensor_num];
float update_norm = per_tensor_update_norm[tensor_num];
// LAMB trust ratio: scale lr by ||param|| / ||update||; fall back to plain
// lr when either norm is zero.
T ratio = (update_norm != 0.0f && param_norm != 0.0f) ? learning_rate * (param_norm / update_norm) : learning_rate;
T* p = (T*)tl.addresses[0][tensor_loc];
p += chunk_idx*chunk_size;
UPD_T* update = (UPD_T*)tl.addresses[1][tensor_loc];
update += chunk_idx*chunk_size;
n -= chunk_idx*chunk_size;
// Grid-of-chunks loop: each thread handles ILP elements per pass,
// gather -> compute -> scatter, all guarded against the chunk tail.
for(int i_start = 0;
i_start < n && i_start < chunk_size;
i_start += blockDim.x*ILP)
{
T r_p[ILP];
UPD_T r_update[ILP];
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
r_p[ii] = p[i];
r_update[ii] = update[i];
}
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
r_p[ii] = r_p[ii] - (ratio*(T)r_update[ii]);
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
p[i] = r_p[ii];
}
}
}
}
};
// Host entry point for LAMB stage 2: dispatches on the param and update
// dtypes (float/half) and runs LAMBStage2Functor over all tensor chunks via
// multi_tensor_apply. tensor_lists = { params, updates }.
void multi_tensor_lamb_stage2_cuda(
int chunk_size,
at::Tensor noop_flag,
std::vector<std::vector<at::Tensor>> tensor_lists,
at::Tensor per_tensor_param_norm,
at::Tensor per_tensor_update_norm,
const float learning_rate)
{
using namespace at;
DISPATCH_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), 0, "lamb_stage_2",
DISPATCH_FLOAT_AND_HALF(tensor_lists[1][0].scalar_type(), 1, "lamb_stage_2",
multi_tensor_apply<2>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
LAMBStage2Functor<scalar_t_0, scalar_t_1>(),
per_tensor_param_norm.DATA_PTR<float>(),
per_tensor_update_norm.DATA_PTR<float>(),
learning_rate); ))
// Surface any launch error from the kernels queued above.
AT_CUDA_CHECK(hipGetLastError());
// AT_CUDA_CHECK(hipDeviceSynchronize());
}
| 9632c74435023e8c6c457531633eb82c46dcdb28.cu | #include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/Exceptions.h>
// Another possibility:
// #include <torch/all.h>
#include <assert.h>
#include "type_shim.h"
#include "multi_tensor_apply.cuh"
#define BLOCK_SIZE 512
#define ILP 4
// Step 2 reads in 'update' value and per-tensor param_norm and update_norm.
// It computes new parameter value.
// LAMB stage-2 device functor: applies the trust-ratio-scaled update to the
// parameters. Each block processes one chunk of one tensor from the
// multi_tensor_apply metadata; work is unrolled ILP-wide per thread.
// Address list layout: addresses[0] = parameters (T), addresses[1] = update (UPD_T).
template<typename T, typename UPD_T>
struct LAMBStage2Functor
{
__device__ __forceinline__ void operator()(
int chunk_size,
volatile int* noop_gmem,
TensorListMetadata<2>& tl,
const float* per_tensor_param_norm,
const float* per_tensor_update_norm,
const float learning_rate)
{
// I'd like this kernel to propagate infs/nans.
// if(*noop_gmem == 1)
// return;
// Map this block to its (tensor, chunk) assignment.
int tensor_loc = tl.block_to_tensor[blockIdx.x];
int tensor_num = tl.start_tensor_this_launch + tensor_loc;
int chunk_idx = tl.block_to_chunk[blockIdx.x];
int n = tl.sizes[tensor_loc];
float param_norm = per_tensor_param_norm[tensor_num];
float update_norm = per_tensor_update_norm[tensor_num];
// LAMB trust ratio: scale lr by ||param|| / ||update||; fall back to plain
// lr when either norm is zero.
T ratio = (update_norm != 0.0f && param_norm != 0.0f) ? learning_rate * (param_norm / update_norm) : learning_rate;
T* p = (T*)tl.addresses[0][tensor_loc];
p += chunk_idx*chunk_size;
UPD_T* update = (UPD_T*)tl.addresses[1][tensor_loc];
update += chunk_idx*chunk_size;
n -= chunk_idx*chunk_size;
// Grid-of-chunks loop: each thread handles ILP elements per pass,
// gather -> compute -> scatter, all guarded against the chunk tail.
for(int i_start = 0;
i_start < n && i_start < chunk_size;
i_start += blockDim.x*ILP)
{
T r_p[ILP];
UPD_T r_update[ILP];
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
r_p[ii] = p[i];
r_update[ii] = update[i];
}
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
r_p[ii] = r_p[ii] - (ratio*(T)r_update[ii]);
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
p[i] = r_p[ii];
}
}
}
}
};
// Host entry point for LAMB optimizer stage 2.
// tensor_lists[0] holds the parameter tensors, tensor_lists[1] the update
// tensors; the two lists may have different scalar types, so we dispatch on
// both (float/half) before applying LAMBStage2Functor over all chunks via
// multi_tensor_apply.  noop_flag is a device-side skip flag consumed by the
// multi-tensor machinery.
void multi_tensor_lamb_stage2_cuda(
int chunk_size,
at::Tensor noop_flag,
std::vector<std::vector<at::Tensor>> tensor_lists,
at::Tensor per_tensor_param_norm,
at::Tensor per_tensor_update_norm,
const float learning_rate)
{
using namespace at;
DISPATCH_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), 0, "lamb_stage_2",
DISPATCH_FLOAT_AND_HALF(tensor_lists[1][0].scalar_type(), 1, "lamb_stage_2",
multi_tensor_apply<2>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
LAMBStage2Functor<scalar_t_0, scalar_t_1>(),
per_tensor_param_norm.DATA_PTR<float>(),
per_tensor_update_norm.DATA_PTR<float>(),
learning_rate); ))
// Surface launch-configuration errors; the full device sync is left
// disabled to avoid serializing the stream.
AT_CUDA_CHECK(cudaGetLastError());
// AT_CUDA_CHECK(cudaDeviceSynchronize());
}
|
7f943578775e86038cdb5f64bf9dd6d76fefb3dc.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime_api.h>
namespace detectron2 {
// Returns the CUDA runtime version this extension was compiled against
// (the compile-time CUDART_VERSION constant, e.g. 11080 for CUDA 11.8).
int get_cudart_version() {
return CUDART_VERSION;
}
} // namespace detectron2
| 7f943578775e86038cdb5f64bf9dd6d76fefb3dc.cu | #include <cuda_runtime_api.h>
namespace detectron2 {
// Returns the CUDA runtime version this extension was compiled against
// (the compile-time CUDART_VERSION constant, e.g. 11080 for CUDA 11.8).
int get_cudart_version() {
return CUDART_VERSION;
}
} // namespace detectron2
|
639d61bbc1550f9ef53a96e90af5ef3b0aebdc60.hip | // !!! This is a file automatically generated by hipify!!!
//**********************************************************//
// Matching test code by Marten Bjorkman aka Celebrandil //
// //
// The code includes an example of gradual optimization //
// of a kernel for matching two sets of 16K 128D points. //
//**********************************************************//
#include <cstring>
#include <cmath>
#include <iostream>
#include <vector>
#include <memory> // std::align
#include <chrono>
#include <hip/hip_runtime.h>
#define REPEAT 100
#define NPTS (2048*8)
#define NDIM 128
#define M1W 128
#define M2W 16
#define M2H 16
#define M5W 16
#define M5H 16
#define M5R 4
#define M7W 32
#define M7H 32
#define M7R 4
// Serial CPU reference: for each point of set 1, find the index of the
// best-matching point of set 2 (maximum dot product over ndim dimensions).
// h_score[i] / h_index[i] receive the best score and matching index; when
// every score is <= 0 for a point, h_index keeps its prior value (original
// behavior preserved).  npts/ndim default to the benchmark sizes (NPTS,
// NDIM); smaller values allow quick correctness checks.
void MatchC1(float *h_pts1, float *h_pts2, float *h_score, int *h_index,
             int npts = 2048*8, int ndim = 128)
{
  std::memset(h_score, 0, sizeof(float)*npts);
  for (int p1=0;p1<npts;p1++) {
    for (int p2=0;p2<npts;p2++) {
      // Dot product of descriptor p1 (set 1) with descriptor p2 (set 2).
      float score = 0.0f;
      for (int d=0;d<ndim;d++)
        score += h_pts1[p1*ndim + d]*h_pts2[p2*ndim + d];
      if (score>h_score[p1]) {
        h_score[p1] = score;
        h_index[p1] = p2;
      }
    }
  }
}
// Verify device results (h_index2/h_score2) against the CPU reference
// (h_index/h_score).  Prints each mismatching entry and the total count.
// Returns the number of mismatches so callers can assert on it (existing
// callers that ignore the return value are unaffected).  npts defaults to
// the benchmark size NPTS.
int CheckMatches(int *h_index, int *h_index2, float *h_score, float *h_score2,
                 int npts = 2048*8)
{
  int ndiff = 0;
  for (int i=0;i<npts;i++) {
    ndiff += (h_index[i] != h_index2[i]);
    if (h_index[i] != h_index2[i])
      std::cout << " " << i << " " << h_index[i] << " " << h_index2[i] << " " << h_score[i] << " " << h_score2[i] << std::endl;
  }
  std::cout << "Number of incorrect matches: " << ndiff << std::endl;
  return ndiff;
}
// Kernel 1: naive baseline.  One thread per set-1 point (launched as grid
// NPTS/M1W, block M1W, 1-D); each thread streams all NPTS set-2 points and
// all NDIM dimensions straight from global memory, keeping the best
// dot-product score and its index.
__global__ void Match1(const float *__restrict d_pts1,
const float *__restrict d_pts2,
float *__restrict d_score,
int *__restrict d_index)
{
int p1 = threadIdx.x + M1W*blockIdx.x;
float max_score = 0.0f;
int index = -1;
for (int p2=0;p2<NPTS;p2++) {
float score = 0.0f;
for (int d=0;d<NDIM;d++)
score += d_pts1[p1*NDIM + d]*d_pts2[p2*NDIM + d];
if (score>max_score) {
max_score = score;
index = p2;
}
}
d_score[p1] = max_score;
d_index[p1] = index;
}
// Kernel 2: shared-memory tiling.  Each (M2W x M2H) block caches M2W set-1
// descriptors once in buffer1, then streams set 2 in tiles of M2H
// descriptors through buffer2.  Thread (tx, ty) computes one dot product
// per tile into scores[]; the ty==0 row folds the tile into the running
// per-point maximum.
__global__ void Match2(const float *__restrict d_pts1,
const float *__restrict d_pts2,
float *__restrict d_score,
int *__restrict d_index)
{
__shared__ float buffer1[M2W*NDIM];
__shared__ float buffer2[M2H*NDIM];
__shared__ float scores[M2W*M2H];
int tx = threadIdx.x;
int ty = threadIdx.y;
int idx = tx + M2W*ty;
int bp1 = M2W*blockIdx.x;
if (ty<M2W)
for (int d=tx;d<NDIM;d+=M2W)
for (int j=ty;j<M2W;j+=M2H)
buffer1[j*NDIM + d] = d_pts1[(bp1 + j)*NDIM + d];
__syncthreads();
float max_score = 0.0f;
int index = -1;
for (int bp2=0;bp2<NPTS;bp2+=M2H) {
for (int d=tx;d<NDIM;d+=M2W)
buffer2[ty*NDIM + d] = d_pts2[(bp2 + ty)*NDIM + d];
__syncthreads();
float score = 0.0f;
for (int d=0;d<NDIM;d++)
score += buffer1[tx*NDIM + d]*buffer2[ty*NDIM + d];
scores[idx] = score;
__syncthreads();
if (ty==0) {
for (int i=0;i<M2H;i++) {
if (scores[i*M2W + tx]>max_score) {
max_score = scores[i*M2W + tx];
index = bp2 + i;
}
}
}
__syncthreads();
}
if (ty==0) {
d_score[bp1 + tx] = max_score;
d_index[bp1 + tx] = index;
}
}
// Kernel 3: identical to Match2 except buffer1 rows use a (NDIM + 1) stride
// — the one-float padding skews the rows, presumably to avoid shared-memory
// bank conflicts on the per-thread row accesses.
__global__ void Match3(const float *__restrict d_pts1,
const float *__restrict d_pts2,
float *__restrict d_score,
int *__restrict d_index)
{
__shared__ float buffer1[M2W*(NDIM + 1)];
__shared__ float buffer2[M2H*NDIM];
__shared__ float scores[M2W*M2H];
int tx = threadIdx.x;
int ty = threadIdx.y;
int idx = tx + M2W*ty;
int bp1 = M2W*blockIdx.x;
if (ty<M2W)
for (int d=tx;d<NDIM;d+=M2W)
for (int j=ty;j<M2W;j+=M2H)
buffer1[j*(NDIM + 1) + d] = d_pts1[(bp1 + j)*NDIM + d];
__syncthreads();
float max_score = 0.0f;
int index = -1;
for (int bp2=0;bp2<NPTS;bp2+=M2H) {
for (int d=tx;d<NDIM;d+=M2W)
buffer2[ty*NDIM + d] = d_pts2[(bp2 + ty)*NDIM + d];
__syncthreads();
float score = 0.0f;
for (int d=0;d<NDIM;d++)
score += buffer1[tx*(NDIM + 1) + d]*buffer2[ty*NDIM + d];
scores[idx] = score;
__syncthreads();
if (ty==0) {
for (int i=0;i<M2H;i++) {
if (scores[i*M2W + tx]>max_score) {
max_score = scores[i*M2W + tx];
index = bp2 + i;
}
}
}
__syncthreads();
}
if (ty==0) {
d_score[bp1 + tx] = max_score;
d_index[bp1 + tx] = index;
}
}
// Kernel 4: float4-vectorized variant of Match3 — 16-byte global loads
// (requires 16-byte-aligned descriptor rows, which NDIM=128 floats gives)
// and float4 arithmetic on the shared tiles; row padding kept on buffer1.
__global__ void Match4(const float *__restrict d_pts1,
const float *__restrict d_pts2,
float *__restrict d_score,
int *__restrict d_index)
{
__shared__ float4 buffer1[M2W*(NDIM/4 + 1)];
__shared__ float4 buffer2[M2H*NDIM/4];
__shared__ float scores[M2W*M2H];
int tx = threadIdx.x;
int ty = threadIdx.y;
int idx = tx + M2W*ty;
int bp1 = M2W*blockIdx.x;
if (ty<M2W)
for (int d=tx;d<NDIM/4;d+=M2W)
for (int j=ty;j<M2W;j+=M2H)
buffer1[j*(NDIM/4 + 1) + d] = ((float4*)d_pts1)[(bp1 + j)*(NDIM/4) + d];
__syncthreads();
float max_score = 0.0f;
int index = -1;
for (int bp2=0;bp2<NPTS;bp2+=M2H) {
for (int d=tx;d<NDIM/4;d+=M2W)
buffer2[ty*NDIM/4 + d] = ((float4*)d_pts2)[(bp2 + ty)*(NDIM/4) + d];
__syncthreads();
float score = 0.0f;
for (int d=0;d<NDIM/4;d++) {
float4 v1 = buffer1[tx*(NDIM/4 + 1) + d];
float4 v2 = buffer2[ty*(NDIM/4) + d];
score += v1.x*v2.x; score += v1.y*v2.y;
score += v1.z*v2.z; score += v1.w*v2.w;
}
scores[idx] = score;
__syncthreads();
if (ty==0) {
for (int i=0;i<M2H;i++) {
if (scores[i*M2W + tx]>max_score) {
max_score = scores[i*M2W + tx];
index = bp2 + i;
}
}
}
__syncthreads();
}
if (ty==0) {
d_score[bp1 + tx] = max_score;
d_index[bp1 + tx] = index;
}
}
// Kernel 5: register blocking on top of Match4.  Only the first M5H/M5R
// thread rows compute; each accumulates M5R scores in registers before
// writing them to the shared score tile, which row ty==0 then folds into
// the running per-point maximum.
// Fix: the final reduction indexed the tile with M2W (copied from Match2)
// instead of M5W — it only worked because M2W == M5W.  Now uses M5W
// consistently so the kernel stays correct if the tile sizes ever diverge.
__global__ void Match5(const float *__restrict d_pts1,
const float *__restrict d_pts2,
float *__restrict d_score,
int *__restrict d_index)
{
__shared__ float4 buffer1[M5W*(NDIM/4 + 1)];
__shared__ float4 buffer2[M5H*NDIM/4];
__shared__ float scores[M5W*M5H];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bp1 = M5W*blockIdx.x;
if (ty<M5W)
for (int d=tx;d<NDIM/4;d+=M5W)
for (int j=ty;j<M5W;j+=M5H)
buffer1[j*(NDIM/4 + 1) + d] = ((float4*)d_pts1)[(bp1 + j)*(NDIM/4) + d];
__syncthreads();
float max_score = 0.0f;
int index = -1;
for (int bp2=0;bp2<NPTS;bp2+=M5H) {
for (int d=tx;d<NDIM/4;d+=M5W)
buffer2[ty*NDIM/4 + d] = ((float4*)d_pts2)[(bp2 + ty)*(NDIM/4) + d];
__syncthreads();
if (ty<M5H/M5R) {
// Each active thread accumulates M5R consecutive set-2 rows.
float score[M5R];
for (int dy=0;dy<M5R;dy++)
score[dy] = 0.0f;
for (int d=0;d<NDIM/4;d++) {
float4 v1 = buffer1[tx*(NDIM/4 + 1) + d];
for (int dy=0;dy<M5R;dy++) {
float4 v2 = buffer2[(M5R*ty + dy)*(NDIM/4) + d];
score[dy] += v1.x*v2.x; score[dy] += v1.y*v2.y;
score[dy] += v1.z*v2.z; score[dy] += v1.w*v2.w;
}
}
for (int dy=0;dy<M5R;dy++)
scores[tx + M5W*(M5R*ty + dy)] = score[dy];
}
__syncthreads();
if (ty==0) {
for (int i=0;i<M5H;i++) {
if (scores[i*M5W + tx]>max_score) {
max_score = scores[i*M5W + tx];
index = bp2 + i;
}
}
}
__syncthreads();
}
if (ty==0) {
d_score[bp1 + tx] = max_score;
d_index[bp1 + tx] = index;
}
}
// Kernel 6: like Match5 but per-tile maxima stay in registers; at the end
// they are reduced through shared memory reused from buffer1 (aliased as
// scores/indices), avoiding the per-tile score-tile writes and the extra
// initial __syncthreads (buffer1 is first read only after the in-loop
// barrier).
__global__ void Match6(const float *__restrict d_pts1,
const float *__restrict d_pts2,
float *__restrict d_score,
int *__restrict d_index)
{
__shared__ float4 buffer1[M5W*(NDIM/4 + 1)];
__shared__ float4 buffer2[M5H*NDIM/4];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bp1 = M5W*blockIdx.x;
if (ty<M5W)
for (int d=tx;d<NDIM/4;d+=M5W)
for (int j=ty;j<M5W;j+=M5H)
buffer1[j*(NDIM/4 + 1) + d] = ((float4*)d_pts1)[(bp1 + j)*(NDIM/4) + d];
float max_score = 0.0f;
int index = -1;
for (int bp2=0;bp2<NPTS;bp2+=M5H) {
for (int d=tx;d<NDIM/4;d+=M5W)
buffer2[ty*NDIM/4 + d] = ((float4*)d_pts2)[(bp2 + ty)*(NDIM/4) + d];
__syncthreads();
if (ty<M5H/M5R) {
float score[M5R];
for (int dy=0;dy<M5R;dy++)
score[dy] = 0.0f;
for (int d=0;d<NDIM/4;d++) {
float4 v1 = buffer1[tx*(NDIM/4 + 1) + d];
for (int dy=0;dy<M5R;dy++) {
float4 v2 = buffer2[(M5R*ty + dy)*(NDIM/4) + d];
score[dy] += v1.x*v2.x; score[dy] += v1.y*v2.y;
score[dy] += v1.z*v2.z; score[dy] += v1.w*v2.w;
}
}
for (int dy=0;dy<M5R;dy++) {
if (score[dy]>max_score) {
max_score = score[dy];
index = bp2 + M5R*ty + dy;
}
}
}
__syncthreads();
}
// Reuse buffer1's storage for the final cross-row reduction.
float *scores = (float*)buffer1;
int *indices = (int*)&scores[M5W*M5H/M5R];
if (ty<M5H/M5R) {
scores[ty*M5W + tx] = max_score;
indices[ty*M5W + tx] = index;
}
__syncthreads();
if (ty==0) {
max_score = scores[tx];
index = indices[tx];
for (int y=0;y<M5H/M5R;y++)
if (scores[y*M5W + tx]>max_score) {
max_score = scores[y*M5W + tx];
index = indices[y*M5W + tx];
}
d_score[bp1 + tx] = max_score;
d_index[bp1 + tx] = index;
}
}
// Kernel 7: larger (M7W x M7H/M7R) blocks where every thread row computes
// M7R scores.  buffer1 stores element d of row j at the rotated offset
// (d + j) % (NDIM/4), presumably to spread shared-memory accesses across
// banks; reads apply the matching rotation.  Final reduction goes through
// scores/indices aliased onto buffer1's storage.
__global__ void Match7(const float *__restrict d_pts1,
const float *__restrict d_pts2,
float *__restrict d_score,
int *__restrict d_index)
{
__shared__ float4 buffer1[M7W*NDIM/4];
__shared__ float4 buffer2[M7H*NDIM/4];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bp1 = M7W*blockIdx.x;
for (int d=tx;d<NDIM/4;d+=M7W)
for (int j=ty;j<M7W;j+=M7H/M7R)
buffer1[j*NDIM/4 + (d + j)%(NDIM/4)] = ((float4*)d_pts1)[(bp1 + j)*(NDIM/4) + d];
float max_score = 0.0f;
int index = -1;
for (int bp2=0;bp2<NPTS;bp2+=M7H) {
for (int d=tx;d<NDIM/4;d+=M7W)
for (int j=ty;j<M7H;j+=M7H/M7R)
buffer2[j*NDIM/4 + d] = ((float4*)d_pts2)[(bp2 + j)*(NDIM/4) + d];
__syncthreads();
float score[M7R];
for (int dy=0;dy<M7R;dy++)
score[dy] = 0.0f;
for (int d=0;d<NDIM/4;d++) {
float4 v1 = buffer1[tx*NDIM/4 + (d + tx)%(NDIM/4)];
for (int dy=0;dy<M7R;dy++) {
float4 v2 = buffer2[(M7R*ty + dy)*(NDIM/4) + d];
score[dy] += v1.x*v2.x;
score[dy] += v1.y*v2.y;
score[dy] += v1.z*v2.z;
score[dy] += v1.w*v2.w;
}
}
for (int dy=0;dy<M7R;dy++) {
if (score[dy]>max_score) {
max_score = score[dy];
index = bp2 + M7R*ty + dy;
}
}
__syncthreads();
}
// Reuse buffer1's storage for the final cross-row reduction.
float *scores = (float*)buffer1;
int *indices = (int*)&scores[M7W*M7H/M7R];
scores[ty*M7W + tx] = max_score;
indices[ty*M7W + tx] = index;
__syncthreads();
if (ty==0) {
max_score = scores[tx];
index = indices[tx];
for (int y=0;y<M7H/M7R;y++)
if (scores[y*M7W + tx]>max_score) {
max_score = scores[y*M7W + tx];
index = indices[y*M7W + tx];
}
d_score[bp1 + tx] = max_score;
d_index[bp1 + tx] = index;
}
}
// Kernel 8: Match7 plus NRX=2 set-1 points per computing thread — the
// first M7W*M7H/M7R/NRX threads (by flat idx) each accumulate an
// M7R x NRX score tile; the rest only help with the shared loads.
__global__ void Match8(const float *__restrict d_pts1,
const float *__restrict d_pts2,
float *__restrict d_score,
int *__restrict d_index)
{
__shared__ float4 buffer1[M7W*NDIM/4];
__shared__ float4 buffer2[M7H*NDIM/4];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bp1 = M7W*blockIdx.x;
for (int d=tx;d<NDIM/4;d+=M7W)
for (int j=ty;j<M7W;j+=M7H/M7R)
buffer1[j*NDIM/4 + (d + j)%(NDIM/4)] = ((float4*)d_pts1)[(bp1 + j)*(NDIM/4) + d];
#define NRX 2
float max_score[NRX];
int index[NRX];
for (int i=0;i<NRX;i++) {
max_score[i] = 0.0f;
index[i] = -1;
}
// Remap the flat thread index to an (ix, iy) compute grid of
// (M7W/NRX) x (M7H/M7R) score positions.
int idx = ty*M7W + tx;
int ix = idx%(M7W/NRX);
int iy = idx/(M7W/NRX);
for (int bp2=0;bp2<NPTS;bp2+=M7H) {
for (int d=tx;d<NDIM/4;d+=M7W)
for (int j=ty;j<M7H;j+=M7H/M7R)
buffer2[j*NDIM/4 + d] = ((float4*)d_pts2)[(bp2 + j)*(NDIM/4) + d];
__syncthreads();
if (idx<M7W*M7H/M7R/NRX) {
float score[M7R][NRX];
for (int dy=0;dy<M7R;dy++)
for (int i=0;i<NRX;i++)
score[dy][i] = 0.0f;
for (int d=0;d<NDIM/4;d++) {
float4 v1[NRX];
for (int i=0;i<NRX;i++)
v1[i] = buffer1[((M7W/NRX)*i + ix)*NDIM/4 + (d + (M7W/NRX)*i + ix)%(NDIM/4)];
for (int dy=0;dy<M7R;dy++) {
float4 v2 = buffer2[(M7R*iy + dy)*(NDIM/4) + d];
for (int i=0;i<NRX;i++) {
score[dy][i] += v1[i].x*v2.x;
score[dy][i] += v1[i].y*v2.y;
score[dy][i] += v1[i].z*v2.z;
score[dy][i] += v1[i].w*v2.w;
}
}
}
for (int dy=0;dy<M7R;dy++) {
for (int i=0;i<NRX;i++) {
if (score[dy][i]>max_score[i]) {
max_score[i] = score[dy][i];
index[i] = bp2 + M7R*iy + dy;
}
}
}
}
__syncthreads();
}
// Reuse buffer1's storage for the final cross-row reduction.
float *scores = (float*)buffer1;
int *indices = (int*)&scores[M7W*M7H/M7R];
if (idx<M7W*M7H/M7R/NRX) {
for (int i=0;i<NRX;i++) {
scores[iy*M7W + (M7W/NRX)*i + ix] = max_score[i];
indices[iy*M7W + (M7W/NRX)*i + ix] = index[i];
}
}
__syncthreads();
if (ty==0) {
float max_score = scores[tx];
int index = indices[tx];
for (int y=0;y<M7H/M7R;y++)
if (scores[y*M7W + tx]>max_score) {
max_score = scores[y*M7W + tx];
index = indices[y*M7W + tx];
}
d_score[bp1 + tx] = max_score;
d_index[bp1 + tx] = index;
}
}
// Kernel 9: Match8 launched with half the threads (main uses block
// M7W x M7H/M7R/2), so every thread computes and the shared-load loops
// stride by M7H/M7R/NRX instead of M7H/M7R.
__global__ void Match9(const float *__restrict d_pts1,
const float *__restrict d_pts2,
float *__restrict d_score,
int *__restrict d_index)
{
#define NRX 2
__shared__ float4 buffer1[M7W*NDIM/4];
__shared__ float4 buffer2[M7H*NDIM/4];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bp1 = M7W*blockIdx.x;
for (int d=tx;d<NDIM/4;d+=M7W)
for (int j=ty;j<M7W;j+=M7H/M7R/NRX)
buffer1[j*NDIM/4 + (d + j)%(NDIM/4)] = ((float4*)d_pts1)[(bp1 + j)*(NDIM/4) + d];
float max_score[NRX];
int index[NRX];
for (int i=0;i<NRX;i++) {
max_score[i] = 0.0f;
index[i] = -1;
}
// Remap the flat thread index to an (ix, iy) compute grid of
// (M7W/NRX) x (M7H/M7R) score positions.
int idx = ty*M7W + tx;
int ix = idx%(M7W/NRX);
int iy = idx/(M7W/NRX);
for (int bp2=0;bp2<NPTS;bp2+=M7H) {
for (int d=tx;d<NDIM/4;d+=M7W)
for (int j=ty;j<M7H;j+=M7H/M7R/NRX)
buffer2[j*NDIM/4 + d] = ((float4*)d_pts2)[(bp2 + j)*(NDIM/4) + d];
__syncthreads();
float score[M7R][NRX];
for (int dy=0;dy<M7R;dy++)
for (int i=0;i<NRX;i++)
score[dy][i] = 0.0f;
for (int d=0;d<NDIM/4;d++) {
float4 v1[NRX];
for (int i=0;i<NRX;i++)
v1[i] = buffer1[((M7W/NRX)*i + ix)*NDIM/4 + (d + (M7W/NRX)*i + ix)%(NDIM/4)];
for (int dy=0;dy<M7R;dy++) {
float4 v2 = buffer2[(M7R*iy + dy)*(NDIM/4) + d];
for (int i=0;i<NRX;i++) {
score[dy][i] += v1[i].x*v2.x;
score[dy][i] += v1[i].y*v2.y;
score[dy][i] += v1[i].z*v2.z;
score[dy][i] += v1[i].w*v2.w;
}
}
}
for (int dy=0;dy<M7R;dy++) {
for (int i=0;i<NRX;i++) {
if (score[dy][i]>max_score[i]) {
max_score[i] = score[dy][i];
index[i] = bp2 + M7R*iy + dy;
}
}
}
__syncthreads();
}
// Reuse buffer1's storage for the final cross-row reduction.
float *scores = (float*)buffer1;
int *indices = (int*)&scores[M7W*M7H/M7R];
if (idx<M7W*M7H/M7R/NRX) {
for (int i=0;i<NRX;i++) {
scores[iy*M7W + (M7W/NRX)*i + ix] = max_score[i];
indices[iy*M7W + (M7W/NRX)*i + ix] = index[i];
}
}
__syncthreads();
if (ty==0) {
float max_score = scores[tx];
int index = indices[tx];
for (int y=0;y<M7H/M7R;y++)
if (scores[y*M7W + tx]>max_score) {
max_score = scores[y*M7W + tx];
index = indices[y*M7W + tx];
}
d_score[bp1 + tx] = max_score;
d_index[bp1 + tx] = index;
}
}
// Kernel 10: double-buffered variant of Match8.  Set 2 is staged through a
// small NUM-float4-wide shared window (buffer2) while the next slice is
// prefetched into a register (temp) and committed after a barrier; the
// `dp < NDIM/4 - NUM` guard around __syncthreads is uniform across the
// block (depends only on dp), so the barrier is safe.
__global__ void Match10(const float *__restrict d_pts1,
const float *__restrict d_pts2,
float *__restrict d_score,
int *__restrict d_index)
{
#define NRX 2
#define NUM (NRX*M7R) // 32*8 threads
__shared__ float4 buffer1[M7W*NDIM/4]; // 32*32
__shared__ float4 buffer2[M7H*NUM]; // 32*8
int tx = threadIdx.x;
int ty = threadIdx.y;
int bp1 = M7W*blockIdx.x;
for (int d=tx;d<NDIM/4;d+=M7W)
for (int j=ty;j<M7W;j+=M7H/M7R)
buffer1[j*NDIM/4 + (d + j)%(NDIM/4)] = ((float4*)d_pts1)[(bp1 + j)*(NDIM/4) + d];
float max_score[NRX];
int index[NRX];
for (int i=0;i<NRX;i++) {
max_score[i] = 0.0f;
index[i] = -1;
}
int idx = ty*M7W + tx;
int ix = idx%(M7W/NRX);
int iy = idx/(M7W/NRX);
for (int bp2=0;bp2<NPTS;bp2+=M7H) {
float score[M7R][NRX];
for (int dy=0;dy<M7R;dy++)
for (int i=0;i<NRX;i++)
score[dy][i] = 0.0f;
// Each thread owns one (d, j) slot of the staging window.
int d = (idx%NUM);
int j = (idx/NUM);
buffer2[j*NUM + d] = ((float4*)d_pts2)[(bp2 + j)*(NDIM/4) + d];
__syncthreads();
for (int dp=0;dp<NDIM/4;dp+=NUM) {
float4 temp;
if (dp<(NDIM/4-NUM))
temp = ((float4*)d_pts2)[(bp2 + j)*(NDIM/4) + dp + d + NUM];
if (idx<M7W*M7H/M7R/NRX) {
for (int d=0;d<NUM;d++) {
float4 v1[NRX];
#pragma unroll
for (int i=0;i<NRX;i++)
v1[i] = buffer1[(((M7W/NRX)*i + ix)<<5) + ((dp + d + (M7W/NRX)*i + ix)&31)];
//v1[i] = buffer1[((M7W/NRX)*i + ix)*NDIM/4 + (dp + d + (M7W/NRX)*i + ix)%(NDIM/4)];
#pragma unroll
for (int dy=0;dy<M7R;dy++) {
float4 v2 = buffer2[(M7R*iy + dy)*NUM + d];
#pragma unroll
for (int i=0;i<NRX;i++) {
score[dy][i] += v1[i].x*v2.x;
score[dy][i] += v1[i].y*v2.y;
score[dy][i] += v1[i].z*v2.z;
score[dy][i] += v1[i].w*v2.w;
}
}
}
}
__syncthreads();
if (dp<(NDIM/4-NUM)) {
buffer2[j*NUM + d] = temp;
__syncthreads();
}
}
for (int dy=0;dy<M7R;dy++) {
for (int i=0;i<NRX;i++) {
if (score[dy][i]>max_score[i]) {
max_score[i] = score[dy][i];
index[i] = bp2 + M7R*iy + dy;
}
}
}
__syncthreads();
}
// Reuse buffer1's storage for the final cross-row reduction.
float *scores = (float*)buffer1;
int *indices = (int*)&scores[M7W*M7H/M7R];
if (idx<M7W*M7H/M7R/NRX) {
for (int i=0;i<NRX;i++) {
scores[iy*M7W + (M7W/NRX)*i + ix] = max_score[i];
indices[iy*M7W + (M7W/NRX)*i + ix] = index[i];
}
}
__syncthreads();
if (ty==0) {
float max_score = scores[tx];
int index = indices[tx];
for (int y=0;y<M7H/M7R;y++)
if (scores[y*M7W + tx]>max_score) {
max_score = scores[y*M7W + tx];
index = indices[y*M7W + tx];
}
d_score[bp1 + tx] = max_score;
d_index[bp1 + tx] = index;
}
}
// Benchmark driver: generates per-row-normalized random descriptors, runs
// the CPU reference once, then times each GPU kernel over REPEAT launches
// and verifies its matches against the reference.
int main(int argc, char *argv[])
{
// Host buffers are carved 32-byte-aligned out of one vector; the +8 floats
// provide alignment slack.
// NOTE(review): the second std::align call reuses `space` without
// subtracting the first allocation, and if &data[0] is not 32-byte aligned
// h_pts1's span can extend past &data[NPTS*NDIM] and overlap h_pts2's
// region by a few floats — confirm the slack is distributed as intended.
size_t space = sizeof(float)*NPTS*NDIM*2 + 8;
std::vector<float> data(NPTS*NDIM*2 + 8);
void *ptr = (void*)&data[0];
float *h_pts1 = (float*)std::align(32, sizeof(float)*NPTS*NDIM, ptr, space);
ptr = (void*)&data[NPTS*NDIM];
float *h_pts2 = (float*)std::align(32, sizeof(float)*NPTS*NDIM, ptr, space);
std::vector<int> h_index(NPTS);
std::vector<float> h_score(NPTS);
std::vector<int> h_index2(NPTS);
std::vector<float> h_score2(NPTS);
float *d_pts1, *d_pts2, *d_score;
int *d_index;
std::cout << std::endl;
int psize = sizeof(float)*NPTS;
std::cout << "Data size: " << 2.0*psize*NDIM/1024/1024 << " MB" << std::endl;
hipMalloc((void **)&d_pts1, psize*NDIM);
hipMalloc((void **)&d_pts2, psize*NDIM);
hipMalloc((void **)&d_index, psize);
hipMalloc((void **)&d_score, psize);
// Fill both point sets with uniform random values, scaled per row.
for (int i=0;i<NPTS;i++) {
float sum1 = 0.0f, sum2 = 0.0f;
for (int d=0;d<NDIM;d++) {
sum1 += h_pts1[i*NDIM + d] = (float)rand()/RAND_MAX;
sum2 += h_pts2[i*NDIM + d] = (float)rand()/RAND_MAX;
}
sum1 = sqrt(NDIM)/sum1;
sum2 = sqrt(NDIM)/sum2;
for (int d=0;d<NDIM;d++) {
h_pts1[i*NDIM + d] *= sum1;
h_pts2[i*NDIM + d] *= sum2;
}
}
// CPU reference (run once — it dominates total runtime).
auto start = std::chrono::high_resolution_clock::now();
MatchC1(h_pts1, h_pts2, h_score.data(), h_index.data());
end = std::chrono::high_resolution_clock::now();
auto elapsed_seconds = std::chrono::duration_cast<std::chrono::duration<double>>(end - start);
auto delay = elapsed_seconds.count() * 1000;
std::cout << "MatchCPU1: " << elapsed_seconds.count() * 1000 << " ms "
<< 2.0*NPTS*NPTS*NDIM/delay/1024/1024 << " Gflops" << std::endl;
hipMemcpy(d_pts1, h_pts1, psize*NDIM, hipMemcpyHostToDevice);
hipMemcpy(d_pts2, h_pts2, psize*NDIM, hipMemcpyHostToDevice);
// One timing/verification section per kernel; only the launch geometry
// changes between sections.
dim3 blocks, threads;
blocks = dim3(NPTS/M1W);
threads = dim3(M1W);
start = std::chrono::high_resolution_clock::now();
for (int i = 0; i < REPEAT; i++)
hipLaunchKernelGGL(( Match1), dim3(blocks),dim3(threads), 0, 0, d_pts1, d_pts2, d_score, d_index);
hipDeviceSynchronize();
end = std::chrono::high_resolution_clock::now();
elapsed_seconds = std::chrono::duration_cast<std::chrono::duration<double>>(end - start);
delay = elapsed_seconds.count() * 1000 / REPEAT;
std::cout << "MatchGPU1: " << delay << " ms " << 2.0*NPTS*NPTS*NDIM/delay/1024/1024 << " Gflops" << std::endl;
hipMemcpy(h_index2.data(), d_index, psize, hipMemcpyDeviceToHost);
hipMemcpy(h_score2.data(), d_score, psize, hipMemcpyDeviceToHost);
CheckMatches(h_index.data(), h_index2.data(), h_score.data(), h_score2.data());
blocks = dim3(NPTS/M2W);
threads = dim3(M2W, M2H);
start = std::chrono::high_resolution_clock::now();
for (int i = 0; i < REPEAT; i++)
hipLaunchKernelGGL(( Match2), dim3(blocks),dim3(threads), 0, 0, d_pts1, d_pts2, d_score, d_index);
hipDeviceSynchronize();
end = std::chrono::high_resolution_clock::now();
elapsed_seconds = std::chrono::duration_cast<std::chrono::duration<double>>(end - start);
delay = elapsed_seconds.count() * 1000 / REPEAT;
std::cout << "MatchGPU2: " << delay << " ms " << 2.0*NPTS*NPTS*NDIM/delay/1024/1024 << " Gflops" << std::endl;
hipMemcpy(h_index2.data(), d_index, psize, hipMemcpyDeviceToHost);
hipMemcpy(h_score2.data(), d_score, psize, hipMemcpyDeviceToHost);
CheckMatches(h_index.data(), h_index2.data(), h_score.data(), h_score2.data());
blocks = dim3(NPTS/M2W);
threads = dim3(M2W, M2H);
start = std::chrono::high_resolution_clock::now();
for (int i = 0; i < REPEAT; i++)
hipLaunchKernelGGL(( Match3), dim3(blocks),dim3(threads), 0, 0, d_pts1, d_pts2, d_score, d_index);
hipDeviceSynchronize();
end = std::chrono::high_resolution_clock::now();
elapsed_seconds = std::chrono::duration_cast<std::chrono::duration<double>>(end - start);
delay = elapsed_seconds.count() * 1000 / REPEAT;
std::cout << "MatchGPU3: " << delay << " ms " << 2.0*NPTS*NPTS*NDIM/delay/1024/1024 << " Gflops" << std::endl;
hipMemcpy(h_index2.data(), d_index, psize, hipMemcpyDeviceToHost);
hipMemcpy(h_score2.data(), d_score, psize, hipMemcpyDeviceToHost);
CheckMatches(h_index.data(), h_index2.data(), h_score.data(), h_score2.data());
blocks = dim3(NPTS/M2W);
threads = dim3(M2W, M2H);
start = std::chrono::high_resolution_clock::now();
for (int i = 0; i < REPEAT; i++)
hipLaunchKernelGGL(( Match4), dim3(blocks),dim3(threads), 0, 0, d_pts1, d_pts2, d_score, d_index);
hipDeviceSynchronize();
end = std::chrono::high_resolution_clock::now();
elapsed_seconds = std::chrono::duration_cast<std::chrono::duration<double>>(end - start);
delay = elapsed_seconds.count() * 1000 / REPEAT;
std::cout << "MatchGPU4: " << delay << " ms " << 2.0*NPTS*NPTS*NDIM/delay/1024/1024 << " Gflops" << std::endl;
hipMemcpy(h_index2.data(), d_index, psize, hipMemcpyDeviceToHost);
hipMemcpy(h_score2.data(), d_score, psize, hipMemcpyDeviceToHost);
CheckMatches(h_index.data(), h_index2.data(), h_score.data(), h_score2.data());
blocks = dim3(NPTS/M5W);
threads = dim3(M5W, M5H);
start = std::chrono::high_resolution_clock::now();
for (int i = 0; i < REPEAT; i++)
hipLaunchKernelGGL(( Match5), dim3(blocks),dim3(threads), 0, 0, d_pts1, d_pts2, d_score, d_index);
hipDeviceSynchronize();
end = std::chrono::high_resolution_clock::now();
elapsed_seconds = std::chrono::duration_cast<std::chrono::duration<double>>(end - start);
delay = elapsed_seconds.count() * 1000 / REPEAT;
std::cout << "MatchGPU5: " << delay << " ms " << 2.0*NPTS*NPTS*NDIM/delay/1024/1024 << " Gflops" << std::endl;
hipMemcpy(h_index2.data(), d_index, psize, hipMemcpyDeviceToHost);
hipMemcpy(h_score2.data(), d_score, psize, hipMemcpyDeviceToHost);
CheckMatches(h_index.data(), h_index2.data(), h_score.data(), h_score2.data());
blocks = dim3(NPTS/M5W);
threads = dim3(M5W, M5H);
start = std::chrono::high_resolution_clock::now();
for (int i = 0; i < REPEAT; i++)
hipLaunchKernelGGL(( Match6), dim3(blocks),dim3(threads), 0, 0, d_pts1, d_pts2, d_score, d_index);
hipDeviceSynchronize();
end = std::chrono::high_resolution_clock::now();
elapsed_seconds = std::chrono::duration_cast<std::chrono::duration<double>>(end - start);
delay = elapsed_seconds.count() * 1000 / REPEAT;
std::cout << "MatchGPU6: " << delay << " ms " << 2.0*NPTS*NPTS*NDIM/delay/1024/1024 << " Gflops" << std::endl;
hipMemcpy(h_index2.data(), d_index, psize, hipMemcpyDeviceToHost);
hipMemcpy(h_score2.data(), d_score, psize, hipMemcpyDeviceToHost);
CheckMatches(h_index.data(), h_index2.data(), h_score.data(), h_score2.data());
blocks = dim3(NPTS/M7W);
threads = dim3(M7W, M7H/M7R);
start = std::chrono::high_resolution_clock::now();
for (int i = 0; i < REPEAT; i++)
hipLaunchKernelGGL(( Match7), dim3(blocks),dim3(threads), 0, 0, d_pts1, d_pts2, d_score, d_index);
hipDeviceSynchronize();
end = std::chrono::high_resolution_clock::now();
elapsed_seconds = std::chrono::duration_cast<std::chrono::duration<double>>(end - start);
delay = elapsed_seconds.count() * 1000 / REPEAT;
std::cout << "MatchGPU7: " << delay << " ms " << 2.0*NPTS*NPTS*NDIM/delay/1024/1024 << " Gflops" << std::endl;
hipMemcpy(h_index2.data(), d_index, psize, hipMemcpyDeviceToHost);
hipMemcpy(h_score2.data(), d_score, psize, hipMemcpyDeviceToHost);
CheckMatches(h_index.data(), h_index2.data(), h_score.data(), h_score2.data());
blocks = dim3(NPTS/M7W);
threads = dim3(M7W, M7H/M7R);
start = std::chrono::high_resolution_clock::now();
for (int i = 0; i < REPEAT; i++)
hipLaunchKernelGGL(( Match8), dim3(blocks),dim3(threads), 0, 0, d_pts1, d_pts2, d_score, d_index);
hipDeviceSynchronize();
end = std::chrono::high_resolution_clock::now();
elapsed_seconds = std::chrono::duration_cast<std::chrono::duration<double>>(end - start);
delay = elapsed_seconds.count() * 1000 / REPEAT;
std::cout << "MatchGPU8: " << delay << " ms " << 2.0*NPTS*NPTS*NDIM/delay/1024/1024 << " Gflops" << std::endl;
hipMemcpy(h_index2.data(), d_index, psize, hipMemcpyDeviceToHost);
hipMemcpy(h_score2.data(), d_score, psize, hipMemcpyDeviceToHost);
CheckMatches(h_index.data(), h_index2.data(), h_score.data(), h_score2.data());
blocks = dim3(NPTS/M7W);
threads = dim3(M7W, M7H/M7R/2);
start = std::chrono::high_resolution_clock::now();
for (int i = 0; i < REPEAT; i++)
hipLaunchKernelGGL(( Match9), dim3(blocks),dim3(threads), 0, 0, d_pts1, d_pts2, d_score, d_index);
hipDeviceSynchronize();
end = std::chrono::high_resolution_clock::now();
elapsed_seconds = std::chrono::duration_cast<std::chrono::duration<double>>(end - start);
delay = elapsed_seconds.count() * 1000 / REPEAT;
std::cout << "MatchGPU9: " << delay << " ms " << 2.0*NPTS*NPTS*NDIM/delay/1024/1024 << " Gflops" << std::endl;
hipMemcpy(h_index2.data(), d_index, psize, hipMemcpyDeviceToHost);
hipMemcpy(h_score2.data(), d_score, psize, hipMemcpyDeviceToHost);
CheckMatches(h_index.data(), h_index2.data(), h_score.data(), h_score2.data());
blocks = dim3(NPTS/M7W);
threads = dim3(M7W, M7H/M7R);
start = std::chrono::high_resolution_clock::now();
for (int i = 0; i < REPEAT; i++)
hipLaunchKernelGGL(( Match10), dim3(blocks),dim3(threads), 0, 0, d_pts1, d_pts2, d_score, d_index);
hipDeviceSynchronize();
end = std::chrono::high_resolution_clock::now();
elapsed_seconds = std::chrono::duration_cast<std::chrono::duration<double>>(end - start);
delay = elapsed_seconds.count() * 1000 / REPEAT;
std::cout << "MatchGPU10: " << delay << " ms " << 2.0*NPTS*NPTS*NDIM/delay/1024/1024 << " Gflops" << std::endl;
hipMemcpy(h_index2.data(), d_index, psize, hipMemcpyDeviceToHost);
hipMemcpy(h_score2.data(), d_score, psize, hipMemcpyDeviceToHost);
CheckMatches(h_index.data(), h_index2.data(), h_score.data(), h_score2.data());
hipFree(d_pts1);
hipFree(d_pts2);
hipFree(d_index);
hipFree(d_score);
return 0;
}
| 639d61bbc1550f9ef53a96e90af5ef3b0aebdc60.cu | //**********************************************************//
// Matching test code by Marten Bjorkman aka Celebrandil //
// //
// The code includes an example of gradual optimization //
// of a kernel for matching two sets of 16K 128D points. //
//**********************************************************//
#include <cstring>
#include <cmath>
#include <iostream>
#include <vector>
#include <memory> // std::align
#include <chrono>
#include <cuda.h>
#define REPEAT 100
#define NPTS (2048*8)
#define NDIM 128
#define M1W 128
#define M2W 16
#define M2H 16
#define M5W 16
#define M5H 16
#define M5R 4
#define M7W 32
#define M7H 32
#define M7R 4
// Serial CPU reference: for each point of set 1, find the index of the
// best-matching point of set 2 (maximum dot product over ndim dimensions).
// h_score[i] / h_index[i] receive the best score and matching index; when
// every score is <= 0 for a point, h_index keeps its prior value (original
// behavior preserved).  npts/ndim default to the benchmark sizes (NPTS,
// NDIM); smaller values allow quick correctness checks.
void MatchC1(float *h_pts1, float *h_pts2, float *h_score, int *h_index,
             int npts = 2048*8, int ndim = 128)
{
  std::memset(h_score, 0, sizeof(float)*npts);
  for (int p1=0;p1<npts;p1++) {
    for (int p2=0;p2<npts;p2++) {
      // Dot product of descriptor p1 (set 1) with descriptor p2 (set 2).
      float score = 0.0f;
      for (int d=0;d<ndim;d++)
        score += h_pts1[p1*ndim + d]*h_pts2[p2*ndim + d];
      if (score>h_score[p1]) {
        h_score[p1] = score;
        h_index[p1] = p2;
      }
    }
  }
}
// Verify device results (h_index2/h_score2) against the CPU reference
// (h_index/h_score).  Prints each mismatching entry and the total count.
// Returns the number of mismatches so callers can assert on it (existing
// callers that ignore the return value are unaffected).  npts defaults to
// the benchmark size NPTS.
int CheckMatches(int *h_index, int *h_index2, float *h_score, float *h_score2,
                 int npts = 2048*8)
{
  int ndiff = 0;
  for (int i=0;i<npts;i++) {
    ndiff += (h_index[i] != h_index2[i]);
    if (h_index[i] != h_index2[i])
      std::cout << " " << i << " " << h_index[i] << " " << h_index2[i] << " " << h_score[i] << " " << h_score2[i] << std::endl;
  }
  std::cout << "Number of incorrect matches: " << ndiff << std::endl;
  return ndiff;
}
// Kernel 1: naive baseline.  One thread per set-1 point (launched as grid
// NPTS/M1W, block M1W, 1-D); each thread streams all NPTS set-2 points and
// all NDIM dimensions straight from global memory, keeping the best
// dot-product score and its index.
__global__ void Match1(const float *__restrict d_pts1,
const float *__restrict d_pts2,
float *__restrict d_score,
int *__restrict d_index)
{
int p1 = threadIdx.x + M1W*blockIdx.x;
float max_score = 0.0f;
int index = -1;
for (int p2=0;p2<NPTS;p2++) {
float score = 0.0f;
for (int d=0;d<NDIM;d++)
score += d_pts1[p1*NDIM + d]*d_pts2[p2*NDIM + d];
if (score>max_score) {
max_score = score;
index = p2;
}
}
d_score[p1] = max_score;
d_index[p1] = index;
}
// Kernel 2: shared-memory tiling.  Each (M2W x M2H) block caches M2W set-1
// descriptors once in buffer1, then streams set 2 in tiles of M2H
// descriptors through buffer2.  Thread (tx, ty) computes one dot product
// per tile into scores[]; the ty==0 row folds the tile into the running
// per-point maximum.
__global__ void Match2(const float *__restrict d_pts1,
const float *__restrict d_pts2,
float *__restrict d_score,
int *__restrict d_index)
{
__shared__ float buffer1[M2W*NDIM];
__shared__ float buffer2[M2H*NDIM];
__shared__ float scores[M2W*M2H];
int tx = threadIdx.x;
int ty = threadIdx.y;
int idx = tx + M2W*ty;
int bp1 = M2W*blockIdx.x;
if (ty<M2W)
for (int d=tx;d<NDIM;d+=M2W)
for (int j=ty;j<M2W;j+=M2H)
buffer1[j*NDIM + d] = d_pts1[(bp1 + j)*NDIM + d];
__syncthreads();
float max_score = 0.0f;
int index = -1;
for (int bp2=0;bp2<NPTS;bp2+=M2H) {
for (int d=tx;d<NDIM;d+=M2W)
buffer2[ty*NDIM + d] = d_pts2[(bp2 + ty)*NDIM + d];
__syncthreads();
float score = 0.0f;
for (int d=0;d<NDIM;d++)
score += buffer1[tx*NDIM + d]*buffer2[ty*NDIM + d];
scores[idx] = score;
__syncthreads();
if (ty==0) {
for (int i=0;i<M2H;i++) {
if (scores[i*M2W + tx]>max_score) {
max_score = scores[i*M2W + tx];
index = bp2 + i;
}
}
}
__syncthreads();
}
if (ty==0) {
d_score[bp1 + tx] = max_score;
d_index[bp1 + tx] = index;
}
}
// Kernel 3: identical to Match2 except buffer1 rows use a (NDIM + 1) stride
// — the one-float padding skews the rows, presumably to avoid shared-memory
// bank conflicts on the per-thread row accesses.
__global__ void Match3(const float *__restrict d_pts1,
const float *__restrict d_pts2,
float *__restrict d_score,
int *__restrict d_index)
{
__shared__ float buffer1[M2W*(NDIM + 1)];
__shared__ float buffer2[M2H*NDIM];
__shared__ float scores[M2W*M2H];
int tx = threadIdx.x;
int ty = threadIdx.y;
int idx = tx + M2W*ty;
int bp1 = M2W*blockIdx.x;
if (ty<M2W)
for (int d=tx;d<NDIM;d+=M2W)
for (int j=ty;j<M2W;j+=M2H)
buffer1[j*(NDIM + 1) + d] = d_pts1[(bp1 + j)*NDIM + d];
__syncthreads();
float max_score = 0.0f;
int index = -1;
for (int bp2=0;bp2<NPTS;bp2+=M2H) {
for (int d=tx;d<NDIM;d+=M2W)
buffer2[ty*NDIM + d] = d_pts2[(bp2 + ty)*NDIM + d];
__syncthreads();
float score = 0.0f;
for (int d=0;d<NDIM;d++)
score += buffer1[tx*(NDIM + 1) + d]*buffer2[ty*NDIM + d];
scores[idx] = score;
__syncthreads();
if (ty==0) {
for (int i=0;i<M2H;i++) {
if (scores[i*M2W + tx]>max_score) {
max_score = scores[i*M2W + tx];
index = bp2 + i;
}
}
}
__syncthreads();
}
if (ty==0) {
d_score[bp1 + tx] = max_score;
d_index[bp1 + tx] = index;
}
}
// Kernel 4: float4-vectorized variant of Match3 — 16-byte global loads
// (requires 16-byte-aligned descriptor rows, which NDIM=128 floats gives)
// and float4 arithmetic on the shared tiles; row padding kept on buffer1.
__global__ void Match4(const float *__restrict d_pts1,
const float *__restrict d_pts2,
float *__restrict d_score,
int *__restrict d_index)
{
__shared__ float4 buffer1[M2W*(NDIM/4 + 1)];
__shared__ float4 buffer2[M2H*NDIM/4];
__shared__ float scores[M2W*M2H];
int tx = threadIdx.x;
int ty = threadIdx.y;
int idx = tx + M2W*ty;
int bp1 = M2W*blockIdx.x;
if (ty<M2W)
for (int d=tx;d<NDIM/4;d+=M2W)
for (int j=ty;j<M2W;j+=M2H)
buffer1[j*(NDIM/4 + 1) + d] = ((float4*)d_pts1)[(bp1 + j)*(NDIM/4) + d];
__syncthreads();
float max_score = 0.0f;
int index = -1;
for (int bp2=0;bp2<NPTS;bp2+=M2H) {
for (int d=tx;d<NDIM/4;d+=M2W)
buffer2[ty*NDIM/4 + d] = ((float4*)d_pts2)[(bp2 + ty)*(NDIM/4) + d];
__syncthreads();
float score = 0.0f;
for (int d=0;d<NDIM/4;d++) {
float4 v1 = buffer1[tx*(NDIM/4 + 1) + d];
float4 v2 = buffer2[ty*(NDIM/4) + d];
score += v1.x*v2.x; score += v1.y*v2.y;
score += v1.z*v2.z; score += v1.w*v2.w;
}
scores[idx] = score;
__syncthreads();
if (ty==0) {
for (int i=0;i<M2H;i++) {
if (scores[i*M2W + tx]>max_score) {
max_score = scores[i*M2W + tx];
index = bp2 + i;
}
}
}
__syncthreads();
}
if (ty==0) {
d_score[bp1 + tx] = max_score;
d_index[bp1 + tx] = index;
}
}
// Variant 5: float4-vectorized matcher with M5R-way register blocking.
// Only the first M5H/M5R thread rows compute scores; each such thread
// accumulates M5R candidate dot products per tile, cutting shared-memory
// reads of buffer1. Expected launch: grid(NPTS/M5W), block(M5W, M5H).
__global__ void Match5(const float *__restrict d_pts1,
                       const float *__restrict d_pts2,
                       float *__restrict d_score,
                       int *__restrict d_index)
{
  // buffer1 rows padded by one float4 (stride NDIM/4 + 1), presumably to
  // stagger shared-memory bank accesses, as in Match3/Match4.
  __shared__ float4 buffer1[M5W*(NDIM/4 + 1)];
  __shared__ float4 buffer2[M5H*NDIM/4];
  __shared__ float scores[M5W*M5H];
  int tx = threadIdx.x;
  int ty = threadIdx.y;
  int bp1 = M5W*blockIdx.x;          // first query point of this block
  // Stage this block's M5W query descriptors.
  if (ty<M5W)
    for (int d=tx;d<NDIM/4;d+=M5W)
      for (int j=ty;j<M5W;j+=M5H)
        buffer1[j*(NDIM/4 + 1) + d] = ((float4*)d_pts1)[(bp1 + j)*(NDIM/4) + d];
  __syncthreads();
  float max_score = 0.0f;
  int index = -1;
  for (int bp2=0;bp2<NPTS;bp2+=M5H) {
    // Stage the next M5H candidate descriptors.
    for (int d=tx;d<NDIM/4;d+=M5W)
      buffer2[ty*NDIM/4 + d] = ((float4*)d_pts2)[(bp2 + ty)*(NDIM/4) + d];
    __syncthreads();
    if (ty<M5H/M5R) {
      // M5R running dot products per thread, held in registers.
      float score[M5R];
      for (int dy=0;dy<M5R;dy++)
        score[dy] = 0.0f;
      for (int d=0;d<NDIM/4;d++) {
        float4 v1 = buffer1[tx*(NDIM/4 + 1) + d];
        for (int dy=0;dy<M5R;dy++) {
          float4 v2 = buffer2[(M5R*ty + dy)*(NDIM/4) + d];
          score[dy] += v1.x*v2.x; score[dy] += v1.y*v2.y;
          score[dy] += v1.z*v2.z; score[dy] += v1.w*v2.w;
        }
      }
      for (int dy=0;dy<M5R;dy++)
        scores[tx + M5W*(M5R*ty + dy)] = score[dy];
    }
    __syncthreads();
    // Row 0 folds this tile's scores into the running per-query maximum.
    if (ty==0) {
      for (int i=0;i<M5H;i++) {
        // Fixed: the comparison previously read scores[i*M2W + tx], mixing the
        // Match2 tile width into this M5W-tiled kernel; that is only benign
        // while M2W happens to equal M5W.
        if (scores[i*M5W + tx]>max_score) {
          max_score = scores[i*M5W + tx];
          index = bp2 + i;
        }
      }
    }
    __syncthreads();
  }
  if (ty==0) {
    d_score[bp1 + tx] = max_score;
    d_index[bp1 + tx] = index;
  }
}
// Variant 6: like Match5, but each computing thread keeps its own running
// maximum in registers instead of writing per-tile scores to shared memory;
// a final shared-memory reduction combines the per-row maxima. buffer1 is
// reused (type-punned) as the scores/indices scratch area at the end — safe
// only because the preceding __syncthreads() orders the last read of buffer1
// before the first overwrite. Expected launch: grid(NPTS/M5W), block(M5W, M5H).
__global__ void Match6(const float *__restrict d_pts1,
const float *__restrict d_pts2,
float *__restrict d_score,
int *__restrict d_index)
{
__shared__ float4 buffer1[M5W*(NDIM/4 + 1)];
__shared__ float4 buffer2[M5H*NDIM/4];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bp1 = M5W*blockIdx.x;
// Stage query descriptors; the __syncthreads() inside the first loop
// iteration orders this before any read of buffer1.
if (ty<M5W)
for (int d=tx;d<NDIM/4;d+=M5W)
for (int j=ty;j<M5W;j+=M5H)
buffer1[j*(NDIM/4 + 1) + d] = ((float4*)d_pts1)[(bp1 + j)*(NDIM/4) + d];
float max_score = 0.0f;
int index = -1;
for (int bp2=0;bp2<NPTS;bp2+=M5H) {
for (int d=tx;d<NDIM/4;d+=M5W)
buffer2[ty*NDIM/4 + d] = ((float4*)d_pts2)[(bp2 + ty)*(NDIM/4) + d];
__syncthreads();
// Only the first M5H/M5R rows compute; each handles M5R candidates.
if (ty<M5H/M5R) {
float score[M5R];
for (int dy=0;dy<M5R;dy++)
score[dy] = 0.0f;
for (int d=0;d<NDIM/4;d++) {
float4 v1 = buffer1[tx*(NDIM/4 + 1) + d];
for (int dy=0;dy<M5R;dy++) {
float4 v2 = buffer2[(M5R*ty + dy)*(NDIM/4) + d];
score[dy] += v1.x*v2.x; score[dy] += v1.y*v2.y;
score[dy] += v1.z*v2.z; score[dy] += v1.w*v2.w;
}
}
// Fold this tile's M5R scores into the thread's running maximum.
for (int dy=0;dy<M5R;dy++) {
if (score[dy]>max_score) {
max_score = score[dy];
index = bp2 + M5R*ty + dy;
}
}
}
__syncthreads();
}
// Reuse buffer1 as a (M5H/M5R) x M5W score table plus an index table.
float *scores = (float*)buffer1;
int *indices = (int*)&scores[M5W*M5H/M5R];
if (ty<M5H/M5R) {
scores[ty*M5W + tx] = max_score;
indices[ty*M5W + tx] = index;
}
__syncthreads();
// Row 0 reduces the per-row maxima and writes the final result.
if (ty==0) {
max_score = scores[tx];
index = indices[tx];
for (int y=0;y<M5H/M5R;y++)
if (scores[y*M5W + tx]>max_score) {
max_score = scores[y*M5W + tx];
index = indices[y*M5W + tx];
}
d_score[bp1 + tx] = max_score;
d_index[bp1 + tx] = index;
}
}
// Variant 7: all M7W x (M7H/M7R) threads compute; buffer1 is stored in a
// rotated layout — element d of row j lives at (d + j) % (NDIM/4) — presumably
// to spread simultaneous accesses across shared-memory banks (TODO confirm).
// Reads must apply the same rotation (see v1 below). Expected launch:
// grid(NPTS/M7W), block(M7W, M7H/M7R).
__global__ void Match7(const float *__restrict d_pts1,
const float *__restrict d_pts2,
float *__restrict d_score,
int *__restrict d_index)
{
__shared__ float4 buffer1[M7W*NDIM/4];
__shared__ float4 buffer2[M7H*NDIM/4];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bp1 = M7W*blockIdx.x;
// Stage query descriptors in the rotated layout.
for (int d=tx;d<NDIM/4;d+=M7W)
for (int j=ty;j<M7W;j+=M7H/M7R)
buffer1[j*NDIM/4 + (d + j)%(NDIM/4)] = ((float4*)d_pts1)[(bp1 + j)*(NDIM/4) + d];
float max_score = 0.0f;
int index = -1;
for (int bp2=0;bp2<NPTS;bp2+=M7H) {
// Stage M7H candidates (plain layout).
for (int d=tx;d<NDIM/4;d+=M7W)
for (int j=ty;j<M7H;j+=M7H/M7R)
buffer2[j*NDIM/4 + d] = ((float4*)d_pts2)[(bp2 + j)*(NDIM/4) + d];
__syncthreads();
// M7R dot products per thread, accumulated in registers.
float score[M7R];
for (int dy=0;dy<M7R;dy++)
score[dy] = 0.0f;
for (int d=0;d<NDIM/4;d++) {
// Rotated read matches the rotated store above (row index is tx here).
float4 v1 = buffer1[tx*NDIM/4 + (d + tx)%(NDIM/4)];
for (int dy=0;dy<M7R;dy++) {
float4 v2 = buffer2[(M7R*ty + dy)*(NDIM/4) + d];
score[dy] += v1.x*v2.x;
score[dy] += v1.y*v2.y;
score[dy] += v1.z*v2.z;
score[dy] += v1.w*v2.w;
}
}
for (int dy=0;dy<M7R;dy++) {
if (score[dy]>max_score) {
max_score = score[dy];
index = bp2 + M7R*ty + dy;
}
}
__syncthreads();
}
// Reuse buffer1 as score/index tables for the final cross-row reduction.
float *scores = (float*)buffer1;
int *indices = (int*)&scores[M7W*M7H/M7R];
scores[ty*M7W + tx] = max_score;
indices[ty*M7W + tx] = index;
__syncthreads();
if (ty==0) {
max_score = scores[tx];
index = indices[tx];
for (int y=0;y<M7H/M7R;y++)
if (scores[y*M7W + tx]>max_score) {
max_score = scores[y*M7W + tx];
index = indices[y*M7W + tx];
}
d_score[bp1 + tx] = max_score;
d_index[bp1 + tx] = index;
}
}
// Variant 8: Match7 plus NRX-way blocking over queries — each computing
// thread handles NRX query points x M7R candidates (NRX*M7R scores in
// registers). Only idx < M7W*M7H/M7R/NRX threads compute. Expected launch:
// grid(NPTS/M7W), block(M7W, M7H/M7R).
__global__ void Match8(const float *__restrict d_pts1,
const float *__restrict d_pts2,
float *__restrict d_score,
int *__restrict d_index)
{
__shared__ float4 buffer1[M7W*NDIM/4];
__shared__ float4 buffer2[M7H*NDIM/4];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bp1 = M7W*blockIdx.x;
// Stage query descriptors in the rotated layout (see Match7).
for (int d=tx;d<NDIM/4;d+=M7W)
for (int j=ty;j<M7W;j+=M7H/M7R)
buffer1[j*NDIM/4 + (d + j)%(NDIM/4)] = ((float4*)d_pts1)[(bp1 + j)*(NDIM/4) + d];
#define NRX 2
// Per-thread best score/index for each of its NRX query points.
float max_score[NRX];
int index[NRX];
for (int i=0;i<NRX;i++) {
max_score[i] = 0.0f;
index[i] = -1;
}
// Remap the 2D thread id: ix picks the query column, iy the candidate row.
int idx = ty*M7W + tx;
int ix = idx%(M7W/NRX);
int iy = idx/(M7W/NRX);
for (int bp2=0;bp2<NPTS;bp2+=M7H) {
for (int d=tx;d<NDIM/4;d+=M7W)
for (int j=ty;j<M7H;j+=M7H/M7R)
buffer2[j*NDIM/4 + d] = ((float4*)d_pts2)[(bp2 + j)*(NDIM/4) + d];
__syncthreads();
if (idx<M7W*M7H/M7R/NRX) {
float score[M7R][NRX];
for (int dy=0;dy<M7R;dy++)
for (int i=0;i<NRX;i++)
score[dy][i] = 0.0f;
for (int d=0;d<NDIM/4;d++) {
// Rotated reads of the NRX query descriptors handled by this thread.
float4 v1[NRX];
for (int i=0;i<NRX;i++)
v1[i] = buffer1[((M7W/NRX)*i + ix)*NDIM/4 + (d + (M7W/NRX)*i + ix)%(NDIM/4)];
for (int dy=0;dy<M7R;dy++) {
float4 v2 = buffer2[(M7R*iy + dy)*(NDIM/4) + d];
for (int i=0;i<NRX;i++) {
score[dy][i] += v1[i].x*v2.x;
score[dy][i] += v1[i].y*v2.y;
score[dy][i] += v1[i].z*v2.z;
score[dy][i] += v1[i].w*v2.w;
}
}
}
// Fold this tile into the per-query running maxima.
for (int dy=0;dy<M7R;dy++) {
for (int i=0;i<NRX;i++) {
if (score[dy][i]>max_score[i]) {
max_score[i] = score[dy][i];
index[i] = bp2 + M7R*iy + dy;
}
}
}
}
__syncthreads();
}
// Final reduction over candidate rows, staged through buffer1 (aliased).
float *scores = (float*)buffer1;
int *indices = (int*)&scores[M7W*M7H/M7R];
if (idx<M7W*M7H/M7R/NRX) {
for (int i=0;i<NRX;i++) {
scores[iy*M7W + (M7W/NRX)*i + ix] = max_score[i];
indices[iy*M7W + (M7W/NRX)*i + ix] = index[i];
}
}
__syncthreads();
if (ty==0) {
float max_score = scores[tx];
int index = indices[tx];
for (int y=0;y<M7H/M7R;y++)
if (scores[y*M7W + tx]>max_score) {
max_score = scores[y*M7W + tx];
index = indices[y*M7W + tx];
}
d_score[bp1 + tx] = max_score;
d_index[bp1 + tx] = index;
}
}
// Variant 9: like Match8 but launched with half as many threads
// (block(M7W, M7H/M7R/NRX) — note the /NRX in both staging loop strides), so
// every thread computes and no idx guard is needed in the main loop.
// Expected launch: grid(NPTS/M7W), block(M7W, M7H/M7R/2) with NRX == 2.
__global__ void Match9(const float *__restrict d_pts1,
const float *__restrict d_pts2,
float *__restrict d_score,
int *__restrict d_index)
{
#define NRX 2
__shared__ float4 buffer1[M7W*NDIM/4];
__shared__ float4 buffer2[M7H*NDIM/4];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bp1 = M7W*blockIdx.x;
// Stage query descriptors in the rotated layout (see Match7).
for (int d=tx;d<NDIM/4;d+=M7W)
for (int j=ty;j<M7W;j+=M7H/M7R/NRX)
buffer1[j*NDIM/4 + (d + j)%(NDIM/4)] = ((float4*)d_pts1)[(bp1 + j)*(NDIM/4) + d];
// Per-thread best score/index for each of its NRX query points.
float max_score[NRX];
int index[NRX];
for (int i=0;i<NRX;i++) {
max_score[i] = 0.0f;
index[i] = -1;
}
// ix picks the query column, iy the candidate row for this thread.
int idx = ty*M7W + tx;
int ix = idx%(M7W/NRX);
int iy = idx/(M7W/NRX);
for (int bp2=0;bp2<NPTS;bp2+=M7H) {
for (int d=tx;d<NDIM/4;d+=M7W)
for (int j=ty;j<M7H;j+=M7H/M7R/NRX)
buffer2[j*NDIM/4 + d] = ((float4*)d_pts2)[(bp2 + j)*(NDIM/4) + d];
__syncthreads();
float score[M7R][NRX];
for (int dy=0;dy<M7R;dy++)
for (int i=0;i<NRX;i++)
score[dy][i] = 0.0f;
for (int d=0;d<NDIM/4;d++) {
// Rotated reads of the NRX query descriptors handled by this thread.
float4 v1[NRX];
for (int i=0;i<NRX;i++)
v1[i] = buffer1[((M7W/NRX)*i + ix)*NDIM/4 + (d + (M7W/NRX)*i + ix)%(NDIM/4)];
for (int dy=0;dy<M7R;dy++) {
float4 v2 = buffer2[(M7R*iy + dy)*(NDIM/4) + d];
for (int i=0;i<NRX;i++) {
score[dy][i] += v1[i].x*v2.x;
score[dy][i] += v1[i].y*v2.y;
score[dy][i] += v1[i].z*v2.z;
score[dy][i] += v1[i].w*v2.w;
}
}
}
// Fold this tile into the per-query running maxima.
for (int dy=0;dy<M7R;dy++) {
for (int i=0;i<NRX;i++) {
if (score[dy][i]>max_score[i]) {
max_score[i] = score[dy][i];
index[i] = bp2 + M7R*iy + dy;
}
}
}
__syncthreads();
}
// Final reduction over candidate rows, staged through buffer1 (aliased).
float *scores = (float*)buffer1;
int *indices = (int*)&scores[M7W*M7H/M7R];
if (idx<M7W*M7H/M7R/NRX) {
for (int i=0;i<NRX;i++) {
scores[iy*M7W + (M7W/NRX)*i + ix] = max_score[i];
indices[iy*M7W + (M7W/NRX)*i + ix] = index[i];
}
}
__syncthreads();
if (ty==0) {
float max_score = scores[tx];
int index = indices[tx];
for (int y=0;y<M7H/M7R;y++)
if (scores[y*M7W + tx]>max_score) {
max_score = scores[y*M7W + tx];
index = indices[y*M7W + tx];
}
d_score[bp1 + tx] = max_score;
d_index[bp1 + tx] = index;
}
}
// Variant 10: Match8 with a much smaller, double-buffered buffer2 — only NUM
// float4 columns of the candidate tile are resident at a time; the next chunk
// is prefetched into a register (temp) while the current one is consumed.
// NOTE(review): the `<<5` / `&31` in the buffer1 read hard-code NDIM/4 == 32
// (i.e. NDIM == 128) — confirm before changing NDIM.
// Expected launch: grid(NPTS/M7W), block(M7W, M7H/M7R).
__global__ void Match10(const float *__restrict d_pts1,
const float *__restrict d_pts2,
float *__restrict d_score,
int *__restrict d_index)
{
#define NRX 2
#define NUM (NRX*M7R) // 32*8 threads
__shared__ float4 buffer1[M7W*NDIM/4]; // 32*32
__shared__ float4 buffer2[M7H*NUM]; // 32*8
int tx = threadIdx.x;
int ty = threadIdx.y;
int bp1 = M7W*blockIdx.x;
// Stage query descriptors in the rotated layout (see Match7).
for (int d=tx;d<NDIM/4;d+=M7W)
for (int j=ty;j<M7W;j+=M7H/M7R)
buffer1[j*NDIM/4 + (d + j)%(NDIM/4)] = ((float4*)d_pts1)[(bp1 + j)*(NDIM/4) + d];
// Per-thread best score/index for each of its NRX query points.
float max_score[NRX];
int index[NRX];
for (int i=0;i<NRX;i++) {
max_score[i] = 0.0f;
index[i] = -1;
}
int idx = ty*M7W + tx;
int ix = idx%(M7W/NRX);
int iy = idx/(M7W/NRX);
for (int bp2=0;bp2<NPTS;bp2+=M7H) {
float score[M7R][NRX];
for (int dy=0;dy<M7R;dy++)
for (int i=0;i<NRX;i++)
score[dy][i] = 0.0f;
// Each thread owns one (row j, column d) slot of the NUM-wide buffer2.
int d = (idx%NUM);
int j = (idx/NUM);
buffer2[j*NUM + d] = ((float4*)d_pts2)[(bp2 + j)*(NDIM/4) + d];
__syncthreads();
// Walk the descriptor dimension in chunks of NUM float4s, prefetching the
// next chunk into a register while the current chunk is consumed.
for (int dp=0;dp<NDIM/4;dp+=NUM) {
float4 temp;
if (dp<(NDIM/4-NUM))
temp = ((float4*)d_pts2)[(bp2 + j)*(NDIM/4) + dp + d + NUM];
if (idx<M7W*M7H/M7R/NRX) {
for (int d=0;d<NUM;d++) {
float4 v1[NRX];
#pragma unroll
for (int i=0;i<NRX;i++)
v1[i] = buffer1[(((M7W/NRX)*i + ix)<<5) + ((dp + d + (M7W/NRX)*i + ix)&31)];
//v1[i] = buffer1[((M7W/NRX)*i + ix)*NDIM/4 + (dp + d + (M7W/NRX)*i + ix)%(NDIM/4)];
#pragma unroll
for (int dy=0;dy<M7R;dy++) {
float4 v2 = buffer2[(M7R*iy + dy)*NUM + d];
#pragma unroll
for (int i=0;i<NRX;i++) {
score[dy][i] += v1[i].x*v2.x;
score[dy][i] += v1[i].y*v2.y;
score[dy][i] += v1[i].z*v2.z;
score[dy][i] += v1[i].w*v2.w;
}
}
}
}
__syncthreads();
// Publish the prefetched chunk for the next iteration.
if (dp<(NDIM/4-NUM)) {
buffer2[j*NUM + d] = temp;
__syncthreads();
}
}
// Fold this tile into the per-query running maxima.
for (int dy=0;dy<M7R;dy++) {
for (int i=0;i<NRX;i++) {
if (score[dy][i]>max_score[i]) {
max_score[i] = score[dy][i];
index[i] = bp2 + M7R*iy + dy;
}
}
}
__syncthreads();
}
// Final reduction over candidate rows, staged through buffer1 (aliased).
float *scores = (float*)buffer1;
int *indices = (int*)&scores[M7W*M7H/M7R];
if (idx<M7W*M7H/M7R/NRX) {
for (int i=0;i<NRX;i++) {
scores[iy*M7W + (M7W/NRX)*i + ix] = max_score[i];
indices[iy*M7W + (M7W/NRX)*i + ix] = index[i];
}
}
__syncthreads();
if (ty==0) {
float max_score = scores[tx];
int index = indices[tx];
for (int y=0;y<M7H/M7R;y++)
if (scores[y*M7W + tx]>max_score) {
max_score = scores[y*M7W + tx];
index = indices[y*M7W + tx];
}
d_score[bp1 + tx] = max_score;
d_index[bp1 + tx] = index;
}
}
// Launches one GPU matcher kernel REPEAT times, prints the average runtime and
// throughput under `name`, then copies the results back and verifies them
// against the CPU reference (h_score/h_index) via CheckMatches.
// All ten Match* kernels share the signature (pts1, pts2, score, index), so a
// plain host function pointer covers them (top-level __restrict qualifiers do
// not participate in the function type).
static void RunGpuMatch(const char *name, dim3 blocks, dim3 threads,
                        void (*kernel)(const float*, const float*, float*, int*),
                        float *d_pts1, float *d_pts2, float *d_score, int *d_index,
                        int psize,
                        std::vector<float> &h_score, std::vector<int> &h_index,
                        std::vector<float> &h_score2, std::vector<int> &h_index2)
{
  auto start = std::chrono::high_resolution_clock::now();
  for (int i = 0; i < REPEAT; i++)
    kernel<<<blocks,threads>>>(d_pts1, d_pts2, d_score, d_index);
  cudaDeviceSynchronize();
  auto end = std::chrono::high_resolution_clock::now();
  auto elapsed_seconds = std::chrono::duration_cast<std::chrono::duration<double>>(end - start);
  double delay = elapsed_seconds.count() * 1000 / REPEAT;
  std::cout << name << ": " << delay << " ms " << 2.0*NPTS*NPTS*NDIM/delay/1024/1024 << " Gflops" << std::endl;
  cudaMemcpy(h_index2.data(), d_index, psize, cudaMemcpyDeviceToHost);
  cudaMemcpy(h_score2.data(), d_score, psize, cudaMemcpyDeviceToHost);
  CheckMatches(h_index.data(), h_index2.data(), h_score.data(), h_score2.data());
}

// Benchmark driver: generates two random point sets, runs the CPU reference
// matcher, then benchmarks and verifies every GPU kernel variant.
int main(int argc, char *argv[])
{
  // Carve two 32-byte-aligned NPTS x NDIM float arrays out of one host vector.
  size_t space = sizeof(float)*NPTS*NDIM*2 + 8;
  std::vector<float> data(NPTS*NDIM*2 + 8);
  void *ptr = (void*)&data[0];
  float *h_pts1 = (float*)std::align(32, sizeof(float)*NPTS*NDIM, ptr, space);
  ptr = (void*)&data[NPTS*NDIM];
  float *h_pts2 = (float*)std::align(32, sizeof(float)*NPTS*NDIM, ptr, space);
  std::vector<int> h_index(NPTS);     // CPU reference match indices
  std::vector<float> h_score(NPTS);   // CPU reference match scores
  std::vector<int> h_index2(NPTS);    // GPU result indices
  std::vector<float> h_score2(NPTS);  // GPU result scores
  float *d_pts1, *d_pts2, *d_score;
  int *d_index;
  std::cout << std::endl;
  int psize = sizeof(float)*NPTS;
  std::cout << "Data size: " << 2.0*psize*NDIM/1024/1024 << " MB" << std::endl;
  cudaMalloc((void **)&d_pts1, psize*NDIM);
  cudaMalloc((void **)&d_pts2, psize*NDIM);
  cudaMalloc((void **)&d_index, psize);
  cudaMalloc((void **)&d_score, psize);
  // Fill both point sets with pseudo-random values, scaled so that the
  // components of each point sum to sqrt(NDIM).
  for (int i=0;i<NPTS;i++) {
    float sum1 = 0.0f, sum2 = 0.0f;
    for (int d=0;d<NDIM;d++) {
      sum1 += h_pts1[i*NDIM + d] = (float)rand()/RAND_MAX;
      sum2 += h_pts2[i*NDIM + d] = (float)rand()/RAND_MAX;
    }
    sum1 = sqrt(NDIM)/sum1;
    sum2 = sqrt(NDIM)/sum2;
    for (int d=0;d<NDIM;d++) {
      h_pts1[i*NDIM + d] *= sum1;
      h_pts2[i*NDIM + d] *= sum2;
    }
  }
  // CPU reference matcher — also the source of truth for CheckMatches.
  auto start = std::chrono::high_resolution_clock::now();
  MatchC1(h_pts1, h_pts2, h_score.data(), h_index.data());
  auto end = std::chrono::high_resolution_clock::now();
  auto elapsed_seconds = std::chrono::duration_cast<std::chrono::duration<double>>(end - start);
  auto delay = elapsed_seconds.count() * 1000;
  std::cout << "MatchCPU1: " << elapsed_seconds.count() * 1000 << " ms "
            << 2.0*NPTS*NPTS*NDIM/delay/1024/1024 << " Gflops" << std::endl;
  cudaMemcpy(d_pts1, h_pts1, psize*NDIM, cudaMemcpyHostToDevice);
  cudaMemcpy(d_pts2, h_pts2, psize*NDIM, cudaMemcpyHostToDevice);
  // One benchmark run per kernel variant, with the same launch shapes as the
  // original hand-unrolled code.
  RunGpuMatch("MatchGPU1",  dim3(NPTS/M1W), dim3(M1W),            Match1,
              d_pts1, d_pts2, d_score, d_index, psize, h_score, h_index, h_score2, h_index2);
  RunGpuMatch("MatchGPU2",  dim3(NPTS/M2W), dim3(M2W, M2H),       Match2,
              d_pts1, d_pts2, d_score, d_index, psize, h_score, h_index, h_score2, h_index2);
  RunGpuMatch("MatchGPU3",  dim3(NPTS/M2W), dim3(M2W, M2H),       Match3,
              d_pts1, d_pts2, d_score, d_index, psize, h_score, h_index, h_score2, h_index2);
  RunGpuMatch("MatchGPU4",  dim3(NPTS/M2W), dim3(M2W, M2H),       Match4,
              d_pts1, d_pts2, d_score, d_index, psize, h_score, h_index, h_score2, h_index2);
  RunGpuMatch("MatchGPU5",  dim3(NPTS/M5W), dim3(M5W, M5H),       Match5,
              d_pts1, d_pts2, d_score, d_index, psize, h_score, h_index, h_score2, h_index2);
  RunGpuMatch("MatchGPU6",  dim3(NPTS/M5W), dim3(M5W, M5H),       Match6,
              d_pts1, d_pts2, d_score, d_index, psize, h_score, h_index, h_score2, h_index2);
  RunGpuMatch("MatchGPU7",  dim3(NPTS/M7W), dim3(M7W, M7H/M7R),   Match7,
              d_pts1, d_pts2, d_score, d_index, psize, h_score, h_index, h_score2, h_index2);
  RunGpuMatch("MatchGPU8",  dim3(NPTS/M7W), dim3(M7W, M7H/M7R),   Match8,
              d_pts1, d_pts2, d_score, d_index, psize, h_score, h_index, h_score2, h_index2);
  RunGpuMatch("MatchGPU9",  dim3(NPTS/M7W), dim3(M7W, M7H/M7R/2), Match9,
              d_pts1, d_pts2, d_score, d_index, psize, h_score, h_index, h_score2, h_index2);
  RunGpuMatch("MatchGPU10", dim3(NPTS/M7W), dim3(M7W, M7H/M7R),   Match10,
              d_pts1, d_pts2, d_score, d_index, psize, h_score, h_index, h_score2, h_index2);
  cudaFree(d_pts1);
  cudaFree(d_pts2);
  cudaFree(d_index);
  cudaFree(d_score);
  return 0;
}
|
03e4381464ca842558c4ad638b0853e15529ae62.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include <stdio.h>
#include <typeinfo>
#include <thrust/device_ptr.h>
#include <thrust/scan.h>
#include <hipcub/hipcub.hpp>
#include <cub/iterator/constant_input_iterator.cuh>
#include <cub/iterator/discard_output_iterator.cuh>
#include <hipcub/hipcub.hpp>
#include "test_util.h"
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
bool g_verbose = false;
int g_timing_iterations = 0;
int g_repeat = 0;
double g_device_giga_bandwidth;
CachingDeviceAllocator g_allocator(true);
// Dispatch types
enum Backend
{
CUB, // CUB method
THRUST, // Thrust method
CDP, // GPU-based (dynamic parallelism) dispatch to CUB method
};
/**
* \brief WrapperFunctor (for precluding test-specialized dispatch to *Sum variants)
*/
template<typename OpT>
struct WrapperFunctor
{
    // The wrapped binary operator; forwarding through this wrapper hides the
    // concrete op type so *Sum-specialized dispatch paths are not selected.
    OpT op;

    WrapperFunctor(OpT wrapped_op) : op(wrapped_op) {}

    // Applies the wrapped operator to a pair of operands.
    template <typename T>
    __host__ __device__ __forceinline__ T operator()(const T &a, const T &b) const
    {
        T result = op(a, b);
        return result;
    }
};
//---------------------------------------------------------------------
// Dispatch to different CUB DeviceScan entrypoints
//---------------------------------------------------------------------
/**
* Dispatch to exclusive scan entrypoint
*/
// Runs hipcub's DeviceScan::ExclusiveScan `timing_timing_iterations` times and
// returns the status of the last invocation. Follows the standard two-phase
// CUB usage: when d_temp_storage is NULL the call only writes the required
// size into temp_storage_bytes. dispatch_to / is_primitive exist purely for
// overload selection; d_temp_storage_bytes and d_cdp_error are used by the
// device-side (CDP) wrapper, not here.
template <typename IsPrimitiveT, typename InputIteratorT, typename OutputIteratorT, typename ScanOpT, typename InitialValueT, typename OffsetT>
CUB_RUNTIME_FUNCTION __forceinline__
hipError_t Dispatch(
Int2Type<CUB> dispatch_to,
IsPrimitiveT is_primitive,
int timing_timing_iterations,
size_t *d_temp_storage_bytes,
hipError_t *d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
ScanOpT scan_op,
InitialValueT initial_value,
OffsetT num_items,
hipStream_t stream,
bool debug_synchronous)
{
hipError_t error = hipSuccess;
// Repeat for timing; only the last call's status is returned.
for (int i = 0; i < timing_timing_iterations; ++i)
{
error = DeviceScan::ExclusiveScan(d_temp_storage, temp_storage_bytes, d_in, d_out, scan_op, initial_value, num_items, stream, debug_synchronous);
}
return error;
}
/**
* Dispatch to exclusive sum entrypoint
*/
/**
template <typename InputIteratorT, typename OutputIteratorT, typename InitialValueT, typename OffsetT>
CUB_RUNTIME_FUNCTION __forceinline__
hipError_t Dispatch(
Int2Type<CUB> dispatch_to,
Int2Type<true> is_primitive,
int timing_timing_iterations,
size_t *d_temp_storage_bytes,
hipError_t *d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
Sum scan_op,
InitialValueT initial_value,
OffsetT num_items,
hipStream_t stream,
bool debug_synchronous)
{
hipError_t error = hipSuccess;
for (int i = 0; i < timing_timing_iterations; ++i)
{
error = DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream, debug_synchronous);
}
return error;
}
*/
//---------------------------------------------------------------------
// CUDA Nested Parallelism Test Kernel
//---------------------------------------------------------------------
/**
* Simple wrapper kernel to invoke DeviceScan
*/
// Single-thread wrapper kernel that invokes the CUB Dispatch path from device
// code (CUDA/HIP dynamic parallelism). Stores the dispatch status in
// *d_cdp_error and the queried temp-storage size in *d_temp_storage_bytes.
// When the CDP build flag is absent, it just reports hipErrorNotSupported.
template <typename IsPrimitiveT, typename InputIteratorT, typename OutputIteratorT, typename ScanOpT, typename InitialValueT, typename OffsetT>
__global__ void CnpDispatchKernel(
IsPrimitiveT is_primitive,
int timing_timing_iterations,
size_t *d_temp_storage_bytes,
hipError_t *d_cdp_error,
void* d_temp_storage,
size_t temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
ScanOpT scan_op,
InitialValueT initial_value,
OffsetT num_items,
bool debug_synchronous)
{
#ifndef CUB_CDP
*d_cdp_error = hipErrorNotSupported;
#else
// Forward to the host-style Dispatch overload; stream 0 on the device.
*d_cdp_error = Dispatch(
Int2Type<CUB>(),
is_primitive,
timing_timing_iterations,
d_temp_storage_bytes,
d_cdp_error,
d_temp_storage,
temp_storage_bytes,
d_in,
d_out,
scan_op,
initial_value,
num_items,
0,
debug_synchronous);
// Publish the (possibly updated) temp-storage requirement to the host.
*d_temp_storage_bytes = temp_storage_bytes;
#endif
}
//---------------------------------------------------------------------
// Main
//---------------------------------------------------------------------
/**
* Main
*/
/**
 * Test driver: parses command-line options, initializes the device, and runs
 * the DeviceScan test suite g_repeat+1 times.
 */
int main(int argc, char** argv)
{
    int num_items = -1;
    // Initialize command line
    CommandLineArgs args(argc, argv);
    g_verbose = args.CheckCmdLineFlag("v");
    args.GetCmdLineArgument("n", num_items);
    args.GetCmdLineArgument("i", g_timing_iterations);
    args.GetCmdLineArgument("repeat", g_repeat);
    // Print usage
    if (args.CheckCmdLineFlag("help"))
    {
        printf("%s "
            "[--n=<input items> "
            "[--i=<timing iterations> "
            "[--device=<device-id>] "
            "[--repeat=<repetitions of entire test suite>]"
            "[--v] "
            "[--cdp]"
            "\n", argv[0]);
        exit(0);
    }
    // Initialize device
    CubDebugExit(args.DeviceInit());
    g_device_giga_bandwidth = args.device_giga_bandwidth;
    printf("\n");
    // Compile/run thorough tests
    for (int i = 0; i <= g_repeat; ++i)
    {
        // Test different input+output data types
        TestSize<unsigned char>(num_items, (int) 0, (int) 99);
        // Test same input+output data types
        TestSize<unsigned int>(num_items, (unsigned int) 0, (unsigned int) 99);
        // Broader type coverage retained but disabled, as in the original:
        /**TestSize<unsigned char>(num_items, (unsigned char) 0, (unsigned char) 99);
        TestSize<char>(num_items, (char) 0, (char) 99);
        TestSize<unsigned short>(num_items, (unsigned short) 0, (unsigned short)99);
        TestSize<unsigned long long>(num_items, (unsigned long long) 0, (unsigned long long) 99);
        TestSize<uchar2>(num_items, make_uchar2(0, 0), make_uchar2(17, 21));
        TestSize<char2>(num_items, make_char2(0, 0), make_char2(17, 21));
        TestSize<ushort2>(num_items, make_ushort2(0, 0), make_ushort2(17, 21));
        TestSize<uint2>(num_items, make_uint2(0, 0), make_uint2(17, 21));
        TestSize<ulonglong2>(num_items, make_ulonglong2(0, 0), make_ulonglong2(17, 21));
        TestSize<uchar4>(num_items, make_uchar4(0, 0, 0, 0), make_uchar4(17, 21, 32, 85));
        TestSize<char4>(num_items, make_char4(0, 0, 0, 0), make_char4(17, 21, 32, 85));
        TestSize<ushort4>(num_items, make_ushort4(0, 0, 0, 0), make_ushort4(17, 21, 32, 85));
        TestSize<uint4>(num_items, make_uint4(0, 0, 0, 0), make_uint4(17, 21, 32, 85));
        TestSize<ulonglong4>(num_items, make_ulonglong4(0, 0, 0, 0), make_ulonglong4(17, 21, 32, 85));
        TestSize<TestFoo>(num_items,
        TestFoo::MakeTestFoo(0, 0, 0, 0),
        TestFoo::MakeTestFoo(1ll << 63, 1 << 31, short(1 << 15), char(1 << 7)));
        TestSize<TestBar>(num_items,
        TestBar(0, 0),
        TestBar(1ll << 63, 1 << 31));
        */
    }
    // NOTE: removed a stray `#endif` here — it had no matching `#if`/`#ifdef`
    // anywhere in this file (the CUB_CDP pair above is balanced) and broke
    // preprocessing.
    return 0;
}
| 03e4381464ca842558c4ad638b0853e15529ae62.cu | // Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include <stdio.h>
#include <typeinfo>
#include <thrust/device_ptr.h>
#include <thrust/scan.h>
#include <cub/util_allocator.cuh>
#include <cub/iterator/constant_input_iterator.cuh>
#include <cub/iterator/discard_output_iterator.cuh>
#include <cub/device/device_scan.cuh>
#include "test_util.h"
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
bool g_verbose = false;
int g_timing_iterations = 0;
int g_repeat = 0;
double g_device_giga_bandwidth;
CachingDeviceAllocator g_allocator(true);
// Dispatch types
enum Backend
{
CUB, // CUB method
THRUST, // Thrust method
CDP, // GPU-based (dynamic parallelism) dispatch to CUB method
};
/**
* \brief WrapperFunctor (for precluding test-specialized dispatch to *Sum variants)
*/
template<typename OpT>
struct WrapperFunctor
{
// Wrapped binary operator; forwarding through this wrapper hides the
// concrete op type so *Sum-specialized dispatch paths are not selected.
OpT op;
WrapperFunctor(OpT op) : op(op) {}
// Applies the wrapped operator to a pair of operands.
template <typename T>
__host__ __device__ __forceinline__ T operator()(const T &a, const T &b) const
{
return op(a, b);
}
};
//---------------------------------------------------------------------
// Dispatch to different CUB DeviceScan entrypoints
//---------------------------------------------------------------------
/**
* Dispatch to exclusive scan entrypoint
*/
// Runs cub::DeviceScan::ExclusiveScan `timing_timing_iterations` times and
// returns the status of the last invocation. Standard two-phase CUB usage:
// when d_temp_storage is NULL the call only writes the required size into
// temp_storage_bytes. dispatch_to / is_primitive exist purely for overload
// selection; d_temp_storage_bytes and d_cdp_error are used by the device-side
// (CDP) wrapper, not here.
template <typename IsPrimitiveT, typename InputIteratorT, typename OutputIteratorT, typename ScanOpT, typename InitialValueT, typename OffsetT>
CUB_RUNTIME_FUNCTION __forceinline__
cudaError_t Dispatch(
Int2Type<CUB> dispatch_to,
IsPrimitiveT is_primitive,
int timing_timing_iterations,
size_t *d_temp_storage_bytes,
cudaError_t *d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
ScanOpT scan_op,
InitialValueT initial_value,
OffsetT num_items,
cudaStream_t stream,
bool debug_synchronous)
{
cudaError_t error = cudaSuccess;
// Repeat for timing; only the last call's status is returned.
for (int i = 0; i < timing_timing_iterations; ++i)
{
error = DeviceScan::ExclusiveScan(d_temp_storage, temp_storage_bytes, d_in, d_out, scan_op, initial_value, num_items, stream, debug_synchronous);
}
return error;
}
/**
* Dispatch to exclusive sum entrypoint
*/
/**
template <typename InputIteratorT, typename OutputIteratorT, typename InitialValueT, typename OffsetT>
CUB_RUNTIME_FUNCTION __forceinline__
cudaError_t Dispatch(
Int2Type<CUB> dispatch_to,
Int2Type<true> is_primitive,
int timing_timing_iterations,
size_t *d_temp_storage_bytes,
cudaError_t *d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
Sum scan_op,
InitialValueT initial_value,
OffsetT num_items,
cudaStream_t stream,
bool debug_synchronous)
{
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_timing_iterations; ++i)
{
error = DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream, debug_synchronous);
}
return error;
}
*/
//---------------------------------------------------------------------
// CUDA Nested Parallelism Test Kernel
//---------------------------------------------------------------------
/**
* Simple wrapper kernel to invoke DeviceScan
*/
// Single-thread wrapper kernel that invokes the CUB Dispatch path from device
// code (CUDA dynamic parallelism). Stores the dispatch status in *d_cdp_error
// and the queried temp-storage size in *d_temp_storage_bytes. When CUB_CDP is
// not defined, it just reports cudaErrorNotSupported.
template <typename IsPrimitiveT, typename InputIteratorT, typename OutputIteratorT, typename ScanOpT, typename InitialValueT, typename OffsetT>
__global__ void CnpDispatchKernel(
IsPrimitiveT is_primitive,
int timing_timing_iterations,
size_t *d_temp_storage_bytes,
cudaError_t *d_cdp_error,
void* d_temp_storage,
size_t temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
ScanOpT scan_op,
InitialValueT initial_value,
OffsetT num_items,
bool debug_synchronous)
{
#ifndef CUB_CDP
*d_cdp_error = cudaErrorNotSupported;
#else
// Forward to the host-style Dispatch overload; stream 0 on the device.
*d_cdp_error = Dispatch(
Int2Type<CUB>(),
is_primitive,
timing_timing_iterations,
d_temp_storage_bytes,
d_cdp_error,
d_temp_storage,
temp_storage_bytes,
d_in,
d_out,
scan_op,
initial_value,
num_items,
0,
debug_synchronous);
// Publish the (possibly updated) temp-storage requirement to the host.
*d_temp_storage_bytes = temp_storage_bytes;
#endif
}
//---------------------------------------------------------------------
// Main
//---------------------------------------------------------------------
/**
* Main
*/
/**
 * Test driver: parses command-line options, initializes the device, and runs
 * the DeviceScan test suite g_repeat+1 times.
 */
int main(int argc, char** argv)
{
    int num_items = -1;
    // Initialize command line
    CommandLineArgs args(argc, argv);
    g_verbose = args.CheckCmdLineFlag("v");
    args.GetCmdLineArgument("n", num_items);
    args.GetCmdLineArgument("i", g_timing_iterations);
    args.GetCmdLineArgument("repeat", g_repeat);
    // Print usage
    if (args.CheckCmdLineFlag("help"))
    {
        printf("%s "
            "[--n=<input items> "
            "[--i=<timing iterations> "
            "[--device=<device-id>] "
            "[--repeat=<repetitions of entire test suite>]"
            "[--v] "
            "[--cdp]"
            "\n", argv[0]);
        exit(0);
    }
    // Initialize device
    CubDebugExit(args.DeviceInit());
    g_device_giga_bandwidth = args.device_giga_bandwidth;
    printf("\n");
    // Compile/run thorough tests
    for (int i = 0; i <= g_repeat; ++i)
    {
        // Test different input+output data types
        TestSize<unsigned char>(num_items, (int) 0, (int) 99);
        // Test same input+output data types
        TestSize<unsigned int>(num_items, (unsigned int) 0, (unsigned int) 99);
        // Broader type coverage retained but disabled, as in the original:
        /**TestSize<unsigned char>(num_items, (unsigned char) 0, (unsigned char) 99);
        TestSize<char>(num_items, (char) 0, (char) 99);
        TestSize<unsigned short>(num_items, (unsigned short) 0, (unsigned short)99);
        TestSize<unsigned long long>(num_items, (unsigned long long) 0, (unsigned long long) 99);
        TestSize<uchar2>(num_items, make_uchar2(0, 0), make_uchar2(17, 21));
        TestSize<char2>(num_items, make_char2(0, 0), make_char2(17, 21));
        TestSize<ushort2>(num_items, make_ushort2(0, 0), make_ushort2(17, 21));
        TestSize<uint2>(num_items, make_uint2(0, 0), make_uint2(17, 21));
        TestSize<ulonglong2>(num_items, make_ulonglong2(0, 0), make_ulonglong2(17, 21));
        TestSize<uchar4>(num_items, make_uchar4(0, 0, 0, 0), make_uchar4(17, 21, 32, 85));
        TestSize<char4>(num_items, make_char4(0, 0, 0, 0), make_char4(17, 21, 32, 85));
        TestSize<ushort4>(num_items, make_ushort4(0, 0, 0, 0), make_ushort4(17, 21, 32, 85));
        TestSize<uint4>(num_items, make_uint4(0, 0, 0, 0), make_uint4(17, 21, 32, 85));
        TestSize<ulonglong4>(num_items, make_ulonglong4(0, 0, 0, 0), make_ulonglong4(17, 21, 32, 85));
        TestSize<TestFoo>(num_items,
        TestFoo::MakeTestFoo(0, 0, 0, 0),
        TestFoo::MakeTestFoo(1ll << 63, 1 << 31, short(1 << 15), char(1 << 7)));
        TestSize<TestBar>(num_items,
        TestBar(0, 0),
        TestBar(1ll << 63, 1 << 31));
        */
    }
    // NOTE: removed a stray `#endif` here — it had no matching `#if`/`#ifdef`
    // anywhere in this file (the CUB_CDP pair above is balanced) and broke
    // preprocessing.
    return 0;
}
|
d659f29f677351b7db15b40d806f1fe94e9e0554.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "combineSourceAndBackground.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int nwl = 1;
const int n = 1;
const float scale = 1;
float *src = NULL;
hipMalloc(&src, XSIZE*YSIZE);
const float *bkg = NULL;
hipMalloc(&bkg, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
combineSourceAndBackground), dim3(gridBlock),dim3(threadBlock), 0, 0, nwl,n,scale,src,bkg);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
combineSourceAndBackground), dim3(gridBlock),dim3(threadBlock), 0, 0, nwl,n,scale,src,bkg);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
combineSourceAndBackground), dim3(gridBlock),dim3(threadBlock), 0, 0, nwl,n,scale,src,bkg);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | d659f29f677351b7db15b40d806f1fe94e9e0554.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "combineSourceAndBackground.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int nwl = 1;
const int n = 1;
const float scale = 1;
float *src = NULL;
cudaMalloc(&src, XSIZE*YSIZE);
const float *bkg = NULL;
cudaMalloc(&bkg, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
combineSourceAndBackground<<<gridBlock,threadBlock>>>(nwl,n,scale,src,bkg);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
combineSourceAndBackground<<<gridBlock,threadBlock>>>(nwl,n,scale,src,bkg);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
combineSourceAndBackground<<<gridBlock,threadBlock>>>(nwl,n,scale,src,bkg);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
de12a91804ea84ee14daf0c0229faff3643fd156.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//------------------------------------------------------------------------------
//
// PROGRAM: Matrix Multipliplication driver
//
// PURPOSE: This is a driver program to test various ways of computing
// the product:
//
// C = A * B
//
// A and B are set to constant matrices so we
// can make a quick test of the multiplication.
//
// USAGE: The matrices are constant matrices, square and the order is
// set as a constant, ORDER (see mult.h).
//
// HISTORY: Written by Tim Mattson, August 2010
// Modified by Simon McIntosh-Smith, September 2011
// Modified by Tom Deakin and Simon McIntosh-Smith, October 2012
// Updated to C++ Wrapper v1.2.6 by Tom Deakin, August 2013
// Ported to CUDA by F Rossi, Oct 2013
//------------------------------------------------------------------------------
#include "matmul.hpp"
#include "matrix_lib.hpp"
#include "cuda_util.hpp" //for CudaSafeCall
//Naive dense matrix multiplication kernel.
//
//An optimized and documented version of the parallel algorithm
//is available in section 3.2 of http://docs.nvidia.com/cuda/cuda-c-programming-guide/
//
__global__ void kernel_mmul(
const int Mdim,
const int Ndim,
const int Pdim,
const float* A,
const float* B,
float* C)
{
int k;
int i = blockDim.x*blockIdx.x+threadIdx.x;
int j = blockDim.y*blockIdx.y+threadIdx.y;
float tmp;
if ( (i < Ndim) && (j <Mdim))
{
tmp = 0.0;
for(k=0;k<Pdim;k++)
tmp += A[i*Ndim+k] * B[k*Pdim+j];
C[i*Ndim+j] = tmp;
}
}
int main(void)
{
int Mdim, Ndim, Pdim; // A[N][P], B[P][M], C[N][M]
int szA, szB, szC; // number of elements in each matrix
double start_time; // Starting time
double run_time; // timing data
Ndim = ORDER;
Pdim = ORDER;
Mdim = ORDER;
szA = Ndim * Pdim;
szB = Pdim * Mdim;
szC = Ndim * Mdim;
std::vector<float> h_A(szA); // Host memory for Matrix A
std::vector<float> h_B(szB); // Host memory for Matrix B
std::vector<float> h_C(szC); // Host memory for Matrix C
float* d_a; // device memory used for the input a vector
float* d_b; // device memory used for the input b vector
float* d_c; // device memory used for the output c vector
initmat(Mdim, Ndim, Pdim, h_A, h_B, h_C);
printf("\n===== Sequential, matrix mult (dot prod), order %d on host CPU ======\n",ORDER);
for(int i = 0; i < COUNT; i++)
{
zero_mat(Ndim, Mdim, h_C);
start_time = wtime();
seq_mat_mul_sdot(Mdim, Ndim, Pdim, h_A, h_B, h_C);
run_time = wtime() - start_time;
results(Mdim, Ndim, Pdim, h_C, run_time);
}
//CUDA part begin
CudaSafeCall( hipSetDevice(0) );
CudaSafeCall( hipMalloc(&d_a, sizeof(float)*szA) ); // allocates device memory
CudaSafeCall( hipMalloc(&d_b, sizeof(float)*szB) );
CudaSafeCall( hipMalloc(&d_c, sizeof(float)*szC) );
CudaSafeCall( hipMemcpy(d_a, &h_A[0], sizeof(float)*szA, hipMemcpyHostToDevice) ); //copies vectors initialized on host to device
CudaSafeCall( hipMemcpy(d_b, &h_B[0], sizeof(float)*szB, hipMemcpyHostToDevice) );
hipEvent_t start, stop; //just for timing purpose
float elapsed_time = 0; //just for timing purpose
hipEventCreate(&start); //just for timing purpose
hipEventCreate(&stop); //just for timing purpose
printf("\n===== CUDA, matrix mult, C(i,j) per work item, order %d ======\n",Ndim);
// Do the multiplication COUNT times
for (int i = 0; i < COUNT; i++)
{
zero_mat(Ndim, Mdim, h_C);
dim3 grid,block;
block.x = 32;
block.y = 16;
block.z = 1;
grid.x = (Ndim-1)/block.x+1; //round up division by block.x
grid.y = (Mdim-1)/block.y+1;
grid.z = 1;
hipEventRecord(start,0); //just for timing purpose
hipLaunchKernelGGL(( kernel_mmul), dim3(grid),dim3(block), 0, 0, Mdim,Ndim,Pdim,d_a,d_b,d_c);
hipEventRecord(stop,0); //just for timing purpose
hipEventSynchronize(stop); //just for timing purpose
hipEventElapsedTime(&elapsed_time, start, stop); //just for timing purpose
CudaSafeCall( hipMemcpy(&h_C[0], d_c, sizeof(float)*szC, hipMemcpyDeviceToHost) ); //copies vector computed on device to host
results(Mdim, Ndim, Pdim, h_C, elapsed_time/1000.f);
}
CudaSafeCall( hipFree(d_a) ); //frees up memory
CudaSafeCall( hipFree(d_b) );
CudaSafeCall( hipFree(d_c) );
CudaSafeCall( hipEventDestroy(start) );
CudaSafeCall( hipEventDestroy(stop) );
//cuda part end
return EXIT_SUCCESS;
}
| de12a91804ea84ee14daf0c0229faff3643fd156.cu | //------------------------------------------------------------------------------
//
// PROGRAM: Matrix Multipliplication driver
//
// PURPOSE: This is a driver program to test various ways of computing
// the product:
//
// C = A * B
//
// A and B are set to constant matrices so we
// can make a quick test of the multiplication.
//
// USAGE: The matrices are constant matrices, square and the order is
// set as a constant, ORDER (see mult.h).
//
// HISTORY: Written by Tim Mattson, August 2010
// Modified by Simon McIntosh-Smith, September 2011
// Modified by Tom Deakin and Simon McIntosh-Smith, October 2012
// Updated to C++ Wrapper v1.2.6 by Tom Deakin, August 2013
// Ported to CUDA by F Rossi, Oct 2013
//------------------------------------------------------------------------------
#include "matmul.hpp"
#include "matrix_lib.hpp"
#include "cuda_util.hpp" //for CudaSafeCall
//Naive dense matrix multiplication kernel.
//
//An optimized and documented version of the parallel algorithm
//is available in section 3.2 of http://docs.nvidia.com/cuda/cuda-c-programming-guide/
//
__global__ void kernel_mmul(
const int Mdim,
const int Ndim,
const int Pdim,
const float* A,
const float* B,
float* C)
{
int k;
int i = blockDim.x*blockIdx.x+threadIdx.x;
int j = blockDim.y*blockIdx.y+threadIdx.y;
float tmp;
if ( (i < Ndim) && (j <Mdim))
{
tmp = 0.0;
for(k=0;k<Pdim;k++)
tmp += A[i*Ndim+k] * B[k*Pdim+j];
C[i*Ndim+j] = tmp;
}
}
int main(void)
{
int Mdim, Ndim, Pdim; // A[N][P], B[P][M], C[N][M]
int szA, szB, szC; // number of elements in each matrix
double start_time; // Starting time
double run_time; // timing data
Ndim = ORDER;
Pdim = ORDER;
Mdim = ORDER;
szA = Ndim * Pdim;
szB = Pdim * Mdim;
szC = Ndim * Mdim;
std::vector<float> h_A(szA); // Host memory for Matrix A
std::vector<float> h_B(szB); // Host memory for Matrix B
std::vector<float> h_C(szC); // Host memory for Matrix C
float* d_a; // device memory used for the input a vector
float* d_b; // device memory used for the input b vector
float* d_c; // device memory used for the output c vector
initmat(Mdim, Ndim, Pdim, h_A, h_B, h_C);
printf("\n===== Sequential, matrix mult (dot prod), order %d on host CPU ======\n",ORDER);
for(int i = 0; i < COUNT; i++)
{
zero_mat(Ndim, Mdim, h_C);
start_time = wtime();
seq_mat_mul_sdot(Mdim, Ndim, Pdim, h_A, h_B, h_C);
run_time = wtime() - start_time;
results(Mdim, Ndim, Pdim, h_C, run_time);
}
//CUDA part begin
CudaSafeCall( cudaSetDevice(0) );
CudaSafeCall( cudaMalloc(&d_a, sizeof(float)*szA) ); // allocates device memory
CudaSafeCall( cudaMalloc(&d_b, sizeof(float)*szB) );
CudaSafeCall( cudaMalloc(&d_c, sizeof(float)*szC) );
CudaSafeCall( cudaMemcpy(d_a, &h_A[0], sizeof(float)*szA, cudaMemcpyHostToDevice) ); //copies vectors initialized on host to device
CudaSafeCall( cudaMemcpy(d_b, &h_B[0], sizeof(float)*szB, cudaMemcpyHostToDevice) );
cudaEvent_t start, stop; //just for timing purpose
float elapsed_time = 0; //just for timing purpose
cudaEventCreate(&start); //just for timing purpose
cudaEventCreate(&stop); //just for timing purpose
printf("\n===== CUDA, matrix mult, C(i,j) per work item, order %d ======\n",Ndim);
// Do the multiplication COUNT times
for (int i = 0; i < COUNT; i++)
{
zero_mat(Ndim, Mdim, h_C);
dim3 grid,block;
block.x = 32;
block.y = 16;
block.z = 1;
grid.x = (Ndim-1)/block.x+1; //round up division by block.x
grid.y = (Mdim-1)/block.y+1;
grid.z = 1;
cudaEventRecord(start,0); //just for timing purpose
kernel_mmul<<<grid,block>>>(Mdim,Ndim,Pdim,d_a,d_b,d_c);
cudaEventRecord(stop,0); //just for timing purpose
cudaEventSynchronize(stop); //just for timing purpose
cudaEventElapsedTime(&elapsed_time, start, stop); //just for timing purpose
CudaSafeCall( cudaMemcpy(&h_C[0], d_c, sizeof(float)*szC, cudaMemcpyDeviceToHost) ); //copies vector computed on device to host
results(Mdim, Ndim, Pdim, h_C, elapsed_time/1000.f);
}
CudaSafeCall( cudaFree(d_a) ); //frees up memory
CudaSafeCall( cudaFree(d_b) );
CudaSafeCall( cudaFree(d_c) );
CudaSafeCall( cudaEventDestroy(start) );
CudaSafeCall( cudaEventDestroy(stop) );
//cuda part end
return EXIT_SUCCESS;
}
|
228906f60afa08549bcd98c0cfe4adddfa70d446.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 2019 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <stdint.h>
#include <hip/hip_runtime.h>
#include "kernel/gpu/cuda_impl/concatv2_impl.cuh"
template <typename T>
__global__ void ConcatV2(const size_t size, const int w1, const int w2, const T* input_1, const T* input_2, T* output) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
int n = pos / (w1 + w2);
int m = pos % (w1 + w2);
output[pos] = m >= w1 ? input_2[n * w2 + m - w1] : input_1[n * w1 + m];
}
return;
}
template <typename T>
void CalConcatV2(const size_t size, const int w1, const int w2, const T* input_1, const T* input_2, T* output,
hipStream_t cuda_stream) {
hipLaunchKernelGGL(( ConcatV2), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream, size, w1, w2, input_1, input_2, output);
return;
}
template void CalConcatV2(const size_t size, const int w1, const int w2, const float* input_1, const float* input_2,
float* output, hipStream_t cuda_stream);
template void CalConcatV2(const size_t size, const int w1, const int w2, const int* input_1, const int* input_2,
int* output, hipStream_t cuda_stream);
template void CalConcatV2(const size_t size, const int w1, const int w2, const half* input_1, const half* input_2,
half* output, hipStream_t cuda_stream);
| 228906f60afa08549bcd98c0cfe4adddfa70d446.cu | /**
* Copyright 2019 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <stdint.h>
#include <cuda_runtime.h>
#include "kernel/gpu/cuda_impl/concatv2_impl.cuh"
template <typename T>
__global__ void ConcatV2(const size_t size, const int w1, const int w2, const T* input_1, const T* input_2, T* output) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
int n = pos / (w1 + w2);
int m = pos % (w1 + w2);
output[pos] = m >= w1 ? input_2[n * w2 + m - w1] : input_1[n * w1 + m];
}
return;
}
template <typename T>
void CalConcatV2(const size_t size, const int w1, const int w2, const T* input_1, const T* input_2, T* output,
cudaStream_t cuda_stream) {
ConcatV2<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(size, w1, w2, input_1, input_2, output);
return;
}
template void CalConcatV2(const size_t size, const int w1, const int w2, const float* input_1, const float* input_2,
float* output, cudaStream_t cuda_stream);
template void CalConcatV2(const size_t size, const int w1, const int w2, const int* input_1, const int* input_2,
int* output, cudaStream_t cuda_stream);
template void CalConcatV2(const size_t size, const int w1, const int w2, const half* input_1, const half* input_2,
half* output, cudaStream_t cuda_stream);
|
9b23ece76ade0b12bb9a27add8ac827726522fe8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words,
// we multiply each weight with the pixel underneath it. Finally, we add up all
// of the multiplied numbers and assign that value to our output for the current
// pixel. We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the
// outputChannel.
// Here is an example of computing a blur, using a weighted average, for a
// single pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its
// width. We refer to the array of weights as a filter, and we refer to its
// width with the variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called
// checkCudaErrors. You should wrap your allocation and copying statements like
// we've done in the code we're supplying you. Here is an example of the unsafe
// way to allocate memory on the GPU:
//
// hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows *
// numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful
// for catching mistakes. If you write code the unsafe way and you make a
// mistake, then any subsequent kernels won't compute anything, and it will be
// hard to figure out why. Writing code the safe way will inform you as soon as
// you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "utils.h"
__global__ void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel, int numRows,
int numCols, const float* const filter,
const int filterWidth) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before
// accessing GPU memory:
if (x >= numCols || y >= numRows) {
return;
} else {
float result = 0.f;
// Iterate over filter
for (int filter_y = -filterWidth / 2; filter_y <= filterWidth / 2;
++filter_y) {
for (int filter_x = -filterWidth / 2; filter_x <= filterWidth / 2;
++filter_x) {
// Find the global image position for this filter position
// clamp to boundary of the image
int image_x = min(max(x + filter_x, 0), static_cast<int>(numCols - 1));
int image_y = min(max(y + filter_y, 0), static_cast<int>(numRows - 1));
float image_value =
static_cast<float>(inputChannel[image_y * numCols + image_x]);
float filter_value = filter[(filter_y + filterWidth / 2) * filterWidth +
filter_x + filterWidth / 2];
result += image_value * filter_value;
}
}
outputChannel[y * numCols + x] = result;
}
// NOTE: If a thread's absolute position 2D position is within the image,
// but some of its neighbors are outside the image, then you will need to be
// extra careful. Instead of trying to read such a neighbor value from GPU
// memory (which won't work because the value is out of bounds), you should
// explicitly clamp the neighbor values you read to be within the bounds of
// the image. If this is not clear to you, then please refer to sequential
// reference solution for the exact clamping semantics you should follow.
}
// This kernel takes in an image represented as a uchar4 and splits
// it into three images consisting of only one color channel each
__global__ void separateChannels(const uchar4* const inputImageRGBA,
int numRows, int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel) {
int absolute_image_position_x = blockIdx.x * blockDim.x + threadIdx.x;
int absolute_image_position_y = blockIdx.y * blockDim.y + threadIdx.y;
// NOTE: Be careful not to try to access memory that is outside the bounds
// of the image. You'll want code that performs the following check before
// accessing GPU memory:
const int absolute_pos =
absolute_image_position_y * numCols + absolute_image_position_x;
if (absolute_image_position_x >= numCols ||
absolute_image_position_y >= numRows) {
return;
} else {
uchar4 inputPixel = inputImageRGBA[absolute_pos];
redChannel[absolute_pos] = inputPixel.x;
greenChannel[absolute_pos] = inputPixel.y;
blueChannel[absolute_pos] = inputPixel.z;
}
}
// This kernel takes in three color channels and recombines them
// into one image. The alpha channel is set to 255 to represent
// that this image has no transparency.
__global__ void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA, int numRows,
int numCols) {
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
// make sure we don't try and access memory outside the image
// by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
// Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float* d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage,
const size_t numColsImage,
const float* const h_filter,
const size_t filterWidth) {
// allocate memory for the three different channels
// original
checkCudaErrors(
hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(
&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(
hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
// Allocate memory for the filter on the GPU
// Use the pointer d_filter that we have already declared for you
// You need to allocate memory for the filter with hipMalloc
// be sure to use checkCudaErrors like the above examples to
// be able to tell if anything goes wrong
// IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc
checkCudaErrors(
hipMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
// Copy the filter on the host (h_filter) to the memory you just allocated
// on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice);
// Remember to use checkCudaErrors!
checkCudaErrors(hipMemcpy(d_filter, h_filter,
sizeof(float) * filterWidth * filterWidth,
hipMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4* const h_inputImageRGBA,
uchar4* const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows,
const size_t numCols, unsigned char* d_redBlurred,
unsigned char* d_greenBlurred,
unsigned char* d_blueBlurred, const int filterWidth) {
// Dealing with an even width filter is trickier
assert(filterWidth % 2 == 1);
// Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize(16, 16, 1);
// Compute correct grid size (i.e., number of blocks per kernel launch)
// from the image size and and block size.
const dim3 gridSize(1 + (numCols / blockSize.x), 1 + (numRows / blockSize.y),
1);
// Launch a kernel for separating the RGBA image into different color
// channels
hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols,
d_red, d_green, d_blue);
// Call hipDeviceSynchronize(), then call checkCudaErrors() immediately
// after launching your kernel to make sure that you didn't make any
// mistakes.
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
// Call your convolution kernel here 3 times, once for each color
// channel.
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_redBlurred, numRows, numCols,
d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_green, d_greenBlurred, numRows,
numCols, d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_blue, d_blueBlurred, numRows,
numCols, d_filter, filterWidth);
// Again, call hipDeviceSynchronize(), then call checkCudaErrors()
// immediately after launching your kernel to make sure that you didn't make
// any mistakes.
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
// Now we recombine your results. We take care of launching this kernel for
// you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred, d_greenBlurred,
d_blueBlurred, d_outputImageRGBA,
numRows, numCols);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
}
// Free all the memory that we allocated
// TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(hipFree(d_red));
checkCudaErrors(hipFree(d_green));
checkCudaErrors(hipFree(d_blue));
checkCudaErrors(hipFree(d_filter));
}
| 9b23ece76ade0b12bb9a27add8ac827726522fe8.cu | // Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words,
// we multiply each weight with the pixel underneath it. Finally, we add up all
// of the multiplied numbers and assign that value to our output for the current
// pixel. We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the
// outputChannel.
// Here is an example of computing a blur, using a weighted average, for a
// single pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its
// width. We refer to the array of weights as a filter, and we refer to its
// width with the variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called
// checkCudaErrors. You should wrap your allocation and copying statements like
// we've done in the code we're supplying you. Here is an example of the unsafe
// way to allocate memory on the GPU:
//
// cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows *
// numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful
// for catching mistakes. If you write code the unsafe way and you make a
// mistake, then any subsequent kernels won't compute anything, and it will be
// hard to figure out why. Writing code the safe way will inform you as soon as
// you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "utils.h"
// One thread per output pixel: convolves inputChannel with the
// filterWidth x filterWidth weight array, clamping neighbor reads to the
// image border, and writes the weighted sum for pixel (x, y) to
// outputChannel. Expected launch: a 2D grid covering numCols x numRows.
__global__ void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel, int numRows,
int numCols, const float* const filter,
const int filterWidth) {
// Global 2D pixel coordinate for this thread.
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before
// accessing GPU memory:
if (x >= numCols || y >= numRows) {
return;
} else {
// Accumulate in float; the store below converts to unsigned char.
float result = 0.f;
// Iterate over filter
for (int filter_y = -filterWidth / 2; filter_y <= filterWidth / 2;
++filter_y) {
for (int filter_x = -filterWidth / 2; filter_x <= filterWidth / 2;
++filter_x) {
// Find the global image position for this filter position
// clamp to boundary of the image
int image_x = min(max(x + filter_x, 0), static_cast<int>(numCols - 1));
int image_y = min(max(y + filter_y, 0), static_cast<int>(numRows - 1));
float image_value =
static_cast<float>(inputChannel[image_y * numCols + image_x]);
float filter_value = filter[(filter_y + filterWidth / 2) * filterWidth +
filter_x + filterWidth / 2];
result += image_value * filter_value;
}
}
outputChannel[y * numCols + x] = result;
}
// NOTE: If a thread's absolute position 2D position is within the image,
// but some of its neighbors are outside the image, then you will need to be
// extra careful. Instead of trying to read such a neighbor value from GPU
// memory (which won't work because the value is out of bounds), you should
// explicitly clamp the neighbor values you read to be within the bounds of
// the image. If this is not clear to you, then please refer to sequential
// reference solution for the exact clamping semantics you should follow.
}
// This kernel takes in an image represented as a uchar4 and splits
// it into three images consisting of only one color channel each
// One thread per pixel: splits the interleaved RGBA (AoS) image into three
// contiguous single-channel (SoA) arrays. The alpha channel is dropped here
// and re-synthesized by recombineChannels.
__global__ void separateChannels(const uchar4* const inputImageRGBA,
                                 int numRows, int numCols,
                                 unsigned char* const redChannel,
                                 unsigned char* const greenChannel,
                                 unsigned char* const blueChannel) {
  const int col = blockIdx.x * blockDim.x + threadIdx.x;
  const int row = blockIdx.y * blockDim.y + threadIdx.y;
  // Threads mapped past the image edge do nothing.
  if (col >= numCols || row >= numRows) return;
  const int idx = row * numCols + col;
  const uchar4 pixel = inputImageRGBA[idx];
  redChannel[idx] = pixel.x;
  greenChannel[idx] = pixel.y;
  blueChannel[idx] = pixel.z;
}
// This kernel takes in three color channels and recombines them
// into one image. The alpha channel is set to 255 to represent
// that this image has no transparency.
// One thread per pixel: interleaves the three blurred channel arrays back
// into a uchar4 RGBA image, forcing alpha to 255 (fully opaque).
__global__ void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA, int numRows,
int numCols) {
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
// Row-major flattening of the 2D pixel coordinate.
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
// make sure we don't try and access memory outside the image
// by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
// Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float* d_filter;
// Allocates the file-scope device buffers (d_red/d_green/d_blue and
// d_filter) and uploads the host filter weights. Must run before
// your_gaussian_blur; cleanup() releases these allocations.
void allocateMemoryAndCopyToGPU(const size_t numRowsImage,
const size_t numColsImage,
const float* const h_filter,
const size_t filterWidth) {
// allocate memory for the three different channels
// original
checkCudaErrors(
cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(
&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(
cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
// Allocate memory for the filter on the GPU
// Use the pointer d_filter that we have already declared for you
// You need to allocate memory for the filter with cudaMalloc
// be sure to use checkCudaErrors like the above examples to
// be able to tell if anything goes wrong
// IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc
checkCudaErrors(
cudaMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
// Copy the filter on the host (h_filter) to the memory you just allocated
// on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice);
// Remember to use checkCudaErrors!
checkCudaErrors(cudaMemcpy(d_filter, h_filter,
sizeof(float) * filterWidth * filterWidth,
cudaMemcpyHostToDevice));
}
// Orchestrates the blur: separates the RGBA image into channels, blurs each
// channel with the uploaded filter, and recombines the result into
// d_outputImageRGBA. Requires allocateMemoryAndCopyToGPU to have run first.
void your_gaussian_blur(const uchar4* const h_inputImageRGBA,
                        uchar4* const d_inputImageRGBA,
                        uchar4* const d_outputImageRGBA, const size_t numRows,
                        const size_t numCols, unsigned char* d_redBlurred,
                        unsigned char* d_greenBlurred,
                        unsigned char* d_blueBlurred, const int filterWidth) {
  // Dealing with an even width filter is trickier
  assert(filterWidth % 2 == 1);
  // 16x16 = 256 threads per block, a multiple of the warp size.
  const dim3 blockSize(16, 16, 1);
  // Exact ceiling division. The previous "1 + n/b" form launched one extra,
  // fully idle row/column of blocks whenever the dimension was a multiple of
  // the block dimension.
  const dim3 gridSize((numCols + blockSize.x - 1) / blockSize.x,
                      (numRows + blockSize.y - 1) / blockSize.y, 1);
  // Split the interleaved RGBA image into three contiguous channel arrays.
  separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA, numRows, numCols,
                                            d_red, d_green, d_blue);
  // Check both asynchronous execution errors (the now-checked synchronize)
  // and launch-configuration errors (cudaGetLastError).
  checkCudaErrors(cudaDeviceSynchronize());
  checkCudaErrors(cudaGetLastError());
  // Blur each color channel independently.
  gaussian_blur<<<gridSize, blockSize>>>(d_red, d_redBlurred, numRows, numCols,
                                         d_filter, filterWidth);
  gaussian_blur<<<gridSize, blockSize>>>(d_green, d_greenBlurred, numRows,
                                         numCols, d_filter, filterWidth);
  gaussian_blur<<<gridSize, blockSize>>>(d_blue, d_blueBlurred, numRows,
                                         numCols, d_filter, filterWidth);
  checkCudaErrors(cudaDeviceSynchronize());
  checkCudaErrors(cudaGetLastError());
  // Recombine the blurred channels into the RGBA output image.
  recombineChannels<<<gridSize, blockSize>>>(d_redBlurred, d_greenBlurred,
                                             d_blueBlurred, d_outputImageRGBA,
                                             numRows, numCols);
  checkCudaErrors(cudaDeviceSynchronize());
  checkCudaErrors(cudaGetLastError());
}
// Free all the memory that we allocated
// TODO: make sure you free any arrays that you allocated
// Releases the file-scope device allocations (three channel buffers and the
// filter weights) made in allocateMemoryAndCopyToGPU.
void cleanup() {
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
checkCudaErrors(cudaFree(d_filter));
}
|
ab46f79a25cb0822330efe8519809445ef7050b1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include <iostream>
#include <algorithm>
#include <iterator>
#define max(a, b) ((a > b) ? a : b)
// Full discrete convolution: conv[i] = sum_j v[j] * a[i - j], treating a[]
// as zero outside [0, a_size). One thread per output element; conv must
// hold a_size + v_size - 1 elements.
template <typename T>
__global__
void cuda_convolve_full(T *a, T *v, T *conv, long a_size, long v_size){
    long i = blockDim.x * blockIdx.x + threadIdx.x;
    // Guard BEFORE touching conv: the original stored conv[i] = 0 from every
    // thread, so surplus threads in the last block wrote out of bounds.
    if(i >= a_size + v_size - 1)
        return;
    // Accumulate in a register and store once, instead of read-modify-write
    // of global memory on every filter tap.
    T acc = 0;
    for(long j = 0; j < v_size; j++)
        acc += v[j] * ((i - j >= a_size || i - j < 0) ? 0 : a[i - j]);
    conv[i] = acc;
}
// Allocates a device buffer sized for `a` and copies the host data into it.
// Exits the process on any HIP error; the caller owns (and must hipFree)
// the returned device pointer.
template <typename T>
T* fary2cuda(const std::vector<T> &a){
    T *g;
    hipError_t err = hipMalloc((void**)&g, sizeof(T) * a.size());
    if(err) exit(err);
    // The copy can fail too (e.g. invalid pointer/context); check it the
    // same way the allocation is checked. Also take `a` by const reference
    // so the whole host vector is no longer copied on every call.
    err = hipMemcpy(g, a.data(), sizeof(T) * a.size(), hipMemcpyHostToDevice);
    if(err) exit(err);
    return g;
}
// Full GPU convolution of a with v; returns a host vector of length
// a.size() + v.size() - 1.
template <typename T>
std::vector<T> convolve(std::vector<T> a, std::vector<T> v){
    std::vector<T> conv(a.size() + v.size() - 1);
    T* ga = fary2cuda(a);
    T* gv = fary2cuda(v);
    T* gconv = fary2cuda(conv);
    // Launch one thread per OUTPUT element. The grid used to be sized as
    // (a.size()+256)/256 blocks, which left the tail of conv uncomputed
    // whenever v.size() exceeded the slack of one extra block.
    hipLaunchKernelGGL(( cuda_convolve_full), dim3((conv.size()+255)/256), dim3(256), 0, 0, ga, gv, gconv, a.size(), v.size());
    hipDeviceSynchronize();
    hipMemcpy(conv.data(), gconv, sizeof(T) * conv.size(), hipMemcpyDeviceToHost);
    hipFree(ga);
    hipFree(gv);
    hipFree(gconv);
    // NOTE(review): hipDeviceReset() tears down the entire device context on
    // every call; kept for behavioral compatibility, but callers that keep
    // other device allocations alive should remove it.
    hipDeviceReset();
    return conv;
}
// "Same" mode: the max(a.size(), v.size()) central elements of the full
// convolution (numpy.convolve mode='same' semantics).
template <typename T>
std::vector<T> convolve_same(std::vector<T> a, std::vector<T> v){
    std::vector<T> conv(max(a.size(), v.size()));
    // First index of the centered window inside the full result.
    long sidx = (int)round(v.size() / 2.0) - 1;
    std::vector<T> conv_full = convolve(a, v);
    // std::copy instead of memcpy: type-safe for any T and does not rely on
    // <cstring> being pulled in transitively by other headers.
    std::copy(conv_full.begin() + sidx,
              conv_full.begin() + sidx + conv.size(), conv.begin());
    return conv;
}
// "Valid" mode: only the outputs where the two sequences overlap completely
// (numpy.convolve mode='valid' semantics), i.e. max-min+1 elements.
template <typename T>
std::vector<T> convolve_valid(std::vector<T> a, std::vector<T> v){
    std::vector<T> conv(max(a.size(), v.size()) - min(a.size(), v.size()) + 1);
    // Fully-overlapped outputs start after the first v.size()-1 partials.
    long sidx = v.size() - 1;
    std::vector<T> conv_full = convolve(a, v);
    // std::copy instead of memcpy: type-safe for any T and does not rely on
    // <cstring> being pulled in transitively by other headers.
    std::copy(conv_full.begin() + sidx,
              conv_full.begin() + sidx + conv.size(), conv.begin());
    return conv;
}
template std::vector<float> convolve<float>(std::vector<float>, std::vector<float>);
template std::vector<float> convolve_valid<float>(std::vector<float>, std::vector<float>);
template std::vector<float> convolve_same<float>(std::vector<float>, std::vector<float>);
template std::vector<double> convolve<double>(std::vector<double>, std::vector<double>);
template std::vector<double> convolve_valid<double>(std::vector<double>, std::vector<double>);
template std::vector<double> convolve_same<double>(std::vector<double>, std::vector<double>);
| ab46f79a25cb0822330efe8519809445ef7050b1.cu | #include <vector>
#include <iostream>
#include <algorithm>
#include <iterator>
#define max(a, b) ((a > b) ? a : b)
// Full discrete convolution: conv[i] = sum_j v[j] * a[i - j], treating a[]
// as zero outside [0, a_size). One thread per output element; conv must
// hold a_size + v_size - 1 elements.
template <typename T>
__global__
void cuda_convolve_full(T *a, T *v, T *conv, long a_size, long v_size){
    long i = blockDim.x * blockIdx.x + threadIdx.x;
    // Guard BEFORE touching conv: the original stored conv[i] = 0 from every
    // thread, so surplus threads in the last block wrote out of bounds.
    if(i >= a_size + v_size - 1)
        return;
    // Accumulate in a register and store once, instead of read-modify-write
    // of global memory on every filter tap.
    T acc = 0;
    for(long j = 0; j < v_size; j++)
        acc += v[j] * ((i - j >= a_size || i - j < 0) ? 0 : a[i - j]);
    conv[i] = acc;
}
// Allocates a device buffer sized for `a` and copies the host data into it.
// Exits the process on any CUDA error; the caller owns (and must cudaFree)
// the returned device pointer.
template <typename T>
T* fary2cuda(const std::vector<T> &a){
    T *g;
    cudaError_t err = cudaMalloc((void**)&g, sizeof(T) * a.size());
    if(err) exit(err);
    // The copy can fail too (e.g. invalid pointer/context); check it the
    // same way the allocation is checked. Also take `a` by const reference
    // so the whole host vector is no longer copied on every call.
    err = cudaMemcpy(g, a.data(), sizeof(T) * a.size(), cudaMemcpyHostToDevice);
    if(err) exit(err);
    return g;
}
// Full GPU convolution of a with v; returns a host vector of length
// a.size() + v.size() - 1.
template <typename T>
std::vector<T> convolve(std::vector<T> a, std::vector<T> v){
    std::vector<T> conv(a.size() + v.size() - 1);
    T* ga = fary2cuda(a);
    T* gv = fary2cuda(v);
    T* gconv = fary2cuda(conv);
    // Launch one thread per OUTPUT element. The grid used to be sized as
    // (a.size()+256)/256 blocks, which left the tail of conv uncomputed
    // whenever v.size() exceeded the slack of one extra block.
    cuda_convolve_full<<<(conv.size()+255)/256, 256>>>(ga, gv, gconv, a.size(), v.size());
    cudaDeviceSynchronize();
    cudaMemcpy(conv.data(), gconv, sizeof(T) * conv.size(), cudaMemcpyDeviceToHost);
    cudaFree(ga);
    cudaFree(gv);
    cudaFree(gconv);
    // NOTE(review): cudaDeviceReset() tears down the entire device context on
    // every call; kept for behavioral compatibility, but callers that keep
    // other device allocations alive should remove it.
    cudaDeviceReset();
    return conv;
}
// "Same" mode: the max(a.size(), v.size()) central elements of the full
// convolution (numpy.convolve mode='same' semantics).
template <typename T>
std::vector<T> convolve_same(std::vector<T> a, std::vector<T> v){
    std::vector<T> conv(max(a.size(), v.size()));
    // First index of the centered window inside the full result.
    long sidx = (int)round(v.size() / 2.0) - 1;
    std::vector<T> conv_full = convolve(a, v);
    // std::copy instead of memcpy: type-safe for any T and does not rely on
    // <cstring> being pulled in transitively by other headers.
    std::copy(conv_full.begin() + sidx,
              conv_full.begin() + sidx + conv.size(), conv.begin());
    return conv;
}
// "Valid" mode: only the outputs where the two sequences overlap completely
// (numpy.convolve mode='valid' semantics), i.e. max-min+1 elements.
template <typename T>
std::vector<T> convolve_valid(std::vector<T> a, std::vector<T> v){
    std::vector<T> conv(max(a.size(), v.size()) - min(a.size(), v.size()) + 1);
    // Fully-overlapped outputs start after the first v.size()-1 partials.
    long sidx = v.size() - 1;
    std::vector<T> conv_full = convolve(a, v);
    // std::copy instead of memcpy: type-safe for any T and does not rely on
    // <cstring> being pulled in transitively by other headers.
    std::copy(conv_full.begin() + sidx,
              conv_full.begin() + sidx + conv.size(), conv.begin());
    return conv;
}
template std::vector<float> convolve<float>(std::vector<float>, std::vector<float>);
template std::vector<float> convolve_valid<float>(std::vector<float>, std::vector<float>);
template std::vector<float> convolve_same<float>(std::vector<float>, std::vector<float>);
template std::vector<double> convolve<double>(std::vector<double>, std::vector<double>);
template std::vector<double> convolve_valid<double>(std::vector<double>, std::vector<double>);
template std::vector<double> convolve_same<double>(std::vector<double>, std::vector<double>);
|
fbbd2a341c6f2a1a2a8b2935da6e5139e2a70227.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// This is the REAL "hello world" for CUDA!
// It takes the string "Hello ", prints it, then passes it to CUDA with an array
// of offsets. Then the offsets are added in parallel to produce the string "World!"
// By Ingemar Ragnemalm 2010
#include <stdio.h>
const int N = 16;
const int blocksize = 16;
// One thread per character: adds the per-thread offset b[i] to a[i] in
// place, turning "Hello " into "World!" when launched with N threads.
__global__
void hello(char *a, int *b)
{
a[threadIdx.x] += b[threadIdx.x];
}
int main()
{
char a[N] = "Hello \0\0\0\0\0\0";
int b[N] = {15, 10, 6, 0, -11, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
char *d_a;
int *d_b;
const int csize = N*sizeof(char);
const int isize = N*sizeof(int);
printf("%s", a);
hipMalloc( (void**)&d_a, csize );
hipMalloc( (void**)&d_b, isize );
hipMemcpy( d_a, a, csize, hipMemcpyHostToDevice );
hipMemcpy( d_b, b, isize, hipMemcpyHostToDevice );
dim3 dimBlock( blocksize, 1 );
dim3 dimGrid( 1, 1 );
hipLaunchKernelGGL(( hello), dim3(dimGrid), dim3(dimBlock), 0, 0, d_a, d_b);
hipMemcpy( a, d_a, csize, hipMemcpyDeviceToHost );
hipFree( d_a );
hipFree( d_b );
printf("%s\n", a);
return EXIT_SUCCESS;
} | fbbd2a341c6f2a1a2a8b2935da6e5139e2a70227.cu | // This is the REAL "hello world" for CUDA!
// It takes the string "Hello ", prints it, then passes it to CUDA with an array
// of offsets. Then the offsets are added in parallel to produce the string "World!"
// By Ingemar Ragnemalm 2010
#include <stdio.h>
const int N = 16;
const int blocksize = 16;
// One thread per character: adds the per-thread offset b[i] to a[i] in
// place, turning "Hello " into "World!" when launched with N threads.
__global__
void hello(char *a, int *b)
{
a[threadIdx.x] += b[threadIdx.x];
}
int main()
{
char a[N] = "Hello \0\0\0\0\0\0";
int b[N] = {15, 10, 6, 0, -11, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
char *d_a;
int *d_b;
const int csize = N*sizeof(char);
const int isize = N*sizeof(int);
printf("%s", a);
cudaMalloc( (void**)&d_a, csize );
cudaMalloc( (void**)&d_b, isize );
cudaMemcpy( d_a, a, csize, cudaMemcpyHostToDevice );
cudaMemcpy( d_b, b, isize, cudaMemcpyHostToDevice );
dim3 dimBlock( blocksize, 1 );
dim3 dimGrid( 1, 1 );
hello<<<dimGrid, dimBlock>>>(d_a, d_b);
cudaMemcpy( a, d_a, csize, cudaMemcpyDeviceToHost );
cudaFree( d_a );
cudaFree( d_b );
printf("%s\n", a);
return EXIT_SUCCESS;
} |
1533875aec531e99f70a5b55ca8b556f6fe71b1a.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright 2020 California Institute of Technology. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Author: Ethan Jaszewski
#include "ls.h"
#include <algorithm>
#include <cstdio>
#include "hip/hip_runtime.h"
#include "math.h"
#include "errchk.cuh"
const float TWO_PI = M_PI * 2.0;
//
// Simple LombScargle Function Definitions
//
// Default constructor: the Lomb-Scargle routines are stateless, so there is
// nothing to initialize.
LombScargle::LombScargle() {}
//
// CUDA Kernels
//
// One thread per (period, period_dt) trial: phase-folds the light curve
// with a linear period-derivative correction, then evaluates the
// Lomb-Scargle power for that trial and writes it to
// periodogram[period_idx * num_period_dts + period_dt_idx].
__global__ void LombScargleKernel(const float* times,
                                  const float* mags,
                                  const size_t length,
                                  const float* periods,
                                  const float* period_dts,
                                  const size_t num_periods,
                                  const size_t num_period_dts,
                                  const LombScargle params,
                                  float* periodogram) {
    const size_t thread_x = threadIdx.x + blockIdx.x * blockDim.x;
    const size_t thread_y = threadIdx.y + blockIdx.y * blockDim.y;
    // Surplus threads in the trailing blocks do nothing.
    if (thread_x >= num_periods || thread_y >= num_period_dts) {
        return;
    }
    // Period and period time derivative for this trial.
    const float period = periods[thread_x];
    const float period_dt = period_dts[thread_y];
    // Time derivative correction factor.
    const float pdt_corr = (period_dt / period) / 2.0f;
    // Float ('f') literals throughout: the previous 0.0/0.5/2.0 double
    // constants silently promoted this float kernel to fp64 arithmetic.
    float mag_cos = 0.0f;
    float mag_sin = 0.0f;
    float cos_cos = 0.0f;
    float cos_sin = 0.0f;
    float cos, sin, i_part;
    for (size_t idx = 0; idx < length; idx++) {
        float t = times[idx];
        float mag = mags[idx];
        // Apply the dP/dt correction, then fold onto [0, 1) phase.
        float t_corr = t - pdt_corr * t * t;
        float folded = fabsf(modff(t_corr / period, &i_part));
        sincosf(TWO_PI * folded, &sin, &cos);
        mag_cos += mag * cos;
        mag_sin += mag * sin;
        cos_cos += cos * cos;
        cos_sin += cos * sin;
    }
    // sum(sin^2) = N - sum(cos^2) for unit-amplitude terms.
    float sin_sin = static_cast<float>(length) - cos_cos;
    // Classic Lomb-Scargle time offset tau.
    float cos_tau, sin_tau;
    sincosf(0.5f * atan2f(2.0f * cos_sin, cos_cos - sin_sin), &sin_tau, &cos_tau);
    float numerator_l = cos_tau * mag_cos + sin_tau * mag_sin;
    numerator_l *= numerator_l;
    float numerator_r = cos_tau * mag_sin - sin_tau * mag_cos;
    numerator_r *= numerator_r;
    float denominator_l = cos_tau * cos_tau * cos_cos
                          + 2.0f * cos_tau * sin_tau * cos_sin
                          + sin_tau * sin_tau * sin_sin;
    float denominator_r = cos_tau * cos_tau * sin_sin
                          - 2.0f * cos_tau * sin_tau * cos_sin
                          + sin_tau * sin_tau * cos_cos;
    periodogram[thread_x * num_period_dts + thread_y] =
        0.5f * ((numerator_l / denominator_l) + (numerator_r / denominator_r));
}
//
// Wrapper Functions
//
// Launches the periodogram kernel over all (period, period_dt) trials.
// Every pointer argument must be a DEVICE pointer; the caller owns (and
// must hipFree) the returned num_periods x num_p_dts device array.
float* LombScargle::DeviceCalcLS(const float* times,
                                 const float* mags,
                                 const size_t length,
                                 const float* periods,
                                 const float* period_dts,
                                 const size_t num_periods,
                                 const size_t num_p_dts) const {
    float* periodogram;
    gpuErrchk(
        hipMalloc(&periodogram, num_periods * num_p_dts * sizeof(float)));
    // 256x1 blocks: trial periods along x, period derivatives along y.
    const size_t x_threads = 256;
    const size_t y_threads = 1;
    const size_t x_blocks = ((num_periods + x_threads - 1) / x_threads);
    const size_t y_blocks = ((num_p_dts + y_threads - 1) / y_threads);
    const dim3 block_dim = dim3(x_threads, y_threads);
    const dim3 grid_dim = dim3(x_blocks, y_blocks);
    hipLaunchKernelGGL(( LombScargleKernel), dim3(grid_dim), dim3(block_dim), 0, 0, times, mags, length, periods,
                                               period_dts, num_periods,
                                               num_p_dts, *this, periodogram);
    // Surface launch-configuration errors immediately; every other HIP call
    // in this file is already wrapped in gpuErrchk.
    gpuErrchk(hipGetLastError());
    return periodogram;
}
// Host-convenience wrapper: uploads the light curve and the trial grids,
// runs DeviceCalcLS, and downloads the result. Returns a host array of
// num_periods x num_p_dts floats that the caller must free().
float* LombScargle::CalcLS(const float* times,
                           const float* mags,
                           const size_t length,
                           const float* periods,
                           const float* period_dts,
                           const size_t num_periods,
                           const size_t num_p_dts) const {
    // Number of bytes of input data
    const size_t data_bytes = length * sizeof(float);
    // Allocate device pointers
    float* dev_times;
    float* dev_mags;
    float* dev_periods;
    float* dev_period_dts;
    gpuErrchk(hipMalloc(&dev_times, data_bytes));
    gpuErrchk(hipMalloc(&dev_mags, data_bytes));
    gpuErrchk(hipMalloc(&dev_periods, num_periods * sizeof(float)));
    gpuErrchk(hipMalloc(&dev_period_dts, num_p_dts * sizeof(float)));
    // Copy data to device memory
    gpuErrchk(hipMemcpy(dev_times, times, data_bytes, hipMemcpyHostToDevice));
    gpuErrchk(hipMemcpy(dev_mags, mags, data_bytes, hipMemcpyHostToDevice));
    gpuErrchk(hipMemcpy(dev_periods, periods, num_periods * sizeof(float),
                        hipMemcpyHostToDevice));
    gpuErrchk(hipMemcpy(dev_period_dts, period_dts, num_p_dts * sizeof(float),
                        hipMemcpyHostToDevice));
    float* dev_periodogram =
        DeviceCalcLS(dev_times, dev_mags, length, dev_periods, dev_period_dts,
                     num_periods, num_p_dts);
    const size_t periodogram_size = num_periods * num_p_dts * sizeof(float);
    float* periodogram = (float*)malloc(periodogram_size);
    // This blocking copy doubles as the kernel's synchronization point; it
    // was the only unchecked HIP call in this function, so wrap it too.
    gpuErrchk(hipMemcpy(periodogram, dev_periodogram, periodogram_size,
                        hipMemcpyDeviceToHost));
    gpuErrchk(hipFree(dev_periodogram));
    gpuErrchk(hipFree(dev_times));
    gpuErrchk(hipFree(dev_mags));
    gpuErrchk(hipFree(dev_periods));
    gpuErrchk(hipFree(dev_period_dts));
    return periodogram;
}
// Computes one periodogram per light curve, reusing a single pair of device
// staging buffers sized for the longest curve. Returns a host array of
// lengths.size() concatenated (num_periods x num_p_dts) periodograms that
// the caller must free().
float* LombScargle::CalcLSBatched(const std::vector<float*>& times,
                                  const std::vector<float*>& mags,
                                  const std::vector<size_t>& lengths,
                                  const float* periods,
                                  const float* period_dts,
                                  const size_t num_periods,
                                  const size_t num_p_dts) const {
    // TODO: Use async memory transferring
    // TODO: Look at ways of batching data transfer.
    // Size of one periodogram, and of the whole batched output.
    size_t per_points = num_periods * num_p_dts;
    size_t per_out_size = per_points * sizeof(float);
    size_t per_size_total = per_out_size * lengths.size();
    // Allocate the host output array so we can copy into it per curve.
    float* per_host = (float*)malloc(per_size_total);
    // Empty batch: nothing to do, and *max_element below would be UB.
    if (lengths.empty()) {
        return per_host;
    }
    // Copy trial information over (shared by every curve).
    float* dev_periods;
    float* dev_period_dts;
    gpuErrchk(hipMalloc(&dev_periods, num_periods * sizeof(float)));
    gpuErrchk(hipMalloc(&dev_period_dts, num_p_dts * sizeof(float)));
    gpuErrchk(hipMemcpy(dev_periods, periods, num_periods * sizeof(float),
                        hipMemcpyHostToDevice));
    gpuErrchk(hipMemcpy(dev_period_dts, period_dts, num_p_dts * sizeof(float),
                        hipMemcpyHostToDevice));
    // Device scratch for one periodogram at a time.
    float* dev_per;
    gpuErrchk(hipMalloc(&dev_per, per_out_size));
    // Kernel launch configuration: trials along x, derivatives along y.
    const size_t x_threads = 256;
    const size_t y_threads = 1;
    const size_t x_blocks = ((num_periods + x_threads - 1) / x_threads);
    const size_t y_blocks = ((num_p_dts + y_threads - 1) / y_threads);
    const dim3 block_dim = dim3(x_threads, y_threads);
    const dim3 grid_dim = dim3(x_blocks, y_blocks);
    // Staging buffers sized for the longest light curve.
    auto max_length = std::max_element(lengths.begin(), lengths.end());
    const size_t buffer_length = *max_length;
    const size_t buffer_bytes = sizeof(float) * buffer_length;
    float* dev_times_buffer;
    float* dev_mags_buffer;
    gpuErrchk(hipMalloc(&dev_times_buffer, buffer_bytes));
    gpuErrchk(hipMalloc(&dev_mags_buffer, buffer_bytes));
    for (size_t i = 0; i < lengths.size(); i++) {
        // Copy this light curve into the staging buffers. These copies were
        // previously unchecked, unlike every other call in this method.
        const size_t curve_bytes = lengths[i] * sizeof(float);
        gpuErrchk(hipMemcpy(dev_times_buffer, times[i], curve_bytes,
                            hipMemcpyHostToDevice));
        gpuErrchk(hipMemcpy(dev_mags_buffer, mags[i], curve_bytes,
                            hipMemcpyHostToDevice));
        // Zero the periodogram output for this curve.
        gpuErrchk(hipMemset(dev_per, 0, per_out_size));
        hipLaunchKernelGGL(( LombScargleKernel), dim3(grid_dim), dim3(block_dim), 0, 0,
            dev_times_buffer, dev_mags_buffer, lengths[i], dev_periods,
            dev_period_dts, num_periods, num_p_dts, *this, dev_per);
        // Surface launch-configuration errors before the blocking copy.
        gpuErrchk(hipGetLastError());
        // Copy this curve's periodogram into its slot in the host array.
        gpuErrchk(hipMemcpy(&per_host[i * per_points], dev_per, per_out_size,
                            hipMemcpyDeviceToHost));
    }
    // Free all of the GPU memory
    gpuErrchk(hipFree(dev_periods));
    gpuErrchk(hipFree(dev_period_dts));
    gpuErrchk(hipFree(dev_per));
    gpuErrchk(hipFree(dev_times_buffer));
    gpuErrchk(hipFree(dev_mags_buffer));
    return per_host;
}
| 1533875aec531e99f70a5b55ca8b556f6fe71b1a.cu | // Copyright 2020 California Institute of Technology. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Author: Ethan Jaszewski
#include "ls.h"
#include <algorithm>
#include <cstdio>
#include "cuda_runtime.h"
#include "math.h"
#include "errchk.cuh"
const float TWO_PI = M_PI * 2.0;
//
// Simple LombScargle Function Definitions
//
// Default constructor: the Lomb-Scargle routines are stateless, so there is
// nothing to initialize.
LombScargle::LombScargle() {}
//
// CUDA Kernels
//
// One thread per (period, period_dt) trial: phase-folds the light curve
// with a linear period-derivative correction, then evaluates the
// Lomb-Scargle power for that trial and writes it to
// periodogram[period_idx * num_period_dts + period_dt_idx].
__global__ void LombScargleKernel(const float* times,
                                  const float* mags,
                                  const size_t length,
                                  const float* periods,
                                  const float* period_dts,
                                  const size_t num_periods,
                                  const size_t num_period_dts,
                                  const LombScargle params,
                                  float* periodogram) {
    const size_t thread_x = threadIdx.x + blockIdx.x * blockDim.x;
    const size_t thread_y = threadIdx.y + blockIdx.y * blockDim.y;
    // Surplus threads in the trailing blocks do nothing.
    if (thread_x >= num_periods || thread_y >= num_period_dts) {
        return;
    }
    // Period and period time derivative for this trial.
    const float period = periods[thread_x];
    const float period_dt = period_dts[thread_y];
    // Time derivative correction factor.
    const float pdt_corr = (period_dt / period) / 2.0f;
    // Float ('f') literals throughout: the previous 0.0/0.5/2.0 double
    // constants silently promoted this float kernel to fp64 arithmetic.
    float mag_cos = 0.0f;
    float mag_sin = 0.0f;
    float cos_cos = 0.0f;
    float cos_sin = 0.0f;
    float cos, sin, i_part;
    for (size_t idx = 0; idx < length; idx++) {
        float t = times[idx];
        float mag = mags[idx];
        // Apply the dP/dt correction, then fold onto [0, 1) phase.
        float t_corr = t - pdt_corr * t * t;
        float folded = fabsf(modff(t_corr / period, &i_part));
        sincosf(TWO_PI * folded, &sin, &cos);
        mag_cos += mag * cos;
        mag_sin += mag * sin;
        cos_cos += cos * cos;
        cos_sin += cos * sin;
    }
    // sum(sin^2) = N - sum(cos^2) for unit-amplitude terms.
    float sin_sin = static_cast<float>(length) - cos_cos;
    // Classic Lomb-Scargle time offset tau.
    float cos_tau, sin_tau;
    sincosf(0.5f * atan2f(2.0f * cos_sin, cos_cos - sin_sin), &sin_tau, &cos_tau);
    float numerator_l = cos_tau * mag_cos + sin_tau * mag_sin;
    numerator_l *= numerator_l;
    float numerator_r = cos_tau * mag_sin - sin_tau * mag_cos;
    numerator_r *= numerator_r;
    float denominator_l = cos_tau * cos_tau * cos_cos
                          + 2.0f * cos_tau * sin_tau * cos_sin
                          + sin_tau * sin_tau * sin_sin;
    float denominator_r = cos_tau * cos_tau * sin_sin
                          - 2.0f * cos_tau * sin_tau * cos_sin
                          + sin_tau * sin_tau * cos_cos;
    periodogram[thread_x * num_period_dts + thread_y] =
        0.5f * ((numerator_l / denominator_l) + (numerator_r / denominator_r));
}
//
// Wrapper Functions
//
// Launches the periodogram kernel over all (period, period_dt) trials.
// Every pointer argument must be a DEVICE pointer; the caller owns (and
// must cudaFree) the returned num_periods x num_p_dts device array.
float* LombScargle::DeviceCalcLS(const float* times,
                                 const float* mags,
                                 const size_t length,
                                 const float* periods,
                                 const float* period_dts,
                                 const size_t num_periods,
                                 const size_t num_p_dts) const {
    float* periodogram;
    gpuErrchk(
        cudaMalloc(&periodogram, num_periods * num_p_dts * sizeof(float)));
    // 256x1 blocks: trial periods along x, period derivatives along y.
    const size_t x_threads = 256;
    const size_t y_threads = 1;
    const size_t x_blocks = ((num_periods + x_threads - 1) / x_threads);
    const size_t y_blocks = ((num_p_dts + y_threads - 1) / y_threads);
    const dim3 block_dim = dim3(x_threads, y_threads);
    const dim3 grid_dim = dim3(x_blocks, y_blocks);
    LombScargleKernel<<<grid_dim, block_dim>>>(times, mags, length, periods,
                                               period_dts, num_periods,
                                               num_p_dts, *this, periodogram);
    // Surface launch-configuration errors immediately; every other CUDA
    // call in this file is already wrapped in gpuErrchk.
    gpuErrchk(cudaGetLastError());
    return periodogram;
}
// Host-convenience wrapper: uploads the light curve and the trial grids,
// runs DeviceCalcLS, and downloads the result. Returns a host array of
// num_periods x num_p_dts floats that the caller must free().
float* LombScargle::CalcLS(const float* times,
                           const float* mags,
                           const size_t length,
                           const float* periods,
                           const float* period_dts,
                           const size_t num_periods,
                           const size_t num_p_dts) const {
    // Number of bytes of input data
    const size_t data_bytes = length * sizeof(float);
    // Allocate device pointers
    float* dev_times;
    float* dev_mags;
    float* dev_periods;
    float* dev_period_dts;
    gpuErrchk(cudaMalloc(&dev_times, data_bytes));
    gpuErrchk(cudaMalloc(&dev_mags, data_bytes));
    gpuErrchk(cudaMalloc(&dev_periods, num_periods * sizeof(float)));
    gpuErrchk(cudaMalloc(&dev_period_dts, num_p_dts * sizeof(float)));
    // Copy data to device memory
    gpuErrchk(cudaMemcpy(dev_times, times, data_bytes, cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(dev_mags, mags, data_bytes, cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(dev_periods, periods, num_periods * sizeof(float),
                         cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(dev_period_dts, period_dts, num_p_dts * sizeof(float),
                         cudaMemcpyHostToDevice));
    float* dev_periodogram =
        DeviceCalcLS(dev_times, dev_mags, length, dev_periods, dev_period_dts,
                     num_periods, num_p_dts);
    const size_t periodogram_size = num_periods * num_p_dts * sizeof(float);
    float* periodogram = (float*)malloc(periodogram_size);
    // This blocking copy doubles as the kernel's synchronization point; it
    // was the only unchecked CUDA call in this function, so wrap it too.
    gpuErrchk(cudaMemcpy(periodogram, dev_periodogram, periodogram_size,
                         cudaMemcpyDeviceToHost));
    gpuErrchk(cudaFree(dev_periodogram));
    gpuErrchk(cudaFree(dev_times));
    gpuErrchk(cudaFree(dev_mags));
    gpuErrchk(cudaFree(dev_periods));
    gpuErrchk(cudaFree(dev_period_dts));
    return periodogram;
}
// Computes one periodogram per light curve, reusing a single pair of device
// staging buffers sized for the longest curve. Returns a host array of
// lengths.size() concatenated (num_periods x num_p_dts) periodograms that
// the caller must free().
float* LombScargle::CalcLSBatched(const std::vector<float*>& times,
                                  const std::vector<float*>& mags,
                                  const std::vector<size_t>& lengths,
                                  const float* periods,
                                  const float* period_dts,
                                  const size_t num_periods,
                                  const size_t num_p_dts) const {
    // TODO: Use async memory transferring
    // TODO: Look at ways of batching data transfer.
    // Size of one periodogram, and of the whole batched output.
    size_t per_points = num_periods * num_p_dts;
    size_t per_out_size = per_points * sizeof(float);
    size_t per_size_total = per_out_size * lengths.size();
    // Allocate the host output array so we can copy into it per curve.
    float* per_host = (float*)malloc(per_size_total);
    // Empty batch: nothing to do, and *max_element below would be UB.
    if (lengths.empty()) {
        return per_host;
    }
    // Copy trial information over (shared by every curve).
    float* dev_periods;
    float* dev_period_dts;
    gpuErrchk(cudaMalloc(&dev_periods, num_periods * sizeof(float)));
    gpuErrchk(cudaMalloc(&dev_period_dts, num_p_dts * sizeof(float)));
    gpuErrchk(cudaMemcpy(dev_periods, periods, num_periods * sizeof(float),
                         cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(dev_period_dts, period_dts, num_p_dts * sizeof(float),
                         cudaMemcpyHostToDevice));
    // Device scratch for one periodogram at a time.
    float* dev_per;
    gpuErrchk(cudaMalloc(&dev_per, per_out_size));
    // Kernel launch configuration: trials along x, derivatives along y.
    const size_t x_threads = 256;
    const size_t y_threads = 1;
    const size_t x_blocks = ((num_periods + x_threads - 1) / x_threads);
    const size_t y_blocks = ((num_p_dts + y_threads - 1) / y_threads);
    const dim3 block_dim = dim3(x_threads, y_threads);
    const dim3 grid_dim = dim3(x_blocks, y_blocks);
    // Staging buffers sized for the longest light curve.
    auto max_length = std::max_element(lengths.begin(), lengths.end());
    const size_t buffer_length = *max_length;
    const size_t buffer_bytes = sizeof(float) * buffer_length;
    float* dev_times_buffer;
    float* dev_mags_buffer;
    gpuErrchk(cudaMalloc(&dev_times_buffer, buffer_bytes));
    gpuErrchk(cudaMalloc(&dev_mags_buffer, buffer_bytes));
    for (size_t i = 0; i < lengths.size(); i++) {
        // Copy this light curve into the staging buffers. These copies were
        // previously unchecked, unlike every other call in this method.
        const size_t curve_bytes = lengths[i] * sizeof(float);
        gpuErrchk(cudaMemcpy(dev_times_buffer, times[i], curve_bytes,
                             cudaMemcpyHostToDevice));
        gpuErrchk(cudaMemcpy(dev_mags_buffer, mags[i], curve_bytes,
                             cudaMemcpyHostToDevice));
        // Zero the periodogram output for this curve.
        gpuErrchk(cudaMemset(dev_per, 0, per_out_size));
        LombScargleKernel<<<grid_dim, block_dim>>>(
            dev_times_buffer, dev_mags_buffer, lengths[i], dev_periods,
            dev_period_dts, num_periods, num_p_dts, *this, dev_per);
        // Surface launch-configuration errors before the blocking copy.
        gpuErrchk(cudaGetLastError());
        // Copy this curve's periodogram into its slot in the host array.
        gpuErrchk(cudaMemcpy(&per_host[i * per_points], dev_per, per_out_size,
                             cudaMemcpyDeviceToHost));
    }
    // Free all of the GPU memory
    gpuErrchk(cudaFree(dev_periods));
    gpuErrchk(cudaFree(dev_period_dts));
    gpuErrchk(cudaFree(dev_per));
    gpuErrchk(cudaFree(dev_times_buffer));
    gpuErrchk(cudaFree(dev_mags_buffer));
    return per_host;
}
|
a6811577345051db7d1a9cca8dfb8b02e10351a4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define STORE_DERIVATIVE_1(INDEX) atomicAdd(&derivBuffers[offset+(INDEX-1)*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (deriv##INDEX##_1*0x100000000)));
#define STORE_DERIVATIVE_2(INDEX) atomicAdd(&derivBuffers[offset+(INDEX-1)*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].deriv##INDEX*0x100000000)));
typedef struct {
real3 pos;
real3 force;
ATOM_PARAMETER_DATA
#ifdef NEED_PADDING
float padding;
#endif
} AtomData;
/**
* Compute a force based on pair interactions.
*/
extern "C" __global__ void computeN2Energy(unsigned long long* __restrict__ forceBuffers, mixed* __restrict__ energyBuffer,
const real4* __restrict__ posq, const unsigned int* __restrict__ exclusions, const ushort2* __restrict__ exclusionTiles,
#ifdef USE_CUTOFF
const int* __restrict__ tiles, const unsigned int* __restrict__ interactionCount, real4 periodicBoxSize, real4 invPeriodicBoxSize,
real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ, unsigned int maxTiles, const real4* __restrict__ blockCenter,
const real4* __restrict__ blockSize, const unsigned int* __restrict__ interactingAtoms
#else
unsigned int numTiles
#endif
PARAMETER_ARGUMENTS) {
const unsigned int totalWarps = (blockDim.x*gridDim.x)/TILE_SIZE;
const unsigned int warp = (blockIdx.x*blockDim.x+threadIdx.x)/TILE_SIZE;
const unsigned int tgx = threadIdx.x & (TILE_SIZE-1);
const unsigned int tbx = threadIdx.x - tgx;
mixed energy = 0;
__shared__ AtomData localData[THREAD_BLOCK_SIZE];
// First loop: process tiles that contain exclusions.
const unsigned int firstExclusionTile = FIRST_EXCLUSION_TILE+warp*(LAST_EXCLUSION_TILE-FIRST_EXCLUSION_TILE)/totalWarps;
const unsigned int lastExclusionTile = FIRST_EXCLUSION_TILE+(warp+1)*(LAST_EXCLUSION_TILE-FIRST_EXCLUSION_TILE)/totalWarps;
for (int pos = firstExclusionTile; pos < lastExclusionTile; pos++) {
const ushort2 tileIndices = exclusionTiles[pos];
const unsigned int x = tileIndices.x;
const unsigned int y = tileIndices.y;
real3 force = make_real3(0);
DECLARE_ATOM1_DERIVATIVES
unsigned int atom1 = x*TILE_SIZE + tgx;
real4 pos1 = posq[atom1];
LOAD_ATOM1_PARAMETERS
#ifdef USE_EXCLUSIONS
unsigned int excl = exclusions[pos*TILE_SIZE+tgx];
#endif
if (x == y) {
// This tile is on the diagonal.
const unsigned int localAtomIndex = threadIdx.x;
localData[localAtomIndex].pos = make_real3(pos1.x, pos1.y, pos1.z);
LOAD_LOCAL_PARAMETERS_FROM_1
for (unsigned int j = 0; j < TILE_SIZE; j++) {
int atom2 = tbx+j;
real3 pos2 = localData[atom2].pos;
real3 delta = make_real3(pos2.x-pos1.x, pos2.y-pos1.y, pos2.z-pos1.z);
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(delta)
#endif
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
#ifdef USE_CUTOFF
if (r2 < CUTOFF_SQUARED) {
#endif
real invR = RSQRT(r2);
real r = r2*invR;
LOAD_ATOM2_PARAMETERS
atom2 = y*TILE_SIZE+j;
real dEdR = 0;
real tempEnergy = 0;
#ifdef USE_EXCLUSIONS
bool isExcluded = !(excl & 0x1);
#endif
if (atom1 < NUM_ATOMS && atom2 < NUM_ATOMS && atom1 != atom2) {
COMPUTE_INTERACTION
dEdR /= -r;
}
energy += 0.5f*tempEnergy;
delta *= dEdR;
force.x -= delta.x;
force.y -= delta.y;
force.z -= delta.z;
#ifdef USE_CUTOFF
}
#endif
#ifdef USE_EXCLUSIONS
excl >>= 1;
#endif
}
}
else {
// This is an off-diagonal tile.
const unsigned int localAtomIndex = threadIdx.x;
unsigned int j = y*TILE_SIZE + tgx;
real4 tempPosq = posq[j];
localData[localAtomIndex].pos = make_real3(tempPosq.x, tempPosq.y, tempPosq.z);
LOAD_LOCAL_PARAMETERS_FROM_GLOBAL
localData[localAtomIndex].force = make_real3(0);
CLEAR_LOCAL_DERIVATIVES
#ifdef USE_EXCLUSIONS
excl = (excl >> tgx) | (excl << (TILE_SIZE - tgx));
#endif
unsigned int tj = tgx;
for (j = 0; j < TILE_SIZE; j++) {
int atom2 = tbx+tj;
real3 pos2 = localData[atom2].pos;
real3 delta = make_real3(pos2.x-pos1.x, pos2.y-pos1.y, pos2.z-pos1.z);
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(delta)
#endif
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
#ifdef USE_CUTOFF
if (r2 < CUTOFF_SQUARED) {
#endif
real invR = RSQRT(r2);
real r = r2*invR;
LOAD_ATOM2_PARAMETERS
atom2 = y*TILE_SIZE+tj;
real dEdR = 0;
real tempEnergy = 0;
#ifdef USE_EXCLUSIONS
bool isExcluded = !(excl & 0x1);
#endif
if (atom1 < NUM_ATOMS && atom2 < NUM_ATOMS) {
COMPUTE_INTERACTION
dEdR /= -r;
}
energy += tempEnergy;
delta *= dEdR;
force.x -= delta.x;
force.y -= delta.y;
force.z -= delta.z;
atom2 = tbx+tj;
localData[atom2].force.x += delta.x;
localData[atom2].force.y += delta.y;
localData[atom2].force.z += delta.z;
RECORD_DERIVATIVE_2
#ifdef USE_CUTOFF
}
#endif
#ifdef USE_EXCLUSIONS
excl >>= 1;
#endif
tj = (tj + 1) & (TILE_SIZE - 1);
}
}
// Write results.
unsigned int offset = x*TILE_SIZE + tgx;
atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (force.x*0x100000000)));
atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.y*0x100000000)));
atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.z*0x100000000)));
STORE_DERIVATIVES_1
if (x != y) {
offset = y*TILE_SIZE + tgx;
atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.x*0x100000000)));
atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.y*0x100000000)));
atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.z*0x100000000)));
STORE_DERIVATIVES_2
}
}
// Second loop: tiles without exclusions, either from the neighbor list (with cutoff) or just enumerating all
// of them (no cutoff).
#ifdef USE_CUTOFF
unsigned int numTiles = interactionCount[0];
int pos = (int) (warp*(numTiles > maxTiles ? NUM_BLOCKS*((long long)NUM_BLOCKS+1)/2 : (long)numTiles)/totalWarps);
int end = (int) ((warp+1)*(numTiles > maxTiles ? NUM_BLOCKS*((long long)NUM_BLOCKS+1)/2 : (long)numTiles)/totalWarps);
#else
int pos = (int) (warp*(long long)numTiles/totalWarps);
int end = (int) ((warp+1)*(long long)numTiles/totalWarps);
#endif
int skipBase = 0;
int currentSkipIndex = tbx;
__shared__ int atomIndices[THREAD_BLOCK_SIZE];
__shared__ volatile int skipTiles[THREAD_BLOCK_SIZE];
skipTiles[threadIdx.x] = -1;
while (pos < end) {
const bool isExcluded = false;
real3 force = make_real3(0);
DECLARE_ATOM1_DERIVATIVES
bool includeTile = true;
// Extract the coordinates of this tile.
int x, y;
bool singlePeriodicCopy = false;
#ifdef USE_CUTOFF
if (numTiles <= maxTiles) {
x = tiles[pos];
real4 blockSizeX = blockSize[x];
singlePeriodicCopy = (0.5f*periodicBoxSize.x-blockSizeX.x >= CUTOFF &&
0.5f*periodicBoxSize.y-blockSizeX.y >= CUTOFF &&
0.5f*periodicBoxSize.z-blockSizeX.z >= CUTOFF);
}
else
#endif
{
y = (int) floor(NUM_BLOCKS+0.5f-SQRT((NUM_BLOCKS+0.5f)*(NUM_BLOCKS+0.5f)-2*pos));
x = (pos-y*NUM_BLOCKS+y*(y+1)/2);
if (x < y || x >= NUM_BLOCKS) { // Occasionally happens due to roundoff error.
y += (x < y ? -1 : 1);
x = (pos-y*NUM_BLOCKS+y*(y+1)/2);
}
// Skip over tiles that have exclusions, since they were already processed.
while (skipTiles[tbx+TILE_SIZE-1] < pos) {
if (skipBase+tgx < NUM_TILES_WITH_EXCLUSIONS) {
ushort2 tile = exclusionTiles[skipBase+tgx];
skipTiles[threadIdx.x] = tile.x + tile.y*NUM_BLOCKS - tile.y*(tile.y+1)/2;
}
else
skipTiles[threadIdx.x] = end;
skipBase += TILE_SIZE;
currentSkipIndex = tbx;
}
while (skipTiles[currentSkipIndex] < pos)
currentSkipIndex++;
includeTile = (skipTiles[currentSkipIndex] != pos);
}
if (includeTile) {
unsigned int atom1 = x*TILE_SIZE + tgx;
// Load atom data for this tile.
real4 pos1 = posq[atom1];
LOAD_ATOM1_PARAMETERS
const unsigned int localAtomIndex = threadIdx.x;
#ifdef USE_CUTOFF
unsigned int j = (numTiles <= maxTiles ? interactingAtoms[pos*TILE_SIZE+tgx] : y*TILE_SIZE + tgx);
#else
unsigned int j = y*TILE_SIZE + tgx;
#endif
atomIndices[threadIdx.x] = j;
if (j < PADDED_NUM_ATOMS) {
real4 tempPosq = posq[j];
localData[localAtomIndex].pos = make_real3(tempPosq.x, tempPosq.y, tempPosq.z);
LOAD_LOCAL_PARAMETERS_FROM_GLOBAL
localData[localAtomIndex].force = make_real3(0);
CLEAR_LOCAL_DERIVATIVES
}
#ifdef USE_PERIODIC
if (singlePeriodicCopy) {
// The box is small enough that we can just translate all the atoms into a single periodic
// box, then skip having to apply periodic boundary conditions later.
real4 blockCenterX = blockCenter[x];
APPLY_PERIODIC_TO_POS_WITH_CENTER(pos1, blockCenterX)
APPLY_PERIODIC_TO_POS_WITH_CENTER(localData[threadIdx.x].pos, blockCenterX)
unsigned int tj = tgx;
for (j = 0; j < TILE_SIZE; j++) {
int atom2 = tbx+tj;
real3 pos2 = localData[atom2].pos;
real3 delta = make_real3(pos2.x-pos1.x, pos2.y-pos1.y, pos2.z-pos1.z);
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
#ifdef USE_CUTOFF
if (r2 < CUTOFF_SQUARED) {
#endif
real invR = RSQRT(r2);
real r = r2*invR;
LOAD_ATOM2_PARAMETERS
atom2 = atomIndices[tbx+tj];
real dEdR = 0;
real tempEnergy = 0;
if (atom1 < NUM_ATOMS && atom2 < NUM_ATOMS) {
COMPUTE_INTERACTION
dEdR /= -r;
}
energy += tempEnergy;
delta *= dEdR;
force.x -= delta.x;
force.y -= delta.y;
force.z -= delta.z;
atom2 = tbx+tj;
localData[atom2].force.x += delta.x;
localData[atom2].force.y += delta.y;
localData[atom2].force.z += delta.z;
RECORD_DERIVATIVE_2
#ifdef USE_CUTOFF
}
#endif
tj = (tj + 1) & (TILE_SIZE - 1);
}
}
else
#endif
{
// We need to apply periodic boundary conditions separately for each interaction.
unsigned int tj = tgx;
for (j = 0; j < TILE_SIZE; j++) {
int atom2 = tbx+tj;
real3 pos2 = localData[atom2].pos;
real3 delta = make_real3(pos2.x-pos1.x, pos2.y-pos1.y, pos2.z-pos1.z);
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(delta)
#endif
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
#ifdef USE_CUTOFF
if (r2 < CUTOFF_SQUARED) {
#endif
real invR = RSQRT(r2);
real r = r2*invR;
LOAD_ATOM2_PARAMETERS
atom2 = atomIndices[tbx+tj];
real dEdR = 0;
real tempEnergy = 0;
if (atom1 < NUM_ATOMS && atom2 < NUM_ATOMS) {
COMPUTE_INTERACTION
dEdR /= -r;
}
energy += tempEnergy;
delta *= dEdR;
force.x -= delta.x;
force.y -= delta.y;
force.z -= delta.z;
atom2 = tbx+tj;
localData[atom2].force.x += delta.x;
localData[atom2].force.y += delta.y;
localData[atom2].force.z += delta.z;
RECORD_DERIVATIVE_2
#ifdef USE_CUTOFF
}
#endif
tj = (tj + 1) & (TILE_SIZE - 1);
}
}
// Write results.
atomicAdd(&forceBuffers[atom1], static_cast<unsigned long long>((long long) (force.x*0x100000000)));
atomicAdd(&forceBuffers[atom1+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.y*0x100000000)));
atomicAdd(&forceBuffers[atom1+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.z*0x100000000)));
unsigned int offset = atom1;
STORE_DERIVATIVES_1
#ifdef USE_CUTOFF
unsigned int atom2 = atomIndices[threadIdx.x];
#else
unsigned int atom2 = y*TILE_SIZE + tgx;
#endif
if (atom2 < PADDED_NUM_ATOMS) {
atomicAdd(&forceBuffers[atom2], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.x*0x100000000)));
atomicAdd(&forceBuffers[atom2+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.y*0x100000000)));
atomicAdd(&forceBuffers[atom2+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.z*0x100000000)));
offset = atom2;
STORE_DERIVATIVES_2
}
}
pos++;
}
energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += energy;
}
| a6811577345051db7d1a9cca8dfb8b02e10351a4.cu | #define STORE_DERIVATIVE_1(INDEX) atomicAdd(&derivBuffers[offset+(INDEX-1)*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (deriv##INDEX##_1*0x100000000)));
#define STORE_DERIVATIVE_2(INDEX) atomicAdd(&derivBuffers[offset+(INDEX-1)*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].deriv##INDEX*0x100000000)));
typedef struct {
real3 pos;
real3 force;
ATOM_PARAMETER_DATA
#ifdef NEED_PADDING
float padding;
#endif
} AtomData;
/**
* Compute a force based on pair interactions.
*/
extern "C" __global__ void computeN2Energy(unsigned long long* __restrict__ forceBuffers, mixed* __restrict__ energyBuffer,
const real4* __restrict__ posq, const unsigned int* __restrict__ exclusions, const ushort2* __restrict__ exclusionTiles,
#ifdef USE_CUTOFF
const int* __restrict__ tiles, const unsigned int* __restrict__ interactionCount, real4 periodicBoxSize, real4 invPeriodicBoxSize,
real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ, unsigned int maxTiles, const real4* __restrict__ blockCenter,
const real4* __restrict__ blockSize, const unsigned int* __restrict__ interactingAtoms
#else
unsigned int numTiles
#endif
PARAMETER_ARGUMENTS) {
const unsigned int totalWarps = (blockDim.x*gridDim.x)/TILE_SIZE;
const unsigned int warp = (blockIdx.x*blockDim.x+threadIdx.x)/TILE_SIZE;
const unsigned int tgx = threadIdx.x & (TILE_SIZE-1);
const unsigned int tbx = threadIdx.x - tgx;
mixed energy = 0;
__shared__ AtomData localData[THREAD_BLOCK_SIZE];
// First loop: process tiles that contain exclusions.
const unsigned int firstExclusionTile = FIRST_EXCLUSION_TILE+warp*(LAST_EXCLUSION_TILE-FIRST_EXCLUSION_TILE)/totalWarps;
const unsigned int lastExclusionTile = FIRST_EXCLUSION_TILE+(warp+1)*(LAST_EXCLUSION_TILE-FIRST_EXCLUSION_TILE)/totalWarps;
for (int pos = firstExclusionTile; pos < lastExclusionTile; pos++) {
const ushort2 tileIndices = exclusionTiles[pos];
const unsigned int x = tileIndices.x;
const unsigned int y = tileIndices.y;
real3 force = make_real3(0);
DECLARE_ATOM1_DERIVATIVES
unsigned int atom1 = x*TILE_SIZE + tgx;
real4 pos1 = posq[atom1];
LOAD_ATOM1_PARAMETERS
#ifdef USE_EXCLUSIONS
unsigned int excl = exclusions[pos*TILE_SIZE+tgx];
#endif
if (x == y) {
// This tile is on the diagonal.
const unsigned int localAtomIndex = threadIdx.x;
localData[localAtomIndex].pos = make_real3(pos1.x, pos1.y, pos1.z);
LOAD_LOCAL_PARAMETERS_FROM_1
for (unsigned int j = 0; j < TILE_SIZE; j++) {
int atom2 = tbx+j;
real3 pos2 = localData[atom2].pos;
real3 delta = make_real3(pos2.x-pos1.x, pos2.y-pos1.y, pos2.z-pos1.z);
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(delta)
#endif
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
#ifdef USE_CUTOFF
if (r2 < CUTOFF_SQUARED) {
#endif
real invR = RSQRT(r2);
real r = r2*invR;
LOAD_ATOM2_PARAMETERS
atom2 = y*TILE_SIZE+j;
real dEdR = 0;
real tempEnergy = 0;
#ifdef USE_EXCLUSIONS
bool isExcluded = !(excl & 0x1);
#endif
if (atom1 < NUM_ATOMS && atom2 < NUM_ATOMS && atom1 != atom2) {
COMPUTE_INTERACTION
dEdR /= -r;
}
energy += 0.5f*tempEnergy;
delta *= dEdR;
force.x -= delta.x;
force.y -= delta.y;
force.z -= delta.z;
#ifdef USE_CUTOFF
}
#endif
#ifdef USE_EXCLUSIONS
excl >>= 1;
#endif
}
}
else {
// This is an off-diagonal tile.
const unsigned int localAtomIndex = threadIdx.x;
unsigned int j = y*TILE_SIZE + tgx;
real4 tempPosq = posq[j];
localData[localAtomIndex].pos = make_real3(tempPosq.x, tempPosq.y, tempPosq.z);
LOAD_LOCAL_PARAMETERS_FROM_GLOBAL
localData[localAtomIndex].force = make_real3(0);
CLEAR_LOCAL_DERIVATIVES
#ifdef USE_EXCLUSIONS
excl = (excl >> tgx) | (excl << (TILE_SIZE - tgx));
#endif
unsigned int tj = tgx;
for (j = 0; j < TILE_SIZE; j++) {
int atom2 = tbx+tj;
real3 pos2 = localData[atom2].pos;
real3 delta = make_real3(pos2.x-pos1.x, pos2.y-pos1.y, pos2.z-pos1.z);
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(delta)
#endif
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
#ifdef USE_CUTOFF
if (r2 < CUTOFF_SQUARED) {
#endif
real invR = RSQRT(r2);
real r = r2*invR;
LOAD_ATOM2_PARAMETERS
atom2 = y*TILE_SIZE+tj;
real dEdR = 0;
real tempEnergy = 0;
#ifdef USE_EXCLUSIONS
bool isExcluded = !(excl & 0x1);
#endif
if (atom1 < NUM_ATOMS && atom2 < NUM_ATOMS) {
COMPUTE_INTERACTION
dEdR /= -r;
}
energy += tempEnergy;
delta *= dEdR;
force.x -= delta.x;
force.y -= delta.y;
force.z -= delta.z;
atom2 = tbx+tj;
localData[atom2].force.x += delta.x;
localData[atom2].force.y += delta.y;
localData[atom2].force.z += delta.z;
RECORD_DERIVATIVE_2
#ifdef USE_CUTOFF
}
#endif
#ifdef USE_EXCLUSIONS
excl >>= 1;
#endif
tj = (tj + 1) & (TILE_SIZE - 1);
}
}
// Write results.
unsigned int offset = x*TILE_SIZE + tgx;
atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (force.x*0x100000000)));
atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.y*0x100000000)));
atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.z*0x100000000)));
STORE_DERIVATIVES_1
if (x != y) {
offset = y*TILE_SIZE + tgx;
atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.x*0x100000000)));
atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.y*0x100000000)));
atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.z*0x100000000)));
STORE_DERIVATIVES_2
}
}
// Second loop: tiles without exclusions, either from the neighbor list (with cutoff) or just enumerating all
// of them (no cutoff).
#ifdef USE_CUTOFF
unsigned int numTiles = interactionCount[0];
int pos = (int) (warp*(numTiles > maxTiles ? NUM_BLOCKS*((long long)NUM_BLOCKS+1)/2 : (long)numTiles)/totalWarps);
int end = (int) ((warp+1)*(numTiles > maxTiles ? NUM_BLOCKS*((long long)NUM_BLOCKS+1)/2 : (long)numTiles)/totalWarps);
#else
int pos = (int) (warp*(long long)numTiles/totalWarps);
int end = (int) ((warp+1)*(long long)numTiles/totalWarps);
#endif
int skipBase = 0;
int currentSkipIndex = tbx;
__shared__ int atomIndices[THREAD_BLOCK_SIZE];
__shared__ volatile int skipTiles[THREAD_BLOCK_SIZE];
skipTiles[threadIdx.x] = -1;
while (pos < end) {
const bool isExcluded = false;
real3 force = make_real3(0);
DECLARE_ATOM1_DERIVATIVES
bool includeTile = true;
// Extract the coordinates of this tile.
int x, y;
bool singlePeriodicCopy = false;
#ifdef USE_CUTOFF
if (numTiles <= maxTiles) {
x = tiles[pos];
real4 blockSizeX = blockSize[x];
singlePeriodicCopy = (0.5f*periodicBoxSize.x-blockSizeX.x >= CUTOFF &&
0.5f*periodicBoxSize.y-blockSizeX.y >= CUTOFF &&
0.5f*periodicBoxSize.z-blockSizeX.z >= CUTOFF);
}
else
#endif
{
y = (int) floor(NUM_BLOCKS+0.5f-SQRT((NUM_BLOCKS+0.5f)*(NUM_BLOCKS+0.5f)-2*pos));
x = (pos-y*NUM_BLOCKS+y*(y+1)/2);
if (x < y || x >= NUM_BLOCKS) { // Occasionally happens due to roundoff error.
y += (x < y ? -1 : 1);
x = (pos-y*NUM_BLOCKS+y*(y+1)/2);
}
// Skip over tiles that have exclusions, since they were already processed.
while (skipTiles[tbx+TILE_SIZE-1] < pos) {
if (skipBase+tgx < NUM_TILES_WITH_EXCLUSIONS) {
ushort2 tile = exclusionTiles[skipBase+tgx];
skipTiles[threadIdx.x] = tile.x + tile.y*NUM_BLOCKS - tile.y*(tile.y+1)/2;
}
else
skipTiles[threadIdx.x] = end;
skipBase += TILE_SIZE;
currentSkipIndex = tbx;
}
while (skipTiles[currentSkipIndex] < pos)
currentSkipIndex++;
includeTile = (skipTiles[currentSkipIndex] != pos);
}
if (includeTile) {
unsigned int atom1 = x*TILE_SIZE + tgx;
// Load atom data for this tile.
real4 pos1 = posq[atom1];
LOAD_ATOM1_PARAMETERS
const unsigned int localAtomIndex = threadIdx.x;
#ifdef USE_CUTOFF
unsigned int j = (numTiles <= maxTiles ? interactingAtoms[pos*TILE_SIZE+tgx] : y*TILE_SIZE + tgx);
#else
unsigned int j = y*TILE_SIZE + tgx;
#endif
atomIndices[threadIdx.x] = j;
if (j < PADDED_NUM_ATOMS) {
real4 tempPosq = posq[j];
localData[localAtomIndex].pos = make_real3(tempPosq.x, tempPosq.y, tempPosq.z);
LOAD_LOCAL_PARAMETERS_FROM_GLOBAL
localData[localAtomIndex].force = make_real3(0);
CLEAR_LOCAL_DERIVATIVES
}
#ifdef USE_PERIODIC
if (singlePeriodicCopy) {
// The box is small enough that we can just translate all the atoms into a single periodic
// box, then skip having to apply periodic boundary conditions later.
real4 blockCenterX = blockCenter[x];
APPLY_PERIODIC_TO_POS_WITH_CENTER(pos1, blockCenterX)
APPLY_PERIODIC_TO_POS_WITH_CENTER(localData[threadIdx.x].pos, blockCenterX)
unsigned int tj = tgx;
for (j = 0; j < TILE_SIZE; j++) {
int atom2 = tbx+tj;
real3 pos2 = localData[atom2].pos;
real3 delta = make_real3(pos2.x-pos1.x, pos2.y-pos1.y, pos2.z-pos1.z);
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
#ifdef USE_CUTOFF
if (r2 < CUTOFF_SQUARED) {
#endif
real invR = RSQRT(r2);
real r = r2*invR;
LOAD_ATOM2_PARAMETERS
atom2 = atomIndices[tbx+tj];
real dEdR = 0;
real tempEnergy = 0;
if (atom1 < NUM_ATOMS && atom2 < NUM_ATOMS) {
COMPUTE_INTERACTION
dEdR /= -r;
}
energy += tempEnergy;
delta *= dEdR;
force.x -= delta.x;
force.y -= delta.y;
force.z -= delta.z;
atom2 = tbx+tj;
localData[atom2].force.x += delta.x;
localData[atom2].force.y += delta.y;
localData[atom2].force.z += delta.z;
RECORD_DERIVATIVE_2
#ifdef USE_CUTOFF
}
#endif
tj = (tj + 1) & (TILE_SIZE - 1);
}
}
else
#endif
{
// We need to apply periodic boundary conditions separately for each interaction.
unsigned int tj = tgx;
for (j = 0; j < TILE_SIZE; j++) {
int atom2 = tbx+tj;
real3 pos2 = localData[atom2].pos;
real3 delta = make_real3(pos2.x-pos1.x, pos2.y-pos1.y, pos2.z-pos1.z);
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(delta)
#endif
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
#ifdef USE_CUTOFF
if (r2 < CUTOFF_SQUARED) {
#endif
real invR = RSQRT(r2);
real r = r2*invR;
LOAD_ATOM2_PARAMETERS
atom2 = atomIndices[tbx+tj];
real dEdR = 0;
real tempEnergy = 0;
if (atom1 < NUM_ATOMS && atom2 < NUM_ATOMS) {
COMPUTE_INTERACTION
dEdR /= -r;
}
energy += tempEnergy;
delta *= dEdR;
force.x -= delta.x;
force.y -= delta.y;
force.z -= delta.z;
atom2 = tbx+tj;
localData[atom2].force.x += delta.x;
localData[atom2].force.y += delta.y;
localData[atom2].force.z += delta.z;
RECORD_DERIVATIVE_2
#ifdef USE_CUTOFF
}
#endif
tj = (tj + 1) & (TILE_SIZE - 1);
}
}
// Write results.
atomicAdd(&forceBuffers[atom1], static_cast<unsigned long long>((long long) (force.x*0x100000000)));
atomicAdd(&forceBuffers[atom1+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.y*0x100000000)));
atomicAdd(&forceBuffers[atom1+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.z*0x100000000)));
unsigned int offset = atom1;
STORE_DERIVATIVES_1
#ifdef USE_CUTOFF
unsigned int atom2 = atomIndices[threadIdx.x];
#else
unsigned int atom2 = y*TILE_SIZE + tgx;
#endif
if (atom2 < PADDED_NUM_ATOMS) {
atomicAdd(&forceBuffers[atom2], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.x*0x100000000)));
atomicAdd(&forceBuffers[atom2+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.y*0x100000000)));
atomicAdd(&forceBuffers[atom2+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.z*0x100000000)));
offset = atom2;
STORE_DERIVATIVES_2
}
}
pos++;
}
energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += energy;
}
|
0aed277af5b7de27056bd4320c683d4e04d10af4.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/HIPContext.h>
#include <hip/hip_runtime.h>
namespace at {
namespace native {
Scalar _local_scalar_dense_cuda(const Tensor& self) {
Scalar r;
AT_DISPATCH_ALL_TYPES_AND(
at::ScalarType::Half, self.type(), "_local_scalar_dense_cuda", [&] {
scalar_t value;
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_CUDA_CHECK(hipMemcpyAsync(&value, self.data<scalar_t>(), sizeof(scalar_t), hipMemcpyDeviceToHost, stream));
AT_CUDA_CHECK(hipStreamSynchronize(stream));
r = Scalar(value);
});
return r;
}
}} // at::native
| 0aed277af5b7de27056bd4320c683d4e04d10af4.cu | #include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/CUDAContext.h>
#include <cuda.h>
namespace at {
namespace native {
Scalar _local_scalar_dense_cuda(const Tensor& self) {
Scalar r;
AT_DISPATCH_ALL_TYPES_AND(
at::ScalarType::Half, self.type(), "_local_scalar_dense_cuda", [&] {
scalar_t value;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_CUDA_CHECK(cudaMemcpyAsync(&value, self.data<scalar_t>(), sizeof(scalar_t), cudaMemcpyDeviceToHost, stream));
AT_CUDA_CHECK(cudaStreamSynchronize(stream));
r = Scalar(value);
});
return r;
}
}} // at::native
|
6c49414b803d6ca134b72b4d48e3548002dd7342.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cutil.h"
#include "cudpp.h"
#include <sys/time.h>
__global__ void convI2FK(unsigned int * in, float * out){
int val = blockDim.x*blockIdx.x+threadIdx.x;
out[val] = __int_as_float((in[val] & 0x7FFFFF)|0x3F800000)-1.0f;
}
void doRandGenFloat(float * randOut, unsigned int Size){
struct timeval tim;
gettimeofday(&tim, NULL);
unsigned int seed = (unsigned int) tim.tv_sec + tim.tv_usec*100000.;
unsigned int * outVI;
CUDA_SAFE_CALL(hipMalloc((void **) &outVI, Size*Size*sizeof(unsigned int)));
CUDPPConfiguration config;
config.op = CUDPP_ADD;
config.datatype = CUDPP_UINT;
config.algorithm = CUDPP_RAND_MD5;
config.options = 0;
CUDPPHandle randPlan = 0;
CUDPPResult result;
result = cudppPlan(&randPlan, config, Size*Size, 1, 0);
cudppRandSeed(randPlan, seed);
cudppRand(randPlan, outVI, Size*Size);
hipLaunchKernelGGL(( convI2FK), dim3(Size),dim3(Size), 0, 0, outVI,randOut);
result = cudppDestroyPlan(randPlan);
hipFree(outVI);
}
| 6c49414b803d6ca134b72b4d48e3548002dd7342.cu | #include "cutil.h"
#include "cudpp.h"
#include <sys/time.h>
__global__ void convI2FK(unsigned int * in, float * out){
int val = blockDim.x*blockIdx.x+threadIdx.x;
out[val] = __int_as_float((in[val] & 0x7FFFFF)|0x3F800000)-1.0f;
}
void doRandGenFloat(float * randOut, unsigned int Size){
struct timeval tim;
gettimeofday(&tim, NULL);
unsigned int seed = (unsigned int) tim.tv_sec + tim.tv_usec*100000.;
unsigned int * outVI;
CUDA_SAFE_CALL(cudaMalloc((void **) &outVI, Size*Size*sizeof(unsigned int)));
CUDPPConfiguration config;
config.op = CUDPP_ADD;
config.datatype = CUDPP_UINT;
config.algorithm = CUDPP_RAND_MD5;
config.options = 0;
CUDPPHandle randPlan = 0;
CUDPPResult result;
result = cudppPlan(&randPlan, config, Size*Size, 1, 0);
cudppRandSeed(randPlan, seed);
cudppRand(randPlan, outVI, Size*Size);
convI2FK<<<Size,Size>>>(outVI,randOut);
result = cudppDestroyPlan(randPlan);
cudaFree(outVI);
}
|
6fb08a99cefbf774c7c9e125b10aa31173b75f7f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ReflectionWithBuildingCalculation.cuh"
__device__ void RWBC_UitizeVector(float *a,float *b,float *c)//
{
float length=sqrt((*a)*(*a)+(*b)*(*b)+(*c)*(*c));
*a=*a/length;
*b=*b/length;
*c=*c/length;
}
__device__ Point RWBC_GetReflectedVector(Point d,Point n)//
{
Point reflectedPoint;
float temp=2*(d.x*n.x+d.y*n.y+d.z*n.z);
reflectedPoint.x=d.x-temp*n.x;
reflectedPoint.y=d.y-temp*n.y;
reflectedPoint.z=d.z-temp*n.z;
return reflectedPoint;
}
__device__ Ray RWBC_CalculateReflectedRay(Ray incidentRay,Face *face,int faceCount,int *reflectedFace,int *flag)
{
Ray reflectedRay;
float t=50000;
for (int i=0;i<faceCount;i++)
{
float a1=-incidentRay.direction.x,a2=-incidentRay.direction.y,a3=-incidentRay.direction.z;
float b1=face[i]. B.x-face[i].A.x,b2=face[i].B.y-face[i].A.y,b3=face[i].B.z-face[i].A.z;
float c1=face[i]. C.x-face[i].A.x,c2=face[i].C.y-face[i].A.y,c3=face[i].C.z-face[i].A.z;
float x1=incidentRay.originalPoint.x-face[i].A.x,x2=incidentRay.originalPoint.y-face[i].A.y,x3=incidentRay.originalPoint.z-face[i].A.z;
float denominator=a1*(b2*c3-b3*c2)-b1*(a2*c3-a3*c2)+c1*(a2*b3-a3*b2);
float t_numerator=x1*(b2*c3-b3*c2)-b1*(x2*c3-x3*c2)+c1*(x2*b3-x3*b2);
float u_numerator=a1*(x2*c3-x3*c2)-x1*(a2*c3-a3*c2)+c1*(a2*c3-a3*c2);
float v_numerator=a1*(b2*x3-b3*x2)-b1*(a2*x3-a3*x2)+x1*(a2*b3-a3*b2);
if (abs(denominator)>0.000001)
{
float u=u_numerator/denominator;
float v=v_numerator/denominator;
if(t_numerator/denominator<t&&t_numerator/denominator>1)
{
t=t_numerator/denominator;
reflectedRay.originalPoint.x=incidentRay.originalPoint.x+incidentRay.direction.x*t;
reflectedRay.originalPoint.y=incidentRay.originalPoint.y+incidentRay.direction.y*t;
reflectedRay.originalPoint.z=incidentRay.originalPoint.z+incidentRay.direction.z*t;
Point n;
n.x=b2*c3-b3*c2;n.y=b3*c1-b1*c3;n.z=b1*c2-c1*b2;
RWBC_UitizeVector(&n.x,&n.y,&n.z);
reflectedRay.direction=RWBC_GetReflectedVector(incidentRay.direction,n);
if ((u>0)&&(u<1)&&((u+v)>0)&&((u+v)<1))
{
*flag=1;
*reflectedFace=i;
}
}
}
}
return reflectedRay;
}
__device__ bool RWBC_JudgeTwoTriangleFacesOnTheSameQuadrangle(int faceNumber1,int faceNumber2)
{
int a=faceNumber1%12;
int b=faceNumber2%12;
if (faceNumber1==faceNumber2)
{
return true;
}
if (a%2==0)
{
if (b==a+1)
{
return true;
}
}
if (a%2==1)
{
if (b==a-1)
{
return true;
}
}
return false;
}
__device__ bool RWBC_JudgeTwoTriangleFacesOnTheAdjacentQuadrangle(int faceNumber1,int faceNumber2)
{
int a=faceNumber1%12;
int b=faceNumber2%12;
if (a%2==0&&a!=0)
{
if (b==a-1)
{
return true;
}
}
if (a%2==1&&a!=7)
{
if (b==a+1)
{
return true;
}
}
if ((a==0&&b==7)||(a==7&&b==0))
{
return true;
}
return false;
}
__device__ void RWBC_GetThreeRemainingNumbers(int number,int *remainingNumber1,int *remainingNumber2,int *remainingNumber3)
{
if (number==0)
{
*remainingNumber1=1;
*remainingNumber2=2;
*remainingNumber3=3;
}
if (number==1)
{
*remainingNumber1=0;
*remainingNumber2=2;
*remainingNumber3=3;
}
if (number==2)
{
*remainingNumber1=0;
*remainingNumber2=1;
*remainingNumber3=3;
}
if (number==3)
{
*remainingNumber1=0;
*remainingNumber2=1;
*remainingNumber3=2;
}
}
__device__ void RWBC_GetOneRemainingNumber(int number1,int number2,int number3,int *remainingNumber)
{
*remainingNumber=6-number1-number2-number3;
}
__device__ void GetReflectedRayTubesWhenTwoFlagsEqualOne(int flagNumber1,int flagNumber2,int *reflectedFace,Ray *reflectedRays,Ray *incidentRays,QuadrangleRayTube *reflectedRayTubes,Face *face,int *reflectionExist,int i)
{
if (RWBC_JudgeTwoTriangleFacesOnTheSameQuadrangle(reflectedFace[flagNumber1],reflectedFace[flagNumber2]))
{
reflectionExist[i]=1;
reflectedRayTubes[2*i].ray1=reflectedRays[0];
reflectedRayTubes[2*i].ray2=reflectedRays[1];
reflectedRayTubes[2*i].ray3=reflectedRays[2];
reflectedRayTubes[2*i].ray4=reflectedRays[3];
reflectedRayTubes[2*i+1].ray1=reflectedRays[0];
reflectedRayTubes[2*i+1].ray2=reflectedRays[1];
reflectedRayTubes[2*i+1].ray3=reflectedRays[2];
reflectedRayTubes[2*i+1].ray4=reflectedRays[3];
}
if (RWBC_JudgeTwoTriangleFacesOnTheAdjacentQuadrangle(reflectedFace[flagNumber1],reflectedFace[flagNumber2]))
{
reflectionExist[i]=1;
int remainingNumber1,remainingNumber2,remainingNumber3;
RWBC_GetThreeRemainingNumbers(flagNumber1,&remainingNumber1,&remainingNumber2,&remainingNumber3);
reflectedRayTubes[2*i].ray1=reflectedRays[flagNumber1];
reflectedRayTubes[2*i].ray2=reflectedRays[remainingNumber1];
reflectedRayTubes[2*i].ray3=reflectedRays[remainingNumber2];
reflectedRayTubes[2*i].ray4=reflectedRays[remainingNumber2];
RWBC_GetThreeRemainingNumbers(flagNumber2,&remainingNumber1,&remainingNumber2,&remainingNumber3);
reflectedRayTubes[2*i+1].ray1=reflectedRays[flagNumber2];
reflectedRayTubes[2*i+1].ray2=reflectedRays[remainingNumber1];
reflectedRayTubes[2*i+1].ray3=reflectedRays[remainingNumber2];
reflectedRayTubes[2*i+1].ray4=reflectedRays[remainingNumber3];
}
}
// Handles an incident tube i where three corner rays (flagNumber1..3) hit a
// face: finds a pair of hits on the same quadrangle, appends the path node for
// the complementary pair, then delegates tube construction to the two-flag
// builder for that complementary pair.
// NOTE(review): in every branch the pair *tested* and the pair *processed*
// disagree (e.g. the first branch tests faces (1,2) but processes rays (1,3));
// the mapping looks rotated by one — confirm against the intended geometry.
__device__ void GetReflectedRayTubesWhenThreeFlagsEqualOne(int flagNumber1,int flagNumber2,int flagNumber3,int *reflectedFace,Ray *reflectedRays,Ray *incidentRays,QuadrangleRayTube *reflectedRayTubes,QuadrangleRayTube *incidentRayTubes,Face *face,int *reflectionExist,int i)
{
	//int remainingNumber;
	//RWBC_GetOneRemainingNumber(flagNumber1,flagNumber2,flagNumber3,&remainingNumber);
	if (RWBC_JudgeTwoTriangleFacesOnTheSameQuadrangle(reflectedFace[flagNumber1],reflectedFace[flagNumber2]))
	{
		RWBC_AddPathNodeToReflectedRayTube(incidentRayTubes,reflectedRayTubes,i,face[reflectedFace[flagNumber1]],face[reflectedFace[flagNumber3]]);
		GetReflectedRayTubesWhenTwoFlagsEqualOne(flagNumber1,flagNumber3,reflectedFace,reflectedRays,incidentRays,reflectedRayTubes,face,reflectionExist,i);
	}
	if (RWBC_JudgeTwoTriangleFacesOnTheSameQuadrangle(reflectedFace[flagNumber1],reflectedFace[flagNumber3]))
	{
		RWBC_AddPathNodeToReflectedRayTube(incidentRayTubes,reflectedRayTubes,i,face[reflectedFace[flagNumber2]],face[reflectedFace[flagNumber3]]);
		GetReflectedRayTubesWhenTwoFlagsEqualOne(flagNumber2,flagNumber3,reflectedFace,reflectedRays,incidentRays,reflectedRayTubes,face,reflectionExist,i);
	}
	if (RWBC_JudgeTwoTriangleFacesOnTheSameQuadrangle(reflectedFace[flagNumber2],reflectedFace[flagNumber3]))
	{
		RWBC_AddPathNodeToReflectedRayTube(incidentRayTubes,reflectedRayTubes,i,face[reflectedFace[flagNumber1]],face[reflectedFace[flagNumber2]]);
		GetReflectedRayTubesWhenTwoFlagsEqualOne(flagNumber1,flagNumber2,reflectedFace,reflectedRays,incidentRays,reflectedRayTubes,face,reflectionExist,i);
	}
}
// Appends one reflection node to the propagation path of the two child tubes
// (2*i gets triangle face1, 2*i+1 gets face2), copying the path inherited from
// incident tube i and bumping nodeLevel by one.
// BUG FIX: the original tested "nodeLevel=0/1/2" (assignment, not comparison),
// so the first branch never ran and the other two always ran, clobbering
// nodeLevel. Compare with == and make the branches mutually exclusive.
__device__ void RWBC_AddPathNodeToReflectedRayTube(QuadrangleRayTube *incidentRayTubes,QuadrangleRayTube *reflectedRayTubes,int i,Face face1,Face face2)
{
	int level=incidentRayTubes[i].path.nodeLevel;
	if (level==0)
	{
		reflectedRayTubes[2*i].path.nodeLevel=1;
		reflectedRayTubes[2*i].path.node1.point1=face1.A;
		reflectedRayTubes[2*i].path.node1.point2=face1.B;
		reflectedRayTubes[2*i].path.node1.point3=face1.C;
		reflectedRayTubes[2*i+1].path.nodeLevel=1;
		reflectedRayTubes[2*i+1].path.node1.point1=face2.A;
		reflectedRayTubes[2*i+1].path.node1.point2=face2.B;
		reflectedRayTubes[2*i+1].path.node1.point3=face2.C;
	}
	else if (level==1)
	{
		reflectedRayTubes[2*i].path.node1=incidentRayTubes[i].path.node1;
		reflectedRayTubes[2*i].path.nodeLevel=2;
		reflectedRayTubes[2*i].path.node2.point1=face1.A;
		reflectedRayTubes[2*i].path.node2.point2=face1.B;
		reflectedRayTubes[2*i].path.node2.point3=face1.C;
		reflectedRayTubes[2*i+1].path.nodeLevel=2;
		reflectedRayTubes[2*i+1].path.node1=incidentRayTubes[i].path.node1;
		reflectedRayTubes[2*i+1].path.node2.point1=face2.A;
		reflectedRayTubes[2*i+1].path.node2.point2=face2.B;
		reflectedRayTubes[2*i+1].path.node2.point3=face2.C;
	}
	else if (level==2)
	{
		reflectedRayTubes[2*i].path.node1=incidentRayTubes[i].path.node1;
		reflectedRayTubes[2*i].path.node2=incidentRayTubes[i].path.node2;
		reflectedRayTubes[2*i].path.nodeLevel=3;
		reflectedRayTubes[2*i].path.node3.point1=face1.A;
		reflectedRayTubes[2*i].path.node3.point2=face1.B;
		reflectedRayTubes[2*i].path.node3.point3=face1.C;
		reflectedRayTubes[2*i+1].path.nodeLevel=3;
		reflectedRayTubes[2*i+1].path.node1=incidentRayTubes[i].path.node1;
		reflectedRayTubes[2*i+1].path.node2=incidentRayTubes[i].path.node2;
		reflectedRayTubes[2*i+1].path.node3.point1=face2.A;
		reflectedRayTubes[2*i+1].path.node3.point2=face2.B;
		reflectedRayTubes[2*i+1].path.node3.point3=face2.C;
	}
	// NOTE(review): level>=3 is left unhandled (as before) — the path struct
	// only stores three nodes, so deeper reflections carry no path.
}
// One thread per incident ray tube. For tube i, reflects its four corner rays
// off the building faces; depending on how many corners hit (0..4) and whether
// the hit triangles lie on the same/adjacent quadrangles, fills the candidate
// child tubes at 2*i and 2*i+1 and sets reflectionExist[i].
// Launch with at least rayTubeCount threads; extra tail threads exit early.
__global__ void CalculateReflectionWithBuildingFace(QuadrangleRayTube *incidentRayTubes,int faceCount,int rayTubeCount,QuadrangleRayTube *reflectedRayTubes,Face *face,int *reflectionExist)
{
	int i=blockIdx.x * blockDim.x+threadIdx.x;
	// BUG FIX: guard the grid tail; the original indexed out of bounds whenever
	// the launch supplied more threads than ray tubes.
	if (i>=rayTubeCount)
	{
		return;
	}
	int reflectedFace[4];
	int flag[4]={0,0,0,0};
	Ray reflectedRays[4],incidentRays[4];
	incidentRays[0]=incidentRayTubes[i].ray1;
	incidentRays[1]=incidentRayTubes[i].ray2;
	incidentRays[2]=incidentRayTubes[i].ray3;
	incidentRays[3]=incidentRayTubes[i].ray4;
	reflectedRays[0]=RWBC_CalculateReflectedRay(incidentRays[0],face,faceCount,&reflectedFace[0],&flag[0]);
	reflectedRays[1]=RWBC_CalculateReflectedRay(incidentRays[1],face,faceCount,&reflectedFace[1],&flag[1]);
	reflectedRays[2]=RWBC_CalculateReflectedRay(incidentRays[2],face,faceCount,&reflectedFace[2],&flag[2]);
	reflectedRays[3]=RWBC_CalculateReflectedRay(incidentRays[3],face,faceCount,&reflectedFace[3],&flag[3]);
	int hitCount=flag[0]+flag[1]+flag[2]+flag[3];
	if (hitCount==0)
	{
		reflectionExist[i]=0;
	}
	if (hitCount==1)
	{
		reflectionExist[i]=1;
		reflectedRayTubes[2*i].ray1=reflectedRays[0];
		reflectedRayTubes[2*i].ray2=reflectedRays[1];
		reflectedRayTubes[2*i].ray3=reflectedRays[2];
		reflectedRayTubes[2*i].ray4=reflectedRays[3];
		reflectedRayTubes[2*i+1].ray1=reflectedRays[0];
		reflectedRayTubes[2*i+1].ray2=reflectedRays[1];
		reflectedRayTubes[2*i+1].ray3=reflectedRays[2];
		reflectedRayTubes[2*i+1].ray4=reflectedRays[3];
		for (int j=0;j<4;j++)
		{
			if (flag[j]==1)
			{
				// single hit: both children record the same face as path node
				RWBC_AddPathNodeToReflectedRayTube(incidentRayTubes,reflectedRayTubes,i,face[reflectedFace[j]],face[reflectedFace[j]]);
			}
		}
	}
	if (hitCount==2)
	{
		// BUG FIX: the original scan left m==n==(index of the LAST set flag);
		// keep the first set flag in m and the second in n.
		int m=-1,n=-1;
		for (int j=0;j<4;j++)
		{
			if (flag[j]==1)
			{
				if (m<0)
				{
					m=j;
				}
				else
				{
					n=j;
				}
			}
		}
		GetReflectedRayTubesWhenTwoFlagsEqualOne(m,n,reflectedFace,reflectedRays,incidentRays,reflectedRayTubes,face,reflectionExist,i);
		RWBC_AddPathNodeToReflectedRayTube(incidentRayTubes,reflectedRayTubes,i,face[reflectedFace[m]],face[reflectedFace[n]]);
	}
	if (hitCount==3)
	{
		// j = the one corner that missed; process the other three.
		int j=0;
		int remainingNumber1,remainingNumber2,remainingNumber3;
		for (;j<4;j++)
		{
			if (flag[j]==0)
			{
				break;
			}
		}
		RWBC_GetThreeRemainingNumbers(j,&remainingNumber1,&remainingNumber2,&remainingNumber3);
		GetReflectedRayTubesWhenThreeFlagsEqualOne(remainingNumber1,remainingNumber2,remainingNumber3,reflectedFace,reflectedRays,incidentRays,reflectedRayTubes,incidentRayTubes,face,reflectionExist,i);
	}
	if (hitCount==4)
	{
		// NOTE(review): configurations where neither (0,1) nor (0,3) lie on the
		// same quadrangle fall through without producing a tube — confirm that
		// is intended for the face layout in use.
		if (RWBC_JudgeTwoTriangleFacesOnTheSameQuadrangle(reflectedFace[0],reflectedFace[1]))
		{
			if (RWBC_JudgeTwoTriangleFacesOnTheAdjacentQuadrangle(reflectedFace[0],reflectedFace[2]))
			{
				RWBC_AddPathNodeToReflectedRayTube(incidentRayTubes,reflectedRayTubes,i,face[reflectedFace[0]],face[reflectedFace[2]]);
				GetReflectedRayTubesWhenTwoFlagsEqualOne(0,2,reflectedFace,reflectedRays,incidentRays,reflectedRayTubes,face,reflectionExist,i);
			}
			else
			{
				RWBC_AddPathNodeToReflectedRayTube(incidentRayTubes,reflectedRayTubes,i,face[reflectedFace[1]],face[reflectedFace[3]]);
				GetReflectedRayTubesWhenTwoFlagsEqualOne(1,3,reflectedFace,reflectedRays,incidentRays,reflectedRayTubes,face,reflectionExist,i);
			}
		}
		if (RWBC_JudgeTwoTriangleFacesOnTheSameQuadrangle(reflectedFace[0],reflectedFace[3]))
		{
			if (RWBC_JudgeTwoTriangleFacesOnTheAdjacentQuadrangle(reflectedFace[0],reflectedFace[1]))
			{
				RWBC_AddPathNodeToReflectedRayTube(incidentRayTubes,reflectedRayTubes,i,face[reflectedFace[0]],face[reflectedFace[1]]);
				GetReflectedRayTubesWhenTwoFlagsEqualOne(0,1,reflectedFace,reflectedRays,incidentRays,reflectedRayTubes,face,reflectionExist,i);
			}
			else
			{
				RWBC_AddPathNodeToReflectedRayTube(incidentRayTubes,reflectedRayTubes,i,face[reflectedFace[2]],face[reflectedFace[3]]);
				GetReflectedRayTubesWhenTwoFlagsEqualOne(2,3,reflectedFace,reflectedRays,incidentRays,reflectedRayTubes,face,reflectionExist,i);
			}
		}
	}
}
// Host wrapper: copies incident ray tubes and building faces to the device,
// is supposed to launch CalculateReflectionWithBuildingFace, and copies the
// reflected tubes and per-tube existence flags back.
// NOTE(review): this file is hipify-generated — prefer fixing the CUDA source
// and regenerating. Known issues visible here:
//  * the only kernel launch line is commented out, so reflectedRayTubes and
//    reflectionExist are copied back uninitialized;
//  * the goto-Error path falls off the end of a value-returning function;
//  * the success path never frees the device allocations (leak per call);
//  * the goto statements jump over the initialization of the const ints
//    declared below, which is ill-formed C++.
hipError_t GetReflectionWithBuildingFace(QuadrangleRayTube *incidentRayTubes,int faceCount,int rayTubeCount,Face *buildingFace,QuadrangleRayTube *reflectedRayTubes,int *reflectionExist)
{
	const int pointPerFace=9;	// unused
	hipError_t cudaStatus;
	QuadrangleRayTube *device_incidentRayTubes=0;
	QuadrangleRayTube *device_reflectedRayTubes=0;
	Face *device_face=0;
	//float *device_distance=0;
	//int *device_faceNumber1=0;
	//int *device_faceNumber2=0;
	int *device_reflectionExist=0;
	//float *device_a=0;
	cudaStatus=hipSetDevice(0);
	if (cudaStatus!=hipSuccess)
	{
		fprintf(stderr,"CUDA capable GPU is not available!");
		goto Error;
	}
	cudaStatus=hipMalloc((void**)&device_incidentRayTubes,rayTubeCount*sizeof(QuadrangleRayTube));
	if (cudaStatus!=hipSuccess)
	{
		fprintf(stderr,"device_incidentRayTubes hipMalloc error!");
		goto Error;
	}
	// two child tubes per incident tube, hence the factor 2
	cudaStatus=hipMalloc((void**)&device_reflectedRayTubes,rayTubeCount*2*sizeof(QuadrangleRayTube));
	if (cudaStatus!=hipSuccess)
	{
		fprintf(stderr,"device_reflectedRayTubes hipMalloc error!");
		goto Error;
	}
	cudaStatus=hipMalloc((void**)&device_face,faceCount*sizeof(Face));
	if (cudaStatus!=hipSuccess)
	{
		fprintf(stderr,"device_face hipMalloc error!");
		goto Error;
	}
	/*cudaStatus=hipMalloc((void**)&device_faceNumber1,rayTubeCount*sizeof(int));
	if (cudaStatus!=hipSuccess)
	{
		fprintf(stderr,"device_faceNumber1 hipMalloc error!");
		goto Error;
	}
	cudaStatus=hipMalloc((void**)&device_faceNumber2,rayTubeCount*sizeof(int));
	if (cudaStatus!=hipSuccess)
	{
		fprintf(stderr,"device_faceNumber2 hipMalloc error!");
		goto Error;
	}*/
	cudaStatus=hipMalloc((void**)&device_reflectionExist,rayTubeCount*sizeof(int));
	if (cudaStatus!=hipSuccess)
	{
		fprintf(stderr,"device_reflectionExis hipMalloc error!");
		goto Error;
	}
	//cudaStatus=hipMalloc((void**)&device_a,rayTubeCount*sizeof(float));
	//if (cudaStatus!=hipSuccess)
	//{
	//	fprintf(stderr,"device_reflectionExis hipMalloc error!");
	//	goto Error;
	//}
	cudaStatus=hipMemcpy(device_incidentRayTubes,incidentRayTubes,rayTubeCount*sizeof(QuadrangleRayTube),hipMemcpyHostToDevice);
	if (cudaStatus!=hipSuccess)
	{
		// NOTE(review): execution continues after a failed copy
		fprintf(stderr,"incidentRayTubes Memcpy failed!");
	}
	cudaStatus=hipMemcpy(device_face,buildingFace,faceCount*sizeof(Face),hipMemcpyHostToDevice);
	if (cudaStatus!=hipSuccess)
	{
		fprintf(stderr,"face Memcpy failed!");
	}
	const int num_blocks=32;
	const int num_threads=640;
	// NOTE(review): kernel launch missing — the commented line below also names
	// a different kernel with parameters that do not exist in this function.
	//CalculateReflectionAndDiffractionWithBuildingFace<<<num_blocks,num_threads>>>(device_incidentRayTubes,faceCount,rayTubeCount,device_reflectedRayTubes,device_face,device_edge,device_faceNumber1,device_faceNumber2,device_reflectionExist,device_diffractionExist);
	cudaStatus=hipMemcpy(reflectedRayTubes,device_reflectedRayTubes,rayTubeCount*2*sizeof(QuadrangleRayTube),hipMemcpyDeviceToHost);
	if (cudaStatus!=hipSuccess)
	{
		fprintf(stderr,"reflectedRayTubes hipMemcpy failed!");
	}
	//cudaStatus=hipMemcpy(faceNumber1,device_faceNumber1,rayTubeCount*sizeof(int),hipMemcpyDeviceToHost);
	//if (cudaStatus!=hipSuccess)
	//{
	//	fprintf(stderr,"faceNumber1 hipMemcpy failed!");
	//}
	//cudaStatus=hipMemcpy(faceNumber2,device_faceNumber2,rayTubeCount*sizeof(int),hipMemcpyDeviceToHost);
	//if (cudaStatus!=hipSuccess)
	//{
	//	fprintf(stderr,"faceNumber2 hipMemcpy failed!");
	//}
	cudaStatus=hipMemcpy(reflectionExist,device_reflectionExist,rayTubeCount*sizeof(int),hipMemcpyDeviceToHost);
	if (cudaStatus!=hipSuccess)
	{
		fprintf(stderr,"reflectionExist hipMemcpy failed!");
	}
	return cudaStatus;
Error:
	// NOTE(review): no return after the frees — control falls off the end of a
	// value-returning function (undefined behaviour).
	hipFree(device_incidentRayTubes);
	hipFree(device_reflectedRayTubes);
	hipFree(device_face);
	//hipFree(device_faceNumber1);
	//hipFree(device_faceNumber2);
	hipFree(device_reflectionExist);
} | 6fb08a99cefbf774c7c9e125b10aa31173b75f7f.cu | #include "ReflectionWithBuildingCalculation.cuh"
__device__ void RWBC_UitizeVector(float *a,float *b,float *c)//normalize the 3-component vector (a,b,c) in place
{
	// FIX: sqrtf keeps the computation in single precision (plain sqrt on a
	// float promotes to double on the device).
	float length=sqrtf((*a)*(*a)+(*b)*(*b)+(*c)*(*c));
	// NOTE(review): no guard for length==0; callers pass cross products of
	// triangle edges, which are non-zero for non-degenerate faces.
	*a=*a/length;
	*b=*b/length;
	*c=*c/length;
}
__device__ Point RWBC_GetReflectedVector(Point d,Point n)//mirror direction d about unit normal n: r = d - 2(d.n)n
{
	float twoDotDN = 2*(d.x*n.x + d.y*n.y + d.z*n.z);
	Point mirrored;
	mirrored.x = d.x - twoDotDN*n.x;
	mirrored.y = d.y - twoDotDN*n.y;
	mirrored.z = d.z - twoDotDN*n.z;
	return mirrored;
}
// Intersects incidentRay with every triangle in face[0..faceCount) by solving
// t*(-d) + u*(B-A) + v*(C-A) = O-A with Cramer's rule, and returns the ray
// reflected off the nearest triangle actually hit. On a hit, *flag is set to 1
// and *reflectedFace to the triangle index; when nothing is hit the returned
// Ray is unspecified and *flag is left untouched, so callers must pre-zero
// *flag (the kernel does) and only use the result when *flag==1.
// FIXES vs. the original:
//  * u's numerator had a copy/paste error (its last Cramer minor used the c
//    column where the x column belongs);
//  * the inside-triangle test checked (u+v)>0 instead of v>0, admitting v<0;
//  * the nearest-hit state (t, origin, direction) was updated for plane
//    crossings *outside* the triangle, so the returned ray could belong to a
//    different face than *reflectedFace;
//  * fabsf instead of abs on a float.
__device__ Ray RWBC_CalculateReflectedRay(Ray incidentRay,Face *face,int faceCount,int *reflectedFace,int *flag)
{
	Ray reflectedRay;
	float t=50000;	// nearest accepted hit so far (parametric distance)
	for (int i=0;i<faceCount;i++)
	{
		float a1=-incidentRay.direction.x,a2=-incidentRay.direction.y,a3=-incidentRay.direction.z;
		float b1=face[i].B.x-face[i].A.x,b2=face[i].B.y-face[i].A.y,b3=face[i].B.z-face[i].A.z;
		float c1=face[i].C.x-face[i].A.x,c2=face[i].C.y-face[i].A.y,c3=face[i].C.z-face[i].A.z;
		float x1=incidentRay.originalPoint.x-face[i].A.x,x2=incidentRay.originalPoint.y-face[i].A.y,x3=incidentRay.originalPoint.z-face[i].A.z;
		float denominator=a1*(b2*c3-b3*c2)-b1*(a2*c3-a3*c2)+c1*(a2*b3-a3*b2);
		if (fabsf(denominator)>0.000001f)	// skip rays (near-)parallel to the plane
		{
			float tCandidate=(x1*(b2*c3-b3*c2)-b1*(x2*c3-x3*c2)+c1*(x2*b3-x3*b2))/denominator;
			float u=(a1*(x2*c3-x3*c2)-x1*(a2*c3-a3*c2)+c1*(a2*x3-a3*x2))/denominator;
			float v=(a1*(b2*x3-b3*x2)-b1*(a2*x3-a3*x2)+x1*(a2*b3-a3*b2))/denominator;
			// tCandidate>1 skips the face the ray starts on. NOTE(review): this
			// assumes ray directions are scaled so the originating face lies
			// within one parameter unit — confirm against the ray generator.
			if (tCandidate<t&&tCandidate>1&&u>0&&v>0&&(u+v)<1)
			{
				t=tCandidate;
				reflectedRay.originalPoint.x=incidentRay.originalPoint.x+incidentRay.direction.x*t;
				reflectedRay.originalPoint.y=incidentRay.originalPoint.y+incidentRay.direction.y*t;
				reflectedRay.originalPoint.z=incidentRay.originalPoint.z+incidentRay.direction.z*t;
				Point n;	// triangle normal = (B-A) x (C-A), normalized
				n.x=b2*c3-b3*c2;n.y=b3*c1-b1*c3;n.z=b1*c2-c1*b2;
				RWBC_UitizeVector(&n.x,&n.y,&n.z);
				reflectedRay.direction=RWBC_GetReflectedVector(incidentRay.direction,n);
				*flag=1;
				*reflectedFace=i;
			}
		}
	}
	return reflectedRay;
}
// True when the two triangle indices form the same quadrangle: triangles are
// laid out in even/odd pairs (0,1), (2,3), ... within each block of 12, so
// equal indices or pair partners (after %12) match.
// NOTE(review): the %12 comparison ignores which 12-face block the triangles
// belong to, so pair partners from different blocks also compare true —
// confirm callers only compare faces of one building.
__device__ bool RWBC_JudgeTwoTriangleFacesOnTheSameQuadrangle(int faceNumber1,int faceNumber2)
{
	if (faceNumber1==faceNumber2)
	{
		return true;
	}
	int a=faceNumber1%12;
	int b=faceNumber2%12;
	if (a%2==0&&b==a+1)
	{
		return true;	// a is the even member, b its odd partner
	}
	if (a%2==1&&b==a-1)
	{
		return true;	// a is the odd member, b its even partner
	}
	return false;
}
// True when the two triangle indices (mod 12) sit on neighbouring quadrangles
// of the ring formed by triangles 0..7, with 0 and 7 wrapping around.
// NOTE(review): triangles 8..11 (mod 12) never test adjacent here — verify
// that is intended for the face layout in use.
__device__ bool RWBC_JudgeTwoTriangleFacesOnTheAdjacentQuadrangle(int faceNumber1,int faceNumber2)
{
	int a=faceNumber1%12;
	int b=faceNumber2%12;
	if (a%2==0&&a!=0&&b==a-1)
	{
		return true;	// even (not ring start): neighbour is the odd just below
	}
	if (a%2==1&&a!=7&&b==a+1)
	{
		return true;	// odd (not ring end): neighbour is the even just above
	}
	return (a==0&&b==7)||(a==7&&b==0);	// wrap-around pair
}
// Writes, in ascending order, the three corner indices (0..3) other than
// `number`. Outputs are left untouched for out-of-range input, matching the
// original branch-per-case behaviour.
__device__ void RWBC_GetThreeRemainingNumbers(int number,int *remainingNumber1,int *remainingNumber2,int *remainingNumber3)
{
	if (number<0||number>3)
	{
		return;
	}
	int others[3];
	int k=0;
	for (int t=0;t<4;t++)
	{
		if (t!=number)
		{
			others[k++]=t;
		}
	}
	*remainingNumber1=others[0];
	*remainingNumber2=others[1];
	*remainingNumber3=others[2];
}
// Given three distinct corner indices out of {0,1,2,3} (which sum with the
// missing one to 6), returns the fourth.
// NOTE(review): currently unused — its only call site is commented out in
// GetReflectedRayTubesWhenThreeFlagsEqualOne.
__device__ void RWBC_GetOneRemainingNumber(int number1,int number2,int number3,int *remainingNumber)
{
	*remainingNumber=6-number1-number2-number3;
}
// Builds the two child ray tubes (slots 2*i and 2*i+1) of incident tube i when
// exactly two of its corner rays (indices flagNumber1, flagNumber2) reflected
// off a building face. Sets reflectionExist[i]=1 when a tube is produced.
__device__ void GetReflectedRayTubesWhenTwoFlagsEqualOne(int flagNumber1,int flagNumber2,int *reflectedFace,Ray *reflectedRays,Ray *incidentRays,QuadrangleRayTube *reflectedRayTubes,Face *face,int *reflectionExist,int i)
{
	if (RWBC_JudgeTwoTriangleFacesOnTheSameQuadrangle(reflectedFace[flagNumber1],reflectedFace[flagNumber2]))
	{
		// Both hits lie on the same wall quadrangle: duplicate the full tube.
		reflectionExist[i]=1;
		reflectedRayTubes[2*i].ray1=reflectedRays[0];
		reflectedRayTubes[2*i].ray2=reflectedRays[1];
		reflectedRayTubes[2*i].ray3=reflectedRays[2];
		reflectedRayTubes[2*i].ray4=reflectedRays[3];
		reflectedRayTubes[2*i+1].ray1=reflectedRays[0];
		reflectedRayTubes[2*i+1].ray2=reflectedRays[1];
		reflectedRayTubes[2*i+1].ray3=reflectedRays[2];
		reflectedRayTubes[2*i+1].ray4=reflectedRays[3];
	}
	if (RWBC_JudgeTwoTriangleFacesOnTheAdjacentQuadrangle(reflectedFace[flagNumber1],reflectedFace[flagNumber2]))
	{
		// Hits on adjacent quadrangles: one child tube per hit corner, anchored
		// at its hit ray plus the three remaining corner rays.
		reflectionExist[i]=1;
		int remainingNumber1,remainingNumber2,remainingNumber3;
		RWBC_GetThreeRemainingNumbers(flagNumber1,&remainingNumber1,&remainingNumber2,&remainingNumber3);
		reflectedRayTubes[2*i].ray1=reflectedRays[flagNumber1];
		reflectedRayTubes[2*i].ray2=reflectedRays[remainingNumber1];
		reflectedRayTubes[2*i].ray3=reflectedRays[remainingNumber2];
		// BUG FIX: the fourth corner used remainingNumber2 twice; it must take
		// the third remaining index, mirroring the 2*i+1 tube below.
		reflectedRayTubes[2*i].ray4=reflectedRays[remainingNumber3];
		RWBC_GetThreeRemainingNumbers(flagNumber2,&remainingNumber1,&remainingNumber2,&remainingNumber3);
		reflectedRayTubes[2*i+1].ray1=reflectedRays[flagNumber2];
		reflectedRayTubes[2*i+1].ray2=reflectedRays[remainingNumber1];
		reflectedRayTubes[2*i+1].ray3=reflectedRays[remainingNumber2];
		reflectedRayTubes[2*i+1].ray4=reflectedRays[remainingNumber3];
	}
}
// Handles an incident tube i where three corner rays (flagNumber1..3) hit a
// face: finds a pair of hits on the same quadrangle, appends the path node for
// the complementary pair, then delegates tube construction to the two-flag
// builder for that complementary pair.
// NOTE(review): in every branch the pair *tested* and the pair *processed*
// disagree (e.g. the first branch tests faces (1,2) but processes rays (1,3));
// the mapping looks rotated by one — confirm against the intended geometry.
__device__ void GetReflectedRayTubesWhenThreeFlagsEqualOne(int flagNumber1,int flagNumber2,int flagNumber3,int *reflectedFace,Ray *reflectedRays,Ray *incidentRays,QuadrangleRayTube *reflectedRayTubes,QuadrangleRayTube *incidentRayTubes,Face *face,int *reflectionExist,int i)
{
	//int remainingNumber;
	//RWBC_GetOneRemainingNumber(flagNumber1,flagNumber2,flagNumber3,&remainingNumber);
	if (RWBC_JudgeTwoTriangleFacesOnTheSameQuadrangle(reflectedFace[flagNumber1],reflectedFace[flagNumber2]))
	{
		RWBC_AddPathNodeToReflectedRayTube(incidentRayTubes,reflectedRayTubes,i,face[reflectedFace[flagNumber1]],face[reflectedFace[flagNumber3]]);
		GetReflectedRayTubesWhenTwoFlagsEqualOne(flagNumber1,flagNumber3,reflectedFace,reflectedRays,incidentRays,reflectedRayTubes,face,reflectionExist,i);
	}
	if (RWBC_JudgeTwoTriangleFacesOnTheSameQuadrangle(reflectedFace[flagNumber1],reflectedFace[flagNumber3]))
	{
		RWBC_AddPathNodeToReflectedRayTube(incidentRayTubes,reflectedRayTubes,i,face[reflectedFace[flagNumber2]],face[reflectedFace[flagNumber3]]);
		GetReflectedRayTubesWhenTwoFlagsEqualOne(flagNumber2,flagNumber3,reflectedFace,reflectedRays,incidentRays,reflectedRayTubes,face,reflectionExist,i);
	}
	if (RWBC_JudgeTwoTriangleFacesOnTheSameQuadrangle(reflectedFace[flagNumber2],reflectedFace[flagNumber3]))
	{
		RWBC_AddPathNodeToReflectedRayTube(incidentRayTubes,reflectedRayTubes,i,face[reflectedFace[flagNumber1]],face[reflectedFace[flagNumber2]]);
		GetReflectedRayTubesWhenTwoFlagsEqualOne(flagNumber1,flagNumber2,reflectedFace,reflectedRays,incidentRays,reflectedRayTubes,face,reflectionExist,i);
	}
}
// Appends one reflection node to the propagation path of the two child tubes
// (2*i gets triangle face1, 2*i+1 gets face2), copying the path inherited from
// incident tube i and bumping nodeLevel by one.
// BUG FIX: the original tested "nodeLevel=0/1/2" (assignment, not comparison),
// so the first branch never ran and the other two always ran, clobbering
// nodeLevel. Compare with == and make the branches mutually exclusive.
__device__ void RWBC_AddPathNodeToReflectedRayTube(QuadrangleRayTube *incidentRayTubes,QuadrangleRayTube *reflectedRayTubes,int i,Face face1,Face face2)
{
	int level=incidentRayTubes[i].path.nodeLevel;
	if (level==0)
	{
		reflectedRayTubes[2*i].path.nodeLevel=1;
		reflectedRayTubes[2*i].path.node1.point1=face1.A;
		reflectedRayTubes[2*i].path.node1.point2=face1.B;
		reflectedRayTubes[2*i].path.node1.point3=face1.C;
		reflectedRayTubes[2*i+1].path.nodeLevel=1;
		reflectedRayTubes[2*i+1].path.node1.point1=face2.A;
		reflectedRayTubes[2*i+1].path.node1.point2=face2.B;
		reflectedRayTubes[2*i+1].path.node1.point3=face2.C;
	}
	else if (level==1)
	{
		reflectedRayTubes[2*i].path.node1=incidentRayTubes[i].path.node1;
		reflectedRayTubes[2*i].path.nodeLevel=2;
		reflectedRayTubes[2*i].path.node2.point1=face1.A;
		reflectedRayTubes[2*i].path.node2.point2=face1.B;
		reflectedRayTubes[2*i].path.node2.point3=face1.C;
		reflectedRayTubes[2*i+1].path.nodeLevel=2;
		reflectedRayTubes[2*i+1].path.node1=incidentRayTubes[i].path.node1;
		reflectedRayTubes[2*i+1].path.node2.point1=face2.A;
		reflectedRayTubes[2*i+1].path.node2.point2=face2.B;
		reflectedRayTubes[2*i+1].path.node2.point3=face2.C;
	}
	else if (level==2)
	{
		reflectedRayTubes[2*i].path.node1=incidentRayTubes[i].path.node1;
		reflectedRayTubes[2*i].path.node2=incidentRayTubes[i].path.node2;
		reflectedRayTubes[2*i].path.nodeLevel=3;
		reflectedRayTubes[2*i].path.node3.point1=face1.A;
		reflectedRayTubes[2*i].path.node3.point2=face1.B;
		reflectedRayTubes[2*i].path.node3.point3=face1.C;
		reflectedRayTubes[2*i+1].path.nodeLevel=3;
		reflectedRayTubes[2*i+1].path.node1=incidentRayTubes[i].path.node1;
		reflectedRayTubes[2*i+1].path.node2=incidentRayTubes[i].path.node2;
		reflectedRayTubes[2*i+1].path.node3.point1=face2.A;
		reflectedRayTubes[2*i+1].path.node3.point2=face2.B;
		reflectedRayTubes[2*i+1].path.node3.point3=face2.C;
	}
	// NOTE(review): level>=3 is left unhandled (as before) — the path struct
	// only stores three nodes, so deeper reflections carry no path.
}
// One thread per incident ray tube. For tube i, reflects its four corner rays
// off the building faces; depending on how many corners hit (0..4) and whether
// the hit triangles lie on the same/adjacent quadrangles, fills the candidate
// child tubes at 2*i and 2*i+1 and sets reflectionExist[i].
// Launch with at least rayTubeCount threads; extra tail threads exit early.
__global__ void CalculateReflectionWithBuildingFace(QuadrangleRayTube *incidentRayTubes,int faceCount,int rayTubeCount,QuadrangleRayTube *reflectedRayTubes,Face *face,int *reflectionExist)
{
	int i=blockIdx.x * blockDim.x+threadIdx.x;
	// BUG FIX: guard the grid tail; the original indexed out of bounds whenever
	// the launch supplied more threads than ray tubes.
	if (i>=rayTubeCount)
	{
		return;
	}
	int reflectedFace[4];
	int flag[4]={0,0,0,0};
	Ray reflectedRays[4],incidentRays[4];
	incidentRays[0]=incidentRayTubes[i].ray1;
	incidentRays[1]=incidentRayTubes[i].ray2;
	incidentRays[2]=incidentRayTubes[i].ray3;
	incidentRays[3]=incidentRayTubes[i].ray4;
	reflectedRays[0]=RWBC_CalculateReflectedRay(incidentRays[0],face,faceCount,&reflectedFace[0],&flag[0]);
	reflectedRays[1]=RWBC_CalculateReflectedRay(incidentRays[1],face,faceCount,&reflectedFace[1],&flag[1]);
	reflectedRays[2]=RWBC_CalculateReflectedRay(incidentRays[2],face,faceCount,&reflectedFace[2],&flag[2]);
	reflectedRays[3]=RWBC_CalculateReflectedRay(incidentRays[3],face,faceCount,&reflectedFace[3],&flag[3]);
	int hitCount=flag[0]+flag[1]+flag[2]+flag[3];
	if (hitCount==0)
	{
		reflectionExist[i]=0;
	}
	if (hitCount==1)
	{
		reflectionExist[i]=1;
		reflectedRayTubes[2*i].ray1=reflectedRays[0];
		reflectedRayTubes[2*i].ray2=reflectedRays[1];
		reflectedRayTubes[2*i].ray3=reflectedRays[2];
		reflectedRayTubes[2*i].ray4=reflectedRays[3];
		reflectedRayTubes[2*i+1].ray1=reflectedRays[0];
		reflectedRayTubes[2*i+1].ray2=reflectedRays[1];
		reflectedRayTubes[2*i+1].ray3=reflectedRays[2];
		reflectedRayTubes[2*i+1].ray4=reflectedRays[3];
		for (int j=0;j<4;j++)
		{
			if (flag[j]==1)
			{
				// single hit: both children record the same face as path node
				RWBC_AddPathNodeToReflectedRayTube(incidentRayTubes,reflectedRayTubes,i,face[reflectedFace[j]],face[reflectedFace[j]]);
			}
		}
	}
	if (hitCount==2)
	{
		// BUG FIX: the original scan left m==n==(index of the LAST set flag);
		// keep the first set flag in m and the second in n.
		int m=-1,n=-1;
		for (int j=0;j<4;j++)
		{
			if (flag[j]==1)
			{
				if (m<0)
				{
					m=j;
				}
				else
				{
					n=j;
				}
			}
		}
		GetReflectedRayTubesWhenTwoFlagsEqualOne(m,n,reflectedFace,reflectedRays,incidentRays,reflectedRayTubes,face,reflectionExist,i);
		RWBC_AddPathNodeToReflectedRayTube(incidentRayTubes,reflectedRayTubes,i,face[reflectedFace[m]],face[reflectedFace[n]]);
	}
	if (hitCount==3)
	{
		// j = the one corner that missed; process the other three.
		int j=0;
		int remainingNumber1,remainingNumber2,remainingNumber3;
		for (;j<4;j++)
		{
			if (flag[j]==0)
			{
				break;
			}
		}
		RWBC_GetThreeRemainingNumbers(j,&remainingNumber1,&remainingNumber2,&remainingNumber3);
		GetReflectedRayTubesWhenThreeFlagsEqualOne(remainingNumber1,remainingNumber2,remainingNumber3,reflectedFace,reflectedRays,incidentRays,reflectedRayTubes,incidentRayTubes,face,reflectionExist,i);
	}
	if (hitCount==4)
	{
		// NOTE(review): configurations where neither (0,1) nor (0,3) lie on the
		// same quadrangle fall through without producing a tube — confirm that
		// is intended for the face layout in use.
		if (RWBC_JudgeTwoTriangleFacesOnTheSameQuadrangle(reflectedFace[0],reflectedFace[1]))
		{
			if (RWBC_JudgeTwoTriangleFacesOnTheAdjacentQuadrangle(reflectedFace[0],reflectedFace[2]))
			{
				RWBC_AddPathNodeToReflectedRayTube(incidentRayTubes,reflectedRayTubes,i,face[reflectedFace[0]],face[reflectedFace[2]]);
				GetReflectedRayTubesWhenTwoFlagsEqualOne(0,2,reflectedFace,reflectedRays,incidentRays,reflectedRayTubes,face,reflectionExist,i);
			}
			else
			{
				RWBC_AddPathNodeToReflectedRayTube(incidentRayTubes,reflectedRayTubes,i,face[reflectedFace[1]],face[reflectedFace[3]]);
				GetReflectedRayTubesWhenTwoFlagsEqualOne(1,3,reflectedFace,reflectedRays,incidentRays,reflectedRayTubes,face,reflectionExist,i);
			}
		}
		if (RWBC_JudgeTwoTriangleFacesOnTheSameQuadrangle(reflectedFace[0],reflectedFace[3]))
		{
			if (RWBC_JudgeTwoTriangleFacesOnTheAdjacentQuadrangle(reflectedFace[0],reflectedFace[1]))
			{
				RWBC_AddPathNodeToReflectedRayTube(incidentRayTubes,reflectedRayTubes,i,face[reflectedFace[0]],face[reflectedFace[1]]);
				GetReflectedRayTubesWhenTwoFlagsEqualOne(0,1,reflectedFace,reflectedRays,incidentRays,reflectedRayTubes,face,reflectionExist,i);
			}
			else
			{
				RWBC_AddPathNodeToReflectedRayTube(incidentRayTubes,reflectedRayTubes,i,face[reflectedFace[2]],face[reflectedFace[3]]);
				GetReflectedRayTubesWhenTwoFlagsEqualOne(2,3,reflectedFace,reflectedRays,incidentRays,reflectedRayTubes,face,reflectionExist,i);
			}
		}
	}
}
// Copies the incident ray tubes and building faces to the device, launches
// CalculateReflectionWithBuildingFace (one thread per tube), and copies the
// reflected tubes and per-tube existence flags back to the host.
// Returns the first failing CUDA status, otherwise the last operation's status.
// FIXES vs. the original:
//  * the kernel was never launched (the only launch line was commented out),
//    so reflectedRayTubes/reflectionExist were copied back uninitialized;
//  * the Error path fell off the end of a value-returning function (UB);
//  * device allocations were leaked on the success path;
//  * the grid is sized from rayTubeCount instead of a fixed 32x640;
//  * launch-geometry constants are declared before any goto so no
//    initialization is jumped over (ill-formed C++ otherwise).
cudaError_t GetReflectionWithBuildingFace(QuadrangleRayTube *incidentRayTubes,int faceCount,int rayTubeCount,Face *buildingFace,QuadrangleRayTube *reflectedRayTubes,int *reflectionExist)
{
	cudaError_t cudaStatus;
	QuadrangleRayTube *device_incidentRayTubes=0;
	QuadrangleRayTube *device_reflectedRayTubes=0;
	Face *device_face=0;
	int *device_reflectionExist=0;
	const int num_threads=640;
	const int num_blocks=(rayTubeCount+num_threads-1)/num_threads;	// ceil-div; kernel guards i<rayTubeCount
	cudaStatus=cudaSetDevice(0);
	if (cudaStatus!=cudaSuccess)
	{
		fprintf(stderr,"CUDA capable GPU is not available!");
		goto Error;
	}
	cudaStatus=cudaMalloc((void**)&device_incidentRayTubes,rayTubeCount*sizeof(QuadrangleRayTube));
	if (cudaStatus!=cudaSuccess)
	{
		fprintf(stderr,"device_incidentRayTubes cudaMalloc error!");
		goto Error;
	}
	// two child tubes per incident tube, hence the factor 2
	cudaStatus=cudaMalloc((void**)&device_reflectedRayTubes,rayTubeCount*2*sizeof(QuadrangleRayTube));
	if (cudaStatus!=cudaSuccess)
	{
		fprintf(stderr,"device_reflectedRayTubes cudaMalloc error!");
		goto Error;
	}
	cudaStatus=cudaMalloc((void**)&device_face,faceCount*sizeof(Face));
	if (cudaStatus!=cudaSuccess)
	{
		fprintf(stderr,"device_face cudaMalloc error!");
		goto Error;
	}
	cudaStatus=cudaMalloc((void**)&device_reflectionExist,rayTubeCount*sizeof(int));
	if (cudaStatus!=cudaSuccess)
	{
		fprintf(stderr,"device_reflectionExist cudaMalloc error!");
		goto Error;
	}
	cudaStatus=cudaMemcpy(device_incidentRayTubes,incidentRayTubes,rayTubeCount*sizeof(QuadrangleRayTube),cudaMemcpyHostToDevice);
	if (cudaStatus!=cudaSuccess)
	{
		fprintf(stderr,"incidentRayTubes Memcpy failed!");
		goto Error;
	}
	cudaStatus=cudaMemcpy(device_face,buildingFace,faceCount*sizeof(Face),cudaMemcpyHostToDevice);
	if (cudaStatus!=cudaSuccess)
	{
		fprintf(stderr,"face Memcpy failed!");
		goto Error;
	}
	CalculateReflectionWithBuildingFace<<<num_blocks,num_threads>>>(device_incidentRayTubes,faceCount,rayTubeCount,device_reflectedRayTubes,device_face,device_reflectionExist);
	cudaStatus=cudaGetLastError();	// launch-configuration errors surface here
	if (cudaStatus!=cudaSuccess)
	{
		fprintf(stderr,"CalculateReflectionWithBuildingFace launch failed: %s\n",cudaGetErrorString(cudaStatus));
		goto Error;
	}
	cudaStatus=cudaMemcpy(reflectedRayTubes,device_reflectedRayTubes,rayTubeCount*2*sizeof(QuadrangleRayTube),cudaMemcpyDeviceToHost);
	if (cudaStatus!=cudaSuccess)
	{
		fprintf(stderr,"reflectedRayTubes cudaMemcpy failed!");
		goto Error;
	}
	cudaStatus=cudaMemcpy(reflectionExist,device_reflectionExist,rayTubeCount*sizeof(int),cudaMemcpyDeviceToHost);
	if (cudaStatus!=cudaSuccess)
	{
		fprintf(stderr,"reflectionExist cudaMemcpy failed!");
	}
Error:
	// single cleanup path; cudaFree(0) is a no-op, so never-allocated pointers
	// are safe to free
	cudaFree(device_incidentRayTubes);
	cudaFree(device_reflectedRayTubes);
	cudaFree(device_face);
	cudaFree(device_reflectionExist);
	return cudaStatus;
} |
03ba35104a0190134079d59e5bce02fbccf055bf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Element-wise c = a + b, one BLOCK per element: the launch below uses
// <<<N,1>>>, so blockIdx.x selects the element and each block runs one thread.
// No bounds check — the grid size must equal the array length.
__global__ void add(int *a, int *b, int *c)
{
	/* finish this code to calculate c element-wise from a and b where each block calculates one element */
	c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
}
/* experiment with different values of N. */
/* how large can it be? */
#define N 32
// Exercise driver: fills a and b with 0..N-1 on the host, adds them on the
// GPU with one block per element, prints c, and releases all memory.
// NOTE(review): hipify-generated file — no HIP call here is checked for
// errors, so a failed allocation/copy/launch silently prints stale zeros;
// fix the CUDA source and regenerate rather than hand-editing this file.
int main()
{
	int *a, *b, *c;
	int *d_a, *d_b, *d_c;
	int size = N * sizeof( int );
	/* allocate space for device copies of a, b, c */
	hipMalloc( (void **) &d_a, size );
	/* insert code here for d_b and d_c */
	hipMalloc( (void **) &d_b, size );
	hipMalloc( (void **) &d_c, size );
	/* allocate space for host copies of a, b, c and setup input values */
	a = (int *)malloc( size );
	b = (int *)malloc( size );
	c = (int *)malloc( size );
	/* intializing a, b, c on host */
	for( int i = 0; i < N; i++ )
	{
		a[i] = b[i] = i;	/* expected result: c[i] == 2*i */
		c[i] = 0;
	}
	/* copy inputs to device */
	hipMemcpy( d_a, a, size, hipMemcpyHostToDevice );
	/* insert code to copy b to the device */
	hipMemcpy( d_b, b, size, hipMemcpyHostToDevice );
	/* launch the kernel on the GPU */
	/* finish this kernel launch with N blocks and 1 thread per block */
	hipLaunchKernelGGL(( add) , dim3(N), dim3(1) , 0, 0, d_a, d_b, d_c);
	/* copy result back to host */
	/* (blocking copy — it also synchronizes with the kernel) */
	hipMemcpy( c, d_c, size, hipMemcpyDeviceToHost );
	for( int i = 0; i < N; i++ )
	{
		printf("c[%d] = %d\n",i,c[i]);
	} /* end for */
	/* clean up */
	free(a);
	free(b);
	free(c);
	hipFree( d_a );
	hipFree( d_b );
	hipFree( d_c );
	return 0;
} /* end main */
| 03ba35104a0190134079d59e5bce02fbccf055bf.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Element-wise c = a + b, one BLOCK per element: the launch below uses
// <<<N,1>>>, so blockIdx.x selects the element and each block runs one thread.
// No bounds check — the grid size must equal the array length.
__global__ void add(int *a, int *b, int *c)
{
	int element = blockIdx.x;
	c[element] = a[element] + b[element];
}
/* experiment with different values of N. */
/* how large can it be? */
#define N 32
/* Exercise driver: fills a and b with 0..N-1 on the host, adds them on the
   GPU with one block per element (<<<N,1>>>), prints c, and releases all
   memory. Improvement over the original solution: every CUDA call and the
   kernel launch are now checked, so failures are reported instead of
   silently printing stale zeros; the exit code reflects success. */
int main()
{
	int *a, *b, *c;
	int *d_a, *d_b, *d_c;
	int size = N * sizeof( int );
	/* allocate space for device copies of a, b, c (chained checks: the first
	   failure short-circuits the rest) */
	cudaError_t err = cudaMalloc( (void **) &d_a, size );
	if (err == cudaSuccess) err = cudaMalloc( (void **) &d_b, size );
	if (err == cudaSuccess) err = cudaMalloc( (void **) &d_c, size );
	if (err != cudaSuccess)
	{
		fprintf(stderr, "cudaMalloc failed: %s\n", cudaGetErrorString(err));
		return 1;
	}
	/* allocate space for host copies of a, b, c and setup input values */
	a = (int *)malloc( size );
	b = (int *)malloc( size );
	c = (int *)malloc( size );
	for( int i = 0; i < N; i++ )
	{
		a[i] = b[i] = i;	/* expected result: c[i] == 2*i */
		c[i] = 0;
	}
	/* copy inputs to device */
	err = cudaMemcpy( d_a, a, size, cudaMemcpyHostToDevice );
	if (err == cudaSuccess) err = cudaMemcpy( d_b, b, size, cudaMemcpyHostToDevice );
	/* launch the kernel on the GPU: N blocks, 1 thread per block */
	if (err == cudaSuccess)
	{
		add <<< N, 1 >>>(d_a, d_b, d_c);
		err = cudaGetLastError();	/* launch-configuration errors surface here */
	}
	/* copy result back to host (blocking copy also synchronizes the kernel) */
	if (err == cudaSuccess) err = cudaMemcpy( c, d_c, size, cudaMemcpyDeviceToHost );
	if (err != cudaSuccess)
	{
		fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err));
	}
	else
	{
		for( int i = 0; i < N; i++ )
		{
			printf("c[%d] = %d\n",i,c[i]);
		}
	}
	/* clean up */
	free(a);
	free(b);
	free(c);
	cudaFree( d_a );
	cudaFree( d_b );
	cudaFree( d_c );
	return err == cudaSuccess ? 0 : 1;
} /* end main */
|
e78dc79a819ac4fef92da8b5e2e94dfea8cfaa28.hip | // !!! This is a file automatically generated by hipify!!!
//
// include files
//
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <helper_cuda.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
//
// kernel routine
//
// Each thread writes into its own global slot x[tid].
// NOTE(review): the value stored is the *block-local* index threadIdx.x, not
// tid — intentional in this tutorial so the printed output repeats
// 0..blockDim.x-1 once per block.
__global__ void my_first_kernel(float *x)
{
  int tid = threadIdx.x + blockDim.x*blockIdx.x;
  x[tid] = (float) threadIdx.x;
}
//
// main code
//
// Tutorial driver: launches my_first_kernel on 2 blocks x 8 threads, copies
// the 16 floats back and prints them. findCudaDevice/checkCudaErrors/
// getLastCudaError come from helper_cuda.h.
// NOTE(review): hipify-generated file — fix the CUDA source and regenerate
// rather than hand-editing this file.
int main(int argc, const char **argv)
{
  float *h_x, *d_x;
  int   nblocks, nthreads, nsize, n;

  // initialise card
  findCudaDevice(argc, argv);
  // set number of blocks, and threads per block
  nblocks  = 2;
  nthreads = 8;
  nsize    = nblocks*nthreads ;
  // allocate memory for array
  h_x = (float *)malloc(nsize*sizeof(float));
  checkCudaErrors(hipMalloc((void **)&d_x, nsize*sizeof(float)));
  // execute kernel
  hipLaunchKernelGGL(( my_first_kernel), dim3(nblocks),dim3(nthreads), 0, 0, d_x);
  getLastCudaError("my_first_kernel execution failed\n");
  // copy back results and print them out
  checkCudaErrors( hipMemcpy(h_x,d_x,nsize*sizeof(float),
                 hipMemcpyDeviceToHost) );
  for (n=0; n<nsize; n++) printf(" n, x = %d  %f \n",n,h_x[n]);
  // free memory
  checkCudaErrors(hipFree(d_x));
  free(h_x);
  // CUDA exit -- needed to flush printf write buffer
  hipDeviceReset();
  return 0;
}
| e78dc79a819ac4fef92da8b5e2e94dfea8cfaa28.cu | //
// include files
//
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <helper_cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
//
// kernel routine
//
// Each thread writes into its own global slot x[tid].
// NOTE(review): the value stored is the *block-local* index threadIdx.x, not
// tid — intentional in this tutorial so the printed output repeats
// 0..blockDim.x-1 once per block.
__global__ void my_first_kernel(float *x)
{
  int tid = threadIdx.x + blockDim.x*blockIdx.x;
  x[tid] = (float) threadIdx.x;
}
//
// main code
//
// Tutorial driver: launches my_first_kernel on 2 blocks x 8 threads, copies
// the 16 floats back and prints them. findCudaDevice/checkCudaErrors/
// getLastCudaError come from helper_cuda.h.
int main(int argc, const char **argv)
{
  // initialise card
  findCudaDevice(argc, argv);

  // launch geometry: 2 blocks of 8 threads -> 16 array elements
  const int numBlocks  = 2;
  const int numThreads = 8;
  const int count      = numBlocks * numThreads;

  // allocate host and device buffers
  float *hostData = (float *)malloc(count*sizeof(float));
  float *devData;
  checkCudaErrors(cudaMalloc((void **)&devData, count*sizeof(float)));

  // execute kernel
  my_first_kernel<<<numBlocks,numThreads>>>(devData);
  getLastCudaError("my_first_kernel execution failed\n");

  // copy back results and print them out
  checkCudaErrors( cudaMemcpy(hostData,devData,count*sizeof(float),
                 cudaMemcpyDeviceToHost) );
  for (int n=0; n<count; n++) printf(" n, x = %d  %f \n",n,hostData[n]);

  // free memory
  checkCudaErrors(cudaFree(devData));
  free(hostData);

  // CUDA exit -- needed to flush printf write buffer
  cudaDeviceReset();
  return 0;
}
|
ce14a52d584908c9bd3b899892891c6c4315d017.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2018 BlazingDB, Inc.
* Copyright 2018 Cristhian Alberto Gonzales Castillo <cristhian@blazingdb.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <thrust/device_ptr.h>
#include <thrust/find.h>
#include <thrust/execution_policy.h>
#include "cudf.h"
#include "utilities/error_utils.h"
#include "utilities//type_dispatcher.hpp"
namespace{ //anonymous
constexpr int BLOCK_SIZE = 256;
/* --------------------------------------------------------------------------*/
/**
* @brief Kernel that replaces elements from `d_col_data` given the following
* rule: replace all `old_values[i]` in [old_values_begin`, `old_values_end`)
* present in `d_col_data` with `d_new_values[i]`.
*
* @Param[in,out] d_col_data Device array with the data to be modified
* @Param[in] nrows # rows in `d_col_data`
* @Param[in] old_values_begin Device pointer to the beginning of the sequence
* of old values to be replaced
* @Param[in] old_values_end Device pointer to the end of the sequence
* of old values to be replaced
* @Param[in] d_new_values Device array with the new values
*
* @Returns
*/
/* ----------------------------------------------------------------------------*/
template <class T>
__global__
void replace_kernel(T* d_col_data,
size_t nrows,
thrust::device_ptr<const T> old_values_begin,
thrust::device_ptr<const T> old_values_end,
const T* d_new_values)
{
size_t i = blockIdx.x * blockDim.x + threadIdx.x;
while(i < nrows)
{
auto found_ptr = thrust::find(thrust::seq, old_values_begin, old_values_end, d_col_data[i]);
if (found_ptr != old_values_end) {
auto d = thrust::distance(old_values_begin, found_ptr);
d_col_data[i] = d_new_values[d];
}
i += blockDim.x * gridDim.x;
}
}
/* --------------------------------------------------------------------------*/
/**
* @brief Functor called by the `type_dispatcher` in order to invoke and instantiate
* `replace_kernel` with the apropiate data types.
*/
/* ----------------------------------------------------------------------------*/
struct replace_kernel_forwarder {
template <typename col_type>
void operator()(void* d_col_data,
size_t nrows,
const void* d_old_values,
const void* d_new_values,
size_t nvalues)
{
thrust::device_ptr<const col_type> old_values_begin = thrust::device_pointer_cast(static_cast<const col_type*>(d_old_values));
const size_t grid_size = nrows / BLOCK_SIZE + (nrows % BLOCK_SIZE != 0);
hipLaunchKernelGGL(( replace_kernel), dim3(grid_size), dim3(BLOCK_SIZE), 0, 0, static_cast<col_type*>(d_col_data),
nrows,
old_values_begin,
old_values_begin + nvalues,
static_cast<const col_type*>(d_new_values));
}
};
gdf_error find_and_replace_all(gdf_column* col,
const gdf_column* old_values,
const gdf_column* new_values)
{
GDF_REQUIRE(col != nullptr && old_values != nullptr && new_values != nullptr, GDF_DATASET_EMPTY);
GDF_REQUIRE(old_values->size == new_values->size, GDF_COLUMN_SIZE_MISMATCH);
GDF_REQUIRE(col->dtype == old_values->dtype && col->dtype == new_values->dtype, GDF_DTYPE_MISMATCH);
GDF_REQUIRE(col->valid == nullptr || col->null_count == 0, GDF_VALIDITY_UNSUPPORTED);
GDF_REQUIRE(old_values->valid == nullptr || old_values->null_count == 0, GDF_VALIDITY_UNSUPPORTED);
GDF_REQUIRE(new_values->valid == nullptr || new_values->null_count == 0, GDF_VALIDITY_UNSUPPORTED);
cudf::type_dispatcher(col->dtype, replace_kernel_forwarder{},
col->data,
col->size,
old_values->data,
new_values->data,
new_values->size);
return GDF_SUCCESS;
}
} //end anonymous namespace
/* --------------------------------------------------------------------------*/
/**
* @brief Replace elements from `col` according to the mapping `old_values` to
* `new_values`, that is, replace all `old_values[i]` present in `col`
* with `new_values[i]`.
*
* @Param[in,out] col gdf_column with the data to be modified
* @Param[in] old_values gdf_column with the old values to be replaced
* @Param[in] new_values gdf_column with the new values
*
* @Returns GDF_SUCCESS upon successful completion
*/
/* ----------------------------------------------------------------------------*/
gdf_error gdf_find_and_replace_all(gdf_column* col,
const gdf_column* old_values,
const gdf_column* new_values)
{
return find_and_replace_all(col, old_values, new_values);
}
| ce14a52d584908c9bd3b899892891c6c4315d017.cu | /*
* Copyright 2018 BlazingDB, Inc.
* Copyright 2018 Cristhian Alberto Gonzales Castillo <cristhian@blazingdb.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <thrust/device_ptr.h>
#include <thrust/find.h>
#include <thrust/execution_policy.h>
#include "cudf.h"
#include "utilities/error_utils.h"
#include "utilities//type_dispatcher.hpp"
namespace{ //anonymous
constexpr int BLOCK_SIZE = 256;
/* --------------------------------------------------------------------------*/
/**
* @brief Kernel that replaces elements from `d_col_data` given the following
* rule: replace all `old_values[i]` in [old_values_begin`, `old_values_end`)
* present in `d_col_data` with `d_new_values[i]`.
*
* @Param[in,out] d_col_data Device array with the data to be modified
* @Param[in] nrows # rows in `d_col_data`
* @Param[in] old_values_begin Device pointer to the beginning of the sequence
* of old values to be replaced
* @Param[in] old_values_end Device pointer to the end of the sequence
* of old values to be replaced
* @Param[in] d_new_values Device array with the new values
*
* @Returns
*/
/* ----------------------------------------------------------------------------*/
template <class T>
__global__
void replace_kernel(T* d_col_data,
size_t nrows,
thrust::device_ptr<const T> old_values_begin,
thrust::device_ptr<const T> old_values_end,
const T* d_new_values)
{
size_t i = blockIdx.x * blockDim.x + threadIdx.x;
while(i < nrows)
{
auto found_ptr = thrust::find(thrust::seq, old_values_begin, old_values_end, d_col_data[i]);
if (found_ptr != old_values_end) {
auto d = thrust::distance(old_values_begin, found_ptr);
d_col_data[i] = d_new_values[d];
}
i += blockDim.x * gridDim.x;
}
}
/* --------------------------------------------------------------------------*/
/**
* @brief Functor called by the `type_dispatcher` in order to invoke and instantiate
* `replace_kernel` with the apropiate data types.
*/
/* ----------------------------------------------------------------------------*/
struct replace_kernel_forwarder {
template <typename col_type>
void operator()(void* d_col_data,
size_t nrows,
const void* d_old_values,
const void* d_new_values,
size_t nvalues)
{
thrust::device_ptr<const col_type> old_values_begin = thrust::device_pointer_cast(static_cast<const col_type*>(d_old_values));
const size_t grid_size = nrows / BLOCK_SIZE + (nrows % BLOCK_SIZE != 0);
replace_kernel<<<grid_size, BLOCK_SIZE>>>(static_cast<col_type*>(d_col_data),
nrows,
old_values_begin,
old_values_begin + nvalues,
static_cast<const col_type*>(d_new_values));
}
};
gdf_error find_and_replace_all(gdf_column* col,
const gdf_column* old_values,
const gdf_column* new_values)
{
GDF_REQUIRE(col != nullptr && old_values != nullptr && new_values != nullptr, GDF_DATASET_EMPTY);
GDF_REQUIRE(old_values->size == new_values->size, GDF_COLUMN_SIZE_MISMATCH);
GDF_REQUIRE(col->dtype == old_values->dtype && col->dtype == new_values->dtype, GDF_DTYPE_MISMATCH);
GDF_REQUIRE(col->valid == nullptr || col->null_count == 0, GDF_VALIDITY_UNSUPPORTED);
GDF_REQUIRE(old_values->valid == nullptr || old_values->null_count == 0, GDF_VALIDITY_UNSUPPORTED);
GDF_REQUIRE(new_values->valid == nullptr || new_values->null_count == 0, GDF_VALIDITY_UNSUPPORTED);
cudf::type_dispatcher(col->dtype, replace_kernel_forwarder{},
col->data,
col->size,
old_values->data,
new_values->data,
new_values->size);
return GDF_SUCCESS;
}
} //end anonymous namespace
/* --------------------------------------------------------------------------*/
/**
* @brief Replace elements from `col` according to the mapping `old_values` to
* `new_values`, that is, replace all `old_values[i]` present in `col`
* with `new_values[i]`.
*
* @Param[in,out] col gdf_column with the data to be modified
* @Param[in] old_values gdf_column with the old values to be replaced
* @Param[in] new_values gdf_column with the new values
*
* @Returns GDF_SUCCESS upon successful completion
*/
/* ----------------------------------------------------------------------------*/
gdf_error gdf_find_and_replace_all(gdf_column* col,
const gdf_column* old_values,
const gdf_column* new_values)
{
return find_and_replace_all(col, old_values, new_values);
}
|
5be766ce4c4832df3d80c6579f306a474dab74c8.hip | // !!! This is a file automatically generated by hipify!!!
#include "matx.h"
#include <nvbench/nvbench.cuh>
#include "matx/core/nvtx.h"
using namespace matx;
using svd_types =
nvbench::type_list<float, double, cuda::std::complex<float>, cuda::std::complex<double>>;
/* SVD benchmarks */
template <typename ValueType>
void svdpi_batch(nvbench::state &state,
nvbench::type_list<ValueType>)
{
using AType = ValueType;
using SType = typename inner_op_type_t<AType>::type;
hipStream_t stream = 0;
state.set_cuda_stream(nvbench::make_cuda_stream_view(stream));
int batch = state.get_int64("batch");
int m = state.get_int64("rows");
int n = state.get_int64("cols");
int r = ::min(n,m);
auto A = make_tensor<AType>({batch, m, n});
auto U = make_tensor<AType>({batch, m, r});
auto VT = make_tensor<AType>({batch, r, n});
auto S = make_tensor<SType>({batch, r});
int iterations = 10;
(A = random<float>({batch, m, n}, NORMAL)).run(stream);
A.PrefetchDevice(stream);
U.PrefetchDevice(stream);
S.PrefetchDevice(stream);
VT.PrefetchDevice(stream);
(U = 0).run(stream);
(S = 0).run(stream);
(VT = 0).run(stream);
auto x0 = random<float>({batch, r}, NORMAL);
// warm up
roctxRangePushA("Warmup");
(mtie(U, S, VT) = svdpi(A, x0, iterations, r)).run(stream);
hipDeviceSynchronize();
roctxRangePop();
MATX_NVTX_START_RANGE( "Exec", matx_nvxtLogLevels::MATX_NVTX_LOG_ALL, 1 )
state.exec(
[&U, &S, &VT, &A, &x0, &iterations, &r](nvbench::launch &launch) {
(mtie(U, S, VT) = svdpi(A, x0, iterations, r)).run(cudaExecutor{launch.get_stream()}); });
MATX_NVTX_END_RANGE( 1 )
}
NVBENCH_BENCH_TYPES(svdpi_batch, NVBENCH_TYPE_AXES(svd_types))
.add_int64_axis("cols", {4, 16, 64})
.add_int64_axis("rows", {4})
.add_int64_axis("batch", {3000});
template <typename ValueType>
void svdbpi_batch(nvbench::state &state,
nvbench::type_list<ValueType>)
{
using AType = ValueType;
using SType = typename inner_op_type_t<AType>::type;
hipStream_t stream = 0;
state.set_cuda_stream(nvbench::make_cuda_stream_view(stream));
int batch = state.get_int64("batch");
int m = state.get_int64("rows");
int n = state.get_int64("cols");
int r = ::min(n,m);
auto A = make_tensor<AType>({batch, m, n});
auto U = make_tensor<AType>({batch, m, r});
auto VT = make_tensor<AType>({batch, r, n});
auto S = make_tensor<SType>({batch, r});
int iterations = 10;
(A = random<float>({batch, m, n}, NORMAL)).run(stream);
A.PrefetchDevice(stream);
U.PrefetchDevice(stream);
S.PrefetchDevice(stream);
VT.PrefetchDevice(stream);
(U = 0).run(stream);
(S = 0).run(stream);
(VT = 0).run(stream);
// warm up
roctxRangePushA("Warmup");
(mtie(U, S, VT) = svdbpi(A, iterations)).run(stream);
hipDeviceSynchronize();
roctxRangePop();
MATX_NVTX_START_RANGE( "Exec", matx_nvxtLogLevels::MATX_NVTX_LOG_ALL, 1 )
state.exec(
[&U, &S, &VT, &A, &iterations, &r](nvbench::launch &launch) {
(mtie(U, S, VT) = svdbpi(A, iterations)).run(cudaExecutor{launch.get_stream()}); });
MATX_NVTX_END_RANGE( 1 )
}
NVBENCH_BENCH_TYPES(svdbpi_batch, NVBENCH_TYPE_AXES(svd_types))
.add_int64_axis("cols", {4, 16, 64})
.add_int64_axis("rows", {4})
.add_int64_axis("batch", {3000});
| 5be766ce4c4832df3d80c6579f306a474dab74c8.cu | #include "matx.h"
#include <nvbench/nvbench.cuh>
#include "matx/core/nvtx.h"
using namespace matx;
using svd_types =
nvbench::type_list<float, double, cuda::std::complex<float>, cuda::std::complex<double>>;
/* SVD benchmarks */
template <typename ValueType>
void svdpi_batch(nvbench::state &state,
nvbench::type_list<ValueType>)
{
using AType = ValueType;
using SType = typename inner_op_type_t<AType>::type;
cudaStream_t stream = 0;
state.set_cuda_stream(nvbench::make_cuda_stream_view(stream));
int batch = state.get_int64("batch");
int m = state.get_int64("rows");
int n = state.get_int64("cols");
int r = std::min(n,m);
auto A = make_tensor<AType>({batch, m, n});
auto U = make_tensor<AType>({batch, m, r});
auto VT = make_tensor<AType>({batch, r, n});
auto S = make_tensor<SType>({batch, r});
int iterations = 10;
(A = random<float>({batch, m, n}, NORMAL)).run(stream);
A.PrefetchDevice(stream);
U.PrefetchDevice(stream);
S.PrefetchDevice(stream);
VT.PrefetchDevice(stream);
(U = 0).run(stream);
(S = 0).run(stream);
(VT = 0).run(stream);
auto x0 = random<float>({batch, r}, NORMAL);
// warm up
nvtxRangePushA("Warmup");
(mtie(U, S, VT) = svdpi(A, x0, iterations, r)).run(stream);
cudaDeviceSynchronize();
nvtxRangePop();
MATX_NVTX_START_RANGE( "Exec", matx_nvxtLogLevels::MATX_NVTX_LOG_ALL, 1 )
state.exec(
[&U, &S, &VT, &A, &x0, &iterations, &r](nvbench::launch &launch) {
(mtie(U, S, VT) = svdpi(A, x0, iterations, r)).run(cudaExecutor{launch.get_stream()}); });
MATX_NVTX_END_RANGE( 1 )
}
NVBENCH_BENCH_TYPES(svdpi_batch, NVBENCH_TYPE_AXES(svd_types))
.add_int64_axis("cols", {4, 16, 64})
.add_int64_axis("rows", {4})
.add_int64_axis("batch", {3000});
template <typename ValueType>
void svdbpi_batch(nvbench::state &state,
nvbench::type_list<ValueType>)
{
using AType = ValueType;
using SType = typename inner_op_type_t<AType>::type;
cudaStream_t stream = 0;
state.set_cuda_stream(nvbench::make_cuda_stream_view(stream));
int batch = state.get_int64("batch");
int m = state.get_int64("rows");
int n = state.get_int64("cols");
int r = std::min(n,m);
auto A = make_tensor<AType>({batch, m, n});
auto U = make_tensor<AType>({batch, m, r});
auto VT = make_tensor<AType>({batch, r, n});
auto S = make_tensor<SType>({batch, r});
int iterations = 10;
(A = random<float>({batch, m, n}, NORMAL)).run(stream);
A.PrefetchDevice(stream);
U.PrefetchDevice(stream);
S.PrefetchDevice(stream);
VT.PrefetchDevice(stream);
(U = 0).run(stream);
(S = 0).run(stream);
(VT = 0).run(stream);
// warm up
nvtxRangePushA("Warmup");
(mtie(U, S, VT) = svdbpi(A, iterations)).run(stream);
cudaDeviceSynchronize();
nvtxRangePop();
MATX_NVTX_START_RANGE( "Exec", matx_nvxtLogLevels::MATX_NVTX_LOG_ALL, 1 )
state.exec(
[&U, &S, &VT, &A, &iterations, &r](nvbench::launch &launch) {
(mtie(U, S, VT) = svdbpi(A, iterations)).run(cudaExecutor{launch.get_stream()}); });
MATX_NVTX_END_RANGE( 1 )
}
NVBENCH_BENCH_TYPES(svdbpi_batch, NVBENCH_TYPE_AXES(svd_types))
.add_int64_axis("cols", {4, 16, 64})
.add_int64_axis("rows", {4})
.add_int64_axis("batch", {3000});
|
32165538da83a3b15e62992ea1ab5665d744de3a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/*
* This file is an attempt at producing what the generated target code
* should look like for the multiplyMatrixMatrix routine.
*/
/* Prototype matrix representation. */
struct dag_array_t{
size_t rows;
size_t cols;
int* matrix;
};
/*
DAG Primitive. Here, we leverage the NVIDIA developer examples
to obtain a high-bandwith operation. They make use of shared memory
to avoid strided global memory accesses, and instead perform the
strided access in the shared block, which is roughly a ~3x improvement.
TILE_DIM = 32
BLOCK_ROWS = 8
https://devblogs.nvidia.com/efficient-matrix-transpose-cuda-cc/
*/
const int tp_TILE_DIM = 32;
const int tp_BLOCK_ROWS = 8;
// We use single-dimensional lists.
__global__ void transposeCoalesced(int *result, const int *in)
{
const int TILE_DIM = tp_TILE_DIM;
const int BLOCK_ROWS = tp_BLOCK_ROWS;
__shared__ int tile[TILE_DIM][TILE_DIM];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
tile[threadIdx.y+j][threadIdx.x] = in[(y+j)*width + x];
__syncthreads();
x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset
y = blockIdx.x * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
result[(y+j)*width + x] = tile[threadIdx.x][threadIdx.y + j];
} | 32165538da83a3b15e62992ea1ab5665d744de3a.cu | #include "includes.h"
/*
* This file is an attempt at producing what the generated target code
* should look like for the multiplyMatrixMatrix routine.
*/
/* Prototype matrix representation. */
struct dag_array_t{
size_t rows;
size_t cols;
int* matrix;
};
/*
DAG Primitive. Here, we leverage the NVIDIA developer examples
to obtain a high-bandwith operation. They make use of shared memory
to avoid strided global memory accesses, and instead perform the
strided access in the shared block, which is roughly a ~3x improvement.
TILE_DIM = 32
BLOCK_ROWS = 8
https://devblogs.nvidia.com/efficient-matrix-transpose-cuda-cc/
*/
const int tp_TILE_DIM = 32;
const int tp_BLOCK_ROWS = 8;
// We use single-dimensional lists.
__global__ void transposeCoalesced(int *result, const int *in)
{
const int TILE_DIM = tp_TILE_DIM;
const int BLOCK_ROWS = tp_BLOCK_ROWS;
__shared__ int tile[TILE_DIM][TILE_DIM];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
tile[threadIdx.y+j][threadIdx.x] = in[(y+j)*width + x];
__syncthreads();
x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset
y = blockIdx.x * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
result[(y+j)*width + x] = tile[threadIdx.x][threadIdx.y + j];
} |
c1d5e23a13fd019d095e1513990f81c8001c9427.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "cudakernel/unary/leakyrelu.h"
#include <hip/hip_fp16.h>
#if __CUDA_ARCH__ >= 600 && __CUDACC_VER_MAJOR__ >= 9
template <typename DataT>
__device__ __inline__ DataT ppl_scalar_leakyrelu(const DataT& in_val, float alpha);
template <>
__device__ __inline__ float ppl_scalar_leakyrelu<float>(const float& in_val, float alpha)
{
float res;
res = (in_val > 0) ? in_val : alpha * in_val;
return res;
}
template <>
__device__ __inline__ half ppl_scalar_leakyrelu<half>(const half& in_val, float alpha)
{
half res;
res = __hgt(in_val, 0) ? in_val : __hmul((half)alpha, in_val);
return res;
}
#endif
template <typename DataT>
__global__ void ppl_cukernel_unary_leakyrelu(
const uint64_t num_elems,
const DataT* input,
DataT* output,
float alpha)
{
#if __CUDA_ARCH__ >= 600 && __CUDACC_VER_MAJOR__ >= 9
uint64_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num_elems)
return;
DataT in_val = input[index];
output[index] = ppl_scalar_leakyrelu<DataT>(in_val, alpha);
#endif
}
ppl::common::RetCode PPLCUDAUnaryLeakyReluForwardImp(
hipStream_t stream,
const ppl::nn::TensorShape* input_shape,
const void* input,
const ppl::nn::TensorShape* output_shape,
void* output,
float alpha)
{
uint64_t num_elems = output_shape->GetElementsIncludingPadding();
int block_size = 256;
uint64_t grid_size = (num_elems + block_size - 1) / block_size;
if (output_shape->GetDataType() == ppl::common::DATATYPE_FLOAT32) {
hipLaunchKernelGGL(( ppl_cukernel_unary_leakyrelu<float>), dim3(grid_size), dim3(block_size), 0, stream, num_elems, (const float*)input, (float*)output, alpha);
} else if (output_shape->GetDataType() == ppl::common::DATATYPE_FLOAT16) {
hipLaunchKernelGGL(( ppl_cukernel_unary_leakyrelu<half>), dim3(grid_size), dim3(block_size), 0, stream, num_elems, (const half*)input, (half*)output, alpha);
} else {
return ppl::common::RC_UNSUPPORTED;
}
return ppl::common::RC_SUCCESS;
}
| c1d5e23a13fd019d095e1513990f81c8001c9427.cu | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "cudakernel/unary/leakyrelu.h"
#include <cuda_fp16.h>
#if __CUDA_ARCH__ >= 600 && __CUDACC_VER_MAJOR__ >= 9
template <typename DataT>
__device__ __inline__ DataT ppl_scalar_leakyrelu(const DataT& in_val, float alpha);
template <>
__device__ __inline__ float ppl_scalar_leakyrelu<float>(const float& in_val, float alpha)
{
float res;
res = (in_val > 0) ? in_val : alpha * in_val;
return res;
}
template <>
__device__ __inline__ half ppl_scalar_leakyrelu<half>(const half& in_val, float alpha)
{
half res;
res = __hgt(in_val, 0) ? in_val : __hmul((half)alpha, in_val);
return res;
}
#endif
template <typename DataT>
__global__ void ppl_cukernel_unary_leakyrelu(
const uint64_t num_elems,
const DataT* input,
DataT* output,
float alpha)
{
#if __CUDA_ARCH__ >= 600 && __CUDACC_VER_MAJOR__ >= 9
uint64_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num_elems)
return;
DataT in_val = input[index];
output[index] = ppl_scalar_leakyrelu<DataT>(in_val, alpha);
#endif
}
ppl::common::RetCode PPLCUDAUnaryLeakyReluForwardImp(
cudaStream_t stream,
const ppl::nn::TensorShape* input_shape,
const void* input,
const ppl::nn::TensorShape* output_shape,
void* output,
float alpha)
{
uint64_t num_elems = output_shape->GetElementsIncludingPadding();
int block_size = 256;
uint64_t grid_size = (num_elems + block_size - 1) / block_size;
if (output_shape->GetDataType() == ppl::common::DATATYPE_FLOAT32) {
ppl_cukernel_unary_leakyrelu<float><<<grid_size, block_size, 0, stream>>>(num_elems, (const float*)input, (float*)output, alpha);
} else if (output_shape->GetDataType() == ppl::common::DATATYPE_FLOAT16) {
ppl_cukernel_unary_leakyrelu<half><<<grid_size, block_size, 0, stream>>>(num_elems, (const half*)input, (half*)output, alpha);
} else {
return ppl::common::RC_UNSUPPORTED;
}
return ppl::common::RC_SUCCESS;
}
|
eb7162b6029dfe510d54713e80f99fe41fab0a52.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdlib>
#include <sys/time.h>
#include <math.h>
#include "wave.h"
__global__
void CalcFrame(unsigned char* pic, int width)
{
int frame = threadIdx.x;
for (int row = 0; row < width; row++) {
for (int col = 0; col < width; col++) {
float fx = col - 1024/2;
float fy = row - 1024/2;
float d = sqrtf( fx * fx + fy * fy );
unsigned char color = (unsigned char) (160.0f + 127.0f *
cos(d/10.0f - frame/7.0f) /
(d/50.0f + 1.0f));
pic[frame * width * width + row * width + col] = (unsigned char) color;
}
}
}
int main(int argc, char *argv[])
{
// check command line
if (argc != 3) {fprintf(stderr, "usage: %s frame_width num_frames\n", argv[0]); exit(-1);}
int width = atoi(argv[1]);
if (width < 100) {fprintf(stderr, "error: frame_width must be at least 100\n"); exit(-1);}
int frames = atoi(argv[2]);
if (frames < 1) {fprintf(stderr, "error: num_frames must be at least 1\n"); exit(-1);}
printf("computing %d frames of %d by %d picture\n", frames, width, width);
// allocate picture array
unsigned char* pic;
hipMallocManaged(&pic, frames * width * width * sizeof(char));
hipLaunchKernelGGL(( CalcFrame), dim3(1),dim3(frames), 0, 0, pic,width);
hipDeviceSynchronize();
// verify result by writing frames to BMP files
if ((frames <= 100)) {
for (int frame = 0; frame < frames; frame++) {
char name[32];
sprintf(name, "wave%d.bmp", frame + 1000);
writeBMP(width, width, &pic[frame * width * width], name);
}
}
hipFree(pic);
return 0;
}
| eb7162b6029dfe510d54713e80f99fe41fab0a52.cu | #include <cstdlib>
#include <sys/time.h>
#include <math.h>
#include "wave.h"
__global__
void CalcFrame(unsigned char* pic, int width)
{
int frame = threadIdx.x;
for (int row = 0; row < width; row++) {
for (int col = 0; col < width; col++) {
float fx = col - 1024/2;
float fy = row - 1024/2;
float d = sqrtf( fx * fx + fy * fy );
unsigned char color = (unsigned char) (160.0f + 127.0f *
cos(d/10.0f - frame/7.0f) /
(d/50.0f + 1.0f));
pic[frame * width * width + row * width + col] = (unsigned char) color;
}
}
}
int main(int argc, char *argv[])
{
// check command line
if (argc != 3) {fprintf(stderr, "usage: %s frame_width num_frames\n", argv[0]); exit(-1);}
int width = atoi(argv[1]);
if (width < 100) {fprintf(stderr, "error: frame_width must be at least 100\n"); exit(-1);}
int frames = atoi(argv[2]);
if (frames < 1) {fprintf(stderr, "error: num_frames must be at least 1\n"); exit(-1);}
printf("computing %d frames of %d by %d picture\n", frames, width, width);
// allocate picture array
unsigned char* pic;
cudaMallocManaged(&pic, frames * width * width * sizeof(char));
CalcFrame<<<1,frames>>>(pic,width);
cudaDeviceSynchronize();
// verify result by writing frames to BMP files
if ((frames <= 100)) {
for (int frame = 0; frame < frames; frame++) {
char name[32];
sprintf(name, "wave%d.bmp", frame + 1000);
writeBMP(width, width, &pic[frame * width * width], name);
}
}
cudaFree(pic);
return 0;
}
|
fe1fa6b0d67ffa3e3acb9cac88cb458555e401ed.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*-----------------------------------------------------------------------------
..Name: GPU_ALL_KNN_MED.cu
Desc: This file contains the ALL-KNN kd-tree GPU kernel
by Shawn Brown (shawndb@cs.unc.edu)
-----------------------------------------------------------------------------*/
#ifndef _GPU_ALL_KNN_2D_MED_H_
#define _GPU_ALL_KNN_2D_MED_H_
/*---------------------------------------------------------
Includes
---------------------------------------------------------*/
#include <stdio.h>
//#include <float.h>
#include "GPUTREE_API.h"
/*---------------------------------------------------------
Function Definitions
---------------------------------------------------------*/
/*---------------------------------------------------------
Name: GPU_ALL_KNN_2D_MED
Desc: Finds the 'k' Nearest Neighbors in
a search set 'S' for each query point in set 'Q'
Notes:
1. The search set S and query set Q are the same
for the All-KNN search.
2. We need to exclude zero distance results
Otherwise, each point will return itself as
its own nearest neighbor
3. The search set S is represented by a
static balanced cyclical KDTree
with one search point stored per kd-tree node
---------------------------------------------------------*/
__global__ void
GPU_ALL_KNN_2D_MED
(
GPU_NN_Result * qrs, // OUT: Results of KD Nearest Neighbor Algorithm
GPUNode_2D_MED * kdTree, // IN: KD Tree (Nodes)
unsigned int * ids, // IN: IDs (from Indexs)
int rootIdx, // IN: index of root node in KD Tree
unsigned int k // IN: number of nearest neighbors to find
)
{
// Per Thread Local Parameters (shared memory)
__shared__ GPUNode_2D_MED currNodes[KNN_THREADS_PER_BLOCK]; // Current kd-tree node
__shared__ GPU_Search searchStack[ALL_KNN_STACK_SIZE][KNN_THREADS_PER_BLOCK]; // Search Stack
__shared__ GPU_NN_Result knnHeap[KD_KNN_SIZE][KNN_THREADS_PER_BLOCK]; // 'k' NN Closest Heap
__shared__ GPUNode_2D_MED queryPoints[KNN_THREADS_PER_BLOCK]; // Query Point
// Per Thread Local Parameters (registers)
unsigned int currIdx, currAxis, currInOut, nextAxis;
unsigned int stackTop, maxHeap, countHeap;
float dx, dy, diff, diff2, diffDist2;
float queryValue, splitValue;
float dist2Heap, bestDist2;
int tidx, width, currRow, currCol, qidx;
// Compute Thread index
tidx = (threadIdx.y*blockDim.x) + threadIdx.x;
// Compute Query Index
width = gridDim.x * blockDim.x;
currRow = (blockIdx.y * blockDim.y) + threadIdx.y;
currCol = (blockIdx.x * blockDim.x) + threadIdx.x;
qidx = (currRow * width) + currCol;
// Load current Query Point into local (fast) memory
// Slow read from RAM into shared memory
//queryPoints[tidx] = kdTree[currIdx];
// BUGBUG - Had to copy componentwise to avoid kernel crash
queryPoints[tidx].pos[0] = kdTree[qidx].pos[0];
queryPoints[tidx].pos[1] = kdTree[qidx].pos[1];
// Compute number of elements (in grid)
int height = gridDim.y * blockDim.y;
int nElems = height * width;
// Search Stack Variables
stackTop = 0;
// 'k' NN Heap variables
maxHeap = k; // Maximum # elements on knnHeap
countHeap = 0; // Current # elements on knnHeap
dist2Heap = 0.0f; // Max Dist of any element on heap
bestDist2 = 3.0e38f;
// Put root search info on stack
searchStack[stackTop][tidx].nodeFlags = (rootIdx & 0x1FFFFFFF); // | ((currAxis << 29) & 0x60000000); // | ((currInOut << 31) & 0x8000000);;
searchStack[stackTop][tidx].splitVal = 3.0e+38F;
stackTop++;
while (stackTop != 0)
{
// Statistics
//best.cNodes++;
// Get Current Node from top of stack
stackTop--;
// Get Node Info
currIdx = (searchStack[stackTop][tidx].nodeFlags & 0x1FFFFFFFU);
currAxis = (searchStack[stackTop][tidx].nodeFlags & 0x60000000U) >> 29;
currInOut = (searchStack[stackTop][tidx].nodeFlags & 0x80000000U) >> 31;
nextAxis = ((currAxis == 0) ? 1 : 0);
// Early Exit Check
if (currInOut == 1) // KD_OUT
{
if (countHeap == maxHeap) // Is heap full yet ?!?
{
// Next Line is effectively queryValue = queryPoints[prevAxis];
queryValue = ((currAxis == 0) ? queryPoints[tidx].pos[1] : queryPoints[tidx].pos[0]);
splitValue = searchStack[stackTop][tidx].splitVal; // Split Value of Parent Node
diff = splitValue - queryValue;
diff2 = diff*diff;
if (diff2 >= dist2Heap)
{
// We can do an early exit for this node
continue;
}
}
}
// WARNING - It's Much faster to load this node from global memory after the "Early Exit check" !!!
// Load current node
// currNodes[tidx] = kdtree[currIdx];
// Slow read from RAM into
// BUGBUG - Had to copy componentwise to avoid kernel crash
currNodes[tidx].pos[0] = kdTree[currIdx].pos[0];
currNodes[tidx].pos[1] = kdTree[currIdx].pos[1];
currNodes[tidx].Left = kdTree[currIdx].Left;
currNodes[tidx].Right = kdTree[currIdx].Right;
// Get Best Fit Dist for checking child ranges
queryValue = ((currAxis == 0) ? queryPoints[tidx].pos[0] : queryPoints[tidx].pos[1]);
splitValue = ((currAxis == 0) ? currNodes[tidx].pos[0] : currNodes[tidx].pos[1]);
diff = splitValue - queryValue;
diff2 = diff*diff;
// Calc Dist from Median Node to queryLocation
dx = currNodes[tidx].pos[0] - queryPoints[tidx].pos[0];
dy = currNodes[tidx].pos[1] - queryPoints[tidx].pos[1];
diffDist2 = (dx*dx) + (dy*dy);
// See if we should add this point to the 'k' NN Heap
if (diffDist2 <= 0.0f)
{
// Do nothing, The query point found itself in the kd-tree
// We don't want to add ourselves as a NN.
}
else if (countHeap < maxHeap)
{
//-------------------------------
// < 'k' elements on heap
// Do Simple Array append
//-------------------------------
countHeap++;
knnHeap[countHeap][tidx].Id = currIdx;
knnHeap[countHeap][tidx].Dist = diffDist2;
// Do we need to convert the array into a max distance heap ?!?
if (countHeap == maxHeap)
{
// Yes, turn array into a heap, takes O(k) time
for (unsigned int z = countHeap/2; z >= 1; z--)
{
//
// Demote each element in turn (to correct position in heap)
//
unsigned int parentHIdx = z; // Start at specified element
unsigned int childHIdx = z << 1; // left child of parent
// Compare Parent to it's children
while (childHIdx <= maxHeap)
{
// Update Distances
float parentD2 = knnHeap[parentHIdx][tidx].Dist;
float childD2 = knnHeap[childHIdx][tidx].Dist;
// Find largest child
if (childHIdx < maxHeap)
{
float rightD2 = knnHeap[childHIdx+1][tidx].Dist;
if (childD2 < rightD2)
{
// Use right child
childHIdx++;
childD2 = rightD2;
}
}
// Compare largest child to parent
if (parentD2 >= childD2)
{
// Parent is larger than both children, exit loop
break;
}
// Demote parent by swapping with it's largest child
GPU_NN_Result closeTemp = knnHeap[parentHIdx][tidx];
knnHeap[parentHIdx][tidx] = knnHeap[childHIdx][tidx];
knnHeap[childHIdx][tidx] = closeTemp;
// Update indices
parentHIdx = childHIdx;
childHIdx = parentHIdx<<1; // left child of parent
}
}
// Update trim distances
dist2Heap = knnHeap[1][tidx].Dist;
bestDist2 = dist2Heap;
}
}
else if (diffDist2 < dist2Heap)
{
//-------------------------------
// >= k elements on heap
// Do Heap Replacement
//-------------------------------
// Replace Root Element with new element
knnHeap[1][tidx].Id = currIdx;
knnHeap[1][tidx].Dist = diffDist2;
//
// Demote new element (to correct position in heap)
//
unsigned int parentHIdx = 1; // Start at Root
unsigned int childHIdx = 2; // left child of parent
// Compare current index to it's children
while (childHIdx <= maxHeap)
{
// Update Distances
float parentD2 = knnHeap[parentHIdx][tidx].Dist;
float childD2 = knnHeap[childHIdx][tidx].Dist;
// Find largest child
if (childHIdx < maxHeap)
{
float rightD2 = knnHeap[childHIdx+1][tidx].Dist;
if (childD2 < rightD2)
{
// Use right child
childHIdx++;
childD2 = rightD2;
}
}
// Compare largest child to parent
if (parentD2 >= childD2)
{
// Parent node is larger than both children, exit
break;
}
// Demote parent by swapping with it's largest child
GPU_NN_Result closeTemp = knnHeap[parentHIdx][tidx];
knnHeap[parentHIdx][tidx] = knnHeap[childHIdx][tidx];
knnHeap[childHIdx][tidx] = closeTemp;
// Update indices
parentHIdx = childHIdx;
childHIdx = parentHIdx<<1; // left child of parent
}
// Update Trim distances
dist2Heap = knnHeap[1][tidx].Dist;
bestDist2 = dist2Heap;
}
// update bestDist2
if (queryValue <= splitValue)
{
// [...QL...BD]...SV -> Include Left range only
// or
// [...QL...SV...BD] -> Include Both Left and Right Sub Ranges
// Check if we should add Right Sub-range to stack
if (diff2 < bestDist2)
{
//nextIdx = currNodes[tidx].Right;
if (0xFFFFFFFF != currNodes[tidx].Right) // cInvalid
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (currNodes[tidx].Right & 0x1FFFFFFFU) | ((nextAxis << 29) & 0x60000000U) | 0x80000000U;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
// Always Add Left Sub-range to search path
//nextIdx = currNodes[tidx].Left;
if (0xFFFFFFFF != currNodes[tidx].Left)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (currNodes[tidx].Left & 0x1FFFFFFFU) | ((nextAxis << 29) & 0x60000000U); // | 0x80000000U;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
else
{
// SV...[BD...QL...] -> Include Right sub range only
// or
// [BD...SV...QL...] -> Include Both Left and Right Sub Ranges
// Check if we should add left sub-range to search path
if (diff2 < bestDist2)
{
// Add to search stack
//nextIdx = currNodes[tidx].Left;
if (0xFFFFFFFFU != currNodes[tidx].Left)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (currNodes[tidx].Left & 0x1FFFFFFFU) | ((nextAxis << 29) & 0x60000000U) | 0x80000000U;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
// Always Add Right Sub-range
//nextIdx = currNodes[tidx].Right;
if (0xFFFFFFFFU != currNodes[tidx].Right)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (currNodes[tidx].Right & 0x1FFFFFFFU) | ((nextAxis << 29) & 0x60000000U); // | 0x8000000U;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
}
//
// Output Results
//
// We now have a heap of the 'k' nearest neighbors
// Write them to the results array
// Assume answers should be stored along z axis of 3 dimensional cube
for (unsigned int i = 0; i < countHeap; i++)
{
unsigned int i1 = i+1;
unsigned int offset = i * nElems;
// Convert Nearest Neighbor Info to final format
// Slow read from RAM memory
knnHeap[i1][tidx].Id = ids[knnHeap[i1][tidx].Id]; // Really need ID's not indexs
knnHeap[i1][tidx].Dist = sqrtf( knnHeap[i1][tidx].Dist ); // Get True distance (not distance squared)
// Store Result
// Slow write to RAM memory
//qrs[qidx+offset] = knnHeap[i1][tidx];
// BUGBUG - Had to copy componentwise to avoid kernel crash
qrs[qidx+offset].Id = knnHeap[i1][tidx].Id;
qrs[qidx+offset].Dist = knnHeap[i1][tidx].Dist;
}
}
#endif // _GPU_ALL_KNN_2D_MED_H_
| fe1fa6b0d67ffa3e3acb9cac88cb458555e401ed.cu | /*-----------------------------------------------------------------------------
..Name: GPU_ALL_KNN_MED.cu
Desc: This file contains the ALL-KNN kd-tree GPU kernel
by Shawn Brown (shawndb@cs.unc.edu)
-----------------------------------------------------------------------------*/
#ifndef _GPU_ALL_KNN_2D_MED_H_
#define _GPU_ALL_KNN_2D_MED_H_
/*---------------------------------------------------------
Includes
---------------------------------------------------------*/
#include <stdio.h>
//#include <float.h>
#include "GPUTREE_API.h"
/*---------------------------------------------------------
Function Definitions
---------------------------------------------------------*/
/*---------------------------------------------------------
Name: GPU_ALL_KNN_2D_MED
Desc: Finds the 'k' Nearest Neighbors in
a search set 'S' for each query point in set 'Q'
Notes:
1. The search set S and query set Q are the same
for the All-KNN search.
2. We need to exclude zero distance results
Otherwise, each point will return itself as
its own nearest neighbor
3. The search set S is represented by a
static balanced cyclical KDTree
with one search point stored per kd-tree node
---------------------------------------------------------*/
__global__ void
GPU_ALL_KNN_2D_MED
(
GPU_NN_Result * qrs, // OUT: Results of KD Nearest Neighbor Algorithm
GPUNode_2D_MED * kdTree, // IN: KD Tree (Nodes)
unsigned int * ids, // IN: IDs (from Indexs)
int rootIdx, // IN: index of root node in KD Tree
unsigned int k // IN: number of nearest neighbors to find
)
{
// Per Thread Local Parameters (shared memory)
__shared__ GPUNode_2D_MED currNodes[KNN_THREADS_PER_BLOCK]; // Current kd-tree node
__shared__ GPU_Search searchStack[ALL_KNN_STACK_SIZE][KNN_THREADS_PER_BLOCK]; // Search Stack
__shared__ GPU_NN_Result knnHeap[KD_KNN_SIZE][KNN_THREADS_PER_BLOCK]; // 'k' NN Closest Heap
__shared__ GPUNode_2D_MED queryPoints[KNN_THREADS_PER_BLOCK]; // Query Point
// Per Thread Local Parameters (registers)
unsigned int currIdx, currAxis, currInOut, nextAxis;
unsigned int stackTop, maxHeap, countHeap;
float dx, dy, diff, diff2, diffDist2;
float queryValue, splitValue;
float dist2Heap, bestDist2;
int tidx, width, currRow, currCol, qidx;
// Compute Thread index
tidx = (threadIdx.y*blockDim.x) + threadIdx.x;
// Compute Query Index
width = gridDim.x * blockDim.x;
currRow = (blockIdx.y * blockDim.y) + threadIdx.y;
currCol = (blockIdx.x * blockDim.x) + threadIdx.x;
qidx = (currRow * width) + currCol;
// Load current Query Point into local (fast) memory
// Slow read from RAM into shared memory
//queryPoints[tidx] = kdTree[currIdx];
// BUGBUG - Had to copy componentwise to avoid kernel crash
queryPoints[tidx].pos[0] = kdTree[qidx].pos[0];
queryPoints[tidx].pos[1] = kdTree[qidx].pos[1];
// Compute number of elements (in grid)
int height = gridDim.y * blockDim.y;
int nElems = height * width;
// Search Stack Variables
stackTop = 0;
// 'k' NN Heap variables
maxHeap = k; // Maximum # elements on knnHeap
countHeap = 0; // Current # elements on knnHeap
dist2Heap = 0.0f; // Max Dist of any element on heap
bestDist2 = 3.0e38f;
// Put root search info on stack
searchStack[stackTop][tidx].nodeFlags = (rootIdx & 0x1FFFFFFF); // | ((currAxis << 29) & 0x60000000); // | ((currInOut << 31) & 0x8000000);;
searchStack[stackTop][tidx].splitVal = 3.0e+38F;
stackTop++;
while (stackTop != 0)
{
// Statistics
//best.cNodes++;
// Get Current Node from top of stack
stackTop--;
// Get Node Info
currIdx = (searchStack[stackTop][tidx].nodeFlags & 0x1FFFFFFFU);
currAxis = (searchStack[stackTop][tidx].nodeFlags & 0x60000000U) >> 29;
currInOut = (searchStack[stackTop][tidx].nodeFlags & 0x80000000U) >> 31;
nextAxis = ((currAxis == 0) ? 1 : 0);
// Early Exit Check
if (currInOut == 1) // KD_OUT
{
if (countHeap == maxHeap) // Is heap full yet ?!?
{
// Next Line is effectively queryValue = queryPoints[prevAxis];
queryValue = ((currAxis == 0) ? queryPoints[tidx].pos[1] : queryPoints[tidx].pos[0]);
splitValue = searchStack[stackTop][tidx].splitVal; // Split Value of Parent Node
diff = splitValue - queryValue;
diff2 = diff*diff;
if (diff2 >= dist2Heap)
{
// We can do an early exit for this node
continue;
}
}
}
// WARNING - It's Much faster to load this node from global memory after the "Early Exit check" !!!
// Load current node
// currNodes[tidx] = kdtree[currIdx];
// Slow read from RAM into
// BUGBUG - Had to copy componentwise to avoid kernel crash
currNodes[tidx].pos[0] = kdTree[currIdx].pos[0];
currNodes[tidx].pos[1] = kdTree[currIdx].pos[1];
currNodes[tidx].Left = kdTree[currIdx].Left;
currNodes[tidx].Right = kdTree[currIdx].Right;
// Get Best Fit Dist for checking child ranges
queryValue = ((currAxis == 0) ? queryPoints[tidx].pos[0] : queryPoints[tidx].pos[1]);
splitValue = ((currAxis == 0) ? currNodes[tidx].pos[0] : currNodes[tidx].pos[1]);
diff = splitValue - queryValue;
diff2 = diff*diff;
// Calc Dist from Median Node to queryLocation
dx = currNodes[tidx].pos[0] - queryPoints[tidx].pos[0];
dy = currNodes[tidx].pos[1] - queryPoints[tidx].pos[1];
diffDist2 = (dx*dx) + (dy*dy);
// See if we should add this point to the 'k' NN Heap
if (diffDist2 <= 0.0f)
{
// Do nothing, The query point found itself in the kd-tree
// We don't want to add ourselves as a NN.
}
else if (countHeap < maxHeap)
{
//-------------------------------
// < 'k' elements on heap
// Do Simple Array append
//-------------------------------
countHeap++;
knnHeap[countHeap][tidx].Id = currIdx;
knnHeap[countHeap][tidx].Dist = diffDist2;
// Do we need to convert the array into a max distance heap ?!?
if (countHeap == maxHeap)
{
// Yes, turn array into a heap, takes O(k) time
for (unsigned int z = countHeap/2; z >= 1; z--)
{
//
// Demote each element in turn (to correct position in heap)
//
unsigned int parentHIdx = z; // Start at specified element
unsigned int childHIdx = z << 1; // left child of parent
// Compare Parent to it's children
while (childHIdx <= maxHeap)
{
// Update Distances
float parentD2 = knnHeap[parentHIdx][tidx].Dist;
float childD2 = knnHeap[childHIdx][tidx].Dist;
// Find largest child
if (childHIdx < maxHeap)
{
float rightD2 = knnHeap[childHIdx+1][tidx].Dist;
if (childD2 < rightD2)
{
// Use right child
childHIdx++;
childD2 = rightD2;
}
}
// Compare largest child to parent
if (parentD2 >= childD2)
{
// Parent is larger than both children, exit loop
break;
}
// Demote parent by swapping with it's largest child
GPU_NN_Result closeTemp = knnHeap[parentHIdx][tidx];
knnHeap[parentHIdx][tidx] = knnHeap[childHIdx][tidx];
knnHeap[childHIdx][tidx] = closeTemp;
// Update indices
parentHIdx = childHIdx;
childHIdx = parentHIdx<<1; // left child of parent
}
}
// Update trim distances
dist2Heap = knnHeap[1][tidx].Dist;
bestDist2 = dist2Heap;
}
}
else if (diffDist2 < dist2Heap)
{
//-------------------------------
// >= k elements on heap
// Do Heap Replacement
//-------------------------------
// Replace Root Element with new element
knnHeap[1][tidx].Id = currIdx;
knnHeap[1][tidx].Dist = diffDist2;
//
// Demote new element (to correct position in heap)
//
unsigned int parentHIdx = 1; // Start at Root
unsigned int childHIdx = 2; // left child of parent
// Compare current index to it's children
while (childHIdx <= maxHeap)
{
// Update Distances
float parentD2 = knnHeap[parentHIdx][tidx].Dist;
float childD2 = knnHeap[childHIdx][tidx].Dist;
// Find largest child
if (childHIdx < maxHeap)
{
float rightD2 = knnHeap[childHIdx+1][tidx].Dist;
if (childD2 < rightD2)
{
// Use right child
childHIdx++;
childD2 = rightD2;
}
}
// Compare largest child to parent
if (parentD2 >= childD2)
{
// Parent node is larger than both children, exit
break;
}
// Demote parent by swapping with it's largest child
GPU_NN_Result closeTemp = knnHeap[parentHIdx][tidx];
knnHeap[parentHIdx][tidx] = knnHeap[childHIdx][tidx];
knnHeap[childHIdx][tidx] = closeTemp;
// Update indices
parentHIdx = childHIdx;
childHIdx = parentHIdx<<1; // left child of parent
}
// Update Trim distances
dist2Heap = knnHeap[1][tidx].Dist;
bestDist2 = dist2Heap;
}
// update bestDist2
if (queryValue <= splitValue)
{
// [...QL...BD]...SV -> Include Left range only
// or
// [...QL...SV...BD] -> Include Both Left and Right Sub Ranges
// Check if we should add Right Sub-range to stack
if (diff2 < bestDist2)
{
//nextIdx = currNodes[tidx].Right;
if (0xFFFFFFFF != currNodes[tidx].Right) // cInvalid
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (currNodes[tidx].Right & 0x1FFFFFFFU) | ((nextAxis << 29) & 0x60000000U) | 0x80000000U;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
// Always Add Left Sub-range to search path
//nextIdx = currNodes[tidx].Left;
if (0xFFFFFFFF != currNodes[tidx].Left)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (currNodes[tidx].Left & 0x1FFFFFFFU) | ((nextAxis << 29) & 0x60000000U); // | 0x80000000U;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
else
{
// SV...[BD...QL...] -> Include Right sub range only
// or
// [BD...SV...QL...] -> Include Both Left and Right Sub Ranges
// Check if we should add left sub-range to search path
if (diff2 < bestDist2)
{
// Add to search stack
//nextIdx = currNodes[tidx].Left;
if (0xFFFFFFFFU != currNodes[tidx].Left)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (currNodes[tidx].Left & 0x1FFFFFFFU) | ((nextAxis << 29) & 0x60000000U) | 0x80000000U;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
// Always Add Right Sub-range
//nextIdx = currNodes[tidx].Right;
if (0xFFFFFFFFU != currNodes[tidx].Right)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (currNodes[tidx].Right & 0x1FFFFFFFU) | ((nextAxis << 29) & 0x60000000U); // | 0x8000000U;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
}
//
// Output Results
//
// We now have a heap of the 'k' nearest neighbors
// Write them to the results array
// Assume answers should be stored along z axis of 3 dimensional cube
for (unsigned int i = 0; i < countHeap; i++)
{
unsigned int i1 = i+1;
unsigned int offset = i * nElems;
// Convert Nearest Neighbor Info to final format
// Slow read from RAM memory
knnHeap[i1][tidx].Id = ids[knnHeap[i1][tidx].Id]; // Really need ID's not indexs
knnHeap[i1][tidx].Dist = sqrtf( knnHeap[i1][tidx].Dist ); // Get True distance (not distance squared)
// Store Result
// Slow write to RAM memory
//qrs[qidx+offset] = knnHeap[i1][tidx];
// BUGBUG - Had to copy componentwise to avoid kernel crash
qrs[qidx+offset].Id = knnHeap[i1][tidx].Id;
qrs[qidx+offset].Dist = knnHeap[i1][tidx].Dist;
}
}
#endif // _GPU_ALL_KNN_2D_MED_H_
|
d3e778e63e23571ced219722177e33786826cd73.hip | // !!! This is a file automatically generated by hipify!!!
#include "common.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
/*
* This example demonstrates a simple vector sum on the GPU and on the host.
* sumArraysOnGPU splits the work of the vector sum across CUDA threads on the
* GPU. A 1D thread block and 1D grid are used. sumArraysOnHost sequentially
* iterates through vector elements on the host.
*/
void initialData(float *ip, const int size)
{
int i;
for(i = 0; i < size; i++)
{
ip[i] = (float)(rand() & 0xFF ) / 10.0f;
}
return;
}
void sumMatrixOnHost(float *A, float *B, float *C, const int nx,
const int ny)
{
float *ia = A;
float *ib = B;
float *ic = C;
for (int iy = 0; iy < ny; iy++)
{
for (int ix = 0; ix < nx; ix++)
{
ic[ix] = ia[ix] + ib[ix];
}
ia += nx;
ib += nx;
ic += nx;
}
return;
}
void checkResult(float *hostRef, float *gpuRef, const int N)
{
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < N; i++)
{
if (abs(hostRef[i] - gpuRef[i]) > epsilon)
{
match = 0;
printf("host %f gpu %f\n", hostRef[i], gpuRef[i]);
break;
}
}
if (match)
printf("Arrays match.\n\n");
else
printf("Arrays do not match.\n\n");
}
// grid 1D block 1D
__global__ void sumMatrixOnGPU1D(float *MatA, float *MatB, float *MatC, int nx,
int ny)
{
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
if (ix < nx )
for (int iy = 0; iy < ny; iy++)
{
int idx = iy * nx + ix;
MatC[idx] = MatA[idx] + MatB[idx];
}
}
int main(int argc, char **argv)
{
printf("%s Starting...\n", argv[0]);
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
// set up data size of matrix
int nx = 1 << 14;
int ny = 1 << 14;
int nxy = nx * ny;
int nBytes = nxy * sizeof(float);
printf("Matrix size: nx %d ny %d\n", nx, ny);
// malloc host memory
float *h_A, *h_B, *hostRef, *gpuRef;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
// initialize data at host side
double iStart = seconds();
initialData(h_A, nxy);
initialData(h_B, nxy);
double iElaps = seconds() - iStart;
printf("initialize matrix elapsed %f sec\n", iElaps);
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
// add matrix at host side for result checks
iStart = seconds();
sumMatrixOnHost(h_A, h_B, hostRef, nx, ny);
iElaps = seconds() - iStart;
printf("sumMatrixOnHost elapsed %f sec\n", iElaps);
// malloc device global memory
float *d_MatA, *d_MatB, *d_MatC;
CHECK(hipMalloc((void **)&d_MatA, nBytes));
CHECK(hipMalloc((void **)&d_MatB, nBytes));
CHECK(hipMalloc((void **)&d_MatC, nBytes));
// transfer data from host to device
CHECK(hipMemcpy(d_MatA, h_A, nBytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_MatB, h_B, nBytes, hipMemcpyHostToDevice));
// invoke kernel at host side
int dimx = 32;
dim3 block(dimx, 1);
dim3 grid((nx + block.x - 1) / block.x, 1);
iStart = seconds();
hipLaunchKernelGGL(( sumMatrixOnGPU1D), dim3(grid), dim3(block), 0, 0, d_MatA, d_MatB, d_MatC, nx, ny);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
printf("sumMatrixOnGPU1D <<<(%d,%d), (%d,%d)>>> elapsed %f sec\n", grid.x,
grid.y,
block.x, block.y, iElaps);
// check kernel error
CHECK(hipGetLastError());
// copy kernel result back to host side
CHECK(hipMemcpy(gpuRef, d_MatC, nBytes, hipMemcpyDeviceToHost));
// check device results
checkResult(hostRef, gpuRef, nxy);
// free device global memory
CHECK(hipFree(d_MatA));
CHECK(hipFree(d_MatB));
CHECK(hipFree(d_MatC));
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
// reset device
CHECK(hipDeviceReset());
return (0);
}
| d3e778e63e23571ced219722177e33786826cd73.cu | #include "common.h"
#include <cuda_runtime.h>
#include <stdio.h>
/*
* This example demonstrates a simple vector sum on the GPU and on the host.
* sumArraysOnGPU splits the work of the vector sum across CUDA threads on the
* GPU. A 1D thread block and 1D grid are used. sumArraysOnHost sequentially
* iterates through vector elements on the host.
*/
void initialData(float *ip, const int size)
{
int i;
for(i = 0; i < size; i++)
{
ip[i] = (float)(rand() & 0xFF ) / 10.0f;
}
return;
}
void sumMatrixOnHost(float *A, float *B, float *C, const int nx,
const int ny)
{
float *ia = A;
float *ib = B;
float *ic = C;
for (int iy = 0; iy < ny; iy++)
{
for (int ix = 0; ix < nx; ix++)
{
ic[ix] = ia[ix] + ib[ix];
}
ia += nx;
ib += nx;
ic += nx;
}
return;
}
void checkResult(float *hostRef, float *gpuRef, const int N)
{
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < N; i++)
{
if (abs(hostRef[i] - gpuRef[i]) > epsilon)
{
match = 0;
printf("host %f gpu %f\n", hostRef[i], gpuRef[i]);
break;
}
}
if (match)
printf("Arrays match.\n\n");
else
printf("Arrays do not match.\n\n");
}
// grid 1D block 1D
__global__ void sumMatrixOnGPU1D(float *MatA, float *MatB, float *MatC, int nx,
int ny)
{
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
if (ix < nx )
for (int iy = 0; iy < ny; iy++)
{
int idx = iy * nx + ix;
MatC[idx] = MatA[idx] + MatB[idx];
}
}
int main(int argc, char **argv)
{
printf("%s Starting...\n", argv[0]);
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
// set up data size of matrix
int nx = 1 << 14;
int ny = 1 << 14;
int nxy = nx * ny;
int nBytes = nxy * sizeof(float);
printf("Matrix size: nx %d ny %d\n", nx, ny);
// malloc host memory
float *h_A, *h_B, *hostRef, *gpuRef;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
// initialize data at host side
double iStart = seconds();
initialData(h_A, nxy);
initialData(h_B, nxy);
double iElaps = seconds() - iStart;
printf("initialize matrix elapsed %f sec\n", iElaps);
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
// add matrix at host side for result checks
iStart = seconds();
sumMatrixOnHost(h_A, h_B, hostRef, nx, ny);
iElaps = seconds() - iStart;
printf("sumMatrixOnHost elapsed %f sec\n", iElaps);
// malloc device global memory
float *d_MatA, *d_MatB, *d_MatC;
CHECK(cudaMalloc((void **)&d_MatA, nBytes));
CHECK(cudaMalloc((void **)&d_MatB, nBytes));
CHECK(cudaMalloc((void **)&d_MatC, nBytes));
// transfer data from host to device
CHECK(cudaMemcpy(d_MatA, h_A, nBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_MatB, h_B, nBytes, cudaMemcpyHostToDevice));
// invoke kernel at host side
int dimx = 32;
dim3 block(dimx, 1);
dim3 grid((nx + block.x - 1) / block.x, 1);
iStart = seconds();
sumMatrixOnGPU1D<<<grid, block>>>(d_MatA, d_MatB, d_MatC, nx, ny);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
printf("sumMatrixOnGPU1D <<<(%d,%d), (%d,%d)>>> elapsed %f sec\n", grid.x,
grid.y,
block.x, block.y, iElaps);
// check kernel error
CHECK(cudaGetLastError());
// copy kernel result back to host side
CHECK(cudaMemcpy(gpuRef, d_MatC, nBytes, cudaMemcpyDeviceToHost));
// check device results
checkResult(hostRef, gpuRef, nxy);
// free device global memory
CHECK(cudaFree(d_MatA));
CHECK(cudaFree(d_MatB));
CHECK(cudaFree(d_MatC));
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
// reset device
CHECK(cudaDeviceReset());
return (0);
}
|
03026068ceab76597927a053792889da5db7600e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "rocblas.h"
#include "mat_io.h"
#define _USE_MATH_DEFINES
#include <math.h>
#include <stdio.h>
#include <string.h>
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, hipGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
} \
} while (0)
// Constants
const double ALPHA = 15.0f * M_PI / 180.0f;
const int BASE_FRAME = 1;
const int FRAMES = 5;
const double TR = 5.12f;
const double T10b = 1.664f * 1000.0f;
const double T10p = 1.584f * 1000.0f;
const double T10L = 0.8f * 1000.0f;
const double R10b = 1.0f / T10b;
const double R10p = 1.0f / T10p;
const double R10L = 1.0f / T10L;
const double HCT = 0.4f;
const double RELAXIVITY = 6.3f;
double *artConc(const mxArray *artFrac, const mxArray *pv) {
// Calculate S0b
int numRows = mxGetM(artFrac);
const double *artFracData = mxGetPr(artFrac);
const double *pvData = mxGetPr(pv);
float m = 0.0f;
for (int i = 0; i < FRAMES; i++) {
m += pvData[BASE_FRAME + i];
}
m /= (float)FRAMES;
double S0b = m * ((1.0f - exp(-1.0f * R10b * TR) * cos(ALPHA)) / (1.0f - exp(-1.0f * R10b * TR)) / sin(ALPHA));
// Calculate R1b
double *R1b = new double[numRows];
for (int i = 0; i < numRows; i++) {
R1b[i] = log( ( (S0b * sin(ALPHA)) - (artFracData[i] * cos(ALPHA)) ) / (S0b * sin(ALPHA) - artFracData[i]) ) / TR;
}
// Calculate Cb_artery
double *Cb_artery = new double[numRows];
for (int i = 0; i < numRows; i++) {
Cb_artery[i] = (R1b[i] - R10b) * 1000.0f / RELAXIVITY;
}
// Calculate Cb_plasma
double *Cb_plasma = new double[numRows];
for (int i = 0; i < numRows; i++) {
Cb_plasma[i] = Cb_artery[i] / (1.0f - HCT);
}
// Zero everything before the base frame
for (int i = 0; i <= BASE_FRAME; i++) {
Cb_plasma[i] = 0.0f;
}
return Cb_plasma;
}
double *pvConc(const mxArray *pv) {
// Calculate S0p
int numRows = mxGetM(pv);
const double *pvData = mxGetPr(pv);
float m = 0.0f;
for (int i = 0; i < FRAMES; i++) {
m += pvData[BASE_FRAME + i];
}
m /= (float)FRAMES;
double S0p = m * ((1.0f - exp(-1.0f * R10p * TR) * cos(ALPHA)) / (1.0f - exp(-1.0f * R10p * TR)) / sin(ALPHA));
// Calculate R1p
double *R1p = new double[numRows];
for (int i = 0; i < numRows; i++) {
R1p[i] = log( ( (S0p * sin(ALPHA)) - (pvData[i] * cos(ALPHA)) ) / (S0p * sin(ALPHA) - pvData[i]) ) / TR;
}
// Calculate Cp_artery
double *Cp_artery = new double[numRows];
for (int i = 0; i < numRows; i++) {
Cp_artery[i] = (R1p[i] - R10p) * 1000.0f / RELAXIVITY;
}
// Calculate Cp_plasma
double *Cp_plasma = new double[numRows];
for (int i = 0; i < numRows; i++) {
Cp_plasma[i] = Cp_artery[i] / (1.0f - HCT);
}
// Zero everything before the base frame
for (int i = 0; i <= BASE_FRAME; i++) {
Cp_plasma[i] = 0.0f;
}
return Cp_plasma;
}
double *clearance(const mxArray *liver) {
// Calculate S0L
int numRows = mxGetM(liver);
const double *liverData = mxGetPr(liver);
float m = 0.0f;
for (int i = 0; i < FRAMES; i++) {
m += liverData[BASE_FRAME + i];
}
m /= (float)FRAMES;
double S0L = m * ((1.0f - exp(-1.0f * R10L * TR) * cos(ALPHA)) / (1.0f - exp(-1.0f * R10L * TR)) / sin(ALPHA));
// Calculate R1L
double *R1L = new double[numRows];
for (int i = 0; i < numRows; i++) {
R1L[i] = log(((S0L * sin(ALPHA)) - (liverData[i] * cos(ALPHA))) / (S0L * sin(ALPHA) - liverData[i])) / TR;
}
// Calculate CL
double *CL = new double[numRows];
for (int i = 0; i < numRows; i++) {
CL[i] = (R1L[i] - R10L) * 1000.0f / RELAXIVITY;
}
return CL;
}
double *disc(const double *times, const double *artConc, const double *pvConc, const int n, const double AF, const double DV, const double MTT, const double t1, const double t2) {
double k1a = AF * DV / MTT;
double k1p = DV * (1.0f - AF) / MTT;
double k2 = 1.0f / MTT;
double dt = times[1] - times[0];
double *C = (double *)malloc(sizeof(double) * n);
for (int i = 1; i <= n; i++) {
double sum = 0.0f;
for (int j = 1; j <= i; j++) {
double sum1 = 0.0f;
if (round(j - t1 * 1000.0f) > 0.0f) {
sum1 += k1a * artConc[(int)round(j - t1 * 1000.0f)];
}
if (round(j - t2 * 1000.0f) > 0.0f) {
sum1 += k1p * pvConc[(int)round(j - t2 * 1000.0f)];
}
sum += sum1 * exp(-1.0f * k2 * (i - j) * dt) * dt;
}
C[i] = sum;
}
return C;
}
double *linspace(double start, double end, int n) {
double *array = new double[n];
double step = (end - start) / (n - 1);
for (int i = 0; i < n; i++) {
array[i] = start + (i * step);
}
return array;
}
__device__ int multiDimIdxToLinIdx(int *idxs, int *sizes, int nDims) {
int linIdx = 0;
for (int i = 0; i < nDims; i++) {
int sizeProduct = 1;
for (int j = 0; j < i; j++) {
sizeProduct *= sizes[j];
}
linIdx += idxs[i] * sizeProduct;
}
return linIdx;
}
__device__ int fiveDimIdxToLinIdx(int i, int size_i, int j, int size_j, int k, int size_k, int l, int size_l, int m) {
return i + (j * size_i) + (k * size_i * size_j) + (l * size_i * size_j *size_k) + (m * size_i * size_j * size_k * size_l);
}
int fiveDimIdxToLinIdxDev(int i, int size_i, int j, int size_j, int k, int size_k, int l, int size_l, int m) {
return i + (j * size_i) + (k * size_i * size_j) + (l * size_i * size_j *size_k) + (m * size_i * size_j * size_k * size_l);
}
__device__ int *linIdxToMultiDimIdx(int idx, int *sizes, int nDims) {
int *multiDimIdx = new int[nDims];
for (int i = 0; i < nDims; i++) {
if (i == 0) {
multiDimIdx[i] = idx % sizes[i];
}
else if (i == nDims - 1) {
multiDimIdx[i] = idx / sizes[i - 1];
}
else {
multiDimIdx[i] = (idx / sizes[i - 1]) % sizes[i];
}
idx -= multiDimIdx[i];
}
return multiDimIdx;
}
__device__ int *linIdxToFiveDimIdx(int idx, int size_i, int size_j, int size_k, int size_l) {
int *fiveDimIdx = new int[5];
// Get first dim
fiveDimIdx[0] = idx % size_i;
// Get second dim
idx -= fiveDimIdx[0];
fiveDimIdx[1] = (idx / size_i) % size_j;
// Get third dim
idx -= fiveDimIdx[1];
fiveDimIdx[2] = (idx / size_j) % size_k;
// Get fourth dim
idx -= fiveDimIdx[2];
fiveDimIdx[3] = (idx / size_k) % size_l;
// Get fifth dim
idx -= fiveDimIdx[3];
fiveDimIdx[4] = (idx / size_l);
return fiveDimIdx;
}
__global__ void popDict(double *dict, const double *times, const double *artConc, const double *pvConc, const int n, const double *AF, const int AF_length, const double *DV, const double DV_length, const double *MTT, const double MTT_length, const double *t1, const double t1_length, const double *t2, const double t2_length) {
const double AFx = AF[blockIdx.x];
const double DVx = DV[blockIdx.y];
const double MTTx = MTT[blockIdx.z];
const double t1x = t1[threadIdx.x];
const double t2x = t2[threadIdx.y];
int linIdx = fiveDimIdxToLinIdx(blockIdx.x, AF_length, blockIdx.y, DV_length, blockIdx.z, MTT_length, threadIdx.x, t1_length, threadIdx.y);
double k1a = AFx * DVx / MTTx;
double k1p = DVx * (1.0f - AFx) / MTTx;
double k2 = 1.0f / MTTx;
double dt = times[1] - times[0];
for (int i = 0; i < n; i++) {
double sum = 0.0f;
for (int j = 0; j <= i; j++) {
double sum1 = 0.0f;
if (round(j - t1x * 1000.0f) > 0.0f) {
sum1 += k1a * artConc[(int)round(j - t1x * 1000.0f)];
}
if (round(j - t2x * 1000.0f) > 0.0f) {
sum1 += k1p * pvConc[(int)round(j - t2x * 1000.0f)];
}
sum += sum1 * exp(-1.0f * k2 * (i - j) * dt) * dt;
}
dict[linIdx * n + i] = sum;
}
/*double k1a = AF[blockIdx.x] * DV[blockIdx.y] / MTT[blockIdx.z];
double k1p = DV[blockIdx.y] * (1.0f - AF[blockIdx.x]) / MTT[blockIdx.z];
double k2 = 1.0f / MTT[blockIdx.z];
double dt = times[1] - times[0];
for (int i = 0; i < n; i++) {
double sum = 0.0f;
for (int j = 0; j <= i; j++) {
double sum1 = 0.0f;
if (round(j - t1[threadIdx.x] * 1000.0f) > 0.0f) {
sum1 += k1a * artConc[(int)round(j - t1[threadIdx.x] * 1000.0f)];
}
if (round(j - t2[threadIdx.y]) > 0.0f) {
sum1 += k1p * pvConc[(int)round(j - t2[threadIdx.y])];
}
sum += sum1 * exp(-1.0f * k2 * (i - j) * dt) * dt;
}
dict[linIdx * n + i] = sum;
}*/
/*double *disc_out = disc(times, artConc, pvConc, n, AF[blockIdx.x], MTT[blockIdx.y], DV[blockIdx.z], t1[threadIdx.x], t2[threadIdx.y]);
for (int i = 0; i < n; i++) {
dict[linIdx * n + i] = disc_out[i];
}*/
}
int main()
{
mxArray *allts = matGetMatrixInFile("data.mat", "allts");
mxArray *times = matGetColInMatrix(allts, 0);
mxArray *AF = matGetColInMatrix(allts, 1);
mxArray *PV = matGetColInMatrix(allts, 2);
mxArray *Liver = matGetColInMatrix(allts, 3);
int n = mxGetM(times);
double *timesData = mxGetPr(times);
double *Cb_plasma = artConc(AF, PV);
double *Cp_plasma = pvConc(PV);
/*double *AF_range = linspace(0.01f, 1.0f, 21);
double *DV_range = linspace(0.01f, 1.0f, 21);
double *MTT_range = linspace(1.0f, 100.0f, 21);
double *t1_range = linspace(0.001f, 0.02f, 26);
double *t2_range = linspace(0.001f, 0.02f, 26);*/
mxArray *AF_vector = matGetMatrixInFile("AF.mat", "AF");
mxArray *DV_vector = matGetMatrixInFile("DV.mat", "DV");
mxArray *MTT_vector = matGetMatrixInFile("MTT.mat", "MTT");
mxArray *t1_vector = matGetMatrixInFile("t1.mat", "t1");
mxArray *t2_vector = matGetMatrixInFile("t2.mat", "t2");
const int AF_length = mxGetM(AF_vector);
const int DV_length = mxGetM(DV_vector);
const int MTT_length = mxGetM(MTT_vector);
const int t1_length = mxGetM(t1_vector);
const int t2_length = mxGetM(t2_vector);
const double *AF_range = mxGetPr(AF_vector);
const double *DV_range = mxGetPr(DV_vector);
const double *MTT_range = mxGetPr(MTT_vector);
const double *t1_range = mxGetPr(t1_vector);
const double *t2_range = mxGetPr(t2_vector);
printf("Mallocing...\n");
double *dict = (double *)mxMalloc(sizeof(double) * AF_length * DV_length * MTT_length * t1_length * t2_length * n);
printf("Done mallocing.\n");
double *d_timesData;
hipMalloc(&d_timesData, sizeof(double) * n);
hipMemcpy(d_timesData, timesData, sizeof(double) * n, hipMemcpyHostToDevice);
double *d_Cb_plasma, *d_Cp_plasma;
hipMalloc(&d_Cb_plasma, sizeof(double) * n);
hipMalloc(&d_Cp_plasma, sizeof(double) * n);
hipMemcpy(d_Cb_plasma, Cb_plasma, sizeof(double) * n, hipMemcpyHostToDevice);
hipMemcpy(d_Cp_plasma, Cp_plasma, sizeof(double) * n, hipMemcpyHostToDevice);
double *d_AF_range, *d_DV_range, *d_MTT_range, *d_t1_range, *d_t2_range;
hipMalloc(&d_AF_range, sizeof(double) * AF_length);
hipMalloc(&d_DV_range, sizeof(double) * DV_length);
hipMalloc(&d_MTT_range, sizeof(double) * MTT_length);
hipMalloc(&d_t1_range, sizeof(double) * t1_length);
hipMalloc(&d_t2_range, sizeof(double) * t2_length);
hipMemcpy(d_AF_range, AF_range, sizeof(double) * AF_length, hipMemcpyHostToDevice);
hipMemcpy(d_DV_range, DV_range, sizeof(double) * DV_length, hipMemcpyHostToDevice);
hipMemcpy(d_MTT_range, MTT_range, sizeof(double) * MTT_length, hipMemcpyHostToDevice);
hipMemcpy(d_t1_range, t1_range, sizeof(double) * t1_length, hipMemcpyHostToDevice);
hipMemcpy(d_t2_range, t2_range, sizeof(double) * t2_length, hipMemcpyHostToDevice);
double *d_dict;
hipMalloc(&d_dict, sizeof(double) * AF_length * DV_length * MTT_length * t1_length * t2_length * n);
cudaCheckErrors("hipMalloc d_dict");
printf("Launching kernel...\n");
hipLaunchKernelGGL(( popDict) , dim3(dim3(AF_length, DV_length, MTT_length)), dim3(dim3(t1_length, t2_length)) , 0, 0, d_dict, d_timesData, d_Cb_plasma, d_Cp_plasma, n, d_AF_range, AF_length, d_DV_range, DV_length, d_MTT_range, MTT_length, d_t1_range, t1_length, d_t2_range, t2_length);
cudaCheckErrors("launch kernel fail");
hipDeviceSynchronize();
cudaCheckErrors("cuda sync fail");
printf("Kernel finished.\n");
hipMemcpy(dict, d_dict, sizeof(double) * AF_length * DV_length * MTT_length * t1_length * t2_length * n, hipMemcpyDeviceToHost);
cudaCheckErrors("hipMemcpy device to host");
hipFree(d_dict);
mxArray *dictMatrix = mxCreateDoubleMatrix(AF_length * DV_length * MTT_length * t1_length * t2_length, n, mxREAL);
mxSetPr(dictMatrix, dict);
printf("Saving dictionary...\n");
hdf5PutArrayInFile("HDF5_Dictionary.mat", "Dictionary", dictMatrix);
printf("Done saving dictionary.\n");
// Pause
getchar();
hipFree(d_timesData);
hipFree(d_Cb_plasma);
hipFree(d_Cp_plasma);
hipFree(d_AF_range);
hipFree(d_DV_range);
hipFree(d_MTT_range);
hipFree(d_t1_range);
hipFree(d_t2_range);
return 0;
}
| 03026068ceab76597927a053792889da5db7600e.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cublas_v2.h"
#include "mat_io.h"
#define _USE_MATH_DEFINES
#include <math.h>
#include <stdio.h>
#include <string.h>
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
} \
} while (0)
// Constants
const double ALPHA = 15.0f * M_PI / 180.0f;
const int BASE_FRAME = 1;
const int FRAMES = 5;
const double TR = 5.12f;
const double T10b = 1.664f * 1000.0f;
const double T10p = 1.584f * 1000.0f;
const double T10L = 0.8f * 1000.0f;
const double R10b = 1.0f / T10b;
const double R10p = 1.0f / T10p;
const double R10L = 1.0f / T10L;
const double HCT = 0.4f;
const double RELAXIVITY = 6.3f;
double *artConc(const mxArray *artFrac, const mxArray *pv) {
// Calculate S0b
int numRows = mxGetM(artFrac);
const double *artFracData = mxGetPr(artFrac);
const double *pvData = mxGetPr(pv);
float m = 0.0f;
for (int i = 0; i < FRAMES; i++) {
m += pvData[BASE_FRAME + i];
}
m /= (float)FRAMES;
double S0b = m * ((1.0f - exp(-1.0f * R10b * TR) * cos(ALPHA)) / (1.0f - exp(-1.0f * R10b * TR)) / sin(ALPHA));
// Calculate R1b
double *R1b = new double[numRows];
for (int i = 0; i < numRows; i++) {
R1b[i] = log( ( (S0b * sin(ALPHA)) - (artFracData[i] * cos(ALPHA)) ) / (S0b * sin(ALPHA) - artFracData[i]) ) / TR;
}
// Calculate Cb_artery
double *Cb_artery = new double[numRows];
for (int i = 0; i < numRows; i++) {
Cb_artery[i] = (R1b[i] - R10b) * 1000.0f / RELAXIVITY;
}
// Calculate Cb_plasma
double *Cb_plasma = new double[numRows];
for (int i = 0; i < numRows; i++) {
Cb_plasma[i] = Cb_artery[i] / (1.0f - HCT);
}
// Zero everything before the base frame
for (int i = 0; i <= BASE_FRAME; i++) {
Cb_plasma[i] = 0.0f;
}
return Cb_plasma;
}
double *pvConc(const mxArray *pv) {
// Calculate S0p
int numRows = mxGetM(pv);
const double *pvData = mxGetPr(pv);
float m = 0.0f;
for (int i = 0; i < FRAMES; i++) {
m += pvData[BASE_FRAME + i];
}
m /= (float)FRAMES;
double S0p = m * ((1.0f - exp(-1.0f * R10p * TR) * cos(ALPHA)) / (1.0f - exp(-1.0f * R10p * TR)) / sin(ALPHA));
// Calculate R1p
double *R1p = new double[numRows];
for (int i = 0; i < numRows; i++) {
R1p[i] = log( ( (S0p * sin(ALPHA)) - (pvData[i] * cos(ALPHA)) ) / (S0p * sin(ALPHA) - pvData[i]) ) / TR;
}
// Calculate Cp_artery
double *Cp_artery = new double[numRows];
for (int i = 0; i < numRows; i++) {
Cp_artery[i] = (R1p[i] - R10p) * 1000.0f / RELAXIVITY;
}
// Calculate Cp_plasma
double *Cp_plasma = new double[numRows];
for (int i = 0; i < numRows; i++) {
Cp_plasma[i] = Cp_artery[i] / (1.0f - HCT);
}
// Zero everything before the base frame
for (int i = 0; i <= BASE_FRAME; i++) {
Cp_plasma[i] = 0.0f;
}
return Cp_plasma;
}
double *clearance(const mxArray *liver) {
// Calculate S0L
int numRows = mxGetM(liver);
const double *liverData = mxGetPr(liver);
float m = 0.0f;
for (int i = 0; i < FRAMES; i++) {
m += liverData[BASE_FRAME + i];
}
m /= (float)FRAMES;
double S0L = m * ((1.0f - exp(-1.0f * R10L * TR) * cos(ALPHA)) / (1.0f - exp(-1.0f * R10L * TR)) / sin(ALPHA));
// Calculate R1L
double *R1L = new double[numRows];
for (int i = 0; i < numRows; i++) {
R1L[i] = log(((S0L * sin(ALPHA)) - (liverData[i] * cos(ALPHA))) / (S0L * sin(ALPHA) - liverData[i])) / TR;
}
// Calculate CL
double *CL = new double[numRows];
for (int i = 0; i < numRows; i++) {
CL[i] = (R1L[i] - R10L) * 1000.0f / RELAXIVITY;
}
return CL;
}
double *disc(const double *times, const double *artConc, const double *pvConc, const int n, const double AF, const double DV, const double MTT, const double t1, const double t2) {
double k1a = AF * DV / MTT;
double k1p = DV * (1.0f - AF) / MTT;
double k2 = 1.0f / MTT;
double dt = times[1] - times[0];
double *C = (double *)malloc(sizeof(double) * n);
for (int i = 1; i <= n; i++) {
double sum = 0.0f;
for (int j = 1; j <= i; j++) {
double sum1 = 0.0f;
if (round(j - t1 * 1000.0f) > 0.0f) {
sum1 += k1a * artConc[(int)round(j - t1 * 1000.0f)];
}
if (round(j - t2 * 1000.0f) > 0.0f) {
sum1 += k1p * pvConc[(int)round(j - t2 * 1000.0f)];
}
sum += sum1 * exp(-1.0f * k2 * (i - j) * dt) * dt;
}
C[i] = sum;
}
return C;
}
double *linspace(double start, double end, int n) {
double *array = new double[n];
double step = (end - start) / (n - 1);
for (int i = 0; i < n; i++) {
array[i] = start + (i * step);
}
return array;
}
__device__ int multiDimIdxToLinIdx(int *idxs, int *sizes, int nDims) {
int linIdx = 0;
for (int i = 0; i < nDims; i++) {
int sizeProduct = 1;
for (int j = 0; j < i; j++) {
sizeProduct *= sizes[j];
}
linIdx += idxs[i] * sizeProduct;
}
return linIdx;
}
__device__ int fiveDimIdxToLinIdx(int i, int size_i, int j, int size_j, int k, int size_k, int l, int size_l, int m) {
return i + (j * size_i) + (k * size_i * size_j) + (l * size_i * size_j *size_k) + (m * size_i * size_j * size_k * size_l);
}
int fiveDimIdxToLinIdxDev(int i, int size_i, int j, int size_j, int k, int size_k, int l, int size_l, int m) {
return i + (j * size_i) + (k * size_i * size_j) + (l * size_i * size_j *size_k) + (m * size_i * size_j * size_k * size_l);
}
__device__ int *linIdxToMultiDimIdx(int idx, int *sizes, int nDims) {
int *multiDimIdx = new int[nDims];
for (int i = 0; i < nDims; i++) {
if (i == 0) {
multiDimIdx[i] = idx % sizes[i];
}
else if (i == nDims - 1) {
multiDimIdx[i] = idx / sizes[i - 1];
}
else {
multiDimIdx[i] = (idx / sizes[i - 1]) % sizes[i];
}
idx -= multiDimIdx[i];
}
return multiDimIdx;
}
__device__ int *linIdxToFiveDimIdx(int idx, int size_i, int size_j, int size_k, int size_l) {
int *fiveDimIdx = new int[5];
// Get first dim
fiveDimIdx[0] = idx % size_i;
// Get second dim
idx -= fiveDimIdx[0];
fiveDimIdx[1] = (idx / size_i) % size_j;
// Get third dim
idx -= fiveDimIdx[1];
fiveDimIdx[2] = (idx / size_j) % size_k;
// Get fourth dim
idx -= fiveDimIdx[2];
fiveDimIdx[3] = (idx / size_k) % size_l;
// Get fifth dim
idx -= fiveDimIdx[3];
fiveDimIdx[4] = (idx / size_l);
return fiveDimIdx;
}
__global__ void popDict(double *dict, const double *times, const double *artConc, const double *pvConc, const int n, const double *AF, const int AF_length, const double *DV, const double DV_length, const double *MTT, const double MTT_length, const double *t1, const double t1_length, const double *t2, const double t2_length) {
const double AFx = AF[blockIdx.x];
const double DVx = DV[blockIdx.y];
const double MTTx = MTT[blockIdx.z];
const double t1x = t1[threadIdx.x];
const double t2x = t2[threadIdx.y];
int linIdx = fiveDimIdxToLinIdx(blockIdx.x, AF_length, blockIdx.y, DV_length, blockIdx.z, MTT_length, threadIdx.x, t1_length, threadIdx.y);
double k1a = AFx * DVx / MTTx;
double k1p = DVx * (1.0f - AFx) / MTTx;
double k2 = 1.0f / MTTx;
double dt = times[1] - times[0];
for (int i = 0; i < n; i++) {
double sum = 0.0f;
for (int j = 0; j <= i; j++) {
double sum1 = 0.0f;
if (round(j - t1x * 1000.0f) > 0.0f) {
sum1 += k1a * artConc[(int)round(j - t1x * 1000.0f)];
}
if (round(j - t2x * 1000.0f) > 0.0f) {
sum1 += k1p * pvConc[(int)round(j - t2x * 1000.0f)];
}
sum += sum1 * exp(-1.0f * k2 * (i - j) * dt) * dt;
}
dict[linIdx * n + i] = sum;
}
/*double k1a = AF[blockIdx.x] * DV[blockIdx.y] / MTT[blockIdx.z];
double k1p = DV[blockIdx.y] * (1.0f - AF[blockIdx.x]) / MTT[blockIdx.z];
double k2 = 1.0f / MTT[blockIdx.z];
double dt = times[1] - times[0];
for (int i = 0; i < n; i++) {
double sum = 0.0f;
for (int j = 0; j <= i; j++) {
double sum1 = 0.0f;
if (round(j - t1[threadIdx.x] * 1000.0f) > 0.0f) {
sum1 += k1a * artConc[(int)round(j - t1[threadIdx.x] * 1000.0f)];
}
if (round(j - t2[threadIdx.y]) > 0.0f) {
sum1 += k1p * pvConc[(int)round(j - t2[threadIdx.y])];
}
sum += sum1 * exp(-1.0f * k2 * (i - j) * dt) * dt;
}
dict[linIdx * n + i] = sum;
}*/
/*double *disc_out = disc(times, artConc, pvConc, n, AF[blockIdx.x], MTT[blockIdx.y], DV[blockIdx.z], t1[threadIdx.x], t2[threadIdx.y]);
for (int i = 0; i < n; i++) {
dict[linIdx * n + i] = disc_out[i];
}*/
}
int main()
{
mxArray *allts = matGetMatrixInFile("data.mat", "allts");
mxArray *times = matGetColInMatrix(allts, 0);
mxArray *AF = matGetColInMatrix(allts, 1);
mxArray *PV = matGetColInMatrix(allts, 2);
mxArray *Liver = matGetColInMatrix(allts, 3);
int n = mxGetM(times);
double *timesData = mxGetPr(times);
double *Cb_plasma = artConc(AF, PV);
double *Cp_plasma = pvConc(PV);
/*double *AF_range = linspace(0.01f, 1.0f, 21);
double *DV_range = linspace(0.01f, 1.0f, 21);
double *MTT_range = linspace(1.0f, 100.0f, 21);
double *t1_range = linspace(0.001f, 0.02f, 26);
double *t2_range = linspace(0.001f, 0.02f, 26);*/
mxArray *AF_vector = matGetMatrixInFile("AF.mat", "AF");
mxArray *DV_vector = matGetMatrixInFile("DV.mat", "DV");
mxArray *MTT_vector = matGetMatrixInFile("MTT.mat", "MTT");
mxArray *t1_vector = matGetMatrixInFile("t1.mat", "t1");
mxArray *t2_vector = matGetMatrixInFile("t2.mat", "t2");
const int AF_length = mxGetM(AF_vector);
const int DV_length = mxGetM(DV_vector);
const int MTT_length = mxGetM(MTT_vector);
const int t1_length = mxGetM(t1_vector);
const int t2_length = mxGetM(t2_vector);
const double *AF_range = mxGetPr(AF_vector);
const double *DV_range = mxGetPr(DV_vector);
const double *MTT_range = mxGetPr(MTT_vector);
const double *t1_range = mxGetPr(t1_vector);
const double *t2_range = mxGetPr(t2_vector);
printf("Mallocing...\n");
double *dict = (double *)mxMalloc(sizeof(double) * AF_length * DV_length * MTT_length * t1_length * t2_length * n);
printf("Done mallocing.\n");
double *d_timesData;
cudaMalloc(&d_timesData, sizeof(double) * n);
cudaMemcpy(d_timesData, timesData, sizeof(double) * n, cudaMemcpyHostToDevice);
double *d_Cb_plasma, *d_Cp_plasma;
cudaMalloc(&d_Cb_plasma, sizeof(double) * n);
cudaMalloc(&d_Cp_plasma, sizeof(double) * n);
cudaMemcpy(d_Cb_plasma, Cb_plasma, sizeof(double) * n, cudaMemcpyHostToDevice);
cudaMemcpy(d_Cp_plasma, Cp_plasma, sizeof(double) * n, cudaMemcpyHostToDevice);
double *d_AF_range, *d_DV_range, *d_MTT_range, *d_t1_range, *d_t2_range;
cudaMalloc(&d_AF_range, sizeof(double) * AF_length);
cudaMalloc(&d_DV_range, sizeof(double) * DV_length);
cudaMalloc(&d_MTT_range, sizeof(double) * MTT_length);
cudaMalloc(&d_t1_range, sizeof(double) * t1_length);
cudaMalloc(&d_t2_range, sizeof(double) * t2_length);
cudaMemcpy(d_AF_range, AF_range, sizeof(double) * AF_length, cudaMemcpyHostToDevice);
cudaMemcpy(d_DV_range, DV_range, sizeof(double) * DV_length, cudaMemcpyHostToDevice);
cudaMemcpy(d_MTT_range, MTT_range, sizeof(double) * MTT_length, cudaMemcpyHostToDevice);
cudaMemcpy(d_t1_range, t1_range, sizeof(double) * t1_length, cudaMemcpyHostToDevice);
cudaMemcpy(d_t2_range, t2_range, sizeof(double) * t2_length, cudaMemcpyHostToDevice);
double *d_dict;
cudaMalloc(&d_dict, sizeof(double) * AF_length * DV_length * MTT_length * t1_length * t2_length * n);
cudaCheckErrors("cudaMalloc d_dict");
printf("Launching kernel...\n");
popDict <<< dim3(AF_length, DV_length, MTT_length), dim3(t1_length, t2_length) >>>(d_dict, d_timesData, d_Cb_plasma, d_Cp_plasma, n, d_AF_range, AF_length, d_DV_range, DV_length, d_MTT_range, MTT_length, d_t1_range, t1_length, d_t2_range, t2_length);
cudaCheckErrors("launch kernel fail");
cudaDeviceSynchronize();
cudaCheckErrors("cuda sync fail");
printf("Kernel finished.\n");
cudaMemcpy(dict, d_dict, sizeof(double) * AF_length * DV_length * MTT_length * t1_length * t2_length * n, cudaMemcpyDeviceToHost);
cudaCheckErrors("cudaMemcpy device to host");
cudaFree(d_dict);
mxArray *dictMatrix = mxCreateDoubleMatrix(AF_length * DV_length * MTT_length * t1_length * t2_length, n, mxREAL);
mxSetPr(dictMatrix, dict);
printf("Saving dictionary...\n");
hdf5PutArrayInFile("HDF5_Dictionary.mat", "Dictionary", dictMatrix);
printf("Done saving dictionary.\n");
// Pause
getchar();
cudaFree(d_timesData);
cudaFree(d_Cb_plasma);
cudaFree(d_Cp_plasma);
cudaFree(d_AF_range);
cudaFree(d_DV_range);
cudaFree(d_MTT_range);
cudaFree(d_t1_range);
cudaFree(d_t2_range);
return 0;
}
|
97c4a8835420fa363dcef818861c88b2a1768f16.hip | // !!! This is a file automatically generated by hipify!!!
//=================================================================//
// CUDA BFS kernel
// Topological-Driven: warp_centric, no atomic instructions,
// edges of the same vertex are processed by one warp,
// each edge per thread, low degree vertices can lead
// to under-utilized warps
// Reference:
// Sungpack Hong, et al. Accelerating CUDA graph algorithms
// at maximum warp
//=================================================================//
#include <hip/hip_runtime.h>
#include <stdint.h>
#include <stdio.h>
#include "cudaGraph.h"
// num of vertices per warp
#define CHUNK_SZ 32
#define WARP_SZ 32
__global__ void initialize(uint32_t * d_graph_property, uint64_t num_vertex)
{
size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if ( tid < num_vertex )
{
d_graph_property[tid] = MY_INFINITY;
}
}
__global__
void kernel(uint32_t * vplist, cudaGraph graph, unsigned curr, bool *changed) {
uint64_t tid = blockIdx.x * blockDim.x + threadIdx.x;
uint64_t lane_id = tid % WARP_SZ;
uint64_t warp_id = tid / WARP_SZ;
uint64_t v1= warp_id * CHUNK_SZ;
uint64_t chk_sz=CHUNK_SZ;
if((v1+CHUNK_SZ)>graph.vertex_cnt)
{
if ( graph.vertex_cnt>v1 )
chk_sz = graph.vertex_cnt-v1;
else
return;
}
for(int v=v1; v< chk_sz+v1; v++)
{
if(vplist[v] == curr)
{
uint64_t nbr_off = graph.get_firstedge_index(v);
uint64_t num_nbr = graph.get_edge_index_end(v) - nbr_off;
for(uint64_t i=lane_id; i<num_nbr; i+=WARP_SZ)
{
uint64_t vid = graph.get_edge_dest(i + nbr_off);
if(vplist[vid]==MY_INFINITY)
{
vplist[vid] = curr + 1;
*changed = true;
}
}
}
}
}
void cuda_BFS(uint64_t * vertexlist,
uint64_t * edgelist, uint32_t * vproplist,
uint64_t vertex_cnt, uint64_t edge_cnt,
uint64_t root)
{
uint32_t * device_vpl = 0;
bool * device_over = 0;
float h2d_copy_time = 0; // host to device data transfer time
float d2h_copy_time = 0; // device to host data transfer time
float kernel_time = 0; // kernel execution time
int device;
hipGetDevice(&device);
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp,device);
// Try to use as many threads as possible so that each thread
// is processing one vertex. If max thread is reached,
// split them into multiple blocks.
unsigned int num_thread_per_block = (unsigned int) vertex_cnt;
if (num_thread_per_block > devProp.maxThreadsPerBlock)
num_thread_per_block = devProp.maxThreadsPerBlock;
unsigned int num_block = (unsigned int)ceil( vertex_cnt/(double)num_thread_per_block );
unsigned int num_block_chunked = (unsigned int)ceil( num_block/(double)CHUNK_SZ )*WARP_SZ;
// malloc of gpu side
cudaErrCheck( hipMalloc((void**)&device_vpl, vertex_cnt*sizeof(uint32_t)) );
cudaErrCheck( hipMalloc((void**)&device_over, sizeof(bool)) );
hipEvent_t start_event, stop_event;
cudaErrCheck( hipEventCreate(&start_event) );
cudaErrCheck( hipEventCreate(&stop_event) );
// initialization
hipLaunchKernelGGL(( initialize), dim3(num_block), dim3(num_thread_per_block), 0, 0, device_vpl, vertex_cnt);
// prepare graph struct
// one for host side, one for device side
cudaGraph h_graph, d_graph;
// here copy only the pointers
h_graph.read(vertexlist, edgelist, vertex_cnt, edge_cnt);
uint32_t zeronum=0;
// memcpy from host to device
hipEventRecord(start_event, 0);
// copy graph data to device
h_graph.cudaGraphCopy(&d_graph);
cudaErrCheck( hipMemcpy(&(device_vpl[root]), &zeronum, sizeof(uint32_t),
hipMemcpyHostToDevice) );
hipEventRecord(stop_event, 0);
hipEventSynchronize(stop_event);
hipEventElapsedTime(&h2d_copy_time, start_event, stop_event);
// BFS traversal
bool stop;
hipEventRecord(start_event, 0);
int curr=0;
do
{
// Each iteration processes
// one level of BFS traversal
stop = false;
cudaErrCheck( hipMemcpy(device_over, &stop, sizeof(bool), hipMemcpyHostToDevice) );
hipLaunchKernelGGL(( kernel), dim3(num_block_chunked), dim3(num_thread_per_block), 0, 0, device_vpl, d_graph, curr, device_over);
cudaErrCheck( hipMemcpy(&stop, device_over, sizeof(bool), hipMemcpyDeviceToHost) );
curr++;
}while(stop);
hipEventRecord(stop_event, 0);
hipEventSynchronize(stop_event);
hipEventElapsedTime(&kernel_time, start_event, stop_event);
hipEventRecord(start_event, 0);
cudaErrCheck( hipMemcpy(vproplist, device_vpl, vertex_cnt*sizeof(uint32_t),
hipMemcpyDeviceToHost) );
hipEventRecord(stop_event, 0);
hipEventSynchronize(stop_event);
hipEventElapsedTime(&d2h_copy_time, start_event, stop_event);
printf("== iteration #: %d\n", curr);
printf("== host->device copy time: %f ms\n", h2d_copy_time);
printf("== device->host copy time: %f ms\n", d2h_copy_time);
printf("== kernel time: %f ms\n", kernel_time);
hipEventDestroy(start_event);
hipEventDestroy(stop_event);
// free graph struct on device side
d_graph.cudaGraphFree();
cudaErrCheck( hipFree(device_vpl) );
}
| 97c4a8835420fa363dcef818861c88b2a1768f16.cu | //=================================================================//
// CUDA BFS kernel
// Topological-Driven: warp_centric, no atomic instructions,
// edges of the same vertex are processed by one warp,
// each edge per thread, low degree vertices can lead
// to under-utilized warps
// Reference:
// Sungpack Hong, et al. Accelerating CUDA graph algorithms
// at maximum warp
//=================================================================//
#include <cuda.h>
#include <stdint.h>
#include <stdio.h>
#include "cudaGraph.h"
// num of vertices per warp
#define CHUNK_SZ 32
#define WARP_SZ 32
__global__ void initialize(uint32_t * d_graph_property, uint64_t num_vertex)
{
size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if ( tid < num_vertex )
{
d_graph_property[tid] = MY_INFINITY;
}
}
__global__
void kernel(uint32_t * vplist, cudaGraph graph, unsigned curr, bool *changed) {
uint64_t tid = blockIdx.x * blockDim.x + threadIdx.x;
uint64_t lane_id = tid % WARP_SZ;
uint64_t warp_id = tid / WARP_SZ;
uint64_t v1= warp_id * CHUNK_SZ;
uint64_t chk_sz=CHUNK_SZ;
if((v1+CHUNK_SZ)>graph.vertex_cnt)
{
if ( graph.vertex_cnt>v1 )
chk_sz = graph.vertex_cnt-v1;
else
return;
}
for(int v=v1; v< chk_sz+v1; v++)
{
if(vplist[v] == curr)
{
uint64_t nbr_off = graph.get_firstedge_index(v);
uint64_t num_nbr = graph.get_edge_index_end(v) - nbr_off;
for(uint64_t i=lane_id; i<num_nbr; i+=WARP_SZ)
{
uint64_t vid = graph.get_edge_dest(i + nbr_off);
if(vplist[vid]==MY_INFINITY)
{
vplist[vid] = curr + 1;
*changed = true;
}
}
}
}
}
void cuda_BFS(uint64_t * vertexlist,
uint64_t * edgelist, uint32_t * vproplist,
uint64_t vertex_cnt, uint64_t edge_cnt,
uint64_t root)
{
uint32_t * device_vpl = 0;
bool * device_over = 0;
float h2d_copy_time = 0; // host to device data transfer time
float d2h_copy_time = 0; // device to host data transfer time
float kernel_time = 0; // kernel execution time
int device;
cudaGetDevice(&device);
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp,device);
// Try to use as many threads as possible so that each thread
// is processing one vertex. If max thread is reached,
// split them into multiple blocks.
unsigned int num_thread_per_block = (unsigned int) vertex_cnt;
if (num_thread_per_block > devProp.maxThreadsPerBlock)
num_thread_per_block = devProp.maxThreadsPerBlock;
unsigned int num_block = (unsigned int)ceil( vertex_cnt/(double)num_thread_per_block );
unsigned int num_block_chunked = (unsigned int)ceil( num_block/(double)CHUNK_SZ )*WARP_SZ;
// malloc of gpu side
cudaErrCheck( cudaMalloc((void**)&device_vpl, vertex_cnt*sizeof(uint32_t)) );
cudaErrCheck( cudaMalloc((void**)&device_over, sizeof(bool)) );
cudaEvent_t start_event, stop_event;
cudaErrCheck( cudaEventCreate(&start_event) );
cudaErrCheck( cudaEventCreate(&stop_event) );
// initialization
initialize<<<num_block, num_thread_per_block>>>(device_vpl, vertex_cnt);
// prepare graph struct
// one for host side, one for device side
cudaGraph h_graph, d_graph;
// here copy only the pointers
h_graph.read(vertexlist, edgelist, vertex_cnt, edge_cnt);
uint32_t zeronum=0;
// memcpy from host to device
cudaEventRecord(start_event, 0);
// copy graph data to device
h_graph.cudaGraphCopy(&d_graph);
cudaErrCheck( cudaMemcpy(&(device_vpl[root]), &zeronum, sizeof(uint32_t),
cudaMemcpyHostToDevice) );
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaEventElapsedTime(&h2d_copy_time, start_event, stop_event);
// BFS traversal
bool stop;
cudaEventRecord(start_event, 0);
int curr=0;
do
{
// Each iteration processes
// one level of BFS traversal
stop = false;
cudaErrCheck( cudaMemcpy(device_over, &stop, sizeof(bool), cudaMemcpyHostToDevice) );
kernel<<<num_block_chunked, num_thread_per_block>>>(device_vpl, d_graph, curr, device_over);
cudaErrCheck( cudaMemcpy(&stop, device_over, sizeof(bool), cudaMemcpyDeviceToHost) );
curr++;
}while(stop);
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaEventElapsedTime(&kernel_time, start_event, stop_event);
cudaEventRecord(start_event, 0);
cudaErrCheck( cudaMemcpy(vproplist, device_vpl, vertex_cnt*sizeof(uint32_t),
cudaMemcpyDeviceToHost) );
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaEventElapsedTime(&d2h_copy_time, start_event, stop_event);
printf("== iteration #: %d\n", curr);
printf("== host->device copy time: %f ms\n", h2d_copy_time);
printf("== device->host copy time: %f ms\n", d2h_copy_time);
printf("== kernel time: %f ms\n", kernel_time);
cudaEventDestroy(start_event);
cudaEventDestroy(stop_event);
// free graph struct on device side
d_graph.cudaGraphFree();
cudaErrCheck( cudaFree(device_vpl) );
}
|
b0a556172cf0b78d1bb9c0809647ba234bbca721.hip | // !!! This is a file automatically generated by hipify!!!
/* * Copyright (c) 2016 Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. The names of its contributors may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* *********************************************************************************************** *
* CARLsim
* created by: (MDR) Micah Richert, (JN) Jayram M. Nageswaran
* maintained by:
* (MA) Mike Avery <averym@uci.edu>
* (MB) Michael Beyeler <mbeyeler@uci.edu>,
* (KDC) Kristofor Carlson <kdcarlso@uci.edu>
* (TSC) Ting-Shuo Chou <tingshuc@uci.edu>
* (HK) Hirak J Kashyap <kashyaph@uci.edu>
*
* CARLsim v1.0: JM, MDR
* CARLsim v2.0/v2.1/v2.2: JM, MDR, MA, MB, KDC
* CARLsim3: MB, KDC, TSC
* CARLsim4: TSC, HK
*
* CARLsim available from http://socsci.uci.edu/~jkrichma/CARLsim/
* Ver 12/31/2016
*/
#include <snn.h>
#include <spike_buffer.h>
#include <error_code.h>
#include <hip/hip_runtime.h>
#define NUM_THREADS 128
#define NUM_BLOCKS 64
#define WARP_SIZE 32
///////////////////////////////////////////////////////////////////
// Some important ideas that explains the GPU execution are as follows:
// 1. Each GPU block has a local firing table (called fireTable). The block of threads
// reads a bunch of neurons parameters and determines if it needs to fire or not
// Whenever a neuron need to fire, it keeps track of the fired neuron in the local
// table. When the table is full, we go and write back the fireTable to the global
// firing table.
// 2. Firing information is maintained in two tables globally (timingTable and the globalFiringTable)
// for excitatory neuron population and inhibitory neurons.
// The globalFiringTable only stores a sequence of id corresponding to fired neurons.
// The timingTable store the total number of fired neurons till the current time step t.
// These two tables are flushed and adjusted every second.
// This approach requires about half of the memory compared to the traditional AER scheme which
// stores the firing time and firing id together.
// For more details kindly read the enclosed report (report.pdf) in the source directory
//
//
// timeTableD2GPU[0] always is 0 -- index into firingTableD2
// timeTableD2GPU[maxDelay_] -- should be the number of spikes "leftover" from the previous second
// timeTableD2GPU[maxDelay_+1]-timeTableD2GPU[maxDelay_] -- should be the number of spikes in the first ms of the current second
// timeTableD2GPU[1000+maxDelay_] -- should be the number of spikes in the current second + the leftover spikes.
//
///////////////////////////////////////////////////////////////////
__device__ unsigned int timeTableD2GPU[TIMING_COUNT];
__device__ unsigned int timeTableD1GPU[TIMING_COUNT];
__device__ unsigned int spikeCountD2SecGPU;
__device__ unsigned int spikeCountD1SecGPU;
__device__ unsigned int spikeCountD2GPU;
__device__ unsigned int spikeCountD1GPU;
__device__ unsigned int secD2fireCntTest;
__device__ unsigned int secD1fireCntTest;
__device__ unsigned int spikeCountLastSecLeftD2GPU;
__device__ unsigned int spikeCountExtRxD1SecGPU;
__device__ unsigned int spikeCountExtRxD2SecGPU;
__device__ unsigned int spikeCountExtRxD2GPU;
__device__ unsigned int spikeCountExtRxD1GPU;
__device__ __constant__ RuntimeData runtimeDataGPU;
__device__ __constant__ NetworkConfigRT networkConfigGPU;
__device__ __constant__ GroupConfigRT groupConfigsGPU[MAX_GRP_PER_SNN];
__device__ __constant__ float d_mulSynFast[MAX_CONN_PER_SNN];
__device__ __constant__ float d_mulSynSlow[MAX_CONN_PER_SNN];
__device__ int loadBufferCount;
__device__ int loadBufferSize;
texture <int, 1, hipReadModeElementType> timeTableD2GPU_tex;
texture <int, 1, hipReadModeElementType> timeTableD1GPU_tex;
texture <int, 1, hipReadModeElementType> groupIdInfo_tex; // groupIDInfo is allocated using hipMalloc thus doesn't require an offset when using textures
__device__ int timeTableD1GPU_tex_offset;
__device__ int timeTableD2GPU_tex_offset;
// example of the quick synaptic table
// index cnt
// 0000000 - 0
// 0000001 - 0
// 0000010 - 1
// 0100000 - 5
// 0110000 - 4
int quickSynIdTable[256];
__device__ int quickSynIdTableGPU[256];
void initQuickSynIdTable(int netId) {
void* devPtr;
for(int i = 1; i < 256; i++) {
int cnt = 0;
while(i) {
if(((i >> cnt) & 1) == 1) break;
cnt++;
assert(cnt <= 7);
}
quickSynIdTable[i] = cnt;
}
hipSetDevice(netId);
hipGetSymbolAddress(&devPtr, quickSynIdTableGPU);
CUDA_CHECK_ERRORS(hipMemcpy( devPtr, quickSynIdTable, sizeof(quickSynIdTable), hipMemcpyHostToDevice));
}
__device__ inline bool isPoissonGroup(short int lGrpId) {
return (groupConfigsGPU[lGrpId].Type & POISSON_NEURON);
}
//! \brief Atomically marks synapse \p synId of neuron \p lNId as having received a spike.
//! I_set is a pitched 2D bit field: one row of 32 synapse flags per neuron per word group.
__device__ inline void setFiringBitSynapses(int lNId, int synId) {
	// locate the 32-bit word holding this synapse's flag: row (synId/32) of the
	// pitched allocation, column lNId
	char* rowBase = (char*)runtimeDataGPU.I_set + ((synId >> 5) * networkConfigGPU.I_setPitch);
	unsigned int* word = ((unsigned int*)rowBase) + lNId;
	atomicOr(word, 1 << (synId % 32));
}
//! \brief Returns a pointer to word \p synId (a 32-synapse group) of neuron \p lNId in the pitched I_set bit field.
__device__ inline unsigned int* getFiringBitGroupPtr(int lNId, int synId) {
	// step synId rows into the pitched allocation, then index by neuron
	char* rowBase = (char*)runtimeDataGPU.I_set + synId * networkConfigGPU.I_setPitch;
	return ((unsigned int*)rowBase) + lNId;
}
//! \brief Index of neuron \p lNId's STP state for time step \p simTime.
//! The stpu/stpx buffers are circular over (maxDelay + 1) time slots, pitched per slot.
__device__ inline int getSTPBufPos(int lNId, int simTime) {
	const int slot = (simTime + 1) % (networkConfigGPU.maxDelay + 1);
	return slot * networkConfigGPU.STP_Pitch + lNId;
}
//! \brief Fetches the precomputed static thread-load entry (start neuron, group|size) for buffer \p bufPos.
__device__ inline int2 getStaticThreadLoad(int bufPos) {
	return runtimeDataGPU.neuronAllocation[bufPos];
}
//! \brief Decides whether Poisson neuron \p lNId fires this time step.
//! The per-neuron random draw (scaled by 1000) is compared against the target
//! firing rate; a rate high enough makes the comparison always true, so the
//! neuron fires continuously.
__device__ inline bool getPoissonSpike(int lNId) {
	// Poisson-only arrays start at the first non-regular neuron
	const int poisIdx = lNId - networkConfigGPU.numNReg;
	return runtimeDataGPU.randNum[poisIdx] * 1000.0f < runtimeDataGPU.poissonFireRate[poisIdx];
}
//! \brief Reads the spike flag for generator slot \p nidPos.
//! spikeGenBits packs one flag per neuron, 32 flags per 32-bit word.
__device__ inline bool getSpikeGenBit(unsigned int nidPos) {
	return (runtimeDataGPU.spikeGenBits[nidPos / 32] >> (nidPos % 32)) & 0x1;
}
/*!
* \brief This device function updates the average firing rate of each neuron, which is required for homeostasis
*
* \param[in] lNId The neuron id to be updated
* \param[in] lGrpId The group id of the neuron
*/
__device__ inline void updateHomeoStaticState(int lNId, int lGrpId) {
	// exponential decay of the running average firing rate toward zero;
	// the spike-triggered increment happens in resetFiredNeuron()
	runtimeDataGPU.avgFiring[lNId] *= (groupConfigsGPU[lGrpId].avgTimeScale_decay);
}
/*!
* \brief After every time step we update the time table
*
* Only one cuda thread is required for updating the time table
*
* \param[in] simTime The current time step
*/
// Records this time step's cumulative spike counts into the time tables.
// Only one thread performs the writes; launch with a single block.
__global__ void kernel_updateTimeTable(int simTime) {
	const bool isMasterThread = (threadIdx.x == 0) && (blockIdx.x == 0);
	if (isMasterThread) {
		// entries are offset by (maxDelay + 1) so delayed spikes from the
		// previous second keep valid slots
		const int slot = simTime + networkConfigGPU.maxDelay + 1;
		timeTableD2GPU[slot] = spikeCountD2SecGPU + spikeCountLastSecLeftD2GPU;
		timeTableD1GPU[slot] = spikeCountD1SecGPU;
	}
	__syncthreads();
}
/////////////////////////////////////////////////////////////////////////////////
// Device Kernel Function: Initialization of the GPU side of the simulator ///
// KERNEL: This kernel is called after initialization of various parameters ///
// so that we can reset all required parameters. ///
/////////////////////////////////////////////////////////////////////////////////
// Zeroes the GPU-side time tables and all global spike counters.
// Called once after the device data structures have been set up.
__global__ void kernel_initGPUMemory() {
	// FIXME: use parallel access
	const int idx = blockIdx.x * blockDim.x + threadIdx.x;
	// each thread clears one slot of both time tables
	if (idx < TIMING_COUNT) {
		timeTableD2GPU[idx] = 0;
		timeTableD1GPU[idx] = 0;
	}
	// a single thread resets the scalar counters
	const bool isMasterThread = (threadIdx.x == 0) && (blockIdx.x == 0);
	if (isMasterThread) {
		spikeCountD2SecGPU = 0;
		spikeCountD1SecGPU = 0;
		spikeCountD2GPU = 0;
		spikeCountD1GPU = 0;
		secD2fireCntTest = 0;
		secD1fireCntTest = 0;
		spikeCountLastSecLeftD2GPU = 0;
		spikeCountExtRxD2GPU = 0;
		spikeCountExtRxD1GPU = 0;
		spikeCountExtRxD2SecGPU = 0;
		spikeCountExtRxD1SecGPU = 0;
	}
}
// Allocation of the group and its id..
// Builds the per-group id table (start neuron, end neuron, group id) on the
// host, uploads it to the device, and binds it to groupIdInfo_tex.
void SNN::allocateGroupId(int netId) {
	checkAndSetGPUDevice(netId);
	assert (runtimeData[netId].groupIdInfo == NULL);
	const size_t tableBytes = sizeof(int3) * networkConfigs[netId].numGroups;
	int3* hostTable = (int3*)malloc(tableBytes);
	for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroups; lGrpId++) {
		// x/y: local neuron id range of the group, z: the group id itself
		hostTable[lGrpId].x = groupConfigs[netId][lGrpId].lStartN;
		hostTable[lGrpId].y = groupConfigs[netId][lGrpId].lEndN;
		hostTable[lGrpId].z = lGrpId;
	}
	CUDA_CHECK_ERRORS(hipMalloc((void**)&runtimeData[netId].groupIdInfo, tableBytes));
	CUDA_CHECK_ERRORS(hipMemcpy(runtimeData[netId].groupIdInfo, hostTable, tableBytes, hipMemcpyHostToDevice));
	CUDA_CHECK_ERRORS(hipBindTexture(NULL, groupIdInfo_tex, runtimeData[netId].groupIdInfo, tableBytes));
	free(hostTable);
}
/************************ VARIOUS KERNELS FOR FIRING CALCULATION AND FIRING UPDATE ****************************/
// Static Thread Load Allocation...
// This function is necessary for static allocation of load that each CUDA-SM needs for its computation.
// We store the static load allocation using the following format
// Neuron starting position (32 bit): Group identification (16) : Buffer size (16 bit)
// if we have 3 groups. grp(1) = 400 neurons, grp(2) = 100, grp(3) = 600
// The allocated static table will look as follows..
//-------------------------
// start | grp | size
//-------------------------
// 0 : 0 : 256
// 256 : 0 : 144
// 400 : 1 : 100
// 500 : 2 : 256
// 756 : 2 : 256
// 1012 : 2 : 88
//-----------------------
// Builds the static thread-load table for network netId: every entry covers up
// to bufSize consecutive neurons of a single group, packed as
// (start neuron id, group id | (chunk size << 16)).  The table and its length
// are uploaded to the device (neuronAllocation, loadBufferCount/Size).
// Returns the number of entries created.
int SNN::allocateStaticLoad(int netId, int bufSize) {
	checkAndSetGPUDevice(netId);
	// only one thread does the static load table
	// first pass: count how many bufSize-sized chunks all groups need
	int bufferCnt = 0;
	for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroups; lGrpId++) {
		int grpBufCnt = (int) ceil(1.0f * groupConfigs[netId][lGrpId].numN / bufSize);
		assert(grpBufCnt >= 0);
		bufferCnt += grpBufCnt;
		KERNEL_DEBUG("Grp Size = %d, Total Buffer Cnt = %d, Buffer Cnt = %d", groupConfigs[netId][lGrpId].numN, bufferCnt, grpBufCnt);
	}
	assert(bufferCnt > 0);
	int2* tempNeuronAllocation = (int2*)malloc(sizeof(int2) * bufferCnt);
	KERNEL_DEBUG("STATIC THREAD ALLOCATION");
	KERNEL_DEBUG("------------------------");
	KERNEL_DEBUG("Buffer Size = %d, Buffer Count = %d", bufSize, bufferCnt);
	// second pass: fill one entry per chunk
	bufferCnt = 0;
	for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroups; lGrpId++) {
		for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId += bufSize) {
			int2 threadLoad;
			// starting neuron id is saved...
			threadLoad.x = lNId;
			if ((lNId + bufSize - 1) <= groupConfigs[netId][lGrpId].lEndN)
				// grpID + full size
				threadLoad.y = (lGrpId + (bufSize << 16)); // can't support group id > 2^16
			else
				// grpID + left-over size (the group's last, partial chunk)
				threadLoad.y = (lGrpId + ((groupConfigs[netId][lGrpId].lEndN - lNId + 1) << 16)); // can't support group id > 2^16
			// fill the static load distribution here...
			int testGrpId = STATIC_LOAD_GROUP(threadLoad);
			tempNeuronAllocation[bufferCnt] = threadLoad;
			KERNEL_DEBUG("%d. Start=%d, size=%d grpId=%d:%s (SpikeMonId=%d) (GroupMonId=%d)",
					bufferCnt, STATIC_LOAD_START(threadLoad),
					STATIC_LOAD_SIZE(threadLoad),
					STATIC_LOAD_GROUP(threadLoad),
					groupConfigMap[groupConfigs[netId][testGrpId].gGrpId].grpName.c_str(),
					groupConfigMDMap[groupConfigs[netId][testGrpId].gGrpId].spikeMonitorId,
					groupConfigMDMap[groupConfigs[netId][testGrpId].gGrpId].groupMonitorId);
			bufferCnt++;
		}
	}
	assert(runtimeData[netId].allocated == false);
	// Finally writeback the total bufferCnt
	// Note down the buffer size for reference
	KERNEL_DEBUG("GPU loadBufferSize = %d, GPU loadBufferCount = %d", bufSize, bufferCnt);
	CUDA_CHECK_ERRORS(hipMemcpyToSymbol(loadBufferCount, &bufferCnt, sizeof(int), 0, hipMemcpyHostToDevice));
	CUDA_CHECK_ERRORS(hipMemcpyToSymbol(loadBufferSize, &bufSize, sizeof(int), 0, hipMemcpyHostToDevice));
	CUDA_CHECK_ERRORS(hipMalloc((void**) &runtimeData[netId].neuronAllocation, sizeof(int2) * bufferCnt));
	CUDA_CHECK_ERRORS(hipMemcpy(runtimeData[netId].neuronAllocation, tempNeuronAllocation, sizeof(int2) * bufferCnt, hipMemcpyHostToDevice));
	free(tempNeuronAllocation);
	return bufferCnt;
}
//////////////////////////////////////////////////
// 1. KERNELS used when a specific neuron fires //
//////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////
// Device local function: Update the STP Variables ///
// update the STPU and STPX variable after firing ///
/////////////////////////////////////////////////////////////////////////////////
// update the spike-dependent part of du/dt and dx/dt
// Applies the spike-triggered part of the short-term plasticity dynamics for
// neuron nid that fired at simTime.  The decay terms have already been applied
// to the current buffer slot (ind_plus), so only the delta contributions are
// added here, reading the pre-spike values from the previous slot (ind_minus).
__device__ void firingUpdateSTP (int nid, int simTime, short int grpId) {
	// we need to retrieve the STP values from the right buffer position (right before vs. right after the spike)
	int ind_plus = getSTPBufPos(nid, simTime);
	int ind_minus = getSTPBufPos(nid, (simTime - 1));
	// at this point, stpu[ind_plus] has already been assigned, and the decay applied
	// so add the spike-dependent part to that
	// du/dt = -u/tau_F + U * (1-u^-) * \delta(t-t_{spk})
	runtimeDataGPU.stpu[ind_plus] += groupConfigsGPU[grpId].STP_U * (1.0f - runtimeDataGPU.stpu[ind_minus]);
	// dx/dt = (1-x)/tau_D - u^+ * x^- * \delta(t-t_{spk})
	// note: this must read the stpu value just updated above (u^+), so order matters
	runtimeDataGPU.stpx[ind_plus] -= runtimeDataGPU.stpu[ind_plus] * runtimeDataGPU.stpx[ind_minus];
}
// After-spike reset of a regular neuron: membrane potential back to Izh_c,
// recovery incremented by Izh_d, plus STDP/homeostasis bookkeeping.
__device__ void resetFiredNeuron(int lNId, short int lGrpId, int simTime) {
	// \FIXME \TODO: convert this to use coalesced access by grouping into a
	// single 16 byte access. This might improve bandwidth performance
	// This is fully uncoalesced access...need to convert to coalesced access..
	runtimeDataGPU.voltage[lNId] = runtimeDataGPU.Izh_c[lNId];
	runtimeDataGPU.recovery[lNId] += runtimeDataGPU.Izh_d[lNId];
	if (groupConfigsGPU[lGrpId].WithSTDP)
		// record spike time only where STDP needs it
		runtimeDataGPU.lastSpikeTime[lNId] = simTime;
	if (networkConfigGPU.sim_with_homeostasis) {
		// bump the running average firing rate; the decay happens in
		// updateHomeoStaticState().  1000/(avgTimeScale*1000) == 1/avgTimeScale.
		runtimeDataGPU.avgFiring[lNId] += 1000/(groupConfigsGPU[lGrpId].avgTimeScale*1000);
	}
}
/*!
* \brief 1. Copy neuron id from local table to global firing table. 2. Reset all neuron properties of neuron id in local table
*
*
* \param[in] fireTablePtr the local shared memory firing table with neuron ids of fired neuron
* \param[in] fireCntD2 the number of neurons in local table that has fired with group's max delay == 1
* \param[in] fireCntD1 the number of neurons in local table that has fired with group's max delay > 1
* \param[in] simTime the current time step, stored as neuron firing time entry
*/
// Reserves space in the global firing tables for the fireCnt spikes collected
// in the block-local table.  On success, cntD2/cntD1 receive the starting
// write offsets for delay>1 and delay==1 spikes and blkErrCode is 0.  On
// overflow, blkErrCode is set and the real second counters are left untouched.
// Intended to be called by a single thread per block (see updateNewFirings).
__device__ void updateSpikeCount(volatile unsigned int& fireCnt, volatile unsigned int& fireCntD1, volatile unsigned int& cntD2, volatile unsigned int& cntD1, volatile int& blkErrCode) {
	int fireCntD2 = fireCnt - fireCntD1;
	// add to the test counters first so overflow can be detected without
	// corrupting the real offsets; these assignments are provisional and are
	// overwritten below on the success path
	cntD2 = atomicAdd(&secD2fireCntTest, fireCntD2);
	cntD1 = atomicAdd(&secD1fireCntTest, fireCntD1);
	//check for overflow in the firing table size....
	if(secD2fireCntTest>networkConfigGPU.maxSpikesD2) {
		blkErrCode = NEW_FIRE_UPDATE_OVERFLOW_ERROR2;
		return;
	}
	else if(secD1fireCntTest>networkConfigGPU.maxSpikesD1) {
		blkErrCode = NEW_FIRE_UPDATE_OVERFLOW_ERROR1;
		return;
	}
	blkErrCode = 0;
	// get a distinct counter to store firing info
	// into the firing table
	cntD2 = atomicAdd(&spikeCountD2SecGPU, fireCntD2) + spikeCountLastSecLeftD2GPU;
	cntD1 = atomicAdd(&spikeCountD1SecGPU, fireCntD1);
}
// update the firing table...
// Writes fired neuron lNId into the global firing table matching its group's
// maximum delay.  cntD2/cntD1 are block-shared write cursors (starting offsets
// reserved by updateSpikeCount), advanced atomically by each writer thread.
__device__ void updateFiringTable(int lNId, short int lGrpId, volatile unsigned int& cntD2, volatile unsigned int& cntD1) {
	int pos;
	if (groupConfigsGPU[lGrpId].MaxDelay == 1) {
		// this group has a delay of only 1
		pos = atomicAdd((int*)&cntD1, 1);
		//runtimeDataGPU.firingTableD1[pos] = SET_FIRING_TABLE(nid, grpId);
		runtimeDataGPU.firingTableD1[pos] = lNId;
	} else {
		// all other groups are dumped here
		pos = atomicAdd((int*)&cntD2, 1);
		//runtimeDataGPU.firingTableD2[pos] = SET_FIRING_TABLE(nid, grpId);
		runtimeDataGPU.firingTableD2[pos] = lNId;
	}
}
// update the firing table...
// Routes a spike of neuron lNId into its group's external firing table (used
// when the group has connections outside this network partition).  The local
// neuron id is translated to a global id via the group's LtoGOffset.
__device__ void updateExtFiringTable(int lNId, short int lGrpId) {
	int pos;
	if (groupConfigsGPU[lGrpId].MaxDelay == 1) {
		// this group has a delay of only 1
		pos = atomicAdd((int*)&runtimeDataGPU.extFiringTableEndIdxD1[lGrpId] , 1);
		//runtimeDataGPU.firingTableD1[pos] = SET_FIRING_TABLE(nid, grpId);
		runtimeDataGPU.extFiringTableD1[lGrpId][pos] = lNId + groupConfigsGPU[lGrpId].LtoGOffset; // convert to global neuron id
	} else {
		// all other groups are dumped here
		pos = atomicAdd((int*)&runtimeDataGPU.extFiringTableEndIdxD2[lGrpId], 1);
		//runtimeDataGPU.firingTableD2[pos] = SET_FIRING_TABLE(nid, grpId);
		runtimeDataGPU.extFiringTableD2[lGrpId][pos] = lNId + groupConfigsGPU[lGrpId].LtoGOffset; // convert to global neuron id
	}
}
// Flushes the block-local firing table to the global firing tables: thread 0
// reserves write space, then all threads cooperatively copy the entries,
// update STP state and per-neuron spike counts, and reset fired regular
// neurons.  Must be called by ALL threads of the block (contains barriers).
// Returns 0 on success or a firing-table overflow error code.
__device__ int updateNewFirings(int* fireTablePtr, short int* fireGrpId,
		volatile unsigned int& fireCnt, volatile unsigned int& fireCntD1, int simTime) {
	__shared__ volatile unsigned int cntD2;  // starting offset / cursor into firingTableD2
	__shared__ volatile unsigned int cntD1;  // starting offset / cursor into firingTableD1
	__shared__ volatile int blkErrCode;
	blkErrCode = 0;
	if (threadIdx.x == 0) {
		updateSpikeCount(fireCnt, fireCntD1, cntD2, cntD1, blkErrCode);
	}
	__syncthreads();
	// if we overflow the spike buffer space that is available,
	// then we return with an error here...
	if (blkErrCode)
		return blkErrCode;
	// each thread processes every blockDim.x-th entry of the local table
	for (int i = threadIdx.x; i < fireCnt; i += blockDim.x) {
		// Read the firing id from the local table.....
		int lNId = fireTablePtr[i];
		updateFiringTable(lNId, fireGrpId[i], cntD2, cntD1);
		if (groupConfigsGPU[fireGrpId[i]].hasExternalConnect)
			updateExtFiringTable(lNId, fireGrpId[i]);
		if (groupConfigsGPU[fireGrpId[i]].WithSTP)
			firingUpdateSTP(lNId, simTime, fireGrpId[i]);
		// keep track of number spikes per neuron
		runtimeDataGPU.nSpikeCnt[lNId]++;
		// only neurons would do the remaining settings...
		// pure poisson generators will return without changing anything else..
		if (IS_REGULAR_NEURON(lNId, networkConfigGPU.numNReg, networkConfigGPU.numNPois))
			resetFiredNeuron(lNId, fireGrpId[i], simTime);
	}
	__syncthreads();
	return 0;
}
// zero GPU spike counts
__global__ void kernel_resetNSpikeCnt(int lGrpId) {
const int totBuffers = loadBufferCount;
for (int bufPos = blockIdx.x; bufPos < totBuffers; bufPos += gridDim.x) {
// KILLME !!! This can be further optimized ....
// instead of reading each neuron group separately .....
// read a whole buffer and use the result ......
int2 threadLoad = getStaticThreadLoad(bufPos);
int nid = (STATIC_LOAD_START(threadLoad) + threadIdx.x);
int lastId = STATIC_LOAD_SIZE(threadLoad);
int grpId = STATIC_LOAD_GROUP(threadLoad);
if ((lGrpId == ALL || lGrpId == grpId) && (nid <= lastId)) {
runtimeDataGPU.nSpikeCnt[nid] = 0;
}
}
}
// wrapper to call resetSpikeCnt
void SNN::resetSpikeCnt_GPU(int netId, int lGrpId) {
assert(runtimeData[netId].memType == GPU_MEM);
if (lGrpId == ALL) {
checkAndSetGPUDevice(netId);
CUDA_CHECK_ERRORS(hipMemset((void*)runtimeData[netId].nSpikeCnt, 0, sizeof(int) * networkConfigs[netId].numN));
} else {
checkAndSetGPUDevice(netId);
hipLaunchKernelGGL(( kernel_resetNSpikeCnt), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, lGrpId);
}
}
#define LTP_GROUPING_SZ 16 //!< synaptic grouping for LTP Calculation
/*!
* \brief Computes the STDP update values for each of fired neurons stored in the local firing table.
*
* \param[in] fireTablePtr the local firing table with neuron ids of fired neuron
* \param[in] fireCnt the number of fired neurons in local firing table
* \param[in] simTime the current time step, stored as neuron firing time entry
*/
// Computes LTP-side STDP weight-change updates for each neuron in the local
// firing table: for every plastic pre-synapse of a fired post-neuron, the time
// since the last pre-synaptic spike selects a point on the configured E-STDP
// and/or I-STDP curve and the result is accumulated into wtChange.
// Threads are grouped LTP_GROUPING_SZ per fired neuron to parallelize over
// synapses.  Must be called by ALL threads of the block (barrier at the end).
__device__ void updateLTP(int* fireTablePtr, short int* fireGrpId, volatile unsigned int& fireCnt, int simTime) {
	for(int pos=threadIdx.x/LTP_GROUPING_SZ; pos < fireCnt; pos += (blockDim.x/LTP_GROUPING_SZ)) {
		// each neuron has two variables pre and pre_exc
		// pre: number of pre-neurons
		// pre_exc: number of neurons that have plastic connections
		short int grpId = fireGrpId[pos];
		// STDP calculation: the post-synaptic neuron fires after the arrival of pre-synaptic neuron's spike
		if (groupConfigsGPU[grpId].WithSTDP) { // MDR, FIXME this probably will cause more thread divergence than need be...
			int nid = fireTablePtr[pos];
			// plastic synapses occupy the first Npre_plastic entries of the
			// neuron's pre-synapse range
			unsigned int end_p = runtimeDataGPU.cumulativePre[nid] + runtimeDataGPU.Npre_plastic[nid];
			for(unsigned int p  = runtimeDataGPU.cumulativePre[nid] + threadIdx.x % LTP_GROUPING_SZ;
					p < end_p;
					p+=LTP_GROUPING_SZ) {
				int stdp_tDiff = (simTime - runtimeDataGPU.synSpikeTime[p]);
				if (stdp_tDiff > 0) {
					if (groupConfigsGPU[grpId].WithESTDP) {
						// Handle E-STDP curves
						switch (groupConfigsGPU[grpId].WithESTDPcurve) {
						case EXP_CURVE: // exponential curve
							// cutoff at 25 time constants: contribution is negligible beyond
							if (stdp_tDiff * groupConfigsGPU[grpId].TAU_PLUS_INV_EXC < 25)
								runtimeDataGPU.wtChange[p] += STDP(stdp_tDiff, groupConfigsGPU[grpId].ALPHA_PLUS_EXC, groupConfigsGPU[grpId].TAU_PLUS_INV_EXC);
							break;
						case TIMING_BASED_CURVE: // sc curve
							if (stdp_tDiff * groupConfigsGPU[grpId].TAU_PLUS_INV_EXC < 25) {
								if (stdp_tDiff <= groupConfigsGPU[grpId].GAMMA)
									runtimeDataGPU.wtChange[p] += groupConfigsGPU[grpId].OMEGA + groupConfigsGPU[grpId].KAPPA * STDP(stdp_tDiff, groupConfigsGPU[grpId].ALPHA_PLUS_EXC, groupConfigsGPU[grpId].TAU_PLUS_INV_EXC);
								else // stdp_tDiff > GAMMA
									runtimeDataGPU.wtChange[p] -= STDP(stdp_tDiff, groupConfigsGPU[grpId].ALPHA_PLUS_EXC, groupConfigsGPU[grpId].TAU_PLUS_INV_EXC);
							}
							break;
						default:
							break;
						}
					}
					if (groupConfigsGPU[grpId].WithISTDP) {
						// Handle I-STDP curves
						switch (groupConfigsGPU[grpId].WithISTDPcurve) {
						case EXP_CURVE: // exponential curve
							if (stdp_tDiff * groupConfigsGPU[grpId].TAU_PLUS_INV_INB < 25) { // LTP of inhibitory synapse, which decreases synapse weight
								runtimeDataGPU.wtChange[p] -= STDP(stdp_tDiff, groupConfigsGPU[grpId].ALPHA_PLUS_INB, groupConfigsGPU[grpId].TAU_PLUS_INV_INB);
							}
							break;
						case PULSE_CURVE: // pulse curve
							if (stdp_tDiff <= groupConfigsGPU[grpId].LAMBDA) { // LTP of inhibitory synapse, which decreases synapse weight
								runtimeDataGPU.wtChange[p] -= groupConfigsGPU[grpId].BETA_LTP;
							} else if (stdp_tDiff <= groupConfigsGPU[grpId].DELTA) { // LTD of inhibitory synapse, which increases synapse weight
								runtimeDataGPU.wtChange[p] -= groupConfigsGPU[grpId].BETA_LTD;
							}
							break;
						default:
							break;
						}
					}
				}
			}
		}
	}
	__syncthreads();
}
#define FIRE_CHUNK_CNT 512
/*!
* \brief This kernel is responsible for finding the neurons that need to be fired.
*
* We use a buffered firing table that allows neuron to gradually load
* the buffer and make it easy to carry out the calculations in a single group.
* A single function is used for simple neurons and also for poisson neurons.
* The function also update LTP
*
* device access: spikeCountD2SecGPU, spikeCountD1SecGPU
* net access: numNReg numNPois, numN, sim_with_stdp, sim_in_testing, sim_with_homeostasis, maxSpikesD1, maxSpikesD2
* grp access: Type, spikeGenFunc, Noffset, withSpikeCounter, spkCntBufPos, StartN, WithSTP, avgTimeScale
WithSTDP, WithESTDP, WithISTDP, WithESTDPCurve, With ISTDPCurve, all STDP parameters
* rtd access: randNum, poissonFireRate, spkCntBuf, nSpikeCnt, voltage, recovery, Izh_c, Izh_d
* cumulativePre, Npre_plastic, (R)synSpikeTime, (W)lastSpikeTime, (W)wtChange,
* avgFiring
*/
// Detects which neurons fire in this time step and records them, buffering up
// to FIRE_CHUNK_CNT ids per block in shared memory before flushing to the
// global firing tables via updateNewFirings(); LTP updates run on each flush.
__global__ void kernel_findFiring (int simTime) {
	__shared__ volatile unsigned int fireCnt;      // spikes accepted into the local table
	__shared__ volatile unsigned int fireCntTest;  // reservation counter (may exceed table size)
	__shared__ volatile unsigned int fireCntD1;    // subset of fireCnt from delay==1 groups
	__shared__ int fireTable[FIRE_CHUNK_CNT];      // local neuron ids of fired neurons
	__shared__ short int fireGrpId[FIRE_CHUNK_CNT];
	__shared__ volatile int errCode;
	if (threadIdx.x == 0) {
		fireCnt = 0; // initialize total cnt to 0
		fireCntD1 = 0; // initialize d1 cnt to 0
		fireCntTest = 0; // initialize test cnt to 0
	}
	const int totBuffers=loadBufferCount;
	__syncthreads();
	for (int bufPos = blockIdx.x; bufPos < totBuffers; bufPos += gridDim.x) {
		// KILLME !!! This can be further optimized ....
		// instead of reading each neuron group separately .....
		// read a whole buffer and use the result ......
		int2 threadLoad = getStaticThreadLoad(bufPos);
		int lNId = (STATIC_LOAD_START(threadLoad) + threadIdx.x);
		int lastLNId = STATIC_LOAD_SIZE(threadLoad);
		short int lGrpId = STATIC_LOAD_GROUP(threadLoad);
		bool needToWrite = false; // used by all neurons to indicate firing condition
		int fireId = 0;
		// threadId is valid and lies within the lastId.....
		if ((threadIdx.x < lastLNId) && (lNId < networkConfigGPU.numN)) {
			// Simple poisson spiker uses the poisson firing probability
			// to detect whether it has fired or not....
			if(isPoissonGroup(lGrpId)) { // spikes generated by spikeGenFunc
				if(groupConfigsGPU[lGrpId].isSpikeGenFunc) {
					unsigned int offset = lNId - groupConfigsGPU[lGrpId].lStartN + groupConfigsGPU[lGrpId].Noffset;
					needToWrite = getSpikeGenBit(offset);
				} else { // spikes generated by poisson rate
					needToWrite = getPoissonSpike(lNId);
				}
				// Note: valid lastSpikeTime of spike gen neurons is required by userDefinedSpikeGenerator()
				if (needToWrite)
					runtimeDataGPU.lastSpikeTime[lNId] = simTime;
			} else {
				// regular neuron: fire when the membrane potential crossed threshold
				if (runtimeDataGPU.voltage[lNId] >= 30.0f) {
					needToWrite = true;
				}
			}
		}
		// loop through a few times to ensure that we have added/processed all spikes that need to be written
		// if the buffer is small relative to the number of spikes needing to be written, we may have to empty the buffer a few times...
		for (int c = 0; c < 2; c++) {
			// we first increment fireCntTest to make sure we haven't filled the buffer
			if (needToWrite)
				fireId = atomicAdd((int*)&fireCntTest, 1);
			// if there is a spike and the buffer still has space...
			if (needToWrite && (fireId <(FIRE_CHUNK_CNT))) {
				// get our position in the buffer
				fireId = atomicAdd((int*)&fireCnt, 1);
				if (groupConfigsGPU[lGrpId].MaxDelay == 1)
					atomicAdd((int*)&fireCntD1, 1);
				// store ID of the fired neuron
				needToWrite = false;
				fireTable[fireId] = lNId;
				fireGrpId[fireId] = lGrpId;//setFireProperties(grpId, isInhib);
			}
			__syncthreads();
			// the local firing table is full. dump the local firing table to the global firing table before proceeding
			if (fireCntTest >= (FIRE_CHUNK_CNT)) {
				// clear the table and update...
				int retCode = updateNewFirings(fireTable, fireGrpId, fireCnt, fireCntD1, simTime);
				if (retCode != 0) return;
				// update based on stdp rule
				// KILLME !!! if (simTime > 0))
				if (networkConfigGPU.sim_with_stdp && !networkConfigGPU.sim_in_testing)
					updateLTP (fireTable, fireGrpId, fireCnt, simTime);
				// reset counters
				if (threadIdx.x == 0) {
					fireCntD1  = 0;
					fireCnt   = 0;
					fireCntTest = 0;
				}
			}
		}
	}
	__syncthreads();
	// few more fired neurons are left. we update their firing state here..
	if (fireCnt) {
		int retCode = updateNewFirings(fireTable, fireGrpId, fireCnt, fireCntD1, simTime);
		if (retCode != 0) return;
		if (networkConfigGPU.sim_with_stdp && !networkConfigGPU.sim_in_testing)
			updateLTP(fireTable, fireGrpId, fireCnt, simTime);
	}
}
//******************************** UPDATE CONDUCTANCES AND TOTAL SYNAPTIC CURRENT EVERY TIME STEP *****************************
#define LOG_CURRENT_GROUP 5
/*!
* \brief Based on the bitvector used for indicating the presence of spike, the global conductance values are updated.
*
* net access: numNReg, numNPois, I_setPitch, maxDelay, STP_Pitch, sim_with_conductances,
sim_with_NMDA_rise, sim_withGABAb_Rise, sNMDA, sGABAb
* grp access: WithSTP, STP_A
* rtd access: Npre, cumulativePre, I_set, preSynapticIds, grpIds, wt, stpx, stpu, connIdsPreIdx,
gAMPA, gGABAa, gNMDA_r, gNMDA_d, gNMDA, gGABAb_r, gGABAb_d, gGABAb
* glb access: d_mulSynFast, d_mulSynSlow
*/
// Accumulates the synaptic input delivered this time step into each post-
// neuron's conductances (COBA) or current (CUBA).  The I_set bit field marks
// which pre-synapses delivered a spike; set bits are located with the shared
// quick-lookup table, scaled by weight / STP / connection multipliers, summed
// per receptor type, and the bit words are cleared afterwards.
__global__ void kernel_conductanceUpdate (int simTimeMs, int simTimeSec, int simTime) {
	__shared__ int sh_quickSynIdTable[256];
	// Table for quick access: stage the lowest-set-bit lookup in shared memory
	for (int i = 0; i < 256; i += blockDim.x) {
		if ((i + threadIdx.x) < 256) {
			sh_quickSynIdTable[i + threadIdx.x] = quickSynIdTableGPU[i + threadIdx.x];
		}
	}
	__syncthreads();
	const int totBuffers = loadBufferCount;
	for (int bufPos = blockIdx.x; bufPos < totBuffers; bufPos += gridDim.x) {
		// KILLME !!! This can be further optimized ....
		// instead of reading each neuron group separately .....
		// read a whole buffer and use the result ......
		int2 threadLoad = getStaticThreadLoad(bufPos);
		int postNId = STATIC_LOAD_START(threadLoad) + threadIdx.x;
		int lastNId = STATIC_LOAD_SIZE(threadLoad);
		if ((threadIdx.x < lastNId) && (IS_REGULAR_NEURON(postNId, networkConfigGPU.numNReg, networkConfigGPU.numNPois))) {
			// P6-1
			// load the initial current due to noise inputs for neuron 'post_nid'
			// initial values of the conductances for neuron 'post_nid'
			float AMPA_sum = 0.0f;
			float NMDA_sum = 0.0f;
			float NMDA_r_sum = 0.0f;
			float NMDA_d_sum = 0.0f;
			float GABAa_sum = 0.0f;
			float GABAb_sum = 0.0f;
			float GABAb_r_sum = 0.0f;
			float GABAb_d_sum = 0.0f;
			int lmt = runtimeDataGPU.Npre[postNId];
			unsigned int cum_pos = runtimeDataGPU.cumulativePre[postNId];
			// find the total current to this neuron: one iteration per 32-synapse word
			for (int j = 0; (lmt) && (j <= ((lmt - 1) >> LOG_CURRENT_GROUP)); j++) {
				// because of malloc2D operation we are using pitch, post_nid, j to get
				// actual position of the input current....
				// int* tmp_I_set_p = ((int*)((char*)runtimeDataGPU.I_set + j * networkConfigGPU.I_setPitch) + post_nid);
				uint32_t* tmp_I_set_p = getFiringBitGroupPtr(postNId, j);
				uint32_t tmp_I_set = *tmp_I_set_p;
				// table lookup based find bits that are set
				int cnt = 0;        // which byte of the word is being scanned
				int tmp_I_cnt = 0;  // number of set bits consumed so far
				while (tmp_I_set) {
					int k = (tmp_I_set >> (8 * cnt)) & 0xff;
					if (k == 0) {
						cnt = cnt + 1;
						continue;
					}
					// lowest set bit within the byte -> synapse index within the word
					int wt_i = sh_quickSynIdTable[k];
					int wtId = (j * 32 + cnt * 8 + wt_i);
					SynInfo synInfo = runtimeDataGPU.preSynapticIds[cum_pos + wtId];
					//uint8_t pre_grpId = GET_CONN_GRP_ID(pre_Id);
					uint32_t preNId = GET_CONN_NEURON_ID(synInfo);
					short int preGrpId = runtimeDataGPU.grpIds[preNId];
					char type = groupConfigsGPU[preGrpId].Type;
					// load the synaptic weight for the wtId'th input
					float change = runtimeDataGPU.wt[cum_pos + wtId];
					// Adjust the weight according to STP scaling
					if (groupConfigsGPU[preGrpId].WithSTP) {
						int tD = 0; // \FIXME find delay
						// \FIXME I think pre_nid needs to be adjusted for the delay
						int ind_minus = getSTPBufPos(preNId, (simTime - tD - 1)); // \FIXME should be adjusted for delay
						int ind_plus = getSTPBufPos(preNId, (simTime - tD));
						// dI/dt = -I/tau_S + A * u^+ * x^- * \delta(t-t_{spk})
						change *= groupConfigsGPU[preGrpId].STP_A * runtimeDataGPU.stpx[ind_minus] * runtimeDataGPU.stpu[ind_plus];
					}
					if (networkConfigGPU.sim_with_conductances) {
						// COBA: accumulate into the receptor-specific sums,
						// applying the per-connection fast/slow multipliers
						short int connId = runtimeDataGPU.connIdsPreIdx[cum_pos+wtId];
						if (type & TARGET_AMPA)
							AMPA_sum += change * d_mulSynFast[connId];
						if (type & TARGET_NMDA) {
							if (networkConfigGPU.sim_with_NMDA_rise) {
								NMDA_r_sum += change * d_mulSynSlow[connId] * networkConfigGPU.sNMDA;
								NMDA_d_sum += change * d_mulSynSlow[connId] * networkConfigGPU.sNMDA;
							} else {
								NMDA_sum += change * d_mulSynSlow[connId];
							}
						}
						if (type & TARGET_GABAa)
							GABAa_sum += change * d_mulSynFast[connId]; // wt should be negative for GABAa and GABAb
						if (type & TARGET_GABAb) { // but that is dealt with below
							if (networkConfigGPU.sim_with_GABAb_rise) {
								GABAb_r_sum += change * d_mulSynSlow[connId] * networkConfigGPU.sGABAb;
								GABAb_d_sum += change * d_mulSynSlow[connId] * networkConfigGPU.sGABAb;
							} else {
								GABAb_sum += change * d_mulSynSlow[connId];
							}
						}
					}
					else {
						// current based model with STP (CUBA)
						// updated current for neuron 'post_nid'
						AMPA_sum += change;
					}
					tmp_I_cnt++;
					// clear the bit just handled so the scan terminates
					tmp_I_set = tmp_I_set & (~(1 << (8 * cnt + wt_i)));
				}
				// FIXME: move reset outside kernel for debugging I_set, resume it later
				// reset the input if there are any bits set
				if(tmp_I_cnt)
					*tmp_I_set_p = 0;
				// NOTE(review): this barrier (and the one below) sits inside a
				// divergent conditional -- not all threads of the block reach it.
				// Preserved as-is; confirm against the reference implementation.
				__syncthreads();
			}
			__syncthreads();
			// P6-2
			if (networkConfigGPU.sim_with_conductances) {
				// don't add mulSynFast/mulSynSlow here, because they depend on the exact pre<->post connection, not
				// just post_nid
				runtimeDataGPU.gAMPA[postNId] += AMPA_sum;
				runtimeDataGPU.gGABAa[postNId] -= GABAa_sum; // wt should be negative for GABAa and GABAb
				if (networkConfigGPU.sim_with_NMDA_rise) {
					runtimeDataGPU.gNMDA_r[postNId] += NMDA_r_sum;
					runtimeDataGPU.gNMDA_d[postNId] += NMDA_d_sum;
				} else {
					runtimeDataGPU.gNMDA[postNId] += NMDA_sum;
				}
				if (networkConfigGPU.sim_with_GABAb_rise) {
					runtimeDataGPU.gGABAb_r[postNId] -= GABAb_r_sum;
					runtimeDataGPU.gGABAb_d[postNId] -= GABAb_d_sum;
				} else {
					runtimeDataGPU.gGABAb[postNId] -= GABAb_sum;
				}
			}
			else {
				runtimeDataGPU.current[postNId] += AMPA_sum;
			}
		}
	}
}
//************************ UPDATE GLOBAL STATE EVERY TIME STEP *******************************************************//
/*!
* \brief This device function implements the equations of neuron dynamics
*
* \param[in] nid The neuron id to be updated
* \param[in] grpId The group id of the neuron
*/
// Integrates one 1-ms step of the Izhikevich membrane dynamics for neuron nid,
// using COND_INTEGRATION_SCALE sub-steps for numerical stability.  In COBA
// mode the input current is derived from the conductances; in CUBA mode it is
// read from (and then cleared in) runtimeDataGPU.current.
__device__ void updateNeuronState(int nid, int grpId) {
	float v = runtimeDataGPU.voltage[nid];
	float u = runtimeDataGPU.recovery[nid];
	float I_sum, NMDAtmp;
	float gNMDA, gGABAb;
	// loop that allows smaller integration time step for v's and u's
	for (int c = 0; c < COND_INTEGRATION_SCALE; c++) {
		I_sum = 0.0f;
		if (networkConfigGPU.sim_with_conductances) {
			// voltage-dependent NMDA gating term ((v+80)/60)^2
			NMDAtmp = (v + 80.0f) * (v + 80.0f) / 60.0f / 60.0f;
			// rise/decay mode represents each slow conductance as (decay - rise)
			gNMDA = (networkConfigGPU.sim_with_NMDA_rise) ? (runtimeDataGPU.gNMDA_d[nid] - runtimeDataGPU.gNMDA_r[nid]) : runtimeDataGPU.gNMDA[nid];
			gGABAb = (networkConfigGPU.sim_with_GABAb_rise) ? (runtimeDataGPU.gGABAb_d[nid] - runtimeDataGPU.gGABAb_r[nid]) : runtimeDataGPU.gGABAb[nid];
			// total synaptic current: each channel drives v toward its reversal
			// potential (0 mV AMPA/NMDA, -70 mV GABAa, -90 mV GABAb)
			I_sum = -(runtimeDataGPU.gAMPA[nid] * (v - 0.0f)
					+ gNMDA * NMDAtmp / (1.0f + NMDAtmp) * (v - 0.0f)
					+ runtimeDataGPU.gGABAa[nid] * (v + 70.0f)
					+ gGABAb * (v + 90.0f));
		} else {
			I_sum = runtimeDataGPU.current[nid];
		}
		// update vpos and upos for the current neuron
		// Izhikevich: dv/dt = 0.04v^2 + 5v + 140 - u + I
		v += ((0.04f * v + 5.0f) * v + 140.0f - u + I_sum + runtimeDataGPU.extCurrent[nid]) / COND_INTEGRATION_SCALE;
		if (v > 30.0f) {
			v = 30.0f; // break the loop but evaluate u[i]
			c = COND_INTEGRATION_SCALE;
		}
		if (v < -90.0f) v = -90.0f;
		// du/dt = a * (b*v - u)
		u += (runtimeDataGPU.Izh_a[nid] * (runtimeDataGPU.Izh_b[nid] * v - u) / COND_INTEGRATION_SCALE);
	}
	if(networkConfigGPU.sim_with_conductances) {
		// store the last sub-step's synaptic current (for monitoring)
		runtimeDataGPU.current[nid] = I_sum;
	} else {
		// current must be reset here for CUBA and not kernel_STPUpdateAndDecayConductances
		runtimeDataGPU.current[nid] = 0.0f;
	}
	runtimeDataGPU.voltage[nid] = v;
	runtimeDataGPU.recovery[nid] = u;
}
/*!
* \brief update neuron state
*
 * This kernel updates neurons' membrane potential according to the neurons' dynamics model.
 * This kernel also updates variables required by homeostasis
* This kernel also update variables required by homeostasis
*
* net access: numN, numNReg, numNPois, sim_with_conductances, sim_with_NMDA_rise, sim_with_GABAb_rise
* grp access: WithHomeostasis, avgTimeScale_decay
* rtd access: avgFiring, voltage, recovery, gNMDA, gNMDA_r, gNMDA_d, gGABAb, gGABAb_r, gGABAb_d, gAMPA, gGABAa,
* current, extCurrent, Izh_a, Izh_b
* glb access:
*/
// Advances the membrane dynamics of every regular neuron by one time step,
// walking the static thread-load buffers, and applies homeostasis decay for
// groups that use it.
__global__ void kernel_neuronStateUpdate() {
	const int totBuffers = loadBufferCount;
	for (int bufPos = blockIdx.x; bufPos < totBuffers; bufPos += gridDim.x) {
		// decode this buffer's static thread-load entry
		const int2 threadLoad = getStaticThreadLoad(bufPos);
		const int nid = STATIC_LOAD_START(threadLoad) + threadIdx.x;
		const int bufSize = STATIC_LOAD_SIZE(threadLoad);
		const int grpId = STATIC_LOAD_GROUP(threadLoad);
		// skip threads beyond this buffer's extent or the network size
		if (threadIdx.x >= bufSize || nid >= networkConfigGPU.numN)
			continue;
		// only regular (non-Poisson) neurons carry membrane dynamics
		if (!IS_REGULAR_NEURON(nid, networkConfigGPU.numNReg, networkConfigGPU.numNPois))
			continue;
		// P7: integrate membrane potential and recovery variable
		updateNeuronState(nid, grpId);
		// P8: decay the average firing rate where homeostasis is enabled
		if (groupConfigsGPU[grpId].WithHomeostasis)
			updateHomeoStaticState(nid, grpId);
	}
}
/*!
* \brief Update the state of groups, which includes concentration of dopamine currently
*
* Update the concentration of neuronmodulator
*
* net access: numGroups
* grp access: WithESTDPtype, WithISTDPtype, baseDP, decayDP
* rtd access: grpDA, grpDABuffer
* glb access:
*/
// Updates per-group state: decays the dopamine concentration toward its
// baseline for DA-modulated groups and logs the value for this ms.
__global__ void kernel_groupStateUpdate(int simTime) {
	const int grpIdx = blockIdx.x * blockDim.x + threadIdx.x;
	// P9: one thread per group
	if (grpIdx >= networkConfigGPU.numGroups)
		return;
	// dopamine decays only while above baseline and only for DA-modulated STDP
	const bool daModulated = (groupConfigsGPU[grpIdx].WithESTDPtype == DA_MOD)
			|| (groupConfigsGPU[grpIdx].WithISTDPtype == DA_MOD);
	if (daModulated && runtimeDataGPU.grpDA[grpIdx] > groupConfigsGPU[grpIdx].baseDP)
		runtimeDataGPU.grpDA[grpIdx] *= groupConfigsGPU[grpIdx].decayDP;
	// log dopamine concentration (buffer holds 1000 entries per group, one per ms)
	runtimeDataGPU.grpDABuffer[grpIdx * 1000 + simTime] = runtimeDataGPU.grpDA[grpIdx];
}
//******************************** UPDATE STP STATE EVERY TIME STEP **********************************************
/*!
 * \brief This function is called every time step to update STP and decay conductances
*
* net access sim_with_conductance, sim_with_NMDA_rise, sim_with_GABAb_rise, numNReg, numNPois, numN, STP_Pitch, maxDelay
* grp access WithSTP
* rtd access gAMPA, gNMDA_r, gNMDA_d, gNMDA, gBABAa, gGABAb_r, gGABAb_d, gGABAb
* rtd access stpu, stpx
*/
// Per-ms maintenance: exponentially decays every conductance (COBA mode) and
// advances the STP variables u/x for neurons in groups with STP enabled.
__global__ void kernel_STPUpdateAndDecayConductances (int t, int sec, int simTime) {
	const int totBuffers = loadBufferCount;
	for (int bufPos = blockIdx.x; bufPos < totBuffers; bufPos += gridDim.x) {
		// KILLME !!! This can be further optimized ....
		// instead of reading each neuron group separately .....
		// read a whole buffer and use the result ......
		int2 threadLoad = getStaticThreadLoad(bufPos);
		int nid = (STATIC_LOAD_START(threadLoad) + threadIdx.x);
		int lastId = STATIC_LOAD_SIZE(threadLoad);
		int grpId = STATIC_LOAD_GROUP(threadLoad);
		// update the conductance parameters of the current neuron
		// NOTE(review): unlike the STP branch below, this branch does not check
		// (threadIdx.x < lastId), so a thread past its chunk may touch a neuron
		// owned by another buffer -- confirm the load table guarantees no overlap.
		if (networkConfigGPU.sim_with_conductances && IS_REGULAR_NEURON(nid, networkConfigGPU.numNReg, networkConfigGPU.numNPois)) {
			runtimeDataGPU.gAMPA[nid]   *=  networkConfigGPU.dAMPA;
			if (networkConfigGPU.sim_with_NMDA_rise) {
				runtimeDataGPU.gNMDA_r[nid] *= networkConfigGPU.rNMDA;
				runtimeDataGPU.gNMDA_d[nid] *= networkConfigGPU.dNMDA;
			} else {
				runtimeDataGPU.gNMDA[nid] *= networkConfigGPU.dNMDA;
			}
			runtimeDataGPU.gGABAa[nid]  *=  networkConfigGPU.dGABAa;
			if (networkConfigGPU.sim_with_GABAb_rise) {
				runtimeDataGPU.gGABAb_r[nid] *= networkConfigGPU.rGABAb;
				runtimeDataGPU.gGABAb_d[nid] *= networkConfigGPU.dGABAb;
			} else {
				runtimeDataGPU.gGABAb[nid] *= networkConfigGPU.dGABAb;
			}
		}
		// STP decay/recovery: u decays toward 0, x recovers toward 1
		if (groupConfigsGPU[grpId].WithSTP && (threadIdx.x < lastId) && (nid < networkConfigGPU.numN)) {
			int ind_plus  = getSTPBufPos(nid, simTime);
			int ind_minus = getSTPBufPos(nid, (simTime-1)); // \FIXME sure?
			runtimeDataGPU.stpu[ind_plus] = runtimeDataGPU.stpu[ind_minus]*(1.0f-groupConfigsGPU[grpId].STP_tau_u_inv);
			runtimeDataGPU.stpx[ind_plus] = runtimeDataGPU.stpx[ind_minus] + (1.0f-runtimeDataGPU.stpx[ind_minus])*groupConfigsGPU[grpId].STP_tau_x_inv;
		}
	}
}
//********************************UPDATE SYNAPTIC WEIGHTS EVERY SECOND *************************************************************
/*!
* \brief This kernel update synaptic weights
*
* This kernel is called every second to adjust the timingTable and globalFiringTable
* We do the following thing:
 * 1. We discard all firing information that happened more than 1000-maxDelay_ time steps ago.
 * 2. We move the firing information that happened in the last 1000-maxDelay_ time steps to
 * the beginning of the globalFiringTable.
* 3. We read each value of "wtChange" and update the value of "synaptic weights wt".
* We also clip the "synaptic weight wt" to lie within the required range.
*/
__device__ void updateSynapticWeights(int nid, unsigned int synId, int grpId, float diff_firing, float homeostasisScale, float baseFiring, float avgTimeScaleInv) {
	// Fold one synapse's accumulated weight derivative (wtChange) into its
	// weight (wt), optionally scaled by homeostasis and/or gated by the group's
	// dopamine concentration, then clip the weight to [0, maxWt] (excitatory)
	// or [maxWt, 0] (inhibitory).
	//
	// \param nid              post-synaptic neuron id (context only; not read here)
	// \param synId            global synapse index into wt/wtChange/maxSynWt
	// \param grpId            group id used to look up STDP/homeostasis config
	// \param diff_firing      1 - avgFiring/baseFiring (0 when homeostasis is off)
	// \param homeostasisScale homeostasis strength (0 when homeostasis is off)
	// \param baseFiring       target firing rate (unused when homeostasis is off)
	// \param avgTimeScaleInv  inverse homeostasis averaging time scale
	//
	// This function does not get called if the neuron group has all fixed weights.
	// t_twChange is adjusted by stdpScaleFactor based on frequency of weight updates (e.g., 10ms, 100ms, 1s)
	float t_wt = runtimeDataGPU.wt[synId];             // current weight
	float t_wtChange = runtimeDataGPU.wtChange[synId]; // accumulated STDP derivative
	float t_effectiveWtChange = networkConfigGPU.stdpScaleFactor * t_wtChange;
	float t_maxWt = runtimeDataGPU.maxSynWt[synId];    // sign marks excitatory (>=0) vs inhibitory (<0)
	// E-STDP contribution
	switch (groupConfigsGPU[grpId].WithESTDPtype) {
	case STANDARD:
		if (groupConfigsGPU[grpId].WithHomeostasis) {
			// this factor is slow
			t_wt += (diff_firing*t_wt*homeostasisScale + t_effectiveWtChange) * baseFiring * avgTimeScaleInv / (1.0f+fabs(diff_firing)*50.0f);
		} else {
			t_wt += t_effectiveWtChange;
		}
		break;
	case DA_MOD:
		if (groupConfigsGPU[grpId].WithHomeostasis) {
			// dopamine concentration gates the STDP-driven change.
			// NOTE(review): this overwrites t_effectiveWtChange, so the I-STDP
			// switch below would reuse the dopamine-scaled value — confirm a
			// group can never combine ESTDP=DA_MOD with ISTDP=STANDARD.
			t_effectiveWtChange = runtimeDataGPU.grpDA[grpId] * t_effectiveWtChange;
			t_wt += (diff_firing*t_wt*homeostasisScale + t_effectiveWtChange) * baseFiring * avgTimeScaleInv / (1.0f+fabs(diff_firing)*50.0f);
		} else {
			t_wt += runtimeDataGPU.grpDA[grpId] * t_effectiveWtChange;
		}
		break;
	case UNKNOWN_STDP:
	default:
		// we shouldn't even be here if !WithSTDP
		break;
	}
	// I-STDP contribution (same structure as the E-STDP switch above)
	switch (groupConfigsGPU[grpId].WithISTDPtype) {
	case STANDARD:
		if (groupConfigsGPU[grpId].WithHomeostasis) {
			// this factor is slow
			t_wt += (diff_firing*t_wt*homeostasisScale + t_effectiveWtChange) * baseFiring * avgTimeScaleInv / (1.0f+fabs(diff_firing)*50.0f);
		} else {
			t_wt += t_effectiveWtChange;
		}
		break;
	case DA_MOD:
		if (groupConfigsGPU[grpId].WithHomeostasis) {
			t_effectiveWtChange = runtimeDataGPU.grpDA[grpId] * t_effectiveWtChange;
			t_wt += (diff_firing*t_wt*homeostasisScale + t_effectiveWtChange) * baseFiring * avgTimeScaleInv / (1.0f+fabs(diff_firing)*50.0f);
		} else {
			t_wt += runtimeDataGPU.grpDA[grpId] * t_effectiveWtChange;
		}
		break;
	case UNKNOWN_STDP:
	default:
		// we shouldn't even be here if !WithSTDP
		break;
	}
	// It's user's choice to decay weight change or not
	// see setWeightAndWeightChangeUpdate()
	t_wtChange *= networkConfigGPU.wtChangeDecay;
	// Check the synapse is excitatory or inhibitory first
	if (t_maxWt >= 0.0f) { // excitatory synapse
		if (t_wt >= t_maxWt) t_wt = t_maxWt;
		if (t_wt < 0.0f) t_wt = 0.0f;
	} else { // inhibitory synapse
		if (t_wt <= t_maxWt) t_wt = t_maxWt;
		if (t_wt > 0.0f) t_wt = 0.0f;
	}
	// write back the clipped weight and the decayed derivative
	runtimeDataGPU.wt[synId] = t_wt;
	runtimeDataGPU.wtChange[synId] = t_wtChange;
}
#define UPWTS_CLUSTERING_SZ 32
/*!
* \brief this kernel updates all synaptic weights
*
* net access: stdpScaleFactor, wtChangeDecay
 * grp access: homeostasisScale, avgTimeScaleInv, FixedInputWts, WithESTDPtype, WithISTDPtype, WithHomeostasis
* rtd access: Npre_plastic, cumulativePre, avgFiring, baseFiringInv, baseFiring, wt, wtChange, maxSynWt
* glb access:
*/
__global__ void kernel_updateWeights() {
	__shared__ volatile int errCode; // (not referenced in this kernel)
	__shared__ int startId, lastId, grpId, totBuffers, grpNCnt;
	__shared__ int2 threadLoad;
	// added for homeostasis
	__shared__ float homeostasisScale, avgTimeScaleInv;
	// thread 0 caches per-block constants:
	// grpNCnt = ceil(blockDim.x / UPWTS_CLUSTERING_SZ), i.e. how many neurons
	// the block works on concurrently (UPWTS_CLUSTERING_SZ threads per neuron)
	if(threadIdx.x == 0) {
		totBuffers = loadBufferCount;
		grpNCnt = (blockDim.x / UPWTS_CLUSTERING_SZ) + ((blockDim.x % UPWTS_CLUSTERING_SZ) != 0);
	}
	__syncthreads();
	for (int bufPos = blockIdx.x; bufPos < totBuffers; bufPos += gridDim.x) {
		// KILLME !!! This can be further optimized ....
		// instead of reading each neuron group separately .....
		// read a whole buffer and use the result ......
		// if ( threadIdx.x) { // TSC: this could be a performance bug, 127 threads other than the first thread try to read
		// threadLoad and wirte homeostatsisScale and avgTimeScaleInv at the same time
		if (threadIdx.x == 0) {
			threadLoad = getStaticThreadLoad(bufPos);
			startId = STATIC_LOAD_START(threadLoad); // first neuron id in this buffer
			lastId = STATIC_LOAD_SIZE(threadLoad);   // number of neurons in this buffer
			grpId = STATIC_LOAD_GROUP(threadLoad);   // group this buffer belongs to
			// load homestasis parameters
			if (groupConfigsGPU[grpId].WithHomeostasis) {
				homeostasisScale = groupConfigsGPU[grpId].homeostasisScale;
				avgTimeScaleInv = groupConfigsGPU[grpId].avgTimeScaleInv;
			} else {
				// neutral values so updateSynapticWeights() reduces to pure STDP
				homeostasisScale = 0.0f;
				avgTimeScaleInv = 1.0f;
			}
		}
		__syncthreads();
		// NOTE(review): there is no barrier at the bottom of this loop, so on the
		// next iteration thread 0 may overwrite grpId/startId while other threads
		// are still in the nid loop below — confirm this cannot race in practice.
		// the weights are fixed for this group.. so dont make any changes on
		// the weight and continue to the next set of neurons...
		// (grpId lives in shared memory, so all threads branch uniformly here)
		if (groupConfigsGPU[grpId].FixedInputWts)
			continue;
		// each cluster of UPWTS_CLUSTERING_SZ threads handles one neuron
		int nid = (threadIdx.x / UPWTS_CLUSTERING_SZ) + startId;
		// update the synaptic weights from the synaptic weight derivatives
		for(; nid < startId + lastId; nid += grpNCnt) {
			int Npre_plastic = runtimeDataGPU.Npre_plastic[nid];            // plastic pre-synapse count
			unsigned int cumulativePre = runtimeDataGPU.cumulativePre[nid]; // start of this neuron's synapse list
			float diff_firing = 0.0f;
			float baseFiring = 0.0f;
			if (groupConfigsGPU[grpId].WithHomeostasis) {
				// relative deviation from the neuron's target firing rate
				diff_firing = (1.0f - runtimeDataGPU.avgFiring[nid] * runtimeDataGPU.baseFiringInv[nid]);
				baseFiring = runtimeDataGPU.baseFiring[nid];
			}
			const int threadIdGrp = (threadIdx.x % UPWTS_CLUSTERING_SZ); // lane within the cluster
			// use 32 threads to update 32 synapses parallely
			for(unsigned int synIdOffset = cumulativePre; synIdOffset < cumulativePre + Npre_plastic; synIdOffset += UPWTS_CLUSTERING_SZ) {
				//excitatory connection change the synaptic weights
				unsigned int synId = synIdOffset + threadIdGrp;
				if(synId < cumulativePre + Npre_plastic) {
					updateSynapticWeights(nid, synId, grpId, diff_firing, homeostasisScale, baseFiring, avgTimeScaleInv);
				}
			}
		}
	}
}
//********************************UPDATE TABLES AND COUNTERS EVERY SECOND *************************************************************
/*!
* \brief This kernel shift the un-processed firing information in firingTableD2 to the beginning of
* firingTableD2 for the next second of simulation.
*
* net access: maxDelay
* grp access: N/A
* rtd access: firingTableD2
* glb access: timeTableD2GPU
*/
__global__ void kernel_shiftFiringTable() {
	// Compact firingTableD2: move the entries of the last maxDelay ms
	// (indices [timeTableD2GPU[999], timeTableD2GPU[999+maxDelay+1])) down to
	// the start of the table so they seed the next second of simulation.
	// total threads in the grid; each pass copies one grid-wide chunk
	int gnthreads = blockDim.x * gridDim.x;
	// FIX: use the global thread offset, not just threadIdx.x. The stride
	// already spans all blocks, so indexing by threadIdx.x alone leaves gaps
	// (and redundant copies) whenever gridDim.x > 1; identical behavior for a
	// single-block launch.
	int tid = blockIdx.x * blockDim.x + threadIdx.x;
	for(int p = timeTableD2GPU[999], k = 0; p < timeTableD2GPU[999 + networkConfigGPU.maxDelay + 1]; p += gnthreads, k += gnthreads) {
		if ((p + tid) < timeTableD2GPU[999 + networkConfigGPU.maxDelay + 1])
			runtimeDataGPU.firingTableD2[k + tid] = runtimeDataGPU.firingTableD2[p + tid];
	}
}
/*!
* \brief This kernel shift the un-processed firing information in timeTableD1(D2)GPU to the beginning of
* timeTableD1(D2)GPU for the next second of simulation.
*
* After all the threads/blocks had adjusted the firingTableD1(D2)GPU, we update the timeTableD1(D2)GPU
 * so that the firing information that happened in the last maxDelay_ time step would become
* the first maxDelay_ time step firing information for the next second of simulation.
* We also reset/update all spike counters to appropriate values as indicated in the second part
* of this kernel.
*/
__global__ void kernel_shiftTimeTable() {
	const int D = networkConfigGPU.maxDelay;
	// Block 0 moves the last D entries of each time table to the front
	// (rebased so index 0 stays 0) to seed the next simulated second.
	if (blockIdx.x == 0) {
		for (int d = threadIdx.x; d < D; d += blockDim.x) {
			// use d+1 instead of just d because timeTableD2GPU[0] should always be 0
			timeTableD2GPU[d + 1] = timeTableD2GPU[1000 + d + 1] - timeTableD2GPU[1000];
			timeTableD1GPU[d + 1] = timeTableD1GPU[1000 + d + 1] - timeTableD1GPU[1000];
		}
	}
	__syncthreads();
	// A single thread folds the per-second spike counters into the running
	// totals and resets the per-second counters for the next second.
	if ((blockIdx.x == 0) && (threadIdx.x == 0)) {
		timeTableD1GPU[D] = 0;
		spikeCountD2GPU += spikeCountD2SecGPU;
		spikeCountD1GPU += spikeCountD1SecGPU;
		spikeCountD2SecGPU = 0;
		spikeCountD1SecGPU = 0;
		spikeCountExtRxD2SecGPU = 0;
		spikeCountExtRxD1SecGPU = 0;
		// spikes fired in the last D ms carry over into the next second
		spikeCountLastSecLeftD2GPU = timeTableD2GPU[D];
		secD2fireCntTest = timeTableD2GPU[D];
		secD1fireCntTest = 0;
	}
}
//****************************** GENERATE POST-SYNAPTIC CURRENT EVERY TIME-STEP ****************************
/*
* The sequence of handling an post synaptic spike in GPU mode:
* P1. Update synSpikeTime
* P2. Update DA,5HT,ACh,NE accordingly
* P3. Update STDP wtChange
* P4. Load wt into change (temporary variable)
* P5. Modulate change by STP (if enabled)
* P6-1. Modulate change by d_mulSynSlow and d_mulSynFast
* P6-2. Accumulate g(AMPA,NMDA,GABAa,GABAb) or current
* P7. Update v(voltage), u(recovery)
* P8. Update homeostasis
* P9. Decay and log DA,5HT,ACh,NE
*/
__device__ void generatePostSynapticSpike(int simTime, int preNId, int postNId, int synId) {
	// Deliver one spike arriving at synapse (postNId, synId) at time simTime:
	// set the synapse's firing bit, stamp its spike time, bump group dopamine
	// if the source group is dopaminergic, and apply the post-before-pre
	// (LTD-side) STDP update to the synapse's weight derivative.
	//
	// \param simTime current simulation time step
	// \param preNId  id of the neuron that fired (spike source)
	// \param postNId id of the local target neuron
	// \param synId   index of the synapse within postNId's pre-synaptic list
	// get the actual position of the synapses and other variables...
	unsigned int pos = runtimeDataGPU.cumulativePre[postNId] + synId;
	short int preGrpId = runtimeDataGPU.grpIds[preNId]; // STP uses preGrpId
	short int postGrpId = runtimeDataGPU.grpIds[postNId]; // STDP uses postGrpId
	setFiringBitSynapses(postNId, synId);
	// P1
	runtimeDataGPU.synSpikeTime[pos] = simTime; //uncoalesced access
	// P2
	// Got one spike from dopaminergic neuron, increase dopamine concentration in the target area
	if (groupConfigsGPU[preGrpId].Type & TARGET_DA) {
		atomicAdd(&(runtimeDataGPU.grpDA[postGrpId]), 0.04f);
	}
	// P3
	// STDP calculation: the post-synaptic neuron fires before the arrival of pre-synaptic neuron's spike
	if (groupConfigsGPU[postGrpId].WithSTDP && !networkConfigGPU.sim_in_testing) {
		// time since the post-synaptic neuron last fired
		int stdp_tDiff = simTime - runtimeDataGPU.lastSpikeTime[postNId];
		if (stdp_tDiff >= 0) {
			if (groupConfigsGPU[postGrpId].WithESTDP) {
				// Handle E-STDP curves
				switch (groupConfigsGPU[postGrpId].WithESTDPcurve) {
				case EXP_CURVE: // exponential curve
				case TIMING_BASED_CURVE: // sc curve
					// skip once tDiff exceeds ~25 time constants (contribution negligible)
					if (stdp_tDiff * groupConfigsGPU[postGrpId].TAU_MINUS_INV_EXC < 25.0f)
						runtimeDataGPU.wtChange[pos] += STDP( stdp_tDiff, groupConfigsGPU[postGrpId].ALPHA_MINUS_EXC, groupConfigsGPU[postGrpId].TAU_MINUS_INV_EXC); // uncoalesced access
					break;
				default:
					break;
				}
			}
			if (groupConfigsGPU[postGrpId].WithISTDP) {
				// Handle I-STDP curves
				switch (groupConfigsGPU[postGrpId].WithISTDPcurve) {
				case EXP_CURVE: // exponential curve
					if ((stdp_tDiff * groupConfigsGPU[postGrpId].TAU_MINUS_INV_INB) < 25.0f) { // LTD of inhibitory synapse, which increases synapse weight
						runtimeDataGPU.wtChange[pos] -= STDP(stdp_tDiff, groupConfigsGPU[postGrpId].ALPHA_MINUS_INB, groupConfigsGPU[postGrpId].TAU_MINUS_INV_INB);
					}
					break;
				case PULSE_CURVE: // pulse curve
					if (stdp_tDiff <= groupConfigsGPU[postGrpId].LAMBDA) { // LTP of inhibitory synapse, which decreases synapse weight
						runtimeDataGPU.wtChange[pos] -= groupConfigsGPU[postGrpId].BETA_LTP;
					} else if (stdp_tDiff <= groupConfigsGPU[postGrpId].DELTA) { // LTD of inhibitory synapse, which increases synapse weight
						runtimeDataGPU.wtChange[pos] -= groupConfigsGPU[postGrpId].BETA_LTD;
					}
					break;
				default:
					break;
				}
			}
		}
	}
}
#define READ_CHUNK_SZ 64
/*!
* \brief This kernel updates and generates spikes for delays greater than 1 from the fired neuron.
*
* The LTD computation is also executed by this kernel.
*
* net access: maxDelay, I_setPitch, sim_in_testing
 * grp access: Type, WithSTDP, WithESTDP, WithESTDPcurve, WithISTDP, WithISTDPcurve, all STDP parameters
* rtd access: firingTableD2, cumulativePost, postDelayInfo, postSynapticIds, cumulativePre, grpIds,
* grpDA, I_set, (W)synSpikeTime, (R)lastSpikeTime, wtChange
* glb access: spikeCountD2SecGPU, timeTableD2GPU_tex, timeTableD2GPU_tex_offset
*/
__global__ void kernel_doCurrentUpdateD2(int simTimeMs, int simTimeSec, int simTime) {
	// Shared staging area: each pass reads up to READ_CHUNK_SZ fired neurons
	// from firingTableD2 and caches everything needed to walk each neuron's
	// post-synaptic list for the delay that matures this millisecond.
	__shared__ volatile int sh_neuronOffsetTable[READ_CHUNK_SZ + 2];
	__shared__ int sh_delayLength[READ_CHUNK_SZ + 2];
	__shared__ int sh_delayIndexStart[READ_CHUNK_SZ + 2];
	__shared__ int sh_firingId[READ_CHUNK_SZ + 2];
	__shared__ volatile int sh_NeuronCnt; // number of valid staged entries this pass
	const int threadIdWarp = (threadIdx.x % WARP_SIZE);
	const int warpId = (threadIdx.x / WARP_SIZE);
	// this variable is used to record the
	// number of updates done by different blocks
	if(threadIdx.x<=0) { // threadIdx.x is unsigned, so this is simply thread 0
		sh_NeuronCnt = 0;
	}
	__syncthreads();
	// stores the number of fired neurons at time t
	int k = tex1Dfetch(timeTableD2GPU_tex, simTimeMs + networkConfigGPU.maxDelay + 1 + timeTableD2GPU_tex_offset) - 1;
	// stores the number of fired neurons at time (t - maxDelay_)
	int k_end = tex1Dfetch(timeTableD2GPU_tex, simTimeMs + 1 + timeTableD2GPU_tex_offset);
	int t_pos = simTimeMs;
	// we need to read (k-k_end) neurons from the firing
	// table and do necessary updates for all these post-synaptic
	// connection in these neurons..
	// walk the firing table backwards from k toward k_end, one chunk per block
	while ((k >= k_end) && (k >= 0)) {
		// at any point of time EXCIT_READ_CHUNK_SZ neurons
		// read different firing id from the firing table
		if (threadIdx.x < READ_CHUNK_SZ) { // use 64 threads
			int fPos = k - (READ_CHUNK_SZ * blockIdx.x) - threadIdx.x;
			if ((fPos >= 0) && (fPos >= k_end)) {
				// get the neuron nid here....
				//int val = runtimeDataGPU.firingTableD2[fPos];
				//int nid = GET_FIRING_TABLE_NID(val);
				int nid = runtimeDataGPU.firingTableD2[fPos];
				// find the time of firing based on the firing number fPos
				// (scan the time table backwards until fPos falls in its bucket)
				while ( !((fPos >= tex1Dfetch(timeTableD2GPU_tex, t_pos + networkConfigGPU.maxDelay + timeTableD2GPU_tex_offset))
					&& (fPos < tex1Dfetch(timeTableD2GPU_tex, t_pos + networkConfigGPU.maxDelay + 1 + timeTableD2GPU_tex_offset)))) {
					t_pos--;
				}
				// find the time difference between firing of the neuron and the current time
				int tD = simTimeMs - t_pos;
				// find the various delay parameters for neuron 'nid', with a delay of 'tD'
				//sh_axonDelay[threadIdx.x] = tD;
				int tPos = (networkConfigGPU.maxDelay + 1) * nid + tD;
				//sh_firingId[threadIdx.x] = val;
				sh_firingId[threadIdx.x] = nid;
				sh_neuronOffsetTable[threadIdx.x]= runtimeDataGPU.cumulativePost[nid];
				sh_delayLength[threadIdx.x] = runtimeDataGPU.postDelayInfo[tPos].delay_length;
				sh_delayIndexStart[threadIdx.x] = runtimeDataGPU.postDelayInfo[tPos].delay_index_start;
				// This is to indicate that the current thread
				// has a valid delay parameter for post-synaptic firing generation
				atomicAdd((int*)&sh_NeuronCnt, 1);
			}
		}
		__syncthreads();
		// if cnt is zero than no more neurons need to generate
		// post-synaptic firing, then we break the loop.
		if (sh_NeuronCnt == 0) {
			break;
		}
		// first WARP_SIZE threads the post synaptic
		// firing for first neuron, and so on. each of this group
		// needs to generate (numPostSynapses/maxDelay_) spikes for every fired neuron, every second
		// for numPostSynapses=500,maxDelay_=20, we need to generate 25 spikes for each fired neuron
		// for numPostSynapses=600,maxDelay_=20, we need to generate 30 spikes for each fired neuron
		for (int pos = warpId; pos < sh_NeuronCnt; pos += (NUM_THREADS / WARP_SIZE)) {
			// each warp fans out one staged neuron's synapses for this delay
			int delId = threadIdWarp;
			while (delId < sh_delayLength[pos]) {
				// get the post synaptic information for specific delay
				SynInfo postInfo = runtimeDataGPU.postSynapticIds[sh_neuronOffsetTable[pos] + sh_delayIndexStart[pos] + delId];
				int postNId = GET_CONN_NEURON_ID(postInfo); // get post-neuron id
				int synId = GET_CONN_SYN_ID(postInfo); // get synaptic id
				if (postNId < networkConfigGPU.numN) // test if post-neuron is a local neuron
					generatePostSynapticSpike(simTime, sh_firingId[pos] /* preNId */, postNId, synId);
				delId += WARP_SIZE;
			}
		} //(for all excitory neurons in table)
		__syncthreads();
		// reset the staged-entry counter for the next pass
		if(threadIdx.x == 0) {
			sh_NeuronCnt = 0;
		}
		// all blocks together consume gridDim.x * READ_CHUNK_SZ entries per pass
		k = k - (gridDim.x * READ_CHUNK_SZ);
		__syncthreads();
	}
	__syncthreads();
}
/*!
* \brief This kernel updating and generating spikes on connections with a delay of 1ms from the fired neuron.
*
* This function looks mostly like kernel_doCurrentUpdateD2() but has been optimized for a fixed delay of 1ms.
* Ultimately we may merge this kernel with the kernel_doCurrentUpdateD2().
* The LTD computation is also executed by this kernel.
*
* net access: maxDelay, I_setPitch, sim_in_testing
* grp access: Type, grpDA, WithSTDP, WithESTDP, WithISTDP, WithESTDPcurve, WithISTDPcurve, all STDP parameters
* rtd access: postSynapticIds, cumulativePre, grpIds, I_set, wtChange, (R)lastSpikeTime, (W)synSpikeTime
* glb access: timeTableD1GPU, spikeCountD1SecGPU, firingTableD1
*/
__global__ void kernel_doCurrentUpdateD1(int simTimeMs, int simTimeSec, int simTime) {
	// Specialization of kernel_doCurrentUpdateD2() for 1ms-delay connections:
	// each warp fans out the post-synaptic list of one fired neuron; numWarps
	// neurons are staged per pass.
	__shared__ volatile int sh_NeuronCnt;
	__shared__ volatile int sh_neuronOffsetTable[NUM_THREADS / WARP_SIZE + 2];
	__shared__ int sh_delayLength[NUM_THREADS / WARP_SIZE + 2];
	__shared__ int sh_firingId[NUM_THREADS / WARP_SIZE + 2];
	__shared__ int sh_delayIndexStart[NUM_THREADS / WARP_SIZE + 2];
	__shared__ int sh_timing;
	__shared__ int kPosEnd;
	const int warpId = threadIdx.x / WARP_SIZE; // warp id
	const int numWarps = blockDim.x / WARP_SIZE; // number of warp
	const int threadIdWarp = threadIdx.x % WARP_SIZE; // thread id within a warp
	// load the time table for neuron firing
	if (threadIdx.x == 0) {
		// FIX: shared memory is uninitialized; without this, the first
		// iteration's atomicAdd/"== 0" break test below reads an indeterminate
		// value (kernel_doCurrentUpdateD2 initializes its counter the same way).
		sh_NeuronCnt = 0;
		sh_timing = timeTableD1GPU[simTimeMs + networkConfigGPU.maxDelay]; // number of fired neurons at simTimeMs - 1
		kPosEnd = timeTableD1GPU[simTimeMs + networkConfigGPU.maxDelay + 1]; // number of fired neurons at simTimeMs, which is equal to spikeCountD1SecGPU
	}
	__syncthreads();
	int kPos = sh_timing + (blockIdx.x * numWarps);
	__syncthreads();
	// Do current update as long as we have some valid neuron
	while ((kPos >= 0) && (kPos < kPosEnd)) {
		int fPos = -1;
		// a group of threads (one per warp of this block) loads the delay information
		if (threadIdx.x < numWarps) {
			sh_neuronOffsetTable[threadIdx.x] = -1;
			fPos = kPos + threadIdx.x;
			// find the neuron nid and also delay information from fPos
			if ((fPos >= 0) && (fPos < kPosEnd)) {
				atomicAdd((int*)&sh_NeuronCnt, 1);
				//int val = runtimeDataGPU.firingTableD1[fPos];
				//int nid = GET_FIRING_TABLE_NID(val);
				int nid = runtimeDataGPU.firingTableD1[fPos];
				int tPos = (networkConfigGPU.maxDelay + 1) * nid; // delay slot 0 (1ms delay)
				//sh_firingId[threadIdx.x] = val;
				sh_firingId[threadIdx.x] = nid;
				sh_neuronOffsetTable[threadIdx.x] = runtimeDataGPU.cumulativePost[nid];
				sh_delayLength[threadIdx.x] = runtimeDataGPU.postDelayInfo[tPos].delay_length;
				sh_delayIndexStart[threadIdx.x] = runtimeDataGPU.postDelayInfo[tPos].delay_index_start;
			}
		}
		__syncthreads();
		// no more fired neuron from table... we just break from loop
		if (sh_NeuronCnt == 0) {
			break;
		}
		__syncthreads();
		int offset = sh_neuronOffsetTable[warpId];
		// reset the counter for the next pass (reads above already happened)
		if (threadIdx.x == 0) {
			sh_NeuronCnt = 0;
		}
		// 32 threads for generatePostSynapticSpike()
		if (offset >= 0) {
			int delId = threadIdWarp;
			while (delId < sh_delayLength[warpId]) {
				// get the post synaptic information for specific delay
				SynInfo postInfo = runtimeDataGPU.postSynapticIds[offset + sh_delayIndexStart[warpId] + delId];
				int postNId = GET_CONN_NEURON_ID(postInfo); // get post-neuron id
				int synId = GET_CONN_SYN_ID(postInfo); // get synaptic id
				if (postNId < networkConfigGPU.numN) // test if post-neuron is a local neuron
					generatePostSynapticSpike(simTime, sh_firingId[warpId] /* preNId */, postNId, synId);
				delId += WARP_SIZE;
			}
		}
		__syncthreads();
		// advance past the entries consumed by all blocks this pass
		kPos = kPos + (gridDim.x * numWarps);
	}
}
__global__ void kernel_convertExtSpikesD2(int startIdx, int endIdx, int GtoLOffset) {
	// Rebase external (global-id) spikes in firingTableD2[startIdx, endIdx)
	// into the local neuron-id space of this network.
	int ftIdx = startIdx + blockIdx.x * blockDim.x + threadIdx.x;
	// a single thread accounts for the received spikes in the counters
	if (blockIdx.x == 0 && threadIdx.x == 0) {
		int numExtSpikes = endIdx - startIdx; // received external spike count
		secD2fireCntTest += numExtSpikes;
		spikeCountD2SecGPU += numExtSpikes;
		spikeCountExtRxD2GPU += numExtSpikes;
		spikeCountExtRxD2SecGPU += numExtSpikes;
	}
	// FIXME: if endIdx - startIdx > 64 * 128
	if (ftIdx < endIdx)
		runtimeDataGPU.firingTableD2[ftIdx] += GtoLOffset;
}
__global__ void kernel_convertExtSpikesD1(int startIdx, int endIdx, int GtoLOffset) {
	// Rebase external (global-id) spikes in firingTableD1[startIdx, endIdx)
	// into the local neuron-id space of this network.
	int ftIdx = startIdx + blockIdx.x * blockDim.x + threadIdx.x;
	// a single thread accounts for the received spikes in the counters
	if (blockIdx.x == 0 && threadIdx.x == 0) {
		int numExtSpikes = endIdx - startIdx; // received external spike count
		secD1fireCntTest += numExtSpikes;
		spikeCountD1SecGPU += numExtSpikes;
		spikeCountExtRxD1GPU += numExtSpikes;
		spikeCountExtRxD1SecGPU += numExtSpikes;
	}
	// FIXME: if endIdx - startIdx > 64 * 128
	if (ftIdx < endIdx)
		runtimeDataGPU.firingTableD1[ftIdx] += GtoLOffset;
}
/*!
 * \brief this function allocates device (GPU) memory space and copies information of pre-connections to it
*
* This function:
* initialize Npre_plasticInv
* (allocate and) copy Npre, Npre_plastic, Npre_plasticInv, cumulativePre, preSynapticIds
* (allocate and) copy Npost, cumulativePost, postSynapticIds, postDelayInfo
*
*
* \param[in] netId the id of a local network, which is the same as the device (GPU) id
* \param[in] lGrpId the local group id in a local network, which specifiy the group(s) to be copied
* \param[in] dest pointer to runtime data desitnation
* \param[in] src pointer to runtime data source
* \param[in] kind the direction of copying
* \param[in] allocateMem a flag indicates whether allocating memory space before copying
*
* \sa allocateSNN_GPU
* \since v4.0
*/
void SNN::copyPreConnectionInfo(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, hipMemcpyKind kind, bool allocateMem) {
	checkAndSetGPUDevice(netId);
	checkDestSrcPtrs(dest, src, kind, allocateMem, lGrpId, 0); // check that the destination pointer is properly allocated..
	int lengthN, lengthSyn, posN, posSyn;
	// neuron range to copy: the whole assigned network, or one local group
	if (lGrpId == ALL) {
		lengthN = networkConfigs[netId].numNAssigned;
		posN = 0;
	} else {
		lengthN = groupConfigs[netId][lGrpId].numN;
		posN = groupConfigs[netId][lGrpId].lStartN;
	}
	// connection synaptic lengths and cumulative lengths...
	if(allocateMem)
		CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->Npre, sizeof(short) * networkConfigs[netId].numNAssigned));
	CUDA_CHECK_ERRORS(hipMemcpy(&dest->Npre[posN], &src->Npre[posN], sizeof(short) * lengthN, kind));
	// we don't need these data structures if the network doesn't have any plastic synapses at all
	if (!sim_with_fixedwts) {
		// presyn excitatory connections
		if(allocateMem)
			CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->Npre_plastic, sizeof(short) * networkConfigs[netId].numNAssigned));
		CUDA_CHECK_ERRORS(hipMemcpy(&dest->Npre_plastic[posN], &src->Npre_plastic[posN], sizeof(short) * lengthN, kind));
		// Npre_plasticInv is only used on GPUs, only allocate and copy it during initialization
		if(allocateMem) {
			// host-side scratch table of 1/Npre_plastic per neuron.
			// NOTE(review): neurons with Npre_plastic == 0 produce +inf here —
			// confirm kernels never read the inverse for such neurons.
			float* Npre_plasticInv = new float[networkConfigs[netId].numNAssigned];
			for (int i = 0; i < networkConfigs[netId].numNAssigned; i++)
				Npre_plasticInv[i] = 1.0f / managerRuntimeData.Npre_plastic[i];
			CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->Npre_plasticInv, sizeof(float) * networkConfigs[netId].numNAssigned));
			CUDA_CHECK_ERRORS(hipMemcpy(dest->Npre_plasticInv, Npre_plasticInv, sizeof(float) * networkConfigs[netId].numNAssigned, kind));
			delete[] Npre_plasticInv;
		}
	}
	// beginning position for the pre-synaptic information
	if(allocateMem)
		CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->cumulativePre, sizeof(int) * networkConfigs[netId].numNAssigned));
	CUDA_CHECK_ERRORS(hipMemcpy(&dest->cumulativePre[posN], &src->cumulativePre[posN], sizeof(int) * lengthN, kind));
	// Npre, cumulativePre has been copied to destination
	// synapse range: all pre-synapses, or just those belonging to the group.
	// NOTE(review): the per-group branch dereferences dest->Npre/cumulativePre
	// on the host — valid only when dest is host memory (D2H); confirm
	// per-group H2D copies never reach this path.
	if (lGrpId == ALL) {
		lengthSyn = networkConfigs[netId].numPreSynNet;
		posSyn = 0;
	} else {
		lengthSyn = 0;
		for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++)
			lengthSyn += dest->Npre[lNId];
		posSyn = dest->cumulativePre[groupConfigs[netId][lGrpId].lStartN];
	}
	if(allocateMem)
		CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->preSynapticIds, sizeof(SynInfo) * networkConfigs[netId].numPreSynNet));
	CUDA_CHECK_ERRORS(hipMemcpy(&dest->preSynapticIds[posSyn], &src->preSynapticIds[posSyn], sizeof(SynInfo) * lengthSyn, kind));
}
/*!
 * \brief this function allocates device (GPU) memory space and copies information of post-connections to it
*
* This function:
* (allocate and) copy Npost, cumulativePost, postSynapticIds, postDelayInfo
*
*
* \param[in] netId the id of a local network, which is the same as the device (GPU) id
* \param[in] lGrpId the local group id in a local network, which specifiy the group(s) to be copied
* \param[in] dest pointer to runtime data desitnation
* \param[in] src pointer to runtime data source
* \param[in] kind the direction of copying
* \param[in] allocateMem a flag indicates whether allocating memory space before copying
*
* \sa allocateSNN_GPU
* \since v4.0
*/
void SNN::copyPostConnectionInfo(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, hipMemcpyKind kind, bool allocateMem) {
	checkAndSetGPUDevice(netId);
	checkDestSrcPtrs(dest, src, kind, allocateMem, lGrpId, 0); // validate direction/allocation flags

	// Resolve the neuron range: the whole assigned network or one local group.
	int numNeurons, neuronPos;
	if (lGrpId == ALL) {
		numNeurons = networkConfigs[netId].numNAssigned;
		neuronPos = 0;
	} else {
		numNeurons = groupConfigs[netId][lGrpId].numN;
		neuronPos = groupConfigs[netId][lGrpId].lStartN;
	}

	// Npost: number of post-synaptic connections per neuron
	if (allocateMem)
		CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->Npost, sizeof(short) * networkConfigs[netId].numNAssigned));
	CUDA_CHECK_ERRORS(hipMemcpy(&dest->Npost[neuronPos], &src->Npost[neuronPos], sizeof(short) * numNeurons, kind));

	// cumulativePost: start offset of each neuron's post-synaptic list
	if (allocateMem)
		CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->cumulativePost, sizeof(int) * networkConfigs[netId].numNAssigned));
	CUDA_CHECK_ERRORS(hipMemcpy(&dest->cumulativePost[neuronPos], &src->cumulativePost[neuronPos], sizeof(int) * numNeurons, kind));

	// Resolve the synapse range from the (already-copied) Npost/cumulativePost.
	int numSyn, synPos;
	if (lGrpId == ALL) {
		numSyn = networkConfigs[netId].numPostSynNet;
		synPos = 0;
	} else {
		numSyn = 0;
		for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++)
			numSyn += dest->Npost[lNId];
		synPos = dest->cumulativePost[groupConfigs[netId][lGrpId].lStartN];
	}

	// postSynapticIds: the actual post-synaptic connection information
	if (allocateMem)
		CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->postSynapticIds, sizeof(SynInfo) * networkConfigs[netId].numPostSynNet));
	CUDA_CHECK_ERRORS(hipMemcpy(&dest->postSynapticIds[synPos], &src->postSynapticIds[synPos], sizeof(SynInfo) * numSyn, kind));

	// postDelayInfo: per-neuron, per-delay mapping into the post-synaptic list
	if (allocateMem)
		CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->postDelayInfo, sizeof(DelayInfo) * networkConfigs[netId].numNAssigned * (glbNetworkConfig.maxDelay + 1)));
	CUDA_CHECK_ERRORS(hipMemcpy(&dest->postDelayInfo[neuronPos * (glbNetworkConfig.maxDelay + 1)], &src->postDelayInfo[neuronPos * (glbNetworkConfig.maxDelay + 1)], sizeof(DelayInfo) * numNeurons * (glbNetworkConfig.maxDelay + 1), kind));
}
void SNN::checkDestSrcPtrs(RuntimeData* dest, RuntimeData* src, hipMemcpyKind kind, bool allocateMem, int lGrpId, int destOffset) {
	// A copy is only legal when the source exists, the direction matches the
	// memory types of src/dest, and the allocation flags are self-consistent.
	assert(src->allocated);
	switch (kind) {
	case hipMemcpyHostToDevice:
		assert(src->memType == CPU_MEM);
		assert(dest->memType == GPU_MEM);
		if (allocateMem) {
			// a fresh allocation must target an empty destination, whole network only
			assert(!dest->allocated);
			assert(lGrpId == ALL);
		} else {
			// without allocation, the destination must already exist
			assert(dest->allocated);
		}
		assert(destOffset == 0); // H-to-D only allows local-to-local copy
		break;
	case hipMemcpyDeviceToHost:
		assert(src->memType == GPU_MEM);
		assert(dest->memType == CPU_MEM);
		assert(dest->allocated);
		if (lGrpId == ALL)
			assert(destOffset == 0); // if copy all content, only local-to-local is allowed
		break;
	default:
		KERNEL_ERROR("Wrong Host-Device copy direction");
		exitSimulation(1);
	}
}
/*!
 * \brief this function allocates device (GPU) memory space and copies AMPA conductance to it
 *
 * This function:
 * (allocate and) copy gAMPA
 *
 * This function is called by copyNeuronState() and fetchConductanceAMPA(). It supports bi-directional copying
*
* \param[in] netId the id of a local network, which is the same as the device (GPU) id
* \param[in] lGrpId the local group id in a local network, which specifiy the group(s) to be copied
* \param[in] dest pointer to runtime data desitnation
* \param[in] src pointer to runtime data source
* \param[in] kind the direction of copy
* \param[in] allocateMem a flag indicates whether allocating memory space before copy
* \param[in] destOffset the offset of data destination, which is used in local-to-global copy
*
* \sa copyNeuronState fetchConductanceAMPA
* \since v3.0
*/
void SNN::copyConductanceAMPA(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, hipMemcpyKind kind, bool allocateMem, int destOffset) {
	checkAndSetGPUDevice(netId);
	checkDestSrcPtrs(dest, src, kind, allocateMem, lGrpId, destOffset); // validate pointers/direction
	assert(isSimulationWithCOBA()); // AMPA conductances only exist in COBA mode

	// Resolve the neuron range: all regular neurons or one local group.
	int offset, count;
	if (lGrpId == ALL) {
		offset = 0;
		count = networkConfigs[netId].numNReg;
	} else {
		offset = groupConfigs[netId][lGrpId].lStartN;
		count = groupConfigs[netId][lGrpId].numN;
	}
	assert(count <= networkConfigs[netId].numNReg);
	assert(count > 0);

	// gAMPA conductance buffer; destOffset shifts the target for
	// local-to-global (D2H) gathers
	assert(src->gAMPA != NULL);
	if (allocateMem)
		CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->gAMPA, sizeof(float) * count));
	CUDA_CHECK_ERRORS(hipMemcpy(&dest->gAMPA[offset + destOffset], &src->gAMPA[offset], sizeof(float) * count, kind));
}
/*!
* \brief this function allocates device (GPU) memory sapce and copies NMDA conductance to it
*
* This function:
* (allocate and) copy gNMDA, gNMDA_r, gNMDA_d
*
* This funcion is called by copyNeuronState() and fetchConductanceNMDA(). It supports bi-directional copying
*
* \param[in] netId the id of a local network, which is the same as the device (GPU) id
* \param[in] lGrpId the local group id in a local network, which specifiy the group(s) to be copied
* \param[in] dest pointer to runtime data desitnation
* \param[in] src pointer to runtime data source
* \param[in] kind the direction of copy
* \param[in] allocateMem a flag indicates whether allocating memory space before copy
* \param[in] destOffset the offset of data destination, which is used in local-to-global copy
*
* \sa copyNeuronState fetchConductanceNMDA
* \since v3.0
*/
void SNN::copyConductanceNMDA(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, hipMemcpyKind kind, bool allocateMem, int destOffset) {
	checkAndSetGPUDevice(netId);
	checkDestSrcPtrs(dest, src, kind, allocateMem, lGrpId, destOffset);// check that the destination pointer is properly allocated..
	assert(isSimulationWithCOBA()); // NMDA conductances only exist in COBA mode
	int ptrPos, length;
	// neuron range: all regular neurons, or one local group
	if(lGrpId == ALL) {
		ptrPos = 0;
		length = networkConfigs[netId].numNReg;
	} else {
		ptrPos = groupConfigs[netId][lGrpId].lStartN;
		length = groupConfigs[netId][lGrpId].numN;
	}
	assert(length <= networkConfigs[netId].numNReg);
	assert(length > 0);
	if (isSimulationWithNMDARise()) {
		// NMDA modeled with separate rise and decay buffers.
		// NOTE(review): these two copies do not apply destOffset, while the
		// non-rise branch below does — confirm local-to-global (destOffset != 0)
		// gathers are never requested when NMDA rise is enabled.
		assert(src->gNMDA_r != NULL);
		if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**) &dest->gNMDA_r, sizeof(float) * length));
		CUDA_CHECK_ERRORS(hipMemcpy(&dest->gNMDA_r[ptrPos], &src->gNMDA_r[ptrPos], sizeof(float) * length, kind));
		assert(src->gNMDA_d != NULL);
		if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**) &dest->gNMDA_d, sizeof(float) * length));
		CUDA_CHECK_ERRORS(hipMemcpy(&dest->gNMDA_d[ptrPos], &src->gNMDA_d[ptrPos], sizeof(float) * length, kind));
	} else {
		// single combined NMDA conductance buffer
		assert(src->gNMDA != NULL);
		if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**) &dest->gNMDA, sizeof(float) * length));
		CUDA_CHECK_ERRORS(hipMemcpy(&dest->gNMDA[ptrPos + destOffset], &src->gNMDA[ptrPos], sizeof(float) * length, kind));
	}
}
/*!
* \brief this function allocates device (GPU) memory sapce and copies GABAa conductance to it
*
* This function:
* (allocate and) copy gGABAa
*
* This funcion is called by copyNeuronState() and fetchConductanceGABAa(). It supports bi-directional copying
*
* \param[in] netId the id of a local network, which is the same as the device (GPU) id
* \param[in] lGrpId the local group id in a local network, which specifiy the group(s) to be copied
* \param[in] dest pointer to runtime data desitnation
* \param[in] src pointer to runtime data source
* \param[in] kind the direction of copy
* \param[in] allocateMem a flag indicates whether allocating memory space before copy
* \param[in] destOffset the offset of data destination, which is used in local-to-global copy
*
* \sa copyNeuronState fetchConductanceGABAa
* \since v3.0
*/
void SNN::copyConductanceGABAa(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, hipMemcpyKind kind, bool allocateMem, int destOffset) {
checkAndSetGPUDevice(netId);
checkDestSrcPtrs(dest, src, kind, allocateMem, lGrpId, destOffset); // check that the destination pointer is properly allocated..
assert(isSimulationWithCOBA());
int ptrPos, length;
if(lGrpId == ALL) {
ptrPos = 0;
length = networkConfigs[netId].numNReg;
} else {
ptrPos = groupConfigs[netId][lGrpId].lStartN;
length = groupConfigs[netId][lGrpId].numN;
}
assert(length <= networkConfigs[netId].numNReg);
assert(length > 0);
assert(src->gGABAa != NULL);
if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**) &dest->gGABAa, sizeof(float) * length));
CUDA_CHECK_ERRORS(hipMemcpy(&dest->gGABAa[ptrPos + destOffset], &src->gGABAa[ptrPos], sizeof(float) * length, kind));
}
/*!
* \brief this function allocates device (GPU) memory sapce and copies GABAb conductance to it
*
* This function:
* (allocate and) copy gGABAb, gGABAb_r, gGABAb_d
*
* This funcion is called by copyNeuronState() and fetchConductanceGABAb(). It supports bi-directional copying
*
* \param[in] netId the id of a local network, which is the same as the device (GPU) id
* \param[in] lGrpId the local group id in a local network, which specifiy the group(s) to be copied
* \param[in] dest pointer to runtime data desitnation
* \param[in] src pointer to runtime data source
* \param[in] kind the direction of copy
* \param[in] allocateMem a flag indicates whether allocating memory space before copy
* \param[in] destOffset the offset of data destination, which is used in local-to-global copy
*
* \sa copyNeuronState fetchConductanceGABAb
* \since v3.0
*/
void SNN::copyConductanceGABAb(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, hipMemcpyKind kind, bool allocateMem, int destOffset) {
checkAndSetGPUDevice(netId);
checkDestSrcPtrs(dest, src, kind, allocateMem, lGrpId, destOffset); // check that the destination pointer is properly allocated..
assert(isSimulationWithCOBA());
int ptrPos, length;
if(lGrpId == ALL) {
ptrPos = 0;
length = networkConfigs[netId].numNReg;
} else {
ptrPos = groupConfigs[netId][lGrpId].lStartN;
length = groupConfigs[netId][lGrpId].numN;
}
assert(length <= networkConfigs[netId].numNReg);
assert(length > 0);
if (isSimulationWithGABAbRise()) {
assert(src->gGABAb_r != NULL);
if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**) &dest->gGABAb_r, sizeof(float) * length));
CUDA_CHECK_ERRORS(hipMemcpy(&dest->gGABAb_r[ptrPos], &src->gGABAb_r[ptrPos], sizeof(float) * length, kind));
assert(src->gGABAb_d != NULL);
if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**) &dest->gGABAb_d, sizeof(float) * length));
CUDA_CHECK_ERRORS(hipMemcpy(&dest->gGABAb_d[ptrPos], &src->gGABAb_d[ptrPos], sizeof(float) * length, kind));
} else {
assert(src->gGABAb != NULL);
if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->gGABAb, sizeof(float) * length));
CUDA_CHECK_ERRORS(hipMemcpy(&dest->gGABAb[ptrPos + destOffset], &src->gGABAb[ptrPos], sizeof(float) * length, kind));
}
}
/*!
 * \brief this function allocates device (GPU) memory space and copies variables related to neuron state to it
 *
 * This function:
 * (allocate and) copy voltage, recovery, current, avgFiring
 *
 * This function is called by allocateSNN_GPU(). Only copying from host to device is required
 *
 * \param[in] netId the id of a local network, which is the same as the device (GPU) id
 * \param[in] lGrpId the local group id in a local network, which specify the group(s) to be copied
 * \param[in] dest pointer to runtime data destination
 * \param[in] allocateMem a flag indicates whether allocating memory space before copying
 *
 * \sa allocateSNN_GPU fetchNeuronState
 * \since v3.0
 */
void SNN::copyNeuronState(int netId, int lGrpId, RuntimeData* dest, hipMemcpyKind kind, bool allocateMem) {
checkAndSetGPUDevice(netId);
checkDestSrcPtrs(dest, &managerRuntimeData, hipMemcpyHostToDevice, allocateMem, lGrpId, 0); // check that the destination pointer is properly allocated..
// this function only supports host-to-device copying
assert(kind == hipMemcpyHostToDevice);
// resolve the neuron range covered by this copy: all regular neurons, or one local group
int ptrPos, length;
if(lGrpId == ALL) {
ptrPos = 0;
length = networkConfigs[netId].numNReg;
}
else {
ptrPos = groupConfigs[netId][lGrpId].lStartN;
length = groupConfigs[netId][lGrpId].numN;
}
assert(length <= networkConfigs[netId].numNReg);
if (length == 0)
return;
// NOTE(review): when lGrpId == ALL this line indexes groupConfigs[netId][ALL], which looks
// out-of-bounds; it is only reached when allocateMem is false — confirm callers never pass
// ALL together with allocateMem == false.
if(!allocateMem && groupConfigs[netId][lGrpId].Type & POISSON_NEURON)
return;
// Izhikevich state variables: recovery (u) and membrane voltage (v)
if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->recovery, sizeof(float) * length));
CUDA_CHECK_ERRORS(hipMemcpy(&dest->recovery[ptrPos], &managerRuntimeData.recovery[ptrPos], sizeof(float) * length, hipMemcpyHostToDevice));
if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->voltage, sizeof(float) * length));
CUDA_CHECK_ERRORS(hipMemcpy(&dest->voltage[ptrPos], &managerRuntimeData.voltage[ptrPos], sizeof(float) * length, hipMemcpyHostToDevice));
//neuron input current...
if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->current, sizeof(float) * length));
CUDA_CHECK_ERRORS(hipMemcpy(&dest->current[ptrPos], &managerRuntimeData.current[ptrPos], sizeof(float) * length, hipMemcpyHostToDevice));
if (sim_with_conductances) {
//conductance information (COBA mode only): AMPA, NMDA, GABAa, GABAb
copyConductanceAMPA(netId, lGrpId, dest, &managerRuntimeData, hipMemcpyHostToDevice, allocateMem, 0);
copyConductanceNMDA(netId, lGrpId, dest, &managerRuntimeData, hipMemcpyHostToDevice, allocateMem, 0);
copyConductanceGABAa(netId, lGrpId, dest, &managerRuntimeData, hipMemcpyHostToDevice, allocateMem, 0);
copyConductanceGABAb(netId, lGrpId, dest, &managerRuntimeData, hipMemcpyHostToDevice, allocateMem, 0);
}
// copying external current needs to be done separately because setExternalCurrent needs to call it, too
// do it only from host to device
copyExternalCurrent(netId, lGrpId, dest, hipMemcpyHostToDevice, allocateMem);
// static neuron parameters (Izh_a..d, baseFiring) ride along with the state copy
copyNeuronParameters(netId, lGrpId, dest, hipMemcpyHostToDevice, allocateMem);
if (sim_with_homeostasis) {
//Included to enable homeostasis in GPU_MODE.
// Avg. Firing...
if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**) &dest->avgFiring, sizeof(float) * length));
CUDA_CHECK_ERRORS(hipMemcpy(&dest->avgFiring[ptrPos], &managerRuntimeData.avgFiring[ptrPos], sizeof(float) * length, hipMemcpyHostToDevice));
}
}
/*!
 * \brief this function allocates device (GPU) memory space and copies the spike count of each neuron to it
 *
 * This function:
 * (allocate and) copy nSpikeCnt
 *
 * This function is called by copyAuxiliaryData() and fetchNeuronSpikeCount(). It supports bi-directional copying
 *
 * \param[in] netId the id of a local network, which is the same as the device (GPU) id
 * \param[in] lGrpId the local group id in a local network, which specify the group(s) to be copied
 * \param[in] dest pointer to runtime data destination
 * \param[in] src pointer to runtime data source
 * \param[in] kind the direction of copy
 * \param[in] allocateMem a flag indicates whether allocating memory space before copy
 * \param[in] destOffset the offset of data destination, which is used in local-to-global copy
 *
 * \sa copyAuxiliaryData fetchNeuronSpikeCount
 * \since v4.0
 */
void SNN::copyNeuronSpikeCount(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, hipMemcpyKind kind, bool allocateMem, int destOffset) {
	checkAndSetGPUDevice(netId);
	// verify that destination/source pointers match the requested copy direction and allocation mode
	checkDestSrcPtrs(dest, src, kind, allocateMem, lGrpId, destOffset);

	// resolve the neuron range covered by this copy: the whole network, or one local group
	const bool copyAll = (lGrpId == ALL);
	const int nPos = copyAll ? 0 : groupConfigs[netId][lGrpId].lStartN;
	const int nLen = copyAll ? networkConfigs[netId].numN : groupConfigs[netId][lGrpId].numN;
	assert(nLen > 0 && nLen <= networkConfigs[netId].numN);

	// per-neuron spike counters; destOffset shifts only the destination (local-to-global copies)
	if (allocateMem)
		CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->nSpikeCnt, sizeof(int) * nLen));
	CUDA_CHECK_ERRORS(hipMemcpy(&dest->nSpikeCnt[nPos + destOffset], &src->nSpikeCnt[nPos], sizeof(int) * nLen, kind));
}
// FIXME: move grpDA(5HT, ACh, NE)Buffer to copyAuxiliaryData
/*!
 * \brief this function allocates device (GPU) memory space and copies variables related to group state to it
 *
 * This function:
 * (allocate and) copy grpDA, grp5HT, grpACh, grpNE, grpDABuffer, grp5HTBuffer, grpAChBuffer, grpNEBuffer
 *
 * This function is called by allocateSNN_GPU() and fetchGroupState(). It supports bi-directional copying
 *
 * \param[in] netId the id of a local network, which is the same as the device (GPU) id
 * \param[in] lGrpId the local group id in a local network, which specify the group(s) to be copied
 * \param[in] dest pointer to runtime data destination
 * \param[in] src pointer to runtime data source
 * \param[in] kind the direction of copying
 * \param[in] allocateMem a flag indicates whether allocating memory space before copying
 *
 * \sa allocateSNN_GPU fetchGroupState
 * \since v3.0
 */
void SNN::copyGroupState(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, hipMemcpyKind kind, bool allocateMem) {
checkAndSetGPUDevice(netId);
checkDestSrcPtrs(dest, src, kind, allocateMem, lGrpId, 0);// check that the destination pointer is properly allocated..
if (allocateMem) {
// allocation is only legal into an unallocated GPU-side RuntimeData
assert(dest->memType == GPU_MEM && !dest->allocated);
CUDA_CHECK_ERRORS(hipMalloc((void**) &dest->grpDA, sizeof(float) * networkConfigs[netId].numGroups));
CUDA_CHECK_ERRORS(hipMalloc((void**) &dest->grp5HT, sizeof(float) * networkConfigs[netId].numGroups));
CUDA_CHECK_ERRORS(hipMalloc((void**) &dest->grpACh, sizeof(float) * networkConfigs[netId].numGroups));
CUDA_CHECK_ERRORS(hipMalloc((void**) &dest->grpNE, sizeof(float) * networkConfigs[netId].numGroups));
}
// current neuromodulator concentration, one value per group (always copied for all groups)
CUDA_CHECK_ERRORS(hipMemcpy(dest->grpDA, src->grpDA, sizeof(float) * networkConfigs[netId].numGroups, kind));
CUDA_CHECK_ERRORS(hipMemcpy(dest->grp5HT, src->grp5HT, sizeof(float) * networkConfigs[netId].numGroups, kind));
CUDA_CHECK_ERRORS(hipMemcpy(dest->grpACh, src->grpACh, sizeof(float) * networkConfigs[netId].numGroups, kind));
CUDA_CHECK_ERRORS(hipMemcpy(dest->grpNE, src->grpNE, sizeof(float) * networkConfigs[netId].numGroups, kind));
// buffers hold 1000 entries per group (presumably one sample per ms of a second — confirm);
// lGrpId < 0 (e.g. ALL) copies the buffers of every group, otherwise just the selected group's slice
if (lGrpId < 0) {
if (allocateMem) {
assert(dest->memType == GPU_MEM && !dest->allocated);
CUDA_CHECK_ERRORS(hipMalloc((void**) &dest->grpDABuffer, sizeof(float) * 1000 * networkConfigs[netId].numGroups));
CUDA_CHECK_ERRORS(hipMalloc((void**) &dest->grp5HTBuffer, sizeof(float) * 1000 * networkConfigs[netId].numGroups));
CUDA_CHECK_ERRORS(hipMalloc((void**) &dest->grpAChBuffer, sizeof(float) * 1000 * networkConfigs[netId].numGroups));
CUDA_CHECK_ERRORS(hipMalloc((void**) &dest->grpNEBuffer, sizeof(float) * 1000 * networkConfigs[netId].numGroups));
}
CUDA_CHECK_ERRORS(hipMemcpy(dest->grpDABuffer, src->grpDABuffer, sizeof(float) * 1000 * networkConfigs[netId].numGroups, kind));
CUDA_CHECK_ERRORS(hipMemcpy(dest->grp5HTBuffer, src->grp5HTBuffer, sizeof(float) * 1000 * networkConfigs[netId].numGroups, kind));
CUDA_CHECK_ERRORS(hipMemcpy(dest->grpAChBuffer, src->grpAChBuffer, sizeof(float) * 1000 * networkConfigs[netId].numGroups, kind));
CUDA_CHECK_ERRORS(hipMemcpy(dest->grpNEBuffer, src->grpNEBuffer, sizeof(float) * 1000 * networkConfigs[netId].numGroups, kind));
} else {
// single-group copy requires the buffers to already exist
assert(!allocateMem);
CUDA_CHECK_ERRORS(hipMemcpy(&dest->grpDABuffer[lGrpId * 1000], &src->grpDABuffer[lGrpId * 1000], sizeof(float) * 1000, kind));
CUDA_CHECK_ERRORS(hipMemcpy(&dest->grp5HTBuffer[lGrpId * 1000], &src->grp5HTBuffer[lGrpId * 1000], sizeof(float) * 1000, kind));
CUDA_CHECK_ERRORS(hipMemcpy(&dest->grpAChBuffer[lGrpId * 1000], &src->grpAChBuffer[lGrpId * 1000], sizeof(float) * 1000, kind));
CUDA_CHECK_ERRORS(hipMemcpy(&dest->grpNEBuffer[lGrpId * 1000], &src->grpNEBuffer[lGrpId * 1000], sizeof(float) * 1000, kind));
}
}
/*!
 * \brief this function allocates device (GPU) memory space and copies neural parameters to it
 *
 * This function:
 * (allocate and) copy Izh_a, Izh_b, Izh_c, Izh_d
 * initialize baseFiringInv
 * (allocate and) copy baseFiring, baseFiringInv
 *
 * This function is only called by copyNeuronState(). Only copying direction from host to device is required.
 *
 * \param[in] netId the id of a local network, which is the same as the device (GPU) id
 * \param[in] lGrpId the local group id in a local network, which specify the group(s) to be copied
 * \param[in] dest pointer to runtime data destination
 * \param[in] allocateMem a flag indicates whether allocating memory space before copying
 *
 * \sa copyNeuronState
 * \since v3.0
 */
void SNN::copyNeuronParameters(int netId, int lGrpId, RuntimeData* dest, hipMemcpyKind kind, bool allocateMem) {
	checkAndSetGPUDevice(netId);
	assert(kind == hipMemcpyHostToDevice); // only host-to-device copying is supported
	int ptrPos, length;

	// check that the destination pointer is properly allocated..
	checkDestSrcPtrs(dest, &managerRuntimeData, hipMemcpyHostToDevice, allocateMem, lGrpId, 0);

	// check that the destination pointer is properly allocated..
	// cannot use checkDestSrcPtrs here because src pointer would be NULL
	if (dest->allocated && allocateMem) {
		KERNEL_ERROR("GPU Memory already allocated...");
		exitSimulation(1);
	}

	// when allocating we are allocating the memory.. we need to do it completely... to avoid memory fragmentation..
	if (allocateMem) {
		assert(lGrpId == ALL);
		assert(dest->Izh_a == NULL);
		assert(dest->Izh_b == NULL);
		assert(dest->Izh_c == NULL);
		assert(dest->Izh_d == NULL);
	}

	// resolve the neuron range covered by this copy: all regular neurons, or one local group
	if(lGrpId == ALL) {
		ptrPos = 0;
		length = networkConfigs[netId].numNReg;
	} else {
		ptrPos = groupConfigs[netId][lGrpId].lStartN;
		length = groupConfigs[netId][lGrpId].numN;
	}

	// Izhikevich model parameters a, b, c, d
	if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->Izh_a, sizeof(float) * length));
	CUDA_CHECK_ERRORS(hipMemcpy(&dest->Izh_a[ptrPos], &(managerRuntimeData.Izh_a[ptrPos]), sizeof(float) * length, hipMemcpyHostToDevice));

	if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->Izh_b, sizeof(float) * length));
	CUDA_CHECK_ERRORS(hipMemcpy(&dest->Izh_b[ptrPos], &(managerRuntimeData.Izh_b[ptrPos]), sizeof(float) * length, hipMemcpyHostToDevice));

	if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->Izh_c, sizeof(float) * length));
	CUDA_CHECK_ERRORS(hipMemcpy(&dest->Izh_c[ptrPos], &(managerRuntimeData.Izh_c[ptrPos]), sizeof(float) * length, hipMemcpyHostToDevice));

	if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->Izh_d, sizeof(float) * length));
	CUDA_CHECK_ERRORS(hipMemcpy(&dest->Izh_d[ptrPos], &(managerRuntimeData.Izh_d[ptrPos]), sizeof(float) * length, hipMemcpyHostToDevice));

	// pre-compute baseFiringInv for fast computation on GPUs.
	if (sim_with_homeostasis) {
		float* baseFiringInv = new float[length];
		for(int nid = 0; nid < length; nid++) {
			// FIX: test the same element that gets inverted (ptrPos + nid); the original
			// tested baseFiring[nid], which reads the wrong neuron whenever ptrPos != 0
			// and risks a division by zero.
			if (managerRuntimeData.baseFiring[ptrPos + nid] != 0.0f)
				baseFiringInv[nid] = 1.0f / managerRuntimeData.baseFiring[ptrPos + nid];
			else
				baseFiringInv[nid] = 0.0f;
		}

		if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->baseFiringInv, sizeof(float) * length));
		CUDA_CHECK_ERRORS(hipMemcpy(&dest->baseFiringInv[ptrPos], baseFiringInv, sizeof(float) * length, hipMemcpyHostToDevice));

		if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->baseFiring, sizeof(float) * length));
		// FIX: the source must start at ptrPos so it lines up with the destination slice;
		// the original copied from baseFiring[0] regardless of the group being copied.
		CUDA_CHECK_ERRORS(hipMemcpy(&dest->baseFiring[ptrPos], &managerRuntimeData.baseFiring[ptrPos], sizeof(float) * length, hipMemcpyHostToDevice));

		delete [] baseFiringInv;
	}
}
/*!
 * \brief this function allocates device (GPU) memory space and copies short-term plasticity (STP) state to it
 *
 * This function:
 * initialize STP_Pitch
 * (allocate and) copy stpu, stpx
 *
 * This function is called by allocateSNN_GPU() and fetchSTPState(). It supports bi-directional copying
 *
 * \param[in] netId the id of a local network, which is the same as the device (GPU) id
 * \param[in] lGrpId the local group id in a local network, which specify the group(s) to be copied
 * \param[in] dest pointer to runtime data destination
 * \param[in] src pointer to runtime data source
 * \param[in] kind the direction of copying
 * \param[in] allocateMem a flag indicates whether allocating memory space before copying
 *
 * \sa allocateSNN_GPU fetchSTPState
 * \since v3.0
 */
void SNN::copySTPState(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, hipMemcpyKind kind, bool allocateMem) {
checkAndSetGPUDevice(netId);
checkDestSrcPtrs(dest, src, kind, allocateMem, lGrpId, 0); // check that the destination pointer is properly allocated..
// STP feature is optional, do additional check for memory space
if(allocateMem) {
assert(dest->stpu == NULL);
assert(dest->stpx == NULL);
} else {
assert(dest->stpu != NULL);
assert(dest->stpx != NULL);
}
assert(src->stpu != NULL); assert(src->stpx != NULL);
// stpu/stpx are laid out as 2D arrays: one row per delay slot (maxDelay+1 rows),
// one column per neuron; hipMallocPitch aligns each row for coalesced access
size_t STP_Pitch;
size_t widthInBytes = sizeof(float) * networkConfigs[netId].numN;
// if(allocateMem) CUDA_CHECK_ERRORS( hipMalloc( (void**) &dest->stpu, sizeof(float)*networkConfigs[0].numN));
// CUDA_CHECK_ERRORS( hipMemcpy( &dest->stpu[0], &src->stpu[0], sizeof(float)*networkConfigs[0].numN, kind));
// if(allocateMem) CUDA_CHECK_ERRORS( hipMalloc( (void**) &dest->stpx, sizeof(float)*networkConfigs[0].numN));
// CUDA_CHECK_ERRORS( hipMemcpy( &dest->stpx[0], &src->stpx[0], sizeof(float)*networkConfigs[0].numN, kind));
// allocate the stpu and stpx variable
if (allocateMem)
CUDA_CHECK_ERRORS(hipMallocPitch((void**)&dest->stpu, &networkConfigs[netId].STP_Pitch, widthInBytes, networkConfigs[netId].maxDelay + 1));
if (allocateMem)
CUDA_CHECK_ERRORS(hipMallocPitch((void**)&dest->stpx, &STP_Pitch, widthInBytes, networkConfigs[netId].maxDelay + 1));
assert(networkConfigs[netId].STP_Pitch > 0);
assert(STP_Pitch > 0); // stp_pitch should be greater than zero
assert(STP_Pitch == networkConfigs[netId].STP_Pitch); // we want same Pitch for stpu and stpx
assert(networkConfigs[netId].STP_Pitch >= widthInBytes); // stp_pitch should be greater than the width
// convert the Pitch value to multiples of float
// NOTE(review): the byte->float conversion only happens when allocateMem is true, so on
// later calls STP_Pitch is presumably already stored in float units — confirm; note the
// asserts above then compare a float count against widthInBytes.
assert(networkConfigs[netId].STP_Pitch % (sizeof(float)) == 0);
if (allocateMem)
networkConfigs[netId].STP_Pitch = networkConfigs[netId].STP_Pitch/sizeof(float);
// fprintf(stderr, "STP_Pitch = %ld, STP_witdhInBytes = %d\n", networkConfigs[0].STP_Pitch, widthInBytes);
// staging buffer for converting between the host's STP_BUF_POS layout and the pitched GPU layout
float* tmp_stp = new float[networkConfigs[netId].numN];
// copy the already generated values of stpx and stpu to the GPU
for(int t = 0; t < networkConfigs[netId].maxDelay + 1; t++) {
if (kind == hipMemcpyHostToDevice) {
// stpu in the CPU might be mapped in a specific way. we want to change the format
// to something that is okay with the GPU STP_U and STP_X variable implementation..
for (int n = 0; n < networkConfigs[netId].numN; n++) {
int idx = STP_BUF_POS(n, t, glbNetworkConfig.maxDelay);
tmp_stp[n] = managerRuntimeData.stpu[idx];
//assert(tmp_stp[n] == 0.0f); // STP is not enabled for all groups
}
CUDA_CHECK_ERRORS(hipMemcpy(&dest->stpu[t * networkConfigs[netId].STP_Pitch], tmp_stp, sizeof(float) * networkConfigs[netId].numN, hipMemcpyHostToDevice));
for (int n = 0; n < networkConfigs[netId].numN; n++) {
int idx = STP_BUF_POS(n, t, glbNetworkConfig.maxDelay);
tmp_stp[n] = managerRuntimeData.stpx[idx];
//assert(tmp_stp[n] == 1.0f); // STP is not enabled for all groups
}
CUDA_CHECK_ERRORS(hipMemcpy(&dest->stpx[t * networkConfigs[netId].STP_Pitch], tmp_stp, sizeof(float) * networkConfigs[netId].numN, hipMemcpyHostToDevice));
} else {
// device-to-host: reverse conversion, pitched GPU rows back into STP_BUF_POS layout
CUDA_CHECK_ERRORS(hipMemcpy(tmp_stp, &dest->stpu[t * networkConfigs[netId].STP_Pitch], sizeof(float) * networkConfigs[netId].numN, hipMemcpyDeviceToHost));
for (int n = 0; n < networkConfigs[netId].numN; n++)
managerRuntimeData.stpu[STP_BUF_POS(n, t, glbNetworkConfig.maxDelay)] = tmp_stp[n];
CUDA_CHECK_ERRORS(hipMemcpy(tmp_stp, &dest->stpx[t * networkConfigs[netId].STP_Pitch], sizeof(float) * networkConfigs[netId].numN, hipMemcpyDeviceToHost));
for (int n = 0; n < networkConfigs[netId].numN; n++)
managerRuntimeData.stpx[STP_BUF_POS(n, t, glbNetworkConfig.maxDelay)] = tmp_stp[n];
}
}
delete [] tmp_stp;
}
/*!
 * \brief This function copies networkConfig from host to device
 *
 * This function:
 * copy networkConfig
 *
 * \param[in] netId the id of a local network whose networkConfig will be copied to device (GPU) memory
 *
 * \since v4.0
 */
void SNN::copyNetworkConfig(int netId) {
checkAndSetGPUDevice(netId);
// publish this network's configuration struct to the device-side symbol networkConfigGPU
CUDA_CHECK_ERRORS(hipMemcpyToSymbol(networkConfigGPU, &networkConfigs[netId], sizeof(NetworkConfigRT), 0, hipMemcpyHostToDevice));
}
/*!
 * \brief This function copies groupConfigs from host to device
 *
 * This function:
 * copy groupConfigs
 *
 * \param[in] netId the id of a local network whose groupConfigs will be copied to device (GPU) memory
 *
 * \since v4.0
 */
void SNN::copyGroupConfigs(int netId) {
checkAndSetGPUDevice(netId);
// publish all assigned group configs (numGroupsAssigned entries) to the device-side symbol groupConfigsGPU
CUDA_CHECK_ERRORS(hipMemcpyToSymbol(groupConfigsGPU, groupConfigs[netId], (networkConfigs[netId].numGroupsAssigned) * sizeof(GroupConfigRT), 0, hipMemcpyHostToDevice));
}
/*!
 * \brief this function copy weight state in device (GPU) memory space to main (CPU) memory space
 *
 * This function:
 * copy wt, wtChange synSpikeTime
 *
 * This function is only called by fetchWeightState(). Only copying direction from device to host is required.
 *
 * \param[in] netId the id of a local network, which is the same as the device (GPU) id
 * \param[in] lGrpId the local group id in a local network, which specify the group(s) to be copied
 *
 * \sa fetchWeightState
 * \since v4.0
 */
void SNN::copyWeightState(int netId, int lGrpId, hipMemcpyKind kind) {
checkAndSetGPUDevice(netId);
checkDestSrcPtrs(&managerRuntimeData, &runtimeData[netId], hipMemcpyDeviceToHost, false, lGrpId, 0); // check that the destination pointer is properly allocated..
// this function only supports device-to-host copying
assert(kind == hipMemcpyDeviceToHost);
int lengthSyn, posSyn;
// first copy pre-connections info
copyPreConnectionInfo(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], hipMemcpyDeviceToHost, false);
// resolve the synapse range: all pre-synapses, or the sum of Npre over the group's neurons
if (lGrpId == ALL) {
lengthSyn = networkConfigs[netId].numPreSynNet;
posSyn = 0;
} else {
lengthSyn = 0;
for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++)
lengthSyn += managerRuntimeData.Npre[lNId];
posSyn = managerRuntimeData.cumulativePre[groupConfigs[netId][lGrpId].lStartN];
}
assert(posSyn < networkConfigs[netId].numPreSynNet || networkConfigs[netId].numPreSynNet == 0);
assert(lengthSyn <= networkConfigs[netId].numPreSynNet);
// synaptic weights
CUDA_CHECK_ERRORS(hipMemcpy(&managerRuntimeData.wt[posSyn], &runtimeData[netId].wt[posSyn], sizeof(float) * lengthSyn, hipMemcpyDeviceToHost));
// copy firing time for individual synapses
//CUDA_CHECK_ERRORS(hipMemcpy(&managerRuntimeData.synSpikeTime[cumPos_syn], &runtimeData[netId].synSpikeTime[cumPos_syn], sizeof(int) * length_wt, hipMemcpyDeviceToHost));
// wtChange only exists when weights are plastic (or STDP is on)
if ((!sim_with_fixedwts) || sim_with_stdp) {
// copy synaptic weight derivative
CUDA_CHECK_ERRORS(hipMemcpy( &managerRuntimeData.wtChange[posSyn], &runtimeData[netId].wtChange[posSyn], sizeof(float) * lengthSyn, hipMemcpyDeviceToHost));
}
}
/*!
* \brief this function allocates device (GPU) memory sapce and copies variables related to syanpses to it
*
* This function:
* (allocate and) copy wt, wtChange, maxSynWt
*
*
* \param[in] netId the id of a local network, which is the same as the device (GPU) id
* \param[in] dest pointer to runtime data desitnation
* \param[in] src pointer to runtime data source
* \param[in] allocateMem a flag indicates whether allocating memory space before copying
*
* \sa allocateSNN_GPU
* \since v4.0
*/
void SNN::copySynapseState(int netId, RuntimeData* dest, RuntimeData* src, hipMemcpyKind kind, bool allocateMem) {
checkAndSetGPUDevice(netId);
checkDestSrcPtrs(dest, src, kind, allocateMem, ALL, 0); // check that the destination pointer is properly allocated..
assert(networkConfigs[netId].numPreSynNet > 0);
// synaptic information based
if(allocateMem)
CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->wt, sizeof(float) * networkConfigs[netId].numPreSynNet));
CUDA_CHECK_ERRORS(hipMemcpy(dest->wt, src->wt, sizeof(float) * networkConfigs[netId].numPreSynNet, kind));
// we don't need these data structures if the network doesn't have any plastic synapses at all
// they show up in gpuUpdateLTP() and updateSynapticWeights(), two functions that do not get called if
// sim_with_fixedwts is set
if (!sim_with_fixedwts) {
// synaptic weight derivative
if(allocateMem)
CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->wtChange, sizeof(float) * networkConfigs[netId].numPreSynNet));
CUDA_CHECK_ERRORS(hipMemcpy(dest->wtChange, src->wtChange, sizeof(float) * networkConfigs[netId].numPreSynNet, kind));
// synaptic weight maximum value
if(allocateMem)
CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->maxSynWt, sizeof(float) * networkConfigs[netId].numPreSynNet));
CUDA_CHECK_ERRORS(hipMemcpy(dest->maxSynWt, src->maxSynWt, sizeof(float) * networkConfigs[netId].numPreSynNet, kind));
}
}
/*!
 * \brief this function allocates device (GPU) memory space and copies auxiliary runtime data to it
 *
 * This function:
 * (allocate and) reset spikeGenBits, poissonFireRate
 * initialize I_setLength, I_setPitch; (allocate and) reset I_set
 * (allocate and) copy synSpikeTime, lastSpikeTime
 * (allocate and) copy nSpikeCnt
 * (allocate and) copy grpIds, connIdsPreIdx
 * (allocate and) copy firingTableD1, firingTableD2
 * This function is only called by allocateSNN_GPU. Therefore, only copying direction from host to device is required
 *
 * \param[in] netId the id of local network, which is the same as device (GPU) id
 * \param[in] dest pointer to runtime data destination
 * \param[in] allocateMem a flag indicates whether allocating memory space before copying
 *
 * \sa allocateSNN_GPU
 * \since v4.0
 */
void SNN::copyAuxiliaryData(int netId, int lGrpId, RuntimeData* dest, hipMemcpyKind kind, bool allocateMem) {
checkAndSetGPUDevice(netId);
checkDestSrcPtrs(dest, &managerRuntimeData, hipMemcpyHostToDevice, allocateMem, ALL, 0); // check that the destination pointer is properly allocated..
// this function only supports host-to-device copying
assert(kind == hipMemcpyHostToDevice);
assert(networkConfigs[netId].numN > 0);
// bit vector of externally generated spikes, one bit per spike-generator neuron
if(allocateMem)
CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->spikeGenBits, sizeof(int) * (networkConfigs[netId].numNSpikeGen / 32 + 1)));
CUDA_CHECK_ERRORS(hipMemset(dest->spikeGenBits, 0, sizeof(int) * (networkConfigs[netId].numNSpikeGen / 32 + 1)));
// allocate the poisson neuron poissonFireRate
if(allocateMem)
CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->poissonFireRate, sizeof(float) * networkConfigs[netId].numNPois));
CUDA_CHECK_ERRORS(hipMemset(dest->poissonFireRate, 0, sizeof(float) * networkConfigs[netId].numNPois));
// synaptic auxiliary data
// I_set: a bit vector indicates which synapse got a spike
if(allocateMem) {
// one 32-bit word covers 32 pre-synapses; rows are pitched, one row per word of the bit vector
networkConfigs[netId].I_setLength = ceil(((networkConfigs[netId].maxNumPreSynN) / 32.0f));
CUDA_CHECK_ERRORS(hipMallocPitch((void**)&dest->I_set, &networkConfigs[netId].I_setPitch, sizeof(int) * networkConfigs[netId].numNReg, networkConfigs[netId].I_setLength));
}
assert(networkConfigs[netId].I_setPitch > 0 || networkConfigs[netId].maxNumPreSynN == 0);
CUDA_CHECK_ERRORS(hipMemset(dest->I_set, 0, networkConfigs[netId].I_setPitch * networkConfigs[netId].I_setLength));
// synSpikeTime: an array indicates the last time when a synapse got a spike
if(allocateMem)
CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->synSpikeTime, sizeof(int) * networkConfigs[netId].numPreSynNet));
CUDA_CHECK_ERRORS(hipMemcpy(dest->synSpikeTime, managerRuntimeData.synSpikeTime, sizeof(int) * networkConfigs[netId].numPreSynNet, hipMemcpyHostToDevice));
// neural auxiliary data
// lastSpikeTime: an array indicates the last time of a neuron emitting a spike
// neuron firing time
if(allocateMem)
CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->lastSpikeTime, sizeof(int) * networkConfigs[netId].numNAssigned));
CUDA_CHECK_ERRORS(hipMemcpy(dest->lastSpikeTime, managerRuntimeData.lastSpikeTime, sizeof(int) * networkConfigs[netId].numNAssigned, hipMemcpyHostToDevice));
// auxiliary data for recording spike count of each neuron
// (note: this is the only use of the lGrpId parameter in this function)
copyNeuronSpikeCount(netId, lGrpId, dest, &managerRuntimeData, hipMemcpyHostToDevice, true, 0);
// quick lookup array for local group ids
if(allocateMem)
CUDA_CHECK_ERRORS(hipMalloc( (void**)&dest->grpIds, sizeof(short int) * networkConfigs[netId].numNAssigned));
CUDA_CHECK_ERRORS(hipMemcpy( dest->grpIds, managerRuntimeData.grpIds, sizeof(short int) * networkConfigs[netId].numNAssigned, hipMemcpyHostToDevice));
// quick lookup array for conn ids
if(allocateMem)
CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->connIdsPreIdx, sizeof(short int) * networkConfigs[netId].numPreSynNet));
CUDA_CHECK_ERRORS(hipMemcpy(dest->connIdsPreIdx, managerRuntimeData.connIdsPreIdx, sizeof(short int) * networkConfigs[netId].numPreSynNet, hipMemcpyHostToDevice));
// firing table
if(allocateMem) {
assert(dest->firingTableD1 == NULL);
assert(dest->firingTableD2 == NULL);
}
// allocate 1ms firing table
if(allocateMem)
CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->firingTableD1, sizeof(int) * networkConfigs[netId].maxSpikesD1));
if (networkConfigs[netId].maxSpikesD1 > 0)
CUDA_CHECK_ERRORS(hipMemcpy(dest->firingTableD1, managerRuntimeData.firingTableD1, sizeof(int) * networkConfigs[netId].maxSpikesD1, hipMemcpyHostToDevice));
// allocate 2+ms firing table
if(allocateMem)
CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->firingTableD2, sizeof(int) * networkConfigs[netId].maxSpikesD2));
if (networkConfigs[netId].maxSpikesD2 > 0)
CUDA_CHECK_ERRORS(hipMemcpy(dest->firingTableD2, managerRuntimeData.firingTableD2, sizeof(int) * networkConfigs[netId].maxSpikesD2, hipMemcpyHostToDevice));
// allocate external 1ms firing table
// NOTE(review): the loop variable below shadows the lGrpId parameter — confirm intentional
if (allocateMem) {
void* devPtr;
CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->extFiringTableD1, sizeof(int*) * networkConfigs[netId].numGroups));
CUDA_CHECK_ERRORS(hipMemset(dest->extFiringTableD1, 0 /* NULL */, sizeof(int*) * networkConfigs[netId].numGroups));
for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroups; lGrpId++) {
if (groupConfigs[netId][lGrpId].hasExternalConnect) {
CUDA_CHECK_ERRORS(hipMalloc((void**)&devPtr, sizeof(int) * groupConfigs[netId][lGrpId].numN * NEURON_MAX_FIRING_RATE));
CUDA_CHECK_ERRORS(hipMemset(devPtr, 0 , sizeof(int) * groupConfigs[netId][lGrpId].numN * NEURON_MAX_FIRING_RATE));
CUDA_CHECK_ERRORS(hipMemcpy(&dest->extFiringTableD1[lGrpId], &devPtr, sizeof(int*), hipMemcpyHostToDevice));
}
}
}
// allocate external 2+ms firing table
if (allocateMem) {
void* devPtr;
CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->extFiringTableD2, sizeof(int*) * networkConfigs[netId].numGroups));
CUDA_CHECK_ERRORS(hipMemset(dest->extFiringTableD2, 0 /* NULL */, sizeof(int*) * networkConfigs[netId].numGroups));
for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroups; lGrpId++) {
if (groupConfigs[netId][lGrpId].hasExternalConnect) {
CUDA_CHECK_ERRORS(hipMalloc((void**)&devPtr, sizeof(int) * groupConfigs[netId][lGrpId].numN * NEURON_MAX_FIRING_RATE));
CUDA_CHECK_ERRORS(hipMemset(devPtr, 0 , sizeof(int) * groupConfigs[netId][lGrpId].numN * NEURON_MAX_FIRING_RATE));
CUDA_CHECK_ERRORS(hipMemcpy(&dest->extFiringTableD2[lGrpId], &devPtr, sizeof(int*), hipMemcpyHostToDevice));
}
}
}
// allocate external 1ms firing table index
if (allocateMem)
CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->extFiringTableEndIdxD1, sizeof(int) * networkConfigs[netId].numGroups));
CUDA_CHECK_ERRORS(hipMemset(dest->extFiringTableEndIdxD1, 0, sizeof(int) * networkConfigs[netId].numGroups));
// allocate external 2+ms firing table index
if (allocateMem)
CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->extFiringTableEndIdxD2, sizeof(int) * networkConfigs[netId].numGroups));
CUDA_CHECK_ERRORS(hipMemset(dest->extFiringTableEndIdxD2, 0, sizeof(int) * networkConfigs[netId].numGroups));
}
void SNN::copyGrpIdsLookupArray(int netId, hipMemcpyKind kind) {
	// Pull the neuron-to-group lookup table of local network netId from the
	// device back into the host-side manager buffer. Only device-to-host
	// transfers are supported by this routine.
	assert(kind == hipMemcpyDeviceToHost);
	checkAndSetGPUDevice(netId);
	checkDestSrcPtrs(&managerRuntimeData, &runtimeData[netId], hipMemcpyDeviceToHost, false, ALL, 0);// check that the destination pointer is properly allocated..

	size_t numBytes = sizeof(short int) * networkConfigs[netId].numNAssigned;
	CUDA_CHECK_ERRORS(hipMemcpy(managerRuntimeData.grpIds, runtimeData[netId].grpIds, numBytes, hipMemcpyDeviceToHost));
}
void SNN::copyConnIdsLookupArray(int netId, hipMemcpyKind kind) {
	// Pull the per-presynaptic-entry connection-id lookup table of local
	// network netId back into the host-side manager buffer (device-to-host only).
	assert(kind == hipMemcpyDeviceToHost);
	checkAndSetGPUDevice(netId);
	checkDestSrcPtrs(&managerRuntimeData, &runtimeData[netId], hipMemcpyDeviceToHost, false, ALL, 0);// check that the destination pointer is properly allocated..

	size_t numBytes = sizeof(short int) * networkConfigs[netId].numPreSynNet;
	CUDA_CHECK_ERRORS(hipMemcpy(managerRuntimeData.connIdsPreIdx, runtimeData[netId].connIdsPreIdx, numBytes, hipMemcpyDeviceToHost));
}
void SNN::copyLastSpikeTime(int netId, hipMemcpyKind kind) {
	// Fetch the per-neuron last-spike timestamps of local network netId into
	// the host-side manager buffer (device-to-host only).
	assert(kind == hipMemcpyDeviceToHost);
	checkAndSetGPUDevice(netId);
	checkDestSrcPtrs(&managerRuntimeData, &runtimeData[netId], hipMemcpyDeviceToHost, false, ALL, 0); // check that the destination pointer is properly allocated..

	size_t numBytes = sizeof(int) * networkConfigs[netId].numN;
	CUDA_CHECK_ERRORS(hipMemcpy(managerRuntimeData.lastSpikeTime, runtimeData[netId].lastSpikeTime, numBytes, hipMemcpyDeviceToHost));
}
// spikeGeneratorUpdate on GPUs..
void SNN::spikeGeneratorUpdate_GPU(int netId) {
	// Prepare spike-generation input on device netId for the next step:
	// (1) refresh the random numbers used by rate-based poisson generators, and
	// (2) upload the bit vector of callback-generated spikes to the device.
	assert(runtimeData[netId].allocated);
	assert(runtimeData[netId].memType == GPU_MEM);
	checkAndSetGPUDevice(netId);

	// update the random number for poisson spike generator (spikes generated by rate)
	if((networkConfigs[netId].numNPois > 0) && (runtimeData[netId].gpuRandGen != NULL)) {
		hiprandGenerateUniform(runtimeData[netId].gpuRandGen, runtimeData[netId].randNum, networkConfigs[netId].numNPois);
	}

	// Use spike generators (user-defined callback function)
	if (networkConfigs[netId].numNSpikeGen > 0) {
		assert(managerRuntimeData.spikeGenBits != NULL);

		// reset the bit status of the spikeGenBits (one bit per spike-gen neuron,
		// packed into ints; hence the /32 + 1 sizing)
		memset(managerRuntimeData.spikeGenBits, 0, sizeof(int) * (networkConfigs[netId].numNSpikeGen / 32 + 1));

		// fill spikeGenBits from SpikeBuffer
		fillSpikeGenBits(netId);

		// copy the spikeGenBits from the manager to the GPU..
		CUDA_CHECK_ERRORS(hipMemcpy(runtimeData[netId].spikeGenBits, managerRuntimeData.spikeGenBits, sizeof(int) * (networkConfigs[netId].numNSpikeGen / 32 + 1), hipMemcpyHostToDevice));
	}
}
void SNN::findFiring_GPU(int netId) {
	// Launch the spike-detection kernel for local network netId at the
	// current simulation time.
	assert(runtimeData[netId].memType == GPU_MEM);
	assert(runtimeData[netId].allocated);
	checkAndSetGPUDevice(netId);

	hipLaunchKernelGGL(( kernel_findFiring), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, simTime);
	CUDA_GET_LAST_ERROR("findFiring kernel failed\n");
}
void SNN::updateTimingTable_GPU(int netId) {
	// Advance the device-side spike timing tables for the current millisecond.
	assert(runtimeData[netId].memType == GPU_MEM);
	assert(runtimeData[netId].allocated);
	checkAndSetGPUDevice(netId);

	hipLaunchKernelGGL(( kernel_updateTimeTable), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, simTimeMs);
	CUDA_GET_LAST_ERROR("timing Table update kernel failed\n");
}
void SNN::doCurrentUpdateD2_GPU(int netId) {
	// Deliver spikes from the D2 (delay >= 2ms) firing table as synaptic current.
	// Skipped entirely when the network has no synaptic delay longer than 1ms.
	assert(runtimeData[netId].allocated);
	assert(runtimeData[netId].memType == GPU_MEM);
	checkAndSetGPUDevice(netId);

	if (networkConfigs[netId].maxDelay > 1) {
		// Use the portable hipLaunchKernelGGL macro, consistent with every other
		// launcher in this file (the original used raw <<< >>> launch syntax,
		// which hipify missed here).
		hipLaunchKernelGGL(( kernel_doCurrentUpdateD2), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, simTimeMs, simTimeSec, simTime);
		CUDA_GET_LAST_ERROR("Kernel execution failed");
	}
}
void SNN::doCurrentUpdateD1_GPU(int netId) {
	// Deliver spikes from the D1 (1ms delay) firing table as synaptic current.
	assert(runtimeData[netId].memType == GPU_MEM);
	assert(runtimeData[netId].allocated);
	checkAndSetGPUDevice(netId);

	hipLaunchKernelGGL(( kernel_doCurrentUpdateD1), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, simTimeMs, simTimeSec, simTime);
	CUDA_GET_LAST_ERROR("Kernel execution failed");
}
void SNN::doSTPUpdateAndDecayCond_GPU(int netId) {
	// Run the combined STP update / conductance decay kernel, but only when the
	// simulation actually uses STP or conductance-based synapses.
	assert(runtimeData[netId].allocated); // added for consistency with the other per-step *_GPU wrappers
	assert(runtimeData[netId].memType == GPU_MEM);
	checkAndSetGPUDevice(netId);

	if (sim_with_stp || sim_with_conductances) {
		hipLaunchKernelGGL(( kernel_STPUpdateAndDecayConductances), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, simTimeMs, simTimeSec, simTime);
		CUDA_GET_LAST_ERROR("STP update\n");
	}
}
void SNN::initGPU(int netId) {
	// Run the one-time device-side memory initialization kernel; the runtime
	// data for this network must already be allocated on the GPU.
	assert(runtimeData[netId].allocated);
	checkAndSetGPUDevice(netId);

	hipLaunchKernelGGL(( kernel_initGPUMemory), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, );
	CUDA_GET_LAST_ERROR("initGPUMemory kernel failed\n");
}
void SNN::deleteRuntimeData_GPU(int netId) {
	// Release every device-side buffer owned by the runtime data of local
	// network netId. Precondition: the runtime data must live in GPU memory.
	assert(runtimeData[netId].memType == GPU_MEM);
	checkAndSetGPUDevice(netId);

	// hipFree all device pointers
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].voltage) );
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].recovery) );
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].current) );
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].extCurrent) );
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].Npre) );
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].Npre_plastic) );
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].Npre_plasticInv) );
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].Npost) );
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].cumulativePost) );
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].cumulativePre) );
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].synSpikeTime) );
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].wt) );
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].wtChange) );
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].maxSynWt) );
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].nSpikeCnt) );
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].avgFiring) );
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].baseFiring) );
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].baseFiringInv) );
	// group-level neuromodulator concentrations and their buffers
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].grpDA) );
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].grp5HT) );
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].grpACh) );
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].grpNE) );
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].grpDABuffer) );
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].grp5HTBuffer) );
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].grpAChBuffer) );
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].grpNEBuffer) );
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].grpIds) );
	// Izhikevich neuron parameters
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].Izh_a) );
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].Izh_b) );
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].Izh_c) );
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].Izh_d) );
	// Conductances. NMDA/GABAb were allocated either as separate rise/decay
	// buffers or as a single buffer, depending on the sim_with_*_rise flags, so
	// they must be freed under the same flags.
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].gAMPA) );
	if (sim_with_NMDA_rise) {
		CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].gNMDA_r) );
		CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].gNMDA_d) );
	} else {
		CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].gNMDA) );
	}
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].gGABAa) );
	if (sim_with_GABAb_rise) {
		CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].gGABAb_r) );
		CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].gGABAb_d) );
	} else {
		CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].gGABAb) );
	}
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].stpu) );
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].stpx) );
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].connIdsPreIdx) );
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].groupIdInfo) );
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].neuronAllocation) );
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].postDelayInfo) );
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].postSynapticIds) );
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].preSynapticIds) );
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].I_set) );
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].poissonFireRate) );
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].lastSpikeTime) );
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].spikeGenBits) );
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].firingTableD2) );
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].firingTableD1) );
	// The external firing tables are device arrays of device pointers: copy the
	// per-group pointers back to the host, free each one, then free the table.
	int** tempPtrs;
	tempPtrs = new int*[networkConfigs[netId].numGroups];
	// fetch device memory address stored in extFiringTableD2
	CUDA_CHECK_ERRORS( hipMemcpy(tempPtrs, runtimeData[netId].extFiringTableD2, sizeof(int*) * networkConfigs[netId].numGroups, hipMemcpyDeviceToHost) );
	for (int i = 0; i < networkConfigs[netId].numGroups; i++)
		CUDA_CHECK_ERRORS( hipFree(tempPtrs[i]) );
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].extFiringTableD2) );
	// fetch device memory address stored in extFiringTableD1
	CUDA_CHECK_ERRORS( hipMemcpy(tempPtrs, runtimeData[netId].extFiringTableD1, sizeof(int*) * networkConfigs[netId].numGroups, hipMemcpyDeviceToHost) );
	for (int i = 0; i < networkConfigs[netId].numGroups; i++)
		CUDA_CHECK_ERRORS( hipFree(tempPtrs[i]) );
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].extFiringTableD1) );
	delete[] tempPtrs;
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].extFiringTableEndIdxD2) );
	CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].extFiringTableEndIdxD1) );
	// delete random number generator on GPU(s)
	// Note: RNG_rand48 objects allocate device memory
	if (runtimeData[netId].gpuRandGen != NULL) hiprandDestroyGenerator(runtimeData[netId].gpuRandGen);
	runtimeData[netId].gpuRandGen = NULL;
	if (runtimeData[netId].randNum != NULL) CUDA_CHECK_ERRORS(hipFree(runtimeData[netId].randNum));
	runtimeData[netId].randNum = NULL;
}
void SNN::globalStateUpdate_C_GPU(int netId) {
	// Update synaptic conductances for local network netId.
	assert(runtimeData[netId].memType == GPU_MEM);
	checkAndSetGPUDevice(netId);

	// Use the portable hipLaunchKernelGGL macro, consistent with the other
	// launchers in this file (the original used raw <<< >>> launch syntax,
	// which hipify missed here).
	hipLaunchKernelGGL(( kernel_conductanceUpdate), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, simTimeMs, simTimeSec, simTime);
	CUDA_GET_LAST_ERROR("kernel_conductanceUpdate failed");

	// use memset to reset I_set for debugging, resume it later
	//CUDA_CHECK_ERRORS(hipMemset(runtimeData[netId].I_set, 0, networkConfigs[netId].I_setPitch * networkConfigs[netId].I_setLength));
}
void SNN::globalStateUpdate_N_GPU(int netId) {
	assert(runtimeData[netId].memType == GPU_MEM);
	checkAndSetGPUDevice(netId);

	// update all neuron state (i.e., voltage and recovery), including homeostasis.
	// Converted from raw <<< >>> launch syntax to hipLaunchKernelGGL for
	// consistency with the rest of this hipified file.
	hipLaunchKernelGGL(( kernel_neuronStateUpdate), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, );
	CUDA_GET_LAST_ERROR("Kernel execution failed");
}
void SNN::globalStateUpdate_G_GPU(int netId) {
	assert(runtimeData[netId].memType == GPU_MEM);
	checkAndSetGPUDevice(netId);

	// Update all group state (i.e., concentration of neuromodulators).
	// Launched with a fixed grid of 4 blocks; currently supports 4 x 128 groups.
	hipLaunchKernelGGL(( kernel_groupStateUpdate), dim3(4), dim3(NUM_THREADS), 0, 0, simTimeMs);
	CUDA_GET_LAST_ERROR("Kernel execution failed");
}
void SNN::assignPoissonFiringRate_GPU(int netId) {
	// Upload the current poisson firing rate of every rate-driven spike-generator
	// group in local network netId into the device-side poissonFireRate buffer.
	assert(runtimeData[netId].memType == GPU_MEM);
	checkAndSetGPUDevice(netId);

	for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroups; lGrpId++) {
		// given group of neurons belong to the poisson group....
		if (groupConfigs[netId][lGrpId].isSpikeGenerator) {
			int lNId = groupConfigs[netId][lGrpId].lStartN;
			int gGrpId = groupConfigs[netId][lGrpId].gGrpId;
			PoissonRate* rate = groupConfigMDMap[gGrpId].ratePtr;

			// if spikeGenFunc group does not have a Poisson pointer, skip
			if (groupConfigMap[gGrpId].spikeGenFunc || rate == NULL)
				continue;

			assert(runtimeData[netId].poissonFireRate != NULL);
			// poissonFireRate is indexed by (local neuron id - numNReg), i.e. it
			// covers only the neurons that follow the regular ones
			if (rate->isOnGPU()) {
				// rates allocated on GPU: device-to-device copy
				CUDA_CHECK_ERRORS(hipMemcpy(&runtimeData[netId].poissonFireRate[lNId - networkConfigs[netId].numNReg], rate->getRatePtrGPU(),
					sizeof(float) * rate->getNumNeurons(), hipMemcpyDeviceToDevice) );
			} else {
				// rates allocated on CPU: host-to-device copy
				CUDA_CHECK_ERRORS(hipMemcpy(&runtimeData[netId].poissonFireRate[lNId - networkConfigs[netId].numNReg], rate->getRatePtrCPU(),
					sizeof(float) * rate->getNumNeurons(), hipMemcpyHostToDevice) );
			}
		}
	}
}
// Note: for temporarily use, might be merged into exchangeExternalSpike
void SNN::clearExtFiringTable_GPU(int netId) {
	// Zero the per-group end indices of both external firing tables so the
	// next exchange starts from empty external spike buffers.
	assert(runtimeData[netId].memType == GPU_MEM);
	checkAndSetGPUDevice(netId);

	size_t numBytes = sizeof(int) * networkConfigs[netId].numGroups;
	CUDA_CHECK_ERRORS(hipMemset(runtimeData[netId].extFiringTableEndIdxD1, 0, numBytes));
	CUDA_CHECK_ERRORS(hipMemset(runtimeData[netId].extFiringTableEndIdxD2, 0, numBytes));
}
//void SNN::routeSpikes_GPU() {
// int firingTableIdxD2, firingTableIdxD1;
// int GtoLOffset;
// // ToDo: route spikes using routing table. currently only exchange spikes between GPU0 and GPU1
// // GPU0 -> GPU1
// if (!groupPartitionLists[0].empty() && !groupPartitionLists[1].empty()) {
// checkAndSetGPUDevice(0);
// CUDA_CHECK_ERRORS( hipMemcpy(managerRuntimeData.extFiringTableEndIdxD2, runtimeData[0].extFiringTableEndIdxD2, sizeof(int) * networkConfigs[0].numGroups, hipMemcpyDeviceToHost));
// CUDA_CHECK_ERRORS( hipMemcpy(managerRuntimeData.extFiringTableEndIdxD1, runtimeData[0].extFiringTableEndIdxD1, sizeof(int) * networkConfigs[0].numGroups, hipMemcpyDeviceToHost));
// CUDA_CHECK_ERRORS( hipMemcpy(managerRuntimeData.extFiringTableD2, runtimeData[0].extFiringTableD2, sizeof(int*) * networkConfigs[0].numGroups, hipMemcpyDeviceToHost));
// CUDA_CHECK_ERRORS( hipMemcpy(managerRuntimeData.extFiringTableD1, runtimeData[0].extFiringTableD1, sizeof(int*) * networkConfigs[0].numGroups, hipMemcpyDeviceToHost));
// //KERNEL_DEBUG("GPU0 D1ex:%d/D2ex:%d", managerRuntimeData.extFiringTableEndIdxD1[0], managerRuntimeData.extFiringTableEndIdxD2[0]);
//
// checkAndSetGPUDevice(1);
// CUDA_CHECK_ERRORS( hipMemcpyFromSymbol(managerRuntimeData.timeTableD2, timeTableD2GPU, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, hipMemcpyDeviceToHost));
// CUDA_CHECK_ERRORS( hipMemcpyFromSymbol(managerRuntimeData.timeTableD1, timeTableD1GPU, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, hipMemcpyDeviceToHost));
// firingTableIdxD2 = managerRuntimeData.timeTableD2[simTimeMs + glbNetworkConfig.maxDelay + 1];
// firingTableIdxD1 = managerRuntimeData.timeTableD1[simTimeMs + glbNetworkConfig.maxDelay + 1];
// //KERNEL_DEBUG("GPU1 D1:%d/D2:%d", firingTableIdxD1, firingTableIdxD2);
//
// for (int lGrpId = 0; lGrpId < networkConfigs[0].numGroups; lGrpId++) {
// if (groupConfigs[0][lGrpId].hasExternalConnect && managerRuntimeData.extFiringTableEndIdxD2[lGrpId] > 0) {
// CUDA_CHECK_ERRORS( hipMemcpyPeer(runtimeData[1].firingTableD2 + firingTableIdxD2, 1,
// managerRuntimeData.extFiringTableD2[lGrpId], 0,
// sizeof(int) * managerRuntimeData.extFiringTableEndIdxD2[lGrpId]));
//
// for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[1].begin(); grpIt != groupPartitionLists[1].end(); grpIt++) {
// if (grpIt->gGrpId == groupConfigs[0][lGrpId].gGrpId)
// GtoLOffset = grpIt->GtoLOffset;
// }
//
// kernel_convertExtSpikesD2<<<NUM_BLOCKS, NUM_THREADS>>>(firingTableIdxD2,
// firingTableIdxD2 + managerRuntimeData.extFiringTableEndIdxD2[lGrpId],
// GtoLOffset); // [StartIdx, EndIdx)
// firingTableIdxD2 += managerRuntimeData.extFiringTableEndIdxD2[lGrpId];
// }
//
// if (groupConfigs[0][lGrpId].hasExternalConnect && managerRuntimeData.extFiringTableEndIdxD1[lGrpId] > 0) {
// CUDA_CHECK_ERRORS( hipMemcpyPeer(runtimeData[1].firingTableD1 + firingTableIdxD1, 1,
// managerRuntimeData.extFiringTableD1[lGrpId], 0,
// sizeof(int) * managerRuntimeData.extFiringTableEndIdxD1[lGrpId]));
//
// for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[1].begin(); grpIt != groupPartitionLists[1].end(); grpIt++) {
// if (grpIt->gGrpId == groupConfigs[0][lGrpId].gGrpId)
// GtoLOffset = grpIt->GtoLOffset;
// }
//
// kernel_convertExtSpikesD1<<<NUM_BLOCKS, NUM_THREADS>>>(firingTableIdxD1,
// firingTableIdxD1 + managerRuntimeData.extFiringTableEndIdxD1[lGrpId],
// GtoLOffset); // [StartIdx, EndIdx)
// firingTableIdxD1 += managerRuntimeData.extFiringTableEndIdxD1[lGrpId];
//
// }
// //KERNEL_DEBUG("GPU1 New D1:%d/D2:%d", firingTableIdxD1, firingTableIdxD2);
// }
// managerRuntimeData.timeTableD2[simTimeMs + glbNetworkConfig.maxDelay + 1] = firingTableIdxD2;
// managerRuntimeData.timeTableD1[simTimeMs + glbNetworkConfig.maxDelay + 1] = firingTableIdxD1;
// CUDA_CHECK_ERRORS( hipMemcpyToSymbol(timeTableD2GPU, managerRuntimeData.timeTableD2, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, hipMemcpyHostToDevice));
// CUDA_CHECK_ERRORS( hipMemcpyToSymbol(timeTableD1GPU, managerRuntimeData.timeTableD1, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, hipMemcpyHostToDevice));
// }
//
// // GPU1 -> GPU0
// if (!groupPartitionLists[1].empty() && !groupPartitionLists[0].empty()) {
// checkAndSetGPUDevice(1);
// CUDA_CHECK_ERRORS( hipMemcpy(managerRuntimeData.extFiringTableEndIdxD2, runtimeData[1].extFiringTableEndIdxD2, sizeof(int) * networkConfigs[1].numGroups, hipMemcpyDeviceToHost));
// CUDA_CHECK_ERRORS( hipMemcpy(managerRuntimeData.extFiringTableEndIdxD1, runtimeData[1].extFiringTableEndIdxD1, sizeof(int) * networkConfigs[1].numGroups, hipMemcpyDeviceToHost));
// CUDA_CHECK_ERRORS( hipMemcpy(managerRuntimeData.extFiringTableD2, runtimeData[1].extFiringTableD2, sizeof(int*) * networkConfigs[1].numGroups, hipMemcpyDeviceToHost));
// CUDA_CHECK_ERRORS( hipMemcpy(managerRuntimeData.extFiringTableD1, runtimeData[1].extFiringTableD1, sizeof(int*) * networkConfigs[1].numGroups, hipMemcpyDeviceToHost));
// //KERNEL_DEBUG("GPU1 D1ex:%d/D2ex:%d", managerRuntimeData.extFiringTableEndIdxD1[0], managerRuntimeData.extFiringTableEndIdxD2[0]);
//
// checkAndSetGPUDevice(0);
// CUDA_CHECK_ERRORS( hipMemcpyFromSymbol(managerRuntimeData.timeTableD2, timeTableD2GPU, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, hipMemcpyDeviceToHost));
// CUDA_CHECK_ERRORS( hipMemcpyFromSymbol(managerRuntimeData.timeTableD1, timeTableD1GPU, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, hipMemcpyDeviceToHost));
// firingTableIdxD2 = managerRuntimeData.timeTableD2[simTimeMs + glbNetworkConfig.maxDelay + 1];
// firingTableIdxD1 = managerRuntimeData.timeTableD1[simTimeMs + glbNetworkConfig.maxDelay + 1];
// //KERNEL_DEBUG("GPU0 D1:%d/D2:%d", firingTableIdxD1, firingTableIdxD2);
//
// for (int lGrpId = 0; lGrpId < networkConfigs[1].numGroups; lGrpId++) {
// if (groupConfigs[1][lGrpId].hasExternalConnect && managerRuntimeData.extFiringTableEndIdxD2[lGrpId] > 0) {
// CUDA_CHECK_ERRORS( hipMemcpyPeer(runtimeData[0].firingTableD2 + firingTableIdxD2, 0,
// managerRuntimeData.extFiringTableD2[lGrpId], 1,
// sizeof(int) * managerRuntimeData.extFiringTableEndIdxD2[lGrpId]));
//
// for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[0].begin(); grpIt != groupPartitionLists[0].end(); grpIt++) {
// if (grpIt->gGrpId == groupConfigs[1][lGrpId].gGrpId)
// GtoLOffset = grpIt->GtoLOffset;
// }
//
// kernel_convertExtSpikesD2<<<NUM_BLOCKS, NUM_THREADS>>>(firingTableIdxD2,
// firingTableIdxD2 + managerRuntimeData.extFiringTableEndIdxD2[lGrpId],
// GtoLOffset); // [StartIdx, EndIdx)
// firingTableIdxD2 += managerRuntimeData.extFiringTableEndIdxD2[lGrpId];
// }
//
// if (groupConfigs[1][lGrpId].hasExternalConnect && managerRuntimeData.extFiringTableEndIdxD1[lGrpId] > 0) {
// CUDA_CHECK_ERRORS( hipMemcpyPeer(runtimeData[0].firingTableD1 + firingTableIdxD1, 0,
// managerRuntimeData.extFiringTableD1[lGrpId], 1,
// sizeof(int) * managerRuntimeData.extFiringTableEndIdxD1[lGrpId]));
//
// for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[0].begin(); grpIt != groupPartitionLists[0].end(); grpIt++) {
// if (grpIt->gGrpId == groupConfigs[1][lGrpId].gGrpId)
// GtoLOffset = grpIt->GtoLOffset;
// }
//
// kernel_convertExtSpikesD1<<<NUM_BLOCKS, NUM_THREADS>>>(firingTableIdxD1,
// firingTableIdxD1 + managerRuntimeData.extFiringTableEndIdxD1[lGrpId],
// GtoLOffset); // [StartIdx, EndIdx)
// firingTableIdxD1 += managerRuntimeData.extFiringTableEndIdxD1[lGrpId];
// }
// //KERNEL_DEBUG("GPU0 New D1:%d/D2:%d", firingTableIdxD1, firingTableIdxD2);
// }
// managerRuntimeData.timeTableD2[simTimeMs + glbNetworkConfig.maxDelay + 1] = firingTableIdxD2;
// managerRuntimeData.timeTableD1[simTimeMs + glbNetworkConfig.maxDelay + 1] = firingTableIdxD1;
// CUDA_CHECK_ERRORS( hipMemcpyToSymbol(timeTableD2GPU, managerRuntimeData.timeTableD2, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, hipMemcpyHostToDevice));
// CUDA_CHECK_ERRORS( hipMemcpyToSymbol(timeTableD1GPU, managerRuntimeData.timeTableD1, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, hipMemcpyHostToDevice));
// }
//
//
// for (std::list<RoutingTableEntry>::iterator rteItr = spikeRoutingTable.begin(); rteItr != spikeRoutingTable.end(); rteItr++) {
// int srcNetId = rteItr->srcNetId;
// int destNetId = rteItr->destNetId;
// assert(srcNetId < CPU_RUNTIME_BASE);
// assert(destNetId < CPU_RUNTIME_BASE);
// checkAndSetGPUDevice(srcNetId);
// CUDA_CHECK_ERRORS( hipMemcpy(managerRuntimeData.extFiringTableEndIdxD2, runtimeData[srcNetId].extFiringTableEndIdxD2, sizeof(int) * networkConfigs[srcNetId].numGroups, hipMemcpyDeviceToHost));
// CUDA_CHECK_ERRORS( hipMemcpy(managerRuntimeData.extFiringTableEndIdxD1, runtimeData[srcNetId].extFiringTableEndIdxD1, sizeof(int) * networkConfigs[srcNetId].numGroups, hipMemcpyDeviceToHost));
// CUDA_CHECK_ERRORS( hipMemcpy(managerRuntimeData.extFiringTableD2, runtimeData[srcNetId].extFiringTableD2, sizeof(int*) * networkConfigs[srcNetId].numGroups, hipMemcpyDeviceToHost));
// CUDA_CHECK_ERRORS( hipMemcpy(managerRuntimeData.extFiringTableD1, runtimeData[srcNetId].extFiringTableD1, sizeof(int*) * networkConfigs[srcNetId].numGroups, hipMemcpyDeviceToHost));
// //KERNEL_DEBUG("GPU0 D1ex:%d/D2ex:%d", managerRuntimeData.extFiringTableEndIdxD1[0], managerRuntimeData.extFiringTableEndIdxD2[0]);
//
// checkAndSetGPUDevice(destNetId);
// CUDA_CHECK_ERRORS( hipMemcpyFromSymbol(managerRuntimeData.timeTableD2, timeTableD2GPU, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, hipMemcpyDeviceToHost));
// CUDA_CHECK_ERRORS( hipMemcpyFromSymbol(managerRuntimeData.timeTableD1, timeTableD1GPU, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, hipMemcpyDeviceToHost));
// firingTableIdxD2 = managerRuntimeData.timeTableD2[simTimeMs + glbNetworkConfig.maxDelay + 1];
// firingTableIdxD1 = managerRuntimeData.timeTableD1[simTimeMs + glbNetworkConfig.maxDelay + 1];
// //KERNEL_DEBUG("GPU1 D1:%d/D2:%d", firingTableIdxD1, firingTableIdxD2);
//
// for (int lGrpId = 0; lGrpId < networkConfigs[srcNetId].numGroups; lGrpId++) {
// if (groupConfigs[srcNetId][lGrpId].hasExternalConnect && managerRuntimeData.extFiringTableEndIdxD2[lGrpId] > 0) {
// CUDA_CHECK_ERRORS( hipMemcpyPeer(runtimeData[destNetId].firingTableD2 + firingTableIdxD2, destNetId,
// managerRuntimeData.extFiringTableD2[lGrpId], srcNetId,
// sizeof(int) * managerRuntimeData.extFiringTableEndIdxD2[lGrpId]));
//
// for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[destNetId].begin(); grpIt != groupPartitionLists[destNetId].end(); grpIt++) {
// if (grpIt->gGrpId == groupConfigs[srcNetId][lGrpId].gGrpId)
// GtoLOffset = grpIt->GtoLOffset;
// }
//
// kernel_convertExtSpikesD2<<<NUM_BLOCKS, NUM_THREADS>>>(firingTableIdxD2,
// firingTableIdxD2 + managerRuntimeData.extFiringTableEndIdxD2[lGrpId],
// GtoLOffset); // [StartIdx, EndIdx)
// firingTableIdxD2 += managerRuntimeData.extFiringTableEndIdxD2[lGrpId];
// }
//
// if (groupConfigs[srcNetId][lGrpId].hasExternalConnect && managerRuntimeData.extFiringTableEndIdxD1[lGrpId] > 0) {
// CUDA_CHECK_ERRORS( hipMemcpyPeer(runtimeData[destNetId].firingTableD1 + firingTableIdxD1, destNetId,
// managerRuntimeData.extFiringTableD1[lGrpId], srcNetId,
// sizeof(int) * managerRuntimeData.extFiringTableEndIdxD1[lGrpId]));
//
// for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[destNetId].begin(); grpIt != groupPartitionLists[destNetId].end(); grpIt++) {
// if (grpIt->gGrpId == groupConfigs[srcNetId][lGrpId].gGrpId)
// GtoLOffset = grpIt->GtoLOffset;
// }
//
// kernel_convertExtSpikesD1<<<NUM_BLOCKS, NUM_THREADS>>>(firingTableIdxD1,
// firingTableIdxD1 + managerRuntimeData.extFiringTableEndIdxD1[lGrpId],
// GtoLOffset); // [StartIdx, EndIdx)
// firingTableIdxD1 += managerRuntimeData.extFiringTableEndIdxD1[lGrpId];
//
// }
// //KERNEL_DEBUG("GPU1 New D1:%d/D2:%d", firingTableIdxD1, firingTableIdxD2);
// }
// managerRuntimeData.timeTableD2[simTimeMs + glbNetworkConfig.maxDelay + 1] = firingTableIdxD2;
// managerRuntimeData.timeTableD1[simTimeMs + glbNetworkConfig.maxDelay + 1] = firingTableIdxD1;
// CUDA_CHECK_ERRORS( hipMemcpyToSymbol(timeTableD2GPU, managerRuntimeData.timeTableD2, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, hipMemcpyHostToDevice));
// CUDA_CHECK_ERRORS( hipMemcpyToSymbol(timeTableD1GPU, managerRuntimeData.timeTableD1, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, hipMemcpyHostToDevice));
// }
//}
/*!
* \brief This function is called every second by SNN::runNetwork(). It updates the firingTableD1(D2)GPU and
* timeTableD1(D2)GPU by removing older firing information.
*/
void SNN::shiftSpikeTables_F_GPU(int netId) {
	// Shift the device-side firing tables (called once per simulated second;
	// see the block comment above this function).
	assert(runtimeData[netId].memType == GPU_MEM);
	checkAndSetGPUDevice(netId);

	hipLaunchKernelGGL(( kernel_shiftFiringTable), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, );
	// Check the launch, as every other kernel launcher in this file does
	// (the original launch was unchecked).
	CUDA_GET_LAST_ERROR("shiftFiringTable kernel failed\n");
}
void SNN::shiftSpikeTables_T_GPU(int netId) {
	// Shift the device-side time tables (called once per simulated second).
	assert(runtimeData[netId].memType == GPU_MEM);
	checkAndSetGPUDevice(netId);

	hipLaunchKernelGGL(( kernel_shiftTimeTable), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, );
	// Check the launch, as every other kernel launcher in this file does
	// (the original launch was unchecked).
	CUDA_GET_LAST_ERROR("shiftTimeTable kernel failed\n");
}
/*
 * \brief Update synaptic weights every 10ms, 100ms, or 1000ms
 *
 *
 */
void SNN::updateWeights_GPU(int netId) {
	// Apply accumulated weight changes on the device. Must not run while the
	// simulation is in testing mode or configured with fixed weights.
	assert(sim_in_testing == false);
	assert(sim_with_fixedwts == false);
	assert(runtimeData[netId].memType == GPU_MEM);
	checkAndSetGPUDevice(netId);

	hipLaunchKernelGGL(( kernel_updateWeights), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, );
	// Check the launch, as every other kernel launcher in this file does
	// (the original launch was unchecked).
	CUDA_GET_LAST_ERROR("updateWeights kernel failed\n");
}
//__global__ void gpu_resetFiringInformation() {
// if(threadIdx.x==0 && blockIdx.x==0) {
// for(int i = 0; i < ROUNDED_TIMING_COUNT; i++) {
// timeTableD2GPU[i] = 0;
// timeTableD1GPU[i] = 0;
// }
// spikeCountD2SecGPU=0;
// spikeCountD1SecGPU=0;
// secD2fireCntTest=0;
// secD1fireCntTest=0;
// spikeCountD2GPU=0;
// spikeCountD1GPU=0;
//
// //spikeCountAll1Sec=0;//assigned in fetchSpikeTables()
// }
//
//}
//
//void SNN::resetFiringInformation_GPU() {
// checkAndSetGPUDevice();
//
// gpu_resetFiringInformation<<<NUM_BLOCKS,NUM_THREADS>>>();
//}
/*!
 * \brief this function allocates device (GPU) memory space and copies external current to it
 *
 * This function:
 * (allocate and) copy extCurrent
 *
 * This function is called by copyNeuronState() and setExternalCurrent. Only host-to-device copy is required
 *
 * \param[in] netId the id of a local network, which is the same as the device (GPU) id
 * \param[in] lGrpId the local group id in a local network, which specifies the group(s) to be copied
 * \param[in] dest pointer to runtime data destination
 * \param[in] allocateMem a flag indicates whether allocating memory space before copying
 *
 * \sa allocateSNN_GPU fetchSTPState
 * \since v3.0
 */
void SNN::copyExternalCurrent(int netId, int lGrpId, RuntimeData* dest, hipMemcpyKind kind, bool allocateMem) {
	// (Allocate and) upload external current for the requested neuron range.
	// Only host-to-device transfers are supported.
	checkAndSetGPUDevice(netId);
	checkDestSrcPtrs(dest, &managerRuntimeData, hipMemcpyHostToDevice, allocateMem, lGrpId, 0);// check that the destination pointer is properly allocated..
	assert(kind == hipMemcpyHostToDevice);

	// Resolve the neuron span to copy: the whole set of regular neurons (ALL),
	// or the span covered by one local group.
	int ptrPos;
	int length;
	if (lGrpId == ALL) {
		ptrPos = 0;
		length = networkConfigs[netId].numNReg;
	} else {
		assert(lGrpId >= 0);
		ptrPos = groupConfigs[netId][lGrpId].lStartN;
		length = groupConfigs[netId][lGrpId].numN;
	}
	assert(length >= 0 && length <= networkConfigs[netId].numNReg); // assert NOT poisson neurons
	//KERNEL_DEBUG("copyExternalCurrent: lGrpId=%d, ptrPos=%d, length=%d, allocate=%s", lGrpId, ptrPos, length, allocateMem?"y":"n");

	if (allocateMem)
		CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->extCurrent, sizeof(float) * length));
	CUDA_CHECK_ERRORS(hipMemcpy(&(dest->extCurrent[ptrPos]), &(managerRuntimeData.extCurrent[ptrPos]), sizeof(float) * length, hipMemcpyHostToDevice));
}
/*!
* \brief This function fetch the spike count in all local networks and sum the up
*/
void SNN::copyNetworkSpikeCount(int netId, hipMemcpyKind kind,
								unsigned int* spikeCountD1, unsigned int* spikeCountD2,
								unsigned int* spikeCountExtD1, unsigned int* spikeCountExtD2) {
	// Read the global spike counters of local network netId from device
	// symbols into the caller-provided host variables (device-to-host only).
	checkAndSetGPUDevice(netId);
	assert(kind == hipMemcpyDeviceToHost);

	CUDA_CHECK_ERRORS(hipMemcpyFromSymbol(spikeCountD1, spikeCountD1GPU, sizeof(int), 0, hipMemcpyDeviceToHost));
	CUDA_CHECK_ERRORS(hipMemcpyFromSymbol(spikeCountD2, spikeCountD2GPU, sizeof(int), 0, hipMemcpyDeviceToHost));
	CUDA_CHECK_ERRORS(hipMemcpyFromSymbol(spikeCountExtD1, spikeCountExtRxD1GPU, sizeof(int), 0, hipMemcpyDeviceToHost));
	CUDA_CHECK_ERRORS(hipMemcpyFromSymbol(spikeCountExtD2, spikeCountExtRxD2GPU, sizeof(int), 0, hipMemcpyDeviceToHost));
}
/*!
* \brief This function fetch spikeTables in the local network specified by netId
*
* \param[in] netId the id of local network of which timeTableD1(D2) and firingTableD1(D2) are copied to manager runtime data
*/
void SNN::copySpikeTables(int netId, hipMemcpyKind kind) {
	// Fetch the firing tables and time tables of local network netId into the
	// manager runtime data (device-to-host only).
	unsigned int gpuSpikeCountD1Sec, gpuSpikeCountD2Sec, gpuSpikeCountLastSecLeftD2;

	checkAndSetGPUDevice(netId);
	assert(kind == hipMemcpyDeviceToHost);

	// Read the per-second spike counters first; they determine how many entries
	// of each firing table are valid and therefore how much to copy.
	CUDA_CHECK_ERRORS(hipMemcpyFromSymbol(&gpuSpikeCountLastSecLeftD2, spikeCountLastSecLeftD2GPU, sizeof(int), 0, hipMemcpyDeviceToHost));
	CUDA_CHECK_ERRORS(hipMemcpyFromSymbol(&gpuSpikeCountD2Sec, spikeCountD2SecGPU, sizeof(int), 0, hipMemcpyDeviceToHost));
	CUDA_CHECK_ERRORS(hipMemcpyFromSymbol(&gpuSpikeCountD1Sec, spikeCountD1SecGPU, sizeof(int), 0, hipMemcpyDeviceToHost));
	// D2 also carries spikes left over from the previous second (delays > 1ms)
	CUDA_CHECK_ERRORS( hipMemcpy(managerRuntimeData.firingTableD2, runtimeData[netId].firingTableD2, sizeof(int)*(gpuSpikeCountD2Sec + gpuSpikeCountLastSecLeftD2), hipMemcpyDeviceToHost));
	CUDA_CHECK_ERRORS( hipMemcpy(managerRuntimeData.firingTableD1, runtimeData[netId].firingTableD1, sizeof(int)*gpuSpikeCountD1Sec, hipMemcpyDeviceToHost));
	CUDA_CHECK_ERRORS( hipMemcpyFromSymbol(managerRuntimeData.timeTableD2, timeTableD2GPU, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, hipMemcpyDeviceToHost));
	CUDA_CHECK_ERRORS( hipMemcpyFromSymbol(managerRuntimeData.timeTableD1, timeTableD1GPU, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, hipMemcpyDeviceToHost));
}
void SNN::copyTimeTable(int netId, hipMemcpyKind kind) {
	// Move the D1/D2 time tables between the device symbols and the host-side
	// manager buffers, in the direction selected by kind.
	assert(netId < CPU_RUNTIME_BASE);
	checkAndSetGPUDevice(netId);

	size_t tableBytes = sizeof(int) * (1000 + glbNetworkConfig.maxDelay + 1);
	if (kind == hipMemcpyDeviceToHost) {
		CUDA_CHECK_ERRORS(hipMemcpyFromSymbol(managerRuntimeData.timeTableD2, timeTableD2GPU, tableBytes, 0, hipMemcpyDeviceToHost));
		CUDA_CHECK_ERRORS(hipMemcpyFromSymbol(managerRuntimeData.timeTableD1, timeTableD1GPU, tableBytes, 0, hipMemcpyDeviceToHost));
	} else { // kind == hipMemcpyHostToDevice
		CUDA_CHECK_ERRORS(hipMemcpyToSymbol(timeTableD2GPU, managerRuntimeData.timeTableD2, tableBytes, 0, hipMemcpyHostToDevice));
		CUDA_CHECK_ERRORS(hipMemcpyToSymbol(timeTableD1GPU, managerRuntimeData.timeTableD1, tableBytes, 0, hipMemcpyHostToDevice));
	}
}
void SNN::copyExtFiringTable(int netId, hipMemcpyKind kind) {
	// Copy the external firing tables of network netId (per-group end indices
	// plus the per-group device pointers) into the manager buffers.
	assert(netId < CPU_RUNTIME_BASE);
	checkAndSetGPUDevice(netId);

	int numGrp = networkConfigs[netId].numGroups;
	CUDA_CHECK_ERRORS(hipMemcpy(managerRuntimeData.extFiringTableEndIdxD2, runtimeData[netId].extFiringTableEndIdxD2, sizeof(int) * numGrp, kind));
	CUDA_CHECK_ERRORS(hipMemcpy(managerRuntimeData.extFiringTableEndIdxD1, runtimeData[netId].extFiringTableEndIdxD1, sizeof(int) * numGrp, kind));
	CUDA_CHECK_ERRORS(hipMemcpy(managerRuntimeData.extFiringTableD2, runtimeData[netId].extFiringTableD2, sizeof(int*) * numGrp, kind));
	CUDA_CHECK_ERRORS(hipMemcpy(managerRuntimeData.extFiringTableD1, runtimeData[netId].extFiringTableD1, sizeof(int*) * numGrp, kind));
	//KERNEL_DEBUG("GPU0 D1ex:%d/D2ex:%d", managerRuntimeData.extFiringTableEndIdxD1[0], managerRuntimeData.extFiringTableEndIdxD2[0]);
}
// Enumerates CUDA devices, logs their properties, resets each device, and
// (when 2+ GPUs are present) tries to enable GPU0<->GPU1 peer access.
// Returns the number of CUDA devices found.
int SNN::configGPUDevice() {
	int devCount, devMax;
	hipDeviceProp_t deviceProp;

	CUDA_CHECK_ERRORS(hipGetDeviceCount(&devCount));
	KERNEL_INFO("CUDA devices Configuration:");
	KERNEL_INFO(" - Number of CUDA devices = %9d", devCount);

	devMax = CUDA_GET_MAXGFLOP_DEVICE_ID();
	KERNEL_INFO(" - CUDA device ID with max GFLOPs = %9d", devMax);

	for (int ithGPU = 0; ithGPU < devCount; ithGPU++) {
		CUDA_CHECK_ERRORS(hipGetDeviceProperties(&deviceProp, ithGPU));
		KERNEL_INFO(" + Use CUDA device[%1d] = %9s", ithGPU, deviceProp.name);
		KERNEL_INFO(" + CUDA Compute Capability (CC) = %2d.%d", deviceProp.major, deviceProp.minor);

		// FIX: this compute-capability check used to sit *after* the loop, so it
		// only inspected the last device probed (and read an uninitialized
		// deviceProp when devCount == 0). Check each device while its
		// properties are still loaded.
		if (deviceProp.major < 2) {
			// Unmark this when CC 1.3 is deprecated
			//KERNEL_ERROR("CARLsim does not support CUDA devices older than CC 2.0");
			//exitSimulation(1);
			KERNEL_WARN("CUDA device with CC 1.3 will be deprecated in a future release");
		}
	}

	// put every device into a clean state before the simulation allocates on it
	for (int ithGPU = 0; ithGPU < devCount; ithGPU++) {
		CUDA_CHECK_ERRORS(hipSetDevice(ithGPU));
		CUDA_DEVICE_RESET();
	}

	if (devCount >= 2) { // try to set up P2P access if 2 or more GPUs are present
		// FIXME: generalize the initialization for multi-GPUs up to 4 or 8
		// enable P2P access
		int canAccessPeer_0_1, canAccessPeer_1_0;
		hipDeviceCanAccessPeer(&canAccessPeer_0_1, 0, 1);
		hipDeviceCanAccessPeer(&canAccessPeer_1_0, 1, 0);
		// enable peer access between GPU0 and GPU1 (must hold in both directions)
		if (canAccessPeer_0_1 & canAccessPeer_1_0) {
			hipSetDevice(0);
			hipDeviceEnablePeerAccess(1, 0);
			hipSetDevice(1);
			hipDeviceEnablePeerAccess(0, 0);
			KERNEL_INFO("* Peer Access is enabled");
		} else {
			KERNEL_INFO("* Peer Access is not enabled");
		}
	}

	return devCount;
}
void SNN::convertExtSpikesD2_GPU(int netId, int startIdx, int endIdx, int GtoLOffset) {
	checkAndSetGPUDevice(netId);
	// Rewrite external spike entries of firingTableD2 in [startIdx, endIdx)
	// from global neuron ids to this network's local ids.
	hipLaunchKernelGGL(kernel_convertExtSpikesD2, dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0,
	                   startIdx, endIdx, GtoLOffset); // [StartIdx, EndIdx)
}
void SNN::convertExtSpikesD1_GPU(int netId, int startIdx, int endIdx, int GtoLOffset) {
	checkAndSetGPUDevice(netId);
	// Rewrite external spike entries of firingTableD1 in [startIdx, endIdx)
	// from global neuron ids to this network's local ids.
	hipLaunchKernelGGL(kernel_convertExtSpikesD1, dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0,
	                   startIdx, endIdx, GtoLOffset); // [StartIdx, EndIdx)
}
// Makes GPU `netId` the active device for the calling host thread,
// skipping the (relatively expensive) hipSetDevice call when it already is.
void SNN::checkAndSetGPUDevice(int netId) {
	int currentDevice;
	// FIX: restore '&currentDevice' — the source was corrupted to
	// 'hipGetDevice(¤tDevice)' by an HTML-entity mangling of "&curren",
	// which does not compile.
	hipGetDevice(&currentDevice);

	assert(netId >= 0 && netId < numAvailableGPUs);

	if (currentDevice != netId) {
		//KERNEL_DEBUG("Change GPU context from GPU %d to GPU %d", currentDevice, netId);
		CUDA_CHECK_ERRORS(hipSetDevice(netId));
	}
}
// deprecated
//void SNN::copyWeightsGPU(int nid, int src_grp) {
// checkAndSetGPUDevice("copyWeightsGPU");
//
// assert(nid < numNReg);
// unsigned int cumId = managerRuntimeData.cumulativePre[nid];
// float* synWts = &(managerRuntimeData.wt[cumId]);
// //TODO: NEEDED TO COMMENT THIS FOR CARLSIM 2.1-2.2 FILEMERGE -- KDC
// // assert(cumId >= (nid-numNPois));
// //assert(cumId < numPreSynapses*networkConfigs[0].numN);
//
// CUDA_CHECK_ERRORS( hipMemcpy( synWts, &runtimeData[0].wt[cumId], sizeof(float)*managerRuntimeData.Npre[nid], hipMemcpyDeviceToHost));
//}
// Allocates required memory and then initialize the GPU
// Allocates all GPU-side runtime structures for network `netId` and copies the
// host-side (manager) state down to the device. Logs a running tally of GPU
// memory consumption after each allocation phase. Must run before any
// simulation kernel is launched on this device; sets runtimeData[netId].allocated.
// NOTE(review): the phase order below is load-bearing (later copies rely on
// earlier allocations) — do not reorder.
void SNN::allocateSNN_GPU(int netId) {
checkAndSetGPUDevice(netId);
// setup memory type of GPU runtime data
runtimeData[netId].memType = GPU_MEM;
// display some memory management info
size_t avail, total, previous;
float toMB = ::pow(1024.0f, 2); // bytes-per-megabyte divisor
hipMemGetInfo(&avail,&total);
KERNEL_INFO("GPU Memory Management: (Total %2.3f MB)",(float)(total/toMB));
KERNEL_INFO("Data\t\t\tSize\t\tTotal Used\tTotal Available");
KERNEL_INFO("Init:\t\t\t%2.3f MB\t%2.3f MB\t%2.3f MB",(float)(total)/toMB,(float)((total-avail)/toMB),
(float)(avail/toMB));
previous=avail;
// allocate random number generator on GPU(s) — lazily, only on first call
if(runtimeData[netId].gpuRandGen == NULL) {
hiprandCreateGenerator(&runtimeData[netId].gpuRandGen, HIPRAND_RNG_PSEUDO_DEFAULT);
// per-net seed offset keeps multi-GPU runs from sharing a random stream
hiprandSetPseudoRandomGeneratorSeed(runtimeData[netId].gpuRandGen, randSeed_ + netId);
}
// allocate SNN::runtimeData[0].randNum for random number generators
CUDA_CHECK_ERRORS(hipMalloc((void **)&runtimeData[netId].randNum, networkConfigs[netId].numNPois * sizeof(float)));
hipMemGetInfo(&avail,&total);
KERNEL_INFO("Random Gen:\t\t%2.3f MB\t%2.3f MB\t%2.3f MB",(float)(previous-avail)/toMB, (float)((total-avail)/toMB),(float)(avail/toMB));
previous=avail;
// initialize runtimeData[0].neuronAllocation, __device__ loadBufferCount, loadBufferSize
allocateStaticLoad(netId, NUM_THREADS);
allocateGroupId(netId);
// this table is useful for quick evaluation of the position of fired neuron
// given a sequence of bits denoting the firing..
// initialize __device__ quickSynIdTableGPU[256]
initQuickSynIdTable(netId);
hipMemGetInfo(&avail,&total);
KERNEL_INFO("Static Load:\t\t%2.3f MB\t%2.3f MB\t%2.3f MB",(float)(previous-avail)/toMB, (float)((total-avail)/toMB),(float)(avail/toMB));
previous=avail;
// initialize (copy from SNN) runtimeData[0].Npre, runtimeData[0].Npre_plastic, runtimeData[0].Npre_plasticInv, runtimeData[0].cumulativePre
// initialize (copy from SNN) runtimeData[0].cumulativePost, runtimeData[0].Npost, runtimeData[0].postDelayInfo
// initialize (copy from SNN) runtimeData[0].postSynapticIds, runtimeData[0].preSynapticIds
copyPreConnectionInfo(netId, ALL, &runtimeData[netId], &managerRuntimeData, hipMemcpyHostToDevice, true);
copyPostConnectionInfo(netId, ALL, &runtimeData[netId], &managerRuntimeData, hipMemcpyHostToDevice, true);
hipMemGetInfo(&avail,&total);
KERNEL_INFO("Conn Info:\t\t%2.3f MB\t%2.3f MB\t%2.3f MB",(float)(previous-avail)/toMB,(float)((total-avail)/toMB), (float)(avail/toMB));
previous=avail;
// initialize (copy from SNN) runtimeData[0].wt, runtimeData[0].wtChange, runtimeData[0].maxSynWt
copySynapseState(netId, &runtimeData[netId], &managerRuntimeData, hipMemcpyHostToDevice, true);
hipMemGetInfo(&avail,&total);
KERNEL_INFO("Syn State:\t\t%2.3f MB\t%2.3f MB\t%2.3f MB",(float)(previous-avail)/toMB,(float)((total-avail)/toMB), (float)(avail/toMB));
previous=avail;
// copy the neuron state information to the GPU..
// initialize (copy from managerRuntimeData) runtimeData[0].recovery, runtimeData[0].voltage, runtimeData[0].current
// initialize (copy from managerRuntimeData) runtimeData[0].gGABAa, runtimeData[0].gGABAb, runtimeData[0].gAMPA, runtimeData[0].gNMDA
// initialize (copy from SNN) runtimeData[0].Izh_a, runtimeData[0].Izh_b, runtimeData[0].Izh_c, runtimeData[0].Izh_d
// initialize (copy form SNN) runtimeData[0].baseFiring, runtimeData[0].baseFiringInv
copyNeuronState(netId, ALL, &runtimeData[netId], hipMemcpyHostToDevice, true);
// copy STP state, considered as neuron state
if (sim_with_stp) {
// initialize (copy from SNN) stpu, stpx
copySTPState(netId, ALL, &runtimeData[netId], &managerRuntimeData, hipMemcpyHostToDevice, true);
}
hipMemGetInfo(&avail,&total);
KERNEL_INFO("Neuron State:\t\t%2.3f MB\t%2.3f MB\t%2.3f MB",(float)(previous-avail)/toMB,(float)((total-avail)/toMB), (float)(avail/toMB));
previous=avail;
// initialize (copy from SNN) runtimeData[0].grpDA(5HT,ACh,NE)
// initialize (copy from SNN) runtimeData[0].grpDA(5HT,ACh,NE)Buffer[]
copyGroupState(netId, ALL, &runtimeData[netId], &managerRuntimeData, hipMemcpyHostToDevice, true);
hipMemGetInfo(&avail,&total);
KERNEL_INFO("Group State:\t\t%2.3f MB\t%2.3f MB\t%2.3f MB",(float)(previous-avail)/toMB,(float)((total-avail)/toMB), (float)(avail/toMB));
previous=avail;
// initialize (hipMemset) runtimeData[0].I_set, runtimeData[0].poissonFireRate
// initialize (copy from SNN) runtimeData[0].firingTableD1, runtimeData[0].firingTableD2
// initialize (hipMalloc) runtimeData[0].spikeGenBits
// initialize (copy from managerRuntimeData) runtimeData[0].nSpikeCnt,
// initialize (copy from SNN) runtimeData[0].synSpikeTime, runtimeData[0].lastSpikeTime
copyAuxiliaryData(netId, ALL, &runtimeData[netId], hipMemcpyHostToDevice, true);
hipMemGetInfo(&avail,&total);
KERNEL_INFO("Auxiliary Data:\t\t%2.3f MB\t%2.3f MB\t%2.3f MB\n\n",(float)(previous-avail)/toMB,(float)((total-avail)/toMB), (float)(avail/toMB));
previous=avail;
// copy relevant pointers and network information to GPU
// (runtimeDataGPU is a __constant__ struct of device pointers used by all kernels)
CUDA_CHECK_ERRORS(hipMemcpyToSymbol(runtimeDataGPU, &runtimeData[netId], sizeof(RuntimeData), 0, hipMemcpyHostToDevice));
// copy data to from SNN:: to NetworkConfigRT SNN::networkConfigs[0]
copyNetworkConfig(netId); // FIXME: we can change the group properties such as STDP as the network is running. So, we need a way to updating the GPU when changes are made.
// TODO: move mulSynFast, mulSynSlow to ConnectConfig structure
// copy connection configs
CUDA_CHECK_ERRORS(hipMemcpyToSymbol(d_mulSynFast, mulSynFast, sizeof(float) * networkConfigs[netId].numConnections, 0, hipMemcpyHostToDevice));
CUDA_CHECK_ERRORS(hipMemcpyToSymbol(d_mulSynSlow, mulSynSlow, sizeof(float) * networkConfigs[netId].numConnections, 0, hipMemcpyHostToDevice));
copyGroupConfigs(netId);
// dump every group's configuration for debugging
KERNEL_DEBUG("Transfering group settings to GPU:");
for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroupsAssigned; lGrpId++) {
KERNEL_DEBUG("Settings for Group %s:", groupConfigMap[groupConfigs[netId][lGrpId].gGrpId].grpName.c_str());
KERNEL_DEBUG("\tType: %d",(int)groupConfigs[netId][lGrpId].Type);
KERNEL_DEBUG("\tNumN: %d",groupConfigs[netId][lGrpId].numN);
KERNEL_DEBUG("\tM: %d",groupConfigs[netId][lGrpId].numPostSynapses);
KERNEL_DEBUG("\tPreM: %d",groupConfigs[netId][lGrpId].numPreSynapses);
KERNEL_DEBUG("\tspikeGenerator: %d",(int)groupConfigs[netId][lGrpId].isSpikeGenerator);
KERNEL_DEBUG("\tFixedInputWts: %d",(int)groupConfigs[netId][lGrpId].FixedInputWts);
KERNEL_DEBUG("\tMaxDelay: %d",(int)groupConfigs[netId][lGrpId].MaxDelay);
KERNEL_DEBUG("\tWithSTDP: %d",(int)groupConfigs[netId][lGrpId].WithSTDP);
if (groupConfigs[netId][lGrpId].WithSTDP) {
KERNEL_DEBUG("\t\tE-STDP type: %s",stdpType_string[groupConfigs[netId][lGrpId].WithESTDPtype]);
KERNEL_DEBUG("\t\tTAU_PLUS_INV_EXC: %f",groupConfigs[netId][lGrpId].TAU_PLUS_INV_EXC);
KERNEL_DEBUG("\t\tTAU_MINUS_INV_EXC: %f",groupConfigs[netId][lGrpId].TAU_MINUS_INV_EXC);
KERNEL_DEBUG("\t\tALPHA_PLUS_EXC: %f",groupConfigs[netId][lGrpId].ALPHA_PLUS_EXC);
KERNEL_DEBUG("\t\tALPHA_MINUS_EXC: %f",groupConfigs[netId][lGrpId].ALPHA_MINUS_EXC);
KERNEL_DEBUG("\t\tI-STDP type: %s",stdpType_string[groupConfigs[netId][lGrpId].WithISTDPtype]);
KERNEL_DEBUG("\t\tTAU_PLUS_INV_INB: %f",groupConfigs[netId][lGrpId].TAU_PLUS_INV_INB);
KERNEL_DEBUG("\t\tTAU_MINUS_INV_INB: %f",groupConfigs[netId][lGrpId].TAU_MINUS_INV_INB);
KERNEL_DEBUG("\t\tALPHA_PLUS_INB: %f",groupConfigs[netId][lGrpId].ALPHA_PLUS_INB);
KERNEL_DEBUG("\t\tALPHA_MINUS_INB: %f",groupConfigs[netId][lGrpId].ALPHA_MINUS_INB);
KERNEL_DEBUG("\t\tLAMBDA: %f",groupConfigs[netId][lGrpId].LAMBDA);
KERNEL_DEBUG("\t\tDELTA: %f",groupConfigs[netId][lGrpId].DELTA);
KERNEL_DEBUG("\t\tBETA_LTP: %f",groupConfigs[netId][lGrpId].BETA_LTP);
KERNEL_DEBUG("\t\tBETA_LTD: %f",groupConfigs[netId][lGrpId].BETA_LTD);
}
KERNEL_DEBUG("\tWithSTP: %d",(int)groupConfigs[netId][lGrpId].WithSTP);
if (groupConfigs[netId][lGrpId].WithSTP) {
KERNEL_DEBUG("\t\tSTP_U: %f",groupConfigs[netId][lGrpId].STP_U);
// KERNEL_DEBUG("\t\tSTP_tD: %f",groupConfigs[netId][lGrpId].STP_tD);
// KERNEL_DEBUG("\t\tSTP_tF: %f",groupConfigs[netId][lGrpId].STP_tF);
}
KERNEL_DEBUG("\tspikeGen: %s", groupConfigs[netId][lGrpId].isSpikeGenFunc? "is Set" : "is not set ");
}
// allocation of gpu runtime data is done
runtimeData[netId].allocated = true;
// map the timing table to texture.. saves a lot of headache in using shared memory
// (the symbol's device address is bound to a texture; the byte offset returned by
// the bind call is converted to an element offset and stored in a device symbol
// so kernels can index the texture correctly)
void* devPtr;
size_t offset;
CUDA_CHECK_ERRORS(hipGetSymbolAddress(&devPtr, timeTableD2GPU));
CUDA_CHECK_ERRORS(hipBindTexture(&offset, timeTableD2GPU_tex, devPtr, sizeof(int) * TIMING_COUNT));
offset = offset / sizeof(int);
CUDA_CHECK_ERRORS(hipGetSymbolAddress(&devPtr, timeTableD2GPU_tex_offset));
CUDA_CHECK_ERRORS(hipMemcpy(devPtr, &offset, sizeof(int), hipMemcpyHostToDevice));
CUDA_CHECK_ERRORS(hipGetSymbolAddress(&devPtr, timeTableD1GPU));
CUDA_CHECK_ERRORS(hipBindTexture(&offset, timeTableD1GPU_tex, devPtr, sizeof(int) * TIMING_COUNT));
offset = offset / sizeof(int);
CUDA_CHECK_ERRORS(hipGetSymbolAddress(&devPtr, timeTableD1GPU_tex_offset));
CUDA_CHECK_ERRORS(hipMemcpy(devPtr, &offset, sizeof(int), hipMemcpyHostToDevice));
initGPU(netId);
}
| b0a556172cf0b78d1bb9c0809647ba234bbca721.cu | /* * Copyright (c) 2016 Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. The names of its contributors may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* *********************************************************************************************** *
* CARLsim
* created by: (MDR) Micah Richert, (JN) Jayram M. Nageswaran
* maintained by:
* (MA) Mike Avery <averym@uci.edu>
* (MB) Michael Beyeler <mbeyeler@uci.edu>,
* (KDC) Kristofor Carlson <kdcarlso@uci.edu>
* (TSC) Ting-Shuo Chou <tingshuc@uci.edu>
* (HK) Hirak J Kashyap <kashyaph@uci.edu>
*
* CARLsim v1.0: JM, MDR
* CARLsim v2.0/v2.1/v2.2: JM, MDR, MA, MB, KDC
* CARLsim3: MB, KDC, TSC
* CARLsim4: TSC, HK
*
* CARLsim available from http://socsci.uci.edu/~jkrichma/CARLsim/
* Ver 12/31/2016
*/
#include <snn.h>
#include <spike_buffer.h>
#include <error_code.h>
#include <cuda_runtime.h>
#define NUM_THREADS 128
#define NUM_BLOCKS 64
#define WARP_SIZE 32
///////////////////////////////////////////////////////////////////
// Some important ideas that explains the GPU execution are as follows:
// 1. Each GPU block has a local firing table (called fireTable). The block of threads
// reads a bunch of neurons parameters and determines if it needs to fire or not
// Whenever a neuron need to fire, it keeps track of the fired neuron in the local
// table. When the table is full, we go and write back the fireTable to the global
// firing table.
// 2. Firing information is maintained in two tables globally (timingTable and the globalFiringTable)
// for excitatory neuron population and inhibitory neurons.
// The globalFiringTable only stores a sequence of id corresponding to fired neurons.
// The timingTable store the total number of fired neurons till the current time step t.
// These two tables are flushed and adjusted every second.
// This approach requires about half of the memory compared to the traditional AER scheme which
// stores the firing time and firing id together.
// For more details kindly read the enclosed report (report.pdf) in the source directory
//
//
// timeTableD2GPU[0] always is 0 -- index into firingTableD2
// timeTableD2GPU[maxDelay_] -- should be the number of spikes "leftover" from the previous second
// timeTableD2GPU[maxDelay_+1]-timeTableD2GPU[maxDelay_] -- should be the number of spikes in the first ms of the current second
// timeTableD2GPU[1000+maxDelay_] -- should be the number of spikes in the current second + the leftover spikes.
//
///////////////////////////////////////////////////////////////////
// --- device-global spike bookkeeping ---
// Cumulative spike counts per ms for delay>1 (D2) and delay==1 (D1) populations.
__device__ unsigned int timeTableD2GPU[TIMING_COUNT];
__device__ unsigned int timeTableD1GPU[TIMING_COUNT];
// spike counters for the current second (Sec) and the whole run
__device__ unsigned int spikeCountD2SecGPU;
__device__ unsigned int spikeCountD1SecGPU;
__device__ unsigned int spikeCountD2GPU;
__device__ unsigned int spikeCountD1GPU;
// "test" counters used to detect firing-table overflow before slots are claimed
__device__ unsigned int secD2fireCntTest;
__device__ unsigned int secD1fireCntTest;
// D2 spikes carried over from the previous second (still within maxDelay)
__device__ unsigned int spikeCountLastSecLeftD2GPU;
// counters for spikes received from other networks (external connections)
__device__ unsigned int spikeCountExtRxD1SecGPU;
__device__ unsigned int spikeCountExtRxD2SecGPU;
__device__ unsigned int spikeCountExtRxD2GPU;
__device__ unsigned int spikeCountExtRxD1GPU;
// --- per-network configuration mirrored into GPU constant memory ---
__device__ __constant__ RuntimeData runtimeDataGPU;
__device__ __constant__ NetworkConfigRT networkConfigGPU;
__device__ __constant__ GroupConfigRT groupConfigsGPU[MAX_GRP_PER_SNN];
// per-connection fast/slow conductance multipliers
__device__ __constant__ float d_mulSynFast[MAX_CONN_PER_SNN];
__device__ __constant__ float d_mulSynSlow[MAX_CONN_PER_SNN];
// static thread-load table dimensions (set from host in allocateStaticLoad)
__device__ int loadBufferCount;
__device__ int loadBufferSize;
texture <int, 1, cudaReadModeElementType> timeTableD2GPU_tex;
texture <int, 1, cudaReadModeElementType> timeTableD1GPU_tex;
texture <int, 1, cudaReadModeElementType> groupIdInfo_tex; // groupIDInfo is allocated using cudaMalloc thus doesn't require an offset when using textures
// element offsets of the timing-table symbols inside their bound textures
__device__ int timeTableD1GPU_tex_offset;
__device__ int timeTableD2GPU_tex_offset;
// example of the quick synaptic table
// index cnt
// 0000000 - 0
// 0000001 - 0
// 0000010 - 1
// 0100000 - 5
// 0110000 - 4
// host-side staging copy and its __device__ mirror (see initQuickSynIdTable)
int quickSynIdTable[256];
__device__ int quickSynIdTableGPU[256];
// Builds the lowest-set-bit lookup table on the host and copies it to the
// __device__ symbol quickSynIdTableGPU on GPU `netId`.
// quickSynIdTable[i] = index (0..7) of the lowest set bit of byte i, for i >= 1.
// Entry 0 is left at its zero-initialized value.
void initQuickSynIdTable(int netId) {
	void* devPtr;

	for (int i = 1; i < 256; i++) {
		int lowestBit = 0;
		while (((i >> lowestBit) & 1) == 0) {
			lowestBit++;
			assert(lowestBit <= 7); // a nonzero byte must have a set bit within 8 positions
		}
		quickSynIdTable[i] = lowestBit;
	}

	// push the finished table into the device symbol on the selected GPU
	cudaSetDevice(netId);
	cudaGetSymbolAddress(&devPtr, quickSynIdTableGPU);
	CUDA_CHECK_ERRORS(cudaMemcpy(devPtr, quickSynIdTable, sizeof(quickSynIdTable), cudaMemcpyHostToDevice));
}
// True iff local group `lGrpId` is a Poisson spike-generator group
// (POISSON_NEURON bit set in its type flags).
__device__ inline bool isPoissonGroup(short int lGrpId) {
	return (groupConfigsGPU[lGrpId].Type & POISSON_NEURON) != 0;
}
// Atomically marks synapse `synId` of neuron `lNId` as having received a spike.
// I_set is laid out as rows of 32-synapse bit-words, rows I_setPitch bytes apart;
// row = synId / 32, column (per-neuron word) = lNId, bit = synId % 32.
__device__ inline void setFiringBitSynapses(int lNId, int synId) {
	char* rowBase = (char*)runtimeDataGPU.I_set + ((synId >> 5) * networkConfigGPU.I_setPitch);
	unsigned int* wordPtr = (unsigned int*)rowBase + lNId;
	atomicOr(wordPtr, 1 << (synId % 32));
}
// Returns a pointer to neuron `lNId`'s 32-synapse bit-word in row `synId`
// of the I_set matrix (rows are I_setPitch bytes apart).
__device__ inline unsigned int* getFiringBitGroupPtr(int lNId, int synId) {
	char* rowBase = (char*)runtimeDataGPU.I_set + synId * networkConfigGPU.I_setPitch;
	return (unsigned int*)rowBase + lNId;
}
// Index into the circular STP buffer: (maxDelay + 1) time slices, each
// STP_Pitch entries wide; slice is selected by (simTime + 1) mod (maxDelay + 1).
__device__ inline int getSTPBufPos(int lNId, int simTime) {
	return ((simTime + 1) % (networkConfigGPU.maxDelay + 1)) * networkConfigGPU.STP_Pitch + lNId;
}
// Fetches the precomputed static thread-load entry for buffer slot `bufPos`
// (x = start neuron id, y = packed group id + buffer size; see allocateStaticLoad).
__device__ inline int2 getStaticThreadLoad(int bufPos) {
	return runtimeDataGPU.neuronAllocation[bufPos];
}
// Bernoulli draw for a Poisson neuron in the current ms: fire iff the uniform
// random value (scaled by 1000) falls below the neuron's target rate.
// A rate of 1000 (Hz) therefore fires every ms. Poisson neurons are indexed
// after the numNReg regular neurons, hence the offset.
__device__ inline bool getPoissonSpike(int lNId) {
	int poisIdx = lNId - networkConfigGPU.numNReg;
	return runtimeDataGPU.randNum[poisIdx] * 1000.0f < runtimeDataGPU.poissonFireRate[poisIdx];
}
// Reads the fire/no-fire flag for position `nidPos` from the packed
// spikeGenBits array (32 one-bit flags per unsigned word).
__device__ inline bool getSpikeGenBit(unsigned int nidPos) {
	const unsigned int word = runtimeDataGPU.spikeGenBits[nidPos / 32];
	return (word >> (nidPos % 32)) & 0x1;
}
/*!
* \brief This device function updates the average firing rate of each neuron, which is required for homeostasis
*
* \param[in] lNId The neuron id to be updated
* \param[in] lGrpId The group id of the neuron
*/
/*!
 * \brief Applies the homeostasis decay to neuron `lNId`'s running average firing rate.
 *
 * \param[in] lNId   local neuron id to update
 * \param[in] lGrpId local group id of the neuron (supplies the decay factor)
 */
__device__ inline void updateHomeoStaticState(int lNId, int lGrpId) {
	runtimeDataGPU.avgFiring[lNId] *= groupConfigsGPU[lGrpId].avgTimeScale_decay;
}
/*!
* \brief After every time step we update the time table
*
* Only one cuda thread is required for updating the time table
*
* \param[in] simTime The current time step
*/
/*!
 * \brief Records the cumulative per-second spike counts for time step `simTime`.
 *
 * A single thread suffices: thread (0,0) writes this ms's cumulative counts
 * into both timing tables at slot simTime + maxDelay + 1 (the D2 count also
 * includes the spikes carried over from the previous second).
 */
__global__ void kernel_updateTimeTable(int simTime) {
	if (threadIdx.x == 0 && blockIdx.x == 0) {
		const int slot = simTime + networkConfigGPU.maxDelay + 1;
		timeTableD2GPU[slot] = spikeCountD2SecGPU + spikeCountLastSecLeftD2GPU;
		timeTableD1GPU[slot] = spikeCountD1SecGPU;
	}
	__syncthreads();
}
/////////////////////////////////////////////////////////////////////////////////
// Device Kernel Function: Intialization of the GPU side of the simulator ///
// KERNEL: This kernel is called after initialization of various parameters ///
// so that we can reset all required parameters. ///
/////////////////////////////////////////////////////////////////////////////////
// Zeroes all device-global spike bookkeeping: both timing tables (one entry
// per thread up to TIMING_COUNT) and, via a single thread, every scalar counter.
__global__ void kernel_initGPUMemory() {
	// FIXME: use parallel access
	int slot = blockIdx.x * blockDim.x + threadIdx.x;
	if (slot < TIMING_COUNT) {
		timeTableD2GPU[slot] = 0;
		timeTableD1GPU[slot] = 0;
	}

	// one thread resets the scalar spike counters
	if (threadIdx.x == 0 && blockIdx.x == 0) {
		spikeCountD2SecGPU = 0;
		spikeCountD1SecGPU = 0;
		spikeCountD2GPU = 0;
		spikeCountD1GPU = 0;
		secD2fireCntTest = 0;
		secD1fireCntTest = 0;
		spikeCountLastSecLeftD2GPU = 0;
		spikeCountExtRxD2GPU = 0;
		spikeCountExtRxD1GPU = 0;
		spikeCountExtRxD2SecGPU = 0;
		spikeCountExtRxD1SecGPU = 0;
	}
}
// Allocation of the group and its id..
// Builds the per-group (startN, endN, grpId) table on the host, uploads it to
// runtimeData[netId].groupIdInfo, and binds it to groupIdInfo_tex for fast
// cached lookups inside kernels. Must only run once per network.
void SNN::allocateGroupId(int netId) {
	checkAndSetGPUDevice(netId);

	assert(runtimeData[netId].groupIdInfo == NULL);

	const int numGroups = networkConfigs[netId].numGroups;
	int3* hostTable = (int3*)malloc(sizeof(int3) * numGroups);
	for (int lGrpId = 0; lGrpId < numGroups; lGrpId++) {
		hostTable[lGrpId] = make_int3(groupConfigs[netId][lGrpId].lStartN,
		                              groupConfigs[netId][lGrpId].lEndN,
		                              lGrpId);
	}

	CUDA_CHECK_ERRORS(cudaMalloc((void**)&runtimeData[netId].groupIdInfo, sizeof(int3) * numGroups));
	CUDA_CHECK_ERRORS(cudaMemcpy(runtimeData[netId].groupIdInfo, hostTable, sizeof(int3) * numGroups, cudaMemcpyHostToDevice));
	CUDA_CHECK_ERRORS(cudaBindTexture(NULL, groupIdInfo_tex, runtimeData[netId].groupIdInfo, sizeof(int3) * numGroups));

	free(hostTable);
}
/************************ VARIOUS KERNELS FOR FIRING CALCULATION AND FIRING UPDATE ****************************/
// Static Thread Load Allocation...
// This function is necessary for static allocation of load that each CUDA-SM needs for its computation.
// We store the static load allocation using the following format
// Neuron starting position (32 bit): Group identification (16) : Buffer size (16 bit)
// if we have 3 groups. grp(1) = 400 neurons, grp(2) = 100, grp(3) = 600
// The allocated static table will look as follows..
//-------------------------
// start | grp | size
//-------------------------
// 0 : 0 : 256
// 256 : 0 : 144
// 400 : 1 : 100
// 500 : 2 : 256
// 756 : 2 : 256
// 1012 : 2 : 88
//-----------------------
// Partitions every group's neurons into chunks of at most `bufSize` and writes
// the resulting (start neuron, packed grpId|size) table to the GPU, together
// with the __device__ scalars loadBufferCount / loadBufferSize.
// Returns the number of buffer entries created.
// Packing: entry.x = first neuron id of the chunk; entry.y = grpId | (size << 16),
// so group ids and chunk sizes must each fit in 16 bits.
int SNN::allocateStaticLoad(int netId, int bufSize) {
checkAndSetGPUDevice(netId);
// only one thread does the static load table
int bufferCnt = 0;
// first pass: count how many chunks the table needs so we can size the host buffer
for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroups; lGrpId++) {
int grpBufCnt = (int) ceil(1.0f * groupConfigs[netId][lGrpId].numN / bufSize);
assert(grpBufCnt >= 0);
bufferCnt += grpBufCnt;
KERNEL_DEBUG("Grp Size = %d, Total Buffer Cnt = %d, Buffer Cnt = %d", groupConfigs[netId][lGrpId].numN, bufferCnt, grpBufCnt);
}
assert(bufferCnt > 0);
int2* tempNeuronAllocation = (int2*)malloc(sizeof(int2) * bufferCnt);
KERNEL_DEBUG("STATIC THREAD ALLOCATION");
KERNEL_DEBUG("------------------------");
KERNEL_DEBUG("Buffer Size = %d, Buffer Count = %d", bufSize, bufferCnt);
// second pass: fill one entry per bufSize-sized chunk of each group
bufferCnt = 0;
for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroups; lGrpId++) {
for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId += bufSize) {
int2 threadLoad;
// starting neuron id is saved...
threadLoad.x = lNId;
if ((lNId + bufSize - 1) <= groupConfigs[netId][lGrpId].lEndN)
// grpID + full size
threadLoad.y = (lGrpId + (bufSize << 16)); // can't support group id > 2^16
else
// grpID + left-over size
threadLoad.y = (lGrpId + ((groupConfigs[netId][lGrpId].lEndN - lNId + 1) << 16)); // can't support group id > 2^16
// fill the static load distribution here...
int testGrpId = STATIC_LOAD_GROUP(threadLoad);
tempNeuronAllocation[bufferCnt] = threadLoad;
KERNEL_DEBUG("%d. Start=%d, size=%d grpId=%d:%s (SpikeMonId=%d) (GroupMonId=%d)",
bufferCnt, STATIC_LOAD_START(threadLoad),
STATIC_LOAD_SIZE(threadLoad),
STATIC_LOAD_GROUP(threadLoad),
groupConfigMap[groupConfigs[netId][testGrpId].gGrpId].grpName.c_str(),
groupConfigMDMap[groupConfigs[netId][testGrpId].gGrpId].spikeMonitorId,
groupConfigMDMap[groupConfigs[netId][testGrpId].gGrpId].groupMonitorId);
bufferCnt++;
}
}
// must run before GPU runtime data is allocated (table feeds the allocation)
assert(runtimeData[netId].allocated == false);
// Finally writeback the total bufferCnt
// Note down the buffer size for reference
KERNEL_DEBUG("GPU loadBufferSize = %d, GPU loadBufferCount = %d", bufSize, bufferCnt);
CUDA_CHECK_ERRORS(cudaMemcpyToSymbol(loadBufferCount, &bufferCnt, sizeof(int), 0, cudaMemcpyHostToDevice));
CUDA_CHECK_ERRORS(cudaMemcpyToSymbol(loadBufferSize, &bufSize, sizeof(int), 0, cudaMemcpyHostToDevice));
CUDA_CHECK_ERRORS(cudaMalloc((void**) &runtimeData[netId].neuronAllocation, sizeof(int2) * bufferCnt));
CUDA_CHECK_ERRORS(cudaMemcpy(runtimeData[netId].neuronAllocation, tempNeuronAllocation, sizeof(int2) * bufferCnt, cudaMemcpyHostToDevice));
free(tempNeuronAllocation);
return bufferCnt;
}
//////////////////////////////////////////////////
// 1. KERNELS used when a specific neuron fires //
//////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////
// Device local function: Update the STP Variables ///
// update the STPU and STPX variable after firing ///
/////////////////////////////////////////////////////////////////////////////////
// update the spike-dependent part of du/dt and dx/dt
// Applies the spike-dependent part of the short-term-plasticity update for
// neuron `nid` of group `grpId` at time `simTime`.
// NOTE: statement order is load-bearing — stpu[ind_plus] must be updated
// before it is read in the stpx update (u^+ depends on the just-added term).
__device__ void firingUpdateSTP (int nid, int simTime, short int grpId) {
// we need to retrieve the STP values from the right buffer position (right before vs. right after the spike)
int ind_plus = getSTPBufPos(nid, simTime);
int ind_minus = getSTPBufPos(nid, (simTime - 1));
// at this point, stpu[ind_plus] has already been assigned, and the decay applied
// so add the spike-dependent part to that
// du/dt = -u/tau_F + U * (1-u^-) * \delta(t-t_{spk})
runtimeDataGPU.stpu[ind_plus] += groupConfigsGPU[grpId].STP_U * (1.0f - runtimeDataGPU.stpu[ind_minus]);
// dx/dt = (1-x)/tau_D - u^+ * x^- * \delta(t-t_{spk})
runtimeDataGPU.stpx[ind_plus] -= runtimeDataGPU.stpu[ind_plus] * runtimeDataGPU.stpx[ind_minus];
}
// Post-spike reset for a regular (Izhikevich) neuron: v <- c, u <- u + d,
// plus the STDP timestamp and homeostasis bookkeeping where enabled.
__device__ void resetFiredNeuron(int lNId, short int lGrpId, int simTime) {
	// \FIXME \TODO: convert this to use coalesced access by grouping into a
	// single 16 byte access. This might improve bandwidth performance
	// This is fully uncoalsced access...need to convert to coalsced access..
	runtimeDataGPU.voltage[lNId] = runtimeDataGPU.Izh_c[lNId];
	runtimeDataGPU.recovery[lNId] += runtimeDataGPU.Izh_d[lNId];

	// remember the spike time for STDP traces
	if (groupConfigsGPU[lGrpId].WithSTDP)
		runtimeDataGPU.lastSpikeTime[lNId] = simTime;

	if (networkConfigGPU.sim_with_homeostasis) {
		// with homeostasis flag can be used here: bump the running average by one spike's worth
		runtimeDataGPU.avgFiring[lNId] += 1000/(groupConfigsGPU[lGrpId].avgTimeScale*1000);
	}
}
/*!
* \brief 1. Copy neuron id from local table to global firing table. 2. Reset all neuron properties of neuron id in local table
*
*
* \param[in] fireTablePtr the local shared memory firing table with neuron ids of fired neuron
* \param[in] fireCntD2 the number of neurons in local table that has fired with group's max delay == 1
* \param[in] fireCntD1 the number of neurons in local table that has fired with group's max delay > 1
* \param[in] simTime the current time step, stored as neuron firing time entry
*/
// Claims slots in the global firing tables for this block's fired neurons.
// Two-phase protocol: the "test" counters are bumped first so an overflow can
// be detected (and reported via blkErrCode) before any slot in the real
// firing tables is claimed. On success, cntD2/cntD1 receive the block's base
// offsets into firingTableD2/firingTableD1 and blkErrCode is 0.
__device__ void updateSpikeCount(volatile unsigned int& fireCnt, volatile unsigned int& fireCntD1, volatile unsigned int& cntD2, volatile unsigned int& cntD1, volatile int& blkErrCode) {
// split the block total into delay>1 (D2) and delay==1 (D1) portions
int fireCntD2 = fireCnt - fireCntD1;
cntD2 = atomicAdd(&secD2fireCntTest, fireCntD2);
cntD1 = atomicAdd(&secD1fireCntTest, fireCntD1);
//check for overflow in the firing table size....
if(secD2fireCntTest>networkConfigGPU.maxSpikesD2) {
blkErrCode = NEW_FIRE_UPDATE_OVERFLOW_ERROR2;
return;
}
else if(secD1fireCntTest>networkConfigGPU.maxSpikesD1) {
blkErrCode = NEW_FIRE_UPDATE_OVERFLOW_ERROR1;
return;
}
blkErrCode = 0;
// get a distinct counter to store firing info
// into the firing table
// (D2 offsets also skip the spikes carried over from the previous second)
cntD2 = atomicAdd(&spikeCountD2SecGPU, fireCntD2) + spikeCountLastSecLeftD2GPU;
cntD1 = atomicAdd(&spikeCountD1SecGPU, fireCntD1);
}
// update the firing table...
// Appends fired neuron `lNId` to the appropriate global firing table:
// delay-1 groups go into firingTableD1, all other groups into firingTableD2.
// cntD2/cntD1 are the block's running write cursors (claimed in updateSpikeCount).
__device__ void updateFiringTable(int lNId, short int lGrpId, volatile unsigned int& cntD2, volatile unsigned int& cntD1) {
	if (groupConfigsGPU[lGrpId].MaxDelay == 1) {
		// this group has a delay of only 1
		int pos = atomicAdd((int*)&cntD1, 1);
		runtimeDataGPU.firingTableD1[pos] = lNId;
	} else {
		// all other groups are dumped here
		int pos = atomicAdd((int*)&cntD2, 1);
		runtimeDataGPU.firingTableD2[pos] = lNId;
	}
}
// update the firing table...
// Appends fired neuron `lNId` to its group's *external* firing table (spikes
// destined for other networks), converting the local id to a global id via
// the group's LtoGOffset. Delay-1 groups use the D1 table, the rest use D2.
__device__ void updateExtFiringTable(int lNId, short int lGrpId) {
	const int globalNId = lNId + groupConfigsGPU[lGrpId].LtoGOffset; // convert to global neuron id
	if (groupConfigsGPU[lGrpId].MaxDelay == 1) {
		// this group has a delay of only 1
		int pos = atomicAdd((int*)&runtimeDataGPU.extFiringTableEndIdxD1[lGrpId], 1);
		runtimeDataGPU.extFiringTableD1[lGrpId][pos] = globalNId;
	} else {
		// all other groups are dumped here
		int pos = atomicAdd((int*)&runtimeDataGPU.extFiringTableEndIdxD2[lGrpId], 1);
		runtimeDataGPU.extFiringTableD2[lGrpId][pos] = globalNId;
	}
}
// Flushes a block-local firing table to the global firing tables and applies
// per-spike neuron updates (STP, external routing, spike counts, post-spike reset).
// Returns 0 on success or an overflow error code from updateSpikeCount.
// Protocol: thread 0 claims the block's slots atomically; after a barrier,
// all threads cooperatively write out the fireCnt entries.
__device__ int updateNewFirings(int* fireTablePtr, short int* fireGrpId,
volatile unsigned int& fireCnt, volatile unsigned int& fireCntD1, int simTime) {
// block-shared write cursors and error flag, published by thread 0
__shared__ volatile unsigned int cntD2;
__shared__ volatile unsigned int cntD1;
__shared__ volatile int blkErrCode;
blkErrCode = 0;
if (threadIdx.x == 0) {
updateSpikeCount(fireCnt, fireCntD1, cntD2, cntD1, blkErrCode);
}
// make cntD2/cntD1/blkErrCode visible to the whole block before use
__syncthreads();
// if we overflow the spike buffer space that is available,
// then we return with an error here...
if (blkErrCode)
return blkErrCode;
for (int i = threadIdx.x; i < fireCnt; i += blockDim.x) {
// Read the firing id from the local table.....
int lNId = fireTablePtr[i];
updateFiringTable(lNId, fireGrpId[i], cntD2, cntD1);
if (groupConfigsGPU[fireGrpId[i]].hasExternalConnect)
updateExtFiringTable(lNId, fireGrpId[i]);
if (groupConfigsGPU[fireGrpId[i]].WithSTP)
firingUpdateSTP(lNId, simTime, fireGrpId[i]);
// keep track of number spikes per neuron
runtimeDataGPU.nSpikeCnt[lNId]++;
// only neurons would do the remaining settings...
// pure poisson generators will return without changing anything else..
if (IS_REGULAR_NEURON(lNId, networkConfigGPU.numNReg, networkConfigGPU.numNPois))
resetFiredNeuron(lNId, fireGrpId[i], simTime);
}
__syncthreads();
return 0;
}
// zero GPU spike counts
// Zeroes the per-neuron spike counters, restricted to group `lGrpId`
// (or every group when lGrpId == ALL). Iterates the static thread-load
// buffers: one block per buffer entry, one thread per neuron in the chunk.
__global__ void kernel_resetNSpikeCnt(int lGrpId) {
const int totBuffers = loadBufferCount;
for (int bufPos = blockIdx.x; bufPos < totBuffers; bufPos += gridDim.x) {
// KILLME !!! This can be further optimized ....
// instead of reading each neuron group separately .....
// read a whole buffer and use the result ......
int2 threadLoad = getStaticThreadLoad(bufPos);
int nid = (STATIC_LOAD_START(threadLoad) + threadIdx.x);
int lastId = STATIC_LOAD_SIZE(threadLoad);
int grpId = STATIC_LOAD_GROUP(threadLoad);
// NOTE(review): 'nid <= lastId' compares an absolute neuron id against the
// chunk SIZE extracted by STATIC_LOAD_SIZE — verify the macro's semantics;
// a bound of 'threadIdx.x < lastId' may be what was intended.
if ((lGrpId == ALL || lGrpId == grpId) && (nid <= lastId)) {
runtimeDataGPU.nSpikeCnt[nid] = 0;
}
}
}
/*!
 * \brief Host-side wrapper that resets per-neuron spike counters on device netId.
 *
 * \param[in] netId  the network (device) whose counters are reset; must be GPU-resident
 * \param[in] lGrpId local group id to reset, or ALL to clear every neuron's counter
 */
void SNN::resetSpikeCnt_GPU(int netId, int lGrpId) {
	assert(runtimeData[netId].memType == GPU_MEM);
	// both branches operate on netId's device, so select it once up front
	checkAndSetGPUDevice(netId);
	if (lGrpId == ALL) {
		// all groups: a plain memset over the whole counter array is the cheapest path
		CUDA_CHECK_ERRORS(cudaMemset((void*)runtimeData[netId].nSpikeCnt, 0, sizeof(int) * networkConfigs[netId].numN));
	} else {
		// single group: the kernel filters neurons belonging to lGrpId
		kernel_resetNSpikeCnt<<<NUM_BLOCKS, NUM_THREADS>>>(lGrpId);
		// kernel launches don't return errors directly; catch launch-config failures here
		CUDA_CHECK_ERRORS(cudaGetLastError());
	}
}
#define LTP_GROUPING_SZ 16 //!< synaptic grouping for LTP Calculation
/*!
* \brief Computes the STDP update values for each of fired neurons stored in the local firing table.
*
* \param[in] fireTablePtr the local firing table with neuron ids of fired neuron
* \param[in] fireCnt the number of fired neurons in local firing table
* \param[in] simTime the current time step, stored as neuron firing time entry
*/
__device__ void updateLTP(int* fireTablePtr, short int* fireGrpId, volatile unsigned int& fireCnt, int simTime) {
	// LTP_GROUPING_SZ threads cooperate on one fired neuron: the quotient of threadIdx.x
	// selects the neuron, the remainder strides across that neuron's plastic pre-synapses
	for(int pos=threadIdx.x/LTP_GROUPING_SZ; pos < fireCnt; pos += (blockDim.x/LTP_GROUPING_SZ)) {
		// each neuron has two variable pre and pre_exc
		// pre: number of pre-neuron
		// pre_exc: number of neuron had has plastic connections
		short int grpId = fireGrpId[pos];
		// STDP calculation: the post-synaptic neron fires after the arrival of pre-synaptic neuron's spike
		if (groupConfigsGPU[grpId].WithSTDP) { // MDR, FIXME this probably will cause more thread divergence than need be...
			int nid = fireTablePtr[pos];
			// plastic synapses occupy the first Npre_plastic slots of the pre-synapse list
			unsigned int end_p = runtimeDataGPU.cumulativePre[nid] + runtimeDataGPU.Npre_plastic[nid];
			for(unsigned int p = runtimeDataGPU.cumulativePre[nid] + threadIdx.x % LTP_GROUPING_SZ;
					p < end_p;
					p+=LTP_GROUPING_SZ) {
				// time since the last pre-synaptic spike arrived at this synapse
				int stdp_tDiff = (simTime - runtimeDataGPU.synSpikeTime[p]);
				if (stdp_tDiff > 0) {
					if (groupConfigsGPU[grpId].WithESTDP) {
						// Handle E-STDP curves
						switch (groupConfigsGPU[grpId].WithESTDPcurve) {
						case EXP_CURVE: // exponential curve
							// skip when tDiff/tau > 25: the exponential is negligibly small
							if (stdp_tDiff * groupConfigsGPU[grpId].TAU_PLUS_INV_EXC < 25)
								runtimeDataGPU.wtChange[p] += STDP(stdp_tDiff, groupConfigsGPU[grpId].ALPHA_PLUS_EXC, groupConfigsGPU[grpId].TAU_PLUS_INV_EXC);
							break;
						case TIMING_BASED_CURVE: // sc curve
							if (stdp_tDiff * groupConfigsGPU[grpId].TAU_PLUS_INV_EXC < 25) {
								// inside the GAMMA window: offset OMEGA plus scaled STDP; outside: depression
								if (stdp_tDiff <= groupConfigsGPU[grpId].GAMMA)
									runtimeDataGPU.wtChange[p] += groupConfigsGPU[grpId].OMEGA + groupConfigsGPU[grpId].KAPPA * STDP(stdp_tDiff, groupConfigsGPU[grpId].ALPHA_PLUS_EXC, groupConfigsGPU[grpId].TAU_PLUS_INV_EXC);
								else // stdp_tDiff > GAMMA
									runtimeDataGPU.wtChange[p] -= STDP(stdp_tDiff, groupConfigsGPU[grpId].ALPHA_PLUS_EXC, groupConfigsGPU[grpId].TAU_PLUS_INV_EXC);
							}
							break;
						default:
							break;
						}
					}
					if (groupConfigsGPU[grpId].WithISTDP) {
						// Handle I-STDP curves
						switch (groupConfigsGPU[grpId].WithISTDPcurve) {
						case EXP_CURVE: // exponential curve
							if (stdp_tDiff * groupConfigsGPU[grpId].TAU_PLUS_INV_INB < 25) { // LTP of inhibitory synapse, which decreases synapse weight
								runtimeDataGPU.wtChange[p] -= STDP(stdp_tDiff, groupConfigsGPU[grpId].ALPHA_PLUS_INB, groupConfigsGPU[grpId].TAU_PLUS_INV_INB);
							}
							break;
						case PULSE_CURVE: // pulse curve
							if (stdp_tDiff <= groupConfigsGPU[grpId].LAMBDA) { // LTP of inhibitory synapse, which decreases synapse weight
								runtimeDataGPU.wtChange[p] -= groupConfigsGPU[grpId].BETA_LTP;
							} else if (stdp_tDiff <= groupConfigsGPU[grpId].DELTA) { // LTD of inhibitory syanpse, which increase sysnapse weight
								runtimeDataGPU.wtChange[p] -= groupConfigsGPU[grpId].BETA_LTD;
							}
							break;
						default:
							break;
						}
					}
				}
			}
		}
	}
	__syncthreads();
}
#define FIRE_CHUNK_CNT 512
/*!
* \brief This kernel is responsible for finding the neurons that need to be fired.
*
* We use a buffered firing table that allows neuron to gradually load
* the buffer and make it easy to carry out the calculations in a single group.
* A single function is used for simple neurons and also for poisson neurons.
* The function also update LTP
*
* device access: spikeCountD2SecGPU, spikeCountD1SecGPU
* net access: numNReg numNPois, numN, sim_with_stdp, sim_in_testing, sim_with_homeostasis, maxSpikesD1, maxSpikesD2
* grp access: Type, spikeGenFunc, Noffset, withSpikeCounter, spkCntBufPos, StartN, WithSTP, avgTimeScale
WithSTDP, WithESTDP, WithISTDP, WithESTDPCurve, With ISTDPCurve, all STDP parameters
* rtd access: randNum, poissonFireRate, spkCntBuf, nSpikeCnt, voltage, recovery, Izh_c, Izh_d
* cumulativePre, Npre_plastic, (R)synSpikeTime, (W)lastSpikeTime, (W)wtChange,
* avgFiring
*/
__global__ void kernel_findFiring (int simTime) {
	// shared counters for the block-local firing buffer (see FIRE_CHUNK_CNT)
	__shared__ volatile unsigned int fireCnt;     // accepted spikes in the local buffer
	__shared__ volatile unsigned int fireCntTest; // attempted spikes (may exceed buffer size)
	__shared__ volatile unsigned int fireCntD1;   // accepted spikes from MaxDelay==1 groups
	__shared__ int fireTable[FIRE_CHUNK_CNT];     // local neuron ids of fired neurons
	__shared__ short int fireGrpId[FIRE_CHUNK_CNT]; // local group ids of fired neurons
	__shared__ volatile int errCode;
	if (threadIdx.x == 0) {
		fireCnt = 0; // initialize total cnt to 0
		fireCntD1 = 0; // initialize d1 cnt to 0
		fireCntTest = 0; // initialize test cnt to 0
	}
	const int totBuffers=loadBufferCount;
	__syncthreads();
	for (int bufPos = blockIdx.x; bufPos < totBuffers; bufPos += gridDim.x) {
		// KILLME !!! This can be further optimized ....
		// instead of reading each neuron group separately .....
		// read a whole buffer and use the result ......
		int2 threadLoad = getStaticThreadLoad(bufPos);
		int lNId = (STATIC_LOAD_START(threadLoad) + threadIdx.x);
		int lastLNId = STATIC_LOAD_SIZE(threadLoad);
		short int lGrpId = STATIC_LOAD_GROUP(threadLoad);
		bool needToWrite = false; // used by all neuron to indicate firing condition
		int fireId = 0;
		// threadId is valid and lies within the lastId.....
		if ((threadIdx.x < lastLNId) && (lNId < networkConfigGPU.numN)) {
			// Simple poisson spiker uses the poisson firing probability
			// to detect whether it has fired or not....
			if(isPoissonGroup(lGrpId)) { // spikes generated by spikeGenFunc
				if(groupConfigsGPU[lGrpId].isSpikeGenFunc) {
					// user-defined spike generator: read the precomputed spike bit
					unsigned int offset = lNId - groupConfigsGPU[lGrpId].lStartN + groupConfigsGPU[lGrpId].Noffset;
					needToWrite = getSpikeGenBit(offset);
				} else { // spikes generated by poission rate
					needToWrite = getPoissonSpike(lNId);
				}
				// Note: valid lastSpikeTime of spike gen neurons is required by userDefinedSpikeGenerator()
				if (needToWrite)
					runtimeDataGPU.lastSpikeTime[lNId] = simTime;
			} else {
				// regular (Izhikevich) neuron fires when membrane potential crosses 30 mV
				if (runtimeDataGPU.voltage[lNId] >= 30.0f) {
					needToWrite = true;
				}
			}
		}
		// loop through a few times to ensure that we have added/processed all spikes that need to be written
		// if the buffer is small relative to the number of spikes needing to be written, we may have to empty the buffer a few times...
		// two passes suffice: at most one buffer-overflow-and-flush can occur per thread's spike
		for (int c = 0; c < 2; c++) {
			// we first increment fireCntTest to make sure we haven't filled the buffer
			if (needToWrite)
				fireId = atomicAdd((int*)&fireCntTest, 1);
			// if there is a spike and the buffer still has space...
			if (needToWrite && (fireId <(FIRE_CHUNK_CNT))) {
				// get our position in the buffer
				fireId = atomicAdd((int*)&fireCnt, 1);
				if (groupConfigsGPU[lGrpId].MaxDelay == 1)
					atomicAdd((int*)&fireCntD1, 1);
				// store ID of the fired neuron
				needToWrite = false;
				fireTable[fireId] = lNId;
				fireGrpId[fireId] = lGrpId;//setFireProperties(grpId, isInhib);
			}
			__syncthreads();
			// the local firing table is full. dump the local firing table to the global firing table before proceeding
			if (fireCntTest >= (FIRE_CHUNK_CNT)) {
				// clear the table and update...
				int retCode = updateNewFirings(fireTable, fireGrpId, fireCnt, fireCntD1, simTime);
				if (retCode != 0) return;
				// update based on stdp rule
				// KILLME !!! if (simTime > 0))
				if (networkConfigGPU.sim_with_stdp && !networkConfigGPU.sim_in_testing)
					updateLTP (fireTable, fireGrpId, fireCnt, simTime);
				// reset counters
				if (threadIdx.x == 0) {
					fireCntD1  = 0;
					fireCnt   = 0;
					fireCntTest = 0;
				}
			}
		}
	}
	__syncthreads();
	// few more fired neurons are left. we update their firing state here..
	if (fireCnt) {
		int retCode = updateNewFirings(fireTable, fireGrpId, fireCnt, fireCntD1, simTime);
		if (retCode != 0) return;
		if (networkConfigGPU.sim_with_stdp && !networkConfigGPU.sim_in_testing)
			updateLTP(fireTable, fireGrpId, fireCnt, simTime);
	}
}
//******************************** UPDATE CONDUCTANCES AND TOTAL SYNAPTIC CURRENT EVERY TIME STEP *****************************
#define LOG_CURRENT_GROUP 5
/*!
* \brief Based on the bitvector used for indicating the presence of spike, the global conductance values are updated.
*
* net access: numNReg, numNPois, I_setPitch, maxDelay, STP_Pitch, sim_with_conductances,
sim_with_NMDA_rise, sim_withGABAb_Rise, sNMDA, sGABAb
* grp access: WithSTP, STP_A
* rtd access: Npre, cumulativePre, I_set, preSynapticIds, grpIds, wt, stpx, stpu, connIdsPreIdx,
gAMPA, gGABAa, gNMDA_r, gNMDA_d, gNMDA, gGABAb_r, gGABAb_d, gGABAb
* glb access: d_mulSynFast, d_mulSynSlow
*/
__global__ void kernel_conductanceUpdate (int simTimeMs, int simTimeSec, int simTime) {
	// cache the bit->synapse-offset lookup table in shared memory for fast access
	__shared__ int sh_quickSynIdTable[256];
	// Table for quick access
	for (int i = 0; i < 256; i += blockDim.x) {
		if ((i + threadIdx.x) < 256) {
			sh_quickSynIdTable[i + threadIdx.x] = quickSynIdTableGPU[i + threadIdx.x];
		}
	}
	__syncthreads();
	const int totBuffers = loadBufferCount;
	for (int bufPos = blockIdx.x; bufPos < totBuffers; bufPos += gridDim.x) {
		// KILLME !!! This can be further optimized ....
		// instead of reading each neuron group separately .....
		// read a whole buffer and use the result ......
		int2 threadLoad = getStaticThreadLoad(bufPos);
		int postNId = STATIC_LOAD_START(threadLoad) + threadIdx.x;
		int lastNId = STATIC_LOAD_SIZE(threadLoad);
		if ((threadIdx.x < lastNId) && (IS_REGULAR_NEURON(postNId, networkConfigGPU.numNReg, networkConfigGPU.numNPois))) {
			// P6-1
			// load the initial current due to noise inputs for neuron 'post_nid'
			// initial values of the conductances for neuron 'post_nid'
			float AMPA_sum   = 0.0f;
			float NMDA_sum   = 0.0f;
			float NMDA_r_sum = 0.0f;
			float NMDA_d_sum = 0.0f;
			float GABAa_sum  = 0.0f;
			float GABAb_sum  = 0.0f;
			float GABAb_r_sum = 0.0f;
			float GABAb_d_sum = 0.0f;
			int lmt = runtimeDataGPU.Npre[postNId];
			unsigned int cum_pos = runtimeDataGPU.cumulativePre[postNId];
			// find the total current to this neuron...
			// walk the I_set bit groups (32 synapses per group) of this neuron
			for (int j = 0; (lmt) && (j <= ((lmt - 1) >> LOG_CURRENT_GROUP)); j++) {
				// because of malloc2D operation we are using pitch, post_nid, j to get
				// actual position of the input current....
				// int* tmp_I_set_p = ((int*)((char*)runtimeDataGPU.I_set + j * networkConfigGPU.I_setPitch) + post_nid);
				uint32_t* tmp_I_set_p = getFiringBitGroupPtr(postNId, j);
				uint32_t  tmp_I_set = *tmp_I_set_p;
				// table lookup based find bits that are set
				// scan the word byte-by-byte; sh_quickSynIdTable maps a byte value to the
				// position of one set bit within it
				int cnt = 0;
				int tmp_I_cnt = 0;
				while (tmp_I_set) {
					int k = (tmp_I_set >> (8 * cnt)) & 0xff;
					if (k == 0) {
						cnt = cnt + 1;
						continue;
					}
					int wt_i = sh_quickSynIdTable[k];
					int wtId = (j * 32 + cnt * 8 + wt_i);
					SynInfo synInfo = runtimeDataGPU.preSynapticIds[cum_pos + wtId];
					//uint8_t  pre_grpId  = GET_CONN_GRP_ID(pre_Id);
					uint32_t  preNId    = GET_CONN_NEURON_ID(synInfo);
					short int preGrpId  = runtimeDataGPU.grpIds[preNId];
					char type = groupConfigsGPU[preGrpId].Type;
					// load the synaptic weight for the wtId'th input
					float change = runtimeDataGPU.wt[cum_pos + wtId];
					// Adjust the weight according to STP scaling
					if (groupConfigsGPU[preGrpId].WithSTP) {
						int tD = 0; // \FIXME find delay
						// \FIXME I think pre_nid needs to be adjusted for the delay
						int ind_minus = getSTPBufPos(preNId, (simTime - tD - 1)); // \FIXME should be adjusted for delay
						int ind_plus  = getSTPBufPos(preNId, (simTime - tD));
						// dI/dt = -I/tau_S + A * u^+ * x^- * \delta(t-t_{spk})
						change *= groupConfigsGPU[preGrpId].STP_A * runtimeDataGPU.stpx[ind_minus] * runtimeDataGPU.stpu[ind_plus];
					}
					if (networkConfigGPU.sim_with_conductances) {
						short int connId = runtimeDataGPU.connIdsPreIdx[cum_pos+wtId];
						if (type & TARGET_AMPA)
							AMPA_sum += change * d_mulSynFast[connId];
						if (type & TARGET_NMDA) {
							// with NMDA rise time, track rise and decay terms separately
							if (networkConfigGPU.sim_with_NMDA_rise) {
								NMDA_r_sum += change * d_mulSynSlow[connId] * networkConfigGPU.sNMDA;
								NMDA_d_sum += change * d_mulSynSlow[connId] * networkConfigGPU.sNMDA;
							} else {
								NMDA_sum += change * d_mulSynSlow[connId];
							}
						}
						if (type & TARGET_GABAa)
							GABAa_sum += change * d_mulSynFast[connId]; // wt should be negative for GABAa and GABAb
						if (type & TARGET_GABAb) {						// but that is dealt with below
							if (networkConfigGPU.sim_with_GABAb_rise) {
								GABAb_r_sum += change * d_mulSynSlow[connId] * networkConfigGPU.sGABAb;
								GABAb_d_sum += change * d_mulSynSlow[connId] * networkConfigGPU.sGABAb;
							} else {
								GABAb_sum += change * d_mulSynSlow[connId];
							}
						}
					}
					else {
						// current based model with STP (CUBA)
						// updated current for neuron 'post_nid'
						AMPA_sum += change;
					}
					tmp_I_cnt++;
					// clear the processed bit from the local copy of the word
					tmp_I_set = tmp_I_set & (~(1 << (8 * cnt + wt_i)));
				}
				// FIXME: move reset outside kernel for debbuing I_set, resume it later
				// reset the input if there are any bit'wt set
				if(tmp_I_cnt)
					*tmp_I_set_p = 0;
				// NOTE(review): this __syncthreads() sits inside a branch that not all
				// threads of the block necessarily take (threadIdx.x < lastNId guard above),
				// and inside a loop whose trip count depends on per-neuron Npre — this is
				// only safe if all threads of a block traverse identical j ranges; confirm.
				__syncthreads();
			}
			__syncthreads();
			// P6-2
			// fold the per-receptor sums into the global conductance (or current) state
			if (networkConfigGPU.sim_with_conductances) {
				// don't add mulSynFast/mulSynSlow here, because they depend on the exact pre<->post connection, not
				// just post_nid
				runtimeDataGPU.gAMPA[postNId]        += AMPA_sum;
				runtimeDataGPU.gGABAa[postNId]       -= GABAa_sum; // wt should be negative for GABAa and GABAb
				if (networkConfigGPU.sim_with_NMDA_rise) {
					runtimeDataGPU.gNMDA_r[postNId]  += NMDA_r_sum;
					runtimeDataGPU.gNMDA_d[postNId]  += NMDA_d_sum;
				} else {
					runtimeDataGPU.gNMDA[postNId]    += NMDA_sum;
				}
				if (networkConfigGPU.sim_with_GABAb_rise) {
					runtimeDataGPU.gGABAb_r[postNId] -= GABAb_r_sum;
					runtimeDataGPU.gGABAb_d[postNId] -= GABAb_d_sum;
				} else {
					runtimeDataGPU.gGABAb[postNId]   -= GABAb_sum;
				}
			}
			else {
				runtimeDataGPU.current[postNId] += AMPA_sum;
			}
		}
	}
}
//************************ UPDATE GLOBAL STATE EVERY TIME STEP *******************************************************//
/*!
* \brief This device function implements the equations of neuron dynamics
*
* \param[in] nid The neuron id to be updated
* \param[in] grpId The group id of the neuron
*/
/*!
 * \brief Integrates the Izhikevich membrane equations for one neuron over one ms.
 *
 * Integration is split into COND_INTEGRATION_SCALE sub-steps for numerical stability.
 * Note the ordering inside the loop: v is advanced using the *old* u, then u is advanced
 * using the *new* v — this order is part of the scheme and must be preserved.
 *
 * \param[in] nid   The neuron id to be updated
 * \param[in] grpId The group id of the neuron
 */
__device__ void updateNeuronState(int nid, int grpId) {
	float v = runtimeDataGPU.voltage[nid];
	float u = runtimeDataGPU.recovery[nid];
	float I_sum, NMDAtmp;
	float gNMDA, gGABAb;
	// loop that allows smaller integration time step for v's and u's
	for (int c = 0; c < COND_INTEGRATION_SCALE; c++) {
		I_sum = 0.0f;
		if (networkConfigGPU.sim_with_conductances) {
			// voltage-dependent NMDA magnesium-block factor: ((v+80)/60)^2
			NMDAtmp = (v + 80.0f) * (v + 80.0f) / 60.0f / 60.0f;
			// with rise-time modeling, the effective conductance is decay minus rise term
			gNMDA = (networkConfigGPU.sim_with_NMDA_rise) ? (runtimeDataGPU.gNMDA_d[nid] - runtimeDataGPU.gNMDA_r[nid]) : runtimeDataGPU.gNMDA[nid];
			gGABAb = (networkConfigGPU.sim_with_GABAb_rise) ? (runtimeDataGPU.gGABAb_d[nid] - runtimeDataGPU.gGABAb_r[nid]) : runtimeDataGPU.gGABAb[nid];
			// total synaptic current: each term is g * (v - E_reversal)
			// reversal potentials: AMPA/NMDA 0 mV, GABAa -70 mV, GABAb -90 mV
			I_sum = -(runtimeDataGPU.gAMPA[nid] * (v - 0.0f)
					+ gNMDA * NMDAtmp / (1.0f + NMDAtmp) * (v - 0.0f)
					+ runtimeDataGPU.gGABAa[nid] * (v + 70.0f)
					+ gGABAb * (v + 90.0f));
		} else {
			// CUBA mode: the accumulated current is used directly
			I_sum = runtimeDataGPU.current[nid];
		}
		// update vpos and upos for the current neuron
		// Izhikevich dynamics: dv/dt = 0.04v^2 + 5v + 140 - u + I
		v += ((0.04f * v + 5.0f) * v + 140.0f - u + I_sum + runtimeDataGPU.extCurrent[nid]) / COND_INTEGRATION_SCALE;
		if (v > 30.0f) {
			v = 30.0f; // break the loop but evaluate u[i]
			c = COND_INTEGRATION_SCALE;
		}
		if (v < -90.0f) v = -90.0f;
		// du/dt = a * (b*v - u)
		u += (runtimeDataGPU.Izh_a[nid] * (runtimeDataGPU.Izh_b[nid] * v - u) / COND_INTEGRATION_SCALE);
	}
	if(networkConfigGPU.sim_with_conductances) {
		runtimeDataGPU.current[nid] = I_sum;
	} else {
		// current must be reset here for CUBA and not kernel_STPUpdateAndDecayConductances
		runtimeDataGPU.current[nid] = 0.0f;
	}
	runtimeDataGPU.voltage[nid] = v;
	runtimeDataGPU.recovery[nid] = u;
}
/*!
* \brief update neuron state
*
* This kernel update neurons' membrance potential according to neurons' dynamics model.
* This kernel also update variables required by homeostasis
*
* net access: numN, numNReg, numNPois, sim_with_conductances, sim_with_NMDA_rise, sim_with_GABAb_rise
* grp access: WithHomeostasis, avgTimeScale_decay
* rtd access: avgFiring, voltage, recovery, gNMDA, gNMDA_r, gNMDA_d, gGABAb, gGABAb_r, gGABAb_d, gAMPA, gGABAa,
* current, extCurrent, Izh_a, Izh_b
* glb access:
*/
/*!
 * \brief Advances the membrane state of every regular neuron by one time step.
 *
 * Blocks grid-stride over the static thread-load buffers; each thread handles one
 * neuron, integrating its dynamics (P7) and, if the group uses homeostasis, updating
 * its homeostatic bookkeeping (P8). Poisson generators are skipped.
 */
__global__ void kernel_neuronStateUpdate() {
	const int numBuffers = loadBufferCount;
	// update neuron state
	for (int buf = blockIdx.x; buf < numBuffers; buf += gridDim.x) {
		// KILLME !!! This can be further optimized ....
		// instead of reading each neuron group separately .....
		// read a whole buffer and use the result ......
		const int2 load = getStaticThreadLoad(buf);
		const int neuronId = STATIC_LOAD_START(load) + threadIdx.x;
		const int maxId = STATIC_LOAD_SIZE(load);
		const int groupId = STATIC_LOAD_GROUP(load);
		// skip threads that fall outside this buffer's neuron range
		if (threadIdx.x >= maxId || neuronId >= networkConfigGPU.numN)
			continue;
		// poisson generators have no membrane dynamics to integrate
		if (!IS_REGULAR_NEURON(neuronId, networkConfigGPU.numNReg, networkConfigGPU.numNPois))
			continue;
		// P7: integrate voltage/recovery for this neuron
		updateNeuronState(neuronId, groupId);
		// P8: homeostasis bookkeeping, if enabled for the group
		if (groupConfigsGPU[groupId].WithHomeostasis)
			updateHomeoStaticState(neuronId, groupId);
	}
}
/*!
* \brief Update the state of groups, which includes concentration of dopamine currently
*
* Update the concentration of neuronmodulator
*
* net access: numGroups
* grp access: WithESTDPtype, WithISTDPtype, baseDP, decayDP
* rtd access: grpDA, grpDABuffer
* glb access:
*/
/*!
 * \brief Updates per-group state (currently: dopamine concentration) once per ms.
 *
 * One thread per group. Dopamine decays multiplicatively toward baseline when the
 * group uses DA-modulated STDP, then the (possibly decayed) value is logged into
 * the per-ms dopamine buffer.
 *
 * \param[in] simTime current ms within the second, used as buffer index
 */
__global__ void kernel_groupStateUpdate(int simTime) {
	// update group state
	const int gIdx = threadIdx.x + blockIdx.x * blockDim.x;
	if (gIdx >= networkConfigGPU.numGroups)
		return;
	// P9
	// decay dopamine concentration while it sits above the group baseline
	const bool daModulated = (groupConfigsGPU[gIdx].WithESTDPtype == DA_MOD)
	                      || (groupConfigsGPU[gIdx].WithISTDPtype == DA_MOD);
	if (daModulated && runtimeDataGPU.grpDA[gIdx] > groupConfigsGPU[gIdx].baseDP)
		runtimeDataGPU.grpDA[gIdx] *= groupConfigsGPU[gIdx].decayDP;
	// log dopamine concentration for this ms (buffer holds 1000 entries per group)
	runtimeDataGPU.grpDABuffer[gIdx * 1000 + simTime] = runtimeDataGPU.grpDA[gIdx];
}
//******************************** UPDATE STP STATE EVERY TIME STEP **********************************************
/*!
* \brief This function is called for updat STP and decay coductance every time step
*
* net access sim_with_conductance, sim_with_NMDA_rise, sim_with_GABAb_rise, numNReg, numNPois, numN, STP_Pitch, maxDelay
* grp access WithSTP
* rtd access gAMPA, gNMDA_r, gNMDA_d, gNMDA, gBABAa, gGABAb_r, gGABAb_d, gGABAb
* rtd access stpu, stpx
*/
__global__ void kernel_STPUpdateAndDecayConductances (int t, int sec, int simTime) {
	const int totBuffers = loadBufferCount;
	for (int bufPos = blockIdx.x; bufPos < totBuffers; bufPos += gridDim.x) {
		// KILLME !!! This can be further optimized ....
		// instead of reading each neuron group separately .....
		// read a whole buffer and use the result ......
		int2 threadLoad = getStaticThreadLoad(bufPos);
		int nid = (STATIC_LOAD_START(threadLoad) + threadIdx.x);
		int lastId = STATIC_LOAD_SIZE(threadLoad);
		int grpId = STATIC_LOAD_GROUP(threadLoad);
		// update the conductane parameter of the current neron
		// NOTE(review): unlike the STP branch below, this branch does not guard on
		// (threadIdx.x < lastId) && (nid < numN) — presumably IS_REGULAR_NEURON bounds
		// nid sufficiently, but threads past lastId may decay a neighbor buffer's
		// neuron twice per step; verify against the CPU-side implementation.
		if (networkConfigGPU.sim_with_conductances && IS_REGULAR_NEURON(nid, networkConfigGPU.numNReg, networkConfigGPU.numNPois)) {
			// exponential decay of each receptor conductance by its per-step factor
			runtimeDataGPU.gAMPA[nid]       *= networkConfigGPU.dAMPA;
			if (networkConfigGPU.sim_with_NMDA_rise) {
				// with rise time: rise and decay terms decay with separate factors
				runtimeDataGPU.gNMDA_r[nid] *= networkConfigGPU.rNMDA;
				runtimeDataGPU.gNMDA_d[nid] *= networkConfigGPU.dNMDA;
			} else {
				runtimeDataGPU.gNMDA[nid]   *= networkConfigGPU.dNMDA;
			}
			runtimeDataGPU.gGABAa[nid]      *= networkConfigGPU.dGABAa;
			if (networkConfigGPU.sim_with_GABAb_rise) {
				runtimeDataGPU.gGABAb_r[nid] *= networkConfigGPU.rGABAb;
				runtimeDataGPU.gGABAb_d[nid] *= networkConfigGPU.dGABAb;
			} else {
				runtimeDataGPU.gGABAb[nid]  *= networkConfigGPU.dGABAb;
			}
		}
		// STP recovery: u relaxes toward 0, x recovers toward 1, using the previous
		// time step's values (ind_minus) to produce this step's values (ind_plus)
		if (groupConfigsGPU[grpId].WithSTP && (threadIdx.x < lastId) && (nid < networkConfigGPU.numN)) {
			int ind_plus  = getSTPBufPos(nid, simTime);
			int ind_minus = getSTPBufPos(nid, (simTime-1)); // \FIXME sure?
			runtimeDataGPU.stpu[ind_plus] = runtimeDataGPU.stpu[ind_minus]*(1.0f-groupConfigsGPU[grpId].STP_tau_u_inv);
			runtimeDataGPU.stpx[ind_plus] = runtimeDataGPU.stpx[ind_minus] + (1.0f-runtimeDataGPU.stpx[ind_minus])*groupConfigsGPU[grpId].STP_tau_x_inv;
		}
	}
}
//********************************UPDATE SYNAPTIC WEIGHTS EVERY SECOND *************************************************************
/*!
* \brief This kernel update synaptic weights
*
* This kernel is called every second to adjust the timingTable and globalFiringTable
* We do the following thing:
* 1. We discard all firing information that happened more than 1000-maxDelay_ time step.
* 2. We move the firing information that happened in the last 1000-maxDelay_ time step to
* the begining of the gloalFiringTable.
* 3. We read each value of "wtChange" and update the value of "synaptic weights wt".
* We also clip the "synaptic weight wt" to lie within the required range.
*/
/*!
 * \brief Applies the accumulated weight derivative (wtChange) of one synapse to its weight.
 *
 * Called once per plastic synapse from kernel_updateWeights. Combines the STDP-derived
 * change (scaled by stdpScaleFactor, and by group dopamine for DA_MOD groups) with an
 * optional homeostatic scaling term, decays wtChange, and clips the new weight to
 * [0, maxSynWt] for excitatory or [maxSynWt, 0] for inhibitory synapses.
 *
 * \param[in] nid              post-synaptic neuron id (unused here except as context)
 * \param[in] synId            global synapse index into wt/wtChange/maxSynWt
 * \param[in] grpId            group id of the post-synaptic neuron
 * \param[in] diff_firing      homeostasis: 1 - avgFiring/baseFiring (0 if disabled)
 * \param[in] homeostasisScale group's homeostasis scaling factor (0 if disabled)
 * \param[in] baseFiring       neuron's target base firing rate (0 if disabled)
 * \param[in] avgTimeScaleInv  inverse of the group's firing-average time scale
 */
__device__ void updateSynapticWeights(int nid, unsigned int synId, int grpId, float diff_firing, float homeostasisScale, float baseFiring, float avgTimeScaleInv) {
	// This function does not get called if the neuron group has all fixed weights.
	// t_twChange is adjusted by stdpScaleFactor based on frequency of weight updates (e.g., 10ms, 100ms, 1s)
	float t_wt = runtimeDataGPU.wt[synId];
	float t_wtChange = runtimeDataGPU.wtChange[synId];
	float t_effectiveWtChange = networkConfigGPU.stdpScaleFactor * t_wtChange;
	float t_maxWt = runtimeDataGPU.maxSynWt[synId];
	switch (groupConfigsGPU[grpId].WithESTDPtype) {
	case STANDARD:
		if (groupConfigsGPU[grpId].WithHomeostasis) {
			// this factor is slow
			t_wt += (diff_firing*t_wt*homeostasisScale + t_effectiveWtChange) * baseFiring * avgTimeScaleInv / (1.0f+fabs(diff_firing)*50.0f);
		} else {
			t_wt += t_effectiveWtChange;
		}
		break;
	case DA_MOD:
		if (groupConfigsGPU[grpId].WithHomeostasis) {
			// dopamine-modulated: scale the change by the group's current DA concentration
			// NOTE(review): this overwrites t_effectiveWtChange, which the I-STDP switch
			// below reuses — if a group has ESTDP DA_MOD *and* ISTDP STANDARD, the I-STDP
			// branch sees the DA-scaled value; confirm this is intended.
			t_effectiveWtChange = runtimeDataGPU.grpDA[grpId] * t_effectiveWtChange;
			t_wt += (diff_firing*t_wt*homeostasisScale + t_effectiveWtChange) * baseFiring * avgTimeScaleInv / (1.0f+fabs(diff_firing)*50.0f);
		} else {
			t_wt += runtimeDataGPU.grpDA[grpId] * t_effectiveWtChange;
		}
		break;
	case UNKNOWN_STDP:
	default:
		// we shouldn't even be here if !WithSTDP
		break;
	}
	switch (groupConfigsGPU[grpId].WithISTDPtype) {
	case STANDARD:
		if (groupConfigsGPU[grpId].WithHomeostasis) {
			// this factor is slow
			t_wt += (diff_firing*t_wt*homeostasisScale + t_effectiveWtChange) * baseFiring * avgTimeScaleInv / (1.0f+fabs(diff_firing)*50.0f);
		} else {
			t_wt += t_effectiveWtChange;
		}
		break;
	case DA_MOD:
		if (groupConfigsGPU[grpId].WithHomeostasis) {
			t_effectiveWtChange = runtimeDataGPU.grpDA[grpId] * t_effectiveWtChange;
			t_wt += (diff_firing*t_wt*homeostasisScale + t_effectiveWtChange) * baseFiring * avgTimeScaleInv / (1.0f+fabs(diff_firing)*50.0f);
		} else {
			t_wt += runtimeDataGPU.grpDA[grpId] * t_effectiveWtChange;
		}
		break;
	case UNKNOWN_STDP:
	default:
		// we shouldn't even be here if !WithSTDP
		break;
	}
	// It's user's choice to decay weight change or not
	// see setWeightAndWeightChangeUpdate()
	t_wtChange *= networkConfigGPU.wtChangeDecay;
	// Check the synapse is excitatory or inhibitory first
	if (t_maxWt >= 0.0f) { // excitatory synapse
		if (t_wt >= t_maxWt) t_wt = t_maxWt;
		if (t_wt < 0.0f) t_wt = 0.0f;
	} else { // inhibitory synapse
		if (t_wt <= t_maxWt) t_wt = t_maxWt;
		if (t_wt > 0.0f) t_wt = 0.0f;
	}
	runtimeDataGPU.wt[synId] = t_wt;
	runtimeDataGPU.wtChange[synId] = t_wtChange;
}
#define UPWTS_CLUSTERING_SZ 32
/*!
* \brief this kernel updates all synaptic weights
*
* net access: stdpScaleFactor, wtChangeDecay
* grp access: homeostasisScale, avgTimeScaleInv, FixedInputWts, WithESTDPtype, WithISTDOtype, WithHomeostasis
* rtd access: Npre_plastic, cumulativePre, avgFiring, baseFiringInv, baseFiring, wt, wtChange, maxSynWt
* glb access:
*/
__global__ void kernel_updateWeights() {
	__shared__ volatile int errCode;
	// buffer metadata broadcast by thread 0 to the whole block
	__shared__ int    startId, lastId, grpId, totBuffers, grpNCnt;
	__shared__ int2   threadLoad;
	// added for homeostasis
	__shared__ float  homeostasisScale, avgTimeScaleInv;
	if(threadIdx.x == 0) {
		totBuffers = loadBufferCount;
		// number of neurons processed in parallel per block (one UPWTS_CLUSTERING_SZ-wide
		// thread cluster per neuron)
		grpNCnt = (blockDim.x / UPWTS_CLUSTERING_SZ) + ((blockDim.x % UPWTS_CLUSTERING_SZ) != 0);
	}
	__syncthreads();
	for (int bufPos = blockIdx.x; bufPos < totBuffers; bufPos += gridDim.x) {
		// KILLME !!! This can be further optimized ....
		// instead of reading each neuron group separately .....
		// read a whole buffer and use the result ......
		// if ( threadIdx.x) { // TSC: this could be a performance bug, 127 threads other than the first thread try to read
		// threadLoad and wirte homeostatsisScale and avgTimeScaleInv at the same time
		if (threadIdx.x == 0) {
			threadLoad  = getStaticThreadLoad(bufPos);
			startId 	= STATIC_LOAD_START(threadLoad);
			lastId  	= STATIC_LOAD_SIZE(threadLoad);
			grpId   	= STATIC_LOAD_GROUP(threadLoad);
			// load homestasis parameters
			if (groupConfigsGPU[grpId].WithHomeostasis) {
				homeostasisScale = groupConfigsGPU[grpId].homeostasisScale;
				avgTimeScaleInv = groupConfigsGPU[grpId].avgTimeScaleInv;
			} else {
				homeostasisScale = 0.0f;
				avgTimeScaleInv = 1.0f;
			}
		}
		__syncthreads();
		// the weights are fixed for this group.. so dont make any changes on
		// the weight and continue to the next set of neurons...
		// (grpId is shared, so the whole block takes the same path — the continue is uniform)
		if (groupConfigsGPU[grpId].FixedInputWts)
			continue;
		int nid = (threadIdx.x / UPWTS_CLUSTERING_SZ) + startId;
		// update the synaptic weights from the synaptic weight derivatives
		for(; nid < startId + lastId; nid += grpNCnt) {
			int Npre_plastic = runtimeDataGPU.Npre_plastic[nid];
			unsigned int cumulativePre = runtimeDataGPU.cumulativePre[nid];
			float diff_firing  = 0.0f;
			float baseFiring = 0.0f;
			if (groupConfigsGPU[grpId].WithHomeostasis) {
				// how far the neuron's running firing rate deviates from its target
				diff_firing  = (1.0f - runtimeDataGPU.avgFiring[nid] * runtimeDataGPU.baseFiringInv[nid]);
				baseFiring = runtimeDataGPU.baseFiring[nid];
			}
			const int threadIdGrp = (threadIdx.x % UPWTS_CLUSTERING_SZ);
			// use 32 threads to update 32 synapses parallely
			for(unsigned int synIdOffset = cumulativePre; synIdOffset < cumulativePre + Npre_plastic; synIdOffset += UPWTS_CLUSTERING_SZ) {
				//excitatory connection change the synaptic weights
				unsigned int synId = synIdOffset + threadIdGrp;
				if(synId < cumulativePre + Npre_plastic) {
					updateSynapticWeights(nid, synId, grpId, diff_firing, homeostasisScale, baseFiring, avgTimeScaleInv);
				}
			}
		}
	}
}
//********************************UPDATE TABLES AND COUNTERS EVERY SECOND *************************************************************
/*!
* \brief This kernel shift the un-processed firing information in firingTableD2 to the beginning of
* firingTableD2 for the next second of simulation.
*
* net access: maxDelay
* grp access: N/A
* rtd access: firingTableD2
* glb access: timeTableD2GPU
*/
/*!
 * \brief Moves the last maxDelay ms of un-processed D2 firing entries to the front of
 * firingTableD2 in preparation for the next second of simulation.
 *
 * Source range is [timeTableD2GPU[999], timeTableD2GPU[999 + maxDelay + 1]); entries are
 * copied to offset 0 onward. Note: the copy index uses only threadIdx.x with a
 * gridDim*blockDim stride, so every block performs the same copies (kept as-is).
 */
__global__ void kernel_shiftFiringTable() {
	const int totalThreads = blockDim.x * gridDim.x;
	// end of the un-processed tail of the firing table (constant during this kernel)
	const int srcEnd = timeTableD2GPU[999 + networkConfigGPU.maxDelay + 1];
	int src = timeTableD2GPU[999];
	int dst = 0;
	for (; src < srcEnd; src += totalThreads, dst += totalThreads) {
		if (src + threadIdx.x < srcEnd)
			runtimeDataGPU.firingTableD2[dst + threadIdx.x] = runtimeDataGPU.firingTableD2[src + threadIdx.x];
	}
}
/*!
* \brief This kernel shift the un-processed firing information in timeTableD1(D2)GPU to the beginning of
* timeTableD1(D2)GPU for the next second of simulation.
*
* After all the threads/blocks had adjusted the firingTableD1(D2)GPU, we update the timeTableD1(D2)GPU
* so that the firing information that happended in the last maxDelay_ time step would become
* the first maxDelay_ time step firing information for the next second of simulation.
* We also reset/update all spike counters to appropriate values as indicated in the second part
* of this kernel.
*/
/*!
 * \brief Shifts the last maxDelay entries of timeTableD1/D2 to the front and resets the
 * per-second spike counters for the next second of simulation.
 *
 * Only block 0 does the shift; a single thread then rolls the per-second counters into
 * the cumulative totals and zeroes them.
 */
__global__ void kernel_shiftTimeTable() {
	const int maxDelay = networkConfigGPU.maxDelay;
	if (blockIdx.x == 0) {
		for (int d = threadIdx.x; d < maxDelay; d += blockDim.x) {
			// use d+1 instead of just d because timeTableD2GPU[0] should always be 0
			timeTableD2GPU[d + 1] = timeTableD2GPU[1000 + d + 1] - timeTableD2GPU[1000];
			timeTableD1GPU[d + 1] = timeTableD1GPU[1000 + d + 1] - timeTableD1GPU[1000];
		}
	}
	__syncthreads();
	// reset various counters for the firing information
	if (blockIdx.x == 0 && threadIdx.x == 0) {
		timeTableD1GPU[maxDelay] = 0;
		// accumulate this second's spike counts into the running totals, then clear them
		spikeCountD2GPU += spikeCountD2SecGPU;
		spikeCountD1GPU += spikeCountD1SecGPU;
		spikeCountD2SecGPU = 0;
		spikeCountD1SecGPU = 0;
		spikeCountExtRxD2SecGPU = 0;
		spikeCountExtRxD1SecGPU = 0;
		// spikes from the last maxDelay ms carry over into the new second
		spikeCountLastSecLeftD2GPU = timeTableD2GPU[maxDelay];
		secD2fireCntTest = timeTableD2GPU[maxDelay];
		secD1fireCntTest = 0;
	}
}
//****************************** GENERATE POST-SYNAPTIC CURRENT EVERY TIME-STEP ****************************
/*
* The sequence of handling an post synaptic spike in GPU mode:
* P1. Update synSpikeTime
* P2. Update DA,5HT,ACh,NE accordingly
* P3. Update STDP wtChange
* P4. Load wt into change (temporary variable)
* P5. Modulate change by STP (if enabled)
* P6-1. Modulate change by d_mulSynSlow and d_mulSynFast
* P6-2. Accumulate g(AMPA,NMDA,GABAa,GABAb) or current
* P7. Update v(voltage), u(recovery)
* P8. Update homeostasis
* P9. Decay and log DA,5HT,ACh,NE
*/
/*!
 * \brief Delivers one pre-synaptic spike to one post-synaptic synapse.
 *
 * Implements steps P1-P3 of the post-synaptic spike sequence documented above: marks
 * the synapse's I_set bit, records the spike arrival time, bumps group dopamine if the
 * pre-synaptic group is dopaminergic, and applies the LTD (post-before-pre) side of
 * the configured STDP curves.
 *
 * \param[in] simTime current time step
 * \param[in] preNId  local id of the pre-synaptic (firing) neuron
 * \param[in] postNId local id of the post-synaptic neuron
 * \param[in] synId   index of the synapse within postNId's pre-synapse list
 */
__device__ void generatePostSynapticSpike(int simTime, int preNId, int postNId, int synId) {
	// get the actual position of the synapses and other variables...
	unsigned int pos = runtimeDataGPU.cumulativePre[postNId] + synId;
	short int preGrpId = runtimeDataGPU.grpIds[preNId]; // STP uses preGrpId
	short int postGrpId = runtimeDataGPU.grpIds[postNId]; // STDP uses postGrpId
	// flag this synapse's input bit so kernel_conductanceUpdate picks it up this step
	setFiringBitSynapses(postNId, synId);
	// P1
	runtimeDataGPU.synSpikeTime[pos] = simTime; //uncoalesced access
	// P2
	// Got one spike from dopaminergic neuron, increase dopamine concentration in the target area
	if (groupConfigsGPU[preGrpId].Type & TARGET_DA) {
		atomicAdd(&(runtimeDataGPU.grpDA[postGrpId]), 0.04f);
	}
	// P3
	// STDP calculation: the post-synaptic neuron fires before the arrival of pre-synaptic neuron's spike
	if (groupConfigsGPU[postGrpId].WithSTDP && !networkConfigGPU.sim_in_testing) {
		// time since the post-synaptic neuron last fired
		int stdp_tDiff = simTime - runtimeDataGPU.lastSpikeTime[postNId];
		if (stdp_tDiff >= 0) {
			if (groupConfigsGPU[postGrpId].WithESTDP) {
				// Handle E-STDP curves
				switch (groupConfigsGPU[postGrpId].WithESTDPcurve) {
				case EXP_CURVE: // exponential curve
				case TIMING_BASED_CURVE: // sc curve
					// skip when tDiff/tau > 25: the exponential is negligibly small
					if (stdp_tDiff * groupConfigsGPU[postGrpId].TAU_MINUS_INV_EXC < 25.0f)
						runtimeDataGPU.wtChange[pos] += STDP( stdp_tDiff, groupConfigsGPU[postGrpId].ALPHA_MINUS_EXC, groupConfigsGPU[postGrpId].TAU_MINUS_INV_EXC); // uncoalesced access
					break;
				default:
					break;
				}
			}
			if (groupConfigsGPU[postGrpId].WithISTDP) {
				// Handle I-STDP curves
				switch (groupConfigsGPU[postGrpId].WithISTDPcurve) {
				case EXP_CURVE: // exponential curve
					if ((stdp_tDiff * groupConfigsGPU[postGrpId].TAU_MINUS_INV_INB) < 25.0f) { // LTD of inhibitory syanpse, which increase synapse weight
						runtimeDataGPU.wtChange[pos] -= STDP(stdp_tDiff, groupConfigsGPU[postGrpId].ALPHA_MINUS_INB, groupConfigsGPU[postGrpId].TAU_MINUS_INV_INB);
					}
					break;
				case PULSE_CURVE: // pulse curve
					if (stdp_tDiff <= groupConfigsGPU[postGrpId].LAMBDA) { // LTP of inhibitory synapse, which decreases synapse weight
						runtimeDataGPU.wtChange[pos] -= groupConfigsGPU[postGrpId].BETA_LTP;
					} else if (stdp_tDiff <= groupConfigsGPU[postGrpId].DELTA) { // LTD of inhibitory syanpse, which increase synapse weight
						runtimeDataGPU.wtChange[pos] -= groupConfigsGPU[postGrpId].BETA_LTD;
					}
					break;
				default:
					break;
				}
			}
		}
	}
}
#define READ_CHUNK_SZ 64
/*!
* \brief This kernel updates and generates spikes for delays greater than 1 from the fired neuron.
*
* The LTD computation is also executed by this kernel.
*
* net access: maxDelay, I_setPitch, sim_in_testing
* grp access: Type, WithSTDP, WithESTDP, WithESTDPcurve, WithISDP, WithISTDPcurve, all STDP parameters
* rtd access: firingTableD2, cumulativePost, postDelayInfo, postSynapticIds, cumulativePre, grpIds,
* grpDA, I_set, (W)synSpikeTime, (R)lastSpikeTime, wtChange
* glb access: spikeCountD2SecGPU, timeTableD2GPU_tex, timeTableD2GPU_tex_offset
*/
__global__ void kernel_doCurrentUpdateD2(int simTimeMs, int simTimeSec, int simTime) {
// Shared staging area: each chunk iteration reads up to READ_CHUNK_SZ fired neurons
// from firingTableD2 and caches their post-synaptic bookkeeping here, so that whole
// warps can then stream out their post-synaptic spikes.
__shared__ volatile int sh_neuronOffsetTable[READ_CHUNK_SZ + 2];
__shared__ int sh_delayLength[READ_CHUNK_SZ + 2];
__shared__ int sh_delayIndexStart[READ_CHUNK_SZ + 2];
__shared__ int sh_firingId[READ_CHUNK_SZ + 2];
// number of valid entries staged in this iteration (atomically incremented below)
__shared__ volatile int sh_NeuronCnt;
const int threadIdWarp = (threadIdx.x % WARP_SIZE);
const int warpId = (threadIdx.x / WARP_SIZE);
// this variable is used to record the
// number of updates done by different blocks
if(threadIdx.x<=0) {
sh_NeuronCnt = 0;
}
__syncthreads();
// stores the number of fired neurons at time t
int k = tex1Dfetch(timeTableD2GPU_tex, simTimeMs + networkConfigGPU.maxDelay + 1 + timeTableD2GPU_tex_offset) - 1;
// stores the number of fired neurons at time (t - maxDelay_)
int k_end = tex1Dfetch(timeTableD2GPU_tex, simTimeMs + 1 + timeTableD2GPU_tex_offset);
int t_pos = simTimeMs;
// we need to read (k-k_end) neurons from the firing
// table and do necesary updates for all these post-synaptic
// connection in these neurons..
// Walk the firing table backwards in chunks of READ_CHUNK_SZ * gridDim.x entries.
while ((k >= k_end) && (k >= 0)) {
// at any point of time EXCIT_READ_CHUNK_SZ neurons
// read different firing id from the firing table
if (threadIdx.x < READ_CHUNK_SZ) { // use 64 threads
// each participating thread claims one firing-table slot for this block
int fPos = k - (READ_CHUNK_SZ * blockIdx.x) - threadIdx.x;
if ((fPos >= 0) && (fPos >= k_end)) {
// get the neuron nid here....
//int val = runtimeDataGPU.firingTableD2[fPos];
//int nid = GET_FIRING_TABLE_NID(val);
int nid = runtimeDataGPU.firingTableD2[fPos];
// find the time of firing based on the firing number fPos
// (linear search backwards through the time table until fPos falls into
// the [start, end) range of some earlier millisecond; t_pos only ever
// decreases across loop iterations because fPos decreases)
while ( !((fPos >= tex1Dfetch(timeTableD2GPU_tex, t_pos + networkConfigGPU.maxDelay + timeTableD2GPU_tex_offset))
&& (fPos < tex1Dfetch(timeTableD2GPU_tex, t_pos + networkConfigGPU.maxDelay + 1 + timeTableD2GPU_tex_offset)))) {
t_pos--;
}
// find the time difference between firing of the neuron and the current time
int tD = simTimeMs - t_pos;
// find the various delay parameters for neuron 'nid', with a delay of 'tD'
//sh_axonDelay[threadIdx.x] = tD;
int tPos = (networkConfigGPU.maxDelay + 1) * nid + tD;
//sh_firingId[threadIdx.x] = val;
sh_firingId[threadIdx.x] = nid;
sh_neuronOffsetTable[threadIdx.x]= runtimeDataGPU.cumulativePost[nid];
sh_delayLength[threadIdx.x] = runtimeDataGPU.postDelayInfo[tPos].delay_length;
sh_delayIndexStart[threadIdx.x] = runtimeDataGPU.postDelayInfo[tPos].delay_index_start;
// This is to indicate that the current thread
// has a valid delay parameter for post-synaptic firing generation
atomicAdd((int*)&sh_NeuronCnt, 1);
}
}
__syncthreads();
// if cnt is zero than no more neurons need to generate
// post-synaptic firing, then we break the loop.
if (sh_NeuronCnt == 0) {
break;
}
// first WARP_SIZE threads the post synaptic
// firing for first neuron, and so on. each of this group
// needs to generate (numPostSynapses/maxDelay_) spikes for every fired neuron, every second
// for numPostSynapses=500,maxDelay_=20, we need to generate 25 spikes for each fired neuron
// for numPostSynapses=600,maxDelay_=20, we need to generate 30 spikes for each fired neuron
for (int pos = warpId; pos < sh_NeuronCnt; pos += (NUM_THREADS / WARP_SIZE)) {
// each warp handles one staged neuron; lanes stride over its synapses
int delId = threadIdWarp;
while (delId < sh_delayLength[pos]) {
// get the post synaptic information for specific delay
SynInfo postInfo = runtimeDataGPU.postSynapticIds[sh_neuronOffsetTable[pos] + sh_delayIndexStart[pos] + delId];
int postNId = GET_CONN_NEURON_ID(postInfo); // get post-neuron id
int synId = GET_CONN_SYN_ID(postInfo); // get synaptic id
if (postNId < networkConfigGPU.numN) // test if post-neuron is a local neuron
generatePostSynapticSpike(simTime, sh_firingId[pos] /* preNId */, postNId, synId);
delId += WARP_SIZE;
}
} //(for all excitory neurons in table)
__syncthreads();
// reset the staged-neuron counter for the next chunk iteration; the barrier
// below makes the reset visible before the next round of atomicAdd calls
if(threadIdx.x == 0) {
sh_NeuronCnt = 0;
}
k = k - (gridDim.x * READ_CHUNK_SZ);
__syncthreads();
}
__syncthreads();
}
/*!
* \brief This kernel updating and generating spikes on connections with a delay of 1ms from the fired neuron.
*
* This function looks mostly like kernel_doCurrentUpdateD2() but has been optimized for a fixed delay of 1ms.
* Ultimately we may merge this kernel with the kernel_doCurrentUpdateD2().
* The LTD computation is also executed by this kernel.
*
* net access: maxDelay, I_setPitch, sim_in_testing
* grp access: Type, grpDA, WithSTDP, WithESTDP, WithISTDP, WithESTDPcurve, WithISTDPcurve, all STDP parameters
* rtd access: postSynapticIds, cumulativePre, grpIds, I_set, wtChange, (R)lastSpikeTime, (W)synSpikeTime
* glb access: timeTableD1GPU, spikeCountD1SecGPU, firingTableD1
*/
__global__ void kernel_doCurrentUpdateD1(int simTimeMs, int simTimeSec, int simTime) {
	// number of valid fired neurons staged in the current iteration
	__shared__ volatile int sh_NeuronCnt;
	// per-warp staging slots (one fired neuron per warp of this block)
	__shared__ volatile int sh_neuronOffsetTable[NUM_THREADS / WARP_SIZE + 2];
	__shared__ int sh_delayLength[NUM_THREADS / WARP_SIZE + 2];
	__shared__ int sh_firingId[NUM_THREADS / WARP_SIZE + 2];
	__shared__ int sh_delayIndexStart[NUM_THREADS / WARP_SIZE + 2];
	__shared__ int sh_timing;
	__shared__ int kPosEnd;
	const int warpId = threadIdx.x / WARP_SIZE;       // warp id
	const int numWarps = blockDim.x / WARP_SIZE;      // number of warps per block
	const int threadIdWarp = threadIdx.x % WARP_SIZE; // thread (lane) id within a warp
	// load the time table for neuron firing
	if (threadIdx.x == 0) {
		// FIX: sh_NeuronCnt is atomically incremented and compared against 0 below,
		// but was never initialized (shared memory is not zero-initialized). The
		// sibling kernel_doCurrentUpdateD2 zeroes it before use; do the same here.
		sh_NeuronCnt = 0;
		sh_timing = timeTableD1GPU[simTimeMs + networkConfigGPU.maxDelay]; // number of fired neurons at simTimeMs - 1
		kPosEnd = timeTableD1GPU[simTimeMs + networkConfigGPU.maxDelay + 1]; // number of fired neurons at simTimeMs, which is equal to spikeCountD1SecGPU
	}
	__syncthreads();
	int kPos = sh_timing + (blockIdx.x * numWarps);
	__syncthreads();
	// Do current update as long as we have some valid neuron
	while ((kPos >= 0) && (kPos < kPosEnd)) {
		int fPos = -1;
		// a group of threads (one per warp of this block) loads the delay information
		if (threadIdx.x < numWarps) {
			sh_neuronOffsetTable[threadIdx.x] = -1; // mark slot invalid until filled
			fPos = kPos + threadIdx.x;
			// find the neuron nid and also delay information from fPos
			if ((fPos >= 0) && (fPos < kPosEnd)) {
				atomicAdd((int*)&sh_NeuronCnt, 1);
				int nid = runtimeDataGPU.firingTableD1[fPos];
				int tPos = (networkConfigGPU.maxDelay + 1) * nid;
				sh_firingId[threadIdx.x] = nid;
				sh_neuronOffsetTable[threadIdx.x] = runtimeDataGPU.cumulativePost[nid];
				sh_delayLength[threadIdx.x] = runtimeDataGPU.postDelayInfo[tPos].delay_length;
				sh_delayIndexStart[threadIdx.x] = runtimeDataGPU.postDelayInfo[tPos].delay_index_start;
			}
		}
		__syncthreads();
		// no more fired neuron from table... we just break from loop
		if (sh_NeuronCnt == 0) {
			break;
		}
		__syncthreads();
		int offset = sh_neuronOffsetTable[warpId];
		if (threadIdx.x == 0) {
			sh_NeuronCnt = 0; // reset for next iteration; published by the barrier at loop end
		}
		// each warp (32 threads) runs generatePostSynapticSpike() for one fired neuron
		if (offset >= 0) {
			int delId = threadIdWarp;
			while (delId < sh_delayLength[warpId]) {
				// get the post synaptic information for specific delay
				SynInfo postInfo = runtimeDataGPU.postSynapticIds[offset + sh_delayIndexStart[warpId] + delId];
				int postNId = GET_CONN_NEURON_ID(postInfo); // get post-neuron id
				int synId = GET_CONN_SYN_ID(postInfo); // get synaptic id
				if (postNId < networkConfigGPU.numN) // test if post-neuron is a local neuron
					generatePostSynapticSpike(simTime, sh_firingId[warpId] /* preNId */, postNId, synId);
				delId += WARP_SIZE;
			}
		}
		__syncthreads();
		kPos = kPos + (gridDim.x * numWarps);
	}
}
__global__ void kernel_convertExtSpikesD2(int startIdx, int endIdx, int GtoLOffset) {
	// one firingTableD2 entry per thread, starting at startIdx
	int tableIdx = startIdx + blockIdx.x * blockDim.x + threadIdx.x;
	// a single thread accounts for the whole batch of received external spikes
	if (blockIdx.x == 0 && threadIdx.x == 0) {
		int numExtSpikes = endIdx - startIdx; // received external spike count
		secD2fireCntTest += numExtSpikes;
		spikeCountD2SecGPU += numExtSpikes;
		spikeCountExtRxD2GPU += numExtSpikes;
		spikeCountExtRxD2SecGPU += numExtSpikes;
	}
	// FIXME: if endIdx - startIdx > 64 * 128
	if (tableIdx < endIdx)
		runtimeDataGPU.firingTableD2[tableIdx] += GtoLOffset; // convert global to local neuron id
}
__global__ void kernel_convertExtSpikesD1(int startIdx, int endIdx, int GtoLOffset) {
	// one firingTableD1 entry per thread, starting at startIdx
	int tableIdx = startIdx + blockIdx.x * blockDim.x + threadIdx.x;
	// a single thread accounts for the whole batch of received external spikes
	if (blockIdx.x == 0 && threadIdx.x == 0) {
		int numExtSpikes = endIdx - startIdx; // received external spike count
		secD1fireCntTest += numExtSpikes;
		spikeCountD1SecGPU += numExtSpikes;
		spikeCountExtRxD1GPU += numExtSpikes;
		spikeCountExtRxD1SecGPU += numExtSpikes;
	}
	// FIXME: if endIdx - startIdx > 64 * 128
	if (tableIdx < endIdx)
		runtimeDataGPU.firingTableD1[tableIdx] += GtoLOffset; // convert global to local neuron id
}
/*!
* \brief this function allocates device (GPU) memory sapce and copies information of pre-connections to it
*
* This function:
* initialize Npre_plasticInv
* (allocate and) copy Npre, Npre_plastic, Npre_plasticInv, cumulativePre, preSynapticIds
* (allocate and) copy Npost, cumulativePost, postSynapticIds, postDelayInfo
*
*
* \param[in] netId the id of a local network, which is the same as the device (GPU) id
* \param[in] lGrpId the local group id in a local network, which specifiy the group(s) to be copied
* \param[in] dest pointer to runtime data desitnation
* \param[in] src pointer to runtime data source
* \param[in] kind the direction of copying
* \param[in] allocateMem a flag indicates whether allocating memory space before copying
*
* \sa allocateSNN_GPU
* \since v4.0
*/
void SNN::copyPreConnectionInfo(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, cudaMemcpyKind kind, bool allocateMem) {
checkAndSetGPUDevice(netId);
checkDestSrcPtrs(dest, src, kind, allocateMem, lGrpId, 0); // check that the destination pointer is properly allocated..
int lengthN, lengthSyn, posN, posSyn;
// lGrpId == ALL copies the whole assigned neuron range; otherwise only the
// contiguous [lStartN, lStartN + numN) slice of the chosen group.
if (lGrpId == ALL) {
lengthN = networkConfigs[netId].numNAssigned;
posN = 0;
} else {
lengthN = groupConfigs[netId][lGrpId].numN;
posN = groupConfigs[netId][lGrpId].lStartN;
}
// connection synaptic lengths and cumulative lengths...
// NOTE: allocations are always sized for the entire network (numNAssigned) even
// when only one group is copied — per-group calls only refresh a slice.
if(allocateMem)
CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->Npre, sizeof(short) * networkConfigs[netId].numNAssigned));
CUDA_CHECK_ERRORS(cudaMemcpy(&dest->Npre[posN], &src->Npre[posN], sizeof(short) * lengthN, kind));
// we don't need these data structures if the network doesn't have any plastic synapses at all
if (!sim_with_fixedwts) {
// presyn excitatory connections
if(allocateMem)
CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->Npre_plastic, sizeof(short) * networkConfigs[netId].numNAssigned));
CUDA_CHECK_ERRORS(cudaMemcpy(&dest->Npre_plastic[posN], &src->Npre_plastic[posN], sizeof(short) * lengthN, kind));
// Npre_plasticInv is only used on GPUs, only allocate and copy it during initialization
if(allocateMem) {
// precompute 1/Npre_plastic on the host so device kernels can multiply instead of divide
float* Npre_plasticInv = new float[networkConfigs[netId].numNAssigned];
for (int i = 0; i < networkConfigs[netId].numNAssigned; i++)
// NOTE(review): yields +inf when Npre_plastic[i] == 0 — presumably the inverse
// is never read for neurons without plastic synapses; TODO confirm in kernels.
Npre_plasticInv[i] = 1.0f / managerRuntimeData.Npre_plastic[i];
CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->Npre_plasticInv, sizeof(float) * networkConfigs[netId].numNAssigned));
CUDA_CHECK_ERRORS(cudaMemcpy(dest->Npre_plasticInv, Npre_plasticInv, sizeof(float) * networkConfigs[netId].numNAssigned, kind));
delete[] Npre_plasticInv;
}
}
// beginning position for the pre-synaptic information
if(allocateMem)
CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->cumulativePre, sizeof(int) * networkConfigs[netId].numNAssigned));
CUDA_CHECK_ERRORS(cudaMemcpy(&dest->cumulativePre[posN], &src->cumulativePre[posN], sizeof(int) * lengthN, kind));
// Npre, cumulativePre has been copied to destination
// (order matters: the per-group branch below reads dest->Npre / dest->cumulativePre,
// which must already hold the data copied above)
if (lGrpId == ALL) {
lengthSyn = networkConfigs[netId].numPreSynNet;
posSyn = 0;
} else {
lengthSyn = 0;
for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++)
lengthSyn += dest->Npre[lNId];
posSyn = dest->cumulativePre[groupConfigs[netId][lGrpId].lStartN];
}
if(allocateMem)
CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->preSynapticIds, sizeof(SynInfo) * networkConfigs[netId].numPreSynNet));
CUDA_CHECK_ERRORS(cudaMemcpy(&dest->preSynapticIds[posSyn], &src->preSynapticIds[posSyn], sizeof(SynInfo) * lengthSyn, kind));
}
/*!
* \brief this function allocates device (GPU) memory sapce and copies information of post-connections to it
*
* This function:
* (allocate and) copy Npost, cumulativePost, postSynapticIds, postDelayInfo
*
*
* \param[in] netId the id of a local network, which is the same as the device (GPU) id
* \param[in] lGrpId the local group id in a local network, which specifiy the group(s) to be copied
* \param[in] dest pointer to runtime data desitnation
* \param[in] src pointer to runtime data source
* \param[in] kind the direction of copying
* \param[in] allocateMem a flag indicates whether allocating memory space before copying
*
* \sa allocateSNN_GPU
* \since v4.0
*/
void SNN::copyPostConnectionInfo(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, cudaMemcpyKind kind, bool allocateMem) {
	checkAndSetGPUDevice(netId);
	checkDestSrcPtrs(dest, src, kind, allocateMem, lGrpId, 0); // check that the destination pointer is properly allocated..
	// lGrpId == ALL copies the whole assigned neuron range; otherwise only the group slice
	const bool wholeNet = (lGrpId == ALL);
	int posN = wholeNet ? 0 : groupConfigs[netId][lGrpId].lStartN;
	int lengthN = wholeNet ? networkConfigs[netId].numNAssigned : groupConfigs[netId][lGrpId].numN;
	// number of postsynaptic connections per neuron
	// (allocations are always sized for the whole network, even for per-group copies)
	if (allocateMem)
		CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->Npost, sizeof(short) * networkConfigs[netId].numNAssigned));
	CUDA_CHECK_ERRORS(cudaMemcpy(&dest->Npost[posN], &src->Npost[posN], sizeof(short) * lengthN, kind));
	// beginning position for the post-synaptic information
	if (allocateMem)
		CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->cumulativePost, sizeof(int) * networkConfigs[netId].numNAssigned));
	CUDA_CHECK_ERRORS(cudaMemcpy(&dest->cumulativePost[posN], &src->cumulativePost[posN], sizeof(int) * lengthN, kind));
	// determine which synapse range to copy; the per-group branch reads dest->Npost
	// and dest->cumulativePost, which the copies above have just populated
	int posSyn, lengthSyn;
	if (wholeNet) {
		posSyn = 0;
		lengthSyn = networkConfigs[netId].numPostSynNet;
	} else {
		lengthSyn = 0;
		for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++)
			lengthSyn += dest->Npost[lNId];
		posSyn = dest->cumulativePost[groupConfigs[netId][lGrpId].lStartN];
	}
	// actual post synaptic connection information...
	if (allocateMem)
		CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->postSynapticIds, sizeof(SynInfo) * networkConfigs[netId].numPostSynNet));
	CUDA_CHECK_ERRORS(cudaMemcpy(&dest->postSynapticIds[posSyn], &src->postSynapticIds[posSyn], sizeof(SynInfo) * lengthSyn, kind));
	// static specific mapping and actual post-synaptic delay metric
	// (one DelayInfo record per neuron per possible delay 0..maxDelay)
	const int delaySlots = glbNetworkConfig.maxDelay + 1;
	if (allocateMem)
		CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->postDelayInfo, sizeof(DelayInfo) * networkConfigs[netId].numNAssigned * delaySlots));
	CUDA_CHECK_ERRORS(cudaMemcpy(&dest->postDelayInfo[posN * delaySlots], &src->postDelayInfo[posN * delaySlots], sizeof(DelayInfo) * lengthN * delaySlots, kind));
}
void SNN::checkDestSrcPtrs(RuntimeData* dest, RuntimeData* src, cudaMemcpyKind kind, bool allocateMem, int lGrpId, int destOffset) {
	// the source of a copy must always be fully allocated
	assert(src->allocated);
	switch (kind) {
	case cudaMemcpyHostToDevice:
		assert(src->memType == CPU_MEM);
		assert(dest->memType == GPU_MEM);
		if (allocateMem) {
			// allocating implies a fresh, whole-network destination
			assert(!dest->allocated);
			assert(lGrpId == ALL);
		} else {
			// refreshing requires the destination to exist already
			assert(dest->allocated);
		}
		assert(destOffset == 0); // H-to-D only allows local-to-local copy
		break;
	case cudaMemcpyDeviceToHost:
		assert(src->memType == GPU_MEM);
		assert(dest->memType == CPU_MEM);
		assert(dest->allocated);
		if (lGrpId == ALL)
			assert(destOffset == 0); // if copy all content, only local-to-local is allowed
		break;
	default:
		KERNEL_ERROR("Wrong Host-Device copy direction");
		exitSimulation(1);
	}
}
/*!
* \brief this function allocates device (GPU) memory sapce and copies AMPA conductance to it
*
* This function:
* (allocate and) copy gAMPA
*
* This funcion is called by copyNeuronState() and fetchConductanceAMPA(). It supports bi-directional copying
*
* \param[in] netId the id of a local network, which is the same as the device (GPU) id
* \param[in] lGrpId the local group id in a local network, which specifiy the group(s) to be copied
* \param[in] dest pointer to runtime data desitnation
* \param[in] src pointer to runtime data source
* \param[in] kind the direction of copy
* \param[in] allocateMem a flag indicates whether allocating memory space before copy
* \param[in] destOffset the offset of data destination, which is used in local-to-global copy
*
* \sa copyNeuronState fetchConductanceAMPA
* \since v3.0
*/
void SNN::copyConductanceAMPA(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, cudaMemcpyKind kind, bool allocateMem, int destOffset) {
	checkAndSetGPUDevice(netId);
	checkDestSrcPtrs(dest, src, kind, allocateMem, lGrpId, destOffset); // check that the destination pointer is properly allocated..
	assert(isSimulationWithCOBA());
	// ALL -> all regular neurons; otherwise the contiguous slice of the chosen group
	const bool wholeNet = (lGrpId == ALL);
	int ptrPos = wholeNet ? 0 : groupConfigs[netId][lGrpId].lStartN;
	int length = wholeNet ? networkConfigs[netId].numNReg : groupConfigs[netId][lGrpId].numN;
	assert(length <= networkConfigs[netId].numNReg);
	assert(length > 0);
	// AMPA conductance buffer; destOffset supports local-to-global destination indexing
	assert(src->gAMPA != NULL);
	if (allocateMem)
		CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->gAMPA, sizeof(float) * length));
	CUDA_CHECK_ERRORS(cudaMemcpy(&dest->gAMPA[ptrPos + destOffset], &src->gAMPA[ptrPos], sizeof(float) * length, kind));
}
/*!
* \brief this function allocates device (GPU) memory sapce and copies NMDA conductance to it
*
* This function:
* (allocate and) copy gNMDA, gNMDA_r, gNMDA_d
*
* This funcion is called by copyNeuronState() and fetchConductanceNMDA(). It supports bi-directional copying
*
* \param[in] netId the id of a local network, which is the same as the device (GPU) id
* \param[in] lGrpId the local group id in a local network, which specifiy the group(s) to be copied
* \param[in] dest pointer to runtime data desitnation
* \param[in] src pointer to runtime data source
* \param[in] kind the direction of copy
* \param[in] allocateMem a flag indicates whether allocating memory space before copy
* \param[in] destOffset the offset of data destination, which is used in local-to-global copy
*
* \sa copyNeuronState fetchConductanceNMDA
* \since v3.0
*/
void SNN::copyConductanceNMDA(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, cudaMemcpyKind kind, bool allocateMem, int destOffset) {
	checkAndSetGPUDevice(netId);
	checkDestSrcPtrs(dest, src, kind, allocateMem, lGrpId, destOffset); // check that the destination pointer is properly allocated..
	assert(isSimulationWithCOBA());
	// ALL -> all regular neurons; otherwise the contiguous slice of the chosen group
	const bool wholeNet = (lGrpId == ALL);
	int ptrPos = wholeNet ? 0 : groupConfigs[netId][lGrpId].lStartN;
	int length = wholeNet ? networkConfigs[netId].numNReg : groupConfigs[netId][lGrpId].numN;
	assert(length <= networkConfigs[netId].numNReg);
	assert(length > 0);
	if (isSimulationWithNMDARise()) {
		// rise and decay terms are kept in separate buffers
		// NOTE(review): unlike the non-rise branch (and copyConductanceAMPA), these two
		// copies do not add destOffset to the destination index — looks inconsistent for
		// local-to-global copies; confirm rise mode is never used with destOffset != 0.
		assert(src->gNMDA_r != NULL);
		if (allocateMem)
			CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->gNMDA_r, sizeof(float) * length));
		CUDA_CHECK_ERRORS(cudaMemcpy(&dest->gNMDA_r[ptrPos], &src->gNMDA_r[ptrPos], sizeof(float) * length, kind));
		assert(src->gNMDA_d != NULL);
		if (allocateMem)
			CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->gNMDA_d, sizeof(float) * length));
		CUDA_CHECK_ERRORS(cudaMemcpy(&dest->gNMDA_d[ptrPos], &src->gNMDA_d[ptrPos], sizeof(float) * length, kind));
	} else {
		// single combined NMDA conductance buffer
		assert(src->gNMDA != NULL);
		if (allocateMem)
			CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->gNMDA, sizeof(float) * length));
		CUDA_CHECK_ERRORS(cudaMemcpy(&dest->gNMDA[ptrPos + destOffset], &src->gNMDA[ptrPos], sizeof(float) * length, kind));
	}
}
/*!
* \brief this function allocates device (GPU) memory sapce and copies GABAa conductance to it
*
* This function:
* (allocate and) copy gGABAa
*
* This funcion is called by copyNeuronState() and fetchConductanceGABAa(). It supports bi-directional copying
*
* \param[in] netId the id of a local network, which is the same as the device (GPU) id
* \param[in] lGrpId the local group id in a local network, which specifiy the group(s) to be copied
* \param[in] dest pointer to runtime data desitnation
* \param[in] src pointer to runtime data source
* \param[in] kind the direction of copy
* \param[in] allocateMem a flag indicates whether allocating memory space before copy
* \param[in] destOffset the offset of data destination, which is used in local-to-global copy
*
* \sa copyNeuronState fetchConductanceGABAa
* \since v3.0
*/
void SNN::copyConductanceGABAa(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, cudaMemcpyKind kind, bool allocateMem, int destOffset) {
	checkAndSetGPUDevice(netId);
	checkDestSrcPtrs(dest, src, kind, allocateMem, lGrpId, destOffset); // check that the destination pointer is properly allocated..
	assert(isSimulationWithCOBA());
	// ALL -> all regular neurons; otherwise the contiguous slice of the chosen group
	const bool wholeNet = (lGrpId == ALL);
	int ptrPos = wholeNet ? 0 : groupConfigs[netId][lGrpId].lStartN;
	int length = wholeNet ? networkConfigs[netId].numNReg : groupConfigs[netId][lGrpId].numN;
	assert(length <= networkConfigs[netId].numNReg);
	assert(length > 0);
	// GABAa conductance buffer; destOffset supports local-to-global destination indexing
	assert(src->gGABAa != NULL);
	if (allocateMem)
		CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->gGABAa, sizeof(float) * length));
	CUDA_CHECK_ERRORS(cudaMemcpy(&dest->gGABAa[ptrPos + destOffset], &src->gGABAa[ptrPos], sizeof(float) * length, kind));
}
/*!
* \brief this function allocates device (GPU) memory sapce and copies GABAb conductance to it
*
* This function:
* (allocate and) copy gGABAb, gGABAb_r, gGABAb_d
*
* This funcion is called by copyNeuronState() and fetchConductanceGABAb(). It supports bi-directional copying
*
* \param[in] netId the id of a local network, which is the same as the device (GPU) id
* \param[in] lGrpId the local group id in a local network, which specifiy the group(s) to be copied
* \param[in] dest pointer to runtime data desitnation
* \param[in] src pointer to runtime data source
* \param[in] kind the direction of copy
* \param[in] allocateMem a flag indicates whether allocating memory space before copy
* \param[in] destOffset the offset of data destination, which is used in local-to-global copy
*
* \sa copyNeuronState fetchConductanceGABAb
* \since v3.0
*/
void SNN::copyConductanceGABAb(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, cudaMemcpyKind kind, bool allocateMem, int destOffset) {
	checkAndSetGPUDevice(netId);
	checkDestSrcPtrs(dest, src, kind, allocateMem, lGrpId, destOffset); // check that the destination pointer is properly allocated..
	assert(isSimulationWithCOBA());
	// ALL -> all regular neurons; otherwise the contiguous slice of the chosen group
	const bool wholeNet = (lGrpId == ALL);
	int ptrPos = wholeNet ? 0 : groupConfigs[netId][lGrpId].lStartN;
	int length = wholeNet ? networkConfigs[netId].numNReg : groupConfigs[netId][lGrpId].numN;
	assert(length <= networkConfigs[netId].numNReg);
	assert(length > 0);
	if (isSimulationWithGABAbRise()) {
		// rise and decay terms are kept in separate buffers
		// NOTE(review): unlike the non-rise branch, these two copies do not add destOffset
		// to the destination index — same inconsistency as copyConductanceNMDA; confirm
		// rise mode is never used with destOffset != 0.
		assert(src->gGABAb_r != NULL);
		if (allocateMem)
			CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->gGABAb_r, sizeof(float) * length));
		CUDA_CHECK_ERRORS(cudaMemcpy(&dest->gGABAb_r[ptrPos], &src->gGABAb_r[ptrPos], sizeof(float) * length, kind));
		assert(src->gGABAb_d != NULL);
		if (allocateMem)
			CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->gGABAb_d, sizeof(float) * length));
		CUDA_CHECK_ERRORS(cudaMemcpy(&dest->gGABAb_d[ptrPos], &src->gGABAb_d[ptrPos], sizeof(float) * length, kind));
	} else {
		// single combined GABAb conductance buffer
		assert(src->gGABAb != NULL);
		if (allocateMem)
			CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->gGABAb, sizeof(float) * length));
		CUDA_CHECK_ERRORS(cudaMemcpy(&dest->gGABAb[ptrPos + destOffset], &src->gGABAb[ptrPos], sizeof(float) * length, kind));
	}
}
/*!
* \brief this function allocates device (GPU) memory sapce and copies variables related to nueron state to it
*
* This function:
* (allocate and) copy voltage, recovery, current, avgFiring
*
* This funcion is called by allocateSNN_GPU(). Only copying from host to device is required
*
* \param[in] netId the id of a local network, which is the same as the device (GPU) id
* \param[in] lGrpId the local group id in a local network, which specifiy the group(s) to be copied
* \param[in] dest pointer to runtime data desitnation
* \param[in] allocateMem a flag indicates whether allocating memory space before copying
*
* \sa allocateSNN_GPU fetchNeuronState
* \since v3.0
*/
void SNN::copyNeuronState(int netId, int lGrpId, RuntimeData* dest, cudaMemcpyKind kind, bool allocateMem) {
	checkAndSetGPUDevice(netId);
	checkDestSrcPtrs(dest, &managerRuntimeData, cudaMemcpyHostToDevice, allocateMem, lGrpId, 0); // check that the destination pointer is properly allocated..
	assert(kind == cudaMemcpyHostToDevice); // this routine only initializes device state from the host manager
	int ptrPos, length;
	if (lGrpId == ALL) {
		ptrPos = 0;
		length = networkConfigs[netId].numNReg;
	} else {
		ptrPos = groupConfigs[netId][lGrpId].lStartN;
		length = groupConfigs[netId][lGrpId].numN;
	}
	assert(length <= networkConfigs[netId].numNReg);
	if (length == 0)
		return;
	// Skip refresh copies for spike-generator groups.
	// FIX: guard the lookup with lGrpId != ALL — the old code evaluated
	// groupConfigs[netId][lGrpId] even when lGrpId == ALL (a negative sentinel),
	// an out-of-bounds index on the whole-network refresh path.
	if (!allocateMem && lGrpId != ALL && (groupConfigs[netId][lGrpId].Type & POISSON_NEURON))
		return;
	// recovery variable
	if (allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->recovery, sizeof(float) * length));
	CUDA_CHECK_ERRORS(cudaMemcpy(&dest->recovery[ptrPos], &managerRuntimeData.recovery[ptrPos], sizeof(float) * length, cudaMemcpyHostToDevice));
	// membrane voltage
	if (allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->voltage, sizeof(float) * length));
	CUDA_CHECK_ERRORS(cudaMemcpy(&dest->voltage[ptrPos], &managerRuntimeData.voltage[ptrPos], sizeof(float) * length, cudaMemcpyHostToDevice));
	// neuron input current...
	if (allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->current, sizeof(float) * length));
	CUDA_CHECK_ERRORS(cudaMemcpy(&dest->current[ptrPos], &managerRuntimeData.current[ptrPos], sizeof(float) * length, cudaMemcpyHostToDevice));
	if (sim_with_conductances) {
		// conductance information (AMPA/NMDA/GABAa/GABAb buffers)
		copyConductanceAMPA(netId, lGrpId, dest, &managerRuntimeData, cudaMemcpyHostToDevice, allocateMem, 0);
		copyConductanceNMDA(netId, lGrpId, dest, &managerRuntimeData, cudaMemcpyHostToDevice, allocateMem, 0);
		copyConductanceGABAa(netId, lGrpId, dest, &managerRuntimeData, cudaMemcpyHostToDevice, allocateMem, 0);
		copyConductanceGABAb(netId, lGrpId, dest, &managerRuntimeData, cudaMemcpyHostToDevice, allocateMem, 0);
	}
	// copying external current needs to be done separately because setExternalCurrent needs to call it, too
	// do it only from host to device
	copyExternalCurrent(netId, lGrpId, dest, cudaMemcpyHostToDevice, allocateMem);
	copyNeuronParameters(netId, lGrpId, dest, cudaMemcpyHostToDevice, allocateMem);
	if (sim_with_homeostasis) {
		// Included to enable homeostasis in GPU_MODE: average firing rate per neuron
		if (allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->avgFiring, sizeof(float) * length));
		CUDA_CHECK_ERRORS(cudaMemcpy(&dest->avgFiring[ptrPos], &managerRuntimeData.avgFiring[ptrPos], sizeof(float) * length, cudaMemcpyHostToDevice));
	}
}
/*!
* \brief this function allocates device (GPU) memory sapce and copies the spike count of each neuron to it
*
* This function:
* (allocate and) copy nSpikeCnt
*
* This funcion is called by copyAuxiliaryData() and fetchNeuronSpikeCount(). It supports bi-directional copying
*
* \param[in] netId the id of a local network, which is the same as the device (GPU) id
* \param[in] lGrpId the local group id in a local network, which specifiy the group(s) to be copied
* \param[in] dest pointer to runtime data desitnation
* \param[in] src pointer to runtime data source
* \param[in] kind the direction of copy
* \param[in] allocateMem a flag indicates whether allocating memory space before copy
* \param[in] destOffset the offset of data destination, which is used in local-to-global copy
*
* \sa copyAuxiliaryData fetchNeuronSpikeCount
* \since v4.0
*/
void SNN::copyNeuronSpikeCount(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, cudaMemcpyKind kind, bool allocateMem, int destOffset) {
	checkAndSetGPUDevice(netId);
	checkDestSrcPtrs(dest, src, kind, allocateMem, lGrpId, destOffset); // check that the destination pointer is properly allocated..
	// ALL -> every neuron of the local network; otherwise only the group slice
	const bool wholeNet = (lGrpId == ALL);
	int posN = wholeNet ? 0 : groupConfigs[netId][lGrpId].lStartN;
	int lengthN = wholeNet ? networkConfigs[netId].numN : groupConfigs[netId][lGrpId].numN;
	assert(lengthN > 0 && lengthN <= networkConfigs[netId].numN);
	// spike count information; destOffset supports local-to-global destination indexing
	if (allocateMem)
		CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->nSpikeCnt, sizeof(int) * lengthN));
	CUDA_CHECK_ERRORS(cudaMemcpy(&dest->nSpikeCnt[posN + destOffset], &src->nSpikeCnt[posN], sizeof(int) * lengthN, kind));
}
// FIXME: move grpDA(5HT, ACh, NE)Buffer to copyAuxiliaryData
/*!
* \brief this function allocates device (GPU) memory sapce and copies variables related to group state to it
*
* This function:
* (allocate and) copy grpDA, grp5HT, grpACh, grpNE, grpDABuffer, grp5HTBuffer, grpAChBuffer, grpNEBuffer
*
* This funcion is called by allocateSNN_GPU() and fetchGroupState(). It supports bi-directional copying
*
* \param[in] netId the id of a local network, which is the same as the device (GPU) id
* \param[in] lGrpId the local group id in a local network, which specifiy the group(s) to be copied
* \param[in] dest pointer to runtime data desitnation
* \param[in] src pointer to runtime data source
* \param[in] kind the direction of copying
* \param[in] allocateMem a flag indicates whether allocating memory space before copying
*
* \sa allocateSNN_GPU fetchGroupState
* \since v3.0
*/
void SNN::copyGroupState(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, cudaMemcpyKind kind, bool allocateMem) {
	checkAndSetGPUDevice(netId);
	checkDestSrcPtrs(dest, src, kind, allocateMem, lGrpId, 0); // check that the destination pointer is properly allocated..
	const size_t grpBytes = sizeof(float) * networkConfigs[netId].numGroups;
	// per-group neuromodulator concentrations (DA, 5HT, ACh, NE)
	if (allocateMem) {
		assert(dest->memType == GPU_MEM && !dest->allocated);
		CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->grpDA, grpBytes));
		CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->grp5HT, grpBytes));
		CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->grpACh, grpBytes));
		CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->grpNE, grpBytes));
	}
	CUDA_CHECK_ERRORS(cudaMemcpy(dest->grpDA, src->grpDA, grpBytes, kind));
	CUDA_CHECK_ERRORS(cudaMemcpy(dest->grp5HT, src->grp5HT, grpBytes, kind));
	CUDA_CHECK_ERRORS(cudaMemcpy(dest->grpACh, src->grpACh, grpBytes, kind));
	CUDA_CHECK_ERRORS(cudaMemcpy(dest->grpNE, src->grpNE, grpBytes, kind));
	if (lGrpId < 0) {
		// negative group id (e.g. ALL): handle the 1000-sample history buffers for all groups
		const size_t bufBytes = sizeof(float) * 1000 * networkConfigs[netId].numGroups;
		if (allocateMem) {
			assert(dest->memType == GPU_MEM && !dest->allocated);
			CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->grpDABuffer, bufBytes));
			CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->grp5HTBuffer, bufBytes));
			CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->grpAChBuffer, bufBytes));
			CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->grpNEBuffer, bufBytes));
		}
		CUDA_CHECK_ERRORS(cudaMemcpy(dest->grpDABuffer, src->grpDABuffer, bufBytes, kind));
		CUDA_CHECK_ERRORS(cudaMemcpy(dest->grp5HTBuffer, src->grp5HTBuffer, bufBytes, kind));
		CUDA_CHECK_ERRORS(cudaMemcpy(dest->grpAChBuffer, src->grpAChBuffer, bufBytes, kind));
		CUDA_CHECK_ERRORS(cudaMemcpy(dest->grpNEBuffer, src->grpNEBuffer, bufBytes, kind));
	} else {
		// single group: copy just that group's 1000-sample slice (never an allocation pass)
		assert(!allocateMem);
		const int bufPos = lGrpId * 1000;
		CUDA_CHECK_ERRORS(cudaMemcpy(&dest->grpDABuffer[bufPos], &src->grpDABuffer[bufPos], sizeof(float) * 1000, kind));
		CUDA_CHECK_ERRORS(cudaMemcpy(&dest->grp5HTBuffer[bufPos], &src->grp5HTBuffer[bufPos], sizeof(float) * 1000, kind));
		CUDA_CHECK_ERRORS(cudaMemcpy(&dest->grpAChBuffer[bufPos], &src->grpAChBuffer[bufPos], sizeof(float) * 1000, kind));
		CUDA_CHECK_ERRORS(cudaMemcpy(&dest->grpNEBuffer[bufPos], &src->grpNEBuffer[bufPos], sizeof(float) * 1000, kind));
	}
}
/*!
* \brief this function allocates device (GPU) memory sapce and copies neural parameters to it
*
* This function:
* (allocate and) copy Izh_a, Izh_b, Izh_c, Izh_d
* initialize baseFiringInv
* (allocate and) copy baseFiring, baseFiringInv
*
* This funcion is only called by copyNeuronState(). Only copying direction from host to device is required.
*
* \param[in] netId the id of a local network, which is the same as the device (GPU) id
* \param[in] lGrpId the local group id in a local network, which specifiy the group(s) to be copied
* \param[in] dest pointer to runtime data desitnation
* \param[in] allocateMem a flag indicates whether allocating memory space before copying
*
* \sa copyNeuronState
* \since v3.0
*/
// Allocates (optionally) and copies per-neuron Izhikevich parameters and homeostasis
// base-firing data from the manager (host) buffers to device memory.
// Host-to-device only; allocation is only allowed for the whole network (lGrpId == ALL)
// to avoid memory fragmentation.
void SNN::copyNeuronParameters(int netId, int lGrpId, RuntimeData* dest, cudaMemcpyKind kind, bool allocateMem) {
	checkAndSetGPUDevice(netId);

	// only copying direction host -> device is supported for neuron parameters
	assert(kind == cudaMemcpyHostToDevice);

	int ptrPos, length;

	// check that the destination pointer is properly allocated..
	checkDestSrcPtrs(dest, &managerRuntimeData, cudaMemcpyHostToDevice, allocateMem, lGrpId, 0);

	// cannot rely on checkDestSrcPtrs alone here because the src pointer would be NULL
	if (dest->allocated && allocateMem) {
		KERNEL_ERROR("GPU Memory already allocated...");
		exitSimulation(1);
	}

	// when allocating we are allocating the memory.. we need to do it completely... to avoid memory fragmentation..
	if (allocateMem) {
		assert(lGrpId == ALL);
		assert(dest->Izh_a == NULL);
		assert(dest->Izh_b == NULL);
		assert(dest->Izh_c == NULL);
		assert(dest->Izh_d == NULL);
	}

	// ptrPos/length select either the whole regular-neuron range or a single group's range
	if (lGrpId == ALL) {
		ptrPos = 0;
		length = networkConfigs[netId].numNReg;
	} else {
		ptrPos = groupConfigs[netId][lGrpId].lStartN;
		length = groupConfigs[netId][lGrpId].numN;
	}

	// (allocate and) copy the four Izhikevich parameters
	if (allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->Izh_a, sizeof(float) * length));
	CUDA_CHECK_ERRORS(cudaMemcpy(&dest->Izh_a[ptrPos], &(managerRuntimeData.Izh_a[ptrPos]), sizeof(float) * length, cudaMemcpyHostToDevice));

	if (allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->Izh_b, sizeof(float) * length));
	CUDA_CHECK_ERRORS(cudaMemcpy(&dest->Izh_b[ptrPos], &(managerRuntimeData.Izh_b[ptrPos]), sizeof(float) * length, cudaMemcpyHostToDevice));

	if (allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->Izh_c, sizeof(float) * length));
	CUDA_CHECK_ERRORS(cudaMemcpy(&dest->Izh_c[ptrPos], &(managerRuntimeData.Izh_c[ptrPos]), sizeof(float) * length, cudaMemcpyHostToDevice));

	if (allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->Izh_d, sizeof(float) * length));
	CUDA_CHECK_ERRORS(cudaMemcpy(&dest->Izh_d[ptrPos], &(managerRuntimeData.Izh_d[ptrPos]), sizeof(float) * length, cudaMemcpyHostToDevice));

	// pre-compute baseFiringInv for fast computation on GPUs.
	if (sim_with_homeostasis) {
		float* baseFiringInv = new float[length];
		for (int nid = 0; nid < length; nid++) {
			// FIX: index consistently with the ptrPos offset. The previous code
			// tested baseFiring[nid] but divided by baseFiring[ptrPos + nid],
			// which is wrong whenever a single group (ptrPos != 0) is copied.
			if (managerRuntimeData.baseFiring[ptrPos + nid] != 0.0f)
				baseFiringInv[nid] = 1.0f / managerRuntimeData.baseFiring[ptrPos + nid];
			else
				baseFiringInv[nid] = 0.0f; // guard against division by zero for silent neurons
		}
		if (allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->baseFiringInv, sizeof(float) * length));
		CUDA_CHECK_ERRORS(cudaMemcpy(&dest->baseFiringInv[ptrPos], baseFiringInv, sizeof(float) * length, cudaMemcpyHostToDevice));

		if (allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->baseFiring, sizeof(float) * length));
		// FIX: source must start at ptrPos as well (was copying from index 0 even
		// when only a single group was requested).
		CUDA_CHECK_ERRORS(cudaMemcpy(&dest->baseFiring[ptrPos], &(managerRuntimeData.baseFiring[ptrPos]), sizeof(float) * length, cudaMemcpyHostToDevice));

		delete[] baseFiringInv;
	}
}
/*!
* \brief this function allocates device (GPU) memory space and copies short-term plasticity (STP) state to it
*
* This function:
* initialize STP_Pitch
* (allocate and) copy stpu, stpx
*
* This function is called by allocateSNN_GPU() and fetchSTPState(). It supports bi-directional copying
*
* \param[in] netId the id of a local network, which is the same as the device (GPU) id
* \param[in] lGrpId the local group id in a local network, which specifies the group(s) to be copied
* \param[in] dest pointer to runtime data destination
* \param[in] src pointer to runtime data source
* \param[in] kind the direction of copying
* \param[in] allocateMem a flag that indicates whether to allocate memory space before copying
*
* \sa allocateSNN_GPU fetchSTPState
* \since v3.0
*/
void SNN::copySTPState(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, cudaMemcpyKind kind, bool allocateMem) {
checkAndSetGPUDevice(netId);
checkDestSrcPtrs(dest, src, kind, allocateMem, lGrpId, 0); // check that the destination pointer is properly allocated..
// STP feature is optional, do additional check for memory space
if(allocateMem) {
assert(dest->stpu == NULL);
assert(dest->stpx == NULL);
} else {
assert(dest->stpu != NULL);
assert(dest->stpx != NULL);
}
assert(src->stpu != NULL); assert(src->stpx != NULL);
size_t STP_Pitch;
size_t widthInBytes = sizeof(float) * networkConfigs[netId].numN;
// if(allocateMem) CUDA_CHECK_ERRORS( cudaMalloc( (void**) &dest->stpu, sizeof(float)*networkConfigs[0].numN));
// CUDA_CHECK_ERRORS( cudaMemcpy( &dest->stpu[0], &src->stpu[0], sizeof(float)*networkConfigs[0].numN, kind));
// if(allocateMem) CUDA_CHECK_ERRORS( cudaMalloc( (void**) &dest->stpx, sizeof(float)*networkConfigs[0].numN));
// CUDA_CHECK_ERRORS( cudaMemcpy( &dest->stpx[0], &src->stpx[0], sizeof(float)*networkConfigs[0].numN, kind));
// allocate the stpu and stpx variable
// stpu/stpx are 2D on the device: (maxDelay+1) rows of numN floats, allocated with
// cudaMallocPitch so each row is aligned; STP_Pitch is the padded row width.
if (allocateMem)
CUDA_CHECK_ERRORS(cudaMallocPitch((void**)&dest->stpu, &networkConfigs[netId].STP_Pitch, widthInBytes, networkConfigs[netId].maxDelay + 1));
if (allocateMem)
CUDA_CHECK_ERRORS(cudaMallocPitch((void**)&dest->stpx, &STP_Pitch, widthInBytes, networkConfigs[netId].maxDelay + 1));
assert(networkConfigs[netId].STP_Pitch > 0);
assert(STP_Pitch > 0); // stp_pitch should be greater than zero
assert(STP_Pitch == networkConfigs[netId].STP_Pitch); // we want same Pitch for stpu and stpx
assert(networkConfigs[netId].STP_Pitch >= widthInBytes); // stp_pitch should be greater than the width
// convert the Pitch value to multiples of float
// NOTE: after this conversion STP_Pitch is an element count, not a byte count; the
// indexing below (t * STP_Pitch) relies on that.
assert(networkConfigs[netId].STP_Pitch % (sizeof(float)) == 0);
if (allocateMem)
networkConfigs[netId].STP_Pitch = networkConfigs[netId].STP_Pitch/sizeof(float);
// fprintf(stderr, "STP_Pitch = %ld, STP_witdhInBytes = %d\n", networkConfigs[0].STP_Pitch, widthInBytes);
// staging buffer for one delay-slice (numN values) of stpu/stpx
float* tmp_stp = new float[networkConfigs[netId].numN];
// copy the already generated values of stpx and stpu to the GPU
for(int t = 0; t < networkConfigs[netId].maxDelay + 1; t++) {
if (kind == cudaMemcpyHostToDevice) {
// stpu in the CPU might be mapped in a specific way. we want to change the format
// to something that is okay with the GPU STP_U and STP_X variable implementation..
for (int n = 0; n < networkConfigs[netId].numN; n++) {
int idx = STP_BUF_POS(n, t, glbNetworkConfig.maxDelay);
tmp_stp[n] = managerRuntimeData.stpu[idx];
//assert(tmp_stp[n] == 0.0f); // STP is not enabled for all groups
}
CUDA_CHECK_ERRORS(cudaMemcpy(&dest->stpu[t * networkConfigs[netId].STP_Pitch], tmp_stp, sizeof(float) * networkConfigs[netId].numN, cudaMemcpyHostToDevice));
for (int n = 0; n < networkConfigs[netId].numN; n++) {
int idx = STP_BUF_POS(n, t, glbNetworkConfig.maxDelay);
tmp_stp[n] = managerRuntimeData.stpx[idx];
//assert(tmp_stp[n] == 1.0f); // STP is not enabled for all groups
}
CUDA_CHECK_ERRORS(cudaMemcpy(&dest->stpx[t * networkConfigs[netId].STP_Pitch], tmp_stp, sizeof(float) * networkConfigs[netId].numN, cudaMemcpyHostToDevice));
} else {
// device -> host: reverse the conversion, scattering each slice back into the
// STP_BUF_POS layout of the manager buffers
CUDA_CHECK_ERRORS(cudaMemcpy(tmp_stp, &dest->stpu[t * networkConfigs[netId].STP_Pitch], sizeof(float) * networkConfigs[netId].numN, cudaMemcpyDeviceToHost));
for (int n = 0; n < networkConfigs[netId].numN; n++)
managerRuntimeData.stpu[STP_BUF_POS(n, t, glbNetworkConfig.maxDelay)] = tmp_stp[n];
CUDA_CHECK_ERRORS(cudaMemcpy(tmp_stp, &dest->stpx[t * networkConfigs[netId].STP_Pitch], sizeof(float) * networkConfigs[netId].numN, cudaMemcpyDeviceToHost));
for (int n = 0; n < networkConfigs[netId].numN; n++)
managerRuntimeData.stpx[STP_BUF_POS(n, t, glbNetworkConfig.maxDelay)] = tmp_stp[n];
}
}
delete [] tmp_stp;
}
/*!
* \brief This function copies networkConfig from host to device
*
* This function:
* copy networkConfig
*
* \param[in] netId the id of a local network whose networkConfig will be copied to device (GPU) memory
*
* \since v4.0
*/
// Pushes this partition's NetworkConfigRT struct into the device-side
// constant symbol networkConfigGPU (host -> device only).
void SNN::copyNetworkConfig(int netId) {
	checkAndSetGPUDevice(netId);
	NetworkConfigRT* config = &networkConfigs[netId];
	CUDA_CHECK_ERRORS(cudaMemcpyToSymbol(networkConfigGPU, config, sizeof(NetworkConfigRT), 0, cudaMemcpyHostToDevice));
}
/*!
* \brief This function copies groupConfigs from host to device
*
* This function:
* copy groupConfigs
*
* \param[in] netId the id of a local network whose groupConfigs will be copied to device (GPU) memory
*
* \since v4.0
*/
// Pushes the per-group runtime configs of this partition into the device-side
// constant symbol groupConfigsGPU (one GroupConfigRT per assigned group).
void SNN::copyGroupConfigs(int netId) {
	checkAndSetGPUDevice(netId);
	size_t numBytes = sizeof(GroupConfigRT) * networkConfigs[netId].numGroupsAssigned;
	CUDA_CHECK_ERRORS(cudaMemcpyToSymbol(groupConfigsGPU, groupConfigs[netId], numBytes, 0, cudaMemcpyHostToDevice));
}
/*!
* \brief this function copies weight state in device (GPU) memory space to main (CPU) memory space
*
* This function:
* copy wt, wtChange, synSpikeTime
*
* This function is only called by fetchWeightState(). Only the copying direction from device to host is required.
*
* \param[in] netId the id of a local network, which is the same as the device (GPU) id
* \param[in] lGrpId the local group id in a local network, which specifies the group(s) to be copied
*
* \sa fetchWeightState
* \since v4.0
*/
// Copies synaptic weights (and, for plastic networks, weight derivatives) for one
// group or the whole network from device memory into the manager (host) buffers.
// Device-to-host only.
void SNN::copyWeightState(int netId, int lGrpId, cudaMemcpyKind kind) {
	checkAndSetGPUDevice(netId);
	// check that the destination pointer is properly allocated..
	checkDestSrcPtrs(&managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false, lGrpId, 0);
	assert(kind == cudaMemcpyDeviceToHost);

	// pre-connection info (Npre, cumulativePre) is needed below to locate the
	// group's synapse range in the flat weight arrays
	copyPreConnectionInfo(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false);

	int posSyn = 0;
	int lengthSyn = 0;
	if (lGrpId == ALL) {
		lengthSyn = networkConfigs[netId].numPreSynNet;
	} else {
		// sum the pre-synapse counts of all neurons in the group
		for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++)
			lengthSyn += managerRuntimeData.Npre[lNId];
		posSyn = managerRuntimeData.cumulativePre[groupConfigs[netId][lGrpId].lStartN];
	}
	assert(posSyn < networkConfigs[netId].numPreSynNet || networkConfigs[netId].numPreSynNet == 0);
	assert(lengthSyn <= networkConfigs[netId].numPreSynNet);

	CUDA_CHECK_ERRORS(cudaMemcpy(&managerRuntimeData.wt[posSyn], &runtimeData[netId].wt[posSyn], sizeof(float) * lengthSyn, cudaMemcpyDeviceToHost));

	// copy firing time for individual synapses
	//CUDA_CHECK_ERRORS(cudaMemcpy(&managerRuntimeData.synSpikeTime[cumPos_syn], &runtimeData[netId].synSpikeTime[cumPos_syn], sizeof(int) * length_wt, cudaMemcpyDeviceToHost));

	// weight derivatives only exist when the network has plastic synapses
	if (!sim_with_fixedwts || sim_with_stdp) {
		CUDA_CHECK_ERRORS(cudaMemcpy(&managerRuntimeData.wtChange[posSyn], &runtimeData[netId].wtChange[posSyn], sizeof(float) * lengthSyn, cudaMemcpyDeviceToHost));
	}
}
/*!
* \brief this function allocates device (GPU) memory space and copies variables related to synapses to it
*
* This function:
* (allocate and) copy wt, wtChange, maxSynWt
*
*
* \param[in] netId the id of a local network, which is the same as the device (GPU) id
* \param[in] dest pointer to runtime data destination
* \param[in] src pointer to runtime data source
* \param[in] allocateMem a flag that indicates whether to allocate memory space before copying
*
* \sa allocateSNN_GPU
* \since v4.0
*/
// (Optionally allocates and) copies the flat per-synapse arrays: weights always,
// weight change and maximum weight only when the network has plastic synapses.
void SNN::copySynapseState(int netId, RuntimeData* dest, RuntimeData* src, cudaMemcpyKind kind, bool allocateMem) {
	checkAndSetGPUDevice(netId);
	// check that the destination pointer is properly allocated..
	checkDestSrcPtrs(dest, src, kind, allocateMem, ALL, 0);
	assert(networkConfigs[netId].numPreSynNet > 0);

	// all three arrays have one float per pre-synapse in the network
	size_t numBytes = sizeof(float) * networkConfigs[netId].numPreSynNet;

	// synaptic weights
	if (allocateMem)
		CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->wt, numBytes));
	CUDA_CHECK_ERRORS(cudaMemcpy(dest->wt, src->wt, numBytes, kind));

	// wtChange/maxSynWt show up in gpuUpdateLTP() and updateSynapticWeights(),
	// which are never called when sim_with_fixedwts is set -- skip them then.
	if (!sim_with_fixedwts) {
		if (allocateMem)
			CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->wtChange, numBytes));
		CUDA_CHECK_ERRORS(cudaMemcpy(dest->wtChange, src->wtChange, numBytes, kind));

		if (allocateMem)
			CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->maxSynWt, numBytes));
		CUDA_CHECK_ERRORS(cudaMemcpy(dest->maxSynWt, src->maxSynWt, numBytes, kind));
	}
}
/*!
* \brief this function allocates device (GPU) memory space and copies auxiliary runtime data to it
*
* This function:
* (allocate and) reset spikeGenBits, poissonFireRate
* initialize I_setLength, I_setPitch; (allocate and) reset I_set
* (allocate and) copy synSpikeTime, lastSpikeTime
* (allocate and) copy nSpikeCnt
* (allocate and) copy grpIds, connIdsPreIdx
* (allocate and) copy firingTableD1, firingTableD2
* This function is only called by allocateSNN_GPU. Therefore, only the copying direction from host to device is required.
*
* \param[in] netId the id of a local network, which is the same as the device (GPU) id
* \param[in] dest pointer to runtime data destination
* \param[in] allocateMem a flag that indicates whether to allocate memory space before copying
*
* \sa allocateSNN_GPU
* \since v4.0
*/
void SNN::copyAuxiliaryData(int netId, int lGrpId, RuntimeData* dest, cudaMemcpyKind kind, bool allocateMem) {
checkAndSetGPUDevice(netId);
checkDestSrcPtrs(dest, &managerRuntimeData, cudaMemcpyHostToDevice, allocateMem, ALL, 0); // check that the destination pointer is properly allocated..
assert(kind == cudaMemcpyHostToDevice);
assert(networkConfigs[netId].numN > 0);
// spikeGenBits: one bit per spike-generator neuron, zero-initialized
if(allocateMem)
CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->spikeGenBits, sizeof(int) * (networkConfigs[netId].numNSpikeGen / 32 + 1)));
CUDA_CHECK_ERRORS(cudaMemset(dest->spikeGenBits, 0, sizeof(int) * (networkConfigs[netId].numNSpikeGen / 32 + 1)));
// allocate the poisson neuron poissonFireRate
CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->poissonFireRate, sizeof(float) * networkConfigs[netId].numNPois));
CUDA_CHECK_ERRORS(cudaMemset(dest->poissonFireRate, 0, sizeof(float) * networkConfigs[netId].numNPois));
// synaptic auxiliary data
// I_set: a bit vector indicates which synapse got a spike
// (I_setLength rows of numNReg ints, pitch-aligned)
if(allocateMem) {
networkConfigs[netId].I_setLength = ceil(((networkConfigs[netId].maxNumPreSynN) / 32.0f));
CUDA_CHECK_ERRORS(cudaMallocPitch((void**)&dest->I_set, &networkConfigs[netId].I_setPitch, sizeof(int) * networkConfigs[netId].numNReg, networkConfigs[netId].I_setLength));
}
assert(networkConfigs[netId].I_setPitch > 0 || networkConfigs[netId].maxNumPreSynN == 0);
CUDA_CHECK_ERRORS(cudaMemset(dest->I_set, 0, networkConfigs[netId].I_setPitch * networkConfigs[netId].I_setLength));
// synSpikeTime: an array indicates the last time when a synapse got a spike
if(allocateMem)
CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->synSpikeTime, sizeof(int) * networkConfigs[netId].numPreSynNet));
CUDA_CHECK_ERRORS(cudaMemcpy(dest->synSpikeTime, managerRuntimeData.synSpikeTime, sizeof(int) * networkConfigs[netId].numPreSynNet, cudaMemcpyHostToDevice));
// neural auxiliary data
// lastSpikeTime: an array indicates the last time of a neuron emitting a spike
// neuron firing time
if(allocateMem)
CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->lastSpikeTime, sizeof(int) * networkConfigs[netId].numNAssigned));
CUDA_CHECK_ERRORS(cudaMemcpy(dest->lastSpikeTime, managerRuntimeData.lastSpikeTime, sizeof(int) * networkConfigs[netId].numNAssigned, cudaMemcpyHostToDevice));
// auxiliary data for recording spike count of each neuron
// NOTE(review): allocateMem is hard-coded to true here, independent of the
// function's allocateMem flag -- confirm this is intentional.
copyNeuronSpikeCount(netId, lGrpId, dest, &managerRuntimeData, cudaMemcpyHostToDevice, true, 0);
// quick lookup array for local group ids
if(allocateMem)
CUDA_CHECK_ERRORS(cudaMalloc( (void**)&dest->grpIds, sizeof(short int) * networkConfigs[netId].numNAssigned));
CUDA_CHECK_ERRORS(cudaMemcpy( dest->grpIds, managerRuntimeData.grpIds, sizeof(short int) * networkConfigs[netId].numNAssigned, cudaMemcpyHostToDevice));
// quick lookup array for conn ids
if(allocateMem)
CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->connIdsPreIdx, sizeof(short int) * networkConfigs[netId].numPreSynNet));
CUDA_CHECK_ERRORS(cudaMemcpy(dest->connIdsPreIdx, managerRuntimeData.connIdsPreIdx, sizeof(short int) * networkConfigs[netId].numPreSynNet, cudaMemcpyHostToDevice));
// firing table
if(allocateMem) {
assert(dest->firingTableD1 == NULL);
assert(dest->firingTableD2 == NULL);
}
// allocate 1ms firing table
if(allocateMem)
CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->firingTableD1, sizeof(int) * networkConfigs[netId].maxSpikesD1));
if (networkConfigs[netId].maxSpikesD1 > 0)
CUDA_CHECK_ERRORS(cudaMemcpy(dest->firingTableD1, managerRuntimeData.firingTableD1, sizeof(int) * networkConfigs[netId].maxSpikesD1, cudaMemcpyHostToDevice));
// allocate 2+ms firing table
if(allocateMem)
CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->firingTableD2, sizeof(int) * networkConfigs[netId].maxSpikesD2));
if (networkConfigs[netId].maxSpikesD2 > 0)
CUDA_CHECK_ERRORS(cudaMemcpy(dest->firingTableD2, managerRuntimeData.firingTableD2, sizeof(int) * networkConfigs[netId].maxSpikesD2, cudaMemcpyHostToDevice));
// allocate external 1ms firing table
// extFiringTableD1 is an array of device pointers: one per-group buffer for groups
// with external connections, stored into the device-side pointer array via memcpy.
// NOTE(review): the loop variable lGrpId shadows the function parameter lGrpId;
// harmless here (the parameter is no longer used), but worth renaming.
if (allocateMem) {
void* devPtr;
CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->extFiringTableD1, sizeof(int*) * networkConfigs[netId].numGroups));
CUDA_CHECK_ERRORS(cudaMemset(dest->extFiringTableD1, 0 /* NULL */, sizeof(int*) * networkConfigs[netId].numGroups));
for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroups; lGrpId++) {
if (groupConfigs[netId][lGrpId].hasExternalConnect) {
CUDA_CHECK_ERRORS(cudaMalloc((void**)&devPtr, sizeof(int) * groupConfigs[netId][lGrpId].numN * NEURON_MAX_FIRING_RATE));
CUDA_CHECK_ERRORS(cudaMemset(devPtr, 0 , sizeof(int) * groupConfigs[netId][lGrpId].numN * NEURON_MAX_FIRING_RATE));
CUDA_CHECK_ERRORS(cudaMemcpy(&dest->extFiringTableD1[lGrpId], &devPtr, sizeof(int*), cudaMemcpyHostToDevice));
}
}
}
// allocate external 2+ms firing table (same pointer-array scheme as D1)
if (allocateMem) {
void* devPtr;
CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->extFiringTableD2, sizeof(int*) * networkConfigs[netId].numGroups));
CUDA_CHECK_ERRORS(cudaMemset(dest->extFiringTableD2, 0 /* NULL */, sizeof(int*) * networkConfigs[netId].numGroups));
for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroups; lGrpId++) {
if (groupConfigs[netId][lGrpId].hasExternalConnect) {
CUDA_CHECK_ERRORS(cudaMalloc((void**)&devPtr, sizeof(int) * groupConfigs[netId][lGrpId].numN * NEURON_MAX_FIRING_RATE));
CUDA_CHECK_ERRORS(cudaMemset(devPtr, 0 , sizeof(int) * groupConfigs[netId][lGrpId].numN * NEURON_MAX_FIRING_RATE));
CUDA_CHECK_ERRORS(cudaMemcpy(&dest->extFiringTableD2[lGrpId], &devPtr, sizeof(int*), cudaMemcpyHostToDevice));
}
}
}
// allocate external 1ms firing table index
if (allocateMem)
CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->extFiringTableEndIdxD1, sizeof(int) * networkConfigs[netId].numGroups));
CUDA_CHECK_ERRORS(cudaMemset(dest->extFiringTableEndIdxD1, 0, sizeof(int) * networkConfigs[netId].numGroups));
// allocate external 2+ms firing table index
if (allocateMem)
CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->extFiringTableEndIdxD2, sizeof(int) * networkConfigs[netId].numGroups));
CUDA_CHECK_ERRORS(cudaMemset(dest->extFiringTableEndIdxD2, 0, sizeof(int) * networkConfigs[netId].numGroups));
}
// Fetches the neuron-to-group lookup array (grpIds) from device memory into the
// manager buffer. Device-to-host only.
void SNN::copyGrpIdsLookupArray(int netId, cudaMemcpyKind kind) {
	checkAndSetGPUDevice(netId);
	// check that the destination pointer is properly allocated..
	checkDestSrcPtrs(&managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false, ALL, 0);
	assert(kind == cudaMemcpyDeviceToHost);
	size_t numBytes = sizeof(short int) * networkConfigs[netId].numNAssigned;
	CUDA_CHECK_ERRORS(cudaMemcpy(managerRuntimeData.grpIds, runtimeData[netId].grpIds, numBytes, cudaMemcpyDeviceToHost));
}
// Fetches the synapse-to-connection lookup array (connIdsPreIdx) from device
// memory into the manager buffer. Device-to-host only.
void SNN::copyConnIdsLookupArray(int netId, cudaMemcpyKind kind) {
	checkAndSetGPUDevice(netId);
	// check that the destination pointer is properly allocated..
	checkDestSrcPtrs(&managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false, ALL, 0);
	assert(kind == cudaMemcpyDeviceToHost);
	size_t numBytes = sizeof(short int) * networkConfigs[netId].numPreSynNet;
	CUDA_CHECK_ERRORS(cudaMemcpy(managerRuntimeData.connIdsPreIdx, runtimeData[netId].connIdsPreIdx, numBytes, cudaMemcpyDeviceToHost));
}
// Copies each neuron's most recent spike time from device to the manager buffer.
// NOTE(review): only numN entries are copied although the device array is
// allocated with numNAssigned (which includes external neurons) -- presumably
// intentional, since external neurons' spike times come from other partitions;
// confirm.
void SNN::copyLastSpikeTime(int netId, cudaMemcpyKind kind) {
checkAndSetGPUDevice(netId);
checkDestSrcPtrs(&managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false, ALL, 0); // check that the destination pointer is properly allocated..
assert(kind == cudaMemcpyDeviceToHost);
CUDA_CHECK_ERRORS(cudaMemcpy(managerRuntimeData.lastSpikeTime, runtimeData[netId].lastSpikeTime, sizeof(int) * networkConfigs[netId].numN, cudaMemcpyDeviceToHost));
}
// spikeGeneratorUpdate on GPUs..
// Prepares GPU-side spike input for the current step: (1) refreshes the uniform
// random numbers used by rate-based Poisson generators via cuRAND, (2) rebuilds
// and uploads the spikeGenBits bit vector for user-defined spike generators.
void SNN::spikeGeneratorUpdate_GPU(int netId) {
assert(runtimeData[netId].allocated);
assert(runtimeData[netId].memType == GPU_MEM);
checkAndSetGPUDevice(netId);
// update the random number for poisson spike generator (spikes generated by rate)
if((networkConfigs[netId].numNPois > 0) && (runtimeData[netId].gpuRandGen != NULL)) {
curandGenerateUniform(runtimeData[netId].gpuRandGen, runtimeData[netId].randNum, networkConfigs[netId].numNPois);
}
// Use spike generators (user-defined callback function)
if (networkConfigs[netId].numNSpikeGen > 0) {
assert(managerRuntimeData.spikeGenBits != NULL);
// reset the bit status of the spikeGenBits... (one bit per spike-gen neuron)
memset(managerRuntimeData.spikeGenBits, 0, sizeof(int) * (networkConfigs[netId].numNSpikeGen / 32 + 1));
// fill spikeGenBits from SpikeBuffer
fillSpikeGenBits(netId);
// copy the spikeGenBits from the manager to the GPU..
CUDA_CHECK_ERRORS(cudaMemcpy(runtimeData[netId].spikeGenBits, managerRuntimeData.spikeGenBits, sizeof(int) * (networkConfigs[netId].numNSpikeGen / 32 + 1), cudaMemcpyHostToDevice));
}
}
// Launches the spike-detection kernel for one partition; results land in the
// device-side firing tables.
void SNN::findFiring_GPU(int netId) {
	assert(runtimeData[netId].memType == GPU_MEM);
	assert(runtimeData[netId].allocated);
	checkAndSetGPUDevice(netId);
	kernel_findFiring<<<NUM_BLOCKS, NUM_THREADS>>>(simTime);
	CUDA_GET_LAST_ERROR("findFiring kernel failed\n");
}
// Launches the kernel that advances the device-side spike timing tables.
void SNN::updateTimingTable_GPU(int netId) {
	assert(runtimeData[netId].memType == GPU_MEM);
	assert(runtimeData[netId].allocated);
	checkAndSetGPUDevice(netId);
	kernel_updateTimeTable<<<NUM_BLOCKS, NUM_THREADS>>>(simTimeMs);
	CUDA_GET_LAST_ERROR("timing Table update kernel failed\n");
}
// Launches the current-update kernel for synapses with delays >= 2 ms.
void SNN::doCurrentUpdateD2_GPU(int netId) {
	assert(runtimeData[netId].allocated);
	assert(runtimeData[netId].memType == GPU_MEM);
	checkAndSetGPUDevice(netId);

	// nothing to do when the network's max delay is 1: every spike takes the D1 path
	if (networkConfigs[netId].maxDelay <= 1)
		return;

	kernel_doCurrentUpdateD2<<<NUM_BLOCKS, NUM_THREADS>>>(simTimeMs, simTimeSec, simTime);
	CUDA_GET_LAST_ERROR("Kernel execution failed");
}
// Launches the current-update kernel for synapses with 1 ms delay.
void SNN::doCurrentUpdateD1_GPU(int netId) {
	assert(runtimeData[netId].memType == GPU_MEM);
	assert(runtimeData[netId].allocated);
	checkAndSetGPUDevice(netId);
	kernel_doCurrentUpdateD1<<<NUM_BLOCKS, NUM_THREADS>>>(simTimeMs, simTimeSec, simTime);
	CUDA_GET_LAST_ERROR("Kernel execution failed");
}
// Launches the kernel that updates short-term plasticity variables and decays
// conductances; skipped entirely when neither feature is enabled.
void SNN::doSTPUpdateAndDecayCond_GPU(int netId) {
	assert(runtimeData[netId].memType == GPU_MEM);
	checkAndSetGPUDevice(netId);

	if (!sim_with_stp && !sim_with_conductances)
		return;

	kernel_STPUpdateAndDecayConductances<<<NUM_BLOCKS, NUM_THREADS>>>(simTimeMs, simTimeSec, simTime);
	CUDA_GET_LAST_ERROR("STP update\n");
}
// Runs the one-time device-memory initialization kernel for a partition.
void SNN::initGPU(int netId) {
	assert(runtimeData[netId].allocated);
	checkAndSetGPUDevice(netId);
	kernel_initGPUMemory<<<NUM_BLOCKS, NUM_THREADS>>>();
	CUDA_GET_LAST_ERROR("initGPUMemory kernel failed\n");
}
// Frees all device-side runtime buffers for one partition, including the
// per-group external firing tables (whose device pointers must first be copied
// back to the host before they can be freed) and the cuRAND generator state.
void SNN::deleteRuntimeData_GPU(int netId) {
assert(runtimeData[netId].memType == GPU_MEM);
checkAndSetGPUDevice(netId);
// cudaFree all device pointers
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].voltage) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].recovery) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].current) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].extCurrent) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].Npre) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].Npre_plastic) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].Npre_plasticInv) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].Npost) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].cumulativePost) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].cumulativePre) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].synSpikeTime) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].wt) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].wtChange) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].maxSynWt) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].nSpikeCnt) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].avgFiring) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].baseFiring) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].baseFiringInv) );
// neuromodulator concentrations and their per-ms buffers
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].grpDA) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].grp5HT) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].grpACh) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].grpNE) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].grpDABuffer) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].grp5HTBuffer) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].grpAChBuffer) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].grpNEBuffer) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].grpIds) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].Izh_a) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].Izh_b) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].Izh_c) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].Izh_d) );
// conductance buffers; NMDA/GABAb layouts depend on whether rise times are modeled
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].gAMPA) );
if (sim_with_NMDA_rise) {
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].gNMDA_r) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].gNMDA_d) );
} else {
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].gNMDA) );
}
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].gGABAa) );
if (sim_with_GABAb_rise) {
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].gGABAb_r) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].gGABAb_d) );
} else {
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].gGABAb) );
}
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].stpu) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].stpx) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].connIdsPreIdx) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].groupIdInfo) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].neuronAllocation) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].postDelayInfo) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].postSynapticIds) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].preSynapticIds) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].I_set) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].poissonFireRate) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].lastSpikeTime) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].spikeGenBits) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].firingTableD2) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].firingTableD1) );
// extFiringTableD1/D2 are device arrays of device pointers: copy the inner
// pointers to the host first, free each per-group buffer, then free the array.
int** tempPtrs;
tempPtrs = new int*[networkConfigs[netId].numGroups];
// fetch device memory address stored in extFiringTableD2
CUDA_CHECK_ERRORS( cudaMemcpy(tempPtrs, runtimeData[netId].extFiringTableD2, sizeof(int*) * networkConfigs[netId].numGroups, cudaMemcpyDeviceToHost) );
for (int i = 0; i < networkConfigs[netId].numGroups; i++)
CUDA_CHECK_ERRORS( cudaFree(tempPtrs[i]) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].extFiringTableD2) );
// fetch device memory address stored in extFiringTableD1
CUDA_CHECK_ERRORS( cudaMemcpy(tempPtrs, runtimeData[netId].extFiringTableD1, sizeof(int*) * networkConfigs[netId].numGroups, cudaMemcpyDeviceToHost) );
for (int i = 0; i < networkConfigs[netId].numGroups; i++)
CUDA_CHECK_ERRORS( cudaFree(tempPtrs[i]) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].extFiringTableD1) );
delete[] tempPtrs;
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].extFiringTableEndIdxD2) );
CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].extFiringTableEndIdxD1) );
// delete random number generator on GPU(s)
// Note: RNG_rand48 objects allocate device memory
if (runtimeData[netId].gpuRandGen != NULL) curandDestroyGenerator(runtimeData[netId].gpuRandGen);
runtimeData[netId].gpuRandGen = NULL;
if (runtimeData[netId].randNum != NULL) CUDA_CHECK_ERRORS(cudaFree(runtimeData[netId].randNum));
runtimeData[netId].randNum = NULL;
}
// Launches the conductance-update part of the global state update.
void SNN::globalStateUpdate_C_GPU(int netId) {
	assert(runtimeData[netId].memType == GPU_MEM);
	checkAndSetGPUDevice(netId);
	kernel_conductanceUpdate<<<NUM_BLOCKS, NUM_THREADS>>>(simTimeMs, simTimeSec, simTime);
	CUDA_GET_LAST_ERROR("kernel_conductanceUpdate failed");
	// use memset to reset I_set for debugging, resume it later
	//CUDA_CHECK_ERRORS(cudaMemset(runtimeData[netId].I_set, 0, networkConfigs[netId].I_setPitch * networkConfigs[netId].I_setLength));
}
// Launches the neuron-state part of the global state update
// (voltage and recovery variables, including homeostasis).
void SNN::globalStateUpdate_N_GPU(int netId) {
	assert(runtimeData[netId].memType == GPU_MEM);
	checkAndSetGPUDevice(netId);
	kernel_neuronStateUpdate<<<NUM_BLOCKS, NUM_THREADS>>>();
	CUDA_GET_LAST_ERROR("Kernel execution failed");
}
// Launches the group-state part of the global state update
// (neuromodulator concentrations); the 4-block launch currently
// supports up to 4 x 128 groups.
void SNN::globalStateUpdate_G_GPU(int netId) {
	assert(runtimeData[netId].memType == GPU_MEM);
	checkAndSetGPUDevice(netId);
	kernel_groupStateUpdate<<<4, NUM_THREADS>>>(simTimeMs);
	CUDA_GET_LAST_ERROR("Kernel execution failed");
}
// Copies each Poisson group's firing rates into the device-side poissonFireRate
// array, choosing device-to-device or host-to-device depending on where the
// user's PoissonRate object lives. Groups driven by a spikeGenFunc callback
// (or lacking a rate pointer) are skipped.
void SNN::assignPoissonFiringRate_GPU(int netId) {
assert(runtimeData[netId].memType == GPU_MEM);
checkAndSetGPUDevice(netId);
for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroups; lGrpId++) {
// given group of neurons belong to the poisson group....
if (groupConfigs[netId][lGrpId].isSpikeGenerator) {
int lNId = groupConfigs[netId][lGrpId].lStartN;
int gGrpId = groupConfigs[netId][lGrpId].gGrpId;
PoissonRate* rate = groupConfigMDMap[gGrpId].ratePtr;
// if spikeGenFunc group does not have a Poisson pointer, skip
if (groupConfigMap[gGrpId].spikeGenFunc || rate == NULL)
continue;
assert(runtimeData[netId].poissonFireRate != NULL);
// poissonFireRate is indexed by (lNId - numNReg): Poisson neurons are
// presumably laid out after the regular neurons -- confirm against the
// allocation in copyAuxiliaryData.
if (rate->isOnGPU()) {
// rates allocated on GPU
CUDA_CHECK_ERRORS(cudaMemcpy(&runtimeData[netId].poissonFireRate[lNId - networkConfigs[netId].numNReg], rate->getRatePtrGPU(),
sizeof(float) * rate->getNumNeurons(), cudaMemcpyDeviceToDevice) );
} else {
// rates allocated on CPU
CUDA_CHECK_ERRORS(cudaMemcpy(&runtimeData[netId].poissonFireRate[lNId - networkConfigs[netId].numNReg], rate->getRatePtrCPU(),
sizeof(float) * rate->getNumNeurons(), cudaMemcpyHostToDevice) );
}
}
}
}
// Note: for temporarily use, might be merged into exchangeExternalSpike
// Resets the per-group end indices of both external firing tables to zero.
void SNN::clearExtFiringTable_GPU(int netId) {
	assert(runtimeData[netId].memType == GPU_MEM);
	checkAndSetGPUDevice(netId);
	size_t numBytes = sizeof(int) * networkConfigs[netId].numGroups;
	CUDA_CHECK_ERRORS(cudaMemset(runtimeData[netId].extFiringTableEndIdxD1, 0, numBytes));
	CUDA_CHECK_ERRORS(cudaMemset(runtimeData[netId].extFiringTableEndIdxD2, 0, numBytes));
}
//void SNN::routeSpikes_GPU() {
// int firingTableIdxD2, firingTableIdxD1;
// int GtoLOffset;
// // ToDo: route spikes using routing table. currently only exchange spikes between GPU0 and GPU1
// // GPU0 -> GPU1
// if (!groupPartitionLists[0].empty() && !groupPartitionLists[1].empty()) {
// checkAndSetGPUDevice(0);
// CUDA_CHECK_ERRORS( cudaMemcpy(managerRuntimeData.extFiringTableEndIdxD2, runtimeData[0].extFiringTableEndIdxD2, sizeof(int) * networkConfigs[0].numGroups, cudaMemcpyDeviceToHost));
// CUDA_CHECK_ERRORS( cudaMemcpy(managerRuntimeData.extFiringTableEndIdxD1, runtimeData[0].extFiringTableEndIdxD1, sizeof(int) * networkConfigs[0].numGroups, cudaMemcpyDeviceToHost));
// CUDA_CHECK_ERRORS( cudaMemcpy(managerRuntimeData.extFiringTableD2, runtimeData[0].extFiringTableD2, sizeof(int*) * networkConfigs[0].numGroups, cudaMemcpyDeviceToHost));
// CUDA_CHECK_ERRORS( cudaMemcpy(managerRuntimeData.extFiringTableD1, runtimeData[0].extFiringTableD1, sizeof(int*) * networkConfigs[0].numGroups, cudaMemcpyDeviceToHost));
// //KERNEL_DEBUG("GPU0 D1ex:%d/D2ex:%d", managerRuntimeData.extFiringTableEndIdxD1[0], managerRuntimeData.extFiringTableEndIdxD2[0]);
//
// checkAndSetGPUDevice(1);
// CUDA_CHECK_ERRORS( cudaMemcpyFromSymbol(managerRuntimeData.timeTableD2, timeTableD2GPU, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, cudaMemcpyDeviceToHost));
// CUDA_CHECK_ERRORS( cudaMemcpyFromSymbol(managerRuntimeData.timeTableD1, timeTableD1GPU, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, cudaMemcpyDeviceToHost));
// firingTableIdxD2 = managerRuntimeData.timeTableD2[simTimeMs + glbNetworkConfig.maxDelay + 1];
// firingTableIdxD1 = managerRuntimeData.timeTableD1[simTimeMs + glbNetworkConfig.maxDelay + 1];
// //KERNEL_DEBUG("GPU1 D1:%d/D2:%d", firingTableIdxD1, firingTableIdxD2);
//
// for (int lGrpId = 0; lGrpId < networkConfigs[0].numGroups; lGrpId++) {
// if (groupConfigs[0][lGrpId].hasExternalConnect && managerRuntimeData.extFiringTableEndIdxD2[lGrpId] > 0) {
// CUDA_CHECK_ERRORS( cudaMemcpyPeer(runtimeData[1].firingTableD2 + firingTableIdxD2, 1,
// managerRuntimeData.extFiringTableD2[lGrpId], 0,
// sizeof(int) * managerRuntimeData.extFiringTableEndIdxD2[lGrpId]));
//
// for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[1].begin(); grpIt != groupPartitionLists[1].end(); grpIt++) {
// if (grpIt->gGrpId == groupConfigs[0][lGrpId].gGrpId)
// GtoLOffset = grpIt->GtoLOffset;
// }
//
// kernel_convertExtSpikesD2<<<NUM_BLOCKS, NUM_THREADS>>>(firingTableIdxD2,
// firingTableIdxD2 + managerRuntimeData.extFiringTableEndIdxD2[lGrpId],
// GtoLOffset); // [StartIdx, EndIdx)
// firingTableIdxD2 += managerRuntimeData.extFiringTableEndIdxD2[lGrpId];
// }
//
// if (groupConfigs[0][lGrpId].hasExternalConnect && managerRuntimeData.extFiringTableEndIdxD1[lGrpId] > 0) {
// CUDA_CHECK_ERRORS( cudaMemcpyPeer(runtimeData[1].firingTableD1 + firingTableIdxD1, 1,
// managerRuntimeData.extFiringTableD1[lGrpId], 0,
// sizeof(int) * managerRuntimeData.extFiringTableEndIdxD1[lGrpId]));
//
// for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[1].begin(); grpIt != groupPartitionLists[1].end(); grpIt++) {
// if (grpIt->gGrpId == groupConfigs[0][lGrpId].gGrpId)
// GtoLOffset = grpIt->GtoLOffset;
// }
//
// kernel_convertExtSpikesD1<<<NUM_BLOCKS, NUM_THREADS>>>(firingTableIdxD1,
// firingTableIdxD1 + managerRuntimeData.extFiringTableEndIdxD1[lGrpId],
// GtoLOffset); // [StartIdx, EndIdx)
// firingTableIdxD1 += managerRuntimeData.extFiringTableEndIdxD1[lGrpId];
//
// }
// //KERNEL_DEBUG("GPU1 New D1:%d/D2:%d", firingTableIdxD1, firingTableIdxD2);
// }
// managerRuntimeData.timeTableD2[simTimeMs + glbNetworkConfig.maxDelay + 1] = firingTableIdxD2;
// managerRuntimeData.timeTableD1[simTimeMs + glbNetworkConfig.maxDelay + 1] = firingTableIdxD1;
// CUDA_CHECK_ERRORS( cudaMemcpyToSymbol(timeTableD2GPU, managerRuntimeData.timeTableD2, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, cudaMemcpyHostToDevice));
// CUDA_CHECK_ERRORS( cudaMemcpyToSymbol(timeTableD1GPU, managerRuntimeData.timeTableD1, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, cudaMemcpyHostToDevice));
// }
//
// // GPU1 -> GPU0
// if (!groupPartitionLists[1].empty() && !groupPartitionLists[0].empty()) {
// checkAndSetGPUDevice(1);
// CUDA_CHECK_ERRORS( cudaMemcpy(managerRuntimeData.extFiringTableEndIdxD2, runtimeData[1].extFiringTableEndIdxD2, sizeof(int) * networkConfigs[1].numGroups, cudaMemcpyDeviceToHost));
// CUDA_CHECK_ERRORS( cudaMemcpy(managerRuntimeData.extFiringTableEndIdxD1, runtimeData[1].extFiringTableEndIdxD1, sizeof(int) * networkConfigs[1].numGroups, cudaMemcpyDeviceToHost));
// CUDA_CHECK_ERRORS( cudaMemcpy(managerRuntimeData.extFiringTableD2, runtimeData[1].extFiringTableD2, sizeof(int*) * networkConfigs[1].numGroups, cudaMemcpyDeviceToHost));
// CUDA_CHECK_ERRORS( cudaMemcpy(managerRuntimeData.extFiringTableD1, runtimeData[1].extFiringTableD1, sizeof(int*) * networkConfigs[1].numGroups, cudaMemcpyDeviceToHost));
// //KERNEL_DEBUG("GPU1 D1ex:%d/D2ex:%d", managerRuntimeData.extFiringTableEndIdxD1[0], managerRuntimeData.extFiringTableEndIdxD2[0]);
//
// checkAndSetGPUDevice(0);
// CUDA_CHECK_ERRORS( cudaMemcpyFromSymbol(managerRuntimeData.timeTableD2, timeTableD2GPU, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, cudaMemcpyDeviceToHost));
// CUDA_CHECK_ERRORS( cudaMemcpyFromSymbol(managerRuntimeData.timeTableD1, timeTableD1GPU, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, cudaMemcpyDeviceToHost));
// firingTableIdxD2 = managerRuntimeData.timeTableD2[simTimeMs + glbNetworkConfig.maxDelay + 1];
// firingTableIdxD1 = managerRuntimeData.timeTableD1[simTimeMs + glbNetworkConfig.maxDelay + 1];
// //KERNEL_DEBUG("GPU0 D1:%d/D2:%d", firingTableIdxD1, firingTableIdxD2);
//
// for (int lGrpId = 0; lGrpId < networkConfigs[1].numGroups; lGrpId++) {
// if (groupConfigs[1][lGrpId].hasExternalConnect && managerRuntimeData.extFiringTableEndIdxD2[lGrpId] > 0) {
// CUDA_CHECK_ERRORS( cudaMemcpyPeer(runtimeData[0].firingTableD2 + firingTableIdxD2, 0,
// managerRuntimeData.extFiringTableD2[lGrpId], 1,
// sizeof(int) * managerRuntimeData.extFiringTableEndIdxD2[lGrpId]));
//
// for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[0].begin(); grpIt != groupPartitionLists[0].end(); grpIt++) {
// if (grpIt->gGrpId == groupConfigs[1][lGrpId].gGrpId)
// GtoLOffset = grpIt->GtoLOffset;
// }
//
// kernel_convertExtSpikesD2<<<NUM_BLOCKS, NUM_THREADS>>>(firingTableIdxD2,
// firingTableIdxD2 + managerRuntimeData.extFiringTableEndIdxD2[lGrpId],
// GtoLOffset); // [StartIdx, EndIdx)
// firingTableIdxD2 += managerRuntimeData.extFiringTableEndIdxD2[lGrpId];
// }
//
// if (groupConfigs[1][lGrpId].hasExternalConnect && managerRuntimeData.extFiringTableEndIdxD1[lGrpId] > 0) {
// CUDA_CHECK_ERRORS( cudaMemcpyPeer(runtimeData[0].firingTableD1 + firingTableIdxD1, 0,
// managerRuntimeData.extFiringTableD1[lGrpId], 1,
// sizeof(int) * managerRuntimeData.extFiringTableEndIdxD1[lGrpId]));
//
// for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[0].begin(); grpIt != groupPartitionLists[0].end(); grpIt++) {
// if (grpIt->gGrpId == groupConfigs[1][lGrpId].gGrpId)
// GtoLOffset = grpIt->GtoLOffset;
// }
//
// kernel_convertExtSpikesD1<<<NUM_BLOCKS, NUM_THREADS>>>(firingTableIdxD1,
// firingTableIdxD1 + managerRuntimeData.extFiringTableEndIdxD1[lGrpId],
// GtoLOffset); // [StartIdx, EndIdx)
// firingTableIdxD1 += managerRuntimeData.extFiringTableEndIdxD1[lGrpId];
// }
// //KERNEL_DEBUG("GPU0 New D1:%d/D2:%d", firingTableIdxD1, firingTableIdxD2);
// }
// managerRuntimeData.timeTableD2[simTimeMs + glbNetworkConfig.maxDelay + 1] = firingTableIdxD2;
// managerRuntimeData.timeTableD1[simTimeMs + glbNetworkConfig.maxDelay + 1] = firingTableIdxD1;
// CUDA_CHECK_ERRORS( cudaMemcpyToSymbol(timeTableD2GPU, managerRuntimeData.timeTableD2, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, cudaMemcpyHostToDevice));
// CUDA_CHECK_ERRORS( cudaMemcpyToSymbol(timeTableD1GPU, managerRuntimeData.timeTableD1, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, cudaMemcpyHostToDevice));
// }
//
//
// for (std::list<RoutingTableEntry>::iterator rteItr = spikeRoutingTable.begin(); rteItr != spikeRoutingTable.end(); rteItr++) {
// int srcNetId = rteItr->srcNetId;
// int destNetId = rteItr->destNetId;
// assert(srcNetId < CPU_RUNTIME_BASE);
// assert(destNetId < CPU_RUNTIME_BASE);
// checkAndSetGPUDevice(srcNetId);
// CUDA_CHECK_ERRORS( cudaMemcpy(managerRuntimeData.extFiringTableEndIdxD2, runtimeData[srcNetId].extFiringTableEndIdxD2, sizeof(int) * networkConfigs[srcNetId].numGroups, cudaMemcpyDeviceToHost));
// CUDA_CHECK_ERRORS( cudaMemcpy(managerRuntimeData.extFiringTableEndIdxD1, runtimeData[srcNetId].extFiringTableEndIdxD1, sizeof(int) * networkConfigs[srcNetId].numGroups, cudaMemcpyDeviceToHost));
// CUDA_CHECK_ERRORS( cudaMemcpy(managerRuntimeData.extFiringTableD2, runtimeData[srcNetId].extFiringTableD2, sizeof(int*) * networkConfigs[srcNetId].numGroups, cudaMemcpyDeviceToHost));
// CUDA_CHECK_ERRORS( cudaMemcpy(managerRuntimeData.extFiringTableD1, runtimeData[srcNetId].extFiringTableD1, sizeof(int*) * networkConfigs[srcNetId].numGroups, cudaMemcpyDeviceToHost));
// //KERNEL_DEBUG("GPU0 D1ex:%d/D2ex:%d", managerRuntimeData.extFiringTableEndIdxD1[0], managerRuntimeData.extFiringTableEndIdxD2[0]);
//
// checkAndSetGPUDevice(destNetId);
// CUDA_CHECK_ERRORS( cudaMemcpyFromSymbol(managerRuntimeData.timeTableD2, timeTableD2GPU, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, cudaMemcpyDeviceToHost));
// CUDA_CHECK_ERRORS( cudaMemcpyFromSymbol(managerRuntimeData.timeTableD1, timeTableD1GPU, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, cudaMemcpyDeviceToHost));
// firingTableIdxD2 = managerRuntimeData.timeTableD2[simTimeMs + glbNetworkConfig.maxDelay + 1];
// firingTableIdxD1 = managerRuntimeData.timeTableD1[simTimeMs + glbNetworkConfig.maxDelay + 1];
// //KERNEL_DEBUG("GPU1 D1:%d/D2:%d", firingTableIdxD1, firingTableIdxD2);
//
// for (int lGrpId = 0; lGrpId < networkConfigs[srcNetId].numGroups; lGrpId++) {
// if (groupConfigs[srcNetId][lGrpId].hasExternalConnect && managerRuntimeData.extFiringTableEndIdxD2[lGrpId] > 0) {
// CUDA_CHECK_ERRORS( cudaMemcpyPeer(runtimeData[destNetId].firingTableD2 + firingTableIdxD2, destNetId,
// managerRuntimeData.extFiringTableD2[lGrpId], srcNetId,
// sizeof(int) * managerRuntimeData.extFiringTableEndIdxD2[lGrpId]));
//
// for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[destNetId].begin(); grpIt != groupPartitionLists[destNetId].end(); grpIt++) {
// if (grpIt->gGrpId == groupConfigs[srcNetId][lGrpId].gGrpId)
// GtoLOffset = grpIt->GtoLOffset;
// }
//
// kernel_convertExtSpikesD2<<<NUM_BLOCKS, NUM_THREADS>>>(firingTableIdxD2,
// firingTableIdxD2 + managerRuntimeData.extFiringTableEndIdxD2[lGrpId],
// GtoLOffset); // [StartIdx, EndIdx)
// firingTableIdxD2 += managerRuntimeData.extFiringTableEndIdxD2[lGrpId];
// }
//
// if (groupConfigs[srcNetId][lGrpId].hasExternalConnect && managerRuntimeData.extFiringTableEndIdxD1[lGrpId] > 0) {
// CUDA_CHECK_ERRORS( cudaMemcpyPeer(runtimeData[destNetId].firingTableD1 + firingTableIdxD1, destNetId,
// managerRuntimeData.extFiringTableD1[lGrpId], srcNetId,
// sizeof(int) * managerRuntimeData.extFiringTableEndIdxD1[lGrpId]));
//
// for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[destNetId].begin(); grpIt != groupPartitionLists[destNetId].end(); grpIt++) {
// if (grpIt->gGrpId == groupConfigs[srcNetId][lGrpId].gGrpId)
// GtoLOffset = grpIt->GtoLOffset;
// }
//
// kernel_convertExtSpikesD1<<<NUM_BLOCKS, NUM_THREADS>>>(firingTableIdxD1,
// firingTableIdxD1 + managerRuntimeData.extFiringTableEndIdxD1[lGrpId],
// GtoLOffset); // [StartIdx, EndIdx)
// firingTableIdxD1 += managerRuntimeData.extFiringTableEndIdxD1[lGrpId];
//
// }
// //KERNEL_DEBUG("GPU1 New D1:%d/D2:%d", firingTableIdxD1, firingTableIdxD2);
// }
// managerRuntimeData.timeTableD2[simTimeMs + glbNetworkConfig.maxDelay + 1] = firingTableIdxD2;
// managerRuntimeData.timeTableD1[simTimeMs + glbNetworkConfig.maxDelay + 1] = firingTableIdxD1;
// CUDA_CHECK_ERRORS( cudaMemcpyToSymbol(timeTableD2GPU, managerRuntimeData.timeTableD2, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, cudaMemcpyHostToDevice));
// CUDA_CHECK_ERRORS( cudaMemcpyToSymbol(timeTableD1GPU, managerRuntimeData.timeTableD1, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, cudaMemcpyHostToDevice));
// }
//}
/*!
* \brief This function is called every second by SNN::runNetwork(). It updates the firingTableD1(D2)GPU and
* timeTableD1(D2)GPU by removing older firing information.
*/
void SNN::shiftSpikeTables_F_GPU(int netId) {
	assert(runtimeData[netId].memType == GPU_MEM); // GPU runtimes only
	checkAndSetGPUDevice(netId);
	kernel_shiftFiringTable<<<NUM_BLOCKS, NUM_THREADS>>>();
	// Kernel launches do not return a status; surface launch-config errors here
	// (consistent with the CUDA_CHECK_ERRORS convention used elsewhere in this file).
	CUDA_CHECK_ERRORS(cudaGetLastError());
}
// Shifts the device-side time tables (timeTableD1/D2GPU), dropping firing
// information older than the network's maximum delay.
void SNN::shiftSpikeTables_T_GPU(int netId) {
	assert(runtimeData[netId].memType == GPU_MEM); // GPU runtimes only
	checkAndSetGPUDevice(netId);
	kernel_shiftTimeTable<<<NUM_BLOCKS, NUM_THREADS>>>();
	// Kernel launches do not return a status; check for launch errors explicitly.
	CUDA_CHECK_ERRORS(cudaGetLastError());
}
/*
* \brief Update syanptic weights every 10ms, 100ms, or 1000ms
*
*
*/
void SNN::updateWeights_GPU(int netId) {
	// Weight updates only make sense while training with plastic synapses.
	assert(sim_in_testing == false);
	assert(sim_with_fixedwts == false);
	assert(runtimeData[netId].memType == GPU_MEM);
	checkAndSetGPUDevice(netId);
	kernel_updateWeights<<<NUM_BLOCKS, NUM_THREADS>>>();
	// Kernel launches do not return a status; check for launch errors explicitly.
	CUDA_CHECK_ERRORS(cudaGetLastError());
}
//__global__ void gpu_resetFiringInformation() {
// if(threadIdx.x==0 && blockIdx.x==0) {
// for(int i = 0; i < ROUNDED_TIMING_COUNT; i++) {
// timeTableD2GPU[i] = 0;
// timeTableD1GPU[i] = 0;
// }
// spikeCountD2SecGPU=0;
// spikeCountD1SecGPU=0;
// secD2fireCntTest=0;
// secD1fireCntTest=0;
// spikeCountD2GPU=0;
// spikeCountD1GPU=0;
//
// //spikeCountAll1Sec=0;//assigned in fetchSpikeTables()
// }
//
//}
//
//void SNN::resetFiringInformation_GPU() {
// checkAndSetGPUDevice();
//
// gpu_resetFiringInformation<<<NUM_BLOCKS,NUM_THREADS>>>();
//}
/*!
* \brief this function allocates device (GPU) memory sapce and copies external current to it
*
* This function:
* (allocate and) copy extCurrent
*
* This funcion is called by copyNeuronState() and setExternalCurrent. Only host-to-divice copy is required
*
* \param[in] netId the id of a local network, which is the same as the device (GPU) id
* \param[in] lGrpId the local group id in a local network, which specifiy the group(s) to be copied
* \param[in] dest pointer to runtime data desitnation
* \param[in] allocateMem a flag indicates whether allocating memory space before copying
*
* \sa allocateSNN_GPU fetchSTPState
* \since v3.0
*/
void SNN::copyExternalCurrent(int netId, int lGrpId, RuntimeData* dest, cudaMemcpyKind kind, bool allocateMem) {
	checkAndSetGPUDevice(netId);
	checkDestSrcPtrs(dest, &managerRuntimeData, cudaMemcpyHostToDevice, allocateMem, lGrpId, 0);// check that the destination pointer is properly allocated..
	// Only the host-to-device direction is supported (see function doc above).
	assert(kind == cudaMemcpyHostToDevice);
	// posN/lengthN select either the whole regular-neuron range (ALL) or the
	// local range of a single group.
	int posN, lengthN;
	if(lGrpId == ALL) {
		posN = 0;
		lengthN = networkConfigs[netId].numNReg;
	} else {
		assert(lGrpId >= 0);
		posN = groupConfigs[netId][lGrpId].lStartN;
		lengthN = groupConfigs[netId][lGrpId].numN;
	}
	assert(lengthN >= 0 && lengthN <= networkConfigs[netId].numNReg); // assert NOT poisson neurons
	//KERNEL_DEBUG("copyExternalCurrent: lGrpId=%d, ptrPos=%d, length=%d, allocate=%s", lGrpId, posN, lengthN, allocateMem?"y":"n");
	// NOTE(review): when allocateMem is true the buffer is sized lengthN but the
	// copy below indexes from posN; for a single group with posN > 0 this would
	// write past the allocation -- presumably allocateMem is only ever used with
	// lGrpId == ALL (posN == 0). TODO confirm with callers.
	if(allocateMem)
		CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->extCurrent, sizeof(float) * lengthN));
	CUDA_CHECK_ERRORS(cudaMemcpy(&(dest->extCurrent[posN]), &(managerRuntimeData.extCurrent[posN]), sizeof(float) * lengthN, cudaMemcpyHostToDevice));
}
/*!
* \brief This function fetch the spike count in all local networks and sum the up
*/
// Fetches the four device-resident spike counters of local network netId into
// the caller-provided host variables (delay-1/delay-2 counts plus the external
// received-spike counts). Each counter is a single int-sized __device__ symbol.
void SNN::copyNetworkSpikeCount(int netId, cudaMemcpyKind kind,
	unsigned int* spikeCountD1, unsigned int* spikeCountD2,
	unsigned int* spikeCountExtD1, unsigned int* spikeCountExtD2) {
	checkAndSetGPUDevice(netId);
	// Only device-to-host fetches are supported.
	assert(kind == cudaMemcpyDeviceToHost);
	CUDA_CHECK_ERRORS(cudaMemcpyFromSymbol(spikeCountExtD2, spikeCountExtRxD2GPU, sizeof(int), 0, cudaMemcpyDeviceToHost));
	CUDA_CHECK_ERRORS(cudaMemcpyFromSymbol(spikeCountExtD1, spikeCountExtRxD1GPU, sizeof(int), 0, cudaMemcpyDeviceToHost));
	CUDA_CHECK_ERRORS(cudaMemcpyFromSymbol(spikeCountD2, spikeCountD2GPU, sizeof(int), 0, cudaMemcpyDeviceToHost));
	CUDA_CHECK_ERRORS(cudaMemcpyFromSymbol(spikeCountD1, spikeCountD1GPU, sizeof(int), 0, cudaMemcpyDeviceToHost));
}
/*!
* \brief This function fetch spikeTables in the local network specified by netId
*
* \param[in] netId the id of local network of which timeTableD1(D2) and firingTableD1(D2) are copied to manager runtime data
*/
void SNN::copySpikeTables(int netId, cudaMemcpyKind kind) {
	unsigned int gpuSpikeCountD1Sec, gpuSpikeCountD2Sec, gpuSpikeCountLastSecLeftD2;
	checkAndSetGPUDevice(netId);
	// Only device-to-host fetches are supported.
	assert(kind == cudaMemcpyDeviceToHost);
	// First read how many entries each firing table currently holds, so only the
	// populated prefix of each table is transferred.
	CUDA_CHECK_ERRORS(cudaMemcpyFromSymbol(&gpuSpikeCountLastSecLeftD2, spikeCountLastSecLeftD2GPU, sizeof(int), 0, cudaMemcpyDeviceToHost));
	CUDA_CHECK_ERRORS(cudaMemcpyFromSymbol(&gpuSpikeCountD2Sec, spikeCountD2SecGPU, sizeof(int), 0, cudaMemcpyDeviceToHost));
	CUDA_CHECK_ERRORS(cudaMemcpyFromSymbol(&gpuSpikeCountD1Sec, spikeCountD1SecGPU, sizeof(int), 0, cudaMemcpyDeviceToHost));
	// D2 also carries spikes left over from the previous second (delays > 1ms).
	CUDA_CHECK_ERRORS( cudaMemcpy(managerRuntimeData.firingTableD2, runtimeData[netId].firingTableD2, sizeof(int)*(gpuSpikeCountD2Sec + gpuSpikeCountLastSecLeftD2), cudaMemcpyDeviceToHost));
	CUDA_CHECK_ERRORS( cudaMemcpy(managerRuntimeData.firingTableD1, runtimeData[netId].firingTableD1, sizeof(int)*gpuSpikeCountD1Sec, cudaMemcpyDeviceToHost));
	// Time tables span one second plus the network's maximum delay (+1 sentinel slot).
	CUDA_CHECK_ERRORS( cudaMemcpyFromSymbol(managerRuntimeData.timeTableD2, timeTableD2GPU, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, cudaMemcpyDeviceToHost));
	CUDA_CHECK_ERRORS( cudaMemcpyFromSymbol(managerRuntimeData.timeTableD1, timeTableD1GPU, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, cudaMemcpyDeviceToHost));
}
// Copies the delay-1/delay-2 time tables between the manager (host) buffers and
// the __device__ symbols of network netId, in the direction selected by kind.
void SNN::copyTimeTable(int netId, cudaMemcpyKind kind) {
	assert(netId < CPU_RUNTIME_BASE); // must be a GPU runtime id
	checkAndSetGPUDevice(netId);
	if (kind == cudaMemcpyDeviceToHost) {
		CUDA_CHECK_ERRORS(cudaMemcpyFromSymbol(managerRuntimeData.timeTableD2, timeTableD2GPU, sizeof(int)*(1000 + glbNetworkConfig.maxDelay + 1), 0, cudaMemcpyDeviceToHost));
		CUDA_CHECK_ERRORS(cudaMemcpyFromSymbol(managerRuntimeData.timeTableD1, timeTableD1GPU, sizeof(int)*(1000 + glbNetworkConfig.maxDelay + 1), 0, cudaMemcpyDeviceToHost));
	} else { // kind == cudaMemcpyHostToDevice
		// NOTE(review): any kind other than DeviceToHost takes this branch; there
		// is no assert rejecting e.g. cudaMemcpyDeviceToDevice -- confirm callers
		// only ever pass the two expected directions.
		CUDA_CHECK_ERRORS(cudaMemcpyToSymbol(timeTableD2GPU, managerRuntimeData.timeTableD2, sizeof(int)*(1000 + glbNetworkConfig.maxDelay + 1), 0, cudaMemcpyHostToDevice));
		CUDA_CHECK_ERRORS(cudaMemcpyToSymbol(timeTableD1GPU, managerRuntimeData.timeTableD1, sizeof(int)*(1000 + glbNetworkConfig.maxDelay + 1), 0, cudaMemcpyHostToDevice));
	}
}
// Copies the external-spike bookkeeping of network netId into the manager
// runtime data: the per-group end indices and the external firing tables.
// Note the D2/D1 table entries are int* values, i.e. only the device pointers
// are copied here, not the tables' contents.
void SNN::copyExtFiringTable(int netId, cudaMemcpyKind kind) {
	assert(netId < CPU_RUNTIME_BASE); // must be a GPU runtime id
	checkAndSetGPUDevice(netId);
	// NOTE(review): kind is forwarded verbatim while the destination is always
	// host-side managerRuntimeData, so callers presumably always pass
	// cudaMemcpyDeviceToHost -- TODO confirm.
	CUDA_CHECK_ERRORS(cudaMemcpy(managerRuntimeData.extFiringTableEndIdxD2, runtimeData[netId].extFiringTableEndIdxD2, sizeof(int) * networkConfigs[netId].numGroups, kind));
	CUDA_CHECK_ERRORS(cudaMemcpy(managerRuntimeData.extFiringTableEndIdxD1, runtimeData[netId].extFiringTableEndIdxD1, sizeof(int) * networkConfigs[netId].numGroups, kind));
	CUDA_CHECK_ERRORS(cudaMemcpy(managerRuntimeData.extFiringTableD2, runtimeData[netId].extFiringTableD2, sizeof(int*) * networkConfigs[netId].numGroups, kind));
	CUDA_CHECK_ERRORS(cudaMemcpy(managerRuntimeData.extFiringTableD1, runtimeData[netId].extFiringTableD1, sizeof(int*) * networkConfigs[netId].numGroups, kind));
	//KERNEL_DEBUG("GPU0 D1ex:%d/D2ex:%d", managerRuntimeData.extFiringTableEndIdxD1[0], managerRuntimeData.extFiringTableEndIdxD2[0]);
}
// Enumerates the available CUDA devices, logs their capabilities, resets each
// device, and (for >= 2 GPUs) tries to enable P2P access between GPU0 and GPU1.
// Returns the number of CUDA devices found.
int SNN::configGPUDevice() {
	int devCount, devMax;
	cudaDeviceProp deviceProp;
	CUDA_CHECK_ERRORS(cudaGetDeviceCount(&devCount));
	KERNEL_INFO("CUDA devices Configuration:");
	KERNEL_INFO(" - Number of CUDA devices = %9d", devCount);
	devMax = CUDA_GET_MAXGFLOP_DEVICE_ID();
	KERNEL_INFO(" - CUDA device ID with max GFLOPs = %9d", devMax);
	for (int ithGPU = 0; ithGPU < devCount; ithGPU++) {
		CUDA_CHECK_ERRORS(cudaGetDeviceProperties(&deviceProp, ithGPU));
		KERNEL_INFO(" + Use CUDA device[%1d] = %9s", ithGPU, deviceProp.name);
		KERNEL_INFO(" + CUDA Compute Capability (CC) = %2d.%d", deviceProp.major, deviceProp.minor);
		// BUGFIX: this check used to run after the loop and therefore only
		// inspected the LAST device; warn for every device below CC 2.0.
		if (deviceProp.major < 2) {
			// Unmark this when CC 1.3 is deprecated
			//KERNEL_ERROR("CARLsim does not support CUDA devices older than CC 2.0");
			//exitSimulation(1);
			KERNEL_WARN("CUDA device with CC 1.3 will be deprecated in a future release");
		}
	}
	// Reset every device so the simulation starts from a clean context.
	for (int ithGPU = 0; ithGPU < devCount; ithGPU++) {
		CUDA_CHECK_ERRORS(cudaSetDevice(ithGPU));
		CUDA_DEVICE_RESET();
	}
	if (devCount >= 2) { // try to setup P2P access if more than 2 GPUs are presented
		// FIXME: generalize the initialization for mulit-GPUs up to 4 or 8
		// enable P2P access
		int canAccessPeer_0_1, canAccessPeer_1_0;
		CUDA_CHECK_ERRORS(cudaDeviceCanAccessPeer(&canAccessPeer_0_1, 0, 1));
		CUDA_CHECK_ERRORS(cudaDeviceCanAccessPeer(&canAccessPeer_1_0, 1, 0));
		// enable peer access between GPU0 and GPU1
		if (canAccessPeer_0_1 & canAccessPeer_1_0) {
			// NOTE: EnablePeerAccess left unchecked on purpose -- it returns
			// cudaErrorPeerAccessAlreadyEnabled if called twice, which is benign.
			cudaSetDevice(0);
			cudaDeviceEnablePeerAccess(1, 0);
			cudaSetDevice(1);
			cudaDeviceEnablePeerAccess(0, 0);
			KERNEL_INFO("* Peer Access is enabled");
		} else {
			KERNEL_INFO("* Peer Access is not enabled");
		}
	}
	return devCount;
}
// Converts the external spikes in firingTableD2 within [startIdx, endIdx) to
// local neuron ids by applying GtoLOffset, on device netId.
void SNN::convertExtSpikesD2_GPU(int netId, int startIdx, int endIdx, int GtoLOffset) {
	checkAndSetGPUDevice(netId);
	kernel_convertExtSpikesD2 <<<NUM_BLOCKS, NUM_THREADS >>>(startIdx, endIdx, GtoLOffset); // [StartIdx, EndIdx)
	// Kernel launches do not return a status; check for launch errors explicitly.
	CUDA_CHECK_ERRORS(cudaGetLastError());
}
// Converts the external spikes in firingTableD1 within [startIdx, endIdx) to
// local neuron ids by applying GtoLOffset, on device netId.
void SNN::convertExtSpikesD1_GPU(int netId, int startIdx, int endIdx, int GtoLOffset) {
	checkAndSetGPUDevice(netId);
	kernel_convertExtSpikesD1 <<<NUM_BLOCKS, NUM_THREADS >>>(startIdx, endIdx, GtoLOffset); // [StartIdx, EndIdx)
	// Kernel launches do not return a status; check for launch errors explicitly.
	CUDA_CHECK_ERRORS(cudaGetLastError());
}
// Makes device netId the current CUDA device if it is not already, avoiding a
// redundant cudaSetDevice call when the context is already correct.
void SNN::checkAndSetGPUDevice(int netId) {
	int currentDevice;
	// BUGFIX: the argument had been corrupted to '¤tDevice' ('&curr' eaten by
	// an HTML-entity mangling of '&curren'); restore the address-of expression.
	CUDA_CHECK_ERRORS(cudaGetDevice(&currentDevice));
	assert(netId >= 0 && netId < numAvailableGPUs);
	if (currentDevice != netId) {
		//KERNEL_DEBUG("Change GPU context from GPU %d to GPU %d", currentDevice, netId);
		CUDA_CHECK_ERRORS(cudaSetDevice(netId));
	}
}
// deprecated
//void SNN::copyWeightsGPU(int nid, int src_grp) {
// checkAndSetGPUDevice("copyWeightsGPU");
//
// assert(nid < numNReg);
// unsigned int cumId = managerRuntimeData.cumulativePre[nid];
// float* synWts = &(managerRuntimeData.wt[cumId]);
// //TODO: NEEDED TO COMMENT THIS FOR CARLSIM 2.1-2.2 FILEMERGE -- KDC
// // assert(cumId >= (nid-numNPois));
// //assert(cumId < numPreSynapses*networkConfigs[0].numN);
//
// CUDA_CHECK_ERRORS( cudaMemcpy( synWts, &runtimeData[0].wt[cumId], sizeof(float)*managerRuntimeData.Npre[nid], cudaMemcpyDeviceToHost));
//}
// Allocates required memory and then initialize the GPU
// Allocates all GPU runtime buffers for local network netId, copies the host
// (manager) state onto the device, publishes configuration to __device__
// symbols, and binds the time tables to textures. The copy order below is
// significant (connection info before synapse state before neuron state), so
// do not reorder the steps.
void SNN::allocateSNN_GPU(int netId) {
	checkAndSetGPUDevice(netId);
	// setup memory type of GPU runtime data
	runtimeData[netId].memType = GPU_MEM;
	// display some memory management info
	size_t avail, total, previous;
	float toMB = std::pow(1024.0f, 2); // bytes-per-megabyte divisor for logging
	cudaMemGetInfo(&avail,&total);
	KERNEL_INFO("GPU Memory Management: (Total %2.3f MB)",(float)(total/toMB));
	KERNEL_INFO("Data\t\t\tSize\t\tTotal Used\tTotal Available");
	KERNEL_INFO("Init:\t\t\t%2.3f MB\t%2.3f MB\t%2.3f MB",(float)(total)/toMB,(float)((total-avail)/toMB),
		(float)(avail/toMB));
	previous=avail;
	// allocate random number generator on GPU(s)
	// Seeded per-network (randSeed_ + netId) so each GPU has its own stream.
	if(runtimeData[netId].gpuRandGen == NULL) {
		curandCreateGenerator(&runtimeData[netId].gpuRandGen, CURAND_RNG_PSEUDO_DEFAULT);
		curandSetPseudoRandomGeneratorSeed(runtimeData[netId].gpuRandGen, randSeed_ + netId);
	}
	// allocate SNN::runtimeData[0].randNum for random number generators
	CUDA_CHECK_ERRORS(cudaMalloc((void **)&runtimeData[netId].randNum, networkConfigs[netId].numNPois * sizeof(float)));
	cudaMemGetInfo(&avail,&total);
	KERNEL_INFO("Random Gen:\t\t%2.3f MB\t%2.3f MB\t%2.3f MB",(float)(previous-avail)/toMB, (float)((total-avail)/toMB),(float)(avail/toMB));
	previous=avail;
	// initialize runtimeData[0].neuronAllocation, __device__ loadBufferCount, loadBufferSize
	allocateStaticLoad(netId, NUM_THREADS);
	allocateGroupId(netId);
	// this table is useful for quick evaluation of the position of fired neuron
	// given a sequence of bits denoting the firing..
	// initialize __device__ quickSynIdTableGPU[256]
	initQuickSynIdTable(netId);
	cudaMemGetInfo(&avail,&total);
	KERNEL_INFO("Static Load:\t\t%2.3f MB\t%2.3f MB\t%2.3f MB",(float)(previous-avail)/toMB, (float)((total-avail)/toMB),(float)(avail/toMB));
	previous=avail;
	// initialize (copy from SNN) runtimeData[0].Npre, runtimeData[0].Npre_plastic, runtimeData[0].Npre_plasticInv, runtimeData[0].cumulativePre
	// initialize (copy from SNN) runtimeData[0].cumulativePost, runtimeData[0].Npost, runtimeData[0].postDelayInfo
	// initialize (copy from SNN) runtimeData[0].postSynapticIds, runtimeData[0].preSynapticIds
	copyPreConnectionInfo(netId, ALL, &runtimeData[netId], &managerRuntimeData, cudaMemcpyHostToDevice, true);
	copyPostConnectionInfo(netId, ALL, &runtimeData[netId], &managerRuntimeData, cudaMemcpyHostToDevice, true);
	cudaMemGetInfo(&avail,&total);
	KERNEL_INFO("Conn Info:\t\t%2.3f MB\t%2.3f MB\t%2.3f MB",(float)(previous-avail)/toMB,(float)((total-avail)/toMB), (float)(avail/toMB));
	previous=avail;
	// initialize (copy from SNN) runtimeData[0].wt, runtimeData[0].wtChange, runtimeData[0].maxSynWt
	copySynapseState(netId, &runtimeData[netId], &managerRuntimeData, cudaMemcpyHostToDevice, true);
	cudaMemGetInfo(&avail,&total);
	KERNEL_INFO("Syn State:\t\t%2.3f MB\t%2.3f MB\t%2.3f MB",(float)(previous-avail)/toMB,(float)((total-avail)/toMB), (float)(avail/toMB));
	previous=avail;
	// copy the neuron state information to the GPU..
	// initialize (copy from managerRuntimeData) runtimeData[0].recovery, runtimeData[0].voltage, runtimeData[0].current
	// initialize (copy from managerRuntimeData) runtimeData[0].gGABAa, runtimeData[0].gGABAb, runtimeData[0].gAMPA, runtimeData[0].gNMDA
	// initialize (copy from SNN) runtimeData[0].Izh_a, runtimeData[0].Izh_b, runtimeData[0].Izh_c, runtimeData[0].Izh_d
	// initialize (copy form SNN) runtimeData[0].baseFiring, runtimeData[0].baseFiringInv
	copyNeuronState(netId, ALL, &runtimeData[netId], cudaMemcpyHostToDevice, true);
	// copy STP state, considered as neuron state
	if (sim_with_stp) {
		// initialize (copy from SNN) stpu, stpx
		copySTPState(netId, ALL, &runtimeData[netId], &managerRuntimeData, cudaMemcpyHostToDevice, true);
	}
	cudaMemGetInfo(&avail,&total);
	KERNEL_INFO("Neuron State:\t\t%2.3f MB\t%2.3f MB\t%2.3f MB",(float)(previous-avail)/toMB,(float)((total-avail)/toMB), (float)(avail/toMB));
	previous=avail;
	// initialize (copy from SNN) runtimeData[0].grpDA(5HT,ACh,NE)
	// initialize (copy from SNN) runtimeData[0].grpDA(5HT,ACh,NE)Buffer[]
	copyGroupState(netId, ALL, &runtimeData[netId], &managerRuntimeData, cudaMemcpyHostToDevice, true);
	cudaMemGetInfo(&avail,&total);
	KERNEL_INFO("Group State:\t\t%2.3f MB\t%2.3f MB\t%2.3f MB",(float)(previous-avail)/toMB,(float)((total-avail)/toMB), (float)(avail/toMB));
	previous=avail;
	// initialize (cudaMemset) runtimeData[0].I_set, runtimeData[0].poissonFireRate
	// initialize (copy from SNN) runtimeData[0].firingTableD1, runtimeData[0].firingTableD2
	// initialize (cudaMalloc) runtimeData[0].spikeGenBits
	// initialize (copy from managerRuntimeData) runtimeData[0].nSpikeCnt,
	// initialize (copy from SNN) runtimeData[0].synSpikeTime, runtimeData[0].lastSpikeTime
	copyAuxiliaryData(netId, ALL, &runtimeData[netId], cudaMemcpyHostToDevice, true);
	cudaMemGetInfo(&avail,&total);
	KERNEL_INFO("Auxiliary Data:\t\t%2.3f MB\t%2.3f MB\t%2.3f MB\n\n",(float)(previous-avail)/toMB,(float)((total-avail)/toMB), (float)(avail/toMB));
	previous=avail;
	// copy relevant pointers and network information to GPU
	CUDA_CHECK_ERRORS(cudaMemcpyToSymbol(runtimeDataGPU, &runtimeData[netId], sizeof(RuntimeData), 0, cudaMemcpyHostToDevice));
	// copy data to from SNN:: to NetworkConfigRT SNN::networkConfigs[0]
	copyNetworkConfig(netId); // FIXME: we can change the group properties such as STDP as the network is running. So, we need a way to updating the GPU when changes are made.
	// TODO: move mulSynFast, mulSynSlow to ConnectConfig structure
	// copy connection configs
	CUDA_CHECK_ERRORS(cudaMemcpyToSymbol(d_mulSynFast, mulSynFast, sizeof(float) * networkConfigs[netId].numConnections, 0, cudaMemcpyHostToDevice));
	CUDA_CHECK_ERRORS(cudaMemcpyToSymbol(d_mulSynSlow, mulSynSlow, sizeof(float) * networkConfigs[netId].numConnections, 0, cudaMemcpyHostToDevice));
	copyGroupConfigs(netId);
	KERNEL_DEBUG("Transfering group settings to GPU:");
	// Debug dump of every assigned group's configuration (debug builds only).
	for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroupsAssigned; lGrpId++) {
		KERNEL_DEBUG("Settings for Group %s:", groupConfigMap[groupConfigs[netId][lGrpId].gGrpId].grpName.c_str());
		KERNEL_DEBUG("\tType: %d",(int)groupConfigs[netId][lGrpId].Type);
		KERNEL_DEBUG("\tNumN: %d",groupConfigs[netId][lGrpId].numN);
		KERNEL_DEBUG("\tM: %d",groupConfigs[netId][lGrpId].numPostSynapses);
		KERNEL_DEBUG("\tPreM: %d",groupConfigs[netId][lGrpId].numPreSynapses);
		KERNEL_DEBUG("\tspikeGenerator: %d",(int)groupConfigs[netId][lGrpId].isSpikeGenerator);
		KERNEL_DEBUG("\tFixedInputWts: %d",(int)groupConfigs[netId][lGrpId].FixedInputWts);
		KERNEL_DEBUG("\tMaxDelay: %d",(int)groupConfigs[netId][lGrpId].MaxDelay);
		KERNEL_DEBUG("\tWithSTDP: %d",(int)groupConfigs[netId][lGrpId].WithSTDP);
		if (groupConfigs[netId][lGrpId].WithSTDP) {
			KERNEL_DEBUG("\t\tE-STDP type: %s",stdpType_string[groupConfigs[netId][lGrpId].WithESTDPtype]);
			KERNEL_DEBUG("\t\tTAU_PLUS_INV_EXC: %f",groupConfigs[netId][lGrpId].TAU_PLUS_INV_EXC);
			KERNEL_DEBUG("\t\tTAU_MINUS_INV_EXC: %f",groupConfigs[netId][lGrpId].TAU_MINUS_INV_EXC);
			KERNEL_DEBUG("\t\tALPHA_PLUS_EXC: %f",groupConfigs[netId][lGrpId].ALPHA_PLUS_EXC);
			KERNEL_DEBUG("\t\tALPHA_MINUS_EXC: %f",groupConfigs[netId][lGrpId].ALPHA_MINUS_EXC);
			KERNEL_DEBUG("\t\tI-STDP type: %s",stdpType_string[groupConfigs[netId][lGrpId].WithISTDPtype]);
			KERNEL_DEBUG("\t\tTAU_PLUS_INV_INB: %f",groupConfigs[netId][lGrpId].TAU_PLUS_INV_INB);
			KERNEL_DEBUG("\t\tTAU_MINUS_INV_INB: %f",groupConfigs[netId][lGrpId].TAU_MINUS_INV_INB);
			KERNEL_DEBUG("\t\tALPHA_PLUS_INB: %f",groupConfigs[netId][lGrpId].ALPHA_PLUS_INB);
			KERNEL_DEBUG("\t\tALPHA_MINUS_INB: %f",groupConfigs[netId][lGrpId].ALPHA_MINUS_INB);
			KERNEL_DEBUG("\t\tLAMBDA: %f",groupConfigs[netId][lGrpId].LAMBDA);
			KERNEL_DEBUG("\t\tDELTA: %f",groupConfigs[netId][lGrpId].DELTA);
			KERNEL_DEBUG("\t\tBETA_LTP: %f",groupConfigs[netId][lGrpId].BETA_LTP);
			KERNEL_DEBUG("\t\tBETA_LTD: %f",groupConfigs[netId][lGrpId].BETA_LTD);
		}
		KERNEL_DEBUG("\tWithSTP: %d",(int)groupConfigs[netId][lGrpId].WithSTP);
		if (groupConfigs[netId][lGrpId].WithSTP) {
			KERNEL_DEBUG("\t\tSTP_U: %f",groupConfigs[netId][lGrpId].STP_U);
//			KERNEL_DEBUG("\t\tSTP_tD: %f",groupConfigs[netId][lGrpId].STP_tD);
//			KERNEL_DEBUG("\t\tSTP_tF: %f",groupConfigs[netId][lGrpId].STP_tF);
		}
		KERNEL_DEBUG("\tspikeGen: %s", groupConfigs[netId][lGrpId].isSpikeGenFunc? "is Set" : "is not set ");
	}
	// allocation of gpu runtime data is done
	runtimeData[netId].allocated = true;
	// map the timing table to texture.. saves a lot of headache in using shared memory
	void* devPtr;
	size_t offset;
	// Bind each time table symbol to its texture reference and publish the
	// element offset (in ints) via the companion *_tex_offset symbol.
	CUDA_CHECK_ERRORS(cudaGetSymbolAddress(&devPtr, timeTableD2GPU));
	CUDA_CHECK_ERRORS(cudaBindTexture(&offset, timeTableD2GPU_tex, devPtr, sizeof(int) * TIMING_COUNT));
	offset = offset / sizeof(int);
	CUDA_CHECK_ERRORS(cudaGetSymbolAddress(&devPtr, timeTableD2GPU_tex_offset));
	CUDA_CHECK_ERRORS(cudaMemcpy(devPtr, &offset, sizeof(int), cudaMemcpyHostToDevice));
	CUDA_CHECK_ERRORS(cudaGetSymbolAddress(&devPtr, timeTableD1GPU));
	CUDA_CHECK_ERRORS(cudaBindTexture(&offset, timeTableD1GPU_tex, devPtr, sizeof(int) * TIMING_COUNT));
	offset = offset / sizeof(int);
	CUDA_CHECK_ERRORS(cudaGetSymbolAddress(&devPtr, timeTableD1GPU_tex_offset));
	CUDA_CHECK_ERRORS(cudaMemcpy(devPtr, &offset, sizeof(int), cudaMemcpyHostToDevice));
	initGPU(netId);
}
|
a83af5353ad4ccf1f303ebd6d6080cdc34d7649d.hip | // !!! This is a file automatically generated by hipify!!!
#include "generate_bgfg.h"
#include "timing.h"
#include <hip/hip_runtime.h>
#include <cstdio>
/**
 * Accumulates a scaled, motion-compensated sample of one grayscale frame into a
 * running-mean image, optionally recording the sampled pixel value into a
 * per-pixel history buffer.
 *
 * @param width    image width in pixels
 * @param height   image height in pixels
 * @param img      accumulator image (width*height floats); each pixel gets += SCALE * sample
 * @param orig     source grayscale frame (width*height bytes)
 * @param offset   translation applied to (x, y) before sampling; result is clamped to bounds
 * @param SCALE    weight applied to each sample
 * @param bgPixels optional history buffer indexed as [pixel * frames + frame]; may be NULL
 * @param frames   number of history slots per pixel in bgPixels
 * @param curFrame history slot written for this frame
 * */
__global__ void kernTakeMean(int width, int height, float * img, unsigned char * orig, glm::vec2 offset,
	float SCALE, float * bgPixels, int frames, int curFrame) {
	int x = blockIdx.x * blockDim.x + threadIdx.x;
	int y = blockIdx.y * blockDim.y + threadIdx.y;
	// Guard: the grid may overhang the image on both axes.
	if (x >= width || y >= height) {
		return;
	}
	int idx = y * width + x;
	glm::ivec2 warp = glm::ivec2(x + (int)offset.x, y + (int)offset.y); // position of the moved point
	// Clamp the shifted coordinate so border pixels sample the nearest edge.
	warp.x = max(0, min(width - 1, warp.x));
	warp.y = max(0, min(height - 1, warp.y));
	float t = (float)orig[warp.y * width + warp.x];
	// Accumulate the weighted sample into the running-mean image.
	img[idx] += SCALE * t;
	// Optionally record the raw sample into this pixel's history slot.
	if (bgPixels != NULL) {
		bgPixels[frames * idx + curFrame] = t;
	}
	return;
}
/**
 * Builds background and foreground mean images from a sequence of grayscale
 * frames by averaging each frame shifted along per-frame motion vectors
 * (groupVectors[i].first for background, .second for foreground), and records
 * the per-pixel background samples into bgPixels.
 *
 * @param width        image width in pixels
 * @param height       image height in pixels
 * @param frames       number of motion-compensated frames (frames + 1 samples are averaged)
 * @param bgImg        out: background mean image (width*height floats)
 * @param fgImg        out: foreground mean image (width*height floats)
 * @param grayscale    input frames; grayscale[2] is used as the un-shifted reference
 *                     frame (hardcoded, see TODO below)
 * @param groupVectors per-frame (background, foreground) offset pairs
 * @param bgPixels     out: per-pixel background history, (frames + 1) * width * height floats
 * */
void generateBgFg(int width, int height, int frames, float * bgImg, float * fgImg, unsigned char ** grayscale,
	std::pair<glm::vec2, glm::vec2> * groupVectors, float * bgPixels) {
	// =====================
	// GPU Parallelism Setup
	// =====================
	dim3 blockSize2d(16, 16);
	dim3 blocksPerGrid2d(\
		(width + blockSize2d.x - 1) / blockSize2d.x,
		(height + blockSize2d.y - 1) / blockSize2d.y
	);
	float * dev_bgImg, * dev_fgImg, * dev_bgPixels;
	unsigned char * dev_grayscale;
	int N = width * height;
	float SCALE = 1.0f / (float) (frames + 1); // 1 / (frames + 1)
	// NOTE(review): none of the hip* allocation/copy calls below are error-checked;
	// a failed allocation would silently corrupt the results. TODO add checks.
	hipMalloc(&dev_bgPixels, (frames + 1) * N * sizeof(float));
	hipMalloc(&dev_bgImg, N * sizeof(float));
	hipMalloc(&dev_fgImg, N * sizeof(float));
	hipMalloc(&dev_grayscale, N * sizeof(unsigned char));
	hipMemset(dev_bgImg, 0, N * sizeof(float));
	hipMemset(dev_fgImg, 0, N * sizeof(float));
	// Seed both accumulators with the (hardcoded) reference frame, unshifted.
	hipMemcpy(dev_grayscale, grayscale[2], N * sizeof(unsigned char), hipMemcpyHostToDevice);
	TIMEINIT
	// NOTE(review): the hipify tool wrapped the TIMEIT macro inside
	// hipLaunchKernelGGL, which looks malformed -- presumably TIMEIT itself
	// expands to the kernel launch; confirm against timing.h before editing.
	hipLaunchKernelGGL(( TIMEIT((kernTakeMean), dim3(blocksPerGrid2d), dim3(blockSize2d), 0, 0, width, height, dev_bgImg, dev_grayscale, glm::vec2(0.0f, 0.0f), SCALE, dev_bgPixels, frames + 1, 0)), "Take Mean")
	hipMemcpy(dev_fgImg, dev_bgImg, N * sizeof(float), hipMemcpyDeviceToDevice);
	for (int i = 0; i < frames; i++) {
		// Skip over the reference frame's index (grayscale[2] was used above).
		int ni = (i >= 2) ? i + 1 : i; // TODO: to un-hardcode it
		hipMemcpy(dev_grayscale, grayscale[ni], N * sizeof(unsigned char), hipMemcpyHostToDevice);
		hipLaunchKernelGGL(( TIMEIT((kernTakeMean), dim3(blocksPerGrid2d), dim3(blockSize2d), 0, 0, width, height, dev_bgImg, dev_grayscale, groupVectors[i].first, SCALE, dev_bgPixels, frames + 1, i + 1)), "Take Mean 1")
		hipLaunchKernelGGL(( TIMEIT((kernTakeMean), dim3(blocksPerGrid2d), dim3(blockSize2d), 0, 0, width, height, dev_fgImg, dev_grayscale, groupVectors[i].second, SCALE, NULL, 0, 0)), "Take Mean 1")
		hipDeviceSynchronize();
	}
	TIMEEND
	// Copy results back to the host output buffers.
	hipMemcpy(bgImg, dev_bgImg, N * sizeof(float), hipMemcpyDeviceToHost);
	hipMemcpy(fgImg, dev_fgImg, N * sizeof(float), hipMemcpyDeviceToHost);
	hipMemcpy(bgPixels, dev_bgPixels, (frames + 1) * N * sizeof(float), hipMemcpyDeviceToHost);
	hipFree(dev_bgImg);
	hipFree(dev_fgImg);
	hipFree(dev_grayscale);
	hipFree(dev_bgPixels);
	return;
}
| a83af5353ad4ccf1f303ebd6d6080cdc34d7649d.cu | #include "generate_bgfg.h"
#include "timing.h"
#include <cuda.h>
#include <cstdio>
/**
*
*
* @param width
* @param height
* @param img
* @param orig
* @param offset
* @param SCALE
* @param bgPixels
* @param frames
* @param curFrame
* */
__global__ void kernTakeMean(int width, int height, float * img, unsigned char * orig, glm::vec2 offset,
float SCALE, float * bgPixels, int frames, int curFrame) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height) {
return;
}
int idx = y * width + x;
glm::ivec2 warp = glm::ivec2(x + (int)offset.x, y + (int)offset.y); // position of the moved point
warp.x = max(0, min(width - 1, warp.x));
warp.y = max(0, min(height - 1, warp.y));
float t = (float)orig[warp.y * width + warp.x];
img[idx] += SCALE * t;
if (bgPixels != NULL) {
bgPixels[frames * idx + curFrame] = t;
}
return;
}
/**
*
*
* @param width
* @param height
* @param frames
* @param bgImg
* @param fgImg
* @param grayscale
* @param groupVectors
* @param bgPixels
* */
void generateBgFg(int width, int height, int frames, float * bgImg, float * fgImg, unsigned char ** grayscale,
std::pair<glm::vec2, glm::vec2> * groupVectors, float * bgPixels) {
// =====================
// GPU Parallelism Setup
// =====================
dim3 blockSize2d(16, 16);
dim3 blocksPerGrid2d(\
(width + blockSize2d.x - 1) / blockSize2d.x,
(height + blockSize2d.y - 1) / blockSize2d.y
);
float * dev_bgImg, * dev_fgImg, * dev_bgPixels;
unsigned char * dev_grayscale;
int N = width * height;
float SCALE = 1.0f / (float) (frames + 1); // 1 / (frames + 1)
cudaMalloc(&dev_bgPixels, (frames + 1) * N * sizeof(float));
cudaMalloc(&dev_bgImg, N * sizeof(float));
cudaMalloc(&dev_fgImg, N * sizeof(float));
cudaMalloc(&dev_grayscale, N * sizeof(unsigned char));
cudaMemset(dev_bgImg, 0, N * sizeof(float));
cudaMemset(dev_fgImg, 0, N * sizeof(float));
cudaMemcpy(dev_grayscale, grayscale[2], N * sizeof(unsigned char), cudaMemcpyHostToDevice);
TIMEINIT
TIMEIT((kernTakeMean<<<blocksPerGrid2d, blockSize2d>>>(width, height, dev_bgImg, dev_grayscale, glm::vec2(0.0f, 0.0f), SCALE, dev_bgPixels, frames + 1, 0)), "Take Mean")
cudaMemcpy(dev_fgImg, dev_bgImg, N * sizeof(float), cudaMemcpyDeviceToDevice);
for (int i = 0; i < frames; i++) {
int ni = (i >= 2) ? i + 1 : i; // TODO: to un-hardcode it
cudaMemcpy(dev_grayscale, grayscale[ni], N * sizeof(unsigned char), cudaMemcpyHostToDevice);
TIMEIT((kernTakeMean<<<blocksPerGrid2d, blockSize2d>>>(width, height, dev_bgImg, dev_grayscale, groupVectors[i].first, SCALE, dev_bgPixels, frames + 1, i + 1)), "Take Mean 1")
TIMEIT((kernTakeMean<<<blocksPerGrid2d, blockSize2d>>>(width, height, dev_fgImg, dev_grayscale, groupVectors[i].second, SCALE, NULL, 0, 0)), "Take Mean 1")
cudaDeviceSynchronize();
}
TIMEEND
cudaMemcpy(bgImg, dev_bgImg, N * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(fgImg, dev_fgImg, N * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(bgPixels, dev_bgPixels, (frames + 1) * N * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(dev_bgImg);
cudaFree(dev_fgImg);
cudaFree(dev_grayscale);
cudaFree(dev_bgPixels);
return;
}
|
0015e11a2f16a4219c1319f1b4903f3c38c01c20.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Noel Lopes is an Assistant Professor at the Polytechnic Institute of Guarda, Portugal
Copyright (C) 2009, 2010, 2011, 2012 Noel de Jesus Mendona Lopes
This file is part of GPUMLib.
GPUMLib is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <assert.h>
#include <stdlib.h>
#include "BackPropagation.h"
#include "MBPkernels.h"
namespace GPUMLib {
int BackPropagation::Layer::totalNeuronsWithSelectiveActivation = 0;
int BackPropagation::Layer::patterns;
void BackPropagation::Layer::RandomizeWeights(cudafloat minValue, cudafloat maxValue, cudafloat initialLearningRate) {
assert(maxValue > minValue);
HostArray<cudafloat> learnRate(connections);
HostArray<cudafloat> delta(connections);
HostArray<cudafloat> weights(connections);
for(int c = 0; c < connections; c++) {
weights[c] = (maxValue - minValue) * ((cudafloat) rand() / RAND_MAX) + minValue;
learnRate[c] = initialLearningRate;
delta[c] = CUDA_VALUE(0.0);
}
d_bestWeights = d_weights = weights;
d_learnRate = learnRate;
d_lastDelta = d_lastDeltaWithoutLearningMomentum = delta;
}
void BackPropagation::Layer::Fire(hipStream_t stream) {
dim3 dimNeuronsPatterns;
dimNeuronsPatterns.x = neurons;
if (isOutputLayer) {
if(connections > MAX_THREADS_PER_BLOCK) {
int processed = 0;
do {
int patternsToProcess = (patterns > 65535) ? 65535 : patterns;
dimNeuronsPatterns.y = patternsToProcess;
KernelFireOutputLayer(stream, dimNeuronsPatterns, inputsBlockSize, d_inputs + (processed * inputsWithoutBias), d_weights.Pointer(), (d_m != nullptr) ? d_m + (processed * totalNeuronsWithSelectiveActivation) : nullptr, mOffset, totalNeuronsWithSelectiveActivation, d_desOutputs + (processed * neurons), d_outputs.Pointer() + (processed * neurons), d_localGradient.Pointer() + (processed * neurons), d_rms + processed, (d_localGradSpaceNet == nullptr) ? nullptr : d_localGradSpaceNet + (processed * totalNeuronsWithSelectiveActivation), inputsWithoutBias);
processed += patternsToProcess;
} while (processed < patterns);
} else {
int processed = 0;
do {
int patternsToProcess = (patterns > 65535) ? 65535 : patterns;
hipLaunchKernelGGL(( FireOutputLayer), dim3(patternsToProcess), dim3(dimInputsNeurons), sharedMemFire, stream, d_inputs + (processed * inputsWithoutBias), d_weights.Pointer(), (d_m == nullptr) ? nullptr : d_m + (processed * totalNeuronsWithSelectiveActivation), mOffset, totalNeuronsWithSelectiveActivation, d_desOutputs + (processed * neurons), d_outputs.Pointer() + (processed * neurons), d_localGradient.Pointer() + (processed * neurons), d_rms + processed, (d_m == nullptr) ? nullptr : d_localGradSpaceNet + (processed * totalNeuronsWithSelectiveActivation));
processed += patternsToProcess;
} while (processed < patterns);
}
} else {
if(connections > MAX_THREADS_PER_BLOCK) {
int processed = 0;
do {
int patternsToProcess = (patterns > 65535) ? 65535 : patterns;
dimNeuronsPatterns.y = patternsToProcess;
KernelFireLayer(stream, dimNeuronsPatterns, inputsBlockSize, d_inputs + (processed * inputsWithoutBias), d_weights.Pointer(), (d_m != nullptr) ? d_m + (processed * totalNeuronsWithSelectiveActivation) : nullptr, mOffset, totalNeuronsWithSelectiveActivation, d_outputs.Pointer() + (processed * neurons), inputsWithoutBias);
processed += patternsToProcess;
} while (processed < patterns);
} else {
int processed = 0;
do {
int patternsToProcess = (patterns > 65535) ? 65535 : patterns;
hipLaunchKernelGGL(( FireLayer), dim3(patternsToProcess), dim3(dimInputsNeurons), sharedMemFire, stream, d_inputs + (processed * inputsWithoutBias), d_weights.Pointer(), (d_m != nullptr) ? d_m + (processed * totalNeuronsWithSelectiveActivation) : nullptr, mOffset, totalNeuronsWithSelectiveActivation, d_outputs.Pointer() + (processed * neurons));
processed += patternsToProcess;
} while (processed < patterns);
}
}
}
void BackPropagation::Layer::CalculateLocalGradient(hipStream_t stream, cudafloat * rms, cudafloat * bestRMS, cudafloat rmsGrowToApplyRobustLearning, Layer & nextLayer) {
int processed = 0;
do {
int patternsToProcess = (patterns > 65535) ? 65535 : patterns;
hipLaunchKernelGGL(( GPUMLib::CalculateLocalGradient), dim3(patternsToProcess), dim3(dimOutputsNeurons), sharedMemGradients, stream, rms, bestRMS, rmsGrowToApplyRobustLearning, d_outputs.Pointer() + (processed * neurons), nextLayer.d_weights.Pointer(), (d_m != nullptr) ? d_m + (processed * totalNeuronsWithSelectiveActivation) : nullptr, mOffset, totalNeuronsWithSelectiveActivation, nextLayer.d_localGradient.Pointer() + (processed * dimOutputsNeurons.x), d_localGradient.Pointer() + (processed * neurons), (d_m == nullptr) ? nullptr : d_localGradSpaceNet + (processed * totalNeuronsWithSelectiveActivation));
processed += patternsToProcess;
} while (processed < patterns);
}
void BackPropagation::Layer::CorrectWeights(hipStream_t stream, int patternsBlockSize, cudafloat * rms, cudafloat * bestRMS, cudafloat rmsGrowToApplyRobustLearning, cudafloat robustFactor, cudafloat momentum, cudafloat u, cudafloat d, cudafloat maxStepSize) {
KernelCorrectLayerWeights(stream, dimInputsNeurons, patternsBlockSize, rms, bestRMS, rmsGrowToApplyRobustLearning, d_inputs, d_localGradient.Pointer(), d_weights.Pointer(), d_learnRate.Pointer(), d_lastDeltaWithoutLearningMomentum.Pointer(), d_lastDelta.Pointer(), maxStepSize, u, d, robustFactor, momentum, patterns);
}
void BackPropagation::Layer::Init(int neurons, int inputs, int nextLayerNeurons, cudafloat initialLearningRate, cudafloat * layerInputs, bool isOutputLayer, cudafloat * m, cudafloat * localGradSpaceNet, int mOffset) {
connections = inputs * neurons;
this->neurons = neurons;
inputsWithoutBias = inputs - 1;
RandomizeWeights(CUDA_VALUE(-1.0), CUDA_VALUE(1.0), initialLearningRate);
d_m = m;
d_localGradSpaceNet = localGradSpaceNet;
this->mOffset = mOffset;
inputsBlockSize = 1;
while(inputsBlockSize < MAX_THREADS_PER_BLOCK && inputsBlockSize < inputs) inputsBlockSize <<= 1;
d_inputs = layerInputs;
d_outputs.ResizeWithoutPreservingData(neurons * patterns);
d_localGradient.ResizeWithoutPreservingData(neurons * patterns);
sharedMemFire = connections * sizeof(cudafloat);
sharedMemGradients = (nextLayerNeurons * (neurons + 1)) * sizeof(cudafloat);
dimInputsNeurons.x = inputs;
dimInputsNeurons.y = neurons;
dimOutputsNeurons.x = nextLayerNeurons;
dimOutputsNeurons.y = neurons;
this->isOutputLayer = isOutputLayer;
}
void BackPropagation::SelectiveInputLayer::RandomizeWeights(cudafloat minValue, cudafloat maxValue, cudafloat initialLearningRate, HostArray<bool> & selectiveInputs) {
assert(maxValue > minValue);
int ninputs = selectiveInputs.Length();
HostArray<cudafloat> weights(ninputs);
HostArray<cudafloat> bias(ninputs);
HostArray<cudafloat> learningRate(ninputs);
HostArray<cudafloat> delta(ninputs);
for(int i = 0; i < ninputs; i++) {
if (selectiveInputs[i]) {
weights[i] = (maxValue - minValue) * ((cudafloat) rand() / RAND_MAX) + minValue;
bias[i] = (maxValue - minValue) * ((cudafloat) rand() / RAND_MAX) + minValue;
} else {
weights[i] = CUDA_VALUE(0.0);
bias[i] = CUDA_VALUE(0.0);
}
learningRate[i] = initialLearningRate;
delta[i] = CUDA_VALUE(0.0);
}
d_bestWeights = d_weights = weights;
d_bestBias = d_bias = bias;
d_learnRateBias = d_learnRate = learningRate;
d_lastDelta = d_lastDeltaBias = d_lastDeltaWithoutLearningMomentum = d_lastDeltaWithoutLearningMomentumBias = delta;
}
void BackPropagation::SelectiveInputLayer::Fire(hipStream_t stream) {
int processed = 0;
do {
int patternsToProcess = (patterns > 65535) ? 65535 : patterns;
hipLaunchKernelGGL(( FireSelectiveInputs), dim3(patternsToProcess), dim3(neurons), 0, stream, d_inputs + (processed * neurons), d_weights.Pointer(), d_bias.Pointer(), d_outputs.Pointer() + (processed * neurons), neurons);
processed += patternsToProcess;
} while (processed < patterns);
}
void BackPropagation::SelectiveInputLayer::CalculateLocalGradient(hipStream_t stream, cudafloat * rms, cudafloat * bestRMS, cudafloat rmsGrowToApplyRobustLearning, Layer & nextLayer) {
int processed = 0;
do {
int patternsToProcess = (patterns > 65535) ? 65535 : patterns;
hipLaunchKernelGGL(( CalcLocalGradSelectiveInputs), dim3(patternsToProcess), dim3(dimOutputsNeurons), sharedMemGradients, stream, rms, bestRMS, rmsGrowToApplyRobustLearning, d_inputs + (processed * neurons), d_weights.Pointer(), d_bias.Pointer(), nextLayer.d_weights.Pointer(), nextLayer.d_localGradient.Pointer() + (processed * dimOutputsNeurons.x), d_localGradient.Pointer() + (processed * neurons));
processed += patternsToProcess;
} while (processed < patterns);
}
void BackPropagation::SelectiveInputLayer::CorrectWeights(hipStream_t stream, cudafloat * rms, cudafloat * bestRMS, cudafloat rmsGrowToApplyRobustLearning, cudafloat robustFactor, cudafloat momentum, cudafloat u, cudafloat d, cudafloat maxStepSize) {
KernelCorrectWeightsSelectiveInputs(stream, neurons, patterns, rms, bestRMS, rmsGrowToApplyRobustLearning, d_inputs, d_localGradient.Pointer(), d_weights.Pointer(), d_bias.Pointer(), d_learnRate.Pointer(), d_learnRateBias.Pointer(), d_lastDeltaWithoutLearningMomentum.Pointer(), d_lastDeltaWithoutLearningMomentumBias.Pointer(), d_lastDelta.Pointer(), d_lastDeltaBias.Pointer(), u, d, maxStepSize, robustFactor, momentum, patterns);
}
void BackPropagation::CreateNetwork(HostArray<int> & sizeLayers, HostArray<int> * sizeSpaceLayers, HostArray<bool> * selectiveNeurons, HostMatrix<cudafloat> & trainInputPatterns, HostMatrix<cudafloat> & trainDesiredOutputPatterns, cudafloat initialLearningRate) {
int nsamples = trainInputPatterns.Rows();
int ninputs = trainInputPatterns.Columns();
Layer::patterns = nsamples;
assert(Layer::patterns > 0 && Layer::patterns == trainDesiredOutputPatterns.Rows());
d_inputs = trainInputPatterns;
d_desOutputs = trainDesiredOutputPatterns;
d_rmsOut.ResizeWithoutPreservingData(1);
this->initialLearningRate = initialLearningRate;
assert(initialLearningRate > CUDA_VALUE(0.0));
// Check for selective inputs
bool hasSelectiveInputs = false;
selectiveInputs.ResizeWithoutPreservingData(ninputs);
for(int i = 0; i < ninputs; i++) selectiveInputs[i] = false;
int fi = 0;
int li = ninputs - 1;
for(int s = 0; s < nsamples; s++) {
for(int i = fi; i <= li; i++) {
if (!selectiveInputs[i] && IsInfOrNaN(trainInputPatterns(s, i))) {
selectiveInputs[i] = hasSelectiveInputs = true;
if (i == fi) fi++; else if (i == li) li--;
}
}
if (fi >= li) break;
}
//Create the space layers
int numberSpaceLayers = (sizeSpaceLayers == nullptr) ? 0 : sizeSpaceLayers->Length();
selectiveInputLayerSpaceNetwork = nullptr;
if (numberSpaceLayers) {
assert(selectiveNeurons != nullptr);
spaceLayers.ResizeWithoutPreservingData(numberSpaceLayers);
int inputsWithoutBias = sizeLayers[0];
cudafloat * layerInputs = d_inputs.Pointer();
if (hasSelectiveInputs) {
selectiveInputLayerSpaceNetwork = new SelectiveInputLayer(nsamples, selectiveInputs, (*sizeSpaceLayers)[0], layerInputs, initialLearningRate);
layerInputs = selectiveInputLayerSpaceNetwork->d_outputs.Pointer();
}
for(int l = 0; l < numberSpaceLayers; l++) {
int neurons = (*sizeSpaceLayers)[l];
int nextLayerNeurons;
if (l == numberSpaceLayers - 1) {
Layer::totalNeuronsWithSelectiveActivation = neurons;
nextLayerNeurons = 0;
} else {
nextLayerNeurons = (*sizeSpaceLayers)[l + 1];
}
spaceLayers[l].Init(neurons, inputsWithoutBias + 1, nextLayerNeurons, initialLearningRate, layerInputs, false);
layerInputs = spaceLayers[l].d_outputs.Pointer();
inputsWithoutBias = neurons;
}
}
//Create the layers
int numberLayers = sizeLayers.Length() - 1;
assert(numberLayers > 0);
layers.ResizeWithoutPreservingData(numberLayers);
int outputLayer = numberLayers - 1;
int inputsWithoutBias = sizeLayers[0];
assert(inputsWithoutBias > 0 && inputsWithoutBias == trainInputPatterns.Columns());
cudafloat * layerInputs = d_inputs.Pointer();
if (hasSelectiveInputs) {
selectiveInputLayer = new SelectiveInputLayer(nsamples, selectiveInputs, sizeLayers[1], layerInputs, initialLearningRate);
layerInputs = selectiveInputLayer->d_outputs.Pointer();
} else {
selectiveInputLayer = nullptr;
}
cudafloat * m = (numberSpaceLayers == 0) ? nullptr : spaceLayers[numberSpaceLayers - 1].d_outputs.Pointer();
cudafloat * localGradSpaceNet = (numberSpaceLayers == 0) ? nullptr : spaceLayers[numberSpaceLayers - 1].d_localGradient.Pointer();
int mOffset = 0;
for(int l = 0; l < numberLayers; l++) {
int neurons = sizeLayers[l + 1];
assert(neurons > 0);
bool isOutputLayer = (l == outputLayer) ? true : false;
int nextLayerNeurons = (isOutputLayer) ? 0 : sizeLayers[l + 2];
bool hasSelectiveNeurons = (numberSpaceLayers > 0 && (*selectiveNeurons)[l]) ? true : false;
layers[l].Init(neurons, inputsWithoutBias + 1, nextLayerNeurons, initialLearningRate, layerInputs, isOutputLayer, (hasSelectiveNeurons) ? m : nullptr, (hasSelectiveNeurons) ? localGradSpaceNet : nullptr, mOffset);
if (hasSelectiveNeurons) mOffset += neurons;
layerInputs = layers[l].d_outputs.Pointer();
inputsWithoutBias = neurons;
}
//Robust Learning
layersRobustTraining = numberLayers + numberSpaceLayers;
if (hasSelectiveInputs) layersRobustTraining += (numberSpaceLayers) ? 4 : 2;
HostArray<int> numberWeightsLayer(layersRobustTraining);
HostArray<cudafloat *> weightsLayers(layersRobustTraining);
HostArray<cudafloat *> bestWeightsLayers(layersRobustTraining);
HostArray<cudafloat *> learnRatesLayers(layersRobustTraining);
HostArray<cudafloat *> lastDeltaLayers(layersRobustTraining);
HostArray<cudafloat *> lastDeltaWithoutLMlayers(layersRobustTraining);
maxNumberWeigths = 0;
int ll = 0;
while (ll < numberSpaceLayers) {
int connections = spaceLayers[ll].connections;
if (connections > maxNumberWeigths) maxNumberWeigths = connections;
numberWeightsLayer[ll] = connections;
weightsLayers[ll] = spaceLayers[ll].d_weights.Pointer();
bestWeightsLayers[ll] = spaceLayers[ll].d_bestWeights.Pointer();
learnRatesLayers[ll] = spaceLayers[ll].d_learnRate.Pointer();
lastDeltaLayers[ll] = spaceLayers[ll].d_lastDelta.Pointer();
lastDeltaWithoutLMlayers[ll] = spaceLayers[ll].d_lastDeltaWithoutLearningMomentum.Pointer();
ll++;
}
for(int l = 0; l < numberLayers; l++) {
int connections = layers[l].connections;
if (connections > maxNumberWeigths) maxNumberWeigths = connections;
numberWeightsLayer[ll] = connections;
weightsLayers[ll] = layers[l].d_weights.Pointer();
bestWeightsLayers[ll] = layers[l].d_bestWeights.Pointer();
learnRatesLayers[ll] = layers[l].d_learnRate.Pointer();
lastDeltaLayers[ll] = layers[l].d_lastDelta.Pointer();
lastDeltaWithoutLMlayers[ll] = layers[l].d_lastDeltaWithoutLearningMomentum.Pointer();
ll++;
}
if (hasSelectiveInputs) {
numberWeightsLayer[ll] = ninputs;
weightsLayers[ll] = selectiveInputLayer->d_weights.Pointer();
bestWeightsLayers[ll] = selectiveInputLayer->d_bestWeights.Pointer();
learnRatesLayers[ll] = selectiveInputLayer->d_learnRate.Pointer();
lastDeltaLayers[ll] = selectiveInputLayer->d_lastDelta.Pointer();
lastDeltaWithoutLMlayers[ll] = selectiveInputLayer->d_lastDeltaWithoutLearningMomentum.Pointer();
ll++;
numberWeightsLayer[ll] = ninputs;
weightsLayers[ll] = selectiveInputLayer->d_bias.Pointer();
bestWeightsLayers[ll] = selectiveInputLayer->d_bestBias.Pointer();
learnRatesLayers[ll] = selectiveInputLayer->d_learnRateBias.Pointer();
lastDeltaLayers[ll] = selectiveInputLayer->d_lastDeltaBias.Pointer();
lastDeltaWithoutLMlayers[ll] = selectiveInputLayer->d_lastDeltaWithoutLearningMomentumBias.Pointer();
ll++;
if (numberSpaceLayers) {
numberWeightsLayer[ll] = ninputs;
weightsLayers[ll] = selectiveInputLayerSpaceNetwork->d_weights.Pointer();
bestWeightsLayers[ll] = selectiveInputLayerSpaceNetwork->d_bestWeights.Pointer();
learnRatesLayers[ll] = selectiveInputLayerSpaceNetwork->d_learnRate.Pointer();
lastDeltaLayers[ll] = selectiveInputLayerSpaceNetwork->d_lastDelta.Pointer();
lastDeltaWithoutLMlayers[ll] = selectiveInputLayerSpaceNetwork->d_lastDeltaWithoutLearningMomentum.Pointer();
ll++;
numberWeightsLayer[ll] = ninputs;
weightsLayers[ll] = selectiveInputLayerSpaceNetwork->d_bias.Pointer();
bestWeightsLayers[ll] = selectiveInputLayerSpaceNetwork->d_bestBias.Pointer();
learnRatesLayers[ll] = selectiveInputLayerSpaceNetwork->d_learnRateBias.Pointer();
lastDeltaLayers[ll] = selectiveInputLayerSpaceNetwork->d_lastDeltaBias.Pointer();
lastDeltaWithoutLMlayers[ll] = selectiveInputLayerSpaceNetwork->d_lastDeltaWithoutLearningMomentumBias.Pointer();
ll++;
}
}
d_numberWeightsLayer = numberWeightsLayer;
d_weightsLayers = weightsLayers;
d_bestWeightsLayers = bestWeightsLayers;
d_learnRatesLayers = learnRatesLayers;
d_lastDeltaLayers = lastDeltaLayers;
d_lastDeltaWithoutLMlayers = lastDeltaWithoutLMlayers;
robustLearning = true;
rmsGrowToApplyRobustLearning = CUDA_VALUE(1.001); // 0.1%
robustFactor = CUDA_VALUE(0.5);
momentum = CUDA_VALUE(0.7);
u = CUDA_VALUE(1.2);
d = CUDA_VALUE(0.8);
maxStepSize = CUDA_VALUE(10.0);
//Create the RMS vectors
int sizeRMSvector = (layers[outputLayer].connections > MAX_THREADS_PER_BLOCK) ? Layer::patterns * layers[outputLayer].neurons : Layer::patterns;
d_rms.ResizeWithoutPreservingData(sizeRMSvector);
layers[outputLayer].d_desOutputs = d_desOutputs.Pointer();
layers[outputLayer].d_rms = d_rms.Pointer();
layers[outputLayer].sharedMemFire += layers[outputLayer].neurons * sizeof(cudafloat);
// Initialize the initial RMS
HostArray<cudafloat> h_bestRMS(1);
h_bestRMS[0] = CUDA_VALUE(1.0);
d_bestRMS = h_bestRMS;
rms.Value() = h_bestRMS[0];
//Other stuff
patternsBlockSize = 1;
while(patternsBlockSize < MAX_THREADS_PER_BLOCK && patternsBlockSize < Layer::patterns) patternsBlockSize <<= 1;
numberPatternsNeurons = (cudafloat) Layer::patterns * (cudafloat) layers[outputLayer].neurons;
epoch = 0;
}
BackPropagation::BackPropagation(HostArray<int> & sizeLayers, HostMatrix<cudafloat> & trainInputPatterns, HostMatrix<cudafloat> & trainDesiredOutputPatterns, cudafloat initialLearningRate) {
CreateNetwork(sizeLayers, nullptr, nullptr, trainInputPatterns, trainDesiredOutputPatterns, initialLearningRate);
}
HostArray<cudafloat> BackPropagation::GetLayerWeights(int layer) {
assert(layer >= 0 && layer < layers.Length());
return HostArray<cudafloat>(layers[layer].d_weights);
}
void BackPropagation::SetLayerWeights(int layer, HostArray<cudafloat> & weights) {
assert(layer >= 0 && layer < layers.Length());
layers[layer].d_weights = weights;
}
void BackPropagation::SetLayerWeights(int layer, HostMatrix<cudafloat> & weights, HostArray<cudafloat> & bias) {
assert(layer >= 0 && layer < layers.Length());
Layer * l = &(layers[layer]);
int neurons = l->neurons;
int inputs = weights.Columns();
assert(neurons == bias.Length());
HostArray<cudafloat> weights_bias(weights.Elements() + bias.Length());
int w = 0;
for(int n = 0; n < neurons; n++) {
weights_bias[w++] = bias[n];
for(int i = 0; i < inputs; i++) {
weights_bias[w++] = weights(n, i);
}
}
layers[layer].d_weights = weights_bias;
}
HostArray<cudafloat> BackPropagation::GetSelectiveInputWeights() {
return HostArray<cudafloat>(selectiveInputLayer->d_weights);
}
void BackPropagation::SetSelectiveInputWeights(HostArray<cudafloat> & weights) {
selectiveInputLayer->d_weights = weights;
}
HostArray<cudafloat> BackPropagation::GetSelectiveInputBias() {
return HostArray<cudafloat>(selectiveInputLayer->d_bias);
}
void BackPropagation::SetSelectiveInputBias(HostArray<cudafloat> & bias) {
selectiveInputLayer->d_bias = bias;
}
void BackPropagation::RandomizeWeights(cudafloat minValue, cudafloat maxValue) {
int nSpaceLayers = spaceLayers.Length();
for (int layer = 0; layer < nSpaceLayers; layer++) spaceLayers[layer].RandomizeWeights(minValue, maxValue, initialLearningRate);
int nLayers = layers.Length();
for (int layer = 0; layer < nLayers; layer++) layers[layer].RandomizeWeights(minValue, maxValue, initialLearningRate);
if (selectiveInputLayerSpaceNetwork) selectiveInputLayerSpaceNetwork->RandomizeWeights(minValue, maxValue, initialLearningRate, selectiveInputs);
if (selectiveInputLayer) selectiveInputLayer->RandomizeWeights(minValue, maxValue, initialLearningRate, selectiveInputs);
epoch = 0;
}
bool BackPropagation::GetRobustLearning() const {
return robustLearning;
}
void BackPropagation::SetRobustLearning(bool value) {
robustLearning = value;
}
cudafloat BackPropagation::GetMaxPercentageRMSGrow() const {
return rmsGrowToApplyRobustLearning - CUDA_VALUE(1.0);
}
void BackPropagation::SetMaxPercentageRMSGrow(cudafloat value) {
assert(value > CUDA_VALUE(0.0));
rmsGrowToApplyRobustLearning = CUDA_VALUE(1.0) + value;
}
cudafloat BackPropagation::GetRobustFactor() const {
return robustFactor;
}
void BackPropagation::SetRobustFactor(cudafloat value) {
assert(value > CUDA_VALUE(0.0) && value < CUDA_VALUE(1.0));
robustFactor = value;
}
cudafloat BackPropagation::GetMomentum() const {
return momentum;
}
void BackPropagation::SetMomentum(cudafloat value) {
assert(value > CUDA_VALUE(0.0) && value < CUDA_VALUE(1.0));
momentum = value;
}
cudafloat BackPropagation::GetUpStepSizeFactor() const {
return u;
}
void BackPropagation::SetUpStepSizeFactor(cudafloat value){
assert(value > CUDA_VALUE(1.0));
u = value;
}
cudafloat BackPropagation::GetDownStepSizeFactor() const {
return d;
}
void BackPropagation::SetDownStepSizeFactor(cudafloat value) {
assert(value > CUDA_VALUE(0.0) && value < CUDA_VALUE(1.0));
d = value;
}
cudafloat BackPropagation::GetMaxStepSize() const {
return maxStepSize;
}
void BackPropagation::SetMaxStepSize(cudafloat value) {
assert(value > CUDA_VALUE(0.0));
maxStepSize = value;
}
int BackPropagation::GetEpoch() const {
return epoch;
}
int BackPropagation::GetNumberLayers() const {
return layers.Length();
}
int BackPropagation::GetNumberInputs() const {
return layers[0].inputsWithoutBias;
}
int BackPropagation::GetNumberOutputs() const {
return layers[layers.Length() - 1].neurons;
}
int BackPropagation::GetNumberNeurons(int layer) const {
assert(layer >= 0 && layer < layers.Length());
return layers[layer].neurons;
}
void BackPropagation::Fire() {
if (selectiveInputLayerSpaceNetwork != nullptr) selectiveInputLayerSpaceNetwork->Fire(streamKernels);
int nSpaceLayers = spaceLayers.Length();
for (int l = 0; l < nSpaceLayers; l++) spaceLayers[l].Fire(streamKernels);
if (selectiveInputLayer != nullptr) selectiveInputLayer->Fire(streamKernels);
int numLayers = layers.Length();
for(int l = 0; l < numLayers; l++) layers[l].Fire(streamKernels);
}
cudafloat BackPropagation::GetRMS() {
hipDeviceSynchronize();
Fire(); // Determine the network outputs
// Calculate the RMS
KernelCalculateRMS(streamKernels, patternsBlockSize, d_rms.Pointer(), d_rmsOut.Pointer(), d_rms.Length(), numberPatternsNeurons);
rms.UpdateValue(d_rmsOut.Pointer());
return rms.Value();
}
cudafloat BackPropagation::GetRMSestimate() {
cudafloat RMS = rms.Value();
if (epoch == 0 && RMS >= CUDA_VALUE(1.0)) return GetRMS();
return RMS;
}
void BackPropagation::TrainOneEpoch() {
int numLayers = layers.Length();
int nSpaceLayers = spaceLayers.Length();
Fire(); // Determine the network outputs
// Calculate the RMS / Robust training
if (robustLearning) {
KernelCalculateRMS(streamKernels, patternsBlockSize, d_rms.Pointer(), d_rmsOut.Pointer(), d_rms.Length(), numberPatternsNeurons);
if (hipStreamQuery(streamRMS) == hipSuccess) rms.UpdateValueAsync(d_rmsOut.Pointer(), streamRMS);
hipLaunchKernelGGL(( RobustLearning), dim3(1), dim3(maxNumberWeigths), 0, streamKernels, d_rmsOut.Pointer(), d_bestRMS.Pointer(), (cudafloat) rmsGrowToApplyRobustLearning, layersRobustTraining, d_numberWeightsLayer.Pointer(), d_weightsLayers.Pointer(), d_bestWeightsLayers.Pointer(), d_learnRatesLayers.Pointer(), robustFactor, d_lastDeltaWithoutLMlayers.Pointer(), d_lastDeltaLayers.Pointer());
} else {
if (hipStreamQuery(streamRMS) == hipSuccess) {
KernelCalculateRMS(streamRMS, patternsBlockSize, d_rms.Pointer(), d_rmsOut.Pointer(), d_rms.Length(), numberPatternsNeurons);
rms.UpdateValueAsync(d_rmsOut.Pointer(), streamRMS);
}
}
// Calculate local gradients. The local gradient for the output layer was already calculated.
cudafloat * rms = (robustLearning) ? d_rmsOut.Pointer() : nullptr;
cudafloat * bestRMS = (robustLearning) ? d_bestRMS.Pointer() : nullptr;
for(int l = numLayers - 2; l >= 0; l--) {
layers[l].CalculateLocalGradient(streamKernels, rms, bestRMS, rmsGrowToApplyRobustLearning, layers[l + 1]);
}
if (selectiveInputLayer != nullptr) selectiveInputLayer->CalculateLocalGradient(streamKernels, rms, bestRMS, rmsGrowToApplyRobustLearning, layers[0]);
for (int l = nSpaceLayers -2; l >= 0; l--) spaceLayers[l].CalculateLocalGradient(streamKernels, rms, bestRMS, rmsGrowToApplyRobustLearning, spaceLayers[l + 1]);
if (selectiveInputLayerSpaceNetwork != nullptr) selectiveInputLayerSpaceNetwork->CalculateLocalGradient(streamKernels, rms, bestRMS, rmsGrowToApplyRobustLearning, spaceLayers[0]);
// Correct the weights
for(int l = numLayers - 1; l >= 0; l--) {
layers[l].CorrectWeights(streamKernels, patternsBlockSize, rms, bestRMS, rmsGrowToApplyRobustLearning, robustFactor, momentum, u, d, maxStepSize);
}
if (selectiveInputLayer != nullptr) selectiveInputLayer->CorrectWeights(streamKernels, rms, bestRMS, rmsGrowToApplyRobustLearning, robustFactor, momentum, u, d, maxStepSize);
for (int l = nSpaceLayers - 1; l >= 0; l--) spaceLayers[l].CorrectWeights(streamKernels, patternsBlockSize, rms, bestRMS, rmsGrowToApplyRobustLearning, robustFactor, momentum, u, d, maxStepSize);
if (selectiveInputLayerSpaceNetwork != nullptr) selectiveInputLayerSpaceNetwork->CorrectWeights(streamKernels, rms, bestRMS, rmsGrowToApplyRobustLearning, robustFactor, momentum, u, d, maxStepSize);
epoch++;
}
void BackPropagation::Train(int epochs) {
for (int e = 0; e < epochs; e++) TrainOneEpoch();
}
void BackPropagation::Train(int epochs, cudafloat rmsStop) {
// In some situations, we may get the RMS error from a previous trained network.
// To avoid this, we compute the actual RMS before training the network.
GetRMS();
for (int e = 0; e < epochs; e++) {
TrainOneEpoch();
if (GetRMSestimate() <= rmsStop) break;
}
}
// Propagates a batch of input patterns through the space network (if any) and
// the main network, and returns the outputs of the last layer.
//
// inputs  - one pattern per row; columns must match the number of network inputs.
// returns - a host matrix with one row per pattern and one column per output neuron.
//
// Kernel launches are chunked so the grid x-dimension never exceeds 65535.
HostMatrix<cudafloat> BackPropagation::GetOutputs(HostMatrix<cudafloat> & inputs) {
    int patterns = inputs.Rows();
    int numberLayers = layers.Length();
    int numberSpaceLayers = spaceLayers.Length();

    DeviceMatrix<cudafloat> d_inputs(inputs);

    // Temporary device buffers holding the outputs of each space-network layer.
    HostArray< DeviceMatrix<cudafloat> * > spaceLayerOutputs;
    spaceLayerOutputs.ResizeWithoutPreservingData(numberSpaceLayers);
    for (int l = 0; l < numberSpaceLayers; l++) {
        spaceLayerOutputs[l] = new DeviceMatrix<cudafloat>(patterns, spaceLayers[l].neurons);
    }

    // Temporary device buffers holding the outputs of each main-network layer.
    HostArray< DeviceMatrix<cudafloat> * > layerOutputs;
    layerOutputs.ResizeWithoutPreservingData(numberLayers);
    for (int l = 0; l < numberLayers; l++) {
        layerOutputs[l] = new DeviceMatrix<cudafloat>(patterns, layers[l].neurons);
    }

    cudafloat * layerInputs = d_inputs.Pointer();
    int ninputs = d_inputs.Columns();

    // Scratch buffer shared by both selective-input layers; the space-network
    // pass is fully consumed (into spaceLayerOutputs) before the main-network
    // pass reuses it.
    DeviceArray<cudafloat> outputsSelectiveInput(patterns * ninputs);

    // ---- space network ----
    if (selectiveInputLayerSpaceNetwork != nullptr) {
        int processed = 0;
        do {
            // BUGFIX: size each chunk by the patterns still remaining. The
            // previous code always used min(patterns, 65535), so whenever
            // patterns > 65535 the chunks after the first launched with a full
            // 65535 patterns and overran the input/output buffers.
            int patternsToProcess = patterns - processed;
            if (patternsToProcess > 65535) patternsToProcess = 65535;
            hipLaunchKernelGGL(( FireSelectiveInputs), dim3(patternsToProcess), dim3(ninputs), 0, streamKernels, layerInputs + (processed * ninputs), selectiveInputLayerSpaceNetwork->d_weights.Pointer(), selectiveInputLayerSpaceNetwork->d_bias.Pointer(), outputsSelectiveInput.Pointer() + (processed * ninputs), ninputs);
            processed += patternsToProcess;
        } while (processed < patterns);
        layerInputs = outputsSelectiveInput.Pointer();
    }

    for (int l = 0; l < numberSpaceLayers; l++) {
        // NOTE(review): the per-pattern stride of the output pointers below is
        // inputsWithoutBias, while the output buffers have `neurons` columns —
        // presumably the stride should be `neurons` whenever the two differ.
        // Kept as-is (matches the original); confirm before changing.
        if (spaceLayers[l].connections > MAX_THREADS_PER_BLOCK) {
            dim3 dimNeuronsPatterns;
            dimNeuronsPatterns.x = spaceLayers[l].neurons;
            int processed = 0;
            do {
                int patternsToProcess = patterns - processed;
                if (patternsToProcess > 65535) patternsToProcess = 65535;
                dimNeuronsPatterns.y = patternsToProcess;
                KernelFireLayer(streamKernels, dimNeuronsPatterns, spaceLayers[l].inputsBlockSize, layerInputs + (processed * spaceLayers[l].inputsWithoutBias), spaceLayers[l].d_weights.Pointer(), nullptr, 0, Layer::totalNeuronsWithSelectiveActivation, spaceLayerOutputs[l]->Pointer() + (processed * spaceLayers[l].inputsWithoutBias), spaceLayers[l].inputsWithoutBias);
                processed += patternsToProcess;
            } while (processed < patterns);
        } else {
            int processed = 0;
            do {
                int patternsToProcess = patterns - processed;
                if (patternsToProcess > 65535) patternsToProcess = 65535;
                hipLaunchKernelGGL(( FireLayer), dim3(patternsToProcess), dim3(spaceLayers[l].dimInputsNeurons), spaceLayers[l].sharedMemFire, streamKernels, layerInputs + (processed * spaceLayers[l].inputsWithoutBias), spaceLayers[l].d_weights.Pointer(), nullptr, 0, Layer::totalNeuronsWithSelectiveActivation, spaceLayerOutputs[l]->Pointer() + (processed * spaceLayers[l].inputsWithoutBias));
                processed += patternsToProcess;
            } while (processed < patterns);
        }
        layerInputs = spaceLayerOutputs[l]->Pointer();
    }

    // The outputs of the last space layer provide the selective-activation
    // values (m) consumed by the main-network layers.
    cudafloat * d_m = nullptr;
    if (numberSpaceLayers > 0) d_m = layerInputs;

    // ---- main network ----
    layerInputs = d_inputs.Pointer();

    if (selectiveInputLayer != nullptr) {
        int processed = 0;
        do {
            int patternsToProcess = patterns - processed;
            if (patternsToProcess > 65535) patternsToProcess = 65535;
            hipLaunchKernelGGL(( FireSelectiveInputs), dim3(patternsToProcess), dim3(ninputs), 0, streamKernels, layerInputs + (processed * ninputs), selectiveInputLayer->d_weights.Pointer(), selectiveInputLayer->d_bias.Pointer(), outputsSelectiveInput.Pointer() + (processed * ninputs), ninputs);
            processed += patternsToProcess;
        } while (processed < patterns);
        layerInputs = outputsSelectiveInput.Pointer();
    }

    for (int l = 0; l < numberLayers; l++) {
        if (layers[l].connections > MAX_THREADS_PER_BLOCK) {
            dim3 dimNeuronsPatterns;
            dimNeuronsPatterns.x = layers[l].neurons;
            int processed = 0;
            do {
                int patternsToProcess = patterns - processed;
                if (patternsToProcess > 65535) patternsToProcess = 65535;
                dimNeuronsPatterns.y = patternsToProcess;
                KernelFireLayer(streamKernels, dimNeuronsPatterns, layers[l].inputsBlockSize, layerInputs + (processed * layers[l].inputsWithoutBias), layers[l].d_weights.Pointer(), (layers[l].d_m != nullptr) ? d_m + (processed * Layer::totalNeuronsWithSelectiveActivation) : nullptr, layers[l].mOffset, Layer::totalNeuronsWithSelectiveActivation, layerOutputs[l]->Pointer() + (processed * layers[l].inputsWithoutBias), layers[l].inputsWithoutBias);
                processed += patternsToProcess;
            } while (processed < patterns);
        } else {
            int processed = 0;
            do {
                int patternsToProcess = patterns - processed;
                if (patternsToProcess > 65535) patternsToProcess = 65535;
                hipLaunchKernelGGL(( FireLayer), dim3(patternsToProcess), dim3(layers[l].dimInputsNeurons), layers[l].sharedMemFire, streamKernels, layerInputs + (processed * layers[l].inputsWithoutBias), layers[l].d_weights.Pointer(), (layers[l].d_m != nullptr) ? d_m + (processed * Layer::totalNeuronsWithSelectiveActivation) : nullptr, layers[l].mOffset, Layer::totalNeuronsWithSelectiveActivation, layerOutputs[l]->Pointer() + (processed * layers[l].inputsWithoutBias));
                processed += patternsToProcess;
            } while (processed < patterns);
        }
        layerInputs = layerOutputs[l]->Pointer();
    }

    // Copy the last layer's outputs back to the host.
    HostMatrix<cudafloat> outputs(*(layerOutputs[numberLayers - 1]));

    // Release the temporary device buffers.
    for (int l = 0; l < numberSpaceLayers; l++) {
        delete spaceLayerOutputs[l];
    }
    for (int l = 0; l < numberLayers; l++) {
        delete layerOutputs[l];
    }

    return outputs;
}
} | 0015e11a2f16a4219c1319f1b4903f3c38c01c20.cu | /*
Noel Lopes is an Assistant Professor at the Polytechnic Institute of Guarda, Portugal
Copyright (C) 2009, 2010, 2011, 2012 Noel de Jesus Mendonša Lopes
This file is part of GPUMLib.
GPUMLib is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <assert.h>
#include <stdlib.h>
#include "BackPropagation.h"
#include "MBPkernels.h"
namespace GPUMLib {
int BackPropagation::Layer::totalNeuronsWithSelectiveActivation = 0;
int BackPropagation::Layer::patterns;
void BackPropagation::Layer::RandomizeWeights(cudafloat minValue, cudafloat maxValue, cudafloat initialLearningRate) {
assert(maxValue > minValue);
HostArray<cudafloat> learnRate(connections);
HostArray<cudafloat> delta(connections);
HostArray<cudafloat> weights(connections);
for(int c = 0; c < connections; c++) {
weights[c] = (maxValue - minValue) * ((cudafloat) rand() / RAND_MAX) + minValue;
learnRate[c] = initialLearningRate;
delta[c] = CUDA_VALUE(0.0);
}
d_bestWeights = d_weights = weights;
d_learnRate = learnRate;
d_lastDelta = d_lastDeltaWithoutLearningMomentum = delta;
}
void BackPropagation::Layer::Fire(cudaStream_t stream) {
dim3 dimNeuronsPatterns;
dimNeuronsPatterns.x = neurons;
if (isOutputLayer) {
if(connections > MAX_THREADS_PER_BLOCK) {
int processed = 0;
do {
int patternsToProcess = (patterns > 65535) ? 65535 : patterns;
dimNeuronsPatterns.y = patternsToProcess;
KernelFireOutputLayer(stream, dimNeuronsPatterns, inputsBlockSize, d_inputs + (processed * inputsWithoutBias), d_weights.Pointer(), (d_m != nullptr) ? d_m + (processed * totalNeuronsWithSelectiveActivation) : nullptr, mOffset, totalNeuronsWithSelectiveActivation, d_desOutputs + (processed * neurons), d_outputs.Pointer() + (processed * neurons), d_localGradient.Pointer() + (processed * neurons), d_rms + processed, (d_localGradSpaceNet == nullptr) ? nullptr : d_localGradSpaceNet + (processed * totalNeuronsWithSelectiveActivation), inputsWithoutBias);
processed += patternsToProcess;
} while (processed < patterns);
} else {
int processed = 0;
do {
int patternsToProcess = (patterns > 65535) ? 65535 : patterns;
FireOutputLayer<<<patternsToProcess, dimInputsNeurons, sharedMemFire, stream>>>(d_inputs + (processed * inputsWithoutBias), d_weights.Pointer(), (d_m == nullptr) ? nullptr : d_m + (processed * totalNeuronsWithSelectiveActivation), mOffset, totalNeuronsWithSelectiveActivation, d_desOutputs + (processed * neurons), d_outputs.Pointer() + (processed * neurons), d_localGradient.Pointer() + (processed * neurons), d_rms + processed, (d_m == nullptr) ? nullptr : d_localGradSpaceNet + (processed * totalNeuronsWithSelectiveActivation));
processed += patternsToProcess;
} while (processed < patterns);
}
} else {
if(connections > MAX_THREADS_PER_BLOCK) {
int processed = 0;
do {
int patternsToProcess = (patterns > 65535) ? 65535 : patterns;
dimNeuronsPatterns.y = patternsToProcess;
KernelFireLayer(stream, dimNeuronsPatterns, inputsBlockSize, d_inputs + (processed * inputsWithoutBias), d_weights.Pointer(), (d_m != nullptr) ? d_m + (processed * totalNeuronsWithSelectiveActivation) : nullptr, mOffset, totalNeuronsWithSelectiveActivation, d_outputs.Pointer() + (processed * neurons), inputsWithoutBias);
processed += patternsToProcess;
} while (processed < patterns);
} else {
int processed = 0;
do {
int patternsToProcess = (patterns > 65535) ? 65535 : patterns;
FireLayer<<<patternsToProcess, dimInputsNeurons, sharedMemFire, stream>>>(d_inputs + (processed * inputsWithoutBias), d_weights.Pointer(), (d_m != nullptr) ? d_m + (processed * totalNeuronsWithSelectiveActivation) : nullptr, mOffset, totalNeuronsWithSelectiveActivation, d_outputs.Pointer() + (processed * neurons));
processed += patternsToProcess;
} while (processed < patterns);
}
}
}
void BackPropagation::Layer::CalculateLocalGradient(cudaStream_t stream, cudafloat * rms, cudafloat * bestRMS, cudafloat rmsGrowToApplyRobustLearning, Layer & nextLayer) {
int processed = 0;
do {
int patternsToProcess = (patterns > 65535) ? 65535 : patterns;
GPUMLib::CalculateLocalGradient<<<patternsToProcess, dimOutputsNeurons, sharedMemGradients, stream>>>(rms, bestRMS, rmsGrowToApplyRobustLearning, d_outputs.Pointer() + (processed * neurons), nextLayer.d_weights.Pointer(), (d_m != nullptr) ? d_m + (processed * totalNeuronsWithSelectiveActivation) : nullptr, mOffset, totalNeuronsWithSelectiveActivation, nextLayer.d_localGradient.Pointer() + (processed * dimOutputsNeurons.x), d_localGradient.Pointer() + (processed * neurons), (d_m == nullptr) ? nullptr : d_localGradSpaceNet + (processed * totalNeuronsWithSelectiveActivation));
processed += patternsToProcess;
} while (processed < patterns);
}
void BackPropagation::Layer::CorrectWeights(cudaStream_t stream, int patternsBlockSize, cudafloat * rms, cudafloat * bestRMS, cudafloat rmsGrowToApplyRobustLearning, cudafloat robustFactor, cudafloat momentum, cudafloat u, cudafloat d, cudafloat maxStepSize) {
KernelCorrectLayerWeights(stream, dimInputsNeurons, patternsBlockSize, rms, bestRMS, rmsGrowToApplyRobustLearning, d_inputs, d_localGradient.Pointer(), d_weights.Pointer(), d_learnRate.Pointer(), d_lastDeltaWithoutLearningMomentum.Pointer(), d_lastDelta.Pointer(), maxStepSize, u, d, robustFactor, momentum, patterns);
}
void BackPropagation::Layer::Init(int neurons, int inputs, int nextLayerNeurons, cudafloat initialLearningRate, cudafloat * layerInputs, bool isOutputLayer, cudafloat * m, cudafloat * localGradSpaceNet, int mOffset) {
connections = inputs * neurons;
this->neurons = neurons;
inputsWithoutBias = inputs - 1;
RandomizeWeights(CUDA_VALUE(-1.0), CUDA_VALUE(1.0), initialLearningRate);
d_m = m;
d_localGradSpaceNet = localGradSpaceNet;
this->mOffset = mOffset;
inputsBlockSize = 1;
while(inputsBlockSize < MAX_THREADS_PER_BLOCK && inputsBlockSize < inputs) inputsBlockSize <<= 1;
d_inputs = layerInputs;
d_outputs.ResizeWithoutPreservingData(neurons * patterns);
d_localGradient.ResizeWithoutPreservingData(neurons * patterns);
sharedMemFire = connections * sizeof(cudafloat);
sharedMemGradients = (nextLayerNeurons * (neurons + 1)) * sizeof(cudafloat);
dimInputsNeurons.x = inputs;
dimInputsNeurons.y = neurons;
dimOutputsNeurons.x = nextLayerNeurons;
dimOutputsNeurons.y = neurons;
this->isOutputLayer = isOutputLayer;
}
void BackPropagation::SelectiveInputLayer::RandomizeWeights(cudafloat minValue, cudafloat maxValue, cudafloat initialLearningRate, HostArray<bool> & selectiveInputs) {
assert(maxValue > minValue);
int ninputs = selectiveInputs.Length();
HostArray<cudafloat> weights(ninputs);
HostArray<cudafloat> bias(ninputs);
HostArray<cudafloat> learningRate(ninputs);
HostArray<cudafloat> delta(ninputs);
for(int i = 0; i < ninputs; i++) {
if (selectiveInputs[i]) {
weights[i] = (maxValue - minValue) * ((cudafloat) rand() / RAND_MAX) + minValue;
bias[i] = (maxValue - minValue) * ((cudafloat) rand() / RAND_MAX) + minValue;
} else {
weights[i] = CUDA_VALUE(0.0);
bias[i] = CUDA_VALUE(0.0);
}
learningRate[i] = initialLearningRate;
delta[i] = CUDA_VALUE(0.0);
}
d_bestWeights = d_weights = weights;
d_bestBias = d_bias = bias;
d_learnRateBias = d_learnRate = learningRate;
d_lastDelta = d_lastDeltaBias = d_lastDeltaWithoutLearningMomentum = d_lastDeltaWithoutLearningMomentumBias = delta;
}
void BackPropagation::SelectiveInputLayer::Fire(cudaStream_t stream) {
int processed = 0;
do {
int patternsToProcess = (patterns > 65535) ? 65535 : patterns;
FireSelectiveInputs<<<patternsToProcess, neurons, 0, stream>>>(d_inputs + (processed * neurons), d_weights.Pointer(), d_bias.Pointer(), d_outputs.Pointer() + (processed * neurons), neurons);
processed += patternsToProcess;
} while (processed < patterns);
}
void BackPropagation::SelectiveInputLayer::CalculateLocalGradient(cudaStream_t stream, cudafloat * rms, cudafloat * bestRMS, cudafloat rmsGrowToApplyRobustLearning, Layer & nextLayer) {
int processed = 0;
do {
int patternsToProcess = (patterns > 65535) ? 65535 : patterns;
CalcLocalGradSelectiveInputs<<<patternsToProcess, dimOutputsNeurons, sharedMemGradients, stream>>>(rms, bestRMS, rmsGrowToApplyRobustLearning, d_inputs + (processed * neurons), d_weights.Pointer(), d_bias.Pointer(), nextLayer.d_weights.Pointer(), nextLayer.d_localGradient.Pointer() + (processed * dimOutputsNeurons.x), d_localGradient.Pointer() + (processed * neurons));
processed += patternsToProcess;
} while (processed < patterns);
}
void BackPropagation::SelectiveInputLayer::CorrectWeights(cudaStream_t stream, cudafloat * rms, cudafloat * bestRMS, cudafloat rmsGrowToApplyRobustLearning, cudafloat robustFactor, cudafloat momentum, cudafloat u, cudafloat d, cudafloat maxStepSize) {
KernelCorrectWeightsSelectiveInputs(stream, neurons, patterns, rms, bestRMS, rmsGrowToApplyRobustLearning, d_inputs, d_localGradient.Pointer(), d_weights.Pointer(), d_bias.Pointer(), d_learnRate.Pointer(), d_learnRateBias.Pointer(), d_lastDeltaWithoutLearningMomentum.Pointer(), d_lastDeltaWithoutLearningMomentumBias.Pointer(), d_lastDelta.Pointer(), d_lastDeltaBias.Pointer(), u, d, maxStepSize, robustFactor, momentum, patterns);
}
void BackPropagation::CreateNetwork(HostArray<int> & sizeLayers, HostArray<int> * sizeSpaceLayers, HostArray<bool> * selectiveNeurons, HostMatrix<cudafloat> & trainInputPatterns, HostMatrix<cudafloat> & trainDesiredOutputPatterns, cudafloat initialLearningRate) {
int nsamples = trainInputPatterns.Rows();
int ninputs = trainInputPatterns.Columns();
Layer::patterns = nsamples;
assert(Layer::patterns > 0 && Layer::patterns == trainDesiredOutputPatterns.Rows());
d_inputs = trainInputPatterns;
d_desOutputs = trainDesiredOutputPatterns;
d_rmsOut.ResizeWithoutPreservingData(1);
this->initialLearningRate = initialLearningRate;
assert(initialLearningRate > CUDA_VALUE(0.0));
// Check for selective inputs
bool hasSelectiveInputs = false;
selectiveInputs.ResizeWithoutPreservingData(ninputs);
for(int i = 0; i < ninputs; i++) selectiveInputs[i] = false;
int fi = 0;
int li = ninputs - 1;
for(int s = 0; s < nsamples; s++) {
for(int i = fi; i <= li; i++) {
if (!selectiveInputs[i] && IsInfOrNaN(trainInputPatterns(s, i))) {
selectiveInputs[i] = hasSelectiveInputs = true;
if (i == fi) fi++; else if (i == li) li--;
}
}
if (fi >= li) break;
}
//Create the space layers
int numberSpaceLayers = (sizeSpaceLayers == nullptr) ? 0 : sizeSpaceLayers->Length();
selectiveInputLayerSpaceNetwork = nullptr;
if (numberSpaceLayers) {
assert(selectiveNeurons != nullptr);
spaceLayers.ResizeWithoutPreservingData(numberSpaceLayers);
int inputsWithoutBias = sizeLayers[0];
cudafloat * layerInputs = d_inputs.Pointer();
if (hasSelectiveInputs) {
selectiveInputLayerSpaceNetwork = new SelectiveInputLayer(nsamples, selectiveInputs, (*sizeSpaceLayers)[0], layerInputs, initialLearningRate);
layerInputs = selectiveInputLayerSpaceNetwork->d_outputs.Pointer();
}
for(int l = 0; l < numberSpaceLayers; l++) {
int neurons = (*sizeSpaceLayers)[l];
int nextLayerNeurons;
if (l == numberSpaceLayers - 1) {
Layer::totalNeuronsWithSelectiveActivation = neurons;
nextLayerNeurons = 0;
} else {
nextLayerNeurons = (*sizeSpaceLayers)[l + 1];
}
spaceLayers[l].Init(neurons, inputsWithoutBias + 1, nextLayerNeurons, initialLearningRate, layerInputs, false);
layerInputs = spaceLayers[l].d_outputs.Pointer();
inputsWithoutBias = neurons;
}
}
//Create the layers
int numberLayers = sizeLayers.Length() - 1;
assert(numberLayers > 0);
layers.ResizeWithoutPreservingData(numberLayers);
int outputLayer = numberLayers - 1;
int inputsWithoutBias = sizeLayers[0];
assert(inputsWithoutBias > 0 && inputsWithoutBias == trainInputPatterns.Columns());
cudafloat * layerInputs = d_inputs.Pointer();
if (hasSelectiveInputs) {
selectiveInputLayer = new SelectiveInputLayer(nsamples, selectiveInputs, sizeLayers[1], layerInputs, initialLearningRate);
layerInputs = selectiveInputLayer->d_outputs.Pointer();
} else {
selectiveInputLayer = nullptr;
}
cudafloat * m = (numberSpaceLayers == 0) ? nullptr : spaceLayers[numberSpaceLayers - 1].d_outputs.Pointer();
cudafloat * localGradSpaceNet = (numberSpaceLayers == 0) ? nullptr : spaceLayers[numberSpaceLayers - 1].d_localGradient.Pointer();
int mOffset = 0;
for(int l = 0; l < numberLayers; l++) {
int neurons = sizeLayers[l + 1];
assert(neurons > 0);
bool isOutputLayer = (l == outputLayer) ? true : false;
int nextLayerNeurons = (isOutputLayer) ? 0 : sizeLayers[l + 2];
bool hasSelectiveNeurons = (numberSpaceLayers > 0 && (*selectiveNeurons)[l]) ? true : false;
layers[l].Init(neurons, inputsWithoutBias + 1, nextLayerNeurons, initialLearningRate, layerInputs, isOutputLayer, (hasSelectiveNeurons) ? m : nullptr, (hasSelectiveNeurons) ? localGradSpaceNet : nullptr, mOffset);
if (hasSelectiveNeurons) mOffset += neurons;
layerInputs = layers[l].d_outputs.Pointer();
inputsWithoutBias = neurons;
}
//Robust Learning
layersRobustTraining = numberLayers + numberSpaceLayers;
if (hasSelectiveInputs) layersRobustTraining += (numberSpaceLayers) ? 4 : 2;
HostArray<int> numberWeightsLayer(layersRobustTraining);
HostArray<cudafloat *> weightsLayers(layersRobustTraining);
HostArray<cudafloat *> bestWeightsLayers(layersRobustTraining);
HostArray<cudafloat *> learnRatesLayers(layersRobustTraining);
HostArray<cudafloat *> lastDeltaLayers(layersRobustTraining);
HostArray<cudafloat *> lastDeltaWithoutLMlayers(layersRobustTraining);
maxNumberWeigths = 0;
int ll = 0;
while (ll < numberSpaceLayers) {
int connections = spaceLayers[ll].connections;
if (connections > maxNumberWeigths) maxNumberWeigths = connections;
numberWeightsLayer[ll] = connections;
weightsLayers[ll] = spaceLayers[ll].d_weights.Pointer();
bestWeightsLayers[ll] = spaceLayers[ll].d_bestWeights.Pointer();
learnRatesLayers[ll] = spaceLayers[ll].d_learnRate.Pointer();
lastDeltaLayers[ll] = spaceLayers[ll].d_lastDelta.Pointer();
lastDeltaWithoutLMlayers[ll] = spaceLayers[ll].d_lastDeltaWithoutLearningMomentum.Pointer();
ll++;
}
for(int l = 0; l < numberLayers; l++) {
int connections = layers[l].connections;
if (connections > maxNumberWeigths) maxNumberWeigths = connections;
numberWeightsLayer[ll] = connections;
weightsLayers[ll] = layers[l].d_weights.Pointer();
bestWeightsLayers[ll] = layers[l].d_bestWeights.Pointer();
learnRatesLayers[ll] = layers[l].d_learnRate.Pointer();
lastDeltaLayers[ll] = layers[l].d_lastDelta.Pointer();
lastDeltaWithoutLMlayers[ll] = layers[l].d_lastDeltaWithoutLearningMomentum.Pointer();
ll++;
}
if (hasSelectiveInputs) {
numberWeightsLayer[ll] = ninputs;
weightsLayers[ll] = selectiveInputLayer->d_weights.Pointer();
bestWeightsLayers[ll] = selectiveInputLayer->d_bestWeights.Pointer();
learnRatesLayers[ll] = selectiveInputLayer->d_learnRate.Pointer();
lastDeltaLayers[ll] = selectiveInputLayer->d_lastDelta.Pointer();
lastDeltaWithoutLMlayers[ll] = selectiveInputLayer->d_lastDeltaWithoutLearningMomentum.Pointer();
ll++;
numberWeightsLayer[ll] = ninputs;
weightsLayers[ll] = selectiveInputLayer->d_bias.Pointer();
bestWeightsLayers[ll] = selectiveInputLayer->d_bestBias.Pointer();
learnRatesLayers[ll] = selectiveInputLayer->d_learnRateBias.Pointer();
lastDeltaLayers[ll] = selectiveInputLayer->d_lastDeltaBias.Pointer();
lastDeltaWithoutLMlayers[ll] = selectiveInputLayer->d_lastDeltaWithoutLearningMomentumBias.Pointer();
ll++;
if (numberSpaceLayers) {
numberWeightsLayer[ll] = ninputs;
weightsLayers[ll] = selectiveInputLayerSpaceNetwork->d_weights.Pointer();
bestWeightsLayers[ll] = selectiveInputLayerSpaceNetwork->d_bestWeights.Pointer();
learnRatesLayers[ll] = selectiveInputLayerSpaceNetwork->d_learnRate.Pointer();
lastDeltaLayers[ll] = selectiveInputLayerSpaceNetwork->d_lastDelta.Pointer();
lastDeltaWithoutLMlayers[ll] = selectiveInputLayerSpaceNetwork->d_lastDeltaWithoutLearningMomentum.Pointer();
ll++;
numberWeightsLayer[ll] = ninputs;
weightsLayers[ll] = selectiveInputLayerSpaceNetwork->d_bias.Pointer();
bestWeightsLayers[ll] = selectiveInputLayerSpaceNetwork->d_bestBias.Pointer();
learnRatesLayers[ll] = selectiveInputLayerSpaceNetwork->d_learnRateBias.Pointer();
lastDeltaLayers[ll] = selectiveInputLayerSpaceNetwork->d_lastDeltaBias.Pointer();
lastDeltaWithoutLMlayers[ll] = selectiveInputLayerSpaceNetwork->d_lastDeltaWithoutLearningMomentumBias.Pointer();
ll++;
}
}
d_numberWeightsLayer = numberWeightsLayer;
d_weightsLayers = weightsLayers;
d_bestWeightsLayers = bestWeightsLayers;
d_learnRatesLayers = learnRatesLayers;
d_lastDeltaLayers = lastDeltaLayers;
d_lastDeltaWithoutLMlayers = lastDeltaWithoutLMlayers;
robustLearning = true;
rmsGrowToApplyRobustLearning = CUDA_VALUE(1.001); // 0.1%
robustFactor = CUDA_VALUE(0.5);
momentum = CUDA_VALUE(0.7);
u = CUDA_VALUE(1.2);
d = CUDA_VALUE(0.8);
maxStepSize = CUDA_VALUE(10.0);
//Create the RMS vectors
int sizeRMSvector = (layers[outputLayer].connections > MAX_THREADS_PER_BLOCK) ? Layer::patterns * layers[outputLayer].neurons : Layer::patterns;
d_rms.ResizeWithoutPreservingData(sizeRMSvector);
layers[outputLayer].d_desOutputs = d_desOutputs.Pointer();
layers[outputLayer].d_rms = d_rms.Pointer();
layers[outputLayer].sharedMemFire += layers[outputLayer].neurons * sizeof(cudafloat);
// Initialize the initial RMS
HostArray<cudafloat> h_bestRMS(1);
h_bestRMS[0] = CUDA_VALUE(1.0);
d_bestRMS = h_bestRMS;
rms.Value() = h_bestRMS[0];
//Other stuff
patternsBlockSize = 1;
while(patternsBlockSize < MAX_THREADS_PER_BLOCK && patternsBlockSize < Layer::patterns) patternsBlockSize <<= 1;
numberPatternsNeurons = (cudafloat) Layer::patterns * (cudafloat) layers[outputLayer].neurons;
epoch = 0;
}
BackPropagation::BackPropagation(HostArray<int> & sizeLayers, HostMatrix<cudafloat> & trainInputPatterns, HostMatrix<cudafloat> & trainDesiredOutputPatterns, cudafloat initialLearningRate) {
CreateNetwork(sizeLayers, nullptr, nullptr, trainInputPatterns, trainDesiredOutputPatterns, initialLearningRate);
}
HostArray<cudafloat> BackPropagation::GetLayerWeights(int layer) {
assert(layer >= 0 && layer < layers.Length());
return HostArray<cudafloat>(layers[layer].d_weights);
}
void BackPropagation::SetLayerWeights(int layer, HostArray<cudafloat> & weights) {
assert(layer >= 0 && layer < layers.Length());
layers[layer].d_weights = weights;
}
void BackPropagation::SetLayerWeights(int layer, HostMatrix<cudafloat> & weights, HostArray<cudafloat> & bias) {
assert(layer >= 0 && layer < layers.Length());
Layer * l = &(layers[layer]);
int neurons = l->neurons;
int inputs = weights.Columns();
assert(neurons == bias.Length());
HostArray<cudafloat> weights_bias(weights.Elements() + bias.Length());
int w = 0;
for(int n = 0; n < neurons; n++) {
weights_bias[w++] = bias[n];
for(int i = 0; i < inputs; i++) {
weights_bias[w++] = weights(n, i);
}
}
layers[layer].d_weights = weights_bias;
}
HostArray<cudafloat> BackPropagation::GetSelectiveInputWeights() {
return HostArray<cudafloat>(selectiveInputLayer->d_weights);
}
void BackPropagation::SetSelectiveInputWeights(HostArray<cudafloat> & weights) {
selectiveInputLayer->d_weights = weights;
}
HostArray<cudafloat> BackPropagation::GetSelectiveInputBias() {
return HostArray<cudafloat>(selectiveInputLayer->d_bias);
}
void BackPropagation::SetSelectiveInputBias(HostArray<cudafloat> & bias) {
selectiveInputLayer->d_bias = bias;
}
void BackPropagation::RandomizeWeights(cudafloat minValue, cudafloat maxValue) {
int nSpaceLayers = spaceLayers.Length();
for (int layer = 0; layer < nSpaceLayers; layer++) spaceLayers[layer].RandomizeWeights(minValue, maxValue, initialLearningRate);
int nLayers = layers.Length();
for (int layer = 0; layer < nLayers; layer++) layers[layer].RandomizeWeights(minValue, maxValue, initialLearningRate);
if (selectiveInputLayerSpaceNetwork) selectiveInputLayerSpaceNetwork->RandomizeWeights(minValue, maxValue, initialLearningRate, selectiveInputs);
if (selectiveInputLayer) selectiveInputLayer->RandomizeWeights(minValue, maxValue, initialLearningRate, selectiveInputs);
epoch = 0;
}
bool BackPropagation::GetRobustLearning() const {
return robustLearning;
}
void BackPropagation::SetRobustLearning(bool value) {
robustLearning = value;
}
cudafloat BackPropagation::GetMaxPercentageRMSGrow() const {
return rmsGrowToApplyRobustLearning - CUDA_VALUE(1.0);
}
void BackPropagation::SetMaxPercentageRMSGrow(cudafloat value) {
assert(value > CUDA_VALUE(0.0));
rmsGrowToApplyRobustLearning = CUDA_VALUE(1.0) + value;
}
cudafloat BackPropagation::GetRobustFactor() const {
return robustFactor;
}
void BackPropagation::SetRobustFactor(cudafloat value) {
assert(value > CUDA_VALUE(0.0) && value < CUDA_VALUE(1.0));
robustFactor = value;
}
cudafloat BackPropagation::GetMomentum() const {
return momentum;
}
void BackPropagation::SetMomentum(cudafloat value) {
assert(value > CUDA_VALUE(0.0) && value < CUDA_VALUE(1.0));
momentum = value;
}
cudafloat BackPropagation::GetUpStepSizeFactor() const {
return u;
}
void BackPropagation::SetUpStepSizeFactor(cudafloat value){
assert(value > CUDA_VALUE(1.0));
u = value;
}
cudafloat BackPropagation::GetDownStepSizeFactor() const {
return d;
}
void BackPropagation::SetDownStepSizeFactor(cudafloat value) {
assert(value > CUDA_VALUE(0.0) && value < CUDA_VALUE(1.0));
d = value;
}
cudafloat BackPropagation::GetMaxStepSize() const {
return maxStepSize;
}
void BackPropagation::SetMaxStepSize(cudafloat value) {
assert(value > CUDA_VALUE(0.0));
maxStepSize = value;
}
int BackPropagation::GetEpoch() const {
return epoch;
}
int BackPropagation::GetNumberLayers() const {
return layers.Length();
}
int BackPropagation::GetNumberInputs() const {
return layers[0].inputsWithoutBias;
}
int BackPropagation::GetNumberOutputs() const {
return layers[layers.Length() - 1].neurons;
}
int BackPropagation::GetNumberNeurons(int layer) const {
assert(layer >= 0 && layer < layers.Length());
return layers[layer].neurons;
}
void BackPropagation::Fire() {
if (selectiveInputLayerSpaceNetwork != nullptr) selectiveInputLayerSpaceNetwork->Fire(streamKernels);
int nSpaceLayers = spaceLayers.Length();
for (int l = 0; l < nSpaceLayers; l++) spaceLayers[l].Fire(streamKernels);
if (selectiveInputLayer != nullptr) selectiveInputLayer->Fire(streamKernels);
int numLayers = layers.Length();
for(int l = 0; l < numLayers; l++) layers[l].Fire(streamKernels);
}
cudafloat BackPropagation::GetRMS() {
cudaDeviceSynchronize();
Fire(); // Determine the network outputs
// Calculate the RMS
KernelCalculateRMS(streamKernels, patternsBlockSize, d_rms.Pointer(), d_rmsOut.Pointer(), d_rms.Length(), numberPatternsNeurons);
rms.UpdateValue(d_rmsOut.Pointer());
return rms.Value();
}
cudafloat BackPropagation::GetRMSestimate() {
cudafloat RMS = rms.Value();
if (epoch == 0 && RMS >= CUDA_VALUE(1.0)) return GetRMS();
return RMS;
}
void BackPropagation::TrainOneEpoch() {
int numLayers = layers.Length();
int nSpaceLayers = spaceLayers.Length();
Fire(); // Determine the network outputs
// Calculate the RMS / Robust training
if (robustLearning) {
KernelCalculateRMS(streamKernels, patternsBlockSize, d_rms.Pointer(), d_rmsOut.Pointer(), d_rms.Length(), numberPatternsNeurons);
if (cudaStreamQuery(streamRMS) == cudaSuccess) rms.UpdateValueAsync(d_rmsOut.Pointer(), streamRMS);
RobustLearning<<<1, maxNumberWeigths, 0, streamKernels>>>(d_rmsOut.Pointer(), d_bestRMS.Pointer(), (cudafloat) rmsGrowToApplyRobustLearning, layersRobustTraining, d_numberWeightsLayer.Pointer(), d_weightsLayers.Pointer(), d_bestWeightsLayers.Pointer(), d_learnRatesLayers.Pointer(), robustFactor, d_lastDeltaWithoutLMlayers.Pointer(), d_lastDeltaLayers.Pointer());
} else {
if (cudaStreamQuery(streamRMS) == cudaSuccess) {
KernelCalculateRMS(streamRMS, patternsBlockSize, d_rms.Pointer(), d_rmsOut.Pointer(), d_rms.Length(), numberPatternsNeurons);
rms.UpdateValueAsync(d_rmsOut.Pointer(), streamRMS);
}
}
// Calculate local gradients. The local gradient for the output layer was already calculated.
cudafloat * rms = (robustLearning) ? d_rmsOut.Pointer() : nullptr;
cudafloat * bestRMS = (robustLearning) ? d_bestRMS.Pointer() : nullptr;
for(int l = numLayers - 2; l >= 0; l--) {
layers[l].CalculateLocalGradient(streamKernels, rms, bestRMS, rmsGrowToApplyRobustLearning, layers[l + 1]);
}
if (selectiveInputLayer != nullptr) selectiveInputLayer->CalculateLocalGradient(streamKernels, rms, bestRMS, rmsGrowToApplyRobustLearning, layers[0]);
for (int l = nSpaceLayers -2; l >= 0; l--) spaceLayers[l].CalculateLocalGradient(streamKernels, rms, bestRMS, rmsGrowToApplyRobustLearning, spaceLayers[l + 1]);
if (selectiveInputLayerSpaceNetwork != nullptr) selectiveInputLayerSpaceNetwork->CalculateLocalGradient(streamKernels, rms, bestRMS, rmsGrowToApplyRobustLearning, spaceLayers[0]);
// Correct the weights
for(int l = numLayers - 1; l >= 0; l--) {
layers[l].CorrectWeights(streamKernels, patternsBlockSize, rms, bestRMS, rmsGrowToApplyRobustLearning, robustFactor, momentum, u, d, maxStepSize);
}
if (selectiveInputLayer != nullptr) selectiveInputLayer->CorrectWeights(streamKernels, rms, bestRMS, rmsGrowToApplyRobustLearning, robustFactor, momentum, u, d, maxStepSize);
for (int l = nSpaceLayers - 1; l >= 0; l--) spaceLayers[l].CorrectWeights(streamKernels, patternsBlockSize, rms, bestRMS, rmsGrowToApplyRobustLearning, robustFactor, momentum, u, d, maxStepSize);
if (selectiveInputLayerSpaceNetwork != nullptr) selectiveInputLayerSpaceNetwork->CorrectWeights(streamKernels, rms, bestRMS, rmsGrowToApplyRobustLearning, robustFactor, momentum, u, d, maxStepSize);
epoch++;
}
void BackPropagation::Train(int epochs) {
for (int e = 0; e < epochs; e++) TrainOneEpoch();
}
void BackPropagation::Train(int epochs, cudafloat rmsStop) {
// In some situations, we may get the RMS error from a previous trained network.
// To avoid this, we compute the actual RMS before training the network.
GetRMS();
for (int e = 0; e < epochs; e++) {
TrainOneEpoch();
if (GetRMSestimate() <= rmsStop) break;
}
}
// Computes the network outputs for a batch of input patterns and returns them
// as a host matrix (one row per pattern, one column per output neuron).
//
// The forward pass mirrors training: an optional selective-input layer feeds
// the space network; the space network (if any) produces the multiplier
// matrix d_m consumed by main-network layers with selective activation; an
// optional selective-input layer then feeds the main network. Because the
// grid x-dimension is limited to 65535 blocks on older GPUs, patterns are
// processed in chunks of at most 65535.
HostMatrix<cudafloat> BackPropagation::GetOutputs(HostMatrix<cudafloat> & inputs) {
    int patterns = inputs.Rows();
    int numberLayers = layers.Length();
    int numberSpaceLayers = spaceLayers.Length();

    DeviceMatrix<cudafloat> d_inputs(inputs);

    // Device-side output buffers, one matrix per layer.
    HostArray< DeviceMatrix<cudafloat> * > spaceLayerOutputs;
    spaceLayerOutputs.ResizeWithoutPreservingData(numberSpaceLayers);
    for (int l = 0; l < numberSpaceLayers; l++) {
        spaceLayerOutputs[l] = new DeviceMatrix<cudafloat>(patterns, spaceLayers[l].neurons);
    }
    HostArray< DeviceMatrix<cudafloat> * > layerOutputs;
    layerOutputs.ResizeWithoutPreservingData(numberLayers);
    for (int l = 0; l < numberLayers; l++) {
        layerOutputs[l] = new DeviceMatrix<cudafloat>(patterns, layers[l].neurons);
    }

    cudafloat * layerInputs = d_inputs.Pointer();
    int ninputs = d_inputs.Columns();
    DeviceArray<cudafloat> outputsSelectiveInput(patterns * ninputs);

    // Selective-input layer feeding the space network.
    if (selectiveInputLayerSpaceNetwork != nullptr) {
        int processed = 0;
        do {
            // FIX: chunk by the number of *remaining* patterns. The previous
            // code always used min(patterns, 65535), which for more than
            // 65535 patterns launched full-size grids on every chunk after
            // the first, reading and writing past the end of the buffers.
            int patternsToProcess = (patterns - processed > 65535) ? 65535 : (patterns - processed);
            FireSelectiveInputs<<<patternsToProcess, ninputs, 0, streamKernels>>>(layerInputs + (processed * ninputs), selectiveInputLayerSpaceNetwork->d_weights.Pointer(), selectiveInputLayerSpaceNetwork->d_bias.Pointer(), outputsSelectiveInput.Pointer() + (processed * ninputs), ninputs);
            processed += patternsToProcess;
        } while (processed < patterns);
        layerInputs = outputsSelectiveInput.Pointer();
    }

    // Forward pass through the space network.
    for (int l = 0; l < numberSpaceLayers; l++) {
        if(spaceLayers[l].connections > MAX_THREADS_PER_BLOCK) {
            dim3 dimNeuronsPatterns;
            dimNeuronsPatterns.x = spaceLayers[l].neurons;
            int processed = 0;
            do {
                int patternsToProcess = (patterns - processed > 65535) ? 65535 : (patterns - processed);
                dimNeuronsPatterns.y = patternsToProcess;
                // FIX: the output pointer advances by `neurons` values per
                // pattern (the stride of the output matrix), not by the
                // input width as before.
                KernelFireLayer(streamKernels, dimNeuronsPatterns, spaceLayers[l].inputsBlockSize, layerInputs + (processed * spaceLayers[l].inputsWithoutBias), spaceLayers[l].d_weights.Pointer(), nullptr, 0, Layer::totalNeuronsWithSelectiveActivation, spaceLayerOutputs[l]->Pointer() + (processed * spaceLayers[l].neurons), spaceLayers[l].inputsWithoutBias);
                processed += patternsToProcess;
            } while (processed < patterns);
        } else {
            int processed = 0;
            do {
                int patternsToProcess = (patterns - processed > 65535) ? 65535 : (patterns - processed);
                FireLayer<<<patternsToProcess, spaceLayers[l].dimInputsNeurons, spaceLayers[l].sharedMemFire, streamKernels>>>(layerInputs + (processed * spaceLayers[l].inputsWithoutBias), spaceLayers[l].d_weights.Pointer(), nullptr, 0, Layer::totalNeuronsWithSelectiveActivation, spaceLayerOutputs[l]->Pointer() + (processed * spaceLayers[l].neurons));
                processed += patternsToProcess;
            } while (processed < patterns);
        }
        layerInputs = spaceLayerOutputs[l]->Pointer();
    }

    // d_m: selective-activation multipliers = output of the last space layer.
    cudafloat * d_m = nullptr;
    if (numberSpaceLayers > 0) d_m = layerInputs;

    layerInputs = d_inputs.Pointer();

    // Selective-input layer feeding the main network.
    if (selectiveInputLayer != nullptr) {
        int processed = 0;
        do {
            int patternsToProcess = (patterns - processed > 65535) ? 65535 : (patterns - processed);
            FireSelectiveInputs<<<patternsToProcess, ninputs, 0, streamKernels>>>(layerInputs + (processed * ninputs), selectiveInputLayer->d_weights.Pointer(), selectiveInputLayer->d_bias.Pointer(), outputsSelectiveInput.Pointer() + (processed * ninputs), ninputs);
            processed += patternsToProcess;
        } while (processed < patterns);
        layerInputs = outputsSelectiveInput.Pointer();
    }

    // Forward pass through the main network.
    for (int l = 0; l < numberLayers; l++) {
        if(layers[l].connections > MAX_THREADS_PER_BLOCK) {
            dim3 dimNeuronsPatterns;
            dimNeuronsPatterns.x = layers[l].neurons;
            int processed = 0;
            do {
                int patternsToProcess = (patterns - processed > 65535) ? 65535 : (patterns - processed);
                dimNeuronsPatterns.y = patternsToProcess;
                KernelFireLayer(streamKernels, dimNeuronsPatterns, layers[l].inputsBlockSize, layerInputs + (processed * layers[l].inputsWithoutBias), layers[l].d_weights.Pointer(), (layers[l].d_m != nullptr) ? d_m + (processed * Layer::totalNeuronsWithSelectiveActivation) : nullptr, layers[l].mOffset, Layer::totalNeuronsWithSelectiveActivation, layerOutputs[l]->Pointer() + (processed * layers[l].neurons), layers[l].inputsWithoutBias);
                processed += patternsToProcess;
            } while (processed < patterns);
        } else {
            int processed = 0;
            do {
                int patternsToProcess = (patterns - processed > 65535) ? 65535 : (patterns - processed);
                FireLayer<<<patternsToProcess, layers[l].dimInputsNeurons, layers[l].sharedMemFire, streamKernels>>>(layerInputs + (processed * layers[l].inputsWithoutBias), layers[l].d_weights.Pointer(), (layers[l].d_m != nullptr) ? d_m + (processed * Layer::totalNeuronsWithSelectiveActivation) : nullptr, layers[l].mOffset, Layer::totalNeuronsWithSelectiveActivation, layerOutputs[l]->Pointer() + (processed * layers[l].neurons));
                processed += patternsToProcess;
            } while (processed < patterns);
        }
        layerInputs = layerOutputs[l]->Pointer();
    }

    // Copy the last layer's activations back to the host (synchronizes).
    HostMatrix<cudafloat> outputs(*(layerOutputs[numberLayers - 1]));

    for (int l = 0; l < numberSpaceLayers; l++) {
        delete spaceLayerOutputs[l];
    }
    for (int l = 0; l < numberLayers; l++) {
        delete layerOutputs[l];
    }

    return outputs;
}
} |
c7cad8a339da75c9320086b711adf031e801e6c1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/cuda/array/cuda_array.hpp>
#include <nbla/cuda/function/sum.hpp>
#include <nbla/cuda/math.hpp>
#include <nbla/cuda/utils/block_reduce.cuh>
namespace nbla {
// Sum-reduces N elements of x into one partial sum per block: each thread
// accumulates a grid-stride partial, the block reduces the partials, and
// thread 0 writes the block total to buff[blockIdx.x].
template <typename T>
__global__ void kernel_reduce_per_block(const int N, const T *x, T *buff) {
  // Accumulate in a float-capable type so reduced-precision inputs
  // (e.g. half) do not lose accuracy during the summation.
  typedef typename CudaTypeForceFloat<T>::type AccT;
  AccT thread_data = 0;
  // Grid-stride loop: each thread sums a strided subset of x.
  NBLA_CUDA_KERNEL_LOOP(i, N) { thread_data += (AccT)x[i]; }
  thread_data = blockReduceSum(thread_data);  // block-wide reduction
  if (threadIdx.x == 0) {
    buff[blockIdx.x] = thread_data;  // one partial result per block
  }
}
// Forward pass of the sum reduction.
// outer_size == 1 (everything collapses to a single scalar): uses a custom
// reduction — for large inputs a two-stage scheme (per-block partials, then a
// single 1024-thread block reduces the partials), otherwise one single-block
// launch. outer_size > 1: expresses the reduction as a matrix-vector product
// with a ones vector via gemv.
template <typename T>
void SumCuda<T>::forward_impl_reduce(const T *x_, T *y_, int outer_size,
                                     int reduction_size) {
  const Tc *x = reinterpret_cast<const Tc *>(x_);
  Tc *y = reinterpret_cast<Tc *>(y_);
  cuda_set_device(this->device_);
  if (outer_size == 1) {
    if (reduction_size >= 1024) {
      // Stage 1 writes at most 1024 per-block partials so that stage 2 can
      // finish with a single 1024-thread block.
      int blocks =
          min(NBLA_CUDA_GET_BLOCKS(reduction_size), /*max blocks*/ 1024);
      shared_ptr<CudaCachedArray> arr_buff =
          make_shared<CudaCachedArray>(blocks, get_dtype<Tc>(), this->ctx_);
      Tc *buff = arr_buff->pointer<Tc>();
      hipLaunchKernelGGL(( kernel_reduce_per_block), dim3(blocks), dim3(NBLA_CUDA_NUM_THREADS), 0, 0, reduction_size,
                                                                     x, buff);
      // Stage 2: reduce the partials into the final scalar y.
      hipLaunchKernelGGL(( kernel_reduce_per_block), dim3(1), dim3(1024), 0, 0, blocks, buff, y);
    } else {
      // Small input: a single block reduces everything directly into y.
      hipLaunchKernelGGL(( kernel_reduce_per_block), dim3(1), dim3(1024), 0, 0, reduction_size, x, y);
    }
    return;
  }
  // General case: y = x^T * ones, one dot product per outer element.
  const Tc *ones =
      static_cast<const Tc *>(SingletonManager::get<NNabla>()->ones(
          reduction_size, get_dtype<Tc>(), this->ctx_));
  cuda_gemv<Tc>(this->device_, y, x, reduction_size, outer_size, true, ones,
                reduction_size, 1, 0);
}
// Backward of a full (scalar) sum: broadcasts the single upstream gradient
// *dy to every element of dx, adding to the existing gradient when accum.
template <typename T, bool accum>
__global__ void kernel_reduce_sum_backward(const int num, T *dx, const T *dy) {
  NBLA_CUDA_KERNEL_LOOP(idx, num) { dx[idx] = (accum ? dx[idx] : (T)0) + *dy; }
}
// Backward pass of the sum reduction.
// outer_size == 1: broadcast the scalar gradient dy across dx (the accum
// template parameter keeps the non-accumulating path free of dx reads).
// outer_size > 1: dx (outer x reduction) = dy (outer x 1) * ones
// (1 x reduction), expressed as a gemm, accumulating into dx when accum.
template <typename T>
void SumCuda<T>::backward_impl_reduce(const T *dy_, T *dx_, int outer_size,
                                      int reduction_size, bool accum) {
  const Tc *dy = reinterpret_cast<const Tc *>(dy_);
  Tc *dx = reinterpret_cast<Tc *>(dx_);
  cuda_set_device(this->device_);
  if (outer_size == 1) {
    if (accum) {
      NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_reduce_sum_backward<Tc, true>),
                                     reduction_size, dx, dy);
    } else {
      NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_reduce_sum_backward<Tc, false>),
                                     reduction_size, dx, dy);
    }
    return;
  }
  const Tc *ones =
      static_cast<const Tc *>(SingletonManager::get<NNabla>()->ones(
          reduction_size, get_dtype<Tc>(), this->ctx_));
  cuda_gemm<Tc>(this->device_, dx, true, dy, outer_size, 1, false, ones, 1,
                reduction_size, false, 1, accum ? 1 : 0);
}
}
| c7cad8a339da75c9320086b711adf031e801e6c1.cu | // Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/cuda/array/cuda_array.hpp>
#include <nbla/cuda/function/sum.hpp>
#include <nbla/cuda/math.hpp>
#include <nbla/cuda/utils/block_reduce.cuh>
namespace nbla {
// Sum-reduces N elements of x into one partial sum per block: each thread
// accumulates a grid-stride partial, the block reduces the partials, and
// thread 0 writes the block total to buff[blockIdx.x].
template <typename T>
__global__ void kernel_reduce_per_block(const int N, const T *x, T *buff) {
  // Accumulate in a float-capable type so reduced-precision inputs
  // (e.g. half) do not lose accuracy during the summation.
  typedef typename CudaTypeForceFloat<T>::type AccT;
  AccT thread_data = 0;
  // Grid-stride loop: each thread sums a strided subset of x.
  NBLA_CUDA_KERNEL_LOOP(i, N) { thread_data += (AccT)x[i]; }
  thread_data = blockReduceSum(thread_data);  // block-wide reduction
  if (threadIdx.x == 0) {
    buff[blockIdx.x] = thread_data;  // one partial result per block
  }
}
// Forward pass of the sum reduction.
// outer_size == 1 (everything collapses to a single scalar): uses a custom
// reduction — for large inputs a two-stage scheme (per-block partials, then a
// single 1024-thread block reduces the partials), otherwise one single-block
// launch. outer_size > 1: expresses the reduction as a matrix-vector product
// with a ones vector via gemv.
template <typename T>
void SumCuda<T>::forward_impl_reduce(const T *x_, T *y_, int outer_size,
                                     int reduction_size) {
  const Tc *x = reinterpret_cast<const Tc *>(x_);
  Tc *y = reinterpret_cast<Tc *>(y_);
  cuda_set_device(this->device_);
  if (outer_size == 1) {
    if (reduction_size >= 1024) {
      // Stage 1 writes at most 1024 per-block partials so that stage 2 can
      // finish with a single 1024-thread block.
      int blocks =
          min(NBLA_CUDA_GET_BLOCKS(reduction_size), /*max blocks*/ 1024);
      shared_ptr<CudaCachedArray> arr_buff =
          make_shared<CudaCachedArray>(blocks, get_dtype<Tc>(), this->ctx_);
      Tc *buff = arr_buff->pointer<Tc>();
      kernel_reduce_per_block<<<blocks, NBLA_CUDA_NUM_THREADS>>>(reduction_size,
                                                                 x, buff);
      // Stage 2: reduce the partials into the final scalar y.
      kernel_reduce_per_block<<<1, 1024>>>(blocks, buff, y);
    } else {
      // Small input: a single block reduces everything directly into y.
      kernel_reduce_per_block<<<1, 1024>>>(reduction_size, x, y);
    }
    return;
  }
  // General case: y = x^T * ones, one dot product per outer element.
  const Tc *ones =
      static_cast<const Tc *>(SingletonManager::get<NNabla>()->ones(
          reduction_size, get_dtype<Tc>(), this->ctx_));
  cuda_gemv<Tc>(this->device_, y, x, reduction_size, outer_size, true, ones,
                reduction_size, 1, 0);
}
// Backward of a full (scalar) sum: broadcasts the single upstream gradient
// *dy to every element of dx, adding to the existing gradient when accum.
template <typename T, bool accum>
__global__ void kernel_reduce_sum_backward(const int num, T *dx, const T *dy) {
  NBLA_CUDA_KERNEL_LOOP(idx, num) { dx[idx] = (accum ? dx[idx] : (T)0) + *dy; }
}
// Backward pass of the sum reduction.
// outer_size == 1: broadcast the scalar gradient dy across dx (the accum
// template parameter keeps the non-accumulating path free of dx reads).
// outer_size > 1: dx (outer x reduction) = dy (outer x 1) * ones
// (1 x reduction), expressed as a gemm, accumulating into dx when accum.
template <typename T>
void SumCuda<T>::backward_impl_reduce(const T *dy_, T *dx_, int outer_size,
                                      int reduction_size, bool accum) {
  const Tc *dy = reinterpret_cast<const Tc *>(dy_);
  Tc *dx = reinterpret_cast<Tc *>(dx_);
  cuda_set_device(this->device_);
  if (outer_size == 1) {
    if (accum) {
      NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_reduce_sum_backward<Tc, true>),
                                     reduction_size, dx, dy);
    } else {
      NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_reduce_sum_backward<Tc, false>),
                                     reduction_size, dx, dy);
    }
    return;
  }
  const Tc *ones =
      static_cast<const Tc *>(SingletonManager::get<NNabla>()->ones(
          reduction_size, get_dtype<Tc>(), this->ctx_));
  cuda_gemm<Tc>(this->device_, dx, true, dy, outer_size, 1, false, ones, 1,
                reduction_size, false, 1, accum ? 1 : 0);
}
}
|
aa193624d9a911019a8278451ca829f37526239e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* simulate ncells cells using the model from Diekman et al. */
/* equations are solved in PARALLEL on GPUs */
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include "scn.h"
#include "df.h"
#include "parameters.h"
// One integration step of the Diekman et al. SCN neuron model for all cells,
// computed in parallel with a grid-stride loop (one iteration per cell).
// xi holds the state at the start of the step; xf receives the updated state.
// Gating variables and calcium use a trapezoidal (semi-implicit) update with
// step Edt; the sub-membrane calcium cas is obtained by solving a quadratic.
// M supplies the per-cell molecular clock output G; `input` carries synaptic
// drive. NOTE(review): `t` and `upstream` are unused in the active code —
// they appear only in commented-out alternatives for ya/yb.
__global__ void leapfrog(Estate *xi, Estate *xf, Eparameters *p, Mstate *M, ephys_t *input, int *upstream, double t)
{
    int j;
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = NTHREADS * NBLOCKS;
    ephys_t a, b, c, R, G, E, gkca, gkleak, gnaleak;
    // `a` is the cell-independent leading coefficient of the cas quadratic.
    a=1.0/Edt+1.0/(2.0*ts);
    ephys_t minf, hinf, ninf, rlinf, rnlinf, fnlinf, sinfi, sinff;
    ephys_t taum, tauh, taun, taufnl, tausi, tausf;
    ephys_t ya, yb;
    for (j=tid; j<ncells; j+=stride) {
        // Clock-dependent drive R; cells below index 615 are "ventral", the
        // rest "dorsal". NOTE(review): this relies on dangling-else binding
        // (first `else` pairs with `if (j < 615)`, second with
        // `if (KO == 10)`). It parses correctly, but braces would make the
        // intent explicit and protect against future edits.
        if (KO == 10)
            if (j < 615) // DV separated on 12/18/2015
                // R = p->clk[j]*11.36*(M->G[j]-0.2); //added 12/19/2015 - Ventral R1
                // R = p->clk[j]*14.20*(M->G[j]-0.2); //added 12/20/2015 - Ventral R2
                R = p->clk[j]*17.04*(M->G[j]-0.185); //added 12/20/2015 - Ventral R3
            else
                // R = p->clk[j]*17.04*(M->G[j]-0.165); //added 12/19/2015 - Dorsal R1
                // R = p->clk[j]*14.20*(M->G[j]-0.155); //added 12/20/2015 - Dorsal R2
                R = p->clk[j]*14.20*(M->G[j]-0.145); //added 12/28/2015 - Dorsal R3
                // R = p->clk[j]*11.36*(M->G[j]-0.165); //added 11/23/2015
                // R = p->clk[j]*17.04*(M->G[j]-0.165); //added 12/12/2015
        else
            R = p->clk[j]*8.52*(M->G[j]-0.25); //changed in 12/2015 for WT in Afh sims
            // R = p->clk[j]*5.68*(M->G[j]-0.25); //changed in 12/2015 for WT in Afh sims
            // R = p->clk[j]*11.36*(M->G[j]-0.25); //changed in ~2/2014? = R2
        /////////////////////////////////////////////////////////////////////////////////////////////////////
        // Clock-modulated conductances: KCa and K-leak depend on R.
        gkca = 198.0/(1.0+exp(R))+2.0;
        gkleak = 0.2/(1.0+exp(R));
        gnaleak = p->gnaleak[j];
        //Time t+1/2 update:
        //Calculate time constants and eq values
        minf = 1.0/(1.0+exp(-(xi->V[j]+35.2)/8.1));
        taum = exp(-(xi->V[j]+286.0)/160.0);
        hinf = 1.0/(1.0+exp((xi->V[j]+62.0)/2.0));
        tauh = 0.51+exp(-(xi->V[j]+26.6)/7.1);
        ninf = 1.0/pow(1.0+exp((xi->V[j]-14.0)/(-17.0)),.25);
        taun = exp(-(xi->V[j]-67.0)/68.0);
        rlinf = 1.0/(1.0+exp(-(xi->V[j]+36.0)/5.1));
        rnlinf = 1.0/(1.0+exp(-(xi->V[j]+21.6)/6.7));
        fnlinf = 1.0/(1.0+exp((xi->V[j]+260.0)/65.0));
        taufnl = exp(-(xi->V[j]-444.0)/220.0);
        sinfi = 1e7*pow(xi->cas[j],2)/(1e7*pow(xi->cas[j],2)+5.6);
        tausi = 500.0/(1e7*pow(xi->cas[j],2)+5.6);
        // Synaptic rise (ya) and decay (yb) rates derived from presynaptic
        // drive; the active form scales input[j] by 1/10 — presumably a
        // fixed fan-in (the commented alternatives used t or upstream[j]);
        // TODO confirm against the caller that fills `input`.
        // ya = 5.0*(t);
        // yb = 5.0*(t)+0.18;
        // ya = 5.0*(input[j]/upstream[j]);
        // yb = 5.0*(input[j]/upstream[j])+0.18;
        ya = 5.0*(input[j]/10.0);
        yb = 5.0*(input[j]/10.0)+0.18;
        //Update gating variables (trapezoidal rule in each case)
        xf->m[j] = 2.0*Edt/(2.0*taum+Edt)*minf+(2.0*taum-Edt)/(2.0*taum+Edt)*xi->m[j];
        xf->h[j] = 2.0*Edt/(2.0*tauh+Edt)*hinf+(2.0*tauh-Edt)/(2.0*tauh+Edt)*xi->h[j];
        xf->n[j] = 2.0*Edt/(2.0*taun+Edt)*ninf+(2.0*taun-Edt)/(2.0*taun+Edt)*xi->n[j];
        xf->rl[j] = 2.0*Edt/(2.0*taurl+Edt)*rlinf+(2.0*taurl-Edt)/(2.0*taurl+Edt)*xi->rl[j];
        xf->rnl[j] = 2.0*Edt/(2.0*taurnl+Edt)*rnlinf+(2.0*taurnl-Edt)/(2.0*taurnl+Edt)*xi->rnl[j];
        xf->fnl[j] = 2.0*Edt/(2.0*taufnl+Edt)*fnlinf+(2.0*taufnl-Edt)/(2.0*taufnl+Edt)*xi->fnl[j];
        //solve quadratic equation for cas (a is constant)
        b=(K2-xi->cas[j])/Edt+ks/2.0*p->gcanl[j]*xf->rnl[j]*xf->fnl[j]*(xi->V[j]-p->Eca[j])+(K2+xi->cas[j])/(2.0*ts)-bs+ks/2.0*(p->gcal[j]*xi->rl[j]*K1/(K2+xi->cas[j])*(xi->V[j]-p->Eca[j])+p->gcanl[j]*xi->rnl[j]*xi->fnl[j]*(xi->V[j]-p->Eca[j]));
        c=-K2/Edt*xi->cas[j]+ks/2.0*p->gcal[j]*xf->rl[j]*K1*(xi->V[j]-p->Eca[j])+ks*K2/2.0*p->gcanl[j]*xf->rnl[j]*xf->fnl[j]*(xi->V[j]-p->Eca[j])+K2/(2.0*ts)*xi->cas[j]-bs*K2+K2*ks/2*(p->gcal[j]*xi->rl[j]*K1/(K2+xi->cas[j])*(xi->V[j]-p->Eca[j])+p->gcanl[j]*xi->rnl[j]*xi->fnl[j]*(xi->V[j]-p->Eca[j]));
        //Update cas before s and cac (same time step, but s and cac depend on current cas)
        xf->cas[j]=(-b+sqrt(pow(b,2)-4.0*a*c))/(2.0*a);
        sinff = 1e7*pow(xf->cas[j],2)/(1e7*pow(xf->cas[j],2)+5.6);
        tausf = 500.0/(1e7*pow(xf->cas[j],2)+5.6);
        xf->s[j]=1.0/(1.0+Edt/(2.0*tausf))*(xi->s[j]*(1.0-Edt/(2.0*tausi))+Edt/2.0*(sinfi/tausi+sinff/tausf));
        xf->cac[j]=1.0/(1.0+Edt/(2.0*tc))*(xi->cac[j]*(1.0-Edt/(2.0*tc))+bc*Edt-Edt*kc/2.0*( p->gcal[j]*xf->rl[j]*(K1/(K2+xf->cas[j]))*(xi->V[j]-p->Eca[j])+p->gcanl[j]*xf->rnl[j]*xf->fnl[j]*(xi->V[j]-p->Eca[j])+p->gcal[j]*xi->rl[j]*(K1/(K2+xi->cas[j]))*(xi->V[j]-p->Eca[j])+p->gcanl[j]*xi->rnl[j]*xi->fnl[j]*(xi->V[j]-p->Eca[j])));
        //update synaptic gating variable
        xf->y[j] = (2.0*Edt*ya+(2.0-yb*Edt)*xi->y[j])/(2.0+yb*Edt);
        //Time t+1 update:
        // Total conductance G and conductance-weighted reversal drive E,
        // built from the half-step gating values above.
        G = p->gna[j]*pow(xf->m[j],3)*xf->h[j]+p->gk[j]*pow(xf->n[j],4)+p->gcal[j]*xf->rl[j]*(K1/(K2+xf->cas[j]))+p->gcanl[j]*xf->rnl[j]*xf->fnl[j]+gkca*pow(xf->s[j],2)+gkleak+gnaleak+gsyn*xf->y[j];
        E = p->Ena[j]*(p->gna[j]*pow(xf->m[j],3)*xf->h[j]+gnaleak)+p->Ek[j]*(p->gk[j]*pow(xf->n[j],4)+gkca*pow(xf->s[j],2)+gkleak)+p->Eca[j]*(p->gcal[j]*xf->rl[j]*(K1/(K2+xf->cas[j]))+p->gcanl[j]*xf->rnl[j]*xf->fnl[j])+p->Egaba[j]*gsyn*xf->y[j];
        //Update voltage (trapezoidal step of C dV/dt = Iapp + E - G*V)
        xf->V[j] = 1.0/(p->c[j]+Edt/2.0*G)*(Edt*(Iapp+E)+(p->c[j]-Edt/2.0*G)*xi->V[j]);
        //Update post-synaptic current out of this cell
        xf->out[j] = gsyn*xf->y[j]*(p->Egaba[j]-xf->V[j]); // not really ouput; this is the gaba current the cell experiences
        xf->gaba[j] = 1.0/(1.0+exp(-(xf->V[j]+20.0)/3.0)); // g2 changed 20140406
    }
}
// Finalizes a time step. For each cell, records three traces into `result`
// (three contiguous blocks of res_len entries, all offset by i): voltage,
// cytosolic calcium, and the synaptic current, then copies the end-of-step
// state xf back into xi so it becomes the initial state of the next step.
// NOTE(review): the stored voltage is xi->V (the pre-step value) shifted by
// 200*VMAXMIN, while calcium/current come from xf — confirm this half-step
// offset is intentional. `input` and `upstream` are unused here.
__global__ void leapfrog_copy(int i, int res_len, ephys_t *result, Estate *xi, Estate *xf, ephys_t *input, int *upstream )
{
    // Copy values from end of time step to be initial values of next
    int j;
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = NTHREADS * NBLOCKS;
    for (j=tid; j<ncells; j+=stride) {
        result[i+j] = xi->V[j]+200*VMAXMIN;
        result[res_len+i+j] = xf->cac[j]; // cytosolic calcium
        // result[2*res_len+i+j] = xf->gaba[j]; // gaba output from each cell
        result[2*res_len+i+j] = xf->out[j]; // output current from each cell
        xi->V[j] = xf->V[j];
        xi->m[j] = xf->m[j];
        xi->h[j] = xf->h[j];
        xi->n[j] = xf->n[j];
        xi->rl[j] = xf->rl[j];
        xi->rnl[j] = xf->rnl[j];
        xi->fnl[j] = xf->fnl[j];
        xi->s[j] = xf->s[j];
        xi->cas[j] = xf->cas[j];
        xi->cac[j] = xf->cac[j];
        xi->out[j] = xf->out[j];
        xi->gaba[j] = xf->gaba[j];
        xi->y[j] = xf->y[j];
    }
}
// Host wrapper: launches one leapfrog integration step over all cells
// with the compile-time grid configuration (NBLOCKS x NTHREADS).
void leapfrog_wrapper(Estate *xi, Estate *xf, Eparameters *p, Mstate *M, ephys_t *input, int *upstream, double t) {
    hipLaunchKernelGGL(( leapfrog) , dim3(NBLOCKS), dim3(NTHREADS) , 0, 0, xi, xf, p, M, input, upstream, t);
}
// Host wrapper: launches the end-of-step record/copy-back kernel with the
// compile-time grid configuration (NBLOCKS x NTHREADS).
void leapfrog_copy_wrapper(int i, int res_len, ephys_t *result, Estate *xi, Estate *xf, ephys_t *input, int *upstream) {
    hipLaunchKernelGGL(( leapfrog_copy) , dim3(NBLOCKS), dim3(NTHREADS) , 0, 0, i, res_len, result, xi, xf, input, upstream);
}
| aa193624d9a911019a8278451ca829f37526239e.cu | /* simulate ncells cells using the model from Diekman et al. */
/* equations are solved in PARALLEL on GPUs */
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include "scn.h"
#include "df.h"
#include "parameters.h"
// One integration step of the Diekman et al. SCN neuron model for all cells,
// computed in parallel with a grid-stride loop (one iteration per cell).
// xi holds the state at the start of the step; xf receives the updated state.
// Gating variables and calcium use a trapezoidal (semi-implicit) update with
// step Edt; the sub-membrane calcium cas is obtained by solving a quadratic.
// M supplies the per-cell molecular clock output G; `input` carries synaptic
// drive. NOTE(review): `t` and `upstream` are unused in the active code —
// they appear only in commented-out alternatives for ya/yb.
__global__ void leapfrog(Estate *xi, Estate *xf, Eparameters *p, Mstate *M, ephys_t *input, int *upstream, double t)
{
    int j;
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = NTHREADS * NBLOCKS;
    ephys_t a, b, c, R, G, E, gkca, gkleak, gnaleak;
    // `a` is the cell-independent leading coefficient of the cas quadratic.
    a=1.0/Edt+1.0/(2.0*ts);
    ephys_t minf, hinf, ninf, rlinf, rnlinf, fnlinf, sinfi, sinff;
    ephys_t taum, tauh, taun, taufnl, tausi, tausf;
    ephys_t ya, yb;
    for (j=tid; j<ncells; j+=stride) {
        // Clock-dependent drive R; cells below index 615 are "ventral", the
        // rest "dorsal". NOTE(review): this relies on dangling-else binding
        // (first `else` pairs with `if (j < 615)`, second with
        // `if (KO == 10)`). It parses correctly, but braces would make the
        // intent explicit and protect against future edits.
        if (KO == 10)
            if (j < 615) // DV separated on 12/18/2015
                // R = p->clk[j]*11.36*(M->G[j]-0.2); //added 12/19/2015 - Ventral R1
                // R = p->clk[j]*14.20*(M->G[j]-0.2); //added 12/20/2015 - Ventral R2
                R = p->clk[j]*17.04*(M->G[j]-0.185); //added 12/20/2015 - Ventral R3
            else
                // R = p->clk[j]*17.04*(M->G[j]-0.165); //added 12/19/2015 - Dorsal R1
                // R = p->clk[j]*14.20*(M->G[j]-0.155); //added 12/20/2015 - Dorsal R2
                R = p->clk[j]*14.20*(M->G[j]-0.145); //added 12/28/2015 - Dorsal R3
                // R = p->clk[j]*11.36*(M->G[j]-0.165); //added 11/23/2015
                // R = p->clk[j]*17.04*(M->G[j]-0.165); //added 12/12/2015
        else
            R = p->clk[j]*8.52*(M->G[j]-0.25); //changed in 12/2015 for WT in Afh sims
            // R = p->clk[j]*5.68*(M->G[j]-0.25); //changed in 12/2015 for WT in Afh sims
            // R = p->clk[j]*11.36*(M->G[j]-0.25); //changed in ~2/2014? = R2
        /////////////////////////////////////////////////////////////////////////////////////////////////////
        // Clock-modulated conductances: KCa and K-leak depend on R.
        gkca = 198.0/(1.0+exp(R))+2.0;
        gkleak = 0.2/(1.0+exp(R));
        gnaleak = p->gnaleak[j];
        //Time t+1/2 update:
        //Calculate time constants and eq values
        minf = 1.0/(1.0+exp(-(xi->V[j]+35.2)/8.1));
        taum = exp(-(xi->V[j]+286.0)/160.0);
        hinf = 1.0/(1.0+exp((xi->V[j]+62.0)/2.0));
        tauh = 0.51+exp(-(xi->V[j]+26.6)/7.1);
        ninf = 1.0/pow(1.0+exp((xi->V[j]-14.0)/(-17.0)),.25);
        taun = exp(-(xi->V[j]-67.0)/68.0);
        rlinf = 1.0/(1.0+exp(-(xi->V[j]+36.0)/5.1));
        rnlinf = 1.0/(1.0+exp(-(xi->V[j]+21.6)/6.7));
        fnlinf = 1.0/(1.0+exp((xi->V[j]+260.0)/65.0));
        taufnl = exp(-(xi->V[j]-444.0)/220.0);
        sinfi = 1e7*pow(xi->cas[j],2)/(1e7*pow(xi->cas[j],2)+5.6);
        tausi = 500.0/(1e7*pow(xi->cas[j],2)+5.6);
        // Synaptic rise (ya) and decay (yb) rates derived from presynaptic
        // drive; the active form scales input[j] by 1/10 — presumably a
        // fixed fan-in (the commented alternatives used t or upstream[j]);
        // TODO confirm against the caller that fills `input`.
        // ya = 5.0*(t);
        // yb = 5.0*(t)+0.18;
        // ya = 5.0*(input[j]/upstream[j]);
        // yb = 5.0*(input[j]/upstream[j])+0.18;
        ya = 5.0*(input[j]/10.0);
        yb = 5.0*(input[j]/10.0)+0.18;
        //Update gating variables (trapezoidal rule in each case)
        xf->m[j] = 2.0*Edt/(2.0*taum+Edt)*minf+(2.0*taum-Edt)/(2.0*taum+Edt)*xi->m[j];
        xf->h[j] = 2.0*Edt/(2.0*tauh+Edt)*hinf+(2.0*tauh-Edt)/(2.0*tauh+Edt)*xi->h[j];
        xf->n[j] = 2.0*Edt/(2.0*taun+Edt)*ninf+(2.0*taun-Edt)/(2.0*taun+Edt)*xi->n[j];
        xf->rl[j] = 2.0*Edt/(2.0*taurl+Edt)*rlinf+(2.0*taurl-Edt)/(2.0*taurl+Edt)*xi->rl[j];
        xf->rnl[j] = 2.0*Edt/(2.0*taurnl+Edt)*rnlinf+(2.0*taurnl-Edt)/(2.0*taurnl+Edt)*xi->rnl[j];
        xf->fnl[j] = 2.0*Edt/(2.0*taufnl+Edt)*fnlinf+(2.0*taufnl-Edt)/(2.0*taufnl+Edt)*xi->fnl[j];
        //solve quadratic equation for cas (a is constant)
        b=(K2-xi->cas[j])/Edt+ks/2.0*p->gcanl[j]*xf->rnl[j]*xf->fnl[j]*(xi->V[j]-p->Eca[j])+(K2+xi->cas[j])/(2.0*ts)-bs+ks/2.0*(p->gcal[j]*xi->rl[j]*K1/(K2+xi->cas[j])*(xi->V[j]-p->Eca[j])+p->gcanl[j]*xi->rnl[j]*xi->fnl[j]*(xi->V[j]-p->Eca[j]));
        c=-K2/Edt*xi->cas[j]+ks/2.0*p->gcal[j]*xf->rl[j]*K1*(xi->V[j]-p->Eca[j])+ks*K2/2.0*p->gcanl[j]*xf->rnl[j]*xf->fnl[j]*(xi->V[j]-p->Eca[j])+K2/(2.0*ts)*xi->cas[j]-bs*K2+K2*ks/2*(p->gcal[j]*xi->rl[j]*K1/(K2+xi->cas[j])*(xi->V[j]-p->Eca[j])+p->gcanl[j]*xi->rnl[j]*xi->fnl[j]*(xi->V[j]-p->Eca[j]));
        //Update cas before s and cac (same time step, but s and cac depend on current cas)
        xf->cas[j]=(-b+sqrt(pow(b,2)-4.0*a*c))/(2.0*a);
        sinff = 1e7*pow(xf->cas[j],2)/(1e7*pow(xf->cas[j],2)+5.6);
        tausf = 500.0/(1e7*pow(xf->cas[j],2)+5.6);
        xf->s[j]=1.0/(1.0+Edt/(2.0*tausf))*(xi->s[j]*(1.0-Edt/(2.0*tausi))+Edt/2.0*(sinfi/tausi+sinff/tausf));
        xf->cac[j]=1.0/(1.0+Edt/(2.0*tc))*(xi->cac[j]*(1.0-Edt/(2.0*tc))+bc*Edt-Edt*kc/2.0*( p->gcal[j]*xf->rl[j]*(K1/(K2+xf->cas[j]))*(xi->V[j]-p->Eca[j])+p->gcanl[j]*xf->rnl[j]*xf->fnl[j]*(xi->V[j]-p->Eca[j])+p->gcal[j]*xi->rl[j]*(K1/(K2+xi->cas[j]))*(xi->V[j]-p->Eca[j])+p->gcanl[j]*xi->rnl[j]*xi->fnl[j]*(xi->V[j]-p->Eca[j])));
        //update synaptic gating variable
        xf->y[j] = (2.0*Edt*ya+(2.0-yb*Edt)*xi->y[j])/(2.0+yb*Edt);
        //Time t+1 update:
        // Total conductance G and conductance-weighted reversal drive E,
        // built from the half-step gating values above.
        G = p->gna[j]*pow(xf->m[j],3)*xf->h[j]+p->gk[j]*pow(xf->n[j],4)+p->gcal[j]*xf->rl[j]*(K1/(K2+xf->cas[j]))+p->gcanl[j]*xf->rnl[j]*xf->fnl[j]+gkca*pow(xf->s[j],2)+gkleak+gnaleak+gsyn*xf->y[j];
        E = p->Ena[j]*(p->gna[j]*pow(xf->m[j],3)*xf->h[j]+gnaleak)+p->Ek[j]*(p->gk[j]*pow(xf->n[j],4)+gkca*pow(xf->s[j],2)+gkleak)+p->Eca[j]*(p->gcal[j]*xf->rl[j]*(K1/(K2+xf->cas[j]))+p->gcanl[j]*xf->rnl[j]*xf->fnl[j])+p->Egaba[j]*gsyn*xf->y[j];
        //Update voltage (trapezoidal step of C dV/dt = Iapp + E - G*V)
        xf->V[j] = 1.0/(p->c[j]+Edt/2.0*G)*(Edt*(Iapp+E)+(p->c[j]-Edt/2.0*G)*xi->V[j]);
        //Update post-synaptic current out of this cell
        xf->out[j] = gsyn*xf->y[j]*(p->Egaba[j]-xf->V[j]); // not really ouput; this is the gaba current the cell experiences
        xf->gaba[j] = 1.0/(1.0+exp(-(xf->V[j]+20.0)/3.0)); // g2 changed 20140406
    }
}
// Finalizes a time step. For each cell, records three traces into `result`
// (three contiguous blocks of res_len entries, all offset by i): voltage,
// cytosolic calcium, and the synaptic current, then copies the end-of-step
// state xf back into xi so it becomes the initial state of the next step.
// NOTE(review): the stored voltage is xi->V (the pre-step value) shifted by
// 200*VMAXMIN, while calcium/current come from xf — confirm this half-step
// offset is intentional. `input` and `upstream` are unused here.
__global__ void leapfrog_copy(int i, int res_len, ephys_t *result, Estate *xi, Estate *xf, ephys_t *input, int *upstream )
{
    // Copy values from end of time step to be initial values of next
    int j;
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = NTHREADS * NBLOCKS;
    for (j=tid; j<ncells; j+=stride) {
        result[i+j] = xi->V[j]+200*VMAXMIN;
        result[res_len+i+j] = xf->cac[j]; // cytosolic calcium
        // result[2*res_len+i+j] = xf->gaba[j]; // gaba output from each cell
        result[2*res_len+i+j] = xf->out[j]; // output current from each cell
        xi->V[j] = xf->V[j];
        xi->m[j] = xf->m[j];
        xi->h[j] = xf->h[j];
        xi->n[j] = xf->n[j];
        xi->rl[j] = xf->rl[j];
        xi->rnl[j] = xf->rnl[j];
        xi->fnl[j] = xf->fnl[j];
        xi->s[j] = xf->s[j];
        xi->cas[j] = xf->cas[j];
        xi->cac[j] = xf->cac[j];
        xi->out[j] = xf->out[j];
        xi->gaba[j] = xf->gaba[j];
        xi->y[j] = xf->y[j];
    }
}
// Host wrapper: launches one leapfrog integration step over all cells
// with the compile-time grid configuration (NBLOCKS x NTHREADS).
void leapfrog_wrapper(Estate *xi, Estate *xf, Eparameters *p, Mstate *M, ephys_t *input, int *upstream, double t) {
    leapfrog <<< NBLOCKS, NTHREADS >>> (xi, xf, p, M, input, upstream, t);
}
// Host wrapper: launches the end-of-step record/copy-back kernel with the
// compile-time grid configuration (NBLOCKS x NTHREADS).
void leapfrog_copy_wrapper(int i, int res_len, ephys_t *result, Estate *xi, Estate *xf, ephys_t *input, int *upstream) {
    leapfrog_copy <<< NBLOCKS, NTHREADS >>> (i, res_len, result, xi, xf, input, upstream);
}
|
7725ef193c93363f06369082955013a3266ca236.hip | // !!! This is a file automatically generated by hipify!!!
/*
*
* win32GPULoad.cu
*
* Multithreaded Win32 application pushes bandwidth to or from one
* socket using a given GPU. Periodically reports observed
* bandwidth.
*
* Runs indefinitely until it detects a 'Q' keystroke.
* Hitting the space bar will cause it to reset counts.
*
* Copyright (c) 2011-2012, Archaea Software, LLC.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <conio.h>
#include <hip/hip_runtime.h>
#include <chTimer.h>
#include <chCommandLine.h>
#include <emmintrin.h>
int g_numNodes;
int g_cIterations = 10;
// Queries the number of NUMA nodes on this system. On success, writes the
// node count (highest node number + 1) to *p and returns true; returns
// false (leaving *p untouched) if the Win32 query fails.
bool
numNodes( int *p )
{
    ULONG highestNode;
    if ( ! GetNumaHighestNodeNumber( &highestNode ) ) {
        return false;
    }
    *p = (int) highestNode + 1;
    return true;
}
/*
 * Times cIterations host->device copies of `bytes` bytes from src to dst.
 * All copies are enqueued asynchronously on the default stream and timed as
 * a batch around a final device synchronize, so the result reflects DMA
 * throughput rather than per-call launch overhead. Returns the elapsed time
 * (units per chTimerElapsedTime — presumably seconds; confirm in chTimer.h),
 * or 0.0 if any HIP call fails. Note: src must be pinned host memory for
 * the async copies to actually overlap.
 */
double
elapsedTimeCopyToGPU( void *dst, void *src, size_t bytes, int cIterations )
{
    double ret = 0.0f;
    chTimerTimestamp start, end;
    chTimerGetTime( &start );
    {
        for ( int i = 0; i < cIterations; i++ ) {
            if ( hipSuccess != hipMemcpyAsync( dst, src, bytes, hipMemcpyHostToDevice ) )
                goto Error;
        }
    }
    // Drain the queue so the timed interval covers all cIterations copies.
    if ( hipSuccess != hipDeviceSynchronize() )
        goto Error;
    chTimerGetTime( &end );
    ret = chTimerElapsedTime( &start, &end );
Error:
    return ret;
}
/*
 * Times cIterations device->host copies of `bytes` bytes from src to dst.
 * Mirror of elapsedTimeCopyToGPU: async copies on the default stream, timed
 * as a batch around a final device synchronize. Returns the elapsed time
 * (units per chTimerElapsedTime), or 0.0 if any HIP call fails. The host
 * destination must be pinned memory for the copies to be asynchronous.
 */
double
elapsedTimeCopyFromGPU( void *dst, void *src, size_t bytes, int cIterations )
{
    double ret = 0.0f;
    chTimerTimestamp start, end;
    chTimerGetTime( &start );
    {
        for ( int i = 0; i < cIterations; i++ ) {
            if ( hipSuccess != hipMemcpyAsync( dst, src, bytes, hipMemcpyDeviceToHost ) )
                goto Error;
        }
    }
    // Drain the queue so the timed interval covers all cIterations copies.
    if ( hipSuccess != hipDeviceSynchronize() )
        goto Error;
    chTimerGetTime( &end );
    ret = chTimerElapsedTime( &start, &end );
Error:
    return ret;
}
/*
 * Allocates `bytes` of page-aligned host memory with a preferred NUMA node
 * (VirtualAllocExNuma commits zeroed, page-granular memory on `node`).
 * Returns NULL on failure; callers must check. Prints the target node to
 * stdout as a diagnostic trace.
 */
void *
pageAlignedNumaAlloc( size_t bytes, int node )
{
    void *ret;
    printf( "Allocating on node %d\n", node ); fflush(stdout);
    ret = VirtualAllocExNuma( GetCurrentProcess(),
                              NULL,
                              bytes,
                              MEM_COMMIT | MEM_RESERVE,
                              PAGE_READWRITE,
                              node );
    return ret;
}
/*
 * Releases a buffer previously obtained from pageAlignedNumaAlloc.
 */
void
pageAlignedNumaFree( void *p )
{
    VirtualFreeEx( GetCurrentProcess(), p, 0, MEM_RELEASE );
}
typedef struct __GPU_BANDWIDTH_PARAMETERS
{
int device;
int node;
bool copyToDevice;
size_t size;
} GPU_BANDWIDTH_PARAMETERS;
typedef struct __GLOBAL_RUNNING_SUMS {
CRITICAL_SECTION cs;
double totalTime;
unsigned long long totalBytes;
bool bExit;
size_t size;
} GLOBAL_RUNNING_SUMS;
GLOBAL_RUNNING_SUMS globals;
/*
 * Worker thread: repeatedly DMAs a buffer between host memory on a given
 * NUMA node and a given GPU (direction per p->copyToDevice), accumulating
 * total bytes and elapsed time into the shared `globals` counters under its
 * critical section, until globals.bExit is set.
 * Returns 0 on clean exit, 1 on error; in either case it sets globals.bExit
 * so the main thread's report loop also terminates.
 * NOTE(review): globals.bExit is read here without the lock; harmless on
 * x86 in practice but not formally synchronized.
 */
DWORD WINAPI
threadBandwidthToSocket( LPVOID _p )
{
    DWORD ret = 1;
    GPU_BANDWIDTH_PARAMETERS *p = (GPU_BANDWIDTH_PARAMETERS *) _p;
    void *pDevice = 0;
    // Host buffer placed on the requested NUMA node...
    void *pHost = pageAlignedNumaAlloc( p->size, p->node );
    if ( ! pHost )
        goto Error;
    if ( hipSuccess != hipSetDevice( p->device ) )
        goto Error;
    // ...then pinned so the async copies can DMA directly from/to it.
    if ( hipSuccess != hipHostRegister( pHost, p->size, 0 ) )
        goto Error;
    if ( hipSuccess != hipMalloc( &pDevice, p->size ) )
        goto Error;
    while ( ! globals.bExit ) {
        double et = p->copyToDevice ?
            elapsedTimeCopyToGPU( pDevice, pHost, p->size, g_cIterations ) :
            elapsedTimeCopyFromGPU( pHost, pDevice, p->size, g_cIterations );
        // The timing helpers return 0.0 only on a HIP failure.
        if ( 0.0 == et ) {
            printf( "Error during DMA\n" );
            goto Error;
        }
        EnterCriticalSection( &globals.cs );
        globals.totalBytes += g_cIterations*p->size;
        globals.totalTime += et;
        LeaveCriticalSection( &globals.cs );
    }
    ret = 0;
Error:
    if ( pDevice ) hipFree( pDevice );
    if ( pHost ) {
        hipHostUnregister( pHost );
        pageAlignedNumaFree( pHost );
    }
    // Signal the main thread to stop (also reached on the error path).
    EnterCriticalSection( &globals.cs );
    globals.bExit = true;
    LeaveCriticalSection( &globals.cs );
    return ret;
}
/*
 * Entry point. Parses options, launches one worker thread that continuously
 * DMAs between host memory on the requested NUMA node and the requested GPU,
 * then reports the measured bandwidth every 10 seconds until the user
 * presses 'Q'. Note: the byte/time counters are zeroed after every report,
 * so each printed figure is the average over the last ~10 s interval
 * (pressing space merely forces an extra reset).
 */
int
main( int argc, char *argv[] )
{
    int ret = 1;
    int node = 0;
    int device = 0;
    int deviceCount = 0;
    bool bCopyToDevice = false;
    int size = 384; // 384MB buffer size by default
    // FIX: the worker-thread parameter block must outlive the inner scope it
    // was previously declared in — the thread keeps dereferencing it for the
    // whole run, so it now lives at function scope.
    GPU_BANDWIDTH_PARAMETERS cpuParms;
    if ( ! numNodes( &g_numNodes ) ) {
        fprintf( stderr, "Failed to query the number of nodes\n" );
        return 1;
    }
    if ( hipSuccess != hipGetDeviceCount( &deviceCount ) ) {
        fprintf( stderr, "Failed to get CUDA device count\n" );
        return 1;
    }
    if ( 0 == deviceCount ) {
        fprintf( stderr, "No CUDA devices available\n" );
        return 1;
    }
    if ( argc == 1 ) {
        // No arguments: print usage and exit.
        // FIX: the usage line advertised a nonexistent --numThreads option;
        // it now lists the options that are actually parsed below.
        printf( "Usage: %s --node <src> --device <device> [--iterations <count>] [--size size] [--copyToDevice]\n", argv[0] );
        printf( " --node: specify node to allocate host memory on\n" );
        printf( " --device: specify GPU (device) to use\n" );
        printf( " --size: size (in MB) of the buffer\n" );
        printf( " --iterations <count>: number of memcpy's per timing event (default 10)\n" );
        printf( " --copyToDevice: if specified, the app performs host->device copies from the given node.\n" );
        printf( " The default is to write to the node with device->host copies.\n" );
        printf( "Note: This platform has %d nodes available, numbered 0..%d.\n", g_numNodes, g_numNodes-1 );
        printf( " This platform has %d devices available, numbered 0..%d.\n", deviceCount, deviceCount-1 );
        printf( "\nThis program runs indefinitely until you quit with the Q key.\n" );
        printf( "The bandwidth reported is a running average. To reset the counters, hit the space key.\n" );
        exit(0);
    }
    chCommandLineGet( &g_cIterations, "iterations", argc, argv );
    chCommandLineGet( &node, "node", argc, argv );
    if ( node < 0 || node >= g_numNodes ) {
        fprintf( stderr, "node must be in the range 0..%d\n", g_numNodes-1 );
        exit(1);
    }
    chCommandLineGet( &device, "device", argc, argv );
    if ( device < 0 || device >= deviceCount ) {
        fprintf( stderr, "device must be in the range 0..%d\n", deviceCount-1 );
        exit(1);
    }
    bCopyToDevice = chCommandLineGetBool( "copyToDevice", argc, argv );
    chCommandLineGet( &size, "size", argc, argv );
    globals.size = size*(size_t) 1048576; // MB -> bytes (size_t math avoids 32-bit overflow)
    printf( "%d MB on node %d is being %s by GPU %d\n", size, node, bCopyToDevice?"read":"written", device );
    InitializeCriticalSection( &globals.cs );
    {
        HANDLE hThread;
        DWORD dwThreadId;
        cpuParms.node = node;
        cpuParms.device = device;
        cpuParms.size = globals.size;
        cpuParms.copyToDevice = bCopyToDevice;
        hThread = CreateThread( NULL, 0, threadBandwidthToSocket, &cpuParms, 0, &dwThreadId );
        if ( NULL == hThread ) {
            fprintf( stderr, "Yipes. Thread creation failed\n" );
            exit(1);
        }
        // FIX: removed a second, unreachable NULL check on hThread that
        // followed the unconditional exit(1) above.
    }
    // Report loop: wake every 10 s, print the bandwidth observed since the
    // last report, and poll the keyboard for reset/quit requests.
    do {
        Sleep( 10000 );
        EnterCriticalSection( &globals.cs );
        printf( "Bandwidth: %.2f GB/s\n", (double) globals.totalBytes /1e9 / globals.totalTime );
        globals.totalBytes = 0;
        globals.totalTime = 0;
        if ( kbhit() ) {
            int ch = getch();
            if ( ch == ' ' ) {
                printf( "Resetting counts\n" );
                globals.totalBytes = 0;
                globals.totalTime = 0;
            }
            if ( toupper(ch) == 'Q' ) {
                printf( "Quitting\n" );
                globals.bExit = true;
            }
        }
        LeaveCriticalSection( &globals.cs );
    } while ( ! globals.bExit );
    ret = 0;
    return ret;
}
| 7725ef193c93363f06369082955013a3266ca236.cu | /*
*
* win32GPULoad.cu
*
* Multithreaded Win32 application pushes bandwidth to or from one
* socket using a given GPU. Periodically reports observed
* bandwidth.
*
* Runs indefinitely until it detects a 'Q' keystroke.
* Hitting the space bar will cause it to reset counts.
*
* Copyright (c) 2011-2012, Archaea Software, LLC.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <conio.h>
#include <cuda.h>
#include <chTimer.h>
#include <chCommandLine.h>
#include <emmintrin.h>
int g_numNodes;
int g_cIterations = 10;
// Query the number of NUMA nodes on this system.
// On success stores the node count (highest node index + 1) in *p and
// returns true; returns false if the Win32 query fails.
bool
numNodes( int *p )
{
    ULONG highestNode;
    if ( ! GetNumaHighestNodeNumber( &highestNode ) )
        return false;
    *p = (int) highestNode + 1;
    return true;
}
// Time 'cIterations' asynchronous host->device copies of 'bytes' bytes,
// followed by a device synchronize so all copies have completed.
// Returns the elapsed wall-clock time in seconds, or 0.0 if any copy or
// the synchronize fails (callers treat 0.0 as the error sentinel).
double
elapsedTimeCopyToGPU( void *dst, void *src, size_t bytes, int cIterations )
{
    chTimerTimestamp start, end;
    chTimerGetTime( &start );
    for ( int i = 0; i < cIterations; i++ ) {
        if ( cudaSuccess != cudaMemcpyAsync( dst, src, bytes, cudaMemcpyHostToDevice ) )
            return 0.0;
    }
    if ( cudaSuccess != cudaDeviceSynchronize() )
        return 0.0;
    chTimerGetTime( &end );
    return chTimerElapsedTime( &start, &end );
}
// Time 'cIterations' asynchronous device->host copies of 'bytes' bytes,
// followed by a device synchronize so all copies have completed.
// Returns the elapsed wall-clock time in seconds, or 0.0 on any failure
// (callers treat 0.0 as the error sentinel).
double
elapsedTimeCopyFromGPU( void *dst, void *src, size_t bytes, int cIterations )
{
    chTimerTimestamp start, end;
    chTimerGetTime( &start );
    for ( int i = 0; i < cIterations; i++ ) {
        if ( cudaSuccess != cudaMemcpyAsync( dst, src, bytes, cudaMemcpyDeviceToHost ) )
            return 0.0;
    }
    if ( cudaSuccess != cudaDeviceSynchronize() )
        return 0.0;
    chTimerGetTime( &end );
    return chTimerElapsedTime( &start, &end );
}
// Allocate 'bytes' of page-aligned host memory resident on the given NUMA
// node via VirtualAllocExNuma. Returns NULL on failure. The allocation must
// be released with pageAlignedNumaFree().
void *
pageAlignedNumaAlloc( size_t bytes, int node )
{
    printf( "Allocating on node %d\n", node );
    fflush(stdout);
    return VirtualAllocExNuma( GetCurrentProcess(),
                               NULL,
                               bytes,
                               MEM_COMMIT | MEM_RESERVE,
                               PAGE_READWRITE,
                               node );
}
// Release memory previously obtained from pageAlignedNumaAlloc().
void
pageAlignedNumaFree( void *p )
{
    VirtualFreeEx( GetCurrentProcess(), p, 0, MEM_RELEASE );
}
// Parameters handed to a bandwidth worker thread (threadBandwidthToSocket).
typedef struct __GPU_BANDWIDTH_PARAMETERS
{
    int device;         // CUDA device ordinal to copy to/from
    int node;           // NUMA node on which to allocate the host buffer
    bool copyToDevice;  // true: host->device copies; false: device->host
    size_t size;        // buffer size in bytes
} GPU_BANDWIDTH_PARAMETERS;
// Running totals shared between main() and the worker thread.
// All fields except 'size' must be accessed while holding 'cs'.
typedef struct __GLOBAL_RUNNING_SUMS {
    CRITICAL_SECTION cs;            // protects the fields below
    double totalTime;               // accumulated copy time (seconds)
    unsigned long long totalBytes;  // accumulated bytes copied
    bool bExit;                     // set to request shutdown (by UI or on error)
    size_t size;                    // buffer size in bytes (written once before the thread starts)
} GLOBAL_RUNNING_SUMS;
GLOBAL_RUNNING_SUMS globals;
// Worker thread: allocates a pinned host buffer on the requested NUMA node
// and a device buffer on the requested GPU, then repeatedly times batches of
// g_cIterations async memcpys in the requested direction, folding the results
// into the shared 'globals' accumulators until globals.bExit is set.
// _p points at a GPU_BANDWIDTH_PARAMETERS that must outlive the thread.
// Returns 0 on clean shutdown, 1 on any setup or DMA error; on exit it
// always sets globals.bExit so main() stops too.
DWORD WINAPI
threadBandwidthToSocket( LPVOID _p )
{
    DWORD ret = 1;
    GPU_BANDWIDTH_PARAMETERS *p = (GPU_BANDWIDTH_PARAMETERS *) _p;
    void *pDevice = 0;
    void *pHost = pageAlignedNumaAlloc( p->size, p->node );
    if ( ! pHost )
        goto Error;
    if ( cudaSuccess != cudaSetDevice( p->device ) )
        goto Error;
    // Pin the NUMA allocation so cudaMemcpyAsync can DMA directly from it.
    if ( cudaSuccess != cudaHostRegister( pHost, p->size, 0 ) )
        goto Error;
    if ( cudaSuccess != cudaMalloc( &pDevice, p->size ) )
        goto Error;
    while ( ! globals.bExit ) {
        // The elapsed-time helpers return 0.0 as their error sentinel.
        double et = p->copyToDevice ?
            elapsedTimeCopyToGPU( pDevice, pHost, p->size, g_cIterations ) :
            elapsedTimeCopyFromGPU( pHost, pDevice, p->size, g_cIterations );
        if ( 0.0 == et ) {
            printf( "Error during DMA\n" );
            goto Error;
        }
        EnterCriticalSection( &globals.cs );
        globals.totalBytes += g_cIterations*p->size;
        globals.totalTime += et;
        LeaveCriticalSection( &globals.cs );
    }
    ret = 0;
Error:
    if ( pDevice ) cudaFree( pDevice );
    if ( pHost ) {
        // NOTE(review): if cudaHostRegister failed above, this unregisters
        // memory that was never registered; the call just returns an error
        // and is harmless, but a flag-guarded unregister would be cleaner.
        cudaHostUnregister( pHost );
        pageAlignedNumaFree( pHost );
    }
    // Propagate shutdown (or failure) to main()'s polling loop.
    EnterCriticalSection( &globals.cs );
    globals.bExit = true;
    LeaveCriticalSection( &globals.cs );
    return ret;
}
// Parse command-line options, launch one bandwidth worker thread, then poll
// every 10 s: print the bandwidth observed over the interval and watch the
// keyboard (space = reset counters, Q = quit). Runs until Q or worker error.
int
main( int argc, char *argv[] )
{
    int ret = 1;
    int node = 0;
    int device = 0;
    int deviceCount = 0;
    bool bCopyToDevice = false;
    int size = 384; // 384MB buffer size by default
    // Worker-thread parameters. Declared at function scope (not inside the
    // thread-creation block below) because the worker reads them through a
    // pointer for its entire lifetime; the original inner-scope instance was
    // destroyed while the thread could still be using it.
    GPU_BANDWIDTH_PARAMETERS cpuParms;
    if ( ! numNodes( &g_numNodes ) ) {
        fprintf( stderr, "Failed to query the number of nodes\n" );
        return 1;
    }
    if ( cudaSuccess != cudaGetDeviceCount( &deviceCount ) ) {
        fprintf( stderr, "Failed to get CUDA device count\n" );
        return 1;
    }
    if ( 0 == deviceCount ) {
        fprintf( stderr, "No CUDA devices available\n" );
        return 1;
    }
    if ( argc == 1 ) {
        printf( "Usage: %s --node <src> --device <device> [--numThreads <count>] [--size size]\n", argv[0] );
        printf( " --node: specify node to allocate host memory on\n" );
        printf( " --device: specify GPU (device) to use\n" );
        printf( " --size: size (in MB) of the buffer\n" );
        printf( " --iterations <count>: number of memcpy's per timing event (default 10)\n" );
        printf( " --copyToDevice: if specified, the app performs host->device copies from the given node.\n" );
        printf( " The default is to write to the node with device->host copies.\n" );
        printf( "Note: This platform has %d nodes available, numbered 0..%d.\n", g_numNodes, g_numNodes-1 );
        printf( " This platform has %d devices available, numbered 0..%d.\n", deviceCount, deviceCount-1 );
        printf( "\nThis program runs indefinitely until you quit with the Q key.\n" );
        printf( "The bandwidth reported is a running average. To reset the counters, hit the space key.\n" );
        exit(0);
    }
    chCommandLineGet( &g_cIterations, "iterations", argc, argv );
    chCommandLineGet( &node, "node", argc, argv );
    if ( node < 0 || node >= g_numNodes ) {
        fprintf( stderr, "node must be in the range 0..%d\n", g_numNodes-1 );
        exit(1);
    }
    chCommandLineGet( &device, "device", argc, argv );
    if ( device < 0 || device >= deviceCount ) {
        fprintf( stderr, "device must be in the range 0..%d\n", deviceCount-1 );
        exit(1);
    }
    bCopyToDevice = chCommandLineGetBool( "copyToDevice", argc, argv );
    chCommandLineGet( &size, "size", argc, argv );
    globals.size = size*(size_t) 1048576;
    printf( "%d MB on node %d is being %s by GPU %d\n", size, node, bCopyToDevice?"read":"written", device );
    InitializeCriticalSection( &globals.cs );
    {
        HANDLE hThread;
        DWORD dwThreadId;
        cpuParms.node = node;
        cpuParms.device = device;
        cpuParms.size = globals.size;
        cpuParms.copyToDevice = bCopyToDevice;
        hThread = CreateThread( NULL, 0, threadBandwidthToSocket, &cpuParms, 0, &dwThreadId );
        if ( NULL == hThread ) {
            fprintf( stderr, "Yipes. Thread creation failed\n" );
            exit(1);
        }
        // (The original's second, unreachable NULL check on hThread has been
        // removed -- the exit(1) above already handles creation failure.)
    }
    do {
        Sleep( 10000 );
        EnterCriticalSection( &globals.cs );
        // Skip the report until the worker has completed at least one timing
        // interval; otherwise totalTime is 0 and the division is meaningless.
        if ( globals.totalTime > 0.0 ) {
            printf( "Bandwidth: %.2f GB/s\n", (double) globals.totalBytes /1e9 / globals.totalTime );
        }
        globals.totalBytes = 0;
        globals.totalTime = 0;
        if ( kbhit() ) {
            int ch = getch();
            if ( ch == ' ' ) {
                printf( "Resetting counts\n" );
                globals.totalBytes = 0;
                globals.totalTime = 0;
            }
            if ( toupper(ch) == 'Q' ) {
                printf( "Quitting\n" );
                globals.bExit = true;
            }
        }
        LeaveCriticalSection( &globals.cs );
    } while ( ! globals.bExit );
    ret = 0;
    return ret;
}
|
fe728d6f62e20465ce2a4b460b89605cdfb9b028.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2020 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
#define EIGEN_USE_GPU
#include "ContinuousConvBackpropFilterOpKernel.h"
#include "open3d/ml/Helper.h"
#include "open3d/ml/impl/continuous_conv/ContinuousConvBackpropFilter.cuh"
using namespace open3d;
using namespace open3d::ml;
using namespace open3d::ml::impl;
using namespace tensorflow;
// GPU (HIP) TensorFlow kernel computing the filter gradient of Open3D's
// continuous convolution. Runs the backprop twice: first with a null temp
// pointer to query the required/maximum workspace size, then for real with
// an allocated scratch tensor.
template <class TReal, class TIndex>
class ContinuousConvBackpropFilterOpKernelCUDA
    : public ContinuousConvBackpropFilterOpKernel<TIndex> {
public:
    explicit ContinuousConvBackpropFilterOpKernelCUDA(
            OpKernelConstruction* construction)
        : ContinuousConvBackpropFilterOpKernel<TIndex>(construction) {
        texture_alignment = GetCUDACurrentDeviceTextureAlignment();
    }
    // Launches the device implementation on the op's GPU stream.
    // All tensor arguments are validated/shaped by the base class;
    // filter_backprop receives the gradient w.r.t. the filter.
    void Kernel(tensorflow::OpKernelContext* context,
                const tensorflow::Tensor& filter,
                const tensorflow::Tensor& out_positions,
                const tensorflow::Tensor& extents,
                const tensorflow::Tensor& offset,
                const tensorflow::Tensor& inp_positions,
                const tensorflow::Tensor& inp_features,
                const tensorflow::Tensor& inp_importance,
                const tensorflow::Tensor& neighbors_index,
                const tensorflow::Tensor& neighbors_importance,
                const tensorflow::Tensor& neighbors_row_splits,
                const tensorflow::Tensor& out_features_gradient,
                const std::vector<int>& filter_dims,
                const bool individual_extents,
                const bool isotropic_extents,
                const bool point_importances,
                const bool has_neighbors_importances,
                tensorflow::Tensor& filter_backprop) {
        auto device = context->eigen_gpu_device();
        void* temp_ptr = nullptr;
        size_t temp_size = 0;
        size_t max_temp_size = 0;
        // determine temp_size (dry run with temp_ptr == nullptr)
        CConvBackpropFilterCUDA<TReal, TIndex>(
                device.stream(), temp_ptr, temp_size, max_temp_size,
                texture_alignment, filter_backprop.flat<TReal>().data(),
                filter_dims, out_positions.shape().dim_size(0),
                out_positions.flat<TReal>().data(),
                inp_positions.shape().dim_size(0),
                inp_positions.flat<TReal>().data(),
                inp_features.flat<TReal>().data(),
                point_importances ? inp_importance.flat<TReal>().data()
                                  : nullptr,
                neighbors_index.shape().dim_size(0),
                (TIndex*)neighbors_index.flat<TIndex>().data(),
                has_neighbors_importances
                        ? neighbors_importance.flat<TReal>().data()
                        : nullptr,
                (int64_t*)neighbors_row_splits.flat<int64>().data(),
                extents.flat<TReal>().data(), offset.flat<TReal>().data(),
                out_features_gradient.flat<TReal>().data(), this->interpolation,
                this->coordinate_mapping, this->align_corners,
                individual_extents, isotropic_extents, this->normalize);
        // Grant up to max_temp_mem_MB of scratch, but never less than the
        // required minimum. Fixed hipify artifact: '::max'/'::min' restored
        // to 'std::max'/'std::min' as in the original CUDA source.
        temp_size =
                std::max(std::min(size_t(this->max_temp_mem_MB) * 1024 * 1024,
                                  max_temp_size),
                         temp_size);
        Tensor temp_tensor;
        TensorShape temp_shape({ssize_t(temp_size)});
        OP_REQUIRES_OK(context,
                       context->allocate_temp(DataTypeToEnum<uint8_t>::v(),
                                              temp_shape, &temp_tensor));
        temp_ptr = temp_tensor.flat<uint8_t>().data();
        // actually run the operation
        CConvBackpropFilterCUDA<TReal, TIndex>(
                device.stream(), temp_ptr, temp_size, max_temp_size,
                texture_alignment, filter_backprop.flat<TReal>().data(),
                filter_dims, out_positions.shape().dim_size(0),
                out_positions.flat<TReal>().data(),
                inp_positions.shape().dim_size(0),
                inp_positions.flat<TReal>().data(),
                inp_features.flat<TReal>().data(),
                point_importances ? inp_importance.flat<TReal>().data()
                                  : nullptr,
                neighbors_index.shape().dim_size(0),
                (TIndex*)neighbors_index.flat<TIndex>().data(),
                has_neighbors_importances
                        ? neighbors_importance.flat<TReal>().data()
                        : nullptr,
                (int64_t*)neighbors_row_splits.flat<int64>().data(),
                extents.flat<TReal>().data(), offset.flat<TReal>().data(),
                out_features_gradient.flat<TReal>().data(), this->interpolation,
                this->coordinate_mapping, this->align_corners,
                individual_extents, isotropic_extents, this->normalize);
    }
private:
    int texture_alignment;  // device texture alignment used for temp layout
};
#define REG_KB(type, indextype) \
REGISTER_KERNEL_BUILDER( \
Name("Open3DContinuousConvBackpropFilter") \
.Device(DEVICE_GPU) \
.TypeConstraint<type>("TReal") \
.TypeConstraint<indextype>("TIndex"), \
ContinuousConvBackpropFilterOpKernelCUDA<type, indextype>);
REG_KB(float, int32)
#undef REG_KB
| fe728d6f62e20465ce2a4b460b89605cdfb9b028.cu | // ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2020 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
#define EIGEN_USE_GPU
#include "ContinuousConvBackpropFilterOpKernel.h"
#include "open3d/ml/Helper.h"
#include "open3d/ml/impl/continuous_conv/ContinuousConvBackpropFilter.cuh"
using namespace open3d;
using namespace open3d::ml;
using namespace open3d::ml::impl;
using namespace tensorflow;
template <class TReal, class TIndex>
class ContinuousConvBackpropFilterOpKernelCUDA
: public ContinuousConvBackpropFilterOpKernel<TIndex> {
public:
explicit ContinuousConvBackpropFilterOpKernelCUDA(
OpKernelConstruction* construction)
: ContinuousConvBackpropFilterOpKernel<TIndex>(construction) {
texture_alignment = GetCUDACurrentDeviceTextureAlignment();
}
void Kernel(tensorflow::OpKernelContext* context,
const tensorflow::Tensor& filter,
const tensorflow::Tensor& out_positions,
const tensorflow::Tensor& extents,
const tensorflow::Tensor& offset,
const tensorflow::Tensor& inp_positions,
const tensorflow::Tensor& inp_features,
const tensorflow::Tensor& inp_importance,
const tensorflow::Tensor& neighbors_index,
const tensorflow::Tensor& neighbors_importance,
const tensorflow::Tensor& neighbors_row_splits,
const tensorflow::Tensor& out_features_gradient,
const std::vector<int>& filter_dims,
const bool individual_extents,
const bool isotropic_extents,
const bool point_importances,
const bool has_neighbors_importances,
tensorflow::Tensor& filter_backprop) {
auto device = context->eigen_gpu_device();
void* temp_ptr = nullptr;
size_t temp_size = 0;
size_t max_temp_size = 0;
// determine temp_size
CConvBackpropFilterCUDA<TReal, TIndex>(
device.stream(), temp_ptr, temp_size, max_temp_size,
texture_alignment, filter_backprop.flat<TReal>().data(),
filter_dims, out_positions.shape().dim_size(0),
out_positions.flat<TReal>().data(),
inp_positions.shape().dim_size(0),
inp_positions.flat<TReal>().data(),
inp_features.flat<TReal>().data(),
point_importances ? inp_importance.flat<TReal>().data()
: nullptr,
neighbors_index.shape().dim_size(0),
(TIndex*)neighbors_index.flat<TIndex>().data(),
has_neighbors_importances
? neighbors_importance.flat<TReal>().data()
: nullptr,
(int64_t*)neighbors_row_splits.flat<int64>().data(),
extents.flat<TReal>().data(), offset.flat<TReal>().data(),
out_features_gradient.flat<TReal>().data(), this->interpolation,
this->coordinate_mapping, this->align_corners,
individual_extents, isotropic_extents, this->normalize);
temp_size =
std::max(std::min(size_t(this->max_temp_mem_MB) * 1024 * 1024,
max_temp_size),
temp_size);
Tensor temp_tensor;
TensorShape temp_shape({ssize_t(temp_size)});
OP_REQUIRES_OK(context,
context->allocate_temp(DataTypeToEnum<uint8_t>::v(),
temp_shape, &temp_tensor));
temp_ptr = temp_tensor.flat<uint8_t>().data();
// actually run the operation
CConvBackpropFilterCUDA<TReal, TIndex>(
device.stream(), temp_ptr, temp_size, max_temp_size,
texture_alignment, filter_backprop.flat<TReal>().data(),
filter_dims, out_positions.shape().dim_size(0),
out_positions.flat<TReal>().data(),
inp_positions.shape().dim_size(0),
inp_positions.flat<TReal>().data(),
inp_features.flat<TReal>().data(),
point_importances ? inp_importance.flat<TReal>().data()
: nullptr,
neighbors_index.shape().dim_size(0),
(TIndex*)neighbors_index.flat<TIndex>().data(),
has_neighbors_importances
? neighbors_importance.flat<TReal>().data()
: nullptr,
(int64_t*)neighbors_row_splits.flat<int64>().data(),
extents.flat<TReal>().data(), offset.flat<TReal>().data(),
out_features_gradient.flat<TReal>().data(), this->interpolation,
this->coordinate_mapping, this->align_corners,
individual_extents, isotropic_extents, this->normalize);
}
private:
int texture_alignment;
};
#define REG_KB(type, indextype) \
REGISTER_KERNEL_BUILDER( \
Name("Open3DContinuousConvBackpropFilter") \
.Device(DEVICE_GPU) \
.TypeConstraint<type>("TReal") \
.TypeConstraint<indextype>("TIndex"), \
ContinuousConvBackpropFilterOpKernelCUDA<type, indextype>);
REG_KB(float, int32)
#undef REG_KB
|
a6c71d440a8c27212da082bc174e7e9ba423d6f5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "ten_tusscher_2004_epi_S1_14.h"
// Allocate the pitched per-cell state array (*sv, NEQ rows x num_volumes
// columns), publish the row pitch to the device-side 'pitch' symbol, and
// launch the kernel that writes the model's initial conditions.
// Returns the host-side pitch in bytes.
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) {
    print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n");
    // execution configuration (ceil-div so every volume gets a thread)
    const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
    size_t size = num_volumes*sizeof(real);
    check_cuda_error(hipMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
    check_cuda_error(hipMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
    hipLaunchKernelGGL(( kernel_set_model_inital_conditions) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, *sv, num_volumes);
    check_cuda_error( hipPeekAtLastError() );
    hipDeviceSynchronize();
    return pitch_h;
}
// Advance the cell-model ODEs for the requested cells by num_steps explicit
// steps of size dt. Copies the per-cell stimulus currents (and, for adaptive
// meshes, the indices of the cells to solve) to the device, launches the
// solver kernel, and frees the temporary device buffers.
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) {
    // execution configuration (ceil-div so every cell gets a thread)
    const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
    size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
    size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
    real *stims_currents_device;
    check_cuda_error(hipMalloc((void **) &stims_currents_device, stim_currents_size));
    check_cuda_error(hipMemcpy(stims_currents_device, stim_currents, stim_currents_size, hipMemcpyHostToDevice));
    // the array cells_to_solve is passed when we are using an adaptive mesh
    uint32_t *cells_to_solve_device = NULL;
    if(cells_to_solve != NULL) {
        check_cuda_error(hipMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
        check_cuda_error(hipMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, hipMemcpyHostToDevice));
    }
    hipLaunchKernelGGL(( solve_gpu) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps);
    check_cuda_error( hipPeekAtLastError() );
    check_cuda_error(hipFree(stims_currents_device));
    if(cells_to_solve_device) check_cuda_error(hipFree(cells_to_solve_device));
}
// One thread per volume: write the model's steady-state initial conditions
// (17 state variables: V, gates, and ion concentrations) into the pitched
// state array. Row i of 'sv' holds state variable i for all cells.
__global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes)
{
    // Thread ID
    int threadID = blockDim.x * blockIdx.x + threadIdx.x;
    if(threadID < num_volumes) {
        /* *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
        *((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M
        *((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H
        *((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J
        *((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1
        *((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2
        *((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs
        *((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S
        *((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R
        *((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D
        *((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F
        *((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa
        *((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G
        *((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai
        *((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR
        *((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai
        *((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki
        */
        // Elnaz's steady-state initial conditions (ordering matches the
        // commented-out default block above: V, m, h, j, Xr1, Xr2, Xs, s, r,
        // d, f, fCa, g, Cai, CaSR, Nai, Ki)
        real sv_sst[]={-86.7583701946981,0.00123873000842469,0.784340161140820,0.784215606615357,0.000169930428482162,0.487069003179197,0.00290075487318139,0.999998410707334,1.87318094176279e-08,1.84373365407432e-05,0.999775582067229,1.00670303904820,0.999986058074727,5.42971721960811e-05,0.634327980617936,8.41115593615013,141.093709811985}; for (uint32_t i = 0; i < NEQ; i++)
        *((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i];
    }
}
// Solving the model for each cell in the tissue matrix ni x nj
// One thread per cell: take num_steps explicit steps of size dt, calling
// RHS_gpu to compute the updated state and writing it back into the pitched
// state array. When cells_to_solve is non-NULL (adaptive mesh) it maps each
// thread to the state-array column it owns; otherwise threads map 1:1.
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
                          uint32_t *cells_to_solve, uint32_t num_cells_to_solve,
                          int num_steps)
{
    int threadID = blockDim.x * blockIdx.x + threadIdx.x;
    int sv_id;
    // Each thread solves one cell model
    if(threadID < num_cells_to_solve) {
        if(cells_to_solve)
            sv_id = cells_to_solve[threadID];
        else
            sv_id = threadID;
        real rDY[NEQ];
        for (int n = 0; n < num_steps; ++n) {
            RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt);
            // NOTE(review): this statement writes row 0 (the voltage), but
            // the loop below immediately overwrites the same address with
            // rDY[0], so its effect appears to be discarded -- confirm
            // against the CPU model before removing.
            *((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id);
            // rDY already holds updated state values, so store them directly.
            for(int i = 0; i < NEQ; i++) {
                *((real*)((char*)sv + pitch * i) + sv_id) = rDY[i];
            }
        }
    }
}
// Right-hand side of the ten Tusscher 2004 (epicardial) ventricular cell
// model for one cell (column threadID_ of the pitched state array 'sv').
// Computes all membrane currents and ion dynamics and writes the UPDATED
// state (not raw derivatives) into rDY_[0..16]: gates via the
// exponential/Rush-Larsen form INF-(INF-x)*exp(-dt/tau), voltage and
// concentrations via forward Euler. 'stim_current' is the external stimulus
// for this step, 'dt' the step size.
inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) {
    // State variables
    real svolt = *((real*)((char*)sv + pitch * 0) + threadID_);
    real sm = *((real*)((char*)sv + pitch * 1) + threadID_);
    real sh = *((real*)((char*)sv + pitch * 2) + threadID_);
    real sj = *((real*)((char*)sv + pitch * 3) + threadID_);
    real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_);
    real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_);
    real sxs = *((real*)((char*)sv + pitch * 6) + threadID_);
    real ss = *((real*)((char*)sv + pitch * 7) + threadID_);
    real sr = *((real*)((char*)sv + pitch * 8) + threadID_);
    real sd = *((real*)((char*)sv + pitch * 9) + threadID_);
    real sf = *((real*)((char*)sv + pitch * 10) + threadID_);
    real sfca = *((real*)((char*)sv + pitch * 11) + threadID_);
    real sg = *((real*)((char*)sv + pitch * 12) + threadID_);
    real Cai = *((real*)((char*)sv + pitch * 13) + threadID_);
    real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_);
    real Nai = *((real*)((char*)sv + pitch * 15) + threadID_);
    real Ki = *((real*)((char*)sv + pitch * 16) + threadID_);
    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;
    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;
    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;
    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF =(R*T)/F;
    //Cellular capacitance
    real CAPACITANCE=0.185;
    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    ///#ifdef EPI
    real Gks=0.245;
    ///#endif
    ///#ifdef ENDO
    /// real Gks=0.245;
    ///#endif
    ///#ifdef MCELL
    //real Gks=0.062;
    ///#endif
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    ///#ifdef EPI
    real Gto=0.294;
    ///#endif
    ///#ifdef ENDO
    /// real Gto=0.073;
    ///#endif
    ///#ifdef MCELL
    /// real Gto=0.294;
    ///#endif
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;
    // Setting Elnaz's parameters
    // Fitted conductance/flux overrides: the defaults above are replaced by
    // this per-model parameter set (order: GNa, GbNa, GCaL, GbCa, Gto, Gkr,
    // Gks, GK1, GpK, knak, knaca, Vmaxup, GpCa, arel, crel, Vleak).
    real parameters []={13.1091507301611,0.000227256021779263,0.000160878689291275,0.000673024254981826,0.282780279127790,0.152968591524632,0.160618022632678,3.47494398199649,0.0192974956098871,3.38770971695943,1099.68930724138,0.000549392613760007,0.234906883379890,0.0197346252955553,0.00432868966873845,5.16755137958392e-05};
    GNa=parameters[0];
    GbNa=parameters[1];
    GCaL=parameters[2];
    GbCa=parameters[3];
    Gto=parameters[4];
    Gkr=parameters[5];
    Gks=parameters[6];
    GK1=parameters[7];
    GpK=parameters[8];
    knak=parameters[9];
    knaca=parameters[10];
    Vmaxup=parameters[11];
    GpCa=parameters[12];
    real arel=parameters[13];
    real crel=parameters[14];
    real Vleak=parameters[15];
    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;
    real dNai;
    real dKi;
    real dCai;
    real dCaSR;
    real A;
    // real BufferFactorc;
    // real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;
    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;
    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    // real BufcKbufc=Bufc*Kbufc;
    // real Kbufcsquare=Kbufc*Kbufc;
    // real Kbufc2=2*Kbufc;
    // real BufsrKbufsr=Bufsr*Kbufsr;
    // const real Kbufsrsquare=Kbufsr*Kbufsr;
    // const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);
    real sItot;
    //Needed to compute currents (Nernst/reversal potentials)
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));
    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);
    //Determine total current
    (sItot) = IKr +
              IKs +
              IK1 +
              Ito +
              INa +
              IbNa +
              ICaL +
              IbCa +
              INaK +
              INaCa +
              IpCa +
              IpK +
              stim_current;
    //update concentrations (SR release/leak/uptake and cytosolic buffering)
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    /// A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
    Irel=A*sd*sg;
    ///Ileak=0.00008f*(CaSR-Cai);
    Ileak=Vleak*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    // analytic solution of the quadratic buffering equilibrium
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;
    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;
    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;
    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;
// Transient-outward gate kinetics differ per cell type (epi/endo/M cell).
#ifdef EPI
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+28)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));
    //Update gates (exponential integration toward the steady-state value)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
    // fCa and g gates may only relax (not grow) while depolarized (> -37 mV)
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37)
        sg=gold;
    //update voltage (forward Euler on the total membrane current)
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
| a6c71d440a8c27212da082bc174e7e9ba423d6f5.cu | #include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "ten_tusscher_2004_epi_S1_14.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) {
print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(cudaMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
kernel_set_model_inital_conditions <<<GRID, BLOCK_SIZE>>>(*sv, num_volumes);
check_cuda_error( cudaPeekAtLastError() );
cudaDeviceSynchronize();
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) {
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(cudaMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice));
//the array cells to solve is passed when we are using and adapative mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL) {
check_cuda_error(cudaMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice));
}
solve_gpu <<<GRID, BLOCK_SIZE>>>(dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps);
check_cuda_error( cudaPeekAtLastError() );
check_cuda_error(cudaFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes)
{
// Thread ID
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if(threadID < num_volumes) {
/* *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M
*((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H
*((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J
*((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs
*((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S
*((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R
*((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D
*((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F
*((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa
*((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G
*((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.7583701946981,0.00123873000842469,0.784340161140820,0.784215606615357,0.000169930428482162,0.487069003179197,0.00290075487318139,0.999998410707334,1.87318094176279e-08,1.84373365407432e-05,0.999775582067229,1.00670303904820,0.999986058074727,5.42971721960811e-05,0.634327980617936,8.41115593615013,141.093709811985}; for (uint32_t i = 0; i < NEQ; i++)
*((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i];
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve) {
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n) {
RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt);
*((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id);
for(int i = 0; i < NEQ; i++) {
*((real*)((char*)sv + pitch * i) + sv_id) = rDY[i];
}
}
}
}
inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) {
// State variables
real svolt = *((real*)((char*)sv + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
///#ifdef EPI
real Gks=0.245;
///#endif
///#ifdef ENDO
/// real Gks=0.245;
///#endif
///#ifdef MCELL
//real Gks=0.062;
///#endif
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
///#ifdef EPI
real Gto=0.294;
///#endif
///#ifdef ENDO
/// real Gto=0.073;
///#endif
///#ifdef MCELL
/// real Gto=0.294;
///#endif
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Setting Elnaz's parameters
real parameters []={13.1091507301611,0.000227256021779263,0.000160878689291275,0.000673024254981826,0.282780279127790,0.152968591524632,0.160618022632678,3.47494398199649,0.0192974956098871,3.38770971695943,1099.68930724138,0.000549392613760007,0.234906883379890,0.0197346252955553,0.00432868966873845,5.16755137958392e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
/// A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
///Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
510c80d4eba37d3ca216c6c7dc16bdd0da045576.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <vector>
// Here you can set the device ID that was assigned to you
#define MYDEVICE 0
__global__ void saxpy(unsigned int n, double a, double* x, double* y)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
y[i] = a * x[i] + y[i];
}
int main(void)
{
hipSetDevice(MYDEVICE);
// 1<<N is the equivalent to 2^N
unsigned int N = 20 * (1 << 20);
double *x, *y, *d_x, *d_y;
std::vector<double> x(N, 1.);
std::vector<double> y(N, 2.);
hipMalloc(&d_x, N * sizeof(double));
hipMalloc(&d_y, N * sizeof(double));
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipMemcpy(d_x, x.data(), N * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_y, y.data(), N * sizeof(double), hipMemcpyHostToDevice);
hipEventRecord(start);
hipLaunchKernelGGL(( saxpy), dim3((N + 511) / 512), dim3(512), 0, 0, N, 2.0, d_x, d_y);
hipEventRecord(stop);
hipMemcpy(y.data(), d_y, N * sizeof(double), hipMemcpyDeviceToHost);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
double maxError = 0.;
for (unsigned int i = 0; i < N; i++) {
maxError = max(maxError, abs(y[i] - 4.0));
}
hipFree(d_x);
hipFree(d_y);
}
| 510c80d4eba37d3ca216c6c7dc16bdd0da045576.cu | #include <iostream>
#include <vector>
// Here you can set the device ID that was assigned to you
#define MYDEVICE 0
__global__ void saxpy(unsigned int n, double a, double* x, double* y)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
y[i] = a * x[i] + y[i];
}
int main(void)
{
cudaSetDevice(MYDEVICE);
// 1<<N is the equivalent to 2^N
unsigned int N = 20 * (1 << 20);
double *x, *y, *d_x, *d_y;
std::vector<double> x(N, 1.);
std::vector<double> y(N, 2.);
cudaMalloc(&d_x, N * sizeof(double));
cudaMalloc(&d_y, N * sizeof(double));
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaMemcpy(d_x, x.data(), N * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y.data(), N * sizeof(double), cudaMemcpyHostToDevice);
cudaEventRecord(start);
saxpy<<<(N + 511) / 512, 512>>>(N, 2.0, d_x, d_y);
cudaEventRecord(stop);
cudaMemcpy(y.data(), d_y, N * sizeof(double), cudaMemcpyDeviceToHost);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
double maxError = 0.;
for (unsigned int i = 0; i < N; i++) {
maxError = max(maxError, abs(y[i] - 4.0));
}
cudaFree(d_x);
cudaFree(d_y);
}
|
66df4a210e01d0c8496271a80637ff90a828707a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "elim.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *A = NULL;
hipMalloc(&A, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
int index = 1;
int bsize = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
elim), dim3(gridBlock),dim3(threadBlock), 0, 0, A,n,index,bsize);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
elim), dim3(gridBlock),dim3(threadBlock), 0, 0, A,n,index,bsize);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
elim), dim3(gridBlock),dim3(threadBlock), 0, 0, A,n,index,bsize);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 66df4a210e01d0c8496271a80637ff90a828707a.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "elim.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *A = NULL;
cudaMalloc(&A, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
int index = 1;
int bsize = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
elim<<<gridBlock,threadBlock>>>(A,n,index,bsize);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
elim<<<gridBlock,threadBlock>>>(A,n,index,bsize);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
elim<<<gridBlock,threadBlock>>>(A,n,index,bsize);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
2a31d739fe588f28bd39a0951af4bc42e715eedc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESqS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
/* Template project which demonstrates the basics on how to setup a project
* example application.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <helper_cuda.h>
#define LOOPS 10000
#define USE_ALL_REGS
////////////////////////////////////////////////////////////////////////////////
//! Simple test kernel template for flops test
//! @param g_idata input data in global memory
//! @param g_odata output data in global memory
////////////////////////////////////////////////////////////////////////////////
__global__ void
testKernel( float* g_idata, float* g_odata, float value)
{
// global ID of the thread
int id = blockIdx.x*blockDim.x + threadIdx.x;
float result=value;
// We have 32 registers here
// Try adding more to see how performance varies
float val1 = g_idata[id];
float val2 = sin(result*result+2);
float val3 = sin(result*result+5);
float val4 = sin(result*result+45);
float val5 = sin(result*result+7892);
float val6 = sin(result*result+72);
float val7 = sin(result*result+2);
float val8 = sin(result*result+2);
float val9 = sin(result*result+2);
float val10 = sin(result*result+2);
float val11 = sin(result*result+2);
float val12 = sin(result*result+2);
float val13 = sin(result*result+2);
float val14 = sin(result*result+2);
float val15 = sin(result*result+2);
float val16 = sin(result*result+2);
float val17 = sin(result*result+2);
float val18 = sin(result*result+2);
float val19 = sin(result*result+2);
float val20 = sin(result*result+2);
float val21 = sin(result*result+2);
float val22 = sin(result*result+2);
float val23 = sin(result*result+2);
float val24 = sin(result*result+2);
float val25 = sin(result*result+2);
float val26 = sin(result*result+2);
float val27 = sin(result*result+2);
float val28 = sin(result*result+2);
float val29 = sin(result*result+2);
float val30 = sin(result*result+2);
float val31 = sin(result*result+2);
float val32 = sin(result*result+2);
/*float val33 = sin(result*result+33);
float val34 = sin(result*result+34);
float val35 = sin(result*result+35);
float val36 = sin(result*result+36);
float val37 = sin(result*result+37);
float val38 = sin(result*result+38);
float val39 = sin(result*result+39);
float val40 = sin(result*result+40);
float val41 = sin(result*result+41);
float val42 = sin(result*result+42);
float val43 = sin(result*result+43);
float val44 = sin(result*result+44);
float val45 = sin(result*result+45);
float val46 = sin(result*result+46);
float val47 = sin(result*result+47);
float val48 = sin(result*result+48);
float val49 = sin(result*result+49);
float val50 = sin(result*result+50);
float val51 = sin(result*result+51);
float val52 = sin(result*result+52);
float val53 = sin(result*result+53);
float val54 = sin(result*result+54);*/
for(int i=0; i<LOOPS; i++)
{
#ifdef USE_ALL_REGS
// Uses all of the above registers
float x = val1;
//x+=31*val2;
x+=val2;
x+=val3;
x+=val4;
x+=val5;
x+=val6;
x+=val7;
x+=val8;
x+=val9;
x+=val10;
x+=val11;
x+=val12;
x+=val13;
x+=val14;
x+=val15;
x+=val16;
x+=val17;
x+=val18;
x+=val19;
x+=val20;
x+=val21;
x+=val22;
x+=val23;
x+=val24;
x+=val25;
x+=val26;
x+=val27;
x+=val28;
x+=val29;
x+=val30;
x+=val31;
x+=val32;
/*x+=val33;
x+=val34;
x+=val35;
x+=val36;
x+=val37;
x+=val38;
x+=val39;
x+=val40;
x+=val41;
x+=val42;
x+=val43;
x+=val44;
x+=val45;
x+=val46;
x+=val47;
x+=val48;
x+=val49;
x+=val50;
x+=val51;
x+=val52;
x+=val53;
x+=val54;*/
result += x;
#else
// Uses only a few of the above registers
// but does the same math
// TODO:
//
// Write a sequence of operations that will perform the same math/computation
// as above, except it will use less registers.
//
// Hint: Notice that the values of val1,val2,...,val32 are the same.
// However, the compiler still uses all the registers in the above even though
// the values are the same. You can do the above with much less registers.
// Verify your code uses less registers by using the option 'ptxas=1' during compilation.
//
// Don't forget to comment out the "#define USE_ALL_REGS" line above.
float x = val1;
for(int i=0;i<31;i++){
x+=val2;
}
result += x;
#endif
__syncthreads();
}
// write final output
g_odata[id] = result;
}
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
runTest( argc, argv);
//cutilExit(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void
runTest( int argc, char** argv)
{
hipSetDevice(1);
//unsigned int timer = 0;
// cutCreateTimer( &timer));
//cutStartTimer( timer);
hipEvent_t start, stop;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
checkCudaErrors(hipEventRecord(start));
// adjust number of threads & blocks here
unsigned int num_threads_per_block = 128;
unsigned int num_blocks = 2048;
unsigned int num_threads = num_threads_per_block * num_blocks;
unsigned int mem_size = sizeof(float) * num_threads;
// allocate host memory
float* h_idata = (float*) malloc(mem_size);
// initalize the memory
for( unsigned int i = 0; i < num_threads; ++i)
{
h_idata[i] = (float) i;
}
// allocate device memory
float* d_idata;
checkCudaErrors(hipMalloc( (void**) &d_idata, mem_size));
// copy host memory to device
checkCudaErrors(hipMemcpy( d_idata, h_idata, mem_size,hipMemcpyHostToDevice));
// allocate device memory for result
float* d_odata;
checkCudaErrors(hipMalloc( (void**) &d_odata, mem_size));
// setup execution parameters
// adjust thread block sizes here
dim3 grid(num_blocks, 1, 1);
dim3 threads(num_threads_per_block, 1, 1);
// execute the kernel
hipLaunchKernelGGL(( testKernel), dim3(grid), dim3(threads) , 0, 0, d_idata, d_odata, 5);
// allocate mem for the result on host side
float* h_odata = (float*) malloc( mem_size);
// copy result from device to host
checkCudaErrors(hipMemcpy( h_odata, d_odata, sizeof( float) * num_threads, hipMemcpyDeviceToHost));
//cutStopTimer( timer);
checkCudaErrors(hipEventRecord(stop));
float milliseconds = -1;
hipDeviceSynchronize();
checkCudaErrors(hipEventElapsedTime(&milliseconds, start, stop));
printf( "h_odata= %f\n", *h_odata);
printf( "Processing time: %f (ms)\n", milliseconds);
// cleanup memory
free( h_idata);
free( h_odata);
hipFree(d_idata);
hipFree(d_odata);
}
| 2a31d739fe588f28bd39a0951af4bc42e715eedc.cu | /*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESqS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
/* Template project which demonstrates the basics on how to setup a project
* example application.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <helper_cuda.h>
#define LOOPS 10000
#define USE_ALL_REGS
////////////////////////////////////////////////////////////////////////////////
//! Simple test kernel template for flops test
//! @param g_idata input data in global memory
//! @param g_odata output data in global memory
////////////////////////////////////////////////////////////////////////////////
// Register-pressure demo kernel.
//
// Each thread loads one element of g_idata, precomputes a set of values, and
// accumulates the same sum LOOPS times. With USE_ALL_REGS defined the sum is
// spelled out across 32 named values so the compiler keeps ~32 registers
// live; the #else branch computes the same arithmetic (up to floating-point
// reassociation) with only a few registers: val7..val32 are all
// sin(value*value+2), i.e. equal to val2, so the 31 middle terms collapse to
// 27*val2 + val3 + val4 + val5 + val6. Compare register counts with
// -Xptxas -v (the old TODO branch summed 31*val2, dropping the distinct
// val3..val6 terms -- fixed here).
//
// Launch: 1-D grid; assumes gridDim.x*blockDim.x == element count of
// g_idata/g_odata (no bounds check -- the host sizes the buffers to match).
__global__ void
testKernel( float* g_idata, float* g_odata, float value)
{
// global ID of the thread
int id = blockIdx.x*blockDim.x + threadIdx.x;
float result=value;
// 32 live values; val2..val6 use distinct offsets, val7..val32 repeat val2.
float val1 = g_idata[id];
float val2 = sin(result*result+2);
float val3 = sin(result*result+5);
float val4 = sin(result*result+45);
float val5 = sin(result*result+7892);
float val6 = sin(result*result+72);
float val7 = sin(result*result+2);
float val8 = sin(result*result+2);
float val9 = sin(result*result+2);
float val10 = sin(result*result+2);
float val11 = sin(result*result+2);
float val12 = sin(result*result+2);
float val13 = sin(result*result+2);
float val14 = sin(result*result+2);
float val15 = sin(result*result+2);
float val16 = sin(result*result+2);
float val17 = sin(result*result+2);
float val18 = sin(result*result+2);
float val19 = sin(result*result+2);
float val20 = sin(result*result+2);
float val21 = sin(result*result+2);
float val22 = sin(result*result+2);
float val23 = sin(result*result+2);
float val24 = sin(result*result+2);
float val25 = sin(result*result+2);
float val26 = sin(result*result+2);
float val27 = sin(result*result+2);
float val28 = sin(result*result+2);
float val29 = sin(result*result+2);
float val30 = sin(result*result+2);
float val31 = sin(result*result+2);
float val32 = sin(result*result+2);
for(int loop = 0; loop < LOOPS; loop++)
{
#ifdef USE_ALL_REGS
// Spell the sum out over all 32 values so each stays live in a register.
float x = val1;
x+=val2;
x+=val3;
x+=val4;
x+=val5;
x+=val6;
x+=val7;
x+=val8;
x+=val9;
x+=val10;
x+=val11;
x+=val12;
x+=val13;
x+=val14;
x+=val15;
x+=val16;
x+=val17;
x+=val18;
x+=val19;
x+=val20;
x+=val21;
x+=val22;
x+=val23;
x+=val24;
x+=val25;
x+=val26;
x+=val27;
x+=val28;
x+=val29;
x+=val30;
x+=val31;
x+=val32;
result += x;
#else
// Same math with few registers: 27 of the 31 addends equal val2
// (val2 itself plus val7..val32), leaving val3..val6 as distinct terms.
float x = val1 + 27.0f*val2 + val3 + val4 + val5 + val6;
result += x;
#endif
__syncthreads();
}
// write final output
g_odata[id] = result;
}
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Program entry point: delegates to runTest, which allocates buffers,
// launches the benchmark kernel and reports timing. Falls off the end,
// so the process exit code is 0 per the C++ standard.
int
main( int argc, char** argv)
{
runTest( argc, argv);
//cutilExit(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
// Run the register-usage benchmark: fill a host buffer with 0..n-1, copy it
// to the device, launch testKernel once, time the whole sequence with CUDA
// events, and print the first output element plus the elapsed time.
//
// Fixes vs. the original: device selection no longer hard-fails on
// single-GPU machines, the kernel launch is error-checked, the stop event is
// synchronized before reading the elapsed time, and both events are
// destroyed on exit.
void
runTest( int argc, char** argv)
{
// Prefer device 1 (as the original did) but fall back to device 0 so
// single-GPU machines don't fail with an invalid-device error.
int deviceCount = 0;
checkCudaErrors(cudaGetDeviceCount(&deviceCount));
checkCudaErrors(cudaSetDevice(deviceCount > 1 ? 1 : 0));
cudaEvent_t start, stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
checkCudaErrors(cudaEventRecord(start));
// adjust number of threads & blocks here
unsigned int num_threads_per_block = 128;
unsigned int num_blocks = 2048;
unsigned int num_threads = num_threads_per_block * num_blocks;
unsigned int mem_size = sizeof(float) * num_threads;
// allocate and initialize host input: h_idata[i] = i
float* h_idata = (float*) malloc(mem_size);
for( unsigned int i = 0; i < num_threads; ++i)
{
h_idata[i] = (float) i;
}
// device input/output buffers
float* d_idata;
checkCudaErrors(cudaMalloc( (void**) &d_idata, mem_size));
checkCudaErrors(cudaMemcpy( d_idata, h_idata, mem_size,cudaMemcpyHostToDevice));
float* d_odata;
checkCudaErrors(cudaMalloc( (void**) &d_odata, mem_size));
// launch: one thread per element
dim3 grid(num_blocks, 1, 1);
dim3 threads(num_threads_per_block, 1, 1);
testKernel<<< grid, threads >>>( d_idata, d_odata, 5);
checkCudaErrors(cudaGetLastError()); // catch bad launch configurations
// copy result back (the blocking memcpy also synchronizes with the kernel)
float* h_odata = (float*) malloc( mem_size);
checkCudaErrors(cudaMemcpy( h_odata, d_odata, sizeof( float) * num_threads, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaEventRecord(stop));
checkCudaErrors(cudaEventSynchronize(stop)); // ensure stop has completed
float milliseconds = -1;
checkCudaErrors(cudaEventElapsedTime(&milliseconds, start, stop));
printf( "h_odata= %f\n", *h_odata);
printf( "Processing time: %f (ms)\n", milliseconds);
// cleanup memory and events
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
free( h_idata);
free( h_odata);
cudaFree(d_idata);
cudaFree(d_odata);
}
|
d6808305e6ebeda8bc2702b01f57941c2ee178e3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "base_membrane.h"
#include "force_kernels/common.h"
#include <mirheo/core/pvs/membrane_vector.h>
#include <mirheo/core/pvs/views/ov.h>
#include <mirheo/core/utils/cuda_common.h>
#include <mirheo/core/utils/kernel_launch.h>
#include <mirheo/core/utils/macros.h>
namespace mirheo
{
namespace BaseMembraneInteractionKernels
{
// One thread block per membrane object: threads stride over the mesh
// triangles accumulating each triangle's area and signed volume; each warp
// then reduces its partial sums and its first lane atomically adds them into
// the per-object accumulator view.area_volumes[objId].
__global__ void computeAreaAndVolume(OVviewWithAreaVolume view, MeshView mesh)
{
const int objId = blockIdx.x;
// vertex offset of this object inside the flat particle array
const int offset = objId * mesh.nvertices;
real2 a_v = make_real2(0.0_r); // x: area, y: signed volume
for (int i = threadIdx.x; i < mesh.ntriangles; i += blockDim.x) {
const int3 ids = mesh.triangles[i];
const auto v0 = make_mReal3(make_real3( view.readPosition(offset + ids.x) ));
const auto v1 = make_mReal3(make_real3( view.readPosition(offset + ids.y) ));
const auto v2 = make_mReal3(make_real3( view.readPosition(offset + ids.z) ));
a_v.x += triangleArea(v0, v1, v2);
a_v.y += triangleSignedVolume(v0, v1, v2);
}
// fold the per-thread partials within each warp; lane 0 publishes them
a_v = warpReduce( a_v, [] (real a, real b) { return a+b; } );
if (laneId() == 0)
atomicAdd(&view.area_volumes[objId], a_v);
}
} // namespace BaseMembraneInteractionKernels
// Forwards construction to the Interaction base class.
BaseMembraneInteraction::BaseMembraneInteraction(const MirState *state, const std::string& name) :
Interaction(state, name)
{}
// Snapshot-loading constructor; forwards to the Interaction base class.
BaseMembraneInteraction::BaseMembraneInteraction(const MirState *state, Loader& loader, const ConfigObject& config) :
Interaction(state, loader, config)
{}
BaseMembraneInteraction::~BaseMembraneInteraction() = default;
// Internal membrane forces are defined only within a single MembraneVector:
// validates that both particle vectors are the same MembraneVector and
// registers the per-object area/volume channel the kernels need.
void BaseMembraneInteraction::setPrerequisites(ParticleVector *pv1, ParticleVector *pv2,
__UNUSED CellList *cl1, __UNUSED CellList *cl2)
{
if (pv1 != pv2)
die("Internal membrane forces can't be computed between two different particle vectors");
if (auto mv = dynamic_cast<MembraneVector*>(pv1))
{
mv->requireDataPerObject<real2>(ChannelNames::areaVolumes, DataManager::PersistenceMode::None);
}
else
{
die("Internal membrane forces can only be computed with a MembraneVector");
}
}
// Membrane forces are purely internal to each object, so there is nothing to
// compute between local and halo particles: log and return.
void BaseMembraneInteraction::halo(ParticleVector *pv1,
__UNUSED ParticleVector *pv2,
__UNUSED CellList *cl1,
__UNUSED CellList *cl2,
__UNUSED hipStream_t stream)
{
debug("Not computing internal membrane forces between local and halo membranes of '%s'",
pv1->getCName());
}
// This interaction acts within a single object vector.
bool BaseMembraneInteraction::isSelfObjectInteraction() const
{
return true;
}
// Recompute per-object area and volume for all local membranes on `stream`:
// validate that the object size matches the mesh vertex count, clear the
// accumulation channel, then launch one 128-thread block per object.
void BaseMembraneInteraction::_precomputeQuantities(MembraneVector *mv, hipStream_t stream)
{
if (mv->getObjectSize() != mv->mesh->getNvertices())
die("Object size of '%s' (%d) and number of vertices (%d) mismatch",
mv->getCName(), mv->getObjectSize(), mv->mesh->getNvertices());
debug("Computing areas and volumes for %d cells of '%s'",
mv->local()->getNumObjects(), mv->getCName());
OVviewWithAreaVolume view(mv, mv->local());
MembraneMeshView mesh(static_cast<MembraneMesh*>(mv->mesh.get()));
// zero the accumulators before the kernel's atomicAdds
mv->local()
->dataPerObject.getData<real2>(ChannelNames::areaVolumes)
->clearDevice(stream);
constexpr int nthreads = 128;
SAFE_KERNEL_LAUNCH(
BaseMembraneInteractionKernels::computeAreaAndVolume,
view.nObjects, nthreads, 0, stream,
view, mesh);
}
} // namespace mirheo
| d6808305e6ebeda8bc2702b01f57941c2ee178e3.cu | #include "base_membrane.h"
#include "force_kernels/common.h"
#include <mirheo/core/pvs/membrane_vector.h>
#include <mirheo/core/pvs/views/ov.h>
#include <mirheo/core/utils/cuda_common.h>
#include <mirheo/core/utils/kernel_launch.h>
#include <mirheo/core/utils/macros.h>
namespace mirheo
{
namespace BaseMembraneInteractionKernels
{
// One thread block per membrane object: threads stride over the mesh
// triangles accumulating each triangle's area and signed volume; each warp
// then reduces its partial sums and its first lane atomically adds them into
// the per-object accumulator view.area_volumes[objId].
__global__ void computeAreaAndVolume(OVviewWithAreaVolume view, MeshView mesh)
{
const int objId = blockIdx.x;
// vertex offset of this object inside the flat particle array
const int offset = objId * mesh.nvertices;
real2 a_v = make_real2(0.0_r); // x: area, y: signed volume
for (int i = threadIdx.x; i < mesh.ntriangles; i += blockDim.x) {
const int3 ids = mesh.triangles[i];
const auto v0 = make_mReal3(make_real3( view.readPosition(offset + ids.x) ));
const auto v1 = make_mReal3(make_real3( view.readPosition(offset + ids.y) ));
const auto v2 = make_mReal3(make_real3( view.readPosition(offset + ids.z) ));
a_v.x += triangleArea(v0, v1, v2);
a_v.y += triangleSignedVolume(v0, v1, v2);
}
// fold the per-thread partials within each warp; lane 0 publishes them
a_v = warpReduce( a_v, [] (real a, real b) { return a+b; } );
if (laneId() == 0)
atomicAdd(&view.area_volumes[objId], a_v);
}
} // namespace BaseMembraneInteractionKernels
// Forwards construction to the Interaction base class.
BaseMembraneInteraction::BaseMembraneInteraction(const MirState *state, const std::string& name) :
Interaction(state, name)
{}
// Snapshot-loading constructor; forwards to the Interaction base class.
BaseMembraneInteraction::BaseMembraneInteraction(const MirState *state, Loader& loader, const ConfigObject& config) :
Interaction(state, loader, config)
{}
BaseMembraneInteraction::~BaseMembraneInteraction() = default;
// Internal membrane forces are defined only within a single MembraneVector:
// validates that both particle vectors are the same MembraneVector and
// registers the per-object area/volume channel the kernels need.
void BaseMembraneInteraction::setPrerequisites(ParticleVector *pv1, ParticleVector *pv2,
__UNUSED CellList *cl1, __UNUSED CellList *cl2)
{
if (pv1 != pv2)
die("Internal membrane forces can't be computed between two different particle vectors");
if (auto mv = dynamic_cast<MembraneVector*>(pv1))
{
mv->requireDataPerObject<real2>(ChannelNames::areaVolumes, DataManager::PersistenceMode::None);
}
else
{
die("Internal membrane forces can only be computed with a MembraneVector");
}
}
// Membrane forces are purely internal to each object, so there is nothing to
// compute between local and halo particles: log and return.
void BaseMembraneInteraction::halo(ParticleVector *pv1,
__UNUSED ParticleVector *pv2,
__UNUSED CellList *cl1,
__UNUSED CellList *cl2,
__UNUSED cudaStream_t stream)
{
debug("Not computing internal membrane forces between local and halo membranes of '%s'",
pv1->getCName());
}
// This interaction acts within a single object vector.
bool BaseMembraneInteraction::isSelfObjectInteraction() const
{
return true;
}
// Recompute per-object area and volume for all local membranes on `stream`:
// validate that the object size matches the mesh vertex count, clear the
// accumulation channel, then launch one 128-thread block per object.
void BaseMembraneInteraction::_precomputeQuantities(MembraneVector *mv, cudaStream_t stream)
{
if (mv->getObjectSize() != mv->mesh->getNvertices())
die("Object size of '%s' (%d) and number of vertices (%d) mismatch",
mv->getCName(), mv->getObjectSize(), mv->mesh->getNvertices());
debug("Computing areas and volumes for %d cells of '%s'",
mv->local()->getNumObjects(), mv->getCName());
OVviewWithAreaVolume view(mv, mv->local());
MembraneMeshView mesh(static_cast<MembraneMesh*>(mv->mesh.get()));
// zero the accumulators before the kernel's atomicAdds
mv->local()
->dataPerObject.getData<real2>(ChannelNames::areaVolumes)
->clearDevice(stream);
constexpr int nthreads = 128;
SAFE_KERNEL_LAUNCH(
BaseMembraneInteractionKernels::computeAreaAndVolume,
view.nObjects, nthreads, 0, stream,
view, mesh);
}
} // namespace mirheo
|
2789f5074377e1f4920bc47c8cdbf7bbcb35a9ca.hip | // !!! This is a file automatically generated by hipify!!!
// cd /home/hork/cuda-workspace/CudaSHA256/Debug/files
// time ~/Dropbox/FIIT/APS/Projekt/CpuSHA256/a.out -f ../file-list
// time ../CudaSHA256 -f ../file-list
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <hip/hip_runtime.h>
#include <memory.h>
#include <iostream>
#include <dirent.h>
#include <ctype.h>
#include <time.h>
#include "sha256.h"
#define N 10
#define M 100
#define MAXLOOP M/N
//#define N 6
#define checkCudaErrors(x) \
{ \
hipGetLastError(); \
x; \
hipError_t err = hipGetLastError(); \
if (err != hipSuccess) \
printf("GPU: hipError_t %d (%s)\n", err, hipGetErrorString(err)); \
}
// datatypes -----------------------------------------------------------------------
#ifndef DATATYPES
#define DATATYPES
// Data types
typedef unsigned char BYTE; // 8-bit byte
typedef unsigned int WORD; // 32-bit word
#endif
// sha256 ----------------------------------------------------------------------------------------
#ifndef PBKDF2
#define PBKDF2
#include <iostream>
#include "sha256.h"
#define ipad_elm 0x36363636
#define opad_elm 0x5c5c5c5c
#define SUM(a,b) (a+b) & 0xffffffff
// #define SALSA_MIX(destination ,a1, a2, b) (destination ^ (((SUM(a1,a2) << b) & 0xffffffff) | ((SUM(a1,a2) >> (32-b))&0xffffffff)))
#define SALSA_MIX(destination ,a1, a2, b) (destination ^ (ROTLEFT(SUM(a1,a2),b)))
const WORD IPAD[8] = {ipad_elm, ipad_elm, ipad_elm, ipad_elm, ipad_elm, ipad_elm, ipad_elm, ipad_elm}; // 256-bit 363636...36
const WORD OPAD[8] = {opad_elm, opad_elm, opad_elm, opad_elm, opad_elm, opad_elm, opad_elm, opad_elm}; // 256-bit 5c5c5c...5c
// Function in scrypt
WORD* hmac(SHA256_CTX *ctx, WORD *salt, unsigned long salt_len, WORD *message, unsigned long message_len);
WORD* pbkdf2(SHA256_CTX *ctx, WORD *block, unsigned long block_len, int dklenP);
WORD* pbkdf2_2nd(SHA256_CTX *ctx, WORD *rm_out, unsigned long rm_out_len, WORD *block, unsigned long block_len, int dklenP);
void salsa_round(WORD *x1, WORD *x2, WORD *x3, WORD *x4);
WORD * salsa20_8(WORD *x);
WORD * blockmix(WORD *block);
WORD * romix(WORD *block, int N);
WORD * scrypt(SHA256_CTX *ctx, WORD *block, unsigned long block_len, int dklenP1, int N, int dklenP2);
#endif
static const WORD k[64] = {0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, \
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, \
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, \
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, \
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, \
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, \
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, \
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2};
// Create init state for SHA-256
// Reset the SHA-256 context: zero the buffered-byte and bit counters and
// load the standard initial hash values h0..h7 (from sha256.h).
void sha256_init(SHA256_CTX *ctx)
{
ctx->datalen = 0;
ctx->bitlen = 0;
ctx->state[0] = h0;
ctx->state[1] = h1;
ctx->state[2] = h2;
ctx->state[3] = h3;
ctx->state[4] = h4;
ctx->state[5] = h5;
ctx->state[6] = h6;
ctx->state[7] = h7;
}
// SHA-256 compression function: absorb one 64-byte block into ctx->state.
// Implements the FIPS 180-4 message schedule (m[] is W in the spec) and the
// 64-round main loop using the SIG0/SIG1/EP0/EP1/CH/MAJ macros from sha256.h.
void sha256_transform(SHA256_CTX *ctx, const BYTE data[])
{
// m is W in hardware design
WORD a, b, c, d, e, f, g, h, i, j, t1, t2, m[64];
// Message schedule, first 16 words: big-endian load of the input block.
for (i = 0, j = 0; i < 16; ++i, j += 4)
m[i] = (data[j] << 24) | (data[j + 1] << 16) | (data[j + 2] << 8) | (data[j + 3]);
// Message schedule, words 16..63: sigma-expansion of earlier words.
for ( ; i < 64; ++i)
m[i] = SIG1(m[i - 2]) + m[i - 7] + SIG0(m[i - 15]) + m[i - 16];
// Load the working variables from the running state.
a = ctx->state[0];
b = ctx->state[1];
c = ctx->state[2];
d = ctx->state[3];
e = ctx->state[4];
f = ctx->state[5];
g = ctx->state[6];
h = ctx->state[7];
// 64 rounds of the compression function.
for (i = 0; i < 64; ++i) {
t1 = h + EP1(e) + CH(e,f,g) + k[i] + m[i];
t2 = EP0(a) + MAJ(a,b,c);
h = g;
g = f;
f = e;
e = d + t1;
d = c;
c = b;
b = a;
a = t1 + t2;
}
// Davies-Meyer feed-forward: add the working variables back into the state.
ctx->state[0] += a;
ctx->state[1] += b;
ctx->state[2] += c;
ctx->state[3] += d;
ctx->state[4] += e;
ctx->state[5] += f;
ctx->state[6] += g;
ctx->state[7] += h;
}
// the total length of the message has to be specified
// Absorb `len` bytes of message data, buffering into 64-byte blocks and
// running the compression function whenever a block fills.
// NOTE(review): the loop counter is a 32-bit WORD while `len` is size_t, so
// inputs of 4 GiB or more would loop forever -- fine for this miner's short
// messages, but confirm if reused elsewhere.
void sha256_update(SHA256_CTX *ctx, const BYTE data[], size_t len)
{
WORD i;
for (i = 0; i < len; ++i){
ctx->data[ctx->datalen] = data[i]; // stage the byte into the current block
ctx->datalen++;
// a full 64-byte (512-bit) block is ready: compress it
if(ctx->datalen == 64){
sha256_transform(ctx, ctx->data);
ctx->bitlen += 512; // account for the consumed block
ctx->datalen = 0;
}
}
}
// this function processes for the last block -> after all real data is browsed
// Finalize the hash: apply FIPS 180-4 padding (0x80, zeros, 64-bit message
// length in bits), compress the final block(s), and write the eight state
// words to `hash`. The words are copied as-is -- any byte-order conversion
// is left to the caller (see endian_full / words_to_hex_string).
void sha256_final(SHA256_CTX *ctx, WORD *hash){
WORD i;
// padding is processed from here
i = ctx->datalen;
if (ctx->datalen < 56){
// the 0x80 marker and the 8-byte length both fit in this block
ctx->data[i++] = 0x80;
// zero-fill up to the length field at byte 56
while (i<56)
{
ctx->data[i++]=0x00;
}
}
else{
// not enough room for the length field: pad out this block...
ctx->data[i++]=0x80;
while (i<64){
ctx->data[i++]=0x00;
}
// ...compress it (it is not the last block)...
sha256_transform(ctx, ctx->data);
// ...and start a fresh all-zero block for the length field
memset(ctx->data, 0, 56);
}
// Append the total message length in bits (big-endian) and transform.
ctx->bitlen += ctx->datalen * 8;
ctx->data[63] = ctx->bitlen;
ctx->data[62] = ctx->bitlen >> 8;
ctx->data[61] = ctx->bitlen >> 16;
ctx->data[60] = ctx->bitlen >> 24;
ctx->data[59] = ctx->bitlen >> 32;
ctx->data[58] = ctx->bitlen >> 40;
ctx->data[57] = ctx->bitlen >> 48;
ctx->data[56] = ctx->bitlen >> 56;
// end padding
sha256_transform(ctx, ctx->data);
// Copy the final state words straight to the output. (The original comment
// claimed a byte reversal happens here; it does not -- the copy is
// word-granular and callers handle endianness separately.)
hash[0] = ctx->state[0];
hash[1] = ctx->state[1];
hash[2] = ctx->state[2];
hash[3] = ctx->state[3];
hash[4] = ctx->state[4];
hash[5] = ctx->state[5];
hash[6] = ctx->state[6];
hash[7] = ctx->state[7];
}
// Hash a hex string and return its digest as a 64-char lowercase hex string.
// Returns a pointer to a static buffer: each call overwrites the previous
// result and the function is not thread-safe (unchanged contract).
// Fixes vs. the original: the `data` buffer was leaked on every call, the
// hash was needlessly heap-allocated (64 words for an 8-word digest), and
// the returned buffer had no NUL terminator after the first call filled all
// 64 characters -- it is now 65 bytes with a guaranteed terminator.
char * sha256(SHA256_CTX *ctx, char hex_str_in[], unsigned long hex_str_len){
unsigned long datalen = hex_str_len/2;
BYTE *data=new BYTE[datalen]();
WORD hash_w[8];
static char *out = new char[65](); // 64 hex chars + NUL, reused across calls
hex_string_to_bytes(hex_str_in, hex_str_len, data);
sha256_init(ctx);
sha256_update(ctx, data, datalen);
sha256_final(ctx,hash_w);
words_to_hex_string(hash_w, 8, out, 64);
out[64] = '\0';
delete[] data; // was leaked in the original
return out;
}
// Hash a hex string and write the digest into hash_w[8] (word form).
// Fix vs. the original: the temporary `data` buffer was leaked on every call.
void sha256_w(SHA256_CTX *ctx, char hex_str_in[], unsigned long hex_str_len, WORD *hash_w){
unsigned long datalen = hex_str_len/2;
BYTE *data=new BYTE[datalen]();
hex_string_to_bytes(hex_str_in, hex_str_len, data);
sha256_init(ctx);
sha256_update(ctx, data, datalen);
sha256_final(ctx, hash_w);
delete[] data; // was leaked in the original
}
// Hash a raw byte buffer into hash_w[8]: init -> absorb -> finalize.
void sha256_in_bytes(SHA256_CTX *ctx, BYTE *bytes_in, unsigned long bytes_in_len, WORD *hash_w){
sha256_init(ctx);
sha256_update(ctx, bytes_in, bytes_in_len);
sha256_final(ctx, hash_w);
}
// Hash a word array into hash_w[8]: serialize each word big-endian (most
// significant byte first) into a byte buffer, then hash the bytes.
// NOTE(review): `bytes_in` is a variable-length array -- non-standard C++
// and stack-allocated, so very large inputs would overflow the stack.
void sha256_in_words(SHA256_CTX *ctx, WORD *words_in, unsigned long words_in_len, WORD *hash_w){
unsigned bytes_in_len = words_in_len * 4;
BYTE bytes_in[bytes_in_len];
for (int i = 0; i<words_in_len; i++){
bytes_in[4*i] = words_in[i] >> 24;
bytes_in[4*i+1] = words_in[i] >> 16;
bytes_in[4*i+2] = words_in[i] >> 8;
bytes_in[4*i+3] = words_in[i];
}
sha256_init(ctx);
sha256_update(ctx, bytes_in, bytes_in_len);
sha256_final(ctx, hash_w);
}
// utils ------------------------------------------------------------------------------------------
#ifndef UTILS
#define UTILS
#include "datatypes.h"
#include <iostream>
#include <algorithm>
#include <stddef.h>
BYTE hex_char_to_byte(char hex_char);
void hex_string_to_bytes(char hex_str_in[], unsigned long hex_str_len, BYTE bytes_out[]);
void half_byte_to_hex(BYTE half_byte_in, char hex);
void word_to_hex_eight(WORD word_in, char *hex_eight, unsigned long hex_eight_size);
void words_to_hex_string(WORD words_in[], unsigned long words_len, char hex_str[], unsigned long hex_str_len);
void hex_string_to_words(char hex_str_in[], unsigned long hex_str_len, WORD words_out[]);
void add_two_words_array_512_bit(WORD *a, WORD *b);
void print_words_inline(WORD *w, unsigned long w_len);
void print_words_multiline(WORD *w, unsigned long w_len);
void add_two_words_array_512_bit_with_carry(WORD *a, WORD *b);
void endian_cvt(WORD *w);
void endian_full(WORD *w, unsigned long w_len);
void little_endian(char *c, unsigned long w_len);
#endif
// ----------------------- Utils functions ------------------------
// Convert one hex digit ('0'-'9', 'a'-'f', 'A'-'F') to its numeric value.
// Any other character maps to 0, matching the original behavior.
BYTE hex_char_to_byte(char hex_char){
    if (hex_char >= '0' && hex_char <= '9')
        return hex_char - '0';
    if (hex_char >= 'a' && hex_char <= 'f')
        return 10 + (hex_char - 'a');
    if (hex_char >= 'A' && hex_char <= 'F')
        return 10 + (hex_char - 'A');
    return 0;
}
void hex_string_to_bytes(char hex_str_in[], unsigned long hex_str_len, BYTE bytes_out[]){
for (int i = 0; i<hex_str_len-1; i+=2){
bytes_out[i/2] = ((hex_char_to_byte(hex_str_in[i])) << 4) | (hex_char_to_byte(hex_str_in[i+1]));
}
}
void hex_string_to_words(char hex_str_in[], unsigned long hex_str_len, WORD words_out[]){
for (int i = 0; i<hex_str_len-1; i+=8){
words_out[i/8] = (\
hex_char_to_byte(hex_str_in[i])<<28|\
(hex_char_to_byte(hex_str_in[i+1])<<24 & 0x0f000000)|\
(hex_char_to_byte(hex_str_in[i+2])<<20 & 0x00f00000)|\
(hex_char_to_byte(hex_str_in[i+3])<<16 & 0x000f0000)|\
(hex_char_to_byte(hex_str_in[i+4])<<12 & 0x0000f000)|\
(hex_char_to_byte(hex_str_in[i+5])<<8 & 0x00000f00)|\
(hex_char_to_byte(hex_str_in[i+6])<<4 & 0x000000f0)|\
(hex_char_to_byte(hex_str_in[i+7]) & 0x0000000f)\
);
// printf("%08x %d\n", words_out[i/8], i/8);
}
}
// Write the lowercase hex digit for the low nibble of half_byte_in to *hex.
// Fix vs. the original: after masking with 0x0f the value is always in
// [0, 15], so the `< 16` and `>= 0` checks and the "must be in range"
// error branch were unreachable dead code; they have been removed. The
// observable behavior (the character written) is unchanged.
void half_byte_to_hex(BYTE half_byte_in, char *hex){
    BYTE v = half_byte_in & 0x0f;
    *hex = (v >= 10) ? ('a' + (v - 10)) : ('0' + v);
}
// Render a 32-bit word as 8 lowercase hex characters (no NUL terminator),
// most significant nibble first. Requires hex_eight_size == 8; otherwise
// prints an error and writes nothing.
// NOTE(review): the error message text ("length of two characters") is
// stale -- it describes an older pair-based API, not the 8-char contract.
void word_to_hex_eight(WORD word_in, char *hex_eight, unsigned long hex_eight_size){
if(hex_eight_size==8){
half_byte_to_hex(word_in>>28, &hex_eight[0]);
half_byte_to_hex(word_in>>24, &hex_eight[1]);
half_byte_to_hex(word_in>>20, &hex_eight[2]);
half_byte_to_hex(word_in>>16, &hex_eight[3]);
half_byte_to_hex(word_in>>12, &hex_eight[4]);
half_byte_to_hex(word_in>>8, &hex_eight[5]);
half_byte_to_hex(word_in>>4, &hex_eight[6]);
half_byte_to_hex(word_in, &hex_eight[7]);
// printf("%c", hex_eight[0]);
// printf("%d", word_in>>24);
return;
}
printf("The hex_pair must have the length of two characters: %d\n", (int)hex_eight_size);
}
// Render words_len 32-bit words as a hex string of exactly 8*words_len
// characters (no NUL terminator is written). Requires
// hex_str_len == 8*words_len; otherwise prints an error and writes nothing.
// NOTE(review): the error message mentions "4*bytes_len" -- stale wording,
// the actual requirement is 8 characters per word.
void words_to_hex_string(WORD *words_in, unsigned long words_len, char hex_str[], unsigned long hex_str_len){
char hex_eight[8];
if(hex_str_len == 8*words_len){
for (int i = 0; i<words_len; ++i){
// printf("\n w: %08x", words_in[i]);
word_to_hex_eight(words_in[i], hex_eight, sizeof(hex_eight));
hex_str[8*i] = hex_eight[0];
hex_str[8*i+1] = hex_eight[1];
hex_str[8*i+2] = hex_eight[2];
hex_str[8*i+3] = hex_eight[3];
hex_str[8*i+4] = hex_eight[4];
hex_str[8*i+5] = hex_eight[5];
hex_str[8*i+6] = hex_eight[6];
hex_str[8*i+7] = hex_eight[7];
// printf("%c \n", hex_eight[7]);
}
// printf("\n%s", hex_str);
return;
}
printf("The hex_string must have the lenght of 4*bytes_len: %d\n", (int)hex_str_len);
}
// Element-wise addition (mod 2^32 per word) of two 16-word arrays,
// accumulating into `a`. No carry propagates between words -- this is the
// Salsa20 feed-forward addition; see the _with_carry variant for full
// 512-bit integer addition.
void add_two_words_array_512_bit(WORD *a, WORD *b){
    for (int idx = 0; idx < 16; ++idx)
        a[idx] += b[idx];
}
// Full 512-bit addition a += b over 16 words stored most-significant word
// first (a[0] is the top word). Each word is added in two 16-bit halves so
// the carry can be threaded through `sum`/`sum1`; the carry out of a[0] is
// discarded (result is mod 2^512).
void add_two_words_array_512_bit_with_carry(WORD *a, WORD *b){
WORD sum = 0;
WORD sum1 = 0;
for (int i = 15; i>=0; i--){
// low half: carry-in comes from the previous word's high-half sum
sum = ((a[i]&0x0000ffff)+(b[i]&0x0000ffff)+(sum1>>16));
// high half: carry-in comes from this word's low-half sum
sum1 = ((a[i]>>16)+(b[i]>>16)+(sum>>16));
a[i]= (sum & 0x0000ffff) + (sum1<<16);
}
}
// Debug helper: print w_len words as one continuous hex string,
// surrounded by newlines.
void print_words_inline(WORD *w, unsigned long w_len){
    printf("\n");
    for (unsigned long idx = 0; idx < w_len; ++idx)
        printf("%08x", w[idx]);
    printf("\n");
}
// Debug helper: print w_len words in hex, one per line,
// preceded and followed by a blank line.
void print_words_multiline(WORD *w, unsigned long w_len){
    printf("\n");
    for (unsigned long idx = 0; idx < w_len; ++idx)
        printf("%08x\n", w[idx]);
    printf("\n");
}
// Byte-swap a 32-bit word in place (big <-> little endian).
void endian_cvt(WORD *w){
    WORD v = *w;
    *w = (v << 24) | ((v << 8) & 0x00ff0000) | ((v >> 8) & 0x0000ff00) | (v >> 24);
}
// Byte-swap each of the first w_len words of the array in place.
void endian_full(WORD *w, unsigned long w_len){
    for (unsigned long idx = 0; idx < w_len; ++idx)
        endian_cvt(&w[idx]);
}
// Reverse a hex string two characters (one byte) at a time, in place:
// "12345678" -> "78563412". w_len must be even.
// NOTE(review): writes a terminator at c[w_len], so the caller's buffer must
// hold at least w_len + 1 chars (all call sites in this file do).
// Fix vs. the original: the non-standard variable-length scratch array is
// replaced by an in-place pairwise swap, which also behaves safely for
// w_len == 0. Results are identical for every input.
void little_endian(char *c, unsigned long w_len){
    if (w_len >= 4) {
        for (unsigned long i = 0, j = w_len - 2; i < j; i += 2, j -= 2) {
            char t0 = c[i];     c[i]     = c[j];     c[j]     = t0;
            char t1 = c[i + 1]; c[i + 1] = c[j + 1]; c[j + 1] = t1;
        }
    }
    c[w_len] = '\0';
}
// scrypt --------------------------------------------------------------------------------------------------------------------------
#ifndef PBKDF2
#define PBKDF2
#include <iostream>
#include "sha256.h"
#define ipad_elm 0x36363636
#define opad_elm 0x5c5c5c5c
#define SUM(a,b) (a+b) & 0xffffffff
// #define SALSA_MIX(destination ,a1, a2, b) (destination ^ (((SUM(a1,a2) << b) & 0xffffffff) | ((SUM(a1,a2) >> (32-b))&0xffffffff)))
#define SALSA_MIX(destination ,a1, a2, b) (destination ^ (ROTLEFT(SUM(a1,a2),b)))
const WORD IPAD[8] = {ipad_elm, ipad_elm, ipad_elm, ipad_elm, ipad_elm, ipad_elm, ipad_elm, ipad_elm}; // 256-bit 363636...36
const WORD OPAD[8] = {opad_elm, opad_elm, opad_elm, opad_elm, opad_elm, opad_elm, opad_elm, opad_elm}; // 256-bit 5c5c5c...5c
// Function in scrypt
WORD* hmac(SHA256_CTX *ctx, WORD *salt, unsigned long salt_len, WORD *message, unsigned long message_len);
WORD* pbkdf2(SHA256_CTX *ctx, WORD *block, unsigned long block_len, int dklenP);
WORD* pbkdf2_2nd(SHA256_CTX *ctx, WORD *rm_out, unsigned long rm_out_len, WORD *block, unsigned long block_len, int dklenP);
void salsa_round(WORD *x1, WORD *x2, WORD *x3, WORD *x4);
WORD * salsa20_8(WORD *x);
WORD * blockmix(WORD *block);
WORD * romix(WORD *block, int N);
WORD * scrypt(SHA256_CTX *ctx, WORD *block, unsigned long block_len, int dklenP1, int N, int dklenP2);
#endif
// Keyed hash used by pbkdf2 (HMAC-SHA256 variant where the key is
// SHA256(message)):
//   inner = SHA256((key^IPAD) || IPAD || salt)
//   outer = SHA256((key^OPAD) || OPAD || inner)
// Returns a pointer to a static 8-word digest: each call overwrites the
// previous result and the function is not thread-safe (unchanged contract).
//
// Fixes vs. the original:
//  * `khash` was malloc'd and leaked on every call -> stack array.
//  * the loop assembling the outer-hash input ran to 16 + salt_len instead
//    of 16 + 8, reading past ihash[] and writing past in_ohash[] whenever
//    salt_len != 8 -- which is the case for every pbkdf2 call site here.
WORD* hmac(SHA256_CTX *ctx, WORD *salt, unsigned long salt_len, WORD *message, unsigned long message_len){
    WORD khash[8];
    sha256_in_words(ctx, message, message_len, khash);
    // key^ipad / key^opad, each padded with the raw pad constant to 16 words
    WORD ixor[16];
    WORD oxor[16];
    for (int i = 0; i < 8; i++) {
        ixor[i]     = IPAD[i] ^ khash[i];
        ixor[i + 8] = IPAD[i];
        oxor[i]     = OPAD[i] ^ khash[i];
        oxor[i + 8] = OPAD[i];
    }
    // inner hash: SHA256(ixor || salt)
    WORD in_ihash[16 + salt_len];
    for (unsigned long i = 0; i < 16; i++)
        in_ihash[i] = ixor[i];
    for (unsigned long i = 0; i < salt_len; i++)
        in_ihash[16 + i] = salt[i];
    WORD ihash[8];
    sha256_in_words(ctx, in_ihash, 16 + salt_len, ihash);
    // outer hash: SHA256(oxor || ihash) -- exactly 16 + 8 = 24 input words
    WORD in_ohash[24];
    for (int i = 0; i < 16; i++)
        in_ohash[i] = oxor[i];
    for (int i = 0; i < 8; i++)
        in_ohash[16 + i] = ihash[i];
    static WORD ohash[8];
    sha256_in_words(ctx, in_ohash, 24, ohash);
    return ohash;
}
// Single-iteration PBKDF2 with hmac() as the PRF, using `block` as both
// password and salt: for i = 1..(1024/dklenP), salt_i = block || i and
// out_i = hmac(salt_i, block). Output is num_loop*8 words.
// NOTE(review): the output buffer is `static` and its size is fixed by the
// FIRST call's dklenP -- a later call with a smaller dklenP (larger
// num_loop) would overflow it; returned pointer is shared, not reentrant.
// `salt` is a non-standard variable-length array.
WORD* pbkdf2(SHA256_CTX *ctx, WORD *block, unsigned long block_len, int dklenP){
int num_loop = 1024/dklenP;
WORD salt[block_len+1];
WORD *hmac_out;
// int hmac_out_len = 8;
static WORD *pbkdf2_out = new WORD[num_loop*8]();
for(int i = 0; i<block_len; i++){
salt[i]=block[i];
}
for (int i = 1; i <= num_loop; i++)
{
// append the 1-based block index to the salt, as in PBKDF2's INT(i)
salt[block_len] = i;
hmac_out = hmac(ctx, salt, block_len+1, block, block_len);
for(int j = 0; j<8; j++){
pbkdf2_out[(i-1)*8+j] = hmac_out[j];
}
}
return pbkdf2_out;
}
// Second PBKDF2 pass of scrypt: same scheme as pbkdf2() but the salt is the
// ROMix output (rm_out) while the password stays `block`.
// NOTE(review): same hazards as pbkdf2 -- static output buffer sized by the
// first call's dklenP, shared return pointer, VLA salt.
WORD* pbkdf2_2nd(SHA256_CTX *ctx, WORD *rm_out, unsigned long rm_out_len, WORD *block, unsigned long block_len, int dklenP){
int num_loop = 1024/dklenP;
WORD salt[rm_out_len+1];
WORD *hmac_out;
// int hmac_out_len = 8;
static WORD *pbkdf2_out = new WORD[num_loop*8]();
for(int i = 0; i<rm_out_len; i++){
salt[i]=rm_out[i];
}
for (int i = 1; i <= num_loop; i++)
{
// append the 1-based block index, as in PBKDF2's INT(i)
salt[rm_out_len] = i;
hmac_out = hmac(ctx, salt, rm_out_len+1, block, block_len);
for(int j = 0; j<8; j++){
pbkdf2_out[(i-1)*8+j] = hmac_out[j];
}
}
return pbkdf2_out;
}
// One Salsa20 quarter-round over four state words, using the SALSA_MIX
// macro (x ^= ROTL(a + b, r)) with the standard rotation amounts 7/9/13/18.
void salsa_round(WORD *x1, WORD *x2, WORD *x3, WORD *x4){
*x1 = SALSA_MIX(*x1, *x4, *x3, 7);
*x2 = SALSA_MIX(*x2, *x1, *x4, 9);
*x3 = SALSA_MIX(*x3, *x2, *x1, 13);
*x4 = SALSA_MIX(*x4, *x3, *x2, 18);
}
// Salsa20/8 round function: 4 double-rounds (column round + row round) over
// the 16-word state. Mutates `x` in place and copies it to a static buffer
// that is returned -- not reentrant, and the final Salsa feed-forward
// addition is performed by the caller (blockmix), not here.
WORD * salsa20_8(WORD *x){
static WORD out[16];
for(int i = 0; i<4; i++){
// column round
salsa_round(&x[4], &x[8], &x[12], &x[0]);
salsa_round(&x[9], &x[13], &x[1], &x[5]);
salsa_round(&x[14], &x[2], &x[6], &x[10]);
salsa_round(&x[3], &x[7], &x[11], &x[15]);
// row round
salsa_round(&x[1], &x[2], &x[3], &x[0]);
salsa_round(&x[6], &x[7], &x[4], &x[5]);
salsa_round(&x[11], &x[8], &x[9], &x[10]);
salsa_round(&x[12], &x[13], &x[14], &x[15]);
}
for(int i=0; i<16; i++){
out[i] = x[i];
}
return out;
}
// scrypt BlockMix for r = 1: the 64-byte halves B0 = block[0..15] and
// B1 = block[16..31] are mixed as Y0 = Salsa(B0 ^ B1), Y1 = Salsa(Y0 ^ B1),
// with the Salsa feed-forward done here via add_two_words_array_512_bit
// (per-word mod 2^32, as Salsa requires).
// Returns a static 32-word buffer -- each call overwrites it; not reentrant.
WORD * blockmix(WORD *block){
WORD x_arr[16];
WORD x_arr_cpy[16];
static WORD *out = new WORD[32]();
for (int i = 0; i < 16; i++){
x_arr[i] = block[i];
}
for (int i = 0; i<2; i++){
for (int j = 0; j < 16; j++){
// keep a copy: salsa20_8 mutates its argument, and the feed-forward
// needs the pre-round value left in x_arr
x_arr_cpy[j] = x_arr[j] ^ block[j+16];
x_arr[j] ^= block[j+16];
}
add_two_words_array_512_bit(x_arr, salsa20_8(x_arr_cpy));
for (int j = 0; j < 16; j++){
out[(16*i)+j] = x_arr[j];
}
}
return out;
}
// scrypt ROMix: fill a lookup table with N successive BlockMix states, then
// perform N data-dependent reads (index taken from block[16] mod 1024) and
// XOR-mix them back in. Returns blockmix's static buffer (not reentrant).
// NOTE(review): `mem` is a fixed 1024x32 table (128 KB on the stack!) and
// the index mask 0x3ff hard-codes N == 1024 -- calling with N > 1024
// overflows `mem`, and N < 1024 would index rows never written. Confirm the
// N == 1024 assumption before reusing. The initial `new WORD[32]` assigned
// to `out` is abandoned when `out = block` runs (one-time 128-byte leak).
WORD * romix(WORD *block, int N){
WORD mem[1024][32];
static WORD *out = new WORD[32]();
int j;
for (int i = 0; i<N; i++){
for (j = 0; j < 32; j++){
mem[i][j] = block[j];
}
block = blockmix(block);
}
for (int i = 0; i<N; i++){
// data-dependent index: integerify(X) mod 1024
j = (block[16] & 0x000003ff);
for (int k = 0; k<32; k++){
block[k] ^= mem[j][k];
}
block = blockmix(block);
}
out = block;
return out;
}
// scrypt key derivation (password = salt = `block`):
//   PBKDF2(dklenP1) -> ROMix(N) -> PBKDF2(dklenP2 keyed by the mixed state).
// Returns pbkdf2_2nd's static output buffer -- overwritten on each call,
// not reentrant (unchanged contract).
// Fix vs. the original: three `new WORD[...]` buffers were allocated and
// then immediately overwritten with the callees' (static) buffers, leaking
// every allocation on every call. The intermediate values and the returned
// digest are unchanged.
WORD * scrypt(SHA256_CTX *ctx, WORD *block, unsigned long block_len, int dklenP1, int N, int dklenP2){
    int pbkdf2_out_len_1 = 8*(1024/dklenP1);
    // first key stretch: expand `block` into the ROMix input
    WORD *pbkdf2_1_out = pbkdf2(ctx, block, block_len, dklenP1);
    endian_full(pbkdf2_1_out, pbkdf2_out_len_1);
    // memory-hard mixing
    WORD *romix_out = romix(pbkdf2_1_out, N);
    endian_full(romix_out, 32);
    // second key stretch, salted with the mixed state
    return pbkdf2_2nd(ctx, romix_out, 32, block, block_len, dklenP2);
}
// Device-side wrapper around scrypt().
// Fix vs. the original: it assigned scrypt()'s return value to the local
// copy of the hash_out pointer, so the caller's buffer was never written and
// the digest was lost. The result is now copied into hash_out, which must
// hold 8*(1024/dklenP2) words (8 words for the dklenP2 == 1024 call site).
__device__ void scrypt_cuda(SHA256_CTX *ctx, WORD block[], unsigned long block_len, int dklenP1, int N, int dklenP2, WORD hash_out[])
{
WORD *digest = scrypt(ctx, block, block_len, dklenP1, N, dklenP2);
const int out_len = 8*(1024/dklenP2);
for (int i = 0; i < out_len; i++)
hash_out[i] = digest[i];
}
// Mining kernel: each thread assembles a fixed 80-byte Litecoin-style block
// header (version, previous block hash, merkle root, time, bits -- all
// little-endian hex), then scans its own range of max_loop nonces through
// scrypt. Grid-stride loop over N (= 10) work items.
// NOTE(review): this __global__ function calls helpers (little_endian,
// word_to_hex_eight, the scrypt chain) and libc functions (mktime) that are
// not declared __device__ in this file -- nvcc/hipcc should reject these
// calls from device code; confirm how this translation unit actually builds.
// NOTE(review): `ctx`, `wtime`, `test_scrypt_out_w` and `test_scrypt_out`
// are new'd every outer iteration and never freed (per-thread leak);
// `test_scrypt_out` is never used at all.
__global__ void scrypt_top_cuda(uint32_t max_loop) {
uint32_t index = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t stride = blockDim.x * gridDim.x;
uint32_t j;
for (j = index; j < N; j += stride){
SHA256_CTX *ctx = new SHA256_CTX();
// fixed header fields (hex), converted to little-endian byte order below
char ver[]="20000000";
char prev_block[]="48f4bdc6cbabf6e59d5714adc7caa1af293bc49c75d447c2fdc1843694d1ef56";
char mrkl_root[]="f03a2314e267c0e67627a51aa8c7bcdd99a2d173deec41ab96945eb4c7e43dee";
char time[9];
char bits[9];
little_endian(ver, sizeof(ver) - 1);
little_endian(prev_block, sizeof(prev_block) - 1);
little_endian(mrkl_root, sizeof(mrkl_root) - 1);
// Build the hard-coded header timestamp (2019-03-13 16:51:51 local time).
struct tm t;
time_t t_of_day;
t.tm_year = 2019-1900; // Year - 1900
t.tm_mon = 3-1; // Month, where 1 = jan
t.tm_mday = 13; // Day of the month
t.tm_hour = 7+9;
t.tm_min = 51;
t.tm_sec = 51;
t.tm_isdst = -1; // Is DST on? 1 = yes, 0 = no, -1 = unknown
t_of_day = mktime(&t);
WORD *wtime = new WORD(t_of_day);
endian_cvt(wtime);
word_to_hex_eight(*wtime, time, 8);
word_to_hex_eight(436330391, bits, 8); // difficulty bits -- input
little_endian(bits, 8);
// Concatenate the hex fields into the 152-char header string.
char test_scrypt_in[153];
int in_index = 0;
WORD i;
for( i = 0; i < sizeof(ver)-1; i++){
test_scrypt_in[i]=ver[i];
}
in_index += sizeof(ver)-1;
for( i = 0; i < sizeof(prev_block); i++){
test_scrypt_in[in_index+i] = prev_block[i];
}
in_index += sizeof(prev_block)-1;
for( i = 0; i < sizeof(mrkl_root); i++){
test_scrypt_in[in_index+i] = mrkl_root[i];
}
in_index += sizeof(mrkl_root)-1;
for( i = 0; i < sizeof(time); i++){
test_scrypt_in[in_index+i] = time[i];
}
in_index += sizeof(time)-1;
for( i = 0; i < sizeof(bits); i++){
test_scrypt_in[in_index+i] = bits[i];
}
WORD *test_scrypt_out_w = new WORD[8]();
char *test_scrypt_out = new char[32*8]();
WORD test_scrypt_in_w[20];
// Nonce scan: word 19 of the header is the nonce, byte-swapped to match
// the header's little-endian convention.
for (i = j*max_loop; i<(j+1)*max_loop; i++){
hex_string_to_words(test_scrypt_in, sizeof(test_scrypt_in), test_scrypt_in_w);
test_scrypt_in_w[19] = i;
endian_cvt(&test_scrypt_in_w[19]);
scrypt_cuda(ctx, test_scrypt_in_w, 20, 256, 1024, 1024, test_scrypt_out_w);
// progress marker: report when the last nonce of this thread's range is hit
if(i==(index+1)*max_loop-1){
printf("\nThread id: %d, nonce: %d\n", index, i);
}
}
}
}
// Host driver: pick the last CUDA-capable device, launch the mining kernel
// over N (= 10) work items with MAXLOOP (= M/N) nonces per thread, then
// tear the device down.
// NOTE(review): with N = 10 and blockSize = 256 this launches a single
// 256-thread block of which only 10 threads do work. There is no explicit
// synchronize before hipDeviceReset() -- confirm the kernel is guaranteed
// to finish before the context is destroyed.
int main(void)
{
int GPU_N;
checkCudaErrors(hipGetDeviceCount(&GPU_N));
printf("CUDA-capable device count: %d\n", GPU_N);
checkCudaErrors(hipSetDevice(GPU_N-1));
uint32_t blockSize = 256;
uint32_t numBlocks = (N + blockSize - 1) / blockSize; // ceil-div: 1 block here
// uint32_t *max_loop_cpu = (uint32_t *)malloc(sizeof(uint32_t));
// *max_loop_cpu = M;
// checkCudaErrors(hipMallocManaged(&max_loop_gpu, sizeof(uint32_t)));
// hipMemcpy(max_loop_gpu, max_loop_cpu, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( scrypt_top_cuda) , dim3(numBlocks), dim3(blockSize), 0, 0, MAXLOOP);
hipDeviceReset();
return 0;
}
| 2789f5074377e1f4920bc47c8cdbf7bbcb35a9ca.cu | // cd /home/hork/cuda-workspace/CudaSHA256/Debug/files
// time ~/Dropbox/FIIT/APS/Projekt/CpuSHA256/a.out -f ../file-list
// time ../CudaSHA256 -f ../file-list
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <cuda_runtime.h>
#include <memory.h>
#include <iostream>
#include <dirent.h>
#include <ctype.h>
#include <time.h>
#include "sha256.h"
#define N 10
#define M 100
#define MAXLOOP M/N
//#define N 6
#define checkCudaErrors(x) \
{ \
cudaGetLastError(); \
x; \
cudaError_t err = cudaGetLastError(); \
if (err != cudaSuccess) \
printf("GPU: cudaError %d (%s)\n", err, cudaGetErrorString(err)); \
}
// datatypes -----------------------------------------------------------------------
#ifndef DATATYPES
#define DATATYPES
// Data types
typedef unsigned char BYTE; // 8-bit byte
typedef unsigned int WORD; // 32-bit word
#endif
// sha256 ----------------------------------------------------------------------------------------
#ifndef PBKDF2
#define PBKDF2
#include <iostream>
#include "sha256.h"
#define ipad_elm 0x36363636
#define opad_elm 0x5c5c5c5c
#define SUM(a,b) (a+b) & 0xffffffff
// #define SALSA_MIX(destination ,a1, a2, b) (destination ^ (((SUM(a1,a2) << b) & 0xffffffff) | ((SUM(a1,a2) >> (32-b))&0xffffffff)))
#define SALSA_MIX(destination ,a1, a2, b) (destination ^ (ROTLEFT(SUM(a1,a2),b)))
const WORD IPAD[8] = {ipad_elm, ipad_elm, ipad_elm, ipad_elm, ipad_elm, ipad_elm, ipad_elm, ipad_elm}; // 256-bit 363636...36
const WORD OPAD[8] = {opad_elm, opad_elm, opad_elm, opad_elm, opad_elm, opad_elm, opad_elm, opad_elm}; // 256-bit 5c5c5c...5c
// Function in scrypt
WORD* hmac(SHA256_CTX *ctx, WORD *salt, unsigned long salt_len, WORD *message, unsigned long message_len);
WORD* pbkdf2(SHA256_CTX *ctx, WORD *block, unsigned long block_len, int dklenP);
WORD* pbkdf2_2nd(SHA256_CTX *ctx, WORD *rm_out, unsigned long rm_out_len, WORD *block, unsigned long block_len, int dklenP);
void salsa_round(WORD *x1, WORD *x2, WORD *x3, WORD *x4);
WORD * salsa20_8(WORD *x);
WORD * blockmix(WORD *block);
WORD * romix(WORD *block, int N);
WORD * scrypt(SHA256_CTX *ctx, WORD *block, unsigned long block_len, int dklenP1, int N, int dklenP2);
#endif
static const WORD k[64] = {0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, \
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, \
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, \
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, \
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, \
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, \
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, \
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2};
// Create init state for SHA-256
void sha256_init(SHA256_CTX *ctx)
{
    // Reset the hashing context to the SHA-256 initial state (FIPS 180-4 H0..H7).
    ctx->datalen = 0;
    ctx->bitlen = 0;
    const WORD initial_state[8] = {h0, h1, h2, h3, h4, h5, h6, h7};
    for (int idx = 0; idx < 8; ++idx)
        ctx->state[idx] = initial_state[idx];
}
// Compress one 512-bit (64-byte) block into the running hash state (FIPS 180-4).
// The 64-round schedule and state rotation are order-critical; do not reorder.
void sha256_transform(SHA256_CTX *ctx, const BYTE data[])
{
// m is W in hardware design
WORD a, b, c, d, e, f, g, h, i, j, t1, t2, m[64];
// Calculate the first 16 m elements.
// The first 16 schedule words come straight from the block, big-endian.
for (i = 0, j = 0; i < 16; ++i, j += 4)
m[i] = (data[j] << 24) | (data[j + 1] << 16) | (data[j + 2] << 8) | (data[j + 3]);
// Calculate the remain elements.
// The remaining 48 words are expanded with the small sigma functions.
for ( ; i < 64; ++i)
m[i] = SIG1(m[i - 2]) + m[i - 7] + SIG0(m[i - 15]) + m[i - 16];
// update the new value of state after each block
a = ctx->state[0];
b = ctx->state[1];
c = ctx->state[2];
d = ctx->state[3];
e = ctx->state[4];
f = ctx->state[5];
g = ctx->state[6];
h = ctx->state[7];
// process 64 rounds
for (i = 0; i < 64; ++i) {
t1 = h + EP1(e) + CH(e,f,g) + k[i] + m[i];
t2 = EP0(a) + MAJ(a,b,c);
h = g;
g = f;
f = e;
e = d + t1;
d = c;
c = b;
b = a;
a = t1 + t2;
}
// Add the compressed chunk back into the running state (Davies-Meyer).
ctx->state[0] += a;
ctx->state[1] += b;
ctx->state[2] += c;
ctx->state[3] += d;
ctx->state[4] += e;
ctx->state[5] += f;
ctx->state[6] += g;
ctx->state[7] += h;
}
// the total length of the message has to be specified
// Feed len bytes of message into the context, compressing each full
// 64-byte block as it fills. May be called repeatedly before sha256_final.
void sha256_update(SHA256_CTX *ctx, const BYTE data[], size_t len)
{
WORD i;
for (i = 0; i < len; ++i){
ctx->data[ctx->datalen] = data[i]; // Pad data (message) for each 512-block in --> transform
ctx->datalen++;
// after browse for 64 bytes (512-bit block) -> transform the block.
if(ctx->datalen == 64){
sha256_transform(ctx, ctx->data);
ctx->bitlen += 512; // increase the bit length by 512
ctx->datalen = 0;
}
}
}
// this function processes for the last block -> after all real data is browsed
// Finish the hash: append the 0x80 marker, zero padding, and the 64-bit
// big-endian message length, compress the last block(s), and copy the
// 8-word digest into hash.
void sha256_final(SHA256_CTX *ctx, WORD *hash){
WORD i;
// padding is processed from here
i = ctx->datalen;
if (ctx->datalen < 56){
// add byte 0x80 at the first if the datalength is lower than 56
ctx->data[i++] = 0x80;
// pad the zero bytes until the byte 56th
while (i<56)
{
ctx->data[i++]=0x00;
}
}
else{
// Not enough room for the length field: pad this block out, compress it,
// and start a fresh all-zero block for the length.
// add byte at the first
ctx->data[i++]=0x80;
// pad zero bytes until the last block
while (i<64){
ctx->data[i++]=0x00;
}
// transform this block --> it's not the last block
sha256_transform(ctx, ctx->data);
// set 56 zero bytes from last_block[0:55]
memset(ctx->data, 0, 56);
}
// Append to the padding the total message's length in bits and transform.
ctx->bitlen += ctx->datalen * 8;
ctx->data[63] = ctx->bitlen;
ctx->data[62] = ctx->bitlen >> 8;
ctx->data[61] = ctx->bitlen >> 16;
ctx->data[60] = ctx->bitlen >> 24;
ctx->data[59] = ctx->bitlen >> 32;
ctx->data[58] = ctx->bitlen >> 40;
ctx->data[57] = ctx->bitlen >> 48;
ctx->data[56] = ctx->bitlen >> 56;
// end padding
sha256_transform(ctx, ctx->data);
// NOTE(review): the digest is exposed as 8 host-endian words; no per-byte
// reversal happens here -- callers (e.g. endian_full) handle byte order.
hash[0] = ctx->state[0];
hash[1] = ctx->state[1];
hash[2] = ctx->state[2];
hash[3] = ctx->state[3];
hash[4] = ctx->state[4];
hash[5] = ctx->state[5];
hash[6] = ctx->state[6];
hash[7] = ctx->state[7];
}
// One-shot SHA-256 of a hex-encoded message; returns a pointer to a shared
// static buffer holding the 64-character hex digest, NUL-terminated.
// NOTE: not thread-safe -- the returned buffer is reused on every call.
char * sha256(SHA256_CTX *ctx, char hex_str_in[], unsigned long hex_str_len){
    unsigned long datalen = hex_str_len/2;
    BYTE *data = new BYTE[datalen]();
    WORD hash_w[8];                     // digest is exactly 8 words (was over-allocated to 64)
    static char *out = new char[65]();  // 64 hex chars + NUL so callers may printf("%s", ...)
    hex_string_to_bytes(hex_str_in, hex_str_len, data);
    sha256_init(ctx);
    sha256_update(ctx, data, datalen);
    sha256_final(ctx, hash_w);
    words_to_hex_string(hash_w, 8, out, 64);
    out[64] = '\0';
    delete[] data;                      // was leaked on every call
    return out;
}
// Hash a hex-encoded message and write the 8-word digest into the
// caller-provided hash_w buffer.
void sha256_w(SHA256_CTX *ctx, char hex_str_in[], unsigned long hex_str_len, WORD *hash_w){
    unsigned long datalen = hex_str_len/2;
    BYTE *data = new BYTE[datalen]();
    hex_string_to_bytes(hex_str_in, hex_str_len, data);
    sha256_init(ctx);
    sha256_update(ctx, data, datalen);
    sha256_final(ctx, hash_w);
    delete[] data;  // was leaked on every call
}
// Convenience wrapper: hash a raw byte buffer in one shot into hash_w (8 words).
void sha256_in_bytes(SHA256_CTX *ctx, BYTE *bytes_in, unsigned long bytes_in_len, WORD *hash_w){
sha256_init(ctx);
sha256_update(ctx, bytes_in, bytes_in_len);
sha256_final(ctx, hash_w);
}
// Hash an array of words: each word is serialized big-endian (MSB first)
// to bytes, then the byte stream is SHA-256 hashed into hash_w (8 words).
void sha256_in_words(SHA256_CTX *ctx, WORD *words_in, unsigned long words_in_len, WORD *hash_w){
    unsigned long bytes_in_len = words_in_len * 4;
    // Heap buffer instead of the original non-standard C++ VLA.
    BYTE *bytes_in = new BYTE[bytes_in_len];
    for (unsigned long i = 0; i < words_in_len; i++){
        bytes_in[4*i]   = (BYTE)(words_in[i] >> 24);
        bytes_in[4*i+1] = (BYTE)(words_in[i] >> 16);
        bytes_in[4*i+2] = (BYTE)(words_in[i] >> 8);
        bytes_in[4*i+3] = (BYTE)(words_in[i]);
    }
    sha256_init(ctx);
    sha256_update(ctx, bytes_in, bytes_in_len);
    sha256_final(ctx, hash_w);
    delete[] bytes_in;
}
// utils ------------------------------------------------------------------------------------------
#ifndef UTILS
#define UTILS
#include "datatypes.h"
#include <iostream>
#include <algorithm>
#include <stddef.h>
BYTE hex_char_to_byte(char hex_char);
void hex_string_to_bytes(char hex_str_in[], unsigned long hex_str_len, BYTE bytes_out[]);
void half_byte_to_hex(BYTE half_byte_in, char hex);
void word_to_hex_eight(WORD word_in, char *hex_eight, unsigned long hex_eight_size);
void words_to_hex_string(WORD words_in[], unsigned long words_len, char hex_str[], unsigned long hex_str_len);
void hex_string_to_words(char hex_str_in[], unsigned long hex_str_len, WORD words_out[]);
void add_two_words_array_512_bit(WORD *a, WORD *b);
void print_words_inline(WORD *w, unsigned long w_len);
void print_words_multiline(WORD *w, unsigned long w_len);
void add_two_words_array_512_bit_with_carry(WORD *a, WORD *b);
void endian_cvt(WORD *w);
void endian_full(WORD *w, unsigned long w_len);
void little_endian(char *c, unsigned long w_len);
#endif
// ----------------------- Utils functions ------------------------
// Map one hex digit ('0'-'9', 'a'-'f', 'A'-'F') to its 4-bit value.
// Any other character yields 0.
BYTE hex_char_to_byte(char hex_char){
    if (hex_char >= '0' && hex_char <= '9')
        return hex_char - '0';
    if (hex_char >= 'a' && hex_char <= 'f')
        return hex_char - 'a' + 10;
    if (hex_char >= 'A' && hex_char <= 'F')
        return hex_char - 'A' + 10;
    return 0;
}
// Pack each pair of hex digits into one output byte (high nibble first).
// Requires bytes_out to have room for hex_str_len/2 bytes.
void hex_string_to_bytes(char hex_str_in[], unsigned long hex_str_len, BYTE bytes_out[]){
    // "p + 1 < len" instead of "p < len - 1": the latter underflows to a huge
    // unsigned value when hex_str_len == 0 and caused a runaway loop.
    for (unsigned long p = 0; p + 1 < hex_str_len; p += 2){
        BYTE hi = hex_char_to_byte(hex_str_in[p]);
        BYTE lo = hex_char_to_byte(hex_str_in[p+1]);
        bytes_out[p/2] = (BYTE)((hi << 4) | lo);
    }
}
// Pack each run of 8 hex digits into one big-endian WORD.
// Requires words_out to have room for hex_str_len/8 words; any trailing
// partial group (fewer than 8 digits) is ignored.
void hex_string_to_words(char hex_str_in[], unsigned long hex_str_len, WORD words_out[]){
    // "p + 7 < len" fixes two defects of the old "p < len - 1" bound:
    //  * unsigned underflow for len == 0 (runaway loop), and
    //  * an out-of-bounds read of up to 6 chars for a trailing partial group.
    for (unsigned long p = 0; p + 7 < hex_str_len; p += 8){
        WORD w = 0;
        for (int k = 0; k < 8; k++)
            w = (w << 4) | (hex_char_to_byte(hex_str_in[p + k]) & 0x0f);
        words_out[p/8] = w;
    }
}
// Convert the low nibble of half_byte_in to its lowercase hex character.
// The masked value is always in [0, 15], so every input is representable
// (the old "out of range" error branch was unreachable dead code).
void half_byte_to_hex(BYTE half_byte_in, char *hex){
    BYTE v = half_byte_in & 0x0f;
    *hex = (char)((v < 10) ? ('0' + v) : ('a' + v - 10));
}
// Render word_in as 8 hex characters (most-significant nibble first) into
// hex_eight. hex_eight_size must be exactly 8; anything else is rejected
// with a diagnostic and the buffer is left untouched.
void word_to_hex_eight(WORD word_in, char *hex_eight, unsigned long hex_eight_size){
    if (hex_eight_size != 8){
        printf("The hex_pair must have the length of two characters: %d\n", (int)hex_eight_size);
        return;
    }
    for (int k = 0; k < 8; ++k)
        half_byte_to_hex((BYTE)(word_in >> (28 - 4*k)), &hex_eight[k]);
}
// Render words_len big-endian words into hex_str as exactly 8*words_len hex
// characters (no NUL terminator is written). hex_str_len must equal
// 8*words_len; a mismatch is rejected with a diagnostic.
void words_to_hex_string(WORD *words_in, unsigned long words_len, char hex_str[], unsigned long hex_str_len){
    if (hex_str_len != 8*words_len){
        printf("The hex_string must have the lenght of 4*bytes_len: %d\n", (int)hex_str_len);
        return;
    }
    char group[8];
    for (unsigned long w = 0; w < words_len; ++w){
        word_to_hex_eight(words_in[w], group, sizeof(group));
        for (int k = 0; k < 8; ++k)
            hex_str[8*w + k] = group[k];
    }
}
// In-place element-wise add of two 16-word (512-bit) arrays: a[i] += b[i].
// Each word addition wraps mod 2^32; no carry propagates between words
// (see add_two_words_array_512_bit_with_carry for the carrying variant).
void add_two_words_array_512_bit(WORD *a, WORD *b){
    for (int idx = 0; idx < 16; ++idx)
        a[idx] += b[idx];
}
// In-place 512-bit addition a += b with carry, treating a[15] as the least
// significant word. Each word is split into 16-bit halves; sum holds the low
// half + incoming carry, sum1 the high half, and sum1's top bits carry into
// the next (more significant, lower-index) word on the following iteration.
void add_two_words_array_512_bit_with_carry(WORD *a, WORD *b){
WORD sum = 0;
WORD sum1 = 0;
for (int i = 15; i>=0; i--){
sum = ((a[i]&0x0000ffff)+(b[i]&0x0000ffff)+(sum1>>16));
sum1 = ((a[i]>>16)+(b[i]>>16)+(sum>>16));
a[i]= (sum & 0x0000ffff) + (sum1<<16);
}
}
// Debug helper: dump w_len words as one continuous hex string, framed by newlines.
void print_words_inline(WORD *w, unsigned long w_len){
    printf("\n");
    for (unsigned long idx = 0; idx < w_len; ++idx)
        printf("%08x", w[idx]);
    printf("\n");
}
// Debug helper: dump w_len words in hex, one word per line, framed by newlines.
void print_words_multiline(WORD *w, unsigned long w_len){
    printf("\n");
    for (unsigned long idx = 0; idx < w_len; ++idx)
        printf("%08x\n", w[idx]);
    printf("\n");
}
// Reverse the byte order of one 32-bit word in place
// (0x11223344 -> 0x44332211).
void endian_cvt(WORD *w){
    WORD v = *w;
    *w = (v << 24) | ((v << 8) & 0x00ff0000) | ((v >> 8) & 0x0000ff00) | (v >> 24);
}
// Byte-swap every word of the array in place.
void endian_full(WORD *w, unsigned long w_len){
    for (unsigned long idx = 0; idx < w_len; ++idx)
        endian_cvt(&w[idx]);
}
// Reverse a hex string byte-pair-wise in place: "01234567" -> "67452301".
// Contract: w_len must be even, and c must have room for w_len + 1 chars,
// because a NUL terminator is written at c[w_len] (all in-file callers pass
// buffers sized w_len + 1).
void little_endian(char *c, unsigned long w_len){
    // Heap scratch copy instead of the original non-standard C++ VLA.
    char *dc = new char[w_len];
    for (unsigned long i = 0; i + 1 < w_len; i += 2){
        dc[w_len-2-i] = c[i];
        dc[w_len-1-i] = c[i+1];
    }
    for (unsigned long i = 0; i < w_len; i++){
        c[i] = dc[i];
    }
    delete[] dc;
    c[w_len] = '\0';
}
// scrypt --------------------------------------------------------------------------------------------------------------------------
#ifndef PBKDF2
#define PBKDF2
#include <iostream>
#include "sha256.h"
#define ipad_elm 0x36363636
#define opad_elm 0x5c5c5c5c
#define SUM(a,b) (a+b) & 0xffffffff
// #define SALSA_MIX(destination ,a1, a2, b) (destination ^ (((SUM(a1,a2) << b) & 0xffffffff) | ((SUM(a1,a2) >> (32-b))&0xffffffff)))
#define SALSA_MIX(destination ,a1, a2, b) (destination ^ (ROTLEFT(SUM(a1,a2),b)))
const WORD IPAD[8] = {ipad_elm, ipad_elm, ipad_elm, ipad_elm, ipad_elm, ipad_elm, ipad_elm, ipad_elm}; // 256-bit 363636...36
const WORD OPAD[8] = {opad_elm, opad_elm, opad_elm, opad_elm, opad_elm, opad_elm, opad_elm, opad_elm}; // 256-bit 5c5c5c...5c
// Function in scrypt
WORD* hmac(SHA256_CTX *ctx, WORD *salt, unsigned long salt_len, WORD *message, unsigned long message_len);
WORD* pbkdf2(SHA256_CTX *ctx, WORD *block, unsigned long block_len, int dklenP);
WORD* pbkdf2_2nd(SHA256_CTX *ctx, WORD *rm_out, unsigned long rm_out_len, WORD *block, unsigned long block_len, int dklenP);
void salsa_round(WORD *x1, WORD *x2, WORD *x3, WORD *x4);
WORD * salsa20_8(WORD *x);
WORD * blockmix(WORD *block);
WORD * romix(WORD *block, int N);
WORD * scrypt(SHA256_CTX *ctx, WORD *block, unsigned long block_len, int dklenP1, int N, int dklenP2);
#endif
// HMAC-SHA256 variant used by this scrypt implementation: the key is
// SHA256(message) (8 words); the remaining 8 words of the 64-byte key block
// are the bare IPAD/OPAD constants. Computes
//   SHA256( (key^opad_block) || SHA256( (key^ipad_block) || salt ) )
// and returns a pointer to a shared static 8-word digest (not thread-safe).
WORD* hmac(SHA256_CTX *ctx, WORD *salt, unsigned long salt_len, WORD *message, unsigned long message_len){
    WORD *khash = (WORD*) malloc(sizeof(WORD)*8);
    sha256_in_words(ctx, message, message_len, khash);
    WORD ixor[16];
    WORD oxor[16];
    for (int w = 0; w < 8; w++){
        ixor[w]   = IPAD[w] ^ khash[w];
        ixor[w+8] = IPAD[w];   // key is only 8 words; rest of the block is pad only
        oxor[w]   = OPAD[w] ^ khash[w];
        oxor[w+8] = OPAD[w];
    }
    free(khash);               // was leaked on every call
    // Inner hash over (key^ipad) || salt.
    WORD in_ihash[16 + salt_len];
    unsigned long i;
    for (i = 0; i < 16; i++){
        in_ihash[i] = ixor[i];
    }
    for (; i < 16 + salt_len; i++){
        in_ihash[i] = salt[i - 16];
    }
    WORD ihash[8];
    sha256_in_words(ctx, in_ihash, 16 + salt_len, ihash);
    // Outer hash over (key^opad) || ihash. The fill loop is bounded by the
    // fixed sizes 16 + 8: the previous bound of 16 + salt_len overran both
    // in_ohash (24 words) and ihash (8 words) whenever salt_len > 8, which
    // every in-file caller triggers (salt_len is 21 or 33).
    WORD in_ohash[16 + 8];
    for (i = 0; i < 16; i++){
        in_ohash[i] = oxor[i];
    }
    for (; i < 16 + 8; i++){
        in_ohash[i] = ihash[i - 16];
    }
    static WORD ohash[8];
    sha256_in_words(ctx, in_ohash, 24, ohash);
    return ohash;
}
// PBKDF2 (single HMAC iteration per output block): derives 1024/dklenP
// blocks of 8 words each, using the password itself (block) suffixed with
// the block counter as salt. Returns a shared static buffer.
// NOTE(review): the static buffer is sized from the dklenP of the *first*
// call; later calls with a smaller dklenP would overflow it -- verify callers
// always use a single dklenP value.
WORD* pbkdf2(SHA256_CTX *ctx, WORD *block, unsigned long block_len, int dklenP){
int num_loop = 1024/dklenP;
WORD salt[block_len+1];
WORD *hmac_out;
// int hmac_out_len = 8;
static WORD *pbkdf2_out = new WORD[num_loop*8]();
for(int i = 0; i<block_len; i++){
salt[i]=block[i];
}
// One HMAC per derived block; counter i is appended as the last salt word.
for (int i = 1; i <= num_loop; i++)
{
salt[block_len] = i;
hmac_out = hmac(ctx, salt, block_len+1, block, block_len);
for(int j = 0; j<8; j++){
pbkdf2_out[(i-1)*8+j] = hmac_out[j];
}
}
return pbkdf2_out;
}
// Second PBKDF2 pass of scrypt: same as pbkdf2() but salts with the romix
// output (rm_out) plus the block counter, while the HMAC key material is
// still derived from the original password block. Returns a shared static
// buffer sized from the first call's dklenP (same caveat as pbkdf2).
WORD* pbkdf2_2nd(SHA256_CTX *ctx, WORD *rm_out, unsigned long rm_out_len, WORD *block, unsigned long block_len, int dklenP){
int num_loop = 1024/dklenP;
WORD salt[rm_out_len+1];
WORD *hmac_out;
// int hmac_out_len = 8;
static WORD *pbkdf2_out = new WORD[num_loop*8]();
for(int i = 0; i<rm_out_len; i++){
salt[i]=rm_out[i];
}
for (int i = 1; i <= num_loop; i++)
{
salt[rm_out_len] = i;
hmac_out = hmac(ctx, salt, rm_out_len+1, block, block_len);
for(int j = 0; j<8; j++){
pbkdf2_out[(i-1)*8+j] = hmac_out[j];
}
}
return pbkdf2_out;
}
// One Salsa20 quarter-round over four state words, in place. Each step is
// xor-with-rotated-sum (see SALSA_MIX); statement order is significant
// because every line feeds the next.
void salsa_round(WORD *x1, WORD *x2, WORD *x3, WORD *x4){
*x1 = SALSA_MIX(*x1, *x4, *x3, 7);
*x2 = SALSA_MIX(*x2, *x1, *x4, 9);
*x3 = SALSA_MIX(*x3, *x2, *x1, 13);
*x4 = SALSA_MIX(*x4, *x3, *x2, 18);
}
// Salsa20/8 core: 8 rounds = 4 double-rounds (column round + row round) over
// the 16-word state. Mutates x in place and returns a static copy of it.
// NOTE(review): the standard Salsa20/8 core adds the original input to the
// mixed state; here that final add is performed by the caller (blockmix) --
// verify against the scrypt spec if reusing this elsewhere.
WORD * salsa20_8(WORD *x){
static WORD out[16];
for(int i = 0; i<4; i++){
// column round
salsa_round(&x[4], &x[8], &x[12], &x[0]);
salsa_round(&x[9], &x[13], &x[1], &x[5]);
salsa_round(&x[14], &x[2], &x[6], &x[10]);
salsa_round(&x[3], &x[7], &x[11], &x[15]);
// row round
salsa_round(&x[1], &x[2], &x[3], &x[0]);
salsa_round(&x[6], &x[7], &x[4], &x[5]);
salsa_round(&x[11], &x[8], &x[9], &x[10]);
salsa_round(&x[12], &x[13], &x[14], &x[15]);
}
for(int i=0; i<16; i++){
out[i] = x[i];
}
return out;
}
// scrypt BlockMix for r=1: the 64-byte (32-word) block is processed as two
// 16-word halves; each half is xored into the running x, mixed with
// salsa20_8, and the salsa input is added back. Returns a shared static
// 32-word buffer (overwritten on every call; not thread-safe).
WORD * blockmix(WORD *block){
WORD x_arr[16];
WORD x_arr_cpy[16];
static WORD *out = new WORD[32]();
for (int i = 0; i < 16; i++){
x_arr[i] = block[i];
}
for (int i = 0; i<2; i++){
for (int j = 0; j < 16; j++){
// x_arr_cpy keeps the pre-salsa value so it can be added back afterwards.
x_arr_cpy[j] = x_arr[j] ^ block[j+16];
x_arr[j] ^= block[j+16];
}
add_two_words_array_512_bit(x_arr, salsa20_8(x_arr_cpy));
for (int j = 0; j < 16; j++){
out[(16*i)+j] = x_arr[j];
}
}
return out;
}
// scrypt ROMix: fills a table of N sequential blockmix states, then does N
// data-dependent lookups (index from block[16] masked to 10 bits) xored back
// into the block. Returns the final 32-word block (a blockmix static buffer).
// NOTE(review): mem is 1024*32 words = 128 KB of stack -- fine on a host
// thread, but far beyond a GPU thread's local budget; and the 0x3ff mask
// hard-codes N == 1024 -- confirm N is never larger.
WORD * romix(WORD *block, int N){
WORD mem[1024][32];
static WORD *out = new WORD[32]();
int j;
for (int i = 0; i<N; i++){
for (j = 0; j < 32; j++){
mem[i][j] = block[j];
}
block = blockmix(block);
}
for (int i = 0; i<N; i++){
j = (block[16] & 0x000003ff);
for (int k = 0; k<32; k++){
block[k] ^= mem[j][k];
}
block = blockmix(block);
}
out = block;
return out;
}
// Full scrypt KDF pipeline: PBKDF2 -> endian swap -> ROMix -> endian swap ->
// second PBKDF2. Returns a shared static buffer of 8*(1024/dklenP2) words.
// NOTE(review): the three `new` allocations below are immediately
// overwritten by the callees' return pointers and therefore leak each call.
WORD * scrypt(SHA256_CTX *ctx, WORD *block, unsigned long block_len, int dklenP1, int N, int dklenP2){
int pbkdf2_out_len_1 = 8*(1024/dklenP1);
int pbkdf2_out_len_2 = 8*(1024/dklenP2);
WORD *pbkdf2_1_out = new WORD[pbkdf2_out_len_1]();
WORD *romix_out = new WORD[32]();
static WORD *pbkdf2_2_out = new WORD[pbkdf2_out_len_2]();
pbkdf2_1_out = pbkdf2(ctx, block, block_len, dklenP1);
endian_full(pbkdf2_1_out, pbkdf2_out_len_1);
romix_out = romix(pbkdf2_1_out, N);
endian_full(romix_out, 32);
pbkdf2_2_out = pbkdf2_2nd(ctx, romix_out, 32, block, block_len, dklenP2);
return pbkdf2_2_out;
}
// Device wrapper around scrypt(): runs the KDF and copies the derived key
// into the caller-provided hash_out buffer. The previous code assigned the
// result to the pointer *parameter* itself, so the caller's buffer was never
// written.
__device__ void scrypt_cuda(SHA256_CTX *ctx, WORD block[], unsigned long block_len, int dklenP1, int N, int dklenP2, WORD hash_out[])
{
    WORD *derived = scrypt(ctx, block, block_len, dklenP1, N, dklenP2);
    int out_words = 8 * (1024 / dklenP2);  // matches pbkdf2_out_len_2 inside scrypt
    for (int i = 0; i < out_words; i++)
        hash_out[i] = derived[i];
}
// Top-level mining kernel: each of the N logical workers builds the 80-byte
// litecoin-style block header (version, prev hash, merkle root, time, bits in
// little-endian hex), then scrypt-hashes max_loop consecutive nonce values.
// NOTE(review): this kernel calls host-only facilities (mktime, struct tm,
// heap `new`, the unannotated sha256/scrypt helpers) from device code --
// presumably it only builds in a host-emulation configuration; verify.
__global__ void scrypt_top_cuda(uint32_t max_loop) {
uint32_t index = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t stride = blockDim.x * gridDim.x;
uint32_t j;
// Grid-stride loop over the N logical workers.
for (j = index; j < N; j += stride){
SHA256_CTX *ctx = new SHA256_CTX();
// Fixed header fields (hex strings), converted to little-endian order below.
char ver[]="20000000";
char prev_block[]="48f4bdc6cbabf6e59d5714adc7caa1af293bc49c75d447c2fdc1843694d1ef56";
char mrkl_root[]="f03a2314e267c0e67627a51aa8c7bcdd99a2d173deec41ab96945eb4c7e43dee";
char time[9];
char bits[9];
little_endian(ver, sizeof(ver) - 1);
little_endian(prev_block, sizeof(prev_block) - 1);
little_endian(mrkl_root, sizeof(mrkl_root) - 1);
// Get time
// Hard-coded timestamp: 2019-03-13 16:51:51 local time.
struct tm t;
time_t t_of_day;
t.tm_year = 2019-1900; // Year - 1900
t.tm_mon = 3-1; // Month, where 1 = jan
t.tm_mday = 13; // Day of the month
t.tm_hour = 7+9;
t.tm_min = 51;
t.tm_sec = 51;
t.tm_isdst = -1; // Is DST on? 1 = yes, 0 = no, -1 = unknown
t_of_day = mktime(&t);
WORD *wtime = new WORD(t_of_day);
endian_cvt(wtime);
word_to_hex_eight(*wtime, time, 8);
word_to_hex_eight(436330391, bits, 8); // bits -- input
little_endian(bits, 8);
// Assemble the 152-hex-char header (nonce appended later as word 19).
char test_scrypt_in[153];
int in_index = 0;
WORD i;
for( i = 0; i < sizeof(ver)-1; i++){
test_scrypt_in[i]=ver[i];
}
in_index += sizeof(ver)-1;
for( i = 0; i < sizeof(prev_block); i++){
test_scrypt_in[in_index+i] = prev_block[i];
}
in_index += sizeof(prev_block)-1;
for( i = 0; i < sizeof(mrkl_root); i++){
test_scrypt_in[in_index+i] = mrkl_root[i];
}
in_index += sizeof(mrkl_root)-1;
for( i = 0; i < sizeof(time); i++){
test_scrypt_in[in_index+i] = time[i];
}
in_index += sizeof(time)-1;
for( i = 0; i < sizeof(bits); i++){
test_scrypt_in[in_index+i] = bits[i];
}
WORD *test_scrypt_out_w = new WORD[8]();
char *test_scrypt_out = new char[32*8]();
WORD test_scrypt_in_w[20];
// Scan this worker's nonce range [j*max_loop, (j+1)*max_loop).
for (i = j*max_loop; i<(j+1)*max_loop; i++){
hex_string_to_words(test_scrypt_in, sizeof(test_scrypt_in), test_scrypt_in_w);
test_scrypt_in_w[19] = i;
endian_cvt(&test_scrypt_in_w[19]);
scrypt_cuda(ctx, test_scrypt_in_w, 20, 256, 1024, 1024, test_scrypt_out_w);
if(i==(index+1)*max_loop-1){
printf("\nThread id: %d, nonce: %d\n", index, i);
}
}
}
}
// Host entry point (CUDA build): selects the last visible GPU and launches
// one grid of scrypt_top_cuda worker threads over N work items.
int main(void)
{
    int GPU_N;
    checkCudaErrors(cudaGetDeviceCount(&GPU_N));
    printf("CUDA-capable device count: %d\n", GPU_N);
    // Use the last device (index GPU_N-1); assumes at least one device exists.
    checkCudaErrors(cudaSetDevice(GPU_N-1));
    uint32_t blockSize = 256;
    // Ceiling division so every one of the N work items gets a thread.
    uint32_t numBlocks = (N + blockSize - 1) / blockSize;
    // uint32_t *max_loop_cpu = (uint32_t *)malloc(sizeof(uint32_t));
    // *max_loop_cpu = M;
    // checkCudaErrors(cudaMallocManaged(&max_loop_gpu, sizeof(uint32_t)));
    // cudaMemcpy(max_loop_gpu, max_loop_cpu, cudaMemcpyHostToDevice);
    scrypt_top_cuda <<<numBlocks, blockSize>>> (MAXLOOP);
    // Previously the launch and kernel results were never checked, so any
    // failure was silently discarded before the device reset.
    checkCudaErrors(cudaGetLastError());       // launch-configuration errors
    checkCudaErrors(cudaDeviceSynchronize());  // wait for the kernel; async errors
    cudaDeviceReset();
    return 0;
}
|
709efcfcf973334738d23f8fbc6ce04e484b7251.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../common/book.h"
#include "../common/cpu_bitmap.h"
#define rnd(x) (x*rand()/RAND_MAX)
#define SPHERES 20
#define DIM 10240
#define INF 2e10f
// A colored sphere; hit() intersects a camera ray that travels parallel to
// the z axis through pixel (ox, oy).
struct Sphere{
float r, g, b;
float radius;
float x, y, z;
// Returns the z coordinate of the intersection (closer = larger) and writes
// a 0..1 depth-based shading factor into *n; returns -INF on a miss (in
// which case *n is left unmodified).
__device__ float hit(float ox, float oy, float *n){
float dx = ox - x;
float dy = oy - y;
if(dx*dx + dy*dy < radius*radius){
float dz = sqrtf(radius*radius - dx*dx - dy*dy);
*n = dz/sqrtf(radius*radius);
return dz + z;
}
return -INF;
}
};
__constant__ Sphere s[SPHERES];
// One thread per pixel: cast a z-axis ray against every sphere in constant
// memory, keep the nearest hit, and write the shaded RGBA pixel.
// Launch geometry must tile DIM x DIM exactly (grid DIM/16, block 16x16),
// as there is no bounds guard.
__global__ void kernel(unsigned char *ptr){
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
int offset = x + y*blockDim.x*gridDim.x; // row-major linear pixel index
// Center the image: ray origin in scene coordinates.
float ox = (x - DIM/2);
float oy = (y - DIM/2);
float r=0, g=0, b=0;
float maxz = -INF;
for(int i=0; i<SPHERES; i++){
// n is only written on a hit; on a miss t == -INF fails t > maxz, so the
// uninitialized n is never read.
float n,t = s[i].hit(ox, oy, &n);
if(t > maxz){
float fscale = n; // depth-based shading factor from hit()
r = s[i].r*fscale;
g = s[i].g*fscale;
b = s[i].b*fscale;
maxz = t;
}
}
ptr[offset*4 + 0] = (int) (r*255);
ptr[offset*4 + 1] = (int) (g*255);
ptr[offset*4 + 2] = (int) (b*255);
ptr[offset*4 + 3] = 255; // opaque alpha
}
// Host driver: generate SPHERES random spheres, upload them to constant
// memory, render a DIM x DIM image on the GPU, and report the elapsed time.
int main(void){
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, 0);
    CPUBitmap bitmap(DIM, DIM);
    unsigned char *dev_bitmap;
    hipMalloc((void **)&dev_bitmap, bitmap.image_size());
    // Build the sphere list on the host with random colors/positions/radii.
    Sphere *temp_s = (Sphere*)malloc(sizeof(Sphere)*SPHERES);
    for(int i = 0; i < SPHERES; i++){
        temp_s[i].r = rnd(1.0f);
        temp_s[i].g = rnd(1.0f);
        temp_s[i].b = rnd(1.0f);
        temp_s[i].x = rnd(1000.0f) - 500;
        temp_s[i].y = rnd(1000.0f) - 500;
        temp_s[i].z = rnd(1000.0f) - 500;
        temp_s[i].radius = rnd(100.0f) + 20;
    }
    // Upload to the __constant__ symbol s (broadcast-friendly, read-only).
    hipMemcpyToSymbol(s, temp_s, sizeof(Sphere)*SPHERES);
    free(temp_s);
    dim3 grids(DIM/16, DIM/16);
    dim3 threads(16, 16);
    hipLaunchKernelGGL(( kernel), dim3(grids), dim3(threads), 0, 0, dev_bitmap);
    hipMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), hipMemcpyDeviceToHost);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    float dt;
    hipEventElapsedTime(&dt, start, stop);
    printf("Time taken: %3.1f ms\n", dt);
    bitmap.display_and_exit();
    hipFree(dev_bitmap);
    // NOTE: the old hipFree(s) was removed -- s is a __constant__ symbol, not
    // a pointer returned by hipMalloc, so freeing it is invalid.
    hipEventDestroy(start);
    hipEventDestroy(stop);
    return 0;
}
| 709efcfcf973334738d23f8fbc6ce04e484b7251.cu | #include "../common/book.h"
#include "../common/cpu_bitmap.h"
#define rnd(x) (x*rand()/RAND_MAX)
#define SPHERES 20
#define DIM 10240
#define INF 2e10f
// A colored sphere; hit() intersects a camera ray that travels parallel to
// the z axis through pixel (ox, oy).
struct Sphere{
float r, g, b;
float radius;
float x, y, z;
// Returns the z coordinate of the intersection (closer = larger) and writes
// a 0..1 depth-based shading factor into *n; returns -INF on a miss (in
// which case *n is left unmodified).
__device__ float hit(float ox, float oy, float *n){
float dx = ox - x;
float dy = oy - y;
if(dx*dx + dy*dy < radius*radius){
float dz = sqrtf(radius*radius - dx*dx - dy*dy);
*n = dz/sqrtf(radius*radius);
return dz + z;
}
return -INF;
}
};
__constant__ Sphere s[SPHERES];
// One thread per pixel: cast a z-axis ray against every sphere in constant
// memory, keep the nearest hit, and write the shaded RGBA pixel.
// Launch geometry must tile DIM x DIM exactly (grid DIM/16, block 16x16),
// as there is no bounds guard.
__global__ void kernel(unsigned char *ptr){
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
int offset = x + y*blockDim.x*gridDim.x; // row-major linear pixel index
// Center the image: ray origin in scene coordinates.
float ox = (x - DIM/2);
float oy = (y - DIM/2);
float r=0, g=0, b=0;
float maxz = -INF;
for(int i=0; i<SPHERES; i++){
// n is only written on a hit; on a miss t == -INF fails t > maxz, so the
// uninitialized n is never read.
float n,t = s[i].hit(ox, oy, &n);
if(t > maxz){
float fscale = n; // depth-based shading factor from hit()
r = s[i].r*fscale;
g = s[i].g*fscale;
b = s[i].b*fscale;
maxz = t;
}
}
ptr[offset*4 + 0] = (int) (r*255);
ptr[offset*4 + 1] = (int) (g*255);
ptr[offset*4 + 2] = (int) (b*255);
ptr[offset*4 + 3] = 255; // opaque alpha
}
// Host driver: generate SPHERES random spheres, upload them to constant
// memory, render a DIM x DIM image on the GPU, and report the elapsed time.
int main(void){
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    CPUBitmap bitmap(DIM, DIM);
    unsigned char *dev_bitmap;
    cudaMalloc((void **)&dev_bitmap, bitmap.image_size());
    // Build the sphere list on the host with random colors/positions/radii.
    Sphere *temp_s = (Sphere*)malloc(sizeof(Sphere)*SPHERES);
    for(int i = 0; i < SPHERES; i++){
        temp_s[i].r = rnd(1.0f);
        temp_s[i].g = rnd(1.0f);
        temp_s[i].b = rnd(1.0f);
        temp_s[i].x = rnd(1000.0f) - 500;
        temp_s[i].y = rnd(1000.0f) - 500;
        temp_s[i].z = rnd(1000.0f) - 500;
        temp_s[i].radius = rnd(100.0f) + 20;
    }
    // Upload to the __constant__ symbol s (broadcast-friendly, read-only).
    cudaMemcpyToSymbol(s, temp_s, sizeof(Sphere)*SPHERES);
    free(temp_s);
    dim3 grids(DIM/16, DIM/16);
    dim3 threads(16, 16);
    kernel<<<grids, threads>>>(dev_bitmap);
    cudaMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float dt;
    cudaEventElapsedTime(&dt, start, stop);
    printf("Time taken: %3.1f ms\n", dt);
    bitmap.display_and_exit();
    cudaFree(dev_bitmap);
    // NOTE: the old cudaFree(s) was removed -- s is a __constant__ symbol,
    // not a pointer returned by cudaMalloc, so freeing it is invalid.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
|
31739c81510d5626c0bc7a10208b5de022659fab.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "util.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <sys/timeb.h>
using namespace std;
#define cudaAssertSuccess(ans) { _cudaAssertSuccess((ans), __FILE__, __LINE__); }
__device__ __constant__ unsigned char d_const_colormap[(MAX_ITERATION + 1) * 3 * sizeof(unsigned char)];
__global__ void generate_image(unsigned char *d_image, unsigned char *d_colormap);
// Abort the process with a readable diagnostic when a HIP API call fails.
// file/line are supplied by the cudaAssertSuccess() wrapper macro via
// __FILE__/__LINE__; the parameter is const char* because string literals
// may not bind to char* in C++11 and later.
inline void _cudaAssertSuccess(hipError_t code, const char *file, int line) {
    if(code != hipSuccess) {
        fprintf(stderr, "_cudaAssertSuccess: %s %s %d\n", hipGetErrorString(code), file, line);
        exit(code);
    }
}
// Mandelbrot renderer (global-memory colormap variant): each thread colors
// PIXELS consecutive pixels of the RGBA image, mapping iteration count
// through d_colormap.
__global__ void generate_image(unsigned char *d_image, unsigned char *d_colormap) {
double c_re, c_im, x, y, x_new;
int row, col, idx, iteration;
int width = WIDTH;
int height = HEIGHT;
int max = MAX_ITERATION;
// Flatten the 2D grid / 2D block into one linear thread index.
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
idx = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
if (idx >= width * height) return;
for (int i = 0; i < PIXELS; i++) {
int new_idx = PIXELS * idx + i;
row = new_idx / WIDTH;
col = new_idx % WIDTH;
// Map pixel to the complex plane, spanning 4 units across the width.
c_re = (col - width / 2.0)*4.0 / width;
c_im = (row - height / 2.0)*4.0 / width;
x = 0, y = 0;
iteration = 0;
// Escape-time iteration: z = z^2 + c until |z| > 2 or the cap is reached.
while (x*x + y*y <= 4 && iteration < max) {
x_new = x*x - y*y + c_re;
y = 2 * x*y + c_im;
x = x_new;
iteration++;
}
if (iteration > max) {
iteration = max;
}
d_image[4 * new_idx + 0] = d_colormap[iteration * 3 + 0];
d_image[4 * new_idx + 1] = d_colormap[iteration * 3 + 1];
d_image[4 * new_idx + 2] = d_colormap[iteration * 3 + 2];
d_image[4 * new_idx + 3] = 255;
}
}
// Mandelbrot renderer (constant-memory variant): identical to the overload
// above but reads the palette from d_const_colormap in __constant__ memory.
__global__ void generate_image(unsigned char *d_image) {
double c_re, c_im, x, y, x_new;
int row, col, idx, iteration;
int width = WIDTH;
int height = HEIGHT;
int max = MAX_ITERATION;
// Flatten the 2D grid / 2D block into one linear thread index.
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
idx = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
if(idx >= width * height) return;
for(int i = 0; i < PIXELS; i++) {
int new_idx = PIXELS * idx + i;
row = new_idx / WIDTH;
col = new_idx % WIDTH;
// Map pixel to the complex plane, spanning 4 units across the width.
c_re = (col - width / 2.0)*4.0 / width;
c_im = (row - height / 2.0)*4.0 / width;
x = 0, y = 0;
iteration = 0;
// Escape-time iteration: z = z^2 + c until |z| > 2 or the cap is reached.
while(x*x + y*y <= 4 && iteration < max) {
x_new = x*x - y*y + c_re;
y = 2 * x*y + c_im;
x = x_new;
iteration++;
}
if(iteration > max) {
iteration = max;
}
d_image[4 * new_idx + 0] = d_const_colormap[iteration * 3 + 0];
d_image[4 * new_idx + 1] = d_const_colormap[iteration * 3 + 1];
d_image[4 * new_idx + 2] = d_const_colormap[iteration * 3 + 2];
d_image[4 * new_idx + 3] = 255;
}
}
// Render the fractal REPEAT times on the GPU, timing each pass (including the
// one-off host->device upload), saving each image and reporting progress.
// Uses goto-based cleanup: Error frees the colormap (global-memory variant
// only), Error_Free_Image frees the image buffer, Error_No_Free just returns.
void fractals(unsigned char *image, unsigned char *colormap, double *times) {
    unsigned char *d_image, *d_colormap;
    struct timeb start[REPEAT], end[REPEAT], before_data_send, after_data_send;
    char path[255];
    double data_send_time;
    dim3 grid(GRID_SIZE_X, GRID_SIZE_Y);
    dim3 block(BLOCK_SIZE_X, BLOCK_SIZE_Y);
    hipError_t cudaStatus;
    ftime(&before_data_send);
    cudaStatus = hipSetDevice(0);
    if(cudaStatus != hipSuccess) {
        fprintf(stderr, "hipSetDevice failed!  Do you have a CUDA-capable GPU installed?");
        goto Error_No_Free;
    }
    cudaStatus = hipMalloc(&d_image, WIDTH * HEIGHT * 4 * sizeof(unsigned char));
    if(cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMalloc failed!");
        goto Error_No_Free;
    }
    if (USE_GLOBAL_MEMORY == 0) {
        // Constant-memory path disabled: keep the palette in global memory.
        cudaStatus = hipMalloc(&d_colormap, (MAX_ITERATION + 1) * 3 * sizeof(unsigned char));
        if (cudaStatus != hipSuccess) {
            fprintf(stderr, "hipMalloc failed!");
            goto Error_Free_Image;
        }
        cudaStatus = hipMemcpy(d_colormap, colormap, (MAX_ITERATION + 1) * 3 * sizeof(unsigned char), hipMemcpyHostToDevice);
        if (cudaStatus != hipSuccess) {
            fprintf(stderr, "hipMemcpy failed!");
            goto Error;
        }
    } else {
        cudaStatus = hipMemcpyToSymbol(d_const_colormap, colormap, (MAX_ITERATION + 1) * 3 * sizeof(unsigned char));
        if (cudaStatus != hipSuccess) {
            fprintf(stderr, "hipMemcpy failed!");
            goto Error;
        }
    }
    memset(image, 0, WIDTH * HEIGHT * 4 * sizeof(unsigned char));
    // BUG FIX: the return value was previously discarded, so the check below
    // tested the *previous* call's status.
    cudaStatus = hipMemcpy(d_image, image, WIDTH * HEIGHT * 4 * sizeof(unsigned char), hipMemcpyHostToDevice);
    if(cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
        goto Error;
    }
    ftime(&after_data_send);
    data_send_time = after_data_send.time - before_data_send.time + ((double)after_data_send.millitm - (double)before_data_send.millitm) / 1000.0;
    for(int i = 0; i < REPEAT; i++) {
        ftime(&start[i]);
        if (USE_GLOBAL_MEMORY == 0) {
            hipLaunchKernelGGL(( generate_image) , dim3(grid), dim3(block) , 0, 0, d_image, d_colormap);
        }
        else {
            hipLaunchKernelGGL(( generate_image) , dim3(grid), dim3(block) , 0, 0, d_image);
        }
        cudaStatus = hipGetLastError();
        if(cudaStatus != hipSuccess) {
            fprintf(stderr, "fractal launch failed: %s\n", hipGetErrorString(cudaStatus));
            goto Error;
        }
        cudaStatus = hipDeviceSynchronize();
        if(cudaStatus != hipSuccess) {
            fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
            goto Error;
        }
        cudaStatus = hipMemcpy(image, d_image, WIDTH * HEIGHT * 4 * sizeof(unsigned char), hipMemcpyDeviceToHost);
        if(cudaStatus != hipSuccess) {
            fprintf(stderr, "hipMemcpy failed!");
            goto Error;
        }
        ftime(&end[i]);
        times[i] = data_send_time + end[i].time - start[i].time + ((double)end[i].millitm - (double)start[i].millitm) / 1000.0;
        sprintf(path, IMAGE, "gpu", i);
        save_image(path, image, WIDTH, HEIGHT);
        progress("gpu", i, times[i]);
    }
Error:
    if (USE_GLOBAL_MEMORY == 0) {
        hipFree(d_colormap);
    }
Error_Free_Image:
    hipFree(d_image);
Error_No_Free:
    return;  // a label must precede a statement; also makes the fall-through explicit
}
// Host entry: build the colormap, render REPEAT fractal frames on the GPU,
// then print the timing report.
int main(int argc, char** argv) {
    double *times = (double*)malloc(sizeof(double)*REPEAT);
    unsigned char *colormap = (unsigned char*)malloc((MAX_ITERATION + 1) * 3);
    unsigned char *image = (unsigned char*)malloc(WIDTH * HEIGHT * 4);
    init_colormap(MAX_ITERATION, colormap);
    fractals(image, colormap, times);
    getchar();  // keep the console window open until a key is pressed
    report("gpu", times);
    free(image);
    free(colormap);
    free(times);  // was leaked; also dropped the unused `struct arg a` local
    return 0;
}
#include "device_launch_parameters.h"
#include "util.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <sys/timeb.h>
using namespace std;
#define cudaAssertSuccess(ans) { _cudaAssertSuccess((ans), __FILE__, __LINE__); }
__device__ __constant__ unsigned char d_const_colormap[(MAX_ITERATION + 1) * 3 * sizeof(unsigned char)];
__global__ void generate_image(unsigned char *d_image, unsigned char *d_colormap);
// Abort the process with a diagnostic when a CUDA API call fails.
// Invoked through the cudaAssertSuccess() macro, which supplies
// __FILE__ / __LINE__ at the call site.
// Fix: `file` is now const-qualified — the macro passes the string literal
// __FILE__, and a string-literal-to-`char*` conversion is ill-formed in
// modern C++ (removed in C++11).
inline void _cudaAssertSuccess(cudaError_t code, const char *file, int line) {
    if (code != cudaSuccess) {
        fprintf(stderr, "_cudaAssertSuccess: %s %s %d\n", cudaGetErrorString(code), file, line);
        exit(code); // non-zero cudaError_t doubles as the process exit status
    }
}
// Mandelbrot kernel — global-memory colormap variant (USE_GLOBAL_MEMORY == 0).
// Each thread renders PIXELS consecutive pixels of the WIDTH x HEIGHT RGBA
// image. Expects a 2D grid / 2D block launch providing at least
// WIDTH*HEIGHT/PIXELS threads in total.
//   d_image    - out: WIDTH*HEIGHT*4 bytes, RGBA per pixel
//   d_colormap - (MAX_ITERATION+1)*3 RGB bytes indexed by iteration count;
//                read-only, hence const __restrict__ (enables the RO cache)
__global__ void generate_image(unsigned char *d_image, const unsigned char *__restrict__ d_colormap) {
    double c_re, c_im, x, y, x_new;
    int row, col, iteration;
    int width = WIDTH;
    int height = HEIGHT;
    int max = MAX_ITERATION;
    // Flatten the 2D grid / 2D block coordinates into a linear thread id.
    int blockId = blockIdx.x + blockIdx.y * gridDim.x;
    int idx = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
    for (int i = 0; i < PIXELS; i++) {
        int new_idx = PIXELS * idx + i;
        // Fix: guard every pixel index, not just the thread id — with
        // PIXELS > 1 a thread id below width*height could still address
        // pixels past the end of the image on an oversized grid.
        if (new_idx >= width * height) return;
        row = new_idx / width;
        col = new_idx % width;
        // Map the pixel to the complex plane: 4-unit span, centered.
        c_re = (col - width / 2.0) * 4.0 / width;
        c_im = (row - height / 2.0) * 4.0 / width;
        x = 0, y = 0;
        iteration = 0;
        // Escape-time iteration: z <- z^2 + c until |z|^2 > 4 or max reached.
        while (x * x + y * y <= 4 && iteration < max) {
            x_new = x * x - y * y + c_re;
            y = 2 * x * y + c_im;
            x = x_new;
            iteration++;
        }
        // Note: the loop bound guarantees iteration <= max here, so no clamp
        // is needed (the original `if (iteration > max)` branch was dead code).
        d_image[4 * new_idx + 0] = d_colormap[iteration * 3 + 0];
        d_image[4 * new_idx + 1] = d_colormap[iteration * 3 + 1];
        d_image[4 * new_idx + 2] = d_colormap[iteration * 3 + 2];
        d_image[4 * new_idx + 3] = 255; // fully opaque alpha
    }
}
// Mandelbrot kernel — constant-memory colormap variant (USE_GLOBAL_MEMORY != 0).
// Colors are read from the __constant__ symbol d_const_colormap, which must be
// filled via cudaMemcpyToSymbol before launch (constant memory broadcasts
// efficiently when all lanes of a warp read the same iteration count).
// Each thread renders PIXELS consecutive pixels of the WIDTH x HEIGHT RGBA
// image; expects a 2D grid / 2D block launch providing at least
// WIDTH*HEIGHT/PIXELS threads in total.
__global__ void generate_image(unsigned char *d_image) {
    double c_re, c_im, x, y, x_new;
    int row, col, iteration;
    int width = WIDTH;
    int height = HEIGHT;
    int max = MAX_ITERATION;
    // Flatten the 2D grid / 2D block coordinates into a linear thread id.
    int blockId = blockIdx.x + blockIdx.y * gridDim.x;
    int idx = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
    for (int i = 0; i < PIXELS; i++) {
        int new_idx = PIXELS * idx + i;
        // Fix: guard every pixel index, not just the thread id — with
        // PIXELS > 1 a thread id below width*height could still address
        // pixels past the end of the image on an oversized grid.
        if (new_idx >= width * height) return;
        row = new_idx / width;
        col = new_idx % width;
        // Map the pixel to the complex plane: 4-unit span, centered.
        c_re = (col - width / 2.0) * 4.0 / width;
        c_im = (row - height / 2.0) * 4.0 / width;
        x = 0, y = 0;
        iteration = 0;
        // Escape-time iteration: z <- z^2 + c until |z|^2 > 4 or max reached.
        while (x * x + y * y <= 4 && iteration < max) {
            x_new = x * x - y * y + c_re;
            y = 2 * x * y + c_im;
            x = x_new;
            iteration++;
        }
        // Note: the loop bound guarantees iteration <= max here, so no clamp
        // is needed (the original `if (iteration > max)` branch was dead code).
        d_image[4 * new_idx + 0] = d_const_colormap[iteration * 3 + 0];
        d_image[4 * new_idx + 1] = d_const_colormap[iteration * 3 + 1];
        d_image[4 * new_idx + 2] = d_const_colormap[iteration * 3 + 2];
        d_image[4 * new_idx + 3] = 255; // fully opaque alpha
    }
}
// Render REPEAT Mandelbrot frames on the GPU, save each as an image file, and
// record per-frame wall-clock times.
//   image    - host RGBA buffer of WIDTH*HEIGHT*4 bytes, overwritten per frame
//   colormap - (MAX_ITERATION+1)*3 RGB bytes mapping iteration count -> color
//   times    - out: REPEAT per-frame durations in seconds; every entry also
//              includes the one-time host->device upload cost (data_send_time)
// Colormap placement follows USE_GLOBAL_MEMORY: 0 -> global-memory buffer
// d_colormap, otherwise the __constant__ symbol d_const_colormap.
void fractals(unsigned char *image, unsigned char *colormap, double *times) {
    // NULL-init so the cleanup labels can free them unconditionally
    // (cudaFree(NULL) is a no-op).
    unsigned char *d_image = NULL;
    unsigned char *d_colormap = NULL;
    struct timeb start[REPEAT], end[REPEAT], before_data_send, after_data_send;
    char path[255];
    double data_send_time;
    dim3 grid(GRID_SIZE_X, GRID_SIZE_Y);
    dim3 block(BLOCK_SIZE_X, BLOCK_SIZE_Y);
    cudaError_t cudaStatus;
    ftime(&before_data_send);
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error_No_Free;
    }
    cudaStatus = cudaMalloc(&d_image, WIDTH * HEIGHT * 4 * sizeof(unsigned char));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error_No_Free;
    }
    if (USE_GLOBAL_MEMORY == 0) {
        cudaStatus = cudaMalloc(&d_colormap, (MAX_ITERATION + 1) * 3 * sizeof(unsigned char));
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMalloc failed!");
            goto Error_Free_Image;
        }
        cudaStatus = cudaMemcpy(d_colormap, colormap, (MAX_ITERATION + 1) * 3 * sizeof(unsigned char), cudaMemcpyHostToDevice);
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMemcpy failed!");
            goto Error;
        }
    } else {
        // Constant-memory path: upload once through the symbol.
        cudaStatus = cudaMemcpyToSymbol(d_const_colormap, colormap, (MAX_ITERATION + 1) * 3 * sizeof(unsigned char));
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMemcpy failed!");
            goto Error;
        }
    }
    memset(image, 0, WIDTH * HEIGHT * 4 * sizeof(unsigned char));
    // BUG FIX: the return value of this copy was previously discarded, so the
    // check below re-tested the status of the *preceding* call instead.
    cudaStatus = cudaMemcpy(d_image, image, WIDTH * HEIGHT * 4 * sizeof(unsigned char), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    ftime(&after_data_send);
    data_send_time = after_data_send.time - before_data_send.time + ((double)after_data_send.millitm - (double)before_data_send.millitm) / 1000.0;
    for (int i = 0; i < REPEAT; i++) {
        ftime(&start[i]);
        if (USE_GLOBAL_MEMORY == 0) {
            generate_image<<<grid, block>>>(d_image, d_colormap);
        }
        else {
            generate_image<<<grid, block>>>(d_image);
        }
        // Launch-configuration errors only surface via cudaGetLastError().
        cudaStatus = cudaGetLastError();
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "fractal launch failed: %s\n", cudaGetErrorString(cudaStatus));
            goto Error;
        }
        // Block until the kernel finishes so asynchronous faults are caught
        // here (and so the timing below covers the whole kernel).
        cudaStatus = cudaDeviceSynchronize();
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
            goto Error;
        }
        cudaStatus = cudaMemcpy(image, d_image, WIDTH * HEIGHT * 4 * sizeof(unsigned char), cudaMemcpyDeviceToHost);
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMemcpy failed!");
            goto Error;
        }
        ftime(&end[i]);
        times[i] = data_send_time + end[i].time - start[i].time + ((double)end[i].millitm - (double)start[i].millitm) / 1000.0;
        sprintf(path, IMAGE, "gpu", i);
        save_image(path, image, WIDTH, HEIGHT);
        progress("gpu", i, times[i]);
    }
Error:
    if (USE_GLOBAL_MEMORY == 0) {
        cudaFree(d_colormap);
    }
Error_Free_Image:
    cudaFree(d_image);
Error_No_Free:
    ; // fix: a label must precede a statement (empty statement added)
}
// Program entry point: allocate host buffers, render all frames on the GPU,
// wait for a key press (so the console can be inspected), then write the
// timing report and release every allocation.
// Fixes: removed the unused `struct arg a;`, added a NULL check on the host
// allocations, and freed `times` (previously leaked).
int main(int argc, char** argv) {
    (void)argc; // command-line arguments are unused
    (void)argv;
    double *times = (double*)malloc(sizeof(double) * REPEAT);
    unsigned char *colormap = (unsigned char*)malloc((MAX_ITERATION + 1) * 3);
    unsigned char *image = (unsigned char*)malloc(WIDTH * HEIGHT * 4);
    // Bail out early rather than dereference NULL further down.
    if (times == NULL || colormap == NULL || image == NULL) {
        fprintf(stderr, "host memory allocation failed\n");
        free(image);
        free(colormap);
        free(times);
        return 1;
    }
    init_colormap(MAX_ITERATION, colormap);
    fractals(image, colormap, times);
    getchar(); // pause before printing the report
    report("gpu", times);
    free(image);
    free(colormap);
    free(times); // fix: was leaked previously
    return 0;
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.