hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
5e52d7a327820d6c264b934e8711fb88b4aab3ee.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2017 Stanford, NVIDIA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "model.h"
#include "cuda_helper.h"
Tensor FFModel::softmax(std::string name,
const Tensor& _logit,
const Tensor& _label)
{
assert(_logit.numDim == 2);
assert(_label.numDim == 2);
Softmax *sm = new Softmax(*this, name, _logit, _label);
layers.push_back(sm);
return sm->output;
}
Softmax::Softmax(FFModel& model,
const std::string& pcname,
const Tensor& _logit,
const Tensor& _label)
: Op(pcname, _logit, _label), profiling(model.config.profiling)
{
// Retrive the task indexspace for the op
task_is = IndexSpaceT<2>(model.get_or_create_task_is(2, pcname));
Context ctx = model.config.lg_ctx;
Runtime* runtime = model.config.lg_hlr;
Rect<2> part_rect = runtime->get_index_space_domain(ctx, task_is);
int num_par_c = part_rect.hi[0] - part_rect.lo[0] + 1;
int num_par_n = part_rect.hi[1] - part_rect.lo[1] + 1;
// Current require data parallelism for Softmax
assert(num_par_c == 1);
{
const int dims[2] = {_logit.adim[1], _logit.adim[0]};
output = model.create_tensor<2>(dims, task_is, DT_FLOAT);
}
// Compute partition bound for input
Rect<2> logit_rect = runtime->get_index_partition_color_space(
ctx, inputs[0].part.get_index_partition());
Rect<2> label_rect = runtime->get_index_partition_color_space(
ctx, inputs[1].part.get_index_partition());
if (logit_rect == part_rect) {
input_lps[0] = inputs[0].part;
input_grad_lps[0] = inputs[0].part_grad;
} else {
model.create_disjoint_partition(
inputs[0], task_is, input_lps[0], input_grad_lps[0]);
}
if (label_rect == part_rect) {
input_lps[1] = inputs[1].part;
input_grad_lps[1] = inputs[1].part_grad;
} else {
model.create_disjoint_partition(
inputs[1], task_is, input_lps[1], input_grad_lps[1]);
}
}
/*
regions[0]: input
regions[1]: output
*/
OpMeta* Softmax::init_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 2);
assert(task->regions.size() == 2);
//const Softmax* softmax = (Softmax*) task->args;
TensorAccessorR<float, 2> acc_input(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<float, 2> acc_output(
regions[1], task->regions[1], FID_DATA, ctx, runtime,
false/*readutput*/);
FFHandler handle = *((const FFHandler*) task->local_args);
SoftmaxMeta* m = new SoftmaxMeta(handle);
checkCUDNN(cudnnCreateTensorDescriptor(&m->inputTensor));
//checkCUDNN(cudnnCreateTensorDescriptor(&m->outputTensor));
assert(acc_input.rect == acc_output.rect);
int input_c = acc_input.rect.hi[0] - acc_input.rect.lo[0] + 1;
int input_n = acc_input.rect.hi[1] - acc_input.rect.lo[1] + 1;
checkCUDNN(cudnnSetTensor4dDescriptor(m->inputTensor,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
input_n, input_c, 1, 1));
return m;
}
__host__
void Softmax::init(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Rect<2> rect = runtime->get_index_space_domain(ctx, task_is);
int idx = 0;
for (PointInRectIterator<2> it(rect); it(); it++) {
FFHandler handle = ff.handlers[idx++];
argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler)));
}
IndexLauncher launcher(SOFTMAX_INIT_TASK_ID, task_is,
TaskArgument(this, sizeof(Softmax)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(output.part, 0/*projection id*/,
WRITE_DISCARD, EXCLUSIVE, output.region));
launcher.add_field(1, FID_DATA);
FutureMap fm = runtime->execute_index_space(ctx, launcher);
fm.wait_all_results();
idx = 0;
for (PointInRectIterator<2> it(rect); it(); it++) {
meta[idx++] = fm.get_result<OpMeta*>(*it);
}
}
/*
regions[0](I): input
regions[1](O): output
*/
__host__
void Softmax::forward_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 2);
assert(task->regions.size() == 2);
float alpha = 1.0f, beta = 0.0f;
const Softmax* softmax = (Softmax*) task->args;
const SoftmaxMeta* m = *((SoftmaxMeta**) task->local_args);
TensorAccessorR<float, 2> acc_input(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<float, 2> acc_output(
regions[1], task->regions[1], FID_DATA, ctx, runtime,
false/*readOutput*/);
hipEvent_t t_start, t_end;
if (softmax->profiling) {
hipEventCreate(&t_start);
hipEventCreate(&t_end);
hipEventRecord(t_start);
}
#ifndef DISABLE_LEGION_CUDA_HIJACK
hipStream_t stream;
checkCUDA(hipStreamCreate(&stream));
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
#endif
checkCUDNN(cudnnSoftmaxForward(m->handle.dnn,
CUDNN_SOFTMAX_ACCURATE,
CUDNN_SOFTMAX_MODE_CHANNEL,
&alpha, m->inputTensor, acc_input.ptr,
&beta, m->inputTensor, acc_output.ptr));
if (softmax->profiling) {
hipEventRecord(t_end);
checkCUDA(hipEventSynchronize(t_end));
float elapsed = 0;
checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end));
hipEventDestroy(t_start);
hipEventDestroy(t_end);
printf("Softmax forward time = %.2fms\n", elapsed);
}
}
__host__
void Softmax::forward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Rect<2> rect = runtime->get_index_space_domain(ctx, task_is);
int idx = 0;
for (PointInRectIterator<2> it(rect); it(); it++) {
OpMeta* mp = meta[idx++];
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
}
IndexLauncher launcher(SOFTMAX_FWD_TASK_ID, task_is,
TaskArgument(this, sizeof(Softmax)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(output.part, 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, output.region));
launcher.add_field(1, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
__global__ void SoftmaxLossBackprop(float *input, const int *label, int num_labels, int batch_size)
{
CUDA_KERNEL_LOOP(i, batch_size)
{
int label_idx = label[i];
input[i * num_labels + label_idx] -= 1.0f;
}
}
/*
regions[0](O): input_grad
regions[1](I): output
regions[2](I): labels
*/
__host__
void Softmax::backward_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 3);
assert(task->regions.size() == 3);
const Softmax* softmax = (Softmax*) task->args;
const SoftmaxMeta* m = *((SoftmaxMeta**) task->local_args);
TensorAccessorW<float, 2> acc_input_grad(
regions[0], task->regions[0], FID_DATA, ctx, runtime,
true/*readOutput*/);
TensorAccessorR<float, 2> acc_output(
regions[1], task->regions[1], FID_DATA, ctx, runtime);
TensorAccessorR<int, 2> acc_label(
regions[2], task->regions[2], FID_DATA, ctx, runtime);
// make sure the image indices match!
assert(acc_label.rect.hi[1] == acc_output.rect.hi[1]);
assert(acc_label.rect.lo[1] == acc_output.rect.lo[1]);
assert(acc_input_grad.rect == acc_output.rect);
assert(acc_label.rect.lo[0] == acc_label.rect.hi[0]);
// make sure each sample only has one label
int num_samples = acc_output.rect.hi[1] - acc_output.rect.lo[1] + 1;
int num_labels = acc_output.rect.hi[0] - acc_output.rect.lo[0] + 1;
//assert(num_labels == 1000); // check that we have 1000 different labels
hipEvent_t t_start, t_end;
if (softmax->profiling) {
hipEventCreate(&t_start);
hipEventCreate(&t_end);
hipEventRecord(t_start);
}
#ifndef DISABLE_LEGION_CUDA_HIJACK
hipStream_t stream;
checkCUDA(hipStreamCreate(&stream));
checkCUDA(hipblasSetStream(m->handle.blas, stream));
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
#endif
checkCUDA(hipMemcpyAsync(acc_input_grad.ptr, acc_output.ptr,
acc_input_grad.rect.volume() * sizeof(float),
hipMemcpyDeviceToDevice));
//SoftmaxLossBackprop<<<GET_BLOCKS(num_samples), CUDA_NUM_THREADS>>>(
// acc_input_grad.ptr, acc_label.ptr, num_labels, num_samples);
// Accouting for batch size in SGD
float scalVal = 1.0f / static_cast<float>(num_samples);
checkCUDA(hipblasSscal(m->handle.blas, acc_input_grad.rect.volume(),
&scalVal, acc_input_grad.ptr, 1));
if (softmax->profiling) {
hipEventRecord(t_end);
checkCUDA(hipEventSynchronize(t_end));
float elapsed = 0;
checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end));
hipEventDestroy(t_start);
hipEventDestroy(t_end);
printf("Softmax backward time = %.2fms\n", elapsed);
}
}
__host__
void Softmax::backward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
int idx = 0;
Rect<2> rect = runtime->get_index_space_domain(ctx, task_is);
for (PointInRectIterator<2> it(rect); it(); it++) {
OpMeta* mp = meta[idx++];
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
}
IndexLauncher launcher(SOFTMAX_BWD_TASK_ID, task_is,
TaskArgument(this, sizeof(Softmax)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(input_grad_lps[0], 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, inputs[0].region_grad));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(output.part, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, output.region));
launcher.add_field(1, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(input_lps[1], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[1].region));
launcher.add_field(2, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
//void Softmax::update(const FFModel& ff)
//{
//}
| 5e52d7a327820d6c264b934e8711fb88b4aab3ee.cu | /* Copyright 2017 Stanford, NVIDIA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "model.h"
#include "cuda_helper.h"
Tensor FFModel::softmax(std::string name,
const Tensor& _logit,
const Tensor& _label)
{
assert(_logit.numDim == 2);
assert(_label.numDim == 2);
Softmax *sm = new Softmax(*this, name, _logit, _label);
layers.push_back(sm);
return sm->output;
}
Softmax::Softmax(FFModel& model,
const std::string& pcname,
const Tensor& _logit,
const Tensor& _label)
: Op(pcname, _logit, _label), profiling(model.config.profiling)
{
// Retrive the task indexspace for the op
task_is = IndexSpaceT<2>(model.get_or_create_task_is(2, pcname));
Context ctx = model.config.lg_ctx;
Runtime* runtime = model.config.lg_hlr;
Rect<2> part_rect = runtime->get_index_space_domain(ctx, task_is);
int num_par_c = part_rect.hi[0] - part_rect.lo[0] + 1;
int num_par_n = part_rect.hi[1] - part_rect.lo[1] + 1;
// Current require data parallelism for Softmax
assert(num_par_c == 1);
{
const int dims[2] = {_logit.adim[1], _logit.adim[0]};
output = model.create_tensor<2>(dims, task_is, DT_FLOAT);
}
// Compute partition bound for input
Rect<2> logit_rect = runtime->get_index_partition_color_space(
ctx, inputs[0].part.get_index_partition());
Rect<2> label_rect = runtime->get_index_partition_color_space(
ctx, inputs[1].part.get_index_partition());
if (logit_rect == part_rect) {
input_lps[0] = inputs[0].part;
input_grad_lps[0] = inputs[0].part_grad;
} else {
model.create_disjoint_partition(
inputs[0], task_is, input_lps[0], input_grad_lps[0]);
}
if (label_rect == part_rect) {
input_lps[1] = inputs[1].part;
input_grad_lps[1] = inputs[1].part_grad;
} else {
model.create_disjoint_partition(
inputs[1], task_is, input_lps[1], input_grad_lps[1]);
}
}
/*
regions[0]: input
regions[1]: output
*/
OpMeta* Softmax::init_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 2);
assert(task->regions.size() == 2);
//const Softmax* softmax = (Softmax*) task->args;
TensorAccessorR<float, 2> acc_input(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<float, 2> acc_output(
regions[1], task->regions[1], FID_DATA, ctx, runtime,
false/*readutput*/);
FFHandler handle = *((const FFHandler*) task->local_args);
SoftmaxMeta* m = new SoftmaxMeta(handle);
checkCUDNN(cudnnCreateTensorDescriptor(&m->inputTensor));
//checkCUDNN(cudnnCreateTensorDescriptor(&m->outputTensor));
assert(acc_input.rect == acc_output.rect);
int input_c = acc_input.rect.hi[0] - acc_input.rect.lo[0] + 1;
int input_n = acc_input.rect.hi[1] - acc_input.rect.lo[1] + 1;
checkCUDNN(cudnnSetTensor4dDescriptor(m->inputTensor,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
input_n, input_c, 1, 1));
return m;
}
__host__
void Softmax::init(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Rect<2> rect = runtime->get_index_space_domain(ctx, task_is);
int idx = 0;
for (PointInRectIterator<2> it(rect); it(); it++) {
FFHandler handle = ff.handlers[idx++];
argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler)));
}
IndexLauncher launcher(SOFTMAX_INIT_TASK_ID, task_is,
TaskArgument(this, sizeof(Softmax)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(output.part, 0/*projection id*/,
WRITE_DISCARD, EXCLUSIVE, output.region));
launcher.add_field(1, FID_DATA);
FutureMap fm = runtime->execute_index_space(ctx, launcher);
fm.wait_all_results();
idx = 0;
for (PointInRectIterator<2> it(rect); it(); it++) {
meta[idx++] = fm.get_result<OpMeta*>(*it);
}
}
/*
regions[0](I): input
regions[1](O): output
*/
__host__
void Softmax::forward_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 2);
assert(task->regions.size() == 2);
float alpha = 1.0f, beta = 0.0f;
const Softmax* softmax = (Softmax*) task->args;
const SoftmaxMeta* m = *((SoftmaxMeta**) task->local_args);
TensorAccessorR<float, 2> acc_input(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<float, 2> acc_output(
regions[1], task->regions[1], FID_DATA, ctx, runtime,
false/*readOutput*/);
cudaEvent_t t_start, t_end;
if (softmax->profiling) {
cudaEventCreate(&t_start);
cudaEventCreate(&t_end);
cudaEventRecord(t_start);
}
#ifndef DISABLE_LEGION_CUDA_HIJACK
cudaStream_t stream;
checkCUDA(cudaStreamCreate(&stream));
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
#endif
checkCUDNN(cudnnSoftmaxForward(m->handle.dnn,
CUDNN_SOFTMAX_ACCURATE,
CUDNN_SOFTMAX_MODE_CHANNEL,
&alpha, m->inputTensor, acc_input.ptr,
&beta, m->inputTensor, acc_output.ptr));
if (softmax->profiling) {
cudaEventRecord(t_end);
checkCUDA(cudaEventSynchronize(t_end));
float elapsed = 0;
checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end));
cudaEventDestroy(t_start);
cudaEventDestroy(t_end);
printf("Softmax forward time = %.2fms\n", elapsed);
}
}
__host__
void Softmax::forward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Rect<2> rect = runtime->get_index_space_domain(ctx, task_is);
int idx = 0;
for (PointInRectIterator<2> it(rect); it(); it++) {
OpMeta* mp = meta[idx++];
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
}
IndexLauncher launcher(SOFTMAX_FWD_TASK_ID, task_is,
TaskArgument(this, sizeof(Softmax)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(output.part, 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, output.region));
launcher.add_field(1, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
__global__ void SoftmaxLossBackprop(float *input, const int *label, int num_labels, int batch_size)
{
CUDA_KERNEL_LOOP(i, batch_size)
{
int label_idx = label[i];
input[i * num_labels + label_idx] -= 1.0f;
}
}
/*
regions[0](O): input_grad
regions[1](I): output
regions[2](I): labels
*/
__host__
void Softmax::backward_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 3);
assert(task->regions.size() == 3);
const Softmax* softmax = (Softmax*) task->args;
const SoftmaxMeta* m = *((SoftmaxMeta**) task->local_args);
TensorAccessorW<float, 2> acc_input_grad(
regions[0], task->regions[0], FID_DATA, ctx, runtime,
true/*readOutput*/);
TensorAccessorR<float, 2> acc_output(
regions[1], task->regions[1], FID_DATA, ctx, runtime);
TensorAccessorR<int, 2> acc_label(
regions[2], task->regions[2], FID_DATA, ctx, runtime);
// make sure the image indices match!
assert(acc_label.rect.hi[1] == acc_output.rect.hi[1]);
assert(acc_label.rect.lo[1] == acc_output.rect.lo[1]);
assert(acc_input_grad.rect == acc_output.rect);
assert(acc_label.rect.lo[0] == acc_label.rect.hi[0]);
// make sure each sample only has one label
int num_samples = acc_output.rect.hi[1] - acc_output.rect.lo[1] + 1;
int num_labels = acc_output.rect.hi[0] - acc_output.rect.lo[0] + 1;
//assert(num_labels == 1000); // check that we have 1000 different labels
cudaEvent_t t_start, t_end;
if (softmax->profiling) {
cudaEventCreate(&t_start);
cudaEventCreate(&t_end);
cudaEventRecord(t_start);
}
#ifndef DISABLE_LEGION_CUDA_HIJACK
cudaStream_t stream;
checkCUDA(cudaStreamCreate(&stream));
checkCUDA(cublasSetStream(m->handle.blas, stream));
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
#endif
checkCUDA(cudaMemcpyAsync(acc_input_grad.ptr, acc_output.ptr,
acc_input_grad.rect.volume() * sizeof(float),
cudaMemcpyDeviceToDevice));
//SoftmaxLossBackprop<<<GET_BLOCKS(num_samples), CUDA_NUM_THREADS>>>(
// acc_input_grad.ptr, acc_label.ptr, num_labels, num_samples);
// Accouting for batch size in SGD
float scalVal = 1.0f / static_cast<float>(num_samples);
checkCUDA(cublasSscal(m->handle.blas, acc_input_grad.rect.volume(),
&scalVal, acc_input_grad.ptr, 1));
if (softmax->profiling) {
cudaEventRecord(t_end);
checkCUDA(cudaEventSynchronize(t_end));
float elapsed = 0;
checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end));
cudaEventDestroy(t_start);
cudaEventDestroy(t_end);
printf("Softmax backward time = %.2fms\n", elapsed);
}
}
__host__
void Softmax::backward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
int idx = 0;
Rect<2> rect = runtime->get_index_space_domain(ctx, task_is);
for (PointInRectIterator<2> it(rect); it(); it++) {
OpMeta* mp = meta[idx++];
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
}
IndexLauncher launcher(SOFTMAX_BWD_TASK_ID, task_is,
TaskArgument(this, sizeof(Softmax)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(input_grad_lps[0], 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, inputs[0].region_grad));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(output.part, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, output.region));
launcher.add_field(1, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(input_lps[1], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[1].region));
launcher.add_field(2, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
//void Softmax::update(const FFModel& ff)
//{
//}
|
72430d9a9525b9592da447b12a3c8bb8f97eb6e8.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (C) 2018 ETH Zurich
// Copyright (C) 2018 UT-Battelle, LLC
// All rights reserved.
//
// See LICENSE.txt for terms of usage.
// See CITATION.txt for citation guidelines if you use this code for scientific publications.
//
// Author: Giovanni Balduzzi (gbalduzz@itp.phys.ethz.ch)
//
// This file implements the static methods of CtintHelper.
#include "dca/phys/dca_step/cluster_solver/ctint/device_helper/ctint_helper.cuh"
#include <stdexcept>
#include <mutex>
namespace dca {
namespace phys {
namespace solver {
namespace ctint {
// dca::phys::solver::ctint::
// Global helper instance.
__device__ __constant__ CtintHelper ctint_helper;
void CtintHelper::set(const int* add_r, int lda, const int* sub_r, int lds, const int nb,
const int nc, const int r0) {
static std::once_flag flag;
std::call_once(flag, [&] {
// Initialize real space cluster.
solver::details::ClusterHelper::set(nc, add_r, lda, sub_r, lds, 0);
CtintHelper host_helper;
host_helper.subdm_step_[0] = nb;
host_helper.subdm_step_[1] = nb * nb;
hipMemcpyToSymbol(ctint_helper, &host_helper, sizeof(CtintHelper));
});
}
} // namespace ctint
} // namespace solver
} // namespace phys
} // namespace dca
| 72430d9a9525b9592da447b12a3c8bb8f97eb6e8.cu | // Copyright (C) 2018 ETH Zurich
// Copyright (C) 2018 UT-Battelle, LLC
// All rights reserved.
//
// See LICENSE.txt for terms of usage.
// See CITATION.txt for citation guidelines if you use this code for scientific publications.
//
// Author: Giovanni Balduzzi (gbalduzz@itp.phys.ethz.ch)
//
// This file implements the static methods of CtintHelper.
#include "dca/phys/dca_step/cluster_solver/ctint/device_helper/ctint_helper.cuh"
#include <stdexcept>
#include <mutex>
namespace dca {
namespace phys {
namespace solver {
namespace ctint {
// dca::phys::solver::ctint::
// Global helper instance.
__device__ __constant__ CtintHelper ctint_helper;
void CtintHelper::set(const int* add_r, int lda, const int* sub_r, int lds, const int nb,
const int nc, const int r0) {
static std::once_flag flag;
std::call_once(flag, [&] {
// Initialize real space cluster.
solver::details::ClusterHelper::set(nc, add_r, lda, sub_r, lds, 0);
CtintHelper host_helper;
host_helper.subdm_step_[0] = nb;
host_helper.subdm_step_[1] = nb * nb;
cudaMemcpyToSymbol(ctint_helper, &host_helper, sizeof(CtintHelper));
});
}
} // namespace ctint
} // namespace solver
} // namespace phys
} // namespace dca
|
36d79103cee23be95adcd080dd09ef2ef7ee3c0f.hip | // !!! This is a file automatically generated by hipify!!!
/******************************************************************************
*cr
*cr (C) Copyright 2010-2013 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
#include "support.h"
#include "kernel.hip"
int main(int argc, char* argv[])
{
Timer timer;
// Initialize host variables ----------------------------------------------
// Variables
float *in_val_h;
float *in_pos_h;
float *out_h;
float *in_val_d;
float *in_pos_d;
float *out_d;
unsigned int grid_size, num_in;
hipError_t cuda_ret;
// Constants
const unsigned int maxVal = 1; // Maximum input value
const float cutoff = 3000.0f; // Cutoff distance for optimized computation
const float cutoff2 = cutoff*cutoff;
// Extras needed for input binning
unsigned int* binCounts_h;
unsigned int* binPtrs_h;
float* in_val_sorted_h;
float* in_pos_sorted_h;
unsigned int* binCounts_d;
unsigned int* binPtrs_d;
float* in_val_sorted_d;
float* in_pos_sorted_d;
enum Mode {CPU_NORMAL = 1, GPU_NORMAL, GPU_CUTOFF,
GPU_BINNED_CPU_PREPROCESSING, GPU_BINNED_GPU_PREPROCESSING};
Mode mode;
if(argc == 2) {
mode = (Mode) atoi(argv[1]);
grid_size = 20000;
num_in = 60000;
} else if(argc == 3) {
mode = (Mode) atoi(argv[1]);
grid_size = atoi(argv[2]);
num_in = 3*grid_size;
} else if(argc == 4) {
mode = (Mode) atoi(argv[1]);
grid_size = atoi(argv[2]);
num_in = atoi(argv[3]);
} else {
printf("\n Invalid input parameters."
"\n"
"\n Usage: ./binning <m> # Mode: m, Grid: 20,000, Input: 60,000"
"\n ./binning <m> <M> # Mode: m, Grid: M, Input: 3*M"
"\n ./binning <m> <M> <N> # Mode: m, Grid: M, Input: N"
"\n"
"\n Modes: 1 = CPU normal execution"
"\n 2 = GPU normal execution"
"\n 3 = GPU with cutoff"
"\n 4 = GPU with cutoff and binned input (CPU preprocessing)"
"\n 5 = GPU with cutoff and binned input (GPU preprocessing)"
"\n\n");
exit(0);
}
printf("\nSetting up the problem..."); fflush(stdout);
startTime(&timer);
initVector(&in_val_h, num_in, maxVal);
initVector(&in_pos_h, num_in, grid_size);
out_h = (float*) malloc(grid_size*sizeof(float));
memset((void*) out_h, 0, grid_size*sizeof(float));
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
printf(" Grid size = %u\n Input size = %u\n", grid_size, num_in);
// CPU Preprocessing ------------------------------------------------------
if(mode == GPU_BINNED_CPU_PREPROCESSING) {
printf("Preprocessing data on the CPU..."); fflush(stdout);
startTime(&timer);
// Data structures needed to preprocess the bins on the CPU
binCounts_h = (unsigned int*) malloc(NUM_BINS*sizeof(unsigned int));
memset((void*) out_h, 0, grid_size*sizeof(float));
binPtrs_h = (unsigned int*) malloc((NUM_BINS + 1)*sizeof(unsigned int));
in_val_sorted_h = (float*) malloc(num_in*sizeof(float));
in_pos_sorted_h = (float*) malloc(num_in*sizeof(float));
cpu_preprocess(in_val_h, in_pos_h, in_val_sorted_h, in_pos_sorted_h,
grid_size, num_in, binCounts_h, binPtrs_h);
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
}
// Allocate device variables ----------------------------------------------
if(mode != CPU_NORMAL) {
printf("Allocating device variables..."); fflush(stdout);
startTime(&timer);
// If preprocessing on the CPU, GPU doesn't need the unsorted arrays
if(mode != GPU_BINNED_CPU_PREPROCESSING) {
cuda_ret = hipMalloc((void**)&in_val_d, num_in * sizeof(float));
if(cuda_ret!=hipSuccess) FATAL("Unable to allocate device memory");
cuda_ret = hipMalloc((void**)&in_pos_d, num_in * sizeof(float));
if(cuda_ret!=hipSuccess) FATAL("Unable to allocate device memory");
}
// All modes need the output array
cuda_ret = hipMalloc((void**)&out_d, grid_size * sizeof(float));
if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory");
// Only binning modes need binning information
if(mode == GPU_BINNED_CPU_PREPROCESSING ||
mode == GPU_BINNED_GPU_PREPROCESSING) {
cuda_ret = hipMalloc((void**)&in_val_sorted_d,
num_in*sizeof(float));
if(cuda_ret != hipSuccess) {
FATAL("Unable to allocate device memory");
}
cuda_ret = hipMalloc((void**)&in_pos_sorted_d,
num_in*sizeof(float));
if(cuda_ret != hipSuccess) {
FATAL("Unable to allocate device memory");
}
cuda_ret = hipMalloc((void**)&binPtrs_d,
(NUM_BINS + 1)*sizeof(unsigned int));
if(cuda_ret != hipSuccess) {
FATAL("Unable to allocate device memory");
}
if(mode == GPU_BINNED_GPU_PREPROCESSING) {
// Only used in preprocessing but not the actual computation
cuda_ret = hipMalloc((void**)&binCounts_d,
NUM_BINS*sizeof(unsigned int));
if(cuda_ret != hipSuccess) {
FATAL("Unable to allocate device memory");
}
}
}
hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
}
// Copy host variables to device ------------------------------------------
if(mode != CPU_NORMAL) {
printf("Copying data from host to device..."); fflush(stdout);
startTime(&timer);
// If preprocessing on the CPU, GPU doesn't need the unsorted arrays
if(mode != GPU_BINNED_CPU_PREPROCESSING) {
cuda_ret = hipMemcpy(in_val_d, in_val_h, num_in * sizeof(float),
hipMemcpyHostToDevice);
if(cuda_ret != hipSuccess) {
FATAL("Unable to copy memory to the device");
}
cuda_ret = hipMemcpy(in_pos_d, in_pos_h, num_in * sizeof(float),
hipMemcpyHostToDevice);
if(cuda_ret != hipSuccess) {
FATAL("Unable to copy memory to the device");
}
}
// All modes need the output array
cuda_ret = hipMemset(out_d, 0, grid_size * sizeof(float));
if(cuda_ret != hipSuccess) FATAL("Unable to set device memory");
if(mode == GPU_BINNED_CPU_PREPROCESSING) {
cuda_ret = hipMemcpy(in_val_sorted_d, in_val_sorted_h,
num_in * sizeof(float), hipMemcpyHostToDevice);
if(cuda_ret != hipSuccess) {
FATAL("Unable to copy memory to the device");
}
cuda_ret = hipMemcpy(in_pos_sorted_d, in_pos_sorted_h,
num_in * sizeof(float), hipMemcpyHostToDevice);
if(cuda_ret != hipSuccess) {
FATAL("Unable to copy memory to the device");
}
cuda_ret = hipMemcpy(binPtrs_d, binPtrs_h,
(NUM_BINS + 1)*sizeof(unsigned int), hipMemcpyHostToDevice);
if(cuda_ret != hipSuccess) {
FATAL("Unable to copy memory to the device");
}
} else if(mode == GPU_BINNED_GPU_PREPROCESSING) {
// If preprocessing on the GPU, bin counts need to be initialized
// and nothing needs to be copied
cuda_ret=hipMemset(binCounts_d, 0, NUM_BINS*sizeof(unsigned int));
if(cuda_ret != hipSuccess) FATAL("Unable to set device memory");
}
hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
}
// GPU Preprocessing ------------------------------------------------------
if(mode == GPU_BINNED_GPU_PREPROCESSING) {
printf("Preprocessing data on the GPU..."); fflush(stdout);
startTime(&timer);
gpu_preprocess(in_val_d, in_pos_d, in_val_sorted_d, in_pos_sorted_d,
grid_size, num_in, binCounts_d, binPtrs_d);
cuda_ret = hipDeviceSynchronize();
if(cuda_ret != hipSuccess) FATAL("Unable to launch/execute kernel");
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
}
// Launch kernel ----------------------------------------------------------
printf("Launching kernel ");
if(mode == CPU_NORMAL) {
printf("(CPU normal version)...");fflush(stdout);
startTime(&timer);
cpu_normal(in_val_h, in_pos_h, out_h, grid_size, num_in);
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
} else if(mode == GPU_NORMAL) {
printf("(GPU normal version)...");fflush(stdout);
startTime(&timer);
gpu_normal(in_val_d, in_pos_d, out_d, grid_size, num_in);
cuda_ret = hipDeviceSynchronize();
if(cuda_ret != hipSuccess) FATAL("Unable to launch/execute kernel");
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
} else if(mode == GPU_CUTOFF) {
printf("(GPU with cuttoff)...");fflush(stdout);
startTime(&timer);
gpu_cutoff(in_val_d, in_pos_d, out_d, grid_size, num_in, cutoff2);
cuda_ret = hipDeviceSynchronize();
if(cuda_ret != hipSuccess) FATAL("Unable to launch/execute kernel");
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
} else if(mode == GPU_BINNED_CPU_PREPROCESSING ||
mode == GPU_BINNED_GPU_PREPROCESSING) {
printf("(GPU with input binning and cutoff)...");fflush(stdout);
startTime(&timer);
gpu_cutoff_binned(binPtrs_d, in_val_sorted_d, in_pos_sorted_d, out_d,
grid_size, cutoff2);
cuda_ret = hipDeviceSynchronize();
if(cuda_ret != hipSuccess) FATAL("Unable to launch/execute kernel");
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
} else {
printf("Invalid mode!\n");
exit(0);
}
// Copy device variables from host ----------------------------------------
if(mode != CPU_NORMAL) {
printf("Copying data from device to host..."); fflush(stdout);
startTime(&timer);
cuda_ret = hipMemcpy(out_h, out_d, grid_size * sizeof(float),
hipMemcpyDeviceToHost);
if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to host");
hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
}
// Verify correctness -----------------------------------------------------
printf("Verifying results..."); fflush(stdout);
if(mode == CPU_NORMAL || mode == GPU_NORMAL) {
verify(in_val_h, in_pos_h, out_h, grid_size, num_in);
} else {
verify_cutoff(in_val_h, in_pos_h, out_h, grid_size, num_in, cutoff2);
}
// Free memory ------------------------------------------------------------
free(in_val_h); free(in_pos_h); free(out_h);
if(mode == GPU_BINNED_CPU_PREPROCESSING) {
free(binCounts_h); free(binPtrs_h);
free(in_val_sorted_h); free(in_pos_sorted_h);
}
if(mode != CPU_NORMAL) {
if(mode != GPU_BINNED_CPU_PREPROCESSING) {
hipFree(in_val_d); hipFree(in_pos_d);
}
hipFree(out_d);
if(mode == GPU_BINNED_CPU_PREPROCESSING ||
mode == GPU_BINNED_GPU_PREPROCESSING) {
hipFree(in_val_sorted_d); hipFree(in_pos_sorted_d);
hipFree(binPtrs_d);
if(mode == GPU_BINNED_GPU_PREPROCESSING) {
hipFree(binCounts_d);
}
}
}
return 0;
}
| 36d79103cee23be95adcd080dd09ef2ef7ee3c0f.cu | /******************************************************************************
*cr
*cr (C) Copyright 2010-2013 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
#include "support.h"
#include "kernel.cu"
int main(int argc, char* argv[])
{
Timer timer;
// Initialize host variables ----------------------------------------------
// Variables
float *in_val_h;
float *in_pos_h;
float *out_h;
float *in_val_d;
float *in_pos_d;
float *out_d;
unsigned int grid_size, num_in;
cudaError_t cuda_ret;
// Constants
const unsigned int maxVal = 1; // Maximum input value
const float cutoff = 3000.0f; // Cutoff distance for optimized computation
const float cutoff2 = cutoff*cutoff;
// Extras needed for input binning
unsigned int* binCounts_h;
unsigned int* binPtrs_h;
float* in_val_sorted_h;
float* in_pos_sorted_h;
unsigned int* binCounts_d;
unsigned int* binPtrs_d;
float* in_val_sorted_d;
float* in_pos_sorted_d;
enum Mode {CPU_NORMAL = 1, GPU_NORMAL, GPU_CUTOFF,
GPU_BINNED_CPU_PREPROCESSING, GPU_BINNED_GPU_PREPROCESSING};
Mode mode;
if(argc == 2) {
mode = (Mode) atoi(argv[1]);
grid_size = 20000;
num_in = 60000;
} else if(argc == 3) {
mode = (Mode) atoi(argv[1]);
grid_size = atoi(argv[2]);
num_in = 3*grid_size;
} else if(argc == 4) {
mode = (Mode) atoi(argv[1]);
grid_size = atoi(argv[2]);
num_in = atoi(argv[3]);
} else {
printf("\n Invalid input parameters."
"\n"
"\n Usage: ./binning <m> # Mode: m, Grid: 20,000, Input: 60,000"
"\n ./binning <m> <M> # Mode: m, Grid: M, Input: 3*M"
"\n ./binning <m> <M> <N> # Mode: m, Grid: M, Input: N"
"\n"
"\n Modes: 1 = CPU normal execution"
"\n 2 = GPU normal execution"
"\n 3 = GPU with cutoff"
"\n 4 = GPU with cutoff and binned input (CPU preprocessing)"
"\n 5 = GPU with cutoff and binned input (GPU preprocessing)"
"\n\n");
exit(0);
}
printf("\nSetting up the problem..."); fflush(stdout);
startTime(&timer);
initVector(&in_val_h, num_in, maxVal);
initVector(&in_pos_h, num_in, grid_size);
out_h = (float*) malloc(grid_size*sizeof(float));
memset((void*) out_h, 0, grid_size*sizeof(float));
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
printf(" Grid size = %u\n Input size = %u\n", grid_size, num_in);
// CPU Preprocessing ------------------------------------------------------
if(mode == GPU_BINNED_CPU_PREPROCESSING) {
printf("Preprocessing data on the CPU..."); fflush(stdout);
startTime(&timer);
// Data structures needed to preprocess the bins on the CPU
binCounts_h = (unsigned int*) malloc(NUM_BINS*sizeof(unsigned int));
memset((void*) out_h, 0, grid_size*sizeof(float));
binPtrs_h = (unsigned int*) malloc((NUM_BINS + 1)*sizeof(unsigned int));
in_val_sorted_h = (float*) malloc(num_in*sizeof(float));
in_pos_sorted_h = (float*) malloc(num_in*sizeof(float));
cpu_preprocess(in_val_h, in_pos_h, in_val_sorted_h, in_pos_sorted_h,
grid_size, num_in, binCounts_h, binPtrs_h);
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
}
// Allocate device variables ----------------------------------------------
if(mode != CPU_NORMAL) {
printf("Allocating device variables..."); fflush(stdout);
startTime(&timer);
// If preprocessing on the CPU, GPU doesn't need the unsorted arrays
if(mode != GPU_BINNED_CPU_PREPROCESSING) {
cuda_ret = cudaMalloc((void**)&in_val_d, num_in * sizeof(float));
if(cuda_ret!=cudaSuccess) FATAL("Unable to allocate device memory");
cuda_ret = cudaMalloc((void**)&in_pos_d, num_in * sizeof(float));
if(cuda_ret!=cudaSuccess) FATAL("Unable to allocate device memory");
}
// All modes need the output array
cuda_ret = cudaMalloc((void**)&out_d, grid_size * sizeof(float));
if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory");
// Only binning modes need binning information
if(mode == GPU_BINNED_CPU_PREPROCESSING ||
mode == GPU_BINNED_GPU_PREPROCESSING) {
cuda_ret = cudaMalloc((void**)&in_val_sorted_d,
num_in*sizeof(float));
if(cuda_ret != cudaSuccess) {
FATAL("Unable to allocate device memory");
}
cuda_ret = cudaMalloc((void**)&in_pos_sorted_d,
num_in*sizeof(float));
if(cuda_ret != cudaSuccess) {
FATAL("Unable to allocate device memory");
}
cuda_ret = cudaMalloc((void**)&binPtrs_d,
(NUM_BINS + 1)*sizeof(unsigned int));
if(cuda_ret != cudaSuccess) {
FATAL("Unable to allocate device memory");
}
if(mode == GPU_BINNED_GPU_PREPROCESSING) {
// Only used in preprocessing but not the actual computation
cuda_ret = cudaMalloc((void**)&binCounts_d,
NUM_BINS*sizeof(unsigned int));
if(cuda_ret != cudaSuccess) {
FATAL("Unable to allocate device memory");
}
}
}
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
}
// Copy host variables to device ------------------------------------------
if(mode != CPU_NORMAL) {
printf("Copying data from host to device..."); fflush(stdout);
startTime(&timer);
// If preprocessing on the CPU, GPU doesn't need the unsorted arrays
if(mode != GPU_BINNED_CPU_PREPROCESSING) {
cuda_ret = cudaMemcpy(in_val_d, in_val_h, num_in * sizeof(float),
cudaMemcpyHostToDevice);
if(cuda_ret != cudaSuccess) {
FATAL("Unable to copy memory to the device");
}
cuda_ret = cudaMemcpy(in_pos_d, in_pos_h, num_in * sizeof(float),
cudaMemcpyHostToDevice);
if(cuda_ret != cudaSuccess) {
FATAL("Unable to copy memory to the device");
}
}
// All modes need the output array
cuda_ret = cudaMemset(out_d, 0, grid_size * sizeof(float));
if(cuda_ret != cudaSuccess) FATAL("Unable to set device memory");
if(mode == GPU_BINNED_CPU_PREPROCESSING) {
cuda_ret = cudaMemcpy(in_val_sorted_d, in_val_sorted_h,
num_in * sizeof(float), cudaMemcpyHostToDevice);
if(cuda_ret != cudaSuccess) {
FATAL("Unable to copy memory to the device");
}
cuda_ret = cudaMemcpy(in_pos_sorted_d, in_pos_sorted_h,
num_in * sizeof(float), cudaMemcpyHostToDevice);
if(cuda_ret != cudaSuccess) {
FATAL("Unable to copy memory to the device");
}
cuda_ret = cudaMemcpy(binPtrs_d, binPtrs_h,
(NUM_BINS + 1)*sizeof(unsigned int), cudaMemcpyHostToDevice);
if(cuda_ret != cudaSuccess) {
FATAL("Unable to copy memory to the device");
}
} else if(mode == GPU_BINNED_GPU_PREPROCESSING) {
// If preprocessing on the GPU, bin counts need to be initialized
// and nothing needs to be copied
cuda_ret=cudaMemset(binCounts_d, 0, NUM_BINS*sizeof(unsigned int));
if(cuda_ret != cudaSuccess) FATAL("Unable to set device memory");
}
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
}
// GPU Preprocessing ------------------------------------------------------
if(mode == GPU_BINNED_GPU_PREPROCESSING) {
printf("Preprocessing data on the GPU..."); fflush(stdout);
startTime(&timer);
gpu_preprocess(in_val_d, in_pos_d, in_val_sorted_d, in_pos_sorted_d,
grid_size, num_in, binCounts_d, binPtrs_d);
cuda_ret = cudaDeviceSynchronize();
if(cuda_ret != cudaSuccess) FATAL("Unable to launch/execute kernel");
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
}
// Launch kernel ----------------------------------------------------------
printf("Launching kernel ");
if(mode == CPU_NORMAL) {
printf("(CPU normal version)...");fflush(stdout);
startTime(&timer);
cpu_normal(in_val_h, in_pos_h, out_h, grid_size, num_in);
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
} else if(mode == GPU_NORMAL) {
printf("(GPU normal version)...");fflush(stdout);
startTime(&timer);
gpu_normal(in_val_d, in_pos_d, out_d, grid_size, num_in);
cuda_ret = cudaDeviceSynchronize();
if(cuda_ret != cudaSuccess) FATAL("Unable to launch/execute kernel");
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
} else if(mode == GPU_CUTOFF) {
printf("(GPU with cuttoff)...");fflush(stdout);
startTime(&timer);
gpu_cutoff(in_val_d, in_pos_d, out_d, grid_size, num_in, cutoff2);
cuda_ret = cudaDeviceSynchronize();
if(cuda_ret != cudaSuccess) FATAL("Unable to launch/execute kernel");
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
} else if(mode == GPU_BINNED_CPU_PREPROCESSING ||
mode == GPU_BINNED_GPU_PREPROCESSING) {
printf("(GPU with input binning and cutoff)...");fflush(stdout);
startTime(&timer);
gpu_cutoff_binned(binPtrs_d, in_val_sorted_d, in_pos_sorted_d, out_d,
grid_size, cutoff2);
cuda_ret = cudaDeviceSynchronize();
if(cuda_ret != cudaSuccess) FATAL("Unable to launch/execute kernel");
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
} else {
printf("Invalid mode!\n");
exit(0);
}
// Copy device variables from host ----------------------------------------
if(mode != CPU_NORMAL) {
printf("Copying data from device to host..."); fflush(stdout);
startTime(&timer);
cuda_ret = cudaMemcpy(out_h, out_d, grid_size * sizeof(float),
cudaMemcpyDeviceToHost);
if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to host");
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
}
// Verify correctness -----------------------------------------------------
printf("Verifying results..."); fflush(stdout);
if(mode == CPU_NORMAL || mode == GPU_NORMAL) {
verify(in_val_h, in_pos_h, out_h, grid_size, num_in);
} else {
verify_cutoff(in_val_h, in_pos_h, out_h, grid_size, num_in, cutoff2);
}
// Free memory ------------------------------------------------------------
free(in_val_h); free(in_pos_h); free(out_h);
if(mode == GPU_BINNED_CPU_PREPROCESSING) {
free(binCounts_h); free(binPtrs_h);
free(in_val_sorted_h); free(in_pos_sorted_h);
}
if(mode != CPU_NORMAL) {
if(mode != GPU_BINNED_CPU_PREPROCESSING) {
cudaFree(in_val_d); cudaFree(in_pos_d);
}
cudaFree(out_d);
if(mode == GPU_BINNED_CPU_PREPROCESSING ||
mode == GPU_BINNED_GPU_PREPROCESSING) {
cudaFree(in_val_sorted_d); cudaFree(in_pos_sorted_d);
cudaFree(binPtrs_d);
if(mode == GPU_BINNED_GPU_PREPROCESSING) {
cudaFree(binCounts_d);
}
}
}
return 0;
}
|
0e1ea92ded9f7087a14f71e3a824aead7db1e0ed.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/hip/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 128, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 128, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_128x64_k65536_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 128, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 128, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_128x64_k65536_dropout_sm70` is for sm70-sm75, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 128, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 128, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_128x64_k65536_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 128, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 128, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_128x64_k65536_dropout_sm75` is for sm75-sm80, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 128, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 128, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_128x64_k65536_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 128, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 900
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 128, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_128x64_k65536_dropout_sm80` is for sm80-sm90, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k65536_dropout_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k65536_dropout_sm50` is for sm50-sm70, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k65536_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k65536_dropout_sm70` is for sm70-sm75, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k65536_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k65536_dropout_sm75` is for sm75-sm80, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 64, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 64, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k65536_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 64, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 900
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 64, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k65536_dropout_sm80` is for sm80-sm90, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
| 0e1ea92ded9f7087a14f71e3a824aead7db1e0ed.cu | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 128, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 128, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_128x64_k65536_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 128, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 128, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_128x64_k65536_dropout_sm70` is for sm70-sm75, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 128, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 128, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_128x64_k65536_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 128, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 128, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_128x64_k65536_dropout_sm75` is for sm75-sm80, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 128, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 128, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_128x64_k65536_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 128, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 900
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 128, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_128x64_k65536_dropout_sm80` is for sm80-sm90, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k65536_dropout_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k65536_dropout_sm50` is for sm50-sm70, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k65536_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k65536_dropout_sm70` is for sm70-sm75, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k65536_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k65536_dropout_sm75` is for sm75-sm80, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 64, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 64, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k65536_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 64, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 900
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 64, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k65536_dropout_sm80` is for sm80-sm90, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
|
ea5c81c252739457d45e939503253a7d27da32ef.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "removeRuntyPartsKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int size = XSIZE*YSIZE;
int *partition = NULL;
hipMalloc(&partition, XSIZE*YSIZE);
int *removeStencil = NULL;
hipMalloc(&removeStencil, XSIZE*YSIZE);
int *subtractions = NULL;
hipMalloc(&subtractions, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
removeRuntyPartsKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, size,partition,removeStencil,subtractions);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
removeRuntyPartsKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, size,partition,removeStencil,subtractions);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
removeRuntyPartsKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, size,partition,removeStencil,subtractions);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | ea5c81c252739457d45e939503253a7d27da32ef.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "removeRuntyPartsKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int size = XSIZE*YSIZE;
int *partition = NULL;
cudaMalloc(&partition, XSIZE*YSIZE);
int *removeStencil = NULL;
cudaMalloc(&removeStencil, XSIZE*YSIZE);
int *subtractions = NULL;
cudaMalloc(&subtractions, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
removeRuntyPartsKernel<<<gridBlock,threadBlock>>>(size,partition,removeStencil,subtractions);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
removeRuntyPartsKernel<<<gridBlock,threadBlock>>>(size,partition,removeStencil,subtractions);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
removeRuntyPartsKernel<<<gridBlock,threadBlock>>>(size,partition,removeStencil,subtractions);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
57d2ec20f402adaaeaebefd91236a4c31ed63660.hip | // !!! This is a file automatically generated by hipify!!!
#include <fluidity/dimension/thread_index.hpp>
#if defined(__HIPCC__)
namespace fluid {
namespace detail {
/*
/// Implementation for the x dimension case for the thread index.
fluidity_device_only inline std::size_t thread_id_impl(dim_x)
{
return threadIdx.x;
}
/// Implementation for the x dimension case for the thread index.
fluidity_device_only std::size_t thread_id_impl(dim_y)
{
return threadIdx.y;
}
/// Implementation for the x dimension case for the thread index.
fluidity_device_only std::size_t thread_id_impl(dim_z)
{
return threadIdx.z;
}
/// Implementation for the x dimension case for the flattened index.
fluidity_device_only std::size_t flattened_id_impl(dim_x)
{
return threadIdx.x + blockIdx.x * blockDim.x;
}
/// Implementation for the y dimension case for the flattened index.
fluidity_device_only std::size_t flattened_id_impl(dim_y)
{
return threadIdx.y + blockIdx.y * blockDim.y;
}
/// Implementation for the z dimension case for the flattened index.
fluidity_device_only std::size_t flattened_id_impl(dim_z)
{
return threadIdx.z + blockIdx.z * blockDim.z;
}
*/
}} // namespace fluid::detail
#endif // __CUDACC__ | 57d2ec20f402adaaeaebefd91236a4c31ed63660.cu | #include <fluidity/dimension/thread_index.hpp>
#if defined(__CUDACC__)
namespace fluid {
namespace detail {
/*
/// Implementation for the x dimension case for the thread index.
fluidity_device_only inline std::size_t thread_id_impl(dim_x)
{
return threadIdx.x;
}
/// Implementation for the x dimension case for the thread index.
fluidity_device_only std::size_t thread_id_impl(dim_y)
{
return threadIdx.y;
}
/// Implementation for the x dimension case for the thread index.
fluidity_device_only std::size_t thread_id_impl(dim_z)
{
return threadIdx.z;
}
/// Implementation for the x dimension case for the flattened index.
fluidity_device_only std::size_t flattened_id_impl(dim_x)
{
return threadIdx.x + blockIdx.x * blockDim.x;
}
/// Implementation for the y dimension case for the flattened index.
fluidity_device_only std::size_t flattened_id_impl(dim_y)
{
return threadIdx.y + blockIdx.y * blockDim.y;
}
/// Implementation for the z dimension case for the flattened index.
fluidity_device_only std::size_t flattened_id_impl(dim_z)
{
return threadIdx.z + blockIdx.z * blockDim.z;
}
*/
}} // namespace fluid::detail
#endif // __CUDACC__ |
Ex1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Compilation:
// nvcc Ex1.cu -o Ex1.exe
// __global__ => this function executes on the GPU.
// Please note that it also could be: __device__.
// This is this only code that executes on the GPU.
__global__ void kernel(double *a, double *b, double *c, int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
c[i] = a[i] + b[i];
}
}
// Host driver: allocate mirrored host/device buffers, initialize the inputs,
// run the vector-add kernel on the GPU, and copy the sum back to host RAM.
// NOTE(review): no API return value is checked and h_c is never verified or
// printed — this is a minimal teaching example, not production code.
int main(int argc, char **argv)
{
int N = 1000;
int sz_in_bytes = N*sizeof(double);
double *h_a, *h_b, *h_c; // "h" for "host" (allocated in RAM).
double *d_a, *d_b, *d_c; // "d" for "device" (allocated in the GPU).
// Allocate memory in RAM (that is, the "host"):
// 3 arrays that contain N elements. Each element is a "double".
// NOTE(review): malloc results are not checked for NULL.
h_a = (double*)malloc(sz_in_bytes);
h_b = (double*)malloc(sz_in_bytes);
h_c = (double*)malloc(sz_in_bytes);
// Initiate values on h_a and h_b
for(int i = 0 ; i < N ; i++) {
h_a[i] = 1./(1.+i);
h_b[i] = (i-1.)/(i+1.);
}
// Allocate memory in the GPU (that is, the "device").
// NOTE(review): hipMalloc/hipMemcpy error codes are silently discarded.
hipMalloc((void**)&d_a, sz_in_bytes);
hipMalloc((void**)&d_b, sz_in_bytes);
hipMalloc((void**)&d_c, sz_in_bytes);
// Copy the data from the RAM (host) to the GPU (device).
// Note: hipMemcpy(dst, src, count, kind)
hipMemcpy(d_a, h_a, sz_in_bytes, hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, sz_in_bytes, hipMemcpyHostToDevice);
// Set 64*1*1 thread per blocks.
// x: 64
// y: 1
// z: 1
dim3 dimBlock(64, 1, 1);
// Set ceil(N / dimBlock.x) * 1 * 1 blocks, so every element gets a thread.
// If N=1000: (1000 + 64 - 1) / 64 = 16 blocks
// => 16*64 = 1024 threads, slightly more threads than elements;
// the kernel's bounds check handles the excess.
dim3 dimGrid((N + dimBlock.x - 1)/dimBlock.x, 1, 1);
// Run the "kernel" (in the GPU).
// dimGrid: number of blocks in the grid => 16
// dimBlock: number of threads per block => 64
hipLaunchKernelGGL(( kernel), dim3(dimGrid) , dim3(dimBlock), 0, 0, d_a, d_b, d_c, N);
// Result is pointed by d_c on device.
// Copy this result on host (result pointed by h_c on host).
// hipMemcpy is blocking, so it also synchronizes with the kernel above.
hipMemcpy(h_c, d_c, sz_in_bytes, hipMemcpyDeviceToHost);
// freeing on device
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
free(h_a);
free(h_b);
free(h_c);
return 0;
}
| Ex1.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Compilation:
// nvcc Ex1.cu -o Ex1.exe
// __global__ => this function executes on the GPU.
// Please note that it also could be: __device__.
// This is this only code that executes on the GPU.
// Element-wise vector addition on the device: c[i] = a[i] + b[i].
// Each thread computes one element; the guard skips the surplus threads
// created when the grid is rounded up to whole blocks.
__global__ void kernel(double *a, double *b, double *c, int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
c[i] = a[i] + b[i];
}
}
// Host driver: allocate mirrored host/device buffers, initialize the inputs,
// run the vector-add kernel on the GPU, and copy the sum back to host RAM.
// NOTE(review): no CUDA API return value is checked and h_c is never
// verified — this is a minimal teaching example.
int main(int argc, char **argv)
{
int N = 1000;
int sz_in_bytes = N*sizeof(double);
double *h_a, *h_b, *h_c; // "h" for "host" (allocated in RAM).
double *d_a, *d_b, *d_c; // "d" for "device" (allocated in the GPU).
// Allocate memory in RAM (that is, the "host"):
// 3 arrays that contain N elements. Each element is a "double".
// NOTE(review): malloc results are not checked for NULL.
h_a = (double*)malloc(sz_in_bytes);
h_b = (double*)malloc(sz_in_bytes);
h_c = (double*)malloc(sz_in_bytes);
// Initiate values on h_a and h_b
for(int i = 0 ; i < N ; i++) {
h_a[i] = 1./(1.+i);
h_b[i] = (i-1.)/(i+1.);
}
// Allocate memory in the GPU (that is, the "device").
cudaMalloc((void**)&d_a, sz_in_bytes);
cudaMalloc((void**)&d_b, sz_in_bytes);
cudaMalloc((void**)&d_c, sz_in_bytes);
// Copy the data from the RAM (host) to the GPU (device).
// Note: cudaMemcpy(dst, src, count, kind)
cudaMemcpy(d_a, h_a, sz_in_bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, sz_in_bytes, cudaMemcpyHostToDevice);
// Set 64*1*1 thread per blocks.
// x: 64
// y: 1
// z: 1
dim3 dimBlock(64, 1, 1);
// Set ceil(N / dimBlock.x) * 1 * 1 blocks so every element gets a thread.
// If N=1000: (1000 + 64 - 1) / 64 = 16 blocks
// => 16*64 = 1024 threads, slightly more threads than elements;
// the kernel's bounds check handles the excess.
dim3 dimGrid((N + dimBlock.x - 1)/dimBlock.x, 1, 1);
// Run the "kernel" (in the GPU).
// dimGrid: number of blocks in the grid => 16
// dimBlock: number of threads per block => 64
kernel<<<dimGrid , dimBlock>>>(d_a, d_b, d_c, N);
// Result is pointed by d_c on device.
// Copy this result on host (result pointed by h_c on host).
// cudaMemcpy blocks, so it also synchronizes with the kernel above.
cudaMemcpy(h_c, d_c, sz_in_bytes, cudaMemcpyDeviceToHost);
// freeing on device
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
free(h_a);
free(h_b);
free(h_c);
return 0;
}
|
1cf3f2325759344cc4f0f49f254cabb8adc8bb81.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//kernelPBO.cu (Rob Farber)
#include "fluids.h"
using namespace glm;
// A parametric ray: origin p and direction d; solve(t) returns p + d*t.
// Methods are __device__ because rays are built/evaluated inside kernels.
class Ray {
public:
vec3 p, d;
__device__ Ray(const vec3 & P, const vec3 & D);
__device__ vec3 solve(const float &t) const;
};
// Pinhole camera used by the ray-marching renderer.
// center/lookat/up/right define the image-plane frame; cof is the eye
// (center of focus). c, d, e, f map pixel coordinates onto the image plane
// (set up in the (eye, lookat, up, width, height) constructor).
class Camera{
public:
vec3 center, lookat, up, right, cof;
float c, d, e, f;
Camera();
Camera(vec3 eye, vec3 lookat, vec3 up, int width, int height);
__device__ Ray compute_ray(float pixel_x, float pixel_y);
void transform(glm::mat4 t);
};
// Parametric entry/exit interval of a ray/box intersection.
struct interval
{
float tmin, tmax;
};
// Axis-aligned bounding box: bounds[0] = min corner, bounds[1] = max corner.
class AABB{
public:
vec3 bounds[2], center;
__device__ struct interval intersect(const Ray ray, float t0, float t1) const;
// Degenerate box at the origin.
AABB(){
bounds[0] = vec3(0);
bounds[1] = vec3(0);
center = vec3(0);
}
// Box from explicit min/max corners; center is precomputed.
AABB(vec3 min, vec3 max) {
bounds[0] = min;
bounds[1] = max;
center = bounds[0] + (bounds[1] - bounds[0]) / 2.f;
}
};
// Abort the process with a file/line diagnostic when a HIP API call fails.
// Normally invoked through the HANDLE_ERROR macro.
static void HandleError( hipError_t err,
                         const char *file,
                         int line ) {
    if (err == hipSuccess) {
        return;
    }
    printf( "%s in %s at line %d\n", hipGetErrorString( err ), file, line );
    exit( EXIT_FAILURE );
}
// Check for a pending runtime error (e.g. a bad kernel launch) and abort
// with the given context message if one is found. hipGetLastError also
// clears the sticky error state.
void checkCUDAError(const char *msg) {
    const hipError_t err = hipGetLastError();
    if (err == hipSuccess) {
        return;
    }
    fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
    exit(EXIT_FAILURE);
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
// Debug helper: print a vec3 as "<x, y, z>"; callable from host and device.
__host__ __device__ void printVec3(vec3 v){
printf("<%f, %f, %f>\n", v.x, v.y, v.z);
}
// Device-memory pointers for the solver: velocity (u,v,w), previous-step
// velocity (u0,v0,w0), density (dens/dens0), and persistent source fields
// (sources for density, u_s/v_s/w_s for velocity).
struct bufferPointers{
float *u, *v, *w, *u0, *v0, *w0, *dens, *dens0, *sources, *u_s, *v_s, *w_s;
};
// Launch configurations for the three kinds of kernels:
// fluid update, boundary update, and per-pixel rendering.
struct cudaDims{
dim3 dimBlockFluid, dimGridFluid;
dim3 dimBlockBounds, dimGridBounds;
dim3 dimBlockDraw, dimGridDraw;
};
// File-scope simulation state, initialized in setup_scene().
struct bufferPointers device_pointers;
struct cudaDims dims;
bool draw_dens_flag = true; // toggled by 'v': render density vs. velocity
Camera cam;
AABB fluidBounds;
// March ray r through the fluid volume and accumulate a color.
// Density mode: front-to-back compositing ("multiplier" is the remaining
// transmittance). Velocity mode: absolute velocity components mapped to RGB.
// A missed box yields interval {-1,-1}, so the loop body never runs.
__device__ vec4 marchRay(Ray r, struct bufferPointers p, bool draw_dens_flag, AABB fluidBounds){
vec4 ray_color(0);
float multiplier = 1;
float opacity = 0.1 * RAY_STEP; // per-step opacity scaled by step length
struct interval interval = fluidBounds.intersect(r, -10000, 10000);
float t = interval.tmin;
vec3 pos = r.solve(t);
int pixel_x= blockIdx.x * blockDim.x + threadIdx.x;
int pixel_y= blockIdx.y * blockDim.y + threadIdx.y;
// Debug tracing for one pixel; disabled via the leading "false &&".
bool flag = false && pixel_x == 512 && pixel_y == 512;
if(flag) printVec3(r.p);
if(flag) printVec3(r.d);
// Step until we exit the box or the ray is fully saturated.
while(t < interval.tmax && multiplier > 0){
int i = (int) pos.x;
int j = (int) pos.y;
int k = (int) pos.z;
// Only sample interior cells; the boundary shell is skipped.
if(i > 0 && i <= N && j > 0 && j <= N && k > 0 && k <=N){
int index = IX(i, j, k);
if(flag) printVec3(pos);
if(flag) printf("%d %d %d\n", i, j, k);
if(flag) printVec3(vec3(p.dens[index]));
if(draw_dens_flag){
ray_color += multiplier * opacity * vec4(p.dens[index]);
multiplier *= (1 - opacity * p.dens[index]);
}else{
float weight =1;
ray_color.x += opacity * abs(weight * p.u[index]);
ray_color.y += opacity * abs(weight * p.v[index]);
ray_color.z += opacity * abs(weight * p.w[index]);
}
}
t += RAY_STEP;
pos = r.solve(t);
}
return ray_color;
}
// Render kernel: one thread per pixel (2D launch, dims.dimGridDraw/.dimBlockDraw).
// Marches a primary camera ray through the volume and writes the clamped
// color into the pixel buffer shared with OpenGL.
__global__ void draw_dens_kernel(struct bufferPointers p, bool draw_dens_flag, AABB fluidBounds, float dt, Camera cam, uchar4* pixels){
//printf("kernel\n");
int pixel_x= blockIdx.x * blockDim.x + threadIdx.x;
int pixel_y= blockIdx.y * blockDim.y + threadIdx.y;
// Row-major pixel index; assumes the grid exactly covers the image.
int index = pixel_x+pixel_y*blockDim.x*gridDim.x;
vec4 color = marchRay(cam.compute_ray(pixel_x, pixel_y), p, draw_dens_flag, fluidBounds);
// vec4 color(0);
// if(draw_dens_flag){
//   color = vec4(p.dens[index]);
// }else{
//   color.x = abs(1000 * p.u[index]);
//   color.y = abs(1000 * p.v[index]);
// }
// Each thread writes one pixel location in the texture (textel),
// clamping each channel to [0,1] before converting to 8-bit.
pixels[index].x = (unsigned char)(min(1.0, color.x) * 255.9999f);
pixels[index].y = (unsigned char)(min(1.0, color.y) * 255.9999f);
pixels[index].z = (unsigned char)(min(1.0, color.z) * 255.9999f);
pixels[index].w = (unsigned char)(min(1.0, color.w) * 255.9999f);
}
// Boundary-condition kernel, launched over an (N+2)x(N+2) 2D slab; each
// thread (i,j) touches the six faces of the 3D grid.
// b selects the field type:
//   0 - scalar field: zero all six faces.
//   1/2/3 - x/y/z velocity component: reflect inflow at the matching faces
//           (only flips the sign when the velocity points into the box).
__global__ void set_bnd_kernel ( int b, float * x )
{
int pixel_x= blockIdx.x * blockDim.x + threadIdx.x;
int pixel_y= blockIdx.y * blockDim.y + threadIdx.y;
int i = pixel_x;
int j = pixel_y;//IX(pixel_x, pixel_y);
//if(i > 0 && i <= N && j > 0 && j <= N){
switch(b){
case 0:
// x[IX(1 ,i)] += x[IX(0,i)];
// x[IX(N,i)] += x[IX(N+1,i)];
// x[IX(i,1 )] += x[IX(i,0)];
// x[IX(i,N)] += x[IX(i,N+1)];
// x[IX(1 ,i, j)] += x[IX(0 ,i, j)];
// x[IX(N,i, j)] += x[IX(N+1,i, j)];
// x[IX(i,1, j)] += x[IX(i,0, j)];
// x[IX(i,N, j)] += x[IX(i,N+1, j)];
// x[IX(i, j, 1)] += x[IX(i, j, 0)];
// x[IX(i, j, N)] += x[IX(i, j, N+1)];
x[IX(0 ,i, j)] = 0;
x[IX(N+1,i, j)] = 0;
x[IX(i,0, j)] = 0;
x[IX(i,N+1, j)] = 0;
x[IX(i, j, 0)] = 0;
x[IX(i, j, N+1)] = 0;
break;
case 1:
// NOTE: the first reflection negates a value already known to be
// negative, making it positive (pointing inward) — presumably intended.
if(x[IX(0 ,i, j)] < 0) x[IX(0 ,i, j)] = -x[IX(0,i, j)];
if(x[IX(N+1,i, j)] > 0) x[IX(N+1,i, j)] = -x[IX(N+1,i, j)];
break;
case 2:
if(x[IX(i,0 , j)] < 0) x[IX(i,0, j )] = -x[IX(i,0, j)];
if(x[IX(i,N+1, j)] >0) x[IX(i,N+1, j)] = -x[IX(i,N+1, j)];
break;
case 3:
if(x[IX(i, j, 0)] < 0) x[IX(i, j, 0)] = -x[IX(i,j, 0)];
if(x[IX(i, j, N+1)] >0) x[IX(i, j, N+1)] = -x[IX(i, j, N+1)];
break;
default:
break;
// x[IX(0 ,i)] = b == 1 ? -x[IX(1,i)] : x[IX(1,i)];
// x[IX(N+1,i)] = b == 1 ? -x[IX(N,i)] : x[IX(N,i)];
// x[IX(i,0 )] = b==2 ? -x[IX(i,1)] : x[IX(i,1)];
// x[IX(i,N+1)] = b==2 ? -x[IX(i,N)] : x[IX(i,N)];
}
//}
// if(i < 32){
// x[IX(0 ,0 )] = 0.5*(x[IX(1,0 )]+x[IX(0 ,1)]);
// x[IX(0 ,N+1)] = 0.5*(x[IX(1,N+1)]+x[IX(0 ,N )]);
// x[IX(N+1,0 )] = 0.5*(x[IX(N,0 )]+x[IX(N+1,1)]);
// x[IX(N+1,N+1)] = 0.5*(x[IX(N,N+1)]+x[IX(N+1,N )]);
// }
}
// Host wrapper: enforce boundary conditions of type b on field x, then
// block until the kernel has finished (the solver steps are sequential).
__host__ void set_bnd(int b, float * x){
hipLaunchKernelGGL(( set_bnd_kernel), dim3(dims.dimGridBounds), dim3(dims.dimBlockBounds), 0, 0, b, x);
checkCUDAError("kernel failed!");
hipDeviceSynchronize();
}
// Add dt-scaled source field s into x, one thread per cell (3D launch).
// NOTE(review): there is no bounds guard; this is safe only while the
// launch configuration exactly covers the (N+2)^3 grid — confirm if the
// block/grid dimensions in setup_scene() ever change.
__global__ void add_source_kernel(float * x, float * s, float dt )
{
int i= blockIdx.x * blockDim.x + threadIdx.x;
int j= blockIdx.y * blockDim.y + threadIdx.y;
int k = blockIdx.z * blockDim.z + threadIdx.z;
int index = IX(i, j, k);
x[index] += dt * s[index];
}
// Host wrapper: x += dt * s over the whole grid, synchronously.
__host__ void add_source(struct cudaDims dims, float * x, float * s, float dt ){
hipLaunchKernelGGL(( add_source_kernel), dim3(dims.dimGridFluid), dim3(dims.dimBlockFluid), 0, 0, x, s, dt );
checkCUDAError("kernel failed!");
hipDeviceSynchronize();
}
// One relaxation sweep of the implicit diffusion solve (Stam-style):
// x = (x0 + a * sum-of-6-neighbors(x)) / (1 + 6a).
// NOTE(review): x is read and written in place within one sweep, so
// neighbor reads may see either old or freshly written values depending on
// thread scheduling — the result is a nondeterministic Jacobi/Gauss-Seidel
// hybrid. A true Jacobi iteration would ping-pong two buffers.
__global__ void diffuse_kernel(float * x, float * x0, float diff, float dt){
int i, j, k;
i = blockIdx.x * blockDim.x + threadIdx.x;
j = blockIdx.y * blockDim.y + threadIdx.y;
k = blockIdx.z * blockDim.z + threadIdx.z;
float a = dt * diff * N * N * N ;
if(i > 0 && i <= N && j > 0 && j <= N && k > 0 && k <=N){
x[IX(i, j, k)] = (x0[IX(i , j, k)] + a * (
x[IX(i - 1, j, k)] +
x[IX(i + 1, j, k)] +
x[IX(i, j - 1, k)] +
x[IX(i, j + 1, k)] +
x[IX(i, j, k - 1)] +
x[IX(i, j, k + 1)]
)) / (1 + 6 * a);
}
}
// Host wrapper: run K relaxation sweeps of the diffusion solve, applying
// boundary conditions of type b after each sweep.
__host__ void diffuse(struct cudaDims dims, int b, float *x, float*x0, float diff, float dt){
for(int k = 0; k < K; k++){
hipLaunchKernelGGL(( diffuse_kernel), dim3(dims.dimGridFluid), dim3(dims.dimBlockFluid), 0, 0, x, x0, diff, dt);
checkCUDAError("kernel failed!");
hipDeviceSynchronize();
set_bnd(b, x);
}
}
//set bounds after call
// Semi-Lagrangian advection: trace each cell center backwards along the
// velocity field (u,v,w) by dt, clamp the backtraced point to the grid,
// and trilinearly interpolate d0 there into d. One thread per cell;
// the interior guard skips the boundary shell.
__global__ void advect_kernel(float * d, float * d0, float * u, float * v,float * w, float dt )
{
// ia/ja/ka: the two bracketing cell indices per axis;
// sa/ta/ua: the matching trilinear interpolation weights.
int i, j, k, ia[2], ja[2], ka[2];
i = blockIdx.x * blockDim.x + threadIdx.x;
j = blockIdx.y * blockDim.y + threadIdx.y;
k = blockIdx.z * blockDim.z + threadIdx.z;
float x, y, z, sa[2], ta[2], ua[2], dt0;
dt0 = dt * N; // time step in grid units
if(i > 0 && i <= N && j > 0 && j <= N && k > 0 && k <=N){
// Backtrace the cell center.
x = i - dt0 * u[IX(i, j, k)];
y = j - dt0 * v[IX(i, j, k)];
z = k - dt0 * w[IX(i, j, k)];
// Clamp to the valid sampling range and pick bracketing cells.
if (x < 0.5) x = 0.5;
if (x > N + 0.5) x = N + 0.5;
ia[0] = (int) x;
ia[1] = ia[0] + 1;
if (y < 0.5) y = 0.5;
if (y > N + 0.5) y = N + 0.5;
ja[0] = (int) y;
ja[1] = ja[0] + 1;
if (z < 0.5) z = 0.5;
if (z > N + 0.5) z = N + 0.5;
ka[0] = (int) z;
ka[1] = ka[0] + 1;
sa[1] = x - ia[0];
sa[0] = 1 - sa[1];
ta[1] = y - ja[0];
ta[0] = 1 - ta[1];
ua[1] = z - ka[0];
ua[0] = 1 - ua[1];
// Trilinear interpolation over the 8 surrounding cells.
d[IX(i,j, k)] = 0;
for(int a = 0; a < 2; a++){
for (int b = 0; b < 2; b++){
for (int c = 0; c < 2; c++){
d[IX(i,j, k)] += sa[a] * ta[b] * ua[c] * d0[IX(ia[a], ja[b], ka[c])];
}
}
}
}
//set_bnd ( N, b, d );
}
// Host wrapper: advect d0 into d along (u,v,w), then apply boundary
// conditions of type b.
__host__ void advect(struct cudaDims dims, int b, float * d, float * d0, float * u, float * v, float * w, float dt ){
hipLaunchKernelGGL(( advect_kernel), dim3(dims.dimGridFluid), dim3(dims.dimBlockFluid), 0, 0, d, d0, u, v, w, dt);
checkCUDAError("kernel failed!");
hipDeviceSynchronize();
set_bnd(b, d);
}
// First projection pass: compute the central-difference divergence of the
// velocity field at every interior cell and zero the pressure-like
// "momentum" field before the iterative solve. One thread per cell.
__global__ void project_kernel_1(float * u, float * v, float *w, float * momentum, float * divergence){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    int k = blockIdx.z * blockDim.z + threadIdx.z;
    float h = 1.0f/N; // grid spacing
    if(i > 0 && i <= N && j > 0 && j <= N && k > 0 && k <=N){
        // BUGFIX: the z-derivative term previously sampled v (the
        // y-velocity); the divergence must use w for the k direction.
        divergence[IX(i,j, k)] = -0.5f*h*(u[IX(i + 1, j, k)] - u[IX(i - 1, j, k)]+
                                          v[IX(i, j + 1, k)] - v[IX(i, j - 1, k)]+
                                          w[IX(i, j, k + 1)] - w[IX(i, j, k - 1)]);
        momentum[IX(i,j, k)] = 0;
    }
}
// Second projection pass: one relaxation sweep of the Poisson solve for
// the pressure-like "momentum" field from the divergence.
// NOTE(review): momentum is read and written in place within a sweep, so
// results are order-dependent (nondeterministic Jacobi/Gauss-Seidel mix).
// The unused local `h` from the original was removed.
__global__ void project_kernel_2(float * u, float * v, float *w, float * momentum, float * divergence){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    int k = blockIdx.z * blockDim.z + threadIdx.z;
    if(i > 0 && i <= N && j > 0 && j <= N && k > 0 && k <=N){
        momentum[IX(i,j, k)] = (divergence[IX(i, j, k)] +
                                momentum[IX(i - 1, j, k)] + momentum[IX(i + 1, j, k)]+
                                momentum[IX(i, j - 1, k)] + momentum[IX(i, j + 1, k)]+
                                momentum[IX(i, j, k-1)] + momentum[IX(i, j, k + 1)])/6;
    }
}
// Third projection pass: subtract the gradient of the solved "momentum"
// field from the velocity, leaving a (closer to) divergence-free field.
__global__ void project_kernel_3(float * u, float * v, float *w, float * momentum, float * divergence){
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int k = blockIdx.z * blockDim.z + threadIdx.z;
float h = 1.0/N; // grid spacing
if(i > 0 && i <= N && j > 0 && j <= N && k > 0 && k <=N){
u[IX(i,j,k)] -= 0.5*(momentum[IX(i+1, j, k)]-momentum[IX(i-1, j, k)])/h;
v[IX(i,j,k)] -= 0.5*(momentum[IX(i, j+1, k)]-momentum[IX(i, j-1, k)])/h;
w[IX(i,j,k)] -= 0.5*(momentum[IX(i, j, k+1)]-momentum[IX(i, j, k-1)])/h;
}
}
// Pressure projection (Helmholtz decomposition, Stam-style):
//  1) compute divergence and clear the work field,
//  2) relax the Poisson equation K times,
//  3) subtract the resulting gradient from (u,v,w).
// momentum/divergence are scratch buffers supplied by the caller.
__host__ void project (cudaDims dims, float * u, float * v, float *w, float * momentum, float * divergence)
{
int k;
hipLaunchKernelGGL(( project_kernel_1), dim3(dims.dimGridFluid), dim3(dims.dimBlockFluid), 0, 0, u, v, w, momentum, divergence);
checkCUDAError("kernel failed!");
hipDeviceSynchronize();
set_bnd (0, momentum );
set_bnd (0, divergence );
for ( k=0 ; k<K ; k++ ) {
hipLaunchKernelGGL(( project_kernel_2), dim3(dims.dimGridFluid), dim3(dims.dimBlockFluid), 0, 0, u, v, w, momentum, divergence);
checkCUDAError("kernel failed!");
hipDeviceSynchronize();
set_bnd (0, momentum );
}
hipLaunchKernelGGL(( project_kernel_3), dim3(dims.dimGridFluid), dim3(dims.dimBlockFluid), 0, 0, u, v, w, momentum, divergence);
checkCUDAError("kernel failed!");
hipDeviceSynchronize();
set_bnd (1, u );
set_bnd (2, v );
set_bnd (3, w );
}
// One velocity step: diffuse, project, self-advect, project again.
// The *0 buffers double as the previous-step fields and as scratch for
// project(). Source addition is done by the caller (see launch_kernel).
void vel_step (struct cudaDims dims, float * u, float * v, float *w, float* u0, float * v0, float * w0, float visc, float dt )
{
// add_source(dims, u, u0, dt );
// add_source(dims, v, v0, dt );
// add_source(dims, w, w0, dt );
SWAP (u0, u );
SWAP (v0, v );
SWAP (w0, w );
diffuse(dims, 1, u, u0, visc, dt);
diffuse(dims, 2, v, v0, visc, dt);
diffuse(dims, 3, w, w0, visc, dt);
project (dims, u, v, w, u0, v0);
SWAP ( u0, u );
SWAP ( v0, v );
SWAP ( w0, w );
advect (dims, 1, u, u0, u0, v0, w0, dt );
advect (dims, 2, v, v0, u0, v0, w0, dt );
advect (dims, 3, w, w0, u0, v0, w0, dt );
project (dims, u, v, w, u0, v0);
}
// One density step: diffuse the field, then advect it along the velocity.
// x0 is the previous-step buffer; source addition happens in the caller.
__host__ void dens_step(struct cudaDims dims, float * x, float * x0, float * u, float * v, float * w, float diff, float dt)
{
//add_source(dims, x, x0, dt);
SWAP(x0, x);
diffuse(dims, 0, x, x0, diff, dt);
SWAP(x0, x);
advect(dims, 0, x, x0, u, v, w, dt);
}
// Wrapper for the __global__ call that sets up the kernel call
extern "C" void launch_kernel(uchar4* pixels, unsigned int image_width, unsigned int image_height, float dt)
{
struct bufferPointers p = device_pointers;
add_source(dims, p.u, p.u_s, dt );
add_source(dims, p.v, p.v_s, dt );
add_source(dims, p.w, p.w_s, dt );
add_source(dims, p.dens, p.sources, dt );
vel_step (dims, p.u, p.v, p.w, p.u0, p.v0, p.w0, VISC, dt );
dens_step(dims, p.dens, p.dens0, p.u, p.v, p.w, DIFF, dt);
hipLaunchKernelGGL(( draw_dens_kernel), dim3(dims.dimGridDraw), dim3(dims.dimBlockDraw), 0, 0, device_pointers, draw_dens_flag, fluidBounds, dt, cam, pixels);
checkCUDAError("kernel failed!");
size_t memsize = SIZE * sizeof(float);
// HANDLE_ERROR(hipMemset((device_pointers.u0), 0, memsize));
// HANDLE_ERROR(hipMemset((device_pointers.v0), 0, memsize));
// HANDLE_ERROR(hipMemset((device_pointers.w0), 0, memsize));
//HANDLE_ERROR(hipMemset((device_pointers.dens0), 0, memsize));
// SWAP(device_pointers.v0, device_pointers.v);
// SWAP(device_pointers.u0, device_pointers.u);
// SWAP(device_pointers.w0, device_pointers.w);
// SWAP(device_pointers.dens0, device_pointers.dens);
}
extern "C" void setup_scene(unsigned int image_width, unsigned int image_height){
printf("setting up scene\n");
fluidBounds = AABB(vec3(1), vec3(N+1));
printf("creating camera\n");
cam = Camera(vec3(N, -N, -N), vec3(N/2, N/2, N/2), vec3(0, 1, 0), image_width, image_height);
printf("computing dimensions\n");
int block_width=32, block_height=32;
dims.dimGridFluid = dim3((N + 2) / block_width, (N + 2) / block_height, N + 2);
dims.dimBlockFluid = dim3(block_width, block_height, 1);
int block_width_draw=32, block_height_draw=16;
dims.dimGridDraw = dim3(image_width / block_width_draw, image_height / block_height_draw);
dims.dimBlockDraw = dim3(block_width_draw, block_height_draw);
dims.dimGridBounds = dim3((N + 2) / block_width, (N + 2) / block_height);
dims.dimBlockBounds = dim3(block_width, block_height);
printf("allocating buffers\n");
size_t memsize = SIZE * sizeof(float);
HANDLE_ERROR(hipMalloc(&(device_pointers.u), memsize));
HANDLE_ERROR(hipMemset((device_pointers.u), 0, memsize));
HANDLE_ERROR(hipMalloc(&(device_pointers.u0), memsize));
HANDLE_ERROR(hipMemset((device_pointers.u0), 0, memsize));
HANDLE_ERROR(hipMalloc(&(device_pointers.v), memsize));
HANDLE_ERROR(hipMemset((device_pointers.v), 0, memsize));
HANDLE_ERROR(hipMalloc(&(device_pointers.v0), memsize));
HANDLE_ERROR(hipMemset((device_pointers.v0), 0, memsize));
HANDLE_ERROR(hipMalloc(&(device_pointers.w), memsize));
HANDLE_ERROR(hipMemset((device_pointers.w), 0, memsize));
HANDLE_ERROR(hipMalloc(&(device_pointers.w0), memsize));
HANDLE_ERROR(hipMemset((device_pointers.w0), 0, memsize));
HANDLE_ERROR(hipMalloc(&(device_pointers.dens), memsize));
HANDLE_ERROR(hipMemset((device_pointers.dens), 0, memsize));
HANDLE_ERROR(hipMalloc(&(device_pointers.dens0), memsize));
HANDLE_ERROR(hipMemset((device_pointers.dens0), 0, memsize));
HANDLE_ERROR(hipMalloc(&(device_pointers.u_s), memsize));
HANDLE_ERROR(hipMemset((device_pointers.u_s), 0, memsize));
HANDLE_ERROR(hipMalloc(&(device_pointers.v_s), memsize));
HANDLE_ERROR(hipMemset((device_pointers.v_s), 0, memsize));
HANDLE_ERROR(hipMalloc(&(device_pointers.w_s), memsize));
HANDLE_ERROR(hipMemset((device_pointers.w_s), 0, memsize));
HANDLE_ERROR(hipMalloc(&(device_pointers.sources), memsize));
HANDLE_ERROR(hipMemset((device_pointers.sources), 0, memsize));
// //float source = 1.0;
// //HANDLE_ERROR(hipMemcpy(&(device_pointers.dens0[IX(N/2, N/2)]), &source, sizeof(float), hipMemcpyHostToDevice));
// //float v[SIZE];
// // for(int i=0; i<SIZE; i++){
// // v[i] = 1000.0;
// // }
// //
// //float source_array[SIZE];
printf("instantiating arrays\n");
float velmag = 0.01;
float v[SIZE], u[SIZE], w[SIZE], dens0[SIZE], sources[SIZE];
int offset = 16;
int min_index = N/2 - offset;
int max_index = N/2 + offset;
for(int i = 1; i <= N; i++){
for(int j = 1; j <= N ; j++){
for(int k = 1; k <= N ; k++){
int index = IX(i, j, k);
u[index] = 0;
v[index] = 0;
w[index] = 0;
if(i > min_index && i < max_index && j > N+2 - offset && k > min_index && k < max_index){
sources[index] =0.1;
}else{
sources[index] =0;
}
if(((i/offset)%2 ==0 ^ (k/offset)%2 ==0) && j > N+2 - offset){
//if(((i/offset)%8 ==0 && (k/offset)%8 ==0) && j == 1){
dens0[index] = 0.0;
//w[index] = velmag;
}else{
dens0[index] = 0.0;
}
// u[index] = velmag;
// v[index] = velmag;
// w[index] = velmag;
float R = 4.0;
float x = i - N/2.0;
float y = j - N/2.0;
float z = k - N/2.0;
float r = length(vec2(x, z));
if( r > 1){
//dens0[index] = 1.0;
// u[index] = velmag * z/r;
// v[index] = velmag * -100/r;
// w[index] = velmag * -x/r;
vec3 vel = vec3(z, -10, -x);
vel /= r;
//vel *= 1/(abs(r-R)+1);
//vec3 vel = (r - R)/abs(r - R) * 1/(abs(r-R)+1) * velmag * vel;
u[index] = vel.x;
v[index] = vel.y;
w[index] = vel.z;
}
// vec3 vel = vec3(z, 1, -x);
// if(length(vel) > 0.1){
// vec3 vel = /*(r - R)/abs(r - R) * 1/(abs(r-R)+1)*/ velmag * vel;
// u[index] = vel.x;
// v[index] = vel.y;
// w[index] = vel.z;
// }else{
// u[index] = 0;
// v[index] = 0;
// w[index] = 0;
// }
//printf("%f, %f\n", vel.x, vel.y);
//dens0[index] = 0.1 /(abs(r-R)+1);
if(abs(r-R) < 0.01){
u[index] = 0;
v[index] = 0;
w[index] = 0;
}
}
}
}
int index = IX(N/2, N/2, N/2);
u[index] = 0;
v[index] = 0;
w[index] = 0;
HANDLE_ERROR(hipMemcpy(device_pointers.dens, dens0, memsize, hipMemcpyHostToDevice));
// HANDLE_ERROR(hipMemcpy(device_pointers.u, u, memsize, hipMemcpyHostToDevice));
// HANDLE_ERROR(hipMemcpy(device_pointers.v, v, memsize, hipMemcpyHostToDevice));
// HANDLE_ERROR(hipMemcpy(device_pointers.w, w, memsize, hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(device_pointers.sources, sources, memsize, hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(device_pointers.u_s, u, memsize, hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(device_pointers.v_s, v, memsize, hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(device_pointers.w_s, w, memsize, hipMemcpyHostToDevice));
}
extern "C" void destroy_scene(){
printf("destroying scene\n");
hipFree(device_pointers.u);
hipFree(device_pointers.v);
hipFree(device_pointers.u0);
hipFree(device_pointers.v0);
hipFree(device_pointers.dens);
hipFree(device_pointers.dens0);
hipFree(device_pointers.sources);
}
//! Keyboard events handler for GLUT
// GLUT keyboard handler: 'q'/ESC quits, 'v' toggles density vs. velocity
// rendering; 'w'/'a'/'s'/'d' are currently no-ops (placeholders).
void keyboard(unsigned char key, int x, int y)
{
switch(key) {
case(27) :case('q') :
exit(0);
break;
case('w'):
break;
case('a'):
case('s'):
case('d'):
break;
case('v'):
draw_dens_flag = !draw_dens_flag;
break;
}
// indicate the display must be redrawn
glutPostRedisplay();
}
// Build a camera at `eye` looking at `lookat_in`, with the image plane one
// unit (`near`) in front of the eye. right/up span the image plane (halved
// to narrow the field of view); c, d, e, f map pixel coordinates to plane
// coordinates, keeping the aspect ratio square for non-square images.
Camera::Camera(vec3 eye, vec3 lookat_in, vec3 up_in, int width, int height){
float near = 1;
cof = eye;
up = normalize(up_in);
lookat = normalize(lookat_in - cof);
right = normalize(cross(lookat, up))/2.f ;
center = cof + (normalize(lookat) * near);
// Re-orthogonalize up against the actual view direction.
up = normalize(cross(right, lookat))/2.f ;
printVec3(center);
printVec3(right);
printVec3(up);
printVec3(lookat);
if (width > height) {
d = (width - 1) / 2.0;
c = (height * (width - 1.0))/ (2.0 * width);
f = (height - 1.0) / 2.0;
e = (height - 1.0) / 2.0;
} else {
d = (width - 1.0) / 2.0;
c = (width - 1.0) / 2.0;
f = (height - 1.0) / 2.0;
e = (width * (height - 1.0))/ (2.0 * height);
}
}
// Default constructor: members stay uninitialized until assigned.
Camera::Camera(){}
// Map a pixel coordinate to normalized image-plane coordinates (x, y),
// locate the corresponding point on the plane, and return the primary ray
// from the eye (cof) through that point.
__device__ Ray Camera::compute_ray(float pixel_x, float pixel_y){
float x = (pixel_x - d) / c, y = ((pixel_y - f) / e);
vec3 pixel_point = center + (up * y) + (right * x);
Ray result(cof, normalize((pixel_point - cof)));
return result;
}
// Apply the affine transform t to the whole camera frame.
// Points (center, cof) transform with w = 1; direction vectors
// (lookat, up, right) transform with w = 0 so translation is ignored.
void Camera::transform(glm::mat4 t){
    center = vec3(t * vec4(center, 1));
    cof    = vec3(t * vec4(cof, 1));
    lookat = vec3(t * vec4(lookat, 0));
    up     = vec3(t * vec4(up, 0));
    right  = vec3(t * vec4(right, 0));
}
// Ray constructor: store origin and direction as given (no normalization).
__device__ Ray::Ray(const vec3 & P, const vec3 & D): p(P), d(D){}
// Evaluate the ray at parameter t: p + d*t.
__device__ vec3 Ray::solve(const float &t) const{
return p+(d*t);
}
// Slab-method ray/box intersection (Williams et al. style). Returns the
// parametric entry/exit interval clipped against [t0, t1]; on a miss the
// sentinel interval {-1, -1} is returned.
// NOTE(review): divisions by zero direction components yield IEEE
// infinities, which the slab method tolerates — presumably intentional.
__device__ struct interval AABB::intersect(const Ray r, float t0, float t1) const {
float tmin, tmax, tymin, tymax, tzmin, tzmax;
struct interval result = {-1, -1};
// X slab: pick near/far planes based on the ray direction's sign.
if (r.d.x >= 0) {
tmin = (bounds[0].x - r.p.x) / r.d.x;
tmax = (bounds[1].x - r.p.x) / r.d.x;
}
else {
tmin = (bounds[1].x - r.p.x) / r.d.x;
tmax = (bounds[0].x - r.p.x) / r.d.x;
}
// Y slab.
if (r.d.y >= 0) {
tymin = (bounds[0].y - r.p.y) / r.d.y;
tymax = (bounds[1].y - r.p.y) / r.d.y;
}
else {
tymin = (bounds[1].y - r.p.y) / r.d.y;
tymax = (bounds[0].y - r.p.y) / r.d.y;
}
if ( (tmin > tymax) || (tymin > tmax) )
return result;
if (tymin > tmin)
tmin = tymin;
if (tymax < tmax)
tmax = tymax;
// Z slab.
if (r.d.z >= 0) {
tzmin = (bounds[0].z - r.p.z) / r.d.z;
tzmax = (bounds[1].z - r.p.z) / r.d.z;
}
else {
tzmin = (bounds[1].z - r.p.z) / r.d.z;
tzmax = (bounds[0].z - r.p.z) / r.d.z;
}
if ( (tmin > tzmax) || (tzmin > tmax) )
return result;
if (tzmin > tmin)
tmin = tzmin;
if (tzmax < tmax)
tmax = tzmax;
// Accept only if the interval overlaps the caller's [t0, t1] range.
if((tmin < t1) && (tmax > t0)){
result.tmin = tmin;
result.tmax = tmax;
return result;
}else{
return result;
}
} | 1cf3f2325759344cc4f0f49f254cabb8adc8bb81.cu | //kernelPBO.cu (Rob Farber)
#include "fluids.h"
using namespace glm;
class Ray {
public:
vec3 p, d;
__device__ Ray(const vec3 & P, const vec3 & D);
__device__ vec3 solve(const float &t) const;
};
class Camera{
public:
vec3 center, lookat, up, right, cof;
float c, d, e, f;
Camera();
Camera(vec3 eye, vec3 lookat, vec3 up, int width, int height);
__device__ Ray compute_ray(float pixel_x, float pixel_y);
void transform(glm::mat4 t);
};
struct interval
{
float tmin, tmax;
};
class AABB{
public:
vec3 bounds[2], center;
__device__ struct interval intersect(const Ray ray, float t0, float t1) const;
AABB(){
bounds[0] = vec3(0);
bounds[1] = vec3(0);
center = vec3(0);
}
AABB(vec3 min, vec3 max) {
bounds[0] = min;
bounds[1] = max;
center = bounds[0] + (bounds[1] - bounds[0]) / 2.f;
}
};
// Abort with a file/line diagnostic when a CUDA API call fails.
// Normally invoked through the HANDLE_ERROR macro.
static void HandleError( cudaError_t err,
const char *file,
int line ) {
if (err != cudaSuccess) {
printf( "%s in %s at line %d\n", cudaGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
// Check for a pending runtime error (e.g. a bad kernel launch) and abort
// with the given context message. cudaGetLastError also clears the error.
void checkCUDAError(const char *msg) {
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
__host__ __device__ void printVec3(vec3 v){
printf("<%f, %f, %f>\n", v.x, v.y, v.z);
}
struct bufferPointers{
float *u, *v, *w, *u0, *v0, *w0, *dens, *dens0, *sources, *u_s, *v_s, *w_s;
};
struct cudaDims{
dim3 dimBlockFluid, dimGridFluid;
dim3 dimBlockBounds, dimGridBounds;
dim3 dimBlockDraw, dimGridDraw;
};
struct bufferPointers device_pointers;
struct cudaDims dims;
bool draw_dens_flag = true;
Camera cam;
AABB fluidBounds;
__device__ vec4 marchRay(Ray r, struct bufferPointers p, bool draw_dens_flag, AABB fluidBounds){
vec4 ray_color(0);
float multiplier = 1;
float opacity = 0.1 * RAY_STEP;
struct interval interval = fluidBounds.intersect(r, -10000, 10000);
float t = interval.tmin;
vec3 pos = r.solve(t);
int pixel_x= blockIdx.x * blockDim.x + threadIdx.x;
int pixel_y= blockIdx.y * blockDim.y + threadIdx.y;
bool flag = false && pixel_x == 512 && pixel_y == 512;
if(flag) printVec3(r.p);
if(flag) printVec3(r.d);
while(t < interval.tmax && multiplier > 0){
int i = (int) pos.x;
int j = (int) pos.y;
int k = (int) pos.z;
if(i > 0 && i <= N && j > 0 && j <= N && k > 0 && k <=N){
int index = IX(i, j, k);
if(flag) printVec3(pos);
if(flag) printf("%d %d %d\n", i, j, k);
if(flag) printVec3(vec3(p.dens[index]));
if(draw_dens_flag){
ray_color += multiplier * opacity * vec4(p.dens[index]);
multiplier *= (1 - opacity * p.dens[index]);
}else{
float weight =1;
ray_color.x += opacity * abs(weight * p.u[index]);
ray_color.y += opacity * abs(weight * p.v[index]);
ray_color.z += opacity * abs(weight * p.w[index]);
}
}
t += RAY_STEP;
pos = r.solve(t);
}
return ray_color;
}
__global__ void draw_dens_kernel(struct bufferPointers p, bool draw_dens_flag, AABB fluidBounds, float dt, Camera cam, uchar4* pixels){
//printf("kernel\n");
int pixel_x= blockIdx.x * blockDim.x + threadIdx.x;
int pixel_y= blockIdx.y * blockDim.y + threadIdx.y;
int index = pixel_x+pixel_y*blockDim.x*gridDim.x;
vec4 color = marchRay(cam.compute_ray(pixel_x, pixel_y), p, draw_dens_flag, fluidBounds);
// vec4 color(0);
// if(draw_dens_flag){
// color = vec4(p.dens[index]);
// }else{
// color.x = abs(1000 * p.u[index]);
// color.y = abs(1000 * p.v[index]);
// }
// Each thread writes one pixel location in the texture (textel)
pixels[index].x = (unsigned char)(min(1.0, color.x) * 255.9999f);
pixels[index].y = (unsigned char)(min(1.0, color.y) * 255.9999f);
pixels[index].z = (unsigned char)(min(1.0, color.z) * 255.9999f);
pixels[index].w = (unsigned char)(min(1.0, color.w) * 255.9999f);
}
__global__ void set_bnd_kernel ( int b, float * x )
{
int pixel_x= blockIdx.x * blockDim.x + threadIdx.x;
int pixel_y= blockIdx.y * blockDim.y + threadIdx.y;
int i = pixel_x;
int j = pixel_y;//IX(pixel_x, pixel_y);
//if(i > 0 && i <= N && j > 0 && j <= N){
switch(b){
case 0:
// x[IX(1 ,i)] += x[IX(0,i)];
// x[IX(N,i)] += x[IX(N+1,i)];
// x[IX(i,1 )] += x[IX(i,0)];
// x[IX(i,N)] += x[IX(i,N+1)];
// x[IX(1 ,i, j)] += x[IX(0 ,i, j)];
// x[IX(N,i, j)] += x[IX(N+1,i, j)];
// x[IX(i,1, j)] += x[IX(i,0, j)];
// x[IX(i,N, j)] += x[IX(i,N+1, j)];
// x[IX(i, j, 1)] += x[IX(i, j, 0)];
// x[IX(i, j, N)] += x[IX(i, j, N+1)];
x[IX(0 ,i, j)] = 0;
x[IX(N+1,i, j)] = 0;
x[IX(i,0, j)] = 0;
x[IX(i,N+1, j)] = 0;
x[IX(i, j, 0)] = 0;
x[IX(i, j, N+1)] = 0;
break;
case 1:
if(x[IX(0 ,i, j)] < 0) x[IX(0 ,i, j)] = -x[IX(0,i, j)];
if(x[IX(N+1,i, j)] > 0) x[IX(N+1,i, j)] = -x[IX(N+1,i, j)];
break;
case 2:
if(x[IX(i,0 , j)] < 0) x[IX(i,0, j )] = -x[IX(i,0, j)];
if(x[IX(i,N+1, j)] >0) x[IX(i,N+1, j)] = -x[IX(i,N+1, j)];
break;
case 3:
if(x[IX(i, j, 0)] < 0) x[IX(i, j, 0)] = -x[IX(i,j, 0)];
if(x[IX(i, j, N+1)] >0) x[IX(i, j, N+1)] = -x[IX(i, j, N+1)];
break;
default:
break;
// x[IX(0 ,i)] = b == 1 ? -x[IX(1,i)] : x[IX(1,i)];
// x[IX(N+1,i)] = b == 1 ? -x[IX(N,i)] : x[IX(N,i)];
// x[IX(i,0 )] = b==2 ? -x[IX(i,1)] : x[IX(i,1)];
// x[IX(i,N+1)] = b==2 ? -x[IX(i,N)] : x[IX(i,N)];
}
//}
// if(i < 32){
// x[IX(0 ,0 )] = 0.5*(x[IX(1,0 )]+x[IX(0 ,1)]);
// x[IX(0 ,N+1)] = 0.5*(x[IX(1,N+1)]+x[IX(0 ,N )]);
// x[IX(N+1,0 )] = 0.5*(x[IX(N,0 )]+x[IX(N+1,1)]);
// x[IX(N+1,N+1)] = 0.5*(x[IX(N,N+1)]+x[IX(N+1,N )]);
// }
}
// Host wrapper: enforce boundary conditions of type b on field x, then
// block until the kernel has finished.
// FIX: cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize().
__host__ void set_bnd(int b, float * x){
    set_bnd_kernel<<<dims.dimGridBounds, dims.dimBlockBounds>>>(b, x);
    checkCUDAError("kernel failed!");
    cudaDeviceSynchronize();
}
__global__ void add_source_kernel(float * x, float * s, float dt )
{
int i= blockIdx.x * blockDim.x + threadIdx.x;
int j= blockIdx.y * blockDim.y + threadIdx.y;
int k = blockIdx.z * blockDim.z + threadIdx.z;
int index = IX(i, j, k);
x[index] += dt * s[index];
}
// Host wrapper: x += dt * s over the whole grid, synchronously.
// FIX: cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize().
__host__ void add_source(struct cudaDims dims, float * x, float * s, float dt ){
    add_source_kernel<<<dims.dimGridFluid, dims.dimBlockFluid>>>(x, s, dt );
    checkCUDAError("kernel failed!");
    cudaDeviceSynchronize();
}
__global__ void diffuse_kernel(float * x, float * x0, float diff, float dt){
int i, j, k;
i = blockIdx.x * blockDim.x + threadIdx.x;
j = blockIdx.y * blockDim.y + threadIdx.y;
k = blockIdx.z * blockDim.z + threadIdx.z;
float a = dt * diff * N * N * N ;
if(i > 0 && i <= N && j > 0 && j <= N && k > 0 && k <=N){
x[IX(i, j, k)] = (x0[IX(i , j, k)] + a * (
x[IX(i - 1, j, k)] +
x[IX(i + 1, j, k)] +
x[IX(i, j - 1, k)] +
x[IX(i, j + 1, k)] +
x[IX(i, j, k - 1)] +
x[IX(i, j, k + 1)]
)) / (1 + 6 * a);
}
}
// Host wrapper: run K relaxation sweeps of the diffusion solve, applying
// boundary conditions of type b after each sweep.
// FIX: cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize().
__host__ void diffuse(struct cudaDims dims, int b, float *x, float*x0, float diff, float dt){
    for(int k = 0; k < K; k++){
        diffuse_kernel<<<dims.dimGridFluid, dims.dimBlockFluid>>>(x, x0, diff, dt);
        checkCUDAError("kernel failed!");
        cudaDeviceSynchronize();
        set_bnd(b, x);
    }
}
//set bounds after call
// Semi-Lagrangian advection: for each interior cell, trace the velocity
// field (u, v, w) backwards over dt, clamp the backtraced point into the
// grid, and set d to the trilinear interpolation of d0 at that point.
// ia/ja/ka hold the two bracketing integer coordinates per axis;
// sa/ta/ua hold the matching interpolation weights.
__global__ void advect_kernel(float * d, float * d0, float * u, float * v,float * w, float dt )
{
int i, j, k, ia[2], ja[2], ka[2];
i = blockIdx.x * blockDim.x + threadIdx.x;
j = blockIdx.y * blockDim.y + threadIdx.y;
k = blockIdx.z * blockDim.z + threadIdx.z;
float x, y, z, sa[2], ta[2], ua[2], dt0;
// Timestep expressed in grid cells.
dt0 = dt * N;
if(i > 0 && i <= N && j > 0 && j <= N && k > 0 && k <=N){
// Backtrace this cell center through the velocity field.
x = i - dt0 * u[IX(i, j, k)];
y = j - dt0 * v[IX(i, j, k)];
z = k - dt0 * w[IX(i, j, k)];
// Clamp to [0.5, N+0.5] so both bracketing indices stay inside the
// (N+2)^3 grid, including the boundary layer.
if (x < 0.5) x = 0.5;
if (x > N + 0.5) x = N + 0.5;
ia[0] = (int) x;
ia[1] = ia[0] + 1;
if (y < 0.5) y = 0.5;
if (y > N + 0.5) y = N + 0.5;
ja[0] = (int) y;
ja[1] = ja[0] + 1;
if (z < 0.5) z = 0.5;
if (z > N + 0.5) z = N + 0.5;
ka[0] = (int) z;
ka[1] = ka[0] + 1;
// Fractional offsets -> trilinear weights per axis.
sa[1] = x - ia[0];
sa[0] = 1 - sa[1];
ta[1] = y - ja[0];
ta[0] = 1 - ta[1];
ua[1] = z - ka[0];
ua[0] = 1 - ua[1];
// d[IX(i,j, k)] = ua[0] * (sa[0] * (ta[0] * d0[IX(ia[0],ja[0], ka[0])] + ta[1] * d0[IX(ia[0], ja[1], ka[0])]) +
// sa[1] * (ta[0] * d0[IX(ia[1], ja[0], ka[0])] + ta[1] * d0[IX(ia[1], ja[1], ka[0])])) +
// ua[1] * (sa[0] * (ta[0] * d0[IX(ia[0],ja[0], ka[1])] + ta[1] * d0[IX(ia[0], ja[1], ka[1])]) +
// sa[1] * (ta[0] * d0[IX(ia[1], ja[0], ka[1])] + ta[1] * d0[IX(ia[1], ja[1], ka[1])]));
// Accumulate the 2x2x2 weighted corner samples (same result as the
// explicit formula commented out above).
d[IX(i,j, k)] = 0;
for(int a = 0; a < 2; a++){
for (int b = 0; b < 2; b++){
for (int c = 0; c < 2; c++){
d[IX(i,j, k)] += sa[a] * ta[b] * ua[c] * d0[IX(ia[a], ja[b], ka[c])];
}
}
}
}
//set_bnd ( N, b, d );
}
/*
 * Host driver for one advection step: transports field d0 through velocity
 * (u, v, w) into d, then re-applies the boundary conditions.
 * Fix: cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize().
 */
__host__ void advect(struct cudaDims dims, int b, float * d, float * d0, float * u, float * v, float * w, float dt ){
    advect_kernel<<<dims.dimGridFluid, dims.dimBlockFluid>>>(d, d0, u, v, w, dt);
    checkCUDAError("kernel failed!");
    cudaDeviceSynchronize();  // result must be complete before set_bnd reads it
    set_bnd(b, d);
}
/*
 * Projection pass 1: compute the central-difference divergence of the
 * velocity field into `divergence` and zero out `momentum` (the initial
 * guess for the pressure-like Jacobi solve in project_kernel_2).
 * Bug fix: the z-derivative term previously sampled v instead of w, so the
 * w component never contributed to the divergence (and the w parameter was
 * unused); Stam's formulation uses w here.
 */
__global__ void project_kernel_1(float * u, float * v, float *w, float * momentum, float * divergence){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    int k = blockIdx.z * blockDim.z + threadIdx.z;
    float h = 1.0/N;  // grid spacing
    if(i > 0 && i <= N && j > 0 && j <= N && k > 0 && k <=N){
        divergence[IX(i,j, k)] = -0.5*h*(u[IX(i + 1, j, k)] - u[IX(i - 1, j, k)]+
                                         v[IX(i, j + 1, k)] - v[IX(i, j - 1, k)]+
                                         w[IX(i, j, k + 1)] - w[IX(i, j, k - 1)]);
        momentum[IX(i,j, k)] = 0;
    }
}
/*
 * Projection pass 2: one Jacobi-style relaxation sweep solving the Poisson
 * equation for `momentum` given the divergence field. Called K times from
 * project(). u/v/w are unused here but kept so all three project kernels
 * share one signature.
 * Cleanup: removed the unused local `h` (only passes 1 and 3 need the grid
 * spacing).
 * NOTE(review): momentum is read and written in place by concurrent threads
 * with no synchronization, so sweeps mix old and new neighbor values.
 */
__global__ void project_kernel_2(float * u, float * v, float *w, float * momentum, float * divergence){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    int k = blockIdx.z * blockDim.z + threadIdx.z;
    if(i > 0 && i <= N && j > 0 && j <= N && k > 0 && k <=N){
        momentum[IX(i,j, k)] = (divergence[IX(i, j, k)] +
                                momentum[IX(i - 1, j, k)] + momentum[IX(i + 1, j, k)]+
                                momentum[IX(i, j - 1, k)] + momentum[IX(i, j + 1, k)]+
                                momentum[IX(i, j, k-1)] + momentum[IX(i, j, k + 1)])/6;
    }
}
// Projection pass 3: subtract the gradient of the solved `momentum` field
// from the velocity, leaving (u, v, w) approximately divergence-free.
// `divergence` is unused here; kept so all three project kernels share one
// signature.
__global__ void project_kernel_3(float * u, float * v, float *w, float * momentum, float * divergence){
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int k = blockIdx.z * blockDim.z + threadIdx.z;
float h = 1.0/N;
if(i > 0 && i <= N && j > 0 && j <= N && k > 0 && k <=N){
// Central-difference gradient of momentum, scaled by 1/h per axis.
u[IX(i,j,k)] -= 0.5*(momentum[IX(i+1, j, k)]-momentum[IX(i-1, j, k)])/h;
v[IX(i,j,k)] -= 0.5*(momentum[IX(i, j+1, k)]-momentum[IX(i, j-1, k)])/h;
w[IX(i,j,k)] -= 0.5*(momentum[IX(i, j, k+1)]-momentum[IX(i, j, k-1)])/h;
}
}
/*
 * Pressure projection (Helmholtz decomposition): computes the divergence,
 * solves for a pressure-like field with K Jacobi sweeps, then subtracts its
 * gradient from the velocity so the field is (approximately)
 * divergence-free. Boundary conditions are re-applied after every stage.
 * Fix: cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize().
 */
__host__ void project (cudaDims dims, float * u, float * v, float *w, float * momentum, float * divergence)
{
    int k;
    // Pass 1: divergence + zeroed initial guess.
    project_kernel_1<<<dims.dimGridFluid, dims.dimBlockFluid>>>(u, v, w, momentum, divergence);
    checkCUDAError("kernel failed!");
    cudaDeviceSynchronize();
    set_bnd (0, momentum );
    set_bnd (0, divergence );
    // Pass 2: K relaxation sweeps of the Poisson solve.
    for ( k=0 ; k<K ; k++ ) {
        project_kernel_2<<<dims.dimGridFluid, dims.dimBlockFluid>>>(u, v, w, momentum, divergence);
        checkCUDAError("kernel failed!");
        cudaDeviceSynchronize();
        set_bnd (0, momentum );
    }
    // Pass 3: subtract the gradient from the velocity field.
    project_kernel_3<<<dims.dimGridFluid, dims.dimBlockFluid>>>(u, v, w, momentum, divergence);
    checkCUDAError("kernel failed!");
    cudaDeviceSynchronize();
    set_bnd (1, u );
    set_bnd (2, v );
    set_bnd (3, w );
}
// One velocity update (Stam's vel_step): diffuse, project, self-advect,
// project again. SWAP exchanges the current/previous buffer pointers, so
// after each SWAP the "new" field is written into u/v/w using u0/v0/w0 as
// the source. project() reuses u0/v0 as its momentum/divergence scratch
// buffers, clobbering their contents.
// NOTE(review): source injection (add_source) is commented out here —
// launch_kernel() performs it before calling vel_step instead.
void vel_step (struct cudaDims dims, float * u, float * v, float *w, float* u0, float * v0, float * w0, float visc, float dt )
{
// add_source(dims, u, u0, dt );
// add_source(dims, v, v0, dt );
// add_source(dims, w, w0, dt );
SWAP (u0, u );
SWAP (v0, v );
SWAP (w0, w );
diffuse(dims, 1, u, u0, visc, dt);
diffuse(dims, 2, v, v0, visc, dt);
diffuse(dims, 3, w, w0, visc, dt);
project (dims, u, v, w, u0, v0);
SWAP ( u0, u );
SWAP ( v0, v );
SWAP ( w0, w );
// Self-advection: transport each velocity component through the
// previous velocity field.
advect (dims, 1, u, u0, u0, v0, w0, dt );
advect (dims, 2, v, v0, u0, v0, w0, dt );
advect (dims, 3, w, w0, u0, v0, w0, dt );
project (dims, u, v, w, u0, v0);
}
// One density update (Stam's dens_step): diffuse the density field, then
// advect it through the velocity field. Source injection (add_source) is
// commented out here — launch_kernel() performs it before calling this.
__host__ void dens_step(struct cudaDims dims, float * x, float * x0, float * u, float * v, float * w, float diff, float dt)
{
//add_source(dims, x, x0, dt);
SWAP(x0, x);
diffuse(dims, 0, x, x0, diff, dt);
SWAP(x0, x);
advect(dims, 0, x, x0, u, v, w, dt);
}
// Wrapper for the __global__ call that sets up the kernel call
// Wrapper for the __global__ call that sets up the kernel call
// Per-frame entry point: injects velocity/density sources, advances the
// simulation one step, and renders the density field into `pixels`.
// Relies on the file-scope globals device_pointers, dims, draw_dens_flag,
// fluidBounds and cam being initialized by setup_scene().
extern "C" void launch_kernel(uchar4* pixels, unsigned int image_width, unsigned int image_height, float dt)
{
struct bufferPointers p = device_pointers;
// Inject the persistent sources (set up in setup_scene) into the fields.
add_source(dims, p.u, p.u_s, dt );
add_source(dims, p.v, p.v_s, dt );
add_source(dims, p.w, p.w_s, dt );
add_source(dims, p.dens, p.sources, dt );
vel_step (dims, p.u, p.v, p.w, p.u0, p.v0, p.w0, VISC, dt );
dens_step(dims, p.dens, p.dens0, p.u, p.v, p.w, DIFF, dt);
// Ray-march the density volume into the pixel buffer.
draw_dens_kernel<<<dims.dimGridDraw, dims.dimBlockDraw>>>(device_pointers, draw_dens_flag, fluidBounds, dt, cam, pixels);
checkCUDAError("kernel failed!");
size_t memsize = SIZE * sizeof(float);
// HANDLE_ERROR(cudaMemset((device_pointers.u0), 0, memsize));
// HANDLE_ERROR(cudaMemset((device_pointers.v0), 0, memsize));
// HANDLE_ERROR(cudaMemset((device_pointers.w0), 0, memsize));
//HANDLE_ERROR(cudaMemset((device_pointers.dens0), 0, memsize));
// SWAP(device_pointers.v0, device_pointers.v);
// SWAP(device_pointers.u0, device_pointers.u);
// SWAP(device_pointers.w0, device_pointers.w);
// SWAP(device_pointers.dens0, device_pointers.dens);
}
// One-time scene initialization: builds the camera and fluid bounding box,
// computes the launch dimensions, allocates and zeroes all device buffers,
// and seeds the host-side initial density/velocity-source fields before
// uploading them.
// NOTE(review): the five float[SIZE] arrays below live on the host stack —
// for large N this can overflow the stack; consider heap allocation.
extern "C" void setup_scene(unsigned int image_width, unsigned int image_height){
printf("setting up scene\n");
fluidBounds = AABB(vec3(1), vec3(N+1));
printf("creating camera\n");
cam = Camera(vec3(N, -N, -N), vec3(N/2, N/2, N/2), vec3(0, 1, 0), image_width, image_height);
printf("computing dimensions\n");
// 2D blocks swept across z for the fluid kernels.
int block_width=32, block_height=32;
dims.dimGridFluid = dim3((N + 2) / block_width, (N + 2) / block_height, N + 2);
dims.dimBlockFluid = dim3(block_width, block_height, 1);
int block_width_draw=32, block_height_draw=16;
dims.dimGridDraw = dim3(image_width / block_width_draw, image_height / block_height_draw);
dims.dimBlockDraw = dim3(block_width_draw, block_height_draw);
dims.dimGridBounds = dim3((N + 2) / block_width, (N + 2) / block_height);
dims.dimBlockBounds = dim3(block_width, block_height);
printf("allocating buffers\n");
size_t memsize = SIZE * sizeof(float);
HANDLE_ERROR(cudaMalloc(&(device_pointers.u), memsize));
HANDLE_ERROR(cudaMemset((device_pointers.u), 0, memsize));
HANDLE_ERROR(cudaMalloc(&(device_pointers.u0), memsize));
HANDLE_ERROR(cudaMemset((device_pointers.u0), 0, memsize));
HANDLE_ERROR(cudaMalloc(&(device_pointers.v), memsize));
HANDLE_ERROR(cudaMemset((device_pointers.v), 0, memsize));
HANDLE_ERROR(cudaMalloc(&(device_pointers.v0), memsize));
HANDLE_ERROR(cudaMemset((device_pointers.v0), 0, memsize));
HANDLE_ERROR(cudaMalloc(&(device_pointers.w), memsize));
HANDLE_ERROR(cudaMemset((device_pointers.w), 0, memsize));
HANDLE_ERROR(cudaMalloc(&(device_pointers.w0), memsize));
HANDLE_ERROR(cudaMemset((device_pointers.w0), 0, memsize));
HANDLE_ERROR(cudaMalloc(&(device_pointers.dens), memsize));
HANDLE_ERROR(cudaMemset((device_pointers.dens), 0, memsize));
HANDLE_ERROR(cudaMalloc(&(device_pointers.dens0), memsize));
HANDLE_ERROR(cudaMemset((device_pointers.dens0), 0, memsize));
HANDLE_ERROR(cudaMalloc(&(device_pointers.u_s), memsize));
HANDLE_ERROR(cudaMemset((device_pointers.u_s), 0, memsize));
HANDLE_ERROR(cudaMalloc(&(device_pointers.v_s), memsize));
HANDLE_ERROR(cudaMemset((device_pointers.v_s), 0, memsize));
HANDLE_ERROR(cudaMalloc(&(device_pointers.w_s), memsize));
HANDLE_ERROR(cudaMemset((device_pointers.w_s), 0, memsize));
HANDLE_ERROR(cudaMalloc(&(device_pointers.sources), memsize));
HANDLE_ERROR(cudaMemset((device_pointers.sources), 0, memsize));
// //float source = 1.0;
// //HANDLE_ERROR(cudaMemcpy(&(device_pointers.dens0[IX(N/2, N/2)]), &source, sizeof(float), cudaMemcpyHostToDevice));
// //float v[SIZE];
// // for(int i=0; i<SIZE; i++){
// // v[i] = 1000.0;
// // }
// //
// //float source_array[SIZE];
printf("instantiating arrays\n");
float velmag = 0.01;
float v[SIZE], u[SIZE], w[SIZE], dens0[SIZE], sources[SIZE];
int offset = 16;
int min_index = N/2 - offset;
int max_index = N/2 + offset;
// Fill the interior cells (boundary layer is left untouched here; the
// device buffers were already zeroed above).
for(int i = 1; i <= N; i++){
for(int j = 1; j <= N ; j++){
for(int k = 1; k <= N ; k++){
int index = IX(i, j, k);
u[index] = 0;
v[index] = 0;
w[index] = 0;
// Density source: a square patch near the high-j face, centered in x/z.
if(i > min_index && i < max_index && j > N+2 - offset && k > min_index && k < max_index){
sources[index] =0.1;
}else{
sources[index] =0;
}
// Checkerboard pattern near the high-j face (both branches currently
// write 0.0, so dens0 ends up uniformly zero).
if(((i/offset)%2 ==0 ^ (k/offset)%2 ==0) && j > N+2 - offset){
//if(((i/offset)%8 ==0 && (k/offset)%8 ==0) && j == 1){
dens0[index] = 0.0;
//w[index] = velmag;
}else{
dens0[index] = 0.0;
}
// u[index] = velmag;
// v[index] = velmag;
// w[index] = velmag;
float R = 4.0;
float x = i - N/2.0;
float y = j - N/2.0;
float z = k - N/2.0;
// Radial distance from the vertical (y) axis in the x/z plane.
float r = length(vec2(x, z));
if( r > 1){
//dens0[index] = 1.0;
// u[index] = velmag * z/r;
// v[index] = velmag * -100/r;
// w[index] = velmag * -x/r;
// Swirl around the y axis with a downward component, scaled by 1/r.
vec3 vel = vec3(z, -10, -x);
vel /= r;
//vel *= 1/(abs(r-R)+1);
//vec3 vel = (r - R)/abs(r - R) * 1/(abs(r-R)+1) * velmag * vel;
u[index] = vel.x;
v[index] = vel.y;
w[index] = vel.z;
}
// vec3 vel = vec3(z, 1, -x);
// if(length(vel) > 0.1){
// vec3 vel = /*(r - R)/abs(r - R) * 1/(abs(r-R)+1)*/ velmag * vel;
// u[index] = vel.x;
// v[index] = vel.y;
// w[index] = vel.z;
// }else{
// u[index] = 0;
// v[index] = 0;
// w[index] = 0;
// }
//printf("%f, %f\n", vel.x, vel.y);
//dens0[index] = 0.1 /(abs(r-R)+1);
// Zero the velocity right on the ring r == R.
if(abs(r-R) < 0.01){
u[index] = 0;
v[index] = 0;
w[index] = 0;
}
}
}
}
// Pin the exact center cell to zero velocity.
int index = IX(N/2, N/2, N/2);
u[index] = 0;
v[index] = 0;
w[index] = 0;
HANDLE_ERROR(cudaMemcpy(device_pointers.dens, dens0, memsize, cudaMemcpyHostToDevice));
// HANDLE_ERROR(cudaMemcpy(device_pointers.u, u, memsize, cudaMemcpyHostToDevice));
// HANDLE_ERROR(cudaMemcpy(device_pointers.v, v, memsize, cudaMemcpyHostToDevice));
// HANDLE_ERROR(cudaMemcpy(device_pointers.w, w, memsize, cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(device_pointers.sources, sources, memsize, cudaMemcpyHostToDevice));
// The velocity fields initialized above are uploaded as persistent
// *sources* (u_s/v_s/w_s), injected every frame by launch_kernel().
HANDLE_ERROR(cudaMemcpy(device_pointers.u_s, u, memsize, cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(device_pointers.v_s, v, memsize, cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(device_pointers.w_s, w, memsize, cudaMemcpyHostToDevice));
}
/*
 * Releases every device buffer allocated by setup_scene().
 * Bug fix: w, w0, u_s, v_s and w_s were allocated in setup_scene() but
 * never freed here — device memory leak.
 */
extern "C" void destroy_scene(){
    printf("destroying scene\n");
    cudaFree(device_pointers.u);
    cudaFree(device_pointers.v);
    cudaFree(device_pointers.w);
    cudaFree(device_pointers.u0);
    cudaFree(device_pointers.v0);
    cudaFree(device_pointers.w0);
    cudaFree(device_pointers.dens);
    cudaFree(device_pointers.dens0);
    cudaFree(device_pointers.sources);
    cudaFree(device_pointers.u_s);
    cudaFree(device_pointers.v_s);
    cudaFree(device_pointers.w_s);
}
//! Keyboard events handler for GLUT
//! GLUT keyboard callback: ESC/'q' quits, 'v' toggles density rendering.
// 'w'/'a'/'s'/'d' are reserved but currently unbound. Always requests a
// redraw afterwards.
void keyboard(unsigned char key, int x, int y)
{
    if (key == 27 || key == 'q') {
        exit(0);
    } else if (key == 'v') {
        draw_dens_flag = !draw_dens_flag;
    }
    // 'w', 'a', 's', 'd' intentionally do nothing (yet).
    // indicate the display must be redrawn
    glutPostRedisplay();
}
// Builds a pinhole camera at `eye` looking at `lookat_in`, with an image
// plane one unit in front of the eye. right/up are halved to set the
// image-plane extent; c/d/e/f map pixel coordinates to plane offsets in
// compute_ray(), preserving aspect ratio for non-square images.
// NOTE(review): `near` can collide with a legacy macro on some Windows
// toolchains — consider renaming.
Camera::Camera(vec3 eye, vec3 lookat_in, vec3 up_in, int width, int height){
float near = 1;
cof = eye;  // center of focus (eye position)
up = normalize(up_in);
lookat = normalize(lookat_in - cof);  // view direction (lookat_in is a point)
right = normalize(cross(lookat, up))/2.f ;
center = cof + (normalize(lookat) * near);  // image-plane center
up = normalize(cross(right, lookat))/2.f ;  // re-orthogonalized, halved
printVec3(center);
printVec3(right);
printVec3(up);
printVec3(lookat);
// Pixel -> image-plane mapping constants, split by orientation so the
// shorter axis is scaled to keep square pixels.
if (width > height) {
d = (width - 1) / 2.0;
c = (height * (width - 1.0))/ (2.0 * width);
f = (height - 1.0) / 2.0;
e = (height - 1.0) / 2.0;
} else {
d = (width - 1.0) / 2.0;
c = (width - 1.0) / 2.0;
f = (height - 1.0) / 2.0;
e = (width * (height - 1.0))/ (2.0 * height);
}
}
// Default constructor: members are left uninitialized; callers must assign
// a fully constructed Camera (see setup_scene) before use.
Camera::Camera(){}
// Builds the primary ray through pixel (pixel_x, pixel_y): map the pixel to
// normalized image-plane offsets, locate the corresponding point on the
// plane, and shoot a normalized ray from the eye through it.
__device__ Ray Camera::compute_ray(float pixel_x, float pixel_y){
    float plane_x = (pixel_x - d) / c;
    float plane_y = (pixel_y - f) / e;
    vec3 pixel_point = center + (up * plane_y) + (right * plane_x);
    return Ray(cof, normalize(pixel_point - cof));
}
// Applies an affine transform to the whole camera frame. Positions use
// homogeneous w = 1 (translate + rotate); directions use w = 0 (rotate
// only, no translation).
void Camera::transform(glm::mat4 t){
    cof = vec3(t * vec4(cof, 1));
    center = vec3(t * vec4(center, 1));
    lookat = vec3(t * vec4(lookat, 0));
    up = vec3(t * vec4(up, 0));
    right = vec3(t * vec4(right, 0));
}
// Ray with origin P and direction D (D is stored as given, not normalized).
__device__ Ray::Ray(const vec3 & P, const vec3 & D): p(P), d(D){}
// Evaluates the ray at parameter t: origin + t * direction.
__device__ vec3 Ray::solve(const float &t) const{
    vec3 point = p + (d * t);
    return point;
}
// Ray/axis-aligned-box intersection using the slab method: intersect the
// ray with each pair of axis-aligned planes, shrinking [tmin, tmax] as each
// axis is processed. Returns the entry/exit parameters when the ray hits
// the box within (t0, t1); otherwise returns {-1, -1}.
// NOTE(review): a zero direction component divides by zero, producing
// +/-inf — the slab comparisons typically tolerate this, but the NaN case
// (origin exactly on a slab plane) is unhandled.
__device__ struct interval AABB::intersect(const Ray r, float t0, float t1) const {
float tmin, tmax, tymin, tymax, tzmin, tzmax;
struct interval result = {-1, -1};
// X slab: pick near/far planes according to the ray direction's sign.
if (r.d.x >= 0) {
tmin = (bounds[0].x - r.p.x) / r.d.x;
tmax = (bounds[1].x - r.p.x) / r.d.x;
}
else {
tmin = (bounds[1].x - r.p.x) / r.d.x;
tmax = (bounds[0].x - r.p.x) / r.d.x;
}
// Y slab.
if (r.d.y >= 0) {
tymin = (bounds[0].y - r.p.y) / r.d.y;
tymax = (bounds[1].y - r.p.y) / r.d.y;
}
else {
tymin = (bounds[1].y - r.p.y) / r.d.y;
tymax = (bounds[0].y - r.p.y) / r.d.y;
}
if ( (tmin > tymax) || (tymin > tmax) )
return result;
if (tymin > tmin)
tmin = tymin;
if (tymax < tmax)
tmax = tymax;
// Z slab.
if (r.d.z >= 0) {
tzmin = (bounds[0].z - r.p.z) / r.d.z;
tzmax = (bounds[1].z - r.p.z) / r.d.z;
}
else {
tzmin = (bounds[1].z - r.p.z) / r.d.z;
tzmax = (bounds[0].z - r.p.z) / r.d.z;
}
if ( (tmin > tzmax) || (tzmin > tmax) )
return result;
if (tzmin > tmin)
tmin = tzmin;
if (tzmax < tmax)
tmax = tzmax;
// Only report a hit when the interval overlaps (t0, t1); both branches
// return `result` (the else keeps the {-1, -1} miss sentinel).
if((tmin < t1) && (tmax > t0)){
result.tmin = tmin;
result.tmax = tmax;
return result;
}else{
return result;
}
}
2c84814331b5d94bc8c88c14230595fc36c41745.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "hip/hip_runtime.h"
#define M 8
#define N 12
extern __shared__ int shared[];
// Demo kernel (HIP): each block stages its n-element row of `data` in
// dynamic shared memory, then every thread overwrites data[row][tid] with
// its right neighbor (a left shift within the row). Launched with n-1
// threads per block, so tid+1 <= n-1 stays in the copied row.
// NOTE(review): shared memory is per-block, so indexing it by
// blockIdx.x*n means each block uses only its own n-int slice of the
// sizeof(int)*M*N allocation — offset 0 with sizeof(int)*n would suffice.
__global__ void func(int*data, int m,int n){
int * t_s = shared;
// __shared__ int t_s[12];
// Thread 0 stages this block's row; memcpy is plain device-side memcpy.
if(threadIdx.x==0){
memcpy(t_s + blockIdx.x*n,data + blockIdx.x*n,sizeof(int)*n);
}
// All threads must wait for the staged copy before reading it.
__syncthreads();
data[blockIdx.x*n + threadIdx.x] =
t_s[blockIdx.x*n + threadIdx.x+1];
}
// Host driver: fills an MxN matrix with 1..M*N, prints it, runs the shift
// kernel (M blocks of N-1 threads, M*N ints of dynamic shared memory), and
// prints the shifted result.
// NOTE(review): d_d is never hipFree'd and no HIP return codes are checked.
int main(){
int h_d[M*N];
int *d_d;
for(int i=0;i<M*N;i++)
h_d[i]=i+1;
for(int i=0;i<M;i++){
for(int j=0;j<N;j++)
printf("%2d ",h_d[i*N+j]);
printf("\n");
}
printf("\n");
hipMalloc((void**)&d_d,sizeof(int)*M*N);
hipMemcpy(d_d,h_d,sizeof(int)*M*N,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( func), dim3(M),dim3(N-1),sizeof(int)*M*N, 0, d_d,M,N);
hipDeviceSynchronize();
memset(h_d,0,sizeof(int)*M*N);
hipMemcpy(h_d,d_d,sizeof(int)*M*N,hipMemcpyDeviceToHost);
for(int i=0;i<M;i++){
for(int j=0;j<N;j++)
printf("%2d ",h_d[i*N+j]);
printf("\n");
}
printf("\n");
return 0;
}
| 2c84814331b5d94bc8c88c14230595fc36c41745.cu | #include <stdio.h>
#include "cuda_runtime.h"
#define M 8
#define N 12
extern __shared__ int shared[];
// Demo kernel (CUDA twin of the HIP version above): each block stages its
// n-element row of `data` in dynamic shared memory, then every thread
// overwrites data[row][tid] with its right neighbor (a left shift within
// the row). Launched with n-1 threads per block, so tid+1 <= n-1 stays in
// the copied row.
// NOTE(review): shared memory is per-block, so indexing it by blockIdx.x*n
// means each block uses only its own n-int slice of the sizeof(int)*M*N
// allocation.
__global__ void func(int*data, int m,int n){
int * t_s = shared;
// __shared__ int t_s[12];
// Thread 0 stages this block's row into shared memory.
if(threadIdx.x==0){
memcpy(t_s + blockIdx.x*n,data + blockIdx.x*n,sizeof(int)*n);
}
// All threads must wait for the staged copy before reading it.
__syncthreads();
data[blockIdx.x*n + threadIdx.x] =
t_s[blockIdx.x*n + threadIdx.x+1];
}
/*
 * Host driver: fills an MxN matrix with 1..M*N, prints it, runs the
 * row-shift kernel (M blocks of N-1 threads, M*N ints of dynamic shared
 * memory), and prints the shifted result.
 * Fixes: cudaThreadSynchronize() is deprecated (use
 * cudaDeviceSynchronize()); d_d was never freed.
 */
int main(){
    int h_d[M*N];
    int *d_d;
    for(int i=0;i<M*N;i++)
        h_d[i]=i+1;
    // Print the input matrix.
    for(int i=0;i<M;i++){
        for(int j=0;j<N;j++)
            printf("%2d ",h_d[i*N+j]);
        printf("\n");
    }
    printf("\n");
    cudaMalloc((void**)&d_d,sizeof(int)*M*N);
    cudaMemcpy(d_d,h_d,sizeof(int)*M*N,cudaMemcpyHostToDevice);
    func<<<M,N-1,sizeof(int)*M*N>>>(d_d,M,N);
    cudaDeviceSynchronize();
    memset(h_d,0,sizeof(int)*M*N);
    cudaMemcpy(h_d,d_d,sizeof(int)*M*N,cudaMemcpyDeviceToHost);
    // Print the shifted result.
    for(int i=0;i<M;i++){
        for(int j=0;j<N;j++)
            printf("%2d ",h_d[i*N+j]);
        printf("\n");
    }
    printf("\n");
    cudaFree(d_d);
    return 0;
}
|
9192f1d47e3820c074f169047b2f55c457b6aaea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
/*
 * Recovers primitive variables (velocity, internal energy, density,
 * temperature, sound speed, cp/cv) from the conserved-variable array `u`
 * for a spectral-element CFD solver (HIP port). One thread per grid point;
 * guarded grid-stride-free launch sized to nelt*nxyz.
 * Bug fix: sqrtf() was applied to double operands (r and
 * gmaref*rgasref*temp), silently truncating to float precision — use
 * sqrt() for double.
 * NOTE(review): many locals below (eg, theta, ux/uy/uz, ps, pa, p0,
 * si2/si3, cbu*, varsic, mu, lambda, ...) are computed but never used —
 * dead global-memory loads kept pending confirmation with the original
 * authors (see inline comments).
 */
__global__ void compute_primitive_vars_kernel (double *vx, double *vy, double *vz, double *u, int nelt, int nxyz,int ntot,int irpu, int irpv, int irpw, int iret, int irg, int toteq,int if3d,double *scr, double* energy, double *vtrans, int irho, double *phig, int lx1, int ly1, int lz1, int *lglel, double *xm1, double *ym1, double *zm1, double *t,int ldimt, int npscal, double *pr, double p0th, double *sii, double *siii, double *vdiff, int ifield,char *cb, int icv, int icp, double *csound, int imu,int ilam, double cpgref, double cvgref, double gmaref, double rgasref, int ltot,int lxy){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<nelt*nxyz){
int e = id/nxyz;
int i = id%nxyz;
int e_offset = toteq*nxyz;
double c = u[e*e_offset+(irg-1)*nxyz+i];
vx[id] = u[e*e_offset+(irpu-1)*nxyz+i]/c;//invcol3
vy[id] = u[e*e_offset+(irpv-1)*nxyz+i]/c;
vz[id] = u[e*e_offset+(irpw-1)*nxyz+i]/c;
if(if3d){
//Compute a Cartesian vector dot product. 3-d version vdot3
scr[id] = u[e*e_offset+(irpu-1)*nxyz+i]*u[e*e_offset+(irpu-1)*nxyz+i]+u[e*e_offset+(irpv-1)*nxyz+i]*u[e*e_offset+(irpv-1)*nxyz+i]+u[e*e_offset+(irpw-1)*nxyz+i]*u[e*e_offset+(irpw-1)*nxyz+i];
}
else{
// compute vector dot product 2d version vdot2
scr[id] = u[e*e_offset+(irpu-1)*nxyz+i]*u[e*e_offset+(irpu-1)*nxyz+i]+u[e*e_offset+(irpv-1)*nxyz+i]*u[e*e_offset+(irpv-1)*nxyz+i];
}
scr[id] = scr[id]/c; //invcol2
scr[id] = scr[id] * 0.5; //cmult
energy[id] = u[e*e_offset+(iret-1)*nxyz+i] -scr[id];// sub3
energy[id] = energy[id]/c;// invcol2
vtrans[(irho-1)*ltot+id ] = c / phig[id]; //invcol3
// subroutine tdstate
int eg= lglel[e]; // this never uses. Check with Dr.Tania
int k = (id / (lx1*ly1))%lz1;
int j = (id/lx1)%ly1;
int newi = id % lx1;
double x = xm1[e*nxyz+k*lxy+j*lx1+newi];
double y = ym1[e*nxyz+k*lxy+j*lx1+newi];
double z = zm1[e*nxyz+k*lxy+j*lx1+newi];
double r = x*x+y*y;
double theta=0.0;
if (r>0.0){ r = sqrt(r);} // bug fix: sqrtf() truncated the double to float precision
if ( x != 0.0 || y!= 0.0){theta = atan2(y,x); }
double ux= vx[e*nxyz+k*lxy+j*lx1+newi];
double uy= vy[e*nxyz+k*lxy+j*lx1+newi];
double uz= vz[e*nxyz+k*lxy+j*lx1+newi];
double temp = t [ e*nxyz+k*lxy+j*lx1+newi ];
int ips;
double ps[10]; // ps is size of ldimt which is 3. Not sure npscal is also 3. Need to check with Dr.Tania
for (ips=0;ips<npscal;ips++){
ps[ips]=t[(ips+1)*ltot+e*nxyz+k*lxy+j*lx1+newi ]; // 5 th dimension of t is idlmt which is 3. Not sure how the nekasgn access ips+1. Need to check with Dr.Tania
}
double pa = pr [e*nxyz+k*lxy+j*lx1+newi];
double p0= p0th;
double si2 = sii[e*nxyz+k*lxy+j*lx1+newi];
double si3 = siii[e*nxyz+k*lxy+j*lx1+newi];
double udiff = vdiff[(ifield-1)*ltot+e*nxyz+k*lxy+j*lx1+newi];
double utrans = vtrans[(ifield-1)*ltot+e*nxyz+k*lxy+j*lx1+newi];
char cbu1 = cb[0];
char cbu2 = cb[1];
char cbu3 = cb[2];
// subroutine cmtasgn
int eqnum;
double varsic[10];
for (eqnum=0;eqnum<toteq;eqnum++){
varsic[eqnum] = u[e*e_offset+eqnum*nxyz+k*lxy+j*lx1+newi];
}
double phi = phig[e*nxyz+k*lxy+j*lx1+newi];
double rho = vtrans[(irho-1)*ltot +e*nxyz+k*lxy+j*lx1+newi];
double pres = pr[e*nxyz+k*lxy+j*lx1+newi];
double cv=0.0,cp=0.0;
if(rho!=0){
cv=vtrans[(icv-1)*ltot +e*nxyz+k*lxy+j*lx1+newi]/rho;
cp=vtrans[(icp-1)*ltot +e*nxyz+k*lxy+j*lx1+newi]/rho;
}
double asnd = csound [e*nxyz+k*lxy+j*lx1+newi];
double mu = vdiff[(imu-1)*ltot+e*nxyz+k*lxy+j*lx1+newi];
udiff = vdiff[(imu-1)*ltot+e*nxyz+k*lxy+j*lx1+newi];// this overrides the udiff in nekasgn (line 63 in this function). Need to check withDr.Tania
double lambda = vdiff[(ilam-1)*ltot+e*nxyz+k*lxy+j*lx1+newi];
double e_internal = energy[e*nxyz+k*lxy+j*lx1+newi];
//subroutine cmt_userEOS
cp=cpgref;
cv=cvgref;
temp=e_internal/cv; // overrides
// function MixtPerf_C_GRT
asnd=sqrt(gmaref*rgasref*temp); //overrides (bug fix: sqrtf() lost double precision)
// function MixtPerf_P_DRT
pres=rho*rgasref*temp;//overrides
vtrans[(icp-1)*ltot +e*nxyz+k*lxy+j*lx1+newi]=cp*rho;
vtrans[(icv-1)*ltot +e*nxyz+k*lxy+j*lx1+newi]=cv*rho;
t [ e*nxyz+k*lxy+j*lx1+newi ]= temp;
csound [e*nxyz+k*lxy+j*lx1+newi]=asnd;
}
}
// Fortran-callable host wrapper (HIP): allocates per-call scratch buffers,
// launches compute_primitive_vars_kernel over nelt*nxyz points, then frees
// the scratch. The Peek/printf pairs log the sticky HIP error before and
// after the call.
// NOTE(review): hipMalloc/hipFree return codes are not checked, and the
// scratch buffers are reallocated on every call — consider caching them.
extern "C" void compute_primitive_vars_gpu_wrapper_(int *glbblockSize1,double *d_vx, double *d_vy, double *d_vz, double *d_u, int *nxyz, int *ntot, int *nelt,int *irpu, int *irpv, int *irpw, int* iret, int *irg, int *toteq, int *if3d, double *d_vtrans, int *irho, double *d_phig, int *lx1, int *ly1, int *lz1, int *d_lglel, double *d_xm1, double *d_ym1, double *d_zm1, double *d_t,int *ldimt, int *npscal, double *d_pr, double *p0th, double *d_sii, double *d_siii, double *d_vdiff, int *ifield,char *d_cb, int *icv, int *icp, double *d_csound, int *imu,int *ilam, double *cpgref, double *cvgref, double *gmaref, double *rgasref, int *ltot){
hipError_t code1 = hipPeekAtLastError();
printf("CUDA: Start compute_primitive_vars_gpu_wrapper cuda status: %s\n",hipGetErrorString(code1));
printf("CUDA: Start compute_primitive_vars_gpu_wrapper values nxyz = %d,ntot = %d,nelt = %d,irpu = %d,irpv = %d,irpw = %d,iret = %d,irg=%d ,toteq = %d,if3d = %d,irho = %d,lx1 = %d,ly1 = %d,lz1 = %d,ldimt = %d,npscal = %d,p0th = %lf,ifield = %d,icv = %d,icp = %d,imu = %d,ilam = %d,cpgref = %lf,cvgref = %lf,gmaref = %lf,rgasref = %lf,ltot = %d, \n", nxyz[0],ntot[0],nelt[0],irpu[0],irpv[0],irpw[0],iret[0],irg[0],toteq[0],if3d[0],irho[0],lx1[0],ly1[0],lz1[0],ldimt[0],npscal[0],p0th[0],ifield[0],icv[0],icp[0],imu[0],ilam[0],cpgref[0],cvgref[0],gmaref[0],rgasref[0],ltot[0]);
double *d_scr; // I think this is a tempory variable. need to check with Dr.Tania. adeesha
double *d_energy;
hipMalloc((void**)&d_scr,ntot[0] * sizeof(double));
hipMalloc((void**)&d_energy,ntot[0] * sizeof(double));
int lxy=lx1[0]*ly1[0];
int blockSize =glbblockSize1[0], gridSize;
gridSize = (int)ceil((float)nelt[0]*nxyz[0]/blockSize);
hipLaunchKernelGGL(( compute_primitive_vars_kernel), dim3(gridSize), dim3(blockSize), 0, 0, d_vx, d_vy, d_vz, d_u, nelt[0], nxyz[0],ntot[0], irpu[0], irpv[0], irpw[0], iret[0],irg[0],toteq[0],if3d[0],d_scr,d_energy,d_vtrans, irho[0],d_phig ,lx1[0], ly1[0],lz1[0], d_lglel, d_xm1, d_ym1,d_zm1, d_t,ldimt[0], npscal[0], d_pr,p0th[0], d_sii,d_siii,d_vdiff, ifield[0],d_cb, icv[0], icp[0],d_csound,imu[0],ilam[0], cpgref[0], cvgref[0], gmaref[0], rgasref[0],ltot[0],lxy);
hipFree(d_scr);
hipFree(d_energy);
hipError_t code2 = hipPeekAtLastError();
printf("CUDA: End compute_primitive_vars_gpu_wrapper cuda status: %s\n",hipGetErrorString(code2));
}
| 9192f1d47e3820c074f169047b2f55c457b6aaea.cu | #include <stdio.h>
/*
 * Recovers primitive variables (velocity, internal energy, density,
 * temperature, sound speed, cp/cv) from the conserved-variable array `u`
 * for a spectral-element CFD solver (CUDA original of the HIP port above).
 * One thread per grid point.
 * Bug fix: sqrtf() was applied to double operands (r and
 * gmaref*rgasref*temp), silently truncating to float precision — use
 * sqrt() for double.
 * NOTE(review): many locals below (eg, theta, ux/uy/uz, ps, pa, p0,
 * si2/si3, cbu*, varsic, mu, lambda, ...) are computed but never used —
 * dead global-memory loads kept pending confirmation with the original
 * authors (see inline comments).
 */
__global__ void compute_primitive_vars_kernel (double *vx, double *vy, double *vz, double *u, int nelt, int nxyz,int ntot,int irpu, int irpv, int irpw, int iret, int irg, int toteq,int if3d,double *scr, double* energy, double *vtrans, int irho, double *phig, int lx1, int ly1, int lz1, int *lglel, double *xm1, double *ym1, double *zm1, double *t,int ldimt, int npscal, double *pr, double p0th, double *sii, double *siii, double *vdiff, int ifield,char *cb, int icv, int icp, double *csound, int imu,int ilam, double cpgref, double cvgref, double gmaref, double rgasref, int ltot,int lxy){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<nelt*nxyz){
int e = id/nxyz;
int i = id%nxyz;
int e_offset = toteq*nxyz;
double c = u[e*e_offset+(irg-1)*nxyz+i];
vx[id] = u[e*e_offset+(irpu-1)*nxyz+i]/c;//invcol3
vy[id] = u[e*e_offset+(irpv-1)*nxyz+i]/c;
vz[id] = u[e*e_offset+(irpw-1)*nxyz+i]/c;
if(if3d){
//Compute a Cartesian vector dot product. 3-d version vdot3
scr[id] = u[e*e_offset+(irpu-1)*nxyz+i]*u[e*e_offset+(irpu-1)*nxyz+i]+u[e*e_offset+(irpv-1)*nxyz+i]*u[e*e_offset+(irpv-1)*nxyz+i]+u[e*e_offset+(irpw-1)*nxyz+i]*u[e*e_offset+(irpw-1)*nxyz+i];
}
else{
// compute vector dot product 2d version vdot2
scr[id] = u[e*e_offset+(irpu-1)*nxyz+i]*u[e*e_offset+(irpu-1)*nxyz+i]+u[e*e_offset+(irpv-1)*nxyz+i]*u[e*e_offset+(irpv-1)*nxyz+i];
}
scr[id] = scr[id]/c; //invcol2
scr[id] = scr[id] * 0.5; //cmult
energy[id] = u[e*e_offset+(iret-1)*nxyz+i] -scr[id];// sub3
energy[id] = energy[id]/c;// invcol2
vtrans[(irho-1)*ltot+id ] = c / phig[id]; //invcol3
// subroutine tdstate
int eg= lglel[e]; // this never uses. Check with Dr.Tania
int k = (id / (lx1*ly1))%lz1;
int j = (id/lx1)%ly1;
int newi = id % lx1;
double x = xm1[e*nxyz+k*lxy+j*lx1+newi];
double y = ym1[e*nxyz+k*lxy+j*lx1+newi];
double z = zm1[e*nxyz+k*lxy+j*lx1+newi];
double r = x*x+y*y;
double theta=0.0;
if (r>0.0){ r = sqrt(r);} // bug fix: sqrtf() truncated the double to float precision
if ( x != 0.0 || y!= 0.0){theta = atan2(y,x); }
double ux= vx[e*nxyz+k*lxy+j*lx1+newi];
double uy= vy[e*nxyz+k*lxy+j*lx1+newi];
double uz= vz[e*nxyz+k*lxy+j*lx1+newi];
double temp = t [ e*nxyz+k*lxy+j*lx1+newi ];
int ips;
double ps[10]; // ps is size of ldimt which is 3. Not sure npscal is also 3. Need to check with Dr.Tania
for (ips=0;ips<npscal;ips++){
ps[ips]=t[(ips+1)*ltot+e*nxyz+k*lxy+j*lx1+newi ]; // 5 th dimension of t is idlmt which is 3. Not sure how the nekasgn access ips+1. Need to check with Dr.Tania
}
double pa = pr [e*nxyz+k*lxy+j*lx1+newi];
double p0= p0th;
double si2 = sii[e*nxyz+k*lxy+j*lx1+newi];
double si3 = siii[e*nxyz+k*lxy+j*lx1+newi];
double udiff = vdiff[(ifield-1)*ltot+e*nxyz+k*lxy+j*lx1+newi];
double utrans = vtrans[(ifield-1)*ltot+e*nxyz+k*lxy+j*lx1+newi];
char cbu1 = cb[0];
char cbu2 = cb[1];
char cbu3 = cb[2];
// subroutine cmtasgn
int eqnum;
double varsic[10];
for (eqnum=0;eqnum<toteq;eqnum++){
varsic[eqnum] = u[e*e_offset+eqnum*nxyz+k*lxy+j*lx1+newi];
}
double phi = phig[e*nxyz+k*lxy+j*lx1+newi];
double rho = vtrans[(irho-1)*ltot +e*nxyz+k*lxy+j*lx1+newi];
double pres = pr[e*nxyz+k*lxy+j*lx1+newi];
double cv=0.0,cp=0.0;
if(rho!=0){
cv=vtrans[(icv-1)*ltot +e*nxyz+k*lxy+j*lx1+newi]/rho;
cp=vtrans[(icp-1)*ltot +e*nxyz+k*lxy+j*lx1+newi]/rho;
}
double asnd = csound [e*nxyz+k*lxy+j*lx1+newi];
double mu = vdiff[(imu-1)*ltot+e*nxyz+k*lxy+j*lx1+newi];
udiff = vdiff[(imu-1)*ltot+e*nxyz+k*lxy+j*lx1+newi];// this overrides the udiff in nekasgn (line 63 in this function). Need to check withDr.Tania
double lambda = vdiff[(ilam-1)*ltot+e*nxyz+k*lxy+j*lx1+newi];
double e_internal = energy[e*nxyz+k*lxy+j*lx1+newi];
//subroutine cmt_userEOS
cp=cpgref;
cv=cvgref;
temp=e_internal/cv; // overrides
// function MixtPerf_C_GRT
asnd=sqrt(gmaref*rgasref*temp); //overrides (bug fix: sqrtf() lost double precision)
// function MixtPerf_P_DRT
pres=rho*rgasref*temp;//overrides
vtrans[(icp-1)*ltot +e*nxyz+k*lxy+j*lx1+newi]=cp*rho;
vtrans[(icv-1)*ltot +e*nxyz+k*lxy+j*lx1+newi]=cv*rho;
t [ e*nxyz+k*lxy+j*lx1+newi ]= temp;
csound [e*nxyz+k*lxy+j*lx1+newi]=asnd;
}
}
// Fortran-callable host wrapper (CUDA): allocates per-call scratch buffers,
// launches compute_primitive_vars_kernel over nelt*nxyz points, then frees
// the scratch. The Peek/printf pairs log the sticky CUDA error before and
// after the call.
// NOTE(review): cudaMalloc/cudaFree return codes are not checked, and the
// scratch buffers are reallocated on every call — consider caching them.
extern "C" void compute_primitive_vars_gpu_wrapper_(int *glbblockSize1,double *d_vx, double *d_vy, double *d_vz, double *d_u, int *nxyz, int *ntot, int *nelt,int *irpu, int *irpv, int *irpw, int* iret, int *irg, int *toteq, int *if3d, double *d_vtrans, int *irho, double *d_phig, int *lx1, int *ly1, int *lz1, int *d_lglel, double *d_xm1, double *d_ym1, double *d_zm1, double *d_t,int *ldimt, int *npscal, double *d_pr, double *p0th, double *d_sii, double *d_siii, double *d_vdiff, int *ifield,char *d_cb, int *icv, int *icp, double *d_csound, int *imu,int *ilam, double *cpgref, double *cvgref, double *gmaref, double *rgasref, int *ltot){
cudaError_t code1 = cudaPeekAtLastError();
printf("CUDA: Start compute_primitive_vars_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code1));
printf("CUDA: Start compute_primitive_vars_gpu_wrapper values nxyz = %d,ntot = %d,nelt = %d,irpu = %d,irpv = %d,irpw = %d,iret = %d,irg=%d ,toteq = %d,if3d = %d,irho = %d,lx1 = %d,ly1 = %d,lz1 = %d,ldimt = %d,npscal = %d,p0th = %lf,ifield = %d,icv = %d,icp = %d,imu = %d,ilam = %d,cpgref = %lf,cvgref = %lf,gmaref = %lf,rgasref = %lf,ltot = %d, \n", nxyz[0],ntot[0],nelt[0],irpu[0],irpv[0],irpw[0],iret[0],irg[0],toteq[0],if3d[0],irho[0],lx1[0],ly1[0],lz1[0],ldimt[0],npscal[0],p0th[0],ifield[0],icv[0],icp[0],imu[0],ilam[0],cpgref[0],cvgref[0],gmaref[0],rgasref[0],ltot[0]);
double *d_scr; // I think this is a tempory variable. need to check with Dr.Tania. adeesha
double *d_energy;
cudaMalloc((void**)&d_scr,ntot[0] * sizeof(double));
cudaMalloc((void**)&d_energy,ntot[0] * sizeof(double));
int lxy=lx1[0]*ly1[0];
int blockSize =glbblockSize1[0], gridSize;
gridSize = (int)ceil((float)nelt[0]*nxyz[0]/blockSize);
compute_primitive_vars_kernel<<<gridSize, blockSize>>>(d_vx, d_vy, d_vz, d_u, nelt[0], nxyz[0],ntot[0], irpu[0], irpv[0], irpw[0], iret[0],irg[0],toteq[0],if3d[0],d_scr,d_energy,d_vtrans, irho[0],d_phig ,lx1[0], ly1[0],lz1[0], d_lglel, d_xm1, d_ym1,d_zm1, d_t,ldimt[0], npscal[0], d_pr,p0th[0], d_sii,d_siii,d_vdiff, ifield[0],d_cb, icv[0], icp[0],d_csound,imu[0],ilam[0], cpgref[0], cvgref[0], gmaref[0], rgasref[0],ltot[0],lxy);
cudaFree(d_scr);
cudaFree(d_energy);
cudaError_t code2 = cudaPeekAtLastError();
printf("CUDA: End compute_primitive_vars_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code2));
}
|
921525e9bbb24b8cb35fc6d3b1327d72a9fcd0ca.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2011, Alex Krizhevsky (akrizhevsky@gmail.com)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <vector>
#include <iostream>
#include <string>
#include <nvmatrix.cuh>
#include <nvmatrix_operators.cuh>
#include <matrix.h>
#include <convnet.cuh>
#include <weights.cuh>
#include <util.cuh>
#include "common/logging.h"
using namespace std;
/*
* =======================
* ConvNet
* =======================
*/
// Builds the layer graph from a Python list of per-layer parameter dicts:
// instantiates each layer via initLayer(), wires backward links from the
// "inputs" indices, then derives the forward links and runs each layer's
// postInit(). Any string exception aborts the process.
// NOTE(review): the inner loop's `i` shadows the outer `i` (legal, but
// worth renaming for clarity).
ConvNet::ConvNet(PyListObject* layerParams, int minibatchSize, int deviceID) : Thread(false), _deviceID(deviceID), _data(NULL) {
try {
int numLayers = PyList_GET_SIZE(layerParams);
for (int i = 0; i < numLayers; i++) {
PyObject* paramsDict = PyList_GET_ITEM(layerParams, i);
string layerType = pyDictGetString(paramsDict, "type");
Layer* l = initLayer(layerType, paramsDict);
// Connect backward links in graph for this layer
intv* inputLayers = pyDictGetIntV(paramsDict, "inputs");
if (inputLayers != NULL) {
for (int i = 0; i < inputLayers->size(); i++) {
l->addPrev(&getLayer(inputLayers->at(i)));
}
}
// delete of NULL is a no-op, so this is safe when "inputs" is absent.
delete inputLayers;
}
// Connect the forward links in the graph
for (int i = 0; i < _layers.size(); i++) {
vector<Layer*>& prev = _layers[i]->getPrev();
for (int j = 0; j < prev.size(); j++) {
prev[j]->addNext(_layers[i]);
}
}
// Execute post-initialization stuff
for (int i = 0; i < _layers.size(); i++) {
_layers[i]->postInit();
}
_dp = new DataProvider(minibatchSize);
} catch (string& s) {
cout << "Error creating ConvNet: " << s << endl;
exit(1);
}
}
/*
 * Layer factory: instantiates the layer matching `layerType`, appends it to
 * _layers (and to _dataLayers / _costs for data and "cost.*" layers),
 * initializes it, and returns it. Unknown types throw a string, caught in
 * the constructor. Override this in derived classes to add layer types.
 */
Layer* ConvNet::initLayer(string& layerType, PyObject* paramsDict) {
if (layerType == "fc") {
_layers.push_back(new FCLayer(paramsDict));
} else if (layerType == "conv") {
_layers.push_back(new ConvLayer(paramsDict));
} else if (layerType == "local") {
_layers.push_back(new LocalUnsharedLayer(paramsDict));
} else if (layerType == "pool") {
_layers.push_back(&PoolLayer::makePoolLayer(paramsDict));
} else if (layerType == "rnorm") {
_layers.push_back(new ResponseNormLayer(paramsDict));
} else if (layerType == "cmrnorm") {
_layers.push_back(new CrossMapResponseNormLayer(paramsDict));
} else if (layerType == "cnorm") {
_layers.push_back(new ContrastNormLayer(paramsDict));
} else if (layerType == "softmax") {
_layers.push_back(new SoftmaxLayer(paramsDict));
} else if (layerType == "eltsum") {
_layers.push_back(new EltwiseSumLayer(paramsDict));
} else if (layerType == "eltmax") {
_layers.push_back(new EltwiseMaxLayer(paramsDict));
} else if (layerType == "neuron") {
_layers.push_back(new NeuronLayer(paramsDict));
} else if (layerType == "nailbed") {
_layers.push_back(new NailbedLayer(paramsDict));
} else if (layerType == "blur") {
_layers.push_back(new GaussianBlurLayer(paramsDict));
} else if (layerType == "resize") {
_layers.push_back(new ResizeLayer(paramsDict));
} else if (layerType == "rgb2yuv") {
_layers.push_back(new RGBToYUVLayer(paramsDict));
} else if (layerType == "rgb2lab") {
_layers.push_back(new RGBToLABLayer(paramsDict));
} else if (layerType == "data") {
// Data layers are also tracked separately so fprop() can feed them.
DataLayer *d = new DataLayer(paramsDict);
_layers.push_back(d);
_dataLayers.push_back(d);
} else if (strncmp(layerType.c_str(), "cost.", 5) == 0) {
// Cost layers are also tracked separately so bprop()/getCost() can
// find them.
CostLayer *c = &CostLayer::makeCostLayer(layerType, paramsDict);
_layers.push_back(c);
_costs.push_back(c);
} else {
throw string("Unknown layer type ") + layerType;
}
_layers.back()->initialize(this, paramsDict);
return _layers.back();
}
/*
 * This executes in a new CPU thread so it's OK to initialize CUDA stuff here.
 * Selects the device (fastest available when _deviceID < 0), prefers
 * shared-memory cache config, initializes BLAS and the RNG, then uploads
 * all layer weights.
 * NOTE(review): hipblasInit() corresponds to the legacy cublasInit() API.
 */
void ConvNet::initCuda() {
hipSetDevice(_deviceID < 0 ? cutGetMaxGflopsDeviceId() : _deviceID);
hipDeviceSetCacheConfig(hipFuncCachePreferShared);
hipblasInit();
NVMatrix::initRandom(time(0));
NetworkManager::initialize();
copyToGPU();
}
void* ConvNet::run() {
initCuda();
while (true) {
Worker* worker = _workerQueue.dequeue();
NetworkManager::resumeMPI();
worker->run();
delete worker;
NetworkManager::pauseMPI();
}
return NULL;
}
Queue<Worker*>& ConvNet::getWorkerQueue() {
return _workerQueue;
}
Queue<WorkResult*>& ConvNet::getResultQueue() {
return _resultQueue;
}
DataProvider& ConvNet::getDataProvider() {
return *_dp;
}
Layer& ConvNet::operator[](int idx) {
return *_layers[idx];
}
Layer& ConvNet::getLayer(int idx) {
return *_layers[idx];
}
void ConvNet::copyToCPU() {
for (int i = 0; i < _layers.size(); i++) {
_layers[i]->copyToCPU();
}
}
void ConvNet::copyToGPU() {
for (int i = 0; i < _layers.size(); i++) {
_layers[i]->copyToGPU();
}
}
void ConvNet::updateWeights() {
for (int i = 0; i < _layers.size(); i++) {
_layers[i]->updateWeights();
}
}
void ConvNet::reset() {
for (int i = 0; i < _layers.size(); i++) {
_layers[i]->reset();
}
}
int ConvNet::getNumLayers() {
return _layers.size();
}
void ConvNet::bprop(PASS_TYPE passType) {
for (int i = 0; i < _costs.size(); i++) {
_costs[i]->bprop(passType);
}
reset();
}
void ConvNet::fprop(PASS_TYPE passType) {
assert(_data != NULL);
reset();
double batchStart = Now();
for (int i = 0; i < _dataLayers.size(); i++) {
double start = Now();
_dataLayers[i]->fprop(_data->getData(), passType);
}
}
void ConvNet::fprop(GPUData& data, PASS_TYPE passType) {
if (&data != _data) {
delete _data;
}
_data = &data;
fprop(passType);
}
void ConvNet::fprop(int miniIdx, PASS_TYPE passType) {
delete _data;
_data = &_dp->getMinibatch(miniIdx);
fprop(passType);
}
Cost& ConvNet::getCost() {
return *new Cost(_data->getNumCases(), _costs);
}
// Same as getCost() but adds results to given cost and returns it
Cost& ConvNet::getCost(Cost& cost) {
Cost& newCost = getCost();
cost += newCost;
delete &newCost;
return cost;
}
double ConvNet::getCostValue() {
Cost& cost = getCost();
double val = cost.getValue();
delete &cost;
return val;
}
/*
* Gradient checking stuff
*/
void ConvNet::checkGradients() {
_numFailures = 0;
_numTests = 0;
fprop(0, PASS_GC);
_baseErr = getCostValue();
bprop(PASS_GC);
for (vector<Layer*>::iterator it = _layers.begin(); it != _layers.end(); ++it) {
(*it)->checkGradients(this);
}
cout << "------------------------" << endl;
if (_numFailures > 0) {
cout << _numFailures << "/" << _numTests << " TESTS FAILED" << endl;
} else {
cout << "ALL " << _numTests << " TESTS PASSED" << endl;
}
}
/*
* name: weight matrix name
* eps: finite difference step
*/
bool ConvNet::checkGradient(const string& name, float eps, Weights& weights) {
Matrix numGrad(weights.getNumRows(), weights.getNumCols());
Matrix diff(numGrad);
numGrad.apply(Matrix::ZERO);
Matrix weightsCPU;
weights.getW().copyToHost(weightsCPU, true);
for(int i = 0; i < weights.getNumRows(); i++) {
for (int j = 0; j < weights.getNumCols(); j++) {
float v = weightsCPU(i,j);
weightsCPU(i,j) += eps;
weights.getW().copyFromHost(weightsCPU);
weightsCPU(i,j) = v;
fprop(PASS_GC);
double err = getCostValue();
numGrad(i,j) = (err - _baseErr) / (_data->getNumCases() * eps);
if (isnan(numGrad(i,j)) || isinf(numGrad(i,j))) {
cout << "Numerical computation produced nan or inf when checking '" << name << "': " << numGrad(i,j) << endl;
cout << "Consider reducing the sizes of the weights or finite difference steps." << endl;
cout << "Exiting." << endl;
exit(1);
}
weights.getW().copyFromHost(weightsCPU);
}
}
Matrix gradCPU;
weights.getGrad().copyToHost(gradCPU, true);
gradCPU.scale(-1.0 / _data->getNumCases());
float analNorm = gradCPU.norm();
float numNorm = numGrad.norm();
numGrad.subtract(gradCPU, diff);
float relErr = diff.norm() / analNorm;
bool fail = relErr >= GC_REL_ERR_THRESH;
if (fail || !GC_SUPPRESS_PASSES) {
cout << "========================" << endl;
printf("(%s) %s GRADIENT CHECK\n", fail ? "****FAIL****" : "PASS", name.c_str());
cout << "========================" << endl;
cout << "Analytic:" << endl;
gradCPU.print(6,4);
cout << "Numeric:" << endl;
numGrad.print(6,4);
printf("Analytic norm: %e\n", analNorm);
printf("Numeric norm: %e\n", numNorm);
printf("Relative error: %e\n", relErr);
}
_numTests++;
_numFailures += fail;
return fail;
}
| 921525e9bbb24b8cb35fc6d3b1327d72a9fcd0ca.cu | /*
* Copyright (c) 2011, Alex Krizhevsky (akrizhevsky@gmail.com)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <vector>
#include <iostream>
#include <string>
#include <nvmatrix.cuh>
#include <nvmatrix_operators.cuh>
#include <matrix.h>
#include <convnet.cuh>
#include <weights.cuh>
#include <util.cuh>
#include "common/logging.h"
using namespace std;
/*
* =======================
* ConvNet
* =======================
*/
ConvNet::ConvNet(PyListObject* layerParams, int minibatchSize, int deviceID) : Thread(false), _deviceID(deviceID), _data(NULL) {
try {
int numLayers = PyList_GET_SIZE(layerParams);
for (int i = 0; i < numLayers; i++) {
PyObject* paramsDict = PyList_GET_ITEM(layerParams, i);
string layerType = pyDictGetString(paramsDict, "type");
Layer* l = initLayer(layerType, paramsDict);
// Connect backward links in graph for this layer
intv* inputLayers = pyDictGetIntV(paramsDict, "inputs");
if (inputLayers != NULL) {
for (int i = 0; i < inputLayers->size(); i++) {
l->addPrev(&getLayer(inputLayers->at(i)));
}
}
delete inputLayers;
}
// Connect the forward links in the graph
for (int i = 0; i < _layers.size(); i++) {
vector<Layer*>& prev = _layers[i]->getPrev();
for (int j = 0; j < prev.size(); j++) {
prev[j]->addNext(_layers[i]);
}
}
// Execute post-initialization stuff
for (int i = 0; i < _layers.size(); i++) {
_layers[i]->postInit();
}
_dp = new DataProvider(minibatchSize);
} catch (string& s) {
cout << "Error creating ConvNet: " << s << endl;
exit(1);
}
}
/*
* Override this in derived classes
*/
Layer* ConvNet::initLayer(string& layerType, PyObject* paramsDict) {
if (layerType == "fc") {
_layers.push_back(new FCLayer(paramsDict));
} else if (layerType == "conv") {
_layers.push_back(new ConvLayer(paramsDict));
} else if (layerType == "local") {
_layers.push_back(new LocalUnsharedLayer(paramsDict));
} else if (layerType == "pool") {
_layers.push_back(&PoolLayer::makePoolLayer(paramsDict));
} else if (layerType == "rnorm") {
_layers.push_back(new ResponseNormLayer(paramsDict));
} else if (layerType == "cmrnorm") {
_layers.push_back(new CrossMapResponseNormLayer(paramsDict));
} else if (layerType == "cnorm") {
_layers.push_back(new ContrastNormLayer(paramsDict));
} else if (layerType == "softmax") {
_layers.push_back(new SoftmaxLayer(paramsDict));
} else if (layerType == "eltsum") {
_layers.push_back(new EltwiseSumLayer(paramsDict));
} else if (layerType == "eltmax") {
_layers.push_back(new EltwiseMaxLayer(paramsDict));
} else if (layerType == "neuron") {
_layers.push_back(new NeuronLayer(paramsDict));
} else if (layerType == "nailbed") {
_layers.push_back(new NailbedLayer(paramsDict));
} else if (layerType == "blur") {
_layers.push_back(new GaussianBlurLayer(paramsDict));
} else if (layerType == "resize") {
_layers.push_back(new ResizeLayer(paramsDict));
} else if (layerType == "rgb2yuv") {
_layers.push_back(new RGBToYUVLayer(paramsDict));
} else if (layerType == "rgb2lab") {
_layers.push_back(new RGBToLABLayer(paramsDict));
} else if (layerType == "data") {
DataLayer *d = new DataLayer(paramsDict);
_layers.push_back(d);
_dataLayers.push_back(d);
} else if (strncmp(layerType.c_str(), "cost.", 5) == 0) {
CostLayer *c = &CostLayer::makeCostLayer(layerType, paramsDict);
_layers.push_back(c);
_costs.push_back(c);
} else {
throw string("Unknown layer type ") + layerType;
}
_layers.back()->initialize(this, paramsDict);
return _layers.back();
}
/*
* This executes in a new CPU thread so it's OK to initialize CUDA stuff here.
*/
void ConvNet::initCuda() {
cudaSetDevice(_deviceID < 0 ? cutGetMaxGflopsDeviceId() : _deviceID);
cudaDeviceSetCacheConfig(cudaFuncCachePreferShared);
cublasInit();
NVMatrix::initRandom(time(0));
NetworkManager::initialize();
copyToGPU();
}
void* ConvNet::run() {
initCuda();
while (true) {
Worker* worker = _workerQueue.dequeue();
NetworkManager::resumeMPI();
worker->run();
delete worker;
NetworkManager::pauseMPI();
}
return NULL;
}
Queue<Worker*>& ConvNet::getWorkerQueue() {
return _workerQueue;
}
Queue<WorkResult*>& ConvNet::getResultQueue() {
return _resultQueue;
}
DataProvider& ConvNet::getDataProvider() {
return *_dp;
}
Layer& ConvNet::operator[](int idx) {
return *_layers[idx];
}
Layer& ConvNet::getLayer(int idx) {
return *_layers[idx];
}
void ConvNet::copyToCPU() {
for (int i = 0; i < _layers.size(); i++) {
_layers[i]->copyToCPU();
}
}
void ConvNet::copyToGPU() {
for (int i = 0; i < _layers.size(); i++) {
_layers[i]->copyToGPU();
}
}
void ConvNet::updateWeights() {
for (int i = 0; i < _layers.size(); i++) {
_layers[i]->updateWeights();
}
}
void ConvNet::reset() {
for (int i = 0; i < _layers.size(); i++) {
_layers[i]->reset();
}
}
int ConvNet::getNumLayers() {
return _layers.size();
}
void ConvNet::bprop(PASS_TYPE passType) {
for (int i = 0; i < _costs.size(); i++) {
_costs[i]->bprop(passType);
}
reset();
}
void ConvNet::fprop(PASS_TYPE passType) {
assert(_data != NULL);
reset();
double batchStart = Now();
for (int i = 0; i < _dataLayers.size(); i++) {
double start = Now();
_dataLayers[i]->fprop(_data->getData(), passType);
}
}
void ConvNet::fprop(GPUData& data, PASS_TYPE passType) {
if (&data != _data) {
delete _data;
}
_data = &data;
fprop(passType);
}
void ConvNet::fprop(int miniIdx, PASS_TYPE passType) {
delete _data;
_data = &_dp->getMinibatch(miniIdx);
fprop(passType);
}
Cost& ConvNet::getCost() {
return *new Cost(_data->getNumCases(), _costs);
}
// Same as getCost() but adds results to given cost and returns it
Cost& ConvNet::getCost(Cost& cost) {
Cost& newCost = getCost();
cost += newCost;
delete &newCost;
return cost;
}
double ConvNet::getCostValue() {
Cost& cost = getCost();
double val = cost.getValue();
delete &cost;
return val;
}
/*
* Gradient checking stuff
*/
void ConvNet::checkGradients() {
_numFailures = 0;
_numTests = 0;
fprop(0, PASS_GC);
_baseErr = getCostValue();
bprop(PASS_GC);
for (vector<Layer*>::iterator it = _layers.begin(); it != _layers.end(); ++it) {
(*it)->checkGradients(this);
}
cout << "------------------------" << endl;
if (_numFailures > 0) {
cout << _numFailures << "/" << _numTests << " TESTS FAILED" << endl;
} else {
cout << "ALL " << _numTests << " TESTS PASSED" << endl;
}
}
/*
* name: weight matrix name
* eps: finite difference step
*/
bool ConvNet::checkGradient(const string& name, float eps, Weights& weights) {
Matrix numGrad(weights.getNumRows(), weights.getNumCols());
Matrix diff(numGrad);
numGrad.apply(Matrix::ZERO);
Matrix weightsCPU;
weights.getW().copyToHost(weightsCPU, true);
for(int i = 0; i < weights.getNumRows(); i++) {
for (int j = 0; j < weights.getNumCols(); j++) {
float v = weightsCPU(i,j);
weightsCPU(i,j) += eps;
weights.getW().copyFromHost(weightsCPU);
weightsCPU(i,j) = v;
fprop(PASS_GC);
double err = getCostValue();
numGrad(i,j) = (err - _baseErr) / (_data->getNumCases() * eps);
if (isnan(numGrad(i,j)) || isinf(numGrad(i,j))) {
cout << "Numerical computation produced nan or inf when checking '" << name << "': " << numGrad(i,j) << endl;
cout << "Consider reducing the sizes of the weights or finite difference steps." << endl;
cout << "Exiting." << endl;
exit(1);
}
weights.getW().copyFromHost(weightsCPU);
}
}
Matrix gradCPU;
weights.getGrad().copyToHost(gradCPU, true);
gradCPU.scale(-1.0 / _data->getNumCases());
float analNorm = gradCPU.norm();
float numNorm = numGrad.norm();
numGrad.subtract(gradCPU, diff);
float relErr = diff.norm() / analNorm;
bool fail = relErr >= GC_REL_ERR_THRESH;
if (fail || !GC_SUPPRESS_PASSES) {
cout << "========================" << endl;
printf("(%s) %s GRADIENT CHECK\n", fail ? "****FAIL****" : "PASS", name.c_str());
cout << "========================" << endl;
cout << "Analytic:" << endl;
gradCPU.print(6,4);
cout << "Numeric:" << endl;
numGrad.print(6,4);
printf("Analytic norm: %e\n", analNorm);
printf("Numeric norm: %e\n", numNorm);
printf("Relative error: %e\n", relErr);
}
_numTests++;
_numFailures += fail;
return fail;
}
|
d52cf257499c7803485f6a4f7fb3cb7677347586.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void convertFloatToRGBA_kernel(uchar4 *out_image, const float *in_image, int width, int height, float lowerLim, float upperLim) {
const int x = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
const int y = __mul24(blockIdx.y, blockDim.y) + threadIdx.y;
uchar4 temp;
if (x < width && y < height) {
float val = in_image[__mul24(y, width) + x];
// first draw unmatched pixels in white
if (!isfinite(val)) {
temp.x = 255;
temp.y = 255;
temp.z = 255;
temp.w = 255;
} else {
// rescale value from [lowerLim,upperLim] to [0,1]
val -= lowerLim;
val /= (upperLim - lowerLim);
float r = 1.0f;
float g = 1.0f;
float b = 1.0f;
if (val < 0.25f) {
r = 0;
g = 4.0f * val;
} else if (val < 0.5f) {
r = 0;
b = 1.0 + 4.0f * (0.25f - val);
} else if (val < 0.75f) {
r = 4.0f * (val - 0.5f);
b = 0;
} else {
g = 1.0f + 4.0f * (0.75f - val);
b = 0;
}
temp.x = 255.0 * r;
temp.y = 255.0 * g;
temp.z = 255.0 * b;
temp.w = 255;
}
out_image[__mul24(y, width) + x] = temp;
}
} | d52cf257499c7803485f6a4f7fb3cb7677347586.cu | #include "includes.h"
__global__ void convertFloatToRGBA_kernel(uchar4 *out_image, const float *in_image, int width, int height, float lowerLim, float upperLim) {
const int x = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
const int y = __mul24(blockIdx.y, blockDim.y) + threadIdx.y;
uchar4 temp;
if (x < width && y < height) {
float val = in_image[__mul24(y, width) + x];
// first draw unmatched pixels in white
if (!isfinite(val)) {
temp.x = 255;
temp.y = 255;
temp.z = 255;
temp.w = 255;
} else {
// rescale value from [lowerLim,upperLim] to [0,1]
val -= lowerLim;
val /= (upperLim - lowerLim);
float r = 1.0f;
float g = 1.0f;
float b = 1.0f;
if (val < 0.25f) {
r = 0;
g = 4.0f * val;
} else if (val < 0.5f) {
r = 0;
b = 1.0 + 4.0f * (0.25f - val);
} else if (val < 0.75f) {
r = 4.0f * (val - 0.5f);
b = 0;
} else {
g = 1.0f + 4.0f * (0.75f - val);
b = 0;
}
temp.x = 255.0 * r;
temp.y = 255.0 * g;
temp.z = 255.0 * b;
temp.w = 255;
}
out_image[__mul24(y, width) + x] = temp;
}
} |
7761fd8803d9af90482d313b8d3accc15aab4734.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime_api.h"
#include <cstdlib>
#include <stdlib.h>
#include <conio.h>
#include <ctype.h>
#include <hip/device_functions.h>
#include <string.h>
#include <inttypes.h>
#ifndef __HIPCC__
#define __HIPCC__
#endif
#include <hip/hip_runtime.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <time.h>
#include <inttypes.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <math.h>
#include <Python.h>
#define potential 1
//print area in the console
void print(int size,double *tab) {
for (int y = 0; y < size; y++) {
for (int x = 0; x < size; x++) {
printf(" %.3f ", tab[y * size + x]);
}
printf("\n");
}
}
//set electric potential in area to zero
__global__ void Initialization_kernel(int size,double *area_gpu) {
int y = blockDim.y * blockIdx.y + threadIdx.y;
int x = blockDim.x * blockIdx.x + threadIdx.x;
area_gpu[x*size + y] = 0;
}
//initialize Core in area
__global__ void Core_kernel(int size, int center,double* area_gpu, int* radius) {
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
double x_dis = abs((((x + size) % size)-center));
double y_dis = abs((y - center));
double r = *radius;
//set core potential
if ((x_dis*x_dis)+(y_dis*y_dis) <= r*r) {
area_gpu[x*size + y] = potential;
}
}
//calculate potential in the sub-area
__global__ void Calculation_kernel(double *epsilon, int center, int size, int *r, int *R , double *Buffor, double *area ) {
int diff = *R - *r;
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
int Rr = (*R) * (*R);
double x_dis = abs((((x + size) % size) - center));
double y_dis = abs((y - center));
Buffor[x*size + y] = area[x*size + y];
if ((x_dis*x_dis+y_dis*y_dis <= Rr)) {
__syncthreads();
for (int layer = 1; layer <= diff; layer++) {
Buffor[x * size + y] = area[x * size + y];
__syncthreads();
double r_small = *r + layer - 1;
double R_big = *r + layer;
if (((x_dis*x_dis)+(y_dis*y_dis) > (r_small*r_small)) && (((x_dis*x_dis)+(y_dis*y_dis)) <= (R_big*R_big))) {
//Numeric Laplace Equation
area[x * size + y] = (Buffor[x * size + y - 1] + Buffor[x * size + y + 1] + Buffor[x * (size)+y - size] + Buffor[x * (size)+y + size]) / 4;
__syncthreads();
//calculate difference between actual and previous iteration
if (area[x * size + y] >= 0.000001) {
*epsilon = abs(area[x * size + y] - Buffor[x * size + y]);
}
__syncthreads();
}
}
__syncthreads();
}
}
//save results to csv file
void create_csv(int size, char* filename, double* area, int area_radius) {
char* filename1;
char str1[] = "Radius";
FILE *fp, *fp1;
filename = strcat(filename, ".csv");
fp = fopen(filename, "w+");
filename1 = strcat(str1, ".csv");
fp1 = fopen(filename1, "w+");
if (fp == NULL){
printf("Unable to create a file.\n");
exit(EXIT_FAILURE);
}
if (fp1 == NULL){
printf("Unable to create a file.\n");
exit(EXIT_FAILURE);
}
int newline = 0;
for (int i = 0; i < size*size; i++) {
if (((i + size) % size == 0) && (newline > 0)) {
fprintf(fp, "\n");
fprintf(fp, "%f,", area[i]);
}
else{
fprintf(fp, "%f,", area[i]);
}
newline++;
}
fprintf(fp1, "%i", area_radius);
fclose(fp1);
fclose(fp);
printf("\n %s, %s files created", filename, filename1);
}
int main(){
const int size = 32;
const int n = 1024;
const int center = n/2;
bool exit_program = false;
int choice = 10;
int radius;
int area_radius = 0;
int* R_area_gpu, *r_gpu;
double *area_gpu, *area_cpu, *Buffor ,*Buffor_cpu, *epsilon_g;
char str[] = "Data";
double epsilon_c = 1;
unsigned long long int iteration = 0;
dim3 dimblock(size, size);
dim3 dimGrid(n / size, n / size);
area_cpu = (double*)malloc(n * n * sizeof(double));
Buffor_cpu = (double*)malloc(n * n * sizeof(double));
hipMalloc((void**)&area_gpu, n * n * sizeof(double));
hipMalloc((void**)&epsilon_g, sizeof(double));
hipMalloc((void**)&R_area_gpu, sizeof(int));
hipMalloc((void**)&Buffor, n * n * sizeof(double));
hipMalloc((void**)&r_gpu, sizeof(int));
//program menu
while (exit_program == false) {
system("cls");
printf("Choose operation\n");
printf("Quit program - 0\n");
printf("Initialize area - 1\n");
printf("Calculate the core - 2\n");
printf("Calculate area - 3\n");
printf("Print actual area - 5\n");
printf("Save into CSV file - 6\n");
printf("Visualisation - 7\n");
scanf_s("%i", &choice);
if (choice == 0) {
hipFree(area_gpu);
free(area_cpu);
hipFree(r_gpu);
hipFree(R_area_gpu);
hipFree(Buffor);
free(Buffor_cpu);
hipFree(epsilon_g);
exit_program = true;
}
if (choice == 1) {
hipMemcpy(Buffor, Buffor_cpu, n * n * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(area_gpu, area_cpu, n * n * sizeof(double), hipMemcpyHostToDevice);
Initialization_kernel << <dimblock, dimGrid >> > (n,area_gpu);
Initialization_kernel << <dimblock, dimGrid >> > (n,Buffor);
hipMemcpy(area_cpu, area_gpu, n * n * sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(Buffor_cpu, Buffor, n * n * sizeof(double), hipMemcpyDeviceToHost);
}
if (choice == 3) {
iteration = 0;
epsilon_c = 1;
printf("\nArea radius: ");
scanf_s("%i", &area_radius);
hipMemcpy(area_gpu, area_cpu,n * n * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(R_area_gpu, &area_radius, sizeof(int), hipMemcpyHostToDevice);
while (epsilon_c > 0.000000015) {
hipMemcpy(epsilon_g, &epsilon_c, sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(Buffor, Buffor_cpu, n * n * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(area_gpu, area_cpu, n * n * sizeof(double), hipMemcpyHostToDevice);
Calculation_kernel << < dimblock, dimGrid >> > (epsilon_g, center, n, r_gpu, R_area_gpu, Buffor, area_gpu);
hipMemcpy(Buffor_cpu, Buffor, n * n * sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(area_cpu, area_gpu, n * n * sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(&epsilon_c, epsilon_g, sizeof(double), hipMemcpyDeviceToHost);
iteration = iteration + 1;
if (iteration > n*2) {
epsilon_c = 0;
}
}
printf("iterations %llu\n", iteration);
printf("calculations done..\n");
char mychar;
scanf("%c", &mychar);
getchar();
iteration = 0;
epsilon_c = 1;
}
if (choice == 5) {
printf("Electrical potential for this area \n");
print(n, area_cpu);
printf("calculations done..\n");
char mychar;
scanf("%c", &mychar);
getchar();
}
if (choice == 2) {
printf("Core radius: ");
scanf_s("%i", &radius);
hipMemcpy(r_gpu, &radius, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(area_gpu, area_cpu, n * n * sizeof(double), hipMemcpyHostToDevice);
Core_kernel << <dimblock,dimGrid >> > (n,center,area_gpu, r_gpu);
hipMemcpy(area_cpu, area_gpu, n * n * sizeof(double), hipMemcpyDeviceToHost);
}
if (choice == 6) {
char str[] = "Data";
create_csv(n, str, area_cpu, area_radius);
char mychar;
scanf("%c", &mychar);
getchar();
}
//Python visualisation
if (choice == 7) {
char path[] = "simulation.py";
FILE* fp;
int argc = 1;
wchar_t* argv[1];
argv[0] = L"simulation.py";
Py_Initialize();
Py_SetProgramName(argv[0]);
PySys_SetArgv(argc, argv);
fp = _Py_fopen(path, "r");
PyRun_SimpleFile(fp, path);
Py_Finalize();
_getch();
}
}
return 0;
}
| 7761fd8803d9af90482d313b8d3accc15aab4734.cu |
#include "cuda_runtime_api.h"
#include <cstdlib>
#include <stdlib.h>
#include <conio.h>
#include <ctype.h>
#include <device_functions.h>
#include <string.h>
#include <inttypes.h>
#ifndef __CUDACC__
#define __CUDACC__
#endif
#include <cuda.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <time.h>
#include <inttypes.h>
#include <curand.h>
#include <curand_kernel.h>
#include <math.h>
#include <Python.h>
#define potential 1
//print area in the console
void print(int size,double *tab) {
for (int y = 0; y < size; y++) {
for (int x = 0; x < size; x++) {
printf(" %.3f ", tab[y * size + x]);
}
printf("\n");
}
}
//set electric potential in area to zero
__global__ void Initialization_kernel(int size,double *area_gpu) {
int y = blockDim.y * blockIdx.y + threadIdx.y;
int x = blockDim.x * blockIdx.x + threadIdx.x;
area_gpu[x*size + y] = 0;
}
//initialize Core in area
__global__ void Core_kernel(int size, int center,double* area_gpu, int* radius) {
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
double x_dis = abs((((x + size) % size)-center));
double y_dis = abs((y - center));
double r = *radius;
//set core potential
if ((x_dis*x_dis)+(y_dis*y_dis) <= r*r) {
area_gpu[x*size + y] = potential;
}
}
//calculate potential in the sub-area
__global__ void Calculation_kernel(double *epsilon, int center, int size, int *r, int *R , double *Buffor, double *area ) {
int diff = *R - *r;
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
int Rr = (*R) * (*R);
double x_dis = abs((((x + size) % size) - center));
double y_dis = abs((y - center));
Buffor[x*size + y] = area[x*size + y];
if ((x_dis*x_dis+y_dis*y_dis <= Rr)) {
__syncthreads();
for (int layer = 1; layer <= diff; layer++) {
Buffor[x * size + y] = area[x * size + y];
__syncthreads();
double r_small = *r + layer - 1;
double R_big = *r + layer;
if (((x_dis*x_dis)+(y_dis*y_dis) > (r_small*r_small)) && (((x_dis*x_dis)+(y_dis*y_dis)) <= (R_big*R_big))) {
//Numeric Laplace Equation
area[x * size + y] = (Buffor[x * size + y - 1] + Buffor[x * size + y + 1] + Buffor[x * (size)+y - size] + Buffor[x * (size)+y + size]) / 4;
__syncthreads();
//calculate difference between actual and previous iteration
if (area[x * size + y] >= 0.000001) {
*epsilon = abs(area[x * size + y] - Buffor[x * size + y]);
}
__syncthreads();
}
}
__syncthreads();
}
}
//save results to csv file
void create_csv(int size, char* filename, double* area, int area_radius) {
char* filename1;
char str1[] = "Radius";
FILE *fp, *fp1;
filename = strcat(filename, ".csv");
fp = fopen(filename, "w+");
filename1 = strcat(str1, ".csv");
fp1 = fopen(filename1, "w+");
if (fp == NULL){
printf("Unable to create a file.\n");
exit(EXIT_FAILURE);
}
if (fp1 == NULL){
printf("Unable to create a file.\n");
exit(EXIT_FAILURE);
}
int newline = 0;
for (int i = 0; i < size*size; i++) {
if (((i + size) % size == 0) && (newline > 0)) {
fprintf(fp, "\n");
fprintf(fp, "%f,", area[i]);
}
else{
fprintf(fp, "%f,", area[i]);
}
newline++;
}
fprintf(fp1, "%i", area_radius);
fclose(fp1);
fclose(fp);
printf("\n %s, %s files created", filename, filename1);
}
int main(){
const int size = 32;
const int n = 1024;
const int center = n/2;
bool exit_program = false;
int choice = 10;
int radius;
int area_radius = 0;
int* R_area_gpu, *r_gpu;
double *area_gpu, *area_cpu, *Buffor ,*Buffor_cpu, *epsilon_g;
char str[] = "Data";
double epsilon_c = 1;
unsigned long long int iteration = 0;
dim3 dimblock(size, size);
dim3 dimGrid(n / size, n / size);
area_cpu = (double*)malloc(n * n * sizeof(double));
Buffor_cpu = (double*)malloc(n * n * sizeof(double));
cudaMalloc((void**)&area_gpu, n * n * sizeof(double));
cudaMalloc((void**)&epsilon_g, sizeof(double));
cudaMalloc((void**)&R_area_gpu, sizeof(int));
cudaMalloc((void**)&Buffor, n * n * sizeof(double));
cudaMalloc((void**)&r_gpu, sizeof(int));
//program menu
while (exit_program == false) {
system("cls");
printf("Choose operation\n");
printf("Quit program - 0\n");
printf("Initialize area - 1\n");
printf("Calculate the core - 2\n");
printf("Calculate area - 3\n");
printf("Print actual area - 5\n");
printf("Save into CSV file - 6\n");
printf("Visualisation - 7\n");
scanf_s("%i", &choice);
if (choice == 0) {
cudaFree(area_gpu);
free(area_cpu);
cudaFree(r_gpu);
cudaFree(R_area_gpu);
cudaFree(Buffor);
free(Buffor_cpu);
cudaFree(epsilon_g);
exit_program = true;
}
if (choice == 1) {
cudaMemcpy(Buffor, Buffor_cpu, n * n * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(area_gpu, area_cpu, n * n * sizeof(double), cudaMemcpyHostToDevice);
Initialization_kernel << <dimblock, dimGrid >> > (n,area_gpu);
Initialization_kernel << <dimblock, dimGrid >> > (n,Buffor);
cudaMemcpy(area_cpu, area_gpu, n * n * sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(Buffor_cpu, Buffor, n * n * sizeof(double), cudaMemcpyDeviceToHost);
}
if (choice == 3) {
iteration = 0;
epsilon_c = 1;
printf("\nArea radius: ");
scanf_s("%i", &area_radius);
cudaMemcpy(area_gpu, area_cpu,n * n * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(R_area_gpu, &area_radius, sizeof(int), cudaMemcpyHostToDevice);
while (epsilon_c > 0.000000015) {
cudaMemcpy(epsilon_g, &epsilon_c, sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(Buffor, Buffor_cpu, n * n * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(area_gpu, area_cpu, n * n * sizeof(double), cudaMemcpyHostToDevice);
Calculation_kernel << < dimblock, dimGrid >> > (epsilon_g, center, n, r_gpu, R_area_gpu, Buffor, area_gpu);
cudaMemcpy(Buffor_cpu, Buffor, n * n * sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(area_cpu, area_gpu, n * n * sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(&epsilon_c, epsilon_g, sizeof(double), cudaMemcpyDeviceToHost);
iteration = iteration + 1;
if (iteration > n*2) {
epsilon_c = 0;
}
}
printf("iterations %llu\n", iteration);
printf("calculations done..\n");
char mychar;
scanf("%c", &mychar);
getchar();
iteration = 0;
epsilon_c = 1;
}
if (choice == 5) {
printf("Electrical potential for this area \n");
print(n, area_cpu);
printf("calculations done..\n");
char mychar;
scanf("%c", &mychar);
getchar();
}
if (choice == 2) {
printf("Core radius: ");
scanf_s("%i", &radius);
cudaMemcpy(r_gpu, &radius, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(area_gpu, area_cpu, n * n * sizeof(double), cudaMemcpyHostToDevice);
Core_kernel << <dimblock,dimGrid >> > (n,center,area_gpu, r_gpu);
cudaMemcpy(area_cpu, area_gpu, n * n * sizeof(double), cudaMemcpyDeviceToHost);
}
if (choice == 6) {
char str[] = "Data";
create_csv(n, str, area_cpu, area_radius);
char mychar;
scanf("%c", &mychar);
getchar();
}
//Python visualisation
if (choice == 7) {
char path[] = "simulation.py";
FILE* fp;
int argc = 1;
wchar_t* argv[1];
argv[0] = L"simulation.py";
Py_Initialize();
Py_SetProgramName(argv[0]);
PySys_SetArgv(argc, argv);
fp = _Py_fopen(path, "r");
PyRun_SimpleFile(fp, path);
Py_Finalize();
_getch();
}
}
return 0;
}
|
b21ccf747c40ca4f0e266d10e478c55e935d01df.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from zaxpycp.cu normal z -> c, Fri Sep 11 18:29:19 2015
*/
#include "common_magma.h"
#define NB 64
// adds x += r --and--
// copies r = b
// each thread does one index, x[i] and r[i]
__global__ void
caxpycp_kernel(
int m, magmaFloatComplex *r, magmaFloatComplex *x,
const magmaFloatComplex *b)
{
const int i = threadIdx.x + blockIdx.x*NB;
if ( i < m ) {
x[i] = MAGMA_C_ADD( x[i], r[i] );
r[i] = b[i];
}
}
// ----------------------------------------------------------------------
// adds x += r --and--
// copies r = b
extern "C" void
magmablas_caxpycp_q(
magma_int_t m,
magmaFloatComplex_ptr r,
magmaFloatComplex_ptr x,
magmaFloatComplex_const_ptr b,
magma_queue_t queue )
{
dim3 threads( NB );
dim3 grid( magma_ceildiv( m, NB ) );
hipLaunchKernelGGL(( caxpycp_kernel) , dim3(grid), dim3(threads), 0, queue , m, r, x, b );
}
extern "C" void
magmablas_caxpycp(
magma_int_t m,
magmaFloatComplex_ptr r,
magmaFloatComplex_ptr x,
magmaFloatComplex_const_ptr b)
{
magmablas_caxpycp_q( m, r, x, b, magma_stream );
}
| b21ccf747c40ca4f0e266d10e478c55e935d01df.cu | /*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from zaxpycp.cu normal z -> c, Fri Sep 11 18:29:19 2015
*/
#include "common_magma.h"
#define NB 64
// adds x += r --and--
// copies r = b
// each thread does one index, x[i] and r[i]
__global__ void
caxpycp_kernel(
int m, magmaFloatComplex *r, magmaFloatComplex *x,
const magmaFloatComplex *b)
{
const int i = threadIdx.x + blockIdx.x*NB;
if ( i < m ) {
x[i] = MAGMA_C_ADD( x[i], r[i] );
r[i] = b[i];
}
}
// ----------------------------------------------------------------------
// adds x += r --and--
// copies r = b
// Queue-aware driver: enqueues caxpycp_kernel on `queue` to perform
// x += r and r = b over m elements (launch is asynchronous w.r.t. the host).
extern "C" void
magmablas_caxpycp_q(
magma_int_t m,
magmaFloatComplex_ptr r,
magmaFloatComplex_ptr x,
magmaFloatComplex_const_ptr b,
magma_queue_t queue )
{
// NB threads per block; ceil(m/NB) blocks cover all m elements
// (the kernel bounds-checks the ragged tail).
dim3 threads( NB );
dim3 grid( magma_ceildiv( m, NB ) );
caxpycp_kernel <<< grid, threads, 0, queue >>> ( m, r, x, b );
}
// Convenience overload: same as magmablas_caxpycp_q, but runs on MAGMA's
// global default stream (magma_stream).
extern "C" void
magmablas_caxpycp(
magma_int_t m,
magmaFloatComplex_ptr r,
magmaFloatComplex_ptr x,
magmaFloatComplex_const_ptr b)
{
magmablas_caxpycp_q( m, r, x, b, magma_stream );
}
|
3d500c54097a008d9c5d7c46bbaeb5441263c8fb.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kernel_modtest_write.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark harness: sweeps kernel_modtest_write over
// matrices_[0..argv[1]) sizes x all 20 launch configs in blocks_, timing
// 1000 launches per configuration.
// NOTE(review): argv[1] is read without an argc check — TODO confirm callers
// always pass it.
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
// NOTE(review): _ptr/end_ptr are allocated every iteration and never freed —
// leaks device memory across the sweep; confirm intentional for this harness.
char *_ptr = NULL;
hipMalloc(&_ptr, XSIZE*YSIZE);
char *end_ptr = NULL;
hipMalloc(&end_ptr, XSIZE*YSIZE);
unsigned int offset = 1;
unsigned int p1 = 1;
unsigned int p2 = 1;
// Round the problem size up to a multiple of the block dims so the grid
// divides evenly.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// Context init (hipFree(0)) + one launch, synced, then 10 warm-up launches.
hipFree(0);hipLaunchKernelGGL((
kernel_modtest_write), dim3(gridBlock),dim3(threadBlock), 0, 0, _ptr,end_ptr,offset,p1,p2);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
kernel_modtest_write), dim3(gridBlock),dim3(threadBlock), 0, 0, _ptr,end_ptr,offset,p1,p2);
}
// NOTE(review): no hipDeviceSynchronize() between the timed loop and `end`,
// so this largely measures launch/enqueue cost of 1000 launches, not kernel
// execution time — confirm that is the intended metric.
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
kernel_modtest_write), dim3(gridBlock),dim3(threadBlock), 0, 0, _ptr,end_ptr,offset,p1,p2);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 3d500c54097a008d9c5d7c46bbaeb5441263c8fb.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kernel_modtest_write.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark harness: sweeps kernel_modtest_write over
// matrices_[0..argv[1]) sizes x all 20 launch configs in blocks_, timing
// 1000 launches per configuration.
// NOTE(review): argv[1] is read without an argc check — TODO confirm callers
// always pass it.
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
// NOTE(review): _ptr/end_ptr are allocated every iteration and never freed —
// leaks device memory across the sweep; confirm intentional for this harness.
char *_ptr = NULL;
cudaMalloc(&_ptr, XSIZE*YSIZE);
char *end_ptr = NULL;
cudaMalloc(&end_ptr, XSIZE*YSIZE);
unsigned int offset = 1;
unsigned int p1 = 1;
unsigned int p2 = 1;
// Round the problem size up to a multiple of the block dims so the grid
// divides evenly.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// Context init (cudaFree(0)) + one launch, synced, then 10 warm-up launches.
cudaFree(0);
kernel_modtest_write<<<gridBlock,threadBlock>>>(_ptr,end_ptr,offset,p1,p2);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kernel_modtest_write<<<gridBlock,threadBlock>>>(_ptr,end_ptr,offset,p1,p2);
}
// NOTE(review): no cudaDeviceSynchronize() between the timed loop and `end`,
// so this largely measures launch/enqueue cost of 1000 launches, not kernel
// execution time — confirm that is the intended metric.
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kernel_modtest_write<<<gridBlock,threadBlock>>>(_ptr,end_ptr,offset,p1,p2);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
769d1cce68d80e49ea8fa4c1bf94733ec08c9b64.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-20, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <groupby/common/utils.hpp>
#include <groupby/hash/groupby_kernels.cuh>
#include <hash/concurrent_unordered_map.cuh>
#include <cudf/detail/utilities/hash_functions.cuh>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/column/column.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/groupby.hpp>
#include <cudf/detail/groupby.hpp>
#include <cudf/detail/gather.cuh>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/replace.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/table/table_device_view.cuh>
#include <cudf/table/row_operators.cuh>
#include <cudf/types.hpp>
#include <cudf/aggregation.hpp>
#include <cudf/detail/aggregation/aggregation.hpp>
#include <cudf/detail/aggregation/aggregation.cuh>
#include <cudf/detail/aggregation/result_cache.hpp>
#include <cudf/utilities/traits.hpp>
#include <memory>
#include <utility>
namespace cudf {
namespace experimental {
namespace groupby {
namespace detail {
namespace hash {
namespace {
// This is a temporary fix due to compiler bug and we can resort back to
// constexpr once cuda 10.2 becomes RAPIDS's minimum compiler version
#if 0
/**
* @brief List of aggregation operations that can be computed with a hash-based
* implementation.
*/
constexpr std::array<aggregation::Kind, 7> hash_aggregations{
aggregation::SUM, aggregation::MIN, aggregation::MAX,
aggregation::COUNT_VALID, aggregation::COUNT_ALL,
aggregation::ARGMIN, aggregation::ARGMAX};
template <class T, size_t N>
constexpr bool array_contains(std::array<T, N> const& haystack, T needle) {
for (auto i = 0u; i < N; ++i) {
if (haystack[i] == needle) return true;
}
return false;
}
#endif
/**
* @brief Indicates whether the specified aggregation operation can be computed
* with a hash-based implementation.
*
* @param t The aggregation operation to verify
* @return true `t` is valid for a hash based groupby
* @return false `t` is invalid for a hash based groupby
*/
bool constexpr is_hash_aggregation(aggregation::Kind t) {
  // Temporary explicit enumeration — revert to
  // array_contains(hash_aggregations, t) once CUDA 10.2 becomes RAPIDS's
  // minimum compiler version (works around a compiler bug).
  switch (t) {
    case aggregation::SUM:
    case aggregation::MIN:
    case aggregation::MAX:
    case aggregation::COUNT_VALID:
    case aggregation::COUNT_ALL:
    case aggregation::ARGMIN:
    case aggregation::ARGMAX: return true;
    default: return false;
  }
}
// flatten aggs to filter in single pass aggs
// Returns one (values column, agg kind, original request index) triple per
// hash-computable aggregation across all requests.
std::tuple<table_view, std::vector<aggregation::Kind>, std::vector<size_t> >
flatten_single_pass_aggs(std::vector<aggregation_request> const& requests) {
std::vector<column_view> columns;
std::vector<aggregation::Kind> agg_kinds;
std::vector<size_t> col_ids;
for (size_t i = 0; i < requests.size(); i++) {
auto const& request = requests[i];
auto const& agg_v = request.aggregations;
// Record one flattened aggregation against request i's values column.
auto insert_agg = [&agg_kinds, &columns, &col_ids, &request, i]
(aggregation::Kind k) {
agg_kinds.push_back(k);
columns.push_back(request.values);
col_ids.push_back(i);
};
for (auto &&agg : agg_v) {
if (is_hash_aggregation(agg->kind)) {
if (is_fixed_width(request.values.type()) or
agg->kind == aggregation::COUNT_VALID or
agg->kind == aggregation::COUNT_ALL) {
insert_agg(agg->kind);
} else if (request.values.type().id() == type_id::STRING) {
// For string type, only ARGMIN, ARGMAX, MIN, and MAX are supported
if (agg->kind == aggregation::ARGMIN or
agg->kind == aggregation::ARGMAX) {
insert_agg(agg->kind);
} else if (agg->kind == aggregation::MIN) {
// String MIN/MAX are computed indirectly via ARGMIN/ARGMAX; the
// final value is gathered later in sparse_to_dense_results.
insert_agg(aggregation::ARGMIN);
} else if (agg->kind == aggregation::MAX) {
insert_agg(aggregation::ARGMAX);
}
}
// NOTE: non-fixed-width, non-string values fall through with no entry.
}
}
}
return std::make_tuple(table_view(columns),
std::move(agg_kinds), std::move(col_ids));
}
/**
* @brief Gather sparse results into dense using `gather_map` and add to
* `dense_cache`
*
* @see groupby_null_templated()
*/
// Compacts each sparse aggregation result into a dense column (one row per
// populated group, via gather_map[0..map_size)) and stores it in
// dense_results keyed by (request index, aggregation).
void sparse_to_dense_results(
std::vector<aggregation_request> const& requests,
experimental::detail::result_cache const& sparse_results,
experimental::detail::result_cache* dense_results,
rmm::device_vector<size_type> const& gather_map, size_type map_size,
hipStream_t stream,
rmm::mr::device_memory_resource* mr)
{
for (size_t i = 0; i < requests.size(); i++) {
auto const& agg_v = requests[i].aggregations;
auto const& col = requests[i].values;
// Given an aggregation, this will get the result from sparse_results and
// convert and return dense, compacted result
auto to_dense_agg_result =
[&sparse_results, &gather_map, map_size, i, mr, stream]
(auto const& agg) {
auto s = sparse_results.get_result(i, agg);
auto dense_result_table =
experimental::detail::gather(
table_view({s}),
gather_map.begin(),
gather_map.begin() + map_size,
false, mr, stream);
return std::move(dense_result_table->release()[0]);
};
// Enables conversion of ARGMIN/ARGMAX into MIN/MAX
auto transformed_result =
[&col, to_dense_agg_result, mr, stream]
(auto const& agg_kind) {
auto transformed_agg = std::make_unique<aggregation>(agg_kind);
auto arg_result = to_dense_agg_result(transformed_agg);
// We make a view of ARG(MIN/MAX) result without a null mask and gather
// using this map. The values in data buffer of ARG(MIN/MAX) result
// corresponding to null values was initialized to ARG(MIN/MAX)_SENTINEL
// which is an out of bounds index value (-1) and causes the gathered
// value to be null.
column_view null_removed_map(data_type(type_to_id<size_type>()),
arg_result->size(),
static_cast<void const*>(arg_result->view().template data<size_type>()));
auto transformed_result = experimental::detail::gather(table_view({col}),
null_removed_map, false, arg_result->nullable(), false, mr, stream);
return std::move(transformed_result->release()[0]);
};
// Route each requested aggregation to the appropriate densification path;
// aggregations absent from sparse_results are silently skipped.
for (auto &&agg : agg_v) {
if (agg->kind == aggregation::COUNT_VALID or
agg->kind == aggregation::COUNT_ALL) {
dense_results->add_result(i, agg, to_dense_agg_result(agg));
}
else if (col.type().id() == type_id::STRING and
(agg->kind == aggregation::MAX or agg->kind == aggregation::MIN)) {
// String MIN/MAX were computed as ARGMIN/ARGMAX; gather the values back.
if (agg->kind == aggregation::MAX) {
dense_results->add_result(i, agg,
transformed_result(aggregation::ARGMAX));
}
else if (agg->kind == aggregation::MIN) {
dense_results->add_result(i, agg,
transformed_result(aggregation::ARGMIN));
}
}
else if (sparse_results.has_result(i, agg)) {
dense_results->add_result(i, agg, to_dense_agg_result(agg));
}
}
}
}
/**
* @brief Construct hash map that uses row comparator and row hasher on
* `d_keys` table and stores indices
*/
template <bool keys_have_nulls>
auto create_hash_map(table_device_view const& d_keys,
include_nulls include_null_keys,
hipStream_t stream = 0)
{
// max size_type is the sentinel marking an empty slot (both key and value).
size_type constexpr unused_key{std::numeric_limits<size_type>::max()};
size_type constexpr unused_value{std::numeric_limits<size_type>::max()};
using map_type =
concurrent_unordered_map<size_type, size_type,
row_hasher<default_hash, keys_have_nulls>,
row_equality_comparator<keys_have_nulls>>;
using allocator_type = typename map_type::allocator_type;
// When null keys are included, two rows with nulls in the same position
// compare equal and land in the same group.
bool const null_keys_are_equal{include_null_keys == include_nulls::YES};
row_hasher<default_hash, keys_have_nulls> hasher{d_keys};
row_equality_comparator<keys_have_nulls> rows_equal{
d_keys, d_keys, null_keys_are_equal};
return map_type::create(compute_hash_table_size(d_keys.num_rows()),
unused_key, unused_value, hasher, rows_equal,
allocator_type(), stream);
}
/**
* @brief Computes all aggregations from `requests` that require a single pass
* over the data and stores the results in `sparse_results`
*
* @see groupby_null_templated()
*/
template <bool keys_have_nulls, typename Map>
void compute_single_pass_aggs(table_view const& keys,
std::vector<aggregation_request> const& requests,
experimental::detail::result_cache* sparse_results,
Map& map, include_nulls include_null_keys,
hipStream_t stream)
{
// flatten the aggs to a table that can be operated on by aggregate_row
table_view flattened_values;
std::vector<aggregation::Kind> aggs;
std::vector<size_t> col_ids;
std::tie(flattened_values, aggs, col_ids) = flatten_single_pass_aggs(requests);
// make table that will hold sparse results
std::vector<std::unique_ptr<column>> sparse_columns;
std::transform(flattened_values.begin(), flattened_values.end(),
aggs.begin(), std::back_inserter(sparse_columns),
[stream] (auto const& col, auto const& agg) {
// COUNT results are never null; other aggs inherit input nullability.
bool nullable =
(agg == aggregation::COUNT_VALID or agg == aggregation::COUNT_ALL)
? false : col.has_nulls();
auto mask_flag = (nullable) ? mask_state::ALL_NULL
: mask_state::UNALLOCATED;
return make_fixed_width_column(
experimental::detail::target_type(col.type(), agg),
col.size(), mask_flag, stream);
});
table sparse_table(std::move(sparse_columns));
mutable_table_view table_view = sparse_table.mutable_view();
// Seed each sparse column with its aggregation's identity element.
experimental::detail::initialize_with_identity(table_view, aggs, stream);
// prepare to launch kernel to do the actual aggregation
auto d_sparse_table = mutable_table_device_view::create(sparse_table);
auto d_values = table_device_view::create(flattened_values);
rmm::device_vector<aggregation::Kind> d_aggs(aggs);
bool skip_key_rows_with_nulls = keys_have_nulls and
include_null_keys == include_nulls::NO;
if (skip_key_rows_with_nulls) {
// Combined validity over all key columns: rows with any null key are
// masked out of the aggregation.
auto row_bitmask{bitmask_and(keys, rmm::mr::get_default_resource(), stream)};
thrust::for_each_n(rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator(0), keys.num_rows(),
hash::compute_single_pass_aggs<true, Map>{
map, keys.num_rows(), *d_values, *d_sparse_table, d_aggs.data().get(),
static_cast<bitmask_type*>(row_bitmask.data())});
} else {
thrust::for_each_n(rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator(0), keys.num_rows(),
hash::compute_single_pass_aggs<false, Map>{
map, keys.num_rows(), *d_values, *d_sparse_table, d_aggs.data().get(),
nullptr});
}
// Add results back to sparse_results cache
auto sparse_result_cols = sparse_table.release();
for (size_t i = 0; i < aggs.size(); i++) {
sparse_results->add_result(col_ids[i],
std::make_unique<aggregation>(aggs[i]),
std::move(sparse_result_cols[i]));
}
}
/**
* @brief Computes and returns a device vector containing all populated keys in
* `map`.
*/
// Returns (vector of hash-table slot keys that are occupied, count of them).
// NOTE(review): `map` is taken by value — presumably a lightweight view over
// device-side storage; confirm the copy is cheap.
template <typename Map>
std::pair<rmm::device_vector<size_type>, size_type> extract_populated_keys(
Map map, size_type num_keys, hipStream_t stream = 0)
{
rmm::device_vector<size_type> populated_keys(num_keys);
// Project each (key, value) slot to its key.
auto get_key = [] __device__ (auto const& element) {
size_type key, value;
thrust::tie(key, value) = element;
return key;
};
// Keep only keys that differ from the empty-slot sentinel.
auto end_it = thrust::copy_if(rmm::exec_policy(stream)->on(stream),
thrust::make_transform_iterator(map.data(), get_key),
thrust::make_transform_iterator(map.data() + map.capacity(), get_key),
populated_keys.begin(),
[unused_key = map.get_unused_key()] __device__ (size_type key) {
return key != unused_key;
});
size_type map_size = end_it - populated_keys.begin();
return std::make_pair(std::move(populated_keys), map_size);
}
/**
* @brief Computes groupby using hash table.
*
* First, we create a hash table that stores the indices of unique rows in
* `keys`. The upper limit on the number of values in this map is the number
* of rows in `keys`.
*
* To store the results of aggregations, we create temporary sparse columns
* which have the same size as input value columns. Using the hash map, we
* determine the location within the sparse column to write the result of the
* aggregation into.
*
* The sparse column results of all aggregations are stored into the cache
* `sparse_results`. This enables the use of previously calculated results in
* other aggregations.
*
* All the aggregations which can be computed in a single pass are computed
* first, in a combined kernel. Then using these results, aggregations that
* require multiple passes, will be computed.
*
* Finally, using the hash map, we generate a vector of indices of populated
* values in sparse result columns. Then, for each aggregation originally
* requested in `requests`, we gather sparse results into a column of dense
* results using the aforementioned index vector. Dense results are stored into
* the in/out parameter `cache`.
*
*/
template <bool keys_have_nulls>
std::unique_ptr<table> groupby_null_templated(
table_view const& keys, std::vector<aggregation_request> const& requests,
experimental::detail::result_cache* cache,
include_nulls include_null_keys, hipStream_t stream,
rmm::mr::device_memory_resource* mr)
{
auto d_keys = table_device_view::create(keys);
auto map = create_hash_map<keys_have_nulls>(*d_keys, include_null_keys, stream);
// Cache of sparse results where the location of aggregate value in each
// column is indexed by the hash map
experimental::detail::result_cache sparse_results(requests.size());
// Compute all single pass aggs first
compute_single_pass_aggs<keys_have_nulls>(
keys, requests, &sparse_results, *map, include_null_keys, stream);
// Now continue with remaining multi-pass aggs
// <placeholder>
// Extract the populated indices from the hash map and create a gather map.
// Gathering using this map from sparse results will give dense results.
rmm::device_vector<size_type> gather_map;
size_type map_size;
std::tie(gather_map, map_size) =
extract_populated_keys(*map, keys.num_rows(), stream);
// Compact all results from sparse_results and insert into cache
sparse_to_dense_results(requests, sparse_results, cache, gather_map, map_size,
stream, mr);
// One key row per populated group, in gather_map order.
auto unique_keys = experimental::detail::gather(
keys, gather_map.begin(), gather_map.begin() + map_size, false, mr, stream);
return unique_keys;
}
} // namespace
/**
* @brief Indicates if a set of aggregation requests can be satisfied with a
* hash-based groupby implementation.
*
* @param keys The table of keys
* @param requests The set of columns to aggregate and the aggregations to
* perform
* @return true A hash-based groupby should be used
* @return false A hash-based groupby should not be used
*/
bool can_use_hash_groupby(table_view const& keys,
                          std::vector<aggregation_request> const& requests) {
  // The hash-based path is usable only if every requested aggregation is one
  // the single-pass hash implementation supports; empty requests qualify.
  for (auto const& request : requests) {
    for (auto const& agg : request.aggregations) {
      if (not is_hash_aggregation(agg->kind)) { return false; }
    }
  }
  return true;
}
// Hash-based groupby
// Hash-groupby entry point: dispatches on whether any key column has nulls
// so the templated implementation can skip null handling when possible.
// Returns (unique key rows, per-request aggregation results).
std::pair<std::unique_ptr<table>, std::vector<aggregation_result>> groupby(
table_view const& keys, std::vector<aggregation_request> const& requests,
include_nulls include_null_keys, hipStream_t stream,
rmm::mr::device_memory_resource* mr)
{
experimental::detail::result_cache cache(requests.size());
std::unique_ptr<table> unique_keys;
if (has_nulls(keys)) {
unique_keys = groupby_null_templated<true>(keys, requests, &cache,
include_null_keys, stream, mr);
} else {
unique_keys = groupby_null_templated<false>(keys, requests, &cache,
include_null_keys, stream, mr);
}
return std::make_pair(std::move(unique_keys), extract_results(requests, cache));
}
} // namespace hash
} // namespace detail
} // namespace groupby
} // namespace experimental
} // namespace cudf
| 769d1cce68d80e49ea8fa4c1bf94733ec08c9b64.cu | /*
* Copyright (c) 2019-20, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <groupby/common/utils.hpp>
#include <groupby/hash/groupby_kernels.cuh>
#include <hash/concurrent_unordered_map.cuh>
#include <cudf/detail/utilities/hash_functions.cuh>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/column/column.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/groupby.hpp>
#include <cudf/detail/groupby.hpp>
#include <cudf/detail/gather.cuh>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/replace.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/table/table_device_view.cuh>
#include <cudf/table/row_operators.cuh>
#include <cudf/types.hpp>
#include <cudf/aggregation.hpp>
#include <cudf/detail/aggregation/aggregation.hpp>
#include <cudf/detail/aggregation/aggregation.cuh>
#include <cudf/detail/aggregation/result_cache.hpp>
#include <cudf/utilities/traits.hpp>
#include <memory>
#include <utility>
namespace cudf {
namespace experimental {
namespace groupby {
namespace detail {
namespace hash {
namespace {
// This is a temporary fix due to compiler bug and we can resort back to
// constexpr once cuda 10.2 becomes RAPIDS's minimum compiler version
#if 0
/**
* @brief List of aggregation operations that can be computed with a hash-based
* implementation.
*/
constexpr std::array<aggregation::Kind, 7> hash_aggregations{
aggregation::SUM, aggregation::MIN, aggregation::MAX,
aggregation::COUNT_VALID, aggregation::COUNT_ALL,
aggregation::ARGMIN, aggregation::ARGMAX};
template <class T, size_t N>
constexpr bool array_contains(std::array<T, N> const& haystack, T needle) {
for (auto i = 0u; i < N; ++i) {
if (haystack[i] == needle) return true;
}
return false;
}
#endif
/**
* @brief Indicates whether the specified aggregation operation can be computed
* with a hash-based implementation.
*
* @param t The aggregation operation to verify
* @return true `t` is valid for a hash based groupby
* @return false `t` is invalid for a hash based groupby
*/
bool constexpr is_hash_aggregation(aggregation::Kind t) {
  // Temporary explicit enumeration — revert to
  // array_contains(hash_aggregations, t) once CUDA 10.2 becomes RAPIDS's
  // minimum compiler version (works around a compiler bug).
  switch (t) {
    case aggregation::SUM:
    case aggregation::MIN:
    case aggregation::MAX:
    case aggregation::COUNT_VALID:
    case aggregation::COUNT_ALL:
    case aggregation::ARGMIN:
    case aggregation::ARGMAX: return true;
    default: return false;
  }
}
// flatten aggs to filter in single pass aggs
// Returns one (values column, agg kind, original request index) triple per
// hash-computable aggregation across all requests.
std::tuple<table_view, std::vector<aggregation::Kind>, std::vector<size_t> >
flatten_single_pass_aggs(std::vector<aggregation_request> const& requests) {
std::vector<column_view> columns;
std::vector<aggregation::Kind> agg_kinds;
std::vector<size_t> col_ids;
for (size_t i = 0; i < requests.size(); i++) {
auto const& request = requests[i];
auto const& agg_v = request.aggregations;
// Record one flattened aggregation against request i's values column.
auto insert_agg = [&agg_kinds, &columns, &col_ids, &request, i]
(aggregation::Kind k) {
agg_kinds.push_back(k);
columns.push_back(request.values);
col_ids.push_back(i);
};
for (auto &&agg : agg_v) {
if (is_hash_aggregation(agg->kind)) {
if (is_fixed_width(request.values.type()) or
agg->kind == aggregation::COUNT_VALID or
agg->kind == aggregation::COUNT_ALL) {
insert_agg(agg->kind);
} else if (request.values.type().id() == type_id::STRING) {
// For string type, only ARGMIN, ARGMAX, MIN, and MAX are supported
if (agg->kind == aggregation::ARGMIN or
agg->kind == aggregation::ARGMAX) {
insert_agg(agg->kind);
} else if (agg->kind == aggregation::MIN) {
// String MIN/MAX are computed indirectly via ARGMIN/ARGMAX; the
// final value is gathered later in sparse_to_dense_results.
insert_agg(aggregation::ARGMIN);
} else if (agg->kind == aggregation::MAX) {
insert_agg(aggregation::ARGMAX);
}
}
// NOTE: non-fixed-width, non-string values fall through with no entry.
}
}
}
return std::make_tuple(table_view(columns),
std::move(agg_kinds), std::move(col_ids));
}
/**
* @brief Gather sparse results into dense using `gather_map` and add to
* `dense_cache`
*
* @see groupby_null_templated()
*/
// Compacts each sparse aggregation result into a dense column (one row per
// populated group, via gather_map[0..map_size)) and stores it in
// dense_results keyed by (request index, aggregation).
void sparse_to_dense_results(
std::vector<aggregation_request> const& requests,
experimental::detail::result_cache const& sparse_results,
experimental::detail::result_cache* dense_results,
rmm::device_vector<size_type> const& gather_map, size_type map_size,
cudaStream_t stream,
rmm::mr::device_memory_resource* mr)
{
for (size_t i = 0; i < requests.size(); i++) {
auto const& agg_v = requests[i].aggregations;
auto const& col = requests[i].values;
// Given an aggregation, this will get the result from sparse_results and
// convert and return dense, compacted result
auto to_dense_agg_result =
[&sparse_results, &gather_map, map_size, i, mr, stream]
(auto const& agg) {
auto s = sparse_results.get_result(i, agg);
auto dense_result_table =
experimental::detail::gather(
table_view({s}),
gather_map.begin(),
gather_map.begin() + map_size,
false, mr, stream);
return std::move(dense_result_table->release()[0]);
};
// Enables conversion of ARGMIN/ARGMAX into MIN/MAX
auto transformed_result =
[&col, to_dense_agg_result, mr, stream]
(auto const& agg_kind) {
auto transformed_agg = std::make_unique<aggregation>(agg_kind);
auto arg_result = to_dense_agg_result(transformed_agg);
// We make a view of ARG(MIN/MAX) result without a null mask and gather
// using this map. The values in data buffer of ARG(MIN/MAX) result
// corresponding to null values was initialized to ARG(MIN/MAX)_SENTINEL
// which is an out of bounds index value (-1) and causes the gathered
// value to be null.
column_view null_removed_map(data_type(type_to_id<size_type>()),
arg_result->size(),
static_cast<void const*>(arg_result->view().template data<size_type>()));
auto transformed_result = experimental::detail::gather(table_view({col}),
null_removed_map, false, arg_result->nullable(), false, mr, stream);
return std::move(transformed_result->release()[0]);
};
// Route each requested aggregation to the appropriate densification path;
// aggregations absent from sparse_results are silently skipped.
for (auto &&agg : agg_v) {
if (agg->kind == aggregation::COUNT_VALID or
agg->kind == aggregation::COUNT_ALL) {
dense_results->add_result(i, agg, to_dense_agg_result(agg));
}
else if (col.type().id() == type_id::STRING and
(agg->kind == aggregation::MAX or agg->kind == aggregation::MIN)) {
// String MIN/MAX were computed as ARGMIN/ARGMAX; gather the values back.
if (agg->kind == aggregation::MAX) {
dense_results->add_result(i, agg,
transformed_result(aggregation::ARGMAX));
}
else if (agg->kind == aggregation::MIN) {
dense_results->add_result(i, agg,
transformed_result(aggregation::ARGMIN));
}
}
else if (sparse_results.has_result(i, agg)) {
dense_results->add_result(i, agg, to_dense_agg_result(agg));
}
}
}
}
/**
* @brief Construct hash map that uses row comparator and row hasher on
* `d_keys` table and stores indices
*/
template <bool keys_have_nulls>
auto create_hash_map(table_device_view const& d_keys,
include_nulls include_null_keys,
cudaStream_t stream = 0)
{
// max size_type is the sentinel marking an empty slot (both key and value).
size_type constexpr unused_key{std::numeric_limits<size_type>::max()};
size_type constexpr unused_value{std::numeric_limits<size_type>::max()};
using map_type =
concurrent_unordered_map<size_type, size_type,
row_hasher<default_hash, keys_have_nulls>,
row_equality_comparator<keys_have_nulls>>;
using allocator_type = typename map_type::allocator_type;
// When null keys are included, two rows with nulls in the same position
// compare equal and land in the same group.
bool const null_keys_are_equal{include_null_keys == include_nulls::YES};
row_hasher<default_hash, keys_have_nulls> hasher{d_keys};
row_equality_comparator<keys_have_nulls> rows_equal{
d_keys, d_keys, null_keys_are_equal};
return map_type::create(compute_hash_table_size(d_keys.num_rows()),
unused_key, unused_value, hasher, rows_equal,
allocator_type(), stream);
}
/**
* @brief Computes all aggregations from `requests` that require a single pass
* over the data and stores the results in `sparse_results`
*
* @see groupby_null_templated()
*/
template <bool keys_have_nulls, typename Map>
void compute_single_pass_aggs(table_view const& keys,
std::vector<aggregation_request> const& requests,
experimental::detail::result_cache* sparse_results,
Map& map, include_nulls include_null_keys,
cudaStream_t stream)
{
// flatten the aggs to a table that can be operated on by aggregate_row
table_view flattened_values;
std::vector<aggregation::Kind> aggs;
std::vector<size_t> col_ids;
std::tie(flattened_values, aggs, col_ids) = flatten_single_pass_aggs(requests);
// make table that will hold sparse results
std::vector<std::unique_ptr<column>> sparse_columns;
std::transform(flattened_values.begin(), flattened_values.end(),
aggs.begin(), std::back_inserter(sparse_columns),
[stream] (auto const& col, auto const& agg) {
// COUNT results are never null; other aggs inherit input nullability.
bool nullable =
(agg == aggregation::COUNT_VALID or agg == aggregation::COUNT_ALL)
? false : col.has_nulls();
auto mask_flag = (nullable) ? mask_state::ALL_NULL
: mask_state::UNALLOCATED;
return make_fixed_width_column(
experimental::detail::target_type(col.type(), agg),
col.size(), mask_flag, stream);
});
table sparse_table(std::move(sparse_columns));
mutable_table_view table_view = sparse_table.mutable_view();
// Seed each sparse column with its aggregation's identity element.
experimental::detail::initialize_with_identity(table_view, aggs, stream);
// prepare to launch kernel to do the actual aggregation
auto d_sparse_table = mutable_table_device_view::create(sparse_table);
auto d_values = table_device_view::create(flattened_values);
rmm::device_vector<aggregation::Kind> d_aggs(aggs);
bool skip_key_rows_with_nulls = keys_have_nulls and
include_null_keys == include_nulls::NO;
if (skip_key_rows_with_nulls) {
// Combined validity over all key columns: rows with any null key are
// masked out of the aggregation.
auto row_bitmask{bitmask_and(keys, rmm::mr::get_default_resource(), stream)};
thrust::for_each_n(rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator(0), keys.num_rows(),
hash::compute_single_pass_aggs<true, Map>{
map, keys.num_rows(), *d_values, *d_sparse_table, d_aggs.data().get(),
static_cast<bitmask_type*>(row_bitmask.data())});
} else {
thrust::for_each_n(rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator(0), keys.num_rows(),
hash::compute_single_pass_aggs<false, Map>{
map, keys.num_rows(), *d_values, *d_sparse_table, d_aggs.data().get(),
nullptr});
}
// Add results back to sparse_results cache
auto sparse_result_cols = sparse_table.release();
for (size_t i = 0; i < aggs.size(); i++) {
sparse_results->add_result(col_ids[i],
std::make_unique<aggregation>(aggs[i]),
std::move(sparse_result_cols[i]));
}
}
/**
* @brief Computes and returns a device vector containing all populated keys in
* `map`.
*/
// Returns (vector of hash-table slot keys that are occupied, count of them).
// NOTE(review): `map` is taken by value — presumably a lightweight view over
// device-side storage; confirm the copy is cheap.
template <typename Map>
std::pair<rmm::device_vector<size_type>, size_type> extract_populated_keys(
Map map, size_type num_keys, cudaStream_t stream = 0)
{
rmm::device_vector<size_type> populated_keys(num_keys);
// Project each (key, value) slot to its key.
auto get_key = [] __device__ (auto const& element) {
size_type key, value;
thrust::tie(key, value) = element;
return key;
};
// Keep only keys that differ from the empty-slot sentinel.
auto end_it = thrust::copy_if(rmm::exec_policy(stream)->on(stream),
thrust::make_transform_iterator(map.data(), get_key),
thrust::make_transform_iterator(map.data() + map.capacity(), get_key),
populated_keys.begin(),
[unused_key = map.get_unused_key()] __device__ (size_type key) {
return key != unused_key;
});
size_type map_size = end_it - populated_keys.begin();
return std::make_pair(std::move(populated_keys), map_size);
}
/**
* @brief Computes groupby using hash table.
*
* First, we create a hash table that stores the indices of unique rows in
* `keys`. The upper limit on the number of values in this map is the number
* of rows in `keys`.
*
* To store the results of aggregations, we create temporary sparse columns
* which have the same size as input value columns. Using the hash map, we
* determine the location within the sparse column to write the result of the
* aggregation into.
*
* The sparse column results of all aggregations are stored into the cache
* `sparse_results`. This enables the use of previously calculated results in
* other aggregations.
*
* All the aggregations which can be computed in a single pass are computed
* first, in a combined kernel. Then using these results, aggregations that
* require multiple passes, will be computed.
*
* Finally, using the hash map, we generate a vector of indices of populated
* values in sparse result columns. Then, for each aggregation originally
* requested in `requests`, we gather sparse results into a column of dense
* results using the aforementioned index vector. Dense results are stored into
* the in/out parameter `cache`.
*
*/
template <bool keys_have_nulls>
std::unique_ptr<table> groupby_null_templated(
table_view const& keys, std::vector<aggregation_request> const& requests,
experimental::detail::result_cache* cache,
include_nulls include_null_keys, cudaStream_t stream,
rmm::mr::device_memory_resource* mr)
{
auto d_keys = table_device_view::create(keys);
// Hash map from unique key rows to the slot holding their sparse results.
auto map = create_hash_map<keys_have_nulls>(*d_keys, include_null_keys, stream);
// Cache of sparse results where the location of aggregate value in each
// column is indexed by the hash map
experimental::detail::result_cache sparse_results(requests.size());
// Compute all single pass aggs first
compute_single_pass_aggs<keys_have_nulls>(
keys, requests, &sparse_results, *map, include_null_keys, stream);
// Now continue with remaining multi-pass aggs
// <placeholder>
// Extract the populated indices from the hash map and create a gather map.
// Gathering using this map from sparse results will give dense results.
rmm::device_vector<size_type> gather_map;
size_type map_size;
std::tie(gather_map, map_size) =
extract_populated_keys(*map, keys.num_rows(), stream);
// Compact all results from sparse_results and insert into cache
sparse_to_dense_results(requests, sparse_results, cache, gather_map, map_size,
stream, mr);
// Gather the unique key rows themselves with the same index vector, so the
// returned key table lines up row-for-row with the dense results.
auto unique_keys = experimental::detail::gather(
keys, gather_map.begin(), gather_map.begin() + map_size, false, mr, stream);
return unique_keys;
}
} // namespace
/**
* @brief Indicates if a set of aggregation requests can be satisfied with a
* hash-based groupby implementation.
*
* @param keys The table of keys
* @param requests The set of columns to aggregate and the aggregations to
* perform
* @return true A hash-based groupby should be used
* @return false A hash-based groupby should not be used
*/
bool can_use_hash_groupby(table_view const& keys,
                          std::vector<aggregation_request> const& requests) {
  // Hash-based groupby applies only when every aggregation of every request
  // is supported by the hash implementation; bail out at the first one that
  // is not.
  for (aggregation_request const& request : requests) {
    for (auto const& agg : request.aggregations) {
      if (not is_hash_aggregation(agg->kind)) {
        return false;
      }
    }
  }
  return true;
}
// Hash-based groupby
// Hash-based groupby entry point: dispatches on key nullability at compile
// time, accumulates dense per-request results into a cache, and returns the
// unique key rows together with the extracted aggregation results.
std::pair<std::unique_ptr<table>, std::vector<aggregation_result>> groupby(
    table_view const& keys, std::vector<aggregation_request> const& requests,
    include_nulls include_null_keys, cudaStream_t stream,
    rmm::mr::device_memory_resource* mr)
{
  experimental::detail::result_cache cache(requests.size());
  // Selecting the template instantiation here lets the non-null path skip
  // per-row null-mask handling entirely.
  auto unique_keys =
      has_nulls(keys)
          ? groupby_null_templated<true>(keys, requests, &cache,
                                         include_null_keys, stream, mr)
          : groupby_null_templated<false>(keys, requests, &cache,
                                          include_null_keys, stream, mr);
  return std::make_pair(std::move(unique_keys),
                        extract_results(requests, cache));
}
} // namespace hash
} // namespace detail
} // namespace groupby
} // namespace experimental
} // namespace cudf
|
5e105b808ac0fdd4d6416ff7aa6c911f699f3963.hip | // !!! This is a file automatically generated by hipify!!!
#define GRB_USE_CUDA
#define private public
#include <iostream>
#include <algorithm>
#include <string>
#include <cstdio>
#include <cstdlib>
#include <boost/program_options.hpp>
#include "graphblas/graphblas.hpp"
#include "test/test.hpp"
// Micro-benchmark for graphblas::assign on a masked vector: reads a Matrix
// Market file (only to obtain nrows), builds a small mask and a dense vector,
// then times one warmup call and NUM_ITER timed calls.
int main( int argc, char** argv )
{
std::vector<graphblas::Index> row_indices;
std::vector<graphblas::Index> col_indices;
std::vector<float> values;
graphblas::Index nrows, ncols, nvals;
// Parse arguments
bool DEBUG = true;
// Read in sparse matrix
if (argc < 2) {
fprintf(stderr, "Usage: %s [matrix-market-filename]\n", argv[0]);
exit(1);
} else {
readMtx(argv[argc-1], &row_indices, &col_indices, &values, &nrows, &ncols,
&nvals, 0, DEBUG);
}
// Vector mask: three nonzero entries at indices 1, 2, 3.
graphblas::Vector<float> m(nrows);
std::vector<graphblas::Index> m_ind = {1, 2, 3};
std::vector<float> m_val = {1.f, 1.f, 1.f};
CHECK( m.build(&m_ind, &m_val, 3, GrB_NULL) );
CHECK( m.size(&nrows) );
if( DEBUG ) CHECK( m.print() );
// Vector v: dense, filled with -1 except element 1 set to 0.
graphblas::Vector<float> v(nrows);
CHECK( v.fill(-1.f) );
CHECK( v.setElement(0.f, 1) );
CHECK( v.size(&nrows) );
// Descriptor
graphblas::Descriptor desc;
//CHECK( desc.set(graphblas::GrB_MASK, graphblas::GrB_SCMP) );
// Warmup
CpuTimer warmup;
warmup.Start();
graphblas::assign<float, float>(&v, &m, GrB_NULL, (float)1.f, GrB_ALL, nrows,
&desc);
warmup.Stop();
CpuTimer cpu_vxm;
//hipProfilerStart();
cpu_vxm.Start();
int NUM_ITER = 1;//0;
for( int i=0; i<NUM_ITER; i++ )
{
graphblas::assign<float, float>(&v, &m, GrB_NULL, (float)1.f, GrB_ALL,
nrows, &desc);
}
//hipProfilerStop();
cpu_vxm.Stop();
// flop stays 0, so the reported warmup GFLOP/s figure is always 0.
float flop = 0;
if( DEBUG ) std::cout << "warmup, " << warmup.ElapsedMillis() << ", " <<
flop/warmup.ElapsedMillis()/1000000.0 << "\n";
float elapsed_vxm = cpu_vxm.ElapsedMillis();
// Report the per-iteration average of the timed loop (milliseconds).
std::cout << "vxm, " << elapsed_vxm/NUM_ITER << "\n";
if( DEBUG ) v.print();
return 0;
}
| 5e105b808ac0fdd4d6416ff7aa6c911f699f3963.cu | #define GRB_USE_CUDA
#define private public
#include <iostream>
#include <algorithm>
#include <string>
#include <cstdio>
#include <cstdlib>
#include <boost/program_options.hpp>
#include "graphblas/graphblas.hpp"
#include "test/test.hpp"
// Micro-benchmark for graphblas::assign on a masked vector (CUDA variant of
// the HIP benchmark above): reads a Matrix Market file (only to obtain
// nrows), builds a small mask and a dense vector, then times one warmup call
// and NUM_ITER timed calls.
int main( int argc, char** argv )
{
std::vector<graphblas::Index> row_indices;
std::vector<graphblas::Index> col_indices;
std::vector<float> values;
graphblas::Index nrows, ncols, nvals;
// Parse arguments
bool DEBUG = true;
// Read in sparse matrix
if (argc < 2) {
fprintf(stderr, "Usage: %s [matrix-market-filename]\n", argv[0]);
exit(1);
} else {
readMtx(argv[argc-1], &row_indices, &col_indices, &values, &nrows, &ncols,
&nvals, 0, DEBUG);
}
// Vector mask: three nonzero entries at indices 1, 2, 3.
graphblas::Vector<float> m(nrows);
std::vector<graphblas::Index> m_ind = {1, 2, 3};
std::vector<float> m_val = {1.f, 1.f, 1.f};
CHECK( m.build(&m_ind, &m_val, 3, GrB_NULL) );
CHECK( m.size(&nrows) );
if( DEBUG ) CHECK( m.print() );
// Vector v: dense, filled with -1 except element 1 set to 0.
graphblas::Vector<float> v(nrows);
CHECK( v.fill(-1.f) );
CHECK( v.setElement(0.f, 1) );
CHECK( v.size(&nrows) );
// Descriptor
graphblas::Descriptor desc;
//CHECK( desc.set(graphblas::GrB_MASK, graphblas::GrB_SCMP) );
// Warmup
CpuTimer warmup;
warmup.Start();
graphblas::assign<float, float>(&v, &m, GrB_NULL, (float)1.f, GrB_ALL, nrows,
&desc);
warmup.Stop();
CpuTimer cpu_vxm;
//cudaProfilerStart();
cpu_vxm.Start();
int NUM_ITER = 1;//0;
for( int i=0; i<NUM_ITER; i++ )
{
graphblas::assign<float, float>(&v, &m, GrB_NULL, (float)1.f, GrB_ALL,
nrows, &desc);
}
//cudaProfilerStop();
cpu_vxm.Stop();
// flop stays 0, so the reported warmup GFLOP/s figure is always 0.
float flop = 0;
if( DEBUG ) std::cout << "warmup, " << warmup.ElapsedMillis() << ", " <<
flop/warmup.ElapsedMillis()/1000000.0 << "\n";
float elapsed_vxm = cpu_vxm.ElapsedMillis();
// Report the per-iteration average of the timed loop (milliseconds).
std::cout << "vxm, " << elapsed_vxm/NUM_ITER << "\n";
if( DEBUG ) v.print();
return 0;
}
|
45026d2bdd59483d7dc1d126812336c27904cc91.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
extern "C" {
#include "avgpool_layer.h"
#include "hip/hip_runtime.h"
}
// Global average pooling, forward pass. One thread per (batch, channel)
// pair: thread `id` averages the w*h spatial positions of channel `id % c`
// in batch `id / c` and writes the mean to output[channel + c*batch].
// n must equal batch * channels.
__global__ void forward_avgpool_layer_kernel(int n, int w, int h, int c, float *input, float *output)
{
    int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (id >= n) return;

    int channel = id % c;
    int batch = id / c;
    int out_index = channel + c*batch;
    int in_base = h*w*(channel + batch*c);

    // Accumulate in a register; additions happen in the same order as the
    // original in-place global-memory accumulation, so the result is
    // bit-identical.
    float sum = 0;
    for (int i = 0; i < w*h; ++i) {
        sum += input[in_base + i];
    }
    output[out_index] = sum / (w*h);
}
//__global__ void backward_avgpool_layer_kernel(int n, int w, int h, int c, float *in_delta, float *out_delta)
//{
// int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
// if(id >= n) return;
//
// int k = id % c;
// id /= c;
// int b = id;
//
// int i;
// int out_index = (k + c*b);
// for(i = 0; i < w*h; ++i){
// int in_index = i + h*w*(k + b*c);
// in_delta[in_index] += out_delta[out_index] / (w*h);
// }
//}
// Host wrapper: launches the average-pool forward kernel with one thread per
// (batch, channel) pair, then surfaces any launch error via check_error.
extern "C" void forward_avgpool_layer_gpu(avgpool_layer layer, network net)
{
size_t n = layer.c*layer.batch;
hipLaunchKernelGGL(( forward_avgpool_layer_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, layer.w, layer.h, layer.c, net.input_gpu, layer.output_gpu);
check_error(hipPeekAtLastError());
}
//extern "C" void backward_avgpool_layer_gpu(avgpool_layer layer, network net)
//{
// size_t n = layer.c*layer.batch;
//
// backward_avgpool_layer_kernel<<<cuda_gridsize(n), BLOCK>>>(n, layer.w, layer.h, layer.c, net.delta_gpu, layer.delta_gpu);
// check_error(hipPeekAtLastError());
//}
| 45026d2bdd59483d7dc1d126812336c27904cc91.cu | #include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
extern "C" {
#include "avgpool_layer.h"
#include "cuda.h"
}
// Global average pooling, forward pass. One thread per (batch, channel)
// pair (n = batch * channels): sums the w*h spatial positions of that
// channel's feature map in global memory, then divides by the area.
__global__ void forward_avgpool_layer_kernel(int n, int w, int h, int c, float *input, float *output)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= n) return;
int k = id % c;   // channel index
id /= c;
int b = id;       // batch index
int i;
int out_index = (k + c*b);
output[out_index] = 0;
// Accumulates directly in the output element, then normalizes in place.
for(i = 0; i < w*h; ++i){
int in_index = i + h*w*(k + b*c);
output[out_index] += input[in_index];
}
output[out_index] /= w*h;
}
//__global__ void backward_avgpool_layer_kernel(int n, int w, int h, int c, float *in_delta, float *out_delta)
//{
// int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
// if(id >= n) return;
//
// int k = id % c;
// id /= c;
// int b = id;
//
// int i;
// int out_index = (k + c*b);
// for(i = 0; i < w*h; ++i){
// int in_index = i + h*w*(k + b*c);
// in_delta[in_index] += out_delta[out_index] / (w*h);
// }
//}
// Host wrapper: launches the average-pool forward kernel with one thread per
// (batch, channel) pair, then surfaces any launch error via check_error.
extern "C" void forward_avgpool_layer_gpu(avgpool_layer layer, network net)
{
size_t n = layer.c*layer.batch;
forward_avgpool_layer_kernel<<<cuda_gridsize(n), BLOCK>>>(n, layer.w, layer.h, layer.c, net.input_gpu, layer.output_gpu);
check_error(cudaPeekAtLastError());
}
//extern "C" void backward_avgpool_layer_gpu(avgpool_layer layer, network net)
//{
// size_t n = layer.c*layer.batch;
//
// backward_avgpool_layer_kernel<<<cuda_gridsize(n), BLOCK>>>(n, layer.w, layer.h, layer.c, net.delta_gpu, layer.delta_gpu);
// check_error(cudaPeekAtLastError());
//}
|
615991ec81619414162f596106c2f2d9b6ccfe25.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//nvcc Final.cu -o out -lglut -lGLEW -lGL -lm -ccbin clang-3.8 -lstdc++
///////////////////////////////////////////////////
// Call to general libraries //
///////////////////////////////////////////////////
#include <cstdio>
#include <stdlib.h>
#include <stdint.h>
#include <assert.h>
#include <math.h>
#include <complex.h>
#include <string.h>
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include <GL/glut.h>
#include <GL/freeglut_ext.h>
#include <cuda_gl_interop.h>
///////////////////////////////////////////////////
// Call to cuda funtions //
///////////////////////////////////////////////////
#include "FFT.cu"
#include "convolutions.cu"
#include "threshold.cu"
#include "RE_transform.cu"
#include "equalization.cu"
#include "noise.cu"
#include "backup.cu"
#include "compression.cu"
using namespace std;
///////////////////////////////////////////////////
// Defining variables //
///////////////////////////////////////////////////
#define WIDTH 1280
#define HEIGHT 960
#define DIM 1600
#define PI 3.14159265
static int sub_00;
static int sub_01;
static int sub_02;
static int sub_03;
static int sub_04;
bool Equalization = 0, Flag_Filt = 0, Flag_Med = 0, Flag_PPnoise = 0,Flag_Pix = 0,Flag_req=0;
bool Flag_Reset = 0, Flag_Ero = 0, Flag_Dil = 0, Flag_Gray = 0, Flag_BW = 0, Flag_Fourier = 0;
long long int sizeImage;
float Scale_Factor;
float Rotation_Factor;
unsigned long widht, height;
int Num_Cols, Num_Rows, Dim_Con, Num_Rows_Fourier, Num_Cols_Fourier, Max_E;
size_t or_size, mor_size, equ_size, fou_size;
unsigned char *Image_R, *Image_G, *Image_B;
unsigned char *Image_R_bk, *Image_G_bk, *Image_B_bk;
unsigned char *Equalizar_R, *Equalizar_G, *Equalizar_B;
unsigned char *Convol_R, *Convol_G, *Convol_B;
unsigned char *Fourier_R, *Fourier_G, *Fourier_B;
unsigned char *Morfo_R, *Morfo_G, *Morfo_B;
float *Val_Real, *Val_Real_out, *Val_Imag, *Val_Imag_out;
unsigned int *d_his_r;
unsigned int *d_his_g;
unsigned int *d_his_b;
float *DMask;
float *Mask = (float*)malloc(625*sizeof(float));
///////////////////////////////////////////////////
// Serial part of cuda funtions //
///////////////////////////////////////////////////
int Threshold(unsigned char *r_data, unsigned char *g_data, unsigned char *b_data, size_t pitch);
void Equalization_PC (unsigned char *r_data, unsigned char *g_data,
unsigned char *b_data, size_t pitch,
unsigned char *r_dataE, unsigned char *g_dataE,
unsigned char *b_dataE );
void FFT();
///////////////////////////////////////////////////
// Function to display with glut //
///////////////////////////////////////////////////
// Glut display callback: maps a GL pixel buffer into device memory, runs
// whichever image operations the menu flagged (each flag is consumed and
// cleared), then scales and rotates the current image into the buffer and
// draws it. Operates on the global per-channel device images Image_R/G/B.
void display(){
GLuint bufferObj;
struct cudaGraphicsResource* resource;
bool Flag_conv = 1;
glClearColor( 255.0, 255.0, 255.0, 1.0 );
glClear( GL_COLOR_BUFFER_BIT );
// A fresh pixel-unpack buffer is created, registered with the HIP runtime,
// and mapped so kernels can write the final RGBA frame directly into it.
glGenBuffers( 1, &bufferObj );
glBindBuffer( GL_PIXEL_UNPACK_BUFFER_ARB, bufferObj );
glBufferData( GL_PIXEL_UNPACK_BUFFER_ARB, widht * height * 4, NULL, GL_DYNAMIC_DRAW_ARB );
hipGraphicsGLRegisterBuffer( &resource, bufferObj, hipGraphicsMapFlagsNone );
uchar4* devPtr;
size_t size;
hipGraphicsMapResources( 1, &resource, NULL ) ;
hipGraphicsResourceGetMappedPointer( (void**)&devPtr, &size, resource );
// Launch geometries: image-sized, DIM x DIM scaled canvas, and window-sized.
dim3 grids(Num_Cols/16,Num_Rows/16);
dim3 threads(16, 16);
dim3 grids_01(DIM/16,DIM/16);
dim3 threads_01(16, 16);
dim3 grids_02(widht/16,height/16);
dim3 threads_02(16, 16);
///////////////////////////////////////////////////
// Cuda functions called by the menu //
///////////////////////////////////////////////////
if(Flag_Pix){
hipLaunchKernelGGL(( Pixelado), dim3(grids),dim3(threads), 0, 0, Image_R, Image_G, Image_B,or_size, Image_R, Image_G, Image_B);
Flag_Pix = 0;}
if(Flag_req){
hipLaunchKernelGGL(( Requant), dim3(grids),dim3(threads), 0, 0, Image_R, Image_G, Image_B,or_size, Image_R, Image_G, Image_B);
Flag_req = 0;}
// Reset restores the untouched backup copies into the working channels.
if(Flag_Reset){
hipLaunchKernelGGL(( Backup), dim3(grids),dim3(threads), 0, 0, Image_R_bk, Image_G_bk, Image_B_bk, or_size,
Image_R, Image_G, Image_B);
Flag_Reset = 0;}
if(Flag_Ero){
hipLaunchKernelGGL(( erode), dim3(grids),dim3(threads), 0, 0, Image_R, Image_G, Image_B,
or_size, Image_R, Image_G, Image_B, Num_Cols, Num_Rows, Dim_Con, Max_E);
Flag_Ero = 0;}
if(Flag_Dil){
hipLaunchKernelGGL(( dilate), dim3(grids),dim3(threads), 0, 0, Image_R, Image_G, Image_B,
or_size, Image_R, Image_G, Image_B, Num_Cols, Num_Rows, Dim_Con, Max_E);
Flag_Dil = 0;}
if(Flag_Gray){
hipLaunchKernelGGL(( grayscale), dim3(grids),dim3(threads), 0, 0, Image_R, Image_G, Image_B, or_size, d_his_r);
Flag_Gray = 0;}
if(Flag_BW){
Threshold (Image_R, Image_G, Image_B, or_size );
Flag_BW = 0;}
if (Flag_PPnoise){
hipLaunchKernelGGL(( PPnoise), dim3(grids),dim3(threads), 0, 0, Image_R, Image_G, Image_B, or_size, 1, rand()%100);
Flag_PPnoise = 0;}
if(Flag_Fourier){
FFT();
Flag_Fourier = 0;}
// The filtering operations render into the Convol_* buffers and then copy
// the result back over the working image via the Backup kernel.
if (Equalization){
Equalization_PC (Image_R, Image_G, Image_B, or_size,Convol_R, Convol_G, Convol_B );
hipLaunchKernelGGL(( Backup), dim3(grids),dim3(threads), 0, 0, Convol_R, Convol_G, Convol_B, or_size,
Image_R, Image_G, Image_B);
Equalization=0;
}
if (Flag_Med) {
hipLaunchKernelGGL(( median_filter), dim3(grids),dim3(threads), 0, 0, Image_R, Image_G, Image_B, or_size,
Convol_R, Convol_G, Convol_B, Num_Cols, Num_Rows, 3);
hipLaunchKernelGGL(( Backup), dim3(grids),dim3(threads), 0, 0, Convol_R, Convol_G, Convol_B, or_size,
Image_R, Image_G, Image_B);
Flag_Med=0;
}
if (Flag_Filt) {
hipLaunchKernelGGL(( Operador_Convolucion), dim3(grids),dim3(threads), 0, 0, Image_R, Image_G, Image_B,
or_size, Convol_R, Convol_G, Convol_B, Num_Cols, Num_Rows, DMask, Dim_Con);
hipLaunchKernelGGL(( Backup), dim3(grids),dim3(threads), 0, 0, Convol_R, Convol_G, Convol_B, or_size,
Image_R, Image_G, Image_B);
Flag_Filt=0;
}
// NOTE(review): Flag_conv is always 1 here, so the else branch below is
// currently dead code — confirm whether it was meant to be toggled.
if (Flag_conv) {
hipLaunchKernelGGL(( Scale), dim3(grids_01),dim3(threads_01), 0, 0, Image_R, Image_G, Image_B, Morfo_R, Morfo_G, Morfo_B,
or_size, mor_size, Scale_Factor, Num_Cols, Num_Rows);
}else{
hipLaunchKernelGGL(( Scale), dim3(grids_01),dim3(threads_01), 0, 0, Convol_R, Convol_G, Convol_B, Morfo_R, Morfo_G, Morfo_B,
or_size, mor_size, Scale_Factor, Num_Cols, Num_Rows);
}
// Rotate writes the final RGBA pixels straight into the mapped GL buffer.
hipLaunchKernelGGL(( Rotate), dim3(grids_02),dim3(threads_02), 0, 0, devPtr, Morfo_R, Morfo_G, Morfo_B,
mor_size, Rotation_Factor, DIM, DIM);
hipGraphicsUnmapResources( 1, &resource, NULL ) ;
glDrawPixels( widht, height, GL_RGBA, GL_UNSIGNED_BYTE, 0 );
glutSwapBuffers();
hipGraphicsUnregisterResource( resource ) ;
glBindBuffer( GL_PIXEL_UNPACK_BUFFER_ARB, 0 );
glDeleteBuffers( 1, &bufferObj );
}
///////////////////////////////////////////////////
// Serial funtions //
///////////////////////////////////////////////////
// Binarizes the RGB image at its median gray level.
// Builds a grayscale histogram on the device, finds the intensity whose
// cumulative count first exceeds half the pixel count, then thresholds the
// image at that level via the `binary` kernel.
// Returns the threshold value (0-255) that was applied.
int Threshold(unsigned char *r_data, unsigned char *g_data, unsigned char *b_data, size_t pitch) {
    unsigned int his_size = sizeof(unsigned int)*256;
    unsigned int *his = (unsigned int*)malloc(his_size);
    hipMemset( d_his_r, 0, his_size);
    dim3 grids(Num_Cols,Num_Rows);
    dim3 threads(1, 1);
    hipLaunchKernelGGL(( grayscale), dim3(grids),dim3(threads), 0, 0, r_data, g_data, b_data, pitch, d_his_r);
    hipMemcpy(his, d_his_r, his_size, hipMemcpyDeviceToHost);
    // Median search. `um` defaults to 255 so it is never read uninitialized
    // if the cumulative count never crosses the midpoint (fixes UB in the
    // previous version).
    int m = Num_Cols*Num_Rows/2, h = 0, um = 255, i;
    for (i = 0; i < 256; i++) {
        h += his[i];
        if (h > m) {
            um = i;
            break;
        }
    }
    free(his);  // host histogram copy was previously leaked on every call
    hipLaunchKernelGGL(( binary), dim3(grids),dim3(threads), 0, 0, r_data, g_data, b_data, pitch, um);
    return um;
}
// Runs the Fourier transform over the global image channels: FFT_X is
// launched with one thread per column strip, then FFT_Y with one thread per
// row strip (kernels defined in FFT.cu). Operates in place on Image_R/G/B.
void FFT(){
hipLaunchKernelGGL(( FFT_X), dim3(Num_Cols_Fourier/128), dim3(128), 0, 0, Image_R, Image_G, Image_B,
or_size, Val_Real, Val_Imag, Val_Real_out, Val_Imag_out,
Image_R, Image_G, Image_B, Num_Cols, Num_Rows, Num_Cols_Fourier, Num_Rows_Fourier);
hipLaunchKernelGGL(( FFT_Y), dim3(Num_Rows_Fourier/128), dim3(128), 0, 0, Image_R, Image_G, Image_B,
or_size, Val_Real, Val_Imag, Val_Real_out, Val_Imag_out,
Image_R, Image_G, Image_B, Num_Cols, Num_Rows, Num_Cols_Fourier, Num_Rows_Fourier);
}
// Histogram-equalizes each RGB channel independently.
// Per-channel histograms are computed on the device (Get_Histogram), the
// cumulative distributions are converted on the host into 256-entry remap
// tables, and the tables are applied by the Equalization_GPU kernel, writing
// the equalized channels into r_dataE/g_dataE/b_dataE.
// NOTE(review): the final kernel launch uses the global `or_size` rather
// than the `pitch` parameter — confirm they are always the same.
void Equalization_PC (unsigned char *r_data, unsigned char *g_data,
                      unsigned char *b_data, size_t pitch,
                      unsigned char *r_dataE, unsigned char *g_dataE,
                      unsigned char *b_dataE ){
    int i;
    // Byte counts are size_t now (hisAc_size was previously a float, which
    // only worked through an implicit conversion at the malloc call).
    size_t his_size = sizeof(unsigned int)*256;
    size_t hisAc_size = sizeof(float)*256;
    unsigned int *his_r = (unsigned int*)malloc(his_size);
    unsigned int *his_g = (unsigned int*)malloc(his_size);
    unsigned int *his_b = (unsigned int*)malloc(his_size);
    float *hisAc_r = (float*)malloc(hisAc_size);
    float *hisAc_g = (float*)malloc(hisAc_size);
    float *hisAc_b = (float*)malloc(hisAc_size);
    hipMemset( d_his_r, 0, his_size);
    hipMemset( d_his_g, 0, his_size);
    hipMemset( d_his_b, 0, his_size);
    dim3 grids(Num_Cols,Num_Rows);
    dim3 threads(1, 1);
    hipLaunchKernelGGL(( Get_Histogram), dim3(grids),dim3(threads), 0, 0, r_data, g_data, b_data, pitch, d_his_r, d_his_g, d_his_b);
    hipMemcpy(his_r, d_his_r, his_size, hipMemcpyDeviceToHost);
    hipMemcpy(his_g, d_his_g, his_size, hipMemcpyDeviceToHost);
    hipMemcpy(his_b, d_his_b, his_size, hipMemcpyDeviceToHost);
    float szImage=Num_Cols*Num_Rows;
    // Cumulative distribution per channel.
    hisAc_r[0] = ((float)his_r[0]);
    hisAc_g[0] = ((float)his_g[0]);
    hisAc_b[0] = ((float)his_b[0]);
    for (i = 1; i < 256; i++) {
        hisAc_r[i] = hisAc_r[i-1] + (((float)his_r[i]));
        hisAc_g[i] = hisAc_g[i-1] + (((float)his_g[i]));
        hisAc_b[i] = hisAc_b[i-1] + (((float)his_b[i]));
    }
    // Reuse the histogram arrays as the intensity remap tables.
    his_r[0] = 0;
    his_g[0] = 0;
    his_b[0] = 0;
    for (i = 1; i < 255; i++) {
        his_r[i] = (int)(hisAc_r[i - 1]*255/szImage);
        his_g[i] = (int)(hisAc_g[i - 1]*255/szImage);
        his_b[i] = (int)(hisAc_b[i - 1]*255/szImage);
    }
    his_r[255] = 255;
    his_g[255] = 255;
    his_b[255] = 255;
    hipMemcpy(d_his_r, his_r, his_size, hipMemcpyHostToDevice);
    hipMemcpy(d_his_g, his_g, his_size, hipMemcpyHostToDevice);
    hipMemcpy(d_his_b, his_b, his_size, hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( Equalization_GPU), dim3(grids),dim3(threads), 0, 0, r_data, g_data, b_data,
    or_size, r_dataE, g_dataE, b_dataE, d_his_r, d_his_g, d_his_b);
    // Release host-side scratch buffers (previously leaked on every call).
    free(his_r); free(his_g); free(his_b);
    free(hisAc_r); free(hisAc_g); free(hisAc_b);
}
///////////////////////////////////////////////////
// Menu options //
///////////////////////////////////////////////////
// Glut menu callback: maps a menu entry id to an image operation by setting
// the global flag/parameters consumed by display(), then redraws.
// Fixes two switch fall-through bugs: case 11 (Prewitt) previously fell
// through into case 12, so Sobel X overwrote the Prewitt mask; case 13
// (Sobel Y) fell through into case 5, enabling salt-and-pepper noise.
void call_back_function(int val){
    switch (val) {
    case 2:  // zoom out
        if(Scale_Factor < 1)Scale_Factor = 1/((1/Scale_Factor) + 0.15);
        else Scale_Factor -= 0.15;
        break;
    case 1:  // zoom in
        if(Scale_Factor < 1)Scale_Factor = 1/((1/Scale_Factor) - 0.15);
        else Scale_Factor += 0.15;
        break;
    case 3:  // rotate one step clockwise
        Rotation_Factor -= 0.01*PI;
        break;
    case 4:  // rotate one step counter-clockwise
        Rotation_Factor += 0.01*PI;
        break;
    case 18: // histogram equalization
        Equalization = 1;
        break;
    case 6:  // 3x3 mean (box) filter
        Flag_Filt = 1;
        Dim_Con = 3;
        Mask[0] = 1/9.0; Mask[1] = 1/9.0; Mask[2] = 1/9.0;
        Mask[3] = 1/9.0; Mask[4] = 1/9.0; Mask[5] = 1/9.0;
        Mask[6] = 1/9.0; Mask[7] = 1/9.0; Mask[8] = 1/9.0;
        hipMemcpy(DMask, Mask, 9*sizeof(float), hipMemcpyHostToDevice);
        break;
    case 8:  // 3x3 gaussian filter
        Flag_Filt = 1;
        Dim_Con = 3;
        Mask[0] = 1/16.0; Mask[1] = 2/16.0; Mask[2] = 1/16.0;
        Mask[3] = 2/16.0; Mask[4] = 4/16.0; Mask[5] = 2/16.0;
        Mask[6] = 1/16.0; Mask[7] = 2/16.0; Mask[8] = 1/16.0;
        hipMemcpy(DMask, Mask, 9*sizeof(float), hipMemcpyHostToDevice);
        break;
    case 9:  // laplacian, 4-connectivity
        Flag_Filt = 1;
        Dim_Con = 3;
        Mask[0] = 0; Mask[1] = -1; Mask[2] = 0;
        Mask[3] = -1; Mask[4] = 4; Mask[5] = -1;
        Mask[6] = 0; Mask[7] = -1; Mask[8] = 0;
        hipMemcpy(DMask, Mask, 9*sizeof(float), hipMemcpyHostToDevice);
        break;
    case 10: // laplacian, 8-connectivity
        Flag_Filt = 1;
        Dim_Con = 3;
        Mask[0] = -1; Mask[1] = -1; Mask[2] = -1;
        Mask[3] = -1; Mask[4] = 8; Mask[5] = -1;
        Mask[6] = -1; Mask[7] = -1; Mask[8] = -1;
        hipMemcpy(DMask, Mask, 9*sizeof(float), hipMemcpyHostToDevice);
        break;
    case 11: // Prewitt (vertical edges)
        Flag_Filt = 1;
        Dim_Con = 3;
        Mask[0] = -1; Mask[1] = 0; Mask[2] = 1;
        Mask[3] = -1; Mask[4] = 0; Mask[5] = 1;
        Mask[6] = -1; Mask[7] = 0; Mask[8] = 1;
        hipMemcpy(DMask, Mask, 9*sizeof(float), hipMemcpyHostToDevice);
        break;  // was missing: fell through and Sobel X overwrote the mask
    case 12: // Sobel X
        Flag_Filt = 1;
        Dim_Con = 3;
        Mask[0] = -1; Mask[1] = 0; Mask[2] = 1;
        Mask[3] = -2; Mask[4] = 0; Mask[5] = 2;
        Mask[6] = -1; Mask[7] = 0; Mask[8] = 1;
        hipMemcpy(DMask, Mask, 9*sizeof(float), hipMemcpyHostToDevice);
        break;
    case 13: // Sobel Y
        Flag_Filt = 1;
        Dim_Con = 3;
        Mask[0] = 1; Mask[1] = 2; Mask[2] = 1;
        Mask[3] = 0; Mask[4] = 0; Mask[5] = 0;
        Mask[6] = -1; Mask[7] = -2; Mask[8] = -1;
        hipMemcpy(DMask, Mask, 9*sizeof(float), hipMemcpyHostToDevice);
        break;  // was missing: fell through and also enabled noise (case 5)
    case 5:  // add salt-and-pepper noise
        Flag_PPnoise = 1;
        break;
    case 7:  // median filter
        Flag_Med = 1;
        break;
    case 19: // Fourier transform
        Flag_Fourier = 1;
        break;
    case 20: // restore original image and view transform
        Flag_Reset = 1;
        Scale_Factor = 1;
        Rotation_Factor = 0;
        break;
    case 16: // morphological erosion
        Flag_Ero = 1;
        Dim_Con = 3;
        Max_E = 255;
        break;
    case 17: // morphological dilation
        // NOTE(review): unlike erosion, Max_E is not set here although the
        // dilate kernel receives it — confirm that is intentional.
        Flag_Dil = 1;
        Dim_Con = 3;
        break;
    case 14: // grayscale conversion
        Flag_Gray = 1;
        break;
    case 15: // binarization
        Flag_BW = 1;
        break;
    case 21: // quit
        exit(0);
        break;
    case 22: // pixelate
        Flag_Pix = 1;
        break;
    case 23: // requantize
        Flag_req = 1;
        break;
    default:{
    }
    }
    display();
}
///////////////////////////////////////////////////
// Creating menu //
///////////////////////////////////////////////////
// Builds the nested right-click glut menu. Each entry id corresponds to a
// case handled in call_back_function; submenus group related operations.
void Create_call_back_function(void) {
// Rotation / scaling submenu.
sub_00 = glutCreateMenu(call_back_function);
glutAddMenuEntry("Acercar", 1);
glutAddMenuEntry("Alejar", 2);
glutAddMenuEntry("Rotar derecha", 3);
glutAddMenuEntry("Rotar izquierda", 4);
// Noise / smoothing submenu.
sub_01 = glutCreateMenu(call_back_function);
glutAddMenuEntry("Agregar ruido", 5);
glutAddMenuEntry("Filtro de media", 6);
glutAddMenuEntry("Filtro de mediana", 7);
glutAddMenuEntry("Filtro gaussiano", 8);
// Edge-detection submenu.
sub_02 = glutCreateMenu(call_back_function);
glutAddMenuEntry("2D - 4 conexion", 9);
glutAddMenuEntry("2D - 8 conexion", 10);
glutAddMenuEntry("Prewitt", 11);
glutAddMenuEntry("Sobel X", 12);
glutAddMenuEntry("Sobel Y", 13);
// Color-system submenu.
sub_03 = glutCreateMenu(call_back_function);
glutAddMenuEntry("Escala de grises", 14);
glutAddMenuEntry("Binarizado", 15);
// Morphological-operations submenu.
sub_04 = glutCreateMenu(call_back_function);
glutAddMenuEntry("Erosion", 16);
glutAddMenuEntry("Dilatacion", 17);
// Top-level menu, attached to the right mouse button.
glutCreateMenu(call_back_function);
glutAddMenuEntry("Pixelado", 22);
glutAddMenuEntry("Recuantizacion", 23);
glutAddSubMenu("Rotacion-escala", sub_00);
glutAddSubMenu("Ruido-suavizado", sub_01);
glutAddSubMenu("Deteccion de bordes", sub_02);
glutAddSubMenu("Sistemas de color", sub_03);
glutAddSubMenu("Operaciones morfologicas", sub_04);
glutAddMenuEntry("Ecualizacion", 18);
glutAddMenuEntry("Transformacion Fourier", 19);
glutAddMenuEntry("Restaurar original", 20);
glutAddMenuEntry("Salir", 21);
glutAttachMenu(GLUT_RIGHT_BUTTON);
}
///////////////////////////////////////////////////
// Struct and function to read data from image //
///////////////////////////////////////////////////
// Parsed BMP header fields, filled in by Read_Image. Field order mirrors the
// order in which Read_Image consumes the file, not the on-disk layout.
typedef struct BMP_Info{
unsigned long bytesInHeader;   // info-header size (BITMAPINFOHEADER)
unsigned long widht;           // image width in pixels (sic: "widht")
unsigned long height;          // image height in pixels
unsigned int planes;           // color planes (1 for BMP)
unsigned int bitsPerPixel;     // bits per pixel
unsigned long compression;     // compression method
unsigned long sizeImage;       // raw pixel-data size in bytes
unsigned long hResolution;     // horizontal resolution
unsigned long vResolution;     // vertical resolution
unsigned long nIndexes;        // palette colors used
unsigned long nIIndexes;       // important palette colors
char type[3];                  // "BM" signature + NUL
unsigned long size;            // total file size in bytes
char reserved[5];              // reserved bytes from the file header
unsigned long offset;          // byte offset of the pixel data
} BMP_Info;
// Reads the next 4 bytes from fp as a native-endian uint32 and widens it to
// unsigned long. Returns 0 if the read fails (previously the function
// returned an uninitialized value in that case, which is undefined behavior).
unsigned long Turn_Data_Long(FILE* fp){
    uint32_t data32 = 0;
    if (fread(&data32, 4, 1, fp) != 1) {
        data32 = 0;
    }
    return (unsigned long)data32;
}
// Reads the next 2 bytes from fp as a native-endian uint16 and widens it to
// unsigned int. Returns 0 if the read fails (previously the function
// returned an uninitialized value in that case, which is undefined behavior).
unsigned int Turn_Data_Int(FILE* fp){
    uint16_t data16 = 0;
    if (fread(&data16, 2, 1, fp) != 1) {
        data16 = 0;
    }
    return (unsigned int)data16;
}
// Parses a BMP file header (file header then info header) from fp into
// Image_Raw. Assumes fp is positioned at the start of the file.
// NOTE(review): fgets stops at newline bytes, which can occur in binary
// fields like `reserved` — confirm this works for all input files.
void Read_Image(FILE* fp, BMP_Info* Image_Raw){
fgets(Image_Raw->type, 3, fp);            // "BM" signature
Image_Raw->size = Turn_Data_Long(fp);
fgets(Image_Raw->reserved, 5, fp);        // 4 reserved bytes
Image_Raw->offset = Turn_Data_Long(fp);   // pixel-data offset
// BITMAPINFOHEADER fields, in on-disk order.
Image_Raw->bytesInHeader = Turn_Data_Long(fp);
Image_Raw->widht = Turn_Data_Long(fp);
Image_Raw->height = Turn_Data_Long(fp);
Image_Raw->planes = Turn_Data_Int(fp);
Image_Raw->bitsPerPixel = Turn_Data_Int(fp);
Image_Raw->compression = Turn_Data_Long(fp);
Image_Raw->sizeImage = Turn_Data_Long(fp);
Image_Raw->hResolution = Turn_Data_Long(fp);
Image_Raw->vResolution = Turn_Data_Long(fp);
Image_Raw->nIndexes = Turn_Data_Long(fp);
Image_Raw->nIIndexes = Turn_Data_Long(fp);
}
FILE *fp;
BMP_Info Image_Raw;
| 615991ec81619414162f596106c2f2d9b6ccfe25.cu | //nvcc Final.cu -o out -lglut -lGLEW -lGL -lm -ccbin clang-3.8 -lstdc++
///////////////////////////////////////////////////
// Call to general libraries //
///////////////////////////////////////////////////
#include <cstdio>
#include <stdlib.h>
#include <stdint.h>
#include <assert.h>
#include <math.h>
#include <complex.h>
#include <string.h>
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include <GL/glut.h>
#include <GL/freeglut_ext.h>
#include <cuda_gl_interop.h>
///////////////////////////////////////////////////
// Call to cuda funtions //
///////////////////////////////////////////////////
#include "FFT.cu"
#include "convolutions.cu"
#include "threshold.cu"
#include "RE_transform.cu"
#include "equalization.cu"
#include "noise.cu"
#include "backup.cu"
#include "compression.cu"
using namespace std;
///////////////////////////////////////////////////
// Defining variables //
///////////////////////////////////////////////////
#define WIDTH 1280
#define HEIGHT 960
#define DIM 1600
#define PI 3.14159265
static int sub_00;
static int sub_01;
static int sub_02;
static int sub_03;
static int sub_04;
bool Equalization = 0, Flag_Filt = 0, Flag_Med = 0, Flag_PPnoise = 0,Flag_Pix = 0,Flag_req=0;
bool Flag_Reset = 0, Flag_Ero = 0, Flag_Dil = 0, Flag_Gray = 0, Flag_BW = 0, Flag_Fourier = 0;
long long int sizeImage;
float Scale_Factor;
float Rotation_Factor;
unsigned long widht, height;
int Num_Cols, Num_Rows, Dim_Con, Num_Rows_Fourier, Num_Cols_Fourier, Max_E;
size_t or_size, mor_size, equ_size, fou_size;
unsigned char *Image_R, *Image_G, *Image_B;
unsigned char *Image_R_bk, *Image_G_bk, *Image_B_bk;
unsigned char *Equalizar_R, *Equalizar_G, *Equalizar_B;
unsigned char *Convol_R, *Convol_G, *Convol_B;
unsigned char *Fourier_R, *Fourier_G, *Fourier_B;
unsigned char *Morfo_R, *Morfo_G, *Morfo_B;
float *Val_Real, *Val_Real_out, *Val_Imag, *Val_Imag_out;
unsigned int *d_his_r;
unsigned int *d_his_g;
unsigned int *d_his_b;
float *DMask;
float *Mask = (float*)malloc(625*sizeof(float));
///////////////////////////////////////////////////
// Serial part of cuda funtions //
///////////////////////////////////////////////////
int Threshold(unsigned char *r_data, unsigned char *g_data, unsigned char *b_data, size_t pitch);
void Equalization_PC (unsigned char *r_data, unsigned char *g_data,
unsigned char *b_data, size_t pitch,
unsigned char *r_dataE, unsigned char *g_dataE,
unsigned char *b_dataE );
void FFT();
///////////////////////////////////////////////////
// Function to display with glut //
///////////////////////////////////////////////////
// Glut display callback (CUDA variant): maps a GL pixel buffer into device
// memory, runs whichever image operations the menu flagged (each flag is
// consumed and cleared), then scales and rotates the current image into the
// buffer and draws it. Operates on the global device images Image_R/G/B.
void display(){
GLuint bufferObj;
struct cudaGraphicsResource* resource;
bool Flag_conv = 1;
glClearColor( 255.0, 255.0, 255.0, 1.0 );
glClear( GL_COLOR_BUFFER_BIT );
// A fresh pixel-unpack buffer is created, registered with CUDA, and mapped
// so kernels can write the final RGBA frame directly into it.
glGenBuffers( 1, &bufferObj );
glBindBuffer( GL_PIXEL_UNPACK_BUFFER_ARB, bufferObj );
glBufferData( GL_PIXEL_UNPACK_BUFFER_ARB, widht * height * 4, NULL, GL_DYNAMIC_DRAW_ARB );
cudaGraphicsGLRegisterBuffer( &resource, bufferObj, cudaGraphicsMapFlagsNone );
uchar4* devPtr;
size_t size;
cudaGraphicsMapResources( 1, &resource, NULL ) ;
cudaGraphicsResourceGetMappedPointer( (void**)&devPtr, &size, resource );
// Launch geometries: image-sized, DIM x DIM scaled canvas, and window-sized.
dim3 grids(Num_Cols/16,Num_Rows/16);
dim3 threads(16, 16);
dim3 grids_01(DIM/16,DIM/16);
dim3 threads_01(16, 16);
dim3 grids_02(widht/16,height/16);
dim3 threads_02(16, 16);
///////////////////////////////////////////////////
// Cuda functions called by the menu //
///////////////////////////////////////////////////
if(Flag_Pix){
Pixelado<<<grids,threads>>>(Image_R, Image_G, Image_B,or_size, Image_R, Image_G, Image_B);
Flag_Pix = 0;}
if(Flag_req){
Requant<<<grids,threads>>>(Image_R, Image_G, Image_B,or_size, Image_R, Image_G, Image_B);
Flag_req = 0;}
// Reset restores the untouched backup copies into the working channels.
if(Flag_Reset){
Backup<<<grids,threads>>>(Image_R_bk, Image_G_bk, Image_B_bk, or_size,
Image_R, Image_G, Image_B);
Flag_Reset = 0;}
if(Flag_Ero){
erode<<<grids,threads>>>(Image_R, Image_G, Image_B,
or_size, Image_R, Image_G, Image_B, Num_Cols, Num_Rows, Dim_Con, Max_E);
Flag_Ero = 0;}
if(Flag_Dil){
dilate<<<grids,threads>>>(Image_R, Image_G, Image_B,
or_size, Image_R, Image_G, Image_B, Num_Cols, Num_Rows, Dim_Con, Max_E);
Flag_Dil = 0;}
if(Flag_Gray){
grayscale<<<grids,threads>>>(Image_R, Image_G, Image_B, or_size, d_his_r);
Flag_Gray = 0;}
if(Flag_BW){
Threshold (Image_R, Image_G, Image_B, or_size );
Flag_BW = 0;}
if (Flag_PPnoise){
PPnoise<<<grids,threads>>>(Image_R, Image_G, Image_B, or_size, 1, rand()%100);
Flag_PPnoise = 0;}
if(Flag_Fourier){
FFT();
Flag_Fourier = 0;}
// The filtering operations render into the Convol_* buffers and then copy
// the result back over the working image via the Backup kernel.
if (Equalization){
Equalization_PC (Image_R, Image_G, Image_B, or_size,Convol_R, Convol_G, Convol_B );
Backup<<<grids,threads>>>(Convol_R, Convol_G, Convol_B, or_size,
Image_R, Image_G, Image_B);
Equalization=0;
}
if (Flag_Med) {
median_filter<<<grids,threads>>>(Image_R, Image_G, Image_B, or_size,
Convol_R, Convol_G, Convol_B, Num_Cols, Num_Rows, 3);
Backup<<<grids,threads>>>(Convol_R, Convol_G, Convol_B, or_size,
Image_R, Image_G, Image_B);
Flag_Med=0;
}
if (Flag_Filt) {
Operador_Convolucion<<<grids,threads>>>(Image_R, Image_G, Image_B,
or_size, Convol_R, Convol_G, Convol_B, Num_Cols, Num_Rows, DMask, Dim_Con);
Backup<<<grids,threads>>>(Convol_R, Convol_G, Convol_B, or_size,
Image_R, Image_G, Image_B);
Flag_Filt=0;
}
// NOTE(review): Flag_conv is always 1 here, so the else branch below is
// currently dead code — confirm whether it was meant to be toggled.
if (Flag_conv) {
Scale<<<grids_01,threads_01>>>(Image_R, Image_G, Image_B, Morfo_R, Morfo_G, Morfo_B,
or_size, mor_size, Scale_Factor, Num_Cols, Num_Rows);
}else{
Scale<<<grids_01,threads_01>>>(Convol_R, Convol_G, Convol_B, Morfo_R, Morfo_G, Morfo_B,
or_size, mor_size, Scale_Factor, Num_Cols, Num_Rows);
}
// Rotate writes the final RGBA pixels straight into the mapped GL buffer.
Rotate<<<grids_02,threads_02>>>( devPtr, Morfo_R, Morfo_G, Morfo_B,
mor_size, Rotation_Factor, DIM, DIM);
cudaGraphicsUnmapResources( 1, &resource, NULL ) ;
glDrawPixels( widht, height, GL_RGBA, GL_UNSIGNED_BYTE, 0 );
glutSwapBuffers();
cudaGraphicsUnregisterResource( resource ) ;
glBindBuffer( GL_PIXEL_UNPACK_BUFFER_ARB, 0 );
glDeleteBuffers( 1, &bufferObj );
}
///////////////////////////////////////////////////
// Serial funtions //
///////////////////////////////////////////////////
// Binarizes the image at the median gray level: builds the grayscale
// histogram on the GPU, finds the intensity at which the cumulative count
// passes half the pixels, and thresholds the image there.
// r_data/g_data/b_data are device pointers; pitch is the pitched-row size.
// Returns the threshold that was applied.
int Threshold(unsigned char *r_data, unsigned char *g_data, unsigned char *b_data, size_t pitch) {
	unsigned int his_size = sizeof(unsigned int)*256;
	unsigned int *his = (unsigned int*)malloc(his_size);
	cudaMemset( d_his_r, 0, his_size);
	// One 1x1 block per pixel, matching the launch style used elsewhere.
	dim3 grids(Num_Cols,Num_Rows);
	dim3 threads(1, 1);
	// grayscale also accumulates the luminance histogram into d_his_r.
	grayscale<<<grids,threads>>>(r_data, g_data, b_data, pitch, d_his_r);
	cudaMemcpy(his, d_his_r, his_size, cudaMemcpyDeviceToHost);
	// First bin whose cumulative count exceeds half the pixels is the median.
	// um is pre-set to 255 so it is defined even if the loop never breaks
	// (previously it was read uninitialized in that degenerate case).
	int m = Num_Cols*Num_Rows/2, h = 0, um = 255, i;
	for (i = 0; i < 256; i++) {
		h += his[i];
		if (h > m) {
			um = i;
			break;
		}
	}
	binary<<<grids,threads>>>(r_data, g_data, b_data, pitch, um);
	free(his);	// was leaked on every call
	return um;
}
// Separable Fourier transform on the GPU: a pass along X (FFT_X) followed by
// a pass along Y (FFT_Y). Both launches go to the default stream, so FFT_Y
// implicitly waits for FFT_X to finish.
// NOTE(review): assumes Num_Cols_Fourier and Num_Rows_Fourier are multiples
// of 128 — otherwise the tail rows/columns are never processed; confirm.
void FFT(){
	FFT_X<<<Num_Cols_Fourier/128, 128>>>(Image_R, Image_G, Image_B,
		or_size, Val_Real, Val_Imag, Val_Real_out, Val_Imag_out,
		Image_R, Image_G, Image_B, Num_Cols, Num_Rows, Num_Cols_Fourier, Num_Rows_Fourier);
	FFT_Y<<<Num_Rows_Fourier/128, 128>>>(Image_R, Image_G, Image_B,
		or_size, Val_Real, Val_Imag, Val_Real_out, Val_Imag_out,
		Image_R, Image_G, Image_B, Num_Cols, Num_Rows, Num_Cols_Fourier, Num_Rows_Fourier);
}
// Histogram-equalizes each RGB channel: computes per-channel histograms on
// the GPU, builds the scaled cumulative mapping on the host, uploads it, and
// applies it with Equalization_GPU, writing the result to r/g/b_dataE.
// All pointers except the his*/hisAc* scratch are device pointers.
void Equalization_PC (unsigned char *r_data, unsigned char *g_data,
	unsigned char *b_data, size_t pitch,
	unsigned char *r_dataE, unsigned char *g_dataE,
	unsigned char *b_dataE ){
	int i;
	unsigned int his_size = sizeof(unsigned int)*256;
	// Fixed: this allocation size was declared as a float.
	size_t hisAc_size = sizeof(float)*256;
	unsigned int *his_r = (unsigned int*)malloc(his_size);
	unsigned int *his_g = (unsigned int*)malloc(his_size);
	unsigned int *his_b = (unsigned int*)malloc(his_size);
	float *hisAc_r = (float*)malloc(hisAc_size);
	float *hisAc_g = (float*)malloc(hisAc_size);
	float *hisAc_b = (float*)malloc(hisAc_size);
	cudaMemset( d_his_r, 0, his_size);
	cudaMemset( d_his_g, 0, his_size);
	cudaMemset( d_his_b, 0, his_size);
	dim3 grids(Num_Cols,Num_Rows);
	dim3 threads(1, 1);
	Get_Histogram<<<grids,threads>>>(r_data, g_data, b_data, pitch, d_his_r, d_his_g, d_his_b);
	cudaMemcpy(his_r, d_his_r, his_size, cudaMemcpyDeviceToHost);
	cudaMemcpy(his_g, d_his_g, his_size, cudaMemcpyDeviceToHost);
	cudaMemcpy(his_b, d_his_b, his_size, cudaMemcpyDeviceToHost);
	float szImage=Num_Cols*Num_Rows;
	// Cumulative histograms.
	hisAc_r[0] = ((float)his_r[0]);
	hisAc_g[0] = ((float)his_g[0]);
	hisAc_b[0] = ((float)his_b[0]);
	for (i = 1; i < 256; i++) {
		hisAc_r[i] = hisAc_r[i-1] + (((float)his_r[i]));
		hisAc_g[i] = hisAc_g[i-1] + (((float)his_g[i]));
		hisAc_b[i] = hisAc_b[i-1] + (((float)his_b[i]));
	}
	// Reuse his_* as the equalization lookup tables (CDF scaled to 0..255).
	his_r[0] = 0;
	his_g[0] = 0;
	his_b[0] = 0;
	for (i = 1; i < 255; i++) {
		his_r[i] = (int)(hisAc_r[i - 1]*255/szImage);
		his_g[i] = (int)(hisAc_g[i - 1]*255/szImage);
		his_b[i] = (int)(hisAc_b[i - 1]*255/szImage);
	}
	his_r[255] = 255;
	his_g[255] = 255;
	his_b[255] = 255;
	cudaMemcpy(d_his_r, his_r, his_size, cudaMemcpyHostToDevice);
	cudaMemcpy(d_his_g, his_g, his_size, cudaMemcpyHostToDevice);
	cudaMemcpy(d_his_b, his_b, his_size, cudaMemcpyHostToDevice);
	// NOTE(review): passes the global or_size rather than the pitch
	// parameter; the only caller passes or_size for pitch, so the values
	// match — confirm before changing either.
	Equalization_GPU<<<grids,threads>>>(r_data, g_data, b_data,
		or_size, r_dataE, g_dataE, b_dataE, d_his_r, d_his_g, d_his_b);
	// Host scratch buffers were leaked on every call.
	free(his_r); free(his_g); free(his_b);
	free(hisAc_r); free(hisAc_g); free(hisAc_b);
}
///////////////////////////////////////////////////
// Menu options //
///////////////////////////////////////////////////
// GLUT menu dispatcher: decodes the menu entry id, sets the matching
// processing flag / parameters (the actual work happens in display()),
// and triggers a redraw.
// Fixes two switch fall-throughs: case 11 (Prewitt) fell into case 12 and
// silently applied the Sobel X mask instead, and case 13 (Sobel Y) fell
// into case 5 and also injected salt & pepper noise.
void call_back_function(int val){
	switch (val) {
	case 2:		// zoom out
		if(Scale_Factor < 1)Scale_Factor = 1/((1/Scale_Factor) + 0.15);
		else Scale_Factor -= 0.15;
		break;
	case 1:		// zoom in
		if(Scale_Factor < 1)Scale_Factor = 1/((1/Scale_Factor) - 0.15);
		else Scale_Factor += 0.15;
		break;
	case 3:		// rotate one way
		Rotation_Factor -= 0.01*PI;
		break;
	case 4:		// rotate the other way
		Rotation_Factor += 0.01*PI;
		break;
	case 18:	// histogram equalization
		Equalization = 1;
		break;
	case 6:		// 3x3 mean filter
		Flag_Filt = 1;
		Dim_Con = 3;
		Mask[0] = 1/9.0; Mask[1] = 1/9.0; Mask[2] = 1/9.0;
		Mask[3] = 1/9.0; Mask[4] = 1/9.0; Mask[5] = 1/9.0;
		Mask[6] = 1/9.0; Mask[7] = 1/9.0; Mask[8] = 1/9.0;
		cudaMemcpy(DMask, Mask, 9*sizeof(float), cudaMemcpyHostToDevice);
		break;
	case 8:		// 3x3 gaussian filter
		Flag_Filt = 1;
		Dim_Con = 3;
		Mask[0] = 1/16.0; Mask[1] = 2/16.0; Mask[2] = 1/16.0;
		Mask[3] = 2/16.0; Mask[4] = 4/16.0; Mask[5] = 2/16.0;
		Mask[6] = 1/16.0; Mask[7] = 2/16.0; Mask[8] = 1/16.0;
		cudaMemcpy(DMask, Mask, 9*sizeof(float), cudaMemcpyHostToDevice);
		break;
	case 9:		// 4-connected Laplacian edge detector
		Flag_Filt = 1;
		Dim_Con = 3;
		Mask[0] = 0; Mask[1] = -1; Mask[2] = 0;
		Mask[3] = -1; Mask[4] = 4; Mask[5] = -1;
		Mask[6] = 0; Mask[7] = -1; Mask[8] = 0;
		cudaMemcpy(DMask, Mask, 9*sizeof(float), cudaMemcpyHostToDevice);
		break;
	case 10:	// 8-connected Laplacian edge detector
		Flag_Filt = 1;
		Dim_Con = 3;
		Mask[0] = -1; Mask[1] = -1; Mask[2] = -1;
		Mask[3] = -1; Mask[4] = 8; Mask[5] = -1;
		Mask[6] = -1; Mask[7] = -1; Mask[8] = -1;
		cudaMemcpy(DMask, Mask, 9*sizeof(float), cudaMemcpyHostToDevice);
		break;
	case 11:	// Prewitt edge detector
		Flag_Filt = 1;
		Dim_Con = 3;
		Mask[0] = -1; Mask[1] = 0; Mask[2] = 1;
		Mask[3] = -1; Mask[4] = 0; Mask[5] = 1;
		Mask[6] = -1; Mask[7] = 0; Mask[8] = 1;
		cudaMemcpy(DMask, Mask, 9*sizeof(float), cudaMemcpyHostToDevice);
		break;	// fixed: previously fell through into Sobel X
	case 12:	// Sobel X
		Flag_Filt = 1;
		Dim_Con = 3;
		Mask[0] = -1; Mask[1] = 0; Mask[2] = 1;
		Mask[3] = -2; Mask[4] = 0; Mask[5] = 2;
		Mask[6] = -1; Mask[7] = 0; Mask[8] = 1;
		cudaMemcpy(DMask, Mask, 9*sizeof(float), cudaMemcpyHostToDevice);
		break;
	case 13:	// Sobel Y
		Flag_Filt = 1;
		Dim_Con = 3;
		Mask[0] = 1; Mask[1] = 2; Mask[2] = 1;
		Mask[3] = 0; Mask[4] = 0; Mask[5] = 0;
		Mask[6] = -1; Mask[7] = -2; Mask[8] = -1;
		cudaMemcpy(DMask, Mask, 9*sizeof(float), cudaMemcpyHostToDevice);
		break;	// fixed: previously fell through and also set Flag_PPnoise
	case 5:		// salt & pepper noise
		Flag_PPnoise = 1;
		break;
	case 7:		// median filter
		Flag_Med = 1;
		break;
	case 19:	// Fourier transform
		Flag_Fourier = 1;
		break;
	case 20:	// restore the original image and reset the view
		Flag_Reset = 1;
		Scale_Factor = 1;
		Rotation_Factor = 0;
		break;
	case 16:	// morphological erosion
		Flag_Ero = 1;
		Dim_Con = 3;
		Max_E = 255;
		break;
	case 17:	// morphological dilation
		Flag_Dil = 1;
		Dim_Con = 3;
		break;
	case 14:	// grayscale conversion
		Flag_Gray = 1;
		break;
	case 15:	// binarization
		Flag_BW = 1;
		break;
	case 21:	// quit
		exit(0);
		break;
	case 22:	// pixelation
		Flag_Pix = 1;
		break;
	case 23:	// requantization
		Flag_req = 1;
		break;
	default:{
	}
	}
	display();
}
///////////////////////////////////////////////////
// Creating menu //
///////////////////////////////////////////////////
// Builds the GLUT right-click context menu tree. Submenus must exist before
// the top-level menu that references them, so the order of the
// glutCreateMenu calls below matters. The integer attached to each entry is
// the value dispatched to call_back_function().
void Create_call_back_function(void) {
	// Zoom / rotation submenu
	sub_00 = glutCreateMenu(call_back_function);
	glutAddMenuEntry("Acercar", 1);
	glutAddMenuEntry("Alejar", 2);
	glutAddMenuEntry("Rotar derecha", 3);
	glutAddMenuEntry("Rotar izquierda", 4);
	// Noise / smoothing submenu
	sub_01 = glutCreateMenu(call_back_function);
	glutAddMenuEntry("Agregar ruido", 5);
	glutAddMenuEntry("Filtro de media", 6);
	glutAddMenuEntry("Filtro de mediana", 7);
	glutAddMenuEntry("Filtro gaussiano", 8);
	// Edge-detection submenu
	sub_02 = glutCreateMenu(call_back_function);
	glutAddMenuEntry("2D - 4 conexion", 9);
	glutAddMenuEntry("2D - 8 conexion", 10);
	glutAddMenuEntry("Prewitt", 11);
	glutAddMenuEntry("Sobel X", 12);
	glutAddMenuEntry("Sobel Y", 13);
	// Color-system submenu
	sub_03 = glutCreateMenu(call_back_function);
	glutAddMenuEntry("Escala de grises", 14);
	glutAddMenuEntry("Binarizado", 15);
	// Morphology submenu
	sub_04 = glutCreateMenu(call_back_function);
	glutAddMenuEntry("Erosion", 16);
	glutAddMenuEntry("Dilatacion", 17);
	// Top-level menu: direct entries plus the submenus above
	glutCreateMenu(call_back_function);
	glutAddMenuEntry("Pixelado", 22);
	glutAddMenuEntry("Recuantizacion", 23);
	glutAddSubMenu("Rotacion-escala", sub_00);
	glutAddSubMenu("Ruido-suavizado", sub_01);
	glutAddSubMenu("Deteccion de bordes", sub_02);
	glutAddSubMenu("Sistemas de color", sub_03);
	glutAddSubMenu("Operaciones morfologicas", sub_04);
	glutAddMenuEntry("Ecualizacion", 18);
	glutAddMenuEntry("Transformacion Fourier", 19);
	glutAddMenuEntry("Restaurar original", 20);
	glutAddMenuEntry("Salir", 21);
	glutAttachMenu(GLUT_RIGHT_BUTTON);
}
///////////////////////////////////////////////////
// Struct and function to read data from image //
///////////////////////////////////////////////////
// In-memory copy of a BMP file header (file header + info header fields).
// Fields are filled one by one, in file order, by Read_Image(); the char
// arrays carry one extra byte because fgets() NUL-terminates what it reads.
typedef struct BMP_Info{
	unsigned long bytesInHeader;	// size of the info header, in bytes
	unsigned long widht;		// image width in pixels (sic: "widht")
	unsigned long height;		// image height in pixels
	unsigned int planes;		// number of color planes
	unsigned int bitsPerPixel;	// bit depth
	unsigned long compression;	// compression method
	unsigned long sizeImage;	// raw pixel-data size in bytes
	unsigned long hResolution;	// horizontal resolution
	unsigned long vResolution;	// vertical resolution
	unsigned long nIndexes;		// number of palette colors
	unsigned long nIIndexes;	// number of important colors
	char type[3];			// 2-byte signature ("BM") + NUL
	unsigned long size;		// total file size in bytes
	char reserved[5];		// 4 reserved bytes + NUL
	unsigned long offset;		// byte offset of the pixel data
} BMP_Info;
// Reads a 4-byte little-endian value from fp and widens it to unsigned long.
// Returns 0 if the read is short or fails (previously the fread result was
// ignored and an uninitialized value could be returned).
unsigned long Turn_Data_Long(FILE* fp){
	uint32_t data32 = 0;
	if (fread(&data32, sizeof(data32), 1, fp) != 1) {
		data32 = 0;	// short read / EOF / error
	}
	unsigned long data = (unsigned long)data32;
	return data;
}
// Reads a 2-byte little-endian value from fp and widens it to unsigned int.
// Returns 0 if the read is short or fails (previously the fread result was
// ignored and an uninitialized value could be returned).
unsigned int Turn_Data_Int(FILE* fp){
	uint16_t data16 = 0;
	if (fread(&data16, sizeof(data16), 1, fp) != 1) {
		data16 = 0;	// short read / EOF / error
	}
	unsigned int data = (unsigned int)data16;
	return data;
}
// Parses a BMP header from fp (which must be positioned at the start of the
// file) into *Image_Raw. Reads mirror the on-disk field order exactly;
// fgets() is used for the raw byte fields because it NUL-terminates.
// NOTE(review): no validation — a short or non-BMP file yields garbage.
void Read_Image(FILE* fp, BMP_Info* Image_Raw){
	fgets(Image_Raw->type, 3, fp);			// 2-byte "BM" signature
	Image_Raw->size = Turn_Data_Long(fp);		// total file size
	fgets(Image_Raw->reserved, 5, fp);		// 4 reserved bytes
	Image_Raw->offset = Turn_Data_Long(fp);		// pixel-data offset
	Image_Raw->bytesInHeader = Turn_Data_Long(fp);
	Image_Raw->widht = Turn_Data_Long(fp);
	Image_Raw->height = Turn_Data_Long(fp);
	Image_Raw->planes = Turn_Data_Int(fp);
	Image_Raw->bitsPerPixel = Turn_Data_Int(fp);
	Image_Raw->compression = Turn_Data_Long(fp);
	Image_Raw->sizeImage = Turn_Data_Long(fp);
	Image_Raw->hResolution = Turn_Data_Long(fp);
	Image_Raw->vResolution = Turn_Data_Long(fp);
	Image_Raw->nIndexes = Turn_Data_Long(fp);
	Image_Raw->nIIndexes = Turn_Data_Long(fp);
}
FILE *fp;
BMP_Info Image_Raw;
|
a50008b272cc92e928818e83b8b405b7135d4b7a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Demodulator.cu
*
* Created on: May 13, 2013
* Author: adm85
*/
#include <vector>
#include <math.h>
#include <iostream>
#include <fstream>
#include "Demodulator.h"
#include "Samples.h"
#include "Kernels.h"
using namespace std;
namespace PAQ_SOQPSK {
// Constructs a demodulator with every loop state, internal signal, and
// loop-filter gain initialized via initializeStatesAndSignals().
Demodulator::Demodulator() {
	initializeStatesAndSignals();
}
// Nothing to release here: members are values or self-managing containers.
Demodulator::~Demodulator() {}
/**
 * Host (reference) implementation of the SOQPSK demodulation loop.
 *
 * For every input sample: derotate by the current DDS phase, interpolate
 * I/Q with a cubic Farrow filter at the timing estimate mu, make a bit
 * decision whenever the timing strobe fires, then update the phase (PED)
 * and timing (TED) loop filters and the strobe state machine.
 *
 * Side effects: writes several debug CSV traces under hard-coded paths in
 * /home/eswindle/Documents/Output (throws if any file cannot be opened).
 * Returns a reference to a heap-allocated vector of bit decisions which
 * the caller owns (and, as written, never deletes).
 *
 * Fix: the hostIQ open-failure message previously said "hostStrobe"
 * (copy-paste error).
 */
vector<int>& Demodulator::demodulate(Samples& inputSamples){
	//----------------------------------------------------------------------
	// Debug Code
	//----------------------------------------------------------------------
	//Load the CSV file
	//FileReader fileReader;
	//Samples ccwFarrowOutSamples = fileReader.loadCSVFile("/fslhome/adm85/compute/paq/soqpsk_demod/src/matlabFarrowOutput.csv");
	vector<float> ISamples;
	vector<float> QSamples;
	//vector<float> PEDError;
	//vector<float> LoopFilterError;
	//ofstream errorFile = new ofstream("/fslhome/adm85/compute/paq/soqpsk_demod/output/errorTrack.csv");
	ofstream errorFile;
	errorFile.open("/home/eswindle/Documents/Output/errorTrack.csv");
	if(!errorFile.is_open()) {
		cout << "Could not open error file." << endl;
		throw exception();
	}
	ofstream stateFile;
	stateFile.open("/home/eswindle/Documents/Output/hostState.csv");
	if(!stateFile.is_open()) {
		cout << "Could not open state file." << endl;
		throw exception();
	}
	ofstream bitDecisionFile;
	bitDecisionFile.open("/home/eswindle/Documents/Output/hostBitDecisions.csv");
	if(!bitDecisionFile.is_open()) {
		cout << "Could not open bit decision file." << endl;
		throw exception();
	}
	ofstream hostMuFile;
	hostMuFile.open("/home/eswindle/Documents/Output/hostMu.csv");
	if(!hostMuFile.is_open()) {
		cout << "Could not open hostMu file." << endl;
		throw exception();
	}
	ofstream hostStrobeFile;
	hostStrobeFile.open("/home/eswindle/Documents/Output/hostStrobe.csv");
	if(!hostStrobeFile.is_open()) {
		cout << "Could not open hostStrobe file." << endl;
		throw exception();
	}
	ofstream hostIQFile;
	hostIQFile.open("/home/eswindle/Documents/Output/hostIQ.csv");
	if(!hostIQFile.is_open()) {
		cout << "Could not open hostIQ file." << endl;
		throw exception();
	}
	//Return array
	vector<int>* bitIndices = new vector<int>;
	//Sample values
	float I, Q;
	float I_Decision, Q_Decision;
	//Farrow interpolator values
	float h0, h1, h2, h3;
	//Bit decision
	int bitDecision;
	//cout << "About to enter loop..." << endl;
	for(int sampleIndex = 0; sampleIndex < inputSamples.getSize(); sampleIndex++) {
		//cout << "Sample Index: " << sampleIndex << endl;
		//----------------------------------------------------------------------
		// Output Equations
		//----------------------------------------------------------------------
		//Get samples
		I = inputSamples.getI().at(sampleIndex);
		Q = inputSamples.getQ().at(sampleIndex);
		//cout << "Got Samples" << endl;
		//Derotate and add to queues
		xr = (cos(dds) * I) + (sin(dds) * Q);
		yr = (cos(dds) * Q) - (sin(dds) * I);
		// Shift the derotated-sample history down by one
		for(int i=(SAMPLE_BUFFER_SIZE-1); i>0; i--) {
			I_Buffer.at(i) = I_Buffer.at(i-1);
			Q_Buffer.at(i) = Q_Buffer.at(i-1);
		}
		I_Buffer.at(0) = xr;
		Q_Buffer.at(0) = yr;
		//cout << "Derotated" << endl;
		//Grab mu
		mu = M;
		hostMuFile << mu << "\n";
		hostStrobeFile << strobe << "\n";
		stateFile << state << "\n";
		//Farrow Interpolator (cubic coefficients in mu)
		h0 = 0.5 * mu * (mu-1);
		h1 = -0.5 * mu * (mu-3);
		h2 = (-0.5 * mu * (mu+1))+1;
		h3 = h0;
		I_Prime = (h0*xr) + (h1*FI1) + (h2*FI2) + (h3*FI3);
		Q_Prime = (h0*yr) + (h1*FQ1) + (h2*FQ2) + (h3*FQ3);
		//Grab final I and Q values when the timing strobe fires
		if(strobe) {
			Q_Decision = Q_Prime;
			switch(state) {
			case 0: //This should never happen
				cout << "Error in bit decision block. State should never be 0." << endl;
				throw exception();
			case 1:
			case 2:
			case 3:
			case 5:
				I_Decision = B1;
				break;
			case 4:
				I_Decision = I_Buffer.at(TIME_N_MINUS_THREE);
				break;
			case 6:
				I_Decision = I_Buffer.at(TIME_N_MINUS_TWO);
				break;
			case 7: //This should never happen
				cout << "Error in bit decision block. State should never be 7." << endl;
				throw exception();
			}
			//Save to file
			hostIQFile << I_Decision << "," << Q_Decision << "\n";
			//Bit decision
			bitDecision = getSampleIndex(I_Decision, Q_Decision);
			//cout << "SampleIndex: " << sampleIndex << " Bit Decision: " << bitDecision << endl;
			bitDecisionFile << bitDecision << "\n";
			bitIndices->push_back(bitDecision);
		}
		//cout << "I and Q complete" << endl;
		//----------------------------------------------------------------------
		// Internal Signal Update Equations
		//----------------------------------------------------------------------
		//Phase Loop
		ep = calcPhaseError(I_Prime, I_Buffer.at(TIME_N_MINUS_TWO), I_Buffer.at(TIME_N_MINUS_THREE),
				Q_Prime, Q_Buffer.at(TIME_N_MINUS_TWO), Q_Buffer.at(TIME_N_MINUS_THREE),
				state);
		vp = (K1p*ep) + (K2p*ep) + VIp;
		//Timing Loop
		et = calcTimingError(I_Prime, I_Buffer.at(TIME_N_MINUS_TWO), I_Buffer.at(TIME_N_MINUS_THREE),
				Q_Prime, state);
		vt = (K1t*et) + (K2t*et) + VIt;
		//cout << "Internal signals update complete" << endl;
		errorFile << vp << "," << vt << "\n";
		//----------------------------------------------------------------------
		// State Update Equations
		//----------------------------------------------------------------------
		//Farrow Interpolator states
		FI3 = FI2; FI2 = FI1; FI1 = xr;
		FQ3 = FQ2; FQ2 = FQ1; FQ1 = yr;
		//Bit Decision block
		B1 = I_Prime;
		//PED Loop
		ped1 = I_Prime;
		ped2 = Q_Prime;
		VIp = VIp + K2p*ep;
		dds = dds + vp;
		//TED Loop
		ted3 = ted2;
		ted2 = calcTED2(ted1, I_Buffer.at(TIME_N_MINUS_TWO), I_Buffer.at(TIME_N_MINUS_THREE), state);
		ted1 = I_Prime;
		ted5 = ted4;
		ted4 = Q_Prime;
		VIt = VIt + K2t*et;
		OLD_NCO = NCO;
		NCO = fmod(NCO, 1) - vt - 0.5;
		// Strobe fires on NCO underflow; mu for the next tick comes from the
		// pre-update NCO.
		if(NCO < 0) {
			strobe = true;
			M = 2*fmod(OLD_NCO, 1);
			NCO += 1; //Equivalent to fmod(NCO, 1);
		} else {
			strobe = false;
		}
		//State machine: 3-bit code of (strobe now, strobe 1 ago, strobe 2 ago)
		state = (4*strobe) + 2*s1 + s2;
		s2 = s1;
		s1 = strobe;
		//cout << "State updates complete" << endl;
	}
	//Close the debug files
	errorFile.close();
	stateFile.close();
	bitDecisionFile.close();
	hostMuFile.close();
	hostStrobeFile.close();
	hostIQFile.close();
	return *bitIndices;
}
/**
 * GPU port of demodulate(): copies the samples and the four loop-filter
 * constants to the device, runs the whole demodulation loop in a single
 * device thread (the loop is a feedback loop and cannot be parallelized
 * across samples), and copies the bit decisions back.
 * Returns a reference to a heap-allocated vector owned by the caller.
 * Fixes two leaks: the consts device buffer and the output_bits host array
 * were never freed.
 */
vector<int>& Demodulator::demodulateCuda(Samples& inputSamples)
{
	// Initialize variables
	float* i_samples, *q_samples, *consts;
	int* bitdec;
	int sample_size = inputSamples.getSize();
	int* output_bits = new int[sample_size];
	int floatsize = sample_size * sizeof(float);
	int intsize = sample_size * sizeof(int);
	vector<float> pedFilterConstants = calculateLoopFilterConstants(.01, 1, 18.33, 1, 2);
	vector<float> tedFilterConstants = calculateLoopFilterConstants(.005, 1, 12.35, -1, 2);
	// Pack PED then TED gains for the kernel: [K1p, K2p, K1t, K2t]
	float constants[4];
	constants[0] = pedFilterConstants[0];
	constants[1] = pedFilterConstants[1];
	constants[2] = tedFilterConstants[0];
	constants[3] = tedFilterConstants[1];
	// Allocate memory on GPU
	hipMalloc(&i_samples, floatsize);
	hipMalloc(&q_samples, floatsize);
	hipMalloc(&consts, 4 * sizeof(float));
	hipMalloc(&bitdec, intsize);
	// Copy data to GPU
	hipMemcpy(i_samples, inputSamples.getI().data(), floatsize, hipMemcpyHostToDevice);
	hipMemcpy(q_samples, inputSamples.getQ().data(), floatsize, hipMemcpyHostToDevice);
	hipMemcpy(consts, constants, 4 * sizeof(float), hipMemcpyHostToDevice);
	cout << "Running on Demodulation loop on Device..." << endl;
	// Run on device (single thread: sequential feedback loop)
	hipLaunchKernelGGL(( cudaDemodLoop), dim3(1),dim3(1), 0, 0, i_samples, q_samples, sample_size, bitdec, consts);
	// Get data back from GPU (blocking copy also synchronizes the kernel)
	hipMemcpy(output_bits, bitdec, intsize, hipMemcpyDeviceToHost);
	// Free memory
	hipFree(i_samples);
	hipFree(q_samples);
	hipFree(bitdec);
	hipFree(consts);		// was leaked
	vector<int>* output = new vector<int>(output_bits, output_bits + sample_size);
	delete[] output_bits;		// was leaked
	return *output;
}
/**
 * Initializes all state and signal values: zeroes every loop-filter delay
 * element, the Farrow interpolator history, and the internal signals, then
 * derives the PED/TED loop-filter gains and zero-fills the sample buffers.
 * NOTE(review): the push_back loop grows I_Buffer/Q_Buffer, so calling this
 * more than once per object would lengthen the buffers — confirm intent.
 */
void Demodulator::initializeStatesAndSignals() {
	//States
	ped1=0;
	ped2=0;
	ted1=0;
	ted2=0;
	ted3=0;
	ted4=0;
	ted5=0;
	VIp=0;
	VIt=0;
	M=0;
	delayedRegister=0;
	NCO=0;
	OLD_NCO = 0;
	state=0;
	s1=0;
	s2=0;
	FI1 = 0;
	FI2 = 0;
	FI3 = 0;
	FQ1 = 0;
	FQ2 = 0;
	FQ3 = 0;
	strobe = false;
	//Internal signals
	xr=0;
	yr=0;
	I_Prime=0;
	Q_Prime=0;
	vp=0;
	ep=0;
	vt=0;
	et=0;
	mu=0;
	//Loop filter constants
	// PED gains: BN=.01, zeta=1, Kp=18.33, K0=1, N=2
	vector<float> pedFilterConstants = calculateLoopFilterConstants(.01, 1, 18.33, 1, 2);
	//cout << "About to assign ped constants" << endl;
	K1p = pedFilterConstants.at(0);
	K2p = pedFilterConstants.at(1);
	// TED gains: BN=.005, zeta=1, Kp=12.35, K0=-1, N=2
	vector<float> tedFilterConstants = calculateLoopFilterConstants(.005, 1, 12.35, -1, 2);
	//cout << "About to assign ted constants" << endl;
	K1t = tedFilterConstants.at(0);
	K2t = tedFilterConstants.at(1);
	//Fill the buffers with zeros
	for(int i=0; i<SAMPLE_BUFFER_SIZE; i++) {
		I_Buffer.push_back(0);
		Q_Buffer.push_back(0);
	}
}
/**
 * Calculates K1 and K2 for the chosen loop by specifying the particular
 * constants: BN (noise bandwidth), zeta (damping), Kp (detector gain),
 * K0 (NCO/DDS gain), N (samples per symbol). These are the standard
 * proportional and integrator gains of a second-order loop filter.
 * NOTE(review): returns a reference to a heap-allocated vector that callers
 * copy from and never delete — a small leak per call.
 */
vector<float>& Demodulator::calculateLoopFilterConstants(float BN, float zeta, float Kp, float K0, float N) {
	float theta = BN/(zeta + .25/zeta);
	float d = 1 + 2*zeta*theta/N + theta/N*theta/N;	// 1 + 2*zeta*(theta/N) + (theta/N)^2
	float K1 = 4 * zeta/N * theta/d;
	float K2 = 4 * theta/N * theta/N/d;
	// Normalize by the detector and NCO gains
	K1 = K1/(Kp*K0);
	K2 = K2/(Kp*K0);
	vector<float>* loopConstantsArray = new vector<float>;
	loopConstantsArray->push_back(K1);
	loopConstantsArray->push_back(K2);
	return *loopConstantsArray;
}
/**
 * Maps a detected (I, Q) pair to its 2-bit symbol index.
 * The sign bits of I and Q form a 2-bit quadrant code which is then
 * inverted: (+,+) -> 0, (+,-) -> 1, (-,+) -> 2, (-,-) -> 3 — exactly the
 * decoding table of the original switch statement.
 */
int Demodulator::getSampleIndex(float I, float Q) {
	const int iBit = (I > 0) ? 1 : 0;
	const int qBit = (Q > 0) ? 1 : 0;
	// quadrant is always in 0..3, so the inverted table is just 3 - quadrant.
	const int quadrant = (iBit << 1) | qBit;
	return 3 - quadrant;
}
/**
 * Calculates phase error: ep = sign(x)*y - x*sign(y), where the (x, y)
 * pair for the first term is chosen by the strobe state machine (the
 * ped1/ped2 member memory, or the samples from two/three ticks ago).
 * States 0-3 and 7 output zero error; only 4, 5 and 6 update the loop.
 * NOTE(review): reads the ped1/ped2 member state as well as the arguments.
 */
float Demodulator::calcPhaseError(float xInterpolant, float xDelayedTwo, float xDelayedThree, float yInterpolant, float yDelayedTwo, float yDelayedThree, int state) {
	//Calculate sign(x)*y
	float signX, y;
	switch(state) {
	case 0:
	case 1:
	case 2:
	case 3:
	case 5:
	case 7:
		signX = sign(ped1);
		y = ped2;
		break;
	case 4:
		signX = sign(xDelayedThree);
		y = yDelayedThree;
		break;
	case 6:
		signX = sign(xDelayedTwo);
		y = yDelayedTwo;
		break;
	default:
		cout << "Error -- invalid state in PED. State: " << state << endl;
		throw exception();
	}
	float productOne = signX * y;
	//Calculate x*sign(y)
	float signY = sign(yInterpolant);
	float productTwo = xInterpolant * signY;
	//Find the final output and return it
	float output = productOne - productTwo;
	// Gate the error: only strobe states 4, 5, 6 feed the phase loop.
	switch(state) {
	case 0:
	case 1:
	case 2:
	case 3:
	case 7:
		return 0;
	case 4:
	case 5:
	case 6:
		return output;
	default:
		cout << "Error -- invalid state in PED. State: " << state << endl;
		throw exception();
	}
}
/**
 * Selects the sample that feeds the second TED delay element based on the
 * strobe state machine: state 4 takes the sample from three ticks ago,
 * state 6 the sample from two ticks ago, and every other valid state
 * (0, 1, 2, 3, 5, 7) the most recent interpolant (ted1). Any state outside
 * 0..7 is reported and rejected.
 */
float Demodulator::calcTED2(float ted1, float xDelayedTwo, float xDelayedThree, int state) {
	if (state == 4) {
		return xDelayedThree;
	}
	if (state == 6) {
		return xDelayedTwo;
	}
	if (state >= 0 && state <= 7) {
		// remaining valid states: 0, 1, 2, 3, 5, 7
		return ted1;
	}
	cout << "Error in timing detector -- invalid state supplied. State: " << state << endl;
	throw exception();
}
/**
 * Calculates timing error from sign-difference products on the I and Q
 * branches: each mid sample (ted2 / ted4) is weighted by the difference of
 * the signs of its neighbors. Only state 5 passes the error through; every
 * other state outputs zero.
 * NOTE(review): uses the ted1..ted5 member delay line in addition to the
 * arguments; the commented-out assignments document the original delay-line
 * updates that now happen in demodulate().
 */
float Demodulator::calcTimingError(float xInterpolant, float xDelayedTwo, float xDelayedThree, float yInterpolant, int state) {
	//Top Branch (I)
	float firstSwitch = calcTED2(ted1, xDelayedTwo, xDelayedThree, state);
	float summandOne = sign(ted3);
	//delayedX3 = delayedX2;
	float summandTwo = sign(firstSwitch);
	float productOne = ted2 * (summandOne - summandTwo);
	//delayedX2 = firstSwitch;
	//Bottom branch (Q)
	summandOne = sign(ted5);
	//delayedY2 = delayedY;
	summandTwo = sign(yInterpolant);
	float productTwo = ted4 * (summandOne - summandTwo);
	//delayedY = yInterpolant;
	//Last Branch
	float switchInput = productOne + productTwo;
	//Return switch: only state 5 drives the timing loop
	switch(state) {
	case 0:
	case 1:
	case 2:
	case 3:
	case 4:
	case 6:
	case 7:
		return 0;
	case 5:
		return switchInput;
	default:
		cout << "Error in timing detector -- invalid state supplied. State: " << state << endl;
		throw exception();
	}
}
/**
 * Signum: 0 for an exact zero, 1 for positive, -1 for anything else
 * (negative — and, as in the original, NaN also yields -1).
 */
float Demodulator::sign(float input) {
	if (input == 0) return 0;
	return (input > 0) ? 1 : -1;
}
} /* namespace SOQPSK_Demod */
| a50008b272cc92e928818e83b8b405b7135d4b7a.cu | /*
* Demodulator.cu
*
* Created on: May 13, 2013
* Author: adm85
*/
#include <vector>
#include <math.h>
#include <iostream>
#include <fstream>
#include "Demodulator.h"
#include "Samples.h"
#include "Kernels.h"
using namespace std;
namespace PAQ_SOQPSK {
// Constructs a demodulator with every loop state, internal signal, and
// loop-filter gain initialized via initializeStatesAndSignals().
Demodulator::Demodulator() {
	initializeStatesAndSignals();
}
// Nothing to release here: members are values or self-managing containers.
Demodulator::~Demodulator() {}
/**
 * Host (reference) implementation of the SOQPSK demodulation loop.
 *
 * For every input sample: derotate by the current DDS phase, interpolate
 * I/Q with a cubic Farrow filter at the timing estimate mu, make a bit
 * decision whenever the timing strobe fires, then update the phase (PED)
 * and timing (TED) loop filters and the strobe state machine.
 *
 * Side effects: writes several debug CSV traces under hard-coded paths in
 * /home/eswindle/Documents/Output (throws if any file cannot be opened).
 * Returns a reference to a heap-allocated vector of bit decisions which
 * the caller owns (and, as written, never deletes).
 *
 * Fix: the hostIQ open-failure message previously said "hostStrobe"
 * (copy-paste error).
 */
vector<int>& Demodulator::demodulate(Samples& inputSamples){
	//----------------------------------------------------------------------
	// Debug Code
	//----------------------------------------------------------------------
	//Load the CSV file
	//FileReader fileReader;
	//Samples ccwFarrowOutSamples = fileReader.loadCSVFile("/fslhome/adm85/compute/paq/soqpsk_demod/src/matlabFarrowOutput.csv");
	vector<float> ISamples;
	vector<float> QSamples;
	//vector<float> PEDError;
	//vector<float> LoopFilterError;
	//ofstream errorFile = new ofstream("/fslhome/adm85/compute/paq/soqpsk_demod/output/errorTrack.csv");
	ofstream errorFile;
	errorFile.open("/home/eswindle/Documents/Output/errorTrack.csv");
	if(!errorFile.is_open()) {
		cout << "Could not open error file." << endl;
		throw exception();
	}
	ofstream stateFile;
	stateFile.open("/home/eswindle/Documents/Output/hostState.csv");
	if(!stateFile.is_open()) {
		cout << "Could not open state file." << endl;
		throw exception();
	}
	ofstream bitDecisionFile;
	bitDecisionFile.open("/home/eswindle/Documents/Output/hostBitDecisions.csv");
	if(!bitDecisionFile.is_open()) {
		cout << "Could not open bit decision file." << endl;
		throw exception();
	}
	ofstream hostMuFile;
	hostMuFile.open("/home/eswindle/Documents/Output/hostMu.csv");
	if(!hostMuFile.is_open()) {
		cout << "Could not open hostMu file." << endl;
		throw exception();
	}
	ofstream hostStrobeFile;
	hostStrobeFile.open("/home/eswindle/Documents/Output/hostStrobe.csv");
	if(!hostStrobeFile.is_open()) {
		cout << "Could not open hostStrobe file." << endl;
		throw exception();
	}
	ofstream hostIQFile;
	hostIQFile.open("/home/eswindle/Documents/Output/hostIQ.csv");
	if(!hostIQFile.is_open()) {
		cout << "Could not open hostIQ file." << endl;
		throw exception();
	}
	//Return array
	vector<int>* bitIndices = new vector<int>;
	//Sample values
	float I, Q;
	float I_Decision, Q_Decision;
	//Farrow interpolator values
	float h0, h1, h2, h3;
	//Bit decision
	int bitDecision;
	//cout << "About to enter loop..." << endl;
	for(int sampleIndex = 0; sampleIndex < inputSamples.getSize(); sampleIndex++) {
		//cout << "Sample Index: " << sampleIndex << endl;
		//----------------------------------------------------------------------
		// Output Equations
		//----------------------------------------------------------------------
		//Get samples
		I = inputSamples.getI().at(sampleIndex);
		Q = inputSamples.getQ().at(sampleIndex);
		//cout << "Got Samples" << endl;
		//Derotate and add to queues
		xr = (cos(dds) * I) + (sin(dds) * Q);
		yr = (cos(dds) * Q) - (sin(dds) * I);
		// Shift the derotated-sample history down by one
		for(int i=(SAMPLE_BUFFER_SIZE-1); i>0; i--) {
			I_Buffer.at(i) = I_Buffer.at(i-1);
			Q_Buffer.at(i) = Q_Buffer.at(i-1);
		}
		I_Buffer.at(0) = xr;
		Q_Buffer.at(0) = yr;
		//cout << "Derotated" << endl;
		//Grab mu
		mu = M;
		hostMuFile << mu << "\n";
		hostStrobeFile << strobe << "\n";
		stateFile << state << "\n";
		//Farrow Interpolator (cubic coefficients in mu)
		h0 = 0.5 * mu * (mu-1);
		h1 = -0.5 * mu * (mu-3);
		h2 = (-0.5 * mu * (mu+1))+1;
		h3 = h0;
		I_Prime = (h0*xr) + (h1*FI1) + (h2*FI2) + (h3*FI3);
		Q_Prime = (h0*yr) + (h1*FQ1) + (h2*FQ2) + (h3*FQ3);
		//Grab final I and Q values when the timing strobe fires
		if(strobe) {
			Q_Decision = Q_Prime;
			switch(state) {
			case 0: //This should never happen
				cout << "Error in bit decision block. State should never be 0." << endl;
				throw exception();
			case 1:
			case 2:
			case 3:
			case 5:
				I_Decision = B1;
				break;
			case 4:
				I_Decision = I_Buffer.at(TIME_N_MINUS_THREE);
				break;
			case 6:
				I_Decision = I_Buffer.at(TIME_N_MINUS_TWO);
				break;
			case 7: //This should never happen
				cout << "Error in bit decision block. State should never be 7." << endl;
				throw exception();
			}
			//Save to file
			hostIQFile << I_Decision << "," << Q_Decision << "\n";
			//Bit decision
			bitDecision = getSampleIndex(I_Decision, Q_Decision);
			//cout << "SampleIndex: " << sampleIndex << " Bit Decision: " << bitDecision << endl;
			bitDecisionFile << bitDecision << "\n";
			bitIndices->push_back(bitDecision);
		}
		//cout << "I and Q complete" << endl;
		//----------------------------------------------------------------------
		// Internal Signal Update Equations
		//----------------------------------------------------------------------
		//Phase Loop
		ep = calcPhaseError(I_Prime, I_Buffer.at(TIME_N_MINUS_TWO), I_Buffer.at(TIME_N_MINUS_THREE),
				Q_Prime, Q_Buffer.at(TIME_N_MINUS_TWO), Q_Buffer.at(TIME_N_MINUS_THREE),
				state);
		vp = (K1p*ep) + (K2p*ep) + VIp;
		//Timing Loop
		et = calcTimingError(I_Prime, I_Buffer.at(TIME_N_MINUS_TWO), I_Buffer.at(TIME_N_MINUS_THREE),
				Q_Prime, state);
		vt = (K1t*et) + (K2t*et) + VIt;
		//cout << "Internal signals update complete" << endl;
		errorFile << vp << "," << vt << "\n";
		//----------------------------------------------------------------------
		// State Update Equations
		//----------------------------------------------------------------------
		//Farrow Interpolator states
		FI3 = FI2; FI2 = FI1; FI1 = xr;
		FQ3 = FQ2; FQ2 = FQ1; FQ1 = yr;
		//Bit Decision block
		B1 = I_Prime;
		//PED Loop
		ped1 = I_Prime;
		ped2 = Q_Prime;
		VIp = VIp + K2p*ep;
		dds = dds + vp;
		//TED Loop
		ted3 = ted2;
		ted2 = calcTED2(ted1, I_Buffer.at(TIME_N_MINUS_TWO), I_Buffer.at(TIME_N_MINUS_THREE), state);
		ted1 = I_Prime;
		ted5 = ted4;
		ted4 = Q_Prime;
		VIt = VIt + K2t*et;
		OLD_NCO = NCO;
		NCO = fmod(NCO, 1) - vt - 0.5;
		// Strobe fires on NCO underflow; mu for the next tick comes from the
		// pre-update NCO.
		if(NCO < 0) {
			strobe = true;
			M = 2*fmod(OLD_NCO, 1);
			NCO += 1; //Equivalent to fmod(NCO, 1);
		} else {
			strobe = false;
		}
		//State machine: 3-bit code of (strobe now, strobe 1 ago, strobe 2 ago)
		state = (4*strobe) + 2*s1 + s2;
		s2 = s1;
		s1 = strobe;
		//cout << "State updates complete" << endl;
	}
	//Close the debug files
	errorFile.close();
	stateFile.close();
	bitDecisionFile.close();
	hostMuFile.close();
	hostStrobeFile.close();
	hostIQFile.close();
	return *bitIndices;
}
/**
 * GPU port of demodulate(): copies the samples and the four loop-filter
 * constants to the device, runs the whole demodulation loop in a single
 * device thread (the loop is a feedback loop and cannot be parallelized
 * across samples), and copies the bit decisions back.
 * Returns a reference to a heap-allocated vector owned by the caller.
 * Fixes two leaks: the consts device buffer and the output_bits host array
 * were never freed.
 */
vector<int>& Demodulator::demodulateCuda(Samples& inputSamples)
{
	// Initialize variables
	float* i_samples, *q_samples, *consts;
	int* bitdec;
	int sample_size = inputSamples.getSize();
	int* output_bits = new int[sample_size];
	int floatsize = sample_size * sizeof(float);
	int intsize = sample_size * sizeof(int);
	vector<float> pedFilterConstants = calculateLoopFilterConstants(.01, 1, 18.33, 1, 2);
	vector<float> tedFilterConstants = calculateLoopFilterConstants(.005, 1, 12.35, -1, 2);
	// Pack PED then TED gains for the kernel: [K1p, K2p, K1t, K2t]
	float constants[4];
	constants[0] = pedFilterConstants[0];
	constants[1] = pedFilterConstants[1];
	constants[2] = tedFilterConstants[0];
	constants[3] = tedFilterConstants[1];
	// Allocate memory on GPU
	cudaMalloc(&i_samples, floatsize);
	cudaMalloc(&q_samples, floatsize);
	cudaMalloc(&consts, 4 * sizeof(float));
	cudaMalloc(&bitdec, intsize);
	// Copy data to GPU
	cudaMemcpy(i_samples, inputSamples.getI().data(), floatsize, cudaMemcpyHostToDevice);
	cudaMemcpy(q_samples, inputSamples.getQ().data(), floatsize, cudaMemcpyHostToDevice);
	cudaMemcpy(consts, constants, 4 * sizeof(float), cudaMemcpyHostToDevice);
	cout << "Running on Demodulation loop on Device..." << endl;
	// Run on device (single thread: sequential feedback loop)
	cudaDemodLoop<<<1,1>>>(i_samples, q_samples, sample_size, bitdec, consts);
	// Get data back from GPU (blocking copy also synchronizes the kernel)
	cudaMemcpy(output_bits, bitdec, intsize, cudaMemcpyDeviceToHost);
	// Free memory
	cudaFree(i_samples);
	cudaFree(q_samples);
	cudaFree(bitdec);
	cudaFree(consts);		// was leaked
	vector<int>* output = new vector<int>(output_bits, output_bits + sample_size);
	delete[] output_bits;		// was leaked
	return *output;
}
/**
 * Initializes all state and signal values: zeroes every loop-filter delay
 * element, the Farrow interpolator history, and the internal signals, then
 * derives the PED/TED loop-filter gains and zero-fills the sample buffers.
 * NOTE(review): the push_back loop grows I_Buffer/Q_Buffer, so calling this
 * more than once per object would lengthen the buffers — confirm intent.
 */
void Demodulator::initializeStatesAndSignals() {
	//States
	ped1=0;
	ped2=0;
	ted1=0;
	ted2=0;
	ted3=0;
	ted4=0;
	ted5=0;
	VIp=0;
	VIt=0;
	M=0;
	delayedRegister=0;
	NCO=0;
	OLD_NCO = 0;
	state=0;
	s1=0;
	s2=0;
	FI1 = 0;
	FI2 = 0;
	FI3 = 0;
	FQ1 = 0;
	FQ2 = 0;
	FQ3 = 0;
	strobe = false;
	//Internal signals
	xr=0;
	yr=0;
	I_Prime=0;
	Q_Prime=0;
	vp=0;
	ep=0;
	vt=0;
	et=0;
	mu=0;
	//Loop filter constants
	// PED gains: BN=.01, zeta=1, Kp=18.33, K0=1, N=2
	vector<float> pedFilterConstants = calculateLoopFilterConstants(.01, 1, 18.33, 1, 2);
	//cout << "About to assign ped constants" << endl;
	K1p = pedFilterConstants.at(0);
	K2p = pedFilterConstants.at(1);
	// TED gains: BN=.005, zeta=1, Kp=12.35, K0=-1, N=2
	vector<float> tedFilterConstants = calculateLoopFilterConstants(.005, 1, 12.35, -1, 2);
	//cout << "About to assign ted constants" << endl;
	K1t = tedFilterConstants.at(0);
	K2t = tedFilterConstants.at(1);
	//Fill the buffers with zeros
	for(int i=0; i<SAMPLE_BUFFER_SIZE; i++) {
		I_Buffer.push_back(0);
		Q_Buffer.push_back(0);
	}
}
/**
 * Calculates K1 and K2 for the chosen loop by specifying the particular
 * constants: BN (noise bandwidth), zeta (damping), Kp (detector gain),
 * K0 (NCO/DDS gain), N (samples per symbol). These are the standard
 * proportional and integrator gains of a second-order loop filter.
 * NOTE(review): returns a reference to a heap-allocated vector that callers
 * copy from and never delete — a small leak per call.
 */
vector<float>& Demodulator::calculateLoopFilterConstants(float BN, float zeta, float Kp, float K0, float N) {
	float theta = BN/(zeta + .25/zeta);
	float d = 1 + 2*zeta*theta/N + theta/N*theta/N;	// 1 + 2*zeta*(theta/N) + (theta/N)^2
	float K1 = 4 * zeta/N * theta/d;
	float K2 = 4 * theta/N * theta/N/d;
	// Normalize by the detector and NCO gains
	K1 = K1/(Kp*K0);
	K2 = K2/(Kp*K0);
	vector<float>* loopConstantsArray = new vector<float>;
	loopConstantsArray->push_back(K1);
	loopConstantsArray->push_back(K2);
	return *loopConstantsArray;
}
/**
 * Maps a detected (I, Q) pair to its 2-bit symbol index.
 * The sign bits of I and Q form a 2-bit quadrant code which is then
 * inverted: (+,+) -> 0, (+,-) -> 1, (-,+) -> 2, (-,-) -> 3 — exactly the
 * decoding table of the original switch statement.
 */
int Demodulator::getSampleIndex(float I, float Q) {
	const int iBit = (I > 0) ? 1 : 0;
	const int qBit = (Q > 0) ? 1 : 0;
	// quadrant is always in 0..3, so the inverted table is just 3 - quadrant.
	const int quadrant = (iBit << 1) | qBit;
	return 3 - quadrant;
}
/**
 * Calculates phase error: ep = sign(x)*y - x*sign(y), where the (x, y)
 * pair for the first term is chosen by the strobe state machine (the
 * ped1/ped2 member memory, or the samples from two/three ticks ago).
 * States 0-3 and 7 output zero error; only 4, 5 and 6 update the loop.
 * NOTE(review): reads the ped1/ped2 member state as well as the arguments.
 */
float Demodulator::calcPhaseError(float xInterpolant, float xDelayedTwo, float xDelayedThree, float yInterpolant, float yDelayedTwo, float yDelayedThree, int state) {
	//Calculate sign(x)*y
	float signX, y;
	switch(state) {
	case 0:
	case 1:
	case 2:
	case 3:
	case 5:
	case 7:
		signX = sign(ped1);
		y = ped2;
		break;
	case 4:
		signX = sign(xDelayedThree);
		y = yDelayedThree;
		break;
	case 6:
		signX = sign(xDelayedTwo);
		y = yDelayedTwo;
		break;
	default:
		cout << "Error -- invalid state in PED. State: " << state << endl;
		throw exception();
	}
	float productOne = signX * y;
	//Calculate x*sign(y)
	float signY = sign(yInterpolant);
	float productTwo = xInterpolant * signY;
	//Find the final output and return it
	float output = productOne - productTwo;
	// Gate the error: only strobe states 4, 5, 6 feed the phase loop.
	switch(state) {
	case 0:
	case 1:
	case 2:
	case 3:
	case 7:
		return 0;
	case 4:
	case 5:
	case 6:
		return output;
	default:
		cout << "Error -- invalid state in PED. State: " << state << endl;
		throw exception();
	}
}
/**
 * Selects the sample that feeds the second TED delay element based on the
 * strobe state machine: state 4 takes the sample from three ticks ago,
 * state 6 the sample from two ticks ago, and every other valid state
 * (0, 1, 2, 3, 5, 7) the most recent interpolant (ted1). Any state outside
 * 0..7 is reported and rejected.
 */
float Demodulator::calcTED2(float ted1, float xDelayedTwo, float xDelayedThree, int state) {
	if (state == 4) {
		return xDelayedThree;
	}
	if (state == 6) {
		return xDelayedTwo;
	}
	if (state >= 0 && state <= 7) {
		// remaining valid states: 0, 1, 2, 3, 5, 7
		return ted1;
	}
	cout << "Error in timing detector -- invalid state supplied. State: " << state << endl;
	throw exception();
}
/**
 * Calculates timing error from sign-difference products on the I and Q
 * branches: each mid sample (ted2 / ted4) is weighted by the difference of
 * the signs of its neighbors. Only state 5 passes the error through; every
 * other state outputs zero.
 * NOTE(review): uses the ted1..ted5 member delay line in addition to the
 * arguments; the commented-out assignments document the original delay-line
 * updates that now happen in demodulate().
 */
float Demodulator::calcTimingError(float xInterpolant, float xDelayedTwo, float xDelayedThree, float yInterpolant, int state) {
	//Top Branch (I)
	float firstSwitch = calcTED2(ted1, xDelayedTwo, xDelayedThree, state);
	float summandOne = sign(ted3);
	//delayedX3 = delayedX2;
	float summandTwo = sign(firstSwitch);
	float productOne = ted2 * (summandOne - summandTwo);
	//delayedX2 = firstSwitch;
	//Bottom branch (Q)
	summandOne = sign(ted5);
	//delayedY2 = delayedY;
	summandTwo = sign(yInterpolant);
	float productTwo = ted4 * (summandOne - summandTwo);
	//delayedY = yInterpolant;
	//Last Branch
	float switchInput = productOne + productTwo;
	//Return switch: only state 5 drives the timing loop
	switch(state) {
	case 0:
	case 1:
	case 2:
	case 3:
	case 4:
	case 6:
	case 7:
		return 0;
	case 5:
		return switchInput;
	default:
		cout << "Error in timing detector -- invalid state supplied. State: " << state << endl;
		throw exception();
	}
}
/**
* Calculates the sign of the input. Returns 1 for positive, -1 for negative, 0 for 0
*/
float Demodulator::sign(float input) {
if(input == 0) return 0;
if(input > 0) {
return 1;
} else {
return -1;
}
}
} /* namespace SOQPSK_Demod */
|
44aaaa3d4b46d885092d023db57dcafc824c9d20.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(hipblasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(hipblasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(hipblasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(hipblasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(hipblasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(hipblasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(hipblasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(hipblasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( set_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template void caffe_gpu_set<unsigned int>(const int N, const unsigned int alpha, unsigned int* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(hiprandGenerate(Caffe::curand_generator(), r, n));
}
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(hiprandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(hiprandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
hiprandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
hiprandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
} // namespace caffe
| 44aaaa3d4b46d885092d023db57dcafc824c9d20.cu | #include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
set_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template void caffe_gpu_set<unsigned int>(const int N, const unsigned int alpha, unsigned int* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n));
}
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
} // namespace caffe
|
f59ca2c015f0b88f1f3ee0111c17b6c3f45a78de.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include"cuda_kernel.cuh"
#include<cuda_gl_interop.h>
#include<iostream>
#include<fstream>
__global__ void kernel(hipSurfaceObject_t surf1, hipSurfaceObject_t surf2, int szer, int wys, bool flaga) {
register int wsp_x = blockIdx.x * blockDim.x + threadIdx.x;
register int wsp_y = blockIdx.y * blockDim.y + threadIdx.y;
if (wsp_x < szer && wsp_y < wys) {
// register int i = wsp_y*szer + wsp_x;
// uchar4 voxel;// = make_uchar4(255, 255, 255, 255);
// voxel.x = (flaga) ? 255 : 1;
// voxel.y = (flaga) ? 255 : 1;
// voxel.z = (flaga) ? 255 : 1;
// voxel.w = (flaga) ? 255 : 1;
// surf2Dwrite(voxel, (flaga) ? surf2 : surf1, wsp_x * sizeof(uchar4), wsp_y);
/*
register int i = wsp_y*szer + wsp_x;
uchar4 voxel;// = make_uchar4(255, 255, 255, 255);
// surf2Dread(&voxel, surf[blockIdx.x], wsp_x * sizeof(uchar4), wsp_y);
surf2Dread(&voxel, (flaga)?surf1:surf2, wsp_x * sizeof(uchar4), wsp_y);
//if (voxel.x == 255) {
voxel.x = (flaga) ? 255 : 1;
voxel.y = (flaga) ? 255 : 1;
voxel.z = (flaga) ? 255 : 1;
voxel.w = (flaga) ? 255 : 1;
//surf2Dwrite(voxel, surf[blockIdx.x], wsp_x * sizeof(uchar4), wsp_y);
surf2Dwrite(voxel, (flaga) ? surf2 : surf1, wsp_x * sizeof(uchar4), wsp_y);
//}
*/
}
}
typedef unsigned int uint;
typedef unsigned char uchar;
void cuda_texture_interface::zarejestrujTeksture(bool flaga, int indeks, GLuint image, GLenum target, unsigned int flags, unsigned int arrayIndex, unsigned int mipLevel, unsigned long* tekstura, int szer, int wys) {
wysokosc = wys;
szerokosc = szer;
if (flaga) {
HANDLE_ERROR(hipGraphicsGLRegisterImage(&r[indeks], image, target, flags));
HANDLE_ERROR(hipGraphicsMapResources(1, &r[indeks], stream));
HANDLE_ERROR(hipGraphicsSubResourceGetMappedArray(&tab[indeks], r[indeks], arrayIndex, mipLevel));
lalala(indeks, tekstura, szer, wys, true);
}
else {
HANDLE_ERROR(hipGraphicsGLRegisterImage(&r2[indeks], image, target, flags));
HANDLE_ERROR(hipGraphicsMapResources(1, &r2[indeks], stream));
HANDLE_ERROR(hipGraphicsSubResourceGetMappedArray(&tab2[indeks], r2[indeks], arrayIndex, mipLevel));
lalala(indeks, tekstura, szer, wys, false);
}
}
void cuda_texture_interface::odmapowanieTekstur() {
HANDLE_ERROR(hipGraphicsUnmapResources(rozmiar, r, stream));
HANDLE_ERROR(hipGraphicsUnmapResources(rozmiar, r2, stream));
}
void cuda_texture_interface::inicjalizacja() {
hipDeviceProp_t prop;
int dev;
memset(&prop, 0, sizeof(hipDeviceProp_t));
prop.major = 1;
prop.minor = 0;
HANDLE_ERROR(hipChooseDevice(&dev, &prop));
HANDLE_ERROR(hipGLSetGLDevice(dev));
hipStreamCreateWithFlags(&stream, hipStreamDefault);
}
void cuda_texture_interface::lalala(int i, unsigned long* tekstura, int szer, int wys, bool flaga) {
hipChannelFormatDesc channelDesc = hipCreateChannelDesc(8, 8, 8, 8, hipChannelFormatKindUnsigned);
HANDLE_ERROR(hipMemcpyToArray((flaga) ? tab[i] : tab2[i], 0, 0, tekstura, szer*wys * sizeof(unsigned long), hipMemcpyHostToDevice));
hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = hipResourceTypeArray;
resDesc.res.array.array = (flaga) ? tab[i] : tab2[i];
HANDLE_ERROR(hipCreateSurfaceObject(flaga ? &tex[i] : &tex2[i], &resDesc));
}
void cuda_texture_interface::akcja(bool flaga) {
dim3 block(32, 32);
dim3 grid((szerokosc - 1) / 32 + 1, (szerokosc - 1) / 32 + 1);
for (int i = 0; i<rozmiar; ++i)
kernel << <grid, block >> > (tex[i], tex2[i], szerokosc, wysokosc, flaga);
} | f59ca2c015f0b88f1f3ee0111c17b6c3f45a78de.cu | #include"cuda_kernel.cuh"
#include<cuda_gl_interop.h>
#include<iostream>
#include<fstream>
__global__ void kernel(cudaSurfaceObject_t surf1, cudaSurfaceObject_t surf2, int szer, int wys, bool flaga) {
register int wsp_x = blockIdx.x * blockDim.x + threadIdx.x;
register int wsp_y = blockIdx.y * blockDim.y + threadIdx.y;
if (wsp_x < szer && wsp_y < wys) {
// register int i = wsp_y*szer + wsp_x;
// uchar4 voxel;// = make_uchar4(255, 255, 255, 255);
// voxel.x = (flaga) ? 255 : 1;
// voxel.y = (flaga) ? 255 : 1;
// voxel.z = (flaga) ? 255 : 1;
// voxel.w = (flaga) ? 255 : 1;
// surf2Dwrite(voxel, (flaga) ? surf2 : surf1, wsp_x * sizeof(uchar4), wsp_y);
/*
register int i = wsp_y*szer + wsp_x;
uchar4 voxel;// = make_uchar4(255, 255, 255, 255);
// surf2Dread(&voxel, surf[blockIdx.x], wsp_x * sizeof(uchar4), wsp_y);
surf2Dread(&voxel, (flaga)?surf1:surf2, wsp_x * sizeof(uchar4), wsp_y);
//if (voxel.x == 255) {
voxel.x = (flaga) ? 255 : 1;
voxel.y = (flaga) ? 255 : 1;
voxel.z = (flaga) ? 255 : 1;
voxel.w = (flaga) ? 255 : 1;
//surf2Dwrite(voxel, surf[blockIdx.x], wsp_x * sizeof(uchar4), wsp_y);
surf2Dwrite(voxel, (flaga) ? surf2 : surf1, wsp_x * sizeof(uchar4), wsp_y);
//}
*/
}
}
typedef unsigned int uint;
typedef unsigned char uchar;
void cuda_texture_interface::zarejestrujTeksture(bool flaga, int indeks, GLuint image, GLenum target, unsigned int flags, unsigned int arrayIndex, unsigned int mipLevel, unsigned long* tekstura, int szer, int wys) {
wysokosc = wys;
szerokosc = szer;
if (flaga) {
HANDLE_ERROR(cudaGraphicsGLRegisterImage(&r[indeks], image, target, flags));
HANDLE_ERROR(cudaGraphicsMapResources(1, &r[indeks], stream));
HANDLE_ERROR(cudaGraphicsSubResourceGetMappedArray(&tab[indeks], r[indeks], arrayIndex, mipLevel));
lalala(indeks, tekstura, szer, wys, true);
}
else {
HANDLE_ERROR(cudaGraphicsGLRegisterImage(&r2[indeks], image, target, flags));
HANDLE_ERROR(cudaGraphicsMapResources(1, &r2[indeks], stream));
HANDLE_ERROR(cudaGraphicsSubResourceGetMappedArray(&tab2[indeks], r2[indeks], arrayIndex, mipLevel));
lalala(indeks, tekstura, szer, wys, false);
}
}
void cuda_texture_interface::odmapowanieTekstur() {
HANDLE_ERROR(cudaGraphicsUnmapResources(rozmiar, r, stream));
HANDLE_ERROR(cudaGraphicsUnmapResources(rozmiar, r2, stream));
}
void cuda_texture_interface::inicjalizacja() {
cudaDeviceProp prop;
int dev;
memset(&prop, 0, sizeof(cudaDeviceProp));
prop.major = 1;
prop.minor = 0;
HANDLE_ERROR(cudaChooseDevice(&dev, &prop));
HANDLE_ERROR(cudaGLSetGLDevice(dev));
cudaStreamCreateWithFlags(&stream, cudaStreamDefault);
}
void cuda_texture_interface::lalala(int i, unsigned long* tekstura, int szer, int wys, bool flaga) {
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsigned);
HANDLE_ERROR(cudaMemcpyToArray((flaga) ? tab[i] : tab2[i], 0, 0, tekstura, szer*wys * sizeof(unsigned long), cudaMemcpyHostToDevice));
cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeArray;
resDesc.res.array.array = (flaga) ? tab[i] : tab2[i];
HANDLE_ERROR(cudaCreateSurfaceObject(flaga ? &tex[i] : &tex2[i], &resDesc));
}
void cuda_texture_interface::akcja(bool flaga) {
dim3 block(32, 32);
dim3 grid((szerokosc - 1) / 32 + 1, (szerokosc - 1) / 32 + 1);
for (int i = 0; i<rozmiar; ++i)
kernel << <grid, block >> > (tex[i], tex2[i], szerokosc, wysokosc, flaga);
} |
1c9f03d2eaf011f3f2f82009daff9dc24e141974.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<math.h>
#include<time.h>
#include<stdexcept>
#include<iostream>
using namespace std;
int NUMBER_OF_ELEMENTS = 1<<5;
int SIZE = NUMBER_OF_ELEMENTS*sizeof(int);
__global__ void kernel_sum( int* A, int* B, int* C, int NUMBERofELEMENTS);
void sum( int* A, int* B, int* C, int n_el);
int main()
{
//allocate memory for host vectors
int* hostA = (int*)malloc(SIZE);
int* hostB = (int*)malloc(SIZE);
int* hostC = (int*)malloc(SIZE);
int* deviceA,*deviceB,*deviceC;
srand(time(0));
int i;
for(i=0;i<NUMBER_OF_ELEMENTS;i++)
{
hostA[i] = rand();
hostB[i] = rand();
}
//allocate memory for device vectors
hipMalloc(&deviceA,SIZE);
hipMalloc(&deviceB,SIZE);
hipMalloc(&deviceC,SIZE);
//kernel function
hipMemcpy(deviceA,hostA,SIZE,hipMemcpyHostToDevice);
hipMemcpy(deviceB,hostB,SIZE,hipMemcpyHostToDevice);
sum(deviceA,deviceB,deviceC,NUMBER_OF_ELEMENTS);
hipMemcpy(hostC,deviceC,SIZE,hipMemcpyDeviceToHost);
hipFree(deviceA);
hipFree(deviceB);
hipFree(deviceC);
double error = 0;
for(i = 0;i<NUMBER_OF_ELEMENTS;i++)
{
double diff = double((hostA[i]+hostB[i])-hostC[i]);
error+=diff;
cout<<"A+B = "<<hostA[i]+hostB[i]<<"\n";
cout<<"C = "<<hostC[i]<<"\n";
}
error = sqrt(error);
cout<<"error = "<<error<<endl;
delete[] hostA;
delete[] hostB;
delete[] hostC;
return hipDeviceSynchronize();
}
void sum( int* A, int* B, int* C, int n_el)
{
int threadsPerblock,blocksperGrid;
if(n_el<512)
{
threadsPerblock = n_el;
blocksperGrid = 1;
}
else
{
threadsPerblock = 512;
blocksperGrid = ceil(double(n_el)/double(threadsPerblock));
}
//now invoke kernel method
hipLaunchKernelGGL(( kernel_sum), dim3(blocksperGrid),dim3(threadsPerblock), 0, 0, A,B,C,n_el);
}
__global__ void kernel_sum( int* A, int* B, int* C, int NUMBERofELEMENTS)
{
//calculate unique thread index
int index = blockDim.x * blockIdx.x + threadIdx.x;
if(index<NUMBERofELEMENTS)
C[index] = A[index] + B[index];
} | 1c9f03d2eaf011f3f2f82009daff9dc24e141974.cu | #include<math.h>
#include<time.h>
#include<stdexcept>
#include<iostream>
using namespace std;
int NUMBER_OF_ELEMENTS = 1<<5;
int SIZE = NUMBER_OF_ELEMENTS*sizeof(int);
__global__ void kernel_sum( int* A, int* B, int* C, int NUMBERofELEMENTS);
void sum( int* A, int* B, int* C, int n_el);
int main()
{
//allocate memory for host vectors
int* hostA = (int*)malloc(SIZE);
int* hostB = (int*)malloc(SIZE);
int* hostC = (int*)malloc(SIZE);
int* deviceA,*deviceB,*deviceC;
srand(time(0));
int i;
for(i=0;i<NUMBER_OF_ELEMENTS;i++)
{
hostA[i] = rand();
hostB[i] = rand();
}
//allocate memory for device vectors
cudaMalloc(&deviceA,SIZE);
cudaMalloc(&deviceB,SIZE);
cudaMalloc(&deviceC,SIZE);
//kernel function
cudaMemcpy(deviceA,hostA,SIZE,cudaMemcpyHostToDevice);
cudaMemcpy(deviceB,hostB,SIZE,cudaMemcpyHostToDevice);
sum(deviceA,deviceB,deviceC,NUMBER_OF_ELEMENTS);
cudaMemcpy(hostC,deviceC,SIZE,cudaMemcpyDeviceToHost);
cudaFree(deviceA);
cudaFree(deviceB);
cudaFree(deviceC);
double error = 0;
for(i = 0;i<NUMBER_OF_ELEMENTS;i++)
{
double diff = double((hostA[i]+hostB[i])-hostC[i]);
error+=diff;
cout<<"A+B = "<<hostA[i]+hostB[i]<<"\n";
cout<<"C = "<<hostC[i]<<"\n";
}
error = sqrt(error);
cout<<"error = "<<error<<endl;
delete[] hostA;
delete[] hostB;
delete[] hostC;
return cudaDeviceSynchronize();
}
void sum( int* A, int* B, int* C, int n_el)
{
int threadsPerblock,blocksperGrid;
if(n_el<512)
{
threadsPerblock = n_el;
blocksperGrid = 1;
}
else
{
threadsPerblock = 512;
blocksperGrid = ceil(double(n_el)/double(threadsPerblock));
}
//now invoke kernel method
kernel_sum<<<blocksperGrid,threadsPerblock>>>(A,B,C,n_el);
}
__global__ void kernel_sum( int* A, int* B, int* C, int NUMBERofELEMENTS)
{
//calculate unique thread index
int index = blockDim.x * blockIdx.x + threadIdx.x;
if(index<NUMBERofELEMENTS)
C[index] = A[index] + B[index];
} |
367140a2452b7081a210556447d4070177f13274.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "group_point_gpu.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int b = 2;
int n = XSIZE*YSIZE;
int c = 2;
int m = 2;
int nsample = 1;
const float *points = NULL;
hipMalloc(&points, XSIZE*YSIZE);
const int *idx = NULL;
hipMalloc(&idx, XSIZE*YSIZE);
float *out = NULL;
hipMalloc(&out, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
group_point_gpu), dim3(gridBlock),dim3(threadBlock), 0, 0, b,n,c,m,nsample,points,idx,out);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
group_point_gpu), dim3(gridBlock),dim3(threadBlock), 0, 0, b,n,c,m,nsample,points,idx,out);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
group_point_gpu), dim3(gridBlock),dim3(threadBlock), 0, 0, b,n,c,m,nsample,points,idx,out);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 367140a2452b7081a210556447d4070177f13274.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "group_point_gpu.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int b = 2;
int n = XSIZE*YSIZE;
int c = 2;
int m = 2;
int nsample = 1;
const float *points = NULL;
cudaMalloc(&points, XSIZE*YSIZE);
const int *idx = NULL;
cudaMalloc(&idx, XSIZE*YSIZE);
float *out = NULL;
cudaMalloc(&out, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
group_point_gpu<<<gridBlock,threadBlock>>>(b,n,c,m,nsample,points,idx,out);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
group_point_gpu<<<gridBlock,threadBlock>>>(b,n,c,m,nsample,points,idx,out);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
group_point_gpu<<<gridBlock,threadBlock>>>(b,n,c,m,nsample,points,idx,out);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
5e0fbe9bfaefaa03ac572f00a6877017595c8faa.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <time.h>
__global__ void addVectors(int* a, int* b, int N) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < N) {
a[id] = a[id] + b[id];
}
}
int main() {
int N = 4096 * 4096 * 5;
int MAX = 4096;
hipEvent_t start, stop;
float elapsed_time_ms;
hipError_t err = hipSuccess;
//* Normal vector for initialization
int* a = (int*)malloc(sizeof(int) * N);
int* b = (int*)malloc(sizeof(int) * N);
for (int i = 0; i < N; i++) {
a[i] = (rand() % MAX) + 1;
b[i] = (rand() % MAX) + 1;
}
printf("Vector Addition of %d elements\n", N);
// * DS for the GPU
int* ga;
int* gb;
//* Allocating memory for vectors on the GPU
if (hipMalloc(&ga, sizeof(int) * N) != hipSuccess) {
printf("Cannot Allocate Memory for A on GPU\n");
return 0;
}
if (hipMalloc(&gb, sizeof(int) * N) != hipSuccess) {
printf("Cannot Allocate Memory for B on GPU\n");
return 0;
}
//* Copying data contents from CPU to GPU
if (hipMemcpy(ga, a, sizeof(int) * N, hipMemcpyHostToDevice) != hipSuccess) {
printf("Cannot Data from A on CPU to GPU\n");
return 0;
}
if (hipMemcpy(gb, b, sizeof(int) * N, hipMemcpyHostToDevice) != hipSuccess) {
printf("Cannot Data from B on CPU to GPU\n");
return 0;
}
int threadsPerBlock = 1024;
int blocks = (N + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocks, threadsPerBlock);
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
// clock_t begin = clock();
hipLaunchKernelGGL(( addVectors), dim3(blocks), dim3(threadsPerBlock), 0, 0, ga, gb, N);
err = hipGetLastError();
if (err != hipSuccess) {
printf("Failed to launch addVectors kernel (error code)!\n");
return 0;
}
if (hipMemcpy(a, ga, sizeof(int) * N, hipMemcpyDeviceToHost) != hipSuccess) {
printf("Cannot copy added vector from GPU to CPU\n");
return 0;
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed_time_ms, start, stop);
printf("The elapsed time is %f seconds\n", elapsed_time_ms / 1000);
// clock_t end = clock();
// printf("The elapsed time is %f seconds\n", (double)(end - begin) / CLOCKS_PER_SEC);
// for (int i = 0; i < N; i++)
// printf("%d ", a[i]);
// printf("\n");
hipFree(ga);
hipFree(gb);
free(a);
free(b);
return 0;
} | 5e0fbe9bfaefaa03ac572f00a6877017595c8faa.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <time.h>
__global__ void addVectors(int* a, int* b, int N) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < N) {
a[id] = a[id] + b[id];
}
}
int main() {
int N = 4096 * 4096 * 5;
int MAX = 4096;
cudaEvent_t start, stop;
float elapsed_time_ms;
cudaError_t err = cudaSuccess;
//* Normal vector for initialization
int* a = (int*)malloc(sizeof(int) * N);
int* b = (int*)malloc(sizeof(int) * N);
for (int i = 0; i < N; i++) {
a[i] = (rand() % MAX) + 1;
b[i] = (rand() % MAX) + 1;
}
printf("Vector Addition of %d elements\n", N);
// * DS for the GPU
int* ga;
int* gb;
//* Allocating memory for vectors on the GPU
if (cudaMalloc(&ga, sizeof(int) * N) != cudaSuccess) {
printf("Cannot Allocate Memory for A on GPU\n");
return 0;
}
if (cudaMalloc(&gb, sizeof(int) * N) != cudaSuccess) {
printf("Cannot Allocate Memory for B on GPU\n");
return 0;
}
//* Copying data contents from CPU to GPU
if (cudaMemcpy(ga, a, sizeof(int) * N, cudaMemcpyHostToDevice) != cudaSuccess) {
printf("Cannot Data from A on CPU to GPU\n");
return 0;
}
if (cudaMemcpy(gb, b, sizeof(int) * N, cudaMemcpyHostToDevice) != cudaSuccess) {
printf("Cannot Data from B on CPU to GPU\n");
return 0;
}
int threadsPerBlock = 1024;
int blocks = (N + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocks, threadsPerBlock);
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
// clock_t begin = clock();
addVectors<<<blocks, threadsPerBlock>>>(ga, gb, N);
err = cudaGetLastError();
if (err != cudaSuccess) {
printf("Failed to launch addVectors kernel (error code)!\n");
return 0;
}
if (cudaMemcpy(a, ga, sizeof(int) * N, cudaMemcpyDeviceToHost) != cudaSuccess) {
printf("Cannot copy added vector from GPU to CPU\n");
return 0;
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_time_ms, start, stop);
printf("The elapsed time is %f seconds\n", elapsed_time_ms / 1000);
// clock_t end = clock();
// printf("The elapsed time is %f seconds\n", (double)(end - begin) / CLOCKS_PER_SEC);
// for (int i = 0; i < N; i++)
// printf("%d ", a[i]);
// printf("\n");
cudaFree(ga);
cudaFree(gb);
free(a);
free(b);
return 0;
} |
393724d9c9f3896480484fa9051513419de64ddc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "caffe2/core/context_gpu.h"
#include "select_smooth_l1_loss_op.h"
namespace caffe2 {
namespace {
__global__ void SelectSmoothL1Kernel(
const int D, const int H, const int W,
const int M, const float* Y_hat, const float* Y, const float* L, float* out,
const float* S, const float beta) {
// f(x) = 0.5 * x^2 / beta if |x| < beta
// |x| - 0.5 * beta otherwise
CUDA_1D_KERNEL_LOOP(i, M) {
int n = L[i * 4];
int c = L[i * 4 + 1];
int y = L[i * 4 + 2];
int x = L[i * 4 + 3];
for (int j = 0; j < 4; j++){
// Y_hat: N x (A * CLS * 4) x H x W
int ind = n * (D * H * W) + (c + j) * (H * W) + y * W + x;
float y_hat = Y_hat[ind];
float y = Y[i * 4 + j];
float val = y_hat - y;
float abs_val = abs(val);
if (abs_val < beta) {
out[ind] = (0.5 * val * val / beta) / max(S[0], 1.0);
} else {
out[ind] = (abs_val - 0.5 * beta) / max(S[0], 1.0);
}
}
}
}
__global__ void SelectSmoothL1GradientKernel(
const int D, const int H, const int W,
const int M,
const float* Y_hat,
const float* Y,
const float* L,
float* out,
const float* d_loss_data,
float norm,
const float* S,
float beta) {
// f'(x) = x / beta if |x| < beta
// = sign(x) otherwise
// We also scale by norm * d_loss in this kernel for convenience
CUDA_1D_KERNEL_LOOP(i, M) {
int n = L[i * 4];
int c = L[i * 4 + 1];
int y = L[i * 4 + 2];
int x = L[i * 4 + 3];
float d_loss = *d_loss_data;
for (int j = 0; j < 4; j++) {
int ind = n * (D * H * W) + (c + j) * (H * W) + y * W + x;
float y_hat = Y_hat[ind];
float y = Y[i * 4 + j];
float val = y_hat - y;
float abs_val = abs(val);
if (abs_val < beta) {
out[ind] = norm * d_loss * val / beta / max(S[0], 1.0);
} else {
out[ind] = norm * d_loss * ((float(0) < val) - (val < float(0))) / max(S[0], 1.0);
}
}
}
}
} // namespace
template<>
bool SelectSmoothL1LossOp<float, CUDAContext>::RunOnDevice() {
// bbox targets predictions, for example: N x (A * 4) H x W in cls-agnostic case
auto& Y_hat = Input(0);
// true targets: for example: M x 4 where M is the #fg boxes per fpn level
auto& Y = Input(1);
// locations of fg boxes: M x 4
auto& L = Input(2);
// total number of fg boxes across all FPN levels: scalar
auto& S = Input(3);
auto* avg_loss = Output(0);
avg_loss->Resize(vector<int64_t>());
if (Y.size() == 0){
math::Set<float, CUDAContext>(
1, static_cast<float>(0), avg_loss->mutable_data<float>(), &context_);
return true;
}
int N = Y_hat.dim32(0);
int D = Y_hat.dim32(1);
int H = Y_hat.dim32(2);
int W = Y_hat.dim32(3);
int M = Y.dim32(0);
// initialization
buff_.ResizeLike(Y_hat);
math::Set<float, CUDAContext>(
1, static_cast<float>(0), avg_loss->mutable_data<float>(), &context_);
math::Set<float, CUDAContext>(
buff_.size(), 0.0, buff_.mutable_data<float>(), &context_);
// Element-wise smooth l1 loss
// l := SelectSmoothL1((y_hat - y))
hipLaunchKernelGGL(( SelectSmoothL1Kernel), dim3(CAFFE_GET_BLOCKS(buff_.size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
D, H, W,
M, Y_hat.data<float>(), Y.data<float>(),
L.data<float>(), buff_.mutable_data<float>(),
S.data<float>(), beta_);
// Sum of all losses
// al := sum_i l_i
float* avg_loss_data = avg_loss->mutable_data<float>();
math::Sum<float, CUDAContext>(
buff_.size(), buff_.data<float>(), avg_loss_data, &context_);
// Average of input batch size
math::Scale<float, float, CUDAContext>(
1, scale_, avg_loss_data, avg_loss_data, &context_);
return true;
}
template<>
bool SelectSmoothL1LossGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y_hat = Input(0);
auto& Y = Input(1);
auto& L = Input(2);
auto& S = Input(3);
// Below is gradient of net w.r.t. avg_loss ("gradOuput"), should be all 1's
auto& d_avg_loss = Input(4);
auto* d_Y_hat = Output(0); // gradient of net w.r.t. Y_hat ("gradInput")
d_Y_hat->ResizeLike(Y_hat);
math::Set<float, CUDAContext>(
d_Y_hat->size(), 0.0, d_Y_hat->mutable_data<float>(), &context_);
if (Y.size() == 0){
return true;
}
int N = Y_hat.dim32(0);
int D = Y_hat.dim32(1);
int H = Y_hat.dim32(2);
int W = Y_hat.dim32(3);
int M = Y.dim32(0);
// Element-wise weighted difference (can be used to ignore or reweight
// specific components)
// d := (y_hat - y)
// d_Y_hat := d_avg_loss * SelectSmoothL1'((y_hat - y))
hipLaunchKernelGGL(( SelectSmoothL1GradientKernel), dim3(CAFFE_GET_BLOCKS(d_Y_hat->size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
D, H, W, M, Y_hat.data<float>(), Y.data<float>(),
L.data<float>(), d_Y_hat->mutable_data<float>(),
d_avg_loss.data<float>(), scale_, S.data<float>(), beta_);
return true;
}
REGISTER_CUDA_OPERATOR(SelectSmoothL1Loss,
SelectSmoothL1LossOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SelectSmoothL1LossGradient,
SelectSmoothL1LossGradientOp<float, CUDAContext>);
} // namespace caffe2
| 393724d9c9f3896480484fa9051513419de64ddc.cu | /**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "caffe2/core/context_gpu.h"
#include "select_smooth_l1_loss_op.h"
namespace caffe2 {
namespace {
__global__ void SelectSmoothL1Kernel(
const int D, const int H, const int W,
const int M, const float* Y_hat, const float* Y, const float* L, float* out,
const float* S, const float beta) {
// f(x) = 0.5 * x^2 / beta if |x| < beta
// |x| - 0.5 * beta otherwise
CUDA_1D_KERNEL_LOOP(i, M) {
int n = L[i * 4];
int c = L[i * 4 + 1];
int y = L[i * 4 + 2];
int x = L[i * 4 + 3];
for (int j = 0; j < 4; j++){
// Y_hat: N x (A * CLS * 4) x H x W
int ind = n * (D * H * W) + (c + j) * (H * W) + y * W + x;
float y_hat = Y_hat[ind];
float y = Y[i * 4 + j];
float val = y_hat - y;
float abs_val = abs(val);
if (abs_val < beta) {
out[ind] = (0.5 * val * val / beta) / max(S[0], 1.0);
} else {
out[ind] = (abs_val - 0.5 * beta) / max(S[0], 1.0);
}
}
}
}
__global__ void SelectSmoothL1GradientKernel(
const int D, const int H, const int W,
const int M,
const float* Y_hat,
const float* Y,
const float* L,
float* out,
const float* d_loss_data,
float norm,
const float* S,
float beta) {
// f'(x) = x / beta if |x| < beta
// = sign(x) otherwise
// We also scale by norm * d_loss in this kernel for convenience
CUDA_1D_KERNEL_LOOP(i, M) {
int n = L[i * 4];
int c = L[i * 4 + 1];
int y = L[i * 4 + 2];
int x = L[i * 4 + 3];
float d_loss = *d_loss_data;
for (int j = 0; j < 4; j++) {
int ind = n * (D * H * W) + (c + j) * (H * W) + y * W + x;
float y_hat = Y_hat[ind];
float y = Y[i * 4 + j];
float val = y_hat - y;
float abs_val = abs(val);
if (abs_val < beta) {
out[ind] = norm * d_loss * val / beta / max(S[0], 1.0);
} else {
out[ind] = norm * d_loss * ((float(0) < val) - (val < float(0))) / max(S[0], 1.0);
}
}
}
}
} // namespace
template<>
bool SelectSmoothL1LossOp<float, CUDAContext>::RunOnDevice() {
// bbox targets predictions, for example: N x (A * 4) H x W in cls-agnostic case
auto& Y_hat = Input(0);
// true targets: for example: M x 4 where M is the #fg boxes per fpn level
auto& Y = Input(1);
// locations of fg boxes: M x 4
auto& L = Input(2);
// total number of fg boxes across all FPN levels: scalar
auto& S = Input(3);
auto* avg_loss = Output(0);
avg_loss->Resize(vector<int64_t>());
if (Y.size() == 0){
math::Set<float, CUDAContext>(
1, static_cast<float>(0), avg_loss->mutable_data<float>(), &context_);
return true;
}
int N = Y_hat.dim32(0);
int D = Y_hat.dim32(1);
int H = Y_hat.dim32(2);
int W = Y_hat.dim32(3);
int M = Y.dim32(0);
// initialization
buff_.ResizeLike(Y_hat);
math::Set<float, CUDAContext>(
1, static_cast<float>(0), avg_loss->mutable_data<float>(), &context_);
math::Set<float, CUDAContext>(
buff_.size(), 0.0, buff_.mutable_data<float>(), &context_);
// Element-wise smooth l1 loss
// l := SelectSmoothL1((y_hat - y))
SelectSmoothL1Kernel<<<CAFFE_GET_BLOCKS(buff_.size()),
CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
D, H, W,
M, Y_hat.data<float>(), Y.data<float>(),
L.data<float>(), buff_.mutable_data<float>(),
S.data<float>(), beta_);
// Sum of all losses
// al := sum_i l_i
float* avg_loss_data = avg_loss->mutable_data<float>();
math::Sum<float, CUDAContext>(
buff_.size(), buff_.data<float>(), avg_loss_data, &context_);
// Average of input batch size
math::Scale<float, float, CUDAContext>(
1, scale_, avg_loss_data, avg_loss_data, &context_);
return true;
}
template<>
bool SelectSmoothL1LossGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y_hat = Input(0);
auto& Y = Input(1);
auto& L = Input(2);
auto& S = Input(3);
// Below is gradient of net w.r.t. avg_loss ("gradOuput"), should be all 1's
auto& d_avg_loss = Input(4);
auto* d_Y_hat = Output(0); // gradient of net w.r.t. Y_hat ("gradInput")
d_Y_hat->ResizeLike(Y_hat);
math::Set<float, CUDAContext>(
d_Y_hat->size(), 0.0, d_Y_hat->mutable_data<float>(), &context_);
if (Y.size() == 0){
return true;
}
int N = Y_hat.dim32(0);
int D = Y_hat.dim32(1);
int H = Y_hat.dim32(2);
int W = Y_hat.dim32(3);
int M = Y.dim32(0);
// Element-wise weighted difference (can be used to ignore or reweight
// specific components)
// d := (y_hat - y)
// d_Y_hat := d_avg_loss * SelectSmoothL1'((y_hat - y))
SelectSmoothL1GradientKernel<<<CAFFE_GET_BLOCKS(d_Y_hat->size()),
CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
D, H, W, M, Y_hat.data<float>(), Y.data<float>(),
L.data<float>(), d_Y_hat->mutable_data<float>(),
d_avg_loss.data<float>(), scale_, S.data<float>(), beta_);
return true;
}
REGISTER_CUDA_OPERATOR(SelectSmoothL1Loss,
SelectSmoothL1LossOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SelectSmoothL1LossGradient,
SelectSmoothL1LossGradientOp<float, CUDAContext>);
} // namespace caffe2
|
f576bf6525249319fbbb69c577c02233f120f333.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//pass
//--blockDim=256 --gridDim=128
/***************************************************************************
* Copyright (C) 2006 *
* *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, write to the *
* Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
***************************************************************************/
/**
@author Svetlin Manavski <svetlin@manavski.com>
*/
/* aes decryption operation:
* Device code.
*
*/
#ifndef _AESDECRYPT_KERNEL_H_
#define _AESDECRYPT_KERNEL_H_
// IMPERIAL EDIT: comment out C headers and add implicit includes and defines
//#include <stdio.h>
#include "../sbox_D.h"
#include "../sbox_E.h"
#define CUT_BANK_CHECKER( array, index) array[index]
// Thread block size
#define BSIZE 256
#define STAGEBLOCK1(index) CUT_BANK_CHECKER( stageBlock1, index )
#define STAGEBLOCK2(index) CUT_BANK_CHECKER( stageBlock2, index )
#define TBOXE0(index) CUT_BANK_CHECKER( tBox0Block, index )
#define TBOXE1(index) CUT_BANK_CHECKER( tBox1Block, index )
#define TBOXE2(index) CUT_BANK_CHECKER( tBox2Block, index )
#define TBOXE3(index) CUT_BANK_CHECKER( tBox3Block, index )
#define INVSBOX(index) CUT_BANK_CHECKER( invSBoxBlock, index )
texture<unsigned, 1, hipReadModeElementType> texDKey;
__global__ void aesDecrypt256( unsigned * result, unsigned * inData, int inputSize)
{
unsigned bx = blockIdx.x;
unsigned tx = threadIdx.x;
unsigned mod4tx = tx%4;
unsigned int4tx = tx/4;
unsigned idx2 = int4tx*4;
int x;
unsigned keyElem;
__shared__ UByte4 stageBlock1[BSIZE];
__shared__ UByte4 stageBlock2[BSIZE];
__shared__ UByte4 tBox0Block[BSIZE];
__shared__ UByte4 tBox1Block[BSIZE];
__shared__ UByte4 tBox2Block[BSIZE];
__shared__ UByte4 tBox3Block[BSIZE];
__shared__ UByte4 invSBoxBlock[BSIZE];
// input caricati in memoria
STAGEBLOCK1(tx).uival = inData[BSIZE * bx + tx ];
TBOXE0(tx).uival = TBoxi0[tx];
TBOXE1(tx).uival = TBoxi1[tx];
TBOXE2(tx).uival = TBoxi2[tx];
TBOXE3(tx).uival = TBoxi3[tx];
INVSBOX(tx).ubval[0] = inv_SBox[tx];
__syncthreads();
//----------------------------------- 1st stage -----------------------------------
x = mod4tx;
keyElem = tex1Dfetch(texDKey, x);
STAGEBLOCK2(tx).uival = STAGEBLOCK1(tx).uival ^ keyElem;
__syncthreads();
//-------------------------------- end of 1st stage --------------------------------
//----------------------------------- 2nd stage -----------------------------------
unsigned op1 = STAGEBLOCK2( posIdx_D[16 + mod4tx*4] + idx2 ).ubval[0];
unsigned op2 = STAGEBLOCK2( posIdx_D[16 + mod4tx*4+1] + idx2 ).ubval[1];
unsigned op3 = STAGEBLOCK2( posIdx_D[16 + mod4tx*4+2] + idx2 ).ubval[2];
unsigned op4 = STAGEBLOCK2( posIdx_D[16 + mod4tx*4+3] + idx2 ).ubval[3];
op1 = TBOXE0(op1).uival;
op2 = TBOXE1(op2).uival;
op3 = TBOXE2(op3).uival;
op4 = TBOXE3(op4).uival;
x = mod4tx+4;
keyElem = tex1Dfetch(texDKey, x);
STAGEBLOCK1(tx).uival = op1^op2^op3^op4^keyElem;
__syncthreads();
//-------------------------------- end of 2nd stage --------------------------------
//----------------------------------- 3th stage -----------------------------------
op1 = STAGEBLOCK1( posIdx_D[16 + mod4tx*4] + idx2 ).ubval[0];
op2 = STAGEBLOCK1( posIdx_D[16 + mod4tx*4+1] + idx2 ).ubval[1];
op3 = STAGEBLOCK1( posIdx_D[16 + mod4tx*4+2] + idx2 ).ubval[2];
op4 = STAGEBLOCK1( posIdx_D[16 + mod4tx*4+3] + idx2 ).ubval[3];
op1 = TBOXE0(op1).uival;
op2 = TBOXE1(op2).uival;
op3 = TBOXE2(op3).uival;
op4 = TBOXE3(op4).uival;
x = mod4tx+8;
keyElem = tex1Dfetch(texDKey, x);
STAGEBLOCK2(tx).uival = op1^op2^op3^op4^keyElem;
__syncthreads();
//-------------------------------- end of 3th stage --------------------------------
//----------------------------------- 4th stage -----------------------------------
op1 = STAGEBLOCK2( posIdx_D[16 + mod4tx*4] + idx2 ).ubval[0];
op2 = STAGEBLOCK2( posIdx_D[16 + mod4tx*4+1] + idx2 ).ubval[1];
op3 = STAGEBLOCK2( posIdx_D[16 + mod4tx*4+2] + idx2 ).ubval[2];
op4 = STAGEBLOCK2( posIdx_D[16 + mod4tx*4+3] + idx2 ).ubval[3];
op1 = TBOXE0(op1).uival;
op2 = TBOXE1(op2).uival;
op3 = TBOXE2(op3).uival;
op4 = TBOXE3(op4).uival;
x = mod4tx+12;
keyElem = tex1Dfetch(texDKey, x);
STAGEBLOCK1(tx).uival = op1^op2^op3^op4^keyElem;
__syncthreads();
//-------------------------------- end of 4th stage --------------------------------
//----------------------------------- 5th stage -----------------------------------
op1 = STAGEBLOCK1( posIdx_D[16 + mod4tx*4] + idx2 ).ubval[0];
op2 = STAGEBLOCK1( posIdx_D[16 + mod4tx*4+1] + idx2 ).ubval[1];
op3 = STAGEBLOCK1( posIdx_D[16 + mod4tx*4+2] + idx2 ).ubval[2];
op4 = STAGEBLOCK1( posIdx_D[16 + mod4tx*4+3] + idx2 ).ubval[3];
op1 = TBOXE0(op1).uival;
op2 = TBOXE1(op2).uival;
op3 = TBOXE2(op3).uival;
op4 = TBOXE3(op4).uival;
x = mod4tx+16;
keyElem = tex1Dfetch(texDKey, x);
STAGEBLOCK2(tx).uival = op1^op2^op3^op4^keyElem;
__syncthreads();
//-------------------------------- end of 5th stage --------------------------------
//----------------------------------- 6th stage -----------------------------------
op1 = STAGEBLOCK2( posIdx_D[16 + mod4tx*4] + idx2 ).ubval[0];
op2 = STAGEBLOCK2( posIdx_D[16 + mod4tx*4+1] + idx2 ).ubval[1];
op3 = STAGEBLOCK2( posIdx_D[16 + mod4tx*4+2] + idx2 ).ubval[2];
op4 = STAGEBLOCK2( posIdx_D[16 + mod4tx*4+3] + idx2 ).ubval[3];
op1 = TBOXE0(op1).uival;
op2 = TBOXE1(op2).uival;
op3 = TBOXE2(op3).uival;
op4 = TBOXE3(op4).uival;
x = mod4tx+20;
keyElem = tex1Dfetch(texDKey, x);
STAGEBLOCK1(tx).uival = op1^op2^op3^op4^keyElem;
__syncthreads();
//-------------------------------- end of 6th stage --------------------------------
//----------------------------------- 7th stage -----------------------------------
op1 = STAGEBLOCK1( posIdx_D[16 + mod4tx*4] + idx2 ).ubval[0];
op2 = STAGEBLOCK1( posIdx_D[16 + mod4tx*4+1] + idx2 ).ubval[1];
op3 = STAGEBLOCK1( posIdx_D[16 + mod4tx*4+2] + idx2 ).ubval[2];
op4 = STAGEBLOCK1( posIdx_D[16 + mod4tx*4+3] + idx2 ).ubval[3];
op1 = TBOXE0(op1).uival;
op2 = TBOXE1(op2).uival;
op3 = TBOXE2(op3).uival;
op4 = TBOXE3(op4).uival;
x = mod4tx+24;
keyElem = tex1Dfetch(texDKey, x);
STAGEBLOCK2(tx).uival = op1^op2^op3^op4^keyElem;
__syncthreads();
//-------------------------------- end of 7th stage --------------------------------
//----------------------------------- 8th stage -----------------------------------
op1 = STAGEBLOCK2( posIdx_D[16 + mod4tx*4] + idx2 ).ubval[0];
op2 = STAGEBLOCK2( posIdx_D[16 + mod4tx*4+1] + idx2 ).ubval[1];
op3 = STAGEBLOCK2( posIdx_D[16 + mod4tx*4+2] + idx2 ).ubval[2];
op4 = STAGEBLOCK2( posIdx_D[16 + mod4tx*4+3] + idx2 ).ubval[3];
op1 = TBOXE0(op1).uival;
op2 = TBOXE1(op2).uival;
op3 = TBOXE2(op3).uival;
op4 = TBOXE3(op4).uival;
x = mod4tx+28;
keyElem = tex1Dfetch(texDKey, x);
STAGEBLOCK1(tx).uival = op1^op2^op3^op4^keyElem;
__syncthreads();
//-------------------------------- end of 8th stage --------------------------------
//----------------------------------- 9th stage -----------------------------------
op1 = STAGEBLOCK1( posIdx_D[16 + mod4tx*4] + idx2 ).ubval[0];
op2 = STAGEBLOCK1( posIdx_D[16 + mod4tx*4+1] + idx2 ).ubval[1];
op3 = STAGEBLOCK1( posIdx_D[16 + mod4tx*4+2] + idx2 ).ubval[2];
op4 = STAGEBLOCK1( posIdx_D[16 + mod4tx*4+3] + idx2 ).ubval[3];
op1 = TBOXE0(op1).uival;
op2 = TBOXE1(op2).uival;
op3 = TBOXE2(op3).uival;
op4 = TBOXE3(op4).uival;
x = mod4tx+32;
keyElem = tex1Dfetch(texDKey, x);
STAGEBLOCK2(tx).uival = op1^op2^op3^op4^keyElem;
__syncthreads();
//-------------------------------- end of 9th stage --------------------------------
//----------------------------------- 10th stage -----------------------------------
op1 = STAGEBLOCK2( posIdx_D[16 + mod4tx*4] + idx2 ).ubval[0];
op2 = STAGEBLOCK2( posIdx_D[16 + mod4tx*4+1] + idx2 ).ubval[1];
op3 = STAGEBLOCK2( posIdx_D[16 + mod4tx*4+2] + idx2 ).ubval[2];
op4 = STAGEBLOCK2( posIdx_D[16 + mod4tx*4+3] + idx2 ).ubval[3];
op1 = TBOXE0(op1).uival;
op2 = TBOXE1(op2).uival;
op3 = TBOXE2(op3).uival;
op4 = TBOXE3(op4).uival;
x = mod4tx+36;
keyElem = tex1Dfetch(texDKey, x);
STAGEBLOCK1(tx).uival = op1^op2^op3^op4^keyElem;
__syncthreads();
//-------------------------------- end of 10th stage --------------------------------
//----------------------------------- 11th stage -----------------------------------
op1 = STAGEBLOCK1( posIdx_D[16 + mod4tx*4] + idx2 ).ubval[0];
op2 = STAGEBLOCK1( posIdx_D[16 + mod4tx*4+1] + idx2 ).ubval[1];
op3 = STAGEBLOCK1( posIdx_D[16 + mod4tx*4+2] + idx2 ).ubval[2];
op4 = STAGEBLOCK1( posIdx_D[16 + mod4tx*4+3] + idx2 ).ubval[3];
op1 = TBOXE0(op1).uival;
op2 = TBOXE1(op2).uival;
op3 = TBOXE2(op3).uival;
op4 = TBOXE3(op4).uival;
x = mod4tx+40;
keyElem = tex1Dfetch(texDKey, x);
STAGEBLOCK2(tx).uival = op1^op2^op3^op4^keyElem;
__syncthreads();
//-------------------------------- end of 11th stage --------------------------------
//----------------------------------- 12th stage -----------------------------------
op1 = STAGEBLOCK2( posIdx_D[16 + mod4tx*4] + idx2 ).ubval[0];
op2 = STAGEBLOCK2( posIdx_D[16 + mod4tx*4+1] + idx2 ).ubval[1];
op3 = STAGEBLOCK2( posIdx_D[16 + mod4tx*4+2] + idx2 ).ubval[2];
op4 = STAGEBLOCK2( posIdx_D[16 + mod4tx*4+3] + idx2 ).ubval[3];
op1 = TBOXE0(op1).uival;
op2 = TBOXE1(op2).uival;
op3 = TBOXE2(op3).uival;
op4 = TBOXE3(op4).uival;
x = mod4tx+44;
keyElem = tex1Dfetch(texDKey, x);
STAGEBLOCK1(tx).uival = op1^op2^op3^op4^keyElem;
__syncthreads();
//-------------------------------- end of 12th stage --------------------------------
//----------------------------------- 13th stage -----------------------------------
op1 = STAGEBLOCK1( posIdx_D[16 + mod4tx*4] + idx2 ).ubval[0];
op2 = STAGEBLOCK1( posIdx_D[16 + mod4tx*4+1] + idx2 ).ubval[1];
op3 = STAGEBLOCK1( posIdx_D[16 + mod4tx*4+2] + idx2 ).ubval[2];
op4 = STAGEBLOCK1( posIdx_D[16 + mod4tx*4+3] + idx2 ).ubval[3];
op1 = TBOXE0(op1).uival;
op2 = TBOXE1(op2).uival;
op3 = TBOXE2(op3).uival;
op4 = TBOXE3(op4).uival;
x = mod4tx+48;
keyElem = tex1Dfetch(texDKey, x);
STAGEBLOCK2(tx).uival = op1^op2^op3^op4^keyElem;
__syncthreads();
//-------------------------------- end of 13th stage --------------------------------
//----------------------------------- 14th stage -----------------------------------
op1 = STAGEBLOCK2( posIdx_D[16 + mod4tx*4] + idx2 ).ubval[0];
op2 = STAGEBLOCK2( posIdx_D[16 + mod4tx*4+1] + idx2 ).ubval[1];
op3 = STAGEBLOCK2( posIdx_D[16 + mod4tx*4+2] + idx2 ).ubval[2];
op4 = STAGEBLOCK2( posIdx_D[16 + mod4tx*4+3] + idx2 ).ubval[3];
op1 = TBOXE0(op1).uival;
op2 = TBOXE1(op2).uival;
op3 = TBOXE2(op3).uival;
op4 = TBOXE3(op4).uival;
x = mod4tx+52;
keyElem = tex1Dfetch(texDKey, x);
STAGEBLOCK1(tx).uival = op1^op2^op3^op4^keyElem;
__syncthreads();
//-------------------------------- end of 14th stage --------------------------------
//----------------------------------- 15th stage -----------------------------------
op1 = STAGEBLOCK1( posIdx_D[16 + mod4tx*4] + idx2 ).ubval[0];
op2 = STAGEBLOCK1( posIdx_D[16 + mod4tx*4+1] + idx2 ).ubval[1];
op3 = STAGEBLOCK1( posIdx_D[16 + mod4tx*4+2] + idx2 ).ubval[2];
op4 = STAGEBLOCK1( posIdx_D[16 + mod4tx*4+3] + idx2 ).ubval[3];
x = mod4tx+56;
keyElem = tex1Dfetch(texDKey, x);
STAGEBLOCK2(tx).ubval[3] = INVSBOX(op4).ubval[0]^( keyElem>>24);
STAGEBLOCK2(tx).ubval[2] = INVSBOX(op3).ubval[0]^( keyElem>>16 & 0x000000FF);
STAGEBLOCK2(tx).ubval[1] = INVSBOX(op2).ubval[0]^( keyElem>>8 & 0x000000FF);
STAGEBLOCK2(tx).ubval[0] = INVSBOX(op1).ubval[0]^( keyElem & 0x000000FF);
__syncthreads();
//-------------------------------- end of 15th stage --------------------------------
result[BSIZE * bx + tx] = STAGEBLOCK2(tx).uival;
// end of AES
}
#endif // #ifndef _AESDECRYPT_KERNEL_H_
| f576bf6525249319fbbb69c577c02233f120f333.cu | //pass
//--blockDim=256 --gridDim=128
/***************************************************************************
* Copyright (C) 2006 *
* *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, write to the *
* Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
***************************************************************************/
/**
@author Svetlin Manavski <svetlin@manavski.com>
*/
/* aes decryption operation:
* Device code.
*
*/
#ifndef _AESDECRYPT_KERNEL_H_
#define _AESDECRYPT_KERNEL_H_
// IMPERIAL EDIT: comment out C headers and add implicit includes and defines
//#include <stdio.h>
#include "../sbox_D.h"
#include "../sbox_E.h"
#define CUT_BANK_CHECKER( array, index) array[index]
// Thread block size
#define BSIZE 256
#define STAGEBLOCK1(index) CUT_BANK_CHECKER( stageBlock1, index )
#define STAGEBLOCK2(index) CUT_BANK_CHECKER( stageBlock2, index )
#define TBOXE0(index) CUT_BANK_CHECKER( tBox0Block, index )
#define TBOXE1(index) CUT_BANK_CHECKER( tBox1Block, index )
#define TBOXE2(index) CUT_BANK_CHECKER( tBox2Block, index )
#define TBOXE3(index) CUT_BANK_CHECKER( tBox3Block, index )
#define INVSBOX(index) CUT_BANK_CHECKER( invSBoxBlock, index )
texture<unsigned, 1, cudaReadModeElementType> texDKey;
// AES-256 decryption (equivalent inverse cipher, T-table form).
// Grid/block contract: blockDim.x == BSIZE (256); each thread owns one 32-bit
// word of the state, so each group of 4 consecutive threads handles one
// 16-byte AES block. Block bx processes input words [BSIZE*bx, BSIZE*(bx+1)).
// Round keys are fetched through the texDKey texture reference (60 words for
// AES-256: offsets 0..56 in steps of 4); TBoxi0..3, inv_SBox and posIdx_D are
// declared in the included sbox headers.
// NOTE(review): inputSize is kept for interface compatibility but is unused —
// callers appear to size the grid so that every launched word is valid.
__global__ void aesDecrypt256( unsigned * result, unsigned * inData, int inputSize)
{
    unsigned bx = blockIdx.x;
    unsigned tx = threadIdx.x;
    unsigned mod4tx = tx%4;       // word index (0..3) inside this thread's AES block
    unsigned int4tx = tx/4;       // index of the 4-thread AES block group
    unsigned idx2 = int4tx*4;     // first thread index of this group
    int x;
    unsigned keyElem;

    // Two ping-pong state buffers plus shared-memory copies of all lookup tables.
    __shared__ UByte4 stageBlock1[BSIZE];
    __shared__ UByte4 stageBlock2[BSIZE];
    __shared__ UByte4 tBox0Block[BSIZE];
    __shared__ UByte4 tBox1Block[BSIZE];
    __shared__ UByte4 tBox2Block[BSIZE];
    __shared__ UByte4 tBox3Block[BSIZE];
    __shared__ UByte4 invSBoxBlock[BSIZE];

    // Stage the ciphertext and the lookup tables into shared memory.
    STAGEBLOCK1(tx).uival = inData[BSIZE * bx + tx ];
    TBOXE0(tx).uival = TBoxi0[tx];
    TBOXE1(tx).uival = TBoxi1[tx];
    TBOXE2(tx).uival = TBoxi2[tx];
    TBOXE3(tx).uival = TBoxi3[tx];
    INVSBOX(tx).ubval[0] = inv_SBox[tx];
    __syncthreads();

    //----------------------------- 1st stage: initial AddRoundKey -----------------------------
    x = mod4tx;
    keyElem = tex1Dfetch(texDKey, x);
    STAGEBLOCK2(tx).uival = STAGEBLOCK1(tx).uival ^ keyElem;
    __syncthreads();

    //----------------------------- stages 2..14: thirteen T-table rounds ----------------------
    // Each round gathers four permuted bytes (posIdx_D folds InvShiftRows),
    // runs them through the combined InvSubBytes+InvMixColumns tables TBoxi*,
    // XORs in the round key, and ping-pongs between the two state buffers.
    // This loop replaces thirteen hand-unrolled copies of identical code;
    // key offsets (4..52), buffer parity and barrier placement match the
    // original unrolled sequence exactly.
    UByte4 *src = stageBlock2;   // stage 1 left the state in stageBlock2
    UByte4 *dst = stageBlock1;
    for (int stage = 1; stage <= 13; ++stage) {
        unsigned op1 = src[ posIdx_D[16 + mod4tx*4]   + idx2 ].ubval[0];
        unsigned op2 = src[ posIdx_D[16 + mod4tx*4+1] + idx2 ].ubval[1];
        unsigned op3 = src[ posIdx_D[16 + mod4tx*4+2] + idx2 ].ubval[2];
        unsigned op4 = src[ posIdx_D[16 + mod4tx*4+3] + idx2 ].ubval[3];
        op1 = TBOXE0(op1).uival;
        op2 = TBOXE1(op2).uival;
        op3 = TBOXE2(op3).uival;
        op4 = TBOXE3(op4).uival;
        x = mod4tx + 4*stage;                    // round key words 4,8,...,52
        keyElem = tex1Dfetch(texDKey, x);
        dst[tx].uival = op1^op2^op3^op4^keyElem;
        __syncthreads();                         // uniform: all threads reach it
        UByte4 *tmp = src; src = dst; dst = tmp; // swap ping-pong buffers
    }
    // After 13 rounds (odd count) src == stageBlock1 and dst == stageBlock2,
    // the same buffers the original unrolled code used for the final stage.

    //----------------------------- final stage: InvSubBytes + AddRoundKey ---------------------
    {
        unsigned op1 = src[ posIdx_D[16 + mod4tx*4]   + idx2 ].ubval[0];
        unsigned op2 = src[ posIdx_D[16 + mod4tx*4+1] + idx2 ].ubval[1];
        unsigned op3 = src[ posIdx_D[16 + mod4tx*4+2] + idx2 ].ubval[2];
        unsigned op4 = src[ posIdx_D[16 + mod4tx*4+3] + idx2 ].ubval[3];
        x = mod4tx+56;                           // last round key word
        keyElem = tex1Dfetch(texDKey, x);
        // No InvMixColumns in the last round: plain inverse S-box per byte.
        dst[tx].ubval[3] = INVSBOX(op4).ubval[0]^( keyElem>>24);
        dst[tx].ubval[2] = INVSBOX(op3).ubval[0]^( keyElem>>16 & 0x000000FF);
        dst[tx].ubval[1] = INVSBOX(op2).ubval[0]^( keyElem>>8 & 0x000000FF);
        dst[tx].ubval[0] = INVSBOX(op1).ubval[0]^( keyElem & 0x000000FF);
        __syncthreads();
    }

    // dst == stageBlock2 here, matching the original output buffer.
    result[BSIZE * bx + tx] = dst[tx].uival;
    // end of AES
}
#endif // #ifndef _AESDECRYPT_KERNEL_H_
|
4c74f3068d1ad57e52953962ad87a34fa99df456.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@precisions normal z -> s d c
@author Mark Gates
*/
#include "common_magma.h"
#define BLK_X 64
#define BLK_Y 32
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to zlaset.
*/
__global__
void zgeadd_full(
    int m, int n,
    magmaDoubleComplex alpha,
    const magmaDoubleComplex *dA, int ldda,
    magmaDoubleComplex *dB, int lddb )
{
    // Each block covers a BLK_X x BLK_Y tile; each thread walks one row
    // across BLK_Y columns, computing dB = alpha*dA + dB elementwise.
    const int row  = blockIdx.x*BLK_X + threadIdx.x;
    const int col0 = blockIdx.y*BLK_Y;

    // Guard clause: threads beyond the last row have nothing to do.
    if ( row >= m )
        return;

    const magmaDoubleComplex *pA = dA + row + col0*ldda;
    magmaDoubleComplex       *pB = dB + row + col0*lddb;

    if ( col0 + BLK_Y <= n ) {
        // Full block-column: trip count is a compile-time constant, so unroll.
        #pragma unroll
        for( int j=0; j < BLK_Y; ++j ) {
            pB[j*lddb] = alpha*pA[j*ldda] + pB[j*lddb];
        }
    }
    else {
        // Partial block-column at the right edge of the matrix.
        for( int j=0; j < BLK_Y && col0+j < n; ++j ) {
            pB[j*lddb] = alpha*pA[j*ldda] + pB[j*lddb];
        }
    }
}
/**
Purpose
-------
ZGEADD adds two matrices, dB = alpha*dA + dB.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
alpha COMPLEX_16
The scalar alpha.
@param[in]
dA COMPLEX_16 array, dimension (LDDA,N)
The m by n matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in,out]
dB COMPLEX_16 array, dimension (LDDB,N)
The m by n matrix dB.
@param[in]
lddb INTEGER
The leading dimension of the array dB. LDDB >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_zaux2
********************************************************************/
// Queue-aware host entry point: validates arguments, then launches
// zgeadd_full on the given queue to compute dB = alpha*dA + dB for
// m x n column-major matrices.
extern "C" void
magmablas_zgeadd_q(
    magma_int_t m, magma_int_t n,
    magmaDoubleComplex alpha,
    const magmaDoubleComplex *dA, magma_int_t ldda,
    magmaDoubleComplex *dB, magma_int_t lddb,
    magma_queue_t queue )
{
    // Argument validation: negative info encodes the 1-based position of
    // the offending argument, reported via magma_xerbla.
    magma_int_t info = 0;
    if ( m < 0 )
        info = -1;
    else if ( n < 0 )
        info = -2;
    else if ( ldda < max(1,m))
        info = -5;
    else if ( lddb < max(1,m))
        info = -7;
    if ( info != 0 ) {
        magma_xerbla( __func__, -(info) );
        return;
    }
    // Quick return: nothing to do for an empty matrix.
    if ( m == 0 || n == 0 )
        return;
    // BLK_X threads per block; the grid tiles the matrix BLK_X x BLK_Y.
    dim3 threads( BLK_X );
    dim3 grid( (m + BLK_X - 1)/BLK_X, (n + BLK_Y - 1)/BLK_Y );
    // HIP-style launch (hipify translation of the <<<...>>> syntax).
    hipLaunchKernelGGL(( zgeadd_full), dim3(grid), dim3(threads), 0, queue ,
    m, n, alpha, dA, ldda, dB, lddb );
}
/**
@see magmablas_zgeadd_q
@ingroup magma_zaux2
********************************************************************/
// Convenience overload of magmablas_zgeadd_q that runs on the global
// magma_stream. See magmablas_zgeadd_q for argument documentation.
extern "C" void
magmablas_zgeadd(
    magma_int_t m, magma_int_t n,
    magmaDoubleComplex alpha,
    const magmaDoubleComplex *dA, magma_int_t ldda,
    magmaDoubleComplex *dB, magma_int_t lddb )
{
    // Delegate to the queue-aware version on the default MAGMA stream.
    magmablas_zgeadd_q( m, n, alpha, dA, ldda, dB, lddb, magma_stream );
}
| 4c74f3068d1ad57e52953962ad87a34fa99df456.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@precisions normal z -> s d c
@author Mark Gates
*/
#include "common_magma.h"
#define BLK_X 64
#define BLK_Y 32
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to zlaset.
*/
__global__
void zgeadd_full(
    int m, int n,
    magmaDoubleComplex alpha,
    const magmaDoubleComplex *dA, int ldda,
    magmaDoubleComplex *dB, int lddb )
{
    // Tile layout: BLK_X rows by BLK_Y columns per block; one thread per row.
    // Computes dB = alpha*dA + dB over the tile.
    const int i  = blockIdx.x*BLK_X + threadIdx.x;   // global row index
    const int jb = blockIdx.y*BLK_Y;                 // first column of this tile

    if ( i < m ) {
        const magmaDoubleComplex *Arow = dA + i + jb*ldda;
        magmaDoubleComplex       *Brow = dB + i + jb*lddb;

        if ( jb + BLK_Y <= n ) {
            // Interior tile: fixed trip count, let the compiler unroll.
            #pragma unroll
            for( int j=0; j < BLK_Y; ++j )
                Brow[j*lddb] = alpha*Arow[j*ldda] + Brow[j*lddb];
        }
        else {
            // Edge tile: stop at the last real column.
            for( int j=0; jb+j < n && j < BLK_Y; ++j )
                Brow[j*lddb] = alpha*Arow[j*ldda] + Brow[j*lddb];
        }
    }
}
/**
Purpose
-------
ZGEADD adds two matrices, dB = alpha*dA + dB.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
alpha COMPLEX_16
The scalar alpha.
@param[in]
dA COMPLEX_16 array, dimension (LDDA,N)
The m by n matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in,out]
dB COMPLEX_16 array, dimension (LDDB,N)
The m by n matrix dB.
@param[in]
lddb INTEGER
The leading dimension of the array dB. LDDB >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zgeadd_q(
    magma_int_t m, magma_int_t n,
    magmaDoubleComplex alpha,
    const magmaDoubleComplex *dA, magma_int_t ldda,
    magmaDoubleComplex *dB, magma_int_t lddb,
    magma_queue_t queue )
{
    // Argument checks; the negative code is the 1-based position of the
    // offending argument, reported through magma_xerbla.
    magma_int_t info = 0;
    if      ( m < 0 )           info = -1;
    else if ( n < 0 )           info = -2;
    else if ( ldda < max(1,m) ) info = -5;
    else if ( lddb < max(1,m) ) info = -7;

    if ( info != 0 ) {
        magma_xerbla( __func__, -(info) );
        return;
    }

    // Empty matrix: quick return.
    if ( m == 0 || n == 0 )
        return;

    // One BLK_X-thread block per BLK_X x BLK_Y tile of the matrix.
    const dim3 threads( BLK_X );
    const dim3 grid( (m + BLK_X - 1)/BLK_X, (n + BLK_Y - 1)/BLK_Y );
    zgeadd_full<<< grid, threads, 0, queue >>>( m, n, alpha, dA, ldda, dB, lddb );
}
/**
@see magmablas_zgeadd_q
@ingroup magma_zaux2
********************************************************************/
// Convenience overload of magmablas_zgeadd_q that runs on the global
// magma_stream. See magmablas_zgeadd_q for argument documentation.
extern "C" void
magmablas_zgeadd(
    magma_int_t m, magma_int_t n,
    magmaDoubleComplex alpha,
    const magmaDoubleComplex *dA, magma_int_t ldda,
    magmaDoubleComplex *dB, magma_int_t lddb )
{
    // Delegate to the queue-aware version on the default MAGMA stream.
    magmablas_zgeadd_q( m, n, alpha, dA, ldda, dB, lddb, magma_stream );
}
|
78ced4c99e433e7f18e5988551650c9747532768.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstddef>
#include <cstdint>
#include <limits>
#include <stdint.h>
#include <stdio.h>
#include <atomic>
#include <assert.h>
#if defined(GGML_USE_HIPBLAS)
#include <hip/hip_runtime.h>
#include <hipblas/hipblas.h>
#include <hip/hip_fp16.h>
#ifdef __HIP_PLATFORM_AMD__
// for rocblas_initialize()
#include "rocblas/rocblas.h"
#endif
#define CUBLAS_COMPUTE_32F HIPBLAS_R_32F
#define CUBLAS_COMPUTE_32F_FAST_16F HIPBLAS_R_32F
#define HIPBLAS_GEMM_DEFAULT HIPBLAS_GEMM_DEFAULT
#define HIPBLAS_OP_N HIPBLAS_OP_N
#define HIPBLAS_OP_T HIPBLAS_OP_T
#define HIPBLAS_STATUS_SUCCESS HIPBLAS_STATUS_SUCCESS
#define CUBLAS_TF32_TENSOR_OP_MATH 0
#define HIP_R_16F HIPBLAS_R_16F
#define HIP_R_32F HIPBLAS_R_32F
#define __shfl_xor_sync(mask, var, laneMask, width) __shfl_xor(var, laneMask, width)
#define hipblasCreate hipblasCreate
#define hipblasGemmEx hipblasGemmEx
#define hipblasHandle_t hipblasHandle_t
#define cublasSetMathMode(handle, mode) HIPBLAS_STATUS_SUCCESS
#define hipblasSetStream hipblasSetStream
#define hipblasSgemm hipblasSgemm
#define hipblasStatus_t hipblasStatus_t
#define hipDeviceProp_t hipDeviceProp_t
#define hipDeviceSynchronize hipDeviceSynchronize
#define hipError_t hipError_t
#define hipEventCreateWithFlags hipEventCreateWithFlags
#define hipEventDisableTiming hipEventDisableTiming
#define hipEventRecord hipEventRecord
#define hipEvent_t hipEvent_t
#define hipEventDestroy hipEventDestroy
#define hipFree hipFree
#define hipHostFree hipHostFree
#define hipGetDevice hipGetDevice
#define hipGetDeviceCount hipGetDeviceCount
#define hipGetDeviceProperties hipGetDeviceProperties
#define hipGetErrorString hipGetErrorString
#define hipGetLastError hipGetLastError
#define hipMalloc hipMalloc
#define hipHostMalloc(ptr, size) hipHostMalloc(ptr, size, hipHostMallocDefault)
#define hipMemcpy hipMemcpy
#define hipMemcpy2DAsync hipMemcpy2DAsync
#define hipMemcpyAsync hipMemcpyAsync
#define hipMemcpyDeviceToDevice hipMemcpyDeviceToDevice
#define hipMemcpyDeviceToHost hipMemcpyDeviceToHost
#define hipMemcpyHostToDevice hipMemcpyHostToDevice
#define hipMemcpyKind hipMemcpyKind
#define hipMemset hipMemset
#define hipOccupancyMaxPotentialBlockSize hipOccupancyMaxPotentialBlockSize
#define hipSetDevice hipSetDevice
#define hipStreamCreateWithFlags hipStreamCreateWithFlags
#define hipStreamNonBlocking hipStreamNonBlocking
#define hipStreamSynchronize hipStreamSynchronize
#define hipStreamWaitEvent(stream, event) hipStreamWaitEvent(stream, event, 0)
#define hipStream_t hipStream_t
#define hipSuccess hipSuccess
#else
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <hip/hip_fp16.h>
#endif
#include "ggml-cuda.h"
#include "ggml.h"
#define MIN_CC_DP4A 610 // minimum compute capability for __dp4a, an intrinsic for byte-wise dot products
#ifndef CC_TURING
#define CC_TURING 700
#endif
#if defined(GGML_USE_HIPBLAS)
#define __CUDA_ARCH__ 1300
typedef int8_t int8x4_t __attribute__((ext_vector_type(4)));
// HIP replacement for CUDA's __vsubss4: per-byte signed subtraction with
// saturation (each of the four packed int8 lanes clamped to [-128, 127]).
// reinterpret_cast is used to view the 32-bit ints as 4-lane int8 vectors
// without changing their bit patterns.
static __device__ __forceinline__ int __vsubss4(const int a, const int b) {
    const int8x4_t va = reinterpret_cast<const int8x4_t&>(a);
    const int8x4_t vb = reinterpret_cast<const int8x4_t&>(b);
    const int8x4_t c = __builtin_elementwise_sub_sat(va, vb);
    return reinterpret_cast<const int&>(c);
}
// HIP replacement for CUDA's __dp4a: 4-way byte-wise dot product with
// 32-bit accumulate, c += sum_{k=0..3} a.byte[k] * b.byte[k] (signed).
// Dispatches per GPU architecture at compile time:
//   - gfx906/908/90a/1030: native sdot4 instruction via builtin
//   - gfx1100: sudot4 builtin (signed x signed form)
//   - gfx1010/gfx900: hand-written GCN assembly using v_mul_i32_i24 with
//     per-byte SDWA selects and v_add3_u32 to accumulate
//   - everything else: scalar fallback via 4-lane int8 vectors
static __device__ __forceinline__ int __dp4a(const int a, const int b, int c) {
#if defined(__gfx906__) || defined(__gfx908__) || defined(__gfx90a__) || defined(__gfx1030__)
    c = __builtin_amdgcn_sdot4(a, b, c, false);
#elif defined(__gfx1100__)
    c = __builtin_amdgcn_sudot4( true, a, true, b, c, false);
#elif defined(__gfx1010__) || defined(__gfx900__)
    int tmp1;
    int tmp2;
    asm("\n \
        v_mul_i32_i24 %1, sext(%3), sext(%4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0 \n \
        v_mul_i32_i24 %2, sext(%3), sext(%4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1 \n \
        v_add3_u32 %0, %1, %2, %0 \n \
        v_mul_i32_i24 %1, sext(%3), sext(%4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:BYTE_2 \n \
        v_mul_i32_i24 %2, sext(%3), sext(%4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:BYTE_3 \n \
        v_add3_u32 %0, %1, %2, %0 \n \
        "
        : "+v"(c), "=&v"(tmp1), "=&v"(tmp2)
        : "v"(a), "v"(b)
    );
#else
    const int8x4_t va = reinterpret_cast<const int8x4_t&>(a);
    const int8x4_t vb = reinterpret_cast<const int8x4_t&>(b);
    c += va[0] * vb[0] + va[1] * vb[1] + va[2] * vb[2] + va[3] * vb[3];
#endif
    return c;
}
#endif
#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
#endif
static_assert(sizeof(half) == sizeof(ggml_fp16_t), "wrong fp16 size");
#define CUDA_CHECK(err) \
do { \
hipError_t err_ = (err); \
if (err_ != hipSuccess) { \
fprintf(stderr, "CUDA error %d at %s:%d: %s\n", err_, __FILE__, __LINE__, \
hipGetErrorString(err_)); \
exit(1); \
} \
} while (0)
#if CUDART_VERSION >= 12000
#define CUBLAS_CHECK(err) \
do { \
hipblasStatus_t err_ = (err); \
if (err_ != HIPBLAS_STATUS_SUCCESS) { \
fprintf(stderr, "\ncuBLAS error %d at %s:%d: %s\n", \
err_, __FILE__, __LINE__, cublasGetStatusString(err_)); \
exit(1); \
} \
} while (0)
#else
#define CUBLAS_CHECK(err) \
do { \
hipblasStatus_t err_ = (err); \
if (err_ != HIPBLAS_STATUS_SUCCESS) { \
fprintf(stderr, "\ncuBLAS error %d at %s:%d\n", err_, __FILE__, __LINE__); \
exit(1); \
} \
} while (0)
#endif // CUDART_VERSION >= 11
#ifdef GGML_CUDA_F16
typedef half dfloat; // dequantize float
typedef half2 dfloat2;
#else
typedef float dfloat; // dequantize float
typedef float2 dfloat2;
#endif //GGML_CUDA_F16
// Fetch the i32-th 32-bit word from an int8 buffer that is only guaranteed
// to be 2-byte aligned: assemble it from two aligned 16-bit halves instead
// of doing a (potentially misaligned) direct int load.
static __device__ __forceinline__ int get_int_from_int8(const int8_t * x8, const int & i32) {
    const uint16_t * halves = (const uint16_t *) (x8 + sizeof(int) * i32); // 2-byte aligned
    const int lo = halves[0];
    const int hi = halves[1];
    return lo | (hi << 16);
}
// Same as get_int_from_int8 but for unsigned byte data: rebuild the i32-th
// 32-bit word from two 16-bit loads, since only 2-byte alignment is assumed.
static __device__ __forceinline__ int get_int_from_uint8(const uint8_t * x8, const int & i32) {
    const uint16_t * halves = (const uint16_t *) (x8 + sizeof(int) * i32); // 2-byte aligned
    int word = halves[0];
    word |= halves[1] << 16;
    return word;
}
// Fetch the i32-th 32-bit word from an int8 buffer known to be 4-byte
// aligned, so a single direct int load is safe (contrast get_int_from_int8).
static __device__ __forceinline__ int get_int_from_int8_aligned(const int8_t * x8, const int & i32) {
    return *((int *) (x8 + sizeof(int) * i32)); // assume at least 4 byte alignment
}
// Fetch the i32-th 32-bit word from a uint8 buffer known to be 4-byte
// aligned, so a single direct int load is safe (contrast get_int_from_uint8).
static __device__ __forceinline__ int get_int_from_uint8_aligned(const uint8_t * x8, const int & i32) {
    return *((int *) (x8 + sizeof(int) * i32)); // assume at least 4 byte alignment
}
typedef void (*dequantize_kernel_t)(const void * vx, const int ib, const int iqs, dfloat2 & v);
typedef void (*to_fp32_cuda_t)(const void * __restrict__ x, float * __restrict__ y, int k, hipStream_t stream);
typedef void (*dot_kernel_k_t)(const void * __restrict__ vx, const int ib, const int iqs, const float * __restrict__ y, float & v);
typedef void (*cpy_kernel_t)(const char * cx, char * cdst);
typedef void (*ggml_cuda_func_t)(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst);
typedef void (*ggml_cuda_op_t)(
const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i, float * src0_ddf_i,
float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
hipStream_t & cudaStream_main);
// QK = number of values after dequantization
// QR = QK / number of values before dequantization
// QI = number of 32 bit integers before dequantization
#define QK4_0 32
#define QR4_0 2
#define QI4_0 (QK4_0 / (4 * QR4_0))
// q4_0: 32 4-bit quants sharing one fp16 scale (no minimum).
typedef struct {
    half    d;              // delta
    uint8_t qs[QK4_0 / 2];  // nibbles / quants
} block_q4_0;
static_assert(sizeof(block_q4_0) == sizeof(ggml_fp16_t) + QK4_0 / 2, "wrong q4_0 block size/padding");
#define QK4_1 32
#define QR4_1 2
#define QI4_1 (QK4_1 / (4 * QR4_1))
// q4_1: like q4_0 but with an additional per-block minimum (dm.y).
typedef struct {
    half2   dm;             // dm.x = delta, dm.y = min
    uint8_t qs[QK4_1 / 2];  // nibbles / quants
} block_q4_1;
static_assert(sizeof(block_q4_1) == sizeof(ggml_fp16_t) * 2 + QK4_1 / 2, "wrong q4_1 block size/padding");
#define QK5_0 32
#define QR5_0 2
#define QI5_0 (QK5_0 / (4 * QR5_0))
// q5_0: 5-bit quants; the 5th bit of each value is packed into qh.
typedef struct {
    half    d;              // delta
    uint8_t qh[4];          // 5-th bit of quants
    uint8_t qs[QK5_0 / 2];  // nibbles / quants
} block_q5_0;
static_assert(sizeof(block_q5_0) == sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_0 / 2, "wrong q5_0 block size/padding");
#define QK5_1 32
#define QR5_1 2
#define QI5_1 (QK5_1 / (4 * QR5_1))
// q5_1: like q5_0 but with an additional per-block minimum (dm.y).
typedef struct {
    half2   dm;             // dm.x = delta, dm.y = min
    uint8_t qh[4];          // 5-th bit of quants
    uint8_t qs[QK5_1 / 2];  // nibbles / quants
} block_q5_1;
static_assert(sizeof(block_q5_1) == 2 * sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_1 / 2, "wrong q5_1 block size/padding");
#define QK8_0 32
#define QR8_0 1
#define QI8_0 (QK8_0 / (4 * QR8_0))
// q8_0: full 8-bit quants with one fp16 scale.
typedef struct {
    half    d;              // delta
    int8_t  qs[QK8_0];      // quants
} block_q8_0;
static_assert(sizeof(block_q8_0) == sizeof(ggml_fp16_t) + QK8_0, "wrong q8_0 block size/padding");
#define QK8_1 32
#define QR8_1 1
#define QI8_1 (QK8_1 / (4 * QR8_1))
// q8_1: 8-bit quants carrying delta and the block sum (used as the
// activation-side format in quantized dot products).
// NOTE(review): qs is sized with QK8_0 rather than QK8_1 — the two are
// equal (32), so the layout is correct, but the constant looks borrowed.
typedef struct {
    half2   ds;             // ds.x = delta, ds.y = sum
    int8_t  qs[QK8_0];      // quants
} block_q8_1;
static_assert(sizeof(block_q8_1) == 2*sizeof(ggml_fp16_t) + QK8_0, "wrong q8_1 block size/padding");
// Function-pointer types used to dispatch the quantized mat-mul paths per quant format:
// vec_dot_q_cuda_t         — dot product of one quant block against a q8_1 block
// allocate_tiles_cuda_t    — hands out tile buffers (x_ql/x_dm/x_qh/x_sc); presumably
//                            shared-memory tiles — confirm at the definition sites
// load_tiles_cuda_t        — loads/unpacks quantized x data into the tiles
// vec_dot_q_mul_mat_cuda_t — per-(i,j,k) dot product between loaded x and y tiles
typedef float (*vec_dot_q_cuda_t)(const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs);
typedef void (*allocate_tiles_cuda_t)(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc);
typedef void (*load_tiles_cuda_t)(
const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row);
typedef float (*vec_dot_q_mul_mat_cuda_t)(
const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
const int * __restrict__ y_qs, const half2 * __restrict__ y_ms, const int & i, const int & j, const int & k);
//================================= k-quants
// QK_K: number of weights per k-quant super-block; K_SCALE_SIZE: bytes of packed
// sub-block scales/mins per super-block. GGML_QKK_64 selects the small 64-value variant.
#ifdef GGML_QKK_64
#define QK_K 64
#define K_SCALE_SIZE 4
#else
#define QK_K 256
#define K_SCALE_SIZE 12
#endif
// q2_K: 2-bit quants in 16-value sub-blocks; each sub-block stores a 4-bit scale and a
// 4-bit min in scales[], with the fp16 super-block scale/min pair in dm.
#define QR2_K 4
#define QI2_K (QK_K / (4*QR2_K))
typedef struct {
uint8_t scales[QK_K/16]; // scales and mins, quantized with 4 bits
uint8_t qs[QK_K/4]; // quants
half2 dm; // super-block scale for quantized scales/mins
} block_q2_K;
static_assert(sizeof(block_q2_K) == 2*sizeof(ggml_fp16_t) + QK_K/16 + QK_K/4, "wrong q2_K block size/padding");
// q3_K: 3-bit quants split as 2 low bits in qs plus a high bit in hmask, with packed
// sub-block scales and an fp16 super-block scale.
#define QR3_K 4
#define QI3_K (QK_K / (4*QR3_K))
typedef struct {
uint8_t hmask[QK_K/8]; // quants - high bit
uint8_t qs[QK_K/4]; // quants - low 2 bits
#ifdef GGML_QKK_64
uint8_t scales[2]; // scales, quantized with 8 bits
#else
uint8_t scales[K_SCALE_SIZE]; // scales, quantized with 6 bits
#endif
half d; // super-block scale
} block_q3_K;
// NOTE(review): size assert intentionally disabled (the RHS does not hold for both
// QKK variants) — do not re-enable without checking both configurations.
//static_assert(sizeof(block_q3_K) == sizeof(ggml_fp16_t) + QK_K / 4 + QK_K / 8 + K_SCALE_SIZE, "wrong q3_K block size/padding");
// q4_K: 4-bit quants with per-sub-block 6-bit scales/mins (QK_K==256 layout) or two fp16
// scales plus 4-bit sub-block scales (QK_K==64 layout).
#define QR4_K 2
#define QI4_K (QK_K / (4*QR4_K))
#ifdef GGML_QKK_64
typedef struct {
half dm[2]; // super-block scales/mins
uint8_t scales[2]; // 4-bit block scales/mins
uint8_t qs[QK_K/2]; // 4--bit quants
} block_q4_K;
static_assert(sizeof(block_q4_K) == sizeof(half2) + QK_K/2 + 2, "wrong q4_K block size/padding");
#else
typedef struct {
half2 dm; // super-block scale for quantized scales/mins
uint8_t scales[3*QK_K/64]; // scales, quantized with 6 bits
uint8_t qs[QK_K/2]; // 4--bit quants
} block_q4_K;
static_assert(sizeof(block_q4_K) == 2*sizeof(ggml_fp16_t) + 3*QK_K/64 + QK_K/2, "wrong q4_K block size/padding");
#endif
// q5_K: 5-bit quants (low 4 bits in qs, high bit in qh) with packed 6-bit sub-block
// scales/mins (QK_K==256) or 8-bit signed block scales (QK_K==64).
#define QR5_K 2
#define QI5_K (QK_K / (4*QR5_K))
#ifdef GGML_QKK_64
typedef struct {
half d; // super-block scale
int8_t scales[QK_K/16]; // block scales
uint8_t qh[QK_K/8]; // quants, high bit
uint8_t qs[QK_K/2]; // quants, low 4 bits
} block_q5_K;
static_assert(sizeof(block_q5_K) == sizeof(ggml_fp16_t) + QK_K/2 + QK_K/8 + QK_K/16, "wrong q5_K block size/padding");
#else
typedef struct {
half2 dm; // super-block scale for quantized scales/mins
uint8_t scales[K_SCALE_SIZE]; // scales and mins, quantized with 6 bits
uint8_t qh[QK_K/8]; // quants, high bit
uint8_t qs[QK_K/2]; // quants, low 4 bits
} block_q5_K;
static_assert(sizeof(block_q5_K) == 2*sizeof(ggml_fp16_t) + K_SCALE_SIZE + QK_K/2 + QK_K/8, "wrong q5_K block size/padding");
#endif
// q6_K: 6-bit quants split as 4 low bits (ql) + 2 high bits (qh), with signed 8-bit
// sub-block scales and one fp16 super-block scale.
#define QR6_K 2
#define QI6_K (QK_K / (4*QR6_K))
typedef struct {
uint8_t ql[QK_K/2]; // quants, lower 4 bits
uint8_t qh[QK_K/4]; // quants, upper 2 bits
int8_t scales[QK_K/16]; // scales
half d; // delta
} block_q6_K;
static_assert(sizeof(block_q6_K) == sizeof(ggml_fp16_t) + 13*QK_K/16, "wrong q6_K block size/padding");
#define WARP_SIZE 32
#define MATRIX_ROW_PADDING 512 // last row of quant. matrices is a multiple of this to avoid out-of-bounds memory accesses
// Thread-block sizes used when launching the element-wise/helper kernels below.
#define CUDA_ADD_BLOCK_SIZE 256
#define CUDA_MUL_BLOCK_SIZE 256
#define CUDA_GELU_BLOCK_SIZE 256
#define CUDA_SILU_BLOCK_SIZE 256
#define CUDA_CPY_BLOCK_SIZE 32
#define CUDA_SCALE_BLOCK_SIZE 256
#define CUDA_ROPE_BLOCK_SIZE 256
#define CUDA_ALIBI_BLOCK_SIZE 32
#define CUDA_DIAG_MASK_INF_BLOCK_SIZE 32
#define CUDA_QUANTIZE_BLOCK_SIZE 256
#define CUDA_DEQUANTIZE_BLOCK_SIZE 256
// dmmv = dequantize_mul_mat_vec
// Build-time tuning knobs for the dmmv kernels; overridable from the build system.
#ifndef GGML_CUDA_DMMV_X
#define GGML_CUDA_DMMV_X 32
#endif
#ifndef GGML_CUDA_MMV_Y
#define GGML_CUDA_MMV_Y 1
#endif
// Number of super-block quants processed per inner-loop iteration in the k-quant
// dmmv kernels; only 1 or 2 are supported (enforced below when user-defined).
#ifndef K_QUANTS_PER_ITERATION
#define K_QUANTS_PER_ITERATION 2
#else
static_assert(K_QUANTS_PER_ITERATION == 1 || K_QUANTS_PER_ITERATION == 2, "K_QUANTS_PER_ITERATION must be 1 or 2");
#endif
// Per-tensor GPU state: one device buffer and one sync event per device, since a
// tensor may be split across up to GGML_CUDA_MAX_DEVICES GPUs.
struct ggml_tensor_extra_gpu {
void * data_device[GGML_CUDA_MAX_DEVICES]; // 1 pointer for each device for split tensors
hipEvent_t events[GGML_CUDA_MAX_DEVICES]; // events for synchronizing multiple GPUs
};
// Global backend state: device bookkeeping, the row-split ratios, the scratch buffer,
// and per-device cuBLAS handles / main streams. Initialized elsewhere in this file.
static int g_device_count = -1;
static int g_main_device = 0;
static int g_compute_capabilities[GGML_CUDA_MAX_DEVICES];
static float g_tensor_split[GGML_CUDA_MAX_DEVICES] = {0};
static bool g_mul_mat_q = true;
static void * g_scratch_buffer = nullptr;
static size_t g_scratch_size = 1024*1024*1024; // 1 GB by default
static size_t g_scratch_offset = 0;
static hipblasHandle_t g_cublas_handles[GGML_CUDA_MAX_DEVICES] = {nullptr};
static hipStream_t g_cudaStreams_main[GGML_CUDA_MAX_DEVICES] = { nullptr };
// Element-wise add with modular broadcast of y: dst[i] = x[i] + y[i % ky], for i < kx.
// One thread per output element; the launcher provides ceil(kx / block) blocks.
static __global__ void add_f32(const float * x, const float * y, float * dst, const int kx, const int ky) {
    const int idx = blockDim.x*blockIdx.x + threadIdx.x;
    if (idx < kx) {
        dst[idx] = x[idx] + y[idx % ky];
    }
}
// Mixed-precision add: dst[i] = x[i] + y[i] with half inputs/outputs for x/dst and
// float for y; the sum is performed in half via __hadd. One thread per element.
static __global__ void add_f16_f32_f16(const half * x, const float * y, half * dst, const int k) {
    const int idx = blockDim.x*blockIdx.x + threadIdx.x;
    if (idx < k) {
        dst[idx] = __hadd(x[idx], __float2half(y[idx]));
    }
}
// Element-wise multiply with modular broadcast of y: dst[i] = x[i] * y[i % ky], for i < kx.
static __global__ void mul_f32(const float * x, const float * y, float * dst, const int kx, const int ky) {
    const int idx = blockDim.x*blockIdx.x + threadIdx.x;
    if (idx < kx) {
        dst[idx] = x[idx] * y[idx % ky];
    }
}
// Tanh-approximation GELU: dst[i] = 0.5*x*(1 + tanh(sqrt(2/pi)*x*(1 + 0.044715*x^2))).
// One thread per element; all math in single precision.
static __global__ void gelu_f32(const float * x, float * dst, const int k) {
    const float GELU_COEF_A    = 0.044715f;
    const float SQRT_2_OVER_PI = 0.79788456080286535587989211986876f;

    const int idx = blockDim.x*blockIdx.x + threadIdx.x;
    if (idx >= k) {
        return;
    }

    const float v     = x[idx];
    const float inner = SQRT_2_OVER_PI*v*(1.0f + GELU_COEF_A*v*v);
    dst[idx] = 0.5f*v*(1.0f + tanhf(inner));
}
// SiLU (sigmoid-weighted linear unit): dst[i] = x[i] * sigmoid(x[i]) = x / (1 + e^-x).
static __global__ void silu_f32(const float * x, float * dst, const int k) {
    const int idx = blockDim.x*blockIdx.x + threadIdx.x;
    if (idx < k) {
        const float v = x[idx];
        dst[idx] = v / (1.0f + expf(-v));
    }
}
// Row-wise normalization: dst = (x - mean) / sqrt(var + eps) over each row of length ncols.
// One warp per row (blockDim.x == WARP_SIZE assumed), blockDim.y rows per block.
// NOTE(review): there is no bounds check on `row` — the launcher must cover exactly the
// number of rows; confirm at the call site.
static __global__ void norm_f32(const float * x, float * dst, const int ncols) {
const int row = blockIdx.x*blockDim.y + threadIdx.y;
const int tid = threadIdx.x;
const float eps = 1e-5f;
float mean = 0.0f;
float var = 0.0f;
// each lane accumulates a strided partial sum and sum of squares
for (int col = tid; col < ncols; col += WARP_SIZE) {
const float xi = x[row*ncols + col];
mean += xi;
var += xi * xi;
}
// sum up partial sums
// butterfly reduction across the 32 lanes; afterwards every lane holds the full totals
#pragma unroll
for (int mask = 16; mask > 0; mask >>= 1) {
mean += __shfl_xor_sync(0xffffffff, mean, mask, 32);
var += __shfl_xor_sync(0xffffffff, var, mask, 32);
}
mean /= ncols;
var = var / ncols - mean * mean; // E[x^2] - E[x]^2
const float inv_var = rsqrtf(var + eps);
for (int col = tid; col < ncols; col += WARP_SIZE) {
dst[row*ncols + col] = (x[row*ncols + col] - mean) * inv_var;
}
}
// Row-wise RMS normalization: dst = x / sqrt(mean(x^2) + eps) over each row of length
// ncols. One warp per row (blockDim.x == WARP_SIZE assumed), blockDim.y rows per block.
// NOTE(review): like norm_f32 above, no bounds check on `row` — launcher must match.
static __global__ void rms_norm_f32(const float * x, float * dst, const int ncols, const float eps) {
const int row = blockIdx.x*blockDim.y + threadIdx.y;
const int tid = threadIdx.x;
float tmp = 0.0f; // partial sum for thread in warp
for (int col = tid; col < ncols; col += WARP_SIZE) {
const float xi = x[row*ncols + col];
tmp += xi * xi;
}
// sum up partial sums
// butterfly reduction across the 32 lanes; every lane ends with the full sum of squares
#pragma unroll
for (int mask = 16; mask > 0; mask >>= 1) {
tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
}
const float mean = tmp / ncols;
const float scale = rsqrtf(mean + eps);
for (int col = tid; col < ncols; col += WARP_SIZE) {
dst[row*ncols + col] = scale * x[row*ncols + col];
}
}
// Dequantize the two q4_0 values sharing byte iqs of block ib: v = (nibble - 8) * d.
// dfloat2 is presumably half2 when GGML_CUDA_F16 is defined and float2 otherwise
// (typedef lives elsewhere in this file) — the two branches below match that split.
static __device__ __forceinline__ void dequantize_q4_0(const void * vx, const int ib, const int iqs, dfloat2 & v){
const block_q4_0 * x = (const block_q4_0 *) vx;
const dfloat d = x[ib].d;
const int vui = x[ib].qs[iqs];
v.x = vui & 0xF; // low nibble
v.y = vui >> 4; // high nibble
#ifdef GGML_CUDA_F16
v = __hsub2(v, {8.0f, 8.0f});
v = __hmul2(v, {d, d});
#else
v.x = (v.x - 8.0f) * d;
v.y = (v.y - 8.0f) * d;
#endif // GGML_CUDA_F16
}
// Dequantize the two q4_1 values sharing byte iqs of block ib: v = nibble * d + m,
// where d/m are the low/high halves of the packed dm.
static __device__ __forceinline__ void dequantize_q4_1(const void * vx, const int ib, const int iqs, dfloat2 & v){
const block_q4_1 * x = (const block_q4_1 *) vx;
const dfloat d = __low2half(x[ib].dm);
const dfloat m = __high2half(x[ib].dm);
const int vui = x[ib].qs[iqs];
v.x = vui & 0xF; // low nibble
v.y = vui >> 4; // high nibble
#ifdef GGML_CUDA_F16
v = __hmul2(v, {d, d});
v = __hadd2(v, {m, m});
#else
v.x = (v.x * d) + m;
v.y = (v.y * d) + m;
#endif // GGML_CUDA_F16
}
// Dequantize two q5_0 values: recombine the 4 low bits from qs with the 5th bit pulled
// out of the packed qh word, then v = (q - 16) * d.
static __device__ __forceinline__ void dequantize_q5_0(const void * vx, const int ib, const int iqs, dfloat2 & v){
const block_q5_0 * x = (const block_q5_0 *) vx;
const dfloat d = x[ib].d;
uint32_t qh;
memcpy(&qh, x[ib].qh, sizeof(qh)); // memcpy avoids unaligned 32-bit load from the packed struct
const int xh_0 = ((qh >> (iqs + 0)) << 4) & 0x10; // 5th bit of the low-nibble value
const int xh_1 = ((qh >> (iqs + 12)) ) & 0x10; // 5th bit of the high-nibble value (bit iqs+16)
v.x = ((x[ib].qs[iqs] & 0xf) | xh_0);
v.y = ((x[ib].qs[iqs] >> 4) | xh_1);
#ifdef GGML_CUDA_F16
v = __hsub2(v, {16.0f, 16.0f});
v = __hmul2(v, {d, d});
#else
v.x = (v.x - 16.0f) * d;
v.y = (v.y - 16.0f) * d;
#endif // GGML_CUDA_F16
}
// Dequantize two q5_1 values: same 5-bit reconstruction as q5_0, but affine: v = q*d + m.
static __device__ __forceinline__ void dequantize_q5_1(const void * vx, const int ib, const int iqs, dfloat2 & v){
const block_q5_1 * x = (const block_q5_1 *) vx;
const dfloat d = __low2half(x[ib].dm);
const dfloat m = __high2half(x[ib].dm);
uint32_t qh;
memcpy(&qh, x[ib].qh, sizeof(qh)); // memcpy avoids unaligned 32-bit load from the packed struct
const int xh_0 = ((qh >> (iqs + 0)) << 4) & 0x10; // 5th bit of the low-nibble value
const int xh_1 = ((qh >> (iqs + 12)) ) & 0x10; // 5th bit of the high-nibble value (bit iqs+16)
v.x = ((x[ib].qs[iqs] & 0xf) | xh_0);
v.y = ((x[ib].qs[iqs] >> 4) | xh_1);
#ifdef GGML_CUDA_F16
v = __hmul2(v, {d, d});
v = __hadd2(v, {m, m});
#else
v.x = (v.x * d) + m;
v.y = (v.y * d) + m;
#endif // GGML_CUDA_F16
}
// Dequantize two adjacent q8_0 values from block ib: v = qs[iqs..iqs+1] * d.
static __device__ __forceinline__ void dequantize_q8_0(const void * vx, const int ib, const int iqs, dfloat2 & v){
const block_q8_0 * x = (const block_q8_0 *) vx;
const dfloat d = x[ib].d;
v.x = x[ib].qs[iqs + 0];
v.y = x[ib].qs[iqs + 1];
#ifdef GGML_CUDA_F16
v = __hmul2(v, {d, d});
#else
v.x *= d;
v.y *= d;
#endif // GGML_CUDA_F16
}
//================================== k-quants
// Dequantize one q2_K super-block per CUDA block into yy[i*QK_K ...].
// QK_K==256 path uses tid up to 63 (n = tid/32), i.e. 64 threads each writing 4 values;
// QK_K==64 path uses 32 threads. Confirm the launch config at the call site.
static __global__ void dequantize_block_q2_K(const void * __restrict__ vx, float * __restrict__ yy) {
const int i = blockIdx.x;
const block_q2_K * x = (const block_q2_K *) vx;
const int tid = threadIdx.x;
#if QK_K == 256
const int n = tid/32;
const int l = tid - 32*n;
const int is = 8*n + l/16; // sub-block index for the 4-bit scale/min pair
const uint8_t q = x[i].qs[32*n + l];
float * y = yy + i*QK_K + 128*n;
float dall = __low2half(x[i].dm);
float dmin = __high2half(x[i].dm);
// each of the 4 outputs takes a different 2-bit field of q and its own scale/min
y[l+ 0] = dall * (x[i].scales[is+0] & 0xF) * ((q >> 0) & 3) - dmin * (x[i].scales[is+0] >> 4);
y[l+32] = dall * (x[i].scales[is+2] & 0xF) * ((q >> 2) & 3) - dmin * (x[i].scales[is+2] >> 4);
y[l+64] = dall * (x[i].scales[is+4] & 0xF) * ((q >> 4) & 3) - dmin * (x[i].scales[is+4] >> 4);
y[l+96] = dall * (x[i].scales[is+6] & 0xF) * ((q >> 6) & 3) - dmin * (x[i].scales[is+6] >> 4);
#else
const int is = tid/16; // 0 or 1
const int il = tid%16; // 0...15
const uint8_t q = x[i].qs[il] >> (2*is);
float * y = yy + i*QK_K + 16*is + il;
float dall = __low2half(x[i].dm);
float dmin = __high2half(x[i].dm);
y[ 0] = dall * (x[i].scales[is+0] & 0xF) * ((q >> 0) & 3) - dmin * (x[i].scales[is+0] >> 4);
y[32] = dall * (x[i].scales[is+2] & 0xF) * ((q >> 4) & 3) - dmin * (x[i].scales[is+2] >> 4);
#endif
}
// Dequantize one q3_K super-block per CUDA block into yy[i*QK_K ...]: recombines the
// low 2 bits (qs) with the high bit (hmask), applies the unpacked 6-bit sub-block scale
// (biased by 32) and the fp16 super-block scale d.
static __global__ void dequantize_block_q3_K(const void * __restrict__ vx, float * __restrict__ yy) {
const int i = blockIdx.x;
const block_q3_K * x = (const block_q3_K *) vx;
#if QK_K == 256
const int r = threadIdx.x/4;
const int tid = r/2;
const int is0 = r%2;
const int l0 = 16*is0 + 4*(threadIdx.x%4);
const int n = tid / 4;
const int j = tid - 4*n;
uint8_t m = 1 << (4*n + j); // hmask bit selecting the high bit for this 32-value slice
int is = 8*n + 2*j + is0;
int shift = 2*j;
// reassemble the 6-bit scale for sub-block `is` from the packed scales[] layout
int8_t us = is < 4 ? (x[i].scales[is-0] & 0xF) | (((x[i].scales[is+8] >> 0) & 3) << 4) :
is < 8 ? (x[i].scales[is-0] & 0xF) | (((x[i].scales[is+4] >> 2) & 3) << 4) :
is < 12 ? (x[i].scales[is-8] >> 4) | (((x[i].scales[is+0] >> 4) & 3) << 4) :
(x[i].scales[is-8] >> 4) | (((x[i].scales[is-4] >> 6) & 3) << 4);
float d_all = x[i].d;
float dl = d_all * (us - 32);
float * y = yy + i*QK_K + 128*n + 32*j;
const uint8_t * q = x[i].qs + 32*n;
const uint8_t * hm = x[i].hmask;
// absent hmask bit means the value is in the lower half of the 3-bit range (offset -4)
for (int l = l0; l < l0+4; ++l) y[l] = dl * ((int8_t)((q[l] >> shift) & 3) - ((hm[l] & m) ? 0 : 4));
#else
const int tid = threadIdx.x;
const int is = tid/16; // 0 or 1
const int il = tid%16; // 0...15
const int im = il/8; // 0...1
const int in = il%8; // 0...7
float * y = yy + i*QK_K + 16*is + il;
const uint8_t q = x[i].qs[il] >> (2*is);
const uint8_t h = x[i].hmask[in] >> (2*is + im);
const float d = (float)x[i].d;
if (is == 0) {
y[ 0] = d * ((x[i].scales[0] & 0xF) - 8) * ((int8_t)((q >> 0) & 3) - ((h >> 0) & 1 ? 0 : 4));
y[32] = d * ((x[i].scales[1] & 0xF) - 8) * ((int8_t)((q >> 4) & 3) - ((h >> 4) & 1 ? 0 : 4));
} else {
y[ 0] = d * ((x[i].scales[0] >> 4) - 8) * ((int8_t)((q >> 0) & 3) - ((h >> 0) & 1 ? 0 : 4));
y[32] = d * ((x[i].scales[1] >> 4) - 8) * ((int8_t)((q >> 4) & 3) - ((h >> 4) & 1 ? 0 : 4));
}
#endif
}
#if QK_K == 256
// Extract the j-th 6-bit scale (d) and min (m) from the 12-byte packed q4_K/q5_K
// scales array: entries 0-3 are stored directly in the low 6 bits; entries 4-7 are
// split between the low nibble of bytes 8-11 and the top 2 bits of bytes 0-7.
static inline __device__ void get_scale_min_k4(int j, const uint8_t * q, uint8_t & d, uint8_t & m) {
if (j < 4) {
d = q[j] & 63; m = q[j + 4] & 63;
} else {
d = (q[j+4] & 0xF) | ((q[j-4] >> 6) << 4);
m = (q[j+4] >> 4) | ((q[j-0] >> 6) << 4);
}
}
#endif
// Dequantize one q4_K super-block per CUDA block into yy[i*QK_K ...]:
// y = d * scale * nibble - dmin * min, with scale/min from get_scale_min_k4.
static __global__ void dequantize_block_q4_K(const void * __restrict__ vx, float * __restrict__ yy) {
const block_q4_K * x = (const block_q4_K *) vx;
const int i = blockIdx.x;
#if QK_K == 256
// assume 32 threads
const int tid = threadIdx.x;
const int il = tid/8;
const int ir = tid%8;
const int is = 2*il; // sub-block index of the low-nibble group
const int n = 4; // values per thread per output half
float * y = yy + i*QK_K + 64*il + n*ir;
const float dall = __low2half(x[i].dm);
const float dmin = __high2half(x[i].dm);
const uint8_t * q = x[i].qs + 32*il + n*ir;
uint8_t sc, m;
get_scale_min_k4(is + 0, x[i].scales, sc, m);
const float d1 = dall * sc; const float m1 = dmin * m;
get_scale_min_k4(is + 1, x[i].scales, sc, m);
const float d2 = dall * sc; const float m2 = dmin * m;
for (int l = 0; l < n; ++l) {
y[l + 0] = d1 * (q[l] & 0xF) - m1; // low nibbles
y[l +32] = d2 * (q[l] >> 4) - m2; // high nibbles
}
#else
const int tid = threadIdx.x;
const uint8_t * q = x[i].qs;
float * y = yy + i*QK_K;
const float d = (float)x[i].dm[0];
const float m = (float)x[i].dm[1];
y[tid+ 0] = d * (x[i].scales[0] & 0xF) * (q[tid] & 0xF) - m * (x[i].scales[0] >> 4);
y[tid+32] = d * (x[i].scales[1] & 0xF) * (q[tid] >> 4) - m * (x[i].scales[1] >> 4);
#endif
}
// Dequantize one q5_K super-block per CUDA block into yy[i*QK_K ...]: 4 low bits from
// qs plus the high bit from qh (selected by a per-slice mask hm), scaled as in q4_K.
static __global__ void dequantize_block_q5_K(const void * __restrict__ vx, float * __restrict__ yy) {
const block_q5_K * x = (const block_q5_K *) vx;
const int i = blockIdx.x;
#if QK_K == 256
// assume 64 threads - this is very slightly better than the one below
const int tid = threadIdx.x;
const int il = tid/16; // il is in 0...3
const int ir = tid%16; // ir is in 0...15
const int is = 2*il; // is is in 0...6
float * y = yy + i*QK_K + 64*il + 2*ir;
const float dall = __low2half(x[i].dm);
const float dmin = __high2half(x[i].dm);
const uint8_t * ql = x[i].qs + 32*il + 2*ir;
const uint8_t * qh = x[i].qh + 2*ir;
uint8_t sc, m;
get_scale_min_k4(is + 0, x[i].scales, sc, m);
const float d1 = dall * sc; const float m1 = dmin * m;
get_scale_min_k4(is + 1, x[i].scales, sc, m);
const float d2 = dall * sc; const float m2 = dmin * m;
uint8_t hm = 1 << (2*il); // selects this slice's high bit inside qh
y[ 0] = d1 * ((ql[ 0] & 0xF) + (qh[ 0] & hm ? 16 : 0)) - m1;
y[ 1] = d1 * ((ql[ 1] & 0xF) + (qh[ 1] & hm ? 16 : 0)) - m1;
hm <<= 1; // next bit belongs to the high-nibble group
y[32] = d2 * ((ql[ 0] >> 4) + (qh[ 0] & hm ? 16 : 0)) - m2;
y[33] = d2 * ((ql[ 1] >> 4) + (qh[ 1] & hm ? 16 : 0)) - m2;
#else
const int tid = threadIdx.x;
const uint8_t q = x[i].qs[tid];
const int im = tid/8; // 0...3
const int in = tid%8; // 0...7
const int is = tid/16; // 0 or 1
const uint8_t h = x[i].qh[in] >> im;
const float d = x[i].d;
float * y = yy + i*QK_K + tid;
y[ 0] = d * x[i].scales[is+0] * ((q & 0xF) - ((h >> 0) & 1 ? 0 : 16));
y[32] = d * x[i].scales[is+2] * ((q >> 4) - ((h >> 4) & 1 ? 0 : 16));
#endif
}
// Dequantize one q6_K super-block per CUDA block into yy[i*QK_K ...]: 4 low bits from
// ql plus 2 high bits from qh, reassembled into a signed value biased by 32 and scaled
// by the signed sub-block scale and the fp16 super-block scale d.
static __global__ void dequantize_block_q6_K(const void * __restrict__ vx, float * __restrict__ yy) {
const block_q6_K * x = (const block_q6_K *) vx;
const int i = blockIdx.x;
#if QK_K == 256
// assume 64 threads - this is very slightly better than the one below
const int tid = threadIdx.x;
const int ip = tid/32; // ip is 0 or 1
const int il = tid - 32*ip; // 0...32
const int is = 8*ip + il/16;
float * y = yy + i*QK_K + 128*ip + il;
const float d = x[i].d;
const uint8_t * ql = x[i].ql + 64*ip + il;
const uint8_t qh = x[i].qh[32*ip + il];
const int8_t * sc = x[i].scales + is;
y[ 0] = d * sc[0] * ((int8_t)((ql[ 0] & 0xF) | (((qh >> 0) & 3) << 4)) - 32);
y[32] = d * sc[2] * ((int8_t)((ql[32] & 0xF) | (((qh >> 2) & 3) << 4)) - 32);
y[64] = d * sc[4] * ((int8_t)((ql[ 0] >> 4) | (((qh >> 4) & 3) << 4)) - 32);
y[96] = d * sc[6] * ((int8_t)((ql[32] >> 4) | (((qh >> 6) & 3) << 4)) - 32);
#else
// assume 32 threads
const int tid = threadIdx.x;
const int ip = tid/16; // 0 or 1
const int il = tid - 16*ip; // 0...15
float * y = yy + i*QK_K + 16*ip + il;
const float d = x[i].d;
const uint8_t ql = x[i].ql[16*ip + il];
const uint8_t qh = x[i].qh[il] >> (2*ip);
const int8_t * sc = x[i].scales;
y[ 0] = d * sc[ip+0] * ((int8_t)((ql & 0xF) | (((qh >> 0) & 3) << 4)) - 32);
y[32] = d * sc[ip+2] * ((int8_t)((ql >> 4) | (((qh >> 4) & 3) << 4)) - 32);
#endif
}
// Fused dequantize(q2_K) + mat-vec: dst[row] = sum_col dequant(x[row,col]) * yy[col].
// Launch: blockDim.x == 32 (one warp reduces a row), blockDim.y rows per block,
// gridDim.y covering nrows; ncols must be a multiple of QK_K.
// Fix: the row guard was `row > nrows`, which let row == nrows through — one full
// row past the end of vx/dst. It must be `row >= nrows` (rows are 0-based).
static __global__ void dequantize_mul_mat_vec_q2_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols, int nrows) {
    static_assert(16%K_QUANTS_PER_ITERATION == 0, "16 must be divisible by K_QUANTS_PER_ITERATION");

    const int row = blockIdx.y*blockDim.y + threadIdx.y;
    if (row >= nrows) { // was `row > nrows`: off-by-one out-of-bounds access
        return;
    }

    const int num_blocks_per_row = ncols / QK_K;
    const int ib0 = row*num_blocks_per_row;

    const block_q2_K * x = (const block_q2_K *)vx + ib0;

    float tmp = 0; // partial sum for thread in warp

#if QK_K == 256
    const int tid = threadIdx.x/K_QUANTS_PER_ITERATION;  // 0...31 or 0...15
    const int ix  = threadIdx.x%K_QUANTS_PER_ITERATION;  // 0 or 0,1

    const int step = 16/K_QUANTS_PER_ITERATION;

    const int im = tid/step;      // 0 or 1. 0 computes 0..., 1 computes 128...
    const int in = tid - step*im; // 0...15 or 0...7

    const int l0 = K_QUANTS_PER_ITERATION*in; // 0...15 or 0...14 in steps of 2
    const int q_offset = 32*im + l0;
    const int s_offset =  8*im;
    const int y_offset = 128*im + l0;

    // unpack the 4-bit scales (d) and mins (m) for 8 sub-blocks at once
    uint32_t aux[4];
    const uint8_t * d = (const uint8_t *)aux;
    const uint8_t * m = (const uint8_t *)(aux + 2);

    for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) {

        const float   * y = yy + i * QK_K + y_offset;
        const uint8_t * q = x[i].qs + q_offset;

        const float dall = __low2half(x[i].dm);
        const float dmin = __high2half(x[i].dm);

        const uint32_t * a = (const uint32_t *)(x[i].scales + s_offset);
        aux[0] = a[0] & 0x0f0f0f0f;        // low nibbles: scales
        aux[1] = a[1] & 0x0f0f0f0f;
        aux[2] = (a[0] >> 4) & 0x0f0f0f0f; // high nibbles: mins
        aux[3] = (a[1] >> 4) & 0x0f0f0f0f;

        float sum1 = 0, sum2 = 0; // sum1: scale terms, sum2: min terms
        for (int l = 0; l < K_QUANTS_PER_ITERATION; ++l) {
            sum1 += y[l+ 0] * d[0] * ((q[l+ 0] >> 0) & 3)
                  + y[l+32] * d[2] * ((q[l+ 0] >> 2) & 3)
                  + y[l+64] * d[4] * ((q[l+ 0] >> 4) & 3)
                  + y[l+96] * d[6] * ((q[l+ 0] >> 6) & 3)
                  + y[l+16] * d[1] * ((q[l+16] >> 0) & 3)
                  + y[l+48] * d[3] * ((q[l+16] >> 2) & 3)
                  + y[l+80] * d[5] * ((q[l+16] >> 4) & 3)
                  + y[l+112] * d[7] * ((q[l+16] >> 6) & 3);
            sum2 += y[l+ 0] * m[0] + y[l+32] * m[2] + y[l+64] * m[4] + y[l+96] * m[6]
                  + y[l+16] * m[1] + y[l+48] * m[3] + y[l+80] * m[5] + y[l+112] * m[7];
        }
        tmp += dall * sum1 - dmin * sum2;
    }
#else
    const int tid = threadIdx.x/(2*K_QUANTS_PER_ITERATION); // 0...15 or 0...7
    const int ix  = threadIdx.x%(2*K_QUANTS_PER_ITERATION); // 0....1 or 0...3
    const int offset = tid * K_QUANTS_PER_ITERATION;

    uint32_t uaux[2];
    const uint8_t * d = (const uint8_t *)uaux;

    for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) {

        const float    * y = yy + i * QK_K + offset;
        const uint8_t  * q = x[i].qs + offset;
        const uint32_t * s = (const uint32_t *)x[i].scales;

        uaux[0] = s[0] & 0x0f0f0f0f;        // scales
        uaux[1] = (s[0] >> 4) & 0x0f0f0f0f; // mins

        const float2 dall = __half22float2(x[i].dm);

        float sum1 = 0, sum2 = 0;
        for (int l = 0; l < K_QUANTS_PER_ITERATION; ++l) {
            const uint8_t ql = q[l];
            sum1 += y[l+ 0] * d[0] * ((ql >> 0) & 3)
                  + y[l+16] * d[1] * ((ql >> 2) & 3)
                  + y[l+32] * d[2] * ((ql >> 4) & 3)
                  + y[l+48] * d[3] * ((ql >> 6) & 3);
            sum2 += y[l+0] * d[4] + y[l+16] * d[5] + y[l+32] * d[6] + y[l+48] * d[7];
        }
        tmp += dall.x * sum1 - dall.y * sum2;
    }
#endif

    // sum up partial sums and write back result (warp butterfly reduction)
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
    }

    if (threadIdx.x == 0) {
        dst[row] = tmp;
    }
}
// Fused dequantize(q3_K) + mat-vec: dst[row] = sum_col dequant(x[row,col]) * yy[col].
// Launch: blockDim.x == 32 (one warp reduces a row), blockDim.y rows per block,
// gridDim.y covering nrows; ncols must be a multiple of QK_K.
// Fix: row guard was `row > nrows` (off-by-one, reads/writes one row past the end);
// corrected to `row >= nrows`.
static __global__ void dequantize_mul_mat_vec_q3_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols, int nrows) {

    const int row = blockIdx.y*blockDim.y + threadIdx.y;
    if (row >= nrows) { // was `row > nrows`: off-by-one out-of-bounds access
        return;
    }

    const int num_blocks_per_row = ncols / QK_K;
    const int ib0 = row*num_blocks_per_row;

    const block_q3_K * x = (const block_q3_K *)vx + ib0;

    float tmp = 0; // partial sum for thread in warp

#if QK_K == 256

    const uint16_t kmask1 = 0x0303;
    const uint16_t kmask2 = 0x0f0f;

    const int tid = threadIdx.x/K_QUANTS_PER_ITERATION;  // 0...31 or 0...16
    const int ix  = threadIdx.x%K_QUANTS_PER_ITERATION;  // 0 or 0,1

    const int n  = K_QUANTS_PER_ITERATION;               // iterations in the inner loop
    const int step = 16/K_QUANTS_PER_ITERATION;
    const int im = tid/step;                             // 0 or 1. 0 computes 0..., 1 computes 128...
    const int in = tid - step*im;                        // 0....15 or 0...7

    const uint8_t m = 1 << (4*im); // hmask bit for this half of the super-block

    const int l0 = n*in; // 0...15 or 0...14 in steps of 2
    const int q_offset =  32*im + l0;
    const int y_offset = 128*im + l0;

    // reassembled 6-bit scales for 8 sub-blocks, viewed as signed bytes
    uint16_t utmp[4];
    const int8_t * s = (const int8_t *)utmp;

    const uint16_t s_shift = 4*im;

    for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) {

        const float   * y = yy + i * QK_K + y_offset;
        const uint8_t * q = x[i].qs + q_offset;
        const uint8_t * h = x[i].hmask + l0;

        // unpack the 6-bit scales: low 4 bits from a[0..3], top 2 bits from a[4..5]
        const uint16_t * a = (const uint16_t *)x[i].scales;
        utmp[0] = ((a[0] >> s_shift) & kmask2) | (((a[4] >> (s_shift + 0)) & kmask1) << 4);
        utmp[1] = ((a[1] >> s_shift) & kmask2) | (((a[5] >> (s_shift + 0)) & kmask1) << 4);
        utmp[2] = ((a[2] >> s_shift) & kmask2) | (((a[4] >> (s_shift + 2)) & kmask1) << 4);
        utmp[3] = ((a[3] >> s_shift) & kmask2) | (((a[5] >> (s_shift + 2)) & kmask1) << 4);

        const float d = x[i].d;

        float sum = 0;
        for (int l = 0; l < n; ++l) {
            // missing hmask bit -> value sits in the lower half of the range (offset -4)
            sum += y[l+ 0] * (s[0] - 32) * (((q[l] >> 0) & 3) - (h[l] & (m << 0) ? 0 : 4))
                 + y[l+32] * (s[2] - 32) * (((q[l] >> 2) & 3) - (h[l] & (m << 1) ? 0 : 4))
                 + y[l+64] * (s[4] - 32) * (((q[l] >> 4) & 3) - (h[l] & (m << 2) ? 0 : 4))
                 + y[l+96] * (s[6] - 32) * (((q[l] >> 6) & 3) - (h[l] & (m << 3) ? 0 : 4));
            sum += y[l+16] * (s[1] - 32) * (((q[l+16] >> 0) & 3) - (h[l+16] & (m << 0) ? 0 : 4))
                 + y[l+48] * (s[3] - 32) * (((q[l+16] >> 2) & 3) - (h[l+16] & (m << 1) ? 0 : 4))
                 + y[l+80] * (s[5] - 32) * (((q[l+16] >> 4) & 3) - (h[l+16] & (m << 2) ? 0 : 4))
                 + y[l+112] * (s[7] - 32) * (((q[l+16] >> 6) & 3) - (h[l+16] & (m << 3) ? 0 : 4));
        }
        tmp += d * sum;
    }
#else
    const int tid = threadIdx.x/(2*K_QUANTS_PER_ITERATION); // 0...15 or 0...7
    const int ix  = threadIdx.x%(2*K_QUANTS_PER_ITERATION); // 0....1 or 0...3
    const int offset = tid * K_QUANTS_PER_ITERATION;        // 0...15 or 0...14
    const int in = offset/8;                                // 0 or 1
    const int im = offset%8;                                // 0...7

    for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) {

        const float   * y = yy + i * QK_K + offset;
        const uint8_t * q = x[i].qs + offset;
        const uint8_t * s = x[i].scales;

        const float dall = (float)x[i].d;

        float sum = 0;
        for (int l = 0; l < K_QUANTS_PER_ITERATION; ++l) {
            const uint8_t hl = x[i].hmask[im+l] >> in;
            const uint8_t ql = q[l];
            sum += y[l+ 0] * dall * ((s[0] & 0xF) - 8) * ((int8_t)((ql >> 0) & 3) - ((hl >> 0) & 1 ? 0 : 4))
                 + y[l+16] * dall * ((s[0] >>  4) - 8) * ((int8_t)((ql >> 2) & 3) - ((hl >> 2) & 1 ? 0 : 4))
                 + y[l+32] * dall * ((s[1] & 0xF) - 8) * ((int8_t)((ql >> 4) & 3) - ((hl >> 4) & 1 ? 0 : 4))
                 + y[l+48] * dall * ((s[1] >>  4) - 8) * ((int8_t)((ql >> 6) & 3) - ((hl >> 6) & 1 ? 0 : 4));
        }
        tmp += sum;
    }
#endif

    // sum up partial sums and write back result (warp butterfly reduction)
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
    }

    if (threadIdx.x == 0) {
        dst[row] = tmp;
    }
}
// Fused dequantize(q4_K) + mat-vec: dst[row] = sum_col dequant(x[row,col]) * yy[col].
// Launch: blockDim.x == 32 (one warp reduces a row), blockDim.y rows per block,
// gridDim.y covering nrows; ncols must be a multiple of QK_K.
// Fix: row guard was `row > nrows` (off-by-one, reads/writes one row past the end);
// corrected to `row >= nrows`.
static __global__ void dequantize_mul_mat_vec_q4_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols, int nrows) {

    const int row = blockIdx.y*blockDim.y + threadIdx.y;
    if (row >= nrows) { // was `row > nrows`: off-by-one out-of-bounds access
        return;
    }

    const int num_blocks_per_row = ncols / QK_K;
    const int ib0 = row*num_blocks_per_row;

    const block_q4_K * x = (const block_q4_K *)vx + ib0;

#if QK_K == 256
    const uint16_t kmask1 = 0x3f3f;
    const uint16_t kmask2 = 0x0f0f;
    const uint16_t kmask3 = 0xc0c0;

    const int tid = threadIdx.x/K_QUANTS_PER_ITERATION;  // 0...31 or 0...16
    const int ix  = threadIdx.x%K_QUANTS_PER_ITERATION;  // 0 or 0,1

    const int step = 8/K_QUANTS_PER_ITERATION;           // 8 or 4

    const int il  = tid/step;                            // 0...3
    const int ir  = tid - step*il;                       // 0...7 or 0...3
    const int n   = 2 * K_QUANTS_PER_ITERATION;          // 2 or 4

    const int im = il/2; // 0 or 1. 0 computes 0,32 + 128,160, 1 computes 64,96 + 192,224
    const int in = il%2;

    const int l0 = n*(2*ir + in);
    const int q_offset = 32*im + l0;
    const int y_offset = 64*im + l0;

    // 6-bit scales/mins for the four sub-blocks this thread touches
    uint16_t aux[4];
    const uint8_t * sc = (const uint8_t *)aux;

#if K_QUANTS_PER_ITERATION == 2
    uint32_t q32[4];
    const uint8_t * q4 = (const uint8_t *)q32;
#else
    uint16_t q16[4];
    const uint8_t * q4 = (const uint8_t *)q16;
#endif

    float tmp = 0; // partial sum for thread in warp

    for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) {

        const float * y1 = yy + i*QK_K + y_offset;
        const float * y2 = y1 + 128;

        const float dall = __low2half(x[i].dm);
        const float dmin = __high2half(x[i].dm);

        // unpack the packed 6-bit scales/mins (same layout as get_scale_min_k4)
        const uint16_t * a = (const uint16_t *)x[i].scales;
        aux[0] = a[im+0] & kmask1;
        aux[1] = a[im+2] & kmask1;
        aux[2] = ((a[im+4] >> 0) & kmask2) | ((a[im+0] & kmask3) >> 2);
        aux[3] = ((a[im+4] >> 4) & kmask2) | ((a[im+2] & kmask3) >> 2);

#if K_QUANTS_PER_ITERATION == 2
        const uint32_t * q1 = (const uint32_t *)(x[i].qs + q_offset);
        const uint32_t * q2 = q1 + 16;

        q32[0] = q1[0] & 0x0f0f0f0f; // low nibbles
        q32[1] = q1[0] & 0xf0f0f0f0; // high nibbles (divided by 16 in the scale below)
        q32[2] = q2[0] & 0x0f0f0f0f;
        q32[3] = q2[0] & 0xf0f0f0f0;

        float4 s = {0.f, 0.f, 0.f, 0.f};
        float smin = 0;
        for (int l = 0; l < 4; ++l) {
            s.x += y1[l] * q4[l+0]; s.y += y1[l+32] * q4[l+ 4];
            s.z += y2[l] * q4[l+8]; s.w += y2[l+32] * q4[l+12];
            smin += y1[l] * sc[2] + y1[l+32] * sc[3] + y2[l] * sc[6] + y2[l+32] * sc[7];
        }
        tmp += dall * (s.x * sc[0] + s.y * sc[1] * 1.f/16.f + s.z * sc[4] + s.w * sc[5] * 1.f/16.f) - dmin * smin;
#else
        const uint16_t * q1 = (const uint16_t *)(x[i].qs + q_offset);
        const uint16_t * q2 = q1 + 32;

        q16[0] = q1[0] & 0x0f0f;
        q16[1] = q1[0] & 0xf0f0;
        q16[2] = q2[0] & 0x0f0f;
        q16[3] = q2[0] & 0xf0f0;

        float4 s = {0.f, 0.f, 0.f, 0.f};
        float smin = 0;
        for (int l = 0; l < 2; ++l) {
            s.x += y1[l] * q4[l+0]; s.y += y1[l+32] * q4[l+2];
            s.z += y2[l] * q4[l+4]; s.w += y2[l+32] * q4[l+6];
            smin += y1[l] * sc[2] + y1[l+32] * sc[3] + y2[l] * sc[6] + y2[l+32] * sc[7];
        }
        tmp += dall * (s.x * sc[0] + s.y * sc[1] * 1.f/16.f + s.z * sc[4] + s.w * sc[5] * 1.f/16.f) - dmin * smin;
#endif
    }
#else
    const int tid = threadIdx.x/(2*K_QUANTS_PER_ITERATION); // 0...15
    const int ix  = threadIdx.x%(2*K_QUANTS_PER_ITERATION);

    const int step = tid * K_QUANTS_PER_ITERATION;

    uint16_t aux16[2];
    const uint8_t * s = (const uint8_t *)aux16;

    float tmp = 0;

    for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) {
        const uint8_t * q = x[i].qs + step;
        const float   * y = yy + i*QK_K + step;
        const uint16_t * a = (const uint16_t *)x[i].scales;
        aux16[0] = a[0] & 0x0f0f;        // scales
        aux16[1] = (a[0] >> 4) & 0x0f0f; // mins
        const float d = (float)x[i].dm[0];
        const float m = (float)x[i].dm[1];
        float sum = 0.f;
        for (int j = 0; j < K_QUANTS_PER_ITERATION; ++j) {
            sum += y[j+ 0] * (d * s[0] * (q[j+ 0] & 0xF) - m * s[2])
                 + y[j+16] * (d * s[0] * (q[j+16] & 0xF) - m * s[2])
                 + y[j+32] * (d * s[1] * (q[j+ 0] >>  4) - m * s[3])
                 + y[j+48] * (d * s[1] * (q[j+16] >>  4) - m * s[3]);
        }
        tmp += sum;
    }
#endif

    // sum up partial sums and write back result (warp butterfly reduction; after it
    // every lane holds the full row sum, so the tid == 0 write is well-defined)
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
    }

    if (tid == 0) {
        dst[row] = tmp;
    }
}
// Fused dequantize(q5_K) + mat-vec: dst[row] = sum_col dequant(x[row,col]) * yy[col].
// Unlike the q2/q3/q4 variants above, one block per row (row = blockIdx.x), so no
// row bounds check is needed; blockDim.x == 32 assumed for the final warp reduction.
static __global__ void dequantize_mul_mat_vec_q5_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols) {
const int row = blockIdx.x;
const int num_blocks_per_row = ncols / QK_K;
const int ib0 = row*num_blocks_per_row;
const block_q5_K * x = (const block_q5_K *)vx + ib0;
float tmp = 0; // partial sum for thread in warp
#if QK_K == 256
const uint16_t kmask1 = 0x3f3f;
const uint16_t kmask2 = 0x0f0f;
const uint16_t kmask3 = 0xc0c0;
const int tid = threadIdx.x/2; // 0...15
const int ix = threadIdx.x%2;
const int il = tid/4; // 0...3
const int ir = tid - 4*il;// 0...3
const int n = 2;
const int im = il/2; // 0 or 1. 0 computes 0,32 + 128,160, 1 computes 64,96 + 192,224
const int in = il%2;
const int l0 = n*(2*ir + in);
const int q_offset = 32*im + l0;
const int y_offset = 64*im + l0;
// hm1/hm2 select the qh bits for the low/high nibble groups of this slice
const uint8_t hm1 = 1 << (2*im);
const uint8_t hm2 = hm1 << 4;
// 6-bit scales/mins for the four sub-blocks this thread touches
uint16_t aux[4];
const uint8_t * sc = (const uint8_t *)aux;
uint16_t q16[8];
const uint8_t * q4 = (const uint8_t *)q16;
for (int i = ix; i < num_blocks_per_row; i += 2) {
const uint8_t * ql1 = x[i].qs + q_offset;
const uint8_t * qh = x[i].qh + l0;
const float * y1 = yy + i*QK_K + y_offset;
const float * y2 = y1 + 128;
const float dall = __low2half(x[i].dm);
const float dmin = __high2half(x[i].dm);
// unpack the packed 6-bit scales/mins (same layout as get_scale_min_k4)
const uint16_t * a = (const uint16_t *)x[i].scales;
aux[0] = a[im+0] & kmask1;
aux[1] = a[im+2] & kmask1;
aux[2] = ((a[im+4] >> 0) & kmask2) | ((a[im+0] & kmask3) >> 2);
aux[3] = ((a[im+4] >> 4) & kmask2) | ((a[im+2] & kmask3) >> 2);
float4 sum = {0.f, 0.f, 0.f, 0.f};
float smin = 0;
// pre-split low/high nibbles of the two 16-byte halves into q16
const uint16_t * q1 = (const uint16_t *)ql1;
const uint16_t * q2 = q1 + 32;
q16[0] = q1[0] & 0x0f0f;
q16[1] = q1[8] & 0x0f0f;
q16[2] = (q1[0] >> 4) & 0x0f0f;
q16[3] = (q1[8] >> 4) & 0x0f0f;
q16[4] = q2[0] & 0x0f0f;
q16[5] = q2[8] & 0x0f0f;
q16[6] = (q2[0] >> 4) & 0x0f0f;
q16[7] = (q2[8] >> 4) & 0x0f0f;
for (int l = 0; l < n; ++l) {
sum.x += y1[l+ 0] * (q4[l +0] + (qh[l+ 0] & (hm1 << 0) ? 16 : 0))
+ y1[l+16] * (q4[l +2] + (qh[l+16] & (hm1 << 0) ? 16 : 0));
sum.y += y1[l+32] * (q4[l +4] + (qh[l+ 0] & (hm1 << 1) ? 16 : 0))
+ y1[l+48] * (q4[l +6] + (qh[l+16] & (hm1 << 1) ? 16 : 0));
sum.z += y2[l+ 0] * (q4[l +8] + (qh[l+ 0] & (hm2 << 0) ? 16 : 0))
+ y2[l+16] * (q4[l+10] + (qh[l+16] & (hm2 << 0) ? 16 : 0));
sum.w += y2[l+32] * (q4[l+12] + (qh[l+ 0] & (hm2 << 1) ? 16 : 0))
+ y2[l+48] * (q4[l+14] + (qh[l+16] & (hm2 << 1) ? 16 : 0));
smin += (y1[l] + y1[l+16]) * sc[2] + (y1[l+32] + y1[l+48]) * sc[3]
+ (y2[l] + y2[l+16]) * sc[6] + (y2[l+32] + y2[l+48]) * sc[7];
}
tmp += dall * (sum.x * sc[0] + sum.y * sc[1] + sum.z * sc[4] + sum.w * sc[5]) - dmin * smin;
}
#else
const int tid = threadIdx.x/(2*K_QUANTS_PER_ITERATION); // 0...15
const int ix = threadIdx.x%(2*K_QUANTS_PER_ITERATION);
const int step = tid * K_QUANTS_PER_ITERATION;
const int im = step/8;
const int in = step%8;
for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) {
const uint8_t * q = x[i].qs + step;
const int8_t * s = x[i].scales;
const float * y = yy + i*QK_K + step;
const float d = x[i].d;
float sum = 0.f;
for (int j = 0; j < K_QUANTS_PER_ITERATION; ++j) {
const uint8_t h = x[i].qh[in+j] >> im;
sum += y[j+ 0] * d * s[0] * ((q[j+ 0] & 0xF) - ((h >> 0) & 1 ? 0 : 16))
+ y[j+16] * d * s[1] * ((q[j+16] & 0xF) - ((h >> 2) & 1 ? 0 : 16))
+ y[j+32] * d * s[2] * ((q[j+ 0] >> 4) - ((h >> 4) & 1 ? 0 : 16))
+ y[j+48] * d * s[3] * ((q[j+16] >> 4) - ((h >> 6) & 1 ? 0 : 16));
}
tmp += sum;
}
#endif
// sum up partial sums and write back result
// warp butterfly reduction; lane 0 then writes the completed row sum
#pragma unroll
for (int mask = 16; mask > 0; mask >>= 1) {
tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
}
if (threadIdx.x == 0) {
dst[row] = tmp;
}
}
// Mat-vec product for q6_K-quantized data: dst[row] = dot(dequant(x[row,:]), yy).
// One 32-thread warp computes one row; rows come from blockIdx.y*blockDim.y + threadIdx.y.
// Assumes ncols % QK_K == 0 (each row is an integer number of q6_K blocks).
static __global__ void dequantize_mul_mat_vec_q6_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols, int nrows) {
    static_assert(16%K_QUANTS_PER_ITERATION == 0, "16 must be divisible by K_QUANTS_PER_ITERATION");

    const int row = blockIdx.y*blockDim.y + threadIdx.y;
    if (row >= nrows) { // fixed off-by-one: was 'row > nrows', which let row == nrows run out of bounds
        return;
    }

    const int num_blocks_per_row = ncols / QK_K;
    const int ib0 = row*num_blocks_per_row;

    const block_q6_K * x = (const block_q6_K *)vx + ib0;

#if QK_K == 256

    const int tid = threadIdx.x/K_QUANTS_PER_ITERATION; // 0...31 or 0...16
    const int ix  = threadIdx.x%K_QUANTS_PER_ITERATION; // 0 or 0, 1

    const int step = 16/K_QUANTS_PER_ITERATION;         // 16 or 8

    const int im = tid/step;                            // 0 or 1. 0 computes 0..., 1 computes 128...
    const int in = tid - step*im;                       // 0...15 or 0...7

#if K_QUANTS_PER_ITERATION == 1
    const int l0 = K_QUANTS_PER_ITERATION*in;           // 0...15
    const int is = 0;
#else
    const int l0 = 4 * in;                              // 0, 4, 8, ..., 28
    const int is = in / 4;
#endif

    const int ql_offset = 64*im + l0;
    const int qh_offset = 32*im + l0;
    const int s_offset  =  8*im + is;
    const int y_offset  = 128*im + l0;

    float tmp = 0; // partial sum for thread in warp

    for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) {

        const float   * y  = yy + i * QK_K + y_offset;
        const uint8_t * ql = x[i].ql + ql_offset;
        const uint8_t * qh = x[i].qh + qh_offset;
        const int8_t  * s  = x[i].scales + s_offset;

        const float d = x[i].d;

#if K_QUANTS_PER_ITERATION == 1
        // each 6-bit quant = 4 low bits from ql combined with 2 bits from qh, offset by -32,
        // scaled by the per-16-value sub-block scale s[k]
        float sum = y[ 0] * s[0] * d * ((int8_t)((ql[ 0] & 0xF) | ((qh[ 0] & 0x03) << 4)) - 32)
                  + y[16] * s[1] * d * ((int8_t)((ql[16] & 0xF) | ((qh[16] & 0x03) << 4)) - 32)
                  + y[32] * s[2] * d * ((int8_t)((ql[32] & 0xF) | ((qh[ 0] & 0x0c) << 2)) - 32)
                  + y[48] * s[3] * d * ((int8_t)((ql[48] & 0xF) | ((qh[16] & 0x0c) << 2)) - 32)
                  + y[64] * s[4] * d * ((int8_t)((ql[ 0] >>  4) | ((qh[ 0] & 0x30) >> 0)) - 32)
                  + y[80] * s[5] * d * ((int8_t)((ql[16] >>  4) | ((qh[16] & 0x30) >> 0)) - 32)
                  + y[96] * s[6] * d * ((int8_t)((ql[32] >>  4) | ((qh[ 0] & 0xc0) >> 2)) - 32)
                  +y[112] * s[7] * d * ((int8_t)((ql[48] >>  4) | ((qh[16] & 0xc0) >> 2)) - 32);
        tmp += sum;
#else
        float sum = 0;
        for (int l = 0; l < 4; ++l) {
            sum += y[l+ 0] * s[0] * d * ((int8_t)((ql[l+ 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32)
                 + y[l+32] * s[2] * d * ((int8_t)((ql[l+32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32)
                 + y[l+64] * s[4] * d * ((int8_t)((ql[l+ 0] >>  4) | (((qh[l] >> 4) & 3) << 4)) - 32)
                 + y[l+96] * s[6] * d * ((int8_t)((ql[l+32] >>  4) | (((qh[l] >> 6) & 3) << 4)) - 32);
        }
        tmp += sum;
#endif

    }

#else
    // QK_K == 64 layout
    const int tid = threadIdx.x/(2*K_QUANTS_PER_ITERATION); // 0...7
    const int ix  = threadIdx.x%(2*K_QUANTS_PER_ITERATION); // 0...3

    const int step = tid * K_QUANTS_PER_ITERATION;

    float tmp = 0; // partial sum for thread in warp

    for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) {

        const float   * y  = yy + i * QK_K + step;
        const uint8_t * ql = x[i].ql + step;
        const uint8_t * qh = x[i].qh + step;
        const int8_t  * s  = x[i].scales;

        const float d = x[i].d; // was 'x[i+0].d' — same element, clearer spelling

        float sum = 0;
        for (int j = 0; j < K_QUANTS_PER_ITERATION; ++j) {
            sum += y[j+ 0] * s[0] * d * ((int8_t)((ql[j+ 0] & 0xF) | ((qh[j] & 0x03) << 4)) - 32)
                 + y[j+16] * s[1] * d * ((int8_t)((ql[j+16] & 0xF) | ((qh[j] & 0x0c) << 2)) - 32)
                 + y[j+32] * s[2] * d * ((int8_t)((ql[j+ 0] >>  4) | ((qh[j] & 0x30) >> 0)) - 32)
                 + y[j+48] * s[3] * d * ((int8_t)((ql[j+16] >>  4) | ((qh[j] & 0xc0) >> 2)) - 32);
        }
        tmp += sum;
    }

#endif

    // sum up partial sums and write back result
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
    }

    // after the xor-reduction every lane holds the full sum; all lanes with
    // tid == 0 (possibly more than one) store the same value, which is benign
    if (tid == 0) {
        dst[row] = tmp;
    }
}
// Load two consecutive fp16 values starting at element ib + iqs into v.
// The assignments convert half -> float implicitly when dfloat == float.
static __device__ void convert_f16(const void * vx, const int ib, const int iqs, dfloat2 & v){
    const half * src = (const half *) vx + ib + iqs;

    v.x = src[0];
    v.y = src[1];
}
// Quantize a float matrix (kx columns per row, padded width kx_padded) into
// q8_1 blocks: QK8_1 int8 quants per block plus a half2 holding {scale d, sum}.
// One warp produces one block. NOTE(review): the early return below happens
// before the full-mask shuffles, so kx_padded is presumably always a multiple
// of the warp size — confirm at the call site.
static __global__ void quantize_q8_1(const float * __restrict__ x, void * __restrict__ vy, const int kx, const int kx_padded) {
const int ix = blockDim.x*blockIdx.x + threadIdx.x;
if (ix >= kx_padded) {
return;
}
const int iy = blockDim.y*blockIdx.y + threadIdx.y;
const int i_padded = iy*kx_padded + ix;
block_q8_1 * y = (block_q8_1 *) vy;
const int ib = i_padded / QK8_1; // block index
const int iqs = i_padded % QK8_1; // quant index
// padding columns (kx <= ix < kx_padded) contribute 0 to both amax and sum
const float xi = ix < kx ? x[iy*kx + ix] : 0.0f;
float amax = fabsf(xi);
float sum = xi;
// butterfly reduction: afterwards every lane holds the warp-wide amax and sum
#pragma unroll
for (int mask = 16; mask > 0; mask >>= 1) {
amax = fmaxf(amax, __shfl_xor_sync(0xffffffff, amax, mask, 32));
sum += __shfl_xor_sync(0xffffffff, sum, mask, 32);
}
// scale maps the max-magnitude value to +/-127; all-zero block stores q = 0
const float d = amax / 127;
const int8_t q = amax == 0.0f ? 0 : roundf(xi / d);
y[ib].qs[iqs] = q;
// only the first quant's lane writes the per-block scale and sum
if (iqs > 0) {
return;
}
reinterpret_cast<half&>(y[ib].ds.x) = d;
reinterpret_cast<half&>(y[ib].ds.y) = sum;
}
// Generic dequantization kernel: each thread expands one quant position into
// two output floats via the supplied dequantize_kernel. qk = values per block,
// qr = quant ratio (how the two produced values are spaced in the output).
template <int qk, int qr, dequantize_kernel_t dequantize_kernel>
static __global__ void dequantize_block(const void * __restrict__ vx, float * __restrict__ y, const int k) {
    const int i = blockDim.x*blockIdx.x + 2*threadIdx.x;

    if (i >= k) {
        return;
    }

    const int block_index = i/qk;        // which quant block
    const int quant_index = (i%qk)/qr;   // index of the quant pair inside the block
    const int block_start = i - i%qk;    // first y element of this block
    const int second_off  = qr == 1 ? 1 : qk/2; // spacing between the two outputs

    dfloat2 v;
    dequantize_kernel(vx, block_index, quant_index, v);

    y[block_start + quant_index]              = v.x;
    y[block_start + quant_index + second_off] = v.y;
}
// VDR = vec dot ratio, how many contiguous integers each thread processes when the vec dot kernel is called
// MMVQ = mul_mat_vec_q, MMQ = mul_mat_q
#define VDR_Q4_0_Q8_1_MMVQ 2
#define VDR_Q4_0_Q8_1_MMQ 4
// Dot product of vdr packed q4_0 ints v against q8_1 ints u (DP4A path).
// d4 = q4_0 scale; ds8 = q8_1 {scale, sum of quants}.
template <int vdr> static __device__ __forceinline__ float vec_dot_q4_0_q8_1_impl(
    const int * v, const int * u, const float & d4, const half2 & ds8) {

#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics
    int sumi = 0;

#pragma unroll
    for (int i = 0; i < vdr; ++i) {
        const int lo = (v[i] >> 0) & 0x0F0F0F0F; // low nibbles
        const int hi = (v[i] >> 4) & 0x0F0F0F0F; // high nibbles

        // SIMD dot product of quantized values
        sumi = __dp4a(hi, u[2*i+1], __dp4a(lo, u[2*i+0], sumi));
    }

    const float2 ds8f = __half22float2(ds8);

    // the ds8f.y (sum of q8 quants) term effectively subtracts the implicit -8 offset of q4_0
    return d4 * (sumi * ds8f.x - (8*vdr/QI4_0) * ds8f.y);
#else
    assert(false);
    return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
#define VDR_Q4_1_Q8_1_MMVQ 2
#define VDR_Q4_1_Q8_1_MMQ 4
// Dot product of vdr packed q4_1 ints v against q8_1 ints u (DP4A path).
// dm4 = q4_1 {scale d, min m}; ds8 = q8_1 {scale, sum of quants}.
template <int vdr> static __device__ __forceinline__ float vec_dot_q4_1_q8_1_impl(
    const int * v, const int * u, const half2 & dm4, const half2 & ds8) {

#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics
    int sumi = 0;

#pragma unroll
    for (int i = 0; i < vdr; ++i) {
        const int lo = (v[i] >> 0) & 0x0F0F0F0F; // low nibbles
        const int hi = (v[i] >> 4) & 0x0F0F0F0F; // high nibbles

        // SIMD dot product of quantized values
        sumi = __dp4a(hi, u[2*i+1], __dp4a(lo, u[2*i+0], sumi));
    }

#ifdef GGML_CUDA_F16
    const float2 tmp = __half22float2(__hmul2(dm4, ds8));
    const float d4d8 = tmp.x;
    const float m4s8 = tmp.y;
#else
    const float2 dm4f = __half22float2(dm4);
    const float2 ds8f = __half22float2(ds8);
    const float d4d8 = dm4f.x * ds8f.x;
    const float m4s8 = dm4f.y * ds8f.y;
#endif // GGML_CUDA_F16

    // scale second part of sum by QI8_1/(vdr * QR4_1) to compensate for multiple threads adding it
    return sumi * d4d8 + m4s8 / (QI8_1 / (vdr * QR4_1));
#else
    assert(false);
    return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
#define VDR_Q5_0_Q8_1_MMVQ 2
#define VDR_Q5_0_Q8_1_MMQ 4
// Dot product of vdr q5_0 ints against q8_1 ints (DP4A path). vl carries the
// low 4 bits of each quant; vh carries the packed 5th bits, which are
// scattered into bit positions 4/12/20/28 of each int before the dot product.
// d5 = q5_0 scale; ds8 = q8_1 {scale, sum of quants}.
template <int vdr> static __device__ __forceinline__ float vec_dot_q5_0_q8_1_impl(
const int * vl, const int * vh, const int * u, const float & d5, const half2 & ds8) {
#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics
int sumi = 0;
#pragma unroll
for (int i = 0; i < vdr; ++i) {
int vi0 = (vl[i] >> 0) & 0x0F0F0F0F; // lower 4 qs bits, still need qh as 5th bits
vi0 |= (vh[i] << 4) & 0x00000010; // 0 -> 4
vi0 |= (vh[i] << 11) & 0x00001000; // 1 -> 12
vi0 |= (vh[i] << 18) & 0x00100000; // 2 -> 20
vi0 |= (vh[i] << 25) & 0x10000000; // 3 -> 28
sumi = __dp4a(vi0, u[2*i+0], sumi); // SIMD dot product of quantized values
int vi1 = (vl[i] >> 4) & 0x0F0F0F0F; // upper 4 qs bits, still need qh as 5th bits
vi1 |= (vh[i] >> 12) & 0x00000010; // 16 -> 4
vi1 |= (vh[i] >> 5) & 0x00001000; // 17 -> 12
vi1 |= (vh[i] << 2) & 0x00100000; // 18 -> 20
vi1 |= (vh[i] << 9) & 0x10000000; // 19 -> 28
sumi = __dp4a(vi1, u[2*i+1], sumi); // SIMD dot product of quantized values
}
const float2 ds8f = __half22float2(ds8);
// second part effectively subtracts 16 from each quant value
return d5 * (sumi * ds8f.x - (16*vdr/QI5_0) * ds8f.y);
#else
assert(false);
return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
#define VDR_Q5_1_Q8_1_MMVQ 2
#define VDR_Q5_1_Q8_1_MMQ 4
// Dot product of vdr q5_1 ints against q8_1 ints (DP4A path). Same 5th-bit
// scatter as q5_0, but q5_1 has an explicit min (dm5.y) instead of a fixed
// -16 offset. dm5 = q5_1 {scale d, min m}; ds8 = q8_1 {scale, sum of quants}.
template <int vdr> static __device__ __forceinline__ float vec_dot_q5_1_q8_1_impl(
const int * vl, const int * vh, const int * u, const half2 & dm5, const half2 & ds8) {
#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics
int sumi = 0;
#pragma unroll
for (int i = 0; i < vdr; ++i) {
int vi0 = (vl[i] >> 0) & 0x0F0F0F0F; // lower 4 qs bits, still need qh as 5th bits
vi0 |= (vh[i] << 4) & 0x00000010; // 0 -> 4
vi0 |= (vh[i] << 11) & 0x00001000; // 1 -> 12
vi0 |= (vh[i] << 18) & 0x00100000; // 2 -> 20
vi0 |= (vh[i] << 25) & 0x10000000; // 3 -> 28
sumi = __dp4a(vi0, u[2*i+0], sumi); // SIMD dot product of quantized values
int vi1 = (vl[i] >> 4) & 0x0F0F0F0F; // upper 4 qs bits, still need qh as 5th bits
vi1 |= (vh[i] >> 12) & 0x00000010; // 16 -> 4
vi1 |= (vh[i] >> 5) & 0x00001000; // 17 -> 12
vi1 |= (vh[i] << 2) & 0x00100000; // 18 -> 20
vi1 |= (vh[i] << 9) & 0x10000000; // 19 -> 28
sumi = __dp4a(vi1, u[2*i+1], sumi); // SIMD dot product of quantized values
}
#ifdef GGML_CUDA_F16
const float2 tmp = __half22float2(__hmul2(dm5, ds8));
const float d5d8 = tmp.x;
const float m5s8 = tmp.y;
#else
const float2 dm5f = __half22float2(dm5);
const float2 ds8f = __half22float2(ds8);
const float d5d8 = dm5f.x * ds8f.x;
const float m5s8 = dm5f.y * ds8f.y;
#endif // GGML_CUDA_F16
// scale second part of sum by QI5_1 / vdr to compensate for multiple threads adding it
return sumi*d5d8 + m5s8 / (QI5_1 / vdr);
#else
assert(false);
return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
#define VDR_Q8_0_Q8_1_MMVQ 2
#define VDR_Q8_0_Q8_1_MMQ 8
// Dot product of vdr q8_0 ints against q8_1 ints (DP4A path); both operands
// are plain int8 quants, so the result only needs the two scales applied.
template <int vdr> static __device__ __forceinline__ float vec_dot_q8_0_q8_1_impl(
    const int * v, const int * u, const float & d8_0, const float & d8_1) {

#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics
    int acc = 0;

#pragma unroll
    for (int i = 0; i < vdr; ++i) {
        // SIMD dot product of quantized values
        acc = __dp4a(v[i], u[i], acc);
    }

    return d8_0 * d8_1 * acc;
#else
    assert(false);
    return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
// Dot product of vdr q8_1 ints against q8_1 ints (DP4A path).
// dm8 = {scale d, min m} of the first operand; ds8 = {scale, sum} of the second.
template <int vdr> static __device__ __forceinline__ float vec_dot_q8_1_q8_1_impl(
    const int * v, const int * u, const half2 & dm8, const half2 & ds8) {

#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics
    int acc = 0;

#pragma unroll
    for (int i = 0; i < vdr; ++i) {
        // SIMD dot product of quantized values
        acc = __dp4a(v[i], u[i], acc);
    }

#ifdef GGML_CUDA_F16
    const float2 tmp = __half22float2(__hmul2(dm8, ds8));
    const float d8d8 = tmp.x;
    const float m8s8 = tmp.y;
#else
    const float2 dm8f = __half22float2(dm8);
    const float2 ds8f = __half22float2(ds8);
    const float d8d8 = dm8f.x * ds8f.x;
    const float m8s8 = dm8f.y * ds8f.y;
#endif // GGML_CUDA_F16

    // scale second part of sum by QI8_1 / vdr to compensate for multiple threads adding it
    return acc*d8d8 + m8s8 / (QI8_1 / vdr);
#else
    assert(false);
    return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
#define VDR_Q2_K_Q8_1_MMVQ 1
#define VDR_Q2_K_Q8_1_MMQ 2
// contiguous v/x values
// q2_K x q8_1 dot product (mmvq path, contiguous v/x values). v packs 16
// 2-bit quants; scales[2*i] holds a 4-bit scale (low) and 4-bit min (high).
static __device__ __forceinline__ float vec_dot_q2_K_q8_1_impl_mmvq(
    const int & v, const int * __restrict__ u, const uint8_t * __restrict__ scales,
    const half2 & dm2, const float * __restrict__ d8) {

#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics
    float sumf_d = 0.0f;
    float sumf_m = 0.0f;

#pragma unroll
    for (int i = 0; i < QR2_K; ++i) {
        const int sc = scales[2*i];

        const int quants = (v >> (2*i)) & 0x03030303; // i-th 2-bit plane of the quants
        sumf_d += d8[i] * (__dp4a(quants, u[i], 0) * (sc & 0xF)); // SIMD dot product

        // broadcast the 4-bit min into all four bytes of an int
        int min4 = sc >> 4;
        min4 |= min4 << 8;
        min4 |= min4 << 16;

        // min * sum(u) via dp4a against the broadcast value
        sumf_m += d8[i] * __dp4a(min4, u[i], 0);
    }

    const float2 dm2f = __half22float2(dm2);

    return dm2f.x*sumf_d - dm2f.y*sumf_m;
#else
    assert(false);
    return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
// contiguous u/y values
// q2_K x q8_1 dot product (mmq path, contiguous u/y values). Processes QI8_1
// ints in two sub-blocks, each with its own packed scale/min byte.
static __device__ __forceinline__ float vec_dot_q2_K_q8_1_impl_mmq(
    const int * __restrict__ v, const int * __restrict__ u, const uint8_t * __restrict__ scales,
    const half2 & dm2, const float & d8) {

#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics
    int sumi_d = 0;
    int sumi_m = 0;

#pragma unroll
    for (int i0 = 0; i0 < QI8_1; i0 += QI8_1/2) {
        const int sc = scales[i0 / (QI8_1/2)];

        // broadcast the 4-bit min into all four bytes of an int
        int min4 = sc >> 4;
        min4 |= min4 << 8;
        min4 |= min4 << 16;

        int part = 0; // unscaled dot product for this sub-block
#pragma unroll
        for (int i = i0; i < i0 + QI8_1/2; ++i) {
            part   = __dp4a(v[i], u[i], part);     // SIMD dot product
            sumi_m = __dp4a(min4, u[i], sumi_m);   // min times sum of q8_1 values
        }
        sumi_d += part * (sc & 0xF);
    }

    const float2 dm2f = __half22float2(dm2);

    return d8 * (dm2f.x*sumi_d - dm2f.y*sumi_m);
#else
    assert(false);
    return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
#define VDR_Q3_K_Q8_1_MMVQ 1
#define VDR_Q3_K_Q8_1_MMQ 2
// contiguous v/x values
// q3_K x q8_1 dot product (mmvq path). Each 6-bit scale is split: the low 4
// bits and the high 2 bits live in different halves of the scale array and
// are recombined below, then offset by -32.
// NOTE(review): vih is SUBTRACTED from the 2-bit plane, so vh is presumably
// passed pre-inverted by the caller (set bit => subtract 4) — confirm there.
static __device__ __forceinline__ float vec_dot_q3_K_q8_1_impl_mmvq(
const int & vl, const int & vh, const int * __restrict__ u, const uint8_t * __restrict__ scales,
const int & scale_offset, const float & d3, const float * __restrict__ d8) {
#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics
float sumf = 0.0f;
#pragma unroll
for (int i = 0; i < QR3_K; ++i) {
const int isc = scale_offset + 2*i;
// low 4 bits of the 6-bit scale
const int isc_low = isc % (QK_K/32);
const int sc_shift_low = 4 * (isc / (QK_K/32));
const int sc_low = (scales[isc_low] >> sc_shift_low) & 0xF;
// high 2 bits of the 6-bit scale, stored after the first QK_K/32 bytes
const int isc_high = isc % (QK_K/64);
const int sc_shift_high = 2 * (isc / (QK_K/64));
const int sc_high = ((scales[(QK_K/32) + isc_high] >> sc_shift_high) & 3) << 4;
const int sc = (sc_low | sc_high) - 32;
const int vil = (vl >> (2*i)) & 0x03030303;
const int vih = ((vh >> i) << 2) & 0x04040404;
const int vi = __vsubss4(vil, vih);
sumf += d8[i] * (__dp4a(vi, u[i], 0) * sc); // SIMD dot product
}
return d3 * sumf;
#else
assert(false);
return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
// contiguous u/y values
// q3_K x q8_1 dot product (mmq path, contiguous u/y values). Quants in v are
// already sign-extended; each QI8_1/2-int sub-block gets one int8 scale.
static __device__ __forceinline__ float vec_dot_q3_K_q8_1_impl_mmq(
    const int * __restrict__ v, const int * __restrict__ u, const int8_t * __restrict__ scales,
    const float & d3, const float & d8) {

#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics
    int sumi = 0;

#pragma unroll
    for (int i0 = 0; i0 < QR3_K*VDR_Q3_K_Q8_1_MMQ; i0 += QI8_1/2) {
        int partial = 0; // unscaled dot for this sub-block
        for (int i = i0; i < i0 + QI8_1/2; ++i) {
            partial = __dp4a(v[i], u[i], partial); // SIMD dot product
        }
        sumi += partial * scales[i0 / (QI8_1/2)];
    }

    return d3*d8 * sumi;
#else
    assert(false);
    return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
#define VDR_Q4_K_Q8_1_MMVQ 2
#define VDR_Q4_K_Q8_1_MMQ 8
// contiguous v/x values
// q4_K x q8_1 dot product (mmvq path). v[0]/v[1] pack nibbles for QR4_K
// sub-blocks; sc/m are the per-sub-block 4-bit scales and mins.
static __device__ __forceinline__ float vec_dot_q4_K_q8_1_impl_vmmq(
    const int * __restrict__ v, const int * __restrict__ u, const uint8_t * __restrict__ sc,
    const uint8_t * __restrict__ m, const half2 & dm4, const float * __restrict__ d8) {

#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics
    float sumf_d = 0.0f;
    float sumf_m = 0.0f;

#pragma unroll
    for (int i = 0; i < QR4_K; ++i) {
        const int nib0 = (v[0] >> (4*i)) & 0x0F0F0F0F;
        const int nib1 = (v[1] >> (4*i)) & 0x0F0F0F0F;

        const int dot_q = __dp4a(nib1, u[2*i+1], __dp4a(nib0, u[2*i+0], 0)); // SIMD dot product
        const int sum_u = __dp4a(0x01010101, u[2*i+1], __dp4a(0x01010101, u[2*i+0], 0)); // sum of u

        sumf_d += d8[i] * (dot_q * sc[i]);
        sumf_m += d8[i] * (sum_u * m[i]); // constant (min) part of q4_K times sum of q8_1 values
    }

    const float2 dm4f = __half22float2(dm4);

    return dm4f.x*sumf_d - dm4f.y*sumf_m;
#else
    assert(false);
    return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
// contiguous u/y values
// q4_K x q8_1 dot product (mmq path, contiguous u/y values). Nibble plane i
// of every v[j] is dotted with one q8_1 block; ds8[i] carries that block's
// {scale, sum}.
static __device__ __forceinline__ float vec_dot_q4_K_q8_1_impl_mmq(
    const int * __restrict__ v, const int * __restrict__ u, const uint8_t * __restrict__ sc,
    const uint8_t * __restrict__ m, const half2 & dm4, const half2 * __restrict__ ds8) {

#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics
    float sumf_d = 0.0f;
    float sumf_m = 0.0f;

#pragma unroll
    for (int i = 0; i < QR4_K*VDR_Q4_K_Q8_1_MMQ/QI8_1; ++i) {
        int dot = 0;

#pragma unroll
        for (int j = 0; j < QI8_1; ++j) {
            dot = __dp4a((v[j] >> (4*i)) & 0x0F0F0F0F, u[i*QI8_1 + j], dot); // SIMD dot product
        }

        const float2 ds8f = __half22float2(ds8[i]);

        sumf_d += ds8f.x * (sc[i] * dot);
        sumf_m += ds8f.y *  m[i];  // sum of q8_1 block * q4_K min val
    }

    const float2 dm4f = __half22float2(dm4);

    return dm4f.x*sumf_d - dm4f.y*sumf_m;
#else
    assert(false);
    return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
#define VDR_Q5_K_Q8_1_MMVQ 2
#define VDR_Q5_K_Q8_1_MMQ 8
// contiguous v/x values
// q5_K x q8_1 dot product (mmvq path). Low nibbles come from vl[0]/vl[1];
// the 5th bit of each quant is extracted from vh and OR-ed into bit 4 of the
// corresponding byte. sc/m are per-sub-block scales and mins.
static __device__ __forceinline__ float vec_dot_q5_K_q8_1_impl_vmmq(
const int * __restrict__ vl, const int * __restrict__ vh, const int * __restrict__ u, const uint8_t * __restrict__ sc,
const uint8_t * __restrict__ m, const half2 & dm5, const float * __restrict__ d8) {
#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics
float sumf_d = 0.0f;
float sumf_m = 0.0f;
#pragma unroll
for (int i = 0; i < QR5_K; ++i) {
// nibble plane i of the low bits
const int vl0i = (vl[0] >> (4*i)) & 0x0F0F0F0F;
const int vl1i = (vl[1] >> (4*i)) & 0x0F0F0F0F;
// bit i of each qh byte, moved into the quant's bit 4
const int vh0i = ((vh[0] >> i) << 4) & 0x10101010;
const int vh1i = ((vh[1] >> i) << 4) & 0x10101010;
const int v0i = vl0i | vh0i;
const int v1i = vl1i | vh1i;
const int dot1 = __dp4a(v0i, u[2*i+0], __dp4a(v1i, u[2*i+1], 0)); // SIMD dot product
const int dot2 = __dp4a(0x01010101, u[2*i+0], __dp4a(0x01010101, u[2*i+1], 0)); // sum of u
sumf_d += d8[i] * (dot1 * sc[i]);
// min part: m[i] times the sum of the q8_1 values
sumf_m += d8[i] * (dot2 * m[i]);
}
const float2 dm5f = __half22float2(dm5);
return dm5f.x*sumf_d - dm5f.y*sumf_m;
#else
assert(false);
return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
// contiguous u/y values
// q5_K x q8_1 dot product (mmq path, contiguous u/y values). v already holds
// fully reconstructed 5-bit quants; each QI8_1-int group uses one scale/min.
static __device__ __forceinline__ float vec_dot_q5_K_q8_1_impl_mmq(
    const int * __restrict__ v, const int * __restrict__ u, const uint8_t * __restrict__ sc,
    const uint8_t * __restrict__ m, const half2 & dm4, const half2 * __restrict__ ds8) {

#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics
    float sumf_d = 0.0f;
    float sumf_m = 0.0f;

#pragma unroll
    for (int i = 0; i < QR5_K*VDR_Q5_K_Q8_1_MMQ/QI8_1; ++i) {
        int dot = 0;

#pragma unroll
        for (int j = 0; j < QI8_1; ++j) {
            dot = __dp4a(v[i*QI8_1 + j], u[i*QI8_1 + j], dot); // SIMD dot product
        }

        const float2 ds8f = __half22float2(ds8[i]);

        sumf_d += ds8f.x * (sc[i] * dot);
        sumf_m += ds8f.y *  m[i];  // sum of q8_1 block * q5_K min val
    }

    const float2 dm4f = __half22float2(dm4);

    return dm4f.x*sumf_d - dm4f.y*sumf_m;
#else
    assert(false);
    return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
#define VDR_Q6_K_Q8_1_MMVQ 1
#define VDR_Q6_K_Q8_1_MMQ 8
// contiguous v/x values
// q6_K x q8_1 dot product (mmvq path). Each quant = 4 low bits (from vl) plus
// 2 high bits (from vh, shifted into bits 4-5), minus the fixed offset 32.
// scales holds int8 sub-block scales, sampled every 4th entry.
static __device__ __forceinline__ float vec_dot_q6_K_q8_1_impl_mmvq(
const int & vl, const int & vh, const int * __restrict__ u, const int8_t * __restrict__ scales,
const float & d, const float * __restrict__ d8) {
#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics
float sumf = 0.0f;
#pragma unroll
for (int i = 0; i < QR6_K; ++i) {
const int sc = scales[4*i];
const int vil = (vl >> (4*i)) & 0x0F0F0F0F;
const int vih = ((vh >> (4*i)) << 4) & 0x30303030;
const int vi = __vsubss4((vil | vih), 0x20202020); // vi = (vil | vih) - 32
sumf += d8[i] * (__dp4a(vi, u[i], 0) * sc); // SIMD dot product
}
return d*sumf;
#else
assert(false);
return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
// contiguous u/y values
// q6_K x q8_1 dot product (mmq path, contiguous u/y values). Every group of
// 4 iterations spans two q6_K sub-block scales (sc[i0/2], sc[i0/2+1]) that
// share one q8_1 scale d8[i0/4]; the interleaved 2*i / 2*i+4 indexing keeps
// the two scale groups in sumi_d.x and sumi_d.y respectively.
static __device__ __forceinline__ float vec_dot_q6_K_q8_1_impl_mmq(
const int * __restrict__ v, const int * __restrict__ u, const int8_t * __restrict__ sc,
const float & d6, const float * __restrict__ d8) {
#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics
float sumf_d = 0.0f;
#pragma unroll
for (int i0 = 0; i0 < VDR_Q6_K_Q8_1_MMQ; i0 += 4) {
int2 sumi_d = {0, 0}; // 2 q6_K scales per q8_1 scale
#pragma unroll
for (int i = i0; i < i0 + 2; ++i) {
sumi_d.x = __dp4a(v[2*i+0], u[2*i+0], sumi_d.x); // SIMD dot product
sumi_d.x = __dp4a(v[2*i+1], u[2*i+1], sumi_d.x); // SIMD dot product
sumi_d.y = __dp4a(v[2*i+4], u[2*i+4], sumi_d.y); // SIMD dot product
sumi_d.y = __dp4a(v[2*i+5], u[2*i+5], sumi_d.y); // SIMD dot product
}
sumf_d += d8[i0/4] * (sc[i0/2+0]*sumi_d.x + sc[i0/2+1]*sumi_d.y);
}
return d6 * sumf_d;
#else
assert(false);
return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
// Entry point for q4_0 x q8_1 (mmvq): gather the packed ints for this thread's
// position iqs, then defer to the DP4A implementation.
static __device__ __forceinline__ float vec_dot_q4_0_q8_1(
    const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {

    const block_q4_0 * bq4_0 = (const block_q4_0 *) vbq;

    int v[VDR_Q4_0_Q8_1_MMVQ];
    int u[2*VDR_Q4_0_Q8_1_MMVQ];

#pragma unroll
    for (int i = 0; i < VDR_Q4_0_Q8_1_MMVQ; ++i) {
        const int pos = iqs + i;
        v[i]     = get_int_from_uint8(bq4_0->qs, pos);
        u[2*i+0] = get_int_from_int8_aligned(bq8_1->qs, pos);
        u[2*i+1] = get_int_from_int8_aligned(bq8_1->qs, pos + QI4_0);
    }

    return vec_dot_q4_0_q8_1_impl<VDR_Q4_0_Q8_1_MMVQ>(v, u, bq4_0->d, bq8_1->ds);
}
// Shared-memory tile allocation for q4_0 mul-mat: quant ints plus per-block
// float scales. The '+ mmq_y' padding matches the (WARP_SIZE + 1) row stride
// used by load_tiles_q4_0 when indexing x_ql.
template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q4_0(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {
__shared__ int tile_x_qs[mmq_y * (WARP_SIZE) + mmq_y];
__shared__ float tile_x_d[mmq_y * (WARP_SIZE/QI4_0) + mmq_y/QI4_0];
*x_ql = tile_x_qs;
// q4_0 scales are plain floats; consumers reinterpret the half2* back to float*
*x_dm = (half2 *) tile_x_d;
}
// Load an mmq_y x WARP_SIZE tile of q4_0 data into shared memory: the packed
// quant ints into x_ql and the per-block float scales into x_dm (reinterpreted
// as float*). i_offset selects this warp's rows; k selects the column; i_max
// clamps row indices when need_check is set (tile partially out of bounds).
template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q4_0(
const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
__builtin_assume(i_offset >= 0);
__builtin_assume(i_offset < nwarps);
__builtin_assume(k >= 0);
__builtin_assume(k < WARP_SIZE);
const int kbx = k / QI4_0;
const int kqsx = k % QI4_0;
const block_q4_0 * bx0 = (block_q4_0 *) vx;
float * x_dmf = (float *) x_dm;
// first pass: every thread copies one packed int of quants per row stripe
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
int i = i0 + i_offset;
if (need_check) {
i = min(i, i_max);
}
const block_q4_0 * bxi = bx0 + i*blocks_per_row + kbx;
// row stride is WARP_SIZE + 1 to match the padded shared allocation
x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8(bxi->qs, kqsx);
// x_dmf[i * (WARP_SIZE/QI4_0) + i / QI4_0 + kbx] = bxi->d;
}
const int blocks_per_tile_x_row = WARP_SIZE / QI4_0;
const int kbxd = k % blocks_per_tile_x_row;
// second pass: one thread per block writes that block's scale
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI4_0) {
int i = i0 + i_offset * QI4_0 + k / blocks_per_tile_x_row;
if (need_check) {
i = min(i, i_max);
}
const block_q4_0 * bxi = bx0 + i*blocks_per_row + kbxd;
x_dmf[i * (WARP_SIZE/QI4_0) + i / QI4_0 + kbxd] = bxi->d;
}
}
// Tile-level q4_0 x q8_1 dot for the mul-mat kernel: reads quants/scales from
// the shared-memory tiles filled by load_tiles_q4_0 and the y tiles, for
// output element (i, j) at inner position k.
static __device__ __forceinline__ float vec_dot_q4_0_q8_1_mul_mat(
const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
// map k to the q8_1 quant layout (low/high halves of each block interleaved)
const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2));
const float * x_dmf = (float *) x_dm;
int u[2*VDR_Q4_0_Q8_1_MMQ];
#pragma unroll
for (int l = 0; l < VDR_Q4_0_Q8_1_MMQ; ++l) {
u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l) % WARP_SIZE];
u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI4_0) % WARP_SIZE];
}
return vec_dot_q4_0_q8_1_impl<VDR_Q4_0_Q8_1_MMQ>
(&x_ql[i * (WARP_SIZE + 1) + k], u, x_dmf[i * (WARP_SIZE/QI4_0) + i/QI4_0 + k/QI4_0],
y_ds[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]);
}
// Entry point for q4_1 x q8_1 (mmvq): gather the packed ints for this thread's
// position iqs, then defer to the DP4A implementation.
static __device__ __forceinline__ float vec_dot_q4_1_q8_1(
    const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {

    const block_q4_1 * bq4_1 = (const block_q4_1 *) vbq;

    int v[VDR_Q4_1_Q8_1_MMVQ];
    int u[2*VDR_Q4_1_Q8_1_MMVQ];

#pragma unroll
    for (int i = 0; i < VDR_Q4_1_Q8_1_MMVQ; ++i) {
        const int pos = iqs + i;
        v[i]     = get_int_from_uint8_aligned(bq4_1->qs, pos);
        u[2*i+0] = get_int_from_int8_aligned(bq8_1->qs, pos);
        u[2*i+1] = get_int_from_int8_aligned(bq8_1->qs, pos + QI4_1);
    }

    return vec_dot_q4_1_q8_1_impl<VDR_Q4_1_Q8_1_MMVQ>(v, u, bq4_1->dm, bq8_1->ds);
}
// Shared-memory tile allocation for q4_1 mul-mat: quant ints plus per-block
// half2 {d, m} scales. The '+ mmq_y' padding matches the (WARP_SIZE + 1)
// row stride used by load_tiles_q4_1.
template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q4_1(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {

    // removed stray '+ +' (a no-op unary plus) for consistency with the other allocators
    __shared__ int   tile_x_qs[mmq_y * (WARP_SIZE) + mmq_y];
    __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI4_1) + mmq_y/QI4_1];

    *x_ql = tile_x_qs;
    *x_dm = tile_x_dm;
}
// Load an mmq_y x WARP_SIZE tile of q4_1 data into shared memory: packed quant
// ints into x_ql and per-block half2 {d, m} into x_dm. i_offset selects this
// warp's rows, k the column; i_max clamps rows when need_check is set.
template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q4_1(
const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
__builtin_assume(i_offset >= 0);
__builtin_assume(i_offset < nwarps);
__builtin_assume(k >= 0);
__builtin_assume(k < WARP_SIZE);
const int kbx = k / QI4_1;
const int kqsx = k % QI4_1;
const block_q4_1 * bx0 = (block_q4_1 *) vx;
// first pass: copy the packed quant ints, one per thread per row stripe
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
int i = i0 + i_offset;
if (need_check) {
i = min(i, i_max);
}
const block_q4_1 * bxi = bx0 + i*blocks_per_row + kbx;
// row stride is WARP_SIZE + 1 to match the padded shared allocation
x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx);
}
const int blocks_per_tile_x_row = WARP_SIZE / QI4_1;
const int kbxd = k % blocks_per_tile_x_row;
// second pass: one thread per block stores that block's {d, m} pair
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI4_1) {
int i = i0 + i_offset * QI4_1 + k / blocks_per_tile_x_row;
if (need_check) {
i = min(i, i_max);
}
const block_q4_1 * bxi = bx0 + i*blocks_per_row + kbxd;
x_dm[i * (WARP_SIZE/QI4_1) + i / QI4_1 + kbxd] = bxi->dm;
}
}
// Tile-level q4_1 x q8_1 dot for the mul-mat kernel: reads from the shared
// tiles filled by load_tiles_q4_1 and the y tiles, for output element (i, j)
// at inner position k.
static __device__ __forceinline__ float vec_dot_q4_1_q8_1_mul_mat(
const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
// map k to the q8_1 quant layout (low/high halves of each block interleaved)
const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2));
int u[2*VDR_Q4_1_Q8_1_MMQ];
#pragma unroll
for (int l = 0; l < VDR_Q4_1_Q8_1_MMQ; ++l) {
u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l) % WARP_SIZE];
u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI4_1) % WARP_SIZE];
}
return vec_dot_q4_1_q8_1_impl<VDR_Q4_1_Q8_1_MMQ>
(&x_ql[i * (WARP_SIZE + 1) + k], u, x_dm[i * (WARP_SIZE/QI4_1) + i/QI4_1 + k/QI4_1],
y_ds[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]);
}
// Entry point for q5_0 x q8_1 (mmvq): gather low-nibble ints, the shifted
// high-bit word, and the matching q8_1 ints, then defer to the DP4A impl.
static __device__ __forceinline__ float vec_dot_q5_0_q8_1(
    const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {

    const block_q5_0 * bq5_0 = (const block_q5_0 *) vbq;

    int vl[VDR_Q5_0_Q8_1_MMVQ];
    int vh[VDR_Q5_0_Q8_1_MMVQ];
    int u[2*VDR_Q5_0_Q8_1_MMVQ];

#pragma unroll
    for (int i = 0; i < VDR_Q5_0_Q8_1_MMVQ; ++i) {
        const int pos = iqs + i;
        vl[i]    = get_int_from_uint8(bq5_0->qs, pos);
        vh[i]    = get_int_from_uint8(bq5_0->qh, 0) >> (4 * pos);
        u[2*i+0] = get_int_from_int8_aligned(bq8_1->qs, pos);
        u[2*i+1] = get_int_from_int8_aligned(bq8_1->qs, pos + QI5_0);
    }

    return vec_dot_q5_0_q8_1_impl<VDR_Q5_0_Q8_1_MMVQ>(vl, vh, u, bq5_0->d, bq8_1->ds);
}
// Shared-memory tile allocation for q5_0 mul-mat. Quants are stored expanded
// to two ints per position (hence 2*WARP_SIZE); scales are plain floats.
template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q5_0(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {
__shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE) + mmq_y];
__shared__ float tile_x_d[mmq_y * (WARP_SIZE/QI5_0) + mmq_y/QI5_0];
*x_ql = tile_x_ql;
// q5_0 scales are plain floats; consumers reinterpret the half2* back to float*
*x_dm = (half2 *) tile_x_d;
}
// Load an mmq_y x WARP_SIZE tile of q5_0 data into shared memory. The 5th
// bits from qh are scattered into bit positions 4/12/20/28 of each nibble
// int and the fixed -16 offset is applied up front, so the tile holds fully
// reconstructed signed quants (two ints per position). Scales go to x_dm as
// floats.
template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q5_0(
const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
__builtin_assume(i_offset >= 0);
__builtin_assume(i_offset < nwarps);
__builtin_assume(k >= 0);
__builtin_assume(k < WARP_SIZE);
const int kbx = k / QI5_0;
const int kqsx = k % QI5_0;
const block_q5_0 * bx0 = (block_q5_0 *) vx;
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
int i = i0 + i_offset;
if (need_check) {
i = min(i, i_max);
}
const block_q5_0 * bxi = bx0 + i*blocks_per_row + kbx;
const int ql = get_int_from_uint8(bxi->qs, kqsx);
const int qh = get_int_from_uint8(bxi->qh, 0) >> (4 * (k % QI5_0));
// low nibbles + their 5th bits, offset removed
int qs0 = (ql >> 0) & 0x0F0F0F0F;
qs0 |= (qh << 4) & 0x00000010; // 0 -> 4
qs0 |= (qh << 11) & 0x00001000; // 1 -> 12
qs0 |= (qh << 18) & 0x00100000; // 2 -> 20
qs0 |= (qh << 25) & 0x10000000; // 3 -> 28
qs0 = __vsubss4(qs0, 0x10101010); // subtract 16
x_ql[i * (2*WARP_SIZE + 1) + 2*k+0] = qs0;
// high nibbles + their 5th bits, offset removed
int qs1 = (ql >> 4) & 0x0F0F0F0F;
qs1 |= (qh >> 12) & 0x00000010; // 16 -> 4
qs1 |= (qh >> 5) & 0x00001000; // 17 -> 12
qs1 |= (qh << 2) & 0x00100000; // 18 -> 20
qs1 |= (qh << 9) & 0x10000000; // 19 -> 28
qs1 = __vsubss4(qs1, 0x10101010); // subtract 16
x_ql[i * (2*WARP_SIZE + 1) + 2*k+1] = qs1;
}
const int blocks_per_tile_x_row = WARP_SIZE / QI5_0;
const int kbxd = k % blocks_per_tile_x_row;
float * x_dmf = (float *) x_dm;
// second pass: one thread per block stores that block's float scale
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI5_0) {
int i = i0 + i_offset * QI5_0 + k / blocks_per_tile_x_row;
if (need_check) {
i = min(i, i_max);
}
const block_q5_0 * bxi = bx0 + i*blocks_per_row + kbxd;
x_dmf[i * (WARP_SIZE/QI5_0) + i / QI5_0 + kbxd] = bxi->d;
}
}
// Tile-level q5_0 x q8_1 dot for the mul-mat kernel. Because load_tiles_q5_0
// already reconstructed the quants as signed int8 values, this can reuse the
// plain q8_0 x q8_1 implementation.
static __device__ __forceinline__ float vec_dot_q5_0_q8_1_mul_mat(
const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
// map k to the q8_1 quant layout (low/high halves of each block interleaved)
const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2));
const int index_bx = i * (WARP_SIZE/QI5_0) + i/QI5_0 + k/QI5_0;
const float * x_dmf = (const float *) x_dm;
const float * y_df = (const float *) y_ds;
int u[2*VDR_Q5_0_Q8_1_MMQ];
#pragma unroll
for (int l = 0; l < VDR_Q5_0_Q8_1_MMQ; ++l) {
u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l) % WARP_SIZE];
u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI5_0) % WARP_SIZE];
}
return vec_dot_q8_0_q8_1_impl<QR5_0*VDR_Q5_0_Q8_1_MMQ>
(&x_ql[i * (2*WARP_SIZE + 1) + 2 * k], u, x_dmf[index_bx], y_df[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]);
}
// Entry point for q5_1 x q8_1 (mmvq): gather low-nibble ints, the shifted
// high-bit word, and the matching q8_1 ints, then defer to the DP4A impl.
static __device__ __forceinline__ float vec_dot_q5_1_q8_1(
    const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {

    const block_q5_1 * bq5_1 = (const block_q5_1 *) vbq;

    int vl[VDR_Q5_1_Q8_1_MMVQ];
    int vh[VDR_Q5_1_Q8_1_MMVQ];
    int u[2*VDR_Q5_1_Q8_1_MMVQ];

#pragma unroll
    for (int i = 0; i < VDR_Q5_1_Q8_1_MMVQ; ++i) {
        const int pos = iqs + i;
        vl[i]    = get_int_from_uint8_aligned(bq5_1->qs, pos);
        vh[i]    = get_int_from_uint8_aligned(bq5_1->qh, 0) >> (4 * pos);
        u[2*i+0] = get_int_from_int8_aligned(bq8_1->qs, pos);
        u[2*i+1] = get_int_from_int8_aligned(bq8_1->qs, pos + QI5_1);
    }

    return vec_dot_q5_1_q8_1_impl<VDR_Q5_1_Q8_1_MMVQ>(vl, vh, u, bq5_1->dm, bq8_1->ds);
}
// Shared-memory tile allocation for q5_1 mul-mat. Quants are stored expanded
// to two ints per position (hence 2*WARP_SIZE); scales are half2 {d, m}.
template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q5_1(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {
__shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE) + mmq_y];
__shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI5_1) + mmq_y/QI5_1];
*x_ql = tile_x_ql;
*x_dm = tile_x_dm;
}
// Load an mmq_y x WARP_SIZE tile of q5_1 data into shared memory. Like
// load_tiles_q5_0 the 5th bits are scattered into bit positions 4/12/20/28,
// but q5_1 keeps its values unsigned (the min m is applied later), so no
// offset subtraction happens here. Per-block half2 {d, m} go to x_dm.
template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q5_1(
const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
__builtin_assume(i_offset >= 0);
__builtin_assume(i_offset < nwarps);
__builtin_assume(k >= 0);
__builtin_assume(k < WARP_SIZE);
const int kbx = k / QI5_1;
const int kqsx = k % QI5_1;
const block_q5_1 * bx0 = (block_q5_1 *) vx;
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
int i = i0 + i_offset;
if (need_check) {
i = min(i, i_max);
}
const block_q5_1 * bxi = bx0 + i*blocks_per_row + kbx;
const int ql = get_int_from_uint8_aligned(bxi->qs, kqsx);
const int qh = get_int_from_uint8_aligned(bxi->qh, 0) >> (4 * (k % QI5_1));
// low nibbles + their 5th bits
int qs0 = (ql >> 0) & 0x0F0F0F0F;
qs0 |= (qh << 4) & 0x00000010; // 0 -> 4
qs0 |= (qh << 11) & 0x00001000; // 1 -> 12
qs0 |= (qh << 18) & 0x00100000; // 2 -> 20
qs0 |= (qh << 25) & 0x10000000; // 3 -> 28
x_ql[i * (2*WARP_SIZE + 1) + 2*k+0] = qs0;
// high nibbles + their 5th bits
int qs1 = (ql >> 4) & 0x0F0F0F0F;
qs1 |= (qh >> 12) & 0x00000010; // 16 -> 4
qs1 |= (qh >> 5) & 0x00001000; // 17 -> 12
qs1 |= (qh << 2) & 0x00100000; // 18 -> 20
qs1 |= (qh << 9) & 0x10000000; // 19 -> 28
x_ql[i * (2*WARP_SIZE + 1) + 2*k+1] = qs1;
}
const int blocks_per_tile_x_row = WARP_SIZE / QI5_1;
const int kbxd = k % blocks_per_tile_x_row;
// second pass: one thread per block stores that block's {d, m} pair
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI5_1) {
int i = i0 + i_offset * QI5_1 + k / blocks_per_tile_x_row;
if (need_check) {
i = min(i, i_max);
}
const block_q5_1 * bxi = bx0 + i*blocks_per_row + kbxd;
x_dm[i * (WARP_SIZE/QI5_1) + i / QI5_1 + kbxd] = bxi->dm;
}
}
// Per-thread dot product for the q5_1 mul-mat path. Operates on the shared
// memory tiles: x_ql holds the already-unpacked 5-bit quants (two ints per k,
// see load_tiles_q5_1), x_dm the per-block scales; y_qs/y_ds hold the q8_1
// side. i/j select the x-row and y-column within the tile, k the position
// along the dot-product dimension. x_qh/x_sc are unused for q5_1.
static __device__ __forceinline__ float vec_dot_q5_1_q8_1_mul_mat(
const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2));
// fix: removed stray duplicated '+' (harmless unary plus, but a typo)
const int index_bx = i * (WARP_SIZE/QI5_1) + i/QI5_1 + k/QI5_1;
int u[2*VDR_Q5_1_Q8_1_MMQ];
#pragma unroll
for (int l = 0; l < VDR_Q5_1_Q8_1_MMQ; ++l) {
u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l) % WARP_SIZE];
u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI5_1) % WARP_SIZE];
}
return vec_dot_q8_1_q8_1_impl<QR5_1*VDR_Q5_1_Q8_1_MMQ>
(&x_ql[i * (2*WARP_SIZE + 1) + 2 * k], u, x_dm[index_bx], y_ds[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]);
}
// Dot product of a single q8_0 quant block with the matching q8_1 block
// (mul-mat-vec path). iqs selects the 32-bit quant chunks for this thread.
static __device__ __forceinline__ float vec_dot_q8_0_q8_1(
const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {
const block_q8_0 * bq8_0 = (const block_q8_0 *) vbq;
int v[VDR_Q8_0_Q8_1_MMVQ]; // q8_0 quants
int u[VDR_Q8_0_Q8_1_MMVQ]; // q8_1 quants
#pragma unroll
for (int i = 0; i < VDR_Q8_0_Q8_1_MMVQ; ++i) {
v[i] = get_int_from_int8(bq8_0->qs, iqs + i);
u[i] = get_int_from_int8_aligned(bq8_1->qs, iqs + i);
}
// only the low half of bq8_1->ds is needed here
return vec_dot_q8_0_q8_1_impl<VDR_Q8_0_Q8_1_MMVQ>(v, u, bq8_0->d, __low2half(bq8_1->ds));
}
// Reserves the shared-memory tiles for the q8_0 mul-mat path: a padded int
// tile for the quants and a float tile for the per-block scales, handed back
// through the half2 pointer by reinterpretation (consumers cast it back to
// float). x_qh and x_sc are unused for q8_0 and left untouched.
template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q8_0(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {
__shared__ int   smem_qs[mmq_y * (WARP_SIZE) + mmq_y];
__shared__ float smem_d[mmq_y * (WARP_SIZE/QI8_0) + mmq_y/QI8_0];
*x_dm = (half2 *) smem_d;
*x_ql = smem_qs;
}
// Loads a tile of q8_0 blocks into shared memory for the mul-mat path:
// raw int8 quants into x_ql and the float scales d into x_dm, which is
// reinterpreted as float storage (matching allocate_tiles_q8_0).
// i_max clamps row indices when need_check is set (partial last tile).
template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q8_0(
const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
__builtin_assume(i_offset >= 0);
__builtin_assume(i_offset < nwarps);
__builtin_assume(k >= 0);
__builtin_assume(k < WARP_SIZE);
const int kbx = k / QI8_0;  // block index within the row covered by this k
const int kqsx = k % QI8_0; // quant-int index within that block
float * x_dmf = (float *) x_dm; // x_dm actually backs a float array for q8_0
const block_q8_0 * bx0 = (block_q8_0 *) vx;
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
int i = i0 + i_offset;
if (need_check) {
i = min(i, i_max);
}
const block_q8_0 * bxi = bx0 + i*blocks_per_row + kbx;
x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_int8(bxi->qs, kqsx);
}
const int blocks_per_tile_x_row = WARP_SIZE / QI8_0;
const int kbxd = k % blocks_per_tile_x_row;
// second pass: per-block scales
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI8_0) {
int i = i0 + i_offset * QI8_0 + k / blocks_per_tile_x_row;
if (need_check) {
i = min(i, i_max);
}
const block_q8_0 * bxi = bx0 + i*blocks_per_row + kbxd;
x_dmf[i * (WARP_SIZE/QI8_0) + i / QI8_0 + kbxd] = bxi->d;
}
}
// Per-thread q8_0 dot product for the mul-mat path. Both scale tiles were
// stored as plain floats (see load_tiles_q8_0), hence the reinterpreting
// casts; i/j select the x-row and y-column, k the dot-product position.
static __device__ __forceinline__ float vec_dot_q8_0_q8_1_mul_mat(
const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
const float * scales_x = (const float *) x_dm;
const float * scales_y = (const float *) y_ds;
const int * vx = &x_ql[i * (WARP_SIZE + 1) + k];
const int * vy = &y_qs[j * WARP_SIZE + k];
const float dx = scales_x[i * (WARP_SIZE/QI8_0) + i/QI8_0 + k/QI8_0];
const float dy = scales_y[j * (WARP_SIZE/QI8_1) + k/QI8_1];
return vec_dot_q8_0_q8_1_impl<VDR_Q8_0_Q8_1_MMQ>(vx, vy, dx, dy);
}
// Dot product of part of a q2_K super-block with the QR2_K q8_1 blocks that
// cover the same value range (mul-mat-vec path). iqs picks the quant int and
// derives both the q8_1 block offset and the scale byte offset.
static __device__ __forceinline__ float vec_dot_q2_K_q8_1(
const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {
const block_q2_K * bq2_K = (const block_q2_K *) vbq;
const int bq8_offset = QR2_K * (iqs / QI8_1);
const int scale_offset = iqs - iqs % QI8_1 + (iqs % QI8_1) / (QI8_1/2);
const uint8_t * scales = bq2_K->scales + scale_offset;
const int v = get_int_from_uint8_aligned(bq2_K->qs, iqs);
int u[QR2_K];   // one q8_1 quant int per 2-bit plane
float d8[QR2_K]; // low half of each q8_1 block's ds
#pragma unroll
for (int i = 0; i < QR2_K; ++ i) {
u[i] = get_int_from_int8_aligned(bq8_1[bq8_offset + i].qs, iqs % QI8_1);
d8[i] = __low2half(bq8_1[bq8_offset + i].ds);
}
return vec_dot_q2_K_q8_1_impl_mmvq(v, u, scales, bq2_K->dm, d8);
}
// Reserves the shared-memory tiles for the q2_K mul-mat path: quants, the
// per-block dm scale pairs, and the packed scale bytes. x_qh is unused for
// q2_K and left untouched.
template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q2_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {
__shared__ int   smem_ql[mmq_y * (WARP_SIZE) + mmq_y];
__shared__ half2 smem_dm[mmq_y * (WARP_SIZE/QI2_K) + mmq_y/QI2_K];
__shared__ int   smem_sc[mmq_y * (WARP_SIZE/4) + mmq_y/4];
*x_sc = smem_sc;
*x_dm = smem_dm;
*x_ql = smem_ql;
}
// Loads a tile of q2_K blocks into shared memory for the mul-mat path:
// raw quant ints into x_ql, per-block dm scale pairs into x_dm, and the
// scale bytes into x_sc. i_max clamps row indices when need_check is set.
template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q2_K(
const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
__builtin_assume(i_offset >= 0);
__builtin_assume(i_offset < nwarps);
__builtin_assume(k >= 0);
__builtin_assume(k < WARP_SIZE);
const int kbx = k / QI2_K;  // block index within the row covered by this k
const int kqsx = k % QI2_K; // quant-int index within that block
const block_q2_K * bx0 = (block_q2_K *) vx;
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
int i = i0 + i_offset;
if (need_check) {
i = min(i, i_max);
}
const block_q2_K * bxi = bx0 + i*blocks_per_row + kbx;
x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx);
}
const int blocks_per_tile_x_row = WARP_SIZE / QI2_K;
const int kbxd = k % blocks_per_tile_x_row;
// second pass: dm scale pairs (row index wrapped with % mmq_y)
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI2_K) {
int i = (i0 + i_offset * QI2_K + k / blocks_per_tile_x_row) % mmq_y;
if (need_check) {
i = min(i, i_max);
}
const block_q2_K * bxi = bx0 + i*blocks_per_row + kbxd;
x_dm[i * (WARP_SIZE/QI2_K) + i / QI2_K + kbxd] = bxi->dm;
}
// third pass: scale bytes, 4 ints of scales per block
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 4) {
int i = i0 + i_offset * 4 + k / (WARP_SIZE/4);
if (need_check) {
i = min(i, i_max);
}
const block_q2_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/4)) / (QI2_K/4);
x_sc[i * (WARP_SIZE/4) + i / 4 + k % (WARP_SIZE/4)] = get_int_from_uint8_aligned(bxi->scales, k % (QI2_K/4));
}
}
// Per-thread q2_K dot product for the mul-mat path. Extracts the 2-bit
// values for this k from the shared x_ql tile by shifting/masking, then
// defers to the impl with the matching scale bytes and q8_1 data.
static __device__ __forceinline__ float vec_dot_q2_K_q8_1_mul_mat(
const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
const int kbx = k / QI2_K;
const int ky = (k % QI2_K) * QR2_K;
const float * y_df = (const float *) y_ds; // y scales stored as floats here
int v[QR2_K*VDR_Q2_K_Q8_1_MMQ];
const int kqsx = i * (WARP_SIZE + 1) + kbx*QI2_K + (QI2_K/2) * (ky/(2*QI2_K)) + ky % (QI2_K/2);
const int shift = 2 * ((ky % (2*QI2_K)) / (QI2_K/2)); // selects the 2-bit plane
#pragma unroll
for (int l = 0; l < QR2_K*VDR_Q2_K_Q8_1_MMQ; ++l) {
v[l] = (x_ql[kqsx + l] >> shift) & 0x03030303;
}
const uint8_t * scales = ((const uint8_t *) &x_sc[i * (WARP_SIZE/4) + i/4 + kbx*4]) + ky/4;
const int index_y = j * WARP_SIZE + (QR2_K*k) % WARP_SIZE;
return vec_dot_q2_K_q8_1_impl_mmq(v, &y_qs[index_y], scales, x_dm[i * (WARP_SIZE/QI2_K) + i/QI2_K + kbx], y_df[index_y/QI8_1]);
}
// Dot product of part of a q3_K super-block with the QR3_K q8_1 blocks that
// cover the same value range (mul-mat-vec path).
static __device__ __forceinline__ float vec_dot_q3_K_q8_1(
const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {
const block_q3_K * bq3_K = (const block_q3_K *) vbq;
const int bq8_offset = QR3_K * (iqs / (QI3_K/2));
const int scale_offset = iqs - iqs % QI8_1 + (iqs % QI8_1) / (QI8_1/2);
const float d = bq3_K->d;
const int vl = get_int_from_uint8(bq3_K->qs, iqs);
// invert the mask with ~ so that a 0/1 results in 4/0 being subtracted
const int vh = ~get_int_from_uint8(bq3_K->hmask, iqs % (QI3_K/2)) >> bq8_offset;
int u[QR3_K];   // one q8_1 quant int per 2-bit plane
float d8[QR3_K]; // low half of each q8_1 block's ds
#pragma unroll
for (int i = 0; i < QR3_K; ++i) {
u[i] = get_int_from_int8_aligned(bq8_1[bq8_offset + i].qs, iqs % QI8_1);
d8[i] = __low2half(bq8_1[bq8_offset + i].ds);
}
return vec_dot_q3_K_q8_1_impl_mmvq(vl, vh, u, bq3_K->scales, scale_offset, d, d8);
}
// Reserves the shared-memory tiles for the q3_K mul-mat path. q3_K is the
// only format here that uses all four tiles: quants, per-block scales,
// high-bit masks, and the repacked 6-bit scale bytes.
template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q3_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {
__shared__ int   smem_ql[mmq_y * (WARP_SIZE) + mmq_y];
__shared__ half2 smem_dm[mmq_y * (WARP_SIZE/QI3_K) + mmq_y/QI3_K];
__shared__ int   smem_qh[mmq_y * (WARP_SIZE/2) + mmq_y/2];
__shared__ int   smem_sc[mmq_y * (WARP_SIZE/4) + mmq_y/4];
*x_sc = smem_sc;
*x_qh = smem_qh;
*x_dm = smem_dm;
*x_ql = smem_ql;
}
// Loads a tile of q3_K blocks into shared memory for the mul-mat path:
// quant ints into x_ql, float scales d into x_dm (reinterpreted as float
// storage), inverted high-bit masks into x_qh, and the 6-bit scales
// recombined into signed bytes in x_sc. i_max clamps rows when need_check.
template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q3_K(
const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
__builtin_assume(i_offset >= 0);
__builtin_assume(i_offset < nwarps);
__builtin_assume(k >= 0);
__builtin_assume(k < WARP_SIZE);
const int kbx = k / QI3_K;  // block index within the row covered by this k
const int kqsx = k % QI3_K; // quant-int index within that block
const block_q3_K * bx0 = (block_q3_K *) vx;
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
int i = i0 + i_offset;
if (need_check) {
i = min(i, i_max);
}
const block_q3_K * bxi = bx0 + i*blocks_per_row + kbx;
x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8(bxi->qs, kqsx);
}
const int blocks_per_tile_x_row = WARP_SIZE / QI3_K;
const int kbxd = k % blocks_per_tile_x_row;
float * x_dmf = (float *) x_dm; // x_dm actually backs a float array for q3_K
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI3_K) {
int i = (i0 + i_offset * QI3_K + k / blocks_per_tile_x_row) % mmq_y;
if (need_check) {
i = min(i, i_max);
}
const block_q3_K * bxi = bx0 + i*blocks_per_row + kbxd;
x_dmf[i * (WARP_SIZE/QI3_K) + i / QI3_K + kbxd] = bxi->d;
}
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 2) {
int i = i0 + i_offset * 2 + k / (WARP_SIZE/2);
if (need_check) {
i = min(i, i_max);
}
const block_q3_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/2)) / (QI3_K/2);
// invert the mask with ~ so that a 0/1 results in 4/0 being subtracted
x_qh[i * (WARP_SIZE/2) + i / 2 + k % (WARP_SIZE/2)] = ~get_int_from_uint8(bxi->hmask, k % (QI3_K/2));
}
// repack the 6-bit scales (4 low bits + 2 high bits) into signed bytes,
// centered by subtracting 32 from each
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 4) {
int i = i0 + i_offset * 4 + k / (WARP_SIZE/4);
if (need_check) {
i = min(i, i_max);
}
const block_q3_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/4)) / (QI3_K/4);
const int ksc = k % (QI3_K/4);
const int ksc_low = ksc % (QI3_K/8);
const int shift_low = 4 * (ksc / (QI3_K/8));
const int sc_low = (get_int_from_uint8(bxi->scales, ksc_low) >> shift_low) & 0x0F0F0F0F;
const int ksc_high = QI3_K/8;
const int shift_high = 2 * ksc;
const int sc_high = ((get_int_from_uint8(bxi->scales, ksc_high) >> shift_high) << 4) & 0x30303030;
const int sc = __vsubss4(sc_low | sc_high, 0x20202020);
x_sc[i * (WARP_SIZE/4) + i / 4 + k % (WARP_SIZE/4)] = sc;
}
}
// Per-thread q3_K dot product for the mul-mat path. Rebuilds the signed
// 3-bit values from the 2-bit planes in x_ql and the inverted high bits in
// x_qh, then defers to the impl with the repacked scales from x_sc.
static __device__ __forceinline__ float vec_dot_q3_K_q8_1_mul_mat(
const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
const int kbx = k / QI3_K;
const int ky = (k % QI3_K) * QR3_K;
const float * x_dmf = (const float *) x_dm; // x scales stored as floats
const float * y_df = (const float *) y_ds; // y scales stored as floats
const int8_t * scales = ((int8_t *) (x_sc + i * (WARP_SIZE/4) + i/4 + kbx*4)) + ky/4;
int v[QR3_K*VDR_Q3_K_Q8_1_MMQ];
#pragma unroll
for (int l = 0; l < QR3_K*VDR_Q3_K_Q8_1_MMQ; ++l) {
const int kqsx = i * (WARP_SIZE + 1) + kbx*QI3_K + (QI3_K/2) * (ky/(2*QI3_K)) + ky % (QI3_K/2);
const int shift = 2 * ((ky % 32) / 8);
const int vll = (x_ql[kqsx + l] >> shift) & 0x03030303;
// x_qh holds the inverted mask, so vlh subtracts 4 where the bit was 0
const int vh = x_qh[i * (WARP_SIZE/2) + i/2 + kbx * (QI3_K/2) + (ky+l)%8] >> ((ky+l) / 8);
const int vlh = (vh << 2) & 0x04040404;
v[l] = __vsubss4(vll, vlh);
}
const int index_y = j * WARP_SIZE + (k*QR3_K) % WARP_SIZE;
return vec_dot_q3_K_q8_1_impl_mmq(v, &y_qs[index_y], scales, x_dmf[i * (WARP_SIZE/QI3_K) + i/QI3_K + kbx], y_df[index_y/QI8_1]);
}
// Dot product of part of a q4_K super-block with the q8_1 blocks covering
// the same range (mul-mat-vec path). The QK_K == 256 build unpacks the
// 6-bit scales/mins and defers to the impl; the GGML_QKK_64 build computes
// the dot product inline with __dp4a (requires MIN_CC_DP4A).
static __device__ __forceinline__ float vec_dot_q4_K_q8_1(
const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {
#ifndef GGML_QKK_64
const block_q4_K * bq4_K = (const block_q4_K *) vbq;
int v[2];
int u[2*QR4_K];
float d8[QR4_K];
// iqs is in 0,2..30. bq8_offset = iqs/4 -> bq8_offset = 0, 2, 4, 6
const int bq8_offset = QR4_K * ((iqs/2) / (QI8_1/2));
// iqs = 0....3 -> bq8_offset = 0, want q4_offset = 0, 4, 8, 12
// iqs = 4....7 -> bq8_offset = 2, want q4_offset = 32, 36, 40, 44
// iqs = 8...11 -> bq8_offset = 4, want q4_offset = 64, 68, 72, 76
// iqs = 12..15 -> bq8_offset = 6, want q4_offset = 96, 100, 104, 108
const int * q4 = (const int *)(bq4_K->qs + 16 * bq8_offset + 4 * ((iqs/2)%4));
v[0] = q4[0];
v[1] = q4[4];
// unpack the 6-bit scales and mins for this offset into aux
const uint16_t * scales = (const uint16_t *)bq4_K->scales;
uint16_t aux[2];
const int j = bq8_offset/2;
if (j < 2) {
aux[0] = scales[j+0] & 0x3f3f;
aux[1] = scales[j+2] & 0x3f3f;
} else {
aux[0] = ((scales[j+2] >> 0) & 0x0f0f) | ((scales[j-2] & 0xc0c0) >> 2);
aux[1] = ((scales[j+2] >> 4) & 0x0f0f) | ((scales[j-0] & 0xc0c0) >> 2);
}
const uint8_t * sc = (const uint8_t *)aux; // scale bytes
const uint8_t * m = sc + 2;                // min bytes
for (int i = 0; i < QR4_K; ++i) {
const block_q8_1 * bq8i = bq8_1 + bq8_offset + i;
d8[i] = __low2half(bq8i->ds);
const int * q8 = (const int *)bq8i->qs + ((iqs/2)%4);
u[2*i+0] = q8[0];
u[2*i+1] = q8[4];
}
return vec_dot_q4_K_q8_1_impl_vmmq(v, u, sc, m, bq4_K->dm, d8);
#else
#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics
const block_q4_K * bq4_K = (const block_q4_K *) vbq;
float sumf_d = 0.0f;
float sumf_m = 0.0f;
uint16_t aux16[2];
const uint8_t * s = (const uint8_t *)aux16;
const uint16_t * a = (const uint16_t *)bq4_K->scales;
aux16[0] = a[0] & 0x0f0f;
aux16[1] = (a[0] >> 4) & 0x0f0f;
const float dall = bq4_K->dm[0];
const float dmin = bq4_K->dm[1];
const float d8_1 = __low2float(bq8_1[0].ds);
const float d8_2 = __low2float(bq8_1[1].ds);
const int ui1 = *((const int *)bq8_1[0].qs + (iqs/2));
const int ui2 = *((const int *)bq8_1[0].qs + (iqs/2) + 4);
const int ui3 = *((const int *)bq8_1[1].qs + (iqs/2));
const int ui4 = *((const int *)bq8_1[1].qs + (iqs/2) + 4);
const int * q4 = (const int *)bq4_K->qs + (iqs/2);
const int v1 = q4[0];
const int v2 = q4[4];
// low nibbles against ui1/ui2, high nibbles against ui3/ui4
const int dot1 = __dp4a(ui2, v2 & 0x0f0f0f0f, __dp4a(ui1, v1 & 0x0f0f0f0f, 0));
const int dot2 = __dp4a(ui4, (v2 >> 4) & 0x0f0f0f0f, __dp4a(ui3, (v1 >> 4) & 0x0f0f0f0f, 0));
// byte sums of the q8_1 values, used for the min correction term
const int dot3 = __dp4a(0x01010101, ui2, __dp4a(0x01010101, ui1, 0));
const int dot4 = __dp4a(0x01010101, ui4, __dp4a(0x01010101, ui3, 0));
sumf_d += d8_1 * (dot1 * s[0]) + d8_2 * (dot2 * s[1]);
sumf_m += d8_1 * (dot3 * s[2]) + d8_2 * (dot4 * s[3]);
return dall * sumf_d - dmin * sumf_m;
#else
assert(false);
return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
#endif
}
// Reserves the shared-memory tiles for the q4_K mul-mat path: quants,
// per-block dm scale pairs, and the repacked scale/min bytes. x_qh is
// unused for q4_K and left untouched.
template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q4_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {
__shared__ int   smem_ql[mmq_y * (WARP_SIZE) + mmq_y];
__shared__ half2 smem_dm[mmq_y * (WARP_SIZE/QI4_K) + mmq_y/QI4_K];
__shared__ int   smem_sc[mmq_y * (WARP_SIZE/8) + mmq_y/8];
*x_sc = smem_sc;
*x_dm = smem_dm;
*x_ql = smem_ql;
}
// Loads a tile of q4_K blocks into shared memory for the mul-mat path:
// quant ints into x_ql, dm scale pairs into x_dm, and the 6-bit scales/mins
// repacked into x_sc. i_max clamps row indices when need_check is set.
template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q4_K(
const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
__builtin_assume(i_offset >= 0);
__builtin_assume(i_offset < nwarps);
__builtin_assume(k >= 0);
__builtin_assume(k < WARP_SIZE);
const int kbx = k / QI4_K; // == 0 if QK_K == 256
const int kqsx = k % QI4_K; // == k if QK_K == 256
const block_q4_K * bx0 = (block_q4_K *) vx;
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
int i = i0 + i_offset;
if (need_check) {
i = min(i, i_max);
}
const block_q4_K * bxi = bx0 + i*blocks_per_row + kbx;
x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx);
}
const int blocks_per_tile_x_row = WARP_SIZE / QI4_K; // == 1 if QK_K == 256
const int kbxd = k % blocks_per_tile_x_row; // == 0 if QK_K == 256
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI4_K) {
int i = (i0 + i_offset * QI4_K + k / blocks_per_tile_x_row) % mmq_y;
if (need_check) {
i = min(i, i_max);
}
const block_q4_K * bxi = bx0 + i*blocks_per_row + kbxd;
#if QK_K == 256
x_dm[i * (WARP_SIZE/QI4_K) + i / QI4_K + kbxd] = bxi->dm;
#else
x_dm[i * (WARP_SIZE/QI4_K) + i / QI4_K + kbxd] = {bxi->dm[0], bxi->dm[1]};
#endif
}
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 8) {
int i = (i0 + i_offset * 8 + k / (WARP_SIZE/8)) % mmq_y;
if (need_check) {
i = min(i, i_max);
}
const block_q4_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/8)) / (QI4_K/8);
const int * scales = (int *) bxi->scales;
const int ksc = k % (WARP_SIZE/8);
// scale arrangement after the following two lines: sc0,...,sc3, sc4,...,sc7, m0,...,m3, m4,...,m8
int scales8 = (scales[(ksc%2) + (ksc!=0)] >> (4 * (ksc & (ksc/2)))) & 0x0F0F0F0F; // lower 4 bits
scales8 |= (scales[ksc/2] >> (2 * (ksc % 2))) & 0x30303030; // upper 2 bits
x_sc[i * (WARP_SIZE/8) + i / 8 + ksc] = scales8;
}
}
// Per-thread q4_K dot product for the mul-mat path. sc points at the scale
// bytes repacked by load_tiles_q4_K; sc+8 at the corresponding min bytes.
static __device__ __forceinline__ float vec_dot_q4_K_q8_1_mul_mat(
const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
const int index_y = j * WARP_SIZE + (QR4_K*k) % WARP_SIZE;
const uint8_t * sc8 = (const uint8_t *) &x_sc[i * (WARP_SIZE/8) + i/8 + k/16];
const uint8_t * sc = sc8 + 2*((k % 16) / 8);
return vec_dot_q4_K_q8_1_impl_mmq(&x_ql[i * (WARP_SIZE + 1) + k], &y_qs[index_y], sc, sc+8,
x_dm[i * (WARP_SIZE/QI4_K) + i/QI4_K], &y_ds[index_y/QI8_1]);
}
// Dot product of part of a q5_K super-block with the q8_1 blocks covering
// the same range (mul-mat-vec path). The QK_K == 256 build unpacks scales,
// mins, low quants and high bits and defers to the impl; the GGML_QKK_64
// build computes the dot product inline with __dp4a (requires MIN_CC_DP4A).
static __device__ __forceinline__ float vec_dot_q5_K_q8_1(
const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {
#ifndef GGML_QKK_64
const block_q5_K * bq5_K = (const block_q5_K *) vbq;
int vl[2];       // low 4 bits of the quants
int vh[2];       // high bits, shifted down for this offset
int u[2*QR5_K];  // q8_1 quants
float d8[QR5_K]; // q8_1 scales (low half of ds)
const int bq8_offset = QR5_K * ((iqs/2) / (QI8_1/2));
const int * ql = (const int *)(bq5_K->qs + 16 * bq8_offset + 4 * ((iqs/2)%4));
const int * qh = (const int *)(bq5_K->qh + 4 * ((iqs/2)%4));
vl[0] = ql[0];
vl[1] = ql[4];
vh[0] = qh[0] >> bq8_offset;
vh[1] = qh[4] >> bq8_offset;
// unpack the 6-bit scales and mins for this offset into aux
const uint16_t * scales = (const uint16_t *)bq5_K->scales;
uint16_t aux[2];
const int j = bq8_offset/2;
if (j < 2) {
aux[0] = scales[j+0] & 0x3f3f;
aux[1] = scales[j+2] & 0x3f3f;
} else {
aux[0] = ((scales[j+2] >> 0) & 0x0f0f) | ((scales[j-2] & 0xc0c0) >> 2);
aux[1] = ((scales[j+2] >> 4) & 0x0f0f) | ((scales[j-0] & 0xc0c0) >> 2);
}
const uint8_t * sc = (const uint8_t *)aux; // scale bytes
const uint8_t * m = sc + 2;                // min bytes
#pragma unroll
for (int i = 0; i < QR5_K; ++i) {
const block_q8_1 * bq8i = bq8_1 + bq8_offset + i;
d8[i] = __low2float(bq8i->ds);
const int * q8 = (const int *)bq8i->qs + ((iqs/2)%4);
u[2*i+0] = q8[0];
u[2*i+1] = q8[4];
}
return vec_dot_q5_K_q8_1_impl_vmmq(vl, vh, u, sc, m, bq5_K->dm, d8);
#else
#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics
const block_q5_K * bq5_K = (const block_q5_K *) vbq;
const int8_t * s = bq5_K->scales;
const float d = bq5_K->d;
const float d8_1 = __low2half(bq8_1[0].ds);
const float d8_2 = __low2half(bq8_1[1].ds);
const int ui1 = *((const int *)bq8_1[0].qs + (iqs/2));
const int ui2 = *((const int *)bq8_1[0].qs + (iqs/2) + 4);
const int ui3 = *((const int *)bq8_1[1].qs + (iqs/2));
const int ui4 = *((const int *)bq8_1[1].qs + (iqs/2) + 4);
const int * ql = (const int *)bq5_K->qs + (iqs/2);
const int vl1 = ql[0];
const int vl2 = ql[4];
const int step = 4 * (iqs/2); // 0, 4, 8, 12
const int im = step/8; // = 0 for iqs = 0, 2, = 1 for iqs = 4, 6
const int in = step%8; // 0, 4, 0, 4
const int vh = (*((const int *)(bq5_K->qh + in))) >> im;
// combine the 5th bits (XOR flips them so 0/1 maps to +16/0) with the nibbles
const int v1 = (((vh << 4) & 0x10101010) ^ 0x10101010) | ((vl1 >> 0) & 0x0f0f0f0f);
const int v2 = (((vh << 2) & 0x10101010) ^ 0x10101010) | ((vl2 >> 0) & 0x0f0f0f0f);
const int v3 = (((vh >> 0) & 0x10101010) ^ 0x10101010) | ((vl1 >> 4) & 0x0f0f0f0f);
const int v4 = (((vh >> 2) & 0x10101010) ^ 0x10101010) | ((vl2 >> 4) & 0x0f0f0f0f);
const float sumf_d = d8_1 * (__dp4a(ui1, v1, 0) * s[0] + __dp4a(ui2, v2, 0) * s[1])
+ d8_2 * (__dp4a(ui3, v3, 0) * s[2] + __dp4a(ui4, v4, 0) * s[3]);
return d * sumf_d;
#else
assert(false);
return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
#endif
}
// Reserves the shared-memory tiles for the q5_K mul-mat path: a double-width
// quant tile (the 5-bit values are unpacked to 8 bits), the per-block dm
// scale pairs, and the repacked scale/min bytes. x_qh is unused because the
// high bits are merged into x_ql at load time.
template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q5_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {
__shared__ int   smem_ql[mmq_y * (2*WARP_SIZE) + mmq_y];
__shared__ half2 smem_dm[mmq_y * (WARP_SIZE/QI5_K) + mmq_y/QI5_K];
__shared__ int   smem_sc[mmq_y * (WARP_SIZE/8) + mmq_y/8];
*x_sc = smem_sc;
*x_dm = smem_dm;
*x_ql = smem_ql;
}
// Loads a tile of q5_K blocks into shared memory for the mul-mat path.
// The 5-bit quants are expanded to two ints per k (nibbles merged with
// their high bits at position 4) in x_ql; dm pairs go to x_dm and the
// 6-bit scales/mins are repacked into x_sc.
template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q5_K(
const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
__builtin_assume(i_offset >= 0);
__builtin_assume(i_offset < nwarps);
__builtin_assume(k >= 0);
__builtin_assume(k < WARP_SIZE);
const int kbx = k / QI5_K; // == 0 if QK_K == 256
const int kqsx = k % QI5_K; // == k if QK_K == 256
const block_q5_K * bx0 = (block_q5_K *) vx;
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
int i = i0 + i_offset;
if (need_check) {
i = min(i, i_max);
}
const block_q5_K * bxi = bx0 + i*blocks_per_row + kbx;
const int ky = QR5_K*kqsx;
const int ql = get_int_from_uint8_aligned(bxi->qs, kqsx);
const int ql0 = (ql >> 0) & 0x0F0F0F0F;
const int ql1 = (ql >> 4) & 0x0F0F0F0F;
const int qh = get_int_from_uint8_aligned(bxi->qh, kqsx % (QI5_K/4));
const int qh0 = ((qh >> (2 * (kqsx / (QI5_K/4)) + 0)) << 4) & 0x10101010;
const int qh1 = ((qh >> (2 * (kqsx / (QI5_K/4)) + 1)) << 4) & 0x10101010;
const int kq0 = ky - ky % (QI5_K/2) + k % (QI5_K/4) + 0;
const int kq1 = ky - ky % (QI5_K/2) + k % (QI5_K/4) + (QI5_K/4);
x_ql[i * (2*WARP_SIZE + 1) + kq0] = ql0 | qh0;
x_ql[i * (2*WARP_SIZE + 1) + kq1] = ql1 | qh1;
}
const int blocks_per_tile_x_row = WARP_SIZE / QI5_K; // == 1 if QK_K == 256
const int kbxd = k % blocks_per_tile_x_row; // == 0 if QK_K == 256
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI5_K) {
int i = (i0 + i_offset * QI5_K + k / blocks_per_tile_x_row) % mmq_y;
if (need_check) {
i = min(i, i_max);
}
const block_q5_K * bxi = bx0 + i*blocks_per_row + kbxd;
#if QK_K == 256
x_dm[i * (WARP_SIZE/QI5_K) + i / QI5_K + kbxd] = bxi->dm;
#endif
// NOTE(review): unlike load_tiles_q4_K there is no #else branch here, so
// for QK_K != 256 builds x_dm is left unwritten — confirm this path is
// never taken (or is handled elsewhere) for that configuration.
}
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 8) {
int i = (i0 + i_offset * 8 + k / (WARP_SIZE/8)) % mmq_y;
if (need_check) {
i = min(i, i_max);
}
const block_q5_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/8)) / (QI5_K/8);
const int * scales = (int *) bxi->scales;
const int ksc = k % (WARP_SIZE/8);
// scale arrangement after the following two lines: sc0,...,sc3, sc4,...,sc7, m0,...,m3, m4,...,m8
int scales8 = (scales[(ksc%2) + (ksc!=0)] >> (4 * (ksc & (ksc/2)))) & 0x0F0F0F0F; // lower 4 bits
scales8 |= (scales[ksc/2] >> (2 * (ksc % 2))) & 0x30303030; // upper 2 bits
x_sc[i * (WARP_SIZE/8) + i / 8 + ksc] = scales8;
}
}
// Per-thread q5_K dot product for the mul-mat path; the quants were already
// unpacked to 8 bits in x_ql by load_tiles_q5_K. sc points at the repacked
// scale bytes, sc+8 at the corresponding min bytes.
static __device__ __forceinline__ float vec_dot_q5_K_q8_1_mul_mat(
const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
const int index_x = i * (QR5_K*WARP_SIZE + 1) + QR5_K*k;
const int index_y = j * WARP_SIZE + (QR5_K*k) % WARP_SIZE;
const uint8_t * sc8 = (const uint8_t *) &x_sc[i * (WARP_SIZE/8) + i/8 + k/16];
const uint8_t * sc = sc8 + 2 * ((k % 16) / 8);
return vec_dot_q5_K_q8_1_impl_mmq(&x_ql[index_x], &y_qs[index_y], sc, sc+8,
x_dm[i * (WARP_SIZE/QI5_K) + i/QI5_K], &y_ds[index_y/QI8_1]);
}
// Dot product of part of a q6_K super-block with the q8_1 blocks covering
// the same range (mul-mat-vec path). Splits iqs into low-quant, high-bit
// and scale offsets, then defers to the impl.
static __device__ __forceinline__ float vec_dot_q6_K_q8_1(
const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {
const block_q6_K * bq6_K = (const block_q6_K *) vbq;
const int bq8_offset = 2 * QR6_K * (iqs / (QI6_K/2)) + (iqs % (QI6_K/2)) / (QI6_K/4);
const int scale_offset = (QI6_K/4) * (iqs / (QI6_K/2)) + (iqs % (QI6_K/2)) / (QI6_K/8);
const int vh_shift = 2 * ((iqs % (QI6_K/2)) / (QI6_K/4));
const int vl = get_int_from_uint8(bq6_K->ql, iqs);
const int vh = get_int_from_uint8(bq6_K->qh, (QI6_K/4) * (iqs / (QI6_K/2)) + iqs % (QI6_K/4)) >> vh_shift;
const int8_t * scales = bq6_K->scales + scale_offset;
int u[QR6_K];   // q8_1 quants, strided by 2 blocks
float d8[QR6_K]; // q8_1 scales (low half of ds)
#pragma unroll
for (int i = 0; i < QR6_K; ++i) {
u[i] = get_int_from_int8_aligned(bq8_1[bq8_offset + 2*i].qs, iqs % QI8_1);
d8[i] = __low2half(bq8_1[bq8_offset + 2*i].ds);
}
return vec_dot_q6_K_q8_1_impl_mmvq(vl, vh, u, scales, bq6_K->d, d8);
}
// Reserves the shared-memory tiles for the q6_K mul-mat path: a double-width
// quant tile (6-bit values unpacked to 8 bits), the per-block scales, and the
// int8 scale bytes. x_qh is unused because the high bits are merged into
// x_ql at load time.
template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q6_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {
__shared__ int   smem_ql[mmq_y * (2*WARP_SIZE) + mmq_y];
__shared__ half2 smem_dm[mmq_y * (WARP_SIZE/QI6_K) + mmq_y/QI6_K];
__shared__ int   smem_sc[mmq_y * (WARP_SIZE/8) + mmq_y/8];
*x_sc = smem_sc;
*x_dm = smem_dm;
*x_ql = smem_ql;
}
// Loads a tile of q6_K blocks into shared memory for the mul-mat path.
// The 6-bit quants are expanded to two signed ints per k (nibbles merged
// with the matching qh bits at positions 4-5, then re-centered by
// subtracting 32); float scales d go to x_dm (reinterpreted as float
// storage) and the int8 scale bytes to x_sc.
template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q6_K(
const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
__builtin_assume(i_offset >= 0);
__builtin_assume(i_offset < nwarps);
__builtin_assume(k >= 0);
__builtin_assume(k < WARP_SIZE);
const int kbx = k / QI6_K; // == 0 if QK_K == 256
const int kqsx = k % QI6_K; // == k if QK_K == 256
const block_q6_K * bx0 = (block_q6_K *) vx;
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
int i = i0 + i_offset;
if (need_check) {
i = min(i, i_max);
}
const block_q6_K * bxi = bx0 + i*blocks_per_row + kbx;
const int ky = QR6_K*kqsx;
const int ql = get_int_from_uint8(bxi->ql, kqsx);
const int ql0 = (ql >> 0) & 0x0F0F0F0F;
const int ql1 = (ql >> 4) & 0x0F0F0F0F;
const int qh = get_int_from_uint8(bxi->qh, (QI6_K/4) * (kqsx / (QI6_K/2)) + kqsx % (QI6_K/4));
const int qh0 = ((qh >> (2 * ((kqsx % (QI6_K/2)) / (QI6_K/4)))) << 4) & 0x30303030;
const int qh1 = (qh >> (2 * ((kqsx % (QI6_K/2)) / (QI6_K/4)))) & 0x30303030;
const int kq0 = ky - ky % QI6_K + k % (QI6_K/2) + 0;
const int kq1 = ky - ky % QI6_K + k % (QI6_K/2) + (QI6_K/2);
x_ql[i * (2*WARP_SIZE + 1) + kq0] = __vsubss4(ql0 | qh0, 0x20202020);
x_ql[i * (2*WARP_SIZE + 1) + kq1] = __vsubss4(ql1 | qh1, 0x20202020);
}
const int blocks_per_tile_x_row = WARP_SIZE / QI6_K; // == 1 if QK_K == 256
const int kbxd = k % blocks_per_tile_x_row; // == 0 if QK_K == 256
float * x_dmf = (float *) x_dm; // x_dm actually backs a float array for q6_K
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI6_K) {
int i = (i0 + i_offset * QI6_K + k / blocks_per_tile_x_row) % mmq_y;
if (need_check) {
i = min(i, i_max);
}
const block_q6_K * bxi = bx0 + i*blocks_per_row + kbxd;
x_dmf[i * (WARP_SIZE/QI6_K) + i / QI6_K + kbxd] = bxi->d;
}
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 8) {
int i = (i0 + i_offset * 8 + k / (WARP_SIZE/8)) % mmq_y;
if (need_check) {
i = min(i, i_max);
}
const block_q6_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/8)) / 4;
x_sc[i * (WARP_SIZE/8) + i / 8 + k % (WARP_SIZE/8)] = get_int_from_int8(bxi->scales, k % (QI6_K/8));
}
}
// Per-thread q6_K dot product for the mul-mat path; the quants were already
// unpacked to signed 8-bit values in x_ql, and both scale tiles hold plain
// floats (hence the reinterpreting casts).
static __device__ __forceinline__ float vec_dot_q6_K_q8_1_mul_mat(
const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
const float * scales_x = (const float *) x_dm;
const float * scales_y = (const float *) y_ds;
const int8_t * sc = (const int8_t *) &x_sc[i * (WARP_SIZE/8) + i/8 + k/8];
const int index_x = i * (QR6_K*WARP_SIZE + 1) + QR6_K*k;
const int index_y = j * WARP_SIZE + (QR6_K*k) % WARP_SIZE;
const float dx = scales_x[i * (WARP_SIZE/QI6_K) + i/QI6_K];
return vec_dot_q6_K_q8_1_impl_mmq(&x_ql[index_x], &y_qs[index_y], sc, dx, &scales_y[index_y/QI8_1]);
}
// Generic tiled matrix multiplication of a quantized x matrix (block_q_t)
// with a q8_1-quantized y matrix, accumulating float results into dst.
// Template parameters select the quant format's block geometry (qk/qr/qi),
// the per-thread-block tile size (mmq_y rows of x by mmq_x columns of y),
// the warp count, and the format-specific allocate/load/dot callbacks.
// Each thread block computes one mmq_y x mmq_x tile of dst.
template <int qk, int qr, int qi, bool need_sum, typename block_q_t, int mmq_x, int mmq_y, int nwarps,
allocate_tiles_cuda_t allocate_tiles, load_tiles_cuda_t load_tiles, int vdr, vec_dot_q_mul_mat_cuda_t vec_dot>
static __device__ __forceinline__ void mul_mat_q(
const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
const block_q_t * x = (const block_q_t *) vx;
const block_q8_1 * y = (const block_q8_1 *) vy;
const int blocks_per_row_x = ncols_x / qk;
const int blocks_per_col_y = nrows_y / QK8_1;
const int blocks_per_warp = WARP_SIZE / qi;
const int & ncols_dst = ncols_y;
// tile origin for this thread block
const int row_dst_0 = blockIdx.x*mmq_y;
const int & row_x_0 = row_dst_0;
const int col_dst_0 = blockIdx.y*mmq_x;
const int & col_y_0 = col_dst_0;
// format-specific shared-memory tiles for the x side
int * tile_x_ql = nullptr;
half2 * tile_x_dm = nullptr;
int * tile_x_qh = nullptr;
int * tile_x_sc = nullptr;
allocate_tiles(&tile_x_ql, &tile_x_dm, &tile_x_qh, &tile_x_sc);
__shared__ int tile_y_qs[mmq_x * WARP_SIZE];
__shared__ half2 tile_y_ds[mmq_x * WARP_SIZE/QI8_1];
// per-thread partial sums for the dst sub-tile this thread owns
float sum[mmq_y/WARP_SIZE][mmq_x/nwarps] = {0.0f};
// march along the dot-product dimension, one warp-wide chunk of x blocks
// at a time
for (int ib0 = 0; ib0 < blocks_per_row_x; ib0 += blocks_per_warp) {
load_tiles(x + row_x_0*blocks_per_row_x + ib0, tile_x_ql, tile_x_dm, tile_x_qh, tile_x_sc,
threadIdx.y, nrows_x-row_x_0-1, threadIdx.x, blocks_per_row_x);
#pragma unroll
for (int ir = 0; ir < qr; ++ir) {
const int kqs = ir*WARP_SIZE + threadIdx.x;
const int kbxd = kqs / QI8_1;
// stage the y quants for this qr pass into shared memory
#pragma unroll
for (int i = 0; i < mmq_x; i += nwarps) {
const int col_y_eff = min(col_y_0 + threadIdx.y + i, ncols_y-1); // to prevent out-of-bounds memory accesses
const block_q8_1 * by0 = &y[col_y_eff*blocks_per_col_y + ib0 * (qk/QK8_1) + kbxd];
const int index_y = (threadIdx.y + i) * WARP_SIZE + kqs % WARP_SIZE;
tile_y_qs[index_y] = get_int_from_int8_aligned(by0->qs, threadIdx.x % QI8_1);
}
// stage the matching y scales
#pragma unroll
for (int ids0 = 0; ids0 < mmq_x; ids0 += nwarps * QI8_1) {
const int ids = (ids0 + threadIdx.y * QI8_1 + threadIdx.x / (WARP_SIZE/QI8_1)) % mmq_x;
const int kby = threadIdx.x % (WARP_SIZE/QI8_1);
const int col_y_eff = min(col_y_0 + ids, ncols_y-1);
// if the sum is not needed it's faster to transform the scale to f32 ahead of time
const half2 * dsi_src = &y[col_y_eff*blocks_per_col_y + ib0 * (qk/QK8_1) + ir*(WARP_SIZE/QI8_1) + kby].ds;
half2 * dsi_dst = &tile_y_ds[ids * (WARP_SIZE/QI8_1) + kby];
if (need_sum) {
*dsi_dst = *dsi_src;
} else {
float * dfi_dst = (float *) dsi_dst;
// NOTE(review): __low2half yields a half that is implicitly
// converted to float on assignment — presumably intended;
// __low2float would express this more directly.
*dfi_dst = __low2half(*dsi_src);
}
}
__syncthreads(); // tiles fully staged before the dot products read them
// #pragma unroll // unrolling this loop causes too much register pressure
for (int k = ir*WARP_SIZE/qr; k < (ir+1)*WARP_SIZE/qr; k += vdr) {
#pragma unroll
for (int j = 0; j < mmq_x; j += nwarps) {
#pragma unroll
for (int i = 0; i < mmq_y; i += WARP_SIZE) {
sum[i/WARP_SIZE][j/nwarps] += vec_dot(
tile_x_ql, tile_x_dm, tile_x_qh, tile_x_sc, tile_y_qs, tile_y_ds,
threadIdx.x + i, threadIdx.y + j, k);
}
}
}
__syncthreads(); // all reads done before the next pass overwrites tiles
}
}
// write back the accumulated tile, skipping out-of-range rows/columns
#pragma unroll
for (int j = 0; j < mmq_x; j += nwarps) {
const int col_dst = col_dst_0 + j + threadIdx.y;
if (col_dst >= ncols_dst) {
return;
}
#pragma unroll
for (int i = 0; i < mmq_y; i += WARP_SIZE) {
const int row_dst = row_dst_0 + threadIdx.x + i;
if (row_dst >= nrows_dst) {
continue;
}
dst[col_dst*nrows_dst + row_dst] = sum[i/WARP_SIZE][j/nwarps];
}
}
}
#define MMQ_X_Q4_0_AMPERE 64
#define MMQ_Y_Q4_0_AMPERE 128
#define NWARPS_Q4_0_AMPERE 4
#define MMQ_X_Q4_0_PASCAL 64
#define MMQ_Y_Q4_0_PASCAL 64
#define NWARPS_Q4_0_PASCAL 8
// q4_0 arch-dispatch wrapper: selects the tile shape (mmq_x x mmq_y) and warp
// count per compute capability, then forwards to the generic mul_mat_q kernel.
// Note the "_AMPERE" constants are used for anything >= CC_TURING.
template <bool need_check> static __global__ void mul_mat_q4_0(
const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
#if __CUDA_ARCH__ >= CC_TURING
const int mmq_x = MMQ_X_Q4_0_AMPERE;
const int mmq_y = MMQ_Y_Q4_0_AMPERE;
const int nwarps = NWARPS_Q4_0_AMPERE;
mul_mat_q<QK4_0, QR4_0, QI4_0, true, block_q4_0, mmq_x, mmq_y, nwarps, allocate_tiles_q4_0<mmq_y>,
load_tiles_q4_0<mmq_y, nwarps, need_check>, VDR_Q4_0_Q8_1_MMQ, vec_dot_q4_0_q8_1_mul_mat>
(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#elif __CUDA_ARCH__ >= MIN_CC_DP4A
const int mmq_x = MMQ_X_Q4_0_PASCAL;
const int mmq_y = MMQ_Y_Q4_0_PASCAL;
const int nwarps = NWARPS_Q4_0_PASCAL;
mul_mat_q<QK4_0, QR4_0, QI4_0, true, block_q4_0, mmq_x, mmq_y, nwarps, allocate_tiles_q4_0<mmq_y>,
load_tiles_q4_0<mmq_y, nwarps, need_check>, VDR_Q4_0_Q8_1_MMQ, vec_dot_q4_0_q8_1_mul_mat>
(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#else
// Devices without DP4A cannot run this kernel at all.
(void) vec_dot_q4_0_q8_1_mul_mat;
assert(false);
#endif // __CUDA_ARCH__ >= CC_TURING
}
#define MMQ_X_Q4_1_AMPERE 64
#define MMQ_Y_Q4_1_AMPERE 128
#define NWARPS_Q4_1_AMPERE 4
#define MMQ_X_Q4_1_PASCAL 64
#define MMQ_Y_Q4_1_PASCAL 64
#define NWARPS_Q4_1_PASCAL 8
// q4_1 arch-dispatch wrapper for mul_mat_q. On pre-Turing devices
// __launch_bounds__ caps register use so the 8-warp Pascal configuration can
// still co-schedule 2 blocks per SM.
template <bool need_check> static __global__ void
#if __CUDA_ARCH__ < CC_TURING
__launch_bounds__(WARP_SIZE*NWARPS_Q4_1_PASCAL, 2)
#endif // __CUDA_ARCH__ < CC_TURING
mul_mat_q4_1(
const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
#if __CUDA_ARCH__ >= CC_TURING
const int mmq_x = MMQ_X_Q4_1_AMPERE;
const int mmq_y = MMQ_Y_Q4_1_AMPERE;
const int nwarps = NWARPS_Q4_1_AMPERE;
mul_mat_q<QK4_1, QR4_1, QI4_1, true, block_q4_1, mmq_x, mmq_y, nwarps, allocate_tiles_q4_1<mmq_y>,
load_tiles_q4_1<mmq_y, nwarps, need_check>, VDR_Q4_1_Q8_1_MMQ, vec_dot_q4_1_q8_1_mul_mat>
(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#elif __CUDA_ARCH__ >= MIN_CC_DP4A
const int mmq_x = MMQ_X_Q4_1_PASCAL;
const int mmq_y = MMQ_Y_Q4_1_PASCAL;
const int nwarps = NWARPS_Q4_1_PASCAL;
mul_mat_q<QK4_1, QR4_1, QI4_1, true, block_q4_1, mmq_x, mmq_y, nwarps, allocate_tiles_q4_1<mmq_y>,
load_tiles_q4_1<mmq_y, nwarps, need_check>, VDR_Q4_1_Q8_1_MMQ, vec_dot_q4_1_q8_1_mul_mat>
(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#else
(void) vec_dot_q4_1_q8_1_mul_mat;
assert(false);
#endif // __CUDA_ARCH__ >= CC_TURING
}
#define MMQ_X_Q5_0_AMPERE 128
#define MMQ_Y_Q5_0_AMPERE 64
#define NWARPS_Q5_0_AMPERE 4
#define MMQ_X_Q5_0_PASCAL 64
#define MMQ_Y_Q5_0_PASCAL 64
#define NWARPS_Q5_0_PASCAL 8
// q5_0 arch-dispatch wrapper for the generic mul_mat_q kernel (need_sum=false
// for this quant type).
template <bool need_check> static __global__ void mul_mat_q5_0(
const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
#if __CUDA_ARCH__ >= CC_TURING
const int mmq_x = MMQ_X_Q5_0_AMPERE;
const int mmq_y = MMQ_Y_Q5_0_AMPERE;
const int nwarps = NWARPS_Q5_0_AMPERE;
mul_mat_q<QK5_0, QR5_0, QI5_0, false, block_q5_0, mmq_x, mmq_y, nwarps, allocate_tiles_q5_0<mmq_y>,
load_tiles_q5_0<mmq_y, nwarps, need_check>, VDR_Q5_0_Q8_1_MMQ, vec_dot_q5_0_q8_1_mul_mat>
(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#elif __CUDA_ARCH__ >= MIN_CC_DP4A
const int mmq_x = MMQ_X_Q5_0_PASCAL;
const int mmq_y = MMQ_Y_Q5_0_PASCAL;
const int nwarps = NWARPS_Q5_0_PASCAL;
mul_mat_q<QK5_0, QR5_0, QI5_0, false, block_q5_0, mmq_x, mmq_y, nwarps, allocate_tiles_q5_0<mmq_y>,
load_tiles_q5_0<mmq_y, nwarps, need_check>, VDR_Q5_0_Q8_1_MMQ, vec_dot_q5_0_q8_1_mul_mat>
(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#else
(void) vec_dot_q5_0_q8_1_mul_mat;
assert(false);
#endif // __CUDA_ARCH__ >= CC_TURING
}
#define MMQ_X_Q5_1_AMPERE 128
#define MMQ_Y_Q5_1_AMPERE 64
#define NWARPS_Q5_1_AMPERE 4
#define MMQ_X_Q5_1_PASCAL 64
#define MMQ_Y_Q5_1_PASCAL 64
#define NWARPS_Q5_1_PASCAL 8
// q5_1 arch-dispatch wrapper for the generic mul_mat_q kernel.
template <bool need_check> static __global__ void mul_mat_q5_1(
const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
#if __CUDA_ARCH__ >= CC_TURING
const int mmq_x = MMQ_X_Q5_1_AMPERE;
const int mmq_y = MMQ_Y_Q5_1_AMPERE;
const int nwarps = NWARPS_Q5_1_AMPERE;
mul_mat_q<QK5_1, QR5_1, QI5_1, true, block_q5_1, mmq_x, mmq_y, nwarps, allocate_tiles_q5_1<mmq_y>,
load_tiles_q5_1<mmq_y, nwarps, need_check>, VDR_Q5_1_Q8_1_MMQ, vec_dot_q5_1_q8_1_mul_mat>
(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#elif __CUDA_ARCH__ >= MIN_CC_DP4A
const int mmq_x = MMQ_X_Q5_1_PASCAL;
const int mmq_y = MMQ_Y_Q5_1_PASCAL;
const int nwarps = NWARPS_Q5_1_PASCAL;
mul_mat_q<QK5_1, QR5_1, QI5_1, true, block_q5_1, mmq_x, mmq_y, nwarps, allocate_tiles_q5_1<mmq_y>,
load_tiles_q5_1<mmq_y, nwarps, need_check>, VDR_Q5_1_Q8_1_MMQ, vec_dot_q5_1_q8_1_mul_mat>
(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#else
(void) vec_dot_q5_1_q8_1_mul_mat;
assert(false);
#endif // __CUDA_ARCH__ >= CC_TURING
}
#define MMQ_X_Q8_0_AMPERE 128
#define MMQ_Y_Q8_0_AMPERE 64
#define NWARPS_Q8_0_AMPERE 4
#define MMQ_X_Q8_0_PASCAL 64
#define MMQ_Y_Q8_0_PASCAL 64
#define NWARPS_Q8_0_PASCAL 8
// q8_0 arch-dispatch wrapper for the generic mul_mat_q kernel.
template <bool need_check> static __global__ void mul_mat_q8_0(
const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
#if __CUDA_ARCH__ >= CC_TURING
const int mmq_x = MMQ_X_Q8_0_AMPERE;
const int mmq_y = MMQ_Y_Q8_0_AMPERE;
const int nwarps = NWARPS_Q8_0_AMPERE;
mul_mat_q<QK8_0, QR8_0, QI8_0, false, block_q8_0, mmq_x, mmq_y, nwarps, allocate_tiles_q8_0<mmq_y>,
load_tiles_q8_0<mmq_y, nwarps, need_check>, VDR_Q8_0_Q8_1_MMQ, vec_dot_q8_0_q8_1_mul_mat>
(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#elif __CUDA_ARCH__ >= MIN_CC_DP4A
const int mmq_x = MMQ_X_Q8_0_PASCAL;
const int mmq_y = MMQ_Y_Q8_0_PASCAL;
const int nwarps = NWARPS_Q8_0_PASCAL;
mul_mat_q<QK8_0, QR8_0, QI8_0, false, block_q8_0, mmq_x, mmq_y, nwarps, allocate_tiles_q8_0<mmq_y>,
load_tiles_q8_0<mmq_y, nwarps, need_check>, VDR_Q8_0_Q8_1_MMQ, vec_dot_q8_0_q8_1_mul_mat>
(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#else
(void) vec_dot_q8_0_q8_1_mul_mat;
assert(false);
#endif // __CUDA_ARCH__ >= CC_TURING
}
#define MMQ_X_Q2_K_AMPERE 64
#define MMQ_Y_Q2_K_AMPERE 128
#define NWARPS_Q2_K_AMPERE 4
#define MMQ_X_Q2_K_PASCAL 64
#define MMQ_Y_Q2_K_PASCAL 64
#define NWARPS_Q2_K_PASCAL 8
// q2_K arch-dispatch wrapper for the generic mul_mat_q kernel.
template <bool need_check> static __global__ void mul_mat_q2_K(
const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
#if __CUDA_ARCH__ >= CC_TURING
const int mmq_x = MMQ_X_Q2_K_AMPERE;
const int mmq_y = MMQ_Y_Q2_K_AMPERE;
const int nwarps = NWARPS_Q2_K_AMPERE;
mul_mat_q<QK_K, QR2_K, QI2_K, false, block_q2_K, mmq_x, mmq_y, nwarps, allocate_tiles_q2_K<mmq_y>,
load_tiles_q2_K<mmq_y, nwarps, need_check>, VDR_Q2_K_Q8_1_MMQ, vec_dot_q2_K_q8_1_mul_mat>
(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#elif __CUDA_ARCH__ >= MIN_CC_DP4A
const int mmq_x = MMQ_X_Q2_K_PASCAL;
const int mmq_y = MMQ_Y_Q2_K_PASCAL;
const int nwarps = NWARPS_Q2_K_PASCAL;
mul_mat_q<QK_K, QR2_K, QI2_K, false, block_q2_K, mmq_x, mmq_y, nwarps, allocate_tiles_q2_K<mmq_y>,
load_tiles_q2_K<mmq_y, nwarps, need_check>, VDR_Q2_K_Q8_1_MMQ, vec_dot_q2_K_q8_1_mul_mat>
(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#else
(void) vec_dot_q2_K_q8_1_mul_mat;
assert(false);
#endif // __CUDA_ARCH__ >= CC_TURING
}
#define MMQ_X_Q3_K_AMPERE 128
#define MMQ_Y_Q3_K_AMPERE 128
#define NWARPS_Q3_K_AMPERE 4
#define MMQ_X_Q3_K_PASCAL 64
#define MMQ_Y_Q3_K_PASCAL 64
#define NWARPS_Q3_K_PASCAL 8
// q3_K arch-dispatch wrapper for mul_mat_q; __launch_bounds__ on pre-Turing
// limits registers so 2 of the 8-warp blocks fit per SM.
template <bool need_check> static __global__ void
#if __CUDA_ARCH__ < CC_TURING
__launch_bounds__(WARP_SIZE*NWARPS_Q3_K_PASCAL, 2)
#endif // __CUDA_ARCH__ < CC_TURING
mul_mat_q3_K(
const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
#if __CUDA_ARCH__ >= CC_TURING
const int mmq_x = MMQ_X_Q3_K_AMPERE;
const int mmq_y = MMQ_Y_Q3_K_AMPERE;
const int nwarps = NWARPS_Q3_K_AMPERE;
mul_mat_q<QK_K, QR3_K, QI3_K, false, block_q3_K, mmq_x, mmq_y, nwarps, allocate_tiles_q3_K<mmq_y>,
load_tiles_q3_K<mmq_y, nwarps, need_check>, VDR_Q3_K_Q8_1_MMQ, vec_dot_q3_K_q8_1_mul_mat>
(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#elif __CUDA_ARCH__ >= MIN_CC_DP4A
const int mmq_x = MMQ_X_Q3_K_PASCAL;
const int mmq_y = MMQ_Y_Q3_K_PASCAL;
const int nwarps = NWARPS_Q3_K_PASCAL;
mul_mat_q<QK_K, QR3_K, QI3_K, false, block_q3_K, mmq_x, mmq_y, nwarps, allocate_tiles_q3_K<mmq_y>,
load_tiles_q3_K<mmq_y, nwarps, need_check>, VDR_Q3_K_Q8_1_MMQ, vec_dot_q3_K_q8_1_mul_mat>
(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#else
(void) vec_dot_q3_K_q8_1_mul_mat;
assert(false);
#endif // __CUDA_ARCH__ >= CC_TURING
}
#define MMQ_X_Q4_K_AMPERE 64
#define MMQ_Y_Q4_K_AMPERE 128
#define NWARPS_Q4_K_AMPERE 4
#define MMQ_X_Q4_K_PASCAL 64
#define MMQ_Y_Q4_K_PASCAL 64
#define NWARPS_Q4_K_PASCAL 8
// q4_K arch-dispatch wrapper for mul_mat_q; register-capped on pre-Turing.
template <bool need_check> static __global__ void
#if __CUDA_ARCH__ < CC_TURING
__launch_bounds__(WARP_SIZE*NWARPS_Q4_K_PASCAL, 2)
#endif // __CUDA_ARCH__ < CC_TURING
mul_mat_q4_K(
const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
#if __CUDA_ARCH__ >= CC_TURING
const int mmq_x = MMQ_X_Q4_K_AMPERE;
const int mmq_y = MMQ_Y_Q4_K_AMPERE;
const int nwarps = NWARPS_Q4_K_AMPERE;
mul_mat_q<QK_K, QR4_K, QI4_K, true, block_q4_K, mmq_x, mmq_y, nwarps, allocate_tiles_q4_K<mmq_y>,
load_tiles_q4_K<mmq_y, nwarps, need_check>, VDR_Q4_K_Q8_1_MMQ, vec_dot_q4_K_q8_1_mul_mat>
(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#elif __CUDA_ARCH__ >= MIN_CC_DP4A
const int mmq_x = MMQ_X_Q4_K_PASCAL;
const int mmq_y = MMQ_Y_Q4_K_PASCAL;
const int nwarps = NWARPS_Q4_K_PASCAL;
mul_mat_q<QK_K, QR4_K, QI4_K, true, block_q4_K, mmq_x, mmq_y, nwarps, allocate_tiles_q4_K<mmq_y>,
load_tiles_q4_K<mmq_y, nwarps, need_check>, VDR_Q4_K_Q8_1_MMQ, vec_dot_q4_K_q8_1_mul_mat>
(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#else
(void) vec_dot_q4_K_q8_1_mul_mat;
assert(false);
#endif // __CUDA_ARCH__ >= CC_TURING
}
#define MMQ_X_Q5_K_AMPERE 64
#define MMQ_Y_Q5_K_AMPERE 128
#define NWARPS_Q5_K_AMPERE 4
#define MMQ_X_Q5_K_PASCAL 64
#define MMQ_Y_Q5_K_PASCAL 64
#define NWARPS_Q5_K_PASCAL 8
// q5_K arch-dispatch wrapper for the generic mul_mat_q kernel.
template <bool need_check> static __global__ void mul_mat_q5_K(
const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
#if __CUDA_ARCH__ >= CC_TURING
const int mmq_x = MMQ_X_Q5_K_AMPERE;
const int mmq_y = MMQ_Y_Q5_K_AMPERE;
const int nwarps = NWARPS_Q5_K_AMPERE;
mul_mat_q<QK_K, QR5_K, QI5_K, true, block_q5_K, mmq_x, mmq_y, nwarps, allocate_tiles_q5_K<mmq_y>,
load_tiles_q5_K<mmq_y, nwarps, need_check>, VDR_Q5_K_Q8_1_MMQ, vec_dot_q5_K_q8_1_mul_mat>
(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#elif __CUDA_ARCH__ >= MIN_CC_DP4A
const int mmq_x = MMQ_X_Q5_K_PASCAL;
const int mmq_y = MMQ_Y_Q5_K_PASCAL;
const int nwarps = NWARPS_Q5_K_PASCAL;
mul_mat_q<QK_K, QR5_K, QI5_K, true, block_q5_K, mmq_x, mmq_y, nwarps, allocate_tiles_q5_K<mmq_y>,
load_tiles_q5_K<mmq_y, nwarps, need_check>, VDR_Q5_K_Q8_1_MMQ, vec_dot_q5_K_q8_1_mul_mat>
(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#else
(void) vec_dot_q5_K_q8_1_mul_mat;
assert(false);
#endif // __CUDA_ARCH__ >= CC_TURING
}
#define MMQ_X_Q6_K_AMPERE 64
#define MMQ_Y_Q6_K_AMPERE 64
#define NWARPS_Q6_K_AMPERE 4
#define MMQ_X_Q6_K_PASCAL 64
#define MMQ_Y_Q6_K_PASCAL 64
#define NWARPS_Q6_K_PASCAL 8
// q6_K arch-dispatch wrapper for mul_mat_q; register-capped on pre-Turing.
template <bool need_check> static __global__ void
#if __CUDA_ARCH__ < CC_TURING
__launch_bounds__(WARP_SIZE*NWARPS_Q6_K_PASCAL, 2)
#endif // __CUDA_ARCH__ < CC_TURING
mul_mat_q6_K(
const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
#if __CUDA_ARCH__ >= CC_TURING
const int mmq_x = MMQ_X_Q6_K_AMPERE;
const int mmq_y = MMQ_Y_Q6_K_AMPERE;
const int nwarps = NWARPS_Q6_K_AMPERE;
mul_mat_q<QK_K, QR6_K, QI6_K, false, block_q6_K, mmq_x, mmq_y, nwarps, allocate_tiles_q6_K<mmq_y>,
load_tiles_q6_K<mmq_y, nwarps, need_check>, VDR_Q6_K_Q8_1_MMQ, vec_dot_q6_K_q8_1_mul_mat>
(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#elif __CUDA_ARCH__ >= MIN_CC_DP4A
const int mmq_x = MMQ_X_Q6_K_PASCAL;
const int mmq_y = MMQ_Y_Q6_K_PASCAL;
const int nwarps = NWARPS_Q6_K_PASCAL;
mul_mat_q<QK_K, QR6_K, QI6_K, false, block_q6_K, mmq_x, mmq_y, nwarps, allocate_tiles_q6_K<mmq_y>,
load_tiles_q6_K<mmq_y, nwarps, need_check>, VDR_Q6_K_Q8_1_MMQ, vec_dot_q6_K_q8_1_mul_mat>
(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#else
(void) vec_dot_q6_K_q8_1_mul_mat;
assert(false);
#endif // __CUDA_ARCH__ >= CC_TURING
}
// Quantized matrix x q8_1-quantized vector product.
// One output row per (blockIdx.y, threadIdx.y); the lanes of threadIdx.x split
// the row's quant blocks and the partial sums are combined with a 32-lane
// shuffle reduction. Assumes ncols is a multiple of qk.
template <int qk, int qi, typename block_q_t, int vdr, vec_dot_q_cuda_t vec_dot_q_cuda>
static __global__ void mul_mat_vec_q(const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, const int ncols, const int nrows) {
const int row = blockIdx.y*blockDim.y + threadIdx.y;
if (row >= nrows) {
return;
}
const int blocks_per_row = ncols / qk;
const int blocks_per_warp = vdr * WARP_SIZE / qi;
// partial sum for each thread
float tmp = 0.0f;
const block_q_t * x = (const block_q_t *) vx;
const block_q8_1 * y = (const block_q8_1 *) vy;
for (int i = 0; i < blocks_per_row; i += blocks_per_warp) {
const int ibx = row*blocks_per_row + i + threadIdx.x / (qi/vdr); // x block index
const int iby = (i + threadIdx.x / (qi/vdr)) * (qk/QK8_1); // y block index that aligns with ibx
const int iqs = vdr * (threadIdx.x % (qi/vdr)); // x block quant index when casting the quants to int
tmp += vec_dot_q_cuda(&x[ibx], &y[iby], iqs);
}
// sum up partial sums and write back result
#pragma unroll
for (int mask = 16; mask > 0; mask >>= 1) {
tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
}
if (threadIdx.x == 0) {
dst[row] = tmp;
}
}
// Dequantize-on-the-fly matrix x vector product: one output row per
// (blockIdx.y, threadIdx.y). Each lane dequantizes 2 weights per inner
// iteration via the supplied dequantize_kernel and the warp's partials are
// combined with a 32-lane shuffle reduction. With GGML_CUDA_F16 the two
// partial sums are kept in a half2 to use half2 arithmetic.
template <int qk, int qr, dequantize_kernel_t dequantize_kernel>
static __global__ void dequantize_mul_mat_vec(const void * __restrict__ vx, const dfloat * __restrict__ y, float * __restrict__ dst, const int ncols, const int nrows) {
// qk = quantized weights per x block
// qr = number of quantized weights per data value in x block
const int row = blockIdx.y*blockDim.y + threadIdx.y;
if (row >= nrows) {
return;
}
const int tid = threadIdx.x;
const int iter_stride = 2*GGML_CUDA_DMMV_X;
const int vals_per_iter = iter_stride / WARP_SIZE; // num quantized vals per thread and i iter
const int y_offset = qr == 1 ? 1 : qk/2;
// partial sum for each thread
#ifdef GGML_CUDA_F16
half2 tmp = {0.0f, 0.0f}; // two sums for f16 to take advantage of half2 intrinsics
#else
float tmp = 0.0f;
#endif // GGML_CUDA_F16
for (int i = 0; i < ncols; i += iter_stride) {
const int col = i + vals_per_iter*tid;
const int ib = (row*ncols + col)/qk; // x block index
const int iqs = (col%qk)/qr; // x quant index
const int iybs = col - col%qk; // y block start index
// processing >2 values per i iter is faster for fast GPUs
#pragma unroll
for (int j = 0; j < vals_per_iter; j += 2) {
// process 2 vals per j iter
// dequantize
// for qr = 2 the iqs needs to increase by 1 per j iter because 2 weights per data val
dfloat2 v;
dequantize_kernel(vx, ib, iqs + j/qr, v);
// matrix multiplication
// for qr = 2 the y index needs to increase by 1 per j iter because of y_offset = qk/2
#ifdef GGML_CUDA_F16
tmp += __hmul2(v, {
y[iybs + iqs + j/qr + 0],
y[iybs + iqs + j/qr + y_offset]
});
#else
tmp += v.x * y[iybs + iqs + j/qr + 0];
tmp += v.y * y[iybs + iqs + j/qr + y_offset];
#endif // GGML_CUDA_F16
}
}
// sum up partial sums and write back result
#pragma unroll
for (int mask = 16; mask > 0; mask >>= 1) {
tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
}
if (tid == 0) {
#ifdef GGML_CUDA_F16
dst[row] = tmp.x + tmp.y;
#else
dst[row] = tmp;
#endif // GGML_CUDA_F16
}
}
// f16 x f32 mat-vec product for a permuted (0,2,1) x layout: x is indexed as
// [row_x][channel_x][col_x] while y/dst are channel-major. Each (row, channel)
// pair is handled by one warp row; lanes split the columns and the partials
// are combined with a 32-lane shuffle reduction.
static __global__ void mul_mat_p021_f16_f32(
const void * __restrict__ vx, const float * __restrict__ y, float * __restrict__ dst,
const int ncols_x, const int nrows_x, const int nchannels_x, const int nchannels_y) {
const half * x = (const half *) vx;
const int row_x = blockDim.y*blockIdx.y + threadIdx.y;
const int channel = blockDim.z*blockIdx.z + threadIdx.z;
// broadcast x channels over y channels (e.g. grouped heads)
const int channel_x = channel / (nchannels_y / nchannels_x);
const int nrows_y = ncols_x;
const int nrows_dst = nrows_x;
const int row_dst = row_x;
float tmp = 0.0f;
for (int col_x0 = 0; col_x0 < ncols_x; col_x0 += blockDim.x) {
const int col_x = col_x0 + threadIdx.x;
if (col_x >= ncols_x) {
break;
}
// x is transposed and permuted
const int ix = row_x*nchannels_x*ncols_x + channel_x*ncols_x + col_x;
const float xi = __half2float(x[ix]);
const int row_y = col_x;
// y is not transposed but permuted
const int iy = channel*nrows_y + row_y;
tmp += xi * y[iy];
}
// dst is not transposed and not permuted
const int idst = channel*nrows_dst + row_dst;
// sum up partial sums and write back result
#pragma unroll
for (int mask = 16; mask > 0; mask >>= 1) {
tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
}
if (threadIdx.x == 0) {
dst[idst] = tmp;
}
}
// f16 x f32 mat-vec product for non-contiguous x: explicit row/channel strides
// are passed in instead of assuming a packed layout. One warp row per
// (row, channel); lanes split the columns, partials are shuffle-reduced.
static __global__ void mul_mat_vec_nc_f16_f32( // nc == non-contiguous
const void * __restrict__ vx, const float * __restrict__ y, float * __restrict__ dst, const int ncols_x, const int nrows_x,
const int row_stride_x, const int channel_stride_x, const int channel_x_divisor) {
const half * x = (const half *) vx;
const int row_x = blockDim.y*blockIdx.y + threadIdx.y;
const int channel = blockDim.z*blockIdx.z + threadIdx.z;
// channel_x_divisor broadcasts x channels over multiple y channels
const int channel_x = channel / channel_x_divisor;
const int nrows_y = ncols_x;
const int nrows_dst = nrows_x;
const int row_dst = row_x;
const int idst = channel*nrows_dst + row_dst;
float tmp = 0.0f;
for (int col_x0 = 0; col_x0 < ncols_x; col_x0 += blockDim.x) {
const int col_x = col_x0 + threadIdx.x;
if (col_x >= ncols_x) {
break;
}
const int ix = channel_x*channel_stride_x + row_x*row_stride_x + col_x;
const float xi = __half2float(x[ix]);
const int row_y = col_x;
const int iy = channel*nrows_y + row_y;
tmp += xi * y[iy];
}
// sum up partial sums and write back result
#pragma unroll
for (int mask = 16; mask > 0; mask >>= 1) {
tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
}
if (threadIdx.x == 0) {
dst[idst] = tmp;
}
}
// Copy one fp32 element between raw byte buffers (no conversion).
static __device__ void cpy_1_f32_f32(const char * cxi, char * cdsti) {
    const float * src = (const float *) cxi;
    float * dst = (float *) cdsti;
    dst[0] = src[0];
}
// Copy one element between raw byte buffers, converting fp32 -> fp16.
static __device__ void cpy_1_f32_f16(const char * cxi, char * cdsti) {
    const float * src = (const float *) cxi;
    half * dst = (half *) cdsti;
    dst[0] = __float2half(src[0]);
}
// Element-wise copy between two 3D tensors with independent logical extents
// (ne*) and byte strides (nb*); one thread per flattened element index, with
// the per-element conversion delegated to the cpy_1 template parameter.
template <cpy_kernel_t cpy_1>
static __global__ void cpy_f32_f16(const char * cx, char * cdst, const int ne,
    const int ne00, const int ne01, const int nb00, const int nb01, const int nb02,
    const int ne10, const int ne11, const int nb10, const int nb11, const int nb12) {
    const int idx = blockDim.x*blockIdx.x + threadIdx.x;
    if (idx >= ne) {
        return;
    }
    // Unflatten idx into (i02, i01, i00) using the source's logical extents,
    // then combine with the source's byte strides to get its byte offset.
    const int i02 = idx / (ne00*ne01);
    const int i01 = (idx - i02*ne01*ne00) / ne00;
    const int i00 = idx - i02*ne01*ne00 - i01*ne00;
    const int x_offset = i00*nb00 + i01*nb01 + i02*nb02;
    // Same unflattening for the destination with its own extents and strides.
    const int i12 = idx / (ne10*ne11);
    const int i11 = (idx - i12*ne10*ne11) / ne10;
    const int i10 = idx - i12*ne10*ne11 - i11*ne10;
    const int dst_offset = i10*nb10 + i11*nb11 + i12*nb12;
    cpy_1(cx + x_offset, cdst + dst_offset);
}
// rope == RoPE == rotary positional embedding
// Rotates each adjacent pair (x[i], x[i+1]) by
// theta = (p0 + p_delta*(row/p_delta_rows)) * theta_scale^(col/2).
// Each thread handles one pair; columns are guarded, rows are not
// (presumably the launch covers rows exactly -- confirm at launch site).
static __global__ void rope_f32(const float * x, float * dst, const int ncols, const float p0,
const float p_delta, const int p_delta_rows, const float theta_scale) {
const int col = 2*(blockDim.y*blockIdx.y + threadIdx.y);
if (col >= ncols) {
return;
}
const int row = blockDim.x*blockIdx.x + threadIdx.x;
const int i = row*ncols + col;
const float theta = (p0 + p_delta * (row/p_delta_rows))*powf(theta_scale, col/2);
const float sin_theta = sinf(theta);
const float cos_theta = cosf(theta);
// 2D rotation of the (x0, x1) pair
const float x0 = x[i + 0];
const float x1 = x[i + 1];
dst[i + 0] = x0*cos_theta - x1*sin_theta;
dst[i + 1] = x0*sin_theta + x1*cos_theta;
}
// NeoX-style RoPE: rotates pairs split across the two halves of the row,
// (x[i], x[i + ncols/2]), instead of adjacent elements.
static __global__ void rope_neox_f32(const float * x, float * dst, const int ncols, const float p0,
const float p_delta, const int p_delta_rows, const float theta_scale) {
const int col = 2*(blockDim.y*blockIdx.y + threadIdx.y);
if (col >= ncols) {
return;
}
const int row = blockDim.x*blockIdx.x + threadIdx.x;
const int i = row*ncols + col/2;
const float theta = (p0 + p_delta * (row/p_delta_rows))*powf(theta_scale, col/2);
const float sin_theta = sinf(theta);
const float cos_theta = cosf(theta);
const float x0 = x[i + 0];
const float x1 = x[i + ncols/2];
dst[i + 0] = x0*cos_theta - x1*sin_theta;
dst[i + ncols/2] = x0*sin_theta + x1*cos_theta;
}
// GLM-style RoPE: the row is split into quarters. The first two quarters are
// rotated by theta = p*theta_scale^col, the last two by block_theta derived
// from block_p. Each thread handles one column of the first quarter and the
// three columns at fixed offsets from it.
static __global__ void rope_glm_f32(const float * x, float * dst, const int ncols, const float p, const float block_p, const float theta_scale) {
const int col = blockDim.x*blockIdx.x + threadIdx.x;
const int half_n_dims = ncols/4;
if (col >= half_n_dims) {
return;
}
const int row = blockDim.y*blockIdx.y + threadIdx.y;
const int i = row*ncols + col;
const float col_theta_scale = powf(theta_scale, col);
const float theta = p*col_theta_scale;
const float sin_theta = sinf(theta);
const float cos_theta = cosf(theta);
const float x0 = x[i + 0];
const float x1 = x[i + half_n_dims];
dst[i + 0] = x0*cos_theta - x1*sin_theta;
dst[i + half_n_dims] = x0*sin_theta + x1*cos_theta;
const float block_theta = block_p*col_theta_scale;
const float sin_block_theta = sinf(block_theta);
const float cos_block_theta = cosf(block_theta);
const float x2 = x[i + half_n_dims * 2];
const float x3 = x[i + half_n_dims * 3];
dst[i + half_n_dims * 2] = x2*cos_block_theta - x3*sin_block_theta;
dst[i + half_n_dims * 3] = x2*sin_block_theta + x3*cos_block_theta;
}
// ALiBi positional bias: adds col * m_k to each element, where m_k is a
// per-group slope (group index k = row/k_rows). Groups below
// n_heads_log2_floor use base m0, the rest use base m1 with doubled step --
// matching the ALiBi slope schedule for non-power-of-two head counts.
static __global__ void alibi_f32(const float * x, float * dst, const int ncols, const int k_rows,
const int n_heads_log2_floor, const float m0, const float m1) {
const int col = blockDim.x*blockIdx.x + threadIdx.x;
if (col >= ncols) {
return;
}
const int row = blockDim.y*blockIdx.y + threadIdx.y;
const int i = row*ncols + col;
const int k = row/k_rows;
float m_k;
if (k < n_heads_log2_floor) {
m_k = powf(m0, k + 1);
} else {
m_k = powf(m1, 2 * (k - n_heads_log2_floor) + 1);
}
dst[i] = col * m_k + x[i];
}
// Causal attention mask. Since '%' binds tighter than '+', the condition is
// col > n_past + (row % rows_per_channel); when true, INT_MAX is subtracted
// (the comparison yields 0 or 1), which is a branchless stand-in for setting
// -INFINITY. No bounds check on 'row' -- presumably the launch covers rows
// exactly; confirm at launch site.
static __global__ void diag_mask_inf_f32(const float * x, float * dst, const int ncols, const int rows_per_channel, const int n_past) {
const int col = blockDim.y*blockIdx.y + threadIdx.y;
const int row = blockDim.x*blockIdx.x + threadIdx.x;
if (col >= ncols) {
return;
}
const int i = row*ncols + col;
// dst[i] = col > n_past + row ? -INFINITY : x[i];
dst[i] = x[i] - (col > n_past + row % rows_per_channel) * INT_MAX; // equivalent within rounding error but slightly faster on GPU
}
// the CUDA soft max implementation differs from the CPU implementation
// instead of doubles floats are used
// Row-wise numerically-stable softmax: each row is reduced across threadIdx.y.
// The shuffle reductions span 32 lanes, so this assumes blockDim.x == 1 and
// blockDim.y <= 32 so threadIdx.y maps onto warp lanes -- NOTE(review):
// confirm at the launch site.
static __global__ void soft_max_f32(const float * x, float * dst, const int ncols) {
const int row = blockDim.x*blockIdx.x + threadIdx.x;
const int block_size = blockDim.y;
const int tid = threadIdx.y;
float max_val = -INFINITY;
for (int col = tid; col < ncols; col += block_size) {
const int i = row*ncols + col;
max_val = max(max_val, x[i]);
}
// find the max value in the block
#pragma unroll
for (int mask = 16; mask > 0; mask >>= 1) {
max_val = max(max_val, __shfl_xor_sync(0xffffffff, max_val, mask, 32));
}
// exponentiate shifted values and accumulate the normalizer
float tmp = 0.f;
for (int col = tid; col < ncols; col += block_size) {
const int i = row*ncols + col;
const float val = expf(x[i] - max_val);
tmp += val;
dst[i] = val;
}
// sum up partial sums
#pragma unroll
for (int mask = 16; mask > 0; mask >>= 1) {
tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
}
// normalize in place
const float inv_tmp = 1.f / tmp;
for (int col = tid; col < ncols; col += block_size) {
const int i = row*ncols + col;
dst[i] *= inv_tmp;
}
}
// Multiply each of the k elements of x by a scalar, writing to dst.
static __global__ void scale_f32(const float * x, float * dst, const float scale, const int k) {
    const int idx = blockDim.x*blockIdx.x + threadIdx.x;
    if (idx < k) {
        dst[idx] = scale * x[idx];
    }
}
// Launch add_f32 over kx output elements (y of length ky is broadcast by the kernel).
static void add_f32_cuda(const float * x, const float * y, float * dst, const int kx, const int ky, hipStream_t stream) {
    const int n_blocks = (kx + CUDA_ADD_BLOCK_SIZE - 1) / CUDA_ADD_BLOCK_SIZE; // ceil-div
    hipLaunchKernelGGL(( add_f32), dim3(n_blocks), dim3(CUDA_ADD_BLOCK_SIZE), 0, stream, x, y, dst, kx, ky);
}
// Launch add_f16_f32_f16 over k elements (fp16 + fp32 -> fp16).
static void add_f16_f32_f16_cuda(const half * x, const float * y, half * dst, const int k, hipStream_t stream) {
    const int n_blocks = (k + CUDA_ADD_BLOCK_SIZE - 1) / CUDA_ADD_BLOCK_SIZE; // ceil-div
    hipLaunchKernelGGL(( add_f16_f32_f16), dim3(n_blocks), dim3(CUDA_ADD_BLOCK_SIZE), 0, stream, x, y, dst, k);
}
// Launch mul_f32 over kx output elements (y of length ky is broadcast by the kernel).
static void mul_f32_cuda(const float * x, const float * y, float * dst, const int kx, const int ky, hipStream_t stream) {
    const int n_blocks = (kx + CUDA_MUL_BLOCK_SIZE - 1) / CUDA_MUL_BLOCK_SIZE; // ceil-div
    hipLaunchKernelGGL(( mul_f32), dim3(n_blocks), dim3(CUDA_MUL_BLOCK_SIZE), 0, stream, x, y, dst, kx, ky);
}
// Launch gelu_f32 over k elements.
static void gelu_f32_cuda(const float * x, float * dst, const int k, hipStream_t stream) {
    const int n_blocks = (k + CUDA_GELU_BLOCK_SIZE - 1) / CUDA_GELU_BLOCK_SIZE; // ceil-div
    hipLaunchKernelGGL(( gelu_f32), dim3(n_blocks), dim3(CUDA_GELU_BLOCK_SIZE), 0, stream, x, dst, k);
}
// Launch silu_f32 over k elements.
static void silu_f32_cuda(const float * x, float * dst, const int k, hipStream_t stream) {
    const int n_blocks = (k + CUDA_SILU_BLOCK_SIZE - 1) / CUDA_SILU_BLOCK_SIZE; // ceil-div
    hipLaunchKernelGGL(( silu_f32), dim3(n_blocks), dim3(CUDA_SILU_BLOCK_SIZE), 0, stream, x, dst, k);
}
// Launch norm_f32: one single-warp block per row; the kernel reduces across the warp.
static void norm_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, hipStream_t stream) {
    GGML_ASSERT(ncols % WARP_SIZE == 0);
    const dim3 threads(WARP_SIZE, 1, 1);
    hipLaunchKernelGGL(( norm_f32), dim3(nrows), dim3(threads), 0, stream, x, dst, ncols);
}
// Launch rms_norm_f32: one single-warp block per row; eps stabilizes the rsqrt.
static void rms_norm_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, const float eps, hipStream_t stream) {
    GGML_ASSERT(ncols % WARP_SIZE == 0);
    const dim3 threads(WARP_SIZE, 1, 1);
    hipLaunchKernelGGL(( rms_norm_f32), dim3(nrows), dim3(threads), 0, stream, x, dst, ncols, eps);
}
// Launch quantize_q8_1: quantizes ky rows of kx fp32 values each into q8_1
// blocks, padded per row to kx_padded. One thread per padded element.
static void quantize_row_q8_1_cuda(const float * x, void * vy, const int kx, const int ky, const int kx_padded, hipStream_t stream) {
    const int block_num_x = (kx_padded + CUDA_QUANTIZE_BLOCK_SIZE - 1) / CUDA_QUANTIZE_BLOCK_SIZE;
    const dim3 num_blocks(block_num_x, ky, 1);
    // Fix: the block size must use the same constant as the grid computation
    // above; the original passed CUDA_DEQUANTIZE_BLOCK_SIZE here, which only
    // covers all kx_padded elements while the two constants happen to be equal.
    const dim3 block_size(CUDA_QUANTIZE_BLOCK_SIZE, 1, 1);
    hipLaunchKernelGGL(( quantize_q8_1), dim3(num_blocks), dim3(block_size), 0, stream, x, vy, kx, kx_padded);
}
// Launch dequantize_block for q4_0: expands k quantized weights to fp32.
static void dequantize_row_q4_0_cuda(const void * vx, float * y, const int k, hipStream_t stream) {
    const int n_blocks = (k + CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / CUDA_DEQUANTIZE_BLOCK_SIZE; // ceil-div
    hipLaunchKernelGGL(( dequantize_block<QK4_0, QR4_0, dequantize_q4_0>), dim3(n_blocks), dim3(CUDA_DEQUANTIZE_BLOCK_SIZE), 0, stream, vx, y, k);
}
// Launch dequantize_block for q4_1: expands k quantized weights to fp32.
static void dequantize_row_q4_1_cuda(const void * vx, float * y, const int k, hipStream_t stream) {
    const int n_blocks = (k + CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / CUDA_DEQUANTIZE_BLOCK_SIZE; // ceil-div
    hipLaunchKernelGGL(( dequantize_block<QK4_1, QR4_1, dequantize_q4_1>), dim3(n_blocks), dim3(CUDA_DEQUANTIZE_BLOCK_SIZE), 0, stream, vx, y, k);
}
// Launch dequantize_block for q5_0: expands k quantized weights to fp32.
static void dequantize_row_q5_0_cuda(const void * vx, float * y, const int k, hipStream_t stream) {
    const int n_blocks = (k + CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / CUDA_DEQUANTIZE_BLOCK_SIZE; // ceil-div
    hipLaunchKernelGGL(( dequantize_block<QK5_0, QR5_0, dequantize_q5_0>), dim3(n_blocks), dim3(CUDA_DEQUANTIZE_BLOCK_SIZE), 0, stream, vx, y, k);
}
// Launch dequantize_block for q5_1: expands k quantized weights to fp32.
static void dequantize_row_q5_1_cuda(const void * vx, float * y, const int k, hipStream_t stream) {
    const int n_blocks = (k + CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / CUDA_DEQUANTIZE_BLOCK_SIZE; // ceil-div
    hipLaunchKernelGGL(( dequantize_block<QK5_1, QR5_1, dequantize_q5_1>), dim3(n_blocks), dim3(CUDA_DEQUANTIZE_BLOCK_SIZE), 0, stream, vx, y, k);
}
// Launch dequantize_block for q8_0: expands k quantized weights to fp32.
static void dequantize_row_q8_0_cuda(const void * vx, float * y, const int k, hipStream_t stream) {
    const int n_blocks = (k + CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / CUDA_DEQUANTIZE_BLOCK_SIZE; // ceil-div
    hipLaunchKernelGGL(( dequantize_block<QK8_0, QR8_0, dequantize_q8_0>), dim3(n_blocks), dim3(CUDA_DEQUANTIZE_BLOCK_SIZE), 0, stream, vx, y, k);
}
// Launch dequantize_block_q2_K: one block per QK_K superblock; the thread
// count differs with the compile-time superblock size.
static void dequantize_row_q2_K_cuda(const void * vx, float * y, const int k, hipStream_t stream) {
    const int n_blocks = k / QK_K;
#if QK_K == 256
    hipLaunchKernelGGL(( dequantize_block_q2_K), dim3(n_blocks), dim3(64), 0, stream, vx, y);
#else
    hipLaunchKernelGGL(( dequantize_block_q2_K), dim3(n_blocks), dim3(32), 0, stream, vx, y);
#endif
}
// Launch dequantize_block_q3_K: one block per QK_K superblock; the thread
// count differs with the compile-time superblock size.
static void dequantize_row_q3_K_cuda(const void * vx, float * y, const int k, hipStream_t stream) {
    const int n_blocks = k / QK_K;
#if QK_K == 256
    hipLaunchKernelGGL(( dequantize_block_q3_K), dim3(n_blocks), dim3(64), 0, stream, vx, y);
#else
    hipLaunchKernelGGL(( dequantize_block_q3_K), dim3(n_blocks), dim3(32), 0, stream, vx, y);
#endif
}
// Launch dequantize_block_q4_K: one 32-thread block per QK_K superblock.
static void dequantize_row_q4_K_cuda(const void * vx, float * y, const int k, hipStream_t stream) {
    const int n_blocks = k / QK_K;
    hipLaunchKernelGGL(( dequantize_block_q4_K), dim3(n_blocks), dim3(32), 0, stream, vx, y);
}
// Launch dequantize_block_q5_K: one block per QK_K superblock; the thread
// count differs with the compile-time superblock size.
static void dequantize_row_q5_K_cuda(const void * vx, float * y, const int k, hipStream_t stream) {
    const int n_blocks = k / QK_K;
#if QK_K == 256
    hipLaunchKernelGGL(( dequantize_block_q5_K), dim3(n_blocks), dim3(64), 0, stream, vx, y);
#else
    hipLaunchKernelGGL(( dequantize_block_q5_K), dim3(n_blocks), dim3(32), 0, stream, vx, y);
#endif
}
// Launch dequantize_block_q6_K: one block per QK_K superblock; the thread
// count differs with the compile-time superblock size.
static void dequantize_row_q6_K_cuda(const void * vx, float * y, const int k, hipStream_t stream) {
    const int n_blocks = k / QK_K;
#if QK_K == 256
    hipLaunchKernelGGL(( dequantize_block_q6_K), dim3(n_blocks), dim3(64), 0, stream, vx, y);
#else
    hipLaunchKernelGGL(( dequantize_block_q6_K), dim3(n_blocks), dim3(32), 0, stream, vx, y);
#endif
}
// Launch dequantize_mul_mat_vec for q4_0: GGML_CUDA_MMV_Y rows per block,
// one warp per row.
static void dequantize_mul_mat_vec_q4_0_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, hipStream_t stream) {
    GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
    const int ny_blocks = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; // ceil-div over rows
    const dim3 grid(1, ny_blocks, 1);
    const dim3 threads(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
    hipLaunchKernelGGL(( dequantize_mul_mat_vec<QK4_0, QR4_0, dequantize_q4_0>)
        , dim3(grid), dim3(threads), 0, stream, vx, y, dst, ncols, nrows);
}
static void dequantize_mul_mat_vec_q4_1_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, hipStream_t stream) {
    GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
    // ceil(nrows / GGML_CUDA_MMV_Y) blocks along y; WARP_SIZE x GGML_CUDA_MMV_Y threads each.
    const dim3 grid(1, (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, 1);
    const dim3 block(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
    hipLaunchKernelGGL(( dequantize_mul_mat_vec<QK4_1, QR4_1, dequantize_q4_1>)
        , dim3(grid), dim3(block), 0, stream, vx, y, dst, ncols, nrows);
}
static void dequantize_mul_mat_vec_q5_0_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, hipStream_t stream) {
    GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
    // ceil(nrows / GGML_CUDA_MMV_Y) blocks along y; WARP_SIZE x GGML_CUDA_MMV_Y threads each.
    const dim3 grid(1, (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, 1);
    const dim3 block(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
    hipLaunchKernelGGL(( dequantize_mul_mat_vec<QK5_0, QR5_0, dequantize_q5_0>)
        , dim3(grid), dim3(block), 0, stream, vx, y, dst, ncols, nrows);
}
static void dequantize_mul_mat_vec_q5_1_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, hipStream_t stream) {
    GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
    // ceil(nrows / GGML_CUDA_MMV_Y) blocks along y; WARP_SIZE x GGML_CUDA_MMV_Y threads each.
    const dim3 grid(1, (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, 1);
    const dim3 block(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
    hipLaunchKernelGGL(( dequantize_mul_mat_vec<QK5_1, QR5_1, dequantize_q5_1>)
        , dim3(grid), dim3(block), 0, stream, vx, y, dst, ncols, nrows);
}
static void dequantize_mul_mat_vec_q8_0_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, hipStream_t stream) {
    GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
    // ceil(nrows / GGML_CUDA_MMV_Y) blocks along y; WARP_SIZE x GGML_CUDA_MMV_Y threads each.
    const dim3 grid(1, (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, 1);
    const dim3 block(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
    hipLaunchKernelGGL(( dequantize_mul_mat_vec<QK8_0, QR8_0, dequantize_q8_0>)
        , dim3(grid), dim3(block), 0, stream, vx, y, dst, ncols, nrows);
}
static void dequantize_mul_mat_vec_q2_K_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, hipStream_t stream) {
    GGML_ASSERT(ncols % QK_K == 0);
    const int rows_per_block = 2; // very slightly faster than 1 even when K_QUANTS_PER_ITERATION = 2
    const dim3 grid(1, (nrows + rows_per_block - 1) / rows_per_block, 1);
    const dim3 block(32, rows_per_block, 1);
    hipLaunchKernelGGL(( dequantize_mul_mat_vec_q2_k), dim3(grid), dim3(block), 0, stream, vx, y, dst, ncols, nrows);
}
static void dequantize_mul_mat_vec_q3_K_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, hipStream_t stream) {
    GGML_ASSERT(ncols % QK_K == 0);
    // Rows handled per block shrink as K_QUANTS_PER_ITERATION grows.
    const int rows_per_block = 2 / K_QUANTS_PER_ITERATION;
    const dim3 grid(1, (nrows + rows_per_block - 1) / rows_per_block, 1);
    const dim3 block(32, rows_per_block, 1);
    hipLaunchKernelGGL(( dequantize_mul_mat_vec_q3_k), dim3(grid), dim3(block), 0, stream, vx, y, dst, ncols, nrows);
}
static void dequantize_mul_mat_vec_q4_K_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, hipStream_t stream) {
    GGML_ASSERT(ncols % QK_K == 0);
    // Rows handled per block shrink as K_QUANTS_PER_ITERATION grows.
    const int rows_per_block = 2 / K_QUANTS_PER_ITERATION;
    const dim3 grid(1, (nrows + rows_per_block - 1) / rows_per_block, 1);
    const dim3 block(32, rows_per_block, 1);
    hipLaunchKernelGGL(( dequantize_mul_mat_vec_q4_k), dim3(grid), dim3(block), 0, stream, vx, y, dst, ncols, nrows);
}
static void dequantize_mul_mat_vec_q5_K_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, hipStream_t stream) {
    GGML_ASSERT(ncols % QK_K == 0);
    // Unlike its siblings, this variant launches one 32-thread block per row
    // and the kernel takes no nrows argument.
    const dim3 block(32, 1, 1);
    hipLaunchKernelGGL(( dequantize_mul_mat_vec_q5_k), dim3(nrows), dim3(block), 0, stream, vx, y, dst, ncols);
}
static void dequantize_mul_mat_vec_q6_K_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, hipStream_t stream) {
    GGML_ASSERT(ncols % QK_K == 0);
    // Rows handled per block shrink as K_QUANTS_PER_ITERATION grows.
    const int rows_per_block = 2 / K_QUANTS_PER_ITERATION;
    const dim3 grid(1, (nrows + rows_per_block - 1) / rows_per_block, 1);
    const dim3 block(32, rows_per_block, 1);
    hipLaunchKernelGGL(( dequantize_mul_mat_vec_q6_k), dim3(grid), dim3(block), 0, stream, vx, y, dst, ncols, nrows);
}
static void mul_mat_vec_q4_0_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, hipStream_t stream) {
    GGML_ASSERT(ncols % QK4_0 == 0);
    // ceil(nrows / GGML_CUDA_MMV_Y) blocks along y; WARP_SIZE x GGML_CUDA_MMV_Y threads each.
    const dim3 grid(1, (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, 1);
    const dim3 block(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
    hipLaunchKernelGGL(( mul_mat_vec_q<QK4_0, QI4_0, block_q4_0, VDR_Q4_0_Q8_1_MMVQ, vec_dot_q4_0_q8_1>)
        , dim3(grid), dim3(block), 0, stream, vx, vy, dst, ncols, nrows);
}
static void mul_mat_vec_q4_1_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, hipStream_t stream) {
    GGML_ASSERT(ncols % QK4_1 == 0);
    const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
    const dim3 block_nums(1, block_num_y, 1);
    const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
    // Fix: the first template argument was QK4_0; every sibling launcher passes
    // its own quant's block size, so Q4_1 must pass QK4_1. (QK4_0 and QK4_1 are
    // numerically equal today, so this is a consistency/latent-bug fix, not a
    // behavior change.)
    hipLaunchKernelGGL(( mul_mat_vec_q<QK4_1, QI4_1, block_q4_1, VDR_Q4_1_Q8_1_MMVQ, vec_dot_q4_1_q8_1>)
        , dim3(block_nums), dim3(block_dims), 0, stream, vx, vy, dst, ncols, nrows);
}
static void mul_mat_vec_q5_0_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, hipStream_t stream) {
    GGML_ASSERT(ncols % QK5_0 == 0);
    // ceil(nrows / GGML_CUDA_MMV_Y) blocks along y; WARP_SIZE x GGML_CUDA_MMV_Y threads each.
    const dim3 grid(1, (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, 1);
    const dim3 block(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
    hipLaunchKernelGGL(( mul_mat_vec_q<QK5_0, QI5_0, block_q5_0, VDR_Q5_0_Q8_1_MMVQ, vec_dot_q5_0_q8_1>)
        , dim3(grid), dim3(block), 0, stream, vx, vy, dst, ncols, nrows);
}
static void mul_mat_vec_q5_1_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, hipStream_t stream) {
    GGML_ASSERT(ncols % QK5_1 == 0);
    // ceil(nrows / GGML_CUDA_MMV_Y) blocks along y; WARP_SIZE x GGML_CUDA_MMV_Y threads each.
    const dim3 grid(1, (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, 1);
    const dim3 block(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
    hipLaunchKernelGGL(( mul_mat_vec_q<QK5_1, QI5_1, block_q5_1, VDR_Q5_1_Q8_1_MMVQ, vec_dot_q5_1_q8_1>)
        , dim3(grid), dim3(block), 0, stream, vx, vy, dst, ncols, nrows);
}
static void mul_mat_vec_q8_0_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, hipStream_t stream) {
    GGML_ASSERT(ncols % QK8_0 == 0);
    // ceil(nrows / GGML_CUDA_MMV_Y) blocks along y; WARP_SIZE x GGML_CUDA_MMV_Y threads each.
    const dim3 grid(1, (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, 1);
    const dim3 block(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
    hipLaunchKernelGGL(( mul_mat_vec_q<QK8_0, QI8_0, block_q8_0, VDR_Q8_0_Q8_1_MMVQ, vec_dot_q8_0_q8_1>)
        , dim3(grid), dim3(block), 0, stream, vx, vy, dst, ncols, nrows);
}
static void mul_mat_vec_q2_K_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, hipStream_t stream) {
    GGML_ASSERT(ncols % QK_K == 0);
    // ceil(nrows / GGML_CUDA_MMV_Y) blocks along y; WARP_SIZE x GGML_CUDA_MMV_Y threads each.
    const dim3 grid(1, (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, 1);
    const dim3 block(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
    hipLaunchKernelGGL(( mul_mat_vec_q<QK_K, QI2_K, block_q2_K, VDR_Q2_K_Q8_1_MMVQ, vec_dot_q2_K_q8_1>)
        , dim3(grid), dim3(block), 0, stream, vx, vy, dst, ncols, nrows);
}
static void mul_mat_vec_q3_K_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, hipStream_t stream) {
    GGML_ASSERT(ncols % QK_K == 0);
    // ceil(nrows / GGML_CUDA_MMV_Y) blocks along y; WARP_SIZE x GGML_CUDA_MMV_Y threads each.
    const dim3 grid(1, (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, 1);
    const dim3 block(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
    hipLaunchKernelGGL(( mul_mat_vec_q<QK_K, QI3_K, block_q3_K, VDR_Q3_K_Q8_1_MMVQ, vec_dot_q3_K_q8_1>)
        , dim3(grid), dim3(block), 0, stream, vx, vy, dst, ncols, nrows);
}
static void mul_mat_vec_q4_K_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, hipStream_t stream) {
    GGML_ASSERT(ncols % QK_K == 0);
    // ceil(nrows / GGML_CUDA_MMV_Y) blocks along y; WARP_SIZE x GGML_CUDA_MMV_Y threads each.
    const dim3 grid(1, (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, 1);
    const dim3 block(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
    hipLaunchKernelGGL(( mul_mat_vec_q<QK_K, QI4_K, block_q4_K, VDR_Q4_K_Q8_1_MMVQ, vec_dot_q4_K_q8_1>)
        , dim3(grid), dim3(block), 0, stream, vx, vy, dst, ncols, nrows);
}
static void mul_mat_vec_q5_K_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, hipStream_t stream) {
    GGML_ASSERT(ncols % QK_K == 0);
    // ceil(nrows / GGML_CUDA_MMV_Y) blocks along y; WARP_SIZE x GGML_CUDA_MMV_Y threads each.
    const dim3 grid(1, (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, 1);
    const dim3 block(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
    hipLaunchKernelGGL(( mul_mat_vec_q<QK_K, QI5_K, block_q5_K, VDR_Q5_K_Q8_1_MMVQ, vec_dot_q5_K_q8_1>)
        , dim3(grid), dim3(block), 0, stream, vx, vy, dst, ncols, nrows);
}
static void mul_mat_vec_q6_K_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, hipStream_t stream) {
    GGML_ASSERT(ncols % QK_K == 0);
    // ceil(nrows / GGML_CUDA_MMV_Y) blocks along y; WARP_SIZE x GGML_CUDA_MMV_Y threads each.
    const dim3 grid(1, (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, 1);
    const dim3 block(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
    hipLaunchKernelGGL(( mul_mat_vec_q<QK_K, QI6_K, block_q6_K, VDR_Q6_K_Q8_1_MMVQ, vec_dot_q6_K_q8_1>)
        , dim3(grid), dim3(block), 0, stream, vx, vy, dst, ncols, nrows);
}
static void convert_fp16_to_fp32_cuda(const void * vx, float * y, const int k, hipStream_t stream) {
    // f16 -> f32 uses the generic dequantize_block with a 1:1 "quant" layout.
    const int n_blocks = (k + CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / CUDA_DEQUANTIZE_BLOCK_SIZE;
    hipLaunchKernelGGL(( dequantize_block<1, 1, convert_f16>), dim3(n_blocks), dim3(CUDA_DEQUANTIZE_BLOCK_SIZE), 0, stream, vx, y, k);
}
static void convert_mul_mat_vec_f16_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, hipStream_t stream) {
    GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
    // Same launch shape as the quantized dmmv launchers; f16 uses the 1:1 layout.
    const dim3 grid(1, (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y, 1);
    const dim3 block(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
    hipLaunchKernelGGL(( dequantize_mul_mat_vec<1, 1, convert_f16>)
        , dim3(grid), dim3(block), 0, stream, vx, y, dst, ncols, nrows);
}
// Map a ggml tensor type to the launcher that converts it to f32 on the
// device, or nullptr when the type is unsupported.
static to_fp32_cuda_t ggml_get_to_fp32_cuda(ggml_type type) {
    switch (type) {
        case GGML_TYPE_Q4_0: return dequantize_row_q4_0_cuda;
        case GGML_TYPE_Q4_1: return dequantize_row_q4_1_cuda;
        case GGML_TYPE_Q5_0: return dequantize_row_q5_0_cuda;
        case GGML_TYPE_Q5_1: return dequantize_row_q5_1_cuda;
        case GGML_TYPE_Q8_0: return dequantize_row_q8_0_cuda;
        case GGML_TYPE_Q2_K: return dequantize_row_q2_K_cuda;
        case GGML_TYPE_Q3_K: return dequantize_row_q3_K_cuda;
        case GGML_TYPE_Q4_K: return dequantize_row_q4_K_cuda;
        case GGML_TYPE_Q5_K: return dequantize_row_q5_K_cuda;
        case GGML_TYPE_Q6_K: return dequantize_row_q6_K_cuda;
        case GGML_TYPE_F16:  return convert_fp16_to_fp32_cuda;
        default:             return nullptr;
    }
}
static void ggml_mul_mat_q4_0_q8_1_cuda(
    const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
    const int ncols_y, const int nrows_y, const int nrows_dst, hipStream_t stream) {
    // Pick tile sizes for the current device generation.
    int dev;
    CUDA_CHECK(hipGetDevice(&dev));
    const int cc = g_compute_capabilities[dev];
    int tile_x, tile_y, nwarps;
    if (cc >= CC_TURING) {
        tile_x = MMQ_X_Q4_0_AMPERE;
        tile_y = MMQ_Y_Q4_0_AMPERE;
        nwarps = NWARPS_Q4_0_AMPERE;
    } else if (cc >= MIN_CC_DP4A) {
        tile_x = MMQ_X_Q4_0_PASCAL;
        tile_y = MMQ_Y_Q4_0_PASCAL;
        nwarps = NWARPS_Q4_0_PASCAL;
    } else {
        GGML_ASSERT(false); // no DP4A support
    }
    const dim3 grid((nrows_x + tile_y - 1) / tile_y, (ncols_y + tile_x - 1) / tile_x, 1);
    const dim3 block(WARP_SIZE, nwarps, 1);
    if (nrows_x % tile_y == 0) {
        hipLaunchKernelGGL(( mul_mat_q4_0<false>), dim3(grid), dim3(block), 0, stream,
            vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
    } else {
        // rows do not divide evenly into tiles: kernel must bounds-check
        hipLaunchKernelGGL(( mul_mat_q4_0<true>), dim3(grid), dim3(block), 0, stream,
            vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
    }
}
static void ggml_mul_mat_q4_1_q8_1_cuda(
    const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
    const int ncols_y, const int nrows_y, const int nrows_dst, hipStream_t stream) {
    // Pick tile sizes for the current device generation.
    int dev;
    CUDA_CHECK(hipGetDevice(&dev));
    const int cc = g_compute_capabilities[dev];
    int tile_x, tile_y, nwarps;
    if (cc >= CC_TURING) {
        tile_x = MMQ_X_Q4_1_AMPERE;
        tile_y = MMQ_Y_Q4_1_AMPERE;
        nwarps = NWARPS_Q4_1_AMPERE;
    } else if (cc >= MIN_CC_DP4A) {
        tile_x = MMQ_X_Q4_1_PASCAL;
        tile_y = MMQ_Y_Q4_1_PASCAL;
        nwarps = NWARPS_Q4_1_PASCAL;
    } else {
        GGML_ASSERT(false); // no DP4A support
    }
    const dim3 grid((nrows_x + tile_y - 1) / tile_y, (ncols_y + tile_x - 1) / tile_x, 1);
    const dim3 block(WARP_SIZE, nwarps, 1);
    if (nrows_x % tile_y == 0) {
        hipLaunchKernelGGL(( mul_mat_q4_1<false>), dim3(grid), dim3(block), 0, stream,
            vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
    } else {
        // rows do not divide evenly into tiles: kernel must bounds-check
        hipLaunchKernelGGL(( mul_mat_q4_1<true>), dim3(grid), dim3(block), 0, stream,
            vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
    }
}
static void ggml_mul_mat_q5_0_q8_1_cuda(
    const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
    const int ncols_y, const int nrows_y, const int nrows_dst, hipStream_t stream) {
    // Pick tile sizes for the current device generation.
    int dev;
    CUDA_CHECK(hipGetDevice(&dev));
    const int cc = g_compute_capabilities[dev];
    int tile_x, tile_y, nwarps;
    if (cc >= CC_TURING) {
        tile_x = MMQ_X_Q5_0_AMPERE;
        tile_y = MMQ_Y_Q5_0_AMPERE;
        nwarps = NWARPS_Q5_0_AMPERE;
    } else if (cc >= MIN_CC_DP4A) {
        tile_x = MMQ_X_Q5_0_PASCAL;
        tile_y = MMQ_Y_Q5_0_PASCAL;
        nwarps = NWARPS_Q5_0_PASCAL;
    } else {
        GGML_ASSERT(false); // no DP4A support
    }
    const dim3 grid((nrows_x + tile_y - 1) / tile_y, (ncols_y + tile_x - 1) / tile_x, 1);
    const dim3 block(WARP_SIZE, nwarps, 1);
    if (nrows_x % tile_y == 0) {
        hipLaunchKernelGGL(( mul_mat_q5_0<false>), dim3(grid), dim3(block), 0, stream,
            vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
    } else {
        // rows do not divide evenly into tiles: kernel must bounds-check
        hipLaunchKernelGGL(( mul_mat_q5_0<true>), dim3(grid), dim3(block), 0, stream,
            vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
    }
}
static void ggml_mul_mat_q5_1_q8_1_cuda(
    const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
    const int ncols_y, const int nrows_y, const int nrows_dst, hipStream_t stream) {
    // Pick tile sizes for the current device generation.
    int dev;
    CUDA_CHECK(hipGetDevice(&dev));
    const int cc = g_compute_capabilities[dev];
    int tile_x, tile_y, nwarps;
    if (cc >= CC_TURING) {
        tile_x = MMQ_X_Q5_1_AMPERE;
        tile_y = MMQ_Y_Q5_1_AMPERE;
        nwarps = NWARPS_Q5_1_AMPERE;
    } else if (cc >= MIN_CC_DP4A) {
        tile_x = MMQ_X_Q5_1_PASCAL;
        tile_y = MMQ_Y_Q5_1_PASCAL;
        nwarps = NWARPS_Q5_1_PASCAL;
    } else {
        GGML_ASSERT(false); // no DP4A support
    }
    const dim3 grid((nrows_x + tile_y - 1) / tile_y, (ncols_y + tile_x - 1) / tile_x, 1);
    const dim3 block(WARP_SIZE, nwarps, 1);
    if (nrows_x % tile_y == 0) {
        hipLaunchKernelGGL(( mul_mat_q5_1<false>), dim3(grid), dim3(block), 0, stream,
            vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
    } else {
        // rows do not divide evenly into tiles: kernel must bounds-check
        hipLaunchKernelGGL(( mul_mat_q5_1<true>), dim3(grid), dim3(block), 0, stream,
            vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
    }
}
static void ggml_mul_mat_q8_0_q8_1_cuda(
    const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
    const int ncols_y, const int nrows_y, const int nrows_dst, hipStream_t stream) {
    // Pick tile sizes for the current device generation.
    int dev;
    CUDA_CHECK(hipGetDevice(&dev));
    const int cc = g_compute_capabilities[dev];
    int tile_x, tile_y, nwarps;
    if (cc >= CC_TURING) {
        tile_x = MMQ_X_Q8_0_AMPERE;
        tile_y = MMQ_Y_Q8_0_AMPERE;
        nwarps = NWARPS_Q8_0_AMPERE;
    } else if (cc >= MIN_CC_DP4A) {
        tile_x = MMQ_X_Q8_0_PASCAL;
        tile_y = MMQ_Y_Q8_0_PASCAL;
        nwarps = NWARPS_Q8_0_PASCAL;
    } else {
        GGML_ASSERT(false); // no DP4A support
    }
    const dim3 grid((nrows_x + tile_y - 1) / tile_y, (ncols_y + tile_x - 1) / tile_x, 1);
    const dim3 block(WARP_SIZE, nwarps, 1);
    if (nrows_x % tile_y == 0) {
        hipLaunchKernelGGL(( mul_mat_q8_0<false>), dim3(grid), dim3(block), 0, stream,
            vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
    } else {
        // rows do not divide evenly into tiles: kernel must bounds-check
        hipLaunchKernelGGL(( mul_mat_q8_0<true>), dim3(grid), dim3(block), 0, stream,
            vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
    }
}
static void ggml_mul_mat_q2_K_q8_1_cuda(
    const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
    const int ncols_y, const int nrows_y, const int nrows_dst, hipStream_t stream) {
    // Pick tile sizes for the current device generation.
    int dev;
    CUDA_CHECK(hipGetDevice(&dev));
    const int cc = g_compute_capabilities[dev];
    int tile_x, tile_y, nwarps;
    if (cc >= CC_TURING) {
        tile_x = MMQ_X_Q2_K_AMPERE;
        tile_y = MMQ_Y_Q2_K_AMPERE;
        nwarps = NWARPS_Q2_K_AMPERE;
    } else if (cc >= MIN_CC_DP4A) {
        tile_x = MMQ_X_Q2_K_PASCAL;
        tile_y = MMQ_Y_Q2_K_PASCAL;
        nwarps = NWARPS_Q2_K_PASCAL;
    } else {
        GGML_ASSERT(false); // no DP4A support
    }
    const dim3 grid((nrows_x + tile_y - 1) / tile_y, (ncols_y + tile_x - 1) / tile_x, 1);
    const dim3 block(WARP_SIZE, nwarps, 1);
    if (nrows_x % tile_y == 0) {
        hipLaunchKernelGGL(( mul_mat_q2_K<false>), dim3(grid), dim3(block), 0, stream,
            vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
    } else {
        // rows do not divide evenly into tiles: kernel must bounds-check
        hipLaunchKernelGGL(( mul_mat_q2_K<true>), dim3(grid), dim3(block), 0, stream,
            vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
    }
}
static void ggml_mul_mat_q3_K_q8_1_cuda(
    const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
    const int ncols_y, const int nrows_y, const int nrows_dst, hipStream_t stream) {
    // NOTE(review): when QK_K != 256 this function compiles to a silent no-op
    // (the whole body is inside the #if) — confirm that is intended.
#if QK_K == 256
    // Pick tile sizes for the current device generation.
    int dev;
    CUDA_CHECK(hipGetDevice(&dev));
    const int cc = g_compute_capabilities[dev];
    int tile_x, tile_y, nwarps;
    if (cc >= CC_TURING) {
        tile_x = MMQ_X_Q3_K_AMPERE;
        tile_y = MMQ_Y_Q3_K_AMPERE;
        nwarps = NWARPS_Q3_K_AMPERE;
    } else if (cc >= MIN_CC_DP4A) {
        tile_x = MMQ_X_Q3_K_PASCAL;
        tile_y = MMQ_Y_Q3_K_PASCAL;
        nwarps = NWARPS_Q3_K_PASCAL;
    } else {
        GGML_ASSERT(false); // no DP4A support
    }
    const dim3 grid((nrows_x + tile_y - 1) / tile_y, (ncols_y + tile_x - 1) / tile_x, 1);
    const dim3 block(WARP_SIZE, nwarps, 1);
    if (nrows_x % tile_y == 0) {
        hipLaunchKernelGGL(( mul_mat_q3_K<false>), dim3(grid), dim3(block), 0, stream,
            vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
    } else {
        // rows do not divide evenly into tiles: kernel must bounds-check
        hipLaunchKernelGGL(( mul_mat_q3_K<true>), dim3(grid), dim3(block), 0, stream,
            vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
    }
#endif
}
static void ggml_mul_mat_q4_K_q8_1_cuda(
    const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
    const int ncols_y, const int nrows_y, const int nrows_dst, hipStream_t stream) {
    // Pick tile sizes for the current device generation.
    int dev;
    CUDA_CHECK(hipGetDevice(&dev));
    const int cc = g_compute_capabilities[dev];
    int tile_x, tile_y, nwarps;
    if (cc >= CC_TURING) {
        tile_x = MMQ_X_Q4_K_AMPERE;
        tile_y = MMQ_Y_Q4_K_AMPERE;
        nwarps = NWARPS_Q4_K_AMPERE;
    } else if (cc >= MIN_CC_DP4A) {
        tile_x = MMQ_X_Q4_K_PASCAL;
        tile_y = MMQ_Y_Q4_K_PASCAL;
        nwarps = NWARPS_Q4_K_PASCAL;
    } else {
        GGML_ASSERT(false); // no DP4A support
    }
    const dim3 grid((nrows_x + tile_y - 1) / tile_y, (ncols_y + tile_x - 1) / tile_x, 1);
    const dim3 block(WARP_SIZE, nwarps, 1);
    if (nrows_x % tile_y == 0) {
        hipLaunchKernelGGL(( mul_mat_q4_K<false>), dim3(grid), dim3(block), 0, stream,
            vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
    } else {
        // rows do not divide evenly into tiles: kernel must bounds-check
        hipLaunchKernelGGL(( mul_mat_q4_K<true>), dim3(grid), dim3(block), 0, stream,
            vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
    }
}
static void ggml_mul_mat_q5_K_q8_1_cuda(
    const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
    const int ncols_y, const int nrows_y, const int nrows_dst, hipStream_t stream) {
    // Pick tile sizes for the current device generation.
    int dev;
    CUDA_CHECK(hipGetDevice(&dev));
    const int cc = g_compute_capabilities[dev];
    int tile_x, tile_y, nwarps;
    if (cc >= CC_TURING) {
        tile_x = MMQ_X_Q5_K_AMPERE;
        tile_y = MMQ_Y_Q5_K_AMPERE;
        nwarps = NWARPS_Q5_K_AMPERE;
    } else if (cc >= MIN_CC_DP4A) {
        tile_x = MMQ_X_Q5_K_PASCAL;
        tile_y = MMQ_Y_Q5_K_PASCAL;
        nwarps = NWARPS_Q5_K_PASCAL;
    } else {
        GGML_ASSERT(false); // no DP4A support
    }
    const dim3 grid((nrows_x + tile_y - 1) / tile_y, (ncols_y + tile_x - 1) / tile_x, 1);
    const dim3 block(WARP_SIZE, nwarps, 1);
    if (nrows_x % tile_y == 0) {
        hipLaunchKernelGGL(( mul_mat_q5_K<false>), dim3(grid), dim3(block), 0, stream,
            vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
    } else {
        // rows do not divide evenly into tiles: kernel must bounds-check
        hipLaunchKernelGGL(( mul_mat_q5_K<true>), dim3(grid), dim3(block), 0, stream,
            vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
    }
}
static void ggml_mul_mat_q6_K_q8_1_cuda(
    const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
    const int ncols_y, const int nrows_y, const int nrows_dst, hipStream_t stream) {
    // Pick tile sizes for the current device generation.
    int dev;
    CUDA_CHECK(hipGetDevice(&dev));
    const int cc = g_compute_capabilities[dev];
    int tile_x, tile_y, nwarps;
    if (cc >= CC_TURING) {
        tile_x = MMQ_X_Q6_K_AMPERE;
        tile_y = MMQ_Y_Q6_K_AMPERE;
        nwarps = NWARPS_Q6_K_AMPERE;
    } else if (cc >= MIN_CC_DP4A) {
        tile_x = MMQ_X_Q6_K_PASCAL;
        tile_y = MMQ_Y_Q6_K_PASCAL;
        nwarps = NWARPS_Q6_K_PASCAL;
    } else {
        GGML_ASSERT(false); // no DP4A support
    }
    const dim3 grid((nrows_x + tile_y - 1) / tile_y, (ncols_y + tile_x - 1) / tile_x, 1);
    const dim3 block(WARP_SIZE, nwarps, 1);
    if (nrows_x % tile_y == 0) {
        hipLaunchKernelGGL(( mul_mat_q6_K<false>), dim3(grid), dim3(block), 0, stream,
            vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
    } else {
        // rows do not divide evenly into tiles: kernel must bounds-check
        hipLaunchKernelGGL(( mul_mat_q6_K<true>), dim3(grid), dim3(block), 0, stream,
            vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
    }
}
static void ggml_mul_mat_p021_f16_f32_cuda(
    const void * vx, const float * y, float * dst, const int ncols_x, const int nrows_x,
    const int nchannels_x, const int nchannels_y, hipStream_t stream) {
    // One WARP_SIZE-thread block per (row, channel) pair.
    const dim3 grid(1, nrows_x, nchannels_y);
    const dim3 block(WARP_SIZE, 1, 1);
    hipLaunchKernelGGL(( mul_mat_p021_f16_f32), dim3(grid), dim3(block), 0, stream, vx, y, dst, ncols_x, nrows_x, nchannels_x, nchannels_y);
}
static void ggml_mul_mat_vec_nc_f16_f32_cuda(
    const void * vx, const float * y, float * dst, const int ncols_x, const int nrows_x, const int row_stride_x,
    const int nchannels_x, const int nchannels_y, const int channel_stride_x, hipStream_t stream) {
    // One WARP_SIZE-thread block per (row, channel) pair; the kernel receives
    // the y:x channel ratio instead of the raw channel counts.
    const dim3 grid(1, nrows_x, nchannels_y);
    const dim3 block(WARP_SIZE, 1, 1);
    hipLaunchKernelGGL(( mul_mat_vec_nc_f16_f32), dim3(grid), dim3(block), 0, stream,
        vx, y, dst, ncols_x, nrows_x, row_stride_x, channel_stride_x, nchannels_y/nchannels_x);
}
static void ggml_cpy_f32_f32_cuda(
    const char * cx, char * cdst, const int ne,
    const int ne00, const int ne01, const int nb00, const int nb01, const int nb02,
    const int ne10, const int ne11, const int nb10, const int nb11, const int nb12, hipStream_t stream) {
    // ceil(ne / CUDA_CPY_BLOCK_SIZE) blocks; strided f32 -> f32 copy.
    const int n_blocks = (ne + CUDA_CPY_BLOCK_SIZE - 1) / CUDA_CPY_BLOCK_SIZE;
    hipLaunchKernelGGL(( cpy_f32_f16<cpy_1_f32_f32>), dim3(n_blocks), dim3(CUDA_CPY_BLOCK_SIZE), 0, stream,
        cx, cdst, ne, ne00, ne01, nb00, nb01, nb02, ne10, ne11, nb10, nb11, nb12);
}
static void ggml_cpy_f32_f16_cuda(
    const char * cx, char * cdst, const int ne,
    const int ne00, const int ne01, const int nb00, const int nb01, const int nb02,
    const int ne10, const int ne11, const int nb10, const int nb11, const int nb12, hipStream_t stream) {
    // ceil(ne / CUDA_CPY_BLOCK_SIZE) blocks; strided f32 -> f16 copy.
    const int n_blocks = (ne + CUDA_CPY_BLOCK_SIZE - 1) / CUDA_CPY_BLOCK_SIZE;
    hipLaunchKernelGGL(( cpy_f32_f16<cpy_1_f32_f16>), dim3(n_blocks), dim3(CUDA_CPY_BLOCK_SIZE), 0, stream,
        cx, cdst, ne, ne00, ne01, nb00, nb01, nb02, ne10, ne11, nb10, nb11, nb12);
}
static void scale_f32_cuda(const float * x, float * dst, const float scale, const int k, hipStream_t stream) {
    // ceil(k / CUDA_SCALE_BLOCK_SIZE) blocks of CUDA_SCALE_BLOCK_SIZE threads.
    const int n_blocks = (k + CUDA_SCALE_BLOCK_SIZE - 1) / CUDA_SCALE_BLOCK_SIZE;
    hipLaunchKernelGGL(( scale_f32), dim3(n_blocks), dim3(CUDA_SCALE_BLOCK_SIZE), 0, stream, x, dst, scale, k);
}
static void rope_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, const float p0,
    const float p_delta, const int p_delta_rows, const float theta_scale, hipStream_t stream) {
    GGML_ASSERT(ncols % 2 == 0);
    // Grid covers ncols in chunks of 2*CUDA_ROPE_BLOCK_SIZE along y, one x slot per row.
    const int n_blocks_col = (ncols + 2*CUDA_ROPE_BLOCK_SIZE - 1) / (2*CUDA_ROPE_BLOCK_SIZE);
    const dim3 grid(nrows, n_blocks_col, 1);
    const dim3 block(1, CUDA_ROPE_BLOCK_SIZE, 1);
    hipLaunchKernelGGL(( rope_f32), dim3(grid), dim3(block), 0, stream, x, dst, ncols, p0, p_delta, p_delta_rows, theta_scale);
}
static void rope_neox_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, const float p0,
    const float p_delta, const int p_delta_rows, const float theta_scale, hipStream_t stream) {
    GGML_ASSERT(ncols % 2 == 0);
    // Grid covers ncols in chunks of 2*CUDA_ROPE_BLOCK_SIZE along y, one x slot per row.
    const int n_blocks_col = (ncols + 2*CUDA_ROPE_BLOCK_SIZE - 1) / (2*CUDA_ROPE_BLOCK_SIZE);
    const dim3 grid(nrows, n_blocks_col, 1);
    const dim3 block(1, CUDA_ROPE_BLOCK_SIZE, 1);
    hipLaunchKernelGGL(( rope_neox_f32), dim3(grid), dim3(block), 0, stream, x, dst, ncols, p0, p_delta, p_delta_rows, theta_scale);
}
static void rope_glm_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, const float p, const float block_p, const float theta_scale, hipStream_t stream) {
    GGML_ASSERT(nrows % 4 == 0);
    // Grid covers ncols in chunks of 4*CUDA_ROPE_BLOCK_SIZE along x, one y slot per row.
    const int n_blocks_col = (ncols + 4*CUDA_ROPE_BLOCK_SIZE - 1) / (4*CUDA_ROPE_BLOCK_SIZE);
    const dim3 grid(n_blocks_col, nrows, 1);
    const dim3 block(4*CUDA_ROPE_BLOCK_SIZE, 1, 1);
    hipLaunchKernelGGL(( rope_glm_f32), dim3(grid), dim3(block), 0, stream, x, dst, ncols, p, block_p, theta_scale);
}
static void alibi_f32_cuda(const float * x, float * dst, const int ncols, const int nrows,
    const int k_rows, const int n_heads_log2_floor, const float m0,
    const float m1, hipStream_t stream) {
    // Grid covers ncols in CUDA_ALIBI_BLOCK_SIZE chunks along x, one y slot per row.
    const int n_blocks_col = (ncols + CUDA_ALIBI_BLOCK_SIZE - 1) / (CUDA_ALIBI_BLOCK_SIZE);
    const dim3 grid(n_blocks_col, nrows, 1);
    const dim3 block(CUDA_ALIBI_BLOCK_SIZE, 1, 1);
    hipLaunchKernelGGL(( alibi_f32), dim3(grid), dim3(block), 0, stream, x, dst, ncols, k_rows, n_heads_log2_floor, m0, m1);
}
static void diag_mask_inf_f32_cuda(const float * x, float * dst, const int ncols_x, const int nrows_x, const int rows_per_channel, const int n_past, hipStream_t stream) {
    // Grid: one x slot per row, ncols covered in CUDA_DIAG_MASK_INF_BLOCK_SIZE chunks along y.
    const int n_blocks_col = (ncols_x + CUDA_DIAG_MASK_INF_BLOCK_SIZE - 1) / CUDA_DIAG_MASK_INF_BLOCK_SIZE;
    const dim3 grid(nrows_x, n_blocks_col, 1);
    const dim3 block(1, CUDA_DIAG_MASK_INF_BLOCK_SIZE, 1);
    hipLaunchKernelGGL(( diag_mask_inf_f32), dim3(grid), dim3(block), 0, stream, x, dst, ncols_x, rows_per_channel, n_past);
}
static void soft_max_f32_cuda(const float * x, float * dst, const int ncols_x, const int nrows_x, hipStream_t stream) {
    // One block per row, WARP_SIZE threads along y.
    const dim3 grid(nrows_x, 1, 1);
    const dim3 block(1, WARP_SIZE, 1);
    hipLaunchKernelGGL(( soft_max_f32), dim3(grid), dim3(block), 0, stream, x, dst, ncols_x);
}
// buffer pool for cuda
// Maximum number of cached device allocations kept per device.
#define MAX_CUDA_BUFFERS 256
// RAII guard over a std::atomic_flag: spins to acquire on construction,
// clears (releases) on destruction. Non-copyable.
struct scoped_spin_lock {
    std::atomic_flag& flag_; // the shared lock flag this guard holds
    scoped_spin_lock(std::atomic_flag& f) : flag_(f) {
        // Acquire: spin until test_and_set observes a clear flag.
        while (flag_.test_and_set(std::memory_order_acquire)) {
            // busy-wait
        }
    }
    ~scoped_spin_lock() {
        flag_.clear(std::memory_order_release);
    }
    scoped_spin_lock(const scoped_spin_lock&) = delete;
    scoped_spin_lock& operator=(const scoped_spin_lock&) = delete;
};
// One slot of the device-memory pool: a cached allocation and its size.
struct cuda_buffer {
    void * ptr = nullptr; // device pointer; nullptr marks the slot as free
    size_t size = 0;      // allocation size in bytes
};
// Per-device pool of cached allocations, guarded by g_cuda_pool_lock.
static cuda_buffer g_cuda_buffer_pool[GGML_CUDA_MAX_DEVICES][MAX_CUDA_BUFFERS];
static std::atomic_flag g_cuda_pool_lock = ATOMIC_FLAG_INIT;
// Allocate device memory for the current device, preferring a cached pool
// buffer. Picks the pooled buffer with the smallest size >= `size` (best
// fit); on an exact fit it returns immediately. Falls back to hipMalloc with
// ~5% look-ahead padding (rounded up to 256 bytes) when nothing fits.
// *actual_size receives the real size of the returned buffer.
static void * ggml_cuda_pool_malloc(size_t size, size_t * actual_size) {
    scoped_spin_lock lock(g_cuda_pool_lock);
    int id;
    CUDA_CHECK(hipGetDevice(&id));
#ifdef DEBUG_CUDA_MALLOC
    // pool statistics, only tracked for the debug report below
    int nnz = 0;
    size_t max_size = 0, tot_size = 0;
#endif
    size_t best_diff = 1ull << 36; // sentinel: anything wasting < 64 GiB beats this
    int ibest = -1;
    for (int i = 0; i < MAX_CUDA_BUFFERS; ++i) {
        cuda_buffer& b = g_cuda_buffer_pool[id][i];
        if (b.ptr != nullptr) {
#ifdef DEBUG_CUDA_MALLOC
            ++nnz;
            tot_size += b.size;
            if (b.size > max_size) max_size = b.size;
#endif
            if (b.size >= size) {
                size_t diff = b.size - size;
                if (diff < best_diff) {
                    best_diff = diff;
                    ibest = i;
                    if (!best_diff) {
                        // exact fit: take the buffer and clear the slot
                        void * ptr = b.ptr;
                        *actual_size = b.size;
                        b.ptr = nullptr;
                        b.size = 0;
                        return ptr;
                    }
                }
            }
        }
    }
    if (ibest >= 0) {
        // best (inexact) fit found: hand it out and clear the slot
        cuda_buffer& b = g_cuda_buffer_pool[id][ibest];
        void * ptr = b.ptr;
        *actual_size = b.size;
        b.ptr = nullptr;
        b.size = 0;
        return ptr;
    }
#ifdef DEBUG_CUDA_MALLOC
    fprintf(stderr, "%s: %d buffers, max_size = %u MB, tot_size = %u MB, requested %u MB\n", __func__, nnz,
        (uint32_t)(max_size/1024/1024), (uint32_t)(tot_size/1024/1024), (uint32_t)(size/1024/1024));
#endif
    // Pool miss: allocate fresh memory with 5% headroom so a slightly larger
    // future request can reuse this buffer; round up to a 256-byte multiple.
    void * ptr;
    size_t look_ahead_size = (size_t) (1.05 * size);
    look_ahead_size = 256 * ((look_ahead_size + 255)/256);
    CUDA_CHECK(hipMalloc((void **) &ptr, look_ahead_size));
    *actual_size = look_ahead_size;
    return ptr;
}
// Return a device allocation to the current device's pool; if every slot is
// occupied, warn and free the memory outright.
static void ggml_cuda_pool_free(void * ptr, size_t size) {
    scoped_spin_lock lock(g_cuda_pool_lock);
    int dev;
    CUDA_CHECK(hipGetDevice(&dev));
    for (int i = 0; i < MAX_CUDA_BUFFERS; ++i) {
        cuda_buffer& slot = g_cuda_buffer_pool[dev][i];
        if (slot.ptr != nullptr) {
            continue; // slot occupied, keep looking
        }
        slot.ptr  = ptr;
        slot.size = size;
        return;
    }
    fprintf(stderr, "WARNING: cuda buffer pool full, increase MAX_CUDA_BUFFERS\n");
    CUDA_CHECK(hipFree(ptr));
}
// One-time initialization of the CUDA/HIP backend: enumerates devices,
// records per-device compute capability and a VRAM-proportional default
// tensor split, and creates a main stream plus a cuBLAS/hipBLAS handle per
// device. Safe to call multiple times; only the first call does work.
// NOTE(review): not thread-safe — the `initialized` flag is an unsynchronized
// static; confirm all callers invoke this from a single thread.
void ggml_init_cublas() {
    static bool initialized = false;
    if (!initialized) {
#ifdef __HIP_PLATFORM_AMD__
        // Workaround for a rocBLAS bug when using multiple graphics cards:
        // https://github.com/ROCmSoftwarePlatform/rocBLAS/issues/1346
        rocblas_initialize();
        CUDA_CHECK(hipDeviceSynchronize());
#endif
        CUDA_CHECK(hipGetDeviceCount(&g_device_count));
        GGML_ASSERT(g_device_count <= GGML_CUDA_MAX_DEVICES);
        int64_t total_vram = 0;
        fprintf(stderr, "%s: found %d " GGML_CUDA_NAME " devices:\n", __func__, g_device_count);
        for (int id = 0; id < g_device_count; ++id) {
            hipDeviceProp_t prop;
            CUDA_CHECK(hipGetDeviceProperties(&prop, id));
            fprintf(stderr, " Device %d: %s, compute capability %d.%d\n", id, prop.name, prop.major, prop.minor);
            // store cumulative VRAM first; normalized to [0,1) fractions below
            g_tensor_split[id] = total_vram;
            total_vram += prop.totalGlobalMem;
            // encode capability as major*100 + minor*10 (e.g. 7.5 -> 750)
            g_compute_capabilities[id] = 100*prop.major + 10*prop.minor;
        }
        for (int id = 0; id < g_device_count; ++id) {
            g_tensor_split[id] /= total_vram;
        }
        for (int id = 0; id < g_device_count; ++id) {
            CUDA_CHECK(hipSetDevice(id));
            // create main stream
            CUDA_CHECK(hipStreamCreateWithFlags(&g_cudaStreams_main[id], hipStreamNonBlocking));
            // create cublas handle
            CUBLAS_CHECK(hipblasCreate(&g_cublas_handles[id]));
            // NOTE(review): cublasSetMathMode is a CUDA-named call left by
            // hipify — presumably mapped to a no-op macro on HIP; confirm.
            CUBLAS_CHECK(cublasSetMathMode(g_cublas_handles[id], CUBLAS_TF32_TENSOR_OP_MATH));
        }
        // configure logging to stdout
        // CUBLAS_CHECK(cublasLoggerConfigure(1, 1, 0, nullptr));
        initialized = true;
    }
}
// Set the per-device row-split for split tensors.
// `tensor_split` holds one proportion per device; it is converted into a
// cumulative, normalized prefix so that g_tensor_split[i] is the start
// fraction of device i. A null pointer or an all-zero split leaves the
// current (VRAM-derived) split untouched.
void ggml_cuda_set_tensor_split(const float * tensor_split) {
    if (tensor_split == nullptr) {
        return;
    }

    // an all-zero split carries no information — keep the existing one
    bool any_nonzero = false;
    for (int i = 0; i < g_device_count; ++i) {
        any_nonzero = any_nonzero || tensor_split[i] != 0.0f;
    }
    if (!any_nonzero) {
        return;
    }

    // exclusive prefix sum of the requested proportions
    float acc = 0.0f;
    for (int i = 0; i < g_device_count; ++i) {
        g_tensor_split[i] = acc;
        acc += tensor_split[i];
    }

    // normalize so the start fractions lie in [0, 1)
    for (int i = 0; i < g_device_count; ++i) {
        g_tensor_split[i] /= acc;
    }
}
void * ggml_cuda_host_malloc(size_t size) {
if (getenv("GGML_CUDA_NO_PINNED") != nullptr) {
return nullptr;
}
void * ptr = nullptr;
hipError_t err = hipHostMalloc((void **) &ptr, size);
if (err != hipSuccess) {
// The allocation error can be bypassed. A null ptr will assigned out of this function.
// This can fixed the OOM error in WSL.
hipGetLastError();
fprintf(stderr, "WARNING: failed to allocate %.2f MB of pinned memory: %s\n",
size/1024.0/1024.0, hipGetErrorString(err));
return nullptr;
}
return ptr;
}
// Free pinned host memory previously obtained from ggml_cuda_host_malloc.
// NOTE(review): `ptr` must come from hipHostMalloc — passing any other
// pointer is an API error caught by CUDA_CHECK.
void ggml_cuda_host_free(void * ptr) {
    CUDA_CHECK(hipHostFree(ptr));
}
// Asynchronously copy rows [i1_low, i1_high) of the (i2, i3) plane of `src`
// into the contiguous device buffer `dst`, choosing the cheapest copy shape:
//   1. fully contiguous rows  -> one linear memcpy,
//   2. contiguous elements but padded rows -> one 2D memcpy,
//   3. strided elements -> one 2D memcpy per row (element stride as pitch).
// The source may live on the host (H2D copy) or on the current device (D2D).
// Returns the first HIP error encountered, or hipSuccess.
static hipError_t ggml_cuda_cpy_tensor_2d(
    void * dst, const struct ggml_tensor * src, int64_t i3, int64_t i2, int64_t i1_low, int64_t i1_high, hipStream_t stream) {

    hipMemcpyKind kind;
    char * src_ptr;
    if (src->backend == GGML_BACKEND_CPU) {
        kind = hipMemcpyHostToDevice;
        src_ptr = (char *) src->data;
    } else if (src->backend == GGML_BACKEND_GPU) {
        kind = hipMemcpyDeviceToDevice;
        // GPU tensors keep one pointer per device; use the current device's copy
        struct ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) src->extra;
        int id;
        CUDA_CHECK(hipGetDevice(&id));
        src_ptr = (char *) extra->data_device[id];
    } else {
        GGML_ASSERT(false);
    }
    char * dst_ptr = (char *) dst;

    const int64_t ne0 = src->ne[0];
    const int64_t nb0 = src->nb[0];
    const int64_t nb1 = src->nb[1];
    const int64_t nb2 = src->nb[2];
    const int64_t nb3 = src->nb[3];
    const enum ggml_type type = src->type;
    const int64_t ts = ggml_type_size(type);   // bytes per block of the type
    const int64_t bs = ggml_blck_size(type);   // elements per block
    int64_t i1_diff = i1_high - i1_low;

    const char * x = src_ptr + i1_low*nb1 + i2*nb2 + i3*nb3;
    if (nb0 == ts && nb1 == ts*ne0/bs) {
        // case 1: rows are densely packed -> single linear async copy
        return hipMemcpyAsync(dst_ptr, x, i1_diff*nb1, kind, stream);
    } else if (nb0 == ts) {
        // case 2: elements contiguous within a row, rows padded -> strided 2D copy
        return hipMemcpy2DAsync(dst_ptr, ts*ne0/bs, x, nb1, ts*ne0/bs, i1_diff, kind, stream);
    } else {
        // case 3: non-contiguous elements — copy row by row,
        // treating each row as a matrix with one element per "row" of the copy
        for (int64_t i1 = 0; i1 < i1_diff; i1++) {
            const void * rx = (const void *) ((const char *) x + i1*nb1);
            void * rd = (void *) (dst_ptr + i1*ts*ne0/bs);
            // pretend the row is a matrix with cols=1
            hipError_t r = hipMemcpy2DAsync(rd, ts/bs, rx, nb0, ts/bs, ne0, kind, stream);
            if (r != hipSuccess) return r;
        }
        return hipSuccess;
    }
}
// Element-wise addition over the row slice [i01_low, i01_high) of src0.
// Supported dtype combinations: f32 + f32 -> f32 and f16 + f32 -> f16.
// In the f32 path src1 is repeated with period ne10*ne11 elements; the f16
// path passes no repeat period — NOTE(review): presumably src1 always matches
// the src0 slice size there; confirm against the f16 kernel and its callers.
inline void ggml_cuda_op_add(
    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
    float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
    hipStream_t & cudaStream_main){

    // the f16 path reads the quantized/raw buffer, the f32 path the float buffer
    GGML_ASSERT(src0_ddq_i != nullptr || src0_ddf_i != nullptr);
    GGML_ASSERT(src1_ddf_i != nullptr);
    GGML_ASSERT(dst_ddf_i != nullptr);

    const int64_t ne00 = src0->ne[0];
    const int64_t i01_diff = i01_high - i01_low;

    const int64_t ne10 = src1->ne[0];
    const int64_t ne11 = src1->ne[1];

    // compute
    if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
        add_f32_cuda(src0_ddf_i, src1_ddf_i, dst_ddf_i, ne00*i01_diff, ne10*ne11, cudaStream_main);
    } else if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) {
        add_f16_f32_f16_cuda((half *) src0_ddq_i, src1_ddf_i, (half *) dst_ddf_i, ne00*i01_diff, cudaStream_main);
    } else {
        GGML_ASSERT(false);
    }

    (void) src1;
    (void) dst;
    (void) src0_ddq_i;
    (void) i02;
    (void) i1;
}
// Element-wise multiplication over the row slice [i01_low, i01_high) of src0;
// src1 is repeated with a period of one src1 plane (ne10*ne11 elements).
inline void ggml_cuda_op_mul(
    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
    float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
    hipStream_t & cudaStream_main){

    GGML_ASSERT(src0_ddf_i != nullptr);
    GGML_ASSERT(src1_ddf_i != nullptr);
    GGML_ASSERT(dst_ddf_i  != nullptr);

    const int64_t nrows    = i01_high - i01_low;
    const int64_t n_elems  = src0->ne[0] * nrows;        // elements in the src0 slice
    const int64_t n_repeat = src1->ne[0] * src1->ne[1];  // src1 broadcast period

    mul_f32_cuda(src0_ddf_i, src1_ddf_i, dst_ddf_i, n_elems, n_repeat, cudaStream_main);

    // unused in this op
    (void) dst;
    (void) src0_ddq_i;
    (void) i02;
    (void) i1;
}
// Apply GELU element-wise to the row slice [i01_low, i01_high) of src0.
inline void ggml_cuda_op_gelu(
    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
    float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
    hipStream_t & cudaStream_main){

    GGML_ASSERT(src0_ddf_i != nullptr);
    GGML_ASSERT(dst_ddf_i  != nullptr);

    // total number of elements in the processed slice
    const int64_t nrows   = i01_high - i01_low;
    const int64_t n_elems = src0->ne[0] * nrows;

    gelu_f32_cuda(src0_ddf_i, dst_ddf_i, n_elems, cudaStream_main);

    // unused in this op
    (void) src1;
    (void) dst;
    (void) src0_ddq_i;
    (void) src1_ddf_i;
    (void) i02;
    (void) i1;
}
// Apply SiLU element-wise to the row slice [i01_low, i01_high) of src0.
inline void ggml_cuda_op_silu(
    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
    float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
    hipStream_t & cudaStream_main){

    GGML_ASSERT(src0_ddf_i != nullptr);
    GGML_ASSERT(dst_ddf_i  != nullptr);

    // total number of elements in the processed slice
    const int64_t nrows   = i01_high - i01_low;
    const int64_t n_elems = src0->ne[0] * nrows;

    silu_f32_cuda(src0_ddf_i, dst_ddf_i, n_elems, cudaStream_main);

    // unused in this op
    (void) src1;
    (void) dst;
    (void) src0_ddq_i;
    (void) src1_ddf_i;
    (void) i02;
    (void) i1;
}
// Row-wise normalization of the row slice [i01_low, i01_high) of src0.
inline void ggml_cuda_op_norm(
    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
    float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
    hipStream_t & cudaStream_main){

    GGML_ASSERT(src0_ddf_i != nullptr);
    GGML_ASSERT(dst_ddf_i  != nullptr);

    const int64_t ncols = src0->ne[0];
    const int64_t nrows = i01_high - i01_low;

    norm_f32_cuda(src0_ddf_i, dst_ddf_i, ncols, nrows, cudaStream_main);

    // unused in this op
    (void) src1;
    (void) dst;
    (void) src0_ddq_i;
    (void) src1_ddf_i;
    (void) i02;
    (void) i1;
}
// Row-wise RMS normalization of the row slice [i01_low, i01_high) of src0.
inline void ggml_cuda_op_rms_norm(
    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
    float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
    hipStream_t & cudaStream_main){

    GGML_ASSERT(src0_ddf_i != nullptr);
    GGML_ASSERT(dst_ddf_i  != nullptr);

    const int64_t ncols = src0->ne[0];
    const int64_t nrows = i01_high - i01_low;

    // epsilon is stored in the op params as raw float bytes
    float eps;
    memcpy(&eps, dst->op_params, sizeof(float));

    rms_norm_f32_cuda(src0_ddf_i, dst_ddf_i, ncols, nrows, eps, cudaStream_main);

    // unused in this op
    (void) src1;
    (void) dst;
    (void) src0_ddq_i;
    (void) src1_ddf_i;
    (void) i02;
    (void) i1;
}
// Quantized matrix-matrix multiplication for the row slice [i01_low, i01_high)
// of src0: src1 (f32) is quantized on the fly to q8_1 into a temporary pool
// buffer, then the type-specific mul_mat_q kernel is dispatched.
// src1 columns are padded to a multiple of MATRIX_ROW_PADDING so the q8_1
// blocks are complete.
inline void ggml_cuda_op_mul_mat_q(
    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
    float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
    hipStream_t & cudaStream_main){

    GGML_ASSERT(src0_ddq_i != nullptr);
    GGML_ASSERT(src1_ddf_i != nullptr);
    GGML_ASSERT(dst_ddf_i != nullptr);

    const int64_t ne00 = src0->ne[0];

    const int64_t ne10 = src1->ne[0];
    const int64_t ne11 = src1->ne[1];
    GGML_ASSERT(ne10 % QK8_1 == 0);

    const int64_t ne0 = dst->ne[0];

    const int64_t i01_diff = i01_high - i01_low;

    int id;
    CUDA_CHECK(hipGetDevice(&id));

    // the main device has a larger memory buffer to hold the results from all GPUs
    // nrows_dst == nrows of the matrix that the dequantize_mul_mat kernel writes into
    const int64_t nrows_dst = dst->backend == GGML_BACKEND_GPU && id == g_main_device ? ne0 : i01_diff;

    // round ne10 up to the next multiple of MATRIX_ROW_PADDING
    const int64_t padded_row_size = ne10 % MATRIX_ROW_PADDING == 0 ?
        ne10 : ne10 - ne10 % MATRIX_ROW_PADDING + MATRIX_ROW_PADDING;

    // temporary q8_1 buffer for the quantized src1; freed at the end of this op
    size_t as;
    void * src1_q8_1 = ggml_cuda_pool_malloc(padded_row_size*ne11*sizeof(block_q8_1)/QK8_1, &as);
    quantize_row_q8_1_cuda(src1_ddf_i, src1_q8_1, ne10, ne11, padded_row_size, cudaStream_main);

    // dispatch on the quantization type of src0
    switch (src0->type) {
        case GGML_TYPE_Q4_0:
            ggml_mul_mat_q4_0_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, i01_diff, ne11, padded_row_size, nrows_dst, cudaStream_main);
            break;
        case GGML_TYPE_Q4_1:
            ggml_mul_mat_q4_1_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, i01_diff, ne11, padded_row_size, nrows_dst, cudaStream_main);
            break;
        case GGML_TYPE_Q5_0:
            ggml_mul_mat_q5_0_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, i01_diff, ne11, padded_row_size, nrows_dst, cudaStream_main);
            break;
        case GGML_TYPE_Q5_1:
            ggml_mul_mat_q5_1_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, i01_diff, ne11, padded_row_size, nrows_dst, cudaStream_main);
            break;
        case GGML_TYPE_Q8_0:
            ggml_mul_mat_q8_0_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, i01_diff, ne11, padded_row_size, nrows_dst, cudaStream_main);
            break;
        case GGML_TYPE_Q2_K:
            ggml_mul_mat_q2_K_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, i01_diff, ne11, padded_row_size, nrows_dst, cudaStream_main);
            break;
        case GGML_TYPE_Q3_K:
            ggml_mul_mat_q3_K_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, i01_diff, ne11, padded_row_size, nrows_dst, cudaStream_main);
            break;
        case GGML_TYPE_Q4_K:
            ggml_mul_mat_q4_K_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, i01_diff, ne11, padded_row_size, nrows_dst, cudaStream_main);
            break;
        case GGML_TYPE_Q5_K:
            ggml_mul_mat_q5_K_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, i01_diff, ne11, padded_row_size, nrows_dst, cudaStream_main);
            break;
        case GGML_TYPE_Q6_K:
            ggml_mul_mat_q6_K_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, i01_diff, ne11, padded_row_size, nrows_dst, cudaStream_main);
            break;
        default:
            GGML_ASSERT(false);
            break;
    }

    ggml_cuda_pool_free(src1_q8_1, as);

    (void) src1;
    (void) dst;
    (void) src0_ddf_i;
    (void) i02;
    (void) i1;
}
// Rounding granularity (in rows) for splitting a tensor of the given type
// across devices, so every slice starts on a quantization-block boundary.
// The granularity depends on the best compute capability among the devices
// that actually receive a nonzero slice of the tensor split.
static int64_t get_row_rounding(ggml_type type) {
    int max_compute_capability = INT_MIN;
    for (int id = 0; id < g_device_count; ++id) {
        // a device participates only if its split interval is non-empty
        const float split_end = id + 1 < g_device_count ? g_tensor_split[id + 1] : 1.0f;
        if (g_tensor_split[id] < split_end && max_compute_capability < g_compute_capabilities[id]) {
            max_compute_capability = g_compute_capabilities[id];
        }
    }

    switch(type) {
        // types whose rounding widens on Turing-class hardware
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
            return max_compute_capability >= CC_TURING ? 128 : 64;
        // fixed 64-row granularity regardless of hardware
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q6_K:
            return 64;
        case GGML_TYPE_F16:
            return 1;
        default:
            GGML_ASSERT(false);
    }
}
// Matrix-vector multiplication for the row slice [i01_low, i01_high) of src0.
// Two kernel families are available:
//   - mul_mat_vec_q:  src1 is quantized to q8_1 first; used when the device
//     supports DP4A (compute capability >= MIN_CC_DP4A) and the type has a
//     q8_1 kernel. Disabled entirely by GGML_CUDA_FORCE_DMMV.
//   - dequantize_mul_mat_vec: src0 is dequantized inside the kernel; with
//     GGML_CUDA_F16 src1 may first be converted to half for the non-K types
//     (the K-quant cases pass the original f32 src1_ddf_i, since they are not
//     in the src1_convert_f16 set).
inline void ggml_cuda_op_mul_mat_vec(
    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
    float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
    hipStream_t & cudaStream_main){

    GGML_ASSERT(src0_ddq_i != nullptr);
    GGML_ASSERT(src1_ddf_i != nullptr);
    GGML_ASSERT(dst_ddf_i != nullptr);

    const int64_t ne00 = src0->ne[0];
    const int64_t nrows = i01_high - i01_low;

#ifdef GGML_CUDA_FORCE_DMMV
    const bool use_mul_mat_vec_q = false;
    (void) g_compute_capabilities[0];
#else
    int id;
    CUDA_CHECK(hipGetDevice(&id));

    // non-K quantization types always have a q8_1 kernel
    bool mul_mat_vec_q_implemented =
        src0->type == GGML_TYPE_Q4_0 ||
        src0->type == GGML_TYPE_Q4_1 ||
        src0->type == GGML_TYPE_Q5_0 ||
        src0->type == GGML_TYPE_Q5_1 ||
        src0->type == GGML_TYPE_Q8_0;
#if QK_K == 256
    // K-quant q8_1 kernels exist only for the standard super-block size
    mul_mat_vec_q_implemented = mul_mat_vec_q_implemented ||
        src0->type == GGML_TYPE_Q2_K ||
        src0->type == GGML_TYPE_Q3_K ||
        src0->type == GGML_TYPE_Q4_K ||
        src0->type == GGML_TYPE_Q5_K ||
        src0->type == GGML_TYPE_Q6_K;
#endif // QK_K == 256

    const bool use_mul_mat_vec_q = g_compute_capabilities[id] >= MIN_CC_DP4A && mul_mat_vec_q_implemented;
#endif

    if (use_mul_mat_vec_q) {
        // pad src1 to a whole number of q8_1 blocks before quantizing
        const int64_t padded_row_size = ne00 % MATRIX_ROW_PADDING == 0 ?
            ne00 : ne00 - ne00 % MATRIX_ROW_PADDING + MATRIX_ROW_PADDING;
        size_t as;
        void * src1_q8_1 = ggml_cuda_pool_malloc(padded_row_size*sizeof(block_q8_1)/QK8_1, &as);
        quantize_row_q8_1_cuda(src1_ddf_i, src1_q8_1, ne00, 1, padded_row_size, cudaStream_main);

        switch (src0->type) {
            case GGML_TYPE_Q4_0:
                mul_mat_vec_q4_0_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, nrows, cudaStream_main);
                break;
            case GGML_TYPE_Q4_1:
                mul_mat_vec_q4_1_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, nrows, cudaStream_main);
                break;
            case GGML_TYPE_Q5_0:
                mul_mat_vec_q5_0_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, nrows, cudaStream_main);
                break;
            case GGML_TYPE_Q5_1:
                mul_mat_vec_q5_1_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, nrows, cudaStream_main);
                break;
            case GGML_TYPE_Q8_0:
                mul_mat_vec_q8_0_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, nrows, cudaStream_main);
                break;
            case GGML_TYPE_Q2_K:
                mul_mat_vec_q2_K_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, nrows, cudaStream_main);
                break;
            case GGML_TYPE_Q3_K:
                mul_mat_vec_q3_K_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, nrows, cudaStream_main);
                break;
            case GGML_TYPE_Q4_K:
                mul_mat_vec_q4_K_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, nrows, cudaStream_main);
                break;
            case GGML_TYPE_Q5_K:
                mul_mat_vec_q5_K_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, nrows, cudaStream_main);
                break;
            case GGML_TYPE_Q6_K:
                mul_mat_vec_q6_K_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, nrows, cudaStream_main);
                break;
            default:
                GGML_ASSERT(false);
                break;
        }

        ggml_cuda_pool_free(src1_q8_1, as);
    } else {
        // on some GPUs it is faster to convert src1 to half and to use half precision intrinsics
#ifdef GGML_CUDA_F16
        size_t ash;
        dfloat * src1_dfloat = nullptr; // dfloat == half
        bool src1_convert_f16 = src0->type == GGML_TYPE_Q4_0 || src0->type == GGML_TYPE_Q4_1 ||
            src0->type == GGML_TYPE_Q5_0 || src0->type == GGML_TYPE_Q5_1 ||
            src0->type == GGML_TYPE_Q8_0 || src0->type == GGML_TYPE_F16;

        if (src1_convert_f16) {
            src1_dfloat = (half *) ggml_cuda_pool_malloc(ne00*sizeof(half), &ash);
            ggml_cpy_f32_f16_cuda((char *) src1_ddf_i, (char *) src1_dfloat, ne00,
                                    ne00, 1, sizeof(float), 0, 0,
                                    ne00, 1, sizeof(half),  0, 0, cudaStream_main);
        }
#else
        dfloat * src1_dfloat = src1_ddf_i; // dfloat == float, no conversion
#endif // GGML_CUDA_F16

        switch (src0->type) {
            case GGML_TYPE_Q4_0:
                dequantize_mul_mat_vec_q4_0_cuda(src0_ddq_i, src1_dfloat, dst_ddf_i, ne00, nrows, cudaStream_main);
                break;
            case GGML_TYPE_Q4_1:
                dequantize_mul_mat_vec_q4_1_cuda(src0_ddq_i, src1_dfloat, dst_ddf_i, ne00, nrows, cudaStream_main);
                break;
            case GGML_TYPE_Q5_0:
                dequantize_mul_mat_vec_q5_0_cuda(src0_ddq_i, src1_dfloat, dst_ddf_i, ne00, nrows, cudaStream_main);
                break;
            case GGML_TYPE_Q5_1:
                dequantize_mul_mat_vec_q5_1_cuda(src0_ddq_i, src1_dfloat, dst_ddf_i, ne00, nrows, cudaStream_main);
                break;
            case GGML_TYPE_Q8_0:
                dequantize_mul_mat_vec_q8_0_cuda(src0_ddq_i, src1_dfloat, dst_ddf_i, ne00, nrows, cudaStream_main);
                break;
            // K-quant kernels take the f32 src1 directly (not covered by the
            // GGML_CUDA_F16 half conversion above)
            case GGML_TYPE_Q2_K:
                dequantize_mul_mat_vec_q2_K_cuda(src0_ddq_i, src1_ddf_i, dst_ddf_i, ne00, nrows, cudaStream_main);
                break;
            case GGML_TYPE_Q3_K:
                dequantize_mul_mat_vec_q3_K_cuda(src0_ddq_i, src1_ddf_i, dst_ddf_i, ne00, nrows, cudaStream_main);
                break;
            case GGML_TYPE_Q4_K:
                dequantize_mul_mat_vec_q4_K_cuda(src0_ddq_i, src1_ddf_i, dst_ddf_i, ne00, nrows, cudaStream_main);
                break;
            case GGML_TYPE_Q5_K:
                dequantize_mul_mat_vec_q5_K_cuda(src0_ddq_i, src1_ddf_i, dst_ddf_i, ne00, nrows, cudaStream_main);
                break;
            case GGML_TYPE_Q6_K:
                dequantize_mul_mat_vec_q6_K_cuda(src0_ddq_i, src1_ddf_i, dst_ddf_i, ne00, nrows, cudaStream_main);
                break;
            case GGML_TYPE_F16:
                convert_mul_mat_vec_f16_cuda(src0_ddq_i, src1_dfloat, dst_ddf_i, ne00, nrows, cudaStream_main);
                break;
            default:
                GGML_ASSERT(false);
                break;
        }

#ifdef GGML_CUDA_F16
        if (src1_convert_f16) {
            ggml_cuda_pool_free(src1_dfloat, ash);
        }
#endif // GGML_CUDA_F16
    }

    (void) src1;
    (void) dst;
    (void) src0_ddf_i;
    (void) i02;
    (void) i1;
}
// f32 matrix multiplication of the row slice [i01_low, i01_high) of src0 with
// src1 via hipBLAS/cuBLAS. BLAS is column-major and src0 rows are stored
// contiguously, so src0 is passed with OP_T (transposed) and src1 with OP_N.
// Runs asynchronously on cudaStream_main.
inline void ggml_cuda_op_mul_mat_cublas(
    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
    float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
    hipStream_t & cudaStream_main){

    GGML_ASSERT(src0_ddf_i != nullptr);
    GGML_ASSERT(src1_ddf_i != nullptr);
    GGML_ASSERT(dst_ddf_i != nullptr);

    // plain C = A^T * B (no scaling, no accumulation)
    const float alpha = 1.0f;
    const float beta = 0.0f;

    const int64_t ne00 = src0->ne[0];

    const int64_t ne10 = src1->ne[0];
    const int64_t ne11 = src1->ne[1];

    const int64_t ne0 = dst->ne[0];
    const int64_t i01_diff = i01_high - i01_low;

    int id;
    CUDA_CHECK(hipGetDevice(&id));

    // the main device has a larger memory buffer to hold the results from all GPUs
    // ldc == nrows of the matrix that cuBLAS writes into
    int ldc = dst->backend == GGML_BACKEND_GPU && id == g_main_device ? ne0 : i01_diff;

    CUBLAS_CHECK(hipblasSetStream(g_cublas_handles[id], cudaStream_main));
    CUBLAS_CHECK(
        hipblasSgemm(g_cublas_handles[id], HIPBLAS_OP_T, HIPBLAS_OP_N,
                i01_diff, ne11, ne10,
                &alpha, src0_ddf_i, ne00,
                        src1_ddf_i, ne10,
                &beta,  dst_ddf_i,  ldc));

    (void) dst;
    (void) src0_ddq_i;
    (void) i02;
    (void) i1;
}
// Apply rotary position embeddings (RoPE) to the row slice
// [i01_low, i01_high) of src0. Parameters are unpacked from dst->op_params:
// n_past, n_dims, mode (bit 1 = NeoX variant, bit 2 = GLM variant), n_ctx,
// then freq_base and freq_scale stored as raw float bytes.
inline void ggml_cuda_op_rope(
    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
    float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
    hipStream_t & cudaStream_main){

    GGML_ASSERT(src0_ddf_i != nullptr);
    GGML_ASSERT(dst_ddf_i != nullptr);

    const int64_t ne00 = src0->ne[0];
    const int64_t ne01 = src0->ne[1];
    const int64_t i01_diff = i01_high - i01_low;

    const int n_past = ((int32_t *) dst->op_params)[0];
    const int n_dims = ((int32_t *) dst->op_params)[1];
    const int mode   = ((int32_t *) dst->op_params)[2];
    const int n_ctx  = ((int32_t *) dst->op_params)[3];
    // RoPE alteration for extended context
    float freq_base, freq_scale;
    memcpy(&freq_base,  (int32_t *) dst->op_params + 4, sizeof(float));
    memcpy(&freq_scale, (int32_t *) dst->op_params + 5, sizeof(float));

    // per-pair angle multiplier: theta_i = p * theta_scale^i
    const float theta_scale = powf(freq_base, -2.0f/n_dims);

    const bool is_neox = mode & 2;
    const bool is_glm  = mode & 4;

    // compute — dispatch to the kernel matching the RoPE variant
    if (is_glm) {
        // GLM uses a scaled position clamped at n_ctx-2 plus a "block" position
        const float p = (((mode & 1) == 0 ? n_past + i02 : i02)) * freq_scale;
        const float id_p = min(p, n_ctx - 2.f);
        const float block_p = max(p - (n_ctx - 2.f), 0.f);
        rope_glm_f32_cuda(src0_ddf_i, dst_ddf_i, ne00, i01_diff, id_p, block_p, theta_scale, cudaStream_main);
    } else if (is_neox) {
        GGML_ASSERT(ne00 == n_dims && "ne00 != n_dims is not implemented for CUDA yet");
        const float p0 = (((mode & 1) == 0 ? n_past : 0)) * freq_scale;
        rope_neox_f32_cuda(src0_ddf_i, dst_ddf_i, ne00, i01_diff, p0, freq_scale, ne01, theta_scale, cudaStream_main);
    } else {
        const float p0 = (((mode & 1) == 0 ? n_past : 0)) * freq_scale;
        rope_f32_cuda(src0_ddf_i, dst_ddf_i, ne00, i01_diff, p0, freq_scale, ne01, theta_scale, cudaStream_main);
    }

    (void) src1;
    (void) dst;
    (void) src0_ddq_i;
    (void) src1_ddf_i;
    (void) i1;
}
// Apply ALiBi (attention with linear biases) to the row slice
// [i01_low, i01_high) of src0. n_past, n_head and max_bias come from
// dst->op_params; the per-head slopes are derived from m0/m1 using the
// largest power of two <= n_head, as in the ALiBi paper.
inline void ggml_cuda_op_alibi(
    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
    float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
    hipStream_t & cudaStream_main){

    GGML_ASSERT(src0_ddf_i != nullptr);
    GGML_ASSERT(dst_ddf_i != nullptr);

    const int64_t ne00 = src0->ne[0];
    const int64_t ne01 = src0->ne[1];
    const int64_t ne02 = src0->ne[2];
    const int64_t i01_diff = i01_high - i01_low;

    const int n_past = ((int32_t *) dst->op_params)[0];
    const int n_head = ((int32_t *) dst->op_params)[1];
    float max_bias;
    memcpy(&max_bias, (int32_t *) dst->op_params + 2, sizeof(float));

    GGML_ASSERT(ne01 + n_past == ne00);
    GGML_ASSERT(n_head == ne02);

    // slope bases for the first n_heads_log2_floor heads (m0) and the rest (m1)
    const int n_heads_log2_floor = 1 << (int) floor(log2(n_head));

    const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor);
    const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_heads_log2_floor);

    // compute
    alibi_f32_cuda(src0_ddf_i, dst_ddf_i, ne00, i01_diff, ne01, n_heads_log2_floor, m0, m1, cudaStream_main);

    (void) src1;
    (void) src0_ddq_i;
    (void) src1_ddf_i;
    (void) i02;
    (void) i1;
}
// Launch the diag_mask_inf kernel on the row slice [i01_low, i01_high) of
// src0 (causal masking shifted by n_past past tokens).
inline void ggml_cuda_op_diag_mask_inf(
    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
    float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
    hipStream_t & cudaStream_main){

    GGML_ASSERT(src0_ddf_i != nullptr);
    GGML_ASSERT(dst_ddf_i  != nullptr);

    const int64_t ncols      = src0->ne[0];
    const int64_t nrows_full = src0->ne[1];
    const int64_t nrows      = i01_high - i01_low;

    // number of past tokens is the first op parameter
    const int n_past = ((int32_t *) dst->op_params)[0];

    diag_mask_inf_f32_cuda(src0_ddf_i, dst_ddf_i, ncols, nrows, nrows_full, n_past, cudaStream_main);

    // unused in this op
    (void) src1;
    (void) dst;
    (void) src0_ddq_i;
    (void) src1_ddf_i;
    (void) i02;
    (void) i1;
}
// Row-wise softmax over the row slice [i01_low, i01_high) of src0.
inline void ggml_cuda_op_soft_max(
    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
    float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
    hipStream_t & cudaStream_main){

    GGML_ASSERT(src0_ddf_i != nullptr);
    GGML_ASSERT(dst_ddf_i  != nullptr);

    const int64_t ncols = src0->ne[0];
    const int64_t nrows = i01_high - i01_low;

    soft_max_f32_cuda(src0_ddf_i, dst_ddf_i, ncols, nrows, cudaStream_main);

    // unused in this op
    (void) src1;
    (void) dst;
    (void) src0_ddq_i;
    (void) src1_ddf_i;
    (void) i02;
    (void) i1;
}
// Multiply the row slice [i01_low, i01_high) of src0 by a scalar.
// The scalar is the single value of src1, read on the host (src1 stays on
// the host for GGML_OP_SCALE).
inline void ggml_cuda_op_scale(
    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
    float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
    hipStream_t & cudaStream_main){

    GGML_ASSERT(src0_ddf_i != nullptr);
    GGML_ASSERT(dst_ddf_i  != nullptr);

    const float scale = ((float *) src1->data)[0];

    // total number of elements in the processed slice
    const int64_t nrows   = i01_high - i01_low;
    const int64_t n_elems = src0->ne[0] * nrows;

    scale_f32_cuda(src0_ddf_i, dst_ddf_i, scale, n_elems, cudaStream_main);
    CUDA_CHECK(hipGetLastError());

    // unused in this op
    (void) src1;
    (void) dst;
    (void) src0_ddq_i;
    (void) src1_ddf_i;
    (void) i02;
    (void) i1;
}
static void ggml_cuda_op(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst,
ggml_cuda_op_t op, bool src0_needs_f32, bool flatten_rows) {
const int64_t ne00 = src0->ne[0];
const int64_t ne01 = src0->ne[1];
const int64_t ne02 = src0->ne[2];
const int64_t ne03 = src0->ne[3];
const int64_t nrows0 = ggml_nrows(src0);
const bool use_src1 = src1 != nullptr;
const int64_t ne10 = use_src1 ? src1->ne[0] : 1;
const int64_t ne11 = use_src1 ? src1->ne[1] : 1;
const int64_t ne12 = use_src1 ? src1->ne[2] : 1;
const int64_t ne13 = use_src1 ? src1->ne[3] : 1;
const int64_t nrows1 = use_src1 ? ggml_nrows(src1) : 1;
GGML_ASSERT(ne03 == ne13);
const int64_t ne0 = dst->ne[0];
const int64_t ne1 = dst->ne[1];
const int nb2 = dst->nb[2];
const int nb3 = dst->nb[3];
GGML_ASSERT(dst->backend != GGML_BACKEND_GPU_SPLIT);
GGML_ASSERT(!use_src1 || src1->backend != GGML_BACKEND_GPU_SPLIT);
// strides for iteration over dims 3 and 2
const int64_t num_iters_0 = ne02 >= ne12 ? ne02*ne03 : ne12*ne13;
const int64_t num_iters = flatten_rows ? 1 : num_iters_0;
const int64_t stride_mod = flatten_rows ? num_iters_0 : 1;
const int64_t src0_stride = ne00 * ne01 * stride_mod;
const int64_t src1_stride = ne10 * ne11 * stride_mod;
const int64_t dst_stride = ne0 * ne1 * stride_mod;
const int64_t rows_per_iter = flatten_rows ? nrows0 : ne01;
const int64_t i03_max = flatten_rows ? 1 : ne03;
const int64_t i02_max = flatten_rows ? 1 : (ne02 >= ne12 ? ne02 : ne12);
const int64_t i02_divisor = ne02 >= ne12 ? 1 : ne12 / ne02;
GGML_ASSERT(!(flatten_rows && ne02 < ne12));
const size_t src0_ts = ggml_type_size(src0->type);
const size_t src0_bs = ggml_blck_size(src0->type);
struct ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu *) src0->extra;
struct ggml_tensor_extra_gpu * src1_extra = use_src1 ? (ggml_tensor_extra_gpu *) src1->extra : nullptr;
struct ggml_tensor_extra_gpu * dst_extra = (ggml_tensor_extra_gpu *) dst->extra;
const bool src0_on_device = src0->backend == GGML_BACKEND_GPU || src0->backend == GGML_BACKEND_GPU_SPLIT;
const bool src0_is_contiguous = ggml_is_contiguous(src0);
const bool src0_is_f32 = src0->type == GGML_TYPE_F32;
const bool src1_is_contiguous = use_src1 && ggml_is_contiguous(src1);
const bool src1_stays_on_host = use_src1 && (
dst->op == GGML_OP_SCALE || dst->op == GGML_OP_DIAG_MASK_INF || dst->op == GGML_OP_ROPE);
const bool split = src0->backend == GGML_BACKEND_GPU_SPLIT;
GGML_ASSERT(!(split && ne02 < ne12));
const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(src0->type);
// dd = data device
char * src0_ddq[GGML_CUDA_MAX_DEVICES] = {nullptr}; // quantized
float * src0_ddf[GGML_CUDA_MAX_DEVICES] = {nullptr}; // float
float * src1_ddf[GGML_CUDA_MAX_DEVICES] = {nullptr};
float * dst_ddf[GGML_CUDA_MAX_DEVICES] = {nullptr};
// asq = actual size quantized, asf = actual size float
size_t src0_asq[GGML_CUDA_MAX_DEVICES] = {0};
size_t src0_asf[GGML_CUDA_MAX_DEVICES] = {0};
size_t src1_asf[GGML_CUDA_MAX_DEVICES] = {0};
size_t dst_asf[GGML_CUDA_MAX_DEVICES] = {0};
// if multiple devices are used they need to wait for the main device
// here an event is recorded that signifies that the main device has finished calculating the input data
if (split && g_device_count > 1) {
CUDA_CHECK(hipSetDevice(g_main_device));
CUDA_CHECK(hipEventRecord(src0_extra->events[g_main_device], g_cudaStreams_main[g_main_device]));
}
for (int id = 0; id < g_device_count; ++id) {
if (!split && id != g_main_device) {
continue;
}
const bool src1_on_device = use_src1 && src1->backend == GGML_BACKEND_GPU && id == g_main_device;
const bool dst_on_device = dst->backend == GGML_BACKEND_GPU && id == g_main_device;
int64_t row_low, row_high;
if (split) {
const int64_t rounding = get_row_rounding(src0->type);
row_low = id == 0 ? 0 : nrows0*g_tensor_split[id];
row_low -= row_low % rounding;
if (id == g_device_count - 1) {
row_high = nrows0;
} else {
row_high = nrows0*g_tensor_split[id + 1];
row_high -= row_high % rounding;
}
} else {
row_low = 0;
row_high = nrows0*i02_divisor;
}
if (row_low == row_high) {
continue;
}
int64_t row_diff = row_high - row_low;
hipSetDevice(id);
hipStream_t cudaStream_main = g_cudaStreams_main[id];
// wait for main GPU data if necessary
if (split && id != g_main_device) {
CUDA_CHECK(hipStreamWaitEvent(cudaStream_main, src0_extra->events[g_main_device]));
}
if (src0_on_device && src0_is_contiguous) {
if (src0_is_f32) {
src0_ddf[id] = (float *) src0_extra->data_device[id];
} else {
src0_ddq[id] = (char *) src0_extra->data_device[id];
}
} else {
if (src0_is_f32) {
src0_ddf[id] = (float *) ggml_cuda_pool_malloc(row_diff*ne00 * sizeof(float), &src0_asf[id]);
} else {
src0_ddq[id] = (char *) ggml_cuda_pool_malloc(row_diff*ne00 * src0_ts/src0_bs, &src0_asq[id]);
}
}
if (src0_needs_f32 && !src0_is_f32) {
src0_ddf[id] = (float *) ggml_cuda_pool_malloc(row_diff*ne00 * sizeof(float), &src0_asf[id]);
}
if (use_src1 && !src1_stays_on_host) {
if (src1_on_device && src1_is_contiguous) {
src1_ddf[id] = (float *) src1_extra->data_device[id];
} else {
src1_ddf[id] = (float *) ggml_cuda_pool_malloc(num_iters*src1_stride * sizeof(float), &src1_asf[id]);
}
}
if (dst_on_device) {
dst_ddf[id] = (float *) dst_extra->data_device[id];
} else {
size_t size_dst_ddf = split ? row_diff*ne1 * sizeof(float) : num_iters*dst_stride * sizeof(float);
dst_ddf[id] = (float *) ggml_cuda_pool_malloc(size_dst_ddf, &dst_asf[id]);
}
for (int64_t i03 = 0; i03 < i03_max; i03++) {
const int64_t i13 = i03 % ne13;
for (int64_t i02 = 0; i02 < i02_max; i02++) {
const int64_t i12 = i02 % ne12;
const int64_t i0 = i03*i02_max + i02;
// i0 values that contain the lower/upper rows for a split tensor when using multiple GPUs
const int64_t i0_offset_low = row_low/rows_per_iter;
const int64_t i0_offset_high = row_high/rows_per_iter;
int64_t i01_low = 0;
int64_t i01_high = rows_per_iter;
if (split) {
if (i0 < i0_offset_low || i0 > i0_offset_high) {
continue;
}
if (i0 == i0_offset_low) {
i01_low = row_low % rows_per_iter;
}
if (i0 == i0_offset_high) {
i01_high = row_high % rows_per_iter;
}
}
// There is possibly a bug in the Windows nvcc compiler regarding instruction reordering or optimizing out local variables.
// Removing the first assert or changing the order of the arguments causes the second assert to fail.
// Removing both asserts results in i01_high becoming 0 which in turn results in garbage output.
// The root cause seems to be a problem with i0_offset_high becoming 0 when it should always be >0 (for single GPU).
GGML_ASSERT(i01_low == 0 || g_device_count > 1);
GGML_ASSERT(i01_high == rows_per_iter || g_device_count > 1);
const int64_t i01_diff = i01_high - i01_low;
if (i01_diff == 0) {
continue;
}
const int64_t i11 = i13*ne12 + i12;
// for split tensors the data begins at i0 == i0_offset_low
char * src0_ddq_i = src0_ddq[id] + (i0/i02_divisor - i0_offset_low)*src0_stride*src0_ts/src0_bs;
float * src0_ddf_i = src0_ddf[id] + (i0/i02_divisor - i0_offset_low)*src0_stride;
float * src1_ddf_i = src1_ddf[id] + i11*src1_stride;
float * dst_ddf_i = dst_ddf[id] + (i0 - i0_offset_low)*dst_stride;
// for split tensors the data pointer needs to be rounded down
// to the bin edge for i03, i02 bins beyond the first
if (i0 - i0_offset_low > 0) {
GGML_ASSERT(!flatten_rows);
src0_ddq_i -= (row_low % ne01)*ne00 * src0_ts/src0_bs;
src0_ddf_i -= (row_low % ne01)*ne00;
dst_ddf_i -= (row_low % ne0)*ne1;
}
// the main device memory buffer can be on VRAM scratch, with space for all partial results
// in that case an offset on dst_ddf_i is needed
if (dst->backend == GGML_BACKEND_GPU && id == g_main_device) {
dst_ddf_i += i01_low; // offset is 0 if no tensor split
}
// copy src0, src1 to device if necessary
if (use_src1 && !src1_stays_on_host) {
if (src1->backend == GGML_BACKEND_CPU) {
GGML_ASSERT(!flatten_rows || nrows0 == ggml_nrows(src1));
int64_t nrows1 = flatten_rows ? nrows0 : ne11;
CUDA_CHECK(ggml_cuda_cpy_tensor_2d(src1_ddf_i, src1, i03, i02, 0, nrows1, cudaStream_main));
} else if (src1->backend == GGML_BACKEND_GPU && src1_is_contiguous) {
if (id != g_main_device) {
GGML_ASSERT(!flatten_rows);
float * src1_ddf_i_source = (float *) src1_extra->data_device[g_main_device];
src1_ddf_i_source += i11*src1_stride;
CUDA_CHECK(hipMemcpyAsync(src1_ddf_i, src1_ddf_i_source, src1_stride*sizeof(float),
hipMemcpyDeviceToDevice, cudaStream_main));
}
} else if (src1_on_device && !src1_is_contiguous) {
GGML_ASSERT(!split);
CUDA_CHECK(ggml_cuda_cpy_tensor_2d(src1_ddf_i, src1, i03, i02, 0, ne11, cudaStream_main));
} else {
GGML_ASSERT(false);
}
}
if ((!src0_on_device || !src0_is_contiguous) && i02 % i02_divisor == 0) {
if (src0_is_f32) {
CUDA_CHECK(ggml_cuda_cpy_tensor_2d(src0_ddf_i, src0, i03, i02/i02_divisor, i01_low, i01_high, cudaStream_main));
} else {
CUDA_CHECK(ggml_cuda_cpy_tensor_2d(src0_ddq_i, src0, i03, i02/i02_divisor, i01_low, i01_high, cudaStream_main));
}
}
// convert src0 to f32 if it is necessary for the ggml_cuda_op
if (src0_needs_f32 && !src0_is_f32) {
to_fp32_cuda(src0_ddq_i, src0_ddf_i, i01_diff*ne00, cudaStream_main);
CUDA_CHECK(hipGetLastError());
}
// do the computation
op(src0, src1, dst, src0_ddq_i, src0_ddf_i, src1_ddf_i, dst_ddf_i, i02, i01_low, i01_high, i11, cudaStream_main);
CUDA_CHECK(hipGetLastError());
// copy dst to host or other device if necessary
if (!dst_on_device) {
void * dst_off_device;
hipMemcpyKind kind;
if (dst->backend == GGML_BACKEND_CPU) {
dst_off_device = dst->data;
kind = hipMemcpyDeviceToHost;
} else if (dst->backend == GGML_BACKEND_GPU) {
dst_off_device = dst_extra->data_device[g_main_device];
kind = hipMemcpyDeviceToDevice;
} else {
GGML_ASSERT(false);
}
if (split) {
// src0 = weight matrix is saved as a transposed matrix for better memory layout.
// dst is NOT transposed.
// The outputs of matrix matrix multiplications can therefore NOT simply be concatenated for >1 GPU.
// Instead they need to be copied to the correct slice in ne0 = dst row index.
// If dst is a vector with ne0 == 1 then you don't have to do this but it still produces correct results.
float * dhf_dst_i = (float *) ((char *) dst_off_device + i01_low*sizeof(float) + i02*nb2 + i03*nb3);
CUDA_CHECK(hipMemcpy2DAsync(dhf_dst_i, ne0*sizeof(float), dst_ddf_i, i01_diff*sizeof(float),
i01_diff*sizeof(float), ne1, kind, cudaStream_main));
} else {
float * dhf_dst_i = (float *) ((char *) dst_off_device + i02*nb2 + i03*nb3);
CUDA_CHECK(hipMemcpyAsync(dhf_dst_i, dst_ddf_i, dst_stride*sizeof(float), kind, cudaStream_main));
}
}
// signify to main device that other device is done
if (split && g_device_count > 1 && id != g_main_device) {
CUDA_CHECK(hipEventRecord(src0_extra->events[id], cudaStream_main));
}
}
}
}
// wait until each device is finished, then free their buffers
for (int id = 0; id < g_device_count; ++id) {
if (src0_asq[id] == 0 && src0_asf[id] == 0 && src1_asf[id] == 0 && dst_asf[id] == 0) {
continue;
}
CUDA_CHECK(hipSetDevice(id));
if (src0_asq[id] > 0) {
ggml_cuda_pool_free(src0_ddq[id], src0_asq[id]);
}
if (src0_asf[id] > 0) {
ggml_cuda_pool_free(src0_ddf[id], src0_asf[id]);
}
if (src1_asf[id] > 0) {
ggml_cuda_pool_free(src1_ddf[id], src1_asf[id]);
}
if (dst_asf[id] > 0) {
ggml_cuda_pool_free(dst_ddf[id], dst_asf[id]);
}
}
// main device waits for all other devices to be finished
if (split && g_device_count > 1) {
CUDA_CHECK(hipSetDevice(g_main_device));
for (int id = 0; id < g_device_count; ++id) {
if (id != g_main_device && src0_extra->events[id]) {
CUDA_CHECK(hipStreamWaitEvent(g_cudaStreams_main[g_main_device], src0_extra->events[id]));
}
}
}
if (dst->backend == GGML_BACKEND_CPU) {
CUDA_CHECK(hipSetDevice(g_main_device));
CUDA_CHECK(hipDeviceSynchronize());
}
}
// Element-wise GPU addition: dst = src0 + src1.
// ggml_cuda_add permits f16 dst even though this could in theory cause problems with the pointer arithmetic in ggml_cuda_op.
// Due to flatten_rows == true this does in practice not make a difference however.
// Better solution would be nice but right now that would require disproportionate changes.
void ggml_cuda_add(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_ASSERT(
        (src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16) &&
        src1->type == GGML_TYPE_F32 &&
        (dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16));
    // trailing flags: presumably (src0_needs_f32 = false, flatten_rows = true) — matches ggml_cuda_op's use of those names
    ggml_cuda_op(src0, src1, dst, ggml_cuda_op_add, false, true);
}
// Element-wise GPU multiplication: dst = src0 * src1 (all operands must be f32).
void ggml_cuda_mul(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
    ggml_cuda_op(src0, src1, dst, ggml_cuda_op_mul, true, false); // TODO ggml_cuda_op needs modification for flatten
}
// GPU GELU activation applied to src0 (f32 in, f32 out).
void ggml_cuda_gelu(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
    ggml_cuda_op(src0, src1, dst, ggml_cuda_op_gelu, true, true);
}
// GPU SiLU (swish) activation applied to src0 (f32 in, f32 out).
void ggml_cuda_silu(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
    ggml_cuda_op(src0, src1, dst, ggml_cuda_op_silu, true, true);
}
// GPU layer normalization of src0 (f32 in, f32 out).
void ggml_cuda_norm(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
    ggml_cuda_op(src0, src1, dst, ggml_cuda_op_norm, true, true);
}
// GPU RMS normalization of src0 (f32 in, f32 out).
void ggml_cuda_rms_norm(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
    ggml_cuda_op(src0, src1, dst, ggml_cuda_op_rms_norm, true, true);
}
// Decides whether this matrix multiplication is worth offloading to the GPU:
// the operand types must be supported (f32/f16/quantized src0, f32 src1/dst)
// and the matrices must be large enough for the transfer overhead to pay off.
bool ggml_cuda_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst) {
    // TODO: find the optimal values for these
    const bool src0_ok   = src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type);
    const bool types_ok  = src0_ok && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32;
    const bool large_enough = dst->ne[0] >= 32 && dst->ne[1] >= 32 && src1->ne[0] >= 32;
    return types_ok && large_enough;
}
// f16 x f32 matrix-vector product for operands stored in 0213 permutation.
// Runs entirely on the main device; split (multi-GPU) src0 is not supported.
void ggml_cuda_mul_mat_vec_p021(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst){
    GGML_ASSERT(ggml_is_permuted(src0) && ggml_is_permuted(src1));
    GGML_ASSERT(src0->backend != GGML_BACKEND_GPU_SPLIT);
    GGML_ASSERT(src0->nb[0] <= src0->nb[1] && src0->nb[2] <= src0->nb[3]); // 0213 permutation
    GGML_ASSERT(src1->nb[0] <= src1->nb[1] && src1->nb[2] <= src1->nb[3]); // 0213 permutation
    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    const int64_t ne00 = src0->ne[0];
    const int64_t ne01 = src0->ne[1];
    const int64_t ne02 = src0->ne[2];
    const int64_t ne12 = src1->ne[2];
    CUDA_CHECK(hipSetDevice(g_main_device));
    hipStream_t cudaStream_main = g_cudaStreams_main[g_main_device];
    // device pointers come from the per-device extras attached by ggml_cuda_transform_tensor / assign_buffers
    struct ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu *) src0->extra;
    void * src0_ddq = src0_extra->data_device[g_main_device];
    struct ggml_tensor_extra_gpu * src1_extra = (ggml_tensor_extra_gpu *) src1->extra;
    float * src1_ddf = (float *) src1_extra->data_device[g_main_device];
    struct ggml_tensor_extra_gpu * dst_extra = (ggml_tensor_extra_gpu *) dst->extra;
    float * dst_ddf = (float *) dst_extra->data_device[g_main_device];
    ggml_mul_mat_p021_f16_f32_cuda(src0_ddq, src1_ddf, dst_ddf, ne00, ne01, ne02, ne12, cudaStream_main);
}
// f16 x f32 matrix-vector product for a non-contiguous (but non-permuted) src0.
// Strides are passed to the kernel in units of half elements; runs on the main device only.
void ggml_cuda_mul_mat_vec_nc(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst){
    GGML_ASSERT(!ggml_is_contiguous(src0) && ggml_is_contiguous(src1));
    GGML_ASSERT(!ggml_is_permuted(src0));
    GGML_ASSERT(src0->backend != GGML_BACKEND_GPU_SPLIT);
    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    const int64_t ne00 = src0->ne[0];
    const int64_t ne01 = src0->ne[1];
    const int64_t ne02 = src0->ne[2];
    const int64_t ne12 = src1->ne[2];
    const int64_t nb01 = src0->nb[1];
    const int64_t nb02 = src0->nb[2];
    CUDA_CHECK(hipSetDevice(g_main_device));
    hipStream_t cudaStream_main = g_cudaStreams_main[g_main_device];
    // device pointers come from the per-device extras attached by ggml_cuda_transform_tensor / assign_buffers
    struct ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu *) src0->extra;
    void * src0_ddq = src0_extra->data_device[g_main_device];
    struct ggml_tensor_extra_gpu * src1_extra = (ggml_tensor_extra_gpu *) src1->extra;
    float * src1_ddf = (float *) src1_extra->data_device[g_main_device];
    struct ggml_tensor_extra_gpu * dst_extra = (ggml_tensor_extra_gpu *) dst->extra;
    float * dst_ddf = (float *) dst_extra->data_device[g_main_device];
    // convert byte strides to element strides for the f16 weight matrix
    const int row_stride_x = nb01 / sizeof(half);
    const int channel_stride_x = nb02 / sizeof(half);
    ggml_mul_mat_vec_nc_f16_f32_cuda(src0_ddq, src1_ddf, dst_ddf, ne00, ne01, row_stride_x, ne02, ne12, channel_stride_x, cudaStream_main);
}
// Dispatches a matrix multiplication to the most suitable GPU implementation:
//  - dedicated f16 mat-vec kernels when all tensors already live on one GPU
//    (permuted or non-contiguous src0 with a single-column src1),
//  - cuBLAS for f32 weights,
//  - dequantize-mul-mat-vec, quantized mat-mat (mul_mat_q) or cuBLAS for
//    quantized / f16 weights.
void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    const bool all_on_device =
        (src0->backend == GGML_BACKEND_GPU || src0->backend == GGML_BACKEND_GPU_SPLIT) &&
        src1->backend == GGML_BACKEND_GPU && dst->backend == GGML_BACKEND_GPU;

    if (all_on_device && ggml_is_permuted(src0) && ggml_is_permuted(src1) && src1->ne[1] == 1) {
        ggml_cuda_mul_mat_vec_p021(src0, src1, dst);
        return;
    }
    if (all_on_device && !ggml_is_contiguous(src0) && ggml_is_contiguous(src1) && src1->ne[1] == 1) {
        ggml_cuda_mul_mat_vec_nc(src0, src1, dst);
        return;
    }
    if (src0->type == GGML_TYPE_F32) {
        ggml_cuda_op(src0, src1, dst, ggml_cuda_op_mul_mat_cublas, true, false);
        return;
    }
    if (ggml_is_quantized(src0->type) || src0->type == GGML_TYPE_F16) {
        if (src1->ne[1] == 1 && src0->ne[0] % GGML_CUDA_DMMV_X == 0) {
            // single-column src1: dequantize-mul-mat-vec kernel
            ggml_cuda_op(src0, src1, dst, ggml_cuda_op_mul_mat_vec, false, false);
        } else {
            // the quantized mat-mat kernels need DP4A support; take the lowest
            // compute capability among devices that actually hold a slice of src0
            int min_compute_capability = INT_MAX;
            for (int id = 0; id < g_device_count; ++id) {
                const float next_split = id + 1 < g_device_count ? g_tensor_split[id + 1] : 1.0f;
                if (min_compute_capability > g_compute_capabilities[id] && g_tensor_split[id] < next_split) {
                    min_compute_capability = g_compute_capabilities[id];
                }
            }
            if (g_mul_mat_q && ggml_is_quantized(src0->type) && min_compute_capability >= MIN_CC_DP4A) {
                ggml_cuda_op(src0, src1, dst, ggml_cuda_op_mul_mat_q, false, false);
            } else {
                ggml_cuda_op(src0, src1, dst, ggml_cuda_op_mul_mat_cublas, true, false);
            }
        }
        return;
    }
    GGML_ASSERT(false); // unsupported src0 type
}
// GPU scaling of src0 by a scalar (f32 in, f32 out).
void ggml_cuda_scale(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
    ggml_cuda_op(src0, src1, dst, ggml_cuda_op_scale, true, true);
}
// Copies src0 into src1 on the GPU — note that src1 is the DESTINATION of the
// copy here (dst is unused; see the (void) dst at the end). Only f32->f32 and
// f32->f16 copies are implemented, and both tensors must be at most 3D.
void ggml_cuda_cpy(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    const int64_t ne = ggml_nelements(src0);
    GGML_ASSERT(ne == ggml_nelements(src1));
    GGML_ASSERT(src0->backend == GGML_BACKEND_GPU);
    GGML_ASSERT(src1->backend == GGML_BACKEND_GPU);
    // the copy kernels index with 32 bit arithmetic
    GGML_ASSERT(ggml_nbytes(src0) <= INT_MAX);
    GGML_ASSERT(ggml_nbytes(src1) <= INT_MAX);
    const int64_t ne00 = src0->ne[0];
    const int64_t ne01 = src0->ne[1];
    GGML_ASSERT(src0->ne[3] == 1);
    const int64_t nb00 = src0->nb[0];
    const int64_t nb01 = src0->nb[1];
    const int64_t nb02 = src0->nb[2];
    const int64_t ne10 = src1->ne[0];
    const int64_t ne11 = src1->ne[1];
    GGML_ASSERT(src1->ne[3] == 1);
    const int64_t nb10 = src1->nb[0];
    const int64_t nb11 = src1->nb[1];
    const int64_t nb12 = src1->nb[2];
    CUDA_CHECK(hipSetDevice(g_main_device));
    hipStream_t cudaStream_main = g_cudaStreams_main[g_main_device];
    const struct ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu *) src0->extra;
    const struct ggml_tensor_extra_gpu * src1_extra = (ggml_tensor_extra_gpu *) src1->extra;
    char * src0_ddc = (char *) src0_extra->data_device[g_main_device];
    char * src1_ddc = (char *) src1_extra->data_device[g_main_device];
    if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32) {
        ggml_cpy_f32_f32_cuda(src0_ddc, src1_ddc, ne, ne00, ne01, nb00, nb01, nb02,
                              ne10, ne11, nb10, nb11, nb12, cudaStream_main);
    } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F16) {
        ggml_cpy_f32_f16_cuda(src0_ddc, src1_ddc, ne, ne00, ne01, nb00, nb01, nb02,
                              ne10, ne11, nb10, nb11, nb12, cudaStream_main);
    } else {
        GGML_ASSERT(false); // unsupported type combination
    }
    (void) dst;
}
// Duplicates src0 into dst, implemented via ggml_cuda_cpy — dst is passed in
// the src1 slot because cpy treats its second operand as the copy destination.
void ggml_cuda_dup(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    ggml_cuda_cpy(src0, dst, nullptr);
    (void) src1;
}
// GPU causal masking: sets the upper-diagonal entries of src0 to -inf (f32).
void ggml_cuda_diag_mask_inf(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
    ggml_cuda_op(src0, src1, dst, ggml_cuda_op_diag_mask_inf, true, true);
}
// GPU row-wise softmax of src0 (f32 in, f32 out).
void ggml_cuda_soft_max(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
    ggml_cuda_op(src0, src1, dst, ggml_cuda_op_soft_max, true, true);
}
// GPU rotary position embedding (RoPE) applied to src0 (f32 in, f32 out).
void ggml_cuda_rope(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
    GGML_ASSERT(ggml_is_contiguous(src0)); // TODO: this restriction is temporary until non-cont support is implemented
    // RoPE mode flags are stored in the op params of dst; bit 2 selects GLM-style rope
    const int mode = ((int32_t *) dst->op_params)[2];
    const bool is_glm = mode & 4;
    ggml_cuda_op(src0, src1, dst, ggml_cuda_op_rope, true, !is_glm); // flatten support not implemented for glm
}
// GPU ALiBi positional bias applied to src0 (f32 in, f32 out).
void ggml_cuda_alibi(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
    ggml_cuda_op(src0, src1, dst, ggml_cuda_op_alibi, true, true);
}
// No-op handler for ops (reshape/view/permute/transpose) that need no GPU work.
void ggml_cuda_nop(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    (void) src0;
    (void) src1;
    (void) dst;
}
// Uploads a host tensor to the GPU(s) and attaches a ggml_tensor_extra_gpu to it.
// GGML_BACKEND_GPU places the whole tensor on the main device only;
// GGML_BACKEND_GPU_SPLIT distributes row slices across all devices according
// to g_tensor_split (boundaries rounded to the type's row rounding) and
// creates per-device events used later for cross-device synchronization.
void ggml_cuda_transform_tensor(void * data, struct ggml_tensor * tensor) {
    int nrows = ggml_nrows(tensor);
    const int64_t ne0 = tensor->ne[0];
    const size_t nb1 = tensor->nb[1];
    ggml_backend backend = tensor->backend;
    struct ggml_tensor_extra_gpu * extra = new struct ggml_tensor_extra_gpu;
    memset(extra, 0, sizeof(*extra));
    for (int id = 0; id < g_device_count; ++id) {
        if (backend == GGML_BACKEND_GPU && id != g_main_device) {
            continue; // non-split tensors live only on the main device
        }
        // fix: this call was previously unchecked, unlike every other runtime
        // call in this file; a failed device switch would silently corrupt the upload
        CUDA_CHECK(hipSetDevice(id));
        int row_low, row_high;
        if (backend == GGML_BACKEND_GPU) {
            row_low = 0;
            row_high = nrows;
        } else if (backend == GGML_BACKEND_GPU_SPLIT) {
            // round slice boundaries down to a multiple of the type's row rounding
            const int64_t rounding = get_row_rounding(tensor->type);
            row_low = id == 0 ? 0 : nrows*g_tensor_split[id];
            row_low -= row_low % rounding;
            if (id == g_device_count - 1) {
                row_high = nrows;
            } else {
                row_high = nrows*g_tensor_split[id + 1];
                row_high -= row_high % rounding;
            }
        } else {
            GGML_ASSERT(false);
        }
        if (row_low == row_high) {
            continue; // this device receives no rows
        }
        int64_t nrows_split = row_high - row_low;
        const size_t offset_split = row_low*nb1;
        size_t size = ggml_nbytes_split(tensor, nrows_split);
        const size_t original_size = size;
        // pad last row to a multiple of 512 elements to avoid out-of-bounds memory accesses
        if (ne0 % MATRIX_ROW_PADDING != 0) {
            size += (MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING)
                * ggml_type_size(tensor->type)/ggml_blck_size(tensor->type);
        }
        char * buf;
        CUDA_CHECK(hipMalloc(&buf, size));
        char * buf_host = (char*)data + offset_split;
        // set padding to 0 to avoid possible NaN values
        if (size > original_size) {
            CUDA_CHECK(hipMemset(buf + original_size, 0, size - original_size));
        }
        CUDA_CHECK(hipMemcpy(buf, buf_host, original_size, hipMemcpyHostToDevice));
        extra->data_device[id] = buf;
        if (backend == GGML_BACKEND_GPU_SPLIT) {
            // event recorded/waited on in ggml_cuda_op to combine partial results
            CUDA_CHECK(hipEventCreateWithFlags(&extra->events[id], hipEventDisableTiming));
        }
    }
    tensor->extra = extra;
}
// Frees all per-device buffers and events attached to a GPU tensor and
// deletes its extra. Safe to call on null tensors or CPU-backed tensors.
void ggml_cuda_free_data(struct ggml_tensor * tensor) {
    if (!tensor || (tensor->backend != GGML_BACKEND_GPU && tensor->backend != GGML_BACKEND_GPU_SPLIT) ) {
        return;
    }
    ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) tensor->extra;
    for (int id = 0; id < g_device_count; ++id) {
        if (extra->data_device[id] != nullptr) {
            CUDA_CHECK(hipSetDevice(id));
            CUDA_CHECK(hipFree(extra->data_device[id]));
        }
        if (extra->events[id] != nullptr) {
            CUDA_CHECK(hipSetDevice(id));
            CUDA_CHECK(hipEventDestroy(extra->events[id]));
        }
    }
    delete extra;
}
// Ring buffer of temporary tensor extras; slots are recycled round-robin
// after GGML_MAX_NODES allocations, so an extra is only valid until the
// ring wraps around.
static struct ggml_tensor_extra_gpu * g_temp_tensor_extras = nullptr;
static size_t g_temp_tensor_extra_index = 0;

// Returns the next zeroed slot from the (lazily allocated) ring buffer.
static struct ggml_tensor_extra_gpu * ggml_cuda_alloc_temp_tensor_extra() {
    if (g_temp_tensor_extras == nullptr) {
        g_temp_tensor_extras = new ggml_tensor_extra_gpu[GGML_MAX_NODES];
    }
    size_t alloc_index = g_temp_tensor_extra_index;
    g_temp_tensor_extra_index = (g_temp_tensor_extra_index + 1) % GGML_MAX_NODES;
    struct ggml_tensor_extra_gpu * extra = &g_temp_tensor_extras[alloc_index];
    memset(extra, 0, sizeof(*extra));
    return extra;
}
// Assigns a GPU buffer to `tensor` on the main device, recursively pulling
// view-like CPU parents (reshape/transpose/view/permute) onto the GPU first.
//   scratch       — carve the buffer out of the global scratch ring buffer
//   force_inplace — reuse src0's device buffer even when not a view
//   no_alloc      — only mark the backend; actual scratch offsets are
//                   assigned later (see ggml_cuda_assign_scratch_offset)
void ggml_cuda_assign_buffers_impl(struct ggml_tensor * tensor, bool scratch, bool force_inplace, bool no_alloc) {
    if (scratch && g_scratch_size == 0) {
        return;
    }
    // recursively assign CUDA buffers until a compute tensor is found
    if (tensor->src[0] != nullptr && tensor->src[0]->backend == GGML_BACKEND_CPU) {
        const ggml_op src0_op = tensor->src[0]->op;
        if (src0_op == GGML_OP_RESHAPE || src0_op == GGML_OP_TRANSPOSE || src0_op == GGML_OP_VIEW || src0_op == GGML_OP_PERMUTE) {
            ggml_cuda_assign_buffers_impl(tensor->src[0], scratch, force_inplace, no_alloc);
        }
    }
    if (tensor->op == GGML_OP_CPY && tensor->src[1]->backend == GGML_BACKEND_CPU) {
        ggml_cuda_assign_buffers_impl(tensor->src[1], scratch, force_inplace, no_alloc);
    }
    tensor->backend = GGML_BACKEND_GPU;
    if (scratch && no_alloc) {
        return;
    }
    struct ggml_tensor_extra_gpu * extra;
    // in-place: views and explicitly forced tensors share src0's device memory
    const bool inplace = (tensor->src[0] != nullptr && tensor->src[0]->data == tensor->data) ||
        tensor->op == GGML_OP_VIEW ||
        force_inplace;
    const size_t size = ggml_nbytes(tensor);
    CUDA_CHECK(hipSetDevice(g_main_device));
    if (inplace && (tensor->src[0]->backend == GGML_BACKEND_GPU || tensor->src[0]->backend == GGML_BACKEND_GPU_SPLIT)) {
        struct ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu * ) tensor->src[0]->extra;
        char * src0_ddc = (char *) src0_extra->data_device[g_main_device];
        size_t offset = 0;
        if (tensor->op == GGML_OP_VIEW) {
            // view offset (in bytes) is encoded in the op params
            memcpy(&offset, tensor->op_params, sizeof(size_t));
        }
        extra = ggml_cuda_alloc_temp_tensor_extra();
        extra->data_device[g_main_device] = src0_ddc + offset;
    } else if (tensor->op == GGML_OP_CPY) {
        // CPY writes into src1's buffer, so the result aliases it
        struct ggml_tensor_extra_gpu * src1_extra = (ggml_tensor_extra_gpu * ) tensor->src[1]->extra;
        void * src1_ddv = src1_extra->data_device[g_main_device];
        extra = ggml_cuda_alloc_temp_tensor_extra();
        extra->data_device[g_main_device] = src1_ddv;
    } else if (scratch) {
        GGML_ASSERT(size <= g_scratch_size);
        if (g_scratch_offset + size > g_scratch_size) {
            g_scratch_offset = 0; // wrap the scratch ring buffer
        }
        char * data = (char *) g_scratch_buffer;
        if (data == nullptr) {
            CUDA_CHECK(hipMalloc(&data, g_scratch_size));
            g_scratch_buffer = data;
        }
        extra = ggml_cuda_alloc_temp_tensor_extra();
        extra->data_device[g_main_device] = data + g_scratch_offset;
        g_scratch_offset += size;
        GGML_ASSERT(g_scratch_offset <= g_scratch_size);
    } else { // allocate new buffers outside of scratch
        void * data;
        CUDA_CHECK(hipMalloc(&data, size));
        CUDA_CHECK(hipMemset(data, 0, size));
        extra = new ggml_tensor_extra_gpu;
        memset(extra, 0, sizeof(*extra));
        extra->data_device[g_main_device] = data;
    }
    tensor->extra = extra;
}
// Assigns a device pointer at a precomputed offset into the global scratch
// buffer (used together with the no_alloc assignment path). Views and
// in-place tensors alias src0's buffer instead of taking a scratch slot.
void ggml_cuda_assign_scratch_offset(struct ggml_tensor * tensor, size_t offset) {
    if (g_scratch_size == 0) {
        return;
    }
    if (g_scratch_buffer == nullptr) {
        CUDA_CHECK(hipMalloc(&g_scratch_buffer, g_scratch_size));
    }
    struct ggml_tensor_extra_gpu * extra = ggml_cuda_alloc_temp_tensor_extra();
    const bool inplace = (tensor->src[0] != nullptr && tensor->src[0]->data == tensor->data) ||
        tensor->op == GGML_OP_VIEW;
    if (inplace && (tensor->src[0]->backend == GGML_BACKEND_GPU || tensor->src[0]->backend == GGML_BACKEND_GPU_SPLIT)) {
        struct ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu * ) tensor->src[0]->extra;
        char * src0_ddc = (char *) src0_extra->data_device[g_main_device];
        size_t view_offset = 0;
        if (tensor->op == GGML_OP_VIEW) {
            // view offset (in bytes) is encoded in the op params
            memcpy(&view_offset, tensor->op_params, sizeof(size_t));
        }
        extra->data_device[g_main_device] = src0_ddc + view_offset;
    } else {
        extra->data_device[g_main_device] = (char *) g_scratch_buffer + offset;
    }
    tensor->extra = extra;
}
// Assign a scratch-backed GPU buffer (flags: scratch=true, force_inplace=false, no_alloc=false).
void ggml_cuda_assign_buffers(struct ggml_tensor * tensor) {
    ggml_cuda_assign_buffers_impl(tensor, true, false, false);
}
// Mark as scratch-backed without allocating; offsets are set later via ggml_cuda_assign_scratch_offset.
void ggml_cuda_assign_buffers_no_alloc(struct ggml_tensor * tensor) {
    ggml_cuda_assign_buffers_impl(tensor, true, false, true);
}
// Assign a dedicated (non-scratch) GPU buffer via hipMalloc.
void ggml_cuda_assign_buffers_no_scratch(struct ggml_tensor * tensor) {
    ggml_cuda_assign_buffers_impl(tensor, false, false, false);
}
// Force the tensor to reuse src0's device buffer (no scratch, no new allocation).
void ggml_cuda_assign_buffers_force_inplace(struct ggml_tensor * tensor) {
    ggml_cuda_assign_buffers_impl(tensor, false, true, false);
}
// Selects the main device for single-GPU work and multi-GPU coordination.
// An out-of-range index only prints a warning and keeps the previous main
// device; the chosen device's name is reported when more than one GPU exists.
void ggml_cuda_set_main_device(int main_device) {
    if (main_device >= g_device_count) {
        fprintf(stderr, "warning: cannot set main_device=%d because there are only %d devices. Using device %d instead.\n",
                main_device, g_device_count, g_main_device);
        return;
    }
    g_main_device = main_device;
    if (g_device_count > 1) {
        hipDeviceProp_t prop;
        CUDA_CHECK(hipGetDeviceProperties(&prop, g_main_device));
        fprintf(stderr, "%s: using device %d (%s) as main device\n", __func__, g_main_device, prop.name);
    }
}
// Enables/disables the custom quantized mat-mat kernels (vs. cuBLAS fallback).
void ggml_cuda_set_mul_mat_q(bool mul_mat_q) {
    g_mul_mat_q = mul_mat_q;
}
// Sets the size of the global scratch buffer; the buffer itself is allocated lazily.
void ggml_cuda_set_scratch_size(size_t scratch_size) {
    g_scratch_size = scratch_size;
}
// Releases the global scratch buffer, if it was ever allocated.
void ggml_cuda_free_scratch() {
    if (g_scratch_buffer == nullptr) {
        return;
    }
    CUDA_CHECK(hipFree(g_scratch_buffer));
    g_scratch_buffer = nullptr;
}
// Runs the GPU implementation of `tensor` if one exists and at least one
// operand lives on the GPU. Returns true when the op is handled by CUDA
// (so the CPU backend should skip it), false to fall back to the CPU path.
bool ggml_cuda_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor){
    ggml_cuda_func_t func;
    const bool any_on_device = tensor->backend == GGML_BACKEND_GPU
        || (tensor->src[0] != nullptr && (tensor->src[0]->backend == GGML_BACKEND_GPU || tensor->src[0]->backend == GGML_BACKEND_GPU_SPLIT))
        || (tensor->src[1] != nullptr && tensor->src[1]->backend == GGML_BACKEND_GPU);
    // map the ggml op to its CUDA handler; bail out (return false) when no
    // operand is on the GPU or the op has no CUDA implementation
    switch (tensor->op) {
        case GGML_OP_DUP:
            if (!any_on_device) {
                return false;
            }
            func = ggml_cuda_dup;
            break;
        case GGML_OP_ADD:
            if (!any_on_device) {
                return false;
            }
            func = ggml_cuda_add;
            break;
        case GGML_OP_MUL:
            if (!any_on_device) {
                return false;
            }
            func = ggml_cuda_mul;
            break;
        case GGML_OP_UNARY:
            switch (ggml_get_unary_op(tensor)) {
                case GGML_UNARY_OP_GELU:
                    if (!any_on_device) {
                        return false;
                    }
                    func = ggml_cuda_gelu;
                    break;
                case GGML_UNARY_OP_SILU:
                    if (!any_on_device) {
                        return false;
                    }
                    func = ggml_cuda_silu;
                    break;
                default:
                    return false;
            } break;
        case GGML_OP_NORM:
            if (!any_on_device) {
                return false;
            }
            func = ggml_cuda_norm;
            break;
        case GGML_OP_RMS_NORM:
            if (!any_on_device) {
                return false;
            }
            func = ggml_cuda_rms_norm;
            break;
        case GGML_OP_MUL_MAT:
            // mat mul may also be taken even when nothing is on the GPU yet,
            // provided the operands qualify for offloading
            if (!any_on_device && !ggml_cuda_can_mul_mat(tensor->src[0], tensor->src[1], tensor)) {
                return false;
            }
            func = ggml_cuda_mul_mat;
            break;
        case GGML_OP_SCALE:
            if (!any_on_device) {
                return false;
            }
            func = ggml_cuda_scale;
            break;
        case GGML_OP_CPY:
            if (!any_on_device) {
                return false;
            }
            func = ggml_cuda_cpy;
            break;
        case GGML_OP_CONT:
            if (!any_on_device) {
                return false;
            }
            func = ggml_cuda_dup;
            break;
        case GGML_OP_RESHAPE:
        case GGML_OP_VIEW:
        case GGML_OP_PERMUTE:
        case GGML_OP_TRANSPOSE:
            if (!any_on_device) {
                return false;
            }
            func = ggml_cuda_nop;
            break;
        case GGML_OP_DIAG_MASK_INF:
            if (!any_on_device) {
                return false;
            }
            func = ggml_cuda_diag_mask_inf;
            break;
        case GGML_OP_SOFT_MAX:
            if (!any_on_device) {
                return false;
            }
            func = ggml_cuda_soft_max;
            break;
        case GGML_OP_ROPE:
            if (!any_on_device) {
                return false;
            }
            func = ggml_cuda_rope;
            break;
        case GGML_OP_ALIBI:
            if (!any_on_device) {
                return false;
            }
            func = ggml_cuda_alibi;
            break;
        default:
            return false;
    }
    // the GPU path is single-threaded: only thread 0 launches the work,
    // the other threads just report the op as handled
    if (params->ith != 0) {
        return true;
    }
    // nothing to do during the init/finalize phases
    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return true;
    }
    func(tensor->src[0], tensor->src[1], tensor);
    return true;
}
// Returns the number of GPU devices visible to the runtime.
int ggml_cuda_get_device_count() {
    int count = 0;
    CUDA_CHECK(hipGetDeviceCount(&count));
    return count;
}
// Writes the human-readable name of `device` into `description`
// (truncated to description_size; snprintf always NUL-terminates).
void ggml_cuda_get_device_description(int device, char * description, size_t description_size) {
    hipDeviceProp_t prop;
    CUDA_CHECK(hipGetDeviceProperties(&prop, device));
    snprintf(description, description_size, "%s", prop.name);
}
| 78ced4c99e433e7f18e5988551650c9747532768.cu | #include <cstddef>
#include <cstdint>
#include <limits>
#include <stdint.h>
#include <stdio.h>
#include <atomic>
#include <assert.h>
#if defined(GGML_USE_HIPBLAS)
#include <hip/hip_runtime.h>
#include <hipblas/hipblas.h>
#include <hip/hip_fp16.h>
#ifdef __HIP_PLATFORM_AMD__
// for rocblas_initialize()
#include "rocblas/rocblas.h"
#endif
#define CUBLAS_COMPUTE_32F HIPBLAS_R_32F
#define CUBLAS_COMPUTE_32F_FAST_16F HIPBLAS_R_32F
#define CUBLAS_GEMM_DEFAULT HIPBLAS_GEMM_DEFAULT
#define CUBLAS_OP_N HIPBLAS_OP_N
#define CUBLAS_OP_T HIPBLAS_OP_T
#define CUBLAS_STATUS_SUCCESS HIPBLAS_STATUS_SUCCESS
#define CUBLAS_TF32_TENSOR_OP_MATH 0
#define CUDA_R_16F HIPBLAS_R_16F
#define CUDA_R_32F HIPBLAS_R_32F
#define __shfl_xor_sync(mask, var, laneMask, width) __shfl_xor(var, laneMask, width)
#define cublasCreate hipblasCreate
#define cublasGemmEx hipblasGemmEx
#define cublasHandle_t hipblasHandle_t
#define cublasSetMathMode(handle, mode) CUBLAS_STATUS_SUCCESS
#define cublasSetStream hipblasSetStream
#define cublasSgemm hipblasSgemm
#define cublasStatus_t hipblasStatus_t
#define cudaDeviceProp hipDeviceProp_t
#define cudaDeviceSynchronize hipDeviceSynchronize
#define cudaError_t hipError_t
#define cudaEventCreateWithFlags hipEventCreateWithFlags
#define cudaEventDisableTiming hipEventDisableTiming
#define cudaEventRecord hipEventRecord
#define cudaEvent_t hipEvent_t
#define cudaEventDestroy hipEventDestroy
#define cudaFree hipFree
#define cudaFreeHost hipHostFree
#define cudaGetDevice hipGetDevice
#define cudaGetDeviceCount hipGetDeviceCount
#define cudaGetDeviceProperties hipGetDeviceProperties
#define cudaGetErrorString hipGetErrorString
#define cudaGetLastError hipGetLastError
#define cudaMalloc hipMalloc
#define cudaMallocHost(ptr, size) hipHostMalloc(ptr, size, hipHostMallocDefault)
#define cudaMemcpy hipMemcpy
#define cudaMemcpy2DAsync hipMemcpy2DAsync
#define cudaMemcpyAsync hipMemcpyAsync
#define cudaMemcpyDeviceToDevice hipMemcpyDeviceToDevice
#define cudaMemcpyDeviceToHost hipMemcpyDeviceToHost
#define cudaMemcpyHostToDevice hipMemcpyHostToDevice
#define cudaMemcpyKind hipMemcpyKind
#define cudaMemset hipMemset
#define cudaOccupancyMaxPotentialBlockSize hipOccupancyMaxPotentialBlockSize
#define cudaSetDevice hipSetDevice
#define cudaStreamCreateWithFlags hipStreamCreateWithFlags
#define cudaStreamNonBlocking hipStreamNonBlocking
#define cudaStreamSynchronize hipStreamSynchronize
#define cudaStreamWaitEvent(stream, event) hipStreamWaitEvent(stream, event, 0)
#define cudaStream_t hipStream_t
#define cudaSuccess hipSuccess
#else
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cuda_fp16.h>
#endif
#include "ggml-cuda.h"
#include "ggml.h"
#define MIN_CC_DP4A 610 // minimum compute capability for __dp4a, an intrinsic for byte-wise dot products
#ifndef CC_TURING
#define CC_TURING 700
#endif
#if defined(GGML_USE_HIPBLAS)
#define __CUDA_ARCH__ 1300
typedef int8_t int8x4_t __attribute__((ext_vector_type(4)));
// HIP replacement for CUDA's __vsubss4: per-byte saturating subtraction of
// four packed signed 8-bit values (a - b, each lane clamped to [-128, 127]).
static __device__ __forceinline__ int __vsubss4(const int a, const int b) {
    const int8x4_t va = reinterpret_cast<const int8x4_t&>(a);
    const int8x4_t vb = reinterpret_cast<const int8x4_t&>(b);
    const int8x4_t c = __builtin_elementwise_sub_sat(va, vb);
    return reinterpret_cast<const int&>(c);
}
// HIP replacement for CUDA's __dp4a: byte-wise dot product of two packed
// int8x4 values with 32-bit accumulate, c += sum(a[i] * b[i]).
// Uses hardware dot-product instructions where the target GPU has them,
// hand-written SDWA assembly on gfx900/gfx1010, and a scalar fallback otherwise.
static __device__ __forceinline__ int __dp4a(const int a, const int b, int c) {
#if defined(__gfx906__) || defined(__gfx908__) || defined(__gfx90a__) || defined(__gfx1030__)
    // native signed 4x int8 dot product
    c = __builtin_amdgcn_sdot4(a, b, c, false);
#elif defined(__gfx1100__)
    c = __builtin_amdgcn_sudot4( true, a, true, b, c, false);
#elif defined(__gfx1010__) || defined(__gfx900__)
    // no dot-product instruction: multiply byte lanes via SDWA selects and accumulate
    int tmp1;
    int tmp2;
    asm("\n \
        v_mul_i32_i24 %1, sext(%3), sext(%4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0 \n \
        v_mul_i32_i24 %2, sext(%3), sext(%4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1 \n \
        v_add3_u32 %0, %1, %2, %0 \n \
        v_mul_i32_i24 %1, sext(%3), sext(%4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:BYTE_2 \n \
        v_mul_i32_i24 %2, sext(%3), sext(%4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:BYTE_3 \n \
        v_add3_u32 %0, %1, %2, %0 \n \
        "
        : "+v"(c), "=&v"(tmp1), "=&v"(tmp2)
        : "v"(a), "v"(b)
    );
#else
    // portable scalar fallback
    const int8x4_t va = reinterpret_cast<const int8x4_t&>(a);
    const int8x4_t vb = reinterpret_cast<const int8x4_t&>(b);
    c += va[0] * vb[0] + va[1] * vb[1] + va[2] * vb[2] + va[3] * vb[3];
#endif
    return c;
}
#endif
#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
#endif
static_assert(sizeof(half) == sizeof(ggml_fp16_t), "wrong fp16 size");
// Aborts with file/line context if a runtime API call returned an error.
// Note: kernel launches do not return errors directly — pair launches with
// cudaGetLastError() as done throughout this file.
#define CUDA_CHECK(err)                                                                 \
    do {                                                                                \
        cudaError_t err_ = (err);                                                       \
        if (err_ != cudaSuccess) {                                                      \
            fprintf(stderr, "CUDA error %d at %s:%d: %s\n", err_, __FILE__, __LINE__,   \
                cudaGetErrorString(err_));                                              \
            exit(1);                                                                    \
        }                                                                               \
    } while (0)
// Aborts on cuBLAS errors; cublasGetStatusString is only available from CUDA 12 on.
#if CUDART_VERSION >= 12000
#define CUBLAS_CHECK(err)                                                               \
    do {                                                                                \
        cublasStatus_t err_ = (err);                                                    \
        if (err_ != CUBLAS_STATUS_SUCCESS) {                                            \
            fprintf(stderr, "\ncuBLAS error %d at %s:%d: %s\n",                         \
                    err_, __FILE__, __LINE__, cublasGetStatusString(err_));             \
            exit(1);                                                                    \
        }                                                                               \
    } while (0)
#else
#define CUBLAS_CHECK(err)                                                               \
    do {                                                                                \
        cublasStatus_t err_ = (err);                                                    \
        if (err_ != CUBLAS_STATUS_SUCCESS) {                                            \
            fprintf(stderr, "\ncuBLAS error %d at %s:%d\n", err_, __FILE__, __LINE__);  \
            exit(1);                                                                    \
        }                                                                               \
    } while (0)
#endif // CUDART_VERSION >= 12000
#ifdef GGML_CUDA_F16
typedef half dfloat; // dequantize float
typedef half2 dfloat2;
#else
typedef float dfloat; // dequantize float
typedef float2 dfloat2;
#endif //GGML_CUDA_F16
// Loads 4 consecutive int8 quants as one 32-bit integer via two 16-bit reads;
// i32 indexes in units of sizeof(int). Used when only 2-byte alignment is guaranteed.
static __device__ __forceinline__ int get_int_from_int8(const int8_t * x8, const int & i32) {
    const uint16_t * x16 = (uint16_t *) (x8 + sizeof(int) * i32); // assume at least 2 byte alignment
    int x32 = 0;
    x32 |= x16[0] << 0;
    x32 |= x16[1] << 16;
    return x32;
}
// Same as get_int_from_int8 but for unsigned quants: two 16-bit reads
// combined into one 32-bit integer (requires only 2-byte alignment).
static __device__ __forceinline__ int get_int_from_uint8(const uint8_t * x8, const int & i32) {
    const uint16_t * x16 = (uint16_t *) (x8 + sizeof(int) * i32); // assume at least 2 byte alignment
    int x32 = 0;
    x32 |= x16[0] << 0;
    x32 |= x16[1] << 16;
    return x32;
}
// Single 32-bit load of 4 int8 quants; only valid when the data is 4-byte aligned.
static __device__ __forceinline__ int get_int_from_int8_aligned(const int8_t * x8, const int & i32) {
    return *((int *) (x8 + sizeof(int) * i32)); // assume at least 4 byte alignment
}
// Single 32-bit load of 4 uint8 quants; only valid when the data is 4-byte aligned.
static __device__ __forceinline__ int get_int_from_uint8_aligned(const uint8_t * x8, const int & i32) {
    return *((int *) (x8 + sizeof(int) * i32)); // assume at least 4 byte alignment
}
typedef void (*dequantize_kernel_t)(const void * vx, const int ib, const int iqs, dfloat2 & v);
typedef void (*to_fp32_cuda_t)(const void * __restrict__ x, float * __restrict__ y, int k, cudaStream_t stream);
typedef void (*dot_kernel_k_t)(const void * __restrict__ vx, const int ib, const int iqs, const float * __restrict__ y, float & v);
typedef void (*cpy_kernel_t)(const char * cx, char * cdst);
typedef void (*ggml_cuda_func_t)(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst);
typedef void (*ggml_cuda_op_t)(
const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i, float * src0_ddf_i,
float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
cudaStream_t & cudaStream_main);
// QK = number of values after dequantization
// QR = QK / number of values before dequantization
// QI = number of 32 bit integers before dequantization
#define QK4_0 32
#define QR4_0 2
#define QI4_0 (QK4_0 / (4 * QR4_0))
typedef struct {
half d; // delta
uint8_t qs[QK4_0 / 2]; // nibbles / quants
} block_q4_0;
static_assert(sizeof(block_q4_0) == sizeof(ggml_fp16_t) + QK4_0 / 2, "wrong q4_0 block size/padding");
#define QK4_1 32
#define QR4_1 2
#define QI4_1 (QK4_1 / (4 * QR4_1))
typedef struct {
half2 dm; // dm.x = delta, dm.y = min
uint8_t qs[QK4_1 / 2]; // nibbles / quants
} block_q4_1;
static_assert(sizeof(block_q4_1) == sizeof(ggml_fp16_t) * 2 + QK4_1 / 2, "wrong q4_1 block size/padding");
#define QK5_0 32
#define QR5_0 2
#define QI5_0 (QK5_0 / (4 * QR5_0))
typedef struct {
half d; // delta
uint8_t qh[4]; // 5-th bit of quants
uint8_t qs[QK5_0 / 2]; // nibbles / quants
} block_q5_0;
static_assert(sizeof(block_q5_0) == sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_0 / 2, "wrong q5_0 block size/padding");
#define QK5_1 32
#define QR5_1 2
#define QI5_1 (QK5_1 / (4 * QR5_1))
typedef struct {
half2 dm; // dm.x = delta, dm.y = min
uint8_t qh[4]; // 5-th bit of quants
uint8_t qs[QK5_1 / 2]; // nibbles / quants
} block_q5_1;
static_assert(sizeof(block_q5_1) == 2 * sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_1 / 2, "wrong q5_1 block size/padding");
#define QK8_0 32
#define QR8_0 1
#define QI8_0 (QK8_0 / (4 * QR8_0))
typedef struct {
half d; // delta
int8_t qs[QK8_0]; // quants
} block_q8_0;
static_assert(sizeof(block_q8_0) == sizeof(ggml_fp16_t) + QK8_0, "wrong q8_0 block size/padding");
#define QK8_1 32
#define QR8_1 1
#define QI8_1 (QK8_1 / (4 * QR8_1))
typedef struct {
half2 ds; // ds.x = delta, ds.y = sum
int8_t qs[QK8_0]; // quants
} block_q8_1;
static_assert(sizeof(block_q8_1) == 2*sizeof(ggml_fp16_t) + QK8_0, "wrong q8_1 block size/padding");
// function-pointer types used to dispatch the per-quantization-type device
// routines (dot products and mul_mat tile loaders) from templated kernels
typedef float (*vec_dot_q_cuda_t)(const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs);
typedef void (*allocate_tiles_cuda_t)(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc);
typedef void (*load_tiles_cuda_t)(
const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row);
typedef float (*vec_dot_q_mul_mat_cuda_t)(
const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
const int * __restrict__ y_qs, const half2 * __restrict__ y_ms, const int & i, const int & j, const int & k);
//================================= k-quants
// super-block size: QK_K weights per block (64 in the reduced GGML_QKK_64 build,
// 256 otherwise), each split into 16-weight sub-blocks with their own scale/min
#ifdef GGML_QKK_64
#define QK_K 64
#define K_SCALE_SIZE 4
#else
#define QK_K 256
#define K_SCALE_SIZE 12
#endif
#define QR2_K 4
#define QI2_K (QK_K / (4*QR2_K))
// q2_K: 2-bit quants; per-sub-block 4-bit scale (low nibble) and min (high nibble)
typedef struct {
uint8_t scales[QK_K/16]; // scales and mins, quantized with 4 bits
uint8_t qs[QK_K/4]; // quants
half2 dm; // super-block scale for quantized scales/mins
} block_q2_K;
static_assert(sizeof(block_q2_K) == 2*sizeof(ggml_fp16_t) + QK_K/16 + QK_K/4, "wrong q2_K block size/padding");
#define QR3_K 4
#define QI3_K (QK_K / (4*QR3_K))
// q3_K: 3-bit quants stored as 2 low bits in qs plus a high bit in hmask
typedef struct {
uint8_t hmask[QK_K/8]; // quants - high bit
uint8_t qs[QK_K/4]; // quants - low 2 bits
#ifdef GGML_QKK_64
uint8_t scales[2]; // scales, quantized with 8 bits
#else
uint8_t scales[K_SCALE_SIZE]; // scales, quantized with 6 bits
#endif
half d; // super-block scale
} block_q3_K;
//static_assert(sizeof(block_q3_K) == sizeof(ggml_fp16_t) + QK_K / 4 + QK_K / 8 + K_SCALE_SIZE, "wrong q3_K block size/padding");
#define QR4_K 2
#define QI4_K (QK_K / (4*QR4_K))
// q4_K: 4-bit quants with 6-bit per-sub-block scales/mins packed into `scales`
// (layout differs between the QKK_64 and 256 builds)
#ifdef GGML_QKK_64
typedef struct {
half dm[2]; // super-block scales/mins
uint8_t scales[2]; // 4-bit block scales/mins
uint8_t qs[QK_K/2]; // 4--bit quants
} block_q4_K;
static_assert(sizeof(block_q4_K) == sizeof(half2) + QK_K/2 + 2, "wrong q4_K block size/padding");
#else
typedef struct {
half2 dm; // super-block scale for quantized scales/mins
uint8_t scales[3*QK_K/64]; // scales, quantized with 6 bits
uint8_t qs[QK_K/2]; // 4--bit quants
} block_q4_K;
static_assert(sizeof(block_q4_K) == 2*sizeof(ggml_fp16_t) + 3*QK_K/64 + QK_K/2, "wrong q4_K block size/padding");
#endif
#define QR5_K 2
#define QI5_K (QK_K / (4*QR5_K))
// q5_K: 5-bit quants (low 4 bits in qs, high bit in qh) with packed 6-bit scales/mins
#ifdef GGML_QKK_64
typedef struct {
half d; // super-block scale
int8_t scales[QK_K/16]; // block scales
uint8_t qh[QK_K/8]; // quants, high bit
uint8_t qs[QK_K/2]; // quants, low 4 bits
} block_q5_K;
static_assert(sizeof(block_q5_K) == sizeof(ggml_fp16_t) + QK_K/2 + QK_K/8 + QK_K/16, "wrong q5_K block size/padding");
#else
typedef struct {
half2 dm; // super-block scale for quantized scales/mins
uint8_t scales[K_SCALE_SIZE]; // scales and mins, quantized with 6 bits
uint8_t qh[QK_K/8]; // quants, high bit
uint8_t qs[QK_K/2]; // quants, low 4 bits
} block_q5_K;
static_assert(sizeof(block_q5_K) == 2*sizeof(ggml_fp16_t) + K_SCALE_SIZE + QK_K/2 + QK_K/8, "wrong q5_K block size/padding");
#endif
#define QR6_K 2
#define QI6_K (QK_K / (4*QR6_K))
// q6_K: 6-bit quants (low 4 bits in ql, high 2 bits in qh) with signed 8-bit sub-block scales
typedef struct {
uint8_t ql[QK_K/2]; // quants, lower 4 bits
uint8_t qh[QK_K/4]; // quants, upper 2 bits
int8_t scales[QK_K/16]; // scales
half d; // delta
} block_q6_K;
static_assert(sizeof(block_q6_K) == sizeof(ggml_fp16_t) + 13*QK_K/16, "wrong q6_K block size/padding");
// ---- kernel launch-configuration constants (block sizes for the 1-D elementwise kernels) ----
#define WARP_SIZE 32
#define MATRIX_ROW_PADDING 512 // last row of quant. matrices is a multiple of this to avoid out-of-bounds memory accesses
#define CUDA_ADD_BLOCK_SIZE 256
#define CUDA_MUL_BLOCK_SIZE 256
#define CUDA_GELU_BLOCK_SIZE 256
#define CUDA_SILU_BLOCK_SIZE 256
#define CUDA_CPY_BLOCK_SIZE 32
#define CUDA_SCALE_BLOCK_SIZE 256
#define CUDA_ROPE_BLOCK_SIZE 256
#define CUDA_ALIBI_BLOCK_SIZE 32
#define CUDA_DIAG_MASK_INF_BLOCK_SIZE 32
#define CUDA_QUANTIZE_BLOCK_SIZE 256
#define CUDA_DEQUANTIZE_BLOCK_SIZE 256
// dmmv = dequantize_mul_mat_vec
#ifndef GGML_CUDA_DMMV_X
#define GGML_CUDA_DMMV_X 32
#endif
#ifndef GGML_CUDA_MMV_Y
#define GGML_CUDA_MMV_Y 1
#endif
// number of super-block quants processed per inner-loop iteration in the k-quant dmmv kernels
#ifndef K_QUANTS_PER_ITERATION
#define K_QUANTS_PER_ITERATION 2
#else
static_assert(K_QUANTS_PER_ITERATION == 1 || K_QUANTS_PER_ITERATION == 2, "K_QUANTS_PER_ITERATION must be 1 or 2");
#endif
// per-tensor GPU bookkeeping attached to a ggml_tensor when it lives on device(s)
struct ggml_tensor_extra_gpu {
void * data_device[GGML_CUDA_MAX_DEVICES]; // 1 pointer for each device for split tensors
cudaEvent_t events[GGML_CUDA_MAX_DEVICES]; // events for synchronizing multiple GPUs
};
// ---- process-wide CUDA state (one entry per visible device) ----
static int g_device_count = -1;
static int g_main_device = 0;
static int g_compute_capabilities[GGML_CUDA_MAX_DEVICES];
static float g_tensor_split[GGML_CUDA_MAX_DEVICES] = {0}; // fraction of each split tensor assigned per device
static bool g_mul_mat_q = true;
static void * g_scratch_buffer = nullptr;
static size_t g_scratch_size = 1024*1024*1024; // 1 GB by default
static size_t g_scratch_offset = 0;
static cublasHandle_t g_cublas_handles[GGML_CUDA_MAX_DEVICES] = {nullptr};
static cudaStream_t g_cudaStreams_main[GGML_CUDA_MAX_DEVICES] = { nullptr };
// Elementwise add with modular broadcast on the second operand:
// dst[i] = x[i] + y[i % ky] for every i in [0, kx). One thread per element.
static __global__ void add_f32(const float * x, const float * y, float * dst, const int kx, const int ky) {
    const int idx = blockDim.x*blockIdx.x + threadIdx.x;
    if (idx < kx) {
        dst[idx] = x[idx] + y[idx % ky];
    }
}
// Mixed-precision add: dst[i] = x[i] + y[i], where x and dst are fp16 and y is
// fp32 (converted to half before the half-precision add). One thread per element.
static __global__ void add_f16_f32_f16(const half * x, const float * y, half * dst, const int k) {
    const int idx = blockDim.x*blockIdx.x + threadIdx.x;
    if (idx < k) {
        dst[idx] = __hadd(x[idx], __float2half(y[idx]));
    }
}
// Elementwise multiply with modular broadcast on the second operand:
// dst[i] = x[i] * y[i % ky] for every i in [0, kx). One thread per element.
static __global__ void mul_f32(const float * x, const float * y, float * dst, const int kx, const int ky) {
    const int idx = blockDim.x*blockIdx.x + threadIdx.x;
    if (idx < kx) {
        dst[idx] = x[idx] * y[idx % ky];
    }
}
// GELU activation, tanh approximation:
// gelu(x) = 0.5*x*(1 + tanh(sqrt(2/pi)*(x + 0.044715*x^3))). One thread per element.
static __global__ void gelu_f32(const float * x, float * dst, const int k) {
    const float GELU_COEF_A    = 0.044715f;
    const float SQRT_2_OVER_PI = 0.79788456080286535587989211986876f;

    const int idx = blockDim.x*blockIdx.x + threadIdx.x;
    if (idx < k) {
        const float v = x[idx];
        dst[idx] = 0.5f*v*(1.0f + tanhf(SQRT_2_OVER_PI*v*(1.0f + GELU_COEF_A*v*v)));
    }
}
// SiLU (a.k.a. swish) activation: silu(x) = x * sigmoid(x) = x / (1 + e^-x).
// One thread per element, guarded for the grid tail.
static __global__ void silu_f32(const float * x, float * dst, const int k) {
    const int idx = blockDim.x*blockIdx.x + threadIdx.x;
    if (idx < k) {
        const float v = x[idx];
        dst[idx] = v / (1.0f + expf(-v));
    }
}
// Layer normalization over each row of ncols floats. One 32-thread warp handles
// one row (blockIdx.x*blockDim.y + threadIdx.y selects the row). There is no row
// bounds check, so the launch must cover exactly the number of rows — TODO confirm
// against the host-side launcher (not visible here).
static __global__ void norm_f32(const float * x, float * dst, const int ncols) {
const int row = blockIdx.x*blockDim.y + threadIdx.y;
const int tid = threadIdx.x;
const float eps = 1e-5f;
float mean = 0.0f;
float var = 0.0f;
// per-thread strided partial sums of x and x^2 over the row
for (int col = tid; col < ncols; col += WARP_SIZE) {
const float xi = x[row*ncols + col];
mean += xi;
var += xi * xi;
}
// sum up partial sums with a butterfly warp reduction (needs a full 32-lane warp)
#pragma unroll
for (int mask = 16; mask > 0; mask >>= 1) {
mean += __shfl_xor_sync(0xffffffff, mean, mask, 32);
var += __shfl_xor_sync(0xffffffff, var, mask, 32);
}
mean /= ncols;
var = var / ncols - mean * mean; // Var[x] = E[x^2] - E[x]^2
const float inv_var = rsqrtf(var + eps);
// write back (x - mean) / sqrt(var + eps)
for (int col = tid; col < ncols; col += WARP_SIZE) {
dst[row*ncols + col] = (x[row*ncols + col] - mean) * inv_var;
}
}
// RMS normalization over each row of ncols floats: dst = x / sqrt(mean(x^2) + eps).
// One 32-thread warp per row, same launch layout as norm_f32; no row bounds check.
static __global__ void rms_norm_f32(const float * x, float * dst, const int ncols, const float eps) {
const int row = blockIdx.x*blockDim.y + threadIdx.y;
const int tid = threadIdx.x;
float tmp = 0.0f; // partial sum for thread in warp
// per-thread strided partial sum of squares over the row
for (int col = tid; col < ncols; col += WARP_SIZE) {
const float xi = x[row*ncols + col];
tmp += xi * xi;
}
// sum up partial sums with a butterfly warp reduction (needs a full 32-lane warp)
#pragma unroll
for (int mask = 16; mask > 0; mask >>= 1) {
tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
}
const float mean = tmp / ncols;
const float scale = rsqrtf(mean + eps);
for (int col = tid; col < ncols; col += WARP_SIZE) {
dst[row*ncols + col] = scale * x[row*ncols + col];
}
}
// Dequantize two q4_0 values from block ib: byte iqs holds quant iqs in its low
// nibble (-> v.x) and quant iqs + QK4_0/2 in its high nibble (-> v.y);
// result is (q - 8) * d. dfloat2 is half2 when GGML_CUDA_F16 is defined, float2 otherwise.
static __device__ __forceinline__ void dequantize_q4_0(const void * vx, const int ib, const int iqs, dfloat2 & v){
const block_q4_0 * x = (const block_q4_0 *) vx;
const dfloat d = x[ib].d;
const int vui = x[ib].qs[iqs];
v.x = vui & 0xF;
v.y = vui >> 4;
#ifdef GGML_CUDA_F16
v = __hsub2(v, {8.0f, 8.0f});
v = __hmul2(v, {d, d});
#else
v.x = (v.x - 8.0f) * d;
v.y = (v.y - 8.0f) * d;
#endif // GGML_CUDA_F16
}
// Dequantize two q4_1 values from block ib (same nibble layout as q4_0);
// result is q * d + m with d = dm.x (scale) and m = dm.y (min).
static __device__ __forceinline__ void dequantize_q4_1(const void * vx, const int ib, const int iqs, dfloat2 & v){
const block_q4_1 * x = (const block_q4_1 *) vx;
const dfloat d = __low2half(x[ib].dm);
const dfloat m = __high2half(x[ib].dm);
const int vui = x[ib].qs[iqs];
v.x = vui & 0xF;
v.y = vui >> 4;
#ifdef GGML_CUDA_F16
v = __hmul2(v, {d, d});
v = __hadd2(v, {m, m});
#else
v.x = (v.x * d) + m;
v.y = (v.y * d) + m;
#endif // GGML_CUDA_F16
}
// Dequantize two q5_0 values from block ib: 4 low bits come from the qs nibbles,
// the 5th bit of quant iqs / iqs+16 is extracted from the packed qh word;
// result is (q - 16) * d.
static __device__ __forceinline__ void dequantize_q5_0(const void * vx, const int ib, const int iqs, dfloat2 & v){
const block_q5_0 * x = (const block_q5_0 *) vx;
const dfloat d = x[ib].d;
uint32_t qh;
memcpy(&qh, x[ib].qh, sizeof(qh)); // qh may be unaligned -> copy before use
const int xh_0 = ((qh >> (iqs + 0)) << 4) & 0x10;
const int xh_1 = ((qh >> (iqs + 12)) ) & 0x10;
v.x = ((x[ib].qs[iqs] & 0xf) | xh_0);
v.y = ((x[ib].qs[iqs] >> 4) | xh_1);
#ifdef GGML_CUDA_F16
v = __hsub2(v, {16.0f, 16.0f});
v = __hmul2(v, {d, d});
#else
v.x = (v.x - 16.0f) * d;
v.y = (v.y - 16.0f) * d;
#endif // GGML_CUDA_F16
}
// Dequantize two q5_1 values from block ib (same 5-bit layout as q5_0);
// result is q * d + m with d = dm.x (scale) and m = dm.y (min).
static __device__ __forceinline__ void dequantize_q5_1(const void * vx, const int ib, const int iqs, dfloat2 & v){
const block_q5_1 * x = (const block_q5_1 *) vx;
const dfloat d = __low2half(x[ib].dm);
const dfloat m = __high2half(x[ib].dm);
uint32_t qh;
memcpy(&qh, x[ib].qh, sizeof(qh)); // qh may be unaligned -> copy before use
const int xh_0 = ((qh >> (iqs + 0)) << 4) & 0x10;
const int xh_1 = ((qh >> (iqs + 12)) ) & 0x10;
v.x = ((x[ib].qs[iqs] & 0xf) | xh_0);
v.y = ((x[ib].qs[iqs] >> 4) | xh_1);
#ifdef GGML_CUDA_F16
v = __hmul2(v, {d, d});
v = __hadd2(v, {m, m});
#else
v.x = (v.x * d) + m;
v.y = (v.y * d) + m;
#endif // GGML_CUDA_F16
}
// Dequantize two consecutive q8_0 values (iqs and iqs+1) from block ib: q * d.
static __device__ __forceinline__ void dequantize_q8_0(const void * vx, const int ib, const int iqs, dfloat2 & v){
const block_q8_0 * x = (const block_q8_0 *) vx;
const dfloat d = x[ib].d;
v.x = x[ib].qs[iqs + 0];
v.y = x[ib].qs[iqs + 1];
#ifdef GGML_CUDA_F16
v = __hmul2(v, {d, d});
#else
v.x *= d;
v.y *= d;
#endif // GGML_CUDA_F16
}
//================================== k-quants
// Dequantize one q2_K super-block per CUDA block into yy (QK_K floats per block).
// Each thread writes 4 (QK_K==256) or 2 (QK_K==64) outputs: value = dall*scale*q - dmin*min.
// Thread indexing implies 64 threads/block for QK_K==256 and 32 for QK_K==64 —
// TODO confirm against the host-side launcher (not visible here).
static __global__ void dequantize_block_q2_K(const void * __restrict__ vx, float * __restrict__ yy) {
const int i = blockIdx.x;
const block_q2_K * x = (const block_q2_K *) vx;
const int tid = threadIdx.x;
#if QK_K == 256
const int n = tid/32;
const int l = tid - 32*n;
const int is = 8*n + l/16;
const uint8_t q = x[i].qs[32*n + l];
float * y = yy + i*QK_K + 128*n;
float dall = __low2half(x[i].dm);
float dmin = __high2half(x[i].dm);
// each qs byte carries four 2-bit quants for offsets 0/32/64/96
y[l+ 0] = dall * (x[i].scales[is+0] & 0xF) * ((q >> 0) & 3) - dmin * (x[i].scales[is+0] >> 4);
y[l+32] = dall * (x[i].scales[is+2] & 0xF) * ((q >> 2) & 3) - dmin * (x[i].scales[is+2] >> 4);
y[l+64] = dall * (x[i].scales[is+4] & 0xF) * ((q >> 4) & 3) - dmin * (x[i].scales[is+4] >> 4);
y[l+96] = dall * (x[i].scales[is+6] & 0xF) * ((q >> 6) & 3) - dmin * (x[i].scales[is+6] >> 4);
#else
const int is = tid/16; // 0 or 1
const int il = tid%16; // 0...15
const uint8_t q = x[i].qs[il] >> (2*is);
float * y = yy + i*QK_K + 16*is + il;
float dall = __low2half(x[i].dm);
float dmin = __high2half(x[i].dm);
y[ 0] = dall * (x[i].scales[is+0] & 0xF) * ((q >> 0) & 3) - dmin * (x[i].scales[is+0] >> 4);
y[32] = dall * (x[i].scales[is+2] & 0xF) * ((q >> 4) & 3) - dmin * (x[i].scales[is+2] >> 4);
#endif
}
// Dequantize one q3_K super-block per CUDA block into yy. Quants are rebuilt from
// 2 low bits (qs) plus a high bit (hmask); the 6-bit signed scale is reassembled
// from the packed scales array, then value = d * (scale - 32) * (q - 4*highbit_missing).
static __global__ void dequantize_block_q3_K(const void * __restrict__ vx, float * __restrict__ yy) {
const int i = blockIdx.x;
const block_q3_K * x = (const block_q3_K *) vx;
#if QK_K == 256
const int r = threadIdx.x/4;
const int tid = r/2;
const int is0 = r%2;
const int l0 = 16*is0 + 4*(threadIdx.x%4);
const int n = tid / 4;
const int j = tid - 4*n;
uint8_t m = 1 << (4*n + j);
int is = 8*n + 2*j + is0;
int shift = 2*j;
// reassemble the 6-bit scale: low 4 bits and high 2 bits live in different bytes
int8_t us = is < 4 ? (x[i].scales[is-0] & 0xF) | (((x[i].scales[is+8] >> 0) & 3) << 4) :
is < 8 ? (x[i].scales[is-0] & 0xF) | (((x[i].scales[is+4] >> 2) & 3) << 4) :
is < 12 ? (x[i].scales[is-8] >> 4) | (((x[i].scales[is+0] >> 4) & 3) << 4) :
(x[i].scales[is-8] >> 4) | (((x[i].scales[is-4] >> 6) & 3) << 4);
float d_all = x[i].d;
float dl = d_all * (us - 32);
float * y = yy + i*QK_K + 128*n + 32*j;
const uint8_t * q = x[i].qs + 32*n;
const uint8_t * hm = x[i].hmask;
// a clear hmask bit means the stored 2-bit value must be shifted down by 4
for (int l = l0; l < l0+4; ++l) y[l] = dl * ((int8_t)((q[l] >> shift) & 3) - ((hm[l] & m) ? 0 : 4));
#else
const int tid = threadIdx.x;
const int is = tid/16; // 0 or 1
const int il = tid%16; // 0...15
const int im = il/8; // 0...1
const int in = il%8; // 0...7
float * y = yy + i*QK_K + 16*is + il;
const uint8_t q = x[i].qs[il] >> (2*is);
const uint8_t h = x[i].hmask[in] >> (2*is + im);
const float d = (float)x[i].d;
if (is == 0) {
y[ 0] = d * ((x[i].scales[0] & 0xF) - 8) * ((int8_t)((q >> 0) & 3) - ((h >> 0) & 1 ? 0 : 4));
y[32] = d * ((x[i].scales[1] & 0xF) - 8) * ((int8_t)((q >> 4) & 3) - ((h >> 4) & 1 ? 0 : 4));
} else {
y[ 0] = d * ((x[i].scales[0] >> 4) - 8) * ((int8_t)((q >> 0) & 3) - ((h >> 0) & 1 ? 0 : 4));
y[32] = d * ((x[i].scales[1] >> 4) - 8) * ((int8_t)((q >> 4) & 3) - ((h >> 4) & 1 ? 0 : 4));
}
#endif
}
#if QK_K == 256
// Extract the 6-bit scale (d) and min (m) of sub-block j (0..7) from the packed
// 12-byte q4_K/q5_K scale array: sub-blocks 0-3 are stored directly, 4-7 are
// split across low nibbles and the top 2 bits of the first 8 bytes.
static inline __device__ void get_scale_min_k4(int j, const uint8_t * q, uint8_t & d, uint8_t & m) {
if (j < 4) {
d = q[j] & 63; m = q[j + 4] & 63;
} else {
d = (q[j+4] & 0xF) | ((q[j-4] >> 6) << 4);
m = (q[j+4] >> 4) | ((q[j-0] >> 6) << 4);
}
}
#endif
// Dequantize one q4_K super-block per CUDA block into yy:
// value = dall*scale*q - dmin*min, with per-sub-block scale/min from get_scale_min_k4.
static __global__ void dequantize_block_q4_K(const void * __restrict__ vx, float * __restrict__ yy) {
const block_q4_K * x = (const block_q4_K *) vx;
const int i = blockIdx.x;
#if QK_K == 256
// assume 32 threads
const int tid = threadIdx.x;
const int il = tid/8;
const int ir = tid%8;
const int is = 2*il;
const int n = 4;
float * y = yy + i*QK_K + 64*il + n*ir;
const float dall = __low2half(x[i].dm);
const float dmin = __high2half(x[i].dm);
const uint8_t * q = x[i].qs + 32*il + n*ir;
uint8_t sc, m;
get_scale_min_k4(is + 0, x[i].scales, sc, m);
const float d1 = dall * sc; const float m1 = dmin * m;
get_scale_min_k4(is + 1, x[i].scales, sc, m);
const float d2 = dall * sc; const float m2 = dmin * m;
// low nibbles feed outputs 0..31 of the 64-chunk, high nibbles feed 32..63
for (int l = 0; l < n; ++l) {
y[l + 0] = d1 * (q[l] & 0xF) - m1;
y[l +32] = d2 * (q[l] >> 4) - m2;
}
#else
const int tid = threadIdx.x;
const uint8_t * q = x[i].qs;
float * y = yy + i*QK_K;
const float d = (float)x[i].dm[0];
const float m = (float)x[i].dm[1];
y[tid+ 0] = d * (x[i].scales[0] & 0xF) * (q[tid] & 0xF) - m * (x[i].scales[0] >> 4);
y[tid+32] = d * (x[i].scales[1] & 0xF) * (q[tid] >> 4) - m * (x[i].scales[1] >> 4);
#endif
}
// Dequantize one q5_K super-block per CUDA block into yy: the 5th bit comes from
// qh, per-sub-block scale/min from get_scale_min_k4; value = dall*scale*q - dmin*min.
static __global__ void dequantize_block_q5_K(const void * __restrict__ vx, float * __restrict__ yy) {
const block_q5_K * x = (const block_q5_K *) vx;
const int i = blockIdx.x;
#if QK_K == 256
// assume 64 threads - this is very slightly better than the one below
const int tid = threadIdx.x;
const int il = tid/16; // il is in 0...3
const int ir = tid%16; // ir is in 0...15
const int is = 2*il; // is is in 0...6
float * y = yy + i*QK_K + 64*il + 2*ir;
const float dall = __low2half(x[i].dm);
const float dmin = __high2half(x[i].dm);
const uint8_t * ql = x[i].qs + 32*il + 2*ir;
const uint8_t * qh = x[i].qh + 2*ir;
uint8_t sc, m;
get_scale_min_k4(is + 0, x[i].scales, sc, m);
const float d1 = dall * sc; const float m1 = dmin * m;
get_scale_min_k4(is + 1, x[i].scales, sc, m);
const float d2 = dall * sc; const float m2 = dmin * m;
uint8_t hm = 1 << (2*il); // selects this sub-block's high bit within qh
y[ 0] = d1 * ((ql[ 0] & 0xF) + (qh[ 0] & hm ? 16 : 0)) - m1;
y[ 1] = d1 * ((ql[ 1] & 0xF) + (qh[ 1] & hm ? 16 : 0)) - m1;
hm <<= 1;
y[32] = d2 * ((ql[ 0] >> 4) + (qh[ 0] & hm ? 16 : 0)) - m2;
y[33] = d2 * ((ql[ 1] >> 4) + (qh[ 1] & hm ? 16 : 0)) - m2;
#else
const int tid = threadIdx.x;
const uint8_t q = x[i].qs[tid];
const int im = tid/8; // 0...3
const int in = tid%8; // 0...7
const int is = tid/16; // 0 or 1
const uint8_t h = x[i].qh[in] >> im;
const float d = x[i].d;
float * y = yy + i*QK_K + tid;
y[ 0] = d * x[i].scales[is+0] * ((q & 0xF) - ((h >> 0) & 1 ? 0 : 16));
y[32] = d * x[i].scales[is+2] * ((q >> 4) - ((h >> 4) & 1 ? 0 : 16));
#endif
}
// Dequantize one q6_K super-block per CUDA block into yy: 6-bit quants are
// rebuilt from ql (low 4 bits) and qh (high 2 bits), re-centered by -32 and
// scaled by d * signed sub-block scale.
static __global__ void dequantize_block_q6_K(const void * __restrict__ vx, float * __restrict__ yy) {
const block_q6_K * x = (const block_q6_K *) vx;
const int i = blockIdx.x;
#if QK_K == 256
// assume 64 threads - this is very slightly better than the one below
const int tid = threadIdx.x;
const int ip = tid/32; // ip is 0 or 1
const int il = tid - 32*ip; // 0...32
const int is = 8*ip + il/16;
float * y = yy + i*QK_K + 128*ip + il;
const float d = x[i].d;
const uint8_t * ql = x[i].ql + 64*ip + il;
const uint8_t qh = x[i].qh[32*ip + il];
const int8_t * sc = x[i].scales + is;
y[ 0] = d * sc[0] * ((int8_t)((ql[ 0] & 0xF) | (((qh >> 0) & 3) << 4)) - 32);
y[32] = d * sc[2] * ((int8_t)((ql[32] & 0xF) | (((qh >> 2) & 3) << 4)) - 32);
y[64] = d * sc[4] * ((int8_t)((ql[ 0] >> 4) | (((qh >> 4) & 3) << 4)) - 32);
y[96] = d * sc[6] * ((int8_t)((ql[32] >> 4) | (((qh >> 6) & 3) << 4)) - 32);
#else
// assume 32 threads
const int tid = threadIdx.x;
const int ip = tid/16; // 0 or 1
const int il = tid - 16*ip; // 0...15
float * y = yy + i*QK_K + 16*ip + il;
const float d = x[i].d;
const uint8_t ql = x[i].ql[16*ip + il];
const uint8_t qh = x[i].qh[il] >> (2*ip);
const int8_t * sc = x[i].scales;
y[ 0] = d * sc[ip+0] * ((int8_t)((ql & 0xF) | (((qh >> 0) & 3) << 4)) - 32);
y[32] = d * sc[ip+2] * ((int8_t)((ql >> 4) | (((qh >> 4) & 3) << 4)) - 32);
#endif
}
// Fused dequantize(q2_K) * vector product: each row of the quantized matrix vx
// is reduced by one 32-thread warp; blockIdx.y*blockDim.y + threadIdx.y selects
// the row, ncols must be a multiple of QK_K, and dst[row] receives the dot product.
static __global__ void dequantize_mul_mat_vec_q2_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols, int nrows) {

    static_assert(16%K_QUANTS_PER_ITERATION == 0, "16 must be divisible by K_QUANTS_PER_ITERATION");

    const int row = blockIdx.y*blockDim.y + threadIdx.y;
    // bug fix: was "row > nrows", which let the thread with row == nrows read
    // past the end of vx (and write past dst) when nrows % blockDim.y != 0
    if (row >= nrows) return;

    const int num_blocks_per_row = ncols / QK_K;
    const int ib0 = row*num_blocks_per_row;

    const block_q2_K * x = (const block_q2_K *)vx + ib0;

    float tmp = 0; // partial sum for thread in warp

#if QK_K == 256
    const int tid = threadIdx.x/K_QUANTS_PER_ITERATION;  // 0...31 or 0...15
    const int ix  = threadIdx.x%K_QUANTS_PER_ITERATION;  // 0 or 0,1

    const int step = 16/K_QUANTS_PER_ITERATION;

    const int im = tid/step;        // 0 or 1. 0 computes 0..., 1 computes 128...
    const int in = tid - step*im;   // 0...15 or 0...7

    const int l0 = K_QUANTS_PER_ITERATION*in; // 0...15 or 0...14 in steps of 2
    const int q_offset = 32*im + l0;
    const int s_offset = 8*im;
    const int y_offset = 128*im + l0;

    uint32_t aux[4];
    const uint8_t * d = (const uint8_t *)aux;       // eight 4-bit scales
    const uint8_t * m = (const uint8_t *)(aux + 2); // eight 4-bit mins

    for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) {

        const float   * y = yy + i * QK_K + y_offset;
        const uint8_t * q = x[i].qs + q_offset;

        const float dall = __low2half(x[i].dm);
        const float dmin = __high2half(x[i].dm);

        // unpack the packed 4-bit scales/mins for the 8 sub-blocks this thread touches
        const uint32_t * a = (const uint32_t *)(x[i].scales + s_offset);
        aux[0] = a[0] & 0x0f0f0f0f;
        aux[1] = a[1] & 0x0f0f0f0f;
        aux[2] = (a[0] >> 4) & 0x0f0f0f0f;
        aux[3] = (a[1] >> 4) & 0x0f0f0f0f;

        float sum1 = 0, sum2 = 0;
        for (int l = 0; l < K_QUANTS_PER_ITERATION; ++l) {
            // each qs byte carries four 2-bit quants (offsets 0/32/64/96 within the half super-block)
            sum1 += y[l+ 0] * d[0] * ((q[l+ 0] >> 0) & 3)
                  + y[l+32] * d[2] * ((q[l+ 0] >> 2) & 3)
                  + y[l+64] * d[4] * ((q[l+ 0] >> 4) & 3)
                  + y[l+96] * d[6] * ((q[l+ 0] >> 6) & 3)
                  + y[l+16] * d[1] * ((q[l+16] >> 0) & 3)
                  + y[l+48] * d[3] * ((q[l+16] >> 2) & 3)
                  + y[l+80] * d[5] * ((q[l+16] >> 4) & 3)
                  + y[l+112] * d[7] * ((q[l+16] >> 6) & 3);
            sum2 += y[l+ 0] * m[0] + y[l+32] * m[2] + y[l+64] * m[4] + y[l+96] * m[6]
                  + y[l+16] * m[1] + y[l+48] * m[3] + y[l+80] * m[5] + y[l+112] * m[7];
        }
        tmp += dall * sum1 - dmin * sum2;
    }
#else
    const int tid = threadIdx.x/(2*K_QUANTS_PER_ITERATION);  // 0...15 or 0...7
    const int ix  = threadIdx.x%(2*K_QUANTS_PER_ITERATION);  // 0....1 or 0...3
    const int offset = tid * K_QUANTS_PER_ITERATION;

    uint32_t uaux[2];
    const uint8_t * d = (const uint8_t *)uaux;

    for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) {

        const float   * y = yy + i * QK_K + offset;
        const uint8_t * q = x[i].qs + offset;
        const uint32_t * s = (const uint32_t *)x[i].scales;

        uaux[0] = s[0] & 0x0f0f0f0f;        // d[0..3]: scales
        uaux[1] = (s[0] >> 4) & 0x0f0f0f0f; // d[4..7]: mins

        const float2 dall = __half22float2(x[i].dm);

        float sum1 = 0, sum2 = 0;
        for (int l = 0; l < K_QUANTS_PER_ITERATION; ++l) {
            const uint8_t ql = q[l];
            sum1 += y[l+ 0] * d[0] * ((ql >> 0) & 3)
                  + y[l+16] * d[1] * ((ql >> 2) & 3)
                  + y[l+32] * d[2] * ((ql >> 4) & 3)
                  + y[l+48] * d[3] * ((ql >> 6) & 3);
            sum2 += y[l+0] * d[4] + y[l+16] * d[5] + y[l+32] * d[6] + y[l+48] * d[7];
        }
        tmp += dall.x * sum1 - dall.y * sum2;
    }
#endif

    // sum up partial sums across the warp and write back the result once
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
    }

    if (threadIdx.x == 0) {
        dst[row] = tmp;
    }
}
// Fused dequantize(q3_K) * vector product: one 32-thread warp per matrix row
// (blockIdx.y*blockDim.y + threadIdx.y selects the row); dst[row] = <row, yy>.
static __global__ void dequantize_mul_mat_vec_q3_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols, int nrows) {

    const int row = blockIdx.y*blockDim.y + threadIdx.y;
    // bug fix: was "row > nrows", which let the thread with row == nrows read
    // past the end of vx (and write past dst) when nrows % blockDim.y != 0
    if (row >= nrows) return;

    const int num_blocks_per_row = ncols / QK_K;
    const int ib0 = row*num_blocks_per_row;

    const block_q3_K * x = (const block_q3_K *)vx + ib0;

    float tmp = 0; // partial sum for thread in warp

#if QK_K == 256

    const uint16_t kmask1 = 0x0303;
    const uint16_t kmask2 = 0x0f0f;

    const int tid = threadIdx.x/K_QUANTS_PER_ITERATION;  // 0...31 or 0...16
    const int ix  = threadIdx.x%K_QUANTS_PER_ITERATION;  // 0 or 0,1

    const int n    = K_QUANTS_PER_ITERATION;             // iterations in the inner loop
    const int step = 16/K_QUANTS_PER_ITERATION;
    const int im   = tid/step;                           // 0 or 1. 0 computes 0..., 1 computes 128...
    const int in   = tid - step*im;                      // 0....15 or 0...7

    const uint8_t m = 1 << (4*im); // hmask bit for this half of the super-block

    const int l0 = n*in;                                 // 0...15 or 0...14 in steps of 2
    const int q_offset = 32*im + l0;
    const int y_offset = 128*im + l0;

    uint16_t utmp[4];
    const int8_t * s = (const int8_t *)utmp; // eight reassembled 6-bit signed scales

    const uint16_t s_shift = 4*im;

    for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) {

        const float   * y = yy + i * QK_K + y_offset;
        const uint8_t * q = x[i].qs + q_offset;
        const uint8_t * h = x[i].hmask + l0;

        // reassemble the 6-bit scales: low 4 bits and high 2 bits are stored separately
        const uint16_t * a = (const uint16_t *)x[i].scales;
        utmp[0] = ((a[0] >> s_shift) & kmask2) | (((a[4] >> (s_shift + 0)) & kmask1) << 4);
        utmp[1] = ((a[1] >> s_shift) & kmask2) | (((a[5] >> (s_shift + 0)) & kmask1) << 4);
        utmp[2] = ((a[2] >> s_shift) & kmask2) | (((a[4] >> (s_shift + 2)) & kmask1) << 4);
        utmp[3] = ((a[3] >> s_shift) & kmask2) | (((a[5] >> (s_shift + 2)) & kmask1) << 4);

        const float d = x[i].d;

        float sum = 0;
        for (int l = 0; l < n; ++l) {
            // a clear hmask bit means the 2-bit value is shifted down by 4
            sum += y[l+ 0] * (s[0] - 32) * (((q[l] >> 0) & 3) - (h[l] & (m << 0) ? 0 : 4))
                 + y[l+32] * (s[2] - 32) * (((q[l] >> 2) & 3) - (h[l] & (m << 1) ? 0 : 4))
                 + y[l+64] * (s[4] - 32) * (((q[l] >> 4) & 3) - (h[l] & (m << 2) ? 0 : 4))
                 + y[l+96] * (s[6] - 32) * (((q[l] >> 6) & 3) - (h[l] & (m << 3) ? 0 : 4));
            sum += y[l+16] * (s[1] - 32) * (((q[l+16] >> 0) & 3) - (h[l+16] & (m << 0) ? 0 : 4))
                 + y[l+48] * (s[3] - 32) * (((q[l+16] >> 2) & 3) - (h[l+16] & (m << 1) ? 0 : 4))
                 + y[l+80] * (s[5] - 32) * (((q[l+16] >> 4) & 3) - (h[l+16] & (m << 2) ? 0 : 4))
                 + y[l+112] * (s[7] - 32) * (((q[l+16] >> 6) & 3) - (h[l+16] & (m << 3) ? 0 : 4));
        }
        tmp += d * sum;
    }
#else
    const int tid = threadIdx.x/(2*K_QUANTS_PER_ITERATION);  // 0...15 or 0...7
    const int ix  = threadIdx.x%(2*K_QUANTS_PER_ITERATION);  // 0....1 or 0...3
    const int offset = tid * K_QUANTS_PER_ITERATION;         // 0...15 or 0...14
    const int in = offset/8;                                 // 0 or 1
    const int im = offset%8;                                 // 0...7

    for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) {

        const float   * y = yy + i * QK_K + offset;
        const uint8_t * q = x[i].qs + offset;
        const uint8_t * s = x[i].scales;

        const float dall = (float)x[i].d;

        float sum = 0;
        for (int l = 0; l < K_QUANTS_PER_ITERATION; ++l) {
            const uint8_t hl = x[i].hmask[im+l] >> in;
            const uint8_t ql = q[l];
            sum += y[l+ 0] * dall * ((s[0] & 0xF) - 8) * ((int8_t)((ql >> 0) & 3) - ((hl >> 0) & 1 ? 0 : 4))
                 + y[l+16] * dall * ((s[0] >>  4) - 8) * ((int8_t)((ql >> 2) & 3) - ((hl >> 2) & 1 ? 0 : 4))
                 + y[l+32] * dall * ((s[1] & 0xF) - 8) * ((int8_t)((ql >> 4) & 3) - ((hl >> 4) & 1 ? 0 : 4))
                 + y[l+48] * dall * ((s[1] >>  4) - 8) * ((int8_t)((ql >> 6) & 3) - ((hl >> 6) & 1 ? 0 : 4));
        }
        tmp += sum;
    }
#endif

    // sum up partial sums across the warp and write back the result once
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
    }

    if (threadIdx.x == 0) {
        dst[row] = tmp;
    }
}
// Fused dequantize(q4_K) * vector product: one 32-thread warp per matrix row
// (blockIdx.y*blockDim.y + threadIdx.y selects the row); dst[row] = <row, yy>.
static __global__ void dequantize_mul_mat_vec_q4_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols, int nrows) {

    const int row = blockIdx.y*blockDim.y + threadIdx.y;
    // bug fix: was "row > nrows", which let the thread with row == nrows read
    // past the end of vx (and write past dst) when nrows % blockDim.y != 0
    if (row >= nrows) return;

    const int num_blocks_per_row = ncols / QK_K;
    const int ib0 = row*num_blocks_per_row;

    const block_q4_K * x = (const block_q4_K *)vx + ib0;

#if QK_K == 256
    const uint16_t kmask1 = 0x3f3f;
    const uint16_t kmask2 = 0x0f0f;
    const uint16_t kmask3 = 0xc0c0;

    const int tid = threadIdx.x/K_QUANTS_PER_ITERATION;  // 0...31 or 0...16
    const int ix  = threadIdx.x%K_QUANTS_PER_ITERATION;  // 0 or 0,1

    const int step = 8/K_QUANTS_PER_ITERATION;           // 8 or 4

    const int il  = tid/step;                            // 0...3
    const int ir  = tid - step*il;                       // 0...7 or 0...3
    const int n   = 2 * K_QUANTS_PER_ITERATION;          // 2 or 4

    const int im = il/2;  // 0 or 1. 0 computes 0,32 + 128,160, 1 computes 64,96 + 192,224
    const int in = il%2;

    const int l0 = n*(2*ir + in);
    const int q_offset = 32*im + l0;
    const int y_offset = 64*im + l0;

    uint16_t aux[4];
    const uint8_t * sc = (const uint8_t *)aux; // unpacked 6-bit scales/mins

#if K_QUANTS_PER_ITERATION == 2
    uint32_t q32[4];
    const uint8_t * q4 = (const uint8_t *)q32;
#else
    uint16_t q16[4];
    const uint8_t * q4 = (const uint8_t *)q16;
#endif

    float tmp = 0; // partial sum for thread in warp

    for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) {

        const float * y1 = yy + i*QK_K + y_offset;
        const float * y2 = y1 + 128;

        const float dall = __low2half(x[i].dm);
        const float dmin = __high2half(x[i].dm);

        // extract the 6-bit scales (sc[0],sc[1],sc[4],sc[5]) and mins (sc[2],sc[3],sc[6],sc[7])
        const uint16_t * a = (const uint16_t *)x[i].scales;
        aux[0] = a[im+0] & kmask1;
        aux[1] = a[im+2] & kmask1;
        aux[2] = ((a[im+4] >> 0) & kmask2) | ((a[im+0] & kmask3) >> 2);
        aux[3] = ((a[im+4] >> 4) & kmask2) | ((a[im+2] & kmask3) >> 2);

#if K_QUANTS_PER_ITERATION == 2
        const uint32_t * q1 = (const uint32_t *)(x[i].qs + q_offset);
        const uint32_t * q2 = q1 + 16;

        // split the packed nibbles; the high-nibble halves are rescaled by 1/16 below
        q32[0] = q1[0] & 0x0f0f0f0f;
        q32[1] = q1[0] & 0xf0f0f0f0;
        q32[2] = q2[0] & 0x0f0f0f0f;
        q32[3] = q2[0] & 0xf0f0f0f0;

        float4 s = {0.f, 0.f, 0.f, 0.f};
        float smin = 0;
        for (int l = 0; l < 4; ++l) {
            s.x += y1[l] * q4[l+0]; s.y += y1[l+32] * q4[l+ 4];
            s.z += y2[l] * q4[l+8]; s.w += y2[l+32] * q4[l+12];
            smin += y1[l] * sc[2] + y1[l+32] * sc[3] + y2[l] * sc[6] + y2[l+32] * sc[7];
        }
        tmp += dall * (s.x * sc[0] + s.y * sc[1] * 1.f/16.f + s.z * sc[4] + s.w * sc[5] * 1.f/16.f) - dmin * smin;
#else
        const uint16_t * q1 = (const uint16_t *)(x[i].qs + q_offset);
        const uint16_t * q2 = q1 + 32;

        q16[0] = q1[0] & 0x0f0f;
        q16[1] = q1[0] & 0xf0f0;
        q16[2] = q2[0] & 0x0f0f;
        q16[3] = q2[0] & 0xf0f0;

        float4 s = {0.f, 0.f, 0.f, 0.f};
        float smin = 0;
        for (int l = 0; l < 2; ++l) {
            s.x += y1[l] * q4[l+0]; s.y += y1[l+32] * q4[l+2];
            s.z += y2[l] * q4[l+4]; s.w += y2[l+32] * q4[l+6];
            smin += y1[l] * sc[2] + y1[l+32] * sc[3] + y2[l] * sc[6] + y2[l+32] * sc[7];
        }
        tmp += dall * (s.x * sc[0] + s.y * sc[1] * 1.f/16.f + s.z * sc[4] + s.w * sc[5] * 1.f/16.f) - dmin * smin;
#endif

    }
#else
    const int tid = threadIdx.x/(2*K_QUANTS_PER_ITERATION);  // 0...15
    const int ix  = threadIdx.x%(2*K_QUANTS_PER_ITERATION);

    const int step = tid * K_QUANTS_PER_ITERATION;

    uint16_t aux16[2];
    const uint8_t * s = (const uint8_t *)aux16;

    float tmp = 0;

    for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) {
        const uint8_t * q = x[i].qs + step;
        const float   * y = yy + i*QK_K + step;
        const uint16_t * a = (const uint16_t *)x[i].scales;
        aux16[0] = a[0] & 0x0f0f;
        aux16[1] = (a[0] >> 4) & 0x0f0f;
        const float d = (float)x[i].dm[0];
        const float m = (float)x[i].dm[1];
        float sum = 0.f;
        for (int j = 0; j < K_QUANTS_PER_ITERATION; ++j) {
            sum += y[j+ 0] * (d * s[0] * (q[j+ 0] & 0xF) - m * s[2])
                 + y[j+16] * (d * s[0] * (q[j+16] & 0xF) - m * s[2])
                 + y[j+32] * (d * s[1] * (q[j+ 0] >> 4) - m * s[3])
                 + y[j+48] * (d * s[1] * (q[j+16] >> 4) - m * s[3]);
        }
        tmp += sum;
    }
#endif

    // sum up partial sums and write back result
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
    }

    // was "tid == 0": that is true for K_QUANTS_PER_ITERATION threads and caused
    // redundant duplicate global writes; threadIdx.x == 0 matches the sibling kernels
    if (threadIdx.x == 0) {
        dst[row] = tmp;
    }
}
// Fused dequantize(q5_K) * vector product. Unlike the q2/q3/q4/q6 variants this
// kernel maps exactly one block to one matrix row via blockIdx.x (no nrows
// parameter, so no row bounds check is needed); dst[row] = <row, yy>.
static __global__ void dequantize_mul_mat_vec_q5_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols) {
const int row = blockIdx.x;
const int num_blocks_per_row = ncols / QK_K;
const int ib0 = row*num_blocks_per_row;
const block_q5_K * x = (const block_q5_K *)vx + ib0;
float tmp = 0; // partial sum for thread in warp
#if QK_K == 256
const uint16_t kmask1 = 0x3f3f;
const uint16_t kmask2 = 0x0f0f;
const uint16_t kmask3 = 0xc0c0;
const int tid = threadIdx.x/2; // 0...15
const int ix = threadIdx.x%2;
const int il = tid/4; // 0...3
const int ir = tid - 4*il;// 0...3
const int n = 2;
const int im = il/2; // 0 or 1. 0 computes 0,32 + 128,160, 1 computes 64,96 + 192,224
const int in = il%2;
const int l0 = n*(2*ir + in);
const int q_offset = 32*im + l0;
const int y_offset = 64*im + l0;
const uint8_t hm1 = 1 << (2*im); // high-bit masks for the two sub-blocks handled here
const uint8_t hm2 = hm1 << 4;
uint16_t aux[4];
const uint8_t * sc = (const uint8_t *)aux; // unpacked 6-bit scales/mins
uint16_t q16[8];
const uint8_t * q4 = (const uint8_t *)q16;
for (int i = ix; i < num_blocks_per_row; i += 2) {
const uint8_t * ql1 = x[i].qs + q_offset;
const uint8_t * qh = x[i].qh + l0;
const float * y1 = yy + i*QK_K + y_offset;
const float * y2 = y1 + 128;
const float dall = __low2half(x[i].dm);
const float dmin = __high2half(x[i].dm);
// extract the 6-bit scales (sc[0],sc[1],sc[4],sc[5]) and mins (sc[2],sc[3],sc[6],sc[7])
const uint16_t * a = (const uint16_t *)x[i].scales;
aux[0] = a[im+0] & kmask1;
aux[1] = a[im+2] & kmask1;
aux[2] = ((a[im+4] >> 0) & kmask2) | ((a[im+0] & kmask3) >> 2);
aux[3] = ((a[im+4] >> 4) & kmask2) | ((a[im+2] & kmask3) >> 2);
float4 sum = {0.f, 0.f, 0.f, 0.f};
float smin = 0;
// split low/high nibbles of the packed quants into bytes
const uint16_t * q1 = (const uint16_t *)ql1;
const uint16_t * q2 = q1 + 32;
q16[0] = q1[0] & 0x0f0f;
q16[1] = q1[8] & 0x0f0f;
q16[2] = (q1[0] >> 4) & 0x0f0f;
q16[3] = (q1[8] >> 4) & 0x0f0f;
q16[4] = q2[0] & 0x0f0f;
q16[5] = q2[8] & 0x0f0f;
q16[6] = (q2[0] >> 4) & 0x0f0f;
q16[7] = (q2[8] >> 4) & 0x0f0f;
for (int l = 0; l < n; ++l) {
// each quant gains +16 when its 5th bit is set in qh
sum.x += y1[l+ 0] * (q4[l +0] + (qh[l+ 0] & (hm1 << 0) ? 16 : 0))
+ y1[l+16] * (q4[l +2] + (qh[l+16] & (hm1 << 0) ? 16 : 0));
sum.y += y1[l+32] * (q4[l +4] + (qh[l+ 0] & (hm1 << 1) ? 16 : 0))
+ y1[l+48] * (q4[l +6] + (qh[l+16] & (hm1 << 1) ? 16 : 0));
sum.z += y2[l+ 0] * (q4[l +8] + (qh[l+ 0] & (hm2 << 0) ? 16 : 0))
+ y2[l+16] * (q4[l+10] + (qh[l+16] & (hm2 << 0) ? 16 : 0));
sum.w += y2[l+32] * (q4[l+12] + (qh[l+ 0] & (hm2 << 1) ? 16 : 0))
+ y2[l+48] * (q4[l+14] + (qh[l+16] & (hm2 << 1) ? 16 : 0));
smin += (y1[l] + y1[l+16]) * sc[2] + (y1[l+32] + y1[l+48]) * sc[3]
+ (y2[l] + y2[l+16]) * sc[6] + (y2[l+32] + y2[l+48]) * sc[7];
}
tmp += dall * (sum.x * sc[0] + sum.y * sc[1] + sum.z * sc[4] + sum.w * sc[5]) - dmin * smin;
}
#else
const int tid = threadIdx.x/(2*K_QUANTS_PER_ITERATION); // 0...15
const int ix = threadIdx.x%(2*K_QUANTS_PER_ITERATION);
const int step = tid * K_QUANTS_PER_ITERATION;
const int im = step/8;
const int in = step%8;
for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) {
const uint8_t * q = x[i].qs + step;
const int8_t * s = x[i].scales;
const float * y = yy + i*QK_K + step;
const float d = x[i].d;
float sum = 0.f;
for (int j = 0; j < K_QUANTS_PER_ITERATION; ++j) {
const uint8_t h = x[i].qh[in+j] >> im;
sum += y[j+ 0] * d * s[0] * ((q[j+ 0] & 0xF) - ((h >> 0) & 1 ? 0 : 16))
+ y[j+16] * d * s[1] * ((q[j+16] & 0xF) - ((h >> 2) & 1 ? 0 : 16))
+ y[j+32] * d * s[2] * ((q[j+ 0] >> 4) - ((h >> 4) & 1 ? 0 : 16))
+ y[j+48] * d * s[3] * ((q[j+16] >> 4) - ((h >> 6) & 1 ? 0 : 16));
}
tmp += sum;
}
#endif
// sum up partial sums across the warp and write back result
#pragma unroll
for (int mask = 16; mask > 0; mask >>= 1) {
tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
}
if (threadIdx.x == 0) {
dst[row] = tmp;
}
}
// Computes dst[row] = dot(dequantized q6_K row of vx, yy) for one matrix row per
// (blockIdx.y, threadIdx.y). The 32 lanes of threadIdx.x cooperate on the row and
// combine their partial sums with a warp shuffle reduction before a single store.
// Requires *_sync warp intrinsics (SM70+ safe); launch with blockDim.x == 32.
static __global__ void dequantize_mul_mat_vec_q6_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols, int nrows) {

    static_assert(16%K_QUANTS_PER_ITERATION == 0, "16 must be divisible by K_QUANTS_PER_ITERATION");

    const int row = blockIdx.y*blockDim.y + threadIdx.y;
    // Fixed off-by-one: valid rows are 0..nrows-1, so row == nrows must also bail out
    // (the previous 'row > nrows' allowed one out-of-bounds row through).
    if (row >= nrows) return;

    const int num_blocks_per_row = ncols / QK_K;
    const int ib0 = row*num_blocks_per_row;

    const block_q6_K * x = (const block_q6_K *)vx + ib0;

#if QK_K == 256

    const int tid = threadIdx.x/K_QUANTS_PER_ITERATION; // 0...31 or 0...16
    const int ix  = threadIdx.x%K_QUANTS_PER_ITERATION; // 0 or 0, 1

    const int step = 16/K_QUANTS_PER_ITERATION;         // 16 or 8

    const int im = tid/step;                            // 0 or 1. 0 computes 0..., 1 computes 128...
    const int in = tid - step*im;                       // 0...15 or 0...7

#if K_QUANTS_PER_ITERATION == 1
    const int l0 = K_QUANTS_PER_ITERATION*in;           // 0...15
    const int is = 0;
#else
    const int l0 = 4 * in;                              // 0, 4, 8, ..., 28
    const int is = in / 4;
#endif

    const int ql_offset = 64*im + l0;
    const int qh_offset = 32*im + l0;
    const int s_offset  =  8*im + is;
    const int y_offset  = 128*im + l0;

    float tmp = 0; // partial sum for thread in warp

    for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) {

        const float   * y  = yy + i * QK_K + y_offset;
        const uint8_t * ql = x[i].ql + ql_offset;
        const uint8_t * qh = x[i].qh + qh_offset;
        const int8_t  * s  = x[i].scales + s_offset;

        const float d = x[i].d;

#if K_QUANTS_PER_ITERATION == 1
        // Each 6-bit quant = (4 low bits from ql) | (2 high bits from qh), minus the 32 bias.
        float sum = y[ 0] * s[0] * d * ((int8_t)((ql[ 0] & 0xF) | ((qh[ 0] & 0x03) << 4)) - 32)
                  + y[16] * s[1] * d * ((int8_t)((ql[16] & 0xF) | ((qh[16] & 0x03) << 4)) - 32)
                  + y[32] * s[2] * d * ((int8_t)((ql[32] & 0xF) | ((qh[ 0] & 0x0c) << 2)) - 32)
                  + y[48] * s[3] * d * ((int8_t)((ql[48] & 0xF) | ((qh[16] & 0x0c) << 2)) - 32)
                  + y[64] * s[4] * d * ((int8_t)((ql[ 0] >>  4) | ((qh[ 0] & 0x30) >> 0)) - 32)
                  + y[80] * s[5] * d * ((int8_t)((ql[16] >>  4) | ((qh[16] & 0x30) >> 0)) - 32)
                  + y[96] * s[6] * d * ((int8_t)((ql[32] >>  4) | ((qh[ 0] & 0xc0) >> 2)) - 32)
                  +y[112] * s[7] * d * ((int8_t)((ql[48] >>  4) | ((qh[16] & 0xc0) >> 2)) - 32);
        tmp += sum;
#else
        float sum = 0;
        for (int l = 0; l < 4; ++l) {
            sum += y[l+ 0] * s[0] * d * ((int8_t)((ql[l+ 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32)
                 + y[l+32] * s[2] * d * ((int8_t)((ql[l+32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32)
                 + y[l+64] * s[4] * d * ((int8_t)((ql[l+ 0] >>  4) | (((qh[l] >> 4) & 3) << 4)) - 32)
                 + y[l+96] * s[6] * d * ((int8_t)((ql[l+32] >>  4) | (((qh[l] >> 6) & 3) << 4)) - 32);
        }
        tmp += sum;
#endif

    }

#else

    const int tid = threadIdx.x/(2*K_QUANTS_PER_ITERATION);  // 0...7
    const int ix  = threadIdx.x%(2*K_QUANTS_PER_ITERATION);  // 0...3

    const int step = tid * K_QUANTS_PER_ITERATION;

    float tmp = 0; // partial sum for thread in warp

    for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) {

        const float   * y  = yy + i * QK_K + step;
        const uint8_t * ql = x[i].ql + step;
        const uint8_t * qh = x[i].qh + step;
        const int8_t  * s  = x[i].scales;

        const float d = x[i].d; // was x[i+0].d — same element, clearer spelling

        float sum = 0;
        for (int j = 0; j < K_QUANTS_PER_ITERATION; ++j) {
            sum += y[j+ 0] * s[0] * d * ((int8_t)((ql[j+ 0] & 0xF) | ((qh[j] & 0x03) << 4)) - 32)
                 + y[j+16] * s[1] * d * ((int8_t)((ql[j+16] & 0xF) | ((qh[j] & 0x0c) << 2)) - 32)
                 + y[j+32] * s[2] * d * ((int8_t)((ql[j+ 0] >>  4) | ((qh[j] & 0x30) >> 0)) - 32)
                 + y[j+48] * s[3] * d * ((int8_t)((ql[j+16] >>  4) | ((qh[j] & 0xc0) >> 2)) - 32);
        }
        tmp += sum;

    }

#endif

    // sum up partial sums and write back result
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
    }

    // Fixed: guard on the lane id, not 'tid' — in the QK_K == 256 path with
    // K_QUANTS_PER_ITERATION == 2, two lanes have tid == 0 and both stored.
    // After the butterfly reduction every lane holds the full sum, so one writer
    // suffices; this also matches the sibling dequantize_mul_mat_vec kernels.
    if (threadIdx.x == 0) {
        dst[row] = tmp;
    }
}
static __device__ void convert_f16(const void * vx, const int ib, const int iqs, dfloat2 & v){
    // Treat the source as a flat array of half values and copy two consecutive
    // elements starting at ib + iqs into v.
    const half * h = (const half *) vx;
    const int base = ib + iqs;

    // automatic half -> float type cast if dfloat == float
    v.x = h[base + 0];
    v.y = h[base + 1];
}
// Quantize a row-major float matrix (kx columns, padded to kx_padded) into q8_1
// blocks. Each warp handles one QK8_1-sized block: lanes cooperatively compute the
// block's absolute maximum (the scale) and the block sum (stored alongside it).
// Assumes blockDim.x threads map one-to-one onto quant positions within a block.
static __global__ void quantize_q8_1(const float * __restrict__ x, void * __restrict__ vy, const int kx, const int kx_padded) {
    const int col = blockDim.x*blockIdx.x + threadIdx.x;

    if (col >= kx_padded) {
        return;
    }

    const int row = blockDim.y*blockIdx.y + threadIdx.y;
    const int idx = row*kx_padded + col;

    block_q8_1 * y = (block_q8_1 *) vy;

    const int ib  = idx / QK8_1; // destination block
    const int iqs = idx % QK8_1; // position inside the block

    // columns in the padded region contribute zeros, so they affect neither
    // the maximum nor the sum
    const float xi = col < kx ? x[row*kx + col] : 0.0f;

    float absmax = fabsf(xi);
    float total  = xi;

    // warp-wide butterfly reduction: afterwards every lane holds the block
    // absolute maximum and the block sum
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        absmax = fmaxf(absmax, __shfl_xor_sync(0xffffffff, absmax, mask, 32));
        total += __shfl_xor_sync(0xffffffff, total, mask, 32);
    }

    const float d = absmax / 127;
    const int8_t q = absmax == 0.0f ? 0 : roundf(xi / d);

    y[ib].qs[iqs] = q;

    // only the first lane of the block records the (scale, sum) pair
    if (iqs == 0) {
        reinterpret_cast<half&>(y[ib].ds.x) = d;
        reinterpret_cast<half&>(y[ib].ds.y) = total;
    }
}
// Generic dequantization kernel: every thread expands one quant pair via the
// supplied dequantize_kernel. The second value of each pair lands y_offset slots
// after the first (1 for qr == 1, otherwise half a block).
template <int qk, int qr, dequantize_kernel_t dequantize_kernel>
static __global__ void dequantize_block(const void * __restrict__ vx, float * __restrict__ y, const int k) {
    const int i = blockDim.x*blockIdx.x + 2*threadIdx.x;

    if (i >= k) {
        return;
    }

    const int ib       = i/qk;                    // source block
    const int iqs      = (i%qk)/qr;               // quant index within the block
    const int iybs     = i - i%qk;                // first output index of this block
    const int y_offset = qr == 1 ? 1 : qk/2;

    dfloat2 v;
    dequantize_kernel(vx, ib, iqs, v);

    y[iybs + iqs]            = v.x;
    y[iybs + iqs + y_offset] = v.y;
}
// VDR = vec dot ratio, how many contiguous integers each thread processes when the vec dot kernel is called
// MMVQ = mul_mat_vec_q, MMQ = mul_mat_q
#define VDR_Q4_0_Q8_1_MMVQ 2
#define VDR_Q4_0_Q8_1_MMQ 4
// Dot product of vdr ints of packed q4_0 quants (v) with q8_1 quants (u).
// d4 is the q4_0 scale; ds8 is the q8_1 (scale, sum) pair — the sum is used to
// fold in the implicit -8 offset of q4_0. Requires __dp4a (SM61+).
template <int vdr> static __device__ __forceinline__ float vec_dot_q4_0_q8_1_impl(
    const int * v, const int * u, const float & d4, const half2 & ds8) {

#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics
    int sumi = 0;

#pragma unroll
    for (int i = 0; i < vdr; ++i) {
        const int vi0 = (v[i] >> 0) & 0x0F0F0F0F; // low nibbles: 4 quants
        const int vi1 = (v[i] >> 4) & 0x0F0F0F0F; // high nibbles: 4 quants

        // SIMD dot product of quantized values
        sumi = __dp4a(vi0, u[2*i+0], sumi);
        sumi = __dp4a(vi1, u[2*i+1], sumi);
    }

    const float2 ds8f = __half22float2(ds8);

    // second part effectively subtracts 8 from each quant value
    return d4 * (sumi * ds8f.x - (8*vdr/QI4_0) * ds8f.y);
#else
    assert(false);
    return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
#define VDR_Q4_1_Q8_1_MMVQ 2
#define VDR_Q4_1_Q8_1_MMQ 4
// Dot product of q4_1 quants with q8_1 quants. dm4 is the q4_1 (scale, min)
// pair; ds8 is the q8_1 (scale, sum) pair — min * sum reintroduces the q4_1
// offset term. Requires __dp4a (SM61+).
template <int vdr> static __device__ __forceinline__ float vec_dot_q4_1_q8_1_impl(
    const int * v, const int * u, const half2 & dm4, const half2 & ds8) {

#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics
    int sumi = 0;

#pragma unroll
    for (int i = 0; i < vdr; ++i) {
        const int vi0 = (v[i] >> 0) & 0x0F0F0F0F; // low nibbles
        const int vi1 = (v[i] >> 4) & 0x0F0F0F0F; // high nibbles

        // SIMD dot product of quantized values
        sumi = __dp4a(vi0, u[2*i+0], sumi);
        sumi = __dp4a(vi1, u[2*i+1], sumi);
    }

#ifdef GGML_CUDA_F16
    // one half2 multiply yields both products at once
    const float2 tmp = __half22float2(__hmul2(dm4, ds8));
    const float d4d8 = tmp.x;
    const float m4s8 = tmp.y;
#else
    const float2 dm4f = __half22float2(dm4);
    const float2 ds8f = __half22float2(ds8);
    const float d4d8 = dm4f.x * ds8f.x;
    const float m4s8 = dm4f.y * ds8f.y;
#endif // GGML_CUDA_F16

    // scale second part of sum by QI8_1/(vdr * QR4_1) to compensate for multiple threads adding it
    return sumi * d4d8 + m4s8 / (QI8_1 / (vdr * QR4_1));
#else
    assert(false);
    return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
#define VDR_Q5_0_Q8_1_MMVQ 2
#define VDR_Q5_0_Q8_1_MMQ 4
// Dot product of q5_0 quants with q8_1 quants. vl holds the packed low 4 bits,
// vh the fifth bits (one per quant); d5 is the q5_0 scale and ds8 the q8_1
// (scale, sum) pair. Requires __dp4a (SM61+).
template <int vdr> static __device__ __forceinline__ float vec_dot_q5_0_q8_1_impl(
    const int * vl, const int * vh, const int * u, const float & d5, const half2 & ds8) {

#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics
    int sumi = 0;

#pragma unroll
    for (int i = 0; i < vdr; ++i) {
        // reassemble 5-bit quants: scatter each qh bit into bit 4 of its packed byte
        int vi0 = (vl[i] >>  0) & 0x0F0F0F0F; // lower 4 qs bits, still need qh as 5th bits
        vi0    |= (vh[i] <<  4) & 0x00000010; // 0 ->  4
        vi0    |= (vh[i] << 11) & 0x00001000; // 1 -> 12
        vi0    |= (vh[i] << 18) & 0x00100000; // 2 -> 20
        vi0    |= (vh[i] << 25) & 0x10000000; // 3 -> 28
        sumi = __dp4a(vi0, u[2*i+0], sumi); // SIMD dot product of quantized values

        int vi1 = (vl[i] >>  4) & 0x0F0F0F0F; // upper 4 qs bits, still need qh as 5th bits
        vi1    |= (vh[i] >> 12) & 0x00000010; // 16 ->  4
        vi1    |= (vh[i] >>  5) & 0x00001000; // 17 -> 12
        vi1    |= (vh[i] <<  2) & 0x00100000; // 18 -> 20
        vi1    |= (vh[i] <<  9) & 0x10000000; // 19 -> 28
        sumi = __dp4a(vi1, u[2*i+1], sumi); // SIMD dot product of quantized values
    }

    const float2 ds8f = __half22float2(ds8);

    // second part effectively subtracts 16 from each quant value
    return d5 * (sumi * ds8f.x - (16*vdr/QI5_0) * ds8f.y);
#else
    assert(false);
    return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
#define VDR_Q5_1_Q8_1_MMVQ 2
#define VDR_Q5_1_Q8_1_MMQ 4
// Dot product of q5_1 quants with q8_1 quants. Same 5th-bit scatter as q5_0,
// but dm5 is a (scale, min) pair and the min contribution is added via m5s8
// instead of a fixed offset. Requires __dp4a (SM61+).
template <int vdr> static __device__ __forceinline__ float vec_dot_q5_1_q8_1_impl(
    const int * vl, const int * vh, const int * u, const half2 & dm5, const half2 & ds8) {

#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics
    int sumi = 0;

#pragma unroll
    for (int i = 0; i < vdr; ++i) {
        int vi0 = (vl[i] >>  0) & 0x0F0F0F0F; // lower 4 qs bits, still need qh as 5th bits
        vi0    |= (vh[i] <<  4) & 0x00000010; // 0 ->  4
        vi0    |= (vh[i] << 11) & 0x00001000; // 1 -> 12
        vi0    |= (vh[i] << 18) & 0x00100000; // 2 -> 20
        vi0    |= (vh[i] << 25) & 0x10000000; // 3 -> 28
        sumi = __dp4a(vi0, u[2*i+0], sumi); // SIMD dot product of quantized values

        int vi1 = (vl[i] >>  4) & 0x0F0F0F0F; // upper 4 qs bits, still need qh as 5th bits
        vi1    |= (vh[i] >> 12) & 0x00000010; // 16 ->  4
        vi1    |= (vh[i] >>  5) & 0x00001000; // 17 -> 12
        vi1    |= (vh[i] <<  2) & 0x00100000; // 18 -> 20
        vi1    |= (vh[i] <<  9) & 0x10000000; // 19 -> 28
        sumi = __dp4a(vi1, u[2*i+1], sumi); // SIMD dot product of quantized values
    }

#ifdef GGML_CUDA_F16
    // one half2 multiply yields both products at once
    const float2 tmp = __half22float2(__hmul2(dm5, ds8));
    const float d5d8 = tmp.x;
    const float m5s8 = tmp.y;
#else
    const float2 dm5f = __half22float2(dm5);
    const float2 ds8f = __half22float2(ds8);
    const float d5d8 = dm5f.x * ds8f.x;
    const float m5s8 = dm5f.y * ds8f.y;
#endif // GGML_CUDA_F16

    // scale second part of sum by QI5_1 / vdr to compensate for multiple threads adding it
    return sumi*d5d8 + m5s8 / (QI5_1 / vdr);
#else
    assert(false);
    return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
#define VDR_Q8_0_Q8_1_MMVQ 2
#define VDR_Q8_0_Q8_1_MMQ 8
// Dot product of q8_0 quants with q8_1 quants: vdr packed 4-wide integer dot
// products accumulated with __dp4a, then scaled by both block scales.
template <int vdr> static __device__ __forceinline__ float vec_dot_q8_0_q8_1_impl(
    const int * v, const int * u, const float & d8_0, const float & d8_1) {

#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics
    int acc = 0;

#pragma unroll
    for (int k = 0; k < vdr; ++k) {
        // SIMD dot product of quantized values
        acc = __dp4a(v[k], u[k], acc);
    }

    return d8_0 * d8_1 * acc;
#else
    assert(false);
    return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
// Dot product of q8_1 quants with q8_1 quants. dm8 and ds8 are (scale, sum)
// pairs; the second term folds the min/offset contribution back in.
// Requires __dp4a (SM61+).
template <int vdr> static __device__ __forceinline__ float vec_dot_q8_1_q8_1_impl(
    const int * v, const int * u, const half2 & dm8, const half2 & ds8) {

#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics
    int sumi = 0;

#pragma unroll
    for (int i = 0; i < vdr; ++i) {
        // SIMD dot product of quantized values
        sumi = __dp4a(v[i], u[i], sumi);
    }

#ifdef GGML_CUDA_F16
    // one half2 multiply yields both products at once
    const float2 tmp = __half22float2(__hmul2(dm8, ds8));
    const float d8d8 = tmp.x;
    const float m8s8 = tmp.y;
#else
    const float2 dm8f = __half22float2(dm8);
    const float2 ds8f = __half22float2(ds8);
    const float d8d8 = dm8f.x * ds8f.x;
    const float m8s8 = dm8f.y * ds8f.y;
#endif // GGML_CUDA_F16

    // scale second part of sum by QI8_1/ vdr to compensate for multiple threads adding it
    return sumi*d8d8 + m8s8 / (QI8_1 / vdr);
#else
    assert(false);
    return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
#define VDR_Q2_K_Q8_1_MMVQ 1
#define VDR_Q2_K_Q8_1_MMQ 2
// contiguous v/x values
// q2_K * q8_1 dot product for mul_mat_vec_q. v packs 2-bit quants; each
// scales[2*i] byte carries (scale | min << 4); dm2 is the q2_K (d, dmin) pair
// and d8 the per-group q8_1 scales. Requires __dp4a (SM61+).
static __device__ __forceinline__ float vec_dot_q2_K_q8_1_impl_mmvq(
    const int & v, const int * __restrict__ u, const uint8_t * __restrict__ scales,
    const half2 & dm2, const float * __restrict__ d8) {

#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics
    float sumf_d = 0.0f;
    float sumf_m = 0.0f;

#pragma unroll
    for (int i = 0; i < QR2_K; ++i) {
        const int sc = scales[2*i];

        const int vi = (v >> (2*i)) & 0x03030303; // this group's 2-bit quants

        sumf_d += d8[i] * (__dp4a(vi, u[i], 0) * (sc & 0xF)); // SIMD dot product

        // fill int with 4x m
        int m = sc >> 4;
        m |= m <<  8;
        m |= m << 16;
        sumf_m += d8[i] * __dp4a(m, u[i], 0); // multiply constant q2_K part with sum of q8_1 values
    }

    const float2 dm2f = __half22float2(dm2);

    return dm2f.x*sumf_d - dm2f.y*sumf_m;
#else
    assert(false);
    return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
// contiguous u/y values
// q2_K * q8_1 dot product for mul_mat_q. Processes two scale groups of QI8_1/2
// ints each; integer accumulation, with the (scale, min) split applied once at
// the end. Requires __dp4a (SM61+).
static __device__ __forceinline__ float vec_dot_q2_K_q8_1_impl_mmq(
    const int * __restrict__ v, const int * __restrict__ u, const uint8_t * __restrict__ scales,
    const half2 & dm2, const float & d8) {

#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics
    int sumi_d = 0;
    int sumi_m = 0;

#pragma unroll
    for (int i0 = 0; i0 < QI8_1; i0 += QI8_1/2) {
        int sumi_d_sc = 0;

        const int sc = scales[i0 / (QI8_1/2)]; // (scale | min << 4) byte for this group

        // fill int with 4x m
        int m = sc >> 4;
        m |= m <<  8;
        m |= m << 16;

#pragma unroll
        for (int i = i0; i < i0 + QI8_1/2; ++i) {
            sumi_d_sc = __dp4a(v[i], u[i], sumi_d_sc); // SIMD dot product
            sumi_m    = __dp4a(m,    u[i], sumi_m); // multiply sum of q8_1 values with m
        }

        sumi_d += sumi_d_sc * (sc & 0xF);
    }

    const float2 dm2f = __half22float2(dm2);

    return d8 * (dm2f.x*sumi_d - dm2f.y*sumi_m);
#else
    assert(false);
    return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
#define VDR_Q3_K_Q8_1_MMVQ 1
#define VDR_Q3_K_Q8_1_MMQ 2
// contiguous v/x values
// q3_K * q8_1 dot product for mul_mat_vec_q. Each 6-bit scale is split across
// the scales array: 4 low bits in the first QK_K/32 bytes, 2 high bits packed
// in the remainder; the decoded scale is biased by -32. vl holds 2-bit quants,
// vh the (inverted) high bits subtracted via __vsubss4. Requires __dp4a (SM61+).
static __device__ __forceinline__ float vec_dot_q3_K_q8_1_impl_mmvq(
    const int & vl, const int & vh, const int * __restrict__ u, const uint8_t * __restrict__ scales,
    const int & scale_offset, const float & d3, const float * __restrict__ d8) {

#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics
    float sumf = 0.0f;

#pragma unroll
    for (int i = 0; i < QR3_K; ++i) {
        const int isc = scale_offset + 2*i;

        // low 4 scale bits: nibble select within the first QK_K/32 bytes
        const int isc_low = isc % (QK_K/32);
        const int sc_shift_low = 4 * (isc / (QK_K/32));
        const int sc_low  = (scales[isc_low] >> sc_shift_low) & 0xF;

        // high 2 scale bits: 2-bit field select within the trailing bytes
        const int isc_high = isc % (QK_K/64);
        const int sc_shift_high = 2 * (isc / (QK_K/64));
        const int sc_high = ((scales[(QK_K/32) + isc_high] >> sc_shift_high) & 3) << 4;

        const int sc = (sc_low | sc_high) - 32;

        const int vil = (vl >> (2*i)) & 0x03030303;

        const int vih = ((vh >> i) << 2) & 0x04040404;

        const int vi = __vsubss4(vil, vih); // per-byte saturated subtract of the high-bit term

        sumf += d8[i] * (__dp4a(vi, u[i], 0) * sc); // SIMD dot product
    }

    return d3 * sumf;
#else
    assert(false);
    return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
// contiguous u/y values
// q3_K * q8_1 dot product for mul_mat_q: groups of QI8_1/2 ints share one
// pre-decoded int8 scale; the group sums are scaled and accumulated in integer
// arithmetic, with d3*d8 applied once at the end. Requires __dp4a (SM61+).
static __device__ __forceinline__ float vec_dot_q3_K_q8_1_impl_mmq(
    const int * __restrict__ v, const int * __restrict__ u, const int8_t * __restrict__ scales,
    const float & d3, const float & d8) {

#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics
    int sumi = 0;

#pragma unroll
    for (int i0 = 0; i0 < QR3_K*VDR_Q3_K_Q8_1_MMQ; i0 += QI8_1/2) {
        int sumi_sc = 0;

        // consistency fix: unroll this fixed-trip inner loop like every sibling
        // *_impl_mmq function (it was the only one missing the pragma)
#pragma unroll
        for (int i = i0; i < i0 + QI8_1/2; ++i) {
            sumi_sc = __dp4a(v[i], u[i], sumi_sc); // SIMD dot product
        }

        sumi += sumi_sc * scales[i0 / (QI8_1/2)]; // apply this group's scale
    }

    return d3*d8 * sumi;
#else
    assert(false);
    return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
#define VDR_Q4_K_Q8_1_MMVQ 2
#define VDR_Q4_K_Q8_1_MMQ 8
// contiguous v/x values
// q4_K * q8_1 dot product for mul_mat_vec_q. sc/m are the pre-extracted per-group
// scales and mins; dm4 is the q4_K (d, dmin) pair and d8 the per-group q8_1
// scales. Requires __dp4a (SM61+).
static __device__ __forceinline__ float vec_dot_q4_K_q8_1_impl_vmmq(
    const int * __restrict__ v, const int * __restrict__ u, const uint8_t * __restrict__ sc,
    const uint8_t * __restrict__ m, const half2 & dm4, const float * __restrict__ d8) {

#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics
    float sumf_d = 0.0f;
    float sumf_m = 0.0f;

#pragma unroll
    for (int i = 0; i < QR4_K; ++i) {
        const int v0i = (v[0] >> (4*i)) & 0x0F0F0F0F; // nibble group i of the first int
        const int v1i = (v[1] >> (4*i)) & 0x0F0F0F0F; // nibble group i of the second int

        const int dot1 = __dp4a(v1i, u[2*i+1], __dp4a(v0i, u[2*i+0], 0)); // SIMD dot product
        const int dot2 = __dp4a(0x01010101, u[2*i+1], __dp4a(0x01010101, u[2*i+0], 0)); // sum of u

        sumf_d += d8[i] * (dot1 * sc[i]);
        sumf_m += d8[i] * (dot2 * m[i]); // multiply constant part of q4_K with sum of q8_1 values
    }

    const float2 dm4f = __half22float2(dm4);

    return dm4f.x*sumf_d - dm4f.y*sumf_m;
#else
    assert(false);
    return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
// contiguous u/y values
// q4_K * q8_1 dot product for mul_mat_q. One q8_1 block (QI8_1 ints of u and its
// ds8[i] pair) is consumed per iteration; the q4_K quants are re-extracted from
// the shared v ints by nibble shift. Requires __dp4a (SM61+).
static __device__ __forceinline__ float vec_dot_q4_K_q8_1_impl_mmq(
    const int * __restrict__ v, const int * __restrict__ u, const uint8_t * __restrict__ sc,
    const uint8_t * __restrict__ m, const half2 & dm4, const half2 * __restrict__ ds8) {

#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics
    float sumf_d = 0.0f;
    float sumf_m = 0.0f;

#pragma unroll
    for (int i = 0; i < QR4_K*VDR_Q4_K_Q8_1_MMQ/QI8_1; ++i) {
        int sumi_d = 0;

#pragma unroll
        for (int j = 0; j < QI8_1; ++j) {
            sumi_d = __dp4a((v[j] >> (4*i)) & 0x0F0F0F0F, u[i*QI8_1 + j], sumi_d); // SIMD dot product
        }

        const float2 ds8f = __half22float2(ds8[i]);

        sumf_d += ds8f.x * (sc[i] * sumi_d);
        sumf_m += ds8f.y *  m[i]; // sum of q8_1 block * q4_K min val
    }

    const float2 dm4f = __half22float2(dm4);

    return dm4f.x*sumf_d - dm4f.y*sumf_m;
#else
    assert(false);
    return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
#define VDR_Q5_K_Q8_1_MMVQ 2
#define VDR_Q5_K_Q8_1_MMQ 8
// contiguous v/x values
// q5_K * q8_1 dot product for mul_mat_vec_q. vl carries the low 4 bits, vh the
// fifth bits (merged into bit 4 of each byte); sc/m are per-group scales and
// mins, dm5 the q5_K (d, dmin) pair, d8 the q8_1 scales. Requires __dp4a (SM61+).
static __device__ __forceinline__ float vec_dot_q5_K_q8_1_impl_vmmq(
    const int * __restrict__ vl, const int * __restrict__ vh, const int * __restrict__ u, const uint8_t * __restrict__ sc,
    const uint8_t * __restrict__ m, const half2 & dm5, const float * __restrict__ d8) {

#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics
    float sumf_d = 0.0f;
    float sumf_m = 0.0f;

#pragma unroll
    for (int i = 0; i < QR5_K; ++i) {
        const int vl0i = (vl[0] >> (4*i)) & 0x0F0F0F0F;
        const int vl1i = (vl[1] >> (4*i)) & 0x0F0F0F0F;

        const int vh0i = ((vh[0] >> i) << 4) & 0x10101010; // 5th bit into bit 4 of each byte
        const int vh1i = ((vh[1] >> i) << 4) & 0x10101010;

        const int v0i = vl0i | vh0i;
        const int v1i = vl1i | vh1i;

        const int dot1 = __dp4a(v0i, u[2*i+0], __dp4a(v1i, u[2*i+1], 0)); // SIMD dot product
        const int dot2 = __dp4a(0x01010101, u[2*i+0], __dp4a(0x01010101, u[2*i+1], 0)); // sum of u

        sumf_d += d8[i] * (dot1 * sc[i]);
        sumf_m += d8[i] * (dot2 * m[i]);

    }

    const float2 dm5f = __half22float2(dm5);

    return dm5f.x*sumf_d - dm5f.y*sumf_m;
#else
    assert(false);
    return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
// contiguous u/y values
// q5_K * q8_1 dot product for mul_mat_q. Same structure as the q4_K variant but
// v already contains fully assembled 5-bit quants, so no nibble extraction here.
// NOTE(review): the parameter is named dm4 but receives the q5_K (d, dmin) pair —
// stale copy-paste naming from the q4_K version; positional, so callers are fine.
static __device__ __forceinline__ float vec_dot_q5_K_q8_1_impl_mmq(
    const int * __restrict__ v, const int * __restrict__ u, const uint8_t * __restrict__ sc,
    const uint8_t * __restrict__ m, const half2 & dm4, const half2 * __restrict__ ds8) {

#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics
    float sumf_d = 0.0f;
    float sumf_m = 0.0f;

#pragma unroll
    for (int i = 0; i < QR5_K*VDR_Q5_K_Q8_1_MMQ/QI8_1; ++i) {
        int sumi_d = 0;

#pragma unroll
        for (int j = 0; j < QI8_1; ++j) {
            sumi_d = __dp4a(v[i*QI8_1 + j], u[i*QI8_1 + j], sumi_d); // SIMD dot product
        }

        const float2 ds8f = __half22float2(ds8[i]);

        sumf_d += ds8f.x * (sc[i] * sumi_d);
        sumf_m += ds8f.y *  m[i]; // sum of q8_1 block * q5_K min val
    }

    const float2 dm4f = __half22float2(dm4);

    return dm4f.x*sumf_d - dm4f.y*sumf_m;
#else
    assert(false);
    return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
#define VDR_Q6_K_Q8_1_MMVQ 1
#define VDR_Q6_K_Q8_1_MMQ 8
// contiguous v/x values
// q6_K * q8_1 dot product for mul_mat_vec_q. Each 6-bit quant is the low nibble
// from vl plus two high bits from vh, biased by -32 (applied with a saturated
// per-byte subtract). scales[4*i] is the int8 scale per group. Requires __dp4a.
static __device__ __forceinline__ float vec_dot_q6_K_q8_1_impl_mmvq(
    const int & vl, const int & vh, const int * __restrict__ u, const int8_t * __restrict__ scales,
    const float & d, const float * __restrict__ d8) {

#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics
    float sumf = 0.0f;

#pragma unroll
    for (int i = 0; i < QR6_K; ++i) {
        const int sc = scales[4*i];

        const int vil = (vl >> (4*i)) & 0x0F0F0F0F; // low 4 bits of each quant

        const int vih = ((vh >> (4*i)) << 4) & 0x30303030; // high 2 bits into bits 4-5

        const int vi = __vsubss4((vil | vih), 0x20202020); // vi = (vil | vih) - 32

        sumf += d8[i] * (__dp4a(vi, u[i], 0) * sc); // SIMD dot product
    }

    return d*sumf;
#else
    assert(false);
    return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
// contiguous u/y values
// q6_K * q8_1 dot product for mul_mat_q. Two q6_K scale groups share each q8_1
// scale d8[i0/4]: the paired accumulators sumi_d.x/.y track the two groups, each
// covering interleaved int pairs. Requires __dp4a (SM61+).
static __device__ __forceinline__ float vec_dot_q6_K_q8_1_impl_mmq(
    const int * __restrict__ v, const int * __restrict__ u, const int8_t * __restrict__ sc,
    const float & d6, const float * __restrict__ d8) {

#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics
    float sumf_d = 0.0f;

#pragma unroll
    for (int i0 = 0; i0 < VDR_Q6_K_Q8_1_MMQ; i0 += 4) {
        int2 sumi_d = {0, 0}; // 2 q6_K scales per q8_1 scale

#pragma unroll
        for (int i = i0; i < i0 + 2; ++i) {
            sumi_d.x = __dp4a(v[2*i+0], u[2*i+0], sumi_d.x); // SIMD dot product
            sumi_d.x = __dp4a(v[2*i+1], u[2*i+1], sumi_d.x); // SIMD dot product

            sumi_d.y = __dp4a(v[2*i+4], u[2*i+4], sumi_d.y); // SIMD dot product
            sumi_d.y = __dp4a(v[2*i+5], u[2*i+5], sumi_d.y); // SIMD dot product
        }

        sumf_d += d8[i0/4] * (sc[i0/2+0]*sumi_d.x + sc[i0/2+1]*sumi_d.y);
    }

    return d6 * sumf_d;
#else
    assert(false);
    return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
// Gather the q4_0 quant ints and the two matching q8_1 int groups for this
// position, then defer to the shared vec_dot_q4_0_q8_1_impl.
static __device__ __forceinline__ float vec_dot_q4_0_q8_1(
    const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {

    const block_q4_0 * bq4_0 = (const block_q4_0 *) vbq;

    int v[VDR_Q4_0_Q8_1_MMVQ];
    int u[2*VDR_Q4_0_Q8_1_MMVQ];

#pragma unroll
    for (int idx = 0; idx < VDR_Q4_0_Q8_1_MMVQ; ++idx) {
        v[idx]       = get_int_from_uint8(bq4_0->qs, iqs + idx);
        u[2*idx + 0] = get_int_from_int8_aligned(bq8_1->qs, iqs + idx);
        u[2*idx + 1] = get_int_from_int8_aligned(bq8_1->qs, iqs + idx + QI4_0);
    }

    return vec_dot_q4_0_q8_1_impl<VDR_Q4_0_Q8_1_MMVQ>(v, u, bq4_0->d, bq8_1->ds);
}
// Shared-memory tiles for q4_0 mul_mat: packed quants plus per-block float
// scales (handed out through the generic half2 pointer and cast back by the
// consumers). x_qh and x_sc are unused for this quant type.
template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q4_0(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {

    __shared__ int   qs_tile[mmq_y * (WARP_SIZE)       + mmq_y];
    __shared__ float d_tile [mmq_y * (WARP_SIZE/QI4_0) + mmq_y/QI4_0];

    *x_ql = qs_tile;
    *x_dm = (half2 *) d_tile;
}
// Load a tile of q4_0 blocks into shared memory: quants into x_ql, per-block
// float scales into x_dm (reinterpreted as float). i_offset selects this warp's
// row group, k is the lane's column; with need_check, row indices are clamped
// to i_max instead of branching.
template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q4_0(
    const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
    int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {

    __builtin_assume(i_offset >= 0);
    __builtin_assume(i_offset <  nwarps);
    __builtin_assume(k >= 0);
    __builtin_assume(k <  WARP_SIZE);

    const int kbx  = k / QI4_0;
    const int kqsx = k % QI4_0;

    const block_q4_0 * bx0 = (block_q4_0 *) vx;

    float * x_dmf = (float *) x_dm;

#pragma unroll
    for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
        int i = i0 + i_offset;

        if (need_check) {
            i = min(i, i_max); // out-of-range rows just re-load row i_max
        }

        const block_q4_0 * bxi = bx0 + i*blocks_per_row + kbx;

        // row stride is padded to WARP_SIZE + 1, presumably to avoid shared-memory
        // bank conflicts — the same padding appears in the allocation
        x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8(bxi->qs, kqsx);
        // x_dmf[i * (WARP_SIZE/QI4_0) + i / QI4_0 + kbx] = bxi->d;
    }

    const int blocks_per_tile_x_row = WARP_SIZE / QI4_0;
    const int kbxd = k % blocks_per_tile_x_row;

    // second pass: scales, one per q4_0 block
#pragma unroll
    for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI4_0) {
        int i = i0 + i_offset * QI4_0 + k / blocks_per_tile_x_row;

        if (need_check) {
            i = min(i, i_max);
        }

        const block_q4_0 * bxi = bx0 + i*blocks_per_row + kbxd;

        x_dmf[i * (WARP_SIZE/QI4_0) + i / QI4_0 + kbxd] = bxi->d;
    }
}
// Per-element dot product for the q4_0 mul_mat kernel: reads the x quants/scale
// tile at row i and the q8_1 y tile at row j, column k, and defers to the
// shared impl. x_dm actually stores float scales here (cast below).
static __device__ __forceinline__ float vec_dot_q4_0_q8_1_mul_mat(
    const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
    const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {

    const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2));
    const float * x_dmf = (float *) x_dm;

    int u[2*VDR_Q4_0_Q8_1_MMQ];

#pragma unroll
    for (int l = 0; l < VDR_Q4_0_Q8_1_MMQ; ++l) {
        u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l)         % WARP_SIZE];
        u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI4_0) % WARP_SIZE];
    }

    return vec_dot_q4_0_q8_1_impl<VDR_Q4_0_Q8_1_MMQ>
        (&x_ql[i * (WARP_SIZE + 1) + k], u, x_dmf[i * (WARP_SIZE/QI4_0) + i/QI4_0 + k/QI4_0],
         y_ds[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]);
}
// Gather the q4_1 quant ints and the two matching q8_1 int groups for this
// position, then defer to the shared vec_dot_q4_1_q8_1_impl.
static __device__ __forceinline__ float vec_dot_q4_1_q8_1(
    const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {

    const block_q4_1 * bq4_1 = (const block_q4_1 *) vbq;

    int v[VDR_Q4_1_Q8_1_MMVQ];
    int u[2*VDR_Q4_1_Q8_1_MMVQ];

#pragma unroll
    for (int idx = 0; idx < VDR_Q4_1_Q8_1_MMVQ; ++idx) {
        v[idx]       = get_int_from_uint8_aligned(bq4_1->qs, iqs + idx);
        u[2*idx + 0] = get_int_from_int8_aligned(bq8_1->qs, iqs + idx);
        u[2*idx + 1] = get_int_from_int8_aligned(bq8_1->qs, iqs + idx + QI4_1);
    }

    return vec_dot_q4_1_q8_1_impl<VDR_Q4_1_Q8_1_MMVQ>(v, u, bq4_1->dm, bq8_1->ds);
}
// Shared-memory tiles for q4_1 mul_mat: packed quants plus per-block half2
// (scale, min) pairs. x_qh and x_sc are unused for this quant type.
// Fixed a stray duplicated '+' in the quant tile size ("+ + mmq_y"); the extra
// token parsed as a harmless unary plus but obscured the intended padding term.
template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q4_1(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {

    __shared__ int   tile_x_qs[mmq_y * (WARP_SIZE) +     mmq_y];
    __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI4_1) + mmq_y/QI4_1];

    *x_ql = tile_x_qs;
    *x_dm = tile_x_dm;
}
// Load a tile of q4_1 blocks into shared memory: quants into x_ql and the
// half2 (scale, min) pairs into x_dm. Same layout/clamping scheme as the q4_0
// loader, but with aligned reads and no float reinterpretation.
template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q4_1(
    const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
    int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {

    __builtin_assume(i_offset >= 0);
    __builtin_assume(i_offset <  nwarps);
    __builtin_assume(k >= 0);
    __builtin_assume(k <  WARP_SIZE);

    const int kbx  = k / QI4_1;
    const int kqsx = k % QI4_1;

    const block_q4_1 * bx0 = (block_q4_1 *) vx;

#pragma unroll
    for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
        int i = i0 + i_offset;

        if (need_check) {
            i = min(i, i_max); // clamp instead of branching on out-of-range rows
        }

        const block_q4_1 * bxi = bx0 + i*blocks_per_row + kbx;

        x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx);
    }

    const int blocks_per_tile_x_row = WARP_SIZE / QI4_1;
    const int kbxd = k % blocks_per_tile_x_row;

    // second pass: one (scale, min) pair per q4_1 block
#pragma unroll
    for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI4_1) {
        int i = i0 + i_offset * QI4_1 + k / blocks_per_tile_x_row;

        if (need_check) {
            i = min(i, i_max);
        }

        const block_q4_1 * bxi = bx0 + i*blocks_per_row + kbxd;

        x_dm[i * (WARP_SIZE/QI4_1) + i / QI4_1 + kbxd] = bxi->dm;
    }
}
// Per-element dot product for the q4_1 mul_mat kernel: reads the x quants and
// (scale, min) tile at row i plus the q8_1 y tile at row j, column k, and
// defers to the shared impl.
static __device__ __forceinline__ float vec_dot_q4_1_q8_1_mul_mat(
    const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
    const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {

    const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2));

    int u[2*VDR_Q4_1_Q8_1_MMQ];

#pragma unroll
    for (int l = 0; l < VDR_Q4_1_Q8_1_MMQ; ++l) {
        u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l)         % WARP_SIZE];
        u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI4_1) % WARP_SIZE];
    }

    return vec_dot_q4_1_q8_1_impl<VDR_Q4_1_Q8_1_MMQ>
        (&x_ql[i * (WARP_SIZE + 1) + k], u, x_dm[i * (WARP_SIZE/QI4_1) + i/QI4_1 + k/QI4_1],
         y_ds[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]);
}
// Split each q5_0 position into low-nibble ints (vl) and the matching high
// bits (vh), gather the q8_1 ints, and defer to the shared impl.
static __device__ __forceinline__ float vec_dot_q5_0_q8_1(
    const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {

    const block_q5_0 * bq5_0 = (const block_q5_0 *) vbq;

    int vl[VDR_Q5_0_Q8_1_MMVQ];
    int vh[VDR_Q5_0_Q8_1_MMVQ];
    int u [2*VDR_Q5_0_Q8_1_MMVQ];

#pragma unroll
    for (int idx = 0; idx < VDR_Q5_0_Q8_1_MMVQ; ++idx) {
        vl[idx]      = get_int_from_uint8(bq5_0->qs, iqs + idx);
        vh[idx]      = get_int_from_uint8(bq5_0->qh, 0) >> (4 * (iqs + idx));
        u[2*idx + 0] = get_int_from_int8_aligned(bq8_1->qs, iqs + idx);
        u[2*idx + 1] = get_int_from_int8_aligned(bq8_1->qs, iqs + idx + QI5_0);
    }

    return vec_dot_q5_0_q8_1_impl<VDR_Q5_0_Q8_1_MMVQ>(vl, vh, u, bq5_0->d, bq8_1->ds);
}
// Shared-memory tiles for q5_0 mul_mat: a double-width quant tile (the loader
// stores two expanded ints per source int) and per-block float scales exposed
// through the generic half2 pointer. x_qh and x_sc are unused here.
template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q5_0(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {

    __shared__ int   ql_tile[mmq_y * (2*WARP_SIZE)     + mmq_y];
    __shared__ float d_tile [mmq_y * (WARP_SIZE/QI5_0) + mmq_y/QI5_0];

    *x_ql = ql_tile;
    *x_dm = (half2 *) d_tile;
}
// Load a tile of q5_0 blocks into shared memory. Unlike the q4 loaders, the
// 5-bit quants are fully assembled here: the qh bits are scattered into bit 4
// of each byte and the -16 bias applied, so each source int expands into two
// ready-to-__dp4a ints in x_ql. Scales land in x_dm as floats.
template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q5_0(
    const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
    int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {

    __builtin_assume(i_offset >= 0);
    __builtin_assume(i_offset <  nwarps);
    __builtin_assume(k >= 0);
    __builtin_assume(k <  WARP_SIZE);

    const int kbx  = k / QI5_0;
    const int kqsx = k % QI5_0;

    const block_q5_0 * bx0 = (block_q5_0 *) vx;

#pragma unroll
    for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
        int i = i0 + i_offset;

        if (need_check) {
            i = min(i, i_max); // clamp instead of branching on out-of-range rows
        }

        const block_q5_0 * bxi = bx0 + i*blocks_per_row + kbx;

        const int ql = get_int_from_uint8(bxi->qs, kqsx);
        const int qh = get_int_from_uint8(bxi->qh, 0) >> (4 * (k % QI5_0));

        // low nibbles + their 5th bits, biased to signed range
        int qs0 = (ql >>  0)   & 0x0F0F0F0F;
        qs0    |= (qh <<  4)   & 0x00000010;  // 0 ->  4
        qs0    |= (qh << 11)   & 0x00001000;  // 1 -> 12
        qs0    |= (qh << 18)   & 0x00100000;  // 2 -> 20
        qs0    |= (qh << 25)   & 0x10000000;  // 3 -> 28
        qs0     = __vsubss4(qs0, 0x10101010); // subtract 16

        x_ql[i * (2*WARP_SIZE + 1) + 2*k+0] = qs0;

        // high nibbles + their 5th bits
        int qs1 = (ql >>  4)   & 0x0F0F0F0F;
        qs1    |= (qh >> 12)   & 0x00000010;  // 16 ->  4
        qs1    |= (qh >>  5)   & 0x00001000;  // 17 -> 12
        qs1    |= (qh <<  2)   & 0x00100000;  // 18 -> 20
        qs1    |= (qh <<  9)   & 0x10000000;  // 19 -> 28
        qs1     = __vsubss4(qs1, 0x10101010); // subtract 16

        x_ql[i * (2*WARP_SIZE + 1) + 2*k+1] = qs1;
    }

    const int blocks_per_tile_x_row = WARP_SIZE / QI5_0;
    const int kbxd = k % blocks_per_tile_x_row;
    float * x_dmf = (float *) x_dm;

    // second pass: one float scale per q5_0 block
#pragma unroll
    for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI5_0) {
        int i = i0 + i_offset * QI5_0 + k / blocks_per_tile_x_row;

        if (need_check) {
            i = min(i, i_max);
        }

        const block_q5_0 * bxi = bx0 + i*blocks_per_row + kbxd;

        x_dmf[i * (WARP_SIZE/QI5_0) + i / QI5_0 + kbxd] = bxi->d;
    }
}
// Per-element dot product for the q5_0 mul_mat kernel. The x tile already holds
// fully expanded, bias-corrected quants (see the loader), so the plain q8_0
// impl can be reused; x_dm and y_ds both carry float scales here (cast below).
static __device__ __forceinline__ float vec_dot_q5_0_q8_1_mul_mat(
    const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
    const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {

    const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2));
    const int index_bx = i * (WARP_SIZE/QI5_0) + i/QI5_0 + k/QI5_0;
    const float * x_dmf = (const float *) x_dm;
    const float * y_df  = (const float *) y_ds;

    int u[2*VDR_Q5_0_Q8_1_MMQ];

#pragma unroll
    for (int l = 0; l < VDR_Q5_0_Q8_1_MMQ; ++l) {
        u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l)         % WARP_SIZE];
        u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI5_0) % WARP_SIZE];
    }

    return vec_dot_q8_0_q8_1_impl<QR5_0*VDR_Q5_0_Q8_1_MMQ>
        (&x_ql[i * (2*WARP_SIZE + 1) + 2 * k], u, x_dmf[index_bx], y_df[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]);
}
// Split each q5_1 position into low-nibble ints (vl) and the matching high
// bits (vh), gather the q8_1 ints, and defer to the shared impl.
static __device__ __forceinline__ float vec_dot_q5_1_q8_1(
    const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {

    const block_q5_1 * bq5_1 = (const block_q5_1 *) vbq;

    int vl[VDR_Q5_1_Q8_1_MMVQ];
    int vh[VDR_Q5_1_Q8_1_MMVQ];
    int u [2*VDR_Q5_1_Q8_1_MMVQ];

#pragma unroll
    for (int idx = 0; idx < VDR_Q5_1_Q8_1_MMVQ; ++idx) {
        vl[idx]      = get_int_from_uint8_aligned(bq5_1->qs, iqs + idx);
        vh[idx]      = get_int_from_uint8_aligned(bq5_1->qh, 0) >> (4 * (iqs + idx));
        u[2*idx + 0] = get_int_from_int8_aligned(bq8_1->qs, iqs + idx);
        u[2*idx + 1] = get_int_from_int8_aligned(bq8_1->qs, iqs + idx + QI5_1);
    }

    return vec_dot_q5_1_q8_1_impl<VDR_Q5_1_Q8_1_MMVQ>(vl, vh, u, bq5_1->dm, bq8_1->ds);
}
// Shared-memory tiles for q5_1 mul_mat: a double-width quant tile (two expanded
// ints per source int) and per-block half2 (scale, min) pairs. x_qh and x_sc
// are unused for this quant type.
template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q5_1(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {

    __shared__ int   ql_tile[mmq_y * (2*WARP_SIZE)     + mmq_y];
    __shared__ half2 dm_tile[mmq_y * (WARP_SIZE/QI5_1) + mmq_y/QI5_1];

    *x_ql = ql_tile;
    *x_dm = dm_tile;
}
// Load a tile of q5_1 blocks into shared memory. Like the q5_0 loader, the
// 5-bit quants are fully assembled (qh bits scattered into bit 4 of each byte),
// but without the -16 bias — q5_1 carries an explicit min in its half2 pair,
// stored per block in x_dm.
template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q5_1(
    const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
    int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {

    __builtin_assume(i_offset >= 0);
    __builtin_assume(i_offset <  nwarps);
    __builtin_assume(k >= 0);
    __builtin_assume(k <  WARP_SIZE);

    const int kbx  = k / QI5_1;
    const int kqsx = k % QI5_1;

    const block_q5_1 * bx0 = (block_q5_1 *) vx;

#pragma unroll
    for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
        int i = i0 + i_offset;

        if (need_check) {
            i = min(i, i_max); // clamp instead of branching on out-of-range rows
        }

        const block_q5_1 * bxi = bx0 + i*blocks_per_row + kbx;

        const int ql = get_int_from_uint8_aligned(bxi->qs, kqsx);
        const int qh = get_int_from_uint8_aligned(bxi->qh, 0) >> (4 * (k % QI5_1));

        // low nibbles + their 5th bits
        int qs0 = (ql >>  0) & 0x0F0F0F0F;
        qs0    |= (qh <<  4) & 0x00000010; // 0 ->  4
        qs0    |= (qh << 11) & 0x00001000; // 1 -> 12
        qs0    |= (qh << 18) & 0x00100000; // 2 -> 20
        qs0    |= (qh << 25) & 0x10000000; // 3 -> 28

        x_ql[i * (2*WARP_SIZE + 1) + 2*k+0] = qs0;

        // high nibbles + their 5th bits
        int qs1 = (ql >>  4) & 0x0F0F0F0F;
        qs1    |= (qh >> 12) & 0x00000010; // 16 ->  4
        qs1    |= (qh >>  5) & 0x00001000; // 17 -> 12
        qs1    |= (qh <<  2) & 0x00100000; // 18 -> 20
        qs1    |= (qh <<  9) & 0x10000000; // 19 -> 28

        x_ql[i * (2*WARP_SIZE + 1) + 2*k+1] = qs1;
    }

    const int blocks_per_tile_x_row = WARP_SIZE / QI5_1;
    const int kbxd = k % blocks_per_tile_x_row;

    // second pass: one (scale, min) pair per q5_1 block
#pragma unroll
    for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI5_1) {
        int i = i0 + i_offset * QI5_1 + k / blocks_per_tile_x_row;

        if (need_check) {
            i = min(i, i_max);
        }

        const block_q5_1 * bxi = bx0 + i*blocks_per_row + kbxd;

        x_dm[i * (WARP_SIZE/QI5_1) + i / QI5_1 + kbxd] = bxi->dm;
    }
}
// Tile-level dot product for the q5_1 mul-mat kernel: pairs the expanded
// q5_1 quants stored in x_ql with the q8_1 quants in y_qs and forwards
// everything, together with the per-block dm/ds factors, to
// vec_dot_q8_1_q8_1_impl.
static __device__ __forceinline__ float vec_dot_q5_1_q8_1_mul_mat(
const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2));
// index into the padded dm tile (upstream's stray "+ +" removed; same value)
const int index_bx = i * (WARP_SIZE/QI5_1) + i/QI5_1 + k/QI5_1;
const int y_row = j * WARP_SIZE;
int u[2*VDR_Q5_1_Q8_1_MMQ];
#pragma unroll
for (int l = 0; l < VDR_Q5_1_Q8_1_MMQ; ++l) {
u[2*l+0] = y_qs[y_row + (kyqs + l) % WARP_SIZE];
u[2*l+1] = y_qs[y_row + (kyqs + l + QI5_1) % WARP_SIZE];
}
const int index_ds = j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1);
return vec_dot_q8_1_q8_1_impl<QR5_1*VDR_Q5_1_Q8_1_MMQ>
(&x_ql[i * (2*WARP_SIZE + 1) + 2 * k], u, x_dm[index_bx], y_ds[index_ds]);
}
// Dot product of one q8_0 block with a q8_1 block (mmvq path): gathers
// VDR_Q8_0_Q8_1_MMVQ 32-bit words from each operand starting at iqs and
// defers the arithmetic to vec_dot_q8_0_q8_1_impl with both d factors.
static __device__ __forceinline__ float vec_dot_q8_0_q8_1(
const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {
const block_q8_0 * bq8_0 = (const block_q8_0 *) vbq;
int v[VDR_Q8_0_Q8_1_MMVQ];
int u[VDR_Q8_0_Q8_1_MMVQ];
#pragma unroll
for (int l = 0; l < VDR_Q8_0_Q8_1_MMVQ; ++l) {
v[l] = get_int_from_int8(bq8_0->qs, iqs + l);
u[l] = get_int_from_int8_aligned(bq8_1->qs, iqs + l);
}
return vec_dot_q8_0_q8_1_impl<VDR_Q8_0_Q8_1_MMVQ>(v, u, bq8_0->d, __low2half(bq8_1->ds));
}
// Shared-memory tiles for the q8_0 mul-mat path. The per-block d factors
// are plain floats stored behind the half2* interface via a cast (the
// consumers cast back to float*); x_qh and x_sc are unused for this type.
template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q8_0(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {
__shared__ int shmem_qs[mmq_y * (WARP_SIZE) + mmq_y];
__shared__ float shmem_d[mmq_y * (WARP_SIZE/QI8_0) + mmq_y/QI8_0];
*x_dm = (half2 *) shmem_d;
*x_ql = shmem_qs;
}
// Loads one shared-memory tile of q8_0 data for the mul-mat kernel:
// quant words go into x_ql (padded row stride WARP_SIZE + 1), per-block d
// factors go into the dm tile, reinterpreted as floats.
template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q8_0(
const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
__builtin_assume(i_offset >= 0);
__builtin_assume(i_offset < nwarps);
__builtin_assume(k >= 0);
__builtin_assume(k < WARP_SIZE);
const int kbx = k / QI8_0;
const int kqsx = k % QI8_0;
// d factors are stored as floats in this tile (see allocate_tiles_q8_0)
float * x_dmf = (float *) x_dm;
const block_q8_0 * bx0 = (block_q8_0 *) vx;
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
int i = i0 + i_offset;
if (need_check) {
i = min(i, i_max); // clamp: out-of-range rows re-read row i_max
}
const block_q8_0 * bxi = bx0 + i*blocks_per_row + kbx;
x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_int8(bxi->qs, kqsx);
}
const int blocks_per_tile_x_row = WARP_SIZE / QI8_0;
const int kbxd = k % blocks_per_tile_x_row;
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI8_0) {
int i = i0 + i_offset * QI8_0 + k / blocks_per_tile_x_row;
if (need_check) {
i = min(i, i_max);
}
const block_q8_0 * bxi = bx0 + i*blocks_per_row + kbxd;
x_dmf[i * (WARP_SIZE/QI8_0) + i / QI8_0 + kbxd] = bxi->d;
}
}
// Tile-level dot product for the q8_0 mul-mat kernel. Both operands keep
// their d factors as plain floats in the (reinterpreted) dm/ds tiles.
static __device__ __forceinline__ float vec_dot_q8_0_q8_1_mul_mat(
const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
const float * x_dmf = (const float *) x_dm;
const float * y_df = (const float *) y_ds;
const int index_x = i * (WARP_SIZE + 1) + k; // +1: padded row stride
const int index_y = j * WARP_SIZE + k;
const float d_x = x_dmf[i * (WARP_SIZE/QI8_0) + i/QI8_0 + k/QI8_0];
const float d_y = y_df[j * (WARP_SIZE/QI8_1) + k/QI8_1];
return vec_dot_q8_0_q8_1_impl<VDR_Q8_0_Q8_1_MMQ>(&x_ql[index_x], &y_qs[index_y], d_x, d_y);
}
// q2_K x q8_1 dot product for the mmvq path: selects the scale bytes for
// this position, loads one packed quant word v and QR2_K q8_1 words u with
// their d factors, then defers to vec_dot_q2_K_q8_1_impl_mmvq.
static __device__ __forceinline__ float vec_dot_q2_K_q8_1(
const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {
const block_q2_K * bq2_K = (const block_q2_K *) vbq;
const int bq8_offset = QR2_K * (iqs / QI8_1);
const int scale_offset = iqs - iqs % QI8_1 + (iqs % QI8_1) / (QI8_1/2);
const uint8_t * scales = bq2_K->scales + scale_offset;
const int v = get_int_from_uint8_aligned(bq2_K->qs, iqs);
int u[QR2_K];
float d8[QR2_K];
#pragma unroll
for (int i = 0; i < QR2_K; ++ i) {
u[i] = get_int_from_int8_aligned(bq8_1[bq8_offset + i].qs, iqs % QI8_1);
d8[i] = __low2half(bq8_1[bq8_offset + i].ds); // implicit half -> float conversion
}
return vec_dot_q2_K_q8_1_impl_mmvq(v, u, scales, bq2_K->dm, d8);
}
// Shared-memory tiles for the q2_K mul-mat path: quant words, per-block dm
// values, and packed scales. x_qh is unused for this type.
template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q2_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {
__shared__ int shmem_ql[mmq_y * (WARP_SIZE) + mmq_y];
__shared__ half2 shmem_dm[mmq_y * (WARP_SIZE/QI2_K) + mmq_y/QI2_K];
__shared__ int shmem_sc[mmq_y * (WARP_SIZE/4) + mmq_y/4];
*x_sc = shmem_sc;
*x_dm = shmem_dm;
*x_ql = shmem_ql;
}
// Loads one shared-memory tile of q2_K data for the mul-mat kernel:
// packed quant words into x_ql, per-block dm values into x_dm (note the
// % mmq_y wrap on the row index), and one scales word per WARP_SIZE/4
// columns into x_sc. All tiles use the padded row layouts set up by
// allocate_tiles_q2_K.
template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q2_K(
const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
__builtin_assume(i_offset >= 0);
__builtin_assume(i_offset < nwarps);
__builtin_assume(k >= 0);
__builtin_assume(k < WARP_SIZE);
const int kbx = k / QI2_K;
const int kqsx = k % QI2_K;
const block_q2_K * bx0 = (block_q2_K *) vx;
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
int i = i0 + i_offset;
if (need_check) {
i = min(i, i_max); // clamp: out-of-range rows re-read row i_max
}
const block_q2_K * bxi = bx0 + i*blocks_per_row + kbx;
x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx);
}
const int blocks_per_tile_x_row = WARP_SIZE / QI2_K;
const int kbxd = k % blocks_per_tile_x_row;
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI2_K) {
int i = (i0 + i_offset * QI2_K + k / blocks_per_tile_x_row) % mmq_y;
if (need_check) {
i = min(i, i_max);
}
const block_q2_K * bxi = bx0 + i*blocks_per_row + kbxd;
x_dm[i * (WARP_SIZE/QI2_K) + i / QI2_K + kbxd] = bxi->dm;
}
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 4) {
int i = i0 + i_offset * 4 + k / (WARP_SIZE/4);
if (need_check) {
i = min(i, i_max);
}
const block_q2_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/4)) / (QI2_K/4);
x_sc[i * (WARP_SIZE/4) + i / 4 + k % (WARP_SIZE/4)] = get_int_from_uint8_aligned(bxi->scales, k % (QI2_K/4));
}
}
// Tile-level dot product for the q2_K mul-mat kernel: extracts the 2-bit
// quants for this k position (shift selects which 2-bit field of each
// byte), picks the scale bytes from the x_sc tile, and defers to
// vec_dot_q2_K_q8_1_impl_mmq.
static __device__ __forceinline__ float vec_dot_q2_K_q8_1_mul_mat(
const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
const int kbx = k / QI2_K;
const int ky = (k % QI2_K) * QR2_K;
const float * y_df = (const float *) y_ds; // y d factors stored as floats (need_sum == false path)
int v[QR2_K*VDR_Q2_K_Q8_1_MMQ];
const int kqsx = i * (WARP_SIZE + 1) + kbx*QI2_K + (QI2_K/2) * (ky/(2*QI2_K)) + ky % (QI2_K/2);
const int shift = 2 * ((ky % (2*QI2_K)) / (QI2_K/2));
#pragma unroll
for (int l = 0; l < QR2_K*VDR_Q2_K_Q8_1_MMQ; ++l) {
v[l] = (x_ql[kqsx + l] >> shift) & 0x03030303; // isolate one 2-bit quant per byte
}
const uint8_t * scales = ((const uint8_t *) &x_sc[i * (WARP_SIZE/4) + i/4 + kbx*4]) + ky/4;
const int index_y = j * WARP_SIZE + (QR2_K*k) % WARP_SIZE;
return vec_dot_q2_K_q8_1_impl_mmq(v, &y_qs[index_y], scales, x_dm[i * (WARP_SIZE/QI2_K) + i/QI2_K + kbx], y_df[index_y/QI8_1]);
}
// q3_K x q8_1 dot product for the mmvq path: loads one word of low quant
// bits (vl), the inverted high-bit mask (vh), QR3_K q8_1 words with their
// d factors, and defers to vec_dot_q3_K_q8_1_impl_mmvq together with the
// scale bytes and block scale d.
static __device__ __forceinline__ float vec_dot_q3_K_q8_1(
const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {
const block_q3_K * bq3_K = (const block_q3_K *) vbq;
const int bq8_offset = QR3_K * (iqs / (QI3_K/2));
const int scale_offset = iqs - iqs % QI8_1 + (iqs % QI8_1) / (QI8_1/2);
const float d = bq3_K->d;
const int vl = get_int_from_uint8(bq3_K->qs, iqs);
// invert the mask with ~ so that a 0/1 results in 4/0 being subtracted
const int vh = ~get_int_from_uint8(bq3_K->hmask, iqs % (QI3_K/2)) >> bq8_offset;
int u[QR3_K];
float d8[QR3_K];
#pragma unroll
for (int i = 0; i < QR3_K; ++i) {
u[i] = get_int_from_int8_aligned(bq8_1[bq8_offset + i].qs, iqs % QI8_1);
d8[i] = __low2half(bq8_1[bq8_offset + i].ds); // implicit half -> float conversion
}
return vec_dot_q3_K_q8_1_impl_mmvq(vl, vh, u, bq3_K->scales, scale_offset, d, d8);
}
// Shared-memory tiles for the q3_K mul-mat path. This type uses all four
// tiles: quant words, per-block dm values, high-bit masks, and scales.
template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q3_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {
__shared__ int shmem_ql[mmq_y * (WARP_SIZE) + mmq_y];
__shared__ half2 shmem_dm[mmq_y * (WARP_SIZE/QI3_K) + mmq_y/QI3_K];
__shared__ int shmem_qh[mmq_y * (WARP_SIZE/2) + mmq_y/2];
__shared__ int shmem_sc[mmq_y * (WARP_SIZE/4) + mmq_y/4];
*x_sc = shmem_sc;
*x_qh = shmem_qh;
*x_dm = shmem_dm;
*x_ql = shmem_ql;
}
// Loads one shared-memory tile of q3_K data for the mul-mat kernel:
// quant words into x_ql, per-block d factors (stored as floats) into the
// dm tile, inverted high-bit masks into x_qh, and repacked 6-bit scales
// (low 4 bits + high 2 bits, re-centered by -32) into x_sc.
template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q3_K(
const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
__builtin_assume(i_offset >= 0);
__builtin_assume(i_offset < nwarps);
__builtin_assume(k >= 0);
__builtin_assume(k < WARP_SIZE);
const int kbx = k / QI3_K;
const int kqsx = k % QI3_K;
const block_q3_K * bx0 = (block_q3_K *) vx;
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
int i = i0 + i_offset;
if (need_check) {
i = min(i, i_max); // clamp: out-of-range rows re-read row i_max
}
const block_q3_K * bxi = bx0 + i*blocks_per_row + kbx;
x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8(bxi->qs, kqsx);
}
const int blocks_per_tile_x_row = WARP_SIZE / QI3_K;
const int kbxd = k % blocks_per_tile_x_row;
float * x_dmf = (float *) x_dm; // q3_K stores a single float d per block in the dm tile
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI3_K) {
int i = (i0 + i_offset * QI3_K + k / blocks_per_tile_x_row) % mmq_y;
if (need_check) {
i = min(i, i_max);
}
const block_q3_K * bxi = bx0 + i*blocks_per_row + kbxd;
x_dmf[i * (WARP_SIZE/QI3_K) + i / QI3_K + kbxd] = bxi->d;
}
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 2) {
int i = i0 + i_offset * 2 + k / (WARP_SIZE/2);
if (need_check) {
i = min(i, i_max);
}
const block_q3_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/2)) / (QI3_K/2);
// invert the mask with ~ so that a 0/1 results in 4/0 being subtracted
x_qh[i * (WARP_SIZE/2) + i / 2 + k % (WARP_SIZE/2)] = ~get_int_from_uint8(bxi->hmask, k % (QI3_K/2));
}
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 4) {
int i = i0 + i_offset * 4 + k / (WARP_SIZE/4);
if (need_check) {
i = min(i, i_max);
}
const block_q3_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/4)) / (QI3_K/4);
const int ksc = k % (QI3_K/4);
// combine the low 4 scale bits with the high 2 bits into one byte each
const int ksc_low = ksc % (QI3_K/8);
const int shift_low = 4 * (ksc / (QI3_K/8));
const int sc_low = (get_int_from_uint8(bxi->scales, ksc_low) >> shift_low) & 0x0F0F0F0F;
const int ksc_high = QI3_K/8;
const int shift_high = 2 * ksc;
const int sc_high = ((get_int_from_uint8(bxi->scales, ksc_high) >> shift_high) << 4) & 0x30303030;
// subtract 32 per byte (saturating SIMD sub) to re-center the 6-bit scales
const int sc = __vsubss4(sc_low | sc_high, 0x20202020);
x_sc[i * (WARP_SIZE/4) + i / 4 + k % (WARP_SIZE/4)] = sc;
}
}
// Tile-level dot product for the q3_K mul-mat kernel: reconstructs the
// signed 3-bit quants from the low-bit tile (x_ql) and the inverted
// high-bit tile (x_qh), then defers to vec_dot_q3_K_q8_1_impl_mmq.
static __device__ __forceinline__ float vec_dot_q3_K_q8_1_mul_mat(
const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
const int kbx = k / QI3_K;
const int ky = (k % QI3_K) * QR3_K;
const float * x_dmf = (const float *) x_dm; // q3_K d factors stored as floats
const float * y_df = (const float *) y_ds;
const int8_t * scales = ((int8_t *) (x_sc + i * (WARP_SIZE/4) + i/4 + kbx*4)) + ky/4;
int v[QR3_K*VDR_Q3_K_Q8_1_MMQ];
#pragma unroll
for (int l = 0; l < QR3_K*VDR_Q3_K_Q8_1_MMQ; ++l) {
const int kqsx = i * (WARP_SIZE + 1) + kbx*QI3_K + (QI3_K/2) * (ky/(2*QI3_K)) + ky % (QI3_K/2);
const int shift = 2 * ((ky % 32) / 8);
const int vll = (x_ql[kqsx + l] >> shift) & 0x03030303; // low 2 bits per value
const int vh = x_qh[i * (WARP_SIZE/2) + i/2 + kbx * (QI3_K/2) + (ky+l)%8] >> ((ky+l) / 8);
const int vlh = (vh << 2) & 0x04040404; // high bit (already inverted at load time)
v[l] = __vsubss4(vll, vlh);
}
const int index_y = j * WARP_SIZE + (k*QR3_K) % WARP_SIZE;
return vec_dot_q3_K_q8_1_impl_mmq(v, &y_qs[index_y], scales, x_dmf[i * (WARP_SIZE/QI3_K) + i/QI3_K + kbx], y_df[index_y/QI8_1]);
}
// q4_K x q8_1 dot product for the mmvq path. In the default (QK_K == 256)
// build it unpacks two quant words plus the 6-bit scales/mins encoded in
// bq4_K->scales and defers to vec_dot_q4_K_q8_1_impl_vmmq. The
// GGML_QKK_64 build instead computes the result directly with __dp4a
// (requires MIN_CC_DP4A).
static __device__ __forceinline__ float vec_dot_q4_K_q8_1(
const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {
#ifndef GGML_QKK_64
const block_q4_K * bq4_K = (const block_q4_K *) vbq;
int v[2];
int u[2*QR4_K];
float d8[QR4_K];
// iqs is in 0,2..30. bq8_offset = iqs/4 -> bq8_offset = 0, 2, 4, 6
const int bq8_offset = QR4_K * ((iqs/2) / (QI8_1/2));
// iqs = 0....3 -> bq8_offset = 0, want q4_offset = 0, 4, 8, 12
// iqs = 4....7 -> bq8_offset = 2, want q4_offset = 32, 36, 40, 44
// iqs = 8...11 -> bq8_offset = 4, want q4_offset = 64, 68, 72, 76
// iqs = 12..15 -> bq8_offset = 6, want q4_offset = 96, 100, 104, 108
const int * q4 = (const int *)(bq4_K->qs + 16 * bq8_offset + 4 * ((iqs/2)%4));
v[0] = q4[0];
v[1] = q4[4];
// decode the 6-bit scales (sc) and mins (m) for this position
const uint16_t * scales = (const uint16_t *)bq4_K->scales;
uint16_t aux[2];
const int j = bq8_offset/2;
if (j < 2) {
aux[0] = scales[j+0] & 0x3f3f;
aux[1] = scales[j+2] & 0x3f3f;
} else {
aux[0] = ((scales[j+2] >> 0) & 0x0f0f) | ((scales[j-2] & 0xc0c0) >> 2);
aux[1] = ((scales[j+2] >> 4) & 0x0f0f) | ((scales[j-0] & 0xc0c0) >> 2);
}
const uint8_t * sc = (const uint8_t *)aux;
const uint8_t * m = sc + 2;
for (int i = 0; i < QR4_K; ++i) {
const block_q8_1 * bq8i = bq8_1 + bq8_offset + i;
d8[i] = __low2half(bq8i->ds); // implicit half -> float conversion
const int * q8 = (const int *)bq8i->qs + ((iqs/2)%4);
u[2*i+0] = q8[0];
u[2*i+1] = q8[4];
}
return vec_dot_q4_K_q8_1_impl_vmmq(v, u, sc, m, bq4_K->dm, d8);
#else
#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics
const block_q4_K * bq4_K = (const block_q4_K *) vbq;
float sumf_d = 0.0f;
float sumf_m = 0.0f;
uint16_t aux16[2];
const uint8_t * s = (const uint8_t *)aux16;
const uint16_t * a = (const uint16_t *)bq4_K->scales;
aux16[0] = a[0] & 0x0f0f;
aux16[1] = (a[0] >> 4) & 0x0f0f;
const float dall = bq4_K->dm[0];
const float dmin = bq4_K->dm[1];
const float d8_1 = __low2float(bq8_1[0].ds);
const float d8_2 = __low2float(bq8_1[1].ds);
const int ui1 = *((const int *)bq8_1[0].qs + (iqs/2));
const int ui2 = *((const int *)bq8_1[0].qs + (iqs/2) + 4);
const int ui3 = *((const int *)bq8_1[1].qs + (iqs/2));
const int ui4 = *((const int *)bq8_1[1].qs + (iqs/2) + 4);
const int * q4 = (const int *)bq4_K->qs + (iqs/2);
const int v1 = q4[0];
const int v2 = q4[4];
// dot1/dot2: quant x quant products; dot3/dot4: q8 sums used for the min term
const int dot1 = __dp4a(ui2, v2 & 0x0f0f0f0f, __dp4a(ui1, v1 & 0x0f0f0f0f, 0));
const int dot2 = __dp4a(ui4, (v2 >> 4) & 0x0f0f0f0f, __dp4a(ui3, (v1 >> 4) & 0x0f0f0f0f, 0));
const int dot3 = __dp4a(0x01010101, ui2, __dp4a(0x01010101, ui1, 0));
const int dot4 = __dp4a(0x01010101, ui4, __dp4a(0x01010101, ui3, 0));
sumf_d += d8_1 * (dot1 * s[0]) + d8_2 * (dot2 * s[1]);
sumf_m += d8_1 * (dot3 * s[2]) + d8_2 * (dot4 * s[3]);
return dall * sumf_d - dmin * sumf_m;
#else
assert(false);
return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
#endif
}
// Shared-memory tiles for the q4_K mul-mat path: quant words, per-block dm
// values, and repacked scales. x_qh is unused for this type.
template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q4_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {
__shared__ int shmem_ql[mmq_y * (WARP_SIZE) + mmq_y];
__shared__ half2 shmem_dm[mmq_y * (WARP_SIZE/QI4_K) + mmq_y/QI4_K];
__shared__ int shmem_sc[mmq_y * (WARP_SIZE/8) + mmq_y/8];
*x_sc = shmem_sc;
*x_dm = shmem_dm;
*x_ql = shmem_ql;
}
// Loads one shared-memory tile of q4_K data for the mul-mat kernel:
// quant words into x_ql, per-block dm values into x_dm, and the 6-bit
// scales/mins repacked into x_sc (layout documented inline).
template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q4_K(
const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
__builtin_assume(i_offset >= 0);
__builtin_assume(i_offset < nwarps);
__builtin_assume(k >= 0);
__builtin_assume(k < WARP_SIZE);
const int kbx = k / QI4_K; // == 0 if QK_K == 256
const int kqsx = k % QI4_K; // == k if QK_K == 256
const block_q4_K * bx0 = (block_q4_K *) vx;
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
int i = i0 + i_offset;
if (need_check) {
i = min(i, i_max); // clamp: out-of-range rows re-read row i_max
}
const block_q4_K * bxi = bx0 + i*blocks_per_row + kbx;
x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx);
}
const int blocks_per_tile_x_row = WARP_SIZE / QI4_K; // == 1 if QK_K == 256
const int kbxd = k % blocks_per_tile_x_row; // == 0 if QK_K == 256
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI4_K) {
int i = (i0 + i_offset * QI4_K + k / blocks_per_tile_x_row) % mmq_y;
if (need_check) {
i = min(i, i_max);
}
const block_q4_K * bxi = bx0 + i*blocks_per_row + kbxd;
#if QK_K == 256
x_dm[i * (WARP_SIZE/QI4_K) + i / QI4_K + kbxd] = bxi->dm;
#else
x_dm[i * (WARP_SIZE/QI4_K) + i / QI4_K + kbxd] = {bxi->dm[0], bxi->dm[1]};
#endif
}
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 8) {
int i = (i0 + i_offset * 8 + k / (WARP_SIZE/8)) % mmq_y;
if (need_check) {
i = min(i, i_max);
}
const block_q4_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/8)) / (QI4_K/8);
const int * scales = (int *) bxi->scales;
const int ksc = k % (WARP_SIZE/8);
// scale arrangement after the following two lines: sc0,...,sc3, sc4,...,sc7, m0,...,m3, m4,...,m8
int scales8 = (scales[(ksc%2) + (ksc!=0)] >> (4 * (ksc & (ksc/2)))) & 0x0F0F0F0F; // lower 4 bits
scales8 |= (scales[ksc/2] >> (2 * (ksc % 2))) & 0x30303030; // upper 2 bits
x_sc[i * (WARP_SIZE/8) + i / 8 + ksc] = scales8;
}
}
// Tile-level dot product for the q4_K mul-mat kernel: locates the repacked
// scale/min bytes for this k position (sc points at scales, sc+8 at the
// mins — see the layout comment in load_tiles_q4_K) and defers to
// vec_dot_q4_K_q8_1_impl_mmq.
static __device__ __forceinline__ float vec_dot_q4_K_q8_1_mul_mat(
const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
const uint8_t * sc = ((const uint8_t *) &x_sc[i * (WARP_SIZE/8) + i/8 + k/16]) + 2*((k % 16) / 8);
const int index_y = j * WARP_SIZE + (QR4_K*k) % WARP_SIZE;
return vec_dot_q4_K_q8_1_impl_mmq(&x_ql[i * (WARP_SIZE + 1) + k], &y_qs[index_y], sc, sc+8,
x_dm[i * (WARP_SIZE/QI4_K) + i/QI4_K], &y_ds[index_y/QI8_1]);
}
// q5_K x q8_1 dot product for the mmvq path. In the default (QK_K == 256)
// build it gathers low quant words (vl), shifted high-bit words (vh), and
// decodes the 6-bit scales/mins before deferring to
// vec_dot_q5_K_q8_1_impl_vmmq. The GGML_QKK_64 build computes the result
// directly with __dp4a (requires MIN_CC_DP4A).
static __device__ __forceinline__ float vec_dot_q5_K_q8_1(
const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {
#ifndef GGML_QKK_64
const block_q5_K * bq5_K = (const block_q5_K *) vbq;
int vl[2];
int vh[2];
int u[2*QR5_K];
float d8[QR5_K];
const int bq8_offset = QR5_K * ((iqs/2) / (QI8_1/2));
const int * ql = (const int *)(bq5_K->qs + 16 * bq8_offset + 4 * ((iqs/2)%4));
const int * qh = (const int *)(bq5_K->qh + 4 * ((iqs/2)%4));
vl[0] = ql[0];
vl[1] = ql[4];
vh[0] = qh[0] >> bq8_offset;
vh[1] = qh[4] >> bq8_offset;
// decode the 6-bit scales (sc) and mins (m) for this position
const uint16_t * scales = (const uint16_t *)bq5_K->scales;
uint16_t aux[2];
const int j = bq8_offset/2;
if (j < 2) {
aux[0] = scales[j+0] & 0x3f3f;
aux[1] = scales[j+2] & 0x3f3f;
} else {
aux[0] = ((scales[j+2] >> 0) & 0x0f0f) | ((scales[j-2] & 0xc0c0) >> 2);
aux[1] = ((scales[j+2] >> 4) & 0x0f0f) | ((scales[j-0] & 0xc0c0) >> 2);
}
const uint8_t * sc = (const uint8_t *)aux;
const uint8_t * m = sc + 2;
#pragma unroll
for (int i = 0; i < QR5_K; ++i) {
const block_q8_1 * bq8i = bq8_1 + bq8_offset + i;
d8[i] = __low2float(bq8i->ds);
const int * q8 = (const int *)bq8i->qs + ((iqs/2)%4);
u[2*i+0] = q8[0];
u[2*i+1] = q8[4];
}
return vec_dot_q5_K_q8_1_impl_vmmq(vl, vh, u, sc, m, bq5_K->dm, d8);
#else
#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics
const block_q5_K * bq5_K = (const block_q5_K *) vbq;
const int8_t * s = bq5_K->scales;
const float d = bq5_K->d;
const float d8_1 = __low2half(bq8_1[0].ds);
const float d8_2 = __low2half(bq8_1[1].ds);
const int ui1 = *((const int *)bq8_1[0].qs + (iqs/2));
const int ui2 = *((const int *)bq8_1[0].qs + (iqs/2) + 4);
const int ui3 = *((const int *)bq8_1[1].qs + (iqs/2));
const int ui4 = *((const int *)bq8_1[1].qs + (iqs/2) + 4);
const int * ql = (const int *)bq5_K->qs + (iqs/2);
const int vl1 = ql[0];
const int vl2 = ql[4];
const int step = 4 * (iqs/2); // 0, 4, 8, 12
const int im = step/8; // = 0 for iqs = 0, 2, = 1 for iqs = 4, 6
const int in = step%8; // 0, 4, 0, 4
const int vh = (*((const int *)(bq5_K->qh + in))) >> im;
// merge the high bit (xor flips it so set means subtract) with each nibble
const int v1 = (((vh << 4) & 0x10101010) ^ 0x10101010) | ((vl1 >> 0) & 0x0f0f0f0f);
const int v2 = (((vh << 2) & 0x10101010) ^ 0x10101010) | ((vl2 >> 0) & 0x0f0f0f0f);
const int v3 = (((vh >> 0) & 0x10101010) ^ 0x10101010) | ((vl1 >> 4) & 0x0f0f0f0f);
const int v4 = (((vh >> 2) & 0x10101010) ^ 0x10101010) | ((vl2 >> 4) & 0x0f0f0f0f);
const float sumf_d = d8_1 * (__dp4a(ui1, v1, 0) * s[0] + __dp4a(ui2, v2, 0) * s[1])
+ d8_2 * (__dp4a(ui3, v3, 0) * s[2] + __dp4a(ui4, v4, 0) * s[3]);
return d * sumf_d;
#else
assert(false);
return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
#endif
}
// Shared-memory tiles for the q5_K mul-mat path: expanded quant words
// (double-width rows, since the 5th bit is folded in at load time),
// per-block dm values, and repacked scales. x_qh is unused.
template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q5_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {
__shared__ int shmem_ql[mmq_y * (2*WARP_SIZE) + mmq_y];
__shared__ half2 shmem_dm[mmq_y * (WARP_SIZE/QI5_K) + mmq_y/QI5_K];
__shared__ int shmem_sc[mmq_y * (WARP_SIZE/8) + mmq_y/8];
*x_sc = shmem_sc;
*x_dm = shmem_dm;
*x_ql = shmem_ql;
}
// Loads one shared-memory tile of q5_K data for the mul-mat kernel:
// expands each quant word into two words with the 5th bit from qh folded
// in, stores per-block dm values, and repacks the 6-bit scales/mins into
// x_sc (same layout as the q4_K loader).
template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q5_K(
const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
__builtin_assume(i_offset >= 0);
__builtin_assume(i_offset < nwarps);
__builtin_assume(k >= 0);
__builtin_assume(k < WARP_SIZE);
const int kbx = k / QI5_K; // == 0 if QK_K == 256
const int kqsx = k % QI5_K; // == k if QK_K == 256
const block_q5_K * bx0 = (block_q5_K *) vx;
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
int i = i0 + i_offset;
if (need_check) {
i = min(i, i_max); // clamp: out-of-range rows re-read row i_max
}
const block_q5_K * bxi = bx0 + i*blocks_per_row + kbx;
const int ky = QR5_K*kqsx;
const int ql = get_int_from_uint8_aligned(bxi->qs, kqsx);
const int ql0 = (ql >> 0) & 0x0F0F0F0F; // low nibbles
const int ql1 = (ql >> 4) & 0x0F0F0F0F; // high nibbles
const int qh = get_int_from_uint8_aligned(bxi->qh, kqsx % (QI5_K/4));
const int qh0 = ((qh >> (2 * (kqsx / (QI5_K/4)) + 0)) << 4) & 0x10101010;
const int qh1 = ((qh >> (2 * (kqsx / (QI5_K/4)) + 1)) << 4) & 0x10101010;
const int kq0 = ky - ky % (QI5_K/2) + k % (QI5_K/4) + 0;
const int kq1 = ky - ky % (QI5_K/2) + k % (QI5_K/4) + (QI5_K/4);
x_ql[i * (2*WARP_SIZE + 1) + kq0] = ql0 | qh0;
x_ql[i * (2*WARP_SIZE + 1) + kq1] = ql1 | qh1;
}
const int blocks_per_tile_x_row = WARP_SIZE / QI5_K; // == 1 if QK_K == 256
const int kbxd = k % blocks_per_tile_x_row; // == 0 if QK_K == 256
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI5_K) {
int i = (i0 + i_offset * QI5_K + k / blocks_per_tile_x_row) % mmq_y;
if (need_check) {
i = min(i, i_max);
}
const block_q5_K * bxi = bx0 + i*blocks_per_row + kbxd;
#if QK_K == 256
x_dm[i * (WARP_SIZE/QI5_K) + i / QI5_K + kbxd] = bxi->dm;
#endif
}
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 8) {
int i = (i0 + i_offset * 8 + k / (WARP_SIZE/8)) % mmq_y;
if (need_check) {
i = min(i, i_max);
}
const block_q5_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/8)) / (QI5_K/8);
const int * scales = (int *) bxi->scales;
const int ksc = k % (WARP_SIZE/8);
// scale arrangement after the following two lines: sc0,...,sc3, sc4,...,sc7, m0,...,m3, m4,...,m8
int scales8 = (scales[(ksc%2) + (ksc!=0)] >> (4 * (ksc & (ksc/2)))) & 0x0F0F0F0F; // lower 4 bits
scales8 |= (scales[ksc/2] >> (2 * (ksc % 2))) & 0x30303030; // upper 2 bits
x_sc[i * (WARP_SIZE/8) + i / 8 + ksc] = scales8;
}
}
// Tile-level dot product for the q5_K mul-mat kernel: locates the repacked
// scale/min bytes (sc / sc+8) and the expanded quants in the double-width
// x_ql tile, then defers to vec_dot_q5_K_q8_1_impl_mmq.
static __device__ __forceinline__ float vec_dot_q5_K_q8_1_mul_mat(
const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
const uint8_t * sc = ((const uint8_t *) &x_sc[i * (WARP_SIZE/8) + i/8 + k/16]) + 2 * ((k % 16) / 8);
const int index_x = i * (QR5_K*WARP_SIZE + 1) + QR5_K*k;
const int index_y = j * WARP_SIZE + (QR5_K*k) % WARP_SIZE;
return vec_dot_q5_K_q8_1_impl_mmq(&x_ql[index_x], &y_qs[index_y], sc, sc+8,
x_dm[i * (WARP_SIZE/QI5_K) + i/QI5_K], &y_ds[index_y/QI8_1]);
}
// q6_K x q8_1 dot product for the mmvq path: loads one low-bit quant word
// (vl), the matching shifted high-bit word (vh), the scale bytes, and
// QR6_K q8_1 words with their d factors, then defers to
// vec_dot_q6_K_q8_1_impl_mmvq.
static __device__ __forceinline__ float vec_dot_q6_K_q8_1(
const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {
const block_q6_K * bq6_K = (const block_q6_K *) vbq;
const int bq8_offset = 2 * QR6_K * (iqs / (QI6_K/2)) + (iqs % (QI6_K/2)) / (QI6_K/4);
const int scale_offset = (QI6_K/4) * (iqs / (QI6_K/2)) + (iqs % (QI6_K/2)) / (QI6_K/8);
const int vh_shift = 2 * ((iqs % (QI6_K/2)) / (QI6_K/4));
const int vl = get_int_from_uint8(bq6_K->ql, iqs);
const int vh = get_int_from_uint8(bq6_K->qh, (QI6_K/4) * (iqs / (QI6_K/2)) + iqs % (QI6_K/4)) >> vh_shift;
const int8_t * scales = bq6_K->scales + scale_offset;
int u[QR6_K];
float d8[QR6_K];
#pragma unroll
for (int i = 0; i < QR6_K; ++i) {
u[i] = get_int_from_int8_aligned(bq8_1[bq8_offset + 2*i].qs, iqs % QI8_1);
d8[i] = __low2half(bq8_1[bq8_offset + 2*i].ds); // implicit half -> float conversion
}
return vec_dot_q6_K_q8_1_impl_mmvq(vl, vh, u, scales, bq6_K->d, d8);
}
// Shared-memory tiles for the q6_K mul-mat path: expanded quant words
// (double-width rows — high bits folded in at load time), per-block d
// factors (stored as floats behind the half2* interface), and scales.
// x_qh is unused for this type.
template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q6_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {
__shared__ int shmem_ql[mmq_y * (2*WARP_SIZE) + mmq_y];
__shared__ half2 shmem_dm[mmq_y * (WARP_SIZE/QI6_K) + mmq_y/QI6_K];
__shared__ int shmem_sc[mmq_y * (WARP_SIZE/8) + mmq_y/8];
*x_sc = shmem_sc;
*x_dm = shmem_dm;
*x_ql = shmem_ql;
}
// Loads one shared-memory tile of q6_K data for the mul-mat kernel:
// expands each low-bit quant word into two words with the 2 high bits
// folded in and re-centered by -32 (saturating SIMD sub), stores per-block
// d factors as floats, and copies the scale bytes into x_sc.
template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q6_K(
const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
__builtin_assume(i_offset >= 0);
__builtin_assume(i_offset < nwarps);
__builtin_assume(k >= 0);
__builtin_assume(k < WARP_SIZE);
const int kbx = k / QI6_K; // == 0 if QK_K == 256
const int kqsx = k % QI6_K; // == k if QK_K == 256
const block_q6_K * bx0 = (block_q6_K *) vx;
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
int i = i0 + i_offset;
if (need_check) {
i = min(i, i_max); // clamp: out-of-range rows re-read row i_max
}
const block_q6_K * bxi = bx0 + i*blocks_per_row + kbx;
const int ky = QR6_K*kqsx;
const int ql = get_int_from_uint8(bxi->ql, kqsx);
const int ql0 = (ql >> 0) & 0x0F0F0F0F; // low nibbles
const int ql1 = (ql >> 4) & 0x0F0F0F0F; // high nibbles
const int qh = get_int_from_uint8(bxi->qh, (QI6_K/4) * (kqsx / (QI6_K/2)) + kqsx % (QI6_K/4));
const int qh0 = ((qh >> (2 * ((kqsx % (QI6_K/2)) / (QI6_K/4)))) << 4) & 0x30303030;
const int qh1 = (qh >> (2 * ((kqsx % (QI6_K/2)) / (QI6_K/4)))) & 0x30303030;
const int kq0 = ky - ky % QI6_K + k % (QI6_K/2) + 0;
const int kq1 = ky - ky % QI6_K + k % (QI6_K/2) + (QI6_K/2);
x_ql[i * (2*WARP_SIZE + 1) + kq0] = __vsubss4(ql0 | qh0, 0x20202020);
x_ql[i * (2*WARP_SIZE + 1) + kq1] = __vsubss4(ql1 | qh1, 0x20202020);
}
const int blocks_per_tile_x_row = WARP_SIZE / QI6_K; // == 1 if QK_K == 256
const int kbxd = k % blocks_per_tile_x_row; // == 0 if QK_K == 256
float * x_dmf = (float *) x_dm; // q6_K stores a single float d per block in the dm tile
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI6_K) {
int i = (i0 + i_offset * QI6_K + k / blocks_per_tile_x_row) % mmq_y;
if (need_check) {
i = min(i, i_max);
}
const block_q6_K * bxi = bx0 + i*blocks_per_row + kbxd;
x_dmf[i * (WARP_SIZE/QI6_K) + i / QI6_K + kbxd] = bxi->d;
}
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 8) {
int i = (i0 + i_offset * 8 + k / (WARP_SIZE/8)) % mmq_y;
if (need_check) {
i = min(i, i_max);
}
const block_q6_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/8)) / 4;
x_sc[i * (WARP_SIZE/8) + i / 8 + k % (WARP_SIZE/8)] = get_int_from_int8(bxi->scales, k % (QI6_K/8));
}
}
// Tile-level dot product for the q6_K mul-mat kernel: points at the scale
// bytes and the expanded quants in the double-width x_ql tile, then defers
// to vec_dot_q6_K_q8_1_impl_mmq with both d factors (stored as floats).
static __device__ __forceinline__ float vec_dot_q6_K_q8_1_mul_mat(
const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
const float * x_dmf = (const float *) x_dm;
const float * y_df = (const float *) y_ds;
const int8_t * sc = ((const int8_t *) &x_sc[i * (WARP_SIZE/8) + i/8 + k/8]);
const int index_x = i * (QR6_K*WARP_SIZE + 1) + QR6_K*k;
const int index_y = j * WARP_SIZE + (QR6_K*k) % WARP_SIZE;
return vec_dot_q6_K_q8_1_impl_mmq(&x_ql[index_x], &y_qs[index_y], sc, x_dmf[i * (WARP_SIZE/QI6_K) + i/QI6_K], &y_df[index_y/QI8_1]);
}
// Generic tiled quantized matrix multiplication. Each thread block computes
// an mmq_y x mmq_x tile of dst: it repeatedly loads a tile of the quantized
// x operand (via the type-specific load_tiles functor) and the matching
// q8_1 y data into shared memory, accumulates partial dot products with the
// type-specific vec_dot functor, and finally writes the tile to dst with
// bounds checks on both dimensions.
template <int qk, int qr, int qi, bool need_sum, typename block_q_t, int mmq_x, int mmq_y, int nwarps,
allocate_tiles_cuda_t allocate_tiles, load_tiles_cuda_t load_tiles, int vdr, vec_dot_q_mul_mat_cuda_t vec_dot>
static __device__ __forceinline__ void mul_mat_q(
const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
const block_q_t * x = (const block_q_t *) vx;
const block_q8_1 * y = (const block_q8_1 *) vy;
const int blocks_per_row_x = ncols_x / qk;
const int blocks_per_col_y = nrows_y / QK8_1;
const int blocks_per_warp = WARP_SIZE / qi;
const int & ncols_dst = ncols_y;
// tile origin for this thread block
const int row_dst_0 = blockIdx.x*mmq_y;
const int & row_x_0 = row_dst_0;
const int col_dst_0 = blockIdx.y*mmq_x;
const int & col_y_0 = col_dst_0;
// per-type shared tiles for the x operand (set up by the functor)
int * tile_x_ql = nullptr;
half2 * tile_x_dm = nullptr;
int * tile_x_qh = nullptr;
int * tile_x_sc = nullptr;
allocate_tiles(&tile_x_ql, &tile_x_dm, &tile_x_qh, &tile_x_sc);
// shared tiles for the y (q8_1) operand
__shared__ int tile_y_qs[mmq_x * WARP_SIZE];
__shared__ half2 tile_y_ds[mmq_x * WARP_SIZE/QI8_1];
float sum[mmq_y/WARP_SIZE][mmq_x/nwarps] = {0.0f};
// march along the shared x/y dimension, one warp-sized chunk of blocks at a time
for (int ib0 = 0; ib0 < blocks_per_row_x; ib0 += blocks_per_warp) {
load_tiles(x + row_x_0*blocks_per_row_x + ib0, tile_x_ql, tile_x_dm, tile_x_qh, tile_x_sc,
threadIdx.y, nrows_x-row_x_0-1, threadIdx.x, blocks_per_row_x);
#pragma unroll
for (int ir = 0; ir < qr; ++ir) {
const int kqs = ir*WARP_SIZE + threadIdx.x;
const int kbxd = kqs / QI8_1;
#pragma unroll
for (int i = 0; i < mmq_x; i += nwarps) {
const int col_y_eff = min(col_y_0 + threadIdx.y + i, ncols_y-1); // to prevent out-of-bounds memory accesses
const block_q8_1 * by0 = &y[col_y_eff*blocks_per_col_y + ib0 * (qk/QK8_1) + kbxd];
const int index_y = (threadIdx.y + i) * WARP_SIZE + kqs % WARP_SIZE;
tile_y_qs[index_y] = get_int_from_int8_aligned(by0->qs, threadIdx.x % QI8_1);
}
#pragma unroll
for (int ids0 = 0; ids0 < mmq_x; ids0 += nwarps * QI8_1) {
const int ids = (ids0 + threadIdx.y * QI8_1 + threadIdx.x / (WARP_SIZE/QI8_1)) % mmq_x;
const int kby = threadIdx.x % (WARP_SIZE/QI8_1);
const int col_y_eff = min(col_y_0 + ids, ncols_y-1);
// if the sum is not needed it's faster to transform the scale to f32 ahead of time
const half2 * dsi_src = &y[col_y_eff*blocks_per_col_y + ib0 * (qk/QK8_1) + ir*(WARP_SIZE/QI8_1) + kby].ds;
half2 * dsi_dst = &tile_y_ds[ids * (WARP_SIZE/QI8_1) + kby];
if (need_sum) {
*dsi_dst = *dsi_src;
} else {
float * dfi_dst = (float *) dsi_dst;
*dfi_dst = __low2half(*dsi_src);
}
}
__syncthreads(); // y tile fully written before it is read below
// #pragma unroll // unrolling this loop causes too much register pressure
for (int k = ir*WARP_SIZE/qr; k < (ir+1)*WARP_SIZE/qr; k += vdr) {
#pragma unroll
for (int j = 0; j < mmq_x; j += nwarps) {
#pragma unroll
for (int i = 0; i < mmq_y; i += WARP_SIZE) {
sum[i/WARP_SIZE][j/nwarps] += vec_dot(
tile_x_ql, tile_x_dm, tile_x_qh, tile_x_sc, tile_y_qs, tile_y_ds,
threadIdx.x + i, threadIdx.y + j, k);
}
}
}
__syncthreads(); // tiles consumed before the next iteration overwrites them
}
}
// write the accumulated tile to dst, guarding both tile edges
#pragma unroll
for (int j = 0; j < mmq_x; j += nwarps) {
const int col_dst = col_dst_0 + j + threadIdx.y;
if (col_dst >= ncols_dst) {
return;
}
#pragma unroll
for (int i = 0; i < mmq_y; i += WARP_SIZE) {
const int row_dst = row_dst_0 + threadIdx.x + i;
if (row_dst >= nrows_dst) {
continue;
}
dst[col_dst*nrows_dst + row_dst] = sum[i/WARP_SIZE][j/nwarps];
}
}
}
// Per-architecture tile sizes for the q4_0 mat-mat kernel:
// MMQ_X = output-tile width (y columns), MMQ_Y = output-tile height (x rows),
// NWARPS = warps per thread block.
#define  MMQ_X_Q4_0_AMPERE 64
#define  MMQ_Y_Q4_0_AMPERE 128
#define NWARPS_Q4_0_AMPERE 4
#define  MMQ_X_Q4_0_PASCAL 64
#define  MMQ_Y_Q4_0_PASCAL 64
#define NWARPS_Q4_0_PASCAL 8
// Tiled mat-mat multiplication of a q4_0-quantized x matrix with a
// q8_1-quantized y matrix. Thin dispatcher: selects tile sizes for the
// detected architecture and instantiates the generic mul_mat_q template.
// need_check is forwarded to the tile loader (presumably to guard edge tiles
// when nrows_x is not a multiple of mmq_y -- TODO confirm against load_tiles).
template <bool need_check> static __global__ void mul_mat_q4_0(
    const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
    const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
#if __CUDA_ARCH__ >= CC_TURING
    const int mmq_x = MMQ_X_Q4_0_AMPERE;
    const int mmq_y = MMQ_Y_Q4_0_AMPERE;
    const int nwarps = NWARPS_Q4_0_AMPERE;
    mul_mat_q<QK4_0, QR4_0, QI4_0, true, block_q4_0, mmq_x, mmq_y, nwarps, allocate_tiles_q4_0<mmq_y>,
        load_tiles_q4_0<mmq_y, nwarps, need_check>, VDR_Q4_0_Q8_1_MMQ, vec_dot_q4_0_q8_1_mul_mat>
        (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#elif __CUDA_ARCH__ >= MIN_CC_DP4A
    const int mmq_x = MMQ_X_Q4_0_PASCAL;
    const int mmq_y = MMQ_Y_Q4_0_PASCAL;
    const int nwarps = NWARPS_Q4_0_PASCAL;
    mul_mat_q<QK4_0, QR4_0, QI4_0, true, block_q4_0, mmq_x, mmq_y, nwarps, allocate_tiles_q4_0<mmq_y>,
        load_tiles_q4_0<mmq_y, nwarps, need_check>, VDR_Q4_0_Q8_1_MMQ, vec_dot_q4_0_q8_1_mul_mat>
        (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#else
    (void) vec_dot_q4_0_q8_1_mul_mat;
    assert(false); // mul_mat_q requires DP4A support; unreachable on older archs
#endif // __CUDA_ARCH__ >= CC_TURING
}
// Tile sizes for the q4_1 mat-mat kernel (see mul_mat_q template).
#define  MMQ_X_Q4_1_AMPERE 64
#define  MMQ_Y_Q4_1_AMPERE 128
#define NWARPS_Q4_1_AMPERE 4
#define  MMQ_X_Q4_1_PASCAL 64
#define  MMQ_Y_Q4_1_PASCAL 64
#define NWARPS_Q4_1_PASCAL 8
// q4_1 x q8_1 tiled mat-mat multiplication. On pre-Turing devices
// __launch_bounds__ caps the block size and requests at least 2 resident
// blocks per SM to bound register usage for this instantiation.
template <bool need_check> static __global__ void
#if __CUDA_ARCH__ < CC_TURING
    __launch_bounds__(WARP_SIZE*NWARPS_Q4_1_PASCAL, 2)
#endif // __CUDA_ARCH__ < CC_TURING
    mul_mat_q4_1(
    const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
    const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
#if __CUDA_ARCH__ >= CC_TURING
    const int mmq_x = MMQ_X_Q4_1_AMPERE;
    const int mmq_y = MMQ_Y_Q4_1_AMPERE;
    const int nwarps = NWARPS_Q4_1_AMPERE;
    mul_mat_q<QK4_1, QR4_1, QI4_1, true, block_q4_1, mmq_x, mmq_y, nwarps, allocate_tiles_q4_1<mmq_y>,
        load_tiles_q4_1<mmq_y, nwarps, need_check>, VDR_Q4_1_Q8_1_MMQ, vec_dot_q4_1_q8_1_mul_mat>
        (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#elif __CUDA_ARCH__ >= MIN_CC_DP4A
    const int mmq_x = MMQ_X_Q4_1_PASCAL;
    const int mmq_y = MMQ_Y_Q4_1_PASCAL;
    const int nwarps = NWARPS_Q4_1_PASCAL;
    mul_mat_q<QK4_1, QR4_1, QI4_1, true, block_q4_1, mmq_x, mmq_y, nwarps, allocate_tiles_q4_1<mmq_y>,
        load_tiles_q4_1<mmq_y, nwarps, need_check>, VDR_Q4_1_Q8_1_MMQ, vec_dot_q4_1_q8_1_mul_mat>
        (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#else
    (void) vec_dot_q4_1_q8_1_mul_mat;
    assert(false); // requires DP4A support
#endif // __CUDA_ARCH__ >= CC_TURING
}
// Tile sizes for the q5_0 mat-mat kernel (see mul_mat_q template).
#define  MMQ_X_Q5_0_AMPERE 128
#define  MMQ_Y_Q5_0_AMPERE 64
#define NWARPS_Q5_0_AMPERE 4
#define  MMQ_X_Q5_0_PASCAL 64
#define  MMQ_Y_Q5_0_PASCAL 64
#define NWARPS_Q5_0_PASCAL 8
// q5_0 x q8_1 tiled mat-mat multiplication; dispatches per-arch tile sizes
// into the generic mul_mat_q template.
template <bool need_check> static __global__ void mul_mat_q5_0(
    const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
    const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
#if __CUDA_ARCH__ >= CC_TURING
    const int mmq_x = MMQ_X_Q5_0_AMPERE;
    const int mmq_y = MMQ_Y_Q5_0_AMPERE;
    const int nwarps = NWARPS_Q5_0_AMPERE;
    mul_mat_q<QK5_0, QR5_0, QI5_0, false, block_q5_0, mmq_x, mmq_y, nwarps, allocate_tiles_q5_0<mmq_y>,
        load_tiles_q5_0<mmq_y, nwarps, need_check>, VDR_Q5_0_Q8_1_MMQ, vec_dot_q5_0_q8_1_mul_mat>
        (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#elif __CUDA_ARCH__ >= MIN_CC_DP4A
    const int mmq_x = MMQ_X_Q5_0_PASCAL;
    const int mmq_y = MMQ_Y_Q5_0_PASCAL;
    const int nwarps = NWARPS_Q5_0_PASCAL;
    mul_mat_q<QK5_0, QR5_0, QI5_0, false, block_q5_0, mmq_x, mmq_y, nwarps, allocate_tiles_q5_0<mmq_y>,
        load_tiles_q5_0<mmq_y, nwarps, need_check>, VDR_Q5_0_Q8_1_MMQ, vec_dot_q5_0_q8_1_mul_mat>
        (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#else
    (void) vec_dot_q5_0_q8_1_mul_mat;
    assert(false); // requires DP4A support
#endif // __CUDA_ARCH__ >= CC_TURING
}
// Tile sizes for the q5_1 mat-mat kernel (see mul_mat_q template).
#define  MMQ_X_Q5_1_AMPERE 128
#define  MMQ_Y_Q5_1_AMPERE 64
#define NWARPS_Q5_1_AMPERE 4
#define  MMQ_X_Q5_1_PASCAL 64
#define  MMQ_Y_Q5_1_PASCAL 64
#define NWARPS_Q5_1_PASCAL 8
// q5_1 x q8_1 tiled mat-mat multiplication; dispatches per-arch tile sizes
// into the generic mul_mat_q template.
template <bool need_check> static __global__ void mul_mat_q5_1(
    const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
    const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
#if __CUDA_ARCH__ >= CC_TURING
    const int mmq_x = MMQ_X_Q5_1_AMPERE;
    const int mmq_y = MMQ_Y_Q5_1_AMPERE;
    const int nwarps = NWARPS_Q5_1_AMPERE;
    mul_mat_q<QK5_1, QR5_1, QI5_1, true, block_q5_1, mmq_x, mmq_y, nwarps, allocate_tiles_q5_1<mmq_y>,
        load_tiles_q5_1<mmq_y, nwarps, need_check>, VDR_Q5_1_Q8_1_MMQ, vec_dot_q5_1_q8_1_mul_mat>
        (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#elif __CUDA_ARCH__ >= MIN_CC_DP4A
    const int mmq_x = MMQ_X_Q5_1_PASCAL;
    const int mmq_y = MMQ_Y_Q5_1_PASCAL;
    const int nwarps = NWARPS_Q5_1_PASCAL;
    mul_mat_q<QK5_1, QR5_1, QI5_1, true, block_q5_1, mmq_x, mmq_y, nwarps, allocate_tiles_q5_1<mmq_y>,
        load_tiles_q5_1<mmq_y, nwarps, need_check>, VDR_Q5_1_Q8_1_MMQ, vec_dot_q5_1_q8_1_mul_mat>
        (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#else
    (void) vec_dot_q5_1_q8_1_mul_mat;
    assert(false); // requires DP4A support
#endif // __CUDA_ARCH__ >= CC_TURING
}
// Tile sizes for the q8_0 mat-mat kernel (see mul_mat_q template).
#define  MMQ_X_Q8_0_AMPERE 128
#define  MMQ_Y_Q8_0_AMPERE 64
#define NWARPS_Q8_0_AMPERE 4
#define  MMQ_X_Q8_0_PASCAL 64
#define  MMQ_Y_Q8_0_PASCAL 64
#define NWARPS_Q8_0_PASCAL 8
// q8_0 x q8_1 tiled mat-mat multiplication; dispatches per-arch tile sizes
// into the generic mul_mat_q template.
template <bool need_check> static __global__ void mul_mat_q8_0(
    const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
    const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
#if __CUDA_ARCH__ >= CC_TURING
    const int mmq_x = MMQ_X_Q8_0_AMPERE;
    const int mmq_y = MMQ_Y_Q8_0_AMPERE;
    const int nwarps = NWARPS_Q8_0_AMPERE;
    mul_mat_q<QK8_0, QR8_0, QI8_0, false, block_q8_0, mmq_x, mmq_y, nwarps, allocate_tiles_q8_0<mmq_y>,
        load_tiles_q8_0<mmq_y, nwarps, need_check>, VDR_Q8_0_Q8_1_MMQ, vec_dot_q8_0_q8_1_mul_mat>
        (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#elif __CUDA_ARCH__ >= MIN_CC_DP4A
    const int mmq_x = MMQ_X_Q8_0_PASCAL;
    const int mmq_y = MMQ_Y_Q8_0_PASCAL;
    const int nwarps = NWARPS_Q8_0_PASCAL;
    mul_mat_q<QK8_0, QR8_0, QI8_0, false, block_q8_0, mmq_x, mmq_y, nwarps, allocate_tiles_q8_0<mmq_y>,
        load_tiles_q8_0<mmq_y, nwarps, need_check>, VDR_Q8_0_Q8_1_MMQ, vec_dot_q8_0_q8_1_mul_mat>
        (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#else
    (void) vec_dot_q8_0_q8_1_mul_mat;
    assert(false); // requires DP4A support
#endif // __CUDA_ARCH__ >= CC_TURING
}
// Tile sizes for the q2_K mat-mat kernel (see mul_mat_q template).
#define  MMQ_X_Q2_K_AMPERE 64
#define  MMQ_Y_Q2_K_AMPERE 128
#define NWARPS_Q2_K_AMPERE 4
#define  MMQ_X_Q2_K_PASCAL 64
#define  MMQ_Y_Q2_K_PASCAL 64
#define NWARPS_Q2_K_PASCAL 8
// q2_K x q8_1 tiled mat-mat multiplication; dispatches per-arch tile sizes
// into the generic mul_mat_q template.
template <bool need_check> static __global__ void mul_mat_q2_K(
    const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
    const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
#if __CUDA_ARCH__ >= CC_TURING
    const int mmq_x = MMQ_X_Q2_K_AMPERE;
    const int mmq_y = MMQ_Y_Q2_K_AMPERE;
    const int nwarps = NWARPS_Q2_K_AMPERE;
    mul_mat_q<QK_K, QR2_K, QI2_K, false, block_q2_K, mmq_x, mmq_y, nwarps, allocate_tiles_q2_K<mmq_y>,
        load_tiles_q2_K<mmq_y, nwarps, need_check>, VDR_Q2_K_Q8_1_MMQ, vec_dot_q2_K_q8_1_mul_mat>
        (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#elif __CUDA_ARCH__ >= MIN_CC_DP4A
    const int mmq_x = MMQ_X_Q2_K_PASCAL;
    const int mmq_y = MMQ_Y_Q2_K_PASCAL;
    const int nwarps = NWARPS_Q2_K_PASCAL;
    mul_mat_q<QK_K, QR2_K, QI2_K, false, block_q2_K, mmq_x, mmq_y, nwarps, allocate_tiles_q2_K<mmq_y>,
        load_tiles_q2_K<mmq_y, nwarps, need_check>, VDR_Q2_K_Q8_1_MMQ, vec_dot_q2_K_q8_1_mul_mat>
        (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#else
    (void) vec_dot_q2_K_q8_1_mul_mat;
    assert(false); // requires DP4A support
#endif // __CUDA_ARCH__ >= CC_TURING
}
// Tile sizes for the q3_K mat-mat kernel (see mul_mat_q template).
#define  MMQ_X_Q3_K_AMPERE 128
#define  MMQ_Y_Q3_K_AMPERE 128
#define NWARPS_Q3_K_AMPERE 4
#define  MMQ_X_Q3_K_PASCAL 64
#define  MMQ_Y_Q3_K_PASCAL 64
#define NWARPS_Q3_K_PASCAL 8
// q3_K x q8_1 tiled mat-mat multiplication. Pre-Turing builds carry a
// __launch_bounds__ attribute to cap block size and keep >=2 blocks per SM
// (register-pressure control for this instantiation).
template <bool need_check> static __global__ void
#if __CUDA_ARCH__ < CC_TURING
    __launch_bounds__(WARP_SIZE*NWARPS_Q3_K_PASCAL, 2)
#endif // __CUDA_ARCH__ < CC_TURING
    mul_mat_q3_K(
    const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
    const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
#if __CUDA_ARCH__ >= CC_TURING
    const int mmq_x = MMQ_X_Q3_K_AMPERE;
    const int mmq_y = MMQ_Y_Q3_K_AMPERE;
    const int nwarps = NWARPS_Q3_K_AMPERE;
    mul_mat_q<QK_K, QR3_K, QI3_K, false, block_q3_K, mmq_x, mmq_y, nwarps, allocate_tiles_q3_K<mmq_y>,
        load_tiles_q3_K<mmq_y, nwarps, need_check>, VDR_Q3_K_Q8_1_MMQ, vec_dot_q3_K_q8_1_mul_mat>
        (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#elif __CUDA_ARCH__ >= MIN_CC_DP4A
    const int mmq_x = MMQ_X_Q3_K_PASCAL;
    const int mmq_y = MMQ_Y_Q3_K_PASCAL;
    const int nwarps = NWARPS_Q3_K_PASCAL;
    mul_mat_q<QK_K, QR3_K, QI3_K, false, block_q3_K, mmq_x, mmq_y, nwarps, allocate_tiles_q3_K<mmq_y>,
        load_tiles_q3_K<mmq_y, nwarps, need_check>, VDR_Q3_K_Q8_1_MMQ, vec_dot_q3_K_q8_1_mul_mat>
        (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#else
    (void) vec_dot_q3_K_q8_1_mul_mat;
    assert(false); // requires DP4A support
#endif // __CUDA_ARCH__ >= CC_TURING
}
// Tile sizes for the q4_K mat-mat kernel (see mul_mat_q template).
#define  MMQ_X_Q4_K_AMPERE 64
#define  MMQ_Y_Q4_K_AMPERE 128
#define NWARPS_Q4_K_AMPERE 4
#define  MMQ_X_Q4_K_PASCAL 64
#define  MMQ_Y_Q4_K_PASCAL 64
#define NWARPS_Q4_K_PASCAL 8
// q4_K x q8_1 tiled mat-mat multiplication; __launch_bounds__ applied on
// pre-Turing builds to bound register usage (>=2 resident blocks per SM).
template <bool need_check> static __global__ void
#if __CUDA_ARCH__ < CC_TURING
    __launch_bounds__(WARP_SIZE*NWARPS_Q4_K_PASCAL, 2)
#endif // __CUDA_ARCH__ < CC_TURING
    mul_mat_q4_K(
    const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
    const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
#if __CUDA_ARCH__ >= CC_TURING
    const int mmq_x = MMQ_X_Q4_K_AMPERE;
    const int mmq_y = MMQ_Y_Q4_K_AMPERE;
    const int nwarps = NWARPS_Q4_K_AMPERE;
    mul_mat_q<QK_K, QR4_K, QI4_K, true, block_q4_K, mmq_x, mmq_y, nwarps, allocate_tiles_q4_K<mmq_y>,
        load_tiles_q4_K<mmq_y, nwarps, need_check>, VDR_Q4_K_Q8_1_MMQ, vec_dot_q4_K_q8_1_mul_mat>
        (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#elif __CUDA_ARCH__ >= MIN_CC_DP4A
    const int mmq_x = MMQ_X_Q4_K_PASCAL;
    const int mmq_y = MMQ_Y_Q4_K_PASCAL;
    const int nwarps = NWARPS_Q4_K_PASCAL;
    mul_mat_q<QK_K, QR4_K, QI4_K, true, block_q4_K, mmq_x, mmq_y, nwarps, allocate_tiles_q4_K<mmq_y>,
        load_tiles_q4_K<mmq_y, nwarps, need_check>, VDR_Q4_K_Q8_1_MMQ, vec_dot_q4_K_q8_1_mul_mat>
        (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#else
    (void) vec_dot_q4_K_q8_1_mul_mat;
    assert(false); // requires DP4A support
#endif // __CUDA_ARCH__ >= CC_TURING
}
// Tile sizes for the q5_K mat-mat kernel (see mul_mat_q template).
#define  MMQ_X_Q5_K_AMPERE 64
#define  MMQ_Y_Q5_K_AMPERE 128
#define NWARPS_Q5_K_AMPERE 4
#define  MMQ_X_Q5_K_PASCAL 64
#define  MMQ_Y_Q5_K_PASCAL 64
#define NWARPS_Q5_K_PASCAL 8
// q5_K x q8_1 tiled mat-mat multiplication; dispatches per-arch tile sizes
// into the generic mul_mat_q template.
template <bool need_check> static __global__ void mul_mat_q5_K(
    const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
    const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
#if __CUDA_ARCH__ >= CC_TURING
    const int mmq_x = MMQ_X_Q5_K_AMPERE;
    const int mmq_y = MMQ_Y_Q5_K_AMPERE;
    const int nwarps = NWARPS_Q5_K_AMPERE;
    mul_mat_q<QK_K, QR5_K, QI5_K, true, block_q5_K, mmq_x, mmq_y, nwarps, allocate_tiles_q5_K<mmq_y>,
        load_tiles_q5_K<mmq_y, nwarps, need_check>, VDR_Q5_K_Q8_1_MMQ, vec_dot_q5_K_q8_1_mul_mat>
        (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#elif __CUDA_ARCH__ >= MIN_CC_DP4A
    const int mmq_x = MMQ_X_Q5_K_PASCAL;
    const int mmq_y = MMQ_Y_Q5_K_PASCAL;
    const int nwarps = NWARPS_Q5_K_PASCAL;
    mul_mat_q<QK_K, QR5_K, QI5_K, true, block_q5_K, mmq_x, mmq_y, nwarps, allocate_tiles_q5_K<mmq_y>,
        load_tiles_q5_K<mmq_y, nwarps, need_check>, VDR_Q5_K_Q8_1_MMQ, vec_dot_q5_K_q8_1_mul_mat>
        (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#else
    (void) vec_dot_q5_K_q8_1_mul_mat;
    assert(false); // requires DP4A support
#endif // __CUDA_ARCH__ >= CC_TURING
}
// Tile sizes for the q6_K mat-mat kernel (see mul_mat_q template).
#define  MMQ_X_Q6_K_AMPERE 64
#define  MMQ_Y_Q6_K_AMPERE 64
#define NWARPS_Q6_K_AMPERE 4
#define  MMQ_X_Q6_K_PASCAL 64
#define  MMQ_Y_Q6_K_PASCAL 64
#define NWARPS_Q6_K_PASCAL 8
// q6_K x q8_1 tiled mat-mat multiplication; __launch_bounds__ applied on
// pre-Turing builds to bound register usage (>=2 resident blocks per SM).
template <bool need_check> static __global__ void
#if __CUDA_ARCH__ < CC_TURING
    __launch_bounds__(WARP_SIZE*NWARPS_Q6_K_PASCAL, 2)
#endif // __CUDA_ARCH__ < CC_TURING
    mul_mat_q6_K(
    const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
    const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
#if __CUDA_ARCH__ >= CC_TURING
    const int mmq_x = MMQ_X_Q6_K_AMPERE;
    const int mmq_y = MMQ_Y_Q6_K_AMPERE;
    const int nwarps = NWARPS_Q6_K_AMPERE;
    mul_mat_q<QK_K, QR6_K, QI6_K, false, block_q6_K, mmq_x, mmq_y, nwarps, allocate_tiles_q6_K<mmq_y>,
        load_tiles_q6_K<mmq_y, nwarps, need_check>, VDR_Q6_K_Q8_1_MMQ, vec_dot_q6_K_q8_1_mul_mat>
        (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#elif __CUDA_ARCH__ >= MIN_CC_DP4A
    const int mmq_x = MMQ_X_Q6_K_PASCAL;
    const int mmq_y = MMQ_Y_Q6_K_PASCAL;
    const int nwarps = NWARPS_Q6_K_PASCAL;
    mul_mat_q<QK_K, QR6_K, QI6_K, false, block_q6_K, mmq_x, mmq_y, nwarps, allocate_tiles_q6_K<mmq_y>,
        load_tiles_q6_K<mmq_y, nwarps, need_check>, VDR_Q6_K_Q8_1_MMQ, vec_dot_q6_K_q8_1_mul_mat>
        (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
#else
    (void) vec_dot_q6_K_q8_1_mul_mat;
    assert(false); // requires DP4A support
#endif // __CUDA_ARCH__ >= CC_TURING
}
// Quantized matrix-vector multiplication: each row of the quantized matrix vx
// (one block_q_t per qk weights) is dotted with the q8_1-quantized vector vy.
// One warp (the x dimension of the block) handles one row; rows are spread
// over blockIdx.y * blockDim.y + threadIdx.y. vdr = number of quantized
// values a single vec_dot_q_cuda call consumes per thread.
template <int qk, int qi, typename block_q_t, int vdr, vec_dot_q_cuda_t vec_dot_q_cuda>
static __global__ void mul_mat_vec_q(const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, const int ncols, const int nrows) {
    const int row = blockIdx.y*blockDim.y + threadIdx.y;
    if (row >= nrows) { // grid may overshoot the row count; guard the tail
        return;
    }
    const int blocks_per_row = ncols / qk;        // quant blocks along one x row
    const int blocks_per_warp = vdr * WARP_SIZE / qi; // x blocks consumed per warp iteration
    // partial sum for each thread
    float tmp = 0.0f;
    const block_q_t * x = (const block_q_t *) vx;
    const block_q8_1 * y = (const block_q8_1 *) vy;
    for (int i = 0; i < blocks_per_row; i += blocks_per_warp) {
        const int ibx = row*blocks_per_row + i + threadIdx.x / (qi/vdr); // x block index
        const int iby = (i + threadIdx.x / (qi/vdr)) * (qk/QK8_1); // y block index that aligns with ibx
        const int iqs = vdr * (threadIdx.x % (qi/vdr)); // x block quant index when casting the quants to int
        tmp += vec_dot_q_cuda(&x[ibx], &y[iby], iqs);
    }
    // sum up partial sums and write back result
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32); // butterfly warp reduction
    }
    if (threadIdx.x == 0) { // lane 0 holds the full row sum
        dst[row] = tmp;
    }
}
// Matrix-vector multiplication that dequantizes x on the fly: each warp
// processes one row of vx, dequantizing 2 values per inner iteration via
// dequantize_kernel and accumulating products with the f32 (or, with
// GGML_CUDA_F16, f16/half2) vector y. Lane partials are combined with a
// butterfly shuffle reduction and lane 0 writes dst[row].
template <int qk, int qr, dequantize_kernel_t dequantize_kernel>
static __global__ void dequantize_mul_mat_vec(const void * __restrict__ vx, const dfloat * __restrict__ y, float * __restrict__ dst, const int ncols, const int nrows) {
    // qk = quantized weights per x block
    // qr = number of quantized weights per data value in x block
    const int row = blockIdx.y*blockDim.y + threadIdx.y;
    if (row >= nrows) { // guard rows beyond the matrix
        return;
    }
    const int tid = threadIdx.x;
    const int iter_stride = 2*GGML_CUDA_DMMV_X;            // columns covered per outer iteration
    const int vals_per_iter = iter_stride / WARP_SIZE; // num quantized vals per thread and i iter
    const int y_offset = qr == 1 ? 1 : qk/2;           // distance between the two y values of a dequantized pair
    // partial sum for each thread
#ifdef GGML_CUDA_F16
    half2 tmp = {0.0f, 0.0f}; // two sums for f16 to take advantage of half2 intrinsics
#else
    float tmp = 0.0f;
#endif // GGML_CUDA_F16
    for (int i = 0; i < ncols; i += iter_stride) {
        const int col = i + vals_per_iter*tid;
        const int ib = (row*ncols + col)/qk; // x block index
        const int iqs = (col%qk)/qr; // x quant index
        const int iybs = col - col%qk; // y block start index
// processing >2 values per i iter is faster for fast GPUs
#pragma unroll
        for (int j = 0; j < vals_per_iter; j += 2) {
            // process 2 vals per j iter
            // dequantize
            // for qr = 2 the iqs needs to increase by 1 per j iter because 2 weights per data val
            dfloat2 v;
            dequantize_kernel(vx, ib, iqs + j/qr, v);
            // matrix multiplication
            // for qr = 2 the y index needs to increase by 1 per j iter because of y_offset = qk/2
#ifdef GGML_CUDA_F16
            tmp += __hmul2(v, {
                y[iybs + iqs + j/qr + 0],
                y[iybs + iqs + j/qr + y_offset]
            });
#else
            tmp += v.x * y[iybs + iqs + j/qr + 0];
            tmp += v.y * y[iybs + iqs + j/qr + y_offset];
#endif // GGML_CUDA_F16
        }
    }
    // sum up partial sums and write back result
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32); // butterfly warp reduction
    }
    if (tid == 0) {
#ifdef GGML_CUDA_F16
        dst[row] = tmp.x + tmp.y; // fold the two half sums into one f32 result
#else
        dst[row] = tmp;
#endif // GGML_CUDA_F16
    }
}
// Multiplies an f16 matrix x stored transposed-and-permuted (p021 layout)
// with a permuted (not transposed) f32 y, writing plain f32 dst.
// One warp computes one dst element per (row, channel); channel_x maps a dst
// channel onto an x channel (nchannels_y / nchannels_x dst channels share one
// x channel -- presumably a broadcast for grouped attention; TODO confirm).
static __global__ void mul_mat_p021_f16_f32(
    const void * __restrict__ vx, const float * __restrict__ y, float * __restrict__ dst,
    const int ncols_x, const int nrows_x, const int nchannels_x, const int nchannels_y) {
    const half * x = (const half *) vx;
    const int row_x = blockDim.y*blockIdx.y + threadIdx.y;
    const int channel = blockDim.z*blockIdx.z + threadIdx.z;
    const int channel_x = channel / (nchannels_y / nchannels_x);
    const int nrows_y = ncols_x;
    const int nrows_dst = nrows_x;
    const int row_dst = row_x;
    float tmp = 0.0f; // per-lane partial dot product
    for (int col_x0 = 0; col_x0 < ncols_x; col_x0 += blockDim.x) {
        const int col_x = col_x0 + threadIdx.x;
        if (col_x >= ncols_x) { // tail of the column loop
            break;
        }
        // x is transposed and permuted
        const int ix = row_x*nchannels_x*ncols_x + channel_x*ncols_x + col_x;
        const float xi = __half2float(x[ix]);
        const int row_y = col_x;
        // y is not transposed but permuted
        const int iy = channel*nrows_y + row_y;
        tmp += xi * y[iy];
    }
    // dst is not transposed and not permuted
    const int idst = channel*nrows_dst + row_dst;
    // sum up partial sums and write back result
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32); // butterfly warp reduction
    }
    if (threadIdx.x == 0) {
        dst[idst] = tmp;
    }
}
// f16 x f32 mat-vec product for a non-contiguous x tensor: explicit row and
// channel strides address x, so arbitrary (strided) layouts are supported.
// channel_x_divisor maps several dst channels onto one x channel (broadcast).
// One warp per (row, channel) output element, reduced with warp shuffles.
static __global__ void mul_mat_vec_nc_f16_f32( // nc == non-contiguous
    const void * __restrict__ vx, const float * __restrict__ y, float * __restrict__ dst, const int ncols_x, const int nrows_x,
    const int row_stride_x, const int channel_stride_x, const int channel_x_divisor) {
    const half * x = (const half *) vx;
    const int row_x = blockDim.y*blockIdx.y + threadIdx.y;
    const int channel = blockDim.z*blockIdx.z + threadIdx.z;
    const int channel_x = channel / channel_x_divisor;
    const int nrows_y = ncols_x;
    const int nrows_dst = nrows_x;
    const int row_dst = row_x;
    const int idst = channel*nrows_dst + row_dst;
    float tmp = 0.0f; // per-lane partial dot product
    for (int col_x0 = 0; col_x0 < ncols_x; col_x0 += blockDim.x) {
        const int col_x = col_x0 + threadIdx.x;
        if (col_x >= ncols_x) { // tail of the column loop
            break;
        }
        const int ix = channel_x*channel_stride_x + row_x*row_stride_x + col_x;
        const float xi = __half2float(x[ix]);
        const int row_y = col_x;
        const int iy = channel*nrows_y + row_y;
        tmp += xi * y[iy];
    }
    // sum up partial sums and write back result
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32); // butterfly warp reduction
    }
    if (threadIdx.x == 0) {
        dst[idst] = tmp;
    }
}
// Copy one f32 element between byte-addressed buffers, value unchanged.
static __device__ void cpy_1_f32_f32(const char * cxi, char * cdsti) {
    *(float *) cdsti = *(const float *) cxi;
}
// Copy one element between byte-addressed buffers, converting f32 -> f16.
static __device__ void cpy_1_f32_f16(const char * cxi, char * cdsti) {
    *(half *) cdsti = __float2half(*(const float *) cxi);
}
// Element-wise copy between two (possibly differently strided) 3D tensors of
// ne elements total; cpy_1 performs the per-element copy/conversion.
// ne00/ne01 + nb00/nb01/nb02 describe the source extents and byte strides,
// ne10/ne11 + nb10/nb11/nb12 the destination's.
template <cpy_kernel_t cpy_1>
static __global__ void cpy_f32_f16(const char * cx, char * cdst, const int ne,
                                   const int ne00, const int ne01, const int nb00, const int nb01, const int nb02,
                                   const int ne10, const int ne11, const int nb10, const int nb11, const int nb12) {
    const int i = blockDim.x*blockIdx.x + threadIdx.x;
    if (i >= ne) { // guard the grid tail
        return;
    }
    // determine indices i02/i12, i01/i11, i00/i10 as a function of index i of flattened tensor
    // then combine those indices with the corresponding byte offsets to get the total offsets
    const int i02 = i / (ne00*ne01);               // source outer index
    const int i01 = (i - i02*ne01*ne00) / ne00;    // source middle index
    const int i00 = i - i02*ne01*ne00 - i01*ne00;  // source inner index
    const int x_offset = i00*nb00 + i01*nb01 + i02*nb02; // source byte offset
    const int i12 = i / (ne10*ne11);               // destination outer index
    const int i11 = (i - i12*ne10*ne11) / ne10;    // destination middle index
    const int i10 = i - i12*ne10*ne11 - i11*ne10;  // destination inner index
    const int dst_offset = i10*nb10 + i11*nb11 + i12*nb12; // destination byte offset
    cpy_1(cx + x_offset, cdst + dst_offset);
}
// rope == RoPE == rotary positional embedding
// Rotates each adjacent pair (x[i], x[i+1]) of a row by angle
// theta = (p0 + p_delta * (row / p_delta_rows)) * theta_scale^(col/2).
// One thread handles one pair; col indexes the even element of the pair.
static __global__ void rope_f32(const float * x, float * dst, const int ncols, const float p0,
                                const float p_delta, const int p_delta_rows, const float theta_scale) {
    const int col = 2*(blockDim.y*blockIdx.y + threadIdx.y);
    if (col >= ncols) { // guard the grid tail
        return;
    }
    const int row = blockDim.x*blockIdx.x + threadIdx.x;
    const int i = row*ncols + col;
    const float theta = (p0 + p_delta * (row/p_delta_rows))*powf(theta_scale, col/2);
    const float sin_theta = sinf(theta);
    const float cos_theta = cosf(theta);
    const float x0 = x[i + 0];
    const float x1 = x[i + 1];
    // 2D rotation of the (x0, x1) pair by theta
    dst[i + 0] = x0*cos_theta - x1*sin_theta;
    dst[i + 1] = x0*sin_theta + x1*cos_theta;
}
// NeoX-style RoPE: pairs the element at offset col/2 with the one ncols/2
// further along the row (split-half pairing) instead of adjacent elements.
static __global__ void rope_neox_f32(const float * x, float * dst, const int ncols, const float p0,
                                     const float p_delta, const int p_delta_rows, const float theta_scale) {
    const int col = 2*(blockDim.y*blockIdx.y + threadIdx.y);
    if (col >= ncols) { // guard the grid tail
        return;
    }
    const int row = blockDim.x*blockIdx.x + threadIdx.x;
    const int i = row*ncols + col/2;
    const float theta = (p0 + p_delta * (row/p_delta_rows))*powf(theta_scale, col/2);
    const float sin_theta = sinf(theta);
    const float cos_theta = cosf(theta);
    const float x0 = x[i + 0];
    const float x1 = x[i + ncols/2]; // partner element from the second half of the row
    dst[i + 0] = x0*cos_theta - x1*sin_theta;
    dst[i + ncols/2] = x0*sin_theta + x1*cos_theta;
}
// GLM-style RoPE: the row is treated as four quarters. The first two quarters
// are rotated by theta = p * theta_scale^col, the last two by
// block_theta = block_p * theta_scale^col (two independent rotations, e.g.
// separate position encodings -- presumably GLM's 2D positions; TODO confirm).
static __global__ void rope_glm_f32(const float * x, float * dst, const int ncols, const float p, const float block_p, const float theta_scale) {
    const int col = blockDim.x*blockIdx.x + threadIdx.x;
    const int half_n_dims = ncols/4; // quarter of the row length
    if (col >= half_n_dims) { // one thread per quarter-row column
        return;
    }
    const int row = blockDim.y*blockIdx.y + threadIdx.y;
    const int i = row*ncols + col;
    const float col_theta_scale = powf(theta_scale, col);
    const float theta = p*col_theta_scale;
    const float sin_theta = sinf(theta);
    const float cos_theta = cosf(theta);
    const float x0 = x[i + 0];
    const float x1 = x[i + half_n_dims];
    dst[i + 0] = x0*cos_theta - x1*sin_theta;
    dst[i + half_n_dims] = x0*sin_theta + x1*cos_theta;
    const float block_theta = block_p*col_theta_scale;
    const float sin_block_theta = sinf(block_theta);
    const float cos_block_theta = cosf(block_theta);
    const float x2 = x[i + half_n_dims * 2];
    const float x3 = x[i + half_n_dims * 3];
    dst[i + half_n_dims * 2] = x2*cos_block_theta - x3*sin_block_theta;
    dst[i + half_n_dims * 3] = x2*sin_block_theta + x3*cos_block_theta;
}
// ALiBi (attention with linear biases): adds col * m_k to each element, where
// the per-head slope m_k is m0^(k+1) for the first n_heads_log2_floor heads
// and m1^(2*(k - n_heads_log2_floor) + 1) for the rest; the head index k is
// derived from the row (k_rows rows per head).
static __global__ void alibi_f32(const float * x, float * dst, const int ncols, const int k_rows,
                                 const int n_heads_log2_floor, const float m0, const float m1) {
    const int col = blockDim.x*blockIdx.x + threadIdx.x;
    if (col >= ncols) { // guard the grid tail
        return;
    }
    const int row = blockDim.y*blockIdx.y + threadIdx.y;
    const int i = row*ncols + col;
    const int k = row/k_rows; // head index
    float m_k;                // slope for this head
    if (k < n_heads_log2_floor) {
        m_k = powf(m0, k + 1);
    } else {
        m_k = powf(m1, 2 * (k - n_heads_log2_floor) + 1);
    }
    dst[i] = col * m_k + x[i];
}
// Causal masking: pushes entries above the diagonal (col > n_past + row) to a
// very negative value before softmax. Rows are not bounds-checked -- the
// launcher must size the grid exactly to the row count (TODO confirm caller).
static __global__ void diag_mask_inf_f32(const float * x, float * dst, const int ncols, const int rows_per_channel, const int n_past) {
    const int col = blockDim.y*blockIdx.y + threadIdx.y;
    const int row = blockDim.x*blockIdx.x + threadIdx.x;
    if (col >= ncols) { // guard the grid tail in the column dimension
        return;
    }
    const int i = row*ncols + col;
    // dst[i] = col > n_past + row ? -INFINITY : x[i];
    // the boolean comparison yields 0 or 1, so masked entries get x[i] - INT_MAX
    dst[i] = x[i] - (col > n_past + row % rows_per_channel) * INT_MAX; // equivalent within rounding error but slightly faster on GPU
}
// the CUDA soft max implementation differs from the CPU implementation
// instead of doubles floats are used
// Numerically-stable row softmax: per-row max is subtracted before expf.
// The __shfl_xor_sync reductions span 32 lanes, so the max/sum are only
// block-wide if the launch puts one full warp per row (blockDim assumed to
// make threadIdx.y the lane index -- launcher not visible here; TODO confirm).
static __global__ void soft_max_f32(const float * x, float * dst, const int ncols) {
    const int row = blockDim.x*blockIdx.x + threadIdx.x;
    const int block_size = blockDim.y;
    const int tid = threadIdx.y;
    float max_val = -INFINITY;
    // strided pass over the row to find this thread's local max
    for (int col = tid; col < ncols; col += block_size) {
        const int i = row*ncols + col;
        max_val = max(max_val, x[i]);
    }
    // find the max value in the block
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        max_val = max(max_val, __shfl_xor_sync(0xffffffff, max_val, mask, 32));
    }
    float tmp = 0.f; // partial sum of exponentials
    for (int col = tid; col < ncols; col += block_size) {
        const int i = row*ncols + col;
        const float val = expf(x[i] - max_val);
        tmp += val;
        dst[i] = val; // store unnormalized value; normalized below
    }
    // sum up partial sums
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
    }
    const float inv_tmp = 1.f / tmp; // single divide, then multiply per element
    for (int col = tid; col < ncols; col += block_size) {
        const int i = row*ncols + col;
        dst[i] *= inv_tmp;
    }
}
// Elementwise scaling: dst[i] = scale * x[i] for the first k elements.
// One thread per element; the tail of the last block is bounds-checked.
static __global__ void scale_f32(const float * x, float * dst, const float scale, const int k) {
    const int idx = blockDim.x*blockIdx.x + threadIdx.x;
    if (idx < k) {
        dst[idx] = scale * x[idx];
    }
}
// Host launcher for add_f32 on `stream`: one thread per element of x, grid
// sized by ceil-division over CUDA_ADD_BLOCK_SIZE.
static void add_f32_cuda(const float * x, const float * y, float * dst, const int kx, const int ky, cudaStream_t stream) {
    const int n_blocks = (kx + CUDA_ADD_BLOCK_SIZE - 1) / CUDA_ADD_BLOCK_SIZE;
    add_f32<<<n_blocks, CUDA_ADD_BLOCK_SIZE, 0, stream>>>(x, y, dst, kx, ky);
}
// Host launcher for add_f16_f32_f16 (f16 + f32 -> f16) on `stream`;
// grid sized by ceil-division over CUDA_ADD_BLOCK_SIZE.
static void add_f16_f32_f16_cuda(const half * x, const float * y, half * dst, const int k, cudaStream_t stream) {
    const int n_blocks = (k + CUDA_ADD_BLOCK_SIZE - 1) / CUDA_ADD_BLOCK_SIZE;
    add_f16_f32_f16<<<n_blocks, CUDA_ADD_BLOCK_SIZE, 0, stream>>>(x, y, dst, k);
}
// Host launcher for mul_f32 on `stream`; grid sized by ceil-division over
// CUDA_MUL_BLOCK_SIZE so the tail block is covered.
static void mul_f32_cuda(const float * x, const float * y, float * dst, const int kx, const int ky, cudaStream_t stream) {
    const int n_blocks = (kx + CUDA_MUL_BLOCK_SIZE - 1) / CUDA_MUL_BLOCK_SIZE;
    mul_f32<<<n_blocks, CUDA_MUL_BLOCK_SIZE, 0, stream>>>(x, y, dst, kx, ky);
}
// Host launcher for gelu_f32 on `stream`; one thread per element, grid sized
// by ceil-division over CUDA_GELU_BLOCK_SIZE.
static void gelu_f32_cuda(const float * x, float * dst, const int k, cudaStream_t stream) {
    const int n_blocks = (k + CUDA_GELU_BLOCK_SIZE - 1) / CUDA_GELU_BLOCK_SIZE;
    gelu_f32<<<n_blocks, CUDA_GELU_BLOCK_SIZE, 0, stream>>>(x, dst, k);
}
// Host launcher for silu_f32 on `stream`; one thread per element, grid sized
// by ceil-division over CUDA_SILU_BLOCK_SIZE.
static void silu_f32_cuda(const float * x, float * dst, const int k, cudaStream_t stream) {
    const int n_blocks = (k + CUDA_SILU_BLOCK_SIZE - 1) / CUDA_SILU_BLOCK_SIZE;
    silu_f32<<<n_blocks, CUDA_SILU_BLOCK_SIZE, 0, stream>>>(x, dst, k);
}
// Host launcher for norm_f32: one warp-wide block per row; requires ncols to
// be a multiple of WARP_SIZE (asserted).
static void norm_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
    GGML_ASSERT(ncols % WARP_SIZE == 0);
    const dim3 threads(WARP_SIZE, 1, 1);
    norm_f32<<<nrows, threads, 0, stream>>>(x, dst, ncols);
}
// Host launcher for rms_norm_f32: one warp-wide block per row; requires ncols
// to be a multiple of WARP_SIZE (asserted). eps is forwarded to the kernel.
static void rms_norm_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, const float eps, cudaStream_t stream) {
    GGML_ASSERT(ncols % WARP_SIZE == 0);
    const dim3 threads(WARP_SIZE, 1, 1);
    rms_norm_f32<<<nrows, threads, 0, stream>>>(x, dst, ncols, eps);
}
// Host launcher for quantize_q8_1: quantizes a kx x ky f32 matrix to q8_1,
// with each output row padded to kx_padded elements.
// Fix: the block size previously used CUDA_DEQUANTIZE_BLOCK_SIZE while the
// grid size was derived from CUDA_QUANTIZE_BLOCK_SIZE. The two constants are
// currently equal so behavior was correct, but the launch would under-cover
// kx_padded if they ever diverged; use CUDA_QUANTIZE_BLOCK_SIZE consistently.
static void quantize_row_q8_1_cuda(const float * x, void * vy, const int kx, const int ky, const int kx_padded, cudaStream_t stream) {
    const int block_num_x = (kx_padded + CUDA_QUANTIZE_BLOCK_SIZE - 1) / CUDA_QUANTIZE_BLOCK_SIZE;
    const dim3 num_blocks(block_num_x, ky, 1);
    const dim3 block_size(CUDA_QUANTIZE_BLOCK_SIZE, 1, 1);
    quantize_q8_1<<<num_blocks, block_size, 0, stream>>>(x, vy, kx, kx_padded);
}
// Host launcher: dequantize k q4_0-packed values into f32 y on `stream`.
static void dequantize_row_q4_0_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
    const int nb = (k + CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / CUDA_DEQUANTIZE_BLOCK_SIZE;
    dequantize_block<QK4_0, QR4_0, dequantize_q4_0><<<nb, CUDA_DEQUANTIZE_BLOCK_SIZE, 0, stream>>>(vx, y, k);
}
// Host launcher: dequantize k q4_1-packed values into f32 y on `stream`.
static void dequantize_row_q4_1_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
    const int nb = (k + CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / CUDA_DEQUANTIZE_BLOCK_SIZE;
    dequantize_block<QK4_1, QR4_1, dequantize_q4_1><<<nb, CUDA_DEQUANTIZE_BLOCK_SIZE, 0, stream>>>(vx, y, k);
}
// Host launcher: dequantize k q5_0-packed values into f32 y on `stream`.
static void dequantize_row_q5_0_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
    const int nb = (k + CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / CUDA_DEQUANTIZE_BLOCK_SIZE;
    dequantize_block<QK5_0, QR5_0, dequantize_q5_0><<<nb, CUDA_DEQUANTIZE_BLOCK_SIZE, 0, stream>>>(vx, y, k);
}
// Host launcher: dequantize k q5_1-packed values into f32 y on `stream`.
static void dequantize_row_q5_1_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
    const int nb = (k + CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / CUDA_DEQUANTIZE_BLOCK_SIZE;
    dequantize_block<QK5_1, QR5_1, dequantize_q5_1><<<nb, CUDA_DEQUANTIZE_BLOCK_SIZE, 0, stream>>>(vx, y, k);
}
// Host launcher: dequantize k q8_0-packed values into f32 y on `stream`.
static void dequantize_row_q8_0_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
    const int nb = (k + CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / CUDA_DEQUANTIZE_BLOCK_SIZE;
    dequantize_block<QK8_0, QR8_0, dequantize_q8_0><<<nb, CUDA_DEQUANTIZE_BLOCK_SIZE, 0, stream>>>(vx, y, k);
}
// Host launcher: dequantize k q2_K values (one super-block per thread block);
// 64 threads per block for the full QK_K == 256 layout, 32 otherwise.
static void dequantize_row_q2_K_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
    const int nb = k / QK_K; // number of q2_K super-blocks
#if QK_K == 256
    dequantize_block_q2_K<<<nb, 64, 0, stream>>>(vx, y);
#else
    dequantize_block_q2_K<<<nb, 32, 0, stream>>>(vx, y);
#endif
}
// Host launcher: dequantize k q3_K values (one super-block per thread block);
// 64 threads per block for the full QK_K == 256 layout, 32 otherwise.
static void dequantize_row_q3_K_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
    const int nb = k / QK_K; // number of q3_K super-blocks
#if QK_K == 256
    dequantize_block_q3_K<<<nb, 64, 0, stream>>>(vx, y);
#else
    dequantize_block_q3_K<<<nb, 32, 0, stream>>>(vx, y);
#endif
}
// Host launcher: dequantize k q4_K values, one super-block per 32-thread block.
static void dequantize_row_q4_K_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
    const int n_superblocks = k / QK_K;
    dequantize_block_q4_K<<<n_superblocks, 32, 0, stream>>>(vx, y);
}
// Host launcher: dequantize k q5_K values (one super-block per thread block);
// 64 threads per block for the full QK_K == 256 layout, 32 otherwise.
static void dequantize_row_q5_K_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
    const int nb = k / QK_K; // number of q5_K super-blocks
#if QK_K == 256
    dequantize_block_q5_K<<<nb, 64, 0, stream>>>(vx, y);
#else
    dequantize_block_q5_K<<<nb, 32, 0, stream>>>(vx, y);
#endif
}
// Host launcher: dequantize k q6_K values (one super-block per thread block);
// 64 threads per block for the full QK_K == 256 layout, 32 otherwise.
static void dequantize_row_q6_K_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
    const int nb = k / QK_K; // number of q6_K super-blocks
#if QK_K == 256
    dequantize_block_q6_K<<<nb, 64, 0, stream>>>(vx, y);
#else
    dequantize_block_q6_K<<<nb, 32, 0, stream>>>(vx, y);
#endif
}
// Host launcher for the q4_0 dequantize-mat-vec kernel: one warp per row,
// GGML_CUDA_MMV_Y rows per block; ncols must divide by GGML_CUDA_DMMV_X.
static void dequantize_mul_mat_vec_q4_0_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
    GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
    const int row_blocks = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
    const dim3 grid(1, row_blocks, 1);
    const dim3 block(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
    dequantize_mul_mat_vec<QK4_0, QR4_0, dequantize_q4_0><<<grid, block, 0, stream>>>(vx, y, dst, ncols, nrows);
}
// Host launcher for the q4_1 dequantize-mat-vec kernel: one warp per row,
// GGML_CUDA_MMV_Y rows per block; ncols must divide by GGML_CUDA_DMMV_X.
static void dequantize_mul_mat_vec_q4_1_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
    GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
    const int row_blocks = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
    const dim3 grid(1, row_blocks, 1);
    const dim3 block(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
    dequantize_mul_mat_vec<QK4_1, QR4_1, dequantize_q4_1><<<grid, block, 0, stream>>>(vx, y, dst, ncols, nrows);
}
// Fused dequantize + mat-vec for q5_0: one warp per row, GGML_CUDA_MMV_Y rows per block.
static void dequantize_mul_mat_vec_q5_0_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
    GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
    const int grid_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; // ceil-div over rows
    const dim3 grid(1, grid_y, 1);
    const dim3 block(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
    dequantize_mul_mat_vec<QK5_0, QR5_0, dequantize_q5_0><<<grid, block, 0, stream>>>(vx, y, dst, ncols, nrows);
}
// Fused dequantize + mat-vec for q5_1: one warp per row, GGML_CUDA_MMV_Y rows per block.
static void dequantize_mul_mat_vec_q5_1_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
    GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
    const int grid_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; // ceil-div over rows
    const dim3 grid(1, grid_y, 1);
    const dim3 block(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
    dequantize_mul_mat_vec<QK5_1, QR5_1, dequantize_q5_1><<<grid, block, 0, stream>>>(vx, y, dst, ncols, nrows);
}
// Fused dequantize + mat-vec for q8_0: one warp per row, GGML_CUDA_MMV_Y rows per block.
static void dequantize_mul_mat_vec_q8_0_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
    GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
    const int grid_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; // ceil-div over rows
    const dim3 grid(1, grid_y, 1);
    const dim3 block(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
    dequantize_mul_mat_vec<QK8_0, QR8_0, dequantize_q8_0><<<grid, block, 0, stream>>>(vx, y, dst, ncols, nrows);
}
// q2_K mat-vec: 32 threads per row, ny rows per block.
static void dequantize_mul_mat_vec_q2_K_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
    GGML_ASSERT(ncols % QK_K == 0);
    const int ny = 2; // very slightly faster than 1 even when K_QUANTS_PER_ITERATION = 2
    const dim3 block(32, ny, 1);
    const dim3 grid(1, (nrows + ny - 1) / ny, 1); // ceil-div over rows
    dequantize_mul_mat_vec_q2_k<<<grid, block, 0, stream>>>(vx, y, dst, ncols, nrows);
}
// q3_K mat-vec: 32 threads per row; rows per block depend on K_QUANTS_PER_ITERATION.
static void dequantize_mul_mat_vec_q3_K_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
    GGML_ASSERT(ncols % QK_K == 0);
    const int ny = 2 / K_QUANTS_PER_ITERATION;
    const dim3 block(32, ny, 1);
    const dim3 grid(1, (nrows + ny - 1) / ny, 1); // ceil-div over rows
    dequantize_mul_mat_vec_q3_k<<<grid, block, 0, stream>>>(vx, y, dst, ncols, nrows);
}
// q4_K mat-vec: 32 threads per row; rows per block depend on K_QUANTS_PER_ITERATION.
static void dequantize_mul_mat_vec_q4_K_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
    GGML_ASSERT(ncols % QK_K == 0);
    const int ny = 2 / K_QUANTS_PER_ITERATION;
    const dim3 block(32, ny, 1);
    const dim3 grid(1, (nrows + ny - 1) / ny, 1); // ceil-div over rows
    dequantize_mul_mat_vec_q4_k<<<grid, block, 0, stream>>>(vx, y, dst, ncols, nrows);
}
// q5_K mat-vec: one 32-thread block per row; this kernel does not take nrows.
static void dequantize_mul_mat_vec_q5_K_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
    GGML_ASSERT(ncols % QK_K == 0);
    const dim3 block(32, 1, 1);
    dequantize_mul_mat_vec_q5_k<<<nrows, block, 0, stream>>>(vx, y, dst, ncols);
}
// q6_K mat-vec: 32 threads per row; rows per block depend on K_QUANTS_PER_ITERATION.
static void dequantize_mul_mat_vec_q6_K_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
    GGML_ASSERT(ncols % QK_K == 0);
    const int ny = 2 / K_QUANTS_PER_ITERATION;
    const dim3 block(32, ny, 1);
    const dim3 grid(1, (nrows + ny - 1) / ny, 1); // ceil-div over rows
    dequantize_mul_mat_vec_q6_k<<<grid, block, 0, stream>>>(vx, y, dst, ncols, nrows);
}
// Mat-vec of q4_0 x against q8_1-quantized y: one warp per row, GGML_CUDA_MMV_Y rows per block.
static void mul_mat_vec_q4_0_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
    GGML_ASSERT(ncols % QK4_0 == 0);
    const int grid_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; // ceil-div over rows
    const dim3 grid(1, grid_y, 1);
    const dim3 block(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
    mul_mat_vec_q<QK4_0, QI4_0, block_q4_0, VDR_Q4_0_Q8_1_MMVQ, vec_dot_q4_0_q8_1><<<grid, block, 0, stream>>>(vx, vy, dst, ncols, nrows);
}
// Mat-vec of q4_1 x against q8_1-quantized y: one warp per row, GGML_CUDA_MMV_Y rows per block.
static void mul_mat_vec_q4_1_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
    GGML_ASSERT(ncols % QK4_1 == 0);
    const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
    const dim3 block_nums(1, block_num_y, 1);
    const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
    // Fix: use QK4_1 (not QK4_0) as the block-size template argument, matching
    // every sibling launcher. QK4_0 and QK4_1 are both 32 in ggml so behavior is
    // unchanged, but using the wrong constant would silently break if the two
    // quantization block sizes ever diverged.
    mul_mat_vec_q<QK4_1, QI4_1, block_q4_1, VDR_Q4_1_Q8_1_MMVQ, vec_dot_q4_1_q8_1>
        <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols, nrows);
}
// Mat-vec of q5_0 x against q8_1-quantized y: one warp per row, GGML_CUDA_MMV_Y rows per block.
static void mul_mat_vec_q5_0_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
    GGML_ASSERT(ncols % QK5_0 == 0);
    const int grid_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; // ceil-div over rows
    const dim3 grid(1, grid_y, 1);
    const dim3 block(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
    mul_mat_vec_q<QK5_0, QI5_0, block_q5_0, VDR_Q5_0_Q8_1_MMVQ, vec_dot_q5_0_q8_1><<<grid, block, 0, stream>>>(vx, vy, dst, ncols, nrows);
}
// Mat-vec of q5_1 x against q8_1-quantized y: one warp per row, GGML_CUDA_MMV_Y rows per block.
static void mul_mat_vec_q5_1_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
    GGML_ASSERT(ncols % QK5_1 == 0);
    const int grid_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; // ceil-div over rows
    const dim3 grid(1, grid_y, 1);
    const dim3 block(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
    mul_mat_vec_q<QK5_1, QI5_1, block_q5_1, VDR_Q5_1_Q8_1_MMVQ, vec_dot_q5_1_q8_1><<<grid, block, 0, stream>>>(vx, vy, dst, ncols, nrows);
}
// Mat-vec of q8_0 x against q8_1-quantized y: one warp per row, GGML_CUDA_MMV_Y rows per block.
static void mul_mat_vec_q8_0_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
    GGML_ASSERT(ncols % QK8_0 == 0);
    const int grid_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; // ceil-div over rows
    const dim3 grid(1, grid_y, 1);
    const dim3 block(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
    mul_mat_vec_q<QK8_0, QI8_0, block_q8_0, VDR_Q8_0_Q8_1_MMVQ, vec_dot_q8_0_q8_1><<<grid, block, 0, stream>>>(vx, vy, dst, ncols, nrows);
}
// Mat-vec of q2_K x against q8_1-quantized y: one warp per row, GGML_CUDA_MMV_Y rows per block.
static void mul_mat_vec_q2_K_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
    GGML_ASSERT(ncols % QK_K == 0);
    const int grid_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; // ceil-div over rows
    const dim3 grid(1, grid_y, 1);
    const dim3 block(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
    mul_mat_vec_q<QK_K, QI2_K, block_q2_K, VDR_Q2_K_Q8_1_MMVQ, vec_dot_q2_K_q8_1><<<grid, block, 0, stream>>>(vx, vy, dst, ncols, nrows);
}
// Mat-vec of q3_K x against q8_1-quantized y: one warp per row, GGML_CUDA_MMV_Y rows per block.
static void mul_mat_vec_q3_K_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
    GGML_ASSERT(ncols % QK_K == 0);
    const int grid_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; // ceil-div over rows
    const dim3 grid(1, grid_y, 1);
    const dim3 block(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
    mul_mat_vec_q<QK_K, QI3_K, block_q3_K, VDR_Q3_K_Q8_1_MMVQ, vec_dot_q3_K_q8_1><<<grid, block, 0, stream>>>(vx, vy, dst, ncols, nrows);
}
// Mat-vec of q4_K x against q8_1-quantized y: one warp per row, GGML_CUDA_MMV_Y rows per block.
static void mul_mat_vec_q4_K_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
    GGML_ASSERT(ncols % QK_K == 0);
    const int grid_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; // ceil-div over rows
    const dim3 grid(1, grid_y, 1);
    const dim3 block(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
    mul_mat_vec_q<QK_K, QI4_K, block_q4_K, VDR_Q4_K_Q8_1_MMVQ, vec_dot_q4_K_q8_1><<<grid, block, 0, stream>>>(vx, vy, dst, ncols, nrows);
}
// Mat-vec of q5_K x against q8_1-quantized y: one warp per row, GGML_CUDA_MMV_Y rows per block.
static void mul_mat_vec_q5_K_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
    GGML_ASSERT(ncols % QK_K == 0);
    const int grid_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; // ceil-div over rows
    const dim3 grid(1, grid_y, 1);
    const dim3 block(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
    mul_mat_vec_q<QK_K, QI5_K, block_q5_K, VDR_Q5_K_Q8_1_MMVQ, vec_dot_q5_K_q8_1><<<grid, block, 0, stream>>>(vx, vy, dst, ncols, nrows);
}
// Mat-vec of q6_K x against q8_1-quantized y: one warp per row, GGML_CUDA_MMV_Y rows per block.
static void mul_mat_vec_q6_K_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
    GGML_ASSERT(ncols % QK_K == 0);
    const int grid_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; // ceil-div over rows
    const dim3 grid(1, grid_y, 1);
    const dim3 block(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
    mul_mat_vec_q<QK_K, QI6_K, block_q6_K, VDR_Q6_K_Q8_1_MMVQ, vec_dot_q6_K_q8_1><<<grid, block, 0, stream>>>(vx, vy, dst, ncols, nrows);
}
// Convert k fp16 values to fp32 via the generic dequantize kernel (block size 1, ratio 1).
static void convert_fp16_to_fp32_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
    const int grid = (k + CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / CUDA_DEQUANTIZE_BLOCK_SIZE; // ceil-div
    dequantize_block<1, 1, convert_f16><<<grid, CUDA_DEQUANTIZE_BLOCK_SIZE, 0, stream>>>(vx, y, k);
}
// Mat-vec with fp16 x treated as "quantization block size 1": one warp per row.
static void convert_mul_mat_vec_f16_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
    GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
    const int grid_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; // ceil-div over rows
    const dim3 grid(1, grid_y, 1);
    const dim3 block(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
    dequantize_mul_mat_vec<1, 1, convert_f16><<<grid, block, 0, stream>>>(vx, y, dst, ncols, nrows);
}
// Maps a ggml tensor type to its dequantize-to-fp32 CUDA launcher.
// Returns nullptr for types without a dequantization path; callers must
// check for nullptr before invoking the result.
static to_fp32_cuda_t ggml_get_to_fp32_cuda(ggml_type type) {
switch (type) {
case GGML_TYPE_Q4_0:
return dequantize_row_q4_0_cuda;
case GGML_TYPE_Q4_1:
return dequantize_row_q4_1_cuda;
case GGML_TYPE_Q5_0:
return dequantize_row_q5_0_cuda;
case GGML_TYPE_Q5_1:
return dequantize_row_q5_1_cuda;
case GGML_TYPE_Q8_0:
return dequantize_row_q8_0_cuda;
case GGML_TYPE_Q2_K:
return dequantize_row_q2_K_cuda;
case GGML_TYPE_Q3_K:
return dequantize_row_q3_K_cuda;
case GGML_TYPE_Q4_K:
return dequantize_row_q4_K_cuda;
case GGML_TYPE_Q5_K:
return dequantize_row_q5_K_cuda;
case GGML_TYPE_Q6_K:
return dequantize_row_q6_K_cuda;
case GGML_TYPE_F16:
// fp16 is handled by a plain conversion rather than dequantization
return convert_fp16_to_fp32_cuda;
default:
return nullptr;
}
}
// Tiled mat-mat product of q4_0 x against q8_1 y.
static void ggml_mul_mat_q4_0_q8_1_cuda(
    const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
    const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {

    int device;
    CUDA_CHECK(cudaGetDevice(&device));
    const int cc = g_compute_capabilities[device];

    // Select tile sizes / warp count by device generation; Turing and newer
    // use the "AMPERE" tuning constants.
    int mmq_x, mmq_y, nwarps;
    if (cc >= CC_TURING) {
        mmq_x  = MMQ_X_Q4_0_AMPERE;
        mmq_y  = MMQ_Y_Q4_0_AMPERE;
        nwarps = NWARPS_Q4_0_AMPERE;
    } else if (cc >= MIN_CC_DP4A) {
        mmq_x  = MMQ_X_Q4_0_PASCAL;
        mmq_y  = MMQ_Y_Q4_0_PASCAL;
        nwarps = NWARPS_Q4_0_PASCAL;
    } else {
        GGML_ASSERT(false); // no mul_mat_q path below MIN_CC_DP4A
    }

    // Ceil-div grid: x over rows of x, y over columns of y.
    const dim3 grid((nrows_x + mmq_y - 1) / mmq_y, (ncols_y + mmq_x - 1) / mmq_x, 1);
    const dim3 block(WARP_SIZE, nwarps, 1);

    // In-kernel bounds checks are only needed when mmq_y does not divide nrows_x.
    if (nrows_x % mmq_y == 0) {
        mul_mat_q4_0<false><<<grid, block, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
    } else {
        mul_mat_q4_0<true><<<grid, block, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
    }
}
// Tiled mat-mat product of q4_1 x against q8_1 y.
static void ggml_mul_mat_q4_1_q8_1_cuda(
    const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
    const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {

    int device;
    CUDA_CHECK(cudaGetDevice(&device));
    const int cc = g_compute_capabilities[device];

    // Select tile sizes / warp count by device generation; Turing and newer
    // use the "AMPERE" tuning constants.
    int mmq_x, mmq_y, nwarps;
    if (cc >= CC_TURING) {
        mmq_x  = MMQ_X_Q4_1_AMPERE;
        mmq_y  = MMQ_Y_Q4_1_AMPERE;
        nwarps = NWARPS_Q4_1_AMPERE;
    } else if (cc >= MIN_CC_DP4A) {
        mmq_x  = MMQ_X_Q4_1_PASCAL;
        mmq_y  = MMQ_Y_Q4_1_PASCAL;
        nwarps = NWARPS_Q4_1_PASCAL;
    } else {
        GGML_ASSERT(false); // no mul_mat_q path below MIN_CC_DP4A
    }

    // Ceil-div grid: x over rows of x, y over columns of y.
    const dim3 grid((nrows_x + mmq_y - 1) / mmq_y, (ncols_y + mmq_x - 1) / mmq_x, 1);
    const dim3 block(WARP_SIZE, nwarps, 1);

    // In-kernel bounds checks are only needed when mmq_y does not divide nrows_x.
    if (nrows_x % mmq_y == 0) {
        mul_mat_q4_1<false><<<grid, block, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
    } else {
        mul_mat_q4_1<true><<<grid, block, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
    }
}
// Tiled mat-mat product of q5_0 x against q8_1 y.
static void ggml_mul_mat_q5_0_q8_1_cuda(
    const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
    const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {

    int device;
    CUDA_CHECK(cudaGetDevice(&device));
    const int cc = g_compute_capabilities[device];

    // Select tile sizes / warp count by device generation; Turing and newer
    // use the "AMPERE" tuning constants.
    int mmq_x, mmq_y, nwarps;
    if (cc >= CC_TURING) {
        mmq_x  = MMQ_X_Q5_0_AMPERE;
        mmq_y  = MMQ_Y_Q5_0_AMPERE;
        nwarps = NWARPS_Q5_0_AMPERE;
    } else if (cc >= MIN_CC_DP4A) {
        mmq_x  = MMQ_X_Q5_0_PASCAL;
        mmq_y  = MMQ_Y_Q5_0_PASCAL;
        nwarps = NWARPS_Q5_0_PASCAL;
    } else {
        GGML_ASSERT(false); // no mul_mat_q path below MIN_CC_DP4A
    }

    // Ceil-div grid: x over rows of x, y over columns of y.
    const dim3 grid((nrows_x + mmq_y - 1) / mmq_y, (ncols_y + mmq_x - 1) / mmq_x, 1);
    const dim3 block(WARP_SIZE, nwarps, 1);

    // In-kernel bounds checks are only needed when mmq_y does not divide nrows_x.
    if (nrows_x % mmq_y == 0) {
        mul_mat_q5_0<false><<<grid, block, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
    } else {
        mul_mat_q5_0<true><<<grid, block, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
    }
}
// Tiled mat-mat product of q5_1 x against q8_1 y.
static void ggml_mul_mat_q5_1_q8_1_cuda(
    const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
    const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {

    int device;
    CUDA_CHECK(cudaGetDevice(&device));
    const int cc = g_compute_capabilities[device];

    // Select tile sizes / warp count by device generation; Turing and newer
    // use the "AMPERE" tuning constants.
    int mmq_x, mmq_y, nwarps;
    if (cc >= CC_TURING) {
        mmq_x  = MMQ_X_Q5_1_AMPERE;
        mmq_y  = MMQ_Y_Q5_1_AMPERE;
        nwarps = NWARPS_Q5_1_AMPERE;
    } else if (cc >= MIN_CC_DP4A) {
        mmq_x  = MMQ_X_Q5_1_PASCAL;
        mmq_y  = MMQ_Y_Q5_1_PASCAL;
        nwarps = NWARPS_Q5_1_PASCAL;
    } else {
        GGML_ASSERT(false); // no mul_mat_q path below MIN_CC_DP4A
    }

    // Ceil-div grid: x over rows of x, y over columns of y.
    const dim3 grid((nrows_x + mmq_y - 1) / mmq_y, (ncols_y + mmq_x - 1) / mmq_x, 1);
    const dim3 block(WARP_SIZE, nwarps, 1);

    // In-kernel bounds checks are only needed when mmq_y does not divide nrows_x.
    if (nrows_x % mmq_y == 0) {
        mul_mat_q5_1<false><<<grid, block, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
    } else {
        mul_mat_q5_1<true><<<grid, block, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
    }
}
// Tiled mat-mat product of q8_0 x against q8_1 y.
static void ggml_mul_mat_q8_0_q8_1_cuda(
    const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
    const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {

    int device;
    CUDA_CHECK(cudaGetDevice(&device));
    const int cc = g_compute_capabilities[device];

    // Select tile sizes / warp count by device generation; Turing and newer
    // use the "AMPERE" tuning constants.
    int mmq_x, mmq_y, nwarps;
    if (cc >= CC_TURING) {
        mmq_x  = MMQ_X_Q8_0_AMPERE;
        mmq_y  = MMQ_Y_Q8_0_AMPERE;
        nwarps = NWARPS_Q8_0_AMPERE;
    } else if (cc >= MIN_CC_DP4A) {
        mmq_x  = MMQ_X_Q8_0_PASCAL;
        mmq_y  = MMQ_Y_Q8_0_PASCAL;
        nwarps = NWARPS_Q8_0_PASCAL;
    } else {
        GGML_ASSERT(false); // no mul_mat_q path below MIN_CC_DP4A
    }

    // Ceil-div grid: x over rows of x, y over columns of y.
    const dim3 grid((nrows_x + mmq_y - 1) / mmq_y, (ncols_y + mmq_x - 1) / mmq_x, 1);
    const dim3 block(WARP_SIZE, nwarps, 1);

    // In-kernel bounds checks are only needed when mmq_y does not divide nrows_x.
    if (nrows_x % mmq_y == 0) {
        mul_mat_q8_0<false><<<grid, block, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
    } else {
        mul_mat_q8_0<true><<<grid, block, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
    }
}
// Tiled mat-mat product of q2_K x against q8_1 y.
static void ggml_mul_mat_q2_K_q8_1_cuda(
    const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
    const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {

    int device;
    CUDA_CHECK(cudaGetDevice(&device));
    const int cc = g_compute_capabilities[device];

    // Select tile sizes / warp count by device generation; Turing and newer
    // use the "AMPERE" tuning constants.
    int mmq_x, mmq_y, nwarps;
    if (cc >= CC_TURING) {
        mmq_x  = MMQ_X_Q2_K_AMPERE;
        mmq_y  = MMQ_Y_Q2_K_AMPERE;
        nwarps = NWARPS_Q2_K_AMPERE;
    } else if (cc >= MIN_CC_DP4A) {
        mmq_x  = MMQ_X_Q2_K_PASCAL;
        mmq_y  = MMQ_Y_Q2_K_PASCAL;
        nwarps = NWARPS_Q2_K_PASCAL;
    } else {
        GGML_ASSERT(false); // no mul_mat_q path below MIN_CC_DP4A
    }

    // Ceil-div grid: x over rows of x, y over columns of y.
    const dim3 grid((nrows_x + mmq_y - 1) / mmq_y, (ncols_y + mmq_x - 1) / mmq_x, 1);
    const dim3 block(WARP_SIZE, nwarps, 1);

    // In-kernel bounds checks are only needed when mmq_y does not divide nrows_x.
    if (nrows_x % mmq_y == 0) {
        mul_mat_q2_K<false><<<grid, block, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
    } else {
        mul_mat_q2_K<true><<<grid, block, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
    }
}
// Tiled mat-mat product of q3_K x against q8_1 y.
// Only compiled for QK_K == 256; otherwise this is a no-op (as in the original).
static void ggml_mul_mat_q3_K_q8_1_cuda(
    const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
    const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {
#if QK_K == 256
    int device;
    CUDA_CHECK(cudaGetDevice(&device));
    const int cc = g_compute_capabilities[device];

    // Select tile sizes / warp count by device generation; Turing and newer
    // use the "AMPERE" tuning constants.
    int mmq_x, mmq_y, nwarps;
    if (cc >= CC_TURING) {
        mmq_x  = MMQ_X_Q3_K_AMPERE;
        mmq_y  = MMQ_Y_Q3_K_AMPERE;
        nwarps = NWARPS_Q3_K_AMPERE;
    } else if (cc >= MIN_CC_DP4A) {
        mmq_x  = MMQ_X_Q3_K_PASCAL;
        mmq_y  = MMQ_Y_Q3_K_PASCAL;
        nwarps = NWARPS_Q3_K_PASCAL;
    } else {
        GGML_ASSERT(false); // no mul_mat_q path below MIN_CC_DP4A
    }

    // Ceil-div grid: x over rows of x, y over columns of y.
    const dim3 grid((nrows_x + mmq_y - 1) / mmq_y, (ncols_y + mmq_x - 1) / mmq_x, 1);
    const dim3 block(WARP_SIZE, nwarps, 1);

    // In-kernel bounds checks are only needed when mmq_y does not divide nrows_x.
    if (nrows_x % mmq_y == 0) {
        mul_mat_q3_K<false><<<grid, block, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
    } else {
        mul_mat_q3_K<true><<<grid, block, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
    }
#endif
}
// Tiled mat-mat product of q4_K x against q8_1 y.
static void ggml_mul_mat_q4_K_q8_1_cuda(
    const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
    const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {

    int device;
    CUDA_CHECK(cudaGetDevice(&device));
    const int cc = g_compute_capabilities[device];

    // Select tile sizes / warp count by device generation; Turing and newer
    // use the "AMPERE" tuning constants.
    int mmq_x, mmq_y, nwarps;
    if (cc >= CC_TURING) {
        mmq_x  = MMQ_X_Q4_K_AMPERE;
        mmq_y  = MMQ_Y_Q4_K_AMPERE;
        nwarps = NWARPS_Q4_K_AMPERE;
    } else if (cc >= MIN_CC_DP4A) {
        mmq_x  = MMQ_X_Q4_K_PASCAL;
        mmq_y  = MMQ_Y_Q4_K_PASCAL;
        nwarps = NWARPS_Q4_K_PASCAL;
    } else {
        GGML_ASSERT(false); // no mul_mat_q path below MIN_CC_DP4A
    }

    // Ceil-div grid: x over rows of x, y over columns of y.
    const dim3 grid((nrows_x + mmq_y - 1) / mmq_y, (ncols_y + mmq_x - 1) / mmq_x, 1);
    const dim3 block(WARP_SIZE, nwarps, 1);

    // In-kernel bounds checks are only needed when mmq_y does not divide nrows_x.
    if (nrows_x % mmq_y == 0) {
        mul_mat_q4_K<false><<<grid, block, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
    } else {
        mul_mat_q4_K<true><<<grid, block, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
    }
}
// Tiled mat-mat product of q5_K x against q8_1 y.
static void ggml_mul_mat_q5_K_q8_1_cuda(
    const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
    const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {

    int device;
    CUDA_CHECK(cudaGetDevice(&device));
    const int cc = g_compute_capabilities[device];

    // Select tile sizes / warp count by device generation; Turing and newer
    // use the "AMPERE" tuning constants.
    int mmq_x, mmq_y, nwarps;
    if (cc >= CC_TURING) {
        mmq_x  = MMQ_X_Q5_K_AMPERE;
        mmq_y  = MMQ_Y_Q5_K_AMPERE;
        nwarps = NWARPS_Q5_K_AMPERE;
    } else if (cc >= MIN_CC_DP4A) {
        mmq_x  = MMQ_X_Q5_K_PASCAL;
        mmq_y  = MMQ_Y_Q5_K_PASCAL;
        nwarps = NWARPS_Q5_K_PASCAL;
    } else {
        GGML_ASSERT(false); // no mul_mat_q path below MIN_CC_DP4A
    }

    // Ceil-div grid: x over rows of x, y over columns of y.
    const dim3 grid((nrows_x + mmq_y - 1) / mmq_y, (ncols_y + mmq_x - 1) / mmq_x, 1);
    const dim3 block(WARP_SIZE, nwarps, 1);

    // In-kernel bounds checks are only needed when mmq_y does not divide nrows_x.
    if (nrows_x % mmq_y == 0) {
        mul_mat_q5_K<false><<<grid, block, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
    } else {
        mul_mat_q5_K<true><<<grid, block, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
    }
}
// Tiled mat-mat product of q6_K x against q8_1 y.
static void ggml_mul_mat_q6_K_q8_1_cuda(
    const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
    const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {

    int device;
    CUDA_CHECK(cudaGetDevice(&device));
    const int cc = g_compute_capabilities[device];

    // Select tile sizes / warp count by device generation; Turing and newer
    // use the "AMPERE" tuning constants.
    int mmq_x, mmq_y, nwarps;
    if (cc >= CC_TURING) {
        mmq_x  = MMQ_X_Q6_K_AMPERE;
        mmq_y  = MMQ_Y_Q6_K_AMPERE;
        nwarps = NWARPS_Q6_K_AMPERE;
    } else if (cc >= MIN_CC_DP4A) {
        mmq_x  = MMQ_X_Q6_K_PASCAL;
        mmq_y  = MMQ_Y_Q6_K_PASCAL;
        nwarps = NWARPS_Q6_K_PASCAL;
    } else {
        GGML_ASSERT(false); // no mul_mat_q path below MIN_CC_DP4A
    }

    // Ceil-div grid: x over rows of x, y over columns of y.
    const dim3 grid((nrows_x + mmq_y - 1) / mmq_y, (ncols_y + mmq_x - 1) / mmq_x, 1);
    const dim3 block(WARP_SIZE, nwarps, 1);

    // In-kernel bounds checks are only needed when mmq_y does not divide nrows_x.
    if (nrows_x % mmq_y == 0) {
        mul_mat_q6_K<false><<<grid, block, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
    } else {
        mul_mat_q6_K<true><<<grid, block, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
    }
}
// fp16 x (p021 layout) times fp32 y: one warp per (row, channel) pair.
static void ggml_mul_mat_p021_f16_f32_cuda(
    const void * vx, const float * y, float * dst, const int ncols_x, const int nrows_x,
    const int nchannels_x, const int nchannels_y, cudaStream_t stream) {
    const dim3 block(WARP_SIZE, 1, 1);
    const dim3 grid(1, nrows_x, nchannels_y);
    mul_mat_p021_f16_f32<<<grid, block, 0, stream>>>(vx, y, dst, ncols_x, nrows_x, nchannels_x, nchannels_y);
}
// Non-contiguous fp16 x times fp32 y: one warp per (row, channel) pair.
static void ggml_mul_mat_vec_nc_f16_f32_cuda(
    const void * vx, const float * y, float * dst, const int ncols_x, const int nrows_x, const int row_stride_x,
    const int nchannels_x, const int nchannels_y, const int channel_stride_x, cudaStream_t stream) {
    const dim3 block(WARP_SIZE, 1, 1);
    const dim3 grid(1, nrows_x, nchannels_y);
    // The kernel takes the channel broadcast ratio instead of both channel counts.
    mul_mat_vec_nc_f16_f32<<<grid, block, 0, stream>>>
        (vx, y, dst, ncols_x, nrows_x, row_stride_x, channel_stride_x, nchannels_y/nchannels_x);
}
// Strided f32 -> f32 tensor copy; ne is the total element count.
static void ggml_cpy_f32_f32_cuda(
    const char * cx, char * cdst, const int ne,
    const int ne00, const int ne01, const int nb00, const int nb01, const int nb02,
    const int ne10, const int ne11, const int nb10, const int nb11, const int nb12, cudaStream_t stream) {
    const int grid = (ne + CUDA_CPY_BLOCK_SIZE - 1) / CUDA_CPY_BLOCK_SIZE; // ceil-div over elements
    cpy_f32_f16<cpy_1_f32_f32><<<grid, CUDA_CPY_BLOCK_SIZE, 0, stream>>>
        (cx, cdst, ne, ne00, ne01, nb00, nb01, nb02, ne10, ne11, nb10, nb11, nb12);
}
// Strided f32 -> f16 tensor copy; ne is the total element count.
static void ggml_cpy_f32_f16_cuda(
    const char * cx, char * cdst, const int ne,
    const int ne00, const int ne01, const int nb00, const int nb01, const int nb02,
    const int ne10, const int ne11, const int nb10, const int nb11, const int nb12, cudaStream_t stream) {
    const int grid = (ne + CUDA_CPY_BLOCK_SIZE - 1) / CUDA_CPY_BLOCK_SIZE; // ceil-div over elements
    cpy_f32_f16<cpy_1_f32_f16><<<grid, CUDA_CPY_BLOCK_SIZE, 0, stream>>>
        (cx, cdst, ne, ne00, ne01, nb00, nb01, nb02, ne10, ne11, nb10, nb11, nb12);
}
// Multiply k floats by a scalar, writing to dst.
static void scale_f32_cuda(const float * x, float * dst, const float scale, const int k, cudaStream_t stream) {
    const int grid = (k + CUDA_SCALE_BLOCK_SIZE - 1) / CUDA_SCALE_BLOCK_SIZE; // ceil-div
    scale_f32<<<grid, CUDA_SCALE_BLOCK_SIZE, 0, stream>>>(x, dst, scale, k);
}
// RoPE launcher: ncols must be even; each thread processes a pair of values.
static void rope_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, const float p0,
                          const float p_delta, const int p_delta_rows, const float theta_scale, cudaStream_t stream) {
    GGML_ASSERT(ncols % 2 == 0);
    const int blocks_per_row = (ncols + 2*CUDA_ROPE_BLOCK_SIZE - 1) / (2*CUDA_ROPE_BLOCK_SIZE); // ceil-div
    const dim3 grid(nrows, blocks_per_row, 1);
    const dim3 block(1, CUDA_ROPE_BLOCK_SIZE, 1);
    rope_f32<<<grid, block, 0, stream>>>(x, dst, ncols, p0, p_delta, p_delta_rows, theta_scale);
}
// NeoX-style RoPE launcher: ncols must be even; each thread processes a pair of values.
static void rope_neox_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, const float p0,
                               const float p_delta, const int p_delta_rows, const float theta_scale, cudaStream_t stream) {
    GGML_ASSERT(ncols % 2 == 0);
    const int blocks_per_row = (ncols + 2*CUDA_ROPE_BLOCK_SIZE - 1) / (2*CUDA_ROPE_BLOCK_SIZE); // ceil-div
    const dim3 grid(nrows, blocks_per_row, 1);
    const dim3 block(1, CUDA_ROPE_BLOCK_SIZE, 1);
    rope_neox_f32<<<grid, block, 0, stream>>>(x, dst, ncols, p0, p_delta, p_delta_rows, theta_scale);
}
// GLM-style RoPE launcher; requires nrows to be a multiple of 4.
static void rope_glm_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, const float p, const float block_p, const float theta_scale, cudaStream_t stream) {
    GGML_ASSERT(nrows % 4 == 0);
    const int blocks_per_row = (ncols + 4*CUDA_ROPE_BLOCK_SIZE - 1) / (4*CUDA_ROPE_BLOCK_SIZE); // ceil-div
    const dim3 grid(blocks_per_row, nrows, 1);
    const dim3 block(4*CUDA_ROPE_BLOCK_SIZE, 1, 1);
    rope_glm_f32<<<grid, block, 0, stream>>>(x, dst, ncols, p, block_p, theta_scale);
}
// ALiBi bias launcher: 1D blocks tiled along each row.
static void alibi_f32_cuda(const float * x, float * dst, const int ncols, const int nrows,
                           const int k_rows, const int n_heads_log2_floor, const float m0,
                           const float m1, cudaStream_t stream) {
    const int blocks_per_row = (ncols + CUDA_ALIBI_BLOCK_SIZE - 1) / CUDA_ALIBI_BLOCK_SIZE; // ceil-div
    const dim3 grid(blocks_per_row, nrows, 1);
    const dim3 block(CUDA_ALIBI_BLOCK_SIZE, 1, 1);
    alibi_f32<<<grid, block, 0, stream>>>(x, dst, ncols, k_rows, n_heads_log2_floor, m0, m1);
}
// Causal-mask launcher. Note: rows map to grid.x and column tiles to grid.y.
static void diag_mask_inf_f32_cuda(const float * x, float * dst, const int ncols_x, const int nrows_x, const int rows_per_channel, const int n_past, cudaStream_t stream) {
    const int tiles_per_row = (ncols_x + CUDA_DIAG_MASK_INF_BLOCK_SIZE - 1) / CUDA_DIAG_MASK_INF_BLOCK_SIZE; // ceil-div
    const dim3 grid(nrows_x, tiles_per_row, 1);
    const dim3 block(1, CUDA_DIAG_MASK_INF_BLOCK_SIZE, 1);
    diag_mask_inf_f32<<<grid, block, 0, stream>>>(x, dst, ncols_x, rows_per_channel, n_past);
}
// Softmax launcher: one block per row, WARP_SIZE threads along the row.
static void soft_max_f32_cuda(const float * x, float * dst, const int ncols_x, const int nrows_x, cudaStream_t stream) {
    const dim3 grid(nrows_x, 1, 1);
    const dim3 block(1, WARP_SIZE, 1);
    soft_max_f32<<<grid, block, 0, stream>>>(x, dst, ncols_x);
}
// buffer pool for cuda
#define MAX_CUDA_BUFFERS 256
// RAII guard for a std::atomic_flag-based spin lock: busy-waits to acquire the
// flag in the constructor and releases it in the destructor. Non-copyable.
// Used to serialize access to the CUDA buffer pool below.
struct scoped_spin_lock {
std::atomic_flag& lock;
scoped_spin_lock(std::atomic_flag& lock) : lock(lock) {
while (lock.test_and_set(std::memory_order_acquire)) {
; // spin
}
}
~scoped_spin_lock() {
lock.clear(std::memory_order_release);
}
// non-copyable: copying a held lock would double-release it
scoped_spin_lock(const scoped_spin_lock&) = delete;
scoped_spin_lock& operator=(const scoped_spin_lock&) = delete;
};
// A pooled device allocation: pointer plus its allocated capacity in bytes.
// ptr == nullptr marks a free slot in the pool.
struct cuda_buffer {
void * ptr = nullptr;
size_t size = 0;
};
// Per-device pool of reusable device buffers, guarded by g_cuda_pool_lock.
static cuda_buffer g_cuda_buffer_pool[GGML_CUDA_MAX_DEVICES][MAX_CUDA_BUFFERS];
static std::atomic_flag g_cuda_pool_lock = ATOMIC_FLAG_INIT;
// Allocate device memory of at least `size` bytes on the current device,
// preferring a recycled buffer from the pool. On return, *actual_size holds
// the real capacity of the returned buffer (which may exceed `size`).
static void * ggml_cuda_pool_malloc(size_t size, size_t * actual_size) {
scoped_spin_lock lock(g_cuda_pool_lock);
int id;
CUDA_CHECK(cudaGetDevice(&id));
#ifdef DEBUG_CUDA_MALLOC
int nnz = 0;
size_t max_size = 0, tot_size = 0;
#endif
// Best-fit search: find the pooled buffer with the smallest capacity >= size.
size_t best_diff = 1ull << 36;
int ibest = -1;
for (int i = 0; i < MAX_CUDA_BUFFERS; ++i) {
cuda_buffer& b = g_cuda_buffer_pool[id][i];
if (b.ptr != nullptr) {
#ifdef DEBUG_CUDA_MALLOC
++nnz;
tot_size += b.size;
if (b.size > max_size) max_size = b.size;
#endif
if (b.size >= size) {
size_t diff = b.size - size;
if (diff < best_diff) {
best_diff = diff;
ibest = i;
// Exact fit: take this buffer immediately.
if (!best_diff) {
void * ptr = b.ptr;
*actual_size = b.size;
b.ptr = nullptr;
b.size = 0;
return ptr;
}
}
}
}
}
if (ibest >= 0) {
// Hand out the best (smallest sufficient) pooled buffer found.
cuda_buffer& b = g_cuda_buffer_pool[id][ibest];
void * ptr = b.ptr;
*actual_size = b.size;
b.ptr = nullptr;
b.size = 0;
return ptr;
}
#ifdef DEBUG_CUDA_MALLOC
fprintf(stderr, "%s: %d buffers, max_size = %u MB, tot_size = %u MB, requested %u MB\n", __func__, nnz,
(uint32_t)(max_size/1024/1024), (uint32_t)(tot_size/1024/1024), (uint32_t)(size/1024/1024));
#endif
// No pooled buffer fits: allocate fresh memory with ~5% headroom, rounded up
// to a multiple of 256 bytes, so slightly larger future requests can reuse it.
void * ptr;
size_t look_ahead_size = (size_t) (1.05 * size);
look_ahead_size = 256 * ((look_ahead_size + 255)/256);
CUDA_CHECK(cudaMalloc((void **) &ptr, look_ahead_size));
*actual_size = look_ahead_size;
return ptr;
}
// Return a device buffer (with its true capacity `size`) to the current
// device's pool; frees it immediately if the pool has no empty slot.
static void ggml_cuda_pool_free(void * ptr, size_t size) {
    scoped_spin_lock lock(g_cuda_pool_lock);
    int device;
    CUDA_CHECK(cudaGetDevice(&device));
    // Store the buffer in the first free slot.
    for (int i = 0; i < MAX_CUDA_BUFFERS; ++i) {
        cuda_buffer& slot = g_cuda_buffer_pool[device][i];
        if (slot.ptr != nullptr) {
            continue;
        }
        slot.ptr  = ptr;
        slot.size = size;
        return;
    }
    // Pool full: release the memory right away.
    fprintf(stderr, "WARNING: cuda buffer pool full, increase MAX_CUDA_BUFFERS\n");
    CUDA_CHECK(cudaFree(ptr));
}
// One-time initialization of the CUDA/cuBLAS backend: enumerates devices,
// records compute capabilities, seeds the default tensor split proportional
// to each device's VRAM, and creates a main stream plus a cuBLAS handle per
// device. NOTE(review): the `initialized` guard is not thread-safe — confirm
// all callers invoke this from a single thread.
void ggml_init_cublas() {
static bool initialized = false;
if (!initialized) {
#ifdef __HIP_PLATFORM_AMD__
// Workaround for a rocBLAS bug when using multiple graphics cards:
// https://github.com/ROCmSoftwarePlatform/rocBLAS/issues/1346
rocblas_initialize();
CUDA_CHECK(cudaDeviceSynchronize());
#endif
CUDA_CHECK(cudaGetDeviceCount(&g_device_count));
GGML_ASSERT(g_device_count <= GGML_CUDA_MAX_DEVICES);
int64_t total_vram = 0;
fprintf(stderr, "%s: found %d " GGML_CUDA_NAME " devices:\n", __func__, g_device_count);
for (int id = 0; id < g_device_count; ++id) {
cudaDeviceProp prop;
CUDA_CHECK(cudaGetDeviceProperties(&prop, id));
fprintf(stderr, " Device %d: %s, compute capability %d.%d\n", id, prop.name, prop.major, prop.minor);
// store the cumulative VRAM prefix; normalized into [0,1) fractions below
g_tensor_split[id] = total_vram;
total_vram += prop.totalGlobalMem;
// encode capability as major*100 + minor*10 (e.g. 7.5 -> 750)
g_compute_capabilities[id] = 100*prop.major + 10*prop.minor;
}
for (int id = 0; id < g_device_count; ++id) {
g_tensor_split[id] /= total_vram;
}
for (int id = 0; id < g_device_count; ++id) {
CUDA_CHECK(cudaSetDevice(id));
// create main stream
CUDA_CHECK(cudaStreamCreateWithFlags(&g_cudaStreams_main[id], cudaStreamNonBlocking));
// create cublas handle
CUBLAS_CHECK(cublasCreate(&g_cublas_handles[id]));
CUBLAS_CHECK(cublasSetMathMode(g_cublas_handles[id], CUBLAS_TF32_TENSOR_OP_MATH));
}
// configure logging to stdout
// CUBLAS_CHECK(cublasLoggerConfigure(1, 1, 0, nullptr));
initialized = true;
}
}
// Override the per-device tensor split. Each entry of tensor_split is a
// relative weight; the result stored in g_tensor_split is the normalized
// cumulative prefix (device i owns the fraction [g[i], g[i+1])).
// A null pointer or an all-zero array leaves the default split untouched.
void ggml_cuda_set_tensor_split(const float * tensor_split) {
    if (tensor_split == nullptr) {
        return;
    }
    bool any_nonzero = false;
    for (int i = 0; i < g_device_count; ++i) {
        any_nonzero = any_nonzero || (tensor_split[i] != 0.0f);
    }
    if (!any_nonzero) {
        return;
    }
    // Build the cumulative prefix, then normalize by the total weight.
    float acc = 0.0f;
    for (int i = 0; i < g_device_count; ++i) {
        g_tensor_split[i] = acc;
        acc += tensor_split[i];
    }
    for (int i = 0; i < g_device_count; ++i) {
        g_tensor_split[i] /= acc;
    }
}
// Allocate pinned host memory. Returns nullptr when pinning is disabled via
// the GGML_CUDA_NO_PINNED environment variable or when allocation fails;
// callers are expected to fall back to pageable memory.
void * ggml_cuda_host_malloc(size_t size) {
    if (getenv("GGML_CUDA_NO_PINNED") != nullptr) {
        return nullptr;
    }
    void * ptr = nullptr;
    const cudaError_t err = cudaMallocHost((void **) &ptr, size);
    if (err == cudaSuccess) {
        return ptr;
    }
    // Failure is non-fatal; clear the sticky CUDA error so subsequent calls
    // are unaffected (avoids spurious OOM failures, e.g. under WSL).
    cudaGetLastError();
    fprintf(stderr, "WARNING: failed to allocate %.2f MB of pinned memory: %s\n",
            size/1024.0/1024.0, cudaGetErrorString(err));
    return nullptr;
}
// Free pinned host memory previously obtained from ggml_cuda_host_malloc().
void ggml_cuda_host_free(void * ptr) {
CUDA_CHECK(cudaFreeHost(ptr));
}
// Copy rows [i1_low, i1_high) of the (i2, i3) 2D slice of `src` into the
// contiguous device buffer `dst`. Picks the cheapest copy shape available:
// fully contiguous data -> a single async memcpy; contiguous rows -> one 2D
// memcpy; otherwise a per-element-strided 2D memcpy per row. Returns the
// first CUDA error encountered (copies are asynchronous on `stream`).
static cudaError_t ggml_cuda_cpy_tensor_2d(
void * dst, const struct ggml_tensor * src, int64_t i3, int64_t i2, int64_t i1_low, int64_t i1_high, cudaStream_t stream) {
// Source may live on the host (H2D copy) or on this GPU (D2D copy).
cudaMemcpyKind kind;
char * src_ptr;
if (src->backend == GGML_BACKEND_CPU) {
kind = cudaMemcpyHostToDevice;
src_ptr = (char *) src->data;
} else if (src->backend == GGML_BACKEND_GPU) {
kind = cudaMemcpyDeviceToDevice;
struct ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) src->extra;
int id;
CUDA_CHECK(cudaGetDevice(&id));
src_ptr = (char *) extra->data_device[id];
} else {
GGML_ASSERT(false);
}
char * dst_ptr = (char *) dst;
const int64_t ne0 = src->ne[0];
const int64_t nb0 = src->nb[0];
const int64_t nb1 = src->nb[1];
const int64_t nb2 = src->nb[2];
const int64_t nb3 = src->nb[3];
const enum ggml_type type = src->type;
const int64_t ts = ggml_type_size(type);
const int64_t bs = ggml_blck_size(type);
int64_t i1_diff = i1_high - i1_low;
// start of the requested row range within the (i2, i3) slice
const char * x = src_ptr + i1_low*nb1 + i2*nb2 + i3*nb3;
if (nb0 == ts && nb1 == ts*ne0/bs) {
// fully contiguous: one flat copy covers all requested rows
return cudaMemcpyAsync(dst_ptr, x, i1_diff*nb1, kind, stream);
} else if (nb0 == ts) {
// rows are internally contiguous but row stride differs: 2D copy
return cudaMemcpy2DAsync(dst_ptr, ts*ne0/bs, x, nb1, ts*ne0/bs, i1_diff, kind, stream);
} else {
// elements are strided within each row: copy row by row
for (int64_t i1 = 0; i1 < i1_diff; i1++) {
const void * rx = (const void *) ((const char *) x + i1*nb1);
void * rd = (void *) (dst_ptr + i1*ts*ne0/bs);
// pretend the row is a matrix with cols=1
cudaError_t r = cudaMemcpy2DAsync(rd, ts/bs, rx, nb0, ts/bs, ne0, kind, stream);
if (r != cudaSuccess) return r;
}
return cudaSuccess;
}
}
// Element-wise addition of src0 and src1 on the current device's stream, with
// src1 broadcast over the flattened slice. Supports f32 + f32 -> f32 and
// f16 + f32 -> f16 (the latter reads src0 via the raw/quantized pointer);
// any other type combination aborts.
inline void ggml_cuda_op_add(
    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
    float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
    cudaStream_t & cudaStream_main){

    GGML_ASSERT(src0_ddq_i != nullptr || src0_ddf_i != nullptr);
    GGML_ASSERT(src1_ddf_i != nullptr);
    GGML_ASSERT(dst_ddf_i != nullptr);

    const int64_t n_rows = i01_high - i01_low;
    const int64_t n_dst  = src0->ne[0] * n_rows;           // elements produced in this slice
    const int64_t n_bcst = src1->ne[0] * src1->ne[1];      // elements of the broadcast operand

    if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
        add_f32_cuda(src0_ddf_i, src1_ddf_i, dst_ddf_i, n_dst, n_bcst, cudaStream_main);
    } else if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) {
        add_f16_f32_f16_cuda((half *) src0_ddq_i, src1_ddf_i, (half *) dst_ddf_i, n_dst, cudaStream_main);
    } else {
        GGML_ASSERT(false);
    }

    (void) src1;
    (void) dst;
    (void) src0_ddq_i;
    (void) i02;
    (void) i1;
}
// Element-wise f32 multiplication dst = src0 * src1 on the current device's
// stream, with src1 broadcast over the flattened slice.
inline void ggml_cuda_op_mul(
    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
    float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
    cudaStream_t & cudaStream_main){

    GGML_ASSERT(src0_ddf_i != nullptr);
    GGML_ASSERT(src1_ddf_i != nullptr);
    GGML_ASSERT(dst_ddf_i != nullptr);

    const int64_t n_rows = i01_high - i01_low;
    const int64_t n_dst  = src0->ne[0] * n_rows;           // elements produced in this slice
    const int64_t n_bcst = src1->ne[0] * src1->ne[1];      // elements of the broadcast operand

    mul_f32_cuda(src0_ddf_i, src1_ddf_i, dst_ddf_i, n_dst, n_bcst, cudaStream_main);

    (void) dst;
    (void) src0_ddq_i;
    (void) i02;
    (void) i1;
}
// GELU activation over the f32 slice, flattened to a 1D launch.
inline void ggml_cuda_op_gelu(
    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
    float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
    cudaStream_t & cudaStream_main){

    GGML_ASSERT(src0_ddf_i != nullptr);
    GGML_ASSERT(dst_ddf_i != nullptr);

    // flatten the [ne00, nrows] slice into a single 1D element count
    const int64_t n_elements = src0->ne[0] * (i01_high - i01_low);

    gelu_f32_cuda(src0_ddf_i, dst_ddf_i, n_elements, cudaStream_main);

    (void) src1;
    (void) dst;
    (void) src0_ddq_i;
    (void) src1_ddf_i;
    (void) i02;
    (void) i1;
}
// SiLU (swish) activation over the f32 slice, flattened to a 1D launch.
inline void ggml_cuda_op_silu(
    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
    float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
    cudaStream_t & cudaStream_main){

    GGML_ASSERT(src0_ddf_i != nullptr);
    GGML_ASSERT(dst_ddf_i != nullptr);

    // flatten the [ne00, nrows] slice into a single 1D element count
    const int64_t n_elements = src0->ne[0] * (i01_high - i01_low);

    silu_f32_cuda(src0_ddf_i, dst_ddf_i, n_elements, cudaStream_main);

    (void) src1;
    (void) dst;
    (void) src0_ddq_i;
    (void) src1_ddf_i;
    (void) i02;
    (void) i1;
}
// Layer normalization applied independently to each row of the f32 slice.
inline void ggml_cuda_op_norm(
    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
    float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
    cudaStream_t & cudaStream_main){

    GGML_ASSERT(src0_ddf_i != nullptr);
    GGML_ASSERT(dst_ddf_i != nullptr);

    const int64_t ncols = src0->ne[0];
    const int64_t nrows = i01_high - i01_low;

    norm_f32_cuda(src0_ddf_i, dst_ddf_i, ncols, nrows, cudaStream_main);

    (void) src1;
    (void) dst;
    (void) src0_ddq_i;
    (void) src1_ddf_i;
    (void) i02;
    (void) i1;
}
// RMS normalization applied independently to each row of the f32 slice.
// The epsilon value is carried in dst->op_params as raw float bytes.
inline void ggml_cuda_op_rms_norm(
    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
    float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
    cudaStream_t & cudaStream_main){

    GGML_ASSERT(src0_ddf_i != nullptr);
    GGML_ASSERT(dst_ddf_i != nullptr);

    const int64_t ncols = src0->ne[0];
    const int64_t nrows = i01_high - i01_low;

    // unpack eps from the op parameter blob (memcpy avoids aliasing issues)
    float eps;
    memcpy(&eps, dst->op_params, sizeof(float));

    rms_norm_f32_cuda(src0_ddf_i, dst_ddf_i, ncols, nrows, eps, cudaStream_main);

    (void) src1;
    (void) dst;
    (void) src0_ddq_i;
    (void) src1_ddf_i;
    (void) i02;
    (void) i1;
}
// Quantized matrix multiplication: quantizes the f32 src1 slice to q8_1 in a
// temporary pool buffer, then dispatches the mul_mat_q kernel matching src0's
// quantization type. Aborts on unsupported src0 types.
inline void ggml_cuda_op_mul_mat_q(
    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
    float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
    cudaStream_t & cudaStream_main){

    GGML_ASSERT(src0_ddq_i != nullptr);
    GGML_ASSERT(src1_ddf_i != nullptr);
    GGML_ASSERT(dst_ddf_i != nullptr);

    const int64_t ne00 = src0->ne[0];

    const int64_t ne10 = src1->ne[0];
    const int64_t ne11 = src1->ne[1];
    GGML_ASSERT(ne10 % QK8_1 == 0); // q8_1 quantization works on whole blocks

    const int64_t ne0 = dst->ne[0];

    const int64_t i01_diff = i01_high - i01_low; // rows of src0 handled here

    int id;
    CUDA_CHECK(cudaGetDevice(&id));

    // the main device has a larger memory buffer to hold the results from all GPUs
    // nrows_dst == nrows of the matrix that the dequantize_mul_mat kernel writes into
    const int64_t nrows_dst = dst->backend == GGML_BACKEND_GPU && id == g_main_device ? ne0 : i01_diff;

    // round the row length up to a multiple of MATRIX_ROW_PADDING so the
    // quantization kernel can always process complete blocks
    const int64_t padded_row_size = ne10 % MATRIX_ROW_PADDING == 0 ?
        ne10 : ne10 - ne10 % MATRIX_ROW_PADDING + MATRIX_ROW_PADDING;

    size_t as; // actual size of the pool allocation (needed to return it)
    void * src1_q8_1 = ggml_cuda_pool_malloc(padded_row_size*ne11*sizeof(block_q8_1)/QK8_1, &as);
    quantize_row_q8_1_cuda(src1_ddf_i, src1_q8_1, ne10, ne11, padded_row_size, cudaStream_main);

    switch (src0->type) {
        case GGML_TYPE_Q4_0:
            ggml_mul_mat_q4_0_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, i01_diff, ne11, padded_row_size, nrows_dst, cudaStream_main);
            break;
        case GGML_TYPE_Q4_1:
            ggml_mul_mat_q4_1_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, i01_diff, ne11, padded_row_size, nrows_dst, cudaStream_main);
            break;
        case GGML_TYPE_Q5_0:
            ggml_mul_mat_q5_0_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, i01_diff, ne11, padded_row_size, nrows_dst, cudaStream_main);
            break;
        case GGML_TYPE_Q5_1:
            ggml_mul_mat_q5_1_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, i01_diff, ne11, padded_row_size, nrows_dst, cudaStream_main);
            break;
        case GGML_TYPE_Q8_0:
            ggml_mul_mat_q8_0_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, i01_diff, ne11, padded_row_size, nrows_dst, cudaStream_main);
            break;
        case GGML_TYPE_Q2_K:
            ggml_mul_mat_q2_K_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, i01_diff, ne11, padded_row_size, nrows_dst, cudaStream_main);
            break;
        case GGML_TYPE_Q3_K:
            ggml_mul_mat_q3_K_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, i01_diff, ne11, padded_row_size, nrows_dst, cudaStream_main);
            break;
        case GGML_TYPE_Q4_K:
            ggml_mul_mat_q4_K_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, i01_diff, ne11, padded_row_size, nrows_dst, cudaStream_main);
            break;
        case GGML_TYPE_Q5_K:
            ggml_mul_mat_q5_K_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, i01_diff, ne11, padded_row_size, nrows_dst, cudaStream_main);
            break;
        case GGML_TYPE_Q6_K:
            ggml_mul_mat_q6_K_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, i01_diff, ne11, padded_row_size, nrows_dst, cudaStream_main);
            break;
        default:
            GGML_ASSERT(false);
            break;
    }

    ggml_cuda_pool_free(src1_q8_1, as);

    (void) src1;
    (void) dst;
    (void) src0_ddf_i;
    (void) i02;
    (void) i1;
}
// Multiple-of constraint for the per-device row split of a weight tensor of
// the given type, so quantization blocks are never cut across devices. The
// rounding depends on the highest compute capability among devices that are
// actually assigned a nonzero share of rows.
static int64_t get_row_rounding(ggml_type type) {
    int max_compute_capability = INT_MIN;
    for (int id = 0; id < g_device_count; ++id) {
        // a device participates if its split boundary is below the next one
        const float next_boundary = id + 1 < g_device_count ? g_tensor_split[id + 1] : 1.0f;
        const bool  has_rows      = g_tensor_split[id] < next_boundary;
        if (has_rows && g_compute_capabilities[id] > max_compute_capability) {
            max_compute_capability = g_compute_capabilities[id];
        }
    }

    switch(type) {
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
            // mul_mat_q kernels use larger tiles on Turing and newer
            return max_compute_capability >= CC_TURING ? 128 : 64;
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q6_K:
            return 64;
        case GGML_TYPE_F16:
            return 1;
        default:
            GGML_ASSERT(false);
    }
}
// Matrix-vector multiplication for a quantized/f16 src0 and an f32 src1 row.
// Picks between two kernel families:
//   * mul_mat_vec_q:   src1 quantized to q8_1 first, integer dot products
//                      (requires DP4A, i.e. compute capability >= MIN_CC_DP4A)
//   * dequantize_mul_mat_vec: dequantizes src0 on the fly, f32 (or f16 with
//                      GGML_CUDA_F16) accumulation
// GGML_CUDA_FORCE_DMMV forces the second family at compile time.
inline void ggml_cuda_op_mul_mat_vec(
    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
    float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
    cudaStream_t & cudaStream_main){

    GGML_ASSERT(src0_ddq_i != nullptr);
    GGML_ASSERT(src1_ddf_i != nullptr);
    GGML_ASSERT(dst_ddf_i != nullptr);

    const int64_t ne00 = src0->ne[0];
    const int64_t nrows = i01_high - i01_low;

#ifdef GGML_CUDA_FORCE_DMMV
    const bool use_mul_mat_vec_q = false;
    (void) g_compute_capabilities[0];
#else
    int id;
    CUDA_CHECK(cudaGetDevice(&id));

    // base set of types with a mul_mat_vec_q kernel
    bool mul_mat_vec_q_implemented =
        src0->type == GGML_TYPE_Q4_0 ||
        src0->type == GGML_TYPE_Q4_1 ||
        src0->type == GGML_TYPE_Q5_0 ||
        src0->type == GGML_TYPE_Q5_1 ||
        src0->type == GGML_TYPE_Q8_0;
#if QK_K == 256
    // K-quant kernels only exist for the default super-block size
    mul_mat_vec_q_implemented = mul_mat_vec_q_implemented ||
        src0->type == GGML_TYPE_Q2_K ||
        src0->type == GGML_TYPE_Q3_K ||
        src0->type == GGML_TYPE_Q4_K ||
        src0->type == GGML_TYPE_Q5_K ||
        src0->type == GGML_TYPE_Q6_K;
#endif // QK_K == 256

    const bool use_mul_mat_vec_q = g_compute_capabilities[id] >= MIN_CC_DP4A && mul_mat_vec_q_implemented;
#endif

    if (use_mul_mat_vec_q) {
        // pad the row so the q8_1 quantization kernel sees whole blocks
        const int64_t padded_row_size = ne00 % MATRIX_ROW_PADDING == 0 ?
            ne00 : ne00 - ne00 % MATRIX_ROW_PADDING + MATRIX_ROW_PADDING;
        size_t as; // actual size of the pool allocation
        void * src1_q8_1 = ggml_cuda_pool_malloc(padded_row_size*sizeof(block_q8_1)/QK8_1, &as);
        quantize_row_q8_1_cuda(src1_ddf_i, src1_q8_1, ne00, 1, padded_row_size, cudaStream_main);

        switch (src0->type) {
            case GGML_TYPE_Q4_0:
                mul_mat_vec_q4_0_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, nrows, cudaStream_main);
                break;
            case GGML_TYPE_Q4_1:
                mul_mat_vec_q4_1_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, nrows, cudaStream_main);
                break;
            case GGML_TYPE_Q5_0:
                mul_mat_vec_q5_0_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, nrows, cudaStream_main);
                break;
            case GGML_TYPE_Q5_1:
                mul_mat_vec_q5_1_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, nrows, cudaStream_main);
                break;
            case GGML_TYPE_Q8_0:
                mul_mat_vec_q8_0_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, nrows, cudaStream_main);
                break;
            case GGML_TYPE_Q2_K:
                mul_mat_vec_q2_K_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, nrows, cudaStream_main);
                break;
            case GGML_TYPE_Q3_K:
                mul_mat_vec_q3_K_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, nrows, cudaStream_main);
                break;
            case GGML_TYPE_Q4_K:
                mul_mat_vec_q4_K_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, nrows, cudaStream_main);
                break;
            case GGML_TYPE_Q5_K:
                mul_mat_vec_q5_K_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, nrows, cudaStream_main);
                break;
            case GGML_TYPE_Q6_K:
                mul_mat_vec_q6_K_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, nrows, cudaStream_main);
                break;
            default:
                GGML_ASSERT(false);
                break;
        }

        ggml_cuda_pool_free(src1_q8_1, as);
    } else {
        // on some GPUs it is faster to convert src1 to half and to use half precision intrinsics
#ifdef GGML_CUDA_F16
        size_t ash;
        dfloat * src1_dfloat = nullptr; // dfloat == half
        bool src1_convert_f16 = src0->type == GGML_TYPE_Q4_0 || src0->type == GGML_TYPE_Q4_1 ||
            src0->type == GGML_TYPE_Q5_0 || src0->type == GGML_TYPE_Q5_1 ||
            src0->type == GGML_TYPE_Q8_0 || src0->type == GGML_TYPE_F16;

        if (src1_convert_f16) {
            src1_dfloat = (half *) ggml_cuda_pool_malloc(ne00*sizeof(half), &ash);
            ggml_cpy_f32_f16_cuda((char *) src1_ddf_i, (char *) src1_dfloat, ne00,
                                    ne00, 1, sizeof(float), 0, 0,
                                    ne00, 1, sizeof(half),  0, 0, cudaStream_main);
        }
#else
        dfloat * src1_dfloat = src1_ddf_i; // dfloat == float, no conversion
#endif // GGML_CUDA_F16

        // NOTE(review): the K-quant cases pass src1_ddf_i (always f32) instead
        // of src1_dfloat -- presumably those kernels take f32 input only; confirm.
        switch (src0->type) {
            case GGML_TYPE_Q4_0:
                dequantize_mul_mat_vec_q4_0_cuda(src0_ddq_i, src1_dfloat, dst_ddf_i, ne00, nrows, cudaStream_main);
                break;
            case GGML_TYPE_Q4_1:
                dequantize_mul_mat_vec_q4_1_cuda(src0_ddq_i, src1_dfloat, dst_ddf_i, ne00, nrows, cudaStream_main);
                break;
            case GGML_TYPE_Q5_0:
                dequantize_mul_mat_vec_q5_0_cuda(src0_ddq_i, src1_dfloat, dst_ddf_i, ne00, nrows, cudaStream_main);
                break;
            case GGML_TYPE_Q5_1:
                dequantize_mul_mat_vec_q5_1_cuda(src0_ddq_i, src1_dfloat, dst_ddf_i, ne00, nrows, cudaStream_main);
                break;
            case GGML_TYPE_Q8_0:
                dequantize_mul_mat_vec_q8_0_cuda(src0_ddq_i, src1_dfloat, dst_ddf_i, ne00, nrows, cudaStream_main);
                break;
            case GGML_TYPE_Q2_K:
                dequantize_mul_mat_vec_q2_K_cuda(src0_ddq_i, src1_ddf_i, dst_ddf_i, ne00, nrows, cudaStream_main);
                break;
            case GGML_TYPE_Q3_K:
                dequantize_mul_mat_vec_q3_K_cuda(src0_ddq_i, src1_ddf_i, dst_ddf_i, ne00, nrows, cudaStream_main);
                break;
            case GGML_TYPE_Q4_K:
                dequantize_mul_mat_vec_q4_K_cuda(src0_ddq_i, src1_ddf_i, dst_ddf_i, ne00, nrows, cudaStream_main);
                break;
            case GGML_TYPE_Q5_K:
                dequantize_mul_mat_vec_q5_K_cuda(src0_ddq_i, src1_ddf_i, dst_ddf_i, ne00, nrows, cudaStream_main);
                break;
            case GGML_TYPE_Q6_K:
                dequantize_mul_mat_vec_q6_K_cuda(src0_ddq_i, src1_ddf_i, dst_ddf_i, ne00, nrows, cudaStream_main);
                break;
            case GGML_TYPE_F16:
                convert_mul_mat_vec_f16_cuda(src0_ddq_i, src1_dfloat, dst_ddf_i, ne00, nrows, cudaStream_main);
                break;
            default:
                GGML_ASSERT(false);
                break;
        }

#ifdef GGML_CUDA_F16
        if (src1_convert_f16) {
            ggml_cuda_pool_free(src1_dfloat, ash);
        }
#endif // GGML_CUDA_F16
    }

    (void) src1;
    (void) dst;
    (void) src0_ddf_i;
    (void) i02;
    (void) i1;
}
// f32 GEMM for the row slice [i01_low, i01_high) of src0 via cuBLAS:
// dst = src0^T * src1 (src0 is stored transposed, hence CUBLAS_OP_T).
inline void ggml_cuda_op_mul_mat_cublas(
    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
    float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
    cudaStream_t & cudaStream_main){

    GGML_ASSERT(src0_ddf_i != nullptr);
    GGML_ASSERT(src1_ddf_i != nullptr);
    GGML_ASSERT(dst_ddf_i != nullptr);

    const int64_t ne00     = src0->ne[0];
    const int64_t ne10     = src1->ne[0];
    const int64_t ne11     = src1->ne[1];
    const int64_t i01_diff = i01_high - i01_low;

    int id;
    CUDA_CHECK(cudaGetDevice(&id));

    // the main device has a larger memory buffer to hold the results from all GPUs
    // ldc == nrows of the matrix that cuBLAS writes into
    const int ldc = dst->backend == GGML_BACKEND_GPU && id == g_main_device ? dst->ne[0] : i01_diff;

    // plain C = A^T * B with no scaling
    const float alpha = 1.0f;
    const float beta  = 0.0f;

    CUBLAS_CHECK(cublasSetStream(g_cublas_handles[id], cudaStream_main));
    CUBLAS_CHECK(
        cublasSgemm(g_cublas_handles[id], CUBLAS_OP_T, CUBLAS_OP_N,
                i01_diff, ne11, ne10,
                &alpha, src0_ddf_i, ne00,
                        src1_ddf_i, ne10,
                &beta,  dst_ddf_i,  ldc));

    (void) dst;
    (void) src0_ddq_i;
    (void) i02;
    (void) i1;
}
// Rotary position embedding (RoPE) over the f32 slice. The `mode` bits stored
// in dst->op_params select the variant: bit 2 (0x4) -> GLM-style, bit 1 (0x2)
// -> NeoX-style, otherwise the default rope; bit 0 controls whether n_past is
// added to the position.
inline void ggml_cuda_op_rope(
    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
    float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
    cudaStream_t & cudaStream_main){

    GGML_ASSERT(src0_ddf_i != nullptr);
    GGML_ASSERT(dst_ddf_i != nullptr);

    const int64_t ne00 = src0->ne[0];
    const int64_t ne01 = src0->ne[1];
    const int64_t i01_diff = i01_high - i01_low;

    // integer parameters are packed front-to-back in op_params
    const int n_past = ((int32_t *) dst->op_params)[0];
    const int n_dims = ((int32_t *) dst->op_params)[1];
    const int mode   = ((int32_t *) dst->op_params)[2];
    const int n_ctx  = ((int32_t *) dst->op_params)[3];
    // RoPE alteration for extended context: base/scale follow the ints as raw floats
    float freq_base, freq_scale;
    memcpy(&freq_base,  (int32_t *) dst->op_params + 4, sizeof(float));
    memcpy(&freq_scale, (int32_t *) dst->op_params + 5, sizeof(float));

    const float theta_scale = powf(freq_base, -2.0f/n_dims);

    const bool is_neox = mode & 2;
    const bool is_glm  = mode & 4;

    // compute
    if (is_glm) {
        // GLM splits the position into an in-context part and a block part
        const float p = (((mode & 1) == 0 ? n_past + i02 : i02)) * freq_scale;
        const float id_p = min(p, n_ctx - 2.f);
        const float block_p = max(p - (n_ctx - 2.f), 0.f);
        rope_glm_f32_cuda(src0_ddf_i, dst_ddf_i, ne00, i01_diff, id_p, block_p, theta_scale, cudaStream_main);
    } else if (is_neox) {
        GGML_ASSERT(ne00 == n_dims && "ne00 != n_dims is not implemented for CUDA yet");
        const float p0 = (((mode & 1) == 0 ? n_past : 0)) * freq_scale;
        rope_neox_f32_cuda(src0_ddf_i, dst_ddf_i, ne00, i01_diff, p0, freq_scale, ne01, theta_scale, cudaStream_main);
    } else {
        const float p0 = (((mode & 1) == 0 ? n_past : 0)) * freq_scale;
        rope_f32_cuda(src0_ddf_i, dst_ddf_i, ne00, i01_diff, p0, freq_scale, ne01, theta_scale, cudaStream_main);
    }

    (void) src1;
    (void) dst;
    (void) src0_ddq_i;
    (void) src1_ddf_i;
    (void) i1;
}
// ALiBi (attention with linear biases): adds head-dependent linear position
// biases to the f32 slice. Slopes follow the ALiBi scheme: a geometric series
// with ratio m0 for the first n_heads_log2_floor heads and ratio m1 beyond.
inline void ggml_cuda_op_alibi(
    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
    float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
    cudaStream_t & cudaStream_main){

    GGML_ASSERT(src0_ddf_i != nullptr);
    GGML_ASSERT(dst_ddf_i != nullptr);

    const int64_t ne00 = src0->ne[0];
    const int64_t ne01 = src0->ne[1];
    const int64_t ne02 = src0->ne[2];
    const int64_t i01_diff = i01_high - i01_low;

    // ints first, then max_bias as raw float bytes, all packed in op_params
    const int n_past = ((int32_t *) dst->op_params)[0];
    const int n_head = ((int32_t *) dst->op_params)[1];
    float max_bias;
    memcpy(&max_bias, (int32_t *) dst->op_params + 2, sizeof(float));

    GGML_ASSERT(ne01 + n_past == ne00);
    GGML_ASSERT(n_head == ne02);

    // largest power of two <= n_head determines the two slope series
    const int n_heads_log2_floor = 1 << (int) floor(log2(n_head));

    const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor);
    const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_heads_log2_floor);

    // compute
    alibi_f32_cuda(src0_ddf_i, dst_ddf_i, ne00, i01_diff, ne01, n_heads_log2_floor, m0, m1, cudaStream_main);

    (void) src1;
    (void) src0_ddq_i;
    (void) src1_ddf_i;
    (void) i02;
    (void) i1;
}
// Causal masking: sets entries above the diagonal (shifted by n_past) of each
// row of the f32 slice to -infinity.
inline void ggml_cuda_op_diag_mask_inf(
    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
    float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
    cudaStream_t & cudaStream_main){

    GGML_ASSERT(src0_ddf_i != nullptr);
    GGML_ASSERT(dst_ddf_i != nullptr);

    const int64_t ncols       = src0->ne[0];
    const int64_t nrows_total = src0->ne[1];
    const int64_t nrows       = i01_high - i01_low;

    // n_past is the first entry of the packed op parameters
    const int n_past = ((int32_t *) dst->op_params)[0];

    diag_mask_inf_f32_cuda(src0_ddf_i, dst_ddf_i, ncols, nrows, nrows_total, n_past, cudaStream_main);

    (void) src1;
    (void) dst;
    (void) src0_ddq_i;
    (void) src1_ddf_i;
    (void) i02;
    (void) i1;
}
// Row-wise softmax over the f32 slice: each of the nrows rows of ncols
// entries is normalized independently.
inline void ggml_cuda_op_soft_max(
    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
    float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
    cudaStream_t & cudaStream_main){

    GGML_ASSERT(src0_ddf_i != nullptr);
    GGML_ASSERT(dst_ddf_i != nullptr);

    const int64_t ncols = src0->ne[0];
    const int64_t nrows = i01_high - i01_low;

    soft_max_f32_cuda(src0_ddf_i, dst_ddf_i, ncols, nrows, cudaStream_main);

    (void) src1;
    (void) dst;
    (void) src0_ddq_i;
    (void) src1_ddf_i;
    (void) i02;
    (void) i1;
}
// Multiply the f32 slice by a scalar. The scalar is the single f32 value in
// src1->data, which is read on the host here.
inline void ggml_cuda_op_scale(
    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
    float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
    cudaStream_t & cudaStream_main){

    GGML_ASSERT(src0_ddf_i != nullptr);
    GGML_ASSERT(dst_ddf_i != nullptr);

    const float scale = ((float *) src1->data)[0];

    // flatten the [ne00, nrows] slice into a single 1D element count
    const int64_t n_elements = src0->ne[0] * (i01_high - i01_low);

    scale_f32_cuda(src0_ddf_i, dst_ddf_i, scale, n_elements, cudaStream_main);
    CUDA_CHECK(cudaGetLastError());

    (void) src1;
    (void) dst;
    (void) src0_ddq_i;
    (void) src1_ddf_i;
    (void) i02;
    (void) i1;
}
// Central driver for all CUDA ops: slices src0 (optionally split across
// multiple GPUs by g_tensor_split), stages src0/src1 on each device, invokes
// `op` per (i03, i02) slice, and gathers results back to the destination
// backend. `src0_needs_f32` requests dequantization of src0 before `op`;
// `flatten_rows` collapses all dims into one launch (single iteration).
static void ggml_cuda_op(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst,
                         ggml_cuda_op_t op, bool src0_needs_f32, bool flatten_rows) {
    const int64_t ne00 = src0->ne[0];
    const int64_t ne01 = src0->ne[1];
    const int64_t ne02 = src0->ne[2];
    const int64_t ne03 = src0->ne[3];
    const int64_t nrows0 = ggml_nrows(src0);

    const bool use_src1 = src1 != nullptr;
    const int64_t ne10 = use_src1 ? src1->ne[0] : 1;
    const int64_t ne11 = use_src1 ? src1->ne[1] : 1;
    const int64_t ne12 = use_src1 ? src1->ne[2] : 1;
    const int64_t ne13 = use_src1 ? src1->ne[3] : 1;
    const int64_t nrows1 = use_src1 ? ggml_nrows(src1) : 1;

    GGML_ASSERT(ne03 == ne13);

    const int64_t ne0 = dst->ne[0];
    const int64_t ne1 = dst->ne[1];

    const int nb2 = dst->nb[2];
    const int nb3 = dst->nb[3];

    GGML_ASSERT(dst->backend != GGML_BACKEND_GPU_SPLIT);
    GGML_ASSERT(!use_src1 || src1->backend != GGML_BACKEND_GPU_SPLIT);

    // strides for iteration over dims 3 and 2
    const int64_t num_iters_0 = ne02 >= ne12 ? ne02*ne03 : ne12*ne13;
    const int64_t num_iters = flatten_rows ? 1 : num_iters_0;
    const int64_t stride_mod = flatten_rows ? num_iters_0 : 1;
    const int64_t src0_stride = ne00 * ne01 * stride_mod;
    const int64_t src1_stride = ne10 * ne11 * stride_mod;
    const int64_t dst_stride = ne0 * ne1 * stride_mod;

    const int64_t rows_per_iter = flatten_rows ? nrows0 : ne01;
    const int64_t i03_max = flatten_rows ? 1 : ne03;
    const int64_t i02_max = flatten_rows ? 1 : (ne02 >= ne12 ? ne02 : ne12);
    // src0 may be broadcast along dim 2 against a larger src1
    const int64_t i02_divisor = ne02 >= ne12 ? 1 : ne12 / ne02;
    GGML_ASSERT(!(flatten_rows && ne02 < ne12));

    const size_t src0_ts = ggml_type_size(src0->type);
    const size_t src0_bs = ggml_blck_size(src0->type);

    struct ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu *) src0->extra;
    struct ggml_tensor_extra_gpu * src1_extra = use_src1 ? (ggml_tensor_extra_gpu *) src1->extra : nullptr;
    struct ggml_tensor_extra_gpu * dst_extra = (ggml_tensor_extra_gpu *) dst->extra;

    const bool src0_on_device = src0->backend == GGML_BACKEND_GPU || src0->backend == GGML_BACKEND_GPU_SPLIT;
    const bool src0_is_contiguous = ggml_is_contiguous(src0);
    const bool src0_is_f32 = src0->type == GGML_TYPE_F32;

    const bool src1_is_contiguous = use_src1 && ggml_is_contiguous(src1);
    // these ops read src1 on the host (op params), so it is never uploaded
    const bool src1_stays_on_host = use_src1 && (
        dst->op == GGML_OP_SCALE || dst->op == GGML_OP_DIAG_MASK_INF || dst->op == GGML_OP_ROPE);

    const bool split = src0->backend == GGML_BACKEND_GPU_SPLIT;
    GGML_ASSERT(!(split && ne02 < ne12));

    const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(src0->type);

    // dd = data device
    char  * src0_ddq[GGML_CUDA_MAX_DEVICES] = {nullptr}; // quantized
    float * src0_ddf[GGML_CUDA_MAX_DEVICES] = {nullptr}; // float
    float * src1_ddf[GGML_CUDA_MAX_DEVICES] = {nullptr};
    float *  dst_ddf[GGML_CUDA_MAX_DEVICES] = {nullptr};

    // asq = actual size quantized, asf = actual size float
    size_t src0_asq[GGML_CUDA_MAX_DEVICES] = {0};
    size_t src0_asf[GGML_CUDA_MAX_DEVICES] = {0};
    size_t src1_asf[GGML_CUDA_MAX_DEVICES] = {0};
    size_t  dst_asf[GGML_CUDA_MAX_DEVICES] = {0};

    // if multiple devices are used they need to wait for the main device
    // here an event is recorded that signifies that the main device has finished calculating the input data
    if (split && g_device_count > 1) {
        CUDA_CHECK(cudaSetDevice(g_main_device));
        CUDA_CHECK(cudaEventRecord(src0_extra->events[g_main_device], g_cudaStreams_main[g_main_device]));
    }

    for (int id = 0; id < g_device_count; ++id) {
        if (!split && id != g_main_device) {
            continue;
        }

        const bool src1_on_device = use_src1 && src1->backend == GGML_BACKEND_GPU && id == g_main_device;
        const bool dst_on_device = dst->backend == GGML_BACKEND_GPU && id == g_main_device;

        // determine this device's share of src0 rows
        int64_t row_low, row_high;
        if (split) {
            const int64_t rounding = get_row_rounding(src0->type);

            row_low = id == 0 ? 0 : nrows0*g_tensor_split[id];
            row_low -= row_low % rounding;

            if (id == g_device_count - 1) {
                row_high = nrows0;
            } else {
                row_high = nrows0*g_tensor_split[id + 1];
                row_high -= row_high % rounding;
            }
        } else {
            row_low = 0;
            row_high = nrows0*i02_divisor;
        }
        if (row_low == row_high) {
            continue;
        }

        int64_t row_diff = row_high - row_low;

        cudaSetDevice(id);
        cudaStream_t cudaStream_main = g_cudaStreams_main[id];

        // wait for main GPU data if necessary
        if (split && id != g_main_device) {
            CUDA_CHECK(cudaStreamWaitEvent(cudaStream_main, src0_extra->events[g_main_device]));
        }

        // stage src0: reuse device-resident data when possible, otherwise
        // allocate temporary pool buffers for the upload
        if (src0_on_device && src0_is_contiguous) {
            if (src0_is_f32) {
                src0_ddf[id] = (float *) src0_extra->data_device[id];
            } else {
                src0_ddq[id] = (char *) src0_extra->data_device[id];
            }
        } else {
            if (src0_is_f32) {
                src0_ddf[id] = (float *) ggml_cuda_pool_malloc(row_diff*ne00 * sizeof(float), &src0_asf[id]);
            } else {
                src0_ddq[id] = (char *) ggml_cuda_pool_malloc(row_diff*ne00 * src0_ts/src0_bs, &src0_asq[id]);
            }
        }

        // extra f32 buffer for the dequantized copy of src0 if the op needs it
        if (src0_needs_f32 && !src0_is_f32) {
            src0_ddf[id] = (float *) ggml_cuda_pool_malloc(row_diff*ne00 * sizeof(float), &src0_asf[id]);
        }

        if (use_src1 && !src1_stays_on_host) {
            if (src1_on_device && src1_is_contiguous) {
                src1_ddf[id] = (float *) src1_extra->data_device[id];
            } else {
                src1_ddf[id] = (float *) ggml_cuda_pool_malloc(num_iters*src1_stride * sizeof(float), &src1_asf[id]);
            }
        }
        if (dst_on_device) {
            dst_ddf[id] = (float *) dst_extra->data_device[id];
        } else {
            size_t size_dst_ddf = split ? row_diff*ne1 * sizeof(float) : num_iters*dst_stride * sizeof(float);
            dst_ddf[id] = (float *) ggml_cuda_pool_malloc(size_dst_ddf, &dst_asf[id]);
        }

        for (int64_t i03 = 0; i03 < i03_max; i03++) {
            const int64_t i13 = i03 % ne13;
            for (int64_t i02 = 0; i02 < i02_max; i02++) {
                const int64_t i12 = i02 % ne12;

                const int64_t i0 = i03*i02_max + i02;

                // i0 values that contain the lower/upper rows for a split tensor when using multiple GPUs
                const int64_t i0_offset_low = row_low/rows_per_iter;
                const int64_t i0_offset_high = row_high/rows_per_iter;

                int64_t i01_low = 0;
                int64_t i01_high = rows_per_iter;
                if (split) {
                    if (i0 < i0_offset_low || i0 > i0_offset_high) {
                        continue;
                    }
                    if (i0 == i0_offset_low) {
                        i01_low = row_low % rows_per_iter;
                    }
                    if (i0 == i0_offset_high) {
                        i01_high = row_high % rows_per_iter;
                    }
                }

                // There is possibly a bug in the Windows nvcc compiler regarding instruction reordering or optimizing out local variables.
                // Removing the first assert or changing the order of the arguments causes the second assert to fail.
                // Removing both asserts results in i01_high becoming 0 which in turn results in garbage output.
                // The root cause seems to be a problem with i0_offset_high becoming 0 when it should always be >0 (for single GPU).
                GGML_ASSERT(i01_low == 0 || g_device_count > 1);
                GGML_ASSERT(i01_high == rows_per_iter || g_device_count > 1);

                const int64_t i01_diff = i01_high - i01_low;
                if (i01_diff == 0) {
                    continue;
                }
                const int64_t i11 = i13*ne12 + i12;

                // for split tensors the data begins at i0 == i0_offset_low
                char  * src0_ddq_i = src0_ddq[id] + (i0/i02_divisor - i0_offset_low)*src0_stride*src0_ts/src0_bs;
                float * src0_ddf_i = src0_ddf[id] + (i0/i02_divisor - i0_offset_low)*src0_stride;
                float * src1_ddf_i = src1_ddf[id] + i11*src1_stride;
                float * dst_ddf_i  =  dst_ddf[id] + (i0 - i0_offset_low)*dst_stride;

                // for split tensors the data pointer needs to be rounded down
                // to the bin edge for i03, i02 bins beyond the first
                if (i0 - i0_offset_low > 0) {
                    GGML_ASSERT(!flatten_rows);
                    src0_ddq_i -= (row_low % ne01)*ne00 * src0_ts/src0_bs;
                    src0_ddf_i -= (row_low % ne01)*ne00;
                    dst_ddf_i  -= (row_low % ne0)*ne1;
                }

                // the main device memory buffer can be on VRAM scratch, with space for all partial results
                // in that case an offset on dst_ddf_i is needed
                if (dst->backend == GGML_BACKEND_GPU && id == g_main_device) {
                    dst_ddf_i += i01_low; // offset is 0 if no tensor split
                }

                // copy src0, src1 to device if necessary
                if (use_src1 && !src1_stays_on_host) {
                    if (src1->backend == GGML_BACKEND_CPU) {
                        GGML_ASSERT(!flatten_rows || nrows0 == ggml_nrows(src1));
                        int64_t nrows1 = flatten_rows ? nrows0 : ne11;
                        CUDA_CHECK(ggml_cuda_cpy_tensor_2d(src1_ddf_i, src1, i03, i02, 0, nrows1, cudaStream_main));
                    } else if (src1->backend == GGML_BACKEND_GPU && src1_is_contiguous) {
                        if (id != g_main_device) {
                            // pull src1 from the main device's copy
                            GGML_ASSERT(!flatten_rows);
                            float * src1_ddf_i_source = (float *) src1_extra->data_device[g_main_device];
                            src1_ddf_i_source += i11*src1_stride;
                            CUDA_CHECK(cudaMemcpyAsync(src1_ddf_i, src1_ddf_i_source, src1_stride*sizeof(float),
                                                    cudaMemcpyDeviceToDevice, cudaStream_main));
                        }
                    } else if (src1_on_device && !src1_is_contiguous) {
                        GGML_ASSERT(!split);
                        CUDA_CHECK(ggml_cuda_cpy_tensor_2d(src1_ddf_i, src1, i03, i02, 0, ne11, cudaStream_main));
                    } else {
                        GGML_ASSERT(false);
                    }
                }

                if ((!src0_on_device || !src0_is_contiguous) && i02 % i02_divisor == 0) {
                    if (src0_is_f32) {
                        CUDA_CHECK(ggml_cuda_cpy_tensor_2d(src0_ddf_i, src0, i03, i02/i02_divisor, i01_low, i01_high, cudaStream_main));
                    } else {
                        CUDA_CHECK(ggml_cuda_cpy_tensor_2d(src0_ddq_i, src0, i03, i02/i02_divisor, i01_low, i01_high, cudaStream_main));
                    }
                }

                // convert src0 to f32 if it is necessary for the ggml_cuda_op
                if (src0_needs_f32 && !src0_is_f32) {
                    to_fp32_cuda(src0_ddq_i, src0_ddf_i, i01_diff*ne00, cudaStream_main);
                    CUDA_CHECK(cudaGetLastError());
                }

                // do the computation
                op(src0, src1, dst, src0_ddq_i, src0_ddf_i, src1_ddf_i, dst_ddf_i, i02, i01_low, i01_high, i11, cudaStream_main);
                CUDA_CHECK(cudaGetLastError());

                // copy dst to host or other device if necessary
                if (!dst_on_device) {
                    void * dst_off_device;
                    cudaMemcpyKind kind;
                    if (dst->backend == GGML_BACKEND_CPU) {
                        dst_off_device = dst->data;
                        kind = cudaMemcpyDeviceToHost;
                    } else if (dst->backend == GGML_BACKEND_GPU) {
                        dst_off_device = dst_extra->data_device[g_main_device];
                        kind = cudaMemcpyDeviceToDevice;
                    } else {
                        GGML_ASSERT(false);
                    }
                    if (split) {
                        // src0 = weight matrix is saved as a transposed matrix for better memory layout.
                        // dst is NOT transposed.
                        // The outputs of matrix matrix multiplications can therefore NOT simply be concatenated for >1 GPU.
                        // Instead they need to be copied to the correct slice in ne0 = dst row index.
                        // If dst is a vector with ne0 == 1 then you don't have to do this but it still produces correct results.
                        float * dhf_dst_i = (float *) ((char *) dst_off_device + i01_low*sizeof(float) + i02*nb2 + i03*nb3);
                        CUDA_CHECK(cudaMemcpy2DAsync(dhf_dst_i, ne0*sizeof(float), dst_ddf_i, i01_diff*sizeof(float),
                                                    i01_diff*sizeof(float), ne1, kind, cudaStream_main));
                    } else {
                        float * dhf_dst_i = (float *) ((char *) dst_off_device + i02*nb2 + i03*nb3);
                        CUDA_CHECK(cudaMemcpyAsync(dhf_dst_i, dst_ddf_i, dst_stride*sizeof(float), kind, cudaStream_main));
                    }
                }

                // signify to main device that other device is done
                if (split && g_device_count > 1 && id != g_main_device) {
                    CUDA_CHECK(cudaEventRecord(src0_extra->events[id], cudaStream_main));
                }
            }
        }
    }

    // wait until each device is finished, then free their buffers
    for (int id = 0; id < g_device_count; ++id) {
        if (src0_asq[id] == 0 && src0_asf[id] == 0 && src1_asf[id] == 0 && dst_asf[id] == 0) {
            continue;
        }

        CUDA_CHECK(cudaSetDevice(id));

        if (src0_asq[id] > 0) {
            ggml_cuda_pool_free(src0_ddq[id], src0_asq[id]);
        }
        if (src0_asf[id] > 0) {
            ggml_cuda_pool_free(src0_ddf[id], src0_asf[id]);
        }
        if (src1_asf[id] > 0) {
            ggml_cuda_pool_free(src1_ddf[id], src1_asf[id]);
        }
        if (dst_asf[id] > 0) {
            ggml_cuda_pool_free(dst_ddf[id], dst_asf[id]);
        }
    }

    // main device waits for all other devices to be finished
    if (split && g_device_count > 1) {
        CUDA_CHECK(cudaSetDevice(g_main_device));
        for (int id = 0; id < g_device_count; ++id) {
            if (id != g_main_device && src0_extra->events[id]) {
                CUDA_CHECK(cudaStreamWaitEvent(g_cudaStreams_main[g_main_device], src0_extra->events[id]));
            }
        }
    }

    if (dst->backend == GGML_BACKEND_CPU) {
        // block until the host-visible result is complete
        CUDA_CHECK(cudaSetDevice(g_main_device));
        CUDA_CHECK(cudaDeviceSynchronize());
    }
}
// Entry point for GGML_OP_ADD on CUDA. src0 stays in its native type
// (src0_needs_f32 == false) and rows are flattened into one launch.
void ggml_cuda_add(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    // ggml_cuda_add permits f16 dst even though this could in theory cause problems with the pointer arithmetic in ggml_cuda_op.
    // Due to flatten_rows == true this does in practice not make a difference however.
    // Better solution would be nice but right now that would require disproportionate changes.
    GGML_ASSERT(
        (src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16) &&
        src1->type == GGML_TYPE_F32 &&
        (dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16));
    ggml_cuda_op(src0, src1, dst, ggml_cuda_op_add, false, true);
}
void ggml_cuda_mul(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
GGML_ASSERT(src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
ggml_cuda_op(src0, src1, dst, ggml_cuda_op_mul, true, false); // TODO ggml_cuda_op needs modification for flatten
}
void ggml_cuda_gelu(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
GGML_ASSERT(src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
ggml_cuda_op(src0, src1, dst, ggml_cuda_op_gelu, true, true);
}
void ggml_cuda_silu(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
GGML_ASSERT(src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
ggml_cuda_op(src0, src1, dst, ggml_cuda_op_silu, true, true);
}
void ggml_cuda_norm(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
GGML_ASSERT(src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
ggml_cuda_op(src0, src1, dst, ggml_cuda_op_norm, true, true);
}
void ggml_cuda_rms_norm(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
GGML_ASSERT(src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
ggml_cuda_op(src0, src1, dst, ggml_cuda_op_rms_norm, true, true);
}
bool ggml_cuda_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst) {
const int64_t ne10 = src1->ne[0];
const int64_t ne0 = dst->ne[0];
const int64_t ne1 = dst->ne[1];
// TODO: find the optimal values for these
if ((src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type)) &&
src1->type == GGML_TYPE_F32 &&
dst->type == GGML_TYPE_F32 &&
(ne0 >= 32 && ne1 >= 32 && ne10 >= 32)) {
return true;
}
return false;
}
void ggml_cuda_mul_mat_vec_p021(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst){
GGML_ASSERT(ggml_is_permuted(src0) && ggml_is_permuted(src1));
GGML_ASSERT(src0->backend != GGML_BACKEND_GPU_SPLIT);
GGML_ASSERT(src0->nb[0] <= src0->nb[1] && src0->nb[2] <= src0->nb[3]); // 0213 permutation
GGML_ASSERT(src1->nb[0] <= src1->nb[1] && src1->nb[2] <= src1->nb[3]); // 0213 permutation
GGML_ASSERT(src0->type == GGML_TYPE_F16);
GGML_ASSERT(src1->type == GGML_TYPE_F32);
const int64_t ne00 = src0->ne[0];
const int64_t ne01 = src0->ne[1];
const int64_t ne02 = src0->ne[2];
const int64_t ne12 = src1->ne[2];
CUDA_CHECK(cudaSetDevice(g_main_device));
cudaStream_t cudaStream_main = g_cudaStreams_main[g_main_device];
struct ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu *) src0->extra;
void * src0_ddq = src0_extra->data_device[g_main_device];
struct ggml_tensor_extra_gpu * src1_extra = (ggml_tensor_extra_gpu *) src1->extra;
float * src1_ddf = (float *) src1_extra->data_device[g_main_device];
struct ggml_tensor_extra_gpu * dst_extra = (ggml_tensor_extra_gpu *) dst->extra;
float * dst_ddf = (float *) dst_extra->data_device[g_main_device];
ggml_mul_mat_p021_f16_f32_cuda(src0_ddq, src1_ddf, dst_ddf, ne00, ne01, ne02, ne12, cudaStream_main);
}
void ggml_cuda_mul_mat_vec_nc(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst){
GGML_ASSERT(!ggml_is_contiguous(src0) && ggml_is_contiguous(src1));
GGML_ASSERT(!ggml_is_permuted(src0));
GGML_ASSERT(src0->backend != GGML_BACKEND_GPU_SPLIT);
GGML_ASSERT(src0->type == GGML_TYPE_F16);
GGML_ASSERT(src1->type == GGML_TYPE_F32);
const int64_t ne00 = src0->ne[0];
const int64_t ne01 = src0->ne[1];
const int64_t ne02 = src0->ne[2];
const int64_t ne12 = src1->ne[2];
const int64_t nb01 = src0->nb[1];
const int64_t nb02 = src0->nb[2];
CUDA_CHECK(cudaSetDevice(g_main_device));
cudaStream_t cudaStream_main = g_cudaStreams_main[g_main_device];
struct ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu *) src0->extra;
void * src0_ddq = src0_extra->data_device[g_main_device];
struct ggml_tensor_extra_gpu * src1_extra = (ggml_tensor_extra_gpu *) src1->extra;
float * src1_ddf = (float *) src1_extra->data_device[g_main_device];
struct ggml_tensor_extra_gpu * dst_extra = (ggml_tensor_extra_gpu *) dst->extra;
float * dst_ddf = (float *) dst_extra->data_device[g_main_device];
const int row_stride_x = nb01 / sizeof(half);
const int channel_stride_x = nb02 / sizeof(half);
ggml_mul_mat_vec_nc_f16_f32_cuda(src0_ddq, src1_ddf, dst_ddf, ne00, ne01, row_stride_x, ne02, ne12, channel_stride_x, cudaStream_main);
}
void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
bool all_on_device = (src0->backend == GGML_BACKEND_GPU || src0->backend == GGML_BACKEND_GPU_SPLIT) &&
src1->backend == GGML_BACKEND_GPU && dst->backend == GGML_BACKEND_GPU;
if (all_on_device && ggml_is_permuted(src0) && ggml_is_permuted(src1) && src1->ne[1] == 1) {
ggml_cuda_mul_mat_vec_p021(src0, src1, dst);
} else if (all_on_device && !ggml_is_contiguous(src0) && ggml_is_contiguous(src1) && src1->ne[1] == 1) {
ggml_cuda_mul_mat_vec_nc(src0, src1, dst);
}else if (src0->type == GGML_TYPE_F32) {
ggml_cuda_op(src0, src1, dst, ggml_cuda_op_mul_mat_cublas, true, false);
} else if (ggml_is_quantized(src0->type) || src0->type == GGML_TYPE_F16) {
if (src1->ne[1] == 1 && src0->ne[0] % GGML_CUDA_DMMV_X == 0) {
ggml_cuda_op(src0, src1, dst, ggml_cuda_op_mul_mat_vec, false, false);
} else {
int min_compute_capability = INT_MAX;
for (int id = 0; id < g_device_count; ++id) {
if (min_compute_capability > g_compute_capabilities[id]
&& g_tensor_split[id] < (id + 1 < g_device_count ? g_tensor_split[id + 1] : 1.0f)) {
min_compute_capability = g_compute_capabilities[id];
}
}
if (g_mul_mat_q && ggml_is_quantized(src0->type) && min_compute_capability >= MIN_CC_DP4A) {
ggml_cuda_op(src0, src1, dst, ggml_cuda_op_mul_mat_q, false, false);
} else {
ggml_cuda_op(src0, src1, dst, ggml_cuda_op_mul_mat_cublas, true, false);
}
}
} else {
GGML_ASSERT(false);
}
}
void ggml_cuda_scale(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
GGML_ASSERT(src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
ggml_cuda_op(src0, src1, dst, ggml_cuda_op_scale, true, true);
}
void ggml_cuda_cpy(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
const int64_t ne = ggml_nelements(src0);
GGML_ASSERT(ne == ggml_nelements(src1));
GGML_ASSERT(src0->backend == GGML_BACKEND_GPU);
GGML_ASSERT(src1->backend == GGML_BACKEND_GPU);
GGML_ASSERT(ggml_nbytes(src0) <= INT_MAX);
GGML_ASSERT(ggml_nbytes(src1) <= INT_MAX);
const int64_t ne00 = src0->ne[0];
const int64_t ne01 = src0->ne[1];
GGML_ASSERT(src0->ne[3] == 1);
const int64_t nb00 = src0->nb[0];
const int64_t nb01 = src0->nb[1];
const int64_t nb02 = src0->nb[2];
const int64_t ne10 = src1->ne[0];
const int64_t ne11 = src1->ne[1];
GGML_ASSERT(src1->ne[3] == 1);
const int64_t nb10 = src1->nb[0];
const int64_t nb11 = src1->nb[1];
const int64_t nb12 = src1->nb[2];
CUDA_CHECK(cudaSetDevice(g_main_device));
cudaStream_t cudaStream_main = g_cudaStreams_main[g_main_device];
const struct ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu *) src0->extra;
const struct ggml_tensor_extra_gpu * src1_extra = (ggml_tensor_extra_gpu *) src1->extra;
char * src0_ddc = (char *) src0_extra->data_device[g_main_device];
char * src1_ddc = (char *) src1_extra->data_device[g_main_device];
if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32) {
ggml_cpy_f32_f32_cuda(src0_ddc, src1_ddc, ne, ne00, ne01, nb00, nb01, nb02,
ne10, ne11, nb10, nb11, nb12, cudaStream_main);
} else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F16) {
ggml_cpy_f32_f16_cuda(src0_ddc, src1_ddc, ne, ne00, ne01, nb00, nb01, nb02,
ne10, ne11, nb10, nb11, nb12, cudaStream_main);
} else {
GGML_ASSERT(false);
}
(void) dst;
}
void ggml_cuda_dup(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
ggml_cuda_cpy(src0, dst, nullptr);
(void) src1;
}
void ggml_cuda_diag_mask_inf(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
GGML_ASSERT(src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
ggml_cuda_op(src0, src1, dst, ggml_cuda_op_diag_mask_inf, true, true);
}
void ggml_cuda_soft_max(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
GGML_ASSERT(src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
ggml_cuda_op(src0, src1, dst, ggml_cuda_op_soft_max, true, true);
}
void ggml_cuda_rope(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
GGML_ASSERT(src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
GGML_ASSERT(ggml_is_contiguous(src0)); // TODO: this restriction is temporary until non-cont support is implemented
const int mode = ((int32_t *) dst->op_params)[2];
const bool is_glm = mode & 4;
ggml_cuda_op(src0, src1, dst, ggml_cuda_op_rope, true, !is_glm); // flatten support not implemented for glm
}
void ggml_cuda_alibi(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
GGML_ASSERT(src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
ggml_cuda_op(src0, src1, dst, ggml_cuda_op_alibi, true, true);
}
void ggml_cuda_nop(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
(void) src0;
(void) src1;
(void) dst;
}
void ggml_cuda_transform_tensor(void * data, struct ggml_tensor * tensor) {
int nrows = ggml_nrows(tensor);
const int64_t ne0 = tensor->ne[0];
const size_t nb1 = tensor->nb[1];
ggml_backend backend = tensor->backend;
struct ggml_tensor_extra_gpu * extra = new struct ggml_tensor_extra_gpu;
memset(extra, 0, sizeof(*extra));
for (int id = 0; id < g_device_count; ++id) {
if (backend == GGML_BACKEND_GPU && id != g_main_device) {
continue;
}
cudaSetDevice(id);
int row_low, row_high;
if (backend == GGML_BACKEND_GPU) {
row_low = 0;
row_high = nrows;
} else if (backend == GGML_BACKEND_GPU_SPLIT) {
const int64_t rounding = get_row_rounding(tensor->type);
row_low = id == 0 ? 0 : nrows*g_tensor_split[id];
row_low -= row_low % rounding;
if (id == g_device_count - 1) {
row_high = nrows;
} else {
row_high = nrows*g_tensor_split[id + 1];
row_high -= row_high % rounding;
}
} else {
GGML_ASSERT(false);
}
if (row_low == row_high) {
continue;
}
int64_t nrows_split = row_high - row_low;
const size_t offset_split = row_low*nb1;
size_t size = ggml_nbytes_split(tensor, nrows_split);
const size_t original_size = size;
// pad last row to a multiple of 512 elements to avoid out-of-bounds memory accesses
if (ne0 % MATRIX_ROW_PADDING != 0) {
size += (MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING)
* ggml_type_size(tensor->type)/ggml_blck_size(tensor->type);
}
char * buf;
CUDA_CHECK(cudaMalloc(&buf, size));
char * buf_host = (char*)data + offset_split;
// set padding to 0 to avoid possible NaN values
if (size > original_size) {
CUDA_CHECK(cudaMemset(buf + original_size, 0, size - original_size));
}
CUDA_CHECK(cudaMemcpy(buf, buf_host, original_size, cudaMemcpyHostToDevice));
extra->data_device[id] = buf;
if (backend == GGML_BACKEND_GPU_SPLIT) {
CUDA_CHECK(cudaEventCreateWithFlags(&extra->events[id], cudaEventDisableTiming));
}
}
tensor->extra = extra;
}
void ggml_cuda_free_data(struct ggml_tensor * tensor) {
if (!tensor || (tensor->backend != GGML_BACKEND_GPU && tensor->backend != GGML_BACKEND_GPU_SPLIT) ) {
return;
}
ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) tensor->extra;
for (int id = 0; id < g_device_count; ++id) {
if (extra->data_device[id] != nullptr) {
CUDA_CHECK(cudaSetDevice(id));
CUDA_CHECK(cudaFree(extra->data_device[id]));
}
if (extra->events[id] != nullptr) {
CUDA_CHECK(cudaSetDevice(id));
CUDA_CHECK(cudaEventDestroy(extra->events[id]));
}
}
delete extra;
}
static struct ggml_tensor_extra_gpu * g_temp_tensor_extras = nullptr;
static size_t g_temp_tensor_extra_index = 0;
static struct ggml_tensor_extra_gpu * ggml_cuda_alloc_temp_tensor_extra() {
if (g_temp_tensor_extras == nullptr) {
g_temp_tensor_extras = new ggml_tensor_extra_gpu[GGML_MAX_NODES];
}
size_t alloc_index = g_temp_tensor_extra_index;
g_temp_tensor_extra_index = (g_temp_tensor_extra_index + 1) % GGML_MAX_NODES;
struct ggml_tensor_extra_gpu * extra = &g_temp_tensor_extras[alloc_index];
memset(extra, 0, sizeof(*extra));
return extra;
}
void ggml_cuda_assign_buffers_impl(struct ggml_tensor * tensor, bool scratch, bool force_inplace, bool no_alloc) {
if (scratch && g_scratch_size == 0) {
return;
}
// recursively assign CUDA buffers until a compute tensor is found
if (tensor->src[0] != nullptr && tensor->src[0]->backend == GGML_BACKEND_CPU) {
const ggml_op src0_op = tensor->src[0]->op;
if (src0_op == GGML_OP_RESHAPE || src0_op == GGML_OP_TRANSPOSE || src0_op == GGML_OP_VIEW || src0_op == GGML_OP_PERMUTE) {
ggml_cuda_assign_buffers_impl(tensor->src[0], scratch, force_inplace, no_alloc);
}
}
if (tensor->op == GGML_OP_CPY && tensor->src[1]->backend == GGML_BACKEND_CPU) {
ggml_cuda_assign_buffers_impl(tensor->src[1], scratch, force_inplace, no_alloc);
}
tensor->backend = GGML_BACKEND_GPU;
if (scratch && no_alloc) {
return;
}
struct ggml_tensor_extra_gpu * extra;
const bool inplace = (tensor->src[0] != nullptr && tensor->src[0]->data == tensor->data) ||
tensor->op == GGML_OP_VIEW ||
force_inplace;
const size_t size = ggml_nbytes(tensor);
CUDA_CHECK(cudaSetDevice(g_main_device));
if (inplace && (tensor->src[0]->backend == GGML_BACKEND_GPU || tensor->src[0]->backend == GGML_BACKEND_GPU_SPLIT)) {
struct ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu * ) tensor->src[0]->extra;
char * src0_ddc = (char *) src0_extra->data_device[g_main_device];
size_t offset = 0;
if (tensor->op == GGML_OP_VIEW) {
memcpy(&offset, tensor->op_params, sizeof(size_t));
}
extra = ggml_cuda_alloc_temp_tensor_extra();
extra->data_device[g_main_device] = src0_ddc + offset;
} else if (tensor->op == GGML_OP_CPY) {
struct ggml_tensor_extra_gpu * src1_extra = (ggml_tensor_extra_gpu * ) tensor->src[1]->extra;
void * src1_ddv = src1_extra->data_device[g_main_device];
extra = ggml_cuda_alloc_temp_tensor_extra();
extra->data_device[g_main_device] = src1_ddv;
} else if (scratch) {
GGML_ASSERT(size <= g_scratch_size);
if (g_scratch_offset + size > g_scratch_size) {
g_scratch_offset = 0;
}
char * data = (char *) g_scratch_buffer;
if (data == nullptr) {
CUDA_CHECK(cudaMalloc(&data, g_scratch_size));
g_scratch_buffer = data;
}
extra = ggml_cuda_alloc_temp_tensor_extra();
extra->data_device[g_main_device] = data + g_scratch_offset;
g_scratch_offset += size;
GGML_ASSERT(g_scratch_offset <= g_scratch_size);
} else { // allocate new buffers outside of scratch
void * data;
CUDA_CHECK(cudaMalloc(&data, size));
CUDA_CHECK(cudaMemset(data, 0, size));
extra = new ggml_tensor_extra_gpu;
memset(extra, 0, sizeof(*extra));
extra->data_device[g_main_device] = data;
}
tensor->extra = extra;
}
void ggml_cuda_assign_scratch_offset(struct ggml_tensor * tensor, size_t offset) {
if (g_scratch_size == 0) {
return;
}
if (g_scratch_buffer == nullptr) {
CUDA_CHECK(cudaMalloc(&g_scratch_buffer, g_scratch_size));
}
struct ggml_tensor_extra_gpu * extra = ggml_cuda_alloc_temp_tensor_extra();
const bool inplace = (tensor->src[0] != nullptr && tensor->src[0]->data == tensor->data) ||
tensor->op == GGML_OP_VIEW;
if (inplace && (tensor->src[0]->backend == GGML_BACKEND_GPU || tensor->src[0]->backend == GGML_BACKEND_GPU_SPLIT)) {
struct ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu * ) tensor->src[0]->extra;
char * src0_ddc = (char *) src0_extra->data_device[g_main_device];
size_t view_offset = 0;
if (tensor->op == GGML_OP_VIEW) {
memcpy(&view_offset, tensor->op_params, sizeof(size_t));
}
extra->data_device[g_main_device] = src0_ddc + view_offset;
} else {
extra->data_device[g_main_device] = (char *) g_scratch_buffer + offset;
}
tensor->extra = extra;
}
void ggml_cuda_assign_buffers(struct ggml_tensor * tensor) {
ggml_cuda_assign_buffers_impl(tensor, true, false, false);
}
void ggml_cuda_assign_buffers_no_alloc(struct ggml_tensor * tensor) {
ggml_cuda_assign_buffers_impl(tensor, true, false, true);
}
void ggml_cuda_assign_buffers_no_scratch(struct ggml_tensor * tensor) {
ggml_cuda_assign_buffers_impl(tensor, false, false, false);
}
void ggml_cuda_assign_buffers_force_inplace(struct ggml_tensor * tensor) {
ggml_cuda_assign_buffers_impl(tensor, false, true, false);
}
void ggml_cuda_set_main_device(int main_device) {
if (main_device >= g_device_count) {
fprintf(stderr, "warning: cannot set main_device=%d because there are only %d devices. Using device %d instead.\n",
main_device, g_device_count, g_main_device);
return;
}
g_main_device = main_device;
if (g_device_count > 1) {
cudaDeviceProp prop;
CUDA_CHECK(cudaGetDeviceProperties(&prop, g_main_device));
fprintf(stderr, "%s: using device %d (%s) as main device\n", __func__, g_main_device, prop.name);
}
}
void ggml_cuda_set_mul_mat_q(bool mul_mat_q) {
g_mul_mat_q = mul_mat_q;
}
void ggml_cuda_set_scratch_size(size_t scratch_size) {
g_scratch_size = scratch_size;
}
void ggml_cuda_free_scratch() {
if (g_scratch_buffer == nullptr) {
return;
}
CUDA_CHECK(cudaFree(g_scratch_buffer));
g_scratch_buffer = nullptr;
}
bool ggml_cuda_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor){
ggml_cuda_func_t func;
const bool any_on_device = tensor->backend == GGML_BACKEND_GPU
|| (tensor->src[0] != nullptr && (tensor->src[0]->backend == GGML_BACKEND_GPU || tensor->src[0]->backend == GGML_BACKEND_GPU_SPLIT))
|| (tensor->src[1] != nullptr && tensor->src[1]->backend == GGML_BACKEND_GPU);
switch (tensor->op) {
case GGML_OP_DUP:
if (!any_on_device) {
return false;
}
func = ggml_cuda_dup;
break;
case GGML_OP_ADD:
if (!any_on_device) {
return false;
}
func = ggml_cuda_add;
break;
case GGML_OP_MUL:
if (!any_on_device) {
return false;
}
func = ggml_cuda_mul;
break;
case GGML_OP_UNARY:
switch (ggml_get_unary_op(tensor)) {
case GGML_UNARY_OP_GELU:
if (!any_on_device) {
return false;
}
func = ggml_cuda_gelu;
break;
case GGML_UNARY_OP_SILU:
if (!any_on_device) {
return false;
}
func = ggml_cuda_silu;
break;
default:
return false;
} break;
case GGML_OP_NORM:
if (!any_on_device) {
return false;
}
func = ggml_cuda_norm;
break;
case GGML_OP_RMS_NORM:
if (!any_on_device) {
return false;
}
func = ggml_cuda_rms_norm;
break;
case GGML_OP_MUL_MAT:
if (!any_on_device && !ggml_cuda_can_mul_mat(tensor->src[0], tensor->src[1], tensor)) {
return false;
}
func = ggml_cuda_mul_mat;
break;
case GGML_OP_SCALE:
if (!any_on_device) {
return false;
}
func = ggml_cuda_scale;
break;
case GGML_OP_CPY:
if (!any_on_device) {
return false;
}
func = ggml_cuda_cpy;
break;
case GGML_OP_CONT:
if (!any_on_device) {
return false;
}
func = ggml_cuda_dup;
break;
case GGML_OP_RESHAPE:
case GGML_OP_VIEW:
case GGML_OP_PERMUTE:
case GGML_OP_TRANSPOSE:
if (!any_on_device) {
return false;
}
func = ggml_cuda_nop;
break;
case GGML_OP_DIAG_MASK_INF:
if (!any_on_device) {
return false;
}
func = ggml_cuda_diag_mask_inf;
break;
case GGML_OP_SOFT_MAX:
if (!any_on_device) {
return false;
}
func = ggml_cuda_soft_max;
break;
case GGML_OP_ROPE:
if (!any_on_device) {
return false;
}
func = ggml_cuda_rope;
break;
case GGML_OP_ALIBI:
if (!any_on_device) {
return false;
}
func = ggml_cuda_alibi;
break;
default:
return false;
}
if (params->ith != 0) {
return true;
}
if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
return true;
}
func(tensor->src[0], tensor->src[1], tensor);
return true;
}
int ggml_cuda_get_device_count() {
int device_count;
CUDA_CHECK(cudaGetDeviceCount(&device_count));
return device_count;
}
void ggml_cuda_get_device_description(int device, char * description, size_t description_size) {
cudaDeviceProp prop;
CUDA_CHECK(cudaGetDeviceProperties(&prop, device));
snprintf(description, description_size, "%s", prop.name);
}
|
859868ea40507d4a0da8ffae10b3afe9b186f2fe.hip | // !!! This is a file automatically generated by hipify!!!
#include "nbody.hpp"
#include "hip/hip_runtime.h"
#include "helper_cuda.h"
#include "device_launch_parameters.h"
#include "error_check.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
__global__ void collision(Body *pnt, double deltaTime)
{
int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
while (thread_id < N)
{
Body* a = thread_id + pnt;
for(int i = thread_id + 1; i < N; i++)
{
Body* b = pnt + i;
float a_x = a->x;
float a_y = a->y;
float b_x = b->x;
float b_y = b->y;
float dist_x = b_x - a_x;
float dist_y = b_y - a_y;
float dSq = dist_x * dist_x + dist_y * dist_y;
if (dSq < 2 * RADIUS * RADIUS) {
float a_dx = a->vx;
float a_dy = a->vy;
float b_dx = b->vx;
float b_dy = b->vy;
float a_m = a->mass;
float b_m = b->mass;
float length = sqrt(dSq);
float n_x = dist_x / length;
float n_y = dist_y / length;
float a_d_dot_n = a_dx * n_x + a_dy * n_y;
float b_d_dot_n = b_dx * n_x + b_dy * n_y;
float totalMomentum = 2 * (a_d_dot_n - b_d_dot_n) / (a_m + b_m);
float pn_x = n_x * totalMomentum;
float pn_y = n_y * totalMomentum;
float new_a_d_x = - pn_x * b_m;
float new_a_d_y = - pn_y * b_m;
float new_b_d_x = pn_x * a_m;
float new_b_d_y = pn_y * a_m;
a->vx += new_a_d_x * deltaTime;
a->vy += new_a_d_y * deltaTime;
b->vx += new_b_d_x * deltaTime;
b->vy += new_b_d_y * deltaTime;
}
}
thread_id += blockDim.x;
}
}
__global__ void gravity(Body *pnt, double deltaTime)
{
int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
while (thread_id < N)
{
Body* a = thread_id + pnt;
float dx = 0.0;
float dy = 0.0;
for (int i = 0; i < N; i++)
{
if (i == thread_id) continue;
Body* otherBody = pnt + i;
float dir_x = otherBody->x - a->x;
float dir_y = otherBody->y - a->y;
float lengthSq = dir_x * dir_x + dir_y * dir_y;
if (lengthSq < 2 * RADIUS * RADIUS) continue;
float length = (float)sqrt(lengthSq);
float nx = dir_x / length;
float ny = dir_y / length;
float force = G * a->mass * otherBody->mass / lengthSq;
float ddx = nx * force / a->mass;
float ddy = ny * force / a->mass;
dx += ddx * deltaTime;
dy += ddy * deltaTime;
}
a->vx += dx;
a->vy += dy;
thread_id += blockDim.x;
}
}
__global__ void updatePosition(Body *a, double deltaTime)
{
int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
while (thread_id < N)
{
Body* b = a + thread_id;
b->x += b->vx * deltaTime;
b->y += b->vy * deltaTime;
b->vx *= DAMPING;
b->vy *= DAMPING;
thread_id += blockDim.x;
}
}
void NBody::simulate(double deltaTime)
{
Body* devPtr;
size_t size;
HANDLE_CUDA_ERROR(hipGraphicsMapResources(1, &m_cudaVBOResource, NULL));
HANDLE_CUDA_ERROR(hipGraphicsResourceGetMappedPointer((void**)&devPtr, &size, m_cudaVBOResource));
hipLaunchKernelGGL(( collision), dim3(BLOCK_SIZE), dim3(THREAD_SIZE) , 0, 0, devPtr, deltaTime);
HANDLE_CUDA_ERROR(hipDeviceSynchronize());
hipLaunchKernelGGL(( gravity), dim3(BLOCK_SIZE), dim3(THREAD_SIZE), 0, 0, devPtr, deltaTime);
HANDLE_CUDA_ERROR(hipDeviceSynchronize());
hipLaunchKernelGGL(( updatePosition), dim3(BLOCK_SIZE), dim3(THREAD_SIZE) , 0, 0, devPtr, deltaTime);
HANDLE_CUDA_ERROR(hipDeviceSynchronize());
HANDLE_CUDA_ERROR(hipGraphicsUnmapResources(1, &m_cudaVBOResource, NULL));
}
| 859868ea40507d4a0da8ffae10b3afe9b186f2fe.cu | #include "nbody.hpp"
#include "cuda_runtime.h"
#include "helper_cuda.h"
#include "device_launch_parameters.h"
#include "error_check.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
__global__ void collision(Body *pnt, double deltaTime)
{
int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
while (thread_id < N)
{
Body* a = thread_id + pnt;
for(int i = thread_id + 1; i < N; i++)
{
Body* b = pnt + i;
float a_x = a->x;
float a_y = a->y;
float b_x = b->x;
float b_y = b->y;
float dist_x = b_x - a_x;
float dist_y = b_y - a_y;
float dSq = dist_x * dist_x + dist_y * dist_y;
if (dSq < 2 * RADIUS * RADIUS) {
float a_dx = a->vx;
float a_dy = a->vy;
float b_dx = b->vx;
float b_dy = b->vy;
float a_m = a->mass;
float b_m = b->mass;
float length = sqrt(dSq);
float n_x = dist_x / length;
float n_y = dist_y / length;
float a_d_dot_n = a_dx * n_x + a_dy * n_y;
float b_d_dot_n = b_dx * n_x + b_dy * n_y;
float totalMomentum = 2 * (a_d_dot_n - b_d_dot_n) / (a_m + b_m);
float pn_x = n_x * totalMomentum;
float pn_y = n_y * totalMomentum;
float new_a_d_x = - pn_x * b_m;
float new_a_d_y = - pn_y * b_m;
float new_b_d_x = pn_x * a_m;
float new_b_d_y = pn_y * a_m;
a->vx += new_a_d_x * deltaTime;
a->vy += new_a_d_y * deltaTime;
b->vx += new_b_d_x * deltaTime;
b->vy += new_b_d_y * deltaTime;
}
}
thread_id += blockDim.x;
}
}
__global__ void gravity(Body *pnt, double deltaTime)
{
int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
while (thread_id < N)
{
Body* a = thread_id + pnt;
float dx = 0.0;
float dy = 0.0;
for (int i = 0; i < N; i++)
{
if (i == thread_id) continue;
Body* otherBody = pnt + i;
float dir_x = otherBody->x - a->x;
float dir_y = otherBody->y - a->y;
float lengthSq = dir_x * dir_x + dir_y * dir_y;
if (lengthSq < 2 * RADIUS * RADIUS) continue;
float length = (float)sqrt(lengthSq);
float nx = dir_x / length;
float ny = dir_y / length;
float force = G * a->mass * otherBody->mass / lengthSq;
float ddx = nx * force / a->mass;
float ddy = ny * force / a->mass;
dx += ddx * deltaTime;
dy += ddy * deltaTime;
}
a->vx += dx;
a->vy += dy;
thread_id += blockDim.x;
}
}
__global__ void updatePosition(Body *a, double deltaTime)
{
int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
while (thread_id < N)
{
Body* b = a + thread_id;
b->x += b->vx * deltaTime;
b->y += b->vy * deltaTime;
b->vx *= DAMPING;
b->vy *= DAMPING;
thread_id += blockDim.x;
}
}
void NBody::simulate(double deltaTime)
{
Body* devPtr;
size_t size;
HANDLE_CUDA_ERROR(cudaGraphicsMapResources(1, &m_cudaVBOResource, NULL));
HANDLE_CUDA_ERROR(cudaGraphicsResourceGetMappedPointer((void**)&devPtr, &size, m_cudaVBOResource));
collision<<<BLOCK_SIZE, THREAD_SIZE >>>(devPtr, deltaTime);
HANDLE_CUDA_ERROR(cudaDeviceSynchronize());
gravity<<<BLOCK_SIZE, THREAD_SIZE>>>(devPtr, deltaTime);
HANDLE_CUDA_ERROR(cudaDeviceSynchronize());
updatePosition<<<BLOCK_SIZE, THREAD_SIZE >>>(devPtr, deltaTime);
HANDLE_CUDA_ERROR(cudaDeviceSynchronize());
HANDLE_CUDA_ERROR(cudaGraphicsUnmapResources(1, &m_cudaVBOResource, NULL));
}
|
87b5084e64531c58ab79534094feed02a590f23b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "sum.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float4 *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
float4 *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
int N = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
sum), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,N);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
sum), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
sum), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 87b5084e64531c58ab79534094feed02a590f23b.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "sum.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float4 *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
float4 *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
int N = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
sum<<<gridBlock,threadBlock>>>(a,b,N);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
sum<<<gridBlock,threadBlock>>>(a,b,N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
sum<<<gridBlock,threadBlock>>>(a,b,N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
64356d677826b75abc6e011734071909527d121d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.py
//
//user function
__device__ void initBathymetry_update_gpu( float *values, const float *z_zero, const float *zmin, const int *firstTime) {
if (*firstTime){
if (*z_zero > 0.0f){
values[0] = EPS_cuda;
values[3] = -1.0f* *zmin + *z_zero;
} else {
values[0] = -1.0f* *z_zero;
values[3] = -1.0f* *zmin;
}
} else {
if (*z_zero > 0.0f){
values[3] = -1.0f* *zmin + *z_zero + values[0];
} else {
values[3] = values[0] + *z_zero - *zmin;
}
}
}
// CUDA kernel function
__global__ void op_cuda_initBathymetry_update(
float *arg0,
const float *__restrict arg1,
const float *arg2,
const int *arg3,
int set_size ) {
//process set elements
for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){
//user-supplied kernel call
initBathymetry_update_gpu(arg0+n*4,
arg1+n*1,
arg2,
arg3);
}
}
//host stub function
// Host stub (OP2-generated): stages the two global constants (arg2, arg3)
// into the OP2 constant buffers, launches op_cuda_initBathymetry_update over
// the set, and records timing/transfer stats under kernel record index 13.
void op_par_loop_initBathymetry_update(char const *name, op_set set,
  op_arg arg0,
  op_arg arg1,
  op_arg arg2,
  op_arg arg3){
  float*arg2h = (float *)arg2.data;
  int*arg3h = (int *)arg3.data;
  int nargs = 4;
  op_arg args[4];
  args[0] = arg0;
  args[1] = arg1;
  args[2] = arg2;
  args[3] = arg3;
  // initialise timers
  double cpu_t1, cpu_t2, wall_t1, wall_t2;
  op_timing_realloc(13);
  op_timers_core(&cpu_t1, &wall_t1);
  OP_kernels[13].name = name;
  OP_kernels[13].count += 1;
  if (OP_diags>2) {
    printf(" kernel routine w/o indirection: initBathymetry_update");
  }
  // Direct loop (no indirection): halo exchange only establishes set_size.
  int set_size = op_mpi_halo_exchanges_grouped(set, nargs, args, 2);
  if (set_size > 0) {
    //transfer constants to GPU
    // First pass computes the total rounded-up byte count so one realloc
    // covers both constants; second pass copies them into the host staging
    // buffer at the same offsets used on the device.
    int consts_bytes = 0;
    consts_bytes += ROUND_UP(1*sizeof(float));
    consts_bytes += ROUND_UP(1*sizeof(int));
    reallocConstArrays(consts_bytes);
    consts_bytes = 0;
    arg2.data = OP_consts_h + consts_bytes;
    arg2.data_d = OP_consts_d + consts_bytes;
    for ( int d=0; d<1; d++ ){
      ((float *)arg2.data)[d] = arg2h[d];
    }
    consts_bytes += ROUND_UP(1*sizeof(float));
    arg3.data = OP_consts_h + consts_bytes;
    arg3.data_d = OP_consts_d + consts_bytes;
    for ( int d=0; d<1; d++ ){
      ((int *)arg3.data)[d] = arg3h[d];
    }
    consts_bytes += ROUND_UP(1*sizeof(int));
    mvConstArraysToDevice(consts_bytes);
    //set CUDA execution parameters
    #ifdef OP_BLOCK_SIZE_13
      int nthread = OP_BLOCK_SIZE_13;
    #else
      int nthread = OP_block_size;
    #endif
    // Fixed grid of 200 blocks; the kernel's grid-stride loop covers the rest.
    int nblocks = 200;
    hipLaunchKernelGGL(( op_cuda_initBathymetry_update), dim3(nblocks),dim3(nthread), 0, 0,
      (float *) arg0.data_d,
      (float *) arg1.data_d,
      (float *) arg2.data_d,
      (int *) arg3.data_d,
      set->size );
  }
  op_mpi_set_dirtybit_cuda(nargs, args);
  if (OP_diags>1) {
    cutilSafeCall(hipDeviceSynchronize());
  }
  //update kernel record
  // NOTE(review): without OP_diags>1 there is no sync before the timer read,
  // so wall_t2 - wall_t1 measures launch overhead, not kernel runtime.
  op_timers_core(&cpu_t2, &wall_t2);
  OP_kernels[13].time += wall_t2 - wall_t1;
  OP_kernels[13].transfer += (float)set->size * arg0.size * 2.0f;
  OP_kernels[13].transfer += (float)set->size * arg1.size;
}
| 64356d677826b75abc6e011734071909527d121d.cu | //
// auto-generated by op2.py
//
//user function
// User kernel: update one element's 4-float bathymetry record.
// values[0]/values[3] are written; z_zero, zmin, firstTime are single-value
// read-only globals.  (Exact physical meaning of the slots is defined by the
// OP2 application source -- presumably depth/elevation; TODO confirm.)
__device__ void initBathymetry_update_gpu( float *values, const float *z_zero, const float *zmin, const int *firstTime) {
  if (*firstTime){
    // First pass: seed values[0] and values[3] from the constants.
    if (*z_zero > 0.0f){
      values[0] = EPS_cuda;
      values[3] = -1.0f* *zmin + *z_zero;
    } else {
      values[0] = -1.0f* *z_zero;
      values[3] = -1.0f* *zmin;
    }
  } else {
    // Later passes: recompute values[3] relative to the current values[0].
    // Note: both branches compute the same quantity (values[0] + *z_zero -
    // *zmin); only the evaluation order differs.
    if (*z_zero > 0.0f){
      values[3] = -1.0f* *zmin + *z_zero + values[0];
    } else {
      values[3] = values[0] + *z_zero - *zmin;
    }
  }
}
// CUDA kernel function
// Grid-stride kernel: each thread processes elements n, n+gridSize, ... so
// the fixed launch configuration still covers all set_size elements.
// arg0: per-element 4-float record; arg1: per-element 1-float record;
// arg2/arg3: single-value global constants shared by every element.
__global__ void op_cuda_initBathymetry_update(
  float *arg0,
  const float *__restrict arg1,
  const float *arg2,
  const int *arg3,
  int set_size ) {
  //process set elements
  for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){
    //user-supplied kernel call
    initBathymetry_update_gpu(arg0+n*4,
                          arg1+n*1,
                          arg2,
                          arg3);
  }
}
//host stub function
// Host stub (OP2-generated): stages the two global constants (arg2, arg3)
// into the OP2 constant buffers, launches op_cuda_initBathymetry_update over
// the set, and records timing/transfer stats under kernel record index 13.
void op_par_loop_initBathymetry_update(char const *name, op_set set,
  op_arg arg0,
  op_arg arg1,
  op_arg arg2,
  op_arg arg3){
  float*arg2h = (float *)arg2.data;
  int*arg3h = (int *)arg3.data;
  int nargs = 4;
  op_arg args[4];
  args[0] = arg0;
  args[1] = arg1;
  args[2] = arg2;
  args[3] = arg3;
  // initialise timers
  double cpu_t1, cpu_t2, wall_t1, wall_t2;
  op_timing_realloc(13);
  op_timers_core(&cpu_t1, &wall_t1);
  OP_kernels[13].name = name;
  OP_kernels[13].count += 1;
  if (OP_diags>2) {
    printf(" kernel routine w/o indirection: initBathymetry_update");
  }
  // Direct loop (no indirection): halo exchange only establishes set_size.
  int set_size = op_mpi_halo_exchanges_grouped(set, nargs, args, 2);
  if (set_size > 0) {
    //transfer constants to GPU
    // First pass computes the rounded-up total so one realloc covers both
    // constants; second pass copies them into the staging buffer at the
    // same offsets mirrored on the device.
    int consts_bytes = 0;
    consts_bytes += ROUND_UP(1*sizeof(float));
    consts_bytes += ROUND_UP(1*sizeof(int));
    reallocConstArrays(consts_bytes);
    consts_bytes = 0;
    arg2.data = OP_consts_h + consts_bytes;
    arg2.data_d = OP_consts_d + consts_bytes;
    for ( int d=0; d<1; d++ ){
      ((float *)arg2.data)[d] = arg2h[d];
    }
    consts_bytes += ROUND_UP(1*sizeof(float));
    arg3.data = OP_consts_h + consts_bytes;
    arg3.data_d = OP_consts_d + consts_bytes;
    for ( int d=0; d<1; d++ ){
      ((int *)arg3.data)[d] = arg3h[d];
    }
    consts_bytes += ROUND_UP(1*sizeof(int));
    mvConstArraysToDevice(consts_bytes);
    //set CUDA execution parameters
    #ifdef OP_BLOCK_SIZE_13
      int nthread = OP_BLOCK_SIZE_13;
    #else
      int nthread = OP_block_size;
    #endif
    // Fixed grid of 200 blocks; the kernel's grid-stride loop covers the rest.
    int nblocks = 200;
    op_cuda_initBathymetry_update<<<nblocks,nthread>>>(
      (float *) arg0.data_d,
      (float *) arg1.data_d,
      (float *) arg2.data_d,
      (int *) arg3.data_d,
      set->size );
  }
  op_mpi_set_dirtybit_cuda(nargs, args);
  if (OP_diags>1) {
    cutilSafeCall(cudaDeviceSynchronize());
  }
  //update kernel record
  // NOTE(review): without OP_diags>1 there is no sync before the timer read,
  // so wall_t2 - wall_t1 measures launch overhead, not kernel runtime.
  op_timers_core(&cpu_t2, &wall_t2);
  OP_kernels[13].time += wall_t2 - wall_t1;
  OP_kernels[13].transfer += (float)set->size * arg0.size * 2.0f;
  OP_kernels[13].transfer += (float)set->size * arg1.size;
}
|
d70d3002c0c64af0a9488e3ce48f4e7b358262c5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//=======================================================================
// Builds the left/right input states for the Riemann solver along one sweep
// axis (axis: 0 -> x, 1 -> y, 2 -> z).  One thread per zone index n; the
// al/da/a6/cs/csfac arrays are scratch shared between neighbouring threads
// (the __syncthreads() calls order writes to slot n against reads of slots
// n-1/n, so they presumably live in block-shared memory -- TODO confirm).
__device__ void states( int &n, int nmax, sdp hdt, int &axis,
                        sdp &rad, sdp &azi, sdp &pol, sdp &v_rot, body planet,
                        sdp *al, sdp *da, sdp *a6, sdp *cs, sdp *csfac,
                        StateVar &S, sdp &flat,
                        sdp *r, sdp *p, sdp *u, sdp *v, sdp *w, sdp *xa0, sdp *dx0)
{
  // Compute parabolic coefficients and volume elements
  ParaConst par;
  paraset( n, 0, nmax, par, dx0,
           al, da, a6 ); //temporary variables
//-----------------------------------------------------------------------
//  This subroutine takes the values of rho, u, and P at the left hand
//  side of the zone, the change accross the zone, and the parabolic
//  coefficients, p6, u6, and rho6, and computes the left and right states
//  (integrated over the charachteristics) for each variable for input
//  to the Riemann solver.
//-----------------------------------------------------------------------
  sdp tmp, ftot;
  // External force/geometry source term for this zone (ftot), selected by
  // the sweep axis; only evaluated for interior zones.
  if (n>=2 && n<nmax-1)
  {
    if (axis==0)
      get_fx_bound(n, hdt, xa0[n], azi+hdt*v_rot, pol, tmp, ftot, planet, r, p, u, v, w);
    else if (axis==1)
      get_fy      (n, hdt, rad, xa0[n]+hdt*v_rot, pol, tmp, ftot, planet, r, p, u, v, w);
    else
      get_fz_bound(n, hdt, rad, azi+hdt*v_rot, xa0[n], tmp, ftot, planet, r, p, u, v, w);
  }
  __syncthreads();
  // Courant-like factor: half-step sound-crossing fraction of the zone,
  // rescaled for angular coordinates where dx is an angle.
  cs[n] = hdt*csqrt(gam*p[n]/r[n])/dx0[n];
  if (axis==1) cs[n] /= rad;
  if (axis==2)
  {
    if (ngeomz == 5) cs[n] /= rad;
  }
  csfac[n] = 1.0 - fourthd*cs[n];
  // Pressure: fit a parabola per zone, then integrate along the
  // characteristics to get left (from zone n-1) and right (zone n) states.
  parabola(n, 0, nmax, flat, p, al, da, a6, par);
  if (n>=3 && n<nmax-2)
  {
    tmp = al[n-1] + da[n-1] - cs[n-1]*(da[n-1] - csfac[n-1]*a6[n-1]);
    S.pl = cmax(smallp, tmp);
    tmp = al[n] + cs[n] *(da[n] + csfac[n] *a6[n]);
    S.pr = cmax(smallp, tmp);
  }
  __syncthreads();
  // Density: same construction, floored at smallr.
  parabola(n, 0, nmax, flat, r, al, da, a6, par);
  if (n>=3 && n<nmax-2)
  {
    tmp = al[n-1] + da[n-1] - cs[n-1]*(da[n-1] - csfac[n-1]*a6[n-1]);
    S.rl = cmax(smallr, tmp);
    tmp = al[n] + cs[n] *(da[n] + csfac[n] *a6[n]);
    S.rr = cmax(smallr, tmp);
  }
  __syncthreads();
  // Velocity: same construction plus the half-step force contribution.
  parabola(n, 0, nmax, flat, u, al, da, a6, par);
  if (n>=3 && n<nmax-2)
  {
    tmp = al[n-1] + da[n-1] - cs[n-1]*(da[n-1] - csfac[n-1]*a6[n-1]);
    S.ul = tmp + hdt*ftot;
    tmp = al[n] + cs[n] *(da[n] + csfac[n] *a6[n]);
    S.ur = tmp + hdt*ftot;
  }
  __syncthreads();
  return;
}
| d70d3002c0c64af0a9488e3ce48f4e7b358262c5.cu | //=======================================================================
// Builds the left/right input states for the Riemann solver along one sweep
// axis (axis: 0 -> x, 1 -> y, 2 -> z).  One thread per zone index n; the
// al/da/a6/cs/csfac arrays are scratch shared between neighbouring threads
// (the __syncthreads() calls order writes to slot n against reads of slots
// n-1/n, so they presumably live in block-shared memory -- TODO confirm).
__device__ void states( int &n, int nmax, sdp hdt, int &axis,
                        sdp &rad, sdp &azi, sdp &pol, sdp &v_rot, body planet,
                        sdp *al, sdp *da, sdp *a6, sdp *cs, sdp *csfac,
                        StateVar &S, sdp &flat,
                        sdp *r, sdp *p, sdp *u, sdp *v, sdp *w, sdp *xa0, sdp *dx0)
{
  // Compute parabolic coefficients and volume elements
  ParaConst par;
  paraset( n, 0, nmax, par, dx0,
           al, da, a6 ); //temporary variables
//-----------------------------------------------------------------------
//  This subroutine takes the values of rho, u, and P at the left hand
//  side of the zone, the change accross the zone, and the parabolic
//  coefficients, p6, u6, and rho6, and computes the left and right states
//  (integrated over the charachteristics) for each variable for input
//  to the Riemann solver.
//-----------------------------------------------------------------------
  sdp tmp, ftot;
  // External force/geometry source term (ftot), selected by the sweep axis;
  // only evaluated for interior zones.
  if (n>=2 && n<nmax-1)
  {
    if (axis==0)
      get_fx_bound(n, hdt, xa0[n], azi+hdt*v_rot, pol, tmp, ftot, planet, r, p, u, v, w);
    else if (axis==1)
      get_fy      (n, hdt, rad, xa0[n]+hdt*v_rot, pol, tmp, ftot, planet, r, p, u, v, w);
    else
      get_fz_bound(n, hdt, rad, azi+hdt*v_rot, xa0[n], tmp, ftot, planet, r, p, u, v, w);
  }
  __syncthreads();
  // Courant-like factor: half-step sound-crossing fraction of the zone,
  // rescaled for angular coordinates where dx is an angle.
  cs[n] = hdt*csqrt(gam*p[n]/r[n])/dx0[n];
  if (axis==1) cs[n] /= rad;
  if (axis==2)
  {
    if (ngeomz == 5) cs[n] /= rad;
  }
  csfac[n] = 1.0 - fourthd*cs[n];
  // Pressure: parabola per zone, then characteristic-averaged left state
  // (from zone n-1) and right state (zone n), floored at smallp.
  parabola(n, 0, nmax, flat, p, al, da, a6, par);
  if (n>=3 && n<nmax-2)
  {
    tmp = al[n-1] + da[n-1] - cs[n-1]*(da[n-1] - csfac[n-1]*a6[n-1]);
    S.pl = cmax(smallp, tmp);
    tmp = al[n] + cs[n] *(da[n] + csfac[n] *a6[n]);
    S.pr = cmax(smallp, tmp);
  }
  __syncthreads();
  // Density: same construction, floored at smallr.
  parabola(n, 0, nmax, flat, r, al, da, a6, par);
  if (n>=3 && n<nmax-2)
  {
    tmp = al[n-1] + da[n-1] - cs[n-1]*(da[n-1] - csfac[n-1]*a6[n-1]);
    S.rl = cmax(smallr, tmp);
    tmp = al[n] + cs[n] *(da[n] + csfac[n] *a6[n]);
    S.rr = cmax(smallr, tmp);
  }
  __syncthreads();
  // Velocity: same construction plus the half-step force contribution.
  parabola(n, 0, nmax, flat, u, al, da, a6, par);
  if (n>=3 && n<nmax-2)
  {
    tmp = al[n-1] + da[n-1] - cs[n-1]*(da[n-1] - csfac[n-1]*a6[n-1]);
    S.ul = tmp + hdt*ftot;
    tmp = al[n] + cs[n] *(da[n] + csfac[n] *a6[n]);
    S.ur = tmp + hdt*ftot;
  }
  __syncthreads();
  return;
}
|
63ea0dde2d9b9dd69e66cb5e2f290991d1a4609f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef __CUDALOCKSBARRIERATOMIC_CU__
#define __CUDALOCKSBARRIERATOMIC_CU__
#include "cudaLocks.h"
// One phase of the global (inter-SM) barrier: each participating TB's master
// thread increments *globalBarr; the TB whose CAS sees the counter reach
// numBarr resets it to 0 and releases its own block via *done (a per-block
// flag).  'backoff' throttles how often non-winning TBs re-poll the counter.
inline __device__ void cudaBarrierAtomicSub(unsigned int * globalBarr,
                                            int * done,
                                            // numBarr represents the number of
                                            // TBs going to the barrier
                                            const unsigned int numBarr,
                                            int backoff,
                                            const bool isMasterThread)
{
  __syncthreads();
  if (isMasterThread)
  {
    *done = 0;
    // atomicInc acts as a store release, need TF to enforce ordering
    __threadfence();
    // atomicInc effectively adds 1 to atomic for each TB that's part of the
    // global barrier.
    atomicInc(globalBarr, 0x7FFFFFFF);
  }
  __syncthreads();
  while (!*done)
  {
    if (isMasterThread)
    {
      /*
        For the tree barrier we expect only 1 TB from each SM to enter the
        global barrier.  Since we are assuming an equal amount of work for all
        SMs, we can use the # of TBs reaching the barrier for the compare value
        here.  Once the atomic's value == numBarr, then reset the value to 0 and
        proceed because all of the TBs have reached the global barrier.
      */
      if (atomicCAS(globalBarr, numBarr, 0) == 0) {
        // atomicCAS acts as a load acquire, need TF to enforce ordering
        __threadfence();
        *done = 1;
      }
      else { // increase backoff to avoid repeatedly hammering global barrier
        // (capped) exponential backoff
        backoff = (((backoff << 1) + 1) & (MAX_BACKOFF-1));
      }
    }
    __syncthreads();
    // do exponential backoff to reduce the number of times we pound the global
    // barrier
    if (!*done) {
      for (int i = 0; i < backoff; ++i) { ; }
      __syncthreads();
    }
  }
}
// Global barrier entry point: joins the single-phase atomic barrier stored
// at barrierBuffers[0].
// NOTE(review): the second, sense-reversing phase (atomic2) is commented
// out; with only one phase, back-to-back barriers can race with stragglers
// from the previous barrier -- confirm this is intended.
inline __device__ void cudaBarrierAtomic(unsigned int * barrierBuffers,
                                         // numBarr represents the number of
                                         // TBs going to the barrier
                                         const unsigned int numBarr,
                                         const bool isMasterThread)
{
  unsigned int * atomic1 = barrierBuffers;
  // unsigned int * atomic2 = atomic1 + 1;
  __shared__ int done1;
  __shared__ int backoff;
  if (isMasterThread) {
    backoff = 1;
  }
  __syncthreads();
  cudaBarrierAtomicSub(atomic1, &done1, numBarr, backoff, isMasterThread);
  // second barrier is necessary to provide a facesimile for a sense-reversing
  // barrier
  //cudaBarrierAtomicSub(atomic2, &done2, numBarr, backoff, isMasterThread);
}
// does local barrier amongst all of the TBs on an SM
// Local (per-SM) sub-barrier: every TB on this SM increments *perSMBarr; the
// TB whose CAS sees the counter reach numTBs_thisSM resets it, records its
// blockIdx.x in *last_block, and releases its own block via *done.
// NOTE(review): the wait predicate relies on '==' binding tighter than the
// bitwise '|', i.e. (*global_done | (*last_block == blockIdx.x)); confirm
// that is the intended condition.  Every thread also stores 1025 into the
// shared *last_block sentinel before the loop.
inline __device__ void cudaBarrierAtomicSubLocal(unsigned int * perSMBarr,
                                                 unsigned int * last_block,
                                                 int * done,
                                                 unsigned int * global_done,
                                                 const unsigned int numTBs_thisSM,
                                                 const bool isMasterThread)
{
  __syncthreads();
  if (isMasterThread)
  {
    *done = 0;
    // atomicInc acts as a store release, need TF to enforce ordering locally
    __threadfence_block();
    /*
      atomicInc effectively adds 1 to atomic for each TB that's part of the
      barrier.  For the local barrier, this requires using the per-CU
      locations.
    */
    atomicInc(perSMBarr, 0x7FFFFFFF);
  }
  __syncthreads();
  *last_block = 1025;
  while (!*done && (*global_done | *last_block == blockIdx.x))
  {
    if (isMasterThread)
    {
      /*
        Once all of the TBs on this SM have incremented the value at atomic,
        then the value (for the local barrier) should be equal to the # of TBs
        on this SM.  Once that is true, then we want to reset the atomic to 0
        and proceed because all of the TBs on this SM have reached the local
        barrier.
      */
      if (atomicCAS(perSMBarr, numTBs_thisSM, 0) == 0) {
        // atomicCAS acts as a load acquire, need TF to enforce ordering
        // locally
        __threadfence_block();
        *last_block = blockIdx.x;
        // Fix: print the recorded block index, not the pointer itself --
        // passing an address to %d was undefined behaviour and printed
        // garbage instead of the block ID.
        printf("Block which came last is %d\n", *last_block);
        *done = 1;
      }
    }
    __syncthreads();
  }
}
// does local barrier amongst all of the TBs on an SM
// Local barrier entry point for one SM.  Uses slot [smID*MAX_BLOCKS] as the
// arrival counter and the next slot as the "last block to arrive" record;
// returns a pointer to the latter so the caller can tell which TB should
// proceed to the global barrier.
inline __device__ unsigned int* cudaBarrierAtomicLocal(unsigned int * perSMBarrierBuffers,
                                                       unsigned int * global_done,
                                                       const unsigned int smID,
                                                       const unsigned int numTBs_thisSM,
                                                       const bool isMasterThread,
                                                       const int MAX_BLOCKS)
{
  // each SM has MAX_BLOCKS locations in barrierBuffers, so my SM's locations
  // start at barrierBuffers[smID*MAX_BLOCKS]
  unsigned int * atomic1 = perSMBarrierBuffers + (smID * MAX_BLOCKS);
  unsigned int * atomic2 = atomic1 + 1;
  __shared__ int done1 ;
  cudaBarrierAtomicSubLocal(atomic1, atomic2, &done1, global_done, numTBs_thisSM, isMasterThread);
  // second barrier is necessary to approproximate a sense-reversing barrier
  //cudaBarrierAtomicSubLocal(atomic1, &done1, numTBs_thisSM, isMasterThread);
  return atomic2;
}
/*
Helper function for joining the barrier with the atomic tree barrier.
*/
// Joins the two-level (tree) barrier: first a local per-SM barrier, then the
// TB recorded as last-to-arrive on each SM joins the global barrier on
// behalf of its SM.  With a single TB per SM the local level is skipped.
// NOTE(review): *last_block is read by all TBs after the local barrier;
// its slot is reused on the next barrier round -- confirm no overlap.
__device__ void joinBarrier_helper(unsigned int * barrierBuffers,
                                   unsigned int * perSMBarrierBuffers,
                                   const unsigned int numBlocksAtBarr,
                                   const int smID,
                                   const int perSM_blockID,
                                   const int numTBs_perSM,
                                   const bool isMasterThread,
                                   const int MAX_BLOCKS) {
  if (numTBs_perSM > 1) {
    unsigned int * global_done = barrierBuffers;
    unsigned int * last_block = cudaBarrierAtomicLocal(perSMBarrierBuffers, global_done, smID, numTBs_perSM,
                                                       isMasterThread, MAX_BLOCKS);
    // only 1 TB per SM needs to do the global barrier since we synchronized
    // the TBs locally first
    if (blockIdx.x == *last_block) {
      //printf("Trying to get into global barrier with block ID %d\n", blockIdx.x);
      cudaBarrierAtomic(barrierBuffers, numBlocksAtBarr, isMasterThread);
    }
    // all TBs on this SM do a local barrier to ensure global barrier is
    // reached
    // cudaBarrierAtomicLocal(perSMBarrierBuffers, smID, numTBs_perS, isMasterThread, MAX_BLOCKS);
  } else { // if only 1 TB on the SM, no need for the local barriers
    cudaBarrierAtomic(barrierBuffers, numBlocksAtBarr, isMasterThread);
  }
}
#endif
| 63ea0dde2d9b9dd69e66cb5e2f290991d1a4609f.cu | #ifndef __CUDALOCKSBARRIERATOMIC_CU__
#define __CUDALOCKSBARRIERATOMIC_CU__
#include "cudaLocks.h"
// One phase of the global (inter-SM) barrier: each participating TB's master
// thread increments *globalBarr; the TB whose CAS sees the counter reach
// numBarr resets it to 0 and releases its own block via *done (a per-block
// flag).  'backoff' throttles how often non-winning TBs re-poll the counter.
inline __device__ void cudaBarrierAtomicSub(unsigned int * globalBarr,
                                            int * done,
                                            // numBarr represents the number of
                                            // TBs going to the barrier
                                            const unsigned int numBarr,
                                            int backoff,
                                            const bool isMasterThread)
{
  __syncthreads();
  if (isMasterThread)
  {
    *done = 0;
    // atomicInc acts as a store release, need TF to enforce ordering
    __threadfence();
    // atomicInc effectively adds 1 to atomic for each TB that's part of the
    // global barrier.
    atomicInc(globalBarr, 0x7FFFFFFF);
  }
  __syncthreads();
  while (!*done)
  {
    if (isMasterThread)
    {
      /*
        For the tree barrier we expect only 1 TB from each SM to enter the
        global barrier.  Since we are assuming an equal amount of work for all
        SMs, we can use the # of TBs reaching the barrier for the compare value
        here.  Once the atomic's value == numBarr, then reset the value to 0 and
        proceed because all of the TBs have reached the global barrier.
      */
      if (atomicCAS(globalBarr, numBarr, 0) == 0) {
        // atomicCAS acts as a load acquire, need TF to enforce ordering
        __threadfence();
        *done = 1;
      }
      else { // increase backoff to avoid repeatedly hammering global barrier
        // (capped) exponential backoff
        backoff = (((backoff << 1) + 1) & (MAX_BACKOFF-1));
      }
    }
    __syncthreads();
    // do exponential backoff to reduce the number of times we pound the global
    // barrier
    if (!*done) {
      for (int i = 0; i < backoff; ++i) { ; }
      __syncthreads();
    }
  }
}
// Global barrier entry point: joins the single-phase atomic barrier stored
// at barrierBuffers[0].
// NOTE(review): the second, sense-reversing phase (atomic2) is commented
// out; with only one phase, back-to-back barriers can race with stragglers
// from the previous barrier -- confirm this is intended.
inline __device__ void cudaBarrierAtomic(unsigned int * barrierBuffers,
                                         // numBarr represents the number of
                                         // TBs going to the barrier
                                         const unsigned int numBarr,
                                         const bool isMasterThread)
{
  unsigned int * atomic1 = barrierBuffers;
  // unsigned int * atomic2 = atomic1 + 1;
  __shared__ int done1;
  __shared__ int backoff;
  if (isMasterThread) {
    backoff = 1;
  }
  __syncthreads();
  cudaBarrierAtomicSub(atomic1, &done1, numBarr, backoff, isMasterThread);
  // second barrier is necessary to provide a facesimile for a sense-reversing
  // barrier
  //cudaBarrierAtomicSub(atomic2, &done2, numBarr, backoff, isMasterThread);
}
// does local barrier amongst all of the TBs on an SM
// Local (per-SM) sub-barrier: every TB on this SM increments *perSMBarr; the
// TB whose CAS sees the counter reach numTBs_thisSM resets it, records its
// blockIdx.x in *last_block, and releases its own block via *done.
// NOTE(review): the wait predicate relies on '==' binding tighter than the
// bitwise '|', i.e. (*global_done | (*last_block == blockIdx.x)); confirm
// that is the intended condition.  Every thread also stores 1025 into the
// shared *last_block sentinel before the loop.
inline __device__ void cudaBarrierAtomicSubLocal(unsigned int * perSMBarr,
                                                 unsigned int * last_block,
                                                 int * done,
                                                 unsigned int * global_done,
                                                 const unsigned int numTBs_thisSM,
                                                 const bool isMasterThread)
{
  __syncthreads();
  if (isMasterThread)
  {
    *done = 0;
    // atomicInc acts as a store release, need TF to enforce ordering locally
    __threadfence_block();
    /*
      atomicInc effectively adds 1 to atomic for each TB that's part of the
      barrier.  For the local barrier, this requires using the per-CU
      locations.
    */
    atomicInc(perSMBarr, 0x7FFFFFFF);
  }
  __syncthreads();
  *last_block = 1025;
  while (!*done && (*global_done | *last_block == blockIdx.x))
  {
    if (isMasterThread)
    {
      /*
        Once all of the TBs on this SM have incremented the value at atomic,
        then the value (for the local barrier) should be equal to the # of TBs
        on this SM.  Once that is true, then we want to reset the atomic to 0
        and proceed because all of the TBs on this SM have reached the local
        barrier.
      */
      if (atomicCAS(perSMBarr, numTBs_thisSM, 0) == 0) {
        // atomicCAS acts as a load acquire, need TF to enforce ordering
        // locally
        __threadfence_block();
        *last_block = blockIdx.x;
        // Fix: print the recorded block index, not the pointer itself --
        // passing an address to %d was undefined behaviour and printed
        // garbage instead of the block ID.
        printf("Block which came last is %d\n", *last_block);
        *done = 1;
      }
    }
    __syncthreads();
  }
}
// does local barrier amongst all of the TBs on an SM
// Local barrier entry point for one SM.  Uses slot [smID*MAX_BLOCKS] as the
// arrival counter and the next slot as the "last block to arrive" record;
// returns a pointer to the latter so the caller can tell which TB should
// proceed to the global barrier.
inline __device__ unsigned int* cudaBarrierAtomicLocal(unsigned int * perSMBarrierBuffers,
                                                       unsigned int * global_done,
                                                       const unsigned int smID,
                                                       const unsigned int numTBs_thisSM,
                                                       const bool isMasterThread,
                                                       const int MAX_BLOCKS)
{
  // each SM has MAX_BLOCKS locations in barrierBuffers, so my SM's locations
  // start at barrierBuffers[smID*MAX_BLOCKS]
  unsigned int * atomic1 = perSMBarrierBuffers + (smID * MAX_BLOCKS);
  unsigned int * atomic2 = atomic1 + 1;
  __shared__ int done1 ;
  cudaBarrierAtomicSubLocal(atomic1, atomic2, &done1, global_done, numTBs_thisSM, isMasterThread);
  // second barrier is necessary to approproximate a sense-reversing barrier
  //cudaBarrierAtomicSubLocal(atomic1, &done1, numTBs_thisSM, isMasterThread);
  return atomic2;
}
/*
Helper function for joining the barrier with the atomic tree barrier.
*/
// Joins the two-level (tree) barrier: first a local per-SM barrier, then the
// TB recorded as last-to-arrive on each SM joins the global barrier on
// behalf of its SM.  With a single TB per SM the local level is skipped.
// NOTE(review): *last_block is read by all TBs after the local barrier;
// its slot is reused on the next barrier round -- confirm no overlap.
__device__ void joinBarrier_helper(unsigned int * barrierBuffers,
                                   unsigned int * perSMBarrierBuffers,
                                   const unsigned int numBlocksAtBarr,
                                   const int smID,
                                   const int perSM_blockID,
                                   const int numTBs_perSM,
                                   const bool isMasterThread,
                                   const int MAX_BLOCKS) {
  if (numTBs_perSM > 1) {
    unsigned int * global_done = barrierBuffers;
    unsigned int * last_block = cudaBarrierAtomicLocal(perSMBarrierBuffers, global_done, smID, numTBs_perSM,
                                                       isMasterThread, MAX_BLOCKS);
    // only 1 TB per SM needs to do the global barrier since we synchronized
    // the TBs locally first
    if (blockIdx.x == *last_block) {
      //printf("Trying to get into global barrier with block ID %d\n", blockIdx.x);
      cudaBarrierAtomic(barrierBuffers, numBlocksAtBarr, isMasterThread);
    }
    // all TBs on this SM do a local barrier to ensure global barrier is
    // reached
    // cudaBarrierAtomicLocal(perSMBarrierBuffers, smID, numTBs_perS, isMasterThread, MAX_BLOCKS);
  } else { // if only 1 TB on the SM, no need for the local barriers
    cudaBarrierAtomic(barrierBuffers, numBlocksAtBarr, isMasterThread);
  }
}
#endif
|
6266b17fbf15bb4b87dbb303ee3c879d4a9d5738.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stdio.h"
#define COLUMNS 3
#define ROWS 2
// Element-wise matrix addition: one block per element, launched on a
// (COLUMNS x ROWS) grid with a single thread per block.
// blockIdx.x selects the column, blockIdx.y the row.
__global__ void add(int *a, int *b, int *c)
{
    const int idx = blockIdx.y * COLUMNS + blockIdx.x;  // row-major flat index
    c[idx] = a[idx] + b[idx];
}
// Demo driver: fills a ROWS x COLUMNS pair of matrices (a[y][x]=x,
// b[y][x]=y), adds them on the GPU one element per block, and prints the
// result.  Fix: the three device buffers were never released; they are now
// freed before returning (the mangled hipLaunchKernelGGL line split is also
// cleaned up).
int main()
{
    int a[ROWS][COLUMNS], b[ROWS][COLUMNS], c[ROWS][COLUMNS];
    int *dev_a, *dev_b, *dev_c;

    hipMalloc((void **) &dev_a, ROWS*COLUMNS*sizeof(int));
    hipMalloc((void **) &dev_b, ROWS*COLUMNS*sizeof(int));
    hipMalloc((void **) &dev_c, ROWS*COLUMNS*sizeof(int));

    for (int y = 0; y < ROWS; y++)    // Fill Arrays
        for (int x = 0; x < COLUMNS; x++)
        {
            a[y][x] = x;
            b[y][x] = y;
        }

    hipMemcpy(dev_a, a, ROWS*COLUMNS*sizeof(int),
              hipMemcpyHostToDevice);
    hipMemcpy(dev_b, b, ROWS*COLUMNS*sizeof(int),
              hipMemcpyHostToDevice);

    // One single-thread block per matrix element.
    dim3 grid(COLUMNS,ROWS);
    hipLaunchKernelGGL(add, dim3(grid), dim3(1), 0, 0, dev_a, dev_b, dev_c);

    hipMemcpy(c, dev_c, ROWS*COLUMNS*sizeof(int),
              hipMemcpyDeviceToHost);

    for (int y = 0; y < ROWS; y++)    // Output Arrays
    {
        for (int x = 0; x < COLUMNS; x++)
        {
            printf("[%d][%d]=%d ",y,x,c[y][x]);
        }
        printf("\n");
    }

    // Fix: release the device allocations (previously leaked).
    hipFree(dev_a);
    hipFree(dev_b);
    hipFree(dev_c);
    return 0;
} | 6266b17fbf15bb4b87dbb303ee3c879d4a9d5738.cu | #include "stdio.h"
#define COLUMNS 3
#define ROWS 2
// Element-wise matrix addition: one single-thread block per element on a
// (COLUMNS x ROWS) grid; blockIdx.x is the column, blockIdx.y the row.
__global__ void add(int *a, int *b, int *c)
{
    int x = blockIdx.x;
    int y = blockIdx.y;
    int i = (COLUMNS*y) + x;   // row-major flat index
    c[i] = a[i] + b[i];
}
// Demo driver: fills a ROWS x COLUMNS pair of matrices (a[y][x]=x,
// b[y][x]=y), adds them on the GPU one element per block, and prints the
// result.  NOTE(review): the API calls are unchecked and dev_a/dev_b/dev_c
// are never cudaFree'd -- acceptable for a demo, but worth fixing.
int main()
{
    int a[ROWS][COLUMNS], b[ROWS][COLUMNS], c[ROWS][COLUMNS];
    int *dev_a, *dev_b, *dev_c;
    cudaMalloc((void **) &dev_a, ROWS*COLUMNS*sizeof(int));
    cudaMalloc((void **) &dev_b, ROWS*COLUMNS*sizeof(int));
    cudaMalloc((void **) &dev_c, ROWS*COLUMNS*sizeof(int));
    for (int y = 0; y < ROWS; y++)    // Fill Arrays
        for (int x = 0; x < COLUMNS; x++)
        {
            a[y][x] = x;
            b[y][x] = y;
        }
    cudaMemcpy(dev_a, a, ROWS*COLUMNS*sizeof(int),
    cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, ROWS*COLUMNS*sizeof(int),
    cudaMemcpyHostToDevice);
    // One single-thread block per matrix element.
    dim3 grid(COLUMNS,ROWS);
    add<<<grid,1>>>(dev_a, dev_b, dev_c);
    // Blocking copy back: also synchronizes with the kernel launch.
    cudaMemcpy(c, dev_c, ROWS*COLUMNS*sizeof(int),
    cudaMemcpyDeviceToHost);
    for (int y = 0; y < ROWS; y++)    // Output Arrays
    {
        for (int x = 0; x < COLUMNS; x++)
        {
            printf("[%d][%d]=%d ",y,x,c[y][x]);
        }
        printf("\n");
    }
    return 0;
} |
ca62e8324c5f43eb8bbfd7706fa85aa28c617750.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdlib>
#include <iostream>
#include <algorithm>
#include <string>
#include <vector>
#include <cmath>
#include <boost/timer/timer.hpp>
#include "nmt.h"
#include "common/vocab.h"
#include "common/god.h"
#include "common/history.h"
#include "common/sentence.h"
#include "common/search.h"
namespace amunmt {
// Initialise the amunmt God configuration/state object from a config file,
// passed through the same "-c <path>" command-line form God::Init expects.
void MosesPlugin::initGod(const std::string& configPath) {
  std::string configs = "-c " + configPath;
  god_.Init(configs);
}
// Default constructor/destructor: god_ is default-constructed and must be
// initialised separately via initGod() before use.
MosesPlugin::MosesPlugin()
{}
MosesPlugin::~MosesPlugin()
{
}
// Report the number of usable GPU devices, capped at maxDevices, printing
// each device's name to stderr for diagnostics.
size_t MosesPlugin::GetDevices(size_t maxDevices) {
  int num_gpus = 0; // number of CUDA GPUs
  HANDLE_ERROR( hipGetDeviceCount(&num_gpus));
  std::cerr << "Number of CUDA devices: " << num_gpus << std::endl;

  for (int i = 0; i < num_gpus; i++) {
    hipDeviceProp_t dprop;
    HANDLE_ERROR( hipGetDeviceProperties(&dprop, i));
    std::cerr << i << ": " << dprop.name << std::endl;
  }
  // Fix: qualify with std:: -- the bare '::min' (a hipify conversion
  // artefact; the pre-conversion source used std::min) only compiles if
  // <algorithm> happens to leak min into the global namespace.
  return (size_t)std::min(num_gpus, (int)maxDevices);
}
// Wrap a source sentence (already mapped to vocabulary ids) into an amunmt
// Sentences batch of size 1, run the encoder, and return the resulting
// decoder states plus the initial (empty) hypothesis beam with score 0.
AmunOutput MosesPlugin::SetSource(const std::vector<size_t>& words) {
  AmunOutput ret;
  amunmt::Sentences sentences;
  sentences.push_back(SentencePtr(new Sentence(god_, 0, words)));
  // Encode
  Search &search = god_.GetSearch();
  // NOTE(review): numScorers is computed but never used in this function.
  size_t numScorers = search.GetScorers().size();
  std::shared_ptr<Histories> histories(new Histories(god_, sentences));
  size_t batchSize = sentences.size();
  Beam prevHyps(batchSize, HypothesisPtr(new Hypothesis()));
  States states = search.NewStates();
  search.PreProcess(god_, sentences, histories, prevHyps);
  search.Encode(sentences, states);
  // fill return info
  ret.states = states;
  ret.prevHyps = prevHyps;
  ret.score = 0;
  return ret;
}
AmunOutputs MosesPlugin::Score(const AmunInputs &inputs)
{
  // Scoring is not implemented yet: hand back one default-constructed
  // output slot per input so callers can rely on matching sizes.
  // TODO
  return AmunOutputs(inputs.size());
}
}
| ca62e8324c5f43eb8bbfd7706fa85aa28c617750.cu | #include <cstdlib>
#include <iostream>
#include <algorithm>
#include <string>
#include <vector>
#include <cmath>
#include <boost/timer/timer.hpp>
#include "nmt.h"
#include "common/vocab.h"
#include "common/god.h"
#include "common/history.h"
#include "common/sentence.h"
#include "common/search.h"
namespace amunmt {

// Initialise the amunmt God configuration/state object from a config file,
// passed through the same "-c <path>" command-line form God::Init expects.
void MosesPlugin::initGod(const std::string& configPath) {
  std::string configs = "-c " + configPath;
  god_.Init(configs);
}
// Default constructor/destructor: god_ must be set up via initGod().
MosesPlugin::MosesPlugin()
{}
MosesPlugin::~MosesPlugin()
{
}
// Report the number of usable CUDA devices, capped at maxDevices, printing
// each device's name to stderr for diagnostics.
size_t MosesPlugin::GetDevices(size_t maxDevices) {
  int num_gpus = 0; // number of CUDA GPUs
  HANDLE_ERROR( cudaGetDeviceCount(&num_gpus));
  std::cerr << "Number of CUDA devices: " << num_gpus << std::endl;
  for (int i = 0; i < num_gpus; i++) {
    cudaDeviceProp dprop;
    HANDLE_ERROR( cudaGetDeviceProperties(&dprop, i));
    std::cerr << i << ": " << dprop.name << std::endl;
  }
  return (size_t)std::min(num_gpus, (int)maxDevices);
}
// Wrap a source sentence (already mapped to vocabulary ids) into a batch of
// size 1, run the encoder, and return the resulting decoder states plus the
// initial (empty) hypothesis beam with score 0.
AmunOutput MosesPlugin::SetSource(const std::vector<size_t>& words) {
  AmunOutput ret;
  amunmt::Sentences sentences;
  sentences.push_back(SentencePtr(new Sentence(god_, 0, words)));
  // Encode
  Search &search = god_.GetSearch();
  // NOTE(review): numScorers is computed but never used in this function.
  size_t numScorers = search.GetScorers().size();
  std::shared_ptr<Histories> histories(new Histories(god_, sentences));
  size_t batchSize = sentences.size();
  Beam prevHyps(batchSize, HypothesisPtr(new Hypothesis()));
  States states = search.NewStates();
  search.PreProcess(god_, sentences, histories, prevHyps);
  search.Encode(sentences, states);
  // fill return info
  ret.states = states;
  ret.prevHyps = prevHyps;
  ret.score = 0;
  return ret;
}
// Scoring is not implemented yet: returns one default-constructed output
// slot per input so callers can rely on matching sizes.
AmunOutputs MosesPlugin::Score(const AmunInputs &inputs)
{
  AmunOutputs outputs(inputs.size());
  // TODO
  return outputs;
}
}
|
169736b254040d06a920ac7397b77a24ec7437b1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Alexandre Maros - 2016
*
* Cuda Matrix Multiplication with Shared Memory.
*
* nvcc cuda_matrix_shared.cu -o cs.o
*
* Implemented by Alexandre Maros for learning purposes.
* A version of this code using Global Memory is in here:
* https://github.com/alepmaros/cuda_matrix_multiplication
*
* Distributed under the MIT Lincese.
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
// 32x32 Threads in a block.
#define NTHREADS_X 32
#define NTHREADS_Y 32
#define THREADS_PER_BLOCK NTHREADS_X * NTHREADS_Y
/* A macro used for error checking in CUDA function calls
* Credit to: http://stackoverflow.com/a/14038590 for the gpuErrchk macro.
*/
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Companion to the gpuErrchk macro: on failure, report the translated error
// string with the call site, and (by default) terminate with the error code.
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
   if (code == hipSuccess)
      return;
   fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
   if (abort)
      exit(code);
}
/* Tiled (shared-memory) integer matrix multiply: C = A * B.
 * One thread per C element; each of the nBlocks iterations stages one
 * NTHREADS_Y x NTHREADS_X tile of A and of B into shared memory, guarded by
 * bounds checks so partial edge tiles load only valid elements.
 */
__global__ void matrix_mul(int *a, int *b, int *c, int a_ncolumns, int c_nlines,
        int c_ncolumns, int nBlocks)
{
    int i, z, sum = 0;
    /* How many multiplications there will be for each value in Matrix C
     * This corresponds to the number of columns in Matrix A (or number of)
     * lines in Matrix B
     */
    int nMultiplications = a_ncolumns;
    /* Each iteration of the block will multiply NTHREADS_Y values. This value
     * Can be less then NTHREADS_Y if the number of a_ncolumns is not divisible
     * by NTHREADS_Y. This value is used to control that.
     */
    int multiplicationsInBlock = NTHREADS_Y;
    int column = blockIdx.x * blockDim.x + threadIdx.x;
    int line = blockIdx.y * blockDim.y + threadIdx.y;
    __shared__ int s_a[NTHREADS_Y][NTHREADS_X];
    __shared__ int s_b[NTHREADS_Y][NTHREADS_X];
    /* temporary line and temporary column
     * Each thread is responsible for loading one value in the matrix A and
     * Matrix B. These variables are used to hold which line and column of the
     * original Matrices they are suppose to load. I also need to check if those
     * values that they will load actually correspond to a valid position in the
     * original Matrix.
     */
    int a_tLine, a_tColumn, b_tLine, b_tColumn;
    for (z = 0; z < nBlocks; z++)
    {
        // Load Matrix A
        a_tLine = (blockIdx.y * NTHREADS_Y + threadIdx.y);
        a_tColumn = (z * NTHREADS_X + threadIdx.x);
        if (a_tLine < c_nlines && a_tColumn < a_ncolumns)
        {
            s_a[threadIdx.y][threadIdx.x] = a[ (a_ncolumns * a_tLine) + a_tColumn];
        }
        // Load Matrix B
        b_tLine = (z * NTHREADS_Y + threadIdx.y);
        b_tColumn = (blockIdx.x * NTHREADS_X + threadIdx.x);
        if (b_tLine < a_ncolumns && b_tColumn < c_ncolumns)
        {
            s_b[threadIdx.y][threadIdx.x] = b[ (c_ncolumns * b_tLine) + b_tColumn ];
        }
        // Barrier: tiles fully staged before any thread consumes them.
        __syncthreads();
        /* Checkin to see if that thread actually belongs to a valid position in
         * the Matrix C
         */
        if (column < c_ncolumns && line < c_nlines)
        {
            // Clamp the inner product length on the final, partial tile.
            if (nMultiplications < NTHREADS_Y)
            {
                multiplicationsInBlock = nMultiplications;
            }
            for (i = 0; i < multiplicationsInBlock; i++)
            {
                sum += s_a[threadIdx.y][i] * s_b[i][threadIdx.x];
            }
            nMultiplications -= NTHREADS_Y;
        }
        // Barrier: finish consuming the tiles before the next load overwrites them.
        __syncthreads();
    }
    /* Checkin to see if that thread actually belongs to a valid position in
     * the Matrix C
     */
    if (column < c_ncolumns && line < c_nlines)
    {
        c[line * c_ncolumns + column] = sum;
    }
}
// Driver: reads the matrix dimensions and entries from stdin, multiplies
// A (a_nlines x a_ncolumns) by B (b_nlines x b_ncolumns) on the GPU with the
// shared-memory tiled kernel, prints C, and optionally (__TIME) the kernel
// time in milliseconds.  Fix: the two hipEvent_t handles were never
// destroyed; they are now released before exit.
int main()
{
    int *a, *b, *c;
    int *d_a, *d_b, *d_c;
    int a_nlines, a_ncolumns;
    int b_nlines, b_ncolumns;
    int c_nlines, c_ncolumns;
    size_t a_size, b_size, c_size;
    int i, j;

    hipEvent_t start, stop;
    gpuErrchk( hipEventCreate(&start) );
    gpuErrchk( hipEventCreate(&stop) );

    scanf("%d", &a_nlines);
    scanf("%d", &a_ncolumns);
    scanf("%d", &b_nlines);
    scanf("%d", &b_ncolumns);
    c_nlines = a_nlines;
    c_ncolumns = b_ncolumns;

    #ifdef __DEBUG
    printf("a_nlines: %d\na_ncolumns: %d\nb_nlines: %d\nb_ncolumns: %d\nc_nlines: %d\nc_ncolumns: %d\n", a_nlines, a_ncolumns, b_nlines, b_ncolumns, c_nlines, c_ncolumns);
    #endif

    // Inner dimensions must agree for A*B to be defined.
    if ( a_ncolumns != b_nlines )
    {
        printf("Number of columns in Matrix A should be equals to number of lines in Matrix B\n");
        return EXIT_FAILURE;
    }

    a_size = a_nlines * a_ncolumns * sizeof(int);
    b_size = b_nlines * b_ncolumns * sizeof(int);
    c_size = c_nlines * c_ncolumns * sizeof(int);

    gpuErrchk( hipMalloc((void **) &d_a, a_size) );
    gpuErrchk( hipMalloc((void **) &d_b, b_size) );
    gpuErrchk( hipMalloc((void **) &d_c, c_size) );

    a = (int *)malloc(a_size);
    b = (int *)malloc(b_size);
    c = (int *)malloc(c_size);
    memset(c, 0, c_nlines*c_ncolumns*sizeof(int));

    for (i = 0; i < a_nlines; i++)
    {
        for (j = 0; j < a_ncolumns; j++)
        {
            scanf("%d", &a[i * a_ncolumns + j]);
        }
    }
    for (i = 0; i < b_nlines; i++)
    {
        for (j = 0; j < b_ncolumns; j++)
        {
            scanf("%d", &b[i * b_ncolumns + j]);
        }
    }

    gpuErrchk( hipMemcpy(d_a, a, a_size, hipMemcpyHostToDevice) );
    gpuErrchk( hipMemcpy(d_b, b, b_size, hipMemcpyHostToDevice) );

    // Grid of ceil(cols/TILE) x ceil(rows/TILE) blocks, one thread per C element.
    dim3 tbloco = dim3(
        (int) ::ceil( (double) c_ncolumns / NTHREADS_X ),
        (int) ::ceil( (double) c_nlines / NTHREADS_Y ),
        1
    );
    dim3 tthreads = dim3(
        NTHREADS_X,
        NTHREADS_Y,
        1
    );

    #ifdef __DEBUG
    printf("tbloco.x: %d tbloco.y: %d tbloco.z: %d\n", tbloco.x, tbloco.y, tbloco.z);
    printf("tthreads.x: %d tthreads.y: %d\n", tthreads.x, tthreads.y);
    #endif

    hipEventRecord(start);
    // kernel call: last argument is the number of K-dimension tiles.
    hipLaunchKernelGGL(( matrix_mul), dim3(tbloco),dim3(tthreads), 0, 0, d_a, d_b, d_c, a_ncolumns, c_nlines,
            c_ncolumns, (int) ::ceil( (double) a_ncolumns / NTHREADS_X));
    gpuErrchk( hipPeekAtLastError() );
    gpuErrchk( hipEventRecord(stop) );

    gpuErrchk( hipMemcpy(c, d_c, c_size, hipMemcpyDeviceToHost) );
    gpuErrchk( hipEventSynchronize(stop) );

    #ifndef __NO_OUTPUT
    // print Matrix
    for (i = 0; i < c_nlines; i++)
    {
        for (j = 0; j < c_ncolumns; j++)
        {
            printf("%d ", c[i * c_ncolumns + j]);
        }
        printf("\n");
    }
    printf("\n");
    #endif

    #ifdef __TIME
    float milliseconds = 0;
    gpuErrchk( hipEventElapsedTime(&milliseconds, start, stop) );
    printf("%.5f\n", milliseconds);
    #endif

    free(a); free(b); free(c);
    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);
    // Fix: destroy the timing events (previously leaked).
    gpuErrchk( hipEventDestroy(start) );
    gpuErrchk( hipEventDestroy(stop) );

    return 0;
}
| 169736b254040d06a920ac7397b77a24ec7437b1.cu | /*
* Alexandre Maros - 2016
*
* Cuda Matrix Multiplication with Shared Memory.
*
* nvcc cuda_matrix_shared.cu -o cs.o
*
* Implemented by Alexandre Maros for learning purposes.
* A version of this code using Global Memory is in here:
* https://github.com/alepmaros/cuda_matrix_multiplication
*
* Distributed under the MIT Lincese.
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
// 32x32 Threads in a block.
#define NTHREADS_X 32
#define NTHREADS_Y 32
#define THREADS_PER_BLOCK NTHREADS_X * NTHREADS_Y
/* A macro used for error checking in CUDA function calls
* Credit to: http://stackoverflow.com/a/14038590 for the gpuErrchk macro.
*/
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Companion to the gpuErrchk macro: on failure, report the translated error
// string with the call site and (by default) terminate with the error code.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
   if (code != cudaSuccess)
   {
      fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
      if (abort) exit(code);
   }
}
/* Tiled (shared-memory) integer matrix multiply: C = A * B.
 * One thread per C element; each of the nBlocks iterations stages one
 * NTHREADS_Y x NTHREADS_X tile of A and of B into shared memory, guarded by
 * bounds checks so partial edge tiles load only valid elements.
 */
__global__ void matrix_mul(int *a, int *b, int *c, int a_ncolumns, int c_nlines,
        int c_ncolumns, int nBlocks)
{
    int i, z, sum = 0;
    /* How many multiplications there will be for each value in Matrix C
     * This corresponds to the number of columns in Matrix A (or number of)
     * lines in Matrix B
     */
    int nMultiplications = a_ncolumns;
    /* Each iteration of the block will multiply NTHREADS_Y values. This value
     * Can be less then NTHREADS_Y if the number of a_ncolumns is not divisible
     * by NTHREADS_Y. This value is used to control that.
     */
    int multiplicationsInBlock = NTHREADS_Y;
    int column = blockIdx.x * blockDim.x + threadIdx.x;
    int line = blockIdx.y * blockDim.y + threadIdx.y;
    __shared__ int s_a[NTHREADS_Y][NTHREADS_X];
    __shared__ int s_b[NTHREADS_Y][NTHREADS_X];
    /* temporary line and temporary column
     * Each thread is responsible for loading one value in the matrix A and
     * Matrix B. These variables are used to hold which line and column of the
     * original Matrices they are suppose to load. I also need to check if those
     * values that they will load actually correspond to a valid position in the
     * original Matrix.
     */
    int a_tLine, a_tColumn, b_tLine, b_tColumn;
    for (z = 0; z < nBlocks; z++)
    {
        // Load Matrix A
        a_tLine = (blockIdx.y * NTHREADS_Y + threadIdx.y);
        a_tColumn = (z * NTHREADS_X + threadIdx.x);
        if (a_tLine < c_nlines && a_tColumn < a_ncolumns)
        {
            s_a[threadIdx.y][threadIdx.x] = a[ (a_ncolumns * a_tLine) + a_tColumn];
        }
        // Load Matrix B
        b_tLine = (z * NTHREADS_Y + threadIdx.y);
        b_tColumn = (blockIdx.x * NTHREADS_X + threadIdx.x);
        if (b_tLine < a_ncolumns && b_tColumn < c_ncolumns)
        {
            s_b[threadIdx.y][threadIdx.x] = b[ (c_ncolumns * b_tLine) + b_tColumn ];
        }
        // Barrier: tiles fully staged before any thread consumes them.
        __syncthreads();
        /* Checkin to see if that thread actually belongs to a valid position in
         * the Matrix C
         */
        if (column < c_ncolumns && line < c_nlines)
        {
            // Clamp the inner product length on the final, partial tile.
            if (nMultiplications < NTHREADS_Y)
            {
                multiplicationsInBlock = nMultiplications;
            }
            for (i = 0; i < multiplicationsInBlock; i++)
            {
                sum += s_a[threadIdx.y][i] * s_b[i][threadIdx.x];
            }
            nMultiplications -= NTHREADS_Y;
        }
        // Barrier: finish consuming the tiles before the next load overwrites them.
        __syncthreads();
    }
    /* Checkin to see if that thread actually belongs to a valid position in
     * the Matrix C
     */
    if (column < c_ncolumns && line < c_nlines)
    {
        c[line * c_ncolumns + column] = sum;
    }
}
int main()
{
int *a, *b, *c;
int *d_a, *d_b, *d_c;
int a_nlines, a_ncolumns;
int b_nlines, b_ncolumns;
int c_nlines, c_ncolumns;
size_t a_size, b_size, c_size;
int i, j;
cudaEvent_t start, stop;
gpuErrchk( cudaEventCreate(&start) );
gpuErrchk( cudaEventCreate(&stop) );
scanf("%d", &a_nlines);
scanf("%d", &a_ncolumns);
scanf("%d", &b_nlines);
scanf("%d", &b_ncolumns);
c_nlines = a_nlines;
c_ncolumns = b_ncolumns;
#ifdef __DEBUG
printf("a_nlines: %d\na_ncolumns: %d\nb_nlines: %d\nb_ncolumns: %d\nc_nlines: %d\nc_ncolumns: %d\n", a_nlines, a_ncolumns, b_nlines, b_ncolumns, c_nlines, c_ncolumns);
#endif
if ( a_ncolumns != b_nlines )
{
printf("Number of columns in Matrix A should be equals to number of lines in Matrix B\n");
return EXIT_FAILURE;
}
a_size = a_nlines * a_ncolumns * sizeof(int);
b_size = b_nlines * b_ncolumns * sizeof(int);
c_size = c_nlines * c_ncolumns * sizeof(int);
gpuErrchk( cudaMalloc((void **) &d_a, a_size) );
gpuErrchk( cudaMalloc((void **) &d_b, b_size) );
gpuErrchk( cudaMalloc((void **) &d_c, c_size) );
a = (int *)malloc(a_size);
b = (int *)malloc(b_size);
c = (int *)malloc(c_size);
memset(c, 0, c_nlines*c_ncolumns*sizeof(int));
for (i = 0; i < a_nlines; i++)
{
for (j = 0; j < a_ncolumns; j++)
{
scanf("%d", &a[i * a_ncolumns + j]);
}
}
for (i = 0; i < b_nlines; i++)
{
for (j = 0; j < b_ncolumns; j++)
{
scanf("%d", &b[i * b_ncolumns + j]);
}
}
gpuErrchk( cudaMemcpy(d_a, a, a_size, cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(d_b, b, b_size, cudaMemcpyHostToDevice) );
dim3 tbloco = dim3(
(int) std::ceil( (double) c_ncolumns / NTHREADS_X ),
(int) std::ceil( (double) c_nlines / NTHREADS_Y ),
1
);
dim3 tthreads = dim3(
NTHREADS_X,
NTHREADS_Y,
1
);
#ifdef __DEBUG
printf("tbloco.x: %d tbloco.y: %d tbloco.z: %d\n", tbloco.x, tbloco.y, tbloco.z);
printf("tthreads.x: %d tthreads.y: %d\n", tthreads.x, tthreads.y);
#endif
cudaEventRecord(start);
// kernel call
matrix_mul<<<tbloco,tthreads>>>(d_a, d_b, d_c, a_ncolumns, c_nlines,
c_ncolumns, (int) std::ceil( (double) a_ncolumns / NTHREADS_X));
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaEventRecord(stop) );
gpuErrchk( cudaMemcpy(c, d_c, c_size, cudaMemcpyDeviceToHost) );
gpuErrchk( cudaEventSynchronize(stop) );
#ifndef __NO_OUTPUT
// print Matrix
for (i = 0; i < c_nlines; i++)
{
for (j = 0; j < c_ncolumns; j++)
{
printf("%d ", c[i * c_ncolumns + j]);
}
printf("\n");
}
printf("\n");
#endif
#ifdef __TIME
float milliseconds = 0;
gpuErrchk( cudaEventElapsedTime(&milliseconds, start, stop) );
printf("%.5f\n", milliseconds);
#endif
free(a); free(b); free(c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
}
|
ce26423e13c365f7c015d15273757be83a8a2770.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ----------------------------------
// Copyright (c) 2011, Brown University
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// (1) Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// (3) Neither the name of Brown University nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY BROWN UNIVERSITY AS IS WITH NO
// WARRANTIES OR REPRESENTATIONS OF ANY KIND WHATSOEVER EITHER EXPRESS OR
// IMPLIED, INCLUDING WITHOUT LIMITATION ANY WARRANTY OF DESIGN OR
// MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, EACH OF WHICH ARE
// SPECIFICALLY DISCLAIMED, NOR ANY WARRANTY OR REPRESENTATIONS THAT THE
// SOFTWARE IS ERROR FREE OR THAT THE SOFTWARE WILL NOT INFRINGE ANY
// PATENT, COPYRIGHT, TRADEMARK, OR OTHER THIRD PARTY PROPRIETARY RIGHTS.
// IN NO EVENT SHALL BROWN UNIVERSITY BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
// OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY OR CAUSE OF ACTION, WHETHER IN CONTRACT,
// STRICT LIABILITY, TORT, NEGLIGENCE OR OTHERWISE, ARISING IN ANY WAY
// OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
// SUCH DAMAGE. ANY RECIPIENT OR USER OF THIS SOFTWARE ACKNOWLEDGES THE
// FOREGOING, AND ACCEPTS ALL RISKS AND LIABILITIES THAT MAY ARISE FROM
// THEIR USE OF THE SOFTWARE.
// ---------------------------------
/// \file ContrastFilter_kernels.cu
/// \author Andy Loomis
#include "ContrastFilter_kernels.h"
__global__
void contrast_filter_kernel(const float* input, float* output,
int width, int height,
float alpha, float beta, int size);
namespace xromm { namespace gpu {
void contrast_filter_apply(const float* input, float* output,
int width, int height,
float alpha, float beta, int size)
{
dim3 blockDim(16, 16);
dim3 gridDim((width+blockDim.x-1)/blockDim.x,
(height+blockDim.y-1)/blockDim.y);
hipLaunchKernelGGL(( contrast_filter_kernel), dim3(gridDim), dim3(blockDim), 0, 0, input, output,
width, height,
alpha, beta, size);
}
} } // namespace xromm::cuda
__device__
float average(const float* input, int width, int height, int x, int y, int size)
{
float n = 0.0f;
float sum = 0.0f;
int minI = max(y-size/2, 0);
int maxI = min(y+(size+1)/2, height);
int minJ = max(x-size/2, 0);
int maxJ = min(x+(size+1)/2, width);
for (int i = minI; i < maxI; ++i) {
for (int j = minJ; j < maxJ; ++j) {
n += 1.0f;
sum += input[i*width+j];
}
}
return sum/n;
}
__global__
void contrast_filter_kernel(const float* input, float* output,
int width, int height,
float alpha, float beta, int size)
{
short x = blockIdx.x*blockDim.x+threadIdx.x;
short y = blockIdx.y*blockDim.y+threadIdx.y;
if (x > width-1 || y > height-1) {
return;
}
float fxy = input[y*width+x];
float axy = average(input, width, height, x, y, size);
float gxy = 0.0f;
if (axy > 0.01f) {
gxy = pow(axy,alpha-beta)*pow(fxy,beta);
}
output[y*width+x] = gxy;
}
| ce26423e13c365f7c015d15273757be83a8a2770.cu | // ----------------------------------
// Copyright (c) 2011, Brown University
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// (1) Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// (3) Neither the name of Brown University nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY BROWN UNIVERSITY “AS IS” WITH NO
// WARRANTIES OR REPRESENTATIONS OF ANY KIND WHATSOEVER EITHER EXPRESS OR
// IMPLIED, INCLUDING WITHOUT LIMITATION ANY WARRANTY OF DESIGN OR
// MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, EACH OF WHICH ARE
// SPECIFICALLY DISCLAIMED, NOR ANY WARRANTY OR REPRESENTATIONS THAT THE
// SOFTWARE IS ERROR FREE OR THAT THE SOFTWARE WILL NOT INFRINGE ANY
// PATENT, COPYRIGHT, TRADEMARK, OR OTHER THIRD PARTY PROPRIETARY RIGHTS.
// IN NO EVENT SHALL BROWN UNIVERSITY BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
// OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY OR CAUSE OF ACTION, WHETHER IN CONTRACT,
// STRICT LIABILITY, TORT, NEGLIGENCE OR OTHERWISE, ARISING IN ANY WAY
// OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
// SUCH DAMAGE. ANY RECIPIENT OR USER OF THIS SOFTWARE ACKNOWLEDGES THE
// FOREGOING, AND ACCEPTS ALL RISKS AND LIABILITIES THAT MAY ARISE FROM
// THEIR USE OF THE SOFTWARE.
// ---------------------------------
/// \file ContrastFilter_kernels.cu
/// \author Andy Loomis
#include "ContrastFilter_kernels.h"
__global__
void contrast_filter_kernel(const float* input, float* output,
int width, int height,
float alpha, float beta, int size);
namespace xromm { namespace gpu {
void contrast_filter_apply(const float* input, float* output,
int width, int height,
float alpha, float beta, int size)
{
dim3 blockDim(16, 16);
dim3 gridDim((width+blockDim.x-1)/blockDim.x,
(height+blockDim.y-1)/blockDim.y);
contrast_filter_kernel<<<gridDim, blockDim>>>(input, output,
width, height,
alpha, beta, size);
}
} } // namespace xromm::cuda
__device__
float average(const float* input, int width, int height, int x, int y, int size)
{
float n = 0.0f;
float sum = 0.0f;
int minI = max(y-size/2, 0);
int maxI = min(y+(size+1)/2, height);
int minJ = max(x-size/2, 0);
int maxJ = min(x+(size+1)/2, width);
for (int i = minI; i < maxI; ++i) {
for (int j = minJ; j < maxJ; ++j) {
n += 1.0f;
sum += input[i*width+j];
}
}
return sum/n;
}
__global__
void contrast_filter_kernel(const float* input, float* output,
int width, int height,
float alpha, float beta, int size)
{
short x = blockIdx.x*blockDim.x+threadIdx.x;
short y = blockIdx.y*blockDim.y+threadIdx.y;
if (x > width-1 || y > height-1) {
return;
}
float fxy = input[y*width+x];
float axy = average(input, width, height, x, y, size);
float gxy = 0.0f;
if (axy > 0.01f) {
gxy = pow(axy,alpha-beta)*pow(fxy,beta);
}
output[y*width+x] = gxy;
}
|
5107689823b1b5c869ca76666c4b5052cee91d7a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
@generated d Tue Aug 13 16:45:09 2013
@author Mark Gates
*/
#include "common_magma.h"
#include <assert.h>
#define NB 64
/* =====================================================================
Batches dlacpy of multiple arrays;
y-dimension of grid is different arrays,
x-dimension of grid is blocks for each array.
Matrix is m x n, and is divided into block rows, each NB x n.
Each CUDA block has NB threads to handle one block row.
Each thread adds one row, iterating across all columns.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
TODO. Block in both directions, for large matrices.
E.g., each block does 64x64 tile, instead of 64xN tile.
*/
__global__ void
dgeadd_batched_kernel(
int m, int n,
double alpha,
const double * const *dAarray, int ldda,
double **dBarray, int lddb )
{
// dA and dB iterate across row i
const double *dA = dAarray[ blockIdx.y ];
double *dB = dBarray[ blockIdx.y ];
int i = blockIdx.x*blockDim.x + threadIdx.x;
if ( i < m ) {
dA += i;
dB += i;
const double *dAend = dA + n*ldda;
while( dA < dAend ) {
*dB = alpha*(*dA) + (*dB);
dA += ldda;
dB += lddb;
}
}
}
/* ===================================================================== */
extern "C" void
magmablas_dgeadd_batched(
magma_int_t m, magma_int_t n,
double alpha,
const double * const *dAarray, magma_int_t ldda,
double **dBarray, magma_int_t lddb,
magma_int_t batchCount )
{
/*
Purpose
=======
ZGEADD adds two sets of matrices, dAarray[i] = alpha*dAarray[i] + dBarray[i],
for i = 0, ..., batchCount-1.
Arguments
=========
M (input) INTEGER
The number of rows of each matrix dAarray[i]. M >= 0.
N (input) INTEGER
The number of columns of each matrix dAarray[i]. N >= 0.
ALPHA (input) COMPLEX DOUBLE PRECISION
The scalar alpha.
dAarray (input) array on GPU, dimension(batchCount), of pointers to arrays,
with each array a COMPLEX DOUBLE PRECISION array, dimension (LDDA,N)
The m by n matrices dAarray[i].
LDDA (input) INTEGER
The leading dimension of each array dAarray[i]. LDDA >= max(1,M).
dBarray (input/output) array on GPU, dimension(batchCount), of pointers to arrays,
with each array a COMPLEX DOUBLE PRECISION array, dimension (LDDB,N)
The m by n matrices dBarray[i].
LDDB (input) INTEGER
The leading dimension of each array dBarray[i]. LDDB >= max(1,M).
batchCount (input) INTEGER
The number of matrices to add; length of dAarray and dBarray.
batchCount >= 0.
===================================================================== */
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
else if ( batchCount < 0 )
info = -8;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || n == 0 || batchCount == 0 )
return;
dim3 threads( NB );
dim3 grid( (m + NB - 1)/NB, batchCount );
hipLaunchKernelGGL(( dgeadd_batched_kernel), dim3(grid), dim3(threads), 0, magma_stream ,
m, n, alpha, dAarray, ldda, dBarray, lddb );
}
| 5107689823b1b5c869ca76666c4b5052cee91d7a.cu | /*
-- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
@generated d Tue Aug 13 16:45:09 2013
@author Mark Gates
*/
#include "common_magma.h"
#include <assert.h>
#define NB 64
/* =====================================================================
Batches dlacpy of multiple arrays;
y-dimension of grid is different arrays,
x-dimension of grid is blocks for each array.
Matrix is m x n, and is divided into block rows, each NB x n.
Each CUDA block has NB threads to handle one block row.
Each thread adds one row, iterating across all columns.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
TODO. Block in both directions, for large matrices.
E.g., each block does 64x64 tile, instead of 64xN tile.
*/
__global__ void
dgeadd_batched_kernel(
int m, int n,
double alpha,
const double * const *dAarray, int ldda,
double **dBarray, int lddb )
{
// dA and dB iterate across row i
const double *dA = dAarray[ blockIdx.y ];
double *dB = dBarray[ blockIdx.y ];
int i = blockIdx.x*blockDim.x + threadIdx.x;
if ( i < m ) {
dA += i;
dB += i;
const double *dAend = dA + n*ldda;
while( dA < dAend ) {
*dB = alpha*(*dA) + (*dB);
dA += ldda;
dB += lddb;
}
}
}
/* ===================================================================== */
extern "C" void
magmablas_dgeadd_batched(
magma_int_t m, magma_int_t n,
double alpha,
const double * const *dAarray, magma_int_t ldda,
double **dBarray, magma_int_t lddb,
magma_int_t batchCount )
{
/*
Purpose
=======
ZGEADD adds two sets of matrices, dAarray[i] = alpha*dAarray[i] + dBarray[i],
for i = 0, ..., batchCount-1.
Arguments
=========
M (input) INTEGER
The number of rows of each matrix dAarray[i]. M >= 0.
N (input) INTEGER
The number of columns of each matrix dAarray[i]. N >= 0.
ALPHA (input) COMPLEX DOUBLE PRECISION
The scalar alpha.
dAarray (input) array on GPU, dimension(batchCount), of pointers to arrays,
with each array a COMPLEX DOUBLE PRECISION array, dimension (LDDA,N)
The m by n matrices dAarray[i].
LDDA (input) INTEGER
The leading dimension of each array dAarray[i]. LDDA >= max(1,M).
dBarray (input/output) array on GPU, dimension(batchCount), of pointers to arrays,
with each array a COMPLEX DOUBLE PRECISION array, dimension (LDDB,N)
The m by n matrices dBarray[i].
LDDB (input) INTEGER
The leading dimension of each array dBarray[i]. LDDB >= max(1,M).
batchCount (input) INTEGER
The number of matrices to add; length of dAarray and dBarray.
batchCount >= 0.
===================================================================== */
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
else if ( batchCount < 0 )
info = -8;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || n == 0 || batchCount == 0 )
return;
dim3 threads( NB );
dim3 grid( (m + NB - 1)/NB, batchCount );
dgeadd_batched_kernel<<< grid, threads, 0, magma_stream >>>(
m, n, alpha, dAarray, ldda, dBarray, lddb );
}
|
922e06a71c79139c52ec9dbd1ab2cdd3421fc398.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Matrix normalization.
* Compile with "gcc matrixNorm.c"
*/
/* ****** ADD YOUR CODE AT THE END OF THIS FILE. ******
* You need not submit the provided code.
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <sys/types.h>
#include <sys/times.h>
#include <sys/time.h>
#include <time.h>
/* Program Parameters */
#define MAXN 8000 /* Max value of N */
int N; /* Matrix size */
/* Matrices */
volatile float A[MAXN][MAXN], B[MAXN][MAXN];
/* junk */
#define randm() 4|2[uid]&3
/* Prototype */
void matrixNorm();
/* returns a seed for srand based on the time */
unsigned int time_seed() {
struct timeval t;
struct timezone tzdummy;
gettimeofday(&t, &tzdummy);
return (unsigned int)(t.tv_usec);
}
/* Set the program parameters from the command-line arguments */
void parameters(int argc, char **argv) {
int seed = 0; /* Random seed */
char uid[32]; /*User name */
/* Read command-line arguments */
srand(time_seed()); /* Randomize */
if (argc == 3) {
seed = atoi(argv[2]);
srand(seed);
printf("Random seed = %i\n", seed);
}
if (argc >= 2) {
N = atoi(argv[1]);
if (N < 1 || N > MAXN) {
printf("N = %i is out of range.\n", N);
exit(0);
}
}
else {
printf("Usage: %s <matrix_dimension> [random seed]\n",
argv[0]);
exit(0);
}
/* Print parameters */
printf("\nMatrix dimension N = %i.\n", N);
}
/* Initialize A and B*/
void initialize_inputs() {
int row, col;
printf("\nInitializing...\n");
for (col = 0; col < N; col++) {
for (row = 0; row < N; row++) {
A[row][col] = (float)rand() / 32768.0;
B[row][col] = 0.0;
}
}
}
/* Print input matrices */
void print_inputs() {
int row, col;
if (N < 10) {
printf("\nA =\n\t");
for (row = 0; row < N; row++) {
for (col = 0; col < N; col++) {
printf("%5.2f%s", A[row][col], (col < N-1) ? ", " : ";\n\t");
}
}
}
}
void print_B() {
int row, col;
if (N < 10) {
printf("\nB =\n\t");
for (row = 0; row < N; row++) {
for (col = 0; col < N; col++) {
printf("%1.10f%s", B[row][col], (col < N-1) ? ", " : ";\n\t");
}
}
}
}
int main(int argc, char **argv) {
/* Timing variables */
struct timeval etstart, etstop; /* Elapsed times using gettimeofday() */
struct timezone tzdummy;
clock_t etstart2, etstop2; /* Elapsed times using times() */
unsigned long long usecstart, usecstop;
struct tms cputstart, cputstop; /* CPU times for my processes */
/* Process program parameters */
parameters(argc, argv);
/* Initialize A and B */
initialize_inputs();
/* Print input matrices */
print_inputs();
/* Start Clock */
printf("\nStarting clock.\n");
gettimeofday(&etstart, &tzdummy);
etstart2 = times(&cputstart);
/* Gaussian Elimination */
matrixNorm();
/* Stop Clock */
gettimeofday(&etstop, &tzdummy);
etstop2 = times(&cputstop);
printf("Stopped clock.\n");
usecstart = (unsigned long long)etstart.tv_sec * 1000000 + etstart.tv_usec;
usecstop = (unsigned long long)etstop.tv_sec * 1000000 + etstop.tv_usec;
/* Display output */
print_B();
/* Display timing results */
printf("\nElapsed time = %g ms.\n",
(float)(usecstop - usecstart)/(float)1000);
printf("(CPU times are accurate to the nearest %g ms)\n",
1.0/(float)CLOCKS_PER_SEC * 1000.0);
printf("My total CPU time for parent = %g ms.\n",
(float)( (cputstop.tms_utime + cputstop.tms_stime) -
(cputstart.tms_utime + cputstart.tms_stime) ) /
(float)CLOCKS_PER_SEC * 1000);
printf("My system CPU time for parent = %g ms.\n",
(float)(cputstop.tms_stime - cputstart.tms_stime) /
(float)CLOCKS_PER_SEC * 1000);
printf("My total CPU time for child processes = %g ms.\n",
(float)( (cputstop.tms_cutime + cputstop.tms_cstime) -
(cputstart.tms_cutime + cputstart.tms_cstime) ) /
(float)CLOCKS_PER_SEC * 1000);
/* Contrary to the man pages, this appears not to include the parent */
printf("--------------------------------------------\n");
exit(0);
}
/* ------------------ Above Was Provided --------------------- */
/****** You will replace this routine with your own parallel version *******/
/* Provided global variables are MAXN, N, A[][] and B[][],
* defined in the beginning of this code. B[][] is initialized to zeros.
*/
float curr_mu = 0.0, curr_sigma = 0.0;
int curr_index = 0;
__global__ void Matrixaddition(float a[][N], float b[][N]){
int i = threadIdx.x;
int j = threadIdx.y;
printf("%d %d", i, j);
if(curr_sigma ==0)
b[i][curr_index]= 0.0;
else
b[i][curr_index] = (A[i][curr_index] - curr_mu) / curr_sigma;
}
void matrixNorm() {
int row, col, blocks =1;
float mu, sigma; // Mean and Standard Deviation
float (*pa)[N],(*pb)[N];
hipMalloc((void**)&pa,(N*N)*sizeof(float));
hipMalloc((void**)&pb,(N*N)*sizeof(float));
hipMemcpy(pa,A,(N*N)*sizeof(float),hipMemcpyHostToDevice);
hipMemcpy(pb,B,(N*N)*sizeof(float),hipMemcpyHostToDevice);
dim3 threadsPerBlock(N,N);
printf("Computing Parallel Program using Cuda.\n");
for (col=0; col < N; col++) {
mu = 0.0;
for (row=0; row < N; row++)
mu += A[row][col];
mu /= (float) N;
sigma = 0.0;
for (row=0; row < N; row++)
sigma += powf(A[row][col] - mu, 2.0);
sigma /= (float) N;
/* for (row=0; row < N; row++) {
if (sigma == 0.0)
B[row][col] = 0.0;
else
B[row][col] = (A[row][col] - mu) / sigma;
}*/
curr_mu = mu; curr_sigma = sigma;
curr_index = col;
hipLaunchKernelGGL(( Matrixaddition), dim3(threadsPerBlock), dim3(blocks), 0, 0, A,B,mu,sigma,col);
}
hipMemcpy(B,pb,(N*N)*sizeof(float),hipMemcpyDeviceToHost);
hipFree(pa);
hipFree(pb);
}
| 922e06a71c79139c52ec9dbd1ab2cdd3421fc398.cu | /* Matrix normalization.
* Compile with "gcc matrixNorm.c"
*/
/* ****** ADD YOUR CODE AT THE END OF THIS FILE. ******
* You need not submit the provided code.
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <sys/types.h>
#include <sys/times.h>
#include <sys/time.h>
#include <time.h>
/* Program Parameters */
#define MAXN 8000 /* Max value of N */
int N; /* Matrix size */
/* Matrices */
volatile float A[MAXN][MAXN], B[MAXN][MAXN];
/* junk */
#define randm() 4|2[uid]&3
/* Prototype */
void matrixNorm();
/* returns a seed for srand based on the time */
unsigned int time_seed() {
struct timeval t;
struct timezone tzdummy;
gettimeofday(&t, &tzdummy);
return (unsigned int)(t.tv_usec);
}
/* Set the program parameters from the command-line arguments */
void parameters(int argc, char **argv) {
int seed = 0; /* Random seed */
char uid[32]; /*User name */
/* Read command-line arguments */
srand(time_seed()); /* Randomize */
if (argc == 3) {
seed = atoi(argv[2]);
srand(seed);
printf("Random seed = %i\n", seed);
}
if (argc >= 2) {
N = atoi(argv[1]);
if (N < 1 || N > MAXN) {
printf("N = %i is out of range.\n", N);
exit(0);
}
}
else {
printf("Usage: %s <matrix_dimension> [random seed]\n",
argv[0]);
exit(0);
}
/* Print parameters */
printf("\nMatrix dimension N = %i.\n", N);
}
/* Initialize A and B*/
void initialize_inputs() {
int row, col;
printf("\nInitializing...\n");
for (col = 0; col < N; col++) {
for (row = 0; row < N; row++) {
A[row][col] = (float)rand() / 32768.0;
B[row][col] = 0.0;
}
}
}
/* Print input matrices */
void print_inputs() {
int row, col;
if (N < 10) {
printf("\nA =\n\t");
for (row = 0; row < N; row++) {
for (col = 0; col < N; col++) {
printf("%5.2f%s", A[row][col], (col < N-1) ? ", " : ";\n\t");
}
}
}
}
void print_B() {
int row, col;
if (N < 10) {
printf("\nB =\n\t");
for (row = 0; row < N; row++) {
for (col = 0; col < N; col++) {
printf("%1.10f%s", B[row][col], (col < N-1) ? ", " : ";\n\t");
}
}
}
}
int main(int argc, char **argv) {
/* Timing variables */
struct timeval etstart, etstop; /* Elapsed times using gettimeofday() */
struct timezone tzdummy;
clock_t etstart2, etstop2; /* Elapsed times using times() */
unsigned long long usecstart, usecstop;
struct tms cputstart, cputstop; /* CPU times for my processes */
/* Process program parameters */
parameters(argc, argv);
/* Initialize A and B */
initialize_inputs();
/* Print input matrices */
print_inputs();
/* Start Clock */
printf("\nStarting clock.\n");
gettimeofday(&etstart, &tzdummy);
etstart2 = times(&cputstart);
/* Gaussian Elimination */
matrixNorm();
/* Stop Clock */
gettimeofday(&etstop, &tzdummy);
etstop2 = times(&cputstop);
printf("Stopped clock.\n");
usecstart = (unsigned long long)etstart.tv_sec * 1000000 + etstart.tv_usec;
usecstop = (unsigned long long)etstop.tv_sec * 1000000 + etstop.tv_usec;
/* Display output */
print_B();
/* Display timing results */
printf("\nElapsed time = %g ms.\n",
(float)(usecstop - usecstart)/(float)1000);
printf("(CPU times are accurate to the nearest %g ms)\n",
1.0/(float)CLOCKS_PER_SEC * 1000.0);
printf("My total CPU time for parent = %g ms.\n",
(float)( (cputstop.tms_utime + cputstop.tms_stime) -
(cputstart.tms_utime + cputstart.tms_stime) ) /
(float)CLOCKS_PER_SEC * 1000);
printf("My system CPU time for parent = %g ms.\n",
(float)(cputstop.tms_stime - cputstart.tms_stime) /
(float)CLOCKS_PER_SEC * 1000);
printf("My total CPU time for child processes = %g ms.\n",
(float)( (cputstop.tms_cutime + cputstop.tms_cstime) -
(cputstart.tms_cutime + cputstart.tms_cstime) ) /
(float)CLOCKS_PER_SEC * 1000);
/* Contrary to the man pages, this appears not to include the parent */
printf("--------------------------------------------\n");
exit(0);
}
/* ------------------ Above Was Provided --------------------- */
/****** You will replace this routine with your own parallel version *******/
/* Provided global variables are MAXN, N, A[][] and B[][],
* defined in the beginning of this code. B[][] is initialized to zeros.
*/
float curr_mu = 0.0, curr_sigma = 0.0;
int curr_index = 0;
__global__ void Matrixaddition(float a[][N], float b[][N]){
int i = threadIdx.x;
int j = threadIdx.y;
printf("%d %d", i, j);
if(curr_sigma ==0)
b[i][curr_index]= 0.0;
else
b[i][curr_index] = (A[i][curr_index] - curr_mu) / curr_sigma;
}
void matrixNorm() {
int row, col, blocks =1;
float mu, sigma; // Mean and Standard Deviation
float (*pa)[N],(*pb)[N];
cudaMalloc((void**)&pa,(N*N)*sizeof(float));
cudaMalloc((void**)&pb,(N*N)*sizeof(float));
cudaMemcpy(pa,A,(N*N)*sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(pb,B,(N*N)*sizeof(float),cudaMemcpyHostToDevice);
dim3 threadsPerBlock(N,N);
printf("Computing Parallel Program using Cuda.\n");
for (col=0; col < N; col++) {
mu = 0.0;
for (row=0; row < N; row++)
mu += A[row][col];
mu /= (float) N;
sigma = 0.0;
for (row=0; row < N; row++)
sigma += powf(A[row][col] - mu, 2.0);
sigma /= (float) N;
/* for (row=0; row < N; row++) {
if (sigma == 0.0)
B[row][col] = 0.0;
else
B[row][col] = (A[row][col] - mu) / sigma;
}*/
curr_mu = mu; curr_sigma = sigma;
curr_index = col;
Matrixaddition<<<threadsPerBlock, blocks>>>(A,B,mu,sigma,col);
}
cudaMemcpy(B,pb,(N*N)*sizeof(float),cudaMemcpyDeviceToHost);
cudaFree(pa);
cudaFree(pb);
}
|
adf7cbd0e633edd7ce792a85e7d67a6d905cf215.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "sum_improved_atomic_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *arr = NULL;
hipMalloc(&arr, XSIZE*YSIZE);
int size = XSIZE*YSIZE;
int *sum = NULL;
hipMalloc(&sum, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
sum_improved_atomic_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, arr,size,sum);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
sum_improved_atomic_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, arr,size,sum);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
sum_improved_atomic_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, arr,size,sum);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | adf7cbd0e633edd7ce792a85e7d67a6d905cf215.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "sum_improved_atomic_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
/*
 * Benchmark driver for sum_improved_atomic_kernel.
 * argv[1]: how many rows of matrices_ (1..7) to sweep. For each
 * (matrix size, block shape) pair: one context-warming launch, 10 warmup
 * launches, then 1000 timed launches; prints [usecs,(BLOCKX,BLOCKY),(X,Y)].
 */
int main(int argc, char **argv) {
	cudaSetDevice(0);
	if (argc < 2) {
		// strtol(NULL, ...) is undefined behavior; fail loudly instead
		fprintf(stderr, "usage: %s <matrix_count>\n", argv[0]);
		return 1;
	}
	char* p;
	int matrix_len = strtol(argv[1], &p, 10);
	for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
		for (int block_looper = 0; block_looper < 20; block_looper++) {
			int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
			int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
			int size = XSIZE * YSIZE;
			// the original allocated only XSIZE*YSIZE BYTES for int buffers,
			// a 4x under-allocation; size in elements * sizeof(int) is required
			int *arr = NULL;
			cudaMalloc(&arr, size * sizeof(int));
			int *sum = NULL;
			cudaMalloc(&sum, size * sizeof(int));
			// round the grid up so the whole matrix is covered
			int iXSIZE = XSIZE;
			int iYSIZE = YSIZE;
			while (iXSIZE % BLOCKX != 0) {
				iXSIZE++;
			}
			while (iYSIZE % BLOCKY != 0) {
				iYSIZE++;
			}
			dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
			dim3 threadBlock(BLOCKX, BLOCKY);
			cudaFree(0);	// no-op call that forces context creation before timing
			sum_improved_atomic_kernel<<<gridBlock,threadBlock>>>(arr, size, sum);
			cudaDeviceSynchronize();
			for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {	// warmup
				sum_improved_atomic_kernel<<<gridBlock,threadBlock>>>(arr, size, sum);
			}
			auto start = steady_clock::now();
			for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
				sum_improved_atomic_kernel<<<gridBlock,threadBlock>>>(arr, size, sum);
			}
			// launches are asynchronous: wait for completion so the interval
			// measures kernel execution, not just launch overhead
			cudaDeviceSynchronize();
			auto end = steady_clock::now();
			auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
			cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
			// release per-configuration buffers (the original leaked both every
			// iteration of the sweep)
			cudaFree(arr);
			cudaFree(sum);
		}
	}
}
ac54fdaed6d62d90932a70bd63b78ddbfba56358.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <windows.h>
#include <time.h>
#include <opencv2/opencv.hpp>
#include <math.h>
#include <iostream>
#include <device_launch_parameters.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <helper_timer.h>
#include <hip/device_functions.h>
#include <sm_20_atomic_functions.h>
using namespace cv;
#define SPLIT_SIZE_X 32
#define SPLIT_SIZE_Y 24
#define SPLIT_SIZE_X2 16
#define SPLIT_SIZE_Y2 12
#define BLOCK_SIZE_X 36
#define BLOCK_SIZE_Y 28
#define IS_NOT_EDGE(a) (a < min_val)
#define IS_STRONG_EDGE(a) (a >= max_val)
#define IS_WEAK_EDGE(a) (a >= min_val && a < max_val)
/*canny using cuda*/
void CUDA_Canny();
__global__ void CUDA_GaussianAndSobel(unsigned char* img, int width, int height, unsigned char* output_sobel, short* output_gradient);
__device__ void CUDA_Gaussian(unsigned char* img, int width, int height, int idx, unsigned char* output);
__device__ void CUDA_Sobel(unsigned char* img, int width, int height, int idx, unsigned char* output_sobel, short* gradient);
__global__ void CUDA_NonMaxSuppress(unsigned char* sobel, int width, int height, short* gradient, unsigned char* output);
__global__ void CUDA_DoubleThreshold(unsigned char* sobel, int width, int height, int min_val, int max_val, unsigned char* canny);
__device__ unsigned char CUDA_GetPixelVal(unsigned char* img, int width, int height, int i, int j);
__device__ short GetGradientDirection(int sobel_x, int sobel_y);
__device__ void CheckAndConvert(unsigned char* img, int width, int height, int i, int j, int min_val, int max_val,
unsigned short* weak_stack, unsigned short* stack_index, unsigned char* canny);
__global__ void CUDA_DoubleThreshold2(unsigned char* sobel, int width, int height, int min_val, int max_val, unsigned char* canny);
void DisplayGradient(short* gradient, int width, int height);
unsigned char GetPixelVal(unsigned char* img, int width, int height, int i, int j);
void NonMaxSuppress(unsigned char* sobel, int width, int height, short* gradient, unsigned char* output);
void DoubleThreshold(unsigned char* sobel, int width, int height, int min_val, int max_val, unsigned char* output);
void IsWeakEdge(unsigned char* sobel, int width, int height, int min_val, int max_val, int i, int j, unsigned int* stack, unsigned int* top, unsigned char* visited, unsigned char* output);
__device__ void CUDA_SubDoubleThreshold(unsigned char* sobel, int width, int height, int min_val, int max_val,
unsigned int* weak_stack, unsigned int* stack_top, unsigned char* output, unsigned char* visited);
__device__ void CUDA_IsWeakEdge(unsigned char* sobel, int width, int height, int min_val, int max_val, int i, int j, unsigned int* stack, unsigned int* top, unsigned char* output, unsigned char* visited);
__global__ void CUDA_CheckEdge(unsigned char* sobel, int width, int height, int min_val, int max_val, int i, int j, unsigned short* stack, unsigned short* top, unsigned char* output);
/* Entry point: announces the demo and runs the GPU Canny loop until the
 * user presses 'q' inside CUDA_Canny(). */
int main(void)
{
	printf("CANNY_CUDA\n");
	CUDA_Canny();
	//system("pause");
	return 0;
}
/*
 * Runs the full Canny edge-detection pipeline on the GPU in a loop:
 *   load 640x480 grayscale frame -> fused gaussian+sobel ->
 *   non-maximum suppression -> hysteresis double threshold,
 * displaying the result each frame until 'q' is pressed.
 * Thresholds (50, 70) are hard-coded, matching the original.
 */
void CUDA_Canny()
{
	int width = 640;
	int height = 480;
	dim3 block_size_extended(BLOCK_SIZE_X, BLOCK_SIZE_Y);
	dim3 block_size_normal(SPLIT_SIZE_X, SPLIT_SIZE_Y);
	dim3 block_size_normal2(SPLIT_SIZE_X2, SPLIT_SIZE_Y2);
	dim3 grid_size(width / SPLIT_SIZE_X, height / SPLIT_SIZE_Y);
	dim3 grid_size2(width / SPLIT_SIZE_X2, height / SPLIT_SIZE_Y2);
	Mat img_src, img_sobel, img_gradient, img_canny;
	VideoCapture camera(0);
	/* host buffers */
	unsigned char* cpu_img = new unsigned char[width * height];
	unsigned char* cpu_img2 = new unsigned char[width * height];
	unsigned char* cpu_img3 = new unsigned char[width * height];
	short* cpu_gradient = new short[width * height];
	/* device buffers */
	unsigned char* gpu_img;
	hipMalloc(&gpu_img, width * height * sizeof(unsigned char));
	unsigned char* gpu_sobel;
	hipMalloc(&gpu_sobel, width * height * sizeof(unsigned char));
	short* gpu_gradient;
	hipMalloc(&gpu_gradient, width * height * sizeof(short));
	unsigned char* gpu_canny;
	hipMalloc(&gpu_canny, width * height * sizeof(unsigned char));
	StopWatchInterface * timer_cublas = NULL;
	/* create the timer once; the original re-created it every frame and
	   leaked the previous instance */
	sdkCreateTimer(&timer_cublas);
	while (1)
	{
		//camera >> img_src;
		img_src = imread("F:/img_src/lena.jpg");
		resize(img_src, img_src, Size(width, height), 0, 0);
		cvtColor(img_src, img_src, CV_BGR2GRAY);
		//imshow("img_src", img_src);
		sdkResetTimer(&timer_cublas);	/* keep per-frame timings independent */
		sdkStartTimer(&timer_cublas);
		/* 1. copy frame to device */
		hipMemcpy(gpu_img, img_src.data, width * height * sizeof(unsigned char), hipMemcpyHostToDevice);
		/* 2. fused gaussian blur + sobel (magnitude into gpu_sobel,
		      direction into gpu_gradient) */
		CUDA_GaussianAndSobel << <grid_size, block_size_extended >> > (gpu_img, width, height, gpu_sobel, gpu_gradient);
		hipDeviceSynchronize();
		/* 3. non-maximum suppression (in-place on gpu_sobel) */
		CUDA_NonMaxSuppress << <grid_size, block_size_normal >> > (gpu_sobel, width, height, gpu_gradient, gpu_sobel);
		hipDeviceSynchronize();
		/* 4. hysteresis double threshold; launched with one thread per
		      grid cell (sequential flood fill per tile) */
		CUDA_DoubleThreshold2 << <grid_size2, dim3(1,1) >> > (gpu_sobel, width, height, 50, 70, gpu_canny);
		/* copy result back (blocking hipMemcpy also waits for step 4) */
		hipMemcpy(cpu_img, gpu_canny, width * height * sizeof(unsigned char), hipMemcpyDeviceToHost);
		hipDeviceSynchronize();
		sdkStopTimer(&timer_cublas);
		printf("\ngpu_done %.3f(ms)\n\n", sdkGetTimerValue(&timer_cublas));
		img_sobel = Mat(Size(width, height), CV_8UC1, cpu_img);
		resize(img_sobel, img_sobel, Size(640, 480), 0, 0);
		imshow("img_sobel", img_sobel);
		if ('q' == waitKey(1))
		{
			destroyAllWindows();
			/* these buffers were allocated with new[]; the original released
			   them with free(), which is undefined behavior */
			delete[] cpu_img;
			cpu_img = NULL;
			delete[] cpu_img2;
			cpu_img2 = NULL;
			delete[] cpu_img3;
			cpu_img3 = NULL;
			delete[] cpu_gradient;
			cpu_gradient = NULL;
			hipFree(gpu_img);
			hipFree(gpu_sobel);
			hipFree(gpu_gradient);
			hipFree(gpu_canny);
			sdkDeleteTimer(&timer_cublas);
			break;
		}
	}
}
/*
 * Fused Gaussian blur + Sobel kernel.
 * Expected launch: grid (width/SPLIT_SIZE_X, height/SPLIT_SIZE_Y), block
 * BLOCK_SIZE_X x BLOCK_SIZE_Y (the SPLIT tile plus a 2-pixel halo on each
 * side). Each block stages its haloed tile in shared memory, blurs it,
 * applies Sobel, then only the interior (non-halo) threads write the
 * magnitude and gradient direction for their pixel.
 */
__global__ void CUDA_GaussianAndSobel(unsigned char* img, int width, int height, unsigned char* output_sobel, short* output_gradient)
{
	__shared__ unsigned char cache[(BLOCK_SIZE_X) * (BLOCK_SIZE_Y)];
	__shared__ unsigned char gauss[(BLOCK_SIZE_X) * (BLOCK_SIZE_Y)];
	__shared__ unsigned char sobel[(BLOCK_SIZE_X) * (BLOCK_SIZE_Y)];
	short gradient = 0;
	/* stage this block's tile (shifted by -2,-2 for the halo) into shared
	   memory; out-of-image reads come back as 0 */
	int raw_index = SPLIT_SIZE_X * SPLIT_SIZE_Y * blockIdx.y * gridDim.x + blockIdx.x * SPLIT_SIZE_X + SPLIT_SIZE_X * gridDim.x * threadIdx.y + threadIdx.x;
	int pixel_val = CUDA_GetPixelVal(img, width, height, raw_index / width - 2, raw_index % width - 2);
	int cache_index = blockDim.x * threadIdx.y + threadIdx.x;
	cache[cache_index] = pixel_val;
	__syncthreads();
	/* gaussian blur on the shared tile */
	CUDA_Gaussian(cache, blockDim.x, blockDim.y, cache_index, gauss);
	__syncthreads();
	/* sobel magnitude + per-thread gradient direction */
	CUDA_Sobel(gauss, blockDim.x, blockDim.y, cache_index, sobel, &gradient);
	/* halo threads produced only intermediate data; they write nothing */
	if (threadIdx.y <= 1 || threadIdx.y >= blockDim.y - 2 ||
		threadIdx.x <= 1 || threadIdx.x >= blockDim.x - 2)
		return;
	int new_id = blockIdx.y * SPLIT_SIZE_X * SPLIT_SIZE_Y * gridDim.x + (threadIdx.y - 2) * SPLIT_SIZE_X * gridDim.x + blockIdx.x * SPLIT_SIZE_X + (threadIdx.x - 2);
	/* store interior results to global memory */
	output_gradient[new_id] = gradient;
	output_sobel[new_id] = sobel[cache_index];
}
/*
 * 3x3 Gaussian blur of one pixel of the shared tile.
 * img is the blockDim-sized shared tile; idx is this thread's slot.
 * Kernel weights sum to ~1.0 (0.20418 center, 0.12384 edge, 0.07511 corner);
 * out-of-tile neighbours read as 0, slightly darkening tile borders.
 */
__device__ void CUDA_Gaussian(unsigned char* img, int width, int height, int idx, unsigned char* output)
{
	int new_pixel_value = 0;
	new_pixel_value = CUDA_GetPixelVal(img, width, height, threadIdx.y - 1, threadIdx.x - 1) * 0.07511 +
		CUDA_GetPixelVal(img, width, height, threadIdx.y - 1, threadIdx.x ) * 0.12384 +
		CUDA_GetPixelVal(img, width, height, threadIdx.y - 1, threadIdx.x + 1) * 0.07511 +
		CUDA_GetPixelVal(img, width, height, threadIdx.y , threadIdx.x - 1) * 0.12384 +
		CUDA_GetPixelVal(img, width, height, threadIdx.y , threadIdx.x ) * 0.20418 +
		CUDA_GetPixelVal(img, width, height, threadIdx.y , threadIdx.x + 1) * 0.12384 +
		CUDA_GetPixelVal(img, width, height, threadIdx.y + 1, threadIdx.x - 1) * 0.07511 +
		CUDA_GetPixelVal(img, width, height, threadIdx.y + 1, threadIdx.x ) * 0.12384 +
		CUDA_GetPixelVal(img, width, height, threadIdx.y + 1, threadIdx.x + 1) * 0.07511;
	output[idx] = new_pixel_value;
}
/*
 * 3x3 Sobel on one pixel of the shared tile: writes the clamped gradient
 * magnitude sqrt(gx^2+gy^2) (capped at 255) to output_sobel[idx] and the
 * direction in degrees to *gradient.
 * NOTE(review): the filter assigned to sobel_x differences rows (a vertical
 * gradient) and sobel_y differences columns -- the names appear swapped
 * relative to the usual convention, but they are used consistently with
 * GetGradientDirection's argument order below.
 */
__device__ void CUDA_Sobel(unsigned char* img, int width, int height, int idx, unsigned char* output_sobel, short* gradient)
{
	int sobel_x = 0;
	int sobel_y = 0;
	int sobel = 0;
	sobel_x = CUDA_GetPixelVal(img, width, height, threadIdx.y - 1, threadIdx.x - 1) * (1) +
		CUDA_GetPixelVal(img, width, height, threadIdx.y - 1, threadIdx.x ) * (2) +
		CUDA_GetPixelVal(img, width, height, threadIdx.y - 1, threadIdx.x + 1) * (1) +
		CUDA_GetPixelVal(img, width, height, threadIdx.y + 1, threadIdx.x - 1) * (-1) +
		CUDA_GetPixelVal(img, width, height, threadIdx.y + 1, threadIdx.x ) * (-2) +
		CUDA_GetPixelVal(img, width, height, threadIdx.y + 1, threadIdx.x + 1) * (-1);
	sobel_y = CUDA_GetPixelVal(img, width, height, threadIdx.y - 1, threadIdx.x - 1) * (-1) +
		CUDA_GetPixelVal(img, width, height, threadIdx.y - 1, threadIdx.x + 1) * (1) +
		CUDA_GetPixelVal(img, width, height, threadIdx.y , threadIdx.x - 1) * (-2) +
		CUDA_GetPixelVal(img, width, height, threadIdx.y , threadIdx.x + 1) * (2) +
		CUDA_GetPixelVal(img, width, height, threadIdx.y + 1, threadIdx.x - 1) * (-1) +
		CUDA_GetPixelVal(img, width, height, threadIdx.y + 1, threadIdx.x + 1) * (1);
	sobel = sqrtf((float)(sobel_x * sobel_x + sobel_y * sobel_y));
	sobel = sobel > 255 ? 255 : sobel;	/* clamp magnitude into uchar range */
	output_sobel[idx] = sobel;
	*gradient = GetGradientDirection(sobel_x, sobel_y);
}
/*
 * Non-maximum suppression: keeps a pixel only if its sobel magnitude is a
 * local maximum along its gradient direction; the magnitude at the two
 * sub-pixel positions along the direction is linearly interpolated from
 * the four neighbours g0..g3. One thread per pixel; in-place operation is
 * tolerated by the caller (sobel and output may alias).
 * Direction is folded from [-180,180) into [0,180] before bucketing.
 */
__global__ void CUDA_NonMaxSuppress(unsigned char* sobel, int width, int height, short* gradient, unsigned char* output)
{
	int id = (blockIdx.x + blockIdx.y * gridDim.x) * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
	if (id >= width * height)
		return;
	int i = id / width;
	int j = id % width;
	float weight = 0;
	int g0, g1, g2, g3;
	int temp_gradient = gradient[id] < 0 ? gradient[id] + 180 : gradient[id];
	if (temp_gradient >= 0 && temp_gradient < 45)
	{
		weight = temp_gradient / 45.0;
		g0 = CUDA_GetPixelVal(sobel, width, height, i , j + 1);
		g1 = CUDA_GetPixelVal(sobel, width, height, i - 1, j + 1);
		g2 = CUDA_GetPixelVal(sobel, width, height, i , j - 1);
		g3 = CUDA_GetPixelVal(sobel, width, height, i + 1, j - 1);
	}
	else if (temp_gradient >= 45 && temp_gradient < 90)
	{
		weight = (90 - temp_gradient) / 45.0;
		g0 = CUDA_GetPixelVal(sobel, width, height, i - 1, j );
		g1 = CUDA_GetPixelVal(sobel, width, height, i - 1, j + 1);
		g2 = CUDA_GetPixelVal(sobel, width, height, i + 1, j );
		g3 = CUDA_GetPixelVal(sobel, width, height, i + 1, j - 1);
	}
	else if (temp_gradient >= 90 && temp_gradient < 135)
	{
		weight = (temp_gradient - 90) / 45.0;
		g0 = CUDA_GetPixelVal(sobel, width, height, i - 1, j );
		g1 = CUDA_GetPixelVal(sobel, width, height, i - 1, j - 1);
		g2 = CUDA_GetPixelVal(sobel, width, height, i + 1, j );
		g3 = CUDA_GetPixelVal(sobel, width, height, i + 1, j + 1);
	}
	else if (temp_gradient >= 135 && temp_gradient <= 180)
	{
		weight = (180 - temp_gradient) / 45.0;
		g0 = CUDA_GetPixelVal(sobel, width, height, i , j - 1);
		g1 = CUDA_GetPixelVal(sobel, width, height, i - 1, j - 1);
		g2 = CUDA_GetPixelVal(sobel, width, height, i , j + 1);
		g3 = CUDA_GetPixelVal(sobel, width, height, i + 1, j + 1);
	}
	/* interpolated magnitudes on either side along the gradient direction */
	int dot1 = g0 * (1 - weight) + g1 * weight;
	int dot2 = g2 * (1 - weight) + g3 * weight;
	/* >= keeps plateau pixels; the host-side NonMaxSuppress below uses the
	   strict >, so the two implementations can differ on plateaus */
	if (sobel[id] >= dot1 && sobel[id] >= dot2)
		output[id] = sobel[id];
	else
		output[id] = 0;
}
/*
 * Per-tile hysteresis thresholding (EXPERIMENTAL -- the caller uses
 * CUDA_DoubleThreshold2 instead). Each block stages its tile into shared
 * memory; each strong-edge thread flood-fills connected weak edges via a
 * private stack.
 * NOTE(review): this kernel has known hazards --
 *  - the __syncthreads() inside the while loop is reached only by
 *    strong-edge threads (divergent branch): undefined behavior;
 *  - threads race on cache2 with no coordination;
 *  - cache2 entries for weak pixels never visited by any fill remain
 *    uninitialized but are still copied out at the end.
 */
__global__ void CUDA_DoubleThreshold(unsigned char* sobel, int width, int height, int min_val, int max_val, unsigned char* canny)
{
	__shared__ unsigned char cache[SPLIT_SIZE_X2 * SPLIT_SIZE_Y2];
	__shared__ unsigned char cache2[SPLIT_SIZE_X2 * SPLIT_SIZE_Y2];
	int raw_index = blockDim.x * blockDim.y * blockIdx.y * gridDim.x + blockIdx.x * blockDim.x + blockDim.x * gridDim.x * threadIdx.y + threadIdx.x;
	int pixel_val = CUDA_GetPixelVal(sobel, width, height, raw_index / width, raw_index % width);
	int cache_index = blockDim.x * threadIdx.y + threadIdx.x;
	cache[cache_index] = pixel_val;
	__syncthreads();
	if (IS_STRONG_EDGE(cache[cache_index]))
	{
		/* per-thread DFS stack of tile-local indices (registers/local mem) */
		unsigned short weak_stack[SPLIT_SIZE_X2 * SPLIT_SIZE_Y2];
		unsigned short stack_cnt = 0;
		CheckAndConvert(cache, blockDim.x, blockDim.y, threadIdx.y, threadIdx.x, min_val, max_val, weak_stack, &stack_cnt, cache2);
		unsigned short center_index = 0;
		while(stack_cnt > 0)
		{
			center_index = weak_stack[stack_cnt - 1];
			stack_cnt--;
			/* visit the 8-neighbourhood of the popped pixel */
			CheckAndConvert(cache, blockDim.x, blockDim.y, center_index / blockDim.x - 1, center_index % blockDim.x - 1, min_val, max_val, weak_stack, &stack_cnt, cache2);
			CheckAndConvert(cache, blockDim.x, blockDim.y, center_index / blockDim.x - 1, center_index % blockDim.x , min_val, max_val, weak_stack, &stack_cnt, cache2);
			CheckAndConvert(cache, blockDim.x, blockDim.y, center_index / blockDim.x - 1, center_index % blockDim.x + 1, min_val, max_val, weak_stack, &stack_cnt, cache2);
			CheckAndConvert(cache, blockDim.x, blockDim.y, center_index / blockDim.x , center_index % blockDim.x - 1, min_val, max_val, weak_stack, &stack_cnt, cache2);
			CheckAndConvert(cache, blockDim.x, blockDim.y, center_index / blockDim.x , center_index % blockDim.x + 1, min_val, max_val, weak_stack, &stack_cnt, cache2);
			CheckAndConvert(cache, blockDim.x, blockDim.y, center_index / blockDim.x + 1, center_index % blockDim.x - 1, min_val, max_val, weak_stack, &stack_cnt, cache2);
			CheckAndConvert(cache, blockDim.x, blockDim.y, center_index / blockDim.x + 1, center_index % blockDim.x , min_val, max_val, weak_stack, &stack_cnt, cache2);
			CheckAndConvert(cache, blockDim.x, blockDim.y, center_index / blockDim.x + 1, center_index % blockDim.x + 1, min_val, max_val, weak_stack, &stack_cnt, cache2);
			__syncthreads();
		}
	}
	else if (IS_NOT_EDGE(cache[cache_index]))
	{
		cache2[cache_index] = 0;
	}
	int new_id = blockIdx.y * blockDim.x * blockDim.y * gridDim.x + threadIdx.y * blockDim.x * gridDim.x + blockIdx.x * blockDim.x + threadIdx.x;
	canny[new_id] = cache2[cache_index];
	__syncthreads();
}
/*
 * Helper for CUDA_DoubleThreshold: if tile pixel (i, j) is a weak or strong
 * edge, mark it 255 in canny and push its tile index on the stack;
 * otherwise mark it 0.
 * NOTE(review): temp_index = width*i + j is written to canny/weak_stack
 * without clamping, so i = -1 or j = width (passed in by the neighbour
 * visits) produces out-of-tile shared-memory writes; CUDA_GetPixelVal only
 * guards the READ. Also nothing prevents re-pushing the same pixel
 * (no visited set), so the fill can loop. TODO: confirm before reuse.
 */
__device__ void CheckAndConvert(unsigned char* img, int width, int height, int i, int j, int min_val, int max_val,
	unsigned short* weak_stack, unsigned short* stack_index, unsigned char* canny)
{
	int temp_index = width * i + j;
	if (IS_WEAK_EDGE(CUDA_GetPixelVal(img, width, height, i, j)) ||
		IS_STRONG_EDGE(CUDA_GetPixelVal(img, width, height, i, j)))
	{
		weak_stack[*stack_index] = temp_index;
		(*stack_index)++;
		canny[temp_index] = 255;
	}
	else
	{
		canny[temp_index] = 0;
	}
}
/*
 * Hysteresis double threshold, one sequential worker per tile: the caller
 * launches this with block size (1,1), so the single thread of each block
 * copies its SPLIT_SIZE_X2 x SPLIT_SIZE_Y2 tile into shared memory, runs
 * the sequential stack-based hysteresis (CUDA_SubDoubleThreshold), and
 * copies the classified tile back out.
 * NOTE(review): edges crossing tile boundaries are not tracked, since each
 * tile is thresholded independently.
 */
__global__ void CUDA_DoubleThreshold2(unsigned char* sobel, int width, int height, int min_val, int max_val, unsigned char* canny)
{
	__shared__ unsigned char cache[SPLIT_SIZE_X2 * SPLIT_SIZE_Y2];
	__shared__ unsigned char output[SPLIT_SIZE_X2 * SPLIT_SIZE_Y2];
	__shared__ unsigned int weak_stack[SPLIT_SIZE_X2 * SPLIT_SIZE_Y2];
	__shared__ unsigned char visited[SPLIT_SIZE_X2 * SPLIT_SIZE_Y2];
	unsigned int stack_top = 0;
	memset(visited, 0, SPLIT_SIZE_X2 * SPLIT_SIZE_Y2);	/* safe: one thread per block */
	int raw_index = SPLIT_SIZE_X2 * SPLIT_SIZE_Y2 * blockIdx.y * gridDim.x + blockIdx.x * SPLIT_SIZE_X2 + SPLIT_SIZE_X2 * gridDim.x * threadIdx.y + threadIdx.x;
	int i, j, new_id;
	/* stage the whole tile into shared memory */
	for (i = 0; i < SPLIT_SIZE_Y2; ++i)
	{
		for (j = 0; j < SPLIT_SIZE_X2; ++j)
		{
			cache[i * SPLIT_SIZE_X2 + j] = CUDA_GetPixelVal(sobel, width, height, raw_index / width + i, raw_index % width + j);
		}
	}
	CUDA_SubDoubleThreshold(cache, SPLIT_SIZE_X2, SPLIT_SIZE_Y2, min_val, max_val, weak_stack, &stack_top, output, visited);
	/* copy the classified tile back to the global canny image */
	for (i = 0; i < SPLIT_SIZE_Y2; ++i)
	{
		for (j = 0; j < SPLIT_SIZE_X2; ++j)
		{
			new_id = blockIdx.y * SPLIT_SIZE_X2 * SPLIT_SIZE_Y2 * gridDim.x + i * SPLIT_SIZE_X2 * gridDim.x + blockIdx.x * SPLIT_SIZE_X2 + j;
			canny[new_id] = output[i * SPLIT_SIZE_X2 + j];
		}
	}
	__syncthreads();
}
/* Bounds-checked device-side pixel read: returns img[i][j], or 0 when
 * (i, j) falls outside the width x height image (implicit zero padding). */
__device__ unsigned char CUDA_GetPixelVal(unsigned char* img, int width, int height, int i, int j)
{
	bool inside = (i >= 0) && (i < height) && (j >= 0) && (j < width);
	return inside ? img[i * width + j] : 0;
}
/*
 * Converts sobel responses to a direction in whole degrees (truncated to
 * short), range (-180, 180].
 * NOTE(review): atan2f's convention is atan2f(y, x), but sobel_x is passed
 * first here -- presumably deliberate, pairing with CUDA_Sobel where the
 * x/y filter names are also swapped; verify before changing either side.
 * The int arguments are implicitly converted to float.
 */
__device__ short GetGradientDirection(int sobel_x, int sobel_y)
{
	short gradient = (atan2f(sobel_x, sobel_y) / 3.1415926 * 180.0);
	//gradient = gradient < 0 ? gradient + 180 : gradient;
	return gradient;
}
/*
 * Debug visualization: paints each pixel by the 45-degree bucket of its
 * absolute gradient direction and shows the result in a "gradient" window.
 *   [0,45) -> blue, [45,90) -> green, [90,135) -> red, [135,180] -> gray.
 */
void DisplayGradient(short* gradient, int width, int height)
{
	Mat img = Mat::zeros(Size(width, height), CV_8UC3);
	for (int i = 0; i < height; i++)
	{
		for (int j = 0; j < width; j++)
		{
			int g = abs(gradient[i * width + j]);	/* fold sign away once */
			if (g < 45)
				img.at<Vec3b>(i, j) = Vec3b(255, 0, 0);
			else if (g < 90)
				img.at<Vec3b>(i, j) = Vec3b(0, 255, 0);
			else if (g < 135)
				img.at<Vec3b>(i, j) = Vec3b(0, 0, 255);
			else if (g <= 180)
				img.at<Vec3b>(i, j) = Vec3b(128, 128, 128);
		}
	}
	imshow("gradient", img);
}
/* Bounds-checked host-side pixel read: img[i][j], or 0 when (i, j) is
 * outside the width x height image (implicit zero padding). */
unsigned char GetPixelVal(unsigned char* img, int width, int height, int i, int j)
{
	if (i < 0 || i >= height || j < 0 || j >= width)
		return 0;
	return img[i * width + j];
}
/*
 * CPU reference implementation of non-maximum suppression (mirrors
 * CUDA_NonMaxSuppress): a pixel survives only if its magnitude beats the
 * interpolated magnitudes at the two positions along its gradient direction.
 * NOTE(review): the final comparison uses strict > while the GPU kernel
 * uses >= -- plateau pixels are treated differently; confirm which is
 * intended before using this as a correctness reference.
 */
void NonMaxSuppress(unsigned char* sobel, int width, int height, short* gradient, unsigned char* output)
{
	for (int i = 0; i < height; i++)
	{
		for (int j = 0; j < width; j++)
		{
			int id = i * width + j;
			float weight = 0;
			int g0, g1, g2, g3;
			/* fold direction from [-180,180) into [0,180] */
			int temp_gradient = gradient[id] < 0 ? gradient[id] + 180 : gradient[id];
			if (temp_gradient >= 0 && temp_gradient < 45)
			{
				weight = temp_gradient / 45.0;
				g0 = GetPixelVal(sobel, width, height, i, j + 1);
				g1 = GetPixelVal(sobel, width, height, i - 1, j + 1);
				g2 = GetPixelVal(sobel, width, height, i, j - 1);
				g3 = GetPixelVal(sobel, width, height, i + 1, j - 1);
			}
			else if (temp_gradient >= 45 && temp_gradient < 90)
			{
				weight = (90 - temp_gradient) / 45.0;
				g0 = GetPixelVal(sobel, width, height, i - 1, j);
				g1 = GetPixelVal(sobel, width, height, i - 1, j + 1);
				g2 = GetPixelVal(sobel, width, height, i + 1, j);
				g3 = GetPixelVal(sobel, width, height, i + 1, j - 1);
			}
			else if (temp_gradient >= 90 && temp_gradient < 135)
			{
				weight = (temp_gradient - 90) / 45.0;
				g0 = GetPixelVal(sobel, width, height, i - 1, j);
				g1 = GetPixelVal(sobel, width, height, i - 1, j - 1);
				g2 = GetPixelVal(sobel, width, height, i + 1, j);
				g3 = GetPixelVal(sobel, width, height, i + 1, j + 1);
			}
			else if (temp_gradient >= 135 && temp_gradient <= 180)
			{
				weight = (180 - temp_gradient) / 45.0;
				g0 = GetPixelVal(sobel, width, height, i, j - 1);
				g1 = GetPixelVal(sobel, width, height, i - 1, j - 1);
				g2 = GetPixelVal(sobel, width, height, i, j + 1);
				g3 = GetPixelVal(sobel, width, height, i + 1, j + 1);
			}
			/* interpolated magnitudes on either side of the pixel */
			int dot1 = g0 * (1 - weight) + g1 * weight;
			int dot2 = g2 * (1 - weight) + g3 * weight;
			if (sobel[id] > dot1 && sobel[id] > dot2)
				output[id] = sobel[id];
			else
				output[id] = 0;
		}
	}
}
/*
 * CPU reference hysteresis thresholding: every strong-edge pixel seeds a
 * DFS (explicit stack of flat pixel indices) that promotes all 8-connected
 * weak pixels to edges; pixels below min_val are cleared. visited[] keeps
 * each pixel from being processed twice across seeds.
 */
void DoubleThreshold(unsigned char* sobel, int width, int height, int min_val, int max_val, unsigned char* output)
{
	unsigned int* weak_stack = new unsigned int[width * height];
	unsigned char* visited = new unsigned char[width * height];
	unsigned int stack_top = 0;
	unsigned int center_index = 0;
	memset(visited, 0, width * height);
	for (int i = 0; i < height; i++)
	{
		for (int j = 0; j < width; j++)
		{
			if (IS_STRONG_EDGE(GetPixelVal(sobel, width, height, i, j)) && visited[i * width + j] != 1)
			{
				/* seed the fill at the strong pixel, then expand the
				   8-neighbourhood of every stacked pixel */
				IsWeakEdge(sobel, width, height, min_val, max_val, i, j, weak_stack, &stack_top, visited, output);
				while (stack_top > 0)
				{
					center_index = weak_stack[stack_top - 1];
					stack_top--;
					IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width - 1, center_index % width - 1, weak_stack, &stack_top, visited, output);
					IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width - 1, center_index % width , weak_stack, &stack_top, visited, output);
					IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width - 1, center_index % width + 1, weak_stack, &stack_top, visited, output);
					IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width , center_index % width - 1, weak_stack, &stack_top, visited, output);
					IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width , center_index % width + 1, weak_stack, &stack_top, visited, output);
					IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width + 1, center_index % width - 1, weak_stack, &stack_top, visited, output);
					IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width + 1, center_index % width , weak_stack, &stack_top, visited, output);
					IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width + 1, center_index % width + 1, weak_stack, &stack_top, visited, output);
				}
			}
			else if (IS_NOT_EDGE(GetPixelVal(sobel, width, height, i, j)))
			{
				output[i * width + j] = 0;
			}
		}
	}
	delete[] weak_stack;
	weak_stack = nullptr;
	delete[] visited;
	visited = nullptr;
}
/*
 * Hysteresis helper: classifies pixel (i, j) during edge tracking.
 * Strong and weak pixels are promoted to edge (255), marked visited, and
 * pushed on the stack so their neighbours are examined next; pixels below
 * the low threshold are marked visited and painted 50 (debug marker for
 * "visited non-edge", preserved from the original). Out-of-bounds or
 * already-visited pixels are ignored.
 */
void IsWeakEdge(unsigned char* sobel, int width, int height, int min_val, int max_val, int i, int j, unsigned int* stack, unsigned int* top, unsigned char* visited, unsigned char* output)
{
	if (i < 0 || i >= height)
		return;
	if (j < 0 || j >= width)
		return;
	int idx = i * width + j;
	if (visited[idx] == 1)
		return;
	visited[idx] = 1;
	unsigned char val = GetPixelVal(sobel, width, height, i, j);
	if (IS_STRONG_EDGE(val) || IS_WEAK_EDGE(val))
	{
		/* the original had two byte-identical branches for strong and weak
		   pixels; merged into one */
		output[idx] = 255;
		stack[*top] = idx;
		(*top)++;
	}
	else
	{
		output[idx] = 50;
	}
}
/*
 * Sequential hysteresis over one shared-memory tile (called by the
 * single-thread CUDA_DoubleThreshold2). Scans the tile; each strong pixel
 * seeds a stack-based flood fill (CUDA_IsWeakEdge) that promotes connected
 * weak pixels; sub-threshold pixels are cleared; leftover unvisited weak
 * pixels are cleared at the end of the scan.
 * width/height here are the TILE dimensions, not the image's.
 */
__device__ void CUDA_SubDoubleThreshold(unsigned char* sobel, int width, int height, int min_val, int max_val,
	unsigned int* weak_stack, unsigned int* stack_top, unsigned char* output, unsigned char* visited)
{
	unsigned int center_index = 0;
	for (int i = 0; i < height; i++)
	{
		for (int j = 0; j < width; j++)
		{
			if (IS_STRONG_EDGE(CUDA_GetPixelVal(sobel, width, height, i, j)))
			{
				//output[i * width + j] = 255;
				IsWeakEdge-style seed, then expand the 8-neighbourhood of
				CUDA_IsWeakEdge(sobel, width, height, min_val, max_val, i, j, weak_stack, stack_top, output, visited);
				while ((*stack_top) > 0)
				{
					//printf("%d\n", *stack_top);
					center_index = weak_stack[(*stack_top) - 1];
					(*stack_top)--;
					CUDA_IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width - 1, center_index % width - 1, weak_stack, stack_top, output, visited);
					CUDA_IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width - 1, center_index % width , weak_stack, stack_top, output, visited);
					CUDA_IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width - 1, center_index % width + 1, weak_stack, stack_top, output, visited);
					CUDA_IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width , center_index % width - 1, weak_stack, stack_top, output, visited);
					CUDA_IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width , center_index % width + 1, weak_stack, stack_top, output, visited);
					CUDA_IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width + 1, center_index % width - 1, weak_stack, stack_top, output, visited);
					CUDA_IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width + 1, center_index % width , weak_stack, stack_top, output, visited);
					CUDA_IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width + 1, center_index % width + 1, weak_stack, stack_top, output, visited);
					//CUDA_CheckEdge << <1, 1 >> > (sobel, width, height, min_val, max_val, center_index / width, center_index % width, weak_stack, stack_top, output);
				}//*/
			}
			else if (IS_NOT_EDGE(CUDA_GetPixelVal(sobel, width, height, i, j)))
			{
				output[i * width + j] = 0;
			}
			else
			{
				/* weak pixel not reached by any fill so far: clear it unless a
				   later fill already claimed it (visited == 1 keeps the 255) */
				if(visited[i * width + j] == 0)
					output[i * width + j] = 0;
			}
		}
	}
}
/*
 * Device-side hysteresis helper (tile-local coordinates): strong or weak
 * pixels become edges (255), are marked visited, and are pushed for
 * neighbour expansion; anything else is cleared.
 * NOTE(review): the final branch also zeroes sobel[] itself -- this
 * mutates the input tile, unlike the host-side IsWeakEdge which paints
 * output 50 and leaves sobel untouched.
 */
__device__ void CUDA_IsWeakEdge(unsigned char* sobel, int width, int height, int min_val, int max_val, int i, int j, unsigned int* stack, unsigned int* top, unsigned char* output, unsigned char* visited)
{
	if (i < 0 || i >= height)
		return;
	if (j < 0 || j >= width)
		return;
	if (visited[i * width + j] == 1)
		return;
	if (IS_STRONG_EDGE(CUDA_GetPixelVal(sobel, width, height, i, j)))
	{
		visited[i * width + j] = 1;
		output[i * width + j] = 255;
		stack[(*top)] = i * width + j;
		*top = *top + 1;
	}
	else if(IS_WEAK_EDGE(CUDA_GetPixelVal(sobel, width, height, i, j)))
	{
		visited[i * width + j] = 1;
		output[i * width + j] = 255;
		stack[(*top)] = i * width + j;
		*top = *top + 1;
	}
	else
	{
		visited[i * width + j] = 1;
		output[i * width + j] = 0;
		sobel[i * width + j] = 0;
	}
}
/*
 * Experimental 8-thread neighbour check (currently only referenced from a
 * commented-out dynamic-parallelism launch): thread k of an 8-thread block
 * maps (i, j) to one of the 8 neighbours of the given centre pixel
 * (threads 0-2: top row; 3: left; 4: right; 5-7: bottom row), then marks
 * it as edge (255, pushed on the stack) or non-edge (0).
 * NOTE(review): unlike CUDA_GetPixelVal for the read, the WRITE to
 * output[i * width + j] is not bounds-checked, so centre pixels on the
 * image border would write out of bounds; the unsynchronized (*top)++
 * across 8 threads would also race. Confirm before enabling.
 */
__global__ void CUDA_CheckEdge(unsigned char* sobel, int width, int height, int min_val, int max_val, int i, int j, unsigned short* stack, unsigned short* top, unsigned char* output)
{
	if (threadIdx.x >= 0 && threadIdx.x <= 2)
	{
		i = i - 1;
		j = j - 1 + threadIdx.x;
	}
	else if (threadIdx.x == 3)
		j = j - 1;
	else if (threadIdx.x == 4)
		j = j + 1;
	else if (threadIdx.x >= 5 && threadIdx.x <= 7)
	{
		i = i + 1;
		j = j - 6 + threadIdx.x;
	}
	if (IS_WEAK_EDGE(CUDA_GetPixelVal(sobel, width, height, i, j)) ||
		IS_STRONG_EDGE(CUDA_GetPixelVal(sobel, width, height, i, j)))
	{
		output[i * width + j] = 255;
		stack[*top] = i * width + j;
		(*top)++;
	}
	else
	{
		output[i * width + j] = 0;
	}
}
| ac54fdaed6d62d90932a70bd63b78ddbfba56358.cu | #include <stdio.h>
#include <windows.h>
#include <time.h>
#include <opencv2/opencv.hpp>
#include <math.h>
#include <iostream>
#include <device_launch_parameters.h>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <helper_timer.h>
#include <device_functions.h>
#include <sm_20_atomic_functions.h>
using namespace cv;
#define SPLIT_SIZE_X 32
#define SPLIT_SIZE_Y 24
#define SPLIT_SIZE_X2 16
#define SPLIT_SIZE_Y2 12
#define BLOCK_SIZE_X 36
#define BLOCK_SIZE_Y 28
#define IS_NOT_EDGE(a) (a < min_val)
#define IS_STRONG_EDGE(a) (a >= max_val)
#define IS_WEAK_EDGE(a) (a >= min_val && a < max_val)
/*canny using cuda*/
void CUDA_Canny();
__global__ void CUDA_GaussianAndSobel(unsigned char* img, int width, int height, unsigned char* output_sobel, short* output_gradient);
__device__ void CUDA_Gaussian(unsigned char* img, int width, int height, int idx, unsigned char* output);
__device__ void CUDA_Sobel(unsigned char* img, int width, int height, int idx, unsigned char* output_sobel, short* gradient);
__global__ void CUDA_NonMaxSuppress(unsigned char* sobel, int width, int height, short* gradient, unsigned char* output);
__global__ void CUDA_DoubleThreshold(unsigned char* sobel, int width, int height, int min_val, int max_val, unsigned char* canny);
__device__ unsigned char CUDA_GetPixelVal(unsigned char* img, int width, int height, int i, int j);
__device__ short GetGradientDirection(int sobel_x, int sobel_y);
__device__ void CheckAndConvert(unsigned char* img, int width, int height, int i, int j, int min_val, int max_val,
unsigned short* weak_stack, unsigned short* stack_index, unsigned char* canny);
__global__ void CUDA_DoubleThreshold2(unsigned char* sobel, int width, int height, int min_val, int max_val, unsigned char* canny);
void DisplayGradient(short* gradient, int width, int height);
unsigned char GetPixelVal(unsigned char* img, int width, int height, int i, int j);
void NonMaxSuppress(unsigned char* sobel, int width, int height, short* gradient, unsigned char* output);
void DoubleThreshold(unsigned char* sobel, int width, int height, int min_val, int max_val, unsigned char* output);
void IsWeakEdge(unsigned char* sobel, int width, int height, int min_val, int max_val, int i, int j, unsigned int* stack, unsigned int* top, unsigned char* visited, unsigned char* output);
__device__ void CUDA_SubDoubleThreshold(unsigned char* sobel, int width, int height, int min_val, int max_val,
unsigned int* weak_stack, unsigned int* stack_top, unsigned char* output, unsigned char* visited);
__device__ void CUDA_IsWeakEdge(unsigned char* sobel, int width, int height, int min_val, int max_val, int i, int j, unsigned int* stack, unsigned int* top, unsigned char* output, unsigned char* visited);
__global__ void CUDA_CheckEdge(unsigned char* sobel, int width, int height, int min_val, int max_val, int i, int j, unsigned short* stack, unsigned short* top, unsigned char* output);
/* Entry point: announces the demo and runs the GPU Canny loop until the
 * user presses 'q' inside CUDA_Canny(). */
int main(void)
{
	printf("CANNY_CUDA\n");
	CUDA_Canny();
	//system("pause");
	return 0;
}
/*
 * Runs the full Canny edge-detection pipeline on the GPU in a loop:
 *   load 640x480 grayscale frame -> fused gaussian+sobel ->
 *   non-maximum suppression -> hysteresis double threshold,
 * displaying the result each frame until 'q' is pressed.
 * Thresholds (50, 70) are hard-coded, matching the original.
 */
void CUDA_Canny()
{
	int width = 640;
	int height = 480;
	dim3 block_size_extended(BLOCK_SIZE_X, BLOCK_SIZE_Y);
	dim3 block_size_normal(SPLIT_SIZE_X, SPLIT_SIZE_Y);
	dim3 block_size_normal2(SPLIT_SIZE_X2, SPLIT_SIZE_Y2);
	dim3 grid_size(width / SPLIT_SIZE_X, height / SPLIT_SIZE_Y);
	dim3 grid_size2(width / SPLIT_SIZE_X2, height / SPLIT_SIZE_Y2);
	Mat img_src, img_sobel, img_gradient, img_canny;
	VideoCapture camera(0);
	/* host buffers */
	unsigned char* cpu_img = new unsigned char[width * height];
	unsigned char* cpu_img2 = new unsigned char[width * height];
	unsigned char* cpu_img3 = new unsigned char[width * height];
	short* cpu_gradient = new short[width * height];
	/* device buffers */
	unsigned char* gpu_img;
	cudaMalloc(&gpu_img, width * height * sizeof(unsigned char));
	unsigned char* gpu_sobel;
	cudaMalloc(&gpu_sobel, width * height * sizeof(unsigned char));
	short* gpu_gradient;
	cudaMalloc(&gpu_gradient, width * height * sizeof(short));
	unsigned char* gpu_canny;
	cudaMalloc(&gpu_canny, width * height * sizeof(unsigned char));
	StopWatchInterface * timer_cublas = NULL;
	/* create the timer once; the original re-created it every frame and
	   leaked the previous instance */
	sdkCreateTimer(&timer_cublas);
	while (1)
	{
		//camera >> img_src;
		img_src = imread("F:/img_src/lena.jpg");
		resize(img_src, img_src, Size(width, height), 0, 0);
		cvtColor(img_src, img_src, CV_BGR2GRAY);
		//imshow("img_src", img_src);
		sdkResetTimer(&timer_cublas);	/* keep per-frame timings independent */
		sdkStartTimer(&timer_cublas);
		/* 1. copy frame to device */
		cudaMemcpy(gpu_img, img_src.data, width * height * sizeof(unsigned char), cudaMemcpyHostToDevice);
		/* 2. fused gaussian blur + sobel (magnitude into gpu_sobel,
		      direction into gpu_gradient) */
		CUDA_GaussianAndSobel << <grid_size, block_size_extended >> > (gpu_img, width, height, gpu_sobel, gpu_gradient);
		cudaDeviceSynchronize();
		/* 3. non-maximum suppression (in-place on gpu_sobel) */
		CUDA_NonMaxSuppress << <grid_size, block_size_normal >> > (gpu_sobel, width, height, gpu_gradient, gpu_sobel);
		cudaDeviceSynchronize();
		/* 4. hysteresis double threshold; launched with one thread per
		      grid cell (sequential flood fill per tile) */
		CUDA_DoubleThreshold2 << <grid_size2, dim3(1,1) >> > (gpu_sobel, width, height, 50, 70, gpu_canny);
		/* copy result back (blocking cudaMemcpy also waits for step 4) */
		cudaMemcpy(cpu_img, gpu_canny, width * height * sizeof(unsigned char), cudaMemcpyDeviceToHost);
		/* cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize() */
		cudaDeviceSynchronize();
		sdkStopTimer(&timer_cublas);
		printf("\ngpu_done %.3f(ms)\n\n", sdkGetTimerValue(&timer_cublas));
		img_sobel = Mat(Size(width, height), CV_8UC1, cpu_img);
		resize(img_sobel, img_sobel, Size(640, 480), 0, 0);
		imshow("img_sobel", img_sobel);
		if ('q' == waitKey(1))
		{
			destroyAllWindows();
			/* these buffers were allocated with new[]; the original released
			   them with free(), which is undefined behavior */
			delete[] cpu_img;
			cpu_img = NULL;
			delete[] cpu_img2;
			cpu_img2 = NULL;
			delete[] cpu_img3;
			cpu_img3 = NULL;
			delete[] cpu_gradient;
			cpu_gradient = NULL;
			cudaFree(gpu_img);
			cudaFree(gpu_sobel);
			cudaFree(gpu_gradient);
			cudaFree(gpu_canny);
			sdkDeleteTimer(&timer_cublas);
			break;
		}
	}
}
/*
 * Fused Gaussian blur + Sobel kernel (CUDA twin of the HIP version).
 * Expected launch: grid (width/SPLIT_SIZE_X, height/SPLIT_SIZE_Y), block
 * BLOCK_SIZE_X x BLOCK_SIZE_Y (the SPLIT tile plus a 2-pixel halo). Each
 * block stages its haloed tile in shared memory, blurs it, applies Sobel,
 * then only interior threads write magnitude + direction out.
 */
__global__ void CUDA_GaussianAndSobel(unsigned char* img, int width, int height, unsigned char* output_sobel, short* output_gradient)
{
	__shared__ unsigned char cache[(BLOCK_SIZE_X) * (BLOCK_SIZE_Y)];
	__shared__ unsigned char gauss[(BLOCK_SIZE_X) * (BLOCK_SIZE_Y)];
	__shared__ unsigned char sobel[(BLOCK_SIZE_X) * (BLOCK_SIZE_Y)];
	short gradient = 0;
	/* stage this block's tile (shifted by -2,-2 for the halo) into shared
	   memory; out-of-image reads come back as 0 */
	int raw_index = SPLIT_SIZE_X * SPLIT_SIZE_Y * blockIdx.y * gridDim.x + blockIdx.x * SPLIT_SIZE_X + SPLIT_SIZE_X * gridDim.x * threadIdx.y + threadIdx.x;
	int pixel_val = CUDA_GetPixelVal(img, width, height, raw_index / width - 2, raw_index % width - 2);
	int cache_index = blockDim.x * threadIdx.y + threadIdx.x;
	cache[cache_index] = pixel_val;
	__syncthreads();
	/* gaussian blur on the shared tile */
	CUDA_Gaussian(cache, blockDim.x, blockDim.y, cache_index, gauss);
	__syncthreads();
	/* sobel magnitude + per-thread gradient direction */
	CUDA_Sobel(gauss, blockDim.x, blockDim.y, cache_index, sobel, &gradient);
	/* halo threads produced only intermediate data; they write nothing */
	if (threadIdx.y <= 1 || threadIdx.y >= blockDim.y - 2 ||
		threadIdx.x <= 1 || threadIdx.x >= blockDim.x - 2)
		return;
	int new_id = blockIdx.y * SPLIT_SIZE_X * SPLIT_SIZE_Y * gridDim.x + (threadIdx.y - 2) * SPLIT_SIZE_X * gridDim.x + blockIdx.x * SPLIT_SIZE_X + (threadIdx.x - 2);
	/* store interior results to global memory */
	output_gradient[new_id] = gradient;
	output_sobel[new_id] = sobel[cache_index];
}
// 3x3 Gaussian smoothing of this thread's pixel inside a shared-memory tile.
// img: tile base; width/height: tile dimensions; idx: this thread's linear
// index in the tile; output: destination tile with the same layout.
__device__ void CUDA_Gaussian(unsigned char* img, int width, int height, int idx, unsigned char* output)
{
    // Row-major 3x3 weights (same constants as the original unrolled sum).
    const double w[3][3] = {
        { 0.07511, 0.12384, 0.07511 },
        { 0.12384, 0.20418, 0.12384 },
        { 0.07511, 0.12384, 0.07511 }
    };
    // Accumulate in row-major order so the floating-point summation order
    // (and therefore the truncated result) matches the original expression.
    double acc = 0.0;
    for (int dy = -1; dy <= 1; ++dy)
        for (int dx = -1; dx <= 1; ++dx)
            acc += CUDA_GetPixelVal(img, width, height, threadIdx.y + dy, threadIdx.x + dx) * w[dy + 1][dx + 1];
    output[idx] = (int)acc;
}
// Sobel operator on a shared-memory tile: computes the gradient magnitude
// (clamped to 255) at this thread's pixel and its direction in degrees.
// img: smoothed tile; width/height: tile dims; idx: this thread's linear
// tile index; output_sobel: magnitude tile; gradient: direction out-param.
__device__ void CUDA_Sobel(unsigned char* img, int width, int height, int idx, unsigned char* output_sobel, short* gradient)
{
int sobel_x = 0;
int sobel_y = 0;
int sobel = 0;
// Row-difference 3x3 stencil (top row positive, bottom row negative).
sobel_x = CUDA_GetPixelVal(img, width, height, threadIdx.y - 1, threadIdx.x - 1) * (1) +
CUDA_GetPixelVal(img, width, height, threadIdx.y - 1, threadIdx.x ) * (2) +
CUDA_GetPixelVal(img, width, height, threadIdx.y - 1, threadIdx.x + 1) * (1) +
CUDA_GetPixelVal(img, width, height, threadIdx.y + 1, threadIdx.x - 1) * (-1) +
CUDA_GetPixelVal(img, width, height, threadIdx.y + 1, threadIdx.x ) * (-2) +
CUDA_GetPixelVal(img, width, height, threadIdx.y + 1, threadIdx.x + 1) * (-1);
// Column-difference 3x3 stencil (left column negative, right positive).
sobel_y = CUDA_GetPixelVal(img, width, height, threadIdx.y - 1, threadIdx.x - 1) * (-1) +
CUDA_GetPixelVal(img, width, height, threadIdx.y - 1, threadIdx.x + 1) * (1) +
CUDA_GetPixelVal(img, width, height, threadIdx.y , threadIdx.x - 1) * (-2) +
CUDA_GetPixelVal(img, width, height, threadIdx.y , threadIdx.x + 1) * (2) +
CUDA_GetPixelVal(img, width, height, threadIdx.y + 1, threadIdx.x - 1) * (-1) +
CUDA_GetPixelVal(img, width, height, threadIdx.y + 1, threadIdx.x + 1) * (1);
// Euclidean magnitude, saturated to the 8-bit output range.
sobel = sqrtf((float)(sobel_x * sobel_x + sobel_y * sobel_y));
sobel = sobel > 255 ? 255 : sobel;
output_sobel[idx] = sobel;
*gradient = GetGradientDirection(sobel_x, sobel_y);
}
// Non-maximum suppression: keep a sobel-magnitude pixel only if it is >= the
// two magnitudes interpolated along its gradient direction; otherwise write 0.
// One thread per pixel, flat indexing over the whole image.
// NOTE(review): the call site passes gpu_sobel as both `sobel` and `output`;
// in-place operation means neighbor reads race with other threads' writes —
// confirm whether a separate output buffer was intended.
__global__ void CUDA_NonMaxSuppress(unsigned char* sobel, int width, int height, short* gradient, unsigned char* output)
{
int id = (blockIdx.x + blockIdx.y * gridDim.x) * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
if (id >= width * height)
return;
int i = id / width;
int j = id % width;
float weight = 0;
// Neighbor magnitudes along the gradient. Zero-initialized so that a
// direction value outside the expected [-180, 180] range cannot leave them
// uninitialized (the original declared them without an initializer).
int g0 = 0, g1 = 0, g2 = 0, g3 = 0;
// Fold negative angles into [0, 180].
int temp_gradient = gradient[id] < 0 ? gradient[id] + 180 : gradient[id];
if (temp_gradient >= 0 && temp_gradient < 45)
{
weight = temp_gradient / 45.0;
g0 = CUDA_GetPixelVal(sobel, width, height, i , j + 1);
g1 = CUDA_GetPixelVal(sobel, width, height, i - 1, j + 1);
g2 = CUDA_GetPixelVal(sobel, width, height, i , j - 1);
g3 = CUDA_GetPixelVal(sobel, width, height, i + 1, j - 1);
}
else if (temp_gradient >= 45 && temp_gradient < 90)
{
weight = (90 - temp_gradient) / 45.0;
g0 = CUDA_GetPixelVal(sobel, width, height, i - 1, j );
g1 = CUDA_GetPixelVal(sobel, width, height, i - 1, j + 1);
g2 = CUDA_GetPixelVal(sobel, width, height, i + 1, j );
g3 = CUDA_GetPixelVal(sobel, width, height, i + 1, j - 1);
}
else if (temp_gradient >= 90 && temp_gradient < 135)
{
weight = (temp_gradient - 90) / 45.0;
g0 = CUDA_GetPixelVal(sobel, width, height, i - 1, j );
g1 = CUDA_GetPixelVal(sobel, width, height, i - 1, j - 1);
g2 = CUDA_GetPixelVal(sobel, width, height, i + 1, j );
g3 = CUDA_GetPixelVal(sobel, width, height, i + 1, j + 1);
}
else if (temp_gradient >= 135 && temp_gradient <= 180)
{
weight = (180 - temp_gradient) / 45.0;
g0 = CUDA_GetPixelVal(sobel, width, height, i , j - 1);
g1 = CUDA_GetPixelVal(sobel, width, height, i - 1, j - 1);
g2 = CUDA_GetPixelVal(sobel, width, height, i , j + 1);
g3 = CUDA_GetPixelVal(sobel, width, height, i + 1, j + 1);
}
// Linear interpolation between the two neighbors on each side.
int dot1 = g0 * (1 - weight) + g1 * weight;
int dot2 = g2 * (1 - weight) + g3 * weight;
if (sobel[id] >= dot1 && sobel[id] >= dot2)
output[id] = sobel[id];
else
output[id] = 0;
}
// Double-threshold / hysteresis pass on one shared-memory tile. Strong-edge
// pixels seed a per-thread flood fill (explicit stack) that promotes
// connected weak pixels to edges via CheckAndConvert; non-edge pixels clear
// their output slot to 0.
// NOTE(review): the __syncthreads() inside the while loop sits in divergent
// control flow (only strong-edge threads reach it) — undefined behavior; the
// call site uses CUDA_DoubleThreshold2 instead (this launch is commented out).
// NOTE(review): weak_stack is a per-thread local array of
// SPLIT_SIZE_X2*SPLIT_SIZE_Y2 shorts — very heavy local-memory usage.
__global__ void CUDA_DoubleThreshold(unsigned char* sobel, int width, int height, int min_val, int max_val, unsigned char* canny)
{
__shared__ unsigned char cache[SPLIT_SIZE_X2 * SPLIT_SIZE_Y2];   // input tile
__shared__ unsigned char cache2[SPLIT_SIZE_X2 * SPLIT_SIZE_Y2];  // output tile
// Global pixel index for this thread's tile position.
int raw_index = blockDim.x * blockDim.y * blockIdx.y * gridDim.x + blockIdx.x * blockDim.x + blockDim.x * gridDim.x * threadIdx.y + threadIdx.x;
int pixel_val = CUDA_GetPixelVal(sobel, width, height, raw_index / width, raw_index % width);
int cache_index = blockDim.x * threadIdx.y + threadIdx.x;
cache[cache_index] = pixel_val;
__syncthreads();
if (IS_STRONG_EDGE(cache[cache_index]))
{
// Explicit DFS stack for the flood fill seeded at this strong pixel.
unsigned short weak_stack[SPLIT_SIZE_X2 * SPLIT_SIZE_Y2];
unsigned short stack_cnt = 0;
CheckAndConvert(cache, blockDim.x, blockDim.y, threadIdx.y, threadIdx.x, min_val, max_val, weak_stack, &stack_cnt, cache2);
unsigned short center_index = 0;
while(stack_cnt > 0)
{
// Pop one confirmed pixel and examine its 8 neighbors.
center_index = weak_stack[stack_cnt - 1];
stack_cnt--;
CheckAndConvert(cache, blockDim.x, blockDim.y, center_index / blockDim.x - 1, center_index % blockDim.x - 1, min_val, max_val, weak_stack, &stack_cnt, cache2);
CheckAndConvert(cache, blockDim.x, blockDim.y, center_index / blockDim.x - 1, center_index % blockDim.x , min_val, max_val, weak_stack, &stack_cnt, cache2);
CheckAndConvert(cache, blockDim.x, blockDim.y, center_index / blockDim.x - 1, center_index % blockDim.x + 1, min_val, max_val, weak_stack, &stack_cnt, cache2);
CheckAndConvert(cache, blockDim.x, blockDim.y, center_index / blockDim.x , center_index % blockDim.x - 1, min_val, max_val, weak_stack, &stack_cnt, cache2);
CheckAndConvert(cache, blockDim.x, blockDim.y, center_index / blockDim.x , center_index % blockDim.x + 1, min_val, max_val, weak_stack, &stack_cnt, cache2);
CheckAndConvert(cache, blockDim.x, blockDim.y, center_index / blockDim.x + 1, center_index % blockDim.x - 1, min_val, max_val, weak_stack, &stack_cnt, cache2);
CheckAndConvert(cache, blockDim.x, blockDim.y, center_index / blockDim.x + 1, center_index % blockDim.x , min_val, max_val, weak_stack, &stack_cnt, cache2);
CheckAndConvert(cache, blockDim.x, blockDim.y, center_index / blockDim.x + 1, center_index % blockDim.x + 1, min_val, max_val, weak_stack, &stack_cnt, cache2);
__syncthreads();
}
}
else if (IS_NOT_EDGE(cache[cache_index]))
{
cache2[cache_index] = 0;
}
// Write this thread's tile pixel back to global memory.
int new_id = blockIdx.y * blockDim.x * blockDim.y * gridDim.x + threadIdx.y * blockDim.x * gridDim.x + blockIdx.x * blockDim.x + threadIdx.x;
canny[new_id] = cache2[cache_index];
__syncthreads();
}
// If tile pixel (i, j) qualifies as a weak or strong edge, promote it to a
// confirmed edge (255) in `canny` and push its tile index on `weak_stack`
// for further neighbor expansion; otherwise clear it to 0.
__device__ void CheckAndConvert(unsigned char* img, int width, int height, int i, int j, int min_val, int max_val,
unsigned short* weak_stack, unsigned short* stack_index, unsigned char* canny)
{
    int idx = width * i + j;                               // linear tile index
    unsigned char px = CUDA_GetPixelVal(img, width, height, i, j);
    if (IS_WEAK_EDGE(px) || IS_STRONG_EDGE(px))
    {
        weak_stack[(*stack_index)++] = idx;                // queue for expansion
        canny[idx] = 255;
    }
    else
    {
        canny[idx] = 0;
    }
}
// Double-threshold / hysteresis entry point actually used by the pipeline.
// NOTE(review): the call site launches this with block dim3(1,1), so a single
// thread per block serially loads a SPLIT_SIZE_X2 x SPLIT_SIZE_Y2 tile, runs
// the sequential hysteresis routine, and writes the tile back. The memset and
// serial loops over __shared__ arrays are only race-free under that
// one-thread-per-block launch — confirm before changing the launch config.
__global__ void CUDA_DoubleThreshold2(unsigned char* sobel, int width, int height, int min_val, int max_val, unsigned char* canny)
{
__shared__ unsigned char cache[SPLIT_SIZE_X2 * SPLIT_SIZE_Y2];      // input tile
__shared__ unsigned char output[SPLIT_SIZE_X2 * SPLIT_SIZE_Y2];     // result tile
__shared__ unsigned int weak_stack[SPLIT_SIZE_X2 * SPLIT_SIZE_Y2];  // flood-fill stack
__shared__ unsigned char visited[SPLIT_SIZE_X2 * SPLIT_SIZE_Y2];    // per-pixel visited flags
unsigned int stack_top = 0;
memset(visited, 0, SPLIT_SIZE_X2 * SPLIT_SIZE_Y2);
// Global pixel index of the tile's top-left corner for this block.
int raw_index = SPLIT_SIZE_X2 * SPLIT_SIZE_Y2 * blockIdx.y * gridDim.x + blockIdx.x * SPLIT_SIZE_X2 + SPLIT_SIZE_X2 * gridDim.x * threadIdx.y + threadIdx.x;
int i, j, new_id;
// Serially stage the whole tile into shared memory.
for (i = 0; i < SPLIT_SIZE_Y2; ++i)
{
for (j = 0; j < SPLIT_SIZE_X2; ++j)
{
cache[i * SPLIT_SIZE_X2 + j] = CUDA_GetPixelVal(sobel, width, height, raw_index / width + i, raw_index % width + j);
}
}
// Sequential hysteresis over the staged tile.
CUDA_SubDoubleThreshold(cache, SPLIT_SIZE_X2, SPLIT_SIZE_Y2, min_val, max_val, weak_stack, &stack_top, output, visited);
// Serially write the tile result back to global memory.
for (i = 0; i < SPLIT_SIZE_Y2; ++i)
{
for (j = 0; j < SPLIT_SIZE_X2; ++j)
{
new_id = blockIdx.y * SPLIT_SIZE_X2 * SPLIT_SIZE_Y2 * gridDim.x + i * SPLIT_SIZE_X2 * gridDim.x + blockIdx.x * SPLIT_SIZE_X2 + j;
canny[new_id] = output[i * SPLIT_SIZE_X2 + j];
}
}
__syncthreads();
}
// Bounds-checked pixel read: coordinates outside the width x height image
// read as 0 instead of faulting.
__device__ unsigned char CUDA_GetPixelVal(unsigned char* img, int width, int height, int i, int j)
{
    if (i < 0 || j < 0 || i >= height || j >= width)
        return 0;
    return img[i * width + j];
}
// Convert Sobel responses to a gradient direction in degrees (atan2 result
// scaled from radians), truncated to a short.
// NOTE(review): arguments are passed as atan2f(sobel_x, sobel_y); the C
// convention is atan2(y, x) — confirm the intended axis ordering.
__device__ short GetGradientDirection(int sobel_x, int sobel_y)
{
short gradient = (atan2f(sobel_x, sobel_y) / 3.1415926 * 180.0);
//gradient = gradient < 0 ? gradient + 180 : gradient;
return gradient;
}
// Debug visualization: color each pixel by which 45-degree bin the absolute
// gradient direction falls into (BGR: blue, green, red, gray) and show it in
// an OpenCV window named "gradient".
void DisplayGradient(short* gradient, int width, int height)
{
    Mat img = Mat::zeros(Size(width, height), CV_8UC3);
    for (int i = 0; i < height; i++)
    {
        for (int j = 0; j < width; j++)
        {
            // Hoist the repeated abs(gradient[...]) lookup (the original
            // recomputed it up to eight times per pixel).
            int a = abs(*(gradient + i * width + j));
            if (a >= 0 && a < 45)
            {
                img.at<Vec3b>(i, j) = Vec3b(255, 0, 0);
            }
            else if (a >= 45 && a < 90)
            {
                img.at<Vec3b>(i, j) = Vec3b(0, 255, 0);
            }
            else if (a >= 90 && a < 135)
            {
                img.at<Vec3b>(i, j) = Vec3b(0, 0, 255);
            }
            else if (a >= 135 && a <= 180)
            {
                img.at<Vec3b>(i, j) = Vec3b(128, 128, 128);
            }
        }
    }
    imshow("gradient", img);
}
// Host-side bounds-checked pixel read; coordinates outside the image read
// as 0 (mirrors the device helper CUDA_GetPixelVal).
unsigned char GetPixelVal(unsigned char* img, int width, int height, int i, int j)
{
    bool inside = (i >= 0) && (i < height) && (j >= 0) && (j < width);
    return inside ? img[i * width + j] : 0;
}
// CPU reference for non-maximum suppression: keep a sobel pixel only if it is
// strictly greater than the two magnitudes interpolated along its gradient
// direction; otherwise write 0.
// NOTE(review): this uses strict '>' while CUDA_NonMaxSuppress uses '>=' —
// the two implementations disagree on plateau pixels; confirm which is intended.
void NonMaxSuppress(unsigned char* sobel, int width, int height, short* gradient, unsigned char* output)
{
    for (int i = 0; i < height; i++)
    {
        for (int j = 0; j < width; j++)
        {
            int id = i * width + j;
            float weight = 0;
            // Zero-initialized so a direction value outside [-180, 180]
            // cannot leave the neighbors uninitialized (the original
            // declared them without an initializer).
            int g0 = 0, g1 = 0, g2 = 0, g3 = 0;
            // Fold negative angles into [0, 180].
            int temp_gradient = gradient[id] < 0 ? gradient[id] + 180 : gradient[id];
            if (temp_gradient >= 0 && temp_gradient < 45)
            {
                weight = temp_gradient / 45.0;
                g0 = GetPixelVal(sobel, width, height, i, j + 1);
                g1 = GetPixelVal(sobel, width, height, i - 1, j + 1);
                g2 = GetPixelVal(sobel, width, height, i, j - 1);
                g3 = GetPixelVal(sobel, width, height, i + 1, j - 1);
            }
            else if (temp_gradient >= 45 && temp_gradient < 90)
            {
                weight = (90 - temp_gradient) / 45.0;
                g0 = GetPixelVal(sobel, width, height, i - 1, j);
                g1 = GetPixelVal(sobel, width, height, i - 1, j + 1);
                g2 = GetPixelVal(sobel, width, height, i + 1, j);
                g3 = GetPixelVal(sobel, width, height, i + 1, j - 1);
            }
            else if (temp_gradient >= 90 && temp_gradient < 135)
            {
                weight = (temp_gradient - 90) / 45.0;
                g0 = GetPixelVal(sobel, width, height, i - 1, j);
                g1 = GetPixelVal(sobel, width, height, i - 1, j - 1);
                g2 = GetPixelVal(sobel, width, height, i + 1, j);
                g3 = GetPixelVal(sobel, width, height, i + 1, j + 1);
            }
            else if (temp_gradient >= 135 && temp_gradient <= 180)
            {
                weight = (180 - temp_gradient) / 45.0;
                g0 = GetPixelVal(sobel, width, height, i, j - 1);
                g1 = GetPixelVal(sobel, width, height, i - 1, j - 1);
                g2 = GetPixelVal(sobel, width, height, i, j + 1);
                g3 = GetPixelVal(sobel, width, height, i + 1, j + 1);
            }
            // Linear interpolation between the two neighbors on each side.
            int dot1 = g0 * (1 - weight) + g1 * weight;
            int dot2 = g2 * (1 - weight) + g3 * weight;
            if (sobel[id] > dot1 && sobel[id] > dot2)
                output[id] = sobel[id];
            else
                output[id] = 0;
        }
    }
}
// CPU reference for hysteresis thresholding: every unvisited strong-edge
// pixel seeds an explicit-stack flood fill (IsWeakEdge) that promotes
// connected weak pixels to edges; clear non-edge pixels to 0.
// NOTE(review): IsWeakEdge writes 50 into output for visited non-edge
// neighbors — pixels never touched by a fill keep whatever was in output.
void DoubleThreshold(unsigned char* sobel, int width, int height, int min_val, int max_val, unsigned char* output)
{
// Flood-fill stack plus visited flags; visited bounds the stack at
// width*height entries since each pixel is pushed at most once.
unsigned int* weak_stack = new unsigned int[width * height];
unsigned char* visited = new unsigned char[width * height];
unsigned int stack_top = 0;
unsigned int center_index = 0;
memset(visited, 0, width * height);
for (int i = 0; i < height; i++)
{
for (int j = 0; j < width; j++)
{
if (IS_STRONG_EDGE(GetPixelVal(sobel, width, height, i, j)) && visited[i * width + j] != 1)
{
// Seed the fill at the strong pixel, then expand 8-neighborhoods
// until the stack drains.
IsWeakEdge(sobel, width, height, min_val, max_val, i, j, weak_stack, &stack_top, visited, output);
while (stack_top > 0)
{
center_index = weak_stack[stack_top - 1];
stack_top--;
IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width - 1, center_index % width - 1, weak_stack, &stack_top, visited, output);
IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width - 1, center_index % width , weak_stack, &stack_top, visited, output);
IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width - 1, center_index % width + 1, weak_stack, &stack_top, visited, output);
IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width , center_index % width - 1, weak_stack, &stack_top, visited, output);
IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width , center_index % width + 1, weak_stack, &stack_top, visited, output);
IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width + 1, center_index % width - 1, weak_stack, &stack_top, visited, output);
IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width + 1, center_index % width , weak_stack, &stack_top, visited, output);
IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width + 1, center_index % width + 1, weak_stack, &stack_top, visited, output);
}
}
else if (IS_NOT_EDGE(GetPixelVal(sobel, width, height, i, j)))
{
output[i * width + j] = 0;
}
}
}
delete[] weak_stack;
weak_stack = nullptr;
delete[] visited;
visited = nullptr;
}
// Flood-fill step for DoubleThreshold: if unvisited pixel (i, j) is a strong
// or weak edge, mark it as a confirmed edge (255) and push it on the stack;
// otherwise mark it visited and write the 50 sentinel. Out-of-range or
// already-visited pixels are skipped.
void IsWeakEdge(unsigned char* sobel, int width, int height, int min_val, int max_val, int i, int j, unsigned int* stack, unsigned int* top, unsigned char* visited, unsigned char* output)
{
    // Skip out-of-image neighbors and pixels already processed.
    if (i < 0 || i >= height || j < 0 || j >= width)
        return;
    int idx = i * width + j;
    if (visited[idx] == 1)
        return;
    unsigned char px = GetPixelVal(sobel, width, height, i, j);
    // The original had two identical branches for strong and weak edges;
    // they are merged here.
    if (IS_STRONG_EDGE(px) || IS_WEAK_EDGE(px))
    {
        output[idx] = 255;
        visited[idx] = 1;
        stack[(*top)++] = idx;
    }
    else
    {
        visited[idx] = 1;
        output[idx] = 50;
    }
}
// Sequential hysteresis over a staged tile (called by CUDA_DoubleThreshold2
// from a single thread): strong pixels seed an explicit-stack flood fill via
// CUDA_IsWeakEdge; non-edge pixels are cleared; everything else is cleared
// only if the fill never visited it.
__device__ void CUDA_SubDoubleThreshold(unsigned char* sobel, int width, int height, int min_val, int max_val,
unsigned int* weak_stack, unsigned int* stack_top, unsigned char* output, unsigned char* visited)
{
unsigned int center_index = 0;
for (int i = 0; i < height; i++)
{
for (int j = 0; j < width; j++)
{
if (IS_STRONG_EDGE(CUDA_GetPixelVal(sobel, width, height, i, j)))
{
//output[i * width + j] = 255;
// Seed the fill at the strong pixel, then expand 8-neighborhoods
// until the stack drains.
CUDA_IsWeakEdge(sobel, width, height, min_val, max_val, i, j, weak_stack, stack_top, output, visited);
while ((*stack_top) > 0)
{
//printf("%d\n", *stack_top);
center_index = weak_stack[(*stack_top) - 1];
(*stack_top)--;
CUDA_IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width - 1, center_index % width - 1, weak_stack, stack_top, output, visited);
CUDA_IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width - 1, center_index % width , weak_stack, stack_top, output, visited);
CUDA_IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width - 1, center_index % width + 1, weak_stack, stack_top, output, visited);
CUDA_IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width , center_index % width - 1, weak_stack, stack_top, output, visited);
CUDA_IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width , center_index % width + 1, weak_stack, stack_top, output, visited);
CUDA_IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width + 1, center_index % width - 1, weak_stack, stack_top, output, visited);
CUDA_IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width + 1, center_index % width , weak_stack, stack_top, output, visited);
CUDA_IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width + 1, center_index % width + 1, weak_stack, stack_top, output, visited);
//CUDA_CheckEdge << <1, 1 >> > (sobel, width, height, min_val, max_val, center_index / width, center_index % width, weak_stack, stack_top, output);
}//*/
}
else if (IS_NOT_EDGE(CUDA_GetPixelVal(sobel, width, height, i, j)))
{
output[i * width + j] = 0;
}
else
{
// Weak pixel not reached by any fill: clear it (fills that did reach
// it already set output and visited).
if(visited[i * width + j] == 0)
output[i * width + j] = 0;
}
}
}
}
// Device-side flood-fill step: if unvisited pixel (i, j) is a strong or weak
// edge, mark it as a confirmed edge (255) and push its index on the stack;
// otherwise clear both the output and the source magnitude. Out-of-range or
// already-visited pixels are skipped.
__device__ void CUDA_IsWeakEdge(unsigned char* sobel, int width, int height, int min_val, int max_val, int i, int j, unsigned int* stack, unsigned int* top, unsigned char* output, unsigned char* visited)
{
    // Skip out-of-range neighbors and already-visited pixels.
    if (i < 0 || i >= height || j < 0 || j >= width)
        return;
    int idx = i * width + j;
    if (visited[idx] == 1)
        return;
    unsigned char px = CUDA_GetPixelVal(sobel, width, height, i, j);
    visited[idx] = 1;
    // The original had two identical branches for strong and weak edges;
    // they are merged here.
    if (IS_STRONG_EDGE(px) || IS_WEAK_EDGE(px))
    {
        output[idx] = 255;
        stack[*top] = idx;
        *top = *top + 1;
    }
    else
    {
        // Not an edge: clear the output and also zero the source magnitude.
        output[idx] = 0;
        sobel[idx] = 0;
    }
}
// Examine the 8 neighbors of (i, j) with threads 0..7 (each thread remaps
// i, j to one neighbor) and promote weak/strong pixels to edges, pushing
// them on the shared stack. Only referenced from a commented-out launch in
// CUDA_SubDoubleThreshold.
// NOTE(review): (*top)++ and the output writes are not atomic, so running
// more than one thread races on the stack.
// NOTE(review): threads with threadIdx.x > 7 fall through with i, j
// unmodified and re-process the center pixel — confirm intended launch size.
__global__ void CUDA_CheckEdge(unsigned char* sobel, int width, int height, int min_val, int max_val, int i, int j, unsigned short* stack, unsigned short* top, unsigned char* output)
{
// Map threadIdx.x 0..7 onto the 8-neighborhood of (i, j).
if (threadIdx.x >= 0 && threadIdx.x <= 2)
{
i = i - 1;
j = j - 1 + threadIdx.x;
}
else if (threadIdx.x == 3)
j = j - 1;
else if (threadIdx.x == 4)
j = j + 1;
else if (threadIdx.x >= 5 && threadIdx.x <= 7)
{
i = i + 1;
j = j - 6 + threadIdx.x;
}
if (IS_WEAK_EDGE(CUDA_GetPixelVal(sobel, width, height, i, j)) ||
IS_STRONG_EDGE(CUDA_GetPixelVal(sobel, width, height, i, j)))
{
output[i * width + j] = 255;
stack[*top] = i * width + j;
(*top)++;
}
else
{
output[i * width + j] = 0;
}
}
|
23983632a04070c21c4093ca79262c52654d7a72.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include "util_para.h"
//#include <hip/hip_runtime.h>
#include <time.h>
#define BILLION 1000000000
/*
* Input: Dataset matrix
* Output: Result matrix after get convoluted
*/
/*
 * Placeholder for MNIST loading: fills every training pixel with 1.0 and
 * every test pixel with 0.0, then prints the first training image as a
 * 28-wide grid for a visual sanity check.
 * Intended real layout (not implemented here):
 * - train image : train_image[60000][784] (double, normalized, flattened)
 * - train label : train_label[60000] (int)
 * - test image  : test_image[10000][784] (double, normalized, flattened)
 * - test label  : test_label[10000] (int)
 */
void read_data(double **train_im, double **test_im) {
    for (int im = 0; im < NUM_TRAINS; im++)
        for (int px = 0; px < IMAGE_SIZE; px++)
            train_im[im][px] = 1.0;
    for (int im = 0; im < NUM_TESTS; im++)
        for (int px = 0; px < IMAGE_SIZE; px++)
            test_im[im][px] = 0.0;
    printf("read_data.c: Loaded images to arrays \n");
    // Dump the first training image, 28 values per row.
    for (int px = 0; px < 784; px++) {
        printf("%1.1f ", train_im[0][px]);
        if ((px + 1) % 28 == 0) putchar('\n');
    }
}
/*
 * Zero-pad every training image: each padded image is
 * (IMAGE_DIM + KERNEL_SIZE - 1)^2 with the original image centered and a
 * KERNEL_SIZE/2 border of zeros on every side.
 */
void padding(double **trained_im, double **padded_im) {
    int padded_dim = IMAGE_DIM + KERNEL_SIZE - 1;
    int padded_size = padded_dim * padded_dim;
    int border = KERNEL_SIZE / 2;  // integer division rounds down
    for (int im = 0; im < NUM_TRAINS; im++) {
        // Clear the whole padded buffer first...
        for (int p = 0; p < padded_size; p++)
            padded_im[im][p] = 0.0;
        // ...then copy the original pixels into the centered window.
        for (int r = 0; r < IMAGE_DIM; r++)
            for (int c = 0; c < IMAGE_DIM; c++)
                padded_im[im][(r + border) * padded_dim + (c + border)] = trained_im[im][r * IMAGE_DIM + c];
    }
}
/*
 * Baseline CPU convolution layer: convolves every padded training image with
 * a KERNEL_SIZE x KERNEL_SIZE kernel selected by kn_type and stores each
 * output pixel, plus CONV_BIAS, in result_im[image][row*IMAGE_DIM+col].
 * Exits the process with a usage message on an unknown kn_type.
 */
void conv_layer_baseline(double **padded_im, double **result_im, int kn_type) {
    int new_dim = IMAGE_DIM + KERNEL_SIZE - 1;  // padded image edge length
    // ======== 2. Do Convolution Operation ========
    // -------- 2.1 Create Kernel --------
    // NOTE(review): the explicit [0..2] assignments below assume KERNEL_SIZE == 3.
    double **convKernel = (double **) malloc( KERNEL_SIZE * sizeof(double *));
    for (int i = 0; i < KERNEL_SIZE; i++) {
        convKernel[i] = (double *) malloc(KERNEL_SIZE * sizeof(double));
    }
    if (kn_type == KN_HORIZONTAL_EDGE) {
        convKernel[0][0] = 1;
        convKernel[0][1] = 1;
        convKernel[0][2] = 1;
        convKernel[1][0] = 1;
        convKernel[1][1] = 1;
        convKernel[1][2] = 1;
        convKernel[2][0] = convKernel[0][0] * (1);
        convKernel[2][1] = convKernel[0][1] * (1);
        convKernel[2][2] = convKernel[0][2] * (1);
    }
    else if (kn_type == KN_VERTICAL_EDGE) {
        // Sobel-like vertical-edge kernel: -1 0 1 / -2 0 2 / -1 0 1.
        convKernel[0][0] = -1.0;
        convKernel[1][0] = -2.0;
        convKernel[2][0] = -1.0;
        convKernel[0][1] = 0.0;
        convKernel[1][1] = 0.0;
        convKernel[2][1] = 0.0;
        convKernel[0][2] = convKernel[0][0] * (-1);
        convKernel[1][2] = convKernel[1][0] * (-1);
        convKernel[2][2] = convKernel[2][0] * (-1);
    }
    else {
        printf("conv_layer.c: please set up the kernel \n");
        printf("Usage: Set: KN_VERTICAL_EDGE to use Kernel to detect vertical edge \n");
        printf("       Set: KN_HORIZAONTAL_EDGE to use Kernel to detect horizontal edge \n");
        exit(0);
    }
    // -------- 2.2 Load Mapped Image from the dataset --------
    double **imageMap = (double **) malloc( KERNEL_SIZE * sizeof(double *));
    for (int i = 0; i < KERNEL_SIZE; i++) {
        imageMap[i] = (double *) malloc(KERNEL_SIZE * sizeof(double));
    }
    double accu = 0.0;
    int img_i;
    for (img_i = 0; img_i < NUM_TRAINS; img_i++) {
        /// conv_i and conv_j are the top left element of kernel
        for (int conv_i = 0; conv_i <= new_dim-KERNEL_SIZE; conv_i = conv_i + CONV_STRIDE) {
            for (int conv_j = 0; conv_j <= new_dim-KERNEL_SIZE; conv_j = conv_j + CONV_STRIDE) {
                accu = 0.0;
                // Stage the kernel-sized window and accumulate the dot product.
                for (int i = 0; i < KERNEL_SIZE; i++) {
                    for (int j = 0; j < KERNEL_SIZE; j++) {
                        imageMap[i][j] = padded_im[img_i][(i+conv_i)*new_dim+(j+conv_j)];
                        accu += imageMap[i][j] * convKernel[i][j];
                    }
                }
                result_im[img_i][conv_i*IMAGE_DIM+conv_j] = accu + CONV_BIAS;
            } // end of conv_j loop
        } // end of conv_i loop
    } // end of img_i loop
    printf("test: im_i is %d \n", img_i);
    // Free the row arrays before the row-pointer arrays; the original freed
    // only the outer pointers and leaked every row.
    for (int i = 0; i < KERNEL_SIZE; i++) {
        free(convKernel[i]);
        free(imageMap[i]);
    }
    free(convKernel);
    free(imageMap);
}
/*
 * Convolve one padded image (new_dim x new_dim) with a KERNEL_SIZE x
 * KERNEL_SIZE kernel; one thread per output pixel, result written at
 * dev_conv_res[i*IMAGE_DIM+j].
 * Threads outside the valid convolution window exit early — the original
 * omitted this guard (it was commented out) and wrote outside the
 * IMAGE_DIM x IMAGE_DIM output range.
 */
__global__ void conv_cuda(double *dev_padded_im, double *dev_kernel, double *dev_conv_res, int new_dim)
{
    int i = blockIdx.y * blockDim.y + threadIdx.y;
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard: only positions where the whole kernel fits produce an output.
    if (i > (new_dim - KERNEL_SIZE) || j > (new_dim - KERNEL_SIZE))
        return;
    double partial = 0.0;
    for (int ki = 0; ki < KERNEL_SIZE; ki++) {
        for (int kj = 0; kj < KERNEL_SIZE; kj++) {
            partial += dev_padded_im[(i + ki) * new_dim + (j + kj)] * dev_kernel[ki * KERNEL_SIZE + kj];
        }
    }
    dev_conv_res[i * IMAGE_DIM + j] = partial;
}
// Trivial device-side sanity check: every launched thread prints one line
// (launched at the end of main with 1 block of 10 threads).
__global__ void myKernel()
{
printf("Hello, world from the device!\n");
}
/*
 * Driver: build dummy MNIST-sized data, run and time the CPU baseline
 * convolution, then run the CUDA convolution once per training image and
 * report accumulated kernel time.
 * Fixes over the original:
 *  - the flatten loop copied only IMAGE_SIZE of the new_size doubles per
 *    padded image (and indexed with IMAGE_SIZE), so the kernel read
 *    uninitialized device memory;
 *  - the kernel was launched on image 0 every iteration; device pointers are
 *    now offset per image;
 *  - row allocations are freed (the original freed only row-pointer arrays).
 */
int main () {
    struct timespec start, end;
    double diff;
    hipEvent_t start_cu, stop_cu;
    float time;
    // Host image buffers: one malloc'd row per image.
    double **train_image = (double **) malloc(NUM_TRAINS * sizeof(double*));
    for (int i = 0; i < NUM_TRAINS; i++) {
        train_image[i] = (double *) malloc(IMAGE_SIZE * sizeof(double));
    }
    double **test_image = (double **) malloc(NUM_TESTS * sizeof(double*));
    for (int i = 0; i < NUM_TESTS; i++) {
        test_image[i] = (double *) malloc(IMAGE_SIZE * sizeof(double));
    }
    read_data(train_image, test_image);
    int new_dim = IMAGE_DIM + KERNEL_SIZE - 1;
    int new_size = new_dim * new_dim;
    double **padded_im = (double **) malloc(NUM_TRAINS * sizeof(double *));
    for (int i = 0; i < NUM_TRAINS; i++) {
        padded_im[i] = (double *) malloc(new_size * sizeof(double));
    }
    double **conv_res = (double **) malloc(NUM_TRAINS * sizeof(double *));
    for (int i = 0; i < NUM_TRAINS; i++) {
        conv_res[i] = (double *) malloc(IMAGE_SIZE * sizeof(double));
    }
    // Baseline part
    clock_gettime(CLOCK_MONOTONIC, &start);
    padding(train_image, padded_im);
    conv_layer_baseline(padded_im, conv_res, KN_HORIZONTAL_EDGE);
    clock_gettime(CLOCK_MONOTONIC, &end);
    diff = BILLION * (end.tv_sec - start.tv_sec) + end.tv_nsec - start.tv_nsec;
    printf("Baseline conv time is %3.12f \n", diff);
    // train_image is no longer needed; free rows before the pointer array.
    for (int i = 0; i < NUM_TRAINS; i++) free(train_image[i]);
    free(train_image);
    printf(" ======== Baseline Result ======== \n");
    for (int i = 0; i < IMAGE_SIZE; i++) {
        printf("%1.1f ", conv_res[0][i]);
        if ((i+1) % IMAGE_DIM == 0) putchar('\n');
    }
    // Alternative 1: CUDA convolution on flattened buffers.
    int size_double_padded_im = NUM_TRAINS * new_size * sizeof(double);
    int size_double_kernel = KERNEL_SIZE * KERNEL_SIZE * sizeof(double);
    int size_double_conv_res = NUM_TRAINS * IMAGE_SIZE * sizeof(double);
    double *padded_im_flat = (double *) malloc(size_double_padded_im);
    // BUGFIX: each padded image is new_size doubles, not IMAGE_SIZE.
    for (int i = 0; i < NUM_TRAINS; i++) {
        for (int j = 0; j < new_size; j++) {
            padded_im_flat[i*new_size+j] = padded_im[i][j];
        }
    }
    puts("test 0 \n");
    double *kernel = (double *) malloc(size_double_kernel);
    for (int i = 0; i < KERNEL_SIZE * KERNEL_SIZE; i++) kernel[i] = 1.0;
    double *h_conv_res = (double *) malloc(size_double_conv_res);
    for (int i = 0; i < NUM_TRAINS * IMAGE_SIZE; i++) h_conv_res[i] = 0.0;
    double *dev_padded_im = NULL, *dev_kernel = NULL, *dev_conv_res = NULL;
    hipMalloc((void **)&dev_padded_im, size_double_padded_im);
    hipMalloc((void **)&dev_kernel, size_double_kernel);
    hipMalloc((void **)&dev_conv_res, size_double_conv_res);
    hipEventCreate( &start_cu ); hipEventCreate( &stop_cu );
    hipMemcpy(dev_padded_im, padded_im_flat, size_double_padded_im, hipMemcpyHostToDevice);
    hipMemcpy(dev_kernel, kernel, size_double_kernel, hipMemcpyHostToDevice);
    dim3 Block_A1(15, 15);
    dim3 Grid_A1(new_dim/Block_A1.x, new_dim/Block_A1.y);
    float time_acc = 0;
    for (int i = 0; i < NUM_TRAINS; i++) {
        hipEventRecord(start_cu, 0);
        // BUGFIX: offset the device pointers per image; the original launched
        // on image 0 every iteration, so images 1..NUM_TRAINS-1 were never computed.
        hipLaunchKernelGGL(( conv_cuda) , dim3(Grid_A1), dim3(Block_A1) , 0, 0, dev_padded_im + i * new_size, dev_kernel, dev_conv_res + i * IMAGE_SIZE, new_dim);
        hipDeviceSynchronize();
        hipEventRecord(stop_cu, 0);
        hipEventSynchronize(stop_cu);
        hipEventElapsedTime(&time, start_cu, stop_cu);
        time_acc += time/1000;
    }
    printf("cuda time is %3.12f \n", time_acc);
    hipMemcpy(h_conv_res, dev_conv_res, size_double_conv_res, hipMemcpyDeviceToHost);
    hipEventDestroy(start_cu); hipEventDestroy(stop_cu);
    hipFree(dev_padded_im); hipFree(dev_kernel); hipFree(dev_conv_res);
    for (int i = 0; i < NUM_TRAINS; i++) {
        for (int j = 0; j < IMAGE_SIZE; j++) {
            conv_res[i][j] = h_conv_res[i*IMAGE_SIZE+j];
        }
    }
    printf("end \n");
    hipLaunchKernelGGL(( myKernel), dim3(1),dim3(10), 0, 0, );
    hipDeviceSynchronize();
    // Release remaining host allocations before exit.
    for (int i = 0; i < NUM_TESTS; i++) free(test_image[i]);
    free(test_image);
    for (int i = 0; i < NUM_TRAINS; i++) { free(padded_im[i]); free(conv_res[i]); }
    free(padded_im);
    free(conv_res);
    free(padded_im_flat);
    free(kernel);
    free(h_conv_res);
    return 0;
}
| 23983632a04070c21c4093ca79262c52654d7a72.cu | #include <stdio.h>
#include <stdlib.h>
#include "util_para.h"
//#include <cuda_runtime.h>
#include <time.h>
#define BILLION 1000000000
/*
* Input: Dataset matrix
* Output: Result matrix after get convoluted
*/
/*
 * Placeholder for MNIST loading: fills every training pixel with 1.0 and
 * every test pixel with 0.0, then prints the first training image as a
 * 28-wide grid for a visual sanity check.
 * Intended real layout (not implemented here):
 * - train image : train_image[60000][784] (double, normalized, flattened)
 * - train label : train_label[60000] (int)
 * - test image  : test_image[10000][784] (double, normalized, flattened)
 * - test label  : test_label[10000] (int)
 */
void read_data(double **train_im, double **test_im) {
    for (int im = 0; im < NUM_TRAINS; im++)
        for (int px = 0; px < IMAGE_SIZE; px++)
            train_im[im][px] = 1.0;
    for (int im = 0; im < NUM_TESTS; im++)
        for (int px = 0; px < IMAGE_SIZE; px++)
            test_im[im][px] = 0.0;
    printf("read_data.c: Loaded images to arrays \n");
    // Dump the first training image, 28 values per row.
    for (int px = 0; px < 784; px++) {
        printf("%1.1f ", train_im[0][px]);
        if ((px + 1) % 28 == 0) putchar('\n');
    }
}
/*
 * Zero-pad every training image: each padded image is
 * (IMAGE_DIM + KERNEL_SIZE - 1)^2 with the original image centered and a
 * KERNEL_SIZE/2 border of zeros on every side.
 */
void padding(double **trained_im, double **padded_im) {
    int padded_dim = IMAGE_DIM + KERNEL_SIZE - 1;
    int padded_size = padded_dim * padded_dim;
    int border = KERNEL_SIZE / 2;  // integer division rounds down
    for (int im = 0; im < NUM_TRAINS; im++) {
        // Clear the whole padded buffer first...
        for (int p = 0; p < padded_size; p++)
            padded_im[im][p] = 0.0;
        // ...then copy the original pixels into the centered window.
        for (int r = 0; r < IMAGE_DIM; r++)
            for (int c = 0; c < IMAGE_DIM; c++)
                padded_im[im][(r + border) * padded_dim + (c + border)] = trained_im[im][r * IMAGE_DIM + c];
    }
}
/*
 * Baseline CPU convolution layer: convolves every padded training image with
 * a KERNEL_SIZE x KERNEL_SIZE kernel selected by kn_type and stores each
 * output pixel, plus CONV_BIAS, in result_im[image][row*IMAGE_DIM+col].
 * Exits the process with a usage message on an unknown kn_type.
 */
void conv_layer_baseline(double **padded_im, double **result_im, int kn_type) {
    int new_dim = IMAGE_DIM + KERNEL_SIZE - 1;  // padded image edge length
    // ======== 2. Do Convolution Operation ========
    // -------- 2.1 Create Kernel --------
    // NOTE(review): the explicit [0..2] assignments below assume KERNEL_SIZE == 3.
    double **convKernel = (double **) malloc( KERNEL_SIZE * sizeof(double *));
    for (int i = 0; i < KERNEL_SIZE; i++) {
        convKernel[i] = (double *) malloc(KERNEL_SIZE * sizeof(double));
    }
    if (kn_type == KN_HORIZONTAL_EDGE) {
        convKernel[0][0] = 1;
        convKernel[0][1] = 1;
        convKernel[0][2] = 1;
        convKernel[1][0] = 1;
        convKernel[1][1] = 1;
        convKernel[1][2] = 1;
        convKernel[2][0] = convKernel[0][0] * (1);
        convKernel[2][1] = convKernel[0][1] * (1);
        convKernel[2][2] = convKernel[0][2] * (1);
    }
    else if (kn_type == KN_VERTICAL_EDGE) {
        // Sobel-like vertical-edge kernel: -1 0 1 / -2 0 2 / -1 0 1.
        convKernel[0][0] = -1.0;
        convKernel[1][0] = -2.0;
        convKernel[2][0] = -1.0;
        convKernel[0][1] = 0.0;
        convKernel[1][1] = 0.0;
        convKernel[2][1] = 0.0;
        convKernel[0][2] = convKernel[0][0] * (-1);
        convKernel[1][2] = convKernel[1][0] * (-1);
        convKernel[2][2] = convKernel[2][0] * (-1);
    }
    else {
        printf("conv_layer.c: please set up the kernel \n");
        printf("Usage: Set: KN_VERTICAL_EDGE to use Kernel to detect vertical edge \n");
        printf("       Set: KN_HORIZAONTAL_EDGE to use Kernel to detect horizontal edge \n");
        exit(0);
    }
    // -------- 2.2 Load Mapped Image from the dataset --------
    double **imageMap = (double **) malloc( KERNEL_SIZE * sizeof(double *));
    for (int i = 0; i < KERNEL_SIZE; i++) {
        imageMap[i] = (double *) malloc(KERNEL_SIZE * sizeof(double));
    }
    double accu = 0.0;
    int img_i;
    for (img_i = 0; img_i < NUM_TRAINS; img_i++) {
        /// conv_i and conv_j are the top left element of kernel
        for (int conv_i = 0; conv_i <= new_dim-KERNEL_SIZE; conv_i = conv_i + CONV_STRIDE) {
            for (int conv_j = 0; conv_j <= new_dim-KERNEL_SIZE; conv_j = conv_j + CONV_STRIDE) {
                accu = 0.0;
                // Stage the kernel-sized window and accumulate the dot product.
                for (int i = 0; i < KERNEL_SIZE; i++) {
                    for (int j = 0; j < KERNEL_SIZE; j++) {
                        imageMap[i][j] = padded_im[img_i][(i+conv_i)*new_dim+(j+conv_j)];
                        accu += imageMap[i][j] * convKernel[i][j];
                    }
                }
                result_im[img_i][conv_i*IMAGE_DIM+conv_j] = accu + CONV_BIAS;
            } // end of conv_j loop
        } // end of conv_i loop
    } // end of img_i loop
    printf("test: im_i is %d \n", img_i);
    // Free the row arrays before the row-pointer arrays; the original freed
    // only the outer pointers and leaked every row.
    for (int i = 0; i < KERNEL_SIZE; i++) {
        free(convKernel[i]);
        free(imageMap[i]);
    }
    free(convKernel);
    free(imageMap);
}
/*
 * Convolve one padded image (new_dim x new_dim) with a KERNEL_SIZE x
 * KERNEL_SIZE kernel; one thread per output pixel, result written at
 * dev_conv_res[i*IMAGE_DIM+j].
 * Threads outside the valid convolution window exit early — the original
 * omitted this guard (it was commented out) and wrote outside the
 * IMAGE_DIM x IMAGE_DIM output range.
 */
__global__ void conv_cuda(double *dev_padded_im, double *dev_kernel, double *dev_conv_res, int new_dim)
{
    int i = blockIdx.y * blockDim.y + threadIdx.y;
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard: only positions where the whole kernel fits produce an output.
    if (i > (new_dim - KERNEL_SIZE) || j > (new_dim - KERNEL_SIZE))
        return;
    double partial = 0.0;
    for (int ki = 0; ki < KERNEL_SIZE; ki++) {
        for (int kj = 0; kj < KERNEL_SIZE; kj++) {
            partial += dev_padded_im[(i + ki) * new_dim + (j + kj)] * dev_kernel[ki * KERNEL_SIZE + kj];
        }
    }
    dev_conv_res[i * IMAGE_DIM + j] = partial;
}
// Trivial sanity-check kernel: every launched thread prints a greeting from
// the device. Device printf is serialized and slow -- debugging only; the
// output order across threads is unspecified.
__global__ void myKernel()
{
printf("Hello, world from the device!\n");
}
// Driver: runs the CPU baseline convolution over the training set, then the
// CUDA version, and compares timings. read_data/padding/conv_layer_baseline
// are defined elsewhere in this file.
int main () {
    struct timespec start, end;
    double diff;
    cudaEvent_t start_cu, stop_cu;
    float time;

    // ---- Host-side image buffers -------------------------------------
    double **train_image = (double **) malloc(NUM_TRAINS * sizeof(double*));
    for (int i = 0; i < NUM_TRAINS; i++) {
        train_image[i] = (double *) malloc(IMAGE_SIZE * sizeof(double));
    }
    double **test_image = (double **) malloc(NUM_TESTS * sizeof(double*));
    for (int i = 0; i < NUM_TESTS; i++) {
        test_image[i] = (double *) malloc(IMAGE_SIZE * sizeof(double));
    }
    read_data(train_image, test_image);

    int new_dim = IMAGE_DIM + KERNEL_SIZE - 1;   // padded image side length
    int new_size = new_dim * new_dim;            // elements per padded image
    double **padded_im = (double **) malloc(NUM_TRAINS * sizeof(double *));
    for (int i = 0; i < NUM_TRAINS; i++) {
        padded_im[i] = (double *) malloc(new_size * sizeof(double));
    }
    double **conv_res = (double **) malloc(NUM_TRAINS * sizeof(double *));
    for (int i = 0; i < NUM_TRAINS; i++) {
        conv_res[i] = (double *) malloc(IMAGE_SIZE * sizeof(double));
    }

    // ---- Baseline (CPU) convolution ----------------------------------
    clock_gettime(CLOCK_MONOTONIC, &start);
    padding(train_image, padded_im);
    conv_layer_baseline(padded_im, conv_res, KN_HORIZONTAL_EDGE);
    clock_gettime(CLOCK_MONOTONIC, &end);
    diff = BILLION * (end.tv_sec - start.tv_sec) + end.tv_nsec - start.tv_nsec;
    printf("Baseline conv time is %3.12f \n", diff);
    // BUGFIX: free the row buffers too; the old code freed only the
    // pointer array and leaked every row.
    for (int i = 0; i < NUM_TRAINS; i++) free(train_image[i]);
    free(train_image);
    printf(" ======== Baseline Result ======== \n");
    for (int i = 0; i < IMAGE_SIZE; i++) {
        printf("%1.1f ", conv_res[0][i]);
        if ((i+1) % IMAGE_DIM == 0) putchar('\n');
    }

    // ---- Alternative 1: CUDA convolution -----------------------------
    int size_double_padded_im = NUM_TRAINS * new_size * sizeof(double);
    int size_double_kernel = KERNEL_SIZE * KERNEL_SIZE * sizeof(double);
    int size_double_conv_res = NUM_TRAINS * IMAGE_SIZE * sizeof(double);
    double *padded_im_flat = (double *) malloc(size_double_padded_im);
    // BUGFIX: each padded image holds new_size (not IMAGE_SIZE) doubles;
    // the old loop both truncated the copy and used the wrong row stride,
    // so the flattened buffer never matched the padded images.
    for (int i = 0; i < NUM_TRAINS; i++) {
        for (int j = 0; j < new_size; j++) {
            padded_im_flat[i*new_size+j] = padded_im[i][j];
        }
    }
    puts("test 0 \n");
    double *kernel = (double *) malloc(size_double_kernel);
    for (int i = 0; i < KERNEL_SIZE * KERNEL_SIZE; i++) kernel[i] = 1.0;
    double *h_conv_res = (double *) malloc(size_double_conv_res);
    for (int i = 0; i < NUM_TRAINS * IMAGE_SIZE; i++) h_conv_res[i] = 0.0;

    double *dev_padded_im = NULL, *dev_kernel = NULL, *dev_conv_res = NULL;
    cudaMalloc((void **)&dev_padded_im, size_double_padded_im);
    cudaMalloc((void **)&dev_kernel, size_double_kernel);
    cudaMalloc((void **)&dev_conv_res, size_double_conv_res);
    cudaEventCreate( &start_cu ); cudaEventCreate( &stop_cu );
    cudaMemcpy(dev_padded_im, padded_im_flat, size_double_padded_im, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_kernel, kernel, size_double_kernel, cudaMemcpyHostToDevice);

    dim3 Block_A1(15, 15);
    dim3 Grid_A1(new_dim/Block_A1.x, new_dim/Block_A1.y);
    float time_acc = 0;
    for (int i = 0; i < NUM_TRAINS; i++) {
        cudaEventRecord(start_cu, 0);
        // BUGFIX: offset the input/output pointers to image i. The old
        // loop always passed the base pointers, so image 0 was convolved
        // NUM_TRAINS times and the remaining results were never computed.
        conv_cuda <<< Grid_A1, Block_A1 >>> (dev_padded_im + i * new_size,
                                             dev_kernel,
                                             dev_conv_res + i * IMAGE_SIZE,
                                             new_dim);
        cudaDeviceSynchronize();
        cudaEventRecord(stop_cu, 0);
        cudaEventSynchronize(stop_cu);
        cudaEventElapsedTime(&time, start_cu, stop_cu);
        time_acc += time/1000;   // accumulate seconds
    }
    printf("cuda time is %3.12f \n", time_acc);
    cudaMemcpy(h_conv_res, dev_conv_res, size_double_conv_res, cudaMemcpyDeviceToHost);
    cudaEventDestroy(start_cu); cudaEventDestroy(stop_cu);
    cudaFree(dev_padded_im); cudaFree(dev_kernel); cudaFree(dev_conv_res);
    for (int i = 0; i < NUM_TRAINS; i++) {
        for (int j = 0; j < IMAGE_SIZE; j++) {
            conv_res[i][j] = h_conv_res[i*IMAGE_SIZE+j];
        }
    }
    printf("end \n");
    myKernel<<<1,10>>>();
    cudaDeviceSynchronize();

    // ---- Cleanup (previously leaked) ---------------------------------
    for (int i = 0; i < NUM_TESTS; i++) free(test_image[i]);
    free(test_image);
    for (int i = 0; i < NUM_TRAINS; i++) { free(padded_im[i]); free(conv_res[i]); }
    free(padded_im); free(conv_res);
    free(padded_im_flat); free(kernel); free(h_conv_res);
    return 0;
}
|
33148508deaf2f130ef699b475dc04737f6d546a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
////////////////////////////////////////////////////////////////////////////////
// KNN kernel
////////////////////////////////////////////////////////////////////////////////
// KNN denoising kernel (NVIDIA imageDenoising sample, HIP-converted copy).
// One thread per output pixel; expects a 2-D launch covering at least
// imageW x imageH threads. Reads the source frame through the module-level
// texture `texImage` and writes the filtered TColor pixel to dst
// (row-major, stride imageW).
//   Noise : scale applied to the color-distance term of each weight
//   lerpC : LERP quotient blended with the original pixel color
__global__ void KNN(
TColor *dst,
int imageW,
int imageH,
float Noise,
float lerpC
){
const int ix = blockDim.x * blockIdx.x + threadIdx.x;
const int iy = blockDim.y * blockIdx.y + threadIdx.y;
//Add half of a texel to always address exact texel centers
const float x = (float)ix + 0.5f;
const float y = (float)iy + 0.5f;
if(ix < imageW && iy < imageH){
//Normalized counter for the weight threshold
float fCount = 0;
//Total sum of pixel weights
float sumWeights = 0;
//Result accumulator
float3 clr = {0, 0, 0};
//Center of the KNN window
float4 clr00 = tex2D(texImage, x, y);
//Cycle through KNN window, surrounding (x, y) texel
for(float i = -KNN_WINDOW_RADIUS; i <= KNN_WINDOW_RADIUS; i++)
for(float j = -KNN_WINDOW_RADIUS; j <= KNN_WINDOW_RADIUS; j++)
{
float4 clrIJ = tex2D(texImage, x + j, y + i);
float distanceIJ = vecLen(clr00, clrIJ);
//Derive final weight from color distance and geometric distance
float weightIJ = __expf( - (distanceIJ * Noise + (i * i + j * j) * INV_KNN_WINDOW_AREA) );
//Accumulate (x + j, y + i) texel color with computed weight
clr.x += clrIJ.x * weightIJ;
clr.y += clrIJ.y * weightIJ;
clr.z += clrIJ.z * weightIJ;
//Sum of weights for color normalization to [0..1] range
sumWeights += weightIJ;
//Update weight counter, if KNN weight for current window texel
//exceeds the weight threshold
fCount += (weightIJ > KNN_WEIGHT_THRESHOLD) ? INV_KNN_WINDOW_AREA : 0;
}
//Normalize result color by sum of weights
sumWeights = 1.0f / sumWeights;
clr.x *= sumWeights;
clr.y *= sumWeights;
clr.z *= sumWeights;
//Choose LERP quotent basing on how many texels
//within the KNN window exceeded the weight threshold
float lerpQ = (fCount > KNN_LERP_THRESHOLD) ? lerpC : 1.0f - lerpC;
//Write final result to global memory
clr.x = lerpf(clr.x, clr00.x, lerpQ);
clr.y = lerpf(clr.y, clr00.y, lerpQ);
clr.z = lerpf(clr.z, clr00.z, lerpQ);
dst[imageW * iy + ix] = make_color(clr.x, clr.y, clr.z, 0);
};
}
// Host launcher for the KNN denoising kernel: tiles the imageW x imageH
// frame into BLOCKDIM_X x BLOCKDIM_Y thread blocks (grid rounded up via
// iDivUp) and launches on the default stream.
extern "C"
void cuda_KNN(
    TColor *d_dst,
    int imageW,
    int imageH,
    float Noise,
    float lerpC
){
    const dim3 blockShape(BLOCKDIM_X, BLOCKDIM_Y);
    const dim3 gridShape(iDivUp(imageW, BLOCKDIM_X), iDivUp(imageH, BLOCKDIM_Y));
    hipLaunchKernelGGL(( KNN), dim3(gridShape), dim3(blockShape), 0, 0, d_dst, imageW, imageH, Noise, lerpC);
}
////////////////////////////////////////////////////////////////////////////////
// Stripped KNN kernel, only highlighting areas with different LERP directions
////////////////////////////////////////////////////////////////////////////////
// Diagnostic variant of the KNN kernel: instead of denoising, each pixel is
// colored by which LERP direction the full kernel would take (red when the
// weight count exceeds KNN_LERP_THRESHOLD, blue otherwise).
// NOTE(review): lerpC is accepted for signature parity but unused here.
__global__ void KNNdiag(
TColor *dst,
int imageW,
int imageH,
float Noise,
float lerpC
){
const int ix = blockDim.x * blockIdx.x + threadIdx.x;
const int iy = blockDim.y * blockIdx.y + threadIdx.y;
//Add half of a texel to always address exact texel centers
const float x = (float)ix + 0.5f;
const float y = (float)iy + 0.5f;
if(ix < imageW && iy < imageH){
//Normalized counter for the weight threshold
float fCount = 0;
//Center of the KNN window
float4 clr00 = tex2D(texImage, x, y);
//Cycle through KNN window, surrounding (x, y) texel
for(float i = -KNN_WINDOW_RADIUS; i <= KNN_WINDOW_RADIUS; i++)
for(float j = -KNN_WINDOW_RADIUS; j <= KNN_WINDOW_RADIUS; j++)
{
float4 clrIJ = tex2D(texImage, x + j, y + i);
float distanceIJ = vecLen(clr00, clrIJ);
//Derive final weight from color and geometric distance
float weightIJ = __expf( - (distanceIJ * Noise + (i * i + j * j) * INV_KNN_WINDOW_AREA) );
//Update weight counter, if KNN weight for current window texel
//exceeds the weight threshold
fCount += (weightIJ > KNN_WEIGHT_THRESHOLD) ? INV_KNN_WINDOW_AREA : 0.0f;
}
//Choose LERP quotent basing on how many texels
//within the KNN window exceeded the weight threshold
float lerpQ = (fCount > KNN_LERP_THRESHOLD) ? 1.0f : 0;
//Write final result to global memory
dst[imageW * iy + ix] = make_color(lerpQ, 0, (1.0f - lerpQ), 0);
};
}
// Host launcher for the KNNdiag diagnostic kernel; same tiling scheme as
// cuda_KNN (BLOCKDIM_X x BLOCKDIM_Y blocks, grid rounded up with iDivUp).
extern "C"
void cuda_KNNdiag(
    TColor *d_dst,
    int imageW,
    int imageH,
    float Noise,
    float lerpC
)
{
    const dim3 blockShape(BLOCKDIM_X, BLOCKDIM_Y);
    const dim3 gridShape(iDivUp(imageW, BLOCKDIM_X), iDivUp(imageH, BLOCKDIM_Y));
    hipLaunchKernelGGL(( KNNdiag), dim3(gridShape), dim3(blockShape), 0, 0, d_dst, imageW, imageH, Noise, lerpC);
}
| 33148508deaf2f130ef699b475dc04737f6d546a.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
////////////////////////////////////////////////////////////////////////////////
// KNN kernel
////////////////////////////////////////////////////////////////////////////////
// KNN denoising kernel (NVIDIA imageDenoising sample).
// One thread per output pixel; expects a 2-D launch covering at least
// imageW x imageH threads. Reads the source frame through the module-level
// texture `texImage` and writes the filtered TColor pixel to dst
// (row-major, stride imageW).
//   Noise : scale applied to the color-distance term of each weight
//   lerpC : LERP quotient blended with the original pixel color
__global__ void KNN(
TColor *dst,
int imageW,
int imageH,
float Noise,
float lerpC
){
const int ix = blockDim.x * blockIdx.x + threadIdx.x;
const int iy = blockDim.y * blockIdx.y + threadIdx.y;
//Add half of a texel to always address exact texel centers
const float x = (float)ix + 0.5f;
const float y = (float)iy + 0.5f;
if(ix < imageW && iy < imageH){
//Normalized counter for the weight threshold
float fCount = 0;
//Total sum of pixel weights
float sumWeights = 0;
//Result accumulator
float3 clr = {0, 0, 0};
//Center of the KNN window
float4 clr00 = tex2D(texImage, x, y);
//Cycle through KNN window, surrounding (x, y) texel
for(float i = -KNN_WINDOW_RADIUS; i <= KNN_WINDOW_RADIUS; i++)
for(float j = -KNN_WINDOW_RADIUS; j <= KNN_WINDOW_RADIUS; j++)
{
float4 clrIJ = tex2D(texImage, x + j, y + i);
float distanceIJ = vecLen(clr00, clrIJ);
//Derive final weight from color distance and geometric distance
float weightIJ = __expf( - (distanceIJ * Noise + (i * i + j * j) * INV_KNN_WINDOW_AREA) );
//Accumulate (x + j, y + i) texel color with computed weight
clr.x += clrIJ.x * weightIJ;
clr.y += clrIJ.y * weightIJ;
clr.z += clrIJ.z * weightIJ;
//Sum of weights for color normalization to [0..1] range
sumWeights += weightIJ;
//Update weight counter, if KNN weight for current window texel
//exceeds the weight threshold
fCount += (weightIJ > KNN_WEIGHT_THRESHOLD) ? INV_KNN_WINDOW_AREA : 0;
}
//Normalize result color by sum of weights
sumWeights = 1.0f / sumWeights;
clr.x *= sumWeights;
clr.y *= sumWeights;
clr.z *= sumWeights;
//Choose LERP quotent basing on how many texels
//within the KNN window exceeded the weight threshold
float lerpQ = (fCount > KNN_LERP_THRESHOLD) ? lerpC : 1.0f - lerpC;
//Write final result to global memory
clr.x = lerpf(clr.x, clr00.x, lerpQ);
clr.y = lerpf(clr.y, clr00.y, lerpQ);
clr.z = lerpf(clr.z, clr00.z, lerpQ);
dst[imageW * iy + ix] = make_color(clr.x, clr.y, clr.z, 0);
};
}
// Host launcher for the KNN denoising kernel: tiles the imageW x imageH
// frame into BLOCKDIM_X x BLOCKDIM_Y thread blocks (grid rounded up via
// iDivUp) and launches on the default stream.
extern "C"
void cuda_KNN(
    TColor *d_dst,
    int imageW,
    int imageH,
    float Noise,
    float lerpC
){
    const dim3 blockShape(BLOCKDIM_X, BLOCKDIM_Y);
    const dim3 gridShape(iDivUp(imageW, BLOCKDIM_X), iDivUp(imageH, BLOCKDIM_Y));
    KNN<<<gridShape, blockShape>>>(d_dst, imageW, imageH, Noise, lerpC);
}
////////////////////////////////////////////////////////////////////////////////
// Stripped KNN kernel, only highlighting areas with different LERP directions
////////////////////////////////////////////////////////////////////////////////
// Diagnostic variant of the KNN kernel: instead of denoising, each pixel is
// colored by which LERP direction the full kernel would take (red when the
// weight count exceeds KNN_LERP_THRESHOLD, blue otherwise).
// NOTE(review): lerpC is accepted for signature parity but unused here.
__global__ void KNNdiag(
TColor *dst,
int imageW,
int imageH,
float Noise,
float lerpC
){
const int ix = blockDim.x * blockIdx.x + threadIdx.x;
const int iy = blockDim.y * blockIdx.y + threadIdx.y;
//Add half of a texel to always address exact texel centers
const float x = (float)ix + 0.5f;
const float y = (float)iy + 0.5f;
if(ix < imageW && iy < imageH){
//Normalized counter for the weight threshold
float fCount = 0;
//Center of the KNN window
float4 clr00 = tex2D(texImage, x, y);
//Cycle through KNN window, surrounding (x, y) texel
for(float i = -KNN_WINDOW_RADIUS; i <= KNN_WINDOW_RADIUS; i++)
for(float j = -KNN_WINDOW_RADIUS; j <= KNN_WINDOW_RADIUS; j++)
{
float4 clrIJ = tex2D(texImage, x + j, y + i);
float distanceIJ = vecLen(clr00, clrIJ);
//Derive final weight from color and geometric distance
float weightIJ = __expf( - (distanceIJ * Noise + (i * i + j * j) * INV_KNN_WINDOW_AREA) );
//Update weight counter, if KNN weight for current window texel
//exceeds the weight threshold
fCount += (weightIJ > KNN_WEIGHT_THRESHOLD) ? INV_KNN_WINDOW_AREA : 0.0f;
}
//Choose LERP quotent basing on how many texels
//within the KNN window exceeded the weight threshold
float lerpQ = (fCount > KNN_LERP_THRESHOLD) ? 1.0f : 0;
//Write final result to global memory
dst[imageW * iy + ix] = make_color(lerpQ, 0, (1.0f - lerpQ), 0);
};
}
// Host launcher for the KNNdiag diagnostic kernel; same tiling scheme as
// cuda_KNN (BLOCKDIM_X x BLOCKDIM_Y blocks, grid rounded up with iDivUp).
extern "C"
void cuda_KNNdiag(
    TColor *d_dst,
    int imageW,
    int imageH,
    float Noise,
    float lerpC
)
{
    const dim3 blockShape(BLOCKDIM_X, BLOCKDIM_Y);
    const dim3 gridShape(iDivUp(imageW, BLOCKDIM_X), iDivUp(imageH, BLOCKDIM_Y));
    KNNdiag<<<gridShape, blockShape>>>(d_dst, imageW, imageH, Noise, lerpC);
}
|
9fde4f25ea21c6d6657af073d2d815e3ba6d34ce.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cstdlib>
#include <ctime>
#include <fstream>
using namespace std;
#define RASMER 2
// Allocate an n x m matrix as an array of n row pointers, each row holding
// m ints. Rows are uninitialized; the caller owns (and must delete[]) both
// the rows and the pointer array.
__host__ int** sozdanie(int n, int m)
{
    int **rows = new int *[n];
    for (int r = 0; r < n; ++r)
        rows[r] = new int[m];
    return rows;
}
// Fill the logical n x m region of the n_lim x m_lim matrix A with random
// values in [-2, 8], echoing each (width 3) to stdout; cells outside the
// logical region are zeroed as padding. Seeds the C RNG from the clock, so
// contents are not reproducible run-to-run.
__host__ void initialize(int** A, int n, int m, int n_lim, int m_lim)
{
    srand(time(0));
    for (int row = 0; row < n_lim; ++row)
    {
        for (int col = 0; col < m_lim; ++col)
        {
            const bool is_padding = (row >= n) || (col >= m);
            if (is_padding) {
                A[row][col] = 0;          // zero-pad, nothing printed
            }
            else {
                A[row][col] = rand()%11 - 2 ;
                cout.width(3);
                cout << A[row][col] << " ";
            }
        }
        cout << endl;
    }
}
// Tiled matrix multiply: each block computes one RASMER x RASMER tile of C,
// each thread one element, staging RASMER-wide slices of A and B through
// shared memory with a barrier before and after each partial product.
// NOTE(review): the aBegin/aEnd/row-stride arithmetic mixes N and K as row
// strides; confirm it matches the host-side flattening before reuse.
__global__ void block_proiz(int* A, int* B, int* C, int N, int M, int K)
{
int bx = blockIdx.x, by = blockIdx.y;
int tx = threadIdx.x, ty = threadIdx.y;
int aBegin = N * RASMER * by;
int aEnd = aBegin + N - 1;
int bBegin = RASMER * bx;
int aStep = RASMER, bStep = RASMER * K;
int sum = 0;
for ( int ia = aBegin, ib = bBegin; ia <= aEnd; ia += aStep, ib += bStep )
{
// Shared-memory staging tiles for the current A and B slices.
__shared__ float as [RASMER][RASMER];
__shared__ float bs [RASMER][RASMER];
as [ty][tx] = A [ia + N * ty + tx];
bs [ty][tx] = B [ib + K * ty + tx];
// Wait until the whole tile is loaded before anyone reads it.
__syncthreads ();
for ( int k = 0; k < RASMER; k++ ){
sum += as [ty][k] * bs [k][tx];
}
// Wait before the tiles are overwritten in the next iteration.
__syncthreads ();
}
C [N * RASMER * by + RASMER * bx + K * ty + tx] = sum;
}
// Driver: build two random tile-padded matrices, multiply them on the GPU
// with block_proiz, and print the result buffer.
int main()
{
    setlocale(LC_ALL, "Russian");
    // Logical sizes: A is Ax x Ay, B is Bx x By.
    int Ax(3), Ay(4), Bx(4), By(3);
    // Tile counts per dimension, rounded up to cover the logical sizes.
    int N = Ax / RASMER;
    if (Ax % RASMER > 0) N++;
    int M = Ay / RASMER;
    if (Ay % RASMER > 0) M++;
    int K = By / RASMER;
    if (By % RASMER > 0) K++;

    int **A = sozdanie(RASMER * N, RASMER * M);
    initialize(A, Ax, Ay, RASMER * N, RASMER * M);
    cout << endl;
    int **B = sozdanie(RASMER * M, RASMER * K);
    initialize(B, Bx, By, RASMER * M, RASMER * K );
    int **C = sozdanie(RASMER * N, RASMER * K);
    cout << endl << "Matrix C: " << endl << endl;

    int size_A = (RASMER * N) * (RASMER * M);
    int size_B = (RASMER * M) * (RASMER * K);
    int size_C = (RASMER * N) * (RASMER * K);
    int* host_A = new int [size_A];
    int* host_B = new int [size_B];
    int* host_C = new int [size_C];
    // Flatten the 2-D matrices row-major for the device.
    for (int i = 0; i < RASMER * N; i++){
        for (int j = 0; j < RASMER * M; j++){
            host_A [i * RASMER * M + j] = A[i][j];
            cout << A[i][j] << " ";
        }
    }
    cout << endl;
    for (int i = 0; i < RASMER * M; i++){
        for (int j = 0; j < RASMER * K; j++){
            // BUGFIX: was "RASMERr" (undeclared identifier; did not compile).
            host_B [i * RASMER * K + j] = B[i][j];
            cout << B[i][j] << " ";
        }
    }
    cout << endl;

    int *gpu_A, *gpu_B, *gpu_C;
    hipMalloc((void **)&gpu_A, sizeof(int) * size_A);
    hipMalloc((void **)&gpu_B, sizeof(int) * size_B);
    hipMalloc((void **)&gpu_C, sizeof(int) * size_C);
    hipMemcpy(gpu_A, host_A, sizeof(int) * size_A, hipMemcpyHostToDevice);
    hipMemcpy(gpu_B, host_B, sizeof(int) * size_B, hipMemcpyHostToDevice);
    dim3 gridDim(N, K);
    dim3 blockDim(RASMER, RASMER);
    hipLaunchKernelGGL(( block_proiz) , dim3(gridDim), dim3(blockDim) , 0, 0, gpu_A, gpu_B, gpu_C, N, M, K);
    // BUGFIX: copy back the whole result matrix, not just N ints.
    hipMemcpy(host_C, gpu_C, sizeof(int) * size_C, hipMemcpyDeviceToHost);
    hipFree (gpu_A);
    hipFree (gpu_B);
    hipFree (gpu_C);
    // BUGFIX: was "Ax_number*By_number*N*K" (both undeclared); the result
    // buffer holds exactly size_C elements.
    for(int i = 0; i < size_C; i++) {
        cout << host_C[i] << " ";
    }
    // Cleanup (previously leaked).
    delete[] host_A; delete[] host_B; delete[] host_C;
    return 0;
}
| 9fde4f25ea21c6d6657af073d2d815e3ba6d34ce.cu | #include <iostream>
#include <cstdlib>
#include <ctime>
#include <fstream>
using namespace std;
#define RASMER 2
// Allocate an n x m matrix as an array of n row pointers, each row holding
// m ints. Rows are uninitialized; the caller owns (and must delete[]) both
// the rows and the pointer array.
__host__ int** sozdanie(int n, int m)
{
    int **rows = new int *[n];
    for (int r = 0; r < n; ++r)
        rows[r] = new int[m];
    return rows;
}
// Fill the logical n x m region of the n_lim x m_lim matrix A with random
// values in [-2, 8], echoing each (width 3) to stdout; cells outside the
// logical region are zeroed as padding. Seeds the C RNG from the clock, so
// contents are not reproducible run-to-run.
__host__ void initialize(int** A, int n, int m, int n_lim, int m_lim)
{
    srand(time(0));
    for (int row = 0; row < n_lim; ++row)
    {
        for (int col = 0; col < m_lim; ++col)
        {
            const bool is_padding = (row >= n) || (col >= m);
            if (is_padding) {
                A[row][col] = 0;          // zero-pad, nothing printed
            }
            else {
                A[row][col] = rand()%11 - 2 ;
                cout.width(3);
                cout << A[row][col] << " ";
            }
        }
        cout << endl;
    }
}
// Tiled matrix multiply: each block computes one RASMER x RASMER tile of C,
// each thread one element, staging RASMER-wide slices of A and B through
// shared memory with a barrier before and after each partial product.
// NOTE(review): the aBegin/aEnd/row-stride arithmetic mixes N and K as row
// strides; confirm it matches the host-side flattening before reuse.
__global__ void block_proiz(int* A, int* B, int* C, int N, int M, int K)
{
int bx = blockIdx.x, by = blockIdx.y;
int tx = threadIdx.x, ty = threadIdx.y;
int aBegin = N * RASMER * by;
int aEnd = aBegin + N - 1;
int bBegin = RASMER * bx;
int aStep = RASMER, bStep = RASMER * K;
int sum = 0;
for ( int ia = aBegin, ib = bBegin; ia <= aEnd; ia += aStep, ib += bStep )
{
// Shared-memory staging tiles for the current A and B slices.
__shared__ float as [RASMER][RASMER];
__shared__ float bs [RASMER][RASMER];
as [ty][tx] = A [ia + N * ty + tx];
bs [ty][tx] = B [ib + K * ty + tx];
// Wait until the whole tile is loaded before anyone reads it.
__syncthreads ();
for ( int k = 0; k < RASMER; k++ ){
sum += as [ty][k] * bs [k][tx];
}
// Wait before the tiles are overwritten in the next iteration.
__syncthreads ();
}
C [N * RASMER * by + RASMER * bx + K * ty + tx] = sum;
}
// Driver: build two random tile-padded matrices, multiply them on the GPU
// with block_proiz, and print the result buffer.
int main()
{
    setlocale(LC_ALL, "Russian");
    // Logical sizes: A is Ax x Ay, B is Bx x By.
    int Ax(3), Ay(4), Bx(4), By(3);
    // Tile counts per dimension, rounded up to cover the logical sizes.
    int N = Ax / RASMER;
    if (Ax % RASMER > 0) N++;
    int M = Ay / RASMER;
    if (Ay % RASMER > 0) M++;
    int K = By / RASMER;
    if (By % RASMER > 0) K++;

    int **A = sozdanie(RASMER * N, RASMER * M);
    initialize(A, Ax, Ay, RASMER * N, RASMER * M);
    cout << endl;
    int **B = sozdanie(RASMER * M, RASMER * K);
    initialize(B, Bx, By, RASMER * M, RASMER * K );
    int **C = sozdanie(RASMER * N, RASMER * K);
    cout << endl << "Matrix C: " << endl << endl;

    int size_A = (RASMER * N) * (RASMER * M);
    int size_B = (RASMER * M) * (RASMER * K);
    int size_C = (RASMER * N) * (RASMER * K);
    int* host_A = new int [size_A];
    int* host_B = new int [size_B];
    int* host_C = new int [size_C];
    // Flatten the 2-D matrices row-major for the device.
    for (int i = 0; i < RASMER * N; i++){
        for (int j = 0; j < RASMER * M; j++){
            host_A [i * RASMER * M + j] = A[i][j];
            cout << A[i][j] << " ";
        }
    }
    cout << endl;
    for (int i = 0; i < RASMER * M; i++){
        for (int j = 0; j < RASMER * K; j++){
            // BUGFIX: was "RASMERr" (undeclared identifier; did not compile).
            host_B [i * RASMER * K + j] = B[i][j];
            cout << B[i][j] << " ";
        }
    }
    cout << endl;

    int *gpu_A, *gpu_B, *gpu_C;
    cudaMalloc((void **)&gpu_A, sizeof(int) * size_A);
    cudaMalloc((void **)&gpu_B, sizeof(int) * size_B);
    cudaMalloc((void **)&gpu_C, sizeof(int) * size_C);
    cudaMemcpy(gpu_A, host_A, sizeof(int) * size_A, cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_B, host_B, sizeof(int) * size_B, cudaMemcpyHostToDevice);
    dim3 gridDim(N, K);
    dim3 blockDim(RASMER, RASMER);
    block_proiz <<< gridDim, blockDim >>> (gpu_A, gpu_B, gpu_C, N, M, K);
    // BUGFIX: copy back the whole result matrix, not just N ints.
    cudaMemcpy(host_C, gpu_C, sizeof(int) * size_C, cudaMemcpyDeviceToHost);
    cudaFree (gpu_A);
    cudaFree (gpu_B);
    cudaFree (gpu_C);
    // BUGFIX: was "Ax_number*By_number*N*K" (both undeclared); the result
    // buffer holds exactly size_C elements.
    for(int i = 0; i < size_C; i++) {
        cout << host_C[i] << " ";
    }
    // Cleanup (previously leaked).
    delete[] host_A; delete[] host_B; delete[] host_C;
    return 0;
}
|
2137ff4611131fd05579dfd372c69192cfe8bf1b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2020 Michael Koesel and respective contributors
// SPDX-License-Identifier: MIT
// See accompanying LICENSE file for detailed information
#include "mapping/laser_to_meas_grid.h"
#include "mapping/opengl/renderer.h"
#include "mapping/kernel/measurement_grid.h"
// Constructs the measurement-grid builder: allocates the device-side grid of
// grid_size x grid_size MeasurementCells and the OpenGL renderer used to map
// polar range data into Cartesian space.
LaserMeasurementGrid::LaserMeasurementGrid(const Params& params, float grid_length, float resolution)
: grid_size(static_cast<int>(grid_length / resolution)), params(params)
{
int grid_cell_count = grid_size * grid_size;
CHECK_ERROR(hipMalloc(&meas_grid, grid_cell_count * sizeof(dogm::MeasurementCell)));
renderer = std::make_unique<Renderer>(grid_size, params.fov, grid_length, params.max_range);
}
// Releases the device-side measurement grid.
LaserMeasurementGrid::~LaserMeasurementGrid()
{
CHECK_ERROR(hipFree(meas_grid));
}
// Converts one laser scan (one float range per beam) into the measurement
// grid: (1) upload ranges and rasterize into a polar texture, (2) render the
// polar texture into a Cartesian RGBA frame via OpenGL, (3) convert that
// frame into MeasurementCells on the device. Returns the device pointer to
// the grid owned by this object (valid until the next call / destruction).
dogm::MeasurementCell* LaserMeasurementGrid::generateGrid(const std::vector<float>& measurements)
{
const int num_measurements = measurements.size();
float* d_measurements;
CHECK_ERROR(hipMalloc(&d_measurements, num_measurements * sizeof(float)));
CHECK_ERROR(
hipMemcpy(d_measurements, measurements.data(), num_measurements * sizeof(float), hipMemcpyHostToDevice));
// Polar raster: one column per beam, one row per range bin.
const int polar_width = num_measurements;
const int polar_height = static_cast<int>(params.max_range / params.resolution);
dim3 dim_block(32, 32);
dim3 grid_dim(divUp(polar_width, dim_block.x), divUp(polar_height, dim_block.y));
dim3 cart_grid_dim(divUp(grid_size, dim_block.x), divUp(grid_size, dim_block.y));
const float anisotropy_level = 16.0f;
Texture polar_texture(polar_width, polar_height, anisotropy_level);
hipSurfaceObject_t polar_surface;
// create polar texture
polar_texture.beginCudaAccess(&polar_surface);
hipLaunchKernelGGL(( createPolarGridTextureKernel), dim3(grid_dim), dim3(dim_block), 0, 0, polar_surface, d_measurements, polar_width, polar_height,
params.resolution);
CHECK_ERROR(hipGetLastError());
polar_texture.endCudaAccess(polar_surface);
// render cartesian image to texture using polar texture
renderer->renderToTexture(polar_texture);
auto framebuffer = renderer->getFrameBuffer();
hipSurfaceObject_t cartesian_surface;
framebuffer->beginCudaAccess(&cartesian_surface);
// transform RGBA texture to measurement grid
hipLaunchKernelGGL(( cartesianGridToMeasurementGridKernel), dim3(cart_grid_dim), dim3(dim_block), 0, 0, meas_grid, cartesian_surface, grid_size);
CHECK_ERROR(hipGetLastError());
framebuffer->endCudaAccess(cartesian_surface);
CHECK_ERROR(hipFree(d_measurements));
// Block until the grid is fully written before handing it to the caller.
CHECK_ERROR(hipDeviceSynchronize());
return meas_grid;
}
| 2137ff4611131fd05579dfd372c69192cfe8bf1b.cu | // Copyright (c) 2020 Michael Koesel and respective contributors
// SPDX-License-Identifier: MIT
// See accompanying LICENSE file for detailed information
#include "mapping/laser_to_meas_grid.h"
#include "mapping/opengl/renderer.h"
#include "mapping/kernel/measurement_grid.h"
// Constructs the measurement-grid builder: allocates the device-side grid of
// grid_size x grid_size MeasurementCells and the OpenGL renderer used to map
// polar range data into Cartesian space.
LaserMeasurementGrid::LaserMeasurementGrid(const Params& params, float grid_length, float resolution)
: grid_size(static_cast<int>(grid_length / resolution)), params(params)
{
int grid_cell_count = grid_size * grid_size;
CHECK_ERROR(cudaMalloc(&meas_grid, grid_cell_count * sizeof(dogm::MeasurementCell)));
renderer = std::make_unique<Renderer>(grid_size, params.fov, grid_length, params.max_range);
}
// Releases the device-side measurement grid.
LaserMeasurementGrid::~LaserMeasurementGrid()
{
CHECK_ERROR(cudaFree(meas_grid));
}
// Converts one laser scan (one float range per beam) into the measurement
// grid: (1) upload ranges and rasterize into a polar texture, (2) render the
// polar texture into a Cartesian RGBA frame via OpenGL, (3) convert that
// frame into MeasurementCells on the device. Returns the device pointer to
// the grid owned by this object (valid until the next call / destruction).
dogm::MeasurementCell* LaserMeasurementGrid::generateGrid(const std::vector<float>& measurements)
{
const int num_measurements = measurements.size();
float* d_measurements;
CHECK_ERROR(cudaMalloc(&d_measurements, num_measurements * sizeof(float)));
CHECK_ERROR(
cudaMemcpy(d_measurements, measurements.data(), num_measurements * sizeof(float), cudaMemcpyHostToDevice));
// Polar raster: one column per beam, one row per range bin.
const int polar_width = num_measurements;
const int polar_height = static_cast<int>(params.max_range / params.resolution);
dim3 dim_block(32, 32);
dim3 grid_dim(divUp(polar_width, dim_block.x), divUp(polar_height, dim_block.y));
dim3 cart_grid_dim(divUp(grid_size, dim_block.x), divUp(grid_size, dim_block.y));
const float anisotropy_level = 16.0f;
Texture polar_texture(polar_width, polar_height, anisotropy_level);
cudaSurfaceObject_t polar_surface;
// create polar texture
polar_texture.beginCudaAccess(&polar_surface);
createPolarGridTextureKernel<<<grid_dim, dim_block>>>(polar_surface, d_measurements, polar_width, polar_height,
params.resolution);
CHECK_ERROR(cudaGetLastError());
polar_texture.endCudaAccess(polar_surface);
// render cartesian image to texture using polar texture
renderer->renderToTexture(polar_texture);
auto framebuffer = renderer->getFrameBuffer();
cudaSurfaceObject_t cartesian_surface;
framebuffer->beginCudaAccess(&cartesian_surface);
// transform RGBA texture to measurement grid
cartesianGridToMeasurementGridKernel<<<cart_grid_dim, dim_block>>>(meas_grid, cartesian_surface, grid_size);
CHECK_ERROR(cudaGetLastError());
framebuffer->endCudaAccess(cartesian_surface);
CHECK_ERROR(cudaFree(d_measurements));
// Block until the grid is fully written before handing it to the caller.
CHECK_ERROR(cudaDeviceSynchronize());
return meas_grid;
}
|
bc5f106ef42b2ce48f932b21703b7b430437dac5.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime_api.h>
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
// Add your kernel here
// Device-side scalar addition: *c = *a + *b. All three pointers must point
// to device memory holding a single int each.
__global__ void add(int *a, int *b, int *c) {
*c = *a + *b;
}
// main
// Minimal HIP example: add two ints on the device and print the result.
int main(void)
{
    const int size = sizeof(int);
    int a = 2, b = 7, c = 0;
    int *d_a = NULL, *d_b = NULL, *d_c = NULL;

    // One int per operand on the device.
    hipMalloc ((void **) &d_a, size);
    hipMalloc ((void **) &d_b, size);
    hipMalloc ((void **) &d_c, size);

    // Ship both operands over.
    hipMemcpy (d_a, &a, size, hipMemcpyHostToDevice);
    hipMemcpy (d_b, &b, size, hipMemcpyHostToDevice);

    // Single-thread launch computes *d_c = *d_a + *d_b.
    hipLaunchKernelGGL(( add), dim3(1),dim3(1), 0, 0, d_a, d_b, d_c);

    // The blocking copy implicitly waits for the kernel to finish.
    hipMemcpy (&c, d_c, size, hipMemcpyDeviceToHost);

    hipFree (d_a);
    hipFree (d_b);
    hipFree (d_c);

    printf("Task Completed: c = %d + %d = %d\n" ,a, b, c);
    return 0;
}
| bc5f106ef42b2ce48f932b21703b7b430437dac5.cu | #include <cuda_runtime_api.h>
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
// Add your kernel here
// Device-side scalar addition: *c = *a + *b. All three pointers must point
// to device memory holding a single int each.
__global__ void add(int *a, int *b, int *c) {
*c = *a + *b;
}
// main
// Minimal CUDA example: add two ints on the device and print the result.
int main(void)
{
    const int size = sizeof(int);
    int a = 2, b = 7, c = 0;
    int *d_a = NULL, *d_b = NULL, *d_c = NULL;

    // One int per operand on the device.
    cudaMalloc ((void **) &d_a, size);
    cudaMalloc ((void **) &d_b, size);
    cudaMalloc ((void **) &d_c, size);

    // Ship both operands over.
    cudaMemcpy (d_a, &a, size, cudaMemcpyHostToDevice);
    cudaMemcpy (d_b, &b, size, cudaMemcpyHostToDevice);

    // Single-thread launch computes *d_c = *d_a + *d_b.
    add<<<1,1>>>(d_a, d_b, d_c);

    // The blocking copy implicitly waits for the kernel to finish.
    cudaMemcpy (&c, d_c, size, cudaMemcpyDeviceToHost);

    cudaFree (d_a);
    cudaFree (d_b);
    cudaFree (d_c);

    printf("Task Completed: c = %d + %d = %d\n" ,a, b, c);
    return 0;
}
|
4bccf7a04c6e81f44daac23b63b9752b9c473147.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel_dsa_kernel.hu"
// The kernels below look machine-generated (polyhedral-compiler style, see
// kernel_dsa_kernel.hu) and implement a small Smith-Waterman-like dynamic
// program over 8-character sequences on 8x8 / 16x8 working arrays. Their
// statement order is significant; code is intentionally left unmodified.

// kernel0: seed row 0 of best_gap_v_ext with -260 (sentinel/initial gap
// score used again in kernel9 as the gap-open penalty -- TODO confirm).
__global__ void kernel0(int *best_gap_v_ext)
{
int b0 = blockIdx.x;
int t0 = threadIdx.x;
int private_best_gap_v_ext[1][1];
{
private_best_gap_v_ext[0][0] = (-260);
best_gap_v_ext[0 * 8 + t0] = private_best_gap_v_ext[0][0];
}
}
// kernel1: copy the alt sequence into column 0 of alt_ext (one char per thread).
__global__ void kernel1(char *alt, char *alt_ext)
{
int b0 = blockIdx.x;
int t0 = threadIdx.x;
char private_alt[1];
char private_alt_ext[1][1];
{
private_alt[0] = alt[t0];
private_alt_ext[0][0] = private_alt[0];
alt_ext[t0 * 8 + 0] = private_alt_ext[0][0];
}
}
// kernel2: seed row 0 of gap_size_v_ext with 1.
__global__ void kernel2(int *gap_size_v_ext)
{
int b0 = blockIdx.x;
int t0 = threadIdx.x;
int private_gap_size_v_ext[1][1];
{
private_gap_size_v_ext[0][0] = 1;
gap_size_v_ext[0 * 8 + t0] = private_gap_size_v_ext[0][0];
}
}
// kernel3: copy the ref sequence into row 0 of ref_ext.
__global__ void kernel3(char *ref, char *ref_ext)
{
int b0 = blockIdx.x;
int t0 = threadIdx.x;
char private_ref[1];
char private_ref_ext[1][1];
{
private_ref[0] = ref[t0];
private_ref_ext[0][0] = private_ref[0];
ref_ext[0 * 8 + t0] = private_ref_ext[0][0];
}
}
// kernel4: via shared memory, propagate column 0 of alt_ext across columns
// 1..7 (the sequential inner copy replicates each row's first element).
__global__ void kernel4(char *alt_ext)
{
int b0 = blockIdx.x;
int t0 = threadIdx.x;
__shared__ char shared_alt_ext[16][8];
{
if (t0 <= 7)
for (int c0 = 0; c0 <= 15; c0 += 1)
shared_alt_ext[c0][t0] = alt_ext[c0 * 8 + t0];
__syncthreads();
for (int c3 = 1; c3 <= 7; c3 += 1)
shared_alt_ext[t0][c3] = shared_alt_ext[t0][c3 - 1];
__syncthreads();
if (t0 >= 1 && t0 <= 7)
for (int c0 = 0; c0 <= 15; c0 += 1)
alt_ext[c0 * 8 + t0] = shared_alt_ext[c0][t0];
}
}
// kernel5: analogous to kernel4, but propagates row 0 of ref_ext down rows
// 1..15 (one column per thread).
__global__ void kernel5(char *ref_ext)
{
int b0 = blockIdx.x;
int t0 = threadIdx.x;
__shared__ char shared_ref_ext[16][8];
{
for (int c0 = 0; c0 <= 15; c0 += 1)
shared_ref_ext[c0][t0] = ref_ext[c0 * 8 + t0];
__syncthreads();
for (int c3 = 1; c3 <= 15; c3 += 1)
shared_ref_ext[c3][t0] = shared_ref_ext[c3 - 1][t0];
__syncthreads();
for (int c0 = 1; c0 <= 15; c0 += 1)
ref_ext[c0 * 8 + t0] = shared_ref_ext[c0][t0];
}
}
// kernel6: per-cell similarity score (+200 match / -150 mismatch), clamped
// to -100000000 on the first row/column and stored as the boundary
// step_diag values; interior cells go to sim_score_ext.
__global__ void kernel6(char *alt_ext, char *ref_ext, int *sim_score_ext, int *step_diag_ext)
{
int b0 = blockIdx.y, b1 = blockIdx.x;
int t0 = threadIdx.y, t1 = threadIdx.x;
int private_sim_score_ext[1][1];
{
private_sim_score_ext[0][0] = ((alt_ext[t0 * 8 + t1] == ref_ext[t0 * 8 + t1]) ? 200 : (-150));
if (t0 == 0) {
step_diag_ext[0 * 8 + t1] = (((-100000000) > private_sim_score_ext[0][0]) ? (-100000000) : private_sim_score_ext[0][0]);
} else if (t1 == 0) {
step_diag_ext[t0 * 8 + 0] = (((-100000000) > private_sim_score_ext[0][0]) ? (-100000000) : private_sim_score_ext[0][0]);
}
if (t0 >= 1 && t1 >= 1)
sim_score_ext[t0 * 8 + t1] = private_sim_score_ext[0][0];
}
}
// kernel7: seed column 0 of best_gap_h_ext with -260.
__global__ void kernel7(int *best_gap_h_ext)
{
int b0 = blockIdx.x;
int t0 = threadIdx.x;
int private_best_gap_h_ext[1][1];
{
private_best_gap_h_ext[0][0] = (-260);
best_gap_h_ext[t0 * 8 + 0] = private_best_gap_h_ext[0][0];
}
}
// kernel8: seed column 0 of gap_size_h_ext with 1.
__global__ void kernel8(int *gap_size_h_ext)
{
int b0 = blockIdx.x;
int t0 = threadIdx.x;
int private_gap_size_h_ext[1][1];
{
private_gap_size_h_ext[0][0] = 1;
gap_size_h_ext[t0 * 8 + 0] = private_gap_size_h_ext[0][0];
}
}
// kernel9: compute one anti-diagonal (index c0) of the DP matrix. Each
// in-range thread updates horizontal/vertical gap scores (open -260 vs
// extend -11), takes the best of diagonal/down/right steps into H, and
// records a backtrack code in bt (0 = diagonal, negative = horizontal gap
// of |code| columns, positive = vertical gap of code rows -- TODO confirm
// sign convention against the host-side traceback).
__global__ void kernel9(int *H, int *H_ext, int *best_gap_h_ext, int *best_gap_v_ext, int *bt, int *gap_size_h_ext, int *gap_size_v_ext, int *sim_score_ext, int *step_diag_ext, int c0)
{
int b0 = blockIdx.x;
int t0 = threadIdx.x;
int private_H_ext_6[1][1];
int private_bt_tmp1_ext[1][1];
int private_bt_tmp2_ext[1][1];
int private_kd_ext[1][1];
int private_ki_ext[1][1];
int private_step_diag_ext[1][1];
int private_step_down_ext[1][1];
int private_step_right_ext[1][1];
int private_sw_tmp1_ext[1][1];
int private_sw_tmp2_ext[1][1];
if (c0 >= t0 && t0 + 7 >= c0) {
if ((t0 >= 1 && c0 == t0) || t0 == 0)
private_step_diag_ext[0][0] = step_diag_ext[t0 * 8 + (-t0 + c0)];
if (c0 >= t0 + 1)
if ((H_ext[t0 * 8 + (-t0 + c0 - 1)] + (-260)) > (best_gap_h_ext[t0 * 8 + (-t0 + c0 - 1)] + (-11))) {
best_gap_h_ext[t0 * 8 + (-t0 + c0)] = (H_ext[t0 * 8 + (-t0 + c0 - 1)] + (-260));
gap_size_h_ext[t0 * 8 + (-t0 + c0)] = 1;
} else {
best_gap_h_ext[t0 * 8 + (-t0 + c0)] = (best_gap_h_ext[t0 * 8 + (-t0 + c0 - 1)] + (-11));
gap_size_h_ext[t0 * 8 + (-t0 + c0)] = (gap_size_h_ext[t0 * 8 + (-t0 + c0 - 1)] + 1);
}
if (t0 >= 1)
if ((H_ext[(t0 - 1) * 8 + (-t0 + c0)] + (-260)) > (best_gap_v_ext[(t0 - 1) * 8 + (-t0 + c0)] + (-11))) {
best_gap_v_ext[t0 * 8 + (-t0 + c0)] = (H_ext[(t0 - 1) * 8 + (-t0 + c0)] + (-260));
gap_size_v_ext[t0 * 8 + (-t0 + c0)] = 1;
} else {
best_gap_v_ext[t0 * 8 + (-t0 + c0)] = (best_gap_v_ext[(t0 - 1) * 8 + (-t0 + c0)] + (-11));
gap_size_v_ext[t0 * 8 + (-t0 + c0)] = (gap_size_v_ext[(t0 - 1) * 8 + (-t0 + c0)] + 1);
}
private_kd_ext[0][0] = gap_size_v_ext[t0 * 8 + (-t0 + c0)];
private_step_down_ext[0][0] = (((-100000000) > best_gap_v_ext[t0 * 8 + (-t0 + c0)]) ? (-100000000) : best_gap_v_ext[t0 * 8 + (-t0 + c0)]);
if (t0 >= 1 && c0 >= t0 + 1)
private_step_diag_ext[0][0] = (((-100000000) > (H_ext[(t0 - 1) * 8 + (-t0 + c0 - 1)] + sim_score_ext[t0 * 8 + (-t0 + c0)])) ? (-100000000) : (H_ext[(t0 - 1) * 8 + (-t0 + c0 - 1)] + sim_score_ext[t0 * 8 + (-t0 + c0)]));
private_step_right_ext[0][0] = (((-100000000) > best_gap_h_ext[t0 * 8 + (-t0 + c0)]) ? (-100000000) : best_gap_h_ext[t0 * 8 + (-t0 + c0)]);
private_ki_ext[0][0] = gap_size_h_ext[t0 * 8 + (-t0 + c0)];
private_sw_tmp1_ext[0][0] = ((private_step_diag_ext[0][0] > private_step_down_ext[0][0]) ? private_step_diag_ext[0][0] : private_step_down_ext[0][0]);
private_sw_tmp2_ext[0][0] = ((private_sw_tmp1_ext[0][0] > private_step_right_ext[0][0]) ? private_sw_tmp1_ext[0][0] : private_step_right_ext[0][0]);
private_H_ext_6[0][0] = private_sw_tmp2_ext[0][0];
H[(t0 + 1) * 9 + (-t0 + c0 + 1)] = private_H_ext_6[0][0];
private_bt_tmp1_ext[0][0] = ((private_H_ext_6[0][0] == private_step_right_ext[0][0]) ? (-private_ki_ext[0][0]) : private_kd_ext[0][0]);
private_bt_tmp2_ext[0][0] = ((private_H_ext_6[0][0] == private_step_diag_ext[0][0]) ? 0 : private_bt_tmp1_ext[0][0]);
bt[t0 * 8 + (-t0 + c0)] = private_bt_tmp2_ext[0][0];
if (c0 <= 21)
H_ext[t0 * 8 + (-t0 + c0)] = private_H_ext_6[0][0];
}
}
| 4bccf7a04c6e81f44daac23b63b9752b9c473147.cu | #include "kernel_dsa_kernel.hu"
// Initialize row 0 of the vertical best-gap score table to -260 (the gap-open
// penalty used by kernel9); one thread per column.
__global__ void kernel0(int *best_gap_v_ext)
{
    best_gap_v_ext[threadIdx.x] = -260;
}
// Copy alt[i] into column 0 of the 8-wide alt_ext staging table, one thread
// per entry; kernel4 later propagates column 0 across the remaining columns.
__global__ void kernel1(char *alt, char *alt_ext)
{
    const int i = threadIdx.x;
    alt_ext[i * 8] = alt[i];
}
// Initialize row 0 of the vertical gap-length table to 1; one thread per column.
__global__ void kernel2(int *gap_size_v_ext)
{
    gap_size_v_ext[threadIdx.x] = 1;
}
// Copy ref[i] into row 0 of the 8-wide ref_ext staging table, one thread per
// column; kernel5 later propagates row 0 down the remaining rows.
__global__ void kernel3(char *ref, char *ref_ext)
{
    const int i = threadIdx.x;
    ref_ext[i] = ref[i];
}
// Propagate column 0 of alt_ext rightward through columns 1..7, one row per
// thread. NOTE(review): the shared tile is 16x8 and the middle loop indexes
// rows by t0, which assumes a 16-thread block — confirm against the host-side
// launch configuration.
__global__ void kernel4(char *alt_ext)
{
    int b0 = blockIdx.x;
    int t0 = threadIdx.x;
    __shared__ char shared_alt_ext[16][8];
    {
        // stage the whole 16x8 table in shared memory: threads 0..7 each load one column
        if (t0 <= 7)
            for (int c0 = 0; c0 <= 15; c0 += 1)
                shared_alt_ext[c0][t0] = alt_ext[c0 * 8 + t0];
        __syncthreads();
        // thread t0 sequentially copies its row's column-0 value across the row
        for (int c3 = 1; c3 <= 7; c3 += 1)
            shared_alt_ext[t0][c3] = shared_alt_ext[t0][c3 - 1];
        __syncthreads();
        // write back columns 1..7 (column 0 is unchanged)
        if (t0 >= 1 && t0 <= 7)
            for (int c0 = 0; c0 <= 15; c0 += 1)
                alt_ext[c0 * 8 + t0] = shared_alt_ext[c0][t0];
    }
}
// Propagate row 0 of ref_ext downward through rows 1..15, one column per
// thread. NOTE(review): appears to assume an 8-thread block (one thread per
// column of the 16x8 table) — confirm against the host-side launch.
__global__ void kernel5(char *ref_ext)
{
    int b0 = blockIdx.x;
    int t0 = threadIdx.x;
    __shared__ char shared_ref_ext[16][8];
    {
        // stage the whole table in shared memory (thread t0 loads column t0)
        for (int c0 = 0; c0 <= 15; c0 += 1)
            shared_ref_ext[c0][t0] = ref_ext[c0 * 8 + t0];
        __syncthreads();
        // copy row 0's value down the column, one row at a time (sequential per thread)
        for (int c3 = 1; c3 <= 15; c3 += 1)
            shared_ref_ext[c3][t0] = shared_ref_ext[c3 - 1][t0];
        __syncthreads();
        // write back rows 1..15 (row 0 is unchanged)
        for (int c0 = 1; c0 <= 15; c0 += 1)
            ref_ext[c0 * 8 + t0] = shared_ref_ext[c0][t0];
    }
}
// Per-cell similarity score: +200 when the alt and ref characters match,
// -150 otherwise. Border cells (row 0 or column 0) write the score, floored
// at -100000000, directly into step_diag_ext; interior cells store it into
// sim_score_ext, which kernel9 reads during the wavefront sweep.
__global__ void kernel6(char *alt_ext, char *ref_ext, int *sim_score_ext, int *step_diag_ext)
{
    int b0 = blockIdx.y, b1 = blockIdx.x;
    int t0 = threadIdx.y, t1 = threadIdx.x;
    int private_sim_score_ext[1][1];
    {
        private_sim_score_ext[0][0] = ((alt_ext[t0 * 8 + t1] == ref_ext[t0 * 8 + t1]) ? 200 : (-150));
        if (t0 == 0) {
            // first row: seed the diagonal-step table
            step_diag_ext[0 * 8 + t1] = (((-100000000) > private_sim_score_ext[0][0]) ? (-100000000) : private_sim_score_ext[0][0]);
        } else if (t1 == 0) {
            // first column: seed the diagonal-step table
            step_diag_ext[t0 * 8 + 0] = (((-100000000) > private_sim_score_ext[0][0]) ? (-100000000) : private_sim_score_ext[0][0]);
        }
        if (t0 >= 1 && t1 >= 1)
            sim_score_ext[t0 * 8 + t1] = private_sim_score_ext[0][0];
    }
}
// Initialize column 0 of the horizontal best-gap score table to -260 (the
// gap-open penalty used by kernel9); one thread per row.
__global__ void kernel7(int *best_gap_h_ext)
{
    best_gap_h_ext[threadIdx.x * 8] = -260;
}
// Initialize column 0 of the horizontal gap-length table to 1; one thread per row.
__global__ void kernel8(int *gap_size_h_ext)
{
    gap_size_h_ext[threadIdx.x * 8] = 1;
}
// One anti-diagonal step (c0) of a Smith–Waterman-style alignment wavefront
// with affine gaps over an 8-column tile. Thread t0 owns row t0 and, on this
// invocation, updates the single cell (t0, c0 - t0); the host calls the
// kernel once per diagonal. Observed constants: gap open -260, gap extend
// -11, score floor -100000000 (similarity values arrive precomputed in
// sim_score_ext / step_diag_ext from kernel6). H is a 9-wide padded score
// matrix, bt the backtrack matrix. NOTE(review): the tile height / thread
// count is not visible here — confirm against the host-side launch.
__global__ void kernel9(int *H, int *H_ext, int *best_gap_h_ext, int *best_gap_v_ext, int *bt, int *gap_size_h_ext, int *gap_size_v_ext, int *sim_score_ext, int *step_diag_ext, int c0)
{
    int b0 = blockIdx.x;
    int t0 = threadIdx.x;
    // per-thread scratch (the generator keeps these as 1x1 arrays)
    int private_H_ext_6[1][1];
    int private_bt_tmp1_ext[1][1];
    int private_bt_tmp2_ext[1][1];
    int private_kd_ext[1][1];
    int private_ki_ext[1][1];
    int private_step_diag_ext[1][1];
    int private_step_down_ext[1][1];
    int private_step_right_ext[1][1];
    int private_sw_tmp1_ext[1][1];
    int private_sw_tmp2_ext[1][1];
    // only threads whose cell (t0, c0 - t0) lies inside the 8-wide tile work
    if (c0 >= t0 && t0 + 7 >= c0) {
        // boundary cells (first row or first column) read the precomputed diagonal step
        if ((t0 >= 1 && c0 == t0) || t0 == 0)
            private_step_diag_ext[0][0] = step_diag_ext[t0 * 8 + (-t0 + c0)];
        // horizontal gap: open a new gap from H (cost -260) or extend the running one (cost -11)
        if (c0 >= t0 + 1)
            if ((H_ext[t0 * 8 + (-t0 + c0 - 1)] + (-260)) > (best_gap_h_ext[t0 * 8 + (-t0 + c0 - 1)] + (-11))) {
                best_gap_h_ext[t0 * 8 + (-t0 + c0)] = (H_ext[t0 * 8 + (-t0 + c0 - 1)] + (-260));
                gap_size_h_ext[t0 * 8 + (-t0 + c0)] = 1;
            } else {
                best_gap_h_ext[t0 * 8 + (-t0 + c0)] = (best_gap_h_ext[t0 * 8 + (-t0 + c0 - 1)] + (-11));
                gap_size_h_ext[t0 * 8 + (-t0 + c0)] = (gap_size_h_ext[t0 * 8 + (-t0 + c0 - 1)] + 1);
            }
        // vertical gap: same open-vs-extend choice against the row above
        if (t0 >= 1)
            if ((H_ext[(t0 - 1) * 8 + (-t0 + c0)] + (-260)) > (best_gap_v_ext[(t0 - 1) * 8 + (-t0 + c0)] + (-11))) {
                best_gap_v_ext[t0 * 8 + (-t0 + c0)] = (H_ext[(t0 - 1) * 8 + (-t0 + c0)] + (-260));
                gap_size_v_ext[t0 * 8 + (-t0 + c0)] = 1;
            } else {
                best_gap_v_ext[t0 * 8 + (-t0 + c0)] = (best_gap_v_ext[(t0 - 1) * 8 + (-t0 + c0)] + (-11));
                gap_size_v_ext[t0 * 8 + (-t0 + c0)] = (gap_size_v_ext[(t0 - 1) * 8 + (-t0 + c0)] + 1);
            }
        private_kd_ext[0][0] = gap_size_v_ext[t0 * 8 + (-t0 + c0)];
        // candidate scores, each floored at -100000000
        private_step_down_ext[0][0] = (((-100000000) > best_gap_v_ext[t0 * 8 + (-t0 + c0)]) ? (-100000000) : best_gap_v_ext[t0 * 8 + (-t0 + c0)]);
        // interior cells: diagonal step = H(up-left) + similarity score
        if (t0 >= 1 && c0 >= t0 + 1)
            private_step_diag_ext[0][0] = (((-100000000) > (H_ext[(t0 - 1) * 8 + (-t0 + c0 - 1)] + sim_score_ext[t0 * 8 + (-t0 + c0)])) ? (-100000000) : (H_ext[(t0 - 1) * 8 + (-t0 + c0 - 1)] + sim_score_ext[t0 * 8 + (-t0 + c0)]));
        private_step_right_ext[0][0] = (((-100000000) > best_gap_h_ext[t0 * 8 + (-t0 + c0)]) ? (-100000000) : best_gap_h_ext[t0 * 8 + (-t0 + c0)]);
        private_ki_ext[0][0] = gap_size_h_ext[t0 * 8 + (-t0 + c0)];
        // new score: H = max(diag, down, right)
        private_sw_tmp1_ext[0][0] = ((private_step_diag_ext[0][0] > private_step_down_ext[0][0]) ? private_step_diag_ext[0][0] : private_step_down_ext[0][0]);
        private_sw_tmp2_ext[0][0] = ((private_sw_tmp1_ext[0][0] > private_step_right_ext[0][0]) ? private_sw_tmp1_ext[0][0] : private_step_right_ext[0][0]);
        private_H_ext_6[0][0] = private_sw_tmp2_ext[0][0];
        H[(t0 + 1) * 9 + (-t0 + c0 + 1)] = private_H_ext_6[0][0];
        // backtrack code: 0 = diagonal (takes precedence), -ki = horizontal gap, kd = vertical gap
        private_bt_tmp1_ext[0][0] = ((private_H_ext_6[0][0] == private_step_right_ext[0][0]) ? (-private_ki_ext[0][0]) : private_kd_ext[0][0]);
        private_bt_tmp2_ext[0][0] = ((private_H_ext_6[0][0] == private_step_diag_ext[0][0]) ? 0 : private_bt_tmp1_ext[0][0]);
        bt[t0 * 8 + (-t0 + c0)] = private_bt_tmp2_ext[0][0];
        // keep the extended score table current for later diagonals
        if (c0 <= 21)
            H_ext[t0 * 8 + (-t0 + c0)] = private_H_ext_6[0][0];
    }
}
|
85cca2a554bd87751d425d701dbb6f4e9dcaed52.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
// Write the transpose of the n x n matrix d_in into d_out.
// One thread per element; threads outside the matrix do nothing.
__global__ void matrixTranspose(int* d_in, int* d_out, const int n)
{
    const int y = blockIdx.y * blockDim.y + threadIdx.y; // source row
    const int x = blockIdx.x * blockDim.x + threadIdx.x; // source column
    if (y < n && x < n)
        d_out[x * n + y] = d_in[y * n + x];
}
// Host driver: builds a 10x10 test matrix (each row filled with its row
// index), transposes it on the GPU and prints both the input and the result.
int main(int argc, char** argv)
{
    std::cout << "\n";
    std::cout << "This Program Transposes a Matrix of size N = 10"
              << "\n";
    //Size of matrix n x n | n = 10
    const int N = 10;
    size_t bytes = N * N * sizeof(int);
    int* h_in = new int[N * N];
    int* h_out = new int[N * N];
    int *d_in, *d_out;
    hipMalloc(&d_in, bytes);
    hipMalloc(&d_out, bytes);
    // Initialize matrix | each line has the same value
    for (int row = 0; row < N; row++) {
        for (int col = 0; col < N; col++) {
            h_in[row * N + col] = row;
        }
    }
    std::cout << "\n";
    for (int row = 0; row < N; row++) {
        for (int col = 0; col < N; col++) {
            std::cout << " " << h_in[row * N + col] << " ";
        }
        std::cout << "\n";
    }
    // Host --> Device
    hipMemcpy(d_in, h_in, bytes, hipMemcpyHostToDevice);
    const int BLOCK_SIZE = 16;
    const int GRID_SIZE = (N + BLOCK_SIZE - 1) / BLOCK_SIZE; // ceil-div
    dim3 blocks(GRID_SIZE, GRID_SIZE, 1);
    dim3 threads(BLOCK_SIZE, BLOCK_SIZE, 1);
    // Transposing matrix on GPU.
    // BUG FIX: the original spelling "<<<blocks, threads> > >" is not valid
    // kernel-launch syntax; use the HIP launch macro instead.
    hipLaunchKernelGGL(matrixTranspose, blocks, threads, 0, 0, d_in, d_out, N);
    // Device --> Host (this blocking copy also synchronizes with the kernel)
    hipMemcpy(h_out, d_out, bytes, hipMemcpyDeviceToHost);
    // Print the Transpose
    std::cout << "\n";
    std::cout << "The Transpose matrix is: "
              << "\n";
    std::cout << "\n";
    for (int row = 0; row < N; row++) {
        for (int col = 0; col < N; col++) {
            std::cout << " " << h_out[row * N + col] << " ";
        }
        std::cout << "\n";
    }
    // Free memory
    delete[] h_in;
    h_in = NULL;
    delete[] h_out;
    h_out = NULL;
    hipFree(d_in);
    hipFree(d_out);
    return 0;
}
| 85cca2a554bd87751d425d701dbb6f4e9dcaed52.cu | #include <iostream>
// Write the transpose of the n x n matrix d_in into d_out.
// One thread per element; threads outside the matrix do nothing.
__global__ void matrixTranspose(int* d_in, int* d_out, const int n)
{
    const int y = blockIdx.y * blockDim.y + threadIdx.y; // source row
    const int x = blockIdx.x * blockDim.x + threadIdx.x; // source column
    if (y < n && x < n)
        d_out[x * n + y] = d_in[y * n + x];
}
// Host driver: builds a 10x10 test matrix (each row filled with its row
// index), transposes it on the GPU and prints both the input and the result.
int main(int argc, char** argv)
{
    std::cout << "\n";
    std::cout << "This Program Transposes a Matrix of size N = 10"
              << "\n";
    //Size of matrix n x n | n = 10
    const int N = 10;
    size_t bytes = N * N * sizeof(int);
    int* h_in = new int[N * N];
    int* h_out = new int[N * N];
    int *d_in, *d_out;
    cudaMalloc(&d_in, bytes);
    cudaMalloc(&d_out, bytes);
    // Initialize matrix | each line has the same value
    for (int row = 0; row < N; row++) {
        for (int col = 0; col < N; col++) {
            h_in[row * N + col] = row;
        }
    }
    std::cout << "\n";
    for (int row = 0; row < N; row++) {
        for (int col = 0; col < N; col++) {
            std::cout << " " << h_in[row * N + col] << " ";
        }
        std::cout << "\n";
    }
    // Host --> Device
    cudaMemcpy(d_in, h_in, bytes, cudaMemcpyHostToDevice);
    const int BLOCK_SIZE = 16;
    const int GRID_SIZE = (N + BLOCK_SIZE - 1) / BLOCK_SIZE; // ceil-div
    dim3 blocks(GRID_SIZE, GRID_SIZE, 1);
    dim3 threads(BLOCK_SIZE, BLOCK_SIZE, 1);
    // Transposing matrix on GPU.
    // BUG FIX: the original spelling "<<<blocks, threads> > >" is not valid
    // kernel-launch syntax; the execution configuration must be "<<< >>>".
    matrixTranspose<<<blocks, threads>>>(d_in, d_out, N);
    // Device --> Host (this blocking copy also synchronizes with the kernel)
    cudaMemcpy(h_out, d_out, bytes, cudaMemcpyDeviceToHost);
    // Print the Transpose
    std::cout << "\n";
    std::cout << "The Transpose matrix is: "
              << "\n";
    std::cout << "\n";
    for (int row = 0; row < N; row++) {
        for (int col = 0; col < N; col++) {
            std::cout << " " << h_out[row * N + col] << " ";
        }
        std::cout << "\n";
    }
    // Free memory
    delete[] h_in;
    h_in = NULL;
    delete[] h_out;
    h_out = NULL;
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}
|
6b45a8dd3efbd4c10009036904e7547795b8b0b5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Fermat
*
* Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <renderer.h>
#include <pathtracer.h>
#include <files.h>
#include <bpt.h>
#include <cmlt.h>
#include <pssmlt.h>
#include <rpt.h>
#include <optix_prime/optix_primepp.h>
#include <optixu/optixu_matrix.h>
#include <mesh/MeshStorage.h>
#include <eaw.h>
#include <cugar/basic/cuda/arch.h>
#include <cugar/basic/cuda/timer.h>
#include <cugar/basic/primitives.h>
#include <cugar/basic/functors.h>
#include <cugar/basic/cuda/sort.h>
#include <cugar/image/tga.h>
#include <cugar/bsdf/ltc.h>
#include <buffers.h>
#include <vector>
namespace ltc_ggx
{
typedef float mat33[9];
#include <cugar/bsdf/ltc_ggx.inc>
};
void load_scene(const char* filename, MeshStorage& mesh, const std::vector<std::string>& dirs, std::vector<std::string>& scene_dirs);
//------------------------------------------------------------------------------
// Write the identity sequence 0..n-1 into pixels; one thread per entry.
__global__ void fill_n_kernel(const int n, uint32_t* pixels)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n)
        return;
    pixels[i] = i;
}
// Fill the given device buffer with the identity permutation [0, n).
// BUG FIX: the original launch used the invalid "<< < ... >> >" chevron
// spelling left behind by the porting tool; use the HIP launch macro, and
// check for errors like the other launches in this file do.
void fill_n(const int n, Buffer<uint32_t>& pixels)
{
    dim3 blockSize(128);
    dim3 gridSize(cugar::divide_ri(n, blockSize.x));
    hipLaunchKernelGGL(fill_n_kernel, gridSize, blockSize, 0, 0, n, pixels.ptr());
    CUDA_CHECK(cugar::cuda::sync_and_check_error("fill_n"));
}
//------------------------------------------------------------------------------
// Tone-map an HDR color: exposure scale, Reinhard-style compression, then
// gamma 1/2.2 per component.
static __device__ cugar::Vector4f tonemap(cugar::Vector4f c, const float exposure)
{
    c *= exposure; // Hardcoded Exposure Adjustment
    c = c / (c + cugar::Vector4f(1));
    c.x = powf(c.x, 1.0f / 2.2f);
    c.y = powf(c.y, 1.0f / 2.2f);
    c.z = powf(c.z, 1.0f / 2.2f);
    c.w = powf(c.w, 1.0f / 2.2f);
    return c;
}
// Quantize a [0,1] color to 8-bit RGBA at pixel idx.
static __device__ void store_rgba(const cugar::Vector4f c, uint8* rgba, const uint32 idx)
{
    rgba[idx * 4 + 0] = uint8(fminf(c.x * 256.0f, 255.0f));
    rgba[idx * 4 + 1] = uint8(fminf(c.y * 256.0f, 255.0f));
    rgba[idx * 4 + 2] = uint8(fminf(c.z * 256.0f, 255.0f));
    rgba[idx * 4 + 3] = uint8(fminf(c.w * 256.0f, 255.0f));
}
// Convert the framebuffer channel selected by renderer.shading_mode into
// 8-bit RGBA. Tone-mapped modes go through tonemap(); the albedo modes are
// stored directly without exposure/gamma, matching the original behavior.
// One thread per pixel.
__global__ void to_rgba_kernel(const RendererView renderer, uint8* rgba)
{
    const uint32 idx = threadIdx.x + blockIdx.x*blockDim.x;
    if (idx >= renderer.res_x * renderer.res_y)
        return;
    if (renderer.shading_mode == kShaded)
    {
        cugar::Vector4f c = renderer.fb(FBufferDesc::COMPOSITED_C, idx);
        store_rgba(tonemap(c, renderer.exposure), rgba, idx);
    }
    else if (renderer.shading_mode == kFiltered)
    {
        cugar::Vector4f c = renderer.fb(FBufferDesc::FILTERED_C, idx);
        store_rgba(tonemap(c, renderer.exposure), rgba, idx);
    }
    else if (renderer.shading_mode == kAlbedo)
    {
        // no tone mapping for albedo visualizations
        cugar::Vector4f c = renderer.fb(FBufferDesc::DIFFUSE_A, idx) +
                            renderer.fb(FBufferDesc::SPECULAR_A, idx);
        store_rgba(c, rgba, idx);
    }
    else if (renderer.shading_mode == kDiffuseAlbedo)
    {
        cugar::Vector4f c = renderer.fb(FBufferDesc::DIFFUSE_A, idx);
        store_rgba(c, rgba, idx);
    }
    else if (renderer.shading_mode == kSpecularAlbedo)
    {
        cugar::Vector4f c = renderer.fb(FBufferDesc::SPECULAR_A, idx);
        store_rgba(c, rgba, idx);
    }
    else if (renderer.shading_mode == kDiffuseColor)
    {
        cugar::Vector4f c = renderer.fb(FBufferDesc::DIFFUSE_C, idx);
        store_rgba(tonemap(c, renderer.exposure), rgba, idx);
    }
    else if (renderer.shading_mode == kSpecularColor)
    {
        cugar::Vector4f c = renderer.fb(FBufferDesc::SPECULAR_C, idx);
        store_rgba(tonemap(c, renderer.exposure), rgba, idx);
    }
    else if (renderer.shading_mode == kDirectLighting)
    {
        cugar::Vector4f c = renderer.fb(FBufferDesc::DIRECT_C, idx);
        store_rgba(tonemap(c, renderer.exposure), rgba, idx);
    }
    else if (renderer.shading_mode == kVariance)
    {
        // scalar variance lives in the alpha component; replicate it to all channels
        float c = renderer.fb(FBufferDesc::COMPOSITED_C, idx).w;
        c *= renderer.exposure; // Hardcoded Exposure Adjustment
        c = c / (c + 1);
        c = powf(c, 1.0f / 2.2f);
        const uint8 q = uint8(fminf(c * 256.0f, 255.0f));
        rgba[idx * 4 + 0] = q;
        rgba[idx * 4 + 1] = q;
        rgba[idx * 4 + 2] = q;
        rgba[idx * 4 + 3] = q;
    }
    else if (renderer.shading_mode >= kAux0 && (renderer.shading_mode - kAux0 < renderer.fb.n_channels - FBufferDesc::NUM_CHANNELS))
    {
        // auxiliary channels registered by the active renderer
        const uint32 aux_channel = renderer.shading_mode - kAux0 + FBufferDesc::NUM_CHANNELS;
        cugar::Vector4f c = renderer.fb(aux_channel, idx);
        store_rgba(tonemap(c, renderer.exposure), rgba, idx);
    }
}
// Host-side wrapper: convert the whole framebuffer to 8-bit RGBA on the device.
void to_rgba(const RendererView renderer, uint8* rgba)
{
    // one thread per pixel, 128-wide blocks
    const dim3 blockSize(128);
    const dim3 gridSize(cugar::divide_ri(renderer.res_x * renderer.res_y, blockSize.x));
    hipLaunchKernelGGL(to_rgba_kernel, gridSize, blockSize, 0, 0, renderer, rgba);
    CUDA_CHECK(cugar::cuda::sync_and_check_error("to_rgba"));
}
//------------------------------------------------------------------------------
// Scale every accumulation channel of the framebuffer by 'scale'; one thread
// per pixel. Before scaling, the per-channel max-component luminances are
// saved into the LUMINANCE channel so that update_variances_kernel can later
// measure the frame-to-frame delta. 'scale' is typically n/(n+1) (see
// Renderer::rescale_frame) to re-weight the running average.
__global__ void multiply_frame_kernel(RendererView renderer, const float scale)
{
    const uint32 idx = threadIdx.x + blockIdx.x*blockDim.x;
    if (idx < renderer.res_x * renderer.res_y)
    {
        // before scaling, save out luminance data (max component per channel)
        renderer.fb(FBufferDesc::LUMINANCE, idx) = cugar::Vector4f(
            cugar::max_comp(cugar::Vector4f(renderer.fb(FBufferDesc::DIRECT_C, idx)).xyz()),
            cugar::max_comp(cugar::Vector4f(renderer.fb(FBufferDesc::DIFFUSE_C, idx)).xyz()),
            cugar::max_comp(cugar::Vector4f(renderer.fb(FBufferDesc::SPECULAR_C, idx)).xyz()),
            cugar::max_comp(cugar::Vector4f(renderer.fb(FBufferDesc::COMPOSITED_C, idx)).xyz()) );
        // scale all accumulation channels (including the alpha-stored variances)
        renderer.fb(FBufferDesc::DIFFUSE_C, idx) *= scale;
        renderer.fb(FBufferDesc::DIFFUSE_A, idx) *= scale;
        renderer.fb(FBufferDesc::SPECULAR_C, idx) *= scale;
        renderer.fb(FBufferDesc::SPECULAR_A, idx) *= scale;
        renderer.fb(FBufferDesc::DIRECT_C, idx) *= scale;
        renderer.fb(FBufferDesc::COMPOSITED_C, idx) *= scale;
    }
}
//------------------------------------------------------------------------------
// Incrementally update the per-channel variance estimates after a frame has
// been accumulated. Compares the luminances saved before the last rescale
// (LUMINANCE channel, written by multiply_frame_kernel) against the current
// ones, and folds the delta into the variance accumulators stored in the .w
// component of each color channel. 'n' is the 1-based frame count; the old
// variances are assumed to have been pre-scaled by (n-1)/n by the rescale.
__global__ void update_variances_kernel(RendererView renderer, const uint32 n)
{
    const uint32 idx = threadIdx.x + blockIdx.x*blockDim.x;
    if (idx < renderer.res_x * renderer.res_y)
    {
        // fetch the previous frame's luminances
        const cugar::Vector4f old_lum = renderer.fb(FBufferDesc::LUMINANCE, idx);
        // compute the new frame's luminances
        const cugar::Vector4f new_lum = cugar::Vector4f(
            cugar::max_comp(cugar::Vector4f(renderer.fb(FBufferDesc::DIRECT_C, idx)).xyz()),
            cugar::max_comp(cugar::Vector4f(renderer.fb(FBufferDesc::DIFFUSE_C, idx)).xyz()),
            cugar::max_comp(cugar::Vector4f(renderer.fb(FBufferDesc::SPECULAR_C, idx)).xyz()),
            cugar::max_comp(cugar::Vector4f(renderer.fb(FBufferDesc::COMPOSITED_C, idx)).xyz()) );
        // compute the change in variance (x(n) - avg(n-1))*(x(n) - avg(n)), which can be written as the sum of two terms:
        // 1. n*avg(n) - (n-1)*avg(n-1) - avg(n-1) = n*(avg(n) - avg(n-1))
        // 2. n*avg(n) - (n-1)*avg(n-1) - avg(n) = (n-1)*(avg(n) - avg(n-1))
        const cugar::Vector4f delta_lum_1 = n * (new_lum - old_lum);
        const cugar::Vector4f delta_lum_2 = (n - 1) * (new_lum - old_lum);
        const cugar::Vector4f delta_var = (delta_lum_1 * delta_lum_2) / (n*n);
        // add the variance deltas to the old variances (previously rescaled by (n-1)/n) stored in the alpha components of the respective channels
        renderer.fb(FBufferDesc::DIRECT_C, idx).w += delta_var.x;
        renderer.fb(FBufferDesc::DIFFUSE_C, idx).w += delta_var.y;
        renderer.fb(FBufferDesc::SPECULAR_C, idx).w += delta_var.z;
        renderer.fb(FBufferDesc::COMPOSITED_C, idx).w += delta_var.w;
    }
}
//------------------------------------------------------------------------------
// Box-filter the variance stored in the .w component of img over a
// (2*FW+1)^2 window clamped at the image borders, writing the window mean
// into var. One thread per pixel.
__global__ void filter_variance_kernel(const FBufferChannelView img, float* var, const uint32 FW)
{
    const uint32 x = threadIdx.x + blockIdx.x*blockDim.x;
    const uint32 y = threadIdx.y + blockIdx.y*blockDim.y;
    if (x >= img.res_x || y >= img.res_y)
        return;
    // filter window, clamped to the image bounds
    const int32 x0 = x > FW ? x - FW : 0;
    const int32 x1 = x + FW < img.res_x ? x + FW : img.res_x - 1;
    const int32 y0 = y > FW ? y - FW : 0;
    const int32 y1 = y + FW < img.res_y ? y + FW : img.res_y - 1;
    float sum = 0.0f;
    for (int32 j = y0; j <= y1; j++)
        for (int32 i = x0; i <= x1; i++)
            sum += img(i, j).w;
    var[x + y * img.res_x] = sum / ((y1 - y0 + 1) * (x1 - x0 + 1));
}
// Host-side wrapper: box-filter the variance channel of img into var.
// BUG FIX: the original launch used the invalid "<< < ... >> >" chevron
// spelling left behind by the porting tool; use the HIP launch macro instead.
void filter_variance(const FBufferChannelView img, float* var, const uint32 FW = 1)
{
    dim3 blockSize(32, 4);
    dim3 gridSize(cugar::divide_ri(img.res_x, blockSize.x), cugar::divide_ri(img.res_y, blockSize.y));
    hipLaunchKernelGGL(filter_variance_kernel, gridSize, blockSize, 0, 0, img, var, FW);
    CUDA_CHECK(cugar::cuda::sync_and_check_error("filter_variance"));
}
//------------------------------------------------------------------------------
// Multiply every framebuffer channel by a constant factor on the device.
void Renderer::multiply_frame(const float scale)
{
    // one thread per pixel, 128-wide blocks
    const dim3 blockSize(128);
    const dim3 gridSize(cugar::divide_ri(m_res_x * m_res_y, blockSize.x));
    hipLaunchKernelGGL(multiply_frame_kernel, gridSize, blockSize, 0, 0, view(0), scale);
    CUDA_CHECK( cugar::cuda::sync_and_check_error("multiply_frame") );
}
//------------------------------------------------------------------------------
// Re-weight the accumulated frame so a new sample can be averaged in:
// after 'instance' frames the old average is scaled by instance/(instance+1).
void Renderer::rescale_frame(const uint32 instance)
{
    const float weight = float(instance) / float(instance + 1);
    multiply_frame(weight);
}
//------------------------------------------------------------------------------
// Launch the incremental variance update for frame 'instance' (0-based);
// the kernel receives the 1-based frame count.
void Renderer::update_variances(const uint32 instance)
{
    const uint32 frame_count = instance + 1;
    const dim3 blockSize(128);
    const dim3 gridSize(cugar::divide_ri(m_res_x * m_res_y, blockSize.x));
    hipLaunchKernelGGL(update_variances_kernel, gridSize, blockSize, 0, 0, view(0), frame_count);
    CUDA_CHECK( cugar::cuda::sync_and_check_error("update_variances") );
}
//------------------------------------------------------------------------------
// Renderer initialization
//
void Renderer::init(int argc, char** argv)
{
const char* filename = NULL;
m_renderer_type = kBPT;
m_exposure = 1.0f;
m_res_x = 1600;
m_res_y = 900;
m_aspect = 0.0f;
m_shading_rate = 1.0f;
m_shading_mode = kShaded;
for (int i = 0; i < argc; ++i)
{
if (strcmp(argv[i], "-i") == 0)
filename = argv[++i];
else if (strcmp(argv[i], "-pt") == 0)
m_renderer_type = kPT;
else if (strcmp(argv[i], "-bpt") == 0)
m_renderer_type = kBPT;
else if (strcmp(argv[i], "-cmlt") == 0)
m_renderer_type = kCMLT;
else if (strcmp(argv[i], "-pssmlt") == 0)
m_renderer_type = kPSSMLT;
else if (strcmp(argv[i], "-rpt") == 0)
m_renderer_type = kRPT;
else if (strcmp(argv[i], "-r") == 0 ||
strcmp(argv[i], "-res") == 0)
{
m_res_x = atoi(argv[++i]);
m_res_y = atoi(argv[++i]);
}
else if (strcmp(argv[i], "-a") == 0 ||
strcmp(argv[i], "-aspect") == 0)
{
m_aspect = (float)atof(argv[++i]);
}
else if (strcmp(argv[i], "-c") == 0)
{
FILE* camera_file = fopen(argv[++i], "r");
if (camera_file == NULL)
{
fprintf(stderr, "failed opening camera file %s\n", argv[i]);
exit(0);
}
fscanf(camera_file, "%f %f %f", &m_camera.eye.x, &m_camera.eye.y, &m_camera.eye.z);
fscanf(camera_file, "%f %f %f", &m_camera.aim.x, &m_camera.aim.y, &m_camera.aim.z);
fscanf(camera_file, "%f %f %f", &m_camera.up.x, &m_camera.up.y, &m_camera.up.z);
fscanf(camera_file, "%f", &m_camera.fov);
m_camera.dx = normalize(cross(m_camera.aim - m_camera.eye, m_camera.up));
fclose(camera_file);
}
}
if (m_aspect == 0.0f)
m_aspect = float(m_res_x) / float(m_res_y);
if (filename == NULL)
{
fprintf(stderr, "options:\n");
fprintf(stderr, " -i scene.obj specify the input scene\n");
fprintf(stderr, " -r int int specify the resolution\n");
fprintf(stderr, " -a float specify the aspect ratio\n");
fprintf(stderr, " -c camera.txt specify a camera file\n");
fprintf(stderr, " -pt use the PT renderer\n");
fprintf(stderr, " -bpt use the BPT renderer\n");
fprintf(stderr, " -mlt use the MLT renderer\n");
fprintf(stderr, " -cmlt use the CMLT renderer\n");
fprintf(stderr, " -pssmlt use the PSSMLT renderer\n");
exit(0);
}
for (int i = 0; i < argc; ++i)
{
if (strcmp(argv[i], "-c") == 0)
{
FILE* camera_file = fopen(argv[++i], "r");
fscanf(camera_file, "%f %f %f", &m_camera.eye.x, &m_camera.eye.y, &m_camera.eye.z);
fscanf(camera_file, "%f %f %f", &m_camera.aim.x, &m_camera.aim.y, &m_camera.aim.z);
fscanf(camera_file, "%f %f %f", &m_camera.up.x, &m_camera.up.y, &m_camera.up.z);
fscanf(camera_file, "%f", &m_camera.fov);
m_camera.dx = normalize(cross(m_camera.aim - m_camera.eye, m_camera.up));
fclose(camera_file);
}
}
m_rgba.alloc(m_res_x * m_res_y * 4);
m_var.alloc(m_res_x * m_res_y);
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
fprintf(stderr, "cuda device: %s\n", prop.name);
size_t free, total;
hipMemGetInfo(&free, &total);
fprintf(stderr, " memory: %.3f GB\n",
float(total) / (1024 * 1024 * 1024));
std::vector<unsigned int> devices(1);
devices[0] = 0;
hipSetDevice( devices[0] );
// create an Optix Prime context
m_context = optix::prime::Context::create(RTP_CONTEXT_TYPE_CUDA);
m_context->setCudaDeviceNumbers( devices );
switch (m_renderer_type)
{
case kPT: { m_renderer = new PathTracer(); break; }
case kBPT: { m_renderer = new BPT(); break; }
case kCMLT: { m_renderer = new CMLT(); break; }
case kPSSMLT: { m_renderer = new PSSMLT(); break; }
case kRPT: { m_renderer = new RPT(); break; }
default: { m_renderer = new PathTracer(); break; }
};
const uint32 aux_channels = m_renderer->auxiliary_channel_count();
m_fb.set_channel_count(FBufferDesc::NUM_CHANNELS + aux_channels);
m_fb.set_channel(FBufferDesc::DIFFUSE_C, "diffuse_color");
m_fb.set_channel(FBufferDesc::DIFFUSE_A, "diffuse_albedo");
m_fb.set_channel(FBufferDesc::SPECULAR_C, "specular_color");
m_fb.set_channel(FBufferDesc::SPECULAR_A, "specular_albedo");
m_fb.set_channel(FBufferDesc::DIRECT_C, "direct_color");
m_fb.set_channel(FBufferDesc::COMPOSITED_C, "composited_color");
m_fb.set_channel(FBufferDesc::FILTERED_C, "filtered_color");
m_fb.set_channel(FBufferDesc::LUMINANCE, "luminance");
m_renderer->register_auxiliary_channels( m_fb, FBufferDesc::NUM_CHANNELS );
m_fb.resize(m_res_x, m_res_y);
m_fb_temp[0].resize(m_res_x, m_res_y);
m_fb_temp[1].resize(m_res_x, m_res_y);
#if 0
// pre-computer the samples buffer
m_samples.alloc(m_res_x * m_res_y);
{
DomainBuffer<RTP_BUFFER_TYPE_HOST, float2> samples(m_res_x * m_res_y);
cugar::MJSampler sampler;
sampler.sample(m_res_x, m_res_y, (cugar::Vector2f*)samples.ptr());
m_samples = samples;
}
#endif
// Load the LTC coefficients
{
fprintf(stderr, "initializing LTC coefficients... started\n");
DomainBuffer<HOST_BUFFER, float4> ltc_M;
DomainBuffer<HOST_BUFFER, float4> ltc_Minv;
ltc_M.alloc(ltc_ggx::size * ltc_ggx::size);
ltc_Minv.alloc(ltc_ggx::size * ltc_ggx::size);
cugar::LTCBsdf::preprocess(ltc_ggx::size, (const cugar::Matrix3x3f*)ltc_ggx::tabM, ltc_M.ptr(), ltc_Minv.ptr());
m_ltc_size = ltc_ggx::size;
m_ltc_M = ltc_M;
m_ltc_Minv = ltc_Minv;
m_ltc_A.alloc(ltc_ggx::size * ltc_ggx::size);
m_ltc_A.copy_from(ltc_ggx::size * ltc_ggx::size, HOST_BUFFER, ltc_ggx::tabAmplitude);
fprintf(stderr, "initializing LTC coefficients... done\n");
}
fprintf(stderr, "loading mesh file %s... started\n", filename);
std::vector<std::string> scene_dirs;
{
scene_dirs.push_back(""); // always look in the current directory
char local_path[2048];
extract_path(filename, local_path);
scene_dirs.push_back(local_path);
}
// Create the Model object
//
try
{
std::vector<std::string> dirs = scene_dirs;
if (strlen(filename) > 3 && strcmp(filename+strlen(filename)-3, ".fa") == 0)
load_scene(filename, m_mesh, dirs, scene_dirs);
else
loadModel(filename, m_mesh);
// compute the bbox
if (1)
{
cugar::Vector3f bmin(1.0e16f, 1.0e16f, 1.0e16f);
cugar::Vector3f bmax(-1.0e16f, -1.0e16f, -1.0e16f);
float3* v = reinterpret_cast<float3*>(m_mesh.getVertexData());
for (int32_t i = 0; i < m_mesh.getNumVertices(); ++i)
{
bmin = cugar::min(bmin, cugar::Vector3f(v[i]));
bmax = cugar::max(bmax, cugar::Vector3f(v[i]));
}
// scale the model
if (0)
{
const cugar::Vector3f center = (bmin + bmax) * 0.5f;
const float scale = 1.0f / cugar::max3(bmax[0] - bmin[0], bmax[1] - bmin[1], bmax[2] - bmin[2]);
for (int32_t i = 0; i < m_mesh.getNumVertices(); ++i)
v[i] = (v[i] - center) * scale;
}
// print the bounding box
fprintf(stderr, " bbox[%f, %f, %f][%f, %f, %f]\n",
bmin[0], bmin[1], bmin[2],
bmax[0], bmax[1], bmax[2]);
}
}
catch (MeshException e)
{
fprintf(stderr, " error loading mesh file %s : %s\n", filename, e.what());
exit(1);
}
fprintf(stderr, "loading mesh file %s... done (%d triangles, %d materials, %d groups)\n", filename, m_mesh.getNumTriangles(), m_mesh.getNumMaterials(), m_mesh.getNumGroups());
{
// print the group names
for (int32 i = 0; i < m_mesh.getNumGroups(); ++i)
fprintf(stderr, " group[%d] : %s, %u triangles\n", i,
m_mesh.getGroupName(i).c_str(),
m_mesh.getGroupOffsets()[i + 1] - m_mesh.getGroupOffsets()[i]);
}
// load all textures
{
fprintf(stderr, "loading %u textures... started\n", (uint32)m_mesh.m_textures.size());
m_textures_h.resize( m_mesh.m_textures.size() );
m_textures_d.resize( m_mesh.m_textures.size() );
for (size_t i = 0; i < m_mesh.m_textures.size(); ++i)
{
m_textures_h[i] = HostMipMapStoragePtr(new MipMapStorage<HOST_BUFFER>());
m_textures_d[i] = DeviceMipMapStoragePtr(new MipMapStorage<CUDA_BUFFER>());
// try to load the texture
char local_path[2048];
extract_path(filename, local_path);
char texture_name[2048];
strcpy(texture_name, m_mesh.m_textures[i].c_str());
if (find_file(texture_name, scene_dirs))
{
if (strcmp(texture_name + strlen(texture_name) - 4, ".tga") == 0)
{
cugar::TGAHeader tga_header;
unsigned char* rgb = cugar::load_tga(texture_name, &tga_header);
if (rgb)
{
MipMapStorage<HOST_BUFFER>::TexturePtr texture_h(new TextureStorage<HOST_BUFFER>());
texture_h->resize(tga_header.width, tga_header.height);
float4* tex = texture_h->ptr();
for (uint32 p = 0; p < uint32(tga_header.width) * uint32(tga_header.height); ++p)
tex[p] = make_float4(
float(rgb[3 * p + 0]) / 255.0f,
float(rgb[3 * p + 1]) / 255.0f,
float(rgb[3 * p + 2]) / 255.0f,
0.0f);
// generate the mipmap for this texture
m_textures_h[i]->set(texture_h);
// and copy it to the device
*m_textures_d[i] = *m_textures_h[i];
delete[] rgb;
}
else
fprintf(stderr, "warning: unable to load texture %s\n", texture_name);
}
else
fprintf(stderr, "warning: unsupported texture format %s\n", texture_name);
}
else
fprintf(stderr, "warning: unable to find texture %s\n", texture_name);
}
m_texture_views_h.alloc(m_mesh.m_textures.size());
for (uint32 i = 0; i < m_textures_h.size(); ++i)
m_texture_views_h.set(i, m_textures_h[i]->view());
m_texture_views_d.alloc(m_mesh.m_textures.size());
for (uint32 i = 0; i < m_textures_d.size(); ++i)
m_texture_views_d.set(i, m_textures_d[i]->view());
fprintf(stderr, "loading %u textures... done\n", (uint32)m_mesh.m_textures.size());
}
// checking materials
for (int32_t i = 0; i < m_mesh.getNumTriangles(); ++i)
{
const int m = m_mesh.getMaterialIndices()[i];
if (m < 0 || m >= m_mesh.getNumMaterials())
{
fprintf(stderr, "material[%u] : %u out of range\n", i, m);
exit(1);
}
}
// copy to the device
m_mesh_d = m_mesh;
{
size_t mem_free, mem_tot;
hipSetDevice(0);
hipMemGetInfo(&mem_free, &mem_tot);
fprintf(stderr, "free device memory: %.3f GB\n", float(mem_free) / (1024 * 1024 * 1024));
}
fprintf(stderr, "creatign RT index... started\n");
try
{
m_model = m_context->createModel();
m_model->setTriangles(
m_mesh.getNumTriangles(), RTP_BUFFER_TYPE_HOST, m_mesh.getVertexIndices(),
m_mesh.getNumVertices(), RTP_BUFFER_TYPE_HOST, m_mesh.getVertexData());
m_model->update(0);
}
catch (optix::prime::Exception& e)
{
fprintf(stderr, " error[%d] : %s\n", e.getErrorCode(), e.getErrorString().c_str());
exit(1);
}
fprintf(stderr, "creatign RT index... done\n");
fprintf(stderr, "initializing path sampler... started\n");
m_renderer->init(argc, argv, *this);
fprintf(stderr, "initializing path sampler... done\n");
{
size_t mem_free, mem_tot;
hipSetDevice(0);
hipMemGetInfo(&mem_free, &mem_tot);
fprintf(stderr, "free device memory: %.3f GB\n", float(mem_free) / (1024 * 1024 * 1024));
}
#if 0
cugar::host_vector<uint32_t> h_randoms(1024 * 1024);
for (uint32_t i = 0; i < 1024 * 1024; ++i)
h_randoms[i] = rand();
cugar::device_vector<uint32_t> d_randoms = h_randoms;
cugar::device_vector<uint32_t> d_vals = h_randoms;
cugar::device_vector<uint8_t> temp_storage;
cugar::radix_sort<cugar::device_tag>(1024 * 1024, cugar::raw_pointer(d_randoms), cugar::raw_pointer(d_vals), temp_storage);
for (uint32_t i = 0; i < 10; ++i)
{
d_randoms = h_randoms;
const uint32_t n_keys = (1u << (i + 1)) * 1024;
cugar::cuda::Timer timer;
timer.start();
cugar::radix_sort<cugar::device_tag>(n_keys, cugar::raw_pointer(d_randoms), cugar::raw_pointer(d_vals), temp_storage);
timer.stop();
fprintf(stderr, "%u K items : %.2fms\n", n_keys / 1024, timer.seconds() * 1000.0f);
}
#endif
}
// Reset every framebuffer channel to its cleared (zeroed) state.
void Renderer::clear()
{
    const uint32_t n_channels = m_fb.channel_count();
    for (uint32_t channel = 0; channel < n_channels; ++channel)
        m_fb.channels[channel].clear();
}
// Rebuild the OptiX Prime acceleration structure after the host-side mesh has
// been modified, then mirror the mesh data to the device copy (m_mesh_d).
void Renderer::update_model()
{
//m_model = m_context->createModel();
// re-submit the host-resident triangle data to the existing RT model
m_model->setTriangles(
m_mesh.getNumTriangles(), RTP_BUFFER_TYPE_HOST, m_mesh.getVertexIndices(),
m_mesh.getNumVertices(), RTP_BUFFER_TYPE_HOST, m_mesh.getVertexData());
m_model->update(0);
// block until the (possibly asynchronous) BVH rebuild has completed
m_model->finish();
CUDA_CHECK(cugar::cuda::sync_and_check_error("model update"));
// copy to the device
m_mesh_d = m_mesh;
// TODO: update m_mesh_lights if needed!
}
// Renderer display function
//
// Render one frame instance: clears the primary G-buffer, runs the active
// path-sampling backend, applies the EAW filtering pass, and converts the
// displayed channel to 8-bit RGBA in m_rgba. Any CUDA error aborts the process.
//
// \param instance   zero-based frame/sample index
void Renderer::render(const uint32 instance)
{
try
{
RendererView renderer_view = view(instance);
// clear the primary Gbuffer
m_fb.gbuffer.clear();
//hipDeviceSynchronize();
m_renderer->render(instance, *this);
// apply filtering, if enabled
filter( instance );
// tone-map / quantize the selected channel into the 8-bit output buffer
to_rgba(renderer_view, m_rgba.ptr());
}
catch (cugar::cuda_error& error)
{
fprintf(stderr, "caught cuda error: %s\n", error.what());
exit(0);
}
}
// Assemble a by-value RendererView snapshot of the device-side renderer state
// (camera, lights, mesh, textures, LTC tables, framebuffer and display
// settings) suitable for passing to kernels by value.
//
// \param instance   frame/sample index, forwarded into the view
RendererView Renderer::view(const uint32 instance)
{
RendererView renderer_view(
m_camera,
m_light,
m_mesh_d.view(),
// NOTE(review): the bool argument selects between two mesh-light views -
// its exact meaning is not visible here; confirm against MeshLights.
m_mesh_lights.view(false),
m_mesh_lights.view(true),
m_texture_views_d.ptr(),
m_ltc_size,
m_ltc_M.ptr(),
m_ltc_Minv.ptr(),
m_ltc_A.ptr(),
m_res_x,
m_res_y,
m_aspect,
m_exposure,
m_shading_rate,
m_shading_mode,
m_fb.view(),
instance );
return renderer_view;
}
// Edge-avoiding A-Trous wavelet (EAW) filtering pass: denoises the diffuse
// and specular channels into FILTERED_C on top of the unfiltered direct
// lighting, guided by the G-buffer and a box-filtered variance estimate.
//
// \param instance   zero-based sample index; used to tighten phi_color over time
void Renderer::filter(const uint32 instance)
{
// setup some ping-pong buffers
FBufferChannelView pingpong[2];
pingpong[0] = m_fb_temp[0].view();
pingpong[1] = m_fb_temp[1].view();
// clear the output filter
// (start from the unfiltered direct lighting; filtered contributions are added on top)
m_fb.channels[FBufferDesc::FILTERED_C] = m_fb.channels[FBufferDesc::DIRECT_C];
FBufferChannelView output = m_fb.channels[FBufferDesc::FILTERED_C].view();
EAWParams eaw_params;
// edge-stopping weights; phi_color grows with the sample count
eaw_params.phi_normal = /*sqrtf(float(instance + 1)) **/ 128.0f;
eaw_params.phi_position = /*sqrtf(float(instance + 1)) **/ 8.0f;
eaw_params.phi_color = float(instance + 1) / 20.0f;
//eaw_params.phi_color = float(instance*instance + 1) / 10000.0f;
const uint32 n_iterations = 5;
// filter the diffuse channel
{
GBufferView gbuffer = m_fb.gbuffer.view();
FBufferChannelView input = m_fb.channels[FBufferDesc::DIFFUSE_C].view();
FBufferChannelView weight = m_fb.channels[FBufferDesc::DIFFUSE_A].view();
// estimate per-pixel variance with a box filter of half-width 2
filter_variance(input, m_var.ptr(), 2);
EAW(
n_iterations,
output, // destination
weight, // weight
input, // input
gbuffer, // gbuffer
m_var.ptr(), // variance
eaw_params, pingpong);
}
// filter the specular channel
{
GBufferView gbuffer = m_fb.gbuffer.view();
FBufferChannelView input = m_fb.channels[FBufferDesc::SPECULAR_C].view();
FBufferChannelView weight = m_fb.channels[FBufferDesc::SPECULAR_A].view();
filter_variance(input, m_var.ptr(), 2);
EAW(
n_iterations,
output, // destination
weight, // weight
input, // input
gbuffer, // gbuffer
m_var.ptr(), // variance
eaw_params, pingpong);
}
} | 6b45a8dd3efbd4c10009036904e7547795b8b0b5.cu | /*
* Fermat
*
* Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <renderer.h>
#include <pathtracer.h>
#include <files.h>
#include <bpt.h>
#include <cmlt.h>
#include <pssmlt.h>
#include <rpt.h>
#include <optix_prime/optix_primepp.h>
#include <optixu/optixu_matrix.h>
#include <mesh/MeshStorage.h>
#include <eaw.h>
#include <cugar/basic/cuda/arch.h>
#include <cugar/basic/cuda/timer.h>
#include <cugar/basic/primitives.h>
#include <cugar/basic/functors.h>
#include <cugar/basic/cuda/sort.h>
#include <cugar/image/tga.h>
#include <cugar/bsdf/ltc.h>
#include <buffers.h>
#include <vector>
// Precomputed linearly-transformed-cosine (LTC) fitting tables for the GGX
// BSDF, wrapped in their own namespace so the generated arrays don't pollute
// the global scope.
namespace ltc_ggx
{
typedef float mat33[9];
#include <cugar/bsdf/ltc_ggx.inc>
};
void load_scene(const char* filename, MeshStorage& mesh, const std::vector<std::string>& dirs, std::vector<std::string>& scene_dirs);
//------------------------------------------------------------------------------
// Kernel: writes the identity sequence 0..n-1 into 'pixels', one thread per element.
__global__ void fill_n_kernel(const int n, uint32_t* pixels)
{
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= n)
        return;
    pixels[gid] = gid;
}
// Host launcher for fill_n_kernel: fills the first n entries of 'pixels'
// with the identity sequence 0..n-1.
//
// \param n        number of elements to write; n <= 0 is a no-op
// \param pixels   device buffer with room for at least n entries
void fill_n(const int n, Buffer<uint32_t>& pixels)
{
    // launching a zero-block grid is a CUDA invalid-configuration error,
    // so bail out early on empty input
    if (n <= 0)
        return;
    dim3 blockSize(128);
    dim3 gridSize(cugar::divide_ri(n, blockSize.x));
    fill_n_kernel << < gridSize, blockSize >> >(n, pixels.ptr());
    // check the launch, matching the error-handling convention of the other
    // launchers in this file (to_rgba, filter_variance, ...)
    CUDA_CHECK(cugar::cuda::sync_and_check_error("fill_n"));
}
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Tone-map a linear HDR color: exposure scale, Reinhard-style range
// compression c/(c+1), and 2.2 gamma, applied component-wise (including the
// alpha component, matching the original per-branch code).
__device__ inline cugar::Vector4f to_rgba_tonemap(cugar::Vector4f c, const float exposure)
{
	c *= exposure; // Hardcoded Exposure Adjustment
	c = c / (c + cugar::Vector4f(1));
	c.x = powf(c.x, 1.0f / 2.2f);
	c.y = powf(c.y, 1.0f / 2.2f);
	c.z = powf(c.z, 1.0f / 2.2f);
	c.w = powf(c.w, 1.0f / 2.2f);
	return c;
}
// Quantize a [0,1] color to 8-bit RGBA and store it at pixel 'idx'.
__device__ inline void to_rgba_write8(const cugar::Vector4f c, uint8* rgba, const uint32 idx)
{
	rgba[idx * 4 + 0] = uint8(fminf(c.x * 256.0f, 255.0f));
	rgba[idx * 4 + 1] = uint8(fminf(c.y * 256.0f, 255.0f));
	rgba[idx * 4 + 2] = uint8(fminf(c.z * 256.0f, 255.0f));
	rgba[idx * 4 + 3] = uint8(fminf(c.w * 256.0f, 255.0f));
}
// Kernel: convert the framebuffer channel selected by renderer.shading_mode
// to 8-bit RGBA, one thread per pixel. Color channels are tone-mapped;
// albedo channels are written raw (no exposure / compression / gamma).
__global__ void to_rgba_kernel(const RendererView renderer, uint8* rgba)
{
	const uint32 idx = threadIdx.x + blockIdx.x*blockDim.x;
	if (idx >= renderer.res_x * renderer.res_y)
		return;

	if (renderer.shading_mode == kShaded)
	{
		to_rgba_write8(to_rgba_tonemap(renderer.fb(FBufferDesc::COMPOSITED_C, idx), renderer.exposure), rgba, idx);
	}
	else if (renderer.shading_mode == kFiltered)
	{
		to_rgba_write8(to_rgba_tonemap(renderer.fb(FBufferDesc::FILTERED_C, idx), renderer.exposure), rgba, idx);
	}
	else if (renderer.shading_mode == kAlbedo)
	{
		// albedos are displayed raw, without tone-mapping
		const cugar::Vector4f c = renderer.fb(FBufferDesc::DIFFUSE_A, idx) +
			renderer.fb(FBufferDesc::SPECULAR_A, idx);
		to_rgba_write8(c, rgba, idx);
	}
	else if (renderer.shading_mode == kDiffuseAlbedo)
	{
		to_rgba_write8(renderer.fb(FBufferDesc::DIFFUSE_A, idx), rgba, idx);
	}
	else if (renderer.shading_mode == kSpecularAlbedo)
	{
		to_rgba_write8(renderer.fb(FBufferDesc::SPECULAR_A, idx), rgba, idx);
	}
	else if (renderer.shading_mode == kDiffuseColor)
	{
		to_rgba_write8(to_rgba_tonemap(renderer.fb(FBufferDesc::DIFFUSE_C, idx), renderer.exposure), rgba, idx);
	}
	else if (renderer.shading_mode == kSpecularColor)
	{
		to_rgba_write8(to_rgba_tonemap(renderer.fb(FBufferDesc::SPECULAR_C, idx), renderer.exposure), rgba, idx);
	}
	else if (renderer.shading_mode == kDirectLighting)
	{
		to_rgba_write8(to_rgba_tonemap(renderer.fb(FBufferDesc::DIRECT_C, idx), renderer.exposure), rgba, idx);
	}
	else if (renderer.shading_mode == kVariance)
	{
		// the scalar variance lives in the alpha of the composited channel;
		// tone-map it as a scalar and broadcast to all four output bytes
		float c = renderer.fb(FBufferDesc::COMPOSITED_C, idx).w;
		c *= renderer.exposure; // Hardcoded Exposure Adjustment
		c = c / (c + 1);
		c = powf(c, 1.0f / 2.2f);
		to_rgba_write8(cugar::Vector4f(c, c, c, c), rgba, idx);
	}
	else if (renderer.shading_mode >= kAux0 && (renderer.shading_mode - kAux0 < renderer.fb.n_channels - FBufferDesc::NUM_CHANNELS))
	{
		// renderer-specific auxiliary channels follow the standard ones
		const uint32 aux_channel = renderer.shading_mode - kAux0 + FBufferDesc::NUM_CHANNELS;
		to_rgba_write8(to_rgba_tonemap(renderer.fb(aux_channel, idx), renderer.exposure), rgba, idx);
	}
}
// Host launcher: convert the currently displayed framebuffer channel of
// 'renderer' into the 8-bit RGBA buffer 'rgba', one thread per pixel.
void to_rgba(const RendererView renderer, uint8* rgba)
{
    const dim3 block(128);
    const dim3 grid(cugar::divide_ri(renderer.res_x * renderer.res_y, block.x));
    to_rgba_kernel <<< grid, block >>>(renderer, rgba);
    CUDA_CHECK(cugar::cuda::sync_and_check_error("to_rgba"));
}
//------------------------------------------------------------------------------
// Kernel: rescale all accumulation channels of one pixel by 'scale'.
// Before scaling, the per-channel max-component luminances are snapshotted
// into the LUMINANCE channel; update_variances_kernel later compares against
// this snapshot, so the save must happen before the multiplications below.
__global__ void multiply_frame_kernel(RendererView renderer, const float scale)
{
const uint32 idx = threadIdx.x + blockIdx.x*blockDim.x;
if (idx < renderer.res_x * renderer.res_y)
{
// before scaling, save out luminance data
renderer.fb(FBufferDesc::LUMINANCE, idx) = cugar::Vector4f(
cugar::max_comp(cugar::Vector4f(renderer.fb(FBufferDesc::DIRECT_C, idx)).xyz()),
cugar::max_comp(cugar::Vector4f(renderer.fb(FBufferDesc::DIFFUSE_C, idx)).xyz()),
cugar::max_comp(cugar::Vector4f(renderer.fb(FBufferDesc::SPECULAR_C, idx)).xyz()),
cugar::max_comp(cugar::Vector4f(renderer.fb(FBufferDesc::COMPOSITED_C, idx)).xyz()) );
// rescale every accumulation channel (colors and albedos alike)
renderer.fb(FBufferDesc::DIFFUSE_C, idx) *= scale;
renderer.fb(FBufferDesc::DIFFUSE_A, idx) *= scale;
renderer.fb(FBufferDesc::SPECULAR_C, idx) *= scale;
renderer.fb(FBufferDesc::SPECULAR_A, idx) *= scale;
renderer.fb(FBufferDesc::DIRECT_C, idx) *= scale;
renderer.fb(FBufferDesc::COMPOSITED_C, idx) *= scale;
}
}
//------------------------------------------------------------------------------
// Kernel: incremental (online) update of the per-pixel luminance variances
// after the n-th sample has been accumulated. The "previous" luminances are
// the ones snapshotted by multiply_frame_kernel into the LUMINANCE channel;
// the running variance of each channel is kept in that channel's alpha.
__global__ void update_variances_kernel(RendererView renderer, const uint32 n)
{
const uint32 idx = threadIdx.x + blockIdx.x*blockDim.x;
if (idx < renderer.res_x * renderer.res_y)
{
// fetch the previous frame's luminances
const cugar::Vector4f old_lum = renderer.fb(FBufferDesc::LUMINANCE, idx);
// compute the new frame's luminances
const cugar::Vector4f new_lum = cugar::Vector4f(
cugar::max_comp(cugar::Vector4f(renderer.fb(FBufferDesc::DIRECT_C, idx)).xyz()),
cugar::max_comp(cugar::Vector4f(renderer.fb(FBufferDesc::DIFFUSE_C, idx)).xyz()),
cugar::max_comp(cugar::Vector4f(renderer.fb(FBufferDesc::SPECULAR_C, idx)).xyz()),
cugar::max_comp(cugar::Vector4f(renderer.fb(FBufferDesc::COMPOSITED_C, idx)).xyz()) );
// compute the change in variance (x(n) - avg(n-1))*(x(n) - avg(n)), which can be written as the sum of two terms:
// 1. n*avg(n) - (n-1)*avg(n-1) - avg(n-1) = n*(avg(n) - avg(n-1))
// 2. n*avg(n) - (n-1)*avg(n-1) - avg(n) = (n-1)*(avg(n) - avg(n-1))
const cugar::Vector4f delta_lum_1 = n * (new_lum - old_lum);
const cugar::Vector4f delta_lum_2 = (n - 1) * (new_lum - old_lum);
const cugar::Vector4f delta_var = (delta_lum_1 * delta_lum_2) / (n*n);
// add the variance deltas to the old variances (previously rescaled by (n-1)/n) stored in the alpha components of the respective channels
renderer.fb(FBufferDesc::DIRECT_C, idx).w += delta_var.x;
renderer.fb(FBufferDesc::DIFFUSE_C, idx).w += delta_var.y;
renderer.fb(FBufferDesc::SPECULAR_C, idx).w += delta_var.z;
renderer.fb(FBufferDesc::COMPOSITED_C, idx).w += delta_var.w;
}
}
//------------------------------------------------------------------------------
// Kernel: box-filter the per-pixel variance stored in the .w component of
// 'img' over a (2*FW+1)^2 window, clamped at the image borders, writing the
// averaged result into 'var' (row-major, one float per pixel).
__global__ void filter_variance_kernel(const FBufferChannelView img, float* var, const uint32 FW)
{
    const uint32 px = threadIdx.x + blockIdx.x*blockDim.x;
    const uint32 py = threadIdx.y + blockIdx.y*blockDim.y;
    if (px >= img.res_x || py >= img.res_y)
        return;

    // clamp the filter window against the image bounds
    const int32 x_lo = px > FW ? px - FW : 0;
    const int32 x_hi = px + FW < img.res_x ? px + FW : img.res_x - 1;
    const int32 y_lo = py > FW ? py - FW : 0;
    const int32 y_hi = py + FW < img.res_y ? py + FW : img.res_y - 1;

    float sum = 0.0f;
    for (int yy = y_lo; yy <= y_hi; yy++)
        for (int xx = x_lo; xx <= x_hi; xx++)
            sum += img(xx, yy).w;

    // average over the (possibly clipped) window area
    var[px + py * img.res_x] = sum / ((y_hi - y_lo + 1) * (x_hi - x_lo + 1));
}
// Host launcher for filter_variance_kernel: one thread per pixel on a 2D grid.
// FW is the filter half-width (the window is (2*FW+1)^2), defaulting to 1.
void filter_variance(const FBufferChannelView img, float* var, const uint32 FW = 1)
{
    const dim3 block(32, 4);
    const dim3 grid(
        cugar::divide_ri(img.res_x, block.x),
        cugar::divide_ri(img.res_y, block.y));
    filter_variance_kernel <<< grid, block >>> (img, var, FW);
    CUDA_CHECK(cugar::cuda::sync_and_check_error("filter_variance"));
}
//------------------------------------------------------------------------------
// Scale every accumulation channel of the framebuffer by 'scale'
// (the kernel also snapshots per-channel luminances before scaling).
void Renderer::multiply_frame(const float scale)
{
    const dim3 block(128);
    const dim3 grid(cugar::divide_ri(m_res_x * m_res_y, block.x));
    multiply_frame_kernel <<< grid, block >>>(view(0), scale);
    CUDA_CHECK( cugar::cuda::sync_and_check_error("multiply_frame") );
}
//------------------------------------------------------------------------------
// Rescale the running averages from 'instance' samples to make room for one
// more sample: multiplies every channel by instance / (instance + 1).
void Renderer::rescale_frame(const uint32 instance)
{
    const float scale = float(instance) / float(instance + 1);
    multiply_frame(scale);
}
//------------------------------------------------------------------------------
// Launch update_variances_kernel over the whole frame; 'instance' is the
// zero-based sample index, so the kernel receives the sample count n = instance + 1.
void Renderer::update_variances(const uint32 instance)
{
dim3 blockSize(128);
dim3 gridSize(cugar::divide_ri(m_res_x * m_res_y, blockSize.x));
update_variances_kernel <<< gridSize, blockSize >>>(view(0), instance + 1);
CUDA_CHECK( cugar::cuda::sync_and_check_error("update_variances") );
}
//------------------------------------------------------------------------------
// Renderer initialization
//
// Renderer initialization: parses the command line, loads the scene and its
// textures, uploads everything to the device, builds the OptiX Prime RT index
// and initializes the chosen path-sampling backend.
//
// Recognized options: -i scene, -r/-res W H, -a/-aspect F, -c camera.txt,
// and renderer selection flags -pt | -bpt | -cmlt | -pssmlt | -rpt.
void Renderer::init(int argc, char** argv)
{
// defaults
const char* filename = NULL;
m_renderer_type = kBPT;
m_exposure = 1.0f;
m_res_x = 1600;
m_res_y = 900;
m_aspect = 0.0f;
m_shading_rate = 1.0f;
m_shading_mode = kShaded;
// first command-line pass: input file, renderer type, resolution, aspect, camera
for (int i = 0; i < argc; ++i)
{
if (strcmp(argv[i], "-i") == 0)
filename = argv[++i];
else if (strcmp(argv[i], "-pt") == 0)
m_renderer_type = kPT;
else if (strcmp(argv[i], "-bpt") == 0)
m_renderer_type = kBPT;
else if (strcmp(argv[i], "-cmlt") == 0)
m_renderer_type = kCMLT;
else if (strcmp(argv[i], "-pssmlt") == 0)
m_renderer_type = kPSSMLT;
else if (strcmp(argv[i], "-rpt") == 0)
m_renderer_type = kRPT;
else if (strcmp(argv[i], "-r") == 0 ||
strcmp(argv[i], "-res") == 0)
{
m_res_x = atoi(argv[++i]);
m_res_y = atoi(argv[++i]);
}
else if (strcmp(argv[i], "-a") == 0 ||
strcmp(argv[i], "-aspect") == 0)
{
m_aspect = (float)atof(argv[++i]);
}
else if (strcmp(argv[i], "-c") == 0)
{
FILE* camera_file = fopen(argv[++i], "r");
if (camera_file == NULL)
{
fprintf(stderr, "failed opening camera file %s\n", argv[i]);
exit(0);
}
// camera file layout: eye(3), aim(3), up(3), fov
// NOTE(review): fscanf return values are unchecked - a short or garbled
// file silently leaves camera fields uninitialized; consider validating.
fscanf(camera_file, "%f %f %f", &m_camera.eye.x, &m_camera.eye.y, &m_camera.eye.z);
fscanf(camera_file, "%f %f %f", &m_camera.aim.x, &m_camera.aim.y, &m_camera.aim.z);
fscanf(camera_file, "%f %f %f", &m_camera.up.x, &m_camera.up.y, &m_camera.up.z);
fscanf(camera_file, "%f", &m_camera.fov);
m_camera.dx = normalize(cross(m_camera.aim - m_camera.eye, m_camera.up));
fclose(camera_file);
}
}
// derive the aspect ratio from the resolution when not given explicitly
if (m_aspect == 0.0f)
m_aspect = float(m_res_x) / float(m_res_y);
// no input scene: print usage and quit
if (filename == NULL)
{
fprintf(stderr, "options:\n");
fprintf(stderr, " -i scene.obj specify the input scene\n");
fprintf(stderr, " -r int int specify the resolution\n");
fprintf(stderr, " -a float specify the aspect ratio\n");
fprintf(stderr, " -c camera.txt specify a camera file\n");
fprintf(stderr, " -pt use the PT renderer\n");
fprintf(stderr, " -bpt use the BPT renderer\n");
fprintf(stderr, " -mlt use the MLT renderer\n");
fprintf(stderr, " -cmlt use the CMLT renderer\n");
fprintf(stderr, " -pssmlt use the PSSMLT renderer\n");
exit(0);
}
// second pass: re-read the camera file
// NOTE(review): this duplicates the -c handling from the first pass, and
// unlike it does NOT check fopen() for NULL - confirm whether this loop can
// simply be removed.
for (int i = 0; i < argc; ++i)
{
if (strcmp(argv[i], "-c") == 0)
{
FILE* camera_file = fopen(argv[++i], "r");
fscanf(camera_file, "%f %f %f", &m_camera.eye.x, &m_camera.eye.y, &m_camera.eye.z);
fscanf(camera_file, "%f %f %f", &m_camera.aim.x, &m_camera.aim.y, &m_camera.aim.z);
fscanf(camera_file, "%f %f %f", &m_camera.up.x, &m_camera.up.y, &m_camera.up.z);
fscanf(camera_file, "%f", &m_camera.fov);
m_camera.dx = normalize(cross(m_camera.aim - m_camera.eye, m_camera.up));
fclose(camera_file);
}
}
// allocate the 8-bit output image and the per-pixel variance scratch buffer
m_rgba.alloc(m_res_x * m_res_y * 4);
m_var.alloc(m_res_x * m_res_y);
// report the CUDA device and its total memory
// NOTE(review): cudaGetDeviceProperties / cudaMemGetInfo return codes are
// not checked here.
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
fprintf(stderr, "cuda device: %s\n", prop.name);
size_t free, total;
cudaMemGetInfo(&free, &total);
fprintf(stderr, " memory: %.3f GB\n",
float(total) / (1024 * 1024 * 1024));
std::vector<unsigned int> devices(1);
devices[0] = 0;
cudaSetDevice( devices[0] );
// create an Optix Prime context
m_context = optix::prime::Context::create(RTP_CONTEXT_TYPE_CUDA);
m_context->setCudaDeviceNumbers( devices );
// instantiate the selected rendering backend
switch (m_renderer_type)
{
case kPT: { m_renderer = new PathTracer(); break; }
case kBPT: { m_renderer = new BPT(); break; }
case kCMLT: { m_renderer = new CMLT(); break; }
case kPSSMLT: { m_renderer = new PSSMLT(); break; }
case kRPT: { m_renderer = new RPT(); break; }
default: { m_renderer = new PathTracer(); break; }
};
// set up the framebuffer: standard channels plus renderer-specific aux channels
const uint32 aux_channels = m_renderer->auxiliary_channel_count();
m_fb.set_channel_count(FBufferDesc::NUM_CHANNELS + aux_channels);
m_fb.set_channel(FBufferDesc::DIFFUSE_C, "diffuse_color");
m_fb.set_channel(FBufferDesc::DIFFUSE_A, "diffuse_albedo");
m_fb.set_channel(FBufferDesc::SPECULAR_C, "specular_color");
m_fb.set_channel(FBufferDesc::SPECULAR_A, "specular_albedo");
m_fb.set_channel(FBufferDesc::DIRECT_C, "direct_color");
m_fb.set_channel(FBufferDesc::COMPOSITED_C, "composited_color");
m_fb.set_channel(FBufferDesc::FILTERED_C, "filtered_color");
m_fb.set_channel(FBufferDesc::LUMINANCE, "luminance");
m_renderer->register_auxiliary_channels( m_fb, FBufferDesc::NUM_CHANNELS );
m_fb.resize(m_res_x, m_res_y);
// ping-pong buffers used by the EAW filter
m_fb_temp[0].resize(m_res_x, m_res_y);
m_fb_temp[1].resize(m_res_x, m_res_y);
#if 0
// pre-computer the samples buffer
m_samples.alloc(m_res_x * m_res_y);
{
DomainBuffer<RTP_BUFFER_TYPE_HOST, float2> samples(m_res_x * m_res_y);
cugar::MJSampler sampler;
sampler.sample(m_res_x, m_res_y, (cugar::Vector2f*)samples.ptr());
m_samples = samples;
}
#endif
// Load the LTC coefficients
{
fprintf(stderr, "initializing LTC coefficients... started\n");
// stage the tables on the host, then upload to the device buffers
DomainBuffer<HOST_BUFFER, float4> ltc_M;
DomainBuffer<HOST_BUFFER, float4> ltc_Minv;
ltc_M.alloc(ltc_ggx::size * ltc_ggx::size);
ltc_Minv.alloc(ltc_ggx::size * ltc_ggx::size);
cugar::LTCBsdf::preprocess(ltc_ggx::size, (const cugar::Matrix3x3f*)ltc_ggx::tabM, ltc_M.ptr(), ltc_Minv.ptr());
m_ltc_size = ltc_ggx::size;
m_ltc_M = ltc_M;
m_ltc_Minv = ltc_Minv;
m_ltc_A.alloc(ltc_ggx::size * ltc_ggx::size);
m_ltc_A.copy_from(ltc_ggx::size * ltc_ggx::size, HOST_BUFFER, ltc_ggx::tabAmplitude);
fprintf(stderr, "initializing LTC coefficients... done\n");
}
fprintf(stderr, "loading mesh file %s... started\n", filename);
// directories searched for the scene's auxiliary files (textures etc.)
std::vector<std::string> scene_dirs;
{
scene_dirs.push_back(""); // always look in the current directory
char local_path[2048];
extract_path(filename, local_path);
scene_dirs.push_back(local_path);
}
// Create the Model object
//
try
{
std::vector<std::string> dirs = scene_dirs;
// ".fa" scene description files go through load_scene; everything else
// through the generic model loader
if (strlen(filename) > 3 && strcmp(filename+strlen(filename)-3, ".fa") == 0)
load_scene(filename, m_mesh, dirs, scene_dirs);
else
loadModel(filename, m_mesh);
// compute the bbox
if (1)
{
cugar::Vector3f bmin(1.0e16f, 1.0e16f, 1.0e16f);
cugar::Vector3f bmax(-1.0e16f, -1.0e16f, -1.0e16f);
float3* v = reinterpret_cast<float3*>(m_mesh.getVertexData());
for (int32_t i = 0; i < m_mesh.getNumVertices(); ++i)
{
bmin = cugar::min(bmin, cugar::Vector3f(v[i]));
bmax = cugar::max(bmax, cugar::Vector3f(v[i]));
}
// scale the model
// (disabled: would re-center the model and normalize its largest extent to 1)
if (0)
{
const cugar::Vector3f center = (bmin + bmax) * 0.5f;
const float scale = 1.0f / cugar::max3(bmax[0] - bmin[0], bmax[1] - bmin[1], bmax[2] - bmin[2]);
for (int32_t i = 0; i < m_mesh.getNumVertices(); ++i)
v[i] = (v[i] - center) * scale;
}
// print the bounding box
fprintf(stderr, " bbox[%f, %f, %f][%f, %f, %f]\n",
bmin[0], bmin[1], bmin[2],
bmax[0], bmax[1], bmax[2]);
}
}
catch (MeshException e)
{
fprintf(stderr, " error loading mesh file %s : %s\n", filename, e.what());
exit(1);
}
fprintf(stderr, "loading mesh file %s... done (%d triangles, %d materials, %d groups)\n", filename, m_mesh.getNumTriangles(), m_mesh.getNumMaterials(), m_mesh.getNumGroups());
{
// print the group names
for (int32 i = 0; i < m_mesh.getNumGroups(); ++i)
fprintf(stderr, " group[%d] : %s, %u triangles\n", i,
m_mesh.getGroupName(i).c_str(),
m_mesh.getGroupOffsets()[i + 1] - m_mesh.getGroupOffsets()[i]);
}
// load all textures
{
fprintf(stderr, "loading %u textures... started\n", (uint32)m_mesh.m_textures.size());
m_textures_h.resize( m_mesh.m_textures.size() );
m_textures_d.resize( m_mesh.m_textures.size() );
for (size_t i = 0; i < m_mesh.m_textures.size(); ++i)
{
// every slot gets a (possibly empty) mip-map object so indices stay aligned
// with m_mesh.m_textures even when a texture fails to load
m_textures_h[i] = HostMipMapStoragePtr(new MipMapStorage<HOST_BUFFER>());
m_textures_d[i] = DeviceMipMapStoragePtr(new MipMapStorage<CUDA_BUFFER>());
// try to load the texture
char local_path[2048];
extract_path(filename, local_path);
char texture_name[2048];
strcpy(texture_name, m_mesh.m_textures[i].c_str());
if (find_file(texture_name, scene_dirs))
{
// NOTE(review): the suffix test below assumes strlen(texture_name) >= 4;
// a shorter name would read out of bounds - confirm loader guarantees.
if (strcmp(texture_name + strlen(texture_name) - 4, ".tga") == 0)
{
cugar::TGAHeader tga_header;
unsigned char* rgb = cugar::load_tga(texture_name, &tga_header);
if (rgb)
{
MipMapStorage<HOST_BUFFER>::TexturePtr texture_h(new TextureStorage<HOST_BUFFER>());
texture_h->resize(tga_header.width, tga_header.height);
float4* tex = texture_h->ptr();
// expand 8-bit RGB to normalized float4 (alpha left at 0)
for (uint32 p = 0; p < uint32(tga_header.width) * uint32(tga_header.height); ++p)
tex[p] = make_float4(
float(rgb[3 * p + 0]) / 255.0f,
float(rgb[3 * p + 1]) / 255.0f,
float(rgb[3 * p + 2]) / 255.0f,
0.0f);
// generate the mipmap for this texture
m_textures_h[i]->set(texture_h);
// and copy it to the device
*m_textures_d[i] = *m_textures_h[i];
delete[] rgb;
}
else
fprintf(stderr, "warning: unable to load texture %s\n", texture_name);
}
else
fprintf(stderr, "warning: unsupported texture format %s\n", texture_name);
}
else
fprintf(stderr, "warning: unable to find texture %s\n", texture_name);
}
// build the flat arrays of texture views used by the kernels
m_texture_views_h.alloc(m_mesh.m_textures.size());
for (uint32 i = 0; i < m_textures_h.size(); ++i)
m_texture_views_h.set(i, m_textures_h[i]->view());
m_texture_views_d.alloc(m_mesh.m_textures.size());
for (uint32 i = 0; i < m_textures_d.size(); ++i)
m_texture_views_d.set(i, m_textures_d[i]->view());
fprintf(stderr, "loading %u textures... done\n", (uint32)m_mesh.m_textures.size());
}
// checking materials
// (validate every triangle's material index before touching the device)
for (int32_t i = 0; i < m_mesh.getNumTriangles(); ++i)
{
const int m = m_mesh.getMaterialIndices()[i];
if (m < 0 || m >= m_mesh.getNumMaterials())
{
fprintf(stderr, "material[%u] : %u out of range\n", i, m);
exit(1);
}
}
// copy to the device
m_mesh_d = m_mesh;
{
size_t mem_free, mem_tot;
cudaSetDevice(0);
cudaMemGetInfo(&mem_free, &mem_tot);
fprintf(stderr, "free device memory: %.3f GB\n", float(mem_free) / (1024 * 1024 * 1024));
}
// build the OptiX Prime acceleration structure over the host mesh
// NOTE(review): "creatign" typo in the two log messages below (runtime
// strings, intentionally left untouched here).
fprintf(stderr, "creatign RT index... started\n");
try
{
m_model = m_context->createModel();
m_model->setTriangles(
m_mesh.getNumTriangles(), RTP_BUFFER_TYPE_HOST, m_mesh.getVertexIndices(),
m_mesh.getNumVertices(), RTP_BUFFER_TYPE_HOST, m_mesh.getVertexData());
m_model->update(0);
}
catch (optix::prime::Exception& e)
{
fprintf(stderr, " error[%d] : %s\n", e.getErrorCode(), e.getErrorString().c_str());
exit(1);
}
fprintf(stderr, "creatign RT index... done\n");
fprintf(stderr, "initializing path sampler... started\n");
// let the backend allocate its own state (queues, buffers, ...)
m_renderer->init(argc, argv, *this);
fprintf(stderr, "initializing path sampler... done\n");
{
size_t mem_free, mem_tot;
cudaSetDevice(0);
cudaMemGetInfo(&mem_free, &mem_tot);
fprintf(stderr, "free device memory: %.3f GB\n", float(mem_free) / (1024 * 1024 * 1024));
}
#if 0
// disabled radix-sort micro-benchmark
cugar::host_vector<uint32_t> h_randoms(1024 * 1024);
for (uint32_t i = 0; i < 1024 * 1024; ++i)
h_randoms[i] = rand();
cugar::device_vector<uint32_t> d_randoms = h_randoms;
cugar::device_vector<uint32_t> d_vals = h_randoms;
cugar::device_vector<uint8_t> temp_storage;
cugar::radix_sort<cugar::device_tag>(1024 * 1024, cugar::raw_pointer(d_randoms), cugar::raw_pointer(d_vals), temp_storage);
for (uint32_t i = 0; i < 10; ++i)
{
d_randoms = h_randoms;
const uint32_t n_keys = (1u << (i + 1)) * 1024;
cugar::cuda::Timer timer;
timer.start();
cugar::radix_sort<cugar::device_tag>(n_keys, cugar::raw_pointer(d_randoms), cugar::raw_pointer(d_vals), temp_storage);
timer.stop();
fprintf(stderr, "%u K items : %.2fms\n", n_keys / 1024, timer.seconds() * 1000.0f);
}
#endif
}
// Reset every framebuffer channel to its cleared (zeroed) state.
void Renderer::clear()
{
    const uint32_t n_channels = m_fb.channel_count();
    for (uint32_t channel = 0; channel < n_channels; ++channel)
        m_fb.channels[channel].clear();
}
// Rebuild the OptiX Prime acceleration structure after the host-side mesh has
// been modified, then mirror the mesh data to the device copy (m_mesh_d).
void Renderer::update_model()
{
//m_model = m_context->createModel();
// re-submit the host-resident triangle data to the existing RT model
m_model->setTriangles(
m_mesh.getNumTriangles(), RTP_BUFFER_TYPE_HOST, m_mesh.getVertexIndices(),
m_mesh.getNumVertices(), RTP_BUFFER_TYPE_HOST, m_mesh.getVertexData());
m_model->update(0);
// block until the (possibly asynchronous) BVH rebuild has completed
m_model->finish();
CUDA_CHECK(cugar::cuda::sync_and_check_error("model update"));
// copy to the device
m_mesh_d = m_mesh;
// TODO: update m_mesh_lights if needed!
}
// Renderer display function
//
// Render one frame instance: clears the primary G-buffer, runs the active
// path-sampling backend, applies the EAW filtering pass, and converts the
// displayed channel to 8-bit RGBA in m_rgba. Any CUDA error aborts the process.
//
// \param instance   zero-based frame/sample index
void Renderer::render(const uint32 instance)
{
try
{
RendererView renderer_view = view(instance);
// clear the primary Gbuffer
m_fb.gbuffer.clear();
//cudaDeviceSynchronize();
m_renderer->render(instance, *this);
// apply filtering, if enabled
filter( instance );
// tone-map / quantize the selected channel into the 8-bit output buffer
to_rgba(renderer_view, m_rgba.ptr());
}
catch (cugar::cuda_error& error)
{
fprintf(stderr, "caught cuda error: %s\n", error.what());
exit(0);
}
}
// Assemble a by-value RendererView snapshot of the device-side renderer state
// (camera, lights, mesh, textures, LTC tables, framebuffer and display
// settings) suitable for passing to kernels by value.
//
// \param instance   frame/sample index, forwarded into the view
RendererView Renderer::view(const uint32 instance)
{
RendererView renderer_view(
m_camera,
m_light,
m_mesh_d.view(),
// NOTE(review): the bool argument selects between two mesh-light views -
// its exact meaning is not visible here; confirm against MeshLights.
m_mesh_lights.view(false),
m_mesh_lights.view(true),
m_texture_views_d.ptr(),
m_ltc_size,
m_ltc_M.ptr(),
m_ltc_Minv.ptr(),
m_ltc_A.ptr(),
m_res_x,
m_res_y,
m_aspect,
m_exposure,
m_shading_rate,
m_shading_mode,
m_fb.view(),
instance );
return renderer_view;
}
// Edge-avoiding A-Trous wavelet (EAW) filtering pass: denoises the diffuse
// and specular channels into FILTERED_C on top of the unfiltered direct
// lighting, guided by the G-buffer and a box-filtered variance estimate.
//
// \param instance   zero-based sample index; used to tighten phi_color over time
void Renderer::filter(const uint32 instance)
{
// setup some ping-pong buffers
FBufferChannelView pingpong[2];
pingpong[0] = m_fb_temp[0].view();
pingpong[1] = m_fb_temp[1].view();
// clear the output filter
// (start from the unfiltered direct lighting; filtered contributions are added on top)
m_fb.channels[FBufferDesc::FILTERED_C] = m_fb.channels[FBufferDesc::DIRECT_C];
FBufferChannelView output = m_fb.channels[FBufferDesc::FILTERED_C].view();
EAWParams eaw_params;
// edge-stopping weights; phi_color grows with the sample count
eaw_params.phi_normal = /*sqrtf(float(instance + 1)) **/ 128.0f;
eaw_params.phi_position = /*sqrtf(float(instance + 1)) **/ 8.0f;
eaw_params.phi_color = float(instance + 1) / 20.0f;
//eaw_params.phi_color = float(instance*instance + 1) / 10000.0f;
const uint32 n_iterations = 5;
// filter the diffuse channel
{
GBufferView gbuffer = m_fb.gbuffer.view();
FBufferChannelView input = m_fb.channels[FBufferDesc::DIFFUSE_C].view();
FBufferChannelView weight = m_fb.channels[FBufferDesc::DIFFUSE_A].view();
// estimate per-pixel variance with a box filter of half-width 2
filter_variance(input, m_var.ptr(), 2);
EAW(
n_iterations,
output, // destination
weight, // weight
input, // input
gbuffer, // gbuffer
m_var.ptr(), // variance
eaw_params, pingpong);
}
// filter the specular channel
{
GBufferView gbuffer = m_fb.gbuffer.view();
FBufferChannelView input = m_fb.channels[FBufferDesc::SPECULAR_C].view();
FBufferChannelView weight = m_fb.channels[FBufferDesc::SPECULAR_A].view();
filter_variance(input, m_var.ptr(), 2);
EAW(
n_iterations,
output, // destination
weight, // weight
input, // input
gbuffer, // gbuffer
m_var.ptr(), // variance
eaw_params, pingpong);
}
} |
87855a4fbd79a1bcdebf66a1da5c6e7a3195ba36.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <chrono>
#include <fstream>
#define arraySize 31 //35 max
#define def_div 10 // 5<=X<=15
#define W 100
//#define threads_per_block 32
//#define max_blocks 32
using namespace std;
__constant__ float coefs[arraySize*2];
// Hybrid GPU knapsack kernel: each thread owns one fixed assignment of the
// first def_div items (decoded from its global thread id) and then runs a
// depth-first branch-and-bound search over the remaining items, tracking its
// best achievable value in `reached`. A block-wide max-reduction then writes
// the block's best value / winning thread id to sh_sum_dev / str_num_dev and
// the winner's solution bits to global_mem_bin[blockIdx.x*arraySize ..].
// Launch contract: dynamic shared memory = threads_per_block * 2 * sizeof(int)
// bytes (split into a float and an int array); threads_per_block must equal
// blockDim.x and be a power of two for the tree reduction to cover all lanes.
// NOTE(review): `num_of_blocks` and `bdevX` are unused parameters.
__global__ void hybrid(float *sh_sum_dev, long int *str_num_dev, float num_of_blocks, int* bdevX,int* global_mem_bin,int threads_per_block)
{
// running weight / value of this thread's fixed item prefix
float th_w_sum = 0;
float th_v_sum = 0;
// th_bin: current DFS assignment (-1 = undecided, 0/1 = decided)
int th_bin[arraySize];
int best_bin[arraySize];
// dynamic shared memory: per-thread best values followed by thread indices
extern __shared__ float sh_array[];
float* sh_maxs = (float*)sh_array;
int* indices = (int*)&sh_maxs[threads_per_block];
int reached = 0;
indices[threadIdx.x] = blockIdx.x * blockDim.x + threadIdx.x;
__syncthreads();
// the global thread id doubles as the bit pattern of the fixed prefix
long signed int num_to_bin = blockIdx.x * blockDim.x + threadIdx.x;
//num_to_bin += max_blocks * n_of_it;
#pragma unroll
for (uint i = 0; i < def_div; i++)
{
th_bin[i] = ((num_to_bin) >> i) % 2;
th_w_sum += th_bin[i] * coefs[i];
th_v_sum += th_bin[i] * coefs[i+arraySize];
best_bin[i] = th_bin[i];
}
// items beyond the prefix start undecided
#pragma unroll
for (uint i = def_div; i < arraySize; i++)
{
th_bin[i] = -1;
}
// remaining knapsack capacity after the fixed prefix
int Capacity = W - th_w_sum;
// infeasible prefixes contribute 0 to the block maximum
sh_maxs[threadIdx.x] = (th_w_sum > W) ? 0:th_v_sum;
__syncthreads ();
//H_S: depth-first branch-and-bound over items def_div..arraySize-1
int h = def_div;
long int ns = 0;
bool forward;
while(h-def_div!=-1){
ns++;
forward = true;
// cycle the current item through -1 -> 1 -> 0 -> -1 (backtrack)
if(th_bin[h]==-1){
th_bin[h]=1;
}else{
if(th_bin[h]==1){
th_bin[h]=0;
}else{
if(th_bin[h]==0){
th_bin[h]=-1;
h--;
forward=false;
}
}
}
if(h==arraySize-1){
// leaf: evaluate the complete assignment
int cw = 0;
int cp = 0;
#pragma unroll
for(int i = def_div;i<arraySize;i++){
cp += coefs[i+arraySize] * th_bin[i];
cw += coefs[i] * th_bin[i];
}
if((cw <= Capacity) &&(cp > reached)){
reached = cp;
#pragma unroll
for(int i = def_div; i < arraySize; i++){
best_bin[i] = th_bin[i];
}
}
}
else{
// internal node: prune by weight, then by a fractional upper bound
int cw = 0;
for(int i = def_div ; i < arraySize; i++){
cw += coefs[i] * th_bin[i];
}
if (cw > Capacity) forward = false;
cw = 0;
float cp = 0;
int nw = 0;
int np = 0;
// greedy fractional relaxation: undecided items assumed taken until
// capacity is exceeded, the last one taken fractionally
#pragma unroll
for(int i = def_div;i < arraySize;i++){
np = th_bin[i]!=-1? th_bin[i] * coefs[i+arraySize]:coefs[i+arraySize];
nw = th_bin[i]!=-1? th_bin[i] * coefs[i]: coefs[i];
if(cw+nw <= Capacity){
cw += nw;
cp += np;
}
else{
cp+=np*(Capacity-cw)/nw;
break;
}
}
int b = cp;
// bound cannot beat the incumbent: prune this subtree
if (b <= reached){
forward = false;
}
}
if(forward){if(h<arraySize-1){h++;}
}
}
// total value = fixed prefix value (already stored) + best suffix value
sh_maxs[threadIdx.x] += reached;
__syncthreads();
//reduction on block: argmax over (value, thread id) pairs
for (uint offset = blockDim.x >> 1; offset >= 1; offset >>= 1)
{
if (threadIdx.x < offset)
{
if (sh_maxs[threadIdx.x] < sh_maxs[threadIdx.x + offset])
{
sh_maxs[threadIdx.x] = sh_maxs[threadIdx.x + offset];
indices[threadIdx.x] = indices[threadIdx.x + offset];
}
}
__syncthreads ();
}
// write result for this block to global mem
if(threadIdx.x == 0){
sh_sum_dev[blockIdx.x] = sh_maxs[0];
str_num_dev[blockIdx.x] = indices[0];
}
// the winning thread publishes its solution bits for this block.
// NOTE(review): best_bin[def_div..] is only written when `reached` improves;
// if the winner never improved its incumbent (reached == 0), uninitialized
// suffix values are copied out here -- verify this cannot happen in practice.
if(blockIdx.x*blockDim.x+threadIdx.x == indices[0]){
#pragma unroll
for(int i = 0; i < arraySize;i++){
global_mem_bin[blockIdx.x*arraySize + i] = best_bin[i];
}
}
__syncthreads();
}
// Second-stage reduction: run by a single block over the per-block results of
// `hybrid`. Finds the global (max value, thread id) pair and copies the
// winner's solution row of global_mem_bin into row 0.
// Launch contract: <<<1, num_first_stage_blocks>>> with dynamic shared memory
// threads_per_block * 2 * sizeof(int); assumes the first-stage block count
// equals threads_per_block (both derive from warpSize in main()).
// NOTE(review): the float values from `s` are truncated to int when staged in
// shared memory -- acceptable only because the values are integral sums.
// NOTE(review): the winner's row index is computed as
// (winning global thread id)/arraySize, but rows were written at
// blockIdx*arraySize by `hybrid`; winner/blockDim.x would be the natural
// mapping -- verify this index is correct.
__global__ void
hybrid_reduction (float *s, long int *str_num_dev,int* global_mem_bin,int threads_per_block)
{
int ID = blockIdx.x * blockDim.x + threadIdx.x;
// shared layout: [0..tpb) candidate values, [tpb..2*tpb) thread indices
extern __shared__ int sh_hy_data[];
sh_hy_data[threadIdx.x] = s[ID];
sh_hy_data[threadIdx.x + threads_per_block] = str_num_dev[ID];
__syncthreads ();
// do reduction in shared mem (loop variable `s` shadows the parameter `s`)
for (uint s = blockDim.x >>1; s > 0; s >>= 1)
{
if (threadIdx.x < s)
{
if (sh_hy_data[threadIdx.x] < sh_hy_data[threadIdx.x + s])
{
sh_hy_data[threadIdx.x] = sh_hy_data[threadIdx.x + s];
sh_hy_data[threadIdx.x + threads_per_block] =
sh_hy_data[threadIdx.x + threads_per_block + s];
}
}
__syncthreads ();
}
// write result for this block to global mem
if (threadIdx.x == 0)
{
//if(sh_hy_data[0]>s[0]){//}&&(blockIdx.x>0)){
s[blockIdx.x] = sh_hy_data[0];
str_num_dev[blockIdx.x] = sh_hy_data[threads_per_block];
// move the winner's solution bits to the front of global_mem_bin
#pragma unroll
for(int i = 0; i < arraySize;i++){
global_mem_bin[i] = global_mem_bin[(sh_hy_data[threads_per_block]/arraySize)*arraySize + i];
}
}
}
// Decode the bits of `a` into view_dev: thread t writes bit t of `a`
// (one thread per bit position; launch with as many threads as bits wanted).
__global__ void
which_string (long int a, int *view_dev)
{
const unsigned bit_pos = threadIdx.x;
const long int bit_val = (a >> bit_pos) % 2;
view_dev[bit_pos] = bit_val;
}
// Recursive quicksort over a[0..N] (N is the index of the LAST element),
// ordering `a` in DESCENDING order while applying the identical permutation
// to b[0..N] and to the paired block b[arraySize..arraySize+N], so that
// (ratio, weight, value) triples stay aligned.
// Precondition: b must have at least N + 1 + arraySize elements.
void quickSortR(float* a,float* b, long N) {
long lo = 0, hi = N;
// middle element as the pivot
const float pivot = a[ N>>1 ];
// Hoare-style partition: swap misplaced pairs around the pivot
do {
while ( a[lo] > pivot ) lo++;
while ( a[hi] < pivot ) hi--;
if (lo <= hi) {
float t;
t = a[lo]; a[lo] = a[hi]; a[hi] = t;
t = b[lo]; b[lo] = b[hi]; b[hi] = t;
t = b[lo+arraySize]; b[lo+arraySize] = b[hi+arraySize]; b[hi+arraySize] = t;
lo++; hi--;
}
} while ( lo<=hi );
// recurse into whichever partitions still hold more than one element
if ( hi > 0 ) quickSortR(a,b, hi);
if ( N > lo ) quickSortR(a+lo,b+lo, N-lo);
}
// Host driver: solves the 0/1 knapsack instance baked into dev_coefs
// (arraySize items, capacity W) on the GPU with the hybrid
// enumeration + branch-and-bound kernels, then again on the CPU with a plain
// branch-and-bound, and reports both answers, timings and the speedup.
// Fixes vs. the original: str_num_dev allocated with sizeof(long int),
// Sum copied with sizeof(float), invalid hipFree of the __constant__ symbol
// `coefs` removed, double-free of sh_sum_dev/str_num_dev removed, and the
// leaked X/bestX/cpu_bin host arrays are released.
int main(){
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, 0);
int threads_per_block = deviceProp.warpSize;
// 2^def_div fixed item prefixes are enumerated, one per GPU thread
int max_blocks = pow(2,def_div)/threads_per_block;
long int strSize_b = pow (2, arraySize);
int num_of_blocks = strSize_b / threads_per_block;
float *Sum = new float[32]; // = { 0 };
float *sh_sum_dev;
//float weight[31] ={ 5, 10, 17, 19, 20, 23, 26, 30, 32, 38, 40, 44, 47, 50, 55, 56, 56, 60, 62, 66, 70, 75, 77, 80, 81, 90,93,96,101,107,115 };
//float values[31] ={ 10, 13, 16, 22, 30, 25, 55, 90, 110, 115, 130, 120, 150, 170, 194, 199, 194, 199, 217, 230, 248, 250, 264, 271, 279, 286,293,299,305,313,321 };
// first arraySize entries are the item weights, the next arraySize the values
float dev_coefs[62] = {2,1,8,2,17,22,21,33,54,53,29,34,91,24,82,91,51,9,64,14,44,30,23,98,38,55,98,64,57,80,66,49,24,89,15,87,86,77,81,89,82,44,38,86,22,75,72,40,7,47,9,28,17,10,42,15,20,32,15,6,4,1};
long int *str_num_dev;
long int *str_num = new long int[1];
cout<<"sing param = "<<max_blocks<<" _ "<< threads_per_block<<"\n";
cout<<"red param "<<1<<" , "<<max_blocks<<"\n";
// sort items by value/weight ratio (descending) to tighten the bound
float* additional_array = new float[arraySize];
for(int i = 0; i < arraySize;i++){
additional_array[i] = dev_coefs[i+arraySize]/dev_coefs[i];
}
quickSortR(additional_array,dev_coefs,arraySize-1);
float t1,t2;
float acceleration = 0;
std::chrono::time_point<std::chrono:: high_resolution_clock> start, end;
start = std::chrono::high_resolution_clock::now();
int* bdevX;
hipMalloc ((void **) &bdevX, arraySize * sizeof (int));
int* global_mem_bin;
hipMalloc ((void **) &global_mem_bin, max_blocks*arraySize * sizeof (int));
hipMalloc ((void **) &sh_sum_dev, num_of_blocks * sizeof (float));
// BUGFIX: was sizeof(float) -- the buffer holds long int winner indices
hipMalloc ((void **) &str_num_dev, num_of_blocks * sizeof (long int));
hipMemcpyToSymbol (coefs, dev_coefs, 2*arraySize * sizeof (float));
hipLaunchKernelGGL(( hybrid) , dim3(max_blocks), dim3(threads_per_block) ,threads_per_block*2*sizeof(int), 0, sh_sum_dev, str_num_dev, num_of_blocks,bdevX,global_mem_bin,threads_per_block);
hipLaunchKernelGGL((
hybrid_reduction), dim3(1),dim3(max_blocks),threads_per_block*2*sizeof(int), 0, sh_sum_dev,str_num_dev,global_mem_bin,threads_per_block);
int* suda = new int[arraySize];
// BUGFIX: copy a float (not an int) into the float result buffer
hipMemcpy (Sum, sh_sum_dev, sizeof (float), hipMemcpyDeviceToHost);
hipMemcpy (str_num, str_num_dev, sizeof (long int), hipMemcpyDeviceToHost);
hipMemcpy (suda, global_mem_bin, arraySize*sizeof (int), hipMemcpyDeviceToHost);
end = std::chrono:: high_resolution_clock::now();
int elapsed_seconds = std::chrono::duration_cast<std::chrono::microseconds>
(end-start).count();
std::time_t end_time = std::chrono::system_clock::to_time_t(end);
std::cout<< " : " << elapsed_seconds << "microseconds\n";
t1 = elapsed_seconds;
cout << "Acheived maximal sum = " << Sum[0] << "\n";
for (int i = 0; i < arraySize; i++)
{
cout << suda[i];
} cout << "\n";
// re-evaluate the reported bit string on the host as a sanity check
int checksum = 0;
for (int i = 0; i < arraySize; i++)
{
checksum += dev_coefs[i+arraySize] * suda[i];
}
cout << "Validation sum = " << checksum << "\n";
checksum = 0;
for (int i = 0; i < arraySize; i++)
{
checksum += dev_coefs[i] * suda[i];
} cout << "Weight = " << checksum << "\n";
// ofstream fout;
// fout.open("data_uncorr_hybrid.txt",ios_base::app);
// fout<<"GPU\n"<<Sum[0]<<"\n"<<elapsed_seconds<<"\n\n";
// NOTE: `coefs` is a __constant__ symbol, not a hipMalloc'd pointer; the
// original hipFree(coefs) calls were invalid and have been removed.
hipFree (sh_sum_dev);
hipFree (str_num_dev);
hipFree(bdevX);
hipFree(global_mem_bin);
delete [] Sum;
delete [] str_num;
cout<<". CPU version:\n";
start = std::chrono::high_resolution_clock::now();
// CPU reference: the same branch-and-bound, starting from an empty prefix
int *X = new int[arraySize];
int *bestX = new int[arraySize];
for(int i = 0; i < arraySize; i++){
X[i] = -1;
bestX[i] = 0;
}
int curr_sum = 0;
int reached_max = 0;
float *cpu_bin = new float[arraySize];
for(int i = 0; i < arraySize;i++){
additional_array[i] = dev_coefs[i+arraySize]/dev_coefs[i];
}
quickSortR(additional_array,dev_coefs,arraySize-1);
int h = 0;
int k = h;//def_div;
long int ns = 0;
bool forward;
while(h-k!=-1){
ns++;
forward = true;
// cycle the current item through -1 -> 1 -> 0 -> -1 (backtrack)
if(X[h]==-1){
X[h]=1;
}else{
if(X[h]==1){
X[h]=0;
}else{
if(X[h]==0){
X[h]=-1;
h--;
forward=false;
}
}
}
if(h==arraySize-1){
// leaf: evaluate the complete assignment
int cw = 0;
int cp = 0;
for(int i = k;i<arraySize;i++){
cp += dev_coefs[i+arraySize]*X[i];
cw += dev_coefs[i]*X[i];
}
if((cw <= W) &&(cp > reached_max)){
reached_max = cp;
for(int i = k; i < arraySize; i++){
bestX[i] = X[i];
}
}
}
else{
// internal node: prune by weight, then by a fractional upper bound
int cw = 0;
for(int i = k ; i < arraySize; i++){
cw += dev_coefs[i]*X[i];
}
if (cw > W) forward = false;
cw = 0;
float cp = 0;
int nw = 0;
int np = 0;
for(int i = k;i<arraySize;i++){
np = X[i]!=-1? X[i] * dev_coefs[i+arraySize]:dev_coefs[i+arraySize];
nw = X[i]!=-1? X[i] * dev_coefs[i]: dev_coefs[i];
if(cw+nw <= W){
cw += nw;
cp += np;
}
else{
cp+=np*(W-cw)/nw;
break;
}
}
int b = cp;
if (b <= reached_max){
forward = false;
}
}
if(forward){if(h<arraySize-1){h++;}}
}
end = std::chrono:: high_resolution_clock::now();
elapsed_seconds = std::chrono::duration_cast<std::chrono::microseconds>
(end-start).count();
end_time = std::chrono::system_clock::to_time_t(end);
t2 = elapsed_seconds;
std::cout<< " : " << elapsed_seconds << "microseconds\n";
cout<<"MAX = "<<reached_max<<"\n";
for(int m = 0 ; m < arraySize;m++){
cout<<bestX[m];
curr_sum += bestX[m]*dev_coefs[m+arraySize];
}cout<<"\n = "<<ns<<"\n";
// fout<<"CPU\n"<<reached_max<<"\n"<<elapsed_seconds<<"\n\n";
// fout.close();
acceleration = t2/t1;
cout<<"Acceleration = "<<acceleration<<"\n";
delete [] suda;
delete [] additional_array;
// BUGFIX: release the remaining host allocations (previously leaked); the
// second hipFree of sh_sum_dev/str_num_dev (double free) has been removed.
delete [] X;
delete [] bestX;
delete [] cpu_bin;
return 0;
}
| 87855a4fbd79a1bcdebf66a1da5c6e7a3195ba36.cu |
#include "cuda_runtime.h"
#include <iostream>
#include <chrono>
#include <fstream>
#define arraySize 31 //35 max
#define def_div 10 // 5<=X<=15
#define W 100
//#define threads_per_block 32
//#define max_blocks 32
using namespace std;
__constant__ float coefs[arraySize*2];
__global__ void hybrid(float *sh_sum_dev, long int *str_num_dev, float num_of_blocks, int* bdevX,int* global_mem_bin,int threads_per_block)
{
float th_w_sum = 0;
float th_v_sum = 0;
int th_bin[arraySize];
int best_bin[arraySize];
extern __shared__ float sh_array[];
float* sh_maxs = (float*)sh_array;
int* indices = (int*)&sh_maxs[threads_per_block];
int reached = 0;
indices[threadIdx.x] = blockIdx.x * blockDim.x + threadIdx.x;
__syncthreads();
long signed int num_to_bin = blockIdx.x * blockDim.x + threadIdx.x;
//num_to_bin += max_blocks * n_of_it;
#pragma unroll
for (uint i = 0; i < def_div; i++)
{
th_bin[i] = ((num_to_bin) >> i) % 2;
th_w_sum += th_bin[i] * coefs[i];
th_v_sum += th_bin[i] * coefs[i+arraySize];
best_bin[i] = th_bin[i];
}
#pragma unroll
for (uint i = def_div; i < arraySize; i++)
{
th_bin[i] = -1;
}
int Capacity = W - th_w_sum;
sh_maxs[threadIdx.x] = (th_w_sum > W) ? 0:th_v_sum;
__syncthreads ();
//H_S
int h = def_div;
long int ns = 0;
bool forward;
while(h-def_div!=-1){
ns++;
forward = true;
if(th_bin[h]==-1){
th_bin[h]=1;
}else{
if(th_bin[h]==1){
th_bin[h]=0;
}else{
if(th_bin[h]==0){
th_bin[h]=-1;
h--;
forward=false;
}
}
}
if(h==arraySize-1){
int cw = 0;
int cp = 0;
#pragma unroll
for(int i = def_div;i<arraySize;i++){
cp += coefs[i+arraySize] * th_bin[i];
cw += coefs[i] * th_bin[i];
}
if((cw <= Capacity) &&(cp > reached)){
reached = cp;
#pragma unroll
for(int i = def_div; i < arraySize; i++){
best_bin[i] = th_bin[i];
}
}
}
else{
int cw = 0;
for(int i = def_div ; i < arraySize; i++){
cw += coefs[i] * th_bin[i];
}
if (cw > Capacity) forward = false;
cw = 0;
float cp = 0;
int nw = 0;
int np = 0;
#pragma unroll
for(int i = def_div;i < arraySize;i++){
np = th_bin[i]!=-1? th_bin[i] * coefs[i+arraySize]:coefs[i+arraySize];
nw = th_bin[i]!=-1? th_bin[i] * coefs[i]: coefs[i];
if(cw+nw <= Capacity){
cw += nw;
cp += np;
}
else{
cp+=np*(Capacity-cw)/nw;
break;
}
}
int b = cp;
if (b <= reached){
forward = false;
}
}
if(forward){if(h<arraySize-1){h++;}
}
}
sh_maxs[threadIdx.x] += reached;
__syncthreads();
//reduction on block
for (uint offset = blockDim.x >> 1; offset >= 1; offset >>= 1)
{
if (threadIdx.x < offset)
{
if (sh_maxs[threadIdx.x] < sh_maxs[threadIdx.x + offset])
{
sh_maxs[threadIdx.x] = sh_maxs[threadIdx.x + offset];
indices[threadIdx.x] = indices[threadIdx.x + offset];
}
}
__syncthreads ();
}
// write result for this block to global mem
if(threadIdx.x == 0){
sh_sum_dev[blockIdx.x] = sh_maxs[0];
str_num_dev[blockIdx.x] = indices[0];
}
if(blockIdx.x*blockDim.x+threadIdx.x == indices[0]){
#pragma unroll
for(int i = 0; i < arraySize;i++){
global_mem_bin[blockIdx.x*arraySize + i] = best_bin[i];
}
}
__syncthreads();
}
__global__ void
hybrid_reduction (float *s, long int *str_num_dev,int* global_mem_bin,int threads_per_block)
{
int ID = blockIdx.x * blockDim.x + threadIdx.x;
extern __shared__ int sh_hy_data[];
sh_hy_data[threadIdx.x] = s[ID];
sh_hy_data[threadIdx.x + threads_per_block] = str_num_dev[ID];
__syncthreads ();
// do reduction in shared mem
for (uint s = blockDim.x >>1; s > 0; s >>= 1)
{
if (threadIdx.x < s)
{
if (sh_hy_data[threadIdx.x] < sh_hy_data[threadIdx.x + s])
{
sh_hy_data[threadIdx.x] = sh_hy_data[threadIdx.x + s];
sh_hy_data[threadIdx.x + threads_per_block] =
sh_hy_data[threadIdx.x + threads_per_block + s];
}
}
__syncthreads ();
}
// write result for this block to global mem
if (threadIdx.x == 0)
{
//if(sh_hy_data[0]>s[0]){//}&&(blockIdx.x>0)){
s[blockIdx.x] = sh_hy_data[0];
str_num_dev[blockIdx.x] = sh_hy_data[threads_per_block];
#pragma unroll
for(int i = 0; i < arraySize;i++){
global_mem_bin[i] = global_mem_bin[(sh_hy_data[threads_per_block]/arraySize)*arraySize + i];
}
}
}
// Decode the bits of `a` into view_dev: thread t writes bit t of `a`
// (one thread per bit position; launch with as many threads as bits wanted).
__global__ void
which_string (long int a, int *view_dev)
{
const unsigned bit_pos = threadIdx.x;
const long int bit_val = (a >> bit_pos) % 2;
view_dev[bit_pos] = bit_val;
}
// Recursive quicksort over a[0..N] (N is the index of the LAST element),
// ordering `a` in DESCENDING order and applying the same permutation to
// b[0..N] and to the paired block b[arraySize..arraySize+N].
// Precondition: b must have at least N + 1 + arraySize elements.
void quickSortR(float* a,float* b, long N) {
// On input: array a[], a[N] is its last element.
long i = 0, j = N; // place the scan pointers at the initial positions
float temp, p;
p = a[ N>>1 ]; // the middle element as the pivot
// partitioning procedure (Hoare scheme)
do {
while ( a[i] > p ) i++;
while ( a[j] < p ) j--;
if (i <= j) {
temp = a[i]; a[i] = a[j]; a[j] = temp;
temp = b[i]; b[i] = b[j]; b[j] = temp;
temp = b[i+arraySize]; b[i+arraySize] = b[j+arraySize]; b[j+arraySize] = temp;
i++; j--;
}
} while ( i<=j );
// recursive calls, if there is anything left to sort
if ( j > 0 ) quickSortR(a,b, j);
if ( N > i ) quickSortR(a+i,b+i, N-i);
}
int main(){
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, 0);
int threads_per_block = deviceProp.warpSize;
int max_blocks = pow(2,def_div)/threads_per_block;
long int strSize_b = pow (2, arraySize);
int num_of_blocks = strSize_b / threads_per_block;
float *Sum = new float[32]; // = { 0 };
float *sh_sum_dev;
//float weight[31] ={ 5, 10, 17, 19, 20, 23, 26, 30, 32, 38, 40, 44, 47, 50, 55, 56, 56, 60, 62, 66, 70, 75, 77, 80, 81, 90,93,96,101,107,115 };
//float values[31] ={ 10, 13, 16, 22, 30, 25, 55, 90, 110, 115, 130, 120, 150, 170, 194, 199, 194, 199, 217, 230, 248, 250, 264, 271, 279, 286,293,299,305,313,321 };
float dev_coefs[62] = {2,1,8,2,17,22,21,33,54,53,29,34,91,24,82,91,51,9,64,14,44,30,23,98,38,55,98,64,57,80,66,49,24,89,15,87,86,77,81,89,82,44,38,86,22,75,72,40,7,47,9,28,17,10,42,15,20,32,15,6,4,1};
//float dev_coefs[60] = {5, 10, 17, 19, 20, 23, 26, 30, 32, 38, 40, 44, 47, 50, 55, 56, 56, 60, 62, 66, 70, 75, 77, 80, 81, 90,93,96,101,107, 10, 13, 16, 22, 30, 25, 55, 90, 110, 115, 130, 120, 150, 170, 194, 199, 194, 199, 217, 230, 248, 250, 264, 271, 279, 286,293,299,305,313 };
//float dev_coefs[58] = {5, 10, 17, 19, 20, 23, 26, 30, 32, 38, 40, 44, 47, 50, 55, 56, 56, 60, 62, 66, 70, 75, 77, 80, 81, 90,93,96,101, 10, 13, 16, 22, 30, 25, 55, 90, 110, 115, 130, 120, 150, 170, 194, 199, 194, 199, 217, 230, 248, 250, 264, 271, 279, 286,293,299,305 };
//float *values_dev;
long int *str_num_dev;
long int *str_num = new long int[1];
cout<<"sing param = "<<max_blocks<<" _ "<< threads_per_block<<"\n";
cout<<"red param "<<1<<" , "<<max_blocks<<"\n";
float* additional_array = new float[arraySize];
for(int i = 0; i < arraySize;i++){
additional_array[i] = dev_coefs[i+arraySize]/dev_coefs[i];
}
quickSortR(additional_array,dev_coefs,arraySize-1);
float t1,t2;
float acceleration = 0;
//for(int i = 0;i<arraySize*2;i++){dev_coefs[i] = 2;}
std::chrono::time_point<std::chrono:: high_resolution_clock> start, end;
start = std::chrono::high_resolution_clock::now();
int* bdevX;
cudaMalloc ((void **) &bdevX, arraySize * sizeof (int));
int* global_mem_bin;
cudaMalloc ((void **) &global_mem_bin, max_blocks*arraySize * sizeof (int));
cudaMalloc ((void **) &sh_sum_dev, num_of_blocks * sizeof (float));
cudaMalloc ((void **) &str_num_dev, num_of_blocks * sizeof (float));
cudaMemcpyToSymbol (coefs, dev_coefs, 2*arraySize * sizeof (float));
hybrid <<< max_blocks, threads_per_block ,threads_per_block*2*sizeof(int)>>> (sh_sum_dev, str_num_dev, num_of_blocks,bdevX,global_mem_bin,threads_per_block);
hybrid_reduction<<<1,max_blocks,threads_per_block*2*sizeof(int)>>>(sh_sum_dev,str_num_dev,global_mem_bin,threads_per_block);
int* suda = new int[arraySize];
cudaMemcpy (Sum, sh_sum_dev, sizeof (int), cudaMemcpyDeviceToHost);
cudaMemcpy (str_num, str_num_dev, sizeof (long int), cudaMemcpyDeviceToHost);
cudaMemcpy (suda, global_mem_bin, arraySize*sizeof (int), cudaMemcpyDeviceToHost);
end = std::chrono:: high_resolution_clock::now();
int elapsed_seconds = std::chrono::duration_cast<std::chrono::microseconds>
(end-start).count();
std::time_t end_time = std::chrono::system_clock::to_time_t(end);
std::cout<< "Время выполнения: " << elapsed_seconds << "microseconds\n";
t1 = elapsed_seconds;
cout << "Acheived maximal sum = " << Sum[0] << "\n";
for (int i = 0; i < arraySize; i++)
{
cout << suda[i];
} cout << "\n";
//check
int checksum = 0;
for (int i = 0; i < arraySize; i++)
{
checksum += dev_coefs[i+arraySize] * suda[i];
}
cout << "Validation sum = " << checksum << "\n";
checksum = 0;
for (int i = 0; i < arraySize; i++)
{
checksum += dev_coefs[i] * suda[i];
} cout << "Weight = " << checksum << "\n";
// ofstream fout;
// fout.open("data_uncorr_hybrid.txt",ios_base::app);
// fout<<"GPU\n"<<Sum[0]<<"\n"<<elapsed_seconds<<"\n\n";
cudaFree(coefs);
cudaFree (sh_sum_dev);
cudaFree (str_num_dev);
cudaFree(bdevX);
cudaFree(global_mem_bin);
delete [] Sum;
delete [] str_num;
cout<<"Проверка. CPU version:\n";
start = std::chrono::high_resolution_clock::now();
int *X = new int[arraySize];
int *bestX = new int[arraySize];
for(int i = 0; i < arraySize; i++){
X[i] = -1;
bestX[i] = 0;
}
int curr_sum = 0;
int reached_max = 0;
float *cpu_bin = new float[arraySize];
for(int i = 0; i < arraySize;i++){
additional_array[i] = dev_coefs[i+arraySize]/dev_coefs[i];
}
quickSortR(additional_array,dev_coefs,arraySize-1);
int h = 0;
int k = h;//def_div;
long int ns = 0;
bool forward;
while(h-k!=-1){
ns++;
forward = true;
if(X[h]==-1){
X[h]=1;
}else{
if(X[h]==1){
X[h]=0;
}else{
if(X[h]==0){
X[h]=-1;
h--;
forward=false;
}
}
}
if(h==arraySize-1){
int cw = 0;
int cp = 0;
for(int i = k;i<arraySize;i++){
cp += dev_coefs[i+arraySize]*X[i];
cw += dev_coefs[i]*X[i];
}
if((cw <= W) &&(cp > reached_max)){
reached_max = cp;
for(int i = k; i < arraySize; i++){
bestX[i] = X[i];
}
}
}
else{
int cw = 0;
for(int i = k ; i < arraySize; i++){
cw += dev_coefs[i]*X[i];
}
if (cw > W) forward = false;
cw = 0;
float cp = 0;
int nw = 0;
int np = 0;
for(int i = k;i<arraySize;i++){
np = X[i]!=-1? X[i] * dev_coefs[i+arraySize]:dev_coefs[i+arraySize];
nw = X[i]!=-1? X[i] * dev_coefs[i]: dev_coefs[i];
if(cw+nw <= W){
cw += nw;
cp += np;
}
else{
cp+=np*(W-cw)/nw;
break;
}
}
int b = cp;
if (b <= reached_max){
forward = false;
}
}
if(forward){if(h<arraySize-1){h++;}}
}
end = std::chrono:: high_resolution_clock::now();
elapsed_seconds = std::chrono::duration_cast<std::chrono::microseconds>
(end-start).count();
end_time = std::chrono::system_clock::to_time_t(end);
t2 = elapsed_seconds;
std::cout<< "Время выполнения: " << elapsed_seconds << "microseconds\n";
cout<<"MAX = "<<reached_max<<"\n";
for(int m = 0 ; m < arraySize;m++){
cout<<bestX[m];
curr_sum += bestX[m]*dev_coefs[m+arraySize];
}cout<<"\nЧисло итераций = "<<ns<<"\n";
// fout<<"CPU\n"<<reached_max<<"\n"<<elapsed_seconds<<"\n\n";
// fout.close();
acceleration = t2/t1;
cout<<"Acceleration = "<<acceleration<<"\n";
delete [] suda;
delete [] additional_array;
cudaFree (sh_sum_dev);
cudaFree (str_num_dev);
cudaFree (coefs);
return 0;
}
|
98298039b3e954488920b6aa4065cea28b3f0d61.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_init_kernel;
int xdim0_init_kernel_h = -1;
int ydim0_init_kernel_h = -1;
__constant__ int xdim1_init_kernel;
int xdim1_init_kernel_h = -1;
int ydim1_init_kernel_h = -1;
__constant__ int xdim2_init_kernel;
int xdim2_init_kernel_h = -1;
int ydim2_init_kernel_h = -1;
__constant__ int xdim3_init_kernel;
int xdim3_init_kernel_h = -1;
int ydim3_init_kernel_h = -1;
__constant__ int xdim4_init_kernel;
int xdim4_init_kernel_h = -1;
int ydim4_init_kernel_h = -1;
__constant__ int xdim5_init_kernel;
int xdim5_init_kernel_h = -1;
int ydim5_init_kernel_h = -1;
__constant__ int xdim6_init_kernel;
int xdim6_init_kernel_h = -1;
int ydim6_init_kernel_h = -1;
__constant__ int xdim7_init_kernel;
int xdim7_init_kernel_h = -1;
int ydim7_init_kernel_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#undef OPS_ACC7
#define OPS_ACC0(x) (x)
#define OPS_ACC1(x) (x)
#define OPS_ACC2(x) (x)
#define OPS_ACC3(x) (x)
#define OPS_ACC4(x) (x)
#define OPS_ACC5(x) (x)
#define OPS_ACC6(x) (x)
#define OPS_ACC7(x) (x)
// user function
// Auto-generated by ops.py -- manual edits here will be lost on regeneration.
// Riemann-problem style initialisation of the 1D conserved state at one grid
// point: for x >= -4 the right state gets a sinusoidal density perturbation,
// otherwise the constant left state is set. Old-time copies and rhoin mirror
// the new-time values. Relies on global constants eps, lambda, ur, ul, pr,
// pl, rhol and gam1 declared elsewhere in the application.
__device__
void
init_kernel(const double *x, double *rho_new, double *rhou_new,
double *rhoE_new, double *rhoin, double *rho_old,
double *rhou_old, double *rhoE_old) {
if (x[OPS_ACC0(0)] >= -4.0) {
rho_new[OPS_ACC1(0)] = 1.0 + eps * sin(lambda * x[OPS_ACC0(0)]);
rhou_new[OPS_ACC2(0)] = ur * rho_new[OPS_ACC1(0)];
// total energy = pressure term + kinetic energy
rhoE_new[OPS_ACC3(0)] =
(pr / gam1) +
0.5 * pow(rhou_new[OPS_ACC2(0)], 2) / rho_new[OPS_ACC1(0)];
} else {
rho_new[OPS_ACC1(0)] = rhol;
rhou_new[OPS_ACC2(0)] = ul * rho_new[OPS_ACC1(0)];
rhoE_new[OPS_ACC3(0)] =
(pl / gam1) +
0.5 * pow(rhou_new[OPS_ACC2(0)], 2) / rho_new[OPS_ACC1(0)];
}
// seed the old-time and reference-density fields from the new-time state
rho_old[OPS_ACC5(0)] = rho_new[OPS_ACC1(0)];
rhou_old[OPS_ACC6(0)] = rhou_new[OPS_ACC2(0)];
rhoE_old[OPS_ACC7(0)] = rhoE_new[OPS_ACC3(0)];
rhoin[OPS_ACC4(0)] = rho_new[OPS_ACC1(0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#undef OPS_ACC7
// Auto-generated by ops.py -- manual edits here will be lost on regeneration.
// GPU wrapper for init_kernel: offsets every argument pointer to this
// thread's 1D grid point and invokes the user kernel, guarded so that
// threads past the iteration range do nothing.
__global__ void
ops_init_kernel(const double *__restrict arg0, double *__restrict arg1,
double *__restrict arg2, double *__restrict arg3,
double *__restrict arg4, double *__restrict arg5,
double *__restrict arg6, double *__restrict arg7, int size0) {
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1;
arg1 += idx_x * 1 * 1;
arg2 += idx_x * 1 * 1;
arg3 += idx_x * 1 * 1;
arg4 += idx_x * 1 * 1;
arg5 += idx_x * 1 * 1;
arg6 += idx_x * 1 * 1;
arg7 += idx_x * 1 * 1;
// tail guard: the grid may overshoot the iteration range
if (idx_x < size0) {
init_kernel(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
}
}
// host stub function
void ops_par_loop_init_kernel(char const *name, ops_block block, int dim,
int *range, ops_arg arg0, ops_arg arg1,
ops_arg arg2, ops_arg arg3, ops_arg arg4,
ops_arg arg5, ops_arg arg6, ops_arg arg7) {
// Timing
double t1, t2, c1, c2;
ops_arg args[8] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 8, range, 1))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(1, "init_kernel");
OPS_kernels[1].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[1];
int end[1];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 1; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 1; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
int xdim3 = args[3].dat->size[0];
int xdim4 = args[4].dat->size[0];
int xdim5 = args[5].dat->size[0];
int xdim6 = args[6].dat->size[0];
int xdim7 = args[7].dat->size[0];
if (xdim0 != xdim0_init_kernel_h || xdim1 != xdim1_init_kernel_h ||
xdim2 != xdim2_init_kernel_h || xdim3 != xdim3_init_kernel_h ||
xdim4 != xdim4_init_kernel_h || xdim5 != xdim5_init_kernel_h ||
xdim6 != xdim6_init_kernel_h || xdim7 != xdim7_init_kernel_h) {
hipMemcpyToSymbol(xdim0_init_kernel, &xdim0, sizeof(int));
xdim0_init_kernel_h = xdim0;
hipMemcpyToSymbol(xdim1_init_kernel, &xdim1, sizeof(int));
xdim1_init_kernel_h = xdim1;
hipMemcpyToSymbol(xdim2_init_kernel, &xdim2, sizeof(int));
xdim2_init_kernel_h = xdim2;
hipMemcpyToSymbol(xdim3_init_kernel, &xdim3, sizeof(int));
xdim3_init_kernel_h = xdim3;
hipMemcpyToSymbol(xdim4_init_kernel, &xdim4, sizeof(int));
xdim4_init_kernel_h = xdim4;
hipMemcpyToSymbol(xdim5_init_kernel, &xdim5, sizeof(int));
xdim5_init_kernel_h = xdim5;
hipMemcpyToSymbol(xdim6_init_kernel, &xdim6, sizeof(int));
xdim6_init_kernel_h = xdim6;
hipMemcpyToSymbol(xdim7_init_kernel, &xdim7, sizeof(int));
xdim7_init_kernel_h = xdim7;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1, 1, 1);
dim3 tblock(OPS_block_size_x, 1, 1);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
int dat3 = args[3].dat->elem_size;
int dat4 = args[4].dat->elem_size;
int dat5 = args[5].dat->elem_size;
int dat6 = args[6].dat->elem_size;
int dat7 = args[7].dat->elem_size;
char *p_a[8];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[2].dat->d_m[d];
#endif
int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] -
args[2].dat->base[0] - d_m[0]);
p_a[2] = (char *)args[2].data_d + base2;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[3].dat->d_m[d];
#endif
int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] -
args[3].dat->base[0] - d_m[0]);
p_a[3] = (char *)args[3].data_d + base3;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[4].dat->d_m[d];
#endif
int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] -
args[4].dat->base[0] - d_m[0]);
p_a[4] = (char *)args[4].data_d + base4;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[5].dat->d_m[d];
#endif
int base5 = dat5 * 1 * (start[0] * args[5].stencil->stride[0] -
args[5].dat->base[0] - d_m[0]);
p_a[5] = (char *)args[5].data_d + base5;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[6].dat->d_m[d] + OPS_sub_dat_list[args[6].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[6].dat->d_m[d];
#endif
int base6 = dat6 * 1 * (start[0] * args[6].stencil->stride[0] -
args[6].dat->base[0] - d_m[0]);
p_a[6] = (char *)args[6].data_d + base6;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[7].dat->d_m[d] + OPS_sub_dat_list[args[7].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[7].dat->d_m[d];
#endif
int base7 = dat7 * 1 * (start[0] * args[7].stencil->stride[0] -
args[7].dat->base[0] - d_m[0]);
p_a[7] = (char *)args[7].data_d + base7;
ops_H_D_exchanges_device(args, 8);
ops_halo_exchanges(args, 8, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[1].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_init_kernel), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
(double *)p_a[6], (double *)p_a[7], x_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[1].time += t1 - t2;
}
ops_set_dirtybit_device(args, 8);
ops_set_halo_dirtybit3(&args[1], range);
ops_set_halo_dirtybit3(&args[2], range);
ops_set_halo_dirtybit3(&args[3], range);
ops_set_halo_dirtybit3(&args[4], range);
ops_set_halo_dirtybit3(&args[5], range);
ops_set_halo_dirtybit3(&args[6], range);
ops_set_halo_dirtybit3(&args[7], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[1].mpi_time += t2 - t1;
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg6);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg7);
}
}
| 98298039b3e954488920b6aa4065cea28b3f0d61.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_init_kernel;
int xdim0_init_kernel_h = -1;
int ydim0_init_kernel_h = -1;
__constant__ int xdim1_init_kernel;
int xdim1_init_kernel_h = -1;
int ydim1_init_kernel_h = -1;
__constant__ int xdim2_init_kernel;
int xdim2_init_kernel_h = -1;
int ydim2_init_kernel_h = -1;
__constant__ int xdim3_init_kernel;
int xdim3_init_kernel_h = -1;
int ydim3_init_kernel_h = -1;
__constant__ int xdim4_init_kernel;
int xdim4_init_kernel_h = -1;
int ydim4_init_kernel_h = -1;
__constant__ int xdim5_init_kernel;
int xdim5_init_kernel_h = -1;
int ydim5_init_kernel_h = -1;
__constant__ int xdim6_init_kernel;
int xdim6_init_kernel_h = -1;
int ydim6_init_kernel_h = -1;
__constant__ int xdim7_init_kernel;
int xdim7_init_kernel_h = -1;
int ydim7_init_kernel_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#undef OPS_ACC7
#define OPS_ACC0(x) (x)
#define OPS_ACC1(x) (x)
#define OPS_ACC2(x) (x)
#define OPS_ACC3(x) (x)
#define OPS_ACC4(x) (x)
#define OPS_ACC5(x) (x)
#define OPS_ACC6(x) (x)
#define OPS_ACC7(x) (x)
// user function
__device__
void
init_kernel(const double *x, double *rho_new, double *rhou_new,
double *rhoE_new, double *rhoin, double *rho_old,
double *rhou_old, double *rhoE_old) {
if (x[OPS_ACC0(0)] >= -4.0) {
rho_new[OPS_ACC1(0)] = 1.0 + eps * sin(lambda * x[OPS_ACC0(0)]);
rhou_new[OPS_ACC2(0)] = ur * rho_new[OPS_ACC1(0)];
rhoE_new[OPS_ACC3(0)] =
(pr / gam1) +
0.5 * pow(rhou_new[OPS_ACC2(0)], 2) / rho_new[OPS_ACC1(0)];
} else {
rho_new[OPS_ACC1(0)] = rhol;
rhou_new[OPS_ACC2(0)] = ul * rho_new[OPS_ACC1(0)];
rhoE_new[OPS_ACC3(0)] =
(pl / gam1) +
0.5 * pow(rhou_new[OPS_ACC2(0)], 2) / rho_new[OPS_ACC1(0)];
}
rho_old[OPS_ACC5(0)] = rho_new[OPS_ACC1(0)];
rhou_old[OPS_ACC6(0)] = rhou_new[OPS_ACC2(0)];
rhoE_old[OPS_ACC7(0)] = rhoE_new[OPS_ACC3(0)];
rhoin[OPS_ACC4(0)] = rho_new[OPS_ACC1(0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#undef OPS_ACC7
__global__ void
ops_init_kernel(const double *__restrict arg0, double *__restrict arg1,
double *__restrict arg2, double *__restrict arg3,
double *__restrict arg4, double *__restrict arg5,
double *__restrict arg6, double *__restrict arg7, int size0) {
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1;
arg1 += idx_x * 1 * 1;
arg2 += idx_x * 1 * 1;
arg3 += idx_x * 1 * 1;
arg4 += idx_x * 1 * 1;
arg5 += idx_x * 1 * 1;
arg6 += idx_x * 1 * 1;
arg7 += idx_x * 1 * 1;
if (idx_x < size0) {
init_kernel(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
}
}
// host stub function
void ops_par_loop_init_kernel(char const *name, ops_block block, int dim,
int *range, ops_arg arg0, ops_arg arg1,
ops_arg arg2, ops_arg arg3, ops_arg arg4,
ops_arg arg5, ops_arg arg6, ops_arg arg7) {
// Timing
double t1, t2, c1, c2;
ops_arg args[8] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 8, range, 1))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(1, "init_kernel");
OPS_kernels[1].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[1];
int end[1];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 1; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 1; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
int xdim3 = args[3].dat->size[0];
int xdim4 = args[4].dat->size[0];
int xdim5 = args[5].dat->size[0];
int xdim6 = args[6].dat->size[0];
int xdim7 = args[7].dat->size[0];
if (xdim0 != xdim0_init_kernel_h || xdim1 != xdim1_init_kernel_h ||
xdim2 != xdim2_init_kernel_h || xdim3 != xdim3_init_kernel_h ||
xdim4 != xdim4_init_kernel_h || xdim5 != xdim5_init_kernel_h ||
xdim6 != xdim6_init_kernel_h || xdim7 != xdim7_init_kernel_h) {
cudaMemcpyToSymbol(xdim0_init_kernel, &xdim0, sizeof(int));
xdim0_init_kernel_h = xdim0;
cudaMemcpyToSymbol(xdim1_init_kernel, &xdim1, sizeof(int));
xdim1_init_kernel_h = xdim1;
cudaMemcpyToSymbol(xdim2_init_kernel, &xdim2, sizeof(int));
xdim2_init_kernel_h = xdim2;
cudaMemcpyToSymbol(xdim3_init_kernel, &xdim3, sizeof(int));
xdim3_init_kernel_h = xdim3;
cudaMemcpyToSymbol(xdim4_init_kernel, &xdim4, sizeof(int));
xdim4_init_kernel_h = xdim4;
cudaMemcpyToSymbol(xdim5_init_kernel, &xdim5, sizeof(int));
xdim5_init_kernel_h = xdim5;
cudaMemcpyToSymbol(xdim6_init_kernel, &xdim6, sizeof(int));
xdim6_init_kernel_h = xdim6;
cudaMemcpyToSymbol(xdim7_init_kernel, &xdim7, sizeof(int));
xdim7_init_kernel_h = xdim7;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1, 1, 1);
dim3 tblock(OPS_block_size_x, 1, 1);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
int dat3 = args[3].dat->elem_size;
int dat4 = args[4].dat->elem_size;
int dat5 = args[5].dat->elem_size;
int dat6 = args[6].dat->elem_size;
int dat7 = args[7].dat->elem_size;
char *p_a[8];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[2].dat->d_m[d];
#endif
int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] -
args[2].dat->base[0] - d_m[0]);
p_a[2] = (char *)args[2].data_d + base2;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[3].dat->d_m[d];
#endif
int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] -
args[3].dat->base[0] - d_m[0]);
p_a[3] = (char *)args[3].data_d + base3;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[4].dat->d_m[d];
#endif
int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] -
args[4].dat->base[0] - d_m[0]);
p_a[4] = (char *)args[4].data_d + base4;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[5].dat->d_m[d];
#endif
int base5 = dat5 * 1 * (start[0] * args[5].stencil->stride[0] -
args[5].dat->base[0] - d_m[0]);
p_a[5] = (char *)args[5].data_d + base5;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[6].dat->d_m[d] + OPS_sub_dat_list[args[6].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[6].dat->d_m[d];
#endif
int base6 = dat6 * 1 * (start[0] * args[6].stencil->stride[0] -
args[6].dat->base[0] - d_m[0]);
p_a[6] = (char *)args[6].data_d + base6;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[7].dat->d_m[d] + OPS_sub_dat_list[args[7].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[7].dat->d_m[d];
#endif
int base7 = dat7 * 1 * (start[0] * args[7].stencil->stride[0] -
args[7].dat->base[0] - d_m[0]);
p_a[7] = (char *)args[7].data_d + base7;
ops_H_D_exchanges_device(args, 8);
ops_halo_exchanges(args, 8, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[1].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_init_kernel<<<grid, tblock>>>((double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
(double *)p_a[6], (double *)p_a[7], x_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[1].time += t1 - t2;
}
ops_set_dirtybit_device(args, 8);
ops_set_halo_dirtybit3(&args[1], range);
ops_set_halo_dirtybit3(&args[2], range);
ops_set_halo_dirtybit3(&args[3], range);
ops_set_halo_dirtybit3(&args[4], range);
ops_set_halo_dirtybit3(&args[5], range);
ops_set_halo_dirtybit3(&args[6], range);
ops_set_halo_dirtybit3(&args[7], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[1].mpi_time += t2 - t1;
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg6);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg7);
}
}
|
abd89722e088bc6898218fc9e38dbf4493a8b90d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is still fast.
*
* Authors: Luc Grosheintz <forbugrep@zoho.com>
* Date: 2015-03-17
*/
#include "call_back.cuh"
__global__
void foo(double x) {
S::evil(x);
S::evil(x);
S::good(x);
S::good(x);
printf("%e\n", x);
}
__global__
void bar(double x) {
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
printf("%e\n", x);
}
int main(int argc, char *argv[]){
hipLaunchKernelGGL(( foo), dim3(1), dim3(1), 0, 0, 0.2);
hipLaunchKernelGGL(( bar), dim3(1), dim3(1), 0, 0, 0.2);
return 0;
}
| abd89722e088bc6898218fc9e38dbf4493a8b90d.cu | /* This is still fast.
*
* Authors: Luc Grosheintz <forbugrep@zoho.com>
* Date: 2015-03-17
*/
#include "call_back.cuh"
__global__
void foo(double x) {
S::evil(x);
S::evil(x);
S::good(x);
S::good(x);
printf("%e\n", x);
}
__global__
void bar(double x) {
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
S::good(x);
printf("%e\n", x);
}
int main(int argc, char *argv[]){
foo<<<1, 1>>>(0.2);
bar<<<1, 1>>>(0.2);
return 0;
}
|
hdia_zspmv.hip | // !!! This is a file automatically generated by hipify!!!
/*
* spGPU - Sparse matrices on GPU library.
*
* Copyright (C) 2010 - 2015
* Davide Barbieri - University of Rome Tor Vergata
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 3 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "cudadebug.h"
#include "cudalang.h"
#include "hip/hip_complex.h"
extern "C"
{
#include "core.h"
#include "hdia.h"
}
#include "debug.h"
#define VALUE_TYPE hipDoubleComplex
#define TYPE_SYMBOL Z
#define TEX_FETCH_TYPE int4
#include "hdia_spmv_base.cuh"
| hdia_zspmv.cu | /*
* spGPU - Sparse matrices on GPU library.
*
* Copyright (C) 2010 - 2015
* Davide Barbieri - University of Rome Tor Vergata
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 3 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "cudadebug.h"
#include "cudalang.h"
#include "cuComplex.h"
extern "C"
{
#include "core.h"
#include "hdia.h"
}
#include "debug.h"
#define VALUE_TYPE cuDoubleComplex
#define TYPE_SYMBOL Z
#define TEX_FETCH_TYPE int4
#include "hdia_spmv_base.cuh"
|
83a150f2649cb70bd9558de9bd1ed235294a7f62.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "Bprop2.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *out = NULL;
hipMalloc(&out, XSIZE*YSIZE);
const float *layer1 = NULL;
hipMalloc(&layer1, XSIZE*YSIZE);
float *dsyn2 = NULL;
hipMalloc(&dsyn2, XSIZE*YSIZE);
const int count = 1;
const float alpha = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
Bprop2), dim3(gridBlock),dim3(threadBlock), 0, 0, out,layer1,dsyn2,count,alpha);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
Bprop2), dim3(gridBlock),dim3(threadBlock), 0, 0, out,layer1,dsyn2,count,alpha);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
Bprop2), dim3(gridBlock),dim3(threadBlock), 0, 0, out,layer1,dsyn2,count,alpha);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 83a150f2649cb70bd9558de9bd1ed235294a7f62.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "Bprop2.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *out = NULL;
cudaMalloc(&out, XSIZE*YSIZE);
const float *layer1 = NULL;
cudaMalloc(&layer1, XSIZE*YSIZE);
float *dsyn2 = NULL;
cudaMalloc(&dsyn2, XSIZE*YSIZE);
const int count = 1;
const float alpha = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
Bprop2<<<gridBlock,threadBlock>>>(out,layer1,dsyn2,count,alpha);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
Bprop2<<<gridBlock,threadBlock>>>(out,layer1,dsyn2,count,alpha);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
Bprop2<<<gridBlock,threadBlock>>>(out,layer1,dsyn2,count,alpha);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
f206bd92687a6aeca9b6e360994d1a0619c2f607.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2015 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "negative_log_likelihood_layer_tester_cuda.h"
#include <hip/hip_runtime.h>
#include "../negative_log_likelihood_layer.h"
namespace nnforge
{
namespace cuda
{
extern __shared__ float arr_sh[];
__global__ void negative_log_likelihood_kernel(
float * __restrict output,
const float * __restrict predicted,
const float * __restrict actual,
const float * __restrict scale_mask,
int input_feature_map_count,
int elem_count_per_feature_map,
float scale,
int entry_count)
{
int feature_map_id = threadIdx.x;
int neuron_id = blockIdx.x;
int entry_id = blockIdx.y;
int threadblock_size = blockDim.x;
float err = 0.0F;
int output_offset = entry_id * elem_count_per_feature_map + neuron_id;
float mask = 1.0F;
if (scale_mask)
mask = scale_mask[output_offset];
int thread_id = threadIdx.x;
if (mask != 0.0F)
{
int input_offset = (entry_id * input_feature_map_count + feature_map_id) * elem_count_per_feature_map + neuron_id;
while (feature_map_id < input_feature_map_count)
{
float actual_val = actual[input_offset];
float predicted_val = predicted[input_offset];
err -= (actual_val > 0.0F) ? actual_val * __logf(max(predicted_val, 1.0e-20F)) : 0.0F;
feature_map_id += threadblock_size;
input_offset += threadblock_size * elem_count_per_feature_map;
}
int lane_id = thread_id & 31;
#pragma unroll
for(int tx = 16; tx > 0; tx >>= 1)
err += __shfl_down(err, tx);
int warp_count = threadblock_size >> 5;
if (warp_count > 1)
{
if (lane_id == 0)
arr_sh[thread_id >> 5] = err;
__syncthreads();
if (thread_id < 32)
{
err = 0.0F;
if (thread_id < warp_count)
err = arr_sh[thread_id];
#pragma unroll
for(int tx = 4; tx > 0; tx >>= 1)
err += __shfl_down(err, tx);
}
}
}
if (thread_id == 0)
output[output_offset] = err * (mask * scale);
}
negative_log_likelihood_layer_tester_cuda::negative_log_likelihood_layer_tester_cuda()
{
}
negative_log_likelihood_layer_tester_cuda::~negative_log_likelihood_layer_tester_cuda()
{
}
void negative_log_likelihood_layer_tester_cuda::enqueue_forward_propagation(
hipStream_t stream_id,
cuda_linear_buffer_device::ptr output_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers,
const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
unsigned int entry_count)
{
int threadblock_size = get_threadblock_size(input_configuration_specific_list[0].feature_map_count);
const float * scale_mask = 0;
if (input_buffers.size() > 2)
scale_mask = *input_buffers[2];
int smem_size = ((threadblock_size + 32 - 1) / 32) * sizeof(float);
hipLaunchKernelGGL(( negative_log_likelihood_kernel), dim3(dim3(input_elem_count_per_feature_map_list[0], entry_count)), dim3(threadblock_size), smem_size, stream_id,
*output_buffer,
*input_buffers[0],
*input_buffers[1],
scale_mask,
input_configuration_specific_list[0].feature_map_count,
input_elem_count_per_feature_map_list[0],
scale,
entry_count);
}
void negative_log_likelihood_layer_tester_cuda::tester_configured()
{
nnforge_shared_ptr<const negative_log_likelihood_layer> layer_derived = nnforge_dynamic_pointer_cast<const negative_log_likelihood_layer>(layer_schema);
scale = layer_derived->scale;
}
int negative_log_likelihood_layer_tester_cuda::get_threadblock_size(int input_feature_map_count)
{
int threadblock_size;
if (input_feature_map_count < 256)
{
threadblock_size = (input_feature_map_count + 32 - 1) / 32 * 32;
}
else
{
int threadblock_count = (input_feature_map_count + 256 - 1) / 256;
threadblock_size = (input_feature_map_count + threadblock_count - 1) / threadblock_count;
threadblock_size = (threadblock_size + 32 - 1) / 32 * 32;
}
return threadblock_size;
}
}
}
| f206bd92687a6aeca9b6e360994d1a0619c2f607.cu | /*
* Copyright 2011-2015 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "negative_log_likelihood_layer_tester_cuda.h"
#include <cuda_runtime.h>
#include "../negative_log_likelihood_layer.h"
namespace nnforge
{
namespace cuda
{
extern __shared__ float arr_sh[];
__global__ void negative_log_likelihood_kernel(
float * __restrict output,
const float * __restrict predicted,
const float * __restrict actual,
const float * __restrict scale_mask,
int input_feature_map_count,
int elem_count_per_feature_map,
float scale,
int entry_count)
{
int feature_map_id = threadIdx.x;
int neuron_id = blockIdx.x;
int entry_id = blockIdx.y;
int threadblock_size = blockDim.x;
float err = 0.0F;
int output_offset = entry_id * elem_count_per_feature_map + neuron_id;
float mask = 1.0F;
if (scale_mask)
mask = scale_mask[output_offset];
int thread_id = threadIdx.x;
if (mask != 0.0F)
{
int input_offset = (entry_id * input_feature_map_count + feature_map_id) * elem_count_per_feature_map + neuron_id;
while (feature_map_id < input_feature_map_count)
{
float actual_val = actual[input_offset];
float predicted_val = predicted[input_offset];
err -= (actual_val > 0.0F) ? actual_val * __logf(max(predicted_val, 1.0e-20F)) : 0.0F;
feature_map_id += threadblock_size;
input_offset += threadblock_size * elem_count_per_feature_map;
}
int lane_id = thread_id & 31;
#pragma unroll
for(int tx = 16; tx > 0; tx >>= 1)
err += __shfl_down(err, tx);
int warp_count = threadblock_size >> 5;
if (warp_count > 1)
{
if (lane_id == 0)
arr_sh[thread_id >> 5] = err;
__syncthreads();
if (thread_id < 32)
{
err = 0.0F;
if (thread_id < warp_count)
err = arr_sh[thread_id];
#pragma unroll
for(int tx = 4; tx > 0; tx >>= 1)
err += __shfl_down(err, tx);
}
}
}
if (thread_id == 0)
output[output_offset] = err * (mask * scale);
}
negative_log_likelihood_layer_tester_cuda::negative_log_likelihood_layer_tester_cuda()
{
}
negative_log_likelihood_layer_tester_cuda::~negative_log_likelihood_layer_tester_cuda()
{
}
void negative_log_likelihood_layer_tester_cuda::enqueue_forward_propagation(
cudaStream_t stream_id,
cuda_linear_buffer_device::ptr output_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers,
const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
unsigned int entry_count)
{
int threadblock_size = get_threadblock_size(input_configuration_specific_list[0].feature_map_count);
const float * scale_mask = 0;
if (input_buffers.size() > 2)
scale_mask = *input_buffers[2];
int smem_size = ((threadblock_size + 32 - 1) / 32) * sizeof(float);
negative_log_likelihood_kernel<<<dim3(input_elem_count_per_feature_map_list[0], entry_count), threadblock_size, smem_size, stream_id>>>(
*output_buffer,
*input_buffers[0],
*input_buffers[1],
scale_mask,
input_configuration_specific_list[0].feature_map_count,
input_elem_count_per_feature_map_list[0],
scale,
entry_count);
}
void negative_log_likelihood_layer_tester_cuda::tester_configured()
{
nnforge_shared_ptr<const negative_log_likelihood_layer> layer_derived = nnforge_dynamic_pointer_cast<const negative_log_likelihood_layer>(layer_schema);
scale = layer_derived->scale;
}
int negative_log_likelihood_layer_tester_cuda::get_threadblock_size(int input_feature_map_count)
{
int threadblock_size;
if (input_feature_map_count < 256)
{
threadblock_size = (input_feature_map_count + 32 - 1) / 32 * 32;
}
else
{
int threadblock_count = (input_feature_map_count + 256 - 1) / 256;
threadblock_size = (input_feature_map_count + threadblock_count - 1) / threadblock_count;
threadblock_size = (threadblock_size + 32 - 1) / 32 * 32;
}
return threadblock_size;
}
}
}
|
3d4711d0ba7f144a84e59c5c6abb60b91af2ec40.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
#include <hip/hip_complex.h>
__global__ void sub_scalar_double(int n,int idx, double dx,double *dy,int incy,double *result) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
if(i >= idx && i % incy == 0)
result[i] = dy[i] - dx;
}
}
| 3d4711d0ba7f144a84e59c5c6abb60b91af2ec40.cu | extern "C"
#include <cuComplex.h>
__global__ void sub_scalar_double(int n,int idx, double dx,double *dy,int incy,double *result) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
if(i >= idx && i % incy == 0)
result[i] = dy[i] - dx;
}
}
|
041ee3cb8dcf3a4b65e352f9de09c3cc76e9a191.hip | // !!! This is a file automatically generated by hipify!!!
#include <torch/script.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <vector>
namespace {
template <typename scalar_t>
__device__ __forceinline__ scalar_t relu(scalar_t z) {
return z > 0 ? z : 0;
}
template <typename scalar_t>
__global__ void roll_sum_relu_first_cuda_kernel(
const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> input,
const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> hidden,
torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> res) {
//batch index
const int n = blockIdx.y;
// column index
const int c = blockIdx.x * blockDim.x + threadIdx.x;
const auto hidden_size = input.size(2);
if (c < hidden_size) {
int h_idx = c - 1;
if (h_idx < 0) {
h_idx = hidden_size - 1;
}
res[n][0][c] = relu(input[n][0][c] + hidden[n][h_idx]);
}
}
template <typename scalar_t>
__global__ void roll_sum_relu_cuda_kernel(
const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> input,
torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> res,
unsigned int seq_idx) {
//batch index
const int n = blockIdx.y;
// column index
const int c = blockIdx.x * blockDim.x + threadIdx.x;
const auto hidden_size = input.size(2);
if (c < hidden_size) {
int h_idx = c - 1;
if (h_idx < 0) {
h_idx = hidden_size - 1;
}
res[n][seq_idx][c] = relu(input[n][seq_idx][c] + res[n][seq_idx - 1][h_idx]);
}
}
template <typename scalar_t>
__global__ void calc_roll_grad_cuda_kernel(
const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> loss_grad,
torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> loss_grad_seq,
const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> outputs_results,
unsigned int s_len) {
//batch index
const int b = blockIdx.y;
// column index
int c = blockIdx.x * blockDim.x + threadIdx.x;
const auto hidden_size = outputs_results.size(2);
if (c >= hidden_size) {
return;
}
for (int seq_idx = s_len - 2; seq_idx >= 0 ; seq_idx--) {
auto value = loss_grad_seq[b][seq_idx + 1][c];
c--;
if (c < 0) {
c = hidden_size - 1;
}
auto out_res_value = outputs_results[b][seq_idx][c];
if (loss_grad.size(1) != 1) {
auto grad_loss_value = loss_grad[b][seq_idx][c];
loss_grad_seq[b][seq_idx][c] = (value + grad_loss_value) * (out_res_value > 0);
} else {
loss_grad_seq[b][seq_idx][c] = value * (out_res_value > 0);
}
}
}
} // namespace
// Host wrapper for the backward kernel: launches one thread per hidden column
// (grid.y = batch) and blocks until the kernel finishes. loss_grad_seq is
// updated in place; loss_grad and outputs_results are read-only.
void calc_roll_grad_cuda(
    const torch::Tensor &loss_grad,
    torch::Tensor &loss_grad_seq,
    const torch::Tensor &outputs_results) {
  auto b_size = outputs_results.size(0);  // batch
  auto s_len = outputs_results.size(1);   // sequence length
  auto h_size = outputs_results.size(2);  // hidden size
  const int threads = 1024;
  // ceil-div over the hidden dimension; one grid row per batch element
  const dim3 blocks((h_size + threads - 1) / threads, b_size);
  AT_DISPATCH_FLOATING_TYPES(loss_grad_seq.type(), "roll_sum_relu_cuda", ([&] {
    hipLaunchKernelGGL(( calc_roll_grad_cuda_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
        loss_grad.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(),
        loss_grad_seq.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(),
        outputs_results.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(),
        s_len);
  }));
  // block until the asynchronous launch has completed
  hipDeviceSynchronize();
}
// Forward pass of the rolled ReLU recurrence.
// input: (batch, seq_len, hidden); hidden: (batch, hidden) initial state.
// Produces res with res[n][t][c] = relu(input[n][t][c] + prev[(c-1) mod hidden]),
// where prev is `hidden` for t==0 and res[n][t-1] afterwards. Steps are
// launched sequentially because step t reads step t-1's output.
torch::Tensor roll_sum_relu_cuda(
    torch::Tensor input,
    torch::Tensor hidden) {
  const auto batch_size = input.size(0);
  const auto seq_len = input.size(1);
  const auto input_size = input.size(2);
  auto res = torch::empty_like(input);
  const int threads = 1024;
  // ceil-div over the hidden dimension; one grid row per batch element
  const dim3 blocks((input_size + threads - 1) / threads, batch_size);
  // step 0 consumes the initial hidden state
  AT_DISPATCH_FLOATING_TYPES(input.type(), "roll_sum_relu_cuda", ([&] {
    hipLaunchKernelGGL(( roll_sum_relu_first_cuda_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
        input.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(),
        hidden.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
        res.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>());
  }));
  hipDeviceSynchronize();
  // remaining steps each read the previous step's slice of `res`
  for (auto seq_idx = 1; seq_idx < seq_len; seq_idx++) {
    AT_DISPATCH_FLOATING_TYPES(input.type(), "roll_sum_relu_cuda", ([&] {
      hipLaunchKernelGGL(( roll_sum_relu_cuda_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
          input.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(),
          res.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(),
          seq_idx);
    }));
    // NOTE(review): same-stream launches already serialize; this per-step
    // device sync is conservative and costs a host round-trip — verify before
    // removing.
    hipDeviceSynchronize();
  }
  return res;
}
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
namespace {
// Rectified linear unit: pass positive values through, clamp everything
// else (including NaN, which fails the comparison) to zero.
template <typename scalar_t>
__device__ __forceinline__ scalar_t relu(scalar_t z) {
  if (z > 0) {
    return z;
  }
  return 0;
}
// First step (sequence position 0) of the rolled recurrence: seeds the output
// with res[n][0][c] = relu(input[n][0][c] + hidden[n][(c-1) mod hidden_size]).
// Launch layout (see roll_sum_relu_cuda): grid.y = batch, grid.x * block.x
// covers the hidden dimension.
template <typename scalar_t>
__global__ void roll_sum_relu_first_cuda_kernel(
    const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> input,
    const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> hidden,
    torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> res) {
  //batch index
  const int n = blockIdx.y;
  // column index
  const int c = blockIdx.x * blockDim.x + threadIdx.x;
  const auto hidden_size = input.size(2);
  if (c < hidden_size) {
    // "rolled" neighbour: previous column of the initial hidden state,
    // wrapping to the last column at c==0
    int h_idx = c - 1;
    if (h_idx < 0) {
      h_idx = hidden_size - 1;
    }
    res[n][0][c] = relu(input[n][0][c] + hidden[n][h_idx]);
  }
}
// One step of the rolled recurrence: for sequence position seq_idx,
// res[n][seq_idx][c] = relu(input[n][seq_idx][c] + res[n][seq_idx-1][(c-1) mod hidden]),
// i.e. the previous step's output is read one column to the left with
// wrap-around. Launch layout (see roll_sum_relu_cuda): grid.y = batch,
// grid.x * block.x covers the hidden dimension. Requires seq_idx >= 1 so that
// row seq_idx-1 of `res` is already populated.
template <typename scalar_t>
__global__ void roll_sum_relu_cuda_kernel(
    const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> input,
    torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> res,
    unsigned int seq_idx) {
  //batch index
  const int n = blockIdx.y;
  // column index
  const int c = blockIdx.x * blockDim.x + threadIdx.x;
  const auto hidden_size = input.size(2);
  if (c < hidden_size) {
    // "rolled" neighbour: previous column, wrapping to the last column at c==0
    int h_idx = c - 1;
    if (h_idx < 0) {
      h_idx = hidden_size - 1;
    }
    res[n][seq_idx][c] = relu(input[n][seq_idx][c] + res[n][seq_idx - 1][h_idx]);
  }
}
// Backward pass of the rolled ReLU recurrence. Each thread owns one starting
// column and walks sequence positions from s_len-2 down to 0, shifting its
// column index by -1 (with wrap-around) at every step — its own "rolled"
// diagonal — so threads never touch each other's cells and no inter-thread
// synchronisation is needed.
// Precondition (assumption, verify against caller): loss_grad_seq[b][s_len-1][*]
// must already hold the gradient of the final step before this kernel runs.
template <typename scalar_t>
__global__ void calc_roll_grad_cuda_kernel(
    const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> loss_grad,
    torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> loss_grad_seq,
    const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> outputs_results,
    unsigned int s_len) {
  //batch index
  const int b = blockIdx.y;
  // column index
  int c = blockIdx.x * blockDim.x + threadIdx.x;
  const auto hidden_size = outputs_results.size(2);
  if (c >= hidden_size) {
    return;
  }
  // NOTE(review): if s_len < 2, s_len - 2 underflows as unsigned before the
  // int conversion — callers appear to guarantee s_len >= 2; confirm.
  for (int seq_idx = s_len - 2; seq_idx >= 0 ; seq_idx--) {
    // gradient flowing in from the step above (written by this same thread on
    // the previous iteration, or preset by the caller for step s_len-1)
    auto value = loss_grad_seq[b][seq_idx + 1][c];
    // roll the column backwards, wrapping to the last column
    c--;
    if (c < 0) {
      c = hidden_size - 1;
    }
    auto out_res_value = outputs_results[b][seq_idx][c];
    // (out_res_value > 0) is the derivative mask of the forward ReLU
    if (loss_grad.size(1) != 1) {
      // per-step loss: add this step's own gradient contribution
      auto grad_loss_value = loss_grad[b][seq_idx][c];
      loss_grad_seq[b][seq_idx][c] = (value + grad_loss_value) * (out_res_value > 0);
    } else {
      loss_grad_seq[b][seq_idx][c] = value * (out_res_value > 0);
    }
  }
}
} // namespace
// Host wrapper for the backward kernel: launches one thread per hidden column
// (grid.y = batch) and blocks until the kernel finishes. loss_grad_seq is
// updated in place; loss_grad and outputs_results are read-only.
void calc_roll_grad_cuda(
    const torch::Tensor &loss_grad,
    torch::Tensor &loss_grad_seq,
    const torch::Tensor &outputs_results) {
  auto b_size = outputs_results.size(0);  // batch
  auto s_len = outputs_results.size(1);   // sequence length
  auto h_size = outputs_results.size(2);  // hidden size
  const int threads = 1024;
  // ceil-div over the hidden dimension; one grid row per batch element
  const dim3 blocks((h_size + threads - 1) / threads, b_size);
  AT_DISPATCH_FLOATING_TYPES(loss_grad_seq.type(), "roll_sum_relu_cuda", ([&] {
    calc_roll_grad_cuda_kernel<scalar_t><<<blocks, threads>>>(
        loss_grad.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(),
        loss_grad_seq.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(),
        outputs_results.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(),
        s_len);
  }));
  // block until the asynchronous launch has completed
  cudaDeviceSynchronize();
}
// Forward pass of the rolled ReLU recurrence.
// input: (batch, seq_len, hidden); hidden: (batch, hidden) initial state.
// Produces res with res[n][t][c] = relu(input[n][t][c] + prev[(c-1) mod hidden]),
// where prev is `hidden` for t==0 and res[n][t-1] afterwards. Steps are
// launched sequentially because step t reads step t-1's output.
torch::Tensor roll_sum_relu_cuda(
    torch::Tensor input,
    torch::Tensor hidden) {
  const auto batch_size = input.size(0);
  const auto seq_len = input.size(1);
  const auto input_size = input.size(2);
  auto res = torch::empty_like(input);
  const int threads = 1024;
  // ceil-div over the hidden dimension; one grid row per batch element
  const dim3 blocks((input_size + threads - 1) / threads, batch_size);
  // step 0 consumes the initial hidden state
  AT_DISPATCH_FLOATING_TYPES(input.type(), "roll_sum_relu_cuda", ([&] {
    roll_sum_relu_first_cuda_kernel<scalar_t><<<blocks, threads>>>(
        input.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(),
        hidden.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
        res.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>());
  }));
  cudaDeviceSynchronize();
  // remaining steps each read the previous step's slice of `res`
  for (auto seq_idx = 1; seq_idx < seq_len; seq_idx++) {
    AT_DISPATCH_FLOATING_TYPES(input.type(), "roll_sum_relu_cuda", ([&] {
      roll_sum_relu_cuda_kernel<scalar_t><<<blocks, threads>>>(
          input.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(),
          res.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(),
          seq_idx);
    }));
    // NOTE(review): same-stream launches already serialize; this per-step
    // device sync is conservative and costs a host round-trip — verify before
    // removing.
    cudaDeviceSynchronize();
  }
  return res;
}
8b4fca069c09e3aa96a09046f963504ab5618c60.hip | // !!! This is a file automatically generated by hipify!!!
//
//===------------------ GeantX --------------------------------------------===//
//
// Geant Exascale Pilot
//
// For the licensing terms see LICENSE file.
// For the list of contributors see CREDITS file.
// Copyright (C) 2019, Geant Exascale Pilot team, All rights reserved.
//===----------------------------------------------------------------------===//
//
/**
* @file Geant/proxy/src/ProxyDataManager.cu
* @brief the data manager for proxy physics
*/
//===----------------------------------------------------------------------===//
//
#include <iostream>
#include <fstream>
#include <iomanip>
#include "Geant/proxy/ProxyDataManager.cuh"
#include "Geant/proxy/ProxyPhysicsTableIndex.hpp"
namespace geantx {
// Host-side constructor: allocates the per-process array of physics-table
// pointers and loads both the physics tables and the production-cut table
// from disk. fSizeOfObject starts with the three scalar int members and is
// grown by the retrieval routines; it is later used to size the device copy
// in RelocatePhysicsData.
GEANT_HOST
ProxyDataManager::ProxyDataManager()
{
  fSizeOfObject = 3*sizeof(int);
  fNumPhysicsTables = ProxyPhysicsTableIndex::kNumberPhysicsTable;
  fPhysicsTables = new ProxyPhysicsTable * [fNumPhysicsTables];
  RetrievePhysicsData();
  RetrieveCutsTable();
}
// Destructor: releases every physics table owned by this manager, the table
// pointer array itself, and the flattened cuts table.
GEANT_HOST_DEVICE
ProxyDataManager::~ProxyDataManager()
{
  // BUG FIX: the original guard was inverted (`if(!fPhysicsTables[i])`),
  // which deleted only null entries and leaked every live table.
  for (int i = 0; i < fNumPhysicsTables ; ++i)
    if (fPhysicsTables[i]) delete fPhysicsTables[i];
  // BUG FIX: fPhysicsTables was allocated with new[], so it must be
  // released with delete[] (plain delete is undefined behaviour here).
  delete [] fPhysicsTables;
  delete [] fCutsTable;
}
// Loads every known physics table from data/table/<table name>, accumulating
// each table's serialized size into fSizeOfObject. Individual load failures
// are only logged; the function always returns true.
GEANT_HOST
bool ProxyDataManager::RetrievePhysicsData(/* const std::string& dir */)
{
  //retrieve physics tables (from the given directory)
  char filename[256];
  for(int it = 0 ; it < ProxyPhysicsTableIndex::kNumberPhysicsTable ; ++it) {
    // NOTE(review): unbounded %s into a fixed buffer; fine for the known
    // table names, but snprintf would be safer.
    sprintf(filename,"data/table/%s",ProxyPhysicsTableName[it].c_str());
    fPhysicsTables[it] = new ProxyPhysicsTable();
    bool status = fPhysicsTables[it]->RetrievePhysicsTable(filename);
    if(status) {
      size_t size = fPhysicsTables[it]->SizeOfTable();
      std::cout << "Retrieved " << filename << " Size = " << size << std::endl;
      int nvector = fPhysicsTables[it]->NumberOfVector();
      // account for this table in the serialized object size
      fSizeOfObject += size;
      //TODO: set spline
      for(int iv = 0 ; iv < nvector ; ++iv) {
        // (fPhysicsTables[it]->GetVector())[iv]->SetSpline(useSpline);
      }
    }
    else {
      std::cout << "Failed to retrieve " << filename << std::endl;
    }
  }
  return true;
}
// Loads the production-cut table from data/table/cut.dat.
// File layout: a version string, the number of cuts, then for each cut
// data::nParticleForCuts (length, energy) pairs. Only the energies are kept,
// flattened as fCutsTable[nParticleForCuts*cut + particle].
// Returns false if the file cannot be opened, true otherwise.
GEANT_HOST
bool ProxyDataManager::RetrieveCutsTable(/* const std::string& dir */)
{
  //retrieve material cut tables (from the given directory)
  char fileName[256];
  sprintf(fileName,"data/table/%s","cut.dat");
  std::ifstream fIn;
  // open input file
  // NOTE(review): opened in binary mode but parsed with formatted >>
  // extraction; this works, but ios::binary is misleading here.
  fIn.open(fileName,std::ios::in|std::ios::binary);
  // check if the file has been opened successfully
  if (!fIn)
  {
    fIn.close();
    return false;
  }
  // Number of materials
  std::string version;
  fIn >> version;
  fIn >> fNumOfCuts;
  double cutLength;
  double cutEnergy;
  fCutsTable = new double [data::nParticleForCuts*fNumOfCuts];
  for (size_t idx=0; idx < fNumOfCuts ; ++idx) {
    for (size_t ipart=0; ipart < data::nParticleForCuts ; ++ipart) {
      // only the energy column is retained; the length is discarded
      fIn >> cutLength >> cutEnergy;
      fCutsTable[data::nParticleForCuts*idx+ipart] = cutEnergy; // [idx=*4 + i]
    }
  }
  // account for the cuts table in the serialized object size
  fSizeOfObject += sizeof(double)*data::nParticleForCuts*fNumOfCuts;
  return true;
}
#ifdef GEANT_CUDA
// Produces a device-resident copy of this manager at devPtr:
//  1. relocate every physics table into device memory,
//  2. temporarily swap this object's pointer members to the device addresses,
//  3. memcpy *this (fSizeOfObject bytes) to devPtr,
//  4. restore the host pointers so the host-side object stays usable.
void ProxyDataManager::RelocatePhysicsData(void *devPtr)
{
  // allocate mapped device pointers on the host memory
  ProxyPhysicsTable **fProxyPhysicsTables_d;
  hipHostMalloc((void **)&fProxyPhysicsTables_d,
                fNumPhysicsTables*sizeof(ProxyPhysicsTable*), hipHostMallocMapped);
  // save fPhysicsTables on the host
  ProxyPhysicsTable **fProxyPhysicsTables_h = fPhysicsTables;
  // device pointers on the host memory
  // NOTE(review): variable-length array — a GNU extension, not standard C++.
  ProxyPhysicsTable *tables_d[fNumPhysicsTables];
  // relocate pointers of this to the corresponding device pointers
  for (int i = 0; i < fNumPhysicsTables ; ++i) {
    hipMalloc((void **)&tables_d[i], fPhysicsTables[i]->SizeOfTable());
    fPhysicsTables[i]->Relocate(tables_d[i]);
    fProxyPhysicsTables_d[i] = tables_d[i];
  }
  fPhysicsTables = fProxyPhysicsTables_d;
  // copy cuts table
  int ncuts = data::nParticleForCuts*fNumOfCuts;
  double *fCutsTable_d;
  hipMalloc((void **)&(fCutsTable_d), sizeof(double)*ncuts);
  double *fCutsTable_h = fCutsTable;
  hipMemcpy(fCutsTable_d, fCutsTable, sizeof(double) * ncuts, hipMemcpyHostToDevice);
  fCutsTable = fCutsTable_d;
  // copy the whole content of this from host to device.
  hipMemcpy(devPtr, this, fSizeOfObject, hipMemcpyHostToDevice);
  // persistency on host
  fPhysicsTables = fProxyPhysicsTables_h;
  fCutsTable = fCutsTable_h;
}
#endif
// Debug aid: prints the number of physics tables held by this manager.
// Callable from both host and device code (printf is device-safe).
GEANT_HOST_DEVICE
void ProxyDataManager::Print()
{
  printf("%d\n",fNumPhysicsTables);
}
// Debug aid: dumps every stored production-cut energy, grouped as
// data::nParticleForCuts consecutive values per cut. Callable from both
// host and device code.
GEANT_HOST_DEVICE
void ProxyDataManager::PrintCutsTable()
{
  printf("fNumOfCuts= %d\n",fNumOfCuts);
  for (size_t idx=0; idx < fNumOfCuts ; ++idx) {
    for (size_t ipart=0; ipart < data::nParticleForCuts ; ++ipart) {
      printf(" %f\n",fCutsTable[data::nParticleForCuts*idx+ipart]);
    }
  }
}
} // namespace geantx
| 8b4fca069c09e3aa96a09046f963504ab5618c60.cu | //
//===------------------ GeantX --------------------------------------------===//
//
// Geant Exascale Pilot
//
// For the licensing terms see LICENSE file.
// For the list of contributors see CREDITS file.
// Copyright (C) 2019, Geant Exascale Pilot team, All rights reserved.
//===----------------------------------------------------------------------===//
//
/**
* @file Geant/proxy/src/ProxyDataManager.cu
* @brief the data manager for proxy physics
*/
//===----------------------------------------------------------------------===//
//
#include <iostream>
#include <fstream>
#include <iomanip>
#include "Geant/proxy/ProxyDataManager.cuh"
#include "Geant/proxy/ProxyPhysicsTableIndex.hpp"
namespace geantx {
// Host-side constructor: allocates the per-process array of physics-table
// pointers and loads both the physics tables and the production-cut table
// from disk. fSizeOfObject starts with the three scalar int members and is
// grown by the retrieval routines; it is later used to size the device copy
// in RelocatePhysicsData.
GEANT_HOST
ProxyDataManager::ProxyDataManager()
{
  fSizeOfObject = 3*sizeof(int);
  fNumPhysicsTables = ProxyPhysicsTableIndex::kNumberPhysicsTable;
  fPhysicsTables = new ProxyPhysicsTable * [fNumPhysicsTables];
  RetrievePhysicsData();
  RetrieveCutsTable();
}
// Destructor: releases every physics table owned by this manager, the table
// pointer array itself, and the flattened cuts table.
GEANT_HOST_DEVICE
ProxyDataManager::~ProxyDataManager()
{
  // BUG FIX: the original guard was inverted (`if(!fPhysicsTables[i])`),
  // which deleted only null entries and leaked every live table.
  for (int i = 0; i < fNumPhysicsTables ; ++i)
    if (fPhysicsTables[i]) delete fPhysicsTables[i];
  // BUG FIX: fPhysicsTables was allocated with new[], so it must be
  // released with delete[] (plain delete is undefined behaviour here).
  delete [] fPhysicsTables;
  delete [] fCutsTable;
}
// Loads every known physics table from data/table/<table name>, accumulating
// each table's serialized size into fSizeOfObject. Individual load failures
// are only logged; the function always returns true.
GEANT_HOST
bool ProxyDataManager::RetrievePhysicsData(/* const std::string& dir */)
{
  //retrieve physics tables (from the given directory)
  char filename[256];
  for(int it = 0 ; it < ProxyPhysicsTableIndex::kNumberPhysicsTable ; ++it) {
    // NOTE(review): unbounded %s into a fixed buffer; fine for the known
    // table names, but snprintf would be safer.
    sprintf(filename,"data/table/%s",ProxyPhysicsTableName[it].c_str());
    fPhysicsTables[it] = new ProxyPhysicsTable();
    bool status = fPhysicsTables[it]->RetrievePhysicsTable(filename);
    if(status) {
      size_t size = fPhysicsTables[it]->SizeOfTable();
      std::cout << "Retrieved " << filename << " Size = " << size << std::endl;
      int nvector = fPhysicsTables[it]->NumberOfVector();
      // account for this table in the serialized object size
      fSizeOfObject += size;
      //TODO: set spline
      for(int iv = 0 ; iv < nvector ; ++iv) {
        // (fPhysicsTables[it]->GetVector())[iv]->SetSpline(useSpline);
      }
    }
    else {
      std::cout << "Failed to retrieve " << filename << std::endl;
    }
  }
  return true;
}
// Loads the production-cut table from data/table/cut.dat.
// File layout: a version string, the number of cuts, then for each cut
// data::nParticleForCuts (length, energy) pairs. Only the energies are kept,
// flattened as fCutsTable[nParticleForCuts*cut + particle].
// Returns false if the file cannot be opened, true otherwise.
GEANT_HOST
bool ProxyDataManager::RetrieveCutsTable(/* const std::string& dir */)
{
  //retrieve material cut tables (from the given directory)
  char fileName[256];
  sprintf(fileName,"data/table/%s","cut.dat");
  std::ifstream fIn;
  // open input file
  // NOTE(review): opened in binary mode but parsed with formatted >>
  // extraction; this works, but ios::binary is misleading here.
  fIn.open(fileName,std::ios::in|std::ios::binary);
  // check if the file has been opened successfully
  if (!fIn)
  {
    fIn.close();
    return false;
  }
  // Number of materials
  std::string version;
  fIn >> version;
  fIn >> fNumOfCuts;
  double cutLength;
  double cutEnergy;
  fCutsTable = new double [data::nParticleForCuts*fNumOfCuts];
  for (size_t idx=0; idx < fNumOfCuts ; ++idx) {
    for (size_t ipart=0; ipart < data::nParticleForCuts ; ++ipart) {
      // only the energy column is retained; the length is discarded
      fIn >> cutLength >> cutEnergy;
      fCutsTable[data::nParticleForCuts*idx+ipart] = cutEnergy; // [idx=*4 + i]
    }
  }
  // account for the cuts table in the serialized object size
  fSizeOfObject += sizeof(double)*data::nParticleForCuts*fNumOfCuts;
  return true;
}
#ifdef GEANT_CUDA
// Produces a device-resident copy of this manager at devPtr:
//  1. relocate every physics table into device memory,
//  2. temporarily swap this object's pointer members to the device addresses,
//  3. memcpy *this (fSizeOfObject bytes) to devPtr,
//  4. restore the host pointers so the host-side object stays usable.
void ProxyDataManager::RelocatePhysicsData(void *devPtr)
{
  // allocate mapped device pointers on the host memory
  ProxyPhysicsTable **fProxyPhysicsTables_d;
  cudaHostAlloc((void **)&fProxyPhysicsTables_d,
                fNumPhysicsTables*sizeof(ProxyPhysicsTable*), cudaHostAllocMapped);
  // save fPhysicsTables on the host
  ProxyPhysicsTable **fProxyPhysicsTables_h = fPhysicsTables;
  // device pointers on the host memory
  // NOTE(review): variable-length array — a GNU extension, not standard C++.
  ProxyPhysicsTable *tables_d[fNumPhysicsTables];
  // relocate pointers of this to the corresponding device pointers
  for (int i = 0; i < fNumPhysicsTables ; ++i) {
    cudaMalloc((void **)&tables_d[i], fPhysicsTables[i]->SizeOfTable());
    fPhysicsTables[i]->Relocate(tables_d[i]);
    fProxyPhysicsTables_d[i] = tables_d[i];
  }
  fPhysicsTables = fProxyPhysicsTables_d;
  // copy cuts table
  int ncuts = data::nParticleForCuts*fNumOfCuts;
  double *fCutsTable_d;
  cudaMalloc((void **)&(fCutsTable_d), sizeof(double)*ncuts);
  double *fCutsTable_h = fCutsTable;
  cudaMemcpy(fCutsTable_d, fCutsTable, sizeof(double) * ncuts, cudaMemcpyHostToDevice);
  fCutsTable = fCutsTable_d;
  // copy the whole content of this from host to device.
  cudaMemcpy(devPtr, this, fSizeOfObject, cudaMemcpyHostToDevice);
  // persistency on host
  fPhysicsTables = fProxyPhysicsTables_h;
  fCutsTable = fCutsTable_h;
}
#endif
// Debug aid: prints the number of physics tables held by this manager.
// Callable from both host and device code (printf is device-safe).
GEANT_HOST_DEVICE
void ProxyDataManager::Print()
{
  printf("%d\n",fNumPhysicsTables);
}
// Debug aid: dumps every stored production-cut energy, grouped as
// data::nParticleForCuts consecutive values per cut. Callable from both
// host and device code.
GEANT_HOST_DEVICE
void ProxyDataManager::PrintCutsTable()
{
  printf("fNumOfCuts= %d\n",fNumOfCuts);
  for (size_t idx=0; idx < fNumOfCuts ; ++idx) {
    for (size_t ipart=0; ipart < data::nParticleForCuts ; ++ipart) {
      printf(" %f\n",fCutsTable[data::nParticleForCuts*idx+ipart]);
    }
  }
}
} // namespace geantx
|
e39e6a4e53444c044f6f5c023976b3aa55c8a13d.hip | // !!! This is a file automatically generated by hipify!!!
#include <string>
#include <vector>
#include <iostream>
#include <fstream>
#include <math.h>
#include <algorithm>
#include <utility>
#include <cfloat>
#include <cmath>
#include <cstdlib>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <thrust/pair.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/fill.h>
#define ROWS 440
#define COLS 138672
#define K 5
using std::vector;
using std::string;
using std::ifstream;
using std::getline;
using std::cout;
using std::endl;
// input data to be read from file line by line needs to be split
vector<string> split(string s, char delimeter='\t') {
    // Tokenise `s` on `delimeter`. Consecutive delimiters yield empty
    // tokens; a trailing delimiter does NOT produce a final empty token;
    // an empty input yields an empty vector.
    vector<string> pieces;
    const size_t length = s.size();
    size_t begin = 0;
    while (begin < length) {
        const size_t hit = s.find(delimeter, begin);
        if (hit == string::npos) {
            // no more delimiters: the rest of the string is the last token
            pieces.push_back(s.substr(begin));
            break;
        }
        pieces.push_back(s.substr(begin, hit - begin));
        begin = hit + 1;
    }
    return pieces;
}
// read line by line and store data in a 1d array
// Reads ROWS newline-separated labels from `in` into `data`, parsing each
// line as a float and truncating to int. `data` must already hold ROWS slots.
void readData1d(ifstream& in, thrust::host_vector<int>& data) {
    const int total = (int)ROWS;
    string line;
    for (int row = 0; row < total; ++row) {
        getline(in, line);
        data[row] = (int) atof(line.c_str());
    }
}
// read line by line and store data in a 2d array
// Reads a ROWS x COLS matrix from `in`: one row per line, values separated
// by `delimeter`, stored row-major into the flat vector (data[i*COLS + j]).
// `data` must already hold ROWS*COLS slots.
void readData(ifstream& in, thrust::host_vector<float>& data, char delimeter = '\t') {
  string d;
  vector<string> dSplit;
  int rows = (int)ROWS;
  int cols = (int)COLS;
  for (int i = 0; i < rows; i++) {
    getline(in, d);
    // tokenize the line, then parse each field as a float
    dSplit = split(d, delimeter);
    for (int j = 0; j < cols; j++) {
      data[i*cols+j] = (float) atof(dSplit[j].c_str());
    }
  }
  return ;
}
// predicate for sorting vector of pair<index,distance> by distance
struct compare
{
  // Orders (index, distance) pairs by ascending distance; usable from both
  // host and device code (e.g. with thrust::sort on a device vector).
  __host__ __device__
  bool operator()(const thrust::pair<int, float> x, const thrust::pair<int, float> y)
  {
    return x.second < y.second;
  }
};
// Host-side counterpart of `compare`: strict-weak ordering of
// (index, distance) pairs by ascending distance only.
bool compare2 (const std::pair<int, float> x, const std::pair<int, float> y)
{
    const float lhs = x.second;
    const float rhs = y.second;
    return lhs < rhs;
}
// Majority vote over the K nearest neighbours' binary labels.
// nn holds K indices into `labels`; returns 1 when strictly more than half
// of the neighbours are labelled 1, else 0.
int classify(int* nn, thrust::host_vector<int>& labels) {
    const int k = (int)K;
    int votesForOne = 0;
    for (int j = 0; j < k; ++j)
        votesForOne += labels[nn[j]];
    const int majority = (k - 1) / 2;  // threshold: > half of K
    return (votesForOne > majority) ? 1 : 0;
}
// using cosine distance
// One thread per candidate row i: computes the cosine distance
// 1 - <x,y> / (||x||*||y||) between row i of d_records and the query row *m,
// writing the result to d_distances[i]. The query row itself and any
// zero-norm row get FLT_MAX so they sort last.
__global__ void distances(float *d_records, float *d_distances, int* m)
{
  int i = blockDim.x*blockIdx.x + threadIdx.x;
  int rows = (int)ROWS;
  int cols = (int)COLS;
  if (i < rows && i != *m){
    float xDotY = 0.0f;  // dot product <x, y>
    float normX = 0.0f;  // squared L2 norm of x
    float normY = 0.0f;  // squared L2 norm of y
    for (int k = 0; k < cols; k++){
      float x = d_records[i*cols+k];
      float y = d_records[(*m)*cols+k];
      xDotY += (x * y);
      normX += (x * x);
      normY += (y * y);
    }
    // BUG FIX: cosine similarity divides by ||x||*||y|| = sqrt(normX*normY).
    // The original divided by normX*normY (the squared norms), which distorts
    // the neighbour ranking; it also computed normXY but never used it in
    // the division.
    float denom = sqrtf(normX * normY);
    if (denom == 0.0f)
      d_distances[i] = FLT_MAX;  // distance undefined for a zero vector
    else
      d_distances[i] = 1.0f - (xDotY / denom);
  }
  else if (i == *m)
    d_distances[i] = FLT_MAX;  // exclude the query from its own neighbours
}
// Leave-one-out kNN evaluation over the PEMS dataset: for every row i, the
// `distances` kernel computes that row's distance to all other rows, the
// host sorts (index, distance) pairs, takes the K nearest, majority-votes
// their labels, and compares against row i's true label. Prints the overall
// accuracy.
int main()
{
  int rows = (int)ROWS;
  int cols = (int)COLS;
  int k = (int)K;
  // connect to file with records
  ifstream rec("PEMS_records01.txt");
  ifstream lab("PEMS_labels01.txt");
  // create corresponding host and device vectors
  thrust::host_vector<float> h_records(rows*cols);
  thrust::host_vector<int> h_labels(rows);
  thrust::host_vector<int> h_nn(k);
  thrust::host_vector<float> h_distances(rows);
  // keep track
  std::vector<std::pair<int, float> > index_distance(rows);
  // keep track of accurate classification
  int accurate = 0;
  thrust::device_vector<float> d_records(rows*cols);
  thrust::device_vector<float> d_distances(rows);
  // read data from file
  readData1d(lab, h_labels);
  readData(rec, h_records);
  // copy records from host to device
  d_records = h_records;
  // pointers to pass to kernel function
  float *pd_records = thrust::raw_pointer_cast(&d_records[0]);
  float *pd_distances = thrust::raw_pointer_cast(&d_distances[0]);
  // estimate number of blocks given 512 thread per block
  int nThreads = 512;
  int nBlocks = rows/nThreads + 1;
  // variable to specify to kernel what to leave out
  int* xx;
  hipMalloc((void**) &xx, sizeof(int));
  // variable to hold nearest neighbors
  int* nn = (int*) malloc(k*sizeof(int));
  for (int i = 0; i < rows; i++) {
    // tell the kernel which row is the query (and must be skipped)
    hipMemcpy(xx, &i, sizeof(int), hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( distances), dim3(nBlocks),dim3(nThreads), 0, 0, pd_records, pd_distances, xx);
    //synchronize
    hipDeviceSynchronize();
    // copy distances from device to host
    h_distances = d_distances;
    // couple distance & index to enable sorting without loss of index info
    // NOTE(review): sqrt is monotone on the non-negative distances, so it
    // does not change the neighbour ranking — it only rescales values.
    for (int m = 0; m < rows; m++)
      index_distance[m] = std::make_pair(m, sqrt(h_distances[m]));
    // sort
    std::sort(index_distance.begin(), index_distance.end(), compare2);
    // retrieve nearest neighbors
    for (int m = 0; m < k; m++)
      nn[m] = index_distance[m].first;
    if (classify(nn, h_labels) == h_labels[i])
      accurate++;
  }
  // NOTE(review): xx (device) and nn (host) are never freed; harmless at
  // process exit, but hipFree(xx)/free(nn) would be cleaner.
  float accuracy = (accurate*1.0)/((float)ROWS);
  cout << "The accuracy obtained is: " << accuracy << endl;
  lab.close();
  rec.close();
  return 0;
}
| e39e6a4e53444c044f6f5c023976b3aa55c8a13d.cu | #include <string>
#include <vector>
#include <iostream>
#include <fstream>
#include <math.h>
#include <algorithm>
#include <utility>
#include <cfloat>
#include <cmath>
#include <cstdlib>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <thrust/pair.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/fill.h>
#define ROWS 440
#define COLS 138672
#define K 5
using std::vector;
using std::string;
using std::ifstream;
using std::getline;
using std::cout;
using std::endl;
// input data to be read from file line by line needs to be split
vector<string> split(string s, char delimeter='\t') {
    // Tokenise `s` on `delimeter`. Consecutive delimiters yield empty
    // tokens; a trailing delimiter does NOT produce a final empty token;
    // an empty input yields an empty vector.
    vector<string> pieces;
    const size_t length = s.size();
    size_t begin = 0;
    while (begin < length) {
        const size_t hit = s.find(delimeter, begin);
        if (hit == string::npos) {
            // no more delimiters: the rest of the string is the last token
            pieces.push_back(s.substr(begin));
            break;
        }
        pieces.push_back(s.substr(begin, hit - begin));
        begin = hit + 1;
    }
    return pieces;
}
// read line by line and store data in a 1d array
// Reads ROWS newline-separated labels from `in` into `data`, parsing each
// line as a float and truncating to int. `data` must already hold ROWS slots.
void readData1d(ifstream& in, thrust::host_vector<int>& data) {
    const int total = (int)ROWS;
    string line;
    for (int row = 0; row < total; ++row) {
        getline(in, line);
        data[row] = (int) atof(line.c_str());
    }
}
// read line by line and store data in a 2d array
// Reads a ROWS x COLS matrix from `in`: one row per line, values separated
// by `delimeter`, stored row-major into the flat vector (data[i*COLS + j]).
// `data` must already hold ROWS*COLS slots.
void readData(ifstream& in, thrust::host_vector<float>& data, char delimeter = '\t') {
  string d;
  vector<string> dSplit;
  int rows = (int)ROWS;
  int cols = (int)COLS;
  for (int i = 0; i < rows; i++) {
    getline(in, d);
    // tokenize the line, then parse each field as a float
    dSplit = split(d, delimeter);
    for (int j = 0; j < cols; j++) {
      data[i*cols+j] = (float) atof(dSplit[j].c_str());
    }
  }
  return ;
}
// predicate for sorting vector of pair<index,distance> by distance
struct compare
{
  // Orders (index, distance) pairs by ascending distance; usable from both
  // host and device code (e.g. with thrust::sort on a device vector).
  __host__ __device__
  bool operator()(const thrust::pair<int, float> x, const thrust::pair<int, float> y)
  {
    return x.second < y.second;
  }
};
// Host-side counterpart of `compare`: strict-weak ordering of
// (index, distance) pairs by ascending distance only.
bool compare2 (const std::pair<int, float> x, const std::pair<int, float> y)
{
    const float lhs = x.second;
    const float rhs = y.second;
    return lhs < rhs;
}
// Majority vote over the K nearest neighbours' binary labels.
// nn holds K indices into `labels`; returns 1 when strictly more than half
// of the neighbours are labelled 1, else 0.
int classify(int* nn, thrust::host_vector<int>& labels) {
    const int k = (int)K;
    int votesForOne = 0;
    for (int j = 0; j < k; ++j)
        votesForOne += labels[nn[j]];
    const int majority = (k - 1) / 2;  // threshold: > half of K
    return (votesForOne > majority) ? 1 : 0;
}
// using cosine distance
// One thread per candidate row i: computes the cosine distance
// 1 - <x,y> / (||x||*||y||) between row i of d_records and the query row *m,
// writing the result to d_distances[i]. The query row itself and any
// zero-norm row get FLT_MAX so they sort last.
__global__ void distances(float *d_records, float *d_distances, int* m)
{
  int i = blockDim.x*blockIdx.x + threadIdx.x;
  int rows = (int)ROWS;
  int cols = (int)COLS;
  if (i < rows && i != *m){
    float xDotY = 0.0f;  // dot product <x, y>
    float normX = 0.0f;  // squared L2 norm of x
    float normY = 0.0f;  // squared L2 norm of y
    for (int k = 0; k < cols; k++){
      float x = d_records[i*cols+k];
      float y = d_records[(*m)*cols+k];
      xDotY += (x * y);
      normX += (x * x);
      normY += (y * y);
    }
    // BUG FIX: cosine similarity divides by ||x||*||y|| = sqrt(normX*normY).
    // The original divided by normX*normY (the squared norms), which distorts
    // the neighbour ranking; it also computed normXY but never used it in
    // the division.
    float denom = sqrtf(normX * normY);
    if (denom == 0.0f)
      d_distances[i] = FLT_MAX;  // distance undefined for a zero vector
    else
      d_distances[i] = 1.0f - (xDotY / denom);
  }
  else if (i == *m)
    d_distances[i] = FLT_MAX;  // exclude the query from its own neighbours
}
// Leave-one-out kNN evaluation over the PEMS dataset: for every row i, the
// `distances` kernel computes that row's distance to all other rows, the
// host sorts (index, distance) pairs, takes the K nearest, majority-votes
// their labels, and compares against row i's true label. Prints the overall
// accuracy.
int main()
{
  int rows = (int)ROWS;
  int cols = (int)COLS;
  int k = (int)K;
  // connect to file with records
  ifstream rec("PEMS_records01.txt");
  ifstream lab("PEMS_labels01.txt");
  // create corresponding host and device vectors
  thrust::host_vector<float> h_records(rows*cols);
  thrust::host_vector<int> h_labels(rows);
  thrust::host_vector<int> h_nn(k);
  thrust::host_vector<float> h_distances(rows);
  // keep track
  std::vector<std::pair<int, float> > index_distance(rows);
  // keep track of accurate classification
  int accurate = 0;
  thrust::device_vector<float> d_records(rows*cols);
  thrust::device_vector<float> d_distances(rows);
  // read data from file
  readData1d(lab, h_labels);
  readData(rec, h_records);
  // copy records from host to device
  d_records = h_records;
  // pointers to pass to kernel function
  float *pd_records = thrust::raw_pointer_cast(&d_records[0]);
  float *pd_distances = thrust::raw_pointer_cast(&d_distances[0]);
  // estimate number of blocks given 512 thread per block
  int nThreads = 512;
  int nBlocks = rows/nThreads + 1;
  // variable to specify to kernel what to leave out
  int* xx;
  cudaMalloc((void**) &xx, sizeof(int));
  // variable to hold nearest neighbors
  int* nn = (int*) malloc(k*sizeof(int));
  for (int i = 0; i < rows; i++) {
    // tell the kernel which row is the query (and must be skipped)
    cudaMemcpy(xx, &i, sizeof(int), cudaMemcpyHostToDevice);
    distances<<<nBlocks,nThreads>>>(pd_records, pd_distances, xx);
    //synchronize
    cudaDeviceSynchronize();
    // copy distances from device to host
    h_distances = d_distances;
    // couple distance & index to enable sorting without loss of index info
    // NOTE(review): sqrt is monotone on the non-negative distances, so it
    // does not change the neighbour ranking — it only rescales values.
    for (int m = 0; m < rows; m++)
      index_distance[m] = std::make_pair(m, sqrt(h_distances[m]));
    // sort
    std::sort(index_distance.begin(), index_distance.end(), compare2);
    // retrieve nearest neighbors
    for (int m = 0; m < k; m++)
      nn[m] = index_distance[m].first;
    if (classify(nn, h_labels) == h_labels[i])
      accurate++;
  }
  // NOTE(review): xx (device) and nn (host) are never freed; harmless at
  // process exit, but cudaFree(xx)/free(nn) would be cleaner.
  float accuracy = (accurate*1.0)/((float)ROWS);
  cout << "The accuracy obtained is: " << accuracy << endl;
  lab.close();
  rec.close();
  return 0;
}
|
e2fb748cda26d5fc255b2db170b6b194f740382c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kernel2DXYp.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark sweep for kernel2DXYp: for argv[1] matrix sizes
// (rows of matrices_) and all 20 block shapes (rows of blocks_), time 1000
// launches and print [microseconds,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
int main(int argc, char **argv) {
  hipSetDevice(0);
  char* p;
  int matrix_len = strtol(argv[1], &p, 10);
  for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
    for (int block_looper = 0; block_looper < 20; block_looper++) {
      int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
      int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
      // BUG FIX: these buffers hold XSIZE*YSIZE doubles; the original passed
      // XSIZE*YSIZE (bytes), under-allocating by a factor of sizeof(double).
      double *dataOutput = NULL;
      hipMalloc(&dataOutput, XSIZE*YSIZE*sizeof(double));
      double *dataInput = NULL;
      hipMalloc(&dataInput, XSIZE*YSIZE*sizeof(double));
      double *boundaryTop = NULL;
      hipMalloc(&boundaryTop, XSIZE*YSIZE*sizeof(double));
      double *boundaryBottom = NULL;
      hipMalloc(&boundaryBottom, XSIZE*YSIZE*sizeof(double));
      const double *weights = NULL;
      hipMalloc(&weights, XSIZE*YSIZE*sizeof(double));
      // scalar kernel parameters (fixed by the benchmark generator)
      const int numSten = 1;
      const int numStenHoriz = 1;
      const int numStenLeft = 1;
      const int numStenRight = 1;
      const int numStenVert = 1;
      const int numStenTop = 1;
      const int numStenBottom = 1;
      const int nxLocal = 1;
      const int nyLocal = 1;
      const int BLOCK_X = 1;
      const int BLOCK_Y = 1;
      const int nx = 1;
      const int nyTile = 1;
      // round the problem size up to a multiple of the block shape
      int iXSIZE = XSIZE;
      int iYSIZE = YSIZE;
      while (iXSIZE % BLOCKX != 0) {
        iXSIZE++;
      }
      while (iYSIZE % BLOCKY != 0) {
        iYSIZE++;
      }
      dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
      dim3 threadBlock(BLOCKX, BLOCKY);
      hipFree(0);  // force context creation before any timing
      // warm-up launch
      hipLaunchKernelGGL(( kernel2DXYp), dim3(gridBlock),dim3(threadBlock), 0, 0, dataOutput,dataInput,boundaryTop,boundaryBottom,weights,numSten,numStenHoriz,numStenLeft,numStenRight,numStenVert,numStenTop,numStenBottom,nxLocal,nyLocal,BLOCK_X,BLOCK_Y,nx,nyTile);
      hipDeviceSynchronize();
      for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
        hipLaunchKernelGGL(( kernel2DXYp), dim3(gridBlock),dim3(threadBlock), 0, 0, dataOutput,dataInput,boundaryTop,boundaryBottom,weights,numSten,numStenHoriz,numStenLeft,numStenRight,numStenVert,numStenTop,numStenBottom,nxLocal,nyLocal,BLOCK_X,BLOCK_Y,nx,nyTile);
      }
      // BUG FIX: kernel launches are asynchronous — drain queued work before
      // starting the clock and wait for the timed launches to finish before
      // stopping it; otherwise only launch overhead is measured.
      hipDeviceSynchronize();
      auto start = steady_clock::now();
      for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        hipLaunchKernelGGL(( kernel2DXYp), dim3(gridBlock),dim3(threadBlock), 0, 0, dataOutput,dataInput,boundaryTop,boundaryBottom,weights,numSten,numStenHoriz,numStenLeft,numStenRight,numStenVert,numStenTop,numStenBottom,nxLocal,nyLocal,BLOCK_X,BLOCK_Y,nx,nyTile);
      }
      hipDeviceSynchronize();
      auto end = steady_clock::now();
      auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
      cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
      // BUG FIX: release the device buffers so the sweep does not leak
      // memory across the 140 configurations.
      hipFree(dataOutput);
      hipFree(dataInput);
      hipFree(boundaryTop);
      hipFree(boundaryBottom);
      hipFree((void*)weights);
    }
  }
}
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kernel2DXYp.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark sweep for kernel2DXYp: for argv[1] matrix sizes
// (rows of matrices_) and all 20 block shapes (rows of blocks_), time 1000
// launches and print [microseconds,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
int main(int argc, char **argv) {
  cudaSetDevice(0);
  char* p;
  int matrix_len = strtol(argv[1], &p, 10);
  for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
    for (int block_looper = 0; block_looper < 20; block_looper++) {
      int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
      int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
      // BUG FIX: these buffers hold XSIZE*YSIZE doubles; the original passed
      // XSIZE*YSIZE (bytes), under-allocating by a factor of sizeof(double).
      double *dataOutput = NULL;
      cudaMalloc(&dataOutput, XSIZE*YSIZE*sizeof(double));
      double *dataInput = NULL;
      cudaMalloc(&dataInput, XSIZE*YSIZE*sizeof(double));
      double *boundaryTop = NULL;
      cudaMalloc(&boundaryTop, XSIZE*YSIZE*sizeof(double));
      double *boundaryBottom = NULL;
      cudaMalloc(&boundaryBottom, XSIZE*YSIZE*sizeof(double));
      const double *weights = NULL;
      cudaMalloc(&weights, XSIZE*YSIZE*sizeof(double));
      // scalar kernel parameters (fixed by the benchmark generator)
      const int numSten = 1;
      const int numStenHoriz = 1;
      const int numStenLeft = 1;
      const int numStenRight = 1;
      const int numStenVert = 1;
      const int numStenTop = 1;
      const int numStenBottom = 1;
      const int nxLocal = 1;
      const int nyLocal = 1;
      const int BLOCK_X = 1;
      const int BLOCK_Y = 1;
      const int nx = 1;
      const int nyTile = 1;
      // round the problem size up to a multiple of the block shape
      int iXSIZE = XSIZE;
      int iYSIZE = YSIZE;
      while (iXSIZE % BLOCKX != 0) {
        iXSIZE++;
      }
      while (iYSIZE % BLOCKY != 0) {
        iYSIZE++;
      }
      dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
      dim3 threadBlock(BLOCKX, BLOCKY);
      cudaFree(0);  // force context creation before any timing
      // warm-up launch
      kernel2DXYp<<<gridBlock,threadBlock>>>(dataOutput,dataInput,boundaryTop,boundaryBottom,weights,numSten,numStenHoriz,numStenLeft,numStenRight,numStenVert,numStenTop,numStenBottom,nxLocal,nyLocal,BLOCK_X,BLOCK_Y,nx,nyTile);
      cudaDeviceSynchronize();
      for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
        kernel2DXYp<<<gridBlock,threadBlock>>>(dataOutput,dataInput,boundaryTop,boundaryBottom,weights,numSten,numStenHoriz,numStenLeft,numStenRight,numStenVert,numStenTop,numStenBottom,nxLocal,nyLocal,BLOCK_X,BLOCK_Y,nx,nyTile);
      }
      // BUG FIX: kernel launches are asynchronous — drain queued work before
      // starting the clock and wait for the timed launches to finish before
      // stopping it; otherwise only launch overhead is measured.
      cudaDeviceSynchronize();
      auto start = steady_clock::now();
      for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        kernel2DXYp<<<gridBlock,threadBlock>>>(dataOutput,dataInput,boundaryTop,boundaryBottom,weights,numSten,numStenHoriz,numStenLeft,numStenRight,numStenVert,numStenTop,numStenBottom,nxLocal,nyLocal,BLOCK_X,BLOCK_Y,nx,nyTile);
      }
      cudaDeviceSynchronize();
      auto end = steady_clock::now();
      auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
      cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
      // BUG FIX: release the device buffers so the sweep does not leak
      // memory across the 140 configurations.
      cudaFree(dataOutput);
      cudaFree(dataInput);
      cudaFree(boundaryTop);
      cudaFree(boundaryBottom);
      cudaFree((void*)weights);
    }
  }
}
c5dff5f283dd93b5a85f5a86db92ed80442c4be0.hip | // !!! This is a file automatically generated by hipify!!!
/************************************************************************/
// The purpose of this file is to provide a GPU implementation of the
// heat transfer simulation using MATLAB.
//
// Author: Jason Lowden
// Date: October 20, 2013
//
// File: KMeans.h
/************************************************************************/
#include <hip/hip_runtime_api.h>
#include <device_launch_parameters.h>
#include <texture_fetch_functions.h>
#include <hip/hip_texture_types.h>
#include <cmath>
#include <hip/hip_runtime.h>
#include "HeatTransfer.h"
texture<float,hipTextureType2D,hipReadModeElementType> heatTexture ;
// One explicit update step of the heat-diffusion simulation on a size x size
// grid read through `heatTexture` (bound to the current state by
// UpdateHeatMap). Each interior cell gets
//   T + heatSpeed * (T_top + T_bottom + T_left + T_right - 4*T);
// border cells are not written, so they keep the values the host copied into
// data_d before the iteration loop.
__global__ void HTkernel(float* data_d, int size, float heatSpeed){
  // NOTE(review): naming is swapped relative to convention — idy comes from
  // the launch's x dimension and idx from its y dimension; the code is
  // self-consistent since it reads tex2D(tex, idx, idy) and writes
  // data_d[idy*size + idx].
  int idy = threadIdx.x + blockIdx.x * blockDim.x;
  int idx = threadIdx.y + blockIdx.y * blockDim.y;
  // neighbour temperatures; the zero initialisers are only placeholders for
  // threads that fail the interior test below
  float T_center=0,T_top=0,T_bottom=0,T_left=0,T_right=0;
  if(idx>0 && idx<size-1 && idy>0 && idy<size-1){
    T_top=tex2D(heatTexture,idx,idy-1);
    T_left=tex2D(heatTexture,idx-1,idy);
    T_center=tex2D(heatTexture,idx,idy);
    T_right=tex2D(heatTexture,idx+1,idy);
    T_bottom=tex2D(heatTexture,idx,idy+1);
    data_d[idy*size+idx]=(T_center + (heatSpeed*(( T_top + T_bottom + T_left + T_right)- (4 * T_center))));
  }
}
bool UpdateHeatMap(float* dataIn, float* dataOut, int size, float heatSpeed, int numIterations)
{
// Error return value
//hipError_t status;
int bytes = size*size*sizeof(float);
float * data_d;
hipMalloc((void**) &data_d,bytes);
hipChannelFormatDesc channel = hipCreateChannelDesc(32,0,0,0, hipChannelFormatKindFloat);
hipArray* aray;
hipMallocArray(&aray, &channel, size, size, 0);
hipMemcpyToArray(aray, 0, 0, dataIn, bytes, hipMemcpyHostToDevice);
heatTexture.filterMode=hipFilterModePoint;
heatTexture.addressMode[0]=hipAddressModeClamp;
heatTexture.addressMode[1]=hipAddressModeClamp;
heatTexture.normalized=false;
hipMemcpy(data_d,dataIn,bytes,hipMemcpyHostToDevice);
//hipMemcpyToArray(aray, 0, 0, dataIn, bytes, hipMemcpyHostToDevice);
for(int i = 0; i< numIterations;i++){
hipBindTextureToArray(&heatTexture,aray,&channel);
dim3 dimBlock (16,16);
int gridx=(int)ceil((float)size/16);
int gridy=(int)ceil((float)size/16);
dim3 dimGrid (gridx,gridy);
hipLaunchKernelGGL(( HTkernel), dim3(dimGrid),dim3(dimBlock), 0, 0, data_d, size, heatSpeed);
hipDeviceSynchronize();
hipUnbindTexture(&heatTexture);
hipMemcpyToArray(aray, 0, 0, data_d, bytes, hipMemcpyDeviceToDevice);
}
hipMemcpy(dataOut,data_d,bytes,hipMemcpyDeviceToHost);
hipFree(data_d);
hipFreeArray(aray);
return true;
} | c5dff5f283dd93b5a85f5a86db92ed80442c4be0.cu | /************************************************************************/
// The purpose of this file is to provide a GPU implementation of the
// heat transfer simulation using MATLAB.
//
// Author: Jason Lowden
// Date: October 20, 2013
//
// File: KMeans.h
/************************************************************************/
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <texture_fetch_functions.h>
#include <cuda_texture_types.h>
#include <cmath>
#include <cuda.h>
#include "HeatTransfer.h"
texture<float,cudaTextureType2D,cudaReadModeElementType> heatTexture ;
__global__ void HTkernel(float* data_d, int size, float heatSpeed){
int idy = threadIdx.x + blockIdx.x * blockDim.x;
int idx = threadIdx.y + blockIdx.y * blockDim.y;
float T_center=0,T_top=0,T_bottom=0,T_left=0,T_right=0;
if(idx>0 && idx<size-1 && idy>0 && idy<size-1){
T_top=tex2D(heatTexture,idx,idy-1);
T_left=tex2D(heatTexture,idx-1,idy);
T_center=tex2D(heatTexture,idx,idy);
T_right=tex2D(heatTexture,idx+1,idy);
T_bottom=tex2D(heatTexture,idx,idy+1);
data_d[idy*size+idx]=(T_center + (heatSpeed*(( T_top + T_bottom + T_left + T_right)- (4 * T_center))));
}
}
bool UpdateHeatMap(float* dataIn, float* dataOut, int size, float heatSpeed, int numIterations)
{
// Error return value
//cudaError_t status;
int bytes = size*size*sizeof(float);
float * data_d;
cudaMalloc((void**) &data_d,bytes);
cudaChannelFormatDesc channel = cudaCreateChannelDesc(32,0,0,0, cudaChannelFormatKindFloat);
cudaArray* aray;
cudaMallocArray(&aray, &channel, size, size, 0);
cudaMemcpyToArray(aray, 0, 0, dataIn, bytes, cudaMemcpyHostToDevice);
heatTexture.filterMode=cudaFilterModePoint;
heatTexture.addressMode[0]=cudaAddressModeClamp;
heatTexture.addressMode[1]=cudaAddressModeClamp;
heatTexture.normalized=false;
cudaMemcpy(data_d,dataIn,bytes,cudaMemcpyHostToDevice);
//cudaMemcpyToArray(aray, 0, 0, dataIn, bytes, cudaMemcpyHostToDevice);
for(int i = 0; i< numIterations;i++){
cudaBindTextureToArray(&heatTexture,aray,&channel);
dim3 dimBlock (16,16);
int gridx=(int)ceil((float)size/16);
int gridy=(int)ceil((float)size/16);
dim3 dimGrid (gridx,gridy);
HTkernel<<<dimGrid,dimBlock>>>( data_d, size, heatSpeed);
cudaThreadSynchronize();
cudaUnbindTexture(&heatTexture);
cudaMemcpyToArray(aray, 0, 0, data_d, bytes, cudaMemcpyDeviceToDevice);
}
cudaMemcpy(dataOut,data_d,bytes,cudaMemcpyDeviceToHost);
cudaFree(data_d);
cudaFreeArray(aray);
return true;
} |
65137a497b4af69994e8676aaae21da998652272.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstddef> // size_t
#include <cstdint> // int_Xt
#include <raft/distance/distance_types.hpp> // DistanceType
#include <raft/spatial/knn/detail/fused_l2_knn-inl.cuh>
#define instantiate_raft_spatial_knn_detail_fusedL2Knn(Mvalue_idx, Mvalue_t, MusePrevTopKs) \
template void raft::spatial::knn::detail::fusedL2Knn<Mvalue_idx, Mvalue_t, MusePrevTopKs>( \
size_t D, \
Mvalue_idx * out_inds, \
Mvalue_t * out_dists, \
const Mvalue_t* index, \
const Mvalue_t* query, \
size_t n_index_rows, \
size_t n_query_rows, \
int k, \
bool rowMajorIndex, \
bool rowMajorQuery, \
hipStream_t stream, \
raft::distance::DistanceType metric)
instantiate_raft_spatial_knn_detail_fusedL2Knn(int32_t, float, true);
instantiate_raft_spatial_knn_detail_fusedL2Knn(int32_t, float, false);
#undef instantiate_raft_spatial_knn_detail_fusedL2Knn
| 65137a497b4af69994e8676aaae21da998652272.cu | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstddef> // size_t
#include <cstdint> // int_Xt
#include <raft/distance/distance_types.hpp> // DistanceType
#include <raft/spatial/knn/detail/fused_l2_knn-inl.cuh>
#define instantiate_raft_spatial_knn_detail_fusedL2Knn(Mvalue_idx, Mvalue_t, MusePrevTopKs) \
template void raft::spatial::knn::detail::fusedL2Knn<Mvalue_idx, Mvalue_t, MusePrevTopKs>( \
size_t D, \
Mvalue_idx * out_inds, \
Mvalue_t * out_dists, \
const Mvalue_t* index, \
const Mvalue_t* query, \
size_t n_index_rows, \
size_t n_query_rows, \
int k, \
bool rowMajorIndex, \
bool rowMajorQuery, \
cudaStream_t stream, \
raft::distance::DistanceType metric)
instantiate_raft_spatial_knn_detail_fusedL2Knn(int32_t, float, true);
instantiate_raft_spatial_knn_detail_fusedL2Knn(int32_t, float, false);
#undef instantiate_raft_spatial_knn_detail_fusedL2Knn
|
f2c9b8c4aa7e94166c2ea48e675d8218bd661c1f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "validEdge.h"
#include "kernelPrintf.h"
__global__ void kernelValidEdge(Extension *d_Extension,int *V,unsigned int numberElementd_Extension){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i<numberElementd_Extension){
if(d_Extension[i].li<=d_Extension[i].lj){
V[i]=1;
}
}
}
hipError_t validEdge(Extension *d_Extension,int *V,unsigned int numberElementd_Extension){
hipError_t cudaStatus;
dim3 block(512);
dim3 grid(numberElementd_Extension+block.x-1/block.x);
hipLaunchKernelGGL(( kernelValidEdge), dim3(grid),dim3(block), 0, 0, d_Extension,V,numberElementd_Extension);
hipDeviceSynchronize();
cudaStatus=hipGetLastError();
if(cudaStatus!=hipSuccess){
fprintf(stderr,"hipDeviceSynchronize kernelValidEdge failed",cudaStatus);
goto labelError;
}
//
printf("\nV array: ");
hipLaunchKernelGGL(( kernelPrintf), dim3(grid),dim3(block), 0, 0, V,numberElementd_Extension);
labelError:
return cudaStatus;
}
| f2c9b8c4aa7e94166c2ea48e675d8218bd661c1f.cu | #include "validEdge.h"
#include "kernelPrintf.h"
__global__ void kernelValidEdge(Extension *d_Extension,int *V,unsigned int numberElementd_Extension){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i<numberElementd_Extension){
if(d_Extension[i].li<=d_Extension[i].lj){
V[i]=1;
}
}
}
cudaError_t validEdge(Extension *d_Extension,int *V,unsigned int numberElementd_Extension){
cudaError_t cudaStatus;
dim3 block(512);
dim3 grid(numberElementd_Extension+block.x-1/block.x);
kernelValidEdge<<<grid,block>>>(d_Extension,V,numberElementd_Extension);
cudaDeviceSynchronize();
cudaStatus=cudaGetLastError();
if(cudaStatus!=cudaSuccess){
fprintf(stderr,"cudaDeviceSynchronize kernelValidEdge failed",cudaStatus);
goto labelError;
}
//
printf("\nV array: ");
kernelPrintf<<<grid,block>>>(V,numberElementd_Extension);
labelError:
return cudaStatus;
}
|
50fdd0adfcb50dfd21fa7b9af3da00399b3af2c6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <unittest/unittest.h>
#include <thrust/scan.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
template<typename Iterator1, typename Iterator2, typename Iterator3>
__global__
void inclusive_scan_by_key_kernel(Iterator1 keys_first, Iterator1 keys_last, Iterator2 values_first, Iterator3 result)
{
thrust::inclusive_scan_by_key(thrust::seq, keys_first, keys_last, values_first, result);
}
template<typename Iterator1, typename Iterator2, typename Iterator3>
__global__
void exclusive_scan_by_key_kernel(Iterator1 keys_first, Iterator1 keys_last, Iterator2 values_first, Iterator3 result)
{
thrust::exclusive_scan_by_key(thrust::seq, keys_first, keys_last, values_first, result);
}
template<typename Iterator1, typename Iterator2, typename Iterator3, typename T>
__global__
void exclusive_scan_by_key_kernel(Iterator1 keys_first, Iterator1 keys_last, Iterator2 values_first, Iterator3 result, T init)
{
thrust::exclusive_scan_by_key(thrust::seq, keys_first, keys_last, values_first, result, init);
}
template<typename T>
void TestScanByKeyDeviceSeq(const size_t n)
{
thrust::host_vector<int> h_keys(n);
for(size_t i = 0, k = 0; i < n; i++)
{
h_keys[i] = k;
if(rand() % 10 == 0)
{
k++;
}
}
thrust::device_vector<int> d_keys = h_keys;
thrust::host_vector<T> h_vals = unittest::random_integers<int>(n);
for(size_t i = 0; i < n; i++)
{
h_vals[i] = i % 10;
}
thrust::device_vector<T> d_vals = h_vals;
thrust::host_vector<T> h_output(n);
thrust::device_vector<T> d_output(n);
thrust::inclusive_scan_by_key(h_keys.begin(), h_keys.end(), h_vals.begin(), h_output.begin());
hipLaunchKernelGGL(( inclusive_scan_by_key_kernel), dim3(1),dim3(1), 0, 0, d_keys.begin(), d_keys.end(), d_vals.begin(), d_output.begin());
ASSERT_EQUAL(d_output, h_output);
thrust::exclusive_scan_by_key(h_keys.begin(), h_keys.end(), h_vals.begin(), h_output.begin());
hipLaunchKernelGGL(( exclusive_scan_by_key_kernel), dim3(1),dim3(1), 0, 0, d_keys.begin(), d_keys.end(), d_vals.begin(), d_output.begin());
ASSERT_EQUAL(d_output, h_output);
thrust::exclusive_scan_by_key(h_keys.begin(), h_keys.end(), h_vals.begin(), h_output.begin(), (T) 11);
hipLaunchKernelGGL(( exclusive_scan_by_key_kernel), dim3(1),dim3(1), 0, 0, d_keys.begin(), d_keys.end(), d_vals.begin(), d_output.begin(), (T) 11);
ASSERT_EQUAL(d_output, h_output);
// in-place scans
h_output = h_vals;
d_output = d_vals;
thrust::inclusive_scan_by_key(h_keys.begin(), h_keys.end(), h_output.begin(), h_output.begin());
hipLaunchKernelGGL(( inclusive_scan_by_key_kernel), dim3(1),dim3(1), 0, 0, d_keys.begin(), d_keys.end(), d_output.begin(), d_output.begin());
ASSERT_EQUAL(d_output, h_output);
h_output = h_vals;
d_output = d_vals;
thrust::exclusive_scan_by_key(h_keys.begin(), h_keys.end(), h_output.begin(), h_output.begin(), (T) 11);
hipLaunchKernelGGL(( exclusive_scan_by_key_kernel), dim3(1),dim3(1), 0, 0, d_keys.begin(), d_keys.end(), d_output.begin(), d_output.begin(), (T) 11);
ASSERT_EQUAL(d_output, h_output);
}
DECLARE_VARIABLE_UNITTEST(TestScanByKeyDeviceSeq);
| 50fdd0adfcb50dfd21fa7b9af3da00399b3af2c6.cu | #include <unittest/unittest.h>
#include <thrust/scan.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
template<typename Iterator1, typename Iterator2, typename Iterator3>
__global__
void inclusive_scan_by_key_kernel(Iterator1 keys_first, Iterator1 keys_last, Iterator2 values_first, Iterator3 result)
{
thrust::inclusive_scan_by_key(thrust::seq, keys_first, keys_last, values_first, result);
}
template<typename Iterator1, typename Iterator2, typename Iterator3>
__global__
void exclusive_scan_by_key_kernel(Iterator1 keys_first, Iterator1 keys_last, Iterator2 values_first, Iterator3 result)
{
thrust::exclusive_scan_by_key(thrust::seq, keys_first, keys_last, values_first, result);
}
template<typename Iterator1, typename Iterator2, typename Iterator3, typename T>
__global__
void exclusive_scan_by_key_kernel(Iterator1 keys_first, Iterator1 keys_last, Iterator2 values_first, Iterator3 result, T init)
{
thrust::exclusive_scan_by_key(thrust::seq, keys_first, keys_last, values_first, result, init);
}
template<typename T>
void TestScanByKeyDeviceSeq(const size_t n)
{
thrust::host_vector<int> h_keys(n);
for(size_t i = 0, k = 0; i < n; i++)
{
h_keys[i] = k;
if(rand() % 10 == 0)
{
k++;
}
}
thrust::device_vector<int> d_keys = h_keys;
thrust::host_vector<T> h_vals = unittest::random_integers<int>(n);
for(size_t i = 0; i < n; i++)
{
h_vals[i] = i % 10;
}
thrust::device_vector<T> d_vals = h_vals;
thrust::host_vector<T> h_output(n);
thrust::device_vector<T> d_output(n);
thrust::inclusive_scan_by_key(h_keys.begin(), h_keys.end(), h_vals.begin(), h_output.begin());
inclusive_scan_by_key_kernel<<<1,1>>>(d_keys.begin(), d_keys.end(), d_vals.begin(), d_output.begin());
ASSERT_EQUAL(d_output, h_output);
thrust::exclusive_scan_by_key(h_keys.begin(), h_keys.end(), h_vals.begin(), h_output.begin());
exclusive_scan_by_key_kernel<<<1,1>>>(d_keys.begin(), d_keys.end(), d_vals.begin(), d_output.begin());
ASSERT_EQUAL(d_output, h_output);
thrust::exclusive_scan_by_key(h_keys.begin(), h_keys.end(), h_vals.begin(), h_output.begin(), (T) 11);
exclusive_scan_by_key_kernel<<<1,1>>>(d_keys.begin(), d_keys.end(), d_vals.begin(), d_output.begin(), (T) 11);
ASSERT_EQUAL(d_output, h_output);
// in-place scans
h_output = h_vals;
d_output = d_vals;
thrust::inclusive_scan_by_key(h_keys.begin(), h_keys.end(), h_output.begin(), h_output.begin());
inclusive_scan_by_key_kernel<<<1,1>>>(d_keys.begin(), d_keys.end(), d_output.begin(), d_output.begin());
ASSERT_EQUAL(d_output, h_output);
h_output = h_vals;
d_output = d_vals;
thrust::exclusive_scan_by_key(h_keys.begin(), h_keys.end(), h_output.begin(), h_output.begin(), (T) 11);
exclusive_scan_by_key_kernel<<<1,1>>>(d_keys.begin(), d_keys.end(), d_output.begin(), d_output.begin(), (T) 11);
ASSERT_EQUAL(d_output, h_output);
}
DECLARE_VARIABLE_UNITTEST(TestScanByKeyDeviceSeq);
|
e74aa79817e613cd21b9fea1f1c4d7e95a2eddea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <float.h>
#include "hl_base.h"
#include "hl_cnn.h"
#include "hl_device_functions.cuh"
__global__ void KeMaxPoolForward(const int nthreads,
const real* inputData,
const int channels,
const int height,
const int width,
const int pooledH,
const int pooledW,
const int ksizeW,
const int ksizeH,
const int strideH,
const int strideW,
const int offsetH,
const int offsetW,
real* tgtData,
const int tgtStride,
real* maskData) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < nthreads) {
int pw = index % pooledW;
int ph = (index / pooledW) % pooledH;
int c = (index / pooledW / pooledH) % channels;
int frameNum = index / pooledW / pooledH / channels;
int hstart = ph * strideH - offsetH;
int wstart = pw * strideW - offsetW;
int hend = min(hstart + ksizeH, height);
int wend = min(wstart + ksizeW, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
real maxval = -FLT_MAX;
int max_index = -1;
inputData += (frameNum * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (maxval < inputData[h * width + w]) {
max_index = h * width + w;
maxval = inputData[max_index];
}
}
}
int tgtIndex =
index % (pooledW * pooledH * channels) + frameNum * tgtStride;
tgtData[tgtIndex] = maxval;
if (maskData != NULL) {
maskData[tgtIndex] = max_index;
}
}
}
void hl_maxpool_forward(const int frameCnt,
const real* inputData,
const int channels,
const int height,
const int width,
const int pooledH,
const int pooledW,
const int sizeX,
const int sizeY,
const int strideH,
const int strideW,
const int paddingH,
const int paddingW,
real* tgtData,
const int tgtStride,
real* maskData) {
int num_kernels = pooledH * pooledW * channels * frameCnt;
int blocks = (num_kernels + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
hipLaunchKernelGGL(( KeMaxPoolForward), dim3(grid), dim3(threads), 0, STREAM_DEFAULT, num_kernels,
inputData,
channels,
height,
width,
pooledH,
pooledW,
sizeX,
sizeY,
strideH,
strideW,
paddingH,
paddingW,
tgtData,
tgtStride,
maskData);
CHECK_SYNC("hl_maxpool_forward failed");
}
__global__ void KeMaxPoolBackward(const int nthreads,
const real* inputData,
const real* outData,
const real* outGrad,
const int channels,
const int height,
const int width,
const int pooledH,
const int pooledW,
const int sizeX,
const int sizeY,
const int strideH,
const int strideW,
const int padH,
const int padW,
real scaleA,
real scaleB,
real* targetGrad,
const int outStride) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < nthreads) {
// find out the local index
// find out the local offset
int offsetW = index % width + padW;
int offsetH = (index / width) % height + padH;
int offsetC = (index / width / height) % channels;
int frameNum = index / width / height / channels;
int phstart = (offsetH < sizeY) ? 0 : (offsetH - sizeY) / strideH + 1;
int pwstart = (offsetW < sizeX) ? 0 : (offsetW - sizeX) / strideW + 1;
int phend = offsetH >= 0 ? min(offsetH / strideH + 1, pooledH) : 0;
int pwend = offsetW >= 0 ? min(offsetW / strideW + 1, pooledW) : 0;
real gradient = 0;
real input = inputData[index];
outData += (frameNum * outStride + offsetC * pooledH * pooledW);
outGrad += (frameNum * outStride + offsetC * pooledH * pooledW);
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (input == outData[ph * pooledW + pw]) {
gradient += outGrad[ph * pooledW + pw];
}
}
}
targetGrad[index] = scaleB * targetGrad[index] + scaleA * gradient;
}
}
void hl_maxpool_backward(const int frameCnt,
const real* inputData,
const real* outData,
const real* outGrad,
const int channels,
const int height,
const int width,
const int pooledH,
const int pooledW,
const int sizeX,
const int sizeY,
const int strideH,
const int strideW,
const int paddingH,
const int paddingW,
real scaleA,
real scaleB,
real* targetGrad,
const int outStride) {
int num_kernels = height * width * channels * frameCnt;
int blocks = (num_kernels + 1024 - 1) / 1024;
hipLaunchKernelGGL(( KeMaxPoolBackward), dim3(blocks), dim3(1024), 0, STREAM_DEFAULT, num_kernels,
inputData,
outData,
outGrad,
channels,
height,
width,
pooledH,
pooledW,
sizeX,
sizeY,
strideH,
strideW,
paddingH,
paddingW,
scaleA,
scaleB,
targetGrad,
outStride);
CHECK_SYNC("hl_maxpool_backward");
}
__global__ void KeAvgPoolForward(const int nthreads,
const real* inputData,
const int channels,
const int height,
const int width,
const int pooledH,
const int pooledW,
const int sizeX,
const int sizeY,
const int strideH,
const int strideW,
const int padH,
const int padW,
real* tgtData,
const int tgtStride,
const bool excludeMode) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < nthreads) {
int pw = index % pooledW;
int ph = (index / pooledW) % pooledH;
int c = (index / pooledW / pooledH) % channels;
int frameNum = index / pooledW / pooledH / channels;
int hstart = ph * strideH - padH;
int wstart = pw * strideW - padW;
int hend = min(hstart + sizeY, height);
int wend = min(wstart + sizeX, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
int poolSize =
excludeMode ? (hend - hstart) * (wend - wstart) : sizeY * sizeX;
real aveval = 0;
inputData += (frameNum * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
aveval += inputData[h * width + w];
}
}
int tgtIndex =
index % (pooledW * pooledH * channels) + frameNum * tgtStride;
tgtData[tgtIndex] = aveval / poolSize;
}
}
void hl_avgpool_forward(const int frameCnt,
const real* inputData,
const int channels,
const int height,
const int width,
const int pooledH,
const int pooledW,
const int sizeX,
const int sizeY,
const int strideH,
const int strideW,
const int paddingH,
const int paddingW,
real* tgtData,
const int tgtStride,
const bool excludeMode) {
int num_kernels = pooledH * pooledW * channels * frameCnt;
int blocks = (num_kernels + 1024 - 1) / 1024;
hipLaunchKernelGGL(( KeAvgPoolForward), dim3(blocks), dim3(1024), 0, STREAM_DEFAULT, num_kernels,
inputData,
channels,
height,
width,
pooledH,
pooledW,
sizeX,
sizeY,
strideH,
strideW,
paddingH,
paddingW,
tgtData,
tgtStride,
excludeMode);
CHECK_SYNC("hl_avgpool_forward failed");
}
__global__ void KeAvgPoolBackward(const int nthreads,
const real* outGrad,
const int channels,
const int height,
const int width,
const int pooledH,
const int pooledW,
const int sizeX,
const int sizeY,
const int strideH,
const int strideW,
const int padH,
const int padW,
real scaleA,
real scaleB,
real* tgtGrad,
const int outStride,
const bool excludeMode) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < nthreads) {
int offsetW = index % width + padW;
int offsetH = (index / width) % height + padH;
int offsetC = (index / width / height) % channels;
int frameNum = index / width / height / channels;
int phstart = (offsetH < sizeY) ? 0 : (offsetH - sizeY) / strideH + 1;
int pwstart = (offsetW < sizeX) ? 0 : (offsetW - sizeX) / strideW + 1;
int phend = offsetH >= 0 ? min(offsetH / strideH + 1, pooledH) : 0;
int pwend = offsetW >= 0 ? min(offsetW / strideW + 1, pooledW) : 0;
real gradient = 0;
outGrad += (frameNum * outStride + offsetC * pooledH * pooledW);
for (int ph = phstart; ph < phend; ++ph) {
int hstart = ph * strideH - padH;
int hend = min(hstart + sizeY, height);
hstart = max(hstart, 0);
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
int wstart = pw * strideW - padW;
int wend = min(wstart + sizeX, width);
wstart = max(wstart, 0);
int poolSize =
excludeMode ? (hend - hstart) * (wend - wstart) : sizeY * sizeX;
gradient += outGrad[ph * pooledW + pw] / poolSize;
}
}
tgtGrad[index] = scaleB * tgtGrad[index] + scaleA * gradient;
}
}
void hl_avgpool_backward(const int frameCnt,
const real* outGrad,
const int channels,
const int height,
const int width,
const int pooledH,
const int pooledW,
const int sizeX,
const int sizeY,
const int strideH,
const int strideW,
const int paddingH,
const int paddingW,
real scaleA,
real scaleB,
real* backGrad,
const int outStride,
const bool excludeMode) {
int num_kernels = height * width * channels * frameCnt;
int blocks = (num_kernels + 1024 - 1) / 1024;
hipLaunchKernelGGL(( KeAvgPoolBackward), dim3(blocks), dim3(1024), 0, STREAM_DEFAULT, num_kernels,
outGrad,
channels,
height,
width,
pooledH,
pooledW,
sizeX,
sizeY,
strideH,
strideW,
paddingH,
paddingW,
scaleA,
scaleB,
backGrad,
outStride,
excludeMode);
CHECK_SYNC("hl_avgpool_backward failed");
}
__global__ void KeMaxPool3DForward(const int nthreads,
const real* inputData,
const int channels,
const int depth,
const int height,
const int width,
const int pooledD,
const int pooledH,
const int pooledW,
const int ksizeD,
const int ksizeH,
const int ksizeW,
const int strideD,
const int strideH,
const int strideW,
const int padD,
const int padH,
const int padW,
real* tgtData,
real* maxPoolIdxData,
const int tgtStride) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (nthreads);
index += blockDim.x * gridDim.x) {
int pw = index % pooledW;
int ph = (index / pooledW) % pooledH;
int pd = (index / pooledW / pooledH) % pooledD;
int c = (index / pooledW / pooledH / pooledD) % channels;
int frameNum = index / pooledW / pooledH / pooledD / channels;
int dstart = pd * strideD - padD;
int hstart = ph * strideH - padH;
int wstart = pw * strideW - padW;
int dend = min(dstart + ksizeD, depth);
int hend = min(hstart + ksizeH, height);
int wend = min(wstart + ksizeW, width);
dstart = max(dstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
real maxval = -FLT_MAX;
int maxIdx = -1;
inputData += (frameNum * channels + c) * depth * height * width;
for (int d = dstart; d < dend; ++d) {
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (maxval < inputData[(d * height + h) * width + w]) {
maxval = inputData[(d * height + h) * width + w];
maxIdx = (d * height + h) * width + w;
}
}
}
}
int tgtIndex =
index % (pooledW * pooledH * pooledD * channels) + frameNum * tgtStride;
tgtData[tgtIndex] = maxval;
maxPoolIdxData[tgtIndex] = maxIdx;
}
}
void hl_maxpool3D_forward(const int frameCnt,
const real* inputData,
const int channels,
const int depth,
const int height,
const int width,
const int pooledD,
const int pooledH,
const int pooledW,
const int sizeZ,
const int sizeY,
const int sizeX,
const int strideD,
const int strideH,
const int strideW,
const int padD,
const int padH,
const int padW,
real* tgtData,
real* maxPoolIdxData,
const int tgtStride) {
int num_kernels = pooledD * pooledH * pooledW * channels * frameCnt;
int blocks = (num_kernels + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
hipLaunchKernelGGL(( KeMaxPool3DForward), dim3(grid), dim3(threads), 0, STREAM_DEFAULT, num_kernels,
inputData,
channels,
depth,
height,
width,
pooledD,
pooledH,
pooledW,
sizeZ,
sizeY,
sizeX,
strideD,
strideH,
strideW,
padD,
padH,
padW,
tgtData,
maxPoolIdxData,
tgtStride);
CHECK_SYNC("hl_maxpool3D_forward failed");
}
__global__ void KeMaxPool3DBackward(const int nthreads,
const real* outGrad,
const int channels,
const int depth,
const int height,
const int width,
const int pooledD,
const int pooledH,
const int pooledW,
const int sizeZ,
const int sizeY,
const int sizeX,
const int strideD,
const int strideH,
const int strideW,
const int padD,
const int padH,
const int padW,
real scaleA,
real scaleB,
real* targetGrad,
real* maxPoolIdxData,
const int outStride) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (nthreads);
index += blockDim.x * gridDim.x) {
int offsetW = index % width;
int offsetH = (index / width) % height;
int offsetD = (index / width / height) % depth;
int offsetC = (index / width / height / depth) % channels;
int frameNum = index / width / height / depth / channels;
int pdstart =
(offsetD + padD < sizeZ) ? 0 : (offsetD + padD - sizeZ) / strideD + 1;
int phstart =
(offsetH + padH < sizeY) ? 0 : (offsetH + padH - sizeY) / strideH + 1;
int pwstart =
(offsetW + padW < sizeX) ? 0 : (offsetW + padW - sizeX) / strideW + 1;
int pdend = min((offsetD + padD) / strideD + 1, pooledD);
int phend = min((offsetH + padH) / strideH + 1, pooledH);
int pwend = min((offsetW + padW) / strideW + 1, pooledW);
real gradient = 0;
outGrad += ((frameNum * channels + offsetC) * pooledD * pooledH * pooledW);
maxPoolIdxData +=
((frameNum * channels + offsetC) * pooledD * pooledH * pooledW);
for (int pd = pdstart; pd < pdend; ++pd) {
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (((offsetD * height + offsetH) * width + offsetW) ==
maxPoolIdxData[(pd * pooledH + ph) * pooledW + pw])
gradient += outGrad[(pd * pooledH + ph) * pooledW + pw];
}
}
}
targetGrad[index] = scaleA * gradient + scaleB * targetGrad[index];
}
}
void hl_maxpool3D_backward(const int frameCnt,
const real* outGrad,
const int channels,
const int depth,
const int height,
const int width,
const int outputD,
const int outputH,
const int outputW,
const int sizeZ,
const int sizeY,
const int sizeX,
const int strideD,
const int strideH,
const int strideW,
const int paddingD,
const int paddingH,
const int paddingW,
real scaleA,
real scaleB,
real* targetGrad,
real* maxPoolIdxData,
const int outStride) {
int num_kernels = depth * height * width * channels * frameCnt;
int blocks = (num_kernels + 1024 - 1) / 1024;
hipLaunchKernelGGL(( KeMaxPool3DBackward), dim3(blocks), dim3(1024), 0, STREAM_DEFAULT, num_kernels,
outGrad,
channels,
depth,
height,
width,
outputD,
outputH,
outputW,
sizeZ,
sizeY,
sizeX,
strideD,
strideH,
strideW,
paddingD,
paddingH,
paddingW,
scaleA,
scaleB,
targetGrad,
maxPoolIdxData,
outStride);
CHECK_SYNC("hl_maxpool3D_backward");
}
__global__ void KeAvgPool3DForward(const int nthreads,
const real* inputData,
const int channels,
const int depth,
const int height,
const int width,
const int pooledD,
const int pooledH,
const int pooledW,
const int sizeZ,
const int sizeY,
const int sizeX,
const int strideD,
const int strideH,
const int strideW,
const int padD,
const int padH,
const int padW,
real* tgtData,
const int tgtStride) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (nthreads);
index += blockDim.x * gridDim.x) {
int pw = index % pooledW;
int ph = (index / pooledW) % pooledH;
int pd = (index / pooledW / pooledH) % pooledD;
int c = (index / pooledW / pooledH / pooledD) % channels;
int frameNum = index / pooledW / pooledH / pooledD / channels;
int dstart = pd * strideD - padD;
int hstart = ph * strideH - padH;
int wstart = pw * strideW - padW;
int dend = min(dstart + sizeZ, depth);
int hend = min(hstart + sizeY, height);
int wend = min(wstart + sizeX, width);
dstart = max(dstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
int pool_size = (dend - dstart) * (hend - hstart) * (wend - wstart);
real aveval = 0;
inputData += (frameNum * channels + c) * depth * height * width;
for (int d = dstart; d < dend; ++d) {
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
aveval += inputData[(d * height + h) * width + w];
}
}
}
int tgtIndex =
index % (pooledW * pooledH * pooledD * channels) + frameNum * tgtStride;
tgtData[tgtIndex] = aveval / pool_size;
}
}
void hl_avgpool3D_forward(const int frameCnt,
const real* inputData,
const int channels,
const int depth,
const int height,
const int width,
const int pooledD,
const int pooledH,
const int pooledW,
const int sizeZ,
const int sizeY,
const int sizeX,
const int strideD,
const int strideH,
const int strideW,
const int paddingD,
const int paddingH,
const int paddingW,
real* tgtData,
const int tgtStride) {
int num_kernels = pooledD * pooledH * pooledW * channels * frameCnt;
int blocks = (num_kernels + 1024 - 1) / 1024;
hipLaunchKernelGGL(( KeAvgPool3DForward), dim3(blocks), dim3(1024), 0, STREAM_DEFAULT, num_kernels,
inputData,
channels,
depth,
height,
width,
pooledD,
pooledH,
pooledW,
sizeZ,
sizeY,
sizeX,
strideD,
strideH,
strideW,
paddingD,
paddingH,
paddingW,
tgtData,
tgtStride);
CHECK_SYNC("hl_avgpool3D_forward failed");
}
// 3D average-pooling backward (gather formulation): one thread per INPUT
// element; nthreads = frames * channels * depth * height * width.
// Each thread sums the incoming gradient of every pooling window that covers
// its element, weighting each window by 1/poolsize, then blends:
//   tgtGrad[index] = scaleA * gradient + scaleB * tgtGrad[index]
// NOTE(review): outStride is unused here; the per-(frame, channel) offset
// into outGrad is computed from pooledD * pooledH * pooledW instead —
// confirm the output-gradient tensor is densely packed so the two agree.
__global__ void KeAvgPool3DBackward(const int nthreads,
                                    const real* outGrad,
                                    const int channels,
                                    const int depth,
                                    const int height,
                                    const int width,
                                    const int pooledD,
                                    const int pooledH,
                                    const int pooledW,
                                    const int sizeZ,
                                    const int sizeY,
                                    const int sizeX,
                                    const int strideD,
                                    const int strideH,
                                    const int strideW,
                                    const int padD,
                                    const int padH,
                                    const int padW,
                                    real scaleA,
                                    real scaleB,
                                    real* tgtGrad,
                                    const int outStride) {
  // Grid-stride loop: stays correct for any launch configuration.
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (nthreads);
       index += blockDim.x * gridDim.x) {
    // Decode the flat index into (frame, channel, d, h, w); adding the pad
    // moves the coordinate into the padded space used by the window math.
    int offsetW = index % width + padW;
    int offsetH = (index / width) % height + padH;
    int offsetD = (index / width / height) % depth + padD;
    int offsetC = (index / width / height / depth) % channels;
    int frameNum = index / width / height / depth / channels;
    // First/last pooled output along each axis whose window can contain
    // this input element.
    int pdstart = (offsetD < sizeZ) ? 0 : (offsetD - sizeZ) / strideD + 1;
    int phstart = (offsetH < sizeY) ? 0 : (offsetH - sizeY) / strideH + 1;
    int pwstart = (offsetW < sizeX) ? 0 : (offsetW - sizeX) / strideW + 1;
    int pdend = min(offsetD / strideD + 1, pooledD);
    int phend = min(offsetH / strideH + 1, pooledH);
    int pwend = min(offsetW / strideW + 1, pooledW);
    real gradient = 0;
    // Jump to this (frame, channel) plane of the output gradient.
    outGrad += (frameNum * channels + offsetC) * pooledD * pooledH * pooledW;
    for (int pd = pdstart; pd < pdend; ++pd) {
      int dstart = pd * strideD - padD;
      int dend = min(dstart + sizeZ, depth);
      dstart = max(dstart, 0);
      for (int ph = phstart; ph < phend; ++ph) {
        int hstart = ph * strideH - padH;
        int hend = min(hstart + sizeY, height);
        hstart = max(hstart, 0);
        for (int pw = pwstart; pw < pwend; ++pw) {
          // figure out the pooling size
          int wstart = pw * strideW - padW;
          int wend = min(wstart + sizeX, width);
          wstart = max(wstart, 0);
          int poolsize = (dend - dstart) * (hend - hstart) * (wend - wstart);
          // The forward averaged over the clipped window, so each input
          // receives 1/poolsize of that window's gradient.
          gradient += outGrad[(pd * pooledH + ph) * pooledW + pw] / poolsize;
        }
      }
    }
    tgtGrad[index] = scaleA * gradient + scaleB * tgtGrad[index];
  }
}
/*
 * Host launcher for the 3D average-pooling backward kernel.
 * One GPU thread per input-gradient element:
 * total = frameCnt * channels * depth * height * width.
 * backGrad is updated in place as scaleA * gradient + scaleB * backGrad.
 */
void hl_avgpool3D_backward(const int frameCnt,
                           const real* outGrad,
                           const int channels,
                           const int depth,
                           const int height,
                           const int width,
                           const int outputD,
                           const int outputH,
                           const int outputW,
                           const int sizeZ,
                           const int sizeY,
                           const int sizeX,
                           const int strideD,
                           const int strideH,
                           const int strideW,
                           int paddingD,
                           int paddingH,
                           int paddingW,
                           real scaleA,
                           real scaleB,
                           real* backGrad,
                           const int outStride) {
  const int kBlockSize = 1024;
  const int totalThreads = depth * height * width * channels * frameCnt;
  // Ceiling division so the tail of the work is covered.
  const int gridBlocks = (totalThreads + kBlockSize - 1) / kBlockSize;
  hipLaunchKernelGGL((KeAvgPool3DBackward),
                     dim3(gridBlocks),
                     dim3(kBlockSize),
                     0,
                     STREAM_DEFAULT,
                     totalThreads,
                     outGrad,
                     channels,
                     depth,
                     height,
                     width,
                     outputD,
                     outputH,
                     outputW,
                     sizeZ,
                     sizeY,
                     sizeX,
                     strideD,
                     strideH,
                     strideW,
                     paddingD,
                     paddingH,
                     paddingW,
                     scaleA,
                     scaleB,
                     backGrad,
                     outStride);
  CHECK_SYNC("hl_avgpool3D_backward failed");
}
// Bilinear interpolation forward pass (image resize).
// One thread per output element, guarded by tid < outputH * outputW.
// The index math implies the layout: each row of length inputW (resp.
// outputW) packs numChannels images of inImgSize = inputW / numChannels
// (resp. outImgSize = outputW / numChannels) elements each.
// ratioH/ratioW map output pixel coordinates to fractional input
// coordinates; the integer part selects the top-left source pixel and the
// fractional part becomes the blend weights (h1/h2lambda, w1/w2lambda).
__global__ void KeBilinearInterpFw(const real* in,
                                   const size_t inImgH,
                                   const size_t inImgW,
                                   const size_t inputH,
                                   const size_t inputW,
                                   real* out,
                                   const size_t outImgH,
                                   const size_t outImgW,
                                   const size_t outputH,
                                   const size_t outputW,
                                   const size_t numChannels,
                                   const real ratioH,
                                   const real ratioW) {
  int nthreads = outputH * outputW;
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < nthreads) {
    int outIdH = tid / outputW;
    int outIdW = tid % outputW;
    int inImgSize = inputW / numChannels;
    int outImgSize = outputW / numChannels;
    int channelId = outIdW / outImgSize;
    // Vertical source row: integer part of the mapped coordinate ...
    int outImgIdy = (outIdW % outImgSize) / outImgW;
    int inImgIdy = ratioH * outImgIdy;
    // ... step to the next row unless already at the bottom edge.
    int hId = (inImgIdy < inImgH - 1) ? 1 : 0;
    real h1lambda = ratioH * outImgIdy - inImgIdy;
    real h2lambda = 1.f - h1lambda;
    // Horizontal source column, same scheme.
    int outImgIdx = tid % outImgW;
    int inImgIdx = ratioW * outImgIdx;
    int wId = (inImgIdx < inImgW - 1) ? 1 : 0;
    real w1lambda = ratioW * outImgIdx - inImgIdx;
    real w2lambda = 1.f - w1lambda;
    // Top-left corner of the 2x2 source neighborhood.
    const real* inPos = &in[outIdH * inputW + channelId * inImgSize +
                            inImgIdy * inImgW + inImgIdx];
    // bilinear interpolation
    out[outIdH * outputW + outIdW] =
        h2lambda * (w2lambda * inPos[0] + w1lambda * inPos[wId]) +
        h1lambda * (w2lambda * inPos[hId * inImgW] +
                    w1lambda * inPos[hId * inImgW + wId]);
  }
}
/*
 * Host launcher for bilinear upsampling (forward).
 * One GPU thread per output element (outputH * outputW in total).
 */
void hl_bilinear_forward(const real* inData,
                         const size_t inImgH,
                         const size_t inImgW,
                         const size_t inputH,
                         const size_t inputW,
                         real* outData,
                         const size_t outImgH,
                         const size_t outImgW,
                         const size_t outputH,
                         const size_t outputW,
                         const size_t numChannels,
                         const real ratioH,
                         const real ratioW) {
  const int kBlockSize = 1024;
  const int totalThreads = outputH * outputW;
  // Ceiling division so the tail of the work is covered.
  const int gridBlocks = (totalThreads + kBlockSize - 1) / kBlockSize;
  hipLaunchKernelGGL((KeBilinearInterpFw),
                     dim3(gridBlocks),
                     dim3(kBlockSize),
                     0,
                     STREAM_DEFAULT,
                     inData,
                     inImgH,
                     inImgW,
                     inputH,
                     inputW,
                     outData,
                     outImgH,
                     outImgW,
                     outputH,
                     outputW,
                     numChannels,
                     ratioH,
                     ratioW);
  CHECK_SYNC("hl_bilinear_forward failed");
}
// Bilinear interpolation backward pass.
// One thread per OUTPUT-gradient element; each thread scatters its value
// into the 2x2 input-gradient neighborhood it was interpolated from, using
// exactly the same index/weight derivation as the forward kernel.
// paddleAtomicAdd is used because threads handling different output pixels
// can target the same input cell, so a plain += would lose updates.
__global__ void KeBilinearInterpBw(real* in,
                                   const size_t inImgH,
                                   const size_t inImgW,
                                   const size_t inputH,
                                   const size_t inputW,
                                   const real* out,
                                   const size_t outImgH,
                                   const size_t outImgW,
                                   const size_t outputH,
                                   const size_t outputW,
                                   const size_t numChannels,
                                   const real ratioH,
                                   const real ratioW) {
  int nthreads = outputH * outputW;
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < nthreads) {
    int outIdH = tid / outputW;
    int outIdW = tid % outputW;
    int inImgSize = inputW / numChannels;
    int outImgSize = outputW / numChannels;
    int channelId = outIdW / outImgSize;
    // Vertical source row and fractional weights (mirrors the forward pass).
    int outImgIdy = (outIdW % outImgSize) / outImgW;
    int inImgIdy = ratioH * outImgIdy;
    // Step to the next row/column unless already on the image edge.
    int hId = (inImgIdy < inImgH - 1) ? 1 : 0;
    real h1lambda = ratioH * outImgIdy - inImgIdy;
    real h2lambda = 1.f - h1lambda;
    int outImgIdx = tid % outImgW;
    int inImgIdx = ratioW * outImgIdx;
    int wId = (inImgIdx < inImgW - 1) ? 1 : 0;
    real w1lambda = ratioW * outImgIdx - inImgIdx;
    real w2lambda = 1.f - w1lambda;
    // Top-left corner of the 2x2 neighborhood receiving this gradient.
    real* inPos = &in[outIdH * inputW + channelId * inImgSize +
                      inImgIdy * inImgW + inImgIdx];
    const real* outPos = &out[outIdH * outputW + outIdW];
    paddle::paddleAtomicAdd(&inPos[0], h2lambda * w2lambda * outPos[0]);
    paddle::paddleAtomicAdd(&inPos[wId], h2lambda * w1lambda * outPos[0]);
    paddle::paddleAtomicAdd(&inPos[hId * inImgW],
                            h1lambda * w2lambda * outPos[0]);
    paddle::paddleAtomicAdd(&inPos[hId * inImgW + wId],
                            h1lambda * w1lambda * outPos[0]);
  }
}
/*
 * Host launcher for bilinear upsampling (backward).
 * One GPU thread per output-gradient element (outputH * outputW in total);
 * the kernel scatters into inGrad with atomic adds.
 */
void hl_bilinear_backward(real* inGrad,
                          const size_t inImgH,
                          const size_t inImgW,
                          const size_t inputH,
                          const size_t inputW,
                          const real* outGrad,
                          const size_t outImgH,
                          const size_t outImgW,
                          const size_t outputH,
                          const size_t outputW,
                          const size_t numChannels,
                          const real ratioH,
                          const real ratioW) {
  const int kBlockSize = 1024;
  const int totalThreads = outputH * outputW;
  // Ceiling division so the tail of the work is covered.
  const int gridBlocks = (totalThreads + kBlockSize - 1) / kBlockSize;
  hipLaunchKernelGGL((KeBilinearInterpBw),
                     dim3(gridBlocks),
                     dim3(kBlockSize),
                     0,
                     STREAM_DEFAULT,
                     inGrad,
                     inImgH,
                     inImgW,
                     inputH,
                     inputW,
                     outGrad,
                     outImgH,
                     outImgW,
                     outputH,
                     outputW,
                     numChannels,
                     ratioH,
                     ratioW);
  CHECK_SYNC("hl_bilinear_backward failed");
}
/*
 * Maxout forward: for each output element, scan its `groups` candidate
 * activations and keep the maximum value plus the winning group's index.
 * One thread per output element; nthreads = batchSize * size.
 *  - inData : candidates for one (channel, feature) slot are spaced
 *             featLen apart (see data_idx below).
 *  - outData: per-element maxima (nthreads entries).
 *  - idData : winning group index per element, consumed by the backward
 *             pass to route gradients.
 */
__global__ void maxoutFpCompute(size_t nthreads,
                                const real* inData,
                                real* outData,
                                int* idData,
                                size_t size,
                                size_t featLen,
                                size_t groups) {
  // Use size_t for the flat thread id: the original `int index` was
  // compared against the size_t `nthreads` (mixed-sign comparison) and the
  // unsigned-int product blockIdx.x * blockDim.x could wrap on very large
  // launches.
  size_t index = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
  if (index < nthreads) {
    size_t batch_idx = index / size;
    size_t i = index % size;
    size_t channel_idx = i / featLen;
    size_t feat_idx = i % featLen;
    // First candidate of this element's group run in the input layout.
    size_t data_idx =
        (batch_idx * size + channel_idx * featLen) * groups + feat_idx;
    real max = inData[data_idx];
    int maxId = 0;
    for (size_t g = 1; g < groups; ++g) {
      real tmp = inData[data_idx + g * featLen];
      if (tmp > max) {
        max = tmp;
        maxId = (int)g;
      }
    }
    outData[index] = max;
    idData[index] = maxId;
  }
}
/*
 * Host launcher for maxoutFpCompute: one thread per output element
 * (batchSize * size in total), 1024 threads per block.
 */
void hl_maxout_forward(const real* inData,
                       real* outData,
                       int* idData,
                       size_t batchSize,
                       size_t size,
                       size_t featLen,
                       size_t groups) {
  const int kBlockSize = 1024;
  const int totalThreads = size * batchSize;
  const int gridBlocks = (totalThreads + kBlockSize - 1) / kBlockSize;
  hipLaunchKernelGGL((maxoutFpCompute),
                     dim3(gridBlocks),
                     dim3(kBlockSize),
                     0,
                     STREAM_DEFAULT,
                     totalThreads,
                     inData,
                     outData,
                     idData,
                     size,
                     featLen,
                     groups);
  CHECK_SYNC("hl_maxout_forward failed");
}
/*
 * Maxout backward: routes each output gradient to the input activation that
 * won the forward max, using the group index recorded in idData.
 * One thread per output element; nthreads = batchSize * size. Each output
 * element maps to a distinct gradIdx (batch/channel/feature decompose the
 * index uniquely), so the unsynchronized += does not race.
 */
__global__ void maxoutBpCompute(size_t nthreads,
                                real* inGrad,
                                const real* outGrad,
                                const int* idData,
                                size_t size,
                                size_t featLen,
                                size_t groups) {
  // size_t index avoids the original mixed-sign comparison with the size_t
  // `nthreads` and cannot wrap for launches larger than UINT_MAX threads.
  size_t index = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
  if (index < nthreads) {
    size_t batch_idx = index / size;
    size_t i = index % size;
    size_t channel_idx = i / featLen;
    size_t feat_idx = i % featLen;
    size_t newIndex = batch_idx * size;
    // Offset of the winning activation inside this sample's input block.
    size_t gradIdx =
        (channel_idx * groups + (idData + newIndex)[i]) * featLen + feat_idx;
    (inGrad + newIndex * groups)[gradIdx] += (outGrad + newIndex)[i];
  }
}
/*
 * Host launcher for maxoutBpCompute: one thread per output-gradient element
 * (batchSize * size in total), 1024 threads per block.
 */
void hl_maxout_backward(real* inGrad,
                        const real* outGrad,
                        const int* idData,
                        size_t batchSize,
                        size_t size,
                        size_t featLen,
                        size_t groups) {
  const int kBlockSize = 1024;
  const int totalThreads = size * batchSize;
  const int gridBlocks = (totalThreads + kBlockSize - 1) / kBlockSize;
  hipLaunchKernelGGL((maxoutBpCompute),
                     dim3(gridBlocks),
                     dim3(kBlockSize),
                     0,
                     STREAM_DEFAULT,
                     totalThreads,
                     inGrad,
                     outGrad,
                     idData,
                     size,
                     featLen,
                     groups);
  CHECK_SYNC("hl_maxout_backward failed");
}
| e74aa79817e613cd21b9fea1f1c4d7e95a2eddea.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <float.h>
#include "hl_base.h"
#include "hl_cnn.h"
#include "hl_device_functions.cuh"
// 2D max-pooling forward: one thread per pooled output element
// (nthreads = frames * channels * pooledH * pooledW).
// Writes the window maximum to tgtData and, when maskData is non-NULL, the
// argmax offset (within the per-frame/channel image plane) for unpooling.
// NOTE(review): ksizeW precedes ksizeH in the parameter list — callers must
// match this order (hl_maxpool_forward passes sizeX then sizeY).
__global__ void KeMaxPoolForward(const int nthreads,
                                 const real* inputData,
                                 const int channels,
                                 const int height,
                                 const int width,
                                 const int pooledH,
                                 const int pooledW,
                                 const int ksizeW,
                                 const int ksizeH,
                                 const int strideH,
                                 const int strideW,
                                 const int offsetH,
                                 const int offsetW,
                                 real* tgtData,
                                 const int tgtStride,
                                 real* maskData) {
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index < nthreads) {
    // Decode the flat index into (frame, channel, ph, pw).
    int pw = index % pooledW;
    int ph = (index / pooledW) % pooledH;
    int c = (index / pooledW / pooledH) % channels;
    int frameNum = index / pooledW / pooledH / channels;
    // Window bounds: clip the end to the image first, then clamp the start
    // (which can be negative because of padding) to zero.
    int hstart = ph * strideH - offsetH;
    int wstart = pw * strideW - offsetW;
    int hend = min(hstart + ksizeH, height);
    int wend = min(wstart + ksizeW, width);
    hstart = max(hstart, 0);
    wstart = max(wstart, 0);
    real maxval = -FLT_MAX;
    int max_index = -1;
    // Move to this (frame, channel) image plane.
    inputData += (frameNum * channels + c) * height * width;
    for (int h = hstart; h < hend; ++h) {
      for (int w = wstart; w < wend; ++w) {
        if (maxval < inputData[h * width + w]) {
          max_index = h * width + w;
          maxval = inputData[max_index];
        }
      }
    }
    // tgtStride is the per-frame stride of the output; the first term is
    // the offset within one frame.
    int tgtIndex =
        index % (pooledW * pooledH * channels) + frameNum * tgtStride;
    tgtData[tgtIndex] = maxval;
    if (maskData != NULL) {
      maskData[tgtIndex] = max_index;
    }
  }
}
/*
 * Host launcher for KeMaxPoolForward: one thread per pooled output element.
 * Note the kernel takes the window size as (ksizeW, ksizeH), so sizeX is
 * forwarded before sizeY.
 */
void hl_maxpool_forward(const int frameCnt,
                        const real* inputData,
                        const int channels,
                        const int height,
                        const int width,
                        const int pooledH,
                        const int pooledW,
                        const int sizeX,
                        const int sizeY,
                        const int strideH,
                        const int strideW,
                        const int paddingH,
                        const int paddingW,
                        real* tgtData,
                        const int tgtStride,
                        real* maskData) {
  const int kBlockSize = 1024;
  const int totalThreads = pooledH * pooledW * channels * frameCnt;
  // Ceiling division so the tail of the work is covered.
  const int numBlocks = (totalThreads + kBlockSize - 1) / kBlockSize;
  KeMaxPoolForward<<<numBlocks, kBlockSize, 0, STREAM_DEFAULT>>>(totalThreads,
                                                                 inputData,
                                                                 channels,
                                                                 height,
                                                                 width,
                                                                 pooledH,
                                                                 pooledW,
                                                                 sizeX,
                                                                 sizeY,
                                                                 strideH,
                                                                 strideW,
                                                                 paddingH,
                                                                 paddingW,
                                                                 tgtData,
                                                                 tgtStride,
                                                                 maskData);
  CHECK_SYNC("hl_maxpool_forward failed");
}
// 2D max-pooling backward: one thread per INPUT element
// (nthreads = frames * channels * height * width).
// An input position receives gradient from every pooling window whose
// recorded maximum equals its value (comparison against outData); ties
// therefore route gradient to all matching positions. The result is blended:
//   targetGrad = scaleB * targetGrad + scaleA * gradient
__global__ void KeMaxPoolBackward(const int nthreads,
                                  const real* inputData,
                                  const real* outData,
                                  const real* outGrad,
                                  const int channels,
                                  const int height,
                                  const int width,
                                  const int pooledH,
                                  const int pooledW,
                                  const int sizeX,
                                  const int sizeY,
                                  const int strideH,
                                  const int strideW,
                                  const int padH,
                                  const int padW,
                                  real scaleA,
                                  real scaleB,
                                  real* targetGrad,
                                  const int outStride) {
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index < nthreads) {
    // find out the local index
    // find out the local offset
    // Decode into (frame, channel, h, w); +pad shifts into padded space.
    int offsetW = index % width + padW;
    int offsetH = (index / width) % height + padH;
    int offsetC = (index / width / height) % channels;
    int frameNum = index / width / height / channels;
    // Range of pooled outputs whose window can cover this input element.
    int phstart = (offsetH < sizeY) ? 0 : (offsetH - sizeY) / strideH + 1;
    int pwstart = (offsetW < sizeX) ? 0 : (offsetW - sizeX) / strideW + 1;
    int phend = offsetH >= 0 ? min(offsetH / strideH + 1, pooledH) : 0;
    int pwend = offsetW >= 0 ? min(offsetW / strideW + 1, pooledW) : 0;
    real gradient = 0;
    real input = inputData[index];
    // Advance to this (frame, channel) plane; outStride is the per-frame
    // stride of the pooled tensors.
    outData += (frameNum * outStride + offsetC * pooledH * pooledW);
    outGrad += (frameNum * outStride + offsetC * pooledH * pooledW);
    for (int ph = phstart; ph < phend; ++ph) {
      for (int pw = pwstart; pw < pwend; ++pw) {
        // Value match against the stored window maximum routes the gradient.
        if (input == outData[ph * pooledW + pw]) {
          gradient += outGrad[ph * pooledW + pw];
        }
      }
    }
    targetGrad[index] = scaleB * targetGrad[index] + scaleA * gradient;
  }
}
/*
 * Host launcher for KeMaxPoolBackward: one thread per input-gradient
 * element (frameCnt * channels * height * width in total).
 */
void hl_maxpool_backward(const int frameCnt,
                         const real* inputData,
                         const real* outData,
                         const real* outGrad,
                         const int channels,
                         const int height,
                         const int width,
                         const int pooledH,
                         const int pooledW,
                         const int sizeX,
                         const int sizeY,
                         const int strideH,
                         const int strideW,
                         const int paddingH,
                         const int paddingW,
                         real scaleA,
                         real scaleB,
                         real* targetGrad,
                         const int outStride) {
  const int kBlockSize = 1024;
  const int totalThreads = height * width * channels * frameCnt;
  // Ceiling division so the tail of the work is covered.
  const int numBlocks = (totalThreads + kBlockSize - 1) / kBlockSize;
  KeMaxPoolBackward<<<numBlocks, kBlockSize, 0, STREAM_DEFAULT>>>(
      totalThreads,
      inputData,
      outData,
      outGrad,
      channels,
      height,
      width,
      pooledH,
      pooledW,
      sizeX,
      sizeY,
      strideH,
      strideW,
      paddingH,
      paddingW,
      scaleA,
      scaleB,
      targetGrad,
      outStride);
  CHECK_SYNC("hl_maxpool_backward");
}
__global__ void KeAvgPoolForward(const int nthreads,
const real* inputData,
const int channels,
const int height,
const int width,
const int pooledH,
const int pooledW,
const int sizeX,
const int sizeY,
const int strideH,
const int strideW,
const int padH,
const int padW,
real* tgtData,
const int tgtStride,
const bool excludeMode) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < nthreads) {
int pw = index % pooledW;
int ph = (index / pooledW) % pooledH;
int c = (index / pooledW / pooledH) % channels;
int frameNum = index / pooledW / pooledH / channels;
int hstart = ph * strideH - padH;
int wstart = pw * strideW - padW;
int hend = min(hstart + sizeY, height);
int wend = min(wstart + sizeX, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
int poolSize =
excludeMode ? (hend - hstart) * (wend - wstart) : sizeY * sizeX;
real aveval = 0;
inputData += (frameNum * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
aveval += inputData[h * width + w];
}
}
int tgtIndex =
index % (pooledW * pooledH * channels) + frameNum * tgtStride;
tgtData[tgtIndex] = aveval / poolSize;
}
}
void hl_avgpool_forward(const int frameCnt,
const real* inputData,
const int channels,
const int height,
const int width,
const int pooledH,
const int pooledW,
const int sizeX,
const int sizeY,
const int strideH,
const int strideW,
const int paddingH,
const int paddingW,
real* tgtData,
const int tgtStride,
const bool excludeMode) {
int num_kernels = pooledH * pooledW * channels * frameCnt;
int blocks = (num_kernels + 1024 - 1) / 1024;
KeAvgPoolForward<<<blocks, 1024, 0, STREAM_DEFAULT>>>(num_kernels,
inputData,
channels,
height,
width,
pooledH,
pooledW,
sizeX,
sizeY,
strideH,
strideW,
paddingH,
paddingW,
tgtData,
tgtStride,
excludeMode);
CHECK_SYNC("hl_avgpool_forward failed");
}
__global__ void KeAvgPoolBackward(const int nthreads,
const real* outGrad,
const int channels,
const int height,
const int width,
const int pooledH,
const int pooledW,
const int sizeX,
const int sizeY,
const int strideH,
const int strideW,
const int padH,
const int padW,
real scaleA,
real scaleB,
real* tgtGrad,
const int outStride,
const bool excludeMode) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < nthreads) {
int offsetW = index % width + padW;
int offsetH = (index / width) % height + padH;
int offsetC = (index / width / height) % channels;
int frameNum = index / width / height / channels;
int phstart = (offsetH < sizeY) ? 0 : (offsetH - sizeY) / strideH + 1;
int pwstart = (offsetW < sizeX) ? 0 : (offsetW - sizeX) / strideW + 1;
int phend = offsetH >= 0 ? min(offsetH / strideH + 1, pooledH) : 0;
int pwend = offsetW >= 0 ? min(offsetW / strideW + 1, pooledW) : 0;
real gradient = 0;
outGrad += (frameNum * outStride + offsetC * pooledH * pooledW);
for (int ph = phstart; ph < phend; ++ph) {
int hstart = ph * strideH - padH;
int hend = min(hstart + sizeY, height);
hstart = max(hstart, 0);
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
int wstart = pw * strideW - padW;
int wend = min(wstart + sizeX, width);
wstart = max(wstart, 0);
int poolSize =
excludeMode ? (hend - hstart) * (wend - wstart) : sizeY * sizeX;
gradient += outGrad[ph * pooledW + pw] / poolSize;
}
}
tgtGrad[index] = scaleB * tgtGrad[index] + scaleA * gradient;
}
}
void hl_avgpool_backward(const int frameCnt,
const real* outGrad,
const int channels,
const int height,
const int width,
const int pooledH,
const int pooledW,
const int sizeX,
const int sizeY,
const int strideH,
const int strideW,
const int paddingH,
const int paddingW,
real scaleA,
real scaleB,
real* backGrad,
const int outStride,
const bool excludeMode) {
int num_kernels = height * width * channels * frameCnt;
int blocks = (num_kernels + 1024 - 1) / 1024;
KeAvgPoolBackward<<<blocks, 1024, 0, STREAM_DEFAULT>>>(num_kernels,
outGrad,
channels,
height,
width,
pooledH,
pooledW,
sizeX,
sizeY,
strideH,
strideW,
paddingH,
paddingW,
scaleA,
scaleB,
backGrad,
outStride,
excludeMode);
CHECK_SYNC("hl_avgpool_backward failed");
}
__global__ void KeMaxPool3DForward(const int nthreads,
const real* inputData,
const int channels,
const int depth,
const int height,
const int width,
const int pooledD,
const int pooledH,
const int pooledW,
const int ksizeD,
const int ksizeH,
const int ksizeW,
const int strideD,
const int strideH,
const int strideW,
const int padD,
const int padH,
const int padW,
real* tgtData,
real* maxPoolIdxData,
const int tgtStride) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (nthreads);
index += blockDim.x * gridDim.x) {
int pw = index % pooledW;
int ph = (index / pooledW) % pooledH;
int pd = (index / pooledW / pooledH) % pooledD;
int c = (index / pooledW / pooledH / pooledD) % channels;
int frameNum = index / pooledW / pooledH / pooledD / channels;
int dstart = pd * strideD - padD;
int hstart = ph * strideH - padH;
int wstart = pw * strideW - padW;
int dend = min(dstart + ksizeD, depth);
int hend = min(hstart + ksizeH, height);
int wend = min(wstart + ksizeW, width);
dstart = max(dstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
real maxval = -FLT_MAX;
int maxIdx = -1;
inputData += (frameNum * channels + c) * depth * height * width;
for (int d = dstart; d < dend; ++d) {
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (maxval < inputData[(d * height + h) * width + w]) {
maxval = inputData[(d * height + h) * width + w];
maxIdx = (d * height + h) * width + w;
}
}
}
}
int tgtIndex =
index % (pooledW * pooledH * pooledD * channels) + frameNum * tgtStride;
tgtData[tgtIndex] = maxval;
maxPoolIdxData[tgtIndex] = maxIdx;
}
}
void hl_maxpool3D_forward(const int frameCnt,
const real* inputData,
const int channels,
const int depth,
const int height,
const int width,
const int pooledD,
const int pooledH,
const int pooledW,
const int sizeZ,
const int sizeY,
const int sizeX,
const int strideD,
const int strideH,
const int strideW,
const int padD,
const int padH,
const int padW,
real* tgtData,
real* maxPoolIdxData,
const int tgtStride) {
int num_kernels = pooledD * pooledH * pooledW * channels * frameCnt;
int blocks = (num_kernels + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
KeMaxPool3DForward<<<grid, threads, 0, STREAM_DEFAULT>>>(num_kernels,
inputData,
channels,
depth,
height,
width,
pooledD,
pooledH,
pooledW,
sizeZ,
sizeY,
sizeX,
strideD,
strideH,
strideW,
padD,
padH,
padW,
tgtData,
maxPoolIdxData,
tgtStride);
CHECK_SYNC("hl_maxpool3D_forward failed");
}
__global__ void KeMaxPool3DBackward(const int nthreads,
const real* outGrad,
const int channels,
const int depth,
const int height,
const int width,
const int pooledD,
const int pooledH,
const int pooledW,
const int sizeZ,
const int sizeY,
const int sizeX,
const int strideD,
const int strideH,
const int strideW,
const int padD,
const int padH,
const int padW,
real scaleA,
real scaleB,
real* targetGrad,
real* maxPoolIdxData,
const int outStride) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (nthreads);
index += blockDim.x * gridDim.x) {
int offsetW = index % width;
int offsetH = (index / width) % height;
int offsetD = (index / width / height) % depth;
int offsetC = (index / width / height / depth) % channels;
int frameNum = index / width / height / depth / channels;
int pdstart =
(offsetD + padD < sizeZ) ? 0 : (offsetD + padD - sizeZ) / strideD + 1;
int phstart =
(offsetH + padH < sizeY) ? 0 : (offsetH + padH - sizeY) / strideH + 1;
int pwstart =
(offsetW + padW < sizeX) ? 0 : (offsetW + padW - sizeX) / strideW + 1;
int pdend = min((offsetD + padD) / strideD + 1, pooledD);
int phend = min((offsetH + padH) / strideH + 1, pooledH);
int pwend = min((offsetW + padW) / strideW + 1, pooledW);
real gradient = 0;
outGrad += ((frameNum * channels + offsetC) * pooledD * pooledH * pooledW);
maxPoolIdxData +=
((frameNum * channels + offsetC) * pooledD * pooledH * pooledW);
for (int pd = pdstart; pd < pdend; ++pd) {
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (((offsetD * height + offsetH) * width + offsetW) ==
maxPoolIdxData[(pd * pooledH + ph) * pooledW + pw])
gradient += outGrad[(pd * pooledH + ph) * pooledW + pw];
}
}
}
targetGrad[index] = scaleA * gradient + scaleB * targetGrad[index];
}
}
void hl_maxpool3D_backward(const int frameCnt,
const real* outGrad,
const int channels,
const int depth,
const int height,
const int width,
const int outputD,
const int outputH,
const int outputW,
const int sizeZ,
const int sizeY,
const int sizeX,
const int strideD,
const int strideH,
const int strideW,
const int paddingD,
const int paddingH,
const int paddingW,
real scaleA,
real scaleB,
real* targetGrad,
real* maxPoolIdxData,
const int outStride) {
int num_kernels = depth * height * width * channels * frameCnt;
int blocks = (num_kernels + 1024 - 1) / 1024;
KeMaxPool3DBackward<<<blocks, 1024, 0, STREAM_DEFAULT>>>(num_kernels,
outGrad,
channels,
depth,
height,
width,
outputD,
outputH,
outputW,
sizeZ,
sizeY,
sizeX,
strideD,
strideH,
strideW,
paddingD,
paddingH,
paddingW,
scaleA,
scaleB,
targetGrad,
maxPoolIdxData,
outStride);
CHECK_SYNC("hl_maxpool3D_backward");
}
__global__ void KeAvgPool3DForward(const int nthreads,
const real* inputData,
const int channels,
const int depth,
const int height,
const int width,
const int pooledD,
const int pooledH,
const int pooledW,
const int sizeZ,
const int sizeY,
const int sizeX,
const int strideD,
const int strideH,
const int strideW,
const int padD,
const int padH,
const int padW,
real* tgtData,
const int tgtStride) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (nthreads);
index += blockDim.x * gridDim.x) {
int pw = index % pooledW;
int ph = (index / pooledW) % pooledH;
int pd = (index / pooledW / pooledH) % pooledD;
int c = (index / pooledW / pooledH / pooledD) % channels;
int frameNum = index / pooledW / pooledH / pooledD / channels;
int dstart = pd * strideD - padD;
int hstart = ph * strideH - padH;
int wstart = pw * strideW - padW;
int dend = min(dstart + sizeZ, depth);
int hend = min(hstart + sizeY, height);
int wend = min(wstart + sizeX, width);
dstart = max(dstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
int pool_size = (dend - dstart) * (hend - hstart) * (wend - wstart);
real aveval = 0;
inputData += (frameNum * channels + c) * depth * height * width;
for (int d = dstart; d < dend; ++d) {
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
aveval += inputData[(d * height + h) * width + w];
}
}
}
int tgtIndex =
index % (pooledW * pooledH * pooledD * channels) + frameNum * tgtStride;
tgtData[tgtIndex] = aveval / pool_size;
}
}
void hl_avgpool3D_forward(const int frameCnt,
const real* inputData,
const int channels,
const int depth,
const int height,
const int width,
const int pooledD,
const int pooledH,
const int pooledW,
const int sizeZ,
const int sizeY,
const int sizeX,
const int strideD,
const int strideH,
const int strideW,
const int paddingD,
const int paddingH,
const int paddingW,
real* tgtData,
const int tgtStride) {
int num_kernels = pooledD * pooledH * pooledW * channels * frameCnt;
int blocks = (num_kernels + 1024 - 1) / 1024;
KeAvgPool3DForward<<<blocks, 1024, 0, STREAM_DEFAULT>>>(num_kernels,
inputData,
channels,
depth,
height,
width,
pooledD,
pooledH,
pooledW,
sizeZ,
sizeY,
sizeX,
strideD,
strideH,
strideW,
paddingD,
paddingH,
paddingW,
tgtData,
tgtStride);
CHECK_SYNC("hl_avgpool3D_forward failed");
}
__global__ void KeAvgPool3DBackward(const int nthreads,
const real* outGrad,
const int channels,
const int depth,
const int height,
const int width,
const int pooledD,
const int pooledH,
const int pooledW,
const int sizeZ,
const int sizeY,
const int sizeX,
const int strideD,
const int strideH,
const int strideW,
const int padD,
const int padH,
const int padW,
real scaleA,
real scaleB,
real* tgtGrad,
const int outStride) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (nthreads);
index += blockDim.x * gridDim.x) {
int offsetW = index % width + padW;
int offsetH = (index / width) % height + padH;
int offsetD = (index / width / height) % depth + padD;
int offsetC = (index / width / height / depth) % channels;
int frameNum = index / width / height / depth / channels;
int pdstart = (offsetD < sizeZ) ? 0 : (offsetD - sizeZ) / strideD + 1;
int phstart = (offsetH < sizeY) ? 0 : (offsetH - sizeY) / strideH + 1;
int pwstart = (offsetW < sizeX) ? 0 : (offsetW - sizeX) / strideW + 1;
int pdend = min(offsetD / strideD + 1, pooledD);
int phend = min(offsetH / strideH + 1, pooledH);
int pwend = min(offsetW / strideW + 1, pooledW);
real gradient = 0;
outGrad += (frameNum * channels + offsetC) * pooledD * pooledH * pooledW;
for (int pd = pdstart; pd < pdend; ++pd) {
int dstart = pd * strideD - padD;
int dend = min(dstart + sizeZ, depth);
dstart = max(dstart, 0);
for (int ph = phstart; ph < phend; ++ph) {
int hstart = ph * strideH - padH;
int hend = min(hstart + sizeY, height);
hstart = max(hstart, 0);
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
int wstart = pw * strideW - padW;
int wend = min(wstart + sizeX, width);
wstart = max(wstart, 0);
int poolsize = (dend - dstart) * (hend - hstart) * (wend - wstart);
gradient += outGrad[(pd * pooledH + ph) * pooledW + pw] / poolsize;
}
}
}
tgtGrad[index] = scaleA * gradient + scaleB * tgtGrad[index];
}
}
void hl_avgpool3D_backward(const int frameCnt,
const real* outGrad,
const int channels,
const int depth,
const int height,
const int width,
const int outputD,
const int outputH,
const int outputW,
const int sizeZ,
const int sizeY,
const int sizeX,
const int strideD,
const int strideH,
const int strideW,
int paddingD,
int paddingH,
int paddingW,
real scaleA,
real scaleB,
real* backGrad,
const int outStride) {
int num_kernels = depth * height * width * channels * frameCnt;
int blocks = (num_kernels + 1024 - 1) / 1024;
KeAvgPool3DBackward<<<blocks, 1024, 0, STREAM_DEFAULT>>>(num_kernels,
outGrad,
channels,
depth,
height,
width,
outputD,
outputH,
outputW,
sizeZ,
sizeY,
sizeX,
strideD,
strideH,
strideW,
paddingD,
paddingH,
paddingW,
scaleA,
scaleB,
backGrad,
outStride);
CHECK_SYNC("hl_avgpool3D_backward failed");
}
__global__ void KeBilinearInterpFw(const real* in,
const size_t inImgH,
const size_t inImgW,
const size_t inputH,
const size_t inputW,
real* out,
const size_t outImgH,
const size_t outImgW,
const size_t outputH,
const size_t outputW,
const size_t numChannels,
const real ratioH,
const real ratioW) {
int nthreads = outputH * outputW;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < nthreads) {
int outIdH = tid / outputW;
int outIdW = tid % outputW;
int inImgSize = inputW / numChannels;
int outImgSize = outputW / numChannels;
int channelId = outIdW / outImgSize;
int outImgIdy = (outIdW % outImgSize) / outImgW;
int inImgIdy = ratioH * outImgIdy;
int hId = (inImgIdy < inImgH - 1) ? 1 : 0;
real h1lambda = ratioH * outImgIdy - inImgIdy;
real h2lambda = 1.f - h1lambda;
int outImgIdx = tid % outImgW;
int inImgIdx = ratioW * outImgIdx;
int wId = (inImgIdx < inImgW - 1) ? 1 : 0;
real w1lambda = ratioW * outImgIdx - inImgIdx;
real w2lambda = 1.f - w1lambda;
const real* inPos = &in[outIdH * inputW + channelId * inImgSize +
inImgIdy * inImgW + inImgIdx];
// bilinear interpolation
out[outIdH * outputW + outIdW] =
h2lambda * (w2lambda * inPos[0] + w1lambda * inPos[wId]) +
h1lambda * (w2lambda * inPos[hId * inImgW] +
w1lambda * inPos[hId * inImgW + wId]);
}
}
void hl_bilinear_forward(const real* inData,
const size_t inImgH,
const size_t inImgW,
const size_t inputH,
const size_t inputW,
real* outData,
const size_t outImgH,
const size_t outImgW,
const size_t outputH,
const size_t outputW,
const size_t numChannels,
const real ratioH,
const real ratioW) {
int threadNum = outputH * outputW;
int blocks = (threadNum + 1024 - 1) / 1024;
KeBilinearInterpFw<<<blocks, 1024, 0, STREAM_DEFAULT>>>(inData,
inImgH,
inImgW,
inputH,
inputW,
outData,
outImgH,
outImgW,
outputH,
outputW,
numChannels,
ratioH,
ratioW);
CHECK_SYNC("hl_bilinear_forward failed");
}
__global__ void KeBilinearInterpBw(real* in,
const size_t inImgH,
const size_t inImgW,
const size_t inputH,
const size_t inputW,
const real* out,
const size_t outImgH,
const size_t outImgW,
const size_t outputH,
const size_t outputW,
const size_t numChannels,
const real ratioH,
const real ratioW) {
int nthreads = outputH * outputW;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < nthreads) {
int outIdH = tid / outputW;
int outIdW = tid % outputW;
int inImgSize = inputW / numChannels;
int outImgSize = outputW / numChannels;
int channelId = outIdW / outImgSize;
int outImgIdy = (outIdW % outImgSize) / outImgW;
int inImgIdy = ratioH * outImgIdy;
int hId = (inImgIdy < inImgH - 1) ? 1 : 0;
real h1lambda = ratioH * outImgIdy - inImgIdy;
real h2lambda = 1.f - h1lambda;
int outImgIdx = tid % outImgW;
int inImgIdx = ratioW * outImgIdx;
int wId = (inImgIdx < inImgW - 1) ? 1 : 0;
real w1lambda = ratioW * outImgIdx - inImgIdx;
real w2lambda = 1.f - w1lambda;
real* inPos = &in[outIdH * inputW + channelId * inImgSize +
inImgIdy * inImgW + inImgIdx];
const real* outPos = &out[outIdH * outputW + outIdW];
paddle::paddleAtomicAdd(&inPos[0], h2lambda * w2lambda * outPos[0]);
paddle::paddleAtomicAdd(&inPos[wId], h2lambda * w1lambda * outPos[0]);
paddle::paddleAtomicAdd(&inPos[hId * inImgW],
h1lambda * w2lambda * outPos[0]);
paddle::paddleAtomicAdd(&inPos[hId * inImgW + wId],
h1lambda * w1lambda * outPos[0]);
}
}
void hl_bilinear_backward(real* inGrad,
const size_t inImgH,
const size_t inImgW,
const size_t inputH,
const size_t inputW,
const real* outGrad,
const size_t outImgH,
const size_t outImgW,
const size_t outputH,
const size_t outputW,
const size_t numChannels,
const real ratioH,
const real ratioW) {
int threadNum = outputH * outputW;
int blocks = (threadNum + 1024 - 1) / 1024;
KeBilinearInterpBw<<<blocks, 1024, 0, STREAM_DEFAULT>>>(inGrad,
inImgH,
inImgW,
inputH,
inputW,
outGrad,
outImgH,
outImgW,
outputH,
outputW,
numChannels,
ratioH,
ratioW);
CHECK_SYNC("hl_bilinear_backward failed");
}
/*
 * Maxout forward: for each output element, scan its `groups` candidate
 * activations and keep the maximum value plus the winning group's index.
 * One thread per output element; nthreads = batchSize * size.
 *  - inData : candidates for one (channel, feature) slot are spaced
 *             featLen apart (see data_idx below).
 *  - outData: per-element maxima (nthreads entries).
 *  - idData : winning group index per element, consumed by the backward
 *             pass to route gradients.
 */
__global__ void maxoutFpCompute(size_t nthreads,
                                const real* inData,
                                real* outData,
                                int* idData,
                                size_t size,
                                size_t featLen,
                                size_t groups) {
  // Use size_t for the flat thread id: the original `int index` was
  // compared against the size_t `nthreads` (mixed-sign comparison) and the
  // unsigned-int product blockIdx.x * blockDim.x could wrap on very large
  // launches.
  size_t index = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
  if (index < nthreads) {
    size_t batch_idx = index / size;
    size_t i = index % size;
    size_t channel_idx = i / featLen;
    size_t feat_idx = i % featLen;
    // First candidate of this element's group run in the input layout.
    size_t data_idx =
        (batch_idx * size + channel_idx * featLen) * groups + feat_idx;
    real max = inData[data_idx];
    int maxId = 0;
    for (size_t g = 1; g < groups; ++g) {
      real tmp = inData[data_idx + g * featLen];
      if (tmp > max) {
        max = tmp;
        maxId = (int)g;
      }
    }
    outData[index] = max;
    idData[index] = maxId;
  }
}
/*
 * Host launcher for maxoutFpCompute: one thread per output element
 * (batchSize * size in total), 1024 threads per block.
 */
void hl_maxout_forward(const real* inData,
                       real* outData,
                       int* idData,
                       size_t batchSize,
                       size_t size,
                       size_t featLen,
                       size_t groups) {
  const int kBlockSize = 1024;
  const int totalThreads = size * batchSize;
  const int gridBlocks = (totalThreads + kBlockSize - 1) / kBlockSize;
  maxoutFpCompute<<<gridBlocks, kBlockSize, 0, STREAM_DEFAULT>>>(
      totalThreads, inData, outData, idData, size, featLen, groups);
  CHECK_SYNC("hl_maxout_forward failed");
}
/*
 * Maxout backward: routes each output gradient to the input activation that
 * won the forward max, using the group index recorded in idData.
 * One thread per output element; nthreads = batchSize * size. Each output
 * element maps to a distinct gradIdx (batch/channel/feature decompose the
 * index uniquely), so the unsynchronized += does not race.
 */
__global__ void maxoutBpCompute(size_t nthreads,
                                real* inGrad,
                                const real* outGrad,
                                const int* idData,
                                size_t size,
                                size_t featLen,
                                size_t groups) {
  // size_t index avoids the original mixed-sign comparison with the size_t
  // `nthreads` and cannot wrap for launches larger than UINT_MAX threads.
  size_t index = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
  if (index < nthreads) {
    size_t batch_idx = index / size;
    size_t i = index % size;
    size_t channel_idx = i / featLen;
    size_t feat_idx = i % featLen;
    size_t newIndex = batch_idx * size;
    // Offset of the winning activation inside this sample's input block.
    size_t gradIdx =
        (channel_idx * groups + (idData + newIndex)[i]) * featLen + feat_idx;
    (inGrad + newIndex * groups)[gradIdx] += (outGrad + newIndex)[i];
  }
}
// Launch the maxout backward kernel: one thread per output-gradient
// element, 1024-thread blocks on the default stream.
void hl_maxout_backward(real* inGrad,
                        const real* outGrad,
                        const int* idData,
                        size_t batchSize,
                        size_t size,
                        size_t featLen,
                        size_t groups) {
  const int kThreadsPerBlock = 1024;
  const int totalThreads = size * batchSize;
  const int gridSize = (totalThreads + kThreadsPerBlock - 1) / kThreadsPerBlock;
  maxoutBpCompute<<<gridSize, kThreadsPerBlock, 0, STREAM_DEFAULT>>>(
      totalThreads, inGrad, outGrad, idData, size, featLen, groups);
  CHECK_SYNC("hl_maxout_backward failed");
}
|
ee3adfefce1a907187b5bb7f91c24b3bc2f10612.hip | // !!! This is a file automatically generated by hipify!!!
#include <optix.h>
#include <optixu/optixu_math_namespace.h>
#include "raydata.cuh"
#include "random_hip.cuh"
#include "camera.cuh"
using namespace optix;
// Optix program built-in indices
rtDeclareVariable(uint2, theLaunchIndex, rtLaunchIndex, );
rtDeclareVariable(uint2, theLaunchDim, rtLaunchDim, );
// Ray state variables
rtDeclareVariable(optix::Ray, theRay, rtCurrentRay, );
rtDeclareVariable(PerRayData, thePrd, rtPayload, );
// "Global" variables
rtDeclareVariable(rtObject, sysWorld, , );
rtBuffer<float3, 2> sysOutputBuffer;
// Ray Generation variables
rtDeclareVariable(int, numSamples, , );
rtDeclareVariable(int, maxRayDepth, , );
// Replace any NaN component of `radiance` with 0.
// NaN is the only float value for which x != x holds, so a
// self-inequality test detects it without extra headers.
inline __device__ float3 removeNaNs(float3 radiance)
{
    float3 cleaned;
    cleaned.x = (radiance.x != radiance.x) ? 0.0f : radiance.x;
    cleaned.y = (radiance.y != radiance.y) ? 0.0f : radiance.y;
    cleaned.z = (radiance.z != radiance.z) ? 0.0f : radiance.z;
    return cleaned;
}
// Trace `theRay` through the scene for up to maxRayDepth bounces and
// return the radiance for this sample.
//
// Bug fix: `seed` was only written back after the depth loop ran to
// completion; every early return (miss / finish / cancel) discarded the
// RNG state advanced through thePrd.seed by the hit programs, so later
// samples of the same pixel reused correlated random numbers. The seed is
// now synced back on every exit path.
inline __device__ float3 color(optix::Ray& theRay, uint32_t& seed)
{
    PerRayData thePrd;
    thePrd.seed = seed;

    // Running product of surface attenuations along the path.
    float3 sampleRadiance = make_float3(1.0f, 1.0f, 1.0f);

    for(int i = 0; i < maxRayDepth; i++)
    {
        rtTrace(sysWorld, theRay, thePrd);
        if (thePrd.scatterEvent == Ray_Miss)
        {
            // Ray escaped: shade with the vertical sky gradient.
            float3 unitDirection = normalize(theRay.direction);
            float t = 0.5f * (unitDirection.y + 1.0f);
            float3 missColor = (1.0f-t) * make_float3(1.0f, 1.0f, 1.0f)
                + t * make_float3(0.5f, 0.7f, 1.0f);
            seed = thePrd.seed;
            return sampleRadiance * missColor;
        }
        else if (thePrd.scatterEvent == Ray_Finish)
        {
            // Path terminated at the surface; apply its attenuation.
            seed = thePrd.seed;
            return sampleRadiance * thePrd.attenuation;
        }
        else if (thePrd.scatterEvent == Ray_Cancel)
        {
            // Error sentinel: loud magenta so cancelled paths are visible.
            seed = thePrd.seed;
            return make_float3(1000000.0f, 0.0f, 1000000.0f);
        }
        else
        {
            // Hit a scattering surface: attenuate and follow the new ray.
            sampleRadiance *= thePrd.attenuation;
            theRay = optix::make_Ray(
                thePrd.scatter.origin,
                thePrd.scatter.direction,
                0,
                0.001f,          // tmin epsilon avoids self-intersection
                RT_DEFAULT_MAX
            );
        }
    }

    // Depth exhausted without termination: contribute no radiance.
    seed = thePrd.seed;
    return make_float3(0.0f, 0.0f, 0.0f);
}
// Per-launch-index (per-pixel) entry point: shoot numSamples jittered
// camera rays, average their radiance, gamma-correct (gamma 2.0), and
// write the pixel into sysOutputBuffer.
//
// Cleanup: removed the dead lowerLeftCorner/horizontal/vertical/origin
// locals — ray setup is fully delegated to generateRay() from camera.cuh.
RT_PROGRAM void rayGenProgram()
{
    // Decorrelate pixels by hashing the flattened launch index.
    uint32_t seed = tea<64>(theLaunchDim.x * theLaunchIndex.y + theLaunchIndex.x, 0);

    float3 radiance = make_float3(0.0f, 0.0f, 0.0f);
    for (int n = 0; n < numSamples; n++)
    {
        // Jitter the sample position inside the pixel footprint.
        float s = float(theLaunchIndex.x+randf(seed)) / float(theLaunchDim.x);
        float t = float(theLaunchIndex.y+randf(seed)) / float(theLaunchDim.y);

        optix::Ray theRay = generateRay(s,t);
        float3 sampleRadiance = color(theRay, seed);

        // Drop NaN lanes so one bad sample cannot poison the average.
        sampleRadiance = removeNaNs(sampleRadiance);
        radiance += sampleRadiance;
    }
    radiance /= numSamples;

    // Gamma 2.0: store the square root of the linear radiance.
    radiance = make_float3(
        sqrtf(radiance.x),
        sqrtf(radiance.y),
        sqrtf(radiance.z)
    );
    sysOutputBuffer[theLaunchIndex] = radiance;
}
| ee3adfefce1a907187b5bb7f91c24b3bc2f10612.cu | #include <optix.h>
#include <optixu/optixu_math_namespace.h>
#include "raydata.cuh"
#include "random.cuh"
#include "camera.cuh"
using namespace optix;
// Optix program built-in indices
rtDeclareVariable(uint2, theLaunchIndex, rtLaunchIndex, );
rtDeclareVariable(uint2, theLaunchDim, rtLaunchDim, );
// Ray state variables
rtDeclareVariable(optix::Ray, theRay, rtCurrentRay, );
rtDeclareVariable(PerRayData, thePrd, rtPayload, );
// "Global" variables
rtDeclareVariable(rtObject, sysWorld, , );
rtBuffer<float3, 2> sysOutputBuffer;
// Ray Generation variables
rtDeclareVariable(int, numSamples, , );
rtDeclareVariable(int, maxRayDepth, , );
// Zero out any NaN component of `radiance`.
// Relies on NaN being the only float for which x != x is true.
inline __device__ float3 removeNaNs(float3 radiance)
{
    float3 cleaned;
    cleaned.x = (radiance.x != radiance.x) ? 0.0f : radiance.x;
    cleaned.y = (radiance.y != radiance.y) ? 0.0f : radiance.y;
    cleaned.z = (radiance.z != radiance.z) ? 0.0f : radiance.z;
    return cleaned;
}
// Trace `theRay` through the scene for up to maxRayDepth bounces and
// return the radiance for this sample.
//
// Bug fix: `seed` was only written back after the depth loop ran to
// completion; every early return (miss / finish / cancel) discarded the
// RNG state advanced through thePrd.seed by the hit programs, so later
// samples of the same pixel reused correlated random numbers. The seed is
// now synced back on every exit path.
inline __device__ float3 color(optix::Ray& theRay, uint32_t& seed)
{
    PerRayData thePrd;
    thePrd.seed = seed;

    // Running product of surface attenuations along the path.
    float3 sampleRadiance = make_float3(1.0f, 1.0f, 1.0f);

    for(int i = 0; i < maxRayDepth; i++)
    {
        rtTrace(sysWorld, theRay, thePrd);
        if (thePrd.scatterEvent == Ray_Miss)
        {
            // Ray escaped: shade with the vertical sky gradient.
            float3 unitDirection = normalize(theRay.direction);
            float t = 0.5f * (unitDirection.y + 1.0f);
            float3 missColor = (1.0f-t) * make_float3(1.0f, 1.0f, 1.0f)
                + t * make_float3(0.5f, 0.7f, 1.0f);
            seed = thePrd.seed;
            return sampleRadiance * missColor;
        }
        else if (thePrd.scatterEvent == Ray_Finish)
        {
            // Path terminated at the surface; apply its attenuation.
            seed = thePrd.seed;
            return sampleRadiance * thePrd.attenuation;
        }
        else if (thePrd.scatterEvent == Ray_Cancel)
        {
            // Error sentinel: loud magenta so cancelled paths are visible.
            seed = thePrd.seed;
            return make_float3(1000000.0f, 0.0f, 1000000.0f);
        }
        else
        {
            // Hit a scattering surface: attenuate and follow the new ray.
            sampleRadiance *= thePrd.attenuation;
            theRay = optix::make_Ray(
                thePrd.scatter.origin,
                thePrd.scatter.direction,
                0,
                0.001f,          // tmin epsilon avoids self-intersection
                RT_DEFAULT_MAX
            );
        }
    }

    // Depth exhausted without termination: contribute no radiance.
    seed = thePrd.seed;
    return make_float3(0.0f, 0.0f, 0.0f);
}
// Per-launch-index (per-pixel) entry point: shoot numSamples jittered
// camera rays, average their radiance, gamma-correct (gamma 2.0), and
// write the pixel into sysOutputBuffer.
//
// Cleanup: removed the dead lowerLeftCorner/horizontal/vertical/origin
// locals — ray setup is fully delegated to generateRay() from camera.cuh.
RT_PROGRAM void rayGenProgram()
{
    // Decorrelate pixels by hashing the flattened launch index.
    uint32_t seed = tea<64>(theLaunchDim.x * theLaunchIndex.y + theLaunchIndex.x, 0);

    float3 radiance = make_float3(0.0f, 0.0f, 0.0f);
    for (int n = 0; n < numSamples; n++)
    {
        // Jitter the sample position inside the pixel footprint.
        float s = float(theLaunchIndex.x+randf(seed)) / float(theLaunchDim.x);
        float t = float(theLaunchIndex.y+randf(seed)) / float(theLaunchDim.y);

        optix::Ray theRay = generateRay(s,t);
        float3 sampleRadiance = color(theRay, seed);

        // Drop NaN lanes so one bad sample cannot poison the average.
        sampleRadiance = removeNaNs(sampleRadiance);
        radiance += sampleRadiance;
    }
    radiance /= numSamples;

    // Gamma 2.0: store the square root of the linear radiance.
    radiance = make_float3(
        sqrtf(radiance.x),
        sqrtf(radiance.y),
        sqrtf(radiance.z)
    );
    sysOutputBuffer[theLaunchIndex] = radiance;
}
|
652822f6889d47a46e0053084db7ca43632f05be.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2017 tsooBGX contributors
*/
#include "./host_device_vector.h"
#include <thrust/fill.h>
#include <tsoobgx/data.h>
#include <algorithm>
#include <cstdint>
#include <mutex>
#include "device_helpers_hip.cuh"
namespace tsoobgx {
// the handler to call instead of hipSetDevice; only used for testing
static void (*cudaSetDeviceHandler)(int) = nullptr; // NOLINT
// Install a replacement for hipSetDevice; only used by tests to stub out
// real device selection. Pass nullptr to restore the default behavior.
void SetCudaSetDeviceHandler(void (*handler)(int)) {
  cudaSetDeviceHandler = handler;
}
// wrapper over access with useful methods
// Tracks the access level (none / read / write) currently granted for one
// copy of the data, with helpers to raise, lower, and invert that level.
//
// Bug fix: hipify mangled std::max/std::min into ::max/::min, which name
// no declared function at global scope (the original CUDA source uses
// std::max/std::min); restored the std:: qualification.
class Permissions {
  GPUAccess access_;
  explicit Permissions(GPUAccess access) : access_{access} {}

 public:
  Permissions() : access_{GPUAccess::kNone} {}
  explicit Permissions(bool perm)
      : access_(perm ? GPUAccess::kWrite : GPUAccess::kNone) {}

  // Write implies read, hence the ordering comparisons on the enum.
  bool CanRead() const { return access_ >= kRead; }
  bool CanWrite() const { return access_ == kWrite; }
  bool CanAccess(GPUAccess access) const { return access_ >= access; }
  // Raise the level to at least `access`.
  void Grant(GPUAccess access) { access_ = std::max(access_, access); }
  // Lower the level so it no longer overlaps the complement of compl_access.
  void DenyComplementary(GPUAccess compl_access) {
    access_ = std::min(access_, GPUAccess::kWrite - compl_access);
  }
  // The level granting exactly what this one does not.
  Permissions Complementary() const {
    return Permissions(GPUAccess::kWrite - access_);
  }
};
template <typename T>
struct HostDeviceVectorImpl {
struct DeviceShard {
DeviceShard()
: proper_size_{0}, device_{-1}, start_{0}, perm_d_{false},
cached_size_{static_cast<size_t>(~0)}, vec_{nullptr} {}
void Init(HostDeviceVectorImpl<T>* vec, int device) {
if (vec_ == nullptr) { vec_ = vec; }
CHECK_EQ(vec, vec_);
device_ = device;
LazyResize(vec_->Size());
perm_d_ = vec_->perm_h_.Complementary();
}
void Init(HostDeviceVectorImpl<T>* vec, const DeviceShard& other) {
if (vec_ == nullptr) { vec_ = vec; }
CHECK_EQ(vec, vec_);
device_ = other.device_;
cached_size_ = other.cached_size_;
start_ = other.start_;
proper_size_ = other.proper_size_;
SetDevice();
data_.resize(other.data_.size());
perm_d_ = other.perm_d_;
}
void ScatterFrom(const T* begin) {
// TODO(canonizer): avoid full copy of host data
LazySyncDevice(GPUAccess::kWrite);
SetDevice();
dh::safe_cuda(hipMemcpyAsync(data_.data().get(), begin + start_,
data_.size() * sizeof(T), hipMemcpyDefault));
}
void GatherTo(thrust::device_ptr<T> begin) {
LazySyncDevice(GPUAccess::kRead);
SetDevice();
dh::safe_cuda(hipMemcpyAsync(begin.get() + start_, data_.data().get(),
proper_size_ * sizeof(T), hipMemcpyDefault));
}
void Fill(T v) {
// TODO(canonizer): avoid full copy of host data
LazySyncDevice(GPUAccess::kWrite);
SetDevice();
thrust::fill(data_.begin(), data_.end(), v);
}
void Copy(DeviceShard* other) {
// TODO(canonizer): avoid full copy of host data for this (but not for other)
LazySyncDevice(GPUAccess::kWrite);
other->LazySyncDevice(GPUAccess::kRead);
SetDevice();
dh::safe_cuda(hipMemcpyAsync(data_.data().get(), other->data_.data().get(),
data_.size() * sizeof(T), hipMemcpyDefault));
}
void LazySyncHost(GPUAccess access) {
SetDevice();
dh::safe_cuda(hipMemcpy(vec_->data_h_.data() + start_,
data_.data().get(), proper_size_ * sizeof(T),
hipMemcpyDeviceToHost));
perm_d_.DenyComplementary(access);
}
void LazyResize(size_t new_size) {
if (new_size == cached_size_) { return; }
// resize is required
int ndevices = vec_->distribution_.devices_.Size();
int device_index = vec_->distribution_.devices_.Index(device_);
start_ = vec_->distribution_.ShardStart(new_size, device_index);
proper_size_ = vec_->distribution_.ShardProperSize(new_size, device_index);
// The size on this device.
size_t size_d = vec_->distribution_.ShardSize(new_size, device_index);
SetDevice();
data_.resize(size_d);
cached_size_ = new_size;
}
void LazySyncDevice(GPUAccess access) {
if (perm_d_.CanAccess(access)) { return; }
if (perm_d_.CanRead()) {
// deny read to the host
perm_d_.Grant(access);
std::lock_guard<std::mutex> lock(vec_->mutex_);
vec_->perm_h_.DenyComplementary(access);
return;
}
// data is on the host
size_t size_h = vec_->data_h_.size();
LazyResize(size_h);
SetDevice();
dh::safe_cuda(
hipMemcpy(data_.data().get(), vec_->data_h_.data() + start_,
data_.size() * sizeof(T), hipMemcpyHostToDevice));
perm_d_.Grant(access);
std::lock_guard<std::mutex> lock(vec_->mutex_);
vec_->perm_h_.DenyComplementary(access);
vec_->size_d_ = size_h;
}
void SetDevice() {
if (cudaSetDeviceHandler == nullptr) {
dh::safe_cuda(hipSetDevice(device_));
} else {
(*cudaSetDeviceHandler)(device_);
}
}
T* Raw() { return data_.data().get(); }
size_t Start() const { return start_; }
size_t DataSize() const { return data_.size(); }
Permissions& Perm() { return perm_d_; }
Permissions const& Perm() const { return perm_d_; }
private:
int device_;
thrust::device_vector<T> data_;
// cached vector size
size_t cached_size_;
size_t start_;
// size of the portion to copy back to the host
size_t proper_size_;
Permissions perm_d_;
HostDeviceVectorImpl<T>* vec_;
};
HostDeviceVectorImpl(size_t size, T v, const GPUDistribution &distribution)
: distribution_(distribution), perm_h_(distribution.IsEmpty()), size_d_(0) {
if (!distribution_.IsEmpty()) {
size_d_ = size;
InitShards();
Fill(v);
} else {
data_h_.resize(size, v);
}
}
// required, as a new std::mutex has to be created
HostDeviceVectorImpl(const HostDeviceVectorImpl<T>& other)
: data_h_(other.data_h_), perm_h_(other.perm_h_), size_d_(other.size_d_),
distribution_(other.distribution_), mutex_() {
shards_.resize(other.shards_.size());
dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) {
shard.Init(this, other.shards_.at(i));
});
}
// Initializer can be std::vector<T> or std::initializer_list<T>
template <class Initializer>
HostDeviceVectorImpl(const Initializer& init, const GPUDistribution &distribution)
: distribution_(distribution), perm_h_(distribution.IsEmpty()), size_d_(0) {
if (!distribution_.IsEmpty()) {
size_d_ = init.size();
InitShards();
Copy(init);
} else {
data_h_ = init;
}
}
void InitShards() {
int ndevices = distribution_.devices_.Size();
shards_.resize(ndevices);
dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) {
shard.Init(this, distribution_.devices_.DeviceId(i));
});
}
size_t Size() const { return perm_h_.CanRead() ? data_h_.size() : size_d_; }
GPUSet Devices() const { return distribution_.devices_; }
const GPUDistribution& Distribution() const { return distribution_; }
T* DevicePointer(int device) {
CHECK(distribution_.devices_.Contains(device));
LazySyncDevice(device, GPUAccess::kWrite);
return shards_.at(distribution_.devices_.Index(device)).Raw();
}
const T* ConstDevicePointer(int device) {
CHECK(distribution_.devices_.Contains(device));
LazySyncDevice(device, GPUAccess::kRead);
return shards_.at(distribution_.devices_.Index(device)).Raw();
}
common::Span<T> DeviceSpan(int device) {
GPUSet devices = distribution_.devices_;
CHECK(devices.Contains(device));
LazySyncDevice(device, GPUAccess::kWrite);
return {shards_.at(devices.Index(device)).Raw(),
static_cast<typename common::Span<T>::index_type>(DeviceSize(device))};
}
common::Span<const T> ConstDeviceSpan(int device) {
GPUSet devices = distribution_.devices_;
CHECK(devices.Contains(device));
LazySyncDevice(device, GPUAccess::kRead);
using SpanInd = typename common::Span<const T>::index_type;
return {shards_.at(devices.Index(device)).Raw(),
static_cast<SpanInd>(DeviceSize(device))};
}
size_t DeviceSize(int device) {
CHECK(distribution_.devices_.Contains(device));
LazySyncDevice(device, GPUAccess::kRead);
return shards_.at(distribution_.devices_.Index(device)).DataSize();
}
size_t DeviceStart(int device) {
CHECK(distribution_.devices_.Contains(device));
LazySyncDevice(device, GPUAccess::kRead);
return shards_.at(distribution_.devices_.Index(device)).Start();
}
thrust::device_ptr<T> tbegin(int device) { // NOLINT
return thrust::device_ptr<T>(DevicePointer(device));
}
thrust::device_ptr<const T> tcbegin(int device) { // NOLINT
return thrust::device_ptr<const T>(ConstDevicePointer(device));
}
thrust::device_ptr<T> tend(int device) { // NOLINT
return tbegin(device) + DeviceSize(device);
}
thrust::device_ptr<const T> tcend(int device) { // NOLINT
return tcbegin(device) + DeviceSize(device);
}
void ScatterFrom(thrust::device_ptr<const T> begin, thrust::device_ptr<const T> end) {
CHECK_EQ(end - begin, Size());
if (perm_h_.CanWrite()) {
dh::safe_cuda(hipMemcpy(data_h_.data(), begin.get(),
(end - begin) * sizeof(T),
hipMemcpyDeviceToHost));
} else {
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
shard.ScatterFrom(begin.get());
});
}
}
void GatherTo(thrust::device_ptr<T> begin, thrust::device_ptr<T> end) {
CHECK_EQ(end - begin, Size());
if (perm_h_.CanWrite()) {
dh::safe_cuda(hipMemcpy(begin.get(), data_h_.data(),
data_h_.size() * sizeof(T),
hipMemcpyHostToDevice));
} else {
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.GatherTo(begin); });
}
}
void Fill(T v) { // NOLINT
if (perm_h_.CanWrite()) {
std::fill(data_h_.begin(), data_h_.end(), v);
} else {
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.Fill(v); });
}
}
void Copy(HostDeviceVectorImpl<T>* other) {
CHECK_EQ(Size(), other->Size());
// Data is on host.
if (perm_h_.CanWrite() && other->perm_h_.CanWrite()) {
std::copy(other->data_h_.begin(), other->data_h_.end(), data_h_.begin());
return;
}
// Data is on device;
if (distribution_ != other->distribution_) {
distribution_ = GPUDistribution();
Shard(other->Distribution());
size_d_ = other->size_d_;
}
dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) {
shard.Copy(&other->shards_.at(i));
});
}
void Copy(const std::vector<T>& other) {
CHECK_EQ(Size(), other.size());
if (perm_h_.CanWrite()) {
std::copy(other.begin(), other.end(), data_h_.begin());
} else {
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
shard.ScatterFrom(other.data());
});
}
}
void Copy(std::initializer_list<T> other) {
CHECK_EQ(Size(), other.size());
if (perm_h_.CanWrite()) {
std::copy(other.begin(), other.end(), data_h_.begin());
} else {
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
shard.ScatterFrom(other.begin());
});
}
}
std::vector<T>& HostVector() {
LazySyncHost(GPUAccess::kWrite);
return data_h_;
}
const std::vector<T>& ConstHostVector() {
LazySyncHost(GPUAccess::kRead);
return data_h_;
}
void Shard(const GPUDistribution& distribution) {
if (distribution_ == distribution) { return; }
CHECK(distribution_.IsEmpty());
distribution_ = distribution;
InitShards();
}
void Shard(GPUSet new_devices) {
if (distribution_.Devices() == new_devices) { return; }
Shard(GPUDistribution::Block(new_devices));
}
void Reshard(const GPUDistribution &distribution) {
if (distribution_ == distribution) { return; }
LazySyncHost(GPUAccess::kWrite);
distribution_ = distribution;
shards_.clear();
InitShards();
}
void Resize(size_t new_size, T v) {
if (new_size == Size()) { return; }
if (distribution_.IsFixedSize()) {
CHECK_EQ(new_size, distribution_.offsets_.back());
}
if (Size() == 0 && !distribution_.IsEmpty()) {
// fast on-device resize
perm_h_ = Permissions(false);
size_d_ = new_size;
InitShards();
Fill(v);
} else {
// resize on host
LazySyncHost(GPUAccess::kWrite);
data_h_.resize(new_size, v);
}
}
void LazySyncHost(GPUAccess access) {
if (perm_h_.CanAccess(access)) { return; }
if (perm_h_.CanRead()) {
// data is present, just need to deny access to the device
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
shard.Perm().DenyComplementary(access);
});
perm_h_.Grant(access);
return;
}
if (data_h_.size() != size_d_) { data_h_.resize(size_d_); }
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
shard.LazySyncHost(access);
});
perm_h_.Grant(access);
}
void LazySyncDevice(int device, GPUAccess access) {
GPUSet devices = distribution_.Devices();
CHECK(devices.Contains(device));
shards_.at(devices.Index(device)).LazySyncDevice(access);
}
bool HostCanAccess(GPUAccess access) { return perm_h_.CanAccess(access); }
bool DeviceCanAccess(int device, GPUAccess access) {
GPUSet devices = distribution_.Devices();
if (!devices.Contains(device)) { return false; }
return shards_.at(devices.Index(device)).Perm().CanAccess(access);
}
private:
std::vector<T> data_h_;
Permissions perm_h_;
// the total size of the data stored on the devices
size_t size_d_;
GPUDistribution distribution_;
// protects size_d_ and perm_h_ when updated from multiple threads
std::mutex mutex_;
std::vector<DeviceShard> shards_;
};
template <typename T>
HostDeviceVector<T>::HostDeviceVector
(size_t size, T v, const GPUDistribution &distribution) : impl_(nullptr) {
impl_ = new HostDeviceVectorImpl<T>(size, v, distribution);
}
template <typename T>
HostDeviceVector<T>::HostDeviceVector
(std::initializer_list<T> init, const GPUDistribution &distribution) : impl_(nullptr) {
impl_ = new HostDeviceVectorImpl<T>(init, distribution);
}
template <typename T>
HostDeviceVector<T>::HostDeviceVector
(const std::vector<T>& init, const GPUDistribution &distribution) : impl_(nullptr) {
impl_ = new HostDeviceVectorImpl<T>(init, distribution);
}
template <typename T>
HostDeviceVector<T>::HostDeviceVector(const HostDeviceVector<T>& other)
: impl_(nullptr) {
impl_ = new HostDeviceVectorImpl<T>(*other.impl_);
}
// Copy assignment: rebuilds the implementation from `other`'s state.
// The replacement impl is fully constructed before the old one is deleted,
// so a throwing copy leaves *this untouched (strong exception guarantee).
template <typename T>
HostDeviceVector<T>& HostDeviceVector<T>::operator=
(const HostDeviceVector<T>& other) {
  if (this == &other) { return *this; }
  std::unique_ptr<HostDeviceVectorImpl<T>> newImpl(new HostDeviceVectorImpl<T>(*other.impl_));
  delete impl_;
  impl_ = newImpl.release();
  return *this;
}
template <typename T>
HostDeviceVector<T>::~HostDeviceVector() {
delete impl_;
impl_ = nullptr;
}
template <typename T>
size_t HostDeviceVector<T>::Size() const { return impl_->Size(); }
template <typename T>
GPUSet HostDeviceVector<T>::Devices() const { return impl_->Devices(); }
template <typename T>
const GPUDistribution& HostDeviceVector<T>::Distribution() const {
return impl_->Distribution();
}
template <typename T>
T* HostDeviceVector<T>::DevicePointer(int device) {
return impl_->DevicePointer(device);
}
template <typename T>
const T* HostDeviceVector<T>::ConstDevicePointer(int device) const {
return impl_->ConstDevicePointer(device);
}
template <typename T>
common::Span<T> HostDeviceVector<T>::DeviceSpan(int device) {
return impl_->DeviceSpan(device);
}
template <typename T>
common::Span<const T> HostDeviceVector<T>::ConstDeviceSpan(int device) const {
return impl_->ConstDeviceSpan(device);
}
template <typename T>
size_t HostDeviceVector<T>::DeviceStart(int device) const {
return impl_->DeviceStart(device);
}
template <typename T>
size_t HostDeviceVector<T>::DeviceSize(int device) const {
return impl_->DeviceSize(device);
}
template <typename T>
thrust::device_ptr<T> HostDeviceVector<T>::tbegin(int device) { // NOLINT
return impl_->tbegin(device);
}
template <typename T>
thrust::device_ptr<const T> HostDeviceVector<T>::tcbegin(int device) const { // NOLINT
return impl_->tcbegin(device);
}
template <typename T>
thrust::device_ptr<T> HostDeviceVector<T>::tend(int device) { // NOLINT
return impl_->tend(device);
}
template <typename T>
thrust::device_ptr<const T> HostDeviceVector<T>::tcend(int device) const { // NOLINT
return impl_->tcend(device);
}
template <typename T>
void HostDeviceVector<T>::ScatterFrom
(thrust::device_ptr<const T> begin, thrust::device_ptr<const T> end) {
impl_->ScatterFrom(begin, end);
}
template <typename T>
void HostDeviceVector<T>::GatherTo
(thrust::device_ptr<T> begin, thrust::device_ptr<T> end) const {
impl_->GatherTo(begin, end);
}
template <typename T>
void HostDeviceVector<T>::Fill(T v) {
impl_->Fill(v);
}
template <typename T>
void HostDeviceVector<T>::Copy(const HostDeviceVector<T>& other) {
impl_->Copy(other.impl_);
}
template <typename T>
void HostDeviceVector<T>::Copy(const std::vector<T>& other) {
impl_->Copy(other);
}
template <typename T>
void HostDeviceVector<T>::Copy(std::initializer_list<T> other) {
impl_->Copy(other);
}
template <typename T>
std::vector<T>& HostDeviceVector<T>::HostVector() { return impl_->HostVector(); }
template <typename T>
const std::vector<T>& HostDeviceVector<T>::ConstHostVector() const {
return impl_->ConstHostVector();
}
template <typename T>
bool HostDeviceVector<T>::HostCanAccess(GPUAccess access) const {
return impl_->HostCanAccess(access);
}
template <typename T>
bool HostDeviceVector<T>::DeviceCanAccess(int device, GPUAccess access) const {
return impl_->DeviceCanAccess(device, access);
}
template <typename T>
void HostDeviceVector<T>::Shard(GPUSet new_devices) const {
impl_->Shard(new_devices);
}
template <typename T>
void HostDeviceVector<T>::Shard(const GPUDistribution &distribution) const {
impl_->Shard(distribution);
}
template <typename T>
void HostDeviceVector<T>::Reshard(const GPUDistribution &distribution) {
impl_->Reshard(distribution);
}
template <typename T>
void HostDeviceVector<T>::Resize(size_t new_size, T v) {
impl_->Resize(new_size, v);
}
// explicit instantiations are required, as HostDeviceVector isn't header-only
template class HostDeviceVector<bst_float>;
template class HostDeviceVector<GradientPair>;
template class HostDeviceVector<int>;
template class HostDeviceVector<Entry>;
template class HostDeviceVector<size_t>;
} // namespace tsoobgx
| 652822f6889d47a46e0053084db7ca43632f05be.cu | /*!
* Copyright 2017 tsooBGX contributors
*/
#include "./host_device_vector.h"
#include <thrust/fill.h>
#include <tsoobgx/data.h>
#include <algorithm>
#include <cstdint>
#include <mutex>
#include "./device_helpers.cuh"
namespace tsoobgx {
// the handler to call instead of cudaSetDevice; only used for testing
static void (*cudaSetDeviceHandler)(int) = nullptr; // NOLINT
// Install a replacement for cudaSetDevice; only used by tests to stub out
// real device selection. Pass nullptr to restore the default behavior.
void SetCudaSetDeviceHandler(void (*handler)(int)) {
  cudaSetDeviceHandler = handler;
}
// wrapper over access with useful methods
class Permissions {
GPUAccess access_;
explicit Permissions(GPUAccess access) : access_{access} {}
public:
Permissions() : access_{GPUAccess::kNone} {}
explicit Permissions(bool perm)
: access_(perm ? GPUAccess::kWrite : GPUAccess::kNone) {}
bool CanRead() const { return access_ >= kRead; }
bool CanWrite() const { return access_ == kWrite; }
bool CanAccess(GPUAccess access) const { return access_ >= access; }
void Grant(GPUAccess access) { access_ = std::max(access_, access); }
void DenyComplementary(GPUAccess compl_access) {
access_ = std::min(access_, GPUAccess::kWrite - compl_access);
}
Permissions Complementary() const {
return Permissions(GPUAccess::kWrite - access_);
}
};
template <typename T>
struct HostDeviceVectorImpl {
struct DeviceShard {
DeviceShard()
: proper_size_{0}, device_{-1}, start_{0}, perm_d_{false},
cached_size_{static_cast<size_t>(~0)}, vec_{nullptr} {}
void Init(HostDeviceVectorImpl<T>* vec, int device) {
if (vec_ == nullptr) { vec_ = vec; }
CHECK_EQ(vec, vec_);
device_ = device;
LazyResize(vec_->Size());
perm_d_ = vec_->perm_h_.Complementary();
}
void Init(HostDeviceVectorImpl<T>* vec, const DeviceShard& other) {
if (vec_ == nullptr) { vec_ = vec; }
CHECK_EQ(vec, vec_);
device_ = other.device_;
cached_size_ = other.cached_size_;
start_ = other.start_;
proper_size_ = other.proper_size_;
SetDevice();
data_.resize(other.data_.size());
perm_d_ = other.perm_d_;
}
void ScatterFrom(const T* begin) {
// TODO(canonizer): avoid full copy of host data
LazySyncDevice(GPUAccess::kWrite);
SetDevice();
dh::safe_cuda(cudaMemcpyAsync(data_.data().get(), begin + start_,
data_.size() * sizeof(T), cudaMemcpyDefault));
}
void GatherTo(thrust::device_ptr<T> begin) {
LazySyncDevice(GPUAccess::kRead);
SetDevice();
dh::safe_cuda(cudaMemcpyAsync(begin.get() + start_, data_.data().get(),
proper_size_ * sizeof(T), cudaMemcpyDefault));
}
void Fill(T v) {
// TODO(canonizer): avoid full copy of host data
LazySyncDevice(GPUAccess::kWrite);
SetDevice();
thrust::fill(data_.begin(), data_.end(), v);
}
void Copy(DeviceShard* other) {
// TODO(canonizer): avoid full copy of host data for this (but not for other)
LazySyncDevice(GPUAccess::kWrite);
other->LazySyncDevice(GPUAccess::kRead);
SetDevice();
dh::safe_cuda(cudaMemcpyAsync(data_.data().get(), other->data_.data().get(),
data_.size() * sizeof(T), cudaMemcpyDefault));
}
void LazySyncHost(GPUAccess access) {
SetDevice();
dh::safe_cuda(cudaMemcpy(vec_->data_h_.data() + start_,
data_.data().get(), proper_size_ * sizeof(T),
cudaMemcpyDeviceToHost));
perm_d_.DenyComplementary(access);
}
void LazyResize(size_t new_size) {
if (new_size == cached_size_) { return; }
// resize is required
int ndevices = vec_->distribution_.devices_.Size();
int device_index = vec_->distribution_.devices_.Index(device_);
start_ = vec_->distribution_.ShardStart(new_size, device_index);
proper_size_ = vec_->distribution_.ShardProperSize(new_size, device_index);
// The size on this device.
size_t size_d = vec_->distribution_.ShardSize(new_size, device_index);
SetDevice();
data_.resize(size_d);
cached_size_ = new_size;
}
void LazySyncDevice(GPUAccess access) {
if (perm_d_.CanAccess(access)) { return; }
if (perm_d_.CanRead()) {
// deny read to the host
perm_d_.Grant(access);
std::lock_guard<std::mutex> lock(vec_->mutex_);
vec_->perm_h_.DenyComplementary(access);
return;
}
// data is on the host
size_t size_h = vec_->data_h_.size();
LazyResize(size_h);
SetDevice();
dh::safe_cuda(
cudaMemcpy(data_.data().get(), vec_->data_h_.data() + start_,
data_.size() * sizeof(T), cudaMemcpyHostToDevice));
perm_d_.Grant(access);
std::lock_guard<std::mutex> lock(vec_->mutex_);
vec_->perm_h_.DenyComplementary(access);
vec_->size_d_ = size_h;
}
void SetDevice() {
if (cudaSetDeviceHandler == nullptr) {
dh::safe_cuda(cudaSetDevice(device_));
} else {
(*cudaSetDeviceHandler)(device_);
}
}
T* Raw() { return data_.data().get(); }
size_t Start() const { return start_; }
size_t DataSize() const { return data_.size(); }
Permissions& Perm() { return perm_d_; }
Permissions const& Perm() const { return perm_d_; }
private:
int device_;
thrust::device_vector<T> data_;
// cached vector size
size_t cached_size_;
size_t start_;
// size of the portion to copy back to the host
size_t proper_size_;
Permissions perm_d_;
HostDeviceVectorImpl<T>* vec_;
};
HostDeviceVectorImpl(size_t size, T v, const GPUDistribution &distribution)
: distribution_(distribution), perm_h_(distribution.IsEmpty()), size_d_(0) {
if (!distribution_.IsEmpty()) {
size_d_ = size;
InitShards();
Fill(v);
} else {
data_h_.resize(size, v);
}
}
// required, as a new std::mutex has to be created
HostDeviceVectorImpl(const HostDeviceVectorImpl<T>& other)
: data_h_(other.data_h_), perm_h_(other.perm_h_), size_d_(other.size_d_),
distribution_(other.distribution_), mutex_() {
shards_.resize(other.shards_.size());
dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) {
shard.Init(this, other.shards_.at(i));
});
}
// Initializer can be std::vector<T> or std::initializer_list<T>
template <class Initializer>
HostDeviceVectorImpl(const Initializer& init, const GPUDistribution &distribution)
: distribution_(distribution), perm_h_(distribution.IsEmpty()), size_d_(0) {
if (!distribution_.IsEmpty()) {
size_d_ = init.size();
InitShards();
Copy(init);
} else {
data_h_ = init;
}
}
void InitShards() {
int ndevices = distribution_.devices_.Size();
shards_.resize(ndevices);
dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) {
shard.Init(this, distribution_.devices_.DeviceId(i));
});
}
size_t Size() const { return perm_h_.CanRead() ? data_h_.size() : size_d_; }
GPUSet Devices() const { return distribution_.devices_; }
const GPUDistribution& Distribution() const { return distribution_; }
T* DevicePointer(int device) {
CHECK(distribution_.devices_.Contains(device));
LazySyncDevice(device, GPUAccess::kWrite);
return shards_.at(distribution_.devices_.Index(device)).Raw();
}
const T* ConstDevicePointer(int device) {
CHECK(distribution_.devices_.Contains(device));
LazySyncDevice(device, GPUAccess::kRead);
return shards_.at(distribution_.devices_.Index(device)).Raw();
}
common::Span<T> DeviceSpan(int device) {
GPUSet devices = distribution_.devices_;
CHECK(devices.Contains(device));
LazySyncDevice(device, GPUAccess::kWrite);
return {shards_.at(devices.Index(device)).Raw(),
static_cast<typename common::Span<T>::index_type>(DeviceSize(device))};
}
common::Span<const T> ConstDeviceSpan(int device) {
GPUSet devices = distribution_.devices_;
CHECK(devices.Contains(device));
LazySyncDevice(device, GPUAccess::kRead);
using SpanInd = typename common::Span<const T>::index_type;
return {shards_.at(devices.Index(device)).Raw(),
static_cast<SpanInd>(DeviceSize(device))};
}
size_t DeviceSize(int device) {
CHECK(distribution_.devices_.Contains(device));
LazySyncDevice(device, GPUAccess::kRead);
return shards_.at(distribution_.devices_.Index(device)).DataSize();
}
size_t DeviceStart(int device) {
CHECK(distribution_.devices_.Contains(device));
LazySyncDevice(device, GPUAccess::kRead);
return shards_.at(distribution_.devices_.Index(device)).Start();
}
thrust::device_ptr<T> tbegin(int device) { // NOLINT
return thrust::device_ptr<T>(DevicePointer(device));
}
thrust::device_ptr<const T> tcbegin(int device) { // NOLINT
return thrust::device_ptr<const T>(ConstDevicePointer(device));
}
thrust::device_ptr<T> tend(int device) { // NOLINT
return tbegin(device) + DeviceSize(device);
}
thrust::device_ptr<const T> tcend(int device) { // NOLINT
return tcbegin(device) + DeviceSize(device);
}
void ScatterFrom(thrust::device_ptr<const T> begin, thrust::device_ptr<const T> end) {
CHECK_EQ(end - begin, Size());
if (perm_h_.CanWrite()) {
dh::safe_cuda(cudaMemcpy(data_h_.data(), begin.get(),
(end - begin) * sizeof(T),
cudaMemcpyDeviceToHost));
} else {
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
shard.ScatterFrom(begin.get());
});
}
}
  // Copy this vector's contents into the device range [begin, end).
  // Host-resident data goes out via one host-to-device memcpy; otherwise each
  // shard writes its slice into the destination.
  // NOTE(review): the shard branch passes the device_ptr itself while
  // ScatterFrom passes begin.get() — presumably DeviceShard::GatherTo takes a
  // device_ptr; verify against its declaration.
  void GatherTo(thrust::device_ptr<T> begin, thrust::device_ptr<T> end) {
    CHECK_EQ(end - begin, Size());
    if (perm_h_.CanWrite()) {
      dh::safe_cuda(cudaMemcpy(begin.get(), data_h_.data(),
                               data_h_.size() * sizeof(T),
                               cudaMemcpyHostToDevice));
    } else {
      dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.GatherTo(begin); });
    }
  }
void Fill(T v) { // NOLINT
if (perm_h_.CanWrite()) {
std::fill(data_h_.begin(), data_h_.end(), v);
} else {
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.Fill(v); });
}
}
  // Copy from another HostDeviceVectorImpl of the same size.
  // Fast path: both sides hold writable host data, so a plain host copy
  // suffices. Otherwise the copy happens shard-by-shard on the devices; if
  // the two vectors are distributed differently, this vector first adopts the
  // other's distribution (resetting its own so Shard()'s empty-check passes).
  void Copy(HostDeviceVectorImpl<T>* other) {
    CHECK_EQ(Size(), other->Size());
    // Data is on host.
    if (perm_h_.CanWrite() && other->perm_h_.CanWrite()) {
      std::copy(other->data_h_.begin(), other->data_h_.end(), data_h_.begin());
      return;
    }
    // Data is on device;
    if (distribution_ != other->distribution_) {
      distribution_ = GPUDistribution();
      Shard(other->Distribution());
      size_d_ = other->size_d_;
    }
    dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) {
      shard.Copy(&other->shards_.at(i));
    });
  }
void Copy(const std::vector<T>& other) {
CHECK_EQ(Size(), other.size());
if (perm_h_.CanWrite()) {
std::copy(other.begin(), other.end(), data_h_.begin());
} else {
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
shard.ScatterFrom(other.data());
});
}
}
void Copy(std::initializer_list<T> other) {
CHECK_EQ(Size(), other.size());
if (perm_h_.CanWrite()) {
std::copy(other.begin(), other.end(), data_h_.begin());
} else {
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
shard.ScatterFrom(other.begin());
});
}
}
  // Mutable access to the host copy; syncs device data back and invalidates
  // the device shards (write access implies exclusivity).
  std::vector<T>& HostVector() {
    LazySyncHost(GPUAccess::kWrite);
    return data_h_;
  }
  // Read-only access to the host copy; syncs device data back but leaves the
  // device shards readable.
  const std::vector<T>& ConstHostVector() {
    LazySyncHost(GPUAccess::kRead);
    return data_h_;
  }
  // Distribute the vector across devices per `distribution`.
  // Only legal when no distribution has been set yet (or when re-requesting
  // the identical one); changing an existing distribution requires Reshard().
  void Shard(const GPUDistribution& distribution) {
    if (distribution_ == distribution) { return; }
    CHECK(distribution_.IsEmpty());
    distribution_ = distribution;
    InitShards();
  }
  // Convenience overload: shard block-wise over `new_devices`.
  void Shard(GPUSet new_devices) {
    if (distribution_.Devices() == new_devices) { return; }
    Shard(GPUDistribution::Block(new_devices));
  }
  // Change to a different distribution. Data is first pulled back to the host
  // (with write access, so the old shards are invalidated), then the shards
  // are rebuilt for the new distribution.
  void Reshard(const GPUDistribution &distribution) {
    if (distribution_ == distribution) { return; }
    LazySyncHost(GPUAccess::kWrite);
    distribution_ = distribution;
    shards_.clear();
    InitShards();
  }
  // Resize to `new_size`, filling any new elements with `v`.
  // If the vector is currently empty but already has a distribution, the
  // resize happens directly on the devices (no host round-trip); otherwise
  // data is synced to the host and resized there.
  void Resize(size_t new_size, T v) {
    if (new_size == Size()) { return; }
    if (distribution_.IsFixedSize()) {
      // An explicit-offsets distribution pins the total size.
      CHECK_EQ(new_size, distribution_.offsets_.back());
    }
    if (Size() == 0 && !distribution_.IsEmpty()) {
      // fast on-device resize
      perm_h_ = Permissions(false);
      size_d_ = new_size;
      InitShards();
      Fill(v);
    } else {
      // resize on host
      LazySyncHost(GPUAccess::kWrite);
      data_h_.resize(new_size, v);
    }
  }
  // Make the host copy valid for `access` (read or write). Three cases:
  //  1. Host already has the permission: nothing to do.
  //  2. Host can read (data already present): no copy needed; just revoke the
  //     complementary permission on the device shards and upgrade host access.
  //  3. Host data is stale: grow the host buffer to the device size and let
  //     every shard copy its slice back, then grant access.
  void LazySyncHost(GPUAccess access) {
    if (perm_h_.CanAccess(access)) { return; }
    if (perm_h_.CanRead()) {
      // data is present, just need to deny access to the device
      dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
        shard.Perm().DenyComplementary(access);
      });
      perm_h_.Grant(access);
      return;
    }
    if (data_h_.size() != size_d_) { data_h_.resize(size_d_); }
    dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
      shard.LazySyncHost(access);
    });
    perm_h_.Grant(access);
  }
  // Make the shard on `device` valid for `access`; delegates the actual
  // host-to-device transfer / permission bookkeeping to the shard.
  void LazySyncDevice(int device, GPUAccess access) {
    GPUSet devices = distribution_.Devices();
    CHECK(devices.Contains(device));
    shards_.at(devices.Index(device)).LazySyncDevice(access);
  }
  // True if the host copy currently holds `access` permission (no sync done).
  bool HostCanAccess(GPUAccess access) { return perm_h_.CanAccess(access); }
  // True if the shard on `device` currently holds `access` permission
  // (no sync done); false for devices outside the distribution.
  bool DeviceCanAccess(int device, GPUAccess access) {
    GPUSet devices = distribution_.Devices();
    if (!devices.Contains(device)) { return false; }
    return shards_.at(devices.Index(device)).Perm().CanAccess(access);
  }
 private:
  // Host-side copy of the data; may be stale when shards hold write access.
  std::vector<T> data_h_;
  // Access permissions of the host copy (read/write), mirrored by per-shard
  // permissions inside each DeviceShard.
  Permissions perm_h_;
  // the total size of the data stored on the devices
  size_t size_d_;
  // How the vector is split across devices (empty == host-only).
  GPUDistribution distribution_;
  // protects size_d_ and perm_h_ when updated from multiple threads
  std::mutex mutex_;
  // One shard per device in distribution_, in device-index order.
  std::vector<DeviceShard> shards_;
};
// Construct a vector of `size` copies of `v`, sharded per `distribution`.
template <typename T>
HostDeviceVector<T>::HostDeviceVector
(size_t size, T v, const GPUDistribution &distribution) : impl_(nullptr) {
  impl_ = new HostDeviceVectorImpl<T>(size, v, distribution);
}
// Construct from an initializer list.
template <typename T>
HostDeviceVector<T>::HostDeviceVector
(std::initializer_list<T> init, const GPUDistribution &distribution) : impl_(nullptr) {
  impl_ = new HostDeviceVectorImpl<T>(init, distribution);
}
// Construct by copying a host std::vector.
template <typename T>
HostDeviceVector<T>::HostDeviceVector
(const std::vector<T>& init, const GPUDistribution &distribution) : impl_(nullptr) {
  impl_ = new HostDeviceVectorImpl<T>(init, distribution);
}
// Deep copy: duplicates the other vector's implementation (host data,
// distribution and device shards).
template <typename T>
HostDeviceVector<T>::HostDeviceVector(const HostDeviceVector<T>& other)
  : impl_(nullptr) {
  impl_ = new HostDeviceVectorImpl<T>(*other.impl_);
}
// Copy assignment. Builds the new impl before deleting the old one so a
// throwing copy leaves *this unchanged (strong exception guarantee).
template <typename T>
HostDeviceVector<T>& HostDeviceVector<T>::operator=
(const HostDeviceVector<T>& other) {
  if (this == &other) { return *this; }
  std::unique_ptr<HostDeviceVectorImpl<T>> newImpl(new HostDeviceVectorImpl<T>(*other.impl_));
  delete impl_;
  impl_ = newImpl.release();
  return *this;
}
// Destructor: impl_ is an owning raw pointer.
template <typename T>
HostDeviceVector<T>::~HostDeviceVector() {
  delete impl_;
  impl_ = nullptr;
}
// Thin forwarders: the public HostDeviceVector API delegates everything to
// the pimpl (HostDeviceVectorImpl). Const-qualified methods still forward to
// non-const impl methods because the impl may lazily sync data as a side
// effect.
template <typename T>
size_t HostDeviceVector<T>::Size() const { return impl_->Size(); }
template <typename T>
GPUSet HostDeviceVector<T>::Devices() const { return impl_->Devices(); }
template <typename T>
const GPUDistribution& HostDeviceVector<T>::Distribution() const {
  return impl_->Distribution();
}
template <typename T>
T* HostDeviceVector<T>::DevicePointer(int device) {
  return impl_->DevicePointer(device);
}
template <typename T>
const T* HostDeviceVector<T>::ConstDevicePointer(int device) const {
  return impl_->ConstDevicePointer(device);
}
template <typename T>
common::Span<T> HostDeviceVector<T>::DeviceSpan(int device) {
  return impl_->DeviceSpan(device);
}
template <typename T>
common::Span<const T> HostDeviceVector<T>::ConstDeviceSpan(int device) const {
  return impl_->ConstDeviceSpan(device);
}
template <typename T>
size_t HostDeviceVector<T>::DeviceStart(int device) const {
  return impl_->DeviceStart(device);
}
template <typename T>
size_t HostDeviceVector<T>::DeviceSize(int device) const {
  return impl_->DeviceSize(device);
}
// Thrust iterator forwarders over the per-device shards.
template <typename T>
thrust::device_ptr<T> HostDeviceVector<T>::tbegin(int device) {  // NOLINT
  return impl_->tbegin(device);
}
template <typename T>
thrust::device_ptr<const T> HostDeviceVector<T>::tcbegin(int device) const {  // NOLINT
  return impl_->tcbegin(device);
}
template <typename T>
thrust::device_ptr<T> HostDeviceVector<T>::tend(int device) {  // NOLINT
  return impl_->tend(device);
}
template <typename T>
thrust::device_ptr<const T> HostDeviceVector<T>::tcend(int device) const {  // NOLINT
  return impl_->tcend(device);
}
// Bulk data-movement forwarders.
template <typename T>
void HostDeviceVector<T>::ScatterFrom
(thrust::device_ptr<const T> begin, thrust::device_ptr<const T> end) {
  impl_->ScatterFrom(begin, end);
}
template <typename T>
void HostDeviceVector<T>::GatherTo
(thrust::device_ptr<T> begin, thrust::device_ptr<T> end) const {
  impl_->GatherTo(begin, end);
}
template <typename T>
void HostDeviceVector<T>::Fill(T v) {
  impl_->Fill(v);
}
template <typename T>
void HostDeviceVector<T>::Copy(const HostDeviceVector<T>& other) {
  impl_->Copy(other.impl_);
}
template <typename T>
void HostDeviceVector<T>::Copy(const std::vector<T>& other) {
  impl_->Copy(other);
}
template <typename T>
void HostDeviceVector<T>::Copy(std::initializer_list<T> other) {
  impl_->Copy(other);
}
// Host access, permission queries and (re)sharding forwarders.
template <typename T>
std::vector<T>& HostDeviceVector<T>::HostVector() { return impl_->HostVector(); }
template <typename T>
const std::vector<T>& HostDeviceVector<T>::ConstHostVector() const {
  return impl_->ConstHostVector();
}
template <typename T>
bool HostDeviceVector<T>::HostCanAccess(GPUAccess access) const {
  return impl_->HostCanAccess(access);
}
template <typename T>
bool HostDeviceVector<T>::DeviceCanAccess(int device, GPUAccess access) const {
  return impl_->DeviceCanAccess(device, access);
}
// NOTE(review): Shard() is const-qualified yet mutates the impl — the class
// treats distribution as logically non-observable state.
template <typename T>
void HostDeviceVector<T>::Shard(GPUSet new_devices) const {
  impl_->Shard(new_devices);
}
template <typename T>
void HostDeviceVector<T>::Shard(const GPUDistribution &distribution) const {
  impl_->Shard(distribution);
}
template <typename T>
void HostDeviceVector<T>::Reshard(const GPUDistribution &distribution) {
  impl_->Reshard(distribution);
}
template <typename T>
void HostDeviceVector<T>::Resize(size_t new_size, T v) {
  impl_->Resize(new_size, v);
}
// explicit instantiations are required, as HostDeviceVector isn't header-only
// (the implementation lives in this translation unit, so every element type
// used by callers must be instantiated here).
template class HostDeviceVector<bst_float>;
template class HostDeviceVector<GradientPair>;
template class HostDeviceVector<int>;
template class HostDeviceVector<Entry>;
template class HostDeviceVector<size_t>;
} // namespace tsoobgx
|
af2fcb2f4b4681b12d2ced5eac9b4decdbdaa10e.hip | // !!! This is a file automatically generated by hipify!!!
// TODO: After the run is finished the executable terminates with the following error. // TODO
// terminate called after throwing an instance of 'thrust::system::system_error'
// what(): unload of CUDA runtime failed
// Aborted (core dumped)
// Everything in this file is used only if compiled with USECUDA defined
#ifdef USECUDA
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include "rocblas.h"
#include "cusparse_v2.h"
#include <cusp/coo_matrix.h>
#include <cusp/csr_matrix.h>
#include <cusp/print.h>
#include <cusp/monitor.h>
#include <cusp/krylov/cg.h>
#include <cusp/precond/diagonal.h>
#include <cusp/precond/ainv.h>
#include <cusp/precond/aggregation/smoothed_aggregation.h>
#include <cusp/transpose.h>
#include <cusp/multiply.h>
#include <cusp/elementwise.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
int NBLOCKS = 128;
int NTHREADS = 1024;
#define NGP_SIZE (8)
#define NENv_SIZE (27)
#define DIM_SIZE (3)
using namespace std;
using namespace thrust;
extern int NN, NNp, sparseM_NNZ, sparseG_NNZ, *sparseMrowStarts, *sparseGrowStarts, *sparseMrow, *sparseMcol, BCnVelNodes, zeroPressureNode, timeN, monPoint;
extern int ** BCvelNodes;
extern double dt, timeT, t_ini, tolerance, *sparseAvalue, *sparseKvalue, *UnpHalf_prev, *Pn, *R1, *R11, *R12, *R13, *R2, *Un, *UnpHalf, *KtimesAcc_prev, *Acc_prev, *Acc, *MdInv, *MdOrigInv, *Unp1, *Pnp1, *Pdot;
extern double convergenceCriteria, maxAcc;
extern double wallClockTimeCurrentTimeStep;
extern char dummyUserInput;
extern string whichProblem;
extern double *K_d, *A_d, *G1_d, *G2_d, *G3_d;
extern double *MdInv_d, *Un_d, *Pn_d, *Pnp1_d, *Pnp1_prev_d;
extern double *MdOrigInv_d;
extern double *R1_d, *R2_d, *R3_d;
extern double *UnpHalf_d, *Pdot_d, *Acc_d, *Unp1_d, *Unp1_prev_d;
extern double *UnpHalf_prev_d;
extern double *Acc_prev_d;
extern double *KtimesAcc_prev_d;
extern int *Mcol_d, *Mrow_d, *MrowStarts_d, *Gcol_d, *Grow_d, *GrowStarts_d;
extern int *BCvelNodes_d;
extern hipsparseHandle_t handle;
extern hipsparseMatDescr_t descr;
extern hipblasHandle_t handleCUBLAS;
extern hipError_t cudaStatus;
extern cusparseSolveAnalysisInfo_t analysisInfo1, analysisInfo2;
extern int *sparseGrow, *sparseGcol;
extern double *sparseG1value, *sparseG2value, *sparseG3value;
extern size_t freeGPUmemory, totalGPUmemory; // To measure total and free GPU memory
cusp::csr_matrix<int, double, cusp::device_memory> Z_CUSP_CSR_d;
extern bool PRINT_TIMES;
extern double getHighResolutionTime(int, double);
extern void createTecplot();
extern int NE, NGP, NENv;
extern int *NmeshColors, *meshColors, *elementsOfColor;
extern int nActiveColors;
extern int *LtoGvel_1d;
extern int *sparseMapM_1d;
extern double *Sv_1d;
extern double *gDSv_1d, *GQfactor_1d;
extern int *NmeshColors_d, *meshColors_d, *elementsOfColor_d;
extern int *LtoGvel_1d_d;
extern int *sparseMapM_1d_d;
extern double *Sv_1d_d;
extern double *gDSv_1d_d, *GQfactor_1d_d;
// Functor used at the steady-state convergence check: for a zipped tuple of
// (Unp1, Un) velocity entries, returns the absolute per-node change |Unp1 - Un|.
// (oneOverdt is carried but not used in the current formula.)
struct weighted_absolute_difference
{
  double oneOverdt;

  weighted_absolute_difference(const double oneOverdt)
    : oneOverdt(oneOverdt)
  {}

  __host__ __device__
  double operator()(thrust::tuple<double,double> t)
  {
    const double unp1 = thrust::get<0>(t);   // velocity at the new time level
    const double un   = thrust::get<1>(t);   // velocity at the old time level
    return fabs(unp1 - un);
  }
};
// double *UnGPU;
//========================================================================
// Lists the CUDA-capable devices, selects one (the last device when more
// than one is present — see TODO below), and prints total/free GPU memory.
// NOTE(review): the hip* calls here are not error-checked; a failed
// hipGetDeviceCount would leave nDevices uninitialized — consider checking
// return codes as the rest of the file does.
void selectCUDAdevice()
//========================================================================
{
   // Print information about available CUDA devices set the CUDA device to be used
   hipDeviceProp_t prop;
   int nDevices;
   
   cout << "Available CUDA devices are" << endl;
   hipGetDeviceCount(&nDevices);
   
   for (int i = 0; i < nDevices; i++) {
     hipGetDeviceProperties(&prop, i);
     printf("   %d: %s\n", i, prop.name);
   }

   if (nDevices == 1) {                                                           // TODO : This will not work on every machine.
      hipSetDevice(0);
      cout << "\nDevice " << 0 << " is selected.\n";
   } else {
      hipSetDevice(nDevices - 1);
      //hipSetDevice(0);
      cout << "\nDevice " << nDevices - 1 << " is selected.\n";
   }

   hipMemGetInfo(&freeGPUmemory, &totalGPUmemory);
   cout << "   Total GPU memory = " << totalGPUmemory << endl;
   cout << "   Free  GPU memory = " << freeGPUmemory << endl << endl;
}  // End of function selectCUDAdevice()
//========================================================================
// Creates the cuSPARSE/cuBLAS handles, allocates all persistent device arrays
// for the fractional-step solver, and uploads the constant data (sparse
// matrix structure/values, mesh coloring tables, shape-function data, BC node
// list and initial Un/Pn fields).
// NOTE(review): several error labels are reused (Error42/43/44/50 each appear
// twice), which makes failures ambiguous — consider renumbering.
void initializeAndAllocateGPU()
//========================================================================
{
   // Do the necessary memory allocations for the GPU. Apply the initial
   // condition or read the restart file.
   handle = 0;
   descr  = 0;
   
   // Initialize cusparse library
   hipsparseCreate(&handle);
   hipblasCreate(&handleCUBLAS);

   // Create and setup matrix descriptor
   hipsparseCreateMatDescr(&descr);

   hipsparseSetMatType(descr,HIPSPARSE_MATRIX_TYPE_GENERAL);
   hipsparseSetMatIndexBase(descr,HIPSPARSE_INDEX_BASE_ZERO);

   // Per-velocity-component nonzero counts (M and G store 3 components each).
   int NNZM = sparseM_NNZ / 3;
   int NNZG = sparseG_NNZ / 3;

   // --- Device allocations for matrices, vectors and index arrays ---------
   cudaStatus = hipMalloc((void**)&K_d, NNZM * sizeof(double)); if(cudaStatus != hipSuccess) { printf("Error01: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   //cudaStatus = hipMalloc((void**)&A_d, NNZM * sizeof(double)); if(cudaStatus != hipSuccess) { printf("Error02: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMalloc((void**)&G1_d, NNZG * sizeof(double)); if(cudaStatus != hipSuccess) { printf("Error03: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMalloc((void**)&G2_d, NNZG * sizeof(double)); if(cudaStatus != hipSuccess) { printf("Error04: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMalloc((void**)&G3_d, NNZG * sizeof(double)); if(cudaStatus != hipSuccess) { printf("Error05: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMalloc((void**)&UnpHalf_prev_d, 3*NN * sizeof(double)); if(cudaStatus != hipSuccess) { printf("Error06: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMalloc((void**)&KtimesAcc_prev_d, 3*NN * sizeof(double)); if(cudaStatus != hipSuccess) { printf("Error07: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMalloc((void**)&Acc_d, 3*NN * sizeof(double)); if(cudaStatus != hipSuccess) { printf("Error08: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMalloc((void**)&Acc_prev_d, 3*NN * sizeof(double)); if(cudaStatus != hipSuccess) { printf("Error09: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMalloc((void**)&Pn_d, NNp * sizeof(double)); if(cudaStatus != hipSuccess) { printf("Error10: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMalloc((void**)&Pnp1_d, NNp * sizeof(double)); if(cudaStatus != hipSuccess) { printf("Error11: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMalloc((void**)&Pnp1_prev_d, NNp * sizeof(double)); if(cudaStatus != hipSuccess) { printf("Error12: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMalloc((void**)&Pdot_d, NNp * sizeof(double)); if(cudaStatus != hipSuccess) { printf("Error13: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMalloc((void**)&Mrow_d, NNZM * sizeof(int)); if(cudaStatus != hipSuccess) { printf("Error14: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMalloc((void**)&Mcol_d, NNZM * sizeof(int)); if(cudaStatus != hipSuccess) { printf("Error15: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMalloc((void**)&MrowStarts_d, (NN+1) * sizeof(int)); if(cudaStatus != hipSuccess) { printf("Error16: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMalloc((void**)&Grow_d, NNZG * sizeof(int)); if(cudaStatus != hipSuccess) { printf("Error17: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMalloc((void**)&Gcol_d, NNZG * sizeof(int)); if(cudaStatus != hipSuccess) { printf("Error18: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMalloc((void**)&GrowStarts_d, (NN+1) * sizeof(int)); if(cudaStatus != hipSuccess) { printf("Error19: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMalloc((void**)&BCvelNodes_d, BCnVelNodes * sizeof(int)); if(cudaStatus != hipSuccess) { printf("Error20: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMalloc((void**)&MdOrigInv_d, 3*NN * sizeof(double)); if(cudaStatus != hipSuccess) { printf("Error21: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMalloc((void**)&MdInv_d, 3*NN * sizeof(double)); if(cudaStatus != hipSuccess) { printf("Error22: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMalloc((void**)&Un_d, 3*NN * sizeof(double)); if(cudaStatus != hipSuccess) { printf("Error23: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMalloc((void**)&Unp1_d, 3*NN * sizeof(double)); if(cudaStatus != hipSuccess) { printf("Error24: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMalloc((void**)&Unp1_prev_d, 3*NN * sizeof(double)); if(cudaStatus != hipSuccess) { printf("Error25: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMalloc((void**)&UnpHalf_d, 3*NN * sizeof(double)); if(cudaStatus != hipSuccess) { printf("Error26: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMalloc((void**)&R1_d, 3*NN * sizeof(double)); if(cudaStatus != hipSuccess) { printf("Error27: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMalloc((void**)&R2_d, NNp * sizeof(double)); if(cudaStatus != hipSuccess) { printf("Error28: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMalloc((void**)&R3_d, 3*NN * sizeof(double)); if(cudaStatus != hipSuccess) { printf("Error29: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }

   // --- Upload sparse matrix structure and values -------------------------
   cudaStatus = hipMemcpy(MrowStarts_d, sparseMrowStarts, (NN+1) * sizeof(int), hipMemcpyHostToDevice); if(cudaStatus != hipSuccess) { printf("Error30: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMemcpy(K_d, sparseKvalue, NNZM * sizeof(double), hipMemcpyHostToDevice); if(cudaStatus != hipSuccess) { printf("Error31: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMemcpy(Mrow_d, sparseMrow, NNZM * sizeof(int), hipMemcpyHostToDevice); if(cudaStatus != hipSuccess) { printf("Error32: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMemcpy(Mcol_d, sparseMcol, NNZM * sizeof(int), hipMemcpyHostToDevice); if(cudaStatus != hipSuccess) { printf("Error33: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMemcpy(G1_d, sparseG1value, NNZG * sizeof(double), hipMemcpyHostToDevice); if(cudaStatus != hipSuccess) { printf("Error34: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMemcpy(G2_d, sparseG2value, NNZG * sizeof(double), hipMemcpyHostToDevice); if(cudaStatus != hipSuccess) { printf("Error35: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMemcpy(G3_d, sparseG3value, NNZG * sizeof(double), hipMemcpyHostToDevice); if(cudaStatus != hipSuccess) { printf("Error36: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMemcpy(Grow_d, sparseGrow, NNZG * sizeof(int), hipMemcpyHostToDevice); if(cudaStatus != hipSuccess) { printf("Error37: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMemcpy(Gcol_d, sparseGcol, NNZG * sizeof(int), hipMemcpyHostToDevice); if(cudaStatus != hipSuccess) { printf("Error38: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMemcpy(GrowStarts_d, sparseGrowStarts, (NN+1) * sizeof(int), hipMemcpyHostToDevice); if(cudaStatus != hipSuccess) { printf("Error39: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMemcpy(MdInv_d, MdInv, 3*NN * sizeof(double), hipMemcpyHostToDevice); if(cudaStatus != hipSuccess) { printf("Error40: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMemcpy(MdOrigInv_d, MdOrigInv, 3*NN * sizeof(double), hipMemcpyHostToDevice); if(cudaStatus != hipSuccess) { printf("Error41: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }

   // --- Mesh coloring / shape-function data for assembly kernels ----------
   // LARGE is an upper bound for the number of mesh colors; NmeshColors is
   // assumed to have at least this many entries. TODO confirm.
   int LARGE = 30;
   cudaStatus = hipMalloc((void**)&NmeshColors_d, LARGE * sizeof(int)); if(cudaStatus != hipSuccess) { printf("Error42: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   //cudaStatus = hipMalloc((void**)&meshColors_d, NE * sizeof(int)); if(cudaStatus != hipSuccess) { printf("Error43: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMalloc((void**)&elementsOfColor_d, NE * sizeof(int)); if(cudaStatus != hipSuccess) { printf("Error43: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   //cudaStatus = hipMalloc((void**)&sparseMapM_1d_d, NE*NENv*NENv * sizeof(int)); if(cudaStatus != hipSuccess) { printf("Error44: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMalloc((void**)&LtoGvel_1d_d, NE*NENv*3 * sizeof(int)); if(cudaStatus != hipSuccess) { printf("Error45: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMalloc((void**)&Sv_1d_d, NGP*NENv * sizeof(double)); if(cudaStatus != hipSuccess) { printf("Error46: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMalloc((void**)&gDSv_1d_d, NE*NGP*NENv*3 * sizeof(double)); if(cudaStatus != hipSuccess) { printf("Error47: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMalloc((void**)&GQfactor_1d_d, NE*NGP * sizeof(double)); if(cudaStatus != hipSuccess) { printf("Error48: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   
   cudaStatus = hipMemcpy(NmeshColors_d, NmeshColors, LARGE * sizeof(int), hipMemcpyHostToDevice); if(cudaStatus != hipSuccess) { printf("Error49: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   //cudaStatus = hipMemcpy(meshColors_d, meshColors, NE * sizeof(int), hipMemcpyHostToDevice); if(cudaStatus != hipSuccess) { printf("Error50: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMemcpy(elementsOfColor_d, elementsOfColor, NE * sizeof(int), hipMemcpyHostToDevice); if(cudaStatus != hipSuccess) { printf("Error50: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   //cudaStatus = hipMemcpy(sparseMapM_1d_d, sparseMapM_1d, NE*NENv*NENv * sizeof(int), hipMemcpyHostToDevice); if(cudaStatus != hipSuccess) { printf("Error51: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMemcpy(LtoGvel_1d_d, LtoGvel_1d, NE*NENv*3 * sizeof(int), hipMemcpyHostToDevice); if(cudaStatus != hipSuccess) { printf("Error52: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMemcpy(Sv_1d_d, Sv_1d, NGP*NENv * sizeof(double), hipMemcpyHostToDevice); if(cudaStatus != hipSuccess) { printf("Error53: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMemcpy(gDSv_1d_d, gDSv_1d, NE*NGP*NENv*3 * sizeof(double), hipMemcpyHostToDevice); if(cudaStatus != hipSuccess) { printf("Error54: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMemcpy(GQfactor_1d_d, GQfactor_1d, NE*NGP * sizeof(double), hipMemcpyHostToDevice); if(cudaStatus != hipSuccess) { printf("Error55: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }

   // Extract the 1st column of BCvelNodes and send it to the device.
   int *dummy;
   dummy = new int[BCnVelNodes];
   for(int i = 0; i < BCnVelNodes; i++) {
      dummy[i] = BCvelNodes[i][0];
   }
   cudaStatus = hipMemcpy(BCvelNodes_d, dummy, BCnVelNodes * sizeof(int), hipMemcpyHostToDevice); if(cudaStatus != hipSuccess) { printf("Error42: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   delete[] dummy;
   
   // Send Un to the GPU
   cudaStatus = hipMemcpy(Un_d, Un, 3*NN * sizeof(double), hipMemcpyHostToDevice); if(cudaStatus != hipSuccess) { printf("Error43: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMemcpy(Pn_d, Pn, NNp * sizeof(double), hipMemcpyHostToDevice); if(cudaStatus != hipSuccess) { printf("Error44: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   
   // Initialize Pdot
   hipMemset(Pdot_d, 0, NNp*sizeof(double));

   hipMemGetInfo(&freeGPUmemory, &totalGPUmemory);
   cout << endl;
   cout << "After initializeAndAllocateGPU() function, free GPU memory = " << freeGPUmemory << endl;
}  // End of function initializeAndAllocateGPU()
//========================================================================
void calculateZ_CUSP()
//========================================================================
{
   // Uses CUSP to calculate [Z] = Gt * MdInvOrig * G on the host, then copies
   // the result to the device as Z_CUSP_CSR_d for the pressure CG solve.

   // TODO: Try to minimize host-device memory transfers
   // TODO: Use "views" property of CUSP to minimize device memory usage.

   int NNZG = sparseG_NNZ/3;   // nonzeros per component of [G]

   // Define G1, G2, G3 as COO matrices on the HOST
   cusp::coo_matrix<int, double, cusp::host_memory> G1_CUSP(NN, NNp, NNZG);
   cusp::coo_matrix<int, double, cusp::host_memory> G2_CUSP(NN, NNp, NNZG);
   cusp::coo_matrix<int, double, cusp::host_memory> G3_CUSP(NN, NNp, NNZG);

   // Copy COO vectors of G1, G2 and G3 to CUSP matrices
   thrust::copy(sparseGrow, sparseGrow + NNZG, G1_CUSP.row_indices.begin());
   thrust::copy(sparseGcol, sparseGcol + NNZG, G1_CUSP.column_indices.begin());
   thrust::copy(sparseG1value, sparseG1value + NNZG, G1_CUSP.values.begin());

   thrust::copy(sparseGrow, sparseGrow + NNZG, G2_CUSP.row_indices.begin());
   thrust::copy(sparseGcol, sparseGcol + NNZG, G2_CUSP.column_indices.begin());
   thrust::copy(sparseG2value, sparseG2value + NNZG, G2_CUSP.values.begin());

   thrust::copy(sparseGrow, sparseGrow + NNZG, G3_CUSP.row_indices.begin());
   thrust::copy(sparseGcol, sparseGcol + NNZG, G3_CUSP.column_indices.begin());
   thrust::copy(sparseG3value, sparseG3value + NNZG, G3_CUSP.values.begin());

   // Define transpose of G matrices on the HOST.
   cusp::coo_matrix<int, double, cusp::host_memory> G1t_CUSP(NNp, NN, NNZG);
   cusp::coo_matrix<int, double, cusp::host_memory> G2t_CUSP(NNp, NN, NNZG);
   cusp::coo_matrix<int, double, cusp::host_memory> G3t_CUSP(NNp, NN, NNZG);

   cusp::transpose(G1_CUSP, G1t_CUSP);
   cusp::transpose(G2_CUSP, G2t_CUSP);
   cusp::transpose(G3_CUSP, G3t_CUSP);

   // Multiply G1, G2 and G3 with MdOrigInv diagonal matrix.
   double *G1mod, *G2mod, *G3mod;   // Values of G matrices multiplied with the diagonal MdOrigInv matrix.
   G1mod = new double[NNZG];
   G2mod = new double[NNZG];
   G3mod = new double[NNZG];

   for (int i = 0; i < NNZG; i++) {
      G1mod[i] = sparseG1value[i] * MdOrigInv[sparseGrow[i]];
      G2mod[i] = sparseG2value[i] * MdOrigInv[sparseGrow[i]];
      G3mod[i] = sparseG3value[i] * MdOrigInv[sparseGrow[i]];
   }

   // Copy these modified G values into the CUSP matrices
   thrust::copy(G1mod, G1mod + NNZG, G1_CUSP.values.begin());
   thrust::copy(G2mod, G2mod + NNZG, G2_CUSP.values.begin());
   thrust::copy(G3mod, G3mod + NNZG, G3_CUSP.values.begin());

   // BUG FIX: these scratch arrays were previously leaked; they are no longer
   // needed once copied into the CUSP matrices, so release them here.
   delete[] G1mod;
   delete[] G2mod;
   delete[] G3mod;

   // Multiply Gt * Gmod matrices one by one. First store the results to a dummy matrix
   // and then add them to Z_CUSP_COO
   cusp::coo_matrix<int, double, cusp::host_memory> dummy;
   cusp::coo_matrix<int, double, cusp::host_memory> Z_CUSP_COO;

   cusp::multiply(G1t_CUSP, G1_CUSP, dummy);
   Z_CUSP_COO = dummy;
   cusp::multiply(G2t_CUSP, G2_CUSP, dummy);
   cusp::add(Z_CUSP_COO, dummy, Z_CUSP_COO);
   cusp::multiply(G3t_CUSP, G3_CUSP, dummy);
   cusp::add(Z_CUSP_COO, dummy, Z_CUSP_COO);

   // Convert Z_CUSP_COO into CSR format
   cusp::csr_matrix<int, double, cusp::host_memory> Z_CUSP_CSR;
   Z_CUSP_CSR = Z_CUSP_COO;

   // Modify Z_CUSP_CSR for known pressures.
   int LARGE = 1000;   // TODO: How important is this LARGE value in solution accuracy and convergence rate of CG?
   if (zeroPressureNode > 0) {   // If node is negative it means we do not set pressure to zero at any node.
      // Multiply Z[node][node] by LARGE
      for (int j = Z_CUSP_CSR.row_offsets[zeroPressureNode]; j < Z_CUSP_CSR.row_offsets[zeroPressureNode + 1]; j++) {   // Go through row "zeroPressureNode" of [Z].
         if (Z_CUSP_CSR.column_indices[j] == zeroPressureNode) {   // Determine the position of the diagonal entry in column "zeroPressureNode"
            Z_CUSP_CSR.values[j] = Z_CUSP_CSR.values[j] * LARGE;
            break;
         }
      }
   }

   // Copy the assembled matrix to the device.
   Z_CUSP_CSR_d = Z_CUSP_CSR;

   // CONTROL
   //cusp::print(Z_CUSP_CSR);

   cout << endl << " NNZ of Z_CUSP_CSR = " << Z_CUSP_CSR.num_entries << endl;

   hipMemGetInfo(&freeGPUmemory, &totalGPUmemory);
   cout << endl;
   cout << "At the end of calculateZ_CUSP() function, free GPU memory = " << freeGPUmemory << endl;

   /*
   // Write Z_CUSP_CSR matrix to a file for further use by the MKL_CG solver in a different run.
   ZcsrFile = fopen((whichProblem + ".zCSR").c_str(), "wb");

   int *rowOffsets, *colIndices;
   double *values;

   rowOffsets = new int[NNp+1];
   colIndices = new int[Z_CUSP_CSR.num_entries];
   values     = new double[Z_CUSP_CSR.num_entries];

   for (int i = 0; i < NNp + 1; i++) {
      rowOffsets[i] = Z_CUSP_CSR.row_offsets[i];
   }

   for (int i = 0; i < Z_CUSP_CSR.num_entries; i++) {
      colIndices[i] = Z_CUSP_CSR.column_indices[i];
      values[i]     = Z_CUSP_CSR.values[i];
   }

   fwrite(&Z_CUSP_CSR.num_entries, sizeof(int), size_t(1), ZcsrFile);
   fwrite(rowOffsets, sizeof(int), size_t(NNp+1), ZcsrFile);
   fwrite(colIndices, sizeof(int), size_t(Z_CUSP_CSR.num_entries), ZcsrFile);
   fwrite(values, sizeof(double), size_t(Z_CUSP_CSR.num_entries), ZcsrFile);
   fclose(ZcsrFile);

   delete[] rowOffsets;
   delete[] colIndices;
   delete[] values;
   */
}  // End of function calculateZ_CUSP()
//========================================================================
// Solves the pressure-correction system [Z]{Pdot} = {R2} on the device with
// CUSP's preconditioned conjugate gradient. R2_d and Pdot_d (raw device
// pointers) are wrapped as CUSP array views, so the solve happens in place
// with no extra copies. Tolerance 1e-6 relative residual, max 1000 iterations.
void CUSP_CG_solver()
//========================================================================
{
   // Solve the system of step 2 [Z]{Pdot}={R2} using CG.

   // Wrap the raw device pointers so CUSP can use them without copying.
   thrust::device_ptr<double> wrapped_R2_d(R2_d);
   thrust::device_ptr<double> wrapped_Pdot_d(Pdot_d);

   typedef typename cusp::array1d_view< thrust::device_ptr<double> > DeviceValueArrayView;

   DeviceValueArrayView RHS_d (wrapped_R2_d, wrapped_R2_d + NNp);
   DeviceValueArrayView soln_d (wrapped_Pdot_d, wrapped_Pdot_d + NNp);

   // Set monitor
   cusp::default_monitor<double> monitor(RHS_d, 1000, 1e-6);
   //cusp::verbose_monitor<double> monitor(RHS_d, 1000, 1e-12);

   // Set preconditioner (Jacobi/diagonal; alternatives kept for experiments)
   cusp::precond::diagonal<double, cusp::device_memory> M(Z_CUSP_CSR_d);
   //cusp::identity_operator<double, cusp::device_memory> M(NNp, NNp);
   //cusp::precond::scaled_bridson_ainv<double, cusp::device_memory> M(Z_CUSP_CSR_d, .1);
   //cusp::precond::aggregation::smoothed_aggregation<int, double, cusp::device_memory> M(Z_CUSP_CSR_d);

   cusp::krylov::cg(Z_CUSP_CSR_d, soln_d, RHS_d, monitor, M);

   if (PRINT_TIMES) cout << "CUSP CG solver made " << monitor.iteration_count() << " iterations. Residual norm is " << monitor.residual_norm() << endl;

   // CONTROL
   //for(int i = 0; i < NNp; i++) {
   //   cout << Pdot[i] << endl;
   //}
}  // End of function CUSP_CG_solver()
//========================================================================
__global__ void getMonitorData(int monPoint, int NN, double *Un_d, double *Pn_d, double *monitorData_d)
//========================================================================
{
   // Gathers the three velocity components and the pressure at the monitor
   // node into one small contiguous device array (intended for a <<<1,1>>>
   // launch; every thread would write the same values).
   monitorData_d[0] = Un_d[monPoint];          // u at the monitor node
   monitorData_d[1] = Un_d[monPoint + NN];     // v
   monitorData_d[2] = Un_d[monPoint + 2*NN];   // w
   monitorData_d[3] = Pn_d[monPoint];          // pressure
}
//========================================================================
__global__ void applyVelBC(int Nbc, int N, int *velBCdata, double *A)
//========================================================================
{
   // Zeroes the x, y and z entries of vector A at every node with a
   // prescribed velocity BC. A holds velocity *differences* between two
   // iterations, so a known (fixed) velocity corresponds to a zero entry.
   const int stride = blockDim.x * gridDim.x;
   for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < Nbc; i += stride) {
      const int node = velBCdata[i];   // Node at which this velocity BC is specified
      A[node]       = 0.0;
      A[node + N]   = 0.0;
      A[node + 2*N] = 0.0;
   }
}
//========================================================================
__global__ void applyPresBC(int zeroPressureNode, double *R2_d)
//========================================================================
{
   // Imposes the known zero pressure at the outlet by zeroing the matching
   // entry of R2 (which holds the pressure *difference* between two
   // iterations, not the pressure itself). Launched with <<<1,1>>>.
   // Bug fix: a negative zeroPressureNode means pressure is not fixed at
   // any node, so the test must be ">= 0"; the previous "> 0" silently
   // skipped the valid node index 0.
   if (zeroPressureNode >= 0) {
      R2_d[zeroPressureNode] = 0.0;
   }
}
//========================================================================
__global__ void calculate_UnpHalf(int N, double dt, double *UnpHalf_d, double *Un_d, double *R1_d, double *MdInv_d)
//========================================================================
{
   // Explicit half-step velocity update with the lumped (diagonal) mass
   // matrix: UnpHalf = Un + dt * MdInv .* R1, elementwise over N DOFs.
   const int stride = blockDim.x * gridDim.x;
   for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < N; i += stride) {
      UnpHalf_d[i] = Un_d[i] + dt * R1_d[i] * MdInv_d[i];
   }
}
//========================================================================
__global__ void calculate_step2dummyV1(int N, double d, double *A, double *B)
//========================================================================
{
   // Elementwise scale: A = d * B over N entries (grid-stride loop).
   const int stride = blockDim.x * gridDim.x;
   for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < N; i += stride) {
      A[i] = d * B[i];
   }
}
//========================================================================
__global__ void calculate_step2dummyV2(int N, double d, double *A, double *B, double *C, double *D)
//========================================================================
{
   // Elementwise combination: A = d * B - C .* D over N entries.
   const int stride = blockDim.x * gridDim.x;
   for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < N; i += stride) {
      A[i] = d * B[i] - C[i] * D[i];
   }
}
//========================================================================
__global__ void subtractVectors(int N, double *A, double *B, double *C)
//========================================================================
{
   // Elementwise difference: A = B - C over N entries.
   const int stride = blockDim.x * gridDim.x;
   for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < N; i += stride) {
      A[i] = B[i] - C[i];
   }
}
//========================================================================
__global__ void permuteVector(int N, int *p, double *b, double *x)
//========================================================================
{
   // Scatters b into x through the permutation vector p: x[p[k]] = b[k].
   const int stride = blockDim.x * gridDim.x;
   for (int k = threadIdx.x + blockIdx.x * blockDim.x; k < N; k += stride) {
      x[p[k]] = b[k];
   }
}
//========================================================================
__global__ void permuteVectorBack(int N, int *p, double *b, double *x)
//========================================================================
{
   // Gathers from b through the permutation vector p: x[k] = b[p[k]]
   // (the inverse operation of permuteVector).
   const int stride = blockDim.x * gridDim.x;
   for (int k = threadIdx.x + blockIdx.x * blockDim.x; k < N; k += stride) {
      x[k] = b[p[k]];
   }
}
//========================================================================
__global__ void calculate_Pnp1(int N, double dt, double *Pnp1_d, double *Pn_d, double *Pdot_d)
//========================================================================
{
   // Pressure update: Pnp1 = Pn + dt * Pdot, elementwise over N entries.
   const int stride = blockDim.x * gridDim.x;
   for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < N; i += stride) {
      Pnp1_d[i] = Pn_d[i] + dt * Pdot_d[i];
   }
}
//========================================================================
__global__ void calculate_Unp1(int N, double dt, double *Unp1_d, double *UnpHalf_d, double *Acc_d)
//========================================================================
{
   // Velocity update: Unp1 = UnpHalf + dt * Acc, elementwise over N DOFs.
   // Note: structurally identical to calculate_Pnp1 (an AXPY); kept as a
   // separate kernel to preserve the existing call sites.
   const int stride = blockDim.x * gridDim.x;
   for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < N; i += stride) {
      Unp1_d[i] = UnpHalf_d[i] + dt * Acc_d[i];
   }
}
//========================================================================
__global__ void calculate_R3(int N, double dt, double *A, double *B)
//========================================================================
{
   // In-place update: A = A - dt * B, elementwise over N entries.
   const int stride = blockDim.x * gridDim.x;
   for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < N; i += stride) {
      A[i] -= dt * B[i];
   }
}
//========================================================================
__global__ void multiplyVectors(int N, double *A, double *B, double *C)
//========================================================================
{
   // Elementwise product: A = B .* C over N entries.
   const int stride = blockDim.x * gridDim.x;
   for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < N; i += stride) {
      A[i] = B[i] * C[i];
   }
}
//========================================================================
__global__ void calcAndAssembleMatrixA(int NE, int NENv, int NGP, int NN, int offsetElements,
                                       int *elementsOfColor,
                                       int *LtoGvel,
                                       double *U, double *U_prev,
                                       double *Sv, double *gDSv, double *GQfactor,
                                       double *R1)
//========================================================================
{
   // Computes the elemental advection matrix Ae of one element (one block
   // per element of the current mesh color) by Gauss quadrature and directly
   // accumulates -Ae * Ue_prev into the global RHS vector R1. Elements of
   // the same color share no nodes, so the R1 scatter needs no atomics.
   //
   // Launch assumptions: blockDim.x >= max(NENv, NGP) (calculateMatrixAGPU
   // uses a single warp of 32 threads), NENv <= NENv_SIZE, NGP <= NGP_SIZE.
   //
   // Bug fix: __syncthreads() barriers are inserted between the shared
   // memory write and read phases. The original code had none and relied on
   // implicit warp-level synchrony, which is not guaranteed under the
   // independent thread scheduling of Volta and newer architectures; the
   // barriers are placed outside divergent branches so all block threads
   // reach them.
   __shared__ double s_u[NENv_SIZE];        // elemental u of current iterate
   __shared__ double s_v[NENv_SIZE];
   __shared__ double s_w[NENv_SIZE];
   __shared__ double s_u_prev[NENv_SIZE];   // elemental u of previous iterate
   __shared__ double s_v_prev[NENv_SIZE];
   __shared__ double s_w_prev[NENv_SIZE];
   __shared__ double s_u_GQ[NGP_SIZE];      // velocities interpolated to GQ points
   __shared__ double s_v_GQ[NGP_SIZE];
   __shared__ double s_w_GQ[NGP_SIZE];
   __shared__ double s_Sv[NGP_SIZE*NENv_SIZE];            // shape functions at GQ points
   __shared__ double s_gDSv[NGP_SIZE*NENv_SIZE*DIM_SIZE]; // global shape fn. derivatives
   __shared__ double s_Ae[NENv_SIZE*NENv_SIZE];           // elemental advection matrix
   __shared__ double s_R1e[3*NENv_SIZE];                  // elemental RHS contribution

   const int tid  = threadIdx.x;
   const int ebid = elementsOfColor[offsetElements + blockIdx.x];  // global element id

   // Phase 1: gather elemental velocities (current and previous iterate),
   // shape functions and their derivatives into shared memory, and zero the
   // elemental accumulators. One thread per element node.
   if (tid < NENv) {
      const int iLtoGu = LtoGvel[tid + ebid*NENv*3];
      const int iLtoGv = LtoGvel[tid + ebid*NENv*3 + NENv];
      const int iLtoGw = LtoGvel[tid + ebid*NENv*3 + NENv*2];
      s_u[tid] = U[iLtoGu];
      s_v[tid] = U[iLtoGv];
      s_w[tid] = U[iLtoGw];
      s_u_prev[tid] = U_prev[iLtoGu];
      s_v_prev[tid] = U_prev[iLtoGv];
      s_w_prev[tid] = U_prev[iLtoGw];

      for (int k = 0; k < NGP; k++) {
         const int iSv = NENv*k + tid;
         s_Sv[iSv] = Sv[iSv];
         for (int i = 0; i < 3; i++) {
            s_gDSv[k*NENv*3 + tid*3 + i] = gDSv[ebid*NENv*NGP*3 + k*NENv*3 + tid*3 + i];
         }
      }

      for (int i = 0; i < NENv; i++) {
         s_Ae[i*NENv + tid] = 0.0;
      }
      s_R1e[tid]        = 0.0;
      s_R1e[tid+NENv]   = 0.0;
      s_R1e[tid+2*NENv] = 0.0;
   }
   __syncthreads();   // s_u/s_v/s_w and s_Sv must be complete before phase 2

   // Phase 2: interpolate nodal velocities to the GQ points (one thread per
   // GQ point; these threads read shared data written by phase-1 threads).
   if (tid < NGP) {
      double uGQ = 0.0, vGQ = 0.0, wGQ = 0.0;
      for (int i = 0; i < NENv; i++) {
         const int iSv = tid*NENv + i;
         uGQ += s_Sv[iSv] * s_u[i];
         vGQ += s_Sv[iSv] * s_v[i];
         wGQ += s_Sv[iSv] * s_w[i];
      }
      s_u_GQ[tid] = uGQ;
      s_v_GQ[tid] = vGQ;
      s_w_GQ[tid] = wGQ;
   }
   __syncthreads();   // s_*_GQ must be complete before phase 3

   // Phase 3: Gauss-quadrature integration of the advection term. Thread
   // tid fills column tid of s_Ae.
   if (tid < NENv) {
      for (int k = 0; k < NGP; k++) {
         const double GQfactorThread = GQfactor[ebid*NGP + k];   // Jacobian * GQ weight
         const int iGDSv = k*NENv*3 + tid*3;
         for (int i = 0; i < NENv; i++) {
            s_Ae[i*NENv + tid] += (s_u_GQ[k] * s_gDSv[iGDSv] +
                                   s_v_GQ[k] * s_gDSv[iGDSv+1] +
                                   s_w_GQ[k] * s_gDSv[iGDSv+2]) * s_Sv[k*NENv + i] * GQfactorThread;
         }
      }
   }
   __syncthreads();   // thread tid reads ROW tid of s_Ae next, written by other threads

   // Phase 4: R1e = Ae * Ue_prev componentwise, then scatter -R1e into the
   // global R1 (safe without atomics thanks to mesh coloring).
   if (tid < NENv) {
      for (int i = 0; i < NENv; i++) {
         const int iAe = tid*NENv + i;
         s_R1e[tid]        += s_Ae[iAe] * s_u_prev[i];
         s_R1e[tid+NENv]   += s_Ae[iAe] * s_v_prev[i];
         s_R1e[tid+2*NENv] += s_Ae[iAe] * s_w_prev[i];
      }
      const int iLtoGu = LtoGvel[tid + ebid*NENv*3];
      const int iLtoGv = LtoGvel[tid + ebid*NENv*3 + NENv];
      const int iLtoGw = LtoGvel[tid + ebid*NENv*3 + NENv*2];
      R1[iLtoGu] -= s_R1e[tid];
      R1[iLtoGv] -= s_R1e[tid+NENv];
      R1[iLtoGw] -= s_R1e[tid+2*NENv];
   }
} // End of function calcAndAssembleMatrixA()
//========================================================================
void calculateMatrixAGPU()
//========================================================================
{
   // Assembles the advection contribution into R1 on the GPU: zeroes R1,
   // then launches calcAndAssembleMatrixA once per mesh color with one
   // block per element of that color. Coloring guarantees that elements
   // launched together share no nodes, so the kernel can scatter into R1
   // without atomics.
   //int offset_gDSv_1d_d;
   //int offset_GQfactor_1d_d;
   //int offset_SparseMapM_1d_d;
   int offsetElements = 0;        // Running start index into elementsOfColor_d
   int nBlocksColor;              // One block per element of the current color
   int nThreadsPerBlock = 32;     // Single warp; must cover max(NENv, NGP) working threads
   //hipMemset(A_d, 0, sparseM_NNZ/3*sizeof(double));
   hipMemset(R1_d, 0, 3*NN*sizeof(double));
   for (int color = 0; color < nActiveColors; color++) {
      nBlocksColor = NmeshColors[color];   // Number of elements of this color
      //cout << endl;
      //cout << "offset_" << NmeshColors[color] << " = " << offsetElements << endl;
      hipLaunchKernelGGL(( calcAndAssembleMatrixA), dim3(nBlocksColor),dim3(nThreadsPerBlock), 0, 0, NE, NENv, NGP, NN, offsetElements,
                         elementsOfColor_d,
                         LtoGvel_1d_d,
                         Un_d, UnpHalf_prev_d,
                         Sv_1d_d, gDSv_1d_d, GQfactor_1d_d,
                         R1_d);
      offsetElements += NmeshColors[color];
   }
} // End of function calculateMatrixAGPU()
//========================================================================
void step1GPUpart(int iter)
//========================================================================
{
   // CUSPARSE Reference: docs.nvidia.com/cuda/cusparse/index.html#appendix-b-cusparse-library-c---example
   // Step 1 of the fractional-step scheme: finish assembling the RHS vector
   //    R1 = - K * UnpHalf_prev - A * UnpHalf_prev - G * Pn
   // (the advection part -A*UnpHalf_prev is assumed already accumulated in
   // R1_d by calculateMatrixAGPU; beta = 1 below makes every SpMV add onto
   // it), then compute the intermediate velocity UnpHalf.
   int NNZM = sparseM_NNZ / 3;   // Nonzeros of one velocity-component block of [K]
   int NNZG = sparseG_NNZ / 3;   // Nonzeros of one component of [G]
   double alpha = -1.000000000000000;
   double beta = 1.000000000000000;
   hipsparseDcsrmv(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, NN, NN, NNZM, &alpha, descr, K_d, MrowStarts_d, Mcol_d, UnpHalf_prev_d, &beta, R1_d); // x part of (- K * UnpHalf_prev)
   hipsparseDcsrmv(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, NN, NN, NNZM, &alpha, descr, K_d, MrowStarts_d, Mcol_d, UnpHalf_prev_d + NN, &beta, R1_d + NN); // y part of (- K * UnpHalf_prev)
   hipsparseDcsrmv(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, NN, NN, NNZM, &alpha, descr, K_d, MrowStarts_d, Mcol_d, UnpHalf_prev_d + 2*NN, &beta, R1_d + 2*NN); // z part of (- K * UnpHalf_prev)
   hipsparseDcsrmv(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, NN, NNp, NNZG, &alpha, descr, G1_d, GrowStarts_d, Gcol_d, Pn_d, &beta, R1_d); // Part of (- G1 * Pn)
   hipsparseDcsrmv(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, NN, NNp, NNZG, &alpha, descr, G2_d, GrowStarts_d, Gcol_d, Pn_d, &beta, R1_d + NN); // Part of (- G2 * Pn)
   hipsparseDcsrmv(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, NN, NNp, NNZG, &alpha, descr, G3_d, GrowStarts_d, Gcol_d, Pn_d, &beta, R1_d + 2*NN); // Part of (- G3 * Pn)
   // Zero the R1 rows of nodes with prescribed velocities.
   hipLaunchKernelGGL(( applyVelBC), dim3(NBLOCKS),dim3(NTHREADS), 0, 0, BCnVelNodes, NN, BCvelNodes_d, R1_d);
   // Calculate UnpHalf = Un + dt * MdInv .* R1 (explicit lumped-mass update)
   hipLaunchKernelGGL(( calculate_UnpHalf), dim3(NBLOCKS),dim3(NTHREADS), 0, 0, 3*NN, dt, UnpHalf_d, Un_d, R1_d, MdInv_d);
} // End of function step1GPUpart()
//========================================================================
void step2GPU(int iter)
//========================================================================
{
   // Executes step 2 of the method to determine pressure of the new time step.
   // Builds the step-2 RHS (1/(dt*dt) times the residual defined in Blasco's
   // paper):
   //    R2 = Gt * (UnpHalf / (dt*dt) - MdOrigInv * K * Acc_prev)
   // then solves [Z]{Pdot}={R2} with CG and advances the pressure.
   // NOTE(review): dummy_d is hipMalloc'ed and hipFree'd on every call; a
   // persistent scratch buffer would avoid per-iteration allocation cost —
   // confirm NN is fixed over the run before caching it.
   double *dummy_d; // This will store (UnpHalf / (dt*dt) - MdOrigInv * K * Acc_prev) array.
   cudaStatus = hipMalloc((void**)&dummy_d, 3*NN * sizeof(double)); if(cudaStatus != hipSuccess) { printf("Error102: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   double oneOverdt2 = 1.0000000000000000 / (dt*dt);
   // On the first iteration KtimesAcc_prev is not available yet, so the
   // second term of dummy is dropped.
   if (iter == 1) {
      hipLaunchKernelGGL(( calculate_step2dummyV1), dim3(NBLOCKS),dim3(NTHREADS), 0, 0, 3*NN, oneOverdt2, dummy_d, UnpHalf_d);
   } else {
      hipLaunchKernelGGL(( calculate_step2dummyV2), dim3(NBLOCKS),dim3(NTHREADS), 0, 0, 3*NN, oneOverdt2, dummy_d, UnpHalf_d, MdOrigInv_d, KtimesAcc_prev_d);
   }
   // Multiply Gt with the previously calculated dummy arrays.
   int NNZG = sparseG_NNZ / 3;
   double alpha = 1.000000000000000;
   double beta = 0.000000000000000;   // First SpMV overwrites R2 ...
   hipsparseDcsrmv(handle, HIPSPARSE_OPERATION_TRANSPOSE, NN, NNp, NNZG, &alpha, descr, G1_d, GrowStarts_d, Gcol_d, dummy_d, &beta, R2_d); // 1st part of [Gt] * {dummy}
   beta = 1.000000000000000;          // ... the next two accumulate onto it
   hipsparseDcsrmv(handle, HIPSPARSE_OPERATION_TRANSPOSE, NN, NNp, NNZG, &alpha, descr, G2_d, GrowStarts_d, Gcol_d, dummy_d + NN, &beta, R2_d); // 2nd part of [Gt] * {dummy}
   hipsparseDcsrmv(handle, HIPSPARSE_OPERATION_TRANSPOSE, NN, NNp, NNZG, &alpha, descr, G3_d, GrowStarts_d, Gcol_d, dummy_d + 2*NN, &beta, R2_d); // 3rd part of [Gt] * {dummy}
   // Impose the known zero pressure (single-node operation).
   hipLaunchKernelGGL(( applyPresBC), dim3(1),dim3(1), 0, 0, zeroPressureNode, R2_d);
   // Use CUSP's CG solver to solve [Z] {Pdot}= {R2} system
   CUSP_CG_solver(); // Calculate Pdot
   hipLaunchKernelGGL(( calculate_Pnp1), dim3(NBLOCKS),dim3(NTHREADS), 0, 0, NNp, dt, Pnp1_d, Pn_d, Pdot_d); // Pnp1 = Pn + dt * Pdot // TODO: Use CUBLAS function
   hipFree(dummy_d);
} // End of function step2GPU()
//========================================================================
void step3GPU(int iter)
//========================================================================
{
   // Executes step 3 of the method to determine the velocity of the new time step.
   // Builds the step-3 RHS
   //    R3 = - dt * (G * Pdot + K * Acc_prev)
   // then computes the acceleration Acc = MdInv .* R3 and the end-of-step
   // velocity Unp1 = UnpHalf + dt * Acc.
   int NNZG = sparseG_NNZ / 3;   // Nonzeros of one component of [G]
   double alpha = -dt;
   double beta = 0.000000000000000; // beta = 0: each SpMV overwrites its R3 component // TODO: Use a single Rvel instead of (R1 and R3)
   hipsparseDcsrmv(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, NN, NNp, NNZG, &alpha, descr, G1_d, GrowStarts_d, Gcol_d, Pdot_d, &beta, R3_d); // This contributes to (- dt * G1 * Pdot) part of R3
   hipsparseDcsrmv(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, NN, NNp, NNZG, &alpha, descr, G2_d, GrowStarts_d, Gcol_d, Pdot_d, &beta, R3_d + NN); // This contributes to (- dt * G2 * Pdot) part of R3
   hipsparseDcsrmv(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, NN, NNp, NNZG, &alpha, descr, G3_d, GrowStarts_d, Gcol_d, Pdot_d, &beta, R3_d + 2*NN); // This contributes to (- dt * G3 * Pdot) part of R3
   // Subtract dt * KtimesAcc_prev from R3 if iter is not 1 (KtimesAcc_prev
   // is not available on the very first iteration).
   if (iter != 1) {
      hipLaunchKernelGGL(( calculate_R3), dim3(NBLOCKS),dim3(NTHREADS), 0, 0, 3*NN, dt, R3_d, KtimesAcc_prev_d);
   }
   // Zero the R3 rows of nodes with prescribed velocities.
   hipLaunchKernelGGL(( applyVelBC), dim3(NBLOCKS),dim3(NTHREADS), 0, 0, BCnVelNodes, NN, BCvelNodes_d, R3_d);
   // Calculate Acc (Acc = R3 * MdInv)
   hipLaunchKernelGGL(( multiplyVectors), dim3(NBLOCKS),dim3(NTHREADS), 0, 0, 3*NN, Acc_d, R3_d, MdInv_d); // TODO: Use CUBLAS function
   // Calculate Unp1 (Unp1 = UnpHalf + dt * Acc)
   hipLaunchKernelGGL(( calculate_Unp1), dim3(NBLOCKS),dim3(NTHREADS), 0, 0, 3*NN, dt, Unp1_d, UnpHalf_d, Acc_d); // TODO: Use CUBLAS function
   //cudaStatus = hipMemcpy(Unp1, Unp1_d, 3*NN * sizeof(double), hipMemcpyDeviceToHost); if(cudaStatus != hipSuccess) { printf("Error107: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
} // End of function step3GPU()
//========================================================================
void calculate_KtimesAcc_prevGPU(void)
//========================================================================
{
   // Computes KtimesAcc_prev = [K] * {Acc_prev}: one CSR SpMV per velocity
   // component (x, y, z), each offset by NN within the packed 3*NN vectors.
   const int NNZM = sparseM_NNZ / 3;   // Nonzeros of one component block of [K]
   double alpha = 1.000000000000000;
   double beta  = 0.000000000000000;   // Overwrite the destination component
   for (int c = 0; c < 3; c++) {
      hipsparseDcsrmv(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, NN, NN, NNZM, &alpha, descr,
                      K_d, MrowStarts_d, Mcol_d, Acc_prev_d + c*NN, &beta, KtimesAcc_prev_d + c*NN);
   }
} // End of function calculate_KtimesAcc_prevGPU()
//========================================================================
bool checkConvergenceGPU(void)
//========================================================================
{
   // Checks convergence of the nonlinear iterations: the normalized change
   // of velocity (|Unp1 - Unp1_prev| / |Unp1|) and of pressure
   // (|Pnp1 - Pnp1_prev| / |Pnp1|) must both fall below the global
   // tolerance. Returns true when converged.
   // Improvement: one scratch buffer sized for the larger of the two
   // difference vectors replaces the original pair of hipMalloc/hipFree
   // calls, halving the per-call allocation cost.
   double norm1, norm2, normalizedNorm1, normalizedNorm2;
   double *dummy_d;   // Scratch buffer for both difference vectors
   const int maxSize = (3*NN > NNp) ? 3*NN : NNp;
   cudaStatus = hipMalloc((void**)&dummy_d, maxSize * sizeof(double)); if(cudaStatus != hipSuccess) { printf("Error108: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   // Normalized norm for velocity
   hipblasDnrm2(handleCUBLAS, 3*NN, Unp1_d, 1, &norm1);                                      // norm1 = sqrt(sum(Unp1(i)*Unp1(i)))
   hipLaunchKernelGGL(( subtractVectors), dim3(NBLOCKS),dim3(NTHREADS), 0, 0, 3*NN, dummy_d, Unp1_d, Unp1_prev_d);
   hipblasDnrm2(handleCUBLAS, 3*NN, dummy_d, 1, &norm2);                                     // norm2 = sqrt(sum((Unp1(i)-Unp_prev(i))^2))
   normalizedNorm1 = norm2 / norm1;
   // Normalized norm for pressure (reuses the same scratch buffer)
   hipblasDnrm2(handleCUBLAS, NNp, Pnp1_d, 1, &norm1);                                       // norm1 = sqrt(sum(Pnp1(i)*Pnp1(i)))
   hipLaunchKernelGGL(( subtractVectors), dim3(NBLOCKS),dim3(NTHREADS), 0, 0, NNp, dummy_d, Pnp1_d, Pnp1_prev_d);
   hipblasDnrm2(handleCUBLAS, NNp, dummy_d, 1, &norm2);                                      // norm2 = sqrt(sum((Pnp1(i)-Pnp_prev(i))^2))
   normalizedNorm2 = norm2 / norm1;
   hipFree(dummy_d);
   // Converged only when both normalized norms are below the tolerance.
   return (normalizedNorm1 < tolerance && normalizedNorm2 < tolerance);
} // End of function checkConvergenceGPU()
//========================================================================
void printMonitorDataGPU(int iter)
//========================================================================
{
   // Copies the current velocity and pressure fields back to the host and
   // prints one line of monitor-point data (time step, iteration, time,
   // u/v/w and p at the monitor node, wall clock time, max acceleration).
   // TODO: Avoid the following device-to-host copies and print monitor data using device variables.
   //double *monitorData_d;
   //hipMalloc((void**)&monitorData_d, 4 * sizeof(double));
   //getMonitorData<<<1,1>>>(monPoint, NN, Un_d, Pn_d, monitorData_d);
   //double *monitorData;
   //monitorData = new double[4];
   // NOTE(review): the full 3*NN and NNp arrays are copied although only 4
   // values are printed — the commented-out getMonitorData path above would
   // avoid that.
   cudaStatus = hipMemcpy(Un, Un_d, 3*NN * sizeof(double), hipMemcpyDeviceToHost); if(cudaStatus != hipSuccess) { printf("Error110: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   cudaStatus = hipMemcpy(Pn, Pn_d, NNp * sizeof(double), hipMemcpyDeviceToHost); if(cudaStatus != hipSuccess) { printf("Error111: %s\n", hipGetErrorString(cudaStatus)); cin >> dummyUserInput; }
   printf("%6d %6d %10.5f %12.5f %12.5f %12.5f %12.5f %12.5f %12.5f\n",
          timeN, iter, timeT, Un[monPoint], Un[monPoint + NN], Un[monPoint + 2*NN], Pn[monPoint], wallClockTimeCurrentTimeStep, maxAcc);
} // End of function printMonitorDataGPU()
//========================================================================
bool checkConvergenceInTimeGPU(void)
//========================================================================
{
   // Steady-state check: maxAcc = max over all 3*NN velocity DOFs of the
   // per-DOF value produced by weighted_absolute_difference (the absolute
   // change |Unp1 - Un| over the time step). Returns true when the largest
   // change is within convergenceCriteria.
   // Fix: the max-reduction is now seeded with 0.0 (a double) instead of
   // -1.f. Every transformed value is a fabs() result (>= 0), so the result
   // is unchanged for a non-empty range, while the float/double literal
   // mixing in a maximum<double> reduction is removed.
   double oneOverdt = 1.0000000000000000 / dt;
   // Wrap the raw device pointers with thrust::device_ptr
   thrust::device_ptr<double> thrust_Un_dbeg(Un_d);
   thrust::device_ptr<double> thrust_Un_dend   = thrust_Un_dbeg + 3*NN;
   thrust::device_ptr<double> thrust_Unp1_dbeg(Unp1_d);
   thrust::device_ptr<double> thrust_Unp1_dend = thrust_Unp1_dbeg + 3*NN;
   // Max-reduce the per-DOF absolute differences on the device
   maxAcc = transform_reduce(make_zip_iterator(make_tuple(thrust_Unp1_dbeg, thrust_Un_dbeg)),
                             make_zip_iterator(make_tuple(thrust_Unp1_dend, thrust_Un_dend)),
                             weighted_absolute_difference(oneOverdt),
                             0.0,
                             maximum<double>());
   // Converged in time when the largest change is within the criterion.
   return (maxAcc <= convergenceCriteria);
} // End of function checkConvergenceInTimeGPU()
#endif //USECUDA
| af2fcb2f4b4681b12d2ced5eac9b4decdbdaa10e.cu | // TODO: After the run is finished the executable terminates with the following error. // TODO
// terminate called after throwing an instance of 'thrust::system::system_error'
// what(): unload of CUDA runtime failed
// Aborted (core dumped)
// Everything in this file is used only if compiled with USEDUDA defined
#ifdef USECUDA
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include "cublas_v2.h"
#include "cusparse_v2.h"
#include <cusp/coo_matrix.h>
#include <cusp/csr_matrix.h>
#include <cusp/print.h>
#include <cusp/monitor.h>
#include <cusp/krylov/cg.h>
#include <cusp/precond/diagonal.h>
#include <cusp/precond/ainv.h>
#include <cusp/precond/aggregation/smoothed_aggregation.h>
#include <cusp/transpose.h>
#include <cusp/multiply.h>
#include <cusp/elementwise.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
int NBLOCKS = 128;
int NTHREADS = 1024;
#define NGP_SIZE (8)
#define NENv_SIZE (27)
#define DIM_SIZE (3)
using namespace std;
using namespace thrust;
extern int NN, NNp, sparseM_NNZ, sparseG_NNZ, *sparseMrowStarts, *sparseGrowStarts, *sparseMrow, *sparseMcol, BCnVelNodes, zeroPressureNode, timeN, monPoint;
extern int ** BCvelNodes;
extern double dt, timeT, t_ini, tolerance, *sparseAvalue, *sparseKvalue, *UnpHalf_prev, *Pn, *R1, *R11, *R12, *R13, *R2, *Un, *UnpHalf, *KtimesAcc_prev, *Acc_prev, *Acc, *MdInv, *MdOrigInv, *Unp1, *Pnp1, *Pdot;
extern double convergenceCriteria, maxAcc;
extern double wallClockTimeCurrentTimeStep;
extern char dummyUserInput;
extern string whichProblem;
extern double *K_d, *A_d, *G1_d, *G2_d, *G3_d;
extern double *MdInv_d, *Un_d, *Pn_d, *Pnp1_d, *Pnp1_prev_d;
extern double *MdOrigInv_d;
extern double *R1_d, *R2_d, *R3_d;
extern double *UnpHalf_d, *Pdot_d, *Acc_d, *Unp1_d, *Unp1_prev_d;
extern double *UnpHalf_prev_d;
extern double *Acc_prev_d;
extern double *KtimesAcc_prev_d;
extern int *Mcol_d, *Mrow_d, *MrowStarts_d, *Gcol_d, *Grow_d, *GrowStarts_d;
extern int *BCvelNodes_d;
extern cusparseHandle_t handle;
extern cusparseMatDescr_t descr;
extern cublasHandle_t handleCUBLAS;
extern cudaError_t cudaStatus;
extern cusparseSolveAnalysisInfo_t analysisInfo1, analysisInfo2;
extern int *sparseGrow, *sparseGcol;
extern double *sparseG1value, *sparseG2value, *sparseG3value;
extern size_t freeGPUmemory, totalGPUmemory; // To measure total and free GPU memory
cusp::csr_matrix<int, double, cusp::device_memory> Z_CUSP_CSR_d;
extern bool PRINT_TIMES;
extern double getHighResolutionTime(int, double);
extern void createTecplot();
extern int NE, NGP, NENv;
extern int *NmeshColors, *meshColors, *elementsOfColor;
extern int nActiveColors;
extern int *LtoGvel_1d;
extern int *sparseMapM_1d;
extern double *Sv_1d;
extern double *gDSv_1d, *GQfactor_1d;
extern int *NmeshColors_d, *meshColors_d, *elementsOfColor_d;
extern int *LtoGvel_1d_d;
extern int *sparseMapM_1d_d;
extern double *Sv_1d_d;
extern double *gDSv_1d_d, *GQfactor_1d_d;
struct weighted_absolute_difference // used at steady state convergence check
{
   // Unary functor for thrust::transform_reduce: given a (Unp1, Un) pair it
   // returns |Unp1 - Un|, the absolute change of one velocity DOF over the
   // time step.
   // NOTE(review): oneOverdt is stored but never used in operator(), so the
   // result is NOT scaled by 1/dt despite the "weighted" name — confirm
   // whether fabs(accDummy) * oneOverdt was intended (changing it would
   // rescale maxAcc relative to convergenceCriteria).
   double oneOverdt;   // 1/dt supplied by the caller (currently unused)
   weighted_absolute_difference(const double oneOverdt)
      : oneOverdt(oneOverdt)
   {}
   __host__ __device__
   double operator()(thrust::tuple<double,double> t)
   {
      double thrust_Unp1_d = thrust::get<0>(t);   // new-step velocity value
      double thrust_Un_d = thrust::get<1>(t);     // old-step velocity value
      double accDummy;
      accDummy = thrust_Unp1_d - thrust_Un_d;     // raw change over the step
      return fabs(accDummy);
   }
};
// double *UnGPU;
//========================================================================
void selectCUDAdevice()
//========================================================================
{
   // Lists the CUDA devices on this machine, selects the one to run on
   // (the only device if there is just one, otherwise the last reported
   // device) and prints the total/free device memory.
   cudaDeviceProp prop;
   int nDevices;
   cout << "Available CUDA devices are" << endl;
   cudaGetDeviceCount(&nDevices);
   for (int i = 0; i < nDevices; i++) {
      cudaGetDeviceProperties(&prop, i);
      printf("  %d: %s\n", i, prop.name);
   }
   // Selection policy (TODO: this will not work on every machine).
   const int selectedDevice = (nDevices == 1) ? 0 : nDevices - 1;
   cudaSetDevice(selectedDevice);
   cout << "\nDevice " << selectedDevice << " is selected.\n";
   // Report device memory before the large solver allocations are made.
   cudaMemGetInfo(&freeGPUmemory, &totalGPUmemory);
   cout << "  Total GPU memory = " << totalGPUmemory << endl;
   cout << "  Free GPU memory = " << freeGPUmemory << endl << endl;
}  // End of function selectCUDAdevice()
//========================================================================
void initializeAndAllocateGPU()
//========================================================================
{
// Do the necessary memory allocations for the GPU. Apply the initial
// condition or read the restart file.
handle = 0;
descr = 0;
// Initialize cusparse library
cusparseCreate(&handle);
cublasCreate(&handleCUBLAS);
// Create and setup matrix descriptor
cusparseCreateMatDescr(&descr);
cusparseSetMatType(descr,CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descr,CUSPARSE_INDEX_BASE_ZERO);
int NNZM = sparseM_NNZ / 3;
int NNZG = sparseG_NNZ / 3;
cudaStatus = cudaMalloc((void**)&K_d, NNZM * sizeof(double)); if(cudaStatus != cudaSuccess) { printf("Error01: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
//cudaStatus = cudaMalloc((void**)&A_d, NNZM * sizeof(double)); if(cudaStatus != cudaSuccess) { printf("Error02: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMalloc((void**)&G1_d, NNZG * sizeof(double)); if(cudaStatus != cudaSuccess) { printf("Error03: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMalloc((void**)&G2_d, NNZG * sizeof(double)); if(cudaStatus != cudaSuccess) { printf("Error04: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMalloc((void**)&G3_d, NNZG * sizeof(double)); if(cudaStatus != cudaSuccess) { printf("Error05: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMalloc((void**)&UnpHalf_prev_d, 3*NN * sizeof(double)); if(cudaStatus != cudaSuccess) { printf("Error06: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMalloc((void**)&KtimesAcc_prev_d, 3*NN * sizeof(double)); if(cudaStatus != cudaSuccess) { printf("Error07: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMalloc((void**)&Acc_d, 3*NN * sizeof(double)); if(cudaStatus != cudaSuccess) { printf("Error08: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMalloc((void**)&Acc_prev_d, 3*NN * sizeof(double)); if(cudaStatus != cudaSuccess) { printf("Error09: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMalloc((void**)&Pn_d, NNp * sizeof(double)); if(cudaStatus != cudaSuccess) { printf("Error10: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMalloc((void**)&Pnp1_d, NNp * sizeof(double)); if(cudaStatus != cudaSuccess) { printf("Error11: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMalloc((void**)&Pnp1_prev_d, NNp * sizeof(double)); if(cudaStatus != cudaSuccess) { printf("Error12: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMalloc((void**)&Pdot_d, NNp * sizeof(double)); if(cudaStatus != cudaSuccess) { printf("Error13: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMalloc((void**)&Mrow_d, NNZM * sizeof(int)); if(cudaStatus != cudaSuccess) { printf("Error14: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMalloc((void**)&Mcol_d, NNZM * sizeof(int)); if(cudaStatus != cudaSuccess) { printf("Error15: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMalloc((void**)&MrowStarts_d, (NN+1) * sizeof(int)); if(cudaStatus != cudaSuccess) { printf("Error16: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMalloc((void**)&Grow_d, NNZG * sizeof(int)); if(cudaStatus != cudaSuccess) { printf("Error17: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMalloc((void**)&Gcol_d, NNZG * sizeof(int)); if(cudaStatus != cudaSuccess) { printf("Error18: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMalloc((void**)&GrowStarts_d, (NN+1) * sizeof(int)); if(cudaStatus != cudaSuccess) { printf("Error19: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMalloc((void**)&BCvelNodes_d, BCnVelNodes * sizeof(int)); if(cudaStatus != cudaSuccess) { printf("Error20: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMalloc((void**)&MdOrigInv_d, 3*NN * sizeof(double)); if(cudaStatus != cudaSuccess) { printf("Error21: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMalloc((void**)&MdInv_d, 3*NN * sizeof(double)); if(cudaStatus != cudaSuccess) { printf("Error22: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMalloc((void**)&Un_d, 3*NN * sizeof(double)); if(cudaStatus != cudaSuccess) { printf("Error23: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMalloc((void**)&Unp1_d, 3*NN * sizeof(double)); if(cudaStatus != cudaSuccess) { printf("Error24: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMalloc((void**)&Unp1_prev_d, 3*NN * sizeof(double)); if(cudaStatus != cudaSuccess) { printf("Error25: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMalloc((void**)&UnpHalf_d, 3*NN * sizeof(double)); if(cudaStatus != cudaSuccess) { printf("Error26: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMalloc((void**)&R1_d, 3*NN * sizeof(double)); if(cudaStatus != cudaSuccess) { printf("Error27: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMalloc((void**)&R2_d, NNp * sizeof(double)); if(cudaStatus != cudaSuccess) { printf("Error28: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMalloc((void**)&R3_d, 3*NN * sizeof(double)); if(cudaStatus != cudaSuccess) { printf("Error29: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMemcpy(MrowStarts_d, sparseMrowStarts, (NN+1) * sizeof(int), cudaMemcpyHostToDevice); if(cudaStatus != cudaSuccess) { printf("Error30: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMemcpy(K_d, sparseKvalue, NNZM * sizeof(double), cudaMemcpyHostToDevice); if(cudaStatus != cudaSuccess) { printf("Error31: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMemcpy(Mrow_d, sparseMrow, NNZM * sizeof(int), cudaMemcpyHostToDevice); if(cudaStatus != cudaSuccess) { printf("Error32: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMemcpy(Mcol_d, sparseMcol, NNZM * sizeof(int), cudaMemcpyHostToDevice); if(cudaStatus != cudaSuccess) { printf("Error33: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMemcpy(G1_d, sparseG1value, NNZG * sizeof(double), cudaMemcpyHostToDevice); if(cudaStatus != cudaSuccess) { printf("Error34: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMemcpy(G2_d, sparseG2value, NNZG * sizeof(double), cudaMemcpyHostToDevice); if(cudaStatus != cudaSuccess) { printf("Error35: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMemcpy(G3_d, sparseG3value, NNZG * sizeof(double), cudaMemcpyHostToDevice); if(cudaStatus != cudaSuccess) { printf("Error36: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMemcpy(Grow_d, sparseGrow, NNZG * sizeof(int), cudaMemcpyHostToDevice); if(cudaStatus != cudaSuccess) { printf("Error37: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMemcpy(Gcol_d, sparseGcol, NNZG * sizeof(int), cudaMemcpyHostToDevice); if(cudaStatus != cudaSuccess) { printf("Error38: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMemcpy(GrowStarts_d, sparseGrowStarts, (NN+1) * sizeof(int), cudaMemcpyHostToDevice); if(cudaStatus != cudaSuccess) { printf("Error39: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMemcpy(MdInv_d, MdInv, 3*NN * sizeof(double), cudaMemcpyHostToDevice); if(cudaStatus != cudaSuccess) { printf("Error40: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMemcpy(MdOrigInv_d, MdOrigInv, 3*NN * sizeof(double), cudaMemcpyHostToDevice); if(cudaStatus != cudaSuccess) { printf("Error41: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
int LARGE = 30;
cudaStatus = cudaMalloc((void**)&NmeshColors_d, LARGE * sizeof(int)); if(cudaStatus != cudaSuccess) { printf("Error42: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
//cudaStatus = cudaMalloc((void**)&meshColors_d, NE * sizeof(int)); if(cudaStatus != cudaSuccess) { printf("Error43: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMalloc((void**)&elementsOfColor_d, NE * sizeof(int)); if(cudaStatus != cudaSuccess) { printf("Error43: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
//cudaStatus = cudaMalloc((void**)&sparseMapM_1d_d, NE*NENv*NENv * sizeof(int)); if(cudaStatus != cudaSuccess) { printf("Error44: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMalloc((void**)&LtoGvel_1d_d, NE*NENv*3 * sizeof(int)); if(cudaStatus != cudaSuccess) { printf("Error45: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMalloc((void**)&Sv_1d_d, NGP*NENv * sizeof(double)); if(cudaStatus != cudaSuccess) { printf("Error46: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMalloc((void**)&gDSv_1d_d, NE*NGP*NENv*3 * sizeof(double)); if(cudaStatus != cudaSuccess) { printf("Error47: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMalloc((void**)&GQfactor_1d_d, NE*NGP * sizeof(double)); if(cudaStatus != cudaSuccess) { printf("Error48: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMemcpy(NmeshColors_d, NmeshColors, LARGE * sizeof(int), cudaMemcpyHostToDevice); if(cudaStatus != cudaSuccess) { printf("Error49: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
//cudaStatus = cudaMemcpy(meshColors_d, meshColors, NE * sizeof(int), cudaMemcpyHostToDevice); if(cudaStatus != cudaSuccess) { printf("Error50: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMemcpy(elementsOfColor_d, elementsOfColor, NE * sizeof(int), cudaMemcpyHostToDevice); if(cudaStatus != cudaSuccess) { printf("Error50: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
//cudaStatus = cudaMemcpy(sparseMapM_1d_d, sparseMapM_1d, NE*NENv*NENv * sizeof(int), cudaMemcpyHostToDevice); if(cudaStatus != cudaSuccess) { printf("Error51: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMemcpy(LtoGvel_1d_d, LtoGvel_1d, NE*NENv*3 * sizeof(int), cudaMemcpyHostToDevice); if(cudaStatus != cudaSuccess) { printf("Error52: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMemcpy(Sv_1d_d, Sv_1d, NGP*NENv * sizeof(double), cudaMemcpyHostToDevice); if(cudaStatus != cudaSuccess) { printf("Error53: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMemcpy(gDSv_1d_d, gDSv_1d, NE*NGP*NENv*3 * sizeof(double), cudaMemcpyHostToDevice); if(cudaStatus != cudaSuccess) { printf("Error54: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMemcpy(GQfactor_1d_d, GQfactor_1d, NE*NGP * sizeof(double), cudaMemcpyHostToDevice); if(cudaStatus != cudaSuccess) { printf("Error55: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
// Extract the 1st column of BCvelNodes and send it to the device.
int *dummy;
dummy = new int[BCnVelNodes];
for(int i = 0; i < BCnVelNodes; i++) {
dummy[i] = BCvelNodes[i][0];
}
cudaStatus = cudaMemcpy(BCvelNodes_d, dummy, BCnVelNodes * sizeof(int), cudaMemcpyHostToDevice); if(cudaStatus != cudaSuccess) { printf("Error42: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
delete[] dummy;
// Send Un to the GPU
cudaStatus = cudaMemcpy(Un_d, Un, 3*NN * sizeof(double), cudaMemcpyHostToDevice); if(cudaStatus != cudaSuccess) { printf("Error43: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMemcpy(Pn_d, Pn, NNp * sizeof(double), cudaMemcpyHostToDevice); if(cudaStatus != cudaSuccess) { printf("Error44: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
// Initialize Pdot
cudaMemset(Pdot_d, 0, NNp*sizeof(double));
cudaMemGetInfo(&freeGPUmemory, &totalGPUmemory);
cout << endl;
cout << "After initializeAndAllocateGPU() function, free GPU memory = " << freeGPUmemory << endl;
} // End of function initializeAndAllocateGPU()
//========================================================================
void calculateZ_CUSP()
//========================================================================
{
   // Uses CUSP on the HOST to calculate [Z] = Gt * MdOrigInv * G, converts the
   // result to CSR and copies it to the device (Z_CUSP_CSR_d) for the CG solve
   // of step 2. MdOrigInv is diagonal, so the product Gi' * (MdOrigInv * Gi) is
   // formed component by component (i = 1, 2, 3) and the three are accumulated.
   // TODO: Try to minimize host-device memory transfers
   // TODO: Use "views" property of CUSP to minimize device memory usage.

   int NNZG = sparseG_NNZ/3;   // NNZ of a single component (G1, G2 or G3)

   // Define G1, G2, G3 as COO matrices on the HOST
   cusp::coo_matrix<int, double, cusp::host_memory> G1_CUSP(NN, NNp, NNZG);
   cusp::coo_matrix<int, double, cusp::host_memory> G2_CUSP(NN, NNp, NNZG);
   cusp::coo_matrix<int, double, cusp::host_memory> G3_CUSP(NN, NNp, NNZG);

   // Copy COO vectors of G1, G2 and G3 to CUSP matrices. All three components
   // share the same sparsity pattern; only the values differ.
   thrust::copy(sparseGrow, sparseGrow + NNZG, G1_CUSP.row_indices.begin());
   thrust::copy(sparseGcol, sparseGcol + NNZG, G1_CUSP.column_indices.begin());
   thrust::copy(sparseG1value, sparseG1value + NNZG, G1_CUSP.values.begin());

   thrust::copy(sparseGrow, sparseGrow + NNZG, G2_CUSP.row_indices.begin());
   thrust::copy(sparseGcol, sparseGcol + NNZG, G2_CUSP.column_indices.begin());
   thrust::copy(sparseG2value, sparseG2value + NNZG, G2_CUSP.values.begin());

   thrust::copy(sparseGrow, sparseGrow + NNZG, G3_CUSP.row_indices.begin());
   thrust::copy(sparseGcol, sparseGcol + NNZG, G3_CUSP.column_indices.begin());
   thrust::copy(sparseG3value, sparseG3value + NNZG, G3_CUSP.values.begin());

   // Define transpose of G matrices on the HOST.
   cusp::coo_matrix<int, double, cusp::host_memory> G1t_CUSP(NNp, NN, NNZG);
   cusp::coo_matrix<int, double, cusp::host_memory> G2t_CUSP(NNp, NN, NNZG);
   cusp::coo_matrix<int, double, cusp::host_memory> G3t_CUSP(NNp, NN, NNZG);

   cusp::transpose(G1_CUSP, G1t_CUSP);
   cusp::transpose(G2_CUSP, G2t_CUSP);
   cusp::transpose(G3_CUSP, G3t_CUSP);

   // Multiply G1, G2 and G3 with the diagonal MdOrigInv matrix. Row scaling
   // changes only the values, not the sparsity pattern.
   double *G1mod, *G2mod, *G3mod;   // values of G scaled by the diagonal MdOrigInv
   G1mod = new double[NNZG];
   G2mod = new double[NNZG];
   G3mod = new double[NNZG];

   for (int i = 0; i < NNZG; i++) {
      G1mod[i] = sparseG1value[i] * MdOrigInv[sparseGrow[i]];
      G2mod[i] = sparseG2value[i] * MdOrigInv[sparseGrow[i]];
      G3mod[i] = sparseG3value[i] * MdOrigInv[sparseGrow[i]];
   }

   // Overwrite the values of the CUSP matrices with the scaled ones.
   thrust::copy(G1mod, G1mod + NNZG, G1_CUSP.values.begin());
   thrust::copy(G2mod, G2mod + NNZG, G2_CUSP.values.begin());
   thrust::copy(G3mod, G3mod + NNZG, G3_CUSP.values.begin());

   // Scratch arrays are no longer needed. (The original version leaked these.)
   delete[] G1mod;
   delete[] G2mod;
   delete[] G3mod;

   // Multiply Gt * Gmod matrices one by one. First store the results to a
   // dummy matrix and then add them to Z_CUSP_COO.
   cusp::coo_matrix<int, double, cusp::host_memory> dummy;
   cusp::coo_matrix<int, double, cusp::host_memory> Z_CUSP_COO;

   cusp::multiply(G1t_CUSP, G1_CUSP, dummy);
   Z_CUSP_COO = dummy;
   cusp::multiply(G2t_CUSP, G2_CUSP, dummy);
   cusp::add(Z_CUSP_COO, dummy, Z_CUSP_COO);
   cusp::multiply(G3t_CUSP, G3_CUSP, dummy);
   cusp::add(Z_CUSP_COO, dummy, Z_CUSP_COO);

   // Convert Z_CUSP_COO into CSR format
   cusp::csr_matrix<int, double, cusp::host_memory> Z_CUSP_CSR;
   Z_CUSP_CSR = Z_CUSP_COO;

   // Modify Z_CUSP_CSR for known pressures: scale the diagonal entry of the
   // pinned node so that CG strongly enforces Pdot ~ 0 there.
   int LARGE = 1000;   // TODO: How important is this LARGE value in solution accuracy and convergence rate of CG?
   if (zeroPressureNode > 0) {   // If node is negative it means we do not set pressure to zero at any node.
                                 // NOTE(review): node 0 can never be pinned by this test — confirm node numbering.
      // Multiply Z[node][node] by LARGE
      for (int j = Z_CUSP_CSR.row_offsets[zeroPressureNode]; j < Z_CUSP_CSR.row_offsets[zeroPressureNode + 1]; j++) {   // Go through row "zeroPressureNode" of [Z].
         if (Z_CUSP_CSR.column_indices[j] == zeroPressureNode) {   // Determine the position of the diagonal entry in column "zeroPressureNode"
            Z_CUSP_CSR.values[j] = Z_CUSP_CSR.values[j] * LARGE;
            break;
         }
      }
   }

   // Copy [Z] to the device in CSR format.
   Z_CUSP_CSR_d = Z_CUSP_CSR;

   // CONTROL
   //cusp::print(Z_CUSP_CSR);

   cout << endl << " NNZ of Z_CUSP_CSR = " << Z_CUSP_CSR.num_entries << endl;

   cudaMemGetInfo(&freeGPUmemory, &totalGPUmemory);
   cout << endl;
   cout << "At the end of calculateZ_CUSP() function, free GPU memory = " << freeGPUmemory << endl;

   /*
   // Write Z_CUSP_CSR matrix to a file for further use by the MKL_CG solver in a different run.
   ZcsrFile = fopen((whichProblem + ".zCSR").c_str(), "wb");
   int *rowOffsets, *colIndices;
   double *values;
   rowOffsets = new int[NNp+1];
   colIndices = new int[Z_CUSP_CSR.num_entries];
   values = new double[Z_CUSP_CSR.num_entries];
   for (int i = 0; i < NNp + 1; i++) {
      rowOffsets[i] = Z_CUSP_CSR.row_offsets[i];
   }
   for (int i = 0; i < Z_CUSP_CSR.num_entries; i++) {
      colIndices[i] = Z_CUSP_CSR.column_indices[i];
      values[i] = Z_CUSP_CSR.values[i];
   }
   fwrite(&Z_CUSP_CSR.num_entries, sizeof(int), size_t(1), ZcsrFile);
   fwrite(rowOffsets, sizeof(int), size_t(NNp+1), ZcsrFile);
   fwrite(colIndices, sizeof(int), size_t(Z_CUSP_CSR.num_entries), ZcsrFile);
   fwrite(values, sizeof(double), size_t(Z_CUSP_CSR.num_entries), ZcsrFile);
   fclose(ZcsrFile);
   delete[] rowOffsets;
   delete[] colIndices;
   delete[] values;
   */
} // End of function calculateZ_CUSP()
//========================================================================
void CUSP_CG_solver()
//========================================================================
{
// Solve the system of step 2 [Z]{Pdot}={R2} using CG.
// [Z] is already on the device (Z_CUSP_CSR_d, built by calculateZ_CUSP());
// R2_d and Pdot_d are raw device arrays of length NNp filled elsewhere.
// Allocate right hand side vector RHS_d and solution vector soln_d in device memory.
// Wrapping the raw pointers in device_ptr / array1d_view lets CUSP operate
// on them directly with no extra copy or allocation.
thrust::device_ptr<double> wrapped_R2_d(R2_d);
thrust::device_ptr<double> wrapped_Pdot_d(Pdot_d);
typedef typename cusp::array1d_view< thrust::device_ptr<double> > DeviceValueArrayView;
DeviceValueArrayView RHS_d (wrapped_R2_d, wrapped_R2_d + NNp);
DeviceValueArrayView soln_d (wrapped_Pdot_d, wrapped_Pdot_d + NNp);
// Set monitor: at most 1000 iterations, relative residual tolerance 1e-6.
cusp::default_monitor<double> monitor(RHS_d, 1000, 1e-6);
//cusp::verbose_monitor<double> monitor(RHS_d, 1000, 1e-12);
// Set preconditioner (Jacobi/diagonal; alternatives kept below for reference)
cusp::precond::diagonal<double, cusp::device_memory> M(Z_CUSP_CSR_d);
//cusp::identity_operator<double, cusp::device_memory> M(NNp, NNp);
//cusp::precond::scaled_bridson_ainv<double, cusp::device_memory> M(Z_CUSP_CSR_d, .1);
//cusp::precond::aggregation::smoothed_aggregation<int, double, cusp::device_memory> M(Z_CUSP_CSR_d);
// Run preconditioned CG; the solution is written straight into Pdot_d.
cusp::krylov::cg(Z_CUSP_CSR_d, soln_d, RHS_d, monitor, M);
if (PRINT_TIMES) cout << "CUSP CG solver made " << monitor.iteration_count() << " iterations. Residual norm is " << monitor.residual_norm() << endl;
// CONTROL
//for(int i = 0; i < NNp; i++) {
// cout << Pdot[i] << endl;
//}
} // End of function CUSP_CG_solver()
//========================================================================
__global__ void getMonitorData(int monPoint, int NN, double *Un_d, double *Pn_d, double *monitorData_d)
//========================================================================
{
   // Pack u, v, w (stored in Un_d as three consecutive blocks of length NN)
   // and p at the monitor node into monitorData_d[0..3] so the host can
   // fetch all four values with a single copy. Intended for a <<<1,1>>> launch.
   for (int c = 0; c < 3; c++) {
      monitorData_d[c] = Un_d[monPoint + c*NN];
   }
   monitorData_d[3] = Pn_d[monPoint];
}
//========================================================================
__global__ void applyVelBC(int Nbc, int N, int *velBCdata, double *A)
//========================================================================
{
   // Zero the u, v and w entries of A at every velocity-BC node. A holds a
   // velocity *difference* between two iterations, so a known Dirichlet value
   // translates into zero change. Grid-stride loop over the Nbc BC nodes.
   const int stride = blockDim.x * gridDim.x;
   for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < Nbc; i += stride) {
      const int node = velBCdata[i];   // node at which this velocity BC is specified
      A[node]       = 0.0;
      A[node + N]   = 0.0;
      A[node + 2*N] = 0.0;
   }
}
//========================================================================
__global__ void applyPresBC(int zeroPressureNode, double *R2_d)
//========================================================================
{
   // Zero the pressure-difference RHS entry at the pinned-pressure node.
   // A non-positive node index means no node is pinned. Intended for a
   // <<<1,1>>> launch, so no thread indexing is needed.
   if (zeroPressureNode <= 0) return;
   R2_d[zeroPressureNode] = 0.0;   // RHS for the pressure *difference*, not pressure itself
}
//========================================================================
__global__ void calculate_UnpHalf(int N, double dt, double *UnpHalf_d, double *Un_d, double *R1_d, double *MdInv_d)
//========================================================================
{
   // Half-step velocity update: UnpHalf = Un + dt * MdInv .* R1 (element-wise).
   // Grid-stride loop so any launch configuration covers all N entries.
   const int stride = blockDim.x * gridDim.x;
   for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < N; i += stride) {
      UnpHalf_d[i] = Un_d[i] + dt * R1_d[i] * MdInv_d[i];
   }
}
//========================================================================
__global__ void calculate_step2dummyV1(int N, double d, double *A, double *B)
//========================================================================
{
   // Element-wise scaling: A = d * B (used for the first-iteration RHS of step 2).
   const int stride = blockDim.x * gridDim.x;
   for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < N; i += stride) {
      A[i] = d * B[i];
   }
}
//========================================================================
__global__ void calculate_step2dummyV2(int N, double d, double *A, double *B, double *C, double *D)
//========================================================================
{
   // Element-wise: A = d * B - C .* D (step-2 RHS for iterations after the first).
   const int stride = blockDim.x * gridDim.x;
   for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < N; i += stride) {
      A[i] = d * B[i] - C[i] * D[i];
   }
}
//========================================================================
__global__ void subtractVectors(int N, double *A, double *B, double *C)
//========================================================================
{
   // Element-wise vector subtraction: A = B - C.
   const int stride = blockDim.x * gridDim.x;
   for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < N; i += stride) {
      A[i] = B[i] - C[i];
   }
}
//========================================================================
__global__ void permuteVector(int N, int *p, double *b, double *x)
//========================================================================
{
   // Scatter b into x through permutation p: x[p[k]] = b[k].
   const int stride = blockDim.x * gridDim.x;
   for (int k = threadIdx.x + blockIdx.x * blockDim.x; k < N; k += stride) {
      x[p[k]] = b[k];
   }
}
//========================================================================
__global__ void permuteVectorBack(int N, int *p, double *b, double *x)
//========================================================================
{
   // Gather b into x through permutation p: x[k] = b[p[k]] (inverse of permuteVector).
   const int stride = blockDim.x * gridDim.x;
   for (int k = threadIdx.x + blockIdx.x * blockDim.x; k < N; k += stride) {
      x[k] = b[p[k]];
   }
}
//========================================================================
__global__ void calculate_Pnp1(int N, double dt, double *Pnp1_d, double *Pn_d, double *Pdot_d)
//========================================================================
{
   // Pressure update: Pnp1 = Pn + dt * Pdot (element-wise, grid-stride loop).
   const int stride = blockDim.x * gridDim.x;
   for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < N; i += stride) {
      Pnp1_d[i] = Pn_d[i] + dt * Pdot_d[i];
   }
}
//========================================================================
__global__ void calculate_Unp1(int N, double dt, double *Unp1_d, double *UnpHalf_d, double *Acc_d)   // TODO: Same AXPY pattern as calculate_Pnp1; could be merged.
//========================================================================
{
   // Velocity update: Unp1 = UnpHalf + dt * Acc (element-wise, grid-stride loop).
   const int stride = blockDim.x * gridDim.x;
   for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < N; i += stride) {
      Unp1_d[i] = UnpHalf_d[i] + dt * Acc_d[i];
   }
}
//========================================================================
__global__ void calculate_R3(int N, double dt, double *A, double *B)
//========================================================================
{
   // In-place AXPY: A -= dt * B (subtracts the dt*KtimesAcc_prev term from R3).
   const int stride = blockDim.x * gridDim.x;
   for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < N; i += stride) {
      A[i] -= dt * B[i];
   }
}
//========================================================================
__global__ void multiplyVectors(int N, double *A, double *B, double *C)
//========================================================================
{
   // Element-wise (Hadamard) product: A = B .* C.
   const int stride = blockDim.x * gridDim.x;
   for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < N; i += stride) {
      A[i] = B[i] * C[i];
   }
}
//========================================================================
__global__ void calcAndAssembleMatrixA(int NE, int NENv, int NGP, int NN, int offsetElements,
                                       int *elementsOfColor,
                                       int *LtoGvel,
                                       double *U, double *U_prev,
                                       double *Sv, double *gDSv, double *GQfactor,
                                       double *R1)
//========================================================================
{
   // One thread block processes one element of the current mesh color.
   // Coloring guarantees that no two concurrently running blocks scatter into
   // the same R1 entries, so the unsynchronized "-=" at the end is race-free
   // across blocks. Launched with 32 threads per block; threads with
   // tid >= NENv (or >= NGP) idle in the corresponding phase.
   //
   // Fix over the original version: __syncthreads() barriers are inserted
   // between the shared-memory phases. Threads read shared entries written by
   // OTHER threads (s_u/s_v/s_w and s_Sv in the GQ-interpolation phase,
   // s_*_GQ in the Ae phase, s_Ae in the R1e phase); without barriers this is
   // a data race — even within a single warp on Volta+ hardware, where
   // independent thread scheduling removes implicit lockstep.

   __shared__ double s_u[NENv_SIZE];
   __shared__ double s_v[NENv_SIZE];
   __shared__ double s_w[NENv_SIZE];
   __shared__ double s_u_prev[NENv_SIZE];
   __shared__ double s_v_prev[NENv_SIZE];
   __shared__ double s_w_prev[NENv_SIZE];
   __shared__ double s_u_GQ[NGP_SIZE];
   __shared__ double s_v_GQ[NGP_SIZE];
   __shared__ double s_w_GQ[NGP_SIZE];
   __shared__ double s_Sv[NGP_SIZE*NENv_SIZE];
   __shared__ double s_gDSv[NGP_SIZE*NENv_SIZE*DIM_SIZE];
   __shared__ double s_Ae[NENv_SIZE*NENv_SIZE];
   __shared__ double s_R1e[3*NENv_SIZE];

   const int tid  = threadIdx.x;
   const int bid  = blockIdx.x;
   const int ebid = elementsOfColor[offsetElements + bid];   // global element id handled by this block

   // Phase 1: gather elemental u, v, w velocity values (current and previous
   // iteration) from the global solution arrays into shared memory.
   if (tid < NENv) {
      const int iLtoGu = LtoGvel[tid + ebid*NENv*3];
      const int iLtoGv = LtoGvel[tid + ebid*NENv*3 + NENv];
      const int iLtoGw = LtoGvel[tid + ebid*NENv*3 + NENv*2];
      s_u[tid] = U[iLtoGu];
      s_v[tid] = U[iLtoGv];
      s_w[tid] = U[iLtoGw];
      s_u_prev[tid] = U_prev[iLtoGu];
      s_v_prev[tid] = U_prev[iLtoGv];
      s_w_prev[tid] = U_prev[iLtoGw];
   }

   // Phase 2: copy shape functions to shared memory (same for all elements).
   if (tid < NENv) {
      for (int k = 0; k < NGP; k++) {
         const int iSv = NENv*k+tid;
         s_Sv[iSv] = Sv[iSv];
      }
   }

   // Phase 3: copy this element's shape-function derivatives to shared memory.
   if (tid < NENv) {
      for (int k = 0; k < NGP; k++) {
         for (int i = 0; i < 3; i++) {
            const int iGDSv1 = ebid*NENv*NGP*3+k*NENv*3+tid*3+i;
            const int iGDSv2 = k*NENv*3+tid*3+i;
            s_gDSv[iGDSv2] = gDSv[iGDSv1];
         }
      }
   }

   // Phase 4: zero the elemental stiffness matrix Ae and elemental RHS R1e.
   if (tid < NENv) {
      for (int i = 0; i < NENv; i++) {
         const int iAe = i*NENv+tid;
         s_Ae[iAe] = 0.00000;
      }
      s_R1e[tid]        = 0.00000;
      s_R1e[tid+NENv]   = 0.00000;
      s_R1e[tid+2*NENv] = 0.00000;
   }

   __syncthreads();   // s_u/s_v/s_w and s_Sv must be visible to all threads below

   // Phase 5: interpolate nodal velocities to the Gauss quadrature points.
   // One thread per GQ point; each reads s_Sv and s_u written by other threads.
   if (tid < NGP) {
      s_u_GQ[tid] = 0.00000;
      s_v_GQ[tid] = 0.00000;
      s_w_GQ[tid] = 0.00000;
      for (int i = 0; i < NENv; i++) {
         const int iSv = tid*NENv+i;
         s_u_GQ[tid] += s_Sv[iSv] * s_u[i];
         s_v_GQ[tid] += s_Sv[iSv] * s_v[i];
         s_w_GQ[tid] += s_Sv[iSv] * s_w[i];
      }
   }

   __syncthreads();   // s_*_GQ written by threads 0..NGP-1, read by all NENv threads below

   // Phase 6: accumulate the elemental convective stiffness matrix.
   // Thread tid owns column tid of s_Ae (entries i*NENv+tid for all i).
   if (tid < NENv) {
      for (int k = 0; k < NGP; k++) {
         const double GQfactorThread = GQfactor[ebid*NGP+k];
         for (int i = 0; i < NENv; i++) {
            const int iGDSv = k*NENv*3+tid*3;
            const int iAe   = i*NENv+tid;
            const int iSv   = k*NENv+i;
            s_Ae[iAe] += (s_u_GQ[k] * s_gDSv[iGDSv] +
                          s_v_GQ[k] * s_gDSv[iGDSv+1] +
                          s_w_GQ[k] * s_gDSv[iGDSv+2]) * s_Sv[iSv] * GQfactorThread;
         }
      }
   }

   __syncthreads();   // thread tid now reads ROW tid of s_Ae, written by the other threads

   // Phase 7: elemental RHS, R1e = Ae * U_prev (one row per thread).
   if (tid < NENv) {
      for (int i = 0; i < NENv; i++) {
         const int iAe = tid*NENv+i;
         s_R1e[tid]        += s_Ae[iAe] * s_u_prev[i];
         s_R1e[tid+NENv]   += s_Ae[iAe] * s_v_prev[i];
         s_R1e[tid+2*NENv] += s_Ae[iAe] * s_w_prev[i];
      }
   }

   // Phase 8: scatter -R1e into the global R1. Each thread reads/writes only
   // its own s_R1e entries; cross-block safety comes from mesh coloring.
   if (tid < NENv) {
      const int iLtoGu = LtoGvel[tid + ebid*NENv*3];
      const int iLtoGv = LtoGvel[tid + ebid*NENv*3 + NENv];
      const int iLtoGw = LtoGvel[tid + ebid*NENv*3 + NENv*2];
      R1[iLtoGu] -= s_R1e[tid];
      R1[iLtoGv] -= s_R1e[tid+NENv];
      R1[iLtoGw] -= s_R1e[tid+2*NENv];
   }
} // End of function calcAndAssembleMatrixA()
//========================================================================
void calculateMatrixAGPU()
//========================================================================
{
// Calculate Ae and multiply them with Ue.
// Assembles the convective contribution into R1_d on the device, launching
// calcAndAssembleMatrixA once per mesh color. Elements of the same color do
// not share nodes, so blocks within one launch can scatter into R1 without
// atomics; colors are processed sequentially.
//int offset_gDSv_1d_d;
//int offset_GQfactor_1d_d;
//int offset_SparseMapM_1d_d;
int offsetElements = 0;   // running start index into elementsOfColor_d
int nBlocksColor;
int nThreadsPerBlock = 32;   // one block per element; kernel assumes >= NENv and >= NGP threads
//cudaMemset(A_d, 0, sparseM_NNZ/3*sizeof(double));
cudaMemset(R1_d, 0, 3*NN*sizeof(double));
for (int color = 0; color < nActiveColors; color++) {
nBlocksColor = NmeshColors[color];
//cout << endl;
//cout << "offset_" << NmeshColors[color] << " = " << offsetElements << endl;
calcAndAssembleMatrixA<<<nBlocksColor,nThreadsPerBlock>>>(NE, NENv, NGP, NN, offsetElements,
elementsOfColor_d,
LtoGvel_1d_d,
Un_d, UnpHalf_prev_d,
Sv_1d_d, gDSv_1d_d, GQfactor_1d_d,
R1_d);
offsetElements += NmeshColors[color];
}
} // End of function calculateMatrixAGPU()
//========================================================================
void step1GPUpart(int iter)
//========================================================================
{
// CUSPARSE Reference: docs.nvidia.com/cuda/cusparse/index.html#appendix-b-cusparse-library-c---example
// Calculate the RHS vector of step 1.
// R1 = - K * UnpHalf_prev - A * UnpHalf_prev - G * Pn;
// The "- A * UnpHalf_prev" part is presumably already accumulated in R1_d by
// calculateMatrixAGPU() — beta = 1 below makes every csrmv ADD onto R1_d.
// TODO confirm call order with the time-stepping loop. "iter" is unused here.
// NOTE(review): cusparseDcsrmv was deprecated and removed in CUDA 11+;
// migrating to cusparseSpMV will be needed for newer toolkits.
int NNZM = sparseM_NNZ / 3;   // NNZ of one velocity-component block of [K]
int NNZG = sparseG_NNZ / 3;   // NNZ of one component of [G]
double alpha = -1.000000000000000;
double beta = 1.000000000000000;
// u, v and w components are stored back-to-back in 3*NN arrays, so the same
// NNxNN block of [K] is applied three times with shifted in/out pointers.
cusparseDcsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, NN, NN, NNZM, &alpha, descr, K_d, MrowStarts_d, Mcol_d, UnpHalf_prev_d, &beta, R1_d); // Part of (- K * UnpHalf_prev)
cusparseDcsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, NN, NN, NNZM, &alpha, descr, K_d, MrowStarts_d, Mcol_d, UnpHalf_prev_d + NN, &beta, R1_d + NN); // Part of (- K * UnpHalf_prev)
cusparseDcsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, NN, NN, NNZM, &alpha, descr, K_d, MrowStarts_d, Mcol_d, UnpHalf_prev_d + 2*NN, &beta, R1_d + 2*NN); // Part of (- K * UnpHalf_prev)
cusparseDcsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, NN, NNp, NNZG, &alpha, descr, G1_d, GrowStarts_d, Gcol_d, Pn_d, &beta, R1_d); // Part of (- G1 * Pn)
cusparseDcsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, NN, NNp, NNZG, &alpha, descr, G2_d, GrowStarts_d, Gcol_d, Pn_d, &beta, R1_d + NN); // Part of (- G2 * Pn)
cusparseDcsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, NN, NNp, NNZG, &alpha, descr, G3_d, GrowStarts_d, Gcol_d, Pn_d, &beta, R1_d + 2*NN); // Part of (- G3 * Pn)
// Zero the R1 entries of velocity-BC nodes (known Dirichlet values => zero change).
applyVelBC<<<NBLOCKS,NTHREADS>>>(BCnVelNodes, NN, BCvelNodes_d, R1_d);
// Calculate UnpHalf = Un + dt * MdInv .* R1
calculate_UnpHalf<<<NBLOCKS,NTHREADS>>>(3*NN, dt, UnpHalf_d, Un_d, R1_d, MdInv_d);
} // End of function step1GPUpart()
//========================================================================
void step2GPU(int iter)
//========================================================================
{
// Executes step 2 of the method to determine pressure of the new time step.
// Calculate the RHS vector of step 2.
// This is 1/(dt*dt) times of the residual defined in Blasco's paper.
// R2 = Gt * (UnpHalf / (dt*dt) - MdOrigInv * K * Acc_prev)
// TODO(review): dummy_d is allocated and freed on every call; hoist it to the
// one-time setup (alongside the other *_d arrays) to avoid per-iteration
// cudaMalloc/cudaFree overhead.
double *dummy_d; // This will store (UnpHalf / (dt*dt) - MdOrigInv * K * Acc_prev) array.
cudaStatus = cudaMalloc((void**)&dummy_d, 3*NN * sizeof(double)); if(cudaStatus != cudaSuccess) { printf("Error102: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
double oneOverdt2 = 1.0000000000000000 / (dt*dt);
// On the first iteration there is no previous acceleration, so the
// MdOrigInv * KtimesAcc_prev term is dropped.
if (iter == 1) {
calculate_step2dummyV1<<<NBLOCKS,NTHREADS>>>(3*NN, oneOverdt2, dummy_d, UnpHalf_d);
} else {
calculate_step2dummyV2<<<NBLOCKS,NTHREADS>>>(3*NN, oneOverdt2, dummy_d, UnpHalf_d, MdOrigInv_d, KtimesAcc_prev_d);
}
// Multiply Gt with the previously calculated dummy arrays.
// First call uses beta = 0 to initialize R2; the next two accumulate (beta = 1).
int NNZG = sparseG_NNZ / 3;
double alpha = 1.000000000000000;
double beta = 0.000000000000000;
cusparseDcsrmv(handle, CUSPARSE_OPERATION_TRANSPOSE, NN, NNp, NNZG, &alpha, descr, G1_d, GrowStarts_d, Gcol_d, dummy_d, &beta, R2_d); // 1st part of [Gt] * {dummy}
beta = 1.000000000000000;
cusparseDcsrmv(handle, CUSPARSE_OPERATION_TRANSPOSE, NN, NNp, NNZG, &alpha, descr, G2_d, GrowStarts_d, Gcol_d, dummy_d + NN, &beta, R2_d); // 2nd part of [Gt] * {dummy}
cusparseDcsrmv(handle, CUSPARSE_OPERATION_TRANSPOSE, NN, NNp, NNZG, &alpha, descr, G3_d, GrowStarts_d, Gcol_d, dummy_d + 2*NN, &beta, R2_d); // 3rd part of [Gt] * {dummy}
// Zero the RHS entry of the pinned-pressure node (single-thread kernel).
applyPresBC<<<1,1>>>(zeroPressureNode, R2_d);
// Use CUSP's CG solver to solve [Z] {Pdot}= {R2} system
CUSP_CG_solver(); // Calculate Pdot
calculate_Pnp1<<<NBLOCKS,NTHREADS>>>(NNp, dt, Pnp1_d, Pn_d, Pdot_d); // Pnp1 = Pn + dt * Pdot // TODO: Use CUBLAS function
cudaFree(dummy_d);
} // End of function step2GPU()
//========================================================================
void step3GPU(int iter)
//========================================================================
{
// Executes step 3 of the method to determine the velocity of the new time step.
// Calculate the RHS vector of step 3.
// R3 = - dt * (G * Pdot + K * Acc_prev)
int NNZG = sparseG_NNZ / 3;   // NNZ of one component of [G]
double alpha = -dt;
double beta = 0.000000000000000; // TODO: Use a single Rvel instead of (R1 and R3)
// beta = 0, so each call overwrites its NN-long slice of R3_d.
cusparseDcsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, NN, NNp, NNZG, &alpha, descr, G1_d, GrowStarts_d, Gcol_d, Pdot_d, &beta, R3_d); // This contributes to (- dt * G1 * Pdot) part of R3
cusparseDcsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, NN, NNp, NNZG, &alpha, descr, G2_d, GrowStarts_d, Gcol_d, Pdot_d, &beta, R3_d + NN); // This contributes to (- dt * G2 * Pdot) part of R3
cusparseDcsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, NN, NNp, NNZG, &alpha, descr, G3_d, GrowStarts_d, Gcol_d, Pdot_d, &beta, R3_d + 2*NN); // This contributes to (- dt * G3 * Pdot) part of R3
// Subtract dt * KtimesAcc_prev from R3 if iter is not 1.
// (On the first iteration there is no previous acceleration product.)
if (iter != 1) {
calculate_R3<<<NBLOCKS,NTHREADS>>>(3*NN, dt, R3_d, KtimesAcc_prev_d);
}
// Zero R3 at velocity-BC nodes (known Dirichlet values => zero change).
applyVelBC<<<NBLOCKS,NTHREADS>>>(BCnVelNodes, NN, BCvelNodes_d, R3_d);
// Calculate Acc (Acc = R3 * MdInv)
multiplyVectors<<<NBLOCKS,NTHREADS>>>(3*NN, Acc_d, R3_d, MdInv_d); // TODO: Use CUBLAS function
// Calculate Unp1 (Unp1 = UnpHalf + dt * Acc)
calculate_Unp1<<<NBLOCKS,NTHREADS>>>(3*NN, dt, Unp1_d, UnpHalf_d, Acc_d); // TODO: Use CUBLAS function
//cudaStatus = cudaMemcpy(Unp1, Unp1_d, 3*NN * sizeof(double), cudaMemcpyDeviceToHost); if(cudaStatus != cudaSuccess) { printf("Error107: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
} // End of function step3GPU()
//========================================================================
void calculate_KtimesAcc_prevGPU(void)
//========================================================================
{
// Computes KtimesAcc_prev = [K] * {Acc_prev} on the device.
// The u, v and w components live back-to-back in 3*NN arrays, so the same
// NNxNN CSR block of [K] is applied three times with shifted pointers.
// beta = 0 overwrites each output slice.
int NNZM = sparseM_NNZ / 3;
double alpha = 1.000000000000000;
double beta = 0.000000000000000;
cusparseDcsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, NN, NN, NNZM, &alpha, descr, K_d, MrowStarts_d, Mcol_d, Acc_prev_d, &beta, KtimesAcc_prev_d); // 1st part of [K] * {Acc_prev}
cusparseDcsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, NN, NN, NNZM, &alpha, descr, K_d, MrowStarts_d, Mcol_d, Acc_prev_d + NN, &beta, KtimesAcc_prev_d + NN); // 2nd part of [K] * {Acc_prev}
cusparseDcsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, NN, NN, NNZM, &alpha, descr, K_d, MrowStarts_d, Mcol_d, Acc_prev_d + 2*NN, &beta, KtimesAcc_prev_d + 2*NN); // 3rd part of [K] * {Acc_prev}
} // End of function calculate_KtimesAcc_prevGPU()
//========================================================================
bool checkConvergenceGPU(void)
//========================================================================
{
   // Returns 1 when both the velocity and the pressure iterates converged,
   // i.e. ||x_new - x_old|| / ||x_new|| < tolerance for Unp1 and Pnp1.
   // Improvement over the original: a single scratch vector is allocated once
   // and reused for both difference vectors (the original did two separate
   // cudaMalloc/cudaFree pairs per call).
   // TODO(review): hoist the allocation out of the iteration loop entirely.
   double norm1, norm2, normalizedNorm1, normalizedNorm2;

   // One scratch buffer big enough for either difference vector.
   double *diff_d;
   size_t diffSize = (3*NN > NNp ? 3*NN : NNp);
   cudaStatus = cudaMalloc((void**)&diff_d, diffSize * sizeof(double)); if(cudaStatus != cudaSuccess) { printf("Error108: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }

   // Normalized norm for velocity
   cublasDnrm2(handleCUBLAS, 3*NN, Unp1_d, 1, &norm1);                      // norm1 = ||Unp1||
   subtractVectors<<<NBLOCKS,NTHREADS>>>(3*NN, diff_d, Unp1_d, Unp1_prev_d);
   cublasDnrm2(handleCUBLAS, 3*NN, diff_d, 1, &norm2);                      // norm2 = ||Unp1 - Unp1_prev||
   normalizedNorm1 = norm2 / norm1;

   // Normalized norm for pressure (reusing the same scratch buffer)
   cublasDnrm2(handleCUBLAS, NNp, Pnp1_d, 1, &norm1);                       // norm1 = ||Pnp1||
   subtractVectors<<<NBLOCKS,NTHREADS>>>(NNp, diff_d, Pnp1_d, Pnp1_prev_d);
   cublasDnrm2(handleCUBLAS, NNp, diff_d, 1, &norm2);                       // norm2 = ||Pnp1 - Pnp1_prev||
   normalizedNorm2 = norm2 / norm1;

   cudaFree(diff_d);

   // Check convergence and get ready for the next iteration
   if (normalizedNorm1 < tolerance && normalizedNorm2 < tolerance) {
      return 1;
   } else {
      return 0;
   }
} // End of function checkConvergenceGPU()
//========================================================================
void printMonitorDataGPU(int iter)
//========================================================================
{
// Copies the full Un and Pn solution arrays back to the host and prints the
// u, v, w and p values at the monitor node, together with time-step info.
// TODO: Avoid the following device-to-host copies and print monitor data using device variables.
//double *monitorData_d;
//cudaMalloc((void**)&monitorData_d, 4 * sizeof(double));
//getMonitorData<<<1,1>>>(monPoint, NN, Un_d, Pn_d, monitorData_d);
//double *monitorData;
//monitorData = new double[4];
cudaStatus = cudaMemcpy(Un, Un_d, 3*NN * sizeof(double), cudaMemcpyDeviceToHost); if(cudaStatus != cudaSuccess) { printf("Error110: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
cudaStatus = cudaMemcpy(Pn, Pn_d, NNp * sizeof(double), cudaMemcpyDeviceToHost); if(cudaStatus != cudaSuccess) { printf("Error111: %s\n", cudaGetErrorString(cudaStatus)); cin >> dummyUserInput; }
// Columns: time step, iteration, time, u, v, w, p at monPoint, wall-clock
// time of this step, and the maximum nodal acceleration.
printf("%6d %6d %10.5f %12.5f %12.5f %12.5f %12.5f %12.5f %12.5f\n",
timeN, iter, timeT, Un[monPoint], Un[monPoint + NN], Un[monPoint + 2*NN], Pn[monPoint], wallClockTimeCurrentTimeStep, maxAcc);
} // End of function printMonitorDataGPU()
//========================================================================
bool checkConvergenceInTimeGPU(void)
//========================================================================
{
// Steady-state check: computes maxAcc = max_i |Unp1[i] - Un[i]| / dt via a
// single Thrust reduction on the device and returns 1 when it falls below
// convergenceCriteria. weighted_absolute_difference is a functor defined
// elsewhere in this file; presumably it returns |a - b| * oneOverdt for each
// zipped pair — TODO confirm its definition.
double oneOverdt = 1.0000000000000000 / dt;
// wrap raw pointer with a device_ptr
thrust::device_ptr<double> thrust_Un_dbeg(Un_d);
thrust::device_ptr<double> thrust_Un_dend = thrust_Un_dbeg + 3*NN;
thrust::device_ptr<double> thrust_Unp1_dbeg(Unp1_d);
thrust::device_ptr<double> thrust_Unp1_dend = thrust_Unp1_dbeg + 3*NN;
// do the reduction (init value -1.f is below any |.| result, so it acts as
// the identity for the max reduction)
maxAcc = transform_reduce(make_zip_iterator(make_tuple(thrust_Unp1_dbeg, thrust_Un_dbeg)),
make_zip_iterator(make_tuple(thrust_Unp1_dend, thrust_Un_dend)),
weighted_absolute_difference(oneOverdt),
-1.f,
maximum<double>());
// Check convergence
if (maxAcc > convergenceCriteria) {
return 0;
} else {
return 1;
}
} // End of function checkConvergenceInTimeGPU()
#endif //USECUDA
|
19cba347855b88d6cea98f61160e962332ff222d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
extern "C" {
#include "dropout_layer.h"
#include "hip/hip_runtime.h"
#include "utils.h"
}
// Inverted-dropout kernel: zeroes input[id] when its uniform draw rand[id]
// falls below prob, otherwise rescales the survivor by scale. 2-D grid of
// 1-D blocks, flattened to one thread per element.
__global__ void yoloswag420blazeit360noscope(float *input, int size, float *rand, float prob, float scale) {
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id < size) input[id] = (rand[id] < prob) ? 0 : input[id]*scale;
}
// Forward pass of the dropout layer (training only): fills layer.rand_gpu
// with fresh uniform randoms, then applies the dropout mask in place to
// net.input_gpu.
void forward_dropout_layer_gpu(dropout_layer layer, network net) {
// Dropout is the identity at inference time.
if (!net.train) return;
int size = layer.inputs*layer.batch;
cuda_random(layer.rand_gpu, size);
/*
int i;
for(i = 0; i < size; ++i){
layer.rand[i] = rand_uniform();
}
cuda_push_array(layer.rand_gpu, layer.rand, size);
*/
hipLaunchKernelGGL(( yoloswag420blazeit360noscope), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, net.input_gpu, size, layer.rand_gpu, layer.probability, layer.scale);
check_error(hipPeekAtLastError());
}
// Backward pass: applies the same mask to the incoming gradients in
// net.delta_gpu. layer.rand_gpu is reused from the forward pass, not
// regenerated, so the same elements are zeroed.
void backward_dropout_layer_gpu(dropout_layer layer, network net) {
if(!net.delta_gpu) return;
int size = layer.inputs*layer.batch;
hipLaunchKernelGGL(( yoloswag420blazeit360noscope), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, net.delta_gpu, size, layer.rand_gpu, layer.probability, layer.scale);
check_error(hipPeekAtLastError());
}
| 19cba347855b88d6cea98f61160e962332ff222d.cu | #include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
extern "C" {
#include "dropout_layer.h"
#include "cuda.h"
#include "utils.h"
}
// Inverted-dropout kernel: zeroes input[id] when its uniform draw rand[id]
// falls below prob, otherwise rescales the survivor by scale. 2-D grid of
// 1-D blocks, flattened to one thread per element.
__global__ void yoloswag420blazeit360noscope(float *input, int size, float *rand, float prob, float scale) {
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id < size) input[id] = (rand[id] < prob) ? 0 : input[id]*scale;
}
// Forward pass of the dropout layer (training only): fills layer.rand_gpu
// with fresh uniform randoms, then applies the dropout mask in place to
// net.input_gpu.
void forward_dropout_layer_gpu(dropout_layer layer, network net) {
// Dropout is the identity at inference time.
if (!net.train) return;
int size = layer.inputs*layer.batch;
cuda_random(layer.rand_gpu, size);
/*
int i;
for(i = 0; i < size; ++i){
layer.rand[i] = rand_uniform();
}
cuda_push_array(layer.rand_gpu, layer.rand, size);
*/
yoloswag420blazeit360noscope<<<cuda_gridsize(size), BLOCK>>>(net.input_gpu, size, layer.rand_gpu, layer.probability, layer.scale);
check_error(cudaPeekAtLastError());
}
// Backward pass: applies the same mask to the incoming gradients in
// net.delta_gpu. layer.rand_gpu is reused from the forward pass, not
// regenerated, so the same elements are zeroed.
void backward_dropout_layer_gpu(dropout_layer layer, network net) {
if(!net.delta_gpu) return;
int size = layer.inputs*layer.batch;
yoloswag420blazeit360noscope<<<cuda_gridsize(size), BLOCK>>>(net.delta_gpu, size, layer.rand_gpu, layer.probability, layer.scale);
check_error(cudaPeekAtLastError());
}
|
7f4a47218eb07740d6666afdf45193e5f5b590c9.hip | // !!! This is a file automatically generated by hipify!!!
// a cuda app. we will convert this to opencl, and run it :-)
#include <iostream>
#include <memory>
#include <cassert>
using namespace std;
#include <hip/hip_runtime.h>
// Stores `value` into data[idx]. Only lane 0 of each block writes, so all
// launched blocks redundantly store the same element (harmless: same value).
__global__ void setValue(float *data, int idx, float value) {
if(threadIdx.x == 0) {
data[idx] = value;
}
}
// Smoke test for basic GPU memory traffic: allocates N floats on the device,
// sets element 2 via a kernel, and round-trips values in both directions,
// asserting after each step.
// NOTE(review): return codes of the HIP calls are never checked.
int main(int argc, char *argv[]) {
int N = 1024;
float *gpuFloats;
hipMalloc((void**)(&gpuFloats), N * sizeof(float));
hipLaunchKernelGGL(( setValue), dim3(dim3(32, 1, 1)), dim3(dim3(32, 1, 1)), 0, 0, gpuFloats, 2, 123.0f);
// Only the first 4 floats are ever copied back, although N were allocated.
float hostFloats[4];
hipMemcpy(hostFloats, gpuFloats, 4 * sizeof(float), hipMemcpyDeviceToHost);
cout << "hostFloats[2] " << hostFloats[2] << endl;
assert(hostFloats[2] == 123.0f);
hipLaunchKernelGGL(( setValue), dim3(dim3(32, 1, 1)), dim3(dim3(32, 1, 1)), 0, 0, gpuFloats, 2, 222.0f);
hipMemcpy(hostFloats, gpuFloats, 4 * sizeof(float), hipMemcpyDeviceToHost);
cout << "hostFloats[2] " << hostFloats[2] << endl;
assert(hostFloats[2] == 222.0f);
hostFloats[2] = 444.0f;
hipMemcpy(gpuFloats, hostFloats, 4 * sizeof(float), hipMemcpyHostToDevice);
// Overwrite the host slot so the next copy-back provably came from the device.
hostFloats[2] = 555.0f;
hipMemcpy(hostFloats, gpuFloats, 4 * sizeof(float), hipMemcpyDeviceToHost);
cout << "hostFloats[2] " << hostFloats[2] << endl;
assert(hostFloats[2] == 444.0f);
hipFree(gpuFloats);
return 0;
}
| 7f4a47218eb07740d6666afdf45193e5f5b590c9.cu | // a cuda app. we will convert this to opencl, and run it :-)
#include <iostream>
#include <memory>
#include <cassert>
using namespace std;
#include <cuda_runtime.h>
// Stores `value` into data[idx]. Only lane 0 of each block writes, so all
// launched blocks redundantly store the same element (harmless: same value).
__global__ void setValue(float *data, int idx, float value) {
if(threadIdx.x == 0) {
data[idx] = value;
}
}
// Smoke test for basic GPU memory traffic: allocates N floats on the device,
// sets element 2 via a kernel, and round-trips values in both directions,
// asserting after each step.
// NOTE(review): return codes of the CUDA calls are never checked.
int main(int argc, char *argv[]) {
int N = 1024;
float *gpuFloats;
cudaMalloc((void**)(&gpuFloats), N * sizeof(float));
setValue<<<dim3(32, 1, 1), dim3(32, 1, 1)>>>(gpuFloats, 2, 123.0f);
// Only the first 4 floats are ever copied back, although N were allocated.
float hostFloats[4];
cudaMemcpy(hostFloats, gpuFloats, 4 * sizeof(float), cudaMemcpyDeviceToHost);
cout << "hostFloats[2] " << hostFloats[2] << endl;
assert(hostFloats[2] == 123.0f);
setValue<<<dim3(32, 1, 1), dim3(32, 1, 1)>>>(gpuFloats, 2, 222.0f);
cudaMemcpy(hostFloats, gpuFloats, 4 * sizeof(float), cudaMemcpyDeviceToHost);
cout << "hostFloats[2] " << hostFloats[2] << endl;
assert(hostFloats[2] == 222.0f);
hostFloats[2] = 444.0f;
cudaMemcpy(gpuFloats, hostFloats, 4 * sizeof(float), cudaMemcpyHostToDevice);
// Overwrite the host slot so the next copy-back provably came from the device.
hostFloats[2] = 555.0f;
cudaMemcpy(hostFloats, gpuFloats, 4 * sizeof(float), cudaMemcpyDeviceToHost);
cout << "hostFloats[2] " << hostFloats[2] << endl;
assert(hostFloats[2] == 444.0f);
cudaFree(gpuFloats);
return 0;
}
|
7ee49a067c8467a150bd1564810ff44e5d4793f3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define BLOCKSIZE 32
//Sigmoid function for logistic regression
// Host-side logistic sigmoid: 1 / (1 + e^(-in)).
// NOTE(review): uses double-precision exp() and literals inside a float
// function; expf/1.0f would avoid the implicit double round trip.
float sigmoid(float in){
return 1.0 / (1 + exp(-1 * in));
}
//Tiled version of matrix multiply
// Tiled GEMM: devC = alpha * devA * devB + beta * devC, with devA rows x k,
// devB k x cols and devC rows x cols, all row-major. Tiles of A and B are
// staged through shared memory BLOCKSIZE x BLOCKSIZE at a time.
// NOTE(review): assumes blockDim.x == blockDim.y == BLOCKSIZE -- confirm at
// the launch site.
__global__ void MatrixMultiplyKernel(float *devA, float *devB, float *devC, int rows, int cols, int k, float alpha, float beta)
{
//Get the thread's x and y locations for its run
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
//Allocate shared memory to hold parts of A and B
__shared__ float tileA[BLOCKSIZE][BLOCKSIZE];
__shared__ float tileB[BLOCKSIZE][BLOCKSIZE];
//Use sum to get the result for a specific element
float sum = 0.0;
//Use iter to see if the loop should be run again
int iter = 0;
do{
//Check if the x thread falls within bounds of the matrices
// Out-of-range tile entries are zero-filled so they contribute nothing.
if ((idy < rows) && (threadIdx.x + BLOCKSIZE*iter < k)){
tileA[threadIdx.y][threadIdx.x] = devA[threadIdx.x + idy*k + BLOCKSIZE*iter];
}
else {
tileA[threadIdx.y][threadIdx.x] = 0.0;
}
//Check if the y thread falls within bounds of the matrices
if ((threadIdx.y + BLOCKSIZE*iter < k) && (idx < cols)){
tileB[threadIdx.y][threadIdx.x] = devB[idx + (threadIdx.y + BLOCKSIZE*iter)*cols];
}
else {
tileB[threadIdx.y][threadIdx.x] = 0.0;
}
//Sync to ensure that all of the data has been grabbed for the tiles in this warp
__syncthreads();
//Sum the elements related to the element in C corresponding to idx and idy
for (int i = 0; i < BLOCKSIZE; i++){
sum += tileA[threadIdx.y][i] * tileB[i][threadIdx.x];
}
//Iterate the number done
iter++;
//Sync the threads again to ensure they have all done their work before going through the loop to get data
__syncthreads();
//Check if the tiles have covered all of C
} while (BLOCKSIZE*iter < k);
//If the thread falls within the matrix C, fill in its element, scaled by alpha and beta
if ((idy < rows) && (idx < cols)){
devC[idx + idy*cols] = sum * alpha + devC[idx + idy*cols] * beta;
}
}
// Elementwise squared difference: devC[i] = (devA[i] - devB[i])^2 for the
// first K entries. Indexed along the y dimension only, so the launch must
// span K threads in y.
__global__ void distKernel(float *devA, float *devB, float *devC, int K)
{
int idy = threadIdx.y + blockIdx.y * blockDim.y;
if ((idy < K)){
devC[idy] = (devA[idy] - devB[idy])*(devA[idy] - devB[idy]);
}
}
//Element wise subtraction of matrix A and B, stored in matrix C
// C[i] = A[i] - sigmoid(B[i]) for the first `rows` entries; the sigmoid is
// inlined (so, despite the header comment above, this is not a plain
// elementwise subtraction). Indexed along the y dimension only.
__global__ void sub_sigKernel(float *A, float *B, float *C, int rows)
{
int i = threadIdx.y + blockIdx.y * blockDim.y;
//Ensure the thread is in bounds
if (i < rows){
C[i] = (1.0 / (1 + exp(-1 * B[i])));
C[i] = A[i] - C[i];
}
}
| 7ee49a067c8467a150bd1564810ff44e5d4793f3.cu | #include <stdio.h>
#define BLOCKSIZE 32
//Sigmoid function for logistic regression
// Host-side logistic sigmoid: 1 / (1 + e^(-in)).
// NOTE(review): uses double-precision exp() and literals inside a float
// function; expf/1.0f would avoid the implicit double round trip.
float sigmoid(float in){
return 1.0 / (1 + exp(-1 * in));
}
//Tiled version of matrix multiply
// Tiled GEMM: devC = alpha * devA * devB + beta * devC, with devA rows x k,
// devB k x cols and devC rows x cols, all row-major. Tiles of A and B are
// staged through shared memory BLOCKSIZE x BLOCKSIZE at a time.
// NOTE(review): assumes blockDim.x == blockDim.y == BLOCKSIZE -- confirm at
// the launch site.
__global__ void MatrixMultiplyKernel(float *devA, float *devB, float *devC, int rows, int cols, int k, float alpha, float beta)
{
//Get the thread's x and y locations for its run
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
//Allocate shared memory to hold parts of A and B
__shared__ float tileA[BLOCKSIZE][BLOCKSIZE];
__shared__ float tileB[BLOCKSIZE][BLOCKSIZE];
//Use sum to get the result for a specific element
float sum = 0.0;
//Use iter to see if the loop should be run again
int iter = 0;
do{
//Check if the x thread falls within bounds of the matrices
// Out-of-range tile entries are zero-filled so they contribute nothing.
if ((idy < rows) && (threadIdx.x + BLOCKSIZE*iter < k)){
tileA[threadIdx.y][threadIdx.x] = devA[threadIdx.x + idy*k + BLOCKSIZE*iter];
}
else {
tileA[threadIdx.y][threadIdx.x] = 0.0;
}
//Check if the y thread falls within bounds of the matrices
if ((threadIdx.y + BLOCKSIZE*iter < k) && (idx < cols)){
tileB[threadIdx.y][threadIdx.x] = devB[idx + (threadIdx.y + BLOCKSIZE*iter)*cols];
}
else {
tileB[threadIdx.y][threadIdx.x] = 0.0;
}
//Sync to ensure that all of the data has been grabbed for the tiles in this warp
__syncthreads();
//Sum the elements related to the element in C corresponding to idx and idy
for (int i = 0; i < BLOCKSIZE; i++){
sum += tileA[threadIdx.y][i] * tileB[i][threadIdx.x];
}
//Iterate the number done
iter++;
//Sync the threads again to ensure they have all done their work before going through the loop to get data
__syncthreads();
//Check if the tiles have covered all of C
} while (BLOCKSIZE*iter < k);
//If the thread falls within the matrix C, fill in its element, scaled by alpha and beta
if ((idy < rows) && (idx < cols)){
devC[idx + idy*cols] = sum * alpha + devC[idx + idy*cols] * beta;
}
}
// Elementwise squared difference: devC[i] = (devA[i] - devB[i])^2 for the
// first K entries. Indexed along the y dimension only, so the launch must
// span K threads in y.
__global__ void distKernel(float *devA, float *devB, float *devC, int K)
{
int idy = threadIdx.y + blockIdx.y * blockDim.y;
if ((idy < K)){
devC[idy] = (devA[idy] - devB[idy])*(devA[idy] - devB[idy]);
}
}
//Element wise subtraction of matrix A and B, stored in matrix C
// C[i] = A[i] - sigmoid(B[i]) for the first `rows` entries; the sigmoid is
// inlined (so, despite the header comment above, this is not a plain
// elementwise subtraction). Indexed along the y dimension only.
__global__ void sub_sigKernel(float *A, float *B, float *C, int rows)
{
int i = threadIdx.y + blockIdx.y * blockDim.y;
//Ensure the thread is in bounds
if (i < rows){
C[i] = (1.0 / (1 + exp(-1 * B[i])));
C[i] = A[i] - C[i];
}
}
|
d94bd954dc94e56a1478173a0be6d9a9da1f83c5.hip | // !!! This is a file automatically generated by hipify!!!
/* ------------------------------------------------------------------------ */
/* Copyright 2018, IBM Corp. */
/* */
/* Licensed under the Apache License, Version 2.0 (the "License"); */
/* you may not use this file except in compliance with the License. */
/* You may obtain a copy of the License at */
/* */
/* http://www.apache.org/licenses/LICENSE-2.0 */
/* */
/* Unless required by applicable law or agreed to in writing, software */
/* distributed under the License is distributed on an "AS IS" BASIS, */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
/* See the License for the specific language governing permissions and */
/* limitations under the License. */
/* ------------------------------------------------------------------------ */
#ifndef SD_TEMPLATE_FILE
#define SD_TEMPLATE_FILE "core/gpu/template_steps.cu"
#include "solid.h"
#include "solid/base/generic/dtype_macros.h"
#include "solid/base/generic/scalar.h"
#include "solid/base/generic/dtype_assign.h"
#include "solid/core/gpu/apply_elemwise1.h"
#include "solid/base/gpu/dtype_gpu.h"
#include "solid/base/generic/generate_all_types.h"
#else
/* Create the cuda kernels */
#if SDTYPE_IS_REAL(SDXTYPE)
SOLID_KERNELS_ELEMWISE1_PARAM(UNROLL, steps_int64,
{ solid_int64 offset; solid_int64 step; },
{ *_ptr = SOLID_FROM_ELEMWORKTYPE(param.offset + _index * param.step); })
SOLID_KERNELS_ELEMWISE1_PARAM(UNROLL, steps_double,
{ solid_double offset; solid_double step; },
{ *_ptr = SOLID_FROM_ELEMWORKTYPE(param.offset + _index * param.step); })
#else
SOLID_KERNELS_ELEMWISE1_PARAM(UNROLL, steps_int64,
{ solid_int64 offset; solid_int64 step; },
{ _ptr -> real = SOLID_FROM_ELEMWORKTYPE(param.offset + _index * param.step);
_ptr -> imag = SOLID_FROM_ELEMWORKTYPE(0);
})
SOLID_KERNELS_ELEMWISE1_PARAM(UNROLL, steps_double,
{ solid_double offset; solid_double step; },
{ _ptr -> real = SOLID_FROM_ELEMWORKTYPE(param.offset + _index * param.step);
_ptr -> imag = SOLID_FROM_ELEMWORKTYPE(0);
})
SOLID_KERNELS_ELEMWISE1_PARAM(UNROLL, steps_cdouble,
{ solid_cdouble offset; solid_cdouble step; },
{ _ptr -> real = SOLID_FROM_ELEMWORKTYPE(param.offset.real + _index * param.step.real);
_ptr -> imag = SOLID_FROM_ELEMWORKTYPE(param.offset.imag + _index * param.step.imag);
})
#endif
/* -------------------------------------------------------------------- */
/* Fills the strided tensor at `ptr` with the linear sequence
offset + index * step (int64 parameters) by launching the steps_int64
elementwise kernel on `stream`. `result` is set by the launch macro;
0 indicates success. */
SOLID_API int SOLID_FUNCTION(steps_int64)(int ndims, const size_t *size,
const ptrdiff_t *strides, void *ptr,
solid_int64 offset, solid_int64 step,
hipStream_t stream)
/* -------------------------------------------------------------------- */
{ SOLID_KERNEL_PARAM(steps_int64) param;
int result = 0;
/* Set user parameters*/
param.offset = offset;
param.step = step;
/* Set up and launch the appropriate kernel */
SOLID_LAUNCH_ELEMWISE1_PARAM(UNROLL, steps_int64, 0, stream, param, result);
return result;
}
/* -------------------------------------------------------------------- */
/* Fills the strided tensor at `ptr` with the linear sequence
offset + index * step (double parameters) by launching the steps_double
elementwise kernel on `stream`. `result` is set by the launch macro;
0 indicates success. */
SOLID_API int SOLID_FUNCTION(steps_double)(int ndims, const size_t *size,
const ptrdiff_t *strides, void *ptr,
solid_double offset, solid_double step,
hipStream_t stream)
/* -------------------------------------------------------------------- */
{ SOLID_KERNEL_PARAM(steps_double) param;
int result = 0;
/* Set user parameters*/
param.offset = offset;
param.step = step;
/* Set up and launch the appropriate kernel */
SOLID_LAUNCH_ELEMWISE1_PARAM(UNROLL, steps_double, 0, stream, param, result);
return result;
}
#if SDTYPE_IS_COMPLEX(SDXTYPE)
/* -------------------------------------------------------------------- */
/* Complex variant (compiled only for complex SDXTYPE): fills the tensor
with offset + index * step applied independently to the real and
imaginary parts, via the steps_cdouble elementwise kernel on `stream`. */
SOLID_API int SOLID_FUNCTION(steps_cdouble)(int ndims, const size_t *size,
const ptrdiff_t *strides, void *ptr,
solid_cdouble offset, solid_cdouble step,
hipStream_t stream)
/* -------------------------------------------------------------------- */
{ SOLID_KERNEL_PARAM(steps_cdouble) param;
int result = 0;
/* Set user parameters*/
param.offset = offset;
param.step = step;
/* Set up and launch the appropriate kernel */
SOLID_LAUNCH_ELEMWISE1_PARAM(UNROLL, steps_cdouble, 0, stream, param, result);
return result;
}
#endif
#endif
| d94bd954dc94e56a1478173a0be6d9a9da1f83c5.cu | /* ------------------------------------------------------------------------ */
/* Copyright 2018, IBM Corp. */
/* */
/* Licensed under the Apache License, Version 2.0 (the "License"); */
/* you may not use this file except in compliance with the License. */
/* You may obtain a copy of the License at */
/* */
/* http://www.apache.org/licenses/LICENSE-2.0 */
/* */
/* Unless required by applicable law or agreed to in writing, software */
/* distributed under the License is distributed on an "AS IS" BASIS, */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
/* See the License for the specific language governing permissions and */
/* limitations under the License. */
/* ------------------------------------------------------------------------ */
#ifndef SD_TEMPLATE_FILE
#define SD_TEMPLATE_FILE "core/gpu/template_steps.cu"
#include "solid.h"
#include "solid/base/generic/dtype_macros.h"
#include "solid/base/generic/scalar.h"
#include "solid/base/generic/dtype_assign.h"
#include "solid/core/gpu/apply_elemwise1.h"
#include "solid/base/gpu/dtype_gpu.h"
#include "solid/base/generic/generate_all_types.h"
#else
/* Create the cuda kernels */
#if SDTYPE_IS_REAL(SDXTYPE)
SOLID_KERNELS_ELEMWISE1_PARAM(UNROLL, steps_int64,
{ solid_int64 offset; solid_int64 step; },
{ *_ptr = SOLID_FROM_ELEMWORKTYPE(param.offset + _index * param.step); })
SOLID_KERNELS_ELEMWISE1_PARAM(UNROLL, steps_double,
{ solid_double offset; solid_double step; },
{ *_ptr = SOLID_FROM_ELEMWORKTYPE(param.offset + _index * param.step); })
#else
SOLID_KERNELS_ELEMWISE1_PARAM(UNROLL, steps_int64,
{ solid_int64 offset; solid_int64 step; },
{ _ptr -> real = SOLID_FROM_ELEMWORKTYPE(param.offset + _index * param.step);
_ptr -> imag = SOLID_FROM_ELEMWORKTYPE(0);
})
SOLID_KERNELS_ELEMWISE1_PARAM(UNROLL, steps_double,
{ solid_double offset; solid_double step; },
{ _ptr -> real = SOLID_FROM_ELEMWORKTYPE(param.offset + _index * param.step);
_ptr -> imag = SOLID_FROM_ELEMWORKTYPE(0);
})
SOLID_KERNELS_ELEMWISE1_PARAM(UNROLL, steps_cdouble,
{ solid_cdouble offset; solid_cdouble step; },
{ _ptr -> real = SOLID_FROM_ELEMWORKTYPE(param.offset.real + _index * param.step.real);
_ptr -> imag = SOLID_FROM_ELEMWORKTYPE(param.offset.imag + _index * param.step.imag);
})
#endif
/* -------------------------------------------------------------------- */
/* Fills the strided tensor at `ptr` with the linear sequence
offset + index * step (int64 parameters) by launching the steps_int64
elementwise kernel on `stream`. `result` is set by the launch macro;
0 indicates success. */
SOLID_API int SOLID_FUNCTION(steps_int64)(int ndims, const size_t *size,
const ptrdiff_t *strides, void *ptr,
solid_int64 offset, solid_int64 step,
cudaStream_t stream)
/* -------------------------------------------------------------------- */
{ SOLID_KERNEL_PARAM(steps_int64) param;
int result = 0;
/* Set user parameters*/
param.offset = offset;
param.step = step;
/* Set up and launch the appropriate kernel */
SOLID_LAUNCH_ELEMWISE1_PARAM(UNROLL, steps_int64, 0, stream, param, result);
return result;
}
/* -------------------------------------------------------------------- */
/* Fills the strided tensor at `ptr` with the linear sequence
offset + index * step (double parameters) by launching the steps_double
elementwise kernel on `stream`. `result` is set by the launch macro;
0 indicates success. */
SOLID_API int SOLID_FUNCTION(steps_double)(int ndims, const size_t *size,
const ptrdiff_t *strides, void *ptr,
solid_double offset, solid_double step,
cudaStream_t stream)
/* -------------------------------------------------------------------- */
{ SOLID_KERNEL_PARAM(steps_double) param;
int result = 0;
/* Set user parameters*/
param.offset = offset;
param.step = step;
/* Set up and launch the appropriate kernel */
SOLID_LAUNCH_ELEMWISE1_PARAM(UNROLL, steps_double, 0, stream, param, result);
return result;
}
#if SDTYPE_IS_COMPLEX(SDXTYPE)
/* -------------------------------------------------------------------- */
/* Complex variant (compiled only for complex SDXTYPE): fills the tensor
with offset + index * step applied independently to the real and
imaginary parts, via the steps_cdouble elementwise kernel on `stream`. */
SOLID_API int SOLID_FUNCTION(steps_cdouble)(int ndims, const size_t *size,
const ptrdiff_t *strides, void *ptr,
solid_cdouble offset, solid_cdouble step,
cudaStream_t stream)
/* -------------------------------------------------------------------- */
{ SOLID_KERNEL_PARAM(steps_cdouble) param;
int result = 0;
/* Set user parameters*/
param.offset = offset;
param.step = step;
/* Set up and launch the appropriate kernel */
SOLID_LAUNCH_ELEMWISE1_PARAM(UNROLL, steps_cdouble, 0, stream, param, result);
return result;
}
#endif
#endif
|
f8fe14bb54e91c625dcd738188368e399d162fbd.hip | // !!! This is a file automatically generated by hipify!!!
/*
ssh dduyngo@node009
nvcc -lm -o -level1 convolve.cu
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include "image_template.h"
#include <math.h>
#include<sys/time.h>
//Prints a floating point matrix of given dimensions for logic debugging
// Row-major dump: prints one line per row, elements space-separated, with a
// leading newline before each row.
void print_matrix(float *image,int width,int height)
{
int i,j;
for(i=0;i<height;i++)
{
printf("\n");
for(j=0;j<width;j++)
printf(" %f",image[i*width+j]);
}
}
//A consolidated function that creates both the Gaussian kernel and the derivative kernel
// Allocates (with malloc; caller frees) and fills a 1-D Gaussian smoothing
// kernel and its first derivative, both of length k_width. The kernel is
// normalized to sum to 1; the derivative is scaled by sum(-i * deriv[i]).
void create_gaussians(float **gaussian_kernel,float **gaussian_deriv,int k_width,float sigma)
{
// NOTE(review): j is declared but never used.
int i,j;
float sum=0;
// a is the center index of the (odd-length) kernel.
int a=k_width/2;
printf("\n Creating kernels of width:%d and sigma:%f",k_width,sigma);
*gaussian_kernel=(float *)malloc(sizeof(float)*k_width);
*gaussian_deriv=(float *)malloc(sizeof(float)*k_width);
//Create kernel
sum=0;
for(i=0;i<k_width;i++)
{
(*gaussian_kernel)[i]=exp((-1*(i-a)*(i-a))/(2*sigma*sigma));
sum+=(*gaussian_kernel)[i];
}
for(i=0;i<k_width;i++)
(*gaussian_kernel)[i]/=sum;
//Create derivative
sum=0;
for(i=0;i<k_width;i++)
{
(*gaussian_deriv)[i]=-1*(i-a)*exp((-1*(i-a)*(i-a))/(2*sigma*sigma));
sum-=i*((*gaussian_deriv)[i]);
}
for(i=0;i<k_width;i++)
(*gaussian_deriv)[i]/=sum;
}
//A GPU kernel for convolution. Input image can be either int or float BUT the output is always float
// 2-D convolution with zero padding: one thread per output pixel. The x
// thread dimension indexes rows (height) and y indexes columns (width);
// mask taps that fall outside the image contribute nothing.
__global__
void convolve(float *in_image,int width,int height,float *mask,int mask_width,int mask_height,float *out_image)
{
int i,j,k,m;
float sum;
int offseti,offsetj;
i=blockIdx.x*blockDim.x + threadIdx.x;
j=blockIdx.y*blockDim.y + threadIdx.y;
if(i<height && j <width)
{
sum=0;
for(k=0;k<mask_height;k++)
{
for(m=0;m<mask_width;m++)
{
// Offsets center the mask on pixel (i, j).
offseti= -1*(mask_height/2)+k;
offsetj= -1*(mask_width/2)+m;
if(i+offseti >=0 && i+offseti<height && j+offsetj>=0 && j+offsetj<width)
{
sum+=(float)(in_image[(i+offseti)*width+(j+offsetj)])*mask[k*mask_width+m];
}
}
}
out_image[i*width+j]=(float)sum;
}
}
// Computes horizontal and vertical image gradients on the GPU: builds 1-D
// Gaussian smoothing/derivative kernels from the sigma argument, runs two
// separable convolution passes per gradient direction, and writes the two
// result images as PGM files. Usage: exec <image file> <sigma>.
// NOTE(review): no HIP call is error-checked.
int main(int argc, char **argv)
{
//Declare all of the variable here
float *org_img;
//GPU device buffer for original image
float *d_org_img;
//CPU host buffers for the final output
float *vertical_gradient,*horizontal_gradient;
//GPU buffers for the final result
float *d_vertical_gradient,*d_horizontal_gradient;
//GPU buffers to hold intermediate convolution results
float *d_temp_horizontal,*d_temp_vertical;
//CPU host buffers to store the convolution masks
float *gaussian_kernel,*gaussian_deriv;
//GPU device buffers to store the convolution masks
float *d_gaussian_kernel,*d_gaussian_deriv;
int width,height,k_width;
float sigma,a;
struct timeval start,end;
if(argc!=3)
{
printf("\n The correct argument list is: exec <image file> <Sigma> \n");
exit(0);
}
//obtain the parameters
sigma=atof(argv[2]);
// Kernel half-width from sigma; k_width is always odd.
a=ceil((float)(2.5*sigma-0.5));
k_width=2*a+1;
//CPU portion of the code that reads/prepares the input data
read_image_template<float>(argv[1],&org_img,&width,&height);
//Computation starts here
gettimeofday(&start,NULL);
create_gaussians(&gaussian_kernel,&gaussian_deriv,k_width,sigma);
//Allocate for intermediate images
// temp_horizontal=(float *)malloc(sizeof(float)*width*height);
// temp_vertical=(float *)malloc(sizeof(float)*width*height);
horizontal_gradient=(float *)malloc(sizeof(float)*width*height);
vertical_gradient=(float *)malloc(sizeof(float)*width*height);
// CPU host mallocs for GPU buffers
hipMalloc((void **)&d_org_img,sizeof(float)*width*height);
hipMalloc((void **)&d_temp_horizontal,sizeof(float)*width*height);
hipMalloc((void **)&d_temp_vertical,sizeof(float)*width*height);
hipMalloc((void **)&d_horizontal_gradient,sizeof(float)*width*height);
hipMalloc((void **)&d_vertical_gradient,sizeof(float)*width*height);
hipMalloc((void **)&d_gaussian_kernel,sizeof(float)*k_width);
hipMalloc((void **)&d_gaussian_deriv,sizeof(float)*k_width);
//Check kernels
printf("\n The gaussian kernel is:");
print_matrix(gaussian_kernel,k_width,1);
printf("\n The gaussian derivative is:");
print_matrix(gaussian_deriv,k_width,1);
printf("\n");
//Offload all of the data to GPU device for convolution
hipMemcpy(d_org_img,org_img,sizeof(float)*width*height,hipMemcpyHostToDevice);
hipMemcpy(d_gaussian_kernel,gaussian_kernel,sizeof(float)*k_width,hipMemcpyHostToDevice);
hipMemcpy(d_gaussian_deriv,gaussian_deriv,sizeof(float)*k_width,hipMemcpyHostToDevice);
//Horizontal gradient. vertical kernel then horizontal derivative
int block_dim=16;
// NOTE(review): height/block_dim is integer division, so ceil() has no
// effect; dimensions not divisible by 16 leave a border unprocessed.
dim3 dimGrid(ceil(height/block_dim),ceil(width/block_dim),1);
dim3 dimBlock(block_dim,block_dim,1);
hipLaunchKernelGGL(( convolve), dim3(dimGrid),dim3(dimBlock), 0, 0, d_org_img,width,height,d_gaussian_kernel,1,k_width,d_temp_horizontal);
hipLaunchKernelGGL(( convolve), dim3(dimGrid),dim3(dimBlock), 0, 0, d_temp_horizontal,width,height,d_gaussian_deriv,k_width,1,d_horizontal_gradient);
//Vertical gradient. horizontal kernel then vertical derivative
hipLaunchKernelGGL(( convolve), dim3(dimGrid),dim3(dimBlock), 0, 0, d_org_img,width,height,d_gaussian_kernel,k_width,1,d_temp_vertical);
hipLaunchKernelGGL(( convolve), dim3(dimGrid),dim3(dimBlock), 0, 0, d_temp_vertical,width,height,d_gaussian_deriv,1,k_width,d_vertical_gradient);
//GPU to Host transfer of the final result
hipMemcpy(horizontal_gradient,d_horizontal_gradient,sizeof(float)*width*height,hipMemcpyDeviceToHost);
hipMemcpy(vertical_gradient,d_vertical_gradient,sizeof(float)*width*height,hipMemcpyDeviceToHost);
hipDeviceSynchronize();
gettimeofday(&end,NULL);
printf("Execution time in ms: %ld\n", ((end.tv_sec * 1000 + end.tv_usec/1000)
- (start.tv_sec * 1000 + start.tv_usec/1000)));
write_image_template<float>((char *)("horizontal_gradient.pgm"),horizontal_gradient,width,height);
write_image_template<float>((char *)("vertical_gradient.pgm"),vertical_gradient,width,height);
//free variables
free(org_img);
// free(temp_horizontal);
// free(temp_vertical);
free(horizontal_gradient);
free(vertical_gradient);
free(gaussian_kernel);
free(gaussian_deriv);
hipFree(d_org_img);
hipFree(d_gaussian_kernel);
hipFree(d_gaussian_deriv);
hipFree(d_temp_horizontal);
hipFree(d_temp_vertical);
hipFree(d_vertical_gradient);
hipFree(d_horizontal_gradient);
return 0;
}
| f8fe14bb54e91c625dcd738188368e399d162fbd.cu | /*
ssh dduyngo@node009
nvcc -lm -o -level1 convolve.cu
*/
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include "image_template.h"
#include <math.h>
#include<sys/time.h>
//Prints a floating point matrix of given dimensions for logic debugging
// Row-major dump: prints one line per row, elements space-separated, with a
// leading newline before each row.
void print_matrix(float *image,int width,int height)
{
int i,j;
for(i=0;i<height;i++)
{
printf("\n");
for(j=0;j<width;j++)
printf(" %f",image[i*width+j]);
}
}
//A consolidated function that creates both the Gaussian kernel and the derivative kernel
// Allocates (with malloc; caller frees) and fills a 1-D Gaussian smoothing
// kernel and its first derivative, both of length k_width. The kernel is
// normalized to sum to 1; the derivative is scaled by sum(-i * deriv[i]).
void create_gaussians(float **gaussian_kernel,float **gaussian_deriv,int k_width,float sigma)
{
// NOTE(review): j is declared but never used.
int i,j;
float sum=0;
// a is the center index of the (odd-length) kernel.
int a=k_width/2;
printf("\n Creating kernels of width:%d and sigma:%f",k_width,sigma);
*gaussian_kernel=(float *)malloc(sizeof(float)*k_width);
*gaussian_deriv=(float *)malloc(sizeof(float)*k_width);
//Create kernel
sum=0;
for(i=0;i<k_width;i++)
{
(*gaussian_kernel)[i]=exp((-1*(i-a)*(i-a))/(2*sigma*sigma));
sum+=(*gaussian_kernel)[i];
}
for(i=0;i<k_width;i++)
(*gaussian_kernel)[i]/=sum;
//Create derivative
sum=0;
for(i=0;i<k_width;i++)
{
(*gaussian_deriv)[i]=-1*(i-a)*exp((-1*(i-a)*(i-a))/(2*sigma*sigma));
sum-=i*((*gaussian_deriv)[i]);
}
for(i=0;i<k_width;i++)
(*gaussian_deriv)[i]/=sum;
}
//A GPU kernel for convolution. Input image can be either int or float BUT the output is always float
// 2-D convolution with zero padding: one thread per output pixel. The x
// thread dimension indexes rows (height) and y indexes columns (width);
// mask taps that fall outside the image contribute nothing.
__global__
void convolve(float *in_image,int width,int height,float *mask,int mask_width,int mask_height,float *out_image)
{
int i,j,k,m;
float sum;
int offseti,offsetj;
i=blockIdx.x*blockDim.x + threadIdx.x;
j=blockIdx.y*blockDim.y + threadIdx.y;
if(i<height && j <width)
{
sum=0;
for(k=0;k<mask_height;k++)
{
for(m=0;m<mask_width;m++)
{
// Offsets center the mask on pixel (i, j).
offseti= -1*(mask_height/2)+k;
offsetj= -1*(mask_width/2)+m;
if(i+offseti >=0 && i+offseti<height && j+offsetj>=0 && j+offsetj<width)
{
sum+=(float)(in_image[(i+offseti)*width+(j+offsetj)])*mask[k*mask_width+m];
}
}
}
out_image[i*width+j]=(float)sum;
}
}
// Computes horizontal and vertical image gradients on the GPU: builds 1-D
// Gaussian smoothing/derivative kernels from the sigma argument, runs two
// separable convolution passes per gradient direction, and writes the two
// result images as PGM files. Usage: exec <image file> <sigma>.
// NOTE(review): no CUDA call is error-checked, and cudaThreadSynchronize()
// below is the deprecated spelling of cudaDeviceSynchronize().
int main(int argc, char **argv)
{
//Declare all of the variable here
float *org_img;
//GPU device buffer for original image
float *d_org_img;
//CPU host buffers for the final output
float *vertical_gradient,*horizontal_gradient;
//GPU buffers for the final result
float *d_vertical_gradient,*d_horizontal_gradient;
//GPU buffers to hold intermediate convolution results
float *d_temp_horizontal,*d_temp_vertical;
//CPU host buffers to store the convolution masks
float *gaussian_kernel,*gaussian_deriv;
//GPU device buffers to store the convolution masks
float *d_gaussian_kernel,*d_gaussian_deriv;
int width,height,k_width;
float sigma,a;
struct timeval start,end;
if(argc!=3)
{
printf("\n The correct argument list is: exec <image file> <Sigma> \n");
exit(0);
}
//obtain the parameters
sigma=atof(argv[2]);
// Kernel half-width from sigma; k_width is always odd.
a=ceil((float)(2.5*sigma-0.5));
k_width=2*a+1;
//CPU portion of the code that reads/prepares the input data
read_image_template<float>(argv[1],&org_img,&width,&height);
//Computation starts here
gettimeofday(&start,NULL);
create_gaussians(&gaussian_kernel,&gaussian_deriv,k_width,sigma);
//Allocate for intermediate images
// temp_horizontal=(float *)malloc(sizeof(float)*width*height);
// temp_vertical=(float *)malloc(sizeof(float)*width*height);
horizontal_gradient=(float *)malloc(sizeof(float)*width*height);
vertical_gradient=(float *)malloc(sizeof(float)*width*height);
// CPU host mallocs for GPU buffers
cudaMalloc((void **)&d_org_img,sizeof(float)*width*height);
cudaMalloc((void **)&d_temp_horizontal,sizeof(float)*width*height);
cudaMalloc((void **)&d_temp_vertical,sizeof(float)*width*height);
cudaMalloc((void **)&d_horizontal_gradient,sizeof(float)*width*height);
cudaMalloc((void **)&d_vertical_gradient,sizeof(float)*width*height);
cudaMalloc((void **)&d_gaussian_kernel,sizeof(float)*k_width);
cudaMalloc((void **)&d_gaussian_deriv,sizeof(float)*k_width);
//Check kernels
printf("\n The gaussian kernel is:");
print_matrix(gaussian_kernel,k_width,1);
printf("\n The gaussian derivative is:");
print_matrix(gaussian_deriv,k_width,1);
printf("\n");
//Offload all of the data to GPU device for convolution
cudaMemcpy(d_org_img,org_img,sizeof(float)*width*height,cudaMemcpyHostToDevice);
cudaMemcpy(d_gaussian_kernel,gaussian_kernel,sizeof(float)*k_width,cudaMemcpyHostToDevice);
cudaMemcpy(d_gaussian_deriv,gaussian_deriv,sizeof(float)*k_width,cudaMemcpyHostToDevice);
//Horizontal gradient. vertical kernel then horizontal derivative
int block_dim=16;
// NOTE(review): height/block_dim is integer division, so ceil() has no
// effect; dimensions not divisible by 16 leave a border unprocessed.
dim3 dimGrid(ceil(height/block_dim),ceil(width/block_dim),1);
dim3 dimBlock(block_dim,block_dim,1);
convolve<<<dimGrid,dimBlock>>>(d_org_img,width,height,d_gaussian_kernel,1,k_width,d_temp_horizontal);
convolve<<<dimGrid,dimBlock>>>(d_temp_horizontal,width,height,d_gaussian_deriv,k_width,1,d_horizontal_gradient);
//Vertical gradient. horizontal kernel then vertical derivative
convolve<<<dimGrid,dimBlock>>>(d_org_img,width,height,d_gaussian_kernel,k_width,1,d_temp_vertical);
convolve<<<dimGrid,dimBlock>>>(d_temp_vertical,width,height,d_gaussian_deriv,1,k_width,d_vertical_gradient);
//GPU to Host transfer of the final result
cudaMemcpy(horizontal_gradient,d_horizontal_gradient,sizeof(float)*width*height,cudaMemcpyDeviceToHost);
cudaMemcpy(vertical_gradient,d_vertical_gradient,sizeof(float)*width*height,cudaMemcpyDeviceToHost);
cudaThreadSynchronize();
gettimeofday(&end,NULL);
printf("Execution time in ms: %ld\n", ((end.tv_sec * 1000 + end.tv_usec/1000)
- (start.tv_sec * 1000 + start.tv_usec/1000)));
write_image_template<float>((char *)("horizontal_gradient.pgm"),horizontal_gradient,width,height);
write_image_template<float>((char *)("vertical_gradient.pgm"),vertical_gradient,width,height);
//free variables
free(org_img);
// free(temp_horizontal);
// free(temp_vertical);
free(horizontal_gradient);
free(vertical_gradient);
free(gaussian_kernel);
free(gaussian_deriv);
cudaFree(d_org_img);
cudaFree(d_gaussian_kernel);
cudaFree(d_gaussian_deriv);
cudaFree(d_temp_horizontal);
cudaFree(d_temp_vertical);
cudaFree(d_vertical_gradient);
cudaFree(d_horizontal_gradient);
return 0;
}
|
19170cb352d0bf52663e5b154788b68a4ac7aea8.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <helper_math.h>
#include <helper_cuda.h>
#include <float.h>
#include "ray.h"
#include "triangle.h"
#include "camera.h"
#define MAX_DEPTH 2
#define SMALLEST_DIST 1e-4
__constant__ Camera cam;
__constant__ int c_image_width;
__constant__ int c_image_height;
__device__ void writePixel(int x, int y, int image_width, float3 color, unsigned char *d_image);
// Recursively traces `ray` against all triangles and returns the shaded
// color of the nearest hit, plus reflection/refraction contributions scaled
// by the material's kr/kt; returns black when nothing is hit or the ray
// depth exceeds MAX_DEPTH. NOTE(review): device-side recursion -- each
// bounce consumes local stack space.
__device__ float3 traceRay(Ray& ray, Triangle *d_objects, int objects_count, PointLight *lights, int lights_count)
{
// Exit if MAX_DEPTH reached.
if (ray.getDepth() > MAX_DEPTH)
{
return make_float3(0.0, 0.0, 0.0);
}
else
{
float tmin = FLT_MAX;
Triangle closestObject;
float t;
// Find closest hit object.
// Hits closer than SMALLEST_DIST are rejected to avoid self-intersection.
for (int i=0; i<objects_count; i++)
{
t = d_objects[i].intersect(ray);
if (t < tmin && t > SMALLEST_DIST)
{
tmin = t;
closestObject = d_objects[i];
}
}
// Some object was hit.
if (tmin != FLT_MAX)
{
float3 final_color = closestObject.shade(ray, tmin, d_objects, objects_count, lights, lights_count, cam);
if (closestObject.isReflective())
{
Ray reflectedRay = closestObject.getReflectedRay(ray, tmin);
float3 r_color = closestObject.getMaterial().kr * traceRay(reflectedRay, d_objects, objects_count, lights, lights_count);
final_color = final_color + r_color;
}
if (closestObject.isRefractive())
{
Ray refractedRay = closestObject.getRefractedRay(ray, tmin);
float3 r_color = closestObject.getMaterial().kt * traceRay(refractedRay, d_objects, objects_count, lights, lights_count);
final_color = final_color + r_color;
}
return final_color;
}
else
{
return make_float3(0.0, 0.0, 0.0);
}
}
}
// One thread per pixel: builds the primary ray through pixel (x, y), traces
// it, records the per-pixel clock64() cycle count in `times`, and writes the
// resulting RGB color to d_image. Image extents come from __constant__
// memory (c_image_width / c_image_height).
__global__ void colorPixel(unsigned char *d_image, Triangle *d_objects, int objects_count, PointLight *lights, int lights_count, long long int *times)
{
long long int start = clock64();
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
// Guard against the partial blocks at the right/bottom image edges.
if (x >= c_image_width || y >= c_image_height)
return;
float3 ray_dir = cam.get_ray_direction(x, y);
Ray ray(cam.getPosition(), ray_dir);
float3 pixel_color = traceRay(ray, d_objects, objects_count, lights, lights_count);
long long int end = clock64();
long long int duration = end - start;
times[y * c_image_width + x] = duration;
writePixel(x, y, c_image_width, pixel_color, d_image);
}
// Stores `color` as 8-bit RGB at pixel (x, y) of the row-major,
// 3-bytes-per-pixel image buffer. Channels are scaled by 255 and
// truncated; no clamping is applied.
__device__ void writePixel(int x, int y, int image_width, float3 color, unsigned char *d_image)
{
unsigned char *pixel = d_image + (y * image_width + x) * 3;
pixel[0] = (unsigned char) (color.x * 255);
pixel[1] = (unsigned char) (color.y * 255);
pixel[2] = (unsigned char) (color.z * 255);
}
| 19170cb352d0bf52663e5b154788b68a4ac7aea8.cu | #include <cuda_runtime.h>
#include <helper_math.h>
#include <helper_cuda.h>
#include <float.h>
#include "ray.h"
#include "triangle.h"
#include "camera.h"
#define MAX_DEPTH 2
#define SMALLEST_DIST 1e-4
__constant__ Camera cam;
__constant__ int c_image_width;
__constant__ int c_image_height;
__device__ void writePixel(int x, int y, int image_width, float3 color, unsigned char *d_image);
// Recursively traces `ray` through the scene: finds the closest triangle,
// shades it, then adds reflected/refracted contributions. Returns black
// when nothing is hit or the depth limit is exceeded.
// NOTE(review): termination relies on Ray::getDepth(); presumably the
// reflected/refracted rays carry depth+1 -- confirm in ray.h.
__device__ float3 traceRay(Ray& ray, Triangle *d_objects, int objects_count, PointLight *lights, int lights_count)
{
// Exit if MAX_DEPTH reached.
if (ray.getDepth() > MAX_DEPTH)
{
return make_float3(0.0, 0.0, 0.0);
}
else
{
float tmin = FLT_MAX;
Triangle closestObject;
float t;
// Find closest hit object.
// SMALLEST_DIST filters out self-intersection hits at the ray origin.
for (int i=0; i<objects_count; i++)
{
t = d_objects[i].intersect(ray);
if (t < tmin && t > SMALLEST_DIST)
{
tmin = t;
closestObject = d_objects[i];
}
}
// Some object was hit.
if (tmin != FLT_MAX)
{
float3 final_color = closestObject.shade(ray, tmin, d_objects, objects_count, lights, lights_count, cam);
// Mirror contribution, scaled by the material's reflectivity kr.
if (closestObject.isReflective())
{
Ray reflectedRay = closestObject.getReflectedRay(ray, tmin);
float3 r_color = closestObject.getMaterial().kr * traceRay(reflectedRay, d_objects, objects_count, lights, lights_count);
final_color = final_color + r_color;
}
// Transmitted contribution, scaled by the material's transmittance kt.
if (closestObject.isRefractive())
{
Ray refractedRay = closestObject.getRefractedRay(ray, tmin);
float3 r_color = closestObject.getMaterial().kt * traceRay(refractedRay, d_objects, objects_count, lights, lights_count);
final_color = final_color + r_color;
}
return final_color;
}
else
{
// Ray escaped the scene: background is black.
return make_float3(0.0, 0.0, 0.0);
}
}
}
// One thread per pixel: builds the primary ray through pixel (x, y), traces
// it, records the per-pixel clock64() cycle count in `times`, and writes the
// resulting RGB color to d_image. Image extents come from __constant__
// memory (c_image_width / c_image_height).
__global__ void colorPixel(unsigned char *d_image, Triangle *d_objects, int objects_count, PointLight *lights, int lights_count, long long int *times)
{
long long int start = clock64();
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
// Guard against the partial blocks at the right/bottom image edges.
if (x >= c_image_width || y >= c_image_height)
return;
float3 ray_dir = cam.get_ray_direction(x, y);
Ray ray(cam.getPosition(), ray_dir);
float3 pixel_color = traceRay(ray, d_objects, objects_count, lights, lights_count);
long long int end = clock64();
long long int duration = end - start;
times[y * c_image_width + x] = duration;
writePixel(x, y, c_image_width, pixel_color, d_image);
}
// Stores `color` as 8-bit RGB at pixel (x, y) of the row-major,
// 3-bytes-per-pixel image buffer. Channels are scaled by 255 and
// truncated; no clamping is applied.
__device__ void writePixel(int x, int y, int image_width, float3 color, unsigned char *d_image)
{
unsigned char *pixel = d_image + (y * image_width + x) * 3;
pixel[0] = (unsigned char) (color.x * 255);
pixel[1] = (unsigned char) (color.y * 255);
pixel[2] = (unsigned char) (color.z * 255);
}
|
b1cb28a1224ea5d788fb12e050d9691953e96475.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cudaDSaturation_backPropagate_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver (HIP port): sweeps argv[1] matrix shapes from matrices_
// against all 20 launch configurations in blocks_, timing 1000 launches of
// cudaDSaturation_backPropagate_kernel after one synchronized launch and a
// 10-iteration warm-up.
// NOTE(review): the hipMalloc sizes omit sizeof(double) and the buffers are
// never freed -- looks like a generated-benchmark bug; confirm upstream.
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
double *dx = NULL;
hipMalloc(&dx, XSIZE*YSIZE);
unsigned int size = 1;
double threshold = 1;
// Round the launch extents up to multiples of the block shape.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// hipFree(0) forces lazy context creation before timing.
hipFree(0);hipLaunchKernelGGL((
cudaDSaturation_backPropagate_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, x,dx,size,threshold);
hipDeviceSynchronize();
// Warm-up launches.
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
cudaDSaturation_backPropagate_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, x,dx,size,threshold);
}
auto start = steady_clock::now();
// Timed region: enqueues 1000 launches with no trailing sync, so this
// measures launch/enqueue overhead rather than kernel execution time.
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
cudaDSaturation_backPropagate_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, x,dx,size,threshold);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | b1cb28a1224ea5d788fb12e050d9691953e96475.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cudaDSaturation_backPropagate_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver: sweeps argv[1] matrix shapes from matrices_ against all
// 20 launch configurations in blocks_, timing 1000 launches of
// cudaDSaturation_backPropagate_kernel after one synchronized launch and a
// 10-iteration warm-up. Prints [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;
int matrix_len = strtol(argv[1], &p, 10);
for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
for (int block_looper = 0; block_looper < 20; block_looper++) {
int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
double *x = NULL;
// Fixed: the original allocated XSIZE*YSIZE *bytes* for a double
// buffer; the kernel indexes doubles, so scale by sizeof(double).
cudaMalloc(&x, XSIZE * YSIZE * sizeof(double));
double *dx = NULL;
cudaMalloc(&dx, XSIZE * YSIZE * sizeof(double));
unsigned int size = 1;
double threshold = 1;
// Round the launch extents up to multiples of the block shape.
int iXSIZE = XSIZE;
int iYSIZE = YSIZE;
while (iXSIZE % BLOCKX != 0) {
iXSIZE++;
}
while (iYSIZE % BLOCKY != 0) {
iYSIZE++;
}
dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);  // force lazy context creation before timing
cudaDSaturation_backPropagate_kernel<<<gridBlock,threadBlock>>>(x, dx, size, threshold);
cudaDeviceSynchronize();
// Warm-up launches.
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
cudaDSaturation_backPropagate_kernel<<<gridBlock,threadBlock>>>(x, dx, size, threshold);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
cudaDSaturation_backPropagate_kernel<<<gridBlock,threadBlock>>>(x, dx, size, threshold);
}
// Fixed: kernel launches are asynchronous -- without this sync the
// timer only measured enqueue overhead, not execution time.
cudaDeviceSynchronize();
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
// Fixed: the original leaked both device buffers on every iteration.
cudaFree(x);
cudaFree(dx);
}
}}
936ecdbfaf945a4ca4a52a86e873decdc7caa3b1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#define n 4
// LU decomposition of the n x n matrix A into the packed factor matrix D:
// U is stored on and above the diagonal, L's multipliers strictly below it
// (L's diagonal is an implicit 1). Aborts when a zero pivot D[q][q] is hit,
// after printing a warning (message text is Indonesian: "cannot divide by 0").
__device__
void dekomposisi(double A[][n], double D[][n]) {
int i, j, k, p, q, stop = 0;
double sum = 0;
for (p = 0; p < n; p++) {
// Row p of U: U[p][j] = A[p][j] - sum_k L[p][k] * U[k][j].
for (j = p; j < n; j++) {
sum = 0;
for (k = 0; k < p; k++) {
sum += D[p][k] * D[k][j];
}
D[p][j] = A[p][j] - sum;
}
q = p;
// Column q of L: L[i][q] = (A[i][q] - sum_k L[i][k] * U[k][q]) / U[q][q].
for (i = q + 1; i < n; i++) {
sum = 0;
for(k = 0; k < q; k++) {
sum += D[i][k] * D[k][q];
}
if (D[q][q] == 0) {
printf("U[%d][%d] == 0\n Tidak bisa dibagi 0...\n", q, q);
stop = 1;
} else {
D[i][q] = (A[i][q] - sum) / D[q][q];
}
if (stop) {
break;
}
}
if (stop) {
break;
}
}
}
// Solves D * x = b by forward substitution (L y = b, unit diagonal) followed
// by back substitution (U x = y), where D holds the packed LU factors
// produced by dekomposisi(). `y` is caller-provided device scratch of n
// doubles; the caller retains ownership.
__device__
void sulih(double D[][n], double b[n], double x[n], double *y) {
int i, j;
double sum;
// Forward substitution: L y = b (L has an implicit unit diagonal).
for (i = 0; i < n; i++) {
sum = 0;
for (j = 0; j < i; j++) {
sum += y[j] * D[i][j];
}
y[i] = b[i] - sum;
}
// Back substitution: U x = y.
for (i = n-1; i >= 0; i--) {
sum = 0;
for (j = i+1; j < n; j++) {
sum += x[j] * D[i][j];
}
x[i] = (y[i] - sum) / D[i][i];
}
// Fixed: the original called free(y) here, but y is allocated by the host
// with hipMalloc; in-kernel free() is only valid for pointers from
// in-kernel malloc(), so that call was undefined behavior. The host now
// owns and releases y.
}
// Pretty-prints the packed LU factors in D: first L (with the implicit unit
// diagonal shown as the literal 1), then U, in 8-character columns.
__device__
void print_LU(double D[][n]) {
printf("Dekomposisi\n");
printf("L =\n");
for (int row = 0; row < n; row++) {
printf("\t");
for (int col = 0; col < row; col++) {
printf("%8.4f ", D[row][col]);
}
printf("%8d\n", 1);
}
printf("U =\n");
for (int row = 0; row < n; row++) {
printf("\t");
for (int col = 0; col < row; col++) {
printf("%8s ", "");
}
for (int col = row; col < n; col++) {
printf("%8.4f ", D[row][col]);
}
printf("\n");
}
}
// Prints the solution vector x, one 8.4f-formatted value per line.
__device__
void print_x(double x[n]) {
printf("Solusi\n");
printf("x =\n");
for (int idx = 0; idx < n; idx++) {
printf("\t%8.4f\n", x[idx]);
}
}
// Single-thread demo kernel: factors a hard-coded 4x4 system A x = b with
// dekomposisi(), prints the LU factors, solves via sulih() using the
// host-allocated device scratch vector y, and prints the solution.
// (The commented-out matrices are alternative truss/test systems.)
__global__
void crout(double *y) {
double A[][n] = {
{0.31, 0.14, 0.30, 0.27},
{0.26, 0.32, 0.18, 0.24},
{0.61, 0.22, 0.20, 0.31},
{0.40, 0.34, 0.36, 0.17},
// {0.7071, 0, 1, 0, 0.5, 0, 0, 0, 0},
// {0, 1, 0, 0, 0, -1, 0, 0, 0},
// {0, 0, -1, 0, 0, 0, 0, 0, 0},
// {0, 0, 0, 1, 0, 0, 0, 0, -0.7071},
// {0.7071, 0, 0, -1, -0.8660, 0, 0, 0, 0},
// {0, 0, 0, 0, 0, 0, 1, 0, 0.7071},
// {0, 0, 0, 0, -0.5, 0, -1, 0, 0},
// {0, 0, 0, 0, 0.8660, 1, 0, -1, 0},
// {0, 0, 0, 0, 0, 0, 0, 0, 0.7071},
// { 0.866, 0, -0.5, 0, 0, 0},
// { 0, 1, 0.5, 0, 0, 0},
// { 0.5, 0, 0.866, 0, 0, 0},
// {-0.866, -1, 0, -1, 0, 0},
// { -0.5, 0, 0, 0, -1, 0},
// { 0, 0, -0.866, 0, 0, -1},
};
double b[n] = {
1.02,
1.00,
1.34,
1.27,
// -1000,
// 0,
// 0,
// 0,
// 0,
// 500,
// -500,
// 0,
// 0,
// 0,
// 0,
// -1000,
// 0,
// 0,
// 0,
};
// Note: D is declared with 6 rows; only the first n (= 4) are used.
double D[][n] = {
{0, 0, 0, 0},
{0, 0, 0, 0},
{0, 0, 0, 0},
{0, 0, 0, 0},
{0, 0, 0, 0},
{0, 0, 0, 0},
};
double x[n] = {
0,
0,
0,
0,
};
dekomposisi(A, D);
print_LU(D);
sulih(D, b, x, y);
print_x(x);
}
// Host driver: allocates the device scratch vector used by sulih(), runs
// the single-thread crout kernel, and waits for it to finish.
int main(int argc, char *argv[] ) {
double *y;
hipMalloc(&y, n*sizeof(double));
hipLaunchKernelGGL(( crout), dim3(1), dim3(1), 0, 0, y);
hipDeviceSynchronize();
// Fixed: release the device scratch buffer (it was leaked before).
hipFree(y);
return 0;
}
| 936ecdbfaf945a4ca4a52a86e873decdc7caa3b1.cu | #include <stdio.h>
#include <stdlib.h>
#define n 4
// LU decomposition of the n x n matrix A into the packed factor matrix D:
// U is stored on and above the diagonal, L's multipliers strictly below it
// (L's diagonal is an implicit 1). Aborts when a zero pivot D[q][q] is hit,
// after printing a warning (message text is Indonesian: "cannot divide by 0").
__device__
void dekomposisi(double A[][n], double D[][n]) {
int i, j, k, p, q, stop = 0;
double sum = 0;
for (p = 0; p < n; p++) {
// Row p of U: U[p][j] = A[p][j] - sum_k L[p][k] * U[k][j].
for (j = p; j < n; j++) {
sum = 0;
for (k = 0; k < p; k++) {
sum += D[p][k] * D[k][j];
}
D[p][j] = A[p][j] - sum;
}
q = p;
// Column q of L: L[i][q] = (A[i][q] - sum_k L[i][k] * U[k][q]) / U[q][q].
for (i = q + 1; i < n; i++) {
sum = 0;
for(k = 0; k < q; k++) {
sum += D[i][k] * D[k][q];
}
if (D[q][q] == 0) {
printf("U[%d][%d] == 0\n Tidak bisa dibagi 0...\n", q, q);
stop = 1;
} else {
D[i][q] = (A[i][q] - sum) / D[q][q];
}
if (stop) {
break;
}
}
if (stop) {
break;
}
}
}
// Solves D * x = b by forward substitution (L y = b, unit diagonal) followed
// by back substitution (U x = y), where D holds the packed LU factors
// produced by dekomposisi(). `y` is caller-provided device scratch of n
// doubles; the caller retains ownership.
__device__
void sulih(double D[][n], double b[n], double x[n], double *y) {
int i, j;
double sum;
// Forward substitution: L y = b (L has an implicit unit diagonal).
for (i = 0; i < n; i++) {
sum = 0;
for (j = 0; j < i; j++) {
sum += y[j] * D[i][j];
}
y[i] = b[i] - sum;
}
// Back substitution: U x = y.
for (i = n-1; i >= 0; i--) {
sum = 0;
for (j = i+1; j < n; j++) {
sum += x[j] * D[i][j];
}
x[i] = (y[i] - sum) / D[i][i];
}
// Fixed: the original called free(y) here, but y is allocated by the host
// with cudaMalloc; in-kernel free() is only valid for pointers from
// in-kernel malloc(), so that call was undefined behavior. The host now
// owns and releases y.
}
// Pretty-prints the packed LU factors in D: first L (with the implicit unit
// diagonal shown as the literal 1), then U, in 8-character columns.
__device__
void print_LU(double D[][n]) {
printf("Dekomposisi\n");
printf("L =\n");
for (int row = 0; row < n; row++) {
printf("\t");
for (int col = 0; col < row; col++) {
printf("%8.4f ", D[row][col]);
}
printf("%8d\n", 1);
}
printf("U =\n");
for (int row = 0; row < n; row++) {
printf("\t");
for (int col = 0; col < row; col++) {
printf("%8s ", "");
}
for (int col = row; col < n; col++) {
printf("%8.4f ", D[row][col]);
}
printf("\n");
}
}
// Prints the solution vector x, one 8.4f-formatted value per line.
__device__
void print_x(double x[n]) {
printf("Solusi\n");
printf("x =\n");
for (int idx = 0; idx < n; idx++) {
printf("\t%8.4f\n", x[idx]);
}
}
// Single-thread demo kernel: factors a hard-coded 4x4 system A x = b with
// dekomposisi(), prints the LU factors, solves via sulih() using the
// host-allocated device scratch vector y, and prints the solution.
// (The commented-out matrices are alternative truss/test systems.)
__global__
void crout(double *y) {
double A[][n] = {
{0.31, 0.14, 0.30, 0.27},
{0.26, 0.32, 0.18, 0.24},
{0.61, 0.22, 0.20, 0.31},
{0.40, 0.34, 0.36, 0.17},
// {0.7071, 0, 1, 0, 0.5, 0, 0, 0, 0},
// {0, 1, 0, 0, 0, -1, 0, 0, 0},
// {0, 0, -1, 0, 0, 0, 0, 0, 0},
// {0, 0, 0, 1, 0, 0, 0, 0, -0.7071},
// {0.7071, 0, 0, -1, -0.8660, 0, 0, 0, 0},
// {0, 0, 0, 0, 0, 0, 1, 0, 0.7071},
// {0, 0, 0, 0, -0.5, 0, -1, 0, 0},
// {0, 0, 0, 0, 0.8660, 1, 0, -1, 0},
// {0, 0, 0, 0, 0, 0, 0, 0, 0.7071},
// { 0.866, 0, -0.5, 0, 0, 0},
// { 0, 1, 0.5, 0, 0, 0},
// { 0.5, 0, 0.866, 0, 0, 0},
// {-0.866, -1, 0, -1, 0, 0},
// { -0.5, 0, 0, 0, -1, 0},
// { 0, 0, -0.866, 0, 0, -1},
};
double b[n] = {
1.02,
1.00,
1.34,
1.27,
// -1000,
// 0,
// 0,
// 0,
// 0,
// 500,
// -500,
// 0,
// 0,
// 0,
// 0,
// -1000,
// 0,
// 0,
// 0,
};
// Note: D is declared with 6 rows; only the first n (= 4) are used.
double D[][n] = {
{0, 0, 0, 0},
{0, 0, 0, 0},
{0, 0, 0, 0},
{0, 0, 0, 0},
{0, 0, 0, 0},
{0, 0, 0, 0},
};
double x[n] = {
0,
0,
0,
0,
};
dekomposisi(A, D);
print_LU(D);
sulih(D, b, x, y);
print_x(x);
}
// Host driver: allocates the device scratch vector used by sulih(), runs
// the single-thread crout kernel, and waits for it to finish.
int main(int argc, char *argv[] ) {
double *y;
cudaMalloc(&y, n*sizeof(double));
crout<<<1, 1>>>(y);
cudaDeviceSynchronize();
// Fixed: release the device scratch buffer (it was leaked before).
cudaFree(y);
return 0;
}
|
f80ccb43efb4118d82e0973f69ecb6301990d124.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cstring>
#include <iostream>
enum class MaterialType { None, Lambertian, Metal };
// Minimal 3-component float vector (POD; usable on host and device).
struct vec3 {
float x;
float y;
float z;
};
// Type-erased, POD description of a material. Filled on the host, deep-copied
// to the device, and consumed by the device-side Material constructors.
struct GenericMaterial {
__device__ __host__ GenericMaterial():
matType(MaterialType::None),
numScalars(0), scalars(nullptr),
numVectors(0), vectors(nullptr) {
printf("Constructing GenericMaterial\n");
}
MaterialType matType;
int numScalars;   // entries in `scalars`
float *scalars;
int numVectors;   // entries in `vectors`
vec3 *vectors;
};
// Abstract material interface; concrete materials implement scatter().
// Instances are built with device-side `new` in create_world so their
// vtable pointers are valid in device code.
class Material {
public:
__device__ virtual bool scatter() const = 0;
};
// Diffuse material; its albedo is read from genMat->vectors[0].
class Lambertian: public Material {
public:
__device__ Lambertian(GenericMaterial* genMat) {
m_albedo = genMat->vectors[0];
}
__device__ virtual bool scatter() const {
printf("Lambertian albedo %f %f %f\n",
m_albedo.x, m_albedo.y, m_albedo.z);
return true;
}
const MaterialType matType = MaterialType::Lambertian;
private:
vec3 m_albedo;
};
// Reflective material; fuzz = genMat->scalars[0], albedo = genMat->vectors[0].
class Metal: public Material {
public:
__device__ Metal(GenericMaterial* genMat) {
m_fuzz = genMat->scalars[0];
m_albedo = genMat->vectors[0];
}
__device__ virtual bool scatter() const {
printf("Metal albedo %f, %f %f %f\n",
m_fuzz, m_albedo.x, m_albedo.y, m_albedo.z);
return true;
}
const MaterialType matType = MaterialType::Metal;
private:
float m_fuzz;
vec3 m_albedo;
};
enum class ObjectType { None, Mesh, Sphere };
// Type-erased, POD description of a scene object. Filled on the host,
// deep-copied to the device, and consumed by the Object constructors.
struct GenericObject {
__device__ __host__ GenericObject() :
objType(ObjectType::None),
numScalars(0), scalars(nullptr),
numVectors(0), vectors(nullptr) {}
ObjectType objType;
int numScalars;   // entries in `scalars`
float *scalars;
int numVectors;   // entries in `vectors`
vec3 *vectors;
};
// Abstract scene object; subclasses implement hit().
// NOTE(review): `objType` is re-declared (shadowed, not overridden) in
// Mesh/Sphere, so reads through an Object* always see None -- confirm
// whether that is intended.
class Object {
public:
__device__ virtual bool hit() const = 0;
const ObjectType objType = ObjectType::None;
protected:
Material *m_material;   // set by subclass constructors; not owned here
};
// Triangle mesh: borrows the vertex array from its GenericObject descriptor.
class Mesh: public Object {
public:
const ObjectType objType = ObjectType::Mesh;   // shadows Object::objType
__device__ Mesh(GenericObject* genObj, Material* mat) {
m_numVertices = genObj->numVectors;
m_vertices = genObj->vectors;
m_material = mat;
}
__device__ virtual bool hit() const {
printf("hitting trianglemesh %d\n", m_numVertices);
m_material->scatter();
return true;
}
private:
int m_numVertices;
vec3 *m_vertices;
};
// Sphere: radius = scalars[0], x-position = scalars[1] of its descriptor.
class Sphere: public Object {
public:
const ObjectType objType = ObjectType::Sphere;   // shadows Object::objType
__device__ Sphere(GenericObject* genObj, Material* mat) {
m_radius = genObj->scalars[0];
m_x = genObj->scalars[1];
m_material = mat;
}
__device__ virtual bool hit() const {
printf("hitting sphere %f, %f\n", m_radius, m_x);
m_material->scatter();
return true;
}
private:
float m_radius;
float m_x;
};
// Aggregate of Object pointers; hit() forwards to every member.
class ObjectList: public Object {
public:
__device__ ObjectList() :
objects(nullptr), num_objects(0) {}
__device__ ObjectList(Object** objects, int num_objects) :
objects(objects), num_objects(num_objects) {}
__device__ virtual bool hit() const {
for (int i = 0; i < num_objects; i++) {
objects[i]->hit();
}
return true;
}
Object **objects;
int num_objects;
};
// Single-thread kernel: instantiates the concrete Material and Object
// hierarchy with device-side `new` (so vtables hold device addresses) from
// the type-erased descriptor arrays, then publishes an ObjectList wrapping
// all objects through *world.
__global__ void create_world(GenericObject* genObjList,
GenericMaterial* genMatList,
int numObjects,
Object** objectList,
Object** world)
{
printf("numObjects = %d\n", numObjects);
for (int objIdx = 0; objIdx < numObjects; objIdx++) {
// Material first (descriptor slot objIdx pairs object and material).
Material *mat = nullptr;
GenericMaterial* genMat = &(genMatList[objIdx]);
if (genMat->matType == MaterialType::Lambertian) {
printf("Lambertian\n");
mat = new Lambertian(genMat);
}
else if (genMat->matType == MaterialType::Metal) {
printf("Metal\n");
mat = new Metal(genMat);
}
GenericObject* genObj = &(genObjList[objIdx]);
if (genObj->objType == ObjectType::Sphere) {
printf("Sphere %f %f\n", genObj->scalars[0], genObj->scalars[1]);
objectList[objIdx] = new Sphere(genObj, mat);
}
else if (genObj->objType == ObjectType::Mesh) {
printf("Mesh %d\n", genObj->numVectors);
objectList[objIdx] = new Mesh(genObj, mat);
}
}
*world = new ObjectList(objectList, numObjects);
}
// Kernel entry point: traverses the scene once via the world's hit().
// (numObjects is unused here; the ObjectList stores its own count.)
__global__ void render_world(Object** world, int numObjects)
{
(*world)->hit();
}
// Builds two parallel host-side descriptor arrays (one object and one
// material per slot): slot 0 is a Lambertian sphere, slot 1 is a Metal
// triangle. All arrays are heap-allocated; ownership passes to the caller.
void setupScene(GenericObject** genObjList,
GenericMaterial** genMatList, int& o_numObjects)
{
std::cout << "Setting up scene ..." << std::endl;
int numObjects = 2;
*genObjList = new GenericObject[numObjects];
*genMatList = new GenericMaterial[numObjects];
// Sphere
int numScalars = 2;
(*genObjList)[0].objType = ObjectType::Sphere;
(*genObjList)[0].numScalars = numScalars;
(*genObjList)[0].scalars = new float[numScalars];
(*genObjList)[0].scalars[0] = 1.0f;
(*genObjList)[0].scalars[1] = 0.5f;
(*genMatList)[0].matType = MaterialType::Lambertian;
(*genMatList)[0].numVectors = 1;
(*genMatList)[0].vectors = new vec3;
(*genMatList)[0].vectors[0] = {0.25f, 0.35f, 0.45f};
// Mesh
int numVectors = 3;
(*genObjList)[1].objType = ObjectType::Mesh;
(*genObjList)[1].numVectors = numVectors;
(*genObjList)[1].vectors = new vec3[numVectors];
(*genObjList)[1].vectors[0] = {0.0f, 0.0f, 0.0f};
(*genObjList)[1].vectors[1] = {1.0f, 0.0f, 0.0f};
(*genObjList)[1].vectors[2] = {0.0f, 1.0f, 0.0f};
(*genMatList)[1].matType = MaterialType::Metal;
(*genMatList)[1].numVectors = 1;
(*genMatList)[1].vectors = new vec3;
(*genMatList)[1].vectors[0] = {0.55f, 0.65f, 0.75f};
(*genMatList)[1].numScalars = 1;
(*genMatList)[1].scalars = new float;
(*genMatList)[1].scalars[0] = 0.95f;
o_numObjects = numObjects;
std::cout << "Setting up scene done." << std::endl;
}
// Host driver: deep-copies the generic object/material descriptors to the
// device (inner scalar/vector arrays first, patched into a staging copy of
// each descriptor, then the descriptor arrays themselves), builds the
// polymorphic world on the device, and renders it once.
// NOTE(review): individual hipMalloc/hipMemcpy calls are unchecked and
// host/device allocations are never released -- acceptable for a demo,
// but worth confirming upstream.
int main()
{
GenericObject *genObjList;
GenericMaterial *genMatList;
int numObjects;
setupScene(&genObjList, &genMatList, numObjects);
// copy genObjList to device
// helpful: https://stackoverflow.com/questions/19404965/how-to-use-cudamalloc-cudamemcpy-for-a-pointer-to-a-structure-containing-point
// Staging array: same descriptors, but with inner pointers rewritten to
// freshly allocated device buffers.
GenericObject *h_genObjList = new GenericObject[numObjects];
std::memcpy(h_genObjList, genObjList, numObjects*sizeof(GenericObject));
for (int objIdx = 0; objIdx < numObjects; objIdx++) {
if (h_genObjList[objIdx].numScalars > 0) {
hipMalloc((void**)&(h_genObjList[objIdx].scalars),
h_genObjList[objIdx].numScalars*sizeof(float));
hipMemcpy(h_genObjList[objIdx].scalars, genObjList[objIdx].scalars,
h_genObjList[objIdx].numScalars*sizeof(float), hipMemcpyHostToDevice);
}
if (h_genObjList[objIdx].numVectors > 0) {
hipMalloc((void**)&(h_genObjList[objIdx].vectors),
h_genObjList[objIdx].numVectors*sizeof(vec3));
hipMemcpy(h_genObjList[objIdx].vectors, genObjList[objIdx].vectors,
h_genObjList[objIdx].numVectors*sizeof(vec3), hipMemcpyHostToDevice);
}
}
GenericObject *d_genObjList;
hipMalloc((void**)&d_genObjList, numObjects*sizeof(GenericObject));
hipMemcpy(d_genObjList, h_genObjList, numObjects*sizeof(GenericObject), hipMemcpyHostToDevice);
//~copy genObjList to device
// copy genMatList to device
// helpful: https://stackoverflow.com/questions/19404965/how-to-use-cudamalloc-cudamemcpy-for-a-pointer-to-a-structure-containing-point
// Same staging pattern for the material descriptors.
GenericMaterial *h_genMatList = new GenericMaterial[numObjects];
std::memcpy(h_genMatList, genMatList, numObjects*sizeof(GenericMaterial));
for (int objIdx = 0; objIdx < numObjects; objIdx++) {
if (h_genMatList[objIdx].numScalars > 0) {
hipMalloc((void**)&(h_genMatList[objIdx].scalars),
h_genMatList[objIdx].numScalars*sizeof(float));
hipMemcpy(h_genMatList[objIdx].scalars, genMatList[objIdx].scalars,
h_genMatList[objIdx].numScalars*sizeof(float), hipMemcpyHostToDevice);
}
if (h_genMatList[objIdx].numVectors > 0) {
hipMalloc((void**)&(h_genMatList[objIdx].vectors),
h_genMatList[objIdx].numVectors*sizeof(vec3));
hipMemcpy(h_genMatList[objIdx].vectors, genMatList[objIdx].vectors,
h_genMatList[objIdx].numVectors*sizeof(vec3), hipMemcpyHostToDevice);
}
}
GenericMaterial *d_genMatList;
hipMalloc((void**)&d_genMatList, numObjects*sizeof(GenericMaterial));
hipMemcpy(d_genMatList, h_genMatList, numObjects*sizeof(GenericMaterial), hipMemcpyHostToDevice);
//~copy genMatList to device
Object **d_list;
hipMalloc((void**)&d_list, numObjects*sizeof(Object*));
Object **d_world;
hipMalloc((void**)&d_world, sizeof(Object*));
hipLaunchKernelGGL(( create_world), dim3(1),dim3(1), 0, 0, d_genObjList, d_genMatList, numObjects, d_list, d_world);
hipDeviceSynchronize();
std::cout << hipGetErrorString(hipGetLastError()) << std::endl;
hipLaunchKernelGGL(( render_world), dim3(1),dim3(1), 0, 0, d_world, numObjects);
hipDeviceSynchronize();
std::cout << hipGetErrorString(hipGetLastError()) << std::endl;
return 0;
}
| f80ccb43efb4118d82e0973f69ecb6301990d124.cu | #include <cstdio>
#include <cstring>
#include <iostream>
enum class MaterialType { None, Lambertian, Metal };
// Minimal 3-component float vector (POD; usable on host and device).
struct vec3 {
float x;
float y;
float z;
};
// Type-erased, POD description of a material. Filled on the host, deep-copied
// to the device, and consumed by the device-side Material constructors.
struct GenericMaterial {
__device__ __host__ GenericMaterial():
matType(MaterialType::None),
numScalars(0), scalars(nullptr),
numVectors(0), vectors(nullptr) {
printf("Constructing GenericMaterial\n");
}
MaterialType matType;
int numScalars;   // entries in `scalars`
float *scalars;
int numVectors;   // entries in `vectors`
vec3 *vectors;
};
// Abstract material interface; concrete materials implement scatter().
// Instances are built with device-side `new` in create_world so their
// vtable pointers are valid in device code.
class Material {
public:
__device__ virtual bool scatter() const = 0;
};
// Diffuse material; its albedo is read from genMat->vectors[0].
class Lambertian: public Material {
public:
__device__ Lambertian(GenericMaterial* genMat) {
m_albedo = genMat->vectors[0];
}
__device__ virtual bool scatter() const {
printf("Lambertian albedo %f %f %f\n",
m_albedo.x, m_albedo.y, m_albedo.z);
return true;
}
const MaterialType matType = MaterialType::Lambertian;
private:
vec3 m_albedo;
};
// Reflective material; fuzz = genMat->scalars[0], albedo = genMat->vectors[0].
class Metal: public Material {
public:
__device__ Metal(GenericMaterial* genMat) {
m_fuzz = genMat->scalars[0];
m_albedo = genMat->vectors[0];
}
__device__ virtual bool scatter() const {
printf("Metal albedo %f, %f %f %f\n",
m_fuzz, m_albedo.x, m_albedo.y, m_albedo.z);
return true;
}
const MaterialType matType = MaterialType::Metal;
private:
float m_fuzz;
vec3 m_albedo;
};
enum class ObjectType { None, Mesh, Sphere };
// Type-erased, POD description of a scene object. Filled on the host,
// deep-copied to the device, and consumed by the Object constructors.
struct GenericObject {
__device__ __host__ GenericObject() :
objType(ObjectType::None),
numScalars(0), scalars(nullptr),
numVectors(0), vectors(nullptr) {}
ObjectType objType;
int numScalars;   // entries in `scalars`
float *scalars;
int numVectors;   // entries in `vectors`
vec3 *vectors;
};
// Abstract scene object; subclasses implement hit().
// NOTE(review): `objType` is re-declared (shadowed, not overridden) in
// Mesh/Sphere, so reads through an Object* always see None -- confirm
// whether that is intended.
class Object {
public:
__device__ virtual bool hit() const = 0;
const ObjectType objType = ObjectType::None;
protected:
Material *m_material;   // set by subclass constructors; not owned here
};
// Triangle mesh: borrows the vertex array from its GenericObject descriptor.
class Mesh: public Object {
public:
const ObjectType objType = ObjectType::Mesh;   // shadows Object::objType
__device__ Mesh(GenericObject* genObj, Material* mat) {
m_numVertices = genObj->numVectors;
m_vertices = genObj->vectors;
m_material = mat;
}
__device__ virtual bool hit() const {
printf("hitting trianglemesh %d\n", m_numVertices);
m_material->scatter();
return true;
}
private:
int m_numVertices;
vec3 *m_vertices;
};
// Sphere: radius = scalars[0], x-position = scalars[1] of its descriptor.
class Sphere: public Object {
public:
const ObjectType objType = ObjectType::Sphere;   // shadows Object::objType
__device__ Sphere(GenericObject* genObj, Material* mat) {
m_radius = genObj->scalars[0];
m_x = genObj->scalars[1];
m_material = mat;
}
__device__ virtual bool hit() const {
printf("hitting sphere %f, %f\n", m_radius, m_x);
m_material->scatter();
return true;
}
private:
float m_radius;
float m_x;
};
// Aggregate of Object pointers; hit() forwards to every member.
class ObjectList: public Object {
public:
__device__ ObjectList() :
objects(nullptr), num_objects(0) {}
__device__ ObjectList(Object** objects, int num_objects) :
objects(objects), num_objects(num_objects) {}
__device__ virtual bool hit() const {
for (int i = 0; i < num_objects; i++) {
objects[i]->hit();
}
return true;
}
Object **objects;
int num_objects;
};
// Single-thread kernel: instantiates the concrete Material and Object
// hierarchy with device-side `new` (so vtables hold device addresses) from
// the type-erased descriptor arrays, then publishes an ObjectList wrapping
// all objects through *world.
__global__ void create_world(GenericObject* genObjList,
GenericMaterial* genMatList,
int numObjects,
Object** objectList,
Object** world)
{
printf("numObjects = %d\n", numObjects);
for (int objIdx = 0; objIdx < numObjects; objIdx++) {
// Material first (descriptor slot objIdx pairs object and material).
Material *mat = nullptr;
GenericMaterial* genMat = &(genMatList[objIdx]);
if (genMat->matType == MaterialType::Lambertian) {
printf("Lambertian\n");
mat = new Lambertian(genMat);
}
else if (genMat->matType == MaterialType::Metal) {
printf("Metal\n");
mat = new Metal(genMat);
}
GenericObject* genObj = &(genObjList[objIdx]);
if (genObj->objType == ObjectType::Sphere) {
printf("Sphere %f %f\n", genObj->scalars[0], genObj->scalars[1]);
objectList[objIdx] = new Sphere(genObj, mat);
}
else if (genObj->objType == ObjectType::Mesh) {
printf("Mesh %d\n", genObj->numVectors);
objectList[objIdx] = new Mesh(genObj, mat);
}
}
*world = new ObjectList(objectList, numObjects);
}
// Kernel entry point: traverses the scene once via the world's hit().
// (numObjects is unused here; the ObjectList stores its own count.)
__global__ void render_world(Object** world, int numObjects)
{
(*world)->hit();
}
// Builds two parallel host-side descriptor arrays (one object and one
// material per slot): slot 0 is a Lambertian sphere, slot 1 is a Metal
// triangle. All arrays are heap-allocated; ownership passes to the caller.
void setupScene(GenericObject** genObjList,
GenericMaterial** genMatList, int& o_numObjects)
{
std::cout << "Setting up scene ..." << std::endl;
int numObjects = 2;
*genObjList = new GenericObject[numObjects];
*genMatList = new GenericMaterial[numObjects];
// Sphere
int numScalars = 2;
(*genObjList)[0].objType = ObjectType::Sphere;
(*genObjList)[0].numScalars = numScalars;
(*genObjList)[0].scalars = new float[numScalars];
(*genObjList)[0].scalars[0] = 1.0f;
(*genObjList)[0].scalars[1] = 0.5f;
(*genMatList)[0].matType = MaterialType::Lambertian;
(*genMatList)[0].numVectors = 1;
(*genMatList)[0].vectors = new vec3;
(*genMatList)[0].vectors[0] = {0.25f, 0.35f, 0.45f};
// Mesh
int numVectors = 3;
(*genObjList)[1].objType = ObjectType::Mesh;
(*genObjList)[1].numVectors = numVectors;
(*genObjList)[1].vectors = new vec3[numVectors];
(*genObjList)[1].vectors[0] = {0.0f, 0.0f, 0.0f};
(*genObjList)[1].vectors[1] = {1.0f, 0.0f, 0.0f};
(*genObjList)[1].vectors[2] = {0.0f, 1.0f, 0.0f};
(*genMatList)[1].matType = MaterialType::Metal;
(*genMatList)[1].numVectors = 1;
(*genMatList)[1].vectors = new vec3;
(*genMatList)[1].vectors[0] = {0.55f, 0.65f, 0.75f};
(*genMatList)[1].numScalars = 1;
(*genMatList)[1].scalars = new float;
(*genMatList)[1].scalars[0] = 0.95f;
o_numObjects = numObjects;
std::cout << "Setting up scene done." << std::endl;
}
// Host driver: deep-copies the generic object/material descriptors to the
// device (inner scalar/vector arrays first, patched into a staging copy of
// each descriptor, then the descriptor arrays themselves), builds the
// polymorphic world on the device, and renders it once.
// NOTE(review): individual cudaMalloc/cudaMemcpy calls are unchecked and
// host/device allocations are never released -- acceptable for a demo,
// but worth confirming upstream.
int main()
{
GenericObject *genObjList;
GenericMaterial *genMatList;
int numObjects;
setupScene(&genObjList, &genMatList, numObjects);
// copy genObjList to device
// helpful: https://stackoverflow.com/questions/19404965/how-to-use-cudamalloc-cudamemcpy-for-a-pointer-to-a-structure-containing-point
// Staging array: same descriptors, but with inner pointers rewritten to
// freshly allocated device buffers.
GenericObject *h_genObjList = new GenericObject[numObjects];
std::memcpy(h_genObjList, genObjList, numObjects*sizeof(GenericObject));
for (int objIdx = 0; objIdx < numObjects; objIdx++) {
if (h_genObjList[objIdx].numScalars > 0) {
cudaMalloc((void**)&(h_genObjList[objIdx].scalars),
h_genObjList[objIdx].numScalars*sizeof(float));
cudaMemcpy(h_genObjList[objIdx].scalars, genObjList[objIdx].scalars,
h_genObjList[objIdx].numScalars*sizeof(float), cudaMemcpyHostToDevice);
}
if (h_genObjList[objIdx].numVectors > 0) {
cudaMalloc((void**)&(h_genObjList[objIdx].vectors),
h_genObjList[objIdx].numVectors*sizeof(vec3));
cudaMemcpy(h_genObjList[objIdx].vectors, genObjList[objIdx].vectors,
h_genObjList[objIdx].numVectors*sizeof(vec3), cudaMemcpyHostToDevice);
}
}
GenericObject *d_genObjList;
cudaMalloc((void**)&d_genObjList, numObjects*sizeof(GenericObject));
cudaMemcpy(d_genObjList, h_genObjList, numObjects*sizeof(GenericObject), cudaMemcpyHostToDevice);
//~copy genObjList to device
// copy genMatList to device
// helpful: https://stackoverflow.com/questions/19404965/how-to-use-cudamalloc-cudamemcpy-for-a-pointer-to-a-structure-containing-point
// Same staging pattern for the material descriptors.
GenericMaterial *h_genMatList = new GenericMaterial[numObjects];
std::memcpy(h_genMatList, genMatList, numObjects*sizeof(GenericMaterial));
for (int objIdx = 0; objIdx < numObjects; objIdx++) {
if (h_genMatList[objIdx].numScalars > 0) {
cudaMalloc((void**)&(h_genMatList[objIdx].scalars),
h_genMatList[objIdx].numScalars*sizeof(float));
cudaMemcpy(h_genMatList[objIdx].scalars, genMatList[objIdx].scalars,
h_genMatList[objIdx].numScalars*sizeof(float), cudaMemcpyHostToDevice);
}
if (h_genMatList[objIdx].numVectors > 0) {
cudaMalloc((void**)&(h_genMatList[objIdx].vectors),
h_genMatList[objIdx].numVectors*sizeof(vec3));
cudaMemcpy(h_genMatList[objIdx].vectors, genMatList[objIdx].vectors,
h_genMatList[objIdx].numVectors*sizeof(vec3), cudaMemcpyHostToDevice);
}
}
GenericMaterial *d_genMatList;
cudaMalloc((void**)&d_genMatList, numObjects*sizeof(GenericMaterial));
cudaMemcpy(d_genMatList, h_genMatList, numObjects*sizeof(GenericMaterial), cudaMemcpyHostToDevice);
//~copy genMatList to device
Object **d_list;
cudaMalloc((void**)&d_list, numObjects*sizeof(Object*));
Object **d_world;
cudaMalloc((void**)&d_world, sizeof(Object*));
create_world<<<1,1>>>(d_genObjList, d_genMatList, numObjects, d_list, d_world);
cudaDeviceSynchronize();
std::cout << cudaGetErrorString(cudaGetLastError()) << std::endl;
render_world<<<1,1>>>(d_world, numObjects);
cudaDeviceSynchronize();
std::cout << cudaGetErrorString(cudaGetLastError()) << std::endl;
return 0;
}
|
03ed7384761de4663a03f47e5ad38fe94d7f20af.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file indexing_op.cu
* \brief
* \author Siyi Li, Chi Zhang
*/
#include "./indexing_op.h"
#include "./util/tensor_util-inl.cuh"
#include "./util/tensor_util-inl.h"
namespace mxnet {
namespace op {
/*! \brief If there are out-of-bound indices, out will be assigned to 1.
*/
// Per-element functor: flags out-of-range indices. `out` is a single device
// flag shared by all threads; concurrent writers all store 1, so the race
// is benign.
struct is_valid_check {
template<typename DType>
MSHADOW_XINLINE static void Map(int i, int32_t* out, const DType* data,
const DType min, const DType max) {
if (data[i] < min || data[i] > max) *out = 1;
}
};
// Backward kernel functor for take/embedding with a row_sparse gradient:
// one thread per (input index, feature) pair scatters an ograd element into
// the compacted output row for that index.
struct AddTakeGradRspGPUKernel {
template<typename DType, typename IType>
__device__ __forceinline__ static void Map(int tid,
DType* out,
const nnvm::dim_t* prefix_sum,
const IType* data,
const DType* ograd,
const nnvm::dim_t row_length) {
using nnvm::dim_t;
// tid encodes (index position, feature column) for a row of width row_length.
const dim_t data_i = tid / row_length;
const dim_t grad_i = tid % row_length;
const dim_t irow = static_cast<dim_t>(data[data_i]);
// prefix_sum maps a dense row id to 1 + its slot among the non-zero
// rows of the row_sparse output, hence the -1.
const dim_t rsp_row = prefix_sum[irow] - 1;
const DType val = ograd[data_i * row_length + grad_i];
// Several input positions may reference the same row, so accumulate
// atomically (summation order, and thus rounding, is non-deterministic).
atomicAdd(static_cast<DType *>(&(out[rsp_row*row_length+grad_i])), val);
}
};
/*
 * \brief kernel for backward computation for take, executed with deterministic order
 * \param thread_id the thread id
 * \param out the output gradient data
 * \param lookup_table the table to lookup the position of an id in gradient array
 * \param sorted_data the sorted data input
 * \param original_idx the original indices of the sorted data input
 * \param ograd head gradient
 * \param row_length the output dimension
 * \param num_threads_per_row the number of threads to process a row together
 * \param SZ the number of features a thread is responsible for
 */
template<int SZ>
struct AddTakeGradRspDeterministicKernel {
  template<typename DType>
  __device__ __forceinline__ static void Map(int thread_id,
                                             DType* out,
                                             const nnvm::dim_t* lookup_table,
                                             const nnvm::dim_t* sorted_data,
                                             const nnvm::dim_t data_size,
                                             const nnvm::dim_t* original_idx,
                                             const DType* ograd,
                                             const nnvm::dim_t row_length,
                                             const nnvm::dim_t num_threads_per_row) {
    using nnvm::dim_t;
    // tid indexes into sorted_data; each row is covered by
    // num_threads_per_row threads, each handling up to SZ features.
    int tid = thread_id / num_threads_per_row;
    const int feature_start = thread_id % num_threads_per_row * SZ;
    int num_features = SZ;
    // clamp the last slice so we never read/write past the row end
    if (feature_start + num_features > row_length) {
      num_features = row_length - feature_start;
    }
    // Only the thread sitting at the START of a run of equal sorted ids
    // accumulates the whole run; this fixes the summation order and makes
    // the result deterministic (unlike the atomicAdd variant above).
    if (tid == 0 || sorted_data[tid - 1] != sorted_data[tid]) {
      DType acc[SZ];
      #pragma unroll
      for (int i = 0; i < SZ; i++) {
        acc[i] = 0;
      }
      const dim_t data = sorted_data[tid];
      const dim_t row_id = lookup_table[data];
      const dim_t out_offset = row_id * row_length + feature_start;
      do {
        // original_idx maps the sorted position back to the position in
        // the (unsorted) ograd array.
        const dim_t idx = original_idx[tid];
        const dim_t ograd_offset = idx * row_length + feature_start;
        for (int i = 0; i < num_features; i++) {
          acc[i] += ograd[ograd_offset + i];
        }
        tid++;
      } while (tid < data_size && sorted_data[tid - 1] == sorted_data[tid]);
      for (int i = 0; i < num_features; i++) {
        out[out_offset + i] += acc[i];
      }
    }
  }
};
/*!
 * \brief GPU forward pass of SparseEmbedding with a row-sparse weight.
 * Validates that every index in `data` lies in [0, num_rows), then dispatches
 * to the dense path when the rsp weight has no missing rows.
 */
template<>
void SparseEmbeddingOpForwardRspImpl<gpu>(const OpContext& ctx,
                                          const TBlob& data,
                                          const NDArray& weight,
                                          const OpReqType req,
                                          const TBlob& output) {
  if (req == kNullOp) return;
  using namespace rowsparse;
  using namespace mxnet_op;
  mshadow::Stream<gpu>* s = ctx.get_stream<gpu>();
  // zeros weight: an uninitialized rsp weight means all rows are zero,
  // so the whole output is zero and nothing needs to be gathered.
  if (req == kWriteTo && !weight.storage_initialized()) {
    size_t out_size = output.shape_.Size();
    MSHADOW_TYPE_SWITCH(output.type_flag_, DType, {
      Fill<false>(s, TBlob(output.dptr<DType>(), mshadow::Shape1(out_size),
          gpu::kDevMask), kWriteTo, 0);
    })
    return;
  }
  // check out-of-bound indices
  int32_t is_valid = 0;
  MSHADOW_TYPE_SWITCH(data.type_flag_, DType, {
    DType min = 0;
    DType max = static_cast<DType>(weight.shape()[0] - 1);
    DType* data_ptr = data.dptr<DType>();
    size_t data_size = data.shape_.Size();
    Tensor<gpu, 1, char> workspace = ctx.requested[0]
        .get_space_typed<gpu, 1, char>(Shape1(sizeof(int32_t)), s);
    int32_t* is_valid_ptr = reinterpret_cast<int32_t*>(workspace.dptr_);
    Kernel<set_zero, gpu>::Launch(s, 1, is_valid_ptr);
    Kernel<is_valid_check, gpu>::Launch(s, data_size, is_valid_ptr, data_ptr, min, max);
    // synchronous D2H copy; also acts as the barrier for the two kernels above
    CUDA_CALL(hipMemcpy(&is_valid, is_valid_ptr, sizeof(int32_t),
              hipMemcpyDeviceToHost));
  })
  CHECK_EQ(is_valid, 0) << "SparseEmbedding input contains data out of bound";
  // the weight is actually dense (every row present) -> use the dense kernel
  if (weight.aux_shape(kIdx)[0] == weight.shape()[0]) {
    EmbeddingOpForwardDnsImpl<gpu>(s, data, weight.data(), req, output);
  } else {
    EmbeddingOpForwardRspImpl<gpu>(s, data, weight, req, output);
  }
}
/*!
 * \brief Deterministic backward for SparseEmbedding (gpu): sort the lookup
 * indices while remembering their original positions, compact them into unique
 * row ids, then accumulate each run of equal ids in a fixed order so float
 * summation is reproducible across runs.
 */
template<typename IType, typename DType, typename RType>
void SparseEmbeddingDeterministicKernelLaunch(const OpContext& ctx,
                                              const TBlob& ograd,
                                              const TBlob& data,
                                              const OpReqType req,
                                              const NDArray& output) {
  using namespace mshadow;
  using namespace mxnet_op;
  using namespace expr;
  using namespace rowsparse;
  using nnvm::dim_t;
  mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
  const dim_t num_rows = output.shape()[0];
  const dim_t row_length = output.shape()[1];
  const dim_t data_size = static_cast<dim_t>(data.shape_.Size());
  // temp resource declarations (all carved from one workspace request below)
  dim_t* lookup_table = NULL;
  void* temp_storage = NULL;
  dim_t* sorted_data = NULL;
  dim_t* original_idx = NULL;
  // calculate number of bytes for temp resources
  size_t lookup_table_bytes = num_rows * sizeof(dim_t);
  size_t sorted_data_storage_bytes = data_size * sizeof(dim_t);
  size_t original_idx_storage_bytes = data_size * sizeof(dim_t);
  size_t sort_workspace_size = SortByKeyWorkspaceSize<dim_t, dim_t, gpu>(data_size);
  size_t unique_workspace_bytes = 0;
  // estimate unique temp space (NULL temp storage = size query only)
  IType* data_ptr = data.dptr<IType>();
  size_t *null_ptr = nullptr;
  hipcub::DeviceSelect::Unique(NULL, unique_workspace_bytes, data_ptr, data_ptr,
      null_ptr, data_size, Stream<gpu>::GetStream(s));
  // One more space reserved for unique count.
  // FIX: was `::max(...)` (hipify artifact) — no such function is declared at
  // global scope in host code; the CUDA original uses std::max.
  size_t temp_workspace_bytes = std::max(unique_workspace_bytes,
                                         sort_workspace_size);
  size_t total_storage_bytes = lookup_table_bytes + sorted_data_storage_bytes +
                               original_idx_storage_bytes + temp_workspace_bytes;
  // request resource and split it. layout is:
  // lookup_table, sorted_data, original_idx, temp_storage
  Tensor<gpu, 1, char> workspace = ctx.requested[0]
      .get_space_typed<gpu, 1, char>(Shape1(total_storage_bytes), s);
  lookup_table = reinterpret_cast<dim_t*>(workspace.dptr_);
  sorted_data = reinterpret_cast<dim_t*>(workspace.dptr_ + lookup_table_bytes);
  original_idx = reinterpret_cast<dim_t*>(workspace.dptr_ + lookup_table_bytes +
                                          sorted_data_storage_bytes);
  temp_storage = workspace.dptr_ + total_storage_bytes - temp_workspace_bytes;
  // make a copy of the data, to be sorted
  TBlob sorted_data_blob(sorted_data, Shape1(data_size), gpu::kDevMask);
  auto sorted_data_tensor = sorted_data_blob.FlatTo1D<gpu, dim_t>(s);
  mxnet_op::copy(s, sorted_data_blob, data);
  // generate original idx (iota 0..data_size-1)
  Tensor<gpu, 1, dim_t> original_idx_tensor(original_idx, Shape1(data_size), s);
  Kernel<range_fwd, gpu>::Launch(s, data_size, 1, static_cast<dim_t>(0),
                                 static_cast<dim_t>(1), kWriteTo, original_idx);
  // sort data with its original idx; num_bits bounds the radix passes needed
  int num_bits = ilog2(num_rows - 1);
  char* temp_storage_ptr = reinterpret_cast<char*>(temp_storage);
  Tensor<gpu, 1, char> temp_storage_tensor(temp_storage_ptr,
                                           Shape1(sort_workspace_size), s);
  SortByKey(sorted_data_tensor, original_idx_tensor, true,
            &temp_storage_tensor, 0, num_bits);
  // compute unique row ids based on sorted values.
  // aux data holds data_size ids + 1 extra slot for the unique count.
  output.CheckAndAllocAuxData(kIdx, Shape1(data_size + 1));
  // fill row_idx array of output matrix, using the row_flg values
  RType* grad_row_idx = output.aux_data(kIdx).dptr<RType>();
  hipcub::DeviceSelect::Unique(temp_storage_ptr, unique_workspace_bytes, sorted_data,
      grad_row_idx, grad_row_idx + data_size, data_size, Stream<gpu>::GetStream(s));
  dim_t nnr = 0;
  // copies sizeof(RType) bytes into a zero-initialized dim_t; relies on a
  // little-endian host when RType is narrower than dim_t
  CUDA_CALL(hipMemcpy(&nnr, grad_row_idx + data_size, sizeof(RType),
            hipMemcpyDeviceToHost));
  CHECK_EQ(output.shape().ndim(), 2) << "Unexpected ndim";
  output.CheckAndAllocData(Shape2(nnr, output.shape()[1]));
  output.set_aux_shape(kIdx, Shape1(nnr));
  // generate lookup table: original row id -> compacted row id
  Kernel<MarkLookupTable, gpu>::Launch(s, nnr, lookup_table, grad_row_idx);
  // accumulate gradients
  DType* grad_data = output.data().dptr<DType>();
  Fill<false>(s, TBlob(grad_data, Shape1(nnr * row_length), gpu::kDevMask),
              kWriteTo, 0);
  const int SZ = 4;  // features per thread
  const nnvm::dim_t num_threads_per_row = (row_length + SZ - 1) / SZ;
  Kernel<AddTakeGradRspDeterministicKernel<SZ>, gpu>::Launch(s, data_size * num_threads_per_row,
      grad_data, lookup_table, sorted_data, data_size, original_idx,
      ograd.dptr<DType>(), row_length, num_threads_per_row);
}
/*!
 * \brief Dispatch wrapper for the deterministic SparseEmbedding backward:
 * handles req/empty-input early-outs, then unrolls the (index, grad, row-idx)
 * dtype combinations and calls the templated kernel launcher.
 */
inline void SparseEmbeddingOpBackwardDeterministicRspImpl(const OpContext& ctx,
                                                          const TBlob& ograd,
                                                          const TBlob& data,
                                                          const OpReqType req,
                                                          const NDArray& output) {
  using nnvm::dim_t;
  if (req == kNullOp) return;
  CHECK_EQ(req, kWriteTo) << "SparseEmbedding layer doesn't support "
                          << "weight gradient calculation with req != write";
  mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
  const dim_t data_size = static_cast<dim_t>(data.shape_.Size());
  // no lookups -> the gradient is an all-zero rsp array
  if (data_size == 0) {
    FillZerosRspImpl(s, output);
    return;
  }
  MSHADOW_TYPE_SWITCH(data.type_flag_, IType, {
    MSHADOW_TYPE_SWITCH(ograd.type_flag_, DType, {
      MSHADOW_IDX_TYPE_SWITCH(output.aux_type(rowsparse::kIdx), RType, {
        SparseEmbeddingDeterministicKernelLaunch<IType, DType, RType>(ctx, ograd, data,
                                                                      req, output);
      });
    });
  });
}
/*!
 * \brief GPU backward for SparseEmbedding. Deterministic mode delegates to the
 * sort-based implementation; otherwise marks occupied rows, prefix-sums the
 * flags to compact row ids, and scatter-adds gradients with atomics.
 */
template<>
inline void SparseEmbeddingOpBackwardRspImpl<gpu>(const bool deterministic,
                                                  const OpContext& ctx,
                                                  const TBlob& ograd,
                                                  const TBlob& data,
                                                  const OpReqType req,
                                                  const NDArray& output) {
  if (deterministic) {
    SparseEmbeddingOpBackwardDeterministicRspImpl(ctx, ograd, data, req, output);
    return;
  }
  using namespace mshadow;
  using namespace mxnet_op;
  using namespace mshadow::expr;
  using namespace rowsparse;
  using nnvm::dim_t;
  if (req == kNullOp) return;
  CHECK_EQ(req, kWriteTo) << "SparseEmbedding layer doesn't support "
                          << "weight gradient calculation with req != write";
  // Request temporary storage for marking non-zero rows and prefix sum
  Stream<gpu> *s = ctx.get_stream<gpu>();
  dim_t num_rows = output.shape()[0];
  dim_t row_length = output.shape()[1];
  dim_t data_size = static_cast<dim_t>(data.shape_.Size());
  dim_t num_threads;
  MSHADOW_TYPE_SWITCH(data.type_flag_, IType, {
    MSHADOW_SGL_DBL_TYPE_SWITCH(ograd.type_flag_, DType, {
      MSHADOW_IDX_TYPE_SWITCH(output.aux_type(kIdx), RType, {
        dim_t* prefix_sum = NULL;
        void* d_temp_storage = NULL;
        size_t temp_storage_bytes = 0;
        // size query only (NULL temp storage): fills temp_storage_bytes
        hipcub::DeviceScan::InclusiveSum(d_temp_storage,
                                         temp_storage_bytes,
                                         prefix_sum,
                                         prefix_sum,
                                         num_rows,
                                         Stream<gpu>::GetStream(s));
        Tensor<gpu, 1, char> workspace = ctx.requested[0]
            .get_space_typed<gpu, 1, char>(Shape1(num_rows * sizeof(dim_t) +
                                           temp_storage_bytes), s);
        prefix_sum = reinterpret_cast<dim_t*>(workspace.dptr_)
        d_temp_storage = workspace.dptr_ + num_rows*sizeof(dim_t);
        num_threads = num_rows;
        Fill<false>(s, TBlob(prefix_sum, Shape1(num_threads), gpu::kDevMask), kWriteTo, 0);
        // flag each row referenced by `data`, then inclusive-scan the flags
        // so prefix_sum[row] - 1 becomes the compacted row id
        Kernel<MarkRowFlgKernel, gpu>::Launch(s, data_size, prefix_sum, data.dptr<IType>());
        hipcub::DeviceScan::InclusiveSum(d_temp_storage,
                                         temp_storage_bytes,
                                         prefix_sum,
                                         prefix_sum,
                                         num_rows,
                                         mshadow::Stream<gpu>::GetStream(s));
        dim_t nnr = 0;
        // last scan element = number of distinct rows touched (blocking copy)
        CUDA_CALL(hipMemcpy(&nnr, &prefix_sum[num_rows-1], sizeof(dim_t),
                  hipMemcpyDeviceToHost));
        if (nnr == 0) {
          FillZerosRspImpl(s, output);
          return;
        }
        output.CheckAndAlloc({Shape1(nnr)});
        RType* grad_row_idx = output.aux_data(kIdx).dptr<RType>();
        // fill row_idx array of output matrix, using the row_flg values
        Kernel<FillRspRowIdxKernel, gpu>::Launch(s, num_rows,
            grad_row_idx, prefix_sum, num_rows);
        // prefill with zeros
        DType* grad_data = output.data().dptr<DType>();
        Fill<false>(s, TBlob(grad_data, Shape1(nnr * row_length), gpu::kDevMask),
                    kWriteTo, 0);
        // add the final gradients (atomic, non-deterministic summation order)
        num_threads = row_length * data_size;
        Kernel<AddTakeGradRspGPUKernel, gpu>::Launch(s, num_threads, grad_data, prefix_sum,
            data.dptr<IType>(), ograd.dptr<DType>(), row_length);
      });
    });
  });
}
/*! \brief Backward kernel for gather_nd: thread i scatters its K-element slice
 * of `data` into `out` at the position given by the i-th index tuple.
 * atomicAdd handles duplicate index tuples (summation order not deterministic).
 * N = number of index tuples, M = index tuple length, K = trailing slice size.
 */
struct backward_gather_nd_gpu {
  template<typename DType, typename IType>
  MSHADOW_XINLINE static void Map(int i, int N, int M, int K,
                                  const mshadow::Shape<10> strides,
                                  DType* out, const DType* data,
                                  const IType* indices) {
    int offset = 0;
    // indices are laid out dimension-major: indices[j*N + i] is the j-th
    // coordinate of the i-th tuple
    for (int j = 0; j < M; ++j) {
      offset += strides[j] * static_cast<int>(indices[j*N + i]);
    }
    for (int j = 0; j < K; ++j) {
      atomicAdd(out + (offset + j), data[i * K + j]);
    }
  }
};
/*! \brief GPU launcher for gather_nd backward: one thread per index tuple
 * (N threads), forwarding all geometry arguments to backward_gather_nd_gpu.
 */
template<typename DType, typename IType>
inline void GatherNDBackwardImpl(int N, int M, int K,
                                 const mshadow::Shape<10> strides,
                                 DType* out,
                                 const DType* data,
                                 const IType* indices,
                                 mshadow::Stream<gpu> *s) {
  mxnet_op::Kernel<backward_gather_nd_gpu, gpu>::Launch(s, N, N, M, K, strides, out, data, indices);
}
// GPU kernel registrations for the embedding / take / gather / scatter family.
// FCompute = dense implementation, FComputeEx = sparse-aware implementation.
NNVM_REGISTER_OP(Embedding)
.set_attr<FCompute>("FCompute<gpu>", EmbeddingOpForward<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", SparseEmbeddingOpForwardEx<gpu>);
NNVM_REGISTER_OP(_contrib_SparseEmbedding)
.set_attr<FComputeEx>("FComputeEx<gpu>", SparseEmbeddingOpForwardEx<gpu>);
NNVM_REGISTER_OP(_backward_Embedding)
.set_attr<FCompute>("FCompute<gpu>", EmbeddingOpBackward<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", EmbeddingOpBackwardEx<gpu>);
NNVM_REGISTER_OP(_backward_SparseEmbedding)
.set_attr<FComputeEx>("FComputeEx<gpu>", SparseEmbeddingOpBackwardEx<gpu>);
NNVM_REGISTER_OP(take)
.set_attr<FCompute>("FCompute<gpu>", TakeOpForward<gpu>);
NNVM_REGISTER_OP(_backward_take)
.set_attr<FCompute>("FCompute<gpu>", TakeOpBackward<gpu>);
NNVM_REGISTER_OP(batch_take)
.set_attr<FCompute>("FCompute<gpu>", BatchTakeOpForward<gpu>);
NNVM_REGISTER_OP(one_hot)
.set_attr<FCompute>("FCompute<gpu>", OneHotOpForward<gpu>);
NNVM_REGISTER_OP(gather_nd)
.set_attr<FCompute>("FCompute<gpu>", GatherNDForward<gpu>);
NNVM_REGISTER_OP(scatter_nd)
.set_attr<FCompute>("FCompute<gpu>", ScatterNDForward<gpu>);
NNVM_REGISTER_OP(_backward_gather_nd)
.set_attr<FCompute>("FCompute<gpu>", GatherNDBackward<gpu>);
NNVM_REGISTER_OP(_scatter_set_nd)
.set_attr<FCompute>("FCompute<gpu>", ScatterSetNDForward<gpu>);
} // namespace op
} // namespace mxnet
| 03ed7384761de4663a03f47e5ad38fe94d7f20af.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file indexing_op.cu
* \brief
* \author Siyi Li, Chi Zhang
*/
#include "./indexing_op.h"
#include "./util/tensor_util-inl.cuh"
#include "./util/tensor_util-inl.h"
namespace mxnet {
namespace op {
/*! \brief If there are out-of-bound indices, out will be assigned to 1.
 * One thread per element of `data`; the write race on *out is benign
 * because every writer stores the same value (1).
 */
struct is_valid_check {
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, int32_t* out, const DType* data,
                                  const DType min, const DType max) {
    if (data[i] < min || data[i] > max) *out = 1;
  }
};
/*! \brief Non-deterministic backward kernel for sparse embedding: scatter-adds
 * head gradients into the row-sparse gradient array. One thread per
 * (input element, feature) pair, i.e. tid ranges over data_size * row_length.
 */
struct AddTakeGradRspGPUKernel {
  template<typename DType, typename IType>
  __device__ __forceinline__ static void Map(int tid,
                                             DType* out,
                                             const nnvm::dim_t* prefix_sum,
                                             const IType* data,
                                             const DType* ograd,
                                             const nnvm::dim_t row_length) {
    using nnvm::dim_t;
    const dim_t data_i = tid / row_length;  // which lookup index
    const dim_t grad_i = tid % row_length;  // which feature within the row
    const dim_t irow = static_cast<dim_t>(data[data_i]);
    // prefix_sum is an inclusive scan of row-occupancy flags, so
    // prefix_sum[irow] - 1 is the compacted row id in the rsp output.
    const dim_t rsp_row = prefix_sum[irow] - 1;
    const DType val = ograd[data_i * row_length + grad_i];
    // atomicAdd: the same row may appear multiple times in `data`, so the
    // accumulation order (and hence float rounding) is non-deterministic.
    atomicAdd(static_cast<DType *>(&(out[rsp_row*row_length+grad_i])), val);
  }
};
/*
 * \brief kernel for backward computation for take, executed with deterministic order
 * \param thread_id the thread id
 * \param out the output gradient data
 * \param lookup_table the table to lookup the position of an id in gradient array
 * \param sorted_data the sorted data input
 * \param original_idx the original indices of the sorted data input
 * \param ograd head gradient
 * \param row_length the output dimension
 * \param num_threads_per_row the number of threads to process a row together
 * \param SZ the number of features a thread is responsible for
 */
template<int SZ>
struct AddTakeGradRspDeterministicKernel {
  template<typename DType>
  __device__ __forceinline__ static void Map(int thread_id,
                                             DType* out,
                                             const nnvm::dim_t* lookup_table,
                                             const nnvm::dim_t* sorted_data,
                                             const nnvm::dim_t data_size,
                                             const nnvm::dim_t* original_idx,
                                             const DType* ograd,
                                             const nnvm::dim_t row_length,
                                             const nnvm::dim_t num_threads_per_row) {
    using nnvm::dim_t;
    // tid indexes into sorted_data; each row is covered by
    // num_threads_per_row threads, each handling up to SZ features.
    int tid = thread_id / num_threads_per_row;
    const int feature_start = thread_id % num_threads_per_row * SZ;
    int num_features = SZ;
    // clamp the last slice so we never read/write past the row end
    if (feature_start + num_features > row_length) {
      num_features = row_length - feature_start;
    }
    // Only the thread sitting at the START of a run of equal sorted ids
    // accumulates the whole run; this fixes the summation order and makes
    // the result deterministic (unlike the atomicAdd variant above).
    if (tid == 0 || sorted_data[tid - 1] != sorted_data[tid]) {
      DType acc[SZ];
      #pragma unroll
      for (int i = 0; i < SZ; i++) {
        acc[i] = 0;
      }
      const dim_t data = sorted_data[tid];
      const dim_t row_id = lookup_table[data];
      const dim_t out_offset = row_id * row_length + feature_start;
      do {
        // original_idx maps the sorted position back to the position in
        // the (unsorted) ograd array.
        const dim_t idx = original_idx[tid];
        const dim_t ograd_offset = idx * row_length + feature_start;
        for (int i = 0; i < num_features; i++) {
          acc[i] += ograd[ograd_offset + i];
        }
        tid++;
      } while (tid < data_size && sorted_data[tid - 1] == sorted_data[tid]);
      for (int i = 0; i < num_features; i++) {
        out[out_offset + i] += acc[i];
      }
    }
  }
};
/*!
 * \brief GPU forward pass of SparseEmbedding with a row-sparse weight.
 * Validates that every index in `data` lies in [0, num_rows), then dispatches
 * to the dense path when the rsp weight has no missing rows.
 */
template<>
void SparseEmbeddingOpForwardRspImpl<gpu>(const OpContext& ctx,
                                          const TBlob& data,
                                          const NDArray& weight,
                                          const OpReqType req,
                                          const TBlob& output) {
  if (req == kNullOp) return;
  using namespace rowsparse;
  using namespace mxnet_op;
  mshadow::Stream<gpu>* s = ctx.get_stream<gpu>();
  // zeros weight: an uninitialized rsp weight means all rows are zero,
  // so the whole output is zero and nothing needs to be gathered.
  if (req == kWriteTo && !weight.storage_initialized()) {
    size_t out_size = output.shape_.Size();
    MSHADOW_TYPE_SWITCH(output.type_flag_, DType, {
      Fill<false>(s, TBlob(output.dptr<DType>(), mshadow::Shape1(out_size),
          gpu::kDevMask), kWriteTo, 0);
    })
    return;
  }
  // check out-of-bound indices
  int32_t is_valid = 0;
  MSHADOW_TYPE_SWITCH(data.type_flag_, DType, {
    DType min = 0;
    DType max = static_cast<DType>(weight.shape()[0] - 1);
    DType* data_ptr = data.dptr<DType>();
    size_t data_size = data.shape_.Size();
    Tensor<gpu, 1, char> workspace = ctx.requested[0]
        .get_space_typed<gpu, 1, char>(Shape1(sizeof(int32_t)), s);
    int32_t* is_valid_ptr = reinterpret_cast<int32_t*>(workspace.dptr_);
    Kernel<set_zero, gpu>::Launch(s, 1, is_valid_ptr);
    Kernel<is_valid_check, gpu>::Launch(s, data_size, is_valid_ptr, data_ptr, min, max);
    // synchronous D2H copy; also acts as the barrier for the two kernels above
    CUDA_CALL(cudaMemcpy(&is_valid, is_valid_ptr, sizeof(int32_t),
              cudaMemcpyDeviceToHost));
  })
  CHECK_EQ(is_valid, 0) << "SparseEmbedding input contains data out of bound";
  // the weight is actually dense (every row present) -> use the dense kernel
  if (weight.aux_shape(kIdx)[0] == weight.shape()[0]) {
    EmbeddingOpForwardDnsImpl<gpu>(s, data, weight.data(), req, output);
  } else {
    EmbeddingOpForwardRspImpl<gpu>(s, data, weight, req, output);
  }
}
/*!
 * \brief Deterministic backward for SparseEmbedding (gpu): sort the lookup
 * indices while remembering their original positions, compact them into unique
 * row ids, then accumulate each run of equal ids in a fixed order so float
 * summation is reproducible across runs.
 */
template<typename IType, typename DType, typename RType>
void SparseEmbeddingDeterministicKernelLaunch(const OpContext& ctx,
                                              const TBlob& ograd,
                                              const TBlob& data,
                                              const OpReqType req,
                                              const NDArray& output) {
  using namespace mshadow;
  using namespace mxnet_op;
  using namespace expr;
  using namespace rowsparse;
  using nnvm::dim_t;
  mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
  const dim_t num_rows = output.shape()[0];
  const dim_t row_length = output.shape()[1];
  const dim_t data_size = static_cast<dim_t>(data.shape_.Size());
  // temp resource declarations (all carved from one workspace request below)
  dim_t* lookup_table = NULL;
  void* temp_storage = NULL;
  dim_t* sorted_data = NULL;
  dim_t* original_idx = NULL;
  // calculate number of bytes for temp resources
  size_t lookup_table_bytes = num_rows * sizeof(dim_t);
  size_t sorted_data_storage_bytes = data_size * sizeof(dim_t);
  size_t original_idx_storage_bytes = data_size * sizeof(dim_t);
  size_t sort_workspace_size = SortByKeyWorkspaceSize<dim_t, dim_t, gpu>(data_size);
  size_t unique_workspace_bytes = 0;
  // estimate unique temp space (NULL temp storage = size query only)
  IType* data_ptr = data.dptr<IType>();
  size_t *null_ptr = nullptr;
  cub::DeviceSelect::Unique(NULL, unique_workspace_bytes, data_ptr, data_ptr,
      null_ptr, data_size, Stream<gpu>::GetStream(s));
  // One more space reserved for unique count
  size_t temp_workspace_bytes = std::max(unique_workspace_bytes,
                                         sort_workspace_size);
  size_t total_storage_bytes = lookup_table_bytes + sorted_data_storage_bytes +
                               original_idx_storage_bytes + temp_workspace_bytes;
  // request resource and split it. layout is:
  // lookup_table, sorted_data, original_idx, temp_storage
  Tensor<gpu, 1, char> workspace = ctx.requested[0]
      .get_space_typed<gpu, 1, char>(Shape1(total_storage_bytes), s);
  lookup_table = reinterpret_cast<dim_t*>(workspace.dptr_);
  sorted_data = reinterpret_cast<dim_t*>(workspace.dptr_ + lookup_table_bytes);
  original_idx = reinterpret_cast<dim_t*>(workspace.dptr_ + lookup_table_bytes +
                                          sorted_data_storage_bytes);
  temp_storage = workspace.dptr_ + total_storage_bytes - temp_workspace_bytes;
  // make a copy of the data, to be sorted
  TBlob sorted_data_blob(sorted_data, Shape1(data_size), gpu::kDevMask);
  auto sorted_data_tensor = sorted_data_blob.FlatTo1D<gpu, dim_t>(s);
  mxnet_op::copy(s, sorted_data_blob, data);
  // generate original idx (iota 0..data_size-1)
  Tensor<gpu, 1, dim_t> original_idx_tensor(original_idx, Shape1(data_size), s);
  Kernel<range_fwd, gpu>::Launch(s, data_size, 1, static_cast<dim_t>(0),
                                 static_cast<dim_t>(1), kWriteTo, original_idx);
  // sort data with its original idx; num_bits bounds the radix passes needed
  int num_bits = ilog2(num_rows - 1);
  char* temp_storage_ptr = reinterpret_cast<char*>(temp_storage);
  Tensor<gpu, 1, char> temp_storage_tensor(temp_storage_ptr,
                                           Shape1(sort_workspace_size), s);
  SortByKey(sorted_data_tensor, original_idx_tensor, true,
            &temp_storage_tensor, 0, num_bits);
  // compute unique row ids based on sorted values.
  // aux data holds data_size ids + 1 extra slot for the unique count.
  output.CheckAndAllocAuxData(kIdx, Shape1(data_size + 1));
  // fill row_idx array of output matrix, using the row_flg values
  RType* grad_row_idx = output.aux_data(kIdx).dptr<RType>();
  cub::DeviceSelect::Unique(temp_storage_ptr, unique_workspace_bytes, sorted_data,
      grad_row_idx, grad_row_idx + data_size, data_size, Stream<gpu>::GetStream(s));
  dim_t nnr = 0;
  // copies sizeof(RType) bytes into a zero-initialized dim_t; relies on a
  // little-endian host when RType is narrower than dim_t
  CUDA_CALL(cudaMemcpy(&nnr, grad_row_idx + data_size, sizeof(RType),
            cudaMemcpyDeviceToHost));
  CHECK_EQ(output.shape().ndim(), 2) << "Unexcepted ndim";
  output.CheckAndAllocData(Shape2(nnr, output.shape()[1]));
  output.set_aux_shape(kIdx, Shape1(nnr));
  // generate lookup table: original row id -> compacted row id
  Kernel<MarkLookupTable, gpu>::Launch(s, nnr, lookup_table, grad_row_idx);
  // accumulate gradients
  DType* grad_data = output.data().dptr<DType>();
  Fill<false>(s, TBlob(grad_data, Shape1(nnr * row_length), gpu::kDevMask),
              kWriteTo, 0);
  const int SZ = 4;  // features per thread
  const nnvm::dim_t num_threads_per_row = (row_length + SZ - 1) / SZ;
  Kernel<AddTakeGradRspDeterministicKernel<SZ>, gpu>::Launch(s, data_size * num_threads_per_row,
      grad_data, lookup_table, sorted_data, data_size, original_idx,
      ograd.dptr<DType>(), row_length, num_threads_per_row);
}
/*!
 * \brief Dispatch wrapper for the deterministic SparseEmbedding backward:
 * handles req/empty-input early-outs, then unrolls the (index, grad, row-idx)
 * dtype combinations and calls the templated kernel launcher.
 */
inline void SparseEmbeddingOpBackwardDeterministicRspImpl(const OpContext& ctx,
                                                          const TBlob& ograd,
                                                          const TBlob& data,
                                                          const OpReqType req,
                                                          const NDArray& output) {
  using nnvm::dim_t;
  if (req == kNullOp) return;
  CHECK_EQ(req, kWriteTo) << "SparseEmbedding layer doesn't support "
                          << "weight gradient calculation with req != write";
  mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
  const dim_t data_size = static_cast<dim_t>(data.shape_.Size());
  // no lookups -> the gradient is an all-zero rsp array
  if (data_size == 0) {
    FillZerosRspImpl(s, output);
    return;
  }
  MSHADOW_TYPE_SWITCH(data.type_flag_, IType, {
    MSHADOW_TYPE_SWITCH(ograd.type_flag_, DType, {
      MSHADOW_IDX_TYPE_SWITCH(output.aux_type(rowsparse::kIdx), RType, {
        SparseEmbeddingDeterministicKernelLaunch<IType, DType, RType>(ctx, ograd, data,
                                                                      req, output);
      });
    });
  });
}
/*!
 * \brief GPU backward for SparseEmbedding. Deterministic mode delegates to the
 * sort-based implementation; otherwise marks occupied rows, prefix-sums the
 * flags to compact row ids, and scatter-adds gradients with atomics.
 */
template<>
inline void SparseEmbeddingOpBackwardRspImpl<gpu>(const bool deterministic,
                                                  const OpContext& ctx,
                                                  const TBlob& ograd,
                                                  const TBlob& data,
                                                  const OpReqType req,
                                                  const NDArray& output) {
  if (deterministic) {
    SparseEmbeddingOpBackwardDeterministicRspImpl(ctx, ograd, data, req, output);
    return;
  }
  using namespace mshadow;
  using namespace mxnet_op;
  using namespace mshadow::expr;
  using namespace rowsparse;
  using nnvm::dim_t;
  if (req == kNullOp) return;
  CHECK_EQ(req, kWriteTo) << "SparseEmbedding layer doesn't support "
                          << "weight gradient calculation with req != write";
  // Request temporary storage for marking non-zero rows and prefix sum
  Stream<gpu> *s = ctx.get_stream<gpu>();
  dim_t num_rows = output.shape()[0];
  dim_t row_length = output.shape()[1];
  dim_t data_size = static_cast<dim_t>(data.shape_.Size());
  dim_t num_threads;
  MSHADOW_TYPE_SWITCH(data.type_flag_, IType, {
    MSHADOW_SGL_DBL_TYPE_SWITCH(ograd.type_flag_, DType, {
      MSHADOW_IDX_TYPE_SWITCH(output.aux_type(kIdx), RType, {
        dim_t* prefix_sum = NULL;
        void* d_temp_storage = NULL;
        size_t temp_storage_bytes = 0;
        // size query only (NULL temp storage): fills temp_storage_bytes
        cub::DeviceScan::InclusiveSum(d_temp_storage,
                                      temp_storage_bytes,
                                      prefix_sum,
                                      prefix_sum,
                                      num_rows,
                                      Stream<gpu>::GetStream(s));
        Tensor<gpu, 1, char> workspace = ctx.requested[0]
            .get_space_typed<gpu, 1, char>(Shape1(num_rows * sizeof(dim_t) +
                                           temp_storage_bytes), s);
        prefix_sum = reinterpret_cast<dim_t*>(workspace.dptr_);
        d_temp_storage = workspace.dptr_ + num_rows*sizeof(dim_t);
        num_threads = num_rows;
        Fill<false>(s, TBlob(prefix_sum, Shape1(num_threads), gpu::kDevMask), kWriteTo, 0);
        // flag each row referenced by `data`, then inclusive-scan the flags
        // so prefix_sum[row] - 1 becomes the compacted row id
        Kernel<MarkRowFlgKernel, gpu>::Launch(s, data_size, prefix_sum, data.dptr<IType>());
        cub::DeviceScan::InclusiveSum(d_temp_storage,
                                      temp_storage_bytes,
                                      prefix_sum,
                                      prefix_sum,
                                      num_rows,
                                      mshadow::Stream<gpu>::GetStream(s));
        dim_t nnr = 0;
        // last scan element = number of distinct rows touched (blocking copy)
        CUDA_CALL(cudaMemcpy(&nnr, &prefix_sum[num_rows-1], sizeof(dim_t),
                  cudaMemcpyDeviceToHost));
        if (nnr == 0) {
          FillZerosRspImpl(s, output);
          return;
        }
        output.CheckAndAlloc({Shape1(nnr)});
        RType* grad_row_idx = output.aux_data(kIdx).dptr<RType>();
        // fill row_idx array of output matrix, using the row_flg values
        Kernel<FillRspRowIdxKernel, gpu>::Launch(s, num_rows,
            grad_row_idx, prefix_sum, num_rows);
        // prefill with zeros
        DType* grad_data = output.data().dptr<DType>();
        Fill<false>(s, TBlob(grad_data, Shape1(nnr * row_length), gpu::kDevMask),
                    kWriteTo, 0);
        // add the final gradients (atomic, non-deterministic summation order)
        num_threads = row_length * data_size;
        Kernel<AddTakeGradRspGPUKernel, gpu>::Launch(s, num_threads, grad_data, prefix_sum,
            data.dptr<IType>(), ograd.dptr<DType>(), row_length);
      });
    });
  });
}
/*! \brief Backward kernel for gather_nd: thread i scatters its K-element slice
 * of `data` into `out` at the position given by the i-th index tuple.
 * atomicAdd handles duplicate index tuples (summation order not deterministic).
 * N = number of index tuples, M = index tuple length, K = trailing slice size.
 */
struct backward_gather_nd_gpu {
  template<typename DType, typename IType>
  MSHADOW_XINLINE static void Map(int i, int N, int M, int K,
                                  const mshadow::Shape<10> strides,
                                  DType* out, const DType* data,
                                  const IType* indices) {
    int offset = 0;
    // indices are laid out dimension-major: indices[j*N + i] is the j-th
    // coordinate of the i-th tuple
    for (int j = 0; j < M; ++j) {
      offset += strides[j] * static_cast<int>(indices[j*N + i]);
    }
    for (int j = 0; j < K; ++j) {
      atomicAdd(out + (offset + j), data[i * K + j]);
    }
  }
};
/*! \brief GPU launcher for gather_nd backward: one thread per index tuple
 * (N threads), forwarding all geometry arguments to backward_gather_nd_gpu.
 */
template<typename DType, typename IType>
inline void GatherNDBackwardImpl(int N, int M, int K,
                                 const mshadow::Shape<10> strides,
                                 DType* out,
                                 const DType* data,
                                 const IType* indices,
                                 mshadow::Stream<gpu> *s) {
  mxnet_op::Kernel<backward_gather_nd_gpu, gpu>::Launch(s, N, N, M, K, strides, out, data, indices);
}
// GPU kernel registrations for the embedding / take / gather / scatter family.
// FCompute = dense implementation, FComputeEx = sparse-aware implementation.
NNVM_REGISTER_OP(Embedding)
.set_attr<FCompute>("FCompute<gpu>", EmbeddingOpForward<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", SparseEmbeddingOpForwardEx<gpu>);
NNVM_REGISTER_OP(_contrib_SparseEmbedding)
.set_attr<FComputeEx>("FComputeEx<gpu>", SparseEmbeddingOpForwardEx<gpu>);
NNVM_REGISTER_OP(_backward_Embedding)
.set_attr<FCompute>("FCompute<gpu>", EmbeddingOpBackward<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", EmbeddingOpBackwardEx<gpu>);
NNVM_REGISTER_OP(_backward_SparseEmbedding)
.set_attr<FComputeEx>("FComputeEx<gpu>", SparseEmbeddingOpBackwardEx<gpu>);
NNVM_REGISTER_OP(take)
.set_attr<FCompute>("FCompute<gpu>", TakeOpForward<gpu>);
NNVM_REGISTER_OP(_backward_take)
.set_attr<FCompute>("FCompute<gpu>", TakeOpBackward<gpu>);
NNVM_REGISTER_OP(batch_take)
.set_attr<FCompute>("FCompute<gpu>", BatchTakeOpForward<gpu>);
NNVM_REGISTER_OP(one_hot)
.set_attr<FCompute>("FCompute<gpu>", OneHotOpForward<gpu>);
NNVM_REGISTER_OP(gather_nd)
.set_attr<FCompute>("FCompute<gpu>", GatherNDForward<gpu>);
NNVM_REGISTER_OP(scatter_nd)
.set_attr<FCompute>("FCompute<gpu>", ScatterNDForward<gpu>);
NNVM_REGISTER_OP(_backward_gather_nd)
.set_attr<FCompute>("FCompute<gpu>", GatherNDBackward<gpu>);
NNVM_REGISTER_OP(_scatter_set_nd)
.set_attr<FCompute>("FCompute<gpu>", ScatterSetNDForward<gpu>);
} // namespace op
} // namespace mxnet
|
114ab8bd9275dac0b23bb2b9ede586e4f4144068.hip | // !!! This is a file automatically generated by hipify!!!
/// tensor's built-in convolution puzzles: several variants covering different element types and dimensionalities (up to 4D; extensible)
#include <matazure/tensor>
#include <image_utility.hpp>
using namespace matazure;
//nvcc bug walkaround
using cu_rgb_float_image = cuda::tensor<pointf<3>, 2>;
using cu_rgb_byte_image = cuda::tensor<pointb<3>, 2>;
__constant__ static_tensor<pointf<3>, dim< 3, 3>> mask;
// Convolution functions are generated via macros because CUDA __constant__
// memory must be declared at namespace scope.
// conv_global: convolution reading directly from global memory, with `mask` as the kernel
MATAZURE_CUDA_PUZZEL_CONV_GLOBAL(conv_global, mask)
// conv_block: tiled (block/shared-memory) convolution
MATAZURE_CUDA_PUZZEL_CONV_BLOCK(conv_block, mask)
// conv_block_crack: tiled convolution that leaves tile borders unprocessed
MATAZURE_CUDA_PUZZEL_CONV_BLOCK_CRACK(conv_block_crack, mask)
// conv_block_overlap: tiled convolution that handles tile borders via overlap
MATAZURE_CUDA_PUZZEL_CONV_BLOCK_OVERLAP(conv_block_overlap, mask)
/**
 * Demo: applies a 3x3 mean (box) filter to an RGB image using four GPU
 * convolution strategies (global, block, block-crack, block-overlap) and
 * writes each result to a PNG.
 * Usage: prog <input> [global.png] [block.png] [crack.png] [overlap.png]
 */
int main(int argc, char *argv[]) {
  if (argc < 2){
    printf("please input a 3 channel(rbg) image path");
    return -1;
  }
  auto ts_rgb = read_rgb_image(argv[1]);
  static_tensor<pointf<3>, dim< 3, 3>> host_mask;
  // mean filter: every tap weighs 1/size
  fill(host_mask, pointf<3>::all(1.0f) / host_mask.size());
  cuda::copy_symbol(host_mask, mask);
  // saturating per-channel float -> byte conversion
  auto pointf3_to_pointb3 = [] __matazure__ (pointf<3> x){
    pointb<3> re{};
    auto convertor = unary::saturate_convertor<byte>{};
    re[0] = convertor(x[0]);
    re[1] = convertor(x[1]);
    re[2] = convertor(x[2]);
    return re;
  };
  auto cts_rgb = mem_clone(ts_rgb, device_tag{});
  auto lcts_conv = cuda::puzzle::conv_global(tensor_cast<pointf<3>>(clamp_zero(cts_rgb)));
  auto cts_conv = apply(lcts_conv, pointf3_to_pointb3).persist();
  // FIX: was hip::device_synchronize() — a hipify artifact; the matazure
  // namespace is `cuda` (cf. cuda::tensor / cuda::puzzle above), so `hip::`
  // does not exist. Same fix applied to the three calls below.
  cuda::device_synchronize();
  auto ts_conv = mem_clone(cts_conv, host_tag{});
  auto conv_global_image_path = argc > 2 ? argv[2] : "conv_global.png";
  write_rgb_png( conv_global_image_path, ts_conv);
  cuda::tensor<pointf<3>, 2> cts_conv_block(cts_rgb.shape());
  cuda::puzzle::conv_block<dim<16, 16>>(tensor_cast<pointf<3>>(clamp_zero(cts_rgb)), cts_conv_block);
  auto cts_pointb3_conv_block = apply(cts_conv_block, pointf3_to_pointb3).persist();
  cuda::device_synchronize();
  auto ts_conv_block = mem_clone(cts_pointb3_conv_block, host_tag{});
  auto conv_block_image_path = argc > 3 ? argv[3] : "conv_block.png";
  write_rgb_png(conv_block_image_path, ts_conv_block);
  cuda::tensor<pointf<3>, 2> cts_conv_block_crack(cts_rgb.shape());
  cuda::puzzle::conv_block_crack<dim<32, 32>>(tensor_cast<pointf<3>>(clamp_zero(cts_rgb)), cts_conv_block_crack);
  auto cts_pointb3_conv_block_crack = apply(cts_conv_block_crack, pointf3_to_pointb3).persist();
  cuda::device_synchronize();
  auto ts_conv_block_crack = mem_clone(cts_pointb3_conv_block_crack, host_tag{});
  auto conv_block_crack_image_path = argc > 4 ? argv[4] : "conv_block_crack.png";
  write_rgb_png(conv_block_crack_image_path, ts_conv_block_crack);
  cuda::tensor<pointf<3>, 2> cts_conv_block_overlap(cts_rgb.shape());
  cuda::puzzle::conv_block_overlap<dim<16, 16>>(tensor_cast<pointf<3>>(clamp_zero(cts_rgb)), cts_conv_block_overlap);
  auto cts_pointb3_conv_block_overlap = apply(cts_conv_block_overlap,pointf3_to_pointb3).persist();
  cuda::device_synchronize();
  auto ts_conv_block_overlap = mem_clone(cts_pointb3_conv_block_overlap, host_tag{});
  auto conv_block_overlap_image_path = argc > 5 ? argv[5] : "conv_block_overlap.png";
  write_rgb_png(conv_block_overlap_image_path, ts_conv_block_overlap);
  return 0;
}
| 114ab8bd9275dac0b23bb2b9ede586e4f4144068.cu | ///tensor自带的多个不同版本的卷积运算, 适用于不同类型的,不同维度(最大四维, 可以自己拓展)的卷积运算
#include <matazure/tensor>
#include <image_utility.hpp>
using namespace matazure;
//nvcc bug walkaround
using cu_rgb_float_image = cuda::tensor<pointf<3>, 2>;
using cu_rgb_byte_image = cuda::tensor<pointb<3>, 2>;
__constant__ static_tensor<pointf<3>, dim< 3, 3>> mask;
// Convolution functions are generated via macros because CUDA __constant__
// memory must be declared at namespace scope.
// conv_global: convolution reading directly from global memory, with `mask` as the kernel
MATAZURE_CUDA_PUZZEL_CONV_GLOBAL(conv_global, mask)
// conv_block: tiled (block/shared-memory) convolution
MATAZURE_CUDA_PUZZEL_CONV_BLOCK(conv_block, mask)
// conv_block_crack: tiled convolution that leaves tile borders unprocessed
MATAZURE_CUDA_PUZZEL_CONV_BLOCK_CRACK(conv_block_crack, mask)
// conv_block_overlap: tiled convolution that handles tile borders via overlap
MATAZURE_CUDA_PUZZEL_CONV_BLOCK_OVERLAP(conv_block_overlap, mask)
// Demo: load an RGB image, upload it to the GPU, run a 3x3 mean filter with
// four different convolution strategies (global memory, tiled, tiled without
// border handling, tiled with overlapping borders) and write each result to
// a PNG file. argv[1] = input image; argv[2..5] optionally override the four
// output paths. Returns 0 on success, -1 if no input image was given.
int main(int argc, char *argv[]) {
if (argc < 2){
printf("please input a 3 channel(rbg) image path");
return -1;
}
auto ts_rgb = read_rgb_image(argv[1]);
static_tensor<pointf<3>, dim< 3, 3>> host_mask;
// Use a mean (box) filter: each of the 3x3 weights is 1/9.
fill(host_mask, pointf<3>::all(1.0f) / host_mask.size());
// Copy the weights into the global __constant__ `mask` used by the macro-generated functions.
cuda::copy_symbol(host_mask, mask);
// Device lambda: saturate-convert a float RGB pixel to a byte RGB pixel.
auto pointf3_to_pointb3 = [] __matazure__ (pointf<3> x){
pointb<3> re{};
auto convertor = unary::saturate_convertor<byte>{};
re[0] = convertor(x[0]);
re[1] = convertor(x[1]);
re[2] = convertor(x[2]);
return re;
};
auto cts_rgb = mem_clone(ts_rgb, device_tag{});
// Variant 1: global-memory convolution.
auto lcts_conv = cuda::puzzle::conv_global(tensor_cast<pointf<3>>(clamp_zero(cts_rgb)));
auto cts_conv = apply(lcts_conv, pointf3_to_pointb3).persist();
cuda::device_synchronize();
auto ts_conv = mem_clone(cts_conv, host_tag{});
auto conv_global_image_path = argc > 2 ? argv[2] : "conv_global.png";
write_rgb_png( conv_global_image_path, ts_conv);
// Variant 2: tiled convolution with 16x16 blocks.
cuda::tensor<pointf<3>, 2> cts_conv_block(cts_rgb.shape());
cuda::puzzle::conv_block<dim<16, 16>>(tensor_cast<pointf<3>>(clamp_zero(cts_rgb)), cts_conv_block);
auto cts_pointb3_conv_block = apply(cts_conv_block, pointf3_to_pointb3).persist();
cuda::device_synchronize();
auto ts_conv_block = mem_clone(cts_pointb3_conv_block, host_tag{});
auto conv_block_image_path = argc > 3 ? argv[3] : "conv_block.png";
write_rgb_png(conv_block_image_path, ts_conv_block);
// Variant 3: tiled convolution (32x32 blocks) that leaves tile borders unprocessed.
cuda::tensor<pointf<3>, 2> cts_conv_block_crack(cts_rgb.shape());
cuda::puzzle::conv_block_crack<dim<32, 32>>(tensor_cast<pointf<3>>(clamp_zero(cts_rgb)), cts_conv_block_crack);
auto cts_pointb3_conv_block_crack = apply(cts_conv_block_crack, pointf3_to_pointb3).persist();
cuda::device_synchronize();
auto ts_conv_block_crack = mem_clone(cts_pointb3_conv_block_crack, host_tag{});
auto conv_block_crack_image_path = argc > 4 ? argv[4] : "conv_block_crack.png";
write_rgb_png(conv_block_crack_image_path, ts_conv_block_crack);
// Variant 4: tiled convolution (16x16 blocks) handling borders via overlapping tiles.
cuda::tensor<pointf<3>, 2> cts_conv_block_overlap(cts_rgb.shape());
cuda::puzzle::conv_block_overlap<dim<16, 16>>(tensor_cast<pointf<3>>(clamp_zero(cts_rgb)), cts_conv_block_overlap);
auto cts_pointb3_conv_block_overlap = apply(cts_conv_block_overlap,pointf3_to_pointb3).persist();
cuda::device_synchronize();
auto ts_conv_block_overlap = mem_clone(cts_pointb3_conv_block_overlap, host_tag{});
auto conv_block_overlap_image_path = argc > 5 ? argv[5] : "conv_block_overlap.png";
write_rgb_png(conv_block_overlap_image_path, ts_conv_block_overlap);
return 0;
}
|
290c4933cf9c91e54e13328fca725ea68a63bdd8.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <iostream>
// Returns the smaller of an int and a double, as an int (the double is rounded).
__device__ int cuda_mymin(int a, double b) {
    if (b < a) {
        return round(b);
    }
    return a;
}
// Floor-based floating-point modulo: numer - floor(numer/denom)*denom,
// so the result carries the sign of denom.
__device__ double cuda_fmod(double numer, double denom){
    const double quotient = floor(numer / denom);
    return numer - quotient * denom;
}
// Maps a 1D point p[0] (nominally in [0,1]) to its cell index on a grid of
// ncx cells; out-of-range points clamp to the first/last cell.
__device__ int cuda_findcellidx_1D(const float* p, const int ncx) {
    const int raw = floor(p[0] * ncx);
    return min(max(raw, 0), ncx - 1);
}
// Maps a 2D point to the index of the triangle that contains it.
// The domain [0,1]x[0,1] is divided into ncx*ncy rectangular cells and each
// cell is split into 4 triangles (cell_idx + 0..3). Points outside the
// domain are assigned to the border triangle of the nearest cell.
__device__ int cuda_findcellidx_2D(const float* p, const int ncx, const int ncy) {
    // Work on a local copy of the point.
    double point[2];
    point[0] = p[0];
    point[1] = p[1];

    // Cell size along each axis.
    const float inc_x = 1.0 / ncx;
    const float inc_y = 1.0 / ncy;

    // Clamp into the (half-open) domain and locate the rectangular cell.
    double p0 = min((ncx * inc_x - 0.000000001), max(0.0, point[0]));
    double p1 = min((ncy * inc_y - 0.000000001), max(0.0, point[1]));
    double xmod = cuda_fmod((double)p0, (double)inc_x);
    double ymod = cuda_fmod((double)p1, (double)inc_y);
    // Fractional position inside the cell, each in [0,1).
    double x = xmod / inc_x;
    double y = ymod / inc_y;

    // Base index: 4 triangles per cell, cells numbered row-major.
    int cell_idx = 4 * (cuda_mymin(ncx - 1, (p0 - xmod) / inc_x)
                      + cuda_mymin(ncy - 1, (p1 - ymod) / inc_y) * ncx);

    // Out of bounds on the left edge.
    if (point[0] <= 0) {
        if (point[1] <= 0 && point[1] / inc_y < point[0] / inc_x) {
            // corner region: keep triangle 0
        } else if (point[1] >= ncy * inc_y && point[1] / inc_y - ncy > -point[0] / inc_x) {
            cell_idx += 2;
        } else {
            cell_idx += 3;
        }
        return cell_idx;
    }

    // Out of bounds on the right edge.
    if (point[0] >= ncx * inc_x) {
        if (point[1] <= 0 && -point[1] / inc_y > point[0] / inc_x - ncx) {
            // corner region: keep triangle 0
        } else if (point[1] >= ncy * inc_y && point[1] / inc_y - ncy > point[0] / inc_x - ncx) {
            cell_idx += 2;
        } else {
            cell_idx += 1;
        }
        return cell_idx;
    }

    // Out of bounds above (y <= 0): triangle 0.
    if (point[1] <= 0) {
        return cell_idx;
    }

    // Out of bounds below (y >= 1): triangle 2.
    if (point[1] >= ncy * inc_y) {
        return cell_idx + 2;
    }

    // Inbound: pick the triangle from the in-cell coordinates.
    if (x < y) {
        cell_idx += (1 - x < y) ? 2 : 3;
    } else if (1 - x < y) {
        cell_idx += 1;
    }
    return cell_idx;
}
// Maps a 3D point to the index of the tetrahedron that contains it.
// The unit cube is divided into nx*ny*nz cells, each split into 5 tetrahedra
// (cell_idx + 0..4); the tessellation is mirrored on alternating cells so
// neighbouring cells share faces. Points outside [0,1]^3 are first pushed
// back inside.
__device__ int cuda_findcellidx_3D(const float* p, const int nx, const int ny, const int nz) {
    // Cell size
    const float inc_x = 1.0 / nx;
    const float inc_y = 1.0 / ny;
    const float inc_z = 1.0 / nz;
    // Copy point
    float point[3];
    point[0] = p[0];
    point[1] = p[1];
    point[2] = p[2];
    // If point is outside [0, 1]x[0, 1]x[0, 1] then we push it inside.
    // BUGFIX: the z coordinate was previously not tested here, so points that
    // were out of bounds only in z skipped the push-inside step entirely.
    if(point[0] < 0.0 || point[0] > 1.0 || point[1] < 0.0 || point[1] > 1.0 || point[2] < 0.0 || point[2] > 1.0) {
        const float half = 0.5;
        point[0] -= half;
        point[1] -= half;
        point[2] -= half;
        const float abs_x = abs(point[0]);
        const float abs_y = abs(point[1]);
        const float abs_z = abs(point[2]);
        // Push only along the axis that is strictly smaller than both others
        // (mirrors the 2D version). BUGFIX: push_y/push_z previously compared
        // abs_x against abs_z/abs_y instead of abs_y/abs_z (copy-paste typo).
        const float push_x = (abs_x < abs_y && abs_x < abs_z) ? half*inc_x : 0.0;
        const float push_y = (abs_y < abs_x && abs_y < abs_z) ? half*inc_y : 0.0;
        const float push_z = (abs_z < abs_x && abs_z < abs_y) ? half*inc_z : 0.0;
        if(abs_x > half){point[0] = copysign(half - push_x, point[0]);}
        if(abs_y > half){point[1] = copysign(half - push_y, point[1]);}
        if(abs_z > half){point[2] = copysign(half - push_z, point[2]);}
        point[0] += half;
        point[1] += half;
        point[2] += half;
    }
    // Clamp into the half-open domain and find the cell (i, j, k).
    float zero = 0.0;
    float p0 = min((float)(nx*inc_x-1e-8),max(zero, point[0]));
    float p1 = min((float)(ny*inc_y-1e-8),max(zero, point[1]));
    // BUGFIX: the z clamp previously used nz*inc_x, mis-clamping whenever nx != nz.
    float p2 = min((float)(nz*inc_z-1e-8),max(zero, point[2]));
    double xmod = cuda_fmod(p0,inc_x);
    double ymod = cuda_fmod(p1,inc_y);
    double zmod = cuda_fmod(p2,inc_z);
    int i = cuda_mymin(nx-1,((p0 - xmod)/inc_x));
    int j = cuda_mymin(ny-1,((p1 - ymod)/inc_y));
    int k = cuda_mymin(nz-1,((p2 - zmod)/inc_z));
    // 5 tetrahedra per cell, cells numbered row-major in (i, j, k).
    int cell_idx = 5*(i + j * nx + k * nx * ny);
    // Fractional coordinates inside the cell, each in [0,1).
    double x = xmod/inc_x;
    double y = ymod/inc_y;
    double z = zmod/inc_z;
    // Mirror the tessellation on alternating cells so shared faces match.
    bool tf = false;
    if (k%2==0){
        if ((i%2==0 && j%2==1) || (i%2==1 && j%2==0)){
            tf = true;
        }
    }
    else if((i%2==0 && j%2==0) || (i%2==1 && j%2==1)){
        tf = true;
    }
    if (tf){
        double tmp = x;
        x = y;
        y = 1-tmp;
    }
    // Select one of the 5 tetrahedra from the in-cell coordinates.
    if (-x -y +z >= 0){
        cell_idx+=1;
    }
    else if (x+y+z - 2 >= 0){
        cell_idx+=2;
    }
    else if (-x+y-z >= 0){
        cell_idx+=3;
    }
    else if (x-y-z >= 0){
        cell_idx+=4;
    }
    return cell_idx;
}
// 1D affine map: x = a*b + t, with A laid out as [a, t].
__device__ void A_times_b_1D(float x[], const float* A, float* b) {
    x[0] = A[0] * b[0] + A[1];
}
// 2D affine map: x = M*b + t, A stored row-major as [m00 m01 t0 m10 m11 t1].
__device__ void A_times_b_2D(float x[], const float* A, float* b) {
    const float r0 = A[0] * b[0] + A[1] * b[1] + A[2];
    const float r1 = A[3] * b[0] + A[4] * b[1] + A[5];
    x[0] = r0;
    x[1] = r1;
}
// 3D affine map: x = M*b + t, A stored row-major as 3 rows of [m m m t].
__device__ void A_times_b_3D(float x[], const float* A, float* b) {
    const float r0 = A[0]*b[0] + A[1]*b[1] + A[2]*b[2]  + A[3];
    const float r1 = A[4]*b[0] + A[5]*b[1] + A[6]*b[2]  + A[7];
    const float r2 = A[8]*b[0] + A[9]*b[1] + A[10]*b[2] + A[11];
    x[0] = r0;
    x[1] = r1;
    x[2] = r2;
}
// Linear part of the 1D affine map (translation entry A[1] ignored).
__device__ void A_times_b_linear_1D(float x[], const float* A, float* b) {
    x[0] = A[0] * b[0];
}
// Linear part of the 2D affine map (translation entries A[2], A[5] ignored).
__device__ void A_times_b_linear_2D(float x[], const float* A, float* b) {
    const float r0 = A[0]*b[0] + A[1]*b[1];
    const float r1 = A[3]*b[0] + A[4]*b[1];
    x[0] = r0;
    x[1] = r1;
}
// Linear part of the 3D affine map (translation entries A[3], A[7], A[11] ignored).
__device__ void A_times_b_linear_3D(float x[], const float* A, float* b) {
    const float r0 = A[0]*b[0] + A[1]*b[1] + A[2]*b[2];
    const float r1 = A[4]*b[0] + A[5]*b[1] + A[6]*b[2];
    const float r2 = A[8]*b[0] + A[9]*b[1] + A[10]*b[2];
    x[0] = r0;
    x[1] = r1;
    x[2] = r2;
}
// Kernel declaration
// Forward CPAB transformation in 1D. One thread advances one point for one
// batch element (grid x = point, grid y = batch) by repeatedly applying the
// affine map of the cell currently containing the point. `broadcast` selects
// whether each batch element reads its own copy of the points (1) or all
// share one copy (0).
__global__ void cpab_cuda_kernel_forward_1D(const int nP, const int batch_size,
float* newpoints, const float* points,
const float* Trels, const int* nStepSolver,
const int* nc, const int broadcast) {
    const int pid = blockIdx.x * blockDim.x + threadIdx.x;
    const int bid = blockIdx.y * blockDim.y + threadIdx.y;
    if (pid >= nP || bid >= batch_size) return;

    // Load this thread's point.
    float x[1];
    x[0] = points[broadcast * bid * nP * 1 + pid];
    // 2 affine parameters per cell; offset to this batch's matrices.
    const int batch_offset = bid * 2 * nc[0];
    for (int step = 0; step < nStepSolver[0]; step++) {
        const int cell = cuda_findcellidx_1D(x, nc[0]);
        const float* T = Trels + 2 * cell + batch_offset;
        float next[1];
        A_times_b_1D(next, T, x);
        x[0] = next[0];
    }
    newpoints[nP * bid + pid] = x[0];
}
// Forward CPAB transformation in 2D. One thread advances one (point, batch)
// pair by repeatedly applying the affine map of the triangle currently
// containing the point. Coordinates are stored planar: x first, then y at
// offset nP. `broadcast` = 1 when each batch element has its own points copy.
__global__ void cpab_cuda_kernel_forward_2D(const int nP, const int batch_size,
float* newpoints, const float* points,
const float* Trels, const int* nStepSolver,
const int* nc, const int broadcast) {
    const int pid = blockIdx.x * blockDim.x + threadIdx.x;
    const int bid = blockIdx.y * blockDim.y + threadIdx.y;
    if (pid >= nP || bid >= batch_size) return;

    // Load the point (planar layout: y-plane lives nP floats after x-plane).
    float xy[2];
    const int in_base = broadcast * bid * nP * 2 + pid;
    xy[0] = points[in_base];
    xy[1] = points[in_base + nP];

    // 6 affine params per triangle, 4 triangles per cell.
    const int batch_offset = bid * 6 * 4 * nc[0] * nc[1];
    for (int step = 0; step < nStepSolver[0]; step++) {
        const int cell = cuda_findcellidx_2D(xy, nc[0], nc[1]);
        const float* T = Trels + 6 * cell + batch_offset;
        float next[2];
        A_times_b_2D(next, T, xy);
        xy[0] = next[0];
        xy[1] = next[1];
    }
    const int out_base = 2 * nP * bid + pid;
    newpoints[out_base] = xy[0];
    newpoints[out_base + nP] = xy[1];
}
// Forward CPAB transformation in 3D. One thread advances one (point, batch)
// pair by repeatedly applying the affine map of the tetrahedron currently
// containing the point. Coordinates are stored planar: x, then y at +nP,
// then z at +2*nP. `broadcast` = 1 when each batch element has its own
// points copy.
__global__ void cpab_cuda_kernel_forward_3D(const int nP, const int batch_size,
float* newpoints, const float* points,
const float* Trels, const int* nStepSolver,
const int* nc, const int broadcast) {
    const int pid = blockIdx.x * blockDim.x + threadIdx.x;
    const int bid = blockIdx.y * blockDim.y + threadIdx.y;
    if (pid >= nP || bid >= batch_size) return;

    // Load the point (planar layout across three coordinate planes).
    float xyz[3];
    const int in_base = broadcast * bid * nP * 3 + pid;
    xyz[0] = points[in_base];
    xyz[1] = points[in_base + nP];
    xyz[2] = points[in_base + 2 * nP];

    // 12 affine params per tetrahedron, 5 tetrahedra per cell.
    const int batch_offset = bid * 12 * 5 * nc[0] * nc[1] * nc[2];
    for (int step = 0; step < nStepSolver[0]; step++) {
        const int cell = cuda_findcellidx_3D(xyz, nc[0], nc[1], nc[2]);
        const float* T = Trels + 12 * cell + batch_offset;
        float next[3];
        A_times_b_3D(next, T, xyz);
        xyz[0] = next[0];
        xyz[1] = next[1];
        xyz[2] = next[2];
    }
    const int out_base = 3 * nP * bid + pid;
    newpoints[out_base] = xyz[0];
    newpoints[out_base + nP] = xyz[1];
    newpoints[out_base + 2 * nP] = xyz[2];
}
// Backward pass of the 1D CPAB transformation. One thread per (point, theta,
// basis-dimension) triple: it re-integrates the point trajectory and, in
// lockstep, the sensitivity of the transformed point w.r.t. the dim_index-th
// basis coefficient, using midpoint (RK2) steps. `grad` is laid out as
// [d][n_theta][nP] and is read and updated in place. As holds the per-batch
// velocity fields, Bs the velocity-space basis. (`nthreads` is unused.)
__global__ void cpab_cuda_kernel_backward_1D(dim3 nthreads, const int n_theta, const int d, const int nP, const int nC,
float* grad, const float* points, const float* As, const float* Bs,
const int* nStepSolver, const int* nc, const int broadcast) {
// Allocate memory for computations
float p[1], v[1], pMid[1], vMid[1], q[1], qMid[1];
float B_times_T[1], A_times_dTdAlpha[1], u[1], uMid[1];
float Alocal[2], Blocal[2];
int cellidx;
// Thread index
int point_index = threadIdx.x + blockIdx.x * blockDim.x;
int batch_index = threadIdx.y + blockIdx.y * blockDim.y;
int dim_index = threadIdx.z + blockIdx.z * blockDim.z;
// Make sure we are within bounds
if(point_index < nP && batch_index < n_theta && dim_index < d){
int index = nP * batch_index + point_index;
int boxsize = nP * n_theta;
// Define start index for the matrices belonging to this batch
// batch * 2 params pr cell * cell in x
int start_idx = batch_index * 2 * nc[0];
// Get point
p[0] = points[broadcast*batch_index*nP*1+point_index];
// Step size for solver
double h = (1.0 / nStepSolver[0]);
// Iterate a number of times
for(int t=0; t<nStepSolver[0]; t++) {
// Get current cell
cellidx = cuda_findcellidx_1D(p, nc[0]);
// Get index of A
int As_idx = 2*cellidx;
// Extract local A
for(int i = 0; i < 2; i++){
Alocal[i] = (As + As_idx + start_idx)[i];
}
// Compute velocity at current location
A_times_b_1D(v, Alocal, p);
// Compute midpoint
pMid[0] = p[0] + h*v[0]/2.0;
// Compute velocity at midpoint
A_times_b_1D(vMid, Alocal, pMid);
// Get index of B
int Bs_idx = 2 * dim_index * nC + As_idx;
// Get local B
for(int i = 0; i < 2; i++){
Blocal[i] = (Bs + Bs_idx)[i];
}
// Copy q (current sensitivity of this point w.r.t. basis direction dim_index)
q[0] = grad[dim_index*boxsize + index];
// Step 1: Compute u using the old location
// Find current RHS (term 1 + term 2)
A_times_b_1D(B_times_T, Blocal, p); // Term 1
A_times_b_linear_1D(A_times_dTdAlpha, Alocal, q); // Term 2
// Sum both terms
u[0] = B_times_T[0] + A_times_dTdAlpha[0];
// Step 2: Compute mid "point"
qMid[0] = q[0] + h * u[0]/2.0;
// Step 3: Compute uMid
A_times_b_1D(B_times_T, Blocal, pMid); // Term 1
A_times_b_linear_1D(A_times_dTdAlpha, Alocal, qMid); // Term 2
// Sum both terms
uMid[0] = B_times_T[0] + A_times_dTdAlpha[0];
// Update q (midpoint step for the sensitivity)
q[0] += uMid[0] * h;
// Update gradient
grad[dim_index * boxsize + index] = q[0];
// Update p (midpoint step for the point trajectory)
p[0] += vMid[0]*h;
}
}
return;
}
// Backward pass of the 2D CPAB transformation. One thread per (point, theta,
// basis-dimension) triple: it re-integrates the point trajectory and, in
// lockstep, the sensitivity of the transformed point w.r.t. the dim_index-th
// basis coefficient, using midpoint (RK2) steps. Coordinates are planar
// (x, then y at +nP); `grad` is laid out as [d][n_theta][2*nP] and is read
// and updated in place. (`nthreads` is unused.)
__global__ void cpab_cuda_kernel_backward_2D(dim3 nthreads, const int n_theta, const int d, const int nP, const int nC,
float* grad, const float* points, const float* As, const float* Bs,
const int* nStepSolver, const int* nc, const int broadcast) {
// Allocate memory for computations
float p[2], v[2], pMid[2], vMid[2], q[2], qMid[2];
float B_times_T[2], A_times_dTdAlpha[2], u[2], uMid[2];
float Alocal[6], Blocal[6];
int cellidx;
// Thread index
int point_index = threadIdx.x + blockIdx.x * blockDim.x;
int batch_index = threadIdx.y + blockIdx.y * blockDim.y;
int dim_index = threadIdx.z + blockIdx.z * blockDim.z;
// Make sure we are within bounds
if(point_index < nP && batch_index < n_theta && dim_index < d){
int index = 2 * nP * batch_index + point_index;
int boxsize = 2 * nP * n_theta;
// Define start index for the matrices belonging to this batch
// batch * num_elem * 4 triangles pr cell * cell in x * cell in y
int start_idx = batch_index * 6 * 4 * nc[0] * nc[1];
// Get point
p[0] = points[broadcast*batch_index*nP*2+point_index];
p[1] = points[broadcast*batch_index*nP*2+point_index + nP];
// Step size for solver
double h = (1.0 / nStepSolver[0]);
// Iterate a number of times
for(int t=0; t<nStepSolver[0]; t++) {
// Get current cell
cellidx = cuda_findcellidx_2D(p, nc[0], nc[1]);
// Get index of A
int As_idx = 6*cellidx;
// Extract local A
for(int i = 0; i < 6; i++){
Alocal[i] = (As + As_idx + start_idx)[i];
}
// Compute velocity at current location
A_times_b_2D(v, Alocal, p);
// Compute midpoint
pMid[0] = p[0] + h*v[0]/2.0;
pMid[1] = p[1] + h*v[1]/2.0;
// Compute velocity at midpoint
A_times_b_2D(vMid, Alocal, pMid);
// Get index of B
int Bs_idx = 6 * dim_index * nC + As_idx;
// Get local B
for(int i = 0; i < 6; i++){
Blocal[i] = (Bs + Bs_idx)[i];
}
// Copy q (current sensitivity of this point w.r.t. basis direction dim_index)
q[0] = grad[dim_index*boxsize + index];
q[1] = grad[dim_index*boxsize + index + nP];
// Step 1: Compute u using the old location
// Find current RHS (term 1 + term 2)
A_times_b_2D(B_times_T, Blocal, p); // Term 1
A_times_b_linear_2D(A_times_dTdAlpha, Alocal, q); // Term 2
// Sum both terms
u[0] = B_times_T[0] + A_times_dTdAlpha[0];
u[1] = B_times_T[1] + A_times_dTdAlpha[1];
// Step 2: Compute mid "point"
qMid[0] = q[0] + h * u[0]/2.0;
qMid[1] = q[1] + h * u[1]/2.0;
// Step 3: Compute uMid
A_times_b_2D(B_times_T, Blocal, pMid); // Term 1
A_times_b_linear_2D(A_times_dTdAlpha, Alocal, qMid); // Term 2
// Sum both terms
uMid[0] = B_times_T[0] + A_times_dTdAlpha[0];
uMid[1] = B_times_T[1] + A_times_dTdAlpha[1];
// Update q (midpoint step for the sensitivity)
q[0] += uMid[0] * h;
q[1] += uMid[1] * h;
// Update gradient
grad[dim_index * boxsize + index] = q[0];
grad[dim_index * boxsize + index + nP] = q[1];
// Update p (midpoint step for the point trajectory)
p[0] += vMid[0]*h;
p[1] += vMid[1]*h;
}
}
return;
}
// Backward pass of the 3D CPAB transformation. One thread per (point, theta,
// basis-dimension) triple: it re-integrates the point trajectory and, in
// lockstep, the sensitivity of the transformed point w.r.t. the dim_index-th
// basis coefficient, using midpoint (RK2) steps. Coordinates are planar
// (x, then y at +nP, then z at +2*nP); `grad` is laid out as
// [d][n_theta][3*nP] and is read and updated in place. (`nthreads` is unused.)
__global__ void cpab_cuda_kernel_backward_3D(dim3 nthreads, const int n_theta, const int d, const int nP, const int nC,
float* grad, const float* points, const float* As, const float* Bs,
const int* nStepSolver, const int* nc, const int broadcast) {
// Allocate memory for computations
float p[3], v[3], pMid[3], vMid[3], q[3], qMid[3];
float B_times_T[3], A_times_dTdAlpha[3], u[3], uMid[3];
float Alocal[12], Blocal[12];
int cellidx;
// Thread index
int point_index = threadIdx.x + blockIdx.x * blockDim.x;
int batch_index = threadIdx.y + blockIdx.y * blockDim.y;
int dim_index = threadIdx.z + blockIdx.z * blockDim.z;
// Make sure we are within bounds
if(point_index < nP && batch_index < n_theta && dim_index < d){
int index = 3 * nP * batch_index + point_index;
int boxsize = 3 * nP * n_theta;
// Define start index for the matrices belonging to this batch
// batch * 12 params pr cell * 5 tetrahedra pr cell * cell in x * cell in y * cell in z
int start_idx = batch_index * 12 * 5 * nc[0] * nc[1] * nc[2];
// Get point
p[0] = points[broadcast*batch_index*nP*3+point_index];
p[1] = points[broadcast*batch_index*nP*3+point_index + nP];
p[2] = points[broadcast*batch_index*nP*3+point_index + 2 * nP];
// Step size for solver
double h = (1.0 / nStepSolver[0]);
// Iterate a number of times
for(int t=0; t<nStepSolver[0]; t++) {
// Get current cell
cellidx = cuda_findcellidx_3D(p, nc[0], nc[1], nc[2]);
// Get index of A
int As_idx = 12*cellidx;
// Extract local A
for(int i = 0; i < 12; i++){
Alocal[i] = (As + As_idx + start_idx)[i];
}
// Compute velocity at current location
A_times_b_3D(v, Alocal, p);
// Compute midpoint
pMid[0] = p[0] + h*v[0]/2.0;
pMid[1] = p[1] + h*v[1]/2.0;
pMid[2] = p[2] + h*v[2]/2.0;
// Compute velocity at midpoint
A_times_b_3D(vMid, Alocal, pMid);
// Get index of B
int Bs_idx = 12 * dim_index * nC + As_idx;
// Get local B
for(int i = 0; i < 12; i++){
Blocal[i] = (Bs + Bs_idx)[i];
}
// Copy q (current sensitivity of this point w.r.t. basis direction dim_index)
q[0] = grad[dim_index * boxsize + index];
q[1] = grad[dim_index * boxsize + index + nP];
q[2] = grad[dim_index * boxsize + index + 2*nP];
// Step 1: Compute u using the old location
// Find current RHS (term 1 + term 2)
A_times_b_3D(B_times_T, Blocal, p); // Term 1
A_times_b_linear_3D(A_times_dTdAlpha, Alocal, q); // Term 2
// Sum both terms
u[0] = B_times_T[0] + A_times_dTdAlpha[0];
u[1] = B_times_T[1] + A_times_dTdAlpha[1];
u[2] = B_times_T[2] + A_times_dTdAlpha[2];
// Step 2: Compute mid "point"
qMid[0] = q[0] + h * u[0]/2.0;
qMid[1] = q[1] + h * u[1]/2.0;
qMid[2] = q[2] + h * u[2]/2.0;
// Step 3: Compute uMid
A_times_b_3D(B_times_T, Blocal, pMid); // Term 1
A_times_b_linear_3D(A_times_dTdAlpha, Alocal, qMid); // Term 2
// Sum both terms
uMid[0] = B_times_T[0] + A_times_dTdAlpha[0];
uMid[1] = B_times_T[1] + A_times_dTdAlpha[1];
uMid[2] = B_times_T[2] + A_times_dTdAlpha[2];
// Update q (midpoint step for the sensitivity)
q[0] += uMid[0] * h;
q[1] += uMid[1] * h;
q[2] += uMid[2] * h;
// Update gradient
grad[dim_index * boxsize + index] = q[0];
grad[dim_index * boxsize + index + nP] = q[1];
grad[dim_index * boxsize + index + 2 * nP] = q[2];
// Update p (midpoint step for the point trajectory)
p[0] += vMid[0]*h;
p[1] += vMid[1]*h;
p[2] += vMid[2]*h;
}
}
return;
}
| 290c4933cf9c91e54e13328fca725ea68a63bdd8.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
// Returns the smaller of an int and a double, as an int (the double is rounded).
__device__ int cuda_mymin(int a, double b) {
    if (b < a) {
        return round(b);
    }
    return a;
}
// Floor-based floating-point modulo: numer - floor(numer/denom)*denom,
// so the result carries the sign of denom.
__device__ double cuda_fmod(double numer, double denom){
    const double quotient = floor(numer / denom);
    return numer - quotient * denom;
}
// Maps a 1D point p[0] (nominally in [0,1]) to its cell index on a grid of
// ncx cells; out-of-range points clamp to the first/last cell.
__device__ int cuda_findcellidx_1D(const float* p, const int ncx) {
    const int raw = floor(p[0] * ncx);
    return min(max(raw, 0), ncx - 1);
}
// Maps a 2D point to the index of the triangle that contains it.
// The domain [0,1]x[0,1] is divided into ncx*ncy rectangular cells and each
// cell is split into 4 triangles (cell_idx + 0..3). Points outside the
// domain are assigned to the border triangle of the nearest cell.
__device__ int cuda_findcellidx_2D(const float* p, const int ncx, const int ncy) {
    // Work on a local copy of the point.
    double point[2];
    point[0] = p[0];
    point[1] = p[1];

    // Cell size along each axis.
    const float inc_x = 1.0 / ncx;
    const float inc_y = 1.0 / ncy;

    // Clamp into the (half-open) domain and locate the rectangular cell.
    double p0 = min((ncx * inc_x - 0.000000001), max(0.0, point[0]));
    double p1 = min((ncy * inc_y - 0.000000001), max(0.0, point[1]));
    double xmod = cuda_fmod((double)p0, (double)inc_x);
    double ymod = cuda_fmod((double)p1, (double)inc_y);
    // Fractional position inside the cell, each in [0,1).
    double x = xmod / inc_x;
    double y = ymod / inc_y;

    // Base index: 4 triangles per cell, cells numbered row-major.
    int cell_idx = 4 * (cuda_mymin(ncx - 1, (p0 - xmod) / inc_x)
                      + cuda_mymin(ncy - 1, (p1 - ymod) / inc_y) * ncx);

    // Out of bounds on the left edge.
    if (point[0] <= 0) {
        if (point[1] <= 0 && point[1] / inc_y < point[0] / inc_x) {
            // corner region: keep triangle 0
        } else if (point[1] >= ncy * inc_y && point[1] / inc_y - ncy > -point[0] / inc_x) {
            cell_idx += 2;
        } else {
            cell_idx += 3;
        }
        return cell_idx;
    }

    // Out of bounds on the right edge.
    if (point[0] >= ncx * inc_x) {
        if (point[1] <= 0 && -point[1] / inc_y > point[0] / inc_x - ncx) {
            // corner region: keep triangle 0
        } else if (point[1] >= ncy * inc_y && point[1] / inc_y - ncy > point[0] / inc_x - ncx) {
            cell_idx += 2;
        } else {
            cell_idx += 1;
        }
        return cell_idx;
    }

    // Out of bounds above (y <= 0): triangle 0.
    if (point[1] <= 0) {
        return cell_idx;
    }

    // Out of bounds below (y >= 1): triangle 2.
    if (point[1] >= ncy * inc_y) {
        return cell_idx + 2;
    }

    // Inbound: pick the triangle from the in-cell coordinates.
    if (x < y) {
        cell_idx += (1 - x < y) ? 2 : 3;
    } else if (1 - x < y) {
        cell_idx += 1;
    }
    return cell_idx;
}
// Maps a 3D point to the index of the tetrahedron that contains it.
// The unit cube is divided into nx*ny*nz cells, each split into 5 tetrahedra
// (cell_idx + 0..4); the tessellation is mirrored on alternating cells so
// neighbouring cells share faces. Points outside [0,1]^3 are first pushed
// back inside.
__device__ int cuda_findcellidx_3D(const float* p, const int nx, const int ny, const int nz) {
    // Cell size
    const float inc_x = 1.0 / nx;
    const float inc_y = 1.0 / ny;
    const float inc_z = 1.0 / nz;
    // Copy point
    float point[3];
    point[0] = p[0];
    point[1] = p[1];
    point[2] = p[2];
    // If point is outside [0, 1]x[0, 1]x[0, 1] then we push it inside.
    // BUGFIX: the z coordinate was previously not tested here, so points that
    // were out of bounds only in z skipped the push-inside step entirely.
    if(point[0] < 0.0 || point[0] > 1.0 || point[1] < 0.0 || point[1] > 1.0 || point[2] < 0.0 || point[2] > 1.0) {
        const float half = 0.5;
        point[0] -= half;
        point[1] -= half;
        point[2] -= half;
        const float abs_x = abs(point[0]);
        const float abs_y = abs(point[1]);
        const float abs_z = abs(point[2]);
        // Push only along the axis that is strictly smaller than both others
        // (mirrors the 2D version). BUGFIX: push_y/push_z previously compared
        // abs_x against abs_z/abs_y instead of abs_y/abs_z (copy-paste typo).
        const float push_x = (abs_x < abs_y && abs_x < abs_z) ? half*inc_x : 0.0;
        const float push_y = (abs_y < abs_x && abs_y < abs_z) ? half*inc_y : 0.0;
        const float push_z = (abs_z < abs_x && abs_z < abs_y) ? half*inc_z : 0.0;
        if(abs_x > half){point[0] = copysign(half - push_x, point[0]);}
        if(abs_y > half){point[1] = copysign(half - push_y, point[1]);}
        if(abs_z > half){point[2] = copysign(half - push_z, point[2]);}
        point[0] += half;
        point[1] += half;
        point[2] += half;
    }
    // Clamp into the half-open domain and find the cell (i, j, k).
    float zero = 0.0;
    float p0 = min((float)(nx*inc_x-1e-8),max(zero, point[0]));
    float p1 = min((float)(ny*inc_y-1e-8),max(zero, point[1]));
    // BUGFIX: the z clamp previously used nz*inc_x, mis-clamping whenever nx != nz.
    float p2 = min((float)(nz*inc_z-1e-8),max(zero, point[2]));
    double xmod = cuda_fmod(p0,inc_x);
    double ymod = cuda_fmod(p1,inc_y);
    double zmod = cuda_fmod(p2,inc_z);
    int i = cuda_mymin(nx-1,((p0 - xmod)/inc_x));
    int j = cuda_mymin(ny-1,((p1 - ymod)/inc_y));
    int k = cuda_mymin(nz-1,((p2 - zmod)/inc_z));
    // 5 tetrahedra per cell, cells numbered row-major in (i, j, k).
    int cell_idx = 5*(i + j * nx + k * nx * ny);
    // Fractional coordinates inside the cell, each in [0,1).
    double x = xmod/inc_x;
    double y = ymod/inc_y;
    double z = zmod/inc_z;
    // Mirror the tessellation on alternating cells so shared faces match.
    bool tf = false;
    if (k%2==0){
        if ((i%2==0 && j%2==1) || (i%2==1 && j%2==0)){
            tf = true;
        }
    }
    else if((i%2==0 && j%2==0) || (i%2==1 && j%2==1)){
        tf = true;
    }
    if (tf){
        double tmp = x;
        x = y;
        y = 1-tmp;
    }
    // Select one of the 5 tetrahedra from the in-cell coordinates.
    if (-x -y +z >= 0){
        cell_idx+=1;
    }
    else if (x+y+z - 2 >= 0){
        cell_idx+=2;
    }
    else if (-x+y-z >= 0){
        cell_idx+=3;
    }
    else if (x-y-z >= 0){
        cell_idx+=4;
    }
    return cell_idx;
}
// 1D affine map: x = a*b + t, with A laid out as [a, t].
__device__ void A_times_b_1D(float x[], const float* A, float* b) {
    x[0] = A[0] * b[0] + A[1];
}
// 2D affine map: x = M*b + t, A stored row-major as [m00 m01 t0 m10 m11 t1].
__device__ void A_times_b_2D(float x[], const float* A, float* b) {
    const float r0 = A[0] * b[0] + A[1] * b[1] + A[2];
    const float r1 = A[3] * b[0] + A[4] * b[1] + A[5];
    x[0] = r0;
    x[1] = r1;
}
// 3D affine map: x = M*b + t, A stored row-major as 3 rows of [m m m t].
__device__ void A_times_b_3D(float x[], const float* A, float* b) {
    const float r0 = A[0]*b[0] + A[1]*b[1] + A[2]*b[2]  + A[3];
    const float r1 = A[4]*b[0] + A[5]*b[1] + A[6]*b[2]  + A[7];
    const float r2 = A[8]*b[0] + A[9]*b[1] + A[10]*b[2] + A[11];
    x[0] = r0;
    x[1] = r1;
    x[2] = r2;
}
// Linear part of the 1D affine map (translation entry A[1] ignored).
__device__ void A_times_b_linear_1D(float x[], const float* A, float* b) {
    x[0] = A[0] * b[0];
}
// Linear part of the 2D affine map (translation entries A[2], A[5] ignored).
__device__ void A_times_b_linear_2D(float x[], const float* A, float* b) {
    const float r0 = A[0]*b[0] + A[1]*b[1];
    const float r1 = A[3]*b[0] + A[4]*b[1];
    x[0] = r0;
    x[1] = r1;
}
// Linear part of the 3D affine map (translation entries A[3], A[7], A[11] ignored).
__device__ void A_times_b_linear_3D(float x[], const float* A, float* b) {
    const float r0 = A[0]*b[0] + A[1]*b[1] + A[2]*b[2];
    const float r1 = A[4]*b[0] + A[5]*b[1] + A[6]*b[2];
    const float r2 = A[8]*b[0] + A[9]*b[1] + A[10]*b[2];
    x[0] = r0;
    x[1] = r1;
    x[2] = r2;
}
// Kernel declaration
// Forward CPAB transformation in 1D. One thread advances one point for one
// batch element (grid x = point, grid y = batch) by repeatedly applying the
// affine map of the cell currently containing the point. `broadcast` selects
// whether each batch element reads its own copy of the points (1) or all
// share one copy (0).
__global__ void cpab_cuda_kernel_forward_1D(const int nP, const int batch_size,
float* newpoints, const float* points,
const float* Trels, const int* nStepSolver,
const int* nc, const int broadcast) {
    const int pid = blockIdx.x * blockDim.x + threadIdx.x;
    const int bid = blockIdx.y * blockDim.y + threadIdx.y;
    if (pid >= nP || bid >= batch_size) return;

    // Load this thread's point.
    float x[1];
    x[0] = points[broadcast * bid * nP * 1 + pid];
    // 2 affine parameters per cell; offset to this batch's matrices.
    const int batch_offset = bid * 2 * nc[0];
    for (int step = 0; step < nStepSolver[0]; step++) {
        const int cell = cuda_findcellidx_1D(x, nc[0]);
        const float* T = Trels + 2 * cell + batch_offset;
        float next[1];
        A_times_b_1D(next, T, x);
        x[0] = next[0];
    }
    newpoints[nP * bid + pid] = x[0];
}
// Forward CPAB transformation in 2D. One thread advances one (point, batch)
// pair by repeatedly applying the affine map of the triangle currently
// containing the point. Coordinates are stored planar: x first, then y at
// offset nP. `broadcast` = 1 when each batch element has its own points copy.
__global__ void cpab_cuda_kernel_forward_2D(const int nP, const int batch_size,
float* newpoints, const float* points,
const float* Trels, const int* nStepSolver,
const int* nc, const int broadcast) {
    const int pid = blockIdx.x * blockDim.x + threadIdx.x;
    const int bid = blockIdx.y * blockDim.y + threadIdx.y;
    if (pid >= nP || bid >= batch_size) return;

    // Load the point (planar layout: y-plane lives nP floats after x-plane).
    float xy[2];
    const int in_base = broadcast * bid * nP * 2 + pid;
    xy[0] = points[in_base];
    xy[1] = points[in_base + nP];

    // 6 affine params per triangle, 4 triangles per cell.
    const int batch_offset = bid * 6 * 4 * nc[0] * nc[1];
    for (int step = 0; step < nStepSolver[0]; step++) {
        const int cell = cuda_findcellidx_2D(xy, nc[0], nc[1]);
        const float* T = Trels + 6 * cell + batch_offset;
        float next[2];
        A_times_b_2D(next, T, xy);
        xy[0] = next[0];
        xy[1] = next[1];
    }
    const int out_base = 2 * nP * bid + pid;
    newpoints[out_base] = xy[0];
    newpoints[out_base + nP] = xy[1];
}
// Forward CPAB transformation in 3D. One thread advances one (point, batch)
// pair by repeatedly applying the affine map of the tetrahedron currently
// containing the point. Coordinates are stored planar: x, then y at +nP,
// then z at +2*nP. `broadcast` = 1 when each batch element has its own
// points copy.
__global__ void cpab_cuda_kernel_forward_3D(const int nP, const int batch_size,
float* newpoints, const float* points,
const float* Trels, const int* nStepSolver,
const int* nc, const int broadcast) {
    const int pid = blockIdx.x * blockDim.x + threadIdx.x;
    const int bid = blockIdx.y * blockDim.y + threadIdx.y;
    if (pid >= nP || bid >= batch_size) return;

    // Load the point (planar layout across three coordinate planes).
    float xyz[3];
    const int in_base = broadcast * bid * nP * 3 + pid;
    xyz[0] = points[in_base];
    xyz[1] = points[in_base + nP];
    xyz[2] = points[in_base + 2 * nP];

    // 12 affine params per tetrahedron, 5 tetrahedra per cell.
    const int batch_offset = bid * 12 * 5 * nc[0] * nc[1] * nc[2];
    for (int step = 0; step < nStepSolver[0]; step++) {
        const int cell = cuda_findcellidx_3D(xyz, nc[0], nc[1], nc[2]);
        const float* T = Trels + 12 * cell + batch_offset;
        float next[3];
        A_times_b_3D(next, T, xyz);
        xyz[0] = next[0];
        xyz[1] = next[1];
        xyz[2] = next[2];
    }
    const int out_base = 3 * nP * bid + pid;
    newpoints[out_base] = xyz[0];
    newpoints[out_base + nP] = xyz[1];
    newpoints[out_base + 2 * nP] = xyz[2];
}
// Backward pass of the 1D CPAB transformation. One thread per (point, theta,
// basis-dimension) triple: it re-integrates the point trajectory and, in
// lockstep, the sensitivity of the transformed point w.r.t. the dim_index-th
// basis coefficient, using midpoint (RK2) steps. `grad` is laid out as
// [d][n_theta][nP] and is read and updated in place. As holds the per-batch
// velocity fields, Bs the velocity-space basis. (`nthreads` is unused.)
__global__ void cpab_cuda_kernel_backward_1D(dim3 nthreads, const int n_theta, const int d, const int nP, const int nC,
float* grad, const float* points, const float* As, const float* Bs,
const int* nStepSolver, const int* nc, const int broadcast) {
// Allocate memory for computations
float p[1], v[1], pMid[1], vMid[1], q[1], qMid[1];
float B_times_T[1], A_times_dTdAlpha[1], u[1], uMid[1];
float Alocal[2], Blocal[2];
int cellidx;
// Thread index
int point_index = threadIdx.x + blockIdx.x * blockDim.x;
int batch_index = threadIdx.y + blockIdx.y * blockDim.y;
int dim_index = threadIdx.z + blockIdx.z * blockDim.z;
// Make sure we are within bounds
if(point_index < nP && batch_index < n_theta && dim_index < d){
int index = nP * batch_index + point_index;
int boxsize = nP * n_theta;
// Define start index for the matrices belonging to this batch
// batch * 2 params pr cell * cell in x
int start_idx = batch_index * 2 * nc[0];
// Get point
p[0] = points[broadcast*batch_index*nP*1+point_index];
// Step size for solver
double h = (1.0 / nStepSolver[0]);
// Iterate a number of times
for(int t=0; t<nStepSolver[0]; t++) {
// Get current cell
cellidx = cuda_findcellidx_1D(p, nc[0]);
// Get index of A
int As_idx = 2*cellidx;
// Extract local A
for(int i = 0; i < 2; i++){
Alocal[i] = (As + As_idx + start_idx)[i];
}
// Compute velocity at current location
A_times_b_1D(v, Alocal, p);
// Compute midpoint
pMid[0] = p[0] + h*v[0]/2.0;
// Compute velocity at midpoint
A_times_b_1D(vMid, Alocal, pMid);
// Get index of B
int Bs_idx = 2 * dim_index * nC + As_idx;
// Get local B
for(int i = 0; i < 2; i++){
Blocal[i] = (Bs + Bs_idx)[i];
}
// Copy q (current sensitivity of this point w.r.t. basis direction dim_index)
q[0] = grad[dim_index*boxsize + index];
// Step 1: Compute u using the old location
// Find current RHS (term 1 + term 2)
A_times_b_1D(B_times_T, Blocal, p); // Term 1
A_times_b_linear_1D(A_times_dTdAlpha, Alocal, q); // Term 2
// Sum both terms
u[0] = B_times_T[0] + A_times_dTdAlpha[0];
// Step 2: Compute mid "point"
qMid[0] = q[0] + h * u[0]/2.0;
// Step 3: Compute uMid
A_times_b_1D(B_times_T, Blocal, pMid); // Term 1
A_times_b_linear_1D(A_times_dTdAlpha, Alocal, qMid); // Term 2
// Sum both terms
uMid[0] = B_times_T[0] + A_times_dTdAlpha[0];
// Update q (midpoint step for the sensitivity)
q[0] += uMid[0] * h;
// Update gradient
grad[dim_index * boxsize + index] = q[0];
// Update p (midpoint step for the point trajectory)
p[0] += vMid[0]*h;
}
}
return;
}
    __global__ void cpab_cuda_kernel_backward_2D(dim3 nthreads, const int n_theta, const int d, const int nP, const int nC,
                                                float* grad, const float* points, const float* As, const float* Bs,
                                                const int* nStepSolver, const int* nc, const int broadcast) {
        // Backward pass of the 2D CPAB transformation.  One thread per
        // (point, batch, theta-dimension) triple; the point p and its gradient
        // q are integrated together with one midpoint (RK2) step per solver
        // iteration, and `grad` is updated in place after every step.
        //
        // Layouts as used below:
        //   points: [2, nP] per batch (x block then y block); broadcast == 0
        //           collapses the batch offset so all batches share the points
        //   As:     6 affine params per triangular cell, 4 triangles per grid
        //           cell, nc[0] x nc[1] grid cells, batches back-to-back
        //   Bs:     6 params per cell per theta dimension (nC cells total)
        //   grad:   flattened [d, n_theta, 2, nP]
        // Allocate memory for computations
        float p[2], v[2], pMid[2], vMid[2], q[2], qMid[2];
        float B_times_T[2], A_times_dTdAlpha[2], u[2], uMid[2];
        float Alocal[6], Blocal[6];
        int cellidx;
        // Thread index
        int point_index = threadIdx.x + blockIdx.x * blockDim.x;
        int batch_index = threadIdx.y + blockIdx.y * blockDim.y;
        int dim_index = threadIdx.z + blockIdx.z * blockDim.z;
        // Make sure we are within bounds
        if(point_index < nP && batch_index < n_theta && dim_index < d){
            int index = 2 * nP * batch_index + point_index;
            int boxsize = 2 * nP * n_theta;
            // Define start index for the matrices belonging to this batch
            // batch * num_elem * 4 triangles pr cell * cell in x * cell in y
            int start_idx = batch_index * 6 * 4 * nc[0] * nc[1];
            // Get point (x and y components are nP apart)
            p[0] = points[broadcast*batch_index*nP*2+point_index];
            p[1] = points[broadcast*batch_index*nP*2+point_index + nP];
            // Step size for solver
            double h = (1.0 / nStepSolver[0]);
            // Iterate a number of times
            for(int t=0; t<nStepSolver[0]; t++) {
                // Get current cell
                cellidx = cuda_findcellidx_2D(p, nc[0], nc[1]);
                // Get index of A
                int As_idx = 6*cellidx;
                // Extract local A
                for(int i = 0; i < 6; i++){
                    Alocal[i] = (As + As_idx + start_idx)[i];
                }
                // Compute velocity at current location
                A_times_b_2D(v, Alocal, p);
                // Compute midpoint
                pMid[0] = p[0] + h*v[0]/2.0;
                pMid[1] = p[1] + h*v[1]/2.0;
                // Compute velocity at midpoint
                A_times_b_2D(vMid, Alocal, pMid);
                // Get index of B (basis matrix for this theta dimension)
                int Bs_idx = 6 * dim_index * nC + As_idx;
                // Get local B
                for(int i = 0; i < 6; i++){
                    Blocal[i] = (Bs + Bs_idx)[i];
                }
                // Copy q (current gradient for this theta dimension)
                q[0] = grad[dim_index*boxsize + index];
                q[1] = grad[dim_index*boxsize + index + nP];
                // Step 1: Compute u using the old location
                // Find current RHS (term 1 + term 2)
                A_times_b_2D(B_times_T, Blocal, p); // Term 1
                // Term 2: linear part of A applied to q (per helper name)
                A_times_b_linear_2D(A_times_dTdAlpha, Alocal, q); // Term 2
                // Sum both terms
                u[0] = B_times_T[0] + A_times_dTdAlpha[0];
                u[1] = B_times_T[1] + A_times_dTdAlpha[1];
                // Step 2: Compute mid "point"
                qMid[0] = q[0] + h * u[0]/2.0;
                qMid[1] = q[1] + h * u[1]/2.0;
                // Step 3: Compute uMid
                A_times_b_2D(B_times_T, Blocal, pMid); // Term 1
                A_times_b_linear_2D(A_times_dTdAlpha, Alocal, qMid); // Term 2
                // Sum both terms
                uMid[0] = B_times_T[0] + A_times_dTdAlpha[0];
                uMid[1] = B_times_T[1] + A_times_dTdAlpha[1];
                // Update q
                q[0] += uMid[0] * h;
                q[1] += uMid[1] * h;
                // Update gradient
                grad[dim_index * boxsize + index] = q[0];
                grad[dim_index * boxsize + index + nP] = q[1];
                // Update p
                p[0] += vMid[0]*h;
                p[1] += vMid[1]*h;
            }
        }
        return;
    }
    __global__ void cpab_cuda_kernel_backward_3D(dim3 nthreads, const int n_theta, const int d, const int nP, const int nC,
                                                float* grad, const float* points, const float* As, const float* Bs,
                                                const int* nStepSolver, const int* nc, const int broadcast) {
        // Backward pass of the 3D CPAB transformation.  One thread per
        // (point, batch, theta-dimension) triple; the point p and its gradient
        // q are integrated together with one midpoint (RK2) step per solver
        // iteration, and `grad` is updated in place after every step.
        //
        // Layouts as used below:
        //   points: [3, nP] per batch (x, y, z blocks nP apart); broadcast == 0
        //           collapses the batch offset so all batches share the points
        //   As:     12 affine params per tetrahedral cell, 5 tetrahedra per
        //           grid cell, nc[0] x nc[1] x nc[2] grid cells
        //   Bs:     12 params per cell per theta dimension (nC cells total)
        //   grad:   flattened [d, n_theta, 3, nP]
        // Allocate memory for computations
        float p[3], v[3], pMid[3], vMid[3], q[3], qMid[3];
        float B_times_T[3], A_times_dTdAlpha[3], u[3], uMid[3];
        float Alocal[12], Blocal[12];
        int cellidx;
        // Thread index
        int point_index = threadIdx.x + blockIdx.x * blockDim.x;
        int batch_index = threadIdx.y + blockIdx.y * blockDim.y;
        int dim_index = threadIdx.z + blockIdx.z * blockDim.z;
        // Make sure we are within bounds
        if(point_index < nP && batch_index < n_theta && dim_index < d){
            int index = 3 * nP * batch_index + point_index;
            int boxsize = 3 * nP * n_theta;
            // Define start index for the matrices belonging to this batch:
            // batch * 12 params pr cell * 5 cells (tetrahedra) pr grid cell
            // * cell in x * cell in y * cell in z
            // (the factor actually used is 5, not 6 as an older comment said)
            int start_idx = batch_index * 12 * 5 * nc[0] * nc[1] * nc[2];
            // Get point (x, y, z components are nP apart)
            p[0] = points[broadcast*batch_index*nP*3+point_index];
            p[1] = points[broadcast*batch_index*nP*3+point_index + nP];
            p[2] = points[broadcast*batch_index*nP*3+point_index + 2 * nP];
            // Step size for solver
            double h = (1.0 / nStepSolver[0]);
            // Iterate a number of times
            for(int t=0; t<nStepSolver[0]; t++) {
                // Get current cell
                cellidx = cuda_findcellidx_3D(p, nc[0], nc[1], nc[2]);
                // Get index of A
                int As_idx = 12*cellidx;
                // Extract local A
                for(int i = 0; i < 12; i++){
                    Alocal[i] = (As + As_idx + start_idx)[i];
                }
                // Compute velocity at current location
                A_times_b_3D(v, Alocal, p);
                // Compute midpoint
                pMid[0] = p[0] + h*v[0]/2.0;
                pMid[1] = p[1] + h*v[1]/2.0;
                pMid[2] = p[2] + h*v[2]/2.0;
                // Compute velocity at midpoint
                A_times_b_3D(vMid, Alocal, pMid);
                // Get index of B (basis matrix for this theta dimension)
                int Bs_idx = 12 * dim_index * nC + As_idx;
                // Get local B
                for(int i = 0; i < 12; i++){
                    Blocal[i] = (Bs + Bs_idx)[i];
                }
                // Copy q (current gradient for this theta dimension)
                q[0] = grad[dim_index * boxsize + index];
                q[1] = grad[dim_index * boxsize + index + nP];
                q[2] = grad[dim_index * boxsize + index + 2*nP];
                // Step 1: Compute u using the old location
                // Find current RHS (term 1 + term 2)
                A_times_b_3D(B_times_T, Blocal, p); // Term 1
                // Term 2: linear part of A applied to q (per helper name)
                A_times_b_linear_3D(A_times_dTdAlpha, Alocal, q); // Term 2
                // Sum both terms
                u[0] = B_times_T[0] + A_times_dTdAlpha[0];
                u[1] = B_times_T[1] + A_times_dTdAlpha[1];
                u[2] = B_times_T[2] + A_times_dTdAlpha[2];
                // Step 2: Compute mid "point"
                qMid[0] = q[0] + h * u[0]/2.0;
                qMid[1] = q[1] + h * u[1]/2.0;
                qMid[2] = q[2] + h * u[2]/2.0;
                // Step 3: Compute uMid
                A_times_b_3D(B_times_T, Blocal, pMid); // Term 1
                A_times_b_linear_3D(A_times_dTdAlpha, Alocal, qMid); // Term 2
                // Sum both terms
                uMid[0] = B_times_T[0] + A_times_dTdAlpha[0];
                uMid[1] = B_times_T[1] + A_times_dTdAlpha[1];
                uMid[2] = B_times_T[2] + A_times_dTdAlpha[2];
                // Update q
                q[0] += uMid[0] * h;
                q[1] += uMid[1] * h;
                q[2] += uMid[2] * h;
                // Update gradient
                grad[dim_index * boxsize + index] = q[0];
                grad[dim_index * boxsize + index + nP] = q[1];
                grad[dim_index * boxsize + index + 2 * nP] = q[2];
                // Update p
                p[0] += vMid[0]*h;
                p[1] += vMid[1]*h;
                p[2] += vMid[2]*h;
            }
        }
        return;
    }
|
9a942faaf9d67c4609ca645a297a1ed5cef6a29d.hip | // !!! This is a file automatically generated by hipify!!!
// Use all constants to debug and get the performance
#define DIMX 512
#define DIMY 512
#define DIMZ 512
#define BLOCKDIMX 32
#define BLOCKDIMY 4
#define BLOCKDIMZ 1
#define TOTAL (DIMX*DIMY*DIMZ)
#define BLOCKSIZEX 64
#define BLOCKSIZEY 16
#define BLOCKSIZEZ 1
#define NUMTHREADS (BLOCKDIMX*BLOCKDIMY*BLOCKDIMZ)
#define HALO 1
#define OPENEDDIMX (BLOCKSIZEX+2*HALO)
#define OPENEDDIMY (BLOCKSIZEY+2*HALO)
#define OPENEDDIMZ (BLOCKSIZEZ+2*HALO)
#define OPENEDDIMXY (OPENEDDIMX*OPENEDDIMY)
#define OPENEDDIMXYZ (OPENEDDIMX*OPENEDDIMY*OPENEDDIMZ)
#define CLOSEDDIMX (BLOCKSIZEX)
#define CLOSEDDIMY (BLOCKSIZEY)
#define CLOSEDDIMZ (BLOCKSIZEZ)
#define CLOSEDDIMXY (CLOSEDDIMX*CLOSEDDIMY)
#define CLOSEDDIMXYZ (CLOSEDDIMX*CLOSEDDIMY*CLOSEDDIMZ)
#define NUMREADING ((OPENEDDIMXYZ / NUMTHREADS) + ((OPENEDDIMXYZ%NUMTHREADS)?1:0))
#define NUMWRITING ((CLOSEDDIMXYZ / NUMTHREADS) + ((CLOSEDDIMXYZ%NUMTHREADS)?1:0))
// #define CORRECTNESS_DATA
#define CORRECTNESS_HEAT
// #define myclamp(x, value, tx, fx) {return ((x)==(value)) ? (tx):(fx)}
#define C0 0.25f
#define C1 0.50f
#include <iostream>
#include <fstream>
#include <sstream>
#include <iomanip> // std::setfill, std::setw
#include <string>
// #include <sys/ioctl.h>
#include <hip/hip_runtime.h>
#include <helper_math.h>
#include <gpu_timer.hpp>
using namespace std;
////////////////////////////////////////////////////////////////////////////////////////////////////
#define checkLastError() { \
hipError_t error = hipGetLastError(); \
int id; \
hipGetDevice(&id); \
if(error != hipSuccess) { \
printf("Cuda failure error in file '%s' in line %i: '%s' at device %d \n", \
__FILE__,__LINE__, hipGetErrorString(error), id); \
exit(EXIT_FAILURE); \
} \
}
////////////////////////////////////////////////////////////////////////////////////////////////////
#define checkReadFile(filename, pData, size) { \
fstream *fs = new fstream; \
fs->open(filename, ios::in|ios::binary); \
if (!fs->is_open()) \
{ \
printf("Cannot open file '%s' in file '%s' at line %i\n", \
filename, __FILE__, __LINE__); \
return 1; \
} \
fs->read(reinterpret_cast<char*>(pData), size); \
fs->close(); \
delete fs; \
}
////////////////////////////////////////////////////////////////////////////////////////////////////
#define checkWriteFile(filename, pData, size) { \
fstream *fs = new fstream; \
fs->open(filename, ios::out|ios::binary); \
if (!fs->is_open()) \
{ \
fprintf(stderr, "Cannot open file '%s' in file '%s' at line %i\n", \
filename, __FILE__, __LINE__); \
return 1; \
} \
fs->write(reinterpret_cast<char*>(pData), size); \
fs->close(); \
delete fs; \
}
////////////////////////////////////////////////////////////////////////////////////////////////////
#define at(x, y, z, DIMX, DIMY, DIMZ) ( clamp((int)(z), 0, DIMZ-1)*DIMY*DIMX + \
clamp((int)(y), 0, DIMY-1)*DIMX + \
clamp((int)(x), 0, DIMX-1) )
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__
void heatflow_global(float *src, float *dst)
{
    // 7-point 3D heat-flow stencil over a DIMX x DIMY x DIMZ volume.
    // Each block covers a BLOCKSIZEX x BLOCKSIZEY x BLOCKSIZEZ tile; since the
    // tile holds more points than the block has threads, each thread processes
    // NUMWRITING points, strided by NUMTHREADS inside the tile.
    // Boundary points (the outermost shell) are left untouched in dst.
    //
    // Fix vs. the previous version: removed `offset_index_1d` (never used) and
    // the computation of `global_index_1d` (computed but never read -- the
    // store goes through the clamping `at` macro instead).
    int closed_index_1d;
    int3 closed_index_3d, offset_index_3d, global_index_3d;
    // Origin of this block's tile in global coordinates.
    offset_index_3d = make_int3(blockIdx.x * BLOCKSIZEX,
                                blockIdx.y * BLOCKSIZEY,
                                blockIdx.z * BLOCKSIZEZ);
    for(int thisWriting=0; thisWriting<NUMWRITING; thisWriting++)
    {
        // Linear index of the point inside the tile handled on this pass.
        closed_index_1d = threadIdx.z * blockDim.y * blockDim.x +
                          threadIdx.y * blockDim.x +
                          threadIdx.x +
                          thisWriting*NUMTHREADS;
        // Unflatten to tile-local (x, y, z).
        closed_index_3d = make_int3((closed_index_1d % CLOSEDDIMXY % CLOSEDDIMX),
                                    (closed_index_1d % CLOSEDDIMXY / CLOSEDDIMX),
                                    (closed_index_1d / CLOSEDDIMXY) );
        // Translate to global coordinates.
        global_index_3d = make_int3((offset_index_3d.x + closed_index_3d.x),
                                    (offset_index_3d.y + closed_index_3d.y),
                                    (offset_index_3d.z + closed_index_3d.z) );
        // Interior points only: the `at` macro clamps indices anyway, but the
        // guard keeps boundary values unmodified (and skips out-of-range
        // passes when the tile overhangs the volume).
        if(global_index_3d.z > 0 && global_index_3d.z < (DIMZ-1) &&
           global_index_3d.y > 0 && global_index_3d.y < (DIMY-1) &&
           global_index_3d.x > 0 && global_index_3d.x < (DIMX-1) )
        {
            dst[at(global_index_3d.x, global_index_3d.y, global_index_3d.z, DIMX, DIMY, DIMZ)]
            = C0 * (src[at(global_index_3d.x+0, global_index_3d.y+0, global_index_3d.z+0, DIMX, DIMY, DIMZ)])+
              C1 * (src[at(global_index_3d.x-1, global_index_3d.y+0, global_index_3d.z+0, DIMX, DIMY, DIMZ)] +
                    src[at(global_index_3d.x+1, global_index_3d.y+0, global_index_3d.z+0, DIMX, DIMY, DIMZ)] +
                    src[at(global_index_3d.x+0, global_index_3d.y-1, global_index_3d.z+0, DIMX, DIMY, DIMZ)] +
                    src[at(global_index_3d.x+0, global_index_3d.y+1, global_index_3d.z+0, DIMX, DIMY, DIMZ)] +
                    src[at(global_index_3d.x+0, global_index_3d.y+0, global_index_3d.z-1, DIMX, DIMY, DIMZ)] +
                    src[at(global_index_3d.x+0, global_index_3d.y+0, global_index_3d.z+1, DIMX, DIMY, DIMZ)]);
        }
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__
// Placeholder for a shared-memory (tiled) variant of heatflow_global;
// intentionally left unimplemented.
void heatflow_shared(float *src, float *dst)
{
}
////////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
    // Driver: fill a DIMX x DIMY x DIMZ volume with random digits, run the
    // 7-point heat-flow stencil `iter` times on the GPU, report bandwidth and
    // GFLOPS, then verify (a) a device copy round-trips the data and (b) the
    // stencil result matches a CPU reference on interior points.
    //
    // Fix vs. the previous version: the host buffers are allocated with
    // new[], so they are now released with delete[] (passing them to free()
    // is undefined behaviour).  Unused local `tmp` removed.
    srand(time(NULL)); // for random number generator
    hipSetDevice(2);checkLastError(); // NOTE: device index 2 is hard-coded
    hipDeviceReset();checkLastError();
    // Allocate host memory
    float *h_src = new float[TOTAL];
    float *h_dst = new float[TOTAL];
    // Allocate device memory
    float *d_src;
    float *d_dst;
    hipMalloc((void**)&d_src, TOTAL*sizeof(float)); checkLastError();
    hipMalloc((void**)&d_dst, TOTAL*sizeof(float)); checkLastError();
    // Initialize the source volume with small random integers in [0, 9]
    for(int z=0; z<DIMZ; z++)
    {
        for(int y=0; y<DIMY; y++)
        {
            for(int x=0; x<DIMX; x++)
            {
                h_src[z*DIMY*DIMX+y*DIMX+x] = (float)( (int)rand() % 10); // 7;
            }
        }
    }
    // Transfer the source volume to device memory
    hipMemcpy(d_src, h_src, TOTAL*sizeof(float), hipMemcpyHostToDevice); checkLastError();
    hipMemset(d_dst, 0, TOTAL*sizeof(float));checkLastError();
    // Parameters for the performance report
    double flops, gbps, nops, nbp;
    nbp = 8*4; // # of bytes transferred per point
    nops = 8.; // # of flops per point
    int iter = 20;
    int rightData = 1;
    int rightHeat = 1;
    // Verify that a device-to-device copy round-trips the data unchanged
    hipMemcpy(d_dst, d_src, TOTAL*sizeof(float), hipMemcpyDeviceToDevice); checkLastError();
    hipMemcpy(h_dst, d_dst, TOTAL*sizeof(float), hipMemcpyDeviceToHost); checkLastError();
    for(int z=0; z<DIMZ && rightData; z++)
    {
        for(int y=0; y<DIMY && rightData; y++)
        {
            for(int x=0; x<DIMX && rightData; x++)
            {
                if(h_src[z*DIMY*DIMX+y*DIMX+x] != h_dst[z*DIMY*DIMX+y*DIMX+x])
                {
                    printf("Data does not match at x: %d, y: %d, z: %d\n", x, y, z);
                    rightData = 0;
                }
            }
        }
    }
    if(rightData) printf("Data is correct.\n");
    // Grid construction: one block per BLOCKSIZE tile (ceil-divided)
    dim3 numThreads(BLOCKDIMX, BLOCKDIMY, BLOCKDIMZ);
    dim3 numBlocks((DIMX/BLOCKSIZEX)+((DIMX%BLOCKSIZEX)?1:0),
                   (DIMY/BLOCKSIZEY)+((DIMY%BLOCKSIZEY)?1:0),
                   (DIMZ/BLOCKSIZEZ)+((DIMZ%BLOCKSIZEZ)?1:0));
    hipMemset(d_dst, 0, TOTAL*sizeof(float));checkLastError(); // Reset the result
    memset(h_dst, 0, TOTAL*sizeof(float));
    // Launch and time the kernel (same src -> dst each iteration)
    GpuTimer gpu_timer;
    gpu_timer.Start();
    for(int n=0; n<iter; n++)
    {
        hipLaunchKernelGGL(( heatflow_global), dim3(numBlocks), dim3(numThreads), 0, 0, d_src, d_dst);
    }
    gpu_timer.Stop();
    checkLastError();
    float msec = gpu_timer.Elapsed();
    gbps  = nbp*DIMX*DIMY*DIMZ/(msec/1000.)/(1024.*1024.*1024.)*(double)iter;
    flops = nops*DIMX*DIMY*DIMZ/(msec/1000.)/(1024.*1024.*1024.)*(double)iter;
    printf("Computing time : %.3f msec, Device memory bandwidth : %.3f GB/s, GFLOPS : %.3f\n", msec, gbps, flops);
    float* h_ref = new float[DIMX*DIMY*DIMZ];
    float result;
    // CPU reference: the same 7-point stencil, interior points only
    for(int z=1; z<(DIMZ-1); z++)
    {
        for(int y=1; y<(DIMY-1); y++)
        {
            for(int x=1; x<(DIMX-1); x++)
            {
                result = C0 * (h_src[at(x+0, y+0, z+0, DIMX, DIMY, DIMZ)])+
                         C1 * (h_src[at(x-1, y+0, z+0, DIMX, DIMY, DIMZ)] +
                               h_src[at(x+1, y+0, z+0, DIMX, DIMY, DIMZ)] +
                               h_src[at(x+0, y-1, z+0, DIMX, DIMY, DIMZ)] +
                               h_src[at(x+0, y+1, z+0, DIMX, DIMY, DIMZ)] +
                               h_src[at(x+0, y+0, z-1, DIMX, DIMY, DIMZ)] +
                               h_src[at(x+0, y+0, z+1, DIMX, DIMY, DIMZ)]);
                h_ref[at(x+0, y+0, z+0, DIMX, DIMY, DIMZ)] = result;
            }
        }
    }
    // Copy the device result back to the host
    hipMemcpy(h_dst, d_dst, TOTAL*sizeof(float), hipMemcpyDeviceToHost); checkLastError();
    // Compare GPU result against the CPU reference (interior points only)
    for(int z=1; z<(DIMZ-1) && rightHeat; z++)
    {
        for(int y=1; y<(DIMY-1) && rightHeat; y++)
        {
            for(int x=1; x<(DIMX-1) && rightHeat; x++)
            {
                if(h_ref[z*DIMY*DIMX+y*DIMX+x] != h_dst[z*DIMY*DIMX+y*DIMX+x])
                {
                    printf("Solution does not match at x: %d, y: %d, z: %d\n", x, y, z);
                    printf("h_ref (%04.4f), h_dst (%04.4f)\n",
                           h_ref[z*DIMY*DIMX+y*DIMX+x],
                           h_dst[z*DIMY*DIMX+y*DIMX+x]);
                    rightHeat = 0;
                }
            }
        }
    }
    if(rightHeat) printf("Solution is correct.\n");
    printf("\n");
    checkLastError();
    // Cleanup: device buffers via hipFree, host buffers via delete[]
    hipFree(d_src);
    hipFree(d_dst);
    delete[] h_src;
    delete[] h_dst;
    delete[] h_ref;
    return 0;
}
#define DIMX 512
#define DIMY 512
#define DIMZ 512
#define BLOCKDIMX 32
#define BLOCKDIMY 4
#define BLOCKDIMZ 1
#define TOTAL (DIMX*DIMY*DIMZ)
#define BLOCKSIZEX 64
#define BLOCKSIZEY 16
#define BLOCKSIZEZ 1
#define NUMTHREADS (BLOCKDIMX*BLOCKDIMY*BLOCKDIMZ)
#define HALO 1
#define OPENEDDIMX (BLOCKSIZEX+2*HALO)
#define OPENEDDIMY (BLOCKSIZEY+2*HALO)
#define OPENEDDIMZ (BLOCKSIZEZ+2*HALO)
#define OPENEDDIMXY (OPENEDDIMX*OPENEDDIMY)
#define OPENEDDIMXYZ (OPENEDDIMX*OPENEDDIMY*OPENEDDIMZ)
#define CLOSEDDIMX (BLOCKSIZEX)
#define CLOSEDDIMY (BLOCKSIZEY)
#define CLOSEDDIMZ (BLOCKSIZEZ)
#define CLOSEDDIMXY (CLOSEDDIMX*CLOSEDDIMY)
#define CLOSEDDIMXYZ (CLOSEDDIMX*CLOSEDDIMY*CLOSEDDIMZ)
#define NUMREADING ((OPENEDDIMXYZ / NUMTHREADS) + ((OPENEDDIMXYZ%NUMTHREADS)?1:0))
#define NUMWRITING ((CLOSEDDIMXYZ / NUMTHREADS) + ((CLOSEDDIMXYZ%NUMTHREADS)?1:0))
// #define CORRECTNESS_DATA
#define CORRECTNESS_HEAT
// #define myclamp(x, value, tx, fx) {return ((x)==(value)) ? (tx):(fx)}
#define C0 0.25f
#define C1 0.50f
#include <iostream>
#include <fstream>
#include <sstream>
#include <iomanip> // std::setfill, std::setw
#include <string>
// #include <sys/ioctl.h>
#include <cuda.h>
#include <helper_math.h>
#include <gpu_timer.hpp>
using namespace std;
////////////////////////////////////////////////////////////////////////////////////////////////////
#define checkLastError() { \
cudaError_t error = cudaGetLastError(); \
int id; \
cudaGetDevice(&id); \
if(error != cudaSuccess) { \
printf("Cuda failure error in file '%s' in line %i: '%s' at device %d \n", \
__FILE__,__LINE__, cudaGetErrorString(error), id); \
exit(EXIT_FAILURE); \
} \
}
////////////////////////////////////////////////////////////////////////////////////////////////////
#define checkReadFile(filename, pData, size) { \
fstream *fs = new fstream; \
fs->open(filename, ios::in|ios::binary); \
if (!fs->is_open()) \
{ \
printf("Cannot open file '%s' in file '%s' at line %i\n", \
filename, __FILE__, __LINE__); \
return 1; \
} \
fs->read(reinterpret_cast<char*>(pData), size); \
fs->close(); \
delete fs; \
}
////////////////////////////////////////////////////////////////////////////////////////////////////
#define checkWriteFile(filename, pData, size) { \
fstream *fs = new fstream; \
fs->open(filename, ios::out|ios::binary); \
if (!fs->is_open()) \
{ \
fprintf(stderr, "Cannot open file '%s' in file '%s' at line %i\n", \
filename, __FILE__, __LINE__); \
return 1; \
} \
fs->write(reinterpret_cast<char*>(pData), size); \
fs->close(); \
delete fs; \
}
////////////////////////////////////////////////////////////////////////////////////////////////////
#define at(x, y, z, DIMX, DIMY, DIMZ) ( clamp((int)(z), 0, DIMZ-1)*DIMY*DIMX + \
clamp((int)(y), 0, DIMY-1)*DIMX + \
clamp((int)(x), 0, DIMX-1) )
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__
void heatflow_global(float *src, float *dst)
{
    // 7-point 3D heat-flow stencil over a DIMX x DIMY x DIMZ volume.
    // Each block covers a BLOCKSIZEX x BLOCKSIZEY x BLOCKSIZEZ tile; since the
    // tile holds more points than the block has threads, each thread processes
    // NUMWRITING points, strided by NUMTHREADS inside the tile.
    // Boundary points (the outermost shell) are left untouched in dst.
    //
    // Fix vs. the previous version: removed `offset_index_1d` (never used) and
    // the computation of `global_index_1d` (computed but never read -- the
    // store goes through the clamping `at` macro instead).
    int closed_index_1d;
    int3 closed_index_3d, offset_index_3d, global_index_3d;
    // Origin of this block's tile in global coordinates.
    offset_index_3d = make_int3(blockIdx.x * BLOCKSIZEX,
                                blockIdx.y * BLOCKSIZEY,
                                blockIdx.z * BLOCKSIZEZ);
    for(int thisWriting=0; thisWriting<NUMWRITING; thisWriting++)
    {
        // Linear index of the point inside the tile handled on this pass.
        closed_index_1d = threadIdx.z * blockDim.y * blockDim.x +
                          threadIdx.y * blockDim.x +
                          threadIdx.x +
                          thisWriting*NUMTHREADS;
        // Unflatten to tile-local (x, y, z).
        closed_index_3d = make_int3((closed_index_1d % CLOSEDDIMXY % CLOSEDDIMX),
                                    (closed_index_1d % CLOSEDDIMXY / CLOSEDDIMX),
                                    (closed_index_1d / CLOSEDDIMXY) );
        // Translate to global coordinates.
        global_index_3d = make_int3((offset_index_3d.x + closed_index_3d.x),
                                    (offset_index_3d.y + closed_index_3d.y),
                                    (offset_index_3d.z + closed_index_3d.z) );
        // Interior points only: the `at` macro clamps indices anyway, but the
        // guard keeps boundary values unmodified (and skips out-of-range
        // passes when the tile overhangs the volume).
        if(global_index_3d.z > 0 && global_index_3d.z < (DIMZ-1) &&
           global_index_3d.y > 0 && global_index_3d.y < (DIMY-1) &&
           global_index_3d.x > 0 && global_index_3d.x < (DIMX-1) )
        {
            dst[at(global_index_3d.x, global_index_3d.y, global_index_3d.z, DIMX, DIMY, DIMZ)]
            = C0 * (src[at(global_index_3d.x+0, global_index_3d.y+0, global_index_3d.z+0, DIMX, DIMY, DIMZ)])+
              C1 * (src[at(global_index_3d.x-1, global_index_3d.y+0, global_index_3d.z+0, DIMX, DIMY, DIMZ)] +
                    src[at(global_index_3d.x+1, global_index_3d.y+0, global_index_3d.z+0, DIMX, DIMY, DIMZ)] +
                    src[at(global_index_3d.x+0, global_index_3d.y-1, global_index_3d.z+0, DIMX, DIMY, DIMZ)] +
                    src[at(global_index_3d.x+0, global_index_3d.y+1, global_index_3d.z+0, DIMX, DIMY, DIMZ)] +
                    src[at(global_index_3d.x+0, global_index_3d.y+0, global_index_3d.z-1, DIMX, DIMY, DIMZ)] +
                    src[at(global_index_3d.x+0, global_index_3d.y+0, global_index_3d.z+1, DIMX, DIMY, DIMZ)]);
        }
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__
// Placeholder for a shared-memory (tiled) variant of heatflow_global;
// intentionally left unimplemented.
void heatflow_shared(float *src, float *dst)
{
}
////////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
    // Driver: fill a DIMX x DIMY x DIMZ volume with random digits, run the
    // 7-point heat-flow stencil `iter` times on the GPU, report bandwidth and
    // GFLOPS, then verify (a) a device copy round-trips the data and (b) the
    // stencil result matches a CPU reference on interior points.
    //
    // Fix vs. the previous version: the host buffers are allocated with
    // new[], so they are now released with delete[] (passing them to free()
    // is undefined behaviour).  Unused local `tmp` removed.
    srand(time(NULL)); // for random number generator
    cudaSetDevice(2);checkLastError(); // NOTE: device index 2 is hard-coded
    cudaDeviceReset();checkLastError();
    // Allocate host memory
    float *h_src = new float[TOTAL];
    float *h_dst = new float[TOTAL];
    // Allocate device memory
    float *d_src;
    float *d_dst;
    cudaMalloc((void**)&d_src, TOTAL*sizeof(float)); checkLastError();
    cudaMalloc((void**)&d_dst, TOTAL*sizeof(float)); checkLastError();
    // Initialize the source volume with small random integers in [0, 9]
    for(int z=0; z<DIMZ; z++)
    {
        for(int y=0; y<DIMY; y++)
        {
            for(int x=0; x<DIMX; x++)
            {
                h_src[z*DIMY*DIMX+y*DIMX+x] = (float)( (int)rand() % 10); // 7;
            }
        }
    }
    // Transfer the source volume to device memory
    cudaMemcpy(d_src, h_src, TOTAL*sizeof(float), cudaMemcpyHostToDevice); checkLastError();
    cudaMemset(d_dst, 0, TOTAL*sizeof(float));checkLastError();
    // Parameters for the performance report
    double flops, gbps, nops, nbp;
    nbp = 8*4; // # of bytes transferred per point
    nops = 8.; // # of flops per point
    int iter = 20;
    int rightData = 1;
    int rightHeat = 1;
    // Verify that a device-to-device copy round-trips the data unchanged
    cudaMemcpy(d_dst, d_src, TOTAL*sizeof(float), cudaMemcpyDeviceToDevice); checkLastError();
    cudaMemcpy(h_dst, d_dst, TOTAL*sizeof(float), cudaMemcpyDeviceToHost); checkLastError();
    for(int z=0; z<DIMZ && rightData; z++)
    {
        for(int y=0; y<DIMY && rightData; y++)
        {
            for(int x=0; x<DIMX && rightData; x++)
            {
                if(h_src[z*DIMY*DIMX+y*DIMX+x] != h_dst[z*DIMY*DIMX+y*DIMX+x])
                {
                    printf("Data does not match at x: %d, y: %d, z: %d\n", x, y, z);
                    rightData = 0;
                }
            }
        }
    }
    if(rightData) printf("Data is correct.\n");
    // Grid construction: one block per BLOCKSIZE tile (ceil-divided)
    dim3 numThreads(BLOCKDIMX, BLOCKDIMY, BLOCKDIMZ);
    dim3 numBlocks((DIMX/BLOCKSIZEX)+((DIMX%BLOCKSIZEX)?1:0),
                   (DIMY/BLOCKSIZEY)+((DIMY%BLOCKSIZEY)?1:0),
                   (DIMZ/BLOCKSIZEZ)+((DIMZ%BLOCKSIZEZ)?1:0));
    cudaMemset(d_dst, 0, TOTAL*sizeof(float));checkLastError(); // Reset the result
    memset(h_dst, 0, TOTAL*sizeof(float));
    // Launch and time the kernel (same src -> dst each iteration)
    GpuTimer gpu_timer;
    gpu_timer.Start();
    for(int n=0; n<iter; n++)
    {
        heatflow_global<<<numBlocks, numThreads>>>(d_src, d_dst);
    }
    gpu_timer.Stop();
    checkLastError();
    float msec = gpu_timer.Elapsed();
    gbps  = nbp*DIMX*DIMY*DIMZ/(msec/1000.)/(1024.*1024.*1024.)*(double)iter;
    flops = nops*DIMX*DIMY*DIMZ/(msec/1000.)/(1024.*1024.*1024.)*(double)iter;
    printf("Computing time : %.3f msec, Device memory bandwidth : %.3f GB/s, GFLOPS : %.3f\n", msec, gbps, flops);
    float* h_ref = new float[DIMX*DIMY*DIMZ];
    float result;
    // CPU reference: the same 7-point stencil, interior points only
    for(int z=1; z<(DIMZ-1); z++)
    {
        for(int y=1; y<(DIMY-1); y++)
        {
            for(int x=1; x<(DIMX-1); x++)
            {
                result = C0 * (h_src[at(x+0, y+0, z+0, DIMX, DIMY, DIMZ)])+
                         C1 * (h_src[at(x-1, y+0, z+0, DIMX, DIMY, DIMZ)] +
                               h_src[at(x+1, y+0, z+0, DIMX, DIMY, DIMZ)] +
                               h_src[at(x+0, y-1, z+0, DIMX, DIMY, DIMZ)] +
                               h_src[at(x+0, y+1, z+0, DIMX, DIMY, DIMZ)] +
                               h_src[at(x+0, y+0, z-1, DIMX, DIMY, DIMZ)] +
                               h_src[at(x+0, y+0, z+1, DIMX, DIMY, DIMZ)]);
                h_ref[at(x+0, y+0, z+0, DIMX, DIMY, DIMZ)] = result;
            }
        }
    }
    // Copy the device result back to the host
    cudaMemcpy(h_dst, d_dst, TOTAL*sizeof(float), cudaMemcpyDeviceToHost); checkLastError();
    // Compare GPU result against the CPU reference (interior points only)
    for(int z=1; z<(DIMZ-1) && rightHeat; z++)
    {
        for(int y=1; y<(DIMY-1) && rightHeat; y++)
        {
            for(int x=1; x<(DIMX-1) && rightHeat; x++)
            {
                if(h_ref[z*DIMY*DIMX+y*DIMX+x] != h_dst[z*DIMY*DIMX+y*DIMX+x])
                {
                    printf("Solution does not match at x: %d, y: %d, z: %d\n", x, y, z);
                    printf("h_ref (%04.4f), h_dst (%04.4f)\n",
                           h_ref[z*DIMY*DIMX+y*DIMX+x],
                           h_dst[z*DIMY*DIMX+y*DIMX+x]);
                    rightHeat = 0;
                }
            }
        }
    }
    if(rightHeat) printf("Solution is correct.\n");
    printf("\n");
    checkLastError();
    // Cleanup: device buffers via cudaFree, host buffers via delete[]
    cudaFree(d_src);
    cudaFree(d_dst);
    delete[] h_src;
    delete[] h_dst;
    delete[] h_ref;
    return 0;
}
535fe0b55cb9522f83897c0c8663c97a5cc9a081.hip | // !!! This is a file automatically generated by hipify!!!
// includes, system
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <math.h>
#include <stdint.h>
#include <ctime>
// includes CUDA Runtime
#include <hip/hip_runtime.h>
#include <gputimer.h>
using namespace std;
const int ARRAY_SIZE = 5120;
typedef int32_t int32;
template <typename T>
__global__ void UnaryClipCustomKernel(const int32 size_in,
                                      const T *__restrict__ in0,
                                      const T *__restrict__ in1,
                                      const T *__restrict__ in2,
                                      T *__restrict__ out) {
    // Element-wise clip: out[i] = max(in1[0], min(in2[0], in0[i])).
    // in1 / in2 are single-element device scalars holding the lower / upper
    // bound.  Uses a grid-stride loop, so any grid size relative to size_in
    // is correct.
    /*
    GPU_1D_KERNEL_LOOP(i, size_in) {
        T value = in2[0] < in0[i] ? in2[0] : in0[i];
        out[i] = value < in1[0] ? in1[0] : value;
    }
    */
    const int32 thread_id = blockIdx.x * blockDim.x + threadIdx.x;
    const int32 total_thread_count = gridDim.x * blockDim.x;
    int32 offset = thread_id;
    while(offset < size_in){
        // min with the upper bound, then max with the lower bound
        T value = in2[0] < in0[offset] ? in2[0] : in0[offset];
        out[offset] = value < in1[0] ? in1[0] : value;
        offset += total_thread_count;
    }
}
clock_t totalStart, totalEnd;
clock_t kernelStart, kernelEnd;
int main(int argc, char* argv[]){
    // Driver for UnaryClipCustomKernel: reads ARRAY_SIZE floats from
    // "in0.txt", clips them into [20, 90] on the GPU, prints the results,
    // and reports kernel and total wall-clock time.
    //
    // Fix vs. the previous version: on a failed file open the program now
    // returns 1 instead of reading from the dead stream and clipping
    // uninitialized data.
    totalStart = clock();
    int nBytes = ARRAY_SIZE * sizeof(float);
    float *in0, *in1, *in2, *out;
    // Managed (unified) memory: accessible from both host and device
    hipMallocManaged((void**)&in0, nBytes);
    hipMallocManaged((void**)&out, nBytes);
    hipMallocManaged((void**)&in1, sizeof(float));
    hipMallocManaged((void**)&in2, sizeof(float));
    // Read the input values (one float per whitespace-separated token)
    ifstream infile;
    ofstream outfile;
    infile.open("in0.txt",ios::in);
    if(!infile.is_open()){
        cout<<"Opening data file fails"<<endl;
        return 1;
    }
    cout<<"Opening data file successes"<<endl;
    for(int i=0; i<ARRAY_SIZE; i++){
        infile>>in0[i];
    }
    // Clip bounds: lower = 20, upper = 90
    in1[0] = 20.0;
    in2[0] = 90.0;
    // Launch configuration: 256 threads per block, ceil-divided blocks
    dim3 blockSize(256);
    dim3 gridSize((ARRAY_SIZE + blockSize.x - 1) / blockSize.x);
    kernelStart = clock();
    hipLaunchKernelGGL(( UnaryClipCustomKernel), dim3(gridSize), dim3(blockSize), 0, 0, ARRAY_SIZE, in0, in1, in2, out);
    // Synchronize so the managed results are valid on the host (and so the
    // timing below covers the actual kernel execution)
    hipDeviceSynchronize();
    kernelEnd = clock();
    double kernelTime=(double)(kernelEnd-kernelStart)/CLOCKS_PER_SEC;
    cout<<"Kernel time:"<<kernelTime*1000<<"ms"<<endl; // milliseconds
    // Print the clipped values
    for(int i=0; i<ARRAY_SIZE; i++){
        cout<<out[i]<<endl;
    }
    // Release managed memory
    hipFree(in0);
    hipFree(in1);
    hipFree(in2);
    hipFree(out);
    totalEnd = clock();
    double totalTime=(double)(totalEnd-totalStart)/CLOCKS_PER_SEC;
    cout<<"Total time:"<<totalTime*1000<<"ms"<<endl; // milliseconds
    return 0;
}
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <math.h>
#include <stdint.h>
#include <ctime>
// includes CUDA Runtime
#include <cuda_runtime.h>
#include <gputimer.h>
using namespace std;
const int ARRAY_SIZE = 5120;
typedef int32_t int32;
template <typename T>
__global__ void UnaryClipCustomKernel(const int32 size_in,
                                      const T *__restrict__ in0,
                                      const T *__restrict__ in1,
                                      const T *__restrict__ in2,
                                      T *__restrict__ out) {
    // Element-wise clip: out[i] = max(in1[0], min(in2[0], in0[i])).
    // in1 / in2 are single-element device scalars holding the lower / upper
    // bound.  Uses a grid-stride loop, so any grid size relative to size_in
    // is correct.
    /*
    GPU_1D_KERNEL_LOOP(i, size_in) {
        T value = in2[0] < in0[i] ? in2[0] : in0[i];
        out[i] = value < in1[0] ? in1[0] : value;
    }
    */
    const int32 thread_id = blockIdx.x * blockDim.x + threadIdx.x;
    const int32 total_thread_count = gridDim.x * blockDim.x;
    int32 offset = thread_id;
    while(offset < size_in){
        // min with the upper bound, then max with the lower bound
        T value = in2[0] < in0[offset] ? in2[0] : in0[offset];
        out[offset] = value < in1[0] ? in1[0] : value;
        offset += total_thread_count;
    }
}
clock_t totalStart, totalEnd;
clock_t kernelStart, kernelEnd;
int main(int argc, char* argv[]){
    // Driver for UnaryClipCustomKernel: reads ARRAY_SIZE floats from
    // "in0.txt", clips them into [20, 90] on the GPU, prints the results,
    // and reports kernel and total wall-clock time.
    //
    // Fix vs. the previous version: on a failed file open the program now
    // returns 1 instead of reading from the dead stream and clipping
    // uninitialized data.
    totalStart = clock();
    int nBytes = ARRAY_SIZE * sizeof(float);
    float *in0, *in1, *in2, *out;
    // Managed (unified) memory: accessible from both host and device
    cudaMallocManaged((void**)&in0, nBytes);
    cudaMallocManaged((void**)&out, nBytes);
    cudaMallocManaged((void**)&in1, sizeof(float));
    cudaMallocManaged((void**)&in2, sizeof(float));
    // Read the input values (one float per whitespace-separated token)
    ifstream infile;
    ofstream outfile;
    infile.open("in0.txt",ios::in);
    if(!infile.is_open()){
        cout<<"Opening data file fails"<<endl;
        return 1;
    }
    cout<<"Opening data file successes"<<endl;
    for(int i=0; i<ARRAY_SIZE; i++){
        infile>>in0[i];
    }
    // Clip bounds: lower = 20, upper = 90
    in1[0] = 20.0;
    in2[0] = 90.0;
    // Launch configuration: 256 threads per block, ceil-divided blocks
    dim3 blockSize(256);
    dim3 gridSize((ARRAY_SIZE + blockSize.x - 1) / blockSize.x);
    kernelStart = clock();
    UnaryClipCustomKernel<<<gridSize, blockSize>>>(ARRAY_SIZE, in0, in1, in2, out);
    // Synchronize so the managed results are valid on the host (and so the
    // timing below covers the actual kernel execution)
    cudaDeviceSynchronize();
    kernelEnd = clock();
    double kernelTime=(double)(kernelEnd-kernelStart)/CLOCKS_PER_SEC;
    cout<<"Kernel time:"<<kernelTime*1000<<"ms"<<endl; // milliseconds
    // Print the clipped values
    for(int i=0; i<ARRAY_SIZE; i++){
        cout<<out[i]<<endl;
    }
    // Release managed memory
    cudaFree(in0);
    cudaFree(in1);
    cudaFree(in2);
    cudaFree(out);
    totalEnd = clock();
    double totalTime=(double)(totalEnd-totalStart)/CLOCKS_PER_SEC;
    cout<<"Total time:"<<totalTime*1000<<"ms"<<endl; // milliseconds
    return 0;
}
786fb132ba2728d3db306010f8659fc100169695.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "func_colorspace.h"
#include "../def/cu_define.h"
namespace
{
__constant__ float ZERO_LIMIT = 1e-3;
    __device__ float3 rgb2hsv(float3& rgb)
    {
        // Convert one RGB pixel to HSV.  Returns (H, S, V) with H normalised
        // to [0, 1] (the conventional 0..6 sextant value divided by 6),
        // S in [0, 1] for non-negative input, and V = max(R, G, B).
        // Assumes non-negative channel values -- TODO confirm expected input
        // range ([0, 1] vs [0, 255]).
        float max_rgb = MAX(MAX(rgb.x, rgb.y), rgb.z);
        float min_rgb = MIN(MIN(rgb.x, rgb.y), rgb.z);
        // Calculate Saturation: S = 1 - min/max; 0 for (near-)black pixels
        float S = 0;
        if (abs(max_rgb) >= ZERO_LIMIT)
            S = 1 - min_rgb / max_rgb;
        // Calculate Hue from the dominant channel; C is the chroma
        float H = 0;
        float C = max_rgb - min_rgb;
        bool isvalid = C > ZERO_LIMIT;  // grey pixels have no defined hue
        if (isvalid)
        {
            if (abs(max_rgb - rgb.x) < ZERO_LIMIT) // max = r
            {
                float tmp = (rgb.y - rgb.z) / C;
                if (tmp < 0) H = 6 + tmp;  // wrap negative sextants into [0, 6)
                else H = tmp;
            }
            else if (abs(max_rgb - rgb.y) < ZERO_LIMIT) { // max = g
                H = (rgb.z - rgb.x) / C + 2;
            }
            else { // max = b
                H = (rgb.x - rgb.y) / C + 4;
            }
        }
        else
            H = 0;
        // Project H to [0,1]
        return make_float3(H / 6, S, max_rgb);
    }
    __device__ float3 hsv2rgb(float3& hsv)
    {
        // Convert one HSV pixel (H normalised to [0, 1], as produced by
        // rgb2hsv above) back to RGB.
        auto H = hsv.x * 6; // Since the H has been projected to [0,1]
        auto S = hsv.y;
        auto V = hsv.z;
        // Chroma; zero saturation means a grey pixel (R = G = B = V)
        float C = 0;
        if (abs(S) > ZERO_LIMIT) C = V * S;
        float m = V - C; // the min value
        // Second-largest channel magnitude within the current sextant
        float X = C * (1 - abs(fmod(H, 2.f) - 1));
        float R = 0, G = 0, B = 0;
        // Assign (R, G, B) per 60-degree hue sextant
        if (H >= 0 && H < 1) {
            R = V; G = X + m; B = m;
        }
        else if (H >= 1 && H < 2) {
            R = X + m; G = V; B = m;
        }
        else if (H >= 2 && H < 3) {
            R = m; G = V; B = X + m;
        }
        else if (H >= 3 && H < 4) {
            R = m; G = X + m; B = V;
        }
        else if (H >= 4 && H < 5) {
            R = X + m; G = m; B = V;
        }
        else { // (H >= 5 && H <= 6)
            R = V; G = m; B = X + m;
        }
        return make_float3(R, G, B);
    }
__global__ void bgr2hsv(cv::cuda::PtrStepSz<float3> bgr, cv::cuda::PtrStepSz<float3> hsv)
{
int thread_id = _get_threadId_grid2D_block1D();
int row = thread_id / bgr.cols;
int col = thread_id % bgr.cols;
if (row < bgr.rows && col < bgr.cols)
{
auto rgb = make_float3(bgr(row, col).z, bgr(row, col).y, bgr(row, col).x);
hsv(row, col) = rgb2hsv(rgb);
}
}
__global__ void bgra2hsv(cv::cuda::PtrStepSz<float4> bgra, cv::cuda::PtrStepSz<float3> hsv)
{
int thread_id = _get_threadId_grid2D_block1D();
int row = thread_id / bgra.cols;
int col = thread_id % bgra.cols;
if (row < bgra.rows && col < bgra.cols)
{
auto rgb = make_float3(bgra(row, col).z, bgra(row, col).y, bgra(row, col).x);
hsv(row, col) = rgb2hsv(rgb);
}
}
// One thread per pixel: convert HSV back to RGB, then store with channels swapped to BGR.
__global__ void hsv2bgr(cv::cuda::PtrStepSz<float3> hsv, cv::cuda::PtrStepSz<float3> bgr)
{
	int thread_id = _get_threadId_grid2D_block1D();
	int row = thread_id / hsv.cols;
	int col = thread_id % hsv.cols;
	if (row < hsv.rows && col < hsv.cols)
	{
		auto rgb = hsv2rgb(hsv(row, col));
		bgr(row, col) = make_float3(rgb.z, rgb.y, rgb.x);
	}
}
// One thread per pixel: convert HSV back to RGB and store as BGRA with alpha forced to 1.
__global__ void hsv2bgra(cv::cuda::PtrStepSz<float3> hsv, cv::cuda::PtrStepSz<float4> bgra)
{
	int thread_id = _get_threadId_grid2D_block1D();
	int row = thread_id / hsv.cols;
	int col = thread_id % hsv.cols;
	if (row < hsv.rows && col < hsv.cols)
	{
		auto rgb = hsv2rgb(hsv(row, col));
		bgra(row, col) = make_float4(rgb.z, rgb.y, rgb.x, 1);
	}
}
// One thread per pixel: V (value) = max of the first three channels of src.
// The fourth channel (.w) of src is never read.
__global__ void calcVbyHSV(cv::cuda::PtrStepSz<float4> src, cv::cuda::PtrStepSz<float> v)
{
	int thread_id = _get_threadId_grid2D_block1D();
	int row = thread_id / src.cols;
	int col = thread_id % src.cols;
	if (row < src.rows && col < src.cols)
	{
		v(row, col) = MAX(MAX(src(row, col).x, src(row, col).y), src(row, col).z);
	}
}
}
GPU_ALGO_BEGIN
// Launch the requested color-space conversion kernel asynchronously on `stream`.
// Unknown conversion types are silently ignored (default: break).
// NOTE(review): the launch is hard-coded to dim3(90,90) blocks x 256 threads
// = 2,073,600 threads, which equals exactly 1920x1080 pixels. Assuming
// _get_threadId_grid2D_block1D enumerates all grid threads, larger images
// would be only partially converted — confirm the expected input size.
void cvtColor(cv::cuda::GpuMat &src, cv::cuda::GpuMat &dst, ColorConvertType cvttype, hipStream_t stream)
{
	switch (cvttype)
	{
	case BGR2HSV:
		::bgr2hsv << < dim3(90, 90), 256, 0, stream >> > (src, dst);
		break;
	case HSV2BGR:
		::hsv2bgr << < dim3(90, 90), 256, 0, stream >> > (src, dst);
		break;
	case BGRA2HSV:
		::bgra2hsv << < dim3(90, 90), 256, 0, stream >> > (src, dst);
		break;
	case HSV2BGRA:
		::hsv2bgra << < dim3(90, 90), 256, 0, stream >> > (src, dst);
		break;
	default:
		break;
	}
}
// Launch ::calcVbyHSV (V = per-pixel max channel) asynchronously on `stream`.
// NOTE(review): grid is hard-coded to dim3(90,90) x 256 threads (1920x1080
// pixels); larger images would be only partially processed — confirm.
void calcVbyHSV(cv::cuda::GpuMat &src, cv::cuda::GpuMat &v, hipStream_t stream)
{
	::calcVbyHSV << < dim3(90, 90), 256, 0, stream >> > (src, v);
}
GPU_ALGO_END | 786fb132ba2728d3db306010f8659fc100169695.cu | #include "func_colorspace.h"
#include "../def/cu_define.h"
namespace
{
__constant__ float ZERO_LIMIT = 1e-3;
// Convert one RGB pixel to HSV.
// Returns (H/6, S, V): hue is normalized from the usual [0,6) sector range to
// [0,1], S = 1 - min/max (0 for near-black pixels), V = the max channel.
// ZERO_LIMIT guards both divisions against near-zero denominators.
__device__ float3 rgb2hsv(float3& rgb)
{
	float max_rgb = MAX(MAX(rgb.x, rgb.y), rgb.z);
	float min_rgb = MIN(MIN(rgb.x, rgb.y), rgb.z);
	// Calculate Saturation
	float S = 0;
	if (abs(max_rgb) >= ZERO_LIMIT) // avoid division by ~0 for black pixels
		S = 1 - min_rgb / max_rgb;
	// Calcuate Hue
	float H = 0;
	float C = max_rgb - min_rgb; // chroma
	bool isvalid = C > ZERO_LIMIT; // gray pixels (C ~ 0) have undefined hue -> 0
	if (isvalid)
	{
		if (abs(max_rgb - rgb.x) < ZERO_LIMIT) // max = r
		{
			float tmp = (rgb.y - rgb.z) / C;
			if (tmp < 0)	H = 6 + tmp; // wrap negative hue into [0,6)
			else H = tmp;
		}
		else if (abs(max_rgb - rgb.y) < ZERO_LIMIT) { // max = g
			H = (rgb.z - rgb.x) / C + 2;
		}
		else { // max = b
			H = (rgb.x - rgb.y) / C + 4;
		}
	}
	else
		H = 0;
	// Project H to [0,1]
	return make_float3(H / 6, S, max_rgb);
}
// Convert one HSV pixel back to RGB.
// hsv.x is the hue normalized to [0,1] (see rgb2hsv), hsv.y the saturation,
// hsv.z the value. Standard chroma/sector reconstruction: each unit-wide hue
// sector selects a permutation of (V, X+m, m).
__device__ float3 hsv2rgb(float3& hsv)
{
	auto H = hsv.x * 6; // Since the H has been projected to [0,1]
	auto S = hsv.y;
	auto V = hsv.z;
	float C = 0; // chroma; stays 0 for (near-)zero saturation
	if (abs(S) > ZERO_LIMIT)	C = V * S;
	float m = V - C; // the min value
	float X = C * (1 - abs(fmod(H, 2.f) - 1)); // intermediate channel magnitude
	float R = 0, G = 0, B = 0;
	if (H >= 0 && H < 1) {
		R = V; G = X + m; B = m;
	}
	else if (H >= 1 && H < 2) {
		R = X + m; G = V; B = m;
	}
	else if (H >= 2 && H < 3) {
		R = m; G = V; B = X + m;
	}
	else if (H >= 3 && H < 4) {
		R = m; G = X + m; B = V;
	}
	else if (H >= 4 && H < 5) {
		R = X + m; G = m; B = V;
	}
	else { // (H >= 5 && H <= 6)
		R = V; G = m; B = X + m;
	}
	return make_float3(R, G, B);
}
// One thread per pixel: reorder BGR -> RGB and convert to HSV via rgb2hsv.
// Pixels beyond the launched thread count are left unwritten (grid is fixed by the caller).
__global__ void bgr2hsv(cv::cuda::PtrStepSz<float3> bgr, cv::cuda::PtrStepSz<float3> hsv)
{
	int thread_id = _get_threadId_grid2D_block1D();
	int row = thread_id / bgr.cols;
	int col = thread_id % bgr.cols;
	if (row < bgr.rows && col < bgr.cols)
	{
		// Input is BGR; rgb2hsv expects channels ordered (r, g, b).
		auto rgb = make_float3(bgr(row, col).z, bgr(row, col).y, bgr(row, col).x);
		hsv(row, col) = rgb2hsv(rgb);
	}
}
// One thread per pixel: drop the alpha channel, reorder BGRA -> RGB, convert to HSV.
__global__ void bgra2hsv(cv::cuda::PtrStepSz<float4> bgra, cv::cuda::PtrStepSz<float3> hsv)
{
	int thread_id = _get_threadId_grid2D_block1D();
	int row = thread_id / bgra.cols;
	int col = thread_id % bgra.cols;
	if (row < bgra.rows && col < bgra.cols)
	{
		// .w (alpha) is ignored; rgb2hsv expects channels ordered (r, g, b).
		auto rgb = make_float3(bgra(row, col).z, bgra(row, col).y, bgra(row, col).x);
		hsv(row, col) = rgb2hsv(rgb);
	}
}
// One thread per pixel: convert HSV back to RGB, then store with channels swapped to BGR.
__global__ void hsv2bgr(cv::cuda::PtrStepSz<float3> hsv, cv::cuda::PtrStepSz<float3> bgr)
{
	int thread_id = _get_threadId_grid2D_block1D();
	int row = thread_id / hsv.cols;
	int col = thread_id % hsv.cols;
	if (row < hsv.rows && col < hsv.cols)
	{
		auto rgb = hsv2rgb(hsv(row, col));
		bgr(row, col) = make_float3(rgb.z, rgb.y, rgb.x);
	}
}
// One thread per pixel: convert HSV back to RGB and store as BGRA with alpha forced to 1.
__global__ void hsv2bgra(cv::cuda::PtrStepSz<float3> hsv, cv::cuda::PtrStepSz<float4> bgra)
{
	int thread_id = _get_threadId_grid2D_block1D();
	int row = thread_id / hsv.cols;
	int col = thread_id % hsv.cols;
	if (row < hsv.rows && col < hsv.cols)
	{
		auto rgb = hsv2rgb(hsv(row, col));
		bgra(row, col) = make_float4(rgb.z, rgb.y, rgb.x, 1);
	}
}
// One thread per pixel: V (value) = max of the first three channels of src.
// The fourth channel (.w) of src is never read.
__global__ void calcVbyHSV(cv::cuda::PtrStepSz<float4> src, cv::cuda::PtrStepSz<float> v)
{
	int thread_id = _get_threadId_grid2D_block1D();
	int row = thread_id / src.cols;
	int col = thread_id % src.cols;
	if (row < src.rows && col < src.cols)
	{
		v(row, col) = MAX(MAX(src(row, col).x, src(row, col).y), src(row, col).z);
	}
}
}
GPU_ALGO_BEGIN
// Launch the requested color-space conversion kernel asynchronously on `stream`.
// Unknown conversion types are silently ignored (default: break).
// NOTE(review): the launch is hard-coded to dim3(90,90) blocks x 256 threads
// = 2,073,600 threads, which equals exactly 1920x1080 pixels. Assuming
// _get_threadId_grid2D_block1D enumerates all grid threads, larger images
// would be only partially converted — confirm the expected input size.
void cvtColor(cv::cuda::GpuMat &src, cv::cuda::GpuMat &dst, ColorConvertType cvttype, cudaStream_t stream)
{
	switch (cvttype)
	{
	case BGR2HSV:
		::bgr2hsv << < dim3(90, 90), 256, 0, stream >> > (src, dst);
		break;
	case HSV2BGR:
		::hsv2bgr << < dim3(90, 90), 256, 0, stream >> > (src, dst);
		break;
	case BGRA2HSV:
		::bgra2hsv << < dim3(90, 90), 256, 0, stream >> > (src, dst);
		break;
	case HSV2BGRA:
		::hsv2bgra << < dim3(90, 90), 256, 0, stream >> > (src, dst);
		break;
	default:
		break;
	}
}
// Launch ::calcVbyHSV (V = per-pixel max channel) asynchronously on `stream`.
// NOTE(review): grid is hard-coded to dim3(90,90) x 256 threads (1920x1080
// pixels); larger images would be only partially processed — confirm.
void calcVbyHSV(cv::cuda::GpuMat &src, cv::cuda::GpuMat &v, cudaStream_t stream)
{
	::calcVbyHSV << < dim3(90, 90), 256, 0, stream >> > (src, v);
}
GPU_ALGO_END |
15e4fddba21bd7bd37d57b6d0a1210886c9a1cce.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cutens.h"
#include "util.cuh"
#include "sgemm.cuh"
// Host-side launch helper: derives grid/block dimensions from the tile template
// parameters and launches ker_sgemm (defined in sgemm.cuh). The block is
// DIM_Y x DIM_X threads; the grid tiles C in BLK_M x BLK_N chunks.
// LDA/LDB/LDC are forwarded to the kernel unchanged.
template <int DIM_X, int DIM_Y, int BLK_M, int BLK_N, int BLK_K,
          int DIM_XA, int DIM_YA, int DIM_XB, int DIM_YB>
static void
sgemm_template(int M, int N, int K,
               const float * __restrict__ A, int LDA,
               const float * __restrict__ B, int LDB,
               float * __restrict__ C, int LDC)
{
     dim3 dimBlock(DIM_Y, DIM_X);
     dim3 dimGrid(CEIL(N, BLK_N), CEIL(M, BLK_M));
hipLaunchKernelGGL(( ker_sgemm <DIM_X, DIM_Y, BLK_M, BLK_N, BLK_K,
               DIM_XA, DIM_YA, DIM_XB, DIM_YB,
               BLK_M/DIM_X, BLK_N/DIM_Y>)
               , dim3(dimGrid), dim3(dimBlock), 0, 0,
               M, N, K, A, LDA, B, LDB, C, LDC);
}
// SGEMM entry point on raw device pointers with a fixed 96x96x16 tiling.
// NOTE(review): the lda/ldb/ldc parameters are accepted but IGNORED — the call
// below hard-codes leading dimensions K, K and N. Confirm all callers pass
// exactly those values (or fix the call to forward lda/ldb/ldc).
void sgemm(int M, int N, int K,
           const float * __restrict__ A, int lda,
           const float * __restrict__ B, int ldb,
           float * __restrict__ C, int ldc)
{
     sgemm_template <16,16, 96,96,16, 32,8, 32,8>
          (M, N, K, A, K, B, K, C, N);
}
// SGEMM on cuftens tensors: folds the batch dimension D into the row count
// (D*M rows). Requires a->N == b->N (the shared inner dimension K).
void sgemm(cuftens *a, cuftens *b, cuftens *c)
{
     const int D=a->D, M=a->M, N=b->M, K=a->N;
     cuASSERT(a->N == b->N, "err: shape\n");
     sgemm_template <16,16, 96,96,16, 32,8, 32,8>
          (D*M, N, K, a->data, K, b->data, K, c->data, N);
}
| 15e4fddba21bd7bd37d57b6d0a1210886c9a1cce.cu | #include "cutens.h"
#include "util.cuh"
#include "sgemm.cuh"
// Host-side launch helper: derives grid/block dimensions from the tile template
// parameters and launches ker_sgemm (defined in sgemm.cuh). The block is
// DIM_Y x DIM_X threads; the grid tiles C in BLK_M x BLK_N chunks.
// LDA/LDB/LDC are forwarded to the kernel unchanged.
template <int DIM_X, int DIM_Y, int BLK_M, int BLK_N, int BLK_K,
          int DIM_XA, int DIM_YA, int DIM_XB, int DIM_YB>
static void
sgemm_template(int M, int N, int K,
               const float * __restrict__ A, int LDA,
               const float * __restrict__ B, int LDB,
               float * __restrict__ C, int LDC)
{
     dim3 dimBlock(DIM_Y, DIM_X);
     dim3 dimGrid(CEIL(N, BLK_N), CEIL(M, BLK_M));
     ker_sgemm <DIM_X, DIM_Y, BLK_M, BLK_N, BLK_K,
               DIM_XA, DIM_YA, DIM_XB, DIM_YB,
               BLK_M/DIM_X, BLK_N/DIM_Y>
               <<<dimGrid, dimBlock>>>
               (M, N, K, A, LDA, B, LDB, C, LDC);
}
// SGEMM entry point on raw device pointers with a fixed 96x96x16 tiling.
// NOTE(review): the lda/ldb/ldc parameters are accepted but IGNORED — the call
// below hard-codes leading dimensions K, K and N. Confirm all callers pass
// exactly those values (or fix the call to forward lda/ldb/ldc).
void sgemm(int M, int N, int K,
           const float * __restrict__ A, int lda,
           const float * __restrict__ B, int ldb,
           float * __restrict__ C, int ldc)
{
     sgemm_template <16,16, 96,96,16, 32,8, 32,8>
          (M, N, K, A, K, B, K, C, N);
}
// SGEMM on cuftens tensors: folds the batch dimension D into the row count
// (D*M rows). Requires a->N == b->N (the shared inner dimension K).
void sgemm(cuftens *a, cuftens *b, cuftens *c)
{
     const int D=a->D, M=a->M, N=b->M, K=a->N;
     cuASSERT(a->N == b->N, "err: shape\n");
     sgemm_template <16,16, 96,96,16, 32,8, 32,8>
          (D*M, N, K, a->data, K, b->data, K, c->data, N);
}
|
965da248182a8ed89a12ad9692d8dd27d366f553.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Context.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/PinnedMemoryAllocator.h>
#include <ATen/hip/HIPSolver.h>
#include <ATen/hip/HIPBlas.h>
#include <ATen/hip/HIPEvent.h>
#include <ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h>
#include <ATen/native/LinearAlgebraUtils.h>
#include <ATen/native/hip/MiscUtils.h>
#include <ATen/native/hip/BatchLinearAlgebraLib.h>
namespace at {
namespace native {
// Some cuBLAS and cuSOLVER batched routines require input to be a device array of pointers to device individual matrices
// 'input' must be a contiguous tensor
template <typename scalar_t>
static Tensor get_device_pointers(const Tensor& input) {
  auto input_data = input.data_ptr<scalar_t>();
  int64_t input_mat_stride = matrixStride(input);

  // cublas/cusolver interface requires 'int'
  int batch_size = cuda_int_cast(batchCount(input), "batch_size");

  // if batch_size==0, then start=0 and end=0
  // if input_mat_stride==0, then step=sizeof(scalar_t)
  // The result is an int64 tensor of raw device addresses (one per matrix);
  // callers reinterpret its storage as scalar_t** before handing it to the
  // batched cuBLAS/cuSOLVER routines.
  return at::arange(
      /*start=*/reinterpret_cast<int64_t>(input_data),
      /*end=*/reinterpret_cast<int64_t>(input_data + batch_size * input_mat_stride),
      /*step=*/static_cast<int64_t>(std::max<int64_t>(input_mat_stride, 1) * sizeof(scalar_t)),
      input.options().dtype(at::kLong));
}
template <typename scalar_t>
void apply_geqrf_batched(const Tensor& input, const Tensor& tau) {
  // Computes the QR factorization of every matrix in 'input' in-place
  // (Householder form), writing the reflector scalars to 'tau', via cuBLAS
  // geqrfBatched.
  // AMD ROCm backend is implemented via rewriting all CUDA calls to HIP
  // rocBLAS does not implement BLAS-like extensions of cuBLAS, they're in rocSOLVER
  // rocSOLVER is currently not used in ATen, therefore we raise an error in this case
#ifndef CUDART_VERSION
  TORCH_CHECK(false, "geqrf: Batched version is supported only with cuBLAS backend.")
#else
  auto batch_size = cuda_int_cast(batchCount(input), "batch_size");
  auto m = cuda_int_cast(input.size(-2), "m");
  auto n = cuda_int_cast(input.size(-1), "n");
  auto lda = std::max<int>(1, m);

  // cuBLAS batched geqrf requires input to be the device array of pointers to device single matrices
  Tensor input_ptr_array = get_device_pointers<scalar_t>(input);
  Tensor tau_ptr_array = get_device_pointers<scalar_t>(tau.unsqueeze(-1));
  auto input_ptr_array_data = reinterpret_cast<scalar_t**>(input_ptr_array.data_ptr());
  auto tau_ptr_array_data = reinterpret_cast<scalar_t**>(tau_ptr_array.data_ptr());

  int info;
  auto handle = at::cuda::getCurrentCUDABlasHandle();
  at::cuda::blas::geqrfBatched(handle, m, n, input_ptr_array_data, lda, tau_ptr_array_data, &info, batch_size);

  // info only indicates wrong arguments to geqrfBatched call
  // info is a host variable, we can check it without device synchronization
  TORCH_INTERNAL_ASSERT(info == 0);
#endif
}
// Type-dispatching wrapper over apply_geqrf_batched.
void geqrf_batched_cublas(const Tensor& input, const Tensor& tau) {
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "geqrf_batched_cuda", [&]{
    apply_geqrf_batched<scalar_t>(input, tau);
  });
}
template <typename scalar_t>
static void apply_lu_solve_batched_cublas(const Tensor& b, const Tensor& lu, const Tensor& pivots) {
  // Solves A X = B for every batch entry via cuBLAS getrsBatched, given the
  // LU factors in 'lu' and pivot indices in 'pivots'; the solution overwrites 'b'.
#ifndef CUDART_VERSION
  TORCH_CHECK(false, "lu_solve: cuBLAS backend for lu_solve is not available.")
#else
  hipblasOperation_t trans = HIPBLAS_OP_N; // solve with A as-is (no transpose)

  auto pivots_data = pivots.data_ptr<int>();
  auto batch_size = cuda_int_cast(batchCount(lu), "batch_size");; // NOTE(review): stray second ';'
  auto m = cuda_int_cast(lu.size(-2), "m");
  auto nrhs = cuda_int_cast(b.size(-1), "nrhs");
  auto lda = cuda_int_cast(std::max<int>(1, m), "lda");
  int info = 0;

  Tensor lu_ptr_array = get_device_pointers<scalar_t>(lu);
  Tensor b_ptr_array = get_device_pointers<scalar_t>(b);
  auto lu_ptr_array_data = reinterpret_cast<scalar_t**>(lu_ptr_array.data_ptr());
  auto b_ptr_array_data = reinterpret_cast<scalar_t**>(b_ptr_array.data_ptr());

  auto handle = at::cuda::getCurrentCUDABlasHandle();
  at::cuda::blas::getrsBatched(handle, trans, m, nrhs, lu_ptr_array_data,
    lda, pivots_data, b_ptr_array_data, lda, &info, batch_size);

  // info only flags bad arguments; checked host-side without a device sync.
  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info == 0);
#endif
}
// Type-dispatching wrapper over apply_lu_solve_batched_cublas.
void lu_solve_batched_cublas(const Tensor& b, const Tensor& lu, const Tensor& pivots) {
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(lu.scalar_type(), "lu_solve_cublas", [&]{
    apply_lu_solve_batched_cublas<scalar_t>(b, lu, pivots);
  });
}
template <typename scalar_t>
static void apply_triangular_solve(Tensor& A, Tensor& B, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
  // Solves op(A) * X = B in-place in B with one blas::trsm call per batch
  // entry. 'upper'/'unitriangular' describe the triangular structure of A;
  // 'transpose'/'conjugate_transpose' select op() (conjugate takes precedence).
  hipblasFillMode_t uplo = upper ? HIPBLAS_FILL_MODE_UPPER : HIPBLAS_FILL_MODE_LOWER;
  hipblasOperation_t trans = transpose ? HIPBLAS_OP_T : HIPBLAS_OP_N;
  trans = conjugate_transpose ? HIPBLAS_OP_C : trans;
  hipblasDiagType_t diag = unitriangular ? HIPBLAS_DIAG_UNIT : HIPBLAS_DIAG_NON_UNIT;
  hipblasSideMode_t side = HIPBLAS_SIDE_LEFT;

  auto A_data = A.data_ptr<scalar_t>();
  auto B_data = B.data_ptr<scalar_t>();
  auto A_mat_stride = matrixStride(A);
  auto B_mat_stride = matrixStride(B);
  auto batch_size = batchCount(A);
  auto m = cuda_int_cast(A.size(-2), "m");
  auto n = cuda_int_cast(A.size(-1), "n");
  auto nrhs = cuda_int_cast(B.size(-1), "nrhs");
  auto lda = std::max<int>(1, m);
  auto alpha = scalar_t{1};

  // The handle is loop-invariant: fetch it once instead of once per batch
  // entry (consistent with apply_triangular_solve_batched below).
  auto handle = at::cuda::getCurrentCUDABlasHandle();
  for (decltype(batch_size) i = 0; i < batch_size; i++) {
    scalar_t* A_working_ptr = &A_data[i * A_mat_stride];
    scalar_t* B_working_ptr = &B_data[i * B_mat_stride];
    at::cuda::blas::trsm(handle, side, uplo, trans, diag, n, nrhs, &alpha, A_working_ptr, lda, B_working_ptr, lda);
  }
}
// Type-dispatching wrapper over apply_triangular_solve; 'infos' is unused here
// because trsm reports no per-matrix status.
void triangular_solve_cublas(Tensor& A, Tensor& B, Tensor& infos, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
  (void)infos; // unused
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(A.scalar_type(), "triangular_solve_cuda", [&]{
    apply_triangular_solve<scalar_t>(A, B, upper, transpose, conjugate_transpose, unitriangular);
  });
}
template <typename scalar_t>
static void apply_triangular_solve_batched(Tensor& A, Tensor& B, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
  // Same contract as apply_triangular_solve, but issues a single cuBLAS
  // trsmBatched call over device pointer arrays instead of looping.
  hipblasFillMode_t uplo = upper ? HIPBLAS_FILL_MODE_UPPER : HIPBLAS_FILL_MODE_LOWER;
  hipblasOperation_t trans = transpose ? HIPBLAS_OP_T : HIPBLAS_OP_N;
  trans = conjugate_transpose ? HIPBLAS_OP_C : trans;
  hipblasDiagType_t diag = unitriangular ? HIPBLAS_DIAG_UNIT : HIPBLAS_DIAG_NON_UNIT;
  hipblasSideMode_t side = HIPBLAS_SIDE_LEFT;

  auto A_data = A.data_ptr<scalar_t>();
  auto B_data = B.data_ptr<scalar_t>();
  auto A_mat_stride = matrixStride(A);
  auto B_mat_stride = matrixStride(B);
  auto batch_size = cuda_int_cast(batchCount(A), "batch_size");
  auto m = cuda_int_cast(A.size(-2), "m");
  auto n = cuda_int_cast(A.size(-1), "n");
  auto nrhs = cuda_int_cast(B.size(-1), "nrhs");
  auto lda = std::max<int>(1, m);
  auto alpha = scalar_t{1};

  // cuBLAS batched trsm requires input to be the device array of pointers to device single matrices
  Tensor A_ptr_array = get_device_pointers<scalar_t>(A);
  Tensor B_ptr_array = get_device_pointers<scalar_t>(B);
  auto A_ptr_array_data = reinterpret_cast<scalar_t**>(A_ptr_array.data_ptr());
  auto B_ptr_array_data = reinterpret_cast<scalar_t**>(B_ptr_array.data_ptr());

  auto handle = at::cuda::getCurrentCUDABlasHandle();
  at::cuda::blas::trsmBatched(handle, side, uplo, trans, diag, n, nrhs, &alpha, A_ptr_array_data, lda, B_ptr_array_data, lda, batch_size);
}
// Type-dispatching wrapper over apply_triangular_solve_batched; 'infos' is
// unused because trsmBatched reports no per-matrix status.
void triangular_solve_batched_cublas(Tensor& A, Tensor& B, Tensor& infos, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
  (void)infos; // unused
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(A.scalar_type(), "triangular_solve_cuda", [&]{
    apply_triangular_solve_batched<scalar_t>(A, B, upper, transpose, conjugate_transpose, unitriangular);
  });
}
template <typename scalar_t>
inline void apply_gels_batched(const Tensor& A, Tensor& B, Tensor& infos) {
  // Batched least-squares solve min ||A x - b|| via cuBLAS gelsBatched; the
  // solutions overwrite the leading rows of B. Requires m >= n (overdetermined).
  // AMD ROCm backend is implemented via rewriting all CUDA calls to HIP
  // rocBLAS does not implement BLAS-like extensions of cuBLAS, they're in rocSOLVER
  // rocSOLVER is currently not used in ATen, therefore we raise an error in this case
#ifndef CUDART_VERSION
  TORCH_CHECK(false, "torch.linalg.lstsq: Batched version is supported only with cuBLAS backend.")
#else
  auto trans = HIPBLAS_OP_N;
  auto m = cuda_int_cast(A.size(-2), "m");
  auto n = cuda_int_cast(A.size(-1), "n");

  auto nrhs = cuda_int_cast(B.size(-1), "nrhs");
  // cuBLAS from cuda10 and older doesn't work with nrhs == 0 (cuda11 works)
  // so we need to put this early return
  if (nrhs == 0) {
    return;
  }

  auto batch_size = cuda_int_cast(batchCount(B), "batch_size");
  auto lda = std::max<int>(1, m);
  auto ldb = std::max<int>(1, m);

  // cuBLAS's requirement
  TORCH_CHECK(
    m >= n,
    "torch.linalg.lstsq: only overdetermined systems (input.size(-2) >= input.size(-1)) are allowed on CUDA with cuBLAS backend.");

  // cuBLAS documentation says:
  // Matrices Aarray[i] should not overlap; otherwise, undefined behavior is expected.
  // explicitly broadcast the batch dimensions of A
  IntArrayRef A_batch_sizes(A.sizes().data(), A.dim() - 2);
  IntArrayRef B_batch_sizes(B.sizes().data(), B.dim() - 2);
  std::vector<int64_t> expand_batch_portion = at::infer_size(A_batch_sizes, B_batch_sizes);
  expand_batch_portion.insert(expand_batch_portion.end(), {A.size(-2), A.size(-1)});
  Tensor A_expanded = A.expand({expand_batch_portion});
  Tensor A_broadcasted = cloneBatchedColumnMajor(A_expanded);

  // cuBLAS batched gels requires input to be the device array of pointers to device single matrices
  Tensor A_ptr_array = get_device_pointers<scalar_t>(A_broadcasted);
  Tensor B_ptr_array = get_device_pointers<scalar_t>(B);
  auto A_ptr_array_data = reinterpret_cast<scalar_t**>(A_ptr_array.data_ptr());
  auto B_ptr_array_data = reinterpret_cast<scalar_t**>(B_ptr_array.data_ptr());

  auto infos_data = infos.data_ptr<int>();
  auto handle = at::cuda::getCurrentCUDABlasHandle();
  int info;

  at::cuda::blas::gelsBatched<scalar_t>(
    handle, trans, m, n, nrhs,
    A_ptr_array_data, lda,
    B_ptr_array_data, ldb,
    &info,
    infos_data,
    batch_size);

  // negative info indicates that an argument to gelsBatched call is invalid
  TORCH_INTERNAL_ASSERT(info == 0);
#endif
}
// This is a type dispatching helper function for 'apply_gels_batched'
void gels_batched_cublas(const Tensor& a, Tensor& b, Tensor& infos) {
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(a.scalar_type(), "gels_batched_cublas", [&]{
    apply_gels_batched<scalar_t>(a, b, infos);
  });
}
#ifdef USE_CUSOLVER
// Builds a batch of identity matrices with the same batch shape/options as
// 'self', laid out column-major (via the transpose of a row-major diag_embed).
inline static Tensor column_major_identity_matrix_like(const Tensor& self) {
  auto size = self.sizes();
  auto size_slice = IntArrayRef(size.data(), size.size()-1);
  return at::ones(size_slice, self.options()).diag_embed().transpose(-2, -1);
}
// Inverts one n x n matrix: LU-factorize self_ptr in place (getrf), then solve
// against the identity already stored in self_inv_ptr (getrs).
template <typename scalar_t>
inline static void _apply_single_inverse_helper(scalar_t* self_ptr, scalar_t* self_inv_ptr, int* ipiv_ptr, int* info_getrf_ptr, int* info_getrs_ptr, int n, int lda) {
  // self_inv_ptr should already be an identity matrix

  auto handle = at::cuda::getCurrentCUDASolverDnHandle();
  at::cuda::solver::getrf<scalar_t>(handle, n, n, self_ptr, lda, ipiv_ptr, info_getrf_ptr);
  at::cuda::solver::getrs<scalar_t>(handle, n, n, self_ptr, lda, ipiv_ptr, self_inv_ptr, lda, info_getrs_ptr);
}
template <typename scalar_t>
static void apply_batched_inverse_lib(Tensor& self, Tensor& self_inv, Tensor& infos_getrf, Tensor& infos_getrs) {
  // Batched matrix inverse. Small batches / large matrices go through a loop
  // of cuSOLVER getrf+getrs; otherwise the cuBLAS batched getrf/getri path is
  // used. Factorization status lands in infos_getrf, solve status in infos_getrs.
  const int batch_size = cuda_int_cast(batchCount(self), "batchCount");
  const int n = cuda_int_cast(self.size(-2), "self.size(-2)");
  const int lda = std::max<int>(1, n);

  auto self_data = self.data_ptr<scalar_t>();
  auto self_mat_stride = matrixStride(self);
  auto self_inv_data = self_inv.data_ptr<scalar_t>();
  auto self_inv_mat_stride = matrixStride(self_inv);

  auto infos_getrf_data = infos_getrf.data_ptr<int>();
  auto infos_getrs_data = infos_getrs.data_ptr<int>();

  auto& allocator = *::c10::hip::HIPCachingAllocatorMasqueradingAsCUDA::get();

  // Heuristic: For small batch size or large matrix size, we use for-loop to iterate over the batches instead of
  // calling the batched cublas routine.
  if (batch_size <= 8 || /* batch_size > 8 && */ n >= 512) {
    for (int64_t i = 0; i < batch_size; i++) {
      auto dataPtr = allocator.allocate(sizeof(int) * lda); // pivot buffer, one entry per row
      int* pivot = reinterpret_cast<int*>(dataPtr.get());

      int* infos_getrf_working_ptr = &infos_getrf_data[i];
      int* infos_getrs_working_ptr = &infos_getrs_data[i];

      _apply_single_inverse_helper<scalar_t>(
        &self_data[i * self_mat_stride], &self_inv_data[i * self_inv_mat_stride], pivot, infos_getrf_working_ptr, infos_getrs_working_ptr, n, lda);
    }
  } else {
    // cublas batched kernels require input be "device array of device pointers"
    Tensor self_array = at::arange(
      reinterpret_cast<int64_t>(self_data),
      reinterpret_cast<int64_t>(&self_data[(batch_size-1) * self_mat_stride]) + 1,
      static_cast<int64_t>(self_mat_stride * sizeof(scalar_t)), self.options().dtype(at::kLong));
    Tensor self_inv_array = at::arange(
      reinterpret_cast<int64_t>(self_inv_data),
      reinterpret_cast<int64_t>(&self_inv_data[(batch_size-1) * self_inv_mat_stride]) + 1,
      static_cast<int64_t>(self_inv_mat_stride * sizeof(scalar_t)), self.options().dtype(at::kLong));

    auto dataPtr = allocator.allocate(sizeof(int)*batch_size*lda); // pivots for all batches
    int* ipiv_array = reinterpret_cast<int*>(dataPtr.get());

    at::cuda::blas::getrfBatched<scalar_t>(n, reinterpret_cast<scalar_t**>(self_array.data_ptr()), lda,
      ipiv_array, infos_getrf_data, batch_size);

    at::cuda::blas::getriBatched<scalar_t>(n, reinterpret_cast<scalar_t**>(self_array.data_ptr()), lda,
      ipiv_array, reinterpret_cast<scalar_t**>(self_inv_array.data_ptr()), lda, infos_getrs_data, batch_size);
  }
}
// Inverts a single (non-batched) matrix via getrf + getrs; 'self_inv' must
// already hold the identity (see _apply_single_inverse_helper).
template <typename scalar_t>
static void apply_single_inverse_lib(const Tensor& self, Tensor& self_inv, Tensor& infos_getrf, Tensor& infos_getrs) {
  int n = cuda_int_cast(self.size(-2), "self.size(-2)");
  int lda = std::max<int>(1, n);

  Tensor ipiv = at::empty({lda}, self.options().dtype(at::kInt));

  _apply_single_inverse_helper<scalar_t>(
    self.data_ptr<scalar_t>(), self_inv.data_ptr<scalar_t>(), ipiv.data_ptr<int>(), infos_getrf.data_ptr<int>(), infos_getrs.data_ptr<int>(), n, lda);
}
// This is a type dispatching helper function for 'apply_batched_inverse_lib' and 'apply_single_inverse_lib'
Tensor& _linalg_inv_out_helper_cuda_lib(Tensor& result, Tensor& infos_getrf, Tensor& infos_getrs) {
  // assuming result is in column major order and contains the matrices to invert
  Tensor input_working_copy = cloneBatchedColumnMajor(result);

  // for getrf + getrs (cusolver path)
  // result should be filled with identity matrices
  result.zero_();
  result.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1);

  // NOTE(review): batch_size is not used below; it appears to be kept only for
  // the overflow check inside cuda_int_cast — confirm before removing.
  const int batch_size = cuda_int_cast(batchCount(result), "batchCount");

  if (result.dim() > 2) {
    AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{
      apply_batched_inverse_lib<scalar_t>(
        input_working_copy, result, infos_getrf, infos_getrs);
    });
  } else {
    AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{
      apply_single_inverse_lib<scalar_t>(input_working_copy, result, infos_getrf, infos_getrs);
    });
  }
  return result;
}
// entrance of calculations of `inverse` using cusolver getrf + getrs, cublas getrfBatched + getriBatched
// Returns a freshly-allocated inverse; raises (via batchCheckErrors, which
// syncs with the device) if any factorization/solve reported failure.
Tensor _inverse_helper_cuda_lib(const Tensor& self) {
  Tensor self_working_copy = cloneBatchedColumnMajor(self);
  Tensor self_inv_working_copy = column_major_identity_matrix_like(self_working_copy);
  const int batch_size = cuda_int_cast(batchCount(self), "batchCount");

  if (self.dim() > 2 && batch_size > 1) {
    Tensor infos_getrf = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
    Tensor infos_getrs = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
    AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
      apply_batched_inverse_lib<scalar_t>(
        self_working_copy, self_inv_working_copy, infos_getrf, infos_getrs);
    });
    batchCheckErrors(infos_getrf, "inverse_cuda");
    batchCheckErrors(infos_getrs, "inverse_cuda");
  } else {
    Tensor infos_getrf = at::zeros({1}, self.options().dtype(kInt));
    Tensor infos_getrs = at::zeros({1}, self.options().dtype(kInt));
    AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
      apply_single_inverse_lib<scalar_t>(self_working_copy, self_inv_working_copy, infos_getrf, infos_getrs);
    });
    batchCheckErrors(infos_getrf, "inverse_cuda");
    batchCheckErrors(infos_getrs, "inverse_cuda");
  }

  return self_inv_working_copy;
}
// call cusolver gesvdj function to calculate svd
// One gesvdj call per batch entry; per-matrix convergence status is written to
// the corresponding element of 'infos'. A fresh gesvdj_params is created and
// destroyed per iteration (default tolerance/sweeps; see commented setters).
template<typename scalar_t>
inline static void _apply_svd_lib_gesvdj(const Tensor& self, Tensor& U, Tensor& S, Tensor& VT, Tensor& infos, bool compute_uv, bool some) {
  using value_t = typename c10::scalar_value_type<scalar_t>::type;
  auto self_data = self.data_ptr<scalar_t>();
  auto U_data = U.data_ptr<scalar_t>();
  auto S_data = S.data_ptr<value_t>();
  auto VT_data = VT.data_ptr<scalar_t>();
  auto self_stride = matrixStride(self);
  auto U_stride = matrixStride(U);
  auto S_stride = S.size(-1);
  auto VT_stride = matrixStride(VT);
  int batchsize = cuda_int_cast(batchCount(self), "batch size");
  int m = cuda_int_cast(self.size(-2), "m");
  int n = cuda_int_cast(self.size(-1), "n");
  int lda = std::max<int>(1, m);
  int ldvt = std::max<int>(1, n);

  for(int i = 0; i < batchsize; i++){
    // gesvdj_params controls the numerical accuracy of cusolver gesvdj iterations on GPU
    hipsolverGesvdjInfo_t gesvdj_params;
    TORCH_CUSOLVER_CHECK(hipsolverDnCreateGesvdjInfo(&gesvdj_params));
    // TORCH_CUSOLVER_CHECK(hipsolverDnXgesvdjSetTolerance(gesvdj_params, 1.0e-7));
    // TORCH_CUSOLVER_CHECK(hipsolverDnXgesvdjSetMaxSweeps(gesvdj_params, 15));

    auto handle = at::cuda::getCurrentCUDASolverDnHandle();
    auto jobz = compute_uv ? HIPSOLVER_EIG_MODE_VECTOR : HIPSOLVER_EIG_MODE_NOVECTOR;
    at::cuda::solver::gesvdj<scalar_t>(
      handle, jobz, /*econ=*/ some ? 1 : 0, m, n,
      self_data + i * self_stride,
      lda,
      S_data + i * S_stride,
      U_data + i * U_stride,
      lda,
      VT_data + i * VT_stride,
      ldvt,
      infos.data_ptr<int>() + i,
      gesvdj_params
    );

    TORCH_CUSOLVER_CHECK(hipsolverDnDestroyGesvdjInfo(gesvdj_params));
  }
}
// wrapper around _apply_svd_lib_gesvdj that handles dtype dispatch,
// creates a working copy of the input, and creates V^H from the V returned by gesvdj
inline static void apply_svd_lib_gesvdj(const Tensor& self, Tensor& U, Tensor& S, Tensor& VT, Tensor& infos, bool compute_uv, bool some) {
  const int64_t m = self.size(-2);
  const int64_t n = self.size(-1);
  Tensor self_working_copy = cloneBatchedColumnMajor(self);
  // gesvdj writes V; viewing VT transposed makes the write land as V^H.
  VT = VT.transpose(-2, -1); // gesvdj returns V instead of V^H

  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda_gesvdj", [&] {
    _apply_svd_lib_gesvdj<scalar_t>(self_working_copy, U, S, VT, infos, compute_uv, some);
  });
}
// call cusolver gesvdj batched function to calculate svd
// Single gesvdjBatched call over the whole batch; only valid for matrices with
// both dimensions <= 32 (asserted below). Per-matrix status goes to 'infos'.
template<typename scalar_t>
inline static void _apply_svd_lib_gesvdjBatched(const Tensor& self, Tensor& U, Tensor& S, Tensor& VT, Tensor& infos, bool compute_uv) {
  using value_t = typename c10::scalar_value_type<scalar_t>::type;
  auto self_data = self.data_ptr<scalar_t>();
  auto U_data = U.data_ptr<scalar_t>();
  auto S_data = S.data_ptr<value_t>();
  auto VT_data = VT.data_ptr<scalar_t>();
  auto self_stride = matrixStride(self);
  auto U_stride = matrixStride(U);
  auto S_stride = S.size(-1);
  auto VT_stride = matrixStride(VT);
  int batchsize = cuda_int_cast(batchCount(self), "batch size");
  int m = cuda_int_cast(self.size(-2), "m");
  int n = cuda_int_cast(self.size(-1), "n");
  int lda = std::max<int>(1, m);
  int ldvt = std::max<int>(1, n);

  TORCH_INTERNAL_ASSERT(m <= 32 && n <= 32, "gesvdjBatched requires both matrix dimensions not greater than 32, but got "
                        "m = ", m, " n = ", n);

  // gesvdj_params controls the numerical accuracy of cusolver gesvdj iterations on GPU
  hipsolverGesvdjInfo_t gesvdj_params;
  TORCH_CUSOLVER_CHECK(hipsolverDnCreateGesvdjInfo(&gesvdj_params));
  // TORCH_CUSOLVER_CHECK(hipsolverDnXgesvdjSetTolerance(gesvdj_params, 1.0e-7));
  // TORCH_CUSOLVER_CHECK(hipsolverDnXgesvdjSetMaxSweeps(gesvdj_params, 15));
  TORCH_CUSOLVER_CHECK(hipsolverDnXgesvdjSetSortEig(gesvdj_params, 1));

  auto handle = at::cuda::getCurrentCUDASolverDnHandle();
  auto jobz = compute_uv ? HIPSOLVER_EIG_MODE_VECTOR : HIPSOLVER_EIG_MODE_NOVECTOR;
  at::cuda::solver::gesvdjBatched<scalar_t>(
    handle, jobz, m, n, self_data, lda, S_data, U_data, lda, VT_data, ldvt,
    infos.data_ptr<int>(), gesvdj_params, batchsize
  );

  TORCH_CUSOLVER_CHECK(hipsolverDnDestroyGesvdjInfo(gesvdj_params));
}
// wrapper around _apply_svd_lib_gesvdjBatched that handles dtype dispatch,
// creates a working copy of the input, and creates V^H from the V returned by gesvdj
inline static void apply_svd_lib_gesvdjBatched(const Tensor& self, Tensor& U, Tensor& S, Tensor& VT, Tensor& infos, bool compute_uv) {
  const int64_t m = self.size(-2);
  const int64_t n = self.size(-1);
  Tensor self_working_copy = cloneBatchedColumnMajor(self);
  // gesvdjBatched writes V; viewing VT transposed makes the write land as V^H.
  VT = VT.transpose(-2, -1); // gesvdj returns V instead of V^H

  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda_gesvdjBatched", [&] {
    _apply_svd_lib_gesvdjBatched<scalar_t>(self_working_copy, U, S, VT, infos, compute_uv);
  });
}
// entrance of calculations of `svd` using cusolver gesvdj and gesvdjBatched
// Dispatches between the batched (small square-ish matrices) and looped gesvdj
// paths, then post-processes (U, S, V^H) into the (U, S, V) triple that
// torch.svd returns. A device-host sync happens in batchCheckErrors.
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda_lib(const Tensor& self, bool some, bool compute_uv) {
  const int64_t batch_size = batchCount(self);
  at::Tensor infos = at::zeros({batch_size}, self.options().dtype(at::kInt));
  const int64_t m = self.size(-2);
  const int64_t n = self.size(-1);
  // fix: was '::min(m, n)' — nothing named 'min' is declared at global scope,
  // so qualify with std::.
  const int64_t k = std::min(m, n);

  Tensor U_working_copy, S_working_copy, VT_working_copy;
  std::tie(U_working_copy, S_working_copy, VT_working_copy) = \
      _create_U_S_VT(self, some, compute_uv, /* svd_use_cusolver = */ true);

  // U, S, V working copies are already column majored now

  // heuristic for using `gesvdjBatched` over `gesvdj`
  if (m <= 32 && n <= 32 && batch_size > 1 && (!some || m == n)) {
    apply_svd_lib_gesvdjBatched(self, U_working_copy, S_working_copy, VT_working_copy, infos, compute_uv);
  } else {
    apply_svd_lib_gesvdj(self, U_working_copy, S_working_copy, VT_working_copy, infos, compute_uv, some);
  }

  // A device-host sync will be performed.
  batchCheckErrors(infos, "svd_cuda");

  if (!compute_uv) {
    VT_working_copy.zero_();
    U_working_copy.zero_();
  }

  if (some) {
    VT_working_copy = VT_working_copy.narrow(-2, 0, k);
  }

  // so far we have computed VT, but torch.svd returns V instead. Adjust accordingly.
  VT_working_copy.transpose_(-2, -1);
  return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy);
}
// Implementation of Cholesky decomposition using looped cusolverDn<T>potrf or hipsolverDnXpotrf (64-bit)
// Factors each matrix of the batch in place; per-matrix status goes to 'infos'.
// NOTE(review): inside USE_CUSOLVER_64_BIT the Create/DestroyParams calls use the
// un-prefixed cusolverDn* names while everything else is hipsolver* — presumably
// that branch is CUDA-only and never compiled under ROCm; confirm.
template<typename scalar_t>
inline static void apply_cholesky_cusolver_potrf_looped(const Tensor& self_working_copy, bool upper, const Tensor& infos) {
  auto handle = at::cuda::getCurrentCUDASolverDnHandle();
  const auto uplo = upper ? HIPBLAS_FILL_MODE_UPPER : HIPBLAS_FILL_MODE_LOWER;
  const int64_t n = self_working_copy.size(-1);
  const int64_t lda = std::max<int64_t>(1, n);
  const int64_t batch_size = batchCount(self_working_copy);
  const int64_t matrix_stride = matrixStride(self_working_copy);

  scalar_t* self_working_copy_ptr = self_working_copy.data_ptr<scalar_t>();
  int* infos_ptr = infos.data_ptr<int>();

#ifdef USE_CUSOLVER_64_BIT
  size_t worksize_device;
  size_t worksize_host;
  hipsolverDnParams_t params;
  hipDataType datatype = at::cuda::solver::get_cusolver_datatype<scalar_t>();
  TORCH_CUSOLVER_CHECK(cusolverDnCreateParams(&params));
  at::cuda::solver::xpotrf_buffersize(handle, params, uplo, n, datatype, nullptr, lda, datatype, &worksize_device, &worksize_host);

  // allocate workspace storage
  auto& device_allocator = *at::cuda::getCUDADeviceAllocator();
  auto workdata_device = device_allocator.allocate(worksize_device * batch_size);
  void* workdata_device_ptr = workdata_device.get();
  auto& host_allocator = *at::getCPUAllocator();
  auto workdata_host = host_allocator.allocate(worksize_host * batch_size);
  void* workdata_host_ptr = workdata_host.get();

  for (int64_t i = 0; i < batch_size; i++) {
    at::cuda::solver::xpotrf(
      handle, params, uplo, n, datatype,
      self_working_copy_ptr + i * matrix_stride,
      lda, datatype,
      (char*)workdata_device_ptr + i * worksize_device, worksize_device,
      (char*)workdata_host_ptr + i * worksize_host, worksize_host,
      infos_ptr + i
    );
  }

  TORCH_CUSOLVER_CHECK(cusolverDnDestroyParams(params));
#else // USE_CUSOLVER_64_BIT
  int n_32 = cuda_int_cast(n, "n");
  int lda_32 = cuda_int_cast(lda, "lda");
  int lwork;
  at::cuda::solver::potrf_buffersize<scalar_t>(
    handle, uplo, n_32, nullptr, lda_32, &lwork);

   // allocate workspace storage
  auto& allocator = *at::cuda::getCUDADeviceAllocator();
  auto work_data = allocator.allocate(sizeof(scalar_t)*lwork * batch_size);
  scalar_t* work_data_ptr = static_cast<scalar_t*>(work_data.get());

  for (int64_t i = 0; i < batch_size; i++) {
    at::cuda::solver::potrf<scalar_t>(
      handle, uplo, n_32,
      self_working_copy_ptr + i * matrix_stride,
      lda_32,
      work_data_ptr + i * lwork,
      lwork,
      infos_ptr + i
    );
  }
#endif // USE_CUSOLVER_64_BIT
}
// Cholesky decomposition through the batched cusolverDn<T>potrfBatched routine.
// Warning: cusolverDn<T>potrfBatched misbehaves when the matrix size or the
// batch size is zero. If you write your own C++ extension and use this
// function, make sure you do a zero numel check on the input first.
template<typename scalar_t>
inline static void apply_cholesky_cusolver_potrfBatched(const Tensor& self_working_copy, bool upper, const Tensor& infos) {
  const auto fill_mode = upper ? HIPBLAS_FILL_MODE_UPPER : HIPBLAS_FILL_MODE_LOWER;
  const int order = cuda_int_cast(self_working_copy.size(-1), "n");
  const int leading_dim = std::max<int>(1, order);
  const int num_matrices = cuda_int_cast(batchCount(self_working_copy), "batch_size");

  // The batched cusolver entry point expects a "device array of device
  // pointers", one pointer per matrix in the batch.
  Tensor matrix_ptr_array = get_device_pointers<scalar_t>(self_working_copy);

  auto handle = at::cuda::getCurrentCUDASolverDnHandle();
  at::cuda::solver::potrfBatched<scalar_t>(
    handle, fill_mode, order,
    reinterpret_cast<scalar_t**>(matrix_ptr_array.data_ptr()),
    leading_dim, infos.data_ptr<int>(), num_matrices);
}
// Entry point for Cholesky factorization with the cuSOLVER backend.
// Uses the batched kernel when it is enabled and the input holds more than
// one matrix; otherwise falls back to the looped single-matrix path.
void cholesky_helper_cusolver(const Tensor& input, bool upper, const Tensor& info) {
  // Nothing to factorize for empty tensors; this also sidesteps the
  // zero-size limitation of potrfBatched.
  if (input.numel() == 0) {
    return;
  }

  const bool prefer_batched = use_cusolver_potrf_batched_ && batchCount(input) > 1;
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "cholesky_cusolver", [&] {
    if (prefer_batched) {
      apply_cholesky_cusolver_potrfBatched<scalar_t>(input, upper, info);
    } else {
      apply_cholesky_cusolver_potrf_looped<scalar_t>(input, upper, info);
    }
  });
}
// Solves A * X = B (potrs) given the Cholesky factor of A, looping over the
// batch with one cuSOLVER call per matrix.
// - `self_working_copy`: B on entry, overwritten with the solution X.
// - `A_column_major_copy`: batch of column-major Cholesky factors.
// - `infos`: single int receiving only invalid-argument diagnostics; potrs
//   does not report per-matrix numerical failures.
template<typename scalar_t>
inline static void apply_cholesky_cusolver_potrs(Tensor& self_working_copy, const Tensor& A_column_major_copy, bool upper, Tensor& infos) {
  auto handle = at::cuda::getCurrentCUDASolverDnHandle();
  const auto uplo = upper ? HIPBLAS_FILL_MODE_UPPER : HIPBLAS_FILL_MODE_LOWER;
  const int64_t n = self_working_copy.size(-2);
  const int64_t nrhs = self_working_copy.size(-1);
  const int64_t lda = std::max<int64_t>(1, n);
  const int64_t batch_size = batchCount(self_working_copy);
  const int64_t self_matrix_stride = matrixStride(self_working_copy);
  scalar_t* self_working_copy_ptr = self_working_copy.data_ptr<scalar_t>();

  const scalar_t* A_ptr = A_column_major_copy.data_ptr<scalar_t>();
  const int64_t A_matrix_stride = matrixStride(A_column_major_copy);
  // A is square, so its last dimension equals n; used as the leading
  // dimension of the right-hand-side matrix B.
  const int64_t ldb = std::max<int64_t>(1, A_column_major_copy.size(-1));

  int* infos_ptr = infos.data_ptr<int>();

#ifdef USE_CUSOLVER_64_BIT
  hipsolverDnParams_t params;
  hipDataType datatype = at::cuda::solver::get_cusolver_datatype<scalar_t>();
  TORCH_CUSOLVER_CHECK(cusolverDnCreateParams(&params));

  for (int64_t i = 0; i < batch_size; i++) {
    at::cuda::solver::xpotrs(
      handle, params, uplo, n, nrhs, datatype,
      A_ptr + i * A_matrix_stride,
      lda, datatype,
      self_working_copy_ptr + i * self_matrix_stride,
      ldb,
      infos_ptr  // not offset by i: potrs only flags wrong arguments
    );
  }

  TORCH_CUSOLVER_CHECK(cusolverDnDestroyParams(params));
#else // USE_CUSOLVER_64_BIT
  // Legacy 32-bit API: sizes must fit in int.
  int n_32 = cuda_int_cast(n, "n");
  int nrhs_32 = cuda_int_cast(nrhs, "nrhs");
  int lda_32 = cuda_int_cast(lda, "lda");
  int ldb_32 = cuda_int_cast(ldb, "ldb");

  for (int64_t i = 0; i < batch_size; i++) {
    at::cuda::solver::potrs<scalar_t>(
      handle, uplo, n_32, nrhs_32,
      A_ptr + i * A_matrix_stride,
      lda_32,
      self_working_copy_ptr + i * self_matrix_stride,
      ldb_32,
      infos_ptr
    );
  }
#endif // USE_CUSOLVER_64_BIT
}
// This code path is only dispatched to if MAGMA is not linked in the pytorch build.
// cusolverDn<t>potrsBatched only supports nrhs == 1
// Solves A * x = b for every matrix/vector pair in the batch in one call.
// `self_working_copy` holds b on entry and the solution on exit.
template<typename scalar_t>
inline static void apply_cholesky_cusolver_potrsBatched(Tensor& self_working_copy, const Tensor& A_column_major_copy, bool upper, Tensor& infos) {
  auto handle = at::cuda::getCurrentCUDASolverDnHandle();
  const auto uplo = upper ? HIPBLAS_FILL_MODE_UPPER : HIPBLAS_FILL_MODE_LOWER;
  const int64_t n = self_working_copy.size(-2);
  const int64_t nrhs = self_working_copy.size(-1);
  const int64_t lda = std::max<int64_t>(1, n);
  const int64_t batch_size = batchCount(self_working_copy);
  const int64_t self_matrix_stride = matrixStride(self_working_copy);
  scalar_t* self_working_copy_ptr = self_working_copy.data_ptr<scalar_t>();

  const scalar_t* A_ptr = A_column_major_copy.data_ptr<scalar_t>();
  const int64_t A_matrix_stride = matrixStride(A_column_major_copy);
  // A is square, so its last dimension equals n; used as ldb for B.
  const int64_t ldb = std::max<int64_t>(1, A_column_major_copy.size(-1));

  int* infos_ptr = infos.data_ptr<int>();

  // The batched kernel wants device arrays of per-matrix device pointers.
  auto self_ptr_array = get_device_pointers<scalar_t>(self_working_copy);
  auto A_ptr_array = get_device_pointers<scalar_t>(A_column_major_copy);

  at::cuda::solver::potrsBatched(
    handle, uplo,
    cuda_int_cast(n, "n"),
    cuda_int_cast(nrhs, "nrhs"),
    reinterpret_cast<scalar_t**>(A_ptr_array.data_ptr()),
    cuda_int_cast(lda, "lda"),
    reinterpret_cast<scalar_t**>(self_ptr_array.data_ptr()),
    cuda_int_cast(ldb, "ldb"),
    infos_ptr,
    cuda_int_cast(batch_size, "batch_size")
  );
}
// Solves A X = B given the Cholesky factor of A, using cuSOLVER potrs.
// The batched kernel is used only for multi-matrix inputs with a single
// right-hand side, since cusolverDn<t>potrsBatched supports nrhs == 1 only.
Tensor _cholesky_solve_helper_cuda_cusolver(const Tensor& self, const Tensor& A, bool upper) {
  at::Tensor solution = cloneBatchedColumnMajor(self);
  at::Tensor factor = cloneBatchedColumnMajor(A);
  at::Tensor infos = at::zeros({1}, self.options().dtype(at::kInt));

  const bool multi_batch = batchCount(self) > 1;
  const bool single_rhs = solution.size(-1) == 1;
  if (multi_batch && single_rhs) {
    AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_cuda_potrs_batched", [&] {
      apply_cholesky_cusolver_potrsBatched<scalar_t>(solution, factor, upper, infos);
    });
  } else {
    AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_cuda_potrs", [&] {
      apply_cholesky_cusolver_potrs<scalar_t>(solution, factor, upper, infos);
    });
  }

  // potrs/potrsBatched only report wrong arguments through `infos`, never
  // matrix singularity etc., so a debug-only check is enough.
  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.item().toInt() == 0);

  return solution;
}
// Computes the inverse from a Cholesky factor by solving A * X = I via potrs.
// `result` holds the Cholesky factor on entry and the inverse on exit.
// NOTE(review): the `infos` argument is currently not written to -- see the
// comment at the bottom about avoiding a device-host sync.
void _cholesky_inverse_cusolver_potrs_based(Tensor& result, Tensor& infos, bool upper) {
  at::Tensor input_working_copy = cloneBatchedColumnMajor(result);
  at::Tensor infos_gpu = at::zeros({1}, result.options().dtype(at::kInt));
  // Turn `result` into the identity so potrs computes X = A^{-1}.
  result.fill_(0);
  result.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1);
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "cholesky_cuda_potri", [&] {
    apply_cholesky_cusolver_potrs<scalar_t>(result, input_working_copy, upper, infos_gpu);
  });

  // Debug only: info of cusolver potrs only check if the i-th parameter is wrong
  // Function argument `infos` is a CPU tensor, the following copy will cause a device-host sync.
  // infos.copy_(infos_gpu);
}
// Entry point for cholesky_inverse on CUDA via cuSOLVER.
// `result` is modified in place and returned to allow call chaining.
Tensor& cholesky_inverse_kernel_impl_cusolver(Tensor &result, Tensor& infos, bool upper) {
  _cholesky_inverse_cusolver_potrs_based(result, infos, upper);
  return result;
}
/*
The geqrf function computes the QR decomposition of a m x n matrix A.
Args:
* `A` - [in] Tensor with matrices for QR decomposition,
[out] Tensor containing R in the upper triangle of A
and elementary reflectors below the main diagonal of A
* `tau` - Tensor containing the magnitudes of the elementary reflectors
* `m` - The number of rows of `input` to consider
* `n` - The number of columns of `input` to consider (actual sizes of `input` could be larger)
For further details, please see the cuSOLVER documentation for GEQRF.
*/
// Computes the QR factorization of each matrix in `A` in place via cuSOLVER
// geqrf (see the documentation block above for the parameter contract):
// R ends up in the upper triangle of A, the Householder reflectors below the
// diagonal, and their scalar factors in `tau`.
template <typename scalar_t>
static void apply_geqrf(const Tensor& A, const Tensor& tau) {
  int64_t m = A.size(-2);
  int64_t n = A.size(-1);
  int64_t lda = std::max<int64_t>(1, m);
  int64_t batch_size = batchCount(A);

  auto A_stride = matrixStride(A);
  auto tau_stride = tau.size(-1);

  auto A_data = A.data_ptr<scalar_t>();
  auto tau_data = tau.data_ptr<scalar_t>();

  auto infos = at::zeros({1}, A.options().dtype(at::kInt));
  auto infos_data = infos.data_ptr<int>();

  // get the optimal work size and allocate workspace tensor
#ifdef USE_CUSOLVER_64_BIT
  size_t worksize_device; // workspaceInBytesOnDevice
  size_t worksize_host; // workspaceInBytesOnHost
  hipsolverDnParams_t params = NULL; // use default algorithm (currently it's the only option)
  at::cuda::solver::xgeqrf_bufferSize<scalar_t>(
      at::cuda::getCurrentCUDASolverDnHandle(),
      params,
      m,
      n,
      A_data,
      lda,
      tau_data,
      &worksize_device,
      &worksize_host);
#else
  int lwork;
  int m_32 = cuda_int_cast(m, "m");
  int n_32 = cuda_int_cast(n, "n");
  int lda_32 = cuda_int_cast(lda, "lda");
  at::cuda::solver::geqrf_bufferSize<scalar_t>(
      at::cuda::getCurrentCUDASolverDnHandle(), m_32, n_32, A_data, lda_32, &lwork);
#endif // USE_CUSOLVER_64_BIT

  // One solver call per batch element; a fresh workspace is allocated each
  // iteration (the caching allocator makes repeated allocations cheap).
  for (decltype(batch_size) i = 0; i < batch_size; i++) {
    scalar_t* A_working_ptr = &A_data[i * A_stride];
    scalar_t* tau_working_ptr = &tau_data[i * tau_stride];
    auto handle = at::cuda::getCurrentCUDASolverDnHandle();

#ifdef USE_CUSOLVER_64_BIT
    // allocate workspace storage on device and host
    auto& device_allocator = *at::cuda::getCUDADeviceAllocator();
    auto work_device_data = device_allocator.allocate(worksize_device);
    auto& host_allocator = *at::getCPUAllocator();
    auto work_host_data = host_allocator.allocate(worksize_host);
    at::cuda::solver::xgeqrf<scalar_t>(
        handle,
        params,
        m,
        n,
        A_working_ptr,
        lda,
        tau_working_ptr,
        static_cast<scalar_t*>(work_device_data.get()),
        worksize_device,
        static_cast<scalar_t*>(work_host_data.get()),
        worksize_host,
        infos_data);
#else
    // allocate workspace storage on device
    auto& allocator = *at::cuda::getCUDADeviceAllocator();
    auto work_data = allocator.allocate(sizeof(scalar_t) * std::max<int>(1, lwork));
    at::cuda::solver::geqrf<scalar_t>(
        handle,
        m_32,
        n_32,
        A_working_ptr,
        lda_32,
        tau_working_ptr,
        static_cast<scalar_t*>(work_data.get()),
        lwork,
        infos_data);
#endif // USE_CUSOLVER_64_BIT
  }

  // info from geqrf only reports if the i-th parameter is wrong, not about the matrix singularity
  // so we don't need to check it all the time
  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.item().toInt() == 0);
}
// Type-dispatching wrapper: runs apply_geqrf for the input's dtype.
void geqrf_cusolver(const Tensor& input, const Tensor& tau) {
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
      input.scalar_type(), "geqrf_cuda", [&input, &tau] {
        apply_geqrf<scalar_t>(input, tau);
      });
}
/*
The ormqr function multiplies Q with another matrix from a sequence of
elementary reflectors, such as is produced by the geqrf function.
Args:
* `input` - Tensor with elementary reflectors below the diagonal,
encoding the matrix Q.
* `tau` - Tensor containing the magnitudes of the elementary
reflectors.
* `other` - [in] Tensor containing the matrix to be multiplied.
[out] result of the matrix multiplication with Q.
* `left` - bool, determining whether `other` is left- or right-multiplied with Q.
* `transpose` - bool, determining whether to transpose (or conjugate transpose) Q before multiplying.
For further details, please see the cuSOLVER documentation for ORMQR and UNMQR.
*/
// Multiplies `other` in place by Q (or its (conjugate) transpose), where Q is
// encoded by the reflectors in `input` and scalars in `tau` (see the
// documentation block above for the parameter contract). One cuSOLVER
// ormqr/unmqr call is issued per batch element.
template <typename scalar_t>
static void apply_ormqr(const Tensor& input, const Tensor& tau, const Tensor& other, bool left, bool transpose) {
  using value_t = typename c10::scalar_value_type<scalar_t>::type;
  auto side = left ? HIPBLAS_SIDE_LEFT : HIPBLAS_SIDE_RIGHT;
  // Complex types use the conjugate transpose (unmqr semantics).
  auto trans = transpose ? (input.is_complex() ? HIPBLAS_OP_C : HIPBLAS_OP_T) : HIPBLAS_OP_N;

  auto input_data = input.data_ptr<scalar_t>();
  auto tau_data = tau.data_ptr<scalar_t>();
  auto other_data = other.data_ptr<scalar_t>();

  auto input_matrix_stride = matrixStride(input);
  auto other_matrix_stride = matrixStride(other);
  auto tau_stride = tau.size(-1);
  auto batch_size = batchCount(input);
  auto m = cuda_int_cast(other.size(-2), "m");
  auto n = cuda_int_cast(other.size(-1), "n");
  auto k = cuda_int_cast(tau.size(-1), "k");  // number of elementary reflectors
  auto lda = std::max<int>(1, left ? m : n);
  auto ldc = std::max<int>(1, m);

  // get the optimal work size and allocate workspace tensor
  int lwork;
  at::cuda::solver::ormqr_bufferSize<scalar_t>(
    at::cuda::getCurrentCUDASolverDnHandle(), side, trans, m, n, k, input_data, lda, tau_data, other_data, ldc, &lwork);

  auto info = at::zeros({1}, input.options().dtype(at::kInt));
  auto info_data = info.data_ptr<int>();

  for (auto i = decltype(batch_size){0}; i < batch_size; i++) {
    scalar_t* input_working_ptr = &input_data[i * input_matrix_stride];
    scalar_t* other_working_ptr = &other_data[i * other_matrix_stride];
    scalar_t* tau_working_ptr = &tau_data[i * tau_stride];
    auto handle = at::cuda::getCurrentCUDASolverDnHandle();

    // allocate workspace storage
    auto& allocator = *at::cuda::getCUDADeviceAllocator();
    auto work_data = allocator.allocate(sizeof(scalar_t)*lwork);

    at::cuda::solver::ormqr<scalar_t>(
      handle, side, trans, m, n, k,
      input_working_ptr,
      lda,
      tau_working_ptr,
      other_working_ptr,
      ldc,
      static_cast<scalar_t*>(work_data.get()),
      lwork,
      info_data
    );

    // info from ormqr only reports if the i-th parameter is wrong
    // so we don't need to check it all the time
    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info.item().toInt() == 0);
  }
}
// Type-dispatching wrapper: runs apply_ormqr for the input's dtype.
void ormqr_cusolver(const Tensor& input, const Tensor& tau, const Tensor& other, bool left, bool transpose) {
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
      input.scalar_type(), "orgmr_cuda", [&input, &tau, &other, left, transpose] {
        apply_ormqr<scalar_t>(input, tau, other, left, transpose);
      });
}
/*
The orgqr function allows reconstruction of an orthogonal (or unitary) matrix Q,
from a sequence of elementary reflectors, such as produced by the geqrf function.
Args:
* `self` - Tensor with the directions of the elementary reflectors below the diagonal,
it will be overwritten with the result
* `tau` - Tensor containing the magnitudes of the elementary reflectors
For further details, please see the cuSOLVER documentation for ORGQR and UNGQR.
*/
// Reconstructs the explicit Q matrix from geqrf output in place (see the
// documentation block above for the parameter contract). One cuSOLVER
// orgqr/ungqr call per batch element.
template <typename scalar_t>
inline static void apply_orgqr(Tensor& self, const Tensor& tau) {
  using value_t = typename c10::scalar_value_type<scalar_t>::type;
  auto self_data = self.data_ptr<scalar_t>();
  auto tau_data = tau.data_ptr<scalar_t>();
  auto self_matrix_stride = matrixStride(self);
  auto batchsize = cuda_int_cast(batchCount(self), "batch size");
  auto m = cuda_int_cast(self.size(-2), "m");
  auto n = cuda_int_cast(self.size(-1), "n");
  auto k = cuda_int_cast(tau.size(-1), "k");  // number of elementary reflectors
  auto tau_stride = std::max<int>(1, k);
  auto lda = std::max<int>(1, m);

  // LAPACK's requirement
  TORCH_INTERNAL_ASSERT(m >= n);
  TORCH_INTERNAL_ASSERT(n >= k);

  // cuSOLVER doesn't compute anything for this case, which is wrong
  // the result should be a matrix with 1 on the diagonal
  if (k == 0) {
    self.fill_(0);
    self.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1);
    return;
  }

  // get the optimal work size and allocate workspace tensor
  int lwork;
  at::cuda::solver::orgqr_buffersize<scalar_t>(
    at::cuda::getCurrentCUDASolverDnHandle(), m, n, k, self_data, lda, tau_data, &lwork);

  auto info = at::zeros({1}, self.options().dtype(at::kInt));
  auto info_data = info.data_ptr<int>();

  for (auto i = decltype(batchsize){0}; i < batchsize; i++) {
    scalar_t* self_working_ptr = &self_data[i * self_matrix_stride];
    scalar_t* tau_working_ptr = &tau_data[i * tau_stride];
    auto handle = at::cuda::getCurrentCUDASolverDnHandle();

    // allocate workspace storage
    auto& allocator = *at::cuda::getCUDADeviceAllocator();
    auto work_data = allocator.allocate(sizeof(scalar_t)*lwork);

    at::cuda::solver::orgqr<scalar_t>(
      handle, m, n, k,
      self_working_ptr,
      lda,
      tau_working_ptr,
      static_cast<scalar_t*>(work_data.get()),
      lwork,
      info_data
    );

    // info from orgqr only reports if the i-th parameter is wrong
    // so we don't need to check it all the time
    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info.item().toInt() == 0);
  }
}
// Type-dispatching wrapper for apply_orgqr; `result` is modified in place and
// returned to allow chaining.
Tensor& orgqr_helper_cusolver(Tensor& result, const Tensor& tau) {
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
      result.scalar_type(), "orgqr_cuda", [&result, &tau] {
        apply_orgqr<scalar_t>(result, tau);
      });
  return result;
}
// Symmetric/Hermitian eigendecomposition via the divide-and-conquer solver
// (syevd/heevd), one cuSOLVER call per batch element.
// - `vectors`: input matrices; overwritten with eigenvectors when
//   `compute_eigenvectors` is true.
// - `values`: receives the eigenvalues (real-valued, hence `value_t`).
// - `infos`: one status int per batch element.
template <typename scalar_t>
static void apply_syevd(Tensor& values, Tensor& vectors, Tensor& infos, bool upper, bool compute_eigenvectors) {
  using value_t = typename c10::scalar_value_type<scalar_t>::type;

  hipblasFillMode_t uplo = upper ? HIPBLAS_FILL_MODE_UPPER : HIPBLAS_FILL_MODE_LOWER;
  hipsolverEigMode_t jobz = compute_eigenvectors ? HIPSOLVER_EIG_MODE_VECTOR : HIPSOLVER_EIG_MODE_NOVECTOR;

  int64_t n = vectors.size(-1);
  int64_t lda = std::max<int64_t>(1, n);
  int64_t batch_size = batchCount(vectors);

  auto vectors_stride = matrixStride(vectors);
  auto values_stride = values.size(-1);

  auto vectors_data = vectors.data_ptr<scalar_t>();
  auto values_data = values.data_ptr<value_t>();
  auto infos_data = infos.data_ptr<int>();

  // get the optimal work size and allocate workspace tensor
#ifdef USE_CUSOLVER_64_BIT
  size_t worksize_device; // workspaceInBytesOnDevice
  size_t worksize_host; // workspaceInBytesOnHost
  hipsolverDnParams_t params = NULL; // use default algorithm (currently it's the only option)
  at::cuda::solver::xsyevd_bufferSize<scalar_t>(
      at::cuda::getCurrentCUDASolverDnHandle(),
      params,
      jobz,
      uplo,
      n,
      vectors_data,
      lda,
      values_data,
      &worksize_device,
      &worksize_host);
#else
  int lwork;
  int n_32 = cuda_int_cast(n, "n");
  int lda_32 = cuda_int_cast(lda, "lda");
  at::cuda::solver::syevd_bufferSize<scalar_t>(
      at::cuda::getCurrentCUDASolverDnHandle(), jobz, uplo, n_32, vectors_data, lda_32, values_data, &lwork);
#endif // USE_CUSOLVER_64_BIT

  for (decltype(batch_size) i = 0; i < batch_size; i++) {
    scalar_t* vectors_working_ptr = &vectors_data[i * vectors_stride];
    value_t* values_working_ptr = &values_data[i * values_stride];
    int* info_working_ptr = &infos_data[i];
    auto handle = at::cuda::getCurrentCUDASolverDnHandle();

#ifdef USE_CUSOLVER_64_BIT
    // allocate workspace storage on device and host
    auto& device_allocator = *at::cuda::getCUDADeviceAllocator();
    auto work_device_data = device_allocator.allocate(worksize_device);
    auto& host_allocator = *at::getCPUAllocator();
    auto work_host_data = host_allocator.allocate(worksize_host);
    at::cuda::solver::xsyevd<scalar_t>(
        handle,
        params,
        jobz,
        uplo,
        n,
        vectors_working_ptr,
        lda,
        values_working_ptr,
        static_cast<scalar_t*>(work_device_data.get()),
        worksize_device,
        static_cast<scalar_t*>(work_host_data.get()),
        worksize_host,
        info_working_ptr);
#else
    // allocate workspace storage on device
    auto& allocator = *at::cuda::getCUDADeviceAllocator();
    auto work_data = allocator.allocate(sizeof(scalar_t) * lwork);
    at::cuda::solver::syevd<scalar_t>(
        handle,
        jobz,
        uplo,
        n_32,
        vectors_working_ptr,
        lda_32,
        values_working_ptr,
        static_cast<scalar_t*>(work_data.get()),
        lwork,
        info_working_ptr);
#endif // USE_CUSOLVER_64_BIT
  }
}
// Symmetric/Hermitian eigendecomposition via the Jacobi solver (syevj/heevj),
// one cuSOLVER call per batch element. Same in/out contract as apply_syevd.
template <typename scalar_t>
static void apply_syevj(Tensor& values, Tensor& vectors, Tensor& infos, bool upper, bool compute_eigenvectors) {
  using value_t = typename c10::scalar_value_type<scalar_t>::type;

  hipblasFillMode_t uplo = upper ? HIPBLAS_FILL_MODE_UPPER : HIPBLAS_FILL_MODE_LOWER;
  hipsolverEigMode_t jobz = compute_eigenvectors ? HIPSOLVER_EIG_MODE_VECTOR : HIPSOLVER_EIG_MODE_NOVECTOR;

  int n = cuda_int_cast(vectors.size(-1), "n");
  int lda = std::max<int>(1, n);
  auto batch_size = batchCount(vectors);

  auto vectors_stride = matrixStride(vectors);
  auto values_stride = values.size(-1);

  auto vectors_data = vectors.data_ptr<scalar_t>();
  auto values_data = values.data_ptr<value_t>();
  auto infos_data = infos.data_ptr<int>();

  // syevj_params controls the numerical accuracy of syevj
  // by default the tolerance is set to machine accuracy
  // the maximum number of iteration of Jacobi method by default is 100
  // cuSOLVER documentations says: "15 sweeps are good enough to converge to machine accuracy"
  // LAPACK has SVD routine based on similar Jacobi algorithm (gesvj) and there a maximum of 30 iterations is set
  // Let's use the default values for now
  hipsolverSyevjInfo_t syevj_params;
  TORCH_CUSOLVER_CHECK(hipsolverDnCreateSyevjInfo(&syevj_params));

  // get the optimal work size and allocate workspace tensor
  int lwork;
  at::cuda::solver::syevj_bufferSize<scalar_t>(
      at::cuda::getCurrentCUDASolverDnHandle(), jobz, uplo, n, vectors_data, lda, values_data, &lwork, syevj_params);

  for (decltype(batch_size) i = 0; i < batch_size; i++) {
    scalar_t* vectors_working_ptr = &vectors_data[i * vectors_stride];
    value_t* values_working_ptr = &values_data[i * values_stride];
    int* info_working_ptr = &infos_data[i];
    auto handle = at::cuda::getCurrentCUDASolverDnHandle();

    // allocate workspace storage on device
    auto& allocator = *at::cuda::getCUDADeviceAllocator();
    auto work_data = allocator.allocate(sizeof(scalar_t) * lwork);
    at::cuda::solver::syevj<scalar_t>(
        handle,
        jobz,
        uplo,
        n,
        vectors_working_ptr,
        lda,
        values_working_ptr,
        static_cast<scalar_t*>(work_data.get()),
        lwork,
        info_working_ptr,
        syevj_params);
  }
  TORCH_CUSOLVER_CHECK(hipsolverDnDestroySyevjInfo(syevj_params));
}
// Symmetric/Hermitian eigendecomposition via the batched Jacobi solver
// (syevjBatched), processing the whole batch with a single cuSOLVER call.
// Same in/out contract as apply_syevd.
template <typename scalar_t>
static void apply_syevj_batched(Tensor& values, Tensor& vectors, Tensor& infos, bool upper, bool compute_eigenvectors) {
  using value_t = typename c10::scalar_value_type<scalar_t>::type;

  hipblasFillMode_t uplo = upper ? HIPBLAS_FILL_MODE_UPPER : HIPBLAS_FILL_MODE_LOWER;
  hipsolverEigMode_t jobz = compute_eigenvectors ? HIPSOLVER_EIG_MODE_VECTOR : HIPSOLVER_EIG_MODE_NOVECTOR;

  int n = cuda_int_cast(vectors.size(-1), "n");
  int lda = std::max<int>(1, n);
  int batch_size = cuda_int_cast(batchCount(vectors), "batch_size");

  auto vectors_data = vectors.data_ptr<scalar_t>();
  auto values_data = values.data_ptr<value_t>();
  auto infos_data = infos.data_ptr<int>();

  // syevj_params controls the numerical accuracy of syevj
  // by default the tolerance is set to machine accuracy
  // the maximum number of iteration of Jacobi method by default is 100
  // cuSOLVER documentations says: "15 sweeps are good enough to converge to machine accuracy"
  // LAPACK has SVD routine based on similar Jacobi algorithm (gesvj) and there a maximum of 30 iterations is set
  // Let's use the default values for now
  hipsolverSyevjInfo_t syevj_params;
  TORCH_CUSOLVER_CHECK(hipsolverDnCreateSyevjInfo(&syevj_params));
  // Request eigenvalues sorted in ascending order.
  TORCH_CUSOLVER_CHECK(hipsolverDnXsyevjSetSortEig(syevj_params, 1));

  auto handle = at::cuda::getCurrentCUDASolverDnHandle();

  // get the optimal work size and allocate workspace tensor
  int lwork;
  at::cuda::solver::syevjBatched_bufferSize<scalar_t>(
      handle,
      jobz,
      uplo,
      n,
      vectors_data,
      lda,
      values_data,
      &lwork,
      syevj_params,
      batch_size);

  // allocate workspace storage on device
  auto& allocator = *at::cuda::getCUDADeviceAllocator();
  auto work_data = allocator.allocate(sizeof(scalar_t) * lwork);
  at::cuda::solver::syevjBatched<scalar_t>(
      handle,
      jobz,
      uplo,
      n,
      vectors_data,
      lda,
      values_data,
      static_cast<scalar_t*>(work_data.get()),
      lwork,
      infos_data,
      syevj_params,
      batch_size);
  TORCH_CUSOLVER_CHECK(hipsolverDnDestroySyevjInfo(syevj_params));
}
// Dispatch helper: runs the divide-and-conquer eigensolver (syevd) for the
// tensor's dtype.
static void linalg_eigh_cusolver_syevd(Tensor& eigenvalues, Tensor& eigenvectors, Tensor& infos, bool upper, bool compute_eigenvectors) {
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
      eigenvectors.scalar_type(), "linalg_eigh_cuda",
      [&eigenvalues, &eigenvectors, &infos, upper, compute_eigenvectors] {
        apply_syevd<scalar_t>(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors);
      });
}
// Dispatch helper: runs the Jacobi eigensolver (syevj) for the tensor's dtype.
static void linalg_eigh_cusolver_syevj(Tensor& eigenvalues, Tensor& eigenvectors, Tensor& infos, bool upper, bool compute_eigenvectors) {
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
      eigenvectors.scalar_type(), "linalg_eigh_cuda",
      [&eigenvalues, &eigenvectors, &infos, upper, compute_eigenvectors] {
        apply_syevj<scalar_t>(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors);
      });
}
// Chooses between the Jacobi (syevj) and divide-and-conquer (syevd)
// eigensolvers based on dtype and matrix size.
void linalg_eigh_cusolver(Tensor& eigenvalues, Tensor& eigenvectors, Tensor& infos, bool upper, bool compute_eigenvectors) {
  // TODO: syevj_batched should be added here, but at least for CUDA 11.2 it
  // contains a bug leading to incorrect results. See
  // https://github.com/pytorch/pytorch/pull/53040#issuecomment-793626268 and
  // https://github.com/cupy/cupy/issues/4847
  //
  // syevj is better than syevd for float32 dtype and matrix sizes
  // 32x32 - 512x512. See
  // https://github.com/pytorch/pytorch/pull/53040#issuecomment-788264724
  const auto matrix_size = eigenvectors.size(-1);
  const bool jacobi_preferred =
      eigenvectors.scalar_type() == at::kFloat && matrix_size >= 32 && matrix_size <= 512;
  if (jacobi_preferred) {
    linalg_eigh_cusolver_syevj(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors);
  } else {
    linalg_eigh_cusolver_syevd(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors);
  }
}
// The 'apply_' word is used for templated by dtype functions that call an API routine
// underneath. Since the cusolver API has a slightly different structure we do not prepend
// apply_ to this function.
// Computes the LU factorization of `self` in place with cusolverDn getrf,
// issuing one solver call per matrix in the batch.
// - `pivots`: filled with pivot indices when `get_pivots` is true; otherwise
//   pre-filled with the identity permutation (1-based) for MAGMA parity.
// - `infos`: one status int per batch element.
void lu_looped_cusolver(const Tensor& self, const Tensor& pivots, const Tensor& infos, bool get_pivots) {
  // Fill the pivots tensor with indices using 1-based (Fortran) indexing. This
  // is needed for maintaining the same results with MAGMA.
  // Use std::min explicitly: an unqualified `::min` relies on the toolkit
  // headers injecting a global min() for host code, which is not portable.
  auto k = std::min(self.size(-2), self.size(-1));
  Tensor pivots_tmp = at::arange(1, k + 1, self.options().dtype(at::kInt)).expand_as(pivots);
  pivots.copy_(pivots_tmp);

  AT_DISPATCH_FLOATING_TYPES(
    self.scalar_type(),
    "lu_cusolver",
    [&self,
     &pivots,
     &infos,
     &get_pivots]() {
    int m = cuda_int_cast(self.size(-2), "m");
    int n = cuda_int_cast(self.size(-1), "n");
    int lda = std::max<int>(1, m);
    int64_t self_stride = matrixStride(self);
    int64_t batch_size = batchCount(self);
    scalar_t* self_data = self.data_ptr<scalar_t>();
    int* infos_data = infos.data_ptr<int>();

    auto handle = at::cuda::getCurrentCUDASolverDnHandle();
    for (auto batch = decltype(batch_size){0}; batch < batch_size; ++batch) {
      if (get_pivots) {
        auto pivots_data = pivots.data_ptr<int>();
        auto pivots_stride = pivots.size(-1);
        at::cuda::solver::getrf<scalar_t>(
          handle, m, n,
          self_data + batch * self_stride,
          lda,
          pivots_data + batch * pivots_stride,
          infos_data + batch
        );
      }
      else {
        // Passing nullptr for the pivot array requests non-pivoted LU.
        at::cuda::solver::getrf<scalar_t>(
          handle, m, n,
          self_data + batch * self_stride,
          lda,
          nullptr,
          infos_data + batch
        );
      }
    }
  });

  // Necessary because cuSOLVER uses nan for outputs that correspond to 0 in MAGMA for non-pivoted LU.
  // See https://github.com/pytorch/pytorch/issues/53879 for more details.
  if (!get_pivots) {
    at::nan_to_num_(const_cast<Tensor&>(self), 0, std::numeric_limits<double>::infinity(),
      -std::numeric_limits<double>::infinity());
  }
}
// Solves LU * x = b using cusolverDn getrs, one call per batch element.
// `b` holds the right-hand sides on entry and the solutions on exit;
// `lu` and `pivots` come from a prior LU factorization.
void lu_solve_looped_cusolver(const Tensor& b, const Tensor& lu, const Tensor& pivots) {
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(b.scalar_type(), "lu_solve_cusolver", [&] {
    int n = cuda_int_cast(lu.size(-2), "n");
    int nrhs = cuda_int_cast(b.size(-1), "nrhs");
    auto batch_size = batchCount(lu);
    auto info = at::zeros({1}, lu.options().dtype(kInt));
    auto info_data = info.data_ptr<int>();
    auto b_data = b.data_ptr<scalar_t>();
    auto lu_data = lu.data_ptr<scalar_t>();
    auto pivots_data = pivots.data_ptr<int>();
    auto pivots_stride = pivots.size(-1);
    auto lu_stride = matrixStride(lu);
    auto b_stride = matrixStride(b);
    // LU is square, so the same leading dimension serves both LU and b.
    int leading_dimension = cuda_int_cast(std::max<int>(1, n), "leading_dimension");

    auto handle = at::cuda::getCurrentCUDASolverDnHandle();
    for (auto batch = decltype(batch_size){0}; batch < batch_size; ++batch) {
      at::cuda::solver::getrs<scalar_t>(
        handle,
        n,
        nrhs,
        lu_data + batch * lu_stride,
        leading_dimension,
        pivots_data + batch * pivots_stride,
        b_data + batch * b_stride,
        leading_dimension,
        info_data);

      // getrs only flags wrong arguments via info, so a debug check suffices.
      TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info.item().toInt() == 0);
    }
  });
}
#endif // USE_CUSOLVER
}} // namespace at::native
| 965da248182a8ed89a12ad9692d8dd27d366f553.cu | #include <ATen/Context.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/PinnedMemoryAllocator.h>
#include <ATen/cuda/CUDASolver.h>
#include <ATen/cuda/CUDABlas.h>
#include <ATen/cuda/CUDAEvent.h>
#include <c10/cuda/CUDAStream.h>
#include <ATen/native/LinearAlgebraUtils.h>
#include <ATen/native/cuda/MiscUtils.h>
#include <ATen/native/cuda/BatchLinearAlgebraLib.h>
namespace at {
namespace native {
// Some cuBLAS and cuSOLVER batched routines require input to be a device array of pointers to device individual matrices
// 'input' must be a contiguous tensor
// Builds the "device array of device pointers" required by batched
// cuBLAS/cuSOLVER routines, without a host-to-device copy: at::arange
// generates the addresses directly on the device by treating the base
// pointer as an integer and stepping by the matrix stride in bytes.
// `input` must be a contiguous batch of matrices.
template <typename scalar_t>
static Tensor get_device_pointers(const Tensor& input) {
  auto input_data = input.data_ptr<scalar_t>();
  int64_t input_mat_stride = matrixStride(input);

  // cublas/cusolver interface requires 'int'
  int batch_size = cuda_int_cast(batchCount(input), "batch_size");

  // if batch_size==0, then start=0 and end=0
  // if input_mat_stride==0, then step=sizeof(scalar_t)
  return at::arange(
      /*start=*/reinterpret_cast<int64_t>(input_data),
      /*end=*/reinterpret_cast<int64_t>(input_data + batch_size * input_mat_stride),
      /*step=*/static_cast<int64_t>(std::max<int64_t>(input_mat_stride, 1) * sizeof(scalar_t)),
      input.options().dtype(at::kLong));
}
// QR factorization of a batch of matrices in one cuBLAS geqrfBatched call.
// `input` is factorized in place; Householder scalars go into `tau`.
template <typename scalar_t>
void apply_geqrf_batched(const Tensor& input, const Tensor& tau) {
// AMD ROCm backend is implemented via rewriting all CUDA calls to HIP
// rocBLAS does not implement BLAS-like extensions of cuBLAS, they're in rocSOLVER
// rocSOLVER is currently not used in ATen, therefore we raise an error in this case
#ifndef CUDART_VERSION
    TORCH_CHECK(false, "geqrf: Batched version is supported only with cuBLAS backend.")
#else
  auto batch_size = cuda_int_cast(batchCount(input), "batch_size");
  auto m = cuda_int_cast(input.size(-2), "m");
  auto n = cuda_int_cast(input.size(-1), "n");
  auto lda = std::max<int>(1, m);

  // cuBLAS batched geqrf requires input to be the device array of pointers to device single matrices
  Tensor input_ptr_array = get_device_pointers<scalar_t>(input);
  // tau is a vector per matrix; unsqueeze so it looks like a batch of
  // column matrices to the pointer-array helper.
  Tensor tau_ptr_array = get_device_pointers<scalar_t>(tau.unsqueeze(-1));
  auto input_ptr_array_data = reinterpret_cast<scalar_t**>(input_ptr_array.data_ptr());
  auto tau_ptr_array_data = reinterpret_cast<scalar_t**>(tau_ptr_array.data_ptr());

  int info;
  auto handle = at::cuda::getCurrentCUDABlasHandle();
  at::cuda::blas::geqrfBatched(handle, m, n, input_ptr_array_data, lda, tau_ptr_array_data, &info, batch_size);

  // info only indicates wrong arguments to geqrfBatched call
  // info is a host variable, we can check it without device synchronization
  TORCH_INTERNAL_ASSERT(info == 0);
#endif
}
// Type-dispatching wrapper: runs apply_geqrf_batched for the input's dtype.
void geqrf_batched_cublas(const Tensor& input, const Tensor& tau) {
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
      input.scalar_type(), "geqrf_batched_cuda", [&input, &tau] {
        apply_geqrf_batched<scalar_t>(input, tau);
      });
}
// Solves LU * x = b for a whole batch with a single cuBLAS getrsBatched call.
// `b` holds the right-hand sides on entry and the solutions on exit.
template <typename scalar_t>
static void apply_lu_solve_batched_cublas(const Tensor& b, const Tensor& lu, const Tensor& pivots) {
// getrsBatched is a cuBLAS extension not available through rocBLAS; see the
// analogous note in apply_geqrf_batched.
#ifndef CUDART_VERSION
  TORCH_CHECK(false, "lu_solve: cuBLAS backend for lu_solve is not available.")
#else
  cublasOperation_t trans = CUBLAS_OP_N;

  auto pivots_data = pivots.data_ptr<int>();
  auto batch_size = cuda_int_cast(batchCount(lu), "batch_size");;
  auto m = cuda_int_cast(lu.size(-2), "m");
  auto nrhs = cuda_int_cast(b.size(-1), "nrhs");
  auto lda = cuda_int_cast(std::max<int>(1, m), "lda");
  int info = 0;

  // Batched cuBLAS routines take device arrays of per-matrix device pointers.
  Tensor lu_ptr_array = get_device_pointers<scalar_t>(lu);
  Tensor b_ptr_array = get_device_pointers<scalar_t>(b);
  auto lu_ptr_array_data = reinterpret_cast<scalar_t**>(lu_ptr_array.data_ptr());
  auto b_ptr_array_data = reinterpret_cast<scalar_t**>(b_ptr_array.data_ptr());

  auto handle = at::cuda::getCurrentCUDABlasHandle();
  at::cuda::blas::getrsBatched(handle, trans, m, nrhs, lu_ptr_array_data,
    lda, pivots_data, b_ptr_array_data, lda, &info, batch_size);
  // info is host-side and only flags wrong arguments.
  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info == 0);
#endif
}
// Type-dispatching wrapper: runs apply_lu_solve_batched_cublas for the
// factorization's dtype.
void lu_solve_batched_cublas(const Tensor& b, const Tensor& lu, const Tensor& pivots) {
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
      lu.scalar_type(), "lu_solve_cublas", [&b, &lu, &pivots] {
        apply_lu_solve_batched_cublas<scalar_t>(b, lu, pivots);
      });
}
// Triangular solve op A * X = B via cuBLAS trsm, one call per batch element.
// `B` holds the right-hand sides on entry and the solution on exit.
template <typename scalar_t>
static void apply_triangular_solve(Tensor& A, Tensor& B, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
  cublasFillMode_t uplo = upper ? CUBLAS_FILL_MODE_UPPER : CUBLAS_FILL_MODE_LOWER;
  cublasOperation_t trans = transpose ? CUBLAS_OP_T : CUBLAS_OP_N;
  // conjugate_transpose takes precedence over plain transpose
  trans = conjugate_transpose ? CUBLAS_OP_C : trans;
  cublasDiagType_t diag = unitriangular ? CUBLAS_DIAG_UNIT : CUBLAS_DIAG_NON_UNIT;
  cublasSideMode_t side = CUBLAS_SIDE_LEFT;

  auto A_data = A.data_ptr<scalar_t>();
  auto B_data = B.data_ptr<scalar_t>();
  auto A_mat_stride = matrixStride(A);
  auto B_mat_stride = matrixStride(B);
  auto batch_size = batchCount(A);
  auto m = cuda_int_cast(A.size(-2), "m");
  auto n = cuda_int_cast(A.size(-1), "n");
  auto nrhs = cuda_int_cast(B.size(-1), "nrhs");
  auto lda = std::max<int>(1, m);
  auto alpha = scalar_t{1};  // trsm computes X = alpha * op(A)^{-1} * B

  for (decltype(batch_size) i = 0; i < batch_size; i++) {
    scalar_t* A_working_ptr = &A_data[i * A_mat_stride];
    scalar_t* B_working_ptr = &B_data[i * B_mat_stride];
    auto handle = at::cuda::getCurrentCUDABlasHandle();
    at::cuda::blas::trsm(handle, side, uplo, trans, diag, n, nrhs, &alpha, A_working_ptr, lda, B_working_ptr, lda);
  }
}
// This is a type dispatching helper function for 'apply_triangular_solve'.
// `infos` is accepted for interface parity with other backends but is not used:
// cuBLAS trsm reports argument errors via its own status, not an info tensor.
void triangular_solve_cublas(Tensor& A, Tensor& B, Tensor& infos, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
(void)infos; // unused
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(A.scalar_type(), "triangular_solve_cuda", [&]{
apply_triangular_solve<scalar_t>(A, B, upper, transpose, conjugate_transpose, unitriangular);
});
}
// Batched variant of apply_triangular_solve: solves op(A) X = B in-place in B
// with a single cuBLAS trsmBatched call instead of a per-matrix loop.
// conjugate_transpose takes precedence over plain transpose for op(A).
template <typename scalar_t>
static void apply_triangular_solve_batched(Tensor& A, Tensor& B, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
  cublasFillMode_t uplo = upper ? CUBLAS_FILL_MODE_UPPER : CUBLAS_FILL_MODE_LOWER;
  cublasOperation_t trans = transpose ? CUBLAS_OP_T : CUBLAS_OP_N;
  trans = conjugate_transpose ? CUBLAS_OP_C : trans;
  cublasDiagType_t diag = unitriangular ? CUBLAS_DIAG_UNIT : CUBLAS_DIAG_NON_UNIT;
  cublasSideMode_t side = CUBLAS_SIDE_LEFT;

  // NOTE: unlike the looped version, raw data pointers and matrix strides are
  // not needed here — the batched call consumes pointer arrays instead.
  // (Removed previously unused A_data/B_data/A_mat_stride/B_mat_stride locals.)
  auto batch_size = cuda_int_cast(batchCount(A), "batch_size");
  auto m = cuda_int_cast(A.size(-2), "m");
  auto n = cuda_int_cast(A.size(-1), "n");
  auto nrhs = cuda_int_cast(B.size(-1), "nrhs");
  auto lda = std::max<int>(1, m);
  auto alpha = scalar_t{1};

  // cuBLAS batched trsm requires input to be the device array of pointers to device single matrices
  Tensor A_ptr_array = get_device_pointers<scalar_t>(A);
  Tensor B_ptr_array = get_device_pointers<scalar_t>(B);
  auto A_ptr_array_data = reinterpret_cast<scalar_t**>(A_ptr_array.data_ptr());
  auto B_ptr_array_data = reinterpret_cast<scalar_t**>(B_ptr_array.data_ptr());

  auto handle = at::cuda::getCurrentCUDABlasHandle();
  at::cuda::blas::trsmBatched(handle, side, uplo, trans, diag, n, nrhs, &alpha, A_ptr_array_data, lda, B_ptr_array_data, lda, batch_size);
}
// This is a type dispatching helper function for 'apply_triangular_solve_batched'.
// `infos` is accepted for interface parity with other backends but is not used.
void triangular_solve_batched_cublas(Tensor& A, Tensor& B, Tensor& infos, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
(void)infos; // unused
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(A.scalar_type(), "triangular_solve_cuda", [&]{
apply_triangular_solve_batched<scalar_t>(A, B, upper, transpose, conjugate_transpose, unitriangular);
});
}
// Solves the least-squares problem min ||A X - B|| for a batch of
// overdetermined systems (m >= n) in-place in `B`, via cuBLAS gelsBatched.
// `infos` receives one status int per batch entry (device memory).
template <typename scalar_t>
inline void apply_gels_batched(const Tensor& A, Tensor& B, Tensor& infos) {
// AMD ROCm backend is implemented via rewriting all CUDA calls to HIP
// rocBLAS does not implement BLAS-like extensions of cuBLAS, they're in rocSOLVER
// rocSOLVER is currently not used in ATen, therefore we raise an error in this case
#ifndef CUDART_VERSION
TORCH_CHECK(false, "torch.linalg.lstsq: Batched version is supported only with cuBLAS backend.")
#else
auto trans = CUBLAS_OP_N;
auto m = cuda_int_cast(A.size(-2), "m");
auto n = cuda_int_cast(A.size(-1), "n");
auto nrhs = cuda_int_cast(B.size(-1), "nrhs");
// cuBLAS from cuda10 and older doesn't work with nrhs == 0 (cuda11 works)
// so we need to put this early return
if (nrhs == 0) {
return;
}
auto batch_size = cuda_int_cast(batchCount(B), "batch_size");
auto lda = std::max<int>(1, m);
auto ldb = std::max<int>(1, m);
// cuBLAS's requirement
TORCH_CHECK(
m >= n,
"torch.linalg.lstsq: only overdetermined systems (input.size(-2) >= input.size(-1)) are allowed on CUDA with cuBLAS backend.");
// cuBLAS documentation says:
// Matrices Aarray[i] should not overlap; otherwise, undefined behavior is expected.
// explicitly broadcast the batch dimensions of A
IntArrayRef A_batch_sizes(A.sizes().data(), A.dim() - 2);
IntArrayRef B_batch_sizes(B.sizes().data(), B.dim() - 2);
std::vector<int64_t> expand_batch_portion = at::infer_size(A_batch_sizes, B_batch_sizes);
expand_batch_portion.insert(expand_batch_portion.end(), {A.size(-2), A.size(-1)});
// materialize the broadcast so each batch entry owns distinct (column-major) storage
Tensor A_expanded = A.expand({expand_batch_portion});
Tensor A_broadcasted = cloneBatchedColumnMajor(A_expanded);
// cuBLAS batched gels requires input to be the device array of pointers to device single matrices
Tensor A_ptr_array = get_device_pointers<scalar_t>(A_broadcasted);
Tensor B_ptr_array = get_device_pointers<scalar_t>(B);
auto A_ptr_array_data = reinterpret_cast<scalar_t**>(A_ptr_array.data_ptr());
auto B_ptr_array_data = reinterpret_cast<scalar_t**>(B_ptr_array.data_ptr());
auto infos_data = infos.data_ptr<int>();
auto handle = at::cuda::getCurrentCUDABlasHandle();
// `info` (host) flags bad call arguments; per-matrix statuses go to `infos_data`
int info;
at::cuda::blas::gelsBatched<scalar_t>(
handle, trans, m, n, nrhs,
A_ptr_array_data, lda,
B_ptr_array_data, ldb,
&info,
infos_data,
batch_size);
// negative info indicates that an argument to gelsBatched call is invalid
TORCH_INTERNAL_ASSERT(info == 0);
#endif
}
// This is a type dispatching helper function for 'apply_gels_batched'.
// Dispatches on `a`'s floating/complex dtype; the solution overwrites `b`
// and per-matrix statuses are written into `infos`.
void gels_batched_cublas(const Tensor& a, Tensor& b, Tensor& infos) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(a.scalar_type(), "gels_batched_cublas", [&]{
apply_gels_batched<scalar_t>(a, b, infos);
});
}
#ifdef USE_CUSOLVER
// Builds a (batched) identity matrix with the same leading dims and options as
// `self`, laid out column-major: ones on the last-but-one dims are embedded as
// a diagonal, then the trailing two dims are swapped to flip the layout.
inline static Tensor column_major_identity_matrix_like(const Tensor& self) {
  auto shape = self.sizes();
  // drop the last dimension; diag_embed re-adds it as the square dimension
  auto batch_plus_rows = IntArrayRef(shape.data(), shape.size() - 1);
  Tensor diag_ones = at::ones(batch_plus_rows, self.options()).diag_embed();
  return diag_ones.transpose(-2, -1);
}
// Inverts a single n x n matrix: cuSOLVER getrf computes the LU factorization
// of `self_ptr` in-place, then getrs solves A X = I into `self_inv_ptr`.
// Preconditions: `self_inv_ptr` already holds an identity matrix, and
// `ipiv_ptr` has room for the pivot indices. getrf/getrs statuses are written
// to `info_getrf_ptr` / `info_getrs_ptr`.
template <typename scalar_t>
inline static void _apply_single_inverse_helper(scalar_t* self_ptr, scalar_t* self_inv_ptr, int* ipiv_ptr, int* info_getrf_ptr, int* info_getrs_ptr, int n, int lda) {
// self_inv_ptr should already be an identity matrix
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
at::cuda::solver::getrf<scalar_t>(handle, n, n, self_ptr, lda, ipiv_ptr, info_getrf_ptr);
at::cuda::solver::getrs<scalar_t>(handle, n, n, self_ptr, lda, ipiv_ptr, self_inv_ptr, lda, info_getrs_ptr);
}
// Inverts a batch of square matrices from `self` into `self_inv`
// (which must be pre-filled with identity matrices for the looped path).
// Per-matrix getrf/getrs(getri) statuses go to `infos_getrf`/`infos_getrs`.
template <typename scalar_t>
static void apply_batched_inverse_lib(Tensor& self, Tensor& self_inv, Tensor& infos_getrf, Tensor& infos_getrs) {
  const int batch_size = cuda_int_cast(batchCount(self), "batchCount");
  const int n = cuda_int_cast(self.size(-2), "self.size(-2)");
  const int lda = std::max<int>(1, n);

  auto self_data = self.data_ptr<scalar_t>();
  auto self_mat_stride = matrixStride(self);
  auto self_inv_data = self_inv.data_ptr<scalar_t>();
  auto self_inv_mat_stride = matrixStride(self_inv);

  auto infos_getrf_data = infos_getrf.data_ptr<int>();
  auto infos_getrs_data = infos_getrs.data_ptr<int>();

  auto& allocator = *::c10::cuda::CUDACachingAllocator::get();

  // Heuristic: For small batch size or large matrix size, we use for-loop to iterate over the batches instead of
  // calling the batched cublas routine.
  if (batch_size <= 8 || /* batch_size > 8 && */ n >= 512) {
    for (int64_t i = 0; i < batch_size; i++) {
      // per-matrix pivot scratch (n ints, lda == max(1, n))
      auto dataPtr = allocator.allocate(sizeof(int) * lda);
      int* pivot = reinterpret_cast<int*>(dataPtr.get());

      int* infos_getrf_working_ptr = &infos_getrf_data[i];
      int* infos_getrs_working_ptr = &infos_getrs_data[i];

      _apply_single_inverse_helper<scalar_t>(
        &self_data[i * self_mat_stride], &self_inv_data[i * self_inv_mat_stride], pivot, infos_getrf_working_ptr, infos_getrs_working_ptr, n, lda);
    }
  } else {
    // cublas batched kernels require input be "device array of device pointers".
    // Use the shared get_device_pointers helper (same as the other batched
    // cuBLAS paths in this file) instead of hand-building the pointer arrays
    // with at::arange over raw addresses.
    Tensor self_array = get_device_pointers<scalar_t>(self);
    Tensor self_inv_array = get_device_pointers<scalar_t>(self_inv);

    // one shared pivot buffer: n pivots per matrix, batch_size matrices
    auto dataPtr = allocator.allocate(sizeof(int) * batch_size * lda);
    int* ipiv_array = reinterpret_cast<int*>(dataPtr.get());

    at::cuda::blas::getrfBatched<scalar_t>(n, reinterpret_cast<scalar_t**>(self_array.data_ptr()), lda,
      ipiv_array, infos_getrf_data, batch_size);
    at::cuda::blas::getriBatched<scalar_t>(n, reinterpret_cast<scalar_t**>(self_array.data_ptr()), lda,
      ipiv_array, reinterpret_cast<scalar_t**>(self_inv_array.data_ptr()), lda, infos_getrs_data, batch_size);
  }
}
// Inverts a single matrix from `self` into `self_inv` (which must already hold
// an identity matrix), recording getrf/getrs statuses in the infos tensors.
template <typename scalar_t>
static void apply_single_inverse_lib(const Tensor& self, Tensor& self_inv, Tensor& infos_getrf, Tensor& infos_getrs) {
  const int n = cuda_int_cast(self.size(-2), "self.size(-2)");
  const int lda = std::max<int>(1, n);

  // pivot indices produced by getrf and consumed by getrs
  Tensor ipiv = at::empty({lda}, self.options().dtype(at::kInt));

  _apply_single_inverse_helper<scalar_t>(
      self.data_ptr<scalar_t>(),
      self_inv.data_ptr<scalar_t>(),
      ipiv.data_ptr<int>(),
      infos_getrf.data_ptr<int>(),
      infos_getrs.data_ptr<int>(),
      n,
      lda);
}
// This is a type dispatching helper function for 'apply_batched_inverse_lib' and 'apply_single_inverse_lib'
Tensor& _linalg_inv_out_helper_cuda_lib(Tensor& result, Tensor& infos_getrf, Tensor& infos_getrs) {
// assuming result is in column major order and contains the matrices to invert
Tensor input_working_copy = cloneBatchedColumnMajor(result);
// for getrf + getrs (cusolver path)
// result should be filled with identity matrices
result.zero_();
result.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1);
// NOTE(review): batch_size is not referenced below; its only apparent effect
// is the int-range validation performed inside cuda_int_cast — confirm before removing.
const int batch_size = cuda_int_cast(batchCount(result), "batchCount");
// batched path for 3+ dim inputs, single-matrix path for plain 2-D inputs
if (result.dim() > 2) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{
apply_batched_inverse_lib<scalar_t>(
input_working_copy, result, infos_getrf, infos_getrs);
});
} else {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{
apply_single_inverse_lib<scalar_t>(input_working_copy, result, infos_getrf, infos_getrs);
});
}
return result;
}
// entrance of calculations of `inverse` using cusolver getrf + getrs, cublas getrfBatched + getriBatched
Tensor _inverse_helper_cuda_lib(const Tensor& self) {
  Tensor self_working_copy = cloneBatchedColumnMajor(self);
  Tensor self_inv_working_copy = column_major_identity_matrix_like(self_working_copy);
  const int batch_size = cuda_int_cast(batchCount(self), "batchCount");

  // One status slot per matrix for each of the two stages. In the
  // single-matrix branch below batchCount(self) is 1, so this is {1} there.
  const int64_t infos_len = std::max<int64_t>(1, batchCount(self));
  Tensor infos_getrf = at::zeros({infos_len}, self.options().dtype(kInt));
  Tensor infos_getrs = at::zeros({infos_len}, self.options().dtype(kInt));

  if (self.dim() > 2 && batch_size > 1) {
    AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
      apply_batched_inverse_lib<scalar_t>(
        self_working_copy, self_inv_working_copy, infos_getrf, infos_getrs);
    });
  } else {
    AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
      apply_single_inverse_lib<scalar_t>(self_working_copy, self_inv_working_copy, infos_getrf, infos_getrs);
    });
  }

  // raise on any nonzero per-matrix status (device-host sync happens here)
  batchCheckErrors(infos_getrf, "inverse_cuda");
  batchCheckErrors(infos_getrs, "inverse_cuda");

  return self_inv_working_copy;
}
// call cusolver gesvdj function to calculate svd
// Computes A = U * diag(S) * V^T for each matrix in the batch, one gesvdj call
// per matrix. `some` selects the economy-size factorization; when
// `compute_uv` is false only singular values are requested.
// Per-matrix convergence statuses are written into `infos`.
template<typename scalar_t>
inline static void _apply_svd_lib_gesvdj(const Tensor& self, Tensor& U, Tensor& S, Tensor& VT, Tensor& infos, bool compute_uv, bool some) {
// singular values are real even for complex inputs
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto self_data = self.data_ptr<scalar_t>();
auto U_data = U.data_ptr<scalar_t>();
auto S_data = S.data_ptr<value_t>();
auto VT_data = VT.data_ptr<scalar_t>();
auto self_stride = matrixStride(self);
auto U_stride = matrixStride(U);
auto S_stride = S.size(-1);
auto VT_stride = matrixStride(VT);
int batchsize = cuda_int_cast(batchCount(self), "batch size");
int m = cuda_int_cast(self.size(-2), "m");
int n = cuda_int_cast(self.size(-1), "n");
int lda = std::max<int>(1, m);
int ldvt = std::max<int>(1, n);
for(int i = 0; i < batchsize; i++){
// gesvdj_params controls the numerical accuracy of cusolver gesvdj iterations on GPU
gesvdjInfo_t gesvdj_params;
TORCH_CUSOLVER_CHECK(cusolverDnCreateGesvdjInfo(&gesvdj_params));
// default tolerance/sweeps are used; see the commented overrides below
// TORCH_CUSOLVER_CHECK(cusolverDnXgesvdjSetTolerance(gesvdj_params, 1.0e-7));
// TORCH_CUSOLVER_CHECK(cusolverDnXgesvdjSetMaxSweeps(gesvdj_params, 15));
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
auto jobz = compute_uv ? CUSOLVER_EIG_MODE_VECTOR : CUSOLVER_EIG_MODE_NOVECTOR;
at::cuda::solver::gesvdj<scalar_t>(
handle, jobz, /*econ=*/ some ? 1 : 0, m, n,
self_data + i * self_stride,
lda,
S_data + i * S_stride,
U_data + i * U_stride,
lda,
VT_data + i * VT_stride,
ldvt,
infos.data_ptr<int>() + i,
gesvdj_params
);
TORCH_CUSOLVER_CHECK(cusolverDnDestroyGesvdjInfo(gesvdj_params));
}
}
// wrapper around _apply_svd_lib_gesvdj that handles dtype dispatch,
// creates a working copy of the input, and creates V^H from the V returned by gesvdj
inline static void apply_svd_lib_gesvdj(const Tensor& self, Tensor& U, Tensor& S, Tensor& VT, Tensor& infos, bool compute_uv, bool some) {
  // (removed unused locals m, n — only the worker reads the matrix dims)
  Tensor self_working_copy = cloneBatchedColumnMajor(self);
  VT = VT.transpose(-2, -1); // gesvdj returns V instead of V^H
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda_gesvdj", [&] {
    _apply_svd_lib_gesvdj<scalar_t>(self_working_copy, U, S, VT, infos, compute_uv, some);
  });
}
// call cusolver gesvdj batched function to calculate svd
// Single gesvdjBatched call for the whole batch; cuSOLVER restricts this
// routine to matrices with both dims <= 32 (asserted below).
// Per-matrix convergence statuses are written into `infos`.
template<typename scalar_t>
inline static void _apply_svd_lib_gesvdjBatched(const Tensor& self, Tensor& U, Tensor& S, Tensor& VT, Tensor& infos, bool compute_uv) {
// singular values are real even for complex inputs
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto self_data = self.data_ptr<scalar_t>();
auto U_data = U.data_ptr<scalar_t>();
auto S_data = S.data_ptr<value_t>();
auto VT_data = VT.data_ptr<scalar_t>();
auto self_stride = matrixStride(self);
auto U_stride = matrixStride(U);
auto S_stride = S.size(-1);
auto VT_stride = matrixStride(VT);
int batchsize = cuda_int_cast(batchCount(self), "batch size");
int m = cuda_int_cast(self.size(-2), "m");
int n = cuda_int_cast(self.size(-1), "n");
int lda = std::max<int>(1, m);
int ldvt = std::max<int>(1, n);
TORCH_INTERNAL_ASSERT(m <= 32 && n <= 32, "gesvdjBatched requires both matrix dimensions not greater than 32, but got "
"m = ", m, " n = ", n);
// gesvdj_params controls the numerical accuracy of cusolver gesvdj iterations on GPU
gesvdjInfo_t gesvdj_params;
TORCH_CUSOLVER_CHECK(cusolverDnCreateGesvdjInfo(&gesvdj_params));
// TORCH_CUSOLVER_CHECK(cusolverDnXgesvdjSetTolerance(gesvdj_params, 1.0e-7));
// TORCH_CUSOLVER_CHECK(cusolverDnXgesvdjSetMaxSweeps(gesvdj_params, 15));
// request singular values sorted in descending order
TORCH_CUSOLVER_CHECK(cusolverDnXgesvdjSetSortEig(gesvdj_params, 1));
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
auto jobz = compute_uv ? CUSOLVER_EIG_MODE_VECTOR : CUSOLVER_EIG_MODE_NOVECTOR;
at::cuda::solver::gesvdjBatched<scalar_t>(
handle, jobz, m, n, self_data, lda, S_data, U_data, lda, VT_data, ldvt,
infos.data_ptr<int>(), gesvdj_params, batchsize
);
TORCH_CUSOLVER_CHECK(cusolverDnDestroyGesvdjInfo(gesvdj_params));
}
// wrapper around _apply_svd_lib_gesvdjBatched that handles dtype dispatch,
// creates a working copy of the input, and creates V^H from the V returned by gesvdj
inline static void apply_svd_lib_gesvdjBatched(const Tensor& self, Tensor& U, Tensor& S, Tensor& VT, Tensor& infos, bool compute_uv) {
  // (removed unused locals m, n — only the worker reads the matrix dims)
  Tensor self_working_copy = cloneBatchedColumnMajor(self);
  VT = VT.transpose(-2, -1); // gesvdj returns V instead of V^H
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda_gesvdjBatched", [&] {
    _apply_svd_lib_gesvdjBatched<scalar_t>(self_working_copy, U, S, VT, infos, compute_uv);
  });
}
// entrance of calculations of `svd` using cusolver gesvdj and gesvdjBatched
// Returns (U, S, V) — note V, not V^H; the transpose happens at the end.
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda_lib(const Tensor& self, bool some, bool compute_uv) {
const int64_t batch_size = batchCount(self);
at::Tensor infos = at::zeros({batch_size}, self.options().dtype(at::kInt));
const int64_t m = self.size(-2);
const int64_t n = self.size(-1);
const int64_t k = std::min(m, n);
Tensor U_working_copy, S_working_copy, VT_working_copy;
std::tie(U_working_copy, S_working_copy, VT_working_copy) = \
_create_U_S_VT(self, some, compute_uv, /* svd_use_cusolver = */ true);
// U, S, V working copies are already column majored now
// heuristic for using `gesvdjBatched` over `gesvdj`
// (gesvdjBatched only supports dims <= 32 and full, square-compatible outputs)
if (m <= 32 && n <= 32 && batch_size > 1 && (!some || m == n)) {
apply_svd_lib_gesvdjBatched(self, U_working_copy, S_working_copy, VT_working_copy, infos, compute_uv);
} else {
apply_svd_lib_gesvdj(self, U_working_copy, S_working_copy, VT_working_copy, infos, compute_uv, some);
}
// A device-host sync will be performed.
batchCheckErrors(infos, "svd_cuda");
// when only singular values were requested, zero out the (unspecified) U/V buffers
if (!compute_uv) {
VT_working_copy.zero_();
U_working_copy.zero_();
}
if (some) {
VT_working_copy = VT_working_copy.narrow(-2, 0, k);
}
// so far we have computed VT, but torch.svd returns V instead. Adjust accordingly.
VT_working_copy.transpose_(-2, -1);
return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy);
}
// Implementation of Cholesky decomposition using looped cusolverDn<T>potrf or cusolverDnXpotrf (64-bit)
// Factors each matrix of `self_working_copy` in-place; per-matrix statuses go to `infos`.
// The 64-bit (cusolverDnXpotrf) path is used when USE_CUSOLVER_64_BIT is defined,
// otherwise the legacy 32-bit potrf path.
template<typename scalar_t>
inline static void apply_cholesky_cusolver_potrf_looped(const Tensor& self_working_copy, bool upper, const Tensor& infos) {
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
const auto uplo = upper ? CUBLAS_FILL_MODE_UPPER : CUBLAS_FILL_MODE_LOWER;
const int64_t n = self_working_copy.size(-1);
const int64_t lda = std::max<int64_t>(1, n);
const int64_t batch_size = batchCount(self_working_copy);
const int64_t matrix_stride = matrixStride(self_working_copy);
scalar_t* self_working_copy_ptr = self_working_copy.data_ptr<scalar_t>();
int* infos_ptr = infos.data_ptr<int>();
#ifdef USE_CUSOLVER_64_BIT
size_t worksize_device;
size_t worksize_host;
cusolverDnParams_t params;
cudaDataType datatype = at::cuda::solver::get_cusolver_datatype<scalar_t>();
TORCH_CUSOLVER_CHECK(cusolverDnCreateParams(&params));
// query workspace size once; it is the same for every matrix in the batch
at::cuda::solver::xpotrf_buffersize(handle, params, uplo, n, datatype, nullptr, lda, datatype, &worksize_device, &worksize_host);
// allocate workspace storage
// (one slice per batch entry, so iterations do not share scratch memory)
auto& device_allocator = *at::cuda::getCUDADeviceAllocator();
auto workdata_device = device_allocator.allocate(worksize_device * batch_size);
void* workdata_device_ptr = workdata_device.get();
auto& host_allocator = *at::getCPUAllocator();
auto workdata_host = host_allocator.allocate(worksize_host * batch_size);
void* workdata_host_ptr = workdata_host.get();
for (int64_t i = 0; i < batch_size; i++) {
at::cuda::solver::xpotrf(
handle, params, uplo, n, datatype,
self_working_copy_ptr + i * matrix_stride,
lda, datatype,
(char*)workdata_device_ptr + i * worksize_device, worksize_device,
(char*)workdata_host_ptr + i * worksize_host, worksize_host,
infos_ptr + i
);
}
TORCH_CUSOLVER_CHECK(cusolverDnDestroyParams(params));
#else // USE_CUSOLVER_64_BIT
int n_32 = cuda_int_cast(n, "n");
int lda_32 = cuda_int_cast(lda, "lda");
int lwork;
at::cuda::solver::potrf_buffersize<scalar_t>(
handle, uplo, n_32, nullptr, lda_32, &lwork);
// allocate workspace storage
auto& allocator = *at::cuda::getCUDADeviceAllocator();
auto work_data = allocator.allocate(sizeof(scalar_t)*lwork * batch_size);
scalar_t* work_data_ptr = static_cast<scalar_t*>(work_data.get());
for (int64_t i = 0; i < batch_size; i++) {
at::cuda::solver::potrf<scalar_t>(
handle, uplo, n_32,
self_working_copy_ptr + i * matrix_stride,
lda_32,
work_data_ptr + i * lwork,
lwork,
infos_ptr + i
);
}
#endif // USE_CUSOLVER_64_BIT
}
// Implementation of Cholesky decomposition using batched cusolverDn<T>potrfBatched
// Warning: cusolverDn<T>potrfBatched doesn't work quite well when matrix size or batch size is zero.
// If you write your own C++ extension and use this function, make sure you do a zero numel check for the input.
// Factors every matrix of `self_working_copy` in-place with one call;
// per-matrix statuses go to `infos`.
template<typename scalar_t>
inline static void apply_cholesky_cusolver_potrfBatched(const Tensor& self_working_copy, bool upper, const Tensor& infos) {
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
const auto uplo = upper ? CUBLAS_FILL_MODE_UPPER : CUBLAS_FILL_MODE_LOWER;
const int n = cuda_int_cast(self_working_copy.size(-1), "n");
const int lda = std::max<int>(1, n);
const int batch_size = cuda_int_cast(batchCount(self_working_copy), "batch_size");
// cusolver batched kernels require input be "device array of device pointers"
Tensor self_working_copy_array = get_device_pointers<scalar_t>(self_working_copy);
at::cuda::solver::potrfBatched<scalar_t>(
handle, uplo, n,
reinterpret_cast<scalar_t**>(self_working_copy_array.data_ptr()),
lda, infos.data_ptr<int>(), batch_size);
}
// Cholesky-factors `input` in-place via cuSOLVER, writing per-matrix statuses
// into `info`. Chooses potrfBatched when enabled and there is more than one
// matrix, otherwise the looped potrf path.
void cholesky_helper_cusolver(const Tensor& input, bool upper, const Tensor& info) {
  // potrfBatched misbehaves on zero-sized inputs; nothing to factor anyway
  if (input.numel() == 0) {
    return;
  }

  const bool prefer_batched = use_cusolver_potrf_batched_ && batchCount(input) > 1;
  if (prefer_batched) {
    AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "cholesky_cusolver", [&] {
      apply_cholesky_cusolver_potrfBatched<scalar_t>(input, upper, info);
    });
  } else {
    AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "cholesky_cusolver", [&] {
      apply_cholesky_cusolver_potrf_looped<scalar_t>(input, upper, info);
    });
  }
}
// Solves A X = B for each matrix pair, where `A_column_major_copy` holds the
// Cholesky factors (from potrf) and `self_working_copy` holds B and receives X.
// Loops over the batch with cusolverDnXpotrs (64-bit path) or potrs.
// NOTE(review): all iterations write the same single `infos_ptr` slot;
// potrs info only flags a bad argument, so per-batch statuses are not kept.
template<typename scalar_t>
inline static void apply_cholesky_cusolver_potrs(Tensor& self_working_copy, const Tensor& A_column_major_copy, bool upper, Tensor& infos) {
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
const auto uplo = upper ? CUBLAS_FILL_MODE_UPPER : CUBLAS_FILL_MODE_LOWER;
const int64_t n = self_working_copy.size(-2);
const int64_t nrhs = self_working_copy.size(-1);
const int64_t lda = std::max<int64_t>(1, n);
const int64_t batch_size = batchCount(self_working_copy);
const int64_t self_matrix_stride = matrixStride(self_working_copy);
scalar_t* self_working_copy_ptr = self_working_copy.data_ptr<scalar_t>();
const scalar_t* A_ptr = A_column_major_copy.data_ptr<scalar_t>();
const int64_t A_matrix_stride = matrixStride(A_column_major_copy);
const int64_t ldb = std::max<int64_t>(1, A_column_major_copy.size(-1));
int* infos_ptr = infos.data_ptr<int>();
#ifdef USE_CUSOLVER_64_BIT
cusolverDnParams_t params;
cudaDataType datatype = at::cuda::solver::get_cusolver_datatype<scalar_t>();
TORCH_CUSOLVER_CHECK(cusolverDnCreateParams(&params));
for (int64_t i = 0; i < batch_size; i++) {
at::cuda::solver::xpotrs(
handle, params, uplo, n, nrhs, datatype,
A_ptr + i * A_matrix_stride,
lda, datatype,
self_working_copy_ptr + i * self_matrix_stride,
ldb,
infos_ptr
);
}
TORCH_CUSOLVER_CHECK(cusolverDnDestroyParams(params));
#else // USE_CUSOLVER_64_BIT
int n_32 = cuda_int_cast(n, "n");
int nrhs_32 = cuda_int_cast(nrhs, "nrhs");
int lda_32 = cuda_int_cast(lda, "lda");
int ldb_32 = cuda_int_cast(ldb, "ldb");
for (int64_t i = 0; i < batch_size; i++) {
at::cuda::solver::potrs<scalar_t>(
handle, uplo, n_32, nrhs_32,
A_ptr + i * A_matrix_stride,
lda_32,
self_working_copy_ptr + i * self_matrix_stride,
ldb_32,
infos_ptr
);
}
#endif // USE_CUSOLVER_64_BIT
}
// This code path is only dispatched to if MAGMA is not linked in the pytorch build.
// cusolverDn<t>potrsBatched only supports nrhs == 1
// Batched counterpart of apply_cholesky_cusolver_potrs: one potrsBatched call
// solves A X = B for every pair in the batch, with X overwriting
// `self_working_copy`.
template<typename scalar_t>
inline static void apply_cholesky_cusolver_potrsBatched(Tensor& self_working_copy, const Tensor& A_column_major_copy, bool upper, Tensor& infos) {
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
const auto uplo = upper ? CUBLAS_FILL_MODE_UPPER : CUBLAS_FILL_MODE_LOWER;
const int64_t n = self_working_copy.size(-2);
const int64_t nrhs = self_working_copy.size(-1);
const int64_t lda = std::max<int64_t>(1, n);
const int64_t batch_size = batchCount(self_working_copy);
const int64_t self_matrix_stride = matrixStride(self_working_copy);
scalar_t* self_working_copy_ptr = self_working_copy.data_ptr<scalar_t>();
const scalar_t* A_ptr = A_column_major_copy.data_ptr<scalar_t>();
const int64_t A_matrix_stride = matrixStride(A_column_major_copy);
const int64_t ldb = std::max<int64_t>(1, A_column_major_copy.size(-1));
int* infos_ptr = infos.data_ptr<int>();
// NOTE(review): self_matrix_stride and A_matrix_stride above are unused in
// this batched path; get_device_pointers builds the per-matrix pointer arrays.
auto self_ptr_array = get_device_pointers<scalar_t>(self_working_copy);
auto A_ptr_array = get_device_pointers<scalar_t>(A_column_major_copy);
at::cuda::solver::potrsBatched(
handle, uplo,
cuda_int_cast(n, "n"),
cuda_int_cast(nrhs, "nrhs"),
reinterpret_cast<scalar_t**>(A_ptr_array.data_ptr()),
cuda_int_cast(lda, "lda"),
reinterpret_cast<scalar_t**>(self_ptr_array.data_ptr()),
cuda_int_cast(ldb, "ldb"),
infos_ptr,
cuda_int_cast(batch_size, "batch_size")
);
}
// Solves A X = B given the Cholesky factorization in `A` and right-hand
// sides in `self`; returns the solution as a new tensor.
// Uses potrsBatched only when there are multiple matrices and a single
// right-hand side (its documented limitation), else the looped potrs path.
Tensor _cholesky_solve_helper_cuda_cusolver(const Tensor& self, const Tensor& A, bool upper) {
const int64_t batch_size = batchCount(self);
at::Tensor infos = at::zeros({1}, self.options().dtype(at::kInt));
at::Tensor self_working_copy = cloneBatchedColumnMajor(self);
at::Tensor A_column_major_copy = cloneBatchedColumnMajor(A);
const int64_t nrhs = self_working_copy.size(-1);
// cusolverDn<t>potrsBatched only supports nrhs == 1
if (batch_size > 1 && nrhs == 1) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_cuda_potrs_batched", [&] {
apply_cholesky_cusolver_potrsBatched<scalar_t>(self_working_copy, A_column_major_copy, upper, infos);
});
} else {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_cuda_potrs", [&] {
apply_cholesky_cusolver_potrs<scalar_t>(self_working_copy, A_column_major_copy, upper, infos);
});
}
// info from potrs and potrsBatched only report if the i-th parameter is wrong, not about the matrix singularity, etc.
// So we don't need to check it all the time.
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.item().toInt() == 0);
return self_working_copy;
}
// Computes the inverse of a Cholesky-factored matrix by solving A X = I with
// potrs: `result` initially holds the factors, is reset to the identity, and
// receives the inverse in-place.
// NOTE(review): the `infos` parameter is currently not written to — the
// copy from the device-side status is commented out below.
void _cholesky_inverse_cusolver_potrs_based(Tensor& result, Tensor& infos, bool upper) {
at::Tensor input_working_copy = cloneBatchedColumnMajor(result);
at::Tensor infos_gpu = at::zeros({1}, result.options().dtype(at::kInt));
// overwrite result with the identity so potrs solves A X = I
result.fill_(0);
result.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "cholesky_cuda_potri", [&] {
apply_cholesky_cusolver_potrs<scalar_t>(result, input_working_copy, upper, infos_gpu);
});
// Debug only: info of cusolver potrs only check if the i-th parameter is wrong
// Function argument `infos` is a CPU tensor, the following copy will cause a device-host sync.
// infos.copy_(infos_gpu);
}
// Kernel entry point for cholesky_inverse on the cuSOLVER path; delegates to
// the potrs-based implementation and returns `result` for chaining.
Tensor& cholesky_inverse_kernel_impl_cusolver(Tensor &result, Tensor& infos, bool upper) {
_cholesky_inverse_cusolver_potrs_based(result, infos, upper);
return result;
}
/*
The geqrf function computes the QR decomposition of a m x n matrix A.
Args:
* `A` - [in] Tensor with matrices for QR decomposition,
[out] Tensor containing R in the upper triangle of A
and elementary reflectors below the main diagonal of A
* `tau` - Tensor containing the magnitudes of the elementary reflectors
* `m` - The number of rows of `input` to consider
* `n` - The number of columns of `input` to consider (actual sizes of `input` could be larger)
For further details, please see the cuSOLVER documentation for GEQRF.
*/
template <typename scalar_t>
static void apply_geqrf(const Tensor& A, const Tensor& tau) {
int64_t m = A.size(-2);
int64_t n = A.size(-1);
int64_t lda = std::max<int64_t>(1, m);
int64_t batch_size = batchCount(A);
auto A_stride = matrixStride(A);
auto tau_stride = tau.size(-1);
auto A_data = A.data_ptr<scalar_t>();
auto tau_data = tau.data_ptr<scalar_t>();
// single status slot shared by all iterations; geqrf info only flags bad arguments
auto infos = at::zeros({1}, A.options().dtype(at::kInt));
auto infos_data = infos.data_ptr<int>();
// get the optimal work size and allocate workspace tensor
// (queried once; the same sizes apply to every matrix in the batch)
#ifdef USE_CUSOLVER_64_BIT
size_t worksize_device; // workspaceInBytesOnDevice
size_t worksize_host; // workspaceInBytesOnHost
cusolverDnParams_t params = NULL; // use default algorithm (currently it's the only option)
at::cuda::solver::xgeqrf_bufferSize<scalar_t>(
at::cuda::getCurrentCUDASolverDnHandle(),
params,
m,
n,
A_data,
lda,
tau_data,
&worksize_device,
&worksize_host);
#else
int lwork;
int m_32 = cuda_int_cast(m, "m");
int n_32 = cuda_int_cast(n, "n");
int lda_32 = cuda_int_cast(lda, "lda");
at::cuda::solver::geqrf_bufferSize<scalar_t>(
at::cuda::getCurrentCUDASolverDnHandle(), m_32, n_32, A_data, lda_32, &lwork);
#endif // USE_CUSOLVER_64_BIT
// factor each matrix in the batch in-place, reallocating workspace per iteration
for (decltype(batch_size) i = 0; i < batch_size; i++) {
scalar_t* A_working_ptr = &A_data[i * A_stride];
scalar_t* tau_working_ptr = &tau_data[i * tau_stride];
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
#ifdef USE_CUSOLVER_64_BIT
// allocate workspace storage on device and host
auto& device_allocator = *at::cuda::getCUDADeviceAllocator();
auto work_device_data = device_allocator.allocate(worksize_device);
auto& host_allocator = *at::getCPUAllocator();
auto work_host_data = host_allocator.allocate(worksize_host);
at::cuda::solver::xgeqrf<scalar_t>(
handle,
params,
m,
n,
A_working_ptr,
lda,
tau_working_ptr,
static_cast<scalar_t*>(work_device_data.get()),
worksize_device,
static_cast<scalar_t*>(work_host_data.get()),
worksize_host,
infos_data);
#else
// allocate workspace storage on device
auto& allocator = *at::cuda::getCUDADeviceAllocator();
auto work_data = allocator.allocate(sizeof(scalar_t) * std::max<int>(1, lwork));
at::cuda::solver::geqrf<scalar_t>(
handle,
m_32,
n_32,
A_working_ptr,
lda_32,
tau_working_ptr,
static_cast<scalar_t*>(work_data.get()),
lwork,
infos_data);
#endif // USE_CUSOLVER_64_BIT
}
// info from geqrf only reports if the i-th parameter is wrong, not about the matrix singularity
// so we don't need to check it all the time
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.item().toInt() == 0);
}
// This is a type dispatching helper function for 'apply_geqrf'.
// Dispatches on `input`'s floating/complex dtype; `input` is factored in-place
// and the Householder scalars are written to `tau`.
void geqrf_cusolver(const Tensor& input, const Tensor& tau) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "geqrf_cuda", [&]{
apply_geqrf<scalar_t>(input, tau);
});
}
/*
The ormqr function multiplies Q with another matrix from a sequence of
elementary reflectors, such as is produced by the geqrf function.
Args:
* `input` - Tensor with elementary reflectors below the diagonal,
encoding the matrix Q.
* `tau` - Tensor containing the magnitudes of the elementary
reflectors.
* `other` - [in] Tensor containing the matrix to be multiplied.
[out] result of the matrix multiplication with Q.
* `left` - bool, determining whether `other` is left- or right-multiplied with Q.
* `transpose` - bool, determining whether to transpose (or conjugate transpose) Q before multiplying.
For further details, please see the cuSOLVER documentation for ORMQR and UNMQR.
*/
template <typename scalar_t>
static void apply_ormqr(const Tensor& input, const Tensor& tau, const Tensor& other, bool left, bool transpose) {
  // (removed unused `value_t` type alias — nothing in this body needed it)
  auto side = left ? CUBLAS_SIDE_LEFT : CUBLAS_SIDE_RIGHT;
  // complex inputs use conjugate-transpose (unmqr semantics)
  auto trans = transpose ? (input.is_complex() ? CUBLAS_OP_C : CUBLAS_OP_T) : CUBLAS_OP_N;

  auto input_data = input.data_ptr<scalar_t>();
  auto tau_data = tau.data_ptr<scalar_t>();
  auto other_data = other.data_ptr<scalar_t>();

  auto input_matrix_stride = matrixStride(input);
  auto other_matrix_stride = matrixStride(other);
  auto tau_stride = tau.size(-1);
  auto batch_size = batchCount(input);
  auto m = cuda_int_cast(other.size(-2), "m");
  auto n = cuda_int_cast(other.size(-1), "n");
  auto k = cuda_int_cast(tau.size(-1), "k");
  auto lda = std::max<int>(1, left ? m : n);
  auto ldc = std::max<int>(1, m);

  // get the optimal work size and allocate workspace tensor
  int lwork;
  at::cuda::solver::ormqr_bufferSize<scalar_t>(
    at::cuda::getCurrentCUDASolverDnHandle(), side, trans, m, n, k, input_data, lda, tau_data, other_data, ldc, &lwork);

  // single status slot reused by every iteration
  auto info = at::zeros({1}, input.options().dtype(at::kInt));
  auto info_data = info.data_ptr<int>();

  for (auto i = decltype(batch_size){0}; i < batch_size; i++) {
    scalar_t* input_working_ptr = &input_data[i * input_matrix_stride];
    scalar_t* other_working_ptr = &other_data[i * other_matrix_stride];
    scalar_t* tau_working_ptr = &tau_data[i * tau_stride];
    auto handle = at::cuda::getCurrentCUDASolverDnHandle();

    // allocate workspace storage
    auto& allocator = *at::cuda::getCUDADeviceAllocator();
    auto work_data = allocator.allocate(sizeof(scalar_t)*lwork);

    at::cuda::solver::ormqr<scalar_t>(
      handle, side, trans, m, n, k,
      input_working_ptr,
      lda,
      tau_working_ptr,
      other_working_ptr,
      ldc,
      static_cast<scalar_t*>(work_data.get()),
      lwork,
      info_data
    );

    // info from ormqr only reports if the i-th parameter is wrong
    // so we don't need to check it all the time
    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info.item().toInt() == 0);
  }
}
// This is a type dispatching helper function for 'apply_ormqr'.
// Multiplies `other` by the Q factor encoded in (`input`, `tau`); see
// apply_ormqr for parameter semantics.
void ormqr_cusolver(const Tensor& input, const Tensor& tau, const Tensor& other, bool left, bool transpose) {
  // Fix: the dispatch name appears in dtype-mismatch error messages; it
  // previously read "orgmr_cuda" (transposed letters), mislabeling failures.
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "ormqr_cuda", [&]{
    apply_ormqr<scalar_t>(input, tau, other, left, transpose);
  });
}
/*
The orgqr function allows reconstruction of an orthogonal (or unitary) matrix Q,
from a sequence of elementary reflectors, such as produced by the geqrf function.
Args:
* `self` - Tensor with the directions of the elementary reflectors below the diagonal,
it will be overwritten with the result
* `tau` - Tensor containing the magnitudes of the elementary reflectors
For further details, please see the cuSOLVER documentation for ORGQR and UNGQR.
*/
template <typename scalar_t>
inline static void apply_orgqr(Tensor& self, const Tensor& tau) {
  using value_t = typename c10::scalar_value_type<scalar_t>::type;
  auto self_data = self.data_ptr<scalar_t>();
  auto tau_data = tau.data_ptr<scalar_t>();
  // Element stride between consecutive matrices in the batched tensor.
  auto self_matrix_stride = matrixStride(self);
  auto batchsize = cuda_int_cast(batchCount(self), "batch size");
  auto m = cuda_int_cast(self.size(-2), "m");
  auto n = cuda_int_cast(self.size(-1), "n");
  // k = number of elementary reflectors encoded in tau.
  auto k = cuda_int_cast(tau.size(-1), "k");
  // Keep the stride at least 1 so the pointer arithmetic below is valid
  // even when k == 0 for some dtype paths.
  auto tau_stride = std::max<int>(1, k);
  auto lda = std::max<int>(1, m);
  // LAPACK's requirement
  TORCH_INTERNAL_ASSERT(m >= n);
  TORCH_INTERNAL_ASSERT(n >= k);
  // cuSOLVER doesn't compute anything for this case, which is wrong
  // the result should be a matrix with 1 on the diagonal
  if (k == 0) {
    self.fill_(0);
    self.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1);
    return;
  }
  // get the optimal work size and allocate workspace tensor
  // One query suffices: all batch entries share the same dimensions.
  int lwork;
  at::cuda::solver::orgqr_buffersize<scalar_t>(
      at::cuda::getCurrentCUDASolverDnHandle(), m, n, k, self_data, lda, tau_data, &lwork);
  // Single-element device tensor receiving the per-call status code.
  auto info = at::zeros({1}, self.options().dtype(at::kInt));
  auto info_data = info.data_ptr<int>();
  // One cuSOLVER call per batch entry; Q overwrites `self` in place.
  for (auto i = decltype(batchsize){0}; i < batchsize; i++) {
    scalar_t* self_working_ptr = &self_data[i * self_matrix_stride];
    scalar_t* tau_working_ptr = &tau_data[i * tau_stride];
    auto handle = at::cuda::getCurrentCUDASolverDnHandle();
    // allocate workspace storage
    auto& allocator = *at::cuda::getCUDADeviceAllocator();
    auto work_data = allocator.allocate(sizeof(scalar_t)*lwork);
    at::cuda::solver::orgqr<scalar_t>(
      handle, m, n, k,
      self_working_ptr,
      lda,
      tau_working_ptr,
      static_cast<scalar_t*>(work_data.get()),
      lwork,
      info_data
    );
    // info from orgqr only reports if the i-th parameter is wrong
    // so we don't need to check it all the time
    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info.item().toInt() == 0);
  }
}
// Type-dispatching entry point for 'apply_orgqr': reconstructs the explicit
// Q matrix in-place in `result` from the reflectors produced by geqrf.
// Returns `result` to allow call chaining.
Tensor& orgqr_helper_cusolver(Tensor& result, const Tensor& tau) {
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
      result.scalar_type(), "orgqr_cuda", [&result, &tau] {
        apply_orgqr<scalar_t>(result, tau);
      });
  return result;
}
// Symmetric/Hermitian eigendecomposition via cuSOLVER's divide-and-conquer
// syevd. `vectors` holds the input matrices and (if requested) receives the
// eigenvectors in place; `values` receives eigenvalues (real even for complex
// inputs); `infos` receives one status int per batch entry.
template <typename scalar_t>
static void apply_syevd(Tensor& values, Tensor& vectors, Tensor& infos, bool upper, bool compute_eigenvectors) {
  using value_t = typename c10::scalar_value_type<scalar_t>::type;
  cublasFillMode_t uplo = upper ? CUBLAS_FILL_MODE_UPPER : CUBLAS_FILL_MODE_LOWER;
  cusolverEigMode_t jobz = compute_eigenvectors ? CUSOLVER_EIG_MODE_VECTOR : CUSOLVER_EIG_MODE_NOVECTOR;
  int64_t n = vectors.size(-1);
  int64_t lda = std::max<int64_t>(1, n);
  int64_t batch_size = batchCount(vectors);
  auto vectors_stride = matrixStride(vectors);
  auto values_stride = values.size(-1);
  auto vectors_data = vectors.data_ptr<scalar_t>();
  auto values_data = values.data_ptr<value_t>();
  auto infos_data = infos.data_ptr<int>();
  // get the optimal work size and allocate workspace tensor
  // Two code paths: the 64-bit "Xsyevd" generic API (needs separate device
  // and host workspaces) vs the legacy 32-bit syevd API.
#ifdef USE_CUSOLVER_64_BIT
  size_t worksize_device; // workspaceInBytesOnDevice
  size_t worksize_host; // workspaceInBytesOnHost
  cusolverDnParams_t params = NULL; // use default algorithm (currently it's the only option)
  at::cuda::solver::xsyevd_bufferSize<scalar_t>(
      at::cuda::getCurrentCUDASolverDnHandle(),
      params,
      jobz,
      uplo,
      n,
      vectors_data,
      lda,
      values_data,
      &worksize_device,
      &worksize_host);
#else
  int lwork;
  int n_32 = cuda_int_cast(n, "n");
  int lda_32 = cuda_int_cast(lda, "lda");
  at::cuda::solver::syevd_bufferSize<scalar_t>(
      at::cuda::getCurrentCUDASolverDnHandle(), jobz, uplo, n_32, vectors_data, lda_32, values_data, &lwork);
#endif // USE_CUSOLVER_64_BIT
  // One cuSOLVER call per batch entry; each writes its status into infos[i].
  for (decltype(batch_size) i = 0; i < batch_size; i++) {
    scalar_t* vectors_working_ptr = &vectors_data[i * vectors_stride];
    value_t* values_working_ptr = &values_data[i * values_stride];
    int* info_working_ptr = &infos_data[i];
    auto handle = at::cuda::getCurrentCUDASolverDnHandle();
#ifdef USE_CUSOLVER_64_BIT
    // allocate workspace storage on device and host
    auto& device_allocator = *at::cuda::getCUDADeviceAllocator();
    auto work_device_data = device_allocator.allocate(worksize_device);
    auto& host_allocator = *at::getCPUAllocator();
    auto work_host_data = host_allocator.allocate(worksize_host);
    at::cuda::solver::xsyevd<scalar_t>(
        handle,
        params,
        jobz,
        uplo,
        n,
        vectors_working_ptr,
        lda,
        values_working_ptr,
        static_cast<scalar_t*>(work_device_data.get()),
        worksize_device,
        static_cast<scalar_t*>(work_host_data.get()),
        worksize_host,
        info_working_ptr);
#else
    // allocate workspace storage on device
    auto& allocator = *at::cuda::getCUDADeviceAllocator();
    auto work_data = allocator.allocate(sizeof(scalar_t) * lwork);
    at::cuda::solver::syevd<scalar_t>(
        handle,
        jobz,
        uplo,
        n_32,
        vectors_working_ptr,
        lda_32,
        values_working_ptr,
        static_cast<scalar_t*>(work_data.get()),
        lwork,
        info_working_ptr);
#endif // USE_CUSOLVER_64_BIT
  }
}
// Symmetric/Hermitian eigendecomposition via cuSOLVER's Jacobi solver (syevj),
// looped over the batch. Same tensor contract as apply_syevd.
template <typename scalar_t>
static void apply_syevj(Tensor& values, Tensor& vectors, Tensor& infos, bool upper, bool compute_eigenvectors) {
  using value_t = typename c10::scalar_value_type<scalar_t>::type;
  cublasFillMode_t uplo = upper ? CUBLAS_FILL_MODE_UPPER : CUBLAS_FILL_MODE_LOWER;
  cusolverEigMode_t jobz = compute_eigenvectors ? CUSOLVER_EIG_MODE_VECTOR : CUSOLVER_EIG_MODE_NOVECTOR;
  int n = cuda_int_cast(vectors.size(-1), "n");
  int lda = std::max<int>(1, n);
  auto batch_size = batchCount(vectors);
  auto vectors_stride = matrixStride(vectors);
  auto values_stride = values.size(-1);
  auto vectors_data = vectors.data_ptr<scalar_t>();
  auto values_data = values.data_ptr<value_t>();
  auto infos_data = infos.data_ptr<int>();
  // syevj_params controls the numerical accuracy of syevj
  // by default the tolerance is set to machine accuracy
  // the maximum number of iteration of Jacobi method by default is 100
  // cuSOLVER documentations says: "15 sweeps are good enough to converge to machine accuracy"
  // LAPACK has SVD routine based on similar Jacobi algorithm (gesvj) and there a maximum of 30 iterations is set
  // Let's use the default values for now
  syevjInfo_t syevj_params;
  TORCH_CUSOLVER_CHECK(cusolverDnCreateSyevjInfo(&syevj_params));
  // get the optimal work size and allocate workspace tensor
  // One query suffices: all batch entries share the same dimensions.
  int lwork;
  at::cuda::solver::syevj_bufferSize<scalar_t>(
      at::cuda::getCurrentCUDASolverDnHandle(), jobz, uplo, n, vectors_data, lda, values_data, &lwork, syevj_params);
  // One cuSOLVER call per batch entry; status goes into infos[i].
  for (decltype(batch_size) i = 0; i < batch_size; i++) {
    scalar_t* vectors_working_ptr = &vectors_data[i * vectors_stride];
    value_t* values_working_ptr = &values_data[i * values_stride];
    int* info_working_ptr = &infos_data[i];
    auto handle = at::cuda::getCurrentCUDASolverDnHandle();
    // allocate workspace storage on device
    auto& allocator = *at::cuda::getCUDADeviceAllocator();
    auto work_data = allocator.allocate(sizeof(scalar_t) * lwork);
    at::cuda::solver::syevj<scalar_t>(
        handle,
        jobz,
        uplo,
        n,
        vectors_working_ptr,
        lda,
        values_working_ptr,
        static_cast<scalar_t*>(work_data.get()),
        lwork,
        info_working_ptr,
        syevj_params);
  }
  // Release the parameter descriptor created above.
  TORCH_CUSOLVER_CHECK(cusolverDnDestroySyevjInfo(syevj_params));
}
// Batched variant of the Jacobi eigensolver: a single syevjBatched call
// processes the whole batch at once (cuSOLVER requires eigenvalue sorting to
// be requested explicitly for this path, see XsyevjSetSortEig below).
template <typename scalar_t>
static void apply_syevj_batched(Tensor& values, Tensor& vectors, Tensor& infos, bool upper, bool compute_eigenvectors) {
  using value_t = typename c10::scalar_value_type<scalar_t>::type;
  cublasFillMode_t uplo = upper ? CUBLAS_FILL_MODE_UPPER : CUBLAS_FILL_MODE_LOWER;
  cusolverEigMode_t jobz = compute_eigenvectors ? CUSOLVER_EIG_MODE_VECTOR : CUSOLVER_EIG_MODE_NOVECTOR;
  int n = cuda_int_cast(vectors.size(-1), "n");
  int lda = std::max<int>(1, n);
  int batch_size = cuda_int_cast(batchCount(vectors), "batch_size");
  auto vectors_data = vectors.data_ptr<scalar_t>();
  auto values_data = values.data_ptr<value_t>();
  auto infos_data = infos.data_ptr<int>();
  // syevj_params controls the numerical accuracy of syevj
  // by default the tolerance is set to machine accuracy
  // the maximum number of iteration of Jacobi method by default is 100
  // cuSOLVER documentations says: "15 sweeps are good enough to converge to machine accuracy"
  // LAPACK has SVD routine based on similar Jacobi algorithm (gesvj) and there a maximum of 30 iterations is set
  // Let's use the default values for now
  syevjInfo_t syevj_params;
  TORCH_CUSOLVER_CHECK(cusolverDnCreateSyevjInfo(&syevj_params));
  // Ask cuSOLVER to sort the eigenvalues (1 = enabled).
  TORCH_CUSOLVER_CHECK(cusolverDnXsyevjSetSortEig(syevj_params, 1));
  auto handle = at::cuda::getCurrentCUDASolverDnHandle();
  // get the optimal work size and allocate workspace tensor
  int lwork;
  at::cuda::solver::syevjBatched_bufferSize<scalar_t>(
      handle,
      jobz,
      uplo,
      n,
      vectors_data,
      lda,
      values_data,
      &lwork,
      syevj_params,
      batch_size);
  // allocate workspace storage on device
  auto& allocator = *at::cuda::getCUDADeviceAllocator();
  auto work_data = allocator.allocate(sizeof(scalar_t) * lwork);
  // Single call handles the whole batch; per-matrix statuses go to infos.
  at::cuda::solver::syevjBatched<scalar_t>(
      handle,
      jobz,
      uplo,
      n,
      vectors_data,
      lda,
      values_data,
      static_cast<scalar_t*>(work_data.get()),
      lwork,
      infos_data,
      syevj_params,
      batch_size);
  // Release the parameter descriptor created above.
  TORCH_CUSOLVER_CHECK(cusolverDnDestroySyevjInfo(syevj_params));
}
// Dtype dispatcher for the divide-and-conquer eigensolver path (apply_syevd).
static void linalg_eigh_cusolver_syevd(Tensor& eigenvalues, Tensor& eigenvectors, Tensor& infos, bool upper, bool compute_eigenvectors) {
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
      eigenvectors.scalar_type(), "linalg_eigh_cuda",
      [&eigenvalues, &eigenvectors, &infos, upper, compute_eigenvectors] {
        apply_syevd<scalar_t>(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors);
      });
}
// Dtype dispatcher for the Jacobi eigensolver path (apply_syevj).
static void linalg_eigh_cusolver_syevj(Tensor& eigenvalues, Tensor& eigenvectors, Tensor& infos, bool upper, bool compute_eigenvectors) {
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
      eigenvectors.scalar_type(), "linalg_eigh_cuda",
      [&eigenvalues, &eigenvectors, &infos, upper, compute_eigenvectors] {
        apply_syevj<scalar_t>(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors);
      });
}
// Selects the cuSOLVER eigensolver backend for torch.linalg.eigh.
// TODO: syevj_batched should be added here, but at least for CUDA 11.2 it
// contains a bug leading to incorrect results. See
// https://github.com/pytorch/pytorch/pull/53040#issuecomment-793626268 and
// https://github.com/cupy/cupy/issues/4847
void linalg_eigh_cusolver(Tensor& eigenvalues, Tensor& eigenvectors, Tensor& infos, bool upper, bool compute_eigenvectors) {
  // Benchmarks show syevj beats syevd for float32 matrices sized 32x32
  // through 512x512; see
  // https://github.com/pytorch/pytorch/pull/53040#issuecomment-788264724
  const auto n = eigenvectors.size(-1);
  const bool prefer_jacobi =
      eigenvectors.scalar_type() == at::kFloat && n >= 32 && n <= 512;
  if (prefer_jacobi) {
    linalg_eigh_cusolver_syevj(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors);
  } else {
    linalg_eigh_cusolver_syevd(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors);
  }
}
// The 'apply_' word is used for templated by dtype functions that call an API routine
// underneath. Since the cusolver API has a slightly different structure we do not prepend
// apply_ to this function.
// Batched LU factorization via looped cusolverDn getrf calls. `self` is
// factorized in place; `pivots` receives 1-based pivot indices (or the
// identity permutation when get_pivots is false); `infos` receives one
// status int per batch entry.
void lu_looped_cusolver(const Tensor& self, const Tensor& pivots, const Tensor& infos, bool get_pivots) {
  // Fill the pivots tensor with indices using 1-based (Fortran) indexing. This
  // is needed for maintaining the same results with MAGMA.
  auto k = std::min(self.size(-2), self.size(-1));
  Tensor pivots_tmp = at::arange(1, k + 1, self.options().dtype(at::kInt)).expand_as(pivots);
  pivots.copy_(pivots_tmp);
  AT_DISPATCH_FLOATING_TYPES(
    self.scalar_type(),
    "lu_cusolver",
    [&self,
     &pivots,
     &infos,
     &get_pivots]() {
    int m = cuda_int_cast(self.size(-2), "m");
    int n = cuda_int_cast(self.size(-1), "n");
    int lda = std::max<int>(1, m);
    int64_t self_stride = matrixStride(self);
    int64_t batch_size = batchCount(self);
    scalar_t* self_data = self.data_ptr<scalar_t>();
    int* infos_data = infos.data_ptr<int>();
    auto handle = at::cuda::getCurrentCUDASolverDnHandle();
    // One getrf call per batch entry; a null pivot pointer requests the
    // non-pivoted factorization.
    for (auto batch = decltype(batch_size){0}; batch < batch_size; ++batch) {
      if (get_pivots) {
        auto pivots_data = pivots.data_ptr<int>();
        auto pivots_stride = pivots.size(-1);
        at::cuda::solver::getrf<scalar_t>(
          handle, m, n,
          self_data + batch * self_stride,
          lda,
          pivots_data + batch * pivots_stride,
          infos_data + batch
        );
      }
      else {
        at::cuda::solver::getrf<scalar_t>(
          handle, m, n,
          self_data + batch * self_stride,
          lda,
          nullptr,
          infos_data + batch
        );
      }
    }
  });
  // Necessary because cuSOLVER uses nan for outputs that correspond to 0 in MAGMA for non-pivoted LU.
  // See https://github.com/pytorch/pytorch/issues/53879 for more details.
  if (!get_pivots) {
    at::nan_to_num_(const_cast<Tensor&>(self), 0, std::numeric_limits<double>::infinity(),
      -std::numeric_limits<double>::infinity());
  }
}
// Solves A X = B for each batch entry given the LU factorization (`lu`,
// `pivots`) of A, via looped cusolverDn getrs calls. `b` is overwritten
// with the solution in place.
void lu_solve_looped_cusolver(const Tensor& b, const Tensor& lu, const Tensor& pivots) {
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(b.scalar_type(), "lu_solve_cusolver", [&] {
    int n = cuda_int_cast(lu.size(-2), "n");
    int nrhs = cuda_int_cast(b.size(-1), "nrhs");
    auto batch_size = batchCount(lu);
    // Single-element device tensor receiving the per-call status code.
    auto info = at::zeros({1}, lu.options().dtype(kInt));
    auto info_data = info.data_ptr<int>();
    auto b_data = b.data_ptr<scalar_t>();
    auto lu_data = lu.data_ptr<scalar_t>();
    auto pivots_data = pivots.data_ptr<int>();
    auto pivots_stride = pivots.size(-1);
    auto lu_stride = matrixStride(lu);
    auto b_stride = matrixStride(b);
    // A is n x n, so both A and B use the same leading dimension.
    int leading_dimension = cuda_int_cast(std::max<int>(1, n), "leading_dimension");
    auto handle = at::cuda::getCurrentCUDASolverDnHandle();
    // One getrs call per batch entry ("looped" batching).
    for (auto batch = decltype(batch_size){0}; batch < batch_size; ++batch) {
      at::cuda::solver::getrs<scalar_t>(
        handle,
        n,
        nrhs,
        lu_data + batch * lu_stride,
        leading_dimension,
        pivots_data + batch * pivots_stride,
        b_data + batch * b_stride,
        leading_dimension,
        info_data);
      // getrs only reports argument errors via info, so a debug-only check
      // is sufficient.
      TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info.item().toInt() == 0);
    }
  });
}
#endif // USE_CUSOLVER
}} // namespace at::native
|
abebd1355e433562e60fdfe8eedb9940a632d8ed.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include "func.h"
#include <omp.h>
// setting GPU device
const int device = 0;
#define BLOCK_SIZE 16
// One Jacobi sweep over an N x N grid: each thread updates one point of d_u
// from the 4-neighbor average of d_u_old plus the source term f.
// The launch grid must cover at least N x N threads (x -> row i, y -> col j).
// Fix: the original body had unbalanced braces (three closing braces for one
// opening) and no bounds guard, so boundary and out-of-range threads would
// read/write out of bounds. The guard restores the standard Jacobi stencil,
// updating interior points only and leaving the boundary ring untouched.
__global__ void jac_mp_v3(int N, double delta, int max_iter, double *f, double *d_u, double *d_u_old) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    // Update u at interior points only; boundary values stay fixed.
    if (i > 0 && i < N - 1 && j > 0 && j < N - 1) {
        d_u[i*N + j] = 0.25 * (d_u_old[(i-1)*N + j] + d_u_old[(i+1)*N + j] + d_u_old[i*N + (j-1)] + d_u_old[i*N + (j+1)] + delta*delta*f[i*N + j]);
    }
}
// Driver: Jacobi iteration for a 2D Poisson-type problem on an N x N grid
// (interior (N-2)x(N-2) plus a boundary ring). Allocates pinned host buffers
// and device buffers, runs max_iter Jacobi sweeps on the GPU, prints the
// solution grid, then prints timing/bandwidth statistics.
int main(int argc, char *argv[]) {
    //setting up device
    hipSetDevice(device);
    // timing
    //double ts, te, mflops, memory,flop;
    // NOTE(review): 'loops' is never assigned; it is only referenced in
    // commented-out stats code below — candidate for removal.
    int max_iter, loops, N;
    // command line argument sets the dimensions of the image
    // NOTE(review): requires exactly 3 CLI arguments (argc == 4) but only
    // argv[1] and argv[2] are read — confirm whether argc == 3 was intended.
    if (argc == 4 ) {
        N = atoi(argv[1]) + 2; // +2 accounts for the boundary ring
        max_iter = atoi(argv[2]);
    }
    else {
        // use default N
        N = 32 + 2;
        max_iter = 100;
    }
    // arrays
    double *d_f, *d_u, *d_u_old;  // device: source term, current and previous iterate
    double *h_f, *h_u, *h_u_old;  // pinned host mirrors of the same buffers
    int size_f = sizeof(double)*N*N;
    int size_u = sizeof(double)*N*N;
    int size_u_old = sizeof(double)*N*N;
    // GPU
    // Allocate memory on host and device
    hipMalloc((void**)&d_f, size_f);
    hipMalloc((void**)&d_u, size_u);
    hipMalloc((void**)&d_u_old, size_u_old);
    //h_f = (double*)malloc(size_f);
    // Pinned host allocations speed up host<->device transfers.
    hipHostMalloc((void**)&h_f, size_f);
    hipHostMalloc((void**)&h_u, size_u);
    hipHostMalloc((void**)&h_u_old, size_u_old);
    if (d_f == NULL || d_u == NULL || d_u_old ==NULL) {
        fprintf(stderr, "memory allocation failed!\n");
        return(1);
    }
    if (h_f == NULL || h_u == NULL || h_u_old ==NULL) {
        fprintf(stderr, "memory allocation failed!\n");
        return(1);
    }
    double time, time_end, time_IO_1, time_IO_2, time_compute, time_compute_end,tot_time_compute;
    time = omp_get_wtime();
    double delta = 2.0/N; // grid spacing
    int i,j;
    // Initialize: source term f = 200 inside a sub-rectangle of the grid,
    // u fixed to 20 on three walls (i==0, i==N-1, j==N-1), 0 elsewhere.
    for (i = 0; i < N; i++){
        for (j = 0; j < N; j++){
            if (i >= N * 0.5 && i <= N * 2.0/3.0 && j >= N * 1.0/6.0 && j <= N * 1.0/3.0)
                h_f[i*N + j] = 200.0;
            else
                h_f[i*N + j] = 0.0;
            if (i == (N - 1) || i == 0 || j == (N - 1)){
                h_u[i*N + j] = 20.0;
                h_u_old[i*N + j] = 20.0;
            }
            else{
                h_u[i*N + j] = 0.0;
                h_u_old[i*N + j] = 0.0;
            }
        }
    }
    hipMemcpy(d_f, h_f, size_f, hipMemcpyHostToDevice);
    hipMemcpy(d_u, h_u, size_u, hipMemcpyHostToDevice);
    hipMemcpy(d_u_old, h_u_old, size_u_old, hipMemcpyHostToDevice);
    time_IO_1 = omp_get_wtime()- time;
    dim3 dimGrid(512,8,1); // 4096 blocks in total
    dim3 dimBlock(16,16,1);// 256 threads per block
    // do program
    //ts = omp_get_wtime();
    int k;
    k=0;
    //double *temp;
    time_compute = omp_get_wtime();
    // Iteration loop: each pass uploads u/u_old, runs one Jacobi sweep, and
    // downloads the results with the roles of u and u_old exchanged via the
    // crossed copies below (this realizes the u/u_old swap through the host).
    // NOTE(review): round-tripping through the host every iteration dominates
    // the runtime; a device-side pointer swap would avoid all four transfers.
    while (k < max_iter) {
        //Set u_old = u
        //temp = h_u;
        //h_u = h_u_old;
        //h_u_old = temp;
        hipMemcpy(d_u, h_u, size_u, hipMemcpyHostToDevice);
        hipMemcpy(d_u_old, h_u_old, size_u_old, hipMemcpyHostToDevice);
        hipLaunchKernelGGL(( jac_mp_v3), dim3(dimGrid), dim3(dimBlock), 0, 0, N, delta, max_iter,d_f,d_u,d_u_old);
        hipMemcpy(h_u_old, d_u, size_u_old, hipMemcpyDeviceToHost);
        hipMemcpy(h_u, d_u_old, size_u, hipMemcpyDeviceToHost);
        k++;
    }
    hipDeviceSynchronize();
    time_compute_end = omp_get_wtime();
    // end program
    // Copy result back to host
    hipMemcpy(h_u_old, d_u_old, size_u_old, hipMemcpyDeviceToHost);
    hipMemcpy(h_u, d_u, size_u, hipMemcpyDeviceToHost);
    //timing
    time_end = omp_get_wtime();
    time_IO_2 = time_end - time_compute_end;
    tot_time_compute = time_compute_end - time_compute;
    //print to see wheter it is right
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            printf("%g\t", h_u[i*N+j]);
        }
        printf("\n");
    }
    //flops
    //flop=max_iter * (double)(N-2) * (double)(N-2) * 10.0;
    // calculate mflops in O
    //mflops = flop * 1.0e-06 * loops / te;
    //memory = 3.0 * (double)(N-2) * (double)(N-2) * sizeof(double);
    //printf("%d\t", n_cores);
    //printf("%g\t", memory);
    //printf("%g\t", mflops);
    //printf("%g\n", te / loops);
    // stats
    double GB = 1.0e-09;
    // NOTE(review): N*N*2 is computed in int arithmetic (overflow risk for
    // very large N) and the flop count ignores max_iter — verify the model.
    double gflops = (N * N * 2 / tot_time_compute) * GB;
    double memory = size_f + size_u + size_u_old;
    double memoryGBs = memory * GB * (1 / tot_time_compute);
    printf("%g\t", memory); // footprint
    printf("%g\t", gflops); // Gflops
    printf("%g\t", gflops / 70.65); // pct. Gflops
    printf("%g\t", memoryGBs); // bandwidth GB/s
    printf("%g\t", memoryGBs / 8.98); // pct. bandwidth GB/s
    printf("%g\t", time_end - time); // total time
    printf("%g\t", time_IO_1 + time_IO_2); // I/O time
    printf("%g\n", tot_time_compute); // compute time
    // Cleanup
    hipHostFree(h_f);
    hipHostFree(h_u);
    hipHostFree(h_u_old);
    hipFree(d_f);
    hipFree(d_u);
    hipFree(d_u_old);
    // end program
    return(0);
}
| abebd1355e433562e60fdfe8eedb9940a632d8ed.cu | #include <stdio.h>
#include <stdlib.h>
#include "func.h"
#include <omp.h>
// setting GPU device
const int device = 0;
#define BLOCK_SIZE 16
// One Jacobi sweep over an N x N grid: each thread updates one point of d_u
// from the 4-neighbor average of d_u_old plus the source term f.
// The launch grid must cover at least N x N threads (x -> row i, y -> col j).
// Fix: the original body had unbalanced braces (three closing braces for one
// opening) and no bounds guard, so boundary and out-of-range threads would
// read/write out of bounds. The guard restores the standard Jacobi stencil,
// updating interior points only and leaving the boundary ring untouched.
__global__ void jac_mp_v3(int N, double delta, int max_iter, double *f, double *d_u, double *d_u_old) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    // Update u at interior points only; boundary values stay fixed.
    if (i > 0 && i < N - 1 && j > 0 && j < N - 1) {
        d_u[i*N + j] = 0.25 * (d_u_old[(i-1)*N + j] + d_u_old[(i+1)*N + j] + d_u_old[i*N + (j-1)] + d_u_old[i*N + (j+1)] + delta*delta*f[i*N + j]);
    }
}
// Driver: Jacobi iteration for a 2D Poisson-type problem on an N x N grid
// (interior (N-2)x(N-2) plus a boundary ring). Allocates pinned host buffers
// and device buffers, runs max_iter Jacobi sweeps on the GPU, prints the
// solution grid, then prints timing/bandwidth statistics.
int main(int argc, char *argv[]) {
    //setting up device
    cudaSetDevice(device);
    // timing
    //double ts, te, mflops, memory,flop;
    // NOTE(review): 'loops' is never assigned; it is only referenced in
    // commented-out stats code below — candidate for removal.
    int max_iter, loops, N;
    // command line argument sets the dimensions of the image
    // NOTE(review): requires exactly 3 CLI arguments (argc == 4) but only
    // argv[1] and argv[2] are read — confirm whether argc == 3 was intended.
    if (argc == 4 ) {
        N = atoi(argv[1]) + 2; // +2 accounts for the boundary ring
        max_iter = atoi(argv[2]);
    }
    else {
        // use default N
        N = 32 + 2;
        max_iter = 100;
    }
    // arrays
    double *d_f, *d_u, *d_u_old;  // device: source term, current and previous iterate
    double *h_f, *h_u, *h_u_old;  // pinned host mirrors of the same buffers
    int size_f = sizeof(double)*N*N;
    int size_u = sizeof(double)*N*N;
    int size_u_old = sizeof(double)*N*N;
    // GPU
    // Allocate memory on host and device
    cudaMalloc((void**)&d_f, size_f);
    cudaMalloc((void**)&d_u, size_u);
    cudaMalloc((void**)&d_u_old, size_u_old);
    //h_f = (double*)malloc(size_f);
    // Pinned host allocations speed up host<->device transfers.
    cudaMallocHost((void**)&h_f, size_f);
    cudaMallocHost((void**)&h_u, size_u);
    cudaMallocHost((void**)&h_u_old, size_u_old);
    if (d_f == NULL || d_u == NULL || d_u_old ==NULL) {
        fprintf(stderr, "memory allocation failed!\n");
        return(1);
    }
    if (h_f == NULL || h_u == NULL || h_u_old ==NULL) {
        fprintf(stderr, "memory allocation failed!\n");
        return(1);
    }
    double time, time_end, time_IO_1, time_IO_2, time_compute, time_compute_end,tot_time_compute;
    time = omp_get_wtime();
    double delta = 2.0/N; // grid spacing
    int i,j;
    // Initialize: source term f = 200 inside a sub-rectangle of the grid,
    // u fixed to 20 on three walls (i==0, i==N-1, j==N-1), 0 elsewhere.
    for (i = 0; i < N; i++){
        for (j = 0; j < N; j++){
            if (i >= N * 0.5 && i <= N * 2.0/3.0 && j >= N * 1.0/6.0 && j <= N * 1.0/3.0)
                h_f[i*N + j] = 200.0;
            else
                h_f[i*N + j] = 0.0;
            if (i == (N - 1) || i == 0 || j == (N - 1)){
                h_u[i*N + j] = 20.0;
                h_u_old[i*N + j] = 20.0;
            }
            else{
                h_u[i*N + j] = 0.0;
                h_u_old[i*N + j] = 0.0;
            }
        }
    }
    cudaMemcpy(d_f, h_f, size_f, cudaMemcpyHostToDevice);
    cudaMemcpy(d_u, h_u, size_u, cudaMemcpyHostToDevice);
    cudaMemcpy(d_u_old, h_u_old, size_u_old, cudaMemcpyHostToDevice);
    time_IO_1 = omp_get_wtime()- time;
    dim3 dimGrid(512,8,1); // 4096 blocks in total
    dim3 dimBlock(16,16,1);// 256 threads per block
    // do program
    //ts = omp_get_wtime();
    int k;
    k=0;
    //double *temp;
    time_compute = omp_get_wtime();
    // Iteration loop: each pass uploads u/u_old, runs one Jacobi sweep, and
    // downloads the results with the roles of u and u_old exchanged via the
    // crossed copies below (this realizes the u/u_old swap through the host).
    // NOTE(review): round-tripping through the host every iteration dominates
    // the runtime; a device-side pointer swap would avoid all four transfers.
    while (k < max_iter) {
        //Set u_old = u
        //temp = h_u;
        //h_u = h_u_old;
        //h_u_old = temp;
        cudaMemcpy(d_u, h_u, size_u, cudaMemcpyHostToDevice);
        cudaMemcpy(d_u_old, h_u_old, size_u_old, cudaMemcpyHostToDevice);
        jac_mp_v3<<<dimGrid, dimBlock>>>(N, delta, max_iter,d_f,d_u,d_u_old);
        cudaMemcpy(h_u_old, d_u, size_u_old, cudaMemcpyDeviceToHost);
        cudaMemcpy(h_u, d_u_old, size_u, cudaMemcpyDeviceToHost);
        k++;
    }
    cudaDeviceSynchronize();
    time_compute_end = omp_get_wtime();
    // end program
    // Copy result back to host
    cudaMemcpy(h_u_old, d_u_old, size_u_old, cudaMemcpyDeviceToHost);
    cudaMemcpy(h_u, d_u, size_u, cudaMemcpyDeviceToHost);
    //timing
    time_end = omp_get_wtime();
    time_IO_2 = time_end - time_compute_end;
    tot_time_compute = time_compute_end - time_compute;
    //print to see wheter it is right
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            printf("%g\t", h_u[i*N+j]);
        }
        printf("\n");
    }
    //flops
    //flop=max_iter * (double)(N-2) * (double)(N-2) * 10.0;
    // calculate mflops in O
    //mflops = flop * 1.0e-06 * loops / te;
    //memory = 3.0 * (double)(N-2) * (double)(N-2) * sizeof(double);
    //printf("%d\t", n_cores);
    //printf("%g\t", memory);
    //printf("%g\t", mflops);
    //printf("%g\n", te / loops);
    // stats
    double GB = 1.0e-09;
    // NOTE(review): N*N*2 is computed in int arithmetic (overflow risk for
    // very large N) and the flop count ignores max_iter — verify the model.
    double gflops = (N * N * 2 / tot_time_compute) * GB;
    double memory = size_f + size_u + size_u_old;
    double memoryGBs = memory * GB * (1 / tot_time_compute);
    printf("%g\t", memory); // footprint
    printf("%g\t", gflops); // Gflops
    printf("%g\t", gflops / 70.65); // pct. Gflops
    printf("%g\t", memoryGBs); // bandwidth GB/s
    printf("%g\t", memoryGBs / 8.98); // pct. bandwidth GB/s
    printf("%g\t", time_end - time); // total time
    printf("%g\t", time_IO_1 + time_IO_2); // I/O time
    printf("%g\n", tot_time_compute); // compute time
    // Cleanup
    cudaFreeHost(h_f);
    cudaFreeHost(h_u);
    cudaFreeHost(h_u_old);
    cudaFree(d_f);
    cudaFree(d_u);
    cudaFree(d_u_old);
    // end program
    return(0);
}
|
d55a100ded2ba60131d0f442a5d2bde5e54039b8.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <hip/hip_runtime.h>
#include <cmath>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/partition.h>
#include "sceneStructs.h"
#include "scene.h"
#include "glm/glm.hpp"
#include "glm/gtx/norm.hpp"
#include "utilities.h"
#include "pathtrace.h"
#include "intersections.h"
#include "interactions.h"
#include "mesh.h"
#define ERRORCHECK 1
#define CACHE_FIRST 0
#define MATERIAL_SORT 0
#define ANTIALIASING 10
#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)
// Error-reporting helper behind the checkCUDAError macro: synchronizes the
// device so pending async work surfaces its error, then aborts with a
// formatted message if the last runtime status is not success. Compiled to a
// no-op when ERRORCHECK is 0.
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
#if ERRORCHECK
    hipDeviceSynchronize();
    const hipError_t status = hipGetLastError();
    if (status == hipSuccess) {
        return;
    }
    fprintf(stderr, "CUDA error");
    if (file != NULL) {
        fprintf(stderr, " (%s:%d)", file, line);
    }
    fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(status));
#  ifdef _WIN32
    // Keep the console window open so the message can be read.
    getchar();
#  endif
    exit(EXIT_FAILURE);
#endif
}
// Build a deterministic per-(iteration, pixel, bounce) random engine.
// The seed packs depth and iteration into one word, hashes it, and XORs in a
// hash of the pixel index so neighboring threads decorrelate.
// NOTE(review): (1 << 31) overflows signed int — presumably intended as a
// high-bit tag; confirm utilhash's expectations.
__host__ __device__
thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) {
    int packed = (1 << 31) | (depth << 22) | iter;
    int seed = utilhash(packed) ^ utilhash(index);
    return thrust::default_random_engine(seed);
}
//Kernel that writes the image to the OpenGL PBO directly.
//Kernel that writes the image to the OpenGL PBO directly.
// One thread per pixel: averages the accumulated radiance over `iter`
// samples, clamps each channel to [0, 255], and stores an RGBA8 texel
// (alpha forced to 0).
__global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution,
        int iter, glm::vec3* image) {
    int x = (blockIdx.x * blockDim.x) + threadIdx.x;
    int y = (blockIdx.y * blockDim.y) + threadIdx.y;
    if (x < resolution.x && y < resolution.y) {
        int index = x + (y * resolution.x);
        glm::vec3 pix = image[index];
        glm::ivec3 color;
        // Divide by iteration count to turn the running sum into an average.
        color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255);
        color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255);
        color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255);
        // Each thread writes one pixel location in the texture (textel)
        pbo[index].w = 0;
        pbo[index].x = color.x;
        pbo[index].y = color.y;
        pbo[index].z = color.z;
    }
}
// File-scope render state shared by pathtraceInit/pathtraceFree and the
// kernels below. `hst_scene` lives on the host; every `dev_*` pointer is
// device memory owned by pathtraceInit and released by pathtraceFree.
static Scene* hst_scene = nullptr;
static glm::vec3* dev_image = nullptr;          // accumulated radiance per pixel
static Geom* dev_geoms = nullptr;               // scene primitives
static Material* dev_materials = nullptr;       // material table
static PathSegment* dev_paths = nullptr;        // one in-flight path per pixel
static ShadeableIntersection* dev_intersections = nullptr;
// Cache for first bounce intersections for re-use
static PathSegment* dev_first_paths = nullptr;
static ShadeableIntersection* dev_first_intersections = nullptr;
// Sobol sequence start point for each pixel
static int* dev_sobol_seed = nullptr;
// Triangles of meshes
static glm::vec3* dev_triangles = nullptr;
// Octree for culling
static OctreeNodeDevice* dev_octree = nullptr;  // only allocated when culling is on
// TODO: static variables for device memory, any extra info you need, etc
// Generate the start point of sobol sequence for each pixel
// Generate the start point of sobol sequence for each pixel
// One thread per pixel: seeds a deterministic RNG from the pixel coordinates
// and draws a uniform offset in [0, 1023] into sobolSeed[pixel].
__global__ void generateSeed(Camera cam, int* sobolSeed) {
    int x = (blockIdx.x * blockDim.x) + threadIdx.x;
    int y = (blockIdx.y * blockDim.y) + threadIdx.y;
    if (x < cam.resolution.x && y < cam.resolution.y) {
        int index = x + (y * cam.resolution.x);
        thrust::default_random_engine rng = makeSeededRandomEngine(0, x, y);
        thrust::uniform_int_distribution<int> u(0, 1023);
        sobolSeed[index] = u(rng);
    }
}
// Allocate and initialize all per-scene device buffers used by the path
// tracer, upload scene data (geometry, materials, mesh triangles, optional
// octree), and seed the per-pixel Sobol offsets. Must be paired with
// pathtraceFree(); errors are surfaced by checkCUDAError at the end.
void pathtraceInit(Scene *scene) {
    hst_scene = scene;
    const Camera& cam = hst_scene->state.camera;
    const int pixelcount = cam.resolution.x * cam.resolution.y;
    // Prepare the octree for device if we are using culling
    if (scene->usingCulling) {
        std::vector<OctreeNodeDevice> temp = scene->prepareOctree();
        hipMalloc(&dev_octree, temp.size() * sizeof(OctreeNodeDevice));
        hipMemcpy(dev_octree, temp.data(), temp.size() * sizeof(OctreeNodeDevice), hipMemcpyHostToDevice);
    }
    hipMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom));
    hipMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), hipMemcpyHostToDevice);
    // Copy mesh triangles to device
    hipMalloc(&dev_triangles, scene->triangles.size() * sizeof(glm::vec3));
    hipMemcpy(dev_triangles, scene->triangles.data(), scene->triangles.size() * sizeof(glm::vec3), hipMemcpyHostToDevice);
    hipMalloc(&dev_image, pixelcount * sizeof(glm::vec3));
    hipMemset(dev_image, 0, pixelcount * sizeof(glm::vec3));
    hipMalloc(&dev_paths, pixelcount * sizeof(PathSegment));
    hipMalloc(&dev_materials, scene->materials.size() * sizeof(Material));
    hipMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), hipMemcpyHostToDevice);
    hipMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection));
    hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
    // Cache for first bounce intersections for re-use
    hipMalloc(&dev_first_paths, pixelcount * sizeof(PathSegment));
    hipMalloc(&dev_first_intersections, pixelcount * sizeof(ShadeableIntersection));
    hipMemset(dev_first_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
    // Sobol sequence start point for each pixel
    hipMalloc(&dev_sobol_seed, pixelcount * sizeof(int));
    const dim3 blockSize2d(8, 8);
    const dim3 blocksPerGrid2d(
        (cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
        (cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
    // Fix: use hipLaunchKernelGGL for the launch, consistent with the rest of
    // this hipified file; the leftover CUDA `<< < ... >> >` chevron syntax
    // was a hipify artifact.
    hipLaunchKernelGGL(generateSeed, blocksPerGrid2d, blockSize2d, 0, 0, cam, dev_sobol_seed);
    // TODO: initialize any extra device memory you need
    checkCUDAError("pathtraceInit");
}
// Release every device buffer allocated by pathtraceInit. Individual frees
// are unchecked; a single checkCUDAError at the end reports failures.
void pathtraceFree() {
    hipFree(dev_image); // no-op if dev_image is null
    hipFree(dev_paths);
    hipFree(dev_geoms);
    hipFree(dev_materials);
    hipFree(dev_intersections);
    // Cache for first bounce intersections for re-use
    hipFree(dev_first_paths);
    hipFree(dev_first_intersections);
    // Sobol sequence start point for each pixel
    hipFree(dev_sobol_seed);
    // Mesh triangles
    hipFree(dev_triangles);
    // Free octree
    // Guard: the octree is only allocated when culling is enabled, and
    // hst_scene is null if pathtraceInit never ran.
    if (hst_scene != nullptr && hst_scene->usingCulling) {
        hipFree(dev_octree);
    }
    // TODO: clean up any extra device memory you created
    checkCUDAError("pathtraceFree");
}
/**
* Generate PathSegments with rays from the camera through the screen into the
* scene, which is the first bounce of rays.
*
* Antialiasing - add rays for sub-pixel sampling
* motion blur - jitter rays "in time"
* lens effect - jitter ray origin positions based on a lens
*/
// One thread per pixel: builds the initial camera ray for this pixel with
// throughput color (1,1,1) and `traceDepth` bounces remaining. When
// ANTIALIASING > 0, the sample point is stratified: iterations cycle through
// an ANTIALIASING x ANTIALIASING sub-pixel grid, jittered within each cell.
__global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments)
{
    int x = (blockIdx.x * blockDim.x) + threadIdx.x;
    int y = (blockIdx.y * blockDim.y) + threadIdx.y;
    if (x < cam.resolution.x && y < cam.resolution.y) {
        int index = x + (y * cam.resolution.x);
        PathSegment & segment = pathSegments[index];
        segment.ray.origin = cam.position;
        segment.color = glm::vec3(1.0f, 1.0f, 1.0f);
        // Antialiasing by jittering the ray
        float rightAAOffset = 0.f;
        float upAAOffset = 0.f;
        if (ANTIALIASING != 0) {
            thrust::default_random_engine rng = makeSeededRandomEngine(iter, x, y);
            thrust::uniform_real_distribution<float> u01(0, 1);
            // Pick the stratum cell from the iteration number, then jitter
            // uniformly inside that cell.
            float antiLength = 1.f / ANTIALIASING;
            int anti = iter % (ANTIALIASING * ANTIALIASING);
            rightAAOffset = (anti % ANTIALIASING + u01(rng)) * antiLength;
            upAAOffset = (anti / ANTIALIASING + u01(rng)) * antiLength;
        }
        // Offsets are measured from the screen center; note the ray direction
        // subtracts the right/up components (screen axes point the other way).
        float rightOffset = (float)x - (float)cam.resolution.x * 0.5f + rightAAOffset;
        float upOffset = (float)y - (float)cam.resolution.y * 0.5f + upAAOffset;
        segment.ray.direction = glm::normalize(cam.view
            - cam.right * cam.pixelLength.x * rightOffset
            - cam.up * cam.pixelLength.y * upOffset
        );
        segment.pixelIndex = index;
        segment.remainingBounces = traceDepth;
    }
}
// Compute intersections by walking the scene octree with an iterative DFS.
// One thread per path. A node's geoms/triangles are only tested when the ray
// hits the node's bounding box; subtrees whose boxes are missed are skipped.
// Fix: `t` is now reset to -1 for every geom so that a geom of an unhandled
// type (neither CUBE nor SPHERE) can no longer inherit a stale positive `t`
// from the previous iteration and register a false hit.
__global__ void computeIntersectionsOctree(
    int depth
    , int num_paths
    , PathSegment* pathSegments
    , Geom* geoms
    , ShadeableIntersection* intersections
    , glm::vec3* triangles
    , OctreeNodeDevice* octree
    , int meshMaterial
) {
    int path_index = blockIdx.x * blockDim.x + threadIdx.x;
    if (path_index < num_paths) {
        PathSegment pathSegment = pathSegments[path_index];
        // Closest-hit record
        glm::vec3 intersect_point;
        glm::vec3 normal;
        float t_min = FLT_MAX;
        int hit_geom_index = -1;
        bool outside = true;
        glm::vec3 tmp_intersect;
        glm::vec3 tmp_normal;
        // Iterative DFS state, limited to 8 levels: stack[l] is the node
        // visited at level l, branches[l] the child slot taken from it.
        // status == 0 means "descend", status == 1 means "backtrack".
        OctreeNodeDevice node = octree[0]; // Root
        OctreeNodeDevice stack[8];
        int branches[8];
        int level = 0;
        int status = 0;
        bool first = true;
        while (level >= 0) {
            if (status == 0) {
                if (!first) {
                    // Descend into the first existing child.
                    int j;
                    for (j = 0; j < 8; j++) {
                        if (node.children[j] != -1) {
                            node = octree[node.children[j]];
                            branches[level] = j;
                            level++;
                            break;
                        }
                    }
                    if (j == 8) { // No children: leaf node, backtrack.
                        status = 1;
                        level--;
                        continue;
                    }
                }
                else {
                    // Root is processed as-is on the very first pass.
                    first = false;
                }
            }
            else {
                // Backtrack: resume the parent at the next child slot.
                node = stack[level];
                int j;
                for (j = branches[level] + 1; j < 8; j++) {
                    if (node.children[j] != -1) {
                        node = octree[node.children[j]];
                        branches[level] = j;
                        level++;
                        break;
                    }
                }
                if (j == 8) { // All children exhausted at this level.
                    level--;
                    continue;
                }
            }
            // If there is intersection with current bounding box
            if (boundingBoxTest(node.minCorner, node.maxCorner, pathSegment.ray)) {
                for (int ct = 0, i = node.geomStart; ct < node.geomCount; ct++, i++) {
                    Geom& geom = geoms[i];
                    // Reset per geom: stays -1 (miss) for unhandled geom
                    // types, so a stale t cannot be mistaken for a hit.
                    float t = -1.0f;
                    if (geom.type == CUBE) {
                        t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
                    }
                    else if (geom.type == SPHERE) {
                        t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
                    }
                    if (t > 0.0f && t_min > t) {
                        t_min = t;
                        hit_geom_index = i;
                        intersect_point = tmp_intersect;
                        normal = tmp_normal;
                    }
                }
                // Triangles are stored as consecutive vertex triplets.
                for (int ct = 0, i = node.triangleStart; ct < node.triangleCount; ct++, i += 3) {
                    float t = triangleIntersectionTest(triangles[i], triangles[i + 1], triangles[i + 2],
                        pathSegment.ray, tmp_intersect, tmp_normal, outside);
                    if (t > 0.0f && t_min > t) {
                        t_min = t;
                        hit_geom_index = -2; // -2 marks the mesh
                        intersect_point = tmp_intersect;
                        normal = tmp_normal;
                    }
                }
                if (level == 8) {
                    // Stack capacity reached: treat as a leaf and backtrack.
                    status = 1;
                    level--;
                }
                else {
                    status = 0;
                    stack[level] = node;
                }
            }
            // If not, backtrack to parent node, and search for next child
            else {
                status = 1;
                level--;
            }
        }
        if (hit_geom_index == -1) {
            intersections[path_index].t = -1.0f;
        }
        else {
            // The ray hits something
            intersections[path_index].t = t_min;
            if (hit_geom_index == -2) {
                // All mesh triangles share the single mesh material.
                intersections[path_index].materialId = meshMaterial;
            }
            else {
                intersections[path_index].materialId = geoms[hit_geom_index].materialid;
            }
            intersections[path_index].surfaceNormal = normal;
        }
    }
}
// computeIntersections handles generating ray intersections ONLY.
// Generating new rays is handled in your shader(s).
// One thread per path; naive linear scan over every geom in the scene.
// Fix: `t` is now reset to -1 for every geom so an unhandled geom type can
// no longer inherit a stale (or uninitialized) positive `t` from a previous
// iteration and register a false hit.
__global__ void computeIntersections(
    int depth
    , int num_paths
    , PathSegment * pathSegments
    , Geom * geoms
    , int geoms_size
    , ShadeableIntersection * intersections
    , glm::vec3 * triangles
    ) {
    int path_index = blockIdx.x * blockDim.x + threadIdx.x;
    if (path_index < num_paths) {
        PathSegment pathSegment = pathSegments[path_index];
        // Closest-hit record
        glm::vec3 intersect_point;
        glm::vec3 normal;
        float t_min = FLT_MAX;
        int hit_geom_index = -1;
        bool outside = true;
        glm::vec3 tmp_intersect;
        glm::vec3 tmp_normal;
        // naive parse through global geoms
        for (int i = 0; i < geoms_size; i++) {
            Geom & geom = geoms[i];
            // Reset per geom: stays -1 (miss) if no test below applies.
            float t = -1.0f;
            if (geom.type == CUBE) {
                t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
            }
            else if (geom.type == SPHERE) {
                t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
            }
            else if (geom.type == MESH) {
                t = meshIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside, triangles);
            }
            // TODO: add more intersection tests here... triangle? metaball? CSG?
            // Keep the nearest positive hit seen so far.
            if (t > 0.0f && t_min > t) {
                t_min = t;
                hit_geom_index = i;
                intersect_point = tmp_intersect;
                normal = tmp_normal;
            }
        }
        if (hit_geom_index == -1) {
            intersections[path_index].t = -1.0f;
        }
        else {
            // The ray hits something
            intersections[path_index].t = t_min;
            intersections[path_index].materialId = geoms[hit_geom_index].materialid;
            intersections[path_index].surfaceNormal = normal;
        }
    }
}
// LOOK: "fake" shader demonstrating what you might do with the info in
// a ShadeableIntersection, as well as how to use thrust's random number
// generator. Observe that since the thrust random number generator basically
// adds "noise" to the iteration, the image should start off noisy and get
// cleaner as more iterations are computed.
//
// Note that this shader does NOT do a BSDF evaluation!
// Your shaders should handle that - this can allow techniques such as
// bump mapping.
/**
 * Demo shader (no real BSDF evaluation). One thread per path.
 * Misses are colored black; emissive hits multiply in the emission; every
 * other hit gets a fixed-direction pseudo-lighting term with per-path noise.
 * All branches terminate the path after this single shading step.
 */
__global__ void shadeFakeMaterial (
    int iter
    , int num_paths
    , ShadeableIntersection * shadeableIntersections
    , PathSegment * pathSegments
    , Material * materials
    ) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= num_paths) {
        return;
    }
    ShadeableIntersection isect = shadeableIntersections[idx];
    if (isect.t <= 0.0f) {
        // No intersection: color the ray black.
        pathSegments[idx].color = glm::vec3(0.0f);
        return;
    }
    // Seeded per (iteration, path) so the image converges as iterations add up.
    thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0);
    thrust::uniform_real_distribution<float> u01(0, 1);
    Material material = materials[isect.materialId];
    glm::vec3 albedo = material.color;
    if (material.emittance > 0.0f) {
        // Hit a light source: accumulate emission and terminate.
        pathSegments[idx].color *= (albedo * material.emittance);
        pathSegments[idx].remainingBounces = 0;
        return;
    }
    // Pseudo-lighting: fixed "up" directional term blended with a
    // distance-based falloff, then modulated by noise.
    float lightTerm = glm::dot(isect.surfaceNormal, glm::vec3(0.0f, 1.0f, 0.0f));
    pathSegments[idx].color *= (albedo * lightTerm) * 0.3f + ((1.0f - isect.t * 0.02f) * albedo) * 0.7f;
    pathSegments[idx].color *= u01(rng);
    pathSegments[idx].remainingBounces = 0;
}
// Shades paths against their intersections and spawns the next bounce.
// One thread per path. Misses terminate with black; emissive hits terminate
// and are accumulated straight into `image` (terminated paths are compacted
// away afterwards, so a later gather pass could not see them); other hits
// are scattered in place by scatterRay.
__global__ void shadeMaterial(
    int iter
    , int* sobolSeed
    , int num_paths
    , ShadeableIntersection* shadeableIntersections
    , PathSegment* pathSegments
    , Material* materials
    , glm::vec3* image
) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < num_paths) {
        ShadeableIntersection intersection = shadeableIntersections[idx];
        if (intersection.t > 0.0f) { // if the intersection exists...
            // Set up the RNG, seeded per (iteration, path).
            thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0);
            Material material = materials[intersection.materialId];
            glm::vec3 materialColor = material.color;
            // If the material indicates that the object was a light, "light" the ray
            if (material.emittance > 0.0f) {
                pathSegments[idx].color *= (materialColor * material.emittance);
                pathSegments[idx].remainingBounces = 0;
                // Accumulate here: stream compaction removes this path before
                // any gather kernel could collect its contribution.
                image[pathSegments[idx].pixelIndex] += pathSegments[idx].color;
            }
            else {
                // scatterRay updates ray, color, and remainingBounces in
                // place; sobolSeed[idx]++ also advances this path's cursor.
                scatterRay(
                    pathSegments[idx],
                    getPointOnRay(pathSegments[idx].ray, intersection.t),
                    intersection.surfaceNormal,
                    material,
                    rng,
                    sobolSeed[idx]++
                );
                if (pathSegments[idx].remainingBounces <= 0) {
                    // Path died without reaching a light: no contribution.
                    pathSegments[idx].color = glm::vec3(0.f);
                }
            }
        }
        else {
            // No intersection: color the ray black and terminate it.
            pathSegments[idx].color = glm::vec3(0.0f);
            pathSegments[idx].remainingBounces = 0;
        }
    }
}
// Add the current iteration's output to the overall image
// Accumulates each surviving path's color into its pixel of the image.
__global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths)
{
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (i >= nPaths) {
        return;
    }
    PathSegment seg = iterationPaths[i];
    image[seg.pixelIndex] += seg.color;
}
/**
* Wrapper for the __global__ call that sets up the kernel calls and does a ton
* of memory management
*/
void pathtrace(uchar4 *pbo, int frame, int iter) {
    const int traceDepth = hst_scene->state.traceDepth;
    const Camera &cam = hst_scene->state.camera;
    const int pixelcount = cam.resolution.x * cam.resolution.y;
    // 2D block for generating ray from camera
    const dim3 blockSize2d(8, 8);
    const dim3 blocksPerGrid2d(
        (cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
        (cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
    // 1D block for path tracing
    const int blockSize1d = 128;
    ///////////////////////////////////////////////////////////////////////////
    // Recap:
    // * Initialize array of path rays (using rays that come out of the camera)
    //   * You can pass the Camera object to that kernel.
    //   * Each path ray must carry at minimum a (ray, color) pair,
    //   * where color starts as the multiplicative identity, white = (1, 1, 1).
    //   * This has already been done for you.
    // * For each depth:
    //   * Compute an intersection in the scene for each path ray.
    //     A very naive version of this has been implemented for you, but feel
    //     free to add more primitives and/or a better algorithm.
    //     Currently, intersection distance is recorded as a parametric distance,
    //     t, or a "distance along the ray." t = -1.0 indicates no intersection.
    //     * Color is attenuated (multiplied) by reflections off of any object
    //   * Stream compact away all of the terminated paths (done below with
    //     thrust::remove_if).
    //   * Note that you can't really use a 2D kernel launch any more - switch
    //     to 1D.
    //   * Shade the rays that intersected something or didn't bottom out.
    //     That is, color the ray by performing a color computation according
    //     to the shader, then generate a new ray to continue the ray path.
    //     We recommend just updating the ray's PathSegment in place.
    //     Note that this step may come before or after stream compaction,
    //     since some shaders you write may also cause a path to terminate.
    // * Finally, add this iteration's results to the image. This has been done
    //   for you.
    // Perform one iteration of path tracing
    // When first-bounce caching is on (and AA is off, so the first bounce is
    // deterministic), iteration 1 records the camera rays and every later
    // iteration restores them instead of regenerating.
    #if CACHE_FIRST == 1 && ANTIALIASING == 0
    if (iter == 1) {
        generateRayFromCamera << <blocksPerGrid2d, blockSize2d >> >
            (cam, iter, traceDepth, dev_paths);
        checkCUDAError("generate camera ray");
        hipMemcpy(dev_first_paths, dev_paths,
            pixelcount * sizeof(PathSegment), hipMemcpyDeviceToDevice);
    }
    else {
        hipMemcpy(dev_paths, dev_first_paths,
            pixelcount * sizeof(PathSegment), hipMemcpyDeviceToDevice);
        hipMemcpy(dev_intersections, dev_first_intersections,
            pixelcount * sizeof(ShadeableIntersection), hipMemcpyDeviceToDevice);
    }
    #else
    generateRayFromCamera << <blocksPerGrid2d, blockSize2d >> >
        (cam, iter, traceDepth, dev_paths);
    checkCUDAError("generate camera ray");
    #endif // CACHE_FIRST
    int depth = 0;
    PathSegment* dev_path_end = dev_paths + pixelcount;
    int num_paths = dev_path_end - dev_paths;
    // --- PathSegment Tracing Stage ---
    // Shoot ray into scene, bounce between objects, push shading chunks
    bool iterationComplete = false;
    while (!iterationComplete) {
        dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d;
        // Trace unless the cached first-bounce intersections were restored above
        // (only possible when caching is active, iter > 1, and depth == 0).
        if (CACHE_FIRST == 0 || ANTIALIASING != 0 || (iter == 1 || depth > 0)) {
            // clean shading chunks
            hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
            // tracing: octree-culled or naive scan, chosen per scene
            if (hst_scene->usingCulling) {
                computeIntersectionsOctree << <numblocksPathSegmentTracing, blockSize1d >> > (
                    depth
                    , num_paths
                    , dev_paths
                    , dev_geoms
                    , dev_intersections
                    , dev_triangles
                    , dev_octree
                    , hst_scene->meshMaterial
                    );
            }
            else {
                computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > (
                    depth
                    , num_paths
                    , dev_paths
                    , dev_geoms
                    , hst_scene->geoms.size()
                    , dev_intersections
                    , dev_triangles
                    );
            }
            checkCUDAError("trace one bounce");
            // Record the deterministic first bounce for reuse by later iterations.
            if (CACHE_FIRST == 1 && ANTIALIASING == 0 && iter == 1 && depth == 0) {
                hipMemcpy(dev_first_intersections, dev_intersections,
                    pixelcount * sizeof(ShadeableIntersection), hipMemcpyDeviceToDevice);
            }
        }
        hipDeviceSynchronize();
        depth++;
        // --- Shading Stage ---
        // Shade path segments based on intersections and generate new rays by
        // evaluating the BSDF.
        // Start off with just a big kernel that handles all the different
        // materials you have in the scenefile.
        // TODO: compare between directly shading the path segments and shading
        // path segments that have been reshuffled to be contiguous in memory.
        #if MATERIAL_SORT == 1
        // Sort paths by material so warps shade coherent materials together.
        thrust::sort_by_key(thrust::device,
            dev_intersections, dev_intersections + num_paths, dev_paths,
            path_sort());
        #endif // MATERIAL_SORT
        hipLaunchKernelGGL(( shadeMaterial), dim3(numblocksPathSegmentTracing), dim3(blockSize1d), 0, 0,
            iter,
            dev_sobol_seed,
            num_paths,
            dev_intersections,
            dev_paths,
            dev_materials,
            dev_image
            );
        //dev_path_end = thrust::partition(thrust::device, dev_paths,
        //	dev_paths + num_paths, path_continue());
        // Compact away terminated paths; shadeMaterial already accumulated
        // their contribution into dev_image.
        dev_path_end = thrust::remove_if(thrust::device,
            dev_paths, dev_paths + num_paths, path_terminated());
        num_paths = dev_path_end - dev_paths;
        if (num_paths <= 0) { // Stop based on stream compaction
            iterationComplete = true;
        }
    }
    // WATCH OUT
    // If we use thrust::remove_if, it would be faster than thrust::partition.
    // But the elements of which pred is true will be undefined. So we cannot gather
    // the contribution here. Instead, we add it directly in shadeMaterial when we
    // encounter a light source.
    // Assemble this iteration and apply it to the image
    //dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d;
    //finalGather<<<numBlocksPixels, blockSize1d>>>(pixelcount, dev_image, dev_paths);
    ///////////////////////////////////////////////////////////////////////////
    // Send results to OpenGL buffer for rendering
    hipLaunchKernelGGL(( sendImageToPBO), dim3(blocksPerGrid2d), dim3(blockSize2d), 0, 0, pbo, cam.resolution, iter, dev_image);
    // Retrieve image from GPU
    hipMemcpy(hst_scene->state.image.data(), dev_image,
        pixelcount * sizeof(glm::vec3), hipMemcpyDeviceToHost);
    checkCUDAError("pathtrace");
}
#include <cstdio>
#include <cuda.h>
#include <cmath>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/partition.h>
#include "sceneStructs.h"
#include "scene.h"
#include "glm/glm.hpp"
#include "glm/gtx/norm.hpp"
#include "utilities.h"
#include "pathtrace.h"
#include "intersections.h"
#include "interactions.h"
#include "mesh.h"
#define ERRORCHECK 1
#define CACHE_FIRST 0
#define MATERIAL_SORT 0
#define ANTIALIASING 10
#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)
/**
 * Host-side error check: synchronizes the device, then reports and aborts on
 * any pending CUDA error. Compiled to a no-op when ERRORCHECK is 0. On
 * Windows, waits for a keypress so the console message can be read.
 */
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
#if ERRORCHECK
    cudaDeviceSynchronize();
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "CUDA error");
        if (file != NULL) {
            fprintf(stderr, " (%s:%d)", file, line);
        }
        fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
#  ifdef _WIN32
        getchar();
#  endif
        exit(EXIT_FAILURE);
    }
#endif
}
// Deterministic RNG seeded per (iteration, index, depth); the hashes keep
// consecutive indices from producing correlated streams.
__host__ __device__
thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) {
    int seed = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index);
    return thrust::default_random_engine(seed);
}
// Writes the running per-pixel average (image / iter) into the OpenGL pixel
// buffer as 8-bit RGBA. One thread per pixel on a 2D launch.
__global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution,
        int iter, glm::vec3* image) {
    int x = (blockIdx.x * blockDim.x) + threadIdx.x;
    int y = (blockIdx.y * blockDim.y) + threadIdx.y;
    if (x >= resolution.x || y >= resolution.y) {
        return;
    }
    int index = x + (y * resolution.x);
    glm::vec3 pix = image[index];
    // Average over iterations, scale to [0, 255], and clamp each channel.
    glm::ivec3 color;
    color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255);
    color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255);
    color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255);
    // Each thread writes one pixel location in the texture (textel).
    uchar4 out;
    out.w = 0;
    out.x = color.x;
    out.y = color.y;
    out.z = color.z;
    pbo[index] = out;
}
// Host-side scene handle; set in pathtraceInit, read by pathtrace/pathtraceFree.
static Scene* hst_scene = nullptr;
// Accumulation buffer: one vec3 per pixel, summed across iterations.
static glm::vec3* dev_image = nullptr;
static Geom* dev_geoms = nullptr;
static Material* dev_materials = nullptr;
// One PathSegment per pixel for the current iteration.
static PathSegment* dev_paths = nullptr;
static ShadeableIntersection* dev_intersections = nullptr;
// Cache for first bounce intersections for re-use (CACHE_FIRST mode).
static PathSegment* dev_first_paths = nullptr;
static ShadeableIntersection* dev_first_intersections = nullptr;
// Sobol sequence start point for each pixel.
static int* dev_sobol_seed = nullptr;
// Triangles of meshes, stored as consecutive vertex triplets.
static glm::vec3* dev_triangles = nullptr;
// Octree for culling; allocated only when the scene enables it.
static OctreeNodeDevice* dev_octree = nullptr;
// TODO: static variables for device memory, any extra info you need, etc
// Picks a random Sobol-sequence start offset in [0, 1023] for every pixel,
// seeded deterministically from the pixel coordinates.
__global__ void generateSeed(Camera cam, int* sobolSeed) {
    int col = (blockIdx.x * blockDim.x) + threadIdx.x;
    int row = (blockIdx.y * blockDim.y) + threadIdx.y;
    if (col >= cam.resolution.x || row >= cam.resolution.y) {
        return;
    }
    thrust::default_random_engine rng = makeSeededRandomEngine(0, col, row);
    thrust::uniform_int_distribution<int> u(0, 1023);
    sobolSeed[col + (row * cam.resolution.x)] = u(rng);
}
// Allocates and fills every device-side buffer needed by pathtrace() for the
// given scene. Must be called before pathtrace(); pair with pathtraceFree().
void pathtraceInit(Scene *scene) {
    hst_scene = scene;
    const Camera& cam = hst_scene->state.camera;
    const int pixelcount = cam.resolution.x * cam.resolution.y;
    // Prepare the octree for device if we are using culling
    if (scene->usingCulling) {
        std::vector<OctreeNodeDevice> temp = scene->prepareOctree();
        cudaMalloc(&dev_octree, temp.size() * sizeof(OctreeNodeDevice));
        cudaMemcpy(dev_octree, temp.data(), temp.size() * sizeof(OctreeNodeDevice), cudaMemcpyHostToDevice);
    }
    cudaMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom));
    cudaMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), cudaMemcpyHostToDevice);
    // Copy mesh triangles to device
    cudaMalloc(&dev_triangles, scene->triangles.size() * sizeof(glm::vec3));
    cudaMemcpy(dev_triangles, scene->triangles.data(), scene->triangles.size() * sizeof(glm::vec3), cudaMemcpyHostToDevice);
    // Accumulation buffer starts at zero; each pathtrace() call adds a sample.
    cudaMalloc(&dev_image, pixelcount * sizeof(glm::vec3));
    cudaMemset(dev_image, 0, pixelcount * sizeof(glm::vec3));
    cudaMalloc(&dev_paths, pixelcount * sizeof(PathSegment));
    cudaMalloc(&dev_materials, scene->materials.size() * sizeof(Material));
    cudaMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), cudaMemcpyHostToDevice);
    cudaMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection));
    cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
    // Cache for first bounce intersections for re-use
    cudaMalloc(&dev_first_paths, pixelcount * sizeof(PathSegment));
    cudaMalloc(&dev_first_intersections, pixelcount * sizeof(ShadeableIntersection));
    cudaMemset(dev_first_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
    // Sobol sequence start point for each pixel, filled by generateSeed below.
    cudaMalloc(&dev_sobol_seed, pixelcount * sizeof(int));
    const dim3 blockSize2d(8, 8);
    const dim3 blocksPerGrid2d(
        (cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
        (cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
    generateSeed << <blocksPerGrid2d, blockSize2d >> > (cam, dev_sobol_seed);
    // TODO: initialize any extra device memory you need
    checkCUDAError("pathtraceInit");
}
/**
 * Releases every device allocation made by pathtraceInit.
 * Safe to call more than once: each pointer is reset to nullptr after it is
 * freed, and cudaFree(nullptr) is a documented no-op — the original left the
 * pointers dangling, so a second call would free already-freed memory.
 */
void pathtraceFree() {
    cudaFree(dev_image);  // no-op if dev_image is null
    dev_image = nullptr;
    cudaFree(dev_paths);
    dev_paths = nullptr;
    cudaFree(dev_geoms);
    dev_geoms = nullptr;
    cudaFree(dev_materials);
    dev_materials = nullptr;
    cudaFree(dev_intersections);
    dev_intersections = nullptr;
    // Cache for first bounce intersections for re-use
    cudaFree(dev_first_paths);
    dev_first_paths = nullptr;
    cudaFree(dev_first_intersections);
    dev_first_intersections = nullptr;
    // Sobol sequence start point for each pixel
    cudaFree(dev_sobol_seed);
    dev_sobol_seed = nullptr;
    // Mesh triangles
    cudaFree(dev_triangles);
    dev_triangles = nullptr;
    // Octree is only allocated when culling is enabled.
    if (hst_scene != nullptr && hst_scene->usingCulling) {
        cudaFree(dev_octree);
        dev_octree = nullptr;
    }
    // TODO: clean up any extra device memory you created
    checkCUDAError("pathtraceFree");
}
/**
* Generate PathSegments with rays from the camera through the screen into the
* scene, which is the first bounce of rays.
*
* Antialiasing - add rays for sub-pixel sampling
* motion blur - jitter rays "in time"
* lens effect - jitter ray origin positions based on a lens
*/
__global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments)
{
    int x = (blockIdx.x * blockDim.x) + threadIdx.x;
    int y = (blockIdx.y * blockDim.y) + threadIdx.y;
    if (x < cam.resolution.x && y < cam.resolution.y) {
        int index = x + (y * cam.resolution.x);
        PathSegment & segment = pathSegments[index];
        segment.ray.origin = cam.position;
        // Throughput starts at the multiplicative identity (white).
        segment.color = glm::vec3(1.0f, 1.0f, 1.0f);
        // Antialiasing by jittering the ray: the iteration index selects one
        // cell of an ANTIALIASING x ANTIALIASING sub-pixel grid and the
        // sample is jittered uniformly within that cell (stratified).
        float rightAAOffset = 0.f;
        float upAAOffset = 0.f;
        if (ANTIALIASING != 0) {
            thrust::default_random_engine rng = makeSeededRandomEngine(iter, x, y);
            thrust::uniform_real_distribution<float> u01(0, 1);
            float antiLength = 1.f / ANTIALIASING;  // sub-pixel cell width
            int anti = iter % (ANTIALIASING * ANTIALIASING);  // cell index
            rightAAOffset = (anti % ANTIALIASING + u01(rng)) * antiLength;
            upAAOffset = (anti / ANTIALIASING + u01(rng)) * antiLength;
        }
        // Pixel offset from the image center, in pixels, plus jitter.
        float rightOffset = (float)x - (float)cam.resolution.x * 0.5f + rightAAOffset;
        float upOffset = (float)y - (float)cam.resolution.y * 0.5f + upAAOffset;
        segment.ray.direction = glm::normalize(cam.view
            - cam.right * cam.pixelLength.x * rightOffset
            - cam.up * cam.pixelLength.y * upOffset
        );
        segment.pixelIndex = index;
        segment.remainingBounces = traceDepth;
    }
}
// Compute intersections by walking the scene octree with an iterative DFS.
// One thread per path. A node's geoms/triangles are only tested when the ray
// hits the node's bounding box; subtrees whose boxes are missed are skipped.
// Fix: `t` is now reset to -1 for every geom so that a geom of an unhandled
// type (neither CUBE nor SPHERE) can no longer inherit a stale positive `t`
// from the previous iteration and register a false hit.
__global__ void computeIntersectionsOctree(
    int depth
    , int num_paths
    , PathSegment* pathSegments
    , Geom* geoms
    , ShadeableIntersection* intersections
    , glm::vec3* triangles
    , OctreeNodeDevice* octree
    , int meshMaterial
) {
    int path_index = blockIdx.x * blockDim.x + threadIdx.x;
    if (path_index < num_paths) {
        PathSegment pathSegment = pathSegments[path_index];
        // Closest-hit record
        glm::vec3 intersect_point;
        glm::vec3 normal;
        float t_min = FLT_MAX;
        int hit_geom_index = -1;
        bool outside = true;
        glm::vec3 tmp_intersect;
        glm::vec3 tmp_normal;
        // Iterative DFS state, limited to 8 levels: stack[l] is the node
        // visited at level l, branches[l] the child slot taken from it.
        // status == 0 means "descend", status == 1 means "backtrack".
        OctreeNodeDevice node = octree[0]; // Root
        OctreeNodeDevice stack[8];
        int branches[8];
        int level = 0;
        int status = 0;
        bool first = true;
        while (level >= 0) {
            if (status == 0) {
                if (!first) {
                    // Descend into the first existing child.
                    int j;
                    for (j = 0; j < 8; j++) {
                        if (node.children[j] != -1) {
                            node = octree[node.children[j]];
                            branches[level] = j;
                            level++;
                            break;
                        }
                    }
                    if (j == 8) { // No children: leaf node, backtrack.
                        status = 1;
                        level--;
                        continue;
                    }
                }
                else {
                    // Root is processed as-is on the very first pass.
                    first = false;
                }
            }
            else {
                // Backtrack: resume the parent at the next child slot.
                node = stack[level];
                int j;
                for (j = branches[level] + 1; j < 8; j++) {
                    if (node.children[j] != -1) {
                        node = octree[node.children[j]];
                        branches[level] = j;
                        level++;
                        break;
                    }
                }
                if (j == 8) { // All children exhausted at this level.
                    level--;
                    continue;
                }
            }
            // If there is intersection with current bounding box
            if (boundingBoxTest(node.minCorner, node.maxCorner, pathSegment.ray)) {
                for (int ct = 0, i = node.geomStart; ct < node.geomCount; ct++, i++) {
                    Geom& geom = geoms[i];
                    // Reset per geom: stays -1 (miss) for unhandled geom
                    // types, so a stale t cannot be mistaken for a hit.
                    float t = -1.0f;
                    if (geom.type == CUBE) {
                        t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
                    }
                    else if (geom.type == SPHERE) {
                        t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
                    }
                    if (t > 0.0f && t_min > t) {
                        t_min = t;
                        hit_geom_index = i;
                        intersect_point = tmp_intersect;
                        normal = tmp_normal;
                    }
                }
                // Triangles are stored as consecutive vertex triplets.
                for (int ct = 0, i = node.triangleStart; ct < node.triangleCount; ct++, i += 3) {
                    float t = triangleIntersectionTest(triangles[i], triangles[i + 1], triangles[i + 2],
                        pathSegment.ray, tmp_intersect, tmp_normal, outside);
                    if (t > 0.0f && t_min > t) {
                        t_min = t;
                        hit_geom_index = -2; // -2 marks the mesh
                        intersect_point = tmp_intersect;
                        normal = tmp_normal;
                    }
                }
                if (level == 8) {
                    // Stack capacity reached: treat as a leaf and backtrack.
                    status = 1;
                    level--;
                }
                else {
                    status = 0;
                    stack[level] = node;
                }
            }
            // If not, backtrack to parent node, and search for next child
            else {
                status = 1;
                level--;
            }
        }
        if (hit_geom_index == -1) {
            intersections[path_index].t = -1.0f;
        }
        else {
            // The ray hits something
            intersections[path_index].t = t_min;
            if (hit_geom_index == -2) {
                // All mesh triangles share the single mesh material.
                intersections[path_index].materialId = meshMaterial;
            }
            else {
                intersections[path_index].materialId = geoms[hit_geom_index].materialid;
            }
            intersections[path_index].surfaceNormal = normal;
        }
    }
}
// computeIntersections handles generating ray intersections ONLY.
// Generating new rays is handled in your shader(s).
// One thread per path; naive linear scan over every geom in the scene.
// Fix: `t` is now reset to -1 for every geom so an unhandled geom type can
// no longer inherit a stale (or uninitialized) positive `t` from a previous
// iteration and register a false hit.
__global__ void computeIntersections(
    int depth
    , int num_paths
    , PathSegment * pathSegments
    , Geom * geoms
    , int geoms_size
    , ShadeableIntersection * intersections
    , glm::vec3 * triangles
    ) {
    int path_index = blockIdx.x * blockDim.x + threadIdx.x;
    if (path_index < num_paths) {
        PathSegment pathSegment = pathSegments[path_index];
        // Closest-hit record
        glm::vec3 intersect_point;
        glm::vec3 normal;
        float t_min = FLT_MAX;
        int hit_geom_index = -1;
        bool outside = true;
        glm::vec3 tmp_intersect;
        glm::vec3 tmp_normal;
        // naive parse through global geoms
        for (int i = 0; i < geoms_size; i++) {
            Geom & geom = geoms[i];
            // Reset per geom: stays -1 (miss) if no test below applies.
            float t = -1.0f;
            if (geom.type == CUBE) {
                t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
            }
            else if (geom.type == SPHERE) {
                t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
            }
            else if (geom.type == MESH) {
                t = meshIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside, triangles);
            }
            // TODO: add more intersection tests here... triangle? metaball? CSG?
            // Keep the nearest positive hit seen so far.
            if (t > 0.0f && t_min > t) {
                t_min = t;
                hit_geom_index = i;
                intersect_point = tmp_intersect;
                normal = tmp_normal;
            }
        }
        if (hit_geom_index == -1) {
            intersections[path_index].t = -1.0f;
        }
        else {
            // The ray hits something
            intersections[path_index].t = t_min;
            intersections[path_index].materialId = geoms[hit_geom_index].materialid;
            intersections[path_index].surfaceNormal = normal;
        }
    }
}
// LOOK: "fake" shader demonstrating what you might do with the info in
// a ShadeableIntersection, as well as how to use thrust's random number
// generator. Observe that since the thrust random number generator basically
// adds "noise" to the iteration, the image should start off noisy and get
// cleaner as more iterations are computed.
//
// Note that this shader does NOT do a BSDF evaluation!
// Your shaders should handle that - this can allow techniques such as
// bump mapping.
// Demo shader (no real BSDF evaluation). One thread per path. Misses are
// colored black; emissive hits multiply in the emission; everything else
// gets a fixed-direction pseudo-lighting term with per-path noise. All
// branches terminate the path after this single shading step.
__global__ void shadeFakeMaterial (
    int iter
    , int num_paths
    , ShadeableIntersection * shadeableIntersections
    , PathSegment * pathSegments
    , Material * materials
    ) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < num_paths) {
        ShadeableIntersection intersection = shadeableIntersections[idx];
        if (intersection.t > 0.0f) { // if the intersection exists...
            // Set up the RNG
            // LOOK: this is how you use thrust's RNG! Please look at
            // makeSeededRandomEngine as well.
            thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0);
            thrust::uniform_real_distribution<float> u01(0, 1);
            Material material = materials[intersection.materialId];
            glm::vec3 materialColor = material.color;
            // If the material indicates that the object was a light, "light" the ray
            if (material.emittance > 0.0f) {
                pathSegments[idx].color *= (materialColor * material.emittance);
                pathSegments[idx].remainingBounces = 0;
            }
            // Otherwise, do some pseudo-lighting computation. This is actually more
            // like what you would expect from shading in a rasterizer like OpenGL.
            // TODO: replace this! you should be able to start with basically a one-liner
            else {
                // Fixed "up" light direction blended with a distance falloff.
                float lightTerm = glm::dot(intersection.surfaceNormal, glm::vec3(0.0f, 1.0f, 0.0f));
                pathSegments[idx].color *= (materialColor * lightTerm) * 0.3f + ((1.0f - intersection.t * 0.02f) * materialColor) * 0.7f;
                pathSegments[idx].color *= u01(rng); // apply some noise because why not
                pathSegments[idx].remainingBounces = 0;
            }
            // If there was no intersection, color the ray black.
            // Lots of renderers use 4 channel color, RGBA, where A = alpha, often
            // used for opacity, in which case they can indicate "no opacity".
            // This can be useful for post-processing and image compositing.
        } else {
            pathSegments[idx].color = glm::vec3(0.0f);
        }
    }
}
// Shades paths against their intersections and spawns the next bounce.
// One thread per path. Misses terminate with black; emissive hits terminate
// and are accumulated straight into `image` (terminated paths are compacted
// away afterwards, so a later gather pass could not see them); other hits
// are scattered in place by scatterRay.
__global__ void shadeMaterial(
    int iter
    , int* sobolSeed
    , int num_paths
    , ShadeableIntersection* shadeableIntersections
    , PathSegment* pathSegments
    , Material* materials
    , glm::vec3* image
) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < num_paths) {
        ShadeableIntersection intersection = shadeableIntersections[idx];
        if (intersection.t > 0.0f) { // if the intersection exists...
            // Set up the RNG, seeded per (iteration, path).
            thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0);
            Material material = materials[intersection.materialId];
            glm::vec3 materialColor = material.color;
            // If the material indicates that the object was a light, "light" the ray
            if (material.emittance > 0.0f) {
                pathSegments[idx].color *= (materialColor * material.emittance);
                pathSegments[idx].remainingBounces = 0;
                // Accumulate here: stream compaction removes this path before
                // any gather kernel could collect its contribution.
                image[pathSegments[idx].pixelIndex] += pathSegments[idx].color;
            }
            else {
                // scatterRay updates ray, color, and remainingBounces in
                // place; sobolSeed[idx]++ also advances this path's cursor.
                scatterRay(
                    pathSegments[idx],
                    getPointOnRay(pathSegments[idx].ray, intersection.t),
                    intersection.surfaceNormal,
                    material,
                    rng,
                    sobolSeed[idx]++
                );
                if (pathSegments[idx].remainingBounces <= 0) {
                    // Path died without reaching a light: no contribution.
                    pathSegments[idx].color = glm::vec3(0.f);
                }
            }
        }
        else {
            // No intersection: color the ray black and terminate it.
            pathSegments[idx].color = glm::vec3(0.0f);
            pathSegments[idx].remainingBounces = 0;
        }
    }
}
// Add the current iteration's output to the overall image
// Accumulates each surviving path's color into its pixel of the image.
__global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths)
{
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (i >= nPaths)
    {
        return;
    }
    PathSegment seg = iterationPaths[i];
    image[seg.pixelIndex] += seg.color;
}
/**
* Wrapper for the __global__ call that sets up the kernel calls and does a ton
* of memory management
*/
void pathtrace(uchar4 *pbo, int frame, int iter) {
const int traceDepth = hst_scene->state.traceDepth;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
// 2D block for generating ray from camera
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
(cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
// 1D block for path tracing
const int blockSize1d = 128;
///////////////////////////////////////////////////////////////////////////
// Recap:
// * Initialize array of path rays (using rays that come out of the camera)
// * You can pass the Camera object to that kernel.
// * Each path ray must carry at minimum a (ray, color) pair,
// * where color starts as the multiplicative identity, white = (1, 1, 1).
// * This has already been done for you.
// * For each depth:
// * Compute an intersection in the scene for each path ray.
// A very naive version of this has been implemented for you, but feel
// free to add more primitives and/or a better algorithm.
// Currently, intersection distance is recorded as a parametric distance,
// t, or a "distance along the ray." t = -1.0 indicates no intersection.
// * Color is attenuated (multiplied) by reflections off of any object
// * TODO: Stream compact away all of the terminated paths.
// You may use either your implementation or `thrust::remove_if` or its
// cousins.
// * Note that you can't really use a 2D kernel launch any more - switch
// to 1D.
// * TODO: Shade the rays that intersected something or didn't bottom out.
// That is, color the ray by performing a color computation according
// to the shader, then generate a new ray to continue the ray path.
// We recommend just updating the ray's PathSegment in place.
// Note that this step may come before or after stream compaction,
// since some shaders you write may also cause a path to terminate.
// * Finally, add this iteration's results to the image. This has been done
// for you.
// Perform one iteration of path tracing
#if CACHE_FIRST == 1 && ANTIALIASING == 0
if (iter == 1) {
generateRayFromCamera << <blocksPerGrid2d, blockSize2d >> >
(cam, iter, traceDepth, dev_paths);
checkCUDAError("generate camera ray");
cudaMemcpy(dev_first_paths, dev_paths,
pixelcount * sizeof(PathSegment), cudaMemcpyDeviceToDevice);
}
else {
cudaMemcpy(dev_paths, dev_first_paths,
pixelcount * sizeof(PathSegment), cudaMemcpyDeviceToDevice);
cudaMemcpy(dev_intersections, dev_first_intersections,
pixelcount * sizeof(ShadeableIntersection), cudaMemcpyDeviceToDevice);
}
#else
generateRayFromCamera << <blocksPerGrid2d, blockSize2d >> >
(cam, iter, traceDepth, dev_paths);
checkCUDAError("generate camera ray");
#endif // CACHE_FIRST
int depth = 0;
PathSegment* dev_path_end = dev_paths + pixelcount;
int num_paths = dev_path_end - dev_paths;
// --- PathSegment Tracing Stage ---
// Shoot ray into scene, bounce between objects, push shading chunks
bool iterationComplete = false;
while (!iterationComplete) {
dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d;
if (CACHE_FIRST == 0 || ANTIALIASING != 0 || (iter == 1 || depth > 0)) {
// clean shading chunks
cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// tracing
if (hst_scene->usingCulling) {
computeIntersectionsOctree << <numblocksPathSegmentTracing, blockSize1d >> > (
depth
, num_paths
, dev_paths
, dev_geoms
, dev_intersections
, dev_triangles
, dev_octree
, hst_scene->meshMaterial
);
}
else {
computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > (
depth
, num_paths
, dev_paths
, dev_geoms
, hst_scene->geoms.size()
, dev_intersections
, dev_triangles
);
}
checkCUDAError("trace one bounce");
if (CACHE_FIRST == 1 && ANTIALIASING == 0 && iter == 1 && depth == 0) {
cudaMemcpy(dev_first_intersections, dev_intersections,
pixelcount * sizeof(ShadeableIntersection), cudaMemcpyDeviceToDevice);
}
}
cudaDeviceSynchronize();
depth++;
// --- Shading Stage ---
// Shade path segments based on intersections and generate new rays by
// evaluating the BSDF.
// Start off with just a big kernel that handles all the different
// materials you have in the scenefile.
// TODO: compare between directly shading the path segments and shading
// path segments that have been reshuffled to be contiguous in memory.
#if MATERIAL_SORT == 1
thrust::sort_by_key(thrust::device,
dev_intersections, dev_intersections + num_paths, dev_paths,
path_sort());
#endif // MATERIAL_SORT
shadeMaterial<<<numblocksPathSegmentTracing, blockSize1d>>> (
iter,
dev_sobol_seed,
num_paths,
dev_intersections,
dev_paths,
dev_materials,
dev_image
);
//dev_path_end = thrust::partition(thrust::device, dev_paths,
// dev_paths + num_paths, path_continue());
dev_path_end = thrust::remove_if(thrust::device,
dev_paths, dev_paths + num_paths, path_terminated());
num_paths = dev_path_end - dev_paths;
if (num_paths <= 0) { // Stop based on stream compaction
iterationComplete = true;
}
}
// WATCH OUT
// If we use thrust::remove_if, it would be faster than thrust::partition.
// But the elements of which pred is true will be undefined. So we cannot gather
// the contribution here. Instead, we add it directly in shadeMaterial when we
// encounter a light source.
// Assemble this iteration and apply it to the image
//dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d;
//finalGather<<<numBlocksPixels, blockSize1d>>>(pixelcount, dev_image, dev_paths);
///////////////////////////////////////////////////////////////////////////
// Send results to OpenGL buffer for rendering
sendImageToPBO<<<blocksPerGrid2d, blockSize2d>>>(pbo, cam.resolution, iter, dev_image);
// Retrieve image from GPU
cudaMemcpy(hst_scene->state.image.data(), dev_image,
pixelcount * sizeof(glm::vec3), cudaMemcpyDeviceToHost);
checkCUDAError("pathtrace");
}
|
e7289ac0a7152168a015656d47c1fb0995bba2d5.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <quda_internal.h>
#include <color_spinor_field.h>
#include <blas_quda.h>
#include <test_util.h>
// include because of nasty globals used in the tests
#include <dslash_util.h>
// google test
#include <gtest.h>
extern int test_type;
extern QudaPrecision prec;
extern QudaDslashType dslash_type;
extern QudaInverterType inv_type;
extern int nvec;
extern int device;
extern int xdim;
extern int ydim;
extern int zdim;
extern int tdim;
extern int gridsize_from_cmdline[];
extern int niter;
extern bool verify_results;
extern int Nsrc;
extern int Msrc;
extern QudaSolveType solve_type;
extern void usage(char** );
const int Nkernels = 42;
using namespace quda;
ColorSpinorField *xH, *yH, *zH, *wH, *vH, *hH, *lH;
ColorSpinorField *xD, *yD, *zD, *wD, *vD, *hD, *lD, *xmD, *ymD, *zmD;
std::vector<cpuColorSpinorField*> xmH;
std::vector<cpuColorSpinorField*> ymH;
std::vector<cpuColorSpinorField*> zmH;
int Nspin;
int Ncolor;
void setPrec(ColorSpinorParam ¶m, const QudaPrecision precision)
{
param.precision = precision;
if (Nspin == 1 || Nspin == 2 || precision == QUDA_DOUBLE_PRECISION) {
param.fieldOrder = QUDA_FLOAT2_FIELD_ORDER;
} else {
param.fieldOrder = QUDA_FLOAT4_FIELD_ORDER;
}
}
// Print the lattice dimensions, spin/color content, and the grid partition
// layout for this run (one line per category, formatted for log scraping).
void
display_test_info()
{
  printfQuda("running the following test:\n");
  printfQuda("S_dimension T_dimension Nspin Ncolor\n");
  printfQuda("%3d /%3d / %3d %3d %d %d\n", xdim, ydim, zdim, tdim, Nspin, Ncolor);
  printfQuda("Grid partition info: X Y Z T\n");
  printfQuda(" %d %d %d %d\n",
             dimPartitioned(0),
             dimPartitioned(1),
             dimPartitioned(2),
             dimPartitioned(3));
  return;
}
int Nprec = 3;
bool skip_kernel(int precision, int kernel) {
// if we've selected a given kernel then make sure we only run that
if (test_type != -1 && kernel != test_type) return true;
// if we've selected a given precision then make sure we only run that
QudaPrecision this_prec = precision == 2 ? QUDA_DOUBLE_PRECISION : precision == 1 ? QUDA_SINGLE_PRECISION : QUDA_HALF_PRECISION;
if (prec != QUDA_INVALID_PRECISION && this_prec != prec) return true;
if ( Nspin == 2 && precision == 0) {
// avoid half precision tests if doing coarse fields
return true;
} else if (Nspin == 2 && kernel == 1) {
// avoid low-precision copy if doing coarse fields
return true;
} else if (Ncolor != 3 && (kernel == 31 || kernel == 32)) {
// only benchmark heavy-quark norm if doing 3 colors
return true;
} else if ((Nprec < 3) && (kernel == 0)) {
// only benchmark high-precision copy() if double is supported
return true;
}
return false;
}
// Allocate and randomize every host field, then allocate the matching device
// fields at the precision selected by `prec` (0=half, 1=single, 2=double) and
// copy the host data over where the precision combination supports it.
// NOTE(review): the `prec` parameter shadows the file-global QudaPrecision
// `prec`; inside this function `prec` is always the small integer index.
void initFields(int prec)
{
  // precisions used for the source field in the copyCuda() benchmark
  QudaPrecision high_aux_prec = QUDA_INVALID_PRECISION;
  QudaPrecision low_aux_prec = QUDA_INVALID_PRECISION;
  ColorSpinorParam param;
  param.nColor = Ncolor;
  param.nSpin = Nspin;
  param.nDim = 4; // number of spacetime dimensions
  param.pad = 0; // padding must be zero for cpu fields
  // site subset follows the requested solve type (parity for even/odd solves)
  if (solve_type == QUDA_DIRECT_PC_SOLVE) {
    param.siteSubset = QUDA_PARITY_SITE_SUBSET;
  } else if (solve_type == QUDA_DIRECT_SOLVE) {
    param.siteSubset = QUDA_FULL_SITE_SUBSET;
  } else {
    errorQuda("Unexpected solve_type=%d\n", solve_type);
  }
  // a parity field holds only half of the X dimension
  if (param.siteSubset == QUDA_PARITY_SITE_SUBSET) param.x[0] = xdim/2;
  else param.x[0] = xdim;
  param.x[1] = ydim;
  param.x[2] = zdim;
  param.x[3] = tdim;
  param.siteOrder = QUDA_EVEN_ODD_SITE_ORDER;
  param.gammaBasis = QUDA_DEGRAND_ROSSI_GAMMA_BASIS;
  // host reference fields are always double precision, space-spin-color order
  param.precision = QUDA_DOUBLE_PRECISION;
  param.fieldOrder = QUDA_SPACE_SPIN_COLOR_FIELD_ORDER;
  param.create = QUDA_ZERO_FIELD_CREATE;
  vH = new cpuColorSpinorField(param);
  wH = new cpuColorSpinorField(param);
  xH = new cpuColorSpinorField(param);
  yH = new cpuColorSpinorField(param);
  zH = new cpuColorSpinorField(param);
  hH = new cpuColorSpinorField(param);
  lH = new cpuColorSpinorField(param);
  // create composite fields
  // xmH = new cpuColorSpinorField(param);
  // ymH = new cpuColorSpinorField(param);
  xmH.reserve(Nsrc);
  for (int cid = 0; cid < Nsrc; cid++) xmH.push_back(new cpuColorSpinorField(param));
  ymH.reserve(Msrc);
  for (int cid = 0; cid < Msrc; cid++) ymH.push_back(new cpuColorSpinorField(param));
  zmH.reserve(Nsrc);
  for (int cid = 0; cid < Nsrc; cid++) zmH.push_back(new cpuColorSpinorField(param));
  // fill the host fields with random source data
  static_cast<cpuColorSpinorField*>(vH)->Source(QUDA_RANDOM_SOURCE, 0, 0, 0);
  static_cast<cpuColorSpinorField*>(wH)->Source(QUDA_RANDOM_SOURCE, 0, 0, 0);
  static_cast<cpuColorSpinorField*>(xH)->Source(QUDA_RANDOM_SOURCE, 0, 0, 0);
  static_cast<cpuColorSpinorField*>(yH)->Source(QUDA_RANDOM_SOURCE, 0, 0, 0);
  static_cast<cpuColorSpinorField*>(zH)->Source(QUDA_RANDOM_SOURCE, 0, 0, 0);
  static_cast<cpuColorSpinorField*>(hH)->Source(QUDA_RANDOM_SOURCE, 0, 0, 0);
  static_cast<cpuColorSpinorField*>(lH)->Source(QUDA_RANDOM_SOURCE, 0, 0, 0);
  for(int i=0; i<Nsrc; i++){
    static_cast<cpuColorSpinorField*>(xmH[i])->Source(QUDA_RANDOM_SOURCE, 0, 0, 0);
  }
  for(int i=0; i<Msrc; i++){
    static_cast<cpuColorSpinorField*>(ymH[i])->Source(QUDA_RANDOM_SOURCE, 0, 0, 0);
  }
  // Now set the parameters for the cuda fields
  //param.pad = xdim*ydim*zdim/2;
  if (param.nSpin == 4) param.gammaBasis = QUDA_UKQCD_GAMMA_BASIS;
  param.create = QUDA_ZERO_FIELD_CREATE;
  // the two auxiliary precisions feed the copy kernels (cases 0 and 1)
  switch(prec) {
  case 0:
    setPrec(param, QUDA_HALF_PRECISION);
    high_aux_prec = QUDA_DOUBLE_PRECISION;
    low_aux_prec = QUDA_SINGLE_PRECISION;
    break;
  case 1:
    setPrec(param, QUDA_SINGLE_PRECISION);
    high_aux_prec = QUDA_DOUBLE_PRECISION;
    low_aux_prec = QUDA_HALF_PRECISION;
    break;
  case 2:
    setPrec(param, QUDA_DOUBLE_PRECISION);
    high_aux_prec = QUDA_SINGLE_PRECISION;
    low_aux_prec = QUDA_HALF_PRECISION;
    break;
  default:
    errorQuda("Precision option not defined");
  }
  checkCudaError();
  vD = new cudaColorSpinorField(param);
  wD = new cudaColorSpinorField(param);
  xD = new cudaColorSpinorField(param);
  yD = new cudaColorSpinorField(param);
  zD = new cudaColorSpinorField(param);
  param.is_composite = true;
  param.is_component = false;
  // create composite fields
  param.composite_dim = Nsrc;
  xmD = new cudaColorSpinorField(param);
  param.composite_dim = Msrc;
  ymD = new cudaColorSpinorField(param);
  param.composite_dim = Nsrc;
  zmD = new cudaColorSpinorField(param);
  param.is_composite = false;
  param.is_component = false;
  param.composite_dim = 1;
  setPrec(param, high_aux_prec);
  hD = new cudaColorSpinorField(param);
  setPrec(param, low_aux_prec);
  lD = new cudaColorSpinorField(param);
  // check for successful allocation
  checkCudaError();
  // only do copy if not doing half precision with mg
  bool flag = !(param.nSpin == 2 &&
                (prec == 0 || low_aux_prec == QUDA_HALF_PRECISION) );
  if ( flag ) {
    *vD = *vH;
    *wD = *wH;
    *xD = *xH;
    *yD = *yH;
    *zD = *zH;
    *hD = *hH;
    *lD = *lH;
    // for (int i=0; i < Nsrc; i++){
    //   xmD->Component(i) = *(xmH[i]);
    //   ymD->Component(i) = *(ymH[i]);
    // }
    // *ymD = *ymH;
  }
}
void freeFields()
{
// release memory
delete vD;
delete wD;
delete xD;
delete yD;
delete zD;
delete hD;
delete lD;
delete xmD;
delete ymD;
delete zmD;
// release memory
delete vH;
delete wH;
delete xH;
delete yH;
delete zH;
delete hH;
delete lH;
for (int i=0; i < Nsrc; i++) delete xmH[i];
for (int i=0; i < Msrc; i++) delete ymH[i];
for (int i=0; i < Nsrc; i++) delete zmH[i];
xmH.clear();
ymH.clear();
zmH.clear();
}
// Time `niter` back-to-back invocations of blas kernel `kernel` on the
// device fields and return the elapsed time in seconds. Timing uses GPU
// events bracketing the launch loop, so only device work is measured.
//
// The scalar coefficients are dummies — the benchmark cares about
// throughput, not numerics — but they are initialized to finite values
// (mirroring test()) so the blas calls never read indeterminate memory,
// which the previous uninitialized declarations did.
double benchmark(int kernel, const int niter) {
  double a = M_PI, b = M_PI*exp(1.0), c = sqrt(M_PI);
  quda::Complex a2(a, b), b2(b, -c), c2(a+b, c*a);
  // coefficient matrices for the block/multi-blas kernels; new[] of
  // quda::Complex default-constructs each element to (0, 0)
  quda::Complex * A = new quda::Complex[Nsrc*Msrc];
  quda::Complex * B = new quda::Complex[Nsrc*Msrc];
  quda::Complex * C = new quda::Complex[Nsrc*Msrc];
  quda::Complex * A2 = new quda::Complex[Nsrc*Nsrc]; // for the block cDotProductNorm test
  hipEvent_t start, end;
  hipEventCreate(&start);
  hipEventCreate(&end);
  hipEventRecord(start, 0);
  {
    switch (kernel) {
    case 0:
      for (int i=0; i < niter; ++i) blas::copy(*yD, *hD);
      break;
    case 1:
      for (int i=0; i < niter; ++i) blas::copy(*yD, *lD);
      break;
    case 2:
      for (int i=0; i < niter; ++i) blas::axpby(a, *xD, b, *yD);
      break;
    case 3:
      for (int i=0; i < niter; ++i) blas::xpy(*xD, *yD);
      break;
    case 4:
      for (int i=0; i < niter; ++i) blas::axpy(a, *xD, *yD);
      break;
    case 5:
      for (int i=0; i < niter; ++i) blas::xpay(*xD, a, *yD);
      break;
    case 6:
      for (int i=0; i < niter; ++i) blas::mxpy(*xD, *yD);
      break;
    case 7:
      for (int i=0; i < niter; ++i) blas::ax(a, *xD);
      break;
    case 8:
      for (int i=0; i < niter; ++i) blas::caxpy(a2, *xD, *yD);
      break;
    case 9:
      for (int i=0; i < niter; ++i) blas::caxpby(a2, *xD, b2, *yD);
      break;
    case 10:
      for (int i=0; i < niter; ++i) blas::cxpaypbz(*xD, a2, *yD, b2, *zD);
      break;
    case 11:
      for (int i=0; i < niter; ++i) blas::axpyBzpcx(a, *xD, *yD, b, *zD, c);
      break;
    case 12:
      for (int i=0; i < niter; ++i) blas::axpyZpbx(a, *xD, *yD, *zD, b);
      break;
    case 13:
      for (int i=0; i < niter; ++i) blas::caxpbypzYmbw(a2, *xD, b2, *yD, *zD, *wD);
      break;
    case 14:
      for (int i=0; i < niter; ++i) blas::cabxpyAx(a, b2, *xD, *yD);
      break;
    case 15:
      for (int i=0; i < niter; ++i) blas::caxpbypz(a2, *xD, b2, *yD, *zD);
      break;
    case 16:
      for (int i=0; i < niter; ++i) blas::caxpbypczpw(a2, *xD, b2, *yD, c2, *zD, *wD);
      break;
    case 17:
      for (int i=0; i < niter; ++i) blas::caxpyXmaz(a2, *xD, *yD, *zD);
      break;
    // reductions returning double
    case 18:
      for (int i=0; i < niter; ++i) blas::norm2(*xD);
      break;
    case 19:
      for (int i=0; i < niter; ++i) blas::reDotProduct(*xD, *yD);
      break;
    case 20:
      for (int i=0; i < niter; ++i) blas::axpyNorm(a, *xD, *yD);
      break;
    case 21:
      for (int i=0; i < niter; ++i) blas::xmyNorm(*xD, *yD);
      break;
    case 22:
      for (int i=0; i < niter; ++i) blas::caxpyNorm(a2, *xD, *yD);
      break;
    case 23:
      for (int i=0; i < niter; ++i) blas::caxpyXmazNormX(a2, *xD, *yD, *zD);
      break;
    case 24:
      for (int i=0; i < niter; ++i) blas::cabxpyAxNorm(a, b2, *xD, *yD);
      break;
    // reductions returning double2
    case 25:
      for (int i=0; i < niter; ++i) blas::cDotProduct(*xD, *yD);
      break;
    case 26:
      for (int i=0; i < niter; ++i) blas::xpaycDotzy(*xD, a, *yD, *zD);
      break;
    case 27:
      for (int i=0; i < niter; ++i) blas::caxpyDotzy(a2, *xD, *yD, *zD);
      break;
    // reductions returning double3
    case 28:
      for (int i=0; i < niter; ++i) blas::cDotProductNormA(*xD, *yD);
      break;
    case 29:
      for (int i=0; i < niter; ++i) blas::cDotProductNormB(*xD, *yD);
      break;
    case 30:
      for (int i=0; i < niter; ++i) blas::caxpbypzYmbwcDotProductUYNormY(a2, *xD, b2, *yD, *zD, *wD, *vD);
      break;
    case 31:
      for (int i=0; i < niter; ++i) blas::HeavyQuarkResidualNorm(*xD, *yD);
      break;
    case 32:
      for (int i=0; i < niter; ++i) blas::xpyHeavyQuarkResidualNorm(*xD, *yD, *zD);
      break;
    case 33:
      for (int i=0; i < niter; ++i) blas::tripleCGReduction(*xD, *yD, *zD);
      break;
    case 34:
      for (int i=0; i < niter; ++i) blas::tripleCGUpdate(a, b, *xD, *yD, *zD, *wD);
      break;
    case 35:
      for (int i=0; i < niter; ++i) blas::axpyReDot(a, *xD, *yD);
      break;
    // block / multi-blas kernels
    case 36:
      for (int i=0; i < niter; ++i) blas::caxpy(A, *xmD, *ymD);
      break;
    case 37:
      for (int i=0; i < niter; ++i) blas::axpyBzpcx((double*)A, xmD->Components(), zmD->Components(), (double*)B, *yD, (double*)C);
      break;
    case 38:
      for (int i=0; i < niter; ++i) blas::caxpyBxpz(a2, *xD, *yD, b2, *zD);
      break;
    case 39:
      for (int i=0; i < niter; ++i) blas::caxpyBzpx(a2, *xD, *yD, b2, *zD);
      break;
    case 40:
      for (int i=0; i < niter; ++i) blas::cDotProduct(A2, xmD->Components(), xmD->Components());
      break;
    case 41:
      for (int i=0; i < niter; ++i) blas::cDotProduct(A, xmD->Components(), ymD->Components());
      break;
    default:
      errorQuda("Undefined blas kernel %d\n", kernel);
    }
  }
  hipEventRecord(end, 0);
  hipEventSynchronize(end);
  float runTime;
  hipEventElapsedTime(&runTime, start, end); // milliseconds
  hipEventDestroy(start);
  hipEventDestroy(end);
  delete[] A;
  delete[] B;
  delete[] C;
  delete[] A2;
  double secs = runTime / 1000;
  return secs;
}
// Relative deviation of the device field from the host reference for field a:
// |norm2(aD) - norm2(aH)| / norm2(aH)
#define ERROR(a) fabs(blas::norm2(*a##D) - blas::norm2(*a##H)) / blas::norm2(*a##H)

// Correctness check for blas kernel `kernel`: run the identical operation on
// the device fields (*D) and the host reference fields (*H) starting from the
// same inputs, then return a relative deviation (0 means exact agreement).
// NOTE(review): several cases (17, 22, 23, 27, 38, 39) pass the real scalar
// `a` where benchmark() passes the complex `a2`; host and device receive the
// same argument, so the comparison is still self-consistent — presumably via
// an implicit double -> quda::Complex conversion (confirm in blas_quda.h).
double test(int kernel) {
  // fixed, irrational-valued coefficients so no cancellation is accidental
  double a = M_PI, b = M_PI*exp(1.0), c = sqrt(M_PI);
  quda::Complex a2(a, b), b2(b, -c), c2(a+b, c*a);
  double error = 0;
  // coefficient matrices for the block/multi-blas kernels
  quda::Complex * A = new quda::Complex[Nsrc*Msrc];
  quda::Complex * B = new quda::Complex[Nsrc*Msrc];
  quda::Complex * C = new quda::Complex[Nsrc*Msrc];
  quda::Complex * A2 = new quda::Complex[Nsrc*Nsrc]; // for the block cDotProductNorm test
  quda::Complex * B2 = new quda::Complex[Nsrc*Nsrc]; // for the block cDotProductNorm test
  // deterministic, index-dependent fills so every matrix entry is distinct
  for(int i=0; i < Nsrc*Msrc; i++){
    A[i] = a2* (1.0*((i/Nsrc) + i)) + b2 * (1.0*i) + c2 *(1.0*(Nsrc*Msrc/2-i));
    B[i] = a2* (1.0*((i/Nsrc) + i)) - b2 * (M_PI*i) + c2 *(1.0*(Nsrc*Msrc/2-i));
    C[i] = a2* (1.0*((M_PI/Nsrc) + i)) + b2 * (1.0*i) + c2 *(1.0*(Nsrc*Msrc/2-i));
  }
  for(int i=0; i < Nsrc*Nsrc; i++){
    A2[i] = a2* (1.0*((i/Nsrc) + i)) + b2 * (1.0*i) + c2 *(1.0*(Nsrc*Nsrc/2-i));
    B2[i] = a2* (1.0*((i/Nsrc) + i)) - b2 * (M_PI*i) + c2 *(1.0*(Nsrc*Nsrc/2-i));
  }
  // A[0] = a2;
  // A[1] = 0.;
  // A[2] = 0.;
  // A[3] = 0.;
  switch (kernel) {
  case 0:
    *hD = *hH;
    blas::copy(*yD, *hD);
    blas::copy(*yH, *hH);
    error = ERROR(y);
    break;
  case 1:
    *lD = *lH;
    blas::copy(*yD, *lD);
    blas::copy(*yH, *lH);
    error = ERROR(y);
    break;
  case 2:
    *xD = *xH;
    *yD = *yH;
    blas::axpby(a, *xD, b, *yD);
    blas::axpby(a, *xH, b, *yH);
    error = ERROR(y);
    break;
  case 3:
    *xD = *xH;
    *yD = *yH;
    blas::xpy(*xD, *yD);
    blas::xpy(*xH, *yH);
    error = ERROR(y);
    break;
  case 4:
    *xD = *xH;
    *yD = *yH;
    blas::axpy(a, *xD, *yD);
    blas::axpy(a, *xH, *yH);
    // NOTE(review): this device-to-host copy into zH looks unused by the
    // error computation below — possibly a leftover; confirm intent.
    *zH = *yD;
    error = ERROR(y);
    break;
  case 5:
    *xD = *xH;
    *yD = *yH;
    blas::xpay(*xD, a, *yD);
    blas::xpay(*xH, a, *yH);
    error = ERROR(y);
    break;
  case 6:
    *xD = *xH;
    *yD = *yH;
    blas::mxpy(*xD, *yD);
    blas::mxpy(*xH, *yH);
    error = ERROR(y);
    break;
  case 7:
    *xD = *xH;
    blas::ax(a, *xD);
    blas::ax(a, *xH);
    error = ERROR(x);
    break;
  case 8:
    *xD = *xH;
    *yD = *yH;
    blas::caxpy(a2, *xD, *yD);
    blas::caxpy(a2, *xH, *yH);
    error = ERROR(y);
    break;
  case 9:
    *xD = *xH;
    *yD = *yH;
    blas::caxpby(a2, *xD, b2, *yD);
    blas::caxpby(a2, *xH, b2, *yH);
    error = ERROR(y);
    break;
  case 10:
    *xD = *xH;
    *yD = *yH;
    *zD = *zH;
    blas::cxpaypbz(*xD, a2, *yD, b2, *zD);
    blas::cxpaypbz(*xH, a2, *yH, b2, *zH);
    error = ERROR(z);
    break;
  case 11:
    *xD = *xH;
    *yD = *yH;
    *zD = *zH;
    blas::axpyBzpcx(a, *xD, *yD, b, *zD, c);
    blas::axpyBzpcx(a, *xH, *yH, b, *zH, c);
    error = ERROR(x) + ERROR(y);
    break;
  case 12:
    *xD = *xH;
    *yD = *yH;
    *zD = *zH;
    blas::axpyZpbx(a, *xD, *yD, *zD, b);
    blas::axpyZpbx(a, *xH, *yH, *zH, b);
    error = ERROR(x) + ERROR(y);
    break;
  case 13:
    *xD = *xH;
    *yD = *yH;
    *zD = *zH;
    *wD = *wH;
    blas::caxpbypzYmbw(a2, *xD, b2, *yD, *zD, *wD);
    blas::caxpbypzYmbw(a2, *xH, b2, *yH, *zH, *wH);
    error = ERROR(z) + ERROR(y);
    break;
  case 14:
    *xD = *xH;
    *yD = *yH;
    blas::cabxpyAx(a, b2, *xD, *yD);
    blas::cabxpyAx(a, b2, *xH, *yH);
    error = ERROR(y) + ERROR(x);
    break;
  case 15:
    *xD = *xH;
    *yD = *yH;
    *zD = *zH;
    {blas::caxpbypz(a2, *xD, b2, *yD, *zD);
      blas::caxpbypz(a2, *xH, b2, *yH, *zH);
      error = ERROR(z); }
    break;
  case 16:
    *xD = *xH;
    *yD = *yH;
    *zD = *zH;
    *wD = *wH;
    {blas::caxpbypczpw(a2, *xD, b2, *yD, c2, *zD, *wD);
      blas::caxpbypczpw(a2, *xH, b2, *yH, c2, *zH, *wH);
      error = ERROR(w); }
    break;
  case 17:
    *xD = *xH;
    *yD = *yH;
    *zD = *zH;
    {blas::caxpyXmaz(a, *xD, *yD, *zD);
      blas::caxpyXmaz(a, *xH, *yH, *zH);
      error = ERROR(y) + ERROR(x);}
    break;
  // reductions returning double
  case 18:
    *xD = *xH;
    *yH = *xD;
    error = fabs(blas::norm2(*xD) - blas::norm2(*xH)) / blas::norm2(*xH);
    break;
  case 19:
    *xD = *xH;
    *yD = *yH;
    error = fabs(blas::reDotProduct(*xD, *yD) - blas::reDotProduct(*xH, *yH)) / fabs(blas::reDotProduct(*xH, *yH));
    break;
  case 20:
    *xD = *xH;
    *yD = *yH;
    {double d = blas::axpyNorm(a, *xD, *yD);
      double h = blas::axpyNorm(a, *xH, *yH);
      error = ERROR(y) + fabs(d-h)/fabs(h);}
    break;
  case 21:
    *xD = *xH;
    *yD = *yH;
    {double d = blas::xmyNorm(*xD, *yD);
      double h = blas::xmyNorm(*xH, *yH);
      error = ERROR(y) + fabs(d-h)/fabs(h);}
    break;
  case 22:
    *xD = *xH;
    *yD = *yH;
    {double d = blas::caxpyNorm(a, *xD, *yD);
      double h = blas::caxpyNorm(a, *xH, *yH);
      error = ERROR(y) + fabs(d-h)/fabs(h);}
    break;
  case 23:
    *xD = *xH;
    *yD = *yH;
    *zD = *zH;
    {double d = blas::caxpyXmazNormX(a, *xD, *yD, *zD);
      double h = blas::caxpyXmazNormX(a, *xH, *yH, *zH);
      error = ERROR(y) + ERROR(x) + fabs(d-h)/fabs(h);}
    break;
  case 24:
    *xD = *xH;
    *yD = *yH;
    {double d = blas::cabxpyAxNorm(a, b2, *xD, *yD);
      double h = blas::cabxpyAxNorm(a, b2, *xH, *yH);
      error = ERROR(x) + ERROR(y) + fabs(d-h)/fabs(h);}
    break;
  // reductions returning double2
  case 25:
    *xD = *xH;
    *yD = *yH;
    error = abs(blas::cDotProduct(*xD, *yD) - blas::cDotProduct(*xH, *yH)) / abs(blas::cDotProduct(*xH, *yH));
    break;
  case 26:
    *xD = *xH;
    *yD = *yH;
    *zD = *zH;
    { quda::Complex d = blas::xpaycDotzy(*xD, a, *yD, *zD);
      quda::Complex h = blas::xpaycDotzy(*xH, a, *yH, *zH);
      error = fabs(blas::norm2(*yD) - blas::norm2(*yH)) / blas::norm2(*yH) + abs(d-h)/abs(h);
    }
    break;
  case 27:
    *xD = *xH;
    *yD = *yH;
    *zD = *zH;
    {quda::Complex d = blas::caxpyDotzy(a, *xD, *yD, *zD);
      quda::Complex h = blas::caxpyDotzy(a, *xH, *yH, *zH);
      error = ERROR(y) + abs(d-h)/abs(h);}
    break;
  // reductions returning double3
  case 28:
    *xD = *xH;
    *yD = *yH;
    { double3 d = blas::cDotProductNormA(*xD, *yD);
      double3 h = blas::cDotProductNormA(*xH, *yH);
      error = fabs(d.x - h.x) / fabs(h.x) + fabs(d.y - h.y) / fabs(h.y) + fabs(d.z - h.z) / fabs(h.z); }
    break;
  case 29:
    *xD = *xH;
    *yD = *yH;
    { double3 d = blas::cDotProductNormB(*xD, *yD);
      double3 h = blas::cDotProductNormB(*xH, *yH);
      error = fabs(d.x - h.x) / fabs(h.x) + fabs(d.y - h.y) / fabs(h.y) + fabs(d.z - h.z) / fabs(h.z); }
    break;
  case 30:
    *xD = *xH;
    *yD = *yH;
    *zD = *zH;
    *wD = *wH;
    *vD = *vH;
    { double3 d = blas::caxpbypzYmbwcDotProductUYNormY(a2, *xD, b2, *yD, *zD, *wD, *vD);
      double3 h = blas::caxpbypzYmbwcDotProductUYNormY(a2, *xH, b2, *yH, *zH, *wH, *vH);
      error = ERROR(z) + ERROR(y) + fabs(d.x - h.x) / fabs(h.x) +
        fabs(d.y - h.y) / fabs(h.y) + fabs(d.z - h.z) / fabs(h.z); }
    break;
  case 31:
    *xD = *xH;
    *yD = *yH;
    { double3 d = blas::HeavyQuarkResidualNorm(*xD, *yD);
      double3 h = blas::HeavyQuarkResidualNorm(*xH, *yH);
      error = fabs(d.x - h.x) / fabs(h.x) +
        fabs(d.y - h.y) / fabs(h.y) + fabs(d.z - h.z) / fabs(h.z); }
    break;
  case 32:
    *xD = *xH;
    *yD = *yH;
    *zD = *zH;
    { double3 d = blas::xpyHeavyQuarkResidualNorm(*xD, *yD, *zD);
      double3 h = blas::xpyHeavyQuarkResidualNorm(*xH, *yH, *zH);
      error = ERROR(y) + fabs(d.x - h.x) / fabs(h.x) +
        fabs(d.y - h.y) / fabs(h.y) + fabs(d.z - h.z) / fabs(h.z); }
    break;
  case 33:
    *xD = *xH;
    *yD = *yH;
    *zD = *zH;
    // host side reconstructs the three reductions individually
    { double3 d = blas::tripleCGReduction(*xD, *yD, *zD);
      double3 h = make_double3(blas::norm2(*xH), blas::norm2(*yH), blas::reDotProduct(*yH, *zH));
      error = fabs(d.x - h.x) / fabs(h.x) +
        fabs(d.y - h.y) / fabs(h.y) + fabs(d.z - h.z) / fabs(h.z); }
    break;
  case 34:
    *xD = *xH;
    *yD = *yH;
    *zD = *zH;
    *wD = *wH;
    { blas::tripleCGUpdate(a, b, *xD, *yD, *zD, *wD);
      blas::tripleCGUpdate(a, b, *xH, *yH, *zH, *wH);
      error = ERROR(y) + ERROR(z) + ERROR(w); }
    break;
  case 35:
    *xD = *xH;
    *yD = *yH;
    { double d = blas::axpyReDot(a, *xD, *yD);
      double h = blas::axpyReDot(a, *xH, *yH);
      error = ERROR(y) + fabs(d-h)/fabs(h); }
    break;
  // block / multi-blas kernels: compare the fused device kernel against a
  // loop of single host kernels, averaging the per-component deviations
  case 36:
    for (int i=0; i < Nsrc; i++) xmD->Component(i) = *(xmH[i]);
    for (int i=0; i < Msrc; i++) ymD->Component(i) = *(ymH[i]);
    blas::caxpy(A, *xmD, *ymD);
    for (int i=0; i < Nsrc; i++){
      for(int j=0; j < Msrc; j++){
        blas::caxpy(A[Msrc*i+j], *(xmH[i]), *(ymH[j]));
      }
    }
    error = 0;
    for (int i=0; i < Msrc; i++){
      error+= fabs(blas::norm2((ymD->Component(i))) - blas::norm2(*(ymH[i]))) / blas::norm2(*(ymH[i]));
    }
    error/= Msrc;
    break;
  case 37:
    for (int i=0; i < Nsrc; i++) {
      xmD->Component(i) = *(xmH[i]);
      zmD->Component(i) = *(zmH[i]);
    }
    *yD = *yH;
    blas::axpyBzpcx((double*)A, xmD->Components(), zmD->Components(), (double*)B, *yD, (const double*)C);
    for (int i=0; i<Nsrc; i++) {
      blas::axpyBzpcx(((double*)A)[i], *xmH[i], *zmH[i], ((double*)B)[i], *yH, ((double*)C)[i]);
    }
    error = 0;
    for (int i=0; i < Nsrc; i++){
      error+= fabs(blas::norm2((xmD->Component(i))) - blas::norm2(*(xmH[i]))) / blas::norm2(*(xmH[i]));
      //error+= fabs(blas::norm2((zmD->Component(i))) - blas::norm2(*(zmH[i]))) / blas::norm2(*(zmH[i]));
    }
    error/= Nsrc;
    break;
  case 38:
    *xD = *xH;
    *yD = *yH;
    *zD = *zH;
    {blas::caxpyBxpz(a, *xD, *yD, b2, *zD);
      blas::caxpyBxpz(a, *xH, *yH, b2, *zH);
      error = ERROR(x) + ERROR(z);}
    break;
  case 39:
    *xD = *xH;
    *yD = *yH;
    *zD = *zH;
    {blas::caxpyBzpx(a, *xD, *yD, b2, *zD);
      blas::caxpyBzpx(a, *xH, *yH, b2, *zH);
      error = ERROR(x) + ERROR(z);}
    break;
  case 40:
    for (int i=0; i < Nsrc; i++) xmD->Component(i) = *(xmH[i]);
    blas::cDotProduct(A2, xmD->Components(), xmD->Components());
    error = 0.0;
    // reference: pairwise single dot products on the device components
    for (int i = 0; i < Nsrc; i++) {
      for (int j = 0; j < Nsrc; j++) {
        B2[i*Nsrc+j] = blas::cDotProduct(xmD->Component(i), xmD->Component(j));
        error += std::abs(A2[i*Nsrc+j] - B2[i*Nsrc+j])/std::abs(B2[i*Nsrc+j]);
      }
    }
    error /= Nsrc*Nsrc;
    break;
  case 41:
    for (int i=0; i < Nsrc; i++) xmD->Component(i) = *(xmH[i]);
    for (int i=0; i < Msrc; i++) ymD->Component(i) = *(ymH[i]);
    blas::cDotProduct(A, xmD->Components(), ymD->Components());
    error = 0.0;
    for (int i = 0; i < Nsrc; i++) {
      for (int j = 0; j < Msrc; j++) {
        B[i*Msrc+j] = blas::cDotProduct(xmD->Component(i), ymD->Component(j));
        error += std::abs(A[i*Msrc+j] - B[i*Msrc+j])/std::abs(B[i*Msrc+j]);
      }
    }
    error /= Nsrc*Msrc;
    break;
  default:
    errorQuda("Undefined blas kernel %d\n", kernel);
  }
  delete[] A;
  delete[] B;
  delete[] C;
  delete[] A2;
  delete[] B2;
  return error;
}
// Human-readable precision labels, indexed by the precision parameter 0..2.
const char *prec_str[] = {"half", "single", "double"};
// For googletest names must be non-empty, unique, and may only contain ASCII
// alphanumeric characters or underscore
// names[k] labels blas kernel index k; keep the ordering in sync with the
// switch statements in benchmark() and test(). The array holds 43 entries
// while Nkernels == 42, so the final entry ("caxpy_composite") is unused.
const char *names[] = {
  "copyHS",
  "copyLS",
  "axpby",
  "xpy",
  "axpy",
  "xpay",
  "mxpy",
  "ax",
  "caxpy",
  "caxpby",
  "cxpaypbz",
  "axpyBzpcx",
  "axpyZpbx",
  "caxpbypzYmbw",
  "cabxpyAx",
  "caxpbypz",
  "caxpbypczpw",
  "caxpyXmaz",
  "norm",
  "reDotProduct",
  "axpyNorm",
  "xmyNorm",
  "caxpyNorm",
  "caxpyXmazNormX",
  "cabxpyAxNorm",
  "cDotProduct",
  "xpaycDotzy",
  "caxpyDotzy",
  "cDotProductNormA",
  "cDotProductNormB",
  "caxpbypzYmbwcDotProductUYNormY",
  "HeavyQuarkResidualNorm",
  "xpyHeavyQuarkResidualNorm",
  "tripleCGReduction",
  "tripleCGUpdate",
  "axpyReDot",
  "caxpy_block",
  "axpyBzpcx_block",
  "caxpyBxpz",
  "caxpyBzpx",
  "cDotProductNorm_block",
  "cDotProduct_block",
  "caxpy_composite"
};
// Entry point: parse options, initialize QUDA and communications, then run
// the google-test suite (verify + benchmark for each precision/kernel pair).
int main(int argc, char** argv)
{
  ::testing::InitGoogleTest(&argc, argv);
  int result = 0;
  // defaults: exercise every precision and every kernel unless overridden
  // on the command line
  prec = QUDA_INVALID_PRECISION;
  test_type = -1;
  for (int i = 1; i < argc; i++){
    if(process_command_line_option(argc, argv, &i) == 0){
      continue;
    }
    printfQuda("ERROR: Invalid option:%s\n", argv[i]);
    usage(argv);
  }
  // override spin setting if mg solver is set to test coarse grids
  if (inv_type == QUDA_MG_INVERTER) {
    Nspin = 2;
    Ncolor = nvec;
  } else {
    // set spin according to the type of dslash
    Nspin = (dslash_type == QUDA_ASQTAD_DSLASH ||
             dslash_type == QUDA_STAGGERED_DSLASH) ? 1 : 4;
    Ncolor = 3;
  }
  setSpinorSiteSize(24);
  initComms(argc, argv, gridsize_from_cmdline);
  display_test_info();
  initQuda(device);
  setVerbosity(QUDA_SILENT);
  // clear the error state
  hipGetLastError();
  // lastly check for correctness
  if (verify_results) {
    result = RUN_ALL_TESTS();
  }
  endQuda();
  finalizeComms();
  return result;
}
// The following tests each kernel at each precision using the google testing framework
using ::testing::TestWithParam;
using ::testing::Bool;
using ::testing::Values;
using ::testing::Range;
using ::testing::Combine;
// Parameterized fixture: the tuple is (precision index 0..2, kernel index).
// SetUp() allocates all host/device fields at the requested precision and
// TearDown() releases them, so every test case starts from fresh fields.
class BlasTest : public ::testing::TestWithParam<::testing::tuple<int, int>> {
protected:
  ::testing::tuple<int, int> param; // (precision, kernel) of the running case
public:
  virtual ~BlasTest() { }
  virtual void SetUp() {
    param = GetParam();
    initFields(::testing::get<0>(GetParam()));
  }
  virtual void TearDown() { freeFields(); }
};
// Correctness test: compare device against host for one (precision, kernel)
// pair. Skipped combinations are assigned deviation 1.0, i.e. reported as
// failures without being run.
TEST_P(BlasTest, verify) {
  int prec = ::testing::get<0>(GetParam());
  int kernel = ::testing::get<1>(GetParam());
  // certain tests will fail to run for coarse grids so mark these as
  // failed without running
  double deviation = skip_kernel(prec,kernel) ? 1.0 : test(kernel);
  // printfQuda("%-35s error = %e\n", names[kernel], deviation);
  // tolerance scales with the working precision of the fields
  double tol = (prec == 2 ? 1e-10 : (prec == 1 ? 1e-5 : 1e-3));
  tol = (kernel < 2) ? 1e-4 : tol; // use different tolerance for copy
  EXPECT_LE(deviation, tol) << "CPU and CUDA implementations do not agree";
}
// Performance test: measure sustained Gflop/s and GB/s for one blas kernel.
// The precision component of the parameter tuple is consumed by the
// fixture's SetUp() (initFields), so only the kernel index is read here —
// the previously-declared local `prec` was unused and has been removed.
TEST_P(BlasTest, benchmark) {
  int kernel = ::testing::get<1>(GetParam());
  // do the initial tune (warm-up pass, excluded from the measurement)
  benchmark(kernel, 1);
  // now rerun with more iterations to get accurate speed measurements
  quda::blas::flops = 0;
  quda::blas::bytes = 0;
  double secs = benchmark(kernel, niter);
  double gflops = (quda::blas::flops*1e-9)/(secs);
  double gbytes = quda::blas::bytes/(secs*1e9);
  // attach the results to the gtest XML report and echo to stdout
  RecordProperty("Gflops", std::to_string(gflops));
  RecordProperty("GBs", std::to_string(gbytes));
  printfQuda("%-31s: Gflop/s = %6.1f, GB/s = %6.1f\n", names[kernel], gflops, gbytes);
}
// Build the unique, gtest-legal test-case name "<kernel>_<precision>" from
// one (precision index, kernel index) parameter tuple.
std::string getblasname(testing::TestParamInfo<::testing::tuple<int, int>> param){
  const int precision = ::testing::get<0>(param.param);
  const int kernel = ::testing::get<1>(param.param);
  return std::string(names[kernel]) + "_" + prec_str[precision];
}
// instantiate every (precision, kernel) combination: precision indices 0..2
// (half, single, double) crossed with all kernel indices
INSTANTIATE_TEST_CASE_P(QUDA, BlasTest, Combine( Range(0,3), Range(0, Nkernels) ), getblasname);
| e7289ac0a7152168a015656d47c1fb0995bba2d5.cu | #include <stdio.h>
#include <stdlib.h>
#include <quda_internal.h>
#include <color_spinor_field.h>
#include <blas_quda.h>
#include <test_util.h>
// include because of nasty globals used in the tests
#include <dslash_util.h>
// google test
#include <gtest.h>
extern int test_type;
extern QudaPrecision prec;
extern QudaDslashType dslash_type;
extern QudaInverterType inv_type;
extern int nvec;
extern int device;
extern int xdim;
extern int ydim;
extern int zdim;
extern int tdim;
extern int gridsize_from_cmdline[];
extern int niter;
extern bool verify_results;
extern int Nsrc;
extern int Msrc;
extern QudaSolveType solve_type;
extern void usage(char** );
const int Nkernels = 42;
using namespace quda;
ColorSpinorField *xH, *yH, *zH, *wH, *vH, *hH, *lH;
ColorSpinorField *xD, *yD, *zD, *wD, *vD, *hD, *lD, *xmD, *ymD, *zmD;
std::vector<cpuColorSpinorField*> xmH;
std::vector<cpuColorSpinorField*> ymH;
std::vector<cpuColorSpinorField*> zmH;
int Nspin;
int Ncolor;
void setPrec(ColorSpinorParam ¶m, const QudaPrecision precision)
{
param.precision = precision;
if (Nspin == 1 || Nspin == 2 || precision == QUDA_DOUBLE_PRECISION) {
param.fieldOrder = QUDA_FLOAT2_FIELD_ORDER;
} else {
param.fieldOrder = QUDA_FLOAT4_FIELD_ORDER;
}
}
// Print the lattice dimensions, spin/color content, and the grid partition
// layout for this run (one line per category, formatted for log scraping).
void
display_test_info()
{
  printfQuda("running the following test:\n");
  printfQuda("S_dimension T_dimension Nspin Ncolor\n");
  printfQuda("%3d /%3d / %3d %3d %d %d\n", xdim, ydim, zdim, tdim, Nspin, Ncolor);
  printfQuda("Grid partition info: X Y Z T\n");
  printfQuda(" %d %d %d %d\n",
             dimPartitioned(0),
             dimPartitioned(1),
             dimPartitioned(2),
             dimPartitioned(3));
  return;
}
int Nprec = 3;
bool skip_kernel(int precision, int kernel) {
// if we've selected a given kernel then make sure we only run that
if (test_type != -1 && kernel != test_type) return true;
// if we've selected a given precision then make sure we only run that
QudaPrecision this_prec = precision == 2 ? QUDA_DOUBLE_PRECISION : precision == 1 ? QUDA_SINGLE_PRECISION : QUDA_HALF_PRECISION;
if (prec != QUDA_INVALID_PRECISION && this_prec != prec) return true;
if ( Nspin == 2 && precision == 0) {
// avoid half precision tests if doing coarse fields
return true;
} else if (Nspin == 2 && kernel == 1) {
// avoid low-precision copy if doing coarse fields
return true;
} else if (Ncolor != 3 && (kernel == 31 || kernel == 32)) {
// only benchmark heavy-quark norm if doing 3 colors
return true;
} else if ((Nprec < 3) && (kernel == 0)) {
// only benchmark high-precision copy() if double is supported
return true;
}
return false;
}
// Allocate and randomize the host (CPU) reference fields and their device
// (GPU) counterparts used by every BLAS test/benchmark.
// 'prec' selects the precision of the primary device fields:
//   0 = half, 1 = single, 2 = double.
// hD/lD are auxiliary device fields at the remaining higher/lower precisions
// and serve as copy sources for the copyHS/copyLS kernels.
void initFields(int prec)
{
  // precisions used for the source field in the copyCuda() benchmark
  QudaPrecision high_aux_prec = QUDA_INVALID_PRECISION;
  QudaPrecision low_aux_prec = QUDA_INVALID_PRECISION;

  ColorSpinorParam param;
  param.nColor = Ncolor;
  param.nSpin = Nspin;
  param.nDim = 4; // number of spacetime dimensions
  param.pad = 0; // padding must be zero for cpu fields
  if (solve_type == QUDA_DIRECT_PC_SOLVE) {
    param.siteSubset = QUDA_PARITY_SITE_SUBSET;
  } else if (solve_type == QUDA_DIRECT_SOLVE) {
    param.siteSubset = QUDA_FULL_SITE_SUBSET;
  } else {
    errorQuda("Unexpected solve_type=%d\n", solve_type);
  }

  // a single-parity field covers half the sites in the x direction
  if (param.siteSubset == QUDA_PARITY_SITE_SUBSET) param.x[0] = xdim/2;
  else param.x[0] = xdim;
  param.x[1] = ydim;
  param.x[2] = zdim;
  param.x[3] = tdim;

  param.siteOrder = QUDA_EVEN_ODD_SITE_ORDER;
  param.gammaBasis = QUDA_DEGRAND_ROSSI_GAMMA_BASIS;
  param.precision = QUDA_DOUBLE_PRECISION; // host reference fields are always double
  param.fieldOrder = QUDA_SPACE_SPIN_COLOR_FIELD_ORDER;

  param.create = QUDA_ZERO_FIELD_CREATE;

  // host reference fields
  vH = new cpuColorSpinorField(param);
  wH = new cpuColorSpinorField(param);
  xH = new cpuColorSpinorField(param);
  yH = new cpuColorSpinorField(param);
  zH = new cpuColorSpinorField(param);
  hH = new cpuColorSpinorField(param);
  lH = new cpuColorSpinorField(param);

  // create composite fields
  // xmH = new cpuColorSpinorField(param);
  // ymH = new cpuColorSpinorField(param);
  // host multi-source sets: xmH/zmH hold Nsrc fields, ymH holds Msrc fields
  xmH.reserve(Nsrc);
  for (int cid = 0; cid < Nsrc; cid++) xmH.push_back(new cpuColorSpinorField(param));
  ymH.reserve(Msrc);
  for (int cid = 0; cid < Msrc; cid++) ymH.push_back(new cpuColorSpinorField(param));
  zmH.reserve(Nsrc);
  for (int cid = 0; cid < Nsrc; cid++) zmH.push_back(new cpuColorSpinorField(param));

  // fill every host field with random data
  static_cast<cpuColorSpinorField*>(vH)->Source(QUDA_RANDOM_SOURCE, 0, 0, 0);
  static_cast<cpuColorSpinorField*>(wH)->Source(QUDA_RANDOM_SOURCE, 0, 0, 0);
  static_cast<cpuColorSpinorField*>(xH)->Source(QUDA_RANDOM_SOURCE, 0, 0, 0);
  static_cast<cpuColorSpinorField*>(yH)->Source(QUDA_RANDOM_SOURCE, 0, 0, 0);
  static_cast<cpuColorSpinorField*>(zH)->Source(QUDA_RANDOM_SOURCE, 0, 0, 0);
  static_cast<cpuColorSpinorField*>(hH)->Source(QUDA_RANDOM_SOURCE, 0, 0, 0);
  static_cast<cpuColorSpinorField*>(lH)->Source(QUDA_RANDOM_SOURCE, 0, 0, 0);
  for(int i=0; i<Nsrc; i++){
    static_cast<cpuColorSpinorField*>(xmH[i])->Source(QUDA_RANDOM_SOURCE, 0, 0, 0);
  }
  for(int i=0; i<Msrc; i++){
    static_cast<cpuColorSpinorField*>(ymH[i])->Source(QUDA_RANDOM_SOURCE, 0, 0, 0);
  }
  // NOTE(review): zmH is never given random data here, only zero-initialized
  // via QUDA_ZERO_FIELD_CREATE -- confirm that is intended for the
  // axpyBzpcx_block test.

  // Now set the parameters for the cuda fields
  //param.pad = xdim*ydim*zdim/2;

  if (param.nSpin == 4) param.gammaBasis = QUDA_UKQCD_GAMMA_BASIS;
  param.create = QUDA_ZERO_FIELD_CREATE;

  // pick the primary precision plus the two auxiliary copy-source precisions
  switch(prec) {
  case 0:
    setPrec(param, QUDA_HALF_PRECISION);
    high_aux_prec = QUDA_DOUBLE_PRECISION;
    low_aux_prec = QUDA_SINGLE_PRECISION;
    break;
  case 1:
    setPrec(param, QUDA_SINGLE_PRECISION);
    high_aux_prec = QUDA_DOUBLE_PRECISION;
    low_aux_prec = QUDA_HALF_PRECISION;
    break;
  case 2:
    setPrec(param, QUDA_DOUBLE_PRECISION);
    high_aux_prec = QUDA_SINGLE_PRECISION;
    low_aux_prec = QUDA_HALF_PRECISION;
    break;
  default:
    errorQuda("Precision option not defined");
  }

  checkCudaError();

  // device fields at the primary precision
  vD = new cudaColorSpinorField(param);
  wD = new cudaColorSpinorField(param);
  xD = new cudaColorSpinorField(param);
  yD = new cudaColorSpinorField(param);
  zD = new cudaColorSpinorField(param);

  param.is_composite = true;
  param.is_component = false;

  // create composite fields
  param.composite_dim = Nsrc;
  xmD = new cudaColorSpinorField(param);

  param.composite_dim = Msrc;
  ymD = new cudaColorSpinorField(param);

  param.composite_dim = Nsrc;
  zmD = new cudaColorSpinorField(param);

  // restore non-composite settings for the auxiliary fields
  param.is_composite = false;
  param.is_component = false;
  param.composite_dim = 1;

  setPrec(param, high_aux_prec);
  hD = new cudaColorSpinorField(param);

  setPrec(param, low_aux_prec);
  lD = new cudaColorSpinorField(param);

  // check for successful allocation
  checkCudaError();

  // only do copy if not doing half precision with mg
  bool flag = !(param.nSpin == 2 &&
		(prec == 0 || low_aux_prec == QUDA_HALF_PRECISION) );

  if ( flag ) {
    // upload the host reference data to the device fields
    *vD = *vH;
    *wD = *wH;
    *xD = *xH;
    *yD = *yH;
    *zD = *zH;
    *hD = *hH;
    *lD = *lH;
    // for (int i=0; i < Nsrc; i++){
    //   xmD->Component(i) = *(xmH[i]);
    //   ymD->Component(i) = *(ymH[i]);
    // }
    // *ymD = *ymH;
  }
}
// Release every host and device field allocated by initFields().
void freeFields()
{
  // device-side fields
  delete vD;
  delete wD;
  delete xD;
  delete yD;
  delete zD;
  delete hD;
  delete lD;
  delete xmD;
  delete ymD;
  delete zmD;

  // host-side fields
  delete vH;
  delete wH;
  delete xH;
  delete yH;
  delete zH;
  delete hH;
  delete lH;
  // the multi-source containers own their elements
  for (auto *field : xmH) delete field;
  for (auto *field : ymH) delete field;
  for (auto *field : zmH) delete field;
  xmH.clear();
  ymH.clear();
  zmH.clear();
}
// Time 'niter' repetitions of BLAS kernel 'kernel' on the device fields and
// return the elapsed wall time in seconds (CUDA events report milliseconds).
// The scalar coefficients are benign non-zero constants: their values do not
// affect the timing, but they must be initialized -- the original code read
// uninitialized doubles, which is undefined behavior.  The coefficient arrays
// A/B/C/A2 are value-initialized (quda::Complex default-constructs to zero).
double benchmark(int kernel, const int niter) {

  double a = 1.0, b = 2.0, c = 3.0;              // fix: were uninitialized
  quda::Complex a2(1.0, 2.0), b2(2.0, -1.0), c2(0.5, 0.25);
  quda::Complex * A = new quda::Complex[Nsrc*Msrc];
  quda::Complex * B = new quda::Complex[Nsrc*Msrc];
  quda::Complex * C = new quda::Complex[Nsrc*Msrc];
  quda::Complex * A2 = new quda::Complex[Nsrc*Nsrc]; // for the block cDotProductNorm test

  cudaEvent_t start, end;
  cudaEventCreate(&start);
  cudaEventCreate(&end);
  cudaEventRecord(start, 0);

  {
    switch (kernel) {

    case 0:
      for (int i=0; i < niter; ++i) blas::copy(*yD, *hD);
      break;

    case 1:
      for (int i=0; i < niter; ++i) blas::copy(*yD, *lD);
      break;

    case 2:
      for (int i=0; i < niter; ++i) blas::axpby(a, *xD, b, *yD);
      break;

    case 3:
      for (int i=0; i < niter; ++i) blas::xpy(*xD, *yD);
      break;

    case 4:
      for (int i=0; i < niter; ++i) blas::axpy(a, *xD, *yD);
      break;

    case 5:
      for (int i=0; i < niter; ++i) blas::xpay(*xD, a, *yD);
      break;

    case 6:
      for (int i=0; i < niter; ++i) blas::mxpy(*xD, *yD);
      break;

    case 7:
      for (int i=0; i < niter; ++i) blas::ax(a, *xD);
      break;

    case 8:
      for (int i=0; i < niter; ++i) blas::caxpy(a2, *xD, *yD);
      break;

    case 9:
      for (int i=0; i < niter; ++i) blas::caxpby(a2, *xD, b2, *yD);
      break;

    case 10:
      for (int i=0; i < niter; ++i) blas::cxpaypbz(*xD, a2, *yD, b2, *zD);
      break;

    case 11:
      for (int i=0; i < niter; ++i) blas::axpyBzpcx(a, *xD, *yD, b, *zD, c);
      break;

    case 12:
      for (int i=0; i < niter; ++i) blas::axpyZpbx(a, *xD, *yD, *zD, b);
      break;

    case 13:
      for (int i=0; i < niter; ++i) blas::caxpbypzYmbw(a2, *xD, b2, *yD, *zD, *wD);
      break;

    case 14:
      for (int i=0; i < niter; ++i) blas::cabxpyAx(a, b2, *xD, *yD);
      break;

    case 15:
      for (int i=0; i < niter; ++i) blas::caxpbypz(a2, *xD, b2, *yD, *zD);
      break;

    case 16:
      for (int i=0; i < niter; ++i) blas::caxpbypczpw(a2, *xD, b2, *yD, c2, *zD, *wD);
      break;

    case 17:
      for (int i=0; i < niter; ++i) blas::caxpyXmaz(a2, *xD, *yD, *zD);
      break;

      // reductions returning double
    case 18:
      for (int i=0; i < niter; ++i) blas::norm2(*xD);
      break;

    case 19:
      for (int i=0; i < niter; ++i) blas::reDotProduct(*xD, *yD);
      break;

    case 20:
      for (int i=0; i < niter; ++i) blas::axpyNorm(a, *xD, *yD);
      break;

    case 21:
      for (int i=0; i < niter; ++i) blas::xmyNorm(*xD, *yD);
      break;

    case 22:
      for (int i=0; i < niter; ++i) blas::caxpyNorm(a2, *xD, *yD);
      break;

    case 23:
      for (int i=0; i < niter; ++i) blas::caxpyXmazNormX(a2, *xD, *yD, *zD);
      break;

    case 24:
      for (int i=0; i < niter; ++i) blas::cabxpyAxNorm(a, b2, *xD, *yD);
      break;

      // reductions returning double2
    case 25:
      for (int i=0; i < niter; ++i) blas::cDotProduct(*xD, *yD);
      break;

    case 26:
      for (int i=0; i < niter; ++i) blas::xpaycDotzy(*xD, a, *yD, *zD);
      break;

    case 27:
      for (int i=0; i < niter; ++i) blas::caxpyDotzy(a2, *xD, *yD, *zD);
      break;

      // reductions returning double3
    case 28:
      for (int i=0; i < niter; ++i) blas::cDotProductNormA(*xD, *yD);
      break;

    case 29:
      for (int i=0; i < niter; ++i) blas::cDotProductNormB(*xD, *yD);
      break;

    case 30:
      for (int i=0; i < niter; ++i) blas::caxpbypzYmbwcDotProductUYNormY(a2, *xD, b2, *yD, *zD, *wD, *vD);
      break;

    case 31:
      for (int i=0; i < niter; ++i) blas::HeavyQuarkResidualNorm(*xD, *yD);
      break;

    case 32:
      for (int i=0; i < niter; ++i) blas::xpyHeavyQuarkResidualNorm(*xD, *yD, *zD);
      break;

    case 33:
      for (int i=0; i < niter; ++i) blas::tripleCGReduction(*xD, *yD, *zD);
      break;

    case 34:
      for (int i=0; i < niter; ++i) blas::tripleCGUpdate(a, b, *xD, *yD, *zD, *wD);
      break;

    case 35:
      for (int i=0; i < niter; ++i) blas::axpyReDot(a, *xD, *yD);
      break;

      // multi-source (block) kernels
    case 36:
      for (int i=0; i < niter; ++i) blas::caxpy(A, *xmD, *ymD);
      break;

    case 37:
      for (int i=0; i < niter; ++i) blas::axpyBzpcx((double*)A, xmD->Components(), zmD->Components(), (double*)B, *yD, (double*)C);
      break;

    case 38:
      for (int i=0; i < niter; ++i) blas::caxpyBxpz(a2, *xD, *yD, b2, *zD);
      break;

    case 39:
      for (int i=0; i < niter; ++i) blas::caxpyBzpx(a2, *xD, *yD, b2, *zD);
      break;

    case 40:
      for (int i=0; i < niter; ++i) blas::cDotProduct(A2, xmD->Components(), xmD->Components());
      break;

    case 41:
      for (int i=0; i < niter; ++i) blas::cDotProduct(A, xmD->Components(), ymD->Components());
      break;

    default:
      errorQuda("Undefined blas kernel %d\n", kernel);
    }
  }

  cudaEventRecord(end, 0);
  cudaEventSynchronize(end);
  float runTime;
  cudaEventElapsedTime(&runTime, start, end); // milliseconds
  cudaEventDestroy(start);
  cudaEventDestroy(end);

  delete[] A;
  delete[] B;
  delete[] C;
  delete[] A2;

  double secs = runTime / 1000;
  return secs;
}
#define ERROR(a) fabs(blas::norm2(*a##D) - blas::norm2(*a##H)) / blas::norm2(*a##H)
// Correctness test for BLAS kernel 'kernel': runs the kernel on the device
// fields and on the double-precision host reference fields, and returns a
// relative deviation between the two results (0 == perfect agreement).
// The ERROR(a) macro above compares the norms of aD versus aH.
// NOTE(review): several complex kernels below (cases 17, 22, 23, 27, 38, 39)
// are passed the real scalar 'a' (implicitly converted to quda::Complex),
// while the benchmark() variant passes 'a2' -- confirm this is intentional;
// it is self-consistent here since host and device get the same value.
double test(int kernel) {

  // fixed irrational coefficients so no operand is a "nice" value
  double a = M_PI, b = M_PI*exp(1.0), c = sqrt(M_PI);
  quda::Complex a2(a, b), b2(b, -c), c2(a+b, c*a);
  double error = 0;
  quda::Complex * A = new quda::Complex[Nsrc*Msrc];
  quda::Complex * B = new quda::Complex[Nsrc*Msrc];
  quda::Complex * C = new quda::Complex[Nsrc*Msrc];
  quda::Complex * A2 = new quda::Complex[Nsrc*Nsrc]; // for the block cDotProductNorm test
  quda::Complex * B2 = new quda::Complex[Nsrc*Nsrc]; // for the block cDotProductNorm test

  // deterministic, index-dependent coefficient matrices
  for(int i=0; i < Nsrc*Msrc; i++){
    A[i] = a2* (1.0*((i/Nsrc) + i)) + b2 * (1.0*i) + c2 *(1.0*(Nsrc*Msrc/2-i));
    B[i] = a2* (1.0*((i/Nsrc) + i)) - b2 * (M_PI*i) + c2 *(1.0*(Nsrc*Msrc/2-i));
    C[i] = a2* (1.0*((M_PI/Nsrc) + i)) + b2 * (1.0*i) + c2 *(1.0*(Nsrc*Msrc/2-i));
  }
  for(int i=0; i < Nsrc*Nsrc; i++){
    A2[i] = a2* (1.0*((i/Nsrc) + i)) + b2 * (1.0*i) + c2 *(1.0*(Nsrc*Nsrc/2-i));
    B2[i] = a2* (1.0*((i/Nsrc) + i)) - b2 * (M_PI*i) + c2 *(1.0*(Nsrc*Nsrc/2-i));
  }
  // A[0] = a2;
  // A[1] = 0.;
  // A[2] = 0.;
  // A[3] = 0.;

  // Each case: (1) refresh device operands from the host references,
  // (2) run the kernel on both, (3) accumulate relative errors.
  switch (kernel) {

  case 0:
    *hD = *hH;
    blas::copy(*yD, *hD);
    blas::copy(*yH, *hH);
    error = ERROR(y);
    break;

  case 1:
    *lD = *lH;
    blas::copy(*yD, *lD);
    blas::copy(*yH, *lH);
    error = ERROR(y);
    break;

  case 2:
    *xD = *xH;
    *yD = *yH;
    blas::axpby(a, *xD, b, *yD);
    blas::axpby(a, *xH, b, *yH);
    error = ERROR(y);
    break;

  case 3:
    *xD = *xH;
    *yD = *yH;
    blas::xpy(*xD, *yD);
    blas::xpy(*xH, *yH);
    error = ERROR(y);
    break;

  case 4:
    *xD = *xH;
    *yD = *yH;
    blas::axpy(a, *xD, *yD);
    blas::axpy(a, *xH, *yH);
    // NOTE(review): this download of yD into zH looks like debugging
    // leftover -- zH does not enter the error estimate below.
    *zH = *yD;
    error = ERROR(y);
    break;

  case 5:
    *xD = *xH;
    *yD = *yH;
    blas::xpay(*xD, a, *yD);
    blas::xpay(*xH, a, *yH);
    error = ERROR(y);
    break;

  case 6:
    *xD = *xH;
    *yD = *yH;
    blas::mxpy(*xD, *yD);
    blas::mxpy(*xH, *yH);
    error = ERROR(y);
    break;

  case 7:
    *xD = *xH;
    blas::ax(a, *xD);
    blas::ax(a, *xH);
    error = ERROR(x);
    break;

  case 8:
    *xD = *xH;
    *yD = *yH;
    blas::caxpy(a2, *xD, *yD);
    blas::caxpy(a2, *xH, *yH);
    error = ERROR(y);
    break;

  case 9:
    *xD = *xH;
    *yD = *yH;
    blas::caxpby(a2, *xD, b2, *yD);
    blas::caxpby(a2, *xH, b2, *yH);
    error = ERROR(y);
    break;

  case 10:
    *xD = *xH;
    *yD = *yH;
    *zD = *zH;
    blas::cxpaypbz(*xD, a2, *yD, b2, *zD);
    blas::cxpaypbz(*xH, a2, *yH, b2, *zH);
    error = ERROR(z);
    break;

  case 11:
    *xD = *xH;
    *yD = *yH;
    *zD = *zH;
    blas::axpyBzpcx(a, *xD, *yD, b, *zD, c);
    blas::axpyBzpcx(a, *xH, *yH, b, *zH, c);
    error = ERROR(x) + ERROR(y);
    break;

  case 12:
    *xD = *xH;
    *yD = *yH;
    *zD = *zH;
    blas::axpyZpbx(a, *xD, *yD, *zD, b);
    blas::axpyZpbx(a, *xH, *yH, *zH, b);
    error = ERROR(x) + ERROR(y);
    break;

  case 13:
    *xD = *xH;
    *yD = *yH;
    *zD = *zH;
    *wD = *wH;
    blas::caxpbypzYmbw(a2, *xD, b2, *yD, *zD, *wD);
    blas::caxpbypzYmbw(a2, *xH, b2, *yH, *zH, *wH);
    error = ERROR(z) + ERROR(y);
    break;

  case 14:
    *xD = *xH;
    *yD = *yH;
    blas::cabxpyAx(a, b2, *xD, *yD);
    blas::cabxpyAx(a, b2, *xH, *yH);
    error = ERROR(y) + ERROR(x);
    break;

  case 15:
    *xD = *xH;
    *yD = *yH;
    *zD = *zH;
    {blas::caxpbypz(a2, *xD, b2, *yD, *zD);
      blas::caxpbypz(a2, *xH, b2, *yH, *zH);
      error = ERROR(z); }
    break;

  case 16:
    *xD = *xH;
    *yD = *yH;
    *zD = *zH;
    *wD = *wH;
    {blas::caxpbypczpw(a2, *xD, b2, *yD, c2, *zD, *wD);
      blas::caxpbypczpw(a2, *xH, b2, *yH, c2, *zH, *wH);
      error = ERROR(w); }
    break;

  case 17:
    *xD = *xH;
    *yD = *yH;
    *zD = *zH;
    {blas::caxpyXmaz(a, *xD, *yD, *zD);
      blas::caxpyXmaz(a, *xH, *yH, *zH);
      error = ERROR(y) + ERROR(x);}
    break;

    // reductions returning double
  case 18:
    *xD = *xH;
    *yH = *xD;
    error = fabs(blas::norm2(*xD) - blas::norm2(*xH)) / blas::norm2(*xH);
    break;

  case 19:
    *xD = *xH;
    *yD = *yH;
    error = fabs(blas::reDotProduct(*xD, *yD) - blas::reDotProduct(*xH, *yH)) / fabs(blas::reDotProduct(*xH, *yH));
    break;

  case 20:
    *xD = *xH;
    *yD = *yH;
    {double d = blas::axpyNorm(a, *xD, *yD);
      double h = blas::axpyNorm(a, *xH, *yH);
      error = ERROR(y) + fabs(d-h)/fabs(h);}
    break;

  case 21:
    *xD = *xH;
    *yD = *yH;
    {double d = blas::xmyNorm(*xD, *yD);
      double h = blas::xmyNorm(*xH, *yH);
      error = ERROR(y) + fabs(d-h)/fabs(h);}
    break;

  case 22:
    *xD = *xH;
    *yD = *yH;
    {double d = blas::caxpyNorm(a, *xD, *yD);
      double h = blas::caxpyNorm(a, *xH, *yH);
      error = ERROR(y) + fabs(d-h)/fabs(h);}
    break;

  case 23:
    *xD = *xH;
    *yD = *yH;
    *zD = *zH;
    {double d = blas::caxpyXmazNormX(a, *xD, *yD, *zD);
      double h = blas::caxpyXmazNormX(a, *xH, *yH, *zH);
      error = ERROR(y) + ERROR(x) + fabs(d-h)/fabs(h);}
    break;

  case 24:
    *xD = *xH;
    *yD = *yH;
    {double d = blas::cabxpyAxNorm(a, b2, *xD, *yD);
      double h = blas::cabxpyAxNorm(a, b2, *xH, *yH);
      error = ERROR(x) + ERROR(y) + fabs(d-h)/fabs(h);}
    break;

    // reductions returning double2
  case 25:
    *xD = *xH;
    *yD = *yH;
    error = abs(blas::cDotProduct(*xD, *yD) - blas::cDotProduct(*xH, *yH)) / abs(blas::cDotProduct(*xH, *yH));
    break;

  case 26:
    *xD = *xH;
    *yD = *yH;
    *zD = *zH;
    { quda::Complex d = blas::xpaycDotzy(*xD, a, *yD, *zD);
      quda::Complex h = blas::xpaycDotzy(*xH, a, *yH, *zH);
      error = fabs(blas::norm2(*yD) - blas::norm2(*yH)) / blas::norm2(*yH) + abs(d-h)/abs(h);
    }
    break;

  case 27:
    *xD = *xH;
    *yD = *yH;
    *zD = *zH;
    {quda::Complex d = blas::caxpyDotzy(a, *xD, *yD, *zD);
      quda::Complex h = blas::caxpyDotzy(a, *xH, *yH, *zH);
      error = ERROR(y) + abs(d-h)/abs(h);}
    break;

    // reductions returning double3: compare all three components
  case 28:
    *xD = *xH;
    *yD = *yH;
    { double3 d = blas::cDotProductNormA(*xD, *yD);
      double3 h = blas::cDotProductNormA(*xH, *yH);
      error = fabs(d.x - h.x) / fabs(h.x) + fabs(d.y - h.y) / fabs(h.y) + fabs(d.z - h.z) / fabs(h.z); }
    break;

  case 29:
    *xD = *xH;
    *yD = *yH;
    { double3 d = blas::cDotProductNormB(*xD, *yD);
      double3 h = blas::cDotProductNormB(*xH, *yH);
      error = fabs(d.x - h.x) / fabs(h.x) + fabs(d.y - h.y) / fabs(h.y) + fabs(d.z - h.z) / fabs(h.z); }
    break;

  case 30:
    *xD = *xH;
    *yD = *yH;
    *zD = *zH;
    *wD = *wH;
    *vD = *vH;
    { double3 d = blas::caxpbypzYmbwcDotProductUYNormY(a2, *xD, b2, *yD, *zD, *wD, *vD);
      double3 h = blas::caxpbypzYmbwcDotProductUYNormY(a2, *xH, b2, *yH, *zH, *wH, *vH);
      error = ERROR(z) + ERROR(y) + fabs(d.x - h.x) / fabs(h.x) +
	fabs(d.y - h.y) / fabs(h.y) + fabs(d.z - h.z) / fabs(h.z); }
    break;

  case 31:
    *xD = *xH;
    *yD = *yH;
    { double3 d = blas::HeavyQuarkResidualNorm(*xD, *yD);
      double3 h = blas::HeavyQuarkResidualNorm(*xH, *yH);
      error = fabs(d.x - h.x) / fabs(h.x) +
	fabs(d.y - h.y) / fabs(h.y) + fabs(d.z - h.z) / fabs(h.z); }
    break;

  case 32:
    *xD = *xH;
    *yD = *yH;
    *zD = *zH;
    { double3 d = blas::xpyHeavyQuarkResidualNorm(*xD, *yD, *zD);
      double3 h = blas::xpyHeavyQuarkResidualNorm(*xH, *yH, *zH);
      error = ERROR(y) + fabs(d.x - h.x) / fabs(h.x) +
	fabs(d.y - h.y) / fabs(h.y) + fabs(d.z - h.z) / fabs(h.z); }
    break;

  case 33:
    *xD = *xH;
    *yD = *yH;
    *zD = *zH;
    // host reference is assembled from the three separate reductions
    { double3 d = blas::tripleCGReduction(*xD, *yD, *zD);
      double3 h = make_double3(blas::norm2(*xH), blas::norm2(*yH), blas::reDotProduct(*yH, *zH));
      error = fabs(d.x - h.x) / fabs(h.x) +
	fabs(d.y - h.y) / fabs(h.y) + fabs(d.z - h.z) / fabs(h.z); }
    break;

  case 34:
    *xD = *xH;
    *yD = *yH;
    *zD = *zH;
    *wD = *wH;
    { blas::tripleCGUpdate(a, b, *xD, *yD, *zD, *wD);
      blas::tripleCGUpdate(a, b, *xH, *yH, *zH, *wH);
      error = ERROR(y) + ERROR(z) + ERROR(w); }
    break;

  case 35:
    *xD = *xH;
    *yD = *yH;
    { double d = blas::axpyReDot(a, *xD, *yD);
      double h = blas::axpyReDot(a, *xH, *yH);
      error = ERROR(y) + fabs(d-h)/fabs(h); }
    break;

    // block caxpy: device does all Nsrc x Msrc updates in one call, the host
    // reference loops over every (i,j) pair
  case 36:
    for (int i=0; i < Nsrc; i++) xmD->Component(i) = *(xmH[i]);
    for (int i=0; i < Msrc; i++) ymD->Component(i) = *(ymH[i]);
    blas::caxpy(A, *xmD, *ymD);
    for (int i=0; i < Nsrc; i++){
      for(int j=0; j < Msrc; j++){
	blas::caxpy(A[Msrc*i+j], *(xmH[i]), *(ymH[j]));
      }
    }
    error = 0;
    for (int i=0; i < Msrc; i++){
      error+= fabs(blas::norm2((ymD->Component(i))) - blas::norm2(*(ymH[i]))) / blas::norm2(*(ymH[i]));
    }
    error/= Msrc;
    break;

  case 37:
    for (int i=0; i < Nsrc; i++) {
      xmD->Component(i) = *(xmH[i]);
      zmD->Component(i) = *(zmH[i]);
    }
    *yD = *yH;
    blas::axpyBzpcx((double*)A, xmD->Components(), zmD->Components(), (double*)B, *yD, (const double*)C);
    for (int i=0; i<Nsrc; i++) {
      blas::axpyBzpcx(((double*)A)[i], *xmH[i], *zmH[i], ((double*)B)[i], *yH, ((double*)C)[i]);
    }
    error = 0;
    for (int i=0; i < Nsrc; i++){
      error+= fabs(blas::norm2((xmD->Component(i))) - blas::norm2(*(xmH[i]))) / blas::norm2(*(xmH[i]));
      //error+= fabs(blas::norm2((zmD->Component(i))) - blas::norm2(*(zmH[i]))) / blas::norm2(*(zmH[i]));
    }
    error/= Nsrc;
    break;

  case 38:
    *xD = *xH;
    *yD = *yH;
    *zD = *zH;
    {blas::caxpyBxpz(a, *xD, *yD, b2, *zD);
      blas::caxpyBxpz(a, *xH, *yH, b2, *zH);
      error = ERROR(x) + ERROR(z);}
    break;

  case 39:
    *xD = *xH;
    *yD = *yH;
    *zD = *zH;
    {blas::caxpyBzpx(a, *xD, *yD, b2, *zD);
      blas::caxpyBzpx(a, *xH, *yH, b2, *zH);
      error = ERROR(x) + ERROR(z);}
    break;

    // block dot products: compare the one-shot device result against
    // element-by-element device dot products
  case 40:
    for (int i=0; i < Nsrc; i++) xmD->Component(i) = *(xmH[i]);
    blas::cDotProduct(A2, xmD->Components(), xmD->Components());
    error = 0.0;
    for (int i = 0; i < Nsrc; i++) {
      for (int j = 0; j < Nsrc; j++) {
	B2[i*Nsrc+j] = blas::cDotProduct(xmD->Component(i), xmD->Component(j));
	error += std::abs(A2[i*Nsrc+j] - B2[i*Nsrc+j])/std::abs(B2[i*Nsrc+j]);
      }
    }
    error /= Nsrc*Nsrc;
    break;

  case 41:
    for (int i=0; i < Nsrc; i++) xmD->Component(i) = *(xmH[i]);
    for (int i=0; i < Msrc; i++) ymD->Component(i) = *(ymH[i]);
    blas::cDotProduct(A, xmD->Components(), ymD->Components());
    error = 0.0;
    for (int i = 0; i < Nsrc; i++) {
      for (int j = 0; j < Msrc; j++) {
	B[i*Msrc+j] = blas::cDotProduct(xmD->Component(i), ymD->Component(j));
	error += std::abs(A[i*Msrc+j] - B[i*Msrc+j])/std::abs(B[i*Msrc+j]);
      }
    }
    error /= Nsrc*Msrc;
    break;

  default:
    errorQuda("Undefined blas kernel %d\n", kernel);
  }

  delete[] A;
  delete[] B;
  delete[] C;
  delete[] A2;
  delete[] B2;
  return error;
}
// printable names for the precision index used in the test parameterization
// (index matches the 'prec' argument of initFields: 0=half, 1=single, 2=double)
const char *prec_str[] = {"half", "single", "double"};

// For googletest names must be non-empty, unique, and may only contain ASCII
// alphanumeric characters or underscore
// NOTE: the position of each name must match the kernel index handled in
// benchmark() and test().
const char *names[] = {
  "copyHS",
  "copyLS",
  "axpby",
  "xpy",
  "axpy",
  "xpay",
  "mxpy",
  "ax",
  "caxpy",
  "caxpby",
  "cxpaypbz",
  "axpyBzpcx",
  "axpyZpbx",
  "caxpbypzYmbw",
  "cabxpyAx",
  "caxpbypz",
  "caxpbypczpw",
  "caxpyXmaz",
  "norm",
  "reDotProduct",
  "axpyNorm",
  "xmyNorm",
  "caxpyNorm",
  "caxpyXmazNormX",
  "cabxpyAxNorm",
  "cDotProduct",
  "xpaycDotzy",
  "caxpyDotzy",
  "cDotProductNormA",
  "cDotProductNormB",
  "caxpbypzYmbwcDotProductUYNormY",
  "HeavyQuarkResidualNorm",
  "xpyHeavyQuarkResidualNorm",
  "tripleCGReduction",
  "tripleCGUpdate",
  "axpyReDot",
  "caxpy_block",
  "axpyBzpcx_block",
  "caxpyBxpz",
  "caxpyBzpx",
  "cDotProductNorm_block",
  "cDotProduct_block",
  // NOTE(review): this last entry has no matching case in test()/benchmark()
  // (cases stop at 41) -- confirm whether a kernel is missing or this is stale.
  "caxpy_composite"
};
// Test driver: parses QUDA command-line options, sets the spin/color
// configuration, initializes communications and the QUDA library, then runs
// the parameterized googletest suite and returns its result.
int main(int argc, char** argv)
{
  // googletest consumes its own flags first; the rest are QUDA options
  ::testing::InitGoogleTest(&argc, argv);
  int result = 0;

  // defaults: run every precision and every kernel unless overridden
  prec = QUDA_INVALID_PRECISION;
  test_type = -1;

  for (int i = 1; i < argc; i++){
    if(process_command_line_option(argc, argv, &i) == 0){
      continue;
    }
    printfQuda("ERROR: Invalid option:%s\n", argv[i]);
    usage(argv);
  }

  // override spin setting if mg solver is set to test coarse grids
  if (inv_type == QUDA_MG_INVERTER) {
    Nspin = 2;
    Ncolor = nvec;
  } else {
    // set spin according to the type of dslash
    Nspin = (dslash_type == QUDA_ASQTAD_DSLASH ||
	     dslash_type == QUDA_STAGGERED_DSLASH) ? 1 : 4;
    Ncolor = 3;
  }
  setSpinorSiteSize(24);
  initComms(argc, argv, gridsize_from_cmdline);
  display_test_info();
  initQuda(device);

  setVerbosity(QUDA_SILENT);

  // clear the error state
  cudaGetLastError();

  // lastly check for correctness
  if (verify_results) {
    result = RUN_ALL_TESTS();
  }

  endQuda();

  finalizeComms();
  return result;
}
// The following tests each kernel at each precision using the google testing framework
using ::testing::TestWithParam;
using ::testing::Bool;
using ::testing::Values;
using ::testing::Range;
using ::testing::Combine;
// Parameterized fixture; the tuple is (precision index, kernel index).
// Fields are (re)allocated before and freed after every test so each
// (precision, kernel) combination starts from freshly initialized data.
class BlasTest : public ::testing::TestWithParam<::testing::tuple<int, int>> {
protected:
  ::testing::tuple<int, int> param; // cached copy of the current (prec, kernel)

public:
  virtual ~BlasTest() { }
  virtual void SetUp() {
    param = GetParam();
    // allocate host/device fields at the precision selected by the parameter
    initFields(::testing::get<0>(GetParam()));
  }
  virtual void TearDown() { freeFields(); }
};
// Correctness: compare each device kernel against the double-precision host
// reference and check the relative deviation against a per-precision tolerance.
TEST_P(BlasTest, verify) {
  int prec = ::testing::get<0>(GetParam());
  int kernel = ::testing::get<1>(GetParam());

  // certain tests will fail to run for coarse grids so mark these as
  // failed without running (deviation of 1.0 always exceeds the tolerance)
  double deviation = skip_kernel(prec,kernel) ? 1.0 : test(kernel);
  // printfQuda("%-35s error = %e\n", names[kernel], deviation);
  double tol = (prec == 2 ? 1e-10 : (prec == 1 ? 1e-5 : 1e-3));
  tol = (kernel < 2) ? 1e-4 : tol; // use different tolerance for copy
  EXPECT_LE(deviation, tol) << "CPU and CUDA implementations do not agree";
}
// Performance: one warm-up call triggers QUDA autotuning, then 'niter'
// iterations are timed; Gflop/s and GB/s are derived from the flop/byte
// counters accumulated by the blas library and reported as test properties.
TEST_P(BlasTest, benchmark) {
  int prec = ::testing::get<0>(GetParam()); // unused here; kept for symmetry with verify
  int kernel = ::testing::get<1>(GetParam());

  // do the initial tune
  benchmark(kernel, 1);

  // now rerun with more iterations to get accurate speed measurements
  quda::blas::flops = 0;
  quda::blas::bytes = 0;

  double secs = benchmark(kernel, niter);

  double gflops = (quda::blas::flops*1e-9)/(secs);
  double gbytes = quda::blas::bytes/(secs*1e9);
  RecordProperty("Gflops", std::to_string(gflops));
  RecordProperty("GBs", std::to_string(gbytes));
  printfQuda("%-31s: Gflop/s = %6.1f, GB/s = %6.1f\n", names[kernel], gflops, gbytes);
}
// Build the googletest display name "<kernel>_<precision>" for a parameter
// tuple (precision index, kernel index).
std::string getblasname(testing::TestParamInfo<::testing::tuple<int, int>> param){
  const int precIdx   = ::testing::get<0>(param.param);
  const int kernelIdx = ::testing::get<1>(param.param);
  return std::string(names[kernelIdx]) + "_" + prec_str[precIdx];
}
// instantiate the suite over all precisions (0=half, 1=single, 2=double) and all kernels
INSTANTIATE_TEST_CASE_P(QUDA, BlasTest, Combine( Range(0,3), Range(0, Nkernels) ), getblasname);
|
aac5244e35fb6610b3f8d4971c79fa64b33af91e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// NOTE: dfloat is a plain macro alias for double, so every computation below runs in double precision
#define dfloat double
#include "simpleRayTracer.h"
#define p_eps 1e-6
#define p_Nsamples 1
// ratio of importance in sampling primary ray versus random rays
#define p_primaryWeight 2.f
#define p_intersectDelta 0.1f
#define p_shadowDelta 0.15f
#define p_projectDelta 1e-2
#define p_maxLevel 5
#define p_maxNrays (2<<p_maxLevel)
#define p_apertureRadius 20.f
#define NRANDOM 10000
hipEvent_t startTimer, endTimer;
// Create the pair of HIP events used by ticTimer()/tocTimer() for kernel timing.
void initTimer(){
  hipEventCreate(&startTimer);
  hipEventCreate(&endTimer);
}
// Record the start event on the default stream (pair with tocTimer()).
void ticTimer(){
  hipEventRecord(startTimer);
}
// Record the end event, wait for it, and print the elapsed time since the
// matching ticTimer() call.  hipEventElapsedTime reports milliseconds, hence
// the division by 1000 to print seconds.
void tocTimer(const char *message){
  hipEventRecord(endTimer);
  hipEventSynchronize(endTimer);
  float elapsed;
  hipEventElapsedTime(&elapsed, startTimer, endTimer);
  printf("Kernel %s took %g seconds\n", message, elapsed/1000.);
}
__device__ bbox_t createBoundingBoxSphere(sphere_t &sphere);
/* Clamp x into the closed interval [xmin, xmax]. */
__host__ __device__ dfloat clamp(dfloat x, dfloat xmin, dfloat xmax){
  const dfloat upperBounded = min(x, xmax);
  return max(upperBounded, xmin);
}
/* Clamp x into [xmin, xmax] and return the floor of the result as an int. */
__host__ __device__ int iclamp(dfloat x, dfloat xmin, dfloat xmax){
  const dfloat clamped = max(min(x, xmax), xmin);
  return floor(clamped);
}
// Map pixel (I,J) of an NI x NJ sensor to its world-space location:
// start from the eye position, step along the sensor normal (Idir x Jdir)
// by sensor.offset, then displace within the sensor plane by the centred,
// length-scaled pixel coordinates (r,s).
__forceinline__ __host__ __device__ vector_t sensorLocation(const int NI,
							    const int NJ,
							    const int I,
							    const int J,
							    const sensor_t &sensor){
  vector_t sensorX = sensor.eyeX;

  // normalized pixel coordinates in [0,1]
  dfloat r = I/(dfloat)(NI-1);
  dfloat s = J/(dfloat)(NJ-1);

  // recentre about the sensor middle and scale to physical sensor size
  r = (r-0.5f)*sensor.Ilength;
  s = (s-0.5f)*sensor.Jlength;

  vector_t sensorNormal =
    vectorCrossProduct(sensor.Idir, sensor.Jdir);

  // push off the eye along the sensor normal
  sensorX.x += sensorNormal.x*sensor.offset;
  sensorX.y += sensorNormal.y*sensor.offset;
  sensorX.z += sensorNormal.z*sensor.offset;

  // in-plane displacement along the two sensor axes
  sensorX.x += r*sensor.Idir.x;
  sensorX.y += r*sensor.Idir.y;
  sensorX.z += r*sensor.Idir.z;

  sensorX.x += s*sensor.Jdir.x;
  sensorX.y += s*sensor.Jdir.y;
  sensorX.z += s*sensor.Jdir.z;

  return sensorX;
}
// Like sensorLocation(), but produces p_Nsamples world-space positions per
// pixel, distributed on a small circle (radius 'delta' pixels) around the
// pixel centre -- the per-pixel jitter used for antialiasing.
// 'sensorsX' must have room for p_Nsamples entries.
__host__ __device__ void sensorMultipleLocations(const int NI,
						 const int NJ,
						 const int I,
						 const int J,
						 const sensor_t &sensor,
						 vector_t *sensorsX){

  for(int samp=0;samp<p_Nsamples;++samp){
    sensorsX[samp] = sensor.eyeX;

    // normalized pixel coordinates in [0,1]
    dfloat r = I/(dfloat)(NI-1);
    dfloat s = J/(dfloat)(NJ-1);

    // sample angle, evenly spaced around the circle
    dfloat theta = 2.f*M_PI*samp/(dfloat)p_Nsamples;

    // circle of samples around sensor pixel
    dfloat delta = .5; // scatter pixel radius

    r = (r-0.5f+delta*cosf(theta)/NI)*sensor.Ilength;
    s = (s-0.5f+delta*sinf(theta)/NJ)*sensor.Jlength;

    vector_t sensorNormal = vectorCrossProduct(sensor.Idir, sensor.Jdir);

    // push off the eye along the sensor normal
    sensorsX[samp].x += sensorNormal.x*sensor.offset;
    sensorsX[samp].y += sensorNormal.y*sensor.offset;
    sensorsX[samp].z += sensorNormal.z*sensor.offset;

    // in-plane displacement along the two sensor axes
    sensorsX[samp].x += r*sensor.Idir.x;
    sensorsX[samp].y += r*sensor.Idir.y;
    sensorsX[samp].z += r*sensor.Idir.z;

    sensorsX[samp].x += s*sensor.Jdir.x;
    sensorsX[samp].y += s*sensor.Jdir.y;
    sensorsX[samp].z += s*sensor.Jdir.z;
  }
}
/* Construct a vector_t from three components. */
__host__ __device__ vector_t vectorCreate(dfloat x, dfloat y, dfloat z){
  vector_t out;
  out.x = x;
  out.y = y;
  out.z = z;
  return out;
}
/* Componentwise difference v1 - v2. */
__host__ __device__ vector_t vectorSub(const vector_t v1, const vector_t v2){
  vector_t diff;
  diff.x = v1.x - v2.x;
  diff.y = v1.y - v2.y;
  diff.z = v1.z - v2.z;
  return diff;
}
/* Scalar (dot) product v1 . v2; summed left-to-right, matching the
   original rounding order. */
__host__ __device__ dfloat vectorDot(const vector_t v1, const vector_t v2){
  dfloat acc = v1.x * v2.x;
  acc += v1.y * v2.y;
  acc += v1.z * v2.z;
  return acc;
}
/* Hadamard (entrywise) product of two vectors. */
__host__ __device__ vector_t vectorDotMultiply(const vector_t v1, const vector_t v2){
  vector_t prod;
  prod.x = v1.x * v2.x;
  prod.y = v1.y * v2.y;
  prod.z = v1.z * v2.z;
  return prod;
}
/* Entrywise quotient v1 / v2 (no zero-divisor check, as in the original). */
__host__ __device__ vector_t vectorDotDivide(const vector_t v1, const vector_t v2){
  vector_t quot;
  quot.x = v1.x / v2.x;
  quot.y = v1.y / v2.y;
  quot.z = v1.z / v2.z;
  return quot;
}
/* Cross product v1 x v2. */
__host__ __device__ vector_t vectorCrossProduct(const vector_t v1, const vector_t v2){
  const dfloat cx = v1.y*v2.z - v1.z*v2.y;
  const dfloat cy = v1.z*v2.x - v1.x*v2.z;
  const dfloat cz = v1.x*v2.y - v1.y*v2.x;
  return vectorCreate(cx, cy, cz);
}
/* Scale vector v by the scalar c. */
__host__ __device__ vector_t vectorScale(const dfloat c, const vector_t v){
  vector_t scaled;
  scaled.x = c * v.x;
  scaled.y = c * v.y;
  scaled.z = c * v.z;
  return scaled;
}
/* Componentwise sum v1 + v2. */
__host__ __device__ vector_t vectorAdd(const vector_t v1, const vector_t v2){
  vector_t sum;
  sum.x = v1.x + v2.x;
  sum.y = v1.y + v2.y;
  sum.z = v1.z + v2.z;
  return sum;
}
/* Scalar triple product (a x b) . c */
__host__ __device__ dfloat vectorTripleProduct(const vector_t a, const vector_t b, const vector_t c){
  return vectorDot(vectorCrossProduct(a, b), c);
}
/* Remove from a its component along b (b is assumed to be a unit vector). */
__host__ __device__ vector_t vectorOrthogonalize(const vector_t a, const vector_t b){
  return vectorSub(a, vectorScale(vectorDot(a, b), b));
}
/* Euclidean length of a. */
__host__ __device__ dfloat vectorNorm(const vector_t a){
  const dfloat aDotA = vectorDot(a, a);
  return sqrt(aDotA);
}
/* Return a scaled to unit length; the zero vector maps to (0,0,0). */
__host__ __device__ vector_t vectorNormalize(const vector_t a){
  const dfloat len = vectorNorm(a);
  return len ? vectorScale(1./len, a) : vectorCreate(0, 0, 0);
}
// https://www.scratchapixel.com/code.php?id=10&origin=/lessons/3d-basic-rendering/minimal-ray-tracer-rendering-simple-shapes
// Solve a*t^2 + 2*b*t + c = 0 (note the factor-of-2 convention on b, so the
// discriminant is b*b - a*c).  Returns false if there is no real root;
// otherwise x0 <= x1 on exit.  Uses the numerically stable "q" formulation
// to avoid catastrophic cancellation when |b| is large.
// NOTE(review): a == 0 (degenerate/linear case) divides by zero -- callers
// must guarantee a nonzero quadratic coefficient.
__forceinline__ __host__ __device__ bool solveQuadratic(const dfloat &a, const dfloat &b, const dfloat &c, dfloat &x0, dfloat &x1){
  dfloat discr = b * b - a * c;
  if (discr < 0) return false;
  else if (discr == 0) {
    // repeated root
    x0 = x1 = - b / a;
  }
  else {
    dfloat sqrtdiscr = sqrt(discr);
    // choose the sign that avoids subtracting nearly equal quantities
    dfloat q = (b > 0) ?
      -(b + sqrtdiscr) :
      -(b - sqrtdiscr);
    x0 = q / a;
    x1 = c / q;
  }
  // return the roots in ascending order
  dfloat xmin = min(x0, x1);
  dfloat xmax = max(x0, x1);

  x0 = xmin;
  x1 = xmax;

  return true;
}
/* Check if the ray and triangle intersect.
 * Solves for the hit point's barycentric coordinates with scalar triple
 * products (Cramer's rule on a 3x3 system built from two triangle edges and
 * the ray direction).  On a hit nearer than the incoming *t (and beyond the
 * p_intersectDelta acne guard), *t is updated and true is returned.
 * NOTE(review): with delta == 0 the sign tests below assume the determinant
 * J is positive; confirm triangle winding guarantees this. */
__forceinline__ __host__ __device__ bool intersectRayTriangle(const ray_t &r, const triangle_t &tri, dfloat *t){

  // TW: unused fudge factor
  dfloat delta = 0;

  bool retval = false;

  // basis: two edges from vertex 2 plus the ray direction
  vector_t B1 = vectorSub(tri.vertices[2], tri.vertices[0]);
  vector_t B2 = vectorSub(tri.vertices[2], tri.vertices[1]);
  vector_t B3 = r.dir;

  // right-hand side: vertex 2 relative to the ray origin
  vector_t R = vectorSub(tri.vertices[2], r.start);

  dfloat J = vectorTripleProduct(B2, B3, B1); // system determinant

  // barycentric numerators; reject if the hit lies outside the triangle
  dfloat L1 = vectorTripleProduct(B2, B3, R);
  if(L1<delta*J) return false;

  dfloat L2 = vectorTripleProduct(B3, B1, R);
  if(L2<delta*J || L1+L2>J*(1+delta)) return false;

  dfloat t0 = vectorTripleProduct(B1, B2, R)/J; // ray parameter of the hit

  /* Verify t1 larger than 0 and less than the original t */
  // TW: FUDGE FACTOR
  if((t0 > p_intersectDelta) && (t0 < *t)){
    *t = t0;
    retval = true;
  }

  return retval;
}
/* Check if the ray and rectangle intersect.
 * The rectangle is described by its center, two in-plane axes A1/A2
 * (assumed orthonormal -- TODO confirm, the projections below are not
 * normalized) and the side lengths L1/L2.  On a hit nearer than the
 * incoming *t, *t is updated and true is returned.
 * NOTE(review): unlike the other intersectors this accepts any t0 > 0
 * without the p_intersectDelta acne guard -- confirm intended. */
__forceinline__ __host__ __device__ bool intersectRayRectangle(const ray_t &r, const rectangle_t &rect, dfloat *t){

  vector_t C = rect.center;
  vector_t A1 = rect.axis[0];
  vector_t A2 = rect.axis[1];
  dfloat L1 = rect.length[0];
  dfloat L2 = rect.length[1];

  // Plane through C with normal n = A1 x A2:
  //   (s + t*d - C).n = 0  =>  t = (C - s).n / (d.n)
  vector_t n = vectorCrossProduct(A1, A2);
  dfloat t0 = vectorDot(vectorSub(C,r.start), n)/vectorDot(r.dir, n);

  // intersection behind start of ray, or farther than the current best hit
  if(t0<0 || t0>*t) return false;

  // X = s + t*d - C, the hit point relative to the rectangle center
  vector_t X = vectorAdd(vectorSub(r.start,C), vectorScale(t0, r.dir));

  dfloat h1 = vectorDot(A1, X)+0.5*L1; // shift so the valid range is [0, L1]
  if(h1<0 || h1>L1) return false;

  dfloat h2 = vectorDot(A2, X)+0.5*L2; // shift so the valid range is [0, L2]
  if(h2<0 || h2>L2) return false;      // BUG FIX: was erroneously compared against L1

  // success
  *t = t0;
  return true;
}
/* Check if the ray and sphere intersect.
 * Substituting p(t) = start + t*dir into |p - c|^2 = r^2 yields a quadratic
 * in t.  B below is the full coefficient 2*d.(p0-c); it is halved when passed
 * to solveQuadratic, which expects the a*t^2 + 2*b*t + c = 0 convention.
 * On a hit nearer than the incoming *t (beyond the p_intersectDelta acne
 * guard), *t is updated and true is returned. */
__forceinline__ __host__ __device__ bool intersectRaySphere(const ray_t &r, const sphere_t &s, dfloat *t){

  bool retval = false;

  /* A = d.d, the vector_t dot product of the direction */
  dfloat A = vectorDot(r.dir, r.dir);

  /* We need a vector_t representing the distance between the start of
   * the ray and the position of the circle.
   * This is the term (p0 - c)
   */
  vector_t dist = vectorSub(r.start, s.pos);

  /* 2d.(p0 - c) */
  dfloat B = 2.f * vectorDot(r.dir, dist);

  /* (p0 - c).(p0 - c) - r^2 */
  dfloat C = vectorDot(dist, dist) - (s.radius * s.radius);

  /* find roots of quadratic */
  dfloat t0, t1;
  if(solveQuadratic(A,0.5*B,C,t0,t1)){
    // t0 is the nearer root (solveQuadratic sorts ascending)
    if((t0 > p_intersectDelta) && (t0 < *t)){
      *t = t0;
      retval = true;
    }else
      retval = false;
  }else{
    retval = false;
  }

  return retval;
}
/* Check if the ray and axis-aligned ellipsoid intersect.
 * The ellipsoid is centred at sh.pos with reciprocal semi-axes sh.invRadii;
 * scaling both ray origin offset and direction by invRadii reduces the
 * problem to a unit-sphere quadratic.  On a hit nearer than the incoming *t
 * (beyond the p_intersectDelta acne guard), *t is updated. */
__forceinline__ __host__ __device__ bool intersectRayEllipsoid(const ray_t &r, const ellipsoid_t &sh, dfloat *t){

  bool retval = false;

  /*
    R = vector radii
    ((p.x-c.x)/R.x)^2 + ((p.y-c.y)/R.y)^2 + ((p.z-c.z)/R.z)^2 = 1;
    p.x = s.x + t*d.x
    s~ = s-c
    ((s~.x+t*d.x)/R.x)^2 + ((s~.y+t*d.y)/R.y)^2 + ((s~.z+t*d.z)/R.z)^2 = 1;

    t^2 * ( (d.x/R.x)^2 + (d.y/R.y)^2 + (d.z/R.z)^2
    +t *2* ( (d.x*s~.x/R.x^2) + (d.y*s~.y/R.y^2) + (d.z*s~.z/R.z^2) )
    +    ( (s~.x/R.x)^2 + (s~.y/R.y)^2 + (s~.z/R.z)^2 - 1 ) = 0
  */

  vector_t s = r.start;
  vector_t d = r.dir;
  vector_t c = sh.pos;
  vector_t invR = sh.invRadii;

  vector_t st = vectorSub(s, c); // ray start relative to ellipsoid centre

  /* A = d.d, the vector_t dot product of the direction */
  // scaled direction and offset: maps the ellipsoid onto the unit sphere
  vector_t dIR = vectorDotMultiply(d, invR);
  vector_t stIR = vectorDotMultiply(st, invR);

  dfloat A = vectorDot(dIR, dIR);
  dfloat B = vectorDot(dIR, stIR);  // already the half-B of a*t^2+2*b*t+c
  dfloat C = vectorDot(stIR, stIR) - 1.f;

  /* find roots of quadratic */
  dfloat t0, t1;
  if(solveQuadratic(A,B,C,t0,t1)){
    // t0 is the nearer root
    if((t0 > p_intersectDelta) && (t0 < *t)){
      *t = t0;
      retval = true;
    }else
      retval = false;
  }else{
    retval = false;
  }

  return retval;
}
// Intersect a ray with a finite cone (apex 'vertex', unit axis 'axis',
// base radius 'radius' at height 'height').  Decomposes the ray into
// components parallel and perpendicular to the axis, solves the resulting
// quadratic, then keeps only roots whose axial height lies in (0, H).
// On a hit nearer than the incoming *t (beyond p_intersectDelta), *t is
// updated and true is returned.
__forceinline__ __host__ __device__ bool intersectRayCone(const ray_t &r, const cone_t &cone, dfloat *t){

  bool retval = false;

  /*
    cone-ray intersection tests:
    | pos + t*dir - (vertex + axis*h) | = R*h/H
  */

  vector_t p = r.start;
  vector_t d = r.dir;
  vector_t v = cone.vertex;
  vector_t a = cone.axis;
  dfloat R = cone.radius;
  dfloat H = cone.height;
  dfloat alpha = (R/H)*(R/H); // squared slope of the cone surface

  // p + t*d - (v + h*a) orth a
  // h = (p-v + t*d).a
  // if h>=0
  //   | p + t*d - (v + h*a)| = alpha*(p-v + t*d).a
  //   |(p-v) - ((p-v).a)*a + t*d - t*(a.d)*a | = alpha*(p-v+t*d).a
  //   | pminusvPerp + t*dPerp| = alpha*( (p-v).a + t*d.a)
  //

  dfloat adotd = vectorDot(a,d);
  vector_t dPerp = vectorSub(d, vectorScale(adotd, a));
  vector_t pminusv = vectorSub(p, v);
  dfloat tmp = vectorDot(a,pminusv);      // axial height of the ray origin
  vector_t pminusvPerp = vectorSub(pminusv, vectorScale(tmp, a));
  dfloat A = vectorDot(dPerp, dPerp) - alpha*adotd*adotd;
  dfloat B = vectorDot(dPerp, pminusvPerp) - alpha*adotd*tmp;
  dfloat C = vectorDot(pminusvPerp, pminusvPerp) - alpha*tmp*tmp;

  /* find roots of quadratic */
  dfloat t0, t1;
  if(solveQuadratic(A,B,C,t0,t1)){

    // cone is behind ray
    if(t0<0 && t1<0)
      return false;

    // check location along axis
    const dfloat h0 = tmp + t0*adotd;
    const dfloat h1 = tmp + t1*adotd;

    const int valid0 = ((h0>0) && (h0<H));
    const int valid1 = ((h1>0) && (h1<H));

    if(!valid0 && !valid1) // out of range
      return false;
    else if(valid0 && valid1){ // both viable: keep the nearest
      if(t0 > t1){ // nearest
	t0 = t1;
      }
    }
    else if(valid1){ // only the far root lies on the finite cone
      t0 = t1;
    }

    if((t0 > p_intersectDelta) && (t0 < *t)){
      *t = t0;
      retval = true;
    }else
      retval = false;
  }else{
    retval = false;
  }

  return retval;
}
// Intersect a ray with a flat disk (center, unit normal, radius).
// First finds the ray/plane intersection, then accepts it only if the hit
// point lies strictly inside the disk radius (shrunk by p_intersectDelta,
// presumably to suppress edge acne -- TODO confirm the units: the epsilon
// is subtracted from the *squared* radius).
__forceinline__ __host__ __device__ bool intersectRayDisk(const ray_t &r, const disk_t &disk, dfloat *t){

  vector_t s = r.start;
  vector_t d = r.dir;
  vector_t n = disk.normal;
  vector_t c = disk.center;

  // intersection with plane
  dfloat ndotd = vectorDot(n, d);

  // (s + t*d - c).n = 0
  dfloat t0 = vectorDot(vectorSub(c,s), n)/ndotd;

  // intersection behind start of ray, or farther than the current best hit
  if(t0<0 || t0>*t) return false;

  // squared distance of the hit point from the disk center
  vector_t p = vectorAdd(s, vectorScale(t0, d));
  vector_t v = vectorSub(p, c);
  dfloat R2 = vectorDot(v,v);

  if(R2>=(disk.radius*disk.radius)-p_intersectDelta)
    return false;

  // redundant given the range check above, but harmless
  if(t0>*t) return false;

  *t = t0;
  return true;
}
/* Ray-cylinder intersection (finite cylinder, open ends).
 * The lateral surface satisfies |p + t*d - (c + h*a)| = R with axial
 * coordinate h = (p + t*d - c).a restricted to (0, H).
 * t is in/out: the current nearest hit distance; updated on a closer hit
 * beyond p_intersectDelta. Returns true iff *t was updated.
 * Two variants are kept: the active one solves the quadratic directly; the
 * disabled #else branch rescales by A^2 to postpone the division.
 */
__forceinline__ __host__ __device__ bool intersectRayCylinder(const ray_t &r, const cylinder_t &cylinder, dfloat *t){
bool retval = false;
/*
cylinder-ray intersection tests:
| p + t*d - (c+h*a) | = R
h = (p+t*d-c).a
0<= h <=H
*/
vector_t p = r.start;
vector_t d = r.dir;
vector_t c = cylinder.center;
vector_t a = cylinder.axis;
dfloat R = cylinder.radius;
dfloat H = cylinder.height;
// components of the direction and center offset orthogonal to the axis
dfloat adotd = vectorDot(a,d);
vector_t dPerp = vectorSub(d, vectorScale(adotd, a));
vector_t pminusc = vectorSub(p, c);
dfloat tmp = vectorDot(a,pminusc);
vector_t pminuscPerp = vectorSub(pminusc, vectorScale(tmp, a));
// quadratic coefficients (B is the half-coefficient)
dfloat A = vectorDot(dPerp, dPerp);
dfloat B = vectorDot(dPerp, pminuscPerp);
dfloat C = vectorDot(pminuscPerp, pminuscPerp) - R*R;
#if 1
// prone to acne (FP32)
dfloat t0, t1;
if(solveQuadratic(A,B,C,t0,t1)){
// cylinder is behind ray
if(t0<=0 && t1<=0)
return false;
// axial coordinate of each root must lie strictly inside (0, H)
dfloat h0 = tmp + t0*adotd;
dfloat h1 = tmp + t1*adotd;
int valid0 = ((h0>0) && (h0<H));
int valid1 = ((h1>0) && (h1<H));
if(!valid0 && !valid1){
return false;
}
else if(valid0 && valid1){
if(t0 > t1){
t0= t1;
}
}
else if(valid1){
t0 = t1;
}
// TW: FUDGE FACTOR (was 1e-3)
if((t0 > p_intersectDelta) && (t0< ((*t)))){// weakened this test
*t = t0;
retval = true;
}else
retval = false;
}else{
retval = false;
}
#else
// prone to acne (FP32)
// alternate formulation: work with quantities scaled by A^2 to avoid an
// early division; kept disabled for reference
dfloat discr = B*B-A*C;
// TW: UNUSED FUDGE FACTOR
dfloat delta = p_intersectDelta; // need large tolerance
if(discr<=delta)
retval = false;
else{
dfloat sqrtdiscr = sqrtf(discr);
dfloat A2 = A*A;
dfloat t0A2 = (-B + sqrtdiscr)*A;
dfloat t1A2 = (-B - sqrtdiscr)*A;
if(t0A2<=delta*A2 && t1A2<=delta*A2) return false;
dfloat h0A2 = tmp*A2 + t0A2*adotd;
dfloat h1A2 = tmp*A2 + t1A2*adotd;
int valid0 = ((h0A2>delta*A2) && (h0A2<H*A2-delta*A2));
int valid1 = ((h1A2>delta*A2) && (h1A2<H*A2-delta*A2));
if(!valid0 && !valid1)
return false;
else if(valid0 && valid1){
if(t0A2 > t1A2){
t0A2 = t1A2;
}
}
else if(valid1){
t0A2 = t1A2;
}
// TW: FUDGE FACTOR (was 1e-3)
if((t0A2 > p_intersectDelta*A2) && (t0A2 < ((*t)*p_intersectDelta))){// weakened this test
*t = t0A2/A2;
retval = true;
}else
retval = false;
}
#endif
return retval;
}
/* Test whether point p lies inside grid cell (cellI, cellJ, cellK).
 * Each coordinate must fall in the half-open interval (lo, hi] of the cell
 * along its axis, so a point exactly on a low face belongs to the
 * neighboring cell. Returns true iff p is inside the cell.
 */
__host__ __device__ bool intersectPointGridCell(const grid_t &grid,
                                                const vector_t p,
                                                const int cellI,
                                                const int cellJ,
                                                const int cellK){
  const dfloat xlo = grid.xmin + (cellI  )*grid.dx;
  const dfloat xhi = grid.xmin + (cellI+1)*grid.dx;
  const dfloat ylo = grid.ymin + (cellJ  )*grid.dy;
  const dfloat yhi = grid.ymin + (cellJ+1)*grid.dy;
  const dfloat zlo = grid.zmin + (cellK  )*grid.dz;
  const dfloat zhi = grid.zmin + (cellK+1)*grid.dz;
  return (p.x > xlo) && (p.x <= xhi) &&
         (p.y > ylo) && (p.y <= yhi) &&
         (p.z > zlo) && (p.z <= zhi);
}
/* Find the exit face(s) of the axis-aligned box along ray r.
 * Sets bit f of `face` for exit face f (0:zmin, 1:ymin, 2:xmax, 3:ymax,
 * 4:xmin, 5:zmax) — several bits may be set when the exit point lies on an
 * edge or corner — and advances r.start to the exit point.
 * Returns false (face == 0) when no forward face is crossed.
 */
__host__ __device__ bool intersectRayBox(ray_t &r, const bbox_t &bbox, unsigned int &face){
  const vector_t d = r.dir;
  const vector_t s = r.start;
  const vector_t invd = r.invDir;
  // one candidate exit distance per axis (<= 0 marks "no candidate"),
  // plus the face bit that axis would contribute
  dfloat tx = -1, ty = -1, tz = -1;
  unsigned int bx = 0, by = 0, bz = 0;
  if(d.x>0){      tx = (bbox.xmax-s.x)*invd.x; bx = 4;  } // face 2
  else if(d.x<0){ tx = (bbox.xmin-s.x)*invd.x; bx = 16; } // face 4
  if(d.y>0){      ty = (bbox.ymax-s.y)*invd.y; by = 8;  } // face 3
  else if(d.y<0){ ty = (bbox.ymin-s.y)*invd.y; by = 2;  } // face 1
  if(d.z>0){      tz = (bbox.zmax-s.z)*invd.z; bz = 32; } // face 5
  else if(d.z<0){ tz = (bbox.zmin-s.z)*invd.z; bz = 1;  } // face 0
  // nearest positive exit distance
  dfloat mint = 20000;
  if(tx>0) mint = min(mint, tx);
  if(ty>0) mint = min(mint, ty);
  if(tz>0) mint = min(mint, tz);
  // flag every axis whose exit distance ties the minimum
  face = 0;
  if(tx>0 && tx<=mint) face |= bx;
  if(ty>0 && ty<=mint) face |= by;
  if(tz>0 && tz<=mint) face |= bz;
  if(face>0){
    // move the ray start to the exit point
    r.start = vectorAdd(s, vectorScale(mint, d));
    return true;
  }
  return false;
}
/* Dispatch the ray-shape intersection to the routine for the shape's type.
 * IMAGE shapes share the rectangle geometry. t is passed through unchanged
 * (in/out nearest hit distance). Returns false for unknown types.
 */
__forceinline__ __host__ __device__ bool intersectRayShape(const ray_t &r, const shape_t &s, dfloat *t){
  if(s.type==SPHERE)    return intersectRaySphere   (r, s.sphere,    t);
  if(s.type==CONE)      return intersectRayCone     (r, s.cone,      t);
  if(s.type==DISK)      return intersectRayDisk     (r, s.disk,      t);
  if(s.type==CYLINDER)  return intersectRayCylinder (r, s.cylinder,  t);
  if(s.type==IMAGE || s.type==RECTANGLE)
                        return intersectRayRectangle(r, s.rectangle, t);
  if(s.type==TRIANGLE)  return intersectRayTriangle (r, s.triangle,  t);
  if(s.type==ELLIPSOID) return intersectRayEllipsoid(r, s.ellipsoid, t);
  return false;
}
/* Compute the unit outward-ish surface normal of shape s at point v.
 * v is assumed to lie on the surface of s. For cone/disk/cylinder the
 * normal is left as the zero vector near degenerate regions (apex, rim,
 * end caps) — callers receive an unnormalized zero vector in that case.
 * Triangle/rectangle normals are face normals (orientation fixed by vertex
 * and axis order, not by the viewing side).
 */
__forceinline__ __host__ __device__ vector_t computeNormal(const vector_t &v, const shape_t &s){
vector_t n = vectorCreate(0,0,0);
/* Find the normal for this new vector_t at the point of intersection */
switch(s.type){
case SPHERE:
{
// radial direction from the center
n = vectorSub(v, s.sphere.pos);
break;
}
case ELLIPSOID:
{
vector_t vMs = vectorSub(v, s.ellipsoid.pos);
// f = (v-c).^2./(radii.^2) - 1 => n = grad f
n = vectorDotMultiply(vMs, vectorDotMultiply(s.ellipsoid.invRadii, s.ellipsoid.invRadii));
break;
}
case TRIANGLE:
{
// face normal from two edge vectors
vector_t a = vectorSub(s.triangle.vertices[2], s.triangle.vertices[0]);
vector_t b = vectorSub(s.triangle.vertices[1], s.triangle.vertices[0]);
n = vectorCrossProduct(a, b);
break;
}
case CONE:
{
// n = (v-vertex) x ( a x (v-vertex) )
vector_t vMinusVertex = vectorSub(v, s.cone.vertex);
// axis location
dfloat H = s.cone.height;
dfloat z = vectorDot(vMinusVertex, s.cone.axis);
// problematic if axis is parallel to v-Vertex
// only assign a normal away from the apex and rim (tolerance p_projectDelta)
if(z>p_projectDelta && z<H-p_projectDelta)
n = vectorCrossProduct( vMinusVertex, vectorCrossProduct(s.cone.axis, vMinusVertex));
break;
}
case DISK:
{
vector_t vMc = vectorSub(v, s.disk.center);
dfloat R = s.disk.radius;
// in-plane distance from the center; skip points on/near the rim
vector_t tmp = vectorOrthogonalize(vMc, s.disk.normal);
dfloat z = vectorNorm(tmp);
if(z<R-p_projectDelta)
n = s.disk.normal;
break;
}
case CYLINDER:
{
// z = (v - c).a => clamp
vector_t vMc = vectorSub(v, s.cylinder.center);
dfloat H = s.cylinder.height;
dfloat z = vectorDot(vMc, s.cylinder.axis);
// lateral-surface normal only away from the end caps
if(z>p_projectDelta && z<H-p_projectDelta)
n = vectorOrthogonalize(vMc, s.cylinder.axis);
break;
}
case IMAGE:
case RECTANGLE:
{
#if 0
vector_t C = s.rectangle.center;
vector_t A1 = s.rectangle.axis[0];
vector_t A2 = s.rectangle.axis[1];
dfloat L1 = s.rectangle.length[0];
dfloat L2 = s.rectangle.length[1];
// X = v - C
vector_t X = vectorSub(v, C);
dfloat h1 = vectorDot(A1, X)+0.5*L1; // shift
dfloat h2 = vectorDot(A2, X)+0.5*L2; // shift
#endif
// constant face normal from the two rectangle axes
n = vectorCrossProduct(s.rectangle.axis[0], s.rectangle.axis[1]);
break;
}
}
// normalize when normal is not degenerate
dfloat tmp = vectorNorm(n);
if(tmp)
n = vectorScale(1./tmp, n);
return n;
}
/* Determine the material of shape s at surface point v.
 * Most shapes simply return materials[s.material]. Exceptions:
 *  - TRIANGLE: builds a pseudo-color from the barycentric interpolation of
 *    the per-vertex scalar s.triangle.q (a red/white/blue colormap).
 *  - RECTANGLE with s.material < 0: procedural checkerboard between
 *    materials[0] and materials[1].
 *  - IMAGE: samples the rectangle's texture with nearest-neighbor lookup
 *    and returns it as an emissive material.
 * Note: m is returned uninitialized for any type not handled below.
 */
__forceinline__ __host__ __device__ material_t computeMaterial(const int Nmaterials, const material_t *materials,
const vector_t &v, const shape_t &s){
material_t m;
switch(s.type){
case TRIANGLE:
{
// v = L1*v1 + L2*v2 + (1-L1-L2)*v3 + N1*n
// [ v3-v1 v2-v1 -N1][L1;L2;n] = [v3-v]
// solve for barycentric coordinates via triple products (Cramer's rule)
vector_t B1 = vectorSub(s.triangle.vertices[2], s.triangle.vertices[0]);
vector_t B2 = vectorSub(s.triangle.vertices[1], s.triangle.vertices[0]);
vector_t B3 = vectorCrossProduct(B1,B2);
vector_t R = vectorSub(s.triangle.vertices[2], v);
dfloat J = vectorTripleProduct(B2, B3, B1);
dfloat L1 = vectorTripleProduct(B2, B3, R)/J;
dfloat L2 = vectorTripleProduct(B3, B1, R)/J;
dfloat N1 = vectorTripleProduct(B1, B2, R)/J;
// interpolate the vertex scalar field q at the hit point
dfloat Iq = L1*s.triangle.q[0] + L2*s.triangle.q[1] + (1-L1-L2)*s.triangle.q[2];
// dfloat maxIq = 2.5, minIq =.05;
// map q from [minIq, maxIq] to [0,1] for the colormap
dfloat maxIq = 3, minIq = -3;
Iq = (Iq-minIq)/(maxIq-minIq);
#if 0
if(Iq<0 || Iq>1){
m.diffuse.red = 1;
m.diffuse.green = 1;
m.diffuse.blue = 1;
m.reflection = 0;
m.eta = 1.;
m.refraction = 1;
m.info.refractor = 1;
m.info.reflector = 0;
m.info.emitter = 0;
}
else{
}
#endif
Iq = clamp(Iq, 0, 1);
#if 0
dfloat redIq = (Iq<1./3.) ? 3.*Iq:0; // reverse ?
dfloat greenIq = (1./3<=Iq && Iq<2./3.) ? 3.*(Iq-1./3):0;
dfloat blueIq = (2./3<=Iq) ? 3.*(Iq-2./3):0;
#else
// three-band colormap: red ramp, white middle band, blue ramp
dfloat redIq = 0, greenIq = 0, blueIq = 0;
if(Iq<1/3.) redIq = 3*Iq;
else if(Iq>=2./3) blueIq = 3.*(Iq-2./3);
else{
redIq = 1;
greenIq = 1;
blueIq = 1;
}
#endif
m.diffuse.red = redIq;
m.diffuse.green = greenIq;
m.diffuse.blue = blueIq;
m.reflection = 0.05;
m.eta = 1.;
m.refraction = 0.01;
m.info.refractor = 0;
m.info.reflector = 1;
m.info.emitter = 0;
break;
}
case SPHERE:
case ELLIPSOID:
case DISK:
case CYLINDER:
case CONE:
{
// plain indexed material lookup
m = materials[s.material];
break;
}
#if 0
case CYLINDER:
{
vector_t c = s.cylinder.center;
vector_t a = s.cylinder.axis;
vector_t vMc = vectorSub(v, c);
dfloat H = s.cylinder.height;
dfloat h = vectorDot(vMc, a);
int i = (int) (8.f*(h/H)); // checkerboard material selector
int idM = 10*((i%2)); // 1 if either i is odd or j is even
m = materials[idM];
break;
}
case CONE:
{
vector_t c = s.cone.vertex;
vector_t a = s.cone.axis;
vector_t vMc = vectorSub(v, c);
dfloat H = s.cone.height;
dfloat h = vectorDot(vMc, a);
int i = (int) (8.f*(h/H)); // checkerboard material selector
int idM = 20*((i%2)); // 1 if either i is odd or j is even
m = materials[idM];
break;
}
#endif
case RECTANGLE:
{
if(s.material>=0)
m = materials[s.material];
else{
// negative material index selects a procedural 8x8 checkerboard
vector_t C = s.rectangle.center;
vector_t A1 = s.rectangle.axis[0];
vector_t A2 = s.rectangle.axis[1];
dfloat L1 = s.rectangle.length[0];
dfloat L2 = s.rectangle.length[1];
// X = v - C
vector_t X = vectorSub(v, C);
dfloat h1 = vectorDot(A1, X)+0.5*L1; // shift
dfloat h2 = vectorDot(A2, X)+0.5*L2; // shift
int i = (int) (8.f*(h1/L1)); // checkerboard material selector
int j = (int) (8.f*(h2/L2));
int idM = ((i%2) ^ ((j+1)%2)); // 1 if either i is odd or j is even
// printf("i=%d, j=%d, h1=%g, h2=%g, L1=%g, L2=%g, idM = %d\n", i, j, h1, h2, L1, L2, idM);
m = materials[idM];
}
break;
}
case IMAGE:
{
// textured rectangle: sample the image as an emissive diffuse color
vector_t C = s.rectangle.center;
vector_t A1 = s.rectangle.axis[0];
vector_t A2 = s.rectangle.axis[1];
dfloat L1 = s.rectangle.length[0];
dfloat L2 = s.rectangle.length[1];
const unsigned char *img = s.rectangle.image;
int NI = s.rectangle.NI;
int NJ = s.rectangle.NJ;
// X = v - C
vector_t X = vectorSub(v, C);
dfloat h1 = vectorDot(A1, X)+0.5*L1; // shift
dfloat h2 = vectorDot(A2, X)+0.5*L2; // shift
// nearest neighbor interpolation
dfloat i = iclamp(NI*h1/L1, 0, NI-1);
dfloat j = iclamp(NJ*h2/L2, 0, NJ-1);
// horizontal flip (NI-1-i) to match image orientation
int idM = (NI-1-i) + j*NI;
m.diffuse.red = img[idM*3 + 0]/256.f;
m.diffuse.green = img[idM*3 + 1]/256.f;
m.diffuse.blue = img[idM*3 + 2]/256.f;
m.reflection = 1;
m.refraction = 1;
m.info.refractor = 0;
m.info.reflector = 0;
m.info.emitter = 1;
break;
}
}
return m;
}
// grid search
/* March ray r through the uniform acceleration grid, testing the shapes
 * registered in each visited cell (3D-DDA style: step to the neighbor
 * indicated by the exit face of the current cell's bounding box).
 * On success: *t is the distance (measured from r.start) to the nearest
 * intersection whose hit point lies inside the current cell, currentShape
 * is that shape's index, and true is returned.
 * Returns false with currentShape == -1 when the ray exits the grid.
 * Fix: the reference parameter had been corrupted by an HTML-entity
 * mangling ("int ¤tShape"); restored to `int &currentShape`, matching
 * the int lvalues passed at both call sites. Also zero-initialize invd so
 * no indeterminate value is copied into newr (components for zero
 * direction entries are never read — all uses are sign-guarded).
 */
__host__ __device__ bool gridRayIntersectionSearch(ray_t r,
const int Nshapes, const shape_t *shapes, const grid_t &grid,
dfloat *t, int &currentShape){
// is start of ray in a grid cell ?
vector_t s = r.start; // will modify ray through s
vector_t d = r.dir;
vector_t invd = vectorCreate(0,0,0);
if(d.x) invd.x = 1.f/d.x;
if(d.y) invd.y = 1.f/d.y;
if(d.z) invd.z = 1.f/d.z;
// if ray is outside grid then project onto grid
if(s.x<grid.xmin){
if(d.x<=0) return false; // pointing away or grazing from grid
dfloat t0 = -(s.x-grid.xmin)*invd.x;
s.x = grid.xmin;
s.y += t0*d.y;
s.z += t0*d.z;
}
if(s.x>grid.xmax){
if(d.x>=0) return false;
dfloat t0 = -(s.x-grid.xmax)*invd.x;
s.x = grid.xmax;
s.y += t0*d.y;
s.z += t0*d.z;
}
if(s.y<grid.ymin){
if(d.y<=0) return false; // pointing away or grazing from grid
dfloat t0 = -(s.y-grid.ymin)*invd.y;
s.y = grid.ymin;
s.x += t0*d.x;
s.z += t0*d.z;
}
if(s.y>grid.ymax){
if(d.y>=0) return false;
dfloat t0 = -(s.y-grid.ymax)*invd.y;
s.y = grid.ymax;
s.x += t0*d.x;
s.z += t0*d.z;
}
if(s.z<grid.zmin){
if(d.z<=0) return false; // pointing away or grazing from grid
dfloat t0 = -(s.z-grid.zmin)*invd.z;
s.z = grid.zmin;
s.x += t0*d.x;
s.y += t0*d.y;
}
if(s.z>grid.zmax){
if(d.z>=0) return false;
dfloat t0 = -(s.z-grid.zmax)*invd.z;
s.z = grid.zmax;
s.x += t0*d.x;
s.y += t0*d.y;
}
// now the ray start must be on the surface of the grid or in a cell
int cellI = iclamp((s.x-grid.xmin)*grid.invdx,0,grid.NI-1); // assumes grid.NI
int cellJ = iclamp((s.y-grid.ymin)*grid.invdy,0,grid.NJ-1);
int cellK = iclamp((s.z-grid.zmin)*grid.invdz,0,grid.NK-1);
// newr tracks the cell-walking position; r stays the original ray so that
// *t remains measured from the original start
ray_t newr = r;
newr.start = s;
newr.invDir = invd;
currentShape = -1;
do{
int cellID = cellI + grid.NI*cellJ + grid.NI*grid.NJ*cellK;
*t = 20000; // TW ?
// test every shape registered in this cell
int start = grid.c_boxStarts[cellID];
int end = grid.c_boxStarts[cellID+1];
for(int offset=start;offset<end;++offset){
const int obj = grid.c_boxContents[offset];
const shape_t shape = shapes[obj];
if(intersectRayShape(r, shape, t)){
vector_t intersect = vectorAdd(r.start, vectorScale(*t, r.dir));
// only accept hits inside the current cell so the nearest cell wins
if(intersectPointGridCell(grid, intersect, cellI, cellJ, cellK)){
currentShape = obj;
}
}
}
if(currentShape != -1){
return true;
}
unsigned int face = 0;
// find faces that ray passes through
intersectRayBox(newr,grid.c_bboxes[cellID], face);
// step to the neighboring cell(s) indicated by the exit face bits
if(face&1) --cellK; // face 0
if(face&2) --cellJ; // face 1
if(face&4) ++cellI; // face 2
if(face&8) ++cellJ; // face 3
if(face&16) --cellI;// face 4
if(face&32) ++cellK;// face 5
if(face==0){
break;
}
}while(cellI>=0 && cellI<grid.NI &&
cellJ>=0 && cellJ<grid.NJ &&
cellK>=0 && cellK<grid.NK);
return false;
}
/* Trace one primary ray through the scene and return its color.
 * Uses an explicit ray stack (no recursion): reflection and refraction
 * spawn child rays that are pushed onto rayStack, bounded by p_maxNrays
 * total rays and p_maxLevel bounce depth. Each ray's contribution is
 * weighted by its accumulated coefficient r.coef.
 * bg is returned only when the primary ray (rayID == 0) misses everything.
 */
__device__ colour_t trace(const grid_t grid,
const int Nshapes,
const shape_t *shapes,
const int Nlights,
const light_t *lights,
const int Nmaterials,
const material_t *materials,
ray_t r,
int level,
dfloat coef,
colour_t bg){
colour_t black;
black.red = 0;
black.green = 0;
black.blue = 0;
// initialize color as black
colour_t c = black;
int Nrays = 0, rayID = 0;
ray_t rayStack[p_maxNrays];
// add initial ray to stack
rayID = 0;
r.level = 0;
r.coef = coef;
rayStack[Nrays] = r;
++Nrays;
// keep looping until the stack is exhausted or the maximum number of rays is reached
while(rayID<Nrays && Nrays<p_maxNrays){
// get ray
r = rayStack[rayID];
// look for intersection of this ray with shapes
int currentShapeID = -1;
dfloat t = 20000.f;
// look through grid to find intersections with ray
gridRayIntersectionSearch(r, Nshapes, shapes, grid, &t, currentShapeID);
// none found
if(currentShapeID == -1){
if(rayID==0)
c = bg;
// go to next ray
++rayID;
continue;
}
// shape at nearest ray intersection
shape_t currentShape = shapes[currentShapeID];
// compute intersection location
vector_t intersection = vectorAdd(r.start, vectorScale(t, r.dir));
// find unit surface normal
vector_t n = computeNormal(intersection, currentShape);
/* use shadow tracing to determine color contribution from this intersection */
dfloat rdotn = vectorDot(r.dir, n);
/* Find the material to determine the colour */
material_t currentMat = computeMaterial(Nmaterials, materials, intersection, currentShape);
// test for reflection
info_t info = currentMat.info;
if(info.emitter==1){
// emissive surface: add its color directly, weighted by ray coefficient
dfloat lambert = rdotn * r.coef;
c.red += lambert * currentMat.diffuse.red;
c.green += lambert * currentMat.diffuse.green;
c.blue += lambert * currentMat.diffuse.blue;
}
else{
if(info.reflector==1){
/* start ray slightly off surface */
dfloat sc = p_shadowDelta;
if(rdotn>0) // reverse offset if inside
sc *= -1.f; // sign ? was -1
vector_t shadowStart = vectorAdd(intersection, vectorScale(sc, n)); // HACK to shift ray start off service
ray_t lightRay;
lightRay.start = shadowStart;
/* Find the value of the light at this point */
for(unsigned int j=0; j < Nlights; j++){
light_t currentLight = lights[j];
vector_t dist = vectorSub(currentLight.pos, shadowStart);
// light is behind the surface: no contribution
if(vectorDot(n, dist) <= 0) continue;
dfloat lightDist = vectorNorm(dist);
dfloat tshadow = lightDist;
if(tshadow <= 0) continue;
lightRay.dir = vectorScale((1.f/tshadow), dist);
/* search in light ray direction for object */
int shadowShapeID = -1;
gridRayIntersectionSearch(lightRay, Nshapes, shapes, grid, &tshadow, shadowShapeID);
// check for objects in path of shadow ray
bool inShadow = false;
if(shadowShapeID==-1) // no object causes shadow
inShadow = false;
else if(tshadow >= 0 && tshadow < lightDist) //
inShadow = true;
if(inShadow==false){
/* Lambert diffusion */
dfloat lambert = vectorDot(lightRay.dir, n) * r.coef;
c.red += lambert * currentLight.intensity.red * currentMat.diffuse.red;
c.green += lambert * currentLight.intensity.green * currentMat.diffuse.green;
c.blue += lambert * currentLight.intensity.blue * currentMat.diffuse.blue;
}
}
// spawn a reflected ray if depth and stack budget allow
if((r.level+1<p_maxLevel) && Nrays<p_maxNrays) {
ray_t reflectRay;
// create new ray starting from offset intersection, with ray direction reflected in normal
reflectRay.start = shadowStart;
reflectRay.dir = vectorAdd(r.dir, vectorScale(-2.0f*rdotn, n));
// increment level for new ray
reflectRay.level = r.level+1;
reflectRay.coef = r.coef*currentMat.reflection; // scale intensity
// launch new ray
rayStack[Nrays] = reflectRay;
// increment ray counter
++Nrays;
}
}
// https://www.scratchapixel.com/code.php?id=13&origin=/lessons/3d-basic-rendering/introduction-to-shading
// test for refraction
if(info.refractor==1){
// can we add a new refraction ray to the stack ?
if((r.level+1<p_maxLevel) && Nrays<p_maxNrays){
// push ray onto other side of surface
dfloat sc = -p_shadowDelta; // reverse number above
if(rdotn>0)
sc *= -1;
// HACK to shift ray start off service
vector_t shadowStart = vectorAdd(intersection, vectorScale(sc, n));
// get index of refraction
dfloat eta = currentMat.eta;
if(rdotn>0){
rdotn *= -1;
}else{
eta = 1.f/eta;
}
// kappa < 0 means total internal reflection: no refracted ray
dfloat kappa = 1.f - eta*eta*(1.f - rdotn*rdotn);
if(kappa>0){
// create new refraction ray
ray_t refractRay;
// https://www.cs.cornell.edu/courses/cs4620/2012fa/lectures/36raytracing.pdf
// newdir = eta*d - n*(eta*d.n - sqrt((1-eta*eta*(1-(d.n)^2))))
dfloat fac = eta*rdotn+sqrt(kappa); // was - (NEED TO DOUBLE CHECK - other normal)
refractRay.start = shadowStart;
refractRay.dir = vectorNormalize(vectorSub(vectorScale(eta, r.dir), vectorScale(fac, n)));
refractRay.level = r.level+1;
refractRay.coef = r.coef*currentMat.refraction; // scale intensity
rayStack[Nrays] = refractRay;
++Nrays;
}
}
}
}
// go to next ray on stack
++rayID;
}
return c;
}
/* Render one image pixel per thread (2D launch, one thread per (I,J)).
 * Implements a thin-lens depth-of-field model: the first sample goes
 * through the lens center (weighted p_primaryWeight), the remaining
 * p_Nsamples-1 samples start from random points on the aperture, all aimed
 * at the same focal-plane target. The scene is rotated about the box
 * center by (costheta, sintheta) before tracing. The final color is a
 * weighted average written into img (RGB, vertically flipped).
 */
__global__ void renderKernel (const int NI,
const int NJ,
const grid_t grid,
const sensor_t sensor,
const int Nshapes,
const shape_t *shapes,
const int Nlights,
const light_t *lights,
const int Nmaterials,
const material_t *materials,
const dfloat costheta,
const dfloat sintheta,
const dfloat *randomNumbers,
unsigned char *img
){
const colour_t bg = sensor.bg;
int I = threadIdx.x + blockDim.x*blockIdx.x;
int J = threadIdx.y + blockDim.y*blockIdx.y;
// bounds guard: grid may overshoot the image
if(I<NI && J<NJ){
ray_t r;
dfloat coef = 1.0;
int level = 0;
// look at this: https://en.wikipedia.org/wiki/3D_projection
// 2.5 location of sensor pixel
colour_t c;
// dfloat randI = randomNumbers[I];
// dfloat randJ = randomNumbers[J];
dfloat x0 = sensor.eyeX.x;
dfloat y0 = sensor.eyeX.y;
dfloat z0 = sensor.eyeX.z;
// multiple rays emanating from sensor, passing through lens and focusing at the focal plane
// 1. compute intersection of ray passing through lens center to focal plane
// (sensorX + alpha*(lensC -sensorX)).sensorN = focalPlaneOffset
// alpha = (focalOffset-s.sensorN)/( (lensC-s).sensorN) [ . dot product ]
dfloat cx = BOXSIZE/2., cy = BOXSIZE/2., cz = BOXSIZE/2;
vector_t sensorN = vectorCrossProduct(sensor.Idir, sensor.Jdir);
vector_t sensorX = sensorLocation(NI, NJ, I, J, sensor);
dfloat focalPlaneOffset = sensor.focalPlaneOffset;
vector_t centralRayDir = vectorSub(sensor.lensC, sensorX);
dfloat alpha = (focalPlaneOffset - vectorDot(sensorX, sensorN))/vectorDot(centralRayDir, sensorN);
// 2. target
// all aperture samples converge at this focal-plane point
vector_t targetX = vectorAdd(sensorX, vectorScale(alpha, centralRayDir));
x0 = sensorX.x;
y0 = sensorX.y;
z0 = sensorX.z;
// 3. loop over vertical offsets on lens (thin lens)
c.red = 0; c.green = 0; c.blue = 0;
for(int samp=0;samp<p_Nsamples;++samp){
// aperture width
int sampId = (I+J*NI + samp*blockDim.x*blockDim.y)%NRANDOM;
dfloat offI = randomNumbers[2*sampId+0]*p_apertureRadius;
dfloat offJ = randomNumbers[2*sampId+1]*p_apertureRadius;
// choose random starting point on lens (assumes lens and sensor arre parallel)
if(samp>0) { // primary ray
x0 = sensor.lensC.x + offI*sensor.Idir.x + offJ*sensor.Jdir.x;
y0 = sensor.lensC.y + offI*sensor.Idir.y + offJ*sensor.Jdir.y;
z0 = sensor.lensC.z + offI*sensor.Idir.z + offJ*sensor.Jdir.z;
}
// unit direction from the sample point to the focal target
dfloat dx0 = targetX.x - x0;
dfloat dy0 = targetX.y - y0;
dfloat dz0 = targetX.z - z0;
dfloat L0 = sqrt(dx0*dx0+dy0*dy0+dz0*dz0);
dx0 = dx0/L0;
dy0 = dy0/L0;
dz0 = dz0/L0;
// rotate start and direction about the vertical axis through the box center
r.start.x = costheta*(x0-cx) - sintheta*(z0-cz) + cx;
r.start.y = y0;
r.start.z = sintheta*(x0-cx) + costheta*(z0-cz) + cz;
r.dir.x = costheta*dx0 - sintheta*dz0;
r.dir.y = dy0;
r.dir.z = sintheta*dx0 + costheta*dz0;
colour_t newc =
trace(grid, Nshapes, shapes, Nlights, lights, Nmaterials, materials, r, level, coef, bg);
// the central (in-focus) sample is weighted more heavily
dfloat sc = (samp==0) ? p_primaryWeight: 1.f;
c.red += sc*newc.red;
c.green += sc*newc.green;
c.blue += sc*newc.blue;
}
// primary weighted average
c.red /= (p_primaryWeight+p_Nsamples-1);
c.green /= (p_primaryWeight+p_Nsamples-1);
c.blue /= (p_primaryWeight+p_Nsamples-1);
// reverse vertical because of lensing
img[(I + (NJ-1-J)*NI)*3 + 0] = (unsigned char)min( c.red*255.0f, 255.0f);
img[(I + (NJ-1-J)*NI)*3 + 1] = (unsigned char)min(c.green*255.0f, 255.0f);
img[(I + (NJ-1-J)*NI)*3 + 2] = (unsigned char)min( c.blue*255.0f, 255.0f);
}
}
#define BLOCKSIZE 1024
#define LOGBLOCKSIZE 10
// https://en.wikipedia.org/wiki/Prefix_sum
// Hillis and Steele
// [ can be done with far fewer barriers ]
/* First pass of a two-pass inclusive prefix sum (Hillis-Steele, in shared
 * memory, two scan steps per loop iteration via double buffering).
 * Requires blockDim.x == BLOCKSIZE.
 * Writes the per-block inclusive scan to scanv[n+1] (shifted by one so
 * scanv[0] can remain 0) and the block total to starts[b+1]; a host-side
 * scan of `starts` plus finishScanKernel completes the global scan.
 */
__global__ void startScanKernel(const int N,
const int *v,
int *scanv,
int *starts){
__shared__ int s_v0[BLOCKSIZE];
__shared__ int s_v1[BLOCKSIZE];
int j = threadIdx.x;
int b = blockIdx.x;
int n = j + b*BLOCKSIZE;
// load (zero-padded past N) into shared memory
s_v0[j] = (n<N) ? v[j+b*BLOCKSIZE]: 0;
int offset = 1;
do{
__syncthreads();
// step 1: add neighbor `offset` to the left (ping: s_v0 -> s_v1)
s_v1[j] = (j<offset) ? s_v0[j] : (s_v0[j]+s_v0[j-offset]) ;
offset *= 2;
__syncthreads();
// step 2: same with doubled offset (pong: s_v1 -> s_v0)
s_v0[j] = (j<offset) ? s_v1[j] : (s_v1[j]+s_v1[j-offset]) ;
offset *= 2;
} while(offset<BLOCKSIZE);
if(n<N)
scanv[n+1] = s_v0[j];
// last thread holds the block total
if(j==(BLOCKSIZE-1)){
starts[b+1] = s_v0[j];
}
}
/* Second pass of the two-pass prefix sum: add each block's (already
 * prefix-summed) offset from `starts` to its in-block scan entries in
 * scanv, completing the global inclusive scan. scanv is shifted by one
 * relative to the input, so entry n+1 is updated for input element n.
 */
__global__ void finishScanKernel(const int N,
int *scanv,
int *starts){
  const int element = threadIdx.x + blockIdx.x*BLOCKSIZE;
  if(element >= N) return;
  scanv[element+1] += starts[blockIdx.x];
}
// returns the cumulative sum
/* Inclusive prefix sum of the N device ints in c_v into c_scanv (shifted by
 * one: c_scanv[0..N], c_scanv[0] assumed pre-zeroed by the caller).
 * c_starts (device) and starts (host) are scratch arrays of at least
 * ceil(N/BLOCKSIZE)+1 ints. Returns the total sum of all N entries.
 * Note: the host/device copies synchronize the intermediate kernel.
 */
int scan(const int N, const int *c_v, int *c_starts, int *starts, int *c_scanv){
int B = BLOCKSIZE;
int G = (N+BLOCKSIZE-1)/BLOCKSIZE;
// pass 1: per-block inclusive scan; block totals land in c_starts[b+1]
hipLaunchKernelGGL(( startScanKernel) , dim3(G), dim3(B) , 0, 0, N, c_v, c_scanv, c_starts);
hipMemcpy(starts, c_starts, (G+1)*sizeof(int), hipMemcpyDeviceToHost);
starts[0] = 0;
// host-side prefix sum of the per-block totals (G is small)
for(int b=0;b<G;++b){
starts[b+1] += starts[b];
}
int count = starts[G];
hipMemcpy(c_starts, starts, (G+1)*sizeof(int), hipMemcpyHostToDevice);
// pass 2: add block offsets to every in-block entry
hipLaunchKernelGGL(( finishScanKernel) , dim3(G), dim3(B) , 0, 0, N, c_scanv, c_starts);
return count;
}
/* Axis-aligned bounding box of a triangle: the componentwise min/max of
 * its three vertices. */
__device__ bbox_t createBoundingBoxTriangle(triangle_t &triangle){
  bbox_t bbox;
  // seed with vertex 0, then fold in the other two vertices
  bbox.xmin = bbox.xmax = triangle.vertices[0].x;
  bbox.ymin = bbox.ymax = triangle.vertices[0].y;
  bbox.zmin = bbox.zmax = triangle.vertices[0].z;
  for(int i=1;i<3;++i){
    bbox.xmin = min(bbox.xmin, triangle.vertices[i].x);
    bbox.xmax = max(bbox.xmax, triangle.vertices[i].x);
    bbox.ymin = min(bbox.ymin, triangle.vertices[i].y);
    bbox.ymax = max(bbox.ymax, triangle.vertices[i].y);
    bbox.zmin = min(bbox.zmin, triangle.vertices[i].z);
    bbox.zmax = max(bbox.zmax, triangle.vertices[i].z);
  }
  return bbox;
}
/* Axis-aligned bounding box of a sphere: center +- radius on each axis. */
__device__ bbox_t createBoundingBoxSphere(sphere_t &sphere){
  bbox_t bbox;
  const vector_t c = sphere.pos;
  const dfloat R = sphere.radius;
  bbox.xmin = c.x - R;  bbox.xmax = c.x + R;
  bbox.ymin = c.y - R;  bbox.ymax = c.y + R;
  bbox.zmin = c.z - R;  bbox.zmax = c.z + R;
  return bbox;
}
/* Axis-aligned bounding box of an axis-aligned ellipsoid.
 * invRadii stores reciprocal semi-axes, so each half-extent is
 * 1/invRadii along that axis. */
__device__ bbox_t createBoundingBoxEllipsoid(ellipsoid_t &ellipsoid){
  bbox_t bbox;
  const vector_t c = ellipsoid.pos;
  const dfloat rx = 1.f/ellipsoid.invRadii.x;
  const dfloat ry = 1.f/ellipsoid.invRadii.y;
  const dfloat rz = 1.f/ellipsoid.invRadii.z;
  bbox.xmin = c.x - rx;  bbox.xmax = c.x + rx;
  bbox.ymin = c.y - ry;  bbox.ymax = c.y + ry;
  bbox.zmin = c.z - rz;  bbox.zmax = c.z + rz;
  return bbox;
}
/* Axis-aligned bounding box of a finite cylinder (base at `center`,
 * extending height H along unit `axis`, radius R).
 * Per axis e_i: the slab midpoint is c_i + (H/2)*a_i, the axial half-extent
 * is (H/2)*|a_i|, and the radial contribution is R*|e_i x a|. */
__device__ bbox_t createBoundingBoxCylinder(cylinder_t &cylinder){
  bbox_t bbox;
  const vector_t c = cylinder.center;
  const vector_t a = cylinder.axis;
  const dfloat R = cylinder.radius;
  const dfloat H = cylinder.height;
  // radial contribution per axis: R*|cross(e_i, a)|
  const dfloat rx = R*sqrtf(a.y*a.y + a.z*a.z);
  const dfloat ry = R*sqrtf(a.x*a.x + a.z*a.z);
  const dfloat rz = R*sqrtf(a.x*a.x + a.y*a.y);
  bbox.xmax = c.x + (H/2)*a.x + (H/2)*fabs(a.x) + rx;
  bbox.xmin = c.x + (H/2)*a.x - (H/2)*fabs(a.x) - rx;
  bbox.ymax = c.y + (H/2)*a.y + (H/2)*fabs(a.y) + ry;
  bbox.ymin = c.y + (H/2)*a.y - (H/2)*fabs(a.y) - ry;
  bbox.zmax = c.z + (H/2)*a.z + (H/2)*fabs(a.z) + rz;
  bbox.zmin = c.z + (H/2)*a.z - (H/2)*fabs(a.z) - rz;
  return bbox;
}
/* Axis-aligned bounding box of a cone (apex at `vertex`, base of radius R
 * at distance H along unit `axis`). Per axis: min/max of the apex and the
 * base-disk extremes, where the base-disk radial contribution along e_i is
 * R*|e_i x a|. */
__device__ bbox_t createBoundingBoxCone(cone_t &cone){
  bbox_t bbox;
  const vector_t v = cone.vertex;
  const vector_t a = cone.axis;
  const dfloat R = cone.radius;
  const dfloat H = cone.height;
  // radial contribution per axis: R*|cross(e_i, a)|
  const dfloat rx = R*sqrtf(a.y*a.y + a.z*a.z);
  const dfloat ry = R*sqrtf(a.x*a.x + a.z*a.z);
  const dfloat rz = R*sqrtf(a.x*a.x + a.y*a.y);
  bbox.xmax = max(v.x, v.x + H*a.x + rx);
  bbox.xmin = min(v.x, v.x + H*a.x - rx);
  bbox.ymax = max(v.y, v.y + H*a.y + ry);
  bbox.ymin = min(v.y, v.y + H*a.y - ry);
  bbox.zmax = max(v.z, v.z + H*a.z + rz);
  bbox.zmin = min(v.z, v.z + H*a.z - rz);
  return bbox;
}
/* Axis-aligned bounding box of a rectangle, thickened by +-delta along its
 * normal so the box has nonzero volume: take the componentwise min/max over
 * the 8 corners of the resulting thin slab. */
__device__ bbox_t createBoundingBoxRectangle(rectangle_t &rectangle){
  bbox_t bbox;
  const vector_t C = rectangle.center;
  vector_t A1 = rectangle.axis[0];
  vector_t A2 = rectangle.axis[1];
  const vector_t nrm = vectorCrossProduct(A1, A2);
  // scale the axes to half-extent vectors
  A1 = vectorScale(rectangle.length[0]/2., A1);
  A2 = vectorScale(rectangle.length[1]/2., A2);
  // slab half-thickness along the normal
  const dfloat delta = 1e-1;
  const vector_t dn = vectorScale(delta, nrm);
  const vector_t Cdown = vectorSub(C, dn);
  const vector_t Cup = vectorAdd(C, dn);
  vector_t corner[8];
  corner[0] = vectorSub(Cdown, vectorAdd(A1, A2)); // -n, -A1, -A2
  corner[1] = vectorAdd(Cdown, vectorSub(A1, A2)); // -n, +A1, -A2
  corner[2] = vectorAdd(Cdown, vectorAdd(A1, A2)); // -n, +A1, +A2
  corner[3] = vectorAdd(Cdown, vectorSub(A2, A1)); // -n, -A1, +A2
  corner[4] = vectorSub(Cup,   vectorAdd(A1, A2)); // +n, -A1, -A2
  corner[5] = vectorAdd(Cup,   vectorSub(A1, A2)); // +n, +A1, -A2
  corner[6] = vectorAdd(Cup,   vectorAdd(A1, A2)); // +n, +A1, +A2
  corner[7] = vectorAdd(Cup,   vectorSub(A2, A1)); // +n, -A1, +A2
  bbox.xmin = 1e9;
  bbox.ymin = 1e9;
  bbox.zmin = 1e9;
  bbox.xmax = -1e9;
  bbox.ymax = -1e9;
  bbox.zmax = -1e9;
#pragma unroll 8
  for(int i=0;i<8;++i){
    bbox.xmin = min(bbox.xmin, corner[i].x);
    bbox.ymin = min(bbox.ymin, corner[i].y);
    bbox.zmin = min(bbox.zmin, corner[i].z);
    bbox.xmax = max(bbox.xmax, corner[i].x);
    bbox.ymax = max(bbox.ymax, corner[i].y);
    bbox.zmax = max(bbox.zmax, corner[i].z);
  }
  return bbox;
}
/* Axis-aligned bounding box of a disk, padded to a thin slab of thickness
 * H = 0.1 along its normal. Per axis: half-thickness (H/2)*|n_i| plus the
 * in-plane radial contribution R*|e_i x n|. */
__device__ bbox_t createBoundingBoxDisk(disk_t &disk){
  bbox_t bbox;
  const vector_t n = disk.normal;
  const vector_t c = disk.center;
  const dfloat R = disk.radius;
  const dfloat H = .1; // assert thickness in normal
  // radial contribution per axis: R*|cross(e_i, n)|
  const dfloat rx = R*sqrtf(n.y*n.y + n.z*n.z);
  const dfloat ry = R*sqrtf(n.x*n.x + n.z*n.z);
  const dfloat rz = R*sqrtf(n.x*n.x + n.y*n.y);
  bbox.xmax = c.x + (H/2)*fabs(n.x) + rx;
  bbox.xmin = c.x - (H/2)*fabs(n.x) - rx;
  bbox.ymax = c.y + (H/2)*fabs(n.y) + ry;
  bbox.ymin = c.y - (H/2)*fabs(n.y) - ry;
  bbox.zmax = c.z + (H/2)*fabs(n.z) + rz;
  bbox.zmin = c.z - (H/2)*fabs(n.z) - rz;
  return bbox;
}
/* Build the shape's world-space AABB, convert it to inclusive grid-cell
 * index bounds (clamped to the grid extents), and cache the result on the
 * shape itself (shape.bbox). */
__device__ void createBoundingBoxShape(const grid_t &grid, shape_t &shape){
  bbox_t box;
  switch(shape.type){
  case TRIANGLE:  box = createBoundingBoxTriangle (shape.triangle);  break;
  case SPHERE:    box = createBoundingBoxSphere   (shape.sphere);    break;
  case ELLIPSOID: box = createBoundingBoxEllipsoid(shape.ellipsoid); break;
  case IMAGE:
  case RECTANGLE: box = createBoundingBoxRectangle(shape.rectangle); break;
  case CYLINDER:  box = createBoundingBoxCylinder (shape.cylinder);  break;
  case DISK:      box = createBoundingBoxDisk     (shape.disk);      break;
  case CONE:      box = createBoundingBoxCone     (shape.cone);      break;
  }
  // cell index of each extreme corner (floor of the normalized coordinate)
  int ilo = floor(grid.invdx*(box.xmin-grid.xmin));
  int ihi = floor(grid.invdx*(box.xmax-grid.xmin));
  int jlo = floor(grid.invdy*(box.ymin-grid.ymin));
  int jhi = floor(grid.invdy*(box.ymax-grid.ymin));
  int klo = floor(grid.invdz*(box.zmin-grid.zmin));
  int khi = floor(grid.invdz*(box.zmax-grid.zmin));
  // clamp to valid cell indices
  box.imin = iclamp(ilo, 0, grid.NI-1);
  box.imax = iclamp(ihi, 0, grid.NI-1);
  box.jmin = iclamp(jlo, 0, grid.NJ-1);
  box.jmax = iclamp(jhi, 0, grid.NJ-1);
  box.kmin = iclamp(klo, 0, grid.NK-1);
  box.kmax = iclamp(khi, 0, grid.NK-1);
  shape.bbox = box;
}
/* One thread per shape: compute and cache the shape's grid-clamped bounding
 * box, then atomically increment the occupancy counter of every grid cell
 * that the bounding box overlaps. `counts` must be zero-initialized. */
__global__ void countShapesInBoxesKernel(const grid_t grid, const int Nshapes, shape_t *shapes, int *counts){
  const int n = threadIdx.x + blockDim.x*blockIdx.x;
  if(n >= Nshapes) return;
  shape_t &shape = shapes[n];
  // computes shape.bbox (world AABB + clamped cell index bounds)
  createBoundingBoxShape(grid, shape);
  const bbox_t b = shape.bbox;
  for(int k=b.kmin; k<=b.kmax; ++k){
    for(int j=b.jmin; j<=b.jmax; ++j){
      for(int i=b.imin; i<=b.imax; ++i){
        const int cell = i + grid.NI*(j + grid.NJ*k);
        // several threads may hit the same cell concurrently
        atomicAdd(counts+cell, 1);
      }
    }
  }
}
/* One thread per shape: deposit the shape's id into the contents list of
 * every grid cell overlapped by its (precomputed) bounding box.
 * boxCounters holds each cell's running write cursor, pre-seeded with the
 * scanned segment offsets; atomicAdd claims the next free slot. */
__global__ void addShapesInBoxesKernel(const grid_t grid, const int Nshapes, const shape_t *shapes, int *boxCounters, int *boxContents){
  const int n = threadIdx.x + blockDim.x*blockIdx.x;
  if(n >= Nshapes) return;
  const shape_t &shape = shapes[n];
  const bbox_t b = shape.bbox;
  for(int k=b.kmin; k<=b.kmax; ++k){
    for(int j=b.jmin; j<=b.jmax; ++j){
      for(int i=b.imin; i<=b.imax; ++i){
        const int cell = i + grid.NI*(j + grid.NJ*k);
        // atomically claim the next slot in this cell's segment
        const int slot = atomicAdd(boxCounters+cell, 1);
        boxContents[slot] = shape.id;
      }
    }
  }
}
/* (Re)build the uniform-grid acceleration structure for the Nshapes shapes
 * in device array c_shapes:
 *  1. count how many shapes overlap each grid cell,
 *  2. prefix-sum the counts into per-cell segment offsets (c_boxStarts),
 *  3. scatter each shape id into its cells' segments (c_boxContents).
 * Frees and reallocates grid->c_boxStarts / c_boxContents on rebuild.
 * Fix: removed the `boxCounts` host array, which was allocated and freed
 * but never used.
 */
void populateGrid(grid_t *grid, int Nshapes, shape_t *c_shapes){
  // release the previous structure when rebuilding
  if(grid->c_boxStarts){
    hipFree(grid->c_boxStarts);
    hipFree(grid->c_boxContents);
  }
  int Nboxes = grid->NI*grid->NJ*grid->NK;
  int *c_boxCounts, *c_boxCounters;
  hipMalloc(&c_boxCounts, (Nboxes+1)*sizeof(int));
  hipMalloc(&(grid->c_boxStarts), (Nboxes+1)*sizeof(int));
  hipMalloc(&(c_boxCounters), (Nboxes+1)*sizeof(int));
  hipMemset(c_boxCounts, 0, (Nboxes+1)*sizeof(int));
  int B = BLOCKSIZE;
  int G = (Nshapes+B-1)/B;
  // step 1: per-cell shape counts (also caches each shape's bbox)
  hipLaunchKernelGGL(( countShapesInBoxesKernel) , dim3(G), dim3(B) , 0, 0, *grid, Nshapes, c_shapes, c_boxCounts);
  // step 2: parallel scan to get cumulative offsets starting at zero
  int *tmp = (int*) calloc(Nboxes+1, sizeof(int));
  int *c_tmp;
  hipMalloc(&c_tmp, (Nboxes+1)*sizeof(int));
  int Nentries =
    scan(Nboxes, c_boxCounts, c_tmp, tmp, grid->c_boxStarts);
  // build container for boxes
  hipMalloc(&(grid->c_boxContents), (Nentries+1)*sizeof(int));
  // seed the write cursors with the segment offsets (scatter consumes them)
  hipMemcpy(c_boxCounters, grid->c_boxStarts, (Nboxes+1)*sizeof(int), hipMemcpyDeviceToDevice);
  // step 3: add each shape to every box that intersects the shape's bounding box
  hipLaunchKernelGGL(( addShapesInBoxesKernel) , dim3(G), dim3(B) , 0, 0, *grid, Nshapes, c_shapes, c_boxCounters, grid->c_boxContents);
  free(tmp);
  hipFree(c_tmp);
  hipFree(c_boxCounts);
  hipFree(c_boxCounters);
}
// Mirror the host-side scene on the device: allocate device buffers for the
// materials, lights, shapes, grid cell bounding boxes and random-number
// table, upload their contents, and allocate the (uninitialized) image buffer.
void sceneOffload(scene_t *scene){
  grid_t *grid = scene->grid;

  const size_t materialBytes = scene->Nmaterials*sizeof(material_t);
  const size_t lightBytes    = scene->Nlights*sizeof(light_t);
  const size_t shapeBytes    = scene->Nshapes*sizeof(shape_t);
  const size_t bboxBytes     = (grid->NI*grid->NJ*grid->NK)*sizeof(bbox_t);
  const size_t randomBytes   = 2*NRANDOM*sizeof(dfloat);

  hipMalloc(&(scene->c_materials), materialBytes);
  hipMalloc(&(scene->c_lights),    lightBytes);
  hipMalloc(&(scene->c_shapes),    shapeBytes);
  hipMalloc(&(scene->c_img),       WIDTH*HEIGHT*3*sizeof(char));
  hipMalloc(&(grid->c_bboxes),     bboxBytes);

  hipMemcpy(scene->c_shapes,    scene->shapes,    shapeBytes,    hipMemcpyHostToDevice);
  hipMemcpy(scene->c_materials, scene->materials, materialBytes, hipMemcpyHostToDevice);
  hipMemcpy(scene->c_lights,    scene->lights,    lightBytes,    hipMemcpyHostToDevice);
  hipMemcpy(grid->c_bboxes,     grid->bboxes,     bboxBytes,     hipMemcpyHostToDevice);

  hipMalloc(&(scene->c_randomNumbers), randomBytes);
  hipMemcpy(scene->c_randomNumbers, scene->randomNumbers, randomBytes, hipMemcpyHostToDevice);
}
// Uniform random dfloat in [dmin, dmax), driven by the drand48() stream.
dfloat drandRange48(dfloat dmin, dfloat dmax){
  const dfloat u = drand48();   // u in [0,1)
  return dmin + (dmax - dmin)*u;
}
// L     = size of the world box
// delta = width of the padding layer around the box
// Build a uniform NI x NJ x NK grid covering [-delta, L+delta]^3 and
// precompute the axis-aligned bounding box of every cell.
grid_t *gridSetup(dfloat L, dfloat delta){
  grid_t *grid = (grid_t*) calloc(1, sizeof(grid_t));

  // padded world extents (identical in x, y and z)
  grid->xmin = -delta;
  grid->xmax = L + delta;
  grid->ymin = -delta;
  grid->ymax = L + delta;
  grid->zmin = -delta;
  grid->zmax = L + delta;

  // fixed grid resolution
  grid->NI = 401;
  grid->NJ = 401;
  grid->NK = 401;

  // cell sizes and their reciprocals (cells per unit length)
  grid->dx = (grid->xmax-grid->xmin)/grid->NI;
  grid->dy = (grid->ymax-grid->ymin)/grid->NJ;
  grid->dz = (grid->zmax-grid->zmin)/grid->NK;
  grid->invdx = grid->NI/(grid->xmax-grid->xmin);
  grid->invdy = grid->NJ/(grid->ymax-grid->ymin);
  grid->invdz = grid->NK/(grid->zmax-grid->zmin);

  // one bounding box per cell, indexed i + j*NI + k*NI*NJ
  grid->bboxes = (bbox_t*) calloc(grid->NI*grid->NJ*grid->NK, sizeof(bbox_t));
  for(int k=0;k<grid->NK;++k){
    for(int j=0;j<grid->NJ;++j){
      for(int i=0;i<grid->NI;++i){
	const int id = i + j*grid->NI + k*grid->NI*grid->NJ;
	bbox_t *box = grid->bboxes + id;
	box->xmin = i*grid->dx + grid->xmin;
	box->xmax = (i+1)*grid->dx + grid->xmin;
	box->ymin = j*grid->dy + grid->ymin;
	box->ymax = (j+1)*grid->dy + grid->ymin;
	box->zmin = k*grid->dz + grid->zmin;
	box->zmax = (k+1)*grid->dz + grid->zmin;
      }
    }
  }
  return grid;
}
// Build the host-side scene from plot data:
//   - a 64-entry material palette (0 = white mirror, 1 = green glassy,
//     2.. = randomized),
//   - one TRIANGLE shape per plot element (rescaled into the world box,
//     with y and z swapped and winding flipped),
//   - one ceiling RECTANGLE, five lights, the uniform grid, and a table
//     of NRANDOM random unit 2-vectors.
// NOTE: the drand48() call order throughout is part of the observable
// behavior (it fixes the random material colors and sample directions).
scene_t *sceneSetup(int plotNelements,
		    dfloat *plotx,
		    dfloat *ploty,
		    dfloat *plotz,
		    dfloat *plotq){
  int i;
  int Nmaterials = 64;
  material_t *materials = (material_t*) calloc(Nmaterials, sizeof(material_t));
  // material 0: pure white reflector
  materials[0].diffuse.red = 1;
  materials[0].diffuse.green = 1;
  materials[0].diffuse.blue = 1;
  materials[0].reflection = 1;
  materials[0].eta = 1;
  materials[0].refraction = 0;
  materials[0].info.refractor = 0;
  materials[0].info.reflector = 1;
  materials[0].info.emitter = 0;
  // material 1: green, both reflecting and refracting
  materials[1].diffuse.red = 0;
  materials[1].diffuse.green = 240/255.;
  materials[1].diffuse.blue = 20/255.;
  materials[1].reflection = .3;
  materials[1].eta = .7;
  materials[1].refraction = .1;
  materials[1].info.refractor = 1;
  materials[1].info.reflector = 1;
  materials[1].info.emitter = 0;
  // materials 2..63: random diffuse color; randomly reflecting and/or
  // refracting, forced to at least reflect
  for(i=2;i<Nmaterials;++i){
    dfloat red = 0,green = 0,blue =0;
    red   = drandRange48(0.125,0.8);
    green = drandRange48(0.125,0.8);
    blue  = drandRange48(0.125,0.8);
    materials[i].diffuse.red = red;
    materials[i].diffuse.green = green;
    materials[i].diffuse.blue = blue;
    materials[i].eta = 2;
    materials[i].refraction = 1; // transmission coeff
    if(drand48() > .5){
      materials[i].reflection = .9;
      materials[i].info.reflector = 1;
    }
    if(drand48() > .5){
      materials[i].refraction = .9;
      materials[i].info.refractor = 1;
    }
    if(!materials[i].info.refractor && !materials[i].info.reflector){
      materials[i].info.reflector = 1;
    }
#if 0
    printf("materials[%d] = {{rgb=%g,%g,%g},{reflection=%g,refraction=%g,eta=%g},info {reflector=%d,refractor=%d,emitter=%d}\n",
	   i,
	   materials[i].diffuse.red,
	   materials[i].diffuse.green,
	   materials[i].diffuse.blue,
	   materials[i].reflection,
	   materials[i].refraction,
	   materials[i].eta,
	   materials[i].info.reflector,
	   materials[i].info.refractor,
	   materials[i].info.emitter);
#endif
  }
  int Ntriangles = plotNelements;
  int Nrectangles = 1;
  int Nshapes = Ntriangles + Nrectangles;
  // length of side of world box
  dfloat L = BOXSIZE;
  shape_t *shapes = (shape_t*) calloc(Nshapes, sizeof(shape_t));
  // bounding box of the raw plot data (3 vertices per element)
  dfloat triXmin = 1e9, triXmax = -1e9;
  dfloat triYmin = 1e9, triYmax = -1e9;
  dfloat triZmin = 1e9, triZmax = -1e9;
  for(int n=0;n<plotNelements*3;++n){
    triXmin = min(triXmin, plotx[n]);
    triXmax = max(triXmax, plotx[n]);
    triYmin = min(triYmin, ploty[n]);
    triYmax = max(triYmax, ploty[n]);
    triZmin = min(triZmin, plotz[n]);
    triZmax = max(triZmax, plotz[n]);
  }
  printf("Ntriangles = %d in range (%lg,%lg x %lg,%lg x %lg,%lg)\n",
	 Ntriangles, triXmin, triXmax, triYmin, triYmax, triZmin, triZmax);
  // largest extent: used to normalize the data into a unit cube
  dfloat maxL = max(triXmax-triXmin, max(triYmax-triYmin, triZmax-triZmin));
  int bcnt = 0;
  dfloat brot = 0;
  // NOTE(review): bcosrot/bsinrot are precomputed but the vertex loop below
  // recomputes cos(brot)/sin(brot) directly; these two stay unused here.
  dfloat bcosrot = cos(brot);
  dfloat bsinrot = sin(brot);
  // offsets are pinned to 500; the trailing drandRange48 calls are discarded
  // expression statements, but they still advance the drand48 stream —
  // removing them would shift all later random numbers.
  dfloat boffx = 500; drandRange48(250, L-250);
  dfloat boffy = 500; drandRange48(250, L-250);
  dfloat boffz = 500; drandRange48(250, L-250);
  dfloat bscal = 800;   // scale from unit cube to world units
  int bmat = 32;        // material index used for all field triangles
  printf("bmat = %d\n", bmat);
  // bounding box of the transformed triangles (for the diagnostic print)
  dfloat newTriXmin = 1e9, newTriXmax = -1e9;
  dfloat newTriYmin = 1e9, newTriYmax = -1e9;
  dfloat newTriZmin = 1e9, newTriZmax = -1e9;
  for(i=0;i<Ntriangles;++i){
    // normalize vertices into the unit cube, swapping y and z
    for(int v=0;v<3;++v){
      shapes[bcnt].triangle.vertices[v] =
	vectorCreate((plotx[i*3+v]-triXmin)/maxL,
		     (plotz[i*3+v]-triZmin)/maxL,
		     (ploty[i*3+v]-triYmin)/maxL); // swapped y and z
      shapes[bcnt].triangle.q[v] = plotq[i*3+v];
    }
    // flip winding: swap vertices 1 and 2
    vector_t tmp = shapes[bcnt].triangle.vertices[1];
    shapes[bcnt].triangle.vertices[1] = shapes[bcnt].triangle.vertices[2];
    shapes[bcnt].triangle.vertices[2] = tmp;
    // scale, flip y about the box top, rotate about the y axis, translate
    for(int v=0;v<3;++v){
      dfloat x = bscal*shapes[bcnt].triangle.vertices[v].x;
      dfloat y = L - bscal*shapes[bcnt].triangle.vertices[v].y;
      dfloat z = bscal*shapes[bcnt].triangle.vertices[v].z;
      dfloat xrot =  cos(brot)*x + sin(brot)*z;
      dfloat zrot = -sin(brot)*x + cos(brot)*z;
      shapes[bcnt].triangle.vertices[v].x = boffx + xrot;
      shapes[bcnt].triangle.vertices[v].y = y;
      shapes[bcnt].triangle.vertices[v].z = boffz + zrot;
#if 1
      newTriXmin = min(newTriXmin, shapes[bcnt].triangle.vertices[v].x);
      newTriXmax = max(newTriXmax, shapes[bcnt].triangle.vertices[v].x);
      newTriYmin = min(newTriYmin, shapes[bcnt].triangle.vertices[v].y);
      newTriYmax = max(newTriYmax, shapes[bcnt].triangle.vertices[v].y);
      newTriZmin = min(newTriZmin, shapes[bcnt].triangle.vertices[v].z);
      newTriZmax = max(newTriZmax, shapes[bcnt].triangle.vertices[v].z);
#endif
    }
    shapes[bcnt].material = bmat;
    shapes[bcnt].type = TRIANGLE;
    shapes[bcnt].id = bcnt;
    ++bcnt;
  }
  printf("Ntriangles = %d in range (%lg,%lg x %lg,%lg x %lg,%lg)\n",
	 Ntriangles, newTriXmin, newTriXmax, newTriYmin, newTriYmax, newTriZmin, newTriZmax);
  int cnt = Ntriangles;
  // add one rectangle (the y = L "ceiling" of the world box)
  if(Nrectangles>0){
    vector_t a = vectorCreate(0, L, 0);
    vector_t b = vectorCreate(0, L, L);
    vector_t c = vectorCreate(L, L, L);
    vector_t d = vectorCreate(L, L, 0);
    // NOTE(review): 'ab' is actually d-a and 'ad' is b-a; the names look
    // swapped but lengths/axes are used consistently below.
    vector_t ab = vectorSub(d,a);
    vector_t ad = vectorSub(b,a);
    shapes[cnt].rectangle.length[0] = vectorNorm(ab);
    shapes[cnt].rectangle.length[1] = vectorNorm(ad);
    shapes[cnt].rectangle.axis[0] = vectorNormalize(ab);
    shapes[cnt].rectangle.axis[1] = vectorNormalize(ad);
    shapes[cnt].rectangle.center = vectorScale(0.25, vectorAdd(vectorAdd(a,b),vectorAdd(c,d)));
    shapes[cnt].material = -1;   // negative: use the procedural checkerboard
    shapes[cnt].type = RECTANGLE;
    shapes[cnt].id = cnt;
    ++cnt;
  }
  // five point lights with fixed positions/intensities
  int Nlights = 5;
  light_t *lights = (light_t*) calloc(Nlights, sizeof(light_t));
  lights[0].pos.x = L/2;
  lights[0].pos.y = 0;
  lights[0].pos.z = -100;
  lights[0].intensity.red = 1;
  lights[0].intensity.green = 1;
  lights[0].intensity.blue = 1;
  lights[1].pos.x = 3200;
  lights[1].pos.y = 3000;
  lights[1].pos.z = -1000;
  lights[1].intensity.red = 0.6;
  lights[1].intensity.green = 0.7;
  lights[1].intensity.blue = 1;
  lights[2].pos.x = 600;
  lights[2].pos.y = 0;
  lights[2].pos.z = -100;
  lights[2].intensity.red = 0.3;
  lights[2].intensity.green = 0.5;
  lights[2].intensity.blue = 1;
  lights[3].pos.x = L/2;
  lights[3].pos.y = 0;
  lights[3].pos.z = L/2;
  lights[3].intensity.red = 0.8;
  lights[3].intensity.green = 0.8;
  lights[3].intensity.blue = 1;
  lights[4].pos.x = L;
  lights[4].pos.y = L;
  lights[4].pos.z = -1000;
  lights[4].intensity.red = 1;
  lights[4].intensity.green = 1;
  lights[4].intensity.blue = 1;
  // assemble the scene record
  scene_t *scene = (scene_t*) calloc(1, sizeof(scene_t));
  scene->Ntriangles = plotNelements;
  scene->Nlights = Nlights;
  scene->lights = lights;
  scene->Nshapes = Nshapes;
  scene->shapes = shapes;
  scene->Nmaterials = Nmaterials;
  scene->materials = materials;
  scene->grid = gridSetup(L, 1600);
  // table of random points on the unit circle (2 dfloats per entry)
  scene->randomNumbers = (dfloat*) calloc(2*NRANDOM, sizeof(dfloat));
  for(int i=0;i<NRANDOM;++i){
    dfloat r1 = 2*drand48()-1;
    dfloat r2 = 2*drand48()-1;
    scene->randomNumbers[2*i+0] = r1/sqrt(r1*r1+r2*r2);
    scene->randomNumbers[2*i+1] = r2/sqrt(r1*r1+r2*r2);
  }
  return scene;
}
/* Output data as a binary (P6) PPM file.
 *
 * filename - output path
 * img      - width*height*3 interleaved RGB bytes (3 bytes per pixel)
 * width    - image width in pixels
 * height   - image height in pixels
 *
 * On failure to open the file a diagnostic is printed and nothing is
 * written (previously a NULL FILE* was passed straight to fprintf).
 */
void saveppm(char *filename, unsigned char *img, int width, int height){
  /* Open file for writing */
  FILE *f = fopen(filename, "wb");
  if(!f){
    fprintf(stderr, "saveppm: unable to open '%s' for writing\n", filename);
    return;
  }
  /* PPM header info, including the size of the image */
  fprintf(f, "P6 %d %d %d\n", width, height, 255);
  /* Write the image data to the file - remember 3 byte per pixel */
  fwrite(img, 3, width*height, f);
  /* Make sure you close the file */
  fclose(f);
}
// One-time scene construction: build the host scene, mirror it on the
// device, then bin the shapes into the uniform-grid acceleration structure.
// Returns the fully initialized scene.
// FIX: removed unused launch-dimension locals (TX/TY/B/G were computed but
// never used here) and a redundant local re-declaration of populateGrid,
// which is defined earlier in this file.
scene_t *simpleRayTracerSetup(int plotNelements,
			      dfloat *plotx,
			      dfloat *ploty,
			      dfloat *plotz,
			      dfloat *plotq){
  // initialize triangles and spheres
  scene_t *scene = sceneSetup(plotNelements, plotx, ploty, plotz, plotq);
  // port to GPU
  sceneOffload(scene);
  // bin shapes into grid cells on the device
  populateGrid(scene->grid, scene->Nshapes, scene->c_shapes);
  return scene;
}
// to compile animation:
// ffmpeg -y -i image_%05d.ppm -pix_fmt yuv420p foo.mp4
// Lazily-initialized global scene: built by the first simpleRayTracer()
// call and reused (with updated field values) on every later frame.
scene_t *scene = NULL;
// Render one frame to images/<fileBaseName>_<fileIndex>.ppm.
// The first call builds the scene (host + device); subsequent calls only
// re-upload the per-vertex field values q. fileIndex also drives the
// frame-to-frame rotation angle theta passed to the render kernel.
// FIX: removed a dead expression statement "+15.*M_PI/180.;" that dangled
// after the sensorAngle initializer — it had no side effects and no result.
void simpleRayTracer(int plotNelements,
		     dfloat *plotx,
		     dfloat *ploty,
		     dfloat *plotz,
		     dfloat *plotq,
		     const char *fileBaseName,
		     const int fileIndex){
  // initialize triangles and spheres (first frame only)
  if(!scene)
    scene = simpleRayTracerSetup(plotNelements, plotx, ploty, plotz, plotq);
  // update field values on the triangle vertices
  for(int i=0;i<scene->Ntriangles;++i){
    for(int v=0;v<3;++v){
      scene->shapes[i].triangle.q[v] = plotq[i*3+v];
    }
  }
  hipMemcpy(scene->c_shapes, scene->shapes, scene->Nshapes*sizeof(shape_t), hipMemcpyHostToDevice);
  // 1. location of observer eye (before rotation)
  sensor_t sensor;
  // background color
  sensor.bg.red   = 126./256;
  sensor.bg.green = 192./256;
  sensor.bg.blue  = 238./256;
  // distance of the eye from the view target
  dfloat br = 3.75f*BOXSIZE;
  // angle elevation to y-z plane (0 is above, pi/2 is from side)
  dfloat eyeAngle = .5*M_PI/2.f;
  // target view
  vector_t targetX = vectorCreate(BOXSIZE/2., BOXSIZE, BOXSIZE/2.);
  sensor.eyeX = vectorAdd(targetX, vectorCreate(0, -br*cos(eyeAngle), -br*sin(eyeAngle)));
  // sensor tilt (a disabled "+15 degrees" offset used to dangle here)
  dfloat sensorAngle = eyeAngle;
  sensor.Idir = vectorCreate(1.f, 0.f, 0.f);
  sensor.Jdir = vectorCreate(0.f, sin(sensorAngle), -cos(sensorAngle));
  // only consumed by the disabled debug print below
  vector_t sensorNormal = vectorCrossProduct(sensor.Idir, sensor.Jdir);
#if 0
  printf("eyeX = %g,%g,%g \n, IDir = %g,%g,%g, \n, Jdir = %g,%g,%g, Ndir = %g,%g,%g\n",
	 sensor.eyeX.x,
	 sensor.eyeX.y,
	 sensor.eyeX.z,
	 sensor.Idir.x,
	 sensor.Idir.y,
	 sensor.Idir.z,
	 sensor.Jdir.x,
	 sensor.Jdir.y,
	 sensor.Jdir.z,
	 sensorNormal.x,
	 sensorNormal.y,
	 sensorNormal.z);
#endif
  // 2.4 length of sensor in axis 1 & 2
  sensor.Ilength = 20.f;
  sensor.Jlength = HEIGHT*20.f/WIDTH;
  sensor.offset = 0.f;
  // 2.5 normal distance from sensor to focal plane
  dfloat lensOffset = 50;
  sensor.lensC = vectorAdd(sensor.eyeX, vectorScale(lensOffset, vectorCrossProduct(sensor.Idir, sensor.Jdir)));
  // why 0.25 ?
  sensor.focalPlaneOffset = 0.22f*fabs(vectorTripleProduct(sensor.Idir, sensor.Jdir, vectorSub(targetX,sensor.eyeX))); // triple product
  // printf("lensOffset = %g, sensor.focalPlaneOffset = %g\n", lensOffset, sensor.focalPlaneOffset);
  /* rotation angle in y-z */
  dfloat theta = M_PI*fileIndex*1./180.;
  int TX = 8, TY = 8;
  dim3 B(TX,TY,1);
  dim3 G( (WIDTH+TX-1)/TX, (HEIGHT+TY-1)/TY, 1);
  /* render scene */
  hipLaunchKernelGGL(( renderKernel)
		     , dim3(G),dim3(B), 0, 0, WIDTH,
		     HEIGHT,
		     scene->grid[0],
		     sensor,
		     scene->Nshapes,
		     scene->c_shapes,
		     scene->Nlights,
		     scene->c_lights,
		     scene->Nmaterials,
		     scene->c_materials,
		     cos(theta),
		     sin(theta),
		     scene->c_randomNumbers,
		     scene->c_img);
  /* copy image back to host */
  unsigned char *img = (unsigned char*) calloc(3*WIDTH*HEIGHT, sizeof(char));
  hipMemcpy(img, scene->c_img, 3*WIDTH*HEIGHT*sizeof(char), hipMemcpyDeviceToHost);
  // make sure images directory exists
  mkdir("images", S_IRUSR | S_IREAD | S_IWUSR | S_IWRITE | S_IXUSR | S_IEXEC);
  char fileName[BUFSIZ];
  sprintf(fileName, "images/%s_%05d.ppm", fileBaseName, fileIndex);
  saveppm(fileName, img, WIDTH, HEIGHT);
  free(img);
}
| aac5244e35fb6610b3f8d4971c79fa64b33af91e.cu | #include "cuda.h"
// risky
#define dfloat double
#include "simpleRayTracer.h"
#define p_eps 1e-6
#define p_Nsamples 1
// ratio of importance in sampling primary ray versus random rays
#define p_primaryWeight 2.f
#define p_intersectDelta 0.1f
#define p_shadowDelta 0.15f
#define p_projectDelta 1e-2
#define p_maxLevel 5
#define p_maxNrays (2<<p_maxLevel)
#define p_apertureRadius 20.f
#define NRANDOM 10000
// Event pair shared by the simple init/tic/toc kernel-timing helpers below.
cudaEvent_t startTimer, endTimer;
// Create the two CUDA events. Call once before the first ticTimer().
void initTimer(){
  cudaEventCreate(&startTimer);
  cudaEventCreate(&endTimer);
}
// Mark the start of a timed region (records into the default stream).
void ticTimer(){
  cudaEventRecord(startTimer);
}
// Mark the end of a timed region, wait for it to complete, and print the
// elapsed time in seconds (cudaEventElapsedTime reports milliseconds).
void tocTimer(const char *message){
  cudaEventRecord(endTimer);
  cudaEventSynchronize(endTimer);
  float elapsed;
  cudaEventElapsedTime(&elapsed, startTimer, endTimer);
  printf("Kernel %s took %g seconds\n", message, elapsed/1000.);
}
// Forward declaration (definition lives elsewhere in this file/unit).
__device__ bbox_t createBoundingBoxSphere(sphere_t &sphere);
// Clamp x into [xmin, xmax]; the lower bound is applied last, so it wins
// if the interval is degenerate.
__host__ __device__ dfloat clamp(dfloat x, dfloat xmin, dfloat xmax){
  return max(xmin, min(x, xmax));
}
// Clamp x into [xmin, xmax] and return the floor of the result as an int
// (e.g. for converting a coordinate into a cell index).
__host__ __device__ int iclamp(dfloat x, dfloat xmin, dfloat xmax){
  const dfloat clamped = max(xmin, min(x, xmax));
  return floor(clamped);
}
// Map pixel (I,J) of an NI x NJ sensor to its world-space sample point:
// start at the eye, push along the sensor normal by sensor.offset, then
// move within the sensor plane by the centered, scaled pixel coordinates.
__forceinline__ __host__ __device__ vector_t sensorLocation(const int NI,
							    const int NJ,
							    const int I,
							    const int J,
							    const sensor_t &sensor){
  // centered in-plane coordinates, scaled to the physical sensor size
  const dfloat r = (I/(dfloat)(NI-1) - 0.5f)*sensor.Ilength;
  const dfloat s = (J/(dfloat)(NJ-1) - 0.5f)*sensor.Jlength;
  const vector_t sensorNormal =
    vectorCrossProduct(sensor.Idir, sensor.Jdir);
  vector_t sensorX = sensor.eyeX;
  // out-of-plane offset along the sensor normal
  sensorX.x += sensorNormal.x*sensor.offset;
  sensorX.y += sensorNormal.y*sensor.offset;
  sensorX.z += sensorNormal.z*sensor.offset;
  // in-plane offsets along the two sensor axes
  sensorX.x += r*sensor.Idir.x;
  sensorX.y += r*sensor.Idir.y;
  sensorX.z += r*sensor.Idir.z;
  sensorX.x += s*sensor.Jdir.x;
  sensorX.y += s*sensor.Jdir.y;
  sensorX.z += s*sensor.Jdir.z;
  return sensorX;
}
// Like sensorLocation, but produces p_Nsamples world-space sample points for
// pixel (I,J), scattered on a small circle (radius `delta` pixels) around the
// pixel center — used for antialiasing. Results go into sensorsX[0..p_Nsamples-1].
__host__ __device__ void sensorMultipleLocations(const int NI,
						 const int NJ,
						 const int I,
						 const int J,
						 const sensor_t &sensor,
						 vector_t *sensorsX){
  for(int samp=0;samp<p_Nsamples;++samp){
    sensorsX[samp] = sensor.eyeX;
    dfloat r = I/(dfloat)(NI-1);
    dfloat s = J/(dfloat)(NJ-1);
    // evenly spaced angles around the pixel center
    dfloat theta = 2.f*M_PI*samp/(dfloat)p_Nsamples;
    // circle of samples around sensor pixel
    dfloat delta = .5; // scatter pixel radius
    r = (r-0.5f+delta*cosf(theta)/NI)*sensor.Ilength;
    s = (s-0.5f+delta*sinf(theta)/NJ)*sensor.Jlength;
    vector_t sensorNormal = vectorCrossProduct(sensor.Idir, sensor.Jdir);
    // out-of-plane offset along the sensor normal
    sensorsX[samp].x += sensorNormal.x*sensor.offset;
    sensorsX[samp].y += sensorNormal.y*sensor.offset;
    sensorsX[samp].z += sensorNormal.z*sensor.offset;
    // in-plane offsets along the two sensor axes
    sensorsX[samp].x += r*sensor.Idir.x;
    sensorsX[samp].y += r*sensor.Idir.y;
    sensorsX[samp].z += r*sensor.Idir.z;
    sensorsX[samp].x += s*sensor.Jdir.x;
    sensorsX[samp].y += s*sensor.Jdir.y;
    sensorsX[samp].z += s*sensor.Jdir.z;
  }
}
// Construct a vector_t from its three cartesian components.
__host__ __device__ vector_t vectorCreate(dfloat x, dfloat y, dfloat z){
  vector_t out;
  out.x = x;
  out.y = y;
  out.z = z;
  return out;
}
/* Componentwise difference v1 - v2 */
__host__ __device__ vector_t vectorSub(const vector_t v1, const vector_t v2){
  vector_t d;
  d.x = v1.x - v2.x;
  d.y = v1.y - v2.y;
  d.z = v1.z - v2.z;
  return d;
}
/* Dot (inner) product of two vectors */
__host__ __device__ dfloat vectorDot(const vector_t v1, const vector_t v2){
  dfloat sum = v1.x * v2.x;
  sum += v1.y * v2.y;
  sum += v1.z * v2.z;
  return sum;
}
/* Componentwise (Hadamard) product of two vectors */
__host__ __device__ vector_t vectorDotMultiply(const vector_t v1, const vector_t v2){
  vector_t p;
  p.x = v1.x * v2.x;
  p.y = v1.y * v2.y;
  p.z = v1.z * v2.z;
  return p;
}
/* Componentwise quotient v1 / v2 (no zero-divisor guard) */
__host__ __device__ vector_t vectorDotDivide(const vector_t v1, const vector_t v2){
  vector_t q;
  q.x = v1.x / v2.x;
  q.y = v1.y / v2.y;
  q.z = v1.z / v2.z;
  return q;
}
/* Cross product v1 x v2 */
__host__ __device__ vector_t vectorCrossProduct(const vector_t v1, const vector_t v2){
  vector_t c;
  c.x = v1.y*v2.z - v1.z*v2.y;
  c.y = v1.z*v2.x - v1.x*v2.z;
  c.z = v1.x*v2.y - v1.y*v2.x;
  return c;
}
/* Scale a vector by the scalar c */
__host__ __device__ vector_t vectorScale(const dfloat c, const vector_t v){
  vector_t s;
  s.x = v.x * c;
  s.y = v.y * c;
  s.z = v.z * c;
  return s;
}
/* Componentwise sum v1 + v2 */
__host__ __device__ vector_t vectorAdd(const vector_t v1, const vector_t v2){
  vector_t s;
  s.x = v1.x + v2.x;
  s.y = v1.y + v2.y;
  s.z = v1.z + v2.z;
  return s;
}
/* Scalar triple product (a x b) . c */
__host__ __device__ dfloat vectorTripleProduct(const vector_t a, const vector_t b, const vector_t c){
  return vectorDot(vectorCrossProduct(a, b), c);
}
// Remove from a its component along b (b is assumed to be a unit vector).
__host__ __device__ vector_t vectorOrthogonalize(const vector_t a, const vector_t b){
  const dfloat adotb = vectorDot(a, b);
  return vectorSub(a, vectorScale(adotb, b));
}
// Euclidean length of a.
__host__ __device__ dfloat vectorNorm(const vector_t a){
  return sqrt(vectorDot(a, a));
}
// Return a scaled to unit length; the zero vector maps to the zero vector.
__host__ __device__ vector_t vectorNormalize(const vector_t a){
  const dfloat len = vectorNorm(a);
  if(!len)
    return vectorCreate(0,0,0);
  return vectorScale(1./len, a);
}
// https://www.scratchapixel.com/code.php?id=10&origin=/lessons/3d-basic-rendering/minimal-ray-tracer-rendering-simple-shapes
// roots of a*t^2 + 2*b*t + c = 0  (note the implicit factor 2 on b:
// callers pass half the usual linear coefficient).
// Returns false when the discriminant is negative; otherwise fills x0 <= x1.
// Uses the cancellation-resistant "q" formulation for the distinct-root case.
__forceinline__ __host__ __device__ bool solveQuadratic(const dfloat &a, const dfloat &b, const dfloat &c, dfloat &x0, dfloat &x1){
  dfloat discr = b * b - a * c;
  if (discr < 0) return false;
  else if (discr == 0) {
    // repeated root
    x0 = x1 = - b / a;
  }
  else {
    dfloat sqrtdiscr = sqrt(discr);
    // pick the sign that avoids subtracting nearly-equal quantities
    dfloat q = (b > 0) ?
      -(b + sqrtdiscr) :
      -(b - sqrtdiscr);
    x0 = q / a;
    x1 = c / q;
  }
  // sort the roots so x0 <= x1
  dfloat xmin = min(x0, x1);
  dfloat xmax = max(x0, x1);
  x0 = xmin;
  x1 = xmax;
  return true;
}
/* Check if the ray and triangle intersect (Cramer's-rule barycentric test).
 * B1,B2 span the triangle from vertex 2; R points from vertex 2 to the ray
 * start; J is the system determinant and L1,L2 the unnormalized barycentric
 * coordinates. On a hit nearer than *t (beyond the fudge distance), *t is
 * updated and true returned. */
__forceinline__ __host__ __device__ bool intersectRayTriangle(const ray_t &r, const triangle_t &tri, dfloat *t){
  // TW: unused fudge factor
  dfloat delta = 0;
  bool retval = false;
  vector_t B1 = vectorSub(tri.vertices[2], tri.vertices[0]);
  vector_t B2 = vectorSub(tri.vertices[2], tri.vertices[1]);
  vector_t B3 = r.dir;
  vector_t R = vectorSub(tri.vertices[2], r.start);
  dfloat J = vectorTripleProduct(B2, B3, B1);
  dfloat L1 = vectorTripleProduct(B2, B3, R);
  if(L1<delta*J) return false;                       // outside first edge
  dfloat L2 = vectorTripleProduct(B3, B1, R);
  if(L2<delta*J || L1+L2>J*(1+delta)) return false;  // outside other edges
  dfloat t0 = vectorTripleProduct(B1, B2, R)/J;      // ray parameter at hit
  /* Verify t1 larger than 0 and less than the original t */
  // TW: FUDGE FACTOR
  if((t0 > p_intersectDelta) && (t0 < *t)){
    *t = t0;
    retval = true;
  }
  return retval;
}
/* Ray/rectangle intersection.
 * The rectangle has center C, two in-plane unit axes A1,A2 and side lengths
 * L1,L2. On a hit in front of the ray start and nearer than *t, *t is
 * updated and true returned.
 * BUGFIX: the second in-plane coordinate h2 was tested against L1 instead
 * of L2, clipping (or extending) the rectangle whenever L1 != L2. */
__forceinline__ __host__ __device__ bool intersectRayRectangle(const ray_t &r, const rectangle_t &rect, dfloat *t){
  vector_t C = rect.center;
  vector_t A1 = rect.axis[0];
  vector_t A2 = rect.axis[1];
  dfloat L1 = rect.length[0];
  dfloat L2 = rect.length[1];
  // plane normal n = A1 x A2; solve (s + t*d - C).n = 0 for t:
  // t = (C - s).n/(d.n)
  vector_t n = vectorCrossProduct(A1, A2);
  dfloat t0 = vectorDot(vectorSub(C,r.start), n)/vectorDot(r.dir, n);
  // intersection behind start of ray, or farther than current hit
  if(t0<0 || t0>*t) return false;
  // X = hit point relative to the rectangle center
  vector_t X = vectorAdd(vectorSub(r.start,C), vectorScale(t0, r.dir));
  // in-plane coordinates shifted so the rectangle spans [0,L1] x [0,L2]
  dfloat h1 = vectorDot(A1, X)+0.5*L1;
  if(h1<0 || h1>L1) return false;
  dfloat h2 = vectorDot(A2, X)+0.5*L2;
  if(h2<0 || h2>L2) return false; // was: h2>L1
  // success
  *t = t0;
  return true;
}
/* Ray/sphere intersection: substitute p = start + t*dir into
 * |p - c|^2 = R^2 and solve the resulting quadratic for the nearest
 * admissible t. Updates *t and returns true only for a hit beyond the
 * fudge distance and closer than the current *t. */
__forceinline__ __host__ __device__ bool intersectRaySphere(const ray_t &r, const sphere_t &s, dfloat *t){
  /* A = d.d, the vector_t dot product of the direction */
  const dfloat A = vectorDot(r.dir, r.dir);
  /* dist = p0 - c, from the sphere center to the ray origin */
  const vector_t dist = vectorSub(r.start, s.pos);
  /* B = 2d.(p0 - c) */
  const dfloat B = 2.f * vectorDot(r.dir, dist);
  /* C = (p0 - c).(p0 - c) - r^2 */
  const dfloat C = vectorDot(dist, dist) - (s.radius * s.radius);
  /* find roots of quadratic */
  dfloat t0, t1;
  if(!solveQuadratic(A, 0.5*B, C, t0, t1))
    return false;
  if((t0 > p_intersectDelta) && (t0 < *t)){
    *t = t0;
    return true;
  }
  return false;
}
/* Ray/ellipsoid intersection.
 * With st = start - center and R the radii vector, the ellipsoid is
 * |(p - c)/R|^2 = 1. Substituting p = start + t*dir and scaling each
 * component by 1/R yields a quadratic in t:
 *   t^2 |d/R|^2 + 2t (d/R).(st/R) + (|st/R|^2 - 1) = 0
 * Updates *t and returns true for the nearest admissible root. */
__forceinline__ __host__ __device__ bool intersectRayEllipsoid(const ray_t &r, const ellipsoid_t &sh, dfloat *t){
  const vector_t st = vectorSub(r.start, sh.pos);
  // componentwise scaling by the inverse radii
  const vector_t dIR  = vectorDotMultiply(r.dir, sh.invRadii);
  const vector_t stIR = vectorDotMultiply(st, sh.invRadii);
  const dfloat A = vectorDot(dIR, dIR);
  const dfloat B = vectorDot(dIR, stIR);
  const dfloat C = vectorDot(stIR, stIR) - 1.f;
  /* find roots of quadratic */
  dfloat t0, t1;
  if(!solveQuadratic(A, B, C, t0, t1))
    return false;
  if((t0 > p_intersectDelta) && (t0 < *t)){
    *t = t0;
    return true;
  }
  return false;
}
// Ray/cone intersection. The cone has apex `vertex`, unit axis `axis`,
// base radius R at height H. A point at axial height h must satisfy
// |perpendicular distance| = R*h/H, which reduces to a quadratic in t.
// Both roots are checked against the valid height range (0,H); the nearer
// valid one wins. Updates *t and returns true on an accepted hit.
__forceinline__ __host__ __device__ bool intersectRayCone(const ray_t &r, const cone_t &cone, dfloat *t){
  bool retval = false;
  /*
    cone-ray intersection tests:
    | pos + t*dir - (vertex + axis*h) | = R*h/H
  */
  vector_t p = r.start;
  vector_t d = r.dir;
  vector_t v = cone.vertex;
  vector_t a = cone.axis;
  dfloat R = cone.radius;
  dfloat H = cone.height;
  dfloat alpha = (R/H)*(R/H); // squared half-angle tangent
  // p + t*d - (v + h*a) orth a
  // h = (p-v + t*d).a
  // if h>=0
  // | p + t*d - (v + h*a)| = alpha*(p-v + t*d).a
  // |(p-v) - ((p-v).a)*a + t*d - t*(a.d)*a | = alpha*(p-v+t*d).a
  // | pminusvPerp + t*dPerp| = alpha*( (p-v).a + t*d.a)
  //
  dfloat adotd = vectorDot(a,d);
  vector_t dPerp = vectorSub(d, vectorScale(adotd, a));        // dir, axis-perpendicular part
  vector_t pminusv = vectorSub(p, v);
  dfloat tmp = vectorDot(a,pminusv);                           // axial height of ray start
  vector_t pminusvPerp = vectorSub(pminusv, vectorScale(tmp, a));
  dfloat A = vectorDot(dPerp, dPerp) - alpha*adotd*adotd;
  dfloat B = vectorDot(dPerp, pminusvPerp) - alpha*adotd*tmp;
  dfloat C = vectorDot(pminusvPerp, pminusvPerp) - alpha*tmp*tmp;
  /* find roots of quadratic */
  dfloat t0, t1;
  if(solveQuadratic(A,B,C,t0,t1)){
    // cone is behind ray
    if(t0<0 && t1<0)
      return false;
    // check location along axis: only heights in (0,H) are on the cone
    const dfloat h0 = tmp + t0*adotd;
    const dfloat h1 = tmp + t1*adotd;
    const int valid0 = ((h0>0) && (h0<H));
    const int valid1 = ((h1>0) && (h1<H));
    if(!valid0 && !valid1) // out of range
      return false;
    else if(valid0 && valid1){ // both viable: keep the nearer root
      if(t0 > t1){ // nearest
	t0 = t1;
      }
    }
    else if(valid1){ // only the far root is on the cone
      t0 = t1;
    }
    if((t0 > p_intersectDelta) && (t0 < *t)){
      *t = t0;
      retval = true;
    }else
      retval = false;
  }else{
    retval = false;
  }
  return retval;
}
// Ray/disk intersection: intersect the supporting plane, then accept the
// hit only if it lies strictly inside the disk radius (shrunk by the
// intersection fudge factor). Updates *t and returns true on a closer hit.
// FIX: removed a second `t0 > *t` rejection that was unreachable — the same
// condition is already tested right after t0 is computed.
__forceinline__ __host__ __device__ bool intersectRayDisk(const ray_t &r, const disk_t &disk, dfloat *t){
  vector_t s = r.start;
  vector_t d = r.dir;
  vector_t n = disk.normal;
  vector_t c = disk.center;
  // intersection with the plane: (s + t*d - c).n = 0
  dfloat ndotd = vectorDot(n, d);
  dfloat t0 = vectorDot(vectorSub(c,s), n)/ndotd;
  // intersection behind start of ray, or beyond the current closest hit
  if(t0<0 || t0>*t) return false;
  // radial test at the hit point
  vector_t p = vectorAdd(s, vectorScale(t0, d));
  vector_t v = vectorSub(p, c);
  dfloat R2 = vectorDot(v,v);
  if(R2>=(disk.radius*disk.radius)-p_intersectDelta)
    return false;
  *t = t0;
  return true;
}
// Ray/cylinder (open tube) intersection. The cylinder has base center
// `center`, unit axis `axis`, radius R and height H. The radial condition
// reduces to a quadratic in t; roots are filtered by axial height in (0,H)
// and the nearer valid one is taken. Updates *t, returns true on a hit.
// The #else branch is a division-free reformulation kept for reference.
__forceinline__ __host__ __device__ bool intersectRayCylinder(const ray_t &r, const cylinder_t &cylinder, dfloat *t){
  bool retval = false;
  /*
    cylinder-ray intersection tests:
    | p + t*d - (c+h*a) | = R
    h = (p+t*d-c).a
    0<= h <=H
  */
  vector_t p = r.start;
  vector_t d = r.dir;
  vector_t c = cylinder.center;
  vector_t a = cylinder.axis;
  dfloat R = cylinder.radius;
  dfloat H = cylinder.height;
  dfloat adotd = vectorDot(a,d);
  vector_t dPerp = vectorSub(d, vectorScale(adotd, a));       // dir, axis-perpendicular part
  vector_t pminusc = vectorSub(p, c);
  dfloat tmp = vectorDot(a,pminusc);                          // axial height of ray start
  vector_t pminuscPerp = vectorSub(pminusc, vectorScale(tmp, a));
  dfloat A = vectorDot(dPerp, dPerp);
  dfloat B = vectorDot(dPerp, pminuscPerp);
  dfloat C = vectorDot(pminuscPerp, pminuscPerp) - R*R;
#if 1
  // prone to acne (FP32)
  dfloat t0, t1;
  if(solveQuadratic(A,B,C,t0,t1)){
    // cylinder is behind ray
    if(t0<=0 && t1<=0)
      return false;
    // axial heights of the two candidate hits
    dfloat h0 = tmp + t0*adotd;
    dfloat h1 = tmp + t1*adotd;
    int valid0 = ((h0>0) && (h0<H));
    int valid1 = ((h1>0) && (h1<H));
    if(!valid0 && !valid1){
      return false;
    }
    else if(valid0 && valid1){
      // both on the tube: keep the nearer root
      if(t0 > t1){
	t0= t1;
      }
    }
    else if(valid1){
      t0 = t1;
    }
    // TW: FUDGE FACTOR (was 1e-3)
    if((t0 > p_intersectDelta) && (t0< ((*t)))){// weakened this test
      *t = t0;
      retval = true;
    }else
      retval = false;
  }else{
    retval = false;
  }
#else
  // prone to acne (FP32)
  // division-free variant: works with quantities scaled by A^2
  dfloat discr = B*B-A*C;
  // TW: UNUSED FUDGE FACTOR
  dfloat delta = p_intersectDelta; // need large tolerance
  if(discr<=delta)
    retval = false;
  else{
    dfloat sqrtdiscr = sqrtf(discr);
    dfloat A2 = A*A;
    dfloat t0A2 = (-B + sqrtdiscr)*A;
    dfloat t1A2 = (-B - sqrtdiscr)*A;
    if(t0A2<=delta*A2 && t1A2<=delta*A2) return false;
    dfloat h0A2 = tmp*A2 + t0A2*adotd;
    dfloat h1A2 = tmp*A2 + t1A2*adotd;
    int valid0 = ((h0A2>delta*A2) && (h0A2<H*A2-delta*A2));
    int valid1 = ((h1A2>delta*A2) && (h1A2<H*A2-delta*A2));
    if(!valid0 && !valid1)
      return false;
    else if(valid0 && valid1){
      if(t0A2 > t1A2){
	t0A2 = t1A2;
      }
    }
    else if(valid1){
      t0A2 = t1A2;
    }
    // TW: FUDGE FACTOR (was 1e-3)
    if((t0A2 > p_intersectDelta*A2) && (t0A2 < ((*t)*p_intersectDelta))){// weakened this test
      *t = t0A2/A2;
      retval = true;
    }else
      retval = false;
  }
#endif
  return retval;
}
// True iff point p lies inside grid cell (cellI, cellJ, cellK), using
// half-open intervals (lo, hi] in each coordinate direction.
__host__ __device__ bool intersectPointGridCell(const grid_t &grid,
						const vector_t p,
						const int cellI,
						const int cellJ,
						const int cellK){
  const bool insideX = (p.x >  grid.xmin+(cellI  )*grid.dx) &&
                       (p.x <= grid.xmin+(cellI+1)*grid.dx);
  const bool insideY = (p.y >  grid.ymin+(cellJ  )*grid.dy) &&
                       (p.y <= grid.ymin+(cellJ+1)*grid.dy);
  const bool insideZ = (p.z >  grid.zmin+(cellK  )*grid.dz) &&
                       (p.z <= grid.zmin+(cellK+1)*grid.dz);
  return insideX && insideY && insideZ;
}
// Advance ray r to its exit point from axis-aligned box `bbox` and report
// which face(s) it exits through as a bitmask in `face` (bit i set for
// face i; corner/edge exits can set several bits). Mutates r.start on
// success. Returns false when no forward exit exists.
// First pass: find the smallest positive exit distance `mint`; second pass:
// collect every face whose exit distance equals that minimum.
__host__ __device__ bool intersectRayBox(ray_t &r, const bbox_t &bbox, unsigned int &face){
  vector_t d = r.dir;
  vector_t s = r.start;
  vector_t invd = r.invDir; // precomputed componentwise 1/d
  dfloat mint = 20000;      // "infinite" initial exit distance
  face = 0;
  // pass 1: minimum positive distance to any candidate exit plane
  if(d.x>0){ // face 2
    dfloat newt = (bbox.xmax-s.x)*invd.x; // d.x > 0
    if(newt>0){
      mint = min(mint, newt);
    }
  }
  if(d.x<0){ // face 4
    // s.x + newt*d.x = bbox.xmin
    dfloat newt = (bbox.xmin-s.x)*invd.x;
    if(newt>0){
      mint = min(mint, newt);
    }
  }
  if(d.y>0){ // face 3
    dfloat newt = (bbox.ymax-s.y)*invd.y;
    if(newt>0){
      mint = min(mint, newt);
    }
  }
  if(d.y<0){ // face 1
    dfloat newt = (bbox.ymin-s.y)*invd.y;
    if(newt>0){
      mint = min(mint, newt);
    }
  }
  if(d.z>0){ // face 5
    dfloat newt = (bbox.zmax-s.z)*invd.z;
    if(newt>0){
      mint = min(mint, newt);
    }
  }
  if(d.z<0){ // face 0
    dfloat newt = (bbox.zmin-s.z)*invd.z;
    if(newt>0){
      mint = min(mint, newt);
    }
  }
  // pass 2: mark every face reached at (or before) the minimum distance
  face = 0;
  if(d.x>0){ // face 2
    dfloat newt = (bbox.xmax-s.x)*invd.x;
    if(newt>0 && newt<=mint)
      face |= 4;
  }
  if(d.x<0){ // face 4
    dfloat newt = (bbox.xmin-s.x)*invd.x;
    if(newt>0 && newt<=mint)
      face |= 16;
  }
  if(d.y>0){ // face 3
    dfloat newt = (bbox.ymax-s.y)*invd.y;
    if(newt>0 && newt<=mint)
      face |= 8;
  }
  if(d.y<0){ // face 1
    dfloat newt = (bbox.ymin-s.y)*invd.y;
    if(newt>0 && newt<=mint)
      face |= 2;
  }
  if(d.z>0){ // face 5
    dfloat newt = (bbox.zmax-s.z)*invd.z;
    if(newt>0 && newt<=mint)
      face |= 32;
  }
  if(d.z<0){ // face 0
    dfloat newt = (bbox.zmin-s.z)*invd.z;
    if(newt>0 && newt<=mint)
      face |= 1;
  }
  if(face>0){
    // move the ray start to the exit point
    r.start = vectorAdd(s, vectorScale(mint, d));
    return true;
  }
  return false;
}
// Dispatch the ray intersection test on the shape's type tag; IMAGE shares
// the rectangle test. Unknown types report no intersection.
__forceinline__ __host__ __device__ bool intersectRayShape(const ray_t &r, const shape_t &s, dfloat *t){
  if(s.type == SPHERE)    return intersectRaySphere   (r, s.sphere, t);
  if(s.type == CONE)      return intersectRayCone     (r, s.cone, t);
  if(s.type == DISK)      return intersectRayDisk     (r, s.disk, t);
  if(s.type == CYLINDER)  return intersectRayCylinder (r, s.cylinder, t);
  if(s.type == IMAGE ||
     s.type == RECTANGLE) return intersectRayRectangle(r, s.rectangle, t);
  if(s.type == TRIANGLE)  return intersectRayTriangle (r, s.triangle, t);
  if(s.type == ELLIPSOID) return intersectRayEllipsoid(r, s.ellipsoid, t);
  return false;
}
// Outward-ish surface normal of shape s at surface point v, normalized when
// nondegenerate. For CONE/DISK/CYLINDER, points within p_projectDelta of a
// rim/cap edge (or on a degenerate configuration) return the zero vector.
__forceinline__ __host__ __device__ vector_t computeNormal(const vector_t &v, const shape_t &s){
  vector_t n = vectorCreate(0,0,0);
  /* Find the normal for this new vector_t at the point of intersection */
  switch(s.type){
  case SPHERE:
    {
      // radial direction from the center
      n = vectorSub(v, s.sphere.pos);
      break;
    }
  case ELLIPSOID:
    {
      vector_t vMs = vectorSub(v, s.ellipsoid.pos);
      // f = (v-c).^2./(radii.^2) - 1 => n = grad f
      n = vectorDotMultiply(vMs, vectorDotMultiply(s.ellipsoid.invRadii, s.ellipsoid.invRadii));
      break;
    }
  case TRIANGLE:
    {
      // face normal from two edge vectors
      vector_t a = vectorSub(s.triangle.vertices[2], s.triangle.vertices[0]);
      vector_t b = vectorSub(s.triangle.vertices[1], s.triangle.vertices[0]);
      n = vectorCrossProduct(a, b);
      break;
    }
  case CONE:
    {
      // n = (v-vertex) x ( a x (v-vertex) )
      vector_t vMinusVertex = vectorSub(v, s.cone.vertex);
      // axis location
      dfloat H = s.cone.height;
      dfloat z = vectorDot(vMinusVertex, s.cone.axis);
      // problematic if axis is parallel to v-Vertex
      if(z>p_projectDelta && z<H-p_projectDelta)
	n = vectorCrossProduct( vMinusVertex, vectorCrossProduct(s.cone.axis, vMinusVertex));
      break;
    }
  case DISK:
    {
      vector_t vMc = vectorSub(v, s.disk.center);
      dfloat R = s.disk.radius;
      // in-plane distance from the center; only interior points get a normal
      vector_t tmp = vectorOrthogonalize(vMc, s.disk.normal);
      dfloat z = vectorNorm(tmp);
      if(z<R-p_projectDelta)
	n = s.disk.normal;
      break;
    }
  case CYLINDER:
    {
      // z = (v - c).a => clamp
      vector_t vMc = vectorSub(v, s.cylinder.center);
      dfloat H = s.cylinder.height;
      dfloat z = vectorDot(vMc, s.cylinder.axis);
      // radial normal, only away from the tube ends
      if(z>p_projectDelta && z<H-p_projectDelta)
	n = vectorOrthogonalize(vMc, s.cylinder.axis);
      break;
    }
  case IMAGE:
  case RECTANGLE:
    {
#if 0
      vector_t C = s.rectangle.center;
      vector_t A1 = s.rectangle.axis[0];
      vector_t A2 = s.rectangle.axis[1];
      dfloat L1 = s.rectangle.length[0];
      dfloat L2 = s.rectangle.length[1];
      // X = v - C
      vector_t X = vectorSub(v, C);
      dfloat h1 = vectorDot(A1, X)+0.5*L1; // shift
      dfloat h2 = vectorDot(A2, X)+0.5*L2; // shift
#endif
      // constant plane normal
      n = vectorCrossProduct(s.rectangle.axis[0], s.rectangle.axis[1]);
      break;
    }
  }
  // normalize when normal is not degenerate
  dfloat tmp = vectorNorm(n);
  if(tmp)
    n = vectorScale(1./tmp, n);
  return n;
}
// Determine the surface material (colour + optical coefficients) at point v
// on shape s:
//  - TRIANGLE: colour derived from the barycentric interpolation of the
//    per-vertex field values q, mapped through a red/white/blue colour table.
//  - SPHERE/ELLIPSOID/DISK/CYLINDER/CONE: plain lookup of the shape's material id.
//  - RECTANGLE: material id when set (>=0), otherwise a procedural checkerboard.
//  - IMAGE: nearest-neighbour texture lookup into the shape's image; the
//    result is flagged as an emitter so trace() uses the texel colour directly.
__forceinline__ __host__ __device__ material_t computeMaterial(const int Nmaterials, const material_t *materials,
                                                               const vector_t &v, const shape_t &s){
  material_t m;
  switch(s.type){
  case TRIANGLE:
    {
      // Barycentric coordinates of v in the triangle:
      //   v = L1*v1 + L2*v2 + (1-L1-L2)*v3 + N1*n
      //   [ v3-v1  v2-v1  -N1 ][L1;L2;n] = [v3-v]
      vector_t B1 = vectorSub(s.triangle.vertices[2], s.triangle.vertices[0]);
      vector_t B2 = vectorSub(s.triangle.vertices[1], s.triangle.vertices[0]);
      vector_t B3 = vectorCrossProduct(B1,B2);
      vector_t R  = vectorSub(s.triangle.vertices[2], v);
      dfloat J  = vectorTripleProduct(B2, B3, B1);
      dfloat L1 = vectorTripleProduct(B2, B3, R)/J;
      dfloat L2 = vectorTripleProduct(B3, B1, R)/J;
      // interpolated field value at v
      dfloat Iq = L1*s.triangle.q[0] + L2*s.triangle.q[1] + (1-L1-L2)*s.triangle.q[2];
      // map Iq from [minIq,maxIq] into [0,1]
      dfloat maxIq = 3, minIq = -3;
      Iq = (Iq-minIq)/(maxIq-minIq);
      Iq = clamp(Iq, 0, 1);
      // three-band colour table: red ramp, white band, blue ramp
      dfloat redIq = 0, greenIq = 0, blueIq = 0;
      if(Iq<1/3.) redIq = 3*Iq;
      else if(Iq>=2./3) blueIq = 3.*(Iq-2./3);
      else{
        redIq = 1;
        greenIq = 1;
        blueIq = 1;
      }
      m.diffuse.red = redIq;
      m.diffuse.green = greenIq;
      m.diffuse.blue = blueIq;
      m.reflection = 0.05;
      m.eta = 1.;
      m.refraction = 0.01;
      m.info.refractor = 0;
      m.info.reflector = 1;
      m.info.emitter = 0;
      break;
    }
  case SPHERE:
  case ELLIPSOID:
  case DISK:
  case CYLINDER:
  case CONE:
    {
      // simple shapes carry an explicit material id
      m = materials[s.material];
      break;
    }
  case RECTANGLE:
    {
      if(s.material>=0)
        m = materials[s.material];
      else{
        // procedural 8x8 checkerboard alternating materials[0] and materials[1]
        vector_t C = s.rectangle.center;
        vector_t A1 = s.rectangle.axis[0];
        vector_t A2 = s.rectangle.axis[1];
        dfloat L1 = s.rectangle.length[0];
        dfloat L2 = s.rectangle.length[1];
        // X = v - C, then in-plane coordinates shifted into [0,L1]x[0,L2]
        vector_t X = vectorSub(v, C);
        dfloat h1 = vectorDot(A1, X)+0.5*L1; // shift
        dfloat h2 = vectorDot(A2, X)+0.5*L2; // shift
        int i = (int) (8.f*(h1/L1)); // checkerboard material selector
        int j = (int) (8.f*(h2/L2));
        int idM = ((i%2) ^ ((j+1)%2)); // 1 if either i is odd or j is even
        m = materials[idM];
      }
      break;
    }
  case IMAGE:
    {
      vector_t C = s.rectangle.center;
      vector_t A1 = s.rectangle.axis[0];
      vector_t A2 = s.rectangle.axis[1];
      dfloat L1 = s.rectangle.length[0];
      dfloat L2 = s.rectangle.length[1];
      const unsigned char *img = s.rectangle.image;
      int NI = s.rectangle.NI;
      int NJ = s.rectangle.NJ;
      // X = v - C, then in-plane coordinates shifted into [0,L1]x[0,L2]
      vector_t X = vectorSub(v, C);
      dfloat h1 = vectorDot(A1, X)+0.5*L1; // shift
      dfloat h2 = vectorDot(A2, X)+0.5*L2; // shift
      // nearest neighbor interpolation
      // (fix: pixel indices are integers - previously stored in dfloat and
      //  silently converted back to int when computing idM)
      int i = iclamp(NI*h1/L1, 0, NI-1);
      int j = iclamp(NJ*h2/L2, 0, NJ-1);
      int idM = (NI-1-i) + j*NI; // mirrored in i
      m.diffuse.red = img[idM*3 + 0]/256.f;
      m.diffuse.green = img[idM*3 + 1]/256.f;
      m.diffuse.blue = img[idM*3 + 2]/256.f;
      m.reflection = 1;
      m.eta = 1.; // fix: was left uninitialized (harmless while refractor==0, but keep defined)
      m.refraction = 1;
      m.info.refractor = 0;
      m.info.reflector = 0;
      m.info.emitter = 1;
      break;
    }
  }
  return m;
}
// grid search
// Walk ray r through the uniform acceleration grid: first clip the ray start
// onto the grid bounds, then step cell by cell, testing only the shapes
// binned into the current cell, until a hit inside that cell is found.
// On success returns true with *t set to the hit distance and currentShape
// set to the index of the hit shape; otherwise returns false and
// currentShape stays -1.
// Fix: the second output parameter previously read "int ¤tShape" - an
// HTML-entity mangling of "int &currentShape" - which did not compile.
__host__ __device__ bool gridRayIntersectionSearch(ray_t r,
                                                   const int Nshapes, const shape_t *shapes, const grid_t &grid,
                                                   dfloat *t, int &currentShape){
  // is start of ray in a grid cell ?
  vector_t s = r.start; // will modify ray through s
  vector_t d = r.dir;
  // reciprocal direction components, guarded against division by zero
  // NOTE(review): components stay uninitialized when the matching direction
  // component is exactly zero - confirm intersectRayBox tolerates that.
  vector_t invd;
  if(d.x) invd.x = 1.f/d.x;
  if(d.y) invd.y = 1.f/d.y;
  if(d.z) invd.z = 1.f/d.z;
  // if ray is outside grid then project onto grid
  if(s.x<grid.xmin){
    if(d.x<=0) return false; // pointing away or grazing from grid
    dfloat t0 = -(s.x-grid.xmin)*invd.x;
    s.x = grid.xmin;
    s.y += t0*d.y;
    s.z += t0*d.z;
  }
  if(s.x>grid.xmax){
    if(d.x>=0) return false;
    dfloat t0 = -(s.x-grid.xmax)*invd.x;
    s.x = grid.xmax;
    s.y += t0*d.y;
    s.z += t0*d.z;
  }
  if(s.y<grid.ymin){
    if(d.y<=0) return false; // pointing away or grazing from grid
    dfloat t0 = -(s.y-grid.ymin)*invd.y;
    s.y = grid.ymin;
    s.x += t0*d.x;
    s.z += t0*d.z;
  }
  if(s.y>grid.ymax){
    if(d.y>=0) return false;
    dfloat t0 = -(s.y-grid.ymax)*invd.y;
    s.y = grid.ymax;
    s.x += t0*d.x;
    s.z += t0*d.z;
  }
  if(s.z<grid.zmin){
    if(d.z<=0) return false; // pointing away or grazing from grid
    dfloat t0 = -(s.z-grid.zmin)*invd.z;
    s.z = grid.zmin;
    s.x += t0*d.x;
    s.y += t0*d.y;
  }
  if(s.z>grid.zmax){
    if(d.z>=0) return false;
    dfloat t0 = -(s.z-grid.zmax)*invd.z;
    s.z = grid.zmax;
    s.x += t0*d.x;
    s.y += t0*d.y;
  }
  // now the ray start must be on the surface of the grid or in a cell
  int cellI = iclamp((s.x-grid.xmin)*grid.invdx,0,grid.NI-1); // assumes grid.NI
  int cellJ = iclamp((s.y-grid.ymin)*grid.invdy,0,grid.NJ-1);
  int cellK = iclamp((s.z-grid.zmin)*grid.invdz,0,grid.NK-1);
  ray_t newr = r;
  newr.start = s;
  newr.invDir = invd;
  currentShape = -1;
  do{
    int cellID = cellI + grid.NI*cellJ + grid.NI*grid.NJ*cellK;
    *t = 20000; // TW ?
    // test every shape binned into this cell
    int start = grid.c_boxStarts[cellID];
    int end = grid.c_boxStarts[cellID+1];
    for(int offset=start;offset<end;++offset){
      const int obj = grid.c_boxContents[offset];
      const shape_t shape = shapes[obj];
      if(intersectRayShape(r, shape, t)){
        // only accept hits that actually lie in the current cell (a shape
        // can be binned into several cells)
        vector_t intersect = vectorAdd(r.start, vectorScale(*t, r.dir));
        if(intersectPointGridCell(grid, intersect, cellI, cellJ, cellK)){
          currentShape = obj;
        }
      }
    }
    if(currentShape != -1){
      return true;
    }
    unsigned int face = 0;
    // find faces that ray passes through and step to the neighbouring cell
    intersectRayBox(newr,grid.c_bboxes[cellID], face);
    if(face&1) --cellK; // face 0
    if(face&2) --cellJ; // face 1
    if(face&4) ++cellI; // face 2
    if(face&8) ++cellJ; // face 3
    if(face&16) --cellI;// face 4
    if(face&32) ++cellK;// face 5
    if(face==0){
      break; // ray exits no face: give up
    }
  }while(cellI>=0 && cellI<grid.NI &&
         cellJ>=0 && cellJ<grid.NJ &&
         cellK>=0 && cellK<grid.NK);
  return false;
}
// Trace one primary ray through the scene and return its accumulated colour.
// GPU-friendly iterative formulation: instead of recursing, secondary rays
// (reflection/refraction) are appended to a fixed-size per-thread stack
// (rayStack, at most p_maxNrays entries) and processed in FIFO order. Each
// stacked ray carries its own bounce depth (r.level) and intensity scale
// (r.coef). `bg` is only used when the primary ray itself misses everything.
// NOTE(review): the `level` parameter is never read - the primary ray depth
// is reset to 0 below; confirm callers expect that.
__device__ colour_t trace(const grid_t grid,
                          const int Nshapes,
                          const shape_t *shapes,
                          const int Nlights,
                          const light_t *lights,
                          const int Nmaterials,
                          const material_t *materials,
                          ray_t r,
                          int level,
                          dfloat coef,
                          colour_t bg){
  colour_t black;
  black.red = 0;
  black.green = 0;
  black.blue = 0;
  // initialize color as black
  colour_t c = black;
  int Nrays = 0, rayID = 0;
  // per-thread (local memory) stack of pending rays
  ray_t rayStack[p_maxNrays];
  // add initial ray to stack
  rayID = 0;
  r.level = 0;
  r.coef = coef;
  rayStack[Nrays] = r;
  ++Nrays;
  // keep looping until the stack is exhausted or the maximum number of rays is reached
  while(rayID<Nrays && Nrays<p_maxNrays){
    // get ray
    r = rayStack[rayID];
    // look for intersection of this ray with shapes
    int currentShapeID = -1;
    dfloat t = 20000.f;
    // look through grid to find intersections with ray
    gridRayIntersectionSearch(r, Nshapes, shapes, grid, &t, currentShapeID);
    // none found
    if(currentShapeID == -1){
      // only the primary ray contributes the background colour
      if(rayID==0)
        c = bg;
      // go to next ray
      ++rayID;
      continue;
    }
    // shape at nearest ray intersection
    shape_t currentShape = shapes[currentShapeID];
    // compute intersection location
    vector_t intersection = vectorAdd(r.start, vectorScale(t, r.dir));
    // find unit surface normal
    vector_t n = computeNormal(intersection, currentShape);
    /* use shadow tracing to determine color contribution from this intersection */
    dfloat rdotn = vectorDot(r.dir, n);
    /* Find the material to determine the colour */
    material_t currentMat = computeMaterial(Nmaterials, materials, intersection, currentShape);
    // test for reflection
    info_t info = currentMat.info;
    if(info.emitter==1){
      // emitting surfaces add their own (scaled) colour directly; no shadow rays
      dfloat lambert = rdotn * r.coef;
      c.red += lambert * currentMat.diffuse.red;
      c.green += lambert * currentMat.diffuse.green;
      c.blue += lambert * currentMat.diffuse.blue;
    }
    else{
      if(info.reflector==1){
        /* start ray slightly off surface */
        dfloat sc = p_shadowDelta;
        if(rdotn>0) // reverse offset if inside
          sc *= -1.f; // sign ? was -1
        vector_t shadowStart = vectorAdd(intersection, vectorScale(sc, n)); // HACK to shift ray start off surface
        ray_t lightRay;
        lightRay.start = shadowStart;
        /* Find the value of the light at this point */
        for(unsigned int j=0; j < Nlights; j++){
          light_t currentLight = lights[j];
          vector_t dist = vectorSub(currentLight.pos, shadowStart);
          // light behind the surface contributes nothing
          if(vectorDot(n, dist) <= 0) continue;
          dfloat lightDist = vectorNorm(dist);
          dfloat tshadow = lightDist;
          if(tshadow <= 0) continue;
          lightRay.dir = vectorScale((1.f/tshadow), dist);
          /* search in light ray direction for object */
          int shadowShapeID = -1;
          gridRayIntersectionSearch(lightRay, Nshapes, shapes, grid, &tshadow, shadowShapeID);
          // check for objects in path of shadow ray
          bool inShadow = false;
          if(shadowShapeID==-1) // no object causes shadow
            inShadow = false;
          else if(tshadow >= 0 && tshadow < lightDist) // blocker sits between surface and light
            inShadow = true;
          if(inShadow==false){
            /* Lambert diffusion */
            dfloat lambert = vectorDot(lightRay.dir, n) * r.coef;
            c.red += lambert * currentLight.intensity.red * currentMat.diffuse.red;
            c.green += lambert * currentLight.intensity.green * currentMat.diffuse.green;
            c.blue += lambert * currentLight.intensity.blue * currentMat.diffuse.blue;
          }
        }
        // push a reflected ray if depth and stack limits allow
        if((r.level+1<p_maxLevel) && Nrays<p_maxNrays) {
          ray_t reflectRay;
          // create new ray starting from offset intersection, with ray direction reflected in normal
          reflectRay.start = shadowStart;
          reflectRay.dir = vectorAdd(r.dir, vectorScale(-2.0f*rdotn, n));
          // increment level for new ray
          reflectRay.level = r.level+1;
          reflectRay.coef = r.coef*currentMat.reflection; // scale intensity
          // launch new ray
          rayStack[Nrays] = reflectRay;
          // increment ray counter
          ++Nrays;
        }
      }
      // https://www.scratchapixel.com/code.php?id=13&origin=/lessons/3d-basic-rendering/introduction-to-shading
      // test for refraction
      if(info.refractor==1){
        // can we add a new refraction ray to the stack ?
        if((r.level+1<p_maxLevel) && Nrays<p_maxNrays){
          // push ray onto other side of surface
          dfloat sc = -p_shadowDelta; // reverse number above
          if(rdotn>0)
            sc *= -1;
          // HACK to shift ray start off surface
          vector_t shadowStart = vectorAdd(intersection, vectorScale(sc, n));
          // get index of refraction
          dfloat eta = currentMat.eta;
          if(rdotn>0){
            rdotn *= -1;
          }else{
            eta = 1.f/eta;
          }
          // kappa<0 means total internal reflection: no transmitted ray
          dfloat kappa = 1.f - eta*eta*(1.f - rdotn*rdotn);
          if(kappa>0){
            // create new refraction ray
            ray_t refractRay;
            // https://www.cs.cornell.edu/courses/cs4620/2012fa/lectures/36raytracing.pdf
            // newdir = eta*d - n*(eta*d.n - sqrt((1-eta*eta*(1-(d.n)^2))))
            dfloat fac = eta*rdotn+sqrt(kappa); // was - (NEED TO DOUBLE CHECK - other normal)
            refractRay.start = shadowStart;
            refractRay.dir = vectorNormalize(vectorSub(vectorScale(eta, r.dir), vectorScale(fac, n)));
            refractRay.level = r.level+1;
            refractRay.coef = r.coef*currentMat.refraction; // scale intensity
            rayStack[Nrays] = refractRay;
            ++Nrays;
          }
        }
      }
    }
    // go to next ray on stack
    ++rayID;
  }
  return c;
}
// One thread per image pixel (I,J); expects a 2D launch covering NI x NJ.
// Thin-lens camera with depth of field: p_Nsamples rays per pixel aim at a
// shared focal-plane target; sample 0 starts at the sensor pixel itself,
// the remaining samples start at random points on the lens aperture. The
// whole ray (start and direction) is additionally rotated about the
// vertical axis through the box centre using (costheta, sintheta).
__global__ void renderKernel (const int NI,
                              const int NJ,
                              const grid_t grid,
                              const sensor_t sensor,
                              const int Nshapes,
                              const shape_t *shapes,
                              const int Nlights,
                              const light_t *lights,
                              const int Nmaterials,
                              const material_t *materials,
                              const dfloat costheta,
                              const dfloat sintheta,
                              const dfloat *randomNumbers,
                              unsigned char *img
                              ){
  const colour_t bg = sensor.bg;
  int I = threadIdx.x + blockDim.x*blockIdx.x;
  int J = threadIdx.y + blockDim.y*blockIdx.y;
  if(I<NI && J<NJ){ // guard: grid may overhang the image
    ray_t r;
    dfloat coef = 1.0;
    int level = 0;
    // look at this: https://en.wikipedia.org/wiki/3D_projection
    // 2.5 location of sensor pixel
    colour_t c;
    // dfloat randI = randomNumbers[I];
    // dfloat randJ = randomNumbers[J];
    dfloat x0 = sensor.eyeX.x;
    dfloat y0 = sensor.eyeX.y;
    dfloat z0 = sensor.eyeX.z;
    // multiple rays emanating from sensor, passing through lens and focusing at the focal plane
    // 1. compute intersection of ray passing through lens center to focal plane
    //    (sensorX + alpha*(lensC - sensorX)).sensorN = focalPlaneOffset
    //    alpha = (focalOffset - s.sensorN)/((lensC - s).sensorN)   [ . dot product ]
    dfloat cx = BOXSIZE/2., cy = BOXSIZE/2., cz = BOXSIZE/2; // box centre (rotation pivot)
    vector_t sensorN = vectorCrossProduct(sensor.Idir, sensor.Jdir);
    vector_t sensorX = sensorLocation(NI, NJ, I, J, sensor);
    dfloat focalPlaneOffset = sensor.focalPlaneOffset;
    vector_t centralRayDir = vectorSub(sensor.lensC, sensorX);
    dfloat alpha = (focalPlaneOffset - vectorDot(sensorX, sensorN))/vectorDot(centralRayDir, sensorN);
    // 2. target: every sample for this pixel aims at this focal-plane point
    vector_t targetX = vectorAdd(sensorX, vectorScale(alpha, centralRayDir));
    x0 = sensorX.x;
    y0 = sensorX.y;
    z0 = sensorX.z;
    // 3. loop over random offsets on lens (thin lens)
    c.red = 0; c.green = 0; c.blue = 0;
    for(int samp=0;samp<p_Nsamples;++samp){
      // pick a precomputed random aperture offset (unit-circle points scaled
      // by the aperture radius)
      int sampId = (I+J*NI + samp*blockDim.x*blockDim.y)%NRANDOM;
      dfloat offI = randomNumbers[2*sampId+0]*p_apertureRadius;
      dfloat offJ = randomNumbers[2*sampId+1]*p_apertureRadius;
      // choose random starting point on lens (assumes lens and sensor are parallel);
      // samp==0 keeps the sensor-pixel start (primary ray)
      if(samp>0) {
        x0 = sensor.lensC.x + offI*sensor.Idir.x + offJ*sensor.Jdir.x;
        y0 = sensor.lensC.y + offI*sensor.Idir.y + offJ*sensor.Jdir.y;
        z0 = sensor.lensC.z + offI*sensor.Idir.z + offJ*sensor.Jdir.z;
      }
      // unit direction from sample start towards the focal-plane target
      dfloat dx0 = targetX.x - x0;
      dfloat dy0 = targetX.y - y0;
      dfloat dz0 = targetX.z - z0;
      dfloat L0 = sqrt(dx0*dx0+dy0*dy0+dz0*dz0);
      dx0 = dx0/L0;
      dy0 = dy0/L0;
      dz0 = dz0/L0;
      // rotate start and direction about the vertical axis through (cx,·,cz)
      r.start.x = costheta*(x0-cx) - sintheta*(z0-cz) + cx;
      r.start.y = y0;
      r.start.z = sintheta*(x0-cx) + costheta*(z0-cz) + cz;
      r.dir.x = costheta*dx0 - sintheta*dz0;
      r.dir.y = dy0;
      r.dir.z = sintheta*dx0 + costheta*dz0;
      colour_t newc =
        trace(grid, Nshapes, shapes, Nlights, lights, Nmaterials, materials, r, level, coef, bg);
      // the primary (in-focus) sample gets extra weight
      dfloat sc = (samp==0) ? p_primaryWeight: 1.f;
      c.red += sc*newc.red;
      c.green += sc*newc.green;
      c.blue += sc*newc.blue;
    }
    // primary weighted average
    c.red /= (p_primaryWeight+p_Nsamples-1);
    c.green /= (p_primaryWeight+p_Nsamples-1);
    c.blue /= (p_primaryWeight+p_Nsamples-1);
    // reverse vertical because of lensing; clamp to 255 before narrowing
    img[(I + (NJ-1-J)*NI)*3 + 0] = (unsigned char)min( c.red*255.0f, 255.0f);
    img[(I + (NJ-1-J)*NI)*3 + 1] = (unsigned char)min(c.green*255.0f, 255.0f);
    img[(I + (NJ-1-J)*NI)*3 + 2] = (unsigned char)min( c.blue*255.0f, 255.0f);
  }
}
#define BLOCKSIZE 1024
#define LOGBLOCKSIZE 10
// https://en.wikipedia.org/wiki/Prefix_sum
// Hillis and Steele
// [ can be done with far fewer barriers ]
// First pass of a two-pass block-wise scan. Each block of BLOCKSIZE threads
// scans its BLOCKSIZE entries of v in shared memory, writes the inclusive
// partial sums (shifted by one) to scanv[n+1], and stores its block total in
// starts[b+1]. finishScanKernel later folds in the accumulated block offsets.
__global__ void startScanKernel(const int N,
                                const int *v,
                                int *scanv,
                                int *starts){
  // double buffer: each step reads one buffer and writes the other
  __shared__ int s_v0[BLOCKSIZE];
  __shared__ int s_v1[BLOCKSIZE];
  int j = threadIdx.x;
  int b = blockIdx.x;
  int n = j + b*BLOCKSIZE; // global entry index
  // load this block's entries, padding with zeros past the end of v
  s_v0[j] = (n<N) ? v[j+b*BLOCKSIZE]: 0;
  int offset = 1;
  do{
    // two Hillis-Steele steps per iteration, ping-ponging between buffers;
    // the barriers are unconditional, so all threads of the block reach them
    __syncthreads();
    s_v1[j] = (j<offset) ? s_v0[j] : (s_v0[j]+s_v0[j-offset]) ;
    offset *= 2;
    __syncthreads();
    s_v0[j] = (j<offset) ? s_v1[j] : (s_v1[j]+s_v1[j-offset]) ;
    offset *= 2;
  } while(offset<BLOCKSIZE);
  // scanv[n+1] holds the within-block inclusive sum of v[..n]
  if(n<N)
    scanv[n+1] = s_v0[j];
  // last thread of the block holds the block total
  if(j==(BLOCKSIZE-1)){
    starts[b+1] = s_v0[j];
  }
}
// Second pass of the block-wise scan: fold each block's base offset
// (starts[b] = cumulative total of all preceding blocks) into the per-block
// partial sums that startScanKernel wrote to scanv.
__global__ void finishScanKernel(const int N,
                                 int *scanv,
                                 int *starts){
  const int tid   = threadIdx.x;
  const int blk   = blockIdx.x;
  const int entry = tid + blk*BLOCKSIZE;
  if(entry >= N) return;
  scanv[entry+1] += starts[blk];
}
// Exclusive prefix sum of the device array c_v[0..N-1] into c_scanv[0..N]
// (c_scanv[0] = 0, c_scanv[n+1] = sum of c_v[0..n]); returns the grand total.
// Two-pass scheme: per-block Hillis-Steele scan on the device, block totals
// accumulated into offsets on the host, offsets folded back in on the device.
// c_starts (device) and starts (host) are caller-provided scratch of at
// least G+1 ints, where G = ceil(N/BLOCKSIZE).
int scan(const int N, const int *c_v, int *c_starts, int *starts, int *c_scanv){
  int B = BLOCKSIZE;
  int G = (N+BLOCKSIZE-1)/BLOCKSIZE;
  // fix: the kernels only ever write c_scanv[1..N]; define c_scanv[0]=0
  // explicitly (it was previously left as uninitialized device memory)
  cudaMemset(c_scanv, 0, sizeof(int));
  startScanKernel <<< G, B >>> (N, c_v, c_scanv, c_starts);
  // bring the per-block totals to the host (blocking copy also syncs the kernel)
  cudaMemcpy(starts, c_starts, (G+1)*sizeof(int), cudaMemcpyDeviceToHost);
  // turn block totals into cumulative block offsets
  starts[0] = 0;
  for(int b=0;b<G;++b){
    starts[b+1] += starts[b];
  }
  int count = starts[G]; // grand total over all N entries
  cudaMemcpy(c_starts, starts, (G+1)*sizeof(int), cudaMemcpyHostToDevice);
  finishScanKernel <<< G, B >>> (N, c_scanv, c_starts);
  return count;
}
// Axis-aligned bounding box of a triangle: componentwise min/max of its
// three vertices.
__device__ bbox_t createBoundingBoxTriangle(triangle_t &triangle){
  bbox_t bbox;
  const vector_t &v0 = triangle.vertices[0];
  const vector_t &v1 = triangle.vertices[1];
  const vector_t &v2 = triangle.vertices[2];
  bbox.xmin = min(v0.x, min(v1.x, v2.x));
  bbox.xmax = max(v0.x, max(v1.x, v2.x));
  bbox.ymin = min(v0.y, min(v1.y, v2.y));
  bbox.ymax = max(v0.y, max(v1.y, v2.y));
  bbox.zmin = min(v0.z, min(v1.z, v2.z));
  bbox.zmax = max(v0.z, max(v1.z, v2.z));
  return bbox;
}
// Axis-aligned bounding box of a sphere: centre +/- radius on every axis.
__device__ bbox_t createBoundingBoxSphere(sphere_t &sphere){
  bbox_t bbox;
  const vector_t c = sphere.pos;
  const dfloat   R = sphere.radius;
  bbox.xmin = c.x - R;  bbox.xmax = c.x + R;
  bbox.ymin = c.y - R;  bbox.ymax = c.y + R;
  bbox.zmin = c.z - R;  bbox.zmax = c.z + R;
  return bbox;
}
// Axis-aligned bounding box of an (axis-aligned) ellipsoid: centre +/- the
// semi-axis length (stored as a reciprocal) on each axis.
__device__ bbox_t createBoundingBoxEllipsoid(ellipsoid_t &ellipsoid){
  bbox_t bbox;
  const vector_t c  = ellipsoid.pos;
  const dfloat   rx = 1.f/ellipsoid.invRadii.x;
  const dfloat   ry = 1.f/ellipsoid.invRadii.y;
  const dfloat   rz = 1.f/ellipsoid.invRadii.z;
  bbox.xmin = c.x - rx;  bbox.xmax = c.x + rx;
  bbox.ymin = c.y - ry;  bbox.ymax = c.y + ry;
  bbox.zmin = c.z - rz;  bbox.zmax = c.z + rz;
  return bbox;
}
// Axis-aligned bounding box of a finite cylinder with base centre c, unit
// axis a, radius R, and height H. Per axis e:
//   extent = (axis midpoint offset) +/- ( (H/2)*|a.e| + R*|cross(e,a)| )
__device__ bbox_t createBoundingBoxCylinder(cylinder_t &cylinder){
  bbox_t bbox;
  vector_t c = cylinder.center;
  vector_t a = cylinder.axis;
  dfloat R = cylinder.radius;
  dfloat H = cylinder.height;
  // |cross(e_x,a)|, |cross(e_y,a)|, |cross(e_z,a)| for a unit axis a
  const dfloat crossX = sqrtf(a.y*a.y + a.z*a.z);
  const dfloat crossY = sqrtf(a.x*a.x + a.z*a.z);
  const dfloat crossZ = sqrtf(a.x*a.x + a.y*a.y);
  bbox.xmax = c.x + (H/2)*a.x + (H/2)*fabs(a.x) + R*crossX;
  bbox.xmin = c.x + (H/2)*a.x - (H/2)*fabs(a.x) - R*crossX;
  bbox.ymax = c.y + (H/2)*a.y + (H/2)*fabs(a.y) + R*crossY;
  bbox.ymin = c.y + (H/2)*a.y - (H/2)*fabs(a.y) - R*crossY;
  bbox.zmax = c.z + (H/2)*a.z + (H/2)*fabs(a.z) + R*crossZ;
  bbox.zmin = c.z + (H/2)*a.z - (H/2)*fabs(a.z) - R*crossZ;
  return bbox;
}
// Axis-aligned bounding box of a cone with apex v, unit axis a, base radius
// R, and height H: the extremes are either the apex itself or a point on the
// rim of the base disc at v + H*a.
__device__ bbox_t createBoundingBoxCone(cone_t &cone){
  bbox_t bbox;
  vector_t v = cone.vertex;
  vector_t a = cone.axis;
  dfloat R = cone.radius;
  dfloat H = cone.height;
  // base-disc radial reach per axis: R*|cross(e,a)|
  const dfloat reachX = R*sqrtf(a.y*a.y + a.z*a.z);
  const dfloat reachY = R*sqrtf(a.x*a.x + a.z*a.z);
  const dfloat reachZ = R*sqrtf(a.x*a.x + a.y*a.y);
  bbox.xmax = max(v.x, v.x + H*a.x + reachX);
  bbox.xmin = min(v.x, v.x + H*a.x - reachX);
  bbox.ymax = max(v.y, v.y + H*a.y + reachY);
  bbox.ymin = min(v.y, v.y + H*a.y - reachY);
  bbox.zmax = max(v.z, v.z + H*a.z + reachZ);
  bbox.zmin = min(v.z, v.z + H*a.z - reachZ);
  return bbox;
}
// Axis-aligned bounding box of a rectangle (also used for IMAGE shapes):
// take the eight corners of a thin slab around the rectangle (the rectangle
// thickened by `delta` along its normal) and reduce them with min/max.
__device__ bbox_t createBoundingBoxRectangle(rectangle_t &rectangle){
  bbox_t bbox;
  vector_t C = rectangle.center;
  vector_t A1 = rectangle.axis[0];
  vector_t A2 = rectangle.axis[1];
  vector_t normal = vectorCrossProduct(A1, A2);
  dfloat L1 = rectangle.length[0];
  dfloat L2 = rectangle.length[1];
  // half-extent vectors along the rectangle's in-plane axes
  A1 = vectorScale(L1/2., A1);
  A2 = vectorScale(L2/2., A2);
  // thicken slightly along the normal so the box is never degenerate
  dfloat delta = 1e-1;
  vector_t dn = vectorScale(delta, normal);
  vector_t Cdown = vectorSub(C, dn);
  vector_t Cup = vectorAdd(C, dn);
  // the eight slab corners: (C -/+ delta*n) +/- A1 +/- A2
  vector_t corner[8];
  corner[0] = vectorSub(Cdown, vectorAdd(A1, A2)); // -n, -A1, -A2
  corner[1] = vectorAdd(Cdown, vectorSub(A1, A2)); // -n, +A1, -A2
  corner[2] = vectorAdd(Cdown, vectorAdd(A1, A2)); // -n, +A1, +A2
  corner[3] = vectorAdd(Cdown, vectorSub(A2, A1)); // -n, -A1, +A2
  corner[4] = vectorSub(Cup,   vectorAdd(A1, A2)); // +n, -A1, -A2
  corner[5] = vectorAdd(Cup,   vectorSub(A1, A2)); // +n, +A1, -A2
  corner[6] = vectorAdd(Cup,   vectorAdd(A1, A2)); // +n, +A1, +A2
  corner[7] = vectorAdd(Cup,   vectorSub(A2, A1)); // +n, -A1, +A2
  // reduce corners to min/max per axis
  bbox.xmin = 1e9;   bbox.ymin = 1e9;   bbox.zmin = 1e9;
  bbox.xmax = -1e9;  bbox.ymax = -1e9;  bbox.zmax = -1e9;
#pragma unroll 8
  for(int k=0;k<8;++k){
    bbox.xmin = min(bbox.xmin, corner[k].x);
    bbox.ymin = min(bbox.ymin, corner[k].y);
    bbox.zmin = min(bbox.zmin, corner[k].z);
    bbox.xmax = max(bbox.xmax, corner[k].x);
    bbox.ymax = max(bbox.ymax, corner[k].y);
    bbox.zmax = max(bbox.zmax, corner[k].z);
  }
  return bbox;
}
// Axis-aligned bounding box of a disk with centre c, unit normal n, and
// radius R; the disk is treated as a thin slab of thickness H along n.
__device__ bbox_t createBoundingBoxDisk(disk_t &disk){
  bbox_t bbox;
  vector_t n = disk.normal;
  vector_t c = disk.center;
  dfloat R = disk.radius;
  dfloat H = .1; // assert thickness in normal
  // in-plane radial reach per axis: R*|cross(e,n)|
  const dfloat reachX = R*sqrtf(n.y*n.y + n.z*n.z);
  const dfloat reachY = R*sqrtf(n.x*n.x + n.z*n.z);
  const dfloat reachZ = R*sqrtf(n.x*n.x + n.y*n.y);
  bbox.xmax = c.x + (H/2)*fabs(n.x) + reachX;
  bbox.xmin = c.x - (H/2)*fabs(n.x) - reachX;
  bbox.ymax = c.y + (H/2)*fabs(n.y) + reachY;
  bbox.ymin = c.y - (H/2)*fabs(n.y) - reachY;
  bbox.zmax = c.z + (H/2)*fabs(n.z) + reachZ;
  bbox.zmin = c.z - (H/2)*fabs(n.z) - reachZ;
  return bbox;
}
// Compute the world-space bounding box of `shape`, convert it to a range of
// grid-cell indices (clamped to the grid extents), and store the result in
// shape.bbox.
__device__ void createBoundingBoxShape(const grid_t &grid, shape_t &shape){
  // fix: zero-initialize so an unhandled shape type cannot leave garbage
  // bounds (bbox was previously uninitialized before the switch)
  bbox_t bbox = {};
  switch(shape.type){
  case TRIANGLE:  bbox = createBoundingBoxTriangle(shape.triangle); break;
  case SPHERE:    bbox = createBoundingBoxSphere(shape.sphere); break;
  case ELLIPSOID: bbox = createBoundingBoxEllipsoid(shape.ellipsoid); break;
  case IMAGE:
  case RECTANGLE: bbox = createBoundingBoxRectangle(shape.rectangle); break;
  case CYLINDER:  bbox = createBoundingBoxCylinder(shape.cylinder); break;
  case DISK:      bbox = createBoundingBoxDisk(shape.disk); break;
  case CONE:      bbox = createBoundingBoxCone(shape.cone); break;
  }
  // world coordinates -> (floored) cell indices
  int imin = floor(grid.invdx*(bbox.xmin-grid.xmin));
  int imax = floor(grid.invdx*(bbox.xmax-grid.xmin));
  int jmin = floor(grid.invdy*(bbox.ymin-grid.ymin));
  int jmax = floor(grid.invdy*(bbox.ymax-grid.ymin));
  int kmin = floor(grid.invdz*(bbox.zmin-grid.zmin));
  int kmax = floor(grid.invdz*(bbox.zmax-grid.zmin)); // was ceil
  // clamp to the valid cell range in each direction
  bbox.imin = iclamp(imin, 0, grid.NI-1);
  bbox.imax = iclamp(imax, 0, grid.NI-1);
  bbox.jmin = iclamp(jmin, 0, grid.NJ-1);
  bbox.jmax = iclamp(jmax, 0, grid.NJ-1);
  bbox.kmin = iclamp(kmin, 0, grid.NK-1);
  bbox.kmax = iclamp(kmax, 0, grid.NK-1);
  shape.bbox = bbox;
}
// One thread per shape: compute the shape's bounding box (stored into the
// shape as a side effect) and atomically bump the occupancy counter of every
// grid cell the box overlaps.
__global__ void countShapesInBoxesKernel(const grid_t grid, const int Nshapes, shape_t *shapes, int *counts){
  const int tid = threadIdx.x + blockDim.x*blockIdx.x;
  if(tid >= Nshapes) return;
  shape_t &shape = shapes[tid];
  createBoundingBoxShape(grid, shape);
  const bbox_t &bb = shape.bbox;
  for(int bk=bb.kmin; bk<=bb.kmax; ++bk){
    for(int bj=bb.jmin; bj<=bb.jmax; ++bj){
      for(int bi=bb.imin; bi<=bb.imax; ++bi){
        const int cellID = bi + grid.NI*(bj + grid.NJ*bk);
        atomicAdd(counts+cellID, 1); // cells are shared between threads
      }
    }
  }
}
// One thread per shape: scatter the shape id into the contents list of every
// grid cell its bounding box overlaps. boxCounters starts as a copy of the
// per-cell start offsets; atomicAdd hands each insertion a unique slot.
__global__ void addShapesInBoxesKernel(const grid_t grid, const int Nshapes, const shape_t *shapes, int *boxCounters, int *boxContents){
  const int tid = threadIdx.x + blockDim.x*blockIdx.x;
  if(tid >= Nshapes) return;
  const shape_t &shape = shapes[tid];
  const bbox_t &bb = shape.bbox;
  for(int bk=bb.kmin; bk<=bb.kmax; ++bk){
    for(int bj=bb.jmin; bj<=bb.jmax; ++bj){
      for(int bi=bb.imin; bi<=bb.imax; ++bi){
        // linear id of this grid cell
        const int cellID = bi + grid.NI*(bj + grid.NJ*bk);
        // reserve the next free slot in this cell's contents range
        const int slot = atomicAdd(boxCounters+cellID, 1);
        boxContents[slot] = shape.id;
      }
    }
  }
}
// Bin all shapes into the uniform acceleration grid:
//  1. count, per cell, how many shape bounding boxes overlap it,
//  2. exclusive-scan the counts into grid->c_boxStarts,
//  3. scatter shape ids into grid->c_boxContents using atomically
//     incremented per-cell cursors.
// Safe to call repeatedly: arrays from a previous call are released first
// (the pointers start out NULL because gridSetup calloc's the grid struct).
void populateGrid(grid_t *grid, int Nshapes, shape_t *c_shapes){
  if(grid->c_boxStarts){
    cudaFree(grid->c_boxStarts);
    cudaFree(grid->c_boxContents);
  }
  int Nboxes = grid->NI*grid->NJ*grid->NK;
  // (fix: removed an unused host-side calloc of boxCounts that was never read)
  int *c_boxCounts, *c_boxCounters;
  cudaMalloc(&c_boxCounts, (Nboxes+1)*sizeof(int));
  cudaMalloc(&(grid->c_boxStarts), (Nboxes+1)*sizeof(int));
  cudaMalloc(&(c_boxCounters), (Nboxes+1)*sizeof(int));
  cudaMemset(c_boxCounts, 0, (Nboxes+1)*sizeof(int));
  int B = BLOCKSIZE;
  int G = (Nshapes+B-1)/B;
  // also computes each shape's bounding box as a side effect
  countShapesInBoxesKernel <<< G, B >>> (*grid, Nshapes, c_shapes, c_boxCounts);
  // parallel scan to get cumulative offsets starting at zero
  int *tmp = (int*) calloc(Nboxes+1, sizeof(int));
  int *c_tmp;
  cudaMalloc(&c_tmp, (Nboxes+1)*sizeof(int));
  int Nentries =
    scan(Nboxes, c_boxCounts, c_tmp, tmp, grid->c_boxStarts);
  // build container for boxes
  cudaMalloc(&(grid->c_boxContents), (Nentries+1)*sizeof(int));
  // boxCounters starts as a copy of the start offsets; the scatter kernel
  // advances each cell's cursor atomically
  cudaMemcpy(c_boxCounters, grid->c_boxStarts, (Nboxes+1)*sizeof(int), cudaMemcpyDeviceToDevice);
  // add each shape to every box that intersects the shape's bounding box
  addShapesInBoxesKernel <<< G, B >>> (*grid, Nshapes, c_shapes, c_boxCounters, grid->c_boxContents);
  free(tmp);
  cudaFree(c_tmp);
  cudaFree(c_boxCounts);
  cudaFree(c_boxCounters);
}
// Allocate device-side copies of the scene arrays (shapes, materials,
// lights, per-cell bounding boxes, lens-sampling random table) plus the
// output image buffer, and upload the host data.
void sceneOffload(scene_t *scene){
  grid_t *grid = scene->grid;
  const int    Ncells        = grid->NI*grid->NJ*grid->NK;
  const size_t materialBytes = scene->Nmaterials*sizeof(material_t);
  const size_t lightBytes    = scene->Nlights*sizeof(light_t);
  const size_t shapeBytes    = scene->Nshapes*sizeof(shape_t);
  const size_t bboxBytes     = Ncells*sizeof(bbox_t);
  const size_t randomBytes   = 2*NRANDOM*sizeof(dfloat);
  // device allocations
  cudaMalloc(&(scene->c_materials), materialBytes);
  cudaMalloc(&(scene->c_lights), lightBytes);
  cudaMalloc(&(scene->c_shapes), shapeBytes);
  cudaMalloc(&(scene->c_img), WIDTH*HEIGHT*3*sizeof(char));
  cudaMalloc(&(grid->c_bboxes), bboxBytes);
  cudaMalloc(&(scene->c_randomNumbers), randomBytes);
  // host -> device uploads
  cudaMemcpy(scene->c_shapes, scene->shapes, shapeBytes, cudaMemcpyHostToDevice);
  cudaMemcpy(scene->c_materials, scene->materials, materialBytes, cudaMemcpyHostToDevice);
  cudaMemcpy(scene->c_lights, scene->lights, lightBytes, cudaMemcpyHostToDevice);
  cudaMemcpy(grid->c_bboxes, grid->bboxes, bboxBytes, cudaMemcpyHostToDevice);
  cudaMemcpy(scene->c_randomNumbers, scene->randomNumbers, randomBytes, cudaMemcpyHostToDevice);
}
// Uniform random dfloat in [dmin, dmax) drawn from the drand48 generator.
dfloat drandRange48(dfloat dmin, dfloat dmax){
  const dfloat u = drand48(); // u in [0,1)
  return dmin + u*(dmax-dmin);
}
// Build a uniform acceleration grid covering the world cube [0,L]^3 padded
// by `delta` on every side.
//   L     = size of the world box
//   delta = width of the padding layer around the box
grid_t *gridSetup(dfloat L, dfloat delta){
  // calloc so device pointers (c_boxStarts etc.) start out NULL
  grid_t *grid = (grid_t*) calloc(1, sizeof(grid_t));
  // padded extents
  grid->xmin = -delta;  grid->xmax = L + delta;
  grid->ymin = -delta;  grid->ymax = L + delta;
  grid->zmin = -delta;  grid->zmax = L + delta;
  // grid resolution
  grid->NI = 401;
  grid->NJ = 401;
  grid->NK = 401;
  // cell sizes and their reciprocals (for fast point -> cell conversion)
  grid->dx = (grid->xmax-grid->xmin)/grid->NI;
  grid->dy = (grid->ymax-grid->ymin)/grid->NJ;
  grid->dz = (grid->zmax-grid->zmin)/grid->NK;
  grid->invdx = grid->NI/(grid->xmax-grid->xmin);
  grid->invdy = grid->NJ/(grid->ymax-grid->ymin);
  grid->invdz = grid->NK/(grid->zmax-grid->zmin);
  // precompute every cell's world-space bounding box
  grid->bboxes = (bbox_t*) calloc(grid->NI*grid->NJ*grid->NK, sizeof(bbox_t));
  for(int k=0;k<grid->NK;++k){
    for(int j=0;j<grid->NJ;++j){
      for(int i=0;i<grid->NI;++i){
        bbox_t *cell = grid->bboxes + (i + j*grid->NI + k*grid->NI*grid->NJ);
        cell->xmin = i*grid->dx + grid->xmin;
        cell->xmax = (i+1)*grid->dx + grid->xmin;
        cell->ymin = j*grid->dy + grid->ymin;
        cell->ymax = (j+1)*grid->dy + grid->ymin;
        cell->zmin = k*grid->dz + grid->zmin;
        cell->zmax = (k+1)*grid->dz + grid->zmin;
      }
    }
  }
  return grid;
}
// Build the host-side scene from a triangulated surface plot:
//  - a 64-entry material palette ([0] white mirror, [1] green glassy,
//    the rest randomized),
//  - one TRIANGLE shape per plot element, rescaled/flipped into the world
//    box and carrying the field values q for colouring,
//  - one ceiling RECTANGLE with a procedural checkerboard (material = -1),
//  - five point lights,
//  - the acceleration grid and a table of unit-circle random points used
//    for thin-lens aperture sampling in renderKernel.
scene_t *sceneSetup(int plotNelements,
                    dfloat *plotx,
                    dfloat *ploty,
                    dfloat *plotz,
                    dfloat *plotq){
  int i;
  int Nmaterials = 64;
  material_t *materials = (material_t*) calloc(Nmaterials, sizeof(material_t));
  // materials[0]: plain white reflector
  materials[0].diffuse.red = 1;
  materials[0].diffuse.green = 1;
  materials[0].diffuse.blue = 1;
  materials[0].reflection = 1;
  materials[0].eta = 1;
  materials[0].refraction = 0;
  materials[0].info.refractor = 0;
  materials[0].info.reflector = 1;
  materials[0].info.emitter = 0;
  // materials[1]: green, both reflecting and refracting
  materials[1].diffuse.red = 0;
  materials[1].diffuse.green = 240/255.;
  materials[1].diffuse.blue = 20/255.;
  materials[1].reflection = .3;
  materials[1].eta = .7;
  materials[1].refraction = .1;
  materials[1].info.refractor = 1;
  materials[1].info.reflector = 1;
  materials[1].info.emitter = 0;
  // remaining materials: random colour, randomly reflective and/or refractive
  for(i=2;i<Nmaterials;++i){
    dfloat red = 0,green = 0,blue =0;
    red = drandRange48(0.125,0.8);
    green = drandRange48(0.125,0.8);
    blue = drandRange48(0.125,0.8);
    materials[i].diffuse.red = red;
    materials[i].diffuse.green = green;
    materials[i].diffuse.blue = blue;
    materials[i].eta = 2;
    materials[i].refraction = 1; // transmission coeff
    if(drand48() > .5){
      materials[i].reflection = .9;
      materials[i].info.reflector = 1;
    }
    if(drand48() > .5){
      materials[i].refraction = .9;
      materials[i].info.refractor = 1;
    }
    // ensure every material interacts with light somehow
    if(!materials[i].info.refractor && !materials[i].info.reflector){
      materials[i].info.reflector = 1;
    }
#if 0
    printf("materials[%d] = {{rgb=%g,%g,%g},{reflection=%g,refraction=%g,eta=%g},info {reflector=%d,refractor=%d,emitter=%d}\n",
	   i,
	   materials[i].diffuse.red,
	   materials[i].diffuse.green,
	   materials[i].diffuse.blue,
	   materials[i].reflection,
	   materials[i].refraction,
	   materials[i].eta,
	   materials[i].info.reflector,
	   materials[i].info.refractor,
	   materials[i].info.emitter);
#endif
  }
  int Ntriangles = plotNelements;
  int Nrectangles = 1;
  int Nshapes = Ntriangles + Nrectangles;
  // length of side of world box
  dfloat L = BOXSIZE;
  shape_t *shapes = (shape_t*) calloc(Nshapes, sizeof(shape_t));
  // measure the extent of the incoming plot data (3 vertices per element)
  dfloat triXmin = 1e9, triXmax = -1e9;
  dfloat triYmin = 1e9, triYmax = -1e9;
  dfloat triZmin = 1e9, triZmax = -1e9;
  for(int n=0;n<plotNelements*3;++n){
    triXmin = min(triXmin, plotx[n]);
    triXmax = max(triXmax, plotx[n]);
    triYmin = min(triYmin, ploty[n]);
    triYmax = max(triYmax, ploty[n]);
    triZmin = min(triZmin, plotz[n]);
    triZmax = max(triZmax, plotz[n]);
  }
  printf("Ntriangles = %d in range (%lg,%lg x %lg,%lg x %lg,%lg)\n",
	 Ntriangles, triXmin, triXmax, triYmin, triYmax, triZmin, triZmax);
  // largest extent: used to scale the plot isotropically into [0,1]
  dfloat maxL = max(triXmax-triXmin, max(triYmax-triYmin, triZmax-triZmin));
  int bcnt = 0;
  // placement of the plotted object in the world box (rotation currently 0)
  // NOTE(review): bcosrot/bsinrot are precomputed but the loop below calls
  // cos(brot)/sin(brot) directly - confirm which was intended.
  dfloat brot = 0;
  dfloat bcosrot = cos(brot);
  dfloat bsinrot = sin(brot);
  // NOTE(review): the drandRange48(...) results on the next three lines are
  // discarded (fixed offsets of 500 are used instead); the calls still
  // advance the drand48 stream, so removing them would change later draws.
  dfloat boffx = 500; drandRange48(250, L-250);
  dfloat boffy = 500; drandRange48(250, L-250);
  dfloat boffz = 500; drandRange48(250, L-250);
  dfloat bscal = 800; // world-space size of the rescaled object
  int bmat = 32;      // material id given to all plot triangles
  printf("bmat = %d\n", bmat);
  dfloat newTriXmin = 1e9, newTriXmax = -1e9;
  dfloat newTriYmin = 1e9, newTriYmax = -1e9;
  dfloat newTriZmin = 1e9, newTriZmax = -1e9;
  for(i=0;i<Ntriangles;++i){
    // normalize vertices into [0,1] (y and z swapped so "up" matches the renderer)
    for(int v=0;v<3;++v){
      shapes[bcnt].triangle.vertices[v] =
	vectorCreate((plotx[i*3+v]-triXmin)/maxL,
		     (plotz[i*3+v]-triZmin)/maxL,
		     (ploty[i*3+v]-triYmin)/maxL); // swapped y and z
      shapes[bcnt].triangle.q[v] = plotq[i*3+v];
    }
    // flip winding by swapping vertices 1 and 2
    vector_t tmp = shapes[bcnt].triangle.vertices[1];
    shapes[bcnt].triangle.vertices[1] = shapes[bcnt].triangle.vertices[2];
    shapes[bcnt].triangle.vertices[2] = tmp;
    // scale, flip vertically, rotate about y, and translate into the world box
    for(int v=0;v<3;++v){
      dfloat x = bscal*shapes[bcnt].triangle.vertices[v].x;
      dfloat y = L - bscal*shapes[bcnt].triangle.vertices[v].y;
      dfloat z = bscal*shapes[bcnt].triangle.vertices[v].z;
      dfloat xrot = cos(brot)*x + sin(brot)*z;
      dfloat zrot = -sin(brot)*x + cos(brot)*z;
      shapes[bcnt].triangle.vertices[v].x = boffx + xrot;
      shapes[bcnt].triangle.vertices[v].y = y;
      shapes[bcnt].triangle.vertices[v].z = boffz + zrot;
#if 1
      newTriXmin = min(newTriXmin, shapes[bcnt].triangle.vertices[v].x);
      newTriXmax = max(newTriXmax, shapes[bcnt].triangle.vertices[v].x);
      newTriYmin = min(newTriYmin, shapes[bcnt].triangle.vertices[v].y);
      newTriYmax = max(newTriYmax, shapes[bcnt].triangle.vertices[v].y);
      newTriZmin = min(newTriZmin, shapes[bcnt].triangle.vertices[v].z);
      newTriZmax = max(newTriZmax, shapes[bcnt].triangle.vertices[v].z);
#endif
    }
    shapes[bcnt].material = bmat;
    shapes[bcnt].type = TRIANGLE;
    shapes[bcnt].id = bcnt;
    ++bcnt;
  }
  printf("Ntriangles = %d in range (%lg,%lg x %lg,%lg x %lg,%lg)\n",
	 Ntriangles, newTriXmin, newTriXmax, newTriYmin, newTriYmax, newTriZmin, newTriZmax);
  int cnt = Ntriangles;
  // add one rectangle (the y=L plane of the box; material -1 => checkerboard)
  if(Nrectangles>0){
    vector_t a = vectorCreate(0, L, 0);
    vector_t b = vectorCreate(0, L, L);
    vector_t c = vectorCreate(L, L, L);
    vector_t d = vectorCreate(L, L, 0);
    vector_t ab = vectorSub(d,a);
    vector_t ad = vectorSub(b,a);
    shapes[cnt].rectangle.length[0] = vectorNorm(ab);
    shapes[cnt].rectangle.length[1] = vectorNorm(ad);
    shapes[cnt].rectangle.axis[0] = vectorNormalize(ab);
    shapes[cnt].rectangle.axis[1] = vectorNormalize(ad);
    // center = average of the four corners
    shapes[cnt].rectangle.center = vectorScale(0.25, vectorAdd(vectorAdd(a,b),vectorAdd(c,d)));
    shapes[cnt].material = -1;
    shapes[cnt].type = RECTANGLE;
    shapes[cnt].id = cnt;
    ++cnt;
  }
  // five point lights around/above the box
  int Nlights = 5;
  light_t *lights = (light_t*) calloc(Nlights, sizeof(light_t));
  lights[0].pos.x = L/2;
  lights[0].pos.y = 0;
  lights[0].pos.z = -100;
  lights[0].intensity.red = 1;
  lights[0].intensity.green = 1;
  lights[0].intensity.blue = 1;
  lights[1].pos.x = 3200;
  lights[1].pos.y = 3000;
  lights[1].pos.z = -1000;
  lights[1].intensity.red = 0.6;
  lights[1].intensity.green = 0.7;
  lights[1].intensity.blue = 1;
  lights[2].pos.x = 600;
  lights[2].pos.y = 0;
  lights[2].pos.z = -100;
  lights[2].intensity.red = 0.3;
  lights[2].intensity.green = 0.5;
  lights[2].intensity.blue = 1;
  lights[3].pos.x = L/2;
  lights[3].pos.y = 0;
  lights[3].pos.z = L/2;
  lights[3].intensity.red = 0.8;
  lights[3].intensity.green = 0.8;
  lights[3].intensity.blue = 1;
  lights[4].pos.x = L;
  lights[4].pos.y = L;
  lights[4].pos.z = -1000;
  lights[4].intensity.red = 1;
  lights[4].intensity.green = 1;
  lights[4].intensity.blue = 1;
  // assemble the scene container
  scene_t *scene = (scene_t*) calloc(1, sizeof(scene_t));
  scene->Ntriangles = plotNelements;
  scene->Nlights = Nlights;
  scene->lights = lights;
  scene->Nshapes = Nshapes;
  scene->shapes = shapes;
  scene->Nmaterials = Nmaterials;
  scene->materials = materials;
  scene->grid = gridSetup(L, 1600);
  // random points projected onto the unit circle - consumed as aperture
  // offsets by renderKernel
  scene->randomNumbers = (dfloat*) calloc(2*NRANDOM, sizeof(dfloat));
  for(int i=0;i<NRANDOM;++i){
    dfloat r1 = 2*drand48()-1;
    dfloat r2 = 2*drand48()-1;
    scene->randomNumbers[2*i+0] = r1/sqrt(r1*r1+r2*r2);
    scene->randomNumbers[2*i+1] = r2/sqrt(r1*r1+r2*r2);
  }
  return scene;
}
/* Output data as a binary PPM (P6) file: header "P6 <w> <h> 255" followed by
 * 3 bytes (RGB) per pixel. `img` must hold width*height*3 bytes. */
void saveppm(char *filename, unsigned char *img, int width, int height){
  /* Open file for writing */
  FILE *f = fopen(filename, "wb");
  /* fix: previously a NULL file pointer was passed straight to fprintf */
  if(!f){
    fprintf(stderr, "saveppm: unable to open %s for writing\n", filename);
    return;
  }
  /* PPM header info, including the size of the image */
  fprintf(f, "P6 %d %d %d\n", width, height, 255);
  /* Write the image data to the file - remember 3 bytes per pixel */
  fwrite(img, 3, width*height, f);
  /* Make sure you close the file */
  fclose(f);
}
/* One-time scene construction for the ray tracer.
 *
 * plotNelements:      number of surface triangles.
 * plotx/ploty/plotz:  vertex coordinates (3 per triangle).
 * plotq:              scalar field value at each vertex (3 per triangle).
 *
 * Builds the host-side scene, mirrors it onto the GPU, and bins the
 * shapes into the acceleration grid. Returns the initialized scene.
 *
 * The original also computed a dim3 launch configuration (TX/TY/B/G)
 * that was never used; that dead code is removed here.
 */
scene_t *simpleRayTracerSetup(int plotNelements,
                              dfloat *plotx,
                              dfloat *ploty,
                              dfloat *plotz,
                              dfloat *plotq){
  // initialize triangles and spheres on the host
  scene_t *scene = sceneSetup(plotNelements, plotx, ploty, plotz, plotq);
  // port to GPU
  sceneOffload(scene);
  // bin the device-side shapes into the uniform acceleration grid
  void populateGrid(grid_t *grid, int Nshapes, shape_t *c_shapes);
  populateGrid(scene->grid, scene->Nshapes, scene->c_shapes);
  return scene;
}
// to compile animation:
// ffmpeg -y -i image_%05d.ppm -pix_fmt yuv420p foo.mp4
// Lazily-built global scene: created on the first call to simpleRayTracer
// and reused (with updated field data) on subsequent frames.
scene_t *scene = NULL;
/* Render one frame of the scene to images/<fileBaseName>_<fileIndex>.ppm.
 *
 * The scene is built lazily on the first call (via simpleRayTracerSetup)
 * and cached in the global `scene`; later calls only refresh the
 * per-vertex field values before re-rendering. fileIndex also drives the
 * camera rotation angle (one degree per index).
 */
void simpleRayTracer(int plotNelements,
                     dfloat *plotx,
                     dfloat *ploty,
                     dfloat *plotz,
                     dfloat *plotq,
                     const char *fileBaseName,
                     const int fileIndex){
  // initialize triangles and spheres (first call only)
  if(!scene)
    scene = simpleRayTracerSetup(plotNelements, plotx, ploty, plotz, plotq);
  // update the scalar field stored on each triangle vertex
  for(int i=0;i<scene->Ntriangles;++i){
    for(int v=0;v<3;++v){
      scene->shapes[i].triangle.q[v] = plotq[i*3+v];
    }
  }
  cudaMemcpy(scene->c_shapes, scene->shapes, scene->Nshapes*sizeof(shape_t), cudaMemcpyHostToDevice);
  // 1. location of observer eye (before rotation)
  sensor_t sensor;
  // background color
  sensor.bg.red = 126./256;
  sensor.bg.green = 192./256;
  sensor.bg.blue = 238./256;
  dfloat br = 3.75f*BOXSIZE;
  // angle elevation to y-z plane
  dfloat eyeAngle = .5*M_PI/2.f; // 0 is above, pi/2 is from side. M_PI/3; 0; M_PI/2.;
  // target view
  vector_t targetX = vectorCreate(BOXSIZE/2., BOXSIZE, BOXSIZE/2.); // this I do not understand why target -B/2
  sensor.eyeX = vectorAdd(targetX, vectorCreate(0, -br*cos(eyeAngle), -br*sin(eyeAngle)));
  // NOTE(review): this line originally read
  //   "dfloat sensorAngle = eyeAngle; +15.*M_PI/180.;"
  // The trailing "+15.*M_PI/180.;" was a statement with no effect,
  // most likely a typo for "eyeAngle + 15.*M_PI/180.". The dead
  // expression is removed here; rendered output is unchanged. Confirm
  // whether the +15 degree tilt was actually intended.
  dfloat sensorAngle = eyeAngle;
  sensor.Idir = vectorCreate(1.f, 0.f, 0.f);
  sensor.Jdir = vectorCreate(0.f, sin(sensorAngle), -cos(sensorAngle));
  // only used by the diagnostic printf below (disabled with #if 0)
  vector_t sensorNormal = vectorCrossProduct(sensor.Idir, sensor.Jdir);
#if 0
  printf("eyeX = %g,%g,%g \n, IDir = %g,%g,%g, \n, Jdir = %g,%g,%g, Ndir = %g,%g,%g\n",
         sensor.eyeX.x,
         sensor.eyeX.y,
         sensor.eyeX.z,
         sensor.Idir.x,
         sensor.Idir.y,
         sensor.Idir.z,
         sensor.Jdir.x,
         sensor.Jdir.y,
         sensor.Jdir.z,
         sensorNormal.x,
         sensorNormal.y,
         sensorNormal.z);
#endif
  // 2.4 length of sensor in axis 1 & 2
  sensor.Ilength = 20.f;
  sensor.Jlength = HEIGHT*20.f/WIDTH;
  sensor.offset = 0.f;
  // 2.5 normal distance from sensor to focal plane
  dfloat lensOffset = 50;
  sensor.lensC = vectorAdd(sensor.eyeX, vectorScale(lensOffset, vectorCrossProduct(sensor.Idir, sensor.Jdir)));
  // why 0.25 ?
  sensor.focalPlaneOffset = 0.22f*fabs(vectorTripleProduct(sensor.Idir, sensor.Jdir, vectorSub(targetX,sensor.eyeX))); // triple product
  // printf("lensOffset = %g, sensor.focalPlaneOffset = %g\n", lensOffset, sensor.focalPlaneOffset);
  /* rotation angle in y-z */
  dfloat theta = M_PI*fileIndex*1./180.;
  int TX = 8, TY = 8;
  dim3 B(TX,TY,1);
  dim3 G( (WIDTH+TX-1)/TX, (HEIGHT+TY-1)/TY, 1);
  /* render scene */
  renderKernel
  <<<G,B>>>(WIDTH,
            HEIGHT,
            scene->grid[0],
            sensor,
            scene->Nshapes,
            scene->c_shapes,
            scene->Nlights,
            scene->c_lights,
            scene->Nmaterials,
            scene->c_materials,
            cos(theta),
            sin(theta),
            scene->c_randomNumbers,
            scene->c_img);
  /* copy image back to host */
  unsigned char *img = (unsigned char*) calloc(3*WIDTH*HEIGHT, sizeof(char));
  cudaMemcpy(img, scene->c_img, 3*WIDTH*HEIGHT*sizeof(char), cudaMemcpyDeviceToHost);
  // make sure images directory exists
  mkdir("images", S_IRUSR | S_IREAD | S_IWUSR | S_IWRITE | S_IXUSR | S_IEXEC);
  char fileName[BUFSIZ];
  sprintf(fileName, "images/%s_%05d.ppm", fileBaseName, fileIndex);
  saveppm(fileName, img, WIDTH, HEIGHT);
  free(img);
}
|
14ae7838111aaf93dbb9e84fb32fa0df8d58b0a5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// @file im2row_gpu.cu
// @brief Stack image patches as matrix rows (GPU)
// @author Andrea Vedaldi
/*
Copyright (C) 2014-15 Andrea Vedaldi.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
#include "im2row6D.hpp"
#include "../datacu.hpp"
#include <iostream>
using namespace vl ;
/* ---------------------------------------------------------------- */
/* im2row */
/* ---------------------------------------------------------------- */
// Forward im2row over a 6-D tensor sampled along two spatial dimensions
// (x,y) and two additional "An" dimensions (xAn,yAn) per channel z.
// Each thread copies the full 4-D window of one patch slice into its
// column of `stacked`, writing zeros for taps that fall in the padding.
template <typename T> __global__ void
im2row_forward_kernel(T* stacked,
                      T const* data,
                      const int numPatchesX,
                      const int numPatchesY,
                      const int numPatchesXAn,
                      const int numPatchesYAn,
                      const int numPatchSlices,
                      const int width, const int height,
                      const int widthAn, const int heightAn,
                      const int windowWidth, const int windowHeight,
                      const int windowWidthAn, const int windowHeightAn,
                      const int strideX, const int strideY,
                      const int strideXAn, const int strideYAn,
                      const int padLeft, const int padTop,
                      const int padLeftAn, const int padTopAn)
{
  /* each kernel copies the pixels in an image patch for one channel */
  int index = threadIdx.x + blockIdx.x * blockDim.x ;
  if (index < numPatchSlices) {
    /*
     get the patch slice (x, y, xAn, yAn, z) to copy
    */
    int x = index ;
    int y = x / numPatchesX ;
    int xAn = y / numPatchesY ;
    int yAn = xAn / numPatchesXAn ;
    int z = yAn / numPatchesYAn ;
    x %= numPatchesX ;
    y %= numPatchesY ;
    xAn %= numPatchesXAn ;
    yAn %= numPatchesYAn ;
    /*
     pick the top-left corner of the patch slice in the input image
    */
    int x_data = x * strideX - padLeft ;
    int y_data = y * strideY - padTop ;
    int x_dataAn = xAn * strideXAn - padLeftAn ;
    int y_dataAn = yAn * strideYAn - padTopAn ;
    // NOTE: with padding active this pointer can sit before the start of
    // the tensor; the bounds checks below keep every dereference in range
    data += (((z * heightAn + y_dataAn) * widthAn + x_dataAn ) * height + y_data ) * width + x_data;
    /*
     pick the column of the stacked image which contains this patch,
     and move down along the column at the beginning of the patch slice
    */
    int patchSliceOffset = (windowWidth*windowHeight*windowWidthAn*windowHeightAn) * z ;
    stacked += (((numPatchesYAn * patchSliceOffset + yAn) * numPatchesXAn + xAn ) * numPatchesY + y)
    * numPatchesX + x;
    /*
     copy the patch slice: in-bounds taps are copied, padded taps zeroed
    */
    for (int s = 0 ; s < windowHeightAn ; s += 1) {
      for (int t = 0 ; t < windowWidthAn ; t += 1) {
        for (int v = 0 ; v < windowHeight ; v += 1) {
          for (int u = 0 ; u < windowWidth ; u += 1) {
            if (y_data + v >= 0 &&
                y_data + v < height &&
                x_data + u >= 0 &&
                x_data + u < width &&
                y_dataAn + s >= 0 &&
                y_dataAn + s < heightAn &&
                x_dataAn + t >= 0 &&
                x_dataAn + t < widthAn) {
              *stacked = data[s * width * height * widthAn + t * width * height + v * width + u] ;
            } else {
              *stacked = 0 ;
            }
            // advance one full row of the patch matrix
            stacked += (numPatchesX*numPatchesY*numPatchesXAn*numPatchesYAn) ;
          }
        }
      }
    }
  }
}
/* ---------------------------------------------------------------- */
/* im2row backward kernel */
/* ---------------------------------------------------------------- */
// The next two functions assume b > 0.
// Floor division for ints, assuming b > 0: rounds the quotient toward
// negative infinity (C's `/` truncates toward zero instead).
__forceinline__ __device__
int floordiv6D(int a, int b)
{
  int quotient = a / b ;
  int remainder = a - quotient * b ;
  return (a < 0 && remainder != 0) ? quotient - 1 : quotient ;
}
// Ceiling division for ints, assuming b > 0: rounds the quotient toward
// positive infinity (C's `/` truncates toward zero instead).
__forceinline__ __device__
int ceildiv6D(int a, int b)
{
  int quotient = a / b ;
  int remainder = a - quotient * b ;
  return (a > 0 && remainder != 0) ? quotient + 1 : quotient ;
}
// Host-side mirror of floordiv6D: floor division assuming b > 0.
int floordiv6D_cpu(int a, int b)
{
  int quotient = a / b ;
  int remainder = a - quotient * b ;
  return (a < 0 && remainder != 0) ? quotient - 1 : quotient ;
}
// Host-side mirror of ceildiv6D: ceiling division assuming b > 0.
int ceildiv6D_cpu(int a, int b)
{
  int quotient = a / b ;
  int remainder = a - quotient * b ;
  return (a > 0 && remainder != 0) ? quotient + 1 : quotient ;
}
// Backward (transpose) of im2row: each thread owns one element of the
// input gradient `data` and accumulates into it every entry of `stacked`
// that was copied from that element in the forward pass. Patch
// membership is solved per dimension as a linear Diophantine equation
// (see the long derivation below). The host wrapper computes the Bezout
// data with vl::gcd(stride, 1, ...), i.e. dilation is fixed at 1, so
// every gcd parameter is 1 on entry.
template <typename T> __global__ void
im2row_backward_kernel(T* data,
                       T const* stacked,
                       const int numPatchesX,
                       const int numPatchesY,
                       const int numPatchesXAn,
                       const int numPatchesYAn,
                       const int dataVolume,
                       const int width,
                       const int height,
                       const int widthAn,
                       const int heightAn,
                       const int depth,
                       const int windowWidth,
                       const int windowHeight,
                       const int windowWidthAn,
                       const int windowHeightAn,
                       const int strideX,
                       const int strideY,
                       const int strideXAn,
                       const int strideYAn,
                       const int padLeft,
                       const int padTop,
                       const int padLeftAn,
                       const int padTopAn,
                       const int gcdx, const int gcdy,
                       const int xbar, const int ybar,
                       const int ubar, const int vbar,
                       const int gcdxAn, const int gcdyAn,
                       const int xbarAn, const int ybarAn,
                       const int ubarAn, const int vbarAn)
{
  int index = threadIdx.x + blockIdx.x * blockDim.x;
  if (index < dataVolume)
  {
    T accumulator = 0 ;
    /*
     The goal of this kernel is to accumulate data[index]=data[x_data,y_data]
     all elements of the patch matrix that received copies of data[index] in the forward
     pass. To do this, we need to find which patches (x,y) that contain
     copies of this pixel and the relative offsets (u,v) within each such
     patch.
     First, we find which patches (x,y) contain copies of pixel (x_data,y_data)
     in the input tensor. The input tensor coordinate (x_data,y_data) of
     pixel (u,v) in patch (x,y) are related by equations:
     x_data = x * strideX + u * dilateX - padLeft,
     y_data = y * strideY + v * dilateY - padTop.
     Now we find all values of (x,y) that can be generated by this equation.
     These gives us the patches (x,y) that must be summed. We have:
     strideX * x + dilateX * u = x_data + padLeft.
     where x and u are integers. This is a linear Diophantine equation.
     Rewrite it as:
     ax + bu = c, where
     a = strideX,
     b = dilateY,
     c = x_data + padLeft.
     This equation has a solution only if the greatest common divisor
     g = gcd(a,b) of a and b divides c as well. In this case,
     let (x0,u0) be a solution (i.e. a x0 + b u0 = c); all other solutions
     are in the form
     x_k = x0 + Dx * k, Dx = b/g,
     u_k = u0 - Du * k, Du = a/g.
     Next, we look for the values of k such that x_k and u_k are within
     bounds:
     1) 0 <= x_k <= Pw - 1
     2) 0 <= u_k <= Ww - 1
     Thus
     0) recall: gcd(a,b) must divide c
     1) ceil(- x0/Dx) <= k <= floor((Iw - 1 - x0)/Dx)
     2) ceil((u0 - Ww + 1)/Du) <= k <= floor(u0/Du)
     Thus we need to look for the k in the interval
     k_min = ceil(max(-x0/Dx, (u0 - Ww + 1)/Du)),
     k_max = floor(min((Pw - 1 - x0)/Dx,u0/Du).
     Together with (*) and the corresponding equations for y,
     this produces a list of patches (x_k,y_p) that contains
     pixel (x_data,y_data) (the list can be empty).
     Furthermore, x_data is mapped to a specific pixel in
     patch x_k whose coordinate is u_k, also given above.
    */
    // unpack the linear index into (x_data, y_data, x_dataAn, y_dataAn, z)
    int x_data = index ;
    int y_data = x_data / width ;
    int x_dataAn = y_data / height ;
    int y_dataAn = x_dataAn / widthAn ;
    int z = y_dataAn / heightAn ;
    x_data %= width ;
    y_data %= height ;
    x_dataAn %= widthAn ;
    y_dataAn %= heightAn ;
    int cx = x_data + padLeft ;
    int cy = y_data + padTop ;
    int cxAn = x_dataAn + padLeftAn ;
    int cyAn = y_dataAn + padTopAn ;
    int qx = cx / gcdx ;
    int qy = cy / gcdy ;
    int qxAn = cxAn / gcdxAn ;
    int qyAn = cyAn / gcdyAn ;
    // if any gcd does not divide its c, the Diophantine equation has no
    // solution: no patch contains this pixel, its gradient is zero
    if (cx != gcdx * qx || cy != gcdy * qy || cxAn != gcdxAn * qxAn || cyAn != gcdyAn * qyAn) { data[index] = 0 ; return ; }
    // particular Bezout solutions scaled by c/g for each dimension
    int x0 = xbar * qx ;
    int u0 = ubar * qx ;
    int y0 = ybar * qy ;
    int v0 = vbar * qy ;
    int x0An = xbarAn * qxAn ;
    int u0An = ubarAn * qxAn ;
    int y0An = ybarAn * qyAn ;
    int v0An = vbarAn * qyAn ;
    // solution steps Dx = b/g, Du = a/g (here b = dilation = 1)
    int Dx = 1 / gcdx ;
    int Du = strideX / gcdx ;
    int Dy = 1 / gcdy ;
    int Dv = strideY / gcdy ;
    int DxAn = 1 / gcdxAn ;
    int DuAn = strideXAn / gcdxAn ;
    int DyAn = 1 / gcdyAn ;
    int DvAn = strideYAn / gcdyAn ;
    // clamp the solution index k (and q, kAn, qAn) so both the patch
    // coordinate and the in-window offset stay inside their ranges
    int kmin1 = ceildiv6D(-x0,Dx) ;
    int kmax1 = floordiv6D(numPatchesX - 1 - x0,Dx) ;
    int kmin2 = ceildiv6D(u0 - windowWidth + 1,Du) ;
    int kmax2 = floordiv6D(u0,Du) ;
    int kmin = max(kmin1,kmin2) ;
    int kmax = min(kmax1,kmax2) ;
    int qmin1 = ceildiv6D(-y0,Dy) ;
    int qmax1 = floordiv6D(numPatchesY - 1 - y0,Dy) ;
    int qmin2 = ceildiv6D(v0 - windowHeight + 1,Dv) ;
    int qmax2 = floordiv6D(v0,Dv) ;
    int qmin = max(qmin1,qmin2) ;
    int qmax = min(qmax1,qmax2) ;
    int kmin1An = ceildiv6D(-x0An,DxAn) ;
    int kmax1An = floordiv6D(numPatchesXAn - 1 - x0An,DxAn) ;
    int kmin2An = ceildiv6D(u0An - windowWidthAn + 1,DuAn) ;
    int kmax2An = floordiv6D(u0An,DuAn) ;
    int kminAn = max(kmin1An,kmin2An) ;
    int kmaxAn = min(kmax1An,kmax2An) ;
    int qmin1An = ceildiv6D(-y0An,DyAn) ;
    int qmax1An = floordiv6D(numPatchesYAn - 1 - y0An,DyAn) ;
    int qmin2An = ceildiv6D(v0An - windowHeightAn + 1,DvAn) ;
    int qmax2An = floordiv6D(v0An,DvAn) ;
    int qminAn = max(qmin1An,qmin2An) ;
    int qmaxAn = min(qmax1An,qmax2An) ;
    /*
     Now we have kmin <= k <= kmax, qmin <= q <= qmax and
     x_k = x0 + Dx * k, u_k = u0 - Du * k,
     y_q = y0 + Dy * q, v_q = v0 - Dv * q.
     Thus for each (k,q) in the allowable range, we visit
     patch (x_k,y_q) and pixel (u_k,v_q) within it.
     (x_k,y_q) tells us which row of the patch matrix to look for, and
     (u_k,v_q) tells us which column. Linearizing all this:
     pm_row(k,q) = y_q * numPatchesX + x_k,
     pm_col(k,q) = ((z * windowHeight) + v_q) * windowWidth + u_k.
     This is further linearized into an index:
     pm_index(k,q) = (numPatchesX*numPatchesY) * pm_col(k,q) + pm_row(k,q)
     Substituting everything
     pm_row(k,q)
     = (y0 + Dy * q) * numPatchesX + x0 + Dx * k
     = (numPatchesX * Dy) * q + Dx * k + (y0 * numPatchesX + x0)
     = rqc * q + rkc * k + roc
     pm_col(k,q)
     = ((z * windowHeight) + v0 - Dv * q) * windowWidth + u0 - Du * k
     = - (windowWidth * Dv) * q - (Du) * k + (windowHeight * windowWidth * z + v0 * windowWidth + u0)
     = cqc * q + ckc * k + coc ;
     pm_index(k,q)
     = (numPatchesX*numPatchesY) * (cqc * q + ckc * k + coc) + rqc * q + rkc * k + roc
     = (numPatchesX*numPatchesY * cqc + rqc) * q + (numPatchesX*numPatchesY * ckc + rkc) * k + (numPatchesX*numPatchesY * coc + roc)
     = iqc * q + ikc * k + ioc
    */
    int rqcAn = DyAn * numPatchesXAn * numPatchesX * numPatchesY ;
    int rkcAn = DxAn * numPatchesX * numPatchesY ;
    int rqc = numPatchesX * Dy ;
    int rkc = Dx ;
    int roc = y0An * numPatchesXAn * numPatchesX * numPatchesY + x0An * numPatchesX * numPatchesY + numPatchesX * y0 + x0 ;
    int cqcAn = - windowWidthAn * windowHeight * windowWidth * DvAn;
    int ckcAn = - windowHeight * windowWidth * DuAn;
    int cqc = - windowWidth * Dv ;
    int ckc = - Du ;
    int coc = (( windowWidthAn * (windowHeightAn * z + v0An) + u0An ) * windowHeight + v0) * windowWidth + u0;
    int np = numPatchesX * numPatchesY * numPatchesXAn * numPatchesYAn ;
    int iqcAn = np * cqcAn + rqcAn ;
    int ikcAn = np * ckcAn + rkcAn ;
    int iqc = np * cqc + rqc ;
    int ikc = np * ckc + rkc ;
    int ioc = np * coc + roc ;
    stacked += ioc ;
    // sum every patch-matrix entry that the forward pass filled from
    // this input element
    for (int qAn = qminAn ; qAn <= qmaxAn ; ++ qAn) {
      for (int kAn = kminAn ; kAn <= kmaxAn ; ++ kAn) {
        for (int q = qmin ; q <= qmax ; ++ q) {
          for (int k = kmin ; k <= kmax ; ++ k) {
            accumulator += stacked[iqcAn * qAn + ikcAn * kAn + iqc * q + ikc * k] ;
          }
        }
      }
    }
    data[index] = accumulator;
  }
}
namespace vl { namespace impl {

  // GPU (HIP) implementation of the 6-D im2row operator: expands 4-D
  // sampling windows of a 6-D input into rows of a matrix (forward) and
  // scatters/accumulates them back (backward).
  template<typename type>
  struct im2row6D<vl::VLDT_GPU, type>
  {

    /* ------------------------------------------------------------ */
    /*                                                      forward */
    /* ------------------------------------------------------------ */

    // Expand `data` (width x height x widthAn x heightAn x depth) into
    // the patch matrix `stacked`; one GPU thread per patch slice.
    // Returns the context's error state captured after the launch.
    static vl::ErrorCode
    forward(Context & context,
            type* stacked,
            type const* data,
            size_t width,
            size_t height,
            size_t widthAn,
            size_t heightAn,
            size_t depth,
            size_t windowWidth,
            size_t windowHeight,
            size_t windowWidthAn,
            size_t windowHeightAn,
            size_t strideX,
            size_t strideY,
            size_t strideXAn,
            size_t strideYAn,
            size_t padLeft,
            size_t padRight,
            size_t padTop,
            size_t padBottom,
            size_t padLeftAn,
            size_t padRightAn,
            size_t padTopAn,
            size_t padBottomAn)
    {
      /* Each kernel instance copies a feature dimension of a patch */
      // number of patch positions along each of the four sampled dims
      int numPatchesX = (width + (padLeft + padRight) - windowWidth)/strideX + 1 ;
      int numPatchesY = (height + (padTop + padBottom) - windowHeight)/strideY + 1 ;
      int numPatchesXAn = (widthAn + (padLeftAn + padRightAn) - windowWidthAn)/strideXAn + 1 ;
      int numPatchesYAn = (heightAn + (padTopAn + padBottomAn) - windowHeightAn)/strideYAn + 1 ;
      int numPatchSlices = numPatchesX * numPatchesY * numPatchesXAn * numPatchesYAn * depth ;
      hipLaunchKernelGGL(( im2row_forward_kernel<type>)
      , dim3(divideAndRoundUp(numPatchSlices, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0,
      stacked,
      data,
      numPatchesX,
      numPatchesY,
      numPatchesXAn,
      numPatchesYAn,
      numPatchSlices,
      width, height,
      widthAn, heightAn,
      windowWidth, windowHeight,
      windowWidthAn, windowHeightAn,
      strideX, strideY,
      strideXAn, strideYAn,
      padLeft, padTop,
      padLeftAn, padTopAn) ;
      return context.setError(context.getCudaHelper().catchCudaError(__func__)) ;
    }

    /* ------------------------------------------------------------ */
    /*                                                     backward */
    /* ------------------------------------------------------------ */

    // Accumulate the patch-matrix gradient `stacked` back into the
    // input gradient `data`; one GPU thread per input element.
    static vl::ErrorCode
    backward(Context & context,
             type* data,
             type const* stacked,
             size_t width,
             size_t height,
             size_t widthAn,
             size_t heightAn,
             size_t depth,
             size_t windowWidth,
             size_t windowHeight,
             size_t windowWidthAn,
             size_t windowHeightAn,
             size_t strideX,
             size_t strideY,
             size_t strideXAn,
             size_t strideYAn,
             size_t padLeft,
             size_t padRight,
             size_t padTop,
             size_t padBottom,
             size_t padLeftAn,
             size_t padRightAn,
             size_t padTopAn,
             size_t padBottomAn)
    {
      /*
       Each kernel integrates all contributions to a particular element
       of data.
      */
      int numPatchesX = (width + (padLeft + padRight) - windowWidth)/strideX + 1 ;
      int numPatchesY = (height + (padTop + padBottom) - windowHeight)/strideY + 1 ;
      int numPatchesXAn = (widthAn + (padLeftAn + padRightAn) - windowWidthAn)/strideXAn + 1 ;
      int numPatchesYAn = (heightAn + (padTopAn + padBottomAn) - windowHeightAn)/strideYAn + 1 ;
      int dataVolume = width * height * widthAn * heightAn * depth ;
      // Bezout coefficients for stride*xbar + 1*ubar = gcd(stride, 1);
      // the second argument (dilation) is fixed at 1 here
      int xbar ;
      int ubar ;
      int gcdx = vl::gcd(strideX, 1, xbar, ubar) ;
      int ybar ;
      int vbar ;
      int gcdy = vl::gcd(strideY, 1, ybar, vbar) ;
      int xbarAn ;
      int ubarAn ;
      int gcdxAn = vl::gcd(strideXAn, 1, xbarAn, ubarAn) ;
      int ybarAn ;
      int vbarAn ;
      int gcdyAn = vl::gcd(strideYAn, 1, ybarAn, vbarAn) ;
      hipLaunchKernelGGL(( im2row_backward_kernel<type>)
      , dim3(divideAndRoundUp(dataVolume, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0,
      data,
      stacked,
      numPatchesX,
      numPatchesY,
      numPatchesXAn,
      numPatchesYAn,
      dataVolume,
      width, height, widthAn, heightAn, depth,
      windowWidth, windowHeight,
      windowWidthAn, windowHeightAn,
      strideX, strideY,
      strideXAn, strideYAn,
      padLeft, padTop,
      padLeftAn, padTopAn,
      gcdx, gcdy, xbar, ybar, ubar, vbar,
      gcdxAn, gcdyAn, xbarAn, ybarAn, ubarAn, vbarAn) ;
      return context.setError(context.getCudaHelper().catchCudaError(__func__)) ;
    }
  } ;
} }
// Instantiations
template struct vl::impl::im2row6D<vl::VLDT_GPU, float> ;
#ifdef ENABLE_DOUBLE
template struct vl::impl::im2row6D<vl::VLDT_GPU, double> ;
#endif
| 14ae7838111aaf93dbb9e84fb32fa0df8d58b0a5.cu | // @file im2row_gpu.cu
// @brief Stack image patches as matrix rows (GPU)
// @author Andrea Vedaldi
/*
Copyright (C) 2014-15 Andrea Vedaldi.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
#include "im2row6D.hpp"
#include "../datacu.hpp"
#include <iostream>
using namespace vl ;
/* ---------------------------------------------------------------- */
/* im2row */
/* ---------------------------------------------------------------- */
// Forward im2row over a 6-D tensor sampled along two spatial dimensions
// (x,y) and two additional "An" dimensions (xAn,yAn) per channel z.
// Each thread copies the full 4-D window of one patch slice into its
// column of `stacked`, writing zeros for taps that fall in the padding.
template <typename T> __global__ void
im2row_forward_kernel(T* stacked,
                      T const* data,
                      const int numPatchesX,
                      const int numPatchesY,
                      const int numPatchesXAn,
                      const int numPatchesYAn,
                      const int numPatchSlices,
                      const int width, const int height,
                      const int widthAn, const int heightAn,
                      const int windowWidth, const int windowHeight,
                      const int windowWidthAn, const int windowHeightAn,
                      const int strideX, const int strideY,
                      const int strideXAn, const int strideYAn,
                      const int padLeft, const int padTop,
                      const int padLeftAn, const int padTopAn)
{
  /* each kernel copies the pixels in an image patch for one channel */
  int index = threadIdx.x + blockIdx.x * blockDim.x ;
  if (index < numPatchSlices) {
    /*
     get the patch slice (x, y, xAn, yAn, z) to copy
    */
    int x = index ;
    int y = x / numPatchesX ;
    int xAn = y / numPatchesY ;
    int yAn = xAn / numPatchesXAn ;
    int z = yAn / numPatchesYAn ;
    x %= numPatchesX ;
    y %= numPatchesY ;
    xAn %= numPatchesXAn ;
    yAn %= numPatchesYAn ;
    /*
     pick the top-left corner of the patch slice in the input image
    */
    int x_data = x * strideX - padLeft ;
    int y_data = y * strideY - padTop ;
    int x_dataAn = xAn * strideXAn - padLeftAn ;
    int y_dataAn = yAn * strideYAn - padTopAn ;
    // NOTE: with padding active this pointer can sit before the start of
    // the tensor; the bounds checks below keep every dereference in range
    data += (((z * heightAn + y_dataAn) * widthAn + x_dataAn ) * height + y_data ) * width + x_data;
    /*
     pick the column of the stacked image which contains this patch,
     and move down along the column at the beginning of the patch slice
    */
    int patchSliceOffset = (windowWidth*windowHeight*windowWidthAn*windowHeightAn) * z ;
    stacked += (((numPatchesYAn * patchSliceOffset + yAn) * numPatchesXAn + xAn ) * numPatchesY + y)
    * numPatchesX + x;
    /*
     copy the patch slice: in-bounds taps are copied, padded taps zeroed
    */
    for (int s = 0 ; s < windowHeightAn ; s += 1) {
      for (int t = 0 ; t < windowWidthAn ; t += 1) {
        for (int v = 0 ; v < windowHeight ; v += 1) {
          for (int u = 0 ; u < windowWidth ; u += 1) {
            if (y_data + v >= 0 &&
                y_data + v < height &&
                x_data + u >= 0 &&
                x_data + u < width &&
                y_dataAn + s >= 0 &&
                y_dataAn + s < heightAn &&
                x_dataAn + t >= 0 &&
                x_dataAn + t < widthAn) {
              *stacked = data[s * width * height * widthAn + t * width * height + v * width + u] ;
            } else {
              *stacked = 0 ;
            }
            // advance one full row of the patch matrix
            stacked += (numPatchesX*numPatchesY*numPatchesXAn*numPatchesYAn) ;
          }
        }
      }
    }
  }
}
/* ---------------------------------------------------------------- */
/* im2row backward kernel */
/* ---------------------------------------------------------------- */
// The next two functions assume b > 0.
// Floor division for ints, assuming b > 0: rounds the quotient toward
// negative infinity (C's `/` truncates toward zero instead).
__forceinline__ __device__
int floordiv6D(int a, int b)
{
  int quotient = a / b ;
  int remainder = a - quotient * b ;
  return (a < 0 && remainder != 0) ? quotient - 1 : quotient ;
}
// Ceiling division for ints, assuming b > 0: rounds the quotient toward
// positive infinity (C's `/` truncates toward zero instead).
__forceinline__ __device__
int ceildiv6D(int a, int b)
{
  int quotient = a / b ;
  int remainder = a - quotient * b ;
  return (a > 0 && remainder != 0) ? quotient + 1 : quotient ;
}
// Host-side mirror of floordiv6D: floor division assuming b > 0.
int floordiv6D_cpu(int a, int b)
{
  int quotient = a / b ;
  int remainder = a - quotient * b ;
  return (a < 0 && remainder != 0) ? quotient - 1 : quotient ;
}
// Host-side mirror of ceildiv6D: ceiling division assuming b > 0.
int ceildiv6D_cpu(int a, int b)
{
  int quotient = a / b ;
  int remainder = a - quotient * b ;
  return (a > 0 && remainder != 0) ? quotient + 1 : quotient ;
}
// Backward (transpose) of im2row: each thread owns one element of the
// input gradient `data` and accumulates into it every entry of `stacked`
// that was copied from that element in the forward pass. Patch
// membership is solved per dimension as a linear Diophantine equation
// (see the long derivation below). The host wrapper computes the Bezout
// data with vl::gcd(stride, 1, ...), i.e. dilation is fixed at 1, so
// every gcd parameter is 1 on entry.
template <typename T> __global__ void
im2row_backward_kernel(T* data,
                       T const* stacked,
                       const int numPatchesX,
                       const int numPatchesY,
                       const int numPatchesXAn,
                       const int numPatchesYAn,
                       const int dataVolume,
                       const int width,
                       const int height,
                       const int widthAn,
                       const int heightAn,
                       const int depth,
                       const int windowWidth,
                       const int windowHeight,
                       const int windowWidthAn,
                       const int windowHeightAn,
                       const int strideX,
                       const int strideY,
                       const int strideXAn,
                       const int strideYAn,
                       const int padLeft,
                       const int padTop,
                       const int padLeftAn,
                       const int padTopAn,
                       const int gcdx, const int gcdy,
                       const int xbar, const int ybar,
                       const int ubar, const int vbar,
                       const int gcdxAn, const int gcdyAn,
                       const int xbarAn, const int ybarAn,
                       const int ubarAn, const int vbarAn)
{
  int index = threadIdx.x + blockIdx.x * blockDim.x;
  if (index < dataVolume)
  {
    T accumulator = 0 ;
    /*
     The goal of this kernel is to accumulate data[index]=data[x_data,y_data]
     all elements of the patch matrix that received copies of data[index] in the forward
     pass. To do this, we need to find which patches (x,y) that contain
     copies of this pixel and the relative offsets (u,v) within each such
     patch.
     First, we find which patches (x,y) contain copies of pixel (x_data,y_data)
     in the input tensor. The input tensor coordinate (x_data,y_data) of
     pixel (u,v) in patch (x,y) are related by equations:
     x_data = x * strideX + u * dilateX - padLeft,
     y_data = y * strideY + v * dilateY - padTop.
     Now we find all values of (x,y) that can be generated by this equation.
     These gives us the patches (x,y) that must be summed. We have:
     strideX * x + dilateX * u = x_data + padLeft.
     where x and u are integers. This is a linear Diophantine equation.
     Rewrite it as:
     ax + bu = c, where
     a = strideX,
     b = dilateY,
     c = x_data + padLeft.
     This equation has a solution only if the greatest common divisor
     g = gcd(a,b) of a and b divides c as well. In this case,
     let (x0,u0) be a solution (i.e. a x0 + b u0 = c); all other solutions
     are in the form
     x_k = x0 + Dx * k, Dx = b/g,
     u_k = u0 - Du * k, Du = a/g.
     Next, we look for the values of k such that x_k and u_k are within
     bounds:
     1) 0 <= x_k <= Pw - 1
     2) 0 <= u_k <= Ww - 1
     Thus
     0) recall: gcd(a,b) must divide c
     1) ceil(- x0/Dx) <= k <= floor((Iw - 1 - x0)/Dx)
     2) ceil((u0 - Ww + 1)/Du) <= k <= floor(u0/Du)
     Thus we need to look for the k in the interval
     k_min = ceil(max(-x0/Dx, (u0 - Ww + 1)/Du)),
     k_max = floor(min((Pw - 1 - x0)/Dx,u0/Du).
     Together with (*) and the corresponding equations for y,
     this produces a list of patches (x_k,y_p) that contains
     pixel (x_data,y_data) (the list can be empty).
     Furthermore, x_data is mapped to a specific pixel in
     patch x_k whose coordinate is u_k, also given above.
    */
    // unpack the linear index into (x_data, y_data, x_dataAn, y_dataAn, z)
    int x_data = index ;
    int y_data = x_data / width ;
    int x_dataAn = y_data / height ;
    int y_dataAn = x_dataAn / widthAn ;
    int z = y_dataAn / heightAn ;
    x_data %= width ;
    y_data %= height ;
    x_dataAn %= widthAn ;
    y_dataAn %= heightAn ;
    int cx = x_data + padLeft ;
    int cy = y_data + padTop ;
    int cxAn = x_dataAn + padLeftAn ;
    int cyAn = y_dataAn + padTopAn ;
    int qx = cx / gcdx ;
    int qy = cy / gcdy ;
    int qxAn = cxAn / gcdxAn ;
    int qyAn = cyAn / gcdyAn ;
    // if any gcd does not divide its c, the Diophantine equation has no
    // solution: no patch contains this pixel, its gradient is zero
    if (cx != gcdx * qx || cy != gcdy * qy || cxAn != gcdxAn * qxAn || cyAn != gcdyAn * qyAn) { data[index] = 0 ; return ; }
    // particular Bezout solutions scaled by c/g for each dimension
    int x0 = xbar * qx ;
    int u0 = ubar * qx ;
    int y0 = ybar * qy ;
    int v0 = vbar * qy ;
    int x0An = xbarAn * qxAn ;
    int u0An = ubarAn * qxAn ;
    int y0An = ybarAn * qyAn ;
    int v0An = vbarAn * qyAn ;
    // solution steps Dx = b/g, Du = a/g (here b = dilation = 1)
    int Dx = 1 / gcdx ;
    int Du = strideX / gcdx ;
    int Dy = 1 / gcdy ;
    int Dv = strideY / gcdy ;
    int DxAn = 1 / gcdxAn ;
    int DuAn = strideXAn / gcdxAn ;
    int DyAn = 1 / gcdyAn ;
    int DvAn = strideYAn / gcdyAn ;
    // clamp the solution index k (and q, kAn, qAn) so both the patch
    // coordinate and the in-window offset stay inside their ranges
    int kmin1 = ceildiv6D(-x0,Dx) ;
    int kmax1 = floordiv6D(numPatchesX - 1 - x0,Dx) ;
    int kmin2 = ceildiv6D(u0 - windowWidth + 1,Du) ;
    int kmax2 = floordiv6D(u0,Du) ;
    int kmin = max(kmin1,kmin2) ;
    int kmax = min(kmax1,kmax2) ;
    int qmin1 = ceildiv6D(-y0,Dy) ;
    int qmax1 = floordiv6D(numPatchesY - 1 - y0,Dy) ;
    int qmin2 = ceildiv6D(v0 - windowHeight + 1,Dv) ;
    int qmax2 = floordiv6D(v0,Dv) ;
    int qmin = max(qmin1,qmin2) ;
    int qmax = min(qmax1,qmax2) ;
    int kmin1An = ceildiv6D(-x0An,DxAn) ;
    int kmax1An = floordiv6D(numPatchesXAn - 1 - x0An,DxAn) ;
    int kmin2An = ceildiv6D(u0An - windowWidthAn + 1,DuAn) ;
    int kmax2An = floordiv6D(u0An,DuAn) ;
    int kminAn = max(kmin1An,kmin2An) ;
    int kmaxAn = min(kmax1An,kmax2An) ;
    int qmin1An = ceildiv6D(-y0An,DyAn) ;
    int qmax1An = floordiv6D(numPatchesYAn - 1 - y0An,DyAn) ;
    int qmin2An = ceildiv6D(v0An - windowHeightAn + 1,DvAn) ;
    int qmax2An = floordiv6D(v0An,DvAn) ;
    int qminAn = max(qmin1An,qmin2An) ;
    int qmaxAn = min(qmax1An,qmax2An) ;
    /*
     Now we have kmin <= k <= kmax, qmin <= q <= qmax and
     x_k = x0 + Dx * k, u_k = u0 - Du * k,
     y_q = y0 + Dy * q, v_q = v0 - Dv * q.
     Thus for each (k,q) in the allowable range, we visit
     patch (x_k,y_q) and pixel (u_k,v_q) within it.
     (x_k,y_q) tells us which row of the patch matrix to look for, and
     (u_k,v_q) tells us which column. Linearizing all this:
     pm_row(k,q) = y_q * numPatchesX + x_k,
     pm_col(k,q) = ((z * windowHeight) + v_q) * windowWidth + u_k.
     This is further linearized into an index:
     pm_index(k,q) = (numPatchesX*numPatchesY) * pm_col(k,q) + pm_row(k,q)
     Substituting everything
     pm_row(k,q)
     = (y0 + Dy * q) * numPatchesX + x0 + Dx * k
     = (numPatchesX * Dy) * q + Dx * k + (y0 * numPatchesX + x0)
     = rqc * q + rkc * k + roc
     pm_col(k,q)
     = ((z * windowHeight) + v0 - Dv * q) * windowWidth + u0 - Du * k
     = - (windowWidth * Dv) * q - (Du) * k + (windowHeight * windowWidth * z + v0 * windowWidth + u0)
     = cqc * q + ckc * k + coc ;
     pm_index(k,q)
     = (numPatchesX*numPatchesY) * (cqc * q + ckc * k + coc) + rqc * q + rkc * k + roc
     = (numPatchesX*numPatchesY * cqc + rqc) * q + (numPatchesX*numPatchesY * ckc + rkc) * k + (numPatchesX*numPatchesY * coc + roc)
     = iqc * q + ikc * k + ioc
    */
    int rqcAn = DyAn * numPatchesXAn * numPatchesX * numPatchesY ;
    int rkcAn = DxAn * numPatchesX * numPatchesY ;
    int rqc = numPatchesX * Dy ;
    int rkc = Dx ;
    int roc = y0An * numPatchesXAn * numPatchesX * numPatchesY + x0An * numPatchesX * numPatchesY + numPatchesX * y0 + x0 ;
    int cqcAn = - windowWidthAn * windowHeight * windowWidth * DvAn;
    int ckcAn = - windowHeight * windowWidth * DuAn;
    int cqc = - windowWidth * Dv ;
    int ckc = - Du ;
    int coc = (( windowWidthAn * (windowHeightAn * z + v0An) + u0An ) * windowHeight + v0) * windowWidth + u0;
    int np = numPatchesX * numPatchesY * numPatchesXAn * numPatchesYAn ;
    int iqcAn = np * cqcAn + rqcAn ;
    int ikcAn = np * ckcAn + rkcAn ;
    int iqc = np * cqc + rqc ;
    int ikc = np * ckc + rkc ;
    int ioc = np * coc + roc ;
    stacked += ioc ;
    // sum every patch-matrix entry that the forward pass filled from
    // this input element
    for (int qAn = qminAn ; qAn <= qmaxAn ; ++ qAn) {
      for (int kAn = kminAn ; kAn <= kmaxAn ; ++ kAn) {
        for (int q = qmin ; q <= qmax ; ++ q) {
          for (int k = kmin ; k <= kmax ; ++ k) {
            accumulator += stacked[iqcAn * qAn + ikcAn * kAn + iqc * q + ikc * k] ;
          }
        }
      }
    }
    data[index] = accumulator;
  }
}
namespace vl { namespace impl {

  // GPU (CUDA) implementation of the 6-D im2row operator: expands 4-D
  // sampling windows of a 6-D input into rows of a matrix (forward) and
  // scatters/accumulates them back (backward).
  template<typename type>
  struct im2row6D<vl::VLDT_GPU, type>
  {

    /* ------------------------------------------------------------ */
    /*                                                      forward */
    /* ------------------------------------------------------------ */

    // Expand `data` (width x height x widthAn x heightAn x depth) into
    // the patch matrix `stacked`; one GPU thread per patch slice.
    // Returns the context's error state captured after the launch.
    static vl::ErrorCode
    forward(Context & context,
            type* stacked,
            type const* data,
            size_t width,
            size_t height,
            size_t widthAn,
            size_t heightAn,
            size_t depth,
            size_t windowWidth,
            size_t windowHeight,
            size_t windowWidthAn,
            size_t windowHeightAn,
            size_t strideX,
            size_t strideY,
            size_t strideXAn,
            size_t strideYAn,
            size_t padLeft,
            size_t padRight,
            size_t padTop,
            size_t padBottom,
            size_t padLeftAn,
            size_t padRightAn,
            size_t padTopAn,
            size_t padBottomAn)
    {
      /* Each kernel instance copies a feature dimension of a patch */
      // number of patch positions along each of the four sampled dims
      int numPatchesX = (width + (padLeft + padRight) - windowWidth)/strideX + 1 ;
      int numPatchesY = (height + (padTop + padBottom) - windowHeight)/strideY + 1 ;
      int numPatchesXAn = (widthAn + (padLeftAn + padRightAn) - windowWidthAn)/strideXAn + 1 ;
      int numPatchesYAn = (heightAn + (padTopAn + padBottomAn) - windowHeightAn)/strideYAn + 1 ;
      int numPatchSlices = numPatchesX * numPatchesY * numPatchesXAn * numPatchesYAn * depth ;
      im2row_forward_kernel<type>
      <<< divideAndRoundUp(numPatchSlices, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>
      (stacked,
      data,
      numPatchesX,
      numPatchesY,
      numPatchesXAn,
      numPatchesYAn,
      numPatchSlices,
      width, height,
      widthAn, heightAn,
      windowWidth, windowHeight,
      windowWidthAn, windowHeightAn,
      strideX, strideY,
      strideXAn, strideYAn,
      padLeft, padTop,
      padLeftAn, padTopAn) ;
      return context.setError(context.getCudaHelper().catchCudaError(__func__)) ;
    }

    /* ------------------------------------------------------------ */
    /*                                                     backward */
    /* ------------------------------------------------------------ */

    // Accumulate the patch-matrix gradient `stacked` back into the
    // input gradient `data`; one GPU thread per input element.
    static vl::ErrorCode
    backward(Context & context,
             type* data,
             type const* stacked,
             size_t width,
             size_t height,
             size_t widthAn,
             size_t heightAn,
             size_t depth,
             size_t windowWidth,
             size_t windowHeight,
             size_t windowWidthAn,
             size_t windowHeightAn,
             size_t strideX,
             size_t strideY,
             size_t strideXAn,
             size_t strideYAn,
             size_t padLeft,
             size_t padRight,
             size_t padTop,
             size_t padBottom,
             size_t padLeftAn,
             size_t padRightAn,
             size_t padTopAn,
             size_t padBottomAn)
    {
      /*
       Each kernel integrates all contributions to a particular element
       of data.
      */
      int numPatchesX = (width + (padLeft + padRight) - windowWidth)/strideX + 1 ;
      int numPatchesY = (height + (padTop + padBottom) - windowHeight)/strideY + 1 ;
      int numPatchesXAn = (widthAn + (padLeftAn + padRightAn) - windowWidthAn)/strideXAn + 1 ;
      int numPatchesYAn = (heightAn + (padTopAn + padBottomAn) - windowHeightAn)/strideYAn + 1 ;
      int dataVolume = width * height * widthAn * heightAn * depth ;
      // Bezout coefficients for stride*xbar + 1*ubar = gcd(stride, 1);
      // the second argument (dilation) is fixed at 1 here
      int xbar ;
      int ubar ;
      int gcdx = vl::gcd(strideX, 1, xbar, ubar) ;
      int ybar ;
      int vbar ;
      int gcdy = vl::gcd(strideY, 1, ybar, vbar) ;
      int xbarAn ;
      int ubarAn ;
      int gcdxAn = vl::gcd(strideXAn, 1, xbarAn, ubarAn) ;
      int ybarAn ;
      int vbarAn ;
      int gcdyAn = vl::gcd(strideYAn, 1, ybarAn, vbarAn) ;
      im2row_backward_kernel<type>
      <<< divideAndRoundUp(dataVolume, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>
      (data,
      stacked,
      numPatchesX,
      numPatchesY,
      numPatchesXAn,
      numPatchesYAn,
      dataVolume,
      width, height, widthAn, heightAn, depth,
      windowWidth, windowHeight,
      windowWidthAn, windowHeightAn,
      strideX, strideY,
      strideXAn, strideYAn,
      padLeft, padTop,
      padLeftAn, padTopAn,
      gcdx, gcdy, xbar, ybar, ubar, vbar,
      gcdxAn, gcdyAn, xbarAn, ybarAn, ubarAn, vbarAn) ;
      return context.setError(context.getCudaHelper().catchCudaError(__func__)) ;
    }
  } ;
} }
// Instantiations
template struct vl::impl::im2row6D<vl::VLDT_GPU, float> ;
#ifdef ENABLE_DOUBLE
template struct vl::impl::im2row6D<vl::VLDT_GPU, double> ;
#endif
|
204564410ecb3aac3aab59da6753e42be16ae95a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <math.h>
// Element-wise vector addition: C[i] = A[i] + B[i] for i < n.
// Extra threads beyond n fall through the bounds guard and do nothing.
__global__
void vecAddKernel(float* A, float* B, float* C, int n){
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) {
        C[i] = A[i] + B[i];
        // Debug trace of this thread's own element. The original printed *C
        // (always C[0]) before any thread was guaranteed to have written it,
        // which read uninitialized device memory and raced with the store.
        printf("-A: %f B: %f C: %f \n", A[i], B[i], C[i]);
    }
}
// Host wrapper: copies A and B to the device, runs vecAddKernel over all n
// elements, and copies the result back into C.
void vecAdd(float* A, float* B, float* C, int n){
    int size = n * sizeof(float);
    float *d_A, *d_B, *d_C;
    hipMalloc((void**) &d_A, size);
    hipMemcpy(d_A, A, size, hipMemcpyHostToDevice);
    hipMalloc((void**) &d_B, size);
    hipMemcpy(d_B, B, size, hipMemcpyHostToDevice);
    hipMalloc((void**) &d_C, size);
    // Ceil-divide so any n is covered; the previous launch hard-coded one
    // block of 10 threads and silently dropped elements beyond the first 10.
    int threads = 256;
    int blocks = (n + threads - 1) / threads;
    hipLaunchKernelGGL(( vecAddKernel), dim3(blocks), dim3(threads), 0, 0, d_A, d_B, d_C, n);
    // Kernel launches do not return errors directly; check explicitly.
    hipError_t err = hipGetLastError();
    if (err != hipSuccess) {
        fprintf(stderr, "vecAddKernel launch failed: %s\n", hipGetErrorString(err));
    }
    hipMemcpy(C, d_C, size, hipMemcpyDeviceToHost);
    hipFree(d_A); hipFree(d_B); hipFree(d_C);
    printf("d_c: %f \n", *C);
}
// Driver: adds two constant 10-element vectors on the GPU and prints the sum.
int main(){
    printf("nani: \n");
    const int kCount = 10;
    float A[kCount];
    float B[kCount];
    float C[kCount];
    for (int idx = 0; idx < kCount; ++idx) {
        A[idx] = 1.0;
        B[idx] = 2.0;
    }
    vecAdd(A, B, C, kCount);
    for (int idx = 0; idx < kCount; ++idx) {
        printf("%f ", C[idx]);
    }
    printf("\n");
    return 0;
}
/**usr/local/cuda/bin*/
| 204564410ecb3aac3aab59da6753e42be16ae95a.cu | #include <stdio.h>
#include <cuda.h>
#include <math.h>
// Element-wise vector addition: C[i] = A[i] + B[i] for i < n.
// Extra threads beyond n fall through the bounds guard and do nothing.
__global__
void vecAddKernel(float* A, float* B, float* C, int n){
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) {
        C[i] = A[i] + B[i];
        // Debug trace of this thread's own element. The original printed *C
        // (always C[0]) before any thread was guaranteed to have written it,
        // which read uninitialized device memory and raced with the store.
        printf("-A: %f B: %f C: %f \n", A[i], B[i], C[i]);
    }
}
// Host wrapper: copies A and B to the device, runs vecAddKernel over all n
// elements, and copies the result back into C.
void vecAdd(float* A, float* B, float* C, int n){
    int size = n * sizeof(float);
    float *d_A, *d_B, *d_C;
    cudaMalloc((void**) &d_A, size);
    cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
    cudaMalloc((void**) &d_B, size);
    cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);
    cudaMalloc((void**) &d_C, size);
    // Ceil-divide so any n is covered; the previous launch hard-coded
    // <<<1,10>>> and silently dropped elements beyond the first 10.
    int threads = 256;
    int blocks = (n + threads - 1) / threads;
    vecAddKernel<<<blocks, threads>>>(d_A, d_B, d_C, n);
    // Kernel launches do not return errors directly; check explicitly.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "vecAddKernel launch failed: %s\n", cudaGetErrorString(err));
    }
    cudaMemcpy(C, d_C, size, cudaMemcpyDeviceToHost);
    cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
    printf("d_c: %f \n", *C);
}
// Driver: adds two constant 10-element vectors on the GPU and prints the sum.
int main(){
    printf("nani: \n");
    const int kCount = 10;
    float A[kCount];
    float B[kCount];
    float C[kCount];
    for (int idx = 0; idx < kCount; ++idx) {
        A[idx] = 1.0;
        B[idx] = 2.0;
    }
    vecAdd(A, B, C, kCount);
    for (int idx = 0; idx < kCount; ++idx) {
        printf("%f ", C[idx]);
    }
    printf("\n");
    return 0;
}
/**usr/local/cuda/bin*/
|
ead31b9137583d63ac4cbd11fe2c9cddde1feeca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHUNN.h"
#include "common.h"
#include "THHReduceApplyUtils.cuh"
#include <thrust/functional.h>
#define MULTILABELMARGIN_THREADS 1024
// Multi-label margin loss, forward. One block per sample (blockIdx.x == k);
// the block's threads cooperate across the `dim` classes of that sample.
// Per-sample loss: sum over (target t, non-target d) of max(0, 1 - x[t] + x[d]),
// divided by dim, and additionally by nframe when sizeaverage != 0.
// `target` holds 1-based (TH_INDEX_BASE) class indices, terminated by the
// first entry that maps below 0; `istarget` is filled here as a 0/1 mask.
__global__ void cunn_MultiLabelMarginCriterion_updateOutput_kernel(float *output,
float *input,
float *target,
float *istarget,
int nframe,
int dim,
int sizeaverage)
{
// Temporary sums (for mapreduce)
__shared__ float sums[MULTILABELMARGIN_THREADS];
// vectors:
int k = blockIdx.x;
float *input_k = input + k*dim;
float *target_k = target + k*dim;
float *output_k = output + k;
float *istarget_k = istarget + k*dim;
// zero istarget
for (int d = threadIdx.x; d < dim; d += blockDim.x) {
istarget_k[d] = 0;
}
__syncthreads();
// mark targets in istarget
// (single-threaded: the target list is short and order-dependent)
if (threadIdx.x == 0) {
for (int dt = 0; dt < dim; dt++) {
int target_idx = (int)target_k[dt] - TH_INDEX_BASE;
if (target_idx < 0) break;
istarget_k[target_idx] = 1;
}
}
__syncthreads();
// iterate over targets
float sum = 0;
for (int dt = 0; dt < dim; dt++) {
// next target:
int target_idx = (int)target_k[dt] - TH_INDEX_BASE;
if (target_idx < 0) break;
// current value for target
float input_target_k = input_k[target_idx];
// compare to all inputs (multithreaded):
for (int d = threadIdx.x; d < dim; d += blockDim.x) {
// contribute to loss only if not a target
if (!istarget_k[d]) {
// hinge term: penalize non-targets scoring within margin 1 of the target
float z = 1 - input_target_k + input_k[d];
if (z > 0)
sum += z;
}
}
}
// reduce: combine the per-thread partial sums across the whole block
float totalSum = reduceBlock(sums, blockDim.x, sum, thrust::plus<float>(), 0.0f);
if (threadIdx.x == 0) {
if (sizeaverage) {
*output_k = (totalSum / dim) / nframe;
} else {
*output_k = totalSum / dim;
}
}
}
// Multi-label margin loss, backward. One block per sample (blockIdx.x == k).
// For every active hinge term (non-target d with 1 - x[t] + x[d] > 0) the
// gradient is +g at d and -g at the target t, where g normalizes by dim
// (and by nframe when sizeaverage != 0). Assumes `istarget` was populated
// by the forward kernel.
__global__ void cunn_MultiLabelMarginCriterion_updateGradInput_kernel(float *gradInput,
float *input,
float *target,
float *istarget,
int nframe,
int dim,
int sizeaverage)
{
// Temporary sums (for mapreduce)
__shared__ float sums[MULTILABELMARGIN_THREADS];
// vectors:
int k = blockIdx.x;
float *input_k = input + k*dim;
float *gradInput_k = gradInput + k*dim;
float *target_k = target + k*dim;
float *istarget_k = istarget + k*dim;
// gain:
float g = ( sizeaverage ? 1./((float)(nframe*dim)) : 1./((float)dim) );
// zero gradients:
for (int d = threadIdx.x; d < dim; d += blockDim.x) {
gradInput_k[d] = 0;
}
__syncthreads();
// iterate over targets
for (int dt = 0; dt < dim; dt++) {
// next target:
int target_idx = (int)target_k[dt] - TH_INDEX_BASE;
if (target_idx < 0) break;
// current value for target
float input_target_k = input_k[target_idx];
// compare to all inputs (multithreaded):
float sum = 0;
for (int d = threadIdx.x; d < dim; d += blockDim.x) {
// contribute to loss only if not a target
if (!istarget_k[d]) {
float z = 1 - input_target_k + input_k[d];
if (z > 0) {
sum -= g;
gradInput_k[d] += g;
}
}
}
__syncthreads();
// reduce sum: total negative gain attributed to this target's slot
float totalSum = reduceBlock(sums, blockDim.x, sum, thrust::plus<float>(), 0.0f);
if (threadIdx.x == 0) {
gradInput_k[target_idx] += totalSum;
}
// barrier before the next target reuses `sums`
__syncthreads();
}
}
// Host entry point for the forward pass. Makes the tensors contiguous,
// sizes the `istarget` scratch buffer to match the input, then dispatches
// one block per sample. For 2-D input the per-sample losses are computed
// into a temporary vector and summed into the scalar output.
// NOTE(review): each launch uses a fixed MULTILABELMARGIN_THREADS (1024)
// block; the kernels' strided loops handle dim < blockDim.x.
void THNN_CudaMultiLabelMarginCriterion_updateOutput(
THCState *state,
THCudaTensor *input,
THCudaTensor *target,
THCudaTensor *output,
THCudaTensor *istarget,
bool sizeaverage)
{
// newContiguous bumps refcounts; matching _free calls appear at the end.
input = THCudaTensor_newContiguous(state, input);
target = THCudaTensor_newContiguous(state, target);
istarget = THCudaTensor_newContiguous(state, istarget);
THCudaTensor_resizeAs(state, istarget, input);
if(input->nDimension == 1)
{
// Single sample: the kernel writes the scalar loss directly.
THCudaTensor_resize1d(state, output, 1);
dim3 blocks(1);
dim3 threads(MULTILABELMARGIN_THREADS);
hipLaunchKernelGGL(( cunn_MultiLabelMarginCriterion_updateOutput_kernel), dim3(blocks),dim3(threads), 0, 0,
THCudaTensor_data(state, output),
THCudaTensor_data(state, input),
THCudaTensor_data(state, target),
THCudaTensor_data(state, istarget),
1, input->size[0],
sizeaverage
);
THCudaCheck(hipGetLastError());
}
else if(input->nDimension == 2)
{
// Batch: one loss per sample, reduced on the host via sumall.
THCudaTensor *output_tmp = THCudaTensor_newWithSize1d(state, input->size[0]);
dim3 blocks(input->size[0]);
dim3 threads(MULTILABELMARGIN_THREADS);
hipLaunchKernelGGL(( cunn_MultiLabelMarginCriterion_updateOutput_kernel), dim3(blocks),dim3(threads), 0, 0,
THCudaTensor_data(state, output_tmp),
THCudaTensor_data(state, input),
THCudaTensor_data(state, target),
THCudaTensor_data(state, istarget),
input->size[0], input->size[1],
sizeaverage
);
THCudaCheck(hipGetLastError());
THCudaTensor_resize1d(state, output, 1);
THCudaTensor_set1d(state, output, 0, THCudaTensor_sumall(state, output_tmp));
THCudaTensor_free(state, output_tmp);
}
else
THError("vector or matrix expected");
THCudaTensor_free(state, input);
THCudaTensor_free(state, target);
THCudaTensor_free(state, istarget);
}
// Host entry point for the backward pass. Mirrors updateOutput's dispatch:
// one block per sample, gradInput resized to match input. `istarget` is
// consumed as-is — presumably filled by a prior updateOutput call; verify
// callers always run forward first.
void THNN_CudaMultiLabelMarginCriterion_updateGradInput(
THCState *state,
THCudaTensor *input,
THCudaTensor *target,
THCudaTensor *gradInput,
THCudaTensor *istarget,
bool sizeaverage)
{
// newContiguous bumps refcounts; matching _free calls appear at the end.
input = THCudaTensor_newContiguous(state, input);
target = THCudaTensor_newContiguous(state, target);
istarget = THCudaTensor_newContiguous(state, istarget);
THCudaTensor_resizeAs(state, gradInput, input);
if(gradInput->nDimension == 1)
{
dim3 blocks(1);
dim3 threads(MULTILABELMARGIN_THREADS);
hipLaunchKernelGGL(( cunn_MultiLabelMarginCriterion_updateGradInput_kernel), dim3(blocks),dim3(threads), 0, 0, THCudaTensor_data(state, gradInput),
THCudaTensor_data(state, input),
THCudaTensor_data(state, target),
THCudaTensor_data(state, istarget),
1, gradInput->size[0],
sizeaverage);
}
else if(gradInput->nDimension == 2)
{
dim3 blocks(gradInput->size[0]);
dim3 threads(MULTILABELMARGIN_THREADS);
hipLaunchKernelGGL(( cunn_MultiLabelMarginCriterion_updateGradInput_kernel), dim3(blocks),dim3(threads), 0, 0, THCudaTensor_data(state, gradInput),
THCudaTensor_data(state, input),
THCudaTensor_data(state, target),
THCudaTensor_data(state, istarget),
gradInput->size[0], gradInput->size[1],
sizeaverage);
}
else
THError("vector or matrix expected");
// Single launch-error check covers both branches above.
THCudaCheck(hipGetLastError());
THCudaTensor_free(state, input);
THCudaTensor_free(state, target);
THCudaTensor_free(state, istarget);
}
#undef MULTILABELMARGIN_THREADS
| ead31b9137583d63ac4cbd11fe2c9cddde1feeca.cu | #include "THCUNN.h"
#include "common.h"
#include "THCReduceApplyUtils.cuh"
#include <thrust/functional.h>
#define MULTILABELMARGIN_THREADS 1024
// Multi-label margin loss, forward. One block per sample (blockIdx.x == k);
// the block's threads cooperate across the `dim` classes of that sample.
// Per-sample loss: sum over (target t, non-target d) of max(0, 1 - x[t] + x[d]),
// divided by dim, and additionally by nframe when sizeaverage != 0.
// `target` holds 1-based (TH_INDEX_BASE) class indices, terminated by the
// first entry that maps below 0; `istarget` is filled here as a 0/1 mask.
__global__ void cunn_MultiLabelMarginCriterion_updateOutput_kernel(float *output,
float *input,
float *target,
float *istarget,
int nframe,
int dim,
int sizeaverage)
{
// Temporary sums (for mapreduce)
__shared__ float sums[MULTILABELMARGIN_THREADS];
// vectors:
int k = blockIdx.x;
float *input_k = input + k*dim;
float *target_k = target + k*dim;
float *output_k = output + k;
float *istarget_k = istarget + k*dim;
// zero istarget
for (int d = threadIdx.x; d < dim; d += blockDim.x) {
istarget_k[d] = 0;
}
__syncthreads();
// mark targets in istarget
// (single-threaded: the target list is short and order-dependent)
if (threadIdx.x == 0) {
for (int dt = 0; dt < dim; dt++) {
int target_idx = (int)target_k[dt] - TH_INDEX_BASE;
if (target_idx < 0) break;
istarget_k[target_idx] = 1;
}
}
__syncthreads();
// iterate over targets
float sum = 0;
for (int dt = 0; dt < dim; dt++) {
// next target:
int target_idx = (int)target_k[dt] - TH_INDEX_BASE;
if (target_idx < 0) break;
// current value for target
float input_target_k = input_k[target_idx];
// compare to all inputs (multithreaded):
for (int d = threadIdx.x; d < dim; d += blockDim.x) {
// contribute to loss only if not a target
if (!istarget_k[d]) {
// hinge term: penalize non-targets scoring within margin 1 of the target
float z = 1 - input_target_k + input_k[d];
if (z > 0)
sum += z;
}
}
}
// reduce: combine the per-thread partial sums across the whole block
float totalSum = reduceBlock(sums, blockDim.x, sum, thrust::plus<float>(), 0.0f);
if (threadIdx.x == 0) {
if (sizeaverage) {
*output_k = (totalSum / dim) / nframe;
} else {
*output_k = totalSum / dim;
}
}
}
// Multi-label margin loss, backward. One block per sample (blockIdx.x == k).
// For every active hinge term (non-target d with 1 - x[t] + x[d] > 0) the
// gradient is +g at d and -g at the target t, where g normalizes by dim
// (and by nframe when sizeaverage != 0). Assumes `istarget` was populated
// by the forward kernel.
__global__ void cunn_MultiLabelMarginCriterion_updateGradInput_kernel(float *gradInput,
float *input,
float *target,
float *istarget,
int nframe,
int dim,
int sizeaverage)
{
// Temporary sums (for mapreduce)
__shared__ float sums[MULTILABELMARGIN_THREADS];
// vectors:
int k = blockIdx.x;
float *input_k = input + k*dim;
float *gradInput_k = gradInput + k*dim;
float *target_k = target + k*dim;
float *istarget_k = istarget + k*dim;
// gain:
float g = ( sizeaverage ? 1./((float)(nframe*dim)) : 1./((float)dim) );
// zero gradients:
for (int d = threadIdx.x; d < dim; d += blockDim.x) {
gradInput_k[d] = 0;
}
__syncthreads();
// iterate over targets
for (int dt = 0; dt < dim; dt++) {
// next target:
int target_idx = (int)target_k[dt] - TH_INDEX_BASE;
if (target_idx < 0) break;
// current value for target
float input_target_k = input_k[target_idx];
// compare to all inputs (multithreaded):
float sum = 0;
for (int d = threadIdx.x; d < dim; d += blockDim.x) {
// contribute to loss only if not a target
if (!istarget_k[d]) {
float z = 1 - input_target_k + input_k[d];
if (z > 0) {
sum -= g;
gradInput_k[d] += g;
}
}
}
__syncthreads();
// reduce sum: total negative gain attributed to this target's slot
float totalSum = reduceBlock(sums, blockDim.x, sum, thrust::plus<float>(), 0.0f);
if (threadIdx.x == 0) {
gradInput_k[target_idx] += totalSum;
}
// barrier before the next target reuses `sums`
__syncthreads();
}
}
// Host entry point for the forward pass. Makes the tensors contiguous,
// sizes the `istarget` scratch buffer to match the input, then dispatches
// one block per sample. For 2-D input the per-sample losses are computed
// into a temporary vector and summed into the scalar output.
// NOTE(review): each launch uses a fixed MULTILABELMARGIN_THREADS (1024)
// block; the kernels' strided loops handle dim < blockDim.x.
void THNN_CudaMultiLabelMarginCriterion_updateOutput(
THCState *state,
THCudaTensor *input,
THCudaTensor *target,
THCudaTensor *output,
THCudaTensor *istarget,
bool sizeaverage)
{
// newContiguous bumps refcounts; matching _free calls appear at the end.
input = THCudaTensor_newContiguous(state, input);
target = THCudaTensor_newContiguous(state, target);
istarget = THCudaTensor_newContiguous(state, istarget);
THCudaTensor_resizeAs(state, istarget, input);
if(input->nDimension == 1)
{
// Single sample: the kernel writes the scalar loss directly.
THCudaTensor_resize1d(state, output, 1);
dim3 blocks(1);
dim3 threads(MULTILABELMARGIN_THREADS);
cunn_MultiLabelMarginCriterion_updateOutput_kernel<<<blocks,threads>>>(
THCudaTensor_data(state, output),
THCudaTensor_data(state, input),
THCudaTensor_data(state, target),
THCudaTensor_data(state, istarget),
1, input->size[0],
sizeaverage
);
THCudaCheck(cudaGetLastError());
}
else if(input->nDimension == 2)
{
// Batch: one loss per sample, reduced on the host via sumall.
THCudaTensor *output_tmp = THCudaTensor_newWithSize1d(state, input->size[0]);
dim3 blocks(input->size[0]);
dim3 threads(MULTILABELMARGIN_THREADS);
cunn_MultiLabelMarginCriterion_updateOutput_kernel<<<blocks,threads>>>(
THCudaTensor_data(state, output_tmp),
THCudaTensor_data(state, input),
THCudaTensor_data(state, target),
THCudaTensor_data(state, istarget),
input->size[0], input->size[1],
sizeaverage
);
THCudaCheck(cudaGetLastError());
THCudaTensor_resize1d(state, output, 1);
THCudaTensor_set1d(state, output, 0, THCudaTensor_sumall(state, output_tmp));
THCudaTensor_free(state, output_tmp);
}
else
THError("vector or matrix expected");
THCudaTensor_free(state, input);
THCudaTensor_free(state, target);
THCudaTensor_free(state, istarget);
}
// Host entry point for the backward pass. Mirrors updateOutput's dispatch:
// one block per sample, gradInput resized to match input. `istarget` is
// consumed as-is — presumably filled by a prior updateOutput call; verify
// callers always run forward first.
void THNN_CudaMultiLabelMarginCriterion_updateGradInput(
THCState *state,
THCudaTensor *input,
THCudaTensor *target,
THCudaTensor *gradInput,
THCudaTensor *istarget,
bool sizeaverage)
{
// newContiguous bumps refcounts; matching _free calls appear at the end.
input = THCudaTensor_newContiguous(state, input);
target = THCudaTensor_newContiguous(state, target);
istarget = THCudaTensor_newContiguous(state, istarget);
THCudaTensor_resizeAs(state, gradInput, input);
if(gradInput->nDimension == 1)
{
dim3 blocks(1);
dim3 threads(MULTILABELMARGIN_THREADS);
cunn_MultiLabelMarginCriterion_updateGradInput_kernel<<<blocks,threads>>>(THCudaTensor_data(state, gradInput),
THCudaTensor_data(state, input),
THCudaTensor_data(state, target),
THCudaTensor_data(state, istarget),
1, gradInput->size[0],
sizeaverage);
}
else if(gradInput->nDimension == 2)
{
dim3 blocks(gradInput->size[0]);
dim3 threads(MULTILABELMARGIN_THREADS);
cunn_MultiLabelMarginCriterion_updateGradInput_kernel<<<blocks,threads>>>(THCudaTensor_data(state, gradInput),
THCudaTensor_data(state, input),
THCudaTensor_data(state, target),
THCudaTensor_data(state, istarget),
gradInput->size[0], gradInput->size[1],
sizeaverage);
}
else
THError("vector or matrix expected");
// Single launch-error check covers both branches above.
THCudaCheck(cudaGetLastError());
THCudaTensor_free(state, input);
THCudaTensor_free(state, target);
THCudaTensor_free(state, istarget);
}
#undef MULTILABELMARGIN_THREADS
|
193cb9feffeeddc3b350493cc5eea7dd06766929.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file batch_norm.cu
* \brief CUDA Batch Normalization code
* \author Chris Olivier, Bing Xu
* Adapted from Torch
*/
#include <hip/hip_runtime_api.h>
#include <algorithm>
#include "batch_norm-inl.h"
#define WRITE_DATA_FLAG 1
#define WRITE_GAMMA_FLAG 2
#define WRITE_BETA_FLAG 4
#define FIX_GAMMA_FLAG 8
#define IS_TRAINING_FLAG 16
#define USE_GLOBAL_STATS_FLAG 32
#if MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 5
#include "./cudnn/cudnn_batch_norm-inl.h"
#endif
#include "../../common/cuda_utils.h"
using namespace mxnet;
/*! \brief inverse standard deviation <-> variance */
#define VARIANCE_TO_INVSTD(__var$, __eps$) (1.0/sqrt((__var$) + DType(__eps$)))
#define INVSTD_TO_VARIANCE(__invstd$, __eps$) ((1.0 / ((__invstd$) * (__invstd$))) - (__eps$))
namespace mxnet {
namespace op {
namespace batchnorm {
namespace cuda {
static const unsigned WARP_SIZE = 32;
// The maximum number of threads in a block
static const unsigned MAX_BLOCK_SIZE = 512U;
// Static-cast helper usable from both host and device code; centralizes
// DType <-> AccReal conversions used throughout the kernels below.
template<typename In, typename Out>
struct ScalarConvert {
static __host__ __device__ __forceinline__ Out to(const In v) { return (Out) v; }
};
// Number of threads in a block given an input size up to MAX_BLOCK_SIZE
// Pick a power-of-two block size (up to MAX_BLOCK_SIZE) large enough to cover
// `nElem` work items. With `smaller` set, MAX_BLOCK_SIZE itself is excluded
// from the candidates and the fallback is halved.
static unsigned getNumThreads(int nElem, const bool smaller) {
unsigned candidates[5] = {32, 64, 128, 256, MAX_BLOCK_SIZE};
const unsigned elems = static_cast<unsigned>(nElem);
const int candidateCount = smaller ? 4 : 5;
int idx = 0;
while (idx < candidateCount) {
if (elems <= candidates[idx]) {
return candidates[idx];
}
++idx;
}
return smaller ? (MAX_BLOCK_SIZE >> 1) : MAX_BLOCK_SIZE;
}
// Returns the index of the most significant 1 bit in `val`.
__device__ __forceinline__ int getMSB(int val) {
// __clz counts leading zero bits of a 32-bit int; 31 - clz is the index of
// the highest set bit (callers must pass val > 0).
return 31 - __clz(val);
}
// Pair of accumulators reduced together in one pass (used by GradOp to carry
// sum(gradOutput) and dot(input - mean, gradOutput) simultaneously).
template<typename DType, typename AccReal>
struct Float2 {
AccReal v1, v2;
__device__ Float2() {}
__device__ Float2(DType v1, DType v2)
: v1(ScalarConvert<DType, AccReal>::to(v1))
, v2(ScalarConvert<DType, AccReal>::to(v2)) {}
__device__ Float2(DType v)
: v1(ScalarConvert<DType, AccReal>::to(v))
, v2(ScalarConvert<DType, AccReal>::to(v)) {}
__device__ Float2(int v)
: v1(ScalarConvert<int, AccReal>::to(v))
, v2(ScalarConvert<int, AccReal>::to(v)) {}
// Component-wise accumulation, as required by the reduce() machinery.
__device__ Float2 &operator+=(const Float2 &a) {
v1 += a.v1;
v2 += a.v2;
return *this;
}
};
// reduce() functor: yields the raw element value, so the reduction computes
// the sum over (batch, x) of one plane.
template<typename DType, typename AccReal, typename DeviceTensor>
struct SumOp {
__device__ SumOp(const DeviceTensor t) : tensor(t) {}
__device__ __forceinline__ AccReal operator()(int batch, int plane, int n) {
return ScalarConvert<DType, AccReal>::to(tensor.get_ref(batch, plane, n));
}
const DeviceTensor tensor;
};
// reduce() functor: yields (x - mean)^2, so the reduction computes the
// (unnormalized) variance of one plane given a precomputed mean.
template<typename DType, typename AccReal, typename DeviceTensor>
struct VarOp {
__device__ VarOp(AccReal m, const DeviceTensor t)
: mean(m)
, tensor(t) {
}
__device__ __forceinline__ AccReal operator()(int batch, int plane, int n) {
DType val = tensor.get_ref(batch, plane, n);
return (val - mean) * (val - mean);
}
const AccReal mean;
const DeviceTensor tensor;
};
// reduce() functor: yields (gradOutput, gradOutput * (input - mean)) so one
// reduction pass produces both sum(dL/dy) and dot(x - mean, dL/dy).
template<typename DType, typename AccReal, typename DeviceTensor>
struct GradOp {
__device__ GradOp(AccReal m, const DeviceTensor i, const DeviceTensor g)
: mean(m), input(i), gradOutput(g) {}
__device__ __forceinline__ Float2<DType, AccReal> operator()(int batch, int plane, int n) {
const DType g = gradOutput.get_ref(batch, plane, n);
const DType c = ScalarConvert<AccReal, DType>::to(input.get_ref(batch, plane, n) - mean);
return Float2<DType, AccReal>(g, g * c);
}
const AccReal mean;
const DeviceTensor input;
const DeviceTensor gradOutput;
};
#if TORCH_HIP_VERSION >= 9000
#define FULLMASK 0xFFFFFFFF
#define __shfl_xor(...) __shfl_xor_sync(FULLMASK, __VA_ARGS__)
#endif
// Sum across all threads within a warp
// Butterfly (XOR) warp reduction: every lane ends up holding the sum of all
// 32 lanes' values. The pre-CC3.0 fallback goes through shared memory.
template<typename T>
static __device__ __forceinline__ T warpSum(T val) {
#if __CUDA_ARCH__ >= 300
for (int i = 0; i < getMSB(WARP_SIZE); ++i) {
// __shfl_xor is remapped to __shfl_xor_sync(FULLMASK, ...) above when the
// toolkit version is >= 9, so a full-warp mask is always supplied.
val += __shfl_xor(val, 1 << i, WARP_SIZE);
}
#else
__shared__ T values[MAX_BLOCK_SIZE];
values[threadIdx.x] = val;
__threadfence_block();
const int base = (threadIdx.x / WARP_SIZE) * WARP_SIZE;
for (int i = 1; i < WARP_SIZE; i++) {
val += values[base + ((i + threadIdx.x) % WARP_SIZE)];
}
#endif
return val;
}
// Component-wise warp reduction of a Float2 accumulator pair.
template<typename DType, typename AccReal>
static __device__ __forceinline__ Float2<DType, AccReal> warpSum(Float2<DType, AccReal> value) {
value.v1 = warpSum(value.v1);
value.v2 = warpSum(value.v2);
return value;
}
// Sum across (batch, x/y/z) applying Op() pointwise
// Block-wide reduction of op(batch, plane, x) over every (batch, x) pair of
// one plane. Stages: per-thread strided accumulation, warp-level sum, then
// the per-warp partials are combined by the first warp via shared memory.
// Every thread of the block returns the same total (broadcast via shared[0]).
// Requires blockDim.x <= 32 * 32 = 1024 (one shared slot per warp).
template<typename T, typename Op, typename DeviceTensor>
static __device__ T reduce(Op op, DeviceTensor tensor, int plane) {
T sum = (T) 0;
for (int batch = 0; batch < tensor.OuterSize(); ++batch) {
for (int x = threadIdx.x; x < tensor.InnerSize(); x += blockDim.x) {
sum += op(batch, plane, x);
}
}
// sum over NumThreads within a warp
sum = warpSum(sum);
// 'transpose', and reduce within warp again
__shared__ T shared[32];
__syncthreads();
if (threadIdx.x % WARP_SIZE == 0) {
// lane 0 of each warp publishes its warp's partial sum
shared[threadIdx.x / WARP_SIZE] = sum;
}
if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) {
// zero out the other entries in shared
shared[threadIdx.x] = (T) 0;
}
__syncthreads();
if (threadIdx.x / WARP_SIZE == 0) {
sum = warpSum(shared[threadIdx.x]);
if (threadIdx.x == 0) {
shared[0] = sum;
}
}
__syncthreads();
// Everyone picks it up, should be broadcast into the whole gradInput
return shared[0];
}
// Inference / use-global-stats forward path: normalizes with the running
// mean/variance instead of recomputing batch statistics. One block per
// channel (plane == blockIdx.x); threads stride over (batch, spatial).
template <typename DType, typename AccReal, typename DeviceTensor1, typename DeviceTensor>
__global__ void BatchNormalizationUpdateOutputInferenceKernel(
DeviceTensor input,
DeviceTensor output,
DeviceTensor1 runningMean,
DeviceTensor1 runningVar,
DeviceTensor1 saveMean,
DeviceTensor1 saveInvStd,
DeviceTensor1 weight,
DeviceTensor1 bias,
const DType epsilon,
const uint32_t flags) {
int plane = blockIdx.x;
AccReal invstd = VARIANCE_TO_INVSTD(runningVar[plane], epsilon);
AccReal mean = ScalarConvert<DType, AccReal>::to(runningMean[plane]);
// fix_gamma forces gamma == 1; otherwise read the learned scale.
AccReal gamma = ((flags & FIX_GAMMA_FLAG) == 0 && weight.numElements() > 0)
? ScalarConvert<DType, AccReal>::to(weight[plane])
: ScalarConvert<int, AccReal>::to(1);
AccReal beta = bias.numElements() > 0 ? ScalarConvert<DType, AccReal>::to(bias[plane])
: ScalarConvert<int, AccReal>::to(0);
if (threadIdx.x == 0) {
// Mirror the running statistics into the save outputs so downstream
// consumers see consistent mean/inv-std regardless of the path taken.
saveMean[plane] = runningMean[plane];
saveInvStd[plane] = VARIANCE_TO_INVSTD(runningVar[plane], epsilon);
if ((flags & WRITE_GAMMA_FLAG) != 0 && (flags & FIX_GAMMA_FLAG) != 0
&& weight.numElements() > 0) {
weight[plane] = AccReal(1);
}
}
// Write normalized and update the output
for (int batch = 0, nbatch = input.OuterSize(); batch < nbatch; ++batch) {
for (int x = threadIdx.x, nx = input.InnerSize(); x < nx; x += blockDim.x) {
const DType inp = input.get_ref(batch, plane, x);
output.get_ref(batch, plane, x) =
ScalarConvert<AccReal, DType>::to(gamma * (inp - mean) * invstd + beta);
}
}
}
// Training forward path: computes the per-channel batch mean and inverse
// standard deviation (saved into saveMean/saveInvStd for the backward pass)
// and writes the normalized output. One block per channel. Note: the running
// averages are updated in the backward kernel, not here.
template<typename DType, typename AccReal, typename DeviceTensor1, typename DeviceTensor>
__global__ void BatchNormalizationUpdateOutputKernel(
DeviceTensor input,
DeviceTensor output,
DeviceTensor1 weight,
DeviceTensor1 bias,
const AccReal epsilon,
const AccReal momentum,
DeviceTensor1 runningMean,
DeviceTensor1 runningVar,
DeviceTensor1 saveMean,
DeviceTensor1 saveInvStd,
const uint32_t flags) {
const int plane = blockIdx.x;
const int N = input.OuterSize() * input.InnerSize();
const AccReal norm = AccReal(1) / N;
// Compute the mean and variance across (batch, x/y/z)
const AccReal mean = reduce<AccReal>(
SumOp<DType, AccReal, DeviceTensor>(input), input, plane) * norm;
__syncthreads();
const AccReal varN = reduce<AccReal>(VarOp<DType, AccReal, DeviceTensor>(mean, input),
input, plane);
AccReal invStd = 0;
if (varN != AccReal(0) || epsilon != AccReal(0)) {
invStd = AccReal(1.0) / sqrt(varN * norm + epsilon);
}
// Save the mean, variance, and moving averages
if (threadIdx.x == 0) {
// For one item (0th) per plane (channel), write the per-channel data (ie mean, variance, etc)
// Momentum based writeback
saveMean[plane] = ScalarConvert<AccReal, DType>::to(mean);
saveInvStd[plane] = invStd;
if ((flags & WRITE_GAMMA_FLAG) != 0 && (flags & FIX_GAMMA_FLAG) != 0
&& weight.numElements() > 0) {
weight[plane] = AccReal(1);
}
}
// Write normalized and update the output
const AccReal gamma = ((flags & FIX_GAMMA_FLAG) == 0 && weight.numElements() > 0)
? ScalarConvert<DType, AccReal>::to(weight[plane])
: ScalarConvert<int, AccReal>::to(1);
const AccReal beta = bias.numElements() > 0 ? ScalarConvert<DType, AccReal>::to(bias[plane])
: ScalarConvert<int, AccReal>::to(0);
for (int batch = 0, nbatch = input.OuterSize(); batch < nbatch; ++batch) {
for (int x = threadIdx.x, nx = input.InnerSize(); x < nx; x += blockDim.x) {
const DType inp = input.get_ref(batch, plane, x);
output.get_ref(batch, plane, x) =
ScalarConvert<AccReal, DType>::to(gamma * (inp - mean) * invStd + beta);
}
}
}
// Bundle of the per-channel 1-D tensors consumed/produced by the backward
// kernel, grouped so they can be passed as a single kernel argument.
template<typename DeviceTensor1>
struct CUDATensors {
DeviceTensor1 gradWeight;
DeviceTensor1 gradBias;
DeviceTensor1 weight;
DeviceTensor1 runningMean;
DeviceTensor1 runningVar;
DeviceTensor1 saveMean;
DeviceTensor1 saveInvStd;
};
// Backward pass: one block per channel. Computes gradInput, gradWeight and
// gradBias (each gated by the WRITE_* flags) and, in the training path,
// folds the saved batch statistics into the running averages via `momentum`.
template<typename DType, typename AccReal, typename DeviceTensor1, typename DeviceTensor>
static __global__ void BatchNormalizationBackwardKernel(
const DeviceTensor input,
const DeviceTensor gradOutput,
DeviceTensor gradInput,
CUDATensors<DeviceTensor1> tensors,
const uint32_t flags,
const AccReal momentum,
const double eps) {
int plane = blockIdx.x;
int N = gradOutput.OuterSize() * gradOutput.InnerSize();
const bool is_train_and_not_global_stats =
(flags & IS_TRAINING_FLAG) != 0 && (flags & USE_GLOBAL_STATS_FLAG) == 0;
// Use the batch statistics saved by the forward pass when training;
// otherwise derive them from the running averages.
AccReal mean, invstd;
if (is_train_and_not_global_stats) {
mean = ScalarConvert<DType, AccReal>::to(tensors.saveMean[plane]);
invstd = tensors.saveInvStd[plane];
} else {
mean = ScalarConvert<DType, AccReal>::to(tensors.runningMean[plane]);
invstd = VARIANCE_TO_INVSTD(tensors.runningVar[plane], eps);
}
const AccReal weightVal = ((flags & FIX_GAMMA_FLAG) == 0 && tensors.weight.numElements() > 0) ?
ScalarConvert<DType, AccReal>::to(tensors.weight[plane]) : AccReal(1);
const AccReal norm = AccReal(1) / N;
// Compute two values across (batch, x/y/z) in one pass:
// 1. Sum(gradOutput)
// 2. DotProduct(input - mean, gradOutput)
GradOp<DType, AccReal, DeviceTensor> g(mean, input, gradOutput);
Float2< DType, AccReal > res = reduce < Float2 < DType, AccReal >,
GradOp< DType, AccReal, DeviceTensor >, DeviceTensor > (g, gradOutput, plane);
const AccReal gradOutputSum = res.v1;
const AccReal dotP = res.v2;
const AccReal gradMean = gradOutputSum * norm;
const AccReal projScale = dotP * norm * invstd * invstd;
const AccReal gradScale = invstd * weightVal;
if (threadIdx.x == 0 && is_train_and_not_global_stats) {
const AccReal localVariance = INVSTD_TO_VARIANCE(tensors.saveInvStd[plane], eps);
const AccReal localMean = tensors.saveMean[plane];
// update running averages
tensors.runningMean[plane] = tensors.runningMean[plane]
* momentum + localMean * (AccReal(1) - momentum);
tensors.runningVar[plane] = tensors.runningVar[plane]
* momentum + localVariance * (AccReal(1) - momentum);
}
if (gradInput.Size() > 0 && (flags & WRITE_DATA_FLAG) != 0) {
for (int batch = 0, nbatch = gradOutput.OuterSize(); batch < nbatch; ++batch) {
for (int x = threadIdx.x, nx = gradOutput.InnerSize(); x < nx; x += blockDim.x) {
const DType gradOut = gradOutput.get_ref(batch, plane, x);
if (is_train_and_not_global_stats) {
// Full chain rule: mean/variance depend on the input in training.
const DType inp = input.get_ref(batch, plane, x);
const AccReal proj = (inp - mean) * projScale;
gradInput.get_ref(batch, plane, x) =
ScalarConvert<AccReal, DType>::to((gradOut - proj - gradMean) * gradScale);
} else {
// Global-stats path: statistics are constants w.r.t. the input.
gradInput.get_ref(batch, plane, x) = ScalarConvert<AccReal, DType>::to(
gradOut * gradScale);
}
}
}
}
if (tensors.gradWeight.numElements() > 0 && threadIdx.x == 0 && (flags & WRITE_GAMMA_FLAG) != 0) {
if ((flags & FIX_GAMMA_FLAG) == 0) {
tensors.gradWeight[plane] = ScalarConvert<AccReal, DType>::to(dotP * invstd);
} else {
tensors.gradWeight[plane] = DType(0);
}
}
if (tensors.gradBias.numElements() > 0 && threadIdx.x == 0 && (flags & WRITE_BETA_FLAG) != 0) {
tensors.gradBias[plane] = ScalarConvert<AccReal, DType>::to(gradOutputSum);
}
}
// Minimal Dim-dimensional tensor view (raw pointer + extents) that can be
// passed by value to kernels. No ownership; `dptr_` must outlive the view.
template<typename DType, int Dim>
struct DeviceTensor {
public:
inline DeviceTensor() {}
inline DeviceTensor(DType *p, const int *size)
: dptr_(p) {
for (int i = 0; i < Dim; ++i) {
size_[i] = size ? size[i] : 0;
}
}
MSHADOW_XINLINE unsigned getSize(const int i) const {
return size_[i];
}
// Total element count (product of all extents).
MSHADOW_XINLINE int numElements() const {
int n = 1;
for (int i = 0; i < Dim; ++i) {
n *= size_[i];
}
return n;
}
// Row-major (batch, plane, x) addressing over the first three extents.
// NOTE(review): only meaningful for Dim >= 3; 1-D views use operator[].
MSHADOW_XINLINE DType &operator()(const size_t batch,
const size_t plane,
const size_t x) const {
int offset = 0;
offset *= size_[0];
offset += batch;
offset *= size_[1];
offset += plane;
offset *= size_[2];
offset += x;
return *(const_cast<DType *>(dptr_ + offset));
}
MSHADOW_XINLINE DType &operator[](const size_t x) const {
return *(dptr_ + x);
}
// Product of all extents after the (batch, channel) leading pair.
MSHADOW_XINLINE size_t InnerSize() const {
size_t sz = 1;
for (size_t i = 2; i < Dim; ++i) {
sz *= size_[i];
}
return sz;
}
MSHADOW_XINLINE size_t ChannelCount() const {
return size_[1];
}
DType *dptr_;
int size_[Dim];
};
// Wrap a TBlob as a DeviceTensor<DType, Dim>: when the blob has exactly Dim
// dims the extents are copied; otherwise trailing dims are collapsed into the
// last axis (inDim > Dim) or padded with 1s (inDim < Dim).
template<typename DType, int Dim>
static DeviceTensor<DType, Dim> devicetensor(const TBlob &blob) {
DType *data = blob.dptr<DType>();
const int inDim = blob.shape_.ndim();
if (inDim == Dim) {
DeviceTensor<DType, Dim> tensor(data, nullptr);
for (int i = 0; i < Dim; ++i) {
tensor.size_[i] = blob.size(i);
}
return tensor;
}
// View in which the last dimensions are collapsed or expanded as needed
int size[Dim];
for (int i = 0; i < Dim || i < inDim; ++i) {
if (i < Dim && i < inDim) {
size[i] = blob.size(i);
} else if (i < Dim) {
size[i] = 1;
} else {
// extra input dims fold into the last view dim
size[Dim - 1] *= blob.size(i);
}
}
return DeviceTensor<DType, Dim>(data, &size[0]);
}
#define DeviceTensor1 DeviceTensor<AccReal, 1>
using namespace mxnet::op;
/*!
 * \brief Launch the forward batch-norm kernel on the given GPU stream.
 *
 * Dispatches the inference kernel (uses the running statistics) when not
 * training or when use_global_stats is set, otherwise the training kernel
 * (computes batch statistics). One block per channel; the thread count is
 * sized from the inner (spatial) extent.
 */
template<typename DType, typename AccReal>
static void BatchNormalizationUpdateOutput(mshadow::Stream<gpu> *s,
const OpContext &ctx,
const BatchNormParam& param,
const std::vector<TBlob> &in_data,
const std::vector<TBlob> &out_data,
const std::vector<TBlob> &aux_states,
const uint32_t flags,
double momentum,
double eps) {
batchnorm::BNTensor3<DType> input = batchnorm::BNTensor3<DType>(
in_data[batchnorm::kData], param.axis);
batchnorm::BNTensor3<DType> output = batchnorm::BNTensor3<DType>(
out_data[batchnorm::kOut], param.axis);
DeviceTensor1 weight = devicetensor<AccReal, 1>(in_data[batchnorm::kGamma]);
DeviceTensor1 bias = devicetensor<AccReal, 1>(in_data[batchnorm::kBeta]);
DeviceTensor1 runningMean = devicetensor<AccReal, 1>(aux_states[batchnorm::kMovingMean]);
DeviceTensor1 runningVar = devicetensor<AccReal, 1>(aux_states[batchnorm::kMovingVar]);
DeviceTensor1 saveMean = devicetensor<AccReal, 1>(out_data[batchnorm::kMean]);
DeviceTensor1 saveInvStd = devicetensor<AccReal, 1>(out_data[batchnorm::kVar]);
DCHECK_GT(weight.numElements(), 0);
if ((flags & IS_TRAINING_FLAG) == 0 || (flags & USE_GLOBAL_STATS_FLAG) != 0) {
dim3 blocks(input.ChannelCount());
dim3 threads(batchnorm::cuda::getNumThreads(input.InnerSize(), false));
hipLaunchKernelGGL(( BatchNormalizationUpdateOutputInferenceKernel<DType, AccReal, DeviceTensor1,
batchnorm::BNTensor3<DType>>)
, dim3(blocks), dim3(threads), 0, mshadow::Stream<gpu>::GetStream(s) ,
input, output, runningMean, runningVar, saveMean,
saveInvStd, weight, bias, eps, flags);
} else {
dim3 blocks(input.ChannelCount());
dim3 threads(batchnorm::cuda::getNumThreads(input.InnerSize(), false));
// Fixed: this launch had been left in raw spaced-chevron form
// (`<< < ... >> >`) by the hipify conversion, which hipcc does not parse;
// use hipLaunchKernelGGL like the inference branch above.
hipLaunchKernelGGL(( BatchNormalizationUpdateOutputKernel<DType, AccReal, DeviceTensor1,
batchnorm::BNTensor3<DType>>)
, dim3(blocks), dim3(threads), 0, mshadow::Stream<gpu>::GetStream(s) ,
input, output, weight, bias, eps, momentum, runningMean, runningVar,
saveMean, saveInvStd, flags);
}
MSHADOW_CUDA_POST_KERNEL_CHECK(BatchNormalizationUpdateOutput);
}
/*!
 * \brief Launch the backward batch-norm kernel on the given GPU stream.
 *
 * Wraps the blobs in device views and dispatches one block per channel.
 * Debug builds use the smaller thread-count table to ease debugging.
 */
template<typename DType, typename AccReal>
static void BatchNormalizationBackward(mshadow::Stream<gpu> *s,
const OpContext &ctx,
const BatchNormParam& param,
const std::vector<TBlob> &out_grad,
const std::vector<TBlob> &in_data,
const std::vector<TBlob> &out_data,
const std::vector<TBlob> &in_grad,
const std::vector<TBlob> &aux_states,
const uint32_t flags,
double momentum,
double eps) {
batchnorm::BNTensor3<DType> input = batchnorm::BNTensor3<DType>(
in_data[batchnorm::kData], param.axis);
batchnorm::BNTensor3<DType>gradOutput = batchnorm::BNTensor3<DType>(
out_grad[batchnorm::kOut], param.axis);
batchnorm::BNTensor3<DType>gradInput = batchnorm::BNTensor3<DType>(
in_grad[batchnorm::kData], param.axis);
CUDATensors<DeviceTensor1> tensors;
tensors.gradWeight = devicetensor<AccReal, 1>(in_grad[batchnorm::kGamma]);
tensors.gradBias = devicetensor<AccReal, 1>(in_grad[batchnorm::kBeta]);
tensors.weight = devicetensor<AccReal, 1>(in_data[batchnorm::kGamma]);
tensors.runningMean = devicetensor<AccReal, 1>(aux_states[batchnorm::kMovingMean]);
tensors.runningVar = devicetensor<AccReal, 1>(aux_states[batchnorm::kMovingVar]);
tensors.saveMean = devicetensor<AccReal, 1>(out_data[batchnorm::kMean]);
tensors.saveInvStd = devicetensor<AccReal, 1>(out_data[batchnorm::kVar]);
DCHECK_GT(tensors.weight.numElements(), 0);
#ifdef NDEBUG
constexpr bool SMALLER_THREADS = false;
#else
constexpr bool SMALLER_THREADS = true;
#endif
dim3 blocks(gradOutput.ChannelCount());
dim3 threads(batchnorm::cuda::getNumThreads(gradOutput.InnerSize(), SMALLER_THREADS));
hipLaunchKernelGGL(( BatchNormalizationBackwardKernel<DType, AccReal, DeviceTensor1, batchnorm::BNTensor3<DType>>)
, dim3(blocks), dim3(threads), 0, mshadow::Stream<gpu>::GetStream(s) ,
input, gradOutput, gradInput, tensors, flags, momentum, eps);
MSHADOW_CUDA_POST_KERNEL_CHECK(BatchNormalizationBackward);
}
} // namespace cuda
} // namespace batchnorm
// Build the bitmask of behaviour flags consumed by the batch-norm kernels
// from the op context (training mode), the layer parameters (fix_gamma,
// use_global_stats) and the per-output write requests.
template<typename xpu, typename DType, typename AccReal>
static inline uint32_t SetupFlags(const OpContext &ctx,
                                  const BatchNormParam& params,
                                  const std::vector<OpReqType> &req) {
  uint32_t flags = 0;
  if (ctx.is_train) {
    flags |= IS_TRAINING_FLAG;
  }
  if (params.fix_gamma) {
    flags |= FIX_GAMMA_FLAG;
  }
  if (params.use_global_stats) {
    flags |= USE_GLOBAL_STATS_FLAG;
  }
  // Only request writes for the outputs the executor actually asked for.
  typedef BatchNormOp<xpu, DType, AccReal> OpType;
  if (OpType::IsWriting(req[batchnorm::kData])) {
    flags |= WRITE_DATA_FLAG;
  }
  if (OpType::IsWriting(req[batchnorm::kGamma])) {
    flags |= WRITE_GAMMA_FLAG;
  }
  if (OpType::IsWriting(req[batchnorm::kBeta])) {
    flags |= WRITE_BETA_FLAG;
  }
  return flags;
}
/*! \brief Forward batch-norm pass on GPU */
template<typename xpu, typename DType, typename AccReal>
void BatchNormOp<xpu, DType, AccReal>::DoForward(mshadow::Stream<gpu> *stream,
const OpContext &ctx,
const std::vector<TBlob> &in_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &out_data,
const std::vector<TBlob> &aux_states) {
// Thin dispatch: translate ctx/param/req into the kernel flag bitmask and
// hand off to the device-side forward implementation.
batchnorm::cuda::BatchNormalizationUpdateOutput<DType, AccReal>(
stream,
ctx,
param_,
in_data,
out_data,
aux_states,
SetupFlags<xpu, DType, AccReal>(ctx, param_, req),
param_.momentum,
param_.eps);
// Surface any asynchronous kernel failure promptly.
MSHADOW_CUDA_POST_KERNEL_CHECK(BatchNormOp_DoForward_gpu);
}
/*! \brief Backward batch-norm pass on GPU */
template<typename xpu, typename DType, typename AccReal>
void BatchNormOp<xpu, DType, AccReal>::DoBackward(mshadow::Stream<gpu> *stream,
const OpContext &ctx,
const std::vector<TBlob> &out_grad,
const std::vector<TBlob> &in_data,
const std::vector<TBlob> &out_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &in_grad,
const std::vector<TBlob> &aux_states) {
// Thin dispatch: translate ctx/param/req into the kernel flag bitmask and
// hand off to the device-side backward implementation.
batchnorm::cuda::BatchNormalizationBackward<DType, AccReal>(
stream,
ctx,
param_,
out_grad,
in_data,
out_data,
in_grad,
aux_states,
SetupFlags<xpu, DType, AccReal>(ctx, param_, req),
param_.momentum,
param_.eps);
// Surface any asynchronous kernel failure promptly.
MSHADOW_CUDA_POST_KERNEL_CHECK(BatchNormOp_DoBackward_gpu);
}
/*! \brief Create GPU operator for batch normalization */
// Choose between the cuDNN and the generic GPU batch-norm implementation.
template<>
Operator *CreateOp<gpu>(BatchNormParam param, const int dtype, const TShape& shape) {
// Resolve a possibly-negative axis against the actual input shape.
param.axis = mxnet::op::batchnorm::GetRealAxis(shape, param.axis);
Operator *op = NULL;
#if MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 5
// Prefer cuDNN, but only where it applies: batch statistics (not global
// stats), cuDNN not disabled, at most 4D input, and the default channel axis.
if (!param.use_global_stats && !param.cudnn_off && shape.ndim() <= 4
&& param.axis == mxnet::op::batchnorm::DEFAULT_AXIS) {
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new CuDNNBatchNormOp<DType>(param);
})
} else {
MSHADOW_REAL_TYPE_SWITCH_EX(dtype, DType, AccReal, {
op = new BatchNormOp<gpu, DType, AccReal>(param);
})
}
#else
MSHADOW_REAL_TYPE_SWITCH_EX(dtype,
DType,
AccReal,
{ op = new BatchNormOp<gpu, DType, AccReal>(param); });
#endif
return op;
}
} // namespace op
} // namespace mxnet
| 193cb9feffeeddc3b350493cc5eea7dd06766929.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file batch_norm.cu
* \brief CUDA Batch Normalization code
* \author Chris Olivier, Bing Xu
* Adapted from Torch
*/
#include <cuda_runtime_api.h>
#include <algorithm>
#include "batch_norm-inl.h"
#define WRITE_DATA_FLAG 1
#define WRITE_GAMMA_FLAG 2
#define WRITE_BETA_FLAG 4
#define FIX_GAMMA_FLAG 8
#define IS_TRAINING_FLAG 16
#define USE_GLOBAL_STATS_FLAG 32
#if MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 5
#include "./cudnn/cudnn_batch_norm-inl.h"
#endif
#include "../../common/cuda_utils.h"
using namespace mxnet;
/*! \brief inverse standard deviation <-> variance */
#define VARIANCE_TO_INVSTD(__var$, __eps$) (1.0/sqrt((__var$) + DType(__eps$)))
#define INVSTD_TO_VARIANCE(__invstd$, __eps$) ((1.0 / ((__invstd$) * (__invstd$))) - (__eps$))
namespace mxnet {
namespace op {
namespace batchnorm {
namespace cuda {
static const unsigned WARP_SIZE = 32;
// The maximum number of threads in a block
static const unsigned MAX_BLOCK_SIZE = 512U;
// Static-cast helper usable from both host and device code; converts a value
// of type In to type Out (e.g. between DType storage and AccReal accumulator).
template<typename In, typename Out>
struct ScalarConvert {
static __host__ __device__ __forceinline__ Out to(const In v) { return (Out) v; }
};
// Number of threads in a block given an input size up to MAX_BLOCK_SIZE
// Pick a launch block size for nElem items: the smallest candidate
// power-of-two thread count (from 32 upward) that covers nElem, capped at
// MAX_BLOCK_SIZE, or at half of it when `smaller` is requested (debug builds).
static unsigned getNumThreads(int nElem, const bool smaller) {
  const unsigned candidates[5] = {32, 64, 128, 256, MAX_BLOCK_SIZE};
  const int numCandidates = smaller ? 4 : 5;
  const unsigned needed = static_cast<unsigned>(nElem);
  for (int idx = 0; idx < numCandidates; ++idx) {
    if (needed <= candidates[idx]) {
      return candidates[idx];
    }
  }
  return smaller ? (MAX_BLOCK_SIZE >> 1) : MAX_BLOCK_SIZE;
}
// Returns the index of the most significant 1 bit in `val`.
__device__ __forceinline__ int getMSB(int val) {
// __clz counts leading zeros, so 31 - clz(val) is the highest set bit index.
return 31 - __clz(val);
}
// A pair of accumulator values that can be reduced together in one pass
// (used to carry sum(gradOutput) and dot(input - mean, gradOutput) jointly).
template<typename DType, typename AccReal>
struct Float2 {
AccReal v1, v2;
__device__ Float2() {}
__device__ Float2(DType v1, DType v2)
: v1(ScalarConvert<DType, AccReal>::to(v1))
, v2(ScalarConvert<DType, AccReal>::to(v2)) {}
__device__ Float2(DType v)
: v1(ScalarConvert<DType, AccReal>::to(v))
, v2(ScalarConvert<DType, AccReal>::to(v)) {}
__device__ Float2(int v)
: v1(ScalarConvert<int, AccReal>::to(v))
, v2(ScalarConvert<int, AccReal>::to(v)) {}
// Component-wise accumulation so Float2 works with the generic reduce().
__device__ Float2 &operator+=(const Float2 &a) {
v1 += a.v1;
v2 += a.v2;
return *this;
}
};
// Functor for reduce(): yields element (batch, plane, n) unchanged, so the
// reduction computes the plain sum over one channel.
template<typename DType, typename AccReal, typename DeviceTensor>
struct SumOp {
__device__ SumOp(const DeviceTensor t) : tensor(t) {}
__device__ __forceinline__ AccReal operator()(int batch, int plane, int n) {
return ScalarConvert<DType, AccReal>::to(tensor.get_ref(batch, plane, n));
}
const DeviceTensor tensor;
};
// Functor for reduce(): yields the squared deviation from `mean`, so the
// reduction computes N * variance for one channel.
template<typename DType, typename AccReal, typename DeviceTensor>
struct VarOp {
__device__ VarOp(AccReal m, const DeviceTensor t)
: mean(m)
, tensor(t) {
}
__device__ __forceinline__ AccReal operator()(int batch, int plane, int n) {
DType val = tensor.get_ref(batch, plane, n);
return (val - mean) * (val - mean);
}
const AccReal mean;
const DeviceTensor tensor;
};
// Functor for reduce(): yields the pair (gradOutput, gradOutput * (input - mean)),
// so one reduction produces both sum(gradOutput) and the centered dot product.
template<typename DType, typename AccReal, typename DeviceTensor>
struct GradOp {
__device__ GradOp(AccReal m, const DeviceTensor i, const DeviceTensor g)
: mean(m), input(i), gradOutput(g) {}
__device__ __forceinline__ Float2<DType, AccReal> operator()(int batch, int plane, int n) {
const DType g = gradOutput.get_ref(batch, plane, n);
const DType c = ScalarConvert<AccReal, DType>::to(input.get_ref(batch, plane, n) - mean);
return Float2<DType, AccReal>(g, g * c);
}
const AccReal mean;
const DeviceTensor input;
const DeviceTensor gradOutput;
};
#if CUDA_VERSION >= 9000
#define FULLMASK 0xFFFFFFFF
#define __shfl_xor(...) __shfl_xor_sync(FULLMASK, __VA_ARGS__)
#endif
// Sum across all threads within a warp
// Sum `val` across all 32 lanes of the calling warp; every lane returns the total.
template<typename T>
static __device__ __forceinline__ T warpSum(T val) {
#if __CUDA_ARCH__ >= 300
// Butterfly reduction via register shuffles (__shfl_xor is remapped to
// __shfl_xor_sync above for CUDA >= 9).
for (int i = 0; i < getMSB(WARP_SIZE); ++i) {
val += __shfl_xor(val, 1 << i, WARP_SIZE);
}
#else
// Pre-Kepler fallback: stage values in shared memory, then each lane sums the
// other slots of its own warp. NOTE(review): there is no barrier between the
// write and the reads; this relies on pre-Volta lockstep warp execution,
// which holds on the < sm_30 devices that take this path.
__shared__ T values[MAX_BLOCK_SIZE];
values[threadIdx.x] = val;
__threadfence_block();
const int base = (threadIdx.x / WARP_SIZE) * WARP_SIZE;
for (int i = 1; i < WARP_SIZE; i++) {
val += values[base + ((i + threadIdx.x) % WARP_SIZE)];
}
#endif
return val;
}
// Float2 overload: reduce both components across the warp independently.
template<typename DType, typename AccReal>
static __device__ __forceinline__ Float2<DType, AccReal> warpSum(Float2<DType, AccReal> value) {
value.v1 = warpSum(value.v1);
value.v2 = warpSum(value.v2);
return value;
}
// Sum across (batch, x/y/z) applying Op() pointwise
// Block-wide reduction of op(batch, plane, x) over all (batch, x) positions of
// one channel. Two levels: each warp reduces its partial sums with shuffles,
// the per-warp results go through shared memory, and warp 0 reduces those.
// Every thread of the block returns the final total.
template<typename T, typename Op, typename DeviceTensor>
static __device__ T reduce(Op op, DeviceTensor tensor, int plane) {
// Thread-local partial sum over a strided slice of the channel.
T sum = (T) 0;
for (int batch = 0; batch < tensor.OuterSize(); ++batch) {
for (int x = threadIdx.x; x < tensor.InnerSize(); x += blockDim.x) {
sum += op(batch, plane, x);
}
}
// sum over NumThreads within a warp
sum = warpSum(sum);
// 'transpose', and reduce within warp again
__shared__ T shared[32];
__syncthreads();
if (threadIdx.x % WARP_SIZE == 0) {
shared[threadIdx.x / WARP_SIZE] = sum;
}
if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) {
// zero out the other entries in shared
shared[threadIdx.x] = (T) 0;
}
__syncthreads();
if (threadIdx.x / WARP_SIZE == 0) {
sum = warpSum(shared[threadIdx.x]);
if (threadIdx.x == 0) {
shared[0] = sum;
}
}
__syncthreads();
// Everyone picks it up, should be broadcast into the whole gradInput
return shared[0];
}
// Forward pass when not training (or when global stats are forced): normalize
// with the stored running mean/variance instead of batch statistics.
// Launch layout: one block per channel (plane = blockIdx.x); the block's
// threads stride over that channel's (outer, inner) elements.
template <typename DType, typename AccReal, typename DeviceTensor1, typename DeviceTensor>
__global__ void BatchNormalizationUpdateOutputInferenceKernel(
DeviceTensor input,
DeviceTensor output,
DeviceTensor1 runningMean,
DeviceTensor1 runningVar,
DeviceTensor1 saveMean,
DeviceTensor1 saveInvStd,
DeviceTensor1 weight,
DeviceTensor1 bias,
const DType epsilon,
const uint32_t flags) {
int plane = blockIdx.x;
AccReal invstd = VARIANCE_TO_INVSTD(runningVar[plane], epsilon);
AccReal mean = ScalarConvert<DType, AccReal>::to(runningMean[plane]);
// fix_gamma forces the scale to 1; missing gamma/beta tensors likewise
// degenerate to scale 1 / shift 0.
AccReal gamma = ((flags & FIX_GAMMA_FLAG) == 0 && weight.numElements() > 0)
? ScalarConvert<DType, AccReal>::to(weight[plane])
: ScalarConvert<int, AccReal>::to(1);
AccReal beta = bias.numElements() > 0 ? ScalarConvert<DType, AccReal>::to(bias[plane])
: ScalarConvert<int, AccReal>::to(0);
if (threadIdx.x == 0) {
// One thread mirrors the running statistics into the save buffers and,
// when gamma is fixed and writable, resets it to 1.
saveMean[plane] = runningMean[plane];
saveInvStd[plane] = VARIANCE_TO_INVSTD(runningVar[plane], epsilon);
if ((flags & WRITE_GAMMA_FLAG) != 0 && (flags & FIX_GAMMA_FLAG) != 0
&& weight.numElements() > 0) {
weight[plane] = AccReal(1);
}
}
// Write normalized and update the output
for (int batch = 0, nbatch = input.OuterSize(); batch < nbatch; ++batch) {
for (int x = threadIdx.x, nx = input.InnerSize(); x < nx; x += blockDim.x) {
const DType inp = input.get_ref(batch, plane, x);
output.get_ref(batch, plane, x) =
ScalarConvert<AccReal, DType>::to(gamma * (inp - mean) * invstd + beta);
}
}
}
// Training forward pass: compute the per-channel batch mean and inverse
// standard deviation with block-wide reductions, stash them in
// saveMean/saveInvStd, then normalize the channel.
// Launch layout: one block per channel (plane = blockIdx.x).
// NOTE(review): momentum/runningMean/runningVar are carried in the signature
// but this kernel does not update them; the running-average writeback happens
// in the backward kernel.
template<typename DType, typename AccReal, typename DeviceTensor1, typename DeviceTensor>
__global__ void BatchNormalizationUpdateOutputKernel(
DeviceTensor input,
DeviceTensor output,
DeviceTensor1 weight,
DeviceTensor1 bias,
const AccReal epsilon,
const AccReal momentum,
DeviceTensor1 runningMean,
DeviceTensor1 runningVar,
DeviceTensor1 saveMean,
DeviceTensor1 saveInvStd,
const uint32_t flags) {
const int plane = blockIdx.x;
const int N = input.OuterSize() * input.InnerSize();
const AccReal norm = AccReal(1) / N;
// Compute the mean and variance across (batch, x/y/z)
const AccReal mean = reduce<AccReal>(
SumOp<DType, AccReal, DeviceTensor>(input), input, plane) * norm;
__syncthreads();
const AccReal varN = reduce<AccReal>(VarOp<DType, AccReal, DeviceTensor>(mean, input),
input, plane);
AccReal invStd = 0;
// Guard against a zero batch variance with zero epsilon (invStd stays 0).
if (varN != AccReal(0) || epsilon != AccReal(0)) {
invStd = AccReal(1.0) / sqrt(varN * norm + epsilon);
}
// Save the mean, variance, and moving averages
if (threadIdx.x == 0) {
// For one item (0th) per plane (channel), write the per-channel data (ie mean, variance, etc)
// Momentum based writeback
saveMean[plane] = ScalarConvert<AccReal, DType>::to(mean);
saveInvStd[plane] = invStd;
if ((flags & WRITE_GAMMA_FLAG) != 0 && (flags & FIX_GAMMA_FLAG) != 0
&& weight.numElements() > 0) {
weight[plane] = AccReal(1);
}
}
// Write normalized and update the output
const AccReal gamma = ((flags & FIX_GAMMA_FLAG) == 0 && weight.numElements() > 0)
? ScalarConvert<DType, AccReal>::to(weight[plane])
: ScalarConvert<int, AccReal>::to(1);
const AccReal beta = bias.numElements() > 0 ? ScalarConvert<DType, AccReal>::to(bias[plane])
: ScalarConvert<int, AccReal>::to(0);
for (int batch = 0, nbatch = input.OuterSize(); batch < nbatch; ++batch) {
for (int x = threadIdx.x, nx = input.InnerSize(); x < nx; x += blockDim.x) {
const DType inp = input.get_ref(batch, plane, x);
output.get_ref(batch, plane, x) =
ScalarConvert<AccReal, DType>::to(gamma * (inp - mean) * invStd + beta);
}
}
}
// Bundles the per-channel 1D tensors needed by the backward kernel so they
// can be passed as a single kernel argument.
template<typename DeviceTensor1>
struct CUDATensors {
DeviceTensor1 gradWeight;
DeviceTensor1 gradBias;
DeviceTensor1 weight;
DeviceTensor1 runningMean;
DeviceTensor1 runningVar;
DeviceTensor1 saveMean;
DeviceTensor1 saveInvStd;
};
// Backward pass for one channel per block (plane = blockIdx.x).
// Produces gradInput, gradWeight and gradBias as requested via `flags`, and —
// when training with batch statistics — folds the saved batch mean/variance
// into the running averages using `momentum`.
template<typename DType, typename AccReal, typename DeviceTensor1, typename DeviceTensor>
static __global__ void BatchNormalizationBackwardKernel(
const DeviceTensor input,
const DeviceTensor gradOutput,
DeviceTensor gradInput,
CUDATensors<DeviceTensor1> tensors,
const uint32_t flags,
const AccReal momentum,
const double eps) {
int plane = blockIdx.x;
int N = gradOutput.OuterSize() * gradOutput.InnerSize();
const bool is_train_and_not_global_stats =
(flags & IS_TRAINING_FLAG) != 0 && (flags & USE_GLOBAL_STATS_FLAG) == 0;
// Normalization statistics: batch stats when training, running stats otherwise.
AccReal mean, invstd;
if (is_train_and_not_global_stats) {
mean = ScalarConvert<DType, AccReal>::to(tensors.saveMean[plane]);
invstd = tensors.saveInvStd[plane];
} else {
mean = ScalarConvert<DType, AccReal>::to(tensors.runningMean[plane]);
invstd = VARIANCE_TO_INVSTD(tensors.runningVar[plane], eps);
}
const AccReal weightVal = ((flags & FIX_GAMMA_FLAG) == 0 && tensors.weight.numElements() > 0) ?
ScalarConvert<DType, AccReal>::to(tensors.weight[plane]) : AccReal(1);
const AccReal norm = AccReal(1) / N;
// Compute two values across (batch, x/y/z) in one pass:
// 1. Sum(gradOutput)
// 2. DotProduct(input - mean, gradOutput)
GradOp<DType, AccReal, DeviceTensor> g(mean, input, gradOutput);
Float2< DType, AccReal > res = reduce < Float2 < DType, AccReal >,
GradOp< DType, AccReal, DeviceTensor >, DeviceTensor > (g, gradOutput, plane);
const AccReal gradOutputSum = res.v1;
const AccReal dotP = res.v2;
const AccReal gradMean = gradOutputSum * norm;
const AccReal projScale = dotP * norm * invstd * invstd;
const AccReal gradScale = invstd * weightVal;
if (threadIdx.x == 0 && is_train_and_not_global_stats) {
const AccReal localVariance = INVSTD_TO_VARIANCE(tensors.saveInvStd[plane], eps);
const AccReal localMean = tensors.saveMean[plane];
// update running averages
tensors.runningMean[plane] = tensors.runningMean[plane]
* momentum + localMean * (AccReal(1) - momentum);
tensors.runningVar[plane] = tensors.runningVar[plane]
* momentum + localVariance * (AccReal(1) - momentum);
}
// Data gradient: in training mode subtract the projection onto the batch
// statistics; in inference mode the statistics are constants, so only scale.
if (gradInput.Size() > 0 && (flags & WRITE_DATA_FLAG) != 0) {
for (int batch = 0, nbatch = gradOutput.OuterSize(); batch < nbatch; ++batch) {
for (int x = threadIdx.x, nx = gradOutput.InnerSize(); x < nx; x += blockDim.x) {
const DType gradOut = gradOutput.get_ref(batch, plane, x);
if (is_train_and_not_global_stats) {
const DType inp = input.get_ref(batch, plane, x);
const AccReal proj = (inp - mean) * projScale;
gradInput.get_ref(batch, plane, x) =
ScalarConvert<AccReal, DType>::to((gradOut - proj - gradMean) * gradScale);
} else {
gradInput.get_ref(batch, plane, x) = ScalarConvert<AccReal, DType>::to(
gradOut * gradScale);
}
}
}
}
// Gamma gradient is zeroed when gamma is fixed.
if (tensors.gradWeight.numElements() > 0 && threadIdx.x == 0 && (flags & WRITE_GAMMA_FLAG) != 0) {
if ((flags & FIX_GAMMA_FLAG) == 0) {
tensors.gradWeight[plane] = ScalarConvert<AccReal, DType>::to(dotP * invstd);
} else {
tensors.gradWeight[plane] = DType(0);
}
}
// Beta gradient is simply the sum of the output gradients.
if (tensors.gradBias.numElements() > 0 && threadIdx.x == 0 && (flags & WRITE_BETA_FLAG) != 0) {
tensors.gradBias[plane] = ScalarConvert<AccReal, DType>::to(gradOutputSum);
}
}
// Minimal device-side tensor view: a raw pointer plus per-dimension sizes.
// Dimension 1 is treated as the channel dimension by the batch-norm kernels.
template<typename DType, int Dim>
struct DeviceTensor {
public:
inline DeviceTensor() {}
// Wrap pointer `p`; a null `size` leaves all dimensions at 0.
inline DeviceTensor(DType *p, const int *size)
: dptr_(p) {
for (int i = 0; i < Dim; ++i) {
size_[i] = size ? size[i] : 0;
}
}
// Size of dimension i.
MSHADOW_XINLINE unsigned getSize(const int i) const {
return size_[i];
}
// Total element count (product of all dimension sizes).
MSHADOW_XINLINE int numElements() const {
int n = 1;
for (int i = 0; i < Dim; ++i) {
n *= size_[i];
}
return n;
}
// Row-major (batch, plane, x) element access.
// NOTE(review): the flattening uses size_[0..2], so this accessor assumes a
// 3D layout — confirm it is only called with Dim == 3.
MSHADOW_XINLINE DType &operator()(const size_t batch,
const size_t plane,
const size_t x) const {
int offset = 0;
offset *= size_[0];
offset += batch;
offset *= size_[1];
offset += plane;
offset *= size_[2];
offset += x;
return *(const_cast<DType *>(dptr_ + offset));
}
// Flat element access (used by the 1D per-channel tensors).
MSHADOW_XINLINE DType &operator[](const size_t x) const {
return *(dptr_ + x);
}
// Product of all dimensions after the channel dimension (dims 2..Dim-1).
MSHADOW_XINLINE size_t InnerSize() const {
size_t sz = 1;
for (size_t i = 2; i < Dim; ++i) {
sz *= size_[i];
}
return sz;
}
// Extent of the channel dimension.
MSHADOW_XINLINE size_t ChannelCount() const {
return size_[1];
}
DType *dptr_;
int size_[Dim];
};
// Wrap a TBlob as a DeviceTensor<Dim>. When the blob rank differs from Dim,
// build a view: missing trailing dimensions become size 1, and extra trailing
// dimensions are collapsed (multiplied) into the last axis.
template<typename DType, int Dim>
static DeviceTensor<DType, Dim> devicetensor(const TBlob &blob) {
DType *data = blob.dptr<DType>();
const int inDim = blob.shape_.ndim();
if (inDim == Dim) {
// Exact rank match: copy the shape verbatim.
DeviceTensor<DType, Dim> tensor(data, nullptr);
for (int i = 0; i < Dim; ++i) {
tensor.size_[i] = blob.size(i);
}
return tensor;
}
// View in which the last dimensions are collapsed or expanded as needed
int size[Dim];
for (int i = 0; i < Dim || i < inDim; ++i) {
if (i < Dim && i < inDim) {
size[i] = blob.size(i);
} else if (i < Dim) {
size[i] = 1;
} else {
size[Dim - 1] *= blob.size(i);
}
}
return DeviceTensor<DType, Dim>(data, &size[0]);
}
#define DeviceTensor1 DeviceTensor<AccReal, 1>
using namespace mxnet::op;
// Launch the batch-norm forward pass on the GPU, dispatching to the inference
// kernel (running statistics) or the training kernel (batch statistics)
// depending on the flag bits. One block per channel in either case.
template<typename DType, typename AccReal>
static void BatchNormalizationUpdateOutput(mshadow::Stream<gpu> *s,
const OpContext &ctx,
const BatchNormParam& param,
const std::vector<TBlob> &in_data,
const std::vector<TBlob> &out_data,
const std::vector<TBlob> &aux_states,
const uint32_t flags,
double momentum,
double eps) {
// View data blobs as 3D (outer, channel, inner) tensors split along param.axis.
batchnorm::BNTensor3<DType> input = batchnorm::BNTensor3<DType>(
in_data[batchnorm::kData], param.axis);
batchnorm::BNTensor3<DType> output = batchnorm::BNTensor3<DType>(
out_data[batchnorm::kOut], param.axis);
DeviceTensor1 weight = devicetensor<AccReal, 1>(in_data[batchnorm::kGamma]);
DeviceTensor1 bias = devicetensor<AccReal, 1>(in_data[batchnorm::kBeta]);
DeviceTensor1 runningMean = devicetensor<AccReal, 1>(aux_states[batchnorm::kMovingMean]);
DeviceTensor1 runningVar = devicetensor<AccReal, 1>(aux_states[batchnorm::kMovingVar]);
DeviceTensor1 saveMean = devicetensor<AccReal, 1>(out_data[batchnorm::kMean]);
DeviceTensor1 saveInvStd = devicetensor<AccReal, 1>(out_data[batchnorm::kVar]);
DCHECK_GT(weight.numElements(), 0);
// Inference (or forced global stats): normalize with running statistics.
if ((flags & IS_TRAINING_FLAG) == 0 || (flags & USE_GLOBAL_STATS_FLAG) != 0) {
dim3 blocks(input.ChannelCount());
dim3 threads(batchnorm::cuda::getNumThreads(input.InnerSize(), false));
BatchNormalizationUpdateOutputInferenceKernel<DType, AccReal, DeviceTensor1,
batchnorm::BNTensor3<DType>>
<<< blocks, threads, 0, mshadow::Stream<gpu>::GetStream(s) >>> (
input, output, runningMean, runningVar, saveMean,
saveInvStd, weight, bias, eps, flags);
} else {
// Training: compute batch statistics inside the kernel.
dim3 blocks(input.ChannelCount());
dim3 threads(batchnorm::cuda::getNumThreads(input.InnerSize(), false));
BatchNormalizationUpdateOutputKernel<DType, AccReal, DeviceTensor1,
batchnorm::BNTensor3<DType>>
<< < blocks, threads, 0, mshadow::Stream<gpu>::GetStream(s) >> > (
input, output, weight, bias, eps, momentum, runningMean, runningVar,
saveMean, saveInvStd, flags);
}
MSHADOW_CUDA_POST_KERNEL_CHECK(BatchNormalizationUpdateOutput);
}
// Launch the batch-norm backward pass on the GPU.
// Computes gradInput (kData), gradWeight (kGamma) and gradBias (kBeta) from
// gradOutput; the kernel also folds the saved batch statistics into the
// running mean/var when training without global stats.
template<typename DType, typename AccReal>
static void BatchNormalizationBackward(mshadow::Stream<gpu> *s,
const OpContext &ctx,
const BatchNormParam& param,
const std::vector<TBlob> &out_grad,
const std::vector<TBlob> &in_data,
const std::vector<TBlob> &out_data,
const std::vector<TBlob> &in_grad,
const std::vector<TBlob> &aux_states,
const uint32_t flags,
double momentum,
double eps) {
// View each blob as a 3D (outer, channel, inner) tensor split along param.axis.
batchnorm::BNTensor3<DType> input = batchnorm::BNTensor3<DType>(
in_data[batchnorm::kData], param.axis);
batchnorm::BNTensor3<DType>gradOutput = batchnorm::BNTensor3<DType>(
out_grad[batchnorm::kOut], param.axis);
batchnorm::BNTensor3<DType>gradInput = batchnorm::BNTensor3<DType>(
in_grad[batchnorm::kData], param.axis);
// Bundle the per-channel 1D tensors into one struct so they travel as a
// single kernel argument.
CUDATensors<DeviceTensor1> tensors;
tensors.gradWeight = devicetensor<AccReal, 1>(in_grad[batchnorm::kGamma]);
tensors.gradBias = devicetensor<AccReal, 1>(in_grad[batchnorm::kBeta]);
tensors.weight = devicetensor<AccReal, 1>(in_data[batchnorm::kGamma]);
tensors.runningMean = devicetensor<AccReal, 1>(aux_states[batchnorm::kMovingMean]);
tensors.runningVar = devicetensor<AccReal, 1>(aux_states[batchnorm::kMovingVar]);
tensors.saveMean = devicetensor<AccReal, 1>(out_data[batchnorm::kMean]);
tensors.saveInvStd = devicetensor<AccReal, 1>(out_data[batchnorm::kVar]);
DCHECK_GT(tensors.weight.numElements(), 0);
// Debug builds cap the block size lower (see getNumThreads) to ease debugging.
#ifdef NDEBUG
constexpr bool SMALLER_THREADS = false;
#else
constexpr bool SMALLER_THREADS = true;
#endif
// One block per channel; threads cooperate on the (outer, inner) reductions.
dim3 blocks(gradOutput.ChannelCount());
dim3 threads(batchnorm::cuda::getNumThreads(gradOutput.InnerSize(), SMALLER_THREADS));
BatchNormalizationBackwardKernel<DType, AccReal, DeviceTensor1, batchnorm::BNTensor3<DType>>
<<< blocks, threads, 0, mshadow::Stream<gpu>::GetStream(s) >>> (
input, gradOutput, gradInput, tensors, flags, momentum, eps);
MSHADOW_CUDA_POST_KERNEL_CHECK(BatchNormalizationBackward);
}
} // namespace cuda
} // namespace batchnorm
// Build the bitmask of behaviour flags consumed by the batch-norm kernels
// from the op context (training mode), the layer parameters (fix_gamma,
// use_global_stats) and the per-output write requests.
template<typename xpu, typename DType, typename AccReal>
static inline uint32_t SetupFlags(const OpContext &ctx,
                                  const BatchNormParam& params,
                                  const std::vector<OpReqType> &req) {
  uint32_t flags = 0;
  if (ctx.is_train) {
    flags |= IS_TRAINING_FLAG;
  }
  if (params.fix_gamma) {
    flags |= FIX_GAMMA_FLAG;
  }
  if (params.use_global_stats) {
    flags |= USE_GLOBAL_STATS_FLAG;
  }
  // Only request writes for the outputs the executor actually asked for.
  typedef BatchNormOp<xpu, DType, AccReal> OpType;
  if (OpType::IsWriting(req[batchnorm::kData])) {
    flags |= WRITE_DATA_FLAG;
  }
  if (OpType::IsWriting(req[batchnorm::kGamma])) {
    flags |= WRITE_GAMMA_FLAG;
  }
  if (OpType::IsWriting(req[batchnorm::kBeta])) {
    flags |= WRITE_BETA_FLAG;
  }
  return flags;
}
/*! \brief Forward batch-norm pass on GPU */
template<typename xpu, typename DType, typename AccReal>
void BatchNormOp<xpu, DType, AccReal>::DoForward(mshadow::Stream<gpu> *stream,
const OpContext &ctx,
const std::vector<TBlob> &in_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &out_data,
const std::vector<TBlob> &aux_states) {
// Thin dispatch: translate ctx/param/req into the kernel flag bitmask and
// hand off to the device-side forward implementation.
batchnorm::cuda::BatchNormalizationUpdateOutput<DType, AccReal>(
stream,
ctx,
param_,
in_data,
out_data,
aux_states,
SetupFlags<xpu, DType, AccReal>(ctx, param_, req),
param_.momentum,
param_.eps);
// Surface any asynchronous kernel failure promptly.
MSHADOW_CUDA_POST_KERNEL_CHECK(BatchNormOp_DoForward_gpu);
}
/*! \brief Backward batch-norm pass on GPU */
template<typename xpu, typename DType, typename AccReal>
void BatchNormOp<xpu, DType, AccReal>::DoBackward(mshadow::Stream<gpu> *stream,
const OpContext &ctx,
const std::vector<TBlob> &out_grad,
const std::vector<TBlob> &in_data,
const std::vector<TBlob> &out_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &in_grad,
const std::vector<TBlob> &aux_states) {
// Thin dispatch: translate ctx/param/req into the kernel flag bitmask and
// hand off to the device-side backward implementation.
batchnorm::cuda::BatchNormalizationBackward<DType, AccReal>(
stream,
ctx,
param_,
out_grad,
in_data,
out_data,
in_grad,
aux_states,
SetupFlags<xpu, DType, AccReal>(ctx, param_, req),
param_.momentum,
param_.eps);
// Surface any asynchronous kernel failure promptly.
MSHADOW_CUDA_POST_KERNEL_CHECK(BatchNormOp_DoBackward_gpu);
}
/*! \brief Create GPU operator for batch normalization */
// Choose between the cuDNN and the generic GPU batch-norm implementation.
template<>
Operator *CreateOp<gpu>(BatchNormParam param, const int dtype, const TShape& shape) {
// Resolve a possibly-negative axis against the actual input shape.
param.axis = mxnet::op::batchnorm::GetRealAxis(shape, param.axis);
Operator *op = NULL;
#if MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 5
// Prefer cuDNN, but only where it applies: batch statistics (not global
// stats), cuDNN not disabled, at most 4D input, and the default channel axis.
if (!param.use_global_stats && !param.cudnn_off && shape.ndim() <= 4
&& param.axis == mxnet::op::batchnorm::DEFAULT_AXIS) {
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new CuDNNBatchNormOp<DType>(param);
})
} else {
MSHADOW_REAL_TYPE_SWITCH_EX(dtype, DType, AccReal, {
op = new BatchNormOp<gpu, DType, AccReal>(param);
})
}
#else
MSHADOW_REAL_TYPE_SWITCH_EX(dtype,
DType,
AccReal,
{ op = new BatchNormOp<gpu, DType, AccReal>(param); });
#endif
return op;
}
} // namespace op
} // namespace mxnet
|
140097267abb260ae9123cdd68d9875c2399507f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cmath>
#include <hip/hip_runtime.h>
#include "paint_kernel.h"
#define C_MAX_INTENSITIES 256
// Oil-painting filter: for each pixel, bucket the intensities of the pixels in
// a (2*radius+1)^2 neighbourhood into nBins bins and output the average colour
// of the most populated bin.
// Expects RGBA images (4 bytes per pixel, row-major) and a 2D launch grid
// covering width x height. Requires nBins < C_MAX_INTENSITIES.
__global__
void cuda_paint (unsigned char* in_image, unsigned char* out_image, int width, int height, int radius, int nBins) {
    // http://supercomputingblog.com/cuda/advanced-image-processing-with-cuda/2/
    int i = blockIdx.y * blockDim.y + threadIdx.y;  // row
    int j = blockIdx.x * blockDim.x + threadIdx.x;  // column
    // Test to see if we're testing a valid pixel
    if (i >= height || j >= width) return; // Don't bother doing the calculation. We're not in a valid pixel location
    int intensityCount[C_MAX_INTENSITIES];
    int avgR[C_MAX_INTENSITIES];
    int avgG[C_MAX_INTENSITIES];
    int avgB[C_MAX_INTENSITIES];
    for (int k=0; k <= nBins; k++) {
        intensityCount[k] = 0;
        avgR[k] = 0;
        avgG[k] = 0;
        avgB[k] = 0;
    }
    // Scan the (2*radius+1)^2 neighbourhood, clipped at the image borders.
    int maxIntensityCount = 0;
    int maxIntensityCountIndex = 0;
    for (int k=i-radius; k <= i+radius;k++) {
        if (k < 0 || k >= height) continue;
        for (int l=j-radius; l <= j+radius; l++) {
            if (l < 0 || l >= width) continue;
            // Row-major RGBA: pixel (row k, col l) starts at (k*width + l)*4.
            // The previous offset (j + k + l*width)*4 addressed the wrong pixels.
            const int currentoffset = (k * width + l) * 4;
            // Each channel is already a single byte; read it directly. The old
            // packed-pixel masks (e.g. byte & 0x00ff0000 >> 16) always yielded
            // 0 for the red and green channels.
            int r = in_image[currentoffset + 0];
            int g = in_image[currentoffset + 1];
            int b = in_image[currentoffset + 2];
            // Map the average intensity (0..255) onto one of the nBins buckets.
            int curIntensity = (int)((float)((r+g+b)/3*nBins)/255.0f);
            intensityCount[curIntensity]++;
            if (intensityCount[curIntensity] > maxIntensityCount) {
                maxIntensityCount = intensityCount[curIntensity];
                maxIntensityCountIndex = curIntensity;
            }
            avgR[curIntensity] += r;
            avgG[curIntensity] += g;
            avgB[curIntensity] += b;
        }
    }
    // Output the mean colour of the dominant intensity bucket at (i, j);
    // previously the store ignored the row and always wrote into row 0.
    int finalR = avgR[maxIntensityCountIndex] / maxIntensityCount;
    int finalG = avgG[maxIntensityCountIndex] / maxIntensityCount;
    int finalB = avgB[maxIntensityCountIndex] / maxIntensityCount;
    const int outOffset = (i * width + j) * 4;
    out_image[outOffset + 0] = finalR;
    out_image[outOffset + 1] = finalG;
    out_image[outOffset + 2] = finalB;
    out_image[outOffset + 3] = 255;
}
// Host wrapper for the paint kernel (HIP build): uploads the RGBA image,
// launches the kernel on a 2D grid tiling width x height, and downloads the
// result. On any HIP error the function reports to stderr and returns early.
void cuda_paint_prepare (unsigned char* in_image, unsigned char* out_image, int width, int height, int radius, int nBins) {
    unsigned char* dev_input;   // device copy of the input image
    unsigned char* dev_output;  // device buffer for the filtered image
    int malsize = width * height * 4 * sizeof(unsigned char);  // RGBA: 4 bytes/pixel
    fprintf(stdout, "Doing cuda paint...\n");
    hipError_t mallocstatus = hipMalloc( (void**) &dev_input, malsize);
    if (mallocstatus != hipSuccess) {
        fprintf(stderr, "Malloc went wrong: %s\n", hipGetErrorString(mallocstatus));
        return;  // nothing to clean up yet
    }
    hipError_t memcpystatus = hipMemcpy( dev_input, in_image, malsize, hipMemcpyHostToDevice );
    if (memcpystatus != hipSuccess) {
        fprintf(stderr, "Memcpy went wrong: %s\n", hipGetErrorString(memcpystatus));
        hipFree(dev_input);
        return;
    }
    hipError_t mallocoutputstatus = hipMalloc( (void**) &dev_output, malsize);
    if (mallocoutputstatus != hipSuccess) {
        fprintf(stderr, "Malloc went wrong: %s\n", hipGetErrorString(mallocoutputstatus));
        hipFree(dev_input);
        return;
    }
    // The kernel derives a 2D (row, column) pixel position from the block and
    // thread indices, so it must be launched on a 2D grid tiling the image.
    // The previous 1D launch left blockIdx.y at 0 and covered only row 0 of
    // the grid (and its ceil() was applied after an integer division).
    dim3 blockDims(16, 16);
    dim3 gridDims((width  + blockDims.x - 1) / blockDims.x,
                  (height + blockDims.y - 1) / blockDims.y);
    fprintf(stdout, "gridSize: %u x %u\n", gridDims.x, gridDims.y);
    fprintf(stdout, "blockSize: %u x %u\n", blockDims.x, blockDims.y);
    hipLaunchKernelGGL(( cuda_paint), gridDims, blockDims, 0, 0, dev_input, dev_output, width, height, radius, nBins);
    // Kernel launches are asynchronous and return no status directly.
    hipError_t launchstatus = hipGetLastError();
    if (launchstatus != hipSuccess) {
        fprintf(stderr, "Kernel launch went wrong: %s\n", hipGetErrorString(launchstatus));
    }
    hipError_t copybackstatus = hipMemcpy(out_image, dev_output, malsize, hipMemcpyDeviceToHost);
    if (copybackstatus != hipSuccess) {
        fprintf(stderr, "Copy back went wrong: %s\n", hipGetErrorString(copybackstatus));
    }
    hipFree(dev_input);
    hipFree(dev_output);
    hipDeviceReset();
}
| 140097267abb260ae9123cdd68d9875c2399507f.cu | #include <stdio.h>
#include <stdlib.h>
#include <cmath>
#include <cuda.h>
#include "paint_kernel.h"
#define C_MAX_INTENSITIES 256
// Oil-painting filter: for each pixel, bucket the intensities of the pixels in
// a (2*radius+1)^2 neighbourhood into nBins bins and output the average colour
// of the most populated bin.
// Expects RGBA images (4 bytes per pixel, row-major) and a 2D launch grid
// covering width x height. Requires nBins < C_MAX_INTENSITIES.
__global__
void cuda_paint (unsigned char* in_image, unsigned char* out_image, int width, int height, int radius, int nBins) {
    // http://supercomputingblog.com/cuda/advanced-image-processing-with-cuda/2/
    int i = blockIdx.y * blockDim.y + threadIdx.y;  // row
    int j = blockIdx.x * blockDim.x + threadIdx.x;  // column
    // Test to see if we're testing a valid pixel
    if (i >= height || j >= width) return; // Don't bother doing the calculation. We're not in a valid pixel location
    int intensityCount[C_MAX_INTENSITIES];
    int avgR[C_MAX_INTENSITIES];
    int avgG[C_MAX_INTENSITIES];
    int avgB[C_MAX_INTENSITIES];
    for (int k=0; k <= nBins; k++) {
        intensityCount[k] = 0;
        avgR[k] = 0;
        avgG[k] = 0;
        avgB[k] = 0;
    }
    // Scan the (2*radius+1)^2 neighbourhood, clipped at the image borders.
    int maxIntensityCount = 0;
    int maxIntensityCountIndex = 0;
    for (int k=i-radius; k <= i+radius;k++) {
        if (k < 0 || k >= height) continue;
        for (int l=j-radius; l <= j+radius; l++) {
            if (l < 0 || l >= width) continue;
            // Row-major RGBA: pixel (row k, col l) starts at (k*width + l)*4.
            // The previous offset (j + k + l*width)*4 addressed the wrong pixels.
            const int currentoffset = (k * width + l) * 4;
            // Each channel is already a single byte; read it directly. The old
            // packed-pixel masks (e.g. byte & 0x00ff0000 >> 16) always yielded
            // 0 for the red and green channels.
            int r = in_image[currentoffset + 0];
            int g = in_image[currentoffset + 1];
            int b = in_image[currentoffset + 2];
            // Map the average intensity (0..255) onto one of the nBins buckets.
            int curIntensity = (int)((float)((r+g+b)/3*nBins)/255.0f);
            intensityCount[curIntensity]++;
            if (intensityCount[curIntensity] > maxIntensityCount) {
                maxIntensityCount = intensityCount[curIntensity];
                maxIntensityCountIndex = curIntensity;
            }
            avgR[curIntensity] += r;
            avgG[curIntensity] += g;
            avgB[curIntensity] += b;
        }
    }
    // Output the mean colour of the dominant intensity bucket at (i, j);
    // previously the store ignored the row and always wrote into row 0.
    int finalR = avgR[maxIntensityCountIndex] / maxIntensityCount;
    int finalG = avgG[maxIntensityCountIndex] / maxIntensityCount;
    int finalB = avgB[maxIntensityCountIndex] / maxIntensityCount;
    const int outOffset = (i * width + j) * 4;
    out_image[outOffset + 0] = finalR;
    out_image[outOffset + 1] = finalG;
    out_image[outOffset + 2] = finalB;
    out_image[outOffset + 3] = 255;
}
// Host wrapper for the paint kernel: uploads the RGBA image, launches the
// kernel on a 2D grid tiling width x height, and downloads the result.
// On any CUDA error the function reports to stderr and returns early.
void cuda_paint_prepare (unsigned char* in_image, unsigned char* out_image, int width, int height, int radius, int nBins) {
    unsigned char* dev_input;   // device copy of the input image
    unsigned char* dev_output;  // device buffer for the filtered image
    int malsize = width * height * 4 * sizeof(unsigned char);  // RGBA: 4 bytes/pixel
    fprintf(stdout, "Doing cuda paint...\n");
    cudaError_t mallocstatus = cudaMalloc( (void**) &dev_input, malsize);
    if (mallocstatus != cudaSuccess) {
        fprintf(stderr, "Malloc went wrong: %s\n", cudaGetErrorString(mallocstatus));
        return;  // nothing to clean up yet
    }
    cudaError_t memcpystatus = cudaMemcpy( dev_input, in_image, malsize, cudaMemcpyHostToDevice );
    if (memcpystatus != cudaSuccess) {
        fprintf(stderr, "Memcpy went wrong: %s\n", cudaGetErrorString(memcpystatus));
        cudaFree(dev_input);
        return;
    }
    cudaError_t mallocoutputstatus = cudaMalloc( (void**) &dev_output, malsize);
    if (mallocoutputstatus != cudaSuccess) {
        fprintf(stderr, "Malloc went wrong: %s\n", cudaGetErrorString(mallocoutputstatus));
        cudaFree(dev_input);
        return;
    }
    // The kernel derives a 2D (row, column) pixel position from the block and
    // thread indices, so it must be launched on a 2D grid tiling the image.
    // The previous 1D launch left blockIdx.y at 0 and covered only row 0 of
    // the grid (and its ceil() was applied after an integer division).
    dim3 blockDims(16, 16);
    dim3 gridDims((width  + blockDims.x - 1) / blockDims.x,
                  (height + blockDims.y - 1) / blockDims.y);
    fprintf(stdout, "gridSize: %u x %u\n", gridDims.x, gridDims.y);
    fprintf(stdout, "blockSize: %u x %u\n", blockDims.x, blockDims.y);
    cuda_paint<<<gridDims, blockDims>>> (dev_input, dev_output, width, height, radius, nBins);
    // Kernel launches are asynchronous and return no status directly.
    cudaError_t launchstatus = cudaGetLastError();
    if (launchstatus != cudaSuccess) {
        fprintf(stderr, "Kernel launch went wrong: %s\n", cudaGetErrorString(launchstatus));
    }
    cudaError_t copybackstatus = cudaMemcpy(out_image, dev_output, malsize, cudaMemcpyDeviceToHost);
    if (copybackstatus != cudaSuccess) {
        fprintf(stderr, "Copy back went wrong: %s\n", cudaGetErrorString(copybackstatus));
    }
    cudaFree(dev_input);
    cudaFree(dev_output);
    cudaDeviceReset();
}
|
57b356c8a1ac8021b888d3b55b08dd17a7f039e6.hip | // !!! This is a file automatically generated by hipify!!!
#pragma once
#ifndef SIM_TEST_H
#define SIM_TEST_H
#include "../common.h"
#if IS_TESTING
#include "simbody.cu"
#include "simulation.h"
#include "simtester.h"
#include <iostream>
#include <iomanip>
#include <vector>
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <cstdint>
// Reference O(n^2) N-body pass on the CPU: for each sample step, every
// body's force is reset and re-accumulated against every other body, then
// all bodies are advanced with a fixed 25000.0f time step. Returns the
// advanced copy of `bodies`.
std::vector<SimBody> SimHostTest(const std::vector<SimBody>& bodies, uint32_t num_samples)
{
    std::vector<SimBody> result(bodies);
    const float kTimeStep = 25000.0f;
    for (uint32_t step = 0; step < num_samples; ++step) {
        for (size_t a = 0; a < result.size(); ++a) {
            result[a].ResetForce();
            for (size_t b = 0; b < result.size(); ++b) {
                if (a != b) {
                    result[a].AddForce(result[b]);
                }
            }
        }
        for (size_t a = 0; a < result.size(); ++a) {
            result[a].Tick(kTimeStep);
        }
    }
    return result;
}
// Runs the N-body simulation on the device: the bodies are copied into a
// thrust::device_vector, then for each of num_samples steps the SimCalc
// kernel accumulates pairwise forces and the SimTick kernel advances every
// body by a fixed 25000.0f time step. The advanced bodies are copied back
// and returned. If any HIP call fails, the default-constructed host vector
// of the same size is returned as-is.
std::vector<SimBody> SimDeviceTest(const std::vector<SimBody>& bodies, uint32_t num_samples)
{
thrust::device_vector<SimBody> d_bodies;
std::vector<SimBody> h_bodies(bodies.size());
// Fixed integration step, matching SimHostTest's Tick(25000.0f).
float timeStep = 25000.0f;
d_bodies = bodies;
int device;
hipDeviceProp_t prop;
// Clear/inspect any sticky error left by earlier API calls.
hipError_t err = hipGetLastError();
if (err != hipSuccess)
{
std::cout << hipGetErrorString(err) << std::endl;
return h_bodies;
}
err = hipGetDevice(&device);
if (err != hipSuccess)
{
std::cout << "Error getting CUDA device... aborting." << std::endl;
return h_bodies;
}
err = hipGetDeviceProperties(&prop, device);
if (err == hipErrorInvalidDevice)
{
std::cout << "Invalid CUDA device found... aborting." << std::endl;
return h_bodies;
}
BodyArray arr = MakeArray(d_bodies);
// Derive a thread count from the device's per-SM thread capacity.
int maxBlocks = prop.major > 2 ? 16 : 8;
int threads = prop.maxThreadsPerMultiProcessor / maxBlocks;
// NOTE(review): `blocks` is computed from the body count but never used --
// both kernels below are launched with <<<threads, threads>>>. Verify that
// this launch configuration (rather than <<<blocks, threads>>>) is intended.
int blocks = (arr.size + threads - 1) / threads;
threads = (threads + 1) & ~1; // round up to an even thread count
for(uint32_t sample(0); sample != num_samples; ++sample) {
hipLaunchKernelGGL(( SimCalc) , dim3(threads), dim3(threads) , 0, 0, arr);
//Ensure that we have done all calculations before we move on to tick.
hipDeviceSynchronize();
hipLaunchKernelGGL(( SimTick) , dim3(threads), dim3(threads), 0, 0, arr, timeStep);
//Ensure that we have ticked all before we move to calculate the average.
hipDeviceSynchronize();
}
//Copy the data back to the host.
thrust::copy(d_bodies.begin(), d_bodies.end(), h_bodies.begin());
return h_bodies;
}
// Builds `num_bodies` random bodies, simulates them for `samples` steps on
// both device and host, and counts how many bodies end up equal in the two
// results. Writes the matching fraction to *percentage (when non-null) and
// returns whether it exceeds ACCURACY.
bool SimTest(uint32_t num_bodies, uint32_t samples, float* percentage)
{
    srand((uint32_t)time(NULL));
    if (percentage != nullptr) {
        *percentage = 0.f;
    }
    std::vector<SimBody> initial;
    initial.reserve(num_bodies);
    for (uint32_t n = 0; n < num_bodies; ++n) {
        initial.push_back(SimBody(
            random(1.0E11f, 3.0E11f),
            random(-6.0E11f, 9.0E11f),
            random(-1000.0f, 1000.0f),
            random(-1000.0f, 1000.0f),
            random(1.0E9f, 1.0E24f)));
    }
    std::vector<SimBody> fromDevice = SimDeviceTest(initial, samples);
    std::vector<SimBody> fromHost = SimHostTest(initial, samples);
    if (fromDevice.size() != num_bodies || fromHost.size() != num_bodies) {
        return false;
    }
    unsigned matches = 0;
    for (size_t k = 0; k < fromDevice.size() && k < fromHost.size(); ++k) {
        if (fromDevice[k] == fromHost[k]) {
            ++matches;
        }
    }
    float fraction = float(matches) / num_bodies;
    if (percentage != nullptr) {
        *percentage = fraction;
    }
    return fraction > ACCURACY;
}
// Drives a fixed battery of SimTest runs (plus `extra_passes` randomized
// ones), printing one pass/fail line with the match percentage per run and
// a final summary of how many runs failed.
void SimFullTest(uint32_t extra_passes)
{
    uint32_t failures = 0;
    auto run_case = [](uint32_t bodies, uint32_t samples) -> bool {
        std::cout << "Testing " << bodies << " bodies (" << samples << " samples)...";
        float ratio;
        const bool ok = SimTest(bodies, samples, &ratio);
        std::cout << std::setprecision(2) << std::fixed
                  << (ok ? " passed. (" : " failed. (")
                  << (ratio * 100.0f) << "%)" << std::endl;
        return ok;
    };
    failures += !run_case(100, 128);
    failures += !run_case(200, 128);
    failures += !run_case(300, 512);
    for (uint32_t pass = 0; pass < extra_passes; ++pass) {
        failures += !run_case((uint32_t)random(50, 250), (uint32_t)random(64, 128));
    }
    failures += !run_case(100, 1024);
    failures += !run_case(5120, 32);
    if (failures == 0)
        std::cout << "All tests passed!" << std::endl;
    else
        std::cout << failures << " tests failed!" << std::endl;
}
#endif //IS_TESTING
#endif //SIM_TEST_H | 57b356c8a1ac8021b888d3b55b08dd17a7f039e6.cu | #pragma once
#ifndef SIM_TEST_H
#define SIM_TEST_H
#include "../common.h"
#if IS_TESTING
#include "simbody.cu"
#include "simulation.h"
#include "simtester.h"
#include <iostream>
#include <iomanip>
#include <vector>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <cstdint>
// Reference O(n^2) N-body pass on the CPU: for each sample step, every
// body's force is reset and re-accumulated against every other body, then
// all bodies are advanced with a fixed 25000.0f time step. Returns the
// advanced copy of `bodies`.
std::vector<SimBody> SimHostTest(const std::vector<SimBody>& bodies, uint32_t num_samples)
{
    std::vector<SimBody> result(bodies);
    const float kTimeStep = 25000.0f;
    for (uint32_t step = 0; step < num_samples; ++step) {
        for (size_t a = 0; a < result.size(); ++a) {
            result[a].ResetForce();
            for (size_t b = 0; b < result.size(); ++b) {
                if (a != b) {
                    result[a].AddForce(result[b]);
                }
            }
        }
        for (size_t a = 0; a < result.size(); ++a) {
            result[a].Tick(kTimeStep);
        }
    }
    return result;
}
// Runs the N-body simulation on the device: uploads the bodies into a
// thrust::device_vector, then for each of num_samples steps launches the
// SimCalc kernel (force accumulation) followed by SimTick (integration
// with a fixed 25000.0f step), synchronizing between the two. The advanced
// bodies are copied back and returned. If any CUDA call fails, the
// default-constructed host vector of the same size is returned as-is.
std::vector<SimBody> SimDeviceTest(const std::vector<SimBody>& bodies, uint32_t num_samples)
{
    thrust::device_vector<SimBody> d_bodies;
    std::vector<SimBody> h_bodies(bodies.size());
    float timeStep = 25000.0f;
    d_bodies = bodies;
    int device;
    cudaDeviceProp prop;
    // Clear/inspect any sticky error left by earlier API calls.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        std::cout << cudaGetErrorString(err) << std::endl;
        return h_bodies;
    }
    err = cudaGetDevice(&device);
    if (err != cudaSuccess)
    {
        std::cout << "Error getting CUDA device... aborting." << std::endl;
        return h_bodies;
    }
    err = cudaGetDeviceProperties(&prop, device);
    if (err == cudaErrorInvalidDevice)
    {
        std::cout << "Invalid CUDA device found... aborting." << std::endl;
        return h_bodies;
    }
    BodyArray arr = MakeArray(d_bodies);
    // Derive a thread count from the device's per-SM thread capacity.
    int maxBlocks = prop.major > 2 ? 16 : 8;
    int threads = prop.maxThreadsPerMultiProcessor / maxBlocks;
    threads = (threads + 1) & ~1;  // round up to an even thread count
    // NOTE(review): the original also computed a block count from arr.size
    // that was never used; both kernels are launched with
    // <<<threads, threads>>>. Verify this launch configuration is intended.
    for (uint32_t sample(0); sample != num_samples; ++sample) {
        SimCalc <<< threads, threads >>>(arr);
        // Ensure that we have done all calculations before we move on to
        // tick. cudaThreadSynchronize() is deprecated (removed in CUDA 12);
        // cudaDeviceSynchronize() is the supported equivalent.
        cudaDeviceSynchronize();
        SimTick <<< threads, threads>>>(arr, timeStep);
        // Ensure that we have ticked all bodies before the next sample.
        cudaDeviceSynchronize();
    }
    // Copy the data back to the host.
    thrust::copy(d_bodies.begin(), d_bodies.end(), h_bodies.begin());
    return h_bodies;
}
// Builds `num_bodies` random bodies, simulates them for `samples` steps on
// both device and host, and counts how many bodies end up equal in the two
// results. Writes the matching fraction to *percentage (when non-null) and
// returns whether it exceeds ACCURACY.
bool SimTest(uint32_t num_bodies, uint32_t samples, float* percentage)
{
    srand((uint32_t)time(NULL));
    if (percentage != nullptr) {
        *percentage = 0.f;
    }
    std::vector<SimBody> initial;
    initial.reserve(num_bodies);
    for (uint32_t n = 0; n < num_bodies; ++n) {
        initial.push_back(SimBody(
            random(1.0E11f, 3.0E11f),
            random(-6.0E11f, 9.0E11f),
            random(-1000.0f, 1000.0f),
            random(-1000.0f, 1000.0f),
            random(1.0E9f, 1.0E24f)));
    }
    std::vector<SimBody> fromDevice = SimDeviceTest(initial, samples);
    std::vector<SimBody> fromHost = SimHostTest(initial, samples);
    if (fromDevice.size() != num_bodies || fromHost.size() != num_bodies) {
        return false;
    }
    unsigned matches = 0;
    for (size_t k = 0; k < fromDevice.size() && k < fromHost.size(); ++k) {
        if (fromDevice[k] == fromHost[k]) {
            ++matches;
        }
    }
    float fraction = float(matches) / num_bodies;
    if (percentage != nullptr) {
        *percentage = fraction;
    }
    return fraction > ACCURACY;
}
// Drives a fixed battery of SimTest runs (plus `extra_passes` randomized
// ones), printing one pass/fail line with the match percentage per run and
// a final summary of how many runs failed.
void SimFullTest(uint32_t extra_passes)
{
    uint32_t failures = 0;
    auto run_case = [](uint32_t bodies, uint32_t samples) -> bool {
        std::cout << "Testing " << bodies << " bodies (" << samples << " samples)...";
        float ratio;
        const bool ok = SimTest(bodies, samples, &ratio);
        std::cout << std::setprecision(2) << std::fixed
                  << (ok ? " passed. (" : " failed. (")
                  << (ratio * 100.0f) << "%)" << std::endl;
        return ok;
    };
    failures += !run_case(100, 128);
    failures += !run_case(200, 128);
    failures += !run_case(300, 512);
    for (uint32_t pass = 0; pass < extra_passes; ++pass) {
        failures += !run_case((uint32_t)random(50, 250), (uint32_t)random(64, 128));
    }
    failures += !run_case(100, 1024);
    failures += !run_case(5120, 32);
    if (failures == 0)
        std::cout << "All tests passed!" << std::endl;
    else
        std::cout << failures << " tests failed!" << std::endl;
}
#endif //IS_TESTING
#endif //SIM_TEST_H |
847422b887f12d866a505f8b46a03824db2fe3b2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_header.h"
/*
GPU kernel to merge, using a batch method, multiple couples of array (a,b).
*/
// Batched "merge path" kernel: merges nb_merge independent pairs of sorted
// int arrays into M. Pair g occupies A[g*length_A ..] and B[g*length_B ..]
// and produces M[g*d ..] with d = length_A + length_B. Each thread computes
// exactly one output element by binary-searching its diagonal of the merge
// path. Threads of one merge group are assumed to lie in the same block
// (the host picks blockDim.x as a multiple of d -- TODO confirm for
// user-supplied tpb values).
__global__ void mergeSmallBatch_k(int *A, int length_A, int *B, int length_B, int *M, int nb_merge) {
int nb_threads = gridDim.x * blockDim.x,
gtidx = threadIdx.x + blockIdx.x * blockDim.x,
d = length_A+length_B;
int t, tidx, qt, gbx, begin_M, begin_A, begin_B;
// duo holds a 2-D merge-path coordinate (.x indexes into B, .y into A).
duo K, P, Q;
// Grid-stride style loop over all nb_merge*d output elements.
while(gtidx < nb_merge*d) {
t = gtidx % blockDim.x, // index of the thread inside a block : 0 -> blockDim.x - 1
tidx = t%d, // index of the thread in its corresponding final array : 0 -> d-1
qt = (t-tidx)/d, // index of the group of the thread inside a block : 0 -> (blockDim.x/d)-1
gbx = (blockDim.x/d)*blockIdx.x + qt, // index of the group of the thread among all the blocks : 0 -> (blockDim.x/d)*gridDim.x - 1
begin_M = gbx*d, // index of the first element of M
begin_A = gbx*length_A, // index of the first element of A
begin_B = gbx*length_B; // index of the first element of B
// Initialise the two endpoints K and P of the search window on the
// diagonal tidx of the merge path.
if (tidx > length_A) {
K.x = tidx - length_A;
K.y = length_A;
P.x = length_A;
P.y = tidx - length_A;
}else {
K.x = 0;
K.y = tidx;
P.x = tidx;
P.y = 0;
}
// Binary search along the diagonal for the crossing point Q of the
// merge path; once found, output the smaller of the two sub-array
// heads at Q.
while (true) {
int offset = abs(K.y - P.y) / 2;
Q.x = K.x + offset;
Q.y = K.y - offset;
if (Q.y >= 0 && Q.x <= length_B && (Q.y == length_A || Q.x == 0 || A[begin_A + Q.y] > B[begin_B + Q.x - 1])) {
if (Q.x == length_B || Q.y == 0 || A[begin_A + Q.y - 1] <= B[begin_B + Q.x]) {
if (Q.y < length_A && (Q.x == length_B || A[begin_A + Q.y] <= B[begin_B + Q.x])) {
M[begin_M + tidx] = A[begin_A + Q.y];
}else {
M[begin_M + tidx] = B[begin_B + Q.x];
}
break;
}
else {
// Q lies below the path: move the low endpoint up.
K.x = Q.x + 1;
K.y = Q.y - 1;
}
}
else {
// Q lies above the path: move the high endpoint down.
P.x = Q.x - 1;
P.y = Q.y + 1;
}
}
gtidx += nb_threads;
}
}
/*
 * Test driver: generates N pairs of sorted arrays (A of size SIZE_A, B of
 * size SIZE_B), merges all pairs on the GPU with mergeSmallBatch_k, checks
 * that each of the N merged arrays of size SIZE_M is sorted, and reports
 * the kernel time measured with HIP events.
 *
 * Optional argv[1] overrides the threads-per-block choice.
 */
int main(int argc, char *argv[]) {
    // initialize random seed
    srand(time(0));
    testCUDA(hipDeviceReset());
    hipEvent_t start, stop;
    testCUDA(hipEventCreate(&start));
    testCUDA(hipEventCreate(&stop));
    float timer;  // kernel elapsed time, milliseconds
    /* Number of threads per block.
       We want to launch N*SIZE_M threads to bind each thread with one element
       in the final array. We choose N*SIZE_M if it fits into one block;
       otherwise the biggest multiple of SIZE_M <= 1024, so that every merge
       group lies entirely inside one block. */
    int tpb = min(N*SIZE_M, ((int)(1024/SIZE_M))*SIZE_M);
    // To overwrite the default choice of tpb, just give it as the first program argument
    if(argc == 2) tpb = atoi(argv[1]);
    // number of blocks (ceiling division so every element gets a thread)
    int blk = (N*SIZE_M + tpb - 1)/tpb;
    printf("------------------------------------------------------------------------------------------------------------------------------\n\n");
    printf("N : %d\n",N);
    printf("Size of A : %d\tSize of B : %d\tSize of M : %d\n",SIZE_A,SIZE_B,SIZE_M);
    printf("Minimum number of threads to launch : %d\n",N*SIZE_M);
    printf("Nb Blocks : %d\tNb threads/block : %d\t==>\t%d threads\n",blk, tpb, blk*tpb);
    printf("\n------------------------------------------------------------------------------------------------------------------------------\n");
    // Allocate CPU buffers for the two inputs and the output.
    int* A = generate_array_batch(N*SIZE_A, SIZE_A);
    int* B = generate_array_batch(N*SIZE_B, SIZE_B);
    int* M = (int*)malloc(N*SIZE_M*sizeof(int));
    // Declare GPU buffers
    int *dev_a = nullptr;
    int *dev_b = nullptr;
    int *dev_m = nullptr;
    // Allocate GPU buffers for three vectors (two input, one output).
    testCUDA(hipMalloc((void**)&dev_a, N*SIZE_A*sizeof(int)));
    testCUDA(hipMalloc((void**)&dev_b, N*SIZE_B*sizeof(int)));
    testCUDA(hipMalloc((void**)&dev_m, N*SIZE_M*sizeof(int)));
    // Copy input vectors from host memory to GPU buffers.
    testCUDA(hipMemcpy(dev_a, A, N*SIZE_A*sizeof(int), hipMemcpyHostToDevice));
    testCUDA(hipMemcpy(dev_b, B, N*SIZE_B*sizeof(int), hipMemcpyHostToDevice));
    testCUDA(hipEventRecord(start, 0));
    // Launch merge kernel on GPU
    mergeSmallBatch_k<<<blk, tpb>>>(dev_a, SIZE_A, dev_b, SIZE_B, dev_m, N);
    // A kernel launch returns no status of its own; query it explicitly so
    // a bad launch configuration does not fail silently.
    testCUDA(hipGetLastError());
    // Wait until work is done (and surface any asynchronous kernel error).
    testCUDA(hipDeviceSynchronize());
    testCUDA(hipEventRecord(stop, 0));
    testCUDA(hipEventSynchronize(stop));
    testCUDA(hipEventElapsedTime(&timer, start, stop));
    // Copy result from GPU RAM into CPU RAM
    testCUDA(hipMemcpy(M, dev_m, N*SIZE_M*sizeof(int), hipMemcpyDeviceToHost));
    // Verify each of the N merged slices independently.
    bool sorted = true;
    for(int k=0 ; k<N ; ++k) {
        sorted = check_array_sorted_no_print(&M[k*SIZE_M], SIZE_M);
        if(!sorted) break;
    }
    if(sorted) {
        printf("Batch SORT : OK\n");
        printf("======================================================\n");
        printf("Time to merge %d arrays of size %d : %f ms\n",N,SIZE_M,timer);
        printf("======================================================\n");
    }
    else {
        printf("BATCH SORT : KO\n");
    }
    // Free events, then both CPU and GPU memory allocated.
    testCUDA(hipEventDestroy(stop));
    testCUDA(hipEventDestroy(start));
    testCUDA(hipFree(dev_m));
    testCUDA(hipFree(dev_b));
    testCUDA(hipFree(dev_a));
    free(M);
    free(B);
    free(A);
}
| 847422b887f12d866a505f8b46a03824db2fe3b2.cu | #include "cuda_header.h"
/*
GPU kernel to merge, using a batch method, multiple couples of array (a,b).
*/
// Batched "merge path" kernel: merges nb_merge independent pairs of sorted
// int arrays into M. Pair g occupies A[g*length_A ..] and B[g*length_B ..]
// and produces M[g*d ..] with d = length_A + length_B. Each thread computes
// exactly one output element by binary-searching its diagonal of the merge
// path. Threads of one merge group are assumed to lie in the same block
// (the host picks blockDim.x as a multiple of d -- TODO confirm for
// user-supplied tpb values).
__global__ void mergeSmallBatch_k(int *A, int length_A, int *B, int length_B, int *M, int nb_merge) {
int nb_threads = gridDim.x * blockDim.x,
gtidx = threadIdx.x + blockIdx.x * blockDim.x,
d = length_A+length_B;
int t, tidx, qt, gbx, begin_M, begin_A, begin_B;
// duo holds a 2-D merge-path coordinate (.x indexes into B, .y into A).
duo K, P, Q;
// Grid-stride style loop over all nb_merge*d output elements.
while(gtidx < nb_merge*d) {
t = gtidx % blockDim.x, // index of the thread inside a block : 0 -> blockDim.x - 1
tidx = t%d, // index of the thread in its corresponding final array : 0 -> d-1
qt = (t-tidx)/d, // index of the group of the thread inside a block : 0 -> (blockDim.x/d)-1
gbx = (blockDim.x/d)*blockIdx.x + qt, // index of the group of the thread among all the blocks : 0 -> (blockDim.x/d)*gridDim.x - 1
begin_M = gbx*d, // index of the first element of M
begin_A = gbx*length_A, // index of the first element of A
begin_B = gbx*length_B; // index of the first element of B
// Initialise the two endpoints K and P of the search window on the
// diagonal tidx of the merge path.
if (tidx > length_A) {
K.x = tidx - length_A;
K.y = length_A;
P.x = length_A;
P.y = tidx - length_A;
}else {
K.x = 0;
K.y = tidx;
P.x = tidx;
P.y = 0;
}
// Binary search along the diagonal for the crossing point Q of the
// merge path; once found, output the smaller of the two sub-array
// heads at Q.
while (true) {
int offset = abs(K.y - P.y) / 2;
Q.x = K.x + offset;
Q.y = K.y - offset;
if (Q.y >= 0 && Q.x <= length_B && (Q.y == length_A || Q.x == 0 || A[begin_A + Q.y] > B[begin_B + Q.x - 1])) {
if (Q.x == length_B || Q.y == 0 || A[begin_A + Q.y - 1] <= B[begin_B + Q.x]) {
if (Q.y < length_A && (Q.x == length_B || A[begin_A + Q.y] <= B[begin_B + Q.x])) {
M[begin_M + tidx] = A[begin_A + Q.y];
}else {
M[begin_M + tidx] = B[begin_B + Q.x];
}
break;
}
else {
// Q lies below the path: move the low endpoint up.
K.x = Q.x + 1;
K.y = Q.y - 1;
}
}
else {
// Q lies above the path: move the high endpoint down.
P.x = Q.x - 1;
P.y = Q.y + 1;
}
}
gtidx += nb_threads;
}
}
/*
 * Test driver: generates N pairs of sorted arrays (A of size SIZE_A, B of
 * size SIZE_B), merges all pairs on the GPU with mergeSmallBatch_k, checks
 * that each of the N merged arrays of size SIZE_M is sorted, and reports
 * the kernel time measured with CUDA events.
 *
 * Optional argv[1] overrides the threads-per-block choice.
 */
int main(int argc, char *argv[]) {
    // initialize random seed
    srand(time(0));
    testCUDA(cudaDeviceReset());
    cudaEvent_t start, stop;
    testCUDA(cudaEventCreate(&start));
    testCUDA(cudaEventCreate(&stop));
    float timer;  // kernel elapsed time, milliseconds
    /* Number of threads per block.
       We want to launch N*SIZE_M threads to bind each thread with one element
       in the final array. We choose N*SIZE_M if it fits into one block;
       otherwise the biggest multiple of SIZE_M <= 1024, so that every merge
       group lies entirely inside one block. */
    int tpb = min(N*SIZE_M, ((int)(1024/SIZE_M))*SIZE_M);
    // To overwrite the default choice of tpb, just give it as the first program argument
    if(argc == 2) tpb = atoi(argv[1]);
    // number of blocks (ceiling division so every element gets a thread)
    int blk = (N*SIZE_M + tpb - 1)/tpb;
    printf("------------------------------------------------------------------------------------------------------------------------------\n\n");
    printf("N : %d\n",N);
    printf("Size of A : %d\tSize of B : %d\tSize of M : %d\n",SIZE_A,SIZE_B,SIZE_M);
    printf("Minimum number of threads to launch : %d\n",N*SIZE_M);
    printf("Nb Blocks : %d\tNb threads/block : %d\t==>\t%d threads\n",blk, tpb, blk*tpb);
    printf("\n------------------------------------------------------------------------------------------------------------------------------\n");
    // Allocate CPU buffers for the two inputs and the output.
    int* A = generate_array_batch(N*SIZE_A, SIZE_A);
    int* B = generate_array_batch(N*SIZE_B, SIZE_B);
    int* M = (int*)malloc(N*SIZE_M*sizeof(int));
    // Declare GPU buffers
    int *dev_a = nullptr;
    int *dev_b = nullptr;
    int *dev_m = nullptr;
    // Allocate GPU buffers for three vectors (two input, one output).
    testCUDA(cudaMalloc((void**)&dev_a, N*SIZE_A*sizeof(int)));
    testCUDA(cudaMalloc((void**)&dev_b, N*SIZE_B*sizeof(int)));
    testCUDA(cudaMalloc((void**)&dev_m, N*SIZE_M*sizeof(int)));
    // Copy input vectors from host memory to GPU buffers.
    testCUDA(cudaMemcpy(dev_a, A, N*SIZE_A*sizeof(int), cudaMemcpyHostToDevice));
    testCUDA(cudaMemcpy(dev_b, B, N*SIZE_B*sizeof(int), cudaMemcpyHostToDevice));
    testCUDA(cudaEventRecord(start, 0));
    // Launch merge kernel on GPU
    mergeSmallBatch_k<<<blk, tpb>>>(dev_a, SIZE_A, dev_b, SIZE_B, dev_m, N);
    // A kernel launch returns no status of its own; query it explicitly so
    // a bad launch configuration does not fail silently.
    testCUDA(cudaGetLastError());
    // Wait until work is done (and surface any asynchronous kernel error).
    testCUDA(cudaDeviceSynchronize());
    testCUDA(cudaEventRecord(stop, 0));
    testCUDA(cudaEventSynchronize(stop));
    testCUDA(cudaEventElapsedTime(&timer, start, stop));
    // Copy result from GPU RAM into CPU RAM
    testCUDA(cudaMemcpy(M, dev_m, N*SIZE_M*sizeof(int), cudaMemcpyDeviceToHost));
    // Verify each of the N merged slices independently.
    bool sorted = true;
    for(int k=0 ; k<N ; ++k) {
        sorted = check_array_sorted_no_print(&M[k*SIZE_M], SIZE_M);
        if(!sorted) break;
    }
    if(sorted) {
        printf("Batch SORT : OK\n");
        printf("======================================================\n");
        printf("Time to merge %d arrays of size %d : %f ms\n",N,SIZE_M,timer);
        printf("======================================================\n");
    }
    else {
        printf("BATCH SORT : KO\n");
    }
    // Free events, then both CPU and GPU memory allocated.
    testCUDA(cudaEventDestroy(stop));
    testCUDA(cudaEventDestroy(start));
    testCUDA(cudaFree(dev_m));
    testCUDA(cudaFree(dev_b));
    testCUDA(cudaFree(dev_a));
    free(M);
    free(B);
    free(A);
}
ab8fa6d56acd6118d92cfee74d3177b70830202c.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @file dct2_fft2_cuda_kernel.cu
* @author Zixuan Jiang, Jiaqi Gu
* @date Apr 2019
* @brief Refernece: Byeong Lee, "A new algorithm to compute the discrete cosine Transform,"
* in IEEE Transactions on Acoustics, Speech, and Signal Processing, vol. 32, no. 6, pp. 1243-1245, December 1984.
* The preprocess and postprocess of 2d dct and 2d idct are discussed in the original paper.
* idct(idxst(x)) and idxst(idct(x)) are similar to the idct2d(x),
* except tiny modifications on preprocessing and postprocessing
*/
#include <math.h>
#include <float.h>
#include "hip/hip_runtime.h"
#include "utility/src/utils.cuh"
#define TPB (16)
DREAMPLACE_BEGIN_NAMESPACE
// Flattens a (row, col) coordinate into a 1-D offset for row-major storage
// with N elements per row.
inline __device__ int INDEX(const int hid, const int wid, const int N)
{
    const int row_base = hid * N;
    return row_base + wid;
}
// dct2_fft2
// Preprocessing for dct2_fft2: permutes the M x N input x into the flat
// buffer y (M*N elements, addressed here with row stride halfN = N/2)
// consumed by the FFT stage. The target slot of each element depends on
// the parity of its row and column (cond bit1 = row even, bit0 = column
// even), following the reordering of the DCT-via-FFT algorithm cited in
// the file header. One thread per input element, 2-D grid.
template <typename T>
__global__ void dct2dPreprocess(const T *x, T *y, const int M, const int N, const int halfN)
{
const int wid = blockDim.x * blockIdx.x + threadIdx.x;
const int hid = blockDim.y * blockIdx.y + threadIdx.y;
if (hid < M && wid < N)  // guard: the grid may overhang the matrix
{
int index;
int cond = (((hid & 1) == 0) << 1) | ((wid & 1) == 0);
switch (cond)
{
case 0: // odd row, odd column
index = INDEX(2 * M - (hid + 1), N - (wid + 1) / 2, halfN);
break;
case 1: // odd row, even column
index = INDEX(2 * M - (hid + 1), wid / 2, halfN);
break;
case 2: // even row, odd column
index = INDEX(hid, N - (wid + 1) / 2, halfN);
break;
case 3: // even row, even column
index = INDEX(hid, wid / 2, halfN);
break;
default:
break;
}
y[index] = x[INDEX(hid, wid, N)];
}
}
// Launches dct2dPreprocess with one thread per element of the M x N input,
// organised in TPB x TPB thread blocks.
template <typename T>
void dct2dPreprocessCudaLauncher(const T *x, T *y, const int M, const int N)
{
    const dim3 threads(TPB, TPB, 1);
    const dim3 blocks((N + TPB - 1) / TPB, (M + TPB - 1) / TPB, 1);
    hipLaunchKernelGGL(( dct2dPreprocess<T>), blocks, threads, 0, 0, x, y, M, N, N / 2);
}
// Postprocessing for dct2_fft2: converts the complex half-spectrum V (row
// stride halfN + 1, consistent with a real-to-complex FFT layout) into the
// final M x N real DCT coefficients y, applying the twiddle factors
// expkM/expkN and the 2/(MN) resp. 4/(MN) scale factors. One thread per
// element of the (M/2) x (N/2) quadrant; each thread also writes the
// symmetric partner positions. cond separates the hid==0 / wid==0
// boundary rows/columns from the generic interior case.
// __launch_bounds__ caps the block at TPB*TPB threads and asks for at
// least 8 resident blocks per multiprocessor.
template <typename T, typename TComplex>
__global__ void __launch_bounds__(TPB * TPB, 8) dct2dPostprocess(const TComplex *V, T *y, const int M, const int N,
const int halfM, const int halfN, const T two_over_MN, const T four_over_MN,
const TComplex *__restrict__ expkM, const TComplex *__restrict__ expkN)
{
const int wid = blockDim.x * blockIdx.x + threadIdx.x;
const int hid = blockDim.y * blockIdx.y + threadIdx.y;
if (hid < halfM && wid < halfN)
{
int cond = ((hid != 0) << 1) | (wid != 0);
switch (cond)
{
case 0: // hid == 0 && wid == 0: corner plus the three half-point terms
{
y[0] = V[0].x * four_over_MN;
y[halfN] = RealPartOfMul(expkN[halfN], V[halfN]) * four_over_MN;
y[INDEX(halfM, 0, N)] = expkM[halfM].x * V[INDEX(halfM, 0, halfN + 1)].x * four_over_MN;
y[INDEX(halfM, halfN, N)] = expkM[halfM].x * RealPartOfMul(expkN[halfN], V[INDEX(halfM, halfN, halfN + 1)]) * four_over_MN;
break;
}
case 1: // hid == 0, wid != 0: first row (and the halfM row)
{
ComplexType<T> tmp;
tmp = V[wid];
y[wid] = RealPartOfMul(expkN[wid], tmp) * four_over_MN;
y[N - wid] = -ImaginaryPartOfMul(expkN[wid], tmp) * four_over_MN;
tmp = V[INDEX(halfM, wid, halfN + 1)];
y[INDEX(halfM, wid, N)] = expkM[halfM].x * RealPartOfMul(expkN[wid], tmp) * four_over_MN;
y[INDEX(halfM, N - wid, N)] = -expkM[halfM].x * ImaginaryPartOfMul(expkN[wid], tmp) * four_over_MN;
break;
}
case 2: // hid != 0, wid == 0: first column and the halfN column
{
ComplexType<T> tmp1, tmp2, tmp_up, tmp_down;
tmp1 = V[INDEX(hid, 0, halfN + 1)];
tmp2 = V[INDEX(M - hid, 0, halfN + 1)];
tmp_up.x = expkM[hid].x * (tmp1.x + tmp2.x) + expkM[hid].y * (tmp2.y - tmp1.y);
tmp_down.x = -expkM[hid].y * (tmp1.x + tmp2.x) + expkM[hid].x * (tmp2.y - tmp1.y);
y[INDEX(hid, 0, N)] = tmp_up.x * two_over_MN;
y[INDEX(M - hid, 0, N)] = tmp_down.x * two_over_MN;
tmp1 = complexAdd(V[INDEX(hid, halfN, halfN + 1)], V[INDEX(M - hid, halfN, halfN + 1)]);
tmp2 = complexSubtract(V[INDEX(hid, halfN, halfN + 1)], V[INDEX(M - hid, halfN, halfN + 1)]);
tmp_up.x = expkM[hid].x * tmp1.x - expkM[hid].y * tmp2.y;
tmp_up.y = expkM[hid].x * tmp1.y + expkM[hid].y * tmp2.x;
tmp_down.x = -expkM[hid].y * tmp1.x - expkM[hid].x * tmp2.y;
tmp_down.y = -expkM[hid].y * tmp1.y + expkM[hid].x * tmp2.x;
y[INDEX(hid, halfN, N)] = RealPartOfMul(expkN[halfN], tmp_up) * two_over_MN;
y[INDEX(M - hid, halfN, N)] = RealPartOfMul(expkN[halfN], tmp_down) * two_over_MN;
break;
}
case 3: // generic interior element and its three mirrored partners
{
ComplexType<T> tmp1, tmp2, tmp_up, tmp_down;
tmp1 = complexAdd(V[INDEX(hid, wid, halfN + 1)], V[INDEX(M - hid, wid, halfN + 1)]);
tmp2 = complexSubtract(V[INDEX(hid, wid, halfN + 1)], V[INDEX(M - hid, wid, halfN + 1)]);
tmp_up.x = expkM[hid].x * tmp1.x - expkM[hid].y * tmp2.y;
tmp_up.y = expkM[hid].x * tmp1.y + expkM[hid].y * tmp2.x;
tmp_down.x = -expkM[hid].y * tmp1.x - expkM[hid].x * tmp2.y;
tmp_down.y = -expkM[hid].y * tmp1.y + expkM[hid].x * tmp2.x;
y[INDEX(hid, wid, N)] = RealPartOfMul(expkN[wid], tmp_up) * two_over_MN;
y[INDEX(M - hid, wid, N)] = RealPartOfMul(expkN[wid], tmp_down) * two_over_MN;
y[INDEX(hid, N - wid, N)] = -ImaginaryPartOfMul(expkN[wid], tmp_up) * two_over_MN;
y[INDEX(M - hid, N - wid, N)] = -ImaginaryPartOfMul(expkN[wid], tmp_down) * two_over_MN;
break;
}
default:
assert(0);
break;
}
}
}
// Launches dct2dPostprocess: one thread per element of the (M/2) x (N/2)
// quadrant, organised in TPB x TPB thread blocks.
template <typename T>
void dct2dPostprocessCudaLauncher(const T *x, T *y, const int M, const int N,
                                  const T *__restrict__ expkM, const T *__restrict__ expkN)
{
    const int halfM = M / 2;
    const int halfN = N / 2;
    const dim3 threads(TPB, TPB, 1);
    const dim3 blocks((halfN + TPB - 1) / TPB, (halfM + TPB - 1) / TPB, 1);
    hipLaunchKernelGGL(( dct2dPostprocess<T, ComplexType<T>>), blocks, threads, 0, 0,
                       (ComplexType<T> *)x, y, M, N, halfM, halfN,
                       (T)(2. / (M * N)), (T)(4. / (M * N)),
                       (ComplexType<T> *)expkM, (ComplexType<T> *)expkN);
}
// idct2_fft2
// Preprocessing for idct2_fft2: builds the complex half-spectrum `output`
// (row stride halfN + 1) from the real M x N DCT coefficients `input`,
// applying conjugated twiddle factors expkM/expkN so that the subsequent
// inverse FFT stage produces the IDCT (up to the reordering and MN scaling
// done in idct2_fft2Postprocess). One thread per element of the
// (M/2) x (N/2) quadrant; cond separates the hid==0 / wid==0 boundary
// cases from the generic interior one.
// __launch_bounds__ caps the block at TPB*TPB threads and asks for at
// least 8 resident blocks per multiprocessor.
template <typename T, typename TComplex>
__global__ void __launch_bounds__(TPB * TPB, 8) idct2_fft2Preprocess(const T *input, TComplex *output, const int M, const int N,
const int halfM, const int halfN,
const TComplex *__restrict__ expkM, const TComplex *__restrict__ expkN)
{
const int wid = blockDim.x * blockIdx.x + threadIdx.x;
const int hid = blockDim.y * blockIdx.y + threadIdx.y;
if (hid < halfM && wid < halfN)
{
int cond = ((hid != 0) << 1) | (wid != 0);
switch (cond)
{
case 0: // hid == 0 && wid == 0: DC term plus the three half-point terms
{
T tmp1;
TComplex tmp_up;
output[0].x = input[0];
output[0].y = 0;
tmp1 = input[halfN];
tmp_up.x = tmp1;
tmp_up.y = tmp1;
output[halfN] = complexConj(complexMul(expkN[halfN], tmp_up));
tmp1 = input[INDEX(halfM, 0, N)];
tmp_up.x = tmp1;
tmp_up.y = tmp1;
output[INDEX(halfM, 0, halfN + 1)] = complexConj(complexMul(expkM[halfM], tmp_up));
tmp1 = input[INDEX(halfM, halfN, N)];
tmp_up.x = 0;
tmp_up.y = 2 * tmp1;
output[INDEX(halfM, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[halfM], expkN[halfN]), tmp_up));
break;
}
case 1: // hid == 0, wid != 0: first row (and the halfM row)
{
TComplex tmp_up;
tmp_up.x = input[wid];
tmp_up.y = input[N - wid];
output[wid] = complexConj(complexMul(expkN[wid], tmp_up));
T tmp1 = input[INDEX(halfM, wid, N)];
T tmp2 = input[INDEX(halfM, N - wid, N)];
tmp_up.x = tmp1 - tmp2;
tmp_up.y = tmp1 + tmp2;
output[INDEX(halfM, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[halfM], expkN[wid]), tmp_up));
break;
}
case 2: // hid != 0, wid == 0: first column and the halfN column
{
T tmp1, tmp3;
TComplex tmp_up, tmp_down;
tmp1 = input[INDEX(hid, 0, N)];
tmp3 = input[INDEX(M - hid, 0, N)];
tmp_up.x = tmp1;
tmp_up.y = tmp3;
tmp_down.x = tmp3;
tmp_down.y = tmp1;
output[INDEX(hid, 0, halfN + 1)] = complexConj(complexMul(expkM[hid], tmp_up));
output[INDEX(M - hid, 0, halfN + 1)] = complexConj(complexMul(expkM[M - hid], tmp_down));
tmp1 = input[INDEX(hid, halfN, N)];
tmp3 = input[INDEX(M - hid, halfN, N)];
tmp_up.x = tmp1 - tmp3;
tmp_up.y = tmp3 + tmp1;
tmp_down.x = tmp3 - tmp1;
tmp_down.y = tmp1 + tmp3;
output[INDEX(hid, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[hid], expkN[halfN]), tmp_up));
output[INDEX(M - hid, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[M - hid], expkN[halfN]), tmp_down));
break;
}
case 3: // generic interior element and its mirrored partner row
{
T tmp1 = input[INDEX(hid, wid, N)];
T tmp2 = input[INDEX(hid, N - wid, N)];
T tmp3 = input[INDEX(M - hid, wid, N)];
T tmp4 = input[INDEX(M - hid, N - wid, N)];
TComplex tmp_up, tmp_down;
tmp_up.x = tmp1 - tmp4;
tmp_up.y = tmp3 + tmp2;
tmp_down.x = tmp3 - tmp2;
tmp_down.y = tmp1 + tmp4;
output[INDEX(hid, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[hid], expkN[wid]), tmp_up));
output[INDEX(M - hid, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[M - hid], expkN[wid]), tmp_down));
break;
}
default:
assert(0);
break;
}
}
}
// Launches idct2_fft2Preprocess: one thread per element of the
// (M/2) x (N/2) quadrant, organised in TPB x TPB thread blocks.
template <typename T>
void idct2_fft2PreprocessCudaLauncher(
    const T *x,
    T *y,
    const int M,
    const int N,
    const T *__restrict__ expkM,
    const T *__restrict__ expkN)
{
    const int halfM = M / 2;
    const int halfN = N / 2;
    const dim3 threads(TPB, TPB, 1);
    const dim3 blocks((halfN + TPB - 1) / TPB, (halfM + TPB - 1) / TPB, 1);
    hipLaunchKernelGGL(( idct2_fft2Preprocess<T, ComplexType<T>>), blocks, threads, 0, 0,
                       x, (ComplexType<T> *)y, M, N, halfM, halfN,
                       (ComplexType<T> *)expkM, (ComplexType<T> *)expkN);
}
// Postprocessing for idct2_fft2: scatters the inverse-FFT result x
// (quadrant order) back into natural row/column order in y while scaling
// by MN. Elements from the top/left halves land on even output
// rows/columns, elements from the bottom/right halves on the mirrored odd
// ones -- undoing the parity permutation of the forward preprocessing.
// One thread per element.
// NOTE(review): the halfN parameter is accepted but unused in this kernel.
template <typename T>
__global__ void idct2_fft2Postprocess(const T *x, T *y, const int M, const int N, const int halfN, const int MN)
{
const int wid = blockDim.x * blockIdx.x + threadIdx.x;
const int hid = blockDim.y * blockIdx.y + threadIdx.y;
if (hid < M && wid < N)
{
int cond = ((hid < M / 2) << 1) | (wid < N / 2);
int index;
switch (cond)
{
case 0: // bottom-right quadrant -> odd row, odd column
index = INDEX(((M - hid) << 1) - 1, ((N - wid) << 1) - 1, N);
break;
case 1: // bottom-left quadrant -> odd row, even column
index = INDEX(((M - hid) << 1) - 1, wid << 1, N);
break;
case 2: // top-right quadrant -> even row, odd column
index = INDEX(hid << 1, ((N - wid) << 1) - 1, N);
break;
case 3: // top-left quadrant -> even row, even column
index = INDEX(hid << 1, wid << 1, N);
break;
default:
assert(0);
break;
}
y[index] = x[INDEX(hid, wid, N)] * MN;
}
}
// Launches idct2_fft2Postprocess with one thread per element of the M x N
// result, organised in TPB x TPB thread blocks.
template <typename T>
void idct2_fft2PostprocessCudaLauncher(const T *x, T *y, const int M, const int N)
{
    const dim3 threads(TPB, TPB, 1);
    const dim3 blocks((N + TPB - 1) / TPB, (M + TPB - 1) / TPB, 1);
    hipLaunchKernelGGL(( idct2_fft2Postprocess<T>), blocks, threads, 0, 0, x, y, M, N, N / 2, M * N);
}
// idct_idxst
// Adpated from idct2d_preprocess(). The only change is the reordered input
// if (wid != 0)
// new_input[hid][wid] = input[hid][N - wid];
// else
// new_input[hid][0] = 0
// Preprocessing for idct(idxst(x)): identical in structure to
// idct2_fft2Preprocess, except the input is read column-reversed
// (wid -> N - wid) and the wid == 0 column is treated as zero, per the
// note above this kernel. One thread per element of the (M/2) x (N/2)
// quadrant; cond separates the hid==0 / wid==0 boundary cases from the
// generic interior one.
template <typename T, typename TComplex>
__global__ void __launch_bounds__(TPB * TPB, 8) idct_idxstPreprocess(const T *input, TComplex *output, const int M, const int N,
const int halfM, const int halfN,
const TComplex *__restrict__ expkM, const TComplex *__restrict__ expkN)
{
const int wid = blockDim.x * blockIdx.x + threadIdx.x;
const int hid = blockDim.y * blockIdx.y + threadIdx.y;
if (hid < halfM && wid < halfN)
{
int cond = ((hid != 0) << 1) | (wid != 0);
switch (cond)
{
case 0: // hid == 0 && wid == 0: zeroed column entries plus half-points
{
T tmp1;
TComplex tmp_up;
output[0].x = 0;
output[0].y = 0;
tmp1 = input[halfN];
tmp_up.x = tmp1;
tmp_up.y = tmp1;
output[halfN] = complexConj(complexMul(expkN[halfN], tmp_up));
output[INDEX(halfM, 0, halfN + 1)].x = 0;
output[INDEX(halfM, 0, halfN + 1)].y = 0;
tmp1 = input[INDEX(halfM, halfN, N)];
tmp_up.x = 0;
tmp_up.y = 2 * tmp1;
output[INDEX(halfM, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[halfM], expkN[halfN]), tmp_up));
break;
}
case 1: // hid == 0, wid != 0: first row (columns read reversed)
{
TComplex tmp_up;
tmp_up.x = input[N - wid];
tmp_up.y = input[wid];
output[wid] = complexConj(complexMul(expkN[wid], tmp_up));
T tmp1 = input[INDEX(halfM, N - wid, N)];
T tmp2 = input[INDEX(halfM, wid, N)];
tmp_up.x = tmp1 - tmp2;
tmp_up.y = tmp1 + tmp2;
output[INDEX(halfM, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[halfM], expkN[wid]), tmp_up));
break;
}
case 2: // hid != 0, wid == 0: zeroed first column; halfN column as usual
{
T tmp1, tmp3;
TComplex tmp_up, tmp_down;
output[INDEX(hid, 0, halfN + 1)].x = 0;
output[INDEX(hid, 0, halfN + 1)].y = 0;
output[INDEX(M - hid, 0, halfN + 1)].x = 0;
output[INDEX(M - hid, 0, halfN + 1)].y = 0;
tmp1 = input[INDEX(hid, halfN, N)];
tmp3 = input[INDEX(M - hid, halfN, N)];
tmp_up.x = tmp1 - tmp3;
tmp_up.y = tmp3 + tmp1;
tmp_down.x = tmp3 - tmp1;
tmp_down.y = tmp1 + tmp3;
output[INDEX(hid, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[hid], expkN[halfN]), tmp_up));
output[INDEX(M - hid, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[M - hid], expkN[halfN]), tmp_down));
break;
}
case 3: // generic interior element (columns read reversed)
{
T tmp1 = input[INDEX(hid, N - wid, N)];
T tmp2 = input[INDEX(hid, wid, N)];
T tmp3 = input[INDEX(M - hid, N - wid, N)];
T tmp4 = input[INDEX(M - hid, wid, N)];
TComplex tmp_up, tmp_down;
tmp_up.x = tmp1 - tmp4;
tmp_up.y = tmp3 + tmp2;
tmp_down.x = tmp3 - tmp2;
tmp_down.y = tmp1 + tmp4;
output[INDEX(hid, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[hid], expkN[wid]), tmp_up));
output[INDEX(M - hid, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[M - hid], expkN[wid]), tmp_down));
break;
}
default:
assert(0);
break;
}
}
}
// Launches idct_idxstPreprocess: one thread per element of the
// (M/2) x (N/2) quadrant, organised in TPB x TPB thread blocks.
template <typename T>
void idct_idxstPreprocessCudaLauncher(const T *x, T *y, const int M, const int N,
                                      const T *__restrict__ expkM, const T *__restrict__ expkN)
{
    const int halfM = M / 2;
    const int halfN = N / 2;
    const dim3 threads(TPB, TPB, 1);
    const dim3 blocks((halfN + TPB - 1) / TPB, (halfM + TPB - 1) / TPB, 1);
    hipLaunchKernelGGL(( idct_idxstPreprocess<T, ComplexType<T>>), blocks, threads, 0, 0,
                       x, (ComplexType<T> *)y, M, N, halfM, halfN,
                       (ComplexType<T> *)expkM, (ComplexType<T> *)expkN);
}
// Adpated from idct2d_postprocess() with changes on sign and scale
// if (wid % 2 == 1)
// new_output[hid][wid] = -output[hid][wid];
// else
// new_output[hid][wid] = output[hid][wid];
// Postprocessing for idct(idxst(x)): same quadrant-to-natural-order
// scatter and MN scaling as idct2_fft2Postprocess, except elements landing
// in odd output columns are negated (see the sign note above this kernel).
// One thread per element.
template <typename T>
__global__ void idct_idxstPostprocess(const T *x, T *y, const int M, const int N, const int halfN, const int MN)
{
const int wid = blockDim.x * blockIdx.x + threadIdx.x;
const int hid = blockDim.y * blockIdx.y + threadIdx.y;
if (hid < M && wid < N)
{
int cond = ((hid < M / 2) << 1) | (wid < N / 2);
int index;
switch (cond)
{
case 0: // bottom-right quadrant -> odd row, odd column (negated)
index = INDEX(((M - hid) << 1) - 1, ((N - wid) << 1) - 1, N);
y[index] = -x[INDEX(hid, wid, N)] * MN;
break;
case 1: // bottom-left quadrant -> odd row, even column
index = INDEX(((M - hid) << 1) - 1, wid << 1, N);
y[index] = x[INDEX(hid, wid, N)] * MN;
break;
case 2: // top-right quadrant -> even row, odd column (negated)
index = INDEX(hid << 1, ((N - wid) << 1) - 1, N);
y[index] = -x[INDEX(hid, wid, N)] * MN;
break;
case 3: // top-left quadrant -> even row, even column
index = INDEX(hid << 1, wid << 1, N);
y[index] = x[INDEX(hid, wid, N)] * MN;
break;
default:
assert(0);
break;
}
}
}
// Launches idct_idxstPostprocess with one thread per element of the M x N
// result, organised in TPB x TPB thread blocks.
template <typename T>
void idct_idxstPostprocessCudaLauncher(const T *x, T *y, const int M, const int N)
{
    const dim3 threads(TPB, TPB, 1);
    const dim3 blocks((N + TPB - 1) / TPB, (M + TPB - 1) / TPB, 1);
    hipLaunchKernelGGL(( idct_idxstPostprocess<T>), blocks, threads, 0, 0, x, y, M, N, N / 2, M * N);
}
// idxst_idct
// Adapted from idct2d_preprocess(). The only change is the reordered input
// if (hid != 0)
//     new_input[hid][wid] = input[M - hid][wid];
// else
//     new_input[0][wid] = 0
//
// Packs the (row-reversed, row-0-zeroed) real M x N input into the
// M x (N/2+1) complex layout consumed by the inverse C2R FFT, multiplying by
// the twiddle factors expkM/expkN. Each thread of the half-plane
// [0, M/2) x [0, N/2) also writes the mirrored entries; `cond` selects the
// dedicated formulas for the hid == 0 / wid == 0 boundary lines.
template <typename T, typename TComplex>
__global__ void __launch_bounds__(TPB * TPB, 8) idxst_idctPreprocess(const T *input, TComplex *output, const int M, const int N,
                                                                     const int halfM, const int halfN,
                                                                     const TComplex *__restrict__ expkM, const TComplex *__restrict__ expkN)
{
    const int wid = blockDim.x * blockIdx.x + threadIdx.x;
    const int hid = blockDim.y * blockIdx.y + threadIdx.y;
    if (hid < halfM && wid < halfN)
    {
        // bit1 = interior row (hid != 0), bit0 = interior column (wid != 0).
        int cond = ((hid != 0) << 1) | (wid != 0);
        switch (cond)
        {
        case 0: // hid == 0 && wid == 0: the four corner entries
        {
            T tmp1;
            TComplex tmp_up;
            // Row 0 of the reordered input is all zeros, so these entries vanish.
            output[0].x = 0;
            output[0].y = 0;
            output[halfN].x = 0;
            output[halfN].y = 0;
            tmp1 = input[INDEX(halfM, 0, N)];
            tmp_up.x = tmp1;
            tmp_up.y = tmp1;
            output[INDEX(halfM, 0, halfN + 1)] = complexConj(complexMul(expkM[halfM], tmp_up));
            tmp1 = input[INDEX(halfM, halfN, N)];
            tmp_up.x = 0;
            tmp_up.y = 2 * tmp1;
            output[INDEX(halfM, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[halfM], expkN[halfN]), tmp_up));
            break;
        }
        case 1: // hid == 0, wid != 0: spectrum rows 0 and halfM
        {
            // Row 0 is zero in the reordered input.
            output[wid].x = 0;
            output[wid].y = 0;
            TComplex tmp_up;
            T tmp1 = input[INDEX(halfM, wid, N)];
            T tmp2 = input[INDEX(halfM, N - wid, N)];
            tmp_up.x = tmp1 - tmp2;
            tmp_up.y = tmp1 + tmp2;
            output[INDEX(halfM, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[halfM], expkN[wid]), tmp_up));
            break;
        }
        case 2: // wid == 0, hid != 0: columns 0 and halfN, rows hid and M - hid
        {
            T tmp1, tmp3;
            TComplex tmp_up, tmp_down;
            // Row reversal: read input[M - hid] where idct2 reads input[hid].
            tmp1 = input[INDEX(M - hid, 0, N)];
            tmp3 = input[INDEX(hid, 0, N)];
            tmp_up.x = tmp1;
            tmp_up.y = tmp3;
            tmp_down.x = tmp3;
            tmp_down.y = tmp1;
            output[INDEX(hid, 0, halfN + 1)] = complexConj(complexMul(expkM[hid], tmp_up));
            output[INDEX(M - hid, 0, halfN + 1)] = complexConj(complexMul(expkM[M - hid], tmp_down));
            tmp1 = input[INDEX(M - hid, halfN, N)];
            tmp3 = input[INDEX(hid, halfN, N)];
            tmp_up.x = tmp1 - tmp3;
            tmp_up.y = tmp3 + tmp1;
            tmp_down.x = tmp3 - tmp1;
            tmp_down.y = tmp1 + tmp3;
            output[INDEX(hid, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[hid], expkN[halfN]), tmp_up));
            output[INDEX(M - hid, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[M - hid], expkN[halfN]), tmp_down));
            break;
        }
        case 3: // interior: four mirrored input reads -> two spectrum writes
        {
            T tmp1 = input[INDEX(M - hid, wid, N)];
            T tmp2 = input[INDEX(M - hid, N - wid, N)];
            T tmp3 = input[INDEX(hid, wid, N)];
            T tmp4 = input[INDEX(hid, N - wid, N)];
            TComplex tmp_up, tmp_down;
            tmp_up.x = tmp1 - tmp4;
            tmp_up.y = tmp3 + tmp2;
            tmp_down.x = tmp3 - tmp2;
            tmp_down.y = tmp1 + tmp4;
            output[INDEX(hid, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[hid], expkN[wid]), tmp_up));
            output[INDEX(M - hid, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[M - hid], expkN[wid]), tmp_down));
            break;
        }
        default: // unreachable: cond is always in [0, 3]
            assert(0);
            break;
        }
    }
}
// Host launcher: the kernel walks the half-plane [0, M/2) x [0, N/2).
template <typename T>
void idxst_idctPreprocessCudaLauncher(
    const T *x,
    T *y,
    const int M,
    const int N,
    const T *__restrict__ expkM,
    const T *__restrict__ expkN)
{
    const dim3 threads(TPB, TPB, 1);
    const dim3 blocks((N / 2 + TPB - 1) / TPB, (M / 2 + TPB - 1) / TPB, 1);
    hipLaunchKernelGGL((idxst_idctPreprocess<T, ComplexType<T>>), blocks, threads, 0, 0,
                       x, (ComplexType<T> *)y, M, N, M / 2, N / 2,
                       (ComplexType<T> *)expkM, (ComplexType<T> *)expkN);
}
// Adapted from idct2d_postprocess() with changes on sign and scale
// if (hid % 2 == 1)
//     new_output[hid][wid] = -output[hid][wid];
// else
//     new_output[hid][wid] = output[hid][wid];
//
// Un-shuffles the FFT result back to natural order, scales by MN, and
// negates entries that land in odd output ROWS (the row-wise sign flip that
// turns the row transform into an IDXST). One thread per element.
template <typename T>
__global__ void idxst_idctPostprocess(const T *x, T *y, const int M, const int N, const int halfN, const int MN)
{
    const int wid = blockDim.x * blockIdx.x + threadIdx.x;
    const int hid = blockDim.y * blockIdx.y + threadIdx.y;
    if (hid < M && wid < N) // guard: launch grid may overhang M x N
    {
        // Quadrant selector: bit1 = top half of rows, bit0 = left half of columns.
        int cond = ((hid < M / 2) << 1) | (wid < N / 2);
        int index;
        switch (cond)
        {
        case 0: // odd row, odd column (odd row -> negated)
            index = INDEX(((M - hid) << 1) - 1, ((N - wid) << 1) - 1, N);
            y[index] = -x[INDEX(hid, wid, N)] * MN;
            break;
        case 1: // odd row, even column (odd row -> negated)
            index = INDEX(((M - hid) << 1) - 1, wid << 1, N);
            y[index] = -x[INDEX(hid, wid, N)] * MN;
            break;
        case 2: // even row, odd column
            index = INDEX(hid << 1, ((N - wid) << 1) - 1, N);
            y[index] = x[INDEX(hid, wid, N)] * MN;
            break;
        case 3: // even row, even column
            index = INDEX(hid << 1, wid << 1, N);
            y[index] = x[INDEX(hid, wid, N)] * MN;
            break;
        default: // unreachable: cond is always in [0, 3]
            assert(0);
            break;
        }
    }
}
// Host launcher: one thread per element of the M x N plane, TPB x TPB blocks.
template <typename T>
void idxst_idctPostprocessCudaLauncher(const T *x, T *y, const int M, const int N)
{
    const int blocksX = (N + TPB - 1) / TPB; // ceil(N / TPB)
    const int blocksY = (M + TPB - 1) / TPB; // ceil(M / TPB)
    hipLaunchKernelGGL((idxst_idctPostprocess<T>), dim3(blocksX, blocksY, 1), dim3(TPB, TPB, 1), 0, 0,
                       x, y, M, N, N / 2, M * N);
}
// dct2_fft2
// Explicit instantiations of the host-side launchers for the element types
// exposed to the bindings (float and double). Each REGISTER_* macro expands
// to a `template void ...CudaLauncher<type>(...);` explicit instantiation.
#define REGISTER_DCT2DPREPROCESS_KERNEL_LAUNCHER(type) \
    template void dct2dPreprocessCudaLauncher<type>(   \
        const type *x,                                 \
        type *y,                                       \
        const int M,                                   \
        const int N);
REGISTER_DCT2DPREPROCESS_KERNEL_LAUNCHER(float);
REGISTER_DCT2DPREPROCESS_KERNEL_LAUNCHER(double);
#define REGISTER_DCT2DPOSTPROCESS_KERNEL_LAUNCHER(type) \
    template void dct2dPostprocessCudaLauncher<type>(   \
        const type *x,                                  \
        type *y,                                        \
        const int M,                                    \
        const int N,                                    \
        const type *__restrict__ expkM,                 \
        const type *__restrict__ expkN);
REGISTER_DCT2DPOSTPROCESS_KERNEL_LAUNCHER(float);
REGISTER_DCT2DPOSTPROCESS_KERNEL_LAUNCHER(double);
//idct_idxst
#define REGISTER_IDCT_IDXSTPREPROCESS_KERNEL_LAUNCHER(type) \
    template void idct_idxstPreprocessCudaLauncher<type>(   \
        const type *x,                                      \
        type *y,                                            \
        const int M,                                        \
        const int N,                                        \
        const type *__restrict__ expkM,                     \
        const type *__restrict__ expkN);
REGISTER_IDCT_IDXSTPREPROCESS_KERNEL_LAUNCHER(float);
REGISTER_IDCT_IDXSTPREPROCESS_KERNEL_LAUNCHER(double);
#define REGISTER_IDCT_IDXSTPOSTPROCESS_KERNEL_LAUNCHER(type) \
    template void idct_idxstPostprocessCudaLauncher<type>(   \
        const type *x,                                       \
        type *y,                                             \
        const int M,                                         \
        const int N);
REGISTER_IDCT_IDXSTPOSTPROCESS_KERNEL_LAUNCHER(float);
REGISTER_IDCT_IDXSTPOSTPROCESS_KERNEL_LAUNCHER(double);
//idxst_idct
#define REGISTER_IDXST_IDCTPREPROCESS_KERNEL_LAUNCHER(type) \
    template void idxst_idctPreprocessCudaLauncher<type>(   \
        const type *x,                                      \
        type *y,                                            \
        const int M,                                        \
        const int N,                                        \
        const type *__restrict__ expkM,                     \
        const type *__restrict__ expkN);
REGISTER_IDXST_IDCTPREPROCESS_KERNEL_LAUNCHER(float);
REGISTER_IDXST_IDCTPREPROCESS_KERNEL_LAUNCHER(double);
#define REGISTER_IDXST_IDCTPOSTPROCESS_KERNEL_LAUNCHER(type) \
    template void idxst_idctPostprocessCudaLauncher<type>(   \
        const type *x,                                       \
        type *y,                                             \
        const int M,                                         \
        const int N);
REGISTER_IDXST_IDCTPOSTPROCESS_KERNEL_LAUNCHER(float);
REGISTER_IDXST_IDCTPOSTPROCESS_KERNEL_LAUNCHER(double);
//idct2_fft2
#define REGISTER_IDCT2_FFT2PREPROCESS_KERNEL_LAUNCHER(type) \
    template void idct2_fft2PreprocessCudaLauncher<type>(   \
        const type *x,                                      \
        type *y,                                            \
        const int M,                                        \
        const int N,                                        \
        const type *__restrict__ expkM,                     \
        const type *__restrict__ expkN);
REGISTER_IDCT2_FFT2PREPROCESS_KERNEL_LAUNCHER(float);
REGISTER_IDCT2_FFT2PREPROCESS_KERNEL_LAUNCHER(double);
#define REGISTER_IDCT2_FFT2POSTPROCESS_KERNEL_LAUNCHER(type) \
    template void idct2_fft2PostprocessCudaLauncher<type>(   \
        const type *x,                                       \
        type *y,                                             \
        const int M,                                         \
        const int N);
REGISTER_IDCT2_FFT2POSTPROCESS_KERNEL_LAUNCHER(float);
REGISTER_IDCT2_FFT2POSTPROCESS_KERNEL_LAUNCHER(double);
DREAMPLACE_END_NAMESPACE
| ab8fa6d56acd6118d92cfee74d3177b70830202c.cu | /**
* @file dct2_fft2_cuda_kernel.cu
* @author Zixuan Jiang, Jiaqi Gu
* @date Apr 2019
* @brief Refernece: Byeong Lee, "A new algorithm to compute the discrete cosine Transform,"
* in IEEE Transactions on Acoustics, Speech, and Signal Processing, vol. 32, no. 6, pp. 1243-1245, December 1984.
* The preprocess and postprocess of 2d dct and 2d idct are discussed in the original paper.
* idct(idxst(x)) and idxst(idct(x)) are similar to the idct2d(x),
* except tiny modifications on preprocessing and postprocessing
*/
#include <assert.h>
#include <float.h>
#include <math.h>
#include "cuda_runtime.h"
#include "utility/src/utils.cuh"
#define TPB (16)
DREAMPLACE_BEGIN_NAMESPACE
// Row-major flat offset of element (hid, wid) in a matrix with row pitch N.
inline __device__ int INDEX(const int hid, const int wid, const int N)
{
    const int rowOffset = hid * N;
    return rowOffset + wid;
}
// dct2_fft2
// Reorders the M x N real input into the even/odd-shuffled layout consumed
// by the batched real FFT of Lee's 2D DCT algorithm: even rows/columns keep
// their (halved) index while odd rows/columns are mirrored to the far end of
// the 2M x (N/2) flat buffer. One thread per input element; halfN == N / 2
// is the row pitch of the output buffer.
template <typename T>
__global__ void dct2dPreprocess(const T *x, T *y, const int M, const int N, const int halfN)
{
    const int wid = blockDim.x * blockIdx.x + threadIdx.x;
    const int hid = blockDim.y * blockIdx.y + threadIdx.y;
    if (hid < M && wid < N) // guard: launch grid may overhang M x N
    {
        int index;
        // bit1 = row index even, bit0 = column index even.
        int cond = (((hid & 1) == 0) << 1) | ((wid & 1) == 0);
        switch (cond)
        {
        case 0: // odd row, odd column
            index = INDEX(2 * M - (hid + 1), N - (wid + 1) / 2, halfN);
            break;
        case 1: // odd row, even column
            index = INDEX(2 * M - (hid + 1), wid / 2, halfN);
            break;
        case 2: // even row, odd column
            index = INDEX(hid, N - (wid + 1) / 2, halfN);
            break;
        case 3: // even row, even column
            index = INDEX(hid, wid / 2, halfN);
            break;
        default:
            // Unreachable (cond is always in [0, 3]); assert keeps `index`
            // from being read uninitialized and matches the sibling kernels.
            assert(0);
            break;
        }
        y[index] = x[INDEX(hid, wid, N)];
    }
}
// Host launcher: one thread per element of the M x N input, TPB x TPB blocks.
template <typename T>
void dct2dPreprocessCudaLauncher(const T *x, T *y, const int M, const int N)
{
    const dim3 threads(TPB, TPB, 1);
    const dim3 blocks((N + TPB - 1) / TPB, (M + TPB - 1) / TPB, 1);
    dct2dPreprocess<T><<<blocks, threads>>>(x, y, M, N, N / 2);
}
// Unpacks the M x (N/2+1) complex FFT output V into the real M x N array of
// 2D DCT coefficients, applying the twiddle factors expkM/expkN and the
// precomputed normalization factors two_over_MN / four_over_MN. Each thread
// of the half-plane [0, M/2) x [0, N/2) also writes the mirrored outputs;
// `cond` selects the dedicated formulas for the hid == 0 / wid == 0 lines.
template <typename T, typename TComplex>
__global__ void __launch_bounds__(TPB * TPB, 8) dct2dPostprocess(const TComplex *V, T *y, const int M, const int N,
                                                                 const int halfM, const int halfN, const T two_over_MN, const T four_over_MN,
                                                                 const TComplex *__restrict__ expkM, const TComplex *__restrict__ expkN)
{
    const int wid = blockDim.x * blockIdx.x + threadIdx.x;
    const int hid = blockDim.y * blockIdx.y + threadIdx.y;
    if (hid < halfM && wid < halfN)
    {
        // bit1 = interior row (hid != 0), bit0 = interior column (wid != 0).
        int cond = ((hid != 0) << 1) | (wid != 0);
        switch (cond)
        {
        case 0: // hid == 0 && wid == 0: the four corner outputs
        {
            y[0] = V[0].x * four_over_MN;
            y[halfN] = RealPartOfMul(expkN[halfN], V[halfN]) * four_over_MN;
            y[INDEX(halfM, 0, N)] = expkM[halfM].x * V[INDEX(halfM, 0, halfN + 1)].x * four_over_MN;
            y[INDEX(halfM, halfN, N)] = expkM[halfM].x * RealPartOfMul(expkN[halfN], V[INDEX(halfM, halfN, halfN + 1)]) * four_over_MN;
            break;
        }
        case 1: // hid == 0, wid != 0: output rows 0 and halfM
        {
            ComplexType<T> tmp;
            tmp = V[wid];
            y[wid] = RealPartOfMul(expkN[wid], tmp) * four_over_MN;
            y[N - wid] = -ImaginaryPartOfMul(expkN[wid], tmp) * four_over_MN;
            tmp = V[INDEX(halfM, wid, halfN + 1)];
            y[INDEX(halfM, wid, N)] = expkM[halfM].x * RealPartOfMul(expkN[wid], tmp) * four_over_MN;
            y[INDEX(halfM, N - wid, N)] = -expkM[halfM].x * ImaginaryPartOfMul(expkN[wid], tmp) * four_over_MN;
            break;
        }
        case 2: // wid == 0, hid != 0: output columns 0 and halfN, rows hid and M - hid
        {
            ComplexType<T> tmp1, tmp2, tmp_up, tmp_down;
            tmp1 = V[INDEX(hid, 0, halfN + 1)];
            tmp2 = V[INDEX(M - hid, 0, halfN + 1)];
            tmp_up.x = expkM[hid].x * (tmp1.x + tmp2.x) + expkM[hid].y * (tmp2.y - tmp1.y);
            tmp_down.x = -expkM[hid].y * (tmp1.x + tmp2.x) + expkM[hid].x * (tmp2.y - tmp1.y);
            y[INDEX(hid, 0, N)] = tmp_up.x * two_over_MN;
            y[INDEX(M - hid, 0, N)] = tmp_down.x * two_over_MN;
            tmp1 = complexAdd(V[INDEX(hid, halfN, halfN + 1)], V[INDEX(M - hid, halfN, halfN + 1)]);
            tmp2 = complexSubtract(V[INDEX(hid, halfN, halfN + 1)], V[INDEX(M - hid, halfN, halfN + 1)]);
            tmp_up.x = expkM[hid].x * tmp1.x - expkM[hid].y * tmp2.y;
            tmp_up.y = expkM[hid].x * tmp1.y + expkM[hid].y * tmp2.x;
            tmp_down.x = -expkM[hid].y * tmp1.x - expkM[hid].x * tmp2.y;
            tmp_down.y = -expkM[hid].y * tmp1.y + expkM[hid].x * tmp2.x;
            y[INDEX(hid, halfN, N)] = RealPartOfMul(expkN[halfN], tmp_up) * two_over_MN;
            y[INDEX(M - hid, halfN, N)] = RealPartOfMul(expkN[halfN], tmp_down) * two_over_MN;
            break;
        }
        case 3: // interior: two spectrum reads -> four mirrored output writes
        {
            ComplexType<T> tmp1, tmp2, tmp_up, tmp_down;
            tmp1 = complexAdd(V[INDEX(hid, wid, halfN + 1)], V[INDEX(M - hid, wid, halfN + 1)]);
            tmp2 = complexSubtract(V[INDEX(hid, wid, halfN + 1)], V[INDEX(M - hid, wid, halfN + 1)]);
            tmp_up.x = expkM[hid].x * tmp1.x - expkM[hid].y * tmp2.y;
            tmp_up.y = expkM[hid].x * tmp1.y + expkM[hid].y * tmp2.x;
            tmp_down.x = -expkM[hid].y * tmp1.x - expkM[hid].x * tmp2.y;
            tmp_down.y = -expkM[hid].y * tmp1.y + expkM[hid].x * tmp2.x;
            y[INDEX(hid, wid, N)] = RealPartOfMul(expkN[wid], tmp_up) * two_over_MN;
            y[INDEX(M - hid, wid, N)] = RealPartOfMul(expkN[wid], tmp_down) * two_over_MN;
            y[INDEX(hid, N - wid, N)] = -ImaginaryPartOfMul(expkN[wid], tmp_up) * two_over_MN;
            y[INDEX(M - hid, N - wid, N)] = -ImaginaryPartOfMul(expkN[wid], tmp_down) * two_over_MN;
            break;
        }
        default: // unreachable: cond is always in [0, 3]
            assert(0);
            break;
        }
    }
}
// Host launcher: the kernel covers the half-plane [0, M/2) x [0, N/2).
template <typename T>
void dct2dPostprocessCudaLauncher(const T *x, T *y, const int M, const int N,
                                  const T *__restrict__ expkM, const T *__restrict__ expkN)
{
    const dim3 threads(TPB, TPB, 1);
    const dim3 blocks((N / 2 + TPB - 1) / TPB, (M / 2 + TPB - 1) / TPB, 1);
    const T twoOverMN = (T)(2. / (M * N));   // boundary-line scale
    const T fourOverMN = (T)(4. / (M * N));  // corner/edge scale
    dct2dPostprocess<T, ComplexType<T>><<<blocks, threads>>>(
        (ComplexType<T> *)x, y, M, N, M / 2, N / 2,
        twoOverMN, fourOverMN,
        (ComplexType<T> *)expkM, (ComplexType<T> *)expkN);
}
// idct2_fft2
// Packs the real M x N array of DCT coefficients into the M x (N/2+1)
// complex layout consumed by the inverse C2R FFT, multiplying by the twiddle
// factors expkM/expkN. Each thread of the half-plane [0, M/2) x [0, N/2)
// also writes the mirrored entries; `cond` selects the dedicated formulas
// for the hid == 0 / wid == 0 boundary lines.
template <typename T, typename TComplex>
__global__ void __launch_bounds__(TPB * TPB, 8) idct2_fft2Preprocess(const T *input, TComplex *output, const int M, const int N,
                                                                     const int halfM, const int halfN,
                                                                     const TComplex *__restrict__ expkM, const TComplex *__restrict__ expkN)
{
    const int wid = blockDim.x * blockIdx.x + threadIdx.x;
    const int hid = blockDim.y * blockIdx.y + threadIdx.y;
    if (hid < halfM && wid < halfN)
    {
        // bit1 = interior row (hid != 0), bit0 = interior column (wid != 0).
        int cond = ((hid != 0) << 1) | (wid != 0);
        switch (cond)
        {
        case 0: // hid == 0 && wid == 0: the four corner entries
        {
            T tmp1;
            TComplex tmp_up;
            output[0].x = input[0];
            output[0].y = 0;
            tmp1 = input[halfN];
            tmp_up.x = tmp1;
            tmp_up.y = tmp1;
            output[halfN] = complexConj(complexMul(expkN[halfN], tmp_up));
            tmp1 = input[INDEX(halfM, 0, N)];
            tmp_up.x = tmp1;
            tmp_up.y = tmp1;
            output[INDEX(halfM, 0, halfN + 1)] = complexConj(complexMul(expkM[halfM], tmp_up));
            tmp1 = input[INDEX(halfM, halfN, N)];
            tmp_up.x = 0;
            tmp_up.y = 2 * tmp1;
            output[INDEX(halfM, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[halfM], expkN[halfN]), tmp_up));
            break;
        }
        case 1: // hid == 0, wid != 0: spectrum rows 0 and halfM
        {
            TComplex tmp_up;
            tmp_up.x = input[wid];
            tmp_up.y = input[N - wid];
            output[wid] = complexConj(complexMul(expkN[wid], tmp_up));
            T tmp1 = input[INDEX(halfM, wid, N)];
            T tmp2 = input[INDEX(halfM, N - wid, N)];
            tmp_up.x = tmp1 - tmp2;
            tmp_up.y = tmp1 + tmp2;
            output[INDEX(halfM, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[halfM], expkN[wid]), tmp_up));
            break;
        }
        case 2: // wid == 0, hid != 0: columns 0 and halfN, rows hid and M - hid
        {
            T tmp1, tmp3;
            TComplex tmp_up, tmp_down;
            tmp1 = input[INDEX(hid, 0, N)];
            tmp3 = input[INDEX(M - hid, 0, N)];
            tmp_up.x = tmp1;
            tmp_up.y = tmp3;
            tmp_down.x = tmp3;
            tmp_down.y = tmp1;
            output[INDEX(hid, 0, halfN + 1)] = complexConj(complexMul(expkM[hid], tmp_up));
            output[INDEX(M - hid, 0, halfN + 1)] = complexConj(complexMul(expkM[M - hid], tmp_down));
            tmp1 = input[INDEX(hid, halfN, N)];
            tmp3 = input[INDEX(M - hid, halfN, N)];
            tmp_up.x = tmp1 - tmp3;
            tmp_up.y = tmp3 + tmp1;
            tmp_down.x = tmp3 - tmp1;
            tmp_down.y = tmp1 + tmp3;
            output[INDEX(hid, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[hid], expkN[halfN]), tmp_up));
            output[INDEX(M - hid, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[M - hid], expkN[halfN]), tmp_down));
            break;
        }
        case 3: // interior: four mirrored input reads -> two spectrum writes
        {
            T tmp1 = input[INDEX(hid, wid, N)];
            T tmp2 = input[INDEX(hid, N - wid, N)];
            T tmp3 = input[INDEX(M - hid, wid, N)];
            T tmp4 = input[INDEX(M - hid, N - wid, N)];
            TComplex tmp_up, tmp_down;
            tmp_up.x = tmp1 - tmp4;
            tmp_up.y = tmp3 + tmp2;
            tmp_down.x = tmp3 - tmp2;
            tmp_down.y = tmp1 + tmp4;
            output[INDEX(hid, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[hid], expkN[wid]), tmp_up));
            output[INDEX(M - hid, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[M - hid], expkN[wid]), tmp_down));
            break;
        }
        default: // unreachable: cond is always in [0, 3]
            assert(0);
            break;
        }
    }
}
// Host launcher: the kernel walks the half-plane [0, M/2) x [0, N/2).
template <typename T>
void idct2_fft2PreprocessCudaLauncher(
    const T *x,
    T *y,
    const int M,
    const int N,
    const T *__restrict__ expkM,
    const T *__restrict__ expkN)
{
    const dim3 threads(TPB, TPB, 1);
    const dim3 blocks((N / 2 + TPB - 1) / TPB, (M / 2 + TPB - 1) / TPB, 1);
    idct2_fft2Preprocess<T, ComplexType<T>><<<blocks, threads>>>(
        x, (ComplexType<T> *)y, M, N, M / 2, N / 2,
        (ComplexType<T> *)expkM, (ComplexType<T> *)expkN);
}
// Un-shuffles the FFT result back to natural (row, column) order and scales
// every element by MN: thread (hid, wid) writes to an even/odd row and
// column chosen by which quadrant of the M x N plane it falls in.
// One thread per element; expects a 2D launch covering M x N.
template <typename T>
__global__ void idct2_fft2Postprocess(const T *x, T *y, const int M, const int N, const int halfN, const int MN)
{
    const int wid = blockDim.x * blockIdx.x + threadIdx.x;
    const int hid = blockDim.y * blockIdx.y + threadIdx.y;
    if (hid < M && wid < N) // guard: launch grid may overhang M x N
    {
        // Quadrant selector: bit1 = top half of rows, bit0 = left half of columns.
        int cond = ((hid < M / 2) << 1) | (wid < N / 2);
        int index;
        switch (cond)
        {
        case 0: // bottom-right quadrant -> odd row, odd column
            index = INDEX(((M - hid) << 1) - 1, ((N - wid) << 1) - 1, N);
            break;
        case 1: // bottom-left quadrant -> odd row, even column
            index = INDEX(((M - hid) << 1) - 1, wid << 1, N);
            break;
        case 2: // top-right quadrant -> even row, odd column
            index = INDEX(hid << 1, ((N - wid) << 1) - 1, N);
            break;
        case 3: // top-left quadrant -> even row, even column
            index = INDEX(hid << 1, wid << 1, N);
            break;
        default: // unreachable: cond is always in [0, 3]
            assert(0);
            break;
        }
        y[index] = x[INDEX(hid, wid, N)] * MN;
    }
}
// Host launcher: one thread per element of the M x N plane, TPB x TPB blocks.
template <typename T>
void idct2_fft2PostprocessCudaLauncher(const T *x, T *y, const int M, const int N)
{
    const int blocksX = (N + TPB - 1) / TPB; // ceil(N / TPB)
    const int blocksY = (M + TPB - 1) / TPB; // ceil(M / TPB)
    idct2_fft2Postprocess<T><<<dim3(blocksX, blocksY, 1), dim3(TPB, TPB, 1)>>>(
        x, y, M, N, N / 2, M * N);
}
// idct_idxst
// Adapted from idct2d_preprocess(). The only change is the reordered input
// if (wid != 0)
//     new_input[hid][wid] = input[hid][N - wid];
// else
//     new_input[hid][0] = 0
//
// Packs the (column-reversed, column-0-zeroed) real M x N input into the
// M x (N/2+1) complex layout consumed by the inverse C2R FFT, multiplying by
// the twiddle factors expkM/expkN. Each thread of the half-plane
// [0, M/2) x [0, N/2) also writes the mirrored entries; `cond` selects the
// dedicated formulas for the hid == 0 / wid == 0 boundary lines.
template <typename T, typename TComplex>
__global__ void __launch_bounds__(TPB * TPB, 8) idct_idxstPreprocess(const T *input, TComplex *output, const int M, const int N,
                                                                     const int halfM, const int halfN,
                                                                     const TComplex *__restrict__ expkM, const TComplex *__restrict__ expkN)
{
    const int wid = blockDim.x * blockIdx.x + threadIdx.x;
    const int hid = blockDim.y * blockIdx.y + threadIdx.y;
    if (hid < halfM && wid < halfN)
    {
        // bit1 = interior row (hid != 0), bit0 = interior column (wid != 0).
        int cond = ((hid != 0) << 1) | (wid != 0);
        switch (cond)
        {
        case 0: // hid == 0 && wid == 0: the four corner entries
        {
            T tmp1;
            TComplex tmp_up;
            // Column 0 of the reordered input is all zeros, so these entries vanish.
            output[0].x = 0;
            output[0].y = 0;
            tmp1 = input[halfN];
            tmp_up.x = tmp1;
            tmp_up.y = tmp1;
            output[halfN] = complexConj(complexMul(expkN[halfN], tmp_up));
            output[INDEX(halfM, 0, halfN + 1)].x = 0;
            output[INDEX(halfM, 0, halfN + 1)].y = 0;
            tmp1 = input[INDEX(halfM, halfN, N)];
            tmp_up.x = 0;
            tmp_up.y = 2 * tmp1;
            output[INDEX(halfM, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[halfM], expkN[halfN]), tmp_up));
            break;
        }
        case 1: // hid == 0, wid != 0: spectrum rows 0 and halfM
        {
            TComplex tmp_up;
            // Column reversal: read input[N - wid] where idct2 reads input[wid].
            tmp_up.x = input[N - wid];
            tmp_up.y = input[wid];
            output[wid] = complexConj(complexMul(expkN[wid], tmp_up));
            T tmp1 = input[INDEX(halfM, N - wid, N)];
            T tmp2 = input[INDEX(halfM, wid, N)];
            tmp_up.x = tmp1 - tmp2;
            tmp_up.y = tmp1 + tmp2;
            output[INDEX(halfM, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[halfM], expkN[wid]), tmp_up));
            break;
        }
        case 2: // wid == 0, hid != 0: columns 0 and halfN, rows hid and M - hid
        {
            T tmp1, tmp3;
            TComplex tmp_up, tmp_down;
            // Column 0 is zero in the reordered input.
            output[INDEX(hid, 0, halfN + 1)].x = 0;
            output[INDEX(hid, 0, halfN + 1)].y = 0;
            output[INDEX(M - hid, 0, halfN + 1)].x = 0;
            output[INDEX(M - hid, 0, halfN + 1)].y = 0;
            tmp1 = input[INDEX(hid, halfN, N)];
            tmp3 = input[INDEX(M - hid, halfN, N)];
            tmp_up.x = tmp1 - tmp3;
            tmp_up.y = tmp3 + tmp1;
            tmp_down.x = tmp3 - tmp1;
            tmp_down.y = tmp1 + tmp3;
            output[INDEX(hid, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[hid], expkN[halfN]), tmp_up));
            output[INDEX(M - hid, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[M - hid], expkN[halfN]), tmp_down));
            break;
        }
        case 3: // interior: four mirrored input reads -> two spectrum writes
        {
            T tmp1 = input[INDEX(hid, N - wid, N)];
            T tmp2 = input[INDEX(hid, wid, N)];
            T tmp3 = input[INDEX(M - hid, N - wid, N)];
            T tmp4 = input[INDEX(M - hid, wid, N)];
            TComplex tmp_up, tmp_down;
            tmp_up.x = tmp1 - tmp4;
            tmp_up.y = tmp3 + tmp2;
            tmp_down.x = tmp3 - tmp2;
            tmp_down.y = tmp1 + tmp4;
            output[INDEX(hid, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[hid], expkN[wid]), tmp_up));
            output[INDEX(M - hid, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[M - hid], expkN[wid]), tmp_down));
            break;
        }
        default: // unreachable: cond is always in [0, 3]
            assert(0);
            break;
        }
    }
}
// Host launcher: the kernel walks the half-plane [0, M/2) x [0, N/2).
template <typename T>
void idct_idxstPreprocessCudaLauncher(const T *x, T *y, const int M, const int N,
                                      const T *__restrict__ expkM, const T *__restrict__ expkN)
{
    const dim3 threads(TPB, TPB, 1);
    const dim3 blocks((N / 2 + TPB - 1) / TPB, (M / 2 + TPB - 1) / TPB, 1);
    idct_idxstPreprocess<T, ComplexType<T>><<<blocks, threads>>>(
        x, (ComplexType<T> *)y, M, N, M / 2, N / 2,
        (ComplexType<T> *)expkM, (ComplexType<T> *)expkN);
}
// Adapted from idct2d_postprocess() with changes on sign and scale
// if (wid % 2 == 1)
//     new_output[hid][wid] = -output[hid][wid];
// else
//     new_output[hid][wid] = output[hid][wid];
//
// Un-shuffles the FFT result back to natural (row, column) order, scales
// every element by MN, and negates entries that land in odd output columns
// (the sign flip that turns the column transform into an IDXST).
// One thread per element of the M x N input plane; expects a 2D launch.
template <typename T>
__global__ void idct_idxstPostprocess(const T *x, T *y, const int M, const int N, const int halfN, const int MN)
{
    const int wid = blockDim.x * blockIdx.x + threadIdx.x;
    const int hid = blockDim.y * blockIdx.y + threadIdx.y;
    if (hid < M && wid < N) // guard: launch grid may overhang M x N
    {
        // Quadrant selector: bit1 = top half of rows, bit0 = left half of columns.
        int cond = ((hid < M / 2) << 1) | (wid < N / 2);
        int index;
        switch (cond)
        {
        case 0: // bottom-right quadrant -> odd row, odd column (negated)
            index = INDEX(((M - hid) << 1) - 1, ((N - wid) << 1) - 1, N);
            y[index] = -x[INDEX(hid, wid, N)] * MN;
            break;
        case 1: // bottom-left quadrant -> odd row, even column
            index = INDEX(((M - hid) << 1) - 1, wid << 1, N);
            y[index] = x[INDEX(hid, wid, N)] * MN;
            break;
        case 2: // top-right quadrant -> even row, odd column (negated)
            index = INDEX(hid << 1, ((N - wid) << 1) - 1, N);
            y[index] = -x[INDEX(hid, wid, N)] * MN;
            break;
        case 3: // top-left quadrant -> even row, even column
            index = INDEX(hid << 1, wid << 1, N);
            y[index] = x[INDEX(hid, wid, N)] * MN;
            break;
        default: // unreachable: cond is always in [0, 3]
            assert(0);
            break;
        }
    }
}
// Host launcher: one thread per element of the M x N plane, TPB x TPB blocks.
template <typename T>
void idct_idxstPostprocessCudaLauncher(const T *x, T *y, const int M, const int N)
{
    const int blocksX = (N + TPB - 1) / TPB; // ceil(N / TPB)
    const int blocksY = (M + TPB - 1) / TPB; // ceil(M / TPB)
    idct_idxstPostprocess<T><<<dim3(blocksX, blocksY, 1), dim3(TPB, TPB, 1)>>>(
        x, y, M, N, N / 2, M * N);
}
// idxst_idct
// Adapted from idct2d_preprocess(). The only change is the reordered input
// if (hid != 0)
//     new_input[hid][wid] = input[M - hid][wid];
// else
//     new_input[0][wid] = 0
//
// Packs the (row-reversed, row-0-zeroed) real M x N input into the
// M x (N/2+1) complex layout consumed by the inverse C2R FFT, multiplying by
// the twiddle factors expkM/expkN. Each thread of the half-plane
// [0, M/2) x [0, N/2) also writes the mirrored entries; `cond` selects the
// dedicated formulas for the hid == 0 / wid == 0 boundary lines.
template <typename T, typename TComplex>
__global__ void __launch_bounds__(TPB * TPB, 8) idxst_idctPreprocess(const T *input, TComplex *output, const int M, const int N,
                                                                     const int halfM, const int halfN,
                                                                     const TComplex *__restrict__ expkM, const TComplex *__restrict__ expkN)
{
    const int wid = blockDim.x * blockIdx.x + threadIdx.x;
    const int hid = blockDim.y * blockIdx.y + threadIdx.y;
    if (hid < halfM && wid < halfN)
    {
        // bit1 = interior row (hid != 0), bit0 = interior column (wid != 0).
        int cond = ((hid != 0) << 1) | (wid != 0);
        switch (cond)
        {
        case 0: // hid == 0 && wid == 0: the four corner entries
        {
            T tmp1;
            TComplex tmp_up;
            // Row 0 of the reordered input is all zeros, so these entries vanish.
            output[0].x = 0;
            output[0].y = 0;
            output[halfN].x = 0;
            output[halfN].y = 0;
            tmp1 = input[INDEX(halfM, 0, N)];
            tmp_up.x = tmp1;
            tmp_up.y = tmp1;
            output[INDEX(halfM, 0, halfN + 1)] = complexConj(complexMul(expkM[halfM], tmp_up));
            tmp1 = input[INDEX(halfM, halfN, N)];
            tmp_up.x = 0;
            tmp_up.y = 2 * tmp1;
            output[INDEX(halfM, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[halfM], expkN[halfN]), tmp_up));
            break;
        }
        case 1: // hid == 0, wid != 0: spectrum rows 0 and halfM
        {
            // Row 0 is zero in the reordered input.
            output[wid].x = 0;
            output[wid].y = 0;
            TComplex tmp_up;
            T tmp1 = input[INDEX(halfM, wid, N)];
            T tmp2 = input[INDEX(halfM, N - wid, N)];
            tmp_up.x = tmp1 - tmp2;
            tmp_up.y = tmp1 + tmp2;
            output[INDEX(halfM, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[halfM], expkN[wid]), tmp_up));
            break;
        }
        case 2: // wid == 0, hid != 0: columns 0 and halfN, rows hid and M - hid
        {
            T tmp1, tmp3;
            TComplex tmp_up, tmp_down;
            // Row reversal: read input[M - hid] where idct2 reads input[hid].
            tmp1 = input[INDEX(M - hid, 0, N)];
            tmp3 = input[INDEX(hid, 0, N)];
            tmp_up.x = tmp1;
            tmp_up.y = tmp3;
            tmp_down.x = tmp3;
            tmp_down.y = tmp1;
            output[INDEX(hid, 0, halfN + 1)] = complexConj(complexMul(expkM[hid], tmp_up));
            output[INDEX(M - hid, 0, halfN + 1)] = complexConj(complexMul(expkM[M - hid], tmp_down));
            tmp1 = input[INDEX(M - hid, halfN, N)];
            tmp3 = input[INDEX(hid, halfN, N)];
            tmp_up.x = tmp1 - tmp3;
            tmp_up.y = tmp3 + tmp1;
            tmp_down.x = tmp3 - tmp1;
            tmp_down.y = tmp1 + tmp3;
            output[INDEX(hid, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[hid], expkN[halfN]), tmp_up));
            output[INDEX(M - hid, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[M - hid], expkN[halfN]), tmp_down));
            break;
        }
        case 3: // interior: four mirrored input reads -> two spectrum writes
        {
            T tmp1 = input[INDEX(M - hid, wid, N)];
            T tmp2 = input[INDEX(M - hid, N - wid, N)];
            T tmp3 = input[INDEX(hid, wid, N)];
            T tmp4 = input[INDEX(hid, N - wid, N)];
            TComplex tmp_up, tmp_down;
            tmp_up.x = tmp1 - tmp4;
            tmp_up.y = tmp3 + tmp2;
            tmp_down.x = tmp3 - tmp2;
            tmp_down.y = tmp1 + tmp4;
            output[INDEX(hid, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[hid], expkN[wid]), tmp_up));
            output[INDEX(M - hid, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[M - hid], expkN[wid]), tmp_down));
            break;
        }
        default: // unreachable: cond is always in [0, 3]
            assert(0);
            break;
        }
    }
}
// Host launcher: the kernel walks the half-plane [0, M/2) x [0, N/2).
template <typename T>
void idxst_idctPreprocessCudaLauncher(
    const T *x,
    T *y,
    const int M,
    const int N,
    const T *__restrict__ expkM,
    const T *__restrict__ expkN)
{
    const dim3 threads(TPB, TPB, 1);
    const dim3 blocks((N / 2 + TPB - 1) / TPB, (M / 2 + TPB - 1) / TPB, 1);
    idxst_idctPreprocess<T, ComplexType<T>><<<blocks, threads>>>(
        x, (ComplexType<T> *)y, M, N, M / 2, N / 2,
        (ComplexType<T> *)expkM, (ComplexType<T> *)expkN);
}
// Adapted from idct2d_postprocess() with changes on sign and scale
// if (hid % 2 == 1)
//     new_output[hid][wid] = -output[hid][wid];
// else
//     new_output[hid][wid] = output[hid][wid];
//
// Un-shuffles the FFT result back to natural order, scales by MN, and
// negates entries that land in odd output ROWS (the row-wise sign flip that
// turns the row transform into an IDXST). One thread per element.
template <typename T>
__global__ void idxst_idctPostprocess(const T *x, T *y, const int M, const int N, const int halfN, const int MN)
{
    const int wid = blockDim.x * blockIdx.x + threadIdx.x;
    const int hid = blockDim.y * blockIdx.y + threadIdx.y;
    if (hid < M && wid < N) // guard: launch grid may overhang M x N
    {
        // Quadrant selector: bit1 = top half of rows, bit0 = left half of columns.
        int cond = ((hid < M / 2) << 1) | (wid < N / 2);
        int index;
        switch (cond)
        {
        case 0: // odd row, odd column (odd row -> negated)
            index = INDEX(((M - hid) << 1) - 1, ((N - wid) << 1) - 1, N);
            y[index] = -x[INDEX(hid, wid, N)] * MN;
            break;
        case 1: // odd row, even column (odd row -> negated)
            index = INDEX(((M - hid) << 1) - 1, wid << 1, N);
            y[index] = -x[INDEX(hid, wid, N)] * MN;
            break;
        case 2: // even row, odd column
            index = INDEX(hid << 1, ((N - wid) << 1) - 1, N);
            y[index] = x[INDEX(hid, wid, N)] * MN;
            break;
        case 3: // even row, even column
            index = INDEX(hid << 1, wid << 1, N);
            y[index] = x[INDEX(hid, wid, N)] * MN;
            break;
        default: // unreachable: cond is always in [0, 3]
            assert(0);
            break;
        }
    }
}
// Host launcher: one thread per element of the M x N plane, TPB x TPB blocks.
template <typename T>
void idxst_idctPostprocessCudaLauncher(const T *x, T *y, const int M, const int N)
{
    const int blocksX = (N + TPB - 1) / TPB; // ceil(N / TPB)
    const int blocksY = (M + TPB - 1) / TPB; // ceil(M / TPB)
    idxst_idctPostprocess<T><<<dim3(blocksX, blocksY, 1), dim3(TPB, TPB, 1)>>>(
        x, y, M, N, N / 2, M * N);
}
// dct2_fft2
// Explicit instantiations of the host-side launchers for the element types
// exposed to the bindings (float and double). Each REGISTER_* macro expands
// to a `template void ...CudaLauncher<type>(...);` explicit instantiation.
#define REGISTER_DCT2DPREPROCESS_KERNEL_LAUNCHER(type) \
    template void dct2dPreprocessCudaLauncher<type>(   \
        const type *x,                                 \
        type *y,                                       \
        const int M,                                   \
        const int N);
REGISTER_DCT2DPREPROCESS_KERNEL_LAUNCHER(float);
REGISTER_DCT2DPREPROCESS_KERNEL_LAUNCHER(double);
#define REGISTER_DCT2DPOSTPROCESS_KERNEL_LAUNCHER(type) \
    template void dct2dPostprocessCudaLauncher<type>(   \
        const type *x,                                  \
        type *y,                                        \
        const int M,                                    \
        const int N,                                    \
        const type *__restrict__ expkM,                 \
        const type *__restrict__ expkN);
REGISTER_DCT2DPOSTPROCESS_KERNEL_LAUNCHER(float);
REGISTER_DCT2DPOSTPROCESS_KERNEL_LAUNCHER(double);
//idct_idxst
#define REGISTER_IDCT_IDXSTPREPROCESS_KERNEL_LAUNCHER(type) \
    template void idct_idxstPreprocessCudaLauncher<type>(   \
        const type *x,                                      \
        type *y,                                            \
        const int M,                                        \
        const int N,                                        \
        const type *__restrict__ expkM,                     \
        const type *__restrict__ expkN);
REGISTER_IDCT_IDXSTPREPROCESS_KERNEL_LAUNCHER(float);
REGISTER_IDCT_IDXSTPREPROCESS_KERNEL_LAUNCHER(double);
#define REGISTER_IDCT_IDXSTPOSTPROCESS_KERNEL_LAUNCHER(type) \
    template void idct_idxstPostprocessCudaLauncher<type>(   \
        const type *x,                                       \
        type *y,                                             \
        const int M,                                         \
        const int N);
REGISTER_IDCT_IDXSTPOSTPROCESS_KERNEL_LAUNCHER(float);
REGISTER_IDCT_IDXSTPOSTPROCESS_KERNEL_LAUNCHER(double);
//idxst_idct
#define REGISTER_IDXST_IDCTPREPROCESS_KERNEL_LAUNCHER(type) \
    template void idxst_idctPreprocessCudaLauncher<type>(   \
        const type *x,                                      \
        type *y,                                            \
        const int M,                                        \
        const int N,                                        \
        const type *__restrict__ expkM,                     \
        const type *__restrict__ expkN);
REGISTER_IDXST_IDCTPREPROCESS_KERNEL_LAUNCHER(float);
REGISTER_IDXST_IDCTPREPROCESS_KERNEL_LAUNCHER(double);
#define REGISTER_IDXST_IDCTPOSTPROCESS_KERNEL_LAUNCHER(type) \
    template void idxst_idctPostprocessCudaLauncher<type>(   \
        const type *x,                                       \
        type *y,                                             \
        const int M,                                         \
        const int N);
REGISTER_IDXST_IDCTPOSTPROCESS_KERNEL_LAUNCHER(float);
REGISTER_IDXST_IDCTPOSTPROCESS_KERNEL_LAUNCHER(double);
//idct2_fft2
#define REGISTER_IDCT2_FFT2PREPROCESS_KERNEL_LAUNCHER(type) \
    template void idct2_fft2PreprocessCudaLauncher<type>(   \
        const type *x,                                      \
        type *y,                                            \
        const int M,                                        \
        const int N,                                        \
        const type *__restrict__ expkM,                     \
        const type *__restrict__ expkN);
REGISTER_IDCT2_FFT2PREPROCESS_KERNEL_LAUNCHER(float);
REGISTER_IDCT2_FFT2PREPROCESS_KERNEL_LAUNCHER(double);
#define REGISTER_IDCT2_FFT2POSTPROCESS_KERNEL_LAUNCHER(type) \
    template void idct2_fft2PostprocessCudaLauncher<type>(   \
        const type *x,                                       \
        type *y,                                             \
        const int M,                                         \
        const int N);
REGISTER_IDCT2_FFT2POSTPROCESS_KERNEL_LAUNCHER(float);
REGISTER_IDCT2_FFT2POSTPROCESS_KERNEL_LAUNCHER(double);
DREAMPLACE_END_NAMESPACE
|
30f311cf0630aa50e9862cf308b6815994df5da1.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
// Prints the runtime version and, if at least one device is present, the
// key hardware properties of device 0. Always returns 0 (also when no
// device is found).
// NOTE(review): this hipified file prints CUDART_VERSION and "CUDA"-worded
// messages; on an AMD/HIP-only build CUDART_VERSION may be undefined —
// confirm the intended macro (e.g. HIP_VERSION) for non-CUDA targets.
int run_tests(void) {
  int cuda_devices = 0;
  std::cout << "CUDA version: " << CUDART_VERSION << "\n";
  hipGetDeviceCount(&cuda_devices); // return status ignored (diagnostic utility)
  if(cuda_devices == 0) {
    std::cout << "No Cuda hardware found. Exiting.\n";
    return 0;
  }
  std::cout << "This computer has " << cuda_devices << " Cuda device(s).\n";
  hipDeviceProp_t props;
  hipGetDeviceProperties(&props, 0); // queries device 0 only
  std::cout << "Properties of device 0.\n\n";
  std::cout << " Name: " << props.name << "\n";
  std::cout << " Global memory: " << props.totalGlobalMem << "\n";
  std::cout << " Shared memory: " << props.sharedMemPerBlock << "\n";
  std::cout << " Constant memory: " << props.totalConstMem << "\n";
  std::cout << " Block registers: " << props.regsPerBlock << "\n";
  std::cout << " Warp size: " << props.warpSize << "\n";
  std::cout << " Threads per block: " << props.maxThreadsPerBlock << "\n";
  std::cout << " Max block dimensions: [ " << props.maxThreadsDim[0] << ", " << props.maxThreadsDim[1] << ", " << props.maxThreadsDim[2] << " ]" << "\n";
  std::cout << " Max grid dimensions: [ " << props.maxGridSize[0] << ", " << props.maxGridSize[1] << ", " << props.maxGridSize[2] << " ]" << "\n";
  std::cout << "\n";
  return 0;
}
| 30f311cf0630aa50e9862cf308b6815994df5da1.cu | #include <iostream>
// Prints the CUDA runtime version and, if at least one device is present,
// the key hardware properties of device 0. Always returns 0.
int run_tests(void) {
  std::cout << "CUDA version: " << CUDART_VERSION << "\n";

  int deviceCount = 0;
  cudaGetDeviceCount(&deviceCount); // return status ignored (diagnostic utility)
  if (deviceCount == 0) {
    std::cout << "No Cuda hardware found. Exiting.\n";
    return 0;
  }
  std::cout << "This computer has " << deviceCount << " Cuda device(s).\n";

  cudaDeviceProp prop;
  cudaGetDeviceProperties(&prop, 0); // queries device 0 only
  std::cout << "Properties of device 0.\n\n";
  std::cout << " Name: " << prop.name << "\n";
  std::cout << " Global memory: " << prop.totalGlobalMem << "\n";
  std::cout << " Shared memory: " << prop.sharedMemPerBlock << "\n";
  std::cout << " Constant memory: " << prop.totalConstMem << "\n";
  std::cout << " Block registers: " << prop.regsPerBlock << "\n";
  std::cout << " Warp size: " << prop.warpSize << "\n";
  std::cout << " Threads per block: " << prop.maxThreadsPerBlock << "\n";
  std::cout << " Max block dimensions: [ " << prop.maxThreadsDim[0] << ", " << prop.maxThreadsDim[1] << ", " << prop.maxThreadsDim[2] << " ]" << "\n";
  std::cout << " Max grid dimensions: [ " << prop.maxGridSize[0] << ", " << prop.maxGridSize[1] << ", " << prop.maxGridSize[2] << " ]" << "\n";
  std::cout << "\n";
  return 0;
}
|
06cc98d80b9f9ab9ab25ce7ec2f491108dc2ccfd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <unittest/unittest.h>
#include <thrust/set_operations.h>
#include <thrust/execution_policy.h>
// Device-side wrapper around thrust::set_union_by_key: runs the algorithm
// from within a kernel under the given execution policy and stores the
// returned (keys_end, values_end) iterator pair through `result`.
// Launched with a single thread (dim3(1), dim3(1)) by the tests below.
template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename Iterator3, typename Iterator4, typename Iterator5, typename Iterator6, typename Iterator7>
__global__
void set_union_by_key_kernel(ExecutionPolicy exec,
                             Iterator1 keys_first1, Iterator1 keys_last1,
                             Iterator2 keys_first2, Iterator2 keys_last2,
                             Iterator3 values_first1,
                             Iterator4 values_first2,
                             Iterator5 keys_result,
                             Iterator6 values_result,
                             Iterator7 result)
{
  *result = thrust::set_union_by_key(exec, keys_first1, keys_last1, keys_first2, keys_last2, values_first1, values_first2, keys_result, values_result);
}
// Exercises thrust::set_union_by_key executed *inside* a kernel under the
// given execution policy: unions two sorted key sequences (values tag the
// source: 0 for A, 1 for B) and checks keys, values, and the returned end
// iterators against a precomputed reference.
template<typename ExecutionPolicy>
void TestSetUnionByKeyDevice(ExecutionPolicy exec)
{
  typedef thrust::device_vector<int> Vector;
  typedef typename Vector::iterator Iterator;
  // Fixture: A = {0, 2, 4} tagged 0; B = {0, 3, 3, 4} tagged 1.
  Vector a_key(3), b_key(4);
  Vector a_val(3), b_val(4);
  a_key[0] = 0; a_key[1] = 2; a_key[2] = 4;
  a_val[0] = 0; a_val[1] = 0; a_val[2] = 0;
  b_key[0] = 0; b_key[1] = 3; b_key[2] = 3; b_key[3] = 4;
  b_val[0] = 1; b_val[1] = 1; b_val[2] = 1; b_val[3] = 1;
  // Expected union keeps A's value on ties (set_union semantics).
  Vector ref_key(5), ref_val(5);
  ref_key[0] = 0; ref_key[1] = 2; ref_key[2] = 3; ref_key[3] = 3; ref_key[4] = 4;
  ref_val[0] = 0; ref_val[1] = 0; ref_val[2] = 1; ref_val[3] = 1; ref_val[4] = 0;
  Vector result_key(5), result_val(5);
  // The kernel reports the end-iterator pair through this one-element vector.
  thrust::device_vector<thrust::pair<Iterator,Iterator> > end_vec(1);
  hipLaunchKernelGGL(( set_union_by_key_kernel), dim3(1),dim3(1), 0, 0, exec,
                     a_key.begin(), a_key.end(),
                     b_key.begin(), b_key.end(),
                     a_val.begin(),
                     b_val.begin(),
                     result_key.begin(),
                     result_val.begin(),
                     end_vec.begin());
  thrust::pair<Iterator,Iterator> end = end_vec[0];
  ASSERT_EQUAL_QUIET(result_key.end(), end.first);
  ASSERT_EQUAL_QUIET(result_val.end(), end.second);
  ASSERT_EQUAL(ref_key, result_key);
  ASSERT_EQUAL(ref_val, result_val);
}
// Runs the device-side set_union_by_key test with the sequential policy.
void TestSetUnionByKeyDeviceSeq()
{
  TestSetUnionByKeyDevice(thrust::seq);
}
DECLARE_UNITTEST(TestSetUnionByKeyDeviceSeq);
// Run the device-side test with the device execution policy
// (thrust::device selects the device backend from within the kernel).
void TestSetUnionByKeyDeviceDevice()
{
TestSetUnionByKeyDevice(thrust::device);
}
DECLARE_UNITTEST(TestSetUnionByKeyDeviceDevice);
// Verifies set_union_by_key executed on a user-created HIP stream via
// thrust::hip::par.on(), using the same fixture as the device-policy tests.
void TestSetUnionByKeyCudaStreams()
{
  typedef thrust::device_vector<int> Vector;
  typedef typename Vector::iterator Iterator;

  Vector keys_a(3), vals_a(3);
  Vector keys_b(4), vals_b(4);

  // First keyed range: keys {0,2,4}, all values 0.
  keys_a[0] = 0; keys_a[1] = 2; keys_a[2] = 4;
  vals_a[0] = 0; vals_a[1] = 0; vals_a[2] = 0;

  // Second keyed range: keys {0,3,3,4}, all values 1.
  keys_b[0] = 0; keys_b[1] = 3; keys_b[2] = 3; keys_b[3] = 4;
  vals_b[0] = 1; vals_b[1] = 1; vals_b[2] = 1; vals_b[3] = 1;

  // Expected union keeps the value from the first range wherever a key
  // occurs in both inputs.
  Vector expected_keys(5), expected_vals(5);
  expected_keys[0] = 0; expected_keys[1] = 2; expected_keys[2] = 3; expected_keys[3] = 3; expected_keys[4] = 4;
  expected_vals[0] = 0; expected_vals[1] = 0; expected_vals[2] = 1; expected_vals[3] = 1; expected_vals[4] = 0;

  Vector out_keys(5), out_vals(5);

  hipStream_t stream;
  hipStreamCreate(&stream);

  thrust::pair<Iterator,Iterator> ends =
    thrust::set_union_by_key(thrust::hip::par.on(stream),
                             keys_a.begin(), keys_a.end(),
                             keys_b.begin(), keys_b.end(),
                             vals_a.begin(),
                             vals_b.begin(),
                             out_keys.begin(),
                             out_vals.begin());
  hipStreamSynchronize(stream);

  ASSERT_EQUAL_QUIET(out_keys.end(), ends.first);
  ASSERT_EQUAL_QUIET(out_vals.end(), ends.second);
  ASSERT_EQUAL(expected_keys, out_keys);
  ASSERT_EQUAL(expected_vals, out_vals);

  hipStreamDestroy(stream);
}
DECLARE_UNITTEST(TestSetUnionByKeyCudaStreams);
// 06cc98d80b9f9ab9ab25ce7ec2f491108dc2ccfd.cu
#include <unittest/unittest.h>
#include <thrust/set_operations.h>
#include <thrust/execution_policy.h>
// Single-thread device wrapper: runs thrust::set_union_by_key under the given
// execution policy from inside a kernel and writes the returned
// (keys_end, values_end) iterator pair through `result`.
// Launched as a 1x1 grid by the tests below.
template<typename ExecutionPolicy,
         typename Iterator1, typename Iterator2, typename Iterator3,
         typename Iterator4, typename Iterator5, typename Iterator6,
         typename Iterator7>
__global__
void set_union_by_key_kernel(ExecutionPolicy exec,
                             Iterator1 first1, Iterator1 last1,
                             Iterator2 first2, Iterator2 last2,
                             Iterator3 vals1,
                             Iterator4 vals2,
                             Iterator5 keys_out,
                             Iterator6 vals_out,
                             Iterator7 result)
{
  *result = thrust::set_union_by_key(exec,
                                     first1, last1,
                                     first2, last2,
                                     vals1, vals2,
                                     keys_out, vals_out);
}
// Exercises set_union_by_key launched from device code under the given
// execution policy. Unions a 3-element and a 4-element keyed range; where a
// key appears in both inputs the value from the first range (`a_val`) wins.
template<typename ExecutionPolicy>
void TestSetUnionByKeyDevice(ExecutionPolicy exec)
{
  typedef thrust::device_vector<int> Vector;
  typedef typename Vector::iterator Iterator;

  Vector a_key(3), b_key(4);
  Vector a_val(3), b_val(4);

  a_key[0] = 0; a_key[1] = 2; a_key[2] = 4;
  a_val[0] = 0; a_val[1] = 0; a_val[2] = 0;

  b_key[0] = 0; b_key[1] = 3; b_key[2] = 3; b_key[3] = 4;
  b_val[0] = 1; b_val[1] = 1; b_val[2] = 1; b_val[3] = 1;

  // Expected union: keys {0,2,3,3,4}; values come from `a` for keys present
  // in `a`, otherwise from `b`.
  Vector ref_key(5), ref_val(5);
  ref_key[0] = 0; ref_key[1] = 2; ref_key[2] = 3; ref_key[3] = 3; ref_key[4] = 4;
  ref_val[0] = 0; ref_val[1] = 0; ref_val[2] = 1; ref_val[3] = 1; ref_val[4] = 0;

  Vector result_key(5), result_val(5);

  // The kernel stores the algorithm's returned end-iterator pair here.
  thrust::device_vector<thrust::pair<Iterator,Iterator> > end_vec(1);

  set_union_by_key_kernel<<<1,1>>>(exec,
                                   a_key.begin(), a_key.end(),
                                   b_key.begin(), b_key.end(),
                                   a_val.begin(),
                                   b_val.begin(),
                                   result_key.begin(),
                                   result_val.begin(),
                                   end_vec.begin());
  // A kernel launch does not report errors directly; surface launch-time
  // failures here instead of as a sticky error at the next API call.
  {
    cudaError_t const err = cudaGetLastError();
    ASSERT_EQUAL(cudaSuccess, err);
  }

  thrust::pair<Iterator,Iterator> end = end_vec[0];

  ASSERT_EQUAL_QUIET(result_key.end(), end.first);
  ASSERT_EQUAL_QUIET(result_val.end(), end.second);
  ASSERT_EQUAL(ref_key, result_key);
  ASSERT_EQUAL(ref_val, result_val);
}
// Run the device-side test with the sequential execution policy
// (thrust::seq executes the algorithm serially within the kernel).
void TestSetUnionByKeyDeviceSeq()
{
TestSetUnionByKeyDevice(thrust::seq);
}
DECLARE_UNITTEST(TestSetUnionByKeyDeviceSeq);
// Run the device-side test with the device execution policy
// (thrust::device selects the device backend from within the kernel).
void TestSetUnionByKeyDeviceDevice()
{
TestSetUnionByKeyDevice(thrust::device);
}
DECLARE_UNITTEST(TestSetUnionByKeyDeviceDevice);
// Verifies set_union_by_key executed on a user-created CUDA stream via
// thrust::cuda::par.on(), using the same fixture as the device-policy tests.
void TestSetUnionByKeyCudaStreams()
{
  typedef thrust::device_vector<int> Vector;
  typedef typename Vector::iterator Iterator;

  Vector keys_a(3), vals_a(3);
  Vector keys_b(4), vals_b(4);

  // First keyed range: keys {0,2,4}, all values 0.
  keys_a[0] = 0; keys_a[1] = 2; keys_a[2] = 4;
  vals_a[0] = 0; vals_a[1] = 0; vals_a[2] = 0;

  // Second keyed range: keys {0,3,3,4}, all values 1.
  keys_b[0] = 0; keys_b[1] = 3; keys_b[2] = 3; keys_b[3] = 4;
  vals_b[0] = 1; vals_b[1] = 1; vals_b[2] = 1; vals_b[3] = 1;

  // Expected union keeps the value from the first range wherever a key
  // occurs in both inputs.
  Vector expected_keys(5), expected_vals(5);
  expected_keys[0] = 0; expected_keys[1] = 2; expected_keys[2] = 3; expected_keys[3] = 3; expected_keys[4] = 4;
  expected_vals[0] = 0; expected_vals[1] = 0; expected_vals[2] = 1; expected_vals[3] = 1; expected_vals[4] = 0;

  Vector out_keys(5), out_vals(5);

  cudaStream_t stream;
  cudaStreamCreate(&stream);

  thrust::pair<Iterator,Iterator> ends =
    thrust::set_union_by_key(thrust::cuda::par.on(stream),
                             keys_a.begin(), keys_a.end(),
                             keys_b.begin(), keys_b.end(),
                             vals_a.begin(),
                             vals_b.begin(),
                             out_keys.begin(),
                             out_vals.begin());
  cudaStreamSynchronize(stream);

  ASSERT_EQUAL_QUIET(out_keys.end(), ends.first);
  ASSERT_EQUAL_QUIET(out_vals.end(), ends.second);
  ASSERT_EQUAL(expected_keys, out_keys);
  ASSERT_EQUAL(expected_vals, out_vals);

  cudaStreamDestroy(stream);
}
DECLARE_UNITTEST(TestSetUnionByKeyCudaStreams);
// (dataset page residue, preserved as comments)
// Subsets and Splits
// No community queries yet
// The top public SQL queries from the community will appear here once available.