diff --git a/.gitattributes b/.gitattributes index f53286e5f141fabb59c097eee625d84bd739893f..fe68a2c758ba4d2ee020cffcb9592a5aa202c244 100644 --- a/.gitattributes +++ b/.gitattributes @@ -179,3 +179,4 @@ parrot/lib/python3.10/site-packages/xxhash/_xxhash.cpython-310-x86_64-linux-gnu. parrot/lib/python3.10/site-packages/mpmath/__pycache__/function_docs.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text parrot/lib/python3.10/site-packages/torchvision/image.so filter=lfs diff=lfs merge=lfs -text parrot/lib/python3.10/site-packages/pillow.libs/libpng16-58efbb84.so.16.43.0 filter=lfs diff=lfs merge=lfs -text +parrot/lib/libsqlite3.so.0.8.6 filter=lfs diff=lfs merge=lfs -text diff --git a/parrot/lib/libsqlite3.so.0.8.6 b/parrot/lib/libsqlite3.so.0.8.6 new file mode 100644 index 0000000000000000000000000000000000000000..531fb86e0309a27d33fb4bc03e4442023e5cd590 --- /dev/null +++ b/parrot/lib/libsqlite3.so.0.8.6 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:71932eb5bf89092fbd2c900601fc9f24aa184d65038aaec2445fd11b1d923327 +size 1543808 diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/adam/multi_tensor_apply.cuh b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/adam/multi_tensor_apply.cuh new file mode 100644 index 0000000000000000000000000000000000000000..12f41cb49c6bf505db48f1d21e312578f19da836 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/adam/multi_tensor_apply.cuh @@ -0,0 +1,132 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/* +Copyright NVIDIA/apex +This file is adapted from fused adam in NVIDIA/apex, commit a109f85 +*/ + +#include +#include +#include +#include +#include +#include "compat.h" + +#include + +// #include + +// This header is the one-stop shop for all your multi-tensor apply needs. 
+ +// TODO: Kernel arg size limit may be <4KB for some other cards (ie Jetson) +constexpr int depth_to_max_tensors[5] = {110, 64, 48, 36, 30}; +constexpr int depth_to_max_blocks[5] = {320, 320, 320, 320, 320}; + +template +struct TensorListMetadata { + void* addresses[n][depth_to_max_tensors[n - 1]]; + int sizes[depth_to_max_tensors[n - 1]]; + unsigned char block_to_tensor[depth_to_max_blocks[n - 1]]; + int block_to_chunk[depth_to_max_blocks[n - 1]]; // I fear this needs to be a full int. + int start_tensor_this_launch; +}; + +template +__global__ void multi_tensor_apply_kernel(int chunk_size, + volatile int* noop_flag, + T tl, + U callable, + ArgTypes... args) +{ + // Hand the chunk information to the user-supplied functor to process however it likes. + callable(chunk_size, noop_flag, tl, args...); +} + +template +void multi_tensor_apply(int block_size, + int chunk_size, + const at::Tensor& noop_flag, + const std::vector>& tensor_lists, + T callable, + ArgTypes... args) +{ + TORCH_CHECK(tensor_lists.size() == depth, "tensor_lists.size() != depth"); + int len0 = tensor_lists[0].size(); + TORCH_CHECK(len0 > 0, "tensor_lists[0].size() is not > 0"); + auto ref_device = tensor_lists[0][0].device(); + TORCH_CHECK(ref_device.type() == at::kCUDA, "expected input to be on cuda"); + for (int l = 0; l < tensor_lists.size(); l++) // No range-based for because I need indices + { + TORCH_CHECK(tensor_lists[l].size() == len0, "Size mismatch among tensor lists"); + for (int t = 0; t < tensor_lists[l].size(); t++) { + // TODO: Print which tensor fails. 
+ bool contiguous_memory = tensor_lists[l][t].is_contiguous(); +#ifdef VERSION_GE_1_5 + contiguous_memory = (contiguous_memory || + tensor_lists[l][t].is_contiguous(at::MemoryFormat::ChannelsLast)); +#endif + TORCH_CHECK(contiguous_memory, "A tensor was not contiguous."); + TORCH_CHECK(tensor_lists[l][t].device() == ref_device, + "A tensor was not on the same device as the first tensor"); + TORCH_CHECK(tensor_lists[l][t].numel() == tensor_lists[0][t].numel(), "Size mismatch"); + } + } + + int ntensors = tensor_lists[0].size(); + + TensorListMetadata tl; + + const at::cuda::OptionalCUDAGuard device_guard(device_of(tensor_lists[0][0])); + auto stream = at::cuda::getCurrentCUDAStream(); + + tl.start_tensor_this_launch = 0; + int loc_block_info = 0; + int loc_tensor_info = 0; + for (int t = 0; t < ntensors; t++) { + tl.sizes[loc_tensor_info] = tensor_lists[0][t].numel(); + for (int d = 0; d < depth; d++) + tl.addresses[d][loc_tensor_info] = tensor_lists[d][t].data_ptr(); + loc_tensor_info++; + + int chunks_this_tensor = (tensor_lists[0][t].numel() + chunk_size - 1) / chunk_size; + + for (int chunk = 0; chunk < chunks_this_tensor; chunk++) { + // std::cout << chunks_this_tensor << std::endl; + tl.block_to_tensor[loc_block_info] = loc_tensor_info - 1; + tl.block_to_chunk[loc_block_info] = chunk; + loc_block_info++; + + bool tensors_full = (loc_tensor_info == depth_to_max_tensors[depth - 1] && + chunk == chunks_this_tensor - 1); + bool blocks_full = (loc_block_info == depth_to_max_blocks[depth - 1]); + bool last_chunk = (t == ntensors - 1 && chunk == chunks_this_tensor - 1); + if (tensors_full || blocks_full || last_chunk) { + // using accscalar_t = acc_type; + multi_tensor_apply_kernel<<>>( + chunk_size, noop_flag.DATA_PTR(), tl, callable, args...); + + AT_CUDA_CHECK(cudaGetLastError()); + + // Reset. The control flow possibilities here make my brain hurt. 
+ loc_block_info = 0; + if (chunk == chunks_this_tensor - 1) { + // std::cout << "Hit case 1 " << cond1 << " " << cond2 << " " << cond3 << + // std::endl; + loc_tensor_info = 0; + tl.start_tensor_this_launch = t + 1; + } else { + // std::cout << "Hit case 2 " << cond1 << " " << cond2 << " " << cond3 << + // std::endl; + tl.sizes[0] = tl.sizes[loc_tensor_info - 1]; + for (int d = 0; d < depth; d++) + tl.addresses[d][0] = tl.addresses[d][loc_tensor_info - 1]; + loc_tensor_info = 1; + tl.start_tensor_this_launch = t; + } + } + } + } +} diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/common/deepspeed_aio_common.cpp b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/common/deepspeed_aio_common.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0f2895dfa328b1a15ac8b29075722584e8c5ec5e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/common/deepspeed_aio_common.cpp @@ -0,0 +1,342 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/* +Functionality for swapping optimizer tensors to/from (NVMe) storage devices. 
+*/ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "deepspeed_aio_common.h" + +using namespace std; +using namespace std::chrono; + +#define DEBUG_DS_AIO_PERF 0 +#define DEBUG_DS_AIO_SUBMIT_PERF 0 + +static const std::string c_library_name = "deepspeed_aio"; + +static void _report_aio_statistics(const char* tag, + const std::vector>& latencies) + __attribute__((unused)); + +static void _report_aio_statistics(const char* tag, + const std::vector>& latencies) +{ + std::vector lat_usec; + for (auto& lat : latencies) { lat_usec.push_back(lat.count() * 1e6); } + const auto min_lat = *(std::min_element(lat_usec.begin(), lat_usec.end())); + const auto max_lat = *(std::max_element(lat_usec.begin(), lat_usec.end())); + const auto avg_lat = std::accumulate(lat_usec.begin(), lat_usec.end(), 0) / lat_usec.size(); + + std::cout << c_library_name << ": latency statistics(usec) " << tag + << " min/max/avg = " << min_lat << " " << max_lat << " " << avg_lat << std::endl; +} + +static void _get_aio_latencies(std::vector>& raw_latencies, + struct deepspeed_aio_latency_t& summary_latencies) +{ + std::vector lat_usec; + for (auto& lat : raw_latencies) { lat_usec.push_back(lat.count() * 1e6); } + summary_latencies._min_usec = *(std::min_element(lat_usec.begin(), lat_usec.end())); + summary_latencies._max_usec = *(std::max_element(lat_usec.begin(), lat_usec.end())); + summary_latencies._avg_usec = + std::accumulate(lat_usec.begin(), lat_usec.end(), 0) / lat_usec.size(); +} + +static void _do_io_submit_singles(const long long int n_iocbs, + const long long int iocb_index, + std::unique_ptr& aio_ctxt, + std::vector>& submit_times) +{ + for (auto i = 0; i < n_iocbs; ++i) { + const auto st = std::chrono::high_resolution_clock::now(); + const auto submit_ret = io_submit(aio_ctxt->_io_ctxt, 1, aio_ctxt->_iocbs.data() + i); + 
submit_times.push_back(std::chrono::high_resolution_clock::now() - st); +#if DEBUG_DS_AIO_SUBMIT_PERF + printf("submit(usec) %f io_index=%lld buf=%p len=%lu off=%llu \n", + submit_times.back().count() * 1e6, + iocb_index, + aio_ctxt->_iocbs[i]->u.c.buf, + aio_ctxt->_iocbs[i]->u.c.nbytes, + aio_ctxt->_iocbs[i]->u.c.offset); +#endif + assert(submit_ret > 0); + } +} + +static void _do_io_submit_block(const long long int n_iocbs, + const long long int iocb_index, + std::unique_ptr& aio_ctxt, + std::vector>& submit_times) +{ + const auto st = std::chrono::high_resolution_clock::now(); + const auto submit_ret = io_submit(aio_ctxt->_io_ctxt, n_iocbs, aio_ctxt->_iocbs.data()); + submit_times.push_back(std::chrono::high_resolution_clock::now() - st); +#if DEBUG_DS_AIO_SUBMIT_PERF + printf("submit(usec) %f io_index=%lld nr=%lld buf=%p len=%lu off=%llu \n", + submit_times.back().count() * 1e6, + iocb_index, + n_iocbs, + aio_ctxt->_iocbs[0]->u.c.buf, + aio_ctxt->_iocbs[0]->u.c.nbytes, + aio_ctxt->_iocbs[0]->u.c.offset); +#endif + assert(submit_ret > 0); +} + +static int _do_io_complete(const long long int min_completes, + const long long int max_completes, + std::unique_ptr& aio_ctxt, + std::vector>& reap_times) +{ + const auto start_time = std::chrono::high_resolution_clock::now(); + long long int n_completes = io_pgetevents(aio_ctxt->_io_ctxt, + min_completes, + max_completes, + aio_ctxt->_io_events.data(), + nullptr, + nullptr); + reap_times.push_back(std::chrono::high_resolution_clock::now() - start_time); + assert(n_completes >= min_completes); + return n_completes; +} + +void do_aio_operation_sequential(const bool read_op, + std::unique_ptr& aio_ctxt, + std::unique_ptr& xfer_ctxt, + deepspeed_aio_config_t* config, + deepspeed_aio_perf_t* perf) +{ + struct io_prep_context prep_ctxt(read_op, xfer_ctxt, aio_ctxt->_block_size, &aio_ctxt->_iocbs); + + const auto num_io_blocks = static_cast( + ceil(static_cast(xfer_ctxt->_num_bytes) / aio_ctxt->_block_size)); +#if 
DEBUG_DS_AIO_PERF + const auto io_op_name = std::string(read_op ? "read" : "write"); + std::cout << c_library_name << ": start " << io_op_name << " " << xfer_ctxt->_num_bytes + << " bytes with " << num_io_blocks << " io blocks" << std::endl; +#endif + + std::vector> submit_times; + std::vector> reap_times; + const auto max_queue_bytes = + static_cast(aio_ctxt->_queue_depth * aio_ctxt->_block_size); + + auto start = std::chrono::high_resolution_clock::now(); + for (long long iocb_index = 0; iocb_index < num_io_blocks; + iocb_index += aio_ctxt->_queue_depth) { + const auto start_offset = iocb_index * aio_ctxt->_block_size; + const auto start_buffer = (char*)xfer_ctxt->_mem_buffer + start_offset; + const auto n_iocbs = + min(static_cast(aio_ctxt->_queue_depth), (num_io_blocks - iocb_index)); + const auto num_bytes = min(max_queue_bytes, (xfer_ctxt->_num_bytes - start_offset)); + prep_ctxt.prep_iocbs(n_iocbs, num_bytes, start_buffer, start_offset); + + if (config->_single_submit) { + _do_io_submit_singles(n_iocbs, iocb_index, aio_ctxt, submit_times); + } else { + _do_io_submit_block(n_iocbs, iocb_index, aio_ctxt, submit_times); + } + + _do_io_complete(n_iocbs, n_iocbs, aio_ctxt, reap_times); + } + const std::chrono::duration elapsed = std::chrono::high_resolution_clock::now() - start; + + if (perf) { + _get_aio_latencies(submit_times, perf->_submit); + _get_aio_latencies(reap_times, perf->_complete); + perf->_e2e_usec = elapsed.count() * 1e6; + perf->_e2e_rate_GB = (xfer_ctxt->_num_bytes / elapsed.count() / 1e9); + } + +#if DEBUG_DS_AIO_PERF + _report_aio_statistics("submit", submit_times); + _report_aio_statistics("complete", reap_times); +#endif + +#if DEBUG_DS_AIO_PERF + std::cout << c_library_name << ": runtime(usec) " << elapsed.count() * 1e6 + << " rate(GB/sec) = " << (xfer_ctxt->_num_bytes / elapsed.count() / 1e9) << std::endl; +#endif + +#if DEBUG_DS_AIO_PERF + std::cout << c_library_name << ": finish " << io_op_name << " " << xfer_ctxt->_num_bytes + << " bytes 
" << std::endl; +#endif +} + +void do_aio_operation_overlap(const bool read_op, + std::unique_ptr& aio_ctxt, + std::unique_ptr& xfer_ctxt, + deepspeed_aio_config_t* config, + deepspeed_aio_perf_t* perf) +{ + struct io_prep_generator io_gen(read_op, xfer_ctxt, aio_ctxt->_block_size); + +#if DEBUG_DS_AIO_PERF + const auto io_op_name = std::string(read_op ? "read" : "write"); + std::cout << c_library_name << ": start " << io_op_name << " " << xfer_ctxt->_num_bytes + << " bytes with " << io_gen._num_io_blocks << " io blocks" << std::endl; +#endif + + std::vector> submit_times; + std::vector> reap_times; + + auto request_iocbs = aio_ctxt->_queue_depth; + auto n_pending_iocbs = 0; + const auto min_completes = 1; + auto start = std::chrono::high_resolution_clock::now(); + while (true) { + const auto n_iocbs = io_gen.prep_iocbs(request_iocbs - n_pending_iocbs, &aio_ctxt->_iocbs); + if (n_iocbs > 0) { + if (config->_single_submit) { + _do_io_submit_singles( + n_iocbs, (io_gen._next_iocb_index - n_iocbs), aio_ctxt, submit_times); + } else { + _do_io_submit_block( + n_iocbs, (io_gen._next_iocb_index - n_iocbs), aio_ctxt, submit_times); + } + } + + n_pending_iocbs += n_iocbs; + assert(n_pending_iocbs <= aio_ctxt->_queue_depth); + + if (n_pending_iocbs == 0) { break; } + + const auto n_complete = + _do_io_complete(min_completes, n_pending_iocbs, aio_ctxt, reap_times); + n_pending_iocbs -= n_complete; + } + + const std::chrono::duration elapsed = std::chrono::high_resolution_clock::now() - start; + + if (perf) { + _get_aio_latencies(submit_times, perf->_submit); + _get_aio_latencies(reap_times, perf->_complete); + perf->_e2e_usec = elapsed.count() * 1e6; + perf->_e2e_rate_GB = (xfer_ctxt->_num_bytes / elapsed.count() / 1e9); + } + +#if DEBUG_DS_AIO_PERF + _report_aio_statistics("submit", submit_times); + _report_aio_statistics("complete", reap_times); +#endif + +#if DEBUG_DS_AIO_PERF + std::cout << c_library_name << ": runtime(usec) " << elapsed.count() * 1e6 + << " rate(GB/sec) 
= " << (xfer_ctxt->_num_bytes / elapsed.count() / 1e9) << std::endl; +#endif + +#if DEBUG_DS_AIO_PERF + std::cout << c_library_name << ": finish " << io_op_name << " " << xfer_ctxt->_num_bytes + << " bytes " << std::endl; +#endif +} + +void report_file_error(const char* filename, const std::string file_op, const int error_code) +{ + std::string err_msg = file_op + std::string(" failed on ") + std::string(filename) + + " error = " + std::to_string(error_code); + std::cerr << c_library_name << ": " << err_msg << std::endl; +} + +int open_file(const char* filename, const bool read_op) +{ + const int flags = read_op ? (O_RDONLY | O_DIRECT) : (O_WRONLY | O_CREAT | O_DIRECT); +#if defined(__ENABLE_CANN__) + int* flags_ptr = (int*)&flags; + *flags_ptr = read_op ? (O_RDONLY) : (O_WRONLY | O_CREAT); +#endif + const int mode = 0600; + const auto fd = open(filename, flags, mode); + if (fd == -1) { + const auto error_code = errno; + const auto error_msg = read_op ? " open for read " : " open for write "; + report_file_error(filename, error_msg, error_code); + return -1; + } + return fd; +} + +int regular_read(const char* filename, std::vector& buffer) +{ + long long int num_bytes; + const auto f_size = get_file_size(filename, num_bytes); + assert(f_size != -1); + buffer.resize(num_bytes); + const auto fd = open(filename, O_RDONLY, 0600); + assert(fd != -1); + long long int read_bytes = 0; + auto r = 0; + do { + const auto buffer_ptr = buffer.data() + read_bytes; + const auto bytes_to_read = num_bytes - read_bytes; + r = read(fd, buffer_ptr, bytes_to_read); + read_bytes += r; + } while (r > 0); + + if (read_bytes != num_bytes) { + std::cerr << "read error " + << " read_bytes (read) = " << read_bytes << " num_bytes (fstat) = " << num_bytes + << std::endl; + } + assert(read_bytes == num_bytes); + close(fd); + return 0; +} + +static bool _validate_buffer(const char* filename, void* aio_buffer, const long long int num_bytes) +{ + std::vector regular_buffer; + const auto reg_ret = 
regular_read(filename, regular_buffer); + assert(0 == reg_ret); + std::cout << "regular read of " << filename << " returned " << regular_buffer.size() << " bytes" + << std::endl; + + if (static_cast(regular_buffer.size()) != num_bytes) { return false; } + + return (0 == memcmp(aio_buffer, regular_buffer.data(), regular_buffer.size())); +} + +bool validate_aio_operation(const bool read_op, + const char* filename, + void* aio_buffer, + const long long int num_bytes) +{ + const auto msg_suffix = std::string("deepspeed_aio_") + + std::string(read_op ? "read()" : "write()") + + std::string("using read()"); + + if (false == _validate_buffer(filename, aio_buffer, num_bytes)) { + std::cout << "Fail: correctness of " << msg_suffix << std::endl; + return false; + } + + std::cout << "Pass: correctness of " << msg_suffix << std::endl; + return true; +} diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/cpu/adam/fused_adam.cpp b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/cpu/adam/fused_adam.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d25578f410da278dace61fb5b488b14ed1257c9f --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/cpu/adam/fused_adam.cpp @@ -0,0 +1,48 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "cpu_adam.h" + +// C++ interface + +void multi_tensor_adam(int chunk_size, + at::Tensor noop_flag, + std::vector> tensor_lists, /*gpmv*/ + const float lr, + const float beta1, + const float beta2, + const float epsilon, + const int step, + const int mode, + const int bias_correction, + const float weight_decay) +{ + static bool initialized = false; + if (!initialized) { + create_adam_optimizer(0); + initialized = true; + } + for (int i = 0; i < tensor_lists[0].size(); i++) { + ds_adam_step(0, + step, + lr, + beta1, + beta2, + epsilon, + weight_decay, + bias_correction, + tensor_lists[1][i], + tensor_lists[0][i], + tensor_lists[2][i], + tensor_lists[3][i]); + } +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) +{ + m.def("multi_tensor_adam", + &multi_tensor_adam, + "Compute and apply gradient update to parameters for Adam optimizer"); +} diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/cpu/comm/ccl.cpp b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/cpu/comm/ccl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..6428ab5cbfa586c6c99f98a647525a3bae87bab4 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/cpu/comm/ccl.cpp @@ -0,0 +1,639 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// states for collectives +enum coll_state { + coll_begin = 0, + // coll states for naive allreduce + coll_allreduce_naive__copy_in_done, // this state is for rank != 0 + coll_allreduce_naive__reduce_done, // this state is for rank == 0 + coll_allreduce_naive__copy_out_done, // this state is for rank != 0 +}; + +// SHM building blocks +struct SharedData { + const char* name; + int descriptor; + void* bytes; + size_t nbytes; +}; + +void shared_open(SharedData* data, const char* name, size_t nbytes) +{ + int d = shm_open(name, O_RDWR, S_IRUSR | S_IWUSR); + if (d != -1) { + void* bytes = mmap(NULL, nbytes, PROT_READ | PROT_WRITE, MAP_SHARED, d, 0); + data->name = name; + data->descriptor = d; + data->bytes = bytes; + data->nbytes = nbytes; + } else { + printf("shared_open %s failed\n", name); + data->descriptor = -1; + } +} + +void shared_create(SharedData* data, const char* name, void* bytes, size_t nbytes) +{ + int d = shm_open(name, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR); + if (d != -1) { + if (nbytes = write(d, bytes, nbytes)) { shared_open(data, name, nbytes); } + } else { + printf("shared_create %s failed\n", name); + } +} + +void shared_close(SharedData* data) +{ + if (data->descriptor != -1) { + munmap(data->bytes, data->nbytes); + shm_unlink(data->name); + } +} + +// SHM based allreduce helper functions +// buffer that holds shm name +#define NAME_BUF_SIZE 1000 +#define MAX_BUF_SIZE 1048576 +#define SHM_BUFFER_NAME "deepspeed_allreduce_buffer" +SharedData allreduce_buffer; +struct allreduce_workspace { + enum coll_state state; + char buffer[MAX_BUF_SIZE]; +}; +struct allreduce_workspace* workspace; + +void wait_buffer_state_until(int index, enum coll_state state) +{ + volatile enum coll_state* state_ptr = &(workspace[index].state); + + while (*state_ptr != state) + ; +} + 
+void wait_buffer_state_until_not(int index, enum coll_state state) +{ + volatile enum coll_state* state_ptr = &(workspace[index].state); + + while (*state_ptr == state) + ; +} + +__m512 cvt_bf16_to_fp32(const __m256i src) __attribute__((target("avx512bw"))); +inline __m512 cvt_bf16_to_fp32(const __m256i src) +{ + auto y = _mm512_cvtepu16_epi32(src); + return _mm512_castsi512_ps(_mm512_bslli_epi128(y, 2)); +} + +inline __m256i cvt_fp32_to_bf16(const __m512 src) __attribute__((target("avx512bw"))); +inline __m256i cvt_fp32_to_bf16(const __m512 src) +{ + __m512i value = _mm512_castps_si512(src); + __m512i nan = _mm512_set1_epi32(0xffff); + auto mask_value = _mm512_cmp_ps_mask(src, src, _CMP_ORD_Q); + __m512i ones = _mm512_set1_epi32(0x1); + __m512i vec_bias = _mm512_set1_epi32(0x7fff); + // uint32_t lsb = (input >> 16) & 1; + auto t_value = _mm512_and_si512(_mm512_srli_epi32(value, 16), ones); + // uint32_t rounding_bias = 0x7fff + lsb; + t_value = _mm512_add_epi32(t_value, vec_bias); + // input += rounding_bias; + t_value = _mm512_add_epi32(t_value, value); + // input = input >> 16; + t_value = _mm512_srli_epi32(t_value, 16); + // Check NaN before converting back to bf16 + t_value = _mm512_mask_blend_epi32(mask_value, nan, t_value); + return _mm512_cvtusepi32_epi16(t_value); +} + +void reduce_2_bf16_buffers(int num_elements, void* in_out, void* in) + __attribute__((target("avx512bw"))); + +void reduce_bf16_buffers(int num_elements, int num_buffers, struct allreduce_workspace* workspace) + __attribute__((target("avx512bw"))); + +void reduce_2_fp32_buffers(int num_elements, void* in_out, void* in) + __attribute__((target("avx512bw"))); + +void reduce_fp32_buffers(int num_elements, int num_buffers, struct allreduce_workspace* workspace) + __attribute__((target("avx512bw"))); + +// N_REDUCE_LIMIT is the number of buffers that can be reduced together in one shot. 
+// Compared with do N-1 2-reduces which needs 2*(N-1) read and N-1 write, +// N-reduce only needs N read and 1 write, this saves 2/3 memory bandwidth. +// When increase N_REDUCE_LIMIT to a bigger number, do the following steps +// 1. Extend REPEAT_ macros list down below +// 2. Extend switch cases which call "REPEAT(X, ...)" down below +#define N_REDUCE_LIMIT 8 + +void reduce_all_buffers(struct allreduce_workspace* workspace, + int num_elements, + c10::ScalarType scalar_type, + int num_buffers) +{ + switch (scalar_type) { + case c10::ScalarType::BFloat16: + if (num_buffers > 2 && num_buffers <= N_REDUCE_LIMIT) { + reduce_bf16_buffers(num_elements, num_buffers, workspace); + } else { + for (int i = 1; i < num_buffers; i++) { + reduce_2_bf16_buffers(num_elements, workspace[0].buffer, workspace[i].buffer); + } + } + break; + case c10::ScalarType::Float: + if (num_buffers > 2 && num_buffers <= N_REDUCE_LIMIT) { + reduce_fp32_buffers(num_elements, num_buffers, workspace); + } else { + for (int i = 1; i < num_buffers; i++) { + reduce_2_fp32_buffers(num_elements, workspace[0].buffer, workspace[i].buffer); + } + } + break; + default: assert(!"Should not get here"); + } +} + +#define REPEAT(N, x) REPEAT_##N(x) +#define REPEAT_1(x) x(1) +#define REPEAT_2(x) \ + REPEAT_1(x); \ + x(2) +#define REPEAT_3(x) \ + REPEAT_2(x); \ + x(3) +#define REPEAT_4(x) \ + REPEAT_3(x); \ + x(4) +#define REPEAT_5(x) \ + REPEAT_4(x); \ + x(5) +#define REPEAT_6(x) \ + REPEAT_5(x); \ + x(6) +#define REPEAT_7(x) \ + REPEAT_6(x); \ + x(7) + +#define CVT_ADD_BF16(x) \ + do { \ + auto in##x##_val = \ + cvt_bf16_to_fp32(_mm256_loadu_si256((__m256i*)(workspace[x].buffer + i))); \ + inout_val = _mm512_add_ps(inout_val, in##x##_val); \ + } while (0) + +// Reduce functions down below use vectorized algorithm, the number of bytes processed each +// iteration depends on vector length. 
256bit vector ==> 32 bytes, 512bit vector ==> 64 bytes +// If you change implementation of reduce_2_bf16_buffers or reduce_2_fp32_buffers, check +// whether this number needs to be changed +#define VECTOR_LENGTH_IN_BYTES 32 + +// num_elements must be divisible by 16 (caller check) +void reduce_bf16_buffers(int num_elements, int num_buffers, struct allreduce_workspace* workspace) +{ +#pragma omp parallel for + for (int i = 0; i < num_elements * 2; i += VECTOR_LENGTH_IN_BYTES) { + auto inout_val = cvt_bf16_to_fp32(_mm256_loadu_si256((__m256i*)(workspace[0].buffer + i))); + switch (num_buffers) { + case 8: REPEAT(7, CVT_ADD_BF16); break; + case 7: REPEAT(6, CVT_ADD_BF16); break; + case 6: REPEAT(5, CVT_ADD_BF16); break; + case 5: REPEAT(4, CVT_ADD_BF16); break; + case 4: REPEAT(3, CVT_ADD_BF16); break; + case 3: REPEAT(2, CVT_ADD_BF16); break; + default: assert(!"Should not get here."); + } + _mm256_storeu_si256((__m256i*)(workspace[0].buffer + i), cvt_fp32_to_bf16(inout_val)); + } +} + +void reduce_2_bf16_buffers(int num_elements, void* in_out, void* in1) +{ +#pragma omp parallel for + for (int i = 0; i < num_elements * 2; i += VECTOR_LENGTH_IN_BYTES) { + auto inout_val = cvt_bf16_to_fp32(_mm256_loadu_si256((__m256i*)((char*)in_out + i))); + auto in1_val = cvt_bf16_to_fp32(_mm256_loadu_si256((__m256i*)((char*)in1 + i))); + inout_val = _mm512_add_ps(inout_val, in1_val); + _mm256_storeu_si256((__m256i*)((char*)in_out + i), cvt_fp32_to_bf16(inout_val)); + } +} + +#define CVT_ADD_F32(x) \ + do { \ + auto in##x##_val = _mm256_loadu_ps((float*)(workspace[x].buffer + i)); \ + inout_val = _mm256_add_ps(inout_val, in##x##_val); \ + } while (0) + +// num_elements must be divisible by 16 (caller check) +void reduce_fp32_buffers(int num_elements, int num_buffers, struct allreduce_workspace* workspace) +{ +#pragma omp parallel for + for (int i = 0; i < num_elements * 4; i += VECTOR_LENGTH_IN_BYTES) { + auto inout_val = _mm256_loadu_ps((float*)(workspace[0].buffer + i)); + switch 
(num_buffers) { + case 8: REPEAT(7, CVT_ADD_F32); break; + case 7: REPEAT(6, CVT_ADD_F32); break; + case 6: REPEAT(5, CVT_ADD_F32); break; + case 5: REPEAT(4, CVT_ADD_F32); break; + case 4: REPEAT(3, CVT_ADD_F32); break; + case 3: REPEAT(2, CVT_ADD_F32); break; + default: assert(!"Should not get here."); + } + _mm256_storeu_ps((float*)(workspace[0].buffer + i), inout_val); + } +} + +void reduce_2_fp32_buffers(int num_elements, void* in_out, void* in1) +{ +#pragma omp parallel for + for (int i = 0; i < num_elements * 4; i += VECTOR_LENGTH_IN_BYTES) { + auto inout_val = _mm256_loadu_ps((float*)((char*)in_out + i)); + auto in1_val = _mm256_loadu_ps((float*)((char*)in1 + i)); + inout_val = _mm256_add_ps(inout_val, in1_val); + _mm256_storeu_ps((float*)((char*)in_out + i), inout_val); + } +} + +// Communicatiooon settings +int world_rank = -1; +int world_size = -1; + +std::set _comm_ids; +std::set _colors; +std::vector _ccl_comms; +ccl::shared_ptr_class sub_kvs; +std::map, int> group_to_comm_id; + +ccl::communicator& _get_comm_from_group() { return _ccl_comms[0]; } +ccl::communicator& _get_comm_from_group(py::object group) { return _ccl_comms[0]; } +ccl::communicator& _get_comm_from_group(std::vector ranks) +{ + if (group_to_comm_id.find(ranks) != group_to_comm_id.end()) { + auto id = group_to_comm_id.find(ranks); + return _ccl_comms[id->second]; + } + return _ccl_comms[0]; +} + +#define CCLCHECK(cmd) \ + do { \ + cmd; \ + } while (0) + +#define KVS_CREATE_SUCCESS 0 +#define KVS_CREATE_FAILURE -1 + +bool is_initialized = 0; + +ccl::shared_ptr_class kvs; + +bool all_ranks_local_p = false; + +void initialize(int size, int rank, torch::Tensor& kvs_data) +{ + if (is_initialized) return; + + // Check whether all ranks is on the same physical machine. 
+ // If true, we will use an SHM based low latency allreduce + + auto ls_string = std::getenv("LOCAL_SIZE"); + int ls = 0; + if (ls_string != NULL) { ls = std::stoi(std::getenv("LOCAL_SIZE")); } + + if (size >= 1 && size == ls) { all_ranks_local_p = true; } + + world_size = size; + world_rank = rank; + is_initialized = 1; + + ccl::kvs::address_type main_addr; + + if (rank != 0) { + memcpy(main_addr.data(), kvs_data.data_ptr(), main_addr.size()); + kvs = ccl::create_kvs(main_addr); + } + + _ccl_comms.emplace_back(ccl::create_communicator(size, rank, kvs)); + + auto addr_string = std::getenv("MASTER_ADDR"); + if (addr_string == NULL) { addr_string = ""; } + auto port_string = std::getenv("MASTER_PORT"); + if (port_string == NULL) { port_string = ""; } + char shm_name[NAME_BUF_SIZE]; + snprintf(shm_name, + NAME_BUF_SIZE, + "%s_%d_%s_%s", + SHM_BUFFER_NAME, + getuid(), + addr_string, + port_string); + // create shared workspace for SHM based allreduce + if (all_ranks_local_p) { + if (rank == 0) { + workspace = + (struct allreduce_workspace*)malloc(size * sizeof(struct allreduce_workspace)); + shared_create( + &allreduce_buffer, shm_name, workspace, size * sizeof(struct allreduce_workspace)); + workspace = (struct allreduce_workspace*)allreduce_buffer.bytes; + for (int i = 0; i < size; i++) { workspace[i].state = coll_begin; } + } + CCLCHECK(ccl::barrier(_get_comm_from_group()).wait()); + if (rank != 0) { + shared_open(&allreduce_buffer, shm_name, size * sizeof(struct allreduce_workspace)); + } + workspace = (struct allreduce_workspace*)allreduce_buffer.bytes; + } +} + +/* + rank == 0: create main kvs and return its address + rank == else: return an empty address +*/ +std::vector get_kvs_addr(int rank) +{ + if (rank == 0) { + kvs = ccl::create_main_kvs(); + ccl::kvs::address_type main_addr = kvs->get_address(); + auto ccl_kvs_addr = std::vector(main_addr.begin(), main_addr.end()); + return ccl_kvs_addr; + } else { + ccl::kvs::address_type main_addr; + auto ccl_kvs_addr 
= std::vector(main_addr.begin(), main_addr.end()); + return ccl_kvs_addr; + } +} + +int get_rank(int group = 0) { return world_rank; } + +int get_world_size(int group = 0) { return world_size; } + +// Find the next ordered, unique value to a set. E.g. <0,1,2,7> --> 3 +int next_unique_val(std::set s) +{ + std::set::iterator itr; + // Base case. Add 0 to start of set. + if (s.empty() || *s.begin() != 0) { + return 0; + // second base case where s = {0} (the case of s = {n != 0} is caught above) + } else if (s.size() == 1) { + return 1; + } else { + int prev_val = *s.begin(); + for (itr = std::next(s.begin()); itr != s.end(); itr++) { + if (*itr != prev_val + 1) { return prev_val + 1; } + prev_val = *itr; + } + return *(s.end()) + 1; + } +} + +std::vector get_sub_kvs_addr(bool first) +{ + if (first) { + sub_kvs = ccl::create_main_kvs(); + ccl::kvs::address_type main_addr = sub_kvs->get_address(); + auto ccl_kvs_addr = std::vector(main_addr.begin(), main_addr.end()); + return ccl_kvs_addr; + } else { + ccl::kvs::address_type main_addr; + auto ccl_kvs_addr = std::vector(main_addr.begin(), main_addr.end()); + return ccl_kvs_addr; + } +} + +void initialize_sub_comm(int size, int rank, torch::Tensor& kvs_data, std::vector ranks) +{ + ccl::kvs::address_type main_addr; + if (rank != 0) { + memcpy(main_addr.data(), kvs_data.data_ptr(), main_addr.size()); + sub_kvs = ccl::create_kvs(main_addr); + } + _ccl_comms.push_back(ccl::create_communicator(size, rank, sub_kvs)); + group_to_comm_id[ranks] = _ccl_comms.size() - 1; +} + +ccl::datatype get_ccl_datatype(c10::ScalarType type) +{ + ccl::datatype ccl_type; + switch (type) { + case c10::ScalarType::Int: ccl_type = ccl::datatype::int32; break; + case c10::ScalarType::Long: ccl_type = ccl::datatype::int64; break; + case c10::ScalarType::Float: ccl_type = ccl::datatype::float32; break; + case c10::ScalarType::Double: ccl_type = ccl::datatype::float64; break; + case c10::ScalarType::BFloat16: ccl_type = ccl::datatype::bfloat16; 
break; + case c10::ScalarType::Half: ccl_type = ccl::datatype::float16; break; + default: ccl_type = ccl::datatype::int8; + } + return ccl_type; +} + +ccl::reduction get_ccl_reduce_op(py::object op, at::Tensor& input) +{ + py::object ReduceOp = py::module_::import("deepspeed.comm").attr("ReduceOp"); + if (!py::isinstance(op, ReduceOp)) { + throw std::runtime_error("Error: Op must be of type ReduceOp"); + } + + int op_val = py::int_(op.attr("value")); + ccl::reduction ccl_op; + + if (input.scalar_type() == at::kBool) { + if (op_val == (int)py::int_(ReduceOp.attr("SUM").attr("value"))) { + // For bool tensors, map sum to max, which both represent a bitwise or. + // This is to prevent overflow issues with sum, since we use uint8 to + // represent a bool (see cclDataType mapping). + ccl_op = ccl::reduction::max; + } else if (op_val == (int)py::int_(ReduceOp.attr("AVG").attr("value"))) { + throw std::runtime_error("Error: For bool tensors, op must be of type ReduceOp"); + } + } + + if (op_val == (int)py::int_(ReduceOp.attr("SUM").attr("value"))) { + ccl_op = ccl::reduction::sum; + } else if (op_val == (int)py::int_(ReduceOp.attr("MIN").attr("value"))) { + ccl_op = ccl::reduction::min; + } else if (op_val == (int)py::int_(ReduceOp.attr("MAX").attr("value"))) { + ccl_op = ccl::reduction::max; + } else if (op_val == (int)py::int_(ReduceOp.attr("PRODUCT").attr("value"))) { + ccl_op = ccl::reduction::prod; + } else { + throw std::runtime_error("Error: Unrecognized ReduceOp type"); + } + return ccl_op; +} + +void broadcast(torch::Tensor& data, int src, std::vector group, bool async_op) +{ + CCLCHECK(ccl::broadcast(data.data_ptr(), + data.numel(), + get_ccl_datatype(data.scalar_type()), + src, + _get_comm_from_group(group)) + .wait()); +} + +// TODO: implement torch's async_op behavior, document it. 
+void all_reduce(torch::Tensor& data, py::object op, std::vector group, bool async_op) +{ + CCLCHECK(ccl::allreduce(data.data_ptr(), + data.data_ptr(), + data.numel(), + get_ccl_datatype(data.scalar_type()), + get_ccl_reduce_op(op, data), + _get_comm_from_group(group)) + .wait()); +} + +void all_reduce_caching(torch::Tensor& data, + py::object op, + std::string match_id, + std::vector group, + bool async_op) +{ + ccl::allreduce_attr attr = ccl::default_allreduce_attr; + auto match_str = ccl::v1::string(match_id); + attr.template set(true); + attr.template set(match_str); + // To control this, use operation attribute and set true value for to_cache field and unique + // string (for example, tensor name) for match_id field. Note that: + // match_id should be the same for a specific communication operation across all ranks. + // If the same tensor is a part of different communication operations, match_id should have + // different values for each of these operations. + CCLCHECK(ccl::allreduce(data.data_ptr(), + data.data_ptr(), + data.numel(), + get_ccl_datatype(data.scalar_type()), + get_ccl_reduce_op(op, data), + _get_comm_from_group(group), + attr) + .wait()); +} + +static void parallel_memcpy(void* to, void* from, size_t n_bytes) + __attribute__((target("avx512bw"))); +static void parallel_memcpy(void* to, void* from, size_t n_bytes) +{ +#pragma omp parallel for + for (int i = 0; i < n_bytes; i += VECTOR_LENGTH_IN_BYTES) { + auto val = _mm256_loadu_si256((__m256i*)((char*)from + i)); + _mm256_storeu_si256((__m256i*)((char*)to + i), val); + } +} + +void inference_all_reduce(torch::Tensor& data, py::object op, bool async_op) +{ + static py::object ReduceOp = py::module_::import("deepspeed.comm").attr("ReduceOp"); + static auto ReduceOpSum = (int)py::int_(ReduceOp.attr("SUM").attr("value")); + + assert(py::int_(op.attr("value")) == ReduceOpSum); + + auto numel = data.numel(); + + int data_size = 0; + bool data_type_fallback = false; + + switch (data.scalar_type()) { 
+ case c10::ScalarType::BFloat16: data_size = numel * 2; break; + case c10::ScalarType::Float: data_size = numel * 4; break; + default: data_type_fallback = true; + } + + if (data_type_fallback || (data_size % VECTOR_LENGTH_IN_BYTES) != 0 || !all_ranks_local_p) { + // fallback to oneccl allreduce + CCLCHECK(ccl::allreduce(data.data_ptr(), + data.data_ptr(), + data.numel(), + get_ccl_datatype(data.scalar_type()), + get_ccl_reduce_op(op, data), + _get_comm_from_group()) + .wait()); + return; + } + + for (int offset = 0; offset < data_size; offset += MAX_BUF_SIZE) { + auto data_ptr = ((char*)(data.data_ptr()) + offset); + size_t chunk_size = data_size - offset > MAX_BUF_SIZE ? MAX_BUF_SIZE : data_size - offset; + size_t chunk_el = chunk_size / (data_size / numel); + + parallel_memcpy(workspace[world_rank].buffer, data_ptr, chunk_size); + std::atomic_thread_fence(std::memory_order_release); + workspace[world_rank].state = coll_allreduce_naive__copy_in_done; + + if (world_rank == 0) { + // compute allreduce result on rank 0 + for (int i = 1; i < world_size; i++) { + // wait until the other rank copy the buffer + wait_buffer_state_until(i, coll_allreduce_naive__copy_in_done); + } + reduce_all_buffers(workspace, chunk_el, data.scalar_type(), world_size); + std::atomic_thread_fence(std::memory_order_release); + workspace[world_rank].state = coll_allreduce_naive__reduce_done; + parallel_memcpy(data_ptr, workspace[0].buffer, chunk_size); + } + if (world_rank != 0) { + wait_buffer_state_until(0, coll_allreduce_naive__reduce_done); + parallel_memcpy(data_ptr, workspace[0].buffer, chunk_size); + std::atomic_thread_fence(std::memory_order_release); + workspace[world_rank].state = coll_allreduce_naive__copy_out_done; + } + if (world_rank == 0) { + for (int i = 1; i < world_size; i++) { + wait_buffer_state_until(i, coll_allreduce_naive__copy_out_done); + } + std::atomic_thread_fence(std::memory_order_release); + workspace[world_rank].state = coll_begin; + } + if (world_rank != 0) 
{ + // if rank 0 spin too fast it could be in state 1 of next allreduce + // in this case wait_buffer_state_until(0, 0) may cause deadlock + // what we are certain is when rank 0 finishes the state won't be 2 + wait_buffer_state_until_not(0, coll_allreduce_naive__reduce_done); + workspace[world_rank].state = coll_begin; + } + } +} + +void barrier(std::vector group, bool async_op) +{ + CCLCHECK(ccl::barrier(_get_comm_from_group(group)).wait()); +} + +std::vector get_available_coll() +{ + std::vector colls{ + "broadcast", "all_reduce", "inference_all_reduce", "all_reduce_caching", "barrier"}; + return colls; +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) +{ + m.def("get_kvs_addr", &get_kvs_addr, "create and get main kvs addr"); + m.def("initialize", &initialize, "ccl initialize"); + m.def("get_rank", &get_rank, "get rank"); + m.def("get_world_size", &get_world_size, "get world size"); + m.def("broadcast", &broadcast, "ccl broadcast"); + m.def("all_reduce", &all_reduce, "ccl all_reduce"); + m.def("inference_all_reduce", &inference_all_reduce, "low latency all_reduce implementation"); + m.def("all_reduce_caching", &all_reduce_caching, "ccl all_reduce with caching"); + m.def("barrier", &barrier, "barrier"); + m.def("initialize_sub_comm", &initialize_sub_comm, "initialize_sub_comm"); + m.def("get_sub_kvs_addr", &get_sub_kvs_addr, "get_sub_kvs_addr"); + m.def("get_available_coll", &get_available_coll, "get_available_coll"); +} diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/cpu/lion/fused_lion.cpp b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/cpu/lion/fused_lion.cpp new file mode 100644 index 0000000000000000000000000000000000000000..708df7f0146aa996e1652ed938c038331738e149 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/cpu/lion/fused_lion.cpp @@ -0,0 +1,43 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "cpu_lion.h" + +// C++ interface + +void multi_tensor_lion(int chunk_size, + at::Tensor noop_flag, + std::vector> tensor_lists, /*gpmv*/ + const float lr, + const float beta1, + const float beta2, + const int step, + const int mode, + const float weight_decay) +{ + static bool initialized = false; + if (!initialized) { + create_lion_optimizer(0); + initialized = true; + } + for (int i = 0; i < tensor_lists[0].size(); i++) { + ds_lion_step(0, + step, + lr, + beta1, + beta2, + weight_decay, + tensor_lists[1][i], + tensor_lists[0][i], + tensor_lists[2][i]); + } +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) +{ + m.def("multi_tensor_lion", + &multi_tensor_lion, + "Compute and apply gradient update to parameters for Lion optimizer"); +} diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/lion/cpu_lion.cpp b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/lion/cpu_lion.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a0562eac9c4aa4632898a858b04108ee01580f17 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/lion/cpu_lion.cpp @@ -0,0 +1,16 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "cpu_lion.h" + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) +{ + m.def("lion_update", &ds_lion_step, "DeepSpeed CPU Lion update (C++)"); + m.def("lion_update_copy", + &ds_lion_step_plus_copy, + "DeepSpeed CPU Lion update and param copy (C++)"); + m.def("create_lion", &create_lion_optimizer, "DeepSpeed CPU Lion (C++)"); + m.def("destroy_lion", &destroy_lion_optimizer, "DeepSpeed CPU Lion destroy (C++)"); +} diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/lion/cpu_lion_impl.cpp b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/lion/cpu_lion_impl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..28314cf5b6e1b4dbb97d26d2608cb2c17c432abd --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/lion/cpu_lion_impl.cpp @@ -0,0 +1,268 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include +#include +#include +#include +#include +#include +#include +#include "cpu_lion.h" + +#if defined(__ENABLE_CUDA__) +#include +#include "cublas_v2.h" +#include "cuda.h" +#include "curand.h" +#include "custom_cuda_layers.h" +#endif + +static std::unordered_map> s_optimizers; + +// C++ interface + +void Lion_Optimizer::Step_1(float* _params, + float* grads, + float* _exp_avg, + size_t _param_size, + ds_half_precision_t* dev_params, + bool half_precision) +{ + size_t rounded_size = 0; +#if defined(__AVX512__) or defined(__AVX256__) + Step_AVX<1>(&rounded_size, _params, grads, _exp_avg, _param_size, dev_params, half_precision); +#endif + if (_param_size > rounded_size) { + float betta1_minus1 = 1 - _betta1; + float betta2_minus1 = 1 - _betta2; + + float alpha = _alpha; + float after_decay = 1 - alpha * _weight_decay; + ds_half_precision_t* grads_cast_h; + ds_half_precision_t* params_cast_h; + if (half_precision) { + grads_cast_h = reinterpret_cast(grads); + params_cast_h = 
reinterpret_cast(_params); + } + + for (size_t t = rounded_size; t < _param_size; t += TILE) { + size_t copy_size = TILE; + if ((t + TILE) > _param_size) copy_size = _param_size - t; + size_t offset = copy_size + t; +#if defined(__ENABLE_CUDA__) + if ((t / TILE) >= 2) { cudaStreamSynchronize(_streams[_buf_index]); } +#elif defined(__ENABLE_CANN__) + if ((t / TILE) >= 2) { aclrtSynchronizeStream(_streams[_buf_index].stream()); } +#endif +#pragma omp parallel for + for (size_t k = t; k < offset; k++) { + float grad = half_precision ? (float)grads_cast_h[k] : grads[k]; + float param = half_precision ? (float)params_cast_h[k] : _params[k]; + float momentum = _exp_avg[k]; + float tmp = momentum * _betta1; + tmp = grad * betta1_minus1 + tmp; + // Rely on portable C++ methods to manipulate the sign bit of a floating-point + // number. + tmp = -std::copysignf(alpha, tmp); + if (_weight_decay > 0) { + param = param * after_decay + tmp; + } else { + param = param + tmp; + } + momentum = momentum * _betta2; + momentum = grad * betta2_minus1 + momentum; +#if defined(__ENABLE_CUDA__) or defined(__ENABLE_CANN__) + if (dev_params) _doubled_buffer[_buf_index][k - t] = param; +#endif + if (half_precision) + params_cast_h[k] = (ds_half_precision_t)param; + else + _params[k] = param; + _exp_avg[k] = momentum; + } +#if defined(__ENABLE_CUDA__) + if (dev_params) { + launch_param_update( + _doubled_buffer[_buf_index], dev_params + t, (copy_size), _streams[_buf_index]); + + _buf_index = !_buf_index; + } +#elif defined(__ENABLE_CANN__) + if (dev_params) { + size_t memcpy_size = copy_size * sizeof(_doubled_buffer[_buf_index][0]); + aclrtMemcpy(dev_params + t, + memcpy_size, + _doubled_buffer[_buf_index], + memcpy_size, + aclrtMemcpyKind::ACL_MEMCPY_HOST_TO_DEVICE); + + _buf_index = !_buf_index; + } +#endif + } + } +} + +void Lion_Optimizer::Step_4(float* _params, + float* grads, + float* _exp_avg, + size_t _param_size, + ds_half_precision_t* dev_params, + bool half_precision) +{ + size_t 
rounded_size = 0; +#if defined(__AVX512__) or defined(__AVX256__) + Step_AVX<4>(&rounded_size, _params, grads, _exp_avg, _param_size, dev_params, half_precision); +#endif + if (_param_size > rounded_size) + Step_1((_params + rounded_size), + (grads + rounded_size), + (_exp_avg + rounded_size), + (_param_size - rounded_size), + (dev_params != nullptr ? (dev_params + rounded_size) : dev_params), + half_precision); +} + +int create_lion_optimizer(int optimizer_id, + float alpha, + float betta1, + float betta2, + float weight_decay, + bool should_log) +{ + auto opt = std::make_shared(alpha, betta1, betta2, weight_decay); + + s_optimizers[optimizer_id] = opt; + + if (should_log) { + std::string avx_type = ""; +#if defined(__AVX512__) + avx_type = "AVX512"; +#else +#if defined(__AVX256__) + avx_type = "AVX2"; +#else + avx_type = "scalar"; +#endif +#endif + + printf("Lion Optimizer #%d is created with %s arithmetic capability.\n", + optimizer_id, + avx_type.c_str()); + printf("Config: alpha=%f, betas=(%f, %f), weight_decay=%f\n", + alpha, + betta1, + betta2, + weight_decay); + } + + return 0; +} + +void Lion_Optimizer::Step_8(float* _params, + float* grads, + float* _exp_avg, + size_t _param_size, + ds_half_precision_t* dev_params, + bool half_precision) +{ + size_t rounded_size = 0; +#if defined(__AVX512__) or defined(__AVX256__) + Step_AVX<8>(&rounded_size, _params, grads, _exp_avg, _param_size, dev_params, half_precision); +#endif + if (_param_size > rounded_size) + Step_4((_params + rounded_size), + (grads + rounded_size), + (_exp_avg + rounded_size), + (_param_size - rounded_size), + (dev_params != nullptr ? 
(dev_params + rounded_size) : dev_params), + half_precision); +} + +int ds_lion_step(int optimizer_id, + size_t step, + float lr, + float beta1, + float beta2, + float weight_decay, + torch::Tensor& params, + torch::Tensor& grads, + torch::Tensor& exp_avg) +{ + auto params_c = params.contiguous(); + auto grads_c = grads.contiguous(); + auto exp_avg_c = exp_avg.contiguous(); + + // assert(params.options().dtype() == grads.options().dtype()); + + float* params_ptr = (float*)params_c.data_ptr(); + float* grads_ptr = (float*)grads_c.data_ptr(); + float* exp_avg_ptr = (float*)exp_avg_c.data_ptr(); + + std::shared_ptr opt = + std::static_pointer_cast(s_optimizers[optimizer_id]); + opt->IncrementStep(step, beta1, beta2); + opt->update_state(lr, weight_decay); + + opt->Step_8(params_ptr, + grads_ptr, + exp_avg_ptr, + params_c.numel(), + nullptr, + (params.options().dtype() == at::kHalf)); + +#if defined(__ENABLE_CUDA__) or defined(__ENABLE_CANN__) + opt->SynchronizeStreams(); +#endif + return 0; +} + +int ds_lion_step_plus_copy(int optimizer_id, + size_t step, + float lr, + float beta1, + float beta2, + float weight_decay, + torch::Tensor& params, + torch::Tensor& grads, + torch::Tensor& exp_avg, + torch::Tensor& gpu_params) +{ +#if defined(__ENABLE_CUDA__) or defined(__ENABLE_CANN__) + auto params_c = params.contiguous(); + auto gpu_params_c = gpu_params.contiguous(); + auto exp_avg_c = exp_avg.contiguous(); + auto grads_c = grads.contiguous(); + + float* params_ptr = (float*)params_c.data_ptr(); + float* grads_ptr = (float*)grads_c.data_ptr(); + ds_half_precision_t* gpu_params_ptr = (ds_half_precision_t*)gpu_params_c.data_ptr(); + float* exp_avg_ptr = (float*)exp_avg_c.data_ptr(); + + std::shared_ptr opt = + std::static_pointer_cast(s_optimizers[optimizer_id]); + opt->IncrementStep(step, beta1, beta2); + opt->update_state(lr, weight_decay); + opt->Step_8(params_ptr, + grads_ptr, + exp_avg_ptr, + params_c.numel(), + gpu_params_ptr, + (params.options().dtype() == 
at::kHalf)); + + opt->SynchronizeStreams(); +#else + assert(false); +#endif + return 0; +} + +int destroy_lion_optimizer(int optimizer_id) +{ + s_optimizers.erase(optimizer_id); + + return 0; +} diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/lion/fused_lion_frontend.cpp b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/lion/fused_lion_frontend.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e523f97ca3098444a07b21394cf6a773ddcc700d --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/lion/fused_lion_frontend.cpp @@ -0,0 +1,22 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include + +void multi_tensor_lion_cuda(int chunk_size, + at::Tensor noop_flag, + std::vector> tensor_lists, + const float lr, + const float beta1, + const float beta2, + const int step, + const float weight_decay); + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) +{ + m.def("multi_tensor_lion", + &multi_tensor_lion_cuda, + "Compute and apply gradient update to parameters for Lion optimizer"); +} diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/lion/multi_tensor_apply.cuh b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/lion/multi_tensor_apply.cuh new file mode 100644 index 0000000000000000000000000000000000000000..12f41cb49c6bf505db48f1d21e312578f19da836 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/lion/multi_tensor_apply.cuh @@ -0,0 +1,132 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/* +Copyright NVIDIA/apex +This file is adapted from fused adam in NVIDIA/apex, commit a109f85 +*/ + +#include +#include +#include +#include +#include +#include "compat.h" + +#include + +// #include + +// This header is the one-stop shop for all your multi-tensor apply needs. 
+ +// TODO: Kernel arg size limit may be <4KB for some other cards (ie Jetson) +constexpr int depth_to_max_tensors[5] = {110, 64, 48, 36, 30}; +constexpr int depth_to_max_blocks[5] = {320, 320, 320, 320, 320}; + +template +struct TensorListMetadata { + void* addresses[n][depth_to_max_tensors[n - 1]]; + int sizes[depth_to_max_tensors[n - 1]]; + unsigned char block_to_tensor[depth_to_max_blocks[n - 1]]; + int block_to_chunk[depth_to_max_blocks[n - 1]]; // I fear this needs to be a full int. + int start_tensor_this_launch; +}; + +template +__global__ void multi_tensor_apply_kernel(int chunk_size, + volatile int* noop_flag, + T tl, + U callable, + ArgTypes... args) +{ + // Hand the chunk information to the user-supplied functor to process however it likes. + callable(chunk_size, noop_flag, tl, args...); +} + +template +void multi_tensor_apply(int block_size, + int chunk_size, + const at::Tensor& noop_flag, + const std::vector>& tensor_lists, + T callable, + ArgTypes... args) +{ + TORCH_CHECK(tensor_lists.size() == depth, "tensor_lists.size() != depth"); + int len0 = tensor_lists[0].size(); + TORCH_CHECK(len0 > 0, "tensor_lists[0].size() is not > 0"); + auto ref_device = tensor_lists[0][0].device(); + TORCH_CHECK(ref_device.type() == at::kCUDA, "expected input to be on cuda"); + for (int l = 0; l < tensor_lists.size(); l++) // No range-based for because I need indices + { + TORCH_CHECK(tensor_lists[l].size() == len0, "Size mismatch among tensor lists"); + for (int t = 0; t < tensor_lists[l].size(); t++) { + // TODO: Print which tensor fails. 
+ bool contiguous_memory = tensor_lists[l][t].is_contiguous(); +#ifdef VERSION_GE_1_5 + contiguous_memory = (contiguous_memory || + tensor_lists[l][t].is_contiguous(at::MemoryFormat::ChannelsLast)); +#endif + TORCH_CHECK(contiguous_memory, "A tensor was not contiguous."); + TORCH_CHECK(tensor_lists[l][t].device() == ref_device, + "A tensor was not on the same device as the first tensor"); + TORCH_CHECK(tensor_lists[l][t].numel() == tensor_lists[0][t].numel(), "Size mismatch"); + } + } + + int ntensors = tensor_lists[0].size(); + + TensorListMetadata tl; + + const at::cuda::OptionalCUDAGuard device_guard(device_of(tensor_lists[0][0])); + auto stream = at::cuda::getCurrentCUDAStream(); + + tl.start_tensor_this_launch = 0; + int loc_block_info = 0; + int loc_tensor_info = 0; + for (int t = 0; t < ntensors; t++) { + tl.sizes[loc_tensor_info] = tensor_lists[0][t].numel(); + for (int d = 0; d < depth; d++) + tl.addresses[d][loc_tensor_info] = tensor_lists[d][t].data_ptr(); + loc_tensor_info++; + + int chunks_this_tensor = (tensor_lists[0][t].numel() + chunk_size - 1) / chunk_size; + + for (int chunk = 0; chunk < chunks_this_tensor; chunk++) { + // std::cout << chunks_this_tensor << std::endl; + tl.block_to_tensor[loc_block_info] = loc_tensor_info - 1; + tl.block_to_chunk[loc_block_info] = chunk; + loc_block_info++; + + bool tensors_full = (loc_tensor_info == depth_to_max_tensors[depth - 1] && + chunk == chunks_this_tensor - 1); + bool blocks_full = (loc_block_info == depth_to_max_blocks[depth - 1]); + bool last_chunk = (t == ntensors - 1 && chunk == chunks_this_tensor - 1); + if (tensors_full || blocks_full || last_chunk) { + // using accscalar_t = acc_type; + multi_tensor_apply_kernel<<>>( + chunk_size, noop_flag.DATA_PTR(), tl, callable, args...); + + AT_CUDA_CHECK(cudaGetLastError()); + + // Reset. The control flow possibilities here make my brain hurt. 
+ loc_block_info = 0; + if (chunk == chunks_this_tensor - 1) { + // std::cout << "Hit case 1 " << cond1 << " " << cond2 << " " << cond3 << + // std::endl; + loc_tensor_info = 0; + tl.start_tensor_this_launch = t + 1; + } else { + // std::cout << "Hit case 2 " << cond1 << " " << cond2 << " " << cond3 << + // std::endl; + tl.sizes[0] = tl.sizes[loc_tensor_info - 1]; + for (int d = 0; d < depth; d++) + tl.addresses[d][0] = tl.addresses[d][loc_tensor_info - 1]; + loc_tensor_info = 1; + tl.start_tensor_this_launch = t; + } + } + } + } +} diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/lion/multi_tensor_lion.cu b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/lion/multi_tensor_lion.cu new file mode 100644 index 0000000000000000000000000000000000000000..f5fe6dfdd7d0c3fa9200da8592d0a3f56b138428 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/lion/multi_tensor_lion.cu @@ -0,0 +1,126 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/* +Copyright NVIDIA/apex +This file is adapted from fused adam in NVIDIA/apex, commit a109f85 +*/ + +#include +#include +#include +#include +// Another possibility: +// #include + +#include + +#include "multi_tensor_apply.cuh" +#include "type_shim.h" + +#define BLOCK_SIZE 512 +#define ILP 4 + +using MATH_T = float; + +template +struct LionFunctor { + __device__ __forceinline__ void operator()(int chunk_size, + volatile int* noop_gmem, + TensorListMetadata<3>& tl, + const float beta1, + const float beta2, + const float lr, + const float decay) + { + // I'd like this kernel to propagate infs/nans. 
+ // if(*noop_gmem == 1) + // return; + + int tensor_loc = tl.block_to_tensor[blockIdx.x]; + + // potentially use to pass in list of scalar + // int tensor_num = tl.start_tensor_this_launch + tensor_loc; + + int chunk_idx = tl.block_to_chunk[blockIdx.x]; + int n = tl.sizes[tensor_loc]; + + T* g = (T*)tl.addresses[0][tensor_loc]; + g += chunk_idx * chunk_size; + + T* p = (T*)tl.addresses[1][tensor_loc]; + p += chunk_idx * chunk_size; + + T* m = (T*)tl.addresses[2][tensor_loc]; + m += chunk_idx * chunk_size; + + n -= chunk_idx * chunk_size; + + MATH_T after_decay = 1.0f - lr * decay; + + // see note in multi_tensor_scale_kernel.cu + for (int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x * ILP) { + MATH_T r_g[ILP]; + MATH_T r_p[ILP]; + MATH_T r_m[ILP]; +#pragma unroll + for (int ii = 0; ii < ILP; ii++) { + int i = i_start + threadIdx.x + ii * blockDim.x; + if (i < n && i < chunk_size) { + r_g[ii] = g[i]; + r_p[ii] = p[i]; + r_m[ii] = m[i]; + } else { + r_g[ii] = MATH_T(0); + r_p[ii] = MATH_T(0); + r_m[ii] = MATH_T(0); + } + } +#pragma unroll + for (int ii = 0; ii < ILP; ii++) { + MATH_T c = beta1 * r_m[ii] + (1 - beta1) * r_g[ii]; + MATH_T update = c > 0 ? 
(-lr) : lr; + r_p[ii] = r_p[ii] * after_decay + update; + r_m[ii] = beta2 * r_m[ii] + (1 - beta2) * r_g[ii]; + } +#pragma unroll + for (int ii = 0; ii < ILP; ii++) { + int i = i_start + threadIdx.x + ii * blockDim.x; + if (i < n && i < chunk_size) { + p[i] = r_p[ii]; + m[i] = r_m[ii]; + } + } + } + } +}; + +void multi_tensor_lion_cuda(int chunk_size, + at::Tensor noop_flag, + std::vector> tensor_lists, + const float lr, + const float beta1, + const float beta2, + const int step, + const float weight_decay) +{ + using namespace at; + + // Assume single type across p,g,m1,m2 now + DISPATCH_DOUBLE_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), + 0, + "lion", + multi_tensor_apply<3>(BLOCK_SIZE, + chunk_size, + noop_flag, + tensor_lists, + LionFunctor(), + beta1, + beta2, + lr, + weight_decay);) + + AT_CUDA_CHECK(cudaGetLastError()); +} diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/random_ltd/gather_scatter.cu b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/random_ltd/gather_scatter.cu new file mode 100644 index 0000000000000000000000000000000000000000..291c8eb063b0e0a56843fb43d83caa5fdb00f6cb --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/random_ltd/gather_scatter.cu @@ -0,0 +1,186 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "custom_cuda_layers.h" +#include "memory_access_utils.h" + +namespace cg = cooperative_groups; + +namespace td_data { +constexpr int granularity = 16; +} + +template +__global__ void gather_tokens_impl(T* retained_tokens, + const T* activations, + int32_t* gather_indices, + int32_t sampled_tokens, + int32_t channels, + int32_t read_batch_stride, + int32_t read_seq_stride, + int32_t write_batch_stride, + int32_t write_seq_stride) +{ + constexpr int mem_vals_t = td_data::granularity / sizeof(T); + + cg::thread_block tb = cg::this_thread_block(); + + const int gather_idx = gather_indices[tb.group_index().x * sampled_tokens + tb.group_index().y]; + + const int read_offset = read_batch_stride * tb.group_index().x + read_seq_stride * gather_idx; + const int write_offset = + write_batch_stride * tb.group_index().x + write_seq_stride * tb.group_index().y; + + for (int i = tb.thread_index().x * mem_vals_t; i < channels; i += blockDim.x * mem_vals_t) { + T local_data[mem_vals_t]; + mem_access::load_global(local_data, activations + read_offset + i); + mem_access::store_global(retained_tokens + write_offset + i, + local_data); + } +} + +template +void launch_gather_tokens(T* retained_tokens, + T* activations, + int32_t* gather_indices, + int32_t batch_size, + int32_t sampled_tokens, + int32_t channels, + int32_t read_batch_stride, + int32_t read_seq_stride, + int32_t write_batch_stride, + int32_t write_seq_stride, + cudaStream_t stream) +{ + constexpr int mem_vals_t = td_data::granularity / sizeof(T); + + const int load_steps = (channels + mem_vals_t - 1) / mem_vals_t; + const int threads = (load_steps >= 1024) ? 
1024 : load_steps; + + dim3 block(threads); + dim3 grid(batch_size, sampled_tokens); + + gather_tokens_impl<<>>(retained_tokens, + activations, + gather_indices, + sampled_tokens, + channels, + read_batch_stride, + read_seq_stride, + write_batch_stride, + write_seq_stride); +} + +template void launch_gather_tokens(float*, + float*, + int32_t*, + int32_t, + int32_t, + int32_t, + int32_t, + int32_t, + int32_t, + int32_t, + cudaStream_t); + +template void launch_gather_tokens<__half>(__half*, + __half*, + int32_t*, + int32_t, + int32_t, + int32_t, + int32_t, + int32_t, + int32_t, + int32_t, + cudaStream_t); + +template +__global__ void scatter_tokens_impl(T* all_activations, + const T* layer_activations, + int32_t* gather_indices, + int32_t retained_tokens, + int32_t channels, + int32_t read_batch_stride, + int32_t read_seq_stride, + int32_t write_batch_stride, + int32_t write_seq_stride) +{ + constexpr int mem_vals_t = td_data::granularity / sizeof(T); + + cg::thread_block tb = cg::this_thread_block(); + + const int gather_idx = + gather_indices[tb.group_index().x * retained_tokens + tb.group_index().y]; + + const int read_offset = + read_batch_stride * tb.group_index().x + read_seq_stride * tb.group_index().y; + const int write_offset = + write_batch_stride * tb.group_index().x + write_seq_stride * gather_idx; + + for (int i = tb.thread_index().x * mem_vals_t; i < channels; i += mem_vals_t * blockDim.x) { + T local_data[mem_vals_t]; + mem_access::load_global(local_data, + layer_activations + read_offset + i); + mem_access::store_global(all_activations + write_offset + i, + local_data); + } +} + +template +void launch_scatter_tokens(T* all_activations, + T* layer_activations, + int32_t* gather_indices, + int32_t batch_size, + int32_t sampled_tokens, + int32_t channels, + int32_t read_batch_stride, + int32_t read_seq_stride, + int32_t write_batch_stride, + int32_t write_seq_stride, + cudaStream_t stream) +{ + constexpr int mem_vals_t = td_data::granularity / 
sizeof(T); + + const int load_steps = (channels + mem_vals_t - 1) / mem_vals_t; + const int threads = (load_steps >= 1024) ? 1024 : load_steps; + + dim3 block(threads); + dim3 grid(batch_size, sampled_tokens); + + scatter_tokens_impl<<>>(all_activations, + layer_activations, + gather_indices, + sampled_tokens, + channels, + read_batch_stride, + read_seq_stride, + write_batch_stride, + write_seq_stride); +} + +template void launch_scatter_tokens(float*, + float*, + int32_t*, + int32_t, + int32_t, + int32_t, + int32_t, + int32_t, + int32_t, + int32_t, + cudaStream_t); + +template void launch_scatter_tokens<__half>(__half*, + __half*, + int32_t*, + int32_t, + int32_t, + int32_t, + int32_t, + int32_t, + int32_t, + int32_t, + cudaStream_t); diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/random_ltd/pt_binding.cpp b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/random_ltd/pt_binding.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b50476719872e80fe785cac2c17501b24e1a01e4 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/random_ltd/pt_binding.cpp @@ -0,0 +1,216 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include +#include +#include "custom_cuda_layers.h" + +torch::Tensor token_sort_(torch::Tensor& unsorted_token_ids, int64_t original_tokens) +{ + const int layers = unsorted_token_ids.size(0); + const int batch_size = unsorted_token_ids.size(1); + const int reserved_tokens = unsorted_token_ids.size(2); + + launch_token_sort(unsorted_token_ids.data_ptr(), + layers, + batch_size, + reserved_tokens, + original_tokens, + c10::cuda::getCurrentCUDAStream()); + + return unsorted_token_ids; +} + +torch::Tensor token_gather(torch::Tensor& activations, + torch::Tensor& sorted_indices, + bool batch_first) +{ + // Activations may be in either [N, S, C] or [S, N, C] while sorted_indices is + // always in [N, retained] + /* + TORCH_CHECK(sorted_indices.size(0) == activations.size(0) || + sorted_indices.size(0) == activations.size(1), + "Unable to match the batch size of the sorted indices to the activation + shape."); TORCH_CHECK(activations.size(2) % 8 == 0, "Channels must be divisible by 8 to align + with vectorized loads."); + */ + // bool batch_first = sorted_indices.size(0) == activations.size(0); + + const int64_t dim_0 = (batch_first) ? sorted_indices.size(0) : sorted_indices.size(1); + const int64_t dim_1 = (batch_first) ? sorted_indices.size(1) : sorted_indices.size(0); + const int64_t dim_2 = activations.size(2); + + auto output = torch::empty({dim_0, dim_1, dim_2}, activations.options()); + + const int batch_size = sorted_indices.size(0); + const int channels = dim_2; + const int retained_tokens = sorted_indices.size(1); + const int read_batch_stride = (batch_first) ? activations.stride(0) : activations.stride(1); + const int read_seq_stride = (batch_first) ? activations.stride(1) : activations.stride(0); + const int write_batch_stride = (batch_first) ? output.stride(0) : output.stride(1); + const int write_seq_stride = (batch_first) ? 
output.stride(1) : output.stride(0); + + if (activations.options().dtype() == torch::kFloat) { + launch_gather_tokens((float*)output.data_ptr(), + (float*)activations.data_ptr(), + (int32_t*)sorted_indices.data_ptr(), + batch_size, + retained_tokens, + channels, + read_batch_stride, + read_seq_stride, + write_batch_stride, + write_seq_stride, + c10::cuda::getCurrentCUDAStream()); + } else { + launch_gather_tokens((__half*)output.data_ptr(), + (__half*)activations.data_ptr(), + (int32_t*)sorted_indices.data_ptr(), + batch_size, + retained_tokens, + channels, + read_batch_stride, + read_seq_stride, + write_batch_stride, + write_seq_stride, + c10::cuda::getCurrentCUDAStream()); + } + + return output; +} + +torch::Tensor token_scatter_(torch::Tensor& all_activations, + torch::Tensor& layer_activations, + torch::Tensor& sorted_indices, + bool batch_first) +{ + // Activations may be in either [N, S, C] or [S, N, C] while sorted_indices is + // always in [N, retained] + /* + TORCH_CHECK(sorted_indices.size(0) == all_activations.size(0) || + sorted_indices.size(0) == all_activations.size(1), + "Unable to match the batch size of the sorted indices to the activation + shape."); TORCH_CHECK(all_activations.size(2) % 8 != 0, "Channels must be divisible by 8 to + align with vectorized loads."); + */ + // bool batch_first = sorted_indices.size(0) == all_activations.size(0); + + const int batch_size = sorted_indices.size(0); + const int channels = all_activations.size(2); + const int retained_tokens = sorted_indices.size(1); + const int read_batch_stride = (batch_first) ? layer_activations.stride(0) + : layer_activations.stride(1); + const int read_seq_stride = (batch_first) ? layer_activations.stride(1) + : layer_activations.stride(0); + const int write_batch_stride = (batch_first) ? all_activations.stride(0) + : all_activations.stride(1); + const int write_seq_stride = (batch_first) ? 
all_activations.stride(1) + : all_activations.stride(0); + + if (all_activations.options().dtype() == torch::kFloat) { + launch_scatter_tokens((float*)all_activations.data_ptr(), + (float*)layer_activations.data_ptr(), + (int32_t*)sorted_indices.data_ptr(), + batch_size, + retained_tokens, + channels, + read_batch_stride, + read_seq_stride, + write_batch_stride, + write_seq_stride, + c10::cuda::getCurrentCUDAStream()); + } else { + launch_scatter_tokens((__half*)all_activations.data_ptr(), + (__half*)layer_activations.data_ptr(), + (int32_t*)sorted_indices.data_ptr(), + batch_size, + retained_tokens, + channels, + read_batch_stride, + read_seq_stride, + write_batch_stride, + write_seq_stride, + c10::cuda::getCurrentCUDAStream()); + } + + return all_activations; +} + +torch::Tensor mask_gather_bert(torch::Tensor& dense_mask, torch::Tensor& sorted_indices) +{ + // TORCH_CHECK(dense_mask.dim() == 4) + + const int batch_size = dense_mask.size(0); + const int layers = sorted_indices.size(0); + /* + TORCH_CHECK(layers * batch_size == sorted_indices.size(0), + "Mismatch between the indices and the mask"); + */ + const int orig_seq_len = dense_mask.size(3); + const int truncated_seq_len = sorted_indices.size(2); + + auto output = torch::empty({layers, batch_size, 1, truncated_seq_len, truncated_seq_len}, + dense_mask.options()); + + if (dense_mask.options().dtype() == torch::kFloat) { + launch_slice_bert_mask((float*)output.data_ptr(), + (const float*)dense_mask.data_ptr(), + (const int32_t*)sorted_indices.data_ptr(), + layers, + batch_size, + truncated_seq_len, + orig_seq_len, + c10::cuda::getCurrentCUDAStream()); + } else { + launch_slice_bert_mask((__half*)output.data_ptr(), + (const __half*)dense_mask.data_ptr(), + (const int32_t*)sorted_indices.data_ptr(), + layers, + batch_size, + truncated_seq_len, + orig_seq_len, + c10::cuda::getCurrentCUDAStream()); + } + + return output; +} + +torch::Tensor mask_gather_gpt(torch::Tensor dense_mask, int truncated_seq_len) +{ + // 
TORCH_CHECK(dense_mask.dim() == 4) + + const int batch_size = dense_mask.size(0); + const int orig_seq_len = dense_mask.size(3); + + auto output = + torch::empty({batch_size, 1, truncated_seq_len, truncated_seq_len}, dense_mask.options()); + + if (dense_mask.options().dtype() == torch::kFloat) { + launch_slice_gpt_mask((float*)output.data_ptr(), + (const float*)dense_mask.data_ptr(), + batch_size, + truncated_seq_len, + orig_seq_len, + c10::cuda::getCurrentCUDAStream()); + } else { + launch_slice_gpt_mask((__half*)output.data_ptr(), + (const __half*)dense_mask.data_ptr(), + batch_size, + truncated_seq_len, + orig_seq_len, + c10::cuda::getCurrentCUDAStream()); + } + + return output; +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) +{ + m.def("token_sort_", &token_sort_, "Comparison free sorting algorithm (CUDA)"); + m.def("token_gather", &token_gather, "Parallel gather of tokens (CUDA)"); + m.def("token_scatter_", &token_scatter_, "Parallel scatter of tokens (CUDA)"); + m.def("mask_gather_bert", &mask_gather_bert, "Token-based mask gather for BERT masking (CUDA)"); + m.def("mask_gather_gpt", &mask_gather_gpt, "Token-based mask gather for GPT masking (CUDA)"); +} diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/random_ltd/slice_attn_masks.cu b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/random_ltd/slice_attn_masks.cu new file mode 100644 index 0000000000000000000000000000000000000000..bc3823b846550cef1049e461ea35c35b53522146 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/random_ltd/slice_attn_masks.cu @@ -0,0 +1,128 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "custom_cuda_layers.h" +#include "memory_access_utils.h" + +namespace cg = cooperative_groups; + +template +__global__ void slice_gpt_mask_impl(T* output_mask, + const T* input_mask, + int truncated_seq_len, + int orig_seq_len) +{ + const int in_batch_stride = orig_seq_len * orig_seq_len; + const int out_batch_stride = truncated_seq_len * truncated_seq_len; + + cg::thread_block tb = cg::this_thread_block(); + + const T* input_mask_block = + input_mask + blockIdx.x * in_batch_stride + blockIdx.y * orig_seq_len; + T* output_mask_block = + output_mask + blockIdx.x * out_batch_stride + blockIdx.y * truncated_seq_len; + + for (int i = tb.thread_index().x; i < truncated_seq_len; i += blockDim.x) { + output_mask_block[i] = input_mask_block[i]; + } +} + +template +void launch_slice_gpt_mask(T* output_mask, + const T* input_mask, + int batch_size, + int truncated_seq_len, + int orig_seq_len, + cudaStream_t stream) +{ + const int threads = (truncated_seq_len >= 1024) ? 
1024 : truncated_seq_len; + + dim3 block(threads); + dim3 grid(batch_size, truncated_seq_len); + + slice_gpt_mask_impl + <<>>(output_mask, input_mask, truncated_seq_len, orig_seq_len); +} + +template void launch_slice_gpt_mask(float*, const float*, int, int, int, cudaStream_t); + +template void launch_slice_gpt_mask<__half>(__half*, const __half*, int, int, int, cudaStream_t); + +template +__global__ void slice_bert_mask_impl(T* output_mask, + const T* input_mask, + const int32_t* retained_indices, + int32_t truncated_seq_len, + int32_t orig_seq_len) +{ + const int in_batch_stride = orig_seq_len * orig_seq_len; + const int out_batch_stride = truncated_seq_len * truncated_seq_len; + const int out_layer_stride = out_batch_stride * gridDim.y; + + cg::thread_block tb = cg::this_thread_block(); + + const int out_layer_offset = tb.group_index().x * out_layer_stride; + + const int in_batch_offset = tb.group_index().y * in_batch_stride; + const int out_batch_offset = tb.group_index().y * out_batch_stride; + + const int32_t gather_row = + retained_indices[tb.group_index().y * truncated_seq_len + tb.group_index().z]; + const int in_seq_offset = gather_row * orig_seq_len; + const int out_seq_offset = tb.group_index().z * truncated_seq_len; + + const T* in_sequence = input_mask + in_batch_offset + in_seq_offset; + T* out_sequence = output_mask + out_layer_offset + out_batch_offset + out_seq_offset; + const int32_t* gather_data = retained_indices + tb.group_index().y * truncated_seq_len; + + for (int i = tb.thread_index().x; i < truncated_seq_len; i += blockDim.x) { + out_sequence[i] = in_sequence[gather_data[i]]; + } +} + +/* +Since the Bert mask is not causal like GPT, we can't just generate a set of +masks for the entire model based off a single layer sample. 
+ +We map the kernel as follows: +z-dimension: layer +y-dimension: batch +x-dimension: sequence_offset +*/ +template +void launch_slice_bert_mask(T* output_mask, + const T* input_mask, + const int32_t* retained_indices, + int32_t layers, + int32_t batch_size, + int32_t truncated_seq_len, + int32_t orig_seq_len, + cudaStream_t stream) +{ + const int threads = (truncated_seq_len >= 1024) ? 1024 : truncated_seq_len; + dim3 block(threads); + dim3 grid(layers, batch_size, truncated_seq_len); + + slice_bert_mask_impl<<>>( + output_mask, input_mask, retained_indices, truncated_seq_len, orig_seq_len); +} + +template void launch_slice_bert_mask(float*, + const float*, + const int32_t*, + int32_t, + int32_t, + int32_t, + int32_t, + cudaStream_t); + +template void launch_slice_bert_mask<__half>(__half*, + const __half*, + const int32_t*, + int32_t, + int32_t, + int32_t, + int32_t, + cudaStream_t); diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/random_ltd/token_sort.cu b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/random_ltd/token_sort.cu new file mode 100644 index 0000000000000000000000000000000000000000..3049471cfe34286593569cb8f6781199e5fdea90 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/random_ltd/token_sort.cu @@ -0,0 +1,194 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include +#include "custom_cuda_layers.h" +#include "memory_access_utils.h" + +namespace cg = cooperative_groups; + +namespace td_sort { +constexpr int threads = 512; +constexpr int granularity = 16; +constexpr int mem_vals = granularity / sizeof(int32_t); +constexpr int max_buffer_size = (threads + 1) * mem_vals; + +#ifdef __HIP_PLATFORM_AMD__ +constexpr int warp_size = 64; +#else +constexpr int warp_size = 32; +#endif + +constexpr int max_warps = threads / warp_size; +} // namespace td_sort + +template +__global__ void scan_sort(int32_t* data, int reserved_tokens, int original_tokens) +{ + cg::thread_block tb = cg::this_thread_block(); + cg::thread_block_tile warp = cg::tiled_partition(tb); + + __shared__ int32_t indices_buffer[td_sort::max_buffer_size]; + __shared__ int32_t intermediate_buffer[td_sort::max_warps]; + __shared__ int32_t sorted_indices_buffer[td_sort::max_buffer_size]; + + for (int i = tb.thread_index().x * td_sort::mem_vals; i < original_tokens + 1; + i += tb.group_dim().x * td_sort::mem_vals) { + uint32_t zeros[td_sort::mem_vals] = {0, 0, 0, 0}; + mem_access::store_shared(indices_buffer + i, zeros); + } + + int32_t local_vals[VALS_PER_THREAD]; + + // We flatten layers/batch into a single indexing dimension + int32_t* data_block = data + tb.group_index().x * reserved_tokens; + + // The next two loops really could be fused for a more logical code layout, but don't want to + // move the barrier forward +#pragma unroll + for (int i = 0; i < VALS_PER_THREAD; i++) { + const int iter_idx = i * td_sort::threads + tb.thread_index().x; + if (iter_idx < reserved_tokens) { + mem_access::load_global(local_vals + i, data_block + iter_idx); + } else { + local_vals[i] = 0; + } + } + + tb.sync(); + +#pragma unroll + for (int i = 0; i < VALS_PER_THREAD; i++) { + const int iter_idx = i * td_sort::threads + tb.thread_index().x; + if (iter_idx < reserved_tokens) { + const int32_t one = 1; + 
mem_access::store_shared(indices_buffer + local_vals[i], &one); + } + } + + tb.sync(); + + int32_t local_input[td_sort::mem_vals]; + mem_access::load_shared( + local_input, indices_buffer + tb.thread_index().x * td_sort::mem_vals); + + int32_t reduce_vals[td_sort::mem_vals]; + reduce_vals[0] = local_input[0]; + +#pragma unroll + for (int i = 1; i < td_sort::mem_vals; i++) { + reduce_vals[i] = local_input[i] + reduce_vals[i - 1]; + } + + int32_t step_1_val = reduce_vals[td_sort::mem_vals - 1]; + // Short span exclusive scan algorithm (less work efficient) +#pragma unroll + for (int i = 1; i < td_sort::warp_size; i *= 2) { + int32_t step_val = warp.shfl_up(step_1_val, i); + step_1_val = (warp.thread_rank() < i) ? step_1_val : step_1_val + step_val; + } + + if (warp.thread_rank() == td_sort::warp_size - 1) { + mem_access::store_shared(intermediate_buffer + warp.meta_group_rank(), + &step_1_val); + } + + tb.sync(); + + if (warp.meta_group_rank() == 0) { + int32_t step_2_val = 0; + if (warp.thread_rank() < td_sort::max_warps) { + mem_access::load_shared(&step_2_val, + intermediate_buffer + warp.thread_rank()); + } + +#pragma unroll + for (int i = 1; i < td_sort::warp_size; i *= 2) { + int32_t step_val = warp.shfl_up(step_2_val, i); + step_2_val = (warp.thread_rank() < i) ? 
step_2_val : step_2_val + step_val; + } + + if (warp.thread_rank() < td_sort::max_warps) { + mem_access::store_shared(intermediate_buffer + warp.thread_rank(), + &step_2_val); + } + } + + tb.sync(); + + int step_2_val = 0; + if (warp.meta_group_rank() > 0) { + mem_access::load_shared(&step_2_val, + intermediate_buffer + warp.meta_group_rank() - 1); + } + + const int thread_offset = reduce_vals[td_sort::mem_vals - 1]; + +#pragma unroll + for (int i = 0; i < td_sort::mem_vals; i++) { + reduce_vals[i] += step_1_val + step_2_val - thread_offset; + } + mem_access::store_shared( + indices_buffer + tb.thread_index().x * td_sort::mem_vals, reduce_vals); + + if (tb.thread_index().x == 0) { + indices_buffer[original_tokens] = original_tokens - indices_buffer[original_tokens]; + } + tb.sync(); + + for (int i = 0; i < VALS_PER_THREAD; i++) { + const int iter_idx = i * td_sort::threads + tb.thread_index().x; + if (iter_idx < reserved_tokens) { + if (local_vals[i] == 0) { + int zero = 0; + mem_access::store_shared(sorted_indices_buffer, &zero); + } else { + int sorted_idx; + mem_access::load_shared(&sorted_idx, + indices_buffer + local_vals[i] - 1); + mem_access::store_shared(sorted_indices_buffer + sorted_idx, + local_vals + i); + } + } + } + + tb.sync(); + +#pragma unroll + for (int i = 0; i < VALS_PER_THREAD; i++) { + const int iter_idx = i * td_sort::threads + tb.thread_index().x; + if (iter_idx < reserved_tokens) { + int32_t store_val; + mem_access::load_shared(&store_val, sorted_indices_buffer + iter_idx); + mem_access::store_global(data_block + iter_idx, &store_val); + } + } +} + +void launch_token_sort(int32_t* indices, + int layers, + int batch_size, + int reserved_size, + int original_tokens, + cudaStream_t stream) +{ + // Each sort is completely independent, can flatten this dimension + dim3 grid(layers * batch_size); + dim3 block(td_sort::threads); + + const int vals_per_thread = (reserved_size + td_sort::threads - 1) / td_sort::threads; + + if (vals_per_thread == 1) 
{ + scan_sort<1><<>>(indices, reserved_size, original_tokens); + } else if (vals_per_thread == 2) { + scan_sort<2><<>>(indices, reserved_size, original_tokens); + } else if (vals_per_thread == 3) { + scan_sort<3><<>>(indices, reserved_size, original_tokens); + } else if (vals_per_thread == 4) { + scan_sort<4><<>>(indices, reserved_size, original_tokens); + } else { + assert(false); + } +} diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/sparse_attention/utils.cpp b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/sparse_attention/utils.cpp new file mode 100644 index 0000000000000000000000000000000000000000..352306ba26128b96236f43764c26d2f4191eb391 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/sparse_attention/utils.cpp @@ -0,0 +1,127 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/* +DeepSpeed note, code taken & adapted from commit 9aa94789f13ada713af36cfd8cca2fc9a7f6b79a + https:github.com/ptillet/torch-blocksparse/blob/master/csrc/utils.cpp +*/ + +#include +#include +#include +#include +#ifdef _OPENMP +#include +#endif + +typedef std::vector> ret_t; + +void segment_blocks(torch::Tensor layout, + torch::Tensor idx, + torch::Tensor scratch, + int max_width, + ret_t& ret) +{ + size_t H = layout.size(0); + size_t M = layout.size(1); + size_t N = layout.size(2); + torch::Tensor tmp = torch::zeros_like(layout); + + auto _tmp = tmp.accessor(); + auto _layout = layout.accessor(); + auto _idx = idx.accessor(); + auto _scratch = scratch.accessor(); + std::vector current(H, 0); + +#ifdef _OPENMP +#pragma omp parallel for +#endif + for (size_t h = 0; h < H; h++) { + // surrounding indices + std::vector ii_left(max_width, -1); + std::vector> ii_top(max_width, std::vector(N, -1)); + + for (size_t m = 0; m < M; m++) { + for (size_t n = 0; n < N; n++) { + int v = _layout[h][m][n]; + if (v == 0) continue; + int n_left = ii_left[max_width - 1]; + int m_top = 
ii_top[max_width - 1][n]; + int top = (m_top >= 0) ? _tmp[h][m_top][n] : 0; + int left = (n_left >= 0) ? _tmp[h][m][n_left] : 0; + int topleft = (m_top >= 0 && n_left >= 0) ? _tmp[h][m_top][n_left] : 0; + int width = std::min(left, std::min(top, topleft)) + 1; + + // reset width if blocks cannot be + // packed together (i.e., there's a 1 "in the middle") + for (int nn = n_left + 1; nn < n; nn++) + if (ii_top[max_width - 1][nn] > ii_top[max_width - 1][n]) width = 1; + _tmp[h][m][n] = width; + + // update n_left ring buffer + for (int k = 0; k < max_width - 1; k++) ii_left[k] = ii_left[k + 1]; + ii_left[max_width - 1] = n; + + // update ii_top ring buffer + for (int k = 0; k < max_width - 1; k++) ii_top[k][n] = ii_top[k + 1][n]; + ii_top[max_width - 1][n] = m; + + // block is too small -- skip + if (width != max_width) continue; + + // retained blocks are set to zeros + for (size_t km = 0; km < max_width; km++) + for (size_t kn = 0; kn < max_width; kn++) { + int mm = ii_top[km][n]; + int nn = ii_left[kn]; + if (mm < 0 || nn < 0) continue; + _layout[h][mm][nn] = 0; + _tmp[h][mm][nn] = 0; + _scratch[h][current[h]][0] = (int)h; + _scratch[h][current[h]][1] = (int)mm; + _scratch[h][current[h]][2] = (int)nn; + _scratch[h][current[h]][3] = _idx[h][mm][nn]; + current[h]++; + } + } + } + } + std::vector to_cat; + for (size_t h = 0; h < H; h++) + if (current[h] > 0) to_cat.push_back(scratch[h].slice(0, 0, current[h])); + if (!to_cat.empty()) ret.push_back({max_width, torch::cat(to_cat)}); +} + +ret_t sdd_segment(torch::Tensor layout, int start_width) +{ + ret_t ret; + + // block index + torch::Tensor idx = torch::zeros_like(layout); + int current = 0; + int64_t H = layout.size(0); + int64_t M = layout.size(1); + int64_t N = layout.size(2); + auto _layout = layout.accessor(); + auto _idx = idx.accessor(); + for (int64_t h = 0; h < H; h++) + for (int64_t m = 0; m < M; m++) + for (int64_t n = 0; n < N; n++) { + if (_layout[h][m][n] == 0) continue; + _idx[h][m][n] = current++; + 
} + + // scratch memory + torch::Tensor scratch = torch::empty({H, layout.sum().item(), 4}, layout.dtype()); + + for (int max_width = start_width; max_width > 0; max_width /= 2) + segment_blocks(layout, idx, scratch, max_width, ret); + return ret; +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) +{ + m.def("sdd_segment", &sdd_segment, "SDD segmentation handler"); +} diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/spatial/csrc/pt_binding.cpp b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/spatial/csrc/pt_binding.cpp new file mode 100644 index 0000000000000000000000000000000000000000..cbf6636a6ee4ac5769a88b7cab7d2a222f00d5cc --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/spatial/csrc/pt_binding.cpp @@ -0,0 +1,112 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include +#include +#include +#include +#include "spatial_cuda_layers.h" + +ChannelsLastProblem dimension_problem(at::Tensor& input) +{ + ChannelsLastProblem dims; + + if (input.dim() == 4) { + // In some sense this is unsafe (and a reflection of the assumptions made inside + // the C10 options checker). Basically, there's no great way to be sure that + // a tensor is in channels last because a 1x1 image will appear to be in channels + // last even when it isn't. 
+ assert(input.is_contiguous(at::MemoryFormat::ChannelsLast)); + dims.batch_size = input.size(0); + dims.seq_len = input.size(2) * input.size(3); + dims.channels = input.size(1); + } else { + assert(input.is_contiguous()); + dims.batch_size = input.size(0); + dims.seq_len = input.size(1); + dims.channels = input.size(2); + } + + return dims; +} + +at::Tensor seq_unroll_bias_add(at::Tensor& input, at::Tensor& bias) +{ + assert(input.dtype() == at::kHalf); + + // TODO(cmikeh2): Should probably refactor this into a more portable + // description, since it does generalize for channels-last + ChannelsLastProblem problem = dimension_problem(input); + + auto output = at::empty_like(input); + + launch_opt_bias_add((__half*)output.data_ptr(), + (const __half*)input.data_ptr(), + (const __half*)bias.data_ptr(), + nullptr, + nullptr, + problem.batch_size, + problem.seq_len, + problem.channels, + at::cuda::getCurrentCUDAStream()); + + return output; +} + +at::Tensor seq_bias_add_add(at::Tensor& input, at::Tensor& bias, at::Tensor& other) +{ + assert(input.dtype() == at::kHalf); + + // TODO(cmikeh2): Should probably refactor this into a more portable + // description, since it does generalize for channels-last + ChannelsLastProblem problem = dimension_problem(input); + + auto output = at::empty_like(input); + + launch_opt_bias_add((__half*)output.data_ptr(), + (const __half*)input.data_ptr(), + (const __half*)bias.data_ptr(), + (const __half*)other.data_ptr(), + nullptr, + problem.batch_size, + problem.seq_len, + problem.channels, + at::cuda::getCurrentCUDAStream()); + + return output; +} + +at::Tensor seq_bias_add_bias_add(at::Tensor& input, + at::Tensor& bias, + at::Tensor& other, + at::Tensor& other_bias) +{ + assert(input.dtype() == at::kHalf); + + // TODO(cmikeh2): Should probably refactor this into a more portable + // description, since it does generalize for channels-last + ChannelsLastProblem problem = dimension_problem(input); + + auto output = at::empty_like(input); 
+ + launch_opt_bias_add((__half*)output.data_ptr(), + (const __half*)input.data_ptr(), + (const __half*)bias.data_ptr(), + (const __half*)other.data_ptr(), + (const __half*)other_bias.data_ptr(), + problem.batch_size, + problem.seq_len, + problem.channels, + at::cuda::getCurrentCUDAStream()); + + return output; +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) +{ + m.def("nhwc_bias_add", &seq_unroll_bias_add); + m.def("nhwc_bias_add_add", &seq_bias_add_add); + m.def("nhwc_bias_add_bias_add", &seq_bias_add_bias_add); +} diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/spatial/includes/spatial_cuda_layers.h b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/spatial/includes/spatial_cuda_layers.h new file mode 100644 index 0000000000000000000000000000000000000000..4f56f89f16cd1a200f057ebd651575224be23b7b --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/spatial/includes/spatial_cuda_layers.h @@ -0,0 +1,37 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#if __CUDA_ARCH__ >= 530 +#define HALF_PRECISION_AVAILABLE = 1 +#endif + +#ifdef __HIP_PLATFORM_AMD__ +#include +#else +#include +#endif + +#include +#include + +/*********** Group Norm Kernels, Structs, and Helpers ************/ + +struct { + int64_t batch_size; + int64_t seq_len; + int64_t channels; +} typedef ChannelsLastProblem; + +void launch_opt_bias_add(__half* result, + const __half* activation, + const __half* bias, + const __half* other, + const __half* other_bias, + int batch_size, + int seq_len, + int channels, + cudaStream_t stream); diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/dropout_kernels.cu b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/dropout_kernels.cu new file mode 100644 index 0000000000000000000000000000000000000000..97cb3592dcdc4cf0a2dc37e34fc9b13117d9bcc5 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/dropout_kernels.cu @@ -0,0 +1,873 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "custom_cuda_layers.h" + +const int unroll_factor = 4; + +__global__ void dropout_kernel(const int N, + const float ratio, + float* out, + const float* Xdata, + uint8_t* mask, + std::pair seed) +{ + const float scale = 1. / (1. 
- ratio); + int idx = blockIdx.x * blockDim.x + threadIdx.x; + + curandStatePhilox4_32_10_t state; + curand_init(seed.first, idx, seed.second, &state); + + CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) + { + float4 rand = curand_uniform4(&state); + uint8_t m[unroll_factor]; + + m[0] = (uint8_t)(rand.x > ratio); + m[1] = (uint8_t)(rand.y > ratio); + m[2] = (uint8_t)(rand.z > ratio); + m[3] = (uint8_t)(rand.w > ratio); + + int i = j * unroll_factor; + + mask[i] = (uint8_t)m[0]; + mask[i + 1] = (uint8_t)m[1]; + mask[i + 2] = (uint8_t)m[2]; + mask[i + 3] = (uint8_t)m[3]; + + out[i] = Xdata[i] * scale * m[0]; + out[i + 1] = Xdata[i + 1] * scale * m[1]; + out[i + 2] = Xdata[i + 2] * scale * m[2]; + out[i + 3] = Xdata[i + 3] * scale * m[3]; + } + int high_index = + ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; + if (N > high_index) { + float4 rand = curand_uniform4(&state); + float* rand_data = &(rand.x); + int k = 0; + for (int i = high_index; i < N; i++) { + uint8_t m = (uint8_t)(rand_data[k++] > ratio); + out[i] = Xdata[i] * scale * m; + mask[i] = m; + } + } +} + +__global__ void dropout_kernel(const int N, + const float ratio, + __half* out, + const __half* Xdata, + uint8_t* mask, + std::pair seed) +{ + const float scale = 1. / (1. 
- ratio); + + int idx = blockIdx.x * blockDim.x + threadIdx.x; + + curandStatePhilox4_32_10_t state; + curand_init(seed.first, idx, seed.second, &state); + +#ifdef __STOCHASTIC_MODE__ + + const __half2 h_scale = __float2half2_rn(scale); + const float2* x_cast = reinterpret_cast(Xdata); + float2* out_cast = reinterpret_cast(out); + uint32_t* mask_cast = reinterpret_cast(mask); + + uint32_t m_32; + uint8_t* m = reinterpret_cast(&m_32); + + float2 result_f; + __half2* result_h = reinterpret_cast<__half2*>(&result_f); + __half2 mask_h[2]; + float2 mask_f[2]; + + CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) + { + float2 x_f = x_cast[j]; + __half2* x_h = reinterpret_cast<__half2*>(&x_f); + + float4 rand = curand_uniform4(&state); + + m[0] = (uint8_t)(rand.x > ratio); + m[1] = (uint8_t)(rand.y > ratio); + m[2] = (uint8_t)(rand.z > ratio); + m[3] = (uint8_t)(rand.w > ratio); + + float* mask_f_data = &mask_f[0].x; +#pragma unroll + for (int i = 0; i < unroll_factor; i++) mask_f_data[i] = (float)(m[i]); + + mask_h[0] = __float22half2_rn(mask_f[0]); + mask_h[1] = __float22half2_rn(mask_f[1]); + + result_h[0] = x_h[0] * h_scale * mask_h[0]; + result_h[1] = x_h[1] * h_scale * mask_h[1]; + + out_cast[j] = result_f; + + mask_cast[j] = m_32; + } + +#else + + CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) + { + int i = j * unroll_factor; + + const __half2* vals_half = reinterpret_cast(Xdata + i); + float2 vals_half_f[2]; + vals_half_f[0] = __half22float2(vals_half[0]); + vals_half_f[1] = __half22float2(vals_half[1]); + + uint8_t m[unroll_factor]; + float4 rand = curand_uniform4(&state); + m[0] = (uint8_t)(rand.x > ratio); + m[1] = (uint8_t)(rand.y > ratio); + m[2] = (uint8_t)(rand.z > ratio); + m[3] = (uint8_t)(rand.w > ratio); + + out[i] = __float2half(vals_half_f[0].x * scale * m[0]); + out[i + 1] = __float2half(vals_half_f[0].y * scale * m[1]); + out[i + 2] = __float2half(vals_half_f[1].x * scale * m[2]); + out[i + 3] = __float2half(vals_half_f[1].y * scale * m[3]); + + mask[i] = m[0]; 
+ mask[i + 1] = m[1]; + mask[i + 2] = m[2]; + mask[i + 3] = m[3]; + } + +#endif + int high_index = + ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; + if (N > high_index) { + float4 rand = curand_uniform4(&state); + float* rand_data = &(rand.x); + int k = 0; + for (int i = high_index; i < N; i++) { + uint8_t m = (uint8_t)(rand_data[k++] > ratio); + out[i] = __float2half((float)Xdata[i] * scale * m); + mask[i] = m; + } + } +} + +__global__ void dropout_kernel_bwd(const int N, + const float ratio, + const float* Xdata, + float* out, + uint8_t* mask, + std::pair seed) +{ + const float scale = 1. / (1. - ratio); + CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) + { + int i = j * unroll_factor; + + out[i] = mask[i] ? Xdata[i] * scale : 0.0; + out[i + 1] = mask[i + 1] ? Xdata[i + 1] * scale : 0.0; + out[i + 2] = mask[i + 2] ? Xdata[i + 2] * scale : 0.0; + out[i + 3] = mask[i + 3] ? Xdata[i + 3] * scale : 0.0; + } + int high_index = + ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; + if (N > high_index) { + for (int i = high_index; i < N; i++) { out[i] = mask[i] ? Xdata[i] * scale : 0.0; } + } +} + +__global__ void dropout_kernel_bwd(const int N, + const float ratio, + const __half* Xdata, + __half* out, + uint8_t* mask, + std::pair seed) +{ + const float scale = 1. / (1. 
- ratio); + +#ifdef __STOCHASTIC_MODE__ + + const __half2 h_scale = __float2half2_rn(scale); + + const float2* x_cast = reinterpret_cast(Xdata); + float2* out_cast = reinterpret_cast(out); + uint32_t* mask_cast = reinterpret_cast(mask); + + CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) + { + float2 x_f = x_cast[j]; + __half2* x_h = reinterpret_cast<__half2*>(&x_f); + + uint32_t m_32 = mask_cast[j]; + uint8_t* m = (uint8_t*)&m_32; + + __half2 mask_h[2]; + float2 mask_f[2]; + + float* mask_f_data = &mask_f[0].x; +#pragma unroll + for (int i = 0; i < unroll_factor; i++) mask_f_data[i] = (float)(m[i]); + +#pragma unroll + for (int i = 0; i < 2; i++) mask_h[i] = __float22half2_rn(mask_f[i]); + + float2 result_f; + __half2* result_h = reinterpret_cast<__half2*>(&result_f); + + result_h[0] = x_h[0] * h_scale * mask_h[0]; + result_h[1] = x_h[1] * h_scale * mask_h[1]; + + out_cast[j] = result_f; + } + +#else + + const __half h_scale = __float2half(scale); + const __half h_zero = __float2half(0.0); + + CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) + { + int i = j * unroll_factor; + + const __half2* vals_half = reinterpret_cast(Xdata + i); + + uint8_t* m = mask + i; + + float2 vals_half_f[2]; + + vals_half_f[0] = __half22float2(vals_half[0]); + vals_half_f[1] = __half22float2(vals_half[1]); + + out[i] = __float2half(vals_half_f[0].x * scale * m[0]); + out[i + 1] = __float2half(vals_half_f[0].y * scale * m[1]); + out[i + 2] = __float2half(vals_half_f[1].x * scale * m[2]); + out[i + 3] = __float2half(vals_half_f[1].y * scale * m[3]); + } + +#endif + int high_index = + ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; + if (N > high_index) { + for (int i = high_index; i < N; i++) { + out[i] = __float2half((float)Xdata[i] * scale * mask[i]); + } + } +} + +template +void launch_dropout(T* out, + const T* vals, + uint8_t* mask, + int total_count, + int dim, + float ratio, + cudaStream_t stream, + bool bwd) +{ + assert(unroll_factor == 4); + + dim3 
grid_dim = DS_GET_BLOCKS(total_count / unroll_factor); + dim3 block_dim = DS_CUDA_NUM_THREADS; + + if (dim > 512) { + block_dim.x >>= 1; + grid_dim.x <<= 1; + } + uint64_t inc = total_count / grid_dim.x / block_dim.x; + std::pair seed = TrainingContext::Instance().IncrementOffset(inc); + if (bwd) + dropout_kernel_bwd<<>>( + total_count, ratio, vals, out, mask, seed); + else + dropout_kernel<<>>( + total_count, ratio, out, vals, mask, seed); +} + +template void launch_dropout(float* out, + const float* vals, + uint8_t* mask, + int total_count, + int dim, + float ratio, + cudaStream_t stream, + bool); +template void launch_dropout(__half* out, + const __half* vals, + uint8_t* mask, + int total_count, + int dim, + float ratio, + cudaStream_t stream, + bool); + +__global__ void dropout_grad_kernel(const int N, const float scale, float* Xdata, uint8_t* mask) +{ + CUDA_1D_KERNEL_LOOP(i, N) { Xdata[i] *= scale * mask[i]; } +} + +__global__ void dropout_grad_kernel(const int N, const float scale, __half* Xdata, uint8_t* mask) +{ + const __half2 h_scale = __float2half2_rn(scale); + float2* x_cast = reinterpret_cast(Xdata); + uint32_t* mask_cast = reinterpret_cast(mask); + + CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) + { + float2 x_data = x_cast[j]; + uint32_t m_32 = mask_cast[j]; + uint8_t* m = (uint8_t*)&m_32; + + float2 result_f; + __half2* result_h = reinterpret_cast<__half2*>(&result_f); + +#ifdef __STOCHASTIC_MODE__ + + __half2* x_data_h = reinterpret_cast<__half2*>(&x_data); + __half2 mask_h[2]; + float2 mask_f[2]; + + float* mask_f_data = &mask_f[0].x; +#pragma unroll + for (int i = 0; i < unroll_factor; i++) *(mask_f_data++) = (float)(m[i]); + + mask_h[0] = __float22half2_rn(mask_f[0]); + mask_h[1] = __float22half2_rn(mask_f[1]); + + result_h[0] = x_data_h[0] * h_scale * mask_h[0]; + result_h[1] = x_data_h[1] * h_scale * mask_h[1]; + +#else + + __half* x_data_h = reinterpret_cast<__half*>(&x_data); + float2 result[2]; + + result[0].x = (float)x_data_h[0] * scale * 
m[0]; + result[0].y = (float)x_data_h[1] * scale * m[1]; + result[1].x = (float)x_data_h[2] * scale * m[2]; + result[1].y = (float)x_data_h[3] * scale * m[3]; + + result_h[0] = __float22half2_rn(result[0]); + result_h[1] = __float22half2_rn(result[1]); + +#endif + x_cast[j] = result_f; + } + int high_index = + ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; + if (N > high_index) { + for (int i = high_index; i < N; i++) { + Xdata[i] = __float2half((float)Xdata[i] * scale * mask[i]); + } + } +} + +template +void launch_dropout_grad(T* vals, uint8_t* mask, int total_count, float ratio, cudaStream_t stream) +{ + assert(unroll_factor == 4); + + const float scale = 1. / (1. - ratio); + dropout_grad_kernel<<>>(total_count, scale, vals, mask); +} + +template void launch_dropout_grad(float* vals, + uint8_t* mask, + int total_count, + float ratio, + cudaStream_t stream); +template void launch_dropout_grad(__half* vals, + uint8_t* mask, + int total_count, + float ratio, + cudaStream_t stream); + +__global__ void dropout_grad_kernel(const int N, + const float scale, + const float* Xdata, + float* out, + uint8_t* mask) +{ + CUDA_1D_KERNEL_LOOP(i, N) { out[i] = Xdata[i] * scale * mask[i]; } +} + +__global__ void dropout_grad_kernel(const int N, + const float scale, + const __half* Xdata, + __half* out, + uint8_t* mask) +{ + const float2* x_cast = reinterpret_cast(Xdata); + float2* out_cast = reinterpret_cast(out); + const uint32_t* mask_cast = reinterpret_cast(mask); + + float2 result_f; + __half2* result_h = reinterpret_cast<__half2*>(&result_f); + + CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) + { + float2 x_data = x_cast[j]; + uint32_t m_32 = mask_cast[j]; + uint8_t* m = (uint8_t*)&m_32; + + __half* x_data_h = reinterpret_cast<__half*>(&x_data); + float2 result[2]; + + result[0].x = (float)x_data_h[0] * scale * m[0]; + result[0].y = (float)x_data_h[1] * scale * m[1]; + result[1].x = (float)x_data_h[2] * scale * m[2]; + result[1].y = 
(float)x_data_h[3] * scale * m[3]; + + result_h[0] = __float22half2_rn(result[0]); + result_h[1] = __float22half2_rn(result[1]); + + out_cast[j] = result_f; + } + int high_index = + ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; + if (N > high_index) { + for (int i = high_index; i < N; i++) { + out[i] = __float2half((float)Xdata[i] * scale * mask[i]); + } + } +} + +template +void launch_dropout_grad(T* vals_out, + const T* vals, + uint8_t* mask, + int total_count, + float ratio, + cudaStream_t stream) +{ + assert(unroll_factor == 4); + + const float scale = 1. / (1. - ratio); + dropout_grad_kernel<<>>(total_count, scale, vals, vals_out, mask); +} +template void launch_dropout_grad(float*, + const float* vals, + uint8_t* mask, + int total_count, + float ratio, + cudaStream_t stream); +template void launch_dropout_grad(__half*, + const __half* vals, + uint8_t* mask, + int total_count, + float ratio, + cudaStream_t stream); + +__global__ void dropout_kernel(const int N, + const int dim, + const float ratio, + const float* bias, + float* Xdata, + uint8_t* mask, + std::pair seed) +{ + const float scale = 1. / (1. 
- ratio); + int idx = blockIdx.x * blockDim.x + threadIdx.x; + int tid = threadIdx.x % (dim / unroll_factor); + + curandStatePhilox4_32_10_t state; + curand_init(seed.first, idx, seed.second, &state); + + float4* Xdata_cast = reinterpret_cast(Xdata); + uint32_t* mask_32 = reinterpret_cast(mask); + const float4* bias_cast = reinterpret_cast(bias); + + CUDA_1D_KERNEL_LOOP(j, N) + { + float4 rand = curand_uniform4(&state); + uint32_t m_32; + uint8_t* m = (uint8_t*)&m_32; + + m[0] = (uint8_t)(rand.x > ratio); + m[1] = (uint8_t)(rand.y > ratio); + m[2] = (uint8_t)(rand.z > ratio); + m[3] = (uint8_t)(rand.w > ratio); + + float4 x_data = Xdata_cast[j]; + float4 b_data = bias_cast[j % (dim / unroll_factor)]; + + x_data.x += b_data.x; + x_data.y += b_data.y; + x_data.z += b_data.z; + x_data.w += b_data.w; + + x_data.x = x_data.x * scale * m[0]; + x_data.y = x_data.y * scale * m[1]; + x_data.z = x_data.z * scale * m[2]; + x_data.w = x_data.w * scale * m[3]; + + mask_32[j] = m_32; + Xdata_cast[j] = x_data; + } + int high_index = + ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; + if (N > high_index) { + float4 rand = curand_uniform4(&state); + float* rand_data = &(rand.x); + int k = 0; + for (int i = high_index; i < N; i++) { + float x_data = Xdata[i] + bias[i % dim]; + uint8_t m = (uint8_t)(rand_data[k++] > ratio); + Xdata[i] = x_data * scale * m; + mask[i] = m; + } + } +} + +__global__ void dropout_kernel(const int N, + const int dim, + const float ratio, + const __half* bias, + __half* Xdata, + uint8_t* mask, + std::pair seed) +{ + const float scale = 1. / (1. 
- ratio); + int idx = blockIdx.x * blockDim.x + threadIdx.x; + int tid = threadIdx.x % (dim / unroll_factor); + + curandStatePhilox4_32_10_t state; + curand_init(seed.first, idx, seed.second, &state); + + float2* Xdata_cast = reinterpret_cast(Xdata); + uint32_t* mask_32 = reinterpret_cast(mask); + const float2* bias_cast = reinterpret_cast(bias); + + CUDA_1D_KERNEL_LOOP(j, N) + { + float4 rand = curand_uniform4(&state); + + float2 data_f; + __half2* data_h = reinterpret_cast<__half2*>(&data_f); + + float2 bias_f; + __half2* bias_h = reinterpret_cast<__half2*>(&bias_f); + + data_f = Xdata_cast[j]; + bias_f = bias_cast[j % (dim / unroll_factor)]; + + float2 data_h_0 = __half22float2(data_h[0]); + float2 data_h_1 = __half22float2(data_h[1]); + + float2 bias_h_0 = __half22float2(bias_h[0]); + float2 bias_h_1 = __half22float2(bias_h[1]); + + data_h_0.x += bias_h_0.x; + data_h_0.y += bias_h_0.y; + data_h_1.x += bias_h_1.x; + data_h_1.y += bias_h_1.y; + + uint32_t m_32; + uint8_t* m = (uint8_t*)&m_32; + + m[0] = (uint8_t)(rand.x > ratio); + m[1] = (uint8_t)(rand.y > ratio); + m[2] = (uint8_t)(rand.z > ratio); + m[3] = (uint8_t)(rand.w > ratio); + + data_h_0.x = __float2half(data_h_0.x * scale * m[0]); + data_h_0.y = __float2half(data_h_0.y * scale * m[1]); + data_h_1.x = __float2half(data_h_1.x * scale * m[2]); + data_h_1.y = __float2half(data_h_1.y * scale * m[3]); + + float2 result_f; + __half2* result_h = reinterpret_cast<__half2*>(&result_f); + + result_h[0] = __float22half2_rn(data_h_0); + result_h[1] = __float22half2_rn(data_h_1); + + Xdata_cast[j] = result_f; + mask_32[j] = m_32; + } + int high_index = + ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; + if (N > high_index) { + float4 rand = curand_uniform4(&state); + float* rand_data = &(rand.x); + int k = 0; + for (int i = high_index; i < N; i++) { + float x_data = (float)Xdata[i] + (float)bias[i % dim]; + uint8_t m = (uint8_t)(rand_data[k++] > ratio); + Xdata[i] = 
__float2half(x_data * scale * m); + mask[i] = m; + } + } +} + +template +void launch_dropout(T* out, + const T* bias, + uint8_t* mask, + int batch, + int dim, + float ratio, + cudaStream_t stream) +{ + assert(unroll_factor == 4); + + int total_count = batch * dim / unroll_factor; + + dim3 grid_dim = DS_GET_BLOCKS(total_count); + dim3 block_dim = DS_CUDA_NUM_THREADS; + + uint64_t inc = (batch * dim) / grid_dim.x / block_dim.x; + std::pair seed = TrainingContext::Instance().IncrementOffset(inc); + + dropout_kernel<<>>( + total_count, dim, ratio, bias, out, mask, seed); +} + +template void launch_dropout(float*, + const float* bias, + uint8_t* mask, + int batch, + int dim, + float ratio, + cudaStream_t stream); +template void launch_dropout(__half*, + const __half* bias, + uint8_t* mask, + int batch, + int dim, + float ratio, + cudaStream_t stream); + +__global__ void dropout_kernel(const int N, + const int dim, + const float ratio, + const float* input, + const float* residual, + const float* bias, + float* out, + uint8_t* mask, + std::pair seed) +{ + const float scale = 1. / (1. 
- ratio); + int idx = blockIdx.x * blockDim.x + threadIdx.x; + int tid = threadIdx.x % (dim / unroll_factor); + + curandStatePhilox4_32_10_t state; + curand_init(seed.first, idx, seed.second, &state); + + float4* out_cast = reinterpret_cast(out); + uint32_t* mask_32 = reinterpret_cast(mask); + + const float4* bias_cast = reinterpret_cast(bias); + const float4* residual_cast = reinterpret_cast(residual); + const float4* input_cast = reinterpret_cast(input); + + CUDA_1D_KERNEL_LOOP(j, N) + { + float4 rand = curand_uniform4(&state); + + uint32_t m_32; + uint8_t* m = (uint8_t*)&m_32; + + m[0] = (uint8_t)(rand.x > ratio); + m[1] = (uint8_t)(rand.y > ratio); + m[2] = (uint8_t)(rand.z > ratio); + m[3] = (uint8_t)(rand.w > ratio); + + float4 out_data; + float4 b_data = bias_cast[j % (dim / unroll_factor)]; + float4 res_data = residual_cast[j]; + float4 inp_data = input_cast[j]; + + out_data.x = (b_data.x + inp_data.x); + out_data.y = (b_data.y + inp_data.y); + out_data.z = (b_data.z + inp_data.z); + out_data.w = (b_data.w + inp_data.w); + + out_data.x = out_data.x * scale * m[0]; + out_data.y = out_data.y * scale * m[1]; + out_data.z = out_data.z * scale * m[2]; + out_data.w = out_data.w * scale * m[3]; + + out_data.x += res_data.x; + out_data.y += res_data.y; + out_data.z += res_data.z; + out_data.w += res_data.w; + + mask_32[j] = m_32; + out_cast[j] = out_data; + } + int high_index = + ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; + if (N > high_index) { + float4 rand = curand_uniform4(&state); + float* rand_data = &(rand.x); + int k = 0; + for (int i = high_index; i < N; i++) { + float x_data = input[i] + bias[i % dim]; + uint8_t m = (uint8_t)(rand_data[k++] > ratio); + x_data = x_data * scale * m; + x_data += residual[i]; + + out[i] = x_data; + mask[i] = m; + } + } +} + +__global__ void dropout_kernel(const int N, + const int dim, + const float ratio, + const __half* input, + const __half* residual, + const __half* bias, + 
__half* out, + uint8_t* mask, + std::pair seed) +{ + const float scale = 1. / (1. - ratio); + int idx = blockIdx.x * blockDim.x + threadIdx.x; + int tid = threadIdx.x % (dim / unroll_factor); + + curandStatePhilox4_32_10_t state; + curand_init(seed.first, idx, seed.second, &state); + + float2* out_cast = reinterpret_cast(out); + uint32_t* mask_32 = reinterpret_cast(mask); + + const float2* bias_cast = reinterpret_cast(bias); + const float2* residual_cast = reinterpret_cast(residual); + const float2* input_cast = reinterpret_cast(input); + + CUDA_1D_KERNEL_LOOP(j, N) + { + float4 rand = curand_uniform4(&state); + + float2 data_f; + __half2* data_h = reinterpret_cast<__half2*>(&data_f); + + float2 bias_f; + __half2* bias_h = reinterpret_cast<__half2*>(&bias_f); + + float2 residual_f; + __half2* residual_h = reinterpret_cast<__half2*>(&residual_f); + + float2 input_f; + __half2* input_h = reinterpret_cast<__half2*>(&input_f); + + bias_f = bias_cast[j % (dim / unroll_factor)]; + residual_f = residual_cast[j]; + input_f = input_cast[j]; + + float2 data_h_0 = __half22float2(data_h[0]); + float2 data_h_1 = __half22float2(data_h[1]); + + float2 bias_h_0 = __half22float2(bias_h[0]); + float2 bias_h_1 = __half22float2(bias_h[1]); + + float2 residual_h_0 = __half22float2(residual_h[0]); + float2 residual_h_1 = __half22float2(residual_h[1]); + + float2 input_h_0 = __half22float2(input_h[0]); + float2 input_h_1 = __half22float2(input_h[1]); + + data_h_0.x = (bias_h_0.x + input_h_0.x); + data_h_0.y = (bias_h_0.y + input_h_0.y); + data_h_1.x = (bias_h_1.x + input_h_1.x); + data_h_1.y = (bias_h_1.y + input_h_1.y); + + uint32_t m_32; + uint8_t* m = (uint8_t*)&m_32; + + m[0] = (uint8_t)(rand.x > ratio); + m[1] = (uint8_t)(rand.y > ratio); + m[2] = (uint8_t)(rand.z > ratio); + m[3] = (uint8_t)(rand.w > ratio); + + data_h_0.x = __float2half(data_h_0.x * scale * m[0]); + data_h_0.y = __float2half(data_h_0.y * scale * m[1]); + data_h_1.x = __float2half(data_h_1.x * scale * m[2]); + 
data_h_1.y = __float2half(data_h_1.y * scale * m[3]); + + data_h_0.x += residual_h_0.x; + data_h_0.y += residual_h_0.y; + data_h_1.x += residual_h_1.x; + data_h_1.y += residual_h_1.y; + + float2 result_f; + __half2* result_h = reinterpret_cast<__half2*>(&result_f); + + result_h[0] = __float22half2_rn(data_h_0); + result_h[1] = __float22half2_rn(data_h_1); + + out_cast[j] = result_f; + mask_32[j] = m_32; + } + int high_index = + ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; + if (N > high_index) { + float4 rand = curand_uniform4(&state); + float* rand_data = &(rand.x); + int k = 0; + for (int i = high_index; i < N; i++) { + float x_data = (float)input[i] + (float)bias[i % dim]; + uint8_t m = (uint8_t)(rand_data[k++] > ratio); + x_data = x_data * scale * m; + x_data += (float)residual[i]; + + out[i] = __float2half(x_data); + mask[i] = m; + } + } +} + +template +void launch_dropout(T* out, + const T* input, + const T* residual, + const T* bias, + uint8_t* mask, + int batch, + int dim, + float ratio, + cudaStream_t stream) +{ + assert(unroll_factor == 4); + + int total_count = batch * dim / unroll_factor; + dim3 grid_dim = DS_GET_BLOCKS(total_count); + dim3 block_dim = DS_CUDA_NUM_THREADS; + + uint64_t inc = (batch * dim) / grid_dim.x / block_dim.x; + std::pair seed = TrainingContext::Instance().IncrementOffset(inc); + + dropout_kernel<<>>( + total_count, dim, ratio, input, residual, bias, out, mask, seed); +} + +template void launch_dropout(float*, + const float*, + const float* residual, + const float* bias, + uint8_t* mask, + int batch, + int dim, + float ratio, + cudaStream_t stream); +template void launch_dropout(__half*, + const __half*, + const __half* residual, + const __half* bias, + uint8_t* mask, + int batch, + int dim, + float ratio, + cudaStream_t stream); diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/ds_transformer_cuda.cpp 
b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/ds_transformer_cuda.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b637bb710c67a4f980571cd876c4caa350f4a71b --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/ds_transformer_cuda.cpp @@ -0,0 +1,1055 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include + +#include +#include +#include +#include +#include +#include +#include "Timer.h" +#include "context.h" +#include "cublas_wrappers.h" +#include "custom_cuda_layers.h" +#include "ds_transformer_cuda.h" + +static std::unordered_map> s_transformer_layers; + +const int init_seq_length = 128; + +// C++ interface + +template +unsigned get_workspace_size(unsigned maxBatchSize, + unsigned seq_len, + unsigned hidden_size, + unsigned intermediate_size, + unsigned heads, + bool training, + bool gelu_checkpoint) +{ + unsigned workSpacesize = 4 * (size_t(maxBatchSize) * seq_len * hidden_size); + if (training) { + workSpacesize += 2 * (size_t(maxBatchSize) * seq_len * hidden_size); + workSpacesize += ((std::max)((size_t(maxBatchSize) * seq_len * intermediate_size), + 2 * (size_t(maxBatchSize) * heads * seq_len * seq_len))); + if (gelu_checkpoint) + workSpacesize += 2 * (size_t(maxBatchSize) * seq_len * intermediate_size); + } + return workSpacesize; // * sizeof(T); +} + +// NOTE: AT_ASSERT has become AT_CHECK on master after 0.4. 
+#define CHECK_CUDA(x) AT_ASSERTM(x.is_cuda(), #x " must be a CUDA tensor") +#define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous") +#define CHECK_INPUT(x) \ + CHECK_CUDA(x); \ + CHECK_CONTIGUOUS(x) + +template +BertTransformerLayer::BertTransformerLayer(unsigned layer_id, + unsigned batch_size, + unsigned hidden_size, + unsigned num_heads, + unsigned intermediate_size, + unsigned seq_length, + float attn_prob_dropout_ratio, + float hidden_output_dropout_ratio, + float layer_norm_eps, + bool pre_or_postLayerNorm, + const std::vector>& gemm_algos, + bool attn_dropout_checkpoint, + bool normalize_invertible, + bool gelu_checkpoint, + bool stochastic_mode) + : _layer_id(layer_id), + _batch_size(batch_size), + _hidden_size(hidden_size), + _heads(num_heads), + _intermediate_size(intermediate_size), + _seq_length(seq_length), + _training(true), + _pre_or_postLayerNorm(pre_or_postLayerNorm), + _attn_dropout_checkpoint(attn_dropout_checkpoint), + _normalize_invertible(normalize_invertible), + _gelu_checkpoint(gelu_checkpoint), + _stochastic_mode(stochastic_mode), + _stream(TrainingContext::Instance().GetCurrentStream()), + _cublasHandle(TrainingContext::Instance().GetCublasHandle()), + _qkv_linear(typename FeedForward::Config(batch_size * seq_length, + 3 * hidden_size, + hidden_size, + gemm_algos[0])), + _attn_out_linear(typename FeedForward::Config(batch_size * seq_length, + hidden_size, + hidden_size, + gemm_algos[0])), + _attn_layer_norm(typename Normalize_Layer::Config(batch_size, + seq_length, + hidden_size, + layer_norm_eps, + true, + !normalize_invertible)), + _layer_norm(typename Normalize_Layer::Config(batch_size, + seq_length, + hidden_size, + layer_norm_eps, + true, + !normalize_invertible)), + _ff1(typename FeedForward::Config(batch_size * seq_length, + _intermediate_size, + hidden_size, + gemm_algos[1])), + _ff2(typename FeedForward::Config(batch_size * seq_length, + hidden_size, + _intermediate_size, + gemm_algos[2])), + 
_softmax(typename Softmax::Config(batch_size, num_heads, seq_length)), + _gelu(typename Gelu::Config(_intermediate_size)), + _attn_prob_dropout(typename Dropout::Config(attn_prob_dropout_ratio, _seq_length)), + _attn_output_dropout(typename Dropout::Config(hidden_output_dropout_ratio, _hidden_size)), + _layer_output_dropout(typename Dropout::Config(hidden_output_dropout_ratio, _hidden_size)), + _attn_scores(typename StridedBatchGemm::Config(_batch_size * _heads, + _seq_length, + _seq_length, + _hidden_size / _heads, + (T(1.0) / T(sqrt(_hidden_size / _heads))), + T(0.0), + CUBLAS_OP_T, + CUBLAS_OP_N, + gemm_algos[3])), + _attn_context(typename StridedBatchGemm::Config(_batch_size * _heads, + _hidden_size / _heads, + _seq_length, + _seq_length, + T(1.0), + T(0.0), + CUBLAS_OP_N, + CUBLAS_OP_N, + gemm_algos[4])) +{ + assert(_hidden_size % _heads == 0); + + Initialize(); +} + +template +BertTransformerLayer::~BertTransformerLayer() +{ +} + +template +void BertTransformerLayer::Initialize() +{ +#ifndef __HIP_PLATFORM_AMD__ + if (std::is_same::value) cublasSetMathMode(_cublasHandle, CUBLAS_TENSOR_OP_MATH); +#endif +} + +template +void BertTransformerLayer::Forward(unsigned bsz, + const T* input_ptr, + const T* input_mask_ptr, + const T* attn_qkvw_ptr, + const T* attn_qkvb_ptr, + const T* attn_ow_ptr, + const T* attn_ob_ptr, + const T* attn_nw_ptr, + const T* attn_nb_ptr, + const T* inter_w_ptr, + const T* inter_b_ptr, + const T* output_w_ptr, + const T* output_b_ptr, + const T* norm_w_ptr, + const T* norm_b_ptr, + T* out_ptr, + T* inp_norm_ptr, + T* q_tf_ptr, + T* k_tf_ptr, + T* v_tf_ptr, + T* soft_out_ptr, + T* ctx_bufB_ptr, + T* attn_o_inp_ptr, + T* add_res_ptr, + T* ff1_inp_ptr, + T* gelu_inp_ptr, + T* ff2_inp_ptr) +{ + cublasSetStream(_cublasHandle, _stream); + + if (!_stochastic_mode) cudaStreamSynchronize(_stream); + + T* workspace = static_cast(TrainingContext::Instance().GetWorkSpace()); + size_t small_buf_size = bsz * _seq_length * _hidden_size; + T* buf_0 = 
workspace; + T* buf_1 = buf_0 + small_buf_size; + T* buf_2 = buf_1; + + if (_normalize_invertible) { + add_res_ptr = buf_1 + 3 * small_buf_size; + buf_2 = add_res_ptr; + } + if (_gelu_checkpoint) buf_2 += small_buf_size; + if (_attn_dropout_checkpoint) + ctx_bufB_ptr = + (_gelu_checkpoint ? (buf_2 + (_intermediate_size / _hidden_size) * small_buf_size) + : (buf_1 + 4 * small_buf_size)); + + int bsz_seq = bsz * _seq_length; + + if (_pre_or_postLayerNorm) { + if (_layer_norm.UseMean()) + _layer_norm.ForwardCheckpoint( + bsz_seq, inp_norm_ptr, input_ptr, norm_w_ptr, norm_b_ptr, _stream, true); + + else + _layer_norm.Forward( + bsz_seq, inp_norm_ptr, input_ptr, norm_w_ptr, norm_b_ptr, _stream, true); + } + + if (_pre_or_postLayerNorm) + _qkv_linear.Forward(bsz_seq, inp_norm_ptr, attn_qkvw_ptr, buf_0, _cublasHandle); + else + _qkv_linear.Forward(bsz_seq, input_ptr, attn_qkvw_ptr, buf_0, _cublasHandle); + + launch_bias_add_transform_0213( + q_tf_ptr, buf_0, attn_qkvb_ptr, bsz, _seq_length, _hidden_size, _heads, _stream, 3); + + int bsz_heads = bsz * _heads; + + // attention scores + _attn_scores.Forward(bsz_heads, soft_out_ptr, k_tf_ptr, q_tf_ptr, _cublasHandle); + + // Softmax + Mask + _softmax.Forward(bsz, soft_out_ptr, input_mask_ptr, _stream); + + // attn prob dropout. + _attn_prob_dropout.Forward(bsz_heads * _seq_length, ctx_bufB_ptr, soft_out_ptr, _stream); + + // attention context + _attn_context.Forward(bsz_heads, buf_1, v_tf_ptr, ctx_bufB_ptr, _cublasHandle); + + launch_transform4d_0213( + attn_o_inp_ptr, buf_1, bsz, _heads, _seq_length, _hidden_size, _stream, 1); + + if (_pre_or_postLayerNorm) + _attn_out_linear.Forward(bsz_seq, attn_o_inp_ptr, attn_ow_ptr, buf_1, _cublasHandle); + else + _attn_out_linear.Forward(bsz_seq, attn_o_inp_ptr, attn_ow_ptr, ff1_inp_ptr, _cublasHandle); + + // attn output dropout. 
+ if (_pre_or_postLayerNorm) + _attn_output_dropout.ForwardWithBias( + bsz_seq, add_res_ptr, buf_1, input_ptr, attn_ob_ptr, _stream); + else + _attn_output_dropout.ForwardWithBias( + bsz_seq, add_res_ptr, ff1_inp_ptr, input_ptr, attn_ob_ptr, _stream); + + if (_pre_or_postLayerNorm) { + if (_attn_layer_norm.UseMean()) + _attn_layer_norm.ForwardCheckpoint( + bsz_seq, ff1_inp_ptr, add_res_ptr, attn_nw_ptr, attn_nb_ptr, _stream, true); + else + _attn_layer_norm.Forward( + bsz_seq, ff1_inp_ptr, add_res_ptr, attn_nw_ptr, attn_nb_ptr, _stream, true); + } else { + if (_attn_layer_norm.UseMean()) + _attn_layer_norm.ForwardCheckpoint( + bsz_seq, ff1_inp_ptr, add_res_ptr, attn_nw_ptr, attn_nb_ptr, _stream, true); + else + _attn_layer_norm.Forward( + bsz_seq, ff1_inp_ptr, add_res_ptr, attn_nw_ptr, attn_nb_ptr, _stream, true); + } + + _ff1.Forward(bsz_seq, + ff1_inp_ptr, + inter_w_ptr, + (_gelu_checkpoint ? ff2_inp_ptr : gelu_inp_ptr), + _cublasHandle); + + _gelu.ForwardWithBiasAdd(bsz_seq, + (_gelu_checkpoint ? ff2_inp_ptr : gelu_inp_ptr), + inter_b_ptr, + (_gelu_checkpoint ? buf_2 : ff2_inp_ptr), + _stream); + + _ff2.Forward( + bsz_seq, (_gelu_checkpoint ? buf_2 : ff2_inp_ptr), output_w_ptr, out_ptr, _cublasHandle); + + // layer output dropout. 
+ if (_pre_or_postLayerNorm) + _layer_output_dropout.ForwardWithBias( + bsz_seq, out_ptr, out_ptr, add_res_ptr, output_b_ptr, _stream); + else + _layer_output_dropout.ForwardWithBias( + bsz_seq, inp_norm_ptr, out_ptr, ff1_inp_ptr, output_b_ptr, _stream); + + if (!_pre_or_postLayerNorm) { + if (_layer_norm.UseMean()) + _layer_norm.ForwardCheckpoint( + bsz_seq, out_ptr, inp_norm_ptr, norm_w_ptr, norm_b_ptr, _stream, true); + else + _layer_norm.Forward( + bsz_seq, out_ptr, inp_norm_ptr, norm_w_ptr, norm_b_ptr, _stream, true); + } +} + +template +void BertTransformerLayer::Backward(unsigned bsz, + const T* grad_output_ptr, + const T* input_ptr, + const T* output_ptr, + const T* inp_norm_ptr, + const T* q_tf_ptr, + const T* k_tf_ptr, + const T* v_tf_ptr, + const T* soft_out_ptr, + const T* ctx_bufB_ptr, + const T* attn_o_inp_ptr, + const T* add_res_ptr, + const T* ff1_inp_ptr, + const T* gelu_inp_ptr, + const T* ff2_inp_ptr, + const T* input_mask_ptr, + const T* attn_qkvw_ptr, + const T* attn_ow_ptr, + const T* attn_nw_ptr, + const T* attn_nb_ptr, + const T* inter_w_ptr, + const T* inter_b_ptr, + const T* output_w_ptr, + const T* norm_w_ptr, + const T* norm_b_ptr, + + T* grad_input_ptr, + T* grad_attn_qkvw_ptr, + T* grad_attn_qkvb_ptr, + T* grad_attn_ow_ptr, + T* grad_attn_ob_ptr, + T* grad_attn_nw_ptr, + T* grad_attn_nb_ptr, + T* grad_inter_w_ptr, + T* grad_inter_b_ptr, + T* grad_output_w_ptr, + T* grad_output_b_ptr, + T* grad_norm_w_ptr, + T* grad_norm_b_ptr) +{ + cublasSetStream(_cublasHandle, _stream); + + if (!_stochastic_mode) cudaStreamSynchronize(_stream); + + T* workspace = static_cast(TrainingContext::Instance().GetWorkSpace()); + size_t small_buf_size = bsz * _seq_length * _hidden_size; + T* buf_0 = workspace; + T* buf_1 = buf_0 + small_buf_size; + T* buf_2 = buf_1 + small_buf_size; + T* buf_3 = buf_2 + small_buf_size; + + T* ff2_buf = (_gelu_checkpoint ? 
buf_3 + (bsz * _seq_length * _intermediate_size) + : buf_3 + small_buf_size); + T* ctx_bufB_ptr_recomp = ff2_buf + (_seq_length * _seq_length * bsz * _heads); + + cudaStream_t streams[2] = {_stream, _stream}; + + int bsz_seq = bsz * _seq_length; + int bsz_heads = bsz * _heads; + + if (!_pre_or_postLayerNorm) { + if (_layer_norm.UseMean()) + _layer_norm.Backward(bsz_seq, + grad_output_ptr, + norm_w_ptr, + grad_norm_w_ptr, + grad_norm_b_ptr, + streams, + buf_1, + inp_norm_ptr); + + else + _layer_norm.Backward(bsz_seq, + grad_output_ptr, + norm_w_ptr, + norm_b_ptr, + grad_norm_w_ptr, + grad_norm_b_ptr, + streams, + buf_1, + output_ptr); + } + + if (_pre_or_postLayerNorm) + _layer_output_dropout.Backward(bsz_seq, buf_0, grad_output_ptr, _stream); + else + _layer_output_dropout.Backward(bsz_seq, buf_0, buf_1, _stream); + + const T* layer_dropout_buf = _layer_output_dropout.HasDropout() + ? buf_0 + : (_pre_or_postLayerNorm ? grad_output_ptr : buf_1); + + if (_gelu_checkpoint) + _gelu.ForwardWithBiasAdd(bsz_seq, ff2_inp_ptr, inter_b_ptr, buf_2, _stream); + _ff2.Backward(bsz_seq, + layer_dropout_buf, + (_gelu_checkpoint ? buf_2 : ff2_inp_ptr), + output_w_ptr, + grad_output_w_ptr, + grad_output_b_ptr, + _cublasHandle, + _stream, + ff2_buf); + + _gelu.Backward( + bsz_seq, ff2_buf, (_gelu_checkpoint ? 
ff2_inp_ptr : gelu_inp_ptr), inter_b_ptr, _stream); + + _ff1.Backward(bsz_seq, + ff2_buf, + ff1_inp_ptr, + inter_w_ptr, + grad_inter_w_ptr, + grad_inter_b_ptr, + _cublasHandle, + _stream, + buf_3); + + if (!_pre_or_postLayerNorm) + launch_fused_add2(buf_2, buf_3, buf_1, bsz, _seq_length, _hidden_size, _stream); + + if (_pre_or_postLayerNorm) { + if (_attn_layer_norm.UseMean()) + _attn_layer_norm.BackwardFusedAdd(bsz_seq, + buf_3, + grad_output_ptr, + attn_nw_ptr, + grad_attn_nw_ptr, + grad_attn_nb_ptr, + streams, + buf_0, + add_res_ptr); + + else + _attn_layer_norm.BackwardFusedAdd(bsz_seq, + buf_3, + grad_output_ptr, + attn_nw_ptr, + attn_nb_ptr, + grad_attn_nw_ptr, + grad_attn_nb_ptr, + streams, + buf_0, + ff1_inp_ptr); + } else { + if (_attn_layer_norm.UseMean()) + _attn_layer_norm.Backward(bsz_seq, + buf_2, + attn_nw_ptr, + grad_attn_nw_ptr, + grad_attn_nb_ptr, + streams, + buf_0, + add_res_ptr); + + else + _attn_layer_norm.Backward(bsz_seq, + buf_2, + attn_nw_ptr, + attn_nb_ptr, + grad_attn_nw_ptr, + grad_attn_nb_ptr, + streams, + buf_0, + ff1_inp_ptr); + } + + _attn_output_dropout.Backward(bsz_seq, buf_2, buf_0, _stream); + + T* attn_output_dropout_buf = _attn_output_dropout.HasDropout() ? buf_2 : buf_0; + + _attn_out_linear.Backward(bsz_seq, + attn_output_dropout_buf, + attn_o_inp_ptr, + attn_ow_ptr, + grad_attn_ow_ptr, + grad_attn_ob_ptr, + _cublasHandle, + _stream, + buf_1); + + launch_transform_0213(buf_2, buf_1, bsz, _seq_length, _hidden_size, _heads, _stream); + + if (_attn_prob_dropout.HasDropout()) { + if (_attn_dropout_checkpoint) + _attn_prob_dropout.Forward( + bsz_heads * _seq_length, ctx_bufB_ptr_recomp, soft_out_ptr, _stream, true); + + _attn_context.Backward(bsz_heads, + buf_2, + v_tf_ptr, + (_attn_dropout_checkpoint ? 
ctx_bufB_ptr_recomp : ctx_bufB_ptr), + _cublasHandle, + buf_3, + ff2_buf); + } else + _attn_context.Backward( + bsz_heads, buf_2, v_tf_ptr, soft_out_ptr, _cublasHandle, buf_3, ff2_buf); + + _attn_prob_dropout.Backward(bsz_heads * _seq_length, ff2_buf, _stream); + + _softmax.Backward(bsz, ff2_buf, soft_out_ptr, _stream); + + _attn_scores.Backward(bsz_heads, ff2_buf, k_tf_ptr, q_tf_ptr, _cublasHandle, buf_2, buf_1); + + launch_transform4d_0213(ff2_buf, buf_1, bsz, _heads, _seq_length, _hidden_size, _stream, 3); + + if (_pre_or_postLayerNorm) + _qkv_linear.Backward(bsz_seq, + ff2_buf, + inp_norm_ptr, + attn_qkvw_ptr, + grad_attn_qkvw_ptr, + grad_attn_qkvb_ptr, + _cublasHandle, + _stream, + buf_2); + else + _qkv_linear.Backward(bsz_seq, + ff2_buf, + input_ptr, + attn_qkvw_ptr, + grad_attn_qkvw_ptr, + grad_attn_qkvb_ptr, + _cublasHandle, + _stream, + buf_2); + + if (_pre_or_postLayerNorm) { + if (_layer_norm.UseMean()) + _layer_norm.BackwardFusedAdd(bsz_seq, + buf_2, + buf_0, + norm_w_ptr, + grad_norm_w_ptr, + grad_norm_b_ptr, + streams, + grad_input_ptr, + input_ptr); + + else + _layer_norm.BackwardFusedAdd(bsz_seq, + buf_2, + buf_0, + norm_w_ptr, + norm_b_ptr, + grad_norm_w_ptr, + grad_norm_b_ptr, + streams, + grad_input_ptr, + inp_norm_ptr); + } else + launch_fused_add2(grad_input_ptr, buf_2, buf_0, bsz, _seq_length, _hidden_size, _stream); +} + +template +void BertTransformerLayer::SetTrainingMode(bool training) +{ + // Dropout will be skipped when not in training model. 
+ _attn_prob_dropout.SetTrainingMode(training); + _attn_output_dropout.SetTrainingMode(training); + _layer_output_dropout.SetTrainingMode(training); +} + +template +void BertTransformerLayer::SetIntermediateBuffers(uint8_t* attn_prob_dropout_mask_ptr, + uint8_t* attn_output_dropout_mask_ptr, + uint8_t* layer_output_dropout_mask_ptr, + T* attn_layer_norm_var, + T* attn_layer_norm_mean, + T* layer_norm_var, + T* layer_norm_mean) +{ + _attn_prob_dropout.SetMask(attn_prob_dropout_mask_ptr); + _attn_output_dropout.SetMask(attn_output_dropout_mask_ptr); + _layer_output_dropout.SetMask(layer_output_dropout_mask_ptr); + + _attn_layer_norm.SetVar(attn_layer_norm_var); + _attn_layer_norm.SetMean(attn_layer_norm_mean); + _layer_norm.SetVar(layer_norm_var); + _layer_norm.SetMean(layer_norm_mean); +} + +template +void BertTransformerLayer::SetSeqLength(unsigned seq_len) +{ + _seq_length = seq_len; + + _softmax.SetSeqLength(_seq_length); + _attn_prob_dropout.SetDimension(_seq_length); + _attn_scores.SetConfig(_seq_length, _seq_length, _hidden_size / _heads); + _attn_context.SetConfig(_hidden_size / _heads, _seq_length, _seq_length); +} + +template +int create_transformer_layer(unsigned layer_id, + unsigned batch_size, + unsigned hidden_dim, + unsigned num_heads, + unsigned intermediate_size, + float attn_dropout_ratio, + float hidden_dropout_ratio, + float layer_norm_eps, + int seed, + bool pre_or_postLayerNorm, + bool test_gemm, + bool attn_dropout_checkpoint, + bool normalize_invertible, + bool gelu_checkpoint, + bool stochastic_mode) +{ + TrainingContext::Instance().SetSeed(seed); + TrainingContext::Instance().TestGemmFP16( + test_gemm, batch_size, init_seq_length, num_heads, hidden_dim / num_heads); + + auto layer = + std::make_shared>(layer_id, + batch_size, + hidden_dim, + num_heads, + intermediate_size, + init_seq_length, + attn_dropout_ratio, + hidden_dropout_ratio, + layer_norm_eps, + pre_or_postLayerNorm, + TrainingContext::Instance().GetGemmAlgos(), + 
attn_dropout_checkpoint, + normalize_invertible, + gelu_checkpoint, + stochastic_mode); + + s_transformer_layers[layer_id] = layer; + + std::string dtype = (std::is_same::value) ? "half" : "float"; + + std::cout << "layer #" << layer_id << " is created with date type [" << dtype << "]." + << std::endl; + + return 0; +} + +template +std::vector ds_transformer_forward(unsigned layer_id, + const torch::Tensor& input, + const torch::Tensor& input_mask, + const torch::Tensor& attn_qkvw, + const torch::Tensor& attn_qkvb, + const torch::Tensor& attn_ow, + const torch::Tensor& attn_ob, + const torch::Tensor& attn_nw, + const torch::Tensor& attn_nb, + const torch::Tensor& inter_w, + const torch::Tensor& inter_b, + const torch::Tensor& output_w, + const torch::Tensor& output_b, + const torch::Tensor& norm_w, + const torch::Tensor& norm_b, + bool training_mode, + bool prelayernorm, + bool attn_dropout_checkpoint, + bool normalize_invertible, + bool gelu_checkpoint) +{ + CHECK_INPUT(input); + CHECK_INPUT(input_mask); + CHECK_INPUT(attn_qkvw); + CHECK_INPUT(attn_qkvb); + CHECK_INPUT(attn_ow); + CHECK_INPUT(attn_ob); + CHECK_INPUT(attn_nw); + CHECK_INPUT(attn_nb); + CHECK_INPUT(inter_w); + CHECK_INPUT(inter_b); + CHECK_INPUT(output_w); + CHECK_INPUT(output_b); + CHECK_INPUT(norm_w); + CHECK_INPUT(norm_b); + + unsigned bsz = input.size(0); + + const T* input_ptr = (const T*)input.data_ptr(); + const T* input_mask_ptr = (const T*)input_mask.data_ptr(); + const T* attn_qkvw_ptr = (const T*)attn_qkvw.data_ptr(); + const T* attn_qkvb_ptr = (const T*)attn_qkvb.data_ptr(); + const T* attn_ow_ptr = (const T*)attn_ow.data_ptr(); + const T* attn_ob_ptr = (const T*)attn_ob.data_ptr(); + const T* attn_nw_ptr = (const T*)attn_nw.data_ptr(); + const T* attn_nb_ptr = (const T*)attn_nb.data_ptr(); + const T* inter_w_ptr = (const T*)inter_w.data_ptr(); + const T* inter_b_ptr = (const T*)inter_b.data_ptr(); + const T* output_w_ptr = (const T*)output_w.data_ptr(); + const T* output_b_ptr = (const 
T*)output_b.data_ptr(); + const T* norm_w_ptr = (const T*)norm_w.data_ptr(); + const T* norm_b_ptr = (const T*)norm_b.data_ptr(); + + auto output = torch::empty_like(input); + T* out_ptr = (T*)output.data_ptr(); + + auto options = torch::TensorOptions() + .dtype(input.options().dtype()) + .layout(torch::kStrided) + .device(torch::kCUDA) + .requires_grad(true); + + auto uint8_options = torch::TensorOptions() + .dtype(torch::kInt8) + .layout(torch::kStrided) + .device(torch::kCUDA) + .requires_grad(false); + + std::shared_ptr> layer = + std::static_pointer_cast>(s_transformer_layers[layer_id]); + + unsigned seq_len = layer->GetSeqLength(); + if (input.size(1) != seq_len) { + seq_len = input.size(1); + layer->SetSeqLength(seq_len); + } + + auto workspace = torch::empty({get_workspace_size(bsz, + seq_len, + layer->GetHiddenSize(), + layer->GetIntermediateSize(), + layer->GetNumHeads(), + layer->IsTrainingMode(), + layer->GeluCheckpoint())}, + options); + TrainingContext::Instance().SetWorkSpace((T*)workspace.data_ptr()); + + auto inp_norm = ((prelayernorm || !normalize_invertible) ? torch::empty_like(input) : output); + auto add_res = (normalize_invertible ? 
inp_norm : torch::empty_like(input)); + auto attn_o_inp = torch::empty_like(input); + auto qkv_tf = torch::empty({(bsz * seq_len), output_w.size(0) * 3}, options); + + auto attn_prob_dropout_mask = + torch::empty({(bsz * layer->GetNumHeads() * seq_len), seq_len}, uint8_options); + auto attn_output_dropout_mask = + torch::empty({(bsz * seq_len), layer->GetHiddenSize()}, uint8_options); + auto layer_output_dropout_mask = + torch::empty({(bsz * seq_len), layer->GetHiddenSize()}, uint8_options); + + auto attn_layer_norm_var = torch::empty({(bsz * seq_len)}, options); + auto attn_layer_norm_mean = torch::empty({(bsz * seq_len)}, options); + auto layer_norm_var = torch::empty({(bsz * seq_len)}, options); + auto layer_norm_mean = torch::empty({(bsz * seq_len)}, options); + + T* inp_norm_ptr = (T*)inp_norm.data_ptr(); + T* add_res_ptr = (T*)add_res.data_ptr(); + T* q_tf_ptr = (T*)qkv_tf.data_ptr(); + T* k_tf_ptr = q_tf_ptr + (bsz * seq_len * output_w.size(0)); //(T*)k_tf.data_ptr(); + T* v_tf_ptr = k_tf_ptr + (bsz * seq_len * output_w.size(0)); //(T*)v_tf.data_ptr(); + T* attn_o_inp_ptr = (T*)attn_o_inp.data_ptr(); + + torch::Tensor ff2_inp = torch::empty({(bsz * seq_len), output_w.size(1)}, options); + torch::Tensor gelu_inp = + (gelu_checkpoint ? ff2_inp : torch::empty({(bsz * seq_len), output_w.size(1)}, options)); + auto ff1_inp = torch::empty_like(input); + T* ff2_inp_ptr = (T*)ff2_inp.data_ptr(); + T* gelu_inp_ptr = (T*)gelu_inp.data_ptr(); + T* ff1_inp_ptr = (T*)ff1_inp.data_ptr(); + + torch::Tensor soft_out = + torch::empty({(bsz * layer->GetNumHeads() * seq_len), seq_len}, options); + torch::Tensor ctx_bufB = + (attn_dropout_checkpoint + ? 
soft_out + : torch::empty({(bsz * layer->GetNumHeads() * seq_len), seq_len}, options)); + T* soft_out_ptr = (T*)soft_out.data_ptr(); + T* ctx_bufB_ptr = (T*)ctx_bufB.data_ptr(); + + layer->SetTrainingMode(training_mode); + layer->SetIntermediateBuffers((uint8_t*)attn_prob_dropout_mask.data_ptr(), + (uint8_t*)attn_output_dropout_mask.data_ptr(), + (uint8_t*)layer_output_dropout_mask.data_ptr(), + (T*)attn_layer_norm_var.data_ptr(), + (T*)attn_layer_norm_mean.data_ptr(), + (T*)layer_norm_var.data_ptr(), + (T*)layer_norm_mean.data_ptr()); + + layer->Forward(bsz, + input_ptr, + input_mask_ptr, + attn_qkvw_ptr, + attn_qkvb_ptr, + attn_ow_ptr, + attn_ob_ptr, + attn_nw_ptr, + attn_nb_ptr, + inter_w_ptr, + inter_b_ptr, + output_w_ptr, + output_b_ptr, + norm_w_ptr, + norm_b_ptr, + out_ptr, + inp_norm_ptr, + q_tf_ptr, + k_tf_ptr, + v_tf_ptr, + soft_out_ptr, + ctx_bufB_ptr, + attn_o_inp_ptr, + add_res_ptr, + ff1_inp_ptr, + gelu_inp_ptr, + ff2_inp_ptr); + + return {output, + inp_norm, + qkv_tf, + soft_out, + ctx_bufB, + attn_o_inp, + add_res, + ff1_inp, + gelu_inp, + ff2_inp, + attn_prob_dropout_mask, + attn_output_dropout_mask, + layer_output_dropout_mask, + attn_layer_norm_var, + attn_layer_norm_mean, + layer_norm_var, + layer_norm_mean}; +} + +template +std::vector ds_transformer_backward(unsigned layer_id, + const torch::Tensor& grad_output, + const torch::Tensor& output, + const torch::Tensor& inp_norm, + const torch::Tensor& qkv_tf, + const torch::Tensor& soft_out, + const torch::Tensor& ctx_bufB, + const torch::Tensor& attn_o_inp, + const torch::Tensor& add_res, + const torch::Tensor& ff1_inp, + const torch::Tensor& gelu_inp, + const torch::Tensor& ff2_inp, + const torch::Tensor& attn_prob_dropout_mask, + const torch::Tensor& attn_output_dropout_mask, + const torch::Tensor& layer_output_dropout_mask, + const torch::Tensor& attn_layer_norm_var, + const torch::Tensor& attn_layer_norm_mean, + const torch::Tensor& layer_norm_var, + const torch::Tensor& layer_norm_mean, + 
const torch::Tensor& input, + const torch::Tensor& input_mask, + const torch::Tensor& attn_qkvw, + const torch::Tensor& attn_qkvb, + const torch::Tensor& attn_ow, + const torch::Tensor& attn_ob, + const torch::Tensor& attn_nw, + const torch::Tensor& attn_nb, + const torch::Tensor& inter_w, + const torch::Tensor& inter_b, + const torch::Tensor& output_w, + const torch::Tensor& output_b, + const torch::Tensor& norm_w, + const torch::Tensor& norm_b) +{ + auto g_output = grad_output.contiguous(); + CHECK_INPUT(g_output); + CHECK_INPUT(output); + CHECK_INPUT(inp_norm); + CHECK_INPUT(qkv_tf); + CHECK_INPUT(add_res); + CHECK_INPUT(soft_out); + CHECK_INPUT(ctx_bufB); + CHECK_INPUT(attn_o_inp); + CHECK_INPUT(ff1_inp); + CHECK_INPUT(gelu_inp); + CHECK_INPUT(ff2_inp); + CHECK_INPUT(input); + CHECK_INPUT(input_mask); + CHECK_INPUT(attn_qkvw); + CHECK_INPUT(attn_qkvb); + CHECK_INPUT(attn_ow); + CHECK_INPUT(attn_ob); + CHECK_INPUT(attn_nw); + CHECK_INPUT(attn_nb); + CHECK_INPUT(inter_w); + CHECK_INPUT(inter_b); + CHECK_INPUT(output_w); + CHECK_INPUT(output_b); + CHECK_INPUT(norm_w); + CHECK_INPUT(norm_b); + + unsigned bsz = g_output.size(0); + + std::shared_ptr> layer = + std::static_pointer_cast>(s_transformer_layers[layer_id]); + + unsigned seq_len = layer->GetSeqLength(); + if (g_output.size(1) != seq_len) { + seq_len = g_output.size(1); + layer->SetSeqLength(seq_len); + } + auto options = torch::TensorOptions() + .dtype(g_output.options().dtype()) + .layout(torch::kStrided) + .device(torch::kCUDA) + .requires_grad(true); + auto workspace = torch::empty({get_workspace_size(bsz, + seq_len, + layer->GetHiddenSize(), + layer->GetIntermediateSize(), + layer->GetNumHeads(), + layer->IsTrainingMode(), + layer->GeluCheckpoint())}, + options); + TrainingContext::Instance().SetWorkSpace((T*)workspace.data_ptr()); + + auto grad_input = torch::empty_like(input); + auto grad_attn_qkvw = torch::empty_like(attn_qkvw); + auto grad_attn_qkvb = torch::empty_like(attn_qkvb); + auto 
grad_attn_ow = torch::empty_like(attn_ow); + auto grad_attn_ob = torch::empty_like(attn_ob); + auto grad_attn_nw = torch::empty_like(attn_nw); + auto grad_attn_nb = torch::empty_like(attn_nb); + auto grad_inter_w = torch::empty_like(inter_w); + auto grad_inter_b = torch::empty_like(inter_b); + auto grad_output_w = torch::empty_like(output_w); + auto grad_output_b = torch::empty_like(output_b); + auto grad_norm_w = torch::empty_like(norm_w); + auto grad_norm_b = torch::empty_like(norm_b); + + // inputs. + const T* grad_output_ptr = (const T*)g_output.data_ptr(); + const T* input_ptr = (const T*)input.data_ptr(); + const T* output_ptr = (const T*)output.data_ptr(); + const T* inp_norm_ptr = (const T*)inp_norm.data_ptr(); + const T* q_tf_ptr = (const T*)qkv_tf.data_ptr(); + const T* add_res_ptr = (const T*)add_res.data_ptr(); + const T* k_tf_ptr = + q_tf_ptr + (bsz * layer->GetSeqLength() * output_w.size(0)); //(const T*)k_tf.data_ptr(); + const T* v_tf_ptr = + k_tf_ptr + (bsz * layer->GetSeqLength() * output_w.size(0)); //(const T*)v_tf.data_ptr(); + const T* ff1_inp_ptr = (const T*)ff1_inp.data_ptr(); + const T* gelu_inp_ptr = (const T*)gelu_inp.data_ptr(); + const T* ff2_inp_ptr = (const T*)ff2_inp.data_ptr(); + const T* ctx_bufB_ptr = (const T*)ctx_bufB.data_ptr(); + const T* soft_out_ptr = (const T*)soft_out.data_ptr(); + const T* attn_o_inp_ptr = (const T*)attn_o_inp.data_ptr(); + const T* input_mask_ptr = (const T*)input_mask.data_ptr(); + const T* attn_qkvw_ptr = (const T*)attn_qkvw.data_ptr(); + const T* attn_ow_ptr = (const T*)attn_ow.data_ptr(); + const T* attn_nw_ptr = (const T*)attn_nw.data_ptr(); + const T* attn_nb_ptr = (const T*)attn_nb.data_ptr(); + const T* inter_w_ptr = (const T*)inter_w.data_ptr(); + const T* inter_b_ptr = (const T*)inter_b.data_ptr(); + const T* output_w_ptr = (const T*)output_w.data_ptr(); + const T* norm_w_ptr = (const T*)norm_w.data_ptr(); + const T* norm_b_ptr = (const T*)norm_b.data_ptr(); + + // outputs. 
+ T* grad_input_ptr = (T*)grad_input.data_ptr(); + T* grad_attn_qkvw_ptr = (T*)grad_attn_qkvw.data_ptr(); + T* grad_attn_qkvb_ptr = (T*)grad_attn_qkvb.data_ptr(); + T* grad_attn_ow_ptr = (T*)grad_attn_ow.data_ptr(); + T* grad_attn_ob_ptr = (T*)grad_attn_ob.data_ptr(); + T* grad_attn_nw_ptr = (T*)grad_attn_nw.data_ptr(); + T* grad_attn_nb_ptr = (T*)grad_attn_nb.data_ptr(); + T* grad_inter_w_ptr = (T*)grad_inter_w.data_ptr(); + T* grad_inter_b_ptr = (T*)grad_inter_b.data_ptr(); + T* grad_output_w_ptr = (T*)grad_output_w.data_ptr(); + T* grad_output_b_ptr = (T*)grad_output_b.data_ptr(); + T* grad_norm_w_ptr = (T*)grad_norm_w.data_ptr(); + T* grad_norm_b_ptr = (T*)grad_norm_b.data_ptr(); + + layer->SetIntermediateBuffers((uint8_t*)attn_prob_dropout_mask.data_ptr(), + (uint8_t*)attn_output_dropout_mask.data_ptr(), + (uint8_t*)layer_output_dropout_mask.data_ptr(), + (T*)attn_layer_norm_var.data_ptr(), + (T*)attn_layer_norm_mean.data_ptr(), + (T*)layer_norm_var.data_ptr(), + (T*)layer_norm_mean.data_ptr()); + + layer->Backward(bsz, + grad_output_ptr, + input_ptr, + output_ptr, + inp_norm_ptr, + q_tf_ptr, + k_tf_ptr, + v_tf_ptr, + soft_out_ptr, + ctx_bufB_ptr, + attn_o_inp_ptr, + add_res_ptr, + ff1_inp_ptr, + gelu_inp_ptr, + ff2_inp_ptr, + input_mask_ptr, + attn_qkvw_ptr, + attn_ow_ptr, + attn_nw_ptr, + attn_nb_ptr, + inter_w_ptr, + inter_b_ptr, + output_w_ptr, + norm_w_ptr, + norm_b_ptr, + + grad_input_ptr, + grad_attn_qkvw_ptr, + grad_attn_qkvb_ptr, + grad_attn_ow_ptr, + grad_attn_ob_ptr, + grad_attn_nw_ptr, + grad_attn_nb_ptr, + grad_inter_w_ptr, + grad_inter_b_ptr, + grad_output_w_ptr, + grad_output_b_ptr, + grad_norm_w_ptr, + grad_norm_b_ptr); + + return {grad_input, + grad_attn_qkvw, + grad_attn_qkvb, + grad_attn_ow, + grad_attn_ob, + grad_attn_nw, + grad_attn_nb, + grad_inter_w, + grad_inter_b, + grad_output_w, + grad_output_b, + grad_norm_w, + grad_norm_b}; +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) +{ + m.def("forward_fp32", + &ds_transformer_forward, + 
"DeepSpeed Transformer forward with fp32 (CUDA)"); + m.def("forward_fp16", + &ds_transformer_forward<__half>, + "DeepSpeed Transformer forward with fp16 (CUDA)"); + m.def("backward_fp32", + &ds_transformer_backward, + "DeepSpeed Transformer backward with fp32 (CUDA)"); + m.def("backward_fp16", + &ds_transformer_backward<__half>, + "DeepSpeed Transformer backward with fp16 (CUDA)"); + m.def("create_transformer_layer_fp32", + &create_transformer_layer, + "Create DeepSpeed Transformer Transformer Layer with fp32 (CUDA)"); + m.def("create_transformer_layer_fp16", + &create_transformer_layer<__half>, + "Create DeepSpeed Transformer Transformer Layer with fp16 (CUDA)"); +} diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/gelu_kernels.cu b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/gelu_kernels.cu new file mode 100644 index 0000000000000000000000000000000000000000..273891b91923056fe3468600827f538c9cee5009 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/gelu_kernels.cu @@ -0,0 +1,335 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "custom_cuda_layers.h" + +inline __device__ float gelu(const float x) +{ + const float sqrt_param = 0.79788456080286535587989211986876f; + const float mul_param = 0.044715; + return x * 0.5f * (1.0f + tanhf(sqrt_param * (x + mul_param * x * x * x))); +} + +inline __device__ float d_gelu(const float x) +{ + const float sqrt_param = 0.79788456080286535587989211986876f; + const float mul_param = 0.044715; + + float x2mul = x * x * mul_param; + float tan_h = tanhf(sqrt_param * (x + x * x2mul)); + float dg1 = 0.5f * (1.0f + tan_h); + float dg2 = x * 0.5f * sqrt_param * (1 - tan_h * tan_h); + float dg3 = dg2 * 3 * x2mul; + return (dg1 + dg2 + dg3); +} + +/* +Fused bias add with GELU + +Loads a vector of 4 elements each iteration, for stride +iterations. 
It was written with the intention to launch 256 thread +threadblocks, so to launch for bert-large, we would set ITERATIONS +to 4. This is currently done automatically as a heuristic, setting +the number of iterations as blocks of 1024. + +For FP16, the values are loaded from memory as __half, but converted +to FP32 for the arithmetic itself, to prevent numerous overflow on +the intermediate hyperbolic tangent, since there's no intrinsic +that computes it directly. +*/ + +__global__ void gelu_kernel(const float* input, float* vals, int row_stride, int iterations) +{ + int row = blockIdx.x; + int id = threadIdx.x; + int loop_stride = blockDim.x; + + const float4* input_cast = reinterpret_cast(input); + float4* vals_cast = reinterpret_cast(vals); + + for (int i = 0; i < iterations; i++) { + if (i * loop_stride + id < row_stride) { + float4 data = input_cast[row * row_stride + i * loop_stride + id]; + + data.x = gelu(data.x); + data.y = gelu(data.y); + data.z = gelu(data.z); + data.w = gelu(data.w); + + vals_cast[row * row_stride + i * loop_stride + id] = data; + } + } +} + +__global__ void gelu_kernel(const __half* input, __half* vals, int row_stride, int iterations) +{ +#ifdef HALF_PRECISION_AVAILABLE + int row = blockIdx.x; + int id = threadIdx.x; + int loop_stride = blockDim.x; + + const float2* input_cast = reinterpret_cast(input); + float2* vals_cast = reinterpret_cast(vals); + + for (int i = 0; i < iterations; i++) { + if (i * loop_stride + id < row_stride) { + float2 vals_vec = input_cast[row * row_stride + i * loop_stride + id]; + + __half2* vals_half = reinterpret_cast<__half2*>(&vals_vec); + + float2 low_data = __half22float2(vals_half[0]); + float2 high_data = __half22float2(vals_half[1]); + + low_data.x = gelu(low_data.x); + low_data.y = gelu(low_data.y); + high_data.x = gelu(high_data.x); + high_data.y = gelu(high_data.y); + + vals_half[0] = __float22half2_rn(low_data); + vals_half[1] = __float22half2_rn(high_data); + + vals_cast[row * row_stride + i * 
loop_stride + id] = vals_vec; + } + } +#endif +} + +__global__ void fused_bias_gelu(const float* input, + const float* bias, + float* vals, + int row_stride, + int iterations) +{ + int row = blockIdx.x; + int id = threadIdx.x; + int loop_stride = blockDim.x; + + const float4* input_cast = reinterpret_cast(input); + float4* vals_cast = reinterpret_cast(vals); + const float4* bias_cast = reinterpret_cast(bias); + + for (int i = 0; i < iterations; i++) { + if (i * loop_stride + id < row_stride) { + float4 data = input_cast[row * row_stride + i * loop_stride + id]; + float4 bias_data = bias_cast[i * loop_stride + id]; + + data.x += bias_data.x; + data.y += bias_data.y; + data.z += bias_data.z; + data.w += bias_data.w; + + data.x = gelu(data.x); + data.y = gelu(data.y); + data.z = gelu(data.z); + data.w = gelu(data.w); + + vals_cast[row * row_stride + i * loop_stride + id] = data; + } + } +} + +__global__ void fused_bias_gelu(const __half* input, + const __half* bias, + __half* vals, + int row_stride, + int iterations) +{ +#ifdef HALF_PRECISION_AVAILABLE + int row = blockIdx.x; + int id = threadIdx.x; + int loop_stride = blockDim.x; + + const float2* input_cast = reinterpret_cast(input); + float2* vals_cast = reinterpret_cast(vals); + const float2* bias_cast = reinterpret_cast(bias); + + for (int i = 0; i < iterations; i++) { + if (i * loop_stride + id < row_stride) { + float2 vals_vec = input_cast[row * row_stride + i * loop_stride + id]; + float2 bias_vec = bias_cast[i * loop_stride + id]; + + __half2* vals_half = reinterpret_cast<__half2*>(&vals_vec); + __half2* bias_half = reinterpret_cast<__half2*>(&bias_vec); + + float2 low_data = __half22float2(vals_half[0]); + float2 high_data = __half22float2(vals_half[1]); + + float2 low_bias = __half22float2(bias_half[0]); + float2 high_bias = __half22float2(bias_half[1]); + + low_data.x += low_bias.x; + low_data.y += low_bias.y; + high_data.x += high_bias.x; + high_data.y += high_bias.y; + + low_data.x = gelu(low_data.x); + 
low_data.y = gelu(low_data.y); + high_data.x = gelu(high_data.x); + high_data.y = gelu(high_data.y); + + vals_half[0] = __float22half2_rn(low_data); + vals_half[1] = __float22half2_rn(high_data); + + vals_cast[row * row_stride + i * loop_stride + id] = vals_vec; + } + } +#endif +} + +__global__ void d_gelu_func(float* d_output, + const float* gelu_input, + const float* bias, + int row_stride, + int iterations) +{ + int row = blockIdx.x; + int id = threadIdx.x; + int loop_stride = blockDim.x; + + float4* d_output_cast = reinterpret_cast(d_output); + const float4* gelu_input_cast = reinterpret_cast(gelu_input); + const float4* bias_cast = reinterpret_cast(bias); + + for (int i = 0; i < iterations; i++) { + if (i * loop_stride + id < row_stride) { + float4 output_data = d_output_cast[row * row_stride + i * loop_stride + id]; + float4 gelu_input_data = gelu_input_cast[row * row_stride + i * loop_stride + id]; + float4 bias_data = bias_cast[i * loop_stride + id]; + + gelu_input_data.x += bias_data.x; + gelu_input_data.y += bias_data.y; + gelu_input_data.z += bias_data.z; + gelu_input_data.w += bias_data.w; + + output_data.x *= d_gelu(gelu_input_data.x); + output_data.y *= d_gelu(gelu_input_data.y); + output_data.z *= d_gelu(gelu_input_data.z); + output_data.w *= d_gelu(gelu_input_data.w); + + d_output_cast[row * row_stride + i * loop_stride + id] = output_data; + } + } +} + +__global__ void d_gelu_func(__half* d_output, + const __half* gelu_input, + const __half* bias, + int row_stride, + int iterations) +{ +#ifdef HALF_PRECISION_AVAILABLE + int row = blockIdx.x; + int id = threadIdx.x; + int loop_stride = blockDim.x; + + float2* d_output_cast = reinterpret_cast(d_output); + const float2* gelu_input_cast = reinterpret_cast(gelu_input); + const float2* bias_cast = reinterpret_cast(bias); + +#pragma unroll + for (int i = 0; i < iterations; i++) { + if (i * loop_stride + id < row_stride) { + float2 output_data = d_output_cast[row * row_stride + i * loop_stride + id]; + 
float2 gelu_input_data = gelu_input_cast[row * row_stride + i * loop_stride + id]; + float2 bias_vec = bias_cast[i * loop_stride + id]; + + __half2* output_data_half = reinterpret_cast<__half2*>(&output_data); + __half2* gelu_input_data_half = reinterpret_cast<__half2*>(&gelu_input_data); + __half2* bias_half = reinterpret_cast<__half2*>(&bias_vec); + + float2 output_half_0 = __half22float2(output_data_half[0]); + float2 output_half_1 = __half22float2(output_data_half[1]); + + float2 gelu_input_half_0 = __half22float2(gelu_input_data_half[0]); + float2 gelu_input_half_1 = __half22float2(gelu_input_data_half[1]); + + float2 bias_half_0 = __half22float2(bias_half[0]); + float2 bias_half_1 = __half22float2(bias_half[1]); + + gelu_input_half_0.x += bias_half_0.x; + gelu_input_half_0.y += bias_half_0.y; + gelu_input_half_1.x += bias_half_1.x; + gelu_input_half_1.y += bias_half_1.y; + + output_half_0.x *= d_gelu(gelu_input_half_0.x); + output_half_0.y *= d_gelu(gelu_input_half_0.y); + output_half_1.x *= d_gelu(gelu_input_half_1.x); + output_half_1.y *= d_gelu(gelu_input_half_1.y); + + float2 result; + __half2* result_half2 = reinterpret_cast<__half2*>(&result); + + result_half2[0] = __float22half2_rn(output_half_0); + result_half2[1] = __float22half2_rn(output_half_1); + + d_output_cast[row * row_stride + i * loop_stride + id] = result; + } + } +#endif +} + +template +void launch_bias_gelu(const T* input, + const T* bias, + T* output, + int intermediate_size, + int batch_size, + cudaStream_t stream) +{ + int iterations = (intermediate_size + 1023) / 1024; + int threads = (intermediate_size - 1) / (iterations * 4) + 1; + dim3 block_dims(threads); + dim3 grid_dims(batch_size); + + fused_bias_gelu<<>>( + input, bias, output, intermediate_size / 4, iterations); +} + +template +void launch_gelu(const T* input, + T* output, + int intermediate_size, + int batch_size, + cudaStream_t stream) +{ + int iterations = (intermediate_size + 1023) / 1024; + int threads = 
(intermediate_size - 1) / (iterations * 4) + 1; + dim3 block_dims(threads); + dim3 grid_dims(batch_size); + + gelu_kernel<<>>( + input, output, intermediate_size / 4, iterations); +} + +template void launch_bias_gelu(const float*, const float*, float*, int, int, cudaStream_t); +template void launch_bias_gelu<__half>(const __half*, + const __half*, + __half*, + int, + int, + cudaStream_t); + +template void launch_gelu(const float*, float*, int, int, cudaStream_t); +template void launch_gelu<__half>(const __half*, __half*, int, int, cudaStream_t); + +template +void launch_d_gelu(T* d_output, + const T* input, + const T* bias, + int intermediate_size, + int batch_size, + cudaStream_t stream) +{ + int iterations = (intermediate_size + 1023) / 1024; + int threads = (intermediate_size - 1) / (iterations * 4) + 1; + dim3 block_dims(threads); + dim3 grid_dims(batch_size); + + d_gelu_func<<>>( + d_output, input, bias, intermediate_size / 4, iterations); +} + +template void launch_d_gelu(float*, const float*, const float*, int, int, cudaStream_t); +template void launch_d_gelu<__half>(__half*, const __half*, const __half*, int, int, cudaStream_t); diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/general_kernels.cu b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/general_kernels.cu new file mode 100644 index 0000000000000000000000000000000000000000..a987eec5ef0bdc87153422eae635507f44188802 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/general_kernels.cu @@ -0,0 +1,416 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "general_kernels.h" + +namespace cg = cooperative_groups; + +template +__global__ void column_sum_reduce(const T* __restrict__ inp, + T* __restrict__ out, + int rows, + int width) +{ + __shared__ float tile[TILE_DIM][TILE_DIM + 1]; + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = cg::tiled_partition(b); + + int idx = blockDim.x * blockIdx.x + threadIdx.x; + + int y_stride = width * TILE_DIM; + + float localSum = 0; + + // Loop across matrix height + if (idx < width) { + int offset = threadIdx.y * width + idx; + for (int r = threadIdx.y; r < rows; r += TILE_DIM) { + localSum += (float)inp[offset]; + offset += y_stride; + } + } + + tile[threadIdx.x][threadIdx.y] = localSum; + + __syncthreads(); + + // Sum the shared buffer. + float sum = tile[threadIdx.y][threadIdx.x]; + +#ifndef __STOCHASTIC_MODE__ + __syncthreads(); +#endif + + for (int i = 1; i < TILE_DIM; i <<= 1) sum += g.shfl_down(sum, i); + + if (threadIdx.x == 0) { + int pos = blockIdx.x * TILE_DIM + threadIdx.y; + if (pos < width) out[pos] = sum; + } +} + +template +void launch_fuse_transpose_bias_kernel(const T* inp, + T* out, + int rows, + int cols, + cudaStream_t stream); + +template <> +void launch_fuse_transpose_bias_kernel(const float* inp, + float* out, + int rows, + int cols, + cudaStream_t stream) +{ + // assert(rows % TILE_DIM == 0); + // assert(cols % TILE_DIM == 0); + + dim3 grid_dim((cols - 1) / TILE_DIM + 1); + dim3 block_dim(TILE_DIM, TILE_DIM); + + column_sum_reduce<<>>(inp, out, rows, cols); +} + +template <> +void launch_fuse_transpose_bias_kernel<__half>(const __half* inp, + __half* out, + int rows, + int cols, + cudaStream_t stream) +{ + // assert(rows % TILE_DIM == 0); + // assert(cols % TILE_DIM == 0); + + dim3 grid_dim((cols - 1) / TILE_DIM + 1); + dim3 block_dim(TILE_DIM, TILE_DIM); + + column_sum_reduce<__half><<>>(inp, out, rows, cols); +} + +__global__ void fused_add2_kernel(const 
int N, float* out, const float* inp1, const float* inp2) +{ + const float4* inp1_4 = reinterpret_cast(inp1); + const float4* inp2_4 = reinterpret_cast(inp2); + float4* out_4 = reinterpret_cast(out); + + CUDA_1D_KERNEL_LOOP(j, N) + { + float4 val; + float4 inp1_reg = inp1_4[j]; + float4 inp2_reg = inp2_4[j]; + + val.x = inp1_reg.x + inp2_reg.x; + val.y = inp1_reg.y + inp2_reg.y; + val.z = inp1_reg.z + inp2_reg.z; + val.w = inp1_reg.w + inp2_reg.w; + + out_4[j] = val; + } +} + +__global__ void fused_add2_kernel(const int N, __half* out, const __half* inp1, const __half* inp2) +{ + float2 inp1_4; + float2 inp2_4; + + __half2* inp1_h = reinterpret_cast<__half2*>(&inp1_4); + __half2* inp2_h = reinterpret_cast<__half2*>(&inp2_4); + + const float2* inp1_arr = reinterpret_cast(inp1); + const float2* inp2_arr = reinterpret_cast(inp2); + + CUDA_1D_KERNEL_LOOP(j, N) + { + inp1_4 = inp1_arr[j]; + inp2_4 = inp2_arr[j]; + + float2 inp1_h_f_0 = __half22float2(inp1_h[0]); + float2 inp1_h_f_1 = __half22float2(inp1_h[1]); + + float2 inp2_h_f_0 = __half22float2(inp2_h[0]); + float2 inp2_h_f_1 = __half22float2(inp2_h[1]); + + inp1_h_f_0.x += inp2_h_f_0.x; + inp1_h_f_0.y += inp2_h_f_0.y; + inp1_h_f_1.x += inp2_h_f_1.x; + inp1_h_f_1.y += inp2_h_f_1.y; + + float2 val_f; + __half2* val_h = reinterpret_cast<__half2*>(&val_f); + + val_h[0] = __float22half2_rn(inp1_h_f_0); + val_h[1] = __float22half2_rn(inp1_h_f_1); + + float2* out_4 = reinterpret_cast(out); + out_4[j] = val_f; + } +} + +template <> +void launch_fused_add2(float* out, + const float* inp1, + const float* inp2, + int batch_size, + int seq_length, + int hidden_dim, + cudaStream_t& stream) +{ + int total_count = batch_size * seq_length * hidden_dim / 4; + dim3 grid_dim = DS_GET_BLOCKS(total_count); //(batch_size * seq_length); + + dim3 block_dim = DS_CUDA_NUM_THREADS; //(hidden_dim / 4); + + fused_add2_kernel<<>>(total_count, out, inp1, inp2); +} + +template <> +void launch_fused_add2<__half>(__half* out, + const __half* inp1, + 
const __half* inp2, + int batch_size, + int seq_length, + int hidden_dim, + cudaStream_t& stream) +{ + int total_count = batch_size * seq_length * hidden_dim / 4; + dim3 grid_dim = DS_GET_BLOCKS(total_count); //(batch_size * seq_length); + + dim3 block_dim = DS_CUDA_NUM_THREADS; //(hidden_dim / 4); + + fused_add2_kernel<<>>(total_count, out, inp1, inp2); +} + +__global__ void fused_add3_kernel(float* out, + const float* inp1, + const float* inp2, + const float* inp3, + int size, + int row_stride) +{ + int row = blockIdx.x; + int id = threadIdx.x; + + const float4* inp1_4 = reinterpret_cast(inp1); + const float4* inp2_4 = reinterpret_cast(inp2); + const float4* inp3_4 = reinterpret_cast(inp3); + + float4* out_4 = reinterpret_cast(out); + + float4 val; + float4 inp1_reg = inp1_4[row * row_stride + id]; + float4 inp2_reg = inp2_4[row * row_stride + id]; + float4 inp3_reg = inp3_4[row * row_stride + id]; + + val.x = inp1_reg.x + inp2_reg.x + inp3_reg.x; + val.y = inp1_reg.y + inp2_reg.y + inp3_reg.y; + val.z = inp1_reg.z + inp2_reg.z + inp3_reg.z; + val.w = inp1_reg.w + inp2_reg.w + inp3_reg.w; + + out_4[row * row_stride + id] = val; +} + +__global__ void fused_add3_kernel(__half* out, + const __half* inp1, + const __half* inp2, + const __half* inp3, + int size, + int row_stride) +{ + int row = blockIdx.x; + int id = threadIdx.x; + const float2* inp1_arr = reinterpret_cast(inp1); + const float2* inp2_arr = reinterpret_cast(inp2); + const float2* inp3_arr = reinterpret_cast(inp3); + + float2 inp1_4 = inp1_arr[row * row_stride + id]; + float2 inp2_4 = inp2_arr[row * row_stride + id]; + float2 inp3_4 = inp3_arr[row * row_stride + id]; + + __half2* inp1_h = reinterpret_cast<__half2*>(&inp1_4); + __half2* inp2_h = reinterpret_cast<__half2*>(&inp2_4); + __half2* inp3_h = reinterpret_cast<__half2*>(&inp3_4); + + float2 inp1_h_f_0 = __half22float2(inp1_h[0]); + float2 inp1_h_f_1 = __half22float2(inp1_h[1]); + + float2 inp2_h_f_0 = __half22float2(inp2_h[0]); + float2 inp2_h_f_1 
= __half22float2(inp2_h[1]); + + float2 inp3_h_f_0 = __half22float2(inp3_h[0]); + float2 inp3_h_f_1 = __half22float2(inp3_h[1]); + + inp1_h_f_0.x += (inp2_h_f_0.x + inp3_h_f_0.x); + inp1_h_f_0.y += (inp2_h_f_0.y + inp3_h_f_0.y); + inp1_h_f_1.x += (inp2_h_f_1.x + inp3_h_f_1.x); + inp1_h_f_1.y += (inp2_h_f_1.y + inp3_h_f_1.y); + + float2 val_f; + __half2* val_h = reinterpret_cast<__half2*>(&val_f); + + val_h[0] = __float22half2_rn(inp1_h_f_0); + val_h[1] = __float22half2_rn(inp1_h_f_1); + + float2* out_4 = reinterpret_cast(out); + out_4[row * row_stride + id] = val_f; +} + +template <> +void launch_fused_add3(float* out, + const float* inp1, + const float* inp2, + const float* inp3, + int batch_size, + int seq_length, + int hidden_size, + cudaStream_t& stream) +{ + dim3 grid_dim(batch_size * seq_length); + + dim3 block_dim(hidden_size / 4); + + fused_add3_kernel<<>>( + out, inp1, inp2, inp3, (batch_size * seq_length * hidden_size), hidden_size / 4); +} + +template <> +void launch_fused_add3<__half>(__half* out, + const __half* inp1, + const __half* inp2, + const __half* inp3, + int batch_size, + int seq_length, + int hidden_size, + cudaStream_t& stream) +{ + dim3 grid_dim(batch_size * seq_length); + + dim3 block_dim(hidden_size / 4); + + fused_add3_kernel<<>>( + out, inp1, inp2, inp3, (batch_size * seq_length * hidden_size), hidden_size / 4); +} + +__global__ void fused_add4_kernel(float* out, + const float* inp1, + const float* inp2, + const float* inp3, + const float* inp4, + int size, + int row_stride) +{ + int row = blockIdx.x; + int id = threadIdx.x; + + const float4* inp1_4 = reinterpret_cast(inp1); + const float4* inp2_4 = reinterpret_cast(inp2); + const float4* inp3_4 = reinterpret_cast(inp3); + const float4* inp4_4 = reinterpret_cast(inp4); + float4* out_4 = reinterpret_cast(out); + + float4 val; + float4 inp1_reg = inp1_4[row * row_stride + id]; + float4 inp2_reg = inp2_4[row * row_stride + id]; + float4 inp3_reg = inp3_4[row * row_stride + id]; + float4 
inp4_reg = inp4_4[row * row_stride + id]; + + val.x = inp1_reg.x + inp2_reg.x + inp3_reg.x + inp4_reg.x; + val.y = inp1_reg.y + inp2_reg.y + inp3_reg.y + inp4_reg.y; + val.z = inp1_reg.z + inp2_reg.z + inp3_reg.z + inp4_reg.z; + val.w = inp1_reg.w + inp2_reg.w + inp3_reg.w + inp4_reg.w; + + out_4[row * row_stride + id] = val; +} + +__global__ void fused_add4_kernel(__half* out, + const __half* inp1, + const __half* inp2, + const __half* inp3, + const __half* inp4, + int size, + int row_stride) +{ + int row = blockIdx.x; + int id = threadIdx.x; + const float2* inp1_arr = reinterpret_cast(inp1); + const float2* inp2_arr = reinterpret_cast(inp2); + const float2* inp3_arr = reinterpret_cast(inp3); + const float2* inp4_arr = reinterpret_cast(inp4); + + float2 inp1_4 = inp1_arr[row * row_stride + id]; + float2 inp2_4 = inp2_arr[row * row_stride + id]; + float2 inp3_4 = inp3_arr[row * row_stride + id]; + float2 inp4_4 = inp4_arr[row * row_stride + id]; + + __half2* inp1_h = reinterpret_cast<__half2*>(&inp1_4); + __half2* inp2_h = reinterpret_cast<__half2*>(&inp2_4); + __half2* inp3_h = reinterpret_cast<__half2*>(&inp3_4); + __half2* inp4_h = reinterpret_cast<__half2*>(&inp4_4); + + float2 inp1_h_f_0 = __half22float2(inp1_h[0]); + float2 inp1_h_f_1 = __half22float2(inp1_h[1]); + + float2 inp2_h_f_0 = __half22float2(inp2_h[0]); + float2 inp2_h_f_1 = __half22float2(inp2_h[1]); + + float2 inp3_h_f_0 = __half22float2(inp3_h[0]); + float2 inp3_h_f_1 = __half22float2(inp3_h[1]); + + float2 inp4_h_f_0 = __half22float2(inp4_h[0]); + float2 inp4_h_f_1 = __half22float2(inp4_h[1]); + + inp1_h_f_0.x += (inp2_h_f_0.x + inp3_h_f_0.x + inp4_h_f_0.x); + inp1_h_f_0.y += (inp2_h_f_0.y + inp3_h_f_0.y + inp4_h_f_0.y); + inp1_h_f_1.x += (inp2_h_f_1.x + inp3_h_f_1.x + inp4_h_f_1.x); + inp1_h_f_1.y += (inp2_h_f_1.y + inp3_h_f_1.y + inp4_h_f_1.y); + + float2 val_f; + __half2* val_h = reinterpret_cast<__half2*>(&val_f); + + val_h[0] = __float22half2_rn(inp1_h_f_0); + val_h[1] = 
__float22half2_rn(inp1_h_f_1); + + float2* out_4 = reinterpret_cast(out); + out_4[row * row_stride + id] = val_f; +} + +template <> +void launch_fused_add4(float* out, + const float* inp1, + const float* inp2, + const float* inp3, + const float* inp4, + int batch_size, + int seq_length, + int hidden_size, + cudaStream_t& stream) +{ + dim3 grid_dim(batch_size * seq_length); + + dim3 block_dim(hidden_size / 4); + + fused_add4_kernel<<>>( + out, inp1, inp2, inp3, inp4, (batch_size * seq_length * hidden_size), hidden_size / 4); +} + +template <> +void launch_fused_add4<__half>(__half* out, + const __half* inp1, + const __half* inp2, + const __half* inp3, + const __half* inp4, + int batch_size, + int seq_length, + int hidden_size, + cudaStream_t& stream) +{ + dim3 grid_dim(batch_size * seq_length); + + dim3 block_dim(hidden_size / 4); + + fused_add4_kernel<<>>( + out, inp1, inp2, inp3, inp4, (batch_size * seq_length * hidden_size), hidden_size / 4); +} diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/apply_rotary_pos_emb.cu b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/apply_rotary_pos_emb.cu new file mode 100644 index 0000000000000000000000000000000000000000..a06dbb48fd332c8cd04b5c96407e5efa0431178f --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/apply_rotary_pos_emb.cu @@ -0,0 +1,199 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "conversion_utils.h" +#ifdef __HIP_PLATFORM_AMD__ +#include "hip/hip_cooperative_groups.h" +#else +#include "cooperative_groups.h" +#endif +#include "ds_kernel_utils.h" +#include "inference_cuda_layers.h" +#include "memory_access_utils.h" + +#ifndef __HIP_PLATFORM_AMD__ +#include +#endif + +namespace cg = cooperative_groups; + +namespace rot_half { +constexpr int threads = 256; +} // namespace rot_half + +template +__global__ void apply_rotary_pos_half(T* mixed_query, + T* key_layer, + unsigned rotary_dim, + unsigned seq_len, + unsigned seq_offset, + unsigned num_heads, + unsigned head_size, + unsigned total_count, + float rope_theta, + int max_out_tokens) +{ + constexpr int T_per_thread = granularity / sizeof(T); + constexpr int heads_per_block = rot_half::threads / threadsPerHead; + + cg::thread_block tb = cg::this_thread_block(); + cg::thread_block_tile head_group = cg::tiled_partition(tb); + + const int head_idx = blockIdx.x * heads_per_block + threadIdx.x / threadsPerHead; + const int cur_seq_idx = head_idx % seq_len; + const int offset = head_idx * head_size; + const int k_offset = (cur_seq_idx + (head_idx / seq_len) * max_out_tokens) * head_size; + + const int seq_idx = cur_seq_idx + seq_offset; + const int half_dim = rotary_dim >> 1; + const int half_dim_threads = half_dim / T_per_thread; + + if (head_idx < total_count) { + const int base_neuron_idx = head_group.thread_rank() * T_per_thread; + + T q[T_per_thread], k[T_per_thread]; + mem_access::load_global(q, mixed_query + offset + base_neuron_idx); + mem_access::load_global(k, key_layer + k_offset + base_neuron_idx); + +#pragma unroll + for (int i = 0; i < T_per_thread; i++) { + const int neuron_idx = base_neuron_idx + i; + if (neuron_idx < rotary_dim) { + float inv_freq = (float)((neuron_idx % half_dim) * 2) / (float)rotary_dim; + inv_freq = 1.0 / powf(rope_theta, inv_freq) * (float)seq_idx; + + float rotary_sign = (neuron_idx > 
(half_dim - 1) ? -1.0 : 1.0); + float q_rot = conversion::to(q[i]) * rotary_sign; + float k_rot = conversion::to(k[i]) * rotary_sign; + + const int target_lane = (neuron_idx < half_dim) + ? head_group.thread_rank() + half_dim_threads + : head_group.thread_rank() - half_dim_threads; + + const float q_rot_temp = head_group.shfl(q_rot, target_lane); + const float k_rot_temp = head_group.shfl(k_rot, target_lane); + + q[i] = conversion::to(conversion::to(q[i]) * cosf(inv_freq) + + q_rot_temp * sinf(inv_freq)); + k[i] = conversion::to(conversion::to(k[i]) * cosf(inv_freq) + + k_rot_temp * sinf(inv_freq)); + } + } + + mem_access::store_global(mixed_query + offset + base_neuron_idx, q); + mem_access::store_global(key_layer + k_offset + base_neuron_idx, k); + } +} + +#define LAUNCH_ROT_POS_EMB_HALF(HEAD_THREADS, ALIGNMENT) \ + apply_rotary_pos_half<<>>(mixed_query, \ + key_layer, \ + rotary_dim, \ + seq_len, \ + offset, \ + num_heads, \ + head_size, \ + total_count, \ + rope_theta, \ + max_out_tokens); + +#ifdef __HIP_PLATFORM_AMD__ +#define LAUNCH_FOR_ALIGNMENT(ALIGNMENT) \ + if (threads_per_head == 4) { \ + LAUNCH_ROT_POS_EMB_HALF(4, ALIGNMENT); \ + } else if (threads_per_head == 8) { \ + LAUNCH_ROT_POS_EMB_HALF(8, ALIGNMENT); \ + } else if (threads_per_head == 16) { \ + LAUNCH_ROT_POS_EMB_HALF(16, ALIGNMENT); \ + } else if (threads_per_head == 32) { \ + LAUNCH_ROT_POS_EMB_HALF(32, ALIGNMENT); \ + } else if (threads_per_head == 64) { \ + LAUNCH_ROT_POS_EMB_HALF(64, ALIGNMENT); \ + } else { \ + assert(false); \ + } +#else +#define LAUNCH_FOR_ALIGNMENT(ALIGNMENT) \ + if (threads_per_head == 4) { \ + LAUNCH_ROT_POS_EMB_HALF(4, ALIGNMENT); \ + } else if (threads_per_head == 8) { \ + LAUNCH_ROT_POS_EMB_HALF(8, ALIGNMENT); \ + } else if (threads_per_head == 16) { \ + LAUNCH_ROT_POS_EMB_HALF(16, ALIGNMENT); \ + } else if (threads_per_head == 32) { \ + LAUNCH_ROT_POS_EMB_HALF(32, ALIGNMENT); \ + } else { \ + assert(false); \ + } +#endif + +template +void 
launch_apply_rotary_pos_emb(T* mixed_query, + T* key_layer, + unsigned head_size, + unsigned seq_len, + unsigned rotary_dim, + unsigned offset, + unsigned num_heads, + unsigned batch, + float rope_theta, + cudaStream_t stream, + int max_out_tokens) +{ + const int half_dim = rotary_dim >> 1; + + int alignment = sizeof(T); + if (half_dim % (16 / sizeof(T)) == 0) { + alignment = 16; + } else if (half_dim % (8 / sizeof(T)) == 0) { + alignment = 8; + } else if (half_dim % (4 / sizeof(T)) == 0) { + alignment = 4; + } else { + assert(false); + } + const int T_per_elem = alignment / sizeof(T); + + int total_count = batch * num_heads * seq_len; + + const int padded_head_size = next_pow2(head_size); + + assert(padded_head_size <= hw_warp_size * T_per_elem); + + const int threads_per_head = padded_head_size / T_per_elem; + const int heads_per_block = rot_half::threads / threads_per_head; + + dim3 block(rot_half::threads); + dim3 grid((total_count + heads_per_block - 1) / heads_per_block); + + if (alignment == 4) { + LAUNCH_FOR_ALIGNMENT(4); + } else if (alignment == 8) { + LAUNCH_FOR_ALIGNMENT(8); + } else if (alignment == 16) { + LAUNCH_FOR_ALIGNMENT(16); + } else { + assert(false); + } +} + +#define INSTANTIATE_LAUNCH_ROTARY_POS_EMB(T) \ + template void launch_apply_rotary_pos_emb(T*, \ + T*, \ + unsigned, \ + unsigned, \ + unsigned, \ + unsigned, \ + unsigned, \ + unsigned, \ + float, \ + cudaStream_t, \ + int); + +INSTANTIATE_LAUNCH_ROTARY_POS_EMB(float); +#ifdef BF16_AVAILABLE +INSTANTIATE_LAUNCH_ROTARY_POS_EMB(__nv_bfloat16); +#endif +INSTANTIATE_LAUNCH_ROTARY_POS_EMB(__half); diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/dequantize.cu b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/dequantize.cu new file mode 100644 index 0000000000000000000000000000000000000000..7a8e7ca446b0fbc4154fd0198c8e301f77cc7815 --- /dev/null +++ 
b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/dequantize.cu @@ -0,0 +1,153 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "conversion_utils.h" +#include "inference_cuda_layers.h" + +#define MAX_QUANTIZE_GROUPING 1024 + +#define loop_unroll 1 +#define loop_unroll_bits 1 + +template +__global__ void dequantize_kernel(T* output, + const int8_t* input, + const float* qscale, + int output_size, + int hidden_dim, + int groups, + int merge_count) +{ + unsigned merge_hidden = hidden_dim >> merge_count; + unsigned quantization_stride = (merge_hidden * output_size) / groups; + + unsigned bid = blockIdx.x; + unsigned tid = threadIdx.x; + + while (tid < output_size) { + unsigned w_index = bid / merge_hidden; + unsigned q_index = tid + bid * output_size; + + auto q = input[q_index]; + + unsigned merge_hidden_total = w_index * merge_hidden; + unsigned scale_index = + ((((bid - merge_hidden_total) + tid * merge_hidden) / quantization_stride) + << merge_count) + + w_index; + + float scale_data = qscale[scale_index]; + + output[q_index] = conversion::to(scale_data * (float)q); + tid += blockDim.x; + } +} + +template +void launch_dequantize(T* output, + const int8_t* input, + const float* qscale, + unsigned output_size, + unsigned hidden_dim, + unsigned groups, + unsigned merge_count, + cudaStream_t stream) +{ + unsigned threads = 1024; + dim3 block_dims(threads); + dim3 grid_dims(hidden_dim); + + dequantize_kernel<<>>( + output, input, qscale, output_size, hidden_dim, groups, merge_count); +} + +#define INSTANTIATE_DEQUANTIZE_MERGE(T) \ + template void launch_dequantize( \ + T*, const int8_t*, const float*, unsigned, unsigned, unsigned, unsigned, cudaStream_t); + +INSTANTIATE_DEQUANTIZE_MERGE(float); +#ifdef BF16_AVAILABLE +INSTANTIATE_DEQUANTIZE_MERGE(__nv_bfloat16); +#endif +INSTANTIATE_DEQUANTIZE_MERGE(__half); + +__global__ void dequantize_kernel(float* output, + const 
int8_t* input, + const float* qscale, + int hidden_dim, + unsigned merge_hidden, + int cnt) +{ +} + +template +__global__ void dequantize_kernel(T* output, + const int8_t* input, + const float* qscale, + unsigned hidden_dim, + unsigned merge_hidden, + int cnt) +{ + unsigned bid = blockIdx.x * gridDim.y + blockIdx.y; + unsigned tid = threadIdx.x; + + float local_scale = qscale[blockIdx.x]; + + const float* input_cast = reinterpret_cast(input); + float2* output_cast = reinterpret_cast(output); + + input_cast += bid * merge_hidden; + output_cast += bid * merge_hidden; + + for (int c = 0; c < cnt; c++) { + if (tid < merge_hidden) { + float q = input_cast[tid]; + int8_t* q_int8 = (int8_t*)&q; + + float2 q_f; + T* q_h = (T*)&q_f; + + q_h[0] = conversion::to(local_scale * (float)q_int8[0]); + q_h[1] = conversion::to(local_scale * (float)q_int8[1]); + q_h[2] = conversion::to(local_scale * (float)q_int8[2]); + q_h[3] = conversion::to(local_scale * (float)q_int8[3]); + output_cast[tid] = q_f; + tid += blockDim.x; + } + } +} + +template +void launch_dequantize(T* output, + const int8_t* input, + const float* qscale, + unsigned output_size, + unsigned hidden_dim, + unsigned groups, + cudaStream_t stream) +{ + unsigned threads = 1024; + hidden_dim /= 4; + unsigned thd_cnt = (hidden_dim - 1) / threads + 1; + + assert(output_size % groups == 0); + unsigned blocks = output_size / groups; + + dim3 block_dims(threads); + dim3 grid_dims(groups, blocks); + + dequantize_kernel<<>>( + output, input, qscale, hidden_dim, hidden_dim, thd_cnt); +} + +#define INSTANTIATE_DEQUANTIZE_NO_MERGE(T) \ + template void launch_dequantize( \ + T*, const int8_t*, const float*, unsigned, unsigned, unsigned, cudaStream_t); + +INSTANTIATE_DEQUANTIZE_NO_MERGE(float); +#ifdef BF16_AVAILABLE +INSTANTIATE_DEQUANTIZE_NO_MERGE(__nv_bfloat16); +#endif +INSTANTIATE_DEQUANTIZE_NO_MERGE(__half); diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/gelu.cu 
b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/gelu.cu new file mode 100644 index 0000000000000000000000000000000000000000..dc7ff4d1e7c0cad3c59933e7eab73df6113ba00a --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/gelu.cu @@ -0,0 +1,710 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "conversion_utils.h" +#include "inference_cuda_layers.h" +#include "memory_access_utils.h" + +namespace cg = cooperative_groups; +#define MAX_CAP 4 +#define MAX_SEQ 2048 + +// only used to avoid compilation error due to lack of definition. +#ifndef BF16_AVAILABLE +using __nv_bfloat162 = __half2; +#endif + +inline __device__ float gelu(const float x) +{ + constexpr float sqrt_param = 0.79788456080286535587989211986876f; + constexpr float mul_param = 0.044715; + return x * 0.5f * (1.0f + tanhf(sqrt_param * (x + mul_param * x * x * x))); +} + +/* +In-place gelu(biasAdd(x)) for channels last +*/ +template +__global__ void fused_bias_gelu(T* input, const T* bias, int total_count, int intermediate_size) +{ + // Input restriction: intermediate_size % vals_per_access == 0 + constexpr int granularity = 16; + constexpr int values_per_access = granularity / sizeof(T); + const int offset = (blockIdx.x * blockDim.x + threadIdx.x) * values_per_access; + + if (offset < total_count) { + T data[values_per_access]; + T data_bias[values_per_access]; + mem_access::load_global(data, input + offset); + mem_access::load_global( + data_bias, bias + (offset % intermediate_size), bias != nullptr); + +#pragma unroll + for (int i = 0; i < values_per_access; i++) { + float data_f = conversion::to(data[i]); + float bias_f = conversion::to(data_bias[i]); + data[i] = conversion::to(gelu(data_f + bias_f)); + } + + mem_access::store_global(input + offset, data); + } +} + +template +void launch_bias_gelu(T* input, + const T* bias, + int intermediate_size, + int 
batch_size, + cudaStream_t stream) +{ + constexpr int threads = 1024; + constexpr int granularity = 16; + + const int total_count = batch_size * intermediate_size; + const int elems_per_block = threads * (granularity / sizeof(T)); + dim3 block_dims(threads); + dim3 grid_dims((total_count + elems_per_block - 1) / elems_per_block); + + fused_bias_gelu<<>>( + input, bias, total_count, intermediate_size); +} + +#define INSTANTIATE_LAUNCH_BIAS_GELU(T) \ + template void launch_bias_gelu(T*, const T*, int, int, cudaStream_t); + +INSTANTIATE_LAUNCH_BIAS_GELU(float) +#ifdef BF16_AVAILABLE +INSTANTIATE_LAUNCH_BIAS_GELU(__nv_bfloat16) +#endif +INSTANTIATE_LAUNCH_BIAS_GELU(__half) + +/* +In-place channels-last bias add +*/ +template +__global__ void fused_bias_add(T* input, const T* bias, int total_count, int intermediate_size) +{ + // Input restriction: intermediate_size % vals_per_access == 0 + constexpr int granularity = 16; + constexpr int values_per_access = granularity / sizeof(T); + const int offset = (blockIdx.x * blockDim.x + threadIdx.x) * values_per_access; + + if (offset < total_count) { + T data[values_per_access]; + T data_bias[values_per_access]; + mem_access::load_global(data, input + offset); + mem_access::load_global( + data_bias, bias + (offset % intermediate_size), bias != nullptr); + +#pragma unroll + for (int i = 0; i < values_per_access; i++) { + float data_f = conversion::to(data[i]); + float bias_f = conversion::to(data_bias[i]); + data[i] = conversion::to(data_f + bias_f); + } + + mem_access::store_global(input + offset, data); + } +} + +template +void launch_bias_add(T* input, + const T* bias, + int intermediate_size, + int batch_size, + cudaStream_t stream) +{ + constexpr int threads = 1024; + constexpr int granularity = 16; + + const int total_count = batch_size * intermediate_size; + const int elems_per_block = threads * (granularity / sizeof(T)); + dim3 block_dims(threads); + dim3 grid_dims((total_count + elems_per_block - 1) / elems_per_block); 
+ + fused_bias_add<<>>( + input, bias, total_count, intermediate_size); +} + +#define INSTANTIATE_LAUNCH_BIAS_ADD(T) \ + template void launch_bias_add(T*, const T*, int, int, cudaStream_t); + +INSTANTIATE_LAUNCH_BIAS_ADD(float) +#ifdef BF16_AVAILABLE +INSTANTIATE_LAUNCH_BIAS_ADD(__nv_bfloat16) +#endif +INSTANTIATE_LAUNCH_BIAS_ADD(__half) + +__global__ void fused_bias_residual(float* residual, + const float* hidden_state, + const float* attn, + const float* bias, + const float* attn_bias, + const int total_count, + const int intermediate_size, + const float mp_scale, + const bool preln) +{ + float4* res_fl4_ptr = reinterpret_cast(residual); + const float4* hs_fl4_ptr = reinterpret_cast(hidden_state); + const float4* attn_fl4_ptr = reinterpret_cast(attn); + const float4* bias_fl4_ptr = reinterpret_cast(bias); + const float4* attn_bias_fl4_ptr = reinterpret_cast(attn_bias); + const int offset = blockIdx.x * blockDim.x + threadIdx.x; + + if (offset < total_count) { + float4 res_fl4 = res_fl4_ptr[offset]; + const float4 hs_fl4 = hs_fl4_ptr[offset]; + const float4 attn_fl4 = attn_fl4_ptr[offset]; + const float4 bias_fl4 = bias_fl4_ptr[offset % intermediate_size]; + const float4 attn_bias_fl4 = attn_bias_fl4_ptr[offset % intermediate_size]; + if (preln) { + // residual = (residual + attention + bias + attention_bias) * + // mp_scale + hidden_state + res_fl4.x = + (res_fl4.x + attn_fl4.x + bias_fl4.x + attn_bias_fl4.x) * mp_scale + (hs_fl4.x); + res_fl4.y = + (res_fl4.y + attn_fl4.y + bias_fl4.y + attn_bias_fl4.y) * mp_scale + (hs_fl4.y); + res_fl4.z = + (res_fl4.z + attn_fl4.z + bias_fl4.z + attn_bias_fl4.z) * mp_scale + (hs_fl4.z); + res_fl4.w = + (res_fl4.w + attn_fl4.w + bias_fl4.w + attn_bias_fl4.w) * mp_scale + (hs_fl4.w); + } else { + // residual += hidden_state + bias + res_fl4.x = res_fl4.x + hs_fl4.x + bias_fl4.x; + res_fl4.y = res_fl4.y + hs_fl4.y + bias_fl4.y; + res_fl4.z = res_fl4.z + hs_fl4.z + bias_fl4.z; + res_fl4.w = res_fl4.w + hs_fl4.w + bias_fl4.w; + } 
+ res_fl4_ptr[offset] = res_fl4; + } +} + +template +__global__ void fused_bias_residual(T* residual, + const T* hidden_state, + const T* attn, + const T* bias, + const T* attn_bias, + const int total_count, + const int intermediate_size, + const float mp_scale, + const bool preln) +{ + using T2 = + typename std::conditional::value, __half2, __nv_bfloat162>::type; + float2* res_fl2_ptr = reinterpret_cast(residual); + const float2* hs_fl2_ptr = reinterpret_cast(hidden_state); + const float2* attn_fl2_ptr = reinterpret_cast(attn); + const float2* bias_fl2_ptr = reinterpret_cast(bias); + const float2* attn_bias_fl2_ptr = reinterpret_cast(attn_bias); + const int offset = blockIdx.x * blockDim.x + threadIdx.x; + + if (offset < total_count) { + float2 res_fl2 = res_fl2_ptr[offset]; + const float2 hs_fl2 = hs_fl2_ptr[offset]; + const float2 attn_fl2 = attn_fl2_ptr[offset]; + const float2 bias_fl2 = bias_fl2_ptr[offset % intermediate_size]; + const float2 attn_bias_fl2 = attn_bias_fl2_ptr[offset % intermediate_size]; + + T2* res_half2 = reinterpret_cast(&res_fl2); + const T2* hs_half2 = reinterpret_cast(&hs_fl2); + const T2* attn_half2 = reinterpret_cast(&attn_fl2); + const T2* bias_half2 = reinterpret_cast(&bias_fl2); + const T2* attn_bias_half2 = reinterpret_cast(&attn_bias_fl2); + + float2 res_low = conversion::to(res_half2[0]); + float2 res_high = conversion::to(res_half2[1]); + + const float2 hs_low = conversion::to(hs_half2[0]); + const float2 hs_high = conversion::to(hs_half2[1]); + + const float2 attn_low = conversion::to(attn_half2[0]); + const float2 attn_high = conversion::to(attn_half2[1]); + + const float2 bias_low = conversion::to(bias_half2[0]); + const float2 bias_high = conversion::to(bias_half2[1]); + + const float2 attn_bias_low = conversion::to(attn_bias_half2[0]); + const float2 attn_bias_high = conversion::to(attn_bias_half2[1]); + + if (preln) { + // residual = (residual + attention + bias + attention_bias) * + // mp_scale + hidden_state + res_low.x 
= + (res_low.x + attn_low.x + bias_low.x + attn_bias_low.x) * mp_scale + hs_low.x; + res_low.y = + (res_low.y + attn_low.y + bias_low.y + attn_bias_low.y) * mp_scale + hs_low.y; + res_high.x = + (res_high.x + attn_high.x + bias_high.x + attn_bias_high.x) * mp_scale + hs_high.x; + res_high.y = + (res_high.y + attn_high.y + bias_high.y + attn_bias_high.y) * mp_scale + hs_high.y; + } else { + // residual += hidden_state + bias + res_low.x = (res_low.x + hs_low.x + bias_low.x); + res_low.y = (res_low.y + hs_low.y + bias_low.y); + res_high.x = (res_high.x + hs_high.x + bias_high.x); + res_high.y = (res_high.y + hs_high.y + bias_high.y); + } + res_half2[0] = conversion::to(res_low); + res_half2[1] = conversion::to(res_high); + + res_fl2_ptr[offset] = res_fl2; + } +} + +template +void launch_bias_residual(T* residual, + T* hidden_state, + T* attn, + T* bias, + T* attn_bias, + int batch, + int hidden_dim, + int mp_size, + bool preln, + cudaStream_t stream) +{ + int total_count = batch * hidden_dim / 4; + dim3 block_dims(1024); + dim3 grid_dims((total_count - 1) / 1024 + 1); // (batch_size); + + fused_bias_residual<<>>(residual, + hidden_state, + attn, + bias, + attn_bias, + total_count, + hidden_dim / 4, + 1.0 / mp_size, + preln); +} + +#define INSTANTIATE_LAUNCH_BIAS_RESIDUAL(T) \ + template void launch_bias_residual(T*, T*, T*, T*, T*, int, int, int, bool, cudaStream_t); + +INSTANTIATE_LAUNCH_BIAS_RESIDUAL(float); +#ifdef BF16_AVAILABLE +INSTANTIATE_LAUNCH_BIAS_RESIDUAL(__nv_bfloat16); +#endif +INSTANTIATE_LAUNCH_BIAS_RESIDUAL(__half); + +__global__ void gptj_residual_add(float* residual, + const float* hidden_state, + const float* attn, + const float* bias, + const float* attn_bias, + const int total_count, + const int intermediate_size, + const float mp_scale) +{ + float4* res_fl4_ptr = reinterpret_cast(residual); + const float4* hs_fl4_ptr = reinterpret_cast(hidden_state); + const float4* attn_fl4_ptr = reinterpret_cast(attn); + const float4* bias_fl4_ptr = 
reinterpret_cast(bias); + const float4* attn_bias_fl4_ptr = reinterpret_cast(attn_bias); + const int offset = blockIdx.x * blockDim.x + threadIdx.x; + + if (offset < total_count) { + float4 res_fl4 = res_fl4_ptr[offset]; + const float4 hs_fl4 = hs_fl4_ptr[offset]; + const float4 attn_fl4 = attn_fl4_ptr[offset]; + const float4 bias_fl4 = bias_fl4_ptr[offset % intermediate_size]; + + if (attn_bias) { + float4 attn_bias_fl4 = attn_bias_fl4_ptr[offset % intermediate_size]; + // residual += attention_bias + res_fl4.x += attn_bias_fl4.x; + res_fl4.y += attn_bias_fl4.y; + res_fl4.z += attn_bias_fl4.z; + res_fl4.w += attn_bias_fl4.w; + } + // residual = hidden_state + attention + (residual + bias) * mp_scale + res_fl4.x = hs_fl4.x + attn_fl4.x + (res_fl4.x + bias_fl4.x) * mp_scale; + res_fl4.y = hs_fl4.y + attn_fl4.y + (res_fl4.y + bias_fl4.y) * mp_scale; + res_fl4.z = hs_fl4.z + attn_fl4.z + (res_fl4.z + bias_fl4.z) * mp_scale; + res_fl4.w = hs_fl4.w + attn_fl4.w + (res_fl4.w + bias_fl4.w) * mp_scale; + + res_fl4_ptr[offset] = res_fl4; + } +} + +template +__global__ void gptj_residual_add(T* residual, + const T* hidden_state, + const T* attn, + const T* bias, + const T* attn_bias, + const int total_count, + const int intermediate_size, + const float mp_scale) +{ + using T2 = + typename std::conditional::value, __half2, __nv_bfloat162>::type; + float2* res_fl2_ptr = reinterpret_cast(residual); + const float2* hs_fl2_ptr = reinterpret_cast(hidden_state); + const float2* attn_fl2_ptr = reinterpret_cast(attn); + const float2* bias_fl2_ptr = reinterpret_cast(bias); + const float2* attn_bias_fl2_ptr = reinterpret_cast(attn_bias); + const int offset = blockIdx.x * blockDim.x + threadIdx.x; + + if (offset < total_count) { + float2 res_fl2 = res_fl2_ptr[offset]; + const float2 hs_fl2 = hs_fl2_ptr[offset]; + const float2 attn_fl2 = attn_fl2_ptr[offset]; + const float2 bias_fl2 = bias_fl2_ptr[offset % intermediate_size]; + + T2* res_half2 = reinterpret_cast(&res_fl2); + const T2* 
hs_half2 = reinterpret_cast(&hs_fl2); + const T2* attn_half2 = reinterpret_cast(&attn_fl2); + const T2* bias_half2 = reinterpret_cast(&bias_fl2); + + float2 res_low = conversion::to(res_half2[0]); + float2 res_high = conversion::to(res_half2[1]); + + const float2 hs_low = conversion::to(hs_half2[0]); + const float2 hs_high = conversion::to(hs_half2[1]); + + const float2 attn_low = conversion::to(attn_half2[0]); + const float2 attn_high = conversion::to(attn_half2[1]); + + const float2 bias_low = conversion::to(bias_half2[0]); + const float2 bias_high = conversion::to(bias_half2[1]); + + if (attn_bias) { + const float2 attn_bias_fl2 = attn_bias_fl2_ptr[offset % intermediate_size]; + const T2* attn_bias_half2 = reinterpret_cast(&attn_bias_fl2); + const float2 attn_bias_low = conversion::to(attn_bias_half2[0]); + const float2 attn_bias_high = conversion::to(attn_bias_half2[1]); + // residual += attention_bias + res_low.x += attn_bias_low.x; + res_low.y += attn_bias_low.y; + res_high.x += attn_bias_high.x; + res_high.y += attn_bias_high.y; + } + // residual = hidden_state + attention + (residual + bias) * mp_scale + res_low.x = attn_low.x + hs_low.x + (res_low.x + bias_low.x) * mp_scale; + res_low.y = attn_low.y + hs_low.y + (res_low.y + bias_low.y) * mp_scale; + res_high.x = attn_high.x + hs_high.x + (res_high.x + bias_high.x) * mp_scale; + res_high.y = attn_high.y + hs_high.y + (res_high.y + bias_high.y) * mp_scale; + + res_half2[0] = conversion::to(res_low); + res_half2[1] = conversion::to(res_high); + + res_fl2_ptr[offset] = res_fl2; + } +} + +template +void launch_gptj_residual_add(T* residual, + T* hidden_state, + T* attn, + T* bias, + T* attn_bias, + int hidden_dim, + int batch, + int mp_size, + cudaStream_t stream) +{ + int total_count = batch * hidden_dim / 4; + dim3 block_dims(1024); + dim3 grid_dims((total_count - 1) / 1024 + 1); // (batch_size); + + gptj_residual_add<<>>( + residual, hidden_state, attn, bias, attn_bias, total_count, hidden_dim / 4, 1.0 / 
mp_size); +} + +#define INSTANTIATE_GPT_RES_ADD(T) \ + template void launch_gptj_residual_add(T*, T*, T*, T*, T*, int, int, int, cudaStream_t); + +INSTANTIATE_GPT_RES_ADD(float); +INSTANTIATE_GPT_RES_ADD(__half); +#ifdef BF16_AVAILABLE +INSTANTIATE_GPT_RES_ADD(__nv_bfloat16); +#endif + +template +__global__ void moe_res_matmul(T* residual, T* coef, T* mlp_out, int seq_len, int hidden_dim) +{ + constexpr int granularity = 16; + constexpr int vals_per_access = granularity / sizeof(T); + + T* residual_seq = residual + blockIdx.x * hidden_dim; + T* mlp_out_seq = mlp_out + blockIdx.x * hidden_dim; + + for (unsigned tid = threadIdx.x * vals_per_access; tid < hidden_dim; + tid += blockDim.x * vals_per_access) { + T mlp[vals_per_access]; + T res[vals_per_access]; + T coef1[vals_per_access]; + T coef2[vals_per_access]; + + mem_access::load_global(mlp, mlp_out_seq + tid); + mem_access::load_global(res, residual_seq + tid); + mem_access::load_global(coef1, coef + tid); + mem_access::load_global(coef2, coef + tid + hidden_dim); + +#pragma unroll + for (int idx = 0; idx < vals_per_access; idx++) { + mlp[idx] = mlp[idx] * coef2[idx] + res[idx] * coef1[idx]; + } + + mem_access::store_global(mlp_out_seq + tid, mlp); + } +} + +template +void launch_moe_res_matmul(T* residual, + T* coef, + T* mlp_out, + int seq_len, + int hidden_dim, + cudaStream_t stream) +{ + dim3 grid_dim(seq_len); + dim3 block_dim(1024); + moe_res_matmul<<>>( + residual, coef, mlp_out, seq_len, hidden_dim); +} + +#define INSTANTIATE_LAUNCH_MOE_RES_MATMUL(T) \ + template void launch_moe_res_matmul(T*, T*, T*, int, int, cudaStream_t); + +INSTANTIATE_LAUNCH_MOE_RES_MATMUL(float); +#ifdef BF16_AVAILABLE +INSTANTIATE_LAUNCH_MOE_RES_MATMUL(__nv_bfloat16); +#endif +INSTANTIATE_LAUNCH_MOE_RES_MATMUL(__half); + +template +__global__ void pad_data_kernel(T* padded_output, T* output, int head_size, int padded_head_size) +{ + using T2 = + typename std::conditional::value, __half2, __nv_bfloat162>::type; + float4* 
padded_output_cast = reinterpret_cast(padded_output); + float4* output_cast = reinterpret_cast(output); + int bid = blockIdx.x * (blockDim.y) + threadIdx.y; + int idx = threadIdx.x; + padded_output_cast += (bid * padded_head_size); + output_cast += (bid * head_size); + float4 ZERO; + const T2 zero_h = conversion::to(0.f); + T2* ZERO_h = reinterpret_cast(&ZERO); +#pragma unroll + for (int i = 0; i < 4; i++) ZERO_h[i] = zero_h; + if (idx < head_size) + padded_output_cast[idx] = output_cast[idx]; + else + padded_output_cast[idx] = ZERO; +} + +__global__ void pad_data_kernel(float* padded_output, + float* output, + int head_size, + int padded_head_size) +{ +} + +template +void pad_data(T* padded_output, + T* output, + int bsz, + int head_size, + int padded_head_size, + cudaStream_t stream) +{ + dim3 grid_dim((bsz - 1) / 16 + 1); + dim3 block_dim(padded_head_size / 8, 16); + pad_data_kernel<<>>( + padded_output, output, head_size / 8, padded_head_size / 8); +} + +#define INSTANTIATE_PAD_DATA(T) template void pad_data(T*, T*, int, int, int, cudaStream_t stream); + +INSTANTIATE_PAD_DATA(float); +INSTANTIATE_PAD_DATA(__half); +#ifdef BF16_AVAILABLE +INSTANTIATE_PAD_DATA(__nv_bfloat16); +#endif + +template +__global__ void pad_head_seq_kernel(T* padded_output, + T* output, + int seq_len, + int padded_seq_len, + int head_size, + int padded_head_size) +{ + using T2 = + typename std::conditional::value, __half2, __nv_bfloat162>::type; + float4* padded_output_cast = reinterpret_cast(padded_output); + float4* output_cast = reinterpret_cast(output); + int bsz = blockIdx.x; + int bid = blockIdx.y * (blockDim.y) + threadIdx.y; + int idx = threadIdx.x; + padded_output_cast += (bsz * padded_seq_len + bid) * padded_head_size; + output_cast += (bsz * seq_len + bid) * head_size; + float4 ZERO; + const T2 zero_h = conversion::to(0.f); + T2* ZERO_h = reinterpret_cast(&ZERO); +#pragma unroll + for (int i = 0; i < 4; i++) ZERO_h[i] = zero_h; + + if (idx < head_size && bid < seq_len) + 
padded_output_cast[idx] = output_cast[idx]; + else + padded_output_cast[idx] = ZERO; +} + +__global__ void pad_head_seq_kernel(float* padded_output, + float* output, + int seq_len, + int padded_seq_len, + int head_size, + int padded_head_size) +{ +} + +template +void pad_head_seq(T* padded_output, + T* output, + int bsz, + int seq_len, + int padded_seq_len, + int head_size, + int padded_head_size, + cudaStream_t stream) +{ + dim3 grid_dim(bsz, padded_seq_len / 16); + dim3 block_dim(padded_head_size / 8, 16); + pad_head_seq_kernel<<>>( + padded_output, output, seq_len, padded_seq_len, head_size / 8, padded_head_size / 8); +} + +#define INSTANTIATE_PAD_HEAD_SEQ(T) \ + template void pad_head_seq(T*, T*, int, int, int, int, int, cudaStream_t); + +INSTANTIATE_PAD_HEAD_SEQ(__half); +#ifdef BF16_AVAILABLE +INSTANTIATE_PAD_HEAD_SEQ(__nv_bfloat16); +#endif +INSTANTIATE_PAD_HEAD_SEQ(float); + +// TODO(cmikeh2): evaluate different GeLU performance +__device__ __forceinline__ float old_gelu(float val) +{ + // 1 / sqrt(2) + constexpr float rsqrt_2 = 0.707106769084930419922; + return val * 0.5f * (1.0f + erff(val * rsqrt_2)); +} + +namespace fused_geglu { +constexpr int threads = 256; +constexpr int steps = 2; +constexpr int granularity = 16; +} // namespace fused_geglu + +__device__ __forceinline__ float silu(float val) { return val / (1.0f + expf(-val)); } + +template +__global__ void fused_gate_activation(T* output, + const T* activation, + const T* bias, + int base_channels, + int output_stride, + int total_elems) +{ + constexpr int T_per_access = fused_geglu::granularity / sizeof(T); + constexpr int T_per_step = T_per_access * fused_geglu::threads; + constexpr int T_per_block = T_per_step * fused_geglu::steps; + + const int id = blockIdx.x * T_per_block + threadIdx.x * T_per_access; + +#pragma unroll + for (int i = 0; i < fused_geglu::steps; i++) { + T activation_buffer_1[T_per_access]; + T activation_buffer_2[T_per_access]; + T bias_buffer_1[T_per_access]; + T 
bias_buffer_2[T_per_access]; + + const int iter_id = id + T_per_step * i; + if (iter_id < total_elems) { + const int channel_id = iter_id % base_channels; + const int seq_id = iter_id / base_channels; + const int seq_offset = seq_id * base_channels * 2; + + mem_access::load_global(activation_buffer_1, + activation + seq_offset + channel_id); + mem_access::load_global( + activation_buffer_2, activation + seq_offset + channel_id + base_channels); + mem_access::load_global( + bias_buffer_1, bias + channel_id, bias != nullptr); + mem_access::load_global( + bias_buffer_2, bias + channel_id + base_channels, bias != nullptr); + + // Since the GeLU is going to happen at float, might as well + // convert +#pragma unroll + for (int v = 0; v < T_per_access; v++) { + T hidden_state = activation_buffer_1[v] + bias_buffer_1[v]; + T pre_gate = activation_buffer_2[v] + bias_buffer_2[v]; + float pre_gate_f = conversion::to(pre_gate); + float gate_f = (useGelu) ? old_gelu(pre_gate_f) : silu(pre_gate_f); + T gate = conversion::to(gate_f); + activation_buffer_1[v] = hidden_state * gate; + } + + mem_access::store_global( + output + seq_id * output_stride + channel_id, activation_buffer_1); + } + } +} + +template +void launch_gated_activation(T* output, + const T* activation, + const T* bias, + int rows, + int output_stride, + int elems_per_row, + bool use_gelu, + cudaStream_t stream) +{ + /* + Fused bias GEGLU is a variant of the gated activation functions. + The input here is a matrix of [batch, seq_len, 2 * intermediate_dim] + where the second half of the channels act as GeLU gates for the first + half. 
+ */ + + // Re-derive the above figures + constexpr int T_per_access = fused_geglu::granularity / sizeof(T); + constexpr int T_per_step = T_per_access * fused_geglu::threads; + constexpr int T_per_block = T_per_step * fused_geglu::steps; + + const int base_channels = elems_per_row / 2; + const int total_elems = base_channels * rows; + + dim3 block(fused_geglu::threads); + dim3 grid((total_elems + T_per_block - 1) / T_per_block); + + if (use_gelu) { + fused_gate_activation<<>>( + output, activation, bias, base_channels, output_stride, total_elems); + } else { + fused_gate_activation<<>>( + output, activation, bias, base_channels, output_stride, total_elems); + } +} + +#define INSTANTIATE_LAUNCH_GATED_ACTIVATION(T) \ + template void launch_gated_activation( \ + T*, const T*, const T*, int, int, int, bool, cudaStream_t); + +INSTANTIATE_LAUNCH_GATED_ACTIVATION(__half); +#ifdef BF16_AVAILABLE +INSTANTIATE_LAUNCH_GATED_ACTIVATION(__nv_bfloat16); +#endif +INSTANTIATE_LAUNCH_GATED_ACTIVATION(float); diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/layer_norm.cu b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/layer_norm.cu new file mode 100644 index 0000000000000000000000000000000000000000..e5e7e89c9d2069678bc445004053f8ad5f9809b1 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/layer_norm.cu @@ -0,0 +1,503 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "conversion_utils.h" +#include "ds_kernel_utils.h" +#include "inference_cuda_layers.h" +#include "memory_access_utils.h" +#include "reduction_utils.h" + +namespace cg = cooperative_groups; +using rop = reduce::ROpType; + +namespace ln { +constexpr int granularity = 16; +} // namespace ln + +/* +Primary layer norm implementation. Assumes elems_per_row % 8 +is equal to 0. 
+ +Args: + output: buffer for output data + vals: buffer for input data + gamma: gain for normalization + beta: bias for normalization + epsilon: numeric stability + elems_per_row: number of elements each block will normalize +*/ +template +__global__ void fused_ln(T* output, + const T* vals, + const T* gamma, + const T* beta, + float epsilon, + int elems_per_row) +{ + constexpr int T_per_load = ln::granularity / sizeof(T); + + cg::thread_block tb = cg::this_thread_block(); + cg::thread_block_tile warp = cg::tiled_partition(tb); + + // X-dimension of the block + const int block_offset = (tb.group_index().x * (maxThreads / threadsPerGroup) * elems_per_row) + + (tb.thread_index().y * elems_per_row); + const int thread_offset = tb.thread_index().x * T_per_load; + const int base_offset = block_offset + thread_offset; + const int stride = blockDim.x * T_per_load; + + float sum = reduce::init(); + + const T* input_base = vals + base_offset; + + T local_buffer[unRoll * T_per_load]; + +#pragma unRoll + for (int i = 0; i < unRoll; i++) { + T* iteration_buffer = local_buffer + i * T_per_load; + + mem_access::load_global( + iteration_buffer, input_base + i * stride, thread_offset + i * stride < elems_per_row); + +#pragma unRoll + for (int j = 0; j < T_per_load; j++) { + float vals_up_cast = conversion::to(iteration_buffer[j]); + sum = reduce::element(sum, vals_up_cast); + } + } + + reduce::partitioned_block(tb, warp, sum); + const float mean = sum / elems_per_row; + + float mean_diff = reduce::init(); + +#pragma unRoll + for (int i = 0; i < unRoll; i++) { +#pragma unRoll + for (int j = 0; j < T_per_load; j++) { + // Using a 0 value here skews the variance, have to if-guard + if (thread_offset + i * stride < elems_per_row) { + float diff = (conversion::to(local_buffer[i * T_per_load + j]) - mean); + mean_diff = reduce::element(mean_diff, diff * diff); + } + } + } + + reduce::partitioned_block(tb, warp, mean_diff); + const float variance = mean_diff / elems_per_row; + const 
float denom = __frsqrt_rn(variance + epsilon); + + // const T mean_compute = conversion::to(mean); + // const T denom_compute = conversion::to(denom); + + T* block_output = output + block_offset; + +#pragma unRoll + for (int i = 0; i < unRoll; i++) { + T* iteration_buffer = local_buffer + i * T_per_load; + const int iter_idx = i * stride + thread_offset; + const bool do_loads = iter_idx < elems_per_row; + + T gamma_local[T_per_load], beta_local[T_per_load]; + + mem_access::load_global(gamma_local, gamma + iter_idx, do_loads); + mem_access::load_global(beta_local, beta + iter_idx, do_loads); + +#pragma unRoll + for (int j = 0; j < T_per_load; j++) { + float val = conversion::to(iteration_buffer[j]); + val = (val - mean) * denom; + val = + val * conversion::to(gamma_local[j]) + conversion::to(beta_local[j]); + iteration_buffer[j] = conversion::to(val); + } + + if (do_loads) { + mem_access::store_global(block_output + iter_idx, iteration_buffer); + } + } +} + +#define LAUNCH_FUSED_LN(unRollFactor, threadsPerGroup, maxThreads) \ + fused_ln \ + <<>>(output, vals, gamma, beta, epsilon, elems_per_row); + +template +void launch_fused_ln(T* output, + const T* vals, + const T* gamma, + const T* beta, + float epsilon, + int rows, + int elems_per_row, + cudaStream_t stream) +{ + // 8 for __half, 4 for float + constexpr int T_per_load = ln::granularity / sizeof(T); + + constexpr int maxThreads = 256; + + // For Flaoat, unRoll 4, for __half, unRoll 2 + constexpr int internal_unRoll = sizeof(T) == 4 ? 4 : 2; + + const bool is_subblock_schedule = (elems_per_row <= 128) ? true : false; + const int h_per_step = is_subblock_schedule ? T_per_load : T_per_load * internal_unRoll; + + // Scheduling concern: may be slightly faster for some inputs to assign multiple stages of + // warp-sized blocks rather than stepping up to 64/96 threads + const int one_step_threads = next_pow2((elems_per_row + h_per_step - 1) / h_per_step); + const int threadsPerGroup = (one_step_threads < maxThreads) ? 
one_step_threads : maxThreads; + + const int groups_per_block_max = + is_subblock_schedule ? (maxThreads + threadsPerGroup - 1) / threadsPerGroup : 1; + const int groups_per_block = (rows < groups_per_block_max) ? rows : groups_per_block_max; + const int groups_launch = (groups_per_block + rows - 1) / groups_per_block; + + dim3 block(threadsPerGroup, groups_per_block); + dim3 grid(groups_launch); + + const int elems_per_step = threadsPerGroup * h_per_step; + const int external_unRoll = (elems_per_row + elems_per_step - 1) / elems_per_step; + + if (is_subblock_schedule) { + // <=128 + if (threadsPerGroup == 1) { + LAUNCH_FUSED_LN(1, 1, maxThreads); + } else if (threadsPerGroup == 2) { + LAUNCH_FUSED_LN(1, 2, maxThreads); + } else if (threadsPerGroup == 4) { + LAUNCH_FUSED_LN(1, 4, maxThreads); + } else if (threadsPerGroup == 8) { + LAUNCH_FUSED_LN(1, 8, maxThreads); + } else if (threadsPerGroup == 16) { + LAUNCH_FUSED_LN(1, 16, maxThreads); + } + } else if (external_unRoll == 1) { + // 129 - 4096 elems + // (this can launch with 1-7 warps as well) + LAUNCH_FUSED_LN(1 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 2) { + // 4097 - 8192 elems + LAUNCH_FUSED_LN(2 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 3) { + // 8193 - 12288 elems + LAUNCH_FUSED_LN(3 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 4) { + // 12289 - 16384 elems + LAUNCH_FUSED_LN(4 * internal_unRoll, maxThreads, maxThreads); + } +} + +#define INSTANTIATE_FUSED_LN(T) \ + template void launch_fused_ln(T*, const T*, const T*, const T*, float, int, int, cudaStream_t); + +INSTANTIATE_FUSED_LN(__half); +#ifdef BF16_AVAILABLE +INSTANTIATE_FUSED_LN(__nv_bfloat16); +#endif +INSTANTIATE_FUSED_LN(float); + +/* +Fused resiual + bias + layer norm implementation. Assumes elems_per_row % 8 +is equal to 0. + +TODO(cmikeh2): Goal is to deprecate this implementation. 
The bias + residual +need to be fused into compute-bound producer operations. + +Args: + output: buffer for output data + res_output: output of residual addition + vals: buffer for input data + residual: residual data + bias: bias of of input data + gamma: gain for normalization + beta: bias for normalization + epsilon: numeric stability + elems_per_row: number of elements each block will normalize +Template arg: + StoreResidual: controls whether the residual calculation is stored + or not. When set to false, the input `res_output` is unused. +*/ +template +__global__ void fused_residual_ln(T* output, + T* res_output, + const T* vals, + const T* residual, + const T* bias, + const T* gamma, + const T* beta, + float epsilon, + int elems_per_row) +{ + constexpr int T_per_load = ln::granularity / sizeof(T); + + cg::thread_block tb = cg::this_thread_block(); + cg::thread_block_tile warp = cg::tiled_partition(tb); + + // X-dimension of the block + const int block_offset = (tb.group_index().x * (maxThreads / threadsPerGroup) * elems_per_row) + + (tb.thread_index().y * elems_per_row); + const int thread_offset = tb.thread_index().x * T_per_load; + const int base_offset = block_offset + thread_offset; + const int stride = tb.size() * T_per_load; + + float sum = reduce::init(); + + const T* input_base = vals + base_offset; + const T* residual_base = residual + base_offset; + const T* bias_base = bias + thread_offset; + + T local_buffer[unRoll * T_per_load]; + + // Unlike a vanilla layernorm, since we're fusing the two adds as well + // an inner unRoll seems to be less valuable. If anything, a double unRoll + // makes the most sense if we find we are having performance issues. 
+#pragma unRoll + for (int i = 0; i < unRoll; i++) { + T* iteration_buffer = local_buffer + i * T_per_load; + T residual_buffer[T_per_load]; + T bias_buffer[T_per_load]; + + mem_access::load_global( + iteration_buffer, input_base + i * stride, thread_offset + i * stride < elems_per_row); + mem_access::load_global(residual_buffer, + residual_base + i * stride, + thread_offset + i * stride < elems_per_row); + mem_access::load_global( + bias_buffer, bias_base + i * stride, thread_offset + i * stride < elems_per_row); + +#pragma unRoll + for (int j = 0; j < T_per_load; j++) { + float vals_up_cast = conversion::to(iteration_buffer[j]); + float res_up_cast = conversion::to(residual_buffer[j]); + float bias_up_cast = conversion::to(bias_buffer[j]); + vals_up_cast = vals_up_cast + bias_up_cast + res_up_cast; + sum = reduce::element(sum, vals_up_cast); + iteration_buffer[j] = conversion::to(vals_up_cast); + } + + if (preLnResidual && (thread_offset + i * stride < elems_per_row)) { + mem_access::store_global(res_output + base_offset + i * stride, + iteration_buffer); + } + } + + reduce::partitioned_block(tb, warp, sum); + const float mean = sum / elems_per_row; + + float mean_diff = reduce::init(); +#pragma unRoll + for (int i = 0; i < unRoll; i++) { +#pragma unRoll + for (int j = 0; j < T_per_load; j++) { + // Using a 0 value here skews the variance, have to if-guard + if (thread_offset + i * stride < elems_per_row) { + float diff = (conversion::to(local_buffer[i * T_per_load + j]) - mean); + mean_diff = reduce::element(mean_diff, diff * diff); + } + } + } + + reduce::partitioned_block(tb, warp, mean_diff); + const float variance = mean_diff / elems_per_row; + const float denom = __frsqrt_rn(variance + epsilon); + + T* block_output = output + block_offset; + +#pragma unRoll + for (int i = 0; i < unRoll; i++) { + T* iteration_buffer = local_buffer + i * T_per_load; + const int iter_idx = i * stride + thread_offset; + const bool do_loads = iter_idx < elems_per_row; + + T 
gamma_local[T_per_load], beta_local[T_per_load]; + + mem_access::load_global(gamma_local, gamma + iter_idx, do_loads); + mem_access::load_global(beta_local, beta + iter_idx, do_loads); + +#pragma unRoll + for (int j = 0; j < T_per_load; j++) { + // iteration_buffer[j] = (iteration_buffer[j] - mean_compute) * denom_compute; + // iteration_buffer[j] = iteration_buffer[j] * gamma_local[j] + beta_local[j]; + float val = conversion::to(iteration_buffer[j]); + val = (val - mean) * denom; + val = + val * conversion::to(gamma_local[j]) + conversion::to(beta_local[j]); + iteration_buffer[j] = conversion::to(val); + } + + if (do_loads) { + mem_access::store_global(block_output + iter_idx, iteration_buffer); + } + } +} + +// TODO(cmikeh2): There's a bunch of redundancy here that needs to be removed/simplified. +#define LAUNCH_FUSED_RES_LN(unRollFactor, threadsPerGroup, maxThreads) \ + fused_residual_ln \ + <<>>( \ + output, nullptr, vals, residual, bias, gamma, beta, epsilon, elems_per_row); + +template +void launch_fused_residual_ln(T* output, + const T* vals, + const T* residual, + const T* bias, + const T* gamma, + const T* beta, + float epsilon, + int rows, + int elems_per_row, + cudaStream_t stream) +{ + // 8 for __half, 4 for float + constexpr int T_per_load = ln::granularity / sizeof(T); + + constexpr int maxThreads = 256; + + // For Flaoat, unRoll 4, for __half, unRoll 2 + constexpr int internal_unRoll = sizeof(T) == 4 ? 4 : 2; + + const bool is_subblock_schedule = (elems_per_row <= 128) ? true : false; + const int h_per_step = is_subblock_schedule ? T_per_load : T_per_load * internal_unRoll; + + // Scheduling concern: may be slightly faster for some inputs to assign multiple stages of + // warp-sized blocks rather than stepping up to 64/96 threads + const int one_step_threads = next_pow2((elems_per_row + h_per_step - 1) / h_per_step); + const int threadsPerGroup = (one_step_threads < maxThreads) ? 
one_step_threads : maxThreads; + + const int groups_per_block_max = + is_subblock_schedule ? (maxThreads + threadsPerGroup - 1) / threadsPerGroup : 1; + const int groups_per_block = (rows < groups_per_block_max) ? rows : groups_per_block_max; + const int groups_launch = (groups_per_block + rows - 1) / groups_per_block; + + dim3 block(threadsPerGroup, groups_per_block); + dim3 grid(groups_launch); + + const int elems_per_step = threadsPerGroup * h_per_step; + const int external_unRoll = (elems_per_row + elems_per_step - 1) / elems_per_step; + + if (is_subblock_schedule) { + // <=128 + if (threadsPerGroup == 1) { + LAUNCH_FUSED_RES_LN(1, 1, maxThreads); + } else if (threadsPerGroup == 2) { + LAUNCH_FUSED_RES_LN(1, 2, maxThreads); + } else if (threadsPerGroup == 4) { + LAUNCH_FUSED_RES_LN(1, 4, maxThreads); + } else if (threadsPerGroup == 8) { + LAUNCH_FUSED_RES_LN(1, 8, maxThreads); + } else if (threadsPerGroup == 16) { + LAUNCH_FUSED_RES_LN(1, 16, maxThreads); + } + } else if (external_unRoll == 1) { + // 129 - 4096 elems + // (this can launch with 1-7 warps as well) + LAUNCH_FUSED_RES_LN(1 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 2) { + // 4097 - 8192 elems + LAUNCH_FUSED_RES_LN(2 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 3) { + // 8193 - 12288 elems + LAUNCH_FUSED_RES_LN(3 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 4) { + // 12289 - 16384 elems + LAUNCH_FUSED_RES_LN(4 * internal_unRoll, maxThreads, maxThreads); + } +} + +#define LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(unRollFactor, threadsPerGroup, maxThreads) \ + fused_residual_ln \ + <<>>( \ + norm_output, res_output, vals, residual, bias, gamma, beta, epsilon, elems_per_row); + +template +void launch_fused_residual_ln_store_pre_ln_res(T* norm_output, + T* res_output, + const T* vals, + const T* residual, + const T* bias, + const T* gamma, + const T* beta, + float epsilon, + int rows, + int elems_per_row, + 
cudaStream_t stream) +{ + // 8 for __half, 4 for float + constexpr int T_per_load = ln::granularity / sizeof(T); + + constexpr int maxThreads = 256; + + // For Flaoat, unRoll 4, for __half, unRoll 2 + constexpr int internal_unRoll = sizeof(T) == 4 ? 4 : 2; + + const bool is_subblock_schedule = (elems_per_row <= 128) ? true : false; + const int h_per_step = is_subblock_schedule ? T_per_load : T_per_load * internal_unRoll; + + // Scheduling concern: may be slightly faster for some inputs to assign multiple stages of + // warp-sized blocks rather than stepping up to 64/96 threads + const int one_step_threads = next_pow2((elems_per_row + h_per_step - 1) / h_per_step); + const int threadsPerGroup = (one_step_threads < maxThreads) ? one_step_threads : maxThreads; + + const int groups_per_block_max = + is_subblock_schedule ? (maxThreads + threadsPerGroup - 1) / threadsPerGroup : 1; + const int groups_per_block = (rows < groups_per_block_max) ? rows : groups_per_block_max; + const int groups_launch = (groups_per_block + rows - 1) / groups_per_block; + + dim3 block(threadsPerGroup, groups_per_block); + dim3 grid(groups_launch); + + const int elems_per_step = threadsPerGroup * h_per_step; + const int external_unRoll = (elems_per_row + elems_per_step - 1) / elems_per_step; + + if (is_subblock_schedule) { + // <=128 + if (threadsPerGroup == 1) { + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1, 1, maxThreads); + } else if (threadsPerGroup == 2) { + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1, 2, maxThreads); + } else if (threadsPerGroup == 4) { + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1, 4, maxThreads); + } else if (threadsPerGroup == 8) { + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1, 8, maxThreads); + } else if (threadsPerGroup == 16) { + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1, 16, maxThreads); + } + } else if (external_unRoll == 1) { + // 129 - 4096 elems + // (this can launch with 1-7 warps as well) + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1 * internal_unRoll, maxThreads, maxThreads); + } else 
if (external_unRoll == 2) { + // 4097 - 8192 elems + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(2 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 3) { + // 8193 - 12288 elems + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(3 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 4) { + // 12289 - 16384 elems + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(4 * internal_unRoll, maxThreads, maxThreads); + } +} + +#define INSTANTIATE_RES_LN(T) \ + template void launch_fused_residual_ln( \ + T*, const T*, const T*, const T*, const T*, const T*, float, int, int, cudaStream_t); + +#define INSTANTIATE_PRE_LN_RES(T) \ + template void launch_fused_residual_ln_store_pre_ln_res( \ + T*, T*, const T*, const T*, const T*, const T*, const T*, float, int, int, cudaStream_t); + +INSTANTIATE_RES_LN(__half); +INSTANTIATE_RES_LN(float); +#ifdef BF16_AVAILABLE +INSTANTIATE_RES_LN(__nv_bfloat16); +#endif + +INSTANTIATE_PRE_LN_RES(__half); +INSTANTIATE_PRE_LN_RES(float); +#ifdef BF16_AVAILABLE +INSTANTIATE_PRE_LN_RES(__nv_bfloat16); +#endif diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/pointwise_ops.cu b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/pointwise_ops.cu new file mode 100644 index 0000000000000000000000000000000000000000..0301ff7770426994fc76cf3e6087586287c96c0f --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/pointwise_ops.cu @@ -0,0 +1,74 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include +#include "conversion_utils.h" +#include "ds_kernel_utils.h" +#include "memory_access_utils.h" + +namespace pwise { +constexpr int granularity = 16; +constexpr int unroll = 4; +constexpr int threads = 256; +} // namespace pwise + +template +__global__ void vector_add_kernel(T* out, const T* a, const T* b, float gamma, int num_elems) +{ + constexpr int T_per_access = pwise::granularity / sizeof(T); + + const int block_offset = blockIdx.x * pwise::threads * pwise::unroll * T_per_access; + const int thread_offset = threadIdx.x * T_per_access; + const int total_offset = block_offset + thread_offset; + constexpr int stride = pwise::threads * T_per_access; + +#pragma unroll + for (int i = 0; i < pwise::unroll; i++) { + T temp_buf_a[T_per_access], temp_buf_b[T_per_access]; + + const int iter_idx = total_offset + i * stride; + + mem_access::load_global(temp_buf_a, a + iter_idx, iter_idx < num_elems); + mem_access::load_global(temp_buf_b, b + iter_idx, iter_idx < num_elems); + +#pragma unroll + for (int j = 0; j < T_per_access; j++) { + float up_cast_a = conversion::to(temp_buf_a[j]); + float up_cast_b = conversion::to(temp_buf_b[j]); + temp_buf_a[j] = conversion::to((gamma * up_cast_a) + up_cast_b); + } + + if (iter_idx < num_elems) { + mem_access::store_global(out + iter_idx, temp_buf_a); + } + } +} + +template +void launch_vector_add(T* out, + const T* a, + const T* b, + float gamma, + int num_elems, + cudaStream_t stream) +{ + constexpr int T_per_access = pwise::granularity / sizeof(T); + constexpr int T_per_block = pwise::threads * T_per_access * pwise::unroll; + + dim3 block(pwise::threads); + dim3 grid((num_elems + T_per_block - 1) / T_per_block); + + vector_add_kernel<<>>(out, a, b, gamma, num_elems); +} + +#define INSTANTIATE_VECTOR_ADD(T) \ + template void launch_vector_add( \ + T * out, const T* a, const T* b, float gamma, int num_elems, cudaStream_t stream); + +INSTANTIATE_VECTOR_ADD(float) 
+INSTANTIATE_VECTOR_ADD(__half) +#ifdef BF16_AVAILABLE +INSTANTIATE_VECTOR_ADD(__nv_bfloat16) +#endif diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/pt_binding.cpp b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/pt_binding.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b7277d1e167826207a3d09e676efdad42a836f08 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/pt_binding.cpp @@ -0,0 +1,2020 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include +#include +#include +#include +#include "inference_context.h" +#include "inference_cublas_wrappers.h" +#include "inference_cuda_layers.h" + +std::array gemm_algos = std::array({99, 99, 99}); + +// NOTE: This activation function type enum should be always in sync +// with the python counterpart, otherwise the casting from python binding +// will be incorrect. +enum class ActivationFuncType { UNKNOWN = 0, GELU = 1, ReLU = 2, GATED_GELU = 3, GATED_SILU = 4 }; + +enum class NormType { UNKNOWN = 0, LayerNorm = 1, GroupNorm = 2, RMSNorm = 3 }; + +enum class TransformerType : uint8_t { UNKNOWN = 0, GPTType = 1, BERTType = 2 }; + +// NOTE: this is a temporary and dodgy solution to distinguish GPT and BERT style models +// based on the dimensions of the corresponding attention mask. +inline auto infer_transformer_type(at::Tensor& attn_mask) -> TransformerType +{ + auto attn_mask_num_dims = attn_mask.sizes().size(); + + if (attn_mask_num_dims > 2) { + return TransformerType::GPTType; + } else if (attn_mask_num_dims == 2) { + return TransformerType::BERTType; + } else { + return TransformerType::UNKNOWN; + } +} + +// infer stride of attention mask memory layout based on the model type. 
+inline auto get_attn_mask_stride(at::Tensor& attn_mask) -> int +{ + auto trnsfrmr_type = infer_transformer_type(attn_mask); + + if (trnsfrmr_type == TransformerType::GPTType) { + return attn_mask.size(2); + } else if (trnsfrmr_type == TransformerType::BERTType) { + // Bert style models have always a mask stride of 1. + return 1; + } else if (trnsfrmr_type == TransformerType::UNKNOWN) { + return 0; + } + + // this is just to make the compiler happy. + return 0; +} + +template +at::Tensor ds_softmax(at::Tensor& attn_scores, + at::Tensor& attn_mask, + at::Tensor& alibi, + bool triangular, + bool recompute, + bool local_attention, + int window_size, + bool async_op, + float layer_scale, + int head_offset, + int mp_size) +{ + auto attn_scores_c = attn_scores.contiguous(); + int bsz = attn_scores_c.size(0); + + int seq_len = attn_scores_c.size(1); + int len = attn_scores_c.sizes().size(); + if (len > 2) seq_len = attn_scores_c.size(2); + + int soft_len = attn_scores_c.size(2); + if (len > 3) soft_len = attn_scores_c.size(3); + + int heads = 1; + if (len > 1) heads = attn_scores_c.size(1); + + auto mask_stride = get_attn_mask_stride(attn_mask); + + launch_attn_softmax_v2((T*)attn_scores_c.data_ptr(), + (attn_mask.sizes().size() > 1 ? (T*)attn_mask.data_ptr() : nullptr), + (alibi.sizes().size() > 1 ? 
(T*)alibi.data_ptr() : nullptr), + layer_scale, + triangular, + recompute, + local_attention, + window_size, + bsz, + heads, + seq_len, + soft_len, + head_offset, + mask_stride, + mp_size, + InferenceContext::Instance().GetCurrentStream(async_op)); + + return attn_scores_c; +} + +template +void allocate_workspace(unsigned hidden_dim, + unsigned num_heads, + unsigned prompt_length, + unsigned batch_size, + unsigned num_layers, + unsigned mp_size = 1, + bool external_cache = false, + unsigned rank = 0, + unsigned max_out_tokens = 1024, + unsigned min_out_tokens = 1) +{ + InferenceContext::Instance().GenWorkSpace(num_layers, + num_heads, + batch_size, + prompt_length, + hidden_dim, + mp_size, + external_cache, + sizeof(T), + rank, + max_out_tokens, + min_out_tokens); +} + +template +at::Tensor einsum_sec_sm_ecm(at::Tensor& Q, at::Tensor& W) +{ + auto options = at::TensorOptions() + .dtype(Q.options().dtype()) + .layout(at::kStrided) + .device(at::kCUDA) + .requires_grad(false); + T* workspace = (T*)InferenceContext::Instance().GetWorkSpace(); + float alpha = 1; + float gemm_beta = 0.0; + + /* + // Reallocate memory if we received a new prompt + if (!workspace || input.size(1) != 1) { + allocate_workspace(W.size(1), InferenceContext::Instance().GetMaxTokenLength(), + Q.size(0), 1, head_size); workspace = (T*)InferenceContext::Instance().GetWorkSpace(); + } + */ + + auto O = at::from_blob(workspace, {Q.size(1), Q.size(2), W.size(1)}, options); + unsigned m = W.size(1); + unsigned n = Q.size(1) * Q.size(2); + unsigned k = Q.size(0); + cublas_gemm_ex(InferenceContext::Instance().GetCublasHandle(), + CUBLAS_OP_N, + CUBLAS_OP_T, + m, + n, + k, + &alpha, + &gemm_beta, + (T*)W.data_ptr(), + (T*)Q.data_ptr(), + (T*)O.data_ptr(), +#ifdef __HIP_PLATFORM_AMD__ + rocblas_gemm_algo_standard); +#else + CUBLAS_GEMM_DEFAULT_TENSOR_OP); +#endif + return O; +} + +template +void attention_unfused(at::Tensor& prev_key_cont, + at::Tensor& query_cont, + at::Tensor& attn_mask, + at::Tensor& 
prev_value_cont, + at::Tensor& output, + int& bsz, + int& seq_len, + int& soft_len, + int& heads, + float& norm_factor, + bool triangular, + bool recompute, + bool local_attention, + int window_size) +{ + auto options = at::TensorOptions() + .dtype(query_cont.options().dtype()) + .layout(at::kStrided) + .device(at::kCUDA) + .requires_grad(false); + float alpha = norm_factor; + float gemm_beta = 0.0; + auto attn_score = at::empty({bsz, heads, seq_len, soft_len}, options); + int k = prev_value_cont.size(2) / heads; + + auto mask_stride = get_attn_mask_stride(attn_mask); + + cublasSetStream(InferenceContext::Instance().GetCublasHandle(), + InferenceContext::Instance().GetCurrentStream()); + cublas_strided_batched_gemm(InferenceContext::Instance().GetCublasHandle(), + soft_len, + seq_len, + k, + &alpha, + &gemm_beta, + (T*)prev_key_cont.data_ptr(), + (T*)query_cont.data_ptr(), + (T*)attn_score.data_ptr(), + CUBLAS_OP_N, + CUBLAS_OP_N, + soft_len * k, + seq_len * k, + seq_len * soft_len, + bsz * heads, +#ifdef __HIP_PLATFORM_AMD__ + rocblas_gemm_algo_standard); +#else + CUBLAS_GEMM_DEFAULT_TENSOR_OP); +#endif + launch_attn_softmax_v2((T*)attn_score.data_ptr(), + (T*)(attn_mask.sizes().size() > 1 ? 
attn_mask.data_ptr() : nullptr), + (T*)nullptr, + 1.0, + triangular, + recompute, + local_attention, + window_size, + bsz, + heads, + seq_len, + soft_len, + 0, + mask_stride, + 1, + InferenceContext::Instance().GetCurrentStream(false)); + alpha = 1.0; + cublas_strided_batched_gemm(InferenceContext::Instance().GetCublasHandle(), + k, + seq_len, + soft_len, + &alpha, + &gemm_beta, + (T*)prev_value_cont.data_ptr(), + (T*)attn_score.data_ptr(), + (T*)output.data_ptr(), + CUBLAS_OP_N, + CUBLAS_OP_N, + soft_len * k, + seq_len * soft_len, + seq_len * k, + bsz * heads, +#ifdef __HIP_PLATFORM_AMD__ + rocblas_gemm_algo_standard); +#else + CUBLAS_GEMM_DEFAULT_TENSOR_OP); +#endif +} + +template +std::vector ds_softmax_context1(at::Tensor& query, + at::Tensor& prev_key, + at::Tensor& new_key, + at::Tensor& attn_mask, + at::Tensor& prev_value, + at::Tensor& new_value, + int heads, + float norm_factor, + bool merging, + bool triangular, + bool local_attention, + int window_size, + bool no_masking) +{ + auto query_cont = query.contiguous(); + auto prev_key_cont = prev_key.contiguous(); + auto prev_value_cont = prev_value.contiguous(); + + int new_size = (new_value.sizes().size() > 1 ? new_value.size(1) : 0); + + // Attn_Score [ batch Head Sequence-length Softmax-length] + + int bsz = query_cont.size(0); + int seq_len = query_cont.size(1); + int soft_len = prev_value.size(1); + + auto options = at::TensorOptions() + .dtype(query_cont.options().dtype()) + .layout(at::kStrided) + .device(at::kCUDA) + .requires_grad(false); + + auto output = + at::empty({prev_value.size(0), heads, seq_len, prev_value.size(2) / heads}, options); + attention_unfused(prev_key_cont, + query_cont, + attn_mask, //(no_masking ? 
nullptr : (T*)attn_mask.data_ptr()), + prev_value_cont, + output, + bsz, + seq_len, + soft_len, + heads, + norm_factor, + (triangular && (new_size == 0)), + (new_size == 0), + local_attention, + window_size); + + return {output, prev_key, prev_value}; +} + +template +void ds_softmax_internal(T* attn_scores, + at::Tensor& attn_mask, + at::Tensor& alibi, + float& layer_scale, + bool triangular, + bool recompute, + bool local_attention, + int window_size, + int bsz, + int seq_len, + int soft_len, + int heads) +{ + auto mask_stride = get_attn_mask_stride(attn_mask); + + launch_attn_softmax_v2((T*)attn_scores, + (attn_mask.sizes().size() > 1 ? (T*)attn_mask.data_ptr() : nullptr), + (alibi.sizes().size() > 1 ? (T*)alibi.data_ptr() : nullptr), + layer_scale, + triangular, + recompute, + local_attention, + window_size, + bsz, + heads, + seq_len, + soft_len, + 0, + mask_stride, + 1, + at::cuda::getCurrentCUDAStream()); +} + +template +void attention_unfused(T* prev_key_cont, + T* query_cont, + at::Tensor& attn_mask, + T* prev_value_cont, + T* output, + unsigned& bsz, + int& k, + unsigned& seq_len, + unsigned& soft_len, + int& heads, + float& norm_factor, + bool triangular, + bool recompute, + bool local_attention, + int window_size, + at::Tensor& alibi, + int layer_id) +{ + float layer_scale = alibi.sizes().size() > 1 ? 
std::max(1, layer_id) : 1.0; + float alpha = norm_factor * norm_factor / layer_scale; + float gemm_beta = 0.0; + T* workspace = (T*)InferenceContext::Instance().GetAttentionUnfusedWorkspace(); + + cublasSetStream(InferenceContext::Instance().GetCublasHandle(), + InferenceContext::Instance().GetCurrentStream()); + cublas_strided_batched_gemm(InferenceContext::Instance().GetCublasHandle(), + soft_len, + seq_len, + k, + &alpha, + &gemm_beta, + (T*)prev_key_cont, + (T*)query_cont, + workspace, + CUBLAS_OP_T, + CUBLAS_OP_N, + InferenceContext::Instance().GetMaxTokenLength() * k, + seq_len * k, + seq_len * soft_len, + bsz * heads, +#ifdef __HIP_PLATFORM_AMD__ + rocblas_gemm_algo_standard); +#else + CUBLAS_GEMM_DEFAULT_TENSOR_OP); +#endif + ds_softmax_internal(workspace, + attn_mask, + alibi, + layer_scale, + triangular, + recompute, + local_attention, + window_size, + bsz, + seq_len, + soft_len, + heads); + alpha = 1.0; + cublas_strided_batched_gemm(InferenceContext::Instance().GetCublasHandle(), + k, + seq_len, + soft_len, + &alpha, + &gemm_beta, + (T*)prev_value_cont, + workspace, + (T*)output, + CUBLAS_OP_N, + CUBLAS_OP_N, + InferenceContext::Instance().GetMaxTokenLength() * k, + seq_len * soft_len, + seq_len * k, + bsz * heads, +#ifdef __HIP_PLATFORM_AMD__ + rocblas_gemm_algo_standard); +#else + CUBLAS_GEMM_DEFAULT_TENSOR_OP); +#endif +} + +void reset_cache() { InferenceContext::Instance().reset_tokens(); } + +template +std::vector ds_softmax_context(at::Tensor& query_key_value, + at::Tensor& attn_mask, + int rotary_dim, + bool rotate_half, + bool rotate_every_two, + int heads, + int num_kv, + float norm_factor, + bool triangular, + bool local_attention, + int window_size, + bool no_masking, + unsigned layer_id, + unsigned num_layers, + at::Tensor& alibi, + float rope_theta) +{ + unsigned bsz = query_key_value.size(0); + unsigned seq_len = query_key_value.size(1); + int k = query_key_value.size(2) / (heads + 2 * (num_kv > 0 ? 
num_kv : heads)); + unsigned hidden_dim = heads * k; + + bool is_prompt = (seq_len > 1); + + if (is_prompt) InferenceContext::Instance().reset_tokens(seq_len); + unsigned soft_len = InferenceContext::Instance().current_tokens(); + + auto options = at::TensorOptions() + .dtype(query_key_value.options().dtype()) + .layout(at::kStrided) + .device(at::kCUDA) + .requires_grad(false); + + T* workspace = (T*)InferenceContext::Instance().GetWorkSpace(); + size_t buf_size = bsz * seq_len * hidden_dim; + auto output = torch::from_blob(workspace + 4 * buf_size, {bsz, seq_len, hidden_dim}, options); + + auto query_cont = workspace + 5 * buf_size; + size_t offset = + 10 * (hidden_dim * bsz * InferenceContext::Instance().GetMaxTokenLength()) + + layer_id * 2 * bsz * InferenceContext::Instance().GetMaxTokenLength() * hidden_dim; + unsigned all_tokens = soft_len; + auto kv_cache = workspace + offset + (hidden_dim / heads) * (is_prompt ? 0 : soft_len - 1); + size_t value_offset = bsz * InferenceContext::Instance().GetMaxTokenLength() * hidden_dim; + + T* temp_buf = (T*)output.data_ptr() + at::numel(output); + launch_bias_add_transform_0213((T*)query_cont, + kv_cache, + kv_cache + value_offset, + (T*)query_key_value.data_ptr(), + nullptr, + bsz, + seq_len, + (is_prompt ? 0 : soft_len - 1), + soft_len, + hidden_dim, + heads, + (num_kv > 0 ? num_kv : heads), + rotary_dim, + rotate_half, + rotate_every_two, + InferenceContext::Instance().GetCurrentStream(), + 3, + InferenceContext::Instance().GetMaxTokenLength(), + rope_theta); + if (rotary_dim > 0 && rotate_half) + launch_apply_rotary_pos_emb(query_cont, + kv_cache, + k, + seq_len, + rotary_dim, + (is_prompt ? 
0 : soft_len - 1), + heads, + bsz, + rope_theta, + InferenceContext::Instance().GetCurrentStream(), + InferenceContext::Instance().GetMaxTokenLength()); + + attention_unfused(workspace + offset, + (T*)query_cont, + attn_mask, + workspace + offset + value_offset, + temp_buf, + bsz, + k, + seq_len, + all_tokens, + heads, + norm_factor, + (triangular && is_prompt), + is_prompt, + local_attention, + window_size, + alibi, + layer_id); + launch_transform4d_0213((T*)output.data_ptr(), + temp_buf, + bsz, + heads, + seq_len, + output.size(2), + InferenceContext::Instance().GetCurrentStream(false), + 1); + + if (layer_id == num_layers - 1) InferenceContext::Instance().advance_tokens(); + auto prev_key = torch::from_blob(workspace + offset, + {bsz, heads, all_tokens, k}, + {hidden_dim * InferenceContext::Instance().GetMaxTokenLength(), + k * InferenceContext::Instance().GetMaxTokenLength(), + k, + 1}, + options); + + auto prev_value = + torch::from_blob(workspace + offset + value_offset, + {bsz, heads, all_tokens, k}, + {hidden_dim * InferenceContext::Instance().GetMaxTokenLength(), + k * InferenceContext::Instance().GetMaxTokenLength(), + k, + 1}, + options); + + return {output, prev_key, prev_value}; +} + +template +at::Tensor ds_bias_gelu(at::Tensor& input, at::Tensor& bias) +{ + auto input_cont = input.contiguous(); + + int bsz = input_cont.size(0) * input_cont.size(1); + int intermediate_size = input_cont.size(2); + + launch_bias_gelu((T*)input_cont.data_ptr(), + (T*)bias.data_ptr(), + intermediate_size, + bsz, + InferenceContext::Instance().GetCurrentStream()); + return input_cont; +} + +#define DISPATCH_GATED_ACT(T_TYPE, C_TYPE) \ + if (activation.options().dtype() == torch::T_TYPE) { \ + launch_gated_activation((C_TYPE*)output.data_ptr(), \ + (const C_TYPE*)activation.data_ptr(), \ + (const C_TYPE*)bias.data_ptr(), \ + rows, \ + out_channels, \ + channels, \ + activation_type == ActivationFuncType::GATED_GELU, \ + InferenceContext::Instance().GetCurrentStream()); \ + 
} + +at::Tensor ds_gated_activation(at::Tensor& activation, at::Tensor& bias, int actFun) +{ + /* + Used in FF of Stable diffusion + */ + + const ActivationFuncType activation_type = static_cast(actFun); + + assert(activation_type == ActivationFuncType::GATED_GELU || + activation_type == ActivationFuncType::GATED_SILU); + + const int batch_size = activation.size(0); + const int seq_len = activation.size(1); + const int channels = activation.size(2); + + const int rows = batch_size * seq_len; + // Dimensionality is cut in half + const int out_channels = channels / 2; + + auto output = at::empty({batch_size, seq_len, out_channels}, activation.options()); + + DISPATCH_GATED_ACT(kFloat, float); + DISPATCH_GATED_ACT(kHalf, __half); +#ifdef BF16_AVAILABLE + DISPATCH_GATED_ACT(kBFloat16, __nv_bfloat16); +#endif + + return output; +} + +template +at::Tensor ds_bias_relu(at::Tensor& input, at::Tensor& bias) +{ + auto input_cont = input.contiguous(); + + int bsz = input_cont.size(0) * input_cont.size(1); + int intermediate_size = input_cont.size(2); + + launch_bias_relu((T*)input_cont.data_ptr(), + (T*)bias.data_ptr(), + intermediate_size, + bsz, + InferenceContext::Instance().GetCurrentStream()); + return input_cont; +} + +template +at::Tensor ds_bias_add(at::Tensor& input, at::Tensor& bias) +{ + auto input_cont = input.contiguous(); + + int bsz = input_cont.size(0) * input_cont.size(1); + int hidden_size = input_cont.size(2); + + launch_bias_add((T*)input_cont.data_ptr(), + (T*)bias.data_ptr(), + hidden_size, + bsz, + InferenceContext::Instance().GetCurrentStream()); + return input_cont; +} + +template +at::Tensor ds_bias_residual(at::Tensor& input, at::Tensor& residual, at::Tensor& bias) +{ + auto input_cont = input.contiguous(); + auto residual_cont = residual.contiguous(); + + int bsz = input_cont.size(0) * input_cont.size(1); + // launch_bias_residual((T*)input_cont.data_ptr(), + // (T*)residual_cont.data_ptr(), + // (T*)bias.data_ptr(), + // bsz, + // 
input_cont.size(2), + // (bias.size(0) > 1), + // InferenceContext::Instance().GetCurrentStream()); + return input_cont; +} + +#define DISPATCH_LAYER_NORM(T_TYPE, C_TYPE) \ + if (input.options().dtype() == torch::T_TYPE) { \ + launch_fused_ln((C_TYPE*)output.data_ptr(), \ + (const C_TYPE*)input.data_ptr(), \ + (const C_TYPE*)gamma.data_ptr(), \ + (const C_TYPE*)beta.data_ptr(), \ + epsilon, \ + rows, \ + elems_per_row, \ + InferenceContext::Instance().GetCurrentStream()); \ + } + +at::Tensor ds_layer_norm(at::Tensor& input, at::Tensor& gamma, at::Tensor& beta, float epsilon) +{ + const int rows = input.size(0) * input.size(1); + const int elems_per_row = input.size(2); + auto output = at::empty_like(input); + + DISPATCH_LAYER_NORM(kFloat, float); + DISPATCH_LAYER_NORM(kHalf, __half); +#ifdef BF16_AVAILABLE + DISPATCH_LAYER_NORM(kBFloat16, __nv_bfloat16); +#endif + + return output; +} + +#define DISPATCH_RMS_NORM(T_TYPE, C_TYPE) \ + if (input.options().dtype() == torch::T_TYPE) { \ + launch_rms_norm((C_TYPE*)output.data_ptr(), \ + (C_TYPE*)nullptr, \ + (const C_TYPE*)input.data_ptr(), \ + (const C_TYPE*)nullptr, \ + (const C_TYPE*)gamma.data_ptr(), \ + epsilon, \ + rows, \ + elems_per_row, \ + InferenceContext::Instance().GetCurrentStream()); \ + } + +at::Tensor ds_rms_norm(at::Tensor& input, at::Tensor& gamma, float epsilon) +{ + // Get number of dims of tensor + int num_dims = input.dim(); + const int rows = (num_dims == 2) ? input.size(0) : input.size(0) * input.size(1); + const int elems_per_row = (num_dims == 2) ? 
input.size(1) : input.size(2); + + auto output = at::empty_like(input); + + DISPATCH_RMS_NORM(kFloat, float); + DISPATCH_RMS_NORM(kHalf, __half); +#ifdef BF16_AVAILABLE + DISPATCH_RMS_NORM(kBFloat16, __nv_bfloat16); +#endif + + return output; +} + +#define DISPATCH_PRE_RMS_NORM(T_TYPE, C_TYPE) \ + if (input.options().dtype() == torch::T_TYPE) { \ + launch_rms_norm((C_TYPE*)output.data_ptr(), \ + (C_TYPE*)res_out.data_ptr(), \ + (const C_TYPE*)input.data_ptr(), \ + (const C_TYPE*)residual.data_ptr(), \ + (const C_TYPE*)gamma.data_ptr(), \ + epsilon, \ + rows, \ + elems_per_row, \ + InferenceContext::Instance().GetCurrentStream()); \ + } + +std::vector ds_pre_rms_norm(at::Tensor& input, + at::Tensor& residual, + at::Tensor& gamma, + float epsilon) +{ + // Get number of dims of tensor + int num_dims = input.dim(); + const int rows = (num_dims == 2) ? input.size(0) : input.size(0) * input.size(1); + const int elems_per_row = (num_dims == 2) ? input.size(1) : input.size(2); + + auto output = at::empty_like(input); + auto res_out = at::empty_like(residual); + + DISPATCH_PRE_RMS_NORM(kFloat, float); + DISPATCH_PRE_RMS_NORM(kHalf, __half); +#ifdef BF16_AVAILABLE + DISPATCH_PRE_RMS_NORM(kBFloat16, __nv_bfloat16); +#endif + + return {output, res_out}; +} + +template +void ds_layer_norm_internal(T* workspace, + at::Tensor& input, + at::Tensor& gamma, + at::Tensor& beta, + float epsilon) +{ + int bsz = input.size(0) * input.size(1); + launch_fused_ln(workspace, + (const T*)input.data_ptr(), + (const T*)gamma.data_ptr(), + (const T*)beta.data_ptr(), + epsilon, + bsz, + input.size(2), + InferenceContext::Instance().GetCurrentStream()); +} + +#define DISPATCH_LAYER_NORM_RESIDUAL(T_TYPE, C_TYPE) \ + if (input.options().dtype() == torch::T_TYPE) { \ + launch_fused_residual_ln((C_TYPE*)output.data_ptr(), \ + (const C_TYPE*)input.data_ptr(), \ + (const C_TYPE*)residual.data_ptr(), \ + (const C_TYPE*)bias.data_ptr(), \ + (const C_TYPE*)gamma.data_ptr(), \ + (const 
C_TYPE*)beta.data_ptr(), \ + epsilon, \ + rows, \ + elems_per_row, \ + InferenceContext::Instance().GetCurrentStream()); \ + } + +/* Currently only used in unit testing */ +at::Tensor ds_layer_norm_residual(at::Tensor& input, + at::Tensor& bias, + at::Tensor& residual, + at::Tensor& gamma, + at::Tensor& beta, + float epsilon) +{ + const int rows = input.size(0) * input.size(1); + const int elems_per_row = input.size(2); + auto output = at::empty_like(input); + + DISPATCH_LAYER_NORM_RESIDUAL(kFloat, float); + DISPATCH_LAYER_NORM_RESIDUAL(kHalf, __half); +#ifdef BF16_AVAILABLE + DISPATCH_LAYER_NORM_RESIDUAL(kBFloat16, __nv_bfloat16); +#endif + + return output; +} + +#define DISPATCH_PRE_LAYER_NORM_RESIDUAL(T_TYPE, C_TYPE) \ + if (input.options().dtype() == torch::T_TYPE) { \ + launch_fused_residual_ln_store_pre_ln_res( \ + (C_TYPE*)norm_output.data_ptr(), \ + (C_TYPE*)res_output.data_ptr(), \ + (const C_TYPE*)input.data_ptr(), \ + (const C_TYPE*)residual.data_ptr(), \ + (const C_TYPE*)bias.data_ptr(), \ + (const C_TYPE*)gamma.data_ptr(), \ + (const C_TYPE*)beta.data_ptr(), \ + epsilon, \ + rows, \ + elems_per_row, \ + InferenceContext::Instance().GetCurrentStream()); \ + } + +/* Currently only used in unit testing */ +std::vector ds_layer_norm_residual_store_pre_ln_res(at::Tensor& input, + at::Tensor& bias, + at::Tensor& residual, + at::Tensor& gamma, + at::Tensor& beta, + float epsilon) +{ + const int rows = input.size(0) * input.size(1); + const int elems_per_row = input.size(2); + auto norm_output = at::empty_like(input); + auto res_output = at::empty_like(input); + + DISPATCH_PRE_LAYER_NORM_RESIDUAL(kFloat, float); + DISPATCH_PRE_LAYER_NORM_RESIDUAL(kHalf, __half); +#ifdef BF16_AVAILABLE + DISPATCH_PRE_LAYER_NORM_RESIDUAL(kBFloat16, __nv_bfloat16); +#endif + + return {norm_output, res_output}; +} + +template +void quantized_gemm(void* output, + T* input, + at::Tensor& weight, + at::Tensor& qscale, + int groups, + int bsz, + int hidden_size) +{ + // T* weight16 = 
(T*)InferenceContext::Instance().GetWorkSpace() + 12 * hidden_size * bsz; + + auto options = at::TensorOptions() + .dtype(at::kHalf) + .layout(at::kStrided) + .device(at::kCUDA) + .requires_grad(false); + auto tmp = torch::empty(weight.sizes(), options); + T* weight16 = (T*)tmp.data_ptr(); + launch_dequantize(weight16, + (int8_t*)weight.data_ptr(), + (float*)qscale.data_ptr(), + weight.size(0), + weight.size(1), + groups, + InferenceContext::Instance().GetCurrentStream()); + + float alpha = (T)1.0; + float gemm_beta = (T)0.0; + cublas_gemm_ex(InferenceContext::Instance().GetCublasHandle(), + CUBLAS_OP_T, + CUBLAS_OP_N, + weight.size(0), + bsz, + weight.size(1), + &alpha, + &gemm_beta, + weight16, + (T*)input, + (T*)output, +#ifdef __HIP_PLATFORM_AMD__ + rocblas_gemm_algo_standard); +#else + CUBLAS_GEMM_DEFAULT_TENSOR_OP); +#endif +} + +template +at::Tensor qkv_unfused_cublas(at::Tensor& output, + at::Tensor& input, + at::Tensor& weight, + at::Tensor& q_scale, + at::Tensor& bias, + at::Tensor& gamma, + at::Tensor& beta, + const float epsilon, + bool add_bias, + bool q_int8, + bool transposed_mode) +{ + int bsz = input.size(0) * input.size(1); + T* workspace = (T*)InferenceContext::Instance().GetWorkSpace(); + workspace += (3 * bsz * input.size(2)); + ds_layer_norm_internal(workspace, input, gamma, beta, epsilon); + + if (q_int8) { + quantized_gemm( + output.data_ptr(), workspace, weight, q_scale, q_scale.size(0), bsz, input.size(2)); + } else { + float alpha = (T)1.0; + float gemm_beta = (T)0.0; + + cublasSetStream(InferenceContext::Instance().GetCublasHandle(), + InferenceContext::Instance().GetCurrentStream()); + cublas_gemm_ex(InferenceContext::Instance().GetCublasHandle(), + (transposed_mode ? CUBLAS_OP_T : CUBLAS_OP_N), + CUBLAS_OP_N, + weight.size(transposed_mode ? 
0 : 1), + bsz, + input.size(2), + &alpha, + &gemm_beta, + (T*)weight.data_ptr(), + workspace, + (T*)output.data_ptr(), +#ifdef __HIP_PLATFORM_AMD__ + rocblas_gemm_algo_standard); +#else + CUBLAS_GEMM_DEFAULT_TENSOR_OP); +#endif + } + if (add_bias) + launch_bias_add((T*)output.data_ptr(), + (T*)bias.data_ptr(), + (transposed_mode || q_int8) ? weight.size(0) : weight.size(1), + bsz, + InferenceContext::Instance().GetCurrentStream()); + return torch::from_blob(workspace, input.sizes(), input.options()); +} + +template +std::vector ds_rms_qkv(at::Tensor& input, + at::Tensor& weight, + at::Tensor& q_scale, + at::Tensor& gamma, + const float epsilon, + bool q_int8, + bool transposed_mode) +{ + const int bsz = input.size(0) * input.size(1); + T* workspace = (T*)InferenceContext::Instance().GetWorkSpace(); + T* rms_norm_ptr = workspace + (3 * bsz * input.size(2)); + int out_size = (transposed_mode || q_int8) ? weight.size(0) : weight.size(1); + + auto options = at::TensorOptions() + .dtype(input.options().dtype()) + .layout(at::kStrided) + .device(at::kCUDA) + .requires_grad(false); + auto rms_norm = at::from_blob(rms_norm_ptr, input.sizes(), options); + auto output = at::from_blob(workspace, {input.size(0), input.size(1), out_size}, options); + + launch_rms_norm((T*)rms_norm.data_ptr(), + (T*)nullptr, + (const T*)input.data_ptr(), + (const T*)nullptr, + (const T*)gamma.data_ptr(), + epsilon, + bsz, + input.size(2), + InferenceContext::Instance().GetCurrentStream()); + + if (q_int8) { + quantized_gemm((T*)output.data_ptr(), + (T*)rms_norm.data_ptr(), + weight, + q_scale, + q_scale.size(0), + bsz, + input.size(2)); + } else { + float alpha = (T)1.0; + float gemm_beta = (T)0.0; + + cublasSetStream(InferenceContext::Instance().GetCublasHandle(), + InferenceContext::Instance().GetCurrentStream()); + cublas_gemm_ex(InferenceContext::Instance().GetCublasHandle(), + (transposed_mode ? CUBLAS_OP_T : CUBLAS_OP_N), + CUBLAS_OP_N, + weight.size(transposed_mode ? 
0 : 1), + bsz, + input.size(2), + &alpha, + &gemm_beta, + (T*)weight.data_ptr(), + (T*)rms_norm.data_ptr(), + (T*)output.data_ptr(), +#ifdef __HIP_PLATFORM_AMD__ + rocblas_gemm_algo_standard); +#else + CUBLAS_GEMM_DEFAULT_TENSOR_OP); +#endif + } + + return {output, rms_norm}; +} + +template +std::vector ds_qkv_gemm(at::Tensor& input, + at::Tensor& weight, + at::Tensor& q_scale, + at::Tensor& bias, + at::Tensor& gamma, + at::Tensor& beta, + const float epsilon, + bool add_bias, + bool q_int8, + bool transposed_mode) +{ + int bsz = input.size(0) * input.size(1); + T* workspace = (T*)InferenceContext::Instance().GetWorkSpace(); + int out_size = (transposed_mode || q_int8) ? weight.size(0) : weight.size(1); + + auto options = at::TensorOptions() + .dtype(input.options().dtype()) + .layout(at::kStrided) + .device(at::kCUDA) + .requires_grad(false); + + auto output = at::from_blob(workspace, {input.size(0), input.size(1), out_size}, options); + auto inp_norm = qkv_unfused_cublas(output, + input, + weight, + q_scale, + bias, + gamma, + beta, + epsilon, + add_bias, + q_int8, + transposed_mode); + + return {output, inp_norm}; +} + +template +void quantized_gemm(at::Tensor& output, + at::Tensor& input, + at::Tensor& weight, + at::Tensor& qscale, + int groups, + int merge_count) +{ + int bsz = input.size(0) * input.size(1); + auto options = at::TensorOptions() + .dtype(input.options().dtype()) + .layout(at::kStrided) + .device(at::kCUDA) + .requires_grad(false); + auto weight16 = at::empty({weight.size(0), weight.size(1)}, options); + + launch_dequantize((T*)weight16.data_ptr(), + (int8_t*)weight.data_ptr(), + (float*)qscale.data_ptr(), + weight.size(0), + weight.size(1), + groups, + merge_count, + InferenceContext::Instance().GetCurrentStream()); + + float alpha = (T)1.0; + float gemm_beta = (T)0.0; + cublas_gemm_ex(InferenceContext::Instance().GetCublasHandle(), + CUBLAS_OP_T, + CUBLAS_OP_N, + weight.size(0), + bsz, + input.size(2), + &alpha, + &gemm_beta, + 
(T*)weight16.data_ptr(), + (T*)input.data_ptr(), + (T*)output.data_ptr(), +#ifdef __HIP_PLATFORM_AMD__ + rocblas_gemm_algo_standard); +#else + CUBLAS_GEMM_DEFAULT_TENSOR_OP); +#endif +} + +template +at::Tensor ds_linear_layer(at::Tensor& input, + at::Tensor& weight, + at::Tensor& bias, + bool add_bias, + bool do_flash_attn, + int num_heads, + bool transposed_mode, + float rope_theta) +{ + auto input_cont = input.contiguous(); + auto options = at::TensorOptions() + .dtype(input_cont.options().dtype()) + .layout(at::kStrided) + .device(at::kCUDA) + .requires_grad(false); + + int head_size = input_cont.size(2) / num_heads; + int bsz = input.size(0) * input.size(1); + int out_size = transposed_mode ? weight.size(0) : weight.size(1); + T* workspace = (T*)InferenceContext::Instance().GetWorkSpace(); + auto output = at::from_blob(workspace, {input.size(0), input.size(1), out_size}, options); + + float alpha = (T)1.0; + float gemm_beta = (T)0.0; + cublasSetStream(InferenceContext::Instance().GetCublasHandle(), + InferenceContext::Instance().GetCurrentStream()); + + cublas_gemm_ex(InferenceContext::Instance().GetCublasHandle(), + (transposed_mode ? CUBLAS_OP_T : CUBLAS_OP_N), + CUBLAS_OP_N, + weight.size(transposed_mode ? 0 : 1), + bsz, + input_cont.size(2), + &alpha, + &gemm_beta, + (T*)weight.data_ptr(), + (T*)input_cont.data_ptr(), + (T*)output.data_ptr(), +#ifdef __HIP_PLATFORM_AMD__ + rocblas_gemm_algo_standard); +#else + CUBLAS_GEMM_DEFAULT_TENSOR_OP); +#endif + if (add_bias) + launch_bias_add((T*)output.data_ptr(), + (T*)bias.data_ptr(), + weight.size(transposed_mode ? 0 : 1), + bsz, + InferenceContext::Instance().GetCurrentStream()); + bool add_padding = (head_size % 32 != 0 && head_size < 64) || (head_size % 64 != 0); + if (do_flash_attn) { + if (add_padding) { + int padded_head_size = head_size < 32 ? 32 : (head_size < 64 ? 
64 : 128); + auto padded_output = workspace + output.numel(); + auto final_output = + padded_output + (input.size(0) * input.size(1) * 3 * num_heads * padded_head_size); + pad_data(padded_output, + workspace, + 3 * bsz * num_heads, + head_size, + padded_head_size, + InferenceContext::Instance().GetCurrentStream()); + + launch_bias_add_transform_0213( + final_output, + final_output + (input.size(0) * input.size(1) * num_heads * padded_head_size), + final_output + (input.size(0) * input.size(1) * 2 * num_heads * padded_head_size), + padded_output, + nullptr, + input.size(0), + input.size(1), + 0, + input.size(1), + (num_heads * padded_head_size), + num_heads, + -1, + -1, + false, + false, + InferenceContext::Instance().GetCurrentStream(), + 3, + input.size(1), + rope_theta); + return at::from_blob(final_output, + {3, input.size(0), num_heads, input.size(1), padded_head_size}, + options); + // return at::from_blob(padded_output, {input.size(0) * input.size(1), 3, num_heads, + // padded_head_size}, options); + } else { + auto final_output = workspace + output.numel(); + launch_bias_add_transform_0213( + final_output, + final_output + (input.size(0) * input.size(1) * input_cont.size(2)), + final_output + (input.size(0) * input.size(1) * 2 * input_cont.size(2)), + workspace, + nullptr, + input.size(0), + input.size(1), + 0, + input.size(1), + input_cont.size(2), + num_heads, + -1, + -1, + false, + false, + InferenceContext::Instance().GetCurrentStream(), + 3, + input.size(1), + rope_theta); + return at::from_blob( + final_output, {3, input.size(0), num_heads, input.size(1), head_size}, options); + // return at::from_blob(workspace, {input.size(0) * input.size(1), 3, num_heads, + // head_size}, options); + } + + } else + return output; +} + +template +std::vector add_padding(at::Tensor& query, at::Tensor& key, at::Tensor& value) +{ + int head_size = query.size(3); + int padded_head_size = head_size < 32 ? 32 : (head_size < 64 ? 
64 : 128); + T* workspace = (T*)InferenceContext::Instance().GetWorkSpace(); + T* key_pad_ptr = workspace + padded_head_size * query.size(0) * query.size(1) * query.size(2); + T* value_pad_ptr = key_pad_ptr + padded_head_size * query.size(0) * query.size(1) * 128; + pad_head_seq(workspace, + (T*)query.data_ptr(), + query.size(0) * query.size(1), + query.size(2), + query.size(2), + head_size, + padded_head_size, + InferenceContext::Instance().GetCurrentStream()); + pad_head_seq(key_pad_ptr, + (T*)key.data_ptr(), + query.size(0) * query.size(1), + key.size(2), + 128, + head_size, + padded_head_size, + InferenceContext::Instance().GetCurrentStream()); + pad_head_seq(value_pad_ptr, + (T*)value.data_ptr(), + query.size(0) * query.size(1), + key.size(2), + 128, + head_size, + padded_head_size, + InferenceContext::Instance().GetCurrentStream()); + return { + at::from_blob(workspace, + {query.size(0), query.size(1), query.size(2), padded_head_size}, + query.options()), + at::from_blob( + key_pad_ptr, {query.size(0), query.size(1), 128, padded_head_size}, query.options()), + at::from_blob( + value_pad_ptr, {query.size(0), query.size(1), 128, padded_head_size}, query.options())}; +} + +template +std::vector padd_add_transform(at::Tensor& query, + at::Tensor& key, + at::Tensor& value, + int heads, + bool add_padding) +{ + int head_size = query.size(2) / heads; + int key_value_length = add_padding ? 128 : key.size(1); + int padded_head_size = add_padding ? (head_size < 32 ? 32 : (head_size < 64 ? 
64 : 128)) + : head_size; + T* workspace = (T*)InferenceContext::Instance().GetWorkSpace(); + T* key_pad_ptr = workspace + padded_head_size * query.size(0) * heads * query.size(1); + T* value_pad_ptr = key_pad_ptr + padded_head_size * query.size(0) * heads * key_value_length; + launch_pad_add_transform_0213(workspace, + (T*)query.data_ptr(), + query.size(0), + query.size(2), + query.size(1), + query.size(1), + heads, + padded_head_size, + InferenceContext::Instance().GetCurrentStream()); + launch_pad_add_transform_0213(key_pad_ptr, + (T*)key.data_ptr(), + key.size(0), + key.size(2), + key.size(1), + key_value_length, + heads, + padded_head_size, + InferenceContext::Instance().GetCurrentStream()); + launch_pad_add_transform_0213(value_pad_ptr, + (T*)value.data_ptr(), + value.size(0), + value.size(2), + value.size(1), + key_value_length, + heads, + padded_head_size, + InferenceContext::Instance().GetCurrentStream()); + return { + at::from_blob( + workspace, {query.size(0), heads, query.size(1), padded_head_size}, query.options()), + at::from_blob(key_pad_ptr, + {query.size(0), heads, key_value_length, padded_head_size}, + query.options()), + at::from_blob(value_pad_ptr, + {query.size(0), heads, key_value_length, padded_head_size}, + query.options())}; +} + +template +at::Tensor ds_vector_matmul(at::Tensor& input, + at::Tensor& weight, + bool async_op, + at::Tensor& q_scale, + bool q_int8, + bool transposed_mode) +{ + auto options = at::TensorOptions() + .dtype(input.options().dtype()) + .layout(at::kStrided) + .device(at::kCUDA) + .requires_grad(false); + int out_size = (q_int8 || transposed_mode) ? 
weight.size(0) : weight.size(1); + int bsz = input.size(0) * input.size(1); + + T* workspace = (T*)InferenceContext::Instance().GetWorkSpace(); + auto output = at::from_blob(workspace, {input.size(0), input.size(1), out_size}, options); + if (q_int8) { + quantized_gemm(output.data_ptr(), + (T*)input.data_ptr(), + weight, + q_scale, + q_scale.size(0), + bsz, + input.size(2)); + } else { + float alpha = (T)1.0; + float gemm_beta = (T)0.0; + cublasSetStream(InferenceContext::Instance().GetCublasHandle(), + InferenceContext::Instance().GetCurrentStream(async_op)); + cublas_gemm_ex(InferenceContext::Instance().GetCublasHandle(), + (transposed_mode ? CUBLAS_OP_T : CUBLAS_OP_N), + CUBLAS_OP_N, + weight.size(transposed_mode ? 0 : 1), + bsz, + input.size(2), + &alpha, + &gemm_beta, + (T*)weight.data_ptr(), + (T*)input.data_ptr(), + (T*)output.data_ptr(), +#ifdef __HIP_PLATFORM_AMD__ + rocblas_gemm_algo_standard); +#else + CUBLAS_GEMM_DEFAULT_TENSOR_OP); +#endif + } + return output; +} + +template +at::Tensor ds_vector_matmul_int8(at::Tensor& input, + at::Tensor& weight, + at::Tensor& q_scale, + int groups, + int merge_count) +{ + auto input_cont = input.contiguous(); + auto options = at::TensorOptions() + .dtype(input_cont.options().dtype()) + .layout(at::kStrided) + .device(at::kCUDA) + .requires_grad(false); + + auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); + + quantized_gemm(output, input_cont, weight, q_scale, groups, merge_count); + return output; +} + +template +at::Tensor mlp_unfused_cublas(at::Tensor& output, + at::Tensor& input, + at::Tensor& residual, + at::Tensor& input_bias, + at::Tensor& weight, + at::Tensor& weight1, + at::Tensor& bias, + at::Tensor& gamma, + at::Tensor& beta, + const float epsilon, + bool preLayerNorm, + bool mlp_after_attn, + at::Tensor& q_scale, + at::Tensor& q_scale1, + bool q_int8, + ActivationFuncType act_func_type, + bool transposed_mode) +{ + int bsz = input.size(0) * input.size(1); + T* 
inp_norm = (T*)InferenceContext::Instance().GetWorkSpace() + torch::numel(input) + + torch::numel(output); + T* intermediate = inp_norm + torch::numel(input); + + if (mlp_after_attn) { + launch_fused_residual_ln((T*)inp_norm, + (const T*)input.data_ptr(), + (const T*)residual.data_ptr(), + (const T*)input_bias.data_ptr(), + (const T*)gamma.data_ptr(), + (const T*)beta.data_ptr(), + epsilon, + bsz, + input.size(2), + InferenceContext::Instance().GetCurrentStream()); + } else { + ds_layer_norm_internal(inp_norm, input, gamma, beta, epsilon); + } + if (q_int8) { + quantized_gemm( + intermediate, inp_norm, weight, q_scale, q_scale.size(0), bsz, input.size(2)); + } else { + float alpha = (T)1.0; + float gemm_beta = (T)0.0; + cublasSetStream(InferenceContext::Instance().GetCublasHandle(), + InferenceContext::Instance().GetCurrentStream()); + cublas_gemm_ex(InferenceContext::Instance().GetCublasHandle(), + (transposed_mode ? CUBLAS_OP_T : CUBLAS_OP_N), + CUBLAS_OP_N, + weight.size(transposed_mode ? 0 : 1), + bsz, + input.size(2), + &alpha, + &gemm_beta, + (T*)weight.data_ptr(), + inp_norm, + intermediate, +#ifdef __HIP_PLATFORM_AMD__ + rocblas_gemm_algo_standard); +#else + CUBLAS_GEMM_DEFAULT_TENSOR_OP); +#endif + } + if (act_func_type == ActivationFuncType::GELU) { + launch_bias_gelu(intermediate, + (T*)bias.data_ptr(), + (transposed_mode || q_int8) ? weight.size(0) : weight.size(1), + bsz, + InferenceContext::Instance().GetCurrentStream()); + } else if (act_func_type == ActivationFuncType::ReLU) { + launch_bias_relu(intermediate, + (T*)bias.data_ptr(), + (transposed_mode || q_int8) ? 
weight.size(0) : weight.size(1), + bsz, + InferenceContext::Instance().GetCurrentStream()); + } + + if (q_int8) { + quantized_gemm(output.data_ptr(), + intermediate, + weight1, + q_scale1, + q_scale1.size(0), + bsz, + input.size(2)); + } else { + float alpha = (T)1.0; + float gemm_beta = (T)0.0; + cublasSetStream(InferenceContext::Instance().GetCublasHandle(), + InferenceContext::Instance().GetCurrentStream()); + cublas_gemm_ex(InferenceContext::Instance().GetCublasHandle(), + (transposed_mode ? CUBLAS_OP_T : CUBLAS_OP_N), + CUBLAS_OP_N, + weight1.size(transposed_mode ? 0 : 1), + bsz, + weight1.size(transposed_mode ? 1 : 0), + &alpha, + &gemm_beta, + (T*)weight1.data_ptr(), + intermediate, + (T*)output.data_ptr(), +#ifdef __HIP_PLATFORM_AMD__ + rocblas_gemm_algo_standard); +#else + CUBLAS_GEMM_DEFAULT_TENSOR_OP); +#endif + } + + return torch::from_blob(inp_norm, input.sizes(), input.options()); +} + +template +std::vector ds_mlp_gemm(at::Tensor& input, + at::Tensor& residual, + at::Tensor& input_bias, + at::Tensor& weight_interm, + at::Tensor& weight_out, + at::Tensor& bias, + at::Tensor& gamma, + at::Tensor& beta, + const float epsilon, + bool preLayerNorm, + bool mlp_after_attn, + at::Tensor& q_scale, + at::Tensor& q_scale1, + bool q_int8, + int activation_type, + bool transposed_mode) +{ + auto options = at::TensorOptions() + .dtype(input.options().dtype()) + .layout(at::kStrided) + .device(at::kCUDA) + .requires_grad(false); + + int out_size = (q_int8 || transposed_mode) ? weight_out.size(0) : weight_out.size(1); + auto output = + at::from_blob((T*)InferenceContext::Instance().GetWorkSpace() + torch::numel(input), + {input.size(0), input.size(1), out_size}, + options); + int bsz = input.size(0) * input.size(1); + + auto act_func_type = static_cast(activation_type); + auto res_add = mlp_unfused_cublas(output, + mlp_after_attn ? 
input : residual, + residual, + input_bias, + weight_interm, + weight_out, + bias, + gamma, + beta, + epsilon, + preLayerNorm, + mlp_after_attn, + q_scale, + q_scale1, + q_int8, + act_func_type, + transposed_mode); + + return {output, res_add}; +} + +template +std::vector ds_rms_mlp_gemm(at::Tensor& input, + at::Tensor& residual, + at::Tensor& weight_interm, + at::Tensor& weight_out, + at::Tensor& gamma, + const float epsilon, + at::Tensor& q_scale, + at::Tensor& q_scale1, + bool q_int8, + int activation_type, + bool transposed_mode) +{ + const int bsz = input.size(0) * input.size(1); + const size_t input_neurons = input.size(2); + const size_t mlp_1_out_neurons = transposed_mode ? weight_interm.size(0) + : weight_interm.size(1); + const size_t mlp_2_in_neurons = transposed_mode ? weight_out.size(1) : weight_out.size(0); + + auto options = at::TensorOptions() + .dtype(input.options().dtype()) + .layout(at::kStrided) + .device(at::kCUDA) + .requires_grad(false); + + T* output_ptr = (T*)InferenceContext::Instance().GetWorkSpace() + torch::numel(input); + T* inp_norm_ptr = output_ptr + torch::numel(input); + T* intermediate_ptr = inp_norm_ptr + torch::numel(input); + + auto output = at::from_blob(output_ptr, input.sizes(), options); + auto inp_norm = at::from_blob(inp_norm_ptr, input.sizes(), options); + auto intermediate_gemm = + at::from_blob(intermediate_ptr, {input.size(0), input.size(1), mlp_1_out_neurons}, options); + + auto act_func_type = static_cast(activation_type); + + // RMS Norm, we'll update the residual in-place + launch_rms_norm((T*)inp_norm.data_ptr(), + (T*)residual.data_ptr(), + (const T*)input.data_ptr(), + (const T*)residual.data_ptr(), + (const T*)gamma.data_ptr(), + epsilon, + bsz, + input_neurons, + InferenceContext::Instance().GetCurrentStream()); + + if (q_int8) { + quantized_gemm(intermediate_ptr, + (T*)inp_norm.data_ptr(), + weight_interm, + q_scale, + q_scale.size(0), + bsz, + input_neurons); + } else { + float alpha = (T)1.0; + float 
gemm_beta = (T)0.0; + cublasSetStream(InferenceContext::Instance().GetCublasHandle(), + InferenceContext::Instance().GetCurrentStream()); + cublas_gemm_ex(InferenceContext::Instance().GetCublasHandle(), + (transposed_mode ? CUBLAS_OP_T : CUBLAS_OP_N), + CUBLAS_OP_N, + mlp_1_out_neurons, + bsz, + input_neurons, + &alpha, + &gemm_beta, + (T*)weight_interm.data_ptr(), + (T*)inp_norm.data_ptr(), + intermediate_ptr, +#ifdef __HIP_PLATFORM_AMD__ + rocblas_gemm_algo_standard); +#else + CUBLAS_GEMM_DEFAULT_TENSOR_OP); +#endif + } + + if (act_func_type == ActivationFuncType::GELU) { + launch_bias_gelu(intermediate_ptr, + (T*)nullptr, + mlp_1_out_neurons, + bsz, + InferenceContext::Instance().GetCurrentStream()); + } else if (act_func_type == ActivationFuncType::ReLU) { + launch_bias_relu(intermediate_ptr, + (T*)nullptr, + mlp_1_out_neurons, + bsz, + InferenceContext::Instance().GetCurrentStream()); + } else if (act_func_type == ActivationFuncType::GATED_GELU) { + launch_gated_activation(intermediate_ptr, + (const T*)intermediate_ptr, + (const T*)nullptr, + bsz, + mlp_1_out_neurons, + mlp_1_out_neurons, + true, + InferenceContext::Instance().GetCurrentStream()); + } else if (act_func_type == ActivationFuncType::GATED_SILU) { + launch_gated_activation(intermediate_ptr, + (const T*)intermediate_ptr, + (const T*)nullptr, + bsz, + mlp_1_out_neurons, + mlp_1_out_neurons, + false, + InferenceContext::Instance().GetCurrentStream()); + } + + if (q_int8) { + quantized_gemm(output.data_ptr(), + intermediate_ptr, + weight_out, + q_scale1, + q_scale1.size(0), + bsz, + input.size(2)); + } else { + float alpha = (T)1.0; + float gemm_beta = (T)0.0; + cublasSetStream(InferenceContext::Instance().GetCublasHandle(), + InferenceContext::Instance().GetCurrentStream()); + cublas_gemm_ex(InferenceContext::Instance().GetCublasHandle(), + (transposed_mode ? 
CUBLAS_OP_T : CUBLAS_OP_N), + CUBLAS_OP_N, + input_neurons, + bsz, + mlp_2_in_neurons, + &alpha, + &gemm_beta, + (T*)weight_out.data_ptr(), + intermediate_ptr, + (T*)output.data_ptr(), +#ifdef __HIP_PLATFORM_AMD__ + rocblas_gemm_algo_standard, +#else + CUBLAS_GEMM_DEFAULT_TENSOR_OP, +#endif + mlp_1_out_neurons); + } + + return {output, residual}; +} + +template +at::Tensor fused_gemm_gelu(at::Tensor& input, + at::Tensor& weight, + at::Tensor& weight_scale, + at::Tensor& bias, + at::Tensor& weight_out, + at::Tensor& weight_out_scale, + bool q_int8, + bool transposed_mode) +{ + auto options = at::TensorOptions() + .dtype(input.options().dtype()) + .layout(at::kStrided) + .device(at::kCUDA) + .requires_grad(false); + + int intm_dim = (transposed_mode || q_int8) ? weight.size(0) : weight.size(1); + + // auto output = at::from_blob((T*)InferenceContext::Instance().GetWorkSpace() + + // torch::numel(input), + // {input.size(0), input.size(1), out_size}, + // options); + // T* intermediate = (T*)input.data_ptr() + torch::numel(input); + auto intermediate = at::empty({input.size(0), input.size(1), intm_dim}, options); + + int bsz = input.size(0) * input.size(1); + + float alpha = (T)1.0; + float gemm_beta = (T)0.0; + if (q_int8) { + quantized_gemm(intermediate.data_ptr(), + (T*)input.data_ptr(), + weight, + weight_scale, + weight_scale.size(0), + bsz, + input.size(2)); + } else { + cublasSetStream(InferenceContext::Instance().GetCublasHandle(), + InferenceContext::Instance().GetCurrentStream()); + cublas_gemm_ex(InferenceContext::Instance().GetCublasHandle(), + (transposed_mode ? 
CUBLAS_OP_T : CUBLAS_OP_N), + CUBLAS_OP_N, + intm_dim, + bsz, + input.size(2), + &alpha, + &gemm_beta, + (T*)weight.data_ptr(), + (T*)input.data_ptr(), + (T*)intermediate.data_ptr(), +#ifdef __HIP_PLATFORM_AMD__ + rocblas_gemm_algo_standard); +#else + CUBLAS_GEMM_DEFAULT_TENSOR_OP); +#endif + } + launch_bias_gelu((T*)intermediate.data_ptr(), + (T*)bias.data_ptr(), + intm_dim, + bsz, + InferenceContext::Instance().GetCurrentStream()); + + int out_size = (transposed_mode || q_int8) ? weight_out.size(0) : weight_out.size(1); + auto output = at::empty({input.size(0), input.size(1), out_size}, options); + if (q_int8) { + quantized_gemm(output.data_ptr(), + (T*)intermediate.data_ptr(), + weight_out, + weight_out_scale, + weight_out_scale.size(0), + bsz, + input.size(2)); + } else { + cublas_gemm_ex(InferenceContext::Instance().GetCublasHandle(), + (transposed_mode ? CUBLAS_OP_T : CUBLAS_OP_N), + CUBLAS_OP_N, + out_size, + bsz, + intm_dim, + &alpha, + &gemm_beta, + (T*)weight_out.data_ptr(), + (T*)intermediate.data_ptr(), + (T*)output.data_ptr(), +#ifdef __HIP_PLATFORM_AMD__ + rocblas_gemm_algo_standard); +#else + CUBLAS_GEMM_DEFAULT_TENSOR_OP); +#endif + } + // cudaEventRecord(InferenceContext::Instance().GetCompEvent(2), + // InferenceContext::Instance().GetCurrentStream(true)); + return output; +} + +template +at::Tensor& residual_add_bias(at::Tensor& hidden_state, + at::Tensor& residual, + const at::Tensor& attention_output, + const at::Tensor& attention_bias, + const at::Tensor& final_bias, + const int mp_size, + const bool mlp_after_attn, + const bool add_bias, + const bool preln) +{ + int bsz = residual.size(0) * residual.size(1); + int hidden_size = residual.size(2); + if (mlp_after_attn) + launch_bias_residual(static_cast(residual.data_ptr()), + static_cast(hidden_state.data_ptr()), + static_cast(attention_output.data_ptr()), + static_cast(final_bias.data_ptr()), + static_cast(attention_bias.data_ptr()), + bsz, + hidden_size, + mp_size, + preln, + 
InferenceContext::Instance().GetCurrentStream()); + else + launch_gptj_residual_add( + static_cast(residual.data_ptr()), + static_cast(hidden_state.data_ptr()), + static_cast(attention_output.data_ptr()), + static_cast(final_bias.data_ptr()), + static_cast((add_bias ? attention_bias.data_ptr() : nullptr)), + hidden_size, + bsz, + mp_size, + InferenceContext::Instance().GetCurrentStream()); + return residual; +} + +#define DISPATCH_VECTOR_ADD(T_TYPE, C_TYPE) \ + if (a.scalar_type() == at::k##T_TYPE) { \ + launch_vector_add((C_TYPE*)(a.data_ptr()), \ + (const C_TYPE*)(a.data_ptr()), \ + (const C_TYPE*)(b.data_ptr()), \ + gamma, \ + total_elems, \ + InferenceContext::Instance().GetCurrentStream()); \ + } + +at::Tensor& _vector_add(at::Tensor& a, at::Tensor& b, float gamma) +{ + const int total_elems = a.numel(); + + DISPATCH_VECTOR_ADD(Float, float) + DISPATCH_VECTOR_ADD(Half, __half) +#ifdef BF16_AVAILABLE + DISPATCH_VECTOR_ADD(BFloat16, __nv_bfloat16) +#endif + + return a; +} + +std::vector apply_rotary_pos_emb(at::Tensor& mixed_query, + at::Tensor& key_layer, + unsigned rotary_dim, + unsigned offset, + unsigned num_heads, + bool rotate_half, + float rope_theta) +{ + auto query_cont = mixed_query.contiguous(); + auto key_cont = key_layer.contiguous(); + + unsigned bsz = mixed_query.size(0); + unsigned head_size = mixed_query.size(2) / num_heads; + unsigned seq_len = mixed_query.size(1); + + if (mixed_query.scalar_type() == at::kFloat) + launch_apply_rotary_pos_emb((float*)query_cont.data_ptr(), + (float*)key_cont.data_ptr(), + head_size, + seq_len, + rotary_dim, + offset, + num_heads, + bsz, + rope_theta, + InferenceContext::Instance().GetCurrentStream(), + InferenceContext::Instance().GetMaxTokenLength()); + else + launch_apply_rotary_pos_emb<__half>((__half*)query_cont.data_ptr(), + (__half*)key_cont.data_ptr(), + head_size, + seq_len, + rotary_dim, + offset, + num_heads, + bsz, + rope_theta, + InferenceContext::Instance().GetCurrentStream(), + 
InferenceContext::Instance().GetMaxTokenLength()); + return {query_cont, key_cont}; +} + +#define DISPATCH_MOE_RESIDUAL(T_TYPE, C_TYPE) \ + if (moe_res.scalar_type() == torch::T_TYPE) { \ + launch_moe_res_matmul((C_TYPE*)moe_res.data_ptr(), \ + (C_TYPE*)coef.data_ptr(), \ + (C_TYPE*)output.data_ptr(), \ + M, \ + N, \ + InferenceContext::Instance().GetCurrentStream()); \ + } + +at::Tensor moe_res_matmul(at::Tensor& moe_res, at::Tensor& coef, at::Tensor& output) +{ + int M = moe_res.size(0) * moe_res.size(1); + int N = moe_res.size(2); + InferenceContext::Instance().SynchComm(); + + DISPATCH_MOE_RESIDUAL(kFloat, float) + DISPATCH_MOE_RESIDUAL(kHalf, __half) +#ifdef BF16_AVAILABLE + DISPATCH_MOE_RESIDUAL(kBFloat16, __nv_bfloat16) +#endif + + return output; +} + +void ds_release_workspace() { InferenceContext::Instance().release_workspace(); } + +bool ds_retake_workspace() { return InferenceContext::Instance().retake_workspace(); } + +template +at::Tensor ds_dequantize(at::Tensor& weight, at::Tensor& qscale, int groups) +{ + auto options = at::TensorOptions() + .dtype(torch::kFloat16) + .layout(at::kStrided) + .device(at::kCUDA) + .requires_grad(false); + auto weight16 = at::empty({weight.size(0), weight.size(1)}, options); + + launch_dequantize((T*)weight16.data_ptr(), + (int8_t*)weight.data_ptr(), + (float*)qscale.data_ptr(), + weight.size(0), + weight.size(1), + groups, + InferenceContext::Instance().GetCurrentStream()); + + return weight16; +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) +{ + m.def("softmax_context_int8", + &ds_softmax_context1<__half>, + "DeepSpeed attention with int8 (CUDA)"); + + // The following functions handle type dispatching internally + m.def("gated_activation", &ds_gated_activation, "DeepSpeed Bias GEGLU (CUDA)"); + m.def("layer_norm", &ds_layer_norm, "DeepSpeed layer norm (CUDA)"); + m.def( + "_layer_norm_residual", &ds_layer_norm_residual, "DeepSpeed layer norm + residual (CUDA)"); + m.def("layer_norm_residual_store_pre_ln_res", + 
&ds_layer_norm_residual_store_pre_ln_res, + "DeepSpeed layer norm + store pre Layernorm residual (CUDA)"); + m.def("rms_norm", &ds_rms_norm, "DeepSpeed rms norm (CUDA)"); + m.def("pre_rms_norm", &ds_pre_rms_norm, "DeepSpeed pre rms norm (CUDA)"); + m.def("_vector_add", &_vector_add, "DeepSpeed vector add (CUDA)"); + m.def("apply_rotary_pos_emb", &apply_rotary_pos_emb, "DeepSpeed mlp with fp16 (CUDA)"); + m.def("moe_res_matmul", &moe_res_matmul, "DeepSpeed moe residual matmul (CUDA)"); + m.def("reset_cache", &reset_cache, "Reset Cache for generation tasks"); + m.def("release_workspace", &ds_release_workspace, "DeepSpeed Release Workspace"); + m.def("retake_workspace", &ds_retake_workspace, "DeepSpeed Retake Workspace"); + + // The following functions are templated and need to be explicitly instantiated and bound + // to different python methods +#define DEF_OPS(_name, _dtype) \ + m.def("softmax_" #_name, &ds_softmax<_dtype>, "DeepSpeed SoftMax with " #_name " (CUDA)"); \ + m.def("softmax_context_" #_name, \ + &ds_softmax_context<_dtype>, \ + "DeepSpeed attention with " #_name " (CUDA)"); \ + m.def("bias_gelu_" #_name, &ds_bias_gelu<_dtype>, "DeepSpeed Gelu with " #_name " (CUDA)"); \ + m.def("bias_add_" #_name, &ds_bias_add<_dtype>, "DeepSpeed Bias Add with " #_name " (CUDA)"); \ + m.def("bias_relu_" #_name, &ds_bias_relu<_dtype>, "DeepSpeed ReLU with " #_name " (CUDA)"); \ + m.def("bias_residual_" #_name, \ + &ds_bias_residual<_dtype>, \ + "DeepSpeed residual-bias add with " #_name " (CUDA)"); \ + m.def("qkv_gemm_" #_name, &ds_qkv_gemm<_dtype>, "DeepSpeed qkv gemm with " #_name " (CUDA)"); \ + m.def("rms_qkv_gemm_" #_name, \ + &ds_rms_qkv<_dtype>, \ + "DeepSpeed rms qkv gemm with " #_name " (CUDA)"); \ + m.def("mlp_gemm_" #_name, &ds_mlp_gemm<_dtype>, "DeepSpeed mlp with " #_name " (CUDA)"); \ + m.def("rms_mlp_gemm_" #_name, \ + &ds_rms_mlp_gemm<_dtype>, \ + "DeepSpeed rms mlp gemm with " #_name " (CUDA)"); \ + m.def("vector_matmul_" #_name, \ + 
&ds_vector_matmul<_dtype>, \ + "DeepSpeed vector-MM with " #_name " (CUDA)"); \ + m.def("linear_layer_" #_name, \ + &ds_linear_layer<_dtype>, \ + "DeepSpeed linear_layer with " #_name " (CUDA)"); \ + m.def("fused_gemm_gelu_" #_name, \ + &fused_gemm_gelu<_dtype>, \ + "DeepSpeed mlp with " #_name " (CUDA)"); \ + m.def("residual_add_bias_" #_name, \ + &residual_add_bias<_dtype>, \ + "DeepSpeed residual add with " #_name " (CUDA)"); \ + m.def("einsum_sec_sm_ecm_" #_name, \ + &einsum_sec_sm_ecm<_dtype>, \ + "DeepSpeed vector-MM with " #_name " (CUDA)"); \ + m.def("add_padding_" #_name, \ + &add_padding<_dtype>, \ + "DeepSpeed residual add with " #_name " (CUDA)"); \ + m.def("pad_transform_" #_name, \ + &padd_add_transform<_dtype>, \ + "DeepSpeed residual add with " #_name " (CUDA)"); \ + m.def("allocate_workspace_" #_name, \ + &allocate_workspace<_dtype>, \ + "DeepSpeed memory allocation for GPT inference with " #_name " (CUDA)"); \ + m.def("dequantize_" #_name, \ + &ds_dequantize<_dtype>, \ + "DeepSpeed dequantize with " #_name " (CUDA)") + + DEF_OPS(fp32, float); + DEF_OPS(fp16, __half); +#ifdef BF16_AVAILABLE + DEF_OPS(bf16, __nv_bfloat16); +#endif +} diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/relu.cu b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/relu.cu new file mode 100644 index 0000000000000000000000000000000000000000..40926b776cf2f0bd3c46f44c6eda2ed3ec6bfce5 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/relu.cu @@ -0,0 +1,71 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "conversion_utils.h" +#include "inference_cuda_layers.h" +#include "memory_access_utils.h" + +namespace cg = cooperative_groups; +#define MAX_CAP 4 +#define MAX_SEQ 2048 + +inline __device__ float relu(const float x) { return x < 0 ? 
0 : x; } + +/* +In-place relu(biasAdd(x)) for channels last +*/ +template +__global__ void fused_bias_relu(T* input, const T* bias, int total_count, int intermediate_size) +{ + // Input restriction: intermediate_size % vals_per_access == 0 + constexpr int granularity = 16; + constexpr int values_per_access = granularity / sizeof(T); + const int offset = (blockIdx.x * blockDim.x + threadIdx.x) * values_per_access; + + if (offset < total_count) { + T data[values_per_access]; + T data_bias[values_per_access]; + mem_access::load_global(data, input + offset); + mem_access::load_global( + data_bias, bias + (offset % intermediate_size), bias != nullptr); + +#pragma unroll + for (int i = 0; i < values_per_access; i++) { + float data_f = conversion::to(data[i]); + float bias_f = conversion::to(data_bias[i]); + data[i] = conversion::to(relu(data_f + bias_f)); + } + + mem_access::store_global(input + offset, data); + } +} + +template +void launch_bias_relu(T* input, + const T* bias, + int intermediate_size, + int batch_size, + cudaStream_t stream) +{ + constexpr int threads = 1024; + constexpr int granularity = 16; + + const int total_count = batch_size * intermediate_size; + const int elems_per_block = threads * (granularity / sizeof(T)); + dim3 block_dims(threads); + dim3 grid_dims((total_count + elems_per_block - 1) / elems_per_block); + + fused_bias_relu<<>>( + input, bias, total_count, intermediate_size); +} + +#define INSTANTIATE_LAUNCH_BIAS_RELU(T) \ + template void launch_bias_relu(T*, const T*, int, int, cudaStream_t); + +INSTANTIATE_LAUNCH_BIAS_RELU(float) +#ifdef BF16_AVAILABLE +INSTANTIATE_LAUNCH_BIAS_RELU(__nv_bfloat16) +#endif +INSTANTIATE_LAUNCH_BIAS_RELU(__half) diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/rms_norm.cu b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/rms_norm.cu new file mode 100644 index 
0000000000000000000000000000000000000000..5f72a4193752cc54b8ee4e1b375e19ef5c3d1214 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/rms_norm.cu @@ -0,0 +1,263 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "conversion_utils.h" +#include "ds_kernel_utils.h" +#include "inference_cuda_layers.h" +#include "memory_access_utils.h" +#include "reduction_utils.h" + +namespace cg = cooperative_groups; +using rop = reduce::ROpType; + +namespace rms { +constexpr int granularity = 16; +} // namespace rms + +template +__global__ void rms_norm(T* output, const T* vals, const T* gamma, float epsilon, int elems_per_row) +{ + constexpr int T_per_load = rms::granularity / sizeof(T); + + cg::thread_block tb = cg::this_thread_block(); + cg::thread_block_tile warp = cg::tiled_partition(tb); + + // X-dimension of the block + const int block_offset = (tb.group_index().x * (maxThreads / threadsPerGroup) * elems_per_row) + + (tb.thread_index().y * elems_per_row); + const int thread_offset = tb.thread_index().x * T_per_load; + const int base_offset = block_offset + thread_offset; + const int stride = blockDim.x * T_per_load; + + float var_sum = reduce::init(); + + const T* input_base = vals + base_offset; + + T local_buffer[UNROLL * T_per_load]; + +#pragma unroll + for (int i = 0; i < UNROLL; i++) { + T* iteration_buffer = local_buffer + (i * T_per_load); + + mem_access::load_global(iteration_buffer, + input_base + (i * stride), + thread_offset + (i * stride) < elems_per_row); + +#pragma unroll + for (int j = 0; j < T_per_load; j++) { + float up_cast = conversion::to(iteration_buffer[j]); + float sq_val = up_cast * up_cast; + var_sum = reduce::element(var_sum, sq_val); + } + } + + reduce::partitioned_block(tb, warp, var_sum); + const float var = var_sum / elems_per_row; + const T denom = conversion::to(__frsqrt_rn(var + epsilon)); + + T* block_output = output + 
block_offset; + +#pragma unroll + for (int i = 0; i < UNROLL; i++) { + T* iteration_buffer = local_buffer + (i * T_per_load); + const int iter_idx = i * stride + thread_offset; + const bool do_loads = (iter_idx < elems_per_row); + + T gamma_local[T_per_load]; + + mem_access::load_global(gamma_local, gamma + iter_idx, do_loads); + +#pragma unroll + for (int j = 0; j < T_per_load; j++) { + iteration_buffer[j] *= denom; + iteration_buffer[j] *= gamma_local[j]; + } + + if (do_loads) { + mem_access::store_global(block_output + iter_idx, iteration_buffer); + } + } +} + +template +__global__ void pre_rms_norm(T* output, + T* res_out, + const T* vals, + const T* residual, + const T* gamma, + float epsilon, + int elems_per_row) +{ + constexpr int T_per_load = rms::granularity / sizeof(T); + + cg::thread_block tb = cg::this_thread_block(); + cg::thread_block_tile warp = cg::tiled_partition(tb); + + // X-dimension of the block + const int block_offset = (tb.group_index().x * (maxThreads / threadsPerGroup) * elems_per_row) + + (tb.thread_index().y * elems_per_row); + const int thread_offset = tb.thread_index().x * T_per_load; + const int base_offset = block_offset + thread_offset; + const int stride = blockDim.x * T_per_load; + + float var_sum = reduce::init(); + + const T* input_base = vals + base_offset; + const T* residual_base = residual + base_offset; + T* res_output = res_out + base_offset; + + T local_buffer[UNROLL * T_per_load]; + +#pragma unroll + for (int i = 0; i < UNROLL; i++) { + T* iteration_buffer = local_buffer + (i * T_per_load); + T residual_buffer[T_per_load]; + + const int iter_offset = i * stride + thread_offset; + const bool do_loads = (iter_offset < elems_per_row); + + mem_access::load_global( + iteration_buffer, input_base + (i * stride), do_loads); + mem_access::load_global( + residual_buffer, residual_base + (i * stride), do_loads); + +#pragma unroll + for (int j = 0; j < T_per_load; j++) { + iteration_buffer[j] += residual_buffer[j]; + float 
vals_up_cast = conversion::to(iteration_buffer[j]); + + var_sum = reduce::element(var_sum, vals_up_cast * vals_up_cast); + } + + if (do_loads) { + mem_access::store_global(res_output + i * stride, iteration_buffer); + } + } + + reduce::partitioned_block(tb, warp, var_sum); + const float var = var_sum / elems_per_row; + const T denom = conversion::to(__frsqrt_rn(var + epsilon)); + + T* block_output = output + block_offset; + +#pragma unroll + for (int i = 0; i < UNROLL; i++) { + T* iteration_buffer = local_buffer + (i * T_per_load); + const int iter_idx = i * stride + thread_offset; + const bool do_loads = (iter_idx < elems_per_row); + + T gamma_local[T_per_load]; + + mem_access::load_global(gamma_local, gamma + iter_idx, do_loads); + +#pragma unroll + for (int j = 0; j < T_per_load; j++) { + iteration_buffer[j] *= denom; + iteration_buffer[j] *= gamma_local[j]; + } + + if (do_loads) { + mem_access::store_global(block_output + iter_idx, iteration_buffer); + } + } +} + +#define LAUNCH_RMS_NORM(UNROLL, threadsPerGroup, maxThreads) \ + rms_norm \ + <<>>(norm_output, vals, gamma, epsilon, elems_per_row); + +#define LAUNCH_PRE_RMS_NORM(UNROLL, threadsPerGroup, maxThreads) \ + pre_rms_norm<<>>( \ + norm_output, res_output, vals, residual, gamma, epsilon, elems_per_row); + +#define LAUNCH_ALL_RMS_NORM(UNROLL, threadsPerGroup, maxThreads) \ + if (pre_norm) { \ + LAUNCH_PRE_RMS_NORM(UNROLL, threadsPerGroup, maxThreads) \ + } else { \ + LAUNCH_RMS_NORM(UNROLL, threadsPerGroup, maxThreads) \ + } + +template +void launch_rms_norm(T* norm_output, + T* res_output, + const T* vals, + const T* residual, + const T* gamma, + float epsilon, + int rows, + int elems_per_row, + cudaStream_t stream) +{ + // 8 for __half, 4 for float + constexpr int T_per_load = rms::granularity / sizeof(T); + constexpr int maxThreads = 256; + constexpr int internalUnroll = sizeof(T) == 4 ? 4 : 2; + + const bool is_subblock_schedule = (elems_per_row <= 128) ? 
true : false; + const int h_per_step = is_subblock_schedule ? T_per_load : T_per_load * internalUnroll; + + // Scheduling concern: may be slightly faster for some inputs to assign multiple stages of + // warp-sized blocks rather than stepping up to 64/96 threads + const int one_step_threads = next_pow2((elems_per_row + h_per_step - 1) / h_per_step); + const int threads_per_group = (one_step_threads < maxThreads) ? one_step_threads : maxThreads; + + const int groups_per_block_max = + is_subblock_schedule ? (maxThreads + threads_per_group - 1) / threads_per_group : 1; + const int groups_per_block = (rows < groups_per_block_max) ? rows : groups_per_block_max; + const int groups_launch = (groups_per_block + rows - 1) / groups_per_block; + + dim3 block(threads_per_group, groups_per_block); + dim3 grid(groups_launch); + + const int elems_per_step = threads_per_group * h_per_step; + const int external_unRoll = (elems_per_row + elems_per_step - 1) / elems_per_step; + + bool pre_norm = (residual == nullptr) ? 
false : true; + + if (is_subblock_schedule) { + // <=128 + if (threads_per_group == 1) { + LAUNCH_ALL_RMS_NORM(1, 1, maxThreads); + } else if (threads_per_group == 2) { + LAUNCH_ALL_RMS_NORM(1, 2, maxThreads); + } else if (threads_per_group == 4) { + LAUNCH_ALL_RMS_NORM(1, 4, maxThreads); + } else if (threads_per_group == 8) { + LAUNCH_ALL_RMS_NORM(1, 8, maxThreads); + } else if (threads_per_group == 16) { + LAUNCH_ALL_RMS_NORM(1, 16, maxThreads); + } + } else if (external_unRoll == 1) { + // 129 - 4096 elems + // (this can launch with 1-7 warps as well) + LAUNCH_ALL_RMS_NORM(1 * internalUnroll, maxThreads, maxThreads); + } else if (external_unRoll == 2) { + // 4097 - 8192 elems + LAUNCH_ALL_RMS_NORM(2 * internalUnroll, maxThreads, maxThreads); + } else if (external_unRoll == 3) { + // 8193 - 12288 elems + LAUNCH_ALL_RMS_NORM(3 * internalUnroll, maxThreads, maxThreads); + } else if (external_unRoll == 4) { + // 12289 - 16384 elems + LAUNCH_ALL_RMS_NORM(4 * internalUnroll, maxThreads, maxThreads); + } +} + +#define INSTANTIATE_LAUNCH_RMS_NORM(T) \ + template void launch_rms_norm(T * norm_output, \ + T * res_output, \ + const T* vals, \ + const T* residual, \ + const T* gamma, \ + float epsilon, \ + int rows, \ + int elems_per_row, \ + cudaStream_t stream); + +INSTANTIATE_LAUNCH_RMS_NORM(float) +INSTANTIATE_LAUNCH_RMS_NORM(__half) +#ifdef BF16_AVAILABLE +INSTANTIATE_LAUNCH_RMS_NORM(__nv_bfloat16) +#endif diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/softmax.cu b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/softmax.cu new file mode 100644 index 0000000000000000000000000000000000000000..bb06cc149ef4fb29dae84a7b16173d1babfa5e50 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/softmax.cu @@ -0,0 +1,562 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include +#include "conversion_utils.h" +#include "inference_cuda_layers.h" + +#ifndef __HIP_PLATFORM_AMD__ +#include +#endif +#include +#include +#include + +#define MAX_REG_SIZE 8 + +#define minus_infinity -10000.0 + +void CheckCudaErrorAux(const char* file, unsigned line) +{ + cudaError_t err = cudaGetLastError(); + if (err == cudaSuccess) return; + std::cerr << cudaGetErrorString(err) << "(" << err << ") at " << file << ":" << line + << std::endl; + throw std::runtime_error("CUDA ERROR!!!\n"); +} + +#define CUDA_CHECK_ERROR() CheckCudaErrorAux(__FILE__, __LINE__) + +namespace cg = cooperative_groups; + +template +__global__ void attn_softmax_v2(T* vals, + T* mask, + T* alibi, + float layer_scale, + bool triangular, + bool recompute, + bool local_attention, + int window_size, + int total_count, + int heads, + int sequence_length, + int num_seq, + int head_offset, + int mask_stride, + int mp_size, + int reduceWidth) +{ + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = cg::tiled_partition(b); + + float2 low_data[MAX_REG_SIZE]; + float2 high_data[MAX_REG_SIZE]; + const T zero_h = conversion::to(0.f); + + int wid = threadIdx.x >> 5; + int lane = threadIdx.x & 0x1f; + int warp_num = blockDim.x >> 5; + + int reduce_blocks = reduceWidth >> 5; + int seq_lane = threadIdx.x % reduceWidth; + + __shared__ float partialSum[MAX_WARP_NUM]; + + int iter_offset = blockIdx.x * (warp_num / reduce_blocks) + (wid / reduce_blocks); + int batch_idx = iter_offset / (num_seq * heads); + int alibi_offset = batch_idx * heads * mp_size + head_offset; + int mask_offset = batch_idx * mask_stride + (iter_offset % mask_stride); + + if (iter_offset < total_count) { + vals += (iter_offset * sequence_length); + + alibi_offset = (alibi_offset + ((iter_offset / num_seq) % heads)) * sequence_length; + mask_offset = mask_offset * sequence_length; + int seq_id = iter_offset % num_seq; + + int real_seq_id = seq_id 
+ (num_seq == sequence_length ? 0 : sequence_length); + int window_stride4 = (local_attention && (real_seq_id >> 2) > (window_size >> 2)) + ? (real_seq_id >> 2) - (window_size >> 2) + : 0; + int window_stride = + (local_attention && real_seq_id >= window_size) ? real_seq_id - window_size : -1; + + float max_val = minus_infinity; + // if (lane == 0) printf("%d, %d: %d \n", wid, blockIdx.x, mask_offset); + for (int i = 0; i < iterations; i++) { + int data_id = i * (reduceWidth << 2) + (seq_lane); + bool check = (data_id >> 2) >= window_stride4; + bool low_x_check = check && (data_id < sequence_length) && + (!triangular || (data_id <= seq_id)) && (data_id > window_stride); + bool low_y_check = check && ((data_id + reduceWidth) < sequence_length) && + (!triangular || ((data_id + reduceWidth) <= seq_id)) && + ((data_id + reduceWidth) > window_stride); + bool high_x_check = check && ((data_id + reduceWidth * 2) < sequence_length) && + (!triangular || ((data_id + reduceWidth * 2) <= seq_id)) && + ((data_id + reduceWidth * 2) > window_stride); + bool high_y_check = check && ((data_id + reduceWidth * 3) < sequence_length) && + (!triangular || ((data_id + reduceWidth * 3) <= seq_id)) && + ((data_id + reduceWidth * 3) > window_stride); + + if (mask && alibi) { + low_data[i].x = low_x_check + ? conversion::to(vals[data_id]) * layer_scale + + (conversion::to(alibi[data_id + alibi_offset])) + + (conversion::to(mask[data_id + mask_offset])) + : minus_infinity; + low_data[i].y = + low_y_check + ? conversion::to(vals[data_id + reduceWidth]) * layer_scale + + (conversion::to(alibi[data_id + alibi_offset + reduceWidth])) + + (conversion::to(mask[data_id + mask_offset + reduceWidth])) + : minus_infinity; + high_data[i].x = + high_x_check + ? 
conversion::to(vals[data_id + reduceWidth * 2]) * layer_scale + + (conversion::to( + alibi[data_id + alibi_offset + reduceWidth * 2])) + + (conversion::to(mask[data_id + mask_offset + reduceWidth * 2])) + : minus_infinity; + high_data[i].y = + high_y_check + ? conversion::to(vals[data_id + reduceWidth * 3]) * layer_scale + + (conversion::to( + alibi[data_id + alibi_offset + reduceWidth * 3])) + + (conversion::to(mask[data_id + mask_offset + reduceWidth * 3])) + : minus_infinity; + } else if (mask) { + low_data[i].x = low_x_check + ? conversion::to(vals[data_id]) * layer_scale + + (conversion::to(mask[data_id + mask_offset])) + : minus_infinity; + low_data[i].y = + low_y_check + ? conversion::to(vals[data_id + reduceWidth]) * layer_scale + + (conversion::to(mask[data_id + mask_offset + reduceWidth])) + : minus_infinity; + high_data[i].x = + high_x_check + ? conversion::to(vals[data_id + reduceWidth * 2]) * layer_scale + + (conversion::to(mask[data_id + mask_offset + reduceWidth * 2])) + : minus_infinity; + high_data[i].y = + high_y_check + ? conversion::to(vals[data_id + reduceWidth * 3]) * layer_scale + + (conversion::to(mask[data_id + mask_offset + reduceWidth * 3])) + : minus_infinity; + } else if (alibi) { + low_data[i].x = low_x_check + ? conversion::to(vals[data_id]) * layer_scale + + (conversion::to(alibi[data_id + alibi_offset])) + : minus_infinity; + low_data[i].y = + low_y_check + ? conversion::to(vals[data_id + reduceWidth]) * layer_scale + + (conversion::to(alibi[data_id + alibi_offset + reduceWidth])) + : minus_infinity; + high_data[i].x = + high_x_check + ? conversion::to(vals[data_id + reduceWidth * 2]) * layer_scale + + (conversion::to( + alibi[data_id + alibi_offset + reduceWidth * 2])) + : minus_infinity; + high_data[i].y = + high_y_check + ? conversion::to(vals[data_id + reduceWidth * 3]) * layer_scale + + (conversion::to( + alibi[data_id + alibi_offset + reduceWidth * 3])) + : minus_infinity; + } else { + low_data[i].x = low_x_check ? 
conversion::to(vals[data_id]) * layer_scale + : minus_infinity; + low_data[i].y = + low_y_check ? conversion::to(vals[data_id + reduceWidth]) * layer_scale + : minus_infinity; + high_data[i].x = + high_x_check + ? conversion::to(vals[data_id + reduceWidth * 2]) * layer_scale + : minus_infinity; + high_data[i].y = + high_y_check + ? conversion::to(vals[data_id + reduceWidth * 3]) * layer_scale + : minus_infinity; + } + + // if(lane == 0) printf("%f , %d, %d \n", low_data[i].x, data_id, seq_id); + max_val = (low_data[i].x > max_val ? low_data[i].x : max_val); + max_val = (low_data[i].y > max_val ? low_data[i].y : max_val); + max_val = (high_data[i].x > max_val ? high_data[i].x : max_val); + max_val = (high_data[i].y > max_val ? high_data[i].y : max_val); + } + + for (int i = 1; i < WARP_SIZE; i *= 2) { + auto temp = g.shfl_xor(max_val, i); + max_val = (temp > max_val ? temp : max_val); + } + + if (reduceWidth > WARP_SIZE) { + if (lane == 0) partialSum[wid] = max_val; + b.sync(); + + if (lane < warp_num) max_val = partialSum[lane]; + + b.sync(); + + for (int i = 1; i < reduce_blocks; i *= 2) { + auto temp = g.shfl_xor(max_val, i); + max_val = (temp > max_val ? 
temp : max_val); + } + + max_val = g.shfl(max_val, threadIdx.x / WARP_SIZE); + } + float sum = 0; + for (int i = 0; i < iterations; i++) { + low_data[i].x = __expf(low_data[i].x - max_val); + low_data[i].y = __expf(low_data[i].y - max_val); + high_data[i].x = __expf(high_data[i].x - max_val); + high_data[i].y = __expf(high_data[i].y - max_val); + + sum += (low_data[i].x + low_data[i].y + high_data[i].x + high_data[i].y); + } + + for (int i = 1; i < WARP_SIZE; i *= 2) sum += g.shfl_xor(sum, i); + + if (reduceWidth > WARP_SIZE) { + if (lane == 0) partialSum[wid] = sum; + b.sync(); + + if (lane < warp_num) sum = partialSum[lane]; + + b.sync(); + + for (int i = 1; i < reduce_blocks; i *= 2) { sum += g.shfl_xor(sum, i); } + + sum = g.shfl(sum, threadIdx.x / WARP_SIZE); + } + sum += 1e-6; + for (int i = 0; i < iterations; i++) { + int data_id = i * (reduceWidth << 2) + (seq_lane); + if (data_id < sequence_length) { + vals[data_id] = conversion::to(low_data[i].x / sum); + if ((data_id + reduceWidth) < sequence_length) + vals[data_id + reduceWidth] = conversion::to(low_data[i].y / sum); + if ((data_id + reduceWidth * 2) < sequence_length) + vals[data_id + reduceWidth * 2] = conversion::to(high_data[i].x / sum); + if ((data_id + reduceWidth * 3) < sequence_length) + vals[data_id + reduceWidth * 3] = conversion::to(high_data[i].y / sum); + } + } + } +} + +template +__global__ void attn_softmax_v2(float* vals, + float* attn_mask, + float* alibi, + float layer_scale, + bool triangular, + bool recompute, + bool local_attention, + int window_size, + int total_count, + int heads, + int sequence_length, + int num_seq, + int head_offset, + int mask_stride, + int mp_size, + int reduceWidth) +{ + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = cg::tiled_partition(b); + + float4 data[MAX_REG_SIZE]; + + int wid = threadIdx.x >> 5; + int lane = threadIdx.x & 0x1f; + int warp_num = blockDim.x >> 5; + + int reduce_blocks = reduceWidth >> 5; + int seq_lane = 
threadIdx.x % reduceWidth; + + __shared__ float partialSum[MAX_WARP_NUM]; + + int iter_offset = blockIdx.x * (warp_num / reduce_blocks) + (wid / reduce_blocks); + if (iter_offset < total_count) { + vals += (iter_offset * sequence_length); + + int batch_idx = iter_offset / (num_seq * heads); + int mask_offset = batch_idx * mask_stride + (iter_offset % mask_stride); + mask_offset = mask_offset * sequence_length; + int seq_id = iter_offset % num_seq; + + int real_seq_id = seq_id + (num_seq == sequence_length ? 0 : sequence_length); + int window_stride4 = (local_attention && (real_seq_id >> 2) > (window_size >> 2)) + ? (real_seq_id >> 2) - (window_size >> 2) + : 0; + int window_stride = + (local_attention && real_seq_id >= window_size) ? real_seq_id - window_size : -1; + + float max_val = minus_infinity; + + for (int i = 0; i < iterations; i++) { + int data_id = i * (reduceWidth << 2) + (seq_lane); + bool check = (data_id >> 2) >= window_stride4; + bool x_check = check && (data_id < sequence_length) && + (!triangular || (data_id <= seq_id)) && (data_id > window_stride); + bool y_check = check && ((data_id + reduceWidth) < sequence_length) && + (!triangular || ((data_id + reduceWidth) <= seq_id)) && + ((data_id + reduceWidth) > window_stride); + bool z_check = check && ((data_id + reduceWidth * 2) < sequence_length) && + (!triangular || ((data_id + reduceWidth * 2) <= seq_id)) && + ((data_id + reduceWidth * 2) > window_stride); + bool w_check = check && ((data_id + reduceWidth * 3) < sequence_length) && + (!triangular || ((data_id + reduceWidth * 3) <= seq_id)) && + ((data_id + reduceWidth * 3) > window_stride); + + if (attn_mask) { + data[i].x = x_check ? vals[data_id] + attn_mask[data_id + mask_offset] + : minus_infinity; + data[i].y = y_check ? vals[data_id + reduceWidth] + + attn_mask[data_id + mask_offset + reduceWidth] + : minus_infinity; + data[i].z = z_check ? 
vals[data_id + reduceWidth * 2] + + attn_mask[data_id + mask_offset + reduceWidth * 2] + : minus_infinity; + data[i].w = w_check ? vals[data_id + reduceWidth * 3] + + attn_mask[data_id + mask_offset + reduceWidth * 3] + : minus_infinity; + } else { + data[i].x = x_check ? vals[data_id] : minus_infinity; + data[i].y = y_check ? vals[data_id + reduceWidth] : minus_infinity; + data[i].z = z_check ? vals[data_id + reduceWidth * 2] : minus_infinity; + data[i].w = w_check ? vals[data_id + reduceWidth * 3] : minus_infinity; + } + + max_val = (data[i].x > max_val ? data[i].x : max_val); + max_val = (data[i].y > max_val ? data[i].y : max_val); + max_val = (data[i].z > max_val ? data[i].z : max_val); + max_val = (data[i].w > max_val ? data[i].w : max_val); + } + + for (int i = 1; i < WARP_SIZE; i *= 2) { + auto temp = g.shfl_xor(max_val, i); + max_val = (temp > max_val ? temp : max_val); + } + + if (reduceWidth > WARP_SIZE) { + if (lane == 0) partialSum[wid] = max_val; + b.sync(); + + if (lane < warp_num) max_val = partialSum[lane]; + + b.sync(); + + for (int i = 1; i < reduce_blocks; i *= 2) { + auto temp = g.shfl_xor(max_val, i); + max_val = (temp > max_val ? 
temp : max_val); + } + + max_val = g.shfl(max_val, threadIdx.x / WARP_SIZE); + } + + float sum = 0; + for (int i = 0; i < iterations; i++) { + data[i].x = __expf(data[i].x - max_val); + data[i].y = __expf(data[i].y - max_val); + data[i].z = __expf(data[i].z - max_val); + data[i].w = __expf(data[i].w - max_val); + + sum += (data[i].x + data[i].y + data[i].z + data[i].w); + } + + for (int i = 1; i < WARP_SIZE; i *= 2) sum += g.shfl_xor(sum, i); + + if (reduceWidth > WARP_SIZE) { + if (lane == 0) partialSum[wid] = sum; + b.sync(); + + if (lane < warp_num) sum = partialSum[lane]; + + b.sync(); + + for (int i = 1; i < reduce_blocks; i *= 2) { sum += g.shfl_xor(sum, i); } + + sum = g.shfl(sum, threadIdx.x / WARP_SIZE); + } + sum += 1e-6; + + for (int i = 0; i < iterations; i++) { + int data_id = i * (reduceWidth << 2) + (seq_lane); + if (data_id < sequence_length) { + vals[data_id] = data[i].x / sum; + if ((data_id + reduceWidth) < sequence_length) + vals[data_id + reduceWidth] = data[i].y / sum; + if ((data_id + reduceWidth * 2) < sequence_length) + vals[data_id + reduceWidth * 2] = data[i].z / sum; + if ((data_id + reduceWidth * 3) < sequence_length) + vals[data_id + reduceWidth * 3] = data[i].w / sum; + } + } + } +} + +#define LAUNCH_ATTN_SOFTMAX_V2(iterations) \ + attn_softmax_v2<<>>(vals, \ + mask, \ + alibi, \ + layer_scale, \ + triangular, \ + recompute, \ + local_attention, \ + window_size, \ + total_count, \ + heads, \ + sequence_length, \ + num_seq, \ + head_offset, \ + mask_stride, \ + mp_size, \ + reduce_width); + +template +void launch_attn_softmax_v2(T* vals, + T* mask, + T* alibi, + float layer_scale, + bool triangular, + bool recompute, + bool local_attention, + int window_size, + int batch_size, + int heads, + int num_seq, + int sequence_length, + int head_offset, + int mask_stride, + int mp_size, + cudaStream_t stream) +{ + const int total_count = batch_size * heads * num_seq; + + // Scheduling Overview + // 4 element unroll with power of 2 
`reduce_width` threads to a ceiling of `attn_threads` + // Each block should be partitioned into as many `reduce_width` blocks + // as can be fit. + constexpr int attn_threads = 256; + constexpr int min_reduce_width = hw_warp_size; + constexpr int internal_unroll = 4; + + // Handle internal unroll then round to next power of 2. Bump up to minimum granularity. + const int thread_steps_rounded = + next_pow2((sequence_length + internal_unroll - 1) / internal_unroll); + const int thread_steps_schedule = + (thread_steps_rounded < min_reduce_width) ? min_reduce_width : thread_steps_rounded; + // Bound reduce width to the number of threads + const int reduce_width = (thread_steps_schedule < attn_threads) ? thread_steps_schedule + : attn_threads; + // Scale for the excess + const int iterations = thread_steps_schedule / reduce_width; + // Should be safe since reduce_width is capped to attn_threads + const int partitions = attn_threads / reduce_width; + + // Launch params + dim3 grid((total_count + partitions - 1) / partitions); + dim3 block(attn_threads); + + if (sequence_length <= 32768) { + if (iterations == 1) { + LAUNCH_ATTN_SOFTMAX_V2(1); + } else if (iterations == 2) { + LAUNCH_ATTN_SOFTMAX_V2(2); + } else if (iterations == 4) { + LAUNCH_ATTN_SOFTMAX_V2(4); + } else if (iterations == 8) { + LAUNCH_ATTN_SOFTMAX_V2(8); + } else if (iterations == 16) { + LAUNCH_ATTN_SOFTMAX_V2(16); + } else if (iterations == 32) { + LAUNCH_ATTN_SOFTMAX_V2(32); + } else if (iterations == 64) { + LAUNCH_ATTN_SOFTMAX_V2(64); + } + } else + throw std::runtime_error("Unsupport Seq_Length!"); +} + +#define INSTANTIATE_LAUNCH_ATTN_SOFTMAX_V2(T) \ + template void launch_attn_softmax_v2(T* vals, \ + T* mask, \ + T* alibi, \ + float layer_scale, \ + bool triangular, \ + bool recompute, \ + bool local_attention, \ + int window_size, \ + int batch_size, \ + int heads, \ + int num_seq, \ + int sequence_length, \ + int head_offset, \ + int mask_stride, \ + int mp_size, \ + cudaStream_t stream); + 
+INSTANTIATE_LAUNCH_ATTN_SOFTMAX_V2(float); +#ifdef BF16_AVAILABLE +INSTANTIATE_LAUNCH_ATTN_SOFTMAX_V2(__nv_bfloat16); +#endif +INSTANTIATE_LAUNCH_ATTN_SOFTMAX_V2(__half); + +#define DEF_ATTN_SOFTMAX_V2_HALF(_iter) \ + template __global__ void attn_softmax_v2<__half, _iter>(__half * vals, \ + __half * mask, \ + __half * alibi, \ + float layer_scale, \ + bool triangular, \ + bool recompute, \ + bool local_attention, \ + int window_size, \ + int total_count, \ + int heads, \ + int sequence_length, \ + int num_seq, \ + int head_offset, \ + int mask_stride, \ + int mp_size, \ + int reduceWidth) + +#define DEF_ATTN_SOFTMAX_V2_BF16(_iter) \ + template __global__ void attn_softmax_v2<__nv_bfloat16, _iter>(__nv_bfloat16 * vals, \ + __nv_bfloat16 * mask, \ + __nv_bfloat16 * alibi, \ + float layer_scale, \ + bool triangular, \ + bool recompute, \ + bool local_attention, \ + int window_size, \ + int total_count, \ + int heads, \ + int sequence_length, \ + int num_seq, \ + int head_offset, \ + int mask_stride, \ + int mp_size, \ + int reduceWidth) + +#define FOREACH_ITERATIONS(cb) \ + cb(1); \ + cb(2); \ + cb(4); \ + cb(8); \ + cb(16); \ + cb(32); \ + cb(64) + +FOREACH_ITERATIONS(DEF_ATTN_SOFTMAX_V2_HALF); +#ifdef BF16_AVAILABLE +FOREACH_ITERATIONS(DEF_ATTN_SOFTMAX_V2_BF16); +#endif diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/transform.cu b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/transform.cu new file mode 100644 index 0000000000000000000000000000000000000000..8bc5a94e16eecbadb6a904038593b0546cbd26d9 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/transform.cu @@ -0,0 +1,727 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#ifndef __HIP_PLATFORM_AMD__ +#include +#endif +#include "conversion_utils.h" +#include "inference_cuda_layers.h" +namespace cg = cooperative_groups; + +// only used to avoid compilation error due to lack of definition. +#ifndef BF16_AVAILABLE +using __nv_bfloat162 = __half2; +#endif + +// Bias add + +__global__ void bias_add_transform_0213(float* output, + float* k_cache, + float* v_cache, + const float* vals, + const float* bias, + int hidden_dim, + int seq_length, + unsigned seq_offset, + int heads, + int head_stride, + int num_kv, + int rotary_dim, + bool rotate_half, + bool rotate_every_two, + int head_ext, + int max_out_tokens, + float rope_theta) +{ + int d0_stride = hidden_dim * seq_length; + int d1_stride = hidden_dim; + int d2_stride = hidden_dim / heads; + + int d0 = blockIdx.x; // Batch + int d1 = blockIdx.y; // Sequence ID (0-127) + int cnt = blockIdx.z / head_ext; // Hidden count + int d2 = threadIdx.y + (blockIdx.z % head_ext) * (heads / head_ext); // Head (0-11) + int d3 = threadIdx.x; // Values (groups of 4) + + int d2_out_stride = d2_stride * (cnt == 0 ? seq_length : max_out_tokens); + int d0_out_stride = hidden_dim * (cnt == 0 ? seq_length : max_out_tokens); + + const float4* vals_vec = reinterpret_cast(vals); + float4* output_vec = + reinterpret_cast(cnt == 0 ? output : (cnt == 1 ? k_cache : v_cache)); + + vals_vec += (d0 * (d1_stride + num_kv * 2 * d2_stride) * seq_length); + vals_vec += d1 * (d1_stride + num_kv * 2 * d2_stride); + vals_vec += (cnt == 0 ? 0 : d1_stride) + (cnt == 0 ? 0 : (cnt - 1) * num_kv * d2_stride); + vals_vec += ((cnt == 0 ? 
d2 : (d2 / head_stride)) * d2_stride); + + output_vec += (d1 * d2_stride); + output_vec += (d0 * d0_out_stride); + output_vec += (d2 * d2_out_stride); + + unsigned seq_id = d1 + seq_offset; + float4 inputs = vals_vec[d3]; + int lane = d3 & 0x1f; + if (cnt < 2 && rotary_dim > 0 && d3 < rotary_dim) { + float4 q = vals_vec[d3]; + float2* q_f = reinterpret_cast(&q); + if (rotate_every_two) { +#pragma unroll + for (int o = 0; o < 2; o++) { + float inv_freq = (float)(((d3 << 1) + o) * 2) / (float)(rotary_dim << 2); + inv_freq = 1.0 / powf(rope_theta, inv_freq) * (float)seq_id; + q_f[o].x = (-1.0 * q_f[o].y * sinf(inv_freq) + q_f[o].x * cosf(inv_freq)); + q_f[o].y = (q_f[o].x * sinf(inv_freq) + q_f[o].y * cosf(inv_freq)); + } + } + output_vec[d3] = q; + } else + output_vec[d3] = inputs; +} + +#define ATTN_H 3 +#define MAX_SEQ_LINE 10 + +template +__global__ void bias_add_transform_0213(T* output, // q + T* k_cache, + T* v_cache, + const T* vals, // qkv + const T* bias, + int hidden_dim, + int seq_length, + unsigned seq_offset, + int all_tokens, + int heads, + int head_stride, + int num_kv, + int rotary_dim, + bool rotate_half, + bool rotate_every_two, + int head_ext, + int max_out_tokens, + float rope_theta) +{ + using T2 = + typename std::conditional::value, __half2, __nv_bfloat162>::type; + unsigned half_dim = (rotary_dim << 3) >> 1; + int d0_stride = hidden_dim * seq_length; + int d1_stride = hidden_dim; + int d2_stride = hidden_dim / heads; + + int d0 = blockIdx.x; // Batch + int d1 = blockIdx.y; // Sequence ID (0-127) + int cnt = blockIdx.z / head_ext; // Hidden count + int d2 = threadIdx.y + (blockIdx.z % head_ext) * (heads / head_ext); // Head (0-11) + int d3 = threadIdx.x; // Values (groups of 4) + + int d2_out_stride = d2_stride * (cnt == 0 ? seq_length : max_out_tokens); + int d0_out_stride = hidden_dim * (cnt == 0 ? 
seq_length : max_out_tokens); + + float4 vals_arr; + float4 output_arr; + + T2* vals_half = reinterpret_cast(&vals_arr); + T2* output_half = reinterpret_cast(&output_arr); + + const float4* vals_vec = reinterpret_cast(vals); + float4* output_vec = + reinterpret_cast(cnt == 0 ? output : (cnt == 1 ? k_cache : v_cache)); + + vals_vec += (d0 * (d1_stride + num_kv * 2 * d2_stride) * seq_length); + vals_vec += (d1 * (d1_stride + num_kv * 2 * d2_stride)); + vals_vec += (cnt == 0 ? 0 : d1_stride) + (cnt == 0 ? 0 : (cnt - 1) * num_kv * d2_stride); + vals_vec += ((cnt == 0 ? d2 : (d2 / head_stride)) * d2_stride); + + output_vec += (d1 * d2_stride); + output_vec += (d0 * d0_out_stride); + output_vec += (d2 * d2_out_stride); + + unsigned seq_id = d1 + seq_offset; + + int lane = d3 & 0x1f; + if (cnt < 2 && rotary_dim > 0 && d3 < rotary_dim) { + float4 q = vals_vec[d3]; + T2* q_h = reinterpret_cast(&q); + if (rotate_every_two) { +#pragma unroll + for (int o = 0; o < 4; o++) { + float inv_freq = (float)(((d3 << 2) + o) * 2) / (float)(rotary_dim << 3); + inv_freq = 1.0 / powf(rope_theta, inv_freq) * (float)seq_id; + float q_data[2]; + q_data[0] = conversion::to(q_h[o].x); + q_data[1] = conversion::to(q_h[o].y); + q_h[o].x = conversion::to(-1.0 * q_data[1] * sinf(inv_freq) + + q_data[0] * cosf(inv_freq)); + q_h[o].y = + conversion::to(q_data[0] * sinf(inv_freq) + q_data[1] * cosf(inv_freq)); + } + } + output_vec[d3] = q; + } else + output_vec[d3] = vals_vec[d3]; +} + +// [B S C*H] - > C * [B A S N] +template <> +void launch_bias_add_transform_0213(float* output, + float* k_cache, + float* v_cache, + const float* vals, + const float* bias, + int batch_size, + int seq_length, + unsigned seq_offset, + int all_tokens, + int hidden_dim, + int heads, + int num_kv, + int rotary_dim, + bool rotate_half, + bool rotate_every_two, + cudaStream_t stream, + int trans_count, + int max_out_tokens, + float rope_theta) +{ + hidden_dim >>= 2; + int head_ext = (hidden_dim - 1) / MAX_THREADS + 1; + + 
dim3 block_dim(hidden_dim / heads, (heads / head_ext)); + dim3 grid_dim(batch_size, seq_length, (trans_count * head_ext)); + + bias_add_transform_0213<<>>(output, + k_cache, + v_cache, + vals, + bias, + hidden_dim, + seq_length, + seq_offset, + heads, + num_kv > 0 ? (heads / num_kv) : 1, + num_kv > 0 ? num_kv : heads, + rotary_dim >> 2, + rotate_half, + rotate_every_two, + head_ext, + max_out_tokens, + rope_theta); +} + +template +void launch_bias_add_transform_0213(T* output, + T* k_cache, + T* v_cache, + const T* vals, + const T* bias, + int batch_size, + int seq_length, + unsigned seq_offset, + int all_tokens, + int hidden_dim, + int heads, + int num_kv, + int rotary_dim, + bool rotate_half, + bool rotate_every_two, + cudaStream_t stream, + int trans_count, + int max_out_tokens, + float rope_theta) +{ + hidden_dim >>= 3; + int head_ext = 1; // (hidden_dim - 1) / MAX_THREADS + 1; + dim3 block_dim(hidden_dim / heads, (heads / head_ext)); + dim3 grid_dim(batch_size, seq_length, (trans_count * head_ext)); + bias_add_transform_0213<<>>(output, + k_cache, + v_cache, + vals, + bias, + hidden_dim, + seq_length, + seq_offset, + all_tokens, + heads, + num_kv > 0 ? (heads / num_kv) : 1, + num_kv > 0 ? 
num_kv : heads, + rotary_dim >> 3, + rotate_half, + rotate_every_two, + head_ext, + max_out_tokens, + rope_theta); +} + +#define INSTANTIATE_LAUNCH_BIAS_ADD_TRANSFORM_0213(T) \ + template void launch_bias_add_transform_0213(T*, \ + T*, \ + T*, \ + const T*, \ + const T*, \ + int, \ + int, \ + unsigned, \ + int, \ + int, \ + int, \ + int, \ + int, \ + bool, \ + bool, \ + cudaStream_t, \ + int, \ + int, \ + float) + +#ifdef BF16_AVAILABLE +INSTANTIATE_LAUNCH_BIAS_ADD_TRANSFORM_0213(__nv_bfloat16); +#endif +INSTANTIATE_LAUNCH_BIAS_ADD_TRANSFORM_0213(__half); + +// Bias add + +__global__ void pad_add_transform_0213(float* output, + const float* vals, + int hidden_dim, + int seq_length, + int padded_seq_len, + int heads, + int padded_head_size) +{ +} + +template +__global__ void pad_add_transform_0213(T* output, + const T* vals, + int hidden_dim, + int seq_length, + int padded_seq_len, + int heads, + int padded_head_size) +{ + using T2 = + typename std::conditional::value, __half2, __nv_bfloat162>::type; + float4 ZERO; + const T2 zero_h = conversion::to(0.f); + T2* ZERO_h = reinterpret_cast(&ZERO); +#pragma unroll + for (int i = 0; i < 4; i++) ZERO_h[i] = zero_h; + + int d0_stride = hidden_dim * seq_length; + int d1_stride = hidden_dim; + int d2_stride = hidden_dim / heads; + + int d0 = blockIdx.x; // Batch + int d1 = blockIdx.y * blockDim.z + threadIdx.z; // Sequence ID (0-127) + int d2 = threadIdx.y; // Head (0-11) + int d3 = threadIdx.x; // Values (groups of 4) + + int d2_out_stride = padded_head_size * padded_seq_len; + int d0_out_stride = heads * d2_out_stride; + + const float4* vals_vec = reinterpret_cast(vals); + float4* output_vec = reinterpret_cast(output); + + vals_vec += (d0 * d0_stride); + vals_vec += (d1 * d1_stride); + vals_vec += (d2 * d2_stride); + + output_vec += (d1 * padded_head_size); + output_vec += (d0 * d0_out_stride); + output_vec += (d2 * d2_out_stride); + + if (d3 < d2_stride && d1 < seq_length) + output_vec[d3] = vals_vec[d3]; + else + 
output_vec[d3] = ZERO; +} + +// [B S C*H] - > C * [B A S N] +template <> +void launch_pad_add_transform_0213(float* output, + const float* vals, + int batch_size, + int hidden_dim, + int seq_length, + int padded_seq_len, + int heads, + int padded_head_size, + cudaStream_t stream) +{ +} + +template +void launch_pad_add_transform_0213(T* output, + const T* vals, + int batch_size, + int hidden_dim, + int seq_length, + int padded_seq_len, + int heads, + int padded_head_size, + cudaStream_t stream) +{ + hidden_dim >>= 3; + dim3 block_dim((padded_head_size >> 3), heads, 2); + dim3 grid_dim(batch_size, padded_seq_len / 2); + pad_add_transform_0213<<>>( + output, vals, hidden_dim, seq_length, padded_seq_len, heads, padded_head_size >> 3); +} + +#define INSTANTIATE_LAUNCH_PAD_ADD_TRANSFORM_0213_SIMPLE(T) \ + template void launch_pad_add_transform_0213( \ + T*, const T*, int, int, int, int, int, int, cudaStream_t); + +INSTANTIATE_LAUNCH_PAD_ADD_TRANSFORM_0213_SIMPLE(__half); +#ifdef BF16_AVAILABLE +INSTANTIATE_LAUNCH_PAD_ADD_TRANSFORM_0213_SIMPLE(__nv_bfloat16); +#endif + +// Bias add +template +__global__ void bias_add_transform_0213(T* output, + const T* vals, + const T* bias, + int hidden_dim, + int seq_length, + int heads, + int head_ext); + +template <> +__global__ void bias_add_transform_0213(float* output, + const float* vals, + const float* bias, + int hidden_dim, + int seq_length, + int heads, + int head_ext) +{ + int d0_stride = hidden_dim * seq_length; + int d1_stride = hidden_dim; + int d2_stride = hidden_dim / heads; + + int d0_out_stride = d0_stride; + int d1_out_stride = d2_stride; + int d2_out_stride = d2_stride * seq_length; + + int d0 = blockIdx.x; // Batch + int d1 = blockIdx.y; // Sequence ID (0-127) + int cnt = blockIdx.z / head_ext; // Hidden count + int d2 = threadIdx.y + (blockIdx.z % head_ext) * (heads / head_ext); // Head (0-11) + int d3 = threadIdx.x; // Values (groups of 4) + + const float4* vals_vec = reinterpret_cast(vals); + const float4* 
bias_vec = reinterpret_cast(bias); + float4* output_vec = reinterpret_cast(output); + + float4 inputs = vals_vec[d0 * d0_stride * (gridDim.z / head_ext) + cnt * d1_stride + + d1 * d1_stride * (gridDim.z / head_ext) + d2 * d2_stride + d3]; + float4 biases = bias_vec[cnt * d1_stride + d2 * d2_stride + d3]; + + float4 outputs; + outputs.x = inputs.x + biases.x; + outputs.y = inputs.y + biases.y; + outputs.z = inputs.z + biases.z; + outputs.w = inputs.w + biases.w; + + output_vec[cnt * d0_out_stride * gridDim.x + d0 * d0_out_stride + d1 * d1_out_stride + + d2 * d2_out_stride + d3] = outputs; +} + +template +__global__ void bias_add_transform_0213(T* output, + const T* vals, + const T* bias, + int hidden_dim, + int seq_length, + int heads, + int head_ext) +{ + using T2 = + typename std::conditional::value, __half2, __nv_bfloat162>::type; + int d0_stride = hidden_dim * seq_length; + int d1_stride = hidden_dim; + int d2_stride = hidden_dim / heads; + + int d2_out_stride = d2_stride * seq_length; + + int d0 = blockIdx.x; // Batch + int d1 = blockIdx.y; // Sequence ID (0-127) + int cnt = blockIdx.z / head_ext; // Hidden count + int d2 = threadIdx.y + (blockIdx.z % head_ext) * (heads / head_ext); // Head (0-11) + int d3 = threadIdx.x; // Values (groups of 4) + + float4 vals_arr; + float4 bias_arr; + float4 output_arr; + T2* vals_half = reinterpret_cast(&vals_arr); + T2* bias_half = reinterpret_cast(&bias_arr); + T2* output_half = reinterpret_cast(&output_arr); + + const float4* vals_vec = reinterpret_cast(vals); + const float4* bias_vec = reinterpret_cast(bias); + float4* output_vec = reinterpret_cast(output); + + vals_vec += (d0 * d0_stride * (gridDim.z / head_ext)); + vals_vec += (d1 * d1_stride * (gridDim.z / head_ext)); + vals_vec += (cnt * d1_stride); + vals_vec += (d2 * d2_stride); + + bias_vec += (cnt * d1_stride); + bias_vec += (d2 * d2_stride); + + output_vec += (cnt * d0_stride * gridDim.x); + output_vec += (d1 * d2_stride); + output_vec += (d0 * d0_stride); + 
output_vec += (d2 * d2_out_stride); + + bias_arr = bias_vec[d3]; + vals_arr = vals_vec[d3]; + + output_half[0] = vals_half[0] + bias_half[0]; + output_half[1] = vals_half[1] + bias_half[1]; + output_half[2] = vals_half[2] + bias_half[2]; + output_half[3] = vals_half[3] + bias_half[3]; + output_vec[d3] = output_arr; +} + +template +__global__ void bias_add_transform_0213_v2(T* output, + const T* vals, + const T* bias, + int hidden_dim, + int seq_length, + int heads) +{ + using T2 = + typename std::conditional::value, __half2, __nv_bfloat162>::type; + __shared__ float4 in_data[3072]; + + int d0_stride = hidden_dim * seq_length; + int d1_stride = hidden_dim; + int d2_stride = hidden_dim / heads; + int iteration_stride = d1_stride * blockDim.z; // Hidden * 3 / 8 + int batch_stride = d0_stride * blockDim.z; // Hidden * S * 3 / 8 + + int d0_out_stride = d0_stride; + int d1_out_stride = d2_stride; + int d2_out_stride = d2_stride * seq_length; + + int d0 = blockIdx.x; // Batch + int d1 = blockIdx.y; // Sequence ID (0-127) + int cnt = threadIdx.z; // blockIdx.z; // Hidden count + int d2 = threadIdx.y; // Head (0-11) + int d3 = threadIdx.x; // Values (groups of 4) + + float4 vals_arr[1]; + float4 bias_arr[1]; + float4 output_arr[1]; + T2* vals_half = reinterpret_cast(vals_arr); + T2* bias_half = reinterpret_cast(bias_arr); + T2* output_half = reinterpret_cast(output_arr); + + const float4* vals_vec = reinterpret_cast(vals); + const float4* bias_vec = reinterpret_cast(bias); + float4* output_vec = reinterpret_cast(output); + + int iter_index = cnt * d1_stride + d2 * d2_stride + d3; + int input_offset = d0 * batch_stride + d1 * (iteration_stride << 1); + bias_arr[0] = bias_vec[iter_index]; + +#pragma unroll + for (int iter = 0; iter < 2; iter++) { + int iter_id = iter * iteration_stride + iter_index; + vals_arr[0] = vals_vec[input_offset + iter_id]; + + output_half[0] = vals_half[0] + bias_half[0]; + output_half[1] = vals_half[1] + bias_half[1]; + output_half[2] = vals_half[2] 
+ bias_half[2]; + output_half[3] = vals_half[3] + bias_half[3]; + + in_data[iter_id] = output_arr[0]; + } + __syncthreads(); + + iteration_stride = blockDim.z * (blockDim.y >> 1); + int matrix_stride = (d0_out_stride * gridDim.x); + int head_count = (d2 >> 1) + cnt * (blockDim.y >> 1); + + int out_index = d0 * d0_out_stride + d1 * (d1_out_stride << 1) + d3 + (d2 % 2) * d2_stride; + +#pragma unroll + for (int iter = 0; iter < 2; iter++) { + int iter_row = (iter * iteration_stride) + head_count; + int iter_offset = + (iter_row % blockDim.y) * d2_out_stride + (iter_row / blockDim.y) * matrix_stride; + output_vec[out_index + iter_offset] = + in_data[iter_row * d2_stride + d3 + (d2 % 2) * (d1_stride * blockDim.z)]; + } +} + +template +__global__ void transform4d_0213(T* out, + const T* in, + int heads, + int seq_length, + int hidden_dim, + int head_ext); + +template <> +__global__ void transform4d_0213(float* out, + const float* in, + int heads, + int seq_length, + int hidden_dim, + int head_ext) +{ + int d0_stride = hidden_dim * seq_length; + int d1_stride = d0_stride / heads; + int d2_stride = hidden_dim / heads; + + int d0_out_stride = d0_stride; + int d1_out_stride = d2_stride; + int d2_out_stride = hidden_dim; + + int d0 = blockIdx.x; // Batch + int d1 = blockIdx.y / ((seq_length - 1) / blockDim.y + 1); // Head + int d2 = (threadIdx.y + blockDim.y * blockIdx.y) % seq_length; + int cnt = blockIdx.z; + int d3 = threadIdx.x; // Values (groups of 8) + + if (d2 < seq_length) { + const float4* in_vec = reinterpret_cast(in); + float4* out_vec = reinterpret_cast(out); + + float4 vals_vec = in_vec[cnt * d0_stride * gridDim.x + d0 * d0_stride + d1 * d1_stride + + d2 * d2_stride + d3]; + out_vec[d0 * d0_out_stride * gridDim.z + cnt * d2_out_stride + d1 * d1_out_stride + + d2 * d2_out_stride * gridDim.z + d3] = vals_vec; + } +} + +template +__global__ void transform4d_0213(T* out, + const T* in, + int heads, + int seq_length, + int hidden_dim, + int head_ext) +{ + int 
d0_stride = hidden_dim * (seq_length / head_ext); + int d1_stride = hidden_dim; + int d2_stride = hidden_dim / heads; + + int d0 = blockIdx.x; // Batch + int d1 = threadIdx.y + (blockIdx.z % head_ext) * (heads / head_ext); // Head + int d2 = blockIdx.z / head_ext; // Sequence + int cnt = blockIdx.y; // Hidden count + int d3 = threadIdx.x; // Values (groups of 8) + + const float4* in_vec = reinterpret_cast(in); + float4* out_vec = reinterpret_cast(out); + + in_vec += (cnt * d0_stride * gridDim.x); + in_vec += (d0 * d0_stride); + in_vec += (d2 * d2_stride); + in_vec += (d1 * d2_stride * seq_length); + + out_vec += (cnt * d1_stride); + out_vec += (d1 * d2_stride); + out_vec += (d0 * d0_stride * gridDim.y); + out_vec += (d2 * d1_stride * gridDim.y); + + out_vec[d3] = in_vec[d3]; +} + +template +__global__ void transform4d_0213_v2(T* out, const T* in, int heads, int seq_length, int hidden_dim) +{ + __shared__ float4 in_data[3072]; + + int d0_stride = hidden_dim * seq_length; + int d1_stride = hidden_dim; + int d2_stride = hidden_dim / heads; + + int d0 = blockIdx.x; // Batch + int d1 = threadIdx.y; // Head + int d2 = blockIdx.y; // Sequence + int cnt = threadIdx.z; // Hidden count + int d3 = threadIdx.x; // Values (groups of 8) + + const float4* in_vec = reinterpret_cast(in); + float4* out_vec = reinterpret_cast(out); + + int input_offset = d0 * d0_stride + d2 * (d2_stride << 1) + d3 + (d1 % 2) * d2_stride; + int head_count = (d1 >> 1) + cnt * (blockDim.y >> 1); + int iteration_stride = blockDim.z * (blockDim.y >> 1); + int matrix_stride = (d0_stride * gridDim.x); + +#pragma unroll + for (int iter = 0; iter < 2; iter++) { + int iter_row = iter * iteration_stride + head_count; + int iter_offset = (iter_row % blockDim.y) * d2_stride; + + in_data[d3 + iter_offset + (iter_row / blockDim.y + (d1 % 2) * blockDim.z) * d1_stride] = + in_vec[input_offset + iter_offset * seq_length + + (iter_row / blockDim.y) * matrix_stride]; + } + __syncthreads(); + + iteration_stride = 
d1_stride * blockDim.z; + int iter_index = cnt * d1_stride + d1 * d2_stride + d3; + int output_offset = d0 * d0_stride * blockDim.z + d2 * (iteration_stride << 1); + +#pragma unroll + for (int iter = 0; iter < 2; iter++) { + int iter_id = iter * iteration_stride + iter_index; + out_vec[output_offset + iter_id] = in_data[iter_id]; + } +} + +// 3 * [B A S N] - > [B S C*H] +template <> +void launch_transform4d_0213(float* out, + const float* in, + int batch_size, + int heads, + int seq_length, + int hidden_dim, + cudaStream_t stream, + int trans_count) +{ + hidden_dim >>= 2; + dim3 grid_dims(batch_size, heads * ((seq_length - 1) / 8 + 1), trans_count); + dim3 block_dims(hidden_dim / heads, 8); + transform4d_0213 + <<>>(out, in, heads, seq_length, hidden_dim, 1); +} + +template +void launch_transform4d_0213(T* out, + const T* in, + int batch_size, + int heads, + int seq_length, + int hidden_dim, + cudaStream_t stream, + int trans_count) +{ + hidden_dim >>= 3; + int head_ext = (hidden_dim - 1) / MAX_THREADS + 1; + dim3 grid_dims(batch_size, trans_count, (seq_length * head_ext)); + dim3 block_dims(hidden_dim / heads, (heads / head_ext)); + transform4d_0213<<>>( + out, in, heads, seq_length, hidden_dim, head_ext); +} + +#define INSTANTIATE_2B_LAUNCH_TRANSFORM4D(T) \ + template void launch_transform4d_0213(T*, const T*, int, int, int, int, cudaStream_t, int); + +INSTANTIATE_2B_LAUNCH_TRANSFORM4D(__half) +#ifdef BF16_AVAILABLE +INSTANTIATE_2B_LAUNCH_TRANSFORM4D(__nv_bfloat16) +#endif diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/includes/inference_context.h b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/includes/inference_context.h new file mode 100644 index 0000000000000000000000000000000000000000..378fd4e5e990d5be9b642e2092ab30370506b191 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/includes/inference_context.h @@ -0,0 +1,292 @@ +// Copyright (c) 
Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include +#include +#include +#include +#include +#include "cublas_v2.h" +#include "cuda.h" + +#define MEGABYTE (1024 * 1024) +#define GIGABYTE (1024 * 1024 * 1024) + +// TODO: refactor out +#define WARP_SIZE 32 + +#define CUDA_CHECK(callstr) \ + { \ + cudaError_t error_code = callstr; \ + if (error_code != cudaSuccess) { \ + std::cerr << "CUDA error " << error_code << " at " << __FILE__ << ":" << __LINE__; \ + assert(0); \ + } \ + } + +#define CUDA_1D_KERNEL_LOOP(i, n) \ + for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x) + +#define CUDA_2D_KERNEL_LOOP(i, n, j, m) \ + for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x) \ + for (size_t j = blockIdx.y * blockDim.y + threadIdx.y; j < (m); j += blockDim.y * gridDim.y) + +#define DS_CUDA_NUM_THREADS 512 +#define DS_MAXIMUM_NUM_BLOCKS 262144 + +inline int DS_GET_BLOCKS(const int N) +{ + return std::max( + std::min((N + DS_CUDA_NUM_THREADS - 1) / DS_CUDA_NUM_THREADS, DS_MAXIMUM_NUM_BLOCKS), + // Use at least 1 block, since CUDA does not allow empty block + 1); +} + +class InferenceContext { +public: + InferenceContext() + : _workspace(nullptr), + _seed(42), + _curr_offset(0), + _stream(0), + _free_memory_size(0), + _num_tokens(1), + _attention_unfused_workspace_offset(0), + _workSpaceSize(0) + { + _workSpaceSize = 0; + _workspace = 0; + + cublasStatus_t stat = cublasCreate(&_cublasHandle); + if (stat != CUBLAS_STATUS_SUCCESS) { + // It would be nice to use cublasGetStatusName and + // cublasGetStatusString, but they were only added in CUDA 11.4.2. 
+ auto message = std::string("Failed to create cublas handle: cublasStatus_t was ") + + std::to_string(stat); + std::cerr << message << std::endl; + throw std::runtime_error(message); + } +#ifndef __HIP_PLATFORM_AMD__ + cublasSetMathMode(_cublasHandle, CUBLAS_TENSOR_OP_MATH); +#endif + cudaEventCreate(&_comp1_event); + cudaEventCreate(&_comp2_event); + cudaEventCreate(&_comp_event); + cudaEventCreate(&_comm_event); + } + + virtual ~InferenceContext() + { + cublasDestroy(_cublasHandle); + cudaFree(_workspace); + cudaEventDestroy(_comp1_event); + cudaEventDestroy(_comp2_event); + cudaEventDestroy(_comp_event); + cudaEventDestroy(_comm_event); + } + + static InferenceContext& Instance() + { + static InferenceContext _ctx; + return _ctx; + } + + void GenWorkSpace(const unsigned& num_layers, + const unsigned& num_heads, + const size_t& batch_size, + const size_t& prompt_len, + const size_t& hidden_dim, + const unsigned& mp_size, + const bool& external_cache, + const size_t& elem_size, + const unsigned& rank, + unsigned max_out_tokens, + unsigned min_out_tokens) + { + size_t total_size; + if (!_free_memory_size) { cudaMemGetInfo(&_free_memory_size, &total_size); } + + // Flash attention requires padded heads and we'll conservatively allocate + // for that here. Flash attention is only enabled for head size <= 128 right now + const int head_size = hidden_dim / num_heads; + const int padded_head_size = head_size <= 32 ? 32 : (head_size <= 64 ? 64 : 128); + const int effective_head_size = (head_size > 128) ? head_size : padded_head_size; + + size_t activation_size = 10 * (num_heads * effective_head_size) * batch_size; + // Other sequence length dimension is added when the final workSpaceSize is calculated + size_t temp_size = batch_size * (num_heads / mp_size) * max_out_tokens; + size_t cache_size = + num_layers * batch_size * ((num_heads * effective_head_size) / mp_size) * 2; + size_t minimal_requirements = + temp_size + (_free_memory_size > GIGABYTE ? 
500 : 100) * MEGABYTE; + if (_free_memory_size < minimal_requirements) { + printf("Requested:\t%lu\nFree:\t%lu\nTotal:\t%lu\n", + minimal_requirements, + _free_memory_size, + total_size); + throw std::runtime_error("Workspace can't be allocated, no enough memory."); + } + + _max_seq_len = ((_free_memory_size - minimal_requirements) / elem_size) / + (activation_size + temp_size + cache_size); + _max_seq_len = std::min((size_t)max_out_tokens, _max_seq_len); + size_t workSpaceSize = ((external_cache ? (activation_size + temp_size) + : (activation_size + temp_size + cache_size))) * + _max_seq_len * elem_size; + temp_size *= _max_seq_len * elem_size; + + if (_max_seq_len < min_out_tokens) { + printf( + "Allocatable workspace available (%ld tokens) is less than minimum requested " + "workspace (%d tokens)\n", + _max_seq_len, + min_out_tokens); + throw std::runtime_error("Workspace can't be allocated, not enough memory"); + } + + if (!_workspace) { + assert(_workspace == nullptr); + cudaMalloc(&_workspace, workSpaceSize); + } else if (_workSpaceSize < workSpaceSize) { + cudaFree(_workspace); + cudaMalloc(&_workspace, workSpaceSize); + } + if (rank == 0 && (!_workspace || _workSpaceSize < workSpaceSize)) + printf( + "------------------------------------------------------\n" + "Free memory : %f (GigaBytes) \n" + "Total memory: %f (GigaBytes) \n" + "Requested memory: %f (GigaBytes) \n" + "Setting maximum total tokens (input + output) to %lu \n" + "WorkSpace: %p \n" + "------------------------------------------------------\n", + (float)_free_memory_size / GIGABYTE, + (float)total_size / GIGABYTE, + (float)workSpaceSize / GIGABYTE, + _max_seq_len, + _workspace); + + if (!_workspace) { + printf("Requested:\t%lu\nFree:\t%lu\nTotal:\t%lu\n", + workSpaceSize, + _free_memory_size, + total_size); + throw std::runtime_error("Workspace is null."); + } + _workSpaceSize = workSpaceSize; + _attention_unfused_workspace_offset = workSpaceSize - temp_size; + } + inline size_t 
GetMaxTokenLength() const { return _max_seq_len; } + + cudaEvent_t GetCompEvent(int id) { return id == 1 ? _comp1_event : _comp2_event; } + + size_t get_workspace_size() const { return _workSpaceSize; } + void* GetWorkSpace() { return _workspace; } + void* GetAttentionUnfusedWorkspace() + { + return (char*)_workspace + _attention_unfused_workspace_offset; + } + + inline unsigned new_token(unsigned layer_id) + { + if (layer_id == 0) _token_length++; + return _token_length; + } + + inline void reset_tokens(unsigned initial_tokens = 1) + { + _num_tokens = initial_tokens; + } //_token_length = 0; } + + inline unsigned current_tokens() const { return _num_tokens; } + + inline void advance_tokens() { _num_tokens++; } + + cudaStream_t GetCommStream(bool async_op = false) + { + if (!_comm_stream) + _comm_stream = async_op ? at::cuda::getStreamFromPool(true) + : at::cuda::getCurrentCUDAStream(); + return _comm_stream; + } + cudaStream_t GetCurrentStream(bool other_stream = false) + { + // get current pytorch stream. 
+ if (other_stream) { + if (!_stream) _stream = at::cuda::getStreamFromPool(true); + return _stream; + } + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + return stream; + } + + void release_workspace() + { + cudaFree(_workspace); + _workspace = nullptr; + } + bool retake_workspace() + { + if (_workspace != nullptr || _workSpaceSize == 0) return true; + cudaMalloc(&_workspace, _workSpaceSize); + return _workspace != nullptr; + } + cublasHandle_t GetCublasHandle() { return _cublasHandle; } + + std::pair IncrementOffset(uint64_t offset_inc) + { + uint64_t offset = _curr_offset; + _curr_offset += offset_inc; + return std::pair(_seed, offset); + } + + void SetSeed(uint64_t new_seed) { _seed = new_seed; } + + const std::vector>& GetGemmAlgos() const { return _gemm_algos; } + + inline void SynchComp() + { + cudaEventRecord(_comp_event, _comp_stream); + cudaStreamWaitEvent(_comm_stream, _comp_event, 0); + } + inline void SynchComm() + { + cudaEventRecord(_comm_event, _comm_stream); + cudaStreamWaitEvent(_comp_stream, _comm_event, 0); + } + +private: + cublasHandle_t _cublasHandle; + + cudaEvent_t _comp_event; + cudaEvent_t _comm_event; + + void* _workspace; + // offset from _workspace for attention unfused memory + size_t _attention_unfused_workspace_offset; + uint64_t _seed; + uint64_t _curr_offset; + + size_t _workSpaceSize; + size_t _free_memory_size; + + size_t _max_seq_len; + + cudaEvent_t _comp1_event; + cudaEvent_t _comp2_event; + + cudaStream_t _stream; + + unsigned _token_length; + unsigned _num_tokens; + std::vector> _gemm_algos; + + cudaStream_t _comp_stream; + cudaStream_t _comm_stream; + + std::unordered_map _world_sizes; +}; diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/includes/inference_cublas_wrappers.h b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/includes/inference_cublas_wrappers.h new file mode 100644 index 
0000000000000000000000000000000000000000..640751b12c8f1ea290e1803cac665f6bfbb72185 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/includes/inference_cublas_wrappers.h @@ -0,0 +1,435 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include +#include +#include +#ifdef BF16_AVAILABLE +#include +#endif +#include +#include +#ifndef __HIP_PLATFORM_AMD__ +#include +#endif +#include + +#ifdef __HIP_PLATFORM_AMD__ +int cublas_gemm_ex(rocblas_handle handle, + rocblas_operation transa, + rocblas_operation transb, + int m, + int n, + int k, + const float* alpha, + const float* beta, + const float* A, + const float* B, + float* C, + rocblas_gemm_algo algo, + int b_stride = -1) +#else +int cublas_gemm_ex(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const float* alpha, + const float* beta, + const float* A, + const float* B, + float* C, + cublasGemmAlgo_t algo, + int b_stride = -1) +#endif +{ + const int ldb = (b_stride == -1) ? ((transb == CUBLAS_OP_N) ? k : n) : b_stride; +#ifdef __HIP_PLATFORM_AMD__ + rocblas_status status = rocblas_gemm_ex(handle, + transa, + transb, + m, + n, + k, + (const void*)alpha, + (const void*)A, + rocblas_datatype_f32_r, + (transa == rocblas_operation_none) ? m : k, + (const void*)B, + rocblas_datatype_f32_r, + ldb, + (const void*)beta, + C, + rocblas_datatype_f32_r, + m, + C, + rocblas_datatype_f32_r, + m, + rocblas_datatype_f32_r, + algo, + 0, + 0); +#else + cublasStatus_t status = cublasGemmEx(handle, + transa, + transb, + m, + n, + k, + (const void*)alpha, + (const void*)A, + CUDA_R_32F, + (transa == CUBLAS_OP_N) ? 
m : k, + (const void*)B, + CUDA_R_32F, + ldb, + (const void*)beta, + C, + CUDA_R_32F, + m, + CUDA_R_32F, + algo); +#endif + +#ifdef __HIP_PLATFORM_AMD__ + if (status != rocblas_status_success) { +#else + if (status != CUBLAS_STATUS_SUCCESS) { +#endif + fprintf(stderr, + "!!!! kernel execution error. (m: %d, n: %d, k: %d, error: %d) \n", + m, + n, + k, + (int)status); + return EXIT_FAILURE; + } + return 0; +} + +template +#ifdef __HIP_PLATFORM_AMD__ +int cublas_gemm_ex(rocblas_handle handle, + rocblas_operation transa, + rocblas_operation transb, + int m, + int n, + int k, + const float* alpha, + const float* beta, + const T* A, + const T* B, + T* C, + rocblas_gemm_algo algo, + int b_stride = -1) +#else +int cublas_gemm_ex(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const float* alpha, + const float* beta, + const T* A, + const T* B, + T* C, + cublasGemmAlgo_t algo, + int b_stride = -1) +#endif +{ + const int ldb = (b_stride == -1) ? ((transb == CUBLAS_OP_N) ? k : n) : b_stride; +#ifdef __HIP_PLATFORM_AMD__ + constexpr auto rocblas_dtype_16 = std::is_same::value ? rocblas_datatype_f16_r + : rocblas_datatype_bf16_r; + rocblas_status status = rocblas_gemm_ex(handle, + transa, + transb, + m, + n, + k, + (const void*)alpha, + (const void*)A, + rocblas_dtype_16, + (transa == rocblas_operation_none) ? m : k, + (const void*)B, + rocblas_dtype_16, + ldb, + (const void*)beta, + (void*)C, + rocblas_dtype_16, + m, + (void*)C, + rocblas_dtype_16, + m, + rocblas_datatype_f32_r, + algo, + 0, + 0); +#else + constexpr auto cublas_dtype_16 = std::is_same::value ? CUDA_R_16F : CUDA_R_16BF; + cublasStatus_t status = cublasGemmEx(handle, + transa, + transb, + m, + n, + k, + (const void*)alpha, + (const void*)A, + cublas_dtype_16, + (transa == CUBLAS_OP_N) ? 
m : k, + (const void*)B, + cublas_dtype_16, + ldb, + (const void*)beta, + (void*)C, + cublas_dtype_16, + m, + CUDA_R_32F, + algo); +#endif + +#ifdef __HIP_PLATFORM_AMD__ + if (status != rocblas_status_success) { +#else + if (status != CUBLAS_STATUS_SUCCESS) { +#endif + fprintf(stderr, + "!!!! kernel execution error. (m: %d, n: %d, k: %d, error: %d) \n", + m, + n, + k, + (int)status); + return EXIT_FAILURE; + } + return 0; +} + +#ifdef __HIP_PLATFORM_AMD__ +int cublas_strided_batched_gemm(rocblas_handle handle, + int m, + int n, + int k, + const float* alpha, + const float* beta, + const float* A, + const float* B, + float* C, + rocblas_operation op_A, + rocblas_operation op_B, + int stride_A, + int stride_B, + int stride_C, + int batch, + rocblas_gemm_algo algo) +#else +int cublas_strided_batched_gemm(cublasHandle_t handle, + int m, + int n, + int k, + const float* alpha, + const float* beta, + const float* A, + const float* B, + float* C, + cublasOperation_t op_A, + cublasOperation_t op_B, + int stride_A, + int stride_B, + int stride_C, + int batch, + cublasGemmAlgo_t algo) +#endif +{ +#ifdef __HIP_PLATFORM_AMD__ + rocblas_status status = + rocblas_gemm_strided_batched_ex(handle, + op_A, + op_B, + m, + n, + k, + alpha, + A, + rocblas_datatype_f32_r, + (op_A == rocblas_operation_none) ? m : k, + stride_A, + B, + rocblas_datatype_f32_r, + (op_B == rocblas_operation_none) ? k : n, + stride_B, + beta, + C, + rocblas_datatype_f32_r, + m, + stride_C, + C, + rocblas_datatype_f32_r, + m, + stride_C, + batch, + rocblas_datatype_f32_r, + algo, + 0, + 0); +#else + cublasStatus_t status = cublasGemmStridedBatchedEx(handle, + op_A, + op_B, + m, + n, + k, + alpha, + A, + CUDA_R_32F, + (op_A == CUBLAS_OP_N) ? m : k, + stride_A, + B, + CUDA_R_32F, + (op_B == CUBLAS_OP_N) ? 
k : n, + stride_B, + beta, + C, + CUDA_R_32F, + m, + stride_C, + batch, + CUDA_R_32F, + algo); +#endif + +#ifdef __HIP_PLATFORM_AMD__ + if (status != rocblas_status_success) { +#else + if (status != CUBLAS_STATUS_SUCCESS) { +#endif + fprintf(stderr, + "!!!! kernel execution error. (batch: %d, m: %d, n: %d, k: %d, error: %d) \n", + batch, + m, + n, + k, + (int)status); + return EXIT_FAILURE; + } + return 0; +} + +template +#ifdef __HIP_PLATFORM_AMD__ +int cublas_strided_batched_gemm(rocblas_handle handle, + int m, + int n, + int k, + const float* alpha, + const float* beta, + const T* A, + const T* B, + T* C, + rocblas_operation op_A, + rocblas_operation op_B, + int stride_A, + int stride_B, + int stride_C, + int batch, + rocblas_gemm_algo algo) +#else +int cublas_strided_batched_gemm(cublasHandle_t handle, + int m, + int n, + int k, + const float* alpha, + const float* beta, + const T* A, + const T* B, + T* C, + cublasOperation_t op_A, + cublasOperation_t op_B, + int stride_A, + int stride_B, + int stride_C, + int batch, + cublasGemmAlgo_t algo) +#endif +{ +#ifdef __HIP_PLATFORM_AMD__ + constexpr auto rocblas_dtype_16 = std::is_same::value ? rocblas_datatype_f16_r + : rocblas_datatype_bf16_r; + rocblas_status status = + rocblas_gemm_strided_batched_ex(handle, + op_A, + op_B, + m, + n, + k, + alpha, + A, + rocblas_dtype_16, + (op_A == rocblas_operation_none) ? m : k, + stride_A, + B, + rocblas_dtype_16, + (op_B == rocblas_operation_none) ? k : n, + stride_B, + beta, + C, + rocblas_dtype_16, + m, + stride_C, + C, + rocblas_dtype_16, + m, + stride_C, + batch, + rocblas_datatype_f32_r, + algo, + 0, + 0); +#else + constexpr auto cublas_dtype_16 = std::is_same::value ? CUDA_R_16F : CUDA_R_16BF; + cublasStatus_t status = cublasGemmStridedBatchedEx(handle, + op_A, + op_B, + m, + n, + k, + alpha, + A, + cublas_dtype_16, + (op_A == CUBLAS_OP_N) ? m : k, + stride_A, + B, + cublas_dtype_16, + (op_B == CUBLAS_OP_N) ? 
k : n, + stride_B, + beta, + C, + cublas_dtype_16, + m, + stride_C, + batch, + CUDA_R_32F, + algo); +#endif + +#ifdef __HIP_PLATFORM_AMD__ + if (status != rocblas_status_success) { +#else + if (status != CUBLAS_STATUS_SUCCESS) { +#endif + fprintf(stderr, + "!!!! kernel execution error. (m: %d, n: %d, k: %d, error: %d) \n", + m, + n, + k, + (int)status); + return EXIT_FAILURE; + } + + return 0; +} diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/includes/inference_cuda_layers.h b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/includes/inference_cuda_layers.h new file mode 100644 index 0000000000000000000000000000000000000000..dcc020483687f751f5997863307504449743fa35 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/includes/inference_cuda_layers.h @@ -0,0 +1,248 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include "ds_kernel_utils.h" + +#include +#ifdef BF16_AVAILABLE +#include +#endif +#include +#include +#include +#include +#include + +#define MAX_WARP_NUM 32 +#define WARP_SIZE 32 + +#define MAX_THREADS 1024 +#define SMs 80 + +#define MAX_REGISTERS 256 + +template +void launch_attn_softmax_v2(T* vals, + T* mask, + T* alibi, + float layer_scale, + bool triangular, + bool recompute, + bool local_attention, + int window_size, + int batch_size, + int heads, + int num_seq, + int sequence_length, + int offset, + int mask_stride, + int mp_size, + cudaStream_t stream); + +// Fused bias add with gelu activation +template +void launch_bias_gelu(T* input, + const T* bias, + int intermediate_size, + int batch_size, + cudaStream_t stream); + +template +void launch_gated_activation(T* output, + const T* activation, + const T* bias, + int rows, + int output_stride, + int elems_per_row, + bool use_gelu, + cudaStream_t stream); + +// Fused bias add with relu activation +template +void 
launch_bias_relu(T* input, + const T* bias, + int intermediate_size, + int batch_size, + cudaStream_t stream); + +template +void launch_bias_add(T* input, const T* bias, int hidden_size, int batch_size, cudaStream_t stream); + +template +void launch_bias_residual(T* input, + T* output, + T* attn, + T* bias, + T* attn_bias, + int batch, + int hidden_dim, + int mp_size, + bool preln, + cudaStream_t stream); + +template +void launch_fused_ln(T* output, + const T* vals, + const T* gamma, + const T* beta, + float epsilon, + int rows, + int elems_per_row, + cudaStream_t stream); + +template +void launch_fused_residual_ln(T* output, + const T* vals, + const T* residual, + const T* bias, + const T* gamma, + const T* beta, + float epsilon, + int rows, + int elems_per_row, + cudaStream_t stream); + +template +void launch_fused_residual_ln_store_pre_ln_res(T* norm_output, + T* res_output, + const T* vals, + const T* residual, + const T* bias, + const T* gamma, + const T* beta, + float epsilon, + int rows, + int elems_per_row, + cudaStream_t stream); + +template +void launch_rms_norm(T* norm_output, + T* res_output, + const T* vals, + const T* residual, + const T* gamma, + float epsilon, + int rows, + int elems_per_row, + cudaStream_t stream); + +template +void launch_dequantize(T* output, + const int8_t* input, + const float* qscale, + unsigned output_size, + unsigned hidden_dim, + unsigned groups, + unsigned merge_count, + cudaStream_t stream); + +template +void launch_dequantize(T* output, + const int8_t* input, + const float* qscale, + unsigned output_size, + unsigned hidden_dim, + unsigned groups, + cudaStream_t stream); +template +void launch_gptj_residual_add(T* input, + T* output, + T* attn, + T* bias, + T* attn_bias, + int batch, + int head_size, + int mp_size, + cudaStream_t stream); + +template +void launch_apply_rotary_pos_emb(T* mixed_query, + T* key_layer, + unsigned head_size, + unsigned seq_len, + unsigned rotary_dim, + unsigned offset, + unsigned num_heads, + 
unsigned batch, + float rope_theta, + cudaStream_t stream, + int max_out_tokens); + +template +void launch_moe_res_matmul(T* residual, + T* coef, + T* mlp_out, + int seq_len, + int hidden_dim, + cudaStream_t stream); + +// 4D transform [0, 1, 2, 3] -> [0, 2, 1, 3] +template +void launch_transform4d_0213(T* out, + const T* in, + int batch_size, + int heads, + int seq_length, + int hidden_dim, + cudaStream_t stream, + int trans_count); +template +void launch_bias_add_transform_0213(T* outputs, + T* vals, + T* vals1, + const T* vals2, + const T* bias, + int batch_size, + int seq_length, + unsigned seq_offset, + int seq_length1, + int hidden_dim, + int heads, + int num_kv, + int rotary_dim, + bool rotate_half, + bool rotate_every_two, + cudaStream_t stream, + int trans_count, + int max_out_tokens, + float rope_theta); +template +void pad_data(T* padded_output, + T* output, + int bsz, + int head_size, + int padded_head_size, + cudaStream_t stream); + +template +void pad_head_seq(T* padded_output, + T* output, + int bsz, + int seq_len, + int padded_seq_len, + int head_size, + int padded_head_size, + cudaStream_t stream); + +template +void launch_pad_add_transform_0213(T* output, + const T* vals, + int batch_size, + int hidden_dim, + int seq_length, + int padded_seq_len, + int heads, + int padded_head_size, + cudaStream_t stream); + +template +void launch_vector_add(T* out, + const T* a, + const T* b, + float gamma, + int num_elems, + cudaStream_t stream); diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/normalize_kernels.cu b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/normalize_kernels.cu new file mode 100644 index 0000000000000000000000000000000000000000..8b9dbaea6c86d98ed7e431073684b89b8d1499ee --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/normalize_kernels.cu @@ -0,0 +1,2134 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "custom_cuda_layers.h" + +namespace cg = cooperative_groups; + +/* +Fused bias add, residual (elementwise) add, and normalization layer. + +For FP16, this kernel does not promote to FP32 in order to utilize the 2x throughput for +__half2 instructions, and avoid the conversion overhead (1/8 of __hal2 arithmetic). + +For specific launch constraints, see the launch functions. +*/ + +#define NORM_REG (MAX_REGISTERS / 4) + +__global__ void fused_bias_residual_layer_norm(float* vals, + const float* residual, + const float* gamma, + const float* beta, + float epsilon, + bool preLayerNorm, + bool training, + float* vars, + float* means, + int row_stride) +{ + int iteration_stride = blockDim.x; + int iterations = row_stride / iteration_stride; + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = cg::tiled_partition(b); + + int row = blockIdx.x; + int id = threadIdx.x; + int gid = id / WARP_SIZE; + + float vals_arr[NORM_REG]; + __shared__ float shr[MAX_WARP_NUM]; + + residual += (row * row_stride); + vals += (row * row_stride); + + float sum = 0.f; + int high_index = iterations * iteration_stride + id; +#pragma unroll + for (int i = 0; i < iterations; i++) { + vals_arr[i] = residual[i * iteration_stride + id]; + sum += vals_arr[i]; + } + if (high_index < row_stride) { + vals_arr[iterations] = residual[high_index]; + sum += vals_arr[iterations]; + iterations++; + } + + for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); } + + if (g.thread_rank() == 0) shr[gid] = sum; + + b.sync(); + + if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) sum = shr[g.thread_rank()]; + +#if !defined(__STOCHASTIC_MODE__) || __CUDA_ARCH__ < 700 + b.sync(); +#endif + + for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { + sum += g.shfl_down(sum, i); + } + + sum = g.shfl(sum, 0); + float mean = sum / row_stride; + if (training) + if (threadIdx.x == 0) means[row] = mean; 
+ float variance = 0.f; + for (int i = 0; i < iterations; i++) { + vals_arr[i] -= mean; + variance += vals_arr[i] * vals_arr[i]; + } + + for (int i = 1; i < 32; i *= 2) { variance += g.shfl_down(variance, i); } + + if (g.thread_rank() == 0) shr[gid] = variance; + + b.sync(); + + if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) variance = shr[g.thread_rank()]; + +#ifndef __STOCHASTIC_MODE__ + b.sync(); +#endif + + for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { + variance += g.shfl_down(variance, i); + } + variance = g.shfl(variance, 0); + variance /= row_stride; + variance += epsilon; + if (training) + if (threadIdx.x == 0) vars[row] = variance; + + iterations = row_stride / iteration_stride; + for (int i = 0; i < iterations; i++) { + vals_arr[i] = vals_arr[i] * rsqrtf(variance); + vals_arr[i] = + vals_arr[i] * gamma[i * iteration_stride + id] + beta[i * iteration_stride + id]; + vals[i * iteration_stride + id] = vals_arr[i]; + } + if ((high_index) < row_stride) { + vals_arr[iterations] = vals_arr[iterations] * rsqrtf(variance); + vals_arr[iterations] = vals_arr[iterations] * gamma[high_index] + beta[high_index]; + vals[high_index] = vals_arr[iterations]; + } +} + +__global__ void fused_bias_residual_layer_norm(__half* vals, + const __half* residual, + const __half* gamma, + const __half* beta, + float epsilon, + bool preLayerNorm, + bool training, + __half* vars, + __half* means, + int row_stride) +{ +#ifdef HALF_PRECISION_AVAILABLE + int iteration_stride = blockDim.x; + int iterations = row_stride / iteration_stride; + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); + + int row = blockIdx.x; + int id = threadIdx.x; + int gid = id >> WARP_SIZE_BITS; + + float2 vals_f[NORM_REG]; + __shared__ float shr[MAX_WARP_NUM]; + + __half2* vals_cast = reinterpret_cast<__half2*>(vals); + const __half2* residual_cast = reinterpret_cast(residual); + + residual_cast += (row * row_stride); + 
vals_cast += (row * row_stride); + + float sum = 0.f; + int high_index = iterations * iteration_stride + id; +#pragma unroll + for (int i = 0; i < iterations; i++) { + vals_f[i] = __half22float2(residual_cast[i * iteration_stride + id]); + sum += vals_f[i].x; + sum += vals_f[i].y; + } + if ((high_index) < row_stride) { + vals_f[iterations] = __half22float2(residual_cast[high_index]); + sum += vals_f[iterations].x; + sum += vals_f[iterations].y; + iterations++; + } + + for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); } + + if (g.thread_rank() == 0) shr[gid] = sum; + + b.sync(); + + if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) sum = shr[g.thread_rank()]; + +#ifndef __STOCHASTIC_MODE__ + b.sync(); +#endif + + for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { + sum += g.shfl_down(sum, i); + } + sum = g.shfl(sum, 0); + float mean = sum / (row_stride * 2); + + float variance = 0.f; + for (int i = 0; i < iterations; i++) { + vals_f[i].x -= mean; + vals_f[i].y -= mean; + variance += vals_f[i].x * vals_f[i].x; + variance += vals_f[i].y * vals_f[i].y; + } + + for (int i = 1; i < 32; i *= 2) { variance += g.shfl_down(variance, i); } + + if (g.thread_rank() == 0) shr[gid] = variance; + + b.sync(); + + if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) variance = shr[g.thread_rank()]; + +#ifndef __STOCHASTIC_MODE__ + b.sync(); +#endif + + for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { + variance += g.shfl_down(variance, i); + } + variance = g.shfl(variance, 0); + variance /= (row_stride * 2); + variance += epsilon; + + __half2 variance_h = __float2half2_rn(variance); + const __half2* gamma_cast = reinterpret_cast(gamma); + const __half2* beta_cast = reinterpret_cast(beta); + + if (training && threadIdx.x == 0) { + vars[row] = __float2half(variance); + means[row] = __float2half(mean); + } + iterations = row_stride / iteration_stride; + for (int i = 0; i < iterations; i++) { + __half2 vals_arr = 
__float22half2_rn(vals_f[i]); + vals_arr = vals_arr * h2rsqrt(variance_h); + vals_arr = + vals_arr * gamma_cast[i * iteration_stride + id] + beta_cast[i * iteration_stride + id]; + vals_cast[i * iteration_stride + id] = vals_arr; + } + if ((high_index) < row_stride) { + __half2 vals_arr = __float22half2_rn(vals_f[iterations]); + vals_arr = vals_arr * h2rsqrt(variance_h); + vals_arr = vals_arr * gamma_cast[high_index] + beta_cast[high_index]; + vals_cast[high_index] = vals_arr; + } +#endif +} + +template +void launch_bias_residual_layer_norm(T* vals, + const T* residual, + const T* gamma, + const T* beta, + float epsilon, + int batch_size, + int hidden_dim, + cudaStream_t stream, + bool preLayerNorm, + bool training, + T* vars, + T* means); + +template <> +void launch_bias_residual_layer_norm(float* vals, + const float* residual, + const float* gamma, + const float* beta, + float epsilon, + int batch_size, + int hidden_dim, + cudaStream_t stream, + bool preLayerNorm, + bool training, + float* vars, + float* means) +{ + int threads = THREADS; + + dim3 grid_dim(batch_size); + + if (hidden_dim > 16384 && hidden_dim <= 32768) + threads <<= 1; + else if (hidden_dim > 32768 && hidden_dim <= 65536) + threads <<= 2; + else if (hidden_dim > 65536) + throw std::runtime_error("Unsupport hidden_dim."); + + dim3 block_dim(threads); + + fused_bias_residual_layer_norm<<>>( + vals, residual, gamma, beta, epsilon, preLayerNorm, training, vars, means, hidden_dim); +} + +template <> +void launch_bias_residual_layer_norm<__half>(__half* vals, + const __half* residual, + const __half* gamma, + const __half* beta, + float epsilon, + int batch_size, + int hidden_dim, + cudaStream_t stream, + bool preLayerNorm, + bool training, + __half* vars, + __half* means) +{ + int threads = 128; + + dim3 grid_dim(batch_size); + + if (hidden_dim > 8192 && hidden_dim <= 16384) + threads <<= 1; + else if (hidden_dim > 16384 && hidden_dim <= 32768) + threads <<= 2; + else if (hidden_dim > 32768 && 
hidden_dim <= 65536) + threads <<= 3; + else if (hidden_dim > 65536) + throw std::runtime_error("Unsupport hidden_dim."); + + dim3 block_dim(threads); + + fused_bias_residual_layer_norm<<>>( + vals, residual, gamma, beta, epsilon, preLayerNorm, training, vars, means, hidden_dim / 2); +} + +__global__ void fused_bias_residual_layer_norm(float* vals, + const float* residual, + const float* gamma, + const float* beta, + float epsilon, + bool preLayerNorm, + bool training, + float* vars, + int row_stride) +{ + int iteration_stride = blockDim.x; + int iterations = row_stride / iteration_stride; + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); + + int row = blockIdx.x; + int id = threadIdx.x; + int gid = id / 32; + + float vals_arr[NORM_REG]; + __shared__ float shr[MAX_WARP_NUM]; + + residual += (row * row_stride); + vals += (row * row_stride); + + float sum = 0.f; + int high_index = iterations * iteration_stride + id; +#pragma unroll + for (int i = 0; i < iterations; i++) { + vals_arr[i] = residual[i * iteration_stride + id]; + sum += vals_arr[i]; + } + if ((high_index) < row_stride) { + vals_arr[iterations] = residual[high_index]; + sum += vals_arr[iterations]; + iterations++; + } + + for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); } + + if (g.thread_rank() == 0) shr[gid] = sum; + + b.sync(); + + if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) sum = shr[g.thread_rank()]; + +#if !defined(__STOCHASTIC_MODE__) || __CUDA_ARCH__ < 700 + b.sync(); +#endif + + for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { + sum += g.shfl_down(sum, i); + } + + sum = g.shfl(sum, 0); + float mean = sum / row_stride; + float variance = 0.f; + for (int i = 0; i < iterations; i++) { + vals_arr[i] -= mean; + variance += vals_arr[i] * vals_arr[i]; + } + + for (int i = 1; i < 32; i *= 2) { variance += g.shfl_down(variance, i); } + + if (g.thread_rank() == 0) shr[gid] = variance; + + b.sync(); + 
+ if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) variance = shr[g.thread_rank()]; + +#ifndef __STOCHASTIC_MODE__ + b.sync(); +#endif + + for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { + variance += g.shfl_down(variance, i); + } + variance = g.shfl(variance, 0); + variance /= row_stride; + variance += epsilon; + if (training) + if (threadIdx.x == 0) vars[row] = variance; + + iterations = row_stride / iteration_stride; + for (int i = 0; i < iterations; i++) { + vals_arr[i] = vals_arr[i] * rsqrtf(variance); + vals_arr[i] = + vals_arr[i] * gamma[i * iteration_stride + id] + beta[i * iteration_stride + id]; + vals[i * iteration_stride + id] = vals_arr[i]; + } + if ((high_index) < row_stride) { + vals_arr[iterations] = vals_arr[iterations] * rsqrtf(variance); + vals_arr[iterations] = vals_arr[iterations] * gamma[high_index] + beta[high_index]; + vals[high_index] = vals_arr[iterations]; + } +} + +__global__ void fused_bias_residual_layer_norm(__half* vals, + const __half* residual, + const __half* gamma, + const __half* beta, + float epsilon, + bool preLayerNorm, + bool training, + __half* vars, + int row_stride) +{ +#ifdef HALF_PRECISION_AVAILABLE + + int iteration_stride = blockDim.x; + int iterations = row_stride / iteration_stride; + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); + + int row = blockIdx.x; + int id = threadIdx.x; + int gid = id >> WARP_SIZE_BITS; + + float2 vals_f[NORM_REG]; + __shared__ float shr[MAX_WARP_NUM]; + + __half2* vals_cast = reinterpret_cast<__half2*>(vals); + const __half2* residual_cast = reinterpret_cast(residual); + + residual_cast += (row * row_stride); + vals_cast += (row * row_stride); + + float sum = 0.f; + int high_index = iterations * iteration_stride + id; +#pragma unroll + for (int i = 0; i < iterations; i++) { + vals_f[i] = __half22float2(residual_cast[i * iteration_stride + id]); + sum += vals_f[i].x; + sum += vals_f[i].y; + } + if 
((high_index) < row_stride) { + vals_f[iterations] = __half22float2(residual_cast[high_index]); + sum += vals_f[iterations].x; + sum += vals_f[iterations].y; + iterations++; + } + + for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); } + + if (g.thread_rank() == 0) shr[gid] = sum; + + b.sync(); + + if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) sum = shr[g.thread_rank()]; + +#ifndef __STOCHASTIC_MODE__ + b.sync(); +#endif + + for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { + sum += g.shfl_down(sum, i); + } + sum = g.shfl(sum, 0); + float mean = sum / (row_stride * 2); + + float variance = 0.f; + for (int i = 0; i < iterations; i++) { + vals_f[i].x -= mean; + vals_f[i].y -= mean; + variance += vals_f[i].x * vals_f[i].x; + variance += vals_f[i].y * vals_f[i].y; + } + + for (int i = 1; i < 32; i *= 2) { variance += g.shfl_down(variance, i); } + + if (g.thread_rank() == 0) shr[gid] = variance; + + b.sync(); + + if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) variance = shr[g.thread_rank()]; + +#ifndef __STOCHASTIC_MODE__ + b.sync(); +#endif + + for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { + variance += g.shfl_down(variance, i); + } + variance = g.shfl(variance, 0); + variance /= (row_stride * 2); + variance += epsilon; + + __half2 variance_h = __float2half2_rn(variance); + const __half2* gamma_cast = reinterpret_cast(gamma); + const __half2* beta_cast = reinterpret_cast(beta); + + if (training && threadIdx.x == 0) vars[row] = __float2half(variance); + + iterations = row_stride / iteration_stride; + for (int i = 0; i < iterations; i++) { + __half2 vals_arr = __float22half2_rn(vals_f[i]); + vals_arr = vals_arr * h2rsqrt(variance_h); + vals_arr = + vals_arr * gamma_cast[i * iteration_stride + id] + beta_cast[i * iteration_stride + id]; + vals_cast[i * iteration_stride + id] = vals_arr; + } + if ((high_index) < row_stride) { + __half2 vals_arr = __float22half2_rn(vals_f[iterations]); + vals_arr = 
vals_arr * h2rsqrt(variance_h); + vals_arr = vals_arr * gamma_cast[high_index] + beta_cast[high_index]; + vals_cast[high_index] = vals_arr; + } +#endif +} + +template +void launch_bias_residual_layer_norm(T* vals, + const T* residual, + const T* gamma, + const T* beta, + float epsilon, + int batch_size, + int hidden_dim, + cudaStream_t stream, + bool preLayerNorm, + bool training, + T* vars); + +/* +To tune this launch the following restrictions must be met: + +For float: +row_stride == hidden_size +threads * iterations == row_stride +threads is in [32, 64, 128, 256, 512, 1024] + +For half: +row_stride == hidden_size / 2 +threads * iterations == row_stride +threads is in [32, 64, 128, 256, 512, 1024] + +*/ + +template <> +void launch_bias_residual_layer_norm(float* vals, + const float* residual, + const float* gamma, + const float* beta, + float epsilon, + int batch_size, + int hidden_dim, + cudaStream_t stream, + bool preLayerNorm, + bool training, + float* vars) +{ + int threads = THREADS; + + dim3 grid_dim(batch_size); + + // There are some limitations to call below functions, now just enumerate the situations. + + if (hidden_dim > 16384 && hidden_dim <= 32768) + threads <<= 1; + else if (hidden_dim > 32768 && hidden_dim <= 65536) + threads <<= 2; + else if (hidden_dim > 65536) + throw std::runtime_error("Unsupport hidden_dim."); + + dim3 block_dim(threads); + + fused_bias_residual_layer_norm<<>>( + vals, residual, gamma, beta, epsilon, preLayerNorm, training, vars, hidden_dim); +} + +template <> +void launch_bias_residual_layer_norm<__half>(__half* vals, + const __half* residual, + const __half* gamma, + const __half* beta, + float epsilon, + int batch_size, + int hidden_dim, + cudaStream_t stream, + bool preLayerNorm, + bool training, + __half* vars) +{ + int threads = 128; + + dim3 grid_dim(batch_size); + + // There are some limitations to call below functions, now just enumerate the situations. 
+ + if (hidden_dim > 8192 && hidden_dim <= 16384) + threads <<= 1; + else if (hidden_dim > 16384 && hidden_dim <= 32768) + threads <<= 2; + else if (hidden_dim > 32768 && hidden_dim <= 65536) + threads <<= 3; + else if (hidden_dim > 65536) + throw std::runtime_error("Unsupport hidden_dim."); + + dim3 block_dim(threads); + fused_bias_residual_layer_norm<<>>( + vals, residual, gamma, beta, epsilon, preLayerNorm, training, vars, hidden_dim / 2); +} + +/* Normalize Gamma & Betta gradients + * Compute gradients using either X_hat or + * normalize input (invertible). + * Combine transpose with gradients computation. + */ + +template +__global__ void LayerNormBackward1(const T* __restrict__ out_grad, + const T* __restrict__ vals_hat, + const T* __restrict__ gamma, + const T* __restrict__ betta, + T* __restrict__ gamma_grad, + T* __restrict__ betta_grad, + int rows, + int width, + bool invertible) +{ + __shared__ float betta_buffer[TILE_DIM][TILE_DIM + 1]; + __shared__ float gamma_buffer[TILE_DIM][TILE_DIM + 1]; + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = cg::tiled_partition(b); + + int idx = blockDim.x * blockIdx.x + threadIdx.x; + int offset = threadIdx.y * width + idx; + int y_stride = width * TILE_DIM; + + float betta_reg = (invertible ? (float)betta[idx] : 0.0f); + float gamma_reg = (float)gamma[idx]; + + // Loop across matrix height + float betta_tmp = 0; + float gamma_tmp = 0; + for (int r = threadIdx.y; r < rows; r += TILE_DIM) { + float grad = (float)out_grad[offset]; + float val = (invertible ? ((float)vals_hat[offset] - betta_reg) / gamma_reg + : (float)vals_hat[offset]); + betta_tmp += grad; + gamma_tmp += (val * grad); + + offset += y_stride; + } + + betta_buffer[threadIdx.x][threadIdx.y] = betta_tmp; + gamma_buffer[threadIdx.x][threadIdx.y] = gamma_tmp; + + __syncthreads(); + + // Sum the shared buffer. 
+ float s1 = betta_buffer[threadIdx.y][threadIdx.x]; + float s2 = gamma_buffer[threadIdx.y][threadIdx.x]; + +#ifndef __STOCHASTIC_MODE__ + __syncthreads(); +#endif + + for (int i = 1; i < TILE_DIM; i <<= 1) { + s1 += g.shfl_down(s1, i); + s2 += g.shfl_down(s2, i); + } + + if (threadIdx.x == 0) { + int pos = blockIdx.x * TILE_DIM + threadIdx.y; + betta_grad[pos] = s1; + gamma_grad[pos] = s2; + } +} + +/* Normalize Gamma & Betta gradients + * Compute gradients using the input to + * the normalize. + * Combine transpose with gradients computation. + */ + +template +__global__ void LayerNormBackward1(const T* __restrict__ out_grad, + const T* __restrict__ X_data, + const T* __restrict__ vars, + const T* __restrict__ means, + T* __restrict__ gamma_grad, + T* __restrict__ betta_grad, + int rows, + int width) +{ + __shared__ float betta_buffer[TILE_DIM][TILE_DIM + 1]; + __shared__ float gamma_buffer[TILE_DIM][TILE_DIM + 1]; + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = cg::tiled_partition(b); + + int idx = blockDim.x * blockIdx.x + threadIdx.x; + int offset = threadIdx.y * width + idx; + int y_stride = width * TILE_DIM; + + int pos = blockIdx.x * TILE_DIM + threadIdx.y; + // Loop across matrix height + + float betta_tmp = 0; + float gamma_tmp = 0; + for (int r = threadIdx.y; r < rows; r += TILE_DIM) { + float grad = (float)out_grad[offset]; + float val = (float)X_data[offset]; + val = (val - (float)means[r]) * rsqrtf((float)vars[r]); + betta_tmp += grad; + gamma_tmp += (val * grad); + + offset += y_stride; + } + + betta_buffer[threadIdx.x][threadIdx.y] = betta_tmp; + gamma_buffer[threadIdx.x][threadIdx.y] = gamma_tmp; + + __syncthreads(); + + // Sum the shared buffer. 
+ float s1 = betta_buffer[threadIdx.y][threadIdx.x]; + float s2 = gamma_buffer[threadIdx.y][threadIdx.x]; + +#ifndef __STOCHASTIC_MODE__ + __syncthreads(); +#endif + + for (int i = 1; i < TILE_DIM; i <<= 1) { + s1 += g.shfl_down(s1, i); + s2 += g.shfl_down(s2, i); + } + + if (threadIdx.x == 0) { + betta_grad[pos] = s1; + gamma_grad[pos] = s2; + } +} +/* + +/* Backward Normalize (Input-Gradient) + * Using the means and variances from the input + * This type of backward is invertible! + * We do the backward using the X_hat (X - u) / sqrt(variance) or the output of Normalization. + */ + +__global__ void LayerNormBackward2(const float* out_grad, + const float* vals_hat, + const float* gamma, + const float* betta, + const float* vars, + float* inp_grad, + bool invertible, + int row_stride) +{ + int iteration_stride = blockDim.x; + int iterations = row_stride / iteration_stride; + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = cg::tiled_partition(b); + + int row = blockIdx.x; + int id = threadIdx.x; + int wid = id / WARP_SIZE; + int warp_num = iteration_stride >> WARP_SIZE_BITS; + __shared__ float partialSum[MAX_WARP_NUM]; + + out_grad += (row * row_stride); + vals_hat += (row * row_stride); + inp_grad += (row * row_stride); + + float vals_arr[NORM_REG]; + float vals_hat_arr[NORM_REG]; + int high_index = iterations * iteration_stride + id; +#pragma unroll + for (int i = 0; i < iterations; i++) { + float gamma_reg = gamma[i * iteration_stride + id]; + vals_arr[i] = out_grad[i * iteration_stride + id]; + vals_arr[i] *= gamma_reg; + vals_hat_arr[i] = + (invertible ? (vals_hat[i * iteration_stride + id] - betta[i * iteration_stride + id]) / + gamma_reg + : vals_hat[i * iteration_stride + id]); + } + if ((high_index) < row_stride) { + float gamma_reg = gamma[high_index]; + vals_arr[iterations] = out_grad[high_index]; + vals_arr[iterations] *= gamma_reg; + vals_hat_arr[iterations] = + (invertible ? 
(vals_hat[high_index] - betta[high_index]) / gamma_reg + : vals_hat[high_index]); + iterations++; + } + + float var_reg = vars[row]; + + float sum = 0; + for (int i = 0; i < iterations; i++) { + sum += vals_hat_arr[i] * vals_arr[i] * + sqrtf(var_reg); // dval_hat = gamma * (x - u) * out_grad + vals_arr[i] *= rsqrtf(var_reg); // dvar_inv = gamma * out_grad / sqrt(var) + } + + for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } + + if (g.thread_rank() == 0) partialSum[wid] = sum; + + __syncthreads(); + + if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; + +#ifndef __STOCHASTIC_MODE__ + __syncthreads(); +#endif + + for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); + + sum = g.shfl(sum, 0); + sum /= row_stride; + + for (int i = 0; i < iterations; i++) { vals_arr[i] += ((-sum * vals_hat_arr[i]) / var_reg); } + + sum = 0; + for (int i = 0; i < iterations; i++) { sum += vals_arr[i]; } + + for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } + + if (g.thread_rank() == 0) partialSum[wid] = sum; + + __syncthreads(); + + if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; + +#ifndef __STOCHASTIC_MODE__ + __syncthreads(); +#endif + + for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); + sum = g.shfl(sum, 0); + sum /= row_stride; + + iterations = row_stride / iteration_stride; + for (int i = 0; i < iterations; i++) inp_grad[i * iteration_stride + id] = (vals_arr[i] - sum); + if ((high_index) < row_stride) inp_grad[high_index] = (vals_arr[iterations] - sum); +} + +__global__ void LayerNormBackward2(const __half* out_grad, + const __half* vals_hat, + const __half* gamma, + const __half* betta, + const __half* vars, + __half* inp_grad, + bool invertible, + int row_stride) +{ +#ifdef HALF_PRECISION_AVAILABLE + int iteration_stride = blockDim.x; + int iterations = row_stride / iteration_stride; + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = 
cg::tiled_partition(b); + + int row = blockIdx.x; + int id = threadIdx.x; + int wid = id / WARP_SIZE; + int warp_num = iteration_stride >> WARP_SIZE_BITS; + __shared__ float partialSum[MAX_WARP_NUM]; + + __half2 vals_arr[NORM_REG]; + float2 vals_arr_f[NORM_REG]; + __half2 vals_hat_arr[NORM_REG]; + + __half2* inp_grad_h = reinterpret_cast<__half2*>(inp_grad); + const __half2* out_grad_h = reinterpret_cast(out_grad); + const __half2* vals_hat_h = reinterpret_cast(vals_hat); + + inp_grad_h += (row * row_stride); + out_grad_h += (row * row_stride); + vals_hat_h += (row * row_stride); + + const __half2* gamma_h = reinterpret_cast(gamma); + const __half2* betta_h = (invertible ? reinterpret_cast(betta) : nullptr); + int high_index = iterations * iteration_stride + id; +#pragma unroll + for (int i = 0; i < iterations; i++) { + __half2 gamma_reg = gamma_h[i * iteration_stride + id]; + vals_arr[i] = out_grad_h[i * iteration_stride + id]; + vals_arr[i] *= gamma_reg; + vals_hat_arr[i] = + (invertible + ? (vals_hat_h[i * iteration_stride + id] - betta_h[i * iteration_stride + id]) / + gamma_reg + : vals_hat_h[i * iteration_stride + id]); + } + if ((high_index) < row_stride) { + __half2 gamma_reg = gamma_h[high_index]; + vals_arr[iterations] = out_grad_h[high_index]; + vals_arr[iterations] *= gamma_reg; + vals_hat_arr[iterations] = + (invertible ? 
(vals_hat_h[high_index] - betta_h[high_index]) / gamma_reg + : vals_hat_h[high_index]); + iterations++; + } + __half var_h = vars[row]; + __half2 var_reg = __halves2half2(var_h, var_h); + + float sum = 0.f; + for (int i = 0; i < iterations; i++) { + __half2 result_h = (vals_hat_arr[i] * vals_arr[i] * h2sqrt(var_reg)); + float2 result_f = __half22float2(result_h); + sum += result_f.x; + sum += result_f.y; + vals_arr[i] *= h2rsqrt(var_reg); + } + + for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } + + if (g.thread_rank() == 0) partialSum[wid] = sum; + + __syncthreads(); + + if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; + +#ifndef __STOCHASTIC_MODE__ + __syncthreads(); +#endif + + for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); + + sum = g.shfl(sum, 0); + sum /= (2 * row_stride); + __half2 sum_h = __float2half2_rn(sum); + + for (int i = 0; i < iterations; i++) { + __half2 temp = ((-sum_h * vals_hat_arr[i]) / (var_reg)); + vals_arr_f[i] = __half22float2(vals_arr[i]); + float2 temp_f = __half22float2(temp); + vals_arr_f[i].x += temp_f.x; + vals_arr_f[i].y += temp_f.y; + } + sum = 0.f; + + for (int i = 0; i < iterations; i++) { + sum += (vals_arr_f[i].x); + sum += (vals_arr_f[i].y); + } + + for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } + + if (g.thread_rank() == 0) partialSum[wid] = sum; + + __syncthreads(); + + if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; + +#ifndef __STOCHASTIC_MODE__ + __syncthreads(); +#endif + + for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); + + sum = g.shfl(sum, 0); + sum /= (2 * row_stride); + + iterations = row_stride / iteration_stride; + for (int i = 0; i < iterations; i++) { + vals_arr_f[i].x -= sum; + vals_arr_f[i].y -= sum; + __half2 temp = __float22half2_rn(vals_arr_f[i]); + + inp_grad_h[i * iteration_stride + id] = temp; + } + if ((high_index) < row_stride) { + vals_arr_f[iterations].x -= sum; + 
vals_arr_f[iterations].y -= sum; + __half2 temp = __float22half2_rn(vals_arr_f[iterations]); + + inp_grad_h[high_index] = temp; + } +#endif +} + +template <> +void launch_layerNorm_backward(const float* out_grad, + const float* vals_hat, + const float* vars, + const float* gamma, + float* gamma_grad, + float* betta_grad, + float* inp_grad, + int batch, + int hidden_dim, + cudaStream_t stream[2], + bool invertible, + const float* betta) +{ + int threads = THREADS; + + dim3 grid_dim(hidden_dim / TILE_DIM); + dim3 block_dim(TILE_DIM, TILE_DIM); + + LayerNormBackward1<<>>( + out_grad, vals_hat, gamma, betta, gamma_grad, betta_grad, batch, hidden_dim, invertible); + + dim3 grid_dim2(batch); + + if (hidden_dim > 16384 && hidden_dim <= 32768) + threads <<= 1; + else if (hidden_dim > 32768 && hidden_dim <= 65536) + threads <<= 2; + else if (hidden_dim > 65536) + throw std::runtime_error("Unsupport hidden_dim."); + + dim3 block_dim2(threads); + + LayerNormBackward2<<>>( + out_grad, vals_hat, gamma, betta, vars, inp_grad, invertible, hidden_dim); +} + +template <> +void launch_layerNorm_backward<__half>(const __half* out_grad, + const __half* vals_hat, + const __half* vars, + const __half* gamma, + __half* gamma_grad, + __half* betta_grad, + __half* inp_grad, + int batch, + int hidden_dim, + cudaStream_t stream[2], + bool invertible, + const __half* betta) +{ + int threads = THREADS; + + dim3 grid_dim(hidden_dim / TILE_DIM); + dim3 block_dim(TILE_DIM, TILE_DIM); + + // LayerNormBackward1<__half><<>>( + // out_grad, vals_hat, gamma, betta, gamma_grad, betta_grad, batch, hidden_dim, invertible); + + dim3 grid_dim2(batch); + + if (hidden_dim > 8192 && hidden_dim <= 16384) + threads <<= 1; + else if (hidden_dim > 16384 && hidden_dim <= 32768) + threads <<= 2; + else if (hidden_dim > 32768 && hidden_dim <= 65536) + threads <<= 3; + else if (hidden_dim > 65536) + throw std::runtime_error("Unsupport hidden_dim."); + + dim3 block_dim2(threads / 2); + + LayerNormBackward2<<>>( + 
out_grad, vals_hat, gamma, betta, vars, inp_grad, invertible, hidden_dim / 2); +} + +/* Backward Normalize (Input-Gradient) + * Using the means and variances from the input + * This type of backward is not invertible! + * We do the backward using the input (X) + */ + +__global__ void LayerNormBackward2(const float* out_grad, + const float* X_vals, + const float* gamma, + const float* vars, + const float* means, + float* inp_grad, + int row_stride) +{ + int iteration_stride = blockDim.x; + int iterations = row_stride / iteration_stride; + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = cg::tiled_partition(b); + + int row = blockIdx.x; + int id = threadIdx.x; + int wid = id >> WARP_SIZE_BITS; + int warp_num = iteration_stride >> WARP_SIZE_BITS; + __shared__ float partialSum[MAX_WARP_NUM]; + + out_grad += (row * row_stride); + X_vals += (row * row_stride); + inp_grad += (row * row_stride); + + float vals_arr[NORM_REG]; + int high_index = iterations * iteration_stride + id; +#pragma unroll + for (int i = 0; i < iterations; i++) { + float gamma_reg = gamma[i * iteration_stride + id]; + vals_arr[i] = out_grad[i * iteration_stride + id]; + vals_arr[i] *= gamma_reg; + } + if ((high_index) < row_stride) { + float gamma_reg = gamma[high_index]; + vals_arr[iterations] = out_grad[high_index]; + vals_arr[iterations] *= gamma_reg; + iterations++; + } + + float var_reg = vars[row]; + float mean_reg = means[row]; + + float sum = 0; + float xu[NORM_REG]; + for (int i = 0; i < iterations; i++) { + xu[i] = (X_vals[i * iteration_stride + id] - mean_reg); + sum += vals_arr[i] * xu[i]; + vals_arr[i] *= rsqrtf(var_reg); + } + + for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } + + if (g.thread_rank() == 0) partialSum[wid] = sum; + + __syncthreads(); + + if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; + +#ifndef __STOCHASTIC_MODE__ + __syncthreads(); +#endif + + for (int i = 1; i < warp_num; i *= 2) sum += 
g.shfl_down(sum, i); + + sum = g.shfl(sum, 0); + sum /= row_stride; + + for (int i = 0; i < iterations; i++) { + vals_arr[i] += (-sum * xu[i] * rsqrtf(var_reg) / (var_reg)); + } + + sum = 0; + for (int i = 0; i < iterations; i++) { sum += vals_arr[i]; } + + for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } + + if (g.thread_rank() == 0) partialSum[wid] = sum; + + __syncthreads(); + + if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; + +#ifndef __STOCHASTIC_MODE__ + __syncthreads(); +#endif + + for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); + sum = g.shfl(sum, 0); + sum /= row_stride; + + iterations = row_stride / iteration_stride; + for (int i = 0; i < iterations; i++) inp_grad[i * iteration_stride + id] = (vals_arr[i] - sum); + if ((high_index) < row_stride) inp_grad[high_index] = (vals_arr[iterations] - sum); +} + +__global__ void LayerNormBackward2(const __half* out_grad, + const __half* X_vals, + const __half* gamma, + const __half* vars, + const __half* means, + __half* inp_grad, + int row_stride) +{ +#ifdef HALF_PRECISION_AVAILABLE + int iteration_stride = blockDim.x; + int iterations = row_stride / iteration_stride; + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = cg::tiled_partition(b); + + int row = blockIdx.x; + int id = threadIdx.x; + int wid = id >> WARP_SIZE_BITS; + int warp_num = iteration_stride >> WARP_SIZE_BITS; + + __shared__ float partialSum[MAX_WARP_NUM]; + + __half2 vals_arr[NORM_REG]; + float2 vals_arr_f[NORM_REG]; + __half2 xu[NORM_REG]; + + __half2* inp_grad_h = reinterpret_cast<__half2*>(inp_grad); + const __half2* out_grad_h = reinterpret_cast(out_grad); + const __half2* vals_hat_h = reinterpret_cast(X_vals); + + inp_grad_h += (row * row_stride); + out_grad_h += (row * row_stride); + vals_hat_h += (row * row_stride); + + const __half2* gamma_h = reinterpret_cast(gamma); + int high_index = iterations * iteration_stride + id; + + __half mean_h = means[row]; 
+ __half2 mean_reg = __halves2half2(mean_h, mean_h); +#pragma unroll + for (int i = 0; i < iterations; i++) { + __half2 gamma_reg = gamma_h[i * iteration_stride + id]; + vals_arr[i] = out_grad_h[i * iteration_stride + id]; + vals_arr[i] *= gamma_reg; // out_grad * gamma + xu[i] = (vals_hat_h[i * iteration_stride + id] - mean_reg); + } + if ((high_index) < row_stride) { + __half2 gamma_reg = gamma_h[high_index]; + vals_arr[iterations] = out_grad_h[high_index]; + vals_arr[iterations] *= gamma_reg; // out_grad * gamma + xu[iterations] = (vals_hat_h[high_index] - mean_reg); + iterations++; + } + __half var_h = vars[row]; + __half2 var_reg = __halves2half2(var_h, var_h); + + float sum = 0.f; + for (int i = 0; i < iterations; i++) { + __half2 result_h = (xu[i] * vals_arr[i]); + float2 result_f = __half22float2(result_h); + sum += result_f.x; + sum += result_f.y; + vals_arr[i] *= h2rsqrt(var_reg); + } + + for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } + + if (g.thread_rank() == 0) partialSum[wid] = sum; + + __syncthreads(); + + if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; + +#ifndef __STOCHASTIC_MODE__ + __syncthreads(); +#endif + + for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); + + sum = g.shfl(sum, 0); + sum /= (2 * row_stride); + __half2 sum_h = __float2half2_rn(sum); + + for (int i = 0; i < iterations; i++) { + __half2 xu_grad = ((-sum_h * xu[i] * h2rsqrt(var_reg)) / (var_reg)); + vals_arr_f[i] = __half22float2(vals_arr[i]); + float2 xu_grad_f = __half22float2(xu_grad); + vals_arr_f[i].x += xu_grad_f.x; + vals_arr_f[i].y += xu_grad_f.y; + } + + sum = 0.f; + for (int i = 0; i < iterations; i++) { + sum += (vals_arr_f[i].x); + sum += (vals_arr_f[i].y); + } + + for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } + + if (g.thread_rank() == 0) partialSum[wid] = sum; + + __syncthreads(); + + if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; + +#ifndef __STOCHASTIC_MODE__ + 
__syncthreads(); +#endif + + for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); + + sum = g.shfl(sum, 0); + sum /= (2 * row_stride); + + iterations = row_stride / iteration_stride; + for (int i = 0; i < iterations; i++) { + vals_arr_f[i].x -= sum; + vals_arr_f[i].y -= sum; + __half2 temp = __float22half2_rn(vals_arr_f[i]); + inp_grad_h[i * iteration_stride + id] = temp; + } + if ((high_index) < row_stride) { + vals_arr_f[iterations].x -= sum; + vals_arr_f[iterations].y -= sum; + __half2 temp = __float22half2_rn(vals_arr_f[iterations]); + inp_grad_h[high_index] = temp; + } +#endif +} + +template <> +void launch_layerNorm_backward(const float* out_grad, + const float* X_data, + const float* vars, + const float* means, + const float* gamma, + float* gamma_grad, + float* betta_grad, + float* inp_grad, + int batch, + int hidden_dim, + cudaStream_t stream[2]) +{ + int threads = THREADS; + + dim3 grid_dim(hidden_dim / TILE_DIM); + dim3 block_dim(TILE_DIM, TILE_DIM); + + LayerNormBackward1<<>>( + out_grad, X_data, vars, means, gamma_grad, betta_grad, batch, hidden_dim); + + dim3 grid_dim2(batch); + + if (hidden_dim > 16384 && hidden_dim <= 32768) + threads <<= 1; + else if (hidden_dim > 32768 && hidden_dim <= 65536) + threads <<= 2; + else if (hidden_dim > 65536) + throw std::runtime_error("Unsupport hidden_dim."); + + dim3 block_dim2(threads); + LayerNormBackward2<<>>( + out_grad, X_data, gamma, vars, means, inp_grad, hidden_dim); +} + +template <> +void launch_layerNorm_backward<__half>(const __half* out_grad, + const __half* X_data, + const __half* vars, + const __half* means, + const __half* gamma, + __half* gamma_grad, + __half* betta_grad, + __half* inp_grad, + int batch, + int hidden_dim, + cudaStream_t stream[2]) +{ + int threads = THREADS; + + dim3 grid_dim(hidden_dim / TILE_DIM); + dim3 block_dim(TILE_DIM, TILE_DIM); + + LayerNormBackward1<__half><<>>( + out_grad, X_data, vars, means, gamma_grad, betta_grad, batch, hidden_dim); + + dim3 
grid_dim2(batch); + + if (hidden_dim > 8192 && hidden_dim <= 16384) + threads <<= 1; + else if (hidden_dim > 16384 && hidden_dim <= 32768) + threads <<= 2; + else if (hidden_dim > 32768 && hidden_dim <= 65536) + threads <<= 3; + else if (hidden_dim > 65536) + throw std::runtime_error("Unsupport hidden_dim."); + + dim3 block_dim2(threads / 2); + LayerNormBackward2<<>>( + out_grad, X_data, gamma, vars, means, inp_grad, hidden_dim / 2); +} + +template +__global__ void LayerNormBackward1_fused_add(const T* __restrict__ out_grad1, + const T* __restrict__ out_grad2, + const T* __restrict__ vals_hat, + const T* __restrict__ gamma, + const T* __restrict__ betta, + T* __restrict__ gamma_grad, + T* __restrict__ betta_grad, + int rows, + int width, + bool invertible) +{ + __shared__ float betta_buffer[TILE_DIM][TILE_DIM + 1]; + __shared__ float gamma_buffer[TILE_DIM][TILE_DIM + 1]; + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = cg::tiled_partition(b); + + int idx = blockDim.x * blockIdx.x + threadIdx.x; + int offset = threadIdx.y * width + idx; + int y_stride = width * TILE_DIM; + + float betta_reg = (invertible ? (float)betta[idx] : 0.0f); + float gamma_reg = (float)gamma[idx]; + + // Loop across matrix height + float betta_tmp = 0; + float gamma_tmp = 0; + for (int r = threadIdx.y; r < rows; r += TILE_DIM) { + float grad = (float)out_grad1[offset] + (float)out_grad2[offset]; + float val = (invertible ? ((float)vals_hat[offset] - betta_reg) / gamma_reg + : (float)vals_hat[offset]); + betta_tmp += grad; + gamma_tmp += (val * grad); + + offset += y_stride; + } + + betta_buffer[threadIdx.x][threadIdx.y] = betta_tmp; + gamma_buffer[threadIdx.x][threadIdx.y] = gamma_tmp; + + __syncthreads(); + + // Sum the shared buffer. 
+ float s1 = betta_buffer[threadIdx.y][threadIdx.x]; + float s2 = gamma_buffer[threadIdx.y][threadIdx.x]; + +#ifndef __STOCHASTIC_MODE__ + __syncthreads(); +#endif + + for (int i = 1; i < TILE_DIM; i <<= 1) { + s1 += g.shfl_down(s1, i); + s2 += g.shfl_down(s2, i); + } + + if (threadIdx.x == 0) { + int pos = blockIdx.x * TILE_DIM + threadIdx.y; + betta_grad[pos] = s1; + gamma_grad[pos] = s2; + } +} + +template +__global__ void LayerNormBackward1_fused_add(const T* __restrict__ out_grad1, + const T* __restrict__ out_grad2, + const T* __restrict__ X_data, + const T* __restrict__ vars, + const T* __restrict__ means, + T* __restrict__ gamma_grad, + T* __restrict__ betta_grad, + int rows, + int width) +{ + __shared__ float betta_buffer[TILE_DIM][TILE_DIM + 1]; + __shared__ float gamma_buffer[TILE_DIM][TILE_DIM + 1]; + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = cg::tiled_partition(b); + + int idx = blockDim.x * blockIdx.x + threadIdx.x; + int offset = threadIdx.y * width + idx; + int y_stride = width * TILE_DIM; + + int pos = blockIdx.x * TILE_DIM + threadIdx.y; + // Loop across matrix height + + float betta_tmp = 0; + float gamma_tmp = 0; + for (int r = threadIdx.y; r < rows; r += TILE_DIM) { + float grad = (float)out_grad1[offset] + (float)out_grad2[offset]; + float val = (float)X_data[offset]; + val = (val - (float)means[r]) * rsqrtf((float)vars[r]); + betta_tmp += grad; + gamma_tmp += (val * grad); + + offset += y_stride; + } + + betta_buffer[threadIdx.x][threadIdx.y] = betta_tmp; + gamma_buffer[threadIdx.x][threadIdx.y] = gamma_tmp; + + __syncthreads(); + + // Sum the shared buffer. 
+ float s1 = betta_buffer[threadIdx.y][threadIdx.x]; + float s2 = gamma_buffer[threadIdx.y][threadIdx.x]; + +#ifndef __STOCHASTIC_MODE__ + __syncthreads(); +#endif + + for (int i = 1; i < TILE_DIM; i <<= 1) { + s1 += g.shfl_down(s1, i); + s2 += g.shfl_down(s2, i); + } + + if (threadIdx.x == 0) { + betta_grad[pos] = s1; + gamma_grad[pos] = s2; + } +} + +__global__ void LayerNormBackward2_fused_add(const float* out_grad1, + const float* out_grad2, + const float* vals_hat, + const float* gamma, + const float* betta, + const float* vars, + float* inp_grad, + bool invertible, + int row_stride) +{ + int iteration_stride = blockDim.x; + int iterations = row_stride / iteration_stride; + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = cg::tiled_partition(b); + + int row = blockIdx.x; + int id = threadIdx.x; + int wid = id / WARP_SIZE; + int warp_num = iteration_stride >> WARP_SIZE_BITS; + __shared__ float partialSum[MAX_WARP_NUM]; + + out_grad1 += (row * row_stride); + out_grad2 += (row * row_stride); + vals_hat += (row * row_stride); + inp_grad += (row * row_stride); + + float vals_arr[NORM_REG]; + float vals_hat_arr[NORM_REG]; + int high_index = iterations * iteration_stride + id; +#pragma unroll + for (int i = 0; i < iterations; i++) { + float gamma_reg = gamma[i * iteration_stride + id]; + vals_arr[i] = out_grad1[i * iteration_stride + id]; + vals_arr[i] *= gamma_reg; + vals_hat_arr[i] = + (invertible ? (vals_hat[i * iteration_stride + id] - betta[i * iteration_stride + id]) / + gamma_reg + : vals_hat[i * iteration_stride + id]); + } + if ((high_index) < row_stride) { + float gamma_reg = gamma[high_index]; + vals_arr[iterations] = out_grad1[high_index]; + vals_arr[iterations] *= gamma_reg; + vals_hat_arr[iterations] = + (invertible ? 
(vals_hat[high_index] - betta[high_index]) / gamma_reg + : vals_hat[high_index]); + iterations++; + } + + float var_reg = vars[row]; + + float sum = 0; + for (int i = 0; i < iterations; i++) { + sum += vals_hat_arr[i] * vals_arr[i] * sqrtf(var_reg); + vals_arr[i] *= rsqrtf(var_reg); + } + + for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } + + if (g.thread_rank() == 0) partialSum[wid] = sum; + + __syncthreads(); + + if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; + +#ifndef __STOCHASTIC_MODE__ + __syncthreads(); +#endif + + for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); + + sum = g.shfl(sum, 0); + sum /= row_stride; + + for (int i = 0; i < iterations; i++) { vals_arr[i] += ((-sum * vals_hat_arr[i]) / var_reg); } + + sum = 0; + for (int i = 0; i < iterations; i++) { sum += vals_arr[i]; } + + for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } + + if (g.thread_rank() == 0) partialSum[wid] = sum; + + __syncthreads(); + + if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; + +#ifndef __STOCHASTIC_MODE__ + __syncthreads(); +#endif + + for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); + sum = g.shfl(sum, 0); + sum /= row_stride; + + iterations = row_stride / iteration_stride; + for (int i = 0; i < iterations; i++) + inp_grad[i * iteration_stride + id] = + (vals_arr[i] - sum) + out_grad2[i * iteration_stride + id]; + if ((high_index) < row_stride) + inp_grad[high_index] = (vals_arr[iterations] - sum) + out_grad2[high_index]; +} + +__global__ void LayerNormBackward2_fused_add(const __half* out_grad1, + const __half* out_grad2, + const __half* vals_hat, + const __half* gamma, + const __half* betta, + const __half* vars, + __half* inp_grad, + bool invertible, + int row_stride) +{ +#ifdef HALF_PRECISION_AVAILABLE + int iteration_stride = blockDim.x; + int iterations = row_stride / iteration_stride; + + cg::thread_block b = cg::this_thread_block(); + 
cg::thread_block_tile g = cg::tiled_partition(b); + + int row = blockIdx.x; + int id = threadIdx.x; + int wid = id / WARP_SIZE; + int warp_num = iteration_stride >> WARP_SIZE_BITS; + __shared__ float partialSum[MAX_WARP_NUM]; + + __half2 vals_arr[NORM_REG]; + float2 vals_arr_f[NORM_REG]; + __half2 vals_hat_arr[NORM_REG]; + + // float2 result[iterations]; + + __half2* inp_grad_h = reinterpret_cast<__half2*>(inp_grad); + const __half2* out_grad_h1 = reinterpret_cast(out_grad1); + const __half2* out_grad_h2 = reinterpret_cast(out_grad2); + const __half2* vals_hat_h = reinterpret_cast(vals_hat); + + inp_grad_h += (row * row_stride); + out_grad_h1 += (row * row_stride); + out_grad_h2 += (row * row_stride); + vals_hat_h += (row * row_stride); + + const __half2* gamma_h = reinterpret_cast(gamma); + const __half2* betta_h = (invertible ? reinterpret_cast(betta) : nullptr); + int high_index = iterations * iteration_stride + id; +#pragma unroll + for (int i = 0; i < iterations; i++) { + __half2 gamma_reg = gamma_h[i * iteration_stride + id]; + vals_arr[i] = out_grad_h1[i * iteration_stride + id]; + vals_arr[i] *= gamma_reg; // out_grad * gamma + vals_hat_arr[i] = + (invertible + ? (vals_hat_h[i * iteration_stride + id] - betta_h[i * iteration_stride + id]) / + gamma_reg + : vals_hat_h[i * iteration_stride + id]); + } + if ((high_index) < row_stride) { + __half2 gamma_reg = gamma_h[high_index]; + vals_arr[iterations] = out_grad_h1[high_index]; + vals_arr[iterations] *= gamma_reg; // out_grad * gamma + vals_hat_arr[iterations] = + (invertible ? 
(vals_hat_h[high_index] - betta_h[high_index]) / gamma_reg + : vals_hat_h[high_index]); + iterations++; + } + __half var_h = vars[row]; + __half2 var_reg = __halves2half2(var_h, var_h); + + float sum = 0.f; + for (int i = 0; i < iterations; i++) { + __half2 result_h = (vals_hat_arr[i] * vals_arr[i] * h2sqrt(var_reg)); + float2 result_f = __half22float2(result_h); + sum += result_f.x; + sum += result_f.y; + vals_arr[i] *= h2rsqrt(var_reg); + } + + for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } + + if (g.thread_rank() == 0) partialSum[wid] = sum; + + __syncthreads(); + + if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; + +#ifndef __STOCHASTIC_MODE__ + __syncthreads(); +#endif + + for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); + + sum = g.shfl(sum, 0); + sum /= (2 * row_stride); + __half2 sum_h = __float2half2_rn(sum); + + for (int i = 0; i < iterations; i++) { + __half2 temp = ((-sum_h * vals_hat_arr[i]) / (var_reg)); + vals_arr_f[i] = __half22float2(vals_arr[i]); + float2 temp_f = __half22float2(temp); + vals_arr_f[i].x += temp_f.x; + vals_arr_f[i].y += temp_f.y; + } + sum = 0.f; + for (int i = 0; i < iterations; i++) { + sum += (vals_arr_f[i].x); + sum += (vals_arr_f[i].y); + } + + for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } + + if (g.thread_rank() == 0) partialSum[wid] = sum; + + __syncthreads(); + + if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; + +#ifndef __STOCHASTIC_MODE__ + __syncthreads(); +#endif + + for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); + + sum = g.shfl(sum, 0); + sum /= (2 * row_stride); + + iterations = row_stride / iteration_stride; + for (int i = 0; i < iterations; i++) { + vals_arr_f[i].x -= sum; + vals_arr_f[i].y -= sum; + __half2 temp = __float22half2_rn(vals_arr_f[i]); + + inp_grad_h[i * iteration_stride + id] = temp + out_grad_h2[i * iteration_stride + id]; + } + if ((high_index) < row_stride) { + 
vals_arr_f[iterations].x -= sum; + vals_arr_f[iterations].y -= sum; + __half2 temp = __float22half2_rn(vals_arr_f[iterations]); + + inp_grad_h[high_index] = temp + out_grad_h2[high_index]; + } +#endif +} + +template <> +void launch_layerNorm_backward_fused_add(const float* out_grad1, + const float* out_grad2, + const float* vals_hat, + const float* vars, + const float* gamma, + float* gamma_grad, + float* betta_grad, + float* inp_grad, + int batch, + int hidden_dim, + cudaStream_t stream[2], + bool invertible, + const float* betta) +{ + int threads = THREADS; + + dim3 grid_dim(hidden_dim / TILE_DIM); + dim3 block_dim(TILE_DIM, TILE_DIM); + LayerNormBackward1<<>>( + out_grad1, vals_hat, gamma, betta, gamma_grad, betta_grad, batch, hidden_dim, invertible); + + dim3 grid_dim2(batch); + + if (hidden_dim > 16384 && hidden_dim <= 32768) + threads <<= 1; + else if (hidden_dim > 32768 && hidden_dim <= 65536) + threads <<= 2; + else if (hidden_dim > 65536) + throw std::runtime_error("Unsupport hidden_dim."); + + dim3 block_dim2(threads); + LayerNormBackward2_fused_add<<>>( + out_grad1, out_grad2, vals_hat, gamma, betta, vars, inp_grad, invertible, hidden_dim); +} + +template <> +void launch_layerNorm_backward_fused_add<__half>(const __half* out_grad1, + const __half* out_grad2, + const __half* vals_hat, + const __half* vars, + const __half* gamma, + __half* gamma_grad, + __half* betta_grad, + __half* inp_grad, + int batch, + int hidden_dim, + cudaStream_t stream[2], + bool invertible, + const __half* betta) +{ + int threads = THREADS; + + dim3 grid_dim(hidden_dim / TILE_DIM); + dim3 block_dim(TILE_DIM, TILE_DIM); + + LayerNormBackward1<__half><<>>( + out_grad1, vals_hat, gamma, betta, gamma_grad, betta_grad, batch, hidden_dim, invertible); + + dim3 grid_dim2(batch); + + if (hidden_dim > 8192 && hidden_dim <= 16384) + threads <<= 1; + else if (hidden_dim > 16384 && hidden_dim <= 32768) + threads <<= 2; + else if (hidden_dim > 32768 && hidden_dim <= 65536) + threads <<= 3; + 
else if (hidden_dim > 65536) + throw std::runtime_error("Unsupport hidden_dim."); + + dim3 block_dim2(threads / 2); + LayerNormBackward2_fused_add<<>>( + out_grad1, out_grad2, vals_hat, gamma, betta, vars, inp_grad, invertible, hidden_dim / 2); +} + +/* Backward Normalize (Input-Gradient) + * Using the means and variances from the input + * This type of backward is not invertible! + * We do the backward using the input (X) + */ + +__global__ void LayerNormBackward2_fused_add(const float* out_grad1, + const float* out_grad2, + const float* X_vals, + const float* gamma, + const float* vars, + const float* means, + float* inp_grad, + int row_stride) +{ + int iteration_stride = blockDim.x; + int iterations = row_stride / iteration_stride; + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = cg::tiled_partition(b); + + int row = blockIdx.x; + int id = threadIdx.x; + int wid = id / WARP_SIZE; + int warp_num = iteration_stride >> WARP_SIZE_BITS; + __shared__ float partialSum[MAX_WARP_NUM]; + + float vals_arr[NORM_REG]; + float vals_hat_arr[NORM_REG]; + + out_grad1 += (row * row_stride); + out_grad2 += (row * row_stride); + X_vals += (row * row_stride); + inp_grad += (row * row_stride); + int high_index = iterations * iteration_stride + id; +#pragma unroll + for (int i = 0; i < iterations; i++) { + float gamma_reg = gamma[i * iteration_stride + id]; + vals_arr[i] = out_grad1[i * iteration_stride + id]; + vals_arr[i] *= gamma_reg; + vals_hat_arr[i] = X_vals[i * iteration_stride + id]; + } + if ((high_index) < row_stride) { + float gamma_reg = gamma[high_index]; + vals_arr[iterations] = out_grad1[high_index]; + vals_arr[iterations] *= gamma_reg; + vals_hat_arr[iterations] = X_vals[high_index]; + iterations++; + } + + float var_reg = vars[row]; + float mean_reg = means[row]; + + float sum = 0; + float xu[NORM_REG]; + for (int i = 0; i < iterations; i++) { + xu[i] = (vals_hat_arr[i] - mean_reg); + sum += vals_arr[i] * xu[i]; + vals_arr[i] *= 
rsqrtf(var_reg); + } + + for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } + + if (g.thread_rank() == 0) partialSum[wid] = sum; + + __syncthreads(); + + if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; + +#ifndef __STOCHASTIC_MODE__ + __syncthreads(); +#endif + + for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); + + sum = g.shfl(sum, 0); + sum /= row_stride; + + for (int i = 0; i < iterations; i++) { + vals_arr[i] += (-sum * xu[i] * rsqrtf(var_reg) / (var_reg)); + } + + sum = 0; + for (int i = 0; i < iterations; i++) { sum += vals_arr[i]; } + + for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } + + if (g.thread_rank() == 0) partialSum[wid] = sum; + + __syncthreads(); + + if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; + +#ifndef __STOCHASTIC_MODE__ + __syncthreads(); +#endif + + for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); + sum = g.shfl(sum, 0); + sum /= row_stride; + + iterations = row_stride / iteration_stride; + for (int i = 0; i < iterations; i++) + inp_grad[i * iteration_stride + id] = + (vals_arr[i] - sum) + out_grad2[i * iteration_stride + id]; + if ((high_index) < row_stride) + inp_grad[high_index] = (vals_arr[iterations] - sum) + out_grad2[high_index]; +} + +__global__ void LayerNormBackward2_fused_add(const __half* out_grad1, + const __half* out_grad2, + const __half* X_vals, + const __half* gamma, + const __half* vars, + const __half* means, + __half* inp_grad, + int row_stride) +{ +#ifdef HALF_PRECISION_AVAILABLE + int iteration_stride = blockDim.x; + int iterations = row_stride / iteration_stride; + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = cg::tiled_partition(b); + + int row = blockIdx.x; + int id = threadIdx.x; + int wid = id / WARP_SIZE; + int warp_num = iteration_stride >> WARP_SIZE_BITS; + + __shared__ float partialSum[MAX_WARP_NUM]; + + __half2 vals_arr[NORM_REG]; + float2 vals_arr_f[NORM_REG]; + 
__half2 vals_hat_arr[NORM_REG]; + + __half2* inp_grad_h = reinterpret_cast<__half2*>(inp_grad); + const __half2* out_grad_h1 = reinterpret_cast(out_grad1); + const __half2* out_grad_h2 = reinterpret_cast(out_grad2); + const __half2* vals_hat_h = reinterpret_cast(X_vals); + + out_grad_h1 += (row * row_stride); + out_grad_h2 += (row * row_stride); + inp_grad_h += (row * row_stride); + vals_hat_h += (row * row_stride); + + const __half2* gamma_h = reinterpret_cast(gamma); + int high_index = iterations * iteration_stride + id; +#pragma unroll + for (int i = 0; i < iterations; i++) { + __half2 gamma_reg = gamma_h[i * iteration_stride + id]; + vals_arr[i] = out_grad_h1[i * iteration_stride + id]; + vals_arr[i] *= gamma_reg; // out_grad * gamma + vals_hat_arr[i] = vals_hat_h[i * iteration_stride + id]; + } + if ((high_index) < row_stride) { + __half2 gamma_reg = gamma_h[high_index]; + vals_arr[iterations] = out_grad_h1[high_index]; + vals_arr[iterations] *= gamma_reg; // out_grad * gamma + vals_hat_arr[iterations] = vals_hat_h[high_index]; + iterations++; + } + + __half mean_h = means[row]; + __half var_h = vars[row]; + __half2 var_reg = __halves2half2(var_h, var_h); + __half2 mean_reg = __halves2half2(mean_h, mean_h); + __half2 xu[NORM_REG]; + + float sum = 0.f; + for (int i = 0; i < iterations; i++) { + xu[i] = (vals_hat_arr[i] - mean_reg); + __half2 result_h = (xu[i] * vals_arr[i]); + float2 result_f = __half22float2(result_h); + sum += result_f.x; + sum += result_f.y; + vals_arr[i] *= h2rsqrt(var_reg); + } + + for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } + + if (g.thread_rank() == 0) partialSum[wid] = sum; + + __syncthreads(); + + if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; + +#ifndef __STOCHASTIC_MODE__ + __syncthreads(); +#endif + + for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); + + sum = g.shfl(sum, 0); + sum /= (2 * row_stride); + __half2 sum_h = __float2half2_rn(sum); + + for (int i = 0; i < 
iterations; i++) { + __half2 xu_grad = ((-sum_h * xu[i] * h2rsqrt(var_reg)) / (var_reg)); + vals_arr_f[i] = __half22float2(vals_arr[i]); + float2 xu_grad_f = __half22float2(xu_grad); + vals_arr_f[i].x += xu_grad_f.x; + vals_arr_f[i].y += xu_grad_f.y; + } + + sum = 0.f; + for (int i = 0; i < iterations; i++) { + sum += (vals_arr_f[i].x); + sum += (vals_arr_f[i].y); + } + + for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } + + if (g.thread_rank() == 0) partialSum[wid] = sum; + + __syncthreads(); + + if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; + +#ifndef __STOCHASTIC_MODE__ + __syncthreads(); +#endif + + for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); + + sum = g.shfl(sum, 0); + sum /= (2 * row_stride); + + iterations = row_stride / iteration_stride; + for (int i = 0; i < iterations; i++) { + vals_arr_f[i].x -= sum; + vals_arr_f[i].y -= sum; + __half2 temp = __float22half2_rn(vals_arr_f[i]); + inp_grad_h[i * iteration_stride + id] = temp + out_grad_h2[i * iteration_stride + id]; + } + if ((high_index) < row_stride) { + vals_arr_f[iterations].x -= sum; + vals_arr_f[iterations].y -= sum; + __half2 temp = __float22half2_rn(vals_arr_f[iterations]); + inp_grad_h[high_index] = temp + out_grad_h2[high_index]; + } +#endif +} + +template <> +void launch_layerNorm_backward_fused_add(const float* out_grad1, + const float* out_grad2, + const float* X_data, + const float* vars, + const float* means, + const float* gamma, + float* gamma_grad, + float* betta_grad, + float* inp_grad, + int batch, + int hidden_dim, + cudaStream_t stream[2]) +{ + int threads = THREADS; + + dim3 grid_dim(hidden_dim / TILE_DIM); + dim3 block_dim(TILE_DIM, TILE_DIM); + + LayerNormBackward1<<>>( + out_grad1, X_data, vars, means, gamma_grad, betta_grad, batch, hidden_dim); + + dim3 grid_dim2(batch); + + if (hidden_dim > 16384 && hidden_dim <= 32768) + threads <<= 1; + else if (hidden_dim > 32768 && hidden_dim <= 65536) + threads <<= 2; + else if 
(hidden_dim > 65536) + throw std::runtime_error("Unsupport hidden_dim."); + + dim3 block_dim2(threads); + LayerNormBackward2_fused_add<<>>( + out_grad1, out_grad2, X_data, gamma, vars, means, inp_grad, hidden_dim); +} + +template <> +void launch_layerNorm_backward_fused_add<__half>(const __half* out_grad1, + const __half* out_grad2, + const __half* X_data, + const __half* vars, + const __half* means, + const __half* gamma, + __half* gamma_grad, + __half* betta_grad, + __half* inp_grad, + int batch, + int hidden_dim, + cudaStream_t stream[2]) +{ + int threads = THREADS; + + dim3 grid_dim(hidden_dim / TILE_DIM); + dim3 block_dim(TILE_DIM, TILE_DIM); + + LayerNormBackward1<__half><<>>( + out_grad1, X_data, vars, means, gamma_grad, betta_grad, batch, hidden_dim); + + dim3 grid_dim2(batch); + + if (hidden_dim > 8192 && hidden_dim <= 16384) + threads <<= 1; + else if (hidden_dim > 16384 && hidden_dim <= 32768) + threads <<= 2; + else if (hidden_dim > 32768 && hidden_dim <= 65536) + threads <<= 3; + else if (hidden_dim > 65536) + throw std::runtime_error("Unsupport hidden_dim."); + + dim3 block_dim2(threads / 2); + LayerNormBackward2_fused_add<<>>( + out_grad1, out_grad2, X_data, gamma, vars, means, inp_grad, hidden_dim / 2); +} diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/softmax_kernels.cu b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/softmax_kernels.cu new file mode 100644 index 0000000000000000000000000000000000000000..d9efed286462f1be971922c2345efb8d2049e227 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/softmax_kernels.cu @@ -0,0 +1,701 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include +#include "custom_cuda_layers.h" +#include "general_kernels.h" + +namespace cg = cooperative_groups; + +dim3 get_attn_softmax_grid(int batch_size, int heads, int sequence_length, int threads) +{ + int seq_length4 = sequence_length / 4; + int block_compute_size = + (seq_length4 < threads ? (int)pow(2.0, floor(log2((float)(threads / seq_length4)))) : 1); + // Note that the Y and Z dimensions are limited to 65535, while X is basically unlimited: + // https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#features-and-technical-specifications + // The batch size is typically relatively small, while the sequence length could potentially be + // arbitrarily large. We therefore place the batch size second to avoid hitting the Y limit. + unsigned x = heads * sequence_length / block_compute_size; + unsigned y = batch_size; + return {x, y}; +} + +// Fused attention + softmax +template +__global__ void attn_softmax(float* vals, + const float* attn_mask, + int heads, + int seq_length, + int iterations) +{ + __shared__ float partialSum[MAX_WARP_NUM]; + + int warp_num = blockDim.x >> WARP_SIZE_BITS; + + int iteration_stride = blockDim.x; + int block_width = blockStride * seq_length; + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = cg::tiled_partition(b); + + int batch = blockIdx.y; + int row = blockIdx.x; + int max_threads_in_sequence = std::max(seq_length, tbSeq); + int seq_lane = threadIdx.x % max_threads_in_sequence; + + int data_offset = batch * (gridDim.x * block_width) + row * block_width + + (threadIdx.x / max_threads_in_sequence) * seq_length; + int mask_offset = batch * seq_length; + + int wid = threadIdx.x >> WARP_SIZE_BITS; + int lane = threadIdx.x & 0x1f; + + float4* val_cast = reinterpret_cast(vals); + const float4* attn_mask_cast = reinterpret_cast(attn_mask); + + float4 data[MAX_THREAD_ITERATIONS]; + + float max_val = minus_infinity; + + for (int i = 0; 
i < iterations; i++) { + int data_id = i * iteration_stride + seq_lane; + if (data_id < seq_length) { + float4 mask = attn_mask_cast[mask_offset + data_id]; + data[i] = val_cast[data_offset + data_id]; + + data[i].x += mask.x; + data[i].y += mask.y; + data[i].z += mask.z; + data[i].w += mask.w; + + max_val = (data[i].x > max_val ? data[i].x : max_val); + max_val = (data[i].y > max_val ? data[i].y : max_val); + max_val = (data[i].z > max_val ? data[i].z : max_val); + max_val = (data[i].w > max_val ? data[i].w : max_val); + } else { + data[i].x = minus_infinity; + data[i].y = minus_infinity; + data[i].z = minus_infinity; + data[i].w = minus_infinity; + } + } + + for (int i = 1; i < tbSize; i *= 2) { + auto temp = g.shfl_xor(max_val, i); + max_val = (temp > max_val ? temp : max_val); + } + + if (seq_length > tbSize) { + if (lane == 0) partialSum[wid] = max_val; + b.sync(); + + if (lane < warp_num) max_val = partialSum[lane]; + +#ifndef __STOCHASTIC_MODE__ + b.sync(); +#endif + + int iters = warp_num; + if (seq_length < iteration_stride) + iters = warp_num / (iteration_stride / max_threads_in_sequence); + + for (int i = 1; i < iters; i *= 2) { + auto temp = g.shfl_xor(max_val, i); + max_val = (temp > max_val ? 
temp : max_val); + } + + max_val = g.shfl(max_val, threadIdx.x / tbSize); + } + + float sum = 0; + for (int i = 0; i < iterations; i++) { + data[i].x = __expf(data[i].x - max_val); + data[i].y = __expf(data[i].y - max_val); + data[i].z = __expf(data[i].z - max_val); + data[i].w = __expf(data[i].w - max_val); + + sum += (data[i].x + data[i].y + data[i].z + data[i].w); + } + + for (int i = 1; i < tbSize; i *= 2) { sum += g.shfl_xor(sum, i); } + + if (seq_length > tbSize) { + if (lane == 0) partialSum[wid] = sum; + b.sync(); + + if (lane < warp_num) sum = partialSum[lane]; + +#ifndef __STOCHASTIC_MODE__ + b.sync(); +#endif + + int iters = warp_num; + if (seq_length < iteration_stride) + iters = warp_num / (iteration_stride / max_threads_in_sequence); + + for (int i = 1; i < iters; i *= 2) { sum += g.shfl_xor(sum, i); } + + sum = g.shfl(sum, threadIdx.x / tbSize); + } + + sum += 1e-6; + + for (int i = 0; i < iterations; i++) { + data[i].x /= sum; + data[i].y /= sum; + data[i].z /= sum; + data[i].w /= sum; + + int data_id = i * iteration_stride + seq_lane; + if (data_id < seq_length) val_cast[data_offset + data_id] = data[i]; + } +} + +template +__global__ void attn_softmax(__half* vals, + const __half* attn_mask, + int heads, + int seq_length, + int iterations) +{ +#ifdef HALF_PRECISION_AVAILABLE + __shared__ float partialSum[MAX_WARP_NUM]; + + int warp_num = blockDim.x >> WARP_SIZE_BITS; + + int iteration_stride = blockDim.x; + int block_width = blockStride * seq_length; + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = cg::tiled_partition(b); + + int batch = blockIdx.y; + int row = blockIdx.x; + int max_threads_in_sequence = std::max(seq_length, tbSeq); + int seq_lane = threadIdx.x % max_threads_in_sequence; + + int data_offset = batch * (gridDim.x * block_width) + row * block_width + + (threadIdx.x / max_threads_in_sequence) * seq_length; + int mask_offset = batch * seq_length; + + int wid = threadIdx.x >> WARP_SIZE_BITS; + int lane = 
threadIdx.x & 0x1f; + + float2* val_cast = reinterpret_cast(vals); + const float2* attn_mask_cast = reinterpret_cast(attn_mask); + + val_cast += data_offset; + attn_mask_cast += mask_offset; + + float2 low_data[MAX_THREAD_ITERATIONS]; + float2 high_data[MAX_THREAD_ITERATIONS]; + + float max_val = minus_infinity; + + for (int i = 0; i < iterations; i++) { + int data_id = i * iteration_stride + seq_lane; + if (data_id < seq_length) { + float2 data = val_cast[data_id]; + float2 mask = attn_mask_cast[data_id]; + + __half2* data_arr = reinterpret_cast<__half2*>(&data); + __half2* mask_arr = reinterpret_cast<__half2*>(&mask); + + low_data[i] = __half22float2(data_arr[0]); + high_data[i] = __half22float2(data_arr[1]); + float2 low_mask = __half22float2(mask_arr[0]); + float2 high_mask = __half22float2(mask_arr[1]); + + low_data[i].x += low_mask.x; + low_data[i].y += low_mask.y; + high_data[i].x += high_mask.x; + high_data[i].y += high_mask.y; + + max_val = (low_data[i].x > max_val ? low_data[i].x : max_val); + max_val = (low_data[i].y > max_val ? low_data[i].y : max_val); + max_val = (high_data[i].x > max_val ? high_data[i].x : max_val); + max_val = (high_data[i].y > max_val ? high_data[i].y : max_val); + } + } + + for (int i = 1; i < tbSize; i *= 2) { + auto temp = g.shfl_xor(max_val, i); + max_val = (temp > max_val ? temp : max_val); + } + + if (seq_length > tbSize) { + if (lane == 0) partialSum[wid] = max_val; + b.sync(); + + if (lane < warp_num) max_val = partialSum[lane]; + +#ifndef __STOCHASTIC_MODE__ + b.sync(); +#endif + + int iters = warp_num; + if (seq_length < iteration_stride) + iters = warp_num / (iteration_stride / max_threads_in_sequence); + + for (int i = 1; i < iters; i *= 2) { + auto temp = g.shfl_xor(max_val, i); + max_val = (temp > max_val ? 
temp : max_val); + } + + max_val = g.shfl(max_val, threadIdx.x / tbSize); + } + + float sum = 0; + for (int i = 0; i < iterations; i++) { + int data_id = i * iteration_stride + seq_lane; + if (data_id < seq_length) { + low_data[i].x = __expf(low_data[i].x - max_val); + low_data[i].y = __expf(low_data[i].y - max_val); + high_data[i].x = __expf(high_data[i].x - max_val); + high_data[i].y = __expf(high_data[i].y - max_val); + + sum += (low_data[i].x + low_data[i].y + high_data[i].x + high_data[i].y); + } + } + + for (int i = 1; i < tbSize; i *= 2) { sum += g.shfl_xor(sum, i); } + + if (seq_length > tbSize) { + if (lane == 0) partialSum[wid] = sum; + b.sync(); + + if (lane < warp_num) sum = partialSum[lane]; + +#ifndef __STOCHASTIC_MODE__ + b.sync(); +#endif + + int iters = warp_num; + if (seq_length < iteration_stride) + iters = warp_num / (iteration_stride / max_threads_in_sequence); + + for (int i = 1; i < iters; i *= 2) { sum += g.shfl_xor(sum, i); } + + sum = g.shfl(sum, threadIdx.x / tbSize); + } + + sum += 1e-6; + + for (int i = 0; i < iterations; i++) { + int data_id = i * iteration_stride + seq_lane; + if (data_id < seq_length) { + float2 result_f; + __half2* result_h = reinterpret_cast<__half2*>(&result_f); + + low_data[i].x /= sum; + low_data[i].y /= sum; + high_data[i].x /= sum; + high_data[i].y /= sum; + + result_h[0] = __float22half2_rn(low_data[i]); + result_h[1] = __float22half2_rn(high_data[i]); + + val_cast[data_id] = result_f; + } + } + +#endif +} + +template +void launch_attn_softmax(T*, const T*, int, int, int, cudaStream_t); + +template <> +void launch_attn_softmax(float* vals, + const float* attn_mask, + int batch_size, + int heads, + int sequence_length, + cudaStream_t stream) +{ + const int threads = 128; + int seq_length4 = sequence_length / 4; + + dim3 grid_dim = get_attn_softmax_grid(batch_size, heads, sequence_length, threads); + + int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads; + + dim3 block_dim(seq_length4 > threads ? 
((sequence_length + subblock_max_workload - 1) / + subblock_max_workload * threads) + : threads); + int iterations = + (sequence_length < subblock_max_workload ? (seq_length4 + threads - 1) / threads + : MAX_THREAD_ITERATIONS); + + if (sequence_length <= 8) + attn_softmax<2, (threads / 2), 2> + <<>>(vals, attn_mask, heads, seq_length4, iterations); + else if (sequence_length <= 16) + attn_softmax<4, (threads / 4), 4> + <<>>(vals, attn_mask, heads, seq_length4, iterations); + else if (sequence_length <= 32) + attn_softmax<8, (threads / 8), 8> + <<>>(vals, attn_mask, heads, seq_length4, iterations); + else if (sequence_length <= 64) + attn_softmax<16, (threads / 16), 16> + <<>>(vals, attn_mask, heads, seq_length4, iterations); + else if (sequence_length <= 128) + attn_softmax<32, (threads / 32), 32> + <<>>(vals, attn_mask, heads, seq_length4, iterations); + else if (sequence_length <= 256) + attn_softmax<32, (threads / 64), 64> + <<>>(vals, attn_mask, heads, seq_length4, iterations); + else { + const int threads = 256; + dim3 grid_dim = get_attn_softmax_grid(batch_size, heads, sequence_length, threads); + + int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads; + + dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) / + subblock_max_workload * threads) + : threads); + iterations = + (sequence_length < subblock_max_workload ? (seq_length4 + threads - 1) / threads + : MAX_THREAD_ITERATIONS); + if (sequence_length <= 512) + attn_softmax<32, (threads / 128), 128><<>>( + vals, attn_mask, heads, seq_length4, iterations); + else if (sequence_length < (MAX_THREADS * MAX_THREAD_ITERATIONS * 4)) + attn_softmax<32, 1, 128><<>>( + vals, attn_mask, heads, seq_length4, iterations); + else + throw std::runtime_error( + "Unsupport Seq_Length! 
Check the restriction of the max_threads and " + "max_thread_iterations!"); + } +} + +template <> +void launch_attn_softmax<__half>(__half* vals, + const __half* attn_mask, + int batch_size, + int heads, + int sequence_length, + cudaStream_t stream) +{ + const int threads = 128; + int seq_length4 = sequence_length / 4; + + dim3 grid_dim = get_attn_softmax_grid(batch_size, heads, sequence_length, threads); + + int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads; + + dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) / + subblock_max_workload * threads) + : threads); + + int iterations = + (sequence_length < subblock_max_workload ? (seq_length4 + threads - 1) / threads + : MAX_THREAD_ITERATIONS); + + if (sequence_length <= 8) + attn_softmax<2, (threads / 2), 2> + <<>>(vals, attn_mask, heads, seq_length4, iterations); + else if (sequence_length <= 16) + attn_softmax<4, (threads / 4), 4> + <<>>(vals, attn_mask, heads, seq_length4, iterations); + else if (sequence_length <= 32) + attn_softmax<8, (threads / 8), 8> + <<>>(vals, attn_mask, heads, seq_length4, iterations); + else if (sequence_length <= 64) + attn_softmax<16, (threads / 16), 16> + <<>>(vals, attn_mask, heads, seq_length4, iterations); + else if (sequence_length <= 128) + attn_softmax<32, (threads / 32), 32> + <<>>(vals, attn_mask, heads, seq_length4, iterations); + else if (sequence_length <= 256) + attn_softmax<32, (threads / 64), 64> + <<>>(vals, attn_mask, heads, seq_length4, iterations); + else { + const int threads = 256; + dim3 grid_dim = get_attn_softmax_grid(batch_size, heads, sequence_length, threads); + + int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads; + + dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) / + subblock_max_workload * threads) + : threads); + iterations = + (sequence_length < subblock_max_workload ? 
(seq_length4 + threads - 1) / threads + : MAX_THREAD_ITERATIONS); + if (sequence_length <= 512) + attn_softmax<32, (threads / 128), 128><<>>( + vals, attn_mask, heads, seq_length4, iterations); + else if (sequence_length < (MAX_THREADS * MAX_THREAD_ITERATIONS * 4)) + attn_softmax<32, 1, 128><<>>( + vals, attn_mask, heads, seq_length4, iterations); + else + throw std::runtime_error( + "Unsupport Seq_Length! Check the restriction of the max_threads and " + "max_thread_iterations!"); + } +} + +template +__global__ void softmax_backward_kernel(T* out_grad, const T* soft_inp, int seq_length) +{ + __shared__ float partialSum[MAX_WARP_NUM]; + + int warp_num = blockDim.x >> WARP_SIZE_BITS; // warp-count = num_threads / WARP_SIZE (32) + + int iteration_stride = blockDim.x; + int block_width = blockStride * seq_length; + + int iterations = (seq_length < (MAX_THREAD_ITERATIONS * iteration_stride) + ? (seq_length + iteration_stride - 1) / iteration_stride + : MAX_THREAD_ITERATIONS); + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = cg::tiled_partition(b); + + int row = blockIdx.x; + int id = threadIdx.x; + + int wid = id >> WARP_SIZE_BITS; + int lane = id & 0x1f; + + T val_reg[MAX_THREAD_ITERATIONS]; + T soft_reg[MAX_THREAD_ITERATIONS]; + float grad_reg = 0.0f; + +#pragma unroll + for (int i = 0; i < iterations; i++) { + int data_id = i * iteration_stride + id; + if (data_id < block_width) { + val_reg[i] = out_grad[row * block_width + data_id]; + soft_reg[i] = soft_inp[row * block_width + data_id]; + + grad_reg += ((float)val_reg[i] * + (float)soft_reg[i]); // if done in half, the multiplication, we may lose + // 2% of accuracy in computation!! 
+ } + } + for (int i = 1; i < tbSize; i *= 2) grad_reg += g.shfl_xor(grad_reg, i); + + if (seq_length > tbSize) { + if (lane == 0) partialSum[wid] = grad_reg; + b.sync(); + + if (lane < warp_num) grad_reg = partialSum[lane]; + + int iters = warp_num; + if (seq_length < iteration_stride) iters = warp_num / (iteration_stride / seq_length); + + for (int i = 1; i < iters; i *= 2) grad_reg += g.shfl_xor(grad_reg, i); + + grad_reg = g.shfl(grad_reg, id / tbSize); + } + + for (int i = 0; i < iterations; i++) { + int data_id = i * iteration_stride + id; + if (data_id < block_width) { + float temp = (float)soft_reg[i] * ((float)val_reg[i] - grad_reg); + out_grad[row * block_width + data_id] = (T)temp; + } + } +} + +template +__global__ void softmax_backward_kernel_v2(T* grad /* input & output*/, + const T* output, + int softmax_length) +{ + int batch_idx = blockIdx.x * blockDim.y + threadIdx.y; + int offset = batch_idx * softmax_length + threadIdx.x; + + grad += offset; + output += offset; + + T grad_reg[ITERATIONS]; + T output_reg[ITERATIONS]; + float sum = 0.0; + +#pragma unroll + for (int i = 0; i < ITERATIONS; ++i) { + int curr_idx = threadIdx.x + i * WARP_SIZE; + if (curr_idx < softmax_length) { + grad_reg[i] = grad[i * WARP_SIZE]; + output_reg[i] = output[i * WARP_SIZE]; + sum += (float)grad_reg[i] * (float)output_reg[i]; + } + } + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = cg::tiled_partition(b); + + for (int i = 1; i < WARP_SIZE; i <<= 1) sum += g.shfl_xor(sum, i); + +#pragma unroll + for (int i = 0; i < ITERATIONS; ++i) { + int curr_idx = threadIdx.x + i * WARP_SIZE; + if (curr_idx < softmax_length) + grad[i * WARP_SIZE] = (float)output_reg[i] * ((float)grad_reg[i] - sum); + } +} + +__global__ void softmax_backward_kernel_arbitrary_length(__half* grad /* input & output*/, + const __half* output, + int softmax_length) +{ + int batch_idx = blockIdx.x * blockDim.y + threadIdx.y; + int offset = batch_idx * softmax_length + threadIdx.x; 
+ + const float4* output_cast = reinterpret_cast(output); + float4* grad_cast = reinterpret_cast(grad); + + grad_cast += offset; + output_cast += offset; + + float sum = 0.0; + int curr_idx = threadIdx.x; + while (curr_idx < softmax_length) { + float4 out_reg = output_cast[curr_idx]; + float4 grad_reg = grad_cast[curr_idx]; + __half2* out_h = reinterpret_cast<__half2*>(&out_reg); + __half2* grad_h = reinterpret_cast<__half2*>(&grad_reg); +#pragma unroll + for (int m = 0; m < 4; m++) grad_h[m] *= out_h[m]; + sum += ((float)grad_h[0].x + (float)grad_h[0].y + (float)grad_h[1].x + (float)grad_h[1].y) + + ((float)grad_h[2].x + (float)grad_h[2].y + (float)grad_h[3].x + (float)grad_h[3].y); + curr_idx += WARP_SIZE; + } + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = cg::tiled_partition(b); + +#pragma unroll + for (int i = 1; i < WARP_SIZE; i <<= 1) sum += g.shfl_xor(sum, i); + + curr_idx = threadIdx.x; + while (curr_idx < softmax_length) { + float4 out_reg = output_cast[curr_idx]; + float4 grad_reg = grad_cast[curr_idx]; + __half* grad_h = reinterpret_cast<__half*>(&grad_reg); + __half* out_h = reinterpret_cast<__half*>(&out_reg); + +#pragma unroll + for (int m = 0; m < 8; m++) grad_h[m] = (float)out_h[m] * ((float)grad_h[m] - sum); + + grad_cast[curr_idx] = grad_reg; + curr_idx += WARP_SIZE; + } +} + +__global__ void softmax_backward_kernel_arbitrary_length(float* grad /* input & output*/, + const float* output, + int softmax_length) +{ + int batch_idx = blockIdx.x * blockDim.y + threadIdx.y; + int offset = batch_idx * softmax_length + threadIdx.x; + + const float4* output_cast = reinterpret_cast(output); + float4* grad_cast = reinterpret_cast(grad); + + grad_cast += offset; + output_cast += offset; + + float sum = 0.0; + int curr_idx = threadIdx.x; + while (curr_idx < softmax_length) { + float4 out_reg = output_cast[curr_idx]; + float4 grad_reg = grad_cast[curr_idx]; + + grad_reg.x *= out_reg.x; + grad_reg.y *= out_reg.y; + grad_reg.z *= 
out_reg.z; + grad_reg.w *= out_reg.w; + sum += (grad_reg.x + grad_reg.y + grad_reg.z + grad_reg.w); + + curr_idx += WARP_SIZE; + } + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = cg::tiled_partition(b); + +#pragma unroll + for (int i = 1; i < WARP_SIZE; i <<= 1) sum += g.shfl_xor(sum, i); + + curr_idx = threadIdx.x; + while (curr_idx < softmax_length) { + float4 out_reg = output_cast[curr_idx]; + float4 grad_reg = grad_cast[curr_idx]; + grad_reg.x = out_reg.x * (grad_reg.x - sum); + grad_reg.y = out_reg.y * (grad_reg.y - sum); + grad_reg.z = out_reg.z * (grad_reg.z - sum); + grad_reg.w = out_reg.w * (grad_reg.w - sum); + + grad_cast[curr_idx] = grad_reg; + curr_idx += WARP_SIZE; + } +} + +template +void launch_attn_softmax_backward_v2(T* out_grad, + const T* soft_inp, + int batch_size, + int heads, + int seq_length, + cudaStream_t stream) +{ + const int warps_per_block = 4; + dim3 grid_dim(batch_size * heads * seq_length / warps_per_block); + dim3 block_dim(WARP_SIZE, warps_per_block); + + if (seq_length <= 32) + softmax_backward_kernel_v2 + <<>>(out_grad, soft_inp, seq_length); + else if (seq_length <= 64) + softmax_backward_kernel_v2 + <<>>(out_grad, soft_inp, seq_length); + else if (seq_length <= 128) + softmax_backward_kernel_v2 + <<>>(out_grad, soft_inp, seq_length); + else if (seq_length <= 256) + softmax_backward_kernel_v2 + <<>>(out_grad, soft_inp, seq_length); + else if (seq_length <= 384) + softmax_backward_kernel_v2 + <<>>(out_grad, soft_inp, seq_length); + else if (seq_length <= 512) + softmax_backward_kernel_v2 + <<>>(out_grad, soft_inp, seq_length); + else if (seq_length <= 768) + softmax_backward_kernel_v2 + <<>>(out_grad, soft_inp, seq_length); + else if (seq_length <= 1024) + softmax_backward_kernel_v2 + <<>>(out_grad, soft_inp, seq_length); + else if (seq_length <= 2048) + softmax_backward_kernel_v2 + <<>>(out_grad, soft_inp, seq_length); + else if (seq_length <= 4096) + softmax_backward_kernel_v2 + <<>>(out_grad, 
soft_inp, seq_length); + else if (seq_length <= 8192) + softmax_backward_kernel_v2 + <<>>(out_grad, soft_inp, seq_length); + else + softmax_backward_kernel_arbitrary_length<<>>( + out_grad, soft_inp, seq_length / (4 << ((sizeof(T) & 2) >> 1))); +} + +template void launch_attn_softmax_backward_v2<__half>(__half* out_grad, + const __half* soft_inp, + int batch_size, + int heads, + int seq_length, + cudaStream_t stream); +template void launch_attn_softmax_backward_v2(float* out_grad, + const float* soft_inp, + int batch_size, + int heads, + int seq_length, + cudaStream_t stream); diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/utils/flatten_unflatten.cpp b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/utils/flatten_unflatten.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ab95ee1914647c85e3e0ad5ebf625877d57a5ad6 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/utils/flatten_unflatten.cpp @@ -0,0 +1,29 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/* +Copyright NVIDIA/apex +This file is adapted from fused adam in NVIDIA/apex, commit a109f85 +*/ + +#include +#include +// https://github.com/pytorch/pytorch/blob/master/torch/csrc/utils/tensor_flatten.h + +at::Tensor flatten(std::vector tensors) +{ + return torch::utils::flatten_dense_tensors(tensors); +} + +std::vector unflatten(at::Tensor flat, std::vector tensors) +{ + return torch::utils::unflatten_dense_tensors(flat, tensors); +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) +{ + m.def("flatten", &flatten, "Flatten dense tensors"); + m.def("unflatten", &unflatten, "Unflatten dense tensors"); +} diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/xpu/adam/cpu_adam.cpp b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/xpu/adam/cpu_adam.cpp new file mode 100644 index 0000000000000000000000000000000000000000..96809827f3e13fc02e0755906d694c7e721e0384 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/xpu/adam/cpu_adam.cpp @@ -0,0 +1,16 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "cpu_adam.h" + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) +{ + m.def("adam_update", &ds_adam_step, "DeepSpeed CPU Adam update (C++)"); + m.def("adam_update_copy", + &ds_adam_step_plus_copy, + "DeepSpeed CPU Adam update and param copy (C++)"); + m.def("create_adam", &create_adam_optimizer, "DeepSpeed CPU Adam (C++)"); + m.def("destroy_adam", &destroy_adam_optimizer, "DeepSpeed CPU Adam destroy (C++)"); +} diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/xpu/adam/cpu_adam_impl.cpp b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/xpu/adam/cpu_adam_impl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c24a08fc64ebfba0dfcd202f619aa3c05b173986 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/xpu/adam/cpu_adam_impl.cpp @@ -0,0 +1,247 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include +#include +#include +#include +#include +#include +#include +#include "cpu_adam.h" + +static std::unordered_map> s_optimizers; + +// C++ interface + +void Adam_Optimizer::Step_1(float* _params, + float* grads, + float* _exp_avg, + float* _exp_avg_sq, + size_t _param_size, + ds_half_precision_t* dev_params, + bool half_precision) +{ + size_t rounded_size = 0; +#if defined(__AVX512__) or defined(__AVX256__) + Step_AVX<1>(&rounded_size, + _params, + grads, + _exp_avg, + _exp_avg_sq, + _param_size, + dev_params, + half_precision); +#endif + if (_param_size > rounded_size) { + float betta1_minus1 = 1 - _betta1; + float betta2_minus1 = 1 - _betta2; + + float step_size = -1 * _alpha / _bias_correction1; + float w_decay = -1 * _alpha * _weight_decay; + ds_half_precision_t* grads_cast_h; + ds_half_precision_t* params_cast_h; + if (half_precision) { + grads_cast_h = reinterpret_cast(grads); + params_cast_h = reinterpret_cast(_params); + } + + for (size_t t = rounded_size; t < _param_size; t 
+= TILE) { + size_t copy_size = TILE; + if ((t + TILE) > _param_size) copy_size = _param_size - t; + size_t offset = copy_size + t; +#pragma omp parallel for + for (size_t k = t; k < offset; k++) { + float grad = half_precision ? (float)grads_cast_h[k] : grads[k]; + float param = half_precision ? (float)params_cast_h[k] : _params[k]; + float momentum = _exp_avg[k]; + float variance = _exp_avg_sq[k]; + if (_weight_decay > 0 && !_adamw_mode) { grad = param * _weight_decay + grad; } + momentum = momentum * _betta1; + momentum = grad * betta1_minus1 + momentum; + + variance = variance * _betta2; + grad = grad * grad; + variance = grad * betta2_minus1 + variance; + + grad = sqrt(variance); + grad = grad * _bias_correction2 + _eps; + grad = momentum / grad; + if (_weight_decay > 0 && _adamw_mode) { param += w_decay * param; } + param = grad * step_size + param; + if (half_precision) + params_cast_h[k] = (ds_half_precision_t)param; + else + _params[k] = param; + _exp_avg[k] = momentum; + _exp_avg_sq[k] = variance; + } + } + } +} + +void Adam_Optimizer::Step_4(float* _params, + float* grads, + float* _exp_avg, + float* _exp_avg_sq, + size_t _param_size, + ds_half_precision_t* dev_params, + bool half_precision) +{ + size_t rounded_size = 0; +#if defined(__AVX512__) or defined(__AVX256__) + Step_AVX<4>(&rounded_size, + _params, + grads, + _exp_avg, + _exp_avg_sq, + _param_size, + dev_params, + half_precision); +#endif + if (_param_size > rounded_size) + Step_1((_params + rounded_size), + (grads + rounded_size), + (_exp_avg + rounded_size), + (_exp_avg_sq + rounded_size), + (_param_size - rounded_size), + (dev_params != nullptr ? 
(dev_params + rounded_size) : dev_params), + half_precision); +} + +int create_adam_optimizer(int optimizer_id, + float alpha, + float betta1, + float betta2, + float eps, + float weight_decay, + bool adamw_mode, + bool should_log) +{ + auto opt = + std::make_shared(alpha, betta1, betta2, eps, weight_decay, adamw_mode); + + s_optimizers[optimizer_id] = opt; + + if (should_log) { + std::string avx_type = ""; +#if defined(__AVX512__) + avx_type = "AVX512"; +#else +#if defined(__AVX256__) + avx_type = "AVX2"; +#else + avx_type = "scalar"; +#endif +#endif + + printf("Adam Optimizer #%d is created with %s arithmetic capability.\n", + optimizer_id, + avx_type.c_str()); + printf("Config: alpha=%f, betas=(%f, %f), weight_decay=%f, adam_w=%d\n", + alpha, + betta1, + betta2, + weight_decay, + (int)adamw_mode); + } + + return 0; +} + +void Adam_Optimizer::Step_8(float* _params, + float* grads, + float* _exp_avg, + float* _exp_avg_sq, + size_t _param_size, + ds_half_precision_t* dev_params, + bool half_precision) +{ + size_t rounded_size = 0; +#if defined(__AVX512__) or defined(__AVX256__) + Step_AVX<8>(&rounded_size, + _params, + grads, + _exp_avg, + _exp_avg_sq, + _param_size, + dev_params, + half_precision); +#endif + if (_param_size > rounded_size) + Step_4((_params + rounded_size), + (grads + rounded_size), + (_exp_avg + rounded_size), + (_exp_avg_sq + rounded_size), + (_param_size - rounded_size), + (dev_params != nullptr ? 
(dev_params + rounded_size) : dev_params), + half_precision); +} + +int ds_adam_step(int optimizer_id, + size_t step, + float lr, + float beta1, + float beta2, + float epsilon, + float weight_decay, + bool bias_correction, + torch::Tensor& params, + torch::Tensor& grads, + torch::Tensor& exp_avg, + torch::Tensor& exp_avg_sq) +{ + auto params_c = params.contiguous(); + auto grads_c = grads.contiguous(); + auto exp_avg_c = exp_avg.contiguous(); + auto exp_avg_sq_c = exp_avg_sq.contiguous(); + + // assert(params.options().dtype() == grads.options().dtype()); + + float* params_ptr = (float*)params_c.data_ptr(); + float* grads_ptr = (float*)grads_c.data_ptr(); + float* exp_avg_ptr = (float*)exp_avg_c.data_ptr(); + float* exp_avg_sq_ptr = (float*)exp_avg_sq_c.data_ptr(); + + std::shared_ptr opt = + std::static_pointer_cast(s_optimizers[optimizer_id]); + opt->IncrementStep(step, beta1, beta2); + opt->update_state(lr, epsilon, weight_decay, bias_correction); + + opt->Step_8(params_ptr, + grads_ptr, + exp_avg_ptr, + exp_avg_sq_ptr, + params_c.numel(), + nullptr, + (params.options().dtype() == at::kHalf)); + + return 0; +} + +int ds_adam_step_plus_copy(int optimizer_id, + size_t step, + float lr, + float beta1, + float beta2, + float epsilon, + float weight_decay, + bool bias_correction, + torch::Tensor& params, + torch::Tensor& grads, + torch::Tensor& exp_avg, + torch::Tensor& exp_avg_sq, + torch::Tensor& gpu_params) +{ + assert(false); + return 0; +} + +int destroy_adam_optimizer(int optimizer_id) +{ + s_optimizers.erase(optimizer_id); + + return 0; +} diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/xpu/adam/fused_adam_frontend.cpp b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/xpu/adam/fused_adam_frontend.cpp new file mode 100644 index 0000000000000000000000000000000000000000..13b390248608b046dab443f85346b5446a47d722 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/xpu/adam/fused_adam_frontend.cpp @@ -0,0 +1,25 @@ 
+// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include + +void multi_tensor_adam_cuda(int chunk_size, + at::Tensor noop_flag, + std::vector> tensor_lists, + const float lr, + const float beta1, + const float beta2, + const float epsilon, + const int step, + const int mode, + const int bias_correction, + const float weight_decay); + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) +{ + m.def("multi_tensor_adam", + &multi_tensor_adam_cuda, + "Compute and apply gradient update to parameters for Adam optimizer"); +} diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/xpu/adam/multi_tensor_adam.dp.cpp b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/xpu/adam/multi_tensor_adam.dp.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0720a020247a3ebf80fc231e11333ea56cb09924 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/xpu/adam/multi_tensor_adam.dp.cpp @@ -0,0 +1,159 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/* +Copyright NVIDIA/apex +This file is adapted from fused adam in NVIDIA/apex, commit a109f85 +*/ + +#include +#include +#include + +#include + +#include +#include "multi_tensor_apply.dp.hpp" +#include "type_shim.h" + +#define BLOCK_SIZE 512 +#define ILP 4 + +typedef enum : int { + ADAM_MODE_0 = 0, // L2 regularization mode + ADAM_MODE_1 = 1 // Decoupled weight decay mode(AdamW) +} adamMode_t; + +using MATH_T = float; + +template +struct AdamFunctor { + __inline__ __attribute__((always_inline)) void operator()(int chunk_size, + volatile int* noop_gmem, + TensorListMetadata<4>& tl, + const float beta1, + const float beta2, + const float beta1_correction, + const float beta2_correction, + const float epsilon, + const float lr, + adamMode_t mode, + const float decay) + { + auto item_ct1 = sycl::ext::oneapi::experimental::this_nd_item<3>(); + int tensor_loc = tl.block_to_tensor[item_ct1.get_group(2)]; + + int chunk_idx = tl.block_to_chunk[item_ct1.get_group(2)]; + int n = tl.sizes[tensor_loc]; + + T* g = (T*)tl.addresses[0][tensor_loc]; + g += chunk_idx * chunk_size; + + T* p = (T*)tl.addresses[1][tensor_loc]; + p += chunk_idx * chunk_size; + + T* m = (T*)tl.addresses[2][tensor_loc]; + m += chunk_idx * chunk_size; + + T* v = (T*)tl.addresses[3][tensor_loc]; + v += chunk_idx * chunk_size; + + n -= chunk_idx * chunk_size; + + // see note in multi_tensor_scale_kernel.cu + for (int i_start = 0; i_start < n && i_start < chunk_size; + i_start += item_ct1.get_local_range(2) * ILP) { + MATH_T r_g[ILP]; + MATH_T r_p[ILP]; + MATH_T r_m[ILP]; + MATH_T r_v[ILP]; +#pragma unroll + for (int ii = 0; ii < ILP; ii++) { + int i = i_start + item_ct1.get_local_id(2) + ii * item_ct1.get_local_range(2); + if (i < n && i < chunk_size) { + r_g[ii] = g[i]; + r_p[ii] = p[i]; + r_m[ii] = m[i]; + r_v[ii] = v[i]; + } else { + r_g[ii] = MATH_T(0); + r_p[ii] = MATH_T(0); + r_m[ii] = MATH_T(0); + r_v[ii] = MATH_T(0); + } + } +#pragma 
unroll + for (int ii = 0; ii < ILP; ii++) { + if (mode == ADAM_MODE_0) { // L2 + r_g[ii] = r_g[ii] + (decay * r_p[ii]); + r_m[ii] = beta1 * r_m[ii] + (1 - beta1) * r_g[ii]; + r_v[ii] = beta2 * r_v[ii] + (1 - beta2) * r_g[ii] * r_g[ii]; + MATH_T next_m_unbiased = r_m[ii] / beta1_correction; + MATH_T next_v_unbiased = r_v[ii] / beta2_correction; + MATH_T denom = sycl::sqrt(next_v_unbiased) + epsilon; + MATH_T update = next_m_unbiased / denom; + r_p[ii] = r_p[ii] - (lr * update); + } else { // weight decay + r_m[ii] = beta1 * r_m[ii] + (1 - beta1) * r_g[ii]; + r_v[ii] = beta2 * r_v[ii] + (1 - beta2) * r_g[ii] * r_g[ii]; + MATH_T next_m_unbiased = r_m[ii] / beta1_correction; + MATH_T next_v_unbiased = r_v[ii] / beta2_correction; + MATH_T denom = sycl::sqrt(next_v_unbiased) + epsilon; + MATH_T update = (next_m_unbiased / denom) + (decay * r_p[ii]); + r_p[ii] = r_p[ii] - (lr * update); + } + } +#pragma unroll + for (int ii = 0; ii < ILP; ii++) { + int i = i_start + item_ct1.get_local_id(2) + ii * item_ct1.get_local_range(2); + if (i < n && i < chunk_size) { + p[i] = r_p[ii]; + m[i] = r_m[ii]; + v[i] = r_v[ii]; + } + } + } + } +}; + +void multi_tensor_adam_cuda(int chunk_size, + at::Tensor noop_flag, + std::vector> tensor_lists, + const float lr, + const float beta1, + const float beta2, + const float epsilon, + const int step, + const int mode, + const int bias_correction, + const float weight_decay) +{ + using namespace at; + + // Handle bias correction mode + float bias_correction1 = 1.0f, bias_correction2 = 1.0f; + if (bias_correction == 1) { + bias_correction1 = 1 - std::pow(beta1, step); + bias_correction2 = 1 - std::pow(beta2, step); + } + + // Assume single type across p,g,m1,m2 now + DISPATCH_DOUBLE_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), + 0, + "adam", + multi_tensor_apply<4>(BLOCK_SIZE, + chunk_size, + noop_flag, + tensor_lists, + AdamFunctor(), + beta1, + beta2, + bias_correction1, + bias_correction2, + epsilon, + lr, + (adamMode_t)mode, + 
weight_decay);) +} diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/xpu/common/custom_cuda_kernel.dp.cpp b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/xpu/common/custom_cuda_kernel.dp.cpp new file mode 100644 index 0000000000000000000000000000000000000000..cfd004ef13577c02f958fa18d116131398443562 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/xpu/common/custom_cuda_kernel.dp.cpp @@ -0,0 +1,92 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include + +inline void has_capability_or_fail(const sycl::device& dev, + const std::initializer_list& props) +{ + for (const auto& it : props) { + if (dev.has(it)) continue; + switch (it) { + case sycl::aspect::fp64: + throw std::runtime_error("'double' is not supported in '" + + dev.get_info() + "' device"); + break; + case sycl::aspect::fp16: + throw std::runtime_error("'half' is not supported in '" + + dev.get_info() + "' device"); + break; + default: +#define __SYCL_ASPECT(ASPECT, ID) \ + case sycl::aspect::ASPECT: return #ASPECT; +#define __SYCL_ASPECT_DEPRECATED(ASPECT, ID, MESSAGE) __SYCL_ASPECT(ASPECT, ID) +#define __SYCL_ASPECT_DEPRECATED_ALIAS(ASPECT, ID, MESSAGE) + auto getAspectNameStr = [](sycl::aspect AspectNum) -> std::string { + switch (AspectNum) { +#include +#include + default: return "unknown aspect"; + } + }; +#undef __SYCL_ASPECT_DEPRECATED_ALIAS +#undef __SYCL_ASPECT_DEPRECATED +#undef __SYCL_ASPECT + throw std::runtime_error("'" + getAspectNameStr(it) + "' is not supported in '" + + dev.get_info() + "' device"); + } + break; + } +} + +void param_update_kernel(const float* input, sycl::half* output, int size) +{ + auto item_ct1 = sycl::ext::oneapi::experimental::this_nd_item<3>(); + int id = item_ct1.get_group(2) * item_ct1.get_local_range(2) + item_ct1.get_local_id(2); + + if (id < size) { output[id] = (sycl::half)input[id]; } +} + +void launch_param_update(const float* input, sycl::half* 
output, int size, sycl::queue* stream) +{ + int threads = 1024; + + sycl::range<3> grid_dim(1, 1, (size - 1) / threads + 1); + sycl::range<3> block_dim(1, 1, threads); + + { + has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); + stream->parallel_for( + sycl::nd_range<3>(grid_dim * block_dim, block_dim), + [=](sycl::nd_item<3> item_ct1) { param_update_kernel(input, output, size); }); + } +} + +void param_update_kernel_half(const float* input, sycl::half* output, int size) +{ + auto item_ct1 = sycl::ext::oneapi::experimental::this_nd_item<3>(); + int id = item_ct1.get_group(2) * item_ct1.get_local_range(2) + item_ct1.get_local_id(2); + sycl::half2* output_cast = reinterpret_cast(output); + if (id < size) { + float input_f = input[id]; + sycl::half2* input_h = reinterpret_cast(&input_f); + output_cast[id] = *input_h; + } +} + +void launch_param_update_half(const float* input, sycl::half* output, int size, sycl::queue* stream) +{ + int threads = 1024; + size /= 2; + sycl::range<3> grid_dim(1, 1, (size - 1) / threads + 1); + sycl::range<3> block_dim(1, 1, threads); + + { + has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); + stream->parallel_for( + sycl::nd_range<3>(grid_dim * block_dim, block_dim), + [=](sycl::nd_item<3> item_ct1) { param_update_kernel_half(input, output, size); }); + } +} diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/xpu/includes/cpu_adagrad.h b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/xpu/includes/cpu_adagrad.h new file mode 100644 index 0000000000000000000000000000000000000000..660f860917f6c8b345825e6110a7508ba19a584f --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/xpu/includes/cpu_adagrad.h @@ -0,0 +1,120 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#define NOMINMAX // Windows idiosyncrasy + // https://stackoverflow.com/questions/4913922/possible-problems-with-nominmax-on-visual-c + +#include +#include +#include "simd.h" + +typedef unsigned short ds_half_precision_t; + +#define STEP(SPAN) \ + void Step_##SPAN(float* _params, \ + float* grads, \ + float* _exp_avg_sq, \ + size_t _param_size, \ + ds_half_precision_t* dev_param = nullptr, \ + bool half_precision = false); + +class Adagrad_Optimizer { +public: + Adagrad_Optimizer(float alpha = 1e-2, float eps = 1e-8, float weight_decay = 0) + : _alpha(alpha), _eps(eps), _weight_decay(weight_decay) + { + } + ~Adagrad_Optimizer() {} +#if defined(__AVX512__) or defined(__AVX256__) + template + void Step_AVX(size_t* rounded_size, + float* _params, + float* grads, + float* _exp_avg_sq, + size_t param_size, + ds_half_precision_t* dev_param = nullptr, + bool half_precision = false); +#endif + STEP(1) + STEP(4) + STEP(8) + inline void IncrementStep(size_t step) + { + _step++; + if (_step != step) { _step = step; } + } + inline void update_state(float lr, float epsilon, float weight_decay) + { + _alpha = lr; + _eps = epsilon; + _weight_decay = weight_decay; + } + +private: + float _alpha; + float _eps; + float _weight_decay; + + float _betta1_t; + float _betta2_t; + size_t _step; +}; + +#if defined(__AVX512__) or defined(__AVX256__) +template +void Adagrad_Optimizer::Step_AVX(size_t* rounded_size, + float* _params, + float* grads, + float* _exp_avg_sq, + size_t _param_size, + ds_half_precision_t* dev_params, + bool half_precision) +{ + size_t new_rounded_size = 0; + AVX_Data eps_4; + eps_4.data = SIMD_SET(_eps); + + float step_size = -1 * _alpha; + AVX_Data step_size_4; + step_size_4.data = SIMD_SET(step_size); + + AVX_Data weight_decay4; + if (_weight_decay > 0) weight_decay4.data = SIMD_SET(_weight_decay); + new_rounded_size = ROUND_DOWN(_param_size, SIMD_WIDTH * span); + for (size_t t = 0; t 
< new_rounded_size; t += TILE) { + size_t copy_size = TILE; + if ((t + TILE) > new_rounded_size) copy_size = new_rounded_size - t; + size_t offset = copy_size + t; +#pragma omp parallel for + for (size_t i = t; i < offset; i += SIMD_WIDTH * span) { + AVX_Data grad_4[span]; + simd_load(grad_4, grads + i, half_precision); + + AVX_Data momentum_4[span]; + simd_load(momentum_4, grads + i, false); + + AVX_Data variance_4[span]; + simd_load(variance_4, _exp_avg_sq + i, false); + + AVX_Data param_4[span]; + simd_load(param_4, _params + i, half_precision); + + if (_weight_decay > 0) { simd_fma(grad_4, param_4, weight_decay4, grad_4); } + + simd_fma(variance_4, grad_4, grad_4, variance_4); + simd_sqrt(grad_4, variance_4); + simd_add(grad_4, grad_4, eps_4); + simd_div(grad_4, momentum_4, grad_4); + simd_fma(param_4, grad_4, step_size_4, param_4); + + simd_store(_params + i, param_4, half_precision); + simd_store(_exp_avg_sq + i, variance_4, false); + } + } + *rounded_size = new_rounded_size; +} +#endif diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/xpu/includes/type_shim.h b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/xpu/includes/type_shim.h new file mode 100644 index 0000000000000000000000000000000000000000..fa41757c895b53c3ff6e98050b14e0602374e167 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/xpu/includes/type_shim.h @@ -0,0 +1,155 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/* Taken from NVIDIA/apex commit 855808f3fc268e9715d613f3c2e56469d8c986d8 */ +#include +/* #include */ +#include + +// Forward/backward compatibility hack around +// https://github.com/pytorch/pytorch/commit/3aeb78079bcd68282fe9117088e138b77318e288 +// pending more future-proof guidance from upstream. 
+// struct TypeShim +// { +// const at::Type& payload; +// TypeShim(const at::Type& type) : payload(type) {} +// // Enable trivial conversion to a const at::Type& for pre-3aeb78 +// operator const at::Type&(){ return payload; }; +// // Enable dispatch switch statements to take *this directly for post-3aeb78 +// //operator at::ScalarType(){ return payload.; }; +// }; + +#define DISPATCH_FLOAT_AND_HALF(TYPE, LEVEL, NAME, ...) \ + switch (TYPE) { \ + case at::ScalarType::Float: { \ + using scalar_t_##LEVEL = float; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::Half: { \ + using scalar_t_##LEVEL = at::Half; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::BFloat16: { \ + using scalar_t_##LEVEL = at::BFloat16; \ + __VA_ARGS__; \ + break; \ + } \ + default: AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \ + } + +#define DISPATCH_DOUBLE_FLOAT_AND_HALF(TYPE, LEVEL, NAME, ...) \ + switch (TYPE) { \ + case at::ScalarType::Double: { \ + using scalar_t_##LEVEL = double; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::Float: { \ + using scalar_t_##LEVEL = float; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::Half: { \ + using scalar_t_##LEVEL = at::Half; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::BFloat16: { \ + using scalar_t_##LEVEL = at::BFloat16; \ + __VA_ARGS__; \ + break; \ + } \ + default: AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \ + } + +#define DISPATCH_DOUBLE_AND_FLOAT(TYPE, LEVEL, NAME, ...) 
\ + switch (TYPE) { \ + case at::ScalarType::Double: { \ + using scalar_t_##LEVEL = double; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::Float: { \ + using scalar_t_##LEVEL = float; \ + __VA_ARGS__; \ + break; \ + } \ + default: AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \ + } + +template +__inline__ __attribute__((always_inline)) T reduce_block_into_lanes( + T* x, + T val, + int lanes = 1, + bool share_result = false) // lanes is intended to be <= 32. +{ + auto item_ct1 = sycl::ext::oneapi::experimental::this_nd_item<3>(); + int tid = item_ct1.get_local_id(2) + item_ct1.get_local_id(1) * item_ct1.get_local_range(2); + int blockSize = item_ct1.get_local_range(2) * + item_ct1.get_local_range(1); // blockSize is intended to be a multiple of 32. + + if (blockSize >= 64) { + x[tid] = val; + /* + DPCT1118:1: SYCL group functions and algorithms must be encountered in converged control + flow. You may need to adjust the code. + */ + /* + DPCT1065:6: Consider replacing sycl::nd_item::barrier() with + sycl::nd_item::barrier(sycl::access::fence_space::local_space) for better performance if + there is no access to global memory. + */ + item_ct1.barrier(); + } + +#pragma unroll + for (int i = (blockSize >> 1); i >= 64; i >>= 1) { + if (tid < i) x[tid] = x[tid] + x[tid + i]; + /* + DPCT1118:2: SYCL group functions and algorithms must be encountered in converged control + flow. You may need to adjust the code. + */ + /* + DPCT1065:7: Consider replacing sycl::nd_item::barrier() with + sycl::nd_item::barrier(sycl::access::fence_space::local_space) for better performance if + there is no access to global memory. 
+ */ + item_ct1.barrier(); + } + + T final; + + if (tid < 32) { + if (blockSize >= 64) + final = x[tid] + x[tid + 32]; + else + final = val; + // __SYNCWARP(); + +#pragma unroll + for (int i = 16; i >= lanes; i >>= 1) + final = final + __shfl_down_sync(0xffffffff, final, i); + } + + if (share_result) { + if (tid < lanes) x[tid] = final; // EpilogueOp + // Make sure the smem result is visible to all warps. + /* + DPCT1118:3: SYCL group functions and algorithms must be encountered in converged control + flow. You may need to adjust the code. + */ + /* + DPCT1065:8: Consider replacing sycl::nd_item::barrier() with + sycl::nd_item::barrier(sycl::access::fence_space::local_space) for better performance if + there is no access to global memory. + */ + item_ct1.barrier(); + } + + return final; +} diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/__init__.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b80fe2b4ba714611b6bcb652d9e559d87c7ed6fd --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .transformer import DeepSpeedTransformerLayer, DeepSpeedTransformerConfig +from .inference.config import DeepSpeedInferenceConfig +from ...model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference +from .inference.moe_inference import DeepSpeedMoEInferenceConfig, DeepSpeedMoEInference diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/bias_add.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/bias_add.py new file mode 100644 index 0000000000000000000000000000000000000000..253784f001aeb3431a0d60812e4d2068c0bd5455 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/bias_add.py @@ -0,0 +1,26 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Optional +import torch +from deepspeed.ops.op_builder import SpatialInferenceBuilder + +spatial_cuda_module = None + + +def nhwc_bias_add(activation: torch.Tensor, + bias: torch.Tensor, + other: Optional[torch.Tensor] = None, + other_bias: Optional[torch.Tensor] = None) -> torch.Tensor: + global spatial_cuda_module + if spatial_cuda_module is None: + spatial_cuda_module = SpatialInferenceBuilder().load() + + if other is None: + return spatial_cuda_module.nhwc_bias_add(activation, bias) + elif other_bias is None: + return spatial_cuda_module.nhwc_bias_add_add(activation, bias, other) + else: + return spatial_cuda_module.nhwc_bias_add_bias_add(activation, bias, other, other_bias) diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/config.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/config.py new file mode 100644 index 0000000000000000000000000000000000000000..d5aff4f541f7f836c241d913f12b7e4cf59189f0 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/config.py @@ 
-0,0 +1,131 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import json +import torch +from deepspeed.utils.types import ActivationFuncType, NormType + + +class TransformerConfig(): + + def __init__(self, hidden_size, intermediate_size, heads, num_hidden_layers): + self.layer_id = -1 + self.hidden_size = hidden_size + self.intermediate_size = intermediate_size + self.heads = heads + self.num_hidden_layers = num_hidden_layers + + +class DeepSpeedInferenceConfig(TransformerConfig): + """Initialize the DeepSpeed Transformer Config. + Arguments: + hidden_size: The hidden size of the transformer layer + intermediate_size: The intermediate size of the feed-forward part of transformer layer + heads: The number of heads in the self-attention of the transformer layer + num_hidden_layers: The number of transformer layers + layer_norm_eps: The epsilon value for the layer norm + local_rank: Optional: The rank of GPU running the transformer kernel, it is not required + to use if the model already set the current device, otherwise need to set it + so that the transformer kernel can work on the right device + mp_size (optional): This argument is mainly used to create the parameters on the kernel side + using model-parallel architecture. If the client model already takes care of this, there is no + need to pass this argument. + pre_layer_norm: Select between Pre-LN or Post-LN transformer architecture + stochastic_mode: Enable for high performance, please note that this flag has some level of + non-determinism and can produce different results on different runs. However, we have seen + that by enabling it, the pretraining tasks such as BERT are not affected and can obtain + a high accuracy level. On the other hand, for the downstream tasks, such as fine-tuning, we recommend + to turn it off in order to be able to reproduce the same result through the regular kernel execution. 
+ + scale_attention: If true, both q and k are scaled by 1/sqrt(attention_heads) before attention computation. + return_tuple: if True, returns the transformer output as a tuple, otherwise returns as a tensor + bigscience_bloom: This flag is added temporarily for supporting the BLOOM-176B model architecture. + use_triton: This flag is to enable triton kernels in inference or not. + """ + + def __init__(self, + hidden_size=-1, + intermediate_size=-1, + heads=-1, + num_hidden_layers=-1, + layer_norm_eps=1e-12, + local_rank=-1, + mp_size=1, + dtype=torch.float16, + pre_layer_norm=True, + norm_type=NormType.LayerNorm, + stochastic_mode=False, + scale_attention=True, + triangular_masking=True, + local_attention=False, + window_size=256, + rotary_dim=-1, + rotate_half=False, + rotate_every_two=True, + return_tuple=True, + mlp_after_attn=True, + mlp_act_func_type=ActivationFuncType.GELU, + training_mp_size=1, + bigscience_bloom=False, + max_out_tokens=1024, + min_out_tokens=1, + enable_qkv_quantization=False, + use_mup=False, + scale_attn_by_inverse_layer_idx=False, + return_single_tuple=False, + set_empty_params=False, + transposed_mode=False, + use_triton=False, + triton_autotune=False, + num_kv=-1, + rope_theta=10000): + super(DeepSpeedInferenceConfig, + self).__init__(hidden_size, (intermediate_size if intermediate_size > 0 else 4 * hidden_size), heads, + num_hidden_layers) + self.dtype = dtype + self.pre_layer_norm = pre_layer_norm + self.norm_type = norm_type + self.local_rank = local_rank + self.stochastic_mode = stochastic_mode + self.epsilon = layer_norm_eps + self.mp_size = mp_size + self.scale_attention = scale_attention + self.triangular_masking = triangular_masking + self.local_attention = local_attention + self.window_size = window_size + self.rotary_dim = rotary_dim + self.rotate_half = rotate_half + self.rotate_every_two = rotate_every_two + self.return_tuple = return_tuple + self.mlp_after_attn = mlp_after_attn + self.mlp_act_func_type = mlp_act_func_type 
+ self.specialized_mode = False + self.training_mp_size = training_mp_size + self.bigscience_bloom = bigscience_bloom + self.max_out_tokens = max_out_tokens + self.min_out_tokens = min_out_tokens + self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx + self.enable_qkv_quantization = enable_qkv_quantization + self.use_mup = use_mup + self.return_single_tuple = return_single_tuple + self.set_empty_params = set_empty_params + self.transposed_mode = transposed_mode + self.use_triton = use_triton + self.triton_autotune = triton_autotune + self.num_kv = num_kv + self.rope_theta = rope_theta + + @classmethod + def from_dict(cls, json_object): + config = DeepSpeedInferenceConfig() + for key, value in json_object.items(): + config.__dict__[key] = value + return config + + @classmethod + def from_json_file(cls, json_file): + with open(json_file, "r", encoding='utf-8') as reader: + text = reader.read() + return cls.from_dict(json.loads(text)) diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/diffusers_attention.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/diffusers_attention.py new file mode 100644 index 0000000000000000000000000000000000000000..5efc560db75e4e34c193879bdd2cf4dfe78431d6 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/diffusers_attention.py @@ -0,0 +1,196 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import math +import torch +from torch.autograd import Function +import torch.nn as nn +from packaging import version as pkg_version +from deepspeed.utils.logging import log_dist +from deepspeed.accelerator import get_accelerator +from deepspeed.ops.op_builder import InferenceBuilder + +# Cuda modules will be imported if needed +inference_module = None +minus_inf = -10000.0 +triton_flash_attn = None + + +def load_triton_flash_attn(): + global triton_flash_attn + try: + import triton + except ImportError: + raise ImportError("Please install triton 2.0+ or `pip install deepspeed[sd]`") + + if pkg_version.parse(triton.__version__) < pkg_version.parse("2.0"): + raise ImportError("Please install triton 2.0+ or `pip install deepspeed[sd]`") + + from .triton_ops import triton_flash_attn + + +class DeepSpeedDiffusersAttentionFunction(Function): + + @staticmethod + def forward(ctx, input, context, input_mask, config, attn_qkvw, attn_qw, attn_kw, attn_vw, attn_qkvb, + num_attention_heads_per_partition, norm_factor, hidden_size_per_partition, attn_ow, attn_ob, + do_out_bias, score_context_func, linear_func, triton_flash_attn_kernel, rope_theta): + + def _transpose_for_context(x): + x = x.permute(0, 2, 1, 3) + new_x_layer_shape = x.size()[:-2] + \ + (hidden_size_per_partition,) + return x.reshape(*new_x_layer_shape) + + def _transpose_for_scores(x): + attention_head_size = x.shape[-1] // num_attention_heads_per_partition + new_x_shape = x.size()[:-1] + (num_attention_heads_per_partition, attention_head_size) + x = x.reshape(*new_x_shape) + x = x.permute(0, 2, 1, 3) + return x.contiguous() + + def selfAttention_fp(input, context, input_mask): + if config.dtype in [torch.half, torch.float16] and input.dtype == torch.float32: + input = input.half() + head_size = input.shape[-1] // config.heads + do_flash_attn = (head_size <= 128) + scale = (1 / norm_factor) * (1 / norm_factor) + if do_flash_attn and context is None: + 
qkv_out = linear_func(input, attn_qkvw, attn_qkvb if attn_qkvb is not None else attn_qkvw, attn_qkvb + is not None, do_flash_attn, config.heads, False, rope_theta) + + context_layer = triton_flash_attn_kernel(qkv_out[0], qkv_out[1], qkv_out[2], scale, + input.shape[-2] % 128 == 0) + context_layer = _transpose_for_context(context_layer[:, :, :, :head_size]) + + else: + do_flash_attn = False + if context is not None: + query = torch.matmul(input, attn_qw) + key = torch.matmul(context, attn_kw) + value = torch.matmul(context, attn_vw) + else: + qkv = torch.matmul(input, attn_qkvw) + query, key, value = qkv.chunk(3, dim=-1) + query = query.contiguous() + key = key.contiguous() + value = value.contiguous() + query, key, value = inference_module.pad_transform_fp16(query, key, value, config.heads, do_flash_attn) + attention_scores = (torch.matmul(query, key.transpose(-1, -2)) * scale).softmax(dim=-1) + context_layer = _transpose_for_context(torch.matmul(attention_scores, value)) + + output = linear_func(context_layer, attn_ow, attn_ob, do_out_bias, False, config.heads, False, rope_theta) + return output + + output = selfAttention_fp(input, context, input_mask) + + return output + + @staticmethod + def backward(ctx, grad_output, grad_output1, grad_output2, grad_output3): + raise RuntimeError('You are running with DeepSpeed Inference mode. \ + Please switch to Training mode for running backward!') + + +class DeepSpeedDiffusersAttention(nn.Module): + """Initialize the DeepSpeed Transformer Layer. + Arguments: + layer_id: The layer index starting from 0, e.g. 
if model has 24 transformer layers, + layer_id will be 0,1,2...23 when each layer object is instantiated + config: An object of DeepSpeedInferenceConfig + """ + layer_id = 0 + + def __init__( + self, + config, + ): + super(DeepSpeedDiffusersAttention, self).__init__() + + self.config = config + self.config.layer_id = DeepSpeedDiffusersAttention.layer_id + DeepSpeedDiffusersAttention.layer_id += 1 + device = get_accelerator().current_device_name() if config.bigscience_bloom else 'cpu' + qkv_size_per_partition = (self.config.hidden_size // self.config.mp_size) * 3 + + data_type = self.config.dtype + data_type_fp = torch.half if self.config.dtype == torch.int8 else self.config.dtype + global inference_module + if inference_module is None: + builder = InferenceBuilder() + inference_module = builder.load() + + if DeepSpeedDiffusersAttention.layer_id == 1: + log_dist(f"DeepSpeed-Attention config: {self.config.__dict__}", [0]) + + self.attn_qkvw = nn.Parameter(torch.empty(self.config.hidden_size, + qkv_size_per_partition, + dtype=data_type, + device=device), + requires_grad=False) + self.attn_kw = nn.Parameter(torch.empty(self.config.hidden_size, + self.config.hidden_size, + dtype=data_type, + device=device), + requires_grad=False) + self.attn_vw = nn.Parameter(torch.empty(self.config.hidden_size, + self.config.hidden_size, + dtype=data_type, + device=device), + requires_grad=False) + self.attn_qw = nn.Parameter(torch.empty(self.config.hidden_size, + self.config.hidden_size, + dtype=data_type, + device=device), + requires_grad=False) + self.attn_qkvb = nn.Parameter(torch.empty(qkv_size_per_partition, dtype=data_type_fp, device=device), + requires_grad=False) + out_size_per_partition = self.config.hidden_size // self.config.mp_size + self.attn_ow = nn.Parameter(torch.empty(out_size_per_partition, + self.config.hidden_size, + dtype=data_type, + device=device), + requires_grad=False) + + self.attn_ob = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type_fp, 
device=device), + requires_grad=False) + self.do_out_bias = True + + if triton_flash_attn is None: + load_triton_flash_attn() + self.triton_flash_attn_kernel = triton_flash_attn() + self.num_attention_heads_per_partition = self.config.heads // self.config.mp_size + self.hidden_size_per_partition = self.config.hidden_size // self.config.mp_size + self.hidden_size_per_attention_head = self.config.hidden_size // self.config.heads + + self.norm_factor = math.sqrt(math.sqrt(self.config.hidden_size // self.config.heads)) + + if self.config.scale_attn_by_inverse_layer_idx is True: + self.norm_factor *= math.sqrt(self.config.layer_id + 1) + # https://github.com/huggingface/transformers/blob/v4.24.0/src/transformers/models/gpt2/modeling_gpt2.py#L191 + + if self.config.dtype in [torch.float16, torch.int8]: + self.score_context_func = inference_module.softmax_context_fp16 + self.linear_func = inference_module.linear_layer_fp16 + self.allocate_workspace = inference_module.allocate_workspace_fp16 + else: + self.score_context_func = inference_module.softmax_context_fp32 + self.linear_func = inference_module.linear_layer_fp32 + self.allocate_workspace = inference_module.allocate_workspace_fp32 + + def forward(self, input, context=None, input_mask=None): + if self.config.layer_id == 0: + self.allocate_workspace(self.config.hidden_size, self.config.heads, + input.size()[1], + input.size()[0], DeepSpeedDiffusersAttention.layer_id, self.config.mp_size, False, + 0, self.config.max_out_tokens, self.config.min_out_tokens) + output = DeepSpeedDiffusersAttentionFunction.apply(input, context, input_mask, self.config, self.attn_qkvw, + self.attn_qw, self.attn_kw, self.attn_vw, self.attn_qkvb, + self.num_attention_heads_per_partition, self.norm_factor, + self.hidden_size_per_partition, self.attn_ow, self.attn_ob, + self.do_out_bias, self.score_context_func, self.linear_func, + self.triton_flash_attn_kernel, self.config.rope_theta) + + return output diff --git 
a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/ds_attention.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/ds_attention.py new file mode 100644 index 0000000000000000000000000000000000000000..eb6ce2f75c6940164a429c78b3d763c11e564ab9 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/ds_attention.py @@ -0,0 +1,286 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import math +import torch +import torch.nn as nn +from deepspeed import comm as dist +from deepspeed.accelerator import get_accelerator +from .op_binding import LinearOp, VectorMatMulOp, SoftmaxContextOp, QKVGemmOp, SoftmaxOp + +minus_inf = -10000.0 + + +class DeepSpeedSelfAttention(nn.Module): + num_layers = 0 + _qkv_buffers = [] + + def __init__(self, config, mp_group=None, q_scales=None, q_groups=1, merge_count=1): + super(DeepSpeedSelfAttention, self).__init__() + self.config = config + data_type = self.config.dtype + data_type_fp = torch.half if self.config.dtype == torch.int8 else self.config.dtype + self.config.layer_id = DeepSpeedSelfAttention.num_layers + DeepSpeedSelfAttention.num_layers = DeepSpeedSelfAttention.num_layers + 1 + device = get_accelerator().current_device_name() #if config.bigscience_bloom else 'cpu' + if self.config.set_empty_params: + self.attn_qw = None + self.attn_qb = None + self.attn_kw = None + self.attn_kb = None + self.attn_vw = None + self.attn_vb = None + self.attn_qkvw = None + self.attn_qkvb = None + self.attn_ow = None + self.attn_ob = None + else: + qkv_size_per_partition = (self.config.hidden_size // self.config.mp_size) * 3 if config.num_kv < 0 else \ + ((self.config.heads + self.config.num_kv * 2) // self.config.mp_size) * (self.config.hidden_size // self.config.heads) + self.attn_qkvw = nn.Parameter(torch.empty(self.config.hidden_size, + qkv_size_per_partition, + dtype=data_type, + device=device), + 
requires_grad=False) + self.attn_qkvb = nn.Parameter(torch.empty(qkv_size_per_partition, dtype=data_type_fp, device=device), + requires_grad=False) + out_size_per_partition = self.config.hidden_size // self.config.mp_size + self.attn_ow = nn.Parameter(torch.empty(out_size_per_partition, + self.config.hidden_size, + dtype=data_type, + device=device), + requires_grad=False) + + self.attn_ob = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type_fp, device=device), + requires_grad=False) + + self.num_attention_heads_per_partition = self.config.heads // self.config.mp_size + self.num_kv_partition = self.config.num_kv // self.config.mp_size + self.hidden_size_per_partition = self.config.hidden_size // self.config.mp_size + self.hidden_size_per_attention_head = self.config.hidden_size // self.config.heads + + self.mp_group = mp_group + + # used for quantization + self.q_scales = q_scales + self.q_groups = q_groups + self.merge_count = int(math.log2(merge_count)) + + self.norm_factor = math.sqrt(self.config.hidden_size // self.config.heads) + if not config.use_mup: + self.norm_factor = math.sqrt(self.norm_factor) + + if self.config.scale_attn_by_inverse_layer_idx is True: + self.norm_factor *= math.sqrt(self.config.layer_id + 1) + # https://github.com/huggingface/transformers/blob/v4.24.0/src/transformers/models/gpt2/modeling_gpt2.py#L191 + + self.qkv_func = QKVGemmOp(config) + self.score_context_func = SoftmaxContextOp(config) + self.linear_func = LinearOp(config) + self.vector_matmul_func = VectorMatMulOp(config) + if len(DeepSpeedSelfAttention._qkv_buffers) == 0: + DeepSpeedSelfAttention._qkv_buffers = [ + torch.empty(self.hidden_size_per_partition * 3, + self.config.hidden_size, + dtype=data_type_fp, + device=device), + torch.empty(self.hidden_size_per_partition * 3, dtype=data_type_fp, device=device) + ] + + def compute_attention(self, qkv_out, input_mask, layer_past, alibi): + if isinstance(qkv_out, list) or isinstance(qkv_out, tuple): + qkv_out = 
qkv_out[0] + + no_masking = input_mask is None + + if no_masking: + input_mask = torch.empty(1) + + attn_key_value = self.score_context_func( + query_key_value=qkv_out, + attn_mask=((1 - input_mask).to(qkv_out.dtype) * + minus_inf) if input_mask.dtype == torch.int64 else input_mask, + heads=self.num_attention_heads_per_partition, + num_kv=self.num_kv_partition, + norm_factor=(1 / self.norm_factor if self.config.scale_attention else 1.0), + no_masking=no_masking, + layer_id=self.config.layer_id, + num_layers=DeepSpeedSelfAttention.num_layers, + alibi=alibi) + + context_layer, key_layer, value_layer = attn_key_value + return context_layer, key_layer, value_layer + + def _merge_qkv(self): + qvkw = DeepSpeedSelfAttention._qkv_buffers[0] + qvkw[:self.hidden_size_per_partition, :] = self.attn_qw # type: ignore + qvkw[self.hidden_size_per_partition:2 * self.hidden_size_per_partition, :] = self.attn_kw # type: ignore + qvkw[2 * self.hidden_size_per_partition:, :] = self.attn_vw # type: ignore + if self.attn_qb is not None: + qvkb = DeepSpeedSelfAttention._qkv_buffers[1] + qvkb[:self.hidden_size_per_partition] = self.attn_qb + qvkb[self.hidden_size_per_partition:2 * self.hidden_size_per_partition] = self.attn_kb # type: ignore + qvkb[2 * self.hidden_size_per_partition:] = self.attn_vb # type: ignore + return DeepSpeedSelfAttention._qkv_buffers + + def forward(self, + input, + input_mask, + head_mask=None, + layer_past=None, + get_present=False, + encoder_hidden_states=None, + encoder_attention_mask=None, + output_attentions=False, + norm_w=None, + norm_b=None, + alibi=None): + if self.attn_qkvw is None: + self._attn_qkvw, self._attn_qkvb = self._merge_qkv() + else: + self._attn_qkvw = self.attn_qkvw + self._attn_qkvb = self.attn_qkvb + if not self.config.pre_layer_norm: + qkv_out = self.linear_func(input=input, + weight=self._attn_qkvw, + bias=self._attn_qkvb, + add_bias=self.attn_qkvb is not None, + do_flash_attn=False, + num_heads=self.num_attention_heads_per_partition, + 
num_layers=DeepSpeedSelfAttention.num_layers) + else: + qkv_out = self.qkv_func(input=input, + weight=self._attn_qkvw, + bias=self._attn_qkvb, + gamma=norm_w, + beta=norm_b) + + context_layer, key_layer, value_layer = self.compute_attention(qkv_out=qkv_out, + input_mask=input_mask, + layer_past=layer_past, + alibi=alibi) + + output = self.vector_matmul_func(input=context_layer, weight=self.attn_ow) + inp_norm = qkv_out[-1] + + if self.config.mlp_after_attn and self.mp_group is not None and dist.get_world_size(group=self.mp_group) > 1: + dist.all_reduce(output, group=self.mp_group) + return (output, key_layer, value_layer, context_layer, inp_norm) + + +class BloomSelfAttention(DeepSpeedSelfAttention): + + def __init__(self, *args, **kwargs): + super(BloomSelfAttention, self).__init__(*args, **kwargs) + self.softmax_func = SoftmaxOp(self.config) + + ########### This part is taken/modified form the HF modeling_bloom.py ################ + # Reference: https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py + + def _transpose_for_context(self, x): + x = x.permute(0, 2, 1, 3).contiguous() + new_x_layer_shape = x.size()[:-2] + \ + (self.hidden_size_per_partition,) + return x.view(*new_x_layer_shape).contiguous() + + def _split_tensor_along_last_dim(self, tensor, num_partitions, contiguous_split_chunks=True): + """Split a tensor along its last dimension. + + Args: + tensor: ([`torch.tensor`], *required*): + input tensor to split + num_partitions ([`int`], *required*): + number of partitions to split the tensor + contiguous_split_chunks ([`bool`], *optional*, default=`False`):: + If True, make each chunk contiguous in memory. + """ + # Get the size and dimension. + last_dim = tensor.dim() - 1 + numerator, denominator = tensor.size()[last_dim], num_partitions + if not (numerator % denominator == 0): + raise ValueError(f"{numerator} is not divisible by {denominator}") + last_dim_size = numerator // denominator + # Split. 
+ tensor_list = torch.split(tensor, last_dim_size, dim=last_dim) + # Note: torch.split does not create contiguous tensors by default. + if contiguous_split_chunks: + return tuple(chunk.contiguous() for chunk in tensor_list) + + return tensor_list + + def compute_attention(self, qkv_out, input_mask, layer_past, alibi): + if isinstance(qkv_out, list) or isinstance(qkv_out, tuple): + qkv_out = qkv_out[0] + + no_masking = input_mask is None + + if no_masking: + input_mask = torch.empty(1) + + mixed_x_layer = qkv_out + alibi = alibi.to(get_accelerator().current_device_name()) + head_dim = self.hidden_size_per_partition // self.num_attention_heads_per_partition + new_tensor_shape = mixed_x_layer.size()[:-1] + (self.num_attention_heads_per_partition, 3 * head_dim) + mixed_x_layer = mixed_x_layer.view(*new_tensor_shape) + + query_layer, key_layer, value_layer = self._split_tensor_along_last_dim(mixed_x_layer, 3) + + # [batch_size, head_dim, q_length, k_length] + output_size = (query_layer.size(0), query_layer.size(2), query_layer.size(1), key_layer.size(1)) + # [batch_size, q_length, num_heads, head_dim] -> [q_length, batch_size * num_heads, head_dim] + query_layer = query_layer.transpose(1, 2).reshape(output_size[0] * output_size[1], output_size[2], -1) + # [batch_size, k_length, num_heads, head_dim] -> [k_length, batch_size * num_heads, head_dim] + key_layer = key_layer.transpose(1, 2).reshape(output_size[0] * output_size[1], output_size[3], + -1).transpose(-1, -2) + value_layer = value_layer.transpose(1, 2).reshape(output_size[0] * output_size[1], output_size[3], -1) + if layer_past is not None: + past_key, past_value = layer_past + # concatenate along seq_length dimension -> [batch_size, qk_length, num_heads, head_dim] + key_layer = torch.cat((past_key.type_as(key_layer), key_layer), dim=-1) + value_layer = torch.cat((past_value.type_as(value_layer), value_layer), dim=-2) + + presents = (key_layer, value_layer) + # Raw attention scores. 
[batch_size * num_heads, q_length, k_length] + matmul_result = torch.matmul(query_layer, key_layer) + # change view to [batch_size, num_heads, q_length, k_length] + attention_scores = matmul_result.view(output_size[0], output_size[1], output_size[2], -1) + + offset = dist.get_rank() * self.num_attention_heads_per_partition if dist.is_initialized() else 0 + target_dtype = torch.float16 if self.config.dtype == torch.int8 else self.config.dtype + + # When using the hybrid engine with BLOOM, input_mask needs to be converted from torch.bool -> torch.int64 + if input_mask.dtype == torch.bool: + input_mask = input_mask.long() + + attention_probs = self.softmax_func(attn_scores=attention_scores, + attn_mask=((1 - input_mask).to(target_dtype) * minus_inf), + alibi=alibi, + triangular=(self.config.triangular_masking + and (attention_scores.shape[-2] > 1)), + recompute=False, + local_attention=False, + window_size=1, + async_op=False, + layer_scale=1 / (self.norm_factor * self.norm_factor), + head_offset=offset) + + # change view [batch_size x num_heads, q_length, k_length] + attention_probs_reshaped = attention_probs.view(*matmul_result.shape) + + # matmul: [batch_size * num_heads, q_length, head_dim] + context_layer = torch.bmm(attention_probs_reshaped, value_layer) + + # change view [batch_size, num_heads, q_length, head_dim] + context_layer = context_layer.view( + context_layer.size(0) // self.num_attention_heads_per_partition, self.num_attention_heads_per_partition, + context_layer.size(1), context_layer.shape[-1]) + + context_layer = self._transpose_for_context(context_layer) + key_layer = presents[0] + value_layer = presents[1] + + return context_layer, key_layer, value_layer + + ###################### End of HF modeling_bloom addition ######################## diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/ds_mlp.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/ds_mlp.py new file mode 100644 index 
0000000000000000000000000000000000000000..36de06db920fb51adf31809e02d74a797494938c --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/ds_mlp.py @@ -0,0 +1,124 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import math +import torch +import torch.nn as nn +from deepspeed import comm as dist +from deepspeed.utils.types import GATED_ACTIVATION_TYPES +from deepspeed.accelerator import get_accelerator +from .op_binding import MLPGemmOp, VectorMatMulOp, GELUGemmOp, ResidualAddOp + + +class DeepSpeedMLP(nn.Module): + _inter_w_buffers = [] + + def __init__(self, config, mp_group=None, q_scales=None, q_groups=1, merge_count=1, mlp_extra_grouping=False): + super(DeepSpeedMLP, self).__init__() + + self.config = config + + data_type = torch.int8 if self.config.dtype == torch.int8 else self.config.dtype + data_type_fp = torch.half if self.config.dtype == torch.int8 else self.config.dtype + device = get_accelerator().current_device_name() + + proj_factor = 2 if self.config.mlp_act_func_type in GATED_ACTIVATION_TYPES else 1 + self.config.intermediate_size = self.config.intermediate_size if self.config.intermediate_size > 0 else 4 * self.config.hidden_size + self.intm_w_sz_per_partition = self.config.intermediate_size * proj_factor // self.config.mp_size + self.intm_o_sz_per_partition = self.config.intermediate_size // self.config.mp_size + + if self.config.set_empty_params: + self.attn_nw = None + self.attn_nb = None + self.inter_w = None + self.inter_b = None + self.inter_up_w = None + self.inter_up_b = None + self.inter_gate_w = None + self.inter_gate_b = None + self.output_w = None + self.output_b = None + else: + self.attn_nw = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type_fp, device=device), + requires_grad=False) + self.attn_nb = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type_fp, device=device), + requires_grad=False) + + self.inter_w = 
nn.Parameter(torch.empty(self.config.hidden_size, + self.intm_w_sz_per_partition, + dtype=data_type, + device=device), + requires_grad=False) + self.inter_b = nn.Parameter(torch.empty(self.intm_w_sz_per_partition, dtype=data_type_fp, device=device), + requires_grad=False) + self.output_w = nn.Parameter(torch.empty(self.intm_o_sz_per_partition, + self.config.hidden_size, + dtype=data_type, + device=device), + requires_grad=False) + self.output_b = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type_fp, device=device), + requires_grad=False) + + # used for quantization + self.q_scales = q_scales + self.q_groups = q_groups * 2 if mlp_extra_grouping else q_groups + self.merge_count = int(math.log2(merge_count)) + self.mp_group = mp_group + + self.mlp_gemm_func = MLPGemmOp(config) + self.vector_matmul_func = VectorMatMulOp(config) + self.fused_gemm_gelu = GELUGemmOp(config) + self.residual_add_func = ResidualAddOp(config) + + if len(DeepSpeedMLP._inter_w_buffers) == 0: + DeepSpeedMLP._inter_w_buffers = [ + torch.empty(self.intm_w_sz_per_partition, self.config.hidden_size, dtype=data_type, device=device), + torch.empty(self.intm_w_sz_per_partition, dtype=data_type_fp, device=device) + ] + + def _merge_inter_w(self): + inter_w = DeepSpeedMLP._inter_w_buffers[0] + inter_w[:self.intm_w_sz_per_partition // 2, :] = self.inter_up_w # type: ignore + inter_w[self.intm_w_sz_per_partition // 2:, :] = self.inter_gate_w # type: ignore + if self.inter_up_b is not None: + inter_b = DeepSpeedMLP._inter_w_buffers[1] + inter_b[:self.intm_w_sz_per_partition // 2] = self.inter_up_b # type: ignore + inter_b[self.intm_w_sz_per_partition // 2:] = self.inter_gate_b # type: ignore + return DeepSpeedMLP._inter_w_buffers + + def forward(self, input, residual, residual_norm, bias): + if self.inter_w is None: + self._inter_w, self._inter_b = self._merge_inter_w() + else: + self._inter_w = self.inter_w + self._inter_b = self.inter_b + + residual_add = None + if self.attn_nw is None: + 
output = self.fused_gemm_gelu(input=residual_norm, + weight=self._inter_w, + bias=self._inter_b, + weight_out=self.output_w) + else: + output, residual_add = self.mlp_gemm_func(input=input, + residual=residual, + weight_interm=self._inter_w, + weight_out=self.output_w, + input_bias=bias, + bias=self._inter_b, + gamma=self.attn_nw, + beta=self.attn_nb) + + residual = self.residual_add_func(hidden_state=output, + residual=residual, + add_bias=bias is not None, + attention_output=input, + attention_bias=bias if bias is not None else self.output_b, + final_bias=self.output_b, + residual_add=residual_add) + if self.mp_group is not None and dist.get_world_size(group=self.mp_group) > 1: + dist.all_reduce(residual, group=self.mp_group) + + return residual diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/moe_inference.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/moe_inference.py new file mode 100644 index 0000000000000000000000000000000000000000..90bfcae81bf268e4f571f7b909db45437c031c96 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/moe_inference.py @@ -0,0 +1,365 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import json +import math +import torch +from torch.autograd import Function +# accelerator modules will be imported if needed +inference_module = None +specialized_mode = None +import torch.nn as nn +from .ds_attention import DeepSpeedSelfAttention +from .config import DeepSpeedInferenceConfig +from ....moe.sharded_moe import TopKGate +from deepspeed import comm as dist +from deepspeed.accelerator import get_accelerator +from deepspeed.ops.op_builder import InferenceBuilder + + +class DeepSpeedMoEInferenceConfig(DeepSpeedInferenceConfig): + """Initialize the DeepSpeed Transformer Config. 
+ Arguments: + hidden_size: The hidden size of the transformer layer + intermediate_size: The intermediate size of the feed-forward part of transformer layer + heads: The number of heads in the self-attention of the transformer layer + num_hidden_layers: The number of transformer layers + layer_norm_eps: The epsilon value for the layer norm + local_rank: Optional: The rank of GPU running the transformer kernel, it is not required + to use if the model already set the current device, otherwise need to set it + so that the transformer kernel can work on the right device + mp_size (optional): This argument is mainly used to create the parameters on the kernel side + using model-parallel architecture. If the client model already takes care of this, there is no + need to pass this argument. + fp16: Enable half-precision computation + bf16: Enable bf16 floating point computation + pre_layer_norm: Select between Pre-LN or Post-LN transformer architecture + stochastic_mode: Enable for high performance, please note that this flag has some level of + non-determinism and can produce different results on different runs. However, we have seen + that by enabling it, the pretraining tasks such as BERT are not affected and can obtain + a high accuracy level. On the other hand, for the downstream tasks, such as fine-tuning, we recommend + to turn it off in order to be able to reproduce the same result through the regular kernel execution. + + scale_attention: If true, both q and k are scaled by 1/sqrt(attention_heads) before attention computation. 
+ return_tuple: if True, returns the transformer output as a tuple, otherwise returns as a tensor + """ + + def __init__(self, + hidden_size=-1, + intermediate_size=-1, + heads=-1, + num_hidden_layers=-1, + layer_norm_eps=1e-12, + local_rank=-1, + mp_size=1, + fp16=False, + bf16=False, + q_int8=False, + pre_layer_norm=True, + stochastic_mode=False, + scale_attention=True, + triangular_masking=True, + local_attention=False, + window_size=256, + return_tuple=True, + moe_experts=1, + global_experts=1, + k=1, + capacity_factor=1., + eval_capacity_factor=1., + min_capacity=1, + noisy_gate_policy=None, + drop_tokens=True, + use_rts=False, + mlp_type='standard', + scale_attn_by_inverse_layer_idx=False): + super(DeepSpeedMoEInferenceConfig, + self).__init__(hidden_size, (intermediate_size if intermediate_size > 0 else 4 * hidden_size), heads, + num_hidden_layers, layer_norm_eps, local_rank, mp_size, fp16, bf16, q_int8, + pre_layer_norm, stochastic_mode, scale_attention, triangular_masking, local_attention, + window_size, return_tuple) + self.moe_experts = moe_experts + self.k = k + self.capacity_factor = capacity_factor + self.eval_capacity_factor = eval_capacity_factor + self.min_capacity = min_capacity + self.noisy_gate_policy = noisy_gate_policy + self.drop_tokens = drop_tokens + self.use_rts = use_rts + self.global_experts = global_experts + self.mlp_type = mlp_type + self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx + + @classmethod + def from_dict(cls, json_object): + config = DeepSpeedInferenceConfig() + for key, value in json_object.items(): + config.__dict__[key] = value + return config + + @classmethod + def from_json_file(cls, json_file): + with open(json_file, "r", encoding='utf-8') as reader: + text = reader.read() + return cls.from_dict(json.loads(text)) + + +class DeepSpeedMLPFunction(Function): + + @staticmethod + def forward(ctx, input, inter_w, inter_b, config, output_b, output_w, q_scales, q_groups, merge_count, mp_group, + 
async_op): + if config.q_int8: + intermediate = inference_module.fused_gemm_gelu_int8(input, inter_w, inter_b, config.epsilon, q_scales[2], + (q_groups * (2**merge_count)), config.pre_layer_norm) + output = inference_module.vector_matmul_int8(intermediate, output_w, q_scales[3], q_groups, (merge_count)) + else: + mlp_gemm_func = inference_module.fused_gemm_gelu_fp16 if config.fp16 else \ + inference_module.fused_gemm_gelu_fp32 + + output = mlp_gemm_func(input, inter_w, inter_b, output_w, config.epsilon, config.pre_layer_norm, async_op) + if mp_group is not None and dist.get_world_size(group=mp_group) > 1: + dist.all_reduce(output, group=mp_group, async_op=async_op) + + return output + output_b + + @staticmethod + def backward(ctx, grad_output): + raise RuntimeError('You are running with DeepSpeed Inference mode. \ + Please switch to Training mode for running backward!') + + +class DeepSpeedMoEMLP(nn.Module): + + def __init__(self, config, q_scales=None, q_groups=1, merge_count=1, mlp_extra_grouping=False, mp_group=None): + super(DeepSpeedMoEMLP, self).__init__() + + self.config = config + self.attn_nw = nn.Parameter(torch.Tensor(self.config.hidden_size)) + self.attn_nb = nn.Parameter(torch.Tensor(self.config.hidden_size)) + interm_size = self.config.intermediate_size // (1 if mp_group is None else dist.get_world_size(group=mp_group)) + self.inter_w = nn.Parameter(torch.Tensor(self.config.hidden_size, interm_size)) + self.inter_b = nn.Parameter(torch.Tensor(interm_size)) + self.output_w = nn.Parameter(torch.Tensor((interm_size), self.config.hidden_size)) + self.output_b = nn.Parameter(torch.Tensor(self.config.hidden_size)) + + # used for quantization + self.q_scales = q_scales + self.q_groups = q_groups * 2 if mlp_extra_grouping else q_groups + self.merge_count = int(math.log2(merge_count)) + self.mp_group = mp_group + + def forward(self, input, async_op=False): + return DeepSpeedMLPFunction.apply(input, self.inter_w, self.inter_b, self.config, self.output_b, 
self.output_w, + self.q_scales, self.q_groups, self.merge_count, self.mp_group, async_op) + + +class DeepSpeedMoEInference(nn.Module): + """Initialize the DeepSpeed MoE Transformer Layer. + Arguments: + layer_id: The layer index starting from 0, e.g. if model has 24 transformer layers, + layer_id will be 0,1,2...23 when each layer object is instantiated + config: An object of DeepSpeedInferenceConfig + mp_group: Model parallelism group initialized on the modeling side. + quantize_scales: This argument groups all the layers' scales used for quantization + quantize_groups: Number of groups used for quantizing the model + merge_count: Shows the number of model-parallel checkpoints merged before running inference. + We use this argument to control the quantization scale for the model parameters if a bigger + quantize-grouping than 1 is used. + mlp_extra_grouping: This flag is used to show a 2x higher number of groups used for the MLP part + of a Transformer layer. We use this feature for quantization to reduce the convergence impact + for specific downstream tasks. 
+ """ + layer_id = 0 + + def __init__(self, + config, + mp_group=None, + ep_group=None, + expert_mp_group=None, + quantize_scales=None, + quantize_groups=1, + merge_count=1, + mlp_extra_grouping=False): + super(DeepSpeedMoEInference, self).__init__() + + self.config = config + self.config.layer_id = DeepSpeedMoEInference.layer_id + global inference_module + global specialized_mode + if inference_module is None: + specialized_mode = False + # InferenceSpecializedBuilder is not among DeepSpeed provided builder yet, so we infer by builder name string + builder = get_accelerator().create_op_builder("InferenceSpecializedBuilder") + if builder is not None and builder.is_compatible(): + inference_module = builder.load() + specialized_mode = True + else: + inference_module = InferenceBuilder().load() + self.config.specialized_mode = specialized_mode + assert self.config.dtype != torch.bfloat16, "DeepSpeed MoE Transformer Inference not yet tested for bfloat support" + + DeepSpeedMoEInference.layer_id += 1 + self.attention = DeepSpeedSelfAttention(self.config, mp_group, quantize_scales, quantize_groups, merge_count) + self.attn_nw = nn.Parameter(torch.Tensor(self.config.hidden_size)) + self.attn_nb = nn.Parameter(torch.Tensor(self.config.hidden_size)) + + self.norm_w = nn.Parameter(torch.Tensor(self.config.hidden_size)) + self.norm_b = nn.Parameter(torch.Tensor(self.config.hidden_size)) + + if config.mlp_type == 'residual': + self.res_mlp = DeepSpeedMoEMLP(config, quantize_scales, quantize_groups, merge_count, mlp_extra_grouping, + mp_group) + self.res_coef = nn.Parameter(torch.Tensor(self.config.hidden_size, 2)) + self.coef_func = inference_module.softmax_fp16 if self.config.dtype in [torch.float16, torch.int8] else \ + inference_module.softmax_fp32 + self.vector_matmul_func = inference_module.vector_matmul_fp16 if self.config.dtype == torch.float16 else \ + inference_module.vector_matmul_fp32 + + config.mp_size = 1 + self.mlp = nn.ModuleList( + DeepSpeedMoEMLP(config, 
quantize_scales, quantize_groups, merge_count, mlp_extra_grouping, expert_mp_group) + for i in range(self.config.moe_experts)) + + self.moe_gate = TopKGate(self.config.hidden_size, self.config.global_experts, self.config.k, + self.config.capacity_factor, self.config.eval_capacity_factor, + self.config.min_capacity, self.config.noisy_gate_policy, self.config.drop_tokens, + self.config.use_rts) + + self.ep_group = ep_group + self.mp_group = mp_group + self.expert_mp_group = expert_mp_group + + print("DeepSpeed MoE Transformer Inference config is ", self.config.__dict__) + + self.bias_residual_func = inference_module.bias_residual_fp16 if self.config.dtype in [torch.float16, torch.int8] else \ + inference_module.bias_residual_fp32 + self.ds_layernorm = inference_module.layer_norm_fp16 if self.config.dtype in [torch.float16, torch.int8] else \ + inference_module.layer_norm_fp32 + self.einsum_sec_sm_ecm = inference_module.einsum_sec_sm_ecm_fp16 if self.config.dtype in [torch.float16, torch.int8] else \ + inference_module.einsum_sec_sm_ecm_fp32 + + def res_coef_func(self, inp, async_op): + inp = self.vector_matmul_func(inp, self.res_coef, async_op) + return self.coef_func(inp, torch.empty(1), False, False, False, 256, async_op) + + def moe_gate_einsum(self, attention_output): + _, combined_weights, dispatch_mask, _ = self.moe_gate( + attention_output.view(-1, self.config.hidden_size), + None, + ) + dispatched_attention = self.einsum_sec_sm_ecm(dispatch_mask.type_as(attention_output), + attention_output.view(-1, self.config.hidden_size)) + return dispatched_attention, combined_weights + + def expert_exec(self, dispatched_input): + dispatched_input = dispatched_input.reshape(self.config.global_experts // self.config.moe_experts, + self.config.moe_experts, -1, self.config.hidden_size) + + chunks = dispatched_input.chunk(self.config.moe_experts, dim=1) + expert_outputs = torch.empty(( + self.config.moe_experts, + chunks[0].shape[0], + ) + chunks[0].shape[2:], + 
dtype=dispatched_input.dtype, + device=dispatched_input.device) + for chunk, expert in zip(chunks, range(len(self.mlp))): + expert_outputs[expert] = self.mlp[expert](chunk.view(-1, dispatched_input.shape[-2], + dispatched_input.shape[-1])) + return expert_outputs + + def _alltoall(self, dispatched_attention): + if dist.get_world_size(group=self.ep_group) > 1: + dispatched_input = torch.empty_like(dispatched_attention) + dist.all_to_all_single(dispatched_input, dispatched_attention, group=self.ep_group) + return dispatched_input + else: + return dispatched_attention + + def scale_expert_output(self, attention_output, expert_output, combined_weights): + combined_output = torch.matmul( + combined_weights.type_as(attention_output).reshape(combined_weights.shape[0], -1), + expert_output.reshape(-1, expert_output.shape[-1])) + return combined_output.reshape(attention_output.shape) + + def forward(self, + input, + input_mask=None, + attention_mask=None, + head_mask=None, + layer_past=None, + get_key_value=False, + get_present=False, + encoder_output=None, + enc_dec_attn_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + use_cache=False, + output_attentions=False): + get_present = (get_present or get_key_value or use_cache) + input_mask = input_mask if attention_mask is None else attention_mask + input_type = input.dtype + + if (self.config.dtype in [torch.float16, torch.int8]) and input_type == torch.float: + input = input.half() + + with torch.no_grad(): + attention_output = self.attention(input, input_mask, head_mask, layer_past, get_present, + encoder_hidden_states, encoder_attention_mask, output_attentions, + self.norm_w, self.norm_b) + + if get_present: + attention_output, p_key, p_value = attention_output[0:3] + presents = (p_key, p_value) + elif output_attentions: + attention_output, _, _, context_output = attention_output[0:4] + else: + attention_output = attention_output[0] + + residual_add = attention_output + self.attention.attn_ob + 
attention_output = self.ds_layernorm(residual_add, self.attn_nw, self.attn_nb, self.config.epsilon) + + if self.config.mlp_type == 'residual': + res_mlp_out = self.res_mlp(attention_output, async_op=True) + res_coef_out = self.res_coef_func(attention_output, async_op=True) + + if self.expert_mp_group is not None: + world_size = dist.get_world_size(group=self.expert_mp_group) + gather_buffer = torch.zeros(world_size * attention_output.numel(), + dtype=attention_output.dtype, + device=attention_output.device) + dist.all_gather_into_tensor(gather_buffer, attention_output, group=self.expert_mp_group) + attention_output = gather_buffer.view(-1, *attention_output.size()[1:]) + + ############## MoE Gating + Experts ############### + dispatched_attention, combined_weights = self.moe_gate_einsum(attention_output) + dispatched_input = self._alltoall(dispatched_attention) + expert_outputs = self.expert_exec(dispatched_input) + expert_output = self._alltoall(expert_outputs) + output = self.scale_expert_output(attention_output, expert_output, combined_weights) + ################################################ + + if self.expert_mp_group is not None: + output = output.split(output.shape[0] // dist.get_world_size(group=self.expert_mp_group), + dim=0)[dist.get_rank(group=self.expert_mp_group)] + + if self.config.mlp_type == 'residual': + inference_module.moe_res_matmul(res_mlp_out, res_coef_out, output) + + output = self.bias_residual_func(output, residual_add, torch.empty(1)) + + if not self.config.pre_layer_norm: + output = self.ds_layernorm(output, self.norm_w, self.norm_b, self.config.epsilon) + + if input_type != output.dtype: + output = output.to(input_type) + + if get_present: + output = (output, presents) + + if self.config.return_tuple: + return output if type(output) is tuple else (output, ) + else: + return output diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__init__.py 
b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..20b7bf12a917865a753d9db41851562edc8cb337 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .linear import LinearOp +from .vector_matmul import VectorMatMulOp +from .softmax_context import SoftmaxContextOp +from .qkv_gemm import QKVGemmOp +from .softmax import SoftmaxOp +from .mlp_gemm import MLPGemmOp +from .gelu_gemm import GELUGemmOp +from .residual_add import ResidualAddOp diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3f127cc408500bf48b17a4b443cc0a9c3347bde7 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__pycache__/base.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__pycache__/base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..66f82d0c7722b99ec34f44cd4933e3bbd10791e8 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__pycache__/base.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__pycache__/gelu_gemm.cpython-310.pyc 
b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__pycache__/gelu_gemm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..53e38d5c5dca352dc1bfe6ad3b8ad4b63428cf13 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__pycache__/gelu_gemm.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__pycache__/linear.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__pycache__/linear.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..11de77a6fc0ec2ee31ed0cc702d3754007d0a6dc Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__pycache__/linear.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__pycache__/mlp_gemm.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__pycache__/mlp_gemm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c6198e878e331f18ffe3fe7aabbdc338719b55c1 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__pycache__/mlp_gemm.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__pycache__/softmax.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__pycache__/softmax.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec8a1fd59bbee3e6293c68df9f5f5c56a0c9734a Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__pycache__/softmax.cpython-310.pyc differ diff --git 
a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__pycache__/softmax_context.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__pycache__/softmax_context.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6fa1d6d41301625b33b11d2fa665289e52d56fd2 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__pycache__/softmax_context.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__pycache__/vector_matmul.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__pycache__/vector_matmul.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1b22d3a92ad7b2462d79e7448e9dd7d797854eea Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__pycache__/vector_matmul.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/base.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/base.py new file mode 100644 index 0000000000000000000000000000000000000000..5a997f95d5cc53e3323180ad7cd70ed9e8210a1b --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/base.py @@ -0,0 +1,20 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from ..config import DeepSpeedInferenceConfig + +from deepspeed.ops.op_builder import InferenceBuilder + + +class BaseOp(torch.nn.Module): + inference_module = None + + def __init__(self, config: DeepSpeedInferenceConfig): + super(BaseOp, self).__init__() + self.config = config + if BaseOp.inference_module is None: + builder = InferenceBuilder() + BaseOp.inference_module = builder.load() diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/gelu_gemm.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/gelu_gemm.py new file mode 100644 index 0000000000000000000000000000000000000000..63323c150752b9b98e1120e427251ced382b115a --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/gelu_gemm.py @@ -0,0 +1,45 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp +import deepspeed + + +class GELUGemmOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig): + super(GELUGemmOp, self).__init__(config) + try: + if self.config.dtype in [torch.float16, torch.int8]: + if deepspeed.HAS_TRITON and self.config.use_triton and self.config.dtype == torch.float16: + from deepspeed.ops.transformer.inference.triton.ops import fused_gemm_gelu as _triton_fused_gemm_gelu + self.fused_gemm_gelu = _triton_fused_gemm_gelu # type: ignore + else: + self.fused_gemm_gelu = self.inference_module.fused_gemm_gelu_fp16 # type: ignore + elif self.config.dtype == torch.bfloat16: + self.fused_gemm_gelu = self.inference_module.fused_gemm_gelu_bf16 # type: ignore + else: + self.fused_gemm_gelu = self.inference_module.fused_gemm_gelu_fp32 # type: ignore + except AttributeError: + self.fused_gemm_gelu = self.gelu_gemm_fallback + + def gelu_gemm_fallback(self, 
input, weight, scale, bias, out, out_scale, dtype, transpose): + raise NotImplementedError + + def forward(self, input: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor, weight_out: torch.Tensor): + + output = self.fused_gemm_gelu( + input, + weight, + weight.scale if hasattr(weight, 'scale') else torch.empty(1), # type: ignore + bias, + weight_out, + weight_out.scale if hasattr(weight_out, 'scale') else torch.empty(1), # type: ignore + self.config.dtype == torch.int8, + self.config.transposed_mode) + + return output diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/linear.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/linear.py new file mode 100644 index 0000000000000000000000000000000000000000..b8decb6dc5ea4333dd2ae117db352936de9b6c63 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/linear.py @@ -0,0 +1,60 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp +import deepspeed + + +class LinearOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig): + super(LinearOp, self).__init__(config) + try: + if self.config.dtype in [torch.float16, torch.int8]: + if deepspeed.HAS_TRITON and self.config.use_triton and self.config.dtype == torch.float16: + from deepspeed.ops.transformer.inference.triton.ops import linear_func as _triton_linear_func + self.linear_func = _triton_linear_func + triton_autotune = config.triton_autotune and config.layer_id == 0 + if triton_autotune: + __class__._triton_autotune(2, self.config.max_out_tokens, self.config.hidden_size) + else: + self.linear_func = self.inference_module.linear_layer_fp16 + self.linear_func = self.inference_module.linear_layer_fp16 + elif self.config.dtype == torch.bfloat16: + self.linear_func = self.inference_module.linear_layer_bf16 + else: + self.linear_func = self.inference_module.linear_layer_fp32 + except AttributeError: + self.linear_func = self.linear_fallback + + def linear_fallback(self, input, weight, bias, add_bias, do_flash_attn, num_heads, transpose, rope_theta): + raise NotImplementedError + + def forward(self, + input: torch.Tensor, + weight: torch.Tensor, + bias: torch.Tensor, + add_bias: bool, + do_flash_attn: bool, + num_heads: int, + external_cache: bool = None, + num_layers: int = None): + qkv_out = self.linear_func(input, weight, bias, add_bias, do_flash_attn, num_heads, + self.config.transposed_mode, self.config.rope_theta) + return qkv_out + + @staticmethod + def _triton_autotune(min_seqlen, max_seqlen, hidden_size, dtype=torch.float16): + from deepspeed.ops.transformer.inference.triton.matmul_ext import Fp16Matmul, matmul + seqlen = [(min_seqlen + i) + for i in range(0, max_seqlen - min_seqlen + Fp16Matmul._cache_stride + 1, Fp16Matmul._cache_stride)] + Fp16Matmul._read_autotune_table() + for N 
in seqlen: + A = torch.randn((N, hidden_size), dtype=dtype, device='cuda') + B = torch.randn((hidden_size, 3 * hidden_size), dtype=dtype, device='cuda') + matmul(A, B) + Fp16Matmul._update_autotune_table() diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/mlp_gemm.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/mlp_gemm.py new file mode 100644 index 0000000000000000000000000000000000000000..3064c00d1755d9a98085c8e2773a2ec53682a016 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/mlp_gemm.py @@ -0,0 +1,102 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Optional + +import os +import torch +import torch.nn.functional as F +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp +from deepspeed.utils.types import NormType + + +class MLPGemmOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig): + super(MLPGemmOp, self).__init__(config) + try: + if self.config.norm_type == NormType.LayerNorm: + if self.config.dtype in [ + torch.float16, torch.int8 + ]: # non-triton cuda kernel has a higher performance in MLP than mlp_gemm_func in triton.ops + self.mlp_gemm_func = self.inference_module.mlp_gemm_fp16 # type: ignore + elif self.config.dtype == torch.bfloat16: + self.mlp_gemm_func = self.inference_module.mlp_gemm_bf16 + else: + self.mlp_gemm_func = self.inference_module.mlp_gemm_fp32 # type: ignore + elif self.config.norm_type == NormType.RMSNorm: + if self.config.dtype in [torch.float16, torch.int8]: + self.mlp_gemm_func = self.inference_module.rms_mlp_gemm_fp16 # type: ignore + elif self.config.dtype == torch.bfloat16: + self.mlp_gemm_func = self.inference_module.rms_mlp_gemm_bf16 + else: + self.mlp_gemm_func = self.inference_module.rms_mlp_gemm_fp32 # type: ignore + except AttributeError: + if self.config.norm_type == 
NormType.LayerNorm: + self.mlp_gemm_func = self.mlp_gemm_fallback + elif self.config.norm_type == NormType.RMSNorm: + self.mlp_gemm_func = self.rms_mlp_gemm_fallback + + def mlp_gemm_fallback(self, input, residual, input_bias, weight_interm, weight_out, bias, gamma, beta, eps, + pre_layer_norm, mlp_after_attn, interm_scale, out_scale, dtype, mlp_act_func_type, + transpose): + if os.environ.get('DS_KI_FALLBACK') == 'True' and mlp_after_attn and not transpose: + residual_add = F.layer_norm(input + residual + input_bias, (input.shape[2], ), gamma, beta, + self.config.epsilon) + tmp = torch.matmul(residual_add, weight_interm) + tmp = F.gelu(tmp + bias) + output = torch.matmul(tmp, weight_out) + return (output, residual_add) + else: + raise NotImplementedError + + def rms_mlp_gemm_fallback(self, input, residual, weight_interm, weight_out, gamma, eps, interm_scale, out_scale, + dtype, mlp_act_func_type, transpose): + raise NotImplementedError + + def forward(self, + input: torch.Tensor, + residual: torch.Tensor, + weight_interm: torch.Tensor, + weight_out: torch.Tensor, + input_bias: Optional[torch.Tensor] = None, + bias: Optional[torch.Tensor] = None, + gamma: Optional[torch.Tensor] = None, + beta: Optional[torch.Tensor] = None): + if self.config.norm_type == NormType.LayerNorm: + output, residual_add = self.mlp_gemm_func( + input, + residual, + input_bias, + weight_interm, + weight_out, + bias, + gamma, + beta, + self.config.epsilon, + self.config.pre_layer_norm, + self.config.mlp_after_attn, + weight_interm.scale if hasattr(weight_interm, 'scale') else torch.empty(1), # type: ignore + weight_out.scale if hasattr(weight_out, 'scale') else torch.empty(1), # type: ignore + self.config.dtype == torch.int8, + self.config.mlp_act_func_type, + self.config.transposed_mode) + else: + if input_bias is not None: + input += input_bias + output, residual_add = self.mlp_gemm_func( + input, + residual, + weight_interm, + weight_out, + gamma, + self.config.epsilon, + 
weight_interm.scale if hasattr(weight_interm, 'scale') else torch.empty(1), # type: ignore + weight_out.scale if hasattr(weight_out, 'scale') else torch.empty(1), # type: ignore + self.config.dtype == torch.int8, + self.config.mlp_act_func_type, + self.config.transposed_mode) + return output, residual_add diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/qkv_gemm.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/qkv_gemm.py new file mode 100644 index 0000000000000000000000000000000000000000..250bf9864e1e77aba86e54dde5e0010428212fe1 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/qkv_gemm.py @@ -0,0 +1,90 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import os +import torch +import torch.nn.functional as F +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp +import deepspeed +from deepspeed.utils.types import NormType + + +class QKVGemmOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig): + super(QKVGemmOp, self).__init__(config) + try: + if self.config.norm_type == NormType.LayerNorm: + if self.config.dtype in [torch.float16, torch.int8]: + if deepspeed.HAS_TRITON and self.config.use_triton and self.config.dtype == torch.float16: + from deepspeed.ops.transformer.inference.triton.ops import qkv_gemm_func as _triton_qkv_gemm_func + self.qkv_gemm_func = _triton_qkv_gemm_func + triton_autotune = config.triton_autotune and config.layer_id == 0 + if triton_autotune: + __class__._triton_autotune(2, self.config.max_out_tokens, self.config.hidden_size) + else: + self.qkv_gemm_func = self.inference_module.qkv_gemm_fp16 # type: ignore + elif self.config.dtype == torch.bfloat16: + self.qkv_gemm_func = self.inference_module.qkv_gemm_bf16 + else: + self.qkv_gemm_func = self.inference_module.qkv_gemm_fp32 # type: ignore + elif self.config.norm_type 
== NormType.RMSNorm: + if self.config.dtype in [torch.float16, torch.int8]: + self.qkv_gemm_func = self.inference_module.rms_qkv_gemm_fp16 # type: ignore + elif self.config.dtype == torch.bfloat16: + self.qkv_gemm_func = self.inference_module.rms_qkv_gemm_bf16 + else: + self.qkv_gemm_func = self.inference_module.rms_qkv_gemm_fp32 # type: ignore + except AttributeError: + if self.config.norm_type == NormType.LayerNorm: + self.qkv_gemm_func = self.qkv_gemm_fallback + elif self.config.norm_type == NormType.RMSNorm: + self.qkv_gemm_func = self.rms_qkv_gemm_fallback + + @staticmethod + def _triton_autotune(min_seqlen, max_seqlen, hidden_size, dtype=torch.float16): + from deepspeed.ops.transformer.inference.triton.matmul_ext import Fp16Matmul, matmul + seqlen = [(min_seqlen + i) + for i in range(0, max_seqlen - min_seqlen + Fp16Matmul._cache_stride + 1, Fp16Matmul._cache_stride)] + Fp16Matmul._read_autotune_table() + for N in seqlen: + A = torch.randn((N, hidden_size), dtype=dtype, device='cuda') + B = torch.randn((hidden_size, 3 * hidden_size), dtype=dtype, device='cuda') + matmul(A, B) + Fp16Matmul._update_autotune_table() + + def qkv_gemm_fallback(self, input, weight, q_scale, bias, gamma, beta, eps, add_bias, q_int8, transpose): + if os.environ.get('DS_KI_FALLBACK') == 'True' and not transpose: + inp_norm = F.layer_norm(input, (input.shape[2], ), gamma, beta, eps) + tmp = torch.matmul(inp_norm, weight) + if add_bias: + tmp += bias + output = [tmp, inp_norm] + return output + else: + raise NotImplementedError + + def rms_qkv_gemm_fallback(self, input, weight, q_scale, gamma, eps, q_int8, transpose): + raise NotImplementedError + + def forward(self, input: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor, gamma: torch.Tensor, + beta: torch.Tensor): + + add_bias = bias is not None + bias = bias if add_bias else torch.empty(1) # type: ignore + q_scale = weight.scale if hasattr(weight, 'scale') else torch.empty(1) # type: ignore + q_int8 = self.config.dtype == 
torch.int8 + + if self.config.norm_type == NormType.LayerNorm: + output, norm = self.qkv_gemm_func(input, weight, q_scale, bias, gamma, beta, self.config.epsilon, add_bias, + q_int8, self.config.transposed_mode) + else: + output, norm = self.qkv_gemm_func(input, weight, q_scale, gamma, self.config.epsilon, q_int8, + self.config.transposed_mode) + if add_bias: + output += bias + + return output, norm diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/residual_add.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/residual_add.py new file mode 100644 index 0000000000000000000000000000000000000000..6f9b35cbc05d7a1b233847530a728576e0c0aa5b --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/residual_add.py @@ -0,0 +1,64 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import os +import torch +from typing import Optional +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class ResidualAddOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig): + super(ResidualAddOp, self).__init__(config) + try: + if self.config.dtype in [torch.float16, torch.int8]: + self.residual_add_func = self.inference_module.residual_add_bias_fp16 + elif self.config.dtype == torch.bfloat16: + self.residual_add_func = self.inference_module.residual_add_bias_bf16 + else: + self.residual_add_func = self.inference_module.residual_add_bias_fp32 + except AttributeError: + self.residual_add_func = None + try: + self._vector_add = self.inference_module._vector_add + except AttributeError: + self._vector_add = None + + def forward(self, + hidden_state: torch.Tensor, + residual: torch.Tensor, + add_bias: bool, + attention_output: Optional[torch.Tensor] = None, + residual_add: Optional[torch.Tensor] = None, + attention_bias: Optional[torch.Tensor] = None, + final_bias: 
Optional[torch.Tensor] = None): + + if self.residual_add_func is not None: + if final_bias is None: + residual = self._vector_add(residual, hidden_state, 1.0 / self.config.mp_size) + else: + if not self.config.pre_layer_norm and residual_add is not None: + # only use residual add if its set and we are not pre layer norm + residual = residual_add + + self.residual_add_func(hidden_state, residual, attention_output, attention_bias, final_bias, + self.config.mp_size, self.config.mlp_after_attn, add_bias, + self.config.pre_layer_norm) + else: + # fallback + if os.environ.get('DS_KI_FALLBACK') == 'True' and self.config.mlp_after_attn: + if self.config.pre_layer_norm: + tmp = (residual.float() + attention_output.float() + attention_bias.float() + + final_bias.float()) / self.config.mp_size + hidden_state.float() + else: + tmp = residual.float() + hidden_state.float() + final_bias.float() + + input_dtype = hidden_state.dtype + residual = tmp.to(input_dtype) + else: + raise NotImplementedError + return residual diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/softmax.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/softmax.py new file mode 100644 index 0000000000000000000000000000000000000000..bc309d94df147b98d9b3c915de93282ba7e97e6f --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/softmax.py @@ -0,0 +1,53 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import os +import torch +import torch.nn.functional as F +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class SoftmaxOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig): + super(SoftmaxOp, self).__init__(config) + self.num_attention_heads_per_partition = config.heads // config.mp_size + try: + if self.config.dtype in [torch.float16, torch.int8]: + self.softmax_func = self.inference_module.softmax_fp16 + elif self.config.dtype == torch.bfloat16: + self.softmax_func = self.inference_module.softmax_bf16 + else: + self.softmax_func = self.inference_module.softmax_fp32 + except AttributeError: + self.softmax_func = self.softmax_fallback + + def softmax_fallback(self, attn_scores, attn_mask, alibi, triangular, recompute, local_attention, window_size, + async_op, layer_scale, head_offset, mp_size): + if os.environ.get('DS_KI_FALLBACK') == 'True': + alibi = alibi[head_offset:head_offset + self.num_attention_heads_per_partition] + input_dtype = attn_scores.dtype + if (triangular): + tri = ~torch.tril(torch.ones(attn_scores.size(), device=attn_scores.device)).to(bool) + attn_scores = torch.masked_fill(attn_scores * layer_scale, tri, torch.finfo(input_dtype).min) + if alibi is not None: + attn_scores += alibi + if attn_mask is not None: + # expand atten_mask from two dim into 4 dim, insert two dims in the middle + attn_mask = attn_mask[:, None, None, :] + attn_scores += attn_mask + output = F.softmax(attn_scores, dim=-1, dtype=torch.float32).to(input_dtype) + return output + else: + raise NotImplementedError + + def forward(self, attn_scores: torch.Tensor, attn_mask: torch.Tensor, alibi: torch.Tensor, triangular: bool, + recompute: bool, local_attention: bool, window_size: int, async_op: bool, layer_scale: float, + head_offset: int): + output = self.softmax_func(attn_scores, attn_mask, alibi, triangular, recompute, local_attention, window_size, + async_op, 
layer_scale, head_offset, self.config.mp_size) + + return output diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/softmax_context.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/softmax_context.py new file mode 100644 index 0000000000000000000000000000000000000000..0dc4e08a36335bdc999db8333ff7cb942b648958 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/softmax_context.py @@ -0,0 +1,47 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from deepspeed import comm as dist +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class SoftmaxContextOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig): + super(SoftmaxContextOp, self).__init__(config) + try: + if self.config.dtype in [torch.float16, torch.int8]: + self.softmax_context_func = self.inference_module.softmax_context_fp16 + elif self.config.dtype == torch.bfloat16: + self.softmax_context_func = self.inference_module.softmax_context_bf16 + else: + self.softmax_context_func = self.inference_module.softmax_context_fp32 + except AttributeError: + self.softmax_context_func = self.softmax_context_fallback + + def softmax_context_fallback(self, query_key_value, attn_mask, rotary_dim, rotate_half, rotate_every_two, heads, + num_kv, norm_factor, triangular_masking, local_attention, window_size, no_masking, + layer_id, num_layers, alibi, rope_theta): + raise NotImplementedError + + def forward(self, query_key_value: torch.Tensor, attn_mask: torch.Tensor, heads: int, num_kv: int, + norm_factor: float, no_masking: bool, layer_id: int, num_layers: int, alibi: torch.Tensor): + + if alibi is not None: + batch_heads = query_key_value.shape[0] * heads + offset = dist.get_rank() * batch_heads if dist.is_initialized() else 0 + alibi = alibi[offset:batch_heads + offset, :, :] + else: + 
alibi = torch.empty(1) + + output = self.softmax_context_func(query_key_value, attn_mask, self.config.rotary_dim, self.config.rotate_half, + self.config.rotate_every_two, heads, num_kv, norm_factor, + self.config.triangular_masking, self.config.local_attention, + self.config.window_size, no_masking, layer_id, num_layers, alibi, + self.config.rope_theta) + + return output diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__init__.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b7d1968df62a99849992c8b1e93698d9e51cec30 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__init__.py @@ -0,0 +1,11 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .residual_add import residual_add_bias +from .layer_norm import layer_norm, layer_norm_residual +from .gelu import gelu +from .softmax import softmax +from .ops import * +from .matmul_ext import fp16_matmul, matmul_4d, score_4d_matmul, context_4d_matmul diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/ops.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..466d9b72e5de6f6cd8d804a0a11cd671015f0674 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/ops.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/attention.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/attention.py new file mode 100644 index 0000000000000000000000000000000000000000..c05370ec74e59745933bc0476075883e0e2b15f0 --- /dev/null +++ 
b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/attention.py @@ -0,0 +1,387 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import math +import torch +import torch.nn as nn +import triton +import triton.language as tl +from deepspeed.accelerator import get_accelerator +from deepspeed import comm as dist +from deepspeed.ops.transformer.inference.op_binding import LinearOp, VectorMatMulOp, SoftmaxContextOp, QKVGemmOp +from deepspeed.ops.transformer.inference.triton import ( + softmax, + score_4d_matmul, + context_4d_matmul, +) + +minus_inf = -10000.0 + + +class TritonSelfAttention(nn.Module): + num_layers = 0 + + def __init__(self, config, mp_group=None, q_scales=None, q_groups=1, merge_count=1, qkv_merging=False): + super(TritonSelfAttention, self).__init__() + self.config = config + data_type = self.config.dtype + data_type_fp = torch.half if self.config.dtype == torch.int8 else self.config.dtype + assert data_type_fp == torch.half, "triton supports fp16 data_type_fp" + + self.config.layer_id = TritonSelfAttention.num_layers + TritonSelfAttention.num_layers = TritonSelfAttention.num_layers + 1 + device = get_accelerator().current_device_name() #if config.bigscience_bloom else 'cpu' + + assert config.mp_size == 1, "mp_size has to be 1 with triton attention yet" + if self.config.set_empty_params: + self.attn_qw = None + self.attn_qb = None + self.attn_kw = None + self.attn_kb = None + self.attn_vw = None + self.attn_vb = None + self.attn_qkvw = None + self.attn_qkvb = None + self.attn_ow = None + self.attn_ob = None + else: + qkv_size_per_partition = (self.config.hidden_size // self.config.mp_size) * 3 + self.attn_qkvw = nn.Parameter(torch.empty(self.config.hidden_size, + qkv_size_per_partition, + dtype=data_type, + device=device), + requires_grad=False) + self.attn_qkvb = nn.Parameter(torch.empty(qkv_size_per_partition, dtype=data_type_fp, device=device), + requires_grad=False) 
+ # self-ouput weights + out_size_per_partition = self.config.hidden_size // self.config.mp_size + self.attn_ow = nn.Parameter(torch.empty(out_size_per_partition, + self.config.hidden_size, + dtype=data_type, + device=device), + requires_grad=False) + + self.attn_ob = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type_fp, device=device), + requires_grad=False) + + self.num_attention_heads_per_partition = self.config.heads // self.config.mp_size + self.hidden_size_per_partition = self.config.hidden_size // self.config.mp_size + self.hidden_size_per_attention_head = self.config.hidden_size // self.config.heads + + self.mp_group = mp_group + self.use_flash = False + # triton flash attention is enabled when the compute capability >= 8.0 + if get_accelerator().is_triton_supported(): + self.use_flash = True + + # used for quantization + self.q_scales = q_scales + self.q_groups = q_groups + self.merge_count = int(math.log2(merge_count)) + + self.norm_factor = math.sqrt(self.config.hidden_size // self.config.heads) + if not config.use_mup: + self.norm_factor = math.sqrt(self.norm_factor) + + if self.config.scale_attn_by_inverse_layer_idx is True: + self.norm_factor *= math.sqrt(self.config.layer_id + 1) + # https://github.com/huggingface/transformers/blob/v4.24.0/src/transformers/models/gpt2/modeling_gpt2.py#L191 + + triton_autotune = self.config.triton_autotune and self.config.layer_id == 0 + self.qkv_func = QKVGemmOp(config) + self.score_context_func = SoftmaxContextOp(config) + self.linear_func = LinearOp(config) + self.vector_matmul_func = VectorMatMulOp(config) + + self.hidden_size = config.hidden_size + self.head_size = config.hidden_size // config.heads + self.scale = (1 / self.norm_factor / self.norm_factor if self.config.scale_attention else 1.0 + ) # making it back to 1/sqrt(head_size) + self.triangular_masking = self.config.triangular_masking + + # triton autotune table update for score/context matmul + if triton_autotune: + print(f"running triton 
autotune for regular attention kernel") + __class__._triton_autotune(2, self.config.max_out_tokens, self.head_size, self.config.hidden_size, + self.triangular_masking, self.scale) + + @staticmethod + def _triton_autotune(min_seqlen, + max_seqlen, + head_size, + hidden_size, + triangular_masking, + scale, + dtype=torch.float16): + from deepspeed.ops.transformer.inference.triton.matmul_ext import Fp16Matmul, score_4d_matmul, context_4d_matmul + seqlen = [(min_seqlen + i) + for i in range(0, max_seqlen - min_seqlen + Fp16Matmul._cache_stride + 1, Fp16Matmul._cache_stride)] + Fp16Matmul._read_autotune_table() + for N in seqlen: + qkv = torch.randn((1, N, 3 * hidden_size), dtype=dtype, device='cuda') + output = score_4d_matmul(qkv, head_size, triangular_masking, scale) + context_4d_matmul(output, qkv, head_size) + Fp16Matmul._update_autotune_table() + + def ds_compute_attention(self, qkv_out, input_mask, layer_past, alibi): + if isinstance(qkv_out, list): + qkv_out = qkv_out[0] + + no_masking = input_mask is None + + if no_masking: + input_mask = torch.empty(1) + + attn_key_value = self.score_context_func( + query_key_value=qkv_out, + attn_mask=((1 - input_mask).to(qkv_out.dtype) * + minus_inf) if input_mask.dtype == torch.int64 else input_mask, + heads=self.num_attention_heads_per_partition, + norm_factor=(1 / self.norm_factor if self.config.scale_attention else 1.0), + no_masking=no_masking, + layer_id=self.config.layer_id, + num_layers=TritonSelfAttention.num_layers, + alibi=alibi) + + context_layer, key_layer, value_layer = attn_key_value + return context_layer, key_layer, value_layer + + def forward( + self, + input, + input_mask, + head_mask=None, + layer_past=None, + get_present=False, # not used + encoder_hidden_states=None, # not used + encoder_attention_mask=None, # not used + triangularutput_attentions=False, # not used + norm_w=None, + norm_b=None, + alibi=None, + use_triton_attention=True): + + if not self.config.pre_layer_norm: + qkv_out = 
self.linear_func(input=input, + weight=self.attn_qkvw, + bias=self.attn_qkvb, + add_bias=self.attn_qkvb is not None, + do_flash_attn=False, + num_heads=self.num_attention_heads_per_partition, + num_layers=TritonSelfAttention.num_layers) + qkv = qkv_out + else: + qkv_out = self.qkv_func(input=input, + weight=self.attn_qkvw, + bias=(self.attn_qkvb if self.attn_qkvb is not None else norm_b), + gamma=norm_w, + beta=norm_b) + qkv = qkv_out[0] + + if use_triton_attention and (alibi is None): + context_layer = _triton_attention(qkv=qkv, + input_mask=input_mask, + scale=self.scale, + layer_past=layer_past, + alibi=alibi, + head_size=self.head_size, + use_triton_flash=self.use_flash, + use_cuda_flash=False, + triangular=self.triangular_masking) + key_layer, value_layer = qkv[:, :, self.hidden_size:2 * self.hidden_size], qkv[:, :, 2 * self.hidden_size:] + else: + context_layer, key_layer, value_layer = self.ds_compute_attention(qkv_out=qkv_out, + input_mask=input_mask, + layer_past=layer_past, + alibi=alibi) + output = self.vector_matmul_func(input=context_layer, weight=self.attn_ow) + + inp_norm = qkv_out[-1] + + if self.config.mlp_after_attn and self.mp_group is not None and dist.get_world_size(group=self.mp_group) > 1: + dist.all_reduce(output, group=self.mp_group) + + return (output, key_layer, value_layer, context_layer, inp_norm) + + +global inference_module + + +def _triton_attention(qkv, + input_mask, + layer_past, + alibi, + scale, + head_size, + triangular=False, + use_cuda_flash=False, + use_triton_flash=False, + use_ds_attention=False): + if isinstance(qkv, list): + qkv = qkv[0] + + assert alibi is None, "layer_past not supported in alibi yet" + + if use_triton_flash: + output = _triton_packed_flash(qkv, + head_size, + input_mask, + scale, + causal=triangular, + add_mask=(not triangular and input_mask is not None)) + else: + output = score_4d_matmul(qkv, head_size, triangular, scale) + if triangular: + output = softmax(output) + else: + output = softmax(output, 
input_mask) + output = context_4d_matmul(output, qkv, head_size) + + return output + + +''' +flash attention 2 +modified the triton kernel in +https://github.com/openai/triton/blob/08c16589573621fcb8cd5a9c3b8a0537077f876d/python/tutorials/06-fused-attention.py +''' + + +@triton.jit +def _flash_packed_kernel( + QKV, + mask, + ADD_MASK: tl.constexpr, + IS_CAUSAL: tl.constexpr, + sm_scale, + Out, + stride_qz, + stride_qn, + stride_qm, + stride_mz, + stride_oz, + stride_on, + Z, + H, + N_CTX, + P_SEQ, + hidden_size, + BLOCK_M: tl.constexpr, + BLOCK_DMODEL: tl.constexpr, + BLOCK_N: tl.constexpr, +): + start_m = tl.program_id(0) + off_hz = tl.program_id(1) + batch = off_hz // H + head = off_hz % H + + q_offset = batch * stride_qz + head * BLOCK_DMODEL + k_offset = q_offset + hidden_size + v_offset = k_offset + hidden_size + + # initialize offsets + offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M) + offs_n = tl.arange(0, BLOCK_N) + offs_d = tl.arange(0, BLOCK_DMODEL) + + q_ptrs = QKV + q_offset + offs_m[:, None] * stride_qn + offs_d[None, :] + k_ptrs = QKV + hidden_size + q_offset + offs_n[:, None] * stride_qn + offs_d[None, :] + v_ptrs = QKV + 2 * hidden_size + q_offset + offs_n[:, None] * stride_qn + offs_d[None, :] + + # mask + off_mask = batch * stride_mz + offs_n[None, :] + mask_ptrs = mask + off_mask + + # initialize pointer to m and l + m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf") + l_i = tl.zeros([BLOCK_M], dtype=tl.float32) + acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32) + # scale sm_scale by log_2(e) and use + # 2^x instead of exp in the loop because CSE and LICM + # don't work as expected with `exp` in the loop + qk_scale = sm_scale * 1.44269504 + # load q: it will stay in SRAM throughout + q = tl.load(q_ptrs, mask=offs_m[:, None] < N_CTX, other=0.0) + q = (q * qk_scale).to(tl.float16) + # loop over k, v and update accumulator + lo = 0 + hi = P_SEQ + (start_m + 1) * BLOCK_M if IS_CAUSAL else N_CTX + P_SEQ + for start_n in range(lo, 
hi, BLOCK_N): + # -- load k, v -- + k = tl.load(k_ptrs + start_n * stride_qn, mask=(start_n + offs_n)[:, None] < N_CTX, other=0.0) + v = tl.load(v_ptrs + start_n * stride_qn, mask=(start_n + offs_n)[:, None] < N_CTX, other=0.0) + # -- compute qk --- + qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float16) + + if ADD_MASK: + mask_val = tl.load(mask_ptrs) + mask_ptrs += BLOCK_N + qk = qk + mask_val.to(tl.float32) + + if IS_CAUSAL: + qk = tl.where(P_SEQ + offs_m[:, None] >= (start_n + offs_n[None, :]), qk, float("-inf")) + + qk += tl.dot(q, tl.trans(k), out_dtype=tl.float16) + qk += tl.where((start_n + offs_n)[None, :] < N_CTX, 0, minus_inf) + # -- compute scaling constant --- + m_i_new = tl.maximum(m_i, tl.max(qk, 1)) + alpha = tl.math.exp2(m_i - m_i_new) + p = tl.math.exp2(qk - m_i_new[:, None]) + # -- scale and update acc -- + acc_scale = l_i * 0 + alpha # workaround some compiler bug + acc *= acc_scale[:, None] + acc += tl.dot(p.to(tl.float16), v.to(tl.float16)) + # -- update m_i and l_i -- + l_i = l_i * alpha + tl.sum(p, 1) + m_i = m_i_new + + # write back l and m + acc = acc / l_i[:, None] + o_offset = batch * stride_oz + head * BLOCK_DMODEL + out_ptrs = Out + o_offset + (offs_m[:, None] * stride_on + offs_d[None, :]) + tl.store(out_ptrs, acc.to(tl.float16), mask=offs_m[:, None] < N_CTX) + + +def _triton_packed_flash(qkv, head_size, mask, sm_scale, causal=False, add_mask=True): + heads = qkv.shape[-1] // 3 // head_size + hidden_size = qkv.shape[-1] // 3 + + BLOCK_M = 128 + BLOCK_N = 64 if head_size <= 64 else 32 + + o = torch.empty((qkv.shape[0], qkv.shape[1], hidden_size), device=qkv.device, dtype=torch.half) + if mask is None: + mask = torch.empty(0) + add_mask = False + + grid = (triton.cdiv(qkv.shape[1], BLOCK_M), qkv.shape[0] * heads, 1) + num_stages = 4 if head_size <= 64 else 3 + num_warps = 4 + P_SEQ = 0 + + _flash_packed_kernel[grid](qkv, + mask, + add_mask, + causal, + sm_scale, + o, + qkv.stride(0), + qkv.stride(1), + qkv.stride(2), + mask.stride(1) if 
add_mask else 0, + o.stride(0), + o.stride(1), + qkv.shape[0], + heads, + qkv.shape[1], + P_SEQ, + hidden_size, + BLOCK_M=BLOCK_M, + BLOCK_N=BLOCK_N, + BLOCK_DMODEL=head_size, + num_warps=num_warps, + num_stages=num_stages) + + return o diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/gelu.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/gelu.py new file mode 100644 index 0000000000000000000000000000000000000000..738d7d96a1c9d57c3de3558452922737b9b4f7b4 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/gelu.py @@ -0,0 +1,38 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +import triton +import triton.language as tl +from deepspeed.accelerator import get_accelerator + + +@triton.jit +def gelu_functor(x): + # Using approximation introduces greater parity errors. + # return tl.sigmoid(1.702 * x) * x + return x * 0.5 * (1.0 + tl.math.erf(x / 1.41421356237)) + + +@triton.jit +def gelu_kernel(x_ptr, output_ptr, n_elements, BLOCK_SIZE: tl.constexpr): + pid = tl.program_id(axis=0) + block_start = pid * BLOCK_SIZE + offsets = block_start + tl.arange(0, BLOCK_SIZE) + mask = offsets < n_elements + x = tl.load(x_ptr + offsets, mask=mask) + output = gelu_functor(x) + tl.store(output_ptr + offsets, output, mask=mask) + + +def gelu(activations: torch.Tensor) -> torch.Tensor: + assert activations.is_contiguous() + assert get_accelerator().on_accelerator(activations) + + output = torch.empty_like(activations) + n_elements = output.numel() + grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']), ) + gelu_kernel[grid](activations, output, n_elements, BLOCK_SIZE=1024) + return output diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/matmul_ext.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/matmul_ext.py 
new file mode 100644 index 0000000000000000000000000000000000000000..d6f72b4efb0b20829cfe147782932f421e734478 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/matmul_ext.py @@ -0,0 +1,444 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +import triton +import os +from filelock import FileLock +import deepspeed.ops.transformer.inference.triton.triton_matmul_kernel as triton_matmul_kernel +import pickle +from io import open +import deepspeed +from pathlib import Path +import atexit + + +# ----------------------------------------------------------------------------- +# util class/functions for triton +def _default_cache_dir(): + return os.path.join(Path.home(), ".triton", "autotune") + + +def bias_add_activation(C, bias=None, activation=""): + if bias is not None: + C += bias + # activation + if activation == "relu": + relu = torch.nn.Relu() + C = relu(C) + elif activation == "leaky_relu": + leaky_relu = torch.nn.LeakyReLU(0.01) + C = leaky_relu(C) + elif activation == "gelu": + sigmoid = torch.nn.Sigmoid() + C = sigmoid(1.702 * C) * C + elif activation == "sigmoid": + sigmoid = torch.nn.Sigmoid() + C = sigmoid(C) + return C + + +class AutotuneCacheManager: + """ + Cache manager for autotune + """ + + def __init__(self, key): + self.key = key + self.file_path = None + self.lock_path = None + # if caching is enabled, get the lock and bin path + self.cache_dir = os.environ.get('TRITON_CACHE_DIR', _default_cache_dir()) + if self.cache_dir: + os.makedirs(self.cache_dir, exist_ok=True) + if self.cache_dir: + self.file_path = os.path.join(self.cache_dir, self.key + ".pickle") + self.lock_path = self.file_path + ".lock" + + def has_file(self): + return self.file_path and os.path.exists(self.file_path) + + def put(self, table): + if self.file_path: + assert self.lock_path is not None + with FileLock(self.lock_path): + with open(self.file_path + ".tmp", 'wb') 
as handle: + pickle.dump(table, handle) + os.rename(self.file_path + ".tmp", self.file_path) + + def load(self): + if os.path.exists(self.file_path): + with open(self.file_path, 'rb') as handle: + loaded_dict = pickle.load(handle) + return loaded_dict + else: + return None + + +# ----------------------------------------------------------------------------- +# triton matmul class + + +class MatmulExt(torch.autograd.Function): + """ + a wrapper class that can call different triton matmul kernels depending on the input parameters + """ + + @staticmethod + def forward(A, B, bias=None, activation="", use_triton=True, update_autotune_table=False): + """ + A: input, activation matrix A + B: input, weight matrix B + """ + matmul = None + quantize_activation = False + Batch = 0 + + if len(A.shape) == 3: # if A is 3d-tensor where batch index is given as 0-axis + assert A.is_contiguous(), "matrix A must be contiguous" + Batch, M, K = A.shape + A = A.view(-1, K) + + # fp16 activation and fp16 weight matmul into fp16 output + matmul = fp16_matmul + C = matmul.forward(A, B, use_triton=use_triton, bias=bias, activation=activation) + + if matmul and update_autotune_table: + matmul._update_autotune_table() + + if Batch > 0: + C = C.view(Batch, M, -1) + + return C + + +class TritonMatmul(torch.autograd.Function): + """ + triton matmul kernel superclass + """ + + def __init__(self): + pass + + @staticmethod + def _ref_forward(A, B, ref_dtype=torch.float32): + C = torch.matmul(A.type(ref_dtype), B.type(ref_dtype)) + return C + + @staticmethod + def _read_autotune_table(cache_key, triton_kernel): + cache_manager = AutotuneCacheManager(cache_key) + table = cache_manager.load() + if table: + triton_kernel.cache = table + + @staticmethod + def _write_autotune_table(cache_key, triton_kernel): + cache_manager = AutotuneCacheManager(cache_key) + cache_manager.put(triton_kernel.cache) + + @staticmethod + def _update_autotune_table(cache_key, triton_kernel): + cache_manager = 
AutotuneCacheManager(cache_key) + autotune_table = cache_manager.load() + if autotune_table is None: + autotune_table = dict() + autotune_table.update(triton_kernel.cache) # always overwrite with the new autotune results + cache_manager = AutotuneCacheManager(cache_key) + cache_manager.put(autotune_table) + + @staticmethod + def forward( + A, + B, + ref_dtype=torch.float32, # fp32 only + bias=None, + activation=""): + C = torch.matmul(A.type(ref_dtype), B.type(ref_dtype)) + C = bias_add_activation(C, bias, activation) + return C + + +class Fp16Matmul(TritonMatmul): + """ + fp16 matrix multiplication kernel + dtypes: fp16 x fp16 = fp16 + """ + + _2d_kernel = triton_matmul_kernel._fp_matmul + _4d_kernel = triton_matmul_kernel.matmul_4d_kernel + _cache_stride = 32 + + def __init__(self, read_cache=True): + super().__init__() + if read_cache: + __class__._read_autotune_table() + + def skip_autotune(self): + __class__._2d_kernel.configs = [__class__._2d_kernel.configs[0]] + __class__._4d_kernel.configs = [__class__._4d_kernel.configs[0]] + + @staticmethod + def forward(A, B, use_triton=True, bias=None, activation=""): + if use_triton: + device = A.device + # handle non-contiguous inputs if necessary + if A.stride(0) > 1 and A.stride(1) > 1: + A = A.contiguous() + if B.stride(0) > 1 and B.stride(1) > 1: + B = B.contiguous() + # checks constraints + assert A.shape[1] == B.shape[0], "incompatible dimensions" + M, K = A.shape + _, N = B.shape + # allocates output + C = torch.empty((M, N), device=device, dtype=A.dtype) + # accumulator types + ACC_TYPE = triton.language.float32 if A.dtype in [torch.float16, torch.bfloat16, torch.float32 + ] else triton.language.int32 + # launch kernel + grid = lambda META: (triton.cdiv(M, META['BLOCK_M']) * triton.cdiv(N, META['BLOCK_N']), META['SPLIT_K']) + __class__._2d_kernel[grid](A, + B, + C, + M, + N, + K, + bias, + A.stride(0), + A.stride(1), + B.stride(0), + B.stride(1), + C.stride(0), + C.stride(1), + M // __class__._cache_stride, + 
N // __class__._cache_stride, + K // __class__._cache_stride, + GROUP_M=8, + ACC_TYPE=ACC_TYPE, + BIAS_ADD=(0 if bias is None else 1), + ACTIVATION=activation) + else: + C = torch.matmul(A, B) + return C + + @staticmethod + def _matmul_4d(a, b): + assert a.shape[-1] == b.shape[-2], "incompatible dimensions" + assert a.is_contiguous(), "matrix A must be contiguous" + assert b.is_contiguous(), "matrix B must be contiguous" + + B, H, M, K = a.shape + B, H, K, N = b.shape + + assert K > 1, "inner-product dimension K should be larger than 1" + + c = torch.empty((B, H, M, N), device=a.device, dtype=a.dtype) + + grid = lambda META: ( + triton.cdiv(M, META['BLOCK_SIZE_M']) * triton.cdiv(N, META['BLOCK_SIZE_N']), + H, + B, + ) + + __class__._4d_kernel[grid]( + a, + b, + c, + M, + N, + K, + M // __class__._cache_stride, + N // __class__._cache_stride, + K // __class__._cache_stride, + a.stride(0), + a.stride(1), + a.stride(2), + a.stride(3), + b.stride(0), + b.stride(1), + b.stride(2), + b.stride(3), + c.stride(0), + c.stride(1), + c.stride(2), + c.stride(3), + scale=-1.0, + MASK=False, + ) + return c + + @staticmethod + def _score_4d_matmul(input, head_size, input_mask, scale=-1.0): + assert input.is_contiguous(), "matrix input must be contiguous" + + batches = input.shape[0] + d_model = input.shape[-1] // 3 + num_of_heads = d_model // head_size + + q = input[:, :, :d_model] + k = input[:, :, d_model:d_model * 2] + + q = q.view(batches, -1, num_of_heads, head_size) + k = k.view(batches, -1, num_of_heads, head_size) + + # checks constraints + assert q.shape == k.shape, "incompatible dimensions" + B, M, H, K = q.shape + B, N, H, K = k.shape + + assert K > 1, "inner-product dimension K should be larger than 1" + + # allocates output + output = torch.empty((B, H, M, N), device=q.device, dtype=q.dtype) + grid = lambda META: ( + triton.cdiv(M, META["BLOCK_SIZE_M"]) * triton.cdiv(N, META["BLOCK_SIZE_N"]), + H, + B, + ) + __class__._4d_kernel[grid]( + q, + k, + output, + M, + N, + 
K, + M // __class__._cache_stride, + N // __class__._cache_stride, + K // __class__._cache_stride, + q.stride(0), + q.stride(2), + q.stride(1), + q.stride(3), + k.stride(0), + k.stride(2), + k.stride(3), + k.stride(1), + output.stride(0), + output.stride(1), + output.stride(2), + output.stride(3), + scale=scale, + MASK=False, + ) + return output + + @staticmethod + def _context_4d_matmul(prob, input, head_size): + assert prob.is_contiguous(), "matrix prob must be contiguous" + assert input.is_contiguous(), "matrix input must be contiguous" + + batches = input.shape[0] + d_model = input.shape[-1] // 3 + num_of_heads = d_model // head_size + + v = input[:, :, d_model * 2:] + + v = v.view(batches, -1, num_of_heads, head_size) + + # checks constraints + assert (prob.shape[0] == v.shape[0] and prob.shape[1] == v.shape[2] and prob.shape[2] == v.shape[1] + and prob.shape[3] == v.shape[1]), "incompatible dimensions" + B, H, M, K = prob.shape + B, K, H, N = v.shape + + assert K > 1, "inner-product dimension K should be larger than 1" + + # allocates output + output = torch.empty((B, M, H, N), device=v.device, dtype=v.dtype) + grid = lambda META: ( + triton.cdiv(M, META["BLOCK_SIZE_M"]) * triton.cdiv(N, META["BLOCK_SIZE_N"]), + H, + B, + ) + + __class__._4d_kernel[grid]( + prob, + v, + output, + M, + N, + K, + M // __class__._cache_stride, + N // __class__._cache_stride, + K // __class__._cache_stride, + prob.stride(0), + prob.stride(1), + prob.stride(2), + prob.stride(3), + v.stride(0), + v.stride(2), + v.stride(1), + v.stride(3), + # Here we also transpose the output when writing to memory. 
+ output.stride(0), + output.stride(2), + output.stride(1), + output.stride(3), + scale=-1, + MASK=False, + ) + return output.view(batches, -1, d_model) + + @staticmethod + def _ref_forward(A, B, ref_dtype=torch.float32, bias=None, activation=""): + C = torch.matmul(A.type(ref_dtype), B.type(ref_dtype)) + C = bias_add_activation(C, bias, activation) + return C + + @staticmethod + def _check_parity(A, + B, + output_dtype, + SA=None, + SB=None, + qblock_size=None, + ref_dtype=torch.float32, + tol=0.01, + use_triton=True, + bias=None, + activation=""): + torch_output = __class__._ref_forward(A, B, ref_dtype=ref_dtype, bias=bias, activation=activation) + triton_output = __class__.forward(A, B, use_triton=use_triton, bias=bias, activation=activation) + assert torch.allclose(triton_output.cpu().type(torch_output.dtype), torch_output.cpu(), rtol=tol) + print(f"{__class__.__name__}: PASSed the parity check") + return triton_output, torch_output + + @staticmethod + def _read_autotune_table(): + TritonMatmul._read_autotune_table(__class__.__name__ + "_2d_kernel", __class__._2d_kernel) + TritonMatmul._read_autotune_table(__class__.__name__ + "_4d_kernel", __class__._4d_kernel) + + @staticmethod + def _write_autotune_table(): + TritonMatmul._write_autotune_table(__class__.__name__ + "_2d_kernel", __class__._2d_kernel) + TritonMatmul._write_autotune_table(__class__.__name__ + "_4d_kernel", __class__._4d_kernel) + + @staticmethod + def _update_autotune_table(): + TritonMatmul._update_autotune_table(__class__.__name__ + "_2d_kernel", __class__._2d_kernel) + TritonMatmul._update_autotune_table(__class__.__name__ + "_4d_kernel", __class__._4d_kernel) + + +# ----------------------------------------------------------------------------- +# mapping +if deepspeed.HAS_TRITON: + fp16_matmul = Fp16Matmul() + matmul = MatmulExt.forward + matmul_4d = fp16_matmul._matmul_4d + score_4d_matmul = fp16_matmul._score_4d_matmul + context_4d_matmul = fp16_matmul._context_4d_matmul +else: + 
fp16_matmul = None + matmul = None + matmul_4d = None + score_4d_matmul = None + context_4d_matmul = None + + +@atexit.register +def matmul_ext_update_autotune_table(): + if deepspeed.HAS_TRITON: + fp16_matmul._update_autotune_table() diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/softmax.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/softmax.py new file mode 100644 index 0000000000000000000000000000000000000000..1ee10d63e6cf8bfa6723856b53b7ca9ec30d3fdd --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/softmax.py @@ -0,0 +1,89 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +import triton +import triton.language as tl +''' +softmax +modified the triton kernel in +https://github.com/openai/triton/blob/34817ecc954a6f4ca7b4dfb352fdde1f8bd49ca5/python/tutorials/02-fused-softmax.py +''' + + +@triton.jit +def softmax_kernel(output_ptr, input_ptr, stride, n_cols, BLOCK_SIZE: tl.constexpr): + row_idx = tl.program_id(0) + row_start_ptr = input_ptr + row_idx * stride + col_offsets = tl.arange(0, BLOCK_SIZE) + input_ptrs = row_start_ptr + col_offsets + row = tl.load(input_ptrs, mask=col_offsets < n_cols, other=-float('inf')).to(tl.float32) + row_minus_max = row - tl.max(row, axis=0) + numerator = tl.exp(row_minus_max) + denominator = tl.sum(numerator, axis=0) + softmax_output = numerator / denominator + output_row_start_ptr = output_ptr + row_idx * stride + output_ptrs = output_row_start_ptr + col_offsets + tl.store(output_ptrs, softmax_output, mask=col_offsets < n_cols) + + +@triton.jit +def masked_softmax_kernel(output_ptr, input_ptr, stride, mask_ptr, mask_stride, n_cols, BLOCK_SIZE: tl.constexpr): + row_idx = tl.program_id(0) + row_start_ptr = input_ptr + row_idx * stride + col_offsets = tl.arange(0, BLOCK_SIZE) + input_ptrs = row_start_ptr + col_offsets + mask_ptrs = 
mask_ptr + col_offsets + row_idx * mask_stride # mask_stride is 0 for 1d mask + row = tl.load(input_ptrs, mask=col_offsets < n_cols, other=-float('inf')).to(tl.float32) + mask = tl.load(mask_ptrs, mask=col_offsets < n_cols, other=0).to(tl.float32) + row_minus_max = row - tl.max(row, axis=0) + row_minus_max = row_minus_max + mask + numerator = tl.exp(row_minus_max) + denominator = tl.sum(numerator, axis=0) + softmax_output = numerator / denominator + output_row_start_ptr = output_ptr + row_idx * stride + output_ptrs = output_row_start_ptr + col_offsets + tl.store(output_ptrs, softmax_output, mask=col_offsets < n_cols) + + +def softmax(input: torch.Tensor, mask: torch.Tensor = None, dim=-1) -> torch.Tensor: + assert input.is_contiguous() + assert (dim == -1) or (dim == len(input.shape) - 1), "Only dim=-1 is supported" + + use_mask = False if mask is None else True + input_arg = input.view(-1, input.shape[-1]) + n_rows, n_cols = input_arg.shape + BLOCK_SIZE = max(triton.next_power_of_2(n_cols), 2) + num_warps = 4 + if BLOCK_SIZE >= 2048: + num_warps = 8 + if BLOCK_SIZE >= 4096: + num_warps = 16 + # Allocate output + output = torch.empty_like(input) + if use_mask: + assert mask.is_contiguous() + mask = mask.view(-1, mask.shape[-1]) + mask_stride = mask.shape[-1] if mask.shape[-2] > 1 else 0 + masked_softmax_kernel[(n_rows, )]( + output, + input, + input_arg.stride(0), + mask, + mask_stride, + n_cols, + num_warps=num_warps, + BLOCK_SIZE=BLOCK_SIZE, + ) + else: + softmax_kernel[(n_rows, )]( + output, + input, + input_arg.stride(0), + n_cols, + num_warps=num_warps, + BLOCK_SIZE=BLOCK_SIZE, + ) + return output diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/transformer.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..bfd4d60dcb1ceafff2d87b62a78a8cf480ab9448 --- /dev/null +++ 
b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/transformer.py @@ -0,0 +1,412 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import json +import math +import torch +from torch import nn +from torch.autograd import Function +from deepspeed.accelerator import get_accelerator +from deepspeed.ops.op_builder import TransformerBuilder, StochasticTransformerBuilder + +# Cuda modules will be imported if needed +transformer_cuda_module = None +stochastic_transformer_cuda_module = None + + +class TransformerConfig(): + + def __init__(self, batch_size, hidden_size, intermediate_size, heads, attn_dropout_ratio, hidden_dropout_ratio, + num_hidden_layers, initializer_range): + self.layer_id = -1 + self.batch_size = batch_size + self.hidden_size = hidden_size + self.intermediate_size = intermediate_size + self.heads = heads + self.attn_dropout_ratio = attn_dropout_ratio + self.hidden_dropout_ratio = hidden_dropout_ratio + self.num_hidden_layers = num_hidden_layers + self.initializer_range = initializer_range + + +class DeepSpeedTransformerConfig(TransformerConfig): + """Initialize the DeepSpeed Transformer Config. 
+ + Arguments: + batch_size: The maximum batch size used for running the kernel on each GPU + + hidden_size: The hidden size of the transformer layer + + intermediate_size: The intermediate size of the feed-forward part of transformer layer + + heads: The number of heads in the self-attention of the transformer layer + + attn_dropout_ratio: The ratio of dropout for the attention's output + + hidden_dropout_ratio: The ratio of dropout for the transformer's output + + num_hidden_layers: The number of transformer layers + + initializer_range: BERT model's initializer range for initializing parameter data + + local_rank: Optional: The rank of GPU running the transformer kernel, it is not required + to use if the model already set the current device, otherwise need to set it + so that the transformer kernel can work on the right device + + seed: The random seed for the dropout layers + + fp16: Enable half-precision computation + + pre_layer_norm: Select between Pre-LN or Post-LN transformer architecture + + normalize_invertible: Optional: Enable invertible LayerNorm execution (dropping the input activation), + default is False + + gelu_checkpoint: Optional: Enable checkpointing of Gelu activation output to save memory, + default is False + + adjust_init_range: Optional: Set as True (default) if the model adjusts the weight initial values of + its self-attention output and layer output, False keeps the initializer_range no change. + See the adjustment below: + output_std = self.config.initializer_range / math.sqrt(2.0 * num_layers) + + attn_dropout_checkpoint: Optional: Enable checkpointing of attention dropout to save memory, + default is False + + stochastic_mode: Enable for high performance, please note that this flag has some level of + non-determinism and can produce different results on different runs. However, we have seen + that by enabling it, the pretraining tasks such as BERT are not affected and can obtain + a high accuracy level. 
On the other hand, for the downstream tasks, such as fine-tuning, we recommend + to turn it off in order to be able to reproduce the same result through the regular kernel execution. + + return_tuple: Enable if using the return_tuple interface style for sending out the forward results. + + training: Enable for training rather than inference. + """ + + def __init__(self, + batch_size=-1, + hidden_size=-1, + intermediate_size=-1, + heads=-1, + attn_dropout_ratio=-1, + hidden_dropout_ratio=-1, + num_hidden_layers=-1, + initializer_range=-1, + layer_norm_eps=1e-12, + local_rank=-1, + seed=-1, + fp16=False, + pre_layer_norm=True, + normalize_invertible=False, + gelu_checkpoint=False, + adjust_init_range=True, + attn_dropout_checkpoint=False, + stochastic_mode=False, + return_tuple=False, + training=True): + super(DeepSpeedTransformerConfig, + self).__init__(batch_size, hidden_size, + (intermediate_size if intermediate_size > 0 else 4 * hidden_size), heads, + attn_dropout_ratio, hidden_dropout_ratio, num_hidden_layers, initializer_range) + self.fp16 = fp16 + self.pre_layer_norm = pre_layer_norm + self.local_rank = local_rank + self.seed = seed + self.normalize_invertible = normalize_invertible + self.gelu_checkpoint = gelu_checkpoint # True: if higher batch size is required + self.adjust_init_range = adjust_init_range + self.test_gemm = False + self.layer_norm_eps = layer_norm_eps + self.training = training + self.is_grad_enabled = True + self.attn_dropout_checkpoint = attn_dropout_checkpoint + self.stochastic_mode = stochastic_mode + self.return_tuple = return_tuple + + @classmethod + def from_dict(cls, json_object): + config = DeepSpeedTransformerConfig() + for key, value in json_object.items(): + config.__dict__[key] = value + return config + + @classmethod + def from_json_file(cls, json_file): + with open(json_file, "r", encoding='utf-16') as reader: + text = reader.read() + return cls.from_dict(json.loads(text)) + + +class DeepSpeedTransformerFunction(Function): + + 
# --- methods of DeepSpeedTransformerFunction (the `class` statement closes the
#     previous chunk); they call directly into the prebuilt CUDA extension ---

    @staticmethod
    def forward(ctx, input, input_mask, self, grads, layer_id, attn_qkvw, attn_qkvb, attn_ow, attn_ob, attn_nw,
                attn_nb, inter_w, inter_b, output_w, output_b, norm_w, norm_b, config):
        # Run one fused transformer layer via the prebuilt CUDA module; the
        # stochastic variant is selected by config.stochastic_mode and the
        # numeric precision by config.fp16.
        cuda_module = stochastic_transformer_cuda_module if config.stochastic_mode else transformer_cuda_module
        forward_func = cuda_module.forward_fp16 if config.fp16 else cuda_module.forward_fp32

        inp_size = input.size()
        # The kernel requires the sequence dim to be a multiple of 16: pad the
        # input with random values and extend the mask with -10000 so the
        # padded positions are suppressed by the additive attention mask.
        if inp_size[1] % 16 != 0:
            input = torch.cat(
                (input,
                 torch.randn(
                     (inp_size[0], (16 - (inp_size[1] % 16)), inp_size[2]), device=input.device, dtype=input.dtype)),
                1)
            input_mask = torch.cat((input_mask, torch.ones((inp_size[0], input_mask.shape[1], input_mask.shape[2], \
                (16 - (inp_size[1] % 16))), device=input_mask.device, dtype=input_mask.dtype) * -10000), 3)

        # The CUDA forward returns the layer output plus every intermediate
        # activation/mask needed by the backward pass.
        (output, inp_norm, qkv_tf, soft_inp, ctx_bufB, attn_o_inp, add_res, ff1_inp, gelu_inp, ff2_inp,
         attn_prob_dropout_mask, attn_output_dropout_mask, layer_output_dropout_mask, attn_layer_norm_var,
         attn_layer_norm_mean, layer_norm_var, layer_norm_mean) = forward_func(
             config.layer_id, input, input_mask, attn_qkvw, attn_qkvb, attn_ow, attn_ob, attn_nw, attn_nb, inter_w,
             inter_b, output_w, output_b, norm_w, norm_b, config.training and config.is_grad_enabled,
             config.pre_layer_norm, config.attn_dropout_checkpoint, config.normalize_invertible,
             config.gelu_checkpoint)

        # For testing only.
        if grads is not None:
            # NOTE(review): only i == 2 (the V slice of the fused QKV weight /
            # bias) is hooked, even though the label expression covers Q/K/V —
            # presumably intentional for the test; confirm.
            for i in [2]:
                attn_qkvw.register_hook(lambda x, i=i, self=self: grads.append([
                    x[i * attn_ow.size(0):(i + 1) * attn_ow.size(0)], ("Q_W" if i == 0 else "K_W" if i == 1 else "V_W")
                ]))
            for i in [2]:
                attn_qkvb.register_hook(lambda x, i=i, self=self: grads.append([
                    x[i * attn_ow.size(0):(i + 1) * attn_ow.size(0)], ("Q_B" if i == 0 else "K_B" if i == 1 else "V_B")
                ]))

            attn_ow.register_hook(lambda x, self=self: grads.append([x, "O_W"]))
            attn_ob.register_hook(lambda x, self=self: grads.append([x, "O_B"]))
            attn_nw.register_hook(lambda x, self=self: grads.append([x, "N2_W"]))
            attn_nb.register_hook(lambda x, self=self: grads.append([x, "N2_B"]))
            inter_w.register_hook(lambda x, self=self: grads.append([x, "int_W"]))
            inter_b.register_hook(lambda x, self=self: grads.append([x, "int_B"]))
            output_w.register_hook(lambda x, self=self: grads.append([x, "out_W"]))
            output_b.register_hook(lambda x, self=self: grads.append([x, "out_B"]))
            norm_w.register_hook(lambda x, self=self: grads.append([x, "norm_W"]))
            norm_b.register_hook(lambda x, self=self: grads.append([x, "norm_B"]))

        # Stash what backward needs; with pre-LN + invertible norm the input
        # activation can be dropped (it is recomputed), hence the smaller set.
        if config.is_grad_enabled and config.training:
            if (config.pre_layer_norm and config.normalize_invertible):
                ctx.save_for_backward(input_mask, attn_qkvw, attn_qkvb, attn_ow, attn_ob, attn_nw, attn_nb, inter_w,
                                      inter_b, output_w, output_b, norm_w, norm_b)
            else:
                ctx.save_for_backward(output, input, input_mask, attn_qkvw, attn_qkvb, attn_ow, attn_ob, attn_nw,
                                      attn_nb, inter_w, inter_b, output_w, output_b, norm_w, norm_b)

            ctx.config = config
            if (config.pre_layer_norm or not config.normalize_invertible):
                ctx.inp_norm = inp_norm

            ctx.qkv_tf = qkv_tf
            ctx.soft_inp = soft_inp
            # Skipped when checkpointing: backward recomputes from soft_inp instead.
            if not config.attn_dropout_checkpoint:
                ctx.ctx_bufB = ctx_bufB

            ctx.attn_o_inp = attn_o_inp
            if not config.normalize_invertible:
                ctx.add_res = add_res

            ctx.attn_layer_norm_mean = attn_layer_norm_mean
            ctx.layer_norm_mean = layer_norm_mean

            ctx.ff1_inp = ff1_inp
            # Skipped when checkpointing Gelu: backward recomputes from ff2_inp.
            if not config.gelu_checkpoint:
                ctx.gelu_inp = gelu_inp

            ctx.ff2_inp = ff2_inp
            ctx.attn_prob_dropout_mask = attn_prob_dropout_mask
            ctx.attn_output_dropout_mask = attn_output_dropout_mask
            ctx.layer_output_dropout_mask = layer_output_dropout_mask
            ctx.attn_layer_norm_var = attn_layer_norm_var
            ctx.layer_norm_var = layer_norm_var

        # Strip the sequence padding added above before returning to the caller.
        if inp_size[1] % 16 != 0:
            output = torch.narrow(output, 1, 0, inp_size[1])

        if config.return_tuple:
            return (output, )  # outputs -> (output) : outputs[0] = output
        else:
            return output

    @staticmethod
    def backward(ctx, grad_output):
        bsz = grad_output.shape[0]
        grad_output_shape = grad_output.size()
        # Mirror forward's padding: the kernel expects seq % 16 == 0; pad the
        # incoming gradient with zeros.
        if grad_output_shape[1] % 16 != 0:
            grad_output = torch.cat((grad_output, torch.zeros((bsz, (16 - (grad_output_shape[1] % 16)), \
                grad_output_shape[2]), device=grad_output.device, dtype=grad_output.dtype)), 1)

        # Backward is only valid when forward ran in training mode (otherwise
        # the activations above were never saved).
        assert ctx.config.training

        if (ctx.config.pre_layer_norm and ctx.config.normalize_invertible):
            (input_mask, attn_qkvw, attn_qkvb, attn_ow, attn_ob, attn_nw, attn_nb, inter_w, inter_b, output_w,
             output_b, norm_w, norm_b) = ctx.saved_tensors
        else:
            (output, input, input_mask, attn_qkvw, attn_qkvb, attn_ow, attn_ob, attn_nw, attn_nb, inter_w, inter_b,
             output_w, output_b, norm_w, norm_b) = ctx.saved_tensors

        cuda_module = stochastic_transformer_cuda_module if ctx.config.stochastic_mode else transformer_cuda_module
        backward_func = cuda_module.backward_fp16 if ctx.config.fp16 else cuda_module.backward_fp32

        # Argument order here must match the extension's signature exactly; the
        # conditional expressions substitute recomputable buffers when the
        # corresponding checkpointing flag dropped them in forward.
        (grad_input, grad_attn_qkvw, grad_attn_qkvb, grad_attn_ow, grad_attn_ob, grad_attn_nw, grad_attn_nb,
         grad_inter_w, grad_inter_b, grad_output_w, grad_output_b, grad_norm_w, grad_norm_b) = backward_func(
             ctx.config.layer_id, grad_output,
             (ctx.inp_norm if (ctx.config.pre_layer_norm and ctx.config.normalize_invertible) else output),
             (ctx.inp_norm if (ctx.config.pre_layer_norm or not ctx.config.normalize_invertible) else input),
             ctx.qkv_tf, ctx.soft_inp, (ctx.soft_inp if ctx.config.attn_dropout_checkpoint else ctx.ctx_bufB),
             ctx.attn_o_inp, (ctx.ff1_inp if ctx.config.normalize_invertible else ctx.add_res), ctx.ff1_inp,
             (ctx.ff2_inp if ctx.config.gelu_checkpoint else ctx.gelu_inp), ctx.ff2_inp, ctx.attn_prob_dropout_mask,
             ctx.attn_output_dropout_mask, ctx.layer_output_dropout_mask, ctx.attn_layer_norm_var,
             ctx.attn_layer_norm_mean, ctx.layer_norm_var, ctx.layer_norm_mean,
             (ctx.inp_norm if
              (ctx.config.pre_layer_norm and ctx.config.normalize_invertible) else input), input_mask, attn_qkvw,
             attn_qkvb, attn_ow, attn_ob, attn_nw, attn_nb, inter_w, inter_b, output_w, output_b, norm_w, norm_b)

        # This appears to be an effective way to release context memory
        ctx.qkv_tf = None
        ctx.soft_inp = None
        ctx.ctx_bufB = None
        ctx.gelu_inp = None
        ctx.ff2_inp = None
        ctx.attn_o_inp = None
        ctx.ff1_inp = None
        ctx.add_res = None
        ctx.inp_norm = None
        ctx.config = None
        ctx.attn_layer_norm_mean = None
        ctx.layer_norm_mean = None
        ctx.attn_prob_dropout_mask = None
        ctx.attn_output_dropout_mask = None
        ctx.layer_output_dropout_mask = None
        ctx.attn_layer_norm_var = None
        ctx.layer_norm_var = None

        # Strip the zero padding added above so grad_input matches the caller's shape.
        if grad_output_shape[1] % 16 != 0:
            grad_input = torch.narrow(grad_input, 1, 0, grad_output_shape[1])

        # One gradient slot per forward argument; the Nones cover the
        # non-tensor args (input_mask, self, grads, layer_id, config).
        return (grad_input, None, None, None, None, grad_attn_qkvw, grad_attn_qkvb, grad_attn_ow, grad_attn_ob,
                grad_attn_nw, grad_attn_nb, grad_inter_w, grad_inter_b, grad_output_w, grad_output_b, grad_norm_w,
                grad_norm_b, None)


class DeepSpeedTransformerLayer(nn.Module):
    """Initialize the DeepSpeed Transformer Layer.

    Static variable:
        layer_id: The layer-index counter starting from 0 and incrementing by 1 every time a layer object is instantiated,
        e.g. if a model has 24 transformer layers, layer_id goes from 0 to 23.
    Arguments:
        config: An object of DeepSpeedTransformerConfig

        initial_weights: Optional: Only used for unit test

        initial_biases: Optional: Only used for unit test
    """

    def __init__(self, config, initial_weights=None, initial_biases=None):
        super(DeepSpeedTransformerLayer, self).__init__()

        # Assign this instance the next global layer index and advance the
        # class-level counter (see the class docstring).
        self.config = config
        self.config.layer_id = DeepSpeedTransformerLayer.layer_id
        DeepSpeedTransformerLayer.layer_id = DeepSpeedTransformerLayer.layer_id + 1

        print("DeepSpeed Transformer config is ", self.config.__dict__)

        # Pin the accelerator device only when a rank was explicitly provided.
        if self.config.local_rank >= 0:
            get_accelerator().set_device(self.config.local_rank)

        if initial_weights is None and initial_biases is None:
            # Normal path: allocate uninitialized parameters and initialize
            # them below. QKV is fused into a single (3*hidden, hidden) weight.
            self.attn_qkvw = nn.Parameter(torch.Tensor(self.config.hidden_size * 3, self.config.hidden_size))
            self.attn_qkvb = nn.Parameter(torch.Tensor(self.config.hidden_size * 3))
            self.attn_ow = nn.Parameter(torch.Tensor(self.config.hidden_size, self.config.hidden_size))
            self.attn_ob = nn.Parameter(torch.Tensor(self.config.hidden_size))
            self.attn_nw = nn.Parameter(torch.Tensor(self.config.hidden_size))
            self.attn_nb = nn.Parameter(torch.Tensor(self.config.hidden_size))
            self.inter_w = nn.Parameter(torch.Tensor(self.config.intermediate_size, self.config.hidden_size))
            self.inter_b = nn.Parameter(torch.Tensor(self.config.intermediate_size))
            self.output_w = nn.Parameter(torch.Tensor(self.config.hidden_size, self.config.intermediate_size))
            self.output_b = nn.Parameter(torch.Tensor(self.config.hidden_size))
            self.norm_w = nn.Parameter(torch.Tensor(self.config.hidden_size))
            self.norm_b = nn.Parameter(torch.Tensor(self.config.hidden_size))
            self.init_transformer_weights(self.config.adjust_init_range)
        else:
            # For testing only.
            # Fuse the separately supplied Q/K/V weights into the single QKV
            # parameter the kernel expects; the QKV bias is zeroed.
            q = initial_weights[0].data
            k = initial_weights[1].data
            v = initial_weights[2].data

            self.attn_qkvw = nn.Parameter(torch.cat((q, k, v)))
            #self.attn_qkvw[i * self.config.hidden_size:(i + 1) * self.config.hidden_size] = \
            #    initial_weights[i].clone()
            #torch.empty_like(initial_weights[i]).data.copy_(initial_weights[i].data)
            self.attn_qkvb = nn.Parameter(torch.Tensor(self.config.hidden_size * 3))
            self.attn_qkvb.data.zero_()
            self.attn_ow = initial_weights[3]
            self.attn_ob = initial_biases[3]
            self.attn_nw = initial_weights[4]
            self.attn_nb = initial_biases[4]
            self.inter_w = initial_weights[5]
            self.inter_b = initial_biases[5]
            self.output_w = initial_weights[6]
            self.output_b = initial_biases[6]
            self.norm_w = initial_weights[7]
            self.norm_b = initial_biases[7]

        # Load cuda modules if needed
        # (lazy, one-time JIT build shared by all layer instances of each mode)
        global transformer_cuda_module, stochastic_transformer_cuda_module
        if transformer_cuda_module is None and not self.config.stochastic_mode:
            transformer_cuda_module = TransformerBuilder().load()
        if stochastic_transformer_cuda_module is None and self.config.stochastic_mode:
            stochastic_transformer_cuda_module = StochasticTransformerBuilder().load()

        # create the layer in cuda kernels.
        cuda_module = stochastic_transformer_cuda_module if self.config.stochastic_mode else transformer_cuda_module
        create_layer_func = cuda_module.create_transformer_layer_fp16 if self.config.fp16 else cuda_module.create_transformer_layer_fp32

        create_layer_func(self.config.layer_id, self.config.batch_size, self.config.hidden_size, self.config.heads,
                          self.config.intermediate_size, self.config.attn_dropout_ratio,
                          self.config.hidden_dropout_ratio, self.config.layer_norm_eps, self.config.seed,
                          self.config.pre_layer_norm, self.config.test_gemm, self.config.attn_dropout_checkpoint,
                          self.config.normalize_invertible, self.config.gelu_checkpoint, self.config.stochastic_mode)

    def init_transformer_weights(self, adjust_init_range=False):
        # BERT-style init: normal(0, initializer_range) for weights, zeros for
        # biases, ones/zeros for the LayerNorm affine parameters.
        num_layers = self.config.num_hidden_layers
        output_std = self.config.initializer_range
        # NOTE(review): the residual-path std adjustment below is applied only
        # when local_rank == 0, so other ranks initialize attn_ow/output_w with
        # the unadjusted std — looks like the rank check was meant to gate only
        # the print; confirm whether this asymmetry across ranks is intended.
        if adjust_init_range and self.config.local_rank == 0:
            print("Accounting for accumulation on the residual path")
            output_std = self.config.initializer_range / math.sqrt(2.0 * num_layers)

        self.attn_qkvw.data.normal_(mean=0.0, std=self.config.initializer_range)
        self.attn_qkvb.data.zero_()
        self.attn_ow.data.normal_(mean=0.0, std=output_std)
        self.attn_ob.data.zero_()
        self.attn_nw.data.fill_(1.0)
        self.attn_nb.data.zero_()
        self.inter_w.data.normal_(mean=0.0, std=self.config.initializer_range)
        self.inter_b.data.zero_()
        self.output_w.data.normal_(mean=0.0, std=output_std)
        self.output_b.data.zero_()
        self.norm_w.data.fill_(1.0)
        self.norm_b.data.zero_()

    def forward(self,
                hidden_states,
                attention_mask=None,
                head_mask=None,
                layer_head_mask=None,
                encoder_hidden_states=None,
                encoder_attention_mask=None,
                past_key_value=None,
                output_attentions=False,
                grads=None):
        # NOTE(review): head_mask / layer_head_mask / encoder_* /
        # past_key_value / output_attentions are accepted (presumably for
        # HuggingFace interface compatibility) but never forwarded to the
        # kernel — confirm callers do not rely on them.
        # Snapshot the autograd/training state onto the config so the Function
        # (which only receives the config) can see it.
        self.config.is_grad_enabled = torch.is_grad_enabled()
        self.config.training = self.training
        return DeepSpeedTransformerFunction.apply(hidden_states, attention_mask, self, grads, self.config.layer_id,
                                                  self.attn_qkvw, self.attn_qkvb, self.attn_ow, self.attn_ob,
                                                  self.attn_nw, self.attn_nb, self.inter_w, self.inter_b,
                                                  self.output_w, self.output_b, self.norm_w, self.norm_b, self.config)