id
int64 0
755k
| file_name
stringlengths 3
109
| file_path
stringlengths 13
185
| content
stringlengths 31
9.38M
| size
int64 31
9.38M
| language
stringclasses 1
value | extension
stringclasses 11
values | total_lines
int64 1
340k
| avg_line_length
float64 2.18
149k
| max_line_length
int64 7
2.22M
| alphanum_fraction
float64 0
1
| repo_name
stringlengths 6
65
| repo_stars
int64 100
47.3k
| repo_forks
int64 0
12k
| repo_open_issues
int64 0
3.4k
| repo_license
stringclasses 9
values | repo_extraction_date
stringclasses 92
values | exact_duplicates_redpajama
bool 2
classes | near_duplicates_redpajama
bool 2
classes | exact_duplicates_githubcode
bool 2
classes | exact_duplicates_stackv2
bool 1
class | exact_duplicates_stackv1
bool 2
classes | near_duplicates_githubcode
bool 2
classes | near_duplicates_stackv1
bool 2
classes | near_duplicates_stackv2
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
18,504
|
util-unix.cpp
|
AGWA_git-crypt/util-unix.cpp
|
/*
* Copyright 2012, 2014 Andrew Ayer
*
* This file is part of git-crypt.
*
* git-crypt is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* git-crypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with git-crypt. If not, see <http://www.gnu.org/licenses/>.
*
* Additional permission under GNU GPL version 3 section 7:
*
* If you modify the Program, or any covered work, by linking or
* combining it with the OpenSSL project's OpenSSL library (or a
* modified version of that library), containing parts covered by the
* terms of the OpenSSL or SSLeay licenses, the licensors of the Program
* grant you additional permission to convey the resulting work.
* Corresponding Source for a non-source form of such a combination
* shall include the source code for the parts of OpenSSL used as well
* as that of the covered work.
*/
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/time.h>
#include <errno.h>
#include <utime.h>
#include <unistd.h>
#include <stdio.h>
#include <limits.h>
#include <fcntl.h>
#include <stdlib.h>
#include <dirent.h>
#include <vector>
#include <string>
#include <cstring>
#include <cstddef>
#include <algorithm>
// Build a human-readable description of the failure in the form
// "action: target: strerror(error)", omitting empty/zero parts.
std::string System_error::message () const
{
	std::string result(action);
	if (!target.empty()) {
		result.append(": ");
		result.append(target);
	}
	if (error) {
		result.append(": ");
		result.append(strerror(error));
	}
	return result;
}
// Create an anonymous temporary file and open it as this fstream.
// The file is created with mode 0600 in $TMPDIR (or /tmp) and unlinked
// immediately after opening, so it disappears automatically on close.
void temp_fstream::open (std::ios_base::openmode mode)
{
// Drop any previously-opened temp file first.
close();
const char* tmpdir = getenv("TMPDIR");
size_t tmpdir_len = tmpdir ? std::strlen(tmpdir) : 0;
if (tmpdir_len == 0 || tmpdir_len > 4096) {
// no $TMPDIR or it's excessively long => fall back to /tmp
tmpdir = "/tmp";
tmpdir_len = 4;
}
// +18 = strlen("/git-crypt.XXXXXX") (17) plus the NUL terminator.
std::vector<char> path_buffer(tmpdir_len + 18);
char* path = &path_buffer[0];
std::strcpy(path, tmpdir);
std::strcpy(path + tmpdir_len, "/git-crypt.XXXXXX");
// Restrict the temp file to the owner; restore the old umask afterwards.
mode_t old_umask = umask(0077);
int fd = mkstemp(path);
if (fd == -1) {
int mkstemp_errno = errno;
umask(old_umask);
throw System_error("mkstemp", "", mkstemp_errno);
}
umask(old_umask);
// Re-open the just-created path through the fstream interface; the raw
// mkstemp descriptor is only used to hold the file until then.
std::fstream::open(path, mode);
if (!std::fstream::is_open()) {
unlink(path);
::close(fd);
throw System_error("std::fstream::open", path, 0);
}
// Unlink while the stream holds it open: the file lives until close().
unlink(path);
::close(fd);
}
// Close the underlying fstream if (and only if) it is currently open.
void temp_fstream::close ()
{
	if (!std::fstream::is_open()) {
		return;
	}
	std::fstream::close();
}
// Create every parent directory of `path` (like `mkdir -p` for the dirname).
// Walks the path one '/'-separated prefix at a time; each prefix must either
// already be a directory or be creatable. The final component itself is NOT
// created (the search starts at offset 1 and stops at the last slash).
// Throws System_error on any failure.
void mkdir_parent (const std::string& path)
{
std::string::size_type slash(path.find('/', 1));
while (slash != std::string::npos) {
std::string prefix(path.substr(0, slash));
struct stat status;
if (stat(prefix.c_str(), &status) == 0) {
// already exists - make sure it's a directory
if (!S_ISDIR(status.st_mode)) {
throw System_error("mkdir_parent", prefix, ENOTDIR);
}
} else {
// NOTE(review): stat-then-mkdir is racy; a concurrent mkdir of the
// same prefix would make this throw EEXIST. Acceptable for git-crypt's
// single-process usage.
if (errno != ENOENT) {
throw System_error("mkdir_parent", prefix, errno);
}
// doesn't exist - mkdir it
if (mkdir(prefix.c_str(), 0777) == -1) {
throw System_error("mkdir", prefix, errno);
}
}
slash = path.find('/', slash + 1);
}
}
// Best-effort path to the running git-crypt executable, based on argv[0].
// Absolute paths are returned as-is; relative paths containing a '/' are
// canonicalized with realpath(); a bare command name is returned unchanged
// (it was found via $PATH and we cannot resolve it here).
std::string our_exe_path ()
{
	if (argv0[0] == '/') {
		// argv[0] starts with / => it's an absolute path
		return argv0;
	} else if (std::strchr(argv0, '/')) {
		// argv[0] contains / => it a relative path that should be resolved
		char* resolved_path_p = realpath(argv0, nullptr);
		if (!resolved_path_p) {
			// Fix: realpath() returns NULL on failure (e.g. ENOENT, ENAMETOOLONG);
			// previously that NULL was passed to std::string's ctor => UB.
			// Fall back to the unresolved argv[0].
			return argv0;
		}
		std::string resolved_path(resolved_path_p);
		free(resolved_path_p);
		return resolved_path;
	} else {
		// argv[0] is just a bare filename => not much we can do
		return argv0;
	}
}
// Extract the child's exit code from a wait(2) status.
// Returns -1 when the wait itself failed or the child did not exit normally
// (e.g. it was killed by a signal).
int exit_status (int wait_status)
{
	if (wait_status == -1) {
		return -1;
	}
	if (!WIFEXITED(wait_status)) {
		return -1;
	}
	return WEXITSTATUS(wait_status);
}
// Bump the file's access and modification times to "now".
// A nonexistent file is silently ignored; other failures throw System_error.
void touch_file (const std::string& filename)
{
	if (utimes(filename.c_str(), nullptr) == 0) {
		return;
	}
	if (errno == ENOENT) {
		return;
	}
	throw System_error("utimes", filename, errno);
}
// Delete `filename`. Missing files are not an error (idempotent delete);
// any other unlink failure throws System_error.
void remove_file (const std::string& filename)
{
	const int rc = unlink(filename.c_str());
	if (rc == -1 && errno != ENOENT) {
		throw System_error("unlink", filename, errno);
	}
}
// Platform-specific stdio setup hook; nothing is needed on Unix.
// (The Windows implementation puts stdin/stdout into binary mode.)
static void init_std_streams_platform ()
{
}
// Create `path` with owner-only permissions (0600) and close it immediately.
// If the file already exists it is opened (its mode is left unchanged).
// Throws System_error when open(2) fails.
void create_protected_file (const char* path)
{
	const int fd = open(path, O_WRONLY | O_CREAT, 0600);
	if (fd < 0) {
		throw System_error("open", path, errno);
	}
	close(fd);
}
// Portability wrapper around rename(2); on Unix it is a straight pass-through.
int util_rename (const char* from, const char* to)
{
	const int rc = rename(from, to);
	return rc;
}
// Return the names of all entries in directory `path`, excluding "." and "..",
// sorted lexicographically. Throws System_error if the directory cannot be
// opened or read.
std::vector<std::string> get_directory_contents (const char* path)
{
std::vector<std::string> contents;
DIR* dir = opendir(path);
if (!dir) {
throw System_error("opendir", path, errno);
}
try {
// readdir() signals end-of-stream and error the same way (NULL), so
// errno must be cleared first and checked after the loop.
errno = 0;
// Note: readdir is reentrant in new implementations. In old implementations,
// it might not be, but git-crypt isn't multi-threaded so that's OK.
// We don't use readdir_r because it's buggy and deprecated:
// https://womble.decadent.org.uk/readdir_r-advisory.html
// http://austingroupbugs.net/view.php?id=696
// http://man7.org/linux/man-pages/man3/readdir_r.3.html
while (struct dirent* ent = readdir(dir)) {
if (!(std::strcmp(ent->d_name, ".") == 0 || std::strcmp(ent->d_name, "..") == 0)) {
contents.push_back(ent->d_name);
}
}
if (errno) {
throw System_error("readdir", path, errno);
}
} catch (...) {
// Ensure the DIR handle is released even if push_back or readdir throws.
closedir(dir);
throw;
}
closedir(dir);
std::sort(contents.begin(), contents.end());
return contents;
}
| 5,682
|
C++
|
.cpp
| 197
| 26.614213
| 86
| 0.680205
|
AGWA/git-crypt
| 8,224
| 478
| 134
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
18,505
|
crypto-openssl-11.cpp
|
AGWA_git-crypt/crypto-openssl-11.cpp
|
/*
* Copyright 2012, 2014 Andrew Ayer
*
* This file is part of git-crypt.
*
* git-crypt is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* git-crypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with git-crypt. If not, see <http://www.gnu.org/licenses/>.
*
* Additional permission under GNU GPL version 3 section 7:
*
* If you modify the Program, or any covered work, by linking or
* combining it with the OpenSSL project's OpenSSL library (or a
* modified version of that library), containing parts covered by the
* terms of the OpenSSL or SSLeay licenses, the licensors of the Program
* grant you additional permission to convey the resulting work.
* Corresponding Source for a non-source form of such a combination
* shall include the source code for the parts of OpenSSL used as well
* as that of the covered work.
*/
#include <openssl/opensslconf.h>
#if defined(OPENSSL_API_COMPAT)
#include "crypto.hpp"
#include "key.hpp"
#include "util.hpp"
#include <openssl/aes.h>
#include <openssl/sha.h>
#include <openssl/hmac.h>
#include <openssl/evp.h>
#include <openssl/rand.h>
#include <openssl/err.h>
#include <sstream>
#include <cstring>
// One-time OpenSSL setup: load the human-readable error strings so that
// ERR_get_error() results can be formatted into meaningful messages.
void init_crypto ()
{
ERR_load_crypto_strings();
}
// Pimpl holding the OpenSSL AES key schedule; kept out of the header so
// callers need not include <openssl/aes.h>.
struct Aes_ecb_encryptor::Aes_impl {
AES_KEY key;
};
// Expand `raw_key` (KEY_LEN bytes) into an AES encryption key schedule.
// Throws Crypto_error if OpenSSL rejects the key.
Aes_ecb_encryptor::Aes_ecb_encryptor (const unsigned char* raw_key)
: impl(new Aes_impl)
{
	if (AES_set_encrypt_key(raw_key, KEY_LEN * 8, &(impl->key)) != 0) {
		// Fix: the error message previously named the wrong function
		// ("Aes_ctr_encryptor::Aes_ctr_encryptor").
		throw Crypto_error("Aes_ecb_encryptor::Aes_ecb_encryptor", "AES_set_encrypt_key failed");
	}
}
// Scrub the expanded key schedule from memory before the impl is destroyed.
Aes_ecb_encryptor::~Aes_ecb_encryptor ()
{
// Note: Explicit destructor necessary because class contains an unique_ptr
// which contains an incomplete type when the unique_ptr is declared.
// explicit_memset (vs plain memset) prevents the compiler from optimizing
// away the wipe of soon-to-be-freed memory.
explicit_memset(&impl->key, '\0', sizeof(impl->key));
}
// Encrypt one AES block (16 bytes) from `plain` into `cipher` using the
// key schedule built in the constructor. Buffers may not overlap partially.
void Aes_ecb_encryptor::encrypt(const unsigned char* plain, unsigned char* cipher)
{
AES_encrypt(plain, cipher, &(impl->key));
}
// Pimpl owning the OpenSSL 1.1 heap-allocated HMAC context.
struct Hmac_sha1_state::Hmac_impl {
HMAC_CTX *ctx;
};
// Begin an HMAC-SHA1 computation keyed with `key` (`key_len` bytes).
Hmac_sha1_state::Hmac_sha1_state (const unsigned char* key, size_t key_len)
: impl(new Hmac_impl)
{
// NOTE(review): HMAC_CTX_new() can return NULL on allocation failure and
// is not checked here; HMAC_Init_ex would then receive NULL - confirm
// whether upstream accepts this risk.
impl->ctx = HMAC_CTX_new();
HMAC_Init_ex(impl->ctx, key, key_len, EVP_sha1(), nullptr);
}
// Release the OpenSSL HMAC context (HMAC_CTX_free also erases its contents).
Hmac_sha1_state::~Hmac_sha1_state ()
{
HMAC_CTX_free(impl->ctx);
}
// Feed `buffer_len` bytes of message data into the running HMAC.
void Hmac_sha1_state::add (const unsigned char* buffer, size_t buffer_len)
{
HMAC_Update(impl->ctx, buffer, buffer_len);
}
// Finalize the HMAC and write the SHA-1-sized digest into `digest`
// (caller supplies a buffer of at least Hmac_sha1_state::LEN bytes).
void Hmac_sha1_state::get (unsigned char* digest)
{
// The written length is fixed by the SHA-1 digest size; the out-param is
// required by the API but its value is not needed here.
unsigned int len;
HMAC_Final(impl->ctx, digest, &len);
}
// Fill `buffer` with `len` cryptographically-secure random bytes from
// OpenSSL's CSPRNG. On failure, drain OpenSSL's error queue into the
// exception message and throw Crypto_error.
void random_bytes (unsigned char* buffer, size_t len)
{
	if (RAND_bytes(buffer, len) == 1) {
		return;
	}
	std::ostringstream message;
	unsigned long code;
	while ((code = ERR_get_error()) != 0) {
		char error_string[120];
		ERR_error_string_n(code, error_string, sizeof(error_string));
		message << "OpenSSL Error: " << error_string << "; ";
	}
	throw Crypto_error("random_bytes", message.str());
}
#endif
| 3,312
|
C++
|
.cpp
| 101
| 30.980198
| 91
| 0.736298
|
AGWA/git-crypt
| 8,224
| 478
| 134
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
18,506
|
commands.cpp
|
AGWA_git-crypt/commands.cpp
|
/*
* Copyright 2012, 2014 Andrew Ayer
*
* This file is part of git-crypt.
*
* git-crypt is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* git-crypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with git-crypt. If not, see <http://www.gnu.org/licenses/>.
*
* Additional permission under GNU GPL version 3 section 7:
*
* If you modify the Program, or any covered work, by linking or
* combining it with the OpenSSL project's OpenSSL library (or a
* modified version of that library), containing parts covered by the
* terms of the OpenSSL or SSLeay licenses, the licensors of the Program
* grant you additional permission to convey the resulting work.
* Corresponding Source for a non-source form of such a combination
* shall include the source code for the parts of OpenSSL used as well
* as that of the covered work.
*/
#include "commands.hpp"
#include "crypto.hpp"
#include "util.hpp"
#include "key.hpp"
#include "gpg.hpp"
#include "parse_options.hpp"
#include "coprocess.hpp"
#include <unistd.h>
#include <stdint.h>
#include <algorithm>
#include <string>
#include <fstream>
#include <sstream>
#include <iostream>
#include <cstddef>
#include <cstring>
#include <cctype>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <exception>
#include <vector>
// Batching constant for git_checkout(): paths are passed to `git checkout`
// in groups of this size.
enum {
// # of arguments per git checkout call; must be large enough to be efficient but small
// enough to avoid operating system limits on argument length
GIT_CHECKOUT_BATCH_SIZE = 100
};
// Map an optional key name to its gitattributes filter/diff value:
// "git-crypt" for the default key, "git-crypt-NAME" for a named key.
static std::string attribute_name (const char* key_name)
{
	std::string attr("git-crypt");
	if (key_name) {
		// named key
		attr += '-';
		attr += key_name;
	}
	return attr;
}
// Run `git version` and return the bare version token (e.g. "1.7.10.4").
// Throws Error when git cannot be executed.
static std::string git_version_string ()
{
	std::vector<std::string> command{"git", "version"};
	std::stringstream output;
	if (!successful_exit(exec_command(command, output))) {
		throw Error("'git version' failed - is Git installed?");
	}
	// Output looks like "git version 1.7.10.4": skip the first two words.
	std::string word;
	output >> word >> word >> word;
	return word;
}
// Split a dotted version string ("1.7.10.4") into numeric components.
// Non-numeric components become 0 (std::atoi semantics).
static std::vector<int> parse_version (const std::string& str)
{
	std::vector<int> components;
	std::istringstream stream(str);
	for (std::string piece; std::getline(stream, piece, '.'); ) {
		components.push_back(std::atoi(piece.c_str()));
	}
	return components;
}
// Cached, parsed version of the installed git binary. The function-local
// static runs `git version` only once, on first call.
static const std::vector<int>& git_version ()
{
static const std::vector<int> version(parse_version(git_version_string()));
return version;
}
// Build a three-component version vector {a, b, c} for comparison against
// git_version() (std::vector's lexicographic operator>=).
static std::vector<int> make_version (int a, int b, int c)
{
	std::vector<int> version{a, b, c};
	return version;
}
// Set a git config variable: `git config NAME VALUE`. Throws Error on failure.
static void git_config (const std::string& name, const std::string& value)
{
	std::vector<std::string> command{"git", "config", name, value};
	if (!successful_exit(exec_command(command))) {
		throw Error("'git config' failed");
	}
}
static bool git_has_config (const std::string& name)
{
std::vector<std::string> command;
command.push_back("git");
command.push_back("config");
command.push_back("--get-all");
command.push_back(name);
std::stringstream output;
switch (exit_status(exec_command(command, output))) {
case 0: return true;
case 1: return false;
default: throw Error("'git config' failed");
}
}
// Remove an entire config section: `git config --remove-section NAME`.
// Throws Error on failure.
static void git_deconfig (const std::string& name)
{
	std::vector<std::string> command{"git", "config", "--remove-section", name};
	if (!successful_exit(exec_command(command))) {
		throw Error("'git config' failed");
	}
}
// Install the git-crypt smudge/clean/diff filters in the repository config,
// pointing at the running executable. For a named key the filter/diff
// sections are suffixed with the key name and the commands pass --key-name.
static void configure_git_filters (const char* key_name)
{
std::string escaped_git_crypt_path(escape_shell_arg(our_exe_path()));
if (key_name) {
// Note: key_name contains only shell-safe characters so it need not be escaped.
git_config(std::string("filter.git-crypt-") + key_name + ".smudge",
escaped_git_crypt_path + " smudge --key-name=" + key_name);
git_config(std::string("filter.git-crypt-") + key_name + ".clean",
escaped_git_crypt_path + " clean --key-name=" + key_name);
// "required" makes git fail loudly if the filter is missing, instead of
// silently committing plaintext.
git_config(std::string("filter.git-crypt-") + key_name + ".required", "true");
git_config(std::string("diff.git-crypt-") + key_name + ".textconv",
escaped_git_crypt_path + " diff --key-name=" + key_name);
} else {
git_config("filter.git-crypt.smudge", escaped_git_crypt_path + " smudge");
git_config("filter.git-crypt.clean", escaped_git_crypt_path + " clean");
git_config("filter.git-crypt.required", "true");
git_config("diff.git-crypt.textconv", escaped_git_crypt_path + " diff");
}
}
// Remove the git-crypt filter and diff config sections for the given key,
// but only if at least one of their variables is actually set (so that
// --remove-section never fails on a missing section).
static void deconfigure_git_filters (const char* key_name)
{
	const std::string attr(attribute_name(key_name));
	const bool filter_configured =
		git_has_config("filter." + attr + ".smudge") ||
		git_has_config("filter." + attr + ".clean") ||
		git_has_config("filter." + attr + ".required");
	if (filter_configured) {
		git_deconfig("filter." + attr);
	}
	if (git_has_config("diff." + attr + ".textconv")) {
		git_deconfig("diff." + attr);
	}
}
// Check out one batch of paths with `git checkout -- PATH...`.
// An empty batch trivially succeeds; otherwise returns whether git exited 0.
static bool git_checkout_batch (std::vector<std::string>::const_iterator paths_begin, std::vector<std::string>::const_iterator paths_end)
{
	if (paths_begin == paths_end) {
		return true;
	}
	std::vector<std::string> command{"git", "checkout", "--"};
	command.insert(command.end(), paths_begin, paths_end);
	return successful_exit(exec_command(command));
}
// Check out all `paths`, invoking git in batches of GIT_CHECKOUT_BATCH_SIZE
// to stay under OS command-line length limits. Stops at the first failure.
static bool git_checkout (const std::vector<std::string>& paths)
{
	auto cursor(paths.begin());
	while (paths.end() - cursor >= GIT_CHECKOUT_BATCH_SIZE) {
		const auto batch_end = cursor + GIT_CHECKOUT_BATCH_SIZE;
		if (!git_checkout_batch(cursor, batch_end)) {
			return false;
		}
		cursor = batch_end;
	}
	// Final partial batch (possibly empty).
	return git_checkout_batch(cursor, paths.end());
}
// Key names match when both are null (the default key) or both are equal
// C strings.
static bool same_key_name (const char* a, const char* b)
{
	if (!a || !b) {
		return !a && !b;
	}
	return std::strcmp(a, b) == 0;
}
// Validate `key_name` via validate_key_name(); on rejection, throw Error
// carrying the human-readable reason. Returns normally for valid names.
static void validate_key_name_or_throw (const char* key_name)
{
std::string reason;
if (!validate_key_name(key_name, &reason)) {
throw Error(reason);
}
}
// Locate git-crypt's private state directory: "<git-dir>/git-crypt",
// where <git-dir> comes from `git rev-parse --git-dir`.
static std::string get_internal_state_path ()
{
	std::vector<std::string> command{"git", "rev-parse", "--git-dir"};
	std::stringstream output;
	if (!successful_exit(exec_command(command, output))) {
		throw Error("'git rev-parse --git-dir' failed - is this a Git repository?");
	}
	std::string git_dir;
	std::getline(output, git_dir);
	return git_dir + "/git-crypt";
}
// Keys subdirectory of the internal (.git) git-crypt state directory.
static std::string get_internal_keys_path (const std::string& internal_state_path)
{
	std::string keys_path(internal_state_path);
	keys_path += "/keys";
	return keys_path;
}
// Convenience overload: keys path for the current repository's state dir.
static std::string get_internal_keys_path ()
{
return get_internal_keys_path(get_internal_state_path());
}
// Path of one decrypted key file inside the internal keys directory;
// a null key_name selects the "default" key.
static std::string get_internal_key_path (const char* key_name)
{
	const char* leaf = key_name ? key_name : "default";
	return get_internal_keys_path() + "/" + leaf;
}
std::string get_git_config (const std::string& name)
{
// git config --get
std::vector<std::string> command;
command.push_back("git");
command.push_back("config");
command.push_back("--get");
command.push_back(name);
std::stringstream output;
if (!successful_exit(exec_command(command, output))) {
throw Error("'git config' missing value for key '" + name +"'");
}
std::string value;
std::getline(output, value);
return value;
}
// Locate the committed (in-worktree) git-crypt state directory.
// Default is "<toplevel>/.git-crypt", overridable with the
// git-crypt.repoStateDir config variable (interpreted relative to the
// work tree so the directory can itself be committed).
static std::string get_repo_state_path ()
{
// git rev-parse --show-toplevel
std::vector<std::string> command;
command.push_back("git");
command.push_back("rev-parse");
command.push_back("--show-toplevel");
std::stringstream output;
if (!successful_exit(exec_command(command, output))) {
throw Error("'git rev-parse --show-toplevel' failed - is this a Git repository?");
}
std::string path;
std::getline(output, path);
if (path.empty()) {
// could happen for a bare repo
throw Error("Could not determine Git working tree - is this a non-bare repo?");
}
// Check if the repo state dir has been explicitly configured. If so, use that in path construction.
if (git_has_config("git-crypt.repoStateDir")) {
std::string repoStateDir = get_git_config("git-crypt.repoStateDir");
// The repoStateDir value must always be relative to git work tree to ensure the repoStateDir can be committed
// along with the remainder of the repository.
path += '/' + repoStateDir;
} else {
// There is no explicitly configured repo state dir configured, so use the default.
path += "/.git-crypt";
}
return path;
}
// Keys subdirectory of the committed repo state directory.
static std::string get_repo_keys_path (const std::string& repo_state_path)
{
	std::string keys_path(repo_state_path);
	keys_path += "/keys";
	return keys_path;
}
// Convenience overload: keys path for the current repository's state dir.
static std::string get_repo_keys_path ()
{
return get_repo_keys_path(get_repo_state_path());
}
// Relative path from the current directory up to the repository toplevel
// (`git rev-parse --show-cdup`); empty when already at the toplevel.
static std::string get_path_to_top ()
{
	std::vector<std::string> command{"git", "rev-parse", "--show-cdup"};
	std::stringstream output;
	if (!successful_exit(exec_command(command, output))) {
		throw Error("'git rev-parse --show-cdup' failed - is this a Git repository?");
	}
	std::string path_to_top;
	std::getline(output, path_to_top);
	return path_to_top;
}
// Stream `git status -uno --porcelain` (tracked files only, stable format)
// into `output`. Throws Error when git fails.
static void get_git_status (std::ostream& output)
{
	std::vector<std::string> command{"git", "status", "-uno", "--porcelain"};
	if (!successful_exit(exec_command(command, output))) {
		throw Error("'git status' failed - is this a Git repository?");
	}
}
// returns filter and diff attributes as a pair
// Slow path (one `git check-attr` process per file) used with git < 1.8.5.
// Attribute values "unspecified"/"unset"/"set" are treated as absent.
static std::pair<std::string, std::string> get_file_attributes (const std::string& filename)
{
// git check-attr filter diff -- filename
std::vector<std::string> command;
command.push_back("git");
command.push_back("check-attr");
command.push_back("filter");
command.push_back("diff");
command.push_back("--");
command.push_back(filename);
std::stringstream output;
if (!successful_exit(exec_command(command, output))) {
throw Error("'git check-attr' failed - is this a Git repository?");
}
std::string filter_attr;
std::string diff_attr;
std::string line;
// Example output:
// filename: filter: git-crypt
// filename: diff: git-crypt
while (std::getline(output, line)) {
// filename might contain ": ", so parse line backwards
// filename: attr_name: attr_value
// ^name_pos ^value_pos
const std::string::size_type value_pos(line.rfind(": "));
if (value_pos == std::string::npos || value_pos == 0) {
continue;
}
const std::string::size_type name_pos(line.rfind(": ", value_pos - 1));
if (name_pos == std::string::npos) {
continue;
}
const std::string attr_name(line.substr(name_pos + 2, value_pos - (name_pos + 2)));
const std::string attr_value(line.substr(value_pos + 2));
// Only real attribute values are kept; the three sentinel states mean
// the attribute is not meaningfully set for this file.
if (attr_value != "unspecified" && attr_value != "unset" && attr_value != "set") {
if (attr_name == "filter") {
filter_attr = attr_value;
} else if (attr_name == "diff") {
diff_attr = attr_value;
}
}
}
return std::make_pair(filter_attr, diff_attr);
}
// returns filter and diff attributes as a pair
// Fast path: queries a long-running `git check-attr --stdin -z` coprocess.
// Writes the NUL-terminated filename to the child's stdin and reads back two
// NUL-delimited (filename, attr_name, attr_value) records - one for "filter",
// one for "diff". Sentinel values "unspecified"/"unset"/"set" are ignored.
static std::pair<std::string, std::string> get_file_attributes (const std::string& filename, std::ostream& check_attr_stdin, std::istream& check_attr_stdout)
{
	check_attr_stdin << filename << '\0' << std::flush;
	std::string filter_attr;
	std::string diff_attr;
	// Example output:
	// filename\0filter\0git-crypt\0filename\0diff\0git-crypt\0
	for (int record = 0; record < 2; ++record) {
		std::string echoed_filename;
		std::string attr_name;
		std::string attr_value;
		std::getline(check_attr_stdout, echoed_filename, '\0');
		std::getline(check_attr_stdout, attr_name, '\0');
		std::getline(check_attr_stdout, attr_value, '\0');
		const bool has_real_value =
			attr_value != "unspecified" &&
			attr_value != "unset" &&
			attr_value != "set";
		if (has_real_value) {
			if (attr_name == "filter") {
				filter_attr = attr_value;
			} else if (attr_name == "diff") {
				diff_attr = attr_value;
			}
		}
	}
	return std::make_pair(filter_attr, diff_attr);
}
// True if the blob identified by `object_id` begins with git-crypt's
// 10-byte magic header "\0GITCRYPT\0".
static bool check_if_blob_is_encrypted (const std::string& object_id)
{
	std::vector<std::string> command{"git", "cat-file", "blob", object_id};
	// TODO: do this more efficiently - don't read entire command output into buffer, only read what we need
	std::stringstream output;
	if (!successful_exit(exec_command(command, output))) {
		throw Error("'git cat-file' failed - is this a Git repository?");
	}
	char header[10];
	output.read(header, sizeof(header));
	if (output.gcount() != sizeof(header)) {
		return false; // blob shorter than the magic => not encrypted
	}
	return std::memcmp(header, "\0GITCRYPT\0", 10) == 0;
}
// True if the staged copy of `filename` is git-crypt encrypted.
// Looks up the file's blob with `git ls-files -sz` and inspects its header;
// an untracked file is reported as not encrypted.
static bool check_if_file_is_encrypted (const std::string& filename)
{
	std::vector<std::string> command{"git", "ls-files", "-sz", "--", filename};
	std::stringstream output;
	if (!successful_exit(exec_command(command, output))) {
		throw Error("'git ls-files' failed - is this a Git repository?");
	}
	if (output.peek() == -1) {
		// no output => file not in the index
		return false;
	}
	std::string mode;
	std::string object_id;
	output >> mode >> object_id;
	return check_if_blob_is_encrypted(object_id);
}
// True if the octal git mode string denotes a regular file: the file-type
// bits (mask 0170000) equal 0100000 (S_IFREG), excluding symlinks,
// gitlinks, and directories.
static bool is_git_file_mode (const std::string& mode)
{
	const unsigned long parsed = std::strtoul(mode.c_str(), nullptr, 8);
	return (parsed & 0170000) == 0100000;
}
// Append to `files` every tracked regular file whose gitattributes "filter"
// value matches attribute_name(key_name). Streams `git ls-files -csz` through
// a coprocess and, on git >= 1.8.5, a second long-running `git check-attr`
// coprocess for attribute lookups.
static void get_encrypted_files (std::vector<std::string>& files, const char* key_name)
{
// git ls-files -cz -- path_to_top
std::vector<std::string> ls_files_command;
ls_files_command.push_back("git");
ls_files_command.push_back("ls-files");
ls_files_command.push_back("-csz");
ls_files_command.push_back("--");
const std::string path_to_top(get_path_to_top());
if (!path_to_top.empty()) {
ls_files_command.push_back(path_to_top);
}
Coprocess ls_files;
std::istream* ls_files_stdout = ls_files.stdout_pipe();
ls_files.spawn(ls_files_command);
Coprocess check_attr;
std::ostream* check_attr_stdin = nullptr;
std::istream* check_attr_stdout = nullptr;
if (git_version() >= make_version(1, 8, 5)) {
// In Git 1.8.5 (released 27 Nov 2013) and higher, we use a single `git check-attr` process
// to get the attributes of all files at once. In prior versions, we have to fork and exec
// a separate `git check-attr` process for each file, since -z and --stdin aren't supported.
// In a repository with thousands of files, this results in an almost 100x speedup.
std::vector<std::string> check_attr_command;
check_attr_command.push_back("git");
check_attr_command.push_back("check-attr");
check_attr_command.push_back("--stdin");
check_attr_command.push_back("-z");
check_attr_command.push_back("filter");
check_attr_command.push_back("diff");
check_attr_stdin = check_attr.stdin_pipe();
check_attr_stdout = check_attr.stdout_pipe();
check_attr.spawn(check_attr_command);
}
// Each ls-files record: "<mode> <object_id> <stage>\t<filename>\0"
while (ls_files_stdout->peek() != -1) {
std::string mode;
std::string object_id;
std::string stage;
std::string filename;
*ls_files_stdout >> mode >> object_id >> stage >> std::ws;
std::getline(*ls_files_stdout, filename, '\0');
if (is_git_file_mode(mode)) {
// Non-null check_attr_stdin doubles as the "fast path available" flag.
std::string filter_attribute;
if (check_attr_stdin) {
filter_attribute = get_file_attributes(filename, *check_attr_stdin, *check_attr_stdout).first;
} else {
filter_attribute = get_file_attributes(filename).first;
}
if (filter_attribute == attribute_name(key_name)) {
files.push_back(filename);
}
}
}
if (!successful_exit(ls_files.wait())) {
throw Error("'git ls-files' failed - is this a Git repository?");
}
if (check_attr_stdin) {
// Closing stdin lets the check-attr child see EOF and exit cleanly.
check_attr.close_stdin();
if (!successful_exit(check_attr.wait())) {
throw Error("'git check-attr' failed - is this a Git repository?");
}
}
}
// Populate `key_file` from, in priority order: an explicit legacy-format
// path, an explicit key path, or the repository's internal key store for
// `key_name`. Throws Error if the chosen file cannot be opened.
static void load_key (Key_file& key_file, const char* key_name, const char* key_path =0, const char* legacy_path =0)
{
if (legacy_path) {
// Pre-0.4 key file format.
std::ifstream key_file_in(legacy_path, std::fstream::binary);
if (!key_file_in) {
throw Error(std::string("Unable to open key file: ") + legacy_path);
}
key_file.load_legacy(key_file_in);
} else if (key_path) {
std::ifstream key_file_in(key_path, std::fstream::binary);
if (!key_file_in) {
throw Error(std::string("Unable to open key file: ") + key_path);
}
key_file.load(key_file_in);
} else {
// Fall back to the key unlocked into .git/git-crypt/keys.
std::ifstream key_file_in(get_internal_key_path(key_name).c_str(), std::fstream::binary);
if (!key_file_in) {
// TODO: include key name in error message
throw Error("Unable to open key file - have you unlocked/initialized this repository yet?");
}
key_file.load(key_file_in);
}
}
// Try to decrypt version `key_version` of key `key_name` using any of the
// caller's GPG secret keys. On success, adds the entry to `key_file` and
// returns true. Returns false if no matching encrypted keyfile exists;
// if at least one existed but every GPG decryption failed, rethrows the
// last GPG error instead.
static bool decrypt_repo_key (Key_file& key_file, const char* key_name, uint32_t key_version, const std::vector<std::string>& secret_keys, const std::string& keys_path)
{
// Remember a decryption failure so it can be reported if nothing succeeds.
std::exception_ptr gpg_error;
for (std::vector<std::string>::const_iterator seckey(secret_keys.begin()); seckey != secret_keys.end(); ++seckey) {
// Layout: <keys_path>/<key_name|default>/<version>/<fingerprint>.gpg
std::ostringstream path_builder;
path_builder << keys_path << '/' << (key_name ? key_name : "default") << '/' << key_version << '/' << *seckey << ".gpg";
std::string path(path_builder.str());
if (access(path.c_str(), F_OK) == 0) {
std::stringstream decrypted_contents;
try {
gpg_decrypt_from_file(path, decrypted_contents);
} catch (const Gpg_error&) {
// This secret key didn't work; try the next one.
gpg_error = std::current_exception();
continue;
}
// Sanity-check that the decrypted keyfile matches what we asked for.
Key_file this_version_key_file;
this_version_key_file.load(decrypted_contents);
const Key_file::Entry* this_version_entry = this_version_key_file.get(key_version);
if (!this_version_entry) {
throw Error("GPG-encrypted keyfile is malformed because it does not contain expected key version");
}
if (!same_key_name(key_name, this_version_key_file.get_key_name())) {
throw Error("GPG-encrypted keyfile is malformed because it does not contain expected key name");
}
key_file.set_key_name(key_name);
key_file.add(*this_version_entry);
return true;
}
}
if (gpg_error) {
std::rethrow_exception(gpg_error);
}
return false;
}
// Decrypt every key (default plus all validly-named keys) found under
// `keys_path` at version `key_version`, appending each successfully
// decrypted Key_file to `key_files`. Returns true if at least one key
// was decrypted. A missing keys directory yields false.
static bool decrypt_repo_keys (std::vector<Key_file>& key_files, uint32_t key_version, const std::vector<std::string>& secret_keys, const std::string& keys_path)
{
bool successful = false;
std::vector<std::string> dirents;
if (access(keys_path.c_str(), F_OK) == 0) {
dirents = get_directory_contents(keys_path.c_str());
}
for (std::vector<std::string>::const_iterator dirent(dirents.begin()); dirent != dirents.end(); ++dirent) {
// "default" maps to a null key name; anything else must be a valid key name.
const char* key_name = 0;
if (*dirent != "default") {
if (!validate_key_name(dirent->c_str())) {
continue;
}
key_name = dirent->c_str();
}
Key_file key_file;
if (decrypt_repo_key(key_file, key_name, key_version, secret_keys, keys_path)) {
key_files.push_back(key_file);
successful = true;
}
}
return successful;
}
// GPG-encrypt key entry `key` to every collaborator fingerprint in
// `collab_keys`, writing <keys_path>/<key_name|default>/<version>/<fpr>.gpg
// and recording each newly created path in *new_files. Files that already
// exist are left untouched.
// NOTE(review): `new_files` is dereferenced unconditionally - callers are
// presumably required to pass a non-null pointer; confirm at call sites.
static void encrypt_repo_key (const char* key_name, const Key_file::Entry& key, const std::vector<std::pair<std::string, bool> >& collab_keys, const std::string& keys_path, std::vector<std::string>* new_files)
{
// Serialize a single-entry keyfile once; the same bytes are encrypted
// separately for each collaborator.
std::string key_file_data;
{
Key_file this_version_key_file;
this_version_key_file.set_key_name(key_name);
this_version_key_file.add(key);
key_file_data = this_version_key_file.store_to_string();
}
for (std::vector<std::pair<std::string, bool> >::const_iterator collab(collab_keys.begin()); collab != collab_keys.end(); ++collab) {
const std::string& fingerprint(collab->first);
const bool key_is_trusted(collab->second);
std::ostringstream path_builder;
path_builder << keys_path << '/' << (key_name ? key_name : "default") << '/' << key.version << '/' << fingerprint << ".gpg";
std::string path(path_builder.str());
if (access(path.c_str(), F_OK) == 0) {
// Already encrypted to this collaborator - skip.
continue;
}
mkdir_parent(path);
gpg_encrypt_to_file(path, fingerprint, key_is_trusted, key_file_data.data(), key_file_data.size());
new_files->push_back(path);
}
}
// Parse the options shared by the plumbing commands (clean/smudge/diff):
// -k/--key-name and --key-file. Fills *key_name / *key_file and returns
// the index of the first non-option argument.
static int parse_plumbing_options (const char** key_name, const char** key_file, int argc, const char** argv)
{
Options_list options;
options.push_back(Option_def("-k", key_name));
options.push_back(Option_def("--key-name", key_name));
options.push_back(Option_def("--key-file", key_file));
return parse_options(options, argc, argv);
}
// Encrypt contents of stdin and write to stdout.
// This is git's "clean" filter: it runs when a file is staged and produces
// the deterministic ciphertext that is stored in the repository.
// Exit status: 0 on success, 1 on error, 2 on usage error.
int clean (int argc, const char** argv)
{
	const char* key_name = 0;
	const char* key_path = 0;
	const char* legacy_key_path = 0;	// single positional argument, pre-0.4 style
	int argi = parse_plumbing_options(&key_name, &key_path, argc, argv);
	if (argc - argi == 0) {
	} else if (!key_name && !key_path && argc - argi == 1) { // Deprecated - for compatibility with pre-0.4
		legacy_key_path = argv[argi];
	} else {
		std::clog << "Usage: git-crypt clean [--key-name=NAME] [--key-file=PATH]" << std::endl;
		return 2;
	}
	Key_file key_file;
	load_key(key_file, key_name, key_path, legacy_key_path);
	const Key_file::Entry* key = key_file.get_latest();
	if (!key) {
		std::clog << "git-crypt: error: key file is empty" << std::endl;
		return 1;
	}
	// Read the entire file
	Hmac_sha1_state hmac(key->hmac_key, HMAC_KEY_LEN); // Calculate the file's SHA1 HMAC as we go
	uint64_t file_size = 0; // Keep track of the length, make sure it doesn't get too big
	std::string file_contents; // First 8MB or so of the file go here
	temp_fstream temp_file; // The rest of the file spills into a temporary file on disk
	temp_file.exceptions(std::fstream::badbit);
	char buffer[1024];
	while (std::cin && file_size < Aes_ctr_encryptor::MAX_CRYPT_BYTES) {
		std::cin.read(buffer, sizeof(buffer));
		const size_t bytes_read = std::cin.gcount();
		hmac.add(reinterpret_cast<unsigned char*>(buffer), bytes_read);
		file_size += bytes_read;
		if (file_size <= 8388608) { // buffer the first 8 MiB in memory
			file_contents.append(buffer, bytes_read);
		} else {
			if (!temp_file.is_open()) {
				temp_file.open(std::fstream::in | std::fstream::out | std::fstream::binary | std::fstream::app);
			}
			temp_file.write(buffer, bytes_read);
		}
	}
	// Make sure the file isn't so large we'll overflow the counter value (which would doom security)
	if (file_size >= Aes_ctr_encryptor::MAX_CRYPT_BYTES) {
		std::clog << "git-crypt: error: file too long to encrypt securely" << std::endl;
		return 1;
	}
	// We use an HMAC of the file as the encryption nonce (IV) for CTR mode.
	// By using a hash of the file we ensure that the encryption is
	// deterministic so git doesn't think the file has changed when it really
	// hasn't. CTR mode with a synthetic IV is provably semantically secure
	// under deterministic CPA as long as the synthetic IV is derived from a
	// secure PRF applied to the message. Since HMAC-SHA1 is a secure PRF, this
	// encryption scheme is semantically secure under deterministic CPA.
	//
	// Informally, consider that if a file changes just a tiny bit, the IV will
	// be completely different, resulting in a completely different ciphertext
	// that leaks no information about the similarities of the plaintexts. Also,
	// since we're using the output from a secure hash function plus a counter
	// as the input to our block cipher, we should never have a situation where
	// two different plaintext blocks get encrypted with the same CTR value. A
	// nonce will be reused only if the entire file is the same, which leaks no
	// information except that the files are the same.
	//
	// To prevent an attacker from building a dictionary of hash values and then
	// looking up the nonce (which must be stored in the clear to allow for
	// decryption), we use an HMAC as opposed to a straight hash.
	// Note: Hmac_sha1_state::LEN >= Aes_ctr_encryptor::NONCE_LEN
	unsigned char digest[Hmac_sha1_state::LEN];
	hmac.get(digest);
	// Write a header that...
	std::cout.write("\0GITCRYPT\0", 10); // ...identifies this as an encrypted file
	std::cout.write(reinterpret_cast<char*>(digest), Aes_ctr_encryptor::NONCE_LEN); // ...includes the nonce
	// Now encrypt the file and write to stdout
	Aes_ctr_encryptor aes(key->aes_key, digest);
	// First read from the in-memory copy
	const unsigned char* file_data = reinterpret_cast<const unsigned char*>(file_contents.data());
	size_t file_data_len = file_contents.size();
	while (file_data_len > 0) {
		const size_t buffer_len = std::min(sizeof(buffer), file_data_len);
		aes.process(file_data, reinterpret_cast<unsigned char*>(buffer), buffer_len);
		std::cout.write(buffer, buffer_len);
		file_data += buffer_len;
		file_data_len -= buffer_len;
	}
	// Then read from the temporary file if applicable
	if (temp_file.is_open()) {
		temp_file.seekg(0); // rewind to re-read the spilled tail
		while (temp_file.peek() != -1) {
			temp_file.read(buffer, sizeof(buffer));
			const size_t buffer_len = temp_file.gcount();
			// In-place encryption: source and destination may alias
			aes.process(reinterpret_cast<unsigned char*>(buffer),
			            reinterpret_cast<unsigned char*>(buffer),
			            buffer_len);
			std::cout.write(buffer, buffer_len);
		}
	}
	return 0;
}
// Decrypt the git-crypt stream `in` to stdout.
// `header` must contain the 10-byte "\0GITCRYPT\0" magic followed by the
// nonce; the caller has already consumed those bytes from the stream.
// Returns 0 on success; 1 if the required key version is unavailable or
// the HMAC check indicates the ciphertext was tampered with.
static int decrypt_file_to_stdout (const Key_file& key_file, const unsigned char* header, std::istream& in)
{
	const unsigned char* nonce = header + 10; // nonce immediately follows the magic bytes
	uint32_t key_version = 0; // TODO: get the version from the file header
	const Key_file::Entry* key = key_file.get(key_version);
	if (!key) {
		std::clog << "git-crypt: error: key version " << key_version << " not available - please unlock with the latest version of the key." << std::endl;
		return 1;
	}
	Aes_ctr_decryptor aes(key->aes_key, nonce);
	Hmac_sha1_state hmac(key->hmac_key, HMAC_KEY_LEN); // HMAC the recovered plaintext as we stream it out
	while (in) {
		unsigned char buffer[1024];
		in.read(reinterpret_cast<char*>(buffer), sizeof(buffer));
		aes.process(buffer, buffer, in.gcount());
		hmac.add(buffer, in.gcount());
		std::cout.write(reinterpret_cast<char*>(buffer), in.gcount());
	}
	// The nonce was derived as the HMAC of the original plaintext (see clean()),
	// so recomputing the HMAC and comparing in constant time authenticates the file.
	unsigned char digest[Hmac_sha1_state::LEN];
	hmac.get(digest);
	if (!leakless_equals(digest, nonce, Aes_ctr_decryptor::NONCE_LEN)) {
		std::clog << "git-crypt: error: encrypted file has been tampered with!" << std::endl;
		// Although we've already written the tampered file to stdout, exiting
		// with a non-zero status will tell git the file has not been filtered,
		// so git will not replace it.
		return 1;
	}
	return 0;
}
// Decrypt contents of stdin and write to stdout.
// This is git's "smudge" filter: it runs on checkout to restore plaintext.
// A stream that lacks the git-crypt magic header is passed through verbatim
// (with a warning), since the blob was committed unencrypted.
int smudge (int argc, const char** argv)
{
	const char* key_name = 0;
	const char* key_path = 0;
	const char* legacy_key_path = 0;	// single positional argument, pre-0.4 style
	int argi = parse_plumbing_options(&key_name, &key_path, argc, argv);
	if (argc - argi == 0) {
	} else if (!key_name && !key_path && argc - argi == 1) { // Deprecated - for compatibility with pre-0.4
		legacy_key_path = argv[argi];
	} else {
		std::clog << "Usage: git-crypt smudge [--key-name=NAME] [--key-file=PATH]" << std::endl;
		return 2;
	}
	Key_file key_file;
	load_key(key_file, key_name, key_path, legacy_key_path);
	// Read the header to get the nonce and make sure it's actually encrypted
	unsigned char header[10 + Aes_ctr_decryptor::NONCE_LEN];
	std::cin.read(reinterpret_cast<char*>(header), sizeof(header));
	if (std::cin.gcount() != sizeof(header) || std::memcmp(header, "\0GITCRYPT\0", 10) != 0) {
		// File not encrypted - just copy it out to stdout
		std::clog << "git-crypt: Warning: file not encrypted" << std::endl;
		std::clog << "git-crypt: Run 'git-crypt status' to make sure all files are properly encrypted." << std::endl;
		std::clog << "git-crypt: If 'git-crypt status' reports no problems, then an older version of" << std::endl;
		std::clog << "git-crypt: this file may be unencrypted in the repository's history. If this" << std::endl;
		std::clog << "git-crypt: file contains sensitive information, you can use 'git filter-branch'" << std::endl;
		std::clog << "git-crypt: to remove its old versions from the history." << std::endl;
		std::cout.write(reinterpret_cast<char*>(header), std::cin.gcount()); // include the bytes which we already read
		std::cout << std::cin.rdbuf();
		return 0;
	}
	return decrypt_file_to_stdout(key_file, header, std::cin);
}
// Write the decrypted contents of FILENAME to stdout, for use as a git
// diff/textconv driver so 'git diff' can compare plaintexts.
// Files without the git-crypt magic header are copied through unchanged.
int diff (int argc, const char** argv)
{
	const char* key_name = 0;
	const char* key_path = 0;
	const char* filename = 0;
	const char* legacy_key_path = 0;	// pre-0.4 positional key path
	int argi = parse_plumbing_options(&key_name, &key_path, argc, argv);
	if (argc - argi == 1) {
		filename = argv[argi];
	} else if (!key_name && !key_path && argc - argi == 2) { // Deprecated - for compatibility with pre-0.4
		legacy_key_path = argv[argi];
		filename = argv[argi + 1];
	} else {
		std::clog << "Usage: git-crypt diff [--key-name=NAME] [--key-file=PATH] FILENAME" << std::endl;
		return 2;
	}
	Key_file key_file;
	load_key(key_file, key_name, key_path, legacy_key_path);
	// Open the file
	std::ifstream in(filename, std::fstream::binary);
	if (!in) {
		std::clog << "git-crypt: " << filename << ": unable to open for reading" << std::endl;
		return 1;
	}
	in.exceptions(std::fstream::badbit);
	// Read the header to get the nonce and determine if it's actually encrypted
	unsigned char header[10 + Aes_ctr_decryptor::NONCE_LEN];
	in.read(reinterpret_cast<char*>(header), sizeof(header));
	if (in.gcount() != sizeof(header) || std::memcmp(header, "\0GITCRYPT\0", 10) != 0) {
		// File not encrypted - just copy it out to stdout
		std::cout.write(reinterpret_cast<char*>(header), in.gcount()); // include the bytes which we already read
		std::cout << in.rdbuf();
		return 0;
	}
	// Go ahead and decrypt it
	return decrypt_file_to_stdout(key_file, header, in);
}
// Print the usage text for 'git-crypt init' (kept to 80 columns).
void help_init (std::ostream& out)
{
	static const char* const usage[] = {
		"Usage: git-crypt init [OPTIONS]",
		"",
		" -k, --key-name KEYNAME Initialize the given key, instead of the default",
		"",
	};
	for (const char* line : usage) {
		out << line << std::endl;
	}
}
// Initialize the repository for git-crypt: generate a fresh key, store it
// under the repo's internal key path, and configure the git filters.
// Refuses to run twice for the same key name.
int init (int argc, const char** argv)
{
	const char* key_name = 0;
	Options_list options;
	options.push_back(Option_def("-k", &key_name));
	options.push_back(Option_def("--key-name", &key_name));
	int argi = parse_options(options, argc, argv);
	// Legacy invocation: 'git-crypt init KEYFILE' behaves like 'unlock KEYFILE'
	if (!key_name && argc - argi == 1) {
		std::clog << "Warning: 'git-crypt init' with a key file is deprecated as of git-crypt 0.4" << std::endl;
		std::clog << "and will be removed in a future release. Please get in the habit of using" << std::endl;
		std::clog << "'git-crypt unlock KEYFILE' instead." << std::endl;
		return unlock(argc, argv);
	}
	if (argc - argi != 0) {
		std::clog << "Error: git-crypt init takes no arguments" << std::endl;
		help_init(std::clog);
		return 2;
	}
	if (key_name) {
		validate_key_name_or_throw(key_name);
	}
	std::string internal_key_path(get_internal_key_path(key_name));
	if (access(internal_key_path.c_str(), F_OK) == 0) {
		// TODO: add a -f option to reinitialize the repo anyways (this should probably imply a refresh)
		// TODO: include key_name in error message
		std::clog << "Error: this repository has already been initialized with git-crypt." << std::endl;
		return 1;
	}
	// 1. Generate a key and install it
	std::clog << "Generating key..." << std::endl;
	Key_file key_file;
	key_file.set_key_name(key_name);
	key_file.generate();
	mkdir_parent(internal_key_path);
	if (!key_file.store_to_file(internal_key_path.c_str())) {
		std::clog << "Error: " << internal_key_path << ": unable to write key file" << std::endl;
		return 1;
	}
	// 2. Configure git for git-crypt
	configure_git_filters(key_name);
	return 0;
}
// Print the usage text for 'git-crypt unlock' (kept to 80 columns).
void help_unlock (std::ostream& out)
{
	static const char* const usage[] = {
		"Usage: git-crypt unlock",
		" or: git-crypt unlock KEY_FILE ...",
	};
	for (const char* line : usage) {
		out << line << std::endl;
	}
}
// Decrypt the repository: load key(s) either from symmetric key files given
// on the command line (or stdin with "-"), or by GPG-decrypting the repo
// keys with an available secret key; then install the keys, configure the
// git filters, and force-checkout the currently-encrypted files.
int unlock (int argc, const char** argv)
{
	// 1. Make sure working directory is clean (ignoring untracked files)
	// We do this because we check out files later, and we don't want the
	// user to lose any changes. (TODO: only care if encrypted files are
	// modified, since we only check out encrypted files)
	// Running 'git status' also serves as a check that the Git repo is accessible.
	std::stringstream status_output;
	get_git_status(status_output);
	if (status_output.peek() != -1) { // any output at all means a dirty tree
		std::clog << "Error: Working directory not clean." << std::endl;
		std::clog << "Please commit your changes or 'git stash' them before running 'git-crypt unlock'." << std::endl;
		return 1;
	}
	// 2. Load the key(s)
	std::vector<Key_file> key_files;
	if (argc > 0) {
		// Read from the symmetric key file(s)
		for (int argi = 0; argi < argc; ++argi) {
			const char* symmetric_key_file = argv[argi];
			Key_file key_file;
			try {
				if (std::strcmp(symmetric_key_file, "-") == 0) {
					key_file.load(std::cin);
				} else {
					if (!key_file.load_from_file(symmetric_key_file)) {
						std::clog << "Error: " << symmetric_key_file << ": unable to read key file" << std::endl;
						return 1;
					}
				}
			// NOTE(review): exceptions caught by value here; catching by
			// const reference would avoid copying - confirm and clean up.
			} catch (Key_file::Incompatible) {
				std::clog << "Error: " << symmetric_key_file << " is in an incompatible format" << std::endl;
				std::clog << "Please upgrade to a newer version of git-crypt." << std::endl;
				return 1;
			} catch (Key_file::Malformed) {
				std::clog << "Error: " << symmetric_key_file << ": not a valid git-crypt key file" << std::endl;
				std::clog << "If this key was created prior to git-crypt 0.4, you need to migrate it" << std::endl;
				std::clog << "by running 'git-crypt migrate-key /path/to/old_key /path/to/migrated_key'." << std::endl;
				return 1;
			}
			key_files.push_back(key_file);
		}
	} else {
		// Decrypt GPG key from root of repo
		std::string repo_keys_path(get_repo_keys_path());
		std::vector<std::string> gpg_secret_keys(gpg_list_secret_keys());
		// TODO: command-line option to specify the precise secret key to use
		// TODO: don't hard code key version 0 here - instead, determine the most recent version and try to decrypt that, or decrypt all versions if command-line option specified
		// TODO: command line option to only unlock specific key instead of all of them
		// TODO: avoid decrypting repo keys which are already unlocked in the .git directory
		if (!decrypt_repo_keys(key_files, 0, gpg_secret_keys, repo_keys_path)) {
			std::clog << "Error: no GPG secret key available to unlock this repository." << std::endl;
			std::clog << "To unlock with a shared symmetric key instead, specify the path to the symmetric key as an argument to 'git-crypt unlock'." << std::endl;
			// TODO std::clog << "To see a list of GPG keys authorized to unlock this repository, run 'git-crypt ls-gpg-users'." << std::endl;
			return 1;
		}
	}
	// 3. Install the key(s) and configure the git filters
	std::vector<std::string> encrypted_files;
	for (std::vector<Key_file>::iterator key_file(key_files.begin()); key_file != key_files.end(); ++key_file) {
		std::string internal_key_path(get_internal_key_path(key_file->get_key_name()));
		// TODO: croak if internal_key_path already exists???
		mkdir_parent(internal_key_path);
		if (!key_file->store_to_file(internal_key_path.c_str())) {
			std::clog << "Error: " << internal_key_path << ": unable to write key file" << std::endl;
			return 1;
		}
		configure_git_filters(key_file->get_key_name());
		get_encrypted_files(encrypted_files, key_file->get_key_name());
	}
	// 4. Check out the files that are currently encrypted.
	// Git won't check out a file if its mtime hasn't changed, so touch every file first.
	for (std::vector<std::string>::const_iterator file(encrypted_files.begin()); file != encrypted_files.end(); ++file) {
		touch_file(*file);
	}
	if (!git_checkout(encrypted_files)) {
		std::clog << "Error: 'git checkout' failed" << std::endl;
		std::clog << "git-crypt has been set up but existing encrypted files have not been decrypted" << std::endl;
		return 1;
	}
	return 0;
}
// Print the usage text for 'git-crypt lock' (kept to 80 columns).
// Fixed typo in the --force description: "uncommited" -> "uncommitted".
void help_lock (std::ostream& out)
{
	out << "Usage: git-crypt lock [OPTIONS]" << std::endl;
	out << std::endl;
	out << " -a, --all Lock all keys, instead of just the default" << std::endl;
	out << " -k, --key-name KEYNAME Lock the given key, instead of the default" << std::endl;
	out << " -f, --force Lock even if unclean (you may lose uncommitted work)" << std::endl;
	out << std::endl;
}
// Re-encrypt the working tree: remove the decrypted key(s) from .git,
// deconfigure the git filters, and force-checkout the affected files so
// their encrypted blobs are restored. -a locks every key; -f skips the
// clean-working-tree check.
int lock (int argc, const char** argv)
{
	const char* key_name = 0;
	bool all_keys = false;
	bool force = false;
	Options_list options;
	options.push_back(Option_def("-k", &key_name));
	options.push_back(Option_def("--key-name", &key_name));
	options.push_back(Option_def("-a", &all_keys));
	options.push_back(Option_def("--all", &all_keys));
	options.push_back(Option_def("-f", &force));
	options.push_back(Option_def("--force", &force));
	int argi = parse_options(options, argc, argv);
	if (argc - argi != 0) {
		std::clog << "Error: git-crypt lock takes no arguments" << std::endl;
		help_lock(std::clog);
		return 2;
	}
	if (all_keys && key_name) {
		std::clog << "Error: -k and --all options are mutually exclusive" << std::endl;
		return 2;
	}
	// 1. Make sure working directory is clean (ignoring untracked files)
	// We do this because we check out files later, and we don't want the
	// user to lose any changes. (TODO: only care if encrypted files are
	// modified, since we only check out encrypted files)
	// Running 'git status' also serves as a check that the Git repo is accessible.
	std::stringstream status_output;
	get_git_status(status_output);
	if (!force && status_output.peek() != -1) { // any output at all means a dirty tree
		std::clog << "Error: Working directory not clean." << std::endl;
		std::clog << "Please commit your changes or 'git stash' them before running 'git-crypt lock'." << std::endl;
		std::clog << "Or, use 'git-crypt lock --force' and possibly lose uncommitted changes." << std::endl;
		return 1;
	}
	// 2. deconfigure the git filters and remove decrypted keys
	std::vector<std::string> encrypted_files;
	if (all_keys) {
		// deconfigure for all keys
		std::vector<std::string> dirents = get_directory_contents(get_internal_keys_path().c_str());
		for (std::vector<std::string>::const_iterator dirent(dirents.begin()); dirent != dirents.end(); ++dirent) {
			// the directory named "default" holds the unnamed default key
			const char* this_key_name = (*dirent == "default" ? 0 : dirent->c_str());
			remove_file(get_internal_key_path(this_key_name));
			deconfigure_git_filters(this_key_name);
			get_encrypted_files(encrypted_files, this_key_name);
		}
	} else {
		// just handle the given key
		std::string internal_key_path(get_internal_key_path(key_name));
		if (access(internal_key_path.c_str(), F_OK) == -1 && errno == ENOENT) {
			std::clog << "Error: this repository is already locked";
			if (key_name) {
				std::clog << " with key '" << key_name << "'";
			}
			std::clog << "." << std::endl;
			return 1;
		}
		remove_file(internal_key_path);
		deconfigure_git_filters(key_name);
		get_encrypted_files(encrypted_files, key_name);
	}
	// 3. Check out the files that are currently decrypted but should be encrypted.
	// Git won't check out a file if its mtime hasn't changed, so touch every file first.
	for (std::vector<std::string>::const_iterator file(encrypted_files.begin()); file != encrypted_files.end(); ++file) {
		touch_file(*file);
	}
	if (!git_checkout(encrypted_files)) {
		std::clog << "Error: 'git checkout' failed" << std::endl;
		std::clog << "git-crypt has been locked up but existing decrypted files have not been encrypted" << std::endl;
		return 1;
	}
	return 0;
}
// Print the usage text for 'git-crypt add-gpg-user' (kept to 80 columns).
void help_add_gpg_user (std::ostream& out)
{
	static const char* const usage[] = {
		"Usage: git-crypt add-gpg-user [OPTIONS] GPG_USER_ID ...",
		"",
		" -k, --key-name KEYNAME Add GPG user to given key, instead of default",
		" -n, --no-commit Don't automatically commit",
		" --trusted Assume the GPG user IDs are trusted",
		"",
	};
	for (const char* line : usage) {
		out << line << std::endl;
	}
}
// Grant one or more GPG users access to the repository key: encrypt the
// latest key version to each user's public key, write the results into the
// repo state directory, and (unless -n) 'git add' and commit the new files.
int add_gpg_user (int argc, const char** argv)
{
	const char* key_name = 0;
	bool no_commit = false;
	bool trusted = false;
	Options_list options;
	options.push_back(Option_def("-k", &key_name));
	options.push_back(Option_def("--key-name", &key_name));
	options.push_back(Option_def("-n", &no_commit));
	options.push_back(Option_def("--no-commit", &no_commit));
	options.push_back(Option_def("--trusted", &trusted));
	int argi = parse_options(options, argc, argv);
	if (argc - argi == 0) {
		std::clog << "Error: no GPG user ID specified" << std::endl;
		help_add_gpg_user(std::clog);
		return 2;
	}
	// build a list of key fingerprints, and whether the key is trusted, for every collaborator specified on the command line
	std::vector<std::pair<std::string, bool> > collab_keys;
	for (int i = argi; i < argc; ++i) {
		std::vector<std::string> keys(gpg_lookup_key(argv[i]));
		if (keys.empty()) {
			std::clog << "Error: public key for '" << argv[i] << "' not found in your GPG keyring" << std::endl;
			return 1;
		}
		if (keys.size() > 1) {
			std::clog << "Error: more than one public key matches '" << argv[i] << "' - please be more specific" << std::endl;
			return 1;
		}
		// "0x" + 40 hex digits = a full SHA-1 fingerprint, which we treat as
		// implicitly trusted (the user typed the exact fingerprint)
		const bool is_full_fingerprint(std::strncmp(argv[i], "0x", 2) == 0 && std::strlen(argv[i]) == 42);
		collab_keys.push_back(std::make_pair(keys[0], trusted || is_full_fingerprint));
	}
	// TODO: have a retroactive option to grant access to all key versions, not just the most recent
	Key_file key_file;
	load_key(key_file, key_name);
	const Key_file::Entry* key = key_file.get_latest();
	if (!key) {
		std::clog << "Error: key file is empty" << std::endl;
		return 1;
	}
	const std::string state_path(get_repo_state_path());
	std::vector<std::string> new_files;
	encrypt_repo_key(key_name, *key, collab_keys, get_repo_keys_path(state_path), &new_files);
	// Add a .gitattributes file to the repo state directory to prevent files in it from being encrypted.
	const std::string state_gitattributes_path(state_path + "/.gitattributes");
	if (access(state_gitattributes_path.c_str(), F_OK) != 0) {
		std::ofstream state_gitattributes_file(state_gitattributes_path.c_str());
		// |--------------------------------------------------------------------------------| 80 chars
		state_gitattributes_file << "# Do not edit this file. To specify the files to encrypt, create your own\n";
		state_gitattributes_file << "# .gitattributes file in the directory where your files are.\n";
		state_gitattributes_file << "* !filter !diff\n";
		state_gitattributes_file << "*.gpg binary\n";
		state_gitattributes_file.close();
		if (!state_gitattributes_file) { // close() failure (e.g. disk full) sets failbit
			std::clog << "Error: unable to write " << state_gitattributes_path << std::endl;
			return 1;
		}
		new_files.push_back(state_gitattributes_path);
	}
	// add/commit the new files
	if (!new_files.empty()) {
		// git add NEW_FILE ...
		std::vector<std::string> command;
		command.push_back("git");
		command.push_back("add");
		command.push_back("--");
		command.insert(command.end(), new_files.begin(), new_files.end());
		if (!successful_exit(exec_command(command))) {
			std::clog << "Error: 'git add' failed" << std::endl;
			return 1;
		}
		// git commit ...
		if (!no_commit) {
			// TODO: include key_name in commit message
			std::ostringstream commit_message_builder;
			commit_message_builder << "Add " << collab_keys.size() << " git-crypt collaborator" << (collab_keys.size() != 1 ? "s" : "") << "\n\nNew collaborators:\n\n";
			for (std::vector<std::pair<std::string, bool> >::const_iterator collab(collab_keys.begin()); collab != collab_keys.end(); ++collab) {
				commit_message_builder << " " << collab->first << '\n';
				commit_message_builder << " " << gpg_get_uid(collab->first) << '\n';
			}
			// git commit -m MESSAGE NEW_FILE ...
			command.clear();
			command.push_back("git");
			command.push_back("commit");
			command.push_back("-m");
			command.push_back(commit_message_builder.str());
			command.push_back("--");
			command.insert(command.end(), new_files.begin(), new_files.end());
			if (!successful_exit(exec_command(command))) {
				std::clog << "Error: 'git commit' failed" << std::endl;
				return 1;
			}
		}
	}
	return 0;
}
// Print the usage text for 'git-crypt rm-gpg-user' (kept to 80 columns).
void help_rm_gpg_user (std::ostream& out)
{
	static const char* const usage[] = {
		"Usage: git-crypt rm-gpg-user [OPTIONS] GPG_USER_ID ...",
		"",
		" -k, --key-name KEYNAME Remove user from given key, instead of default",
		" -n, --no-commit Don't automatically commit",
		"",
	};
	for (const char* line : usage) {
		out << line << std::endl;
	}
}
// Revoke a GPG user's access to the repository key.
// Not yet implemented; always reports an error and exits with status 1.
int rm_gpg_user (int argc, const char** argv) // TODO
{
	std::clog << "Error: rm-gpg-user is not yet implemented." << std::endl;
	return 1;
}
// Print the usage text for 'git-crypt ls-gpg-users' (kept to 80 columns).
void help_ls_gpg_users (std::ostream& out)
{
	out << "Usage: git-crypt ls-gpg-users" << std::endl;
}
// List the GPG users authorized to unlock the repository.
// Not yet implemented; always reports an error and exits with status 1.
//
// Implementation sketch: scan the sub-directories of .git-crypt/keys and
// print, per key version, each authorized key's long hex ID plus its
// resolved user ID, e.g.:
//   Key version 0:
//     0x143DE9B3F7316900 Andrew Ayer <andrew@example.com>
//     0x4E386D9C9C61702F ???
// A long hex ID can be resolved with:
//   gpg --options /dev/null --fixed-list-mode --batch --with-colons --list-keys 0x143DE9B3F7316900
int ls_gpg_users (int argc, const char** argv) // TODO
{
	std::clog << "Error: ls-gpg-users is not yet implemented." << std::endl;
	return 1;
}
// Print the usage text for 'git-crypt export-key' (kept to 80 columns).
void help_export_key (std::ostream& out)
{
	static const char* const usage[] = {
		"Usage: git-crypt export-key [OPTIONS] FILENAME",
		"",
		" -k, --key-name KEYNAME Export the given key, instead of the default",
		"",
		"When FILENAME is -, export to standard out.",
	};
	for (const char* line : usage) {
		out << line << std::endl;
	}
}
int export_key (int argc, const char** argv)
{
// TODO: provide options to export only certain key versions
const char* key_name = 0;
Options_list options;
options.push_back(Option_def("-k", &key_name));
options.push_back(Option_def("--key-name", &key_name));
int argi = parse_options(options, argc, argv);
if (argc - argi != 1) {
std::clog << "Error: no filename specified" << std::endl;
help_export_key(std::clog);
return 2;
}
Key_file key_file;
load_key(key_file, key_name);
const char* out_file_name = argv[argi];
if (std::strcmp(out_file_name, "-") == 0) {
key_file.store(std::cout);
} else {
if (!key_file.store_to_file(out_file_name)) {
std::clog << "Error: " << out_file_name << ": unable to write key file" << std::endl;
return 1;
}
}
return 0;
}
// Print the usage text for 'git-crypt keygen' (kept to 80 columns).
void help_keygen (std::ostream& out)
{
	static const char* const usage[] = {
		"Usage: git-crypt keygen FILENAME",
		"",
		"When FILENAME is -, write to standard out.",
	};
	for (const char* line : usage) {
		out << line << std::endl;
	}
}
// Generate a brand-new git-crypt key, independent of any repository, and
// write it to FILENAME ("-" = stdout). Refuses to overwrite an existing file.
int keygen (int argc, const char** argv)
{
	if (argc != 1) {
		std::clog << "Error: no filename specified" << std::endl;
		help_keygen(std::clog);
		return 2;
	}
	const char* out_name = argv[0];
	const bool to_stdout = (std::strcmp(out_name, "-") == 0);
	// Never clobber an existing key file
	if (!to_stdout && access(out_name, F_OK) == 0) {
		std::clog << out_name << ": File already exists" << std::endl;
		return 1;
	}
	std::clog << "Generating key..." << std::endl;
	Key_file key_file;
	key_file.generate();
	if (to_stdout) {
		key_file.store(std::cout);
	} else if (!key_file.store_to_file(out_name)) {
		std::clog << "Error: " << out_name << ": unable to write key file" << std::endl;
		return 1;
	}
	return 0;
}
// Print the usage text for 'git-crypt migrate-key' (kept to 80 columns).
void help_migrate_key (std::ostream& out)
{
	static const char* const usage[] = {
		"Usage: git-crypt migrate-key OLDFILENAME NEWFILENAME",
		"",
		"Use - to read from standard in/write to standard out.",
	};
	for (const char* line : usage) {
		out << line << std::endl;
	}
}
int migrate_key (int argc, const char** argv)
{
if (argc != 2) {
std::clog << "Error: filenames not specified" << std::endl;
help_migrate_key(std::clog);
return 2;
}
const char* key_file_name = argv[0];
const char* new_key_file_name = argv[1];
Key_file key_file;
try {
if (std::strcmp(key_file_name, "-") == 0) {
key_file.load_legacy(std::cin);
} else {
std::ifstream in(key_file_name, std::fstream::binary);
if (!in) {
std::clog << "Error: " << key_file_name << ": unable to open for reading" << std::endl;
return 1;
}
key_file.load_legacy(in);
}
if (std::strcmp(new_key_file_name, "-") == 0) {
key_file.store(std::cout);
} else {
if (!key_file.store_to_file(new_key_file_name)) {
std::clog << "Error: " << new_key_file_name << ": unable to write key file" << std::endl;
return 1;
}
}
} catch (Key_file::Malformed) {
std::clog << "Error: " << key_file_name << ": not a valid legacy git-crypt key file" << std::endl;
return 1;
}
return 0;
}
// Print the usage text for 'git-crypt refresh' (kept to 80 columns).
void help_refresh (std::ostream& out)
{
	out << "Usage: git-crypt refresh" << std::endl;
}
// Re-run the smudge filter over the working tree (intended to behave like
// the force-checkout step of unlock). Not yet implemented; always reports
// an error and exits with status 1.
int refresh (int argc, const char** argv) // TODO: do a force checkout, much like in unlock
{
	std::clog << "Error: refresh is not yet implemented." << std::endl;
	return 1;
}
// Print the usage text for 'git-crypt status' (kept to 80 columns).
// The -r (repo status only) and -z (machine-parseable) modes are not yet
// implemented, so their usage lines are deliberately not advertised.
void help_status (std::ostream& out)
{
	static const char* const usage[] = {
		"Usage: git-crypt status [OPTIONS] [FILE ...]",
		"",
		" -e Show encrypted files only",
		" -u Show unencrypted files only",
		" -f, --fix Fix problems with the repository",
		"",
	};
	for (const char* line : usage) {
		out << line << std::endl;
	}
}
// Report (and with -f, repair) the encryption status of files in the repo.
// Walks 'git ls-files' output, checks each file's filter/diff attributes,
// and flags files that are marked for encryption but whose staged blob is
// actually plaintext. Returns 0 if all is well, 1 if problems were found
// (or fixes failed), 2 on usage error.
int status (int argc, const char** argv)
{
	// Usage:
	// git-crypt status -r [-z] Show repo status
	// git-crypt status [-e | -u] [-z] [FILE ...] Show encrypted status of files
	// git-crypt status -f Fix unencrypted blobs
	bool repo_status_only = false; // -r show repo status only
	bool show_encrypted_only = false; // -e show encrypted files only
	bool show_unencrypted_only = false; // -u show unencrypted files only
	bool fix_problems = false; // -f fix problems
	bool machine_output = false; // -z machine-parseable output
	Options_list options;
	options.push_back(Option_def("-r", &repo_status_only));
	options.push_back(Option_def("-e", &show_encrypted_only));
	options.push_back(Option_def("-u", &show_unencrypted_only));
	options.push_back(Option_def("-f", &fix_problems));
	options.push_back(Option_def("--fix", &fix_problems));
	options.push_back(Option_def("-z", &machine_output));
	int argi = parse_options(options, argc, argv);
	// Reject incompatible option combinations up front
	if (repo_status_only) {
		if (show_encrypted_only || show_unencrypted_only) {
			std::clog << "Error: -e and -u options cannot be used with -r" << std::endl;
			return 2;
		}
		if (fix_problems) {
			std::clog << "Error: -f option cannot be used with -r" << std::endl;
			return 2;
		}
		if (argc - argi != 0) {
			std::clog << "Error: filenames cannot be specified when -r is used" << std::endl;
			return 2;
		}
	}
	if (show_encrypted_only && show_unencrypted_only) {
		std::clog << "Error: -e and -u options are mutually exclusive" << std::endl;
		return 2;
	}
	if (fix_problems && (show_encrypted_only || show_unencrypted_only)) {
		std::clog << "Error: -e and -u options cannot be used with -f" << std::endl;
		return 2;
	}
	if (machine_output) {
		// TODO: implement machine-parseable output
		std::clog << "Sorry, machine-parseable output is not yet implemented" << std::endl;
		return 2;
	}
	if (argc - argi == 0) {
		// TODO: check repo status:
		// is it set up for git-crypt?
		// which keys are unlocked?
		// --> check for filter config (see configure_git_filters()) and corresponding internal key
		if (repo_status_only) {
			return 0;
		}
	}
	// git ls-files -cotsz --exclude-standard ...
	// (-c cached, -o other/untracked, -t status tag, -s stage info, -z NUL-terminated)
	std::vector<std::string> command;
	command.push_back("git");
	command.push_back("ls-files");
	command.push_back("-cotsz");
	command.push_back("--exclude-standard");
	command.push_back("--");
	if (argc - argi == 0) {
		// No paths given: list the whole work tree from the repository top
		const std::string path_to_top(get_path_to_top());
		if (!path_to_top.empty()) {
			command.push_back(path_to_top);
		}
	} else {
		for (int i = argi; i < argc; ++i) {
			command.push_back(argv[i]);
		}
	}
	std::stringstream output;
	if (!successful_exit(exec_command(command, output))) {
		throw Error("'git ls-files' failed - is this a Git repository?");
	}
	// Output looks like (w/o newlines):
	// ? .gitignore\0
	// H 100644 06ec22e5ed0de9280731ef000a10f9c3fbc26338 0 afile\0
	std::vector<std::string> files;
	bool attribute_errors = false;
	bool unencrypted_blob_errors = false;
	unsigned int nbr_of_fixed_blobs = 0; // running count of successfully re-staged files (-f)
	unsigned int nbr_of_fix_errors = 0; // running count of files we failed to fix (-f)
	while (output.peek() != -1) {
		std::string tag;
		std::string object_id;
		std::string filename;
		output >> tag;
		if (tag != "?") {
			// Tracked entry: mode, object ID, and stage number precede the name
			std::string mode;
			std::string stage;
			output >> mode >> object_id >> stage;
			if (!is_git_file_mode(mode)) {
				continue; // skip symlinks, submodules, etc.
			}
		}
		output >> std::ws;
		std::getline(output, filename, '\0'); // -z output: filename is NUL-terminated
		// TODO: get file attributes en masse for efficiency... unfortunately this requires machine-parseable output from git check-attr to be workable, and this is only supported in Git 1.8.5 and above (released 27 Nov 2013)
		const std::pair<std::string, std::string> file_attrs(get_file_attributes(filename));
		// first = filter attribute, second = diff attribute
		if (file_attrs.first == "git-crypt" || std::strncmp(file_attrs.first.c_str(), "git-crypt-", 10) == 0) {
			// File is encrypted
			const bool blob_is_unencrypted = !object_id.empty() && !check_if_blob_is_encrypted(object_id);
			if (fix_problems && blob_is_unencrypted) {
				if (access(filename.c_str(), F_OK) != 0) {
					std::clog << "Error: " << filename << ": cannot stage encrypted version because not present in working tree - please 'git rm' or 'git checkout' it" << std::endl;
					++nbr_of_fix_errors;
				} else {
					// Touch then re-add so the clean filter runs and stages ciphertext
					touch_file(filename);
					std::vector<std::string> git_add_command;
					git_add_command.push_back("git");
					git_add_command.push_back("add");
					git_add_command.push_back("--");
					git_add_command.push_back(filename);
					if (!successful_exit(exec_command(git_add_command))) {
						throw Error("'git-add' failed");
					}
					if (check_if_file_is_encrypted(filename)) {
						std::cout << filename << ": staged encrypted version" << std::endl;
						++nbr_of_fixed_blobs;
					} else {
						std::clog << "Error: " << filename << ": still unencrypted even after staging" << std::endl;
						++nbr_of_fix_errors;
					}
				}
			} else if (!fix_problems && !show_unencrypted_only) {
				// TODO: output the key name used to encrypt this file
				std::cout << " encrypted: " << filename;
				if (file_attrs.second != file_attrs.first) {
					// but diff filter is not properly set
					std::cout << " *** WARNING: diff=" << file_attrs.first << " attribute not set ***";
					attribute_errors = true;
				}
				if (blob_is_unencrypted) {
					// File not actually encrypted
					std::cout << " *** WARNING: staged/committed version is NOT ENCRYPTED! ***";
					unencrypted_blob_errors = true;
				}
				std::cout << std::endl;
			}
		} else {
			// File not encrypted
			if (!fix_problems && !show_encrypted_only) {
				std::cout << "not encrypted: " << filename << std::endl;
			}
		}
	}
	// Summarize problems found; any problem makes the exit status non-zero
	int exit_status = 0;
	if (attribute_errors) {
		std::cout << std::endl;
		std::cout << "Warning: one or more files has a git-crypt filter attribute but not a" << std::endl;
		std::cout << "corresponding git-crypt diff attribute. For proper 'git diff' operation" << std::endl;
		std::cout << "you should fix the .gitattributes file to specify the correct diff attribute." << std::endl;
		std::cout << "Consult the git-crypt documentation for help." << std::endl;
		exit_status = 1;
	}
	if (unencrypted_blob_errors) {
		std::cout << std::endl;
		std::cout << "Warning: one or more files is marked for encryption via .gitattributes but" << std::endl;
		std::cout << "was staged and/or committed before the .gitattributes file was in effect." << std::endl;
		std::cout << "Run 'git-crypt status' with the '-f' option to stage an encrypted version." << std::endl;
		exit_status = 1;
	}
	if (nbr_of_fixed_blobs) {
		std::cout << "Staged " << nbr_of_fixed_blobs << " encrypted file" << (nbr_of_fixed_blobs != 1 ? "s" : "") << "." << std::endl;
		std::cout << "Warning: if these files were previously committed, unencrypted versions still exist in the repository's history." << std::endl;
	}
	if (nbr_of_fix_errors) {
		std::cout << "Unable to stage " << nbr_of_fix_errors << " file" << (nbr_of_fix_errors != 1 ? "s" : "") << "." << std::endl;
		exit_status = 1;
	}
	return exit_status;
}
| 58,290
|
C++
|
.cpp
| 1,476
| 36.836721
| 219
| 0.662967
|
AGWA/git-crypt
| 8,224
| 478
| 134
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
18,507
|
fhstream.cpp
|
AGWA_git-crypt/fhstream.cpp
|
/*
* Copyright (C) 2012, 2015 Andrew Ayer
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Except as contained in this notice, the name(s) of the above copyright
* holders shall not be used in advertising or otherwise to promote the
* sale, use or other dealings in this Software without prior written
* authorization.
*/
#include <cstring>
#include <algorithm> // for std::min
#include "fhstream.hpp"
/*
* ofhstream
*/
// Construct an output streambuf that writes through `arg_write_fun`, an
// fwrite-style callback invoked as write_fun(handle, data, count).
// Output is buffered (up to default_buffer_size bytes) before flushing.
ofhbuf::ofhbuf (void* arg_handle, size_t (*arg_write_fun)(void*, const void*, size_t))
: handle(arg_handle),
  write_fun(arg_write_fun),
  buffer(new char[default_buffer_size]),
  buffer_size(default_buffer_size)
{
	// Initialize the put-area pointers over the freshly allocated buffer.
	reset_buffer();
}
// Flush any pending buffered output, then release the buffer.
ofhbuf::~ofhbuf ()
{
	if (handle) {
		try {
			sync();
		} catch (...) {
			// Ignore exception since we're in the destructor.
			// To catch write errors, call sync() explicitly.
		}
	}
	delete[] buffer;
}
// Flush the buffered bytes (plus `c`, if it is not eof) through the
// write callback, then reset the put area. Write errors are assumed to
// be reported by write_fun throwing, so the loop only handles short
// writes. Returns a non-eof value on success.
ofhbuf::int_type ofhbuf::overflow (ofhbuf::int_type c)
{
	const char* p = pbase();
	std::streamsize bytes_to_write = pptr() - p;
	if (!is_eof(c)) {
		// Store c in the slot at pptr(); assumes reset_buffer() leaves
		// one spare byte past epptr() for this -- TODO confirm against
		// reset_buffer() in fhstream.hpp.
		*pptr() = c;
		++bytes_to_write;
	}
	while (bytes_to_write > 0) {
		// Loop to cope with short writes.
		const size_t bytes_written = write_fun(handle, p, bytes_to_write);
		bytes_to_write -= bytes_written;
		p += bytes_written;
	}
	reset_buffer();
	return traits_type::to_int_type(0);
}
int ofhbuf::sync ()
{
return !is_eof(overflow(traits_type::eof())) ? 0 : -1;
}
// Write `n` characters from `s`. Sufficiently large writes bypass the
// buffer and go straight to the write callback; small writes fall back
// to the default buffered implementation. Returns n (write errors are
// reported by write_fun throwing).
std::streamsize ofhbuf::xsputn (const char* s, std::streamsize n)
{
	// Use heuristic to decide whether to write directly or just use buffer
	// Write directly only if n >= MIN(4096, available buffer capacity)
	// (this is similar to what basic_filebuf does)
	if (n < std::min<std::streamsize>(4096, epptr() - pptr())) {
		// Not worth it to do a direct write
		return std::streambuf::xsputn(s, n);
	}
	// Before we can do a direct write of this string, we need to flush
	// out the current contents of the buffer.
	if (pbase() != pptr()) {
		overflow(traits_type::eof()); // throws an exception or it succeeds
	}
	// Now we can go ahead and write out the string.
	size_t bytes_to_write = n;
	while (bytes_to_write > 0) {
		// Loop to cope with short writes.
		const size_t bytes_written = write_fun(handle, s, bytes_to_write);
		bytes_to_write -= bytes_written;
		s += bytes_written;
	}
	return n; // Return the total bytes written
}
std::streambuf* ofhbuf::setbuf (char* s, std::streamsize n)
{
if (s == 0 && n == 0) {
// Switch to unbuffered
// This won't take effect until the next overflow or sync
// (We defer it taking effect so that write errors can be properly reported)
// To cause it to take effect as soon as possible, we artificially reduce the
// size of the buffer so it has no space left. This will trigger an overflow
// on the next put.
std::streambuf::setp(pbase(), pptr());
std::streambuf::pbump(pptr() - pbase());
buffer_size = 1;
}
return this;
}
/*
* ifhstream
*/
// Construct an input streambuf that reads through `arg_read_fun`, an
// fread-style callback invoked as read_fun(handle, buf, count) that
// returns 0 at end of input. The allocation reserves putback_size
// extra bytes at the front of the buffer for putback characters.
ifhbuf::ifhbuf (void* arg_handle, size_t (*arg_read_fun)(void*, void*, size_t))
: handle(arg_handle),
  read_fun(arg_read_fun),
  buffer(new char[default_buffer_size + putback_size]),
  buffer_size(default_buffer_size)
{
	// Start with an empty get area: nothing read yet, no putback bytes.
	reset_buffer(0, 0);
}
// Release the read buffer. The opaque `handle` is not closed or
// otherwise touched here.
ifhbuf::~ifhbuf ()
{
	delete[] buffer;
}
// Refill the get area from the read callback when it is exhausted.
// Returns the next available character, or eof() at end of input.
ifhbuf::int_type ifhbuf::underflow ()
{
	if (gptr() >= egptr()) { // A true underflow (no bytes in buffer left to read)
		// Move the putback_size most-recently-read characters into the putback area
		size_t nputback = std::min<size_t>(gptr() - eback(), putback_size);
		std::memmove(buffer + (putback_size - nputback), gptr() - nputback, nputback);
		// Now read new characters from the file descriptor
		const size_t nread = read_fun(handle, buffer + putback_size, buffer_size);
		if (nread == 0) {
			// EOF (read_fun returns 0 only at end of input)
			return traits_type::eof();
		}
		// Reset the buffer: nputback putback bytes, nread readable bytes
		reset_buffer(nputback, nread);
	}
	// Return the next character
	return traits_type::to_int_type(*gptr());
}
// Read up to `n` characters into `s`. Large reads first drain the
// internal buffer and then read directly from the callback, bypassing
// the buffer; small reads use the default buffered implementation.
// Returns the number of bytes actually read (short only at EOF).
std::streamsize ifhbuf::xsgetn (char* s, std::streamsize n)
{
	// Use heuristic to decide whether to read directly
	// Read directly only if n >= bytes_available + 4096
	std::streamsize bytes_available = egptr() - gptr();
	if (n < bytes_available + 4096) {
		// Not worth it to do a direct read
		return std::streambuf::xsgetn(s, n);
	}
	std::streamsize total_bytes_read = 0;
	// First, copy out the bytes currently in the buffer
	std::memcpy(s, gptr(), bytes_available);
	s += bytes_available;
	n -= bytes_available;
	total_bytes_read += bytes_available;
	// Now do the direct read
	while (n > 0) {
		const size_t bytes_read = read_fun(handle, s, n);
		if (bytes_read == 0) {
			// EOF
			break;
		}
		s += bytes_read;
		n -= bytes_read;
		total_bytes_read += bytes_read;
	}
	// Fill up the putback area with the most recently read characters
	// (they are copied back out of the caller's buffer, which `s` now
	// points just past).
	size_t nputback = std::min<size_t>(total_bytes_read, putback_size);
	std::memcpy(buffer + (putback_size - nputback), s - nputback, nputback);
	// Reset the buffer with no bytes available for reading, but with some putback characters
	reset_buffer(nputback, 0);
	// Return the total number of bytes read
	return total_bytes_read;
}
// Only setbuf(0, 0) is meaningful here: it requests unbuffered input.
// Any other arguments are ignored.
std::streambuf* ifhbuf::setbuf (char* s, std::streamsize n)
{
	const bool unbuffered_requested = (s == 0 && n == 0);
	if (unbuffered_requested) {
		// Take effect lazily, at the next underflow, so the bytes already
		// sitting in the buffer are not lost; from then on each refill
		// fetches a single byte.
		buffer_size = 1;
	}
	return this;
}
| 6,313
|
C++
|
.cpp
| 187
| 31.368984
| 90
| 0.704239
|
AGWA/git-crypt
| 8,224
| 478
| 134
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
18,508
|
coprocess-win32.cpp
|
AGWA_git-crypt/coprocess-win32.cpp
|
/*
* Copyright 2015 Andrew Ayer
*
* This file is part of git-crypt.
*
* git-crypt is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* git-crypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with git-crypt. If not, see <http://www.gnu.org/licenses/>.
*
* Additional permission under GNU GPL version 3 section 7:
*
* If you modify the Program, or any covered work, by linking or
* combining it with the OpenSSL project's OpenSSL library (or a
* modified version of that library), containing parts covered by the
* terms of the OpenSSL or SSLeay licenses, the licensors of the Program
* grant you additional permission to convey the resulting work.
* Corresponding Source for a non-source form of such a combination
* shall include the source code for the parts of OpenSSL used as well
* as that of the covered work.
*/
#include "coprocess-win32.hpp"
#include "util.hpp"
// Append `arg` to `cmdline`, quoted and escaped according to the
// Win32/CRT command-line parsing rules.
// For an explanation of Win32's arcane argument quoting rules, see:
// http://msdn.microsoft.com/en-us/library/17w5ykft%28v=vs.85%29.aspx
// http://msdn.microsoft.com/en-us/library/bb776391%28v=vs.85%29.aspx
// http://blogs.msdn.com/b/twistylittlepassagesallalike/archive/2011/04/23/everyone-quotes-arguments-the-wrong-way.aspx
// http://blogs.msdn.com/b/oldnewthing/archive/2010/09/17/10063629.aspx
static void escape_cmdline_argument (std::string& cmdline, const std::string& arg)
{
	cmdline.push_back('"');
	std::string::size_type i = 0;
	const std::string::size_type len = arg.size();
	while (i < len) {
		const char ch = arg[i];
		if (ch == '"') {
			// Embedded double quote: escape with a backslash.
			cmdline.append("\\\"");
			++i;
		} else if (ch == '\\') {
			// Count the whole run of backslashes.
			std::string::size_type run = 0;
			while (i < len && arg[i] == '\\') {
				++run;
				++i;
			}
			if (i == len || arg[i] == '"') {
				// Backslashes preceding a quote (or the closing quote
				// we add at the end) must be doubled.
				run *= 2;
			}
			cmdline.append(run, '\\');
		} else {
			cmdline.push_back(ch);
			++i;
		}
	}
	cmdline.push_back('"');
}
// Join `command` into a single space-separated command line, quoting
// each argument with escape_cmdline_argument().
static std::string format_cmdline (const std::vector<std::string>& command)
{
	std::string cmdline;
	bool first = true;
	for (std::vector<std::string>::const_iterator arg(command.begin()); arg != command.end(); ++arg) {
		if (!first) {
			cmdline.push_back(' ');
		}
		first = false;
		escape_cmdline_argument(cmdline, *arg);
	}
	return cmdline;
}
// Launch `command` as a child process with the given standard handles;
// a null handle means the child gets this process's corresponding
// standard handle. Returns the child's process handle, which the
// caller is responsible for closing. Throws System_error on failure.
static HANDLE spawn_command (const std::vector<std::string>& command, HANDLE stdin_handle, HANDLE stdout_handle, HANDLE stderr_handle)
{
	PROCESS_INFORMATION proc_info;
	ZeroMemory(&proc_info, sizeof(proc_info));
	STARTUPINFO start_info;
	ZeroMemory(&start_info, sizeof(start_info));
	start_info.cb = sizeof(STARTUPINFO);
	start_info.hStdInput = stdin_handle ? stdin_handle : GetStdHandle(STD_INPUT_HANDLE);
	start_info.hStdOutput = stdout_handle ? stdout_handle : GetStdHandle(STD_OUTPUT_HANDLE);
	start_info.hStdError = stderr_handle ? stderr_handle : GetStdHandle(STD_ERROR_HANDLE);
	start_info.dwFlags |= STARTF_USESTDHANDLES;
	std::string cmdline(format_cmdline(command));
	if (!CreateProcessA(nullptr,	// application name (nullptr to use command line)
				const_cast<char*>(cmdline.c_str()),
				nullptr,	// process security attributes
				nullptr,	// primary thread security attributes
				TRUE,		// handles are inherited
				0,		// creation flags
				nullptr,	// use parent's environment
				nullptr,	// use parent's current directory
				&start_info,
				&proc_info)) {
		throw System_error("CreateProcess", cmdline, GetLastError());
	}
	// The thread handle is never needed; only the process handle is kept.
	CloseHandle(proc_info.hThread);
	return proc_info.hProcess;
}
// Start with no child process and no pipes; stdin_pipe() and
// stdout_pipe() create the pipes on demand, and spawn() launches the
// child.
Coprocess::Coprocess ()
{
	proc_handle = nullptr;
	stdin_pipe_reader = nullptr;
	stdin_pipe_writer = nullptr;
	stdin_pipe_ostream = nullptr;
	stdout_pipe_reader = nullptr;
	stdout_pipe_writer = nullptr;
	stdout_pipe_istream = nullptr;
}
// Close any open pipe streams/handles and release the process handle.
// Note: does not wait for the child; call wait() first if the exit
// status matters.
Coprocess::~Coprocess ()
{
	close_stdin();
	close_stdout();
	if (proc_handle) {
		CloseHandle(proc_handle);
	}
}
// Return an ostream that writes to the child's stdin, creating the
// underlying pipe on first call. Must be called before spawn(), since
// spawn() passes the pipe's read end to the child.
std::ostream* Coprocess::stdin_pipe ()
{
	if (!stdin_pipe_ostream) {
		SECURITY_ATTRIBUTES sec_attr;
		// Set the bInheritHandle flag so pipe handles are inherited.
		sec_attr.nLength = sizeof(SECURITY_ATTRIBUTES);
		sec_attr.bInheritHandle = TRUE;
		sec_attr.lpSecurityDescriptor = nullptr;
		// Create a pipe for the child process's STDIN.
		if (!CreatePipe(&stdin_pipe_reader, &stdin_pipe_writer, &sec_attr, 0)) {
			throw System_error("CreatePipe", "", GetLastError());
		}
		// Ensure the write handle to the pipe for STDIN is not inherited.
		if (!SetHandleInformation(stdin_pipe_writer, HANDLE_FLAG_INHERIT, 0)) {
			throw System_error("SetHandleInformation", "", GetLastError());
		}
		// Wrap the write end in a buffered ostream (write_stdin callback).
		stdin_pipe_ostream = new ofhstream(this, write_stdin);
	}
	return stdin_pipe_ostream;
}
// Tear down the stdin pipe: destroy the stream wrapper first (its
// destructor flushes pending output), then close whichever pipe
// handles are still open. Closing the write end lets the child
// observe EOF on its stdin.
void Coprocess::close_stdin ()
{
	delete stdin_pipe_ostream;
	stdin_pipe_ostream = nullptr;
	if (stdin_pipe_writer) {
		CloseHandle(stdin_pipe_writer);
		stdin_pipe_writer = nullptr;
	}
	if (stdin_pipe_reader) {
		CloseHandle(stdin_pipe_reader);
		stdin_pipe_reader = nullptr;
	}
}
// Return an istream that reads the child's stdout, creating the
// underlying pipe on first call. Must be called before spawn(), since
// spawn() passes the pipe's write end to the child.
std::istream* Coprocess::stdout_pipe ()
{
	if (!stdout_pipe_istream) {
		SECURITY_ATTRIBUTES sec_attr;
		// Set the bInheritHandle flag so pipe handles are inherited.
		sec_attr.nLength = sizeof(SECURITY_ATTRIBUTES);
		sec_attr.bInheritHandle = TRUE;
		sec_attr.lpSecurityDescriptor = nullptr;
		// Create a pipe for the child process's STDOUT.
		if (!CreatePipe(&stdout_pipe_reader, &stdout_pipe_writer, &sec_attr, 0)) {
			throw System_error("CreatePipe", "", GetLastError());
		}
		// Ensure the read handle to the pipe for STDOUT is not inherited.
		if (!SetHandleInformation(stdout_pipe_reader, HANDLE_FLAG_INHERIT, 0)) {
			throw System_error("SetHandleInformation", "", GetLastError());
		}
		// Wrap the read end in a buffered istream (read_stdout callback).
		stdout_pipe_istream = new ifhstream(this, read_stdout);
	}
	return stdout_pipe_istream;
}
// Tear down the stdout pipe: destroy the stream wrapper, then close
// whichever pipe handles are still open.
void Coprocess::close_stdout ()
{
	delete stdout_pipe_istream;
	stdout_pipe_istream = nullptr;
	if (stdout_pipe_writer) {
		CloseHandle(stdout_pipe_writer);
		stdout_pipe_writer = nullptr;
	}
	if (stdout_pipe_reader) {
		CloseHandle(stdout_pipe_reader);
		stdout_pipe_reader = nullptr;
	}
}
// Launch the child process. The child receives the read end of the
// stdin pipe and the write end of the stdout pipe (if those pipes were
// created); our copies of those ends are then closed so that pipe EOF
// is delivered correctly once the other side finishes.
void Coprocess::spawn (const std::vector<std::string>& args)
{
	proc_handle = spawn_command(args, stdin_pipe_reader, stdout_pipe_writer, nullptr);
	if (stdin_pipe_reader) {
		CloseHandle(stdin_pipe_reader);
		stdin_pipe_reader = nullptr;
	}
	if (stdout_pipe_writer) {
		CloseHandle(stdout_pipe_writer);
		stdout_pipe_writer = nullptr;
	}
}
// Block until the child process exits and return its exit code.
// Throws System_error if waiting or querying the exit code fails.
int Coprocess::wait ()
{
	if (WaitForSingleObject(proc_handle, INFINITE) == WAIT_FAILED) {
		throw System_error("WaitForSingleObject", "", GetLastError());
	}
	DWORD exit_code;
	if (!GetExitCodeProcess(proc_handle, &exit_code)) {
		throw System_error("GetExitCodeProcess", "", GetLastError());
	}
	return exit_code;
}
// fwrite-style callback used by the stdin ofhstream: write up to
// `count` bytes to the child's stdin pipe. May write fewer bytes than
// requested; throws System_error on failure.
size_t Coprocess::write_stdin (void* handle, const void* buf, size_t count)
{
	DWORD bytes_written;
	if (!WriteFile(static_cast<Coprocess*>(handle)->stdin_pipe_writer, buf, count, &bytes_written, nullptr)) {
		throw System_error("WriteFile", "", GetLastError());
	}
	return bytes_written;
}
// fread-style callback used by the stdout ifhstream: read up to
// `count` bytes from the child's stdout pipe; returns 0 at EOF.
size_t Coprocess::read_stdout (void* handle, void* buf, size_t count)
{
	// Note that ReadFile on a pipe may return with bytes_read==0 if the other
	// end of the pipe writes zero bytes, so retry when this happens.
	// When the other end of the pipe actually closes, ReadFile
	// fails with ERROR_BROKEN_PIPE.
	DWORD bytes_read;
	do {
		if (!ReadFile(static_cast<Coprocess*>(handle)->stdout_pipe_reader, buf, count, &bytes_read, nullptr)) {
			const DWORD read_error = GetLastError();
			if (read_error != ERROR_BROKEN_PIPE) {
				throw System_error("ReadFile", "", read_error);
			}
			// Broken pipe == the child closed its stdout: report EOF.
			return 0;
		}
	} while (bytes_read == 0);
	return bytes_read;
}
| 8,080
|
C++
|
.cpp
| 235
| 31.842553
| 134
| 0.722059
|
AGWA/git-crypt
| 8,224
| 478
| 134
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
18,509
|
util-win32.cpp
|
AGWA_git-crypt/util-win32.cpp
|
/*
* Copyright 2014 Andrew Ayer
*
* This file is part of git-crypt.
*
* git-crypt is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* git-crypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with git-crypt. If not, see <http://www.gnu.org/licenses/>.
*
* Additional permission under GNU GPL version 3 section 7:
*
* If you modify the Program, or any covered work, by linking or
* combining it with the OpenSSL project's OpenSSL library (or a
* modified version of that library), containing parts covered by the
* terms of the OpenSSL or SSLeay licenses, the licensors of the Program
* grant you additional permission to convey the resulting work.
* Corresponding Source for a non-source form of such a combination
* shall include the source code for the parts of OpenSSL used as well
* as that of the covered work.
*/
#include <io.h>
#include <stdio.h>
#include <fcntl.h>
#include <windows.h>
#include <vector>
#include <cstring>
// Render this error as the action, an optional ": target", and (when
// `error` is nonzero) the Win32 error message formatted by the system.
std::string System_error::message () const
{
	std::string mesg(action);
	if (!target.empty()) {
		mesg += ": ";
		mesg += target;
	}
	if (error) {
		LPTSTR error_message;
		// Let Windows allocate and format the text for this error code.
		FormatMessageA(
			FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
			nullptr,
			error,
			MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
			reinterpret_cast<LPTSTR>(&error_message),
			0,
			nullptr);
		mesg += error_message;
		// Buffer was allocated by FORMAT_MESSAGE_ALLOCATE_BUFFER.
		LocalFree(error_message);
	}
	return mesg;
}
// Create and open a uniquely-named temporary file in the system temp
// directory, closing any previously opened temp file first.
// GetTempFileName creates the file on disk; it is deleted again by
// close(). Throws System_error on failure.
void temp_fstream::open (std::ios_base::openmode mode)
{
	close();
	char tmpdir[MAX_PATH + 1];
	// Locate the system temporary directory.
	DWORD ret = GetTempPath(sizeof(tmpdir), tmpdir);
	if (ret == 0) {
		throw System_error("GetTempPath", "", GetLastError());
	} else if (ret > sizeof(tmpdir) - 1) {
		// Result would not fit in our buffer.
		throw System_error("GetTempPath", "", ERROR_BUFFER_OVERFLOW);
	}
	char tmpfilename[MAX_PATH + 1];
	// GetTempFileName both picks a unique name and creates the file.
	if (GetTempFileName(tmpdir, TEXT("git-crypt"), 0, tmpfilename) == 0) {
		throw System_error("GetTempFileName", "", GetLastError());
	}
	filename = tmpfilename;
	std::fstream::open(filename.c_str(), mode);
	if (!std::fstream::is_open()) {
		// Remove the file GetTempFileName created before reporting failure.
		DeleteFile(filename.c_str());
		throw System_error("std::fstream::open", filename, 0);
	}
}
// Close the temporary file, if one is open, and remove it from disk.
void temp_fstream::close ()
{
	if (!std::fstream::is_open()) {
		return;	// nothing is open; nothing to clean up
	}
	std::fstream::close();
	DeleteFile(filename.c_str());
}
// Create every missing ancestor directory of `path` ('/'-delimited).
// The final component of `path` itself is not created.
// Throws System_error if a directory cannot be created.
void mkdir_parent (const std::string& path)
{
	for (std::string::size_type slash = path.find('/', 1);
			slash != std::string::npos;
			slash = path.find('/', slash + 1)) {
		const std::string prefix(path.substr(0, slash));
		if (GetFileAttributes(prefix.c_str()) != INVALID_FILE_ATTRIBUTES) {
			continue;	// this ancestor already exists
		}
		if (!CreateDirectory(prefix.c_str(), nullptr)) {
			throw System_error("CreateDirectory", prefix, GetLastError());
		}
	}
}
// Return the full path of the currently running executable.
// Throws System_error if the path cannot be determined.
std::string our_exe_path ()
{
	std::vector<char> buffer(128);
	size_t len;
	// GetModuleFileNameA returns the buffer size when the path was
	// truncated, so keep doubling the buffer until the result fits.
	while ((len = GetModuleFileNameA(nullptr, &buffer[0], buffer.size())) == buffer.size()) {
		// buffer may have been truncated - grow and try again
		buffer.resize(buffer.size() * 2);
	}
	if (len == 0) {
		throw System_error("GetModuleFileNameA", "", GetLastError());
	}
	return std::string(buffer.begin(), buffer.begin() + len);
}
// Convert a raw process-wait status to an exit code. On Windows the
// status already is the exit code, so this is the identity function
// (the POSIX build presumably extracts it from the wait status word --
// TODO confirm against util-unix.cpp).
int exit_status (int status)
{
	return status;
}
// Set `filename`'s last-write time to the current time. A nonexistent
// file is silently ignored (not created); any other failure throws
// System_error.
void touch_file (const std::string& filename)
{
	HANDLE fh = CreateFileA(filename.c_str(), FILE_WRITE_ATTRIBUTES, FILE_SHARE_READ, nullptr, OPEN_EXISTING, 0, nullptr);
	if (fh == INVALID_HANDLE_VALUE) {
		DWORD error = GetLastError();
		if (error == ERROR_FILE_NOT_FOUND) {
			// Nothing to touch if the file does not exist.
			return;
		} else {
			throw System_error("CreateFileA", filename, error);
		}
	}
	SYSTEMTIME system_time;
	GetSystemTime(&system_time);
	FILETIME file_time;
	SystemTimeToFileTime(&system_time, &file_time);
	// Only the last-write time is updated (creation/access left null).
	if (!SetFileTime(fh, nullptr, nullptr, &file_time)) {
		DWORD error = GetLastError();
		CloseHandle(fh);	// don't leak the handle on the error path
		throw System_error("SetFileTime", filename, error);
	}
	CloseHandle(fh);
}
void remove_file (const std::string& filename)
{
if (!DeleteFileA(filename.c_str())) {
DWORD error = GetLastError();
if (error == ERROR_FILE_NOT_FOUND) {
return;
} else {
throw System_error("DeleteFileA", filename, error);
}
}
}
// Put stdin/stdout into binary mode (no CRLF text-mode translation),
// so encrypted byte streams pass through unmodified.
static void init_std_streams_platform ()
{
	_setmode(_fileno(stdin), _O_BINARY);
	_setmode(_fileno(stdout), _O_BINARY);
}
// Placeholder (TODO in the original source): should create `path` with
// permissions restricted to the current user; currently a no-op on
// Windows.
void create_protected_file (const char* path) // TODO
{
}
// rename() replacement for Windows, where rename() fails if the target
// already exists; remove the target first. Returns rename()'s result
// (0 on success, nonzero on failure).
int util_rename (const char* from, const char* to)
{
	// On Windows OS, it is necessary to ensure target file doesn't exist
	unlink(to);
	return rename(from, to);
}
std::vector<std::string> get_directory_contents (const char* path)
{
std::vector<std::string> filenames;
std::string patt(path);
if (!patt.empty() && patt[patt.size() - 1] != '/' && patt[patt.size() - 1] != '\\') {
patt.push_back('\\');
}
patt.push_back('*');
WIN32_FIND_DATAA ffd;
HANDLE h = FindFirstFileA(patt.c_str(), &ffd);
if (h == INVALID_HANDLE_VALUE) {
throw System_error("FindFirstFileA", patt, GetLastError());
}
do {
if (std::strcmp(ffd.cFileName, ".") != 0 && std::strcmp(ffd.cFileName, "..") != 0) {
filenames.push_back(ffd.cFileName);
}
} while (FindNextFileA(h, &ffd) != 0);
DWORD err = GetLastError();
if (err != ERROR_NO_MORE_FILES) {
throw System_error("FileNextFileA", patt, err);
}
FindClose(h);
return filenames;
}
| 5,696
|
C++
|
.cpp
| 188
| 28.031915
| 119
| 0.699854
|
AGWA/git-crypt
| 8,224
| 478
| 134
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
18,510
|
gpg.cpp
|
AGWA_git-crypt/gpg.cpp
|
/*
* Copyright 2014 Andrew Ayer
*
* This file is part of git-crypt.
*
* git-crypt is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* git-crypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with git-crypt. If not, see <http://www.gnu.org/licenses/>.
*
* Additional permission under GNU GPL version 3 section 7:
*
* If you modify the Program, or any covered work, by linking or
* combining it with the OpenSSL project's OpenSSL library (or a
* modified version of that library), containing parts covered by the
* terms of the OpenSSL or SSLeay licenses, the licensors of the Program
* grant you additional permission to convey the resulting work.
* Corresponding Source for a non-source form of such a combination
* shall include the source code for the parts of OpenSSL used as well
* as that of the covered work.
*/
#include "gpg.hpp"
#include "util.hpp"
#include "commands.hpp"
#include <sstream>
// Return the gpg executable to invoke: the user's `gpg.program` git
// config setting if present, otherwise plain "gpg".
static std::string gpg_get_executable()
{
	try {
		return get_git_config("gpg.program");
	} catch (...) {
		// Config lookup failed (e.g. setting absent): use the default.
		return "gpg";
	}
}
// Return the col'th (0-based) colon-delimited field of a line of
// `gpg --with-colons` output. Throws Gpg_error if the line has fewer
// than `col` delimiters.
static std::string gpg_nth_column (const std::string& line, unsigned int col)
{
	std::string::size_type start = 0;
	for (unsigned int skipped = 0; skipped < col; ++skipped) {
		const std::string::size_type delim = line.find_first_of(':', start);
		if (delim == std::string::npos) {
			throw Gpg_error("Malformed output from gpg");
		}
		start = delim + 1;
	}
	const std::string::size_type end = line.find_first_of(':', start);
	// The final column may be unterminated; take the rest of the line then.
	return line.substr(start, end == std::string::npos ? std::string::npos : end - start);
}
// Given a key fingerprint, return the key's UID (e.g. "John Smith
// <jsmith@example.com>"), or the empty string if no matching public
// key is in the keyring. Parses `gpg --with-colons` machine output.
std::string gpg_get_uid (const std::string& fingerprint)
{
	// gpg --batch --with-colons --fixed-list-mode --list-keys 0x7A399B2DB06D039020CD1CE1D0F3702D61489532
	std::vector<std::string> command;
	command.push_back(gpg_get_executable());
	command.push_back("--batch");
	command.push_back("--with-colons");
	command.push_back("--fixed-list-mode");
	command.push_back("--list-keys");
	command.push_back("0x" + fingerprint);
	std::stringstream command_output;
	if (!successful_exit(exec_command(command, command_output))) {
		// This could happen if the keyring does not contain a public key with this fingerprint
		return "";
	}
	// peek() == -1 signals the end of the captured output.
	while (command_output.peek() != -1) {
		std::string line;
		std::getline(command_output, line);
		if (line.substr(0, 4) == "uid:") {
			// uid:u::::1395975462::AB97D6E3E5D8789988CA55E5F77D9E7397D05229::John Smith <jsmith@example.com>:
			// want the 9th column (counting from 0)
			return gpg_nth_column(line, 9);
		}
	}
	return "";
}
// Return the fingerprints of all public keys matching the given search
// query (such as jsmith@example.com). Only fingerprints of primary
// keys ("pub" records) are returned, not those of subkeys. An empty
// list is returned when gpg fails or finds no match.
std::vector<std::string> gpg_lookup_key (const std::string& query)
{
	// gpg --batch --with-colons --fingerprint --list-keys jsmith@example.com
	std::vector<std::string> command;
	command.push_back(gpg_get_executable());
	command.push_back("--batch");
	command.push_back("--with-colons");
	command.push_back("--fingerprint");
	command.push_back("--list-keys");
	command.push_back(query);
	std::vector<std::string> fingerprints;
	std::stringstream command_output;
	if (!successful_exit(exec_command(command, command_output))) {
		return fingerprints;	// no match (or gpg failure): empty result
	}
	// A "fpr" record belongs to the primary key only when it follows a
	// "pub" record (a "sub" record switches us to subkey fingerprints).
	bool in_primary_key = false;
	while (command_output.peek() != -1) {
		std::string line;
		std::getline(command_output, line);
		const std::string record_type(line.substr(0, 4));
		if (record_type == "pub:") {
			in_primary_key = true;
		} else if (record_type == "sub:") {
			in_primary_key = false;
		} else if (record_type == "fpr:" && in_primary_key) {
			// fpr:::::::::7A399B2DB06D039020CD1CE1D0F3702D61489532:
			// want the 9th column (counting from 0)
			fingerprints.push_back(gpg_nth_column(line, 9));
		}
	}
	return fingerprints;
}
// Return the fingerprints of all secret keys in the local GPG keyring.
// Throws Gpg_error if gpg cannot be run successfully.
std::vector<std::string> gpg_list_secret_keys ()
{
	// gpg --batch --with-colons --list-secret-keys --fingerprint
	std::vector<std::string> command;
	command.push_back(gpg_get_executable());
	command.push_back("--batch");
	command.push_back("--with-colons");
	command.push_back("--list-secret-keys");
	command.push_back("--fingerprint");
	std::stringstream command_output;
	if (!successful_exit(exec_command(command, command_output))) {
		throw Gpg_error("gpg --list-secret-keys failed");
	}
	std::vector<std::string> secret_keys;
	// peek() == -1 signals the end of the captured output.
	while (command_output.peek() != -1) {
		std::string line;
		std::getline(command_output, line);
		if (line.substr(0, 4) == "fpr:") {
			// fpr:::::::::7A399B2DB06D039020CD1CE1D0F3702D61489532:
			// want the 9th column (counting from 0)
			secret_keys.push_back(gpg_nth_column(line, 9));
		}
	}
	return secret_keys;
}
// Encrypt `len` bytes starting at `p` to `filename` for the recipient
// identified by `recipient_fingerprint`. When `key_is_trusted` is set,
// gpg is told to encrypt even to keys without a trust path
// (--trust-model always). Throws Gpg_error on failure.
void gpg_encrypt_to_file (const std::string& filename, const std::string& recipient_fingerprint, bool key_is_trusted, const char* p, size_t len)
{
	// gpg --batch -o FILENAME -r RECIPIENT -e
	std::vector<std::string> command;
	command.push_back(gpg_get_executable());
	command.push_back("--batch");
	if (key_is_trusted) {
		command.push_back("--trust-model");
		command.push_back("always");
	}
	const std::string tail[] = { "-o", filename, "-r", "0x" + recipient_fingerprint, "-e" };
	command.insert(command.end(), tail, tail + 5);
	// The plaintext is fed to gpg on its standard input.
	if (!successful_exit(exec_command_with_input(command, p, len))) {
		throw Gpg_error("Failed to encrypt");
	}
}
// Decrypt `filename` with gpg, streaming the plaintext to `output`.
// Throws Gpg_error on failure.
void gpg_decrypt_from_file (const std::string& filename, std::ostream& output)
{
	// gpg -q -d FILENAME
	const char* const quiet_decrypt[] = { "-q", "-d" };
	std::vector<std::string> command;
	command.push_back(gpg_get_executable());
	command.insert(command.end(), quiet_decrypt, quiet_decrypt + 2);
	command.push_back(filename);
	if (!successful_exit(exec_command(command, output))) {
		throw Gpg_error("Failed to decrypt");
	}
}
| 6,112
|
C++
|
.cpp
| 171
| 33.327485
| 144
| 0.70777
|
AGWA/git-crypt
| 8,224
| 478
| 134
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
18,511
|
git-crypt.cpp
|
AGWA_git-crypt/git-crypt.cpp
|
/*
* Copyright 2012, 2014 Andrew Ayer
*
* This file is part of git-crypt.
*
* git-crypt is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* git-crypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with git-crypt. If not, see <http://www.gnu.org/licenses/>.
*
* Additional permission under GNU GPL version 3 section 7:
*
* If you modify the Program, or any covered work, by linking or
* combining it with the OpenSSL project's OpenSSL library (or a
* modified version of that library), containing parts covered by the
* terms of the OpenSSL or SSLeay licenses, the licensors of the Program
* grant you additional permission to convey the resulting work.
* Corresponding Source for a non-source form of such a combination
* shall include the source code for the parts of OpenSSL used as well
* as that of the covered work.
*/
#include "git-crypt.hpp"
#include "commands.hpp"
#include "util.hpp"
#include "crypto.hpp"
#include "key.hpp"
#include "gpg.hpp"
#include "parse_options.hpp"
#include <cstring>
#include <unistd.h>
#include <iostream>
#include <string.h>
const char* argv0;
// Print the top-level usage summary (command list) to `out`.
static void print_usage (std::ostream& out)
{
	out << "Usage: " << argv0 << " COMMAND [ARGS ...]" << std::endl;
	out << std::endl;
	// |--------------------------------------------------------------------------------| 80 characters
	out << "Common commands:" << std::endl;
	out << "  init                 generate a key and prepare repo to use git-crypt" << std::endl;
	out << "  status               display which files are encrypted" << std::endl;
	//out << "  refresh              ensure all files in the repo are properly decrypted" << std::endl;
	out << "  lock                 de-configure git-crypt and re-encrypt files in work tree" << std::endl;
	out << std::endl;
	out << "GPG commands:" << std::endl;
	out << "  add-gpg-user USERID  add the user with the given GPG user ID as a collaborator" << std::endl;
	//out << "  rm-gpg-user USERID   revoke collaborator status from the given GPG user ID" << std::endl;
	//out << "  ls-gpg-users         list the GPG key IDs of collaborators" << std::endl;
	out << "  unlock               decrypt this repo using the in-repo GPG-encrypted key" << std::endl;
	out << std::endl;
	out << "Symmetric key commands:" << std::endl;
	out << "  export-key FILE      export this repo's symmetric key to the given file" << std::endl;
	out << "  unlock KEYFILE       decrypt this repo using the given symmetric key" << std::endl;
	out << std::endl;
	out << "Legacy commands:" << std::endl;
	out << "  init KEYFILE         alias for 'unlock KEYFILE'" << std::endl;
	out << "  keygen KEYFILE       generate a git-crypt key in the given file" << std::endl;
	out << "  migrate-key OLD NEW  migrate the legacy key file OLD to the new format in NEW" << std::endl;
	/*
	out << std::endl;
	out << "Plumbing commands (not to be used directly):" << std::endl;
	out << "   clean [LEGACY-KEYFILE]" << std::endl;
	out << "   smudge [LEGACY-KEYFILE]" << std::endl;
	out << "   diff [LEGACY-KEYFILE] FILE" << std::endl;
	*/
	out << std::endl;
	out << "See 'git-crypt help COMMAND' for more information on a specific command." << std::endl;
}
// Print the program name and version string (VERSION macro) to `out`.
static void print_version (std::ostream& out)
{
	out << "git-crypt " << VERSION << std::endl;
}
// Print the help text for `command` to `out`. Returns false (printing
// nothing) when the command is not recognized.
static bool help_for_command (const char* command, std::ostream& out)
{
	typedef void (*Help_function)(std::ostream&);
	struct Command_help {
		const char*	name;
		Help_function	print;
	};
	static const Command_help command_helps[] = {
		{ "init",		help_init },
		{ "unlock",		help_unlock },
		{ "lock",		help_lock },
		{ "add-gpg-user",	help_add_gpg_user },
		{ "rm-gpg-user",	help_rm_gpg_user },
		{ "ls-gpg-users",	help_ls_gpg_users },
		{ "export-key",		help_export_key },
		{ "keygen",		help_keygen },
		{ "migrate-key",	help_migrate_key },
		{ "refresh",		help_refresh },
		{ "status",		help_status },
	};
	for (size_t i = 0; i < sizeof(command_helps) / sizeof(command_helps[0]); ++i) {
		if (std::strcmp(command, command_helps[i].name) == 0) {
			command_helps[i].print(out);
			return true;
		}
	}
	return false;
}
// Entry point for 'git-crypt help [COMMAND]'.
// With no arguments, prints the general usage text; otherwise prints the
// help text for the named command.  Returns a process exit status.
static int help (int argc, const char** argv)
{
	if (argc == 0) {
		print_usage(std::cout);
		return 0;
	}
	if (help_for_command(argv[0], std::cout)) {
		return 0;
	}
	std::clog << "Error: '" << argv[0] << "' is not a git-crypt command. See 'git-crypt help'." << std::endl;
	return 1;
}
// Entry point for 'git-crypt version'.  Arguments are accepted for
// signature compatibility with the other command handlers but are unused.
static int version (int /*argc*/, const char** /*argv*/)
{
	print_version(std::cout);
	return 0;
}
/*
 * Program entry point.  Parses global options, then dispatches to the
 * handler for the requested command.  Exit status: 0 on success, 1 on an
 * operational error, 2 on a usage error (unknown option/command, bad
 * option value).
 */
int main (int argc, const char** argv)
try {
	argv0 = argv[0];
	/*
	 * General initialization
	 */
	init_std_streams();
	init_crypto();
	/*
	 * Parse command line arguments
	 */
	int arg_index = 1;
	while (arg_index < argc && argv[arg_index][0] == '-') {
		if (std::strcmp(argv[arg_index], "--help") == 0) {
			print_usage(std::clog);
			return 0;
		} else if (std::strcmp(argv[arg_index], "--version") == 0) {
			print_version(std::clog);
			return 0;
		} else if (std::strcmp(argv[arg_index], "--") == 0) {
			// "--" ends global option processing; the next argument is the command.
			++arg_index;
			break;
		} else {
			std::clog << argv0 << ": " << argv[arg_index] << ": Unknown option" << std::endl;
			print_usage(std::clog);
			return 2;
		}
	}
	// Shift past the consumed global options so argv[0] is the command name.
	argc -= arg_index;
	argv += arg_index;
	if (argc == 0) {
		print_usage(std::clog);
		return 2;
	}
	/*
	 * Pass off to command handler
	 */
	const char* command = argv[0];
	--argc;
	++argv;
	try {
		// Public commands:
		if (std::strcmp(command, "help") == 0) {
			return help(argc, argv);
		}
		if (std::strcmp(command, "version") == 0) {
			return version(argc, argv);
		}
		if (std::strcmp(command, "init") == 0) {
			return init(argc, argv);
		}
		if (std::strcmp(command, "unlock") == 0) {
			return unlock(argc, argv);
		}
		if (std::strcmp(command, "lock") == 0) {
			return lock(argc, argv);
		}
		if (std::strcmp(command, "add-gpg-user") == 0) {
			return add_gpg_user(argc, argv);
		}
		if (std::strcmp(command, "rm-gpg-user") == 0) {
			return rm_gpg_user(argc, argv);
		}
		if (std::strcmp(command, "ls-gpg-users") == 0) {
			return ls_gpg_users(argc, argv);
		}
		if (std::strcmp(command, "export-key") == 0) {
			return export_key(argc, argv);
		}
		if (std::strcmp(command, "keygen") == 0) {
			return keygen(argc, argv);
		}
		if (std::strcmp(command, "migrate-key") == 0) {
			return migrate_key(argc, argv);
		}
		if (std::strcmp(command, "refresh") == 0) {
			return refresh(argc, argv);
		}
		if (std::strcmp(command, "status") == 0) {
			return status(argc, argv);
		}
		// Plumbing commands (executed by git, not by user):
		if (std::strcmp(command, "clean") == 0) {
			return clean(argc, argv);
		}
		if (std::strcmp(command, "smudge") == 0) {
			return smudge(argc, argv);
		}
		if (std::strcmp(command, "diff") == 0) {
			return diff(argc, argv);
		}
	} catch (const Option_error& e) {
		// Bad usage of a known command: report the option error and show
		// that command's help text.
		std::clog << "git-crypt: Error: " << e.option_name << ": " << e.message << std::endl;
		help_for_command(command, std::clog);
		return 2;
	}
	std::clog << "Error: '" << command << "' is not a git-crypt command. See 'git-crypt help'." << std::endl;
	return 2;
	// Function-try-block: the handlers below catch errors thrown anywhere
	// in the program and turn them into exit status 1.  All exceptions are
	// caught by const reference (Core Guidelines E.15).
} catch (const Error& e) {
	std::cerr << "git-crypt: Error: " << e.message << std::endl;
	return 1;
} catch (const Gpg_error& e) {
	std::cerr << "git-crypt: GPG error: " << e.message << std::endl;
	return 1;
} catch (const System_error& e) {
	std::cerr << "git-crypt: System error: " << e.message() << std::endl;
	return 1;
} catch (const Crypto_error& e) {
	std::cerr << "git-crypt: Crypto error: " << e.where << ": " << e.message << std::endl;
	return 1;
} catch (const Key_file::Incompatible&) {
	std::cerr << "git-crypt: This repository contains an incompatible key file. Please upgrade git-crypt." << std::endl;
	return 1;
} catch (const Key_file::Malformed&) {
	std::cerr << "git-crypt: This repository contains a malformed key file. It may be corrupted." << std::endl;
	return 1;
} catch (const std::ios_base::failure& e) {
	std::cerr << "git-crypt: I/O error: " << e.what() << std::endl;
	return 1;
}
| 8,505
|
C++
|
.cpp
| 246
| 32.162602
| 117
| 0.633572
|
AGWA/git-crypt
| 8,224
| 478
| 134
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
18,512
|
crypto.cpp
|
AGWA_git-crypt/crypto.cpp
|
/*
* Copyright 2012, 2014 Andrew Ayer
*
* This file is part of git-crypt.
*
* git-crypt is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* git-crypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with git-crypt. If not, see <http://www.gnu.org/licenses/>.
*
* Additional permission under GNU GPL version 3 section 7:
*
* If you modify the Program, or any covered work, by linking or
* combining it with the OpenSSL project's OpenSSL library (or a
* modified version of that library), containing parts covered by the
* terms of the OpenSSL or SSLeay licenses, the licensors of the Program
* grant you additional permission to convey the resulting work.
* Corresponding Source for a non-source form of such a combination
* shall include the source code for the parts of OpenSSL used as well
* as that of the covered work.
*/
#include "crypto.hpp"
#include "util.hpp"
#include <cstring>
// Construct a CTR-mode encryptor from a raw AES key and a 12-byte nonce.
// The nonce fills the first NONCE_LEN bytes of the CTR block and stays
// fixed for this object's lifetime; the trailing 4 bytes hold the
// (initially zero) block counter.
Aes_ctr_encryptor::Aes_ctr_encryptor (const unsigned char* raw_key, const unsigned char* nonce)
: ecb(raw_key), byte_counter(0)
{
	std::memcpy(ctr_value, nonce, NONCE_LEN);
}
// Destructor: zero the keystream pad so key-derived material does not
// linger in memory.  explicit_memset (declared in util.hpp) is presumably a
// memset the compiler cannot optimize away — TODO confirm in util.cpp.
Aes_ctr_encryptor::~Aes_ctr_encryptor ()
{
	explicit_memset(pad, '\0', BLOCK_LEN);
}
// Encrypt (or decrypt — the XOR keystream is symmetric) len bytes from
// `in` to `out`; in == out is fine since each byte is read before being
// written.  May be called repeatedly: byte_counter carries the stream
// position across calls.  Throws Crypto_error once the 32-bit byte_counter
// wraps (after 2^32 bytes), refusing to reuse a CTR value.
void Aes_ctr_encryptor::process (const unsigned char* in, unsigned char* out, size_t len)
{
	for (size_t i = 0; i < len; ++i) {
		// At each block boundary, derive a fresh 16-byte keystream pad.
		if (byte_counter % BLOCK_LEN == 0) {
			// Set last 4 bytes of CTR to the (big-endian) block number (sequentially increasing with each block)
			store_be32(ctr_value + NONCE_LEN, byte_counter / BLOCK_LEN);
			// Generate a new pad
			ecb.encrypt(ctr_value, pad);
		}
		// encrypt one byte
		out[i] = in[i] ^ pad[byte_counter++ % BLOCK_LEN];
		// byte_counter just wrapped to zero: continuing would repeat an
		// earlier counter value, so bail out.
		if (byte_counter == 0) {
			throw Crypto_error("Aes_ctr_encryptor::process", "Too much data to encrypt securely");
		}
	}
}
// Encrypt/decrypt an entire input stream with AES-CTR under the given key
// and nonce, writing the transformed bytes to the given output stream.
void Aes_ctr_encryptor::process_stream (std::istream& in, std::ostream& out, const unsigned char* key, const unsigned char* nonce)
{
	Aes_ctr_encryptor	aes(key, nonce);
	unsigned char		chunk[1024];
	while (in) {
		// read() may fail on the final, partial chunk; gcount() still
		// reports how many bytes actually arrived.
		in.read(reinterpret_cast<char*>(chunk), sizeof(chunk));
		const std::streamsize count = in.gcount();
		aes.process(chunk, chunk, count); // in place: output overwrites input
		out.write(reinterpret_cast<char*>(chunk), count);
	}
}
| 2,807
|
C++
|
.cpp
| 71
| 37.380282
| 130
| 0.733309
|
AGWA/git-crypt
| 8,224
| 478
| 134
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
18,513
|
parse_options.cpp
|
AGWA_git-crypt/parse_options.cpp
|
/*
* Copyright 2014 Andrew Ayer
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Except as contained in this notice, the name(s) of the above copyright
* holders shall not be used in advertising or otherwise to promote the
* sale, use or other dealings in this Software without prior written
* authorization.
*/
#include "parse_options.hpp"
#include <cstring>
// Look up an option definition by name.  Returns a pointer into OPTIONS,
// or null if no option with the given name is defined.
static const Option_def* find_option (const Options_list& options, const std::string& name)
{
	for (size_t i = 0; i < options.size(); ++i) {
		if (options[i].name == name) {
			return &options[i];
		}
	}
	return 0;
}
// Parse the leading options of argv according to OPTIONS, stopping at the
// first non-option argument (or just after a lone "--").  Supports
// "--name", "--name=value", "--name value", "-x", "-xVALUE", "-x VALUE",
// and bundled short options ("-ab").  For each recognized option, sets
// *is_set and/or *value in its Option_def.  Returns the index of the first
// unconsumed argument.  Throws Option_error for an unknown option, a
// missing required value, or a value given to a valueless long option.
int parse_options (const Options_list& options, int argc, const char** argv)
{
	int	argi = 0;
	// An argument is an option if it starts with '-' and is not "-" itself.
	while (argi < argc && argv[argi][0] == '-' && argv[argi][1] != '\0') {
		if (std::strcmp(argv[argi], "--") == 0) {
			// Explicit end of options; consume the "--" itself.
			++argi;
			break;
		} else if (std::strncmp(argv[argi], "--", 2) == 0) {
			// Long option: "--name" or "--name=value".
			std::string	option_name;
			const char*	option_value = 0;
			if (const char* eq = std::strchr(argv[argi], '=')) {
				// Split at the first '=' into name and inline value.
				option_name.assign(argv[argi], eq);
				option_value = eq + 1;
			} else {
				option_name = argv[argi];
			}
			++argi;
			const Option_def*	opt(find_option(options, option_name));
			if (!opt) {
				throw Option_error(option_name, "Invalid option");
			}
			if (opt->is_set) {
				*opt->is_set = true;
			}
			if (opt->value) {
				// Option takes a value: inline ("--name=value") or the
				// next argument ("--name value").
				if (option_value) {
					*opt->value = option_value;
				} else {
					if (argi >= argc) {
						throw Option_error(option_name, "Option requires a value");
					}
					*opt->value = argv[argi];
					++argi;
				}
			} else {
				// Valueless option must not have an inline value.
				if (option_value) {
					throw Option_error(option_name, "Option takes no value");
				}
			}
		} else {
			// Short option(s): "-x", "-xVALUE", "-x VALUE", or bundled
			// flags like "-ab" (scanned one character at a time).
			const char*	arg = argv[argi] + 1;
			++argi;
			while (*arg) {
				std::string	option_name("-");
				option_name.push_back(*arg);
				++arg;
				const Option_def*	opt(find_option(options, option_name));
				if (!opt) {
					throw Option_error(option_name, "Invalid option");
				}
				if (opt->is_set) {
					*opt->is_set = true;
				}
				if (opt->value) {
					// A value-taking short option consumes the rest of
					// this argument ("-xVALUE") or the next one; either
					// way it ends the bundle.
					if (*arg) {
						*opt->value = arg;
					} else {
						if (argi >= argc) {
							throw Option_error(option_name, "Option requires a value");
						}
						*opt->value = argv[argi];
						++argi;
					}
					break;
				}
			}
		}
	}
	return argi;
}
| 3,352
|
C++
|
.cpp
| 107
| 27.542056
| 91
| 0.654001
|
AGWA/git-crypt
| 8,224
| 478
| 134
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
18,514
|
coprocess-win32.hpp
|
AGWA_git-crypt/coprocess-win32.hpp
|
/*
* Copyright 2015 Andrew Ayer
*
* This file is part of git-crypt.
*
* git-crypt is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* git-crypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with git-crypt. If not, see <http://www.gnu.org/licenses/>.
*
* Additional permission under GNU GPL version 3 section 7:
*
* If you modify the Program, or any covered work, by linking or
* combining it with the OpenSSL project's OpenSSL library (or a
* modified version of that library), containing parts covered by the
* terms of the OpenSSL or SSLeay licenses, the licensors of the Program
* grant you additional permission to convey the resulting work.
* Corresponding Source for a non-source form of such a combination
* shall include the source code for the parts of OpenSSL used as well
* as that of the covered work.
*/
#ifndef GIT_CRYPT_COPROCESS_HPP
#define GIT_CRYPT_COPROCESS_HPP
#include "fhstream.hpp"
#include <windows.h>
#include <vector>
// Spawns a child process with pipes attached to its standard input and
// output.  This is the Win32 implementation; coprocess-unix.hpp declares a
// class with the same interface for POSIX systems (the build selects one —
// both use the same include guard).
class Coprocess {
	HANDLE proc_handle;
	// Pipe pair for the child's stdin; the parent writes via the ofhstream.
	HANDLE stdin_pipe_reader;
	HANDLE stdin_pipe_writer;
	ofhstream* stdin_pipe_ostream;
	static size_t write_stdin (void*, const void*, size_t);
	// Pipe pair for the child's stdout; the parent reads via the ifhstream.
	HANDLE stdout_pipe_reader;
	HANDLE stdout_pipe_writer;
	ifhstream* stdout_pipe_istream;
	static size_t read_stdout (void*, void*, size_t);
	Coprocess (const Coprocess&); // Disallow copy
	Coprocess& operator= (const Coprocess&); // Disallow assignment
public:
	Coprocess ();
	~Coprocess ();
	std::ostream* stdin_pipe (); // stream feeding the child's stdin
	void close_stdin (); // signal EOF to the child
	std::istream* stdout_pipe (); // stream reading the child's stdout
	void close_stdout ();
	void spawn (const std::vector<std::string>&); // argv of the child command
	int wait (); // wait for child exit; returns its status
};
#endif
| 2,094
|
C++
|
.h
| 57
| 34.614035
| 72
| 0.751234
|
AGWA/git-crypt
| 8,224
| 478
| 134
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
18,515
|
crypto.hpp
|
AGWA_git-crypt/crypto.hpp
|
/*
* Copyright 2012, 2014 Andrew Ayer
*
* This file is part of git-crypt.
*
* git-crypt is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* git-crypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with git-crypt. If not, see <http://www.gnu.org/licenses/>.
*
* Additional permission under GNU GPL version 3 section 7:
*
* If you modify the Program, or any covered work, by linking or
* combining it with the OpenSSL project's OpenSSL library (or a
* modified version of that library), containing parts covered by the
* terms of the OpenSSL or SSLeay licenses, the licensors of the Program
* grant you additional permission to convey the resulting work.
* Corresponding Source for a non-source form of such a combination
* shall include the source code for the parts of OpenSSL used as well
* as that of the covered work.
*/
#ifndef GIT_CRYPT_CRYPTO_HPP
#define GIT_CRYPT_CRYPTO_HPP
#include "key.hpp"
#include <stdint.h>
#include <stddef.h>
#include <iosfwd>
#include <string>
#include <memory>
// One-time initialization of the crypto backend; called from main() before
// any command runs.
void init_crypto ();
// Exception thrown on failure inside the crypto layer.  'where' names the
// failing operation, 'message' describes the failure.
struct Crypto_error {
	std::string where;
	std::string message;
	Crypto_error (const std::string& w, const std::string& m) : where(w), message(m) { }
};
// Single-block AES encryption (ECB), used as the block primitive beneath
// Aes_ctr_encryptor.  The backend-specific state lives behind a pimpl so
// this header does not depend on the crypto library's headers.
class Aes_ecb_encryptor {
public:
	enum {
		KEY_LEN = AES_KEY_LEN,
		BLOCK_LEN = 16
	};
private:
	struct Aes_impl;
	std::unique_ptr<Aes_impl> impl;
public:
	Aes_ecb_encryptor (const unsigned char* key);
	~Aes_ecb_encryptor ();
	// Encrypt exactly one BLOCK_LEN-byte block from 'plain' into 'cipher'.
	void encrypt (const unsigned char* plain, unsigned char* cipher);
};
// AES in counter (CTR) mode: a 12-byte nonce plus a 4-byte big-endian
// block counter form the 16-byte CTR input.  CTR is its own inverse, so
// the same class decrypts (see the typedef below).
class Aes_ctr_encryptor {
public:
	enum {
		NONCE_LEN = 12,
		KEY_LEN = AES_KEY_LEN,
		BLOCK_LEN = 16,
		MAX_CRYPT_BYTES = (1ULL<<32)*16 // Don't encrypt more than this or the CTR value will repeat itself
	};
private:
	Aes_ecb_encryptor ecb;
	unsigned char ctr_value[BLOCK_LEN]; // Current CTR value (used as input to AES to derive pad)
	unsigned char pad[BLOCK_LEN]; // Current encryption pad (output of AES)
	uint32_t byte_counter; // How many bytes processed so far?
public:
	Aes_ctr_encryptor (const unsigned char* key, const unsigned char* nonce);
	~Aes_ctr_encryptor ();
	// XOR len bytes of keystream into in -> out; callable incrementally.
	void process (const unsigned char* in, unsigned char* out, size_t len);
	// Encrypt/decrypt an entire input stream, writing to the given output stream
	static void process_stream (std::istream& in, std::ostream& out, const unsigned char* key, const unsigned char* nonce);
};
// Decryption is the identical XOR operation in CTR mode.
typedef Aes_ctr_encryptor Aes_ctr_decryptor;
// Incremental HMAC-SHA1: feed data with add(), then read the 20-byte MAC
// with get().  Backend state is hidden behind a pimpl.
class Hmac_sha1_state {
public:
	enum {
		LEN = 20,
		KEY_LEN = HMAC_KEY_LEN
	};
private:
	struct Hmac_impl;
	std::unique_ptr<Hmac_impl> impl;
public:
	Hmac_sha1_state (const unsigned char* key, size_t key_len);
	~Hmac_sha1_state ();
	// Append buffer_len bytes to the MAC computation.
	void add (const unsigned char* buffer, size_t buffer_len);
	// Write the final LEN-byte digest to the given buffer.
	void get (unsigned char*);
};
// Fill the buffer with cryptographically secure random bytes.
void random_bytes (unsigned char*, size_t);
#endif
| 3,241
|
C++
|
.h
| 95
| 32.115789
| 120
| 0.743598
|
AGWA/git-crypt
| 8,224
| 478
| 134
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
18,516
|
fhstream.hpp
|
AGWA_git-crypt/fhstream.hpp
|
/*
* Copyright (C) 2012, 2015 Andrew Ayer
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Except as contained in this notice, the name(s) of the above copyright
* holders shall not be used in advertising or otherwise to promote the
* sale, use or other dealings in this Software without prior written
* authorization.
*/
#ifndef GIT_CRYPT_FHSTREAM_HPP
#define GIT_CRYPT_FHSTREAM_HPP
#include <ostream>
#include <istream>
#include <streambuf>
/*
* ofhstream
*/
// Output streambuf that writes through a caller-supplied function pointer
// taking an opaque handle — adapts OS file handles / pipes to std::ostream.
class ofhbuf : public std::streambuf {
	enum { default_buffer_size = 8192 };
	void* handle; // opaque handle passed through to write_fun
	size_t (*write_fun)(void*, const void*, size_t);
	char* buffer;
	size_t buffer_size;
	inline void reset_buffer ()
	{
		// The last buffer byte is excluded from the put area — presumably
		// reserved for the character passed to overflow(); confirm in the
		// .cpp implementation.
		std::streambuf::setp(buffer, buffer + buffer_size - 1);
	}
	static inline bool is_eof (int_type ch) { return traits_type::eq_int_type(ch, traits_type::eof()); }
	// Disallow copy
#if __cplusplus >= 201103L /* C++11 */
	ofhbuf (const ofhbuf&) = delete;
	ofhbuf& operator= (const ofhbuf&) = delete;
#else
	ofhbuf (const ofhbuf&);
	ofhbuf& operator= (const ofhbuf&);
#endif
protected:
	// Standard streambuf virtual hooks, implemented in the .cpp file.
	virtual int_type overflow (int_type ch =traits_type::eof());
	virtual int sync ();
	virtual std::streamsize xsputn (const char*, std::streamsize);
	virtual std::streambuf* setbuf (char*, std::streamsize);
public:
	ofhbuf (void*, size_t (*)(void*, const void*, size_t));
	~ofhbuf (); // WARNING: calls sync() and ignores exceptions
};
// std::ostream wrapper owning an ofhbuf; construct with a handle and a
// write function.
class ofhstream : public std::ostream {
	mutable ofhbuf buf;
public:
	ofhstream (void* handle, size_t (*write_fun)(void*, const void*, size_t))
	: std::ostream(0), buf(handle, write_fun)
	{
		std::ostream::rdbuf(&buf);
	}
	ofhbuf* rdbuf () const { return &buf; }
};
/*
 * ifhstream — input-side counterparts of ofhbuf/ofhstream above.
 */
// Input streambuf that reads through a caller-supplied function pointer
// taking an opaque handle.
class ifhbuf : public std::streambuf {
	enum {
		default_buffer_size = 8192,
		putback_size = 4 // bytes kept in front of the get area for putback
	};
	void* handle; // opaque handle passed through to read_fun
	size_t (*read_fun)(void*, void*, size_t);
	char* buffer;
	size_t buffer_size;
	inline void reset_buffer (size_t nputback, size_t nread)
	{
		// Get area starts after the putback region; nputback bytes of
		// already-read data are retained before it.
		std::streambuf::setg(buffer + (putback_size - nputback), buffer + putback_size, buffer + putback_size + nread);
	}
	// Disallow copy
#if __cplusplus >= 201103L /* C++11 */
	ifhbuf (const ifhbuf&) = delete;
	ifhbuf& operator= (const ifhbuf&) = delete;
#else
	ifhbuf (const ifhbuf&);
	ifhbuf& operator= (const ifhbuf&);
#endif
protected:
	// Standard streambuf virtual hooks, implemented in the .cpp file.
	virtual int_type underflow ();
	virtual std::streamsize xsgetn (char*, std::streamsize);
	virtual std::streambuf* setbuf (char*, std::streamsize);
public:
	ifhbuf (void*, size_t (*)(void*, void*, size_t));
	~ifhbuf (); // Can't fail
};
// std::istream wrapper owning an ifhbuf; construct with a handle and a
// read function.
class ifhstream : public std::istream {
	mutable ifhbuf buf;
public:
	explicit ifhstream (void* handle, size_t (*read_fun)(void*, void*, size_t))
	: std::istream(0), buf(handle, read_fun)
	{
		std::istream::rdbuf(&buf);
	}
	ifhbuf* rdbuf () const { return &buf; }
};
#endif
| 3,855
|
C++
|
.h
| 115
| 31.556522
| 113
| 0.719699
|
AGWA/git-crypt
| 8,224
| 478
| 134
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
18,517
|
commands.hpp
|
AGWA_git-crypt/commands.hpp
|
/*
* Copyright 2012, 2014 Andrew Ayer
*
* This file is part of git-crypt.
*
* git-crypt is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* git-crypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with git-crypt. If not, see <http://www.gnu.org/licenses/>.
*
* Additional permission under GNU GPL version 3 section 7:
*
* If you modify the Program, or any covered work, by linking or
* combining it with the OpenSSL project's OpenSSL library (or a
* modified version of that library), containing parts covered by the
* terms of the OpenSSL or SSLeay licenses, the licensors of the Program
* grant you additional permission to convey the resulting work.
* Corresponding Source for a non-source form of such a combination
* shall include the source code for the parts of OpenSSL used as well
* as that of the covered work.
*/
#ifndef GIT_CRYPT_COMMANDS_HPP
#define GIT_CRYPT_COMMANDS_HPP
#include <string>
#include <iosfwd>
// Thrown by command handlers to report a fatal, user-visible error;
// main() prints the message and exits with status 1.
struct Error {
	std::string message;
	explicit Error (std::string m) : message(m) { }
};
// All command handlers share one signature: (argc, argv) with the command
// name already stripped, returning a process exit status.
// Plumbing commands:
int clean (int argc, const char** argv);
int smudge (int argc, const char** argv);
int diff (int argc, const char** argv);
// Public commands:
int init (int argc, const char** argv);
int unlock (int argc, const char** argv);
int lock (int argc, const char** argv);
int add_gpg_user (int argc, const char** argv);
int rm_gpg_user (int argc, const char** argv);
int ls_gpg_users (int argc, const char** argv);
int export_key (int argc, const char** argv);
int keygen (int argc, const char** argv);
int migrate_key (int argc, const char** argv);
int refresh (int argc, const char** argv);
int status (int argc, const char** argv);
// Help messages: each writes one command's detailed help to the stream.
void help_init (std::ostream&);
void help_unlock (std::ostream&);
void help_lock (std::ostream&);
void help_add_gpg_user (std::ostream&);
void help_rm_gpg_user (std::ostream&);
void help_ls_gpg_users (std::ostream&);
void help_export_key (std::ostream&);
void help_keygen (std::ostream&);
void help_migrate_key (std::ostream&);
void help_refresh (std::ostream&);
void help_status (std::ostream&);
// other
std::string get_git_config (const std::string& name);
#endif
| 2,626
|
C++
|
.h
| 68
| 37.058824
| 72
| 0.741176
|
AGWA/git-crypt
| 8,224
| 478
| 134
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
18,518
|
git-crypt.hpp
|
AGWA_git-crypt/git-crypt.hpp
|
/*
* Copyright 2014 Andrew Ayer
*
* This file is part of git-crypt.
*
* git-crypt is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* git-crypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with git-crypt. If not, see <http://www.gnu.org/licenses/>.
*
* Additional permission under GNU GPL version 3 section 7:
*
* If you modify the Program, or any covered work, by linking or
* combining it with the OpenSSL project's OpenSSL library (or a
* modified version of that library), containing parts covered by the
* terms of the OpenSSL or SSLeay licenses, the licensors of the Program
* grant you additional permission to convey the resulting work.
* Corresponding Source for a non-source form of such a combination
* shall include the source code for the parts of OpenSSL used as well
* as that of the covered work.
*/
#ifndef GIT_CRYPT_GIT_CRYPT_HPP
#define GIT_CRYPT_GIT_CRYPT_HPP
// Program version string, reported by 'git-crypt version' and '--version'.
#define VERSION "0.7.0"
extern const char* argv0; // initialized in main() to argv[0]
#endif
| 1,451
|
C++
|
.h
| 34
| 40.735294
| 72
| 0.763623
|
AGWA/git-crypt
| 8,224
| 478
| 134
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
18,519
|
gpg.hpp
|
AGWA_git-crypt/gpg.hpp
|
/*
* Copyright 2014 Andrew Ayer
*
* This file is part of git-crypt.
*
* git-crypt is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* git-crypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with git-crypt. If not, see <http://www.gnu.org/licenses/>.
*
* Additional permission under GNU GPL version 3 section 7:
*
* If you modify the Program, or any covered work, by linking or
* combining it with the OpenSSL project's OpenSSL library (or a
* modified version of that library), containing parts covered by the
* terms of the OpenSSL or SSLeay licenses, the licensors of the Program
* grant you additional permission to convey the resulting work.
* Corresponding Source for a non-source form of such a combination
* shall include the source code for the parts of OpenSSL used as well
* as that of the covered work.
*/
#ifndef GIT_CRYPT_GPG_HPP
#define GIT_CRYPT_GPG_HPP
#include <string>
#include <vector>
#include <cstddef>
// Thrown when invoking or parsing output from the external gpg program
// fails; main() prints the message and exits with status 1.
struct Gpg_error {
	std::string message;
	explicit Gpg_error (std::string m) : message(m) { }
};
// Wrappers around the external gpg binary (keys are identified by
// fingerprint strings):
std::string gpg_get_uid (const std::string& fingerprint);
std::vector<std::string> gpg_lookup_key (const std::string& query);
std::vector<std::string> gpg_list_secret_keys ();
void gpg_encrypt_to_file (const std::string& filename, const std::string& recipient_fingerprint, bool key_is_trusted, const char* p, size_t len);
void gpg_decrypt_from_file (const std::string& filename, std::ostream&);
#endif
| 1,910
|
C++
|
.h
| 44
| 41.590909
| 148
| 0.754839
|
AGWA/git-crypt
| 8,224
| 478
| 134
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
18,520
|
key.hpp
|
AGWA_git-crypt/key.hpp
|
/*
* Copyright 2014 Andrew Ayer
*
* This file is part of git-crypt.
*
* git-crypt is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* git-crypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with git-crypt. If not, see <http://www.gnu.org/licenses/>.
*
* Additional permission under GNU GPL version 3 section 7:
*
* If you modify the Program, or any covered work, by linking or
* combining it with the OpenSSL project's OpenSSL library (or a
* modified version of that library), containing parts covered by the
* terms of the OpenSSL or SSLeay licenses, the licensors of the Program
* grant you additional permission to convey the resulting work.
* Corresponding Source for a non-source form of such a combination
* shall include the source code for the parts of OpenSSL used as well
* as that of the covered work.
*/
#ifndef GIT_CRYPT_KEY_HPP
#define GIT_CRYPT_KEY_HPP
#include <map>
#include <functional>
#include <stdint.h>
#include <iosfwd>
#include <string>
// Key material lengths, in bytes.
enum {
	HMAC_KEY_LEN = 64,
	AES_KEY_LEN = 32
};
// In-memory representation of a git-crypt key file: one Entry per key
// version (keyed by version number, newest first) plus an optional key
// name.  Load/store methods throw Malformed/Incompatible on bad input.
struct Key_file {
public:
	// One key version: an AES key and an HMAC key.
	struct Entry {
		uint32_t version;
		unsigned char aes_key[AES_KEY_LEN];
		unsigned char hmac_key[HMAC_KEY_LEN];
		Entry ();
		void load (std::istream&);
		void load_legacy (uint32_t version, std::istream&);
		void store (std::ostream&) const;
		// Fill the keys with fresh random material for the given version.
		void generate (uint32_t version);
	};
	struct Malformed { }; // exception class
	struct Incompatible { }; // exception class
	// Entry with the highest version number, or null if empty.
	const Entry* get_latest () const;
	const Entry* get (uint32_t version) const;
	void add (const Entry&);
	void load_legacy (std::istream&);
	void load (std::istream&);
	void store (std::ostream&) const;
	bool load_from_file (const char* filename);
	bool store_to_file (const char* filename) const;
	std::string store_to_string () const;
	// Add a new entry one version newer than the current latest.
	void generate ();
	bool is_empty () const { return entries.empty(); }
	bool is_filled () const { return !is_empty(); }
	uint32_t latest () const;
	void set_key_name (const char* k) { key_name = k ? k : ""; }
	// Returns null when no key name is set.
	const char* get_key_name () const { return key_name.empty() ? 0 : key_name.c_str(); }
private:
	// std::greater orders entries newest-version-first.
	typedef std::map<uint32_t, Entry, std::greater<uint32_t> > Map;
	enum { FORMAT_VERSION = 2 };
	Map entries;
	std::string key_name;
	void load_header (std::istream&);
	// Field tags of the on-disk TLV-style format:
	enum {
		HEADER_FIELD_END = 0,
		HEADER_FIELD_KEY_NAME = 1
	};
	enum {
		KEY_FIELD_END = 0,
		KEY_FIELD_VERSION = 1,
		KEY_FIELD_AES_KEY = 3,
		KEY_FIELD_HMAC_KEY = 5
	};
	enum {
		MAX_FIELD_LEN = 1<<20
	};
};
enum {
	KEY_NAME_MAX_LEN = 128
};
// Returns true iff key_name is acceptable; otherwise optionally explains
// why via *reason.
bool validate_key_name (const char* key_name, std::string* reason =0);
#endif
| 3,096
|
C++
|
.h
| 94
| 30.734043
| 88
| 0.705369
|
AGWA/git-crypt
| 8,224
| 478
| 134
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
18,521
|
parse_options.hpp
|
AGWA_git-crypt/parse_options.hpp
|
/*
* Copyright 2014 Andrew Ayer
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Except as contained in this notice, the name(s) of the above copyright
* holders shall not be used in advertising or otherwise to promote the
* sale, use or other dealings in this Software without prior written
* authorization.
*/
#ifndef PARSE_OPTIONS_HPP
#define PARSE_OPTIONS_HPP
#include <string>
#include <vector>
// Definition of one command-line option: its name (including the leading
// "-" or "--") plus optional output slots.  A non-null is_set is set to
// true when the option appears; a non-null value makes the option take a
// value, stored through the pointer.
struct Option_def {
	std::string name;
	bool* is_set;
	const char** value;
	Option_def () : is_set(0), value(0) { }
	// A flag option (no value):
	Option_def (const std::string& arg_name, bool* arg_is_set)
	: name(arg_name), is_set(arg_is_set), value(0) { }
	// A value-taking option:
	Option_def (const std::string& arg_name, const char** arg_value)
	: name(arg_name), is_set(0), value(arg_value) { }
};
typedef std::vector<Option_def> Options_list;
// Parse leading options of argv; returns index of first non-option
// argument.  Throws Option_error on misuse.
int parse_options (const Options_list& options, int argc, const char** argv);
// Thrown by parse_options; callers report it as a usage error.
struct Option_error {
	std::string option_name;
	std::string message;
	Option_error (const std::string& n, const std::string& m) : option_name(n), message(m) { }
};
#endif
| 2,091
|
C++
|
.h
| 48
| 41.625
| 91
| 0.752212
|
AGWA/git-crypt
| 8,224
| 478
| 134
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
18,522
|
coprocess-unix.hpp
|
AGWA_git-crypt/coprocess-unix.hpp
|
/*
* Copyright 2015 Andrew Ayer
*
* This file is part of git-crypt.
*
* git-crypt is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* git-crypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with git-crypt. If not, see <http://www.gnu.org/licenses/>.
*
* Additional permission under GNU GPL version 3 section 7:
*
* If you modify the Program, or any covered work, by linking or
* combining it with the OpenSSL project's OpenSSL library (or a
* modified version of that library), containing parts covered by the
* terms of the OpenSSL or SSLeay licenses, the licensors of the Program
* grant you additional permission to convey the resulting work.
* Corresponding Source for a non-source form of such a combination
* shall include the source code for the parts of OpenSSL used as well
* as that of the covered work.
*/
#ifndef GIT_CRYPT_COPROCESS_HPP
#define GIT_CRYPT_COPROCESS_HPP
#include "fhstream.hpp"
#include <unistd.h>
#include <vector>
// Spawns a child process and exposes its stdin/stdout as C++ streams.
// Non-copyable: owns the child's pid and both pipe file descriptor pairs.
class Coprocess {
	pid_t pid;
	// Pipe feeding the child's stdin: parent writes, child reads.
	int stdin_pipe_reader;
	int stdin_pipe_writer;
	ofhstream* stdin_pipe_ostream;
	static size_t write_stdin (void*, const void*, size_t);
	// Pipe draining the child's stdout: child writes, parent reads.
	int stdout_pipe_reader;
	int stdout_pipe_writer;
	ifhstream* stdout_pipe_istream;
	static size_t read_stdout (void*, void*, size_t);

	Coprocess (const Coprocess&); // Disallow copy
	Coprocess& operator= (const Coprocess&); // Disallow assignment
public:
	Coprocess ();
	~Coprocess ();
	std::ostream* stdin_pipe ();	// stream writing to the child's stdin
	void close_stdin ();		// signal EOF to the child
	std::istream* stdout_pipe ();	// stream reading the child's stdout
	void close_stdout ();
	void spawn (const std::vector<std::string>&);	// argv of the child process
	int wait ();	// wait status of the child (see exit_status())
};
#endif
| 2,072
|
C++
|
.h
| 57
| 34.22807
| 72
| 0.748503
|
AGWA/git-crypt
| 8,224
| 478
| 134
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
18,523
|
util.hpp
|
AGWA_git-crypt/util.hpp
|
/*
* Copyright 2012, 2014 Andrew Ayer
*
* This file is part of git-crypt.
*
* git-crypt is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* git-crypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with git-crypt. If not, see <http://www.gnu.org/licenses/>.
*
* Additional permission under GNU GPL version 3 section 7:
*
* If you modify the Program, or any covered work, by linking or
* combining it with the OpenSSL project's OpenSSL library (or a
* modified version of that library), containing parts covered by the
* terms of the OpenSSL or SSLeay licenses, the licensors of the Program
* grant you additional permission to convey the resulting work.
* Corresponding Source for a non-source form of such a combination
* shall include the source code for the parts of OpenSSL used as well
* as that of the covered work.
*/
#ifndef GIT_CRYPT_UTIL_HPP
#define GIT_CRYPT_UTIL_HPP
#include <string>
#include <ios>
#include <iosfwd>
#include <stdint.h>
#include <sys/types.h>
#include <fstream>
#include <vector>
// Thrown when an OS call fails; carries enough context to format a useful message.
struct System_error {
	std::string action;	// the syscall or operation that failed, e.g. "open"
	std::string target;	// the path/object it was applied to (may be empty)
	int	error;		// errno value at the time of failure
	System_error (const std::string& a, const std::string& t, int e) : action(a), target(t), error(e) { }
	std::string message () const;	// formatted "action: target: strerror(error)"
};

// std::fstream backed by a temporary file that is deleted in close()/destructor.
class temp_fstream : public std::fstream {
	std::string filename;	// path of the temp file while open
public:
	~temp_fstream () { close(); }
	void open (std::ios_base::openmode);
	void close ();
};
// Create the parent directories of path, __but not path itself__ (like mkdir -p $(dirname path)).
void mkdir_parent (const std::string& path);
// Absolute path of the currently running executable.
std::string our_exe_path ();
// Run a command (argv vector); returns the wait status (see exit_status()).
int exec_command (const std::vector<std::string>&);
// Same, capturing the child's stdout into 'output'.
int exec_command (const std::vector<std::string>&, std::ostream& output);
// Same, feeding 'len' bytes at 'p' to the child's stdin.
int exec_command_with_input (const std::vector<std::string>&, const char* p, size_t len);
int exit_status (int wait_status); // returns -1 if process did not exit (but was signaled, etc.)
inline bool successful_exit (int wait_status) { return exit_status(wait_status) == 0; }
void touch_file (const std::string&); // ignores non-existent files
void remove_file (const std::string&); // ignores non-existent files
// Quote a string for safe interpolation into a shell command line.
std::string escape_shell_arg (const std::string&);
// Big-endian 32-bit serialization helpers (raw buffer and iostream variants).
uint32_t load_be32 (const unsigned char*);
void store_be32 (unsigned char*, uint32_t);
bool read_be32 (std::istream& in, uint32_t&);
void write_be32 (std::ostream& out, uint32_t);
void* explicit_memset (void* s, int c, size_t n); // memset that won't be optimized away
bool leakless_equals (const void* a, const void* b, size_t len); // compare bytes w/o leaking timing
void init_std_streams ();
void create_protected_file (const char* path); // create empty file accessible only by current user
// rename() wrapper (handles platform differences).
int util_rename (const char*, const char*);
// Names of the entries in the given directory.
std::vector<std::string> get_directory_contents (const char* path);
#endif
| 3,225
|
C++
|
.h
| 73
| 42.534247
| 107
| 0.738065
|
AGWA/git-crypt
| 8,224
| 478
| 134
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
18,525
|
testUtils.cpp
|
Const-me_Whisper/Tools/compareTraces/testUtils.cpp
|
#include "stdafx.h"
#include "../../Whisper/ML/testUtils.h"
#include <immintrin.h>
using namespace DirectCompute;
namespace
{
using DirectCompute::sTensorDiff;
// Load 8 FP32 values from memory (unaligned load).
__forceinline __m256 load( const float* rsi )
{
	return _mm256_loadu_ps( rsi );
}
// Load 8 FP16 values and upcast to FP32.
// NOTE(review): this overload uses an aligned load (_mm_load_si128) while the
// FP32 overload uses loadu — presumably FP16 payloads are 16-byte aligned; verify against callers.
__forceinline __m256 load( const uint16_t* rsi )
{
	const __m128i iv = _mm_load_si128( ( const __m128i* )rsi );
	return _mm256_cvtph_ps( iv );
}
// Load 'count' (1..7) FP16 values from both x and y, upcast to FP32 into fx/fy,
// zero-padding the unused upper lanes. Exactly count*2 bytes are read from each
// pointer, assembled with scalar inserts, so this never reads past the valid tail.
// For count outside 1..7 both outputs are set to all-zero.
__forceinline void loadPartial( const uint16_t* x, const uint16_t* y, size_t count, __m256& fx, __m256& fy )
{
	__m128i ix, iy;
	switch( count )
	{
	case 1: // load 2 bytes
		ix = _mm_cvtsi32_si128( *x );
		iy = _mm_cvtsi32_si128( *y );
		break;
	case 2: // load 4 bytes
		ix = _mm_cvtsi32_si128( *(const int*)x );
		iy = _mm_cvtsi32_si128( *(const int*)y );
		break;
	case 3: // load 6 bytes
		ix = _mm_cvtsi32_si128( *(const int*)x );
		iy = _mm_cvtsi32_si128( *(const int*)y );
		ix = _mm_insert_epi16( ix, x[ 2 ], 2 );
		iy = _mm_insert_epi16( iy, y[ 2 ], 2 );
		break;
	case 4: // load 8 bytes
		ix = _mm_cvtsi64_si128( *(const int64_t*)x );
		iy = _mm_cvtsi64_si128( *(const int64_t*)y );
		break;
	case 5: // load 10 bytes
		ix = _mm_cvtsi64_si128( *(const int64_t*)x );
		iy = _mm_cvtsi64_si128( *(const int64_t*)y );
		ix = _mm_insert_epi16( ix, x[ 4 ], 4 );
		iy = _mm_insert_epi16( iy, y[ 4 ], 4 );
		break;
	case 6: // load 12 bytes
		ix = _mm_cvtsi64_si128( *(const int64_t*)x );
		iy = _mm_cvtsi64_si128( *(const int64_t*)y );
		ix = _mm_insert_epi32( ix, *(const int*)( x + 4 ), 2 );
		iy = _mm_insert_epi32( iy, *(const int*)( y + 4 ), 2 );
		break;
	case 7: // load 14 bytes
		ix = _mm_cvtsi64_si128( *(const int64_t*)x );
		iy = _mm_cvtsi64_si128( *(const int64_t*)y );
		ix = _mm_insert_epi32( ix, *(const int*)( x + 4 ), 2 );
		iy = _mm_insert_epi32( iy, *(const int*)( y + 4 ), 2 );
		ix = _mm_insert_epi16( ix, x[ 6 ], 6 );
		iy = _mm_insert_epi16( iy, y[ 6 ], 6 );
		break;
	default:
		// count == 0 or > 7: nothing valid to load
		fx = fy = _mm256_setzero_ps();
		return;
	}
	// Upcast the assembled FP16 lanes to FP32
	fx = _mm256_cvtph_ps( ix );
	fy = _mm256_cvtph_ps( iy );
}
// Load exactly 2 floats into the low lanes of a vector (upper lanes zeroed).
inline __m128 loadFloat2( const float* rsi )
{
	// movsd reads 8 bytes = 2 floats, and zeroes the upper half
	return _mm_castpd_ps( _mm_load_sd( (const double*)rsi ) );
}
// Load exactly 3 floats into the low lanes of a vector (lane 3 zeroed).
inline __m128 loadFloat3( const float* rsi )
{
	__m128 f = loadFloat2( rsi );
	// insert the third float into lane 2
	f = _mm_insert_ps( f, _mm_load_ss( rsi + 2 ), 0x20 );
	return f;
}
// Load 'count' (1..7) FP32 values from both x and y into fx/fy, zero-padding
// the unused upper lanes; exactly count*4 bytes are read from each pointer.
// For count outside 1..7 both outputs are set to all-zero.
__forceinline void loadPartial( const float* x, const float* y, size_t count, __m256& fx, __m256& fy )
{
	// low/high halves of the resulting 256-bit vectors, for x (1) and y (2)
	__m128 low1, high1;
	__m128 low2, high2;
	high1 = high2 = _mm_setzero_ps();
	switch( count )
	{
	case 1:
		low1 = _mm_load_ss( x );
		low2 = _mm_load_ss( y );
		break;
	case 2:
		low1 = loadFloat2( x );
		low2 = loadFloat2( y );
		break;
	case 3:
		low1 = loadFloat3( x );
		low2 = loadFloat3( y );
		break;
	case 4:
		low1 = _mm_loadu_ps( x );
		low2 = _mm_loadu_ps( y );
		break;
	case 5:
		low1 = _mm_loadu_ps( x );
		low2 = _mm_loadu_ps( y );
		high1 = _mm_load_ss( x + 4 );
		high2 = _mm_load_ss( y + 4 );
		break;
	case 6:
		low1 = _mm_loadu_ps( x );
		low2 = _mm_loadu_ps( y );
		high1 = loadFloat2( x + 4 );
		high2 = loadFloat2( y + 4 );
		break;
	case 7:
		low1 = _mm_loadu_ps( x );
		low2 = _mm_loadu_ps( y );
		high1 = loadFloat3( x + 4 );
		high2 = loadFloat3( y + 4 );
		break;
	default:
		// count == 0 or > 7: nothing valid to load
		fx = fy = _mm256_setzero_ps();
		return;
	}
	fx = _mm256_setr_m128( low1, high1 );
	fy = _mm256_setr_m128( low2, high2 );
}
// Horizontal maximum of all 8 FP32 lanes.
__forceinline float horizontalMaximum( __m256 v )
{
	__m128 s = _mm256_extractf128_ps( v, 1 );
	s = _mm_max_ps( s, _mm256_castps256_ps128( v ) );	// fold 8 -> 4 lanes
	s = _mm_max_ps( s, _mm_movehl_ps( s, s ) );		// 4 -> 2
	s = _mm_max_ss( s, _mm_movehdup_ps( s ) );		// 2 -> 1
	return _mm_cvtss_f32( s );
}
// Horizontal sum of all 8 FP32 lanes, accumulated in FP64 for precision.
__forceinline double horizontalSum( __m256 v )
{
	__m256d d = _mm256_cvtps_pd( _mm256_extractf128_ps( v, 1 ) );
	d = _mm256_add_pd( d, _mm256_cvtps_pd( _mm256_castps256_ps128( v ) ) );
	__m128d s = _mm256_extractf128_pd( d, 1 );
	s = _mm_add_pd( s, _mm256_castpd256_pd128( d ) );
	s = _mm_add_sd( s, _mm_unpackhi_pd( s, s ) );
	return _mm_cvtsd_f64( s );
}
// Zero the diff lanes where a and b are bitwise-identical. Among other things
// this discards lanes where both values are the same INF or the same NaN
// bit pattern, whose subtraction would otherwise produce NaN.
__m256 maskInfNan( __m256 diff, __m256 a, __m256 b )
{
	__m256i ai = _mm256_castps_si256( a );
	__m256i bi = _mm256_castps_si256( b );
	__m256i eqi = _mm256_cmpeq_epi32( ai, bi );	// integer compare = bitwise equality
	__m256 eq = _mm256_castsi256_ps( eqi );
	return _mm256_andnot_ps( eq, diff );
}
// Accumulates per-lane max absolute difference and sum of squared differences
// over a stream of 8-wide vector pairs, then reduces to a sTensorDiff.
class DiffAcc
{
	__m256 maxAbs = _mm256_setzero_ps();
	__m256 sumSquares = _mm256_setzero_ps();

public:
	// Accumulate one pair of 8-lane vectors.
	__forceinline void add( __m256 a, __m256 b )
	{
		const __m256 neg0 = _mm256_set1_ps( -0.0f );
		__m256 diff = _mm256_sub_ps( b, a );
		// ignore lanes where a and b are bitwise identical (handles inf/nan)
		diff = maskInfNan( diff, a, b );
		sumSquares = _mm256_fmadd_ps( diff, diff, sumSquares );
		// clear the sign bit to get |diff|
		const __m256 absDiff = _mm256_andnot_ps( neg0, diff );
		maxAbs = _mm256_max_ps( maxAbs, absDiff );
	}

	// Reduce the per-lane accumulators into a summary over 'count' elements.
	__forceinline sTensorDiff reduce( size_t count )
	{
		sTensorDiff res;
		res.maxAbsDiff = horizontalMaximum( maxAbs );
		res.avgDiffSquared = (float)( horizontalSum( sumSquares ) / (double)(int64_t)count );
		res.length = count;
		return res;
	}
};
// Compare two equal-length vectors element-wise (E is float or FP16-as-uint16_t),
// returning max-abs-diff and mean squared difference. Processes 8 elements per
// iteration; the tail (length % 8) is handled with a masked partial load.
template<class E>
static sTensorDiff __declspec( noinline ) diffVectors( const E* a, const E* b, size_t length )
{
	// const E* const aEnd = a + length;
	const E* const aEndAligned = a + ( length / 8 ) * 8;
	const size_t remainder = length % 8;
	DiffAcc acc;
	for( ; a < aEndAligned; a += 8, b += 8 )
		acc.add( load( a ), load( b ) );
	if( remainder != 0 )
	{
		// tail: load only the valid elements, upper lanes zeroed in both inputs
		__m256 va, vb;
		loadPartial( a, b, remainder, va, vb );
		acc.add( va, vb );
	}
	return acc.reduce( length );
}
}
// Public entry points: element-wise diff summary for FP32 and FP16 buffers.
sTensorDiff DirectCompute::computeDiff( const float* a, const float* b, size_t length )
{
	return diffVectors( a, b, length );
}
sTensorDiff DirectCompute::computeDiff( const uint16_t* a, const uint16_t* b, size_t length )
{
	return diffVectors( a, b, length );
}
// Print a one-line human-readable summary of this diff to stdout.
void DirectCompute::sTensorDiff::print() const
{
	printf( "%zu elements, maxAbsDiff = %g, avgDiffSquared = %g\n", length, maxAbsDiff, avgDiffSquared );
}
| 5,976
|
C++
|
.cpp
| 202
| 26.450495
| 109
| 0.598992
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,526
|
TraceReader.cpp
|
Const-me_Whisper/Tools/compareTraces/TraceReader.cpp
|
#include "stdafx.h"
#include "TraceReader.h"
using namespace Tracing;
// Bounds-checked access to the idx-th trace item; throws E_BOUNDS (HRESULT) on overflow.
const sTraceItem& TraceReader::operator[]( size_t idx ) const
{
	if( idx >= countItems )
		throw E_BOUNDS;
	return items[ idx ];
}
// Resolve the display name of an item: look up its printf-style format string
// in the string table, then expand it with the item's 4 stored format arguments.
// Throws E_BOUNDS (HRESULT) if the string index is out of range.
CStringA TraceReader::getName( const sTraceItem& item ) const
{
	const size_t idx = item.stringIndex;
	if( idx >= countStrings )
		throw E_BOUNDS;
	const char* const source = stringData + stringIndex[ idx ];
	CStringA res;
	// NOTE(review): all 4 args are always passed; the format string is expected
	// to consume at most 4 of them — trusted because traces are produced by this codebase.
	res.Format( source, item.formatArgs[ 0 ], item.formatArgs[ 1 ], item.formatArgs[ 2 ], item.formatArgs[ 3 ] );
	return res;
}
// Memory-map a trace file and set up pointers into its sections.
// File layout: sFileHeader, payload blob, string index (uint32 offsets) +
// string data, then the array of sTraceItem records.
// Returns E_INVALIDARG if the magic number doesn't match.
HRESULT TraceReader::open( LPCTSTR path )
{
	CHECK( file.Create( path, GENERIC_READ, FILE_SHARE_READ, OPEN_EXISTING ) );
	CHECK( mapping.MapFile( file ) );

	const uint8_t* rsi = mapping;
	const sFileHeader& header = *(const sFileHeader*)rsi;
	if( header.magic != header.correctMagic )
		return E_INVALIDARG;
	countItems = header.countItems;
	countStrings = header.countStrings;
	rsi += sizeof( sFileHeader );

	payloadPointer = rsi;
	rsi += header.bytesPayload;

	// string section: countStrings uint32 offsets, followed by the raw bytes
	stringIndex = (const uint32_t*)( rsi );
	stringData = (const char*)( rsi + countStrings * 4 );
	rsi += header.bytesStrings;

	items = (const sTraceItem*)rsi;
	return S_OK;
}
| 1,192
|
C++
|
.cpp
| 38
| 29.421053
| 110
| 0.72973
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,527
|
stdafx.cpp
|
Const-me_Whisper/Tools/compareTraces/stdafx.cpp
|
#include "stdafx.h"
namespace
{
// Ask Windows for the human-readable message of an HRESULT.
// Returns a LocalAlloc'ed string the caller must LocalFree, or nullptr if
// FormatMessage has no text for this code.
wchar_t* formatMessage( HRESULT hr )
{
	wchar_t* err;
	if( FormatMessage( FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM,
		NULL,
		hr,
		MAKELANGID( LANG_NEUTRAL, SUBLANG_DEFAULT ),
		(LPTSTR)&err,
		0,
		NULL ) )
		return err;
	return nullptr;
}
}
// Print an HRESULT to stderr: the system message text when available,
// otherwise the raw numeric code in decimal and hex.
void printError( HRESULT hr )
{
	const wchar_t* err = formatMessage( hr );
	if( nullptr != err )
	{
		fwprintf( stderr, L"%s\n", err );
		// FormatMessage allocated the buffer with LocalAlloc
		LocalFree( (HLOCAL)err );
	}
	else
		fprintf( stderr, "Error code %i (0x%08X)\n", hr, hr );
}
| 541
|
C++
|
.cpp
| 28
| 16.821429
| 81
| 0.666016
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,528
|
CommandLineArgs.cpp
|
Const-me_Whisper/Tools/compareTraces/CommandLineArgs.cpp
|
#include "stdafx.h"
#include "CommandLineArgs.h"
#include <charconv>
// Write the command-line usage banner to stderr.
// Always returns false so call sites can `return printUsage();` on a parse error.
static bool printUsage()
{
	static const char usage[] = "Usage: compareTraces.exe trace1.bin trace2.bin [-diff N]\n";
	fputs( usage, stderr );
	return false;
}
// Parse the command line: exactly two positional input paths, plus an optional
// "-diff N" switch selecting a single entry whose full element diff is printed.
// Returns false (after printing usage or an error) on any malformed input.
bool CommandLineArgs::parse( int argc, wchar_t* argv[] )
{
	size_t idx = 0;	// count of positional arguments consumed so far
	CString sw;
	CStringA tmp;
	for( int i = 1; i < argc; i++ )
	{
		if( argv[ i ][ 0 ] != L'-' )
		{
			// positional argument: an input trace path, at most two allowed
			if( idx >= 2 )
				return printUsage();
			inputs[ idx ] = argv[ i ];
			idx++;
			continue;
		}
		sw = argv[ i ];
		if( 0 == sw.CompareNoCase( L"-diff" ) )
		{
			// the switch consumes the next argument as its numeric value
			i++;
			if( i >= argc )
				return printUsage();
			// narrow the wide argument to ASCII for std::from_chars
			tmp.Format( "%S", argv[ i ] );
			tmp.Trim();
			uint64_t v;
			auto res = std::from_chars( tmp, cstr( tmp ) + tmp.GetLength(), v );
			if( res.ec != (std::errc)0 )
			{
				fprintf( stderr, "Unable to parse string into number\n" );
				return false;
			}
			printDiff = v;
			continue;
		}
		// unrecognized switch
		return printUsage();
	}
	// both input paths are mandatory
	if( idx != 2 )
		return printUsage();
	return true;
}
| 964
|
C++
|
.cpp
| 47
| 17.553191
| 81
| 0.592998
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,529
|
compare.cpp
|
Const-me_Whisper/Tools/compareTraces/compare.cpp
|
#include "stdafx.h"
#include "../../Whisper/API/iContext.cl.h"
#include "TraceReader.h"
#include "../../Whisper/ML/testUtils.h"
#include "compare.h"
using namespace Tracing;
using namespace DirectCompute;
namespace
{
// Name of a trace item type, for messages; throws E_INVALIDARG on unknown values.
inline const char* cstr( eItemType it )
{
	switch( it )
	{
	case eItemType::Buffer: return "Buffer";
	case eItemType::Tensor: return "Tensor";
	}
	throw E_INVALIDARG;
}
inline const char* cstr( const CStringA& s ) { return s; }

// Count the significant dimensions of a tensor size vector (4 x int32 lanes):
// the index of the highest lane that is != 1, plus one. All-ones => 0 (scalar).
inline int tensorDims( __m128i vec )
{
	const __m128i one = _mm_set1_epi32( 1 );
	// bit per lane: set where the extent equals 1
	const uint32_t bitmapOnes = (uint32_t)_mm_movemask_ps( _mm_castsi128_ps( _mm_cmpeq_epi32( vec, one ) ) );
	const uint32_t bitmapNotOnes = bitmapOnes ^ 0b1111u;
	unsigned long idx;
	if( !_BitScanReverse( &idx, bitmapNotOnes ) )
		return 0;	// every extent is 1: a scalar
	return idx + 1;
}
// Print a tensor size vector to stdout as "[ d0, d1, ... ]" using only its
// significant dimensions; returns the dimension count. Throws E_UNEXPECTED
// if tensorDims() somehow reports more than 4.
int printSize( __m128i vec )
{
	const int sz = tensorDims( vec );
	switch( sz )
	{
	case 0:
		printf( "[ scalar ]" );
		break;
	case 1:
		printf( "[ %i ]", _mm_cvtsi128_si32( vec ) );
		break;
	case 2:
		printf( "[ %i, %i ]", _mm_cvtsi128_si32( vec ), _mm_extract_epi32( vec, 1 ) );
		break;
	case 3:
		printf( "[ %i, %i, %i ]", _mm_cvtsi128_si32( vec ), _mm_extract_epi32( vec, 1 ), _mm_extract_epi32( vec, 2 ) );
		break;
	case 4:
		printf( "[ %i, %i, %i, %i ]", _mm_cvtsi128_si32( vec ), _mm_extract_epi32( vec, 1 ), _mm_extract_epi32( vec, 2 ), _mm_extract_epi32( vec, 3 ) );
		break;
	default:
		throw E_UNEXPECTED;
	}
	return sz;
}
// Template-method base: walks item i of both traces, verifies the entries are
// structurally identical (type, name, size, layout, data type), then hands the
// raw FP32 payloads to the derived class via buffersFp32()/tensorsFp32().
class Comparer
{
	TraceReader& readerA;
	TraceReader& readerB;

	// Compare two Buffer entries; returns false (after printing why) on any
	// structural mismatch. Only FP32 payloads are implemented; others throw E_NOTIMPL.
	bool diffBuffers( size_t i, const sTraceItem& a, const sTraceItem& b, const CStringA& name )
	{
		// for buffers the size field stores a single uint64 byte/element count
		const size_t lenA = *(const uint64_t*)a.size.data();
		const size_t lenB = *(const uint64_t*)b.size.data();
		if( lenA != lenB )
		{
			printf( "Buffer %zu \"%s\": different size, %zu in trace A, %zu in trace B\n", i, cstr( name ), lenA, lenB );
			return false;
		}
		if( a.dataType != b.dataType )
		{
			printf( "Buffer %zu \"%s\": different data types\n", i, cstr( name ) );
			return false;
		}
		switch( a.dataType )
		{
		case eDataType::FP32:
			return buffersFp32( i, name, (const float*)readerA.payload( a ), (const float*)readerB.payload( b ), lenA );
		}
		throw E_NOTIMPL;
	}

	// Compare two Tensor entries: size vector, stride vector and data type must
	// all match before the payload comparison is dispatched.
	bool diffTensors( size_t i, const sTraceItem& a, const sTraceItem& b, const CStringA& name )
	{
		const __m128i ne1 = load( a.size );
		const __m128i ne2 = load( b.size );
		if( !vectorEqual( ne1, ne2 ) )
		{
			printf( "Tensor %zu \"%s\" - different size: trace A size is ", i, cstr( name ) );
			printSize( ne1 );
			printf( ", trace B size is " );
			printSize( ne2 );
			printf( "\n" );
			return false;
		}
		const __m128i stride1 = load( a.stride );
		const __m128i stride2 = load( b.stride );
		if( !vectorEqual( stride1, stride2 ) )
		{
			printf( "Tensor %zu \"%s\" - different memory layout\n", i, cstr( name ) );
			return false;
		}
		if( a.dataType != b.dataType )
		{
			printf( "Tensor %zu \"%s\": different data types\n", i, cstr( name ) );
			return false;
		}
		// total element count = product of the 4 extents
		size_t elements = (uint32_t)_mm_cvtsi128_si32( ne1 );
		elements *= (uint32_t)_mm_extract_epi32( ne1, 1 );
		elements *= (uint32_t)_mm_extract_epi32( ne1, 2 );
		elements *= (uint32_t)_mm_extract_epi32( ne1, 3 );

		switch( a.dataType )
		{
		case eDataType::FP32:
			return tensorsFp32( i, name, (const float*)readerA.payload( a ), (const float*)readerB.payload( b ), elements, ne1, stride1 );
		}
		throw E_NOTIMPL;
	}

protected:
	// Derived classes implement the actual payload comparison / report.
	virtual bool buffersFp32( size_t idx, const CStringA& name, const float* a, const float* b, size_t length ) = 0;
	virtual bool tensorsFp32( size_t idx, const CStringA& name, const float* a, const float* b, size_t length, __m128i ne, __m128i nb ) = 0;

public:
	Comparer( TraceReader& t1, TraceReader& t2 ) :
		readerA( t1 ), readerB( t2 ) { }

	// Compare item i of both traces; false = mismatch (already reported to stdout).
	bool compare( size_t i )
	{
		const sTraceItem& a = readerA[ i ];
		const sTraceItem& b = readerB[ i ];
		CStringA name1 = readerA.getName( a );
		CStringA name2 = readerB.getName( b );
		if( a.itemType != b.itemType )
		{
			printf( "Item %zu: different type, trace A %s \"%s\", trace B %s \"%s\"\n", i,
				cstr( a.itemType ), cstr( name1 ), cstr( b.itemType ), cstr( name2 ) );
			return false;
		}
		if( name1 != name2 )
		{
			printf( "%s %zu: different names, they are \"%s\" and \"%s\"\n", cstr( a.itemType ), i, cstr( name1 ), cstr( name2 ) );
			return false;
		}
		switch( a.itemType )
		{
		case eItemType::Buffer:
			return diffBuffers( i, a, b, name1 );
		case eItemType::Tensor:
			return diffTensors( i, a, b, name1 );
		default:
			throw E_INVALIDARG;
		}
	}
};
// Comparer that prints a one-line diff summary (max abs diff, mean squared diff)
// per matching entry, instead of individual elements.
class PrintSummary : public Comparer
{
	bool buffersFp32( size_t idx, const CStringA& name, const float* a, const float* b, size_t length ) override;
	bool tensorsFp32( size_t idx, const CStringA& name, const float* a, const float* b, size_t length, __m128i ne, __m128i nb ) override;
public:
	PrintSummary( TraceReader& a, TraceReader& b ) : Comparer( a, b ) { }
};

// Summarize the diff of an FP32 buffer; always returns true (summary never fails).
bool PrintSummary::buffersFp32( size_t idx, const CStringA& name, const float* a, const float* b, size_t length )
{
	sTensorDiff diff = computeDiff( a, b, length );
	printf( "%s %zu \"%s\": ", cstr( eItemType::Buffer ), idx, cstr( name ) );
	diff.print();
	return true;
}

// Summarize the diff of an FP32 tensor, prefixed with its size; always returns true.
bool PrintSummary::tensorsFp32( size_t idx, const CStringA& name, const float* a, const float* b, size_t length, __m128i ne, __m128i nb )
{
	printSize( ne );
	printf( " " );
	sTensorDiff diff = computeDiff( a, b, length );
	printf( "%s %zu \"%s\": ", cstr( eItemType::Tensor ), idx, cstr( name ) );
	diff.print();
	return true;
}
// Comparer that dumps every element of the selected entry as a TSV table
// (values, hex bit patterns, absolute difference) — used with the -diff switch.
class PrintDiff : public Comparer
{
	bool buffersFp32( size_t idx, const CStringA& name, const float* a, const float* b, size_t length ) override;
	bool tensorsFp32( size_t idx, const CStringA& name, const float* a, const float* b, size_t length, __m128i ne, __m128i nb ) override;
public:
	PrintDiff( TraceReader& a, TraceReader& b ) : Comparer( a, b ) { }
};
// Dump every element of two FP32 buffers as a TSV table to stdout.
// Columns: element index, both values, their IEEE-754 bit patterns in hex,
// and the absolute difference. Always returns true.
bool PrintDiff::buffersFp32( size_t idx, const CStringA& name, const float* A, const float* B, size_t length )
{
	printf( "idx\tA\tB\tA(hex)\tB(hex)\tdiff\n" );
	for( size_t i = 0; i < length; i++ )
	{
		// BUGFIX: the original read *A / *B without ever advancing the pointers,
		// so every printed row was element 0. Index with i instead.
		const float a = A[ i ];
		const float b = B[ i ];
		// pack both floats into one vector to reinterpret their bits as int32
		__m128 vf = _mm_setr_ps( a, b, 0, 0 );
		__m128i vi = _mm_castps_si128( vf );
		const float diff = std::abs( a - b );
		printf( "%zu\t%g\t%g\t0x%08X\t0x%08X\t%g\n",
			i, a, b, _mm_cvtsi128_si32( vi ), _mm_extract_epi32( vi, 1 ), diff );
	}
	return true;
}
// Spill a size vector (4 x uint32 lanes) into a std::array.
std::array<uint32_t, 4> storeSize( __m128i v )
{
	std::array<uint32_t, 4> a;
	_mm_storeu_si128( ( __m128i* )a.data(), v );
	return a;
}
// Spill a stride vector into a std::array, zero-extending each uint32 lane to size_t.
std::array<size_t, 4> storeStrides( __m128i v )
{
	const __m128i zero = _mm_setzero_si128();
	std::array<size_t, 4> a;
	// interleave with zero to widen 4 x u32 -> 4 x u64
	_mm_storeu_si128( ( __m128i* ) & a[ 0 ], _mm_unpacklo_epi32( v, zero ) );
	_mm_storeu_si128( ( __m128i* ) & a[ 2 ], _mm_unpackhi_epi32( v, zero ) );
	return a;
}
// Dump every element of two FP32 tensors as a TSV table to stdout, one row per
// element with its coordinates, both values, their hex bit patterns and the
// absolute difference. 'ne' holds the 4 extents, 'nb' the 4 strides (same
// layout in both traces — verified by Comparer::diffTensors before dispatch).
// NOTE(review): elements are addressed as A[ off ], i.e. strides are treated
// as element strides, not byte strides — confirm against the trace writer.
bool PrintDiff::tensorsFp32( size_t idx, const CStringA& name, const float* A, const float* B, size_t length, __m128i ne, __m128i nb )
{
	const int dims = tensorDims( ne );
	const std::array<uint32_t, 4> size = storeSize( ne );
	// BUGFIX: the original computed strides from 'ne' (the extents), leaving the
	// 'nb' stride argument unused and walking memory with the wrong step sizes.
	const std::array<size_t, 4> strides = storeStrides( nb );
	CStringA line;
	if( dims > 4 )
		throw E_UNEXPECTED;

	// header row: one coordinate column per significant dimension
	for( int i = 0; i < dims; i++ )
	{
		const char c = "xyzw"[ i ];
		line.AppendChar( c );
		line.AppendChar( '\t' );
	}
	line += "A\tB\tA(hex)\tB(hex)\tdiff\n";
	printf( "%s", cstr( line ) );

	if( 0 == dims )
	{
		// scalar tensor: a single row with no coordinate columns
		const float a = *A;
		const float b = *B;
		__m128 vf = _mm_setr_ps( a, b, 0, 0 );
		__m128i vi = _mm_castps_si128( vf );
		const float diff = std::abs( a - b );
		printf( "%g\t%g\t0x%08X\t0x%08X\t%g\n",
			a, b, _mm_cvtsi128_si32( vi ), _mm_extract_epi32( vi, 1 ), diff );
		return true;
	}

	// walk all 4 nested extents, accumulating the strided offset per level
	size_t offLayer2 = 0;
	for( uint32_t w = 0; w < size[ 3 ]; w++, offLayer2 += strides[ 3 ] )
	{
		size_t offLayer = offLayer2;
		for( uint32_t z = 0; z < size[ 2 ]; z++, offLayer += strides[ 2 ] )
		{
			size_t offRow = offLayer;
			for( uint32_t y = 0; y < size[ 1 ]; y++, offRow += strides[ 1 ] )
			{
				size_t off = offRow;
				for( uint32_t x = 0; x < size[ 0 ]; x++, off += strides[ 0 ] )
				{
					// only print coordinate columns for the significant dimensions
					line.Format( "%i\t", x );
					if( dims > 1 )
						line.AppendFormat( "%i\t", y );
					if( dims > 2 )
						line.AppendFormat( "%i\t", z );
					if( dims > 3 )
						line.AppendFormat( "%i\t", w );

					const float a = A[ off ];
					const float b = B[ off ];
					__m128 vf = _mm_setr_ps( a, b, 0, 0 );
					__m128i vi = _mm_castps_si128( vf );
					const float diff = std::abs( a - b );
					line.AppendFormat( "%g\t%g\t0x%08X\t0x%08X\t%g\n",
						a, b, _mm_cvtsi128_si32( vi ), _mm_extract_epi32( vi, 1 ), diff );
					printf( "%s", cstr( line ) );
				}
			}
		}
	}
	return true;
}
}
// Top-level driver: open both trace files, then either dump the full element
// diff of one entry (-diff N) or print a per-entry summary until the first
// mismatch. Returns S_OK on success, S_FALSE on the first mismatch, or the
// failure HRESULT (the comparers throw HRESULTs, caught below).
HRESULT compareTraces( const CommandLineArgs& arguments )
{
	const wchar_t* pathA = arguments.inputs[ 0 ];
	const wchar_t* pathB = arguments.inputs[ 1 ];
	TraceReader a, b;
	HRESULT hr = a.open( pathA );
	if( FAILED( hr ) )
	{
		fwprintf( stderr, L"Unable to load trace A from \"%s\"", pathA );
		printError( hr );
		return hr;
	}
	hr = b.open( pathB );
	if( FAILED( hr ) )
	{
		// BUGFIX: this message printed pathA (copy-paste from the trace-A branch)
		fwprintf( stderr, L"Unable to load trace B from \"%s\"", pathB );
		printError( hr );
		return hr;
	}

	wprintf( L"Trace A: %s\n", pathA );
	wprintf( L"Trace B: %s\n", pathB );

	const size_t sizeA = a.size();
	const size_t sizeB = b.size();
	// only the common prefix of both traces can be compared
	const size_t count = std::min( sizeA, sizeB );

	if( arguments.printDiff >= 0 )
	{
		// -diff N mode: dump every element of entry N
		if( arguments.printDiff >= (int64_t)count )
		{
			fprintf( stderr, "Trace A has %zu entries, trace B %zu entries; entry %zu ain't there\n",
				sizeA, sizeB, (size_t)arguments.printDiff );
			return E_INVALIDARG;
		}
		try
		{
			PrintDiff print{ a, b };
			print.compare( arguments.printDiff );
			return S_OK;
		}
		catch( HRESULT hr )
		{
			return hr;
		}
	}

	// default mode: summarize entries until the first structural mismatch
	printf( "Trace A has %zu entries, trace B %zu entries, comparing first %zu\n", sizeA, sizeB, count );
	try
	{
		PrintSummary print{ a, b };
		for( size_t i = 0; i < count; i++ )
			if( !print.compare( i ) )
				return S_FALSE;
		return S_OK;
	}
	catch( HRESULT hr )
	{
		return hr;
	}
}
| 10,262
|
C++
|
.cpp
| 326
| 27.935583
| 147
| 0.609759
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,530
|
compareTraces.cpp
|
Const-me_Whisper/Tools/compareTraces/compareTraces.cpp
|
#include "stdafx.h"
#include <stdio.h>
#include "compare.h"
#include "CommandLineArgs.h"
// Entry point: parse arguments, run the comparison, map the HRESULT to the
// process exit code (0 = success, 1 = bad arguments, otherwise the raw HRESULT).
int wmain( int argc, wchar_t* argv[] )
{
	CommandLineArgs cla;
	if( !cla.parse( argc, argv ) )
		return 1;
	HRESULT hr = compareTraces( cla );
	if( SUCCEEDED( hr ) )
		return 0;
	return hr;
}
| 282
|
C++
|
.cpp
| 14
| 18.428571
| 38
| 0.696629
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,531
|
modelFactory.cpp
|
Const-me_Whisper/Whisper/modelFactory.cpp
|
#include "stdafx.h"
#include "modelFactory.h"
#include "API/iContext.cl.h"
// Factory: load a Whisper model from 'path', dispatching on the requested
// implementation (GPU/Hybrid share the GPU loader; Reference uses the CPU one).
// Returns E_INVALIDARG for an unrecognized eModelImplementation value.
HRESULT COMLIGHTCALL Whisper::loadModel( const wchar_t* path, const sModelSetup& setup, const sLoadModelCallbacks* callbacks, iModel** pp )
{
	switch( setup.impl )
	{
	case eModelImplementation::GPU:
	case eModelImplementation::Hybrid:
		return loadGpuModel( path, setup, callbacks, pp );
	case eModelImplementation::Reference:
		// the CPU reference path honors no flags; warn instead of failing
		if( 0 != setup.flags )
			logWarning( u8"The reference model doesn’t currently use any flags, argument ignored" );
		return loadReferenceCpuModel( path, pp );
	}
	logError( u8"Unknown model implementation 0x%X", (int)setup.impl );
	return E_INVALIDARG;
}
| 666
|
C++
|
.cpp
| 18
| 34.722222
| 139
| 0.76947
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,532
|
DllMain.cpp
|
Const-me_Whisper/Whisper/DllMain.cpp
|
#include "stdafx.h"
// Standard DLL entry point; the DLL needs no per-thread bookkeeping, so
// thread notifications are disabled on process attach.
BOOL __stdcall DllMain( HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved )
{
	// Perform actions based on the reason for calling.
	switch( fdwReason )
	{
	case DLL_PROCESS_ATTACH:
		// Initialize once for each new process. Return FALSE to fail DLL load.
		// Skipping DLL_THREAD_ATTACH/DETACH notifications is a minor perf win.
		DisableThreadLibraryCalls( (HMODULE)hinstDLL );
		break;
	case DLL_THREAD_ATTACH:
		// Do thread-specific initialization.
		break;
	case DLL_THREAD_DETACH:
		// Do thread-specific cleanup.
		break;
	case DLL_PROCESS_DETACH:
		if( lpvReserved != nullptr )
		{
			break; // do not do cleanup if process termination scenario
		}
		// Perform any necessary cleanup
		break;
	}
	return TRUE; // Successful DLL_PROCESS_ATTACH.
}
| 707
|
C++
|
.cpp
| 26
| 24.807692
| 81
| 0.748899
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| true
| false
|
18,533
|
whisperCom.cpp
|
Const-me_Whisper/Whisper/whisperCom.cpp
|
#include "stdafx.h"
#include "ML/Tensor.h"
#include "API/iMediaFoundation.cl.h"
#include "API/iContext.cl.h"
#include "API/sFullParams.h"
#include "Utils/ReadStream.h"
#include "ML/testUtils.h"
#include "Utils/Trace/tracing.h"
#include "modelFactory.h"
#if BUILD_BOTH_VERSIONS
#ifndef __AVX__
#error Reference version requires AVX build, and AVX2 CPU
#endif // !__AVX__
namespace
{
	// Hard-coded destination for the reference-implementation trace dump.
	// NOTE(review): developer-machine path baked into a BUILD_BOTH_VERSIONS build — intentional?
	LPCTSTR traceFilePath = LR"(C:\Temp\2remove\Whisper\ref.bin)";
	using ComLight::iReadStream;
}
struct whisper_context;
struct ggml_tensor;
// Test harness: runs the GPU encoder for a mel slice so its outputs can be
// compared against the reference CPU implementation's tensors.
class GpuEncTest
{
	DirectCompute::Tensor mel, gpuResult;
	DirectCompute::Tensor tempGpu;
	const ggml_tensor* tempRef = nullptr;	// reference-side tensor held for later comparison

public:
	GpuEncTest( const whisper_context& wctx, const int mel_offset );
	// Compare the GPU encoder output against the reference tensor.
	void compare( const ggml_tensor* expected ) const;
	// Compare the mel input against the reference tensor.
	void compareMel( const ggml_tensor* expected ) const;
};

// Test harness: runs the GPU decoder for a token batch and compares its
// logits/probs against the reference CPU implementation's.
class GpuDecTest
{
	std::vector<float> logits, probs;	// GPU decoder outputs, copied back to host
	const ggml_tensor* tempRef = nullptr;	// reference-side tensor postponed for comparison

public:
	GpuDecTest( const whisper_context& wctx, const int* tokens, const int n_tokens, const int n_past );
	// Remember a reference tensor; compared later in comparePostponed().
	void postpone( const ggml_tensor* t );
	void comparePostponed();
	void compare( const std::vector<float>& cpuLogits, const std::vector<float>& cpuProbs ) const;
};
static DirectCompute::Tensor gpuEncode( const whisper_context& wctx, const int mel_offset );
#include "source/whisper.cpp"
#include "API/iContext.cl.h"
#include "../ComLightLib/comLightServer.h"
#include "Whisper/WhisperContext.h"
#include "Whisper/ModelLoader.h"
#include "Whisper/WhisperModel.h"
#include "source.compat/convertThings.h"
namespace Whisper
{
inline HRESULT isZero( int i )
{
return ( 0 == i ) ? S_OK : E_FAIL;
}
	// Reference CPU implementation of both iContext and iModel, wrapping the
	// original whisper.cpp code. A single object plays both roles:
	// createContext()/getModel() just AddRef and return 'this'.
	class Context : public ComLight::ObjectRoot<iContext>,
		public iModel
	{
		virtual HRESULT COMLIGHTCALL isMultilingual() override final
		{
			return whisper_is_multilingual( &ctx ) ? S_OK : S_FALSE;
		}
		virtual const char* COMLIGHTCALL stringFromToken( whisper_token token ) override final
		{
			return whisper_token_to_str( &ctx, token );
		}
		// Fill in the well-known special token IDs from the loaded vocabulary.
		virtual HRESULT COMLIGHTCALL getSpecialTokens( SpecialTokens& rdi )
		{
			rdi.TranscriptionEnd = whisper_token_eot( &ctx );
			rdi.TranscriptionStart = whisper_token_sot( &ctx );
			rdi.PreviousWord = whisper_token_prev( &ctx );
			rdi.SentenceStart = whisper_token_solm( &ctx );
			rdi.Not = whisper_token_not( &ctx );
			rdi.TranscriptionBegin = whisper_token_beg( &ctx );
			rdi.TaskTranslate = whisper_token_translate();
			rdi.TaskTranscribe = whisper_token_transcribe();
			return S_OK;
		}
		// Tokenize text and deliver the tokens through the supplied callback.
		HRESULT COMLIGHTCALL tokenize( const char* text, pfnDecodedTokens pfn, void* pv ) override final
		{
			const auto res = ::tokenize( ctx.vocab, text );
			if( !res.empty() )
				pfn( res.data(), res.size(), pv );
			return S_OK;
		}
		HRESULT COMLIGHTCALL clone( iModel** rdi ) override final
		{
			logError( u8"Reference CPU model doesn’t support clone()" );
			return E_NOTIMPL;
		}
		HRESULT COMLIGHTCALL detectSpeaker( const sTimeInterval& time, eSpeakerChannel& result ) const override final
		{
			logError( u8"Reference CPU model doesn’t support speaker detection" );
			return E_NOTIMPL;
		}

		// Performance information
		virtual HRESULT COMLIGHTCALL timingsPrint() override final
		{
			whisper_print_timings( &ctx );
			return S_OK;
		}
		virtual HRESULT COMLIGHTCALL timingsReset() override final
		{
			whisper_reset_timings( &ctx );
			return S_OK;
		}

		// Translate whisper.cpp default params into this API's sFullParams.
		virtual HRESULT COMLIGHTCALL fullDefaultParams( eSamplingStrategy strategy, sFullParams* rdi )
		{
			// the enums are required to be numerically identical for the cast below
			static_assert( (int)eSamplingStrategy::Greedy == whisper_sampling_strategy::WHISPER_SAMPLING_GREEDY );
			static_assert( (int)eSamplingStrategy::BeamSearch == whisper_sampling_strategy::WHISPER_SAMPLING_BEAM_SEARCH );
			const whisper_sampling_strategy wss = (whisper_sampling_strategy)(int)strategy;
			whisper_full_params wfp = whisper_full_default_params( wss );
			*rdi = makeNewParams( wfp );
			return S_OK;
		}
		// Run the full transcription over an in-memory mono PCM buffer.
		HRESULT COMLIGHTCALL runFull( const sFullParams& params, const iAudioBuffer* buffer ) override final
		{
			whisper_full_params wfp = makeOldParams( params, this );
			const float* const samples = buffer->getPcmMono();
			const uint32_t n_samples = buffer->countSamples();
			return isZero( whisper_full( &ctx, wfp, samples, (int)n_samples ) );
		}
		HRESULT COMLIGHTCALL runStreamed( const sFullParams& params, const sProgressSink& progress, const iAudioReader* reader ) override final
		{
			logError( u8"The CPU reference implementation doesn’t support streaming" );
			return E_NOTIMPL;
		}
		HRESULT COMLIGHTCALL runCapture( const sFullParams& params, const sCaptureCallbacks& callbacks, const iAudioCapture* reader ) override final
		{
			logError( u8"The CPU reference implementation doesn’t support audio capture" );
			return E_NOTIMPL;
		}
		HRESULT COMLIGHTCALL getResults( eResultFlags flags, iTranscribeResult** pp ) const override final
		{
			makeNewResults( &ctx, flags, pp );
			return S_OK;
		}

		HRESULT loadImpl( iReadStream* stm );

		// This object is both the model and the context: hand out another
		// reference to itself.
		virtual HRESULT COMLIGHTCALL createContext( iContext** pp ) override final
		{
			if( nullptr == pp )
				return E_POINTER;
			*pp = this;
			( *pp )->AddRef();
			return S_OK;
		}
		virtual HRESULT COMLIGHTCALL getModel( iModel** pp ) override final
		{
			if( nullptr == pp )
				return E_POINTER;
			*pp = this;
			( *pp )->AddRef();
			return S_OK;
		}

	public:
		Context()
		{
			// start writing the reference trace as soon as the object exists
			if( nullptr != traceFilePath )
				Tracing::traceCreate( traceFilePath );
		}
		mutable whisper_context ctx;

		HRESULT load( iReadStream* stm );

		~Context()
		{
			Tracing::traceClose();
			// free the ggml contexts and model buffer owned by whisper.cpp
			if( ctx.model.ctx )
			{
				ggml_free( ctx.model.ctx );
				ctx.model.ctx = nullptr;
			}
			if( ctx.model.ctx_mem )
			{
				ggml_free( ctx.model.ctx_mem );
				ctx.model.ctx_mem = nullptr;
			}
			if( ctx.buf_model )
			{
				delete ctx.buf_model;
				ctx.buf_model = nullptr;
			}
		}

		BEGIN_COM_MAP()
			COM_INTERFACE_ENTRY( iModel );
		END_COM_MAP()
	};
inline HRESULT readBytes( iReadStream* stm, void* rdi, size_t cb )
{
if( cb > INT_MAX )
return DISP_E_OVERFLOW;
if( cb == 0 )
return S_FALSE;
int n;
CHECK( stm->read( rdi, (int)cb, n ) );
if( n != (int)cb )
return E_EOF;
return S_OK;
}
template<typename T>
inline HRESULT readStruct( iReadStream* stm, T& dest )
{
return readBytes( stm, &dest, sizeof( T ) );
}
// Fill the pre-sized vector with bytes from the stream.
// Returns S_FALSE when the vector is empty, i.e. there's nothing to read.
template<typename E>
inline HRESULT readVector( iReadStream* stm, std::vector<E>& vec )
{
	// sizeof(E) is always positive, so the byte count is zero exactly when the vector is empty
	if( vec.empty() )
		return S_FALSE;
	return readBytes( stm, vec.data(), sizeof( E ) * vec.size() );
}
// Read a length-prefixed string: a uint32_t byte count, followed by the payload.
// An empty string clears the output and returns S_FALSE.
inline HRESULT readString( iReadStream* stm, std::string& str )
{
	uint32_t len;
	CHECK( readStruct( stm, len ) );
	if( 0 == len )
	{
		str.clear();
		return S_FALSE;
	}
	str.resize( len );
	return readBytes( stm, str.data(), len );
}
// load the model from a ggml file
// file format:
// - hparams
// - pre-computed mel filters
// - vocab
// - weights
// see the convert-pt-to-ggml.py script for details
// Parse a GGML model file from the stream, in order: magic number, hyperparameters,
// pre-computed mel filters, vocabulary, then the tensor payloads.
// Returns S_OK on success, E_INVALIDARG for a malformed file, E_EOF for a truncated one.
HRESULT Context::loadImpl( iReadStream* stm )
{
	// Copy-pasted from whisper_model_load() function
	auto& model = ctx.model;
	auto& vocab = ctx.vocab;
	// verify magic
	{
		uint32_t magic;
		// Fixed: use readStruct() instead of a raw stream read — it fails with E_EOF on a short read,
		// so `magic` is never compared while partially uninitialized
		CHECK( readStruct( stm, magic ) );
		if( magic != 0x67676d6c )
		{
			logError( u8"Invalid model file, bad magic" );
			return E_INVALIDARG;
		}
	}
	//load hparams
	{
		auto& hparams = model.hparams;
		CHECK( readStruct( stm, hparams ) );
		assert( hparams.n_text_state == hparams.n_audio_state );
		// The count of encoder layers identifies the model flavor
		if( hparams.n_audio_layer == 4 )
			model.type = e_model::MODEL_TINY;
		if( hparams.n_audio_layer == 6 )
			model.type = e_model::MODEL_BASE;
		if( hparams.n_audio_layer == 12 )
			model.type = e_model::MODEL_SMALL;
		if( hparams.n_audio_layer == 24 )
			model.type = e_model::MODEL_MEDIUM;
		if( hparams.n_audio_layer == 32 )
			model.type = e_model::MODEL_LARGE;
		logDebug( u8"%s: n_vocab = %d", __func__, hparams.n_vocab );
		logDebug( u8"%s: n_audio_ctx = %d", __func__, hparams.n_audio_ctx );
		logDebug( u8"%s: n_audio_state = %d", __func__, hparams.n_audio_state );
		logDebug( u8"%s: n_audio_head = %d", __func__, hparams.n_audio_head );
		logDebug( u8"%s: n_audio_layer = %d", __func__, hparams.n_audio_layer );
		logDebug( u8"%s: n_text_ctx = %d", __func__, hparams.n_text_ctx );
		logDebug( u8"%s: n_text_state = %d", __func__, hparams.n_text_state );
		logDebug( u8"%s: n_text_head = %d", __func__, hparams.n_text_head );
		logDebug( u8"%s: n_text_layer = %d", __func__, hparams.n_text_layer );
		logDebug( u8"%s: n_mels = %d", __func__, hparams.n_mels );
		logDebug( u8"%s: f16 = %d", __func__, hparams.f16 );
		logDebug( u8"%s: type = %d", __func__, model.type );
		// Size the working buffers for the detected model flavor
		ctx.buf_model = new std::vector<uint8_t>();
		ctx.buf_model->resize( MEM_REQ_MODEL.at( model.type ) );
		ctx.buf_memory.resize( MEM_REQ_MEMORY.at( model.type ) );
		ctx.buf_compute.resize( std::max( MEM_REQ_ENCODE.at( model.type ), MEM_REQ_DECODE.at( model.type ) ) );
		ctx.buf_compute_layer.resize( std::max( MEM_REQ_ENCODE_LAYER.at( model.type ), MEM_REQ_DECODE_LAYER.at( model.type ) ) );
	}
	// load mel filters
	{
		auto& filters = ctx.model.filters;
		CHECK( readStruct( stm, filters.n_mel ) );
		CHECK( readStruct( stm, filters.n_fft ) );
		filters.data.resize( filters.n_mel * filters.n_fft );
		CHECK( readVector( stm, filters.data ) );
	}
	// load vocab
	{
		int32_t n_vocab = 0;
		CHECK( readStruct( stm, n_vocab ) );
		//if (n_vocab != model.hparams.n_vocab) {
		//	fprintf(stderr, "%s: invalid model file '%s' (bad vocab size %d != %d)\n",
		//		__func__, fname.c_str(), n_vocab, model.hparams.n_vocab);
		//	return false;
		//}
		std::string word;
		for( int i = 0; i < n_vocab; i++ )
		{
			CHECK( readString( stm, word ) );
			vocab.token_to_id[ word ] = i;
			vocab.id_to_token[ i ] = word;
		}
		vocab.n_vocab = model.hparams.n_vocab;
		// Multilingual models have one extra token, shifting all special token IDs by 1
		if( vocab.is_multilingual() )
		{
			vocab.token_eot++;
			vocab.token_sot++;
			vocab.token_prev++;
			vocab.token_solm++;
			vocab.token_not++;
			vocab.token_beg++;
		}
		if( n_vocab < model.hparams.n_vocab )
		{
			// Synthesize printable names for the tokens missing from the file
			logDebug( u8"%s: adding %d extra tokens", __func__, model.hparams.n_vocab - n_vocab );
			for( int i = n_vocab; i < model.hparams.n_vocab; i++ )
			{
				if( i > vocab.token_beg )
					word = "[_TT_" + std::to_string( i - vocab.token_beg ) + "]";
				else if( i == vocab.token_eot )
					word = "[_EOT_]";
				else if( i == vocab.token_sot )
					word = "[_SOT_]";
				else if( i == vocab.token_prev )
					word = "[_PREV_]";
				else if( i == vocab.token_not )
					word = "[_NOT_]";
				else if( i == vocab.token_beg )
					word = "[_BEG_]";
				else
					word = "[_extra_token_" + std::to_string( i ) + "]";
				vocab.token_to_id[ word ] = i;
				vocab.id_to_token[ i ] = word;
			}
		}
	}
	{
		// this is the total memory required to run the inference
		const size_t mem_required =
			ctx.buf_model->size() +
			ctx.buf_memory.size() +
			ctx.buf_compute.size() +
			ctx.buf_compute_layer.size();
		logDebug( u8"%s: mem_required = %7.2f MB", __func__, mem_required / 1024.0 / 1024.0 );
	}
	// for the big tensors, we have the option to store the data in 16-bit floats
	// in order to save memory and also to speed up the computation
	const ggml_type wtype = model.hparams.f16 ? GGML_TYPE_F16 : GGML_TYPE_F32;
	size_t ctx_size = 0;
	size_t ctx_mem_size = 0;
	// Pre-compute the exact payload size of the ggml contexts, before allocating any tensors
	{
		const auto& hparams = model.hparams;
		const int n_vocab = hparams.n_vocab;
		const int n_audio_ctx = hparams.n_audio_ctx;
		const int n_audio_state = hparams.n_audio_state;
		const int n_audio_layer = hparams.n_audio_layer;
		const int n_text_ctx = hparams.n_text_ctx;
		const int n_text_state = hparams.n_text_state;
		const int n_text_layer = hparams.n_text_layer;
		const int n_mels = hparams.n_mels;
		// encoder
		{
			// TODO: F16 .. maybe not?
			ctx_size += n_audio_ctx * n_audio_state * ggml_type_size( GGML_TYPE_F32 ); // e_pe;
			ctx_size += 3 * n_mels * n_audio_state * ggml_type_size( wtype ); // e_conv_1_w
			ctx_size += n_audio_state * ggml_type_size( GGML_TYPE_F32 ); // e_conv_1_b
			ctx_size += 3 * n_audio_state * n_audio_state * ggml_type_size( wtype ); // e_conv_2_w
			ctx_size += n_audio_state * ggml_type_size( GGML_TYPE_F32 ); // e_conv_2_b
			ctx_size += n_audio_state * ggml_type_size( GGML_TYPE_F32 ); // e_ln_w;
			ctx_size += n_audio_state * ggml_type_size( GGML_TYPE_F32 ); // e_ln_b;
		}
		// decoder
		{
			// TODO: F16 .. maybe not?
			ctx_size += n_text_ctx * n_text_state * ggml_type_size( GGML_TYPE_F32 ); // d_pe;
			ctx_size += n_vocab * n_text_state * ggml_type_size( wtype ); // d_te;
			ctx_size += n_text_state * ggml_type_size( GGML_TYPE_F32 ); // d_ln_w;
			ctx_size += n_text_state * ggml_type_size( GGML_TYPE_F32 ); // d_ln_b;
		}
		// encoder layers
		{
			ctx_size += n_audio_layer * ( n_audio_state * ggml_type_size( GGML_TYPE_F32 ) ); // mlp_ln_w
			ctx_size += n_audio_layer * ( n_audio_state * ggml_type_size( GGML_TYPE_F32 ) ); // mlp_ln_b
			ctx_size += n_audio_layer * ( 4 * n_audio_state * n_audio_state * ggml_type_size( wtype ) ); // mlp_0_w
			ctx_size += n_audio_layer * ( 4 * n_audio_state * ggml_type_size( GGML_TYPE_F32 ) ); // mlp_0_b
			ctx_size += n_audio_layer * ( 4 * n_audio_state * n_audio_state * ggml_type_size( wtype ) ); // mlp_1_w
			ctx_size += n_audio_layer * ( n_audio_state * ggml_type_size( GGML_TYPE_F32 ) ); // mlp_1_b
			ctx_size += n_audio_layer * ( n_audio_state * ggml_type_size( GGML_TYPE_F32 ) ); // attn_ln_0_w
			ctx_size += n_audio_layer * ( n_audio_state * ggml_type_size( GGML_TYPE_F32 ) ); // attn_ln_0_b
			ctx_size += n_audio_layer * ( n_audio_state * n_audio_state * ggml_type_size( wtype ) ); // attn_q_w
			ctx_size += n_audio_layer * ( n_audio_state * ggml_type_size( GGML_TYPE_F32 ) ); // attn_q_b
			ctx_size += n_audio_layer * ( n_audio_state * n_audio_state * ggml_type_size( wtype ) ); // attn_k_w
			ctx_size += n_audio_layer * ( n_audio_state * n_audio_state * ggml_type_size( wtype ) ); // attn_v_w
			ctx_size += n_audio_layer * ( n_audio_state * ggml_type_size( GGML_TYPE_F32 ) ); // attn_v_b
			ctx_size += n_audio_layer * ( n_audio_state * n_audio_state * ggml_type_size( wtype ) ); // attn_ln_1_w
			ctx_size += n_audio_layer * ( n_audio_state * ggml_type_size( GGML_TYPE_F32 ) ); // attn_ln_1_b
		}
		// decoder layers
		{
			ctx_size += n_text_layer * ( n_text_state * ggml_type_size( GGML_TYPE_F32 ) ); // mlp_ln_w
			ctx_size += n_text_layer * ( n_text_state * ggml_type_size( GGML_TYPE_F32 ) ); // mlp_ln_b
			ctx_size += n_text_layer * ( 4 * n_text_state * n_text_state * ggml_type_size( wtype ) ); // mlp_0_w
			ctx_size += n_text_layer * ( 4 * n_text_state * ggml_type_size( GGML_TYPE_F32 ) ); // mlp_0_b
			ctx_size += n_text_layer * ( 4 * n_text_state * n_text_state * ggml_type_size( wtype ) ); // mlp_1_w
			ctx_size += n_text_layer * ( n_text_state * ggml_type_size( GGML_TYPE_F32 ) ); // mlp_1_b
			ctx_size += n_text_layer * ( n_text_state * ggml_type_size( GGML_TYPE_F32 ) ); // attn_ln_0_w
			ctx_size += n_text_layer * ( n_text_state * ggml_type_size( GGML_TYPE_F32 ) ); // attn_ln_0_b
			ctx_size += n_text_layer * ( n_text_state * n_text_state * ggml_type_size( wtype ) ); // attn_q_w
			ctx_size += n_text_layer * ( n_text_state * ggml_type_size( GGML_TYPE_F32 ) ); // attn_q_b
			ctx_size += n_text_layer * ( n_text_state * n_text_state * ggml_type_size( wtype ) ); // attn_k_w
			ctx_size += n_text_layer * ( n_text_state * n_text_state * ggml_type_size( wtype ) ); // attn_v_w
			ctx_size += n_text_layer * ( n_text_state * ggml_type_size( GGML_TYPE_F32 ) ); // attn_v_b
			ctx_size += n_text_layer * ( n_text_state * n_text_state * ggml_type_size( wtype ) ); // attn_ln_1_w
			ctx_size += n_text_layer * ( n_text_state * ggml_type_size( GGML_TYPE_F32 ) ); // attn_ln_1_b
			//
			ctx_size += n_text_layer * ( n_text_state * ggml_type_size( GGML_TYPE_F32 ) ); // cross_attn_ln_0_w
			ctx_size += n_text_layer * ( n_text_state * ggml_type_size( GGML_TYPE_F32 ) ); // cross_attn_ln_0_b
			ctx_size += n_text_layer * ( n_text_state * n_text_state * ggml_type_size( wtype ) ); // cross_attn_q_w
			ctx_size += n_text_layer * ( n_text_state * ggml_type_size( GGML_TYPE_F32 ) ); // cross_attn_q_b
			ctx_size += n_text_layer * ( n_text_state * n_text_state * ggml_type_size( wtype ) ); // cross_attn_k_w
			ctx_size += n_text_layer * ( n_text_state * n_text_state * ggml_type_size( wtype ) ); // cross_attn_v_w
			ctx_size += n_text_layer * ( n_text_state * ggml_type_size( GGML_TYPE_F32 ) ); // cross_attn_v_b
			ctx_size += n_text_layer * ( n_text_state * n_text_state * ggml_type_size( wtype ) ); // cross_attn_ln_1_w
			ctx_size += n_text_layer * ( n_text_state * ggml_type_size( GGML_TYPE_F32 ) ); // cross_attn_ln_1_b
		}
		ctx_mem_size += n_text_layer * n_text_ctx * n_text_state * ggml_type_size( GGML_TYPE_F16 ); // memory_k
		ctx_mem_size += n_text_layer * n_text_ctx * n_text_state * ggml_type_size( GGML_TYPE_F16 ); // memory_v
		ctx_mem_size += n_text_layer * n_audio_ctx * n_text_state * ggml_type_size( GGML_TYPE_F16 ); // memory_cross_k
		ctx_mem_size += n_text_layer * n_audio_ctx * n_text_state * ggml_type_size( GGML_TYPE_F16 ); // memory_cross_v
		ctx_size += ( 15 + 15 * n_audio_layer + 24 * n_text_layer ) * 256; // object overhead
		logDebug( u8"%s: ggml ctx size = %7.2f MB", __func__, ctx_size / ( 1024.0 * 1024.0 ) );
	}
	// create the ggml context
	{
		struct ggml_init_params params;
		params.mem_size = ctx.buf_model->size();
		params.mem_buffer = ctx.buf_model->data();
		model.ctx = ggml_init( params );
		if( !model.ctx )
		{
			logError( u8"%s: ggml_init() failed", __func__ );
			return E_INVALIDARG;
		}
	}
	std::map<std::string, struct ggml_tensor*> tensors;
	DirectCompute::ModelLoader loader{ model.hparams.n_audio_layer, model.hparams.n_text_layer };
	// prepare memory for the weights
	{
		// NB: this local alias shadows the `ctx` member of this class for the rest of the scope
		auto& ctx = model.ctx;
		const auto& hparams = model.hparams;
		const int n_vocab = hparams.n_vocab;
		const int n_audio_ctx = hparams.n_audio_ctx;
		const int n_audio_state = hparams.n_audio_state;
		const int n_audio_layer = hparams.n_audio_layer;
		const int n_text_ctx = hparams.n_text_ctx;
		const int n_text_state = hparams.n_text_state;
		const int n_text_layer = hparams.n_text_layer;
		const int n_mels = hparams.n_mels;
		model.layers_encoder.resize( n_audio_layer );
		model.layers_decoder.resize( n_text_layer );
		// encoder
		{
			model.e_pe = ggml_new_tensor_2d( ctx, GGML_TYPE_F32, n_audio_state, n_audio_ctx );
			loader.add( model.e_pe, loader.model.enc.positionalEmbedding );
			model.e_conv_1_w = ggml_new_tensor_3d( ctx, wtype, 3, n_mels, n_audio_state );
			model.e_conv_1_b = ggml_new_tensor_2d( ctx, GGML_TYPE_F32, 1, n_audio_state );
			loader.add( model.e_conv_1_w, model.e_conv_1_b, loader.model.enc.conv1 );
			model.e_conv_2_w = ggml_new_tensor_3d( ctx, wtype, 3, n_audio_state, n_audio_state );
			model.e_conv_2_b = ggml_new_tensor_2d( ctx, GGML_TYPE_F32, 1, n_audio_state );
			loader.add( model.e_conv_2_w, model.e_conv_2_b, loader.model.enc.conv2 );
			model.e_ln_w = ggml_new_tensor_1d( ctx, GGML_TYPE_F32, n_audio_state );
			model.e_ln_b = ggml_new_tensor_1d( ctx, GGML_TYPE_F32, n_audio_state );
			loader.add( model.e_ln_w, model.e_ln_b, loader.model.enc.lnPost );
			// map by name
			tensors[ "encoder.positional_embedding" ] = model.e_pe;
			tensors[ "encoder.conv1.weight" ] = model.e_conv_1_w;
			tensors[ "encoder.conv1.bias" ] = model.e_conv_1_b;
			tensors[ "encoder.conv2.weight" ] = model.e_conv_2_w;
			tensors[ "encoder.conv2.bias" ] = model.e_conv_2_b;
			tensors[ "encoder.ln_post.weight" ] = model.e_ln_w;
			tensors[ "encoder.ln_post.bias" ] = model.e_ln_b;
			for( int i = 0; i < n_audio_layer; ++i )
			{
				auto& layer = model.layers_encoder[ i ];
				auto& gpu = loader.model.enc.layers[ i ];
				layer.mlp_ln_w = ggml_new_tensor_1d( ctx, GGML_TYPE_F32, n_audio_state );
				layer.mlp_ln_b = ggml_new_tensor_1d( ctx, GGML_TYPE_F32, n_audio_state );
				loader.add( layer.mlp_ln_w, layer.mlp_ln_b, gpu.mlpLn );
				layer.mlp_0_w = ggml_new_tensor_2d( ctx, wtype, n_audio_state, 4 * n_audio_state );
				layer.mlp_0_b = ggml_new_tensor_1d( ctx, GGML_TYPE_F32, 4 * n_audio_state );
				loader.add( layer.mlp_0_w, layer.mlp_0_b, gpu.mlp0 );
				layer.mlp_1_w = ggml_new_tensor_2d( ctx, wtype, 4 * n_audio_state, n_audio_state );
				layer.mlp_1_b = ggml_new_tensor_1d( ctx, GGML_TYPE_F32, n_audio_state );
				loader.add( layer.mlp_1_w, layer.mlp_1_b, gpu.mlp1 );
				layer.attn_ln_0_w = ggml_new_tensor_1d( ctx, GGML_TYPE_F32, n_audio_state );
				layer.attn_ln_0_b = ggml_new_tensor_1d( ctx, GGML_TYPE_F32, n_audio_state );
				loader.add( layer.attn_ln_0_w, layer.attn_ln_0_b, gpu.attnLn0 );
				layer.attn_q_w = ggml_new_tensor_2d( ctx, wtype, n_audio_state, n_audio_state );
				layer.attn_q_b = ggml_new_tensor_1d( ctx, GGML_TYPE_F32, n_audio_state );
				loader.add( layer.attn_q_w, layer.attn_q_b, gpu.attnQuery );
				layer.attn_k_w = ggml_new_tensor_2d( ctx, wtype, n_audio_state, n_audio_state );
				loader.add( layer.attn_k_w, gpu.attnKey );
				layer.attn_v_w = ggml_new_tensor_2d( ctx, wtype, n_audio_state, n_audio_state );
				layer.attn_v_b = ggml_new_tensor_1d( ctx, GGML_TYPE_F32, n_audio_state );
				loader.add( layer.attn_v_w, layer.attn_v_b, gpu.attnValue );
				layer.attn_ln_1_w = ggml_new_tensor_2d( ctx, wtype, n_audio_state, n_audio_state );
				layer.attn_ln_1_b = ggml_new_tensor_1d( ctx, GGML_TYPE_F32, n_audio_state );
				loader.add( layer.attn_ln_1_w, layer.attn_ln_1_b, gpu.attnLn1 );
				// map by name
				tensors[ "encoder.blocks." + std::to_string( i ) + ".mlp_ln.weight" ] = layer.mlp_ln_w;
				tensors[ "encoder.blocks." + std::to_string( i ) + ".mlp_ln.bias" ] = layer.mlp_ln_b;
				tensors[ "encoder.blocks." + std::to_string( i ) + ".mlp.0.weight" ] = layer.mlp_0_w;
				tensors[ "encoder.blocks." + std::to_string( i ) + ".mlp.0.bias" ] = layer.mlp_0_b;
				tensors[ "encoder.blocks." + std::to_string( i ) + ".mlp.2.weight" ] = layer.mlp_1_w;
				tensors[ "encoder.blocks." + std::to_string( i ) + ".mlp.2.bias" ] = layer.mlp_1_b;
				tensors[ "encoder.blocks." + std::to_string( i ) + ".attn_ln.weight" ] = layer.attn_ln_0_w;
				tensors[ "encoder.blocks." + std::to_string( i ) + ".attn_ln.bias" ] = layer.attn_ln_0_b;
				tensors[ "encoder.blocks." + std::to_string( i ) + ".attn.query.weight" ] = layer.attn_q_w;
				tensors[ "encoder.blocks." + std::to_string( i ) + ".attn.query.bias" ] = layer.attn_q_b;
				tensors[ "encoder.blocks." + std::to_string( i ) + ".attn.key.weight" ] = layer.attn_k_w;
				tensors[ "encoder.blocks." + std::to_string( i ) + ".attn.value.weight" ] = layer.attn_v_w;
				tensors[ "encoder.blocks." + std::to_string( i ) + ".attn.value.bias" ] = layer.attn_v_b;
				tensors[ "encoder.blocks." + std::to_string( i ) + ".attn.out.weight" ] = layer.attn_ln_1_w;
				tensors[ "encoder.blocks." + std::to_string( i ) + ".attn.out.bias" ] = layer.attn_ln_1_b;
			}
		}
		// decoder
		{
			model.d_pe = ggml_new_tensor_2d( ctx, GGML_TYPE_F32, n_text_state, n_text_ctx );
			loader.add( model.d_pe, loader.model.dec.positionalEmbedding );
			model.d_te = ggml_new_tensor_2d( ctx, wtype, n_text_state, n_vocab );
			loader.add( model.d_te, loader.model.dec.tokenEmbedding );
			model.d_ln_w = ggml_new_tensor_1d( ctx, GGML_TYPE_F32, n_text_state );
			model.d_ln_b = ggml_new_tensor_1d( ctx, GGML_TYPE_F32, n_text_state );
			loader.add( model.d_ln_w, model.d_ln_b, loader.model.dec.ln );
			// map by name
			tensors[ "decoder.positional_embedding" ] = model.d_pe;
			tensors[ "decoder.token_embedding.weight" ] = model.d_te;
			tensors[ "decoder.ln.weight" ] = model.d_ln_w;
			tensors[ "decoder.ln.bias" ] = model.d_ln_b;
			for( int i = 0; i < n_text_layer; ++i ) {
				auto& layer = model.layers_decoder[ i ];
				auto& gpu = loader.model.dec.layers[ i ];
				layer.mlp_ln_w = ggml_new_tensor_1d( ctx, GGML_TYPE_F32, n_text_state );
				layer.mlp_ln_b = ggml_new_tensor_1d( ctx, GGML_TYPE_F32, n_text_state );
				loader.add( layer.mlp_ln_w, layer.mlp_ln_b, gpu.mlpLn );
				layer.mlp_0_w = ggml_new_tensor_2d( ctx, wtype, n_text_state, 4 * n_text_state );
				layer.mlp_0_b = ggml_new_tensor_1d( ctx, GGML_TYPE_F32, 4 * n_text_state );
				loader.add( layer.mlp_0_w, layer.mlp_0_b, gpu.mlp0 );
				layer.mlp_1_w = ggml_new_tensor_2d( ctx, wtype, 4 * n_text_state, n_text_state );
				layer.mlp_1_b = ggml_new_tensor_1d( ctx, GGML_TYPE_F32, n_text_state );
				loader.add( layer.mlp_1_w, layer.mlp_1_b, gpu.mlp1 );
				layer.attn_ln_0_w = ggml_new_tensor_1d( ctx, GGML_TYPE_F32, n_text_state );
				layer.attn_ln_0_b = ggml_new_tensor_1d( ctx, GGML_TYPE_F32, n_text_state );
				loader.add( layer.attn_ln_0_w, layer.attn_ln_0_b, gpu.attnLn0 );
				layer.attn_q_w = ggml_new_tensor_2d( ctx, wtype, n_text_state, n_text_state );
				layer.attn_q_b = ggml_new_tensor_1d( ctx, GGML_TYPE_F32, n_text_state );
				loader.add( layer.attn_q_w, layer.attn_q_b, gpu.attnQuery );
				layer.attn_k_w = ggml_new_tensor_2d( ctx, wtype, n_text_state, n_text_state );
				loader.add( layer.attn_k_w, gpu.attnKey );
				layer.attn_v_w = ggml_new_tensor_2d( ctx, wtype, n_text_state, n_text_state );
				layer.attn_v_b = ggml_new_tensor_1d( ctx, GGML_TYPE_F32, n_text_state );
				loader.add( layer.attn_v_w, layer.attn_v_b, gpu.attnValue );
				layer.attn_ln_1_w = ggml_new_tensor_2d( ctx, wtype, n_text_state, n_text_state );
				layer.attn_ln_1_b = ggml_new_tensor_1d( ctx, GGML_TYPE_F32, n_text_state );
				loader.add( layer.attn_ln_1_w, layer.attn_ln_1_b, gpu.attnLn1 );
				layer.cross_attn_ln_0_w = ggml_new_tensor_1d( ctx, GGML_TYPE_F32, n_text_state );
				layer.cross_attn_ln_0_b = ggml_new_tensor_1d( ctx, GGML_TYPE_F32, n_text_state );
				loader.add( layer.cross_attn_ln_0_w, layer.cross_attn_ln_0_b, gpu.crossAttnLn0 );
				layer.cross_attn_q_w = ggml_new_tensor_2d( ctx, wtype, n_text_state, n_text_state );
				layer.cross_attn_q_b = ggml_new_tensor_1d( ctx, GGML_TYPE_F32, n_text_state );
				loader.add( layer.cross_attn_q_w, layer.cross_attn_q_b, gpu.crossAttnQuery );
				layer.cross_attn_k_w = ggml_new_tensor_2d( ctx, wtype, n_text_state, n_text_state );
				loader.add( layer.cross_attn_k_w, gpu.crossAttnKey );
				layer.cross_attn_v_w = ggml_new_tensor_2d( ctx, wtype, n_text_state, n_text_state );
				layer.cross_attn_v_b = ggml_new_tensor_1d( ctx, GGML_TYPE_F32, n_text_state );
				loader.add( layer.cross_attn_v_w, layer.cross_attn_v_b, gpu.crossAttnValue );
				layer.cross_attn_ln_1_w = ggml_new_tensor_2d( ctx, wtype, n_text_state, n_text_state );
				layer.cross_attn_ln_1_b = ggml_new_tensor_1d( ctx, GGML_TYPE_F32, n_text_state );
				loader.add( layer.cross_attn_ln_1_w, layer.cross_attn_ln_1_b, gpu.crossAttnLn1 );
				// map by name
				tensors[ "decoder.blocks." + std::to_string( i ) + ".mlp_ln.weight" ] = layer.mlp_ln_w;
				tensors[ "decoder.blocks." + std::to_string( i ) + ".mlp_ln.bias" ] = layer.mlp_ln_b;
				tensors[ "decoder.blocks." + std::to_string( i ) + ".mlp.0.weight" ] = layer.mlp_0_w;
				tensors[ "decoder.blocks." + std::to_string( i ) + ".mlp.0.bias" ] = layer.mlp_0_b;
				tensors[ "decoder.blocks." + std::to_string( i ) + ".mlp.2.weight" ] = layer.mlp_1_w;
				tensors[ "decoder.blocks." + std::to_string( i ) + ".mlp.2.bias" ] = layer.mlp_1_b;
				tensors[ "decoder.blocks." + std::to_string( i ) + ".attn_ln.weight" ] = layer.attn_ln_0_w;
				tensors[ "decoder.blocks." + std::to_string( i ) + ".attn_ln.bias" ] = layer.attn_ln_0_b;
				tensors[ "decoder.blocks." + std::to_string( i ) + ".attn.query.weight" ] = layer.attn_q_w;
				tensors[ "decoder.blocks." + std::to_string( i ) + ".attn.query.bias" ] = layer.attn_q_b;
				tensors[ "decoder.blocks." + std::to_string( i ) + ".attn.key.weight" ] = layer.attn_k_w;
				tensors[ "decoder.blocks." + std::to_string( i ) + ".attn.value.weight" ] = layer.attn_v_w;
				tensors[ "decoder.blocks." + std::to_string( i ) + ".attn.value.bias" ] = layer.attn_v_b;
				tensors[ "decoder.blocks." + std::to_string( i ) + ".attn.out.weight" ] = layer.attn_ln_1_w;
				tensors[ "decoder.blocks." + std::to_string( i ) + ".attn.out.bias" ] = layer.attn_ln_1_b;
				tensors[ "decoder.blocks." + std::to_string( i ) + ".cross_attn_ln.weight" ] = layer.cross_attn_ln_0_w;
				tensors[ "decoder.blocks." + std::to_string( i ) + ".cross_attn_ln.bias" ] = layer.cross_attn_ln_0_b;
				tensors[ "decoder.blocks." + std::to_string( i ) + ".cross_attn.query.weight" ] = layer.cross_attn_q_w;
				tensors[ "decoder.blocks." + std::to_string( i ) + ".cross_attn.query.bias" ] = layer.cross_attn_q_b;
				tensors[ "decoder.blocks." + std::to_string( i ) + ".cross_attn.key.weight" ] = layer.cross_attn_k_w;
				tensors[ "decoder.blocks." + std::to_string( i ) + ".cross_attn.value.weight" ] = layer.cross_attn_v_w;
				tensors[ "decoder.blocks." + std::to_string( i ) + ".cross_attn.value.bias" ] = layer.cross_attn_v_b;
				tensors[ "decoder.blocks." + std::to_string( i ) + ".cross_attn.out.weight" ] = layer.cross_attn_ln_1_w;
				tensors[ "decoder.blocks." + std::to_string( i ) + ".cross_attn.out.bias" ] = layer.cross_attn_ln_1_b;
			}
		}
	}
	// create the ggml memory context
	{
		struct ggml_init_params params;
		params.mem_size = ctx.buf_memory.size();
		params.mem_buffer = ctx.buf_memory.data();
		model.ctx_mem = ggml_init( params );
		if( !model.ctx_mem )
		{
			logError( u8"%s: ggml_init() failed", __func__ );
			return E_INVALIDARG;
		}
	}
	// key + value memory
	{
		// NB: another local alias shadowing the `ctx` member
		auto& ctx = model.ctx_mem;
		const auto& hparams = model.hparams;
		const int n_text_state = hparams.n_text_state;
		const int n_text_layer = hparams.n_text_layer;
		const int n_text_ctx = hparams.n_text_ctx;
		// key/value memory for the self-attention layer
		{
			const int n_mem = n_text_layer * n_text_ctx;
			const int n_elements = n_text_state * n_mem;
			model.memory_k = ggml_new_tensor_1d( ctx, GGML_TYPE_F16, n_elements );
			model.memory_v = ggml_new_tensor_1d( ctx, GGML_TYPE_F16, n_elements );
		}
		// key/value memory for the cross-attention layer
		{
			const int n_audio_ctx = hparams.n_audio_ctx;
			const int n_mem = n_text_layer * n_audio_ctx;
			const int n_elements = n_text_state * n_mem;
			model.memory_cross_k = ggml_new_tensor_1d( ctx, GGML_TYPE_F16, n_elements );
			model.memory_cross_v = ggml_new_tensor_1d( ctx, GGML_TYPE_F16, n_elements );
		}
		const size_t memory_size =
			ggml_nbytes( model.memory_k ) + ggml_nbytes( model.memory_v ) +
			ggml_nbytes( model.memory_cross_k ) + ggml_nbytes( model.memory_cross_v );
		logDebug( u8"%s: memory size = %7.2f MB", __func__, memory_size / 1024.0 / 1024.0 );
	}
	// load weights
	{
		size_t total_size = 0;
		int n_loaded = 0;
		std::string name;
		while( true )
		{
			int32_t n_dims;
			int32_t length;
			int32_t ftype;
			HRESULT hr = readStruct( stm, n_dims );
			if( hr == E_EOF )
				break;	// no more tensors in the file, the normal loop termination
			CHECK( hr );
			CHECK( readStruct( stm, length ) );
			CHECK( readStruct( stm, ftype ) );
			// Fixed: validate the header fields loaded from the untrusted file.
			// `ne` below has room for 3 dimensions only, so a corrupt rank would overflow the stack array,
			// and a negative name length would break the resize() below.
			if( n_dims < 0 || n_dims > 3 || length < 0 )
			{
				logError( u8"%s: invalid tensor header in model file", __func__ );
				return E_INVALIDARG;
			}
			int32_t nelements = 1;
			int32_t ne[ 3 ] = { 1, 1, 1 };
			for( int i = 0; i < n_dims; ++i )
			{
				CHECK( readStruct( stm, ne[ i ] ) );
				nelements *= ne[ i ];
			}
			name.resize( length );
			CHECK( readBytes( stm, name.data(), length ) );
			if( tensors.find( name.data() ) == tensors.end() )
			{
				logError( u8"%s: unknown tensor '%s' in model file", __func__, name.data() );
				return E_INVALIDARG;
			}
			auto tensor = tensors[ name.data() ];
			if( ggml_nelements( tensor ) != nelements )
			{
				logError( u8"%s: tensor '%s' has wrong size in model file", __func__, name.data() );
				return E_INVALIDARG;
			}
			if( tensor->ne[ 0 ] != ne[ 0 ] || tensor->ne[ 1 ] != ne[ 1 ] || tensor->ne[ 2 ] != ne[ 2 ] )
			{
				logError( u8"%s: tensor '%s' has wrong shape in model file: got [%d, %d, %d], expected [%d, %d, %d]",
					__func__, name.data(), tensor->ne[ 0 ], tensor->ne[ 1 ], tensor->ne[ 2 ], ne[ 0 ], ne[ 1 ], ne[ 2 ] );
				return E_INVALIDARG;
			}
			// ftype selects FP32 (0) or FP16 payloads
			const size_t bpe = ( ftype == 0 ) ? sizeof( float ) : sizeof( ggml_fp16_t );
			if( nelements * bpe != ggml_nbytes( tensor ) )
			{
				logError( u8"%s: tensor '%s' has wrong size in model file: got %zu, expected %zu",
					__func__, name.data(), ggml_nbytes( tensor ), nelements * bpe );
				return E_INVALIDARG;
			}
			CHECK( readBytes( stm, tensor->data, ggml_nbytes( tensor ) ) );
			//printf("%48s - [%5d, %5d, %5d], type = %6s, %6.2f MB\n", name.data(), ne[0], ne[1], ne[2], ftype == 0 ? "float" : "f16", ggml_nbytes(tensor)/1024.0/1024.0);
			total_size += ggml_nbytes( tensor );
			n_loaded++;
			// loader.tryLoad( tensor );
		}
		logDebug( u8"%s: model size = %7.2f MB", __func__, total_size / 1024.0 / 1024.0 );
		if( n_loaded == 0 )
		{
			logError( u8"%s: no tensors loaded from model file", __func__ );
			return E_INVALIDARG;
		}
		else if( n_loaded != (int)tensors.size() )
		{
			logError( u8"%s: not all tensors loaded from model file - expected %zu, got %d", __func__, tensors.size(), n_loaded );
			return E_INVALIDARG;
		}
		model.n_loaded = n_loaded;
	}
	return S_OK;
}
// Load the model from the stream, recording the load time in the whisper context
HRESULT Context::load( iReadStream* stm )
{
	const int64_t started = ggml_time_us();
	ctx.t_start_us = started;
	const HRESULT hr = loadImpl( stm );
	// Record the duration even when the load has failed
	ctx.t_load_us = ggml_time_us() - started;
	return hr;
}
// Open the GGML file at `path` and load it into a new reference-CPU model object,
// returning a strong reference through `pp`
HRESULT __stdcall loadReferenceCpuModel( const wchar_t* path, iModel** pp )
{
	if( path == nullptr || pp == nullptr )
		return E_POINTER;
	// Open the model file on disk
	ComLight::Object<ReadStream> stream;
	CHECK( stream.open( path ) );
	// The loader records timings, make sure the timer is initialized
	ggml_time_init();
	// Create the COM object, load the model, move the ownership to the caller
	ComLight::CComPtr<ComLight::Object<Context>> model;
	CHECK( ComLight::Object<Context>::create( model ) );
	CHECK( model->load( &stream ) );
	model.detach( pp );
	return S_OK;
}
}
#include "Whisper/WhisperContext.h"
#include "Whisper/ModelBuffers.h"
#include "ML/testUtils.h"
using namespace DirectCompute;
// Debug helper: run the encoder on the GPU for comparison against the CPU path.
// Disabled in this build — always returns an empty tensor; the real implementation below is compiled out.
static DirectCompute::Tensor gpuEncode( const whisper_context& wctx, const int mel_offset )
{
return DirectCompute::Tensor{};
#if 0
using namespace DirectCompute;
WhisperContext& ctx = WhisperContext::current();
Tensor cur;
// Gather the encode parameters from the model hyperparameters and the mel spectrogram
sEncodeParams whisperParams;
const auto& mel_inp = wctx.mel;
{
const auto& model = wctx.model;
const auto& hparams = model.hparams;
whisperParams.n_len = (uint32_t)mel_inp.n_len;
whisperParams.n_mel = (uint32_t)mel_inp.n_mel;
const int n_ctx = wctx.exp_n_audio_ctx > 0 ? wctx.exp_n_audio_ctx : wctx.model.hparams.n_audio_ctx;
assert( n_ctx > 0 );
whisperParams.n_ctx = (uint32_t)n_ctx;
const int n_mels = hparams.n_mels;
assert( n_mels > 0 );
whisperParams.n_mels = (uint32_t)n_mels;
assert( mel_offset >= 0 );
whisperParams.mel_offset = (uint32_t)mel_offset;
const int layersCount = hparams.n_audio_layer;
assert( layersCount > 0 );
whisperParams.layersCount = (uint32_t)layersCount;
const int n_state = hparams.n_audio_state;
const int n_head = hparams.n_audio_head;
assert( n_state >= 0 );
assert( n_head >= 0 );
whisperParams.n_state = (uint32_t)n_state;
whisperParams.n_head = (uint32_t)n_head;
int n_audio_ctx = hparams.n_audio_ctx;
assert( n_audio_ctx > 0 );
whisperParams.n_audio_ctx = (uint32_t)n_audio_ctx;
int n_text_state = hparams.n_text_state;
assert( n_text_state > 0 );
whisperParams.n_text_state = (uint32_t)n_text_state;
int n_text_layer = hparams.n_text_layer;
assert( n_text_layer > 0 );
whisperParams.n_text_layer = (uint32_t)n_text_layer;
int n_text_ctx = hparams.n_text_ctx;
assert( n_text_ctx > 0 );
whisperParams.n_text_ctx = (uint32_t)n_text_ctx;
}
return ctx.encode( mel_inp.data, whisperParams );
#endif
}
// Debug-only GPU/CPU encoder comparison.
// The early return intentionally disables it in this build; the statement after it is dead code.
GpuEncTest::GpuEncTest( const whisper_context& wctx, const int mel_offset )
{
return;
gpuResult = gpuEncode( wctx, mel_offset );
}
// Disabled in this build — the early return makes the comparison below unreachable
void GpuEncTest::compare( const ggml_tensor* expected ) const
{
return;
WhisperContext& ctx = WhisperContext::current();
ctx.dbgPrintDifference( expected, gpuResult, "GpuEncTest.compare", false );
}
// Disabled in this build — the early return makes the mel-spectrogram comparison below unreachable
void GpuEncTest::compareMel( const ggml_tensor* expected ) const
{
return;
WhisperContext& ctx = WhisperContext::current();
ctx.dbgPrintDifference( expected, mel, "GpuEncTest.compareMel", false );
}
/*
void GpuEncTest::comparePostponed()
{
if( nullptr == tempRef )
return;
WhisperContext& ctx = WhisperContext::current();
ctx.dbgPrintDifference( tempRef, tempGpu, "comparePostponed" );
tempRef = nullptr;
} */
// Debug-only GPU/CPU decoder comparison; the `#if 1` branch disables it in this build,
// the GPU decode below is compiled out
__declspec( noinline ) GpuDecTest::GpuDecTest( const whisper_context& wctx, const int* tokens, const int n_tokens, const int n_past )
{
#if 1
return;
#else
sDecodeParams dp;
{
WhisperContext& ctx = WhisperContext::current();
const auto& model = wctx.model;
const auto& hparams = model.hparams;
dp.n_state = hparams.n_text_state;
dp.n_head = hparams.n_text_head;
dp.n_ctx = hparams.n_text_ctx;
dp.n_past = n_past;
dp.M = wctx.exp_n_audio_ctx > 0 ? wctx.exp_n_audio_ctx : hparams.n_audio_ctx;
dp.n_text_layer = hparams.n_text_layer;
dp.n_vocab = hparams.n_vocab;
}
WhisperContext& ctx = WhisperContext::current();
ctx.decode( tokens, n_tokens, dp, logits, probs );
#endif
}
// Disabled in this build — the early return skips the logits/probs diff computation below
void __declspec( noinline ) GpuDecTest::compare( const std::vector<float>& cpuLogits, const std::vector<float>& cpuProbs ) const
{
return;
if( cpuLogits.size() != logits.size() )
{
printf( "GpuDecTest.compare fail, size different\n" );
return;
}
computeDiff( logits.data(), cpuLogits.data(), logits.size() ).print( "GpuDecTest.compare logits" );
computeDiff( probs.data(), cpuProbs.data(), probs.size() ).print( "GpuDecTest.compare probs" );
}
// Disabled in this build; when enabled, remembers the first tensor offered for a deferred comparison
void __declspec( noinline ) GpuDecTest::postpone( const ggml_tensor* t )
{
return;
if( nullptr != tempRef )
return;
tempRef = t;
}
// Disabled in this build via `#if 1`; the deferred comparison against the GPU tensor is compiled out
void __declspec( noinline ) GpuDecTest::comparePostponed()
{
#if 1
return;
#else
if( nullptr == tempRef )
return;
WhisperContext& ctx = WhisperContext::current();
ID3D11ShaderResourceView* srv = ctx.dbgDecodeTest;
if( nullptr == srv )
return;
ctx.dbgPrintDifference( tempRef, ctx.dbgDecodeTest, "GpuDecTest.comparePostponed" );
tempRef = nullptr;
#endif
}
#else
// Stub for builds compiled without the reference CPU implementation of the model
HRESULT __stdcall Whisper::loadReferenceCpuModel( const wchar_t* path, Whisper::iModel** pp )
{
logError( u8"This build of the DLL doesn’t implement the reference CPU-running Whisper model." );
return E_NOTIMPL;
}
#endif
| 38,934
|
C++
|
.cpp
| 882
| 40.086168
| 162
| 0.643697
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,534
|
shaders.cpp
|
Const-me_Whisper/Whisper/D3D/shaders.cpp
|
#include "stdafx.h"
#include "shaders.h"
#include "device.h"
#include "../Utils/LZ4/lz4.h"
namespace
{
#ifdef _DEBUG
#include "shaderData-Debug.inl"
#else
#include "shaderData-Release.inl"
#endif
// static std::vector<CComPtr<ID3D11ComputeShader>> s_shaders;
}
// Decompress the embedded LZ4 blob with all shader binaries,
// then create a D3D11 compute shader object for every entry of the table
HRESULT DirectCompute::createComputeShaders( std::vector<CComPtr<ID3D11ComputeShader>>& shaders )
{
	constexpr size_t countBinaries = s_shaderOffsets.size() - 1;
	const size_t cbDecompressedLength = s_shaderOffsets[ countBinaries ];
	constexpr size_t countShaders = s_shaderBlobs32.size();

	std::vector<uint8_t> dxbc;
	try
	{
		shaders.resize( countShaders );
		dxbc.resize( cbDecompressedLength );
	}
	catch( const std::bad_alloc& )
	{
		return E_OUTOFMEMORY;
	}

	const int lz4Status = LZ4_decompress_safe( (const char*)s_compressedShaders.data(), (char*)dxbc.data(), (int)s_compressedShaders.size(), (int)cbDecompressedLength );
	if( lz4Status != (int)cbDecompressedLength )
	{
		logError( u8"LZ4_decompress_safe failed with status %i", lz4Status );
		return PLA_E_CABAPI_FAILURE;
	}

	ID3D11Device* const dev = device();
	// Pick the binaries compiled for the wavefront size of the current GPU
	const auto& blobs = gpuInfo().wave64() ? s_shaderBlobs64 : s_shaderBlobs32;
	for( size_t idxShader = 0; idxShader < countShaders; idxShader++ )
	{
		const size_t idxBinary = blobs[ idxShader ];
		const uint32_t offset = s_shaderOffsets[ idxBinary ];
		const size_t cb = s_shaderOffsets[ idxBinary + 1 ] - offset;
		const uint8_t* const payload = &dxbc[ offset ];
		const HRESULT hr = dev->CreateComputeShader( payload, cb, nullptr, &shaders[ idxShader ] );
		if( FAILED( hr ) )
		{
			// Shaders which use FP64 math are allowed to fail: the support for FP64 is optional in D3D11,
			// and when missing, CreateComputeShader is expected to fail for them.
			// TODO [low]: ideally, query for the support when creating the device, and don't even try creating these compute shaders
			if( 0 == ( fp64ShadersBitmap & ( 1ull << idxBinary ) ) )
				return hr;
		}
	}
	return S_OK;
}
| 1,940
|
C++
|
.cpp
| 53
| 34.377358
| 166
| 0.733369
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,535
|
Binder.cpp
|
Const-me_Whisper/Whisper/D3D/Binder.cpp
|
#include "stdafx.h"
#include "Binder.h"
#include <algorithm>
using namespace DirectCompute;
void Binder::bind( ID3D11ShaderResourceView* srv0, ID3D11UnorderedAccessView* uav0 )
{
ID3D11DeviceContext* const ctx = context();
ctx->CSSetUnorderedAccessViews( 0, 1, &uav0, nullptr );
ctx->CSSetShaderResources( 0, 1, &srv0 );
maxSrv = std::max( maxSrv, (uint8_t)1 );
maxUav = std::max( maxUav, (uint8_t)1 );
}
void Binder::bind( ID3D11UnorderedAccessView* uav0 )
{
context()->CSSetUnorderedAccessViews( 0, 1, &uav0, nullptr );
maxUav = std::max( maxUav, (uint8_t)1 );
}
void Binder::bind( ID3D11ShaderResourceView* srv0, ID3D11ShaderResourceView* srv1, ID3D11UnorderedAccessView* uav0 )
{
ID3D11DeviceContext* const ctx = context();
ctx->CSSetUnorderedAccessViews( 0, 1, &uav0, nullptr );
std::array< ID3D11ShaderResourceView*, 2> arr = { srv0, srv1 };
ctx->CSSetShaderResources( 0, 2, arr.data() );
maxSrv = std::max( maxSrv, (uint8_t)2 );
maxUav = std::max( maxUav, (uint8_t)1 );
}
void Binder::bind( std::initializer_list<ID3D11ShaderResourceView*> srvs, std::initializer_list<ID3D11UnorderedAccessView*> uavs )
{
ID3D11DeviceContext* const ctx = context();
const size_t lengthResources = srvs.size();
const size_t lengthUnordered = uavs.size();
assert( lengthResources > 0 && lengthResources < D3D11_COMMONSHADER_INPUT_RESOURCE_REGISTER_COUNT );
assert( lengthUnordered > 0 && lengthUnordered < D3D11_PS_CS_UAV_REGISTER_COUNT );
ctx->CSSetUnorderedAccessViews( 0, (UINT)lengthUnordered, uavs.begin(), nullptr );
ctx->CSSetShaderResources( 0, (UINT)lengthResources, srvs.begin() );
maxSrv = std::max( maxSrv, (uint8_t)lengthResources );
maxUav = std::max( maxUav, (uint8_t)lengthUnordered );
}
Binder::~Binder()
{
uint8_t count = std::max( maxSrv, maxUav );
if( 0 == count )
return;
#pragma warning (disable: 6255) // Compiler doesn't know we have very few of these things
size_t* arr = (size_t*)_alloca( count * sizeof( size_t ) );
memset( arr, 0, count * sizeof( size_t ) );
ID3D11DeviceContext* const ctx = context();
ctx->CSSetShaderResources( 0, maxSrv, (ID3D11ShaderResourceView**)arr );
ctx->CSSetUnorderedAccessViews( 0, maxUav, (ID3D11UnorderedAccessView**)arr, nullptr );
}
| 2,225
|
C++
|
.cpp
| 50
| 42.64
| 130
| 0.738326
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,536
|
createBuffer.cpp
|
Const-me_Whisper/Whisper/D3D/createBuffer.cpp
|
#include "stdafx.h"
#include "createBuffer.h"
#define CHECK( hr ) { const HRESULT __hr = ( hr ); if( FAILED( __hr ) ) return __hr; }
HRESULT DirectCompute::createBuffer( eBufferUse use, size_t totalBytes, ID3D11Buffer** ppGpuBuffer, const void* rsi, ID3D11Buffer** ppStagingBuffer, bool shared )
{
if( totalBytes > INT_MAX )
return DISP_E_OVERFLOW;
if( nullptr == ppGpuBuffer )
return E_POINTER;
CD3D11_BUFFER_DESC bufferDesc{ (UINT)totalBytes, D3D11_BIND_SHADER_RESOURCE };
switch( use )
{
case eBufferUse::Immutable:
if( nullptr == rsi )
return E_INVALIDARG;
bufferDesc.Usage = D3D11_USAGE_IMMUTABLE;
if( gpuInfo().cloneableModel() )
{
// According to D3D11 documentation, the only resources that can be shared are 2D non-mipmapped textures.
// https://learn.microsoft.com/en-us/windows/win32/api/d3d11/ne-d3d11-d3d11_resource_misc_flag
// However, D3D9 documentation says all resource types are supported, as long as they are in the default pool.
// https://learn.microsoft.com/en-us/windows/win32/direct3d9/dx9lh?redirectedfrom=MSDN#sharing-resources
// Appears to work on my computer
bufferDesc.Usage = D3D11_USAGE_DEFAULT;
bufferDesc.MiscFlags |= D3D11_RESOURCE_MISC_SHARED;
}
break;
case eBufferUse::ReadWrite:
case eBufferUse::ReadWriteDownload:
bufferDesc.BindFlags |= D3D11_BIND_UNORDERED_ACCESS;
if( shared && gpuInfo().cloneableModel() )
bufferDesc.MiscFlags |= D3D11_RESOURCE_MISC_SHARED;
break;
case eBufferUse::Dynamic:
bufferDesc.Usage = D3D11_USAGE_DYNAMIC;
bufferDesc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
break;
}
D3D11_SUBRESOURCE_DATA srd;
D3D11_SUBRESOURCE_DATA* pSrd = nullptr;
if( nullptr != rsi )
{
srd.pSysMem = rsi;
srd.SysMemPitch = srd.SysMemSlicePitch = 0;
pSrd = &srd;
}
CHECK( device()->CreateBuffer( &bufferDesc, pSrd, ppGpuBuffer ) );
if( nullptr != ppStagingBuffer && use == eBufferUse::ReadWriteDownload )
{
bufferDesc.Usage = D3D11_USAGE_STAGING;
bufferDesc.BindFlags = 0;
bufferDesc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
CHECK( device()->CreateBuffer( &bufferDesc, nullptr, ppStagingBuffer ) );
}
return S_OK;
}
| 2,150
|
C++
|
.cpp
| 56
| 35.696429
| 162
| 0.744732
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,537
|
listGPUs.cpp
|
Const-me_Whisper/Whisper/D3D/listGPUs.cpp
|
#include "stdafx.h"
#include "listGPUs.h"
#pragma comment(lib, "DXGI.lib")
#include <charconv>
#include <optional>
namespace DirectCompute
{
static HRESULT createFactory( CComPtr<IDXGIFactory1>& rdi )
{
HRESULT hr = CreateDXGIFactory1( IID_PPV_ARGS( &rdi ) );
if( SUCCEEDED( hr ) )
return S_OK;
return hr;
}
inline void setName( std::wstring& rdi, const DXGI_ADAPTER_DESC1& desc )
{
const size_t descLen = wcsnlen_s( desc.Description, 128 );
const wchar_t* rsi = &desc.Description[ 0 ];
rdi.assign( rsi, rsi + descLen );
}
// If the UTF16 string contains a small non-negative number, return that number.
std::optional<uint32_t> parseGpuIndex( const std::wstring& requestedName )
{
if( requestedName.length() > 3 )
return {};
char buffer[ 4 ];
*(uint32_t*)( &buffer[ 0 ] ) = 0;
for( size_t i = 0; i < requestedName.length(); i++ )
{
const wchar_t wc = requestedName[ i ];
if( wc < L'0' || wc > L'9' )
return {};
buffer[ i ] = (char)(uint8_t)wc;
}
uint32_t result;
auto res = std::from_chars( buffer, &buffer[ 0 ] + requestedName.length(), result );
if( res.ec == std::errc{} )
return result;
return {};
}
CComPtr<IDXGIAdapter1> selectAdapter( const std::wstring& requestedName )
{
if( requestedName.empty() )
return nullptr;
CComPtr<IDXGIFactory1> dxgi;
HRESULT hr = createFactory( dxgi );
if( FAILED( hr ) )
{
logWarningHr( hr, u8"CreateDXGIFactory1 failed" );
return nullptr;
}
const auto idx = parseGpuIndex( requestedName );
if( idx.has_value() )
{
// User has specified 0-based GPU index instead of the name
// https://github.com/Const-me/Whisper/issues/72
CComPtr<IDXGIAdapter1> adapter;
hr = dxgi->EnumAdapters1( idx.value(), &adapter );
if( hr == DXGI_ERROR_NOT_FOUND )
{
logWarning( u8"Requested GPU #%i not found", (int)idx.value() );
return nullptr;
}
if( FAILED( hr ) )
{
logWarningHr( hr, u8"IDXGIFactory1.EnumAdapters1 failed" );
return nullptr;
}
return adapter;
}
std::wstring name;
for( UINT i = 0; true; i++ )
{
CComPtr<IDXGIAdapter1> adapter;
hr = dxgi->EnumAdapters1( i, &adapter );
if( hr == DXGI_ERROR_NOT_FOUND )
{
logWarning16( L"Requested GPU not found: \"%s\"", requestedName.c_str() );
return nullptr;
}
if( FAILED( hr ) )
{
logWarningHr( hr, u8"IDXGIFactory1.EnumAdapters1 failed" );
return nullptr;
}
DXGI_ADAPTER_DESC1 desc;
adapter->GetDesc1( &desc );
setName( name, desc );
if( name == requestedName )
return adapter;
}
}
}
HRESULT COMLIGHTCALL Whisper::listGPUs( pfnListAdapters pfn, void* pv )
{
using namespace DirectCompute;
CComPtr<IDXGIFactory1> dxgi;
HRESULT hr = createFactory( dxgi );
if( FAILED( hr ) )
{
logErrorHr( hr, u8"CreateDXGIFactory1 failed" );
return hr;
}
std::wstring name;
for( UINT i = 0; true; i++ )
{
CComPtr<IDXGIAdapter1> adapter;
hr = dxgi->EnumAdapters1( i, &adapter );
if( hr == DXGI_ERROR_NOT_FOUND )
return S_OK;
if( FAILED( hr ) )
{
logErrorHr( hr, u8"IDXGIFactory1.EnumAdapters1 failed" );
return hr;
}
DXGI_ADAPTER_DESC1 desc;
adapter->GetDesc1( &desc );
setName( name, desc );
pfn( name.c_str(), pv );
}
}
| 3,245
|
C++
|
.cpp
| 121
| 23.578512
| 86
| 0.665378
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,538
|
createDevice.cpp
|
Const-me_Whisper/Whisper/D3D/createDevice.cpp
|
#include "stdafx.h"
#include "createDevice.h"
#include "listGPUs.h"
#include "RenderDoc/renderDoc.h"
#pragma comment(lib, "D3D11.lib")
#include <atlstr.h>
HRESULT DirectCompute::createDevice( const std::wstring& adapterName, ID3D11Device** dev, ID3D11DeviceContext** context )
{
CComPtr<IDXGIAdapter1> adapter = selectAdapter( adapterName );
const D3D_DRIVER_TYPE driverType = adapter ? D3D_DRIVER_TYPE_UNKNOWN : D3D_DRIVER_TYPE_HARDWARE;
const std::array<D3D_FEATURE_LEVEL, 4> levels = { D3D_FEATURE_LEVEL_12_1 , D3D_FEATURE_LEVEL_12_0 , D3D_FEATURE_LEVEL_11_1 , D3D_FEATURE_LEVEL_11_0 };
UINT flags = D3D11_CREATE_DEVICE_DISABLE_GPU_TIMEOUT | D3D11_CREATE_DEVICE_SINGLETHREADED;
bool renderDoc = initializeRenderDoc();
#ifdef _DEBUG
if( !renderDoc )
{
// Last time I checked, RenderDoc crashed with debug version of D3D11 runtime
// Only setting this flag unless renderdoc.dll is loaded to the current process
flags |= D3D11_CREATE_DEVICE_DEBUG;
}
#endif
constexpr UINT levelsCount = (UINT)levels.size();
HRESULT hr = D3D11CreateDevice( adapter, driverType, nullptr, flags, levels.data(), levelsCount, D3D11_SDK_VERSION, dev, nullptr, context );
if( SUCCEEDED( hr ) )
return S_OK;
// D3D11_CREATE_DEVICE_DISABLE_GPU_TIMEOUT: This value is not supported until Direct3D 11.1
// https://learn.microsoft.com/en-us/windows/win32/api/d3d11/ne-d3d11-d3d11_create_device_flag
flags = _andn_u32( D3D11_CREATE_DEVICE_DISABLE_GPU_TIMEOUT, flags );
hr = D3D11CreateDevice( adapter, driverType, nullptr, flags, levels.data(), levelsCount, D3D11_SDK_VERSION, dev, nullptr, context );
if( SUCCEEDED( hr ) )
return S_OK;
return hr;
}
HRESULT DirectCompute::cloneDevice( ID3D11Device* source, ID3D11Device** dev, ID3D11DeviceContext** context )
{
CComPtr<IDXGIDevice> dxgiDev;
CHECK( source->QueryInterface( &dxgiDev ) );
CComPtr<IDXGIAdapter> adapter;
CHECK( dxgiDev->GetAdapter( &adapter ) );
const uint32_t flags = source->GetCreationFlags();
const D3D_FEATURE_LEVEL level = source->GetFeatureLevel();
return D3D11CreateDevice( adapter, D3D_DRIVER_TYPE_UNKNOWN, nullptr, flags, &level, 1,
D3D11_SDK_VERSION, dev, nullptr, context );
}
namespace
{
using Whisper::eGpuModelFlags;
inline constexpr uint32_t operator|( eGpuModelFlags a, eGpuModelFlags b )
{
return (uint32_t)a | (uint32_t)b;
}
inline bool operator&( uint32_t flags, eGpuModelFlags bit )
{
return 0 != ( flags & (uint32_t)bit );
}
inline bool merge3( uint32_t flags, eGpuModelFlags enabled, eGpuModelFlags disabled, bool def )
{
if( flags & enabled )
return true;
if( flags & disabled )
return false;
return def;
}
}
HRESULT DirectCompute::validateFlags( uint32_t flags )
{
constexpr uint32_t waveBoth = eGpuModelFlags::Wave32 | eGpuModelFlags::Wave64;
if( ( flags & waveBoth ) == waveBoth )
{
logError( u8"eGpuModelFlags.%s and eGpuModelFlags.%s are mutually exclusive", "Wave32", "Wave64" );
return E_INVALIDARG;
}
constexpr uint32_t reshapedBoth = eGpuModelFlags::NoReshapedMatMul | eGpuModelFlags::UseReshapedMatMul;
if( ( flags & reshapedBoth ) == reshapedBoth )
{
logError( u8"eGpuModelFlags.%s and eGpuModelFlags.%s are mutually exclusive", "NoReshapedMatMul", "UseReshapedMatMul" );
return E_INVALIDARG;
}
return S_OK;
}
HRESULT DirectCompute::queryDeviceInfo( sGpuInfo& rdi, ID3D11Device* dev, uint32_t flags )
{
if( nullptr == dev )
return OLE_E_BLANK;
CComPtr<IDXGIDevice> dd;
CHECK( dev->QueryInterface( &dd ) );
CComPtr<IDXGIAdapter> adapter;
CHECK( dd->GetAdapter( &adapter ) );
DXGI_ADAPTER_DESC desc;
adapter->GetDesc( &desc );
const size_t descLen = wcsnlen_s( desc.Description, 128 );
const wchar_t* rsi = &desc.Description[ 0 ];
rdi.description.assign( rsi, rsi + descLen );
rdi.vendor = (eGpuVendor)desc.VendorId;
rdi.device = (uint16_t)desc.DeviceId;
rdi.revision = (uint16_t)desc.Revision;
rdi.subsystem = desc.SubSysId;
rdi.vramDedicated = desc.DedicatedVideoMemory;
rdi.ramDedicated = desc.DedicatedSystemMemory;
rdi.ramShared = desc.SharedSystemMemory;
// Set up these flags
uint8_t ef = 0;
const bool amd = ( rdi.vendor == eGpuVendor::AMD );
if( merge3( flags, eGpuModelFlags::Wave64, eGpuModelFlags::Wave32, amd ) )
ef |= (uint8_t)eGpuEffectiveFlags::Wave64;
if( merge3( flags, eGpuModelFlags::UseReshapedMatMul, eGpuModelFlags::NoReshapedMatMul, amd ) )
ef |= (uint8_t)eGpuEffectiveFlags::ReshapedMatMul;
if( 0 != ( flags & eGpuModelFlags::Cloneable ) )
ef |= (uint8_t)eGpuEffectiveFlags::Cloneable;
rdi.flags = (eGpuEffectiveFlags)ef;
if( willLogMessage( Whisper::eLogLevel::Debug ) )
{
const int fl = dev->GetFeatureLevel();
const int flMajor = ( fl >> 12 ) & 0xF;
const int flMinor = ( fl >> 8 ) & 0xF;
CStringA flagsString;
flagsString.Format( "%s | %s", rdi.wave64() ? "Wave64" : "Wave32",
rdi.useReshapedMatMul() ? "UseReshapedMatMul" : "NoReshapedMatMul" );
if( rdi.cloneableModel() )
flagsString += " | Cloneable";
logDebug16( L"Using GPU \"%s\", feature level %i.%i, effective flags %S",
rdi.description.c_str(), flMajor, flMinor,
flagsString.operator const char* ( ) );
}
return S_OK;
}
| 5,133
|
C++
|
.cpp
| 126
| 38.484127
| 151
| 0.742084
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,539
|
downloadBuffer.cpp
|
Const-me_Whisper/Whisper/D3D/downloadBuffer.cpp
|
#include "stdafx.h"
#include "downloadBuffer.h"
#include "device.h"
#include "MappedResource.h"
namespace
{
struct BufferInfo
{
D3D11_SHADER_RESOURCE_VIEW_DESC viewDesc;
D3D11_BUFFER_DESC bufferDesc;
CComPtr<ID3D11Buffer> source;
HRESULT create( ID3D11ShaderResourceView* srv )
{
srv->GetDesc( &viewDesc );
if( viewDesc.ViewDimension != D3D_SRV_DIMENSION_BUFFER )
return E_INVALIDARG;
CComPtr<ID3D11Resource> res;
srv->GetResource( &res );
CHECK( res.QueryInterface( &source ) );
source->GetDesc( &bufferDesc );
return S_OK;
}
HRESULT download( void* rdi )
{
bufferDesc.BindFlags = 0;
bufferDesc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
bufferDesc.Usage = D3D11_USAGE_STAGING;
CComPtr<ID3D11Buffer> staging;
using namespace DirectCompute;
CHECK( device()->CreateBuffer( &bufferDesc, nullptr, &staging ) );
context()->CopyResource( staging, source );
MappedResource mapped;
mapped.map( staging, true );
memcpy( rdi, mapped.data(), bufferDesc.ByteWidth );
return S_OK;
}
};
size_t dxgiSizeof( DXGI_FORMAT fmt )
{
switch( fmt )
{
case DXGI_FORMAT_R16_FLOAT: return 2;
case DXGI_FORMAT_R32_FLOAT: return 4;
}
return 0;
}
}
template<class E>
HRESULT DirectCompute::downloadBuffer( ID3D11ShaderResourceView* srv, std::vector<E>& vec )
{
BufferInfo bi;
CHECK( bi.create( srv ) );
const size_t cb = dxgiSizeof( bi.viewDesc.Format );
if( cb != sizeof( E ) )
return E_INVALIDARG;
vec.resize( bi.bufferDesc.ByteWidth / cb );
return bi.download( vec.data() );
}
template HRESULT DirectCompute::downloadBuffer( ID3D11ShaderResourceView* srv, std::vector<uint16_t>& vec );
template HRESULT DirectCompute::downloadBuffer( ID3D11ShaderResourceView* srv, std::vector<float>& vec );
| 1,777
|
C++
|
.cpp
| 60
| 26.733333
| 108
| 0.732708
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,540
|
enums.cpp
|
Const-me_Whisper/Whisper/D3D/enums.cpp
|
#include "stdafx.h"
#include "enums.h"
static const alignas( 16 ) std::array<DXGI_FORMAT, 3> s_tensorViewFormats = { DXGI_FORMAT_R16_FLOAT, DXGI_FORMAT_R32_FLOAT, DXGI_FORMAT_R32_UINT };
DXGI_FORMAT DirectCompute::viewFormat( eDataType dt )
{
return s_tensorViewFormats[ (uint8_t)dt ];
}
| 290
|
C++
|
.cpp
| 7
| 40.142857
| 147
| 0.755319
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,541
|
MappedResource.cpp
|
Const-me_Whisper/Whisper/D3D/MappedResource.cpp
|
#include "stdafx.h"
#include "MappedResource.h"
using namespace DirectCompute;
#define CHECK( hr ) { const HRESULT __hr = ( hr ); if( FAILED( __hr ) ) return __hr; }
MappedResource::MappedResource()
{
mapped.pData = nullptr;
mapped.RowPitch = mapped.DepthPitch = 0;
resource = nullptr;
}
HRESULT MappedResource::map( ID3D11Resource* res, bool reading )
{
if( nullptr == resource )
{
D3D11_MAP mt = reading ? D3D11_MAP_READ : D3D11_MAP_WRITE_DISCARD;
CHECK( context()->Map( res, 0, mt, 0, &mapped ) );
resource = res;
return S_OK;
}
return HRESULT_FROM_WIN32( ERROR_ALREADY_INITIALIZED );
}
MappedResource::~MappedResource()
{
if( nullptr != resource )
{
context()->Unmap( resource, 0 );
resource = nullptr;
mapped.pData = nullptr;
}
}
| 761
|
C++
|
.cpp
| 30
| 23.5
| 86
| 0.702332
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,542
|
shaderNames.cpp
|
Const-me_Whisper/Whisper/D3D/shaderNames.cpp
|
// This source file is generated by a tool
#include "stdafx.h"
#include "shaderNames.h"
static const std::array<const char*, 41> s_shaderNames =
{
"add",
"addInPlace",
"addRepeat",
"addRepeatEx",
"addRepeatGelu",
"addRepeatScale",
"addRows",
"convolutionMain",
"convolutionMain2",
"convolutionMain2Fixed",
"convolutionPrep1",
"convolutionPrep2",
"copyConvert",
"copyTranspose",
"dbgFindNaN",
"diagMaskInf",
"flashAttention",
"flashAttentionCompat1",
"flashAttentionCompat2",
"flashAttentionCompat3",
"fmaRepeat1",
"fmaRepeat2",
"matReshapePanels",
"mulMatByRow",
"mulMatByRowTiled",
"mulMatByRowTiledEx",
"mulMatByScalar",
"mulMatDotMain",
"mulMatDotReshape",
"mulMatMadMain",
"mulMatTiled",
"mulMatTiledEx",
"norm",
"normCompat",
"normFixed",
"scaleInPlace",
"softMax",
"softMaxCompat",
"softMaxFixed",
"softMaxLong",
"zeroMemory",
};
const char* DirectCompute::computeShaderName( eComputeShader cs )
{
const uint16_t i = (uint16_t)cs;
if( i < s_shaderNames.size() )
return s_shaderNames[ i ];
return nullptr;
}
| 1,061
|
C++
|
.cpp
| 54
| 17.759259
| 65
| 0.752485
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,543
|
renderDoc.cpp
|
Const-me_Whisper/Whisper/D3D/RenderDoc/renderDoc.cpp
|
#include "stdafx.h"
#include "renderDoc.h"
#include "renderdoc_app.h"
#include "../device.h"
#define ENABLE_RENDERDOC_DEBUGGER 1
#if ENABLE_RENDERDOC_DEBUGGER
namespace
{
static HMODULE hmRenderDoc = nullptr;
static RENDERDOC_API_1_6_0* api = nullptr;
}
bool DirectCompute::initializeRenderDoc()
{
hmRenderDoc = GetModuleHandleW( L"renderdoc.dll" );
if( nullptr == hmRenderDoc )
return false;
pRENDERDOC_GetAPI getApi = (pRENDERDOC_GetAPI)GetProcAddress( hmRenderDoc, "RENDERDOC_GetAPI" );
if( nullptr == getApi )
return false;
if( 1 != getApi( eRENDERDOC_API_Version_1_6_0, (void**)&api ) )
return false;
if( nullptr == api )
return false;
return true;
}
namespace
{
using namespace DirectCompute;
inline bool isKeyPressed( int vKey )
{
return 0 != ( GetAsyncKeyState( vKey ) & 0x8000 );
}
}
CaptureRaii::CaptureRaii() : capturing( false )
{
if( nullptr == api )
return;
if( !isKeyPressed( VK_F12 ) )
return;
ID3D11Device* const dev = device();
if( nullptr == dev )
return;
api->StartFrameCapture( dev, nullptr );
capturing = true;
}
CaptureRaii::~CaptureRaii()
{
if( !capturing )
return;
api->EndFrameCapture( device(), nullptr );
}
#else // !ENABLE_RENDERDOC_DEBUGGER
bool DirectCompute::initializeRenderDoc()
{
return false;
}
DirectCompute::CaptureRaii::CaptureRaii() : capturing( false )
{
}
DirectCompute::CaptureRaii::~CaptureRaii()
{
}
#endif
| 1,400
|
C++
|
.cpp
| 63
| 20.460317
| 97
| 0.738901
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,544
|
mulMatImpl.cpp
|
Const-me_Whisper/Whisper/CPU/mulMatImpl.cpp
|
#include "stdafx.h"
#include <intrin.h>
#include "mulMatImpl.h"
#include "mulMat.kernel.hpp"
#define DBG_TRACK_TEMPLATE_INSTANTIATION 0
#if DBG_TRACK_TEMPLATE_INSTANTIATION
#include <unordered_set>
static std::unordered_set<uint16_t> g_mulMatTemplates;
#endif
namespace
{
using namespace CpuCompute;
bool checkAvx2Support()
{
int cpuInfo[ 4 ];
__cpuid( cpuInfo, 7 );
return ( cpuInfo[ 1 ] & ( 1 << 5 ) ) != 0;
}
// a / b, rounded up to the next integer
inline uint32_t divRoundUp( uint32_t a, uint32_t b )
{
assert( b != 0 );
return ( a + ( b - 1 ) ) / b;
}
}
const bool MulMatBase::haveAvx2 = checkAvx2Support();
MulMatBase::MulMatBase( Tensor& result, const Tensor& a, const Tensor& b, ParallelForRunner& pfor, uint8_t panelHeightRegs, uint8_t tileWidthFloats ) :
resultPointer( result.fp32() ),
pa( a.data() ),
pb( b.data() ),
runner( pfor )
{
length = a.ne[ 0 ];
resultStrides[ 0 ] = result.nb[ 1 ];
resultStrides[ 1 ] = result.nb[ 2 ];
resultStrides[ 2 ] = result.nb[ 3 ];
store( resultSize, result.sizeVec() );
store( stridesA, a.stridesVec() );
store( stridesB, b.stridesVec() );
countPanels = divRoundUp( resultSize[ 0 ], panelHeightRegs * 8 );
completeTilesPerPanel = resultSize[ 1 ] / tileWidthFloats;
lastColumnsInPanel = (uint8_t)( resultSize[ 1 ] % tileWidthFloats );
this->panelHeightRegisters = panelHeightRegs;
this->tileWidth = tileWidthFloats;
// Pick a method which reshapes a panel of the matrix A into the shape we need to compute the product
// Store the pointer to that method in the field of this class
if( a.nb[ 0 ] == 1 )
{
if( haveAvx2 )
pfnMakePanel = &MulMatBase::transposePanelAvx2;
else
pfnMakePanel = &MulMatBase::transposePanel;
}
else if( a.nb[ 1 ] == 1 )
{
switch( panelHeightRegs )
{
case 1:
pfnMakePanel = &MulMatBase::copyPanelColumnMajor8;
break;
case 2:
pfnMakePanel = &MulMatBase::copyPanelColumnMajor16;
break;
case 4:
pfnMakePanel = &MulMatBase::copyPanelColumnMajor32;
break;
default:
throw E_NOTIMPL;
}
}
else
pfnMakePanel = &MulMatBase::gatherPanel;
// That last version is generic and very simple, unlikely to have weird bugs
// pfnMakePanel = &MulMatBase::gatherPanel;
#if DBG_TRACK_TEMPLATE_INSTANTIATION
uint16_t key = panelHeightRegs;
key = key << 8;
key |= tileWidthFloats;
if( !g_mulMatTemplates.emplace( key ).second )
return;
logDebug( u8"MulMatImpl<panelHeightRegs = %i, tileWidthFloats = %i>", (int)panelHeightRegs, (int)tileWidthFloats );
#endif
}
HRESULT MulMatBase::run( ParallelForRunner& pfor )
{
size_t length = (size_t)countPanels * resultSize[ 2 ] * resultSize[ 3 ];
return pfor.parallelFor( *this, length );
}
const float* MulMatBase::getLayerB( size_t m2, size_t m3 ) const
{
const float* rsi = (const float*)this->pb;
rsi += m2 * stridesB[ 2 ];
rsi += m3 * stridesB[ 3 ];
return rsi;
}
// This method is the main one, it’s called by the thread pool
template<uint8_t panelHeightRegs, uint8_t tileWidthFloats>
HRESULT __stdcall MulMatImpl<panelHeightRegs, tileWidthFloats>::compute( size_t i, size_t end ) const noexcept
{
// Allocate a thread-local buffer for the transposed panel
constexpr size_t panelHeightFloats = panelHeightRegs * 8;
uint16_t* const panel = (uint16_t*)runner.threadLocalBuffer( floatsPerPanel() * 2 );
const size_t resultStride = resultStrides[ 0 ];
// Load a few numbers from this class into local variables, while upcasting from DWORD into size_t
const size_t length = this->length;
const std::array<size_t, 2> stridesB{ this->stridesB[ 0 ], this->stridesB[ 1 ] };
// This outer loop iterates over the panels assigned to the current thread
// For example, matrix A of size [ 1024, 1024 ] may be split into panels of size [ 1024, 16 ]
// Each iteration of that loop computes matrix product of that panel, with the complete matrix B
for( ; i < end; i++ )
{
const size_t iPanel = i % countPanels;
size_t j = i / countPanels;
const size_t m2 = j % (size_t)resultSize[ 2 ];
const size_t m3 = j / (size_t)resultSize[ 2 ];
CHECK( ( this->*pfnMakePanel )( panel, iPanel, m2, m3 ) );
// We got a column-major panel in the thread local buffer, of size [ length, panelHeightRegs * 8 ]
// Hopefully, these buffers should all fit at least in L3 cache
// The longest matrix I saw in the debugger had 4096 elements, with panelHeightRegs = 4 that's 256 kb of data in the panel
const float* pb = getLayerB( m2, m3 );
float* rdi = getPanelDest( iPanel, m2, m3 );
const size_t storeWidth = std::min( panelHeightFloats, (size_t)resultSize[ 0 ] - iPanel * panelHeightFloats );
std::array<__m256, panelHeightRegs> vecPanel;
#if 1
ResultTile<panelHeightRegs, tileWidthFloats> tile;
// This loop iterates over tiles within the panel.
// Each iteration of the loop computes an output tile of the result matrix.
for( j = 0; j < completeTilesPerPanel; j++, pb += tileWidthFloats * stridesB[ 1 ], rdi += resultStride * tileWidthFloats )
{
setZero( tile.arr );
const uint16_t* rsiA = panel;
const uint16_t* const rsiAEnd = panel + length * panelHeightFloats;
const float* rsiB = pb;
// This loop runs for `length` iterations, iterates over the first dimensions of both matrices, accumulating these dot products we're after
for( ; rsiA < rsiAEnd; rsiA += panelHeightFloats, rsiB += stridesB[ 0 ] )
{
loadPanel( rsiA, vecPanel );
tile.kernel( vecPanel, rsiB, stridesB[ 1 ] );
}
tile.store( rdi, storeWidth, tileWidthFloats, resultStride );
}
if( 0 != lastColumnsInPanel )
{
setZero( tile.arr );
const uint16_t* rsiA = panel;
const uint16_t* rsiAEnd = panel + length * panelHeightFloats;
const float* rsiB = pb;
for( ; rsiA < rsiAEnd; rsiA += panelHeightFloats, rsiB += stridesB[ 0 ] )
{
loadPanel( rsiA, vecPanel );
tile.kernelPartial( vecPanel, rsiB, stridesB[ 1 ], lastColumnsInPanel );
}
tile.store( rdi, storeWidth, lastColumnsInPanel, resultStride );
}
#else
// This version bypasses horizontal tiling, instead implements a brute force algorithm to multiply the current panel by the complete B matrix
// Not terribly efficient, only implemented for debugging purposes
const size_t resHeight = resultSize[ 1 ];
std::array<__m256, panelHeightRegs> tile;
for( size_t j = 0; j < resHeight; j++, pb += stridesB[ 1 ], rdi += resultStride )
{
setZero( tile );
const uint16_t* rsiA = panel;
const uint16_t* const rsiAEnd = panel + length * panelHeightFloats;
const float* rsiB = pb;
for( size_t k = 0; k < length; k++, rsiA += panelHeightFloats, rsiB += stridesB[ 0 ] )
{
loadPanel( rsiA, vecPanel );
const __m256 b = _mm256_broadcast_ss( rsiB );
for( size_t r = 0; r < panelHeightRegs; r++ )
tile[ r ] = _mm256_fmadd_ps( vecPanel[ r ], b, tile[ r ] );
}
alignas( 32 ) std::array<float, panelHeightFloats> arr;
for( size_t k = 0; k < panelHeightRegs; k++ )
_mm256_store_ps( &arr[ k * 8 ], tile[ k ] );
memcpy( rdi, arr.data(), storeWidth * 4 );
}
#endif
}
return S_OK;
}
// Instantiate the templates we need
template class MulMatImpl<4, 1>;
template class MulMatImpl<1, 1>;
template class MulMatImpl<4, 2>;
template class MulMatImpl<1, 2>;
template class MulMatImpl<2, 3>;
template class MulMatImpl<1, 3>;
template class MulMatImpl<2, 4>;
template class MulMatImpl<1, 4>;
| 7,344
|
C++
|
.cpp
| 190
| 35.994737
| 151
| 0.706254
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,545
|
MlContextCpu.cpp
|
Const-me_Whisper/Whisper/CPU/MlContextCpu.cpp
|
#include "stdafx.h"
#include "MlContext.h"
#include "simdUtils.h"
#include "mulMat.h"
using namespace CpuCompute;
MlContext::MlContext( int threads ) : pfor( threads )
{
}
Tensor MlContext::createTensor( eDataType type, const std::array<uint32_t, 4>& size )
{
Tensor res;
check( res.create( type, size, allocator ) );
return res;
}
Tensor MlContext::createTensor( eDataType type, std::initializer_list<uint32_t> size )
{
Tensor res;
check( res.create( type, size, allocator ) );
return res;
}
namespace
{
inline const uint16_t* getRow16( const Tensor& t, size_t index )
{
const uint16_t* rsi = t.fp16();
rsi += index * t.nb[ 1 ];
return rsi;
}
inline const float* getRow32( const Tensor& t, size_t index )
{
const float* rsi = t.fp32();
rsi += index * t.nb[ 1 ];
return rsi;
}
}
Tensor MlContext::addRows( const Tensor& d_te, const Tensor& d_pe, const int* tokens, const int n_tokens, const int n_past )
{
if( d_te.type() != eDataType::FP16 || d_pe.type() != eDataType::FP32 )
throw E_INVALIDARG;
if( d_te.ne[ 0 ] != d_pe.ne[ 0 ] )
throw E_INVALIDARG;
if( n_tokens <= 0 )
throw E_BOUNDS;
Tensor res = createTensor( eDataType::FP32, { d_te.ne[ 0 ], (uint32_t)n_tokens } );
const size_t inner = (size_t)d_te.ne[ 0 ];
const size_t outer = (size_t)n_tokens;
float* rdi = res.fp32();
for( size_t i = 0; i < outer; i++, rdi += inner, tokens++ )
{
const uint16_t* const source1 = getRow16( d_te, *(const uint32_t*)tokens );
const float* const source2 = getRow32( d_pe, i + (size_t)n_past );
addF16to32( rdi, source1, source2, inner );
}
return res;
}
namespace
{
class DispatchHelper3
{
std::array<uint32_t, 3> ne;
public:
DispatchHelper3() = default;
DispatchHelper3( uint32_t x, uint32_t y, uint32_t z )
{
assert( x > 0 && y > 0 && z > 0 );
ne[ 0 ] = x;
ne[ 1 ] = y;
ne[ 2 ] = z;
}
size_t groupsCount() const
{
size_t res = ne[ 0 ];
res *= ne[ 1 ];
res *= ne[ 2 ];
return res;
}
std::array<uint32_t, 3> unpack( size_t idx ) const
{
assert( idx < groupsCount() );
std::array<uint32_t, 3> res;
res[ 0 ] = (uint32_t)( idx % ne[ 0 ] );
idx = idx / ne[ 0 ];
res[ 1 ] = (uint32_t)( idx % ne[ 1 ] );
res[ 2 ] = (uint32_t)( idx / ne[ 1 ] );
return res;
}
void next( std::array<uint32_t, 3>& i ) const
{
i[ 0 ]++;
if( i[ 0 ] < ne[ 0 ] )
return;
i[ 0 ] = 0;
i[ 1 ]++;
if( i[ 1 ] < ne[ 1 ] )
return;
i[ 1 ] = 0;
i[ 2 ]++;
}
};
inline const float* sourceRow( const float* rsi, const std::array<uint32_t, 3>& idx, size_t nb0, size_t nb1, size_t nb2 )
{
const size_t r0 = idx[ 0 ] * nb0;
const size_t r1 = idx[ 1 ] * nb1;
const size_t r2 = idx[ 2 ] * nb2;
rsi = rsi + r0 + r1 + r2;
return rsi;
}
struct NormContext : public iComputeRange
{
const float* source;
float* result;
size_t inner;
DispatchHelper3 threads;
std::array<uint32_t, 3> nbInput;
HRESULT __stdcall compute( size_t i, size_t end ) const override final
{
ALIGNED_SPAN( temp, inner );
std::array<uint32_t, 3> idx = threads.unpack( i );
float* rdi = result + i * inner;
for( ; i < end; i++, rdi += inner, threads.next( idx ) )
{
const float* rsi = sourceRow( source, idx, nbInput[ 0 ], nbInput[ 1 ], nbInput[ 2 ] );
norm( rdi, temp, rsi, inner );
}
return S_OK;
}
};
}
Tensor MlContext::norm( const Tensor& arg )
{
if( arg.type() != eDataType::FP32 || arg.nb[ 0 ] != 1 )
throw E_INVALIDARG;
Tensor res = createTensor( eDataType::FP32, arg.ne );
NormContext context;
context.source = arg.fp32();
context.result = res.fp32();
context.inner = arg.ne[ 0 ];
context.threads = DispatchHelper3( arg.ne[ 1 ], arg.ne[ 2 ], arg.ne[ 3 ] );
context.nbInput = { arg.nb[ 1 ], arg.nb[ 2 ], arg.nb[ 3 ] };
check( pfor.parallelFor( context, context.threads.groupsCount() ) );
return res;
}
// cur = cur * repeat( w ) + repeat( b ): element-wise FMA, with the w / b
// pattern tensors tiled (broadcast) over the shape of cur
void MlContext::fmaRepeat( Tensor& cur, const Tensor& w, const Tensor& b )
{
	// All three tensors must be dense FP32, and w / b must have identical shape
	if( !( cur.isContinuous() && w.isContinuous() && b.isContinuous() ) )
		throw E_INVALIDARG;
	if( !( cur.type() == eDataType::FP32 && w.type() == eDataType::FP32 && b.type() == eDataType::FP32 ) )
		throw E_INVALIDARG;
	if( !isSameShape( w, b ) )
		throw E_INVALIDARG;
	// Iterate output rows in order; idx is the 3D coordinate over dims 1..3
	DispatchHelper3 helper{ cur.ne[ 1 ], cur.ne[ 2 ], cur.ne[ 3 ] };
	std::array<uint32_t, 3> idx = { 0, 0, 0 };
	const size_t countRows = helper.groupsCount();
	const size_t innerRes = cur.ne[ 0 ];
	const size_t innerPattern = w.ne[ 0 ];
	float* rdi = cur.fp32();
	for( size_t i = 0; i < countRows; i++, helper.next( idx ), rdi += innerRes )
	{
		// Wrap the output coordinate so the pattern tensors repeat along each dim
		std::array<uint32_t, 3> idxPattern;
		idxPattern[ 0 ] = idx[ 0 ] % w.ne[ 1 ];
		idxPattern[ 1 ] = idx[ 1 ] % w.ne[ 2 ];
		idxPattern[ 2 ] = idx[ 2 ] % w.ne[ 3 ];
		const float* s1 = sourceRow( w.fp32(), idxPattern, w.nb[ 1 ], w.nb[ 2 ], w.nb[ 3 ] );
		const float* s2 = sourceRow( b.fp32(), idxPattern, b.nb[ 1 ], b.nb[ 2 ], b.nb[ 3 ] );
		fmaRepeatRow( rdi, innerRes, s1, s2, innerPattern );
	}
}
// Matrix product of two tensors, computed on the CPU; the result is a new dense FP32 tensor
Tensor MlContext::mulMat( const Tensor& a, const Tensor& b )
{
	// Validate shapes and types first; the CPU kernel assumes they're compatible
	if( !DirectCompute::canMulMat( a, b ) )
		throw E_INVALIDARG;
	// The output shape is derived from both inputs
	const std::array<uint32_t, 4> resultShape{ a.ne[ 1 ], b.ne[ 1 ], a.ne[ 2 ], b.ne[ 3 ] };
	Tensor product = createTensor( eDataType::FP32, resultShape );
	check( CpuCompute::mulMat( product, a, b, pfor ) );
	return product;
}
// cur = add( repeat( b, cur ), cur ); cur = scale(cur, scaling)
// i.e. cur[ i ] = ( cur[ i ] + b[ i mod pattern ] ) * scaling, with b tiled over cur
void MlContext::addRepeatScale( Tensor& cur, const Tensor& b, float scaling )
{
	// Both tensors must be dense FP32
	if( !( cur.isContinuous() && b.isContinuous() ) )
		throw E_INVALIDARG;
	if( !( cur.type() == eDataType::FP32 && b.type() == eDataType::FP32 ) )
		throw E_INVALIDARG;
	// Iterate output rows in order; idx is the 3D coordinate over dims 1..3
	DispatchHelper3 helper{ cur.ne[ 1 ], cur.ne[ 2 ], cur.ne[ 3 ] };
	std::array<uint32_t, 3> idx = { 0, 0, 0 };
	const size_t countRows = helper.groupsCount();
	const size_t innerRes = (uint32_t)cur.ne[ 0 ];
	const size_t innerPattern = (uint32_t)b.ne[ 0 ];
	float* rdi = cur.fp32();
	// Broadcast the scale once; the row kernel applies it with vector multiplies
	const __m256 scale = _mm256_set1_ps( scaling );
	for( size_t i = 0; i < countRows; i++, helper.next( idx ), rdi += innerRes )
	{
		// Wrap the output coordinate so the pattern tensor repeats along each dim
		std::array<uint32_t, 3> idxPattern;
		idxPattern[ 0 ] = idx[ 0 ] % (uint32_t)b.ne[ 1 ];
		idxPattern[ 1 ] = idx[ 1 ] % (uint32_t)b.ne[ 2 ];
		idxPattern[ 2 ] = idx[ 2 ] % (uint32_t)b.ne[ 3 ];
		const float* source = sourceRow( b.fp32(), idxPattern, b.nb[ 1 ], b.nb[ 2 ], b.nb[ 3 ] );
		addRepeatScaleRow( rdi, innerRes, source, innerPattern, scale );
	}
}
// cur += repeat( b, cur ): element-wise add with b tiled (broadcast) over cur's shape
void MlContext::addRepeat( Tensor& cur, const Tensor& b )
{
	// Both tensors must be dense FP32
	if( !( cur.isContinuous() && b.isContinuous() ) )
		throw E_INVALIDARG;
	if( !( cur.type() == eDataType::FP32 && b.type() == eDataType::FP32 ) )
		throw E_INVALIDARG;
	// Iterate output rows in order; idx is the 3D coordinate over dims 1..3
	DispatchHelper3 helper{ cur.ne[ 1 ], cur.ne[ 2 ], cur.ne[ 3 ] };
	std::array<uint32_t, 3> idx = { 0, 0, 0 };
	const size_t countRows = helper.groupsCount();
	const size_t innerRes = (uint32_t)cur.ne[ 0 ];
	const size_t innerPattern = (uint32_t)b.ne[ 0 ];
	float* rdi = cur.fp32();
	for( size_t i = 0; i < countRows; i++, helper.next( idx ), rdi += innerRes )
	{
		// Wrap the output coordinate so the pattern tensor repeats along each dim
		std::array<uint32_t, 3> idxPattern;
		idxPattern[ 0 ] = idx[ 0 ] % (uint32_t)b.ne[ 1 ];
		idxPattern[ 1 ] = idx[ 1 ] % (uint32_t)b.ne[ 2 ];
		idxPattern[ 2 ] = idx[ 2 ] % (uint32_t)b.ne[ 3 ];
		const float* source = sourceRow( b.fp32(), idxPattern, b.nb[ 1 ], b.nb[ 2 ], b.nb[ 3 ] );
		addRepeatRow( rdi, innerRes, source, innerPattern );
	}
}
// cur = scale(cur, scaling)
void MlContext::scale( Tensor& cur, float scaling )
{
	// The tensor must be dense FP32, so the whole thing can be treated as one long row
	if( !( cur.isContinuous() && cur.type() == eDataType::FP32 ) )
		throw E_INVALIDARG;
	const __m256 mul = _mm256_set1_ps( scaling );
	const size_t totalElements = cur.countElements();
	scaleRow( cur.fp32(), totalElements, mul );
}
// For each matrix in the batch, sets every element with column index > n_past + row index
// to negative infinity (a causal mask: softmax later turns those elements into zeros)
void MlContext::diagMaskInf( Tensor& cur, uint32_t n_past )
{
	if( !( cur.isContinuous() && cur.type() == eDataType::FP32 ) )
		throw E_INVALIDARG;
	const size_t n = cur.countRows();
	const size_t nc = cur.ne[ 0 ];	// row length
	const size_t nr = cur.ne[ 1 ];	// rows per matrix
	const size_t nz = n / nr;	// count of matrices
	for( size_t k = 0; k < nz; k++ )
	{
		for( size_t j = 0; j < nr; j++ )
		{
			// nb[] strides are in elements, not bytes: the base pointer is float*
			float* const rdi = cur.fp32() + k * cur.nb[ 2 ] + j * cur.nb[ 1 ];
			// +1 because the original code checked for `if( i > n_past + j )`
			// That's why the first index to write is ( n_past + j + 1 )
			const size_t start = n_past + j + 1;
			const ptrdiff_t len = (ptrdiff_t)nc - (ptrdiff_t)start;
			if( len <= 0 )
				continue;
			// Generates a store string instruction (rep stosd).
			// The magic number is negative infinity in FP32: https://www.h-schmidt.net/FloatConverter/IEEE754.html
			__stosd( (DWORD*)( rdi + start ), 0xff800000u, (size_t)len );
		}
	}
}
// In-place softmax of every row of the tensor, parallelized over rows.
// inputScale is applied to the (max-subtracted) inputs before exponentiation.
void MlContext::softMax( Tensor& cur, float inputScale )
{
	if( !( cur.isContinuous() && cur.type() == eDataType::FP32 ) )
		throw E_INVALIDARG;
	// Work item for the thread pool: soft-maxes rows [ i, end ) independently
	struct SoftMaxContext : public iComputeRange
	{
		float* data;	// first element of the tensor
		float inputScale;
		size_t length, stride;	// row length, and distance between rows in elements
		HRESULT __stdcall compute( size_t i, size_t end ) const override final
		{
			float* rdi = data + stride * i;
			for( ; i < end; i++, rdi += stride )
				::softMax( rdi, length, inputScale );
			return S_OK;
		}
	};
	SoftMaxContext context;
	context.data = cur.fp32();
	context.inputScale = inputScale;
	context.length = cur.ne[ 0 ];
	context.stride = cur.nb[ 1 ];
	const size_t n = cur.countRows();
	// Bug fix: the HRESULT from the thread pool was silently dropped here,
	// unlike norm() and other methods in this class which wrap it in check()
	check( pfor.parallelFor( context, n ) );
}
namespace
{
	// Copies a single element; the generic version only handles identical types,
	// the specializations below convert between FP16 and FP32
	template<class R, class S>
	__forceinline void copyElement( R* rdi, const S* rsi )
	{
		static_assert( std::is_same<R, S>() );
		*rdi = *rsi;
	}
	// FP16 -> FP32 scalar upcast
	template<>
	__forceinline void copyElement<float, uint16_t>( float* rdi, const uint16_t* rsi )
	{
		__m128i iv = _mm_cvtsi32_si128( *rsi );
		__m128 fv = _mm_cvtph_ps( iv );
		_mm_store_ss( rdi, fv );
	}
	// FP32 -> FP16 scalar downcast; rounding mode 0 = round to nearest even
	template<>
	__forceinline void copyElement<uint16_t, float>( uint16_t* rdi, const float* rsi )
	{
		__m128 fv = _mm_load_ss( rsi );
		__m128i iv = _mm_cvtps_ph( fv, 0 );
		*rdi = (uint16_t)(uint32_t)_mm_cvtsi128_si32( iv );
	}
	// Copies a dense row of `length` elements, converting types when they differ
	template<class R, class S>
	__forceinline void copyRow( R* rdi, const S* rsi, size_t length )
	{
		static_assert( std::is_same<R, S>() );
		memcpy( rdi, rsi, length * sizeof( R ) );
	}
	template<>
	__forceinline void copyRow<uint16_t, float>( uint16_t* rdi, const float* rsi, size_t length )
	{
		floatsDowncast( rdi, rsi, length );
	}
	template<>
	__forceinline void copyRow<float, uint16_t>( float* rdi, const uint16_t* rsi, size_t length )
	{
		floatsUpcast( rdi, rsi, length );
	}
	// Gathers a possibly-strided source tensor into the dense destination rdi.
	// `shape` describes the source: ne[] = sizes, nb[] = strides in elements.
	template<class R, class S>
	static void __declspec( noinline ) copyImpl( R* rdi, const S* rsi, const TensorShape& shape )
	{
		const bool continuousRows = shape.nb[ 0 ] == 1;
		for( size_t i03 = 0; i03 < shape.ne[ 3 ]; i03++, rsi += shape.nb[ 3 ] )
		{
			const S* source2 = rsi;
			for( size_t i02 = 0; i02 < shape.ne[ 2 ]; i02++, source2 += shape.nb[ 2 ] )
			{
				const S* source1 = source2;
				for( size_t i01 = 0; i01 < shape.ne[ 1 ]; i01++, source1 += shape.nb[ 1 ] )
				{
					// Performance optimization here: when the rows are dense, we can copy them much faster with memcpy()
					// Or at least with AVX, when we need to convert between numeric types
					if( continuousRows )
					{
						// This branch is very predictable, same outcome for all loop iterations
						copyRow( rdi, source1, shape.ne[ 0 ] );
						rdi += shape.ne[ 0 ];
					}
					else
					{
						// Slow path: element by element, stepping by the innermost stride
						const S* source0 = source1;
						for( size_t i00 = 0; i00 < shape.ne[ 0 ]; i00++, source0 += shape.nb[ 0 ] )
						{
							copyElement( rdi, source0 );
							rdi++;
						}
					}
				}
			}
		}
	}
}
// Copies `source` into `result`, converting between FP16 and FP32 when the types differ.
// `result` must be dense; `source` may be strided (e.g. permuted), handled by the slow path.
HRESULT MlContext::copyImpl( Tensor& result, const Tensor& source )
{
	if( !( result.isContinuous() && ( result.countElements() == source.countElements() ) ) )
		return E_INVALIDARG;
	const eDataType typeResult = result.type();
	const eDataType typeSource = source.type();
	if( source.isContinuous() )
	{
		// Fast path: both tensors are dense, bulk copy or bulk type conversion
		const size_t elts = result.countElements();
		if( typeResult == typeSource )
		{
			const size_t bytes = elts * elementSize( typeResult );
			memcpy( result.data(), source.data(), bytes );
			return S_OK;
		}
		if( typeSource == eDataType::FP16 && typeResult == eDataType::FP32 )
		{
			floatsUpcast( result.fp32(), source.fp16(), elts );
			return S_OK;
		}
		if( typeSource == eDataType::FP32 && typeResult == eDataType::FP16 )
		{
			floatsDowncast( result.fp16(), source.fp32(), elts );
			return S_OK;
		}
		// Only FP16 / FP32 combinations are implemented
		return E_UNEXPECTED;
	}
	else
	{
		// Slow path: strided source, dispatch to the templated gather in the anonymous namespace
		if( typeSource == eDataType::FP16 && typeResult == eDataType::FP16 )
		{
			::copyImpl( result.fp16(), source.fp16(), source );
			return S_OK;
		}
		if( typeSource == eDataType::FP32 && typeResult == eDataType::FP32 )
		{
			::copyImpl( result.fp32(), source.fp32(), source );
			return S_OK;
		}
		if( typeSource == eDataType::FP16 && typeResult == eDataType::FP32 )
		{
			::copyImpl( result.fp32(), source.fp16(), source );
			return S_OK;
		}
		if( typeSource == eDataType::FP32 && typeResult == eDataType::FP16 )
		{
			::copyImpl( result.fp16(), source.fp32(), source );
			return S_OK;
		}
		return E_UNEXPECTED;
	}
}
// Returns a tensor with the requested data type and shape, filled with the data of `a`.
// When the type matches and `a` is dense, this is a zero-copy reshape; otherwise a
// new tensor is created and the data is converted / gathered into it.
Tensor MlContext::copy( const Tensor& a, eDataType type, std::initializer_list<uint32_t> size )
{
	const size_t dims = size.size();
	if( 0 == dims || dims > 4 )
		throw E_BOUNDS;
	// The requested shape must contain exactly as many elements as the source
	size_t nRequested = 1;
	for( size_t i = 0; i < dims; i++ )
		nRequested *= size.begin()[ i ];
	if( nRequested != a.countElements() )
		throw E_INVALIDARG;
	if( a.type() == type && a.isContinuous() )
	{
		// Same type, and it's dense - no need to move data, equal to reshape
		Tensor res{ a };
		for( size_t i = 0; i < dims; i++ )
			res.ne[ i ] = size.begin()[ i ];	// fixed a stray double semicolon here
		for( size_t i = dims; i < 4; i++ )
			res.ne[ i ] = 1;	// pad the shape with size-1 dimensions
		res.setDenseStrides();
		return res;
	}
	else
	{
		// Need to convert types, and/or transpose the tensor. Make another tensor for the output
		Tensor res = createTensor( type, size );
		check( copyImpl( res, a ) );
		return res;
	}
}
// Reorders the dimensions of the tensor: dimension i of `a` becomes dimension axis_i
// of the result. No data is moved; only sizes and strides are shuffled.
Tensor MlContext::permute( const Tensor& a, uint8_t axis0, uint8_t axis1, uint8_t axis2, uint8_t axis3 )
{
	const std::array<uint8_t, 4> axes{ axis0, axis1, axis2, axis3 };
	// The four axes must form a permutation of [ 0 .. 3 ]
	for( size_t i = 0; i < 4; i++ )
	{
		assert( axes[ i ] < 4 );
		for( size_t j = i + 1; j < 4; j++ )
			assert( axes[ i ] != axes[ j ] );
	}
	Tensor res = a;
	for( size_t i = 0; i < 4; i++ )
	{
		res.ne[ axes[ i ] ] = a.ne[ i ];
		res.nb[ axes[ i ] ] = a.nb[ i ];
	}
	return res;
}
// Reshapes `dest` to the requested size, then copies / converts `a` into it.
// Unlike copy(), the destination tensor and its memory are supplied by the caller.
void MlContext::copyInPlace( Tensor& dest, const Tensor& a, eDataType type, std::initializer_list<uint32_t> size )
{
	assert( type == dest.type() );
	const size_t dims = size.size();
	if( 0 == dims || dims > 4 )
		throw E_BOUNDS;
	// Element counts of the new shape, the source, and the destination must all agree
	size_t nRequested = 1;
	for( size_t i = 0; i < dims; i++ )
	{
		uint32_t n = size.begin()[ i ];
		nRequested *= n;
	}
	if( nRequested != a.countElements() || nRequested != dest.countElements() )
		throw E_INVALIDARG;
	// Reshape the destination
	for( size_t i = 0; i < dims; i++ )
		dest.ne[ i ] = size.begin()[ i ];
	for( size_t i = dims; i < 4; i++ )
		dest.ne[ i ] = 1;
	dest.setDenseStrides();
	// Copy the data
	check( copyImpl( dest, a ) );
}
// Element-wise a += b; both tensors must be dense FP32 of equal element count
void MlContext::addInPlace( Tensor& a, const Tensor& b )
{
	const bool dense = a.isContinuous() && b.isContinuous();
	const bool fp32 = ( a.type() == eDataType::FP32 ) && ( b.type() == eDataType::FP32 );
	if( !( dense && fp32 ) )
		throw E_NOTIMPL;
	addRowInPlace( a.fp32(), b.fp32(), a.countElements() );
}
// Element-wise sum of two tensors, returned as a new dense FP32 tensor
Tensor MlContext::add( const Tensor& a, const Tensor& b )
{
	const bool dense = a.isContinuous() && b.isContinuous();
	const bool fp32 = ( a.type() == eDataType::FP32 ) && ( b.type() == eDataType::FP32 );
	if( !( dense && fp32 ) )
		throw E_NOTIMPL;
	Tensor res = createTensor( eDataType::FP32, a.ne );
	addRow( res.fp32(), a.fp32(), b.fp32(), a.countElements() );
	return res;
}
// cur = gelu( cur + repeat( b, cur ) ), row by row, with b tiled over cur's shape.
// GELU is computed through the shared FP16 lookup tables.
void MlContext::addRepeatGelu( Tensor& cur, const Tensor& b )
{
	// Both tensors must be dense FP32
	if( !( cur.isContinuous() && b.isContinuous() ) )
		throw E_INVALIDARG;
	if( !( cur.type() == eDataType::FP32 && b.type() == eDataType::FP32 ) )
		throw E_INVALIDARG;
	// Iterate output rows in order; idx is the 3D coordinate over dims 1..3
	DispatchHelper3 helper{ cur.ne[ 1 ], cur.ne[ 2 ], cur.ne[ 3 ] };
	std::array<uint32_t, 3> idx = { 0, 0, 0 };
	const size_t countRows = helper.groupsCount();
	const size_t innerRes = (uint32_t)cur.ne[ 0 ];
	const size_t innerPattern = (uint32_t)b.ne[ 0 ];
	float* rdi = cur.fp32();
	auto& lookupTables = getLookupTables();
	for( size_t i = 0; i < countRows; i++, helper.next( idx ), rdi += innerRes )
	{
		// Wrap the output coordinate so the pattern tensor repeats along each dim
		std::array<uint32_t, 3> idxPattern;
		idxPattern[ 0 ] = idx[ 0 ] % (uint32_t)b.ne[ 1 ];
		idxPattern[ 1 ] = idx[ 1 ] % (uint32_t)b.ne[ 2 ];
		idxPattern[ 2 ] = idx[ 2 ] % (uint32_t)b.ne[ 3 ];
		const float* source = sourceRow( b.fp32(), idxPattern, b.nb[ 1 ], b.nb[ 2 ], b.nb[ 3 ] );
		addRepeatGeluRow( rdi, innerRes, source, innerPattern, lookupTables );
	}
	// Removed a dead `return;` that used to follow the loop
}
| 16,717
|
C++
|
.cpp
| 523
| 29.231358
| 124
| 0.624899
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,546
|
KvTensorsCpu.cpp
|
Const-me_Whisper/Whisper/CPU/KvTensorsCpu.cpp
|
#include "stdafx.h"
#include "KvTensors.h"
using namespace CpuCompute;
// Create these two large tensors, FP16 precision
// A single contiguous allocation holds both: keys at the start, values right after.
HRESULT KvTensors::create( const Whisper::sModelParams& mp )
{
	// Capacity: n_text_state elements per ( layer, context position ) pair
	const uint32_t n_mem = mp.n_text_layer * mp.n_text_ctx;
	const uint32_t n_elements = mp.n_text_state * n_mem;
	// *2 because keys and values have the same size; sizeof( uint16_t ) = FP16 element
	const size_t cb = sizeof( uint16_t ) * (size_t)n_elements * 2;
	CHECK( memory.allocate( cb ) );
	uint16_t* pointer = (uint16_t*)memory.pointer();
	keys = pointer;
	values = pointer + n_elements;
	size = n_elements;	// size of each of the two tensors, in elements
	return S_OK;
}
| 529
|
C++
|
.cpp
| 16
| 31.375
| 63
| 0.7182
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,547
|
mulMat.cpp
|
Const-me_Whisper/Whisper/CPU/mulMat.cpp
|
#include "stdafx.h"
#include "mulMat.h"
#include "mulMatImpl.h"
using namespace CpuCompute;
namespace
{
	// Instantiates and runs the register-blocked matrix multiply kernel with the given
	// panel height (in registers) and tile width (in floats) template parameters
	template<uint8_t panelHeightRegs, uint8_t tileWidthFloats>
	static HRESULT mulMatImpl( Tensor& result, const Tensor& a, const Tensor& b, ParallelForRunner& pfor )
	{
		MulMatImpl<panelHeightRegs, tileWidthFloats> impl{ result, a, b, pfor };
		return impl.run( pfor );
	}
}
// Multi-threaded CPU matrix product; only the FP16 ( a ) * FP32 ( b ) combination
// is implemented. Picks a template specialization of the kernel based on the shapes.
HRESULT CpuCompute::mulMat( Tensor& result, const Tensor& a, const Tensor& b, ParallelForRunner& pfor )
{
	if( a.type() != eDataType::FP16 )
		return E_NOTIMPL;
	if( b.type() != eDataType::FP32 )
		return E_NOTIMPL;
	// return mulMatImpl<1, 1>( result, a, b, pfor );
	// Tile width follows the count of columns in b (capped at 4);
	// taller panels are only used when a has enough rows to fill them.
	switch( b.ne[ 1 ] )
	{
	case 1:
		// Multiplying by a single row
		return ( a.ne[ 1 ] >= 32 ) ? mulMatImpl<4, 1>( result, a, b, pfor ) : mulMatImpl<1, 1>( result, a, b, pfor );
	case 2:
		return ( a.ne[ 1 ] >= 32 ) ? mulMatImpl<4, 2>( result, a, b, pfor ) : mulMatImpl<1, 2>( result, a, b, pfor );
	case 3:
		return ( a.ne[ 1 ] >= 16 ) ? mulMatImpl<2, 3>( result, a, b, pfor ) : mulMatImpl<1, 3>( result, a, b, pfor );
	default:
		return ( a.ne[ 1 ] >= 16 ) ? mulMatImpl<2, 4>( result, a, b, pfor ) : mulMatImpl<1, 4>( result, a, b, pfor );
	}
}
| 1,316
|
C++
|
.cpp
| 50
| 23.82
| 103
| 0.638095
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,548
|
simdUtils.cpp
|
Const-me_Whisper/Whisper/CPU/simdUtils.cpp
|
#include "stdafx.h"
#include "simdUtils.h"
#include "../ML/LookupTablesData.h"
#include <cmath>
#include <memory>
namespace
{
	// Mask to round an element count down to a multiple of 8 (one __m256 of floats)
	constexpr size_t maskAlign8 = ~(size_t)7;
	// Loads 8 FP16 values and upcasts them to a vector of 8 floats
	__forceinline __m256 load8( const uint16_t* rsi )
	{
		__m128i i = _mm_loadu_si128( ( const __m128i* )rsi );
		return _mm256_cvtph_ps( i );
	}
	// Loads `count` < 8 FP16 values from each of x and y, upcast to FP32 in fx / fy.
	// Never reads past the `count` elements; the unused higher lanes are zero.
	__forceinline void loadPartial( const uint16_t* x, const uint16_t* y, size_t count, __m256& fx, __m256& fy )
	{
		assert( count < 8 );
		__m128i ix, iy;
		switch( count )
		{
		case 1: // load 2 bytes
			ix = _mm_cvtsi32_si128( *x );
			iy = _mm_cvtsi32_si128( *y );
			break;
		case 2: // load 4 bytes
			ix = _mm_cvtsi32_si128( *(const int*)x );
			iy = _mm_cvtsi32_si128( *(const int*)y );
			break;
		case 3: // load 6 bytes
			ix = _mm_cvtsi32_si128( *(const int*)x );
			iy = _mm_cvtsi32_si128( *(const int*)y );
			ix = _mm_insert_epi16( ix, x[ 2 ], 2 );
			iy = _mm_insert_epi16( iy, y[ 2 ], 2 );
			break;
		case 4: // load 8 bytes
			ix = _mm_cvtsi64_si128( *(const int64_t*)x );
			iy = _mm_cvtsi64_si128( *(const int64_t*)y );
			break;
		case 5: // load 10 bytes
			ix = _mm_cvtsi64_si128( *(const int64_t*)x );
			iy = _mm_cvtsi64_si128( *(const int64_t*)y );
			ix = _mm_insert_epi16( ix, x[ 4 ], 4 );
			iy = _mm_insert_epi16( iy, y[ 4 ], 4 );
			break;
		case 6: // load 12 bytes
			ix = _mm_cvtsi64_si128( *(const int64_t*)x );
			iy = _mm_cvtsi64_si128( *(const int64_t*)y );
			ix = _mm_insert_epi32( ix, *(const int*)( x + 4 ), 2 );
			iy = _mm_insert_epi32( iy, *(const int*)( y + 4 ), 2 );
			break;
		case 7: // load 14 bytes
			ix = _mm_cvtsi64_si128( *(const int64_t*)x );
			iy = _mm_cvtsi64_si128( *(const int64_t*)y );
			ix = _mm_insert_epi32( ix, *(const int*)( x + 4 ), 2 );
			iy = _mm_insert_epi32( iy, *(const int*)( y + 4 ), 2 );
			ix = _mm_insert_epi16( ix, x[ 6 ], 6 );
			iy = _mm_insert_epi16( iy, y[ 6 ], 6 );
			break;
		default:
			// count == 0: both outputs are zero vectors
			fx = fy = _mm256_setzero_ps();
			return;
		}
		fx = _mm256_cvtph_ps( ix );
		fy = _mm256_cvtph_ps( iy );
	}
	// Same as above for a single FP16 source; returns zero for count == 0
	__forceinline __m256 loadPartial( const uint16_t* x, size_t count )
	{
		assert( count < 8 );
		__m128i ix;
		switch( count )
		{
		case 1: // load 2 bytes
			ix = _mm_cvtsi32_si128( *x );
			break;
		case 2: // load 4 bytes
			ix = _mm_cvtsi32_si128( *(const int*)x );
			break;
		case 3: // load 6 bytes
			ix = _mm_cvtsi32_si128( *(const int*)x );
			ix = _mm_insert_epi16( ix, x[ 2 ], 2 );
			break;
		case 4: // load 8 bytes
			ix = _mm_cvtsi64_si128( *(const int64_t*)x );
			break;
		case 5: // load 10 bytes
			ix = _mm_cvtsi64_si128( *(const int64_t*)x );
			ix = _mm_insert_epi16( ix, x[ 4 ], 4 );
			break;
		case 6: // load 12 bytes
			ix = _mm_cvtsi64_si128( *(const int64_t*)x );
			ix = _mm_insert_epi32( ix, *(const int*)( x + 4 ), 2 );
			break;
		case 7: // load 14 bytes
			ix = _mm_cvtsi64_si128( *(const int64_t*)x );
			ix = _mm_insert_epi32( ix, *(const int*)( x + 4 ), 2 );
			ix = _mm_insert_epi16( ix, x[ 6 ], 6 );
			break;
		default:
			return _mm256_setzero_ps();
		}
		return _mm256_cvtph_ps( ix );
	}
	// Loads 2 floats into the low lanes of an SSE vector
	__forceinline __m128 loadFloat2( const float* rsi )
	{
		return _mm_castpd_ps( _mm_load_sd( (const double*)rsi ) );
	}
	// Loads 3 floats into the low lanes of an SSE vector
	__forceinline __m128 loadFloat3( const float* rsi )
	{
		__m128 f = loadFloat2( rsi );
		f = _mm_insert_ps( f, _mm_load_ss( rsi + 2 ), 0x20 );
		return f;
	}
	// Loads `count` < 8 floats; never reads past them, unused higher lanes are zero
	__forceinline __m256 loadPartial( const float* rsi, size_t count )
	{
		assert( count < 8 );
		__m128 low = _mm_setzero_ps();
		__m128 high = _mm_setzero_ps();
		switch( count )
		{
		case 1:
			low = _mm_load_ss( rsi );
			break;
		case 2:
			low = loadFloat2( rsi );
			break;
		case 3:
			low = loadFloat3( rsi );
			break;
		case 4:
			low = _mm_loadu_ps( rsi );
			break;
		case 5:
			low = _mm_loadu_ps( rsi );
			high = _mm_load_ss( rsi + 4 );
			break;
		case 6:
			low = _mm_loadu_ps( rsi );
			high = loadFloat2( rsi + 4 );
			break;
		case 7:
			low = _mm_loadu_ps( rsi );
			high = loadFloat3( rsi + 4 );
			break;
		}
		return _mm256_setr_m128( low, high );
	}
	// Stores the 2 lowest float lanes of the vector
	__forceinline void storeFloat2( float* rdi, __m128 vec )
	{
		_mm_store_sd( (double*)rdi, _mm_castps_pd( vec ) );
	}
	// Stores the `count` < 8 lowest float lanes; never writes past them
	__forceinline void storePartial( float* rdi, __m256 vec, size_t count )
	{
		assert( count < 8 );
		__m128 tmp = _mm256_castps256_ps128( vec );
		if( count >= 4 )
		{
			// Store the low half, then handle the remaining 0..3 lanes of the high half
			_mm_storeu_ps( rdi, tmp );
			if( count == 4 )
				return;
			count -= 4;
			rdi += 4;
			tmp = _mm256_extractf128_ps( vec, 1 );
		}
		switch( count )
		{
		case 1:
			_mm_store_ss( rdi, tmp );
			return;
		case 2:
			storeFloat2( rdi, tmp );
			return;
		case 3:
			storeFloat2( rdi, tmp );
			( (int*)rdi )[ 2 ] = _mm_extract_ps( tmp, 2 );
			return;
		}
	}
}
// rdi[ i ] = a[ i ] + b[ i ]; both inputs are FP16, the output is FP32
void addF16to32( float* rdi, const uint16_t* a, const uint16_t* b, size_t length )
{
	const uint16_t* const endAligned = a + ( length & maskAlign8 );
	const size_t rem = length % 8;
	// Main loop handles 8 elements per iteration
	for( ; a < endAligned; a += 8, b += 8, rdi += 8 )
	{
		__m256 f1 = load8( a );
		__m256 f2 = load8( b );
		__m256 res = _mm256_add_ps( f1, f2 );
		_mm256_storeu_ps( rdi, res );
	}
	// Partial loads/stores for the tail, never touching memory past the buffers
	if( rem != 0 )
	{
		__m256 f1, f2;
		loadPartial( a, b, rem, f1, f2 );
		__m256 res = _mm256_add_ps( f1, f2 );
		storePartial( rdi, res, rem );
	}
}
// rdi[ i ] = a[ i ] + b[ i ]; a is FP16, b and the output are FP32
void addF16to32( float* rdi, const uint16_t* a, const float* b, size_t length )
{
	const uint16_t* const endAligned = a + ( length & maskAlign8 );
	const size_t rem = length % 8;
	// Main loop handles 8 elements per iteration
	for( ; a < endAligned; a += 8, b += 8, rdi += 8 )
	{
		__m256 f1 = load8( a );
		__m256 f2 = _mm256_loadu_ps( b );
		__m256 res = _mm256_add_ps( f1, f2 );
		_mm256_storeu_ps( rdi, res );
	}
	// Partial loads/stores for the tail, never touching memory past the buffers
	if( rem != 0 )
	{
		__m256 f1 = loadPartial( a, rem );
		__m256 f2 = loadPartial( b, rem );
		__m256 res = _mm256_add_ps( f1, f2 );
		storePartial( rdi, res, rem );
	}
}
// 8 all-ones dwords followed by 8 zeros; presumably the loadTailMask* helpers
// (defined elsewhere) load 8 ints at an offset to produce a mask with the
// first `rem` lanes set — TODO confirm against their definitions
alignas( 64 ) const std::array<int, 16> s_zeroTailMask =
{
	-1,-1,-1,-1,-1,-1,-1,-1,
	0, 0, 0, 0, 0, 0, 0, 0,
};
namespace
{
	// Sums the 8 float lanes of the vector into a single scalar
	__forceinline float horizontalSum( __m256 vec )
	{
		__m128 v = _mm256_extractf128_ps( vec, 1 );
		v = _mm_add_ps( v, _mm256_castps256_ps128( vec ) );
		v = _mm_add_ps( v, _mm_movehl_ps( v, v ) );
		v = _mm_add_ss( v, _mm_movehdup_ps( v ) );
		return _mm_cvtss_f32( v );
	}
}
// Normalizes a row: subtracts the mean, then scales by 1 / sqrt( variance + eps ).
// rdi = destination row, rsi = source row of `length` floats;
// temp = 32-byte aligned scratch, written up to length rounded up to a multiple of 8.
void norm( float* rdi, float* temp, const float* rsi, size_t length )
{
	assert( (size_t)temp % 32 == 0 );
	const float* rsiEndAligned = rsi + ( length & maskAlign8 );
	const size_t rem = length % 8;
	// First pass: copy to temp buffer, and compute the sum; computeVectorSum() in HLSL
	__m256 sum = _mm256_setzero_ps();
	float* t;
	for( t = temp; rsi < rsiEndAligned; rsi += 8, t += 8 )
	{
		__m256 v = _mm256_loadu_ps( rsi );
		sum = _mm256_add_ps( sum, v );
		_mm256_store_ps( t, v );
	}
	float* const tEndAligned = t;
	if( 0 != rem )
	{
		// The partial load zero-pads the vector, so the padding doesn't affect the sum
		__m256 v = loadPartial( rsi, rem );
		sum = _mm256_add_ps( sum, v );
		_mm256_store_ps( t, v );
		t += 8;
	}
	const float lengthFloat = (float)(int)length;
	const float meanScalar = horizontalSum( sum ) / lengthFloat;
	const __m256 mean = _mm256_set1_ps( meanScalar );
	// Second pass, offsetAndComputeSumSquares() in HLSL
	sum = _mm256_setzero_ps();
	for( t = temp; t < tEndAligned; t += 8 )
	{
		__m256 v = _mm256_load_ps( t );
		v = _mm256_sub_ps( v, mean );
		_mm256_store_ps( t, v );
		sum = _mm256_fmadd_ps( v, v, sum );
	}
	if( 0 != rem )
	{
		// Re-zero the padding lanes: after subtracting the mean they're no longer 0
		__m256 v = _mm256_load_ps( t );
		v = _mm256_sub_ps( v, mean );
		v = _mm256_and_ps( v, loadTailMaskFloats( rem ) );
		_mm256_store_ps( t, v );
		sum = _mm256_fmadd_ps( v, v, sum );
	}
	// Final pass: scale, and copy from temporary buffer into the destination row
	constexpr float eps = 1e-5f; // TODO: make this a parameter
	// Portability fix: std::sqrtf is not required to exist by the C++ standard
	// (MSVC extension); the float overload of std::sqrt is exactly equivalent
	const float scaleScalar = 1.0f / std::sqrt( horizontalSum( sum ) / lengthFloat + eps );
	const __m256 scale = _mm256_set1_ps( scaleScalar );
	for( t = temp; t < tEndAligned; t += 8, rdi += 8 )
	{
		__m256 v = _mm256_load_ps( t );
		v = _mm256_mul_ps( v, scale );
		_mm256_storeu_ps( rdi, v );
	}
	if( 0 != rem )
	{
		__m256 v = _mm256_load_ps( t );
		v = _mm256_mul_ps( v, scale );
		storePartial( rdi, v, rem );
	}
}
// rdi[ i ] = rdi[ i ] * w[ i % lenPattern ] + b[ i % lenPattern ] over `len` floats.
// Only the two cases the callers actually produce are implemented:
// lenPattern == 1 (broadcast scalars) and lenPattern == len (element-wise).
void fmaRepeatRow( float* rdi, size_t len, const float* w, const float* b, size_t lenPattern )
{
	float* rdiEndAligned = rdi + ( len & maskAlign8 );
	const size_t rem = len % 8;
	if( 1 == lenPattern )
	{
		// Broadcast the single multiplier and addend across all lanes
		const __m256 v1 = _mm256_broadcast_ss( w );
		const __m256 v2 = _mm256_broadcast_ss( b );
		for( ; rdi < rdiEndAligned; rdi += 8 )
		{
			__m256 v = _mm256_loadu_ps( rdi );
			v = _mm256_fmadd_ps( v, v1, v2 );
			_mm256_storeu_ps( rdi, v );
		}
		if( 0 != rem )
		{
			// Masked load/store so the tail never touches memory past the row
			const __m256i mask = loadTailMaskInt( rem );
			__m256 v = _mm256_maskload_ps( rdi, mask );
			v = _mm256_fmadd_ps( v, v1, v2 );
			_mm256_maskstore_ps( rdi, mask, v );
		}
	}
	else if( len == lenPattern )
	{
		for( ; rdi < rdiEndAligned; rdi += 8, w += 8, b += 8 )
		{
			__m256 v = _mm256_loadu_ps( rdi );
			__m256 v1 = _mm256_loadu_ps( w );
			__m256 v2 = _mm256_loadu_ps( b );
			v = _mm256_fmadd_ps( v, v1, v2 );
			_mm256_storeu_ps( rdi, v );
		}
		if( 0 != rem )
		{
			const __m256i mask = loadTailMaskInt( rem );
			__m256 v = _mm256_maskload_ps( rdi, mask );
			__m256 v1 = _mm256_maskload_ps( w, mask );
			__m256 v2 = _mm256_maskload_ps( b, mask );
			v = _mm256_fmadd_ps( v, v1, v2 );
			_mm256_maskstore_ps( rdi, mask, v );
		}
	}
	else
	{
		// TODO: implement if this actually happens
		throw E_NOTIMPL;
	}
}
// rdi[ i ] = ( rdi[ i ] + b[ i % lenPattern ] ) * scale over `len` floats.
// Only lenPattern == 1 (broadcast) and lenPattern == len are implemented.
void __vectorcall addRepeatScaleRow( float* rdi, size_t len, const float* b, size_t lenPattern, const __m256 scale )
{
	float* rdiEndAligned = rdi + ( len & maskAlign8 );
	const size_t rem = len % 8;
	if( 1 == lenPattern )
	{
		const __m256 v2 = _mm256_broadcast_ss( b );
		for( ; rdi < rdiEndAligned; rdi += 8 )
		{
			__m256 v = _mm256_loadu_ps( rdi );
			v = _mm256_add_ps( v, v2 );
			v = _mm256_mul_ps( v, scale );
			_mm256_storeu_ps( rdi, v );
		}
		if( 0 != rem )
		{
			// Masked load/store so the tail never touches memory past the row
			const __m256i mask = loadTailMaskInt( rem );
			__m256 v = _mm256_maskload_ps( rdi, mask );
			v = _mm256_add_ps( v, v2 );
			v = _mm256_mul_ps( v, scale );
			_mm256_maskstore_ps( rdi, mask, v );
		}
		return;
	}
	else if( len == lenPattern )
	{
		for( ; rdi < rdiEndAligned; rdi += 8, b += 8 )
		{
			__m256 v = _mm256_loadu_ps( rdi );
			__m256 v2 = _mm256_loadu_ps( b );
			v = _mm256_add_ps( v, v2 );
			v = _mm256_mul_ps( v, scale );
			_mm256_storeu_ps( rdi, v );
		}
		if( 0 != rem )
		{
			const __m256i mask = loadTailMaskInt( rem );
			__m256 v = _mm256_maskload_ps( rdi, mask );
			__m256 v2 = _mm256_maskload_ps( b, mask );
			v = _mm256_add_ps( v, v2 );
			v = _mm256_mul_ps( v, scale );
			_mm256_maskstore_ps( rdi, mask, v );
		}
		return;
	}
	else
	{
		// TODO: implement if this actually happens
		throw E_NOTIMPL;
	}
}
// rdi[ i ] += b[ i % lenPattern ] over `len` floats.
// Only lenPattern == 1 (broadcast) and lenPattern == len are implemented.
void addRepeatRow( float* rdi, size_t len, const float* b, size_t lenPattern )
{
	float* rdiEndAligned = rdi + ( len & maskAlign8 );
	const size_t rem = len % 8;
	if( 1 == lenPattern )
	{
		const __m256 v2 = _mm256_broadcast_ss( b );
		for( ; rdi < rdiEndAligned; rdi += 8 )
		{
			__m256 v = _mm256_loadu_ps( rdi );
			v = _mm256_add_ps( v, v2 );
			_mm256_storeu_ps( rdi, v );
		}
		if( 0 != rem )
		{
			// Masked load/store so the tail never touches memory past the row
			const __m256i mask = loadTailMaskInt( rem );
			__m256 v = _mm256_maskload_ps( rdi, mask );
			v = _mm256_add_ps( v, v2 );
			_mm256_maskstore_ps( rdi, mask, v );
		}
		return;
	}
	else if( len == lenPattern )
	{
		for( ; rdi < rdiEndAligned; rdi += 8, b += 8 )
		{
			__m256 v = _mm256_loadu_ps( rdi );
			__m256 v2 = _mm256_loadu_ps( b );
			v = _mm256_add_ps( v, v2 );
			_mm256_storeu_ps( rdi, v );
		}
		if( 0 != rem )
		{
			const __m256i mask = loadTailMaskInt( rem );
			__m256 v = _mm256_maskload_ps( rdi, mask );
			__m256 v2 = _mm256_maskload_ps( b, mask );
			v = _mm256_add_ps( v, v2 );
			_mm256_maskstore_ps( rdi, mask, v );
		}
		return;
	}
	else
	{
		// TODO: implement if this actually happens
		throw E_NOTIMPL;
	}
}
namespace
{
	// Applies GELU to 8 floats: rounds each to FP16 and uses its bit pattern
	// to index the precomputed gelu lookup table, then upcasts back to FP32
	__forceinline __m256 gelu( __m256 x, const DirectCompute::LookupTablesData& lookup )
	{
		__m128i iv = _mm256_cvtps_ph( x, 0 );
		alignas( 16 ) std::array<uint16_t, 8> arr;
		_mm_store_si128( ( __m128i* )arr.data(), iv );
		// Table lookups are scalar; no gather instruction for 16-bit indices
		for( uint16_t& a : arr )
			a = lookup.gelu[ a ];
		iv = _mm_load_si128( ( __m128i* )arr.data() );
		return _mm256_cvtph_ps( iv );
	}
}
// rdi[ i ] = gelu( rdi[ i ] + b[ i % lenPattern ] ) over `len` floats.
// Only lenPattern == 1 (broadcast) and lenPattern == len are implemented.
void addRepeatGeluRow( float* rdi, size_t len, const float* b, size_t lenPattern, const DirectCompute::LookupTablesData& lookup )
{
	float* rdiEndAligned = rdi + ( len & maskAlign8 );
	const size_t rem = len % 8;
	if( 1 == lenPattern )
	{
		const __m256 v2 = _mm256_broadcast_ss( b );
		for( ; rdi < rdiEndAligned; rdi += 8 )
		{
			__m256 v = _mm256_loadu_ps( rdi );
			v = _mm256_add_ps( v, v2 );
			v = gelu( v, lookup );
			_mm256_storeu_ps( rdi, v );
		}
		if( 0 != rem )
		{
			// Masked load/store so the tail never touches memory past the row
			const __m256i mask = loadTailMaskInt( rem );
			__m256 v = _mm256_maskload_ps( rdi, mask );
			v = _mm256_add_ps( v, v2 );
			v = gelu( v, lookup );
			_mm256_maskstore_ps( rdi, mask, v );
		}
		return;
	}
	else if( len == lenPattern )
	{
		for( ; rdi < rdiEndAligned; rdi += 8, b += 8 )
		{
			__m256 v = _mm256_loadu_ps( rdi );
			__m256 v2 = _mm256_loadu_ps( b );
			v = _mm256_add_ps( v, v2 );
			v = gelu( v, lookup );
			_mm256_storeu_ps( rdi, v );
		}
		if( 0 != rem )
		{
			const __m256i mask = loadTailMaskInt( rem );
			__m256 v = _mm256_maskload_ps( rdi, mask );
			__m256 v2 = _mm256_maskload_ps( b, mask );
			v = _mm256_add_ps( v, v2 );
			v = gelu( v, lookup );
			_mm256_maskstore_ps( rdi, mask, v );
		}
		return;
	}
	else
	{
		// TODO: implement if this actually happens
		throw E_NOTIMPL;
	}
}
// rdi[ i ] *= scale over `len` floats; `scale` has the same value in all 8 lanes
void __vectorcall scaleRow( float* rdi, size_t len, const __m256 scale )
{
	float* rdiEndAligned = rdi + ( len & maskAlign8 );
	const size_t rem = len % 8;
	for( ; rdi < rdiEndAligned; rdi += 8 )
	{
		__m256 v = _mm256_loadu_ps( rdi );
		v = _mm256_mul_ps( v, scale );
		_mm256_storeu_ps( rdi, v );
	}
	// Masked load/store for the tail, never touching memory past the row
	if( 0 != rem )
	{
		const __m256i mask = loadTailMaskInt( rem );
		__m256 v = _mm256_maskload_ps( rdi, mask );
		v = _mm256_mul_ps( v, scale );
		_mm256_maskstore_ps( rdi, mask, v );
	}
}
namespace
{
	using DirectCompute::LookupTablesData;
	// Maximum of the 8 float lanes of the vector
	__forceinline float horizontalMax( __m256 vec )
	{
		__m128 v = _mm256_extractf128_ps( vec, 1 );
		v = _mm_max_ps( v, _mm256_castps256_ps128( vec ) );
		v = _mm_max_ps( v, _mm_movehl_ps( v, v ) );
		v = _mm_max_ss( v, _mm_movehdup_ps( v ) );
		return _mm_cvtss_f32( v );
	}
	// Scalar FP16 -> FP32 conversion (reimplements the compiler intrinsic of the same name)
	__forceinline float _cvtsh_ss( uint16_t f16 )
	{
		__m128i i = _mm_cvtsi32_si128( f16 );
		__m128 f = _mm_cvtph_ps( i );
		return _mm_cvtss_f32( f );
	}
	// Scalar FP32 -> FP16 conversion; only rounding mode 0 (nearest even) is supported
	__forceinline uint16_t _cvtss_sh( float f, int rounding )
	{
		assert( 0 == rounding );
		__m128 v = _mm_set_ss( f );
		__m128i i = _mm_cvtps_ph( v, 0 );
		return (uint16_t)(uint32_t)_mm_cvtsi128_si32( i );
	}
}
// Lazily creates the process-wide lookup tables on first use.
// Thread-safe: C++11 guarantees the static initialization runs exactly once.
const LookupTablesData& getLookupTables()
{
	static const std::unique_ptr<LookupTablesData> res = std::make_unique<LookupTablesData>();
	return *res;
}
// In-place softmax over a row of `length` floats:
// x[ i ] = exp( ( x[ i ] - max ) * inputScale ) / sum, with -inf inputs mapped to 0.
// The exponent is computed through the FP16 lookup table.
void softMax( float* rdi, size_t length, const float inputScale )
{
	float* const rdiBegin = rdi;
	float* const rdiEndAligned = rdi + ( length & maskAlign8 );
	const size_t remainder = length % 8;
	// First pass, compute maximum
	__m256 max = _mm256_set1_ps( -INFINITY );
	for( rdi = rdiBegin; rdi < rdiEndAligned; rdi += 8 )
	{
		__m256 v = _mm256_loadu_ps( rdi );
		max = _mm256_max_ps( max, v );
	}
	__m256i tailMask;
	if( 0 != remainder )
	{
		// Blend so the lanes past the end of the row don't affect the maximum
		tailMask = loadTailMaskInt( remainder );
		__m256 v = _mm256_maskload_ps( rdi, tailMask );
		v = _mm256_max_ps( max, v );
		max = _mm256_blendv_ps( max, v, _mm256_castsi256_ps( tailMask ) );
	}
	// Second pass: apply initial scale, compute the exponent, and compute total sum over the row
	const LookupTablesData& lookup = getLookupTables();
	const float maxScalar = horizontalMax( max );
	float* const rdiEnd = rdiBegin + length;
	double sum = 0;
	for( rdi = rdiBegin; rdi < rdiEnd; rdi++ )
	{
		// Possible to vectorize, but relatively hard
		// An easy way is upcast the complete lookup table to FP32 and then use two _mm256_i32gather_ps instructions per iteration
		// However, that instruction is from AVX2 set. Let's hope this loop won't be a bottleneck.
		float f = *rdi;
		if( f != -INFINITY )
		{
			f = ( f - maxScalar ) * inputScale;
			uint16_t f16 = _cvtss_sh( f, 0 );
			f16 = lookup.exponent[ f16 ];
			f = _cvtsh_ss( f16 );
			sum += f;
		}
		else
			f = 0;	// masked elements contribute nothing
		*rdi = f;
	}
	// Final pass: apply the final scale
	const __m256 finalScale = _mm256_set1_ps( (float)( 1.0 / sum ) );
	for( rdi = rdiBegin; rdi < rdiEndAligned; rdi += 8 )
	{
		__m256 v = _mm256_loadu_ps( rdi );
		v = _mm256_mul_ps( v, finalScale );
		_mm256_storeu_ps( rdi, v );
	}
	if( 0 != remainder )
	{
		__m256 v = _mm256_maskload_ps( rdi, tailMask );
		v = _mm256_mul_ps( v, finalScale );
		_mm256_maskstore_ps( rdi, tailMask, v );
	}
}
// Converts `length` FP16 values to FP32
void floatsUpcast( float* rdi, const uint16_t* rsi, size_t length )
{
	const uint16_t* rsiEndAligned = rsi + ( length & maskAlign8 );
	const size_t rem = length % 8;
	for( ; rsi < rsiEndAligned; rsi += 8, rdi += 8 )
		_mm256_storeu_ps( rdi, load8( rsi ) );
	if( 0 != rem )
	{
		// Masked store so only the first `rem` output floats are written
		__m256 v = loadPartial( rsi, rem );
		_mm256_maskstore_ps( rdi, loadTailMaskInt( rem ), v );
	}
}
// Converts `length` FP32 values to FP16, rounding to nearest even
void floatsDowncast( uint16_t* rdi, const float* rsi, size_t length )
{
	const float* rsiEndAligned = rsi + ( length & maskAlign8 );
	size_t rem = length % 8;
	for( ; rsi < rsiEndAligned; rsi += 8, rdi += 8 )
	{
		__m256 vf = _mm256_loadu_ps( rsi );
		__m128i vi = _mm256_cvtps_ph( vf, 0 );
		store16( rdi, vi );
	}
	if( 0 != rem )
	{
		__m256 vf = _mm256_maskload_ps( rsi, loadTailMaskInt( rem ) );
		__m128i vi = _mm256_cvtps_ph( vf, 0 );
		// Scalar tail: extract the lowest converted value, then shift the next one down
		for( size_t i = 0; i < rem; i++, rdi++ )
		{
			*rdi = (uint16_t)(uint32_t)_mm_cvtsi128_si32( vi );
			vi = _mm_srli_si128( vi, 2 );
		}
	}
}
// rdi[ i ] += rsi[ i ] over `length` floats
void addRowInPlace( float* rdi, const float* rsi, size_t length )
{
	const float* rdiEndAligned = rdi + ( length & maskAlign8 );
	size_t rem = length % 8;
	for( ; rdi < rdiEndAligned; rdi += 8, rsi += 8 )
	{
		__m256 a = _mm256_loadu_ps( rdi );
		__m256 b = _mm256_loadu_ps( rsi );
		a = _mm256_add_ps( a, b );
		_mm256_storeu_ps( rdi, a );
	}
	// Masked load/store for the tail, never touching memory past the buffers
	if( 0 != rem )
	{
		const __m256i mask = loadTailMaskInt( rem );
		__m256 a = _mm256_maskload_ps( rdi, mask );
		__m256 b = _mm256_maskload_ps( rsi, mask );
		a = _mm256_add_ps( a, b );
		_mm256_maskstore_ps( rdi, mask, a );
	}
}
// rdi[ i ] = a[ i ] + b[ i ] over `length` floats; rdi must not overlap the inputs
void addRow( float* rdi, const float* a, const float* b, size_t length )
{
	const float* aEndAligned = a + ( length & maskAlign8 );
	size_t rem = length % 8;
	for( ; a < aEndAligned; a += 8, b += 8, rdi += 8 )
	{
		__m256 x = _mm256_loadu_ps( a );
		__m256 y = _mm256_loadu_ps( b );
		x = _mm256_add_ps( x, y );
		_mm256_storeu_ps( rdi, x );
	}
	// Masked load/store for the tail, never touching memory past the buffers
	if( 0 != rem )
	{
		const __m256i mask = loadTailMaskInt( rem );
		__m256 x = _mm256_maskload_ps( a, mask );
		__m256 y = _mm256_maskload_ps( b, mask );
		x = _mm256_add_ps( x, y );
		_mm256_maskstore_ps( rdi, mask, x );
	}
}
| 18,734
|
C++
|
.cpp
| 679
| 24.743741
| 129
| 0.593654
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,549
|
BufferAllocator.cpp
|
Const-me_Whisper/Whisper/CPU/BufferAllocator.cpp
|
#include <stdafx.h>
#include "BufferAllocator.h"
#include <immintrin.h>
#include <ammintrin.h>
using namespace CpuCompute;
// Allocates the arena's backing memory of `cb` bytes and resets the bump pointer
HRESULT BufferAllocator::create( size_t cb )
{
	CHECK( buffer.allocate( cb ) );
	head = 0;	// offset of the next allocation
	size = cb;
	// Presumably fills the block with a debug pattern to catch uninitialized reads
	// (defined elsewhere) — TODO confirm
	dbgMarkUninitializedMemory( buffer.pointer(), cb );
	return S_OK;
}
namespace
{
	// Round up the integer by 32 bytes
	__forceinline size_t roundUpAlloc( size_t cb )
	{
		const size_t mask = 31;
		cb += mask;
		// We require AVX1+FMA3 support, might as well use BMI1
		// _andn_u64( a, b ) computes ( ~a ) & b, i.e. clears the low 5 bits
		return _andn_u64( mask, cb );
	}
}
void* BufferAllocator::allocate( size_t cb, size_t align ) noexcept
{
	// Bump-pointer allocation from the pre-allocated arena; returns nullptr when out of capacity.
	// Alignment is limited by the 32-byte granularity of roundUpAlloc(); larger alignments are unsupported.
	assert( align <= 32 );
	cb = roundUpAlloc( cb );
	uint8_t* const pointer = buffer.pointer();
	// `cb > size - head` is the overflow-safe form of `head + cb > size`:
	// head <= size is an invariant ( asserted below ), while `head + cb` may wrap around for huge cb values
	if( nullptr == pointer || cb > size - head )
	{
		logError( u8"BufferAllocator.allocate, not enough capacity" );
		return nullptr;
	}
	void* const res = pointer + head;
	head += cb;
	assert( head <= size );
	// Fill with the debug pattern so reads of uninitialized memory are visible in a debugger
	dbgMarkUninitializedMemory( res, cb );
	return res;
}
namespace
{
// 2 MB of memory, we hope the OS kernel will then be smart enough to give us large pages.
constexpr size_t virtualAllocGranularityExp2 = 21;
constexpr size_t virtualAllocGranularityMask = ( ( (size_t)1 ) << virtualAllocGranularityExp2 ) - 1;
// Round up the integer by 2 megabytes
__forceinline size_t roundUpVirtualAlloc( size_t cb )
{
const size_t mask = virtualAllocGranularityMask;
cb += mask;
// _andn_u64( mask, cb ) computes ( ~mask ) & cb, clearing the low 21 bits of the incremented value
return _andn_u64( mask, cb );
}
}
// Reserve ( but don't commit ) the complete address range of the arena; pages are committed lazily in allocate()
HRESULT VirtualAllocator::create( size_t cb )
{
// Creating twice without destroying first is a caller bug
if( nullptr != pointer )
return HRESULT_FROM_WIN32( ERROR_ALREADY_INITIALIZED );
cb = roundUpVirtualAlloc( cb );
pointer = (uint8_t*)VirtualAlloc( NULL, cb, MEM_RESERVE, PAGE_READWRITE );
if( nullptr != pointer )
{
head = 0;
sizeAllocated = 0;
sizeVirtual = cb;
return S_OK;
}
const HRESULT hr = getLastHr();
logErrorHr( hr, u8"VirtualAlloc failed" );
return hr;
}
void* VirtualAllocator::allocate( size_t cb, size_t align ) noexcept
{
	// Bump-pointer allocation from the reserved address range, committing pages on demand
	// in multiples of the 2 MB granularity. Returns nullptr on failure.
	assert( align <= 32 );
	cb = roundUpAlloc( cb );
	const size_t newHead = head + cb;
	if( newHead <= sizeAllocated )
	{
		// Fast path — the already committed region has enough room
		void* const res = pointer + head;
		head = newHead;
		dbgMarkUninitializedMemory( res, cb );
		return res;
	}
	if( newHead <= sizeVirtual )
	{
		// Commit more pages at the end of the committed region
		uint8_t* const ptrCommit = pointer + sizeAllocated;
		const size_t cbCommit = roundUpVirtualAlloc( newHead ) - sizeAllocated;
		// Renamed from `res` — the original shadowed the result pointer declared in the nested scope below
		void* const committed = VirtualAlloc( ptrCommit, cbCommit, MEM_COMMIT, PAGE_READWRITE );
		if( nullptr != committed )
		{
			sizeAllocated += cbCommit;
			assert( sizeAllocated <= sizeVirtual );
			void* const res = pointer + head;
			head = newHead;
			dbgMarkUninitializedMemory( res, cb );
			return res;
		}
		const HRESULT hr = getLastHr();
		logErrorHr( hr, u8"VirtualAllocator.allocate, VirtualAlloc failed" );
		return nullptr;
	}
	logError( u8"VirtualAllocator.allocate, not enough arena capacity" );
	return nullptr;
}
VirtualAllocator::~VirtualAllocator()
{
	// Nothing to release when the arena was never created
	if( pointer == nullptr )
		return;
	if( !VirtualFree( pointer, 0, MEM_RELEASE ) )
	{
		// Can't do much in a destructor; log the error and keep going
		logErrorHr( getLastHr(), u8"VirtualFree failed" );
		return;
	}
	pointer = nullptr;
}
#ifndef NDEBUG
// Reusing Microsoft's magic numbers: https://asawicki.info/news_1292_magic_numbers_in_visual_c
// Fill freshly-allocated memory with the 0xCD "clean" byte pattern, making reads of uninitialized memory obvious in a debugger
void CpuCompute::dbgMarkUninitializedMemory( void* pv, size_t cb )
{
__stosb( (uint8_t*)pv, 0xCD, cb );
}
// Fill freed memory with the 0xFEEEFEEE "freed" pattern
// NOTE(review): fills cb / 4 DWORDs, so up to 3 trailing bytes stay unmarked when cb is not a multiple of 4 —
// presumably callers only pass 32-byte rounded sizes; confirm
void CpuCompute::dbgMarkFreedMemory( void* pv, size_t cb )
{
__stosd( (DWORD*)pv, 0xFEEEFEEEu, cb / 4 );
}
#endif
| 3,412
|
C++
|
.cpp
| 126
| 24.880952
| 101
| 0.716646
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,550
|
LargeBuffer.cpp
|
Const-me_Whisper/Whisper/CPU/LargeBuffer.cpp
|
#include "stdafx.h"
#include "LargeBuffer.h"
using namespace CpuCompute;
void LargeBuffer::deallocate()
{
	// No-op when the buffer was never allocated ( or already released )
	if( pv == nullptr )
		return;
	VirtualFree( pv, 0, MEM_RELEASE );
	pv = nullptr;
}
HRESULT LargeBuffer::allocate( size_t cb )
{
	// Release the previous buffer, if any, then reserve + commit a fresh read/write one
	deallocate();
	pv = VirtualAlloc( nullptr, cb, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE );
	return ( pv != nullptr ) ? S_OK : HRESULT_FROM_WIN32( GetLastError() );
}
HRESULT LargeBuffer::setReadOnly( size_t cb )
{
	// Can't change protection of a buffer which was never allocated
	if( nullptr == pv )
		return OLE_E_BLANK;
	// VirtualProtect requires a location to receive the previous protection flags
	DWORD oldProtection = 0;
	if( !VirtualProtect( pv, cb, PAGE_READONLY, &oldProtection ) )
		return HRESULT_FROM_WIN32( GetLastError() );
	return S_OK;
}
| 652
|
C++
|
.cpp
| 30
| 19.733333
| 76
| 0.703883
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,551
|
mulMatImpl.panel.cpp
|
Const-me_Whisper/Whisper/CPU/mulMatImpl.panel.cpp
|
#include "stdafx.h"
#include <intrin.h>
#include "mulMatImpl.h"
#include "mulMatUtils.hpp"
using namespace CpuCompute;
// We want to keep code size reasonable, that's why these panel reshaping methods are in the base class
// Reshape one panel of the first argument matrix into the transposed packed layout used by the kernels.
// `i` is the panel index ( a panel is panelHeightRegisters * 8 rows ); `m2` / `m3` select the 2D slice.
HRESULT MulMatBase::transposePanel( uint16_t* rdi, size_t i, size_t m2, size_t m3 ) const
{
assert( stridesA[ 0 ] == 1 );
const size_t heightFloats = (size_t)panelHeightRegisters * 8;
i *= heightFloats;
const uint16_t* rsi = (const uint16_t*)pa;
rsi += m3 * stridesA[ 3 ];
rsi += m2 * stridesA[ 2 ];
rsi += i * stridesA[ 1 ];
const size_t resultStride = heightFloats;
if( i + heightFloats <= resultSize[ 0 ] )
{
// A complete panel
for( size_t i = 0; i < panelHeightRegisters; i++ )
{
transpose8( rdi, length, rsi, stridesA[ 1 ], resultStride );
// Advance by 8 floats in the output buffer
rdi += 8;
// Advance by 8 rows in the source matrix
rsi += 8 * stridesA[ 1 ];
}
}
else
{
// A partial panel, at the bottom of the first argument matrix
const size_t remainder = resultSize[ 0 ] - i;
assert( remainder > 0 && remainder < heightFloats );
// Zero the whole destination first, so rows past the matrix edge contribute nothing
zeroAlignedMemory( rdi, resultStride * length * sizeof( uint16_t ) );
const size_t completePanels = remainder / 8;
for( size_t i = 0; i < completePanels; i++ )
{
transpose8( rdi, length, rsi, stridesA[ 1 ], resultStride );
rdi += 8;
rsi += 8 * stridesA[ 1 ];
}
const size_t lastPanel = remainder % 8;
if( 0 != lastPanel )
transpose8Partial( rdi, length, lastPanel, rsi, stridesA[ 1 ], resultStride );
}
return S_OK;
}
inline const uint16_t* MulMatBase::getPanelA( size_t i, size_t m2, size_t m3 ) const
{
	// Compute the address of the first FP16 element of the requested panel of matrix A
	const uint16_t* result = (const uint16_t*)pa;
	result += i * stridesA[ 1 ] + m2 * stridesA[ 2 ] + m3 * stridesA[ 3 ];
	return result;
}
// Pack one 8-row panel of a column-major source matrix; no transposition needed, just a strided copy
HRESULT MulMatBase::copyPanelColumnMajor8( uint16_t* rdi, size_t i, size_t m2, size_t m3 ) const
{
assert( stridesA[ 1 ] == 1 );
assert( panelHeightRegisters == 1 );
constexpr size_t heightFloats = 8;
i *= heightFloats;
const uint16_t* rsi = getPanelA( i, m2, m3 );
constexpr size_t resultStride = heightFloats;
if( i + heightFloats <= resultSize[ 0 ] )
{
// A complete panel, height = 8 elements
copyColumnMajor( rdi, length, rsi, stridesA[ 0 ], resultStride );
}
else
{
// A partial panel, at the bottom of the first argument matrix
const size_t remainder = resultSize[ 0 ] - i;
assert( remainder > 0 && remainder < heightFloats );
copyColumnMajorPartial( rdi, length, remainder, rsi, stridesA[ 0 ], resultStride );
}
return S_OK;
}
// Load `len` FP16 values ( 1..7 ) into the low lanes of a vector, zeroing the remaining lanes.
// Each case assembles the exact byte count from the cheapest combination of scalar loads + inserts.
__forceinline __m128i load8Partial( const uint16_t* x, size_t len )
{
assert( len > 0 && len < 8 );
__m128i ix = _mm_setzero_si128();
switch( len )
{
case 1: // load 2 bytes
ix = _mm_cvtsi32_si128( *x );
break;
case 2: // load 4 bytes
ix = _mm_cvtsi32_si128( *(const int*)x );
break;
case 3: // load 6 bytes
ix = _mm_cvtsi32_si128( *(const int*)x );
ix = _mm_insert_epi16( ix, x[ 2 ], 2 );
break;
case 4: // load 8 bytes
ix = _mm_cvtsi64_si128( *(const int64_t*)x );
break;
case 5: // load 10 bytes
ix = _mm_cvtsi64_si128( *(const int64_t*)x );
ix = _mm_insert_epi16( ix, x[ 4 ], 4 );
break;
case 6: // load 12 bytes
ix = _mm_cvtsi64_si128( *(const int64_t*)x );
ix = _mm_insert_epi32( ix, *(const int*)( x + 4 ), 2 );
break;
case 7: // load 14 bytes
ix = _mm_cvtsi64_si128( *(const int64_t*)x );
ix = _mm_insert_epi32( ix, *(const int*)( x + 4 ), 2 );
ix = _mm_insert_epi16( ix, x[ 6 ], 6 );
break;
}
return ix;
}
// Load `len` FP16 values ( 1..15 ), zeroing the rest of the 256-bit vector
__forceinline __m256i load16Partial( const uint16_t* rsi, size_t len )
{
	assert( len > 0 && len < 16 );
	if( len > 8 )
	{
		// Complete lower half, partial upper half
		const __m128i low = load16( (const int*)rsi );
		const __m128i high = load8Partial( rsi + 8, len - 8 );
		return _mm256_setr_m128i( low, high );
	}
	if( len == 8 )
	{
		// Complete lower half, zero upper half
		return _mm256_setr_m128i( load16( (const int*)rsi ), _mm_setzero_si128() );
	}
	// Partial lower half, zero upper half
	return _mm256_setr_m128i( load8Partial( rsi, len ), _mm_setzero_si128() );
}
// Pack one 16-row panel of a column-major source matrix, one 256-bit vector per column
HRESULT MulMatBase::copyPanelColumnMajor16( uint16_t* rdi, size_t i, size_t m2, size_t m3 ) const
{
assert( stridesA[ 1 ] == 1 );
assert( panelHeightRegisters == 2 );
constexpr size_t heightFloats = 16;
i *= heightFloats;
const uint16_t* rsi = getPanelA( i, m2, m3 );
uint16_t* const rdiEnd = rdi + 16 * length;
if( i + heightFloats <= resultSize[ 0 ] )
{
// A complete panel, height = 16 elements
for( ; rdi < rdiEnd; rdi += 16, rsi += stridesA[ 0 ] )
{
__m256i v = _mm256_loadu_si256( ( const __m256i* )rsi );
_mm256_store_si256( ( __m256i* )rdi, v );
}
}
else
{
// A partial panel, at the bottom of the first argument matrix
const size_t remainder = resultSize[ 0 ] - i;
assert( remainder > 0 && remainder < heightFloats );
// load16Partial zero-pads the lanes past the matrix edge
for( ; rdi < rdiEnd; rdi += 16, rsi += stridesA[ 0 ] )
{
__m256i v = load16Partial( rsi, remainder );
_mm256_store_si256( ( __m256i* )rdi, v );
}
}
return S_OK;
}
// Pack one 32-row panel of a column-major source matrix, two 256-bit vectors per column
HRESULT MulMatBase::copyPanelColumnMajor32( uint16_t* rdi, size_t i, size_t m2, size_t m3 ) const
{
assert( stridesA[ 1 ] == 1 );
assert( panelHeightRegisters == 4 );
constexpr size_t heightFloats = 32;
i *= heightFloats;
const uint16_t* rsi = getPanelA( i, m2, m3 );
uint16_t* const rdiEnd = rdi + 32 * length;
if( i + heightFloats <= resultSize[ 0 ] )
{
// A complete panel, height = 32 elements
for( ; rdi < rdiEnd; rdi += 32, rsi += stridesA[ 0 ] )
{
__m256i v = _mm256_loadu_si256( ( const __m256i* )rsi );
_mm256_store_si256( ( __m256i* )rdi, v );
v = _mm256_loadu_si256( ( const __m256i* )( rsi + 16 ) );
_mm256_store_si256( ( __m256i* )( rdi + 16 ), v );
}
}
else
{
// A partial panel, at the bottom of the first argument matrix
const size_t remainder = resultSize[ 0 ] - i;
assert( remainder > 0 && remainder < heightFloats );
// _mm256_setzero_si256 probably compiles into vpxor, that's AVX2, we don't want that here
const __m256 zero = _mm256_setzero_ps();
// Three cases: the valid rows end in the lower vector, in the upper one, or exactly at the boundary
for( ; rdi < rdiEnd; rdi += 32, rsi += stridesA[ 0 ] )
{
if( remainder < 16 )
{
__m256i v = load16Partial( rsi, remainder );
_mm256_store_si256( ( __m256i* )rdi, v );
_mm256_store_ps( (float*)( rdi + 16 ), zero );
}
else if( remainder > 16 )
{
__m256i v = _mm256_loadu_si256( ( const __m256i* )rsi );
_mm256_store_si256( ( __m256i* )rdi, v );
v = load16Partial( rsi + 16, remainder - 16 );
_mm256_store_si256( ( __m256i* )( rdi + 16 ), v );
}
else
{
__m256i v = _mm256_loadu_si256( ( const __m256i* )rsi );
_mm256_store_si256( ( __m256i* )rdi, v );
_mm256_store_ps( (float*)( rdi + 16 ), zero );
}
}
}
return S_OK;
}
// Generic scalar fallback: gather one panel element by element, for arbitrary source strides
HRESULT MulMatBase::gatherPanel( uint16_t* rdi, size_t i, size_t m2, size_t m3 ) const
{
// BTW, I never saw this method called.
const size_t heightFloats = (size_t)panelHeightRegisters * 8;
const size_t length = this->length;
// Zero-fill so the rows past the matrix edge contribute nothing
zeroAlignedMemory( rdi, length * heightFloats * sizeof( uint16_t ) );
const size_t height = std::min( heightFloats, resultSize[ 0 ] - i );
const size_t strideElement = stridesA[ 0 ];
const size_t strideRow = stridesA[ 1 ];
const uint16_t* rsi = getPanelA( i * heightFloats, m2, m3 );
// Pick the loop order which keeps the smaller stride in the inner loop, for better locality
if( strideElement < strideRow )
{
for( size_t r = 0; r < height; r++, rsi += strideRow, rdi++ )
{
const uint16_t* sourceRow = rsi;
uint16_t* destRow = rdi;
for( size_t c = 0; c < length; c++, sourceRow += strideElement, destRow += heightFloats )
*destRow = *sourceRow;
}
}
else
{
for( size_t c = 0; c < length; c++, rsi += strideElement, rdi += heightFloats )
{
const uint16_t* sourceCol = rsi;
uint16_t* destCol = rdi;
for( size_t r = 0; r < height; r++, sourceCol += strideRow, destCol++ )
*destCol = *sourceCol;
}
}
return S_OK;
}
| 7,767
|
C++
|
.cpp
| 245
| 29
| 103
| 0.635175
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,552
|
DecoderTensors.cpp
|
Const-me_Whisper/Whisper/CPU/DecoderTensors.cpp
|
#include "stdafx.h"
#include "DecoderTensors.h"
using namespace CpuCompute;
#if TENSOR_GGML_COMPAT
namespace
{
// Fills a pre-sized vector of ggml_tensor views one tensor at a time,
// and hands out stable pointers into that vector
class CompatContext
{
std::vector<ggml_tensor>& vec;
size_t index;
public:
CompatContext( std::vector<ggml_tensor>& dest, size_t layers ) :
vec( dest )
{
// 21 tensors per decoder layer + 4 model-level tensors; must match the add() calls in makeCompatTensors()
constexpr size_t tensorsPerLayer = 21;
const size_t count = tensorsPerLayer * layers + 4;
vec.resize( count );
index = 0;
}
// Store the view of one tensor in the next slot, return a pointer to the stored element
void add( const Tensor& rsi, ggml_tensor*& res )
{
ggml_tensor& ten = vec[ index ];
index++;
ten = rsi.ggml();
res = &ten;
}
// Store the weight and bias views of the pair
void add2( const TensorPair& rsi, ggml_tensor*& w, ggml_tensor*& b )
{
add( rsi.w, w );
add( rsi.b, b );
}
// True when exactly all pre-allocated slots were filled
bool isComplete() const
{
return index == vec.size();
}
};
}
// Build GGML-compatible views of all decoder tensors; the count and order of add()/add2() calls
// must match the slot count computed in the CompatContext constructor
void DecoderTensors::makeCompatTensors()
{
CompatContext ctx( ggml, layers.size() );
ctx.add( positionalEmbedding, d_pe );
ctx.add( tokenEmbedding, d_te );
ctx.add2( ln, d_ln_w, d_ln_b );
for( auto& i : layers )
{
ctx.add2( i.attnLn0, i.attn_ln_0_w, i.attn_ln_0_b );
ctx.add2( i.attnLn1, i.attn_ln_1_w, i.attn_ln_1_b );
ctx.add2( i.attnQuery, i.attn_q_w, i.attn_q_b );
ctx.add( i.attnKey, i.attn_k_w );
ctx.add2( i.attnValue, i.attn_v_w, i.attn_v_b );
ctx.add2( i.crossAttnLn0, i.cross_attn_ln_0_w, i.cross_attn_ln_0_b );
ctx.add2( i.crossAttnLn1, i.cross_attn_ln_1_w, i.cross_attn_ln_1_b );
ctx.add2( i.crossAttnQuery, i.cross_attn_q_w, i.cross_attn_q_b );
ctx.add2( i.mlpLn, i.mlp_ln_w, i.mlp_ln_b );
ctx.add2( i.mlp0, i.mlp_0_w, i.mlp_0_b );
ctx.add2( i.mlp1, i.mlp_1_w, i.mlp_1_b );
}
assert( ctx.isComplete() );
}
#endif
| 1,613
|
C++
|
.cpp
| 60
| 24.133333
| 71
| 0.646184
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,553
|
HybridLoader.cpp
|
Const-me_Whisper/Whisper/CPU/HybridLoader.cpp
|
#include "stdafx.h"
#include "HybridLoader.h"
using namespace CpuCompute;
using namespace ComLight;
// Build the "model file tensor name" -> "destination Tensor" lookup table for all decoder tensors,
// resizing the per-layer vector first so the stored pointers stay stable
static void populateDecodeTensorsMap( CAtlMap<CStringA, Tensor*>& map, int layersDec, DecoderTensors& dec )
{
dec.layers.resize( layersDec );
map[ "decoder.positional_embedding" ] = &dec.positionalEmbedding;
map[ "decoder.token_embedding.weight" ] = &dec.tokenEmbedding;
map[ "decoder.ln.weight" ] = &dec.ln.w;
map[ "decoder.ln.bias" ] = &dec.ln.b;
CStringA tempString;
// Map a single per-layer tensor by name
auto add = [ & ]( const char* name, int i, Tensor& t )
{
tempString.Format( "decoder.blocks.%i.%s", i, name );
map[ tempString ] = &t;
};
// Map the ".weight" / ".bias" pair of a per-layer tensor
auto add2 = [ & ]( const char* name, int i, TensorPair& tensors )
{
tempString.Format( "decoder.blocks.%i.%s.weight", i, name );
map[ tempString ] = &tensors.w;
tempString.Format( "decoder.blocks.%i.%s.bias", i, name );
map[ tempString ] = &tensors.b;
};
for( int i = 0; i < layersDec; i++ )
{
auto& gpu = dec.layers[ i ];
add2( "mlp_ln", i, gpu.mlpLn );
add2( "mlp.0", i, gpu.mlp0 );
add2( "mlp.2", i, gpu.mlp1 );
add2( "attn_ln", i, gpu.attnLn0 );
add2( "attn.query", i, gpu.attnQuery );
add( "attn.key.weight", i, gpu.attnKey );
add2( "attn.value", i, gpu.attnValue );
add2( "attn.out", i, gpu.attnLn1 );
add2( "cross_attn_ln", i, gpu.crossAttnLn0 );
add2( "cross_attn.query", i, gpu.crossAttnQuery );
// These 3 tensors are used by the encode() method, to compute cross-attention buffers
// Need them in VRAM even for the hybrid model
// add( "cross_attn.key.weight", i, gpu.cross_attn_k_w );
// add2( "cross_attn.value", i, gpu.cross_attn_v_w, gpu.cross_attn_v_b );
add2( "cross_attn.out", i, gpu.crossAttnLn1 );
}
}
// Build the name -> tensor lookup table, and pre-size the pending list to match
HybridLoader::HybridLoader( DecoderTensors& m, int countLayers ) :
destination( m )
{
populateDecodeTensorsMap( map, countLayers, destination );
pending.reserve( map.GetCount() );
}
// Called for each tensor found in the model file.
// Returns S_FALSE when the tensor is not handled by this loader; otherwise records the tensor's
// stream position + size, and skips over the payload — the data is loaded later in completeLoad().
HRESULT HybridLoader::setupTensor( const CStringA& name, int n_dims, int ftype, const std::array<int, 4>& ne, ComLight::iReadStream* stream, int64_t& postponedBytes )
{
auto p = map.Lookup( name );
if( nullptr == p )
return S_FALSE;
Tensor& rdi = *p->m_value;
PendingTensor& pt = pending.emplace_back();
// Copy the first 3 sizes, force the 4-th dimension to 1
__m128i vec = load16( ne.data() );
vec = _mm_insert_epi32( vec, 1, 3 );
store16( &rdi.ne, vec );
rdi.setDenseStrides();
pt.destPointer = p->m_value;
CHECK( stream->getPosition( pt.streamOffset ) );
pt.bufferOffset = bufferBytes;
size_t cbElement;
// ftype 0 = FP32, any other value is treated as FP16
if( ftype == 0 )
{
rdi.setType( eDataType::FP32 );
cbElement = 4;
}
else
{
rdi.setType( eDataType::FP16 );
cbElement = 2;
}
const size_t totalElts = (size_t)(uint32_t)ne[ 0 ] * (uint32_t)ne[ 1 ] * (uint32_t)ne[ 2 ];
if( totalElts * cbElement > UINT_MAX )
return DISP_E_OVERFLOW;
size_t payloadBytes = cbElement * totalElts;
pt.payloadBytes = payloadBytes;
// Skip the payload for now, it's read back in completeLoad()
CHECK( stream->seek( payloadBytes, eSeekOrigin::Current ) );
postponedBytes += (int64_t)payloadBytes;
// Round the buffer offset up to 32 bytes, keeping every tensor aligned in the shared buffer
payloadBytes = ( payloadBytes + 31 ) & ( ~( (size_t)31 ) );
bufferBytes += payloadBytes;
return S_OK;
}
// Second pass: allocate one large buffer, read every postponed tensor payload into it,
// then make the buffer read-only and hand it over to the destination
HRESULT HybridLoader::completeLoad( ComLight::iReadStream* stream, iLoaderProgressSink& progressSink )
{
if( pending.size() != map.GetCount() )
{
logError( u8"Not all tensors loaded from model file - expected %zu, got %zu", map.GetCount(), pending.size() );
return E_INVALIDARG;
}
LargeBuffer buffer;
CHECK( buffer.allocate( bufferBytes ) );
uint8_t* rdi = buffer.pointer();
for( const auto& pt : pending )
{
// stream->read takes an int byte count
if( pt.payloadBytes > INT_MAX )
return DISP_E_OVERFLOW;
CHECK( stream->seek( pt.streamOffset, eSeekOrigin::Begin ) );
int written = 0;
CHECK( stream->read( rdi, (int)pt.payloadBytes, written ) );
CHECK( progressSink.gotBytes( (int64_t)pt.payloadBytes ) );
pt.destPointer->setDataPointer( rdi );
// Advance by the 32-byte rounded size, matching the offsets computed in setupTensor()
const size_t cb = ( pt.payloadBytes + 31 ) & ( ~( (size_t)31 ) );
rdi += cb;
}
CHECK( buffer.setReadOnly( bufferBytes ) );
destination.setMemoryBuffer( std::move( buffer ) );
constexpr double mulMb = 1.0 / ( 1 << 20 );
logDebug( u8"Loaded %zu decoder tensors, %g MB RAM", pending.size(), mulMb * (double)(int64_t)bufferBytes );
return S_OK;
}
| 4,135
|
C++
|
.cpp
| 114
| 33.842105
| 166
| 0.683433
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,554
|
ParallelForRunner.cpp
|
Const-me_Whisper/Whisper/CPU/ParallelForRunner.cpp
|
#include "stdafx.h"
#include "ParallelForRunner.h"
using namespace CpuCompute;
// `threads` <= 1 disables the thread pool; otherwise a Win32 thread pool work object is created up front.
// Throws the HRESULT when CreateThreadpoolWork fails.
ParallelForRunner::ParallelForRunner( int threads ) :
maxThreads( threads )
{
if( maxThreads <= 1 )
{
threadBuffers.resize( 1 );
return;
}
work = CreateThreadpoolWork( &workCallbackStatic, this, nullptr );
if( nullptr == work )
throw getLastHr();
threadBuffers.resize( maxThreads );
}
HRESULT ParallelForRunner::setThreadsCount( int threads )
{
	// Remember the new limit; a single-threaded runner doesn't need the pool
	maxThreads = threads;
	if( threads > 1 )
	{
		threadBuffers.resize( maxThreads );
		// Create the pool work object lazily, in case the runner was constructed single-threaded
		if( work == nullptr )
		{
			work = CreateThreadpoolWork( &workCallbackStatic, this, nullptr );
			if( work == nullptr )
				return getLastHr();
		}
		return S_OK;
	}
	threadBuffers.resize( 1 );
	return S_OK;
}
ParallelForRunner::~ParallelForRunner()
{
	if( work == nullptr )
		return;
	// S_FALSE status means a parallelFor is still in flight; drain the callbacks before closing
	if( status == S_FALSE )
		WaitForThreadpoolWorkCallbacks( work, FALSE );
	CloseThreadpoolWork( work );
}
namespace
{
// Index of the batch the current thread is running; UINT_MAX outside of parallelFor callbacks
thread_local uint32_t currentThreadIndex = UINT_MAX;
}
// Compute the half-open item range of batch `ith`, run it, and record any failure.
// noexcept: no exception may escape into the Win32 thread pool.
void ParallelForRunner::runBatch( size_t ith ) noexcept
{
currentThreadIndex = (uint32_t)ith;
const size_t begin = ( ith * countItems ) / countThreads;
const size_t end = ( ( ith + 1 ) * countItems ) / countThreads;
HRESULT hr = E_UNEXPECTED;
try
{
hr = computeRange->compute( begin, end );
}
catch( HRESULT code )
{
hr = code;
}
catch( const std::bad_alloc& )
{
hr = E_OUTOFMEMORY;
}
catch( const std::exception& )
{
hr = E_FAIL;
}
currentThreadIndex = UINT_MAX;
if( SUCCEEDED( hr ) )
return;
// Record the first failure; S_FALSE is the "in progress, no errors so far" sentinel
InterlockedCompareExchange( &status, hr, S_FALSE );
}
// Return a scratch buffer of at least `cb` bytes private to the calling pool thread;
// grows the buffer on demand, and throws an HRESULT when called outside of a parallelFor callback
void* ParallelForRunner::threadLocalBuffer( size_t cb )
{
const uint32_t idx = currentThreadIndex;
if( idx < threadBuffers.size() )
{
ThreadBuffer& tb = threadBuffers[ idx ];
if( tb.cb >= cb )
{
// We already have large enough buffer for the current thread
return tb.memory.pointer();
}
// Grow: release the old buffer first, then allocate the larger one
tb.memory.deallocate();
check( tb.memory.allocate( cb ) );
tb.cb = cb;
return tb.memory.pointer();
}
if( idx != UINT_MAX )
throw E_BOUNDS;
else
{
logError( u8"threadLocalBuffer() method only works from inside a pool callback" );
throw E_UNEXPECTED;
}
}
void __stdcall ParallelForRunner::workCallbackStatic( PTP_CALLBACK_INSTANCE Instance, void* pv, PTP_WORK Work ) noexcept
{
	// Each pool thread claims a unique 1-based batch index; batch #0 runs on the submitting thread
	ParallelForRunner* const context = (ParallelForRunner*)pv;
	const size_t batchIndex = (uint32_t)InterlockedIncrement( &context->threadIndex );
	context->runBatch( batchIndex );
}
HRESULT ParallelForRunner::parallelFor( iComputeRange& compute, size_t length, size_t minBatch )
{
	// Split [ 0 .. length ) into up to maxThreads batches of at least minBatch items each.
	// The calling thread always runs batch #0; the remaining batches go to the thread pool.
	if( maxThreads <= 1 )
	{
		// Single-threaded mode: run the complete range inline
		currentThreadIndex = 0;
		const HRESULT hr1 = compute.compute( 0, length );
		currentThreadIndex = UINT_MAX;
		return hr1;
	}
	assert( minBatch > 0 );
	size_t nth = length / minBatch;
	nth = std::min( nth, (size_t)(uint32_t)maxThreads );
	// Fix: when length < minBatch the division above yields 0, making countThreads == 0
	// and causing an integer division by zero in runBatch(). Always run at least one batch.
	nth = std::max( nth, (size_t)1 );
	computeRange = &compute;
	countItems = length;
	countThreads = nth;
	threadIndex = 0;
	// S_FALSE is the "in progress, no errors so far" sentinel; a failing batch replaces it
	status = S_FALSE;
	for( size_t i = 1; i < nth; i++ )
		SubmitThreadpoolWork( work );
	runBatch( 0 );
	if( nth > 1 )
		WaitForThreadpoolWorkCallbacks( work, FALSE );
	computeRange = nullptr;
	const HRESULT hr = status;
	status = S_OK;
	if( SUCCEEDED( hr ) )
		return S_OK;
	return hr;
}
| 3,170
|
C++
|
.cpp
| 132
| 21.795455
| 120
| 0.713766
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,555
|
mulMatImpl.avx2.cpp
|
Const-me_Whisper/Whisper/CPU/mulMatImpl.avx2.cpp
|
#include "stdafx.h"
#include "mulMatImpl.h"
#include <immintrin.h>
#include "mulMatUtils.hpp"
using namespace CpuCompute;
namespace
{
// Prefetch distance in bytes; 0 disables the prefetch block entirely
constexpr size_t prefetchBytes = 96;
constexpr int prefetchHint = _MM_HINT_T0;
constexpr size_t maskAlign16 = ~(size_t)15;
__forceinline __m256i load( const void* rsi )
{
return _mm256_loadu_si256( ( const __m256i* )rsi );
}
// 3-stage in-register transpose of 8 rows of 16 FP16 values each ( unpack 16 -> 32 -> 64 bit lanes );
// expects r0..r7 in scope, leaves the transposed columns in t0..t7
#define TRANSPOSE_8X16() \
\
__m256i t0 = _mm256_unpacklo_epi16( r0, r1 ); \
__m256i t1 = _mm256_unpackhi_epi16( r0, r1 ); \
__m256i t2 = _mm256_unpacklo_epi16( r2, r3 ); \
__m256i t3 = _mm256_unpackhi_epi16( r2, r3 ); \
__m256i t4 = _mm256_unpacklo_epi16( r4, r5 ); \
__m256i t5 = _mm256_unpackhi_epi16( r4, r5 ); \
__m256i t6 = _mm256_unpacklo_epi16( r6, r7 ); \
__m256i t7 = _mm256_unpackhi_epi16( r6, r7 ); \
\
r0 = _mm256_unpacklo_epi32( t0, t2 ); \
r1 = _mm256_unpackhi_epi32( t0, t2 ); \
r2 = _mm256_unpacklo_epi32( t1, t3 ); \
r3 = _mm256_unpackhi_epi32( t1, t3 ); \
r4 = _mm256_unpacklo_epi32( t4, t6 ); \
r5 = _mm256_unpackhi_epi32( t4, t6 ); \
r6 = _mm256_unpacklo_epi32( t5, t7 ); \
r7 = _mm256_unpackhi_epi32( t5, t7 ); \
\
t0 = _mm256_unpacklo_epi64( r0, r4 ); \
t1 = _mm256_unpackhi_epi64( r0, r4 ); \
t2 = _mm256_unpacklo_epi64( r1, r5 ); \
t3 = _mm256_unpackhi_epi64( r1, r5 ); \
t4 = _mm256_unpacklo_epi64( r2, r6 ); \
t5 = _mm256_unpackhi_epi64( r2, r6 ); \
t6 = _mm256_unpacklo_epi64( r3, r7 ); \
t7 = _mm256_unpackhi_epi64( r3, r7 )
__forceinline void storeLow( void* rdi, __m256i v )
{
__m128i i = _mm256_castsi256_si128( v );
_mm_store_si128( ( __m128i* )rdi, i );
}
// Store the lower 128 bits of t0..t7 into 8 consecutive destination rows, advancing the 3 write cursors
#define STORE_8X16_LOW() \
storeLow( rdi, t0 ); \
storeLow( rdi + destStride, t1 ); \
storeLow( rdi + destStride * 2, t2 ); \
rdi += destStride * 8; \
storeLow( rdiMid, t3 ); \
storeLow( rdiMid + destStride, t4 ); \
storeLow( rdiMid + destStride * 2, t5 ); \
rdiMid += destStride * 8; \
storeLow( rdiLast, t6 ); \
storeLow( rdiLast + destStride, t7 ); \
rdiLast += destStride * 8
__forceinline void storeHigh( void* rdi, __m256i v )
{
__m128i i = _mm256_extracti128_si256( v, 1 );
_mm_store_si128( ( __m128i* )rdi, i );
}
// Same as STORE_8X16_LOW, for the upper 128 bits of t0..t7
#define STORE_8X16_HIGH() \
storeHigh( rdi, t0 ); \
storeHigh( rdi + destStride, t1 ); \
storeHigh( rdi + destStride * 2, t2 ); \
rdi += destStride * 8; \
storeHigh( rdiMid, t3 ); \
storeHigh( rdiMid + destStride, t4 ); \
storeHigh( rdiMid + destStride * 2, t5 ); \
rdiMid += destStride * 8; \
storeHigh( rdiLast, t6 ); \
storeHigh( rdiLast + destStride, t7 ); \
rdiLast += destStride * 8
__forceinline void prefetch( const uint8_t* p )
{
_mm_prefetch( (const char*)p, prefetchHint );
}
// Transpose an 8 x w FP16 block, 16 columns per iteration, using the AVX2 unpack macros above.
// The three read/write cursor pairs ( base / Mid / Last ) reduce address computation per iteration.
__forceinline void transpose8Avx2( uint16_t* rdiWords, size_t w, const uint16_t* rsiWords, size_t sourceStride, size_t destStride )
{
assert( 0 == ( (size_t)rdiWords ) % 16 );
assert( 0 == destStride % 8 );
assert( w <= sourceStride );
// Scale strides to bytes, and cast the pointers
sourceStride *= 2;
destStride *= 2;
uint8_t* rdi = (uint8_t*)rdiWords;
const uint8_t* rsi = (const uint8_t*)rsiWords;
const uint8_t* const rsiEndAligned = rsi + ( w & maskAlign16 ) * 2;
const uint8_t* const rsiEnd = rsi + w * 2;
const uint8_t* rsiMid = rsi + sourceStride * 3;
const uint8_t* rsiLast = rsi + sourceStride * 6;
uint8_t* rdiMid = rdi + destStride * 3;
uint8_t* rdiLast = rdi + destStride * 6;
while( rsi < rsiEndAligned )
{
// Load 16x8 block into 8 registers
__m256i r0 = load( rsi );
__m256i r1 = load( rsi + sourceStride );
__m256i r2 = load( rsi + sourceStride * 2 );
rsi += 32;
__m256i r3 = load( rsiMid );
__m256i r4 = load( rsiMid + sourceStride );
__m256i r5 = load( rsiMid + sourceStride * 2 );
rsiMid += 32;
__m256i r6 = load( rsiLast );
__m256i r7 = load( rsiLast + sourceStride );
rsiLast += 32;
// Transpose FP16 values in registers
TRANSPOSE_8X16();
// Store
STORE_8X16_LOW();
STORE_8X16_HIGH();
if constexpr( prefetchBytes > 0 )
{
if( rsi + prefetchBytes < rsiEnd )
{
prefetch( rsi + prefetchBytes );
prefetch( rsi + sourceStride + prefetchBytes );
prefetch( rsi + sourceStride * 2 + prefetchBytes );
prefetch( rsiMid + prefetchBytes );
prefetch( rsiMid + sourceStride + prefetchBytes );
prefetch( rsiMid + sourceStride * 2 + prefetchBytes );
prefetch( rsiLast + prefetchBytes );
prefetch( rsiLast + sourceStride + prefetchBytes );
}
}
}
if( rsi < rsiEnd )
{
// Scalar tail for the final w % 16 columns
// Loading 8 elements into corresponding lanes of 8 vectors
// This way there's no data dependencies between these load instructions
// Out of order execution should hopefully do it's magic in the CPU, running all these loads in parallel.
__m128i r0;
__m128i r1 = _mm_setzero_si128();
__m128i r2 = _mm_setzero_si128();
__m128i r3 = _mm_setzero_si128();
__m128i r4 = _mm_setzero_si128();
__m128i r5 = _mm_setzero_si128();
__m128i r6 = _mm_setzero_si128();
__m128i r7 = _mm_setzero_si128();
__m128i t0, t1, t2, t3, t4, t5, t6;
#pragma loop( no_vector )
while( rsi < rsiEnd )
{
r0 = _mm_cvtsi32_si128( *(const uint16_t*)rsi );
r1 = _mm_insert_epi16( r1, *(const int16_t*)( rsi + sourceStride ), 1 );
r2 = _mm_insert_epi16( r2, *(const int16_t*)( rsi + sourceStride * 2 ), 2 );
rsi += 2;
r3 = _mm_insert_epi16( r3, *(const int16_t*)( rsiMid ), 3 );
r4 = _mm_insert_epi16( r4, *(const int16_t*)( rsiMid + sourceStride ), 4 );
r5 = _mm_insert_epi16( r5, *(const int16_t*)( rsiMid + sourceStride * 2 ), 5 );
rsiMid += 2;
r6 = _mm_insert_epi16( r6, *(const int16_t*)( rsiLast ), 6 );
r7 = _mm_insert_epi16( r7, *(const int16_t*)( rsiLast + sourceStride ), 7 );
rsiLast += 2;
// Bitwise operations are pretty fast, AMD Zen3 CPU can run 4 of them every clock cycle
// Combine 8 vectors into one
t0 = _mm_or_si128( r0, r1 );
t1 = _mm_or_si128( r2, r3 );
t2 = _mm_or_si128( r4, r5 );
t3 = _mm_or_si128( r6, r7 );
t4 = _mm_or_si128( t0, t1 );
t5 = _mm_or_si128( t2, t3 );
t6 = _mm_or_si128( t4, t5 );
// Store 8 FP16 values, the destination is aligned
_mm_store_si128( ( __m128i* )rdi, t6 );
rdi += destStride;
}
}
}
// Same as transpose8Avx2, for a partial block of h ( 1..7 ) source rows; missing rows produce zeros.
// Both switch statements below rely on intentional case fallthrough: case N also loads rows N-1 .. 2.
__forceinline void transpose8PartialAvx2( uint16_t* rdiWords, size_t w, size_t h, const uint16_t* rsiWords, size_t sourceStride, size_t destStride )
{
assert( 0 == ( (size_t)rdiWords ) % 16 );
assert( 0 == destStride % 8 );
assert( w <= sourceStride );
assert( h > 0 && h < 8 );
// Scale strides to bytes, and cast the pointers
sourceStride *= 2;
destStride *= 2;
uint8_t* rdi = (uint8_t*)rdiWords;
const uint8_t* rsi = (const uint8_t*)rsiWords;
const uint8_t* const rsiEndAligned = rsi + ( w & maskAlign16 ) * 2;
const uint8_t* const rsiEnd = rsi + w * 2;
const uint8_t* rsiMid = rsi + sourceStride * 3;
const uint8_t* rsiLast = rsi + sourceStride * 6;
uint8_t* rdiMid = rdi + destStride * 3;
uint8_t* rdiLast = rdi + destStride * 6;
while( rsi < rsiEndAligned )
{
// Load the block into 8 registers, set unused rows to zero
__m256i r0 = load( rsi );
__m256i r1 = _mm256_setzero_si256();
__m256i r2 = _mm256_setzero_si256();
__m256i r3 = _mm256_setzero_si256();
__m256i r4 = _mm256_setzero_si256();
__m256i r5 = _mm256_setzero_si256();
__m256i r6 = _mm256_setzero_si256();
// These branches, whether direct or indirect, are very predictable: same outcome for all iterations of the outer loop
switch( h )
{
case 7:
r6 = load( rsiLast );
case 6:
r5 = load( rsiMid + sourceStride * 2 );
case 5:
r4 = load( rsiMid + sourceStride );
case 4:
r3 = load( rsiMid );
case 3:
r2 = load( rsi + sourceStride * 2 );
case 2:
r1 = load( rsi + sourceStride );
}
rsi += 32;
rsiMid += 32;
rsiLast += 32;
__m256i r7 = _mm256_setzero_si256();
// Transpose FP16 values in registers
TRANSPOSE_8X16();
// Store
STORE_8X16_LOW();
STORE_8X16_HIGH();
}
if( rsi < rsiEnd )
{
// Scalar tail for the final w % 16 columns
// Loading 8 elements into corresponding lanes of 8 vectors
// This way there's no data dependencies between these load instructions
// Out of order execution should hopefully do it's magic in the CPU, running all these loads in parallel.
__m128i r0;
__m128i r1 = _mm_setzero_si128();
__m128i r2 = _mm_setzero_si128();
__m128i r3 = _mm_setzero_si128();
__m128i r4 = _mm_setzero_si128();
__m128i r5 = _mm_setzero_si128();
__m128i r6 = _mm_setzero_si128();
__m128i t0, t1, t2, t3, t4, t5;
#pragma loop( no_vector )
while( rsi < rsiEnd )
{
r0 = _mm_cvtsi32_si128( *(const uint16_t*)rsi );
switch( h )
{
case 7:
r6 = _mm_insert_epi16( r6, *(const int16_t*)( rsiLast ), 6 );
case 6:
r5 = _mm_insert_epi16( r5, *(const int16_t*)( rsiMid + sourceStride * 2 ), 5 );
case 5:
r4 = _mm_insert_epi16( r4, *(const int16_t*)( rsiMid + sourceStride ), 4 );
case 4:
r3 = _mm_insert_epi16( r3, *(const int16_t*)( rsiMid ), 3 );
case 3:
r2 = _mm_insert_epi16( r2, *(const int16_t*)( rsi + sourceStride * 2 ), 2 );
case 2:
r1 = _mm_insert_epi16( r1, *(const int16_t*)( rsi + sourceStride ), 1 );
}
rsi += 2;
rsiMid += 2;
rsiLast += 2;
// Bitwise operations are pretty fast, AMD Zen3 CPU can run 4 of them every clock cycle
// Combine 7 vectors into one
t0 = _mm_or_si128( r0, r1 );
t1 = _mm_or_si128( r2, r3 );
t2 = _mm_or_si128( r4, r5 );
t3 = _mm_or_si128( t0, t1 );
t4 = _mm_or_si128( t2, r6 );
t5 = _mm_or_si128( t3, t4 );
// Store 8 FP16 values, the destination is aligned
_mm_store_si128( ( __m128i* )rdi, t5 );
rdi += destStride;
}
}
}
}
// At least for the hybrid decoder, this method absolutely dominates the CPU time.
// And not due to the integer shuffles - the bottleneck is loading data from the source matrix.
// AVX2 version of transposePanel(); same contract, faster 16-column transpose kernels.
HRESULT MulMatBase::transposePanelAvx2( uint16_t* rdi, size_t i, size_t m2, size_t m3 ) const
{
assert( stridesA[ 0 ] == 1 );
const size_t heightFloats = (size_t)panelHeightRegisters * 8;
i *= heightFloats;
const uint16_t* rsi = (const uint16_t*)pa;
rsi += m3 * stridesA[ 3 ];
rsi += m2 * stridesA[ 2 ];
rsi += i * stridesA[ 1 ];
const size_t resultStride = heightFloats;
if( i + heightFloats <= resultSize[ 0 ] )
{
// A complete panel
for( size_t i = 0; i < panelHeightRegisters; i++ )
{
transpose8Avx2( rdi, length, rsi, stridesA[ 1 ], resultStride );
// Advance by 8 floats in the output buffer
rdi += 8;
// Advance by 8 rows in the source matrix
rsi += 8 * stridesA[ 1 ];
}
}
else
{
// A partial panel, at the bottom of the first argument matrix
const size_t remainder = resultSize[ 0 ] - i;
assert( remainder > 0 && remainder < heightFloats );
// Zero the destination first, so rows past the matrix edge contribute nothing
zeroAlignedMemory( rdi, resultStride * length * sizeof( uint16_t ) );
const size_t completePanels = remainder / 8;
for( size_t i = 0; i < completePanels; i++ )
{
transpose8Avx2( rdi, length, rsi, stridesA[ 1 ], resultStride );
rdi += 8;
rsi += 8 * stridesA[ 1 ];
}
const size_t lastPanel = remainder % 8;
if( 0 != lastPanel )
transpose8PartialAvx2( rdi, length, lastPanel, rsi, stridesA[ 1 ], resultStride );
}
return S_OK;
}
| 11,828
|
C++
|
.cpp
| 319
| 33.106583
| 149
| 0.596058
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,556
|
TensorCpu.cpp
|
Const-me_Whisper/Whisper/CPU/TensorCpu.cpp
|
#include <stdafx.h>
#include <atomic>
#include "Tensor.h"
using namespace CpuCompute;
#if TENSOR_INTERNAL_ALLOC
namespace
{
// This structure is immediately before the payload of every tensor which has an internally-allocated memory buffer
class alignas( 32 ) sTensorMemoryHeader
{
std::atomic_ptrdiff_t refCounter;
public:
// Reset the counter to the specified value
void reset( ptrdiff_t rc )
{
refCounter = rc;
}
// Increment the ref.counter
void increment()
{
refCounter++;
}
// Decrement the ref.counter, and return true if it reached zero as the result
bool decrement()
{
ptrdiff_t val = --refCounter;
assert( val >= 0 );
return 0 == val;
}
};
// Step back from the payload pointer to the header which precedes it
inline sTensorMemoryHeader* getMemBlockHeader( void* pv )
{
assert( nullptr != pv );
uint8_t* pb = (uint8_t*)pv;
static_assert( sizeof( sTensorMemoryHeader ) == 32 );
return (sTensorMemoryHeader*)( pb - sizeof( sTensorMemoryHeader ) );
}
// Free the complete block ( header + payload ); `pointer` is the header, not the payload
inline void releaseBlock( sTensorMemoryHeader* pointer )
{
assert( nullptr != pointer );
_aligned_free( pointer );
}
// Allocate header + payload in one 32-byte aligned block, and return the payload pointer
inline void* allocateBlock( size_t cb, ptrdiff_t initialRefCounter = 1 )
{
cb += sizeof( sTensorMemoryHeader );
void* pv = _aligned_malloc( cb, 32 );
if( nullptr == pv )
return nullptr;
sTensorMemoryHeader* header = (sTensorMemoryHeader*)pv;
header->reset( initialRefCounter );
return ( (uint8_t*)pv ) + sizeof( sTensorMemoryHeader );
}
}
// Detaches this tensor from its payload. When the payload was allocated internally
// and this was the last tensor referencing it, the buffer is returned to the heap.
// Afterwards the tensor is empty: zero shape, null data pointer, invalid type marker.
void Tensor::deallocate()
{
    if( ownsMemory && nullptr != m_data )
    {
        sTensorMemoryHeader* const header = getMemBlockHeader( m_data );
        // decrement() returns true when the last reference is gone
        if( header->decrement() )
            releaseBlock( header );
    }
    ownsMemory = false;
    TensorShape::setZero();
    m_data = nullptr;
    // Deliberately invalid eDataType value, to catch use of a deallocated tensor
    m_type = (eDataType)0xFF;
}
#endif
// Copy constructor: shares the data pointer with the source tensor.
// When the source owns an internally-allocated buffer, the hidden reference
// counter is incremented so both tensors keep the buffer alive.
Tensor::Tensor( const Tensor& that )
{
    store( ne, that.sizeVec() );
    store( nb, that.stridesVec() );
    m_type = that.m_type;
    m_data = that.m_data;
#if TENSOR_INTERNAL_ALLOC
    const bool shared = that.ownsMemory && nullptr != m_data;
    if( shared )
        getMemBlockHeader( m_data )->increment();
    ownsMemory = shared;
#endif
}
// Move constructor: steals the payload, leaving the source tensor with a null
// data pointer (and without ownership, when internal allocations are enabled).
Tensor::Tensor( Tensor&& that ) noexcept
{
    store( ne, that.sizeVec() );
    store( nb, that.stridesVec() );
    m_type = that.m_type;
    m_data = that.m_data;
    that.m_data = nullptr;
#if TENSOR_INTERNAL_ALLOC
    // Transfer ownership instead of bumping the reference counter
    ownsMemory = that.ownsMemory;
    that.ownsMemory = false;
#endif
}
// Copy assignment: releases the current payload, then shares the source tensor's
// shape, strides, type and data pointer. When the source owns its buffer, the
// hidden reference counter is incremented so both tensors share it.
void Tensor::operator=( const Tensor& that )
{
    assert( this != &that );
    // Self-assignment guard: without it, deallocate() below could drop the last
    // reference and free the buffer we are about to copy from — a use-after-free
    // in release builds, where the assert above is compiled out.
    if( this == &that )
        return;
#if TENSOR_INTERNAL_ALLOC
    deallocate();
#endif
    store( ne, that.sizeVec() );
    store( nb, that.stridesVec() );
    m_data = that.m_data;
    m_type = that.m_type;
#if TENSOR_INTERNAL_ALLOC
    if( that.ownsMemory && nullptr != m_data )
    {
        getMemBlockHeader( m_data )->increment();
        ownsMemory = true;
    }
    else
        ownsMemory = false;
#endif
}
// Move assignment: releases the current payload, then steals shape, strides,
// type, data pointer and (when enabled) ownership from the source tensor.
void Tensor::operator=( Tensor&& that ) noexcept
{
    assert( this != &that );
    // Self-move guard: deallocate() would otherwise destroy the payload and then
    // copy the already-cleared fields from ourselves, losing the tensor's data.
    if( this == &that )
        return;
#if TENSOR_INTERNAL_ALLOC
    deallocate();
#endif
    store( ne, that.sizeVec() );
    store( nb, that.stridesVec() );
    m_data = that.m_data;
    m_type = that.m_type;
    that.m_data = nullptr;
#if TENSOR_INTERNAL_ALLOC
    ownsMemory = that.ownsMemory;
    that.ownsMemory = false;
#endif
}
// (Re)creates the tensor: dense layout with the given size, uninitialized payload.
// When `alloc` is supplied the memory comes from that allocator and the tensor does not own it;
// otherwise (internal allocations enabled) a ref-counted buffer is allocated on the heap.
// Returns E_OUTOFMEMORY when the allocation fails; E_POINTER when alloc is null and internal allocations are compiled out.
HRESULT Tensor::create( eDataType type, const std::array<uint32_t, 4>& sizeElements, iMemoryAllocator* alloc )
{
    // Total payload size in bytes; computed in size_t, each dimension is 32 bits
    const size_t len = (size_t)sizeElements[ 0 ] * sizeElements[ 1 ] * sizeElements[ 2 ] * sizeElements[ 3 ];
    const size_t cbElement = DirectCompute::elementSize( type );
    const size_t cb = len * cbElement;
#if TENSOR_INTERNAL_ALLOC
    // Release the previous payload, if any
    deallocate();
#endif
    store( ne, load( sizeElements ) );
    TensorShape::setDenseStrides();
    this->m_type = type;
    if( nullptr != alloc )
    {
#if TENSOR_INTERNAL_ALLOC
        // Externally-allocated memory is not reference counted by this class
        ownsMemory = false;
#endif
        m_data = alloc->allocate( cb, 32 );
        if( nullptr == m_data )
            return E_OUTOFMEMORY;
        return S_OK;
    }
    else
    {
#if TENSOR_INTERNAL_ALLOC
        // Internal allocation: hidden header with the reference counter set to 1
        m_data = allocateBlock( cb, 1 );
        if( nullptr == m_data )
            return E_OUTOFMEMORY;
        ownsMemory = true;
        return S_OK;
#else
        return E_POINTER;
#endif
    }
}
namespace
{
    // Expands a 1..4 element initializer list into a 4D size vector,
    // padding the missing trailing dimensions with 1.
    // Fails with E_INVALIDARG for an empty list, more than 4 entries, or any zero dimension.
    static HRESULT arrayFromList( std::array<uint32_t, 4>& arr, std::initializer_list<uint32_t> list )
    {
        const size_t dims = list.size();
        if( dims == 0 || dims > 4 )
            return E_INVALIDARG;
        const uint32_t* const source = list.begin();
        for( size_t i = 0; i < 4; i++ )
        {
            if( i >= dims )
            {
                // Missing dimension, treat as size 1
                arr[ i ] = 1;
                continue;
            }
            const uint32_t dim = source[ i ];
            if( 0 == dim )
                return E_INVALIDARG;
            arr[ i ] = dim;
        }
        return S_OK;
    }
}
// Convenience overload: validates the initializer list, then forwards
// to the std::array version of create()
HRESULT Tensor::create( eDataType type, std::initializer_list<uint32_t> sizeElements, iMemoryAllocator* alloc )
{
    std::array<uint32_t, 4> size4;
    CHECK( arrayFromList( size4, sizeElements ) );
    return create( type, size4, alloc );
}
// Wraps an external buffer into a dense tensor of the given size; the tensor never owns the memory.
// Throws an HRESULT code (E_POINTER / E_INVALIDARG) when the arguments are unusable.
Tensor::Tensor( void* pointer, eDataType type, std::initializer_list<uint32_t> size )
{
    if( nullptr == pointer )
        throw E_POINTER;
    // check() throws on a bad size list
    check( arrayFromList( ne, size ) );
    TensorShape::setDenseStrides();
    m_data = pointer;
    m_type = type;
#if TENSOR_INTERNAL_ALLOC
    // External memory, not reference counted
    ownsMemory = false;
#endif
}
// Wraps an external buffer into a dense 1D tensor of `length` elements.
// The size/stride vectors are produced with 2 SSE instructions instead of 8 scalar stores.
Tensor::Tensor( void* pointer, eDataType type, uint32_t length ) noexcept
{
    // size = [ length, 1, 1, 1 ]
    const __m128i one = _mm_set1_epi32( 1 );
    __m128i v = _mm_insert_epi32( one, (int)length, 0 );
    store( ne, v );
    // stride = [ 1, length, length, length ]
    // The shuffle control (0,0,0,1) picks lane 1 (= 1) into position 0, and lane 0 (= length) everywhere else
    v = _mm_shuffle_epi32( v, _MM_SHUFFLE( 0, 0, 0, 1 ) );
    store( nb, v );
    m_data = pointer;
    m_type = type;
#if TENSOR_INTERNAL_ALLOC
    // External memory, not reference counted
    ownsMemory = false;
#endif
}
// Factory which wraps an external 1D buffer into a dense tensor of `length` elements.
// Throws E_POINTER for a null pointer, E_INVALIDARG for zero length.
Tensor Tensor::fromData( void* pointer, eDataType type, uint32_t length )
{
    // Guard clauses replace the nested conditionals; the set of thrown codes is unchanged
    if( nullptr == pointer )
        throw E_POINTER;
    if( 0 == length )
        throw E_INVALIDARG;
    return Tensor{ pointer, type, length };
}
// Re-points this tensor at an external buffer with the given dense size.
// Any previously held payload is released first; the tensor never owns the new memory.
HRESULT Tensor::attach( void* pointer, eDataType type, std::initializer_list<uint32_t> sizeElements )
{
    if( nullptr == pointer )
        return E_POINTER;
    // Validate the size before touching any state
    std::array<uint32_t, 4> size4;
    CHECK( arrayFromList( size4, sizeElements ) );
#if TENSOR_INTERNAL_ALLOC
    deallocate();
#endif
    store( ne, load( size4 ) );
    TensorShape::setDenseStrides();
    m_data = pointer;
    m_type = type;
#if TENSOR_INTERNAL_ALLOC
    ownsMemory = false;
#endif
    return S_OK;
}
// Returns a view of this tensor re-interpreted as a dense 3D tensor [ ne0, ne1, ne2 ].
// Requires a continuous (dense) layout; the new shape must contain exactly as many elements as the old one.
// Throws E_NOTIMPL for strided tensors, E_INVALIDARG on an element count mismatch.
Tensor Tensor::reshape3d( uint32_t ne0, uint32_t ne1, uint32_t ne2 ) const
{
    if( !isContinuous() )
        throw E_NOTIMPL;
    // Compute the product in 64 bits: the old 32-bit multiplication could overflow
    // and wrap around to the correct element count, silently accepting an invalid shape
    const uint64_t countRequested = (uint64_t)ne0 * ne1 * ne2;
    if( countElements() != countRequested )
        throw E_INVALIDARG;
    Tensor res = *this;
    res.ne = { ne0, ne1, ne2, 1 };
    res.setDenseStrides();
    return res;
}
#if TENSOR_GGML_COMPAT
static const __m128i s_maskAlignment16 = _mm_set1_epi64x( 1 );
static const __m128i s_maskAlignment32 = _mm_set1_epi64x( 3 );
bool isAlignedProperly( __m128i r0, __m128i r1, __m128i mask )
{
__m128i test = _mm_or_si128( r0, r1 );
return (bool)_mm_testz_si128( test, mask );
}
// Wraps a ggml tensor into this class. ggml stores strides (nb) in bytes as 64-bit integers;
// this class stores them in elements as 32-bit integers, so the strides are shifted and narrowed with SSE.
// Throws E_INVALIDARG for ggml element types this class does not support.
Tensor::Tensor( const ggml_tensor* ggml )
{
    store( ne, load16( ggml->ne ) );
    // r0 = strides nb[0], nb[1] as two uint64; r1 = nb[2], nb[3]
    __m128i r0 = load16( (const int*)&ggml->nb[ 0 ] );
    __m128i r1 = load16( (const int*)&ggml->nb[ 2 ] );
    // Divide from bytes into elements by right-shifting the 64-bit integers in these vectors
    switch( ggml->type )
    {
    case GGML_TYPE_F16:
        // 2-byte elements: strides must be even
        assert( isAlignedProperly( r0, r1, s_maskAlignment16 ) );
        r0 = _mm_srli_epi64( r0, 1 );
        r1 = _mm_srli_epi64( r1, 1 );
        m_type = eDataType::FP16;
        break;
    case GGML_TYPE_F32:
        // 4-byte elements: strides must be multiples of 4
        assert( isAlignedProperly( r0, r1, s_maskAlignment32 ) );
        r0 = _mm_srli_epi64( r0, 2 );
        r1 = _mm_srli_epi64( r1, 2 );
        m_type = eDataType::FP32;
        break;
    case GGML_TYPE_I32:
        assert( isAlignedProperly( r0, r1, s_maskAlignment32 ) );
        r0 = _mm_srli_epi64( r0, 2 );
        r1 = _mm_srli_epi64( r1, 2 );
        m_type = eDataType::U32;
        break;
    default:
        throw E_INVALIDARG;
    }
    // downcast uint64_t into uint32_t in a single vector:
    // move the low halves of r0 into lanes [0,1], of r1 into lanes [2,3], then blend
    r0 = _mm_shuffle_epi32( r0, _MM_SHUFFLE( 3, 3, 2, 0 ) );
    r1 = _mm_shuffle_epi32( r1, _MM_SHUFFLE( 2, 0, 3, 3 ) );
    store( nb, _mm_blend_epi16( r0, r1, 0b11110000 ) );
    m_data = ggml->data;
}
// Converts this tensor into a ggml_tensor structure: element strides are widened to
// 64 bits and scaled into bytes, and n_dims is derived from the number of trailing
// dimensions equal to 1. Throws OLE_E_BLANK when the element type is unsupported.
ggml_tensor Tensor::ggml() const
{
    ggml_tensor res;
    memset( &res, 0, sizeof( ggml_tensor ) );
    const __m128i size = sizeVec();
    store16( res.ne, size );
    // n_dims = 1 + index of the last dimension whose size is not 1 (0 when all dims are 1)
    const __m128i one = _mm_set1_epi32( 1 );
    const uint32_t maskOnes = (uint32_t)_mm_movemask_ps( _mm_castsi128_ps( _mm_cmpeq_epi32( size, one ) ) );
    const uint32_t maskNotOnes = maskOnes ^ 0b1111;
    unsigned long idx;
    if( _BitScanReverse( &idx, maskNotOnes ) )
        res.n_dims = (int)idx + 1;
    else
        res.n_dims = 0;
    const __m128i strides = stridesVec();
    // Upcast strides from u32 to u64 by interleaving with zeroes
    const __m128i zero = _mm_setzero_si128();
    __m128i r0 = _mm_unpacklo_epi32( strides, zero );
    __m128i r1 = _mm_unpackhi_epi32( strides, zero );
    // Scale from elements into bytes with left shift vector instructions
    switch( m_type )
    {
    case eDataType::FP16:
        // 2 bytes / element
        r0 = _mm_slli_epi64( r0, 1 );
        r1 = _mm_slli_epi64( r1, 1 );
        res.type = GGML_TYPE_F16;
        break;
    case eDataType::FP32:
        // 4 bytes / element
        r0 = _mm_slli_epi64( r0, 2 );
        r1 = _mm_slli_epi64( r1, 2 );
        res.type = GGML_TYPE_F32;
        break;
    case eDataType::U32:
        r0 = _mm_slli_epi64( r0, 2 );
        r1 = _mm_slli_epi64( r1, 2 );
        res.type = GGML_TYPE_I32;
        break;
    default:
        throw OLE_E_BLANK;
    }
    store16( &res.nb[ 0 ], r0 );
    store16( &res.nb[ 2 ], r1 );
    res.data = m_data;
    return res;
}
// Wraps a CpuCompute tensor into a ggml-compatible view structure
GgmlTensorView::GgmlTensorView( const Tensor& t ) : tensor( t.ggml() ) {}
#endif
| 9,229
|
C++
|
.cpp
| 354
| 23.830508
| 116
| 0.681844
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,557
|
ContextImpl.diarize.cpp
|
Const-me_Whisper/Whisper/Whisper/ContextImpl.diarize.cpp
|
#include "stdafx.h"
#include "ContextImpl.h"
using namespace Whisper;
namespace
{
    // Offset the timestamp with mediaTimeOffset to find the time relative to the start of the iSpectrogram buffer,
    // then scale from 100 nanosecond ticks into the Whisper's 10ms chunks, rounding down
    inline int64_t chunkOffset( int64_t time, int64_t mediaTimeOffset )
    {
        time -= mediaTimeOffset;
        return ( time * 100 ) / 10'000'000;
    }
    // Compute per-channel sum of std::absf( pcm ) in the specified buffer,
    // and return left / right numbers in the lower 2 lanes of the SSE vector
    inline __m128 __vectorcall computeChannelsEnergy( const std::vector<StereoSample>& sourceVector )
    {
        // Might be possible to implement way more sophisticated, and precise, version of this function.
        // For example, compute these 3 metrics with VAD code, and cluster the numbers somehow.
        // Not doing that currently; instead, replicating the simple version from the whisper.cpp original version.
        const StereoSample* rsi = sourceVector.data();
        const StereoSample* const rsiEnd = rsi + sourceVector.size();
        // Main loop handles 2 stereo samples = 4 floats per iteration
        const StereoSample* const rsiEndAligned = rsi + ( sourceVector.size() & ( ~(size_t)1 ) );
        // Move 0x7FFFFFFF to lowest lane of the int32 vector;
        // unlike float scalars or all vectors, integer scalar constants are in the instruction stream
        __m128i absMaskInt = _mm_cvtsi32_si128( (int)0x7FFFFFFFu );
        // Broadcast over the complete vector
        absMaskInt = _mm_shuffle_epi32( absMaskInt, 0 );
        // Bitcast to FP32 vector, for _mm_and_ps instruction
        const __m128 absMask = _mm_castsi128_ps( absMaskInt );
        __m128 acc = _mm_setzero_ps();
        for( ; rsi < rsiEndAligned; rsi += 2 )
        {
            __m128 v = _mm_loadu_ps( (const float*)rsi );
            // Clearing the sign bit == absolute value, for IEEE floats
            v = _mm_and_ps( v, absMask );
            acc = _mm_add_ps( acc, v );
        }
        // Odd count of samples: load the final sample into the low 2 lanes, high lanes are zero
        if( rsi != rsiEnd )
        {
            __m128 v = _mm_castpd_ps( _mm_load_sd( (const double*)rsi ) );
            v = _mm_and_ps( v, absMask );
            acc = _mm_add_ps( acc, v );
        }
        // Return acc.xy + acc.zw
        acc = _mm_add_ps( acc, _mm_movehl_ps( acc, acc ) );
        return acc;
    }
    // Turns the two channel energies into the speaker verdict
    inline eSpeakerChannel produceResult( const __m128 ev )
    {
        // Original code did following:
        // if( energy0 > 1.1 * energy1 ) speaker = "(speaker 0)"; else if( energy1 > 1.1 * energy0 ) speaker = "(speaker 1)"; else speaker = "(speaker ?)";
        // Flip left/right channels
        __m128 tmp = _mm_shuffle_ps( ev, ev, _MM_SHUFFLE( 3, 2, 0, 1 ) );
        // Multiply by the magic number
        tmp = _mm_mul_ps( tmp, _mm_set1_ps( 1.1f ) );
        // Compare for ev > tmp
        tmp = _mm_cmpgt_ps( ev, tmp );
        // Bit 0 set = left channel dominates, bit 1 set = right channel dominates, neither = unsure;
        // the mask value is assumed to map directly onto eSpeakerChannel values
        const uint32_t mask = (uint32_t)_mm_movemask_ps( tmp ) & 0b11;
        assert( mask != 0b11 ); // That would mean the following is true: ( ( left > right * 1.1 ) && ( right > left * 1.1 ) )
        return (eSpeakerChannel)mask;
    }
}
// Best-effort speaker diarization for a stereo recording: compares the energy of the
// left/right channels over the given time interval. Only usable from transcription
// callbacks, while the spectrogram (and its source PCM) is still available.
HRESULT COMLIGHTCALL ContextImpl::detectSpeaker( const sTimeInterval& time, eSpeakerChannel& result ) const noexcept
{
    // Ensure we have the spectrogram
    if( nullptr == currentSpectrogram )
    {
        logError( u8"Because the audio is streamed, iContext.detectSpeaker() method only works when called from the callbacks" );
        return OLE_E_BLANK;
    }
    // Load the timestamps, in 100-nanosecond ticks
    int64_t begin = (int64_t)time.begin.ticks;
    int64_t end = (int64_t)time.end.ticks;
    // Offset + scale into 10ms chunks
    begin = chunkOffset( begin, mediaTimeOffset );
    end = chunkOffset( end, mediaTimeOffset );
    int64_t len = end - begin;
    // Empty or inverted interval: no data to make a call
    if( len <= 0 )
    {
        result = eSpeakerChannel::Unsure;
        return S_OK;
    }
    // Extract the slice of stereo PCM data
    HRESULT hr = currentSpectrogram->copyStereoPcm( (size_t)begin, (size_t)len, diarizeBuffer );
    // OLE_E_BLANK from the spectrogram means the source audio was mono
    if( hr == OLE_E_BLANK )
    {
        result = eSpeakerChannel::NoStereoData;
        return S_OK;
    }
    CHECK( hr );
    const __m128 energyVec = computeChannelsEnergy( diarizeBuffer );
    result = produceResult( energyVec );
    return S_OK;
}
| 3,802
|
C++
|
.cpp
| 93
| 38.247312
| 149
| 0.69797
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,558
|
ModelBuffers.cpp
|
Const-me_Whisper/Whisper/Whisper/ModelBuffers.cpp
|
#include "stdafx.h"
#include "ModelLoader.h"
#if BUILD_BOTH_VERSIONS
namespace DirectCompute
{
    // Process-wide singleton holding all GPU tensors of the loaded model;
    // mutable only through ModelLoader, exposed read-only as gpuModel
    static ModelBuffers s_model;
    const ModelBuffers& gpuModel = s_model;
}
using namespace DirectCompute;
// Sets up the loader for a model with the given transformer depth.
// Both layer counts must be positive; throws E_INVALIDARG otherwise.
ModelLoader::ModelLoader( int encoderLayers, int decoderLayers ) :
    model( s_model )
{
    if( encoderLayers < 1 || decoderLayers < 1 )
        throw E_INVALIDARG;
    // Pre-size the per-layer tensor containers of the singleton
    model.enc.layers.resize( (uint32_t)encoderLayers );
    model.dec.layers.resize( (uint32_t)decoderLayers );
}
// Registers a destination GPU tensor for the given ggml source tensor.
// Throws E_POINTER for a null source, E_INVALIDARG when the source was already registered.
void ModelLoader::add( const ggml_tensor* ggml, Tensor& gpu )
{
    if( nullptr == ggml )
        throw E_POINTER;
    const auto [ iterator, inserted ] = map.try_emplace( ggml, &gpu );
    if( !inserted )
        throw E_INVALIDARG;
}
// Finds the GPU tensor previously registered for the ggml tensor; nullptr when absent
Tensor* ModelLoader::lookup( const ggml_tensor* ggml ) const
{
    const auto it = map.find( ggml );
    return ( it != map.end() ) ? it->second : nullptr;
}
bool ModelLoader::tryLoad( const ggml_tensor* ggml )
{
Tensor* rdi = lookup( ggml );
if( nullptr == rdi )
return false;
HRESULT hr = rdi->create( *ggml, eBufferUse::Immutable, true );
if( SUCCEEDED( hr ) )
return true;
throw hr;
}
#endif
// The getMemoryUse() overloads below aggregate memory usage of model tensors.
// Each returns an __m128i with two 64-bit counters (presumably { VRAM, system RAM } —
// matches Tensor::getMemoryUse, verify against its implementation), summed with _mm_add_epi64.
__m128i __declspec( noinline ) DirectCompute::TensorPair::getMemoryUse() const
{
    // Weight + bias
    return _mm_add_epi64( w.getMemoryUse(), b.getMemoryUse() );
}
__m128i DirectCompute::LayerEncoder::getMemoryUse() const
{
    // Sum over all tensors of a single encoder transformer layer
    __m128i v = attnLn0.getMemoryUse();
    v = _mm_add_epi64( v, attnLn1.getMemoryUse() );
    v = _mm_add_epi64( v, attnQuery.getMemoryUse() );
    v = _mm_add_epi64( v, attnKey.getMemoryUse() );
    v = _mm_add_epi64( v, attnValue.getMemoryUse() );
    v = _mm_add_epi64( v, mlpLn.getMemoryUse() );
    v = _mm_add_epi64( v, mlp0.getMemoryUse() );
    v = _mm_add_epi64( v, mlp1.getMemoryUse() );
    return v;
}
__m128i DirectCompute::EncoderBuffers::getMemoryUse() const
{
    // Start with the CPU-side cost of the layers vector itself, then add shared tensors and every layer
    __m128i v = _mm_cvtsi64_si128( vectorMemoryUse( layers ) );
    v = _mm_add_epi64( v, positionalEmbedding.getMemoryUse() );
    v = _mm_add_epi64( v, conv1.getMemoryUse() );
    v = _mm_add_epi64( v, conv2.getMemoryUse() );
    v = _mm_add_epi64( v, lnPost.getMemoryUse() );
    for( const auto& layer : layers )
        v = _mm_add_epi64( v, layer.getMemoryUse() );
    return v;
}
__m128i DirectCompute::LayerDecoder::getMemoryUse() const
{
    // Sum over all tensors of a single decoder transformer layer, incl. cross-attention
    __m128i v = attnLn0.getMemoryUse();
    v = _mm_add_epi64( v, attnLn1.getMemoryUse() );
    v = _mm_add_epi64( v, attnQuery.getMemoryUse() );
    v = _mm_add_epi64( v, attnKey.getMemoryUse() );
    v = _mm_add_epi64( v, attnValue.getMemoryUse() );
    v = _mm_add_epi64( v, crossAttnLn0.getMemoryUse() );
    v = _mm_add_epi64( v, crossAttnLn1.getMemoryUse() );
    v = _mm_add_epi64( v, crossAttnQuery.getMemoryUse() );
    v = _mm_add_epi64( v, crossAttnKey.getMemoryUse() );
    v = _mm_add_epi64( v, crossAttnValue.getMemoryUse() );
    v = _mm_add_epi64( v, mlpLn.getMemoryUse() );
    v = _mm_add_epi64( v, mlp0.getMemoryUse() );
    v = _mm_add_epi64( v, mlp1.getMemoryUse() );
    return v;
}
__m128i DirectCompute::DecoderBuffers::getMemoryUse() const
{
    __m128i v = _mm_cvtsi64_si128( vectorMemoryUse( layers ) );
    v = _mm_add_epi64( v, positionalEmbedding.getMemoryUse() );
    v = _mm_add_epi64( v, tokenEmbedding.getMemoryUse() );
    v = _mm_add_epi64( v, ln.getMemoryUse() );
    for( const auto& layer : layers )
        v = _mm_add_epi64( v, layer.getMemoryUse() );
    return v;
}
__m128i DirectCompute::ModelBuffers::getMemoryUse() const
{
    // Complete model = encoder + decoder
    return _mm_add_epi64( enc.getMemoryUse(), dec.getMemoryUse() );
}
| 3,316
|
C++
|
.cpp
| 101
| 31
| 78
| 0.698626
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,559
|
DecoderResultBuffer.cpp
|
Const-me_Whisper/Whisper/Whisper/DecoderResultBuffer.cpp
|
#include "stdafx.h"
#include "DecoderResultBuffer.h"
#include "../D3D/MappedResource.h"
using namespace DirectCompute;
// Copies a continuous FP32 tensor from VRAM into the CPU-readable staging buffer,
// growing the staging buffer when needed. Throws OLE_E_BLANK for an empty tensor view,
// E_INVALIDARG for a strided tensor.
void DecoderResultBuffer::copyFromVram( const Tensor& rsi )
{
    ID3D11ShaderResourceView* srv = rsi;
    if( nullptr == srv )
        throw OLE_E_BLANK;
    if( !rsi.isContinuous() )
        throw E_INVALIDARG;
    const uint32_t len = rsi.countElements();
    // Grow (never shrink) the staging buffer; 4 bytes per element
    if( len > m_capacity )
    {
        buffer = nullptr;
        CD3D11_BUFFER_DESC desc{ len * 4, 0, D3D11_USAGE_STAGING, D3D11_CPU_ACCESS_READ };
        check( device()->CreateBuffer( &desc, nullptr, &buffer ) );
        m_capacity = len;
    }
    CComPtr<ID3D11Resource> source;
    srv->GetResource( &source );
    // Coordinates of a box are in bytes for buffers
    D3D11_BOX box;
    // Fills left, top, front, right with one 16-byte store: { 0, 0, 0, len * 4 }
    store16( &box, _mm_setr_epi32( 0, 0, 0, (int)( len * 4 ) ) );
    // Writes bottom = 1 and back = 1 with a single 64-bit store
    *(uint64_t*)&box.bottom = 0x100000001ull;
    context()->CopySubresourceRegion( buffer, 0, 0, 0, 0, source, 0, &box );
    m_size = len;
}
// Maps the staging buffer and copies its m_size FP32 values into the vector.
// Throws OLE_E_BLANK when the buffer is empty; note the vector is resized (cleared) before the check.
void DecoderResultBuffer::copyToVector( std::vector<float>& vec ) const
{
    vec.resize( m_size );
    if( vec.empty() )
        throw OLE_E_BLANK;
    // RAII wrapper un-maps the buffer on scope exit; `true` presumably selects read access — verify in MappedResource
    MappedResource mapped;
    check( mapped.map( buffer, true ) );
    memcpy( vec.data(), mapped.data(), (size_t)4 * m_size );
}
// Releases the staging buffer and resets both the element count and the capacity
void DecoderResultBuffer::clear()
{
    m_size = 0;
    m_capacity = 0;
    buffer = nullptr;
}
| 1,244
|
C++
|
.cpp
| 42
| 27.642857
| 84
| 0.703425
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,560
|
melSpectrogram.cpp
|
Const-me_Whisper/Whisper/Whisper/melSpectrogram.cpp
|
#include "stdafx.h"
#include <cmath>
#include "melSpectrogram.h"
namespace Whisper
{
    // Precomputes the Hann window coefficients for the FFT: 0.5 * ( 1 - cos( 2*pi*i / N ) )
    HanningWindow::HanningWindow()
    {
        for( int i = 0; i < FFT_SIZE; i++ )
        {
            // TODO [low]: use XMVectorCos instead
            hann[ i ] = (float)( 0.5 * ( 1.0 - std::cos( ( 2.0 * M_PI * i ) / ( FFT_SIZE ) ) ) );
        }
    }
    // Shared immutable instance, computed once at startup
    const HanningWindow s_hanning;
}
namespace
{
using namespace Whisper;
// Computes how many floats fftRecursion() will consume from the scratch buffer
// for an input of `len` real samples, by mirroring that function's recursion structure.
uint32_t tempVectorSizeRecursion( uint32_t len )
{
    // Every recursion level stores its own complex output: len * 2 floats
    const uint32_t own = len * 2;
    // Leaf cases: a single sample, or an odd length handled by the naive DFT
    if( len == 1 || 0 != ( len & 1 ) )
        return own;
    // Even length: the input is split into even/odd halves, each processed recursively
    const uint32_t halfEven = ( len + 1 ) / 2;
    const uint32_t halfOdd = len / 2;
    return own + halfEven + halfOdd
        + tempVectorSizeRecursion( halfEven )
        + tempVectorSizeRecursion( halfOdd );
}
// 6000
// const uint32_t tempBufferSize = FFT_SIZE + tempVectorSizeRecursion( FFT_SIZE );
// The expression above is not constexpr, so the value is hardcoded; the constructor asserts it still matches
constexpr uint32_t tempBufferSize = 6000;
// [ a, b, c, d ], [ e, f, g, h ] => [ a+b+c+d, e+f+g+h ]
// Horizontal reduction of two accumulators into the low 2 lanes of the result.
// Note the specific addition order: floats are not associative, don't "simplify" with _mm_hadd_ps.
inline __m128 hadd2( __m128 low, __m128 high )
{
    // [ a, e, b, f ]
    __m128 a = _mm_unpacklo_ps( low, high );
    // [ c, g, d, h ]
    __m128 b = _mm_unpackhi_ps( low, high );
    // [ a+c, e+g, b+d, f+h ]
    __m128 r = _mm_add_ps( a, b );
    // [ b+d, f+h, b+d, f+h ]
    __m128 tmp = _mm_movehl_ps( r, r );
    // [ a+c+b+d, e+g+f+h ]
    return _mm_add_ps( r, tmp );
}
// Load 2 floats into the low lanes of a vector with a single 64-bit load; high lanes become zero
inline __m128 load2( const float* rsi )
{
    return _mm_castpd_ps( _mm_load_sd( (const double*)rsi ) );
}
// Store the low 2 lanes of the vector with a single 64-bit store
inline void store2( float* rdi, __m128 vec )
{
    _mm_store_sd( (double*)rdi, _mm_castps_pd( vec ) );
}
// Load 3 floats into the low lanes of a vector; the highest lane becomes zero
inline __m128 loadFloat3( const float* rsi )
{
    __m128 f = load2( rsi );
    // Insert the third float into lane 2; control byte 0x20 = source lane 0 -> destination lane 2
    f = _mm_insert_ps( f, _mm_load_ss( rsi + 2 ), 0x20 );
    return f;
}
inline __m128 loadPartial( const float* rsi, size_t rem )
{
assert( rem > 0 && rem < 4 );
switch( rem )
{
case 1:
return _mm_load_ss( rsi );
case 2:
return load2( rsi );
case 3:
return loadFloat3( rsi );
}
return _mm_setzero_ps();
}
// naive Discrete Fourier Transform
// input is real-valued
// output is complex-valued, interleaved [ re, im ] pairs, rdi must hold len * 2 floats.
// Vectorized 4 samples at a time; the scalar reference implementation is kept in the #else branch.
inline void dft( const float* rsi, size_t len, float* rdi )
{
    const size_t lenAligned = len & ( ~(size_t)3 );
    const size_t remainder = len % 4;
    // Angle per ( k * n ) unit: 2*pi / len, computed once in double precision
    const double mulScalarBase = ( 2.0 * M_PI ) / (double)(int)len;
    const __m128 nvInitial = _mm_setr_ps( 0, 1, 2, 3 );
    const __m128 nvInc = _mm_set1_ps( 4 );
    for( size_t k = 0; k < len; k++ )
    {
#if 1
        // Broadcast k * 2*pi / len; the per-sample angle is that multiplied by the sample index n
        const __m128 mul = _mm_set1_ps( (float)( mulScalarBase * (int)k ) );
        __m128 nv = nvInitial;
        __m128 cosine = _mm_setzero_ps();
        __m128 sine = _mm_setzero_ps();
        for( size_t n = 0; n < lenAligned; n += 4 )
        {
            const __m128 angles = _mm_mul_ps( nv, mul );
            nv = _mm_add_ps( nv, nvInc );
            __m128 s, c;
            // That library function from Windows SDK is way faster than std::sinf/cosf
            // Especially because we use the version which computes 4 angles at once
            // Source codes there: https://github.com/microsoft/DirectXMath/blob/dec2022/Inc/DirectXMathVector.inl#L4456-L4512
            DirectX::XMVectorSinCos( &s, &c, angles );
            // Multiply sin/cos by 4 source values
            const __m128 source = _mm_loadu_ps( &rsi[ n ] );
            c = _mm_mul_ps( c, source );
            s = _mm_mul_ps( s, source );
            // Accumulate in 2 vectors
            cosine = _mm_add_ps( cosine, c );
            sine = _mm_sub_ps( sine, s );
        }
        // Handle the remainder; debugger shows it's always 1, BTW
        if( 0 != remainder )
        {
            const __m128 angles = _mm_mul_ps( nv, mul );
            __m128 s, c;
            DirectX::XMVectorSinCos( &s, &c, angles );
            // loadPartial sets unused lanes to 0..
            const __m128 source = loadPartial( &rsi[ lenAligned ], remainder );
            // x * 0.0 == 0.0 ..
            c = _mm_mul_ps( c, source );
            s = _mm_mul_ps( s, source );
            // .. that's why it's fine to accumulate the complete vectors.
            // Adding or subtracting zero doesn't change the accumulator
            cosine = _mm_add_ps( cosine, c );
            sine = _mm_sub_ps( sine, s );
        }
        // Reduce 2*4 accumulators -> 2 scalars in a single vector
        const __m128 res = hadd2( cosine, sine );
        // Store 2 floats, with 1 instruction
        store2( &rdi[ k * 2 ], res );
#else
        // Original scalar version here
        float re = 0;
        float im = 0;
        for( int n = 0; n < len; n++ )
        {
            float angle = (float)( 2 * M_PI * (int)k * n / len );
            re += (float)( rsi[ n ] * std::cosf( angle ) );
            im -= (float)( rsi[ n ] * std::sinf( angle ) );
        }
        rdi[ k * 2 + 0 ] = re;
        rdi[ k * 2 + 1 ] = im;
#endif
    }
}
// De-interleaves `len` floats: source elements with even indices go to rdiEven,
// odd indices to rdiOdd. Output buffers receive ceil(len/2) and floor(len/2) values.
inline void splitEvenOdd( const float* rsi, size_t len, float* rdiEven, float* rdiOdd )
{
    // Vectorized part consumes 8 source floats per iteration, producing 4 + 4 outputs
    const float* src = rsi;
    const float* const srcEndAligned = rsi + ( len & ( ~(size_t)7 ) );
    float* even = rdiEven;
    float* odd = rdiOdd;
    while( src < srcEndAligned )
    {
        const __m128 a = _mm_loadu_ps( src );
        const __m128 b = _mm_loadu_ps( src + 4 );
        // Gather lanes [0,2] of both vectors = even-indexed elements
        _mm_storeu_ps( even, _mm_shuffle_ps( a, b, _MM_SHUFFLE( 2, 0, 2, 0 ) ) );
        // Gather lanes [1,3] of both vectors = odd-indexed elements
        _mm_storeu_ps( odd, _mm_shuffle_ps( a, b, _MM_SHUFFLE( 3, 1, 3, 1 ) ) );
        src += 8;
        even += 4;
        odd += 4;
    }
    // Scalar remainder, 0-7 elements; the vectorized part handled an even count,
    // so the parity of the local index matches the parity of the global one
    const size_t rem = len % 8;
#pragma loop( no_vector )
    for( size_t i = 0; i < rem; i++ )
    {
        const float value = src[ i ];
        if( 0 == ( i & 1 ) )
            *even++ = value;
        else
            *odd++ = value;
    }
}
// Broadcast a scalar into the low 2 lanes; high lanes are zero
inline __m128 set2( float f )
{
    __m128 v = _mm_set_ss( f );
    return _mm_moveldup_ps( v );
}
// [ x, y ] => [ x, y, x, y ]
inline __m128 dup2( __m128 x )
{
    __m128d v = _mm_castps_pd( x );
    v = _mm_movedup_pd( v );
    return _mm_castpd_ps( v );
}
// Load 2 floats and duplicate them into the high lanes: [ a, b, a, b ]
inline __m128 load2dup( const float* rsi )
{
    return _mm_castpd_ps( _mm_loaddup_pd( (const double*)rsi ) );
}
// Store the HIGH 2 lanes of the vector with a single 64-bit store
inline void store2high( float* rdi, __m128 vec )
{
    _mm_storeh_pd( (double*)rdi, _mm_castps_pd( vec ) );
}
}
using namespace Whisper;
// Allocates the scratch buffer for the FFT recursion; the size is a precomputed
// constant, verified here against the recursive formula in debug builds
SpectrogramContext::SpectrogramContext( const Filters& flt ) :
    filters( flt )
{
    assert( tempBufferSize == FFT_SIZE + tempVectorSizeRecursion( FFT_SIZE ) );
    tempBuffer = std::make_unique<float[]>( tempBufferSize );
}
// Cooley-Tukey FFT
// poor man's implementation - use something better
// input is real-valued
// output is complex-valued
// Cooley-Tukey FFT
// poor man's implementation - use something better
// input is real-valued
// output is complex-valued
// `temp` is a bump-pointer into the scratch buffer: every level writes its output
// at the current position and returns the advanced pointer. The output of this
// level is the first len * 2 floats at the original `temp`.
float* SpectrogramContext::fftRecursion( float* temp, const float* const rsi, const size_t len )
{
    float* const out = temp;
    temp += len * 2;
    // Leaf: a single real sample is its own transform
    if( len == 1 )
    {
        out[ 0 ] = rsi[ 0 ];
        out[ 1 ] = 0;
        return temp;
    }
    // Odd length: fall back to the naive DFT
    if( len % 2 == 1 )
    {
        dft( rsi, len, out );
        return temp;
    }
    // Even length: split into even/odd-indexed halves and recurse
    const size_t lenEven = ( len + 1 ) / 2;
    const size_t lenOdd = len / 2;
    float* const even = temp;
    temp += lenEven;
    float* const odd = temp;
    temp += lenOdd;
    splitEvenOdd( rsi, len, even, odd );
    const float* const evenFft = temp;
    temp = fftRecursion( temp, even, lenEven );
    const float* const oddFft = temp;
    temp = fftRecursion( temp, odd, lenOdd );
    // Butterfly: combine the two half-size transforms into the full one.
    // The vector code below computes the same values as the commented scalar version.
    const size_t N = len;
    const __m128 maskNegateHigh = _mm_setr_ps( 0, 0, -0.0f, -0.0f );
    for( size_t k = 0; k < N / 2; k++ )
    {
        const float theta = (float)( 2 * M_PI * (double)(int)k / N );
        /*
        const float re = std::cosf( theta );
        const float im = -std::sinf( theta );
        float re_odd = oddFft[ 2 * k + 0 ];
        float im_odd = oddFft[ 2 * k + 1 ];
        out[ 2 * k + 0 ] = evenFft[ 2 * k + 0 ] + re * re_odd - im * im_odd;
        out[ 2 * k + 1 ] = evenFft[ 2 * k + 1 ] + re * im_odd + im * re_odd;
        out[ 2 * ( k + N / 2 ) + 0 ] = evenFft[ 2 * k + 0 ] - re * re_odd + im * im_odd;
        out[ 2 * ( k + N / 2 ) + 1 ] = evenFft[ 2 * k + 1 ] - re * im_odd - im * re_odd;
        */
        float sine, cosine;
        DirectX::XMScalarSinCos( &sine, &cosine, theta );
        const __m128 re = _mm_set_ss( cosine );
        const __m128 im = _mm_set_ss( sine );
        __m128 reIm = _mm_shuffle_ps( re, im, _MM_SHUFFLE( 0, 0, 0, 0 ) );
        // [ re, re, im, im ]
        reIm = _mm_xor_ps( reIm, maskNegateHigh );
        // [ re_odd, im_odd ]
        __m128 odd = load2( oddFft + 2 * k );
        // [ re_odd, im_odd, im_odd, re_odd ]
        odd = _mm_shuffle_ps( odd, odd, _MM_SHUFFLE( 0, 1, 1, 0 ) );
        // re_odd * re, im_odd * re, im_odd * im, re_odd * im ]
        const __m128 products4 = _mm_mul_ps( reIm, odd );
        // re_odd * re, im_odd * re, re_odd * re, im_odd * re
        __m128 prod1 = dup2( products4 );
        // im_odd * im, re_odd * im, im_odd * im, re_odd * im
        __m128 prod2 = _mm_movehl_ps( products4, products4 );
        // re_odd * re, im_odd * re, -re_odd * re, -im_odd * re
        prod1 = _mm_xor_ps( prod1, maskNegateHigh );
        // im_odd * im, re_odd * im, -im_odd * im, -re_odd * im
        prod2 = _mm_xor_ps( prod2, maskNegateHigh );
        const __m128 even = load2dup( evenFft + 2 * k );
        __m128 res;
        res = _mm_add_ps( even, prod1 );
        res = _mm_addsub_ps( res, prod2 );
        // Low 2 lanes = output k, high 2 lanes = output k + N/2
        store2( out + 2 * k, res );
        store2high( out + 2 * ( k + N / 2 ), res );
    }
    return temp;
}
// Produces one mel spectrogram column from up to FFT_SIZE PCM samples:
// Hann window -> FFT -> power spectrum -> fold the mirrored half -> mel filter bank -> log10.
// Shorter inputs are zero-padded to FFT_SIZE.
void SpectrogramContext::fft( std::array<float, N_MEL>& rdi, const float* pcm, size_t length )
{
    assert( length > 0 );
    length = std::min( length, (size_t)FFT_SIZE );
    float* const temp = tempBuffer.get();
    // Apply Hanning window
    for( size_t i = 0; i < length; i++ )
        temp[ i ] = pcm[ i ] * s_hanning[ i ];
    // Zero-pad the rest of the window
    if( length < FFT_SIZE )
        memset( temp + length, 0, ( FFT_SIZE - length ) * 4 );
    float* const fftOut = temp + FFT_SIZE;
    float* bufferEnd = fftRecursion( fftOut, temp, FFT_SIZE );
    assert( bufferEnd == tempBuffer.get() + tempBufferSize );
    // Power spectrum, compacting [ re, im ] pairs in place; scalar equivalent:
    // for( size_t j = 0; j < FFT_SIZE; j++ )
    //	fft_out[ j ] = ( fft_out[ 2 * j + 0 ] * fft_out[ 2 * j + 0 ] + fft_out[ 2 * j + 1 ] * fft_out[ 2 * j + 1 ] );
    // The first 4 entries are done with scalar-ish SSE because the vectorized loop below
    // would otherwise overwrite its own input (reads at 2*j, writes at j)
    for( size_t j = 0; j < 4; j++ )
    {
        __m128 tmp = load2( fftOut + 2 * j );
        tmp = _mm_mul_ps( tmp, tmp );
        tmp = _mm_add_ss( tmp, _mm_movehdup_ps( tmp ) );
        _mm_store_ss( fftOut + j, tmp );
    }
    for( size_t j = 4; j < FFT_SIZE; j += 4 )
    {
        __m128 low = _mm_loadu_ps( fftOut + 2 * j );
        __m128 high = _mm_loadu_ps( fftOut + 2 * j + 4 );
        low = _mm_mul_ps( low, low );
        high = _mm_mul_ps( high, high );
        // hadd produces re^2 + im^2 for 4 consecutive bins
        __m128 res = _mm_hadd_ps( low, high );
        _mm_storeu_ps( fftOut + j, res );
    }
    // Fold the mirrored upper half of the spectrum onto the lower half; scalar equivalent:
    // for( size_t j = 1; j < FFT_SIZE / 2; j++ )
    //	fftOut[ j ] += fftOut[ FFT_SIZE - j ];
    for( size_t j = 1; j < 4; j++ )
        fftOut[ j ] += fftOut[ FFT_SIZE - j ];
    for( size_t j = 4; j < FFT_SIZE / 2; j += 4 )
    {
        __m128 curr = _mm_loadu_ps( fftOut + j );
        // Too bad _mm_loadr_ps requires alignment
        __m128 high = _mm_loadu_ps( fftOut + ( FFT_SIZE - 3 ) - j );
        high = _mm_shuffle_ps( high, high, _MM_SHUFFLE( 0, 1, 2, 3 ) );
        curr = _mm_add_ps( curr, high );
        _mm_storeu_ps( fftOut + j, curr );
    }
    constexpr size_t n_fft = 1 + ( FFT_SIZE / 2 );
    // mel spectrogram: dot product of the power spectrum with each mel filter, clamped and log-scaled
    for( size_t j = 0; j < N_MEL; j++ )
    {
        double sum = 0.0;
        for( size_t k = 0; k < n_fft; k++ )
            sum += fftOut[ k ] * filters.data[ j * n_fft + k ];
        // Avoid log10( 0 )
        if( sum < 1e-10 )
            sum = 1e-10;
        sum = log10( sum );
        rdi[ j ] = (float)sum;
    }
    /*
    const float* ptr = rdi.data();
    const float* const ptrEnd = ptr + rdi.size();
    static_assert( 0 == N_MEL % 4 );
    __m128 ax = _mm_loadu_ps( ptr );
    for( ptr += 4; ptr < ptrEnd; ptr += 4 )
        ax = _mm_max_ps( ax, _mm_loadu_ps( ptr ) );
    ax = _mm_max_ps( ax, _mm_movehl_ps( ax, ax ) );
    ax = _mm_max_ss( ax, _mm_movehdup_ps( ax ) );
    return _mm_cvtss_f32( ax );
    */
}
| 11,066
|
C++
|
.cpp
| 345
| 29.113043
| 118
| 0.575269
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,561
|
WhisperModel.cpp
|
Const-me_Whisper/Whisper/Whisper/WhisperModel.cpp
|
#include "stdafx.h"
#include "WhisperModel.h"
#include "loaderUtils.h"
#include "../D3D/createBuffer.h"
#include <atlcoll.h>
#include <atlstr.h>
#include "../Utils/GpuProfilerSimple.h"
#include "../Utils/CpuProfiler.h"
#include "../CPU/HybridLoader.h"
#include "../ML/Reshaper.h"
using namespace Whisper;
using namespace DirectCompute;
namespace
{
    // Layout of the model file header: model hyperparameters followed by the mel filter bank size
    struct ParamsAndMelHeader
    {
        sModelParams mp;
        uint32_t n_mel = 0, n_fft = 0;
    };
    // What to do with a tensor right after it was uploaded to VRAM
    enum struct ePostProcessing : uint8_t
    {
        None = 0,
        // Reshape the matrix into the panel layout used by the reshaped matrix multiplication shaders
        MakePanels = 1
    };
    // Destination slot for one named tensor from the model file, plus its post-processing step
    struct PendingTensor
    {
        DirectCompute::Tensor* dest = nullptr;
        ePostProcessing postProcessing = ePostProcessing::None;
        PendingTensor() = default;
        PendingTensor( const PendingTensor& ) = default;
        PendingTensor( DirectCompute::Tensor& tensor, ePostProcessing pp = ePostProcessing::None ) :
            dest( &tensor ), postProcessing( pp ) { }
        // If you wonder why not reshape them after all tensors are loaded, doing that on the fly is faster because CPU and GPU work in parallel
        // In the current version, CPU reads data for a next tensor, while in the meantime GPU reshapes a previously loaded tensor.
        HRESULT postProcess( Reshaper& rs, eDataType dt )
        {
            switch( postProcessing )
            {
            case ePostProcessing::None:
                return S_OK;
            case ePostProcessing::MakePanels:
                if( gpuInfo().useReshapedMatMul() )
                {
                    // GpuInfo structure says we should use that new method
                    return rs.makePanels( *dest, dt );
                }
                else
                {
                    // The feature ain't enabled on the current user's GPU
                    return S_OK;
                }
            default:
                return E_UNEXPECTED;
            }
        }
    };
void populateEncodeTensorsMap( CAtlMap<CStringA, PendingTensor>& map, int layersEnc, DirectCompute::ModelBuffers& tensors )
{
tensors.enc.layers.resize( layersEnc );
CStringA tempString;
// Encoder tensors
auto& enc = tensors.enc;
map[ "encoder.positional_embedding" ] = enc.positionalEmbedding;
map[ "encoder.conv1.weight" ] = enc.conv1.w;
map[ "encoder.conv1.bias" ] = enc.conv1.b;
map[ "encoder.conv2.weight" ] = enc.conv2.w;
map[ "encoder.conv2.bias" ] = enc.conv2.b;
map[ "encoder.ln_post.weight" ] = enc.lnPost.w;
map[ "encoder.ln_post.bias" ] = enc.lnPost.b;
auto add = [ & ]( const char* name, int i, DirectCompute::Tensor& t, ePostProcessing pp = ePostProcessing::None )
{
tempString.Format( "encoder.blocks.%i.%s", i, name );
map[ tempString ] = PendingTensor{ t, pp };
};
auto add2 = [ & ]( const char* name, int i, DirectCompute::TensorPair& t, ePostProcessing ppWeight = ePostProcessing::None, ePostProcessing ppBias = ePostProcessing::None )
{
tempString.Format( "encoder.blocks.%i.%s.weight", i, name );
map[ tempString ] = PendingTensor{ t.w, ppWeight };
tempString.Format( "encoder.blocks.%i.%s.bias", i, name );
map[ tempString ] = PendingTensor{ t.b, ppBias };
};
for( int i = 0; i < layersEnc; i++ )
{
auto& gpu = enc.layers[ i ];
add2( "mlp_ln", i, gpu.mlpLn );
add2( "mlp.0", i, gpu.mlp0, ePostProcessing::MakePanels );
add2( "mlp.2", i, gpu.mlp1, ePostProcessing::MakePanels );
add2( "attn_ln", i, gpu.attnLn0 );
add2( "attn.query", i, gpu.attnQuery, ePostProcessing::MakePanels );
add( "attn.key.weight", i, gpu.attnKey, ePostProcessing::MakePanels );
add2( "attn.value", i, gpu.attnValue, ePostProcessing::MakePanels );
add2( "attn.out", i, gpu.attnLn1, ePostProcessing::MakePanels );
}
}
// Register the decoder tensors in the name -> destination map, so the loader can
// stream them from the model file by name.
// In hybrid mode only the cross-attention key/value tensors are uploaded to the GPU;
// the rest of the decoder is skipped here (it's handled by the CPU-side hybrid loader).
void populateDecodeTensorsMap( CAtlMap<CStringA, PendingTensor>& map, int layersDec, DirectCompute::ModelBuffers& tensors, bool hybrid )
{
tensors.dec.layers.resize( layersDec );
CStringA tempString;
// Decoder tensors
auto& dec = tensors.dec;
if( !hybrid )
{
map[ "decoder.positional_embedding" ] = dec.positionalEmbedding;
map[ "decoder.token_embedding.weight" ] = dec.tokenEmbedding;
map[ "decoder.ln.weight" ] = dec.ln.w;
map[ "decoder.ln.bias" ] = dec.ln.b;
}
// Register a single tensor named "decoder.blocks.%i.<name>"
auto add = [ & ]( const char* name, int i, DirectCompute::Tensor& t, ePostProcessing pp = ePostProcessing::None )
{
tempString.Format( "decoder.blocks.%i.%s", i, name );
map[ tempString ] = PendingTensor{ t, pp };
};
// Register a weight + bias pair, "decoder.blocks.%i.<name>.weight" / ".bias"
auto add2 = [ & ]( const char* name, int i, DirectCompute::TensorPair& t, ePostProcessing ppWeight = ePostProcessing::None, ePostProcessing ppBias = ePostProcessing::None )
{
tempString.Format( "decoder.blocks.%i.%s.weight", i, name );
map[ tempString ] = PendingTensor{ t.w, ppWeight };
tempString.Format( "decoder.blocks.%i.%s.bias", i, name );
map[ tempString ] = PendingTensor{ t.b, ppBias };
};
for( int i = 0; i < layersDec; i++ )
{
auto& gpu = dec.layers[ i ];
// Cross-attention K/V go to VRAM in both full-GPU and hybrid modes
add( "cross_attn.key.weight", i, gpu.crossAttnKey, ePostProcessing::MakePanels );
add2( "cross_attn.value", i, gpu.crossAttnValue, ePostProcessing::MakePanels );
if( hybrid )
continue;
add2( "mlp_ln", i, gpu.mlpLn );
add2( "mlp.0", i, gpu.mlp0, ePostProcessing::MakePanels );
add2( "mlp.2", i, gpu.mlp1, ePostProcessing::MakePanels );
add2( "attn_ln", i, gpu.attnLn0 );
add2( "attn.query", i, gpu.attnQuery );
add( "attn.key.weight", i, gpu.attnKey );
add2( "attn.value", i, gpu.attnValue );
add2( "attn.out", i, gpu.attnLn1 );
add2( "cross_attn_ln", i, gpu.crossAttnLn0 );
add2( "cross_attn.query", i, gpu.crossAttnQuery );
add2( "cross_attn.out", i, gpu.crossAttnLn1 );
}
}
// Build the complete name -> destination map used by the GGML tensor loader:
// encoder tensors first, then the decoder tensors
void populateTensorsMap( CAtlMap<CStringA, PendingTensor>& map, int layersEnc, int layersDec, DirectCompute::ModelBuffers& tensors, bool hybrid )
{
populateEncodeTensorsMap( map, layersEnc, tensors );
populateDecodeTensorsMap( map, layersDec, tensors, hybrid );
}
// Header of one serialized tensor in the GGML model file
struct sTensorHeader
{
// n_dims = rank of the tensor (loaders require 1..3), length = byte length of the name,
// ftype = element type: 0 means FP32, any other value is loaded as FP16
int n_dims, length, ftype;
};
// compare signed int32 lanes for a <= b
inline __m128i cmple( __m128i a, __m128i b )
{
// a <= b exactly when min( a, b ) == a; _mm_min_epi32 needs SSE4.1
__m128i i = _mm_min_epi32( a, b );
return _mm_cmpeq_epi32( a, i );
}
// True when all 4 int32 lanes of the ne[] sizes vector are strictly positive
inline bool allPositive( const std::array<int, 4>& ne )
{
const __m128i v = _mm_loadu_si128( ( const __m128i* )ne.data() );
// le = mask of lanes where v <= 0; _mm_testz_si128 returns 1 when no bit is set
const __m128i le = cmple( v, _mm_setzero_si128() );
return (bool)_mm_testz_si128( le, le );
}
// Explicit CStringA -> const char* conversion, for passing strings to printf-style var.args
inline const char* cstr( const CStringA& s ) { return s; }
}
// Adapts the public sLoadModelCallbacks structure to the internal loaders:
// implements cancellation polling and load-progress reporting.
class WhisperModel::CallbacksImpl : public CpuCompute::iLoaderProgressSink
{
// User-provided callbacks; each function pointer may be null
sLoadModelCallbacks lmcb;
// Total length of the model file, queried only when a progress callback is present
int64_t fileSize;
// iLoaderProgressSink method, called by the CPU loader as postponed payload bytes are consumed
HRESULT gotBytes( int64_t cb ) override final
{
if( nullptr != lmcb.cancel )
{
HRESULT hr = lmcb.cancel( lmcb.pv );
CHECK( hr );
if( S_OK != hr )
return HRESULT_FROM_WIN32( ERROR_CANCELLED );
}
if( nullptr != lmcb.progress )
{
// Progress counts down the outstanding postponed bytes
postponedBytes -= cb;
assert( postponedBytes >= 0 );
int64_t pos = fileSize - postponedBytes;
const double progressVal = (double)pos / (double)fileSize;
HRESULT hr = lmcb.progress( progressVal, lmcb.pv );
CHECK( hr );
}
return S_OK;
}
public:
// Count of payload bytes whose reading was postponed by the hybrid loader; see loadHybrid
int64_t postponedBytes;
CallbacksImpl()
{
lmcb.progress = nullptr;
lmcb.cancel = nullptr;
lmcb.pv = nullptr;
fileSize = 0;
postponedBytes = 0;
}
// Capture the user callbacks; when progress reporting is requested, query the stream length too
HRESULT initialize( ComLight::iReadStream* stm, const sLoadModelCallbacks* rsi )
{
if( nullptr == rsi )
return S_OK;
lmcb = *rsi;
if( nullptr != lmcb.progress )
CHECK( stm->getLength( fileSize ) );
return S_OK;
}
// Poll for cancellation, and report progress based on the current stream position
HRESULT call( ComLight::iReadStream* stm )
{
if( nullptr != lmcb.cancel )
{
HRESULT hr = lmcb.cancel( lmcb.pv );
CHECK( hr );
if( S_OK != hr )
return HRESULT_FROM_WIN32( ERROR_CANCELLED );
}
if( nullptr != lmcb.progress )
{
int64_t pos;
CHECK( stm->getPosition( pos ) );
// Exclude postponed bytes; they're accounted for by gotBytes() instead
pos -= postponedBytes;
const double progressVal = (double)pos / (double)fileSize;
HRESULT hr = lmcb.progress( progressVal, lmcb.pv );
CHECK( hr );
}
return S_OK;
}
};
// Stream all model tensors from the file into VRAM, for the full-GPU implementation.
// Tensors are matched by name against the map built by populateTensorsMap;
// an unknown name, a bad rank, or a non-positive dimension fails the load.
HRESULT WhisperModel::loadGpu( ComLight::iReadStream* stm, CallbacksImpl& callbacks )
{
CAtlMap<CStringA, PendingTensor> map;
populateTensorsMap( map, parameters.n_audio_layer, parameters.n_text_layer, tensors, false );
DirectCompute::Reshaper reshape;
// Staging buffer reused across tensors, to avoid a heap allocation per tensor
std::vector<uint8_t> bytesVector;
size_t countLoaded = 0;
CStringA name;
int64_t cb = 0;
while( true )
{
CHECK( callbacks.call( stm ) );
// Tensor header: rank, name length, element type; E_EOF cleanly terminates the loop
sTensorHeader header;
HRESULT hr = readStruct( stm, header );
if( hr == E_EOF )
break;
if( FAILED( hr ) )
return hr;
if( header.n_dims < 1 || header.n_dims>3 )
return E_INVALIDARG;
// Dimension sizes; unused dimensions stay at 1
std::array<int, 4> ne = { 1, 1, 1, 1 };
CHECK( readBytes( stm, ne.data(), header.n_dims * 4 ) );
if( !allPositive( ne ) )
return E_INVALIDARG;
char* nameBuffer = name.GetBufferSetLength( header.length );
hr = readBytes( stm, nameBuffer, header.length );
name.ReleaseBuffer();
if( FAILED( hr ) )
return hr;
auto p = map.Lookup( name );
if( nullptr == p )
{
logError( u8"%s: unknown tensor '%s' in model file", __func__, cstr( name ) );
return E_INVALIDARG;
}
// ftype 0 = FP32; anything else is treated as FP16
DirectCompute::eDataType dt;
size_t cbElement;
if( header.ftype == 0 )
{
dt = DirectCompute::eDataType::FP32;
cbElement = 4;
}
else
{
dt = DirectCompute::eDataType::FP16;
cbElement = 2;
}
const size_t totalElts = (size_t)(uint32_t)ne[ 0 ] * (uint32_t)ne[ 1 ] * (uint32_t)ne[ 2 ];
// Reject tensors larger than 4GB
if( totalElts * cbElement > UINT_MAX )
return DISP_E_OVERFLOW;
try
{
bytesVector.resize( cbElement * totalElts );
}
catch( const std::bad_alloc& )
{
return E_OUTOFMEMORY;
}
CHECK( readBytes( stm, bytesVector.data(), bytesVector.size() ) );
cb += bytesVector.size();
// Upload to an immutable GPU buffer, then apply the optional post-processing step, e.g. MakePanels
CHECK( p->m_value.dest->createImmutable( dt, ne, bytesVector.data() ) );
CHECK( p->m_value.postProcess( reshape, dt ) );
countLoaded++;
}
// Every tensor registered in the map must have been present in the file
if( countLoaded != map.GetCount() )
{
logError( u8"Not all tensors loaded from model file - expected %zu, got %zu", map.GetCount(), countLoaded );
return E_INVALIDARG;
}
constexpr double mulMb = 1.0 / ( 1 << 20 );
logDebug( u8"Loaded %zu GPU tensors, %g MB VRAM", countLoaded, mulMb * cb );
return S_OK;
}
#if BUILD_HYBRID_VERSION
// Load tensors for the hybrid model: GPU tensors found in the map are uploaded to VRAM,
// everything else is routed to CpuCompute::HybridLoader, which keeps CPU-side tensors in RAM
// and may postpone reading their payloads until completeLoad().
HRESULT WhisperModel::loadHybrid( ComLight::iReadStream* stm, CallbacksImpl& callbacks )
{
CAtlMap<CStringA, PendingTensor> map;
populateTensorsMap( map, parameters.n_audio_layer, parameters.n_text_layer, tensors, true );
DirectCompute::Reshaper reshape;
CpuCompute::HybridLoader loader( shared->hybridTensors, parameters.n_text_layer );
// Staging buffer reused across tensors
std::vector<uint8_t> bytesVector;
size_t countLoaded = 0;
CStringA name;
int64_t cb = 0;
while( true )
{
CHECK( callbacks.call( stm ) );
sTensorHeader header;
HRESULT hr = readStruct( stm, header );
if( hr == E_EOF )
break;
if( FAILED( hr ) )
return hr;
if( header.n_dims < 1 || header.n_dims > 3 )
return E_INVALIDARG;
std::array<int, 4> ne = { 1, 1, 1, 1 };
CHECK( readBytes( stm, ne.data(), header.n_dims * 4 ) );
if( !allPositive( ne ) )
return E_INVALIDARG;
char* nameBuffer = name.GetBufferSetLength( header.length );
hr = readBytes( stm, nameBuffer, header.length );
name.ReleaseBuffer();
if( FAILED( hr ) )
return hr;
auto p = map.Lookup( name );
if( nullptr == p )
{
// Not a GPU tensor; try the CPU-side loader, which may postpone reading the payload
HRESULT hr = loader.setupTensor( name, header.n_dims, header.ftype, ne, stm, callbacks.postponedBytes );
if( hr == S_OK )
continue;
logError( u8"%s: unknown tensor '%s' in model file", __func__, cstr( name ) );
return E_INVALIDARG;
}
// ftype 0 = FP32; anything else is treated as FP16
DirectCompute::eDataType dt;
size_t cbElement;
if( header.ftype == 0 )
{
dt = DirectCompute::eDataType::FP32;
cbElement = 4;
}
else
{
dt = DirectCompute::eDataType::FP16;
cbElement = 2;
}
const size_t totalElts = (size_t)(uint32_t)ne[ 0 ] * (uint32_t)ne[ 1 ] * (uint32_t)ne[ 2 ];
// Reject tensors larger than 4GB
if( totalElts * cbElement > UINT_MAX )
return DISP_E_OVERFLOW;
try
{
bytesVector.resize( cbElement * totalElts );
}
catch( const std::bad_alloc& )
{
return E_OUTOFMEMORY;
}
CHECK( readBytes( stm, bytesVector.data(), bytesVector.size() ) );
CHECK( p->m_value.dest->createImmutable( dt, ne, bytesVector.data() ) );
CHECK( p->m_value.postProcess( reshape, dt ) );
countLoaded++;
cb += bytesVector.size();
}
if( countLoaded != map.GetCount() )
{
logError( u8"Not all tensors loaded from model file - expected %zu, got %zu", map.GetCount(), countLoaded );
return E_INVALIDARG;
}
constexpr double mulMb = 1.0 / ( 1 << 20 );
logDebug( u8"Loaded %zu GPU tensors, %g MB VRAM", countLoaded, mulMb * cb );
// Second pass: read the payloads whose loading was postponed by setupTensor
CHECK( loader.completeLoad( stm, callbacks ) );
return S_OK;
}
#endif
// Load the complete model from the stream: magic number, hyperparameters,
// MEL filter bank, vocabulary, and finally the tensors (full-GPU or hybrid).
// Also measures CPU and GPU load times into loadTimeCpu / loadTimeGpu.
HRESULT WhisperModel::load( ComLight::iReadStream* stm, bool hybrid, const sLoadModelCallbacks* callbacks )
{
CpuProfiler cpuPerf;
CallbacksImpl cb;
CHECK( cb.initialize( stm, callbacks ) );
// verify magic
{
uint32_t magic;
CHECK( readStruct( stm, magic ) );
// 0x67676d6c is ASCII "ggml"
if( magic != 0x67676d6c )
{
logError( u8"Invalid model file, bad magic" );
return E_INVALIDARG;
}
}
shared = std::make_shared<ModelShared>();
// hparams and MEL filters
{
ParamsAndMelHeader pmh;
CHECK( readStruct( stm, pmh ) );
parameters = pmh.mp;
assert( parameters.n_text_state == parameters.n_audio_state );
shared->filters.n_mel = pmh.n_mel;
shared->filters.n_fft = pmh.n_fft;
// The filter bank is a dense FP32 matrix of n_mel * n_fft elements
const size_t len = (size_t)pmh.n_mel * pmh.n_fft;
shared->filters.data.resize( len );
CHECK( readBytes( stm, shared->filters.data.data(), len * 4 ) );
const int64_t cb = len * 4;
constexpr double mulKb = 1.0 / ( 1 << 10 );
logDebug( u8"Loaded MEL filters, %.1f kb RAM", mulKb * cb );
}
CHECK( cb.call( stm ) );
// Vocabulary
CHECK( shared->vocab.load( stm, parameters.n_vocab ) );
CHECK( cb.call( stm ) );
// Time the tensor upload on the GPU timeline as well as on the CPU
DirectCompute::GpuProfilerSimple gpuProfiler;
CHECK( gpuProfiler.create() );
if( hybrid )
{
#if BUILD_HYBRID_VERSION
CHECK( loadHybrid( stm, cb ) )
#else
return E_NOTIMPL;
#endif
}
else
CHECK( loadGpu( stm, cb ) );
CHECK( gpuProfiler.time( loadTimeGpu ) );
loadTimeCpu = cpuPerf.elapsed();
return S_OK;
}
// Make this model a clone of the source: the hyperparameters are copied by value,
// the CPU-side shared data (vocabulary, MEL filters) is reference-counted,
// and the GPU tensors are cloned by ModelBuffers.
HRESULT Whisper::WhisperModel::createClone( const WhisperModel& rsi )
{
	// Share the CPU-side immutable data with the source model
	shared = rsi.shared;
	// Copy the scalar hyperparameters
	parameters = rsi.parameters;
	// Clone the GPU tensors
	const HRESULT hr = tensors.createClone( rsi.tensors );
	if( FAILED( hr ) )
		return hr;
	return S_OK;
}
// Approximate memory usage of this model, as two int64 byte counters packed in a vector.
// The CPU-side bytes go to lane 0 (via _mm_cvtsi64_si128); assumes
// ModelBuffers::getMemoryUse uses the same lane layout — TODO confirm.
__m128i Whisper::WhisperModel::getMemoryUse() const
{
// CPU-side data: vocabulary strings + hash map, and the MEL filter bank
size_t cb = shared->vocab.getMemoryUse();
cb += vectorMemoryUse( shared->filters.data );
__m128i v = _mm_cvtsi64_si128( (int64_t)cb );
// Add the per-lane counters reported for the tensors
v = _mm_add_epi64( v, tensors.getMemoryUse() );
return v;
}
| 14,185
|
C++
|
.cpp
| 445
| 28.853933
| 174
| 0.681875
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,562
|
ContextImpl.capture.cpp
|
Const-me_Whisper/Whisper/Whisper/ContextImpl.capture.cpp
|
#include "stdafx.h"
#include "ContextImpl.h"
#include "../API/iMediaFoundation.cl.h"
#include "../MF/AudioBuffer.h"
#include "../MF/mfUtils.h"
#include <mfidl.h>
#include <mfapi.h>
#include <mfreadwrite.h>
#include "voiceActivityDetection.h"
namespace
{
using namespace Whisper;
// Exposes a captured slice of PCM audio through the iAudioBuffer COM interface,
// to feed it into iContext.runFull from the background transcribe task
class TranscribeBuffer : public ComLight::ObjectRoot<iAudioBuffer>
{
// ==== iAudioBuffer ====
uint32_t COMLIGHTCALL countSamples() const override final
{
return (uint32_t)pcm.mono.size();
}
// Mono PCM samples, or nullptr when the buffer is empty
const float* COMLIGHTCALL getPcmMono() const override final
{
if( !pcm.mono.empty() )
return pcm.mono.data();
return nullptr;
}
// Stereo PCM samples, or nullptr when the capture is mono-only
const float* COMLIGHTCALL getPcmStereo() const override final
{
if( !pcm.stereo.empty() )
return pcm.stereo.data();
return nullptr;
}
// Start time of this buffer in 100-nanosecond ticks, computed from the sample offset
HRESULT COMLIGHTCALL getTime( int64_t& rdi ) const override final
{
rdi = MFllMulDiv( currentOffset, 10'000'000, SAMPLE_RATE, 0 );
return S_OK;
}
public:
// Captured samples being transcribed
AudioBuffer pcm;
// Offset of the first sample of this buffer since the capture started, in samples
int64_t currentOffset = 0;
};
// The object is owned as a field of the Capture class below, not allocated on the heap;
// Release() is overridden so COM reference counting doesn't try to delete it —
// presumably implRelease() only decrements the counter. TODO confirm.
class TranscribeBufferObj : public ComLight::Object<TranscribeBuffer>
{
uint32_t Release() override final
{
return RefCounter::implRelease();
}
};
// Same data as in the Whisper.sCaptureParams public structure, the durations are scaled from FP32 seconds into uint32_t samples at 16 kHz
struct CaptureParams
{
// NOTE: the four duration fields must stay contiguous and in this exact order —
// the constructor fills all of them with a single 16-byte vector store,
// mirroring the layout of the four floats starting at sCaptureParams.minDuration
uint32_t minDuration, maxDuration, dropStartSilence, pauseDuration;
uint32_t flags;
CaptureParams( const sCaptureParams& cp )
{
// Convert these floats from seconds to samples
__m128 floats = _mm_loadu_ps( &cp.minDuration );
floats = _mm_mul_ps( floats, _mm_set1_ps( (float)SAMPLE_RATE ) );
// Round to nearest integer before the conversion
floats = _mm_round_ps( floats, _MM_FROUND_NINT );
__m128i ints = _mm_cvtps_epi32( floats );
store16( &minDuration, ints );
flags = cp.flags;
}
};
// Implements the real-time capture + transcription loop over a Media Foundation source reader.
// The capture thread accumulates PCM and runs VAD; completed segments are transcribed
// on a Windows thread pool work item.
class Capture
{
CComPtr<IMFSourceReader> reader;
const CaptureParams captureParams;
const sCaptureCallbacks callbacks;
// Count of channels delivered from the source reader
uint8_t readerChannels = 0;
// Bitmask of eCaptureStatus values, shared with the thread pool callback
volatile char stateFlags = 0;
// Thread pool work item running the transcribe jobs
PTP_WORK work = nullptr;
// S_OK = idle / last job succeeded, S_FALSE = job in flight, failure code otherwise
volatile HRESULT workStatus = S_OK;
// Buffer currently owned by the background transcribe job
TranscribeBufferObj buffer;
CComAutoCriticalSection critSec;
// PCM data being accumulated by the capture thread
AudioBuffer pcm;
AudioBuffer::pfnAppendSamples pfnAppendSamples = nullptr;
// Start time of the pcm buffer, and time of the next incoming sample, both in samples
int64_t pcmStartTime = 0;
int64_t nextSampleTime = 0;
VAD vad;
sFullParams fullParams;
ProfileCollection& profiler;
iContext* const whisperContext;
// Set the state bit, and if needed notify user with the callback.
HRESULT setStateFlag( eCaptureStatus newBit ) noexcept
{
const uint8_t bit = (uint8_t)newBit;
const uint8_t oldVal = (uint8_t)InterlockedOr8( &stateFlags, (char)bit );
if( nullptr == callbacks.captureStatus )
return S_OK; // no callbacks
if( 0 != ( oldVal & bit ) )
return S_OK; // The bit was already set
return callbacks.captureStatus( callbacks.pv, (eCaptureStatus)( oldVal | bit ) );
}
// Clear the state bit, and if needed notify user with the callback
HRESULT clearStateFlag( eCaptureStatus clearBit ) noexcept
{
const uint8_t bit = (uint8_t)clearBit;
const uint8_t mask = ~bit;
const uint8_t oldVal = (uint8_t)InterlockedAnd8( &stateFlags, (char)mask );
if( nullptr == callbacks.captureStatus )
return S_OK; // no callbacks
if( 0 == ( oldVal & bit ) )
return S_OK; // The bit wasn't there
return callbacks.captureStatus( callbacks.pv, (eCaptureStatus)( oldVal & mask ) );
}
bool hasStateFlag( eCaptureStatus testBit ) const
{
const uint8_t bit = (uint8_t)testBit;
return 0 != ( (uint8_t)stateFlags & bit );
}
HRESULT workCallback();
static void __stdcall callbackStatic( PTP_CALLBACK_INSTANCE Instance, PVOID pv, PTP_WORK Work );
HRESULT readSample( bool discard );
// Run voice detection on the data in pcm.mono vector.
// When not detected, return 0. When detected, return last frame index where it is detected.
size_t detectVoice();
// Hand the accumulated PCM to the thread pool for transcription, and start a fresh buffer
HRESULT postPoolWork()
{
assert( workStatus == S_OK );
CHECK( setStateFlag( eCaptureStatus::Transcribing ) );
workStatus = S_FALSE;
buffer.currentOffset = pcmStartTime;
// Swap instead of copying: the transcribe job takes the samples, we get an empty buffer back
pcm.swap( buffer.pcm );
SubmitThreadpoolWork( work );
pcmStartTime = nextSampleTime;
pcm.clear();
vad.clear();
return S_OK;
}
public:
Capture( const sCaptureCallbacks& cb, const iAudioCapture* ac, const sFullParams& sfp, iContext* wc, ProfileCollection& pc ) :
callbacks( cb ),
captureParams( ac->getParams() ),
fullParams( sfp ), whisperContext( wc ), profiler( pc )
{
}
~Capture()
{
// Wait for the in-flight transcribe job before destroying the state it uses
if( workStatus == S_FALSE && nullptr != work )
WaitForThreadpoolWorkCallbacks( work, FALSE );
if( nullptr != work )
{
CloseThreadpoolWork( work );
work = nullptr;
}
}
HRESULT startup( const iAudioCapture* ac );
// Poll the user-supplied cancellation callback; S_OK means keep capturing
HRESULT checkCancel() noexcept
{
if( nullptr == callbacks.shouldCancel )
return S_OK;
return callbacks.shouldCancel( callbacks.pv );
}
HRESULT run();
};
// Set up the Media Foundation source reader, the thread pool work item,
// the output media type, and the sample-append function
HRESULT Capture::startup( const iAudioCapture* ac )
{
// Initialize the MF source reader
CHECK( ac->getReader( &reader ) );
work = CreateThreadpoolWork( &callbackStatic, this, nullptr );
if( nullptr == work )
return HRESULT_FROM_WIN32( GetLastError() );
// Set up media type, and figure out sample handler
CHECK( reader->SetStreamSelection( MF_SOURCE_READER_ALL_STREAMS, FALSE ) );
CHECK( reader->SetStreamSelection( MF_SOURCE_READER_FIRST_AUDIO_STREAM, TRUE ) );
CComPtr<IMFMediaType> mtNative;
CHECK( reader->GetNativeMediaType( MF_SOURCE_READER_FIRST_AUDIO_STREAM, MF_SOURCE_READER_CURRENT_TYPE_INDEX, &mtNative ) );
UINT32 numChannels;
CHECK( mtNative->GetUINT32( MF_MT_AUDIO_NUM_CHANNELS, &numChannels ) );
const bool sourceMono = numChannels < 2;
const bool wantStereo = 0 != ( captureParams.flags & (uint32_t)eCaptureFlags::Stereo );
// Pick the append function matching source channels and the requested output layout
pfnAppendSamples = AudioBuffer::appendSamplesFunc( sourceMono, wantStereo );
CComPtr<IMFMediaType> mt;
// NOTE(review): createMediaType( !sourceMono, ... ) requests a stereo stream for any stereo
// source, yet readerChannels is 1 when wantStereo is off — readSample's discard path divides
// by readerChannels, so a stereo source with the Stereo flag off may advance the clock 2x too
// fast while stalled. Verify against createMediaType's semantics.
this->readerChannels = ( !sourceMono && wantStereo ) ? 2 : 1;
CHECK( createMediaType( !sourceMono, &mt ) );
CHECK( reader->SetCurrentMediaType( MF_SOURCE_READER_FIRST_AUDIO_STREAM, nullptr, mt ) );
CHECK( setStateFlag( eCaptureStatus::Listening ) );
return S_OK;
}
// This method is called in a loop until user stops the audio capture
HRESULT Capture::run()
{
HRESULT hr;
if( hasStateFlag( eCaptureStatus::Stalled ) )
{
// Stalled = previous transcribe job ran longer than maxDuration worth of audio
hr = workStatus;
CHECK( hr );
if( S_OK != hr )
{
// Still stalled, discard the upcoming sample
return readSample( true );
}
else
{
// The postponed task has completed by now, no longer stalled
// Move the current PCM buffer to the transcribe thread
CHECK( clearStateFlag( eCaptureStatus::Stalled ) );
return postPoolWork();
}
}
const size_t oldSamples = pcm.mono.size();
CHECK( readSample( false ) );
const size_t newSamples = pcm.mono.size();
const size_t lastVoiceFrame = detectVoice();
if( lastVoiceFrame == 0 )
{
// No voice is detected in the entire buffered audio
// NOTE(review): set/clearStateFlag results are deliberately not CHECK-ed in this
// branch and below, unlike the call inside postPoolWork — presumably intentional
clearStateFlag( eCaptureStatus::Voice );
if( newSamples < captureParams.dropStartSilence )
return S_OK;
// Drop the accumulated silence, restart the buffer at the current position
pcm.clear();
vad.clear();
pcmStartTime = nextSampleTime;
return S_OK;
}
const bool newFrameVoice = lastVoiceFrame + captureParams.pauseDuration >= oldSamples;
if( newFrameVoice )
{
// A voice is detected in the buffer, and it was fairly recently
setStateFlag( eCaptureStatus::Voice );
if( newSamples < captureParams.maxDuration )
return S_OK; // While voice is continuously detected, we allow to grow the buffer up to `maxDuration` time
}
else
{
// A voice is detected in the buffer, but it was a while ago
clearStateFlag( eCaptureStatus::Voice );
if( newSamples < captureParams.minDuration )
return S_OK; // When detected pause in the voice, we fire the transcribe task right away.
}
// Hopefully, we have enough captured PCM data to run the ASR model.
// Check the background task status first.
hr = workStatus;
CHECK( hr );
if( hr == S_OK )
{
// S_OK workStatus means the previously posted transcribe job has completed successfully by now
return postPoolWork();
}
// S_FALSE means the previously posted transcribe job is still running
// Allow the buffer to grow up to maxDuration length, before starting to drop the samples
if( newSamples < captureParams.maxDuration )
return S_OK;
// The previous task has not finished yet, but we don't want to grow the buffer even further.
// We don't want concurrent transcribes here because not implemented, will simply crash.
// Set the "Stalled" flag which causes capture to drop further samples
setStateFlag( eCaptureStatus::Stalled );
return S_OK;
}
// Pull the next sample from the source reader, appending its PCM to the buffer.
// When `discard` is true, the payload is dropped and only the sample clock advances.
// Returns E_EOF when the audio stream ends.
HRESULT Capture::readSample( bool discard )
{
while( true )
{
DWORD dwFlags = 0;
CComPtr<IMFSample> sample;
// Read the next sample
HRESULT hr = reader->ReadSample( (DWORD)MF_SOURCE_READER_FIRST_AUDIO_STREAM, 0, nullptr, &dwFlags, nullptr, &sample );
if( FAILED( hr ) )
{
logErrorHr( hr, u8"IMFSourceReader.ReadSample" );
return hr;
}
if( dwFlags & MF_SOURCE_READERF_CURRENTMEDIATYPECHANGED )
{
logError( u8"Media type changes ain’t supported by the library." );
return E_UNEXPECTED;
}
if( dwFlags & MF_SOURCE_READERF_ENDOFSTREAM )
return E_EOF;
// ReadSample may legally return no sample (e.g. a stream tick); keep reading
if( !sample )
continue;
// Get a pointer to the audio data in the sample.
CComPtr<IMFMediaBuffer> buffer;
hr = sample->ConvertToContiguousBuffer( &buffer );
if( FAILED( hr ) )
return hr;
const float* pAudioData = nullptr;
DWORD cbBuffer;
hr = buffer->Lock( (BYTE**)&pAudioData, nullptr, &cbBuffer );
if( FAILED( hr ) )
return hr;
try
{
assert( 0 == ( cbBuffer % sizeof( float ) ) );
const size_t countFloats = cbBuffer / sizeof( float );
if( !discard )
{
// Append, advancing the clock by the count of mono samples actually added
const size_t prevSize = pcm.mono.size();
( pcm.*pfnAppendSamples )( pAudioData, countFloats );
const size_t newSize = pcm.mono.size();
this->nextSampleTime += ( newSize - prevSize );
}
else
{
this->nextSampleTime += countFloats / readerChannels;
}
}
catch( const std::bad_alloc& )
{
// Unlock before failing — the media buffer must not stay locked
buffer->Unlock();
return E_OUTOFMEMORY;
}
// Unlock the buffer
hr = buffer->Unlock();
if( FAILED( hr ) )
return hr;
return S_OK;
}
}
// Thread-pool payload: transcribe the PCM slice held by `buffer`,
// then drop the "Transcribing" status bit
HRESULT Capture::workCallback()
{
	HRESULT hr = whisperContext->runFull( fullParams, &buffer );
	if( FAILED( hr ) )
		return hr;
	hr = clearStateFlag( eCaptureStatus::Transcribing );
	if( FAILED( hr ) )
		return hr;
	return S_OK;
}
// Thread pool entry point: runs the transcribe job, translating C++ exceptions
// into HRESULT codes, and publishes the completion status in workStatus
void __stdcall Capture::callbackStatic( PTP_CALLBACK_INSTANCE Instance, PVOID pv, PTP_WORK Work )
{
Capture* pThis = (Capture*)pv;
HRESULT status = E_UNEXPECTED;
try
{
status = pThis->workCallback();
}
catch( HRESULT hr )
{
status = hr;
}
catch( const std::bad_alloc& )
{
status = E_OUTOFMEMORY;
}
catch( const std::exception& )
{
status = E_FAIL;
}
// S_FALSE is reserved for "job still running"; a completed job must not produce it
assert( S_OK == status || FAILED( status ) );
pThis->workStatus = status;
}
// Run voice activity detection on the buffered mono PCM, under the VAD profiler block;
// returns 0 when no voice, otherwise the last frame index where voice was detected
size_t Capture::detectVoice()
{
auto pf = profiler.cpuBlock( eCpuBlock::VAD );
return vad.detect( pcm.mono.data(), pcm.mono.size() );
}
}
// Run the real-time capture + transcribe loop until cancelled.
// Validates the duration parameters, then pumps Capture::run() until the
// user-supplied shouldCancel callback returns a status other than S_OK.
HRESULT COMLIGHTCALL ContextImpl::runCapture( const sFullParams& params, const sCaptureCallbacks& callbacks, const iAudioCapture* reader )
{
if( nullptr == reader )
return E_POINTER;
// Validate a few things
{
const auto& cp = reader->getParams();
// Durations are in seconds; 30 seconds is the model's maximum audio chunk
if( cp.minDuration < 0.125f || cp.minDuration > 30.0f )
{
logError( u8"%s parameter %g is out of range", "minDuration", cp.minDuration );
return E_INVALIDARG;
}
if( cp.maxDuration < 0.125f || cp.maxDuration > 30.0f )
{
logError( u8"%s parameter %g is out of range", "maxDuration", cp.maxDuration );
return E_INVALIDARG;
}
}
auto profCompleteCpu = profiler.cpuBlock( eCpuBlock::RunComplete );
Capture capture{ callbacks, reader, params, this, profiler };
CHECK( capture.startup( reader ) );
while( true )
{
// A non-S_OK success code from the user callback stops the capture gracefully
HRESULT hr = capture.checkCancel();
CHECK( hr );
if( hr != S_OK )
return S_OK;
CHECK( capture.run() );
}
}
| 12,058
|
C++
|
.cpp
| 375
| 28.658667
| 139
| 0.704749
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,563
|
ModelImpl.cpp
|
Const-me_Whisper/Whisper/Whisper/ModelImpl.cpp
|
#include "stdafx.h"
#include "ModelImpl.h"
#include "ContextImpl.h"
#include <intrin.h>
#include "../Utils/ReadStream.h"
#include "../modelFactory.h"
using namespace Whisper;
// Presumably invoked by the ComLight object model when the last reference is
// released — destroys the GPU device owned by this model. TODO confirm.
void ModelImpl::FinalRelease()
{
device.destroy();
}
// Create an iContext COM object bound to this model's device and tensors
HRESULT COMLIGHTCALL ModelImpl::createContext( iContext** pp )
{
// Bind the D3D device of this model to the current thread for the duration of the call
auto ts = device.setForCurrentThread();
ComLight::CComPtr<ComLight::Object<ContextImpl>> obj;
iModel* m = this;
CHECK( ComLight::Object<ContextImpl>::create( obj, device, model, m ) );
// Transfer ownership of the new reference to the caller
obj.detach( pp );
return S_OK;
}
// Tokenize the text with the model's vocabulary, delivering the resulting token IDs
// to the caller-supplied callback; an empty result is reported as ( nullptr, 0 ).
HRESULT COMLIGHTCALL ModelImpl::tokenize( const char* text, pfnDecodedTokens pfn, void* pv )
{
	std::vector<int> ids;
	const HRESULT hr = model.shared->vocab.tokenize( text, ids );
	if( FAILED( hr ) )
		return hr;
	if( ids.empty() )
		pfn( nullptr, 0, pv );
	else
		pfn( ids.data(), (int)ids.size(), pv );
	return S_OK;
}
// Create a clone of this model; requires the Cloneable flag in the GPU info
HRESULT COMLIGHTCALL ModelImpl::clone( iModel** rdi )
{
if( !device.gpuInfo.cloneableModel() )
{
logError( u8"iModel.clone requires the Cloneable model flag" );
return HRESULT_FROM_WIN32( ERROR_NOT_SUPPORTED );
}
ComLight::CComPtr<ComLight::Object<ModelImpl>> obj;
CHECK( ComLight::Object<ModelImpl>::create( obj, *this ) );
CHECK( obj->createClone( *this ) );
// Transfer ownership of the new reference to the caller
obj.detach( rdi );
return S_OK;
}
// Initialize this newly created object as a clone of the source: device first, then model data
HRESULT ModelImpl::createClone( const ModelImpl& source )
{
auto ts = device.setForCurrentThread();
CHECK( device.createClone( source.device ) );
return model.createClone( source.model );
}
// Create the D3D device with the requested flags/adapter, then load the model from the stream
HRESULT ModelImpl::load( iReadStream* stm, bool hybrid, const sLoadModelCallbacks* callbacks )
{
auto ts = device.setForCurrentThread();
CHECK( device.create( gpuFlags, adapter ) );
return model.load( stm, hybrid, callbacks );
}
// True when the current CPU supports the instructions required by the GPU model implementation.
// SSE 4.1 is always required; despite the function name, F16C is only checked
// when this DLL itself was compiled with AVX enabled.
inline bool hasSse41AndF16C()
{
int cpu_info[ 4 ];
__cpuid( cpu_info, 1 );
// https://en.wikipedia.org/wiki/CPUID EAX=1: Processor Info and Feature Bits
constexpr uint32_t sse41 = ( 1u << 19 );
constexpr uint32_t f16c = ( 1u << 29 );
#ifdef __AVX__
constexpr uint32_t requiredBits = sse41 | f16c;
#else
constexpr uint32_t requiredBits = sse41;
#endif
const uint32_t ecx = (uint32_t)cpu_info[ 2 ];
return ( ecx & requiredBits ) == requiredBits;
}
// True when the current CPU is good enough to run the hybrid model
// Requires OS-enabled AVX state, plus FMA3, F16C, and BMI1 CPU features
inline bool hasAvxAndFma()
{
// AVX needs OS support to preserve the 32-bytes registers across context switches, CPU support alone ain't enough
// Calling a kernel API to check that support
// The magic number is from there: https://stackoverflow.com/a/35096938/126995
if( 0 == ( GetEnabledXStateFeatures() & 4 ) )
return false;
// FMA3 and F16C
int cpuInfo[ 4 ];
__cpuid( cpuInfo, 1 );
// The magic numbers are from "Feature Information" table on Wikipedia:
// https://en.wikipedia.org/wiki/CPUID#EAX=1:_Processor_Info_and_Feature_Bits
constexpr int requiredBits = ( 1 << 12 ) | ( 1 << 29 );
if( requiredBits != ( cpuInfo[ 2 ] & requiredBits ) )
return false;
// BMI1
// https://en.wikipedia.org/wiki/CPUID#EAX=7,_ECX=0:_Extended_Features
__cpuid( cpuInfo, 7 );
if( 0 == ( cpuInfo[ 1 ] & ( 1 << 3 ) ) )
return false;
// All required features are present
return true;
}
// Public factory: create an iModel object from a GGML model file on disk.
// Validates CPU feature support for the requested implementation,
// opens the file, then loads hyperparameters, vocabulary, and tensors.
HRESULT __stdcall Whisper::loadGpuModel( const wchar_t* path, const sModelSetup& setup, const sLoadModelCallbacks* callbacks, iModel** pp )
{
if( nullptr == path || nullptr == pp )
return E_POINTER;
const bool hybrid = setup.impl == eModelImplementation::Hybrid;
if( hybrid )
{
#if BUILD_HYBRID_VERSION
if( !hasAvxAndFma() )
{
logError( u8"eModelImplementation.Hybrid model requires a CPU with AVX1, FMA3, F16C and BMI1 support" );
return ERROR_HV_CPUID_FEATURE_VALIDATION;
}
#else
logError( u8"This build of the DLL doesn’t implement eModelImplementation.Hybrid model" );
return E_NOTIMPL;
#endif
}
else if( !hasSse41AndF16C() )
{
logError( u8"eModelImplementation.GPU model requires a CPU with SSE 4.1 and F16C support" );
return ERROR_HV_CPUID_FEATURE_VALIDATION;
}
ComLight::Object<ReadStream> stream;
HRESULT hr = stream.open( path );
if( FAILED( hr ) )
{
logError16( L"Unable to open model binary file \"%s\"", path );
return hr;
}
ComLight::CComPtr<ComLight::Object<ModelImpl>> obj;
CHECK( ComLight::Object<ModelImpl>::create( obj, setup ) );
hr = obj->load( &stream, hybrid, callbacks );
if( FAILED( hr ) )
{
logError16( L"Error loading the model from \"%s\"", path );
return hr;
}
// Transfer ownership of the reference to the caller
obj.detach( pp );
logInfo16( L"Loaded model from \"%s\" to VRAM", path );
return S_OK;
}
| 4,384
|
C++
|
.cpp
| 135
| 30.385185
| 139
| 0.71872
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,564
|
Vocabulary.cpp
|
Const-me_Whisper/Whisper/Whisper/Vocabulary.cpp
|
#include "stdafx.h"
#include "Vocabulary.h"
#include "loaderUtils.h"
#include <regex>
using ComLight::iReadStream;
using namespace Whisper;
Vocabulary::Vocabulary() :
// Pre-size the token hash map; the arguments are the CAtlMap constructor parameters:
// ( bins count, optimal load, low rehash threshold, high rehash threshold, block size )
idFromToken( 17u, 0.75f, 0.25f, 1.5f, 1024 )
{ }
// Append a printf-formatted synthetic token (e.g. "[_TT_42]") to the string pool,
// and store it at the given index of the tokens vector
void Vocabulary::addExtra( int index, const char* format, int i )
{
// First measure the formatted length, then format directly into the pool
const int len = std::snprintf( nullptr, 0, format, i );
const size_t offset = stringData.size();
stringData.resize( offset + len + 1 );
char* const rdi = stringData.data() + offset;
std::snprintf( rdi, len + 1, format, i );
rdi[ len ] = '\0';
// The pool vector may still reallocate, so store the pool offset instead of a pointer;
// completeBuild() converts these offsets into real pointers
tokens[ index ] = reinterpret_cast<const char*>( offset );
}
// Finish loading the vocabulary: convert pool offsets into string pointers,
// build the token -> id hash map, and log an estimate of RAM usage
void Vocabulary::completeBuild()
{
stringData.shrink_to_fit();
// Replace offsets with char pointers
const size_t dataLength = stringData.size();
for( auto& s : tokens )
{
// The reason this hack works - on Windows, lower 2GB of address space is reserved to the kernel.
// That's why the strings from the read only section of this DLL like "[_PREV_]" are guaranteed to have their addresses much larger than the size of the data buffer
const size_t ri = reinterpret_cast<size_t>( s );
if( ri < dataLength )
s = stringData.data() + ri;
}
// Build hash map to lookup the tokens
const size_t tokensCount = tokens.size();
for( size_t i = 0; i < tokensCount; i++ )
idFromToken.SetAt( tokens[ i ], (int)i );
idFromToken.Rehash();
// Log success message
// RAM estimate: string pool + token pointers + hash table buckets + map nodes
int64_t cb = stringData.size();
cb += tokens.size() * sizeof( void* );
cb += sizeof( void* ) * idFromToken.GetHashTableSize();
cb += ( sizeof( THashMap::CPair ) + 16 ) * idFromToken.GetCount();
constexpr double mulKb = 1.0 / ( 1 << 10 );
logDebug( u8"Loaded vocabulary, %zu strings, %.1f kb RAM", tokens.size(), mulKb * cb );
}
// Look up a token string in the hash map; returns its token ID, or -1 when not found
int Vocabulary::findId( const char* token ) const
{
	const auto pair = idFromToken.Lookup( token );
	return ( nullptr != pair ) ? pair->m_value : -1;
}
// Load the vocabulary from the model file.
// lengthInHeader is n_vocab from the hyperparameters; when larger than the count of
// strings stored in the file, the missing entries are synthesized special/extra tokens.
HRESULT Vocabulary::load( ComLight::iReadStream* stm, int lengthInHeader )
{
if( lengthInHeader <= 0 )
return E_INVALIDARG;
tokens.clear();
stringData.clear();
int countWords = 0;
CHECK( readStruct( stm, countWords ) );
if( countWords <= 0 )
return E_INVALIDARG;
const size_t count = (uint32_t)countWords;
const size_t actualCount = std::max( count, (size_t)lengthInHeader );
tokens.resize( actualCount );
for( int i = 0; i < count; i++ )
{
// Each entry is a 4-byte length followed by that many bytes of string data
int countChars = 0;
CHECK( readStruct( stm, countChars ) );
if( countChars < 0 )
{
logError( u8"Vocabulary.load failed: string length is negative" );
return E_INVALIDARG;
}
if( countChars == 0 )
{
// This happens with `ggml-large.bin` and `ggml-large-v1.bin` models.
// A bug in the model maybe?
tokens[ i ] = "";
continue;
}
// Copy the string into the pool; store the pool offset instead of a pointer,
// because the pool vector may reallocate while growing. See completeBuild().
const size_t len = (size_t)countChars;
const size_t offset = stringData.size();
stringData.resize( offset + len + 1 );
CHECK( readBytes( stm, &stringData[ offset ], len ) );
*stringData.rbegin() = '\0';
tokens[ i ] = reinterpret_cast<const char*>( offset );
}
n_vocab = lengthInHeader;
// Multilingual models shift the IDs of the special tokens up by one
if( is_multilingual() )
{
token_eot++;
token_sot++;
token_prev++;
token_solm++;
token_not++;
token_beg++;
};
// Synthesize names for the special / extra tokens not stored in the file
if( countWords < lengthInHeader )
{
for( int i = countWords; i < lengthInHeader; i++ )
{
if( i > token_beg )
addExtra( i, "[_TT_%i]", i - token_beg );
else if( i == token_eot )
tokens[ i ] = "[_EOT_]";
else if( i == token_sot )
tokens[ i ] = "[_SOT_]";
else if( i == token_prev )
tokens[ i ] = "[_PREV_]";
else if( i == token_not )
tokens[ i ] = "[_NOT_]";
else if( i == token_beg )
tokens[ i ] = "[_BEG_]";
else
addExtra( i, "[_extra_token_%i]", i );
}
}
completeBuild();
return S_OK;
}
// Copy the IDs of the special tokens into the caller-provided structure
void Vocabulary::getSpecialTokens( SpecialTokens& rdi ) const
{
	// Transcription boundary markers
	rdi.TranscriptionStart = token_sot;
	rdi.TranscriptionBegin = token_beg;
	rdi.TranscriptionEnd = token_eot;
	// Context markers
	rdi.PreviousWord = token_prev;
	rdi.SentenceStart = token_solm;
	rdi.Not = token_not;
	// Task selector tokens
	rdi.TaskTranslate = token_translate;
	rdi.TaskTranscribe = token_transcribe;
}
// https://github.com/ggerganov/whisper.cpp/blob/v1.2.1/whisper.cpp#L2451
HRESULT Vocabulary::tokenize( const std::string& text, std::vector<id>& tokens ) const
{
std::vector<std::string> words;
// first split the text into words
{
std::string str = text;
std::string pat = R"('s|'t|'re|'ve|'m|'ll|'d| ?[[:alpha:]]+| ?[[:digit:]]+| ?[^\s[:alpha:][:digit:]]+|\s+(?!\S)|\s+)";
std::regex re( pat );
std::smatch m;
while( std::regex_search( str, m, re ) )
{
for( auto x : m )
words.push_back( x );
str = m.suffix();
}
}
// find the longest tokens that form the words:
tokens.clear();
for( const auto& word : words )
{
if( word.empty() )
continue;
int i = 0;
int n = (int)word.size();
while( i < n )
{
int j = n;
while( j > i )
{
const int it = findId( word.substr( i, j - i ) );
if( it >= 0 )
{
tokens.push_back( it );
i = j;
break;
}
j--;
}
if( i == n )
break;
if( j == i )
{
const auto sub = word.substr( i, 1 );
const int it = findId( sub );
if( it >= 0 )
{
tokens.push_back( it );
}
else
{
logError( u8"Unknown token \"%s\"", sub.c_str() );
return E_INVALIDARG;
}
i++;
}
}
}
return S_OK;
}
| 5,263
|
C++
|
.cpp
| 193
| 24.290155
| 166
| 0.635065
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,565
|
Spectrogram.cpp
|
Const-me_Whisper/Whisper/Whisper/Spectrogram.cpp
|
#include "stdafx.h"
#include "Spectrogram.h"
#include <memory>
#define _USE_MATH_DEFINES
#include <math.h>
#include "../Utils/parallelFor.h"
#include "../API/iMediaFoundation.cl.h"
#include "../ML/testUtils.h"
#include "melSpectrogram.h"
using namespace Whisper;
// Per-thread state for the multithreaded MEL spectrogram computation;
// aligned to a cache line to avoid false sharing between worker threads
class alignas( 64 ) Spectrogram::MelContext
{
// Input PCM samples, and their count
const float* const samples;
const size_t countSamples;
// The spectrogram being filled
Spectrogram& result;
// Total count of worker threads
const int n_threads;
SpectrogramContext context;
public:
MelContext( const float* rsi, size_t len, const Filters& f, Spectrogram& rdi, int countThreads ) :
samples( rsi ), countSamples( len ), result( rdi ), n_threads( countThreads ),
context( f )
{ }
// Compute the spectrogram columns assigned to the thread with the given index
void run( int ith );
// parallelFor thunk; ctx points to a std::vector<MelContext>
static HRESULT workCallback( int ith, void* ctx ) noexcept;
};
// Each thread handles an interleaved subset of the columns: i = ith, ith + n_threads, ...
void Spectrogram::MelContext::run( int ith )
{
std::array<float, N_MEL> arr;
for( uint32_t i = ith; i < result.length; i += n_threads )
{
const int offset = i * FFT_STEP;
const float* rsi = samples + offset;
context.fft( arr, rsi, countSamples - offset );
// Scatter the column: each MEL bin occupies a contiguous row of `length` values
for( size_t j = 0; j < N_MEL; j++ )
result.data[ j * result.length + i ] = arr[ j ];
}
}
// Thread-pool callback: dispatches to the per-thread MelContext::run method.
// The void* argument is the std::vector<MelContext> shared by all workers.
// Exceptions are translated into HRESULT codes because this crosses a C-style boundary.
HRESULT Spectrogram::MelContext::workCallback( int ith, void* ctx ) noexcept
{
	auto& workers = *static_cast<std::vector<Spectrogram::MelContext>*>( ctx );
	try
	{
		workers.at( ith ).run( ith );
	}
	catch( const std::bad_alloc& )
	{
		return E_OUTOFMEMORY;
	}
	catch( const std::exception& )
	{
		return E_FAIL;
	}
	return S_OK;
}
// Compute the MEL spectrogram of the complete PCM buffer, optionally on multiple threads,
// then clamp and normalize the values the same way whisper.cpp does.
// When the source has a stereo track, a copy of it is retained for later channel analysis.
HRESULT Spectrogram::pcmToMel( const iAudioBuffer* buffer, const Filters& filters, int threads )
{
	if( nullptr == buffer )
		return E_POINTER;
	const uint32_t countSamples = buffer->countSamples();
	if( 0 == countSamples )
		return OLE_E_BLANK;
	const float* const samples = buffer->getPcmMono();
	length = ( countSamples ) / FFT_STEP;
	// Fixed: these allocations can throw std::bad_alloc; translate into E_OUTOFMEMORY
	// like the stereo.resize() below already does, instead of leaking the exception
	// out of an HRESULT-returning method.
	try
	{
		data.resize( N_MEL * length );
	}
	catch( const std::bad_alloc& )
	{
		return E_OUTOFMEMORY;
	}
	if( threads < 2 )
	{
		// Single-threaded: run inline on the calling thread
		MelContext ctx{ samples, countSamples, filters, *this, 1 };
		ctx.run( 0 );
	}
	else
	{
		std::vector<MelContext> contexts;
		try
		{
			contexts.reserve( threads );
			for( int i = 0; i < threads; i++ )
				contexts.emplace_back( MelContext{ samples, countSamples, filters, *this, (int)threads } );
		}
		catch( const std::bad_alloc& )
		{
			return E_OUTOFMEMORY;
		}
		CHECK( parallelFor( &MelContext::workCallback, threads, &contexts ) );
	}
	// clamping and normalization
	double mmax = -1e20;
	for( double f : data )
		mmax = std::max( mmax, f );
	//printf("%s: max = %f\n", __func__, mmax);
	mmax -= 8.0;
	for( float& f : data )
	{
		if( f < mmax )
			f = (float)mmax;
		f = (float)( ( f + 4.0 ) / 4.0 );
	}
	// DirectCompute::dbgWriteBinaryFile( LR"(C:\Temp\2remove\ML\mel-my.bin)", data.data(), data.size() * 4 );
	// Keep a copy of the stereo PCM, when available; 8 bytes per sample = 2 FP32 channels
	const float* const pcmStereo = buffer->getPcmStereo();
	if( nullptr != pcmStereo )
	{
		try
		{
			stereo.resize( countSamples );
		}
		catch( const std::bad_alloc& )
		{
			return E_OUTOFMEMORY;
		}
		memcpy( stereo.data(), pcmStereo, countSamples * 8 );
	}
	else
		stereo.clear();
	return S_OK;
}
// Compute a moving average of absolute sample values, used for token-level timestamps.
// result[ i ] = sum of |samples| over the window [ i - hw, i + hw ] divided by the full
// window size; positions outside the buffer contribute zero.
void Whisper::computeSignalEnergy( std::vector<float>& result, const iAudioBuffer* buffer, int n_samples_per_half_window )
{
	const ptrdiff_t countSamples = (ptrdiff_t)buffer->countSamples();
	const float* const samples = buffer->getPcmMono();
	const int hw = n_samples_per_half_window;
	result.resize( countSamples );
	for( ptrdiff_t i = 0; i < countSamples; i++ )
	{
		float sum = 0;
		for( int j = -hw; j <= hw; j++ )
		{
			// Fixed: the old code computed `i + j` in unsigned arithmetic, so the
			// `i + j >= 0` test was always true, and correctness relied on the wrapped
			// value failing the upper bound. Signed arithmetic makes the bound explicit.
			const ptrdiff_t pos = i + j;
			if( pos >= 0 && pos < countSamples )
				sum += fabsf( samples[ pos ] );
		}
		result[ i ] = sum / ( 2 * hw + 1 );
	}
}
// Copy a slice of the retained stereo PCM, measured in FFT_STEP-sized chunks.
// The portion of the requested range past the end of the stream is zero-filled.
HRESULT Spectrogram::copyStereoPcm( size_t offset, size_t length, std::vector<StereoSample>& buffer ) const
{
	if( stereo.empty() )
		return OLE_E_BLANK;
	// Convert from chunks into samples
	length *= FFT_STEP;
	offset *= FFT_STEP;
	if( offset >= stereo.size() )
		return E_BOUNDS;
	try
	{
		buffer.resize( length );
	}
	catch( const std::bad_alloc& )
	{
		return E_OUTOFMEMORY;
	}
	const size_t lengthToCopy = std::min( length, stereo.size() - offset );
	// 8 bytes per StereoSample — two FP32 channels, presumably; same factor used elsewhere
	memcpy( buffer.data(), &stereo[ offset ], lengthToCopy * 8 );
	if( lengthToCopy == length )
		return S_OK;
	// Zero-fill the tail of the output buffer
	memset( &buffer[ lengthToCopy ], 0, ( buffer.size() - lengthToCopy ) * 8 );
	return S_OK;
}
| 4,052
|
C++
|
.cpp
| 145
| 25.634483
| 122
| 0.675161
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,566
|
DecoderInputBuffers.cpp
|
Const-me_Whisper/Whisper/Whisper/DecoderInputBuffers.cpp
|
#include "stdafx.h"
#include "DecoderInputBuffers.h"
#include "../D3D/createBuffer.h"
#include "../D3D/MappedResource.h"
using namespace DirectCompute;
// Make sure the dynamic GPU buffer can hold `size` 32-bit elements, growing when needed.
// The capacity never shrinks; shrink requests only adjust the logical size.
// Throws E_INVALIDARG for a zero size.
void DecoderInputBuffers::resize( uint32_t size )
{
	if( 0 == size )
		throw E_INVALIDARG;
	if( size > m_capacity )
	{
		// Need a larger buffer; release the old one first, then round the capacity
		// up to a multiple of 256 elements
		embd = nullptr;
		const uint32_t roundedUp = ( size + 0xFFu ) & ~0xFFu;
		const size_t payloadBytes = sizeof( uint32_t ) * (size_t)roundedUp;
		check( createBuffer( eBufferUse::Dynamic, payloadBytes, &embd, nullptr, nullptr ) );
		m_capacity = roundedUp;
	}
	m_size = size;
}
namespace
{
	// Create a 1D tensor of R32_UINT values viewing the buffer; read-only for the GPU
	static Tensor createView( ID3D11Buffer* buffer, uint32_t length )
	{
		Tensor res;
		TensorGpuViews& views = res;
		check( views.create( buffer, DXGI_FORMAT_R32_UINT, length, false ) );
		// 1D shape with dense strides
		res.ne = { length, 1, 1, 1 };
		res.setDenseStrides();
		return res;
	}
}
// Upload m_size token IDs from the source pointer into the dynamic buffer,
// and return a 1D tensor viewing them. The source must have at least m_size elements.
// Throws OLE_E_BLANK when the buffer was never resized.
Tensor DecoderInputBuffers::embedding( const int* rsi ) const
{
	if( nullptr == embd || m_size == 0 )
		throw OLE_E_BLANK;
	// Upload the data; the nested scope unmaps the resource before the view is created
	{
		MappedResource mapped;
		check( mapped.map( embd, false ) );
		int* const rdi = (int*)mapped.data();
		memcpy( rdi, rsi, m_size * (size_t)4 );
	}
	return createView( embd, m_size );
}
// Release the GPU buffer, and reset the size bookkeeping back to the empty state
void DecoderInputBuffers::clear()
{
	m_size = 0;
	m_capacity = 0;
	embd = nullptr;
}
// Fill the buffer with zeros — the complete capacity, not just the current logical size.
// Returns S_FALSE when there's nothing to clear.
HRESULT DecoderInputBuffers::zeroMemory() const
{
	if( nullptr == embd || m_size == 0 )
		return S_FALSE;
	MappedResource mapped;
	CHECK( mapped.map( embd, false ) );
	// __stosd writes m_capacity DWORDs, i.e. the entire allocation (4 bytes per element)
	__stosd( (DWORD*)mapped.data(), 0, m_capacity );
	return S_OK;
}
| 1,541
|
C++
|
.cpp
| 62
| 22.709677
| 83
| 0.69215
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,567
|
ContextImpl.misc.cpp
|
Const-me_Whisper/Whisper/Whisper/ContextImpl.misc.cpp
|
#include "stdafx.h"
#include "ContextImpl.h"
#include <mfapi.h>
#include "MelStreamer.h"
#include "../API/iMediaFoundation.cl.h"
#include "../Utils/Trace/tracing.h"
using namespace Whisper;
// Count physical CPU cores using GetLogicalProcessorInformation.
// SMT siblings are not counted, only RelationProcessorCore entries.
// Returns 0 when the API fails; callers are expected to fall back to logical processors.
static int getCpuCoresCount()
{
	DWORD bufferSize = 0;
	// The first call fails by design, and reports the required buffer size
	GetLogicalProcessorInformation( NULL, &bufferSize );
	// The SYSTEM_LOGICAL_PROCESSOR_INFORMATION structure has a uint64_t field
	// Ideally need to align by 8 bytes, and that's why uint64_t type for the storage
	std::unique_ptr<uint64_t[]> buffer = std::make_unique<uint64_t[]>( ( bufferSize + 7 ) / 8 );
	SYSTEM_LOGICAL_PROCESSOR_INFORMATION* ptr = (SYSTEM_LOGICAL_PROCESSOR_INFORMATION*)buffer.get();
	if( !GetLogicalProcessorInformation( ptr, &bufferSize ) )
	{
		HRESULT hr = getLastHr();
		logWarningHr( hr, u8"GetLogicalProcessorInformation" );
		return 0;
	}
	DWORD byteOffset = 0;
	int physicalCores = 0;
	while( byteOffset < bufferSize )
	{
		// Each RelationProcessorCore entry corresponds to one physical core
		if( ptr->Relationship == RelationProcessorCore )
			physicalCores++;
		byteOffset += sizeof( SYSTEM_LOGICAL_PROCESSOR_INFORMATION );
		ptr++;
	}
	return physicalCores;
}
// Pick the default count of CPU threads for the transcription.
// Non-hybrid (GPU-only) models use at most 4 threads; hybrid models prefer one thread
// per physical core — see the comment below.
int ContextImpl::defaultThreadsCount() const
{
#if BUILD_HYBRID_VERSION
	const bool isHybrid = !model.shared->hybridTensors.layers.empty();
#else
	constexpr bool isHybrid = false;
#endif
	SYSTEM_INFO si;
	GetSystemInfo( &si );
	const int hardwareThreads = (int)si.dwNumberOfProcessors;
	if( !isHybrid )
		return std::min( hardwareThreads, 4 );
	// It seems the CPU decoder in the hybrid context doesn’t scale well with count of hardware threads, but it does scale with count of physical cores.
	int cores = getCpuCoresCount();
	if( cores > 1 )
		return cores;
	// Fallback when the physical-core query failed
	return hardwareThreads;
}
// Fill sFullParams with default values for the selected sampling strategy.
// Ported from whisper_full_default_params in whisper.cpp; unset fields stay zero
// from the memset below.
HRESULT COMLIGHTCALL ContextImpl::fullDefaultParams( eSamplingStrategy strategy, sFullParams* rdi )
{
	// whisper_full_default_params
	if( nullptr == rdi )
		return E_POINTER;
	memset( rdi, 0, sizeof( sFullParams ) );
	rdi->strategy = strategy;
	rdi->cpuThreads = defaultThreadsCount();
	rdi->n_max_text_ctx = 16384;
	rdi->flags = eFullParamsFlags::PrintProgress | eFullParamsFlags::PrintTimestamps;
	rdi->thold_pt = 0.01f;
	rdi->thold_ptsum = 0.01f;
	rdi->language = makeLanguageKey( "en" );
	// The inactive strategy's parameters are marked with -1.
	// NOTE(review): the Greedy branch writes beam_search fields only, leaving rdi->greedy
	// zeroed by the memset — looks intentional (mirrors upstream), but worth confirming.
	switch( strategy )
	{
	case eSamplingStrategy::Greedy:
		rdi->beam_search.n_past = -1;
		rdi->beam_search.beam_width = -1;
		rdi->beam_search.n_best = -1;
		break;
	case eSamplingStrategy::BeamSearch:
		rdi->greedy.n_past = -1;
		rdi->beam_search.beam_width = 10;
		rdi->beam_search.n_best = 5;
		break;
	default:
		logError( u8"Unknown sampling strategy %i", (int)strategy );
		return E_INVALIDARG;
	}
	return S_OK;
}
// Retrieve the model used by this context, adding a COM reference for the caller.
// Returns OLE_E_BLANK when no model is attached.
HRESULT COMLIGHTCALL ContextImpl::getModel( iModel** pp )
{
	if( nullptr == pp )
		return E_POINTER;
	if( !modelPtr )
		return OLE_E_BLANK;
	iModel* const res = modelPtr;
	res->AddRef();
	*pp = res;
	return S_OK;
}
size_t ContextImpl::Segment::memoryUsage() const
{
return text.capacity() + vectorMemoryUse( tokens );
}
// Estimate the memory used by this context, packed into an __m128i:
// the low 64-bit lane counts system RAM bytes, the high lane counts VRAM
// (see logMemoryUse which extracts the lanes in that order).
__m128i ContextImpl::getMemoryUse() const
{
	// Misc. system RAM
	size_t cb = vectorMemoryUse( result_all );
	for( const auto& r : result_all )
		cb += r.memoryUsage();
	cb += vectorMemoryUse( prompt_past );
	cb += vectorMemoryUse( energy );
	cb += vectorMemoryUse( probs );
	cb += vectorMemoryUse( probs_id );
	cb += vectorMemoryUse( results.segments );
	cb += vectorMemoryUse( results.tokens );
	cb += spectrogram.memoryUsage();
	__m128i res = setLow_size( cb );
	// Add all the VRAM in the temporary buffers
	res = _mm_add_epi64( res, context.getMemoryUse() );
	return res;
}
namespace
{
	// Format a byte count into a human-friendly value + unit pair, using binary prefixes
	struct PrintedSize
	{
		double val;
		const char* unit;
		PrintedSize( int64_t cb )
		{
			if( cb < ( 1 << 10 ) )
			{
				val = (double)cb;
				unit = "bytes";
			}
			else if( cb < ( 1 << 20 ) )
			{
				val = (double)cb * ( 1.0 / ( 1 << 10 ) );
				unit = "KB";
			}
			else if( cb < ( 1 << 30 ) )
			{
				val = (double)cb * ( 1.0 / ( 1 << 20 ) );
				unit = "MB";
			}
			else
			{
				val = (double)cb * ( 1.0 / ( 1 << 30 ) );
				unit = "GB";
			}
		}
	};
	// Log one row of the memory usage table; low lane of cb = system RAM, high lane = VRAM
	static void __declspec( noinline ) logMemoryUse( const char* what, __m128i cb )
	{
		PrintedSize sys{ _mm_cvtsi128_si64( cb ) };
		PrintedSize vram{ _mm_extract_epi64( cb, 1 ) };
		logInfo( u8"%s\t%g %s RAM, %g %s VRAM", what, sys.val, sys.unit, vram.val, vram.unit );
	}
}
// Print profiler measurements, followed by a RAM/VRAM usage summary
// for the model, the context, and their total.
HRESULT COMLIGHTCALL ContextImpl::timingsPrint()
{
	profiler.print();
	// The D3D device must be current on this thread to query buffer sizes
	auto ts = device.setForCurrentThread();
	const __m128i memModel = model.getMemoryUse();
	const __m128i memContext = getMemoryUse();
	logInfo( u8" Memory Usage" );
	logMemoryUse( "Model", memModel );
	logMemoryUse( "Context", memContext );
	logMemoryUse( "Total", _mm_add_epi64( memModel, memContext ) );
	return S_OK;
}
// Discard all accumulated profiler measurements
HRESULT COMLIGHTCALL ContextImpl::timingsReset()
{
	profiler.reset();
	return S_OK;
}
// Expose the transcription results to the caller.
// With eResultFlags::NewObject a fresh COM object is created and the strings are moved
// into it; otherwise the context-owned `results` object is refreshed and returned.
HRESULT COMLIGHTCALL ContextImpl::getResults( eResultFlags flags, iTranscribeResult** pp ) const noexcept
{
	if( nullptr == pp )
		return E_POINTER;
	if( flags & eResultFlags::NewObject )
	{
		ComLight::CComPtr<ComLight::Object<TranscribeResult>> obj;
		CHECK( ComLight::Object<TranscribeResult>::create( obj ) );
		// moveStrings = true: the new object takes ownership of the segment text
		CHECK( makeResults( flags, *obj, true ) );
		obj.detach( pp );
		return S_OK;
	}
	else
	{
		CHECK( makeResults( flags, results, false ) );
		iTranscribeResult* res = &results;
		res->AddRef();
		*pp = res;
		return S_OK;
	}
}
// Convert whisper timestamp ticks (1/100 of a second) into MediaFoundation time
// (100-nanosecond units): each tick scales by 10'000'000 / 100 = 100'000.
inline int64_t scaleTime( int64_t wisperTicks )
{
	return MFllMulDiv( wisperTicks, 10'000'000, 100, 0 );
}
// Convert the internal result_all segments into the flat TranscribeResult representation.
// When moveStrings is true, segment text is moved (swapped) into `res`; otherwise the
// returned text pointers alias the internal strings and stay valid only while this
// context holds them.
HRESULT COMLIGHTCALL ContextImpl::makeResults( eResultFlags flags, TranscribeResult& res, bool moveStrings ) const noexcept
{
	const size_t segments = result_all.size();
	// Resize both vectors
	try
	{
		res.segments.resize( segments );
		if( flags & eResultFlags::Tokens )
		{
			// Count the total tokens across all segments, for a single allocation
			size_t tc = 0;
			for( const auto& s : result_all )
				tc += s.tokens.size();
			res.tokens.resize( tc );
		}
		else
			res.tokens.clear();
		res.segmentsText.clear();
		if( moveStrings )
			res.segmentsText.resize( segments );
	}
	catch( const std::bad_alloc& )
	{
		return E_OUTOFMEMORY;
	}
	const Whisper::Vocabulary& vocab = model.shared->vocab;
	const Vocabulary::id tokenEot = vocab.token_eot;
	size_t tokensSoFar = 0;
	for( size_t i = 0; i < segments; i++ )
	{
		sSegment& rdi = res.segments[ i ];
		const auto& rsi = result_all[ i ];
		if( moveStrings )
		{
			// NOTE(review): swapping through a const reference — rsi.text is presumably
			// declared mutable, since this method is const; verify in the header.
			res.segmentsText[ i ].swap( rsi.text );
			rdi.text = res.segmentsText[ i ].c_str();
		}
		else
			rdi.text = rsi.text.c_str();
		if( flags & eResultFlags::Timestamps )
		{
			// Offset the time relative to the start of the media
			rdi.time.begin = scaleTime( rsi.t0 ) + mediaTimeOffset;
			rdi.time.end = scaleTime( rsi.t1 ) + mediaTimeOffset;
		}
		else
			store16( &rdi.time, _mm_setzero_si128() );
		rdi.firstToken = (uint32_t)tokensSoFar;
		const size_t tc = rsi.tokens.size();
		rdi.countTokens = (uint32_t)tc;
		if( flags & eResultFlags::Tokens )
		{
			// NOTE(review): the inner `i`, `rdi` and `flags` deliberately shadow the
			// outer names; the parameter `flags` is only read before its shadow below.
			for( size_t i = 0; i < tc; i++ )
			{
				sToken& rdi = res.tokens[ tokensSoFar + i ];
				const auto& src = rsi.tokens[ i ];
				rdi.text = vocab.string( src.id );
				if( flags & eResultFlags::Timestamps )
				{
					// Offset the time relative to the start of the media
					rdi.time.begin = scaleTime( src.t0 ) + mediaTimeOffset;
					rdi.time.end = scaleTime( src.t1 ) + mediaTimeOffset;
				}
				else
					store16( &rdi.time, _mm_setzero_si128() );
				// Copy 4 floats with unaligned load and store instructions
				_mm_storeu_ps( &rdi.probability, _mm_loadu_ps( &src.p ) );
				rdi.id = src.id;
				uint32_t flags = 0;
				// IDs at or past end-of-text are special (non-text) tokens
				if( src.id >= tokenEot )
					flags |= (uint32_t)eTokenFlags::Special;
				rdi.flags = (eTokenFlags)flags;
			}
		}
		tokensSoFar += tc;
	}
	return S_OK;
}
// Split the last segment of result_all so that each resulting segment's text is at most
// max_len characters; ported from whisper_wrap_segment. Returns the count of segments
// the original one was split into.
int ContextImpl::wrapSegment( int max_len )
{
	// whisper_wrap_segment
	// Note: `segment` is a copy on purpose — result_all.back() is replaced inside the loop
	auto segment = result_all.back();
	int res = 1;
	int acc = 0;
	std::string text;
	const Whisper::Vocabulary& vocab = model.shared->vocab;
	const int tokenEot = vocab.token_eot;
	for( int i = 0; i < (int)segment.tokens.size(); i++ )
	{
		const auto& token = segment.tokens[ i ];
		// Special tokens carry no text
		if( token.id >= tokenEot )
			continue;
		const char* txt = vocab.string( token.id );
		const int cur = (int)strlen( txt );
		if( acc + cur > max_len && i > 0 )
		{
			// split here
			result_all.back().text = std::move( text );
			result_all.back().t1 = token.t0;
			result_all.back().tokens.resize( i );
			result_all.push_back( {} );
			result_all.back().t0 = token.t0;
			result_all.back().t1 = segment.t1;
			// add tokens [i, end] to the new segment
			result_all.back().tokens.insert( result_all.back().tokens.end(), segment.tokens.begin() + i, segment.tokens.end() );
			// Restart the scan over the freshly created segment; i = -1 so the
			// loop increment brings it back to 0
			acc = 0;
			text = "";
			segment = result_all.back();
			i = -1;
			res++;
		}
		else
		{
			acc += cur;
			text += txt;
		}
	}
	result_all.back().text = std::move( text );
	return res;
}
// Transcribe a complete in-memory audio buffer: compute the spectrogram up front,
// then run the shared implementation without a progress sink.
HRESULT COMLIGHTCALL ContextImpl::runFull( const sFullParams& params, const iAudioBuffer* buffer )
{
#if SAVE_DEBUG_TRACE
	Tracing::vector( "runFull.pcm.in", buffer->getPcmMono(), buffer->countSamples() );
#endif
	// Remember the media start time, to offset the result timestamps
	CHECK( buffer->getTime( mediaTimeOffset ) );
	auto profCompleteCpu = profiler.cpuBlock( eCpuBlock::RunComplete );
	{
		auto p = profiler.cpuBlock( eCpuBlock::Spectrogram );
		CHECK( spectrogram.pcmToMel( buffer, model.shared->filters, params.cpuThreads ) );
	}
	if( params.flag( eFullParamsFlags::TokenTimestamps ) )
	{
		// Reset the token-timestamp state, and precompute the signal energy envelope
		t_beg = 0;
		t_last = 0;
		tid_last = 0;
		computeSignalEnergy( energy, buffer, 32 );
	}
	try
	{
		sProgressSink progressSink{ nullptr, nullptr };
		return runFullImpl( params, progressSink, spectrogram );
	}
	catch( HRESULT hr )
	{
		return hr;
	}
}
// Transcribe audio pulled incrementally from a reader, computing the spectrogram lazily.
// Multi-threaded configurations use a dedicated background MEL producer thread.
// Token-level timestamps are not supported in this mode, because they need the complete
// PCM stream for the energy computation.
HRESULT COMLIGHTCALL ContextImpl::runStreamed( const sFullParams& params, const sProgressSink& progress, const iAudioReader* reader )
{
	if( params.flag( eFullParamsFlags::TokenTimestamps ) )
	{
		logError( u8"eFullParamsFlags.TokenTimestamps flag is not supported in streaming mode" );
		return E_NOTIMPL;
	}
	mediaTimeOffset = 0;
	auto profCompleteCpu = profiler.cpuBlock( eCpuBlock::RunComplete );
	try
	{
		if( params.cpuThreads > 1 )
		{
			MelStreamerThread mel{ model.shared->filters, profiler, reader, params.cpuThreads };
			return runFullImpl( params, progress, mel );
		}
		else
		{
			MelStreamerSimple mel{ model.shared->filters, profiler, reader };
			return runFullImpl( params, progress, mel );
		}
	}
	catch( HRESULT hr )
	{
		return hr;
	}
}
| 10,253
|
C++
|
.cpp
| 366
| 25.254098
| 149
| 0.689929
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,568
|
KeyValueBuffers.cpp
|
Const-me_Whisper/Whisper/Whisper/KeyValueBuffers.cpp
|
#include "stdafx.h"
#include "KeyValueBuffers.h"
#include "../D3D/createBuffer.h"
#include "../ML/mlUtils.h"
using namespace DirectCompute;
// Grow the FP16 GPU buffer to hold at least `size` elements; never shrinks.
// Previous contents are discarded when a reallocation happens.
void AttentionBuffer::resize( uint32_t size )
{
	if( size <= m_size )
		return;
	// Release the old buffer before creating the larger one
	buffer = nullptr;
	const size_t cb = sizeof( uint16_t ) * (size_t)size;
	check( createBuffer( eBufferUse::ReadWrite, cb, &buffer, nullptr, nullptr ) );
	m_size = size;
}
// Create a 1D FP16 tensor viewing `length` elements of this buffer starting at `offset`.
// Both SRV and UAV are created, so compute shaders can read and write through the view.
// Throws E_BOUNDS / E_INVALIDARG for bad arguments.
Tensor AttentionBuffer::view( uint32_t length, uint32_t offset ) const
{
	if( length + offset > m_size )
		throw E_BOUNDS;
	if( 0 == length )
		throw E_INVALIDARG;
	CComPtr<ID3D11ShaderResourceView> srv;
	CComPtr<ID3D11UnorderedAccessView> uav;
	CD3D11_SHADER_RESOURCE_VIEW_DESC srvDesc{ D3D11_SRV_DIMENSION_BUFFER, DXGI_FORMAT_R16_FLOAT, offset, length };
	check( device()->CreateShaderResourceView( buffer, &srvDesc, &srv ) );
	CD3D11_UNORDERED_ACCESS_VIEW_DESC uavDesc{ D3D11_UAV_DIMENSION_BUFFER, DXGI_FORMAT_R16_FLOAT, offset, length };
	check( device()->CreateUnorderedAccessView( buffer, &uavDesc, &uav ) );
	// 1D dense shape
	TensorShape shape;
	shape.ne = { length, 1, 1, 1 };
	shape.setDenseStrides();
	return Tensor( shape, srv, uav );
}
// Resize both attention buffers, keys and values, to the same element count
void KeyValueBuffers::resize( uint32_t size )
{
	keys.resize( size );
	values.resize( size );
}
// Fill the complete buffer with zeros on the GPU.
// Returns S_FALSE when the buffer was never allocated.
HRESULT AttentionBuffer::zeroMemory() const
{
	if( 0 == m_size )
		return S_FALSE;
	// Temporary UAV over the whole buffer, for the compute-shader memset
	CComPtr<ID3D11UnorderedAccessView> uav;
	CD3D11_UNORDERED_ACCESS_VIEW_DESC uavDesc{ D3D11_UAV_DIMENSION_BUFFER, DXGI_FORMAT_R16_FLOAT, 0, m_size };
	check( device()->CreateUnorderedAccessView( buffer, &uavDesc, &uav ) );
	try
	{
		DirectCompute::zeroMemory( uav, m_size );
		return S_OK;
	}
	catch( HRESULT hr )
	{
		return hr;
	}
}
// Clear both key and value buffers, failing fast on the first error
HRESULT KeyValueBuffers::zeroMemory() const
{
	CHECK( keys.zeroMemory() );
	CHECK( values.zeroMemory() );
	return S_OK;
}
| 1,728
|
C++
|
.cpp
| 58
| 27.827586
| 112
| 0.738397
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,569
|
Languages.cpp
|
Const-me_Whisper/Whisper/Whisper/Languages.cpp
|
#include "stdafx.h"
#include "Languages.h"
#include <atlcoll.h>
#include "../API/iContext.cl.h"
namespace
{
	// These structures are compiled into the DLL, in read only data section
	using Lang = Whisper::sLanguageEntry;
	static const Lang s_languageData[] =
	{
#include "languageCodez.inl"
	};
	using Whisper::makeLanguageKey;
	// Values for the hash map
	struct sLanguage
	{
		int id;
		const char* name;
	};
	// Hash map from packed language-code keys to { id, name } entries
	class LanguageIDs
	{
		CAtlMap<uint32_t, sLanguage> map;
		// Insert one entry, keyed by the packed language code.
		// Fixed: the key was previously truncated to uint16_t, dropping the upper two
		// characters of 3- and 4-letter codes; the map's key type is uint32_t.
		void add( const char* code, int id, const char* name )
		{
			assert( strlen( code ) <= 4 );
			const uint32_t key = makeLanguageKey( code );
			map.SetAt( key, sLanguage{ id, name } );
		}
	public:
		LanguageIDs() :
			map( 103u, 0.75f, 0.25f, 2.25f, 99 )
		{
			// The .inl table already stores precomputed keys
			for( const Lang& e : s_languageData )
				map.SetAt( e.key, sLanguage{ e.id, e.name } );
		};
		// Language id for a code string, or -1 when unknown
		int lookupId( const char* code ) const
		{
			const uint32_t key = makeLanguageKey( code );
			auto p = map.Lookup( key );
			return ( nullptr != p ) ? p->m_value.id : -1;
		}
		// Language id for a pre-packed key, or -1 when unknown
		int lookupKey( uint32_t key ) const
		{
			auto p = map.Lookup( key );
			return ( nullptr != p ) ? p->m_value.id : -1;
		}
		// Human-readable language name for a code string, or nullptr when unknown
		const char* lookupName( const char* code ) const
		{
			const uint32_t key = makeLanguageKey( code );
			auto p = map.Lookup( key );
			return ( nullptr != p ) ? p->m_value.name : nullptr;
		}
	};
	static const LanguageIDs g_table;
}
namespace Whisper
{
	// Language id for a code string, or -1 when unknown
	int lookupLanguageId( const char* code )
	{
		return g_table.lookupId( code );
	}
	// Language id for a pre-packed 4-byte key, or -1 when unknown
	int lookupLanguageId( uint32_t key )
	{
		return g_table.lookupKey( key );
	}
	// Language name for a code string, or nullptr when unknown
	const char* lookupLanguageName( const char* code )
	{
		return g_table.lookupName( code );
	}
	// Public API wrapper over lookupLanguageId
	int COMLIGHTCALL getLanguageId( const char* lang )
	{
		return lookupLanguageId( lang );
	}
	// Pack a wide-character language code into a 4-byte key, one byte per character,
	// low byte first. Returns UINT_MAX when the string has non-ASCII characters,
	// or when the resulting key is not in the table.
	uint32_t COMLIGHTCALL findLanguageKeyW( const wchar_t* lang )
	{
		uint32_t key = 0;
		uint32_t shift = 0;
		for( size_t i = 0; i < 4; i++, lang++, shift += 8 )
		{
			const wchar_t c = *lang;
			if( c == L'\0' )
				break;
			// Only ASCII characters fit into a single key byte
			if( c >= 0x80 )
				return UINT_MAX;
			uint32_t u32 = (uint8_t)c;
			u32 = u32 << shift;
			key |= u32;
		}
		if( g_table.lookupKey( key ) >= 0 )
			return key;
		return UINT_MAX;
	}
	// Validate a narrow language code; returns its packed key, or UINT_MAX when unknown
	uint32_t COMLIGHTCALL findLanguageKeyA( const char* lang )
	{
		const uint32_t key = makeLanguageKey( lang );
		if( g_table.lookupKey( key ) >= 0 )
			return key;
		return UINT_MAX;
	}
	// Expose the compiled-in, read-only table of all supported languages
	HRESULT COMLIGHTCALL getSupportedLanguages( sLanguageList& rdi )
	{
		rdi.length = sizeof( s_languageData ) / sizeof( s_languageData[ 0 ] );
		rdi.pointer = s_languageData;
		return S_OK;
	}
}
| 2,546
|
C++
|
.cpp
| 106
| 21.169811
| 73
| 0.660619
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,570
|
MelStreamer.cpp
|
Const-me_Whisper/Whisper/Whisper/MelStreamer.cpp
|
#include "stdafx.h"
#include "MelStreamer.h"
#include "../Utils/parallelFor.h"
using namespace Whisper;
// Construct the streamer over a pull-based audio reader.
// The base class computes MEL chunks on demand; threading is added by the derived class.
MelStreamer::MelStreamer( const Filters& filters, ProfileCollection& prof, const iAudioReader* iar ) :
	reader( iar ),
	melContext( filters ),
	profiler( prof )
{ }
// Discard queued chunks which precede the new stream offset `off`; the decoder only
// moves forward, so these are no longer needed. All queues are kept in sync,
// the stereo queue only when the source actually produces stereo.
void MelStreamer::dropOldChunks( size_t off )
{
	const bool hasStereo = reader.outputsStereo();
	for( size_t pos = streamStartOffset; pos < off; pos++ )
	{
		queueMel.pop_front();
		queuePcmMono.pop_front();
		if( hasStereo )
			queuePcmStereo.pop_front();
	}
	streamStartOffset = off;
}
// Make sure the mono PCM queue holds at least len + FFT_SIZE / FFT_STEP chunks,
// reading more from the source when needed. Returns S_OK when enough data is buffered,
// S_FALSE once the reader hit end of stream, E_EOF when the stream ended with nothing
// buffered, or a reader error code.
HRESULT MelStreamer::ensurePcmChunks( size_t len )
{
	if( readerEof )
		return queuePcmMono.empty() ? E_EOF : S_FALSE;
	const bool loadStereo = reader.outputsStereo();
	// Extra chunks are required because each FFT window spans FFT_SIZE samples
	const size_t neededChunks = len + FFT_SIZE / FFT_STEP;
	while( true )
	{
		if( queuePcmMono.size() >= neededChunks )
			return S_OK;
		// Pre-allocate queue slots, then read directly into them
		PcmMonoChunk& mono = queuePcmMono.emplace_back();
		PcmStereoChunk* stereo = loadStereo ? &queuePcmStereo.emplace_back() : nullptr;
		HRESULT hr = reader.readChunk( mono, stereo );
		if( SUCCEEDED( hr ) )
			continue;
		// The read failed: drop the slots allocated above
		queuePcmMono.pop_back();
		if( loadStereo )
			queuePcmStereo.pop_back();
		if( hr == E_EOF )
		{
			readerEof = true;
			return S_FALSE;
		}
		return hr;
	}
}
// Flatten the mono PCM queue, starting at chunk index `startOffset`, into the contiguous
// tempPcm scratch buffer. Returns the count of chunks copied.
size_t MelStreamer::serializePcm( size_t startOffset )
{
	const ptrdiff_t chunks = (ptrdiff_t)queuePcmMono.size() - (ptrdiff_t)startOffset;
	assert( chunks > 0 );
	tempPcm.resize( chunks * FFT_STEP );
	float* rdi = tempPcm.data();
	// Each chunk holds FFT_STEP mono FP32 samples
	for( auto it = queuePcmMono.begin() + startOffset; it != queuePcmMono.end(); it++ )
	{
		memcpy( rdi, it->mono.data(), FFT_STEP * 4 );
		rdi += FFT_STEP;
	}
	return chunks;
}
namespace
{
	// Transpose 4 columns of 80 MEL values (N_MEL) into destination rows `stride` floats
	// apart, while accumulating the element-wise maximum of everything loaded into vmax.
	__forceinline __m128 transpose4x80( __m128 vmax, const float* c0, const float* c1, const float* c2, const float* c3, float* rdi, size_t stride )
	{
		const float* const c0End = c0 + 80;
		for( ; c0 < c0End; c0 += 4, c1 += 4, c2 += 4, c3 += 4, rdi += stride * 4 )
		{
			__m128 r0 = _mm_loadu_ps( c0 );
			__m128 r1 = _mm_loadu_ps( c1 );
			__m128 r2 = _mm_loadu_ps( c2 );
			__m128 r3 = _mm_loadu_ps( c3 );
			// Fold all 16 loaded values into the running maximum
			__m128 ax01 = _mm_max_ps( r0, r1 );
			__m128 ax02 = _mm_max_ps( r2, r3 );
			__m128 ax = _mm_max_ps( ax01, ax02 );
			vmax = _mm_max_ps( vmax, ax );
			// 4x4 in-register transpose, then store the rows
			_MM_TRANSPOSE4_PS( r0, r1, r2, r3 );
			_mm_storeu_ps( rdi, r0 );
			_mm_storeu_ps( rdi + stride, r1 );
			_mm_storeu_ps( rdi + stride * 2, r2 );
			_mm_storeu_ps( rdi + stride * 3, r3 );
		}
		return vmax;
	}
	// Same as transpose4x80 but for a single remaining column
	__forceinline __m128 transpose80( __m128 vmax, const float* c0, float* rdi, size_t stride )
	{
		const float* const c0End = c0 + 80;
		for( ; c0 < c0End; c0 += 4, rdi += stride * 4 )
		{
			__m128 r0 = _mm_loadu_ps( c0 );
			vmax = _mm_max_ps( vmax, r0 );
			// Scatter the 4 lanes into 4 consecutive destination rows
			_mm_store_ss( rdi, r0 );
			*(int*)( rdi + stride ) = _mm_extract_ps( r0, 1 );
			*(int*)( rdi + stride * 2 ) = _mm_extract_ps( r0, 2 );
			*(int*)( rdi + stride * 3 ) = _mm_extract_ps( r0, 3 );
		}
		return vmax;
	}
	// Reduce the 4 lanes of the vector into a single scalar maximum
	__forceinline float horizontalMaximum( __m128 v )
	{
		v = _mm_max_ps( v, _mm_movehl_ps( v, v ) );
		v = _mm_max_ss( v, _mm_movehdup_ps( v ) );
		return _mm_cvtss_f32( v );
	}
}
// Build the output MEL slice: transpose `len` queued [N_MEL]-sized chunks starting at
// stream offset `off` into the [N_MEL][len] layout, then clamp + normalize the values
// the same way Spectrogram::pcmToMel does.
void MelStreamer::makeTransposedBuffer( size_t off, size_t len )
{
	// Resize the output
	assert( len <= queueMel.size() );
	outputMel.resize( len * N_MEL ); // N_MEL = 80
	// First pass, copy transposed MEL data, and compute the maximum
	const size_t lengthAligned = ( len / 4 ) * 4;
	__m128 vMax = _mm_set1_ps( 1e-20f );
	float* rdi = outputMel.data();
	size_t i;
	for( i = 0; i < lengthAligned; i += 4, rdi += 4 )
	{
		vMax = transpose4x80( vMax,
			queueMel[ i ].data(),
			queueMel[ i + 1 ].data(),
			queueMel[ i + 2 ].data(),
			queueMel[ i + 3 ].data(),
			rdi, len );
	}
	// Remainder columns, one at a time
	for( ; i < len; i++, rdi++ )
		vMax = transpose80( vMax, queueMel[ i ].data(), rdi, len );
	// Second pass, clamping and normalization
	float mmax;
	const size_t bufferEnd = off + len;
	if( lastBufferEnd != bufferEnd )
	{
		// Store maximum value in this class, along with the end sample index
		mmax = horizontalMaximum( vMax );
		lastBufferEnd = bufferEnd;
		lastBufferMax = mmax;
	}
	else
	{
		// We're probably at the end of the stream, the caller asked for a smaller slice of the samples with the same end as the last time.
		// Discard the computed maximum value, and instead use the number stored in this class
		mmax = lastBufferMax;
	}
	mmax -= 8.0f;
	vMax = _mm_set1_ps( mmax );
	rdi = outputMel.data();
	float* const rdiEnd = rdi + outputMel.size();
	// value = ( max( value, mmax ) + 4 ) / 4, vectorized 4 floats at a time
	const __m128 add = _mm_set1_ps( 4 );
	const __m128 mul = _mm_set1_ps( 1.0f / 4.0f );
	for( ; rdi < rdiEnd; rdi += 4 )
	{
		__m128 v = _mm_loadu_ps( rdi );
		v = _mm_max_ps( v, vMax );
		v = _mm_add_ps( v, add );
		v = _mm_mul_ps( v, mul );
		_mm_storeu_ps( rdi, v );
	}
}
// Single-threaded implementation: compute any missing MEL chunks inline on the calling
// thread, then return the transposed + normalized slice [ off, off + len ).
// Forward-only: seeking backwards past already-dropped chunks is an error.
HRESULT MelStreamerSimple::makeBuffer( size_t off, size_t len, const float** buffer, size_t& stride ) noexcept
{
	if( off < streamStartOffset )
	{
		logError( u8"MelStreamer doesn't support backwards seeks" );
		return E_UNEXPECTED;
	}
	if( off > streamStartOffset )
	{
		// The model wants to advance forward, drop now irrelevant chunks of data
		dropOldChunks( off );
	}
	// Compute all these MEL chunks
	const size_t availableMel = queueMel.size();
	if( availableMel < len )
	{
		CHECK( ensurePcmChunks( len ) );
		const size_t pcmChunks = serializePcm( availableMel );
		const size_t missingMelChunks = len - availableMel;
		size_t i;
		const size_t loop1 = std::min( missingMelChunks, pcmChunks );
		{
			auto profilerBlock = profiler.cpuBlock( eCpuBlock::Spectrogram );
			for( i = 0; i < loop1; i++ )
			{
				// if( readerEof && i + 1 == loop1 ) __debugbreak();
				auto& arr = queueMel.emplace_back();
				const float* sourcePcm = tempPcm.data() + i * FFT_STEP;
				size_t availableChunks = pcmChunks - i;
				size_t availableFloats = availableChunks * FFT_STEP;
				melContext.fft( arr, sourcePcm, availableFloats );
			}
		}
		// Past the end of the stream: pad with silent (all-zero) chunks
		for( ; i < missingMelChunks; i++ )
		{
			assert( readerEof );
			auto& arr = queueMel.emplace_back();
			memset( arr.data(), 0, N_MEL * 4 );
		}
	}
	// Produce the result
	makeTransposedBuffer( off, len );
	stride = len;
	*buffer = outputMel.data();
	return S_OK;
}
// Start the background MEL producer. Besides the dedicated streamer thread, up to
// countThreads - 1 extra FFT contexts are prepared for the thread pool.
MelStreamerThread::MelStreamerThread( const Filters& filters, ProfileCollection& profiler, const iAudioReader* iar, int countThreads ) :
	MelStreamer( filters, profiler, iar ),
	workerThreads( countThreads )
{
	if( workerThreads > 1 )
	{
		check( ThreadPoolWork::create() );
		// The base-class melContext serves worker #0, hence workerThreads - 1 extras
		melContextsWorkers.reserve( workerThreads - 1 );
		for( int i = 1; i < workerThreads; i++ )
			melContextsWorkers.emplace_back( filters );
	}
	InitializeConditionVariable( &wakeMain );
	InitializeConditionVariable( &wakeBackground );
	threadStatus = eThreadStatus::NotStarted;
	const HANDLE h = CreateThread( nullptr, 0, &threadProcStatic, this, 0, nullptr );
	if( nullptr == h )
		throw HRESULT_FROM_WIN32( GetLastError() );
	// The CHandle wrapper owns the thread handle from here on
	threadHandle.Attach( h );
}
// RAII lock over the ATL critical section
using Lock = CComCritSecLock<CComAutoCriticalSection>;
// Target count of MEL chunks to keep pre-computed ahead of the consumer
constexpr ptrdiff_t prebufferChunks = 3000 * 2;
// Maximum count of chunks the background thread produces per wakeup
constexpr ptrdiff_t chunksPerWakeup = 512;
// Below this many chunks per thread, the thread pool is not worth the overhead
constexpr ptrdiff_t minChunksPerThread = 64;
// Body of the background MEL producer thread.
// Keeps up to prebufferChunks MEL chunks computed ahead of the consumer, idling when the
// queue is full, and waking the main thread whenever new chunks are published.
// Returns S_OK when the complete stream was produced, S_FALSE on a shutdown request.
// Raw EnterCriticalSection / LeaveCriticalSection calls are used deliberately: the lock
// is released around the expensive FFT work, which doesn't map onto scoped RAII locking.
HRESULT MelStreamerThread::threadMain()
{
	pendingChunks.reserve( chunksPerWakeup );
	EnterCriticalSection( &m_cs.m_sec );
	threadStatus = eThreadStatus::Working;
	while( true )
	{
		if( shuttingDown )
		{
			LeaveCriticalSection( &m_cs.m_sec );
			return S_FALSE;
		}
		// Count of available MEL chunks
		const ptrdiff_t availableMel = queueMel.size();
		if( availableMel >= prebufferChunks )
		{
			// Buffer is full; sleep until the consumer drains some chunks
			threadStatus = eThreadStatus::Idle;
			SleepConditionVariableCS( &wakeBackground, &m_cs.m_sec, INFINITE );
			threadStatus = eThreadStatus::Working;
			continue;
		}
		// Count of MEL chunks remaining in the whole stream
		// availableMel of them are already on the queue
		const ptrdiff_t remainingMel = (ptrdiff_t)getLength() - (ptrdiff_t)streamStartOffset;
		LeaveCriticalSection( &m_cs.m_sec );
		const ptrdiff_t missingChunks = prebufferChunks - availableMel;
		ptrdiff_t chunks = std::min( missingChunks, chunksPerWakeup );
		chunks = std::min( chunks, remainingMel - availableMel );
		if( chunks <= 0 )
			return S_OK; // This thread has produced all chunks of the stream
		CHECK( ensurePcmChunks( availableMel + chunks ) );
		const size_t pcmChunks = serializePcm( availableMel );
		if( 0 == pcmChunks )
			return S_OK;
		pendingChunks.clear();
		chunks = std::min( chunks, (ptrdiff_t)pcmChunks );
		// Compute the FFTs outside the lock
		{
			auto profilerBlock = profiler.cpuBlock( eCpuBlock::Spectrogram );
			if( this->workerThreads <= 1 || chunks < minChunksPerThread * 2 )
			{
				// Thread pool disabled with a setting, or not enough work for the thread pool
				for( ptrdiff_t i = 0; i < chunks; i++ )
				{
					MelChunk& arr = pendingChunks.emplace_back();
					const float* sourcePcm = tempPcm.data() + i * FFT_STEP;
					size_t availableChunks = pcmChunks - i;
					size_t availableFloats = availableChunks * FFT_STEP;
					melContext.fft( arr, sourcePcm, availableFloats );
				}
			}
			else
			{
				// Use thread pool for these FFTs
				pendingChunks.resize( chunks );
				int nth = (int)( ( chunks + minChunksPerThread - 1 ) / minChunksPerThread );
				nth = std::min( nth, this->workerThreads );
				assert( nth > 1 );
				this->fftChunks = (int)chunks;
				this->fftThreads = nth;
				CHECK( ThreadPoolWork::parallelFor( nth ) );
			}
		}
		// Publish the computed chunks under the lock, then wake the consumer
		EnterCriticalSection( &m_cs.m_sec );
		if( shuttingDown )
		{
			LeaveCriticalSection( &m_cs.m_sec );
			return S_FALSE;
		}
		for( const auto& a : pendingChunks )
			queueMel.push_back( a );
		LeaveCriticalSection( &m_cs.m_sec );
		WakeAllConditionVariable( &wakeMain );
		pendingChunks.clear();
		EnterCriticalSection( &m_cs.m_sec );
	}
}
// Thread pool callback: compute this worker's contiguous slice of pendingChunks from
// the serialized PCM in tempPcm. Worker #0 uses the base-class FFT context, the others
// use the per-worker contexts created in the constructor.
HRESULT MelStreamerThread::threadPoolCallback( int ith ) noexcept
{
	SpectrogramContext& ctx = ( 0 != ith ) ? melContextsWorkers[ ith - 1 ] : melContext;
	// Figure out the slice of the chunks to generate in this thread
	const int nth = this->fftThreads;
	const int chunks = this->fftChunks;
	const int i0 = ( ith * chunks ) / nth;
	const int i1 = ( ( ith + 1 ) * chunks ) / nth;
	// Run these FFTs
	const size_t pcmChunks = tempPcm.size() / FFT_STEP;
	for( int i = i0; i < i1; i++ )
	{
		MelChunk& arr = pendingChunks[ i ];
		const float* sourcePcm = tempPcm.data() + i * FFT_STEP;
		size_t availableChunks = pcmChunks - i;
		size_t availableFloats = availableChunks * FFT_STEP;
		ctx.fft( arr, sourcePcm, availableFloats );
	}
	return S_OK;
}
// Background thread body: runs threadMain(), maps exceptions into HRESULT codes,
// publishes the final thread status under the lock, and wakes the main thread.
HRESULT MelStreamerThread::run() noexcept
{
	HRESULT status = S_OK;
	try
	{
		status = threadMain();
	}
	catch( HRESULT hr )
	{
		status = hr;
	}
	catch( const std::bad_alloc& )
	{
		status = E_OUTOFMEMORY;
	}
	catch( const std::exception& )
	{
		status = E_FAIL;
	}
	{
		Lock lk( m_cs );
		if( SUCCEEDED( status ) )
			threadStatus = eThreadStatus::Completed;
		else
			threadStatus = eThreadStatus::Failed;
	}
	// Especially when things fail, we want to wake the main thread up, so it's aware of the situation.
	WakeAllConditionVariable( &wakeMain );
	return status;
}
// Raw Win32 thread entry point; names the thread for debuggers, then forwards to run()
DWORD __stdcall MelStreamerThread::threadProcStatic( void* lpParameter )
{
	setCurrentThreadName( "Whisper.dll MEL Streamer Thread" );
	MelStreamerThread& streamer = *static_cast<MelStreamerThread*>( lpParameter );
	const HRESULT hr = streamer.run();
	return (DWORD)hr;
}
// Consumer-side entry point: waits until the background thread has produced at least
// `len` MEL chunks starting at stream offset `off`, then returns the transposed +
// normalized slice. Forward-only; backwards seeks past dropped chunks are an error.
HRESULT MelStreamerThread::makeBuffer( size_t off, size_t len, const float** buffer, size_t& stride ) noexcept
{
	bool wakeThread = false;
	{
		Lock lock( m_cs );
		if( off < streamStartOffset )
		{
			logError( u8"MelStreamer doesn't support backwards seeks" );
			return E_UNEXPECTED;
		}
		if( off > streamStartOffset )
		{
			// The model wants to advance forward, drop now irrelevant chunks of data
			dropOldChunks( off );
			// Freed queue space: worth waking the producer, once outside the lock
			wakeThread = ( threadStatus == eThreadStatus::Working || threadStatus == eThreadStatus::Idle );
		}
		// Wait for the background thread to produce the requested chunks
		while( true )
		{
			const size_t availableMel = queueMel.size();
			if( availableMel >= len )
				break;
			const eThreadStatus ts = threadStatus;
			if( ts == eThreadStatus::Working || ts == eThreadStatus::Idle )
			{
				WakeAllConditionVariable( &wakeBackground );
				SleepConditionVariableCS( &wakeMain, &m_cs.m_sec, INFINITE );
				continue;
			}
			if( ts == eThreadStatus::Failed )
			{
				// Propagate the HRESULT the thread exited with
				DWORD code;
				if( GetExitCodeThread( threadHandle, &code ) )
					return (HRESULT)code;
				else
					return HRESULT_FROM_WIN32( GetLastError() );
			}
			assert( ts == eThreadStatus::Completed );
			break;
		}
		// Stream is over but the caller wants more: pad with silent chunks
		if( queueMel.size() < len )
		{
			assert( readerEof || threadStatus == eThreadStatus::Failed );
			while( queueMel.size() < len )
			{
				auto& arr = queueMel.emplace_back();
				memset( arr.data(), 0, N_MEL * 4 );
			}
		}
		// Produce the result
		makeTransposedBuffer( off, len );
	} // Unlock the critical section
	stride = len;
	*buffer = outputMel.data();
	if( wakeThread )
		WakeAllConditionVariable( &wakeBackground );
	return S_OK;
}
// Ask the background thread to stop, and wait for it briefly.
MelStreamerThread::~MelStreamerThread()
{
	if( !threadHandle )
		return;
	{
		Lock lock( m_cs );
		// Fixed: an Idle thread is still alive, sleeping on wakeBackground; the old code
		// only handled Working, returning early and leaving the thread running over a
		// destroyed object. Completed / Failed threads have already exited.
		if( threadStatus != eThreadStatus::Working && threadStatus != eThreadStatus::Idle )
			return;
		shuttingDown = true;
	}
	// Fixed: without this wake, a thread sleeping on the condition variable would never
	// observe shuttingDown, and the 100ms wait below would always expire.
	WakeAllConditionVariable( &wakeBackground );
	DWORD res = WaitForSingleObject( threadHandle, 100 );
	if( res == WAIT_OBJECT_0 )
		return;
	// TODO: log a warning
}
// Copy a slice of stereo PCM from the queue, measured in FFT_STEP-sized chunks.
// `offset` is absolute within the stream; must not precede the current queue start.
// The portion of the requested range past the end of the queue is zero-filled.
HRESULT MelStreamer::copyStereoPcm( size_t offset, size_t length, std::vector<StereoSample>& buffer ) const
{
	if( queuePcmStereo.empty() )
		return OLE_E_BLANK;
	if( offset < streamStartOffset )
	{
		logError( u8"MelStreamer doesn't support backwards seek" );
		return E_UNEXPECTED;
	}
	// Offset relative to the first chunk on the queue
	const size_t off = offset - streamStartOffset;
	if( off >= queuePcmStereo.size() )
		return E_BOUNDS;
	// Resize the output buffer
	try
	{
		buffer.resize( length * FFT_STEP );
	}
	catch( const std::bad_alloc& )
	{
		return E_OUTOFMEMORY;
	}
	StereoSample* rdi = buffer.data();
	// Copy PCM chunks from the queue; 8 bytes = one stereo FP32 sample
	const size_t lengthToCopy = std::min( length, queuePcmStereo.size() - off );
	for( size_t i = 0; i < lengthToCopy; i++, rdi += FFT_STEP )
	{
		const float* rsi = queuePcmStereo[ i + off ].stereo.data();
		memcpy( rdi, rsi, 8 * FFT_STEP );
	}
	// If needed, write zeros to the tail
	if( lengthToCopy == length )
		return S_OK;
	// Fixed: the byte count was missing the sizeof( StereoSample ) factor, so only 1/8th
	// of the tail was actually cleared, leaving uninitialized garbage in the rest.
	memset( rdi, 0, ( length - lengthToCopy ) * FFT_STEP * sizeof( StereoSample ) );
	return S_OK;
}
| 14,108
|
C++
|
.cpp
| 462
| 27.634199
| 145
| 0.679558
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,571
|
ModelBuffers.clone.cpp
|
Const-me_Whisper/Whisper/Whisper/ModelBuffers.clone.cpp
|
#include <stdafx.h>
#include "ModelBuffers.h"
#include "../ML/mlUtils.h"
namespace
{
	using namespace DirectCompute;

	// Clone one tensor: copy the descriptor, then attach a freshly created
	// shader resource view for it
	HRESULT clone( Tensor& dest, const Tensor& source )
	{
		CComPtr<ID3D11ShaderResourceView> view;
		CHECK( cloneResourceView( source, &view ) );
		dest = source;
		dest.setGpuViews( view );
		return S_OK;
	}
	// Clone the weight + bias pair of a layer parameter
	HRESULT clone( TensorPair& dest, const TensorPair& source )
	{
		CHECK( clone( dest.w, source.w ) );
		CHECK( clone( dest.b, source.b ) );
		return S_OK;
	}
	// Clone every tensor of one audio-encoder transformer layer
	HRESULT clone( LayerEncoder& dest, const LayerEncoder& source )
	{
		CHECK( clone( dest.attnLn0, source.attnLn0 ) );
		CHECK( clone( dest.attnLn1, source.attnLn1 ) );
		CHECK( clone( dest.attnQuery, source.attnQuery ) );
		CHECK( clone( dest.attnKey, source.attnKey ) );
		CHECK( clone( dest.attnValue, source.attnValue ) );
		CHECK( clone( dest.mlpLn, source.mlpLn ) );
		CHECK( clone( dest.mlp0, source.mlp0 ) );
		CHECK( clone( dest.mlp1, source.mlp1 ) );
		return S_OK;
	}
	// Clone every tensor of one text-decoder transformer layer, including the
	// cross-attention parameters
	HRESULT clone( LayerDecoder& dest, const LayerDecoder& source )
	{
		CHECK( clone( dest.attnLn0, source.attnLn0 ) );
		CHECK( clone( dest.attnLn1, source.attnLn1 ) );
		CHECK( clone( dest.attnQuery, source.attnQuery ) );
		CHECK( clone( dest.attnKey, source.attnKey ) );
		CHECK( clone( dest.attnValue, source.attnValue ) );
		CHECK( clone( dest.crossAttnLn0, source.crossAttnLn0 ) );
		CHECK( clone( dest.crossAttnLn1, source.crossAttnLn1 ) );
		CHECK( clone( dest.crossAttnQuery, source.crossAttnQuery ) );
		CHECK( clone( dest.crossAttnKey, source.crossAttnKey ) );
		CHECK( clone( dest.crossAttnValue, source.crossAttnValue ) );
		CHECK( clone( dest.mlpLn, source.mlpLn ) );
		CHECK( clone( dest.mlp0, source.mlp0 ) );
		CHECK( clone( dest.mlp1, source.mlp1 ) );
		return S_OK;
	}
	// Clone a vector of layers, element by element
	template<class E>
	HRESULT clone( std::vector<E>& dest, const std::vector<E>& source )
	{
		const size_t count = source.size();
		try
		{
			dest.resize( count );
		}
		catch( const std::bad_alloc& )
		{
			return E_OUTOFMEMORY;
		}
		for( size_t i = 0; i < count; i++ )
			CHECK( clone( dest[ i ], source[ i ] ) );
		return S_OK;
	}
	// Clone the complete set of audio-encoder tensors
	HRESULT clone( EncoderBuffers& dest, const EncoderBuffers& source )
	{
		CHECK( clone( dest.layers, source.layers ) );
		CHECK( clone( dest.positionalEmbedding, source.positionalEmbedding ) );
		CHECK( clone( dest.conv1, source.conv1 ) );
		CHECK( clone( dest.conv2, source.conv2 ) );
		CHECK( clone( dest.lnPost, source.lnPost ) );
		return S_OK;
	}
	// Clone the complete set of text-decoder tensors
	HRESULT clone( DecoderBuffers& dest, const DecoderBuffers& source )
	{
		CHECK( clone( dest.positionalEmbedding, source.positionalEmbedding ) );
		CHECK( clone( dest.tokenEmbedding, source.tokenEmbedding ) );
		CHECK( clone( dest.ln, source.ln ) );
		CHECK( clone( dest.layers, source.layers ) );
		return S_OK;
	}
}
// Clone all encoder and decoder tensors from `rsi` into this instance.
// NOTE(review): cloneResourceView presumably wraps the same GPU resource in a
// new view rather than copying VRAM — confirm in mlUtils.
HRESULT ModelBuffers::createClone( const ModelBuffers& rsi )
{
	CHECK( clone( enc, rsi.enc ) );
	CHECK( clone( dec, rsi.dec ) );
	return S_OK;
}
| 2,730
|
C++
|
.cpp
| 89
| 27.977528
| 69
| 0.679863
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,572
|
voiceActivityDetection.cpp
|
Const-me_Whisper/Whisper/Whisper/voiceActivityDetection.cpp
|
#include "stdafx.h"
#include "voiceActivityDetection.h"
using namespace Whisper;
// Initially ported (poorly) from there https://github.com/panmasuo/voice-activity-detection MIT license
// The code is based on that article:
// https://www.researchgate.net/publication/255667085_A_simple_but_efficient_real-time_voice_activity_detection_algorithm
// Primary decision thresholds from the reference paper: frame energy, dominant
// frequency (Hz), and spectral flatness measure.
inline VAD::Feature VAD::defaultPrimaryThresholds()
{
	Feature thresholds;
	thresholds.energy = 40;
	thresholds.F = 185;
	thresholds.SFM = 5;
	return thresholds;
}
VAD::VAD() :
	primThresh( defaultPrimaryThresholds() )
{
	// Scratch buffer for the in-place FFT, FFT_POINTS complex values
	fft_signal = std::make_unique<cplx[]>( FFT_POINTS );
}
// Recursive radix-2 decimation FFT; `buf` and `out` swap roles at each level of
// recursion, and the final result ends up in `buf`. `n` must be a power of two.
inline void VAD::fft( cplx* buf, cplx* out, size_t n, size_t step )
{
	if( step < n )
	{
		fft( out, buf, n, step * 2 );
		fft( out + step, buf + step, n, step * 2 );
		for( size_t i = 0; i < n; i += 2 * step )
		{
			// cplx t = cexp(-I * M_PI * i / n) * out[i + step];
			// using namespace std::complex_literals;
			const float mul = (float)M_PI * (float)(int)i / (float)(int)n;
			// cplx t0 = std::exp( -1.0if * mul ) * out[ i + step ];
			// Twiddle factor e^(-i*mul), computed with the DirectXMath scalar sin/cos
			float sine, cosine;
			DirectX::XMScalarSinCos( &sine, &cosine, -mul );
			const cplx exponent{ cosine, sine };
			cplx t = exponent * out[ i + step ];
			buf[ i / 2 ] = out[ i ] + t;
			buf[ ( i + n ) / 2 ] = out[ i ] - t;
		}
	}
}
// In-place FFT of fft_signal. The method can be `const` because the samples
// live behind a unique_ptr (shallow const); the pointed-to buffer is mutated.
void VAD::fft() const
{
	cplx out[ FFT_POINTS ];
	memcpy( &out[ 0 ], fft_signal.get(), FFT_POINTS * sizeof( cplx ) );
	fft( fft_signal.get(), out, FFT_POINTS, 1 );
}
// Scale factor converting normalized float PCM into int16-range magnitudes
constexpr float mulInt16FromFloat = 32768.0;

// Single-precision square root computed with the SSE scalar instruction
inline float squareRoot( float x )
{
	return _mm_cvtss_f32( _mm_sqrt_ss( _mm_set_ss( x ) ) );
}
float VAD::computeEnergy( const float* rsi )
{
// calculate_energy
double sum = 0;
for( size_t i = 0; i < FFT_POINTS; i++ )
{
float f = rsi[ i ];
f *= mulInt16FromFloat;
f *= f;
sum += f;
}
return squareRoot( (float)( sum * ( 1.0 / FFT_POINTS ) ) );
}
float VAD::computeDominant( const cplx* spectrum )
{
// calculate_dominant, reworked heavily
float maxMagSquared = 0;
int maxFreq = 0;
for( int i = 0; i < FFT_POINTS / 2; i++ )
{
const float real = (float)spectrum[ i ].real();
const float imag = (float)spectrum[ i ].imag();
float sq = real * real + imag * imag;
if( sq <= maxMagSquared )
continue;
maxMagSquared = sq;
maxFreq = i;
}
return (float)maxFreq * FFT_STEP;
}
// calculate_sfm: spectral flatness = geometric mean / arithmetic mean of the
// magnitude spectrum, returned as negated dB (higher = more tonal).
// NOTE(review): a zero-magnitude bin makes std::log( sig ) = -inf; presumably
// real audio never produces exact zeros here — confirm.
float VAD::computreSpectralFlatnessMeasure( const cplx* spectrum )
{
	double sum_ari = 0;
	double sum_geo = 0;
	for( size_t i = 0; i < FFT_POINTS; i++ )
	{
		// sig = cabsf( spectrum[ i ] );
		float sig = std::abs( spectrum[ i ] );
		sum_ari += sig;
		// Geometric mean is accumulated in log space to avoid overflow
		sum_geo += std::log( sig );
	}
	sum_ari = sum_ari / FFT_POINTS;
	sum_geo = std::exp( sum_geo / FFT_POINTS );
	return -10.0f * std::log10f( (float)( sum_geo / sum_ari ) );
}
// Reset the detection state; memset also wipes currThresh, so it's restored
// from the primary thresholds afterwards
void VAD::clear()
{
	memset( &state, 0, sizeof( State ) );
	state.currThresh = primThresh;
}
// Incremental voice activity detection over `length` float PCM samples.
// Returns the sample offset just past the last frame classified as speech.
// Detection state persists in `state` across calls, so the same buffer can be
// re-scanned as more audio gets appended — only new frames are processed.
size_t VAD::detect( const float* rsi, size_t length )
{
	// The cryptic numbers in the comments are from section 3 "Proposed VAD Algorithm" of the article, on page 2550, on the right
	const size_t frames = length / FFT_POINTS;
	if( frames <= 0 )
	{
		clear();
		return 0;
	}
	// Load detection state from the field
	Feature currThresh = state.currThresh;
	Feature minFeature = state.minFeature;
	Feature curr = state.curr;
	size_t lastSpeech = state.lastSpeech;
	float silenceRun = state.silenceRun;
	size_t i = state.i;
	// Run the loop just on the [ state.i .. frames ] slice of the input PCM
	rsi += i * FFT_POINTS;
	for( ; i < frames; i++, rsi += FFT_POINTS )
	{
		// 3-2 calculate FFT
		for( size_t j = 0; j < FFT_POINTS; j++ )
		{
			const float re = rsi[ j ] * mulInt16FromFloat;
			fft_signal[ j ] = { re, 0.0f };
		}
		fft();
		// 3-1 + 3-2 calculate features
		curr.energy = computeEnergy( rsi );
		curr.F = computeDominant( fft_signal.get() );
		curr.SFM = computreSpectralFlatnessMeasure( fft_signal.get() );
		// 3-3 calculate minimum value for first 30 frames
		if( i == 0 )
			minFeature = curr;
		else if( i < 30 )
		{
			minFeature.energy = std::min( minFeature.energy, curr.energy );
			minFeature.F = std::min( minFeature.F, curr.F );
			minFeature.SFM = std::min( minFeature.SFM, curr.SFM );
		}
		// 3-4 set thresholds; only the energy threshold adapts, F and SFM stay primary
		currThresh.energy = primThresh.energy * std::log10f( minFeature.energy );
		// 3-5 calculate decision: vote on the three features
		uint8_t counter = 0;
		if( ( curr.energy - minFeature.energy ) >= currThresh.energy )
			counter = 1;
		if( ( curr.F - minFeature.F ) >= currThresh.F )
			counter++;
		if( ( curr.SFM - minFeature.SFM ) >= currThresh.SFM )
			counter++;
		if( counter > 1 )
		{
			// 3-6 If counter > 1 mark the current frame as speech
			lastSpeech = ( i + 1 ) * FFT_POINTS;
			silenceRun = 0.0f;
		}
		else
		{
			silenceRun += 1.0f;
			// 3-7 If current frame is marked as silence, update the energy minimum value
			minFeature.energy = ( ( silenceRun * minFeature.energy ) + curr.energy ) / ( silenceRun + 1 );
		}
		// 3-8
		currThresh.energy = primThresh.energy * std::log10f( minFeature.energy );
	}
	// Store the updated detection state back into that field
	state.currThresh = currThresh;
	state.minFeature = minFeature;
	state.curr = curr;
	state.lastSpeech = (uint32_t)lastSpeech;
	state.silenceRun = silenceRun;
	state.i = (uint32_t)i;
	return lastSpeech;
}
| 5,254
|
C++
|
.cpp
| 178
| 27.005618
| 126
| 0.654653
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,573
|
ContextImpl.cpp
|
Const-me_Whisper/Whisper/Whisper/ContextImpl.cpp
|
#include "stdafx.h"
#include "ContextImpl.h"
#include "Languages.h"
#include "../Utils/Trace/tracing.h"
using namespace Whisper;
// Capture the compute device and the loaded model weights; `modelPointer` keeps
// a reference to the owning COM model object so the weights outlive this context
ContextImpl::ContextImpl( const DirectCompute::Device& dev, const WhisperModel& modelData, iModel* modelPointer ) :
	device( dev ),
	model( modelData ),
	modelPtr( modelPointer ),
	context( modelData, profiler ),
	profiler( modelData )
{ }
#define WHISPER_CHUNK_SIZE 30
// Run the audio encoder over the spectrogram window starting at `seek`
// (measured in 10ms mel frames). Ported from the whisper_encode() function.
HRESULT ContextImpl::encode( iSpectrogram& mel, int seek )
{
	auto prof = profiler.cpuBlock( eCpuBlock::Encode );
	// whisper_encode
	using namespace DirectCompute;
	// Gather encoder hyper-parameters from the model; exp_n_audio_ctx, when
	// positive, overrides the audio context length (set from sFullParams.audio_ctx)
	sEncodeParams ep;
	ep.n_ctx = ( exp_n_audio_ctx > 0 ) ? exp_n_audio_ctx : model.parameters.n_audio_ctx;
	ep.n_mels = model.parameters.n_mels;
	ep.mel_offset = seek;
	ep.layersCount = model.parameters.n_audio_layer;
	ep.n_state = model.parameters.n_audio_state;
	ep.n_head = model.parameters.n_audio_head;
	ep.n_audio_ctx = model.parameters.n_audio_ctx;
	ep.n_text_state = model.parameters.n_text_state;
	ep.n_text_layer = model.parameters.n_text_layer;
	ep.n_text_ctx = model.parameters.n_text_ctx;
	try
	{
		auto cur = context.encode( mel, ep );
		Tracing::tensor( "encode-out", cur );
		return S_OK;
	}
	catch( HRESULT hr )
	{
		// GPU-level failures are thrown as HRESULT codes; surface them to the caller
		return hr;
	}
}
// Run the text decoder on `length` tokens, with `n_past` tokens of KV cache
// already populated; the per-vocabulary probabilities land in `probs`.
// Ported from the whisper_decode() function.
HRESULT ContextImpl::decode( const int* tokens, size_t length, int n_past, int threads )
{
	// whisper_decode
	using namespace DirectCompute;
	sDecodeParams dp;
	dp.n_state = model.parameters.n_audio_state;
	dp.n_head = model.parameters.n_audio_head;
	dp.n_ctx = model.parameters.n_text_ctx;
	dp.n_past = n_past;
	dp.M = exp_n_audio_ctx > 0 ? exp_n_audio_ctx : model.parameters.n_audio_ctx;
	dp.n_text_layer = model.parameters.n_text_layer;
	dp.n_vocab = model.parameters.n_vocab;
	try
	{
		context.decode( tokens, (int)length, dp, probs, threads );
		return S_OK;
	}
	catch( HRESULT hr )
	{
		// GPU-level failures are thrown as HRESULT codes; surface them to the caller
		return hr;
	}
}
// the most basic sampling scheme - select the top token
// `probs` is one row of n_vocab probabilities; `force_timestamp` forces picking
// among timestamp tokens; `is_initial` restricts the first timestamp to <= 100.
sTokenData ContextImpl::sampleBest( const float* probs, bool force_timestamp, bool is_initial )
{
	// whisper_sample_best
	const Vocabulary& vocab = model.shared->vocab;
	sTokenData result = { 0 };
	size_t n_logits = vocab.size();
	// Pair every probability with its token id so we can sort / mask in place
	probs_id.clear();
	probs_id.reserve( n_logits );
	for( size_t i = 0; i < n_logits; i++ )
		probs_id.emplace_back( probs[ i ], (int)i );
	{
		double sum_ts = 0.0;
		double max_ts = -1.0;
		double max_tx = -1.0;
		// Max probability among text (non-timestamp) tokens
		for( int i = 0; i < vocab.token_beg; i++ )
			max_tx = std::max( max_tx, probs_id[ i ].first );
		const int i0 = is_initial ? vocab.token_beg + 101 : vocab.token_beg;
		const int i1 = is_initial ? vocab.token_beg + 101 : (int)n_logits;
		// the initial timestamp cannot be larger than 100
		// ref: https://github.com/openai/whisper/blob/0b1ba3d46ebf7fe6f953acfd8cad62a4f851b49f/whisper/decoding.py#L426-L429
		if( is_initial )
		{
			for( int i = i0; i < n_logits; i++ )
				probs_id[ i ].first = -INFINITY;
		}
		// Sum timestamp probabilities and track the best timestamp token
		for( int i = vocab.token_beg; i < i1; i++ )
		{
			sum_ts += probs_id[ i ].first;
			if( probs_id[ i ].first > max_ts )
			{
				max_ts = probs_id[ i ].first;
				result.tid = probs_id[ i ].second;
			}
		}
		// if the probability sum of all timestamp tokens is higher than the max probability of the text tokens - sample a
		// timestamp token
		if( sum_ts > max_tx || force_timestamp )
		{
			// ref: https://github.com/openai/whisper/blob/0b1ba3d46ebf7fe6f953acfd8cad62a4f851b49f/whisper/decoding.py#L430-L438
			for( int i = 0; i < vocab.token_beg; i++ )
				probs_id[ i ].first = -INFINITY;
		}
		result.pt = (float)( max_ts / ( sum_ts + 1e-10 ) );
		result.ptsum = (float)sum_ts;
	}
	// find the top K tokens
	const int top_k = 4;
	std::partial_sort(
		probs_id.begin(),
		probs_id.begin() + top_k, probs_id.end(),
		[]( const std::pair<double, Vocabulary::id>& a, const std::pair<double, Vocabulary::id>& b ) {
			return a.first > b.first;
		} );
	probs_id.resize( top_k );
	//printf("\n");
	//for (int i = 0; i < (int) probs_id.size(); i++) {
	//	printf("%d: '%s' %f, %d\n", i, vocab.id_to_token.at(probs_id[i].second).c_str(), probs_id[i].first, probs_id[i].second);
	//}
	// Skip special tokens (start-of-transcript etc.) unless they're the only option
	int res = 0;
	while( ( probs_id[ res ].second == vocab.token_sot ||
		probs_id[ res ].second == vocab.token_solm ||
		probs_id[ res ].second == vocab.token_not ) &&
		res < (int)probs_id.size() - 1 )
	{
		res++;
	}
	result.id = probs_id[ res ].second;
	result.p = (float)probs_id[ res ].first;
	return result;
}
// Greedy-sample a token from the logits of the most recent decode() call
sTokenData ContextImpl::sampleBest()
{
	const int n_vocab = model.shared->vocab.n_vocab;
	const float* const lastRow = probs.data() + ( probs.size() - n_vocab );
	return sampleBest( lastRow, false, false );
}
// Sample a timestamp token from the logits of the most recent decode() call
sTokenData ContextImpl::sampleTimestamp( bool initial )
{
	const int n_vocab = model.shared->vocab.n_vocab;
	const float* const lastRow = probs.data() + ( probs.size() - n_vocab );
	return sampleBest( lastRow, true, initial );
}
// a cost-function / heuristic that is high for text that takes longer to pronounce
// Obviously, can be improved
static float voice_length( const char* text )
{
	if( nullptr == text )
		return 0;
	float total = 0.0f;
	for( const char* p = text; *p != '\0'; p++ )
	{
		const char ch = *p;
		float weight;
		if( ch >= '0' && ch <= '9' )
			weight = 3.0f;	// digits are read out as words
		else if( ch == ' ' )
			weight = 0.01f;
		else if( ch == ',' )
			weight = 2.00f;
		else if( ch == '.' || ch == '!' || ch == '?' )
			weight = 3.00f;	// sentence-ending punctuation implies a pause
		else
			weight = 1.0f;
		total += weight;
	}
	return total;
}
// Convert a timestamp in 10ms ticks to a PCM sample index, clamped to [ 0, n_samples )
static int timestamp_to_sample( int64_t t, int n_samples )
{
	return std::max( 0, std::min( (int)n_samples - 1, (int)( ( t * SAMPLE_RATE ) / 100 ) ) );
}
// Convert a PCM sample index to a timestamp in 10ms ticks
static int64_t sample_to_timestamp( int i_sample )
{
	return ( 100 * i_sample ) / SAMPLE_RATE;
}
// Assign a [ t0, t1 ] timestamp to every token of segment `i_segment`,
// combining decoder timestamp tokens, proportional splitting by estimated voice
// length, and refinement against the per-sample `energy` signal (VAD).
// Ported from whisper_exp_compute_token_level_timestamps.
void ContextImpl::expComputeTokenLevelTimestamps( int i_segment, float thold_pt, float thold_ptsum )
{
	// whisper_exp_compute_token_level_timestamps
	const Whisper::Vocabulary& vocab = model.shared->vocab;
	auto& segment = result_all[ i_segment ];
	auto& tokens = segment.tokens;
	const int n_samples = energy.size();
	if( n_samples == 0 )
	{
		logWarning( u8"%s: no signal data available", __func__ );
		return;
	}
	const int64_t t0 = segment.t0;
	const int64_t t1 = segment.t1;
	const int n = tokens.size();
	if( n == 0 )
		return;
	if( n == 1 )
	{
		// Single token spans the whole segment
		tokens[ 0 ].t0 = t0;
		tokens[ 0 ].t1 = t1;
		return;
	}
	// Cross-segment state: start time, last assigned time, last timestamp token id
	auto& t_beg = this->t_beg;
	auto& t_last = this->t_last;
	auto& tid_last = this->tid_last;
	// Pass 1: pin down timestamps where the decoder emitted confident timestamp tokens
	for( int j = 0; j < n; ++j )
	{
		auto& token = tokens[ j ];
		if( j == 0 )
		{
			if( token.id == vocab.token_beg )
			{
				tokens[ j ].t0 = t0;
				tokens[ j ].t1 = t0;
				tokens[ j + 1 ].t0 = t0;
				t_beg = t0;
				t_last = t0;
				tid_last = vocab.token_beg;
			}
			else
			{
				tokens[ j ].t0 = t_last;
			}
		}
		// Timestamp token id -> time: each timestamp token is a 20ms (2 tick) step
		const int64_t tt = t_beg + 2 * ( token.tid - vocab.token_beg );
		tokens[ j ].id = token.id;
		tokens[ j ].tid = token.tid;
		tokens[ j ].p = token.p;
		tokens[ j ].pt = token.pt;
		tokens[ j ].ptsum = token.ptsum;
		tokens[ j ].vlen = voice_length( vocab.string( token.id ) );
		// Accept the timestamp only when it's confident and monotonically increasing
		if( token.pt > thold_pt && token.ptsum > thold_ptsum && token.tid > tid_last && tt <= t1 )
		{
			if( j > 0 )
				tokens[ j - 1 ].t1 = tt;
			tokens[ j ].t0 = tt;
			tid_last = token.tid;
		}
	}
	tokens[ n - 2 ].t1 = t1;
	tokens[ n - 1 ].t0 = t1;
	tokens[ n - 1 ].t1 = t1;
	t_last = t1;
	// find intervals of tokens with unknown timestamps
	// fill the timestamps by proportionally splitting the interval based on the token voice lengths
	{
		int p0 = 0;
		int p1 = 0;
		while( true )
		{
			while( p1 < n && tokens[ p1 ].t1 < 0 )
				p1++;
			if( p1 >= n )
				p1--;
			if( p1 > p0 )
			{
				double psum = 0.0;
				for( int j = p0; j <= p1; j++ )
					psum += tokens[ j ].vlen;
				//printf("analyzing %d - %d, psum = %f\n", p0, p1, psum);
				const double dt = tokens[ p1 ].t1 - tokens[ p0 ].t0;
				// split the time proportionally to the voice length
				for( int j = p0 + 1; j <= p1; j++ )
				{
					const double ct = tokens[ j - 1 ].t0 + dt * tokens[ j - 1 ].vlen / psum;
					tokens[ j - 1 ].t1 = ct;
					tokens[ j ].t0 = ct;
				}
			}
			p1++;
			p0 = p1;
			if( p1 >= n )
				break;
		}
	}
	// fix up (just in case)
	for( int j = 0; j < n - 1; j++ )
	{
		if( tokens[ j ].t1 < 0 )
			tokens[ j + 1 ].t0 = tokens[ j ].t1;
		if( j > 0 )
		{
			// Enforce monotonically non-decreasing timestamps
			if( tokens[ j - 1 ].t1 > tokens[ j ].t0 ) {
				tokens[ j ].t0 = tokens[ j - 1 ].t1;
				tokens[ j ].t1 = std::max( tokens[ j ].t0, tokens[ j ].t1 );
			}
		}
	}
	// VAD
	// expand or contract tokens based on voice activity
	{
		// Half-window of 1/8 second around each token for the energy average
		constexpr int hw = SAMPLE_RATE / 8;
		for( int j = 0; j < n; j++ )
		{
			if( tokens[ j ].id >= vocab.token_eot )
				continue;
			int s0 = timestamp_to_sample( tokens[ j ].t0, n_samples );
			int s1 = timestamp_to_sample( tokens[ j ].t1, n_samples );
			const int ss0 = std::max( s0 - hw, 0 );
			const int ss1 = std::min( s1 + hw, n_samples );
			const int ns = ss1 - ss0;
			float sum = 0.0f;
			for( int k = ss0; k < ss1; k++ )
				sum += this->energy[ k ];
			// Threshold = half the average energy of the surrounding window
			const float thold = 0.5 * sum / ns;
			{
				// Adjust the token start: expand left while loud, or shrink right while quiet
				int k = s0;
				if( this->energy[ k ] > thold && j > 0 )
				{
					while( k > 0 && this->energy[ k ] > thold )
						k--;
					tokens[ j ].t0 = sample_to_timestamp( k );
					if( tokens[ j ].t0 < tokens[ j - 1 ].t1 )
						tokens[ j ].t0 = tokens[ j - 1 ].t1;
					else
						s0 = k;
				}
				else
				{
					while( this->energy[ k ] < thold && k < s1 )
						k++;
					s0 = k;
					tokens[ j ].t0 = sample_to_timestamp( k );
				}
			}
			{
				// Adjust the token end symmetrically
				// NOTE(review): `j < ns - 1` compares the token index against the
				// window sample count; presumably `n - 1` (token count) was intended —
				// confirm against upstream whisper.cpp.
				int k = s1;
				if( this->energy[ k ] > thold )
				{
					while( k < n_samples - 1 && this->energy[ k ] > thold )
						k++;
					tokens[ j ].t1 = sample_to_timestamp( k );
					if( j < ns - 1 && tokens[ j ].t1 > tokens[ j + 1 ].t0 )
						tokens[ j ].t1 = tokens[ j + 1 ].t0;
					else
						s1 = k;
				}
				else
				{
					while( this->energy[ k ] < thold && k > s0 )
						k--;
					s1 = k;
					tokens[ j ].t1 = sample_to_timestamp( k );
				}
			}
		}
	}
}
// Format a timestamp measured in 10ms ticks as "HH:MM:SS.mmm"; when `comma` is
// true the millisecond separator becomes ',' (SubRip subtitle style).
static std::string to_timestamp( int64_t t, bool comma = false )
{
	int64_t remaining = t * 10;	// ticks -> milliseconds
	const int64_t hours = remaining / ( 1000 * 60 * 60 );
	remaining -= hours * ( 1000 * 60 * 60 );
	const int64_t minutes = remaining / ( 1000 * 60 );
	remaining -= minutes * ( 1000 * 60 );
	const int64_t seconds = remaining / 1000;
	remaining -= seconds * 1000;
	char buf[ 32 ];
	snprintf( buf, sizeof( buf ), "%02d:%02d:%02d%s%03d",
		(int)hours, (int)minutes, (int)seconds, comma ? "," : ".", (int)remaining );
	return std::string( buf );
}
// Publishes the spectrogram pointer on the context for the current scope, and
// clears it on exit so the pointer never dangles past the call.
class ContextImpl::CurrentSpectrogramRaii
{
	ContextImpl* ctx;

public:
	CurrentSpectrogramRaii( ContextImpl* c, iSpectrogram& mel ) :
		ctx( c )
	{
		c->currentSpectrogram = &mel;
	}

	~CurrentSpectrogramRaii()
	{
		ctx->currentSpectrogram = nullptr;
	}
};
// Top-level transcription loop, ported from whisper_full(): slides a window
// over the spectrogram, encodes each window, greedily decodes tokens until a
// timestamp/end condition, and appends the text segments to result_all.
// Offsets named seek* are in 10ms mel frames.
HRESULT COMLIGHTCALL ContextImpl::runFullImpl( const sFullParams& params, const sProgressSink& progress, iSpectrogram& mel )
{
	auto ts = device.setForCurrentThread();
	const Whisper::Vocabulary& vocab = model.shared->vocab;
	// Ported from whisper_full() function
	result_all.clear();
	if( params.flag( eFullParamsFlags::SpeedupAudio ) )
	{
		logError( u8"GPU model doesn't implement the SpeedupAudio flag" );
		return E_NOTIMPL;
	}
	CurrentSpectrogramRaii _cs( this, mel );
	const int seek_start = params.offset_ms / 10;
	const int seek_end = seek_start + ( params.duration_ms == 0 ? (int)mel.getLength() : params.duration_ms / 10 );
	// if length of spectrogram is less than 1s (100 samples), then return
	// basically don't process anything that is less than 1s
	// see issue #39: https://github.com/ggerganov/whisper.cpp/issues/39
	if( seek_end < 100 + seek_start )
		return S_FALSE;
	// the accumulated text context so far
	if( params.flag( eFullParamsFlags::NoContext ) )
		prompt_past.clear();
	// prepend the prompt tokens to the prompt_past
	if( params.prompt_tokens && params.prompt_n_tokens > 0 )
	{
		// parse tokens from the pointer
		for( int i = 0; i < params.prompt_n_tokens; i++ )
			prompt_past.push_back( params.prompt_tokens[ i ] );
		std::rotate( prompt_past.begin(), prompt_past.end() - params.prompt_n_tokens, prompt_past.end() );
	}
	// overwrite audio_ctx
	exp_n_audio_ctx = params.audio_ctx;
	// these tokens determine the task that will be performed
	std::vector<whisper_token> prompt_init = { vocab.token_sot };
	if( vocab.is_multilingual() )
	{
		// For multilingual models, the language and the task (translate vs
		// transcribe) are encoded as extra prompt tokens after SOT
		int langId = lookupLanguageId( params.language );
		if( langId < 0 )
		{
			// params.language packs a 4CC language code; unpack for the error message
			char lang[ 5 ];
			*(uint32_t*)( &lang[ 0 ] ) = params.language;
			lang[ 4 ] = '\0';
			logError( u8"%s: unknown language '%s'", __func__, lang );
			return E_INVALIDARG;
		}
		prompt_init.push_back( vocab.token_sot + 1 + langId );
		if( params.flag( eFullParamsFlags::Translate ) )
			prompt_init.push_back( vocab.token_translate );
		else
			prompt_init.push_back( vocab.token_transcribe );
	}
	// int progress_prev = 0;
	// int progress_step = 5;
	std::vector<sTokenData> tokens_cur;
	tokens_cur.reserve( model.parameters.n_text_ctx );
	std::vector<whisper_token> prompt;
	prompt.reserve( model.parameters.n_text_ctx );
	// main loop
	int seek = seek_start;
	// Start measuring "Run" profiler value, both CPU and GPU times
	auto prof = context.completeProfiler();
	bool stoppedPrematurely = false;
	if( params.flag( eFullParamsFlags::NoContext ) )
	{
		CHECK( context.clearState() );
	}
	while( true )
	{
		// Report progress before each window
		if( nullptr != progress.pfn )
		{
			const int pos = seek - seek_start;
			const int total = seek_end - seek_start;
			const double percentage = (double)pos / (double)total;
			auto cb = profiler.cpuBlock( eCpuBlock::Callbacks );
			CHECK( progress.pfn( percentage, this, progress.pv ) );
		}
		if( seek + 100 >= seek_end )
			break;
		// The callback may cancel the run by returning S_FALSE
		if( nullptr != params.encoder_begin_callback )
		{
			auto cb = profiler.cpuBlock( eCpuBlock::Callbacks );
			HRESULT hr = params.encoder_begin_callback( this, params.encoder_begin_callback_user_data );
			if( FAILED( hr ) )
				return hr;
			if( hr != S_OK )
			{
				stoppedPrematurely = true;
				break;
			}
		}
		// encode audio features starting at offset seek
		CHECK( encode( mel, seek ) );
		int n_past = 0;
		prompt.clear();
		// if we have already generated some text, use it as a prompt to condition the next generation
		if( !prompt_past.empty() )
		{
			int n_take = std::min( std::min( params.n_max_text_ctx, model.parameters.n_text_ctx / 2 ), int( prompt_past.size() ) );
			prompt = { vocab.token_prev };
			prompt.insert( prompt.begin() + 1, prompt_past.end() - n_take, prompt_past.end() );
			prompt_past.clear();
			prompt_past.insert( prompt_past.end(), prompt.begin() + 1, prompt.end() );
		}
		prompt.insert( prompt.end(), prompt_init.begin(), prompt_init.end() );
		// Default window advance: the full 30s chunk, in 10ms ticks
		int seek_delta = 100 * WHISPER_CHUNK_SIZE;
		// print the prompt
		//printf("\n\n");
		//for (int i = 0; i < prompt.size(); i++) {
		//	printf("%s: prompt[%d] = %s\n", __func__, i, ctx->vocab.id_to_token[prompt[i]].c_str());
		//}
		//printf("\n\n");
		// the accumulated transcription in the current iteration
		int result_len = 0;
		tokens_cur.clear();
		bool failed = false;
		bool has_ts = false; // have we already sampled a non-beg timestamp token for the current segment?
		{
			// Measure "Decode" profiler value, both CPU and GPU times
			auto prof = context.decodeProfiler();
			for( int i = 0, n_max = model.parameters.n_text_ctx / 2 - 4; i < n_max; i++ )
			{
				CHECK( decode( prompt.data(), prompt.size(), n_past, params.cpuThreads ) );
				n_past += (int)prompt.size();
				prompt.clear();
				// very basic greedy sampling strategy:
				//
				// - always take the most probable token
				//
				// more sophisticated sampling strategies could be implemented here, but we keep it simple
				// feel free to experiment!
				//
				{
					auto p = profiler.cpuBlock( eCpuBlock::Sample );
					const sTokenData token = ( i == 0 ) ? sampleTimestamp( true ) : sampleBest();
					// timestamp token - update sliding window
					if( token.id > vocab.token_beg )
					{
						const int seek_delta_new = 2 * ( token.id - vocab.token_beg );
						// do not allow to go back in time
						if( has_ts && seek_delta > seek_delta_new && result_len < i )
							break;
						seek_delta = seek_delta_new;
						result_len = i + 1;
						has_ts = true;
					}
					// add it to the context
					prompt.push_back( token.id );
					tokens_cur.push_back( token );
					//{
					//	const auto tt = token.pt > 0.10 ? ctx->vocab.id_to_token[token.tid] : "[?]";
					//	printf("%s: %10s %6d %6.3f '%s'\n", __func__, tt.c_str(), token.id, token.pt, ctx->vocab.id_to_token[token.id].c_str());
					//}
					// end of segment
					if( token.id == vocab.token_eot ||	// end of text token
						( params.max_tokens > 0 && i >= params.max_tokens ) ||	// max tokens per segment reached
						( has_ts && seek + seek_delta + 100 >= seek_end )	// end of audio reached
						)
					{
						if( result_len == 0 )
						{
							if( seek + seek_delta + 100 >= seek_end )
								result_len = i + 1;
							else
							{
								failed = true;
								break;
							}
						}
						if( params.flag( eFullParamsFlags::SingleSegment ) )
						{
							result_len = i + 1;
							seek_delta = 100 * WHISPER_CHUNK_SIZE;
						}
						break;
					}
				}
				// sometimes, the decoding can get stuck in a repetition loop
				// this is a simple strategy to avoid such cases - we simply flag the decoding as failed and advance
				// the sliding window by 1 second
				if( i == n_max - 1 && ( result_len == 0 || seek_delta < 100 * WHISPER_CHUNK_SIZE / 2 ) )
				{
					failed = true;
					break;
				}
			}
		}
		if( failed )
		{
			logError( u8"%s: failed to generate timestamp token - skipping one second", __func__ );
			seek += 100;
			continue;
		}
		// shrink down to result_len
		tokens_cur.resize( result_len );
		for( const auto& r : tokens_cur )
			prompt_past.push_back( r.id );
		// store the text from this iteration
		if( !tokens_cur.empty() )
		{
			int i0 = 0;
			int t0 = seek + 2 * ( tokens_cur.front().tid - vocab.token_beg );
			std::string text = "";
			for( int i = 0; i < (int)tokens_cur.size(); i++ )
			{
				//printf("%s: %18s %6.3f %18s %6.3f\n", __func__,
				//		ctx->vocab.id_to_token[tokens_cur[i].id].c_str(), tokens_cur[i].p,
				//		ctx->vocab.id_to_token[tokens_cur[i].tid].c_str(), tokens_cur[i].pt);
				if( params.flag( eFullParamsFlags::PrintSpecial ) || tokens_cur[ i ].id < vocab.token_eot )
					text += vocab.string( tokens_cur[ i ].id );
				// A timestamp token closes the current segment
				if( tokens_cur[ i ].id > vocab.token_beg && !params.flag( eFullParamsFlags::SingleSegment ) )
				{
					const int t1 = seek + 2 * ( tokens_cur[ i ].tid - vocab.token_beg );
					if( !text.empty() )
					{
						const bool speedUp = params.flag( eFullParamsFlags::SpeedupAudio );
						const int tt0 = speedUp ? 2 * t0 : t0;
						const int tt1 = speedUp ? 2 * t1 : t1;
						if( params.flag( eFullParamsFlags::PrintRealtime ) )
						{
							if( params.flag( eFullParamsFlags::PrintTimestamps ) )
								logDebug( u8"[%s --> %s]  %s", to_timestamp( tt0 ).c_str(), to_timestamp( tt1 ).c_str(), text.c_str() );
							else
								logDebug( u8"%s", text.c_str() );
						}
						result_all.push_back( { tt0, tt1, text, {} } );
						for( int j = i0; j <= i; j++ )
							result_all.back().tokens.push_back( tokens_cur[ j ] );
						int n_new = 1;
						if( params.flag( eFullParamsFlags::TokenTimestamps ) )
						{
							expComputeTokenLevelTimestamps( (int)result_all.size() - 1, params.thold_pt, params.thold_ptsum );
							if( params.max_len > 0 )
								n_new = wrapSegment( params.max_len );
						}
						if( nullptr != params.new_segment_callback )
						{
							auto cb = profiler.cpuBlock( eCpuBlock::Callbacks );
							HRESULT hr = params.new_segment_callback( this, n_new, params.new_segment_callback_user_data );
							if( FAILED( hr ) )
								return hr;
						}
					}
					text = "";
					// Skip consecutive timestamp tokens, then start the next segment
					while( i < (int)tokens_cur.size() && tokens_cur[ i ].id > vocab.token_beg )
						i++;
					i--;
					t0 = t1;
					i0 = i + 1;
				}
			}
			// Flush the trailing segment that had no closing timestamp token
			if( !text.empty() )
			{
				const int t1 = seek + seek_delta;
				const bool speedUp = params.flag( eFullParamsFlags::SpeedupAudio );
				const int tt0 = speedUp ? 2 * t0 : t0;
				const int tt1 = speedUp ? 2 * t1 : t1;
				if( params.flag( eFullParamsFlags::PrintRealtime ) )
				{
					if( params.flag( eFullParamsFlags::PrintTimestamps ) )
						logDebug( u8"[%s --> %s]  %s", to_timestamp( tt0 ).c_str(), to_timestamp( tt1 ).c_str(), text.c_str() );
					else
						logDebug( u8"%s", text.c_str() );
				}
				result_all.push_back( { tt0, tt1, text, {} } );
				for( int j = i0; j < (int)tokens_cur.size(); j++ )
					result_all.back().tokens.push_back( tokens_cur[ j ] );
				int n_new = 1;
				if( params.flag( eFullParamsFlags::TokenTimestamps ) )
				{
					expComputeTokenLevelTimestamps( (int)result_all.size() - 1, params.thold_pt, params.thold_ptsum );
					if( params.max_len > 0 )
						n_new = wrapSegment( params.max_len );
				}
				if( nullptr != params.new_segment_callback )
				{
					auto cb = profiler.cpuBlock( eCpuBlock::Callbacks );
					HRESULT hr = params.new_segment_callback( this, n_new, params.new_segment_callback_user_data );
					if( FAILED( hr ) )
						return hr;
				}
			}
		}
		seek += seek_delta;
	}
	// Final 100% progress report, unless the run was cancelled
	if( nullptr != progress.pfn && !stoppedPrematurely )
	{
		auto cb = profiler.cpuBlock( eCpuBlock::Callbacks );
		CHECK( progress.pfn( 1.0, this, progress.pv ) );
	}
	return S_OK;
}
| 21,276
|
C++
|
.cpp
| 672
| 27.80506
| 131
| 0.616316
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,574
|
MelInputTensor.cpp
|
Const-me_Whisper/Whisper/Whisper/MelInputTensor.cpp
|
#include "stdafx.h"
#include "MelInputTensor.h"
#include "../D3D/MappedResource.h"
#include "../D3D/createBuffer.h"
#include <mfapi.h> // MFCopyImage
using namespace DirectCompute;
// Build the [ 2*n_ctx, n_mels ] FP32 input tensor for the encoder, uploading a
// zero-padded, row-transposed window of the spectrogram into a dynamic buffer.
HRESULT MelInputTensor::create( Whisper::iSpectrogram& spectrogram, const sEncodeParams& encParams )
{
	// Ported from the initial portion of whisper_encode() function
	const size_t ne0 = encParams.n_ctx * 2;
	const size_t ne1 = encParams.n_mels;
	const size_t totalElts = ne0 * ne1;
	const size_t totalBytes = totalElts * 4;
	if( capacity < (uint32_t)totalElts )
	{
		// The old buffer is too small: drop the old one, and create a larger buffer with SRV
		buffer = nullptr;
		TensorGpuViews::clear();
		CHECK( createBuffer( eBufferUse::Dynamic, totalBytes, &buffer, nullptr, nullptr ) );
		CHECK( TensorGpuViews::create( buffer, DXGI_FORMAT_R32_FLOAT, totalElts, false ) );
		capacity = (uint32_t)totalElts;
	}
	// Upload data to VRAM using D3D11_MAP_WRITE_DISCARD, that's why we made a dynamic buffer
	{
		// Ported from whisper_encode() function
		MappedResource mapped;
		CHECK( mapped.map( buffer, false ) );
		float* const dst = (float*)mapped.data();
		// Zero first: the copied window may be shorter than the tensor row
		memset( dst, 0, totalBytes );
		const size_t n_len = spectrogram.getLength();
		// Clamp the [ mel_offset, mel_offset + 2*n_ctx ) window to the available data
		const size_t i0 = std::min( (size_t)encParams.mel_offset, n_len );
		const size_t i1 = std::min( (size_t)encParams.mel_offset + 2 * encParams.n_ctx, n_len );
		// Whisper::MelBufferRaii sourceBuffer{ spectrogram, i0, i1 - i0 };
		constexpr DWORD n_mel = Whisper::N_MEL;
		const size_t rowBytes = ( i1 - i0 ) * 4;
		/*
		for( size_t j = 0; j < n_mel; j++ )
		{
			float* rdi = dst + j * 2 * encParams.n_ctx;
			const float* rsi = sourceBuffer[ j ];
			memcpy( rdi, rsi, rowBytes );
		} */
		Whisper::MelBufferRaii sourceBuffer;
		CHECK( sourceBuffer.make( spectrogram, i0, i1 - i0 ) );
		// Strided row-by-row copy: n_mel rows of rowBytes each, destination
		// stride is the full tensor row of 2*n_ctx floats
		CHECK( MFCopyImage(
			(BYTE*)dst, (LONG)( 2 * encParams.n_ctx * sizeof( float ) ),
			sourceBuffer.bytePtr(), sourceBuffer.strideBytes(),
			(DWORD)rowBytes, n_mel ) );
	}
	// Shape the tensor
	ne = { 2 * encParams.n_ctx, encParams.n_mels, 1, 1 };
	TensorShape::setDenseStrides();
	return S_OK;
}
| 2,114
|
C++
|
.cpp
| 54
| 36.5
| 100
| 0.698343
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,575
|
WhisperContext.cpp
|
Const-me_Whisper/Whisper/Whisper/WhisperContext.cpp
|
#include "stdafx.h"
#include "WhisperContext.h"
#include "ModelBuffers.h"
#include <optional>
#include "../Utils/Trace/tracing.h"
#include "../D3D/RenderDoc/renderDoc.h"
#include "../ML/testUtils.h"
using namespace DirectCompute;
namespace
{
	// True to measure GPU time of individual shaders which run during the encode step of the algorithm
	constexpr bool profileEncodeShaders = true;
	// True to measure GPU time of individual shaders which run during the decode step of the algorithm
	constexpr bool profileDecodeShaders = true;
	// Destination paths for the optional binary debug traces (see SAVE_DEBUG_TRACE)
	LPCTSTR traceFileNative = LR"(C:\Temp\2remove\Whisper\gpu.bin)";
	LPCTSTR traceFileHybrid = LR"(C:\Temp\2remove\Whisper\hybrid.bin)";
	// All tensor arenas in this translation unit use default-constructed configuration
	TensorsArena::sArenaConfigs defaultArenaConfigs()
	{
		TensorsArena::sArenaConfigs res = {};
		return res;
	}
}
// Both arenas use the default configuration; presumably `outer` holds
// whole-pass temporaries and `layer` is reset per transformer layer — confirm
// against the ArenaRaii usage sites.
WhisperContext::Arenas::Arenas() :
	outer( defaultArenaConfigs() ), layer( defaultArenaConfigs() )
{ }
Tensor WhisperContext::DecoderLayerPool::tensor( eDataType type, const std::array<uint32_t, 4>& ne )
{
	// The decoder layer pool only ever serves FP32 tensors
	assert( type == eDataType::FP32 );
	return result.tensor( eDataType::FP32, ne, &DirectCompute::defaultNewCapacity );
}
class WhisperContext::ArenaRaii
{
WhisperContext& context;
iTensorArena* prevCurrent;
public:
ArenaRaii( WhisperContext& ctx, iTensorArena& ta ) :
context( ctx )
{
prevCurrent = ctx.currentArena;
ctx.currentArena = &ta;
}
~ArenaRaii()
{
context.currentArena->reset();
context.currentArena = prevCurrent;
}
};
WhisperContext::WhisperContext( const Whisper::WhisperModel& wm, Whisper::ProfileCollection& pc ) :
MlContext( pc ),
gpuModel( wm.tensors )
{
#if BUILD_HYBRID_VERSION
if( !wm.shared->hybridTensors.layers.empty() )
{
hybridContext = std::make_unique<HybridContext>( wm );
check( hybridContext->create() );
#if SAVE_DEBUG_TRACE
Tracing::traceCreate( traceFileHybrid );
#endif
}
else
#endif
{
#if SAVE_DEBUG_TRACE
Tracing::traceCreate( traceFileNative );
#endif
}
}
#if BUILD_BOTH_VERSIONS
namespace
{
thread_local WhisperContext* ts_context = nullptr;
const ModelBuffers& getGlobalModel()
{
return gpuModel;
}
}
/*
WhisperContext::WhisperContext() :
gpuModel( getGlobalModel() ),
{
if( nullptr != ts_context )
throw HRESULT_FROM_WIN32( ERROR_ALREADY_INITIALIZED );
ts_context = this;
}*/
WhisperContext::~WhisperContext()
{
Tracing::traceClose();
if( ts_context == nullptr )
return;
assert( ts_context == this );
ts_context = nullptr;
}
WhisperContext& WhisperContext::current()
{
WhisperContext* c = ts_context;
if( nullptr == c )
throw OLE_E_BLANK;
return *c;
}
#else
WhisperContext& WhisperContext::current()
{
throw E_NOTIMPL;
}
#endif
Tensor WhisperContext::createTensor( eDataType type, const std::array<uint32_t, 4>& ne )
{
// return MlContext::createTensor( type, ne );
iTensorArena* const ca = currentArena;
if( nullptr != ca )
return ca->tensor( type, ne );
else
return MlContext::createTensor( type, ne );
}
void WhisperContext::fmaRepeat( Tensor& cur, const TensorPair& that )
{
MlContext::fmaRepeat( cur, that.w, that.b );
}
Tensor WhisperContext::convolutionAndGelu( const Tensor& mel, uint32_t n_ctx )
{
const EncoderBuffers& model = gpuModel.enc;
Tensor cur = conv_1d_1s( model.conv1.w, mel );
Tracing::tensor( "enc.conv1", cur );
addRepeatGelu( cur, model.conv1.b );
Tracing::tensor( "enc.temp1", cur );
cur = conv_1d_2s( model.conv2.w, cur );
addRepeatGelu( cur, model.conv2.b );
const Tensor& posEmbed = model.positionalEmbedding;
const uint32_t peStride = posEmbed.ne[ 0 ];
constexpr uint32_t peOffset = 0;
Tensor e_pe = view2d( posEmbed, posEmbed.ne[ 0 ], n_ctx, peStride, peOffset );
cur = add( e_pe, transpose( cur ) );
return cur;
}
Tensor WhisperContext::encodeLayer( const Tensor& source, size_t index, uint32_t n_state, uint32_t n_head, uint32_t n_ctx )
{
auto prof = profiler.block( eProfilerBlock::EncodeLayer );
ArenaRaii arenaRaii{ *this, arenas.layer };
const LayerEncoder& layer = gpuModel.enc.layers[ index ];
// norm
Tensor cur = norm( source );
if( 0 == index )
Tracing::tensor( "enc-norm", cur );
fmaRepeat( cur, layer.attnLn0 );
// self-attention
Tensor Qcur;
Tensor reshaped;
if( gpuInfo().useReshapedMatMul() )
{
const uint16_t tag = profiler.setNextTag( "enc.layer.1" );
reshaped = reshapePanels( cur );
profiler.setNextTag( tag );
Qcur = mulMatTiledEx( layer.attnQuery.w, reshaped );
}
else
{
profiler.setNextTag( "enc.layer.1" );
Qcur = mulMat( layer.attnQuery.w, cur );
}
if( 0 == index )
Tracing::tensor( "enc-Qcur", Qcur );
addRepeat( Qcur, layer.attnQuery.b );
// note: no bias for Key
Tensor Kcur;
if( gpuInfo().useReshapedMatMul() )
{
// Already reshaped by the previous `if`
profiler.setNextTag( "enc.layer.2" );
Kcur = mulMatTiledEx( layer.attnKey, reshaped );
}
else
{
profiler.setNextTag( "enc.layer.2" );
Kcur = mulMat( layer.attnKey, cur );
}
if( 0 == index )
Tracing::tensor( "enc-Kcur", Kcur );
Tensor Vcur;
if( gpuInfo().useReshapedMatMul() )
{
// Already reshaped by the previous `if`
profiler.setNextTag( "enc.layer.3" );
Vcur = mulMatTiledEx( layer.attnValue.w, reshaped );
}
else
{
profiler.setNextTag( "enc.layer.3" );
Vcur = mulMat( layer.attnValue.w, cur );
}
if( 0 == index )
Tracing::tensor( "enc-Vcur", Vcur );
addRepeat( Vcur, layer.attnValue.b );
// ------
Tensor Q = permute( copy( Qcur, eDataType::FP16, { n_state / n_head, n_head, n_ctx } ), 0, 2, 1, 3 );
Tensor K = permute( copy( Kcur, eDataType::FP16, { n_state / n_head, n_head, n_ctx } ), 0, 2, 1, 3 );
Tensor V = copy( permute( Vcur.reshape3d( n_state / n_head, n_head, n_ctx ), 1, 2, 0, 3 ), eDataType::FP16, { n_ctx, n_state / n_head, n_head } );
Tensor KQV = flashAttention( Q, K, V, false );
if( 0 == index )
Tracing::tensor( "enc-KQV", KQV );
Tensor KQV_merged = permute( KQV, 0, 2, 1, 3 );
copyInPlace( cur, KQV_merged, eDataType::FP32, { n_state, n_ctx } );
// projection
if( gpuInfo().useReshapedMatMul() )
{
const uint16_t tag = profiler.setNextTag( "enc.layer.4" );
cur = reshapePanels( cur );
profiler.setNextTag( tag );
cur = mulMatTiledEx( layer.attnLn1.w, cur );
}
else
{
profiler.setNextTag( "enc.layer.4" );
cur = mulMat( layer.attnLn1.w, cur );
}
// add the input
addRepeatEx( cur, layer.attnLn1.b, source );
// feed-forward network
Tensor inpFF = cur;
cur = norm( inpFF );
fmaRepeat( cur, layer.mlpLn );
// fully connected
if( gpuInfo().useReshapedMatMul() )
{
const uint16_t tag = profiler.setNextTag( "enc.layer.5" );
cur = reshapePanels( cur );
profiler.setNextTag( tag );
cur = mulMatTiledEx( layer.mlp0.w, cur );
}
else
{
profiler.setNextTag( "enc.layer.5" );
cur = mulMat( layer.mlp0.w, cur );
}
addRepeatGelu( cur, layer.mlp0.b );
// projection
if( gpuInfo().useReshapedMatMul() )
{
const uint16_t tag = profiler.setNextTag( "enc.layer.6" );
cur = reshapePanels( cur );
profiler.setNextTag( tag );
cur = mulMatTiledEx( layer.mlp1.w, cur );
}
else
{
profiler.setNextTag( "enc.layer.6" );
cur = mulMat( layer.mlp1.w, cur );
}
// output from this layer
addRepeatEx( cur, layer.mlp1.b, inpFF );
return cur;
}
void WhisperContext::createKeyValueBuffers( const sEncodeParams& encParams )
{
{
const uint32_t n_audio_ctx = encParams.n_audio_ctx;
const uint32_t n_mem = encParams.n_text_layer * encParams.n_audio_ctx;
const uint32_t n_elements = encParams.n_text_state * n_mem;
kvCross.resize( n_elements );
}
#if BUILD_HYBRID_VERSION
if( !hybridContext )
#endif
{
const uint32_t n_mem = encParams.n_text_layer * encParams.n_text_ctx;
const uint32_t n_elements = encParams.n_text_state * n_mem;
kv.resize( n_elements );
}
}
Tensor WhisperContext::encode( Whisper::iSpectrogram& spectrogram, const sEncodeParams& encParams )
{
auto prof = profiler.block( eProfilerBlock::Encode );
CaptureRaii renderdocCapture;
profiler.profileShaders = profileEncodeShaders;
createKeyValueBuffers( encParams );
// Upload the source
check( melInput.create( spectrogram, encParams ) );
Tracing::tensor( "enc.input", melInput );
ArenaRaii arenaRaii{ *this, arenas.outer };
// Initial few steps
Tensor cur = convolutionAndGelu( melInput, encParams.n_ctx );
// Process all these layers
{
const size_t layersCount = encParams.layersCount;
for( size_t i = 0; i < layersCount; i++ )
{
Tracing::tensor( { "enc.layer[ %i ].in", i }, cur );
cur = encodeLayer( cur, i, encParams.n_state, encParams.n_head, encParams.n_ctx );
}
}
Tracing::tensor( "enc.layers", cur );
// A few last steps
{
cur = norm( cur );
// cur = ln_f_g*cur + ln_f_b
fmaRepeat( cur, gpuModel.enc.lnPost );
}
// pre-compute cross-attention buffers
{
Tensor reshaped;
if( gpuInfo().useReshapedMatMul() )
{
if( cur.ne[ 1 ] != 1 )
{
profiler.setNextTag( "enc.cross" );
reshaped = reshapePanels( cur );
}
else
reshaped = cur;
}
const size_t layersCount = encParams.n_text_layer;
const uint32_t stride = encParams.n_state * encParams.n_ctx;
const float finalScaling = computeScaling( (int)encParams.n_state, (int)encParams.n_head );
for( size_t i = 0; i < layersCount; i++ )
{
const LayerDecoder& layer = gpuModel.dec.layers[ i ];
Tensor Kcross, Vcross;
if( gpuInfo().useReshapedMatMul() )
Kcross = mulMatEx( layer.crossAttnKey, reshaped, "enc.cross.1" );
else
{
profiler.setNextTag( "enc.cross.1" );
Kcross = mulMat( layer.crossAttnKey, cur );
}
scale( Kcross, finalScaling );
if( gpuInfo().useReshapedMatMul() )
Vcross = mulMatEx( layer.crossAttnValue.w, reshaped, "enc.cross.2" );
else
{
profiler.setNextTag( "enc.cross.2" );
Vcross = mulMat( layer.crossAttnValue.w, cur );
}
addRepeat( Vcross, layer.crossAttnValue.b );
Tensor k = kvCross.keys.view( stride, stride * (uint32_t)i );
copyImpl( Kcross, k, Kcross.getType() == eDataType::FP32 );
Tensor v = kvCross.values.view( stride, stride * (uint32_t)i );
copyImpl( Vcross, v, Vcross.getType() == eDataType::FP32 );
}
}
#if BUILD_HYBRID_VERSION
if( hybridContext )
{
// When running hybrid model, download cross-attention buffers from VRAM to system RAM
check( hybridContext->downloadKeyValues( kvCross ) );
}
#endif
return cur;
}
struct WhisperContext::sLayerDecParams
{
uint32_t n_state, n_head, N;
uint32_t n_ctx, n_past, M;
};
Tensor WhisperContext::decodeLayer( const Tensor& inpL, size_t il, const sLayerDecParams& ldp )
{
auto prof = profiler.block( eProfilerBlock::DecodeLayer );
const auto& layer = gpuModel.dec.layers[ il ];
std::optional<ArenaRaii> arenaRaii{ std::in_place, *this, arenas.layer };
if( 0 == il ) Tracing::tensor( "dec-inpL", inpL );
// norm
Tensor cur = norm( inpL );
fmaRepeat( cur, layer.attnLn0 );
if( 0 == il ) Tracing::tensor( "dec-norm", cur );
// self-attention
{
profiler.setNextTag( "dec.layer.1" );
Tensor Qcur = mulMat( layer.attnQuery.w, cur );
if( 0 == il ) Tracing::tensor( "dec-Qcur-0", Qcur );
const float scaling = computeScaling( (int)ldp.n_state, (int)ldp.n_head );
addRepeatScale( Qcur, layer.attnQuery.b, scaling );
if( 0 == il ) Tracing::tensor( "dec-Qcur-1", Qcur );
// note: no bias for Key
profiler.setNextTag( "dec.layer.2" );
Tensor Kcur = mulMat( layer.attnKey, cur );
scale( Kcur, scaling );
if( 0 == il ) Tracing::tensor( "dec-Kcur", Kcur );
profiler.setNextTag( "dec.layer.3" );
Tensor Vcur = mulMat( layer.attnValue.w, cur );
addRepeat( Vcur, layer.attnValue.b );
if( 0 == il ) Tracing::tensor( "dec-Vcur", Vcur );
// store key and value to memory
{
const uint32_t len = ldp.N * ldp.n_state;
const uint32_t off = ldp.n_state * ( (uint32_t)il * ldp.n_ctx + ldp.n_past );
Tensor k = kv.keys.view( len, off );
Tensor v = kv.values.view( len, off );
copyImpl( Kcur, k, true );
copyImpl( Vcur, v, true );
}
// ------
Tensor Q = permute( copy( Qcur, eDataType::FP32, { ldp.n_state / ldp.n_head, ldp.n_head, ldp.N } ), 0, 2, 1, 3 );
Tensor K = permute( kv.keys.view( ( ldp.n_past + ldp.N ) * ldp.n_state, (uint32_t)il * ldp.n_ctx * ldp.n_state )
.reshape3d( ldp.n_state / ldp.n_head, ldp.n_head, ldp.n_past + ldp.N ),
0, 2, 1, 3 );
profiler.setNextTag( "dec.layer.4" );
Tensor KQ = mulMat( K, Q );
if( 0 == il ) Tracing::tensor( "dec-KQ-0", KQ );
diagMaskInf( KQ, ldp.n_past );
if( 0 == il ) Tracing::tensor( "dec-KQ-1", KQ );
profiler.setNextTag( "decLayer.1" );
softMax( KQ );
if( 0 == il ) Tracing::tensor( "dec-KQ-2", KQ );
Tensor V_trans = permute(
kv.values
.view( ( ldp.n_past + ldp.N ) * ldp.n_state, (uint32_t)il * ldp.n_ctx * ldp.n_state )
.reshape3d( ldp.n_state / ldp.n_head, ldp.n_head, ldp.n_past + ldp.N ),
1, 2, 0, 3 );
profiler.setNextTag( "dec.layer.5" );
Tensor KQV = mulMat( V_trans, KQ );
if( 0 == il ) Tracing::tensor( "dec-KQV", KQV );
Tensor KQV_merged = permute( KQV, 0, 2, 1, 3 );
copyInPlace( cur, KQV_merged, eDataType::FP32, { ldp.n_state, ldp.N } );
}
{
profiler.setNextTag( "dec.layer.6" );
cur = mulMat( layer.attnLn1.w, cur );
}
// add the input
addRepeatEx( cur, layer.attnLn1.b, inpL );
Tensor inpCA = cur;
// norm
{
cur = norm( inpCA );
fmaRepeat( cur, layer.crossAttnLn0 );
}
// cross-attention
{
profiler.setNextTag( "dec.layer.7" );
Tensor Qcur = mulMat( layer.crossAttnQuery.w, cur );
addRepeatScale( Qcur, layer.crossAttnQuery.b, computeScaling( (int)ldp.n_state, (int)ldp.n_head ) );
// Kcross is already scaled
const uint32_t len = ldp.M * ldp.n_state;
const uint32_t off = (uint32_t)il * len;
Tensor Kcross = kvCross.keys.view( len, off ).reshape3d( ldp.n_state / ldp.n_head, ldp.n_head, ldp.M );
Tensor Vcross = kvCross.values.view( len, off ).reshape3d( ldp.n_state / ldp.n_head, ldp.n_head, ldp.M );
// ------
Tensor Q = permute( copy( Qcur, eDataType::FP32, { ldp.n_state / ldp.n_head, ldp.n_head, ldp.N } ), 0, 2, 1, 3 );
Tensor K = permute( Kcross, 0, 2, 1, 3 );
profiler.setNextTag( "dec.layer.8" );
Tensor KQ = mulMat( K, Q );
profiler.setNextTag( "decLayer.2" );
softMax( KQ );
Tensor V_trans = permute( Vcross, 1, 2, 0, 3 );
profiler.setNextTag( "dec.layer.9" );
Tensor KQV = mulMat( V_trans, KQ );
if( 0 == il ) Tracing::tensor( "dec-KQV", KQV );
Tensor KQV_merged = permute( KQV, 0, 2, 1, 3 );
copyInPlace( cur, KQV_merged, eDataType::FP32, { ldp.n_state, ldp.N } );
}
// projection
{
profiler.setNextTag( "dec.layer.10" );
cur = mulMat( layer.crossAttnLn1.w, cur );
}
// add the input
addRepeatEx( cur, layer.crossAttnLn1.b, inpCA );
Tensor inpFF = cur;
// feed-forward network
{
// norm
cur = norm( inpFF );
fmaRepeat( cur, layer.mlpLn );
if( gpuInfo().useReshapedMatMul() )
cur = mulMatEx( layer.mlp0.w, cur, "dec.layer.11" );
else
{
profiler.setNextTag( "dec.layer.11" );
cur = mulMat( layer.mlp0.w, cur );
}
addRepeatGelu( cur, layer.mlp0.b );
// projection
if( gpuInfo().useReshapedMatMul() )
{
if( cur.ne[ 1 ] != 1 )
{
const uint16_t tag = profiler.setNextTag( "dec.layer.12" );
cur = reshapePanels( cur );
// The mulMatTiledEx() line creates a layer output tensor. We have a special pool for such tensors so they survive the destruction of the arena.
arenaRaii.emplace( *this, decPool );
profiler.setNextTag( tag );
cur = mulMatTiledEx( layer.mlp1.w, cur );
}
else
{
// The mulMatByRowTiledEx() line creates a layer output tensor. We have a special pool for such tensors so they survive the destruction of the arena.
arenaRaii.emplace( *this, decPool );
profiler.setNextTag( "dec.layer.12" );
cur = mulMatByRowTiledEx( layer.mlp1.w, cur );
}
}
else
{
// The mulMat() line creates a layer output tensor. We have a special pool for such tensors so they survive the destruction of the arena.
arenaRaii.emplace( *this, decPool );
profiler.setNextTag( "dec.layer.12" );
cur = mulMat( layer.mlp1.w, cur );
}
}
// output from this layer
addRepeatEx( cur, layer.mlp1.b, inpFF );
return cur;
}
void WhisperContext::decode( const int* tokens, const int n_tokens, const sDecodeParams& decParams, std::vector<float>& probs, int threads )
{
auto cppp = profiler.cpuBlock( Whisper::eCpuBlock::DecodeStep );
#if BUILD_HYBRID_VERSION
if( hybridContext )
{
HybridContext::sDecParams sdp;
sdp.n_threads = threads;
sdp.M = decParams.M;
check( hybridContext->decode( tokens, n_tokens, decParams.n_past, sdp, probs ) );
return;
}
#endif
auto prof = profiler.block( eProfilerBlock::DecodeStep );
CaptureRaii renderdocCapture;
profiler.profileShaders = profileDecodeShaders;
ArenaRaii arenaRaii{ *this, arenas.outer };
assert( n_tokens > 0 );
const uint32_t N = (uint32_t)n_tokens;
decoderInput.resize( N );
Tensor embd = decoderInput.embedding( tokens );
Tensor cur = addRows( gpuModel.dec.tokenEmbedding, gpuModel.dec.positionalEmbedding, embd, decParams.n_past );
Tracing::tensor( "dec-rows", cur );
{
sLayerDecParams ldp;
ldp.n_state = decParams.n_state;
ldp.n_head = decParams.n_head;
ldp.N = N;
ldp.n_ctx = decParams.n_ctx;
ldp.n_past = decParams.n_past;
ldp.M = decParams.M;
#if 1
for( size_t i = 0; i < decParams.n_text_layer; i++ )
cur = decodeLayer( cur, i, ldp );
#else
dbgDecodeTest = decodeLayer( cur, 0, ldp );
return;
#endif
}
// norm
cur = norm( cur );
fmaRepeat( cur, gpuModel.dec.ln );
profiler.setNextTag( "dec.logits" );
cur = mulMat( gpuModel.dec.tokenEmbedding, cur );
// logits -> probs
profiler.setNextTag( "dec.probs" );
softMax( cur );
decoderOutput.copyFromVram( cur );
assert( decoderOutput.size() == N * decParams.n_vocab );
decoderOutput.copyToVector( probs );
Tracing::vector( "probs", probs );
}
__m128i WhisperContext::Arenas::getMemoryUse() const
{
__m128i res = outer.getMemoryUse();
res = _mm_add_epi64( res, layer.getMemoryUse() );
return res;
}
__m128i WhisperContext::DecoderLayerPool::getMemoryUse() const
{
size_t cb = result.getCapacity() * 4;
__m128i res = _mm_setzero_si128();
return _mm_insert_epi64( res, (int64_t)cb, 1 );
}
__m128i WhisperContext::getMemoryUse() const
{
__m128i res = MlContext::getMemoryUse();
res = _mm_add_epi64( res, arenas.getMemoryUse() );
res = _mm_add_epi64( res, decPool.getMemoryUse() );
res = _mm_add_epi64( res, melInput.getMemoryUse() );
res = _mm_add_epi64( res, kv.getMemoryUse() );
res = _mm_add_epi64( res, kvCross.getMemoryUse() );
res = _mm_add_epi64( res, decoderInput.getMemoryUse() );
res = _mm_add_epi64( res, decoderOutput.getMemoryUse() );
return res;
}
HRESULT WhisperContext::clearState()
{
// CHECK( kv.zeroMemory( cb ) );
// CHECK( kvCross.zeroMemory( cb ) );
// The above code doesn't work for some reason.
// Ideally need to debug, but destroying and re-creating these two buffers is not a huge deal. Unlike the buffers in the pools, only a few megabytes of VRAM.
kv.clear();
kvCross.clear();
CHECK( arenas.outer.zeroMemory() );
CHECK( arenas.layer.zeroMemory() );
CHECK( decPool.zeroMemory() );
CHECK( decoderInput.zeroMemory() );
decoderOutput.clear();
return S_OK;
}
| 19,104
|
C++
|
.cpp
| 593
| 29.676223
| 158
| 0.690099
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,576
|
mfUtils.cpp
|
Const-me_Whisper/Whisper/MF/mfUtils.cpp
|
#include "stdafx.h"
#include "mfUtils.h"
#include <mfapi.h>
HRESULT Whisper::createMediaType( bool stereo, IMFMediaType** pp )
{
if( nullptr == pp )
return E_POINTER;
CComPtr<IMFMediaType> mt;
CHECK( MFCreateMediaType( &mt ) );
CHECK( mt->SetGUID( MF_MT_MAJOR_TYPE, MFMediaType_Audio ) );
CHECK( mt->SetGUID( MF_MT_SUBTYPE, MFAudioFormat_Float ) );
CHECK( mt->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, SAMPLE_RATE ) );
const uint32_t channels = stereo ? 2 : 1;
CHECK( mt->SetUINT32( MF_MT_AUDIO_NUM_CHANNELS, channels ) );
CHECK( mt->SetUINT32( MF_MT_AUDIO_BLOCK_ALIGNMENT, channels * 4 ) );
CHECK( mt->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, channels * 4 * SAMPLE_RATE ) );
CHECK( mt->SetUINT32( MF_MT_AUDIO_BITS_PER_SAMPLE, 32 ) );
CHECK( mt->SetUINT32( MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE ) );
*pp = mt.Detach();
return S_OK;
}
HRESULT Whisper::getStreamDuration( IMFSourceReader* reader, int64_t& duration )
{
PROPVARIANT var;
PropVariantInit( &var );
CHECK( reader->GetPresentationAttribute( MF_SOURCE_READER_MEDIASOURCE, MF_PD_DURATION, &var ) );
if( var.vt == VT_UI8 )
{
// The documentation says the type of that attribute is UINT64
// https://learn.microsoft.com/en-us/windows/win32/medfound/mf-pd-duration-attribute
duration = var.uhVal.QuadPart;
return S_OK;
}
logError( u8"Unexpected type of MF_PD_DURATION attribute" );
return E_INVALIDARG;
}
HRESULT Whisper::validateCurrentMediaType( IMFSourceReader* reader, uint32_t expectedChannels )
{
CComPtr<IMFMediaType> mt;
CHECK( reader->GetCurrentMediaType( MF_SOURCE_READER_FIRST_AUDIO_STREAM, &mt ) );
GUID guid;
CHECK( mt->GetGUID( MF_MT_MAJOR_TYPE, &guid ) );
if( guid != MFMediaType_Audio )
return E_FAIL;
CHECK( mt->GetGUID( MF_MT_SUBTYPE, &guid ) );
if( guid != MFAudioFormat_Float )
return E_FAIL;
UINT32 u32;
CHECK( mt->GetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, &u32 ) );
if( u32 != SAMPLE_RATE )
return E_FAIL;
CHECK( mt->GetUINT32( MF_MT_AUDIO_NUM_CHANNELS, &u32 ) );
if( u32 != expectedChannels )
return E_FAIL;
return S_OK;
}
| 2,069
|
C++
|
.cpp
| 56
| 34.785714
| 97
| 0.722639
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,577
|
AudioCapture.cpp
|
Const-me_Whisper/Whisper/MF/AudioCapture.cpp
|
#include "stdafx.h"
#include <atlstr.h>
#include <mfapi.h>
#include <mfidl.h>
#include <mfreadwrite.h>
#include "AudioCapture.h"
#include "../API/iMediaFoundation.cl.h"
#include "../ComLightLib/comLightServer.h"
#pragma comment(lib, "Mf.lib")
namespace
{
struct Strings
{
CString displayName, endpoint;
};
HRESULT getAllocString( IMFActivate* activate, const GUID& id, CString& rdi )
{
wchar_t* pointer = nullptr;
UINT32 cchName;
HRESULT hr = activate->GetAllocatedString( id, &pointer, &cchName );
if( SUCCEEDED( hr ) )
rdi.SetString( pointer, cchName );
CoTaskMemFree( pointer );
return hr;
}
HRESULT getInfo( IMFActivate* activate, Strings& rdi )
{
CHECK( getAllocString( activate, MF_DEVSOURCE_ATTRIBUTE_FRIENDLY_NAME, rdi.displayName ) );
CHECK( getAllocString( activate, MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE_AUDCAP_ENDPOINT_ID, rdi.endpoint ) );
return S_OK;
}
HRESULT __stdcall supplyDevices( Whisper::pfnFoundCaptureDevices pfn, void* pv, IMFActivate** ppDevices, UINT32 count )
{
if( ppDevices == nullptr || count == 0 )
return pfn( 0, nullptr, pv );
std::vector<Strings> strings;
strings.reserve( count );
for( UINT i = 0; i < count; i++ )
{
IMFActivate* const activate = ppDevices[ i ];
if( nullptr == activate )
continue;
Strings info;
HRESULT hr = getInfo( activate, info );
if( FAILED( hr ) )
continue;
strings.emplace_back( std::move( info ) );
}
const size_t len = strings.size();
if( 0 == len )
return pfn( 0, nullptr, pv );
std::vector<Whisper::sCaptureDevice> pointers;
pointers.resize( len );
for( size_t i = 0; i < len; i++ )
{
const auto& src = strings[ i ];
auto& dest = pointers[ i ];
dest.displayName = src.displayName;
dest.endpoint = src.endpoint;
}
return pfn( (int)len, pointers.data(), pv );
}
}
HRESULT __stdcall Whisper::captureDeviceList( pfnFoundCaptureDevices pfn, void* pv )
{
// Create an attribute store to hold the search criteria.
CComPtr<IMFAttributes> attrs;
CHECK( MFCreateAttributes( &attrs, 1 ) );
// Request audio capture devices
CHECK( attrs->SetGUID( MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE, MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE_AUDCAP_GUID ) );
// Enumerate the devices
IMFActivate** ppDevices = nullptr;
UINT32 count = 0;
CHECK( MFEnumDeviceSources( attrs, &ppDevices, &count ) );
// Feed the data to the caller
HRESULT hr = supplyDevices( pfn, pv, ppDevices, count );
// Free the memory
for( DWORD i = 0; i < count; i++ )
ppDevices[ i ]->Release();
CoTaskMemFree( ppDevices );
return hr;
}
namespace
{
using namespace Whisper;
class Capture : public ComLight::ObjectRoot<iAudioCapture>
{
CComPtr<IMFSourceReader> reader;
CComPtr<iMediaFoundation> mediaFoundation;
sCaptureParams captureParams;
HRESULT COMLIGHTCALL getReader( IMFSourceReader** pp ) const noexcept override final
{
if( pp == nullptr )
return E_POINTER;
CComPtr<IMFSourceReader> res = reader;
*pp = res.Detach();;
return S_OK;
}
const sCaptureParams& COMLIGHTCALL getParams() const noexcept override final
{
return captureParams;
}
public:
HRESULT open( iMediaFoundation* owner, const wchar_t* endpoint, const sCaptureParams& cp );
};
HRESULT Capture::open( iMediaFoundation* owner, const wchar_t* endpoint, const sCaptureParams& cp )
{
// Create an attribute store to hold the search criteria.
CComPtr<IMFAttributes> attrs;
CHECK( MFCreateAttributes( &attrs, 2 ) );
// Request audio capture devices
CHECK( attrs->SetGUID( MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE, MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE_AUDCAP_GUID ) );
CHECK( attrs->SetString( MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE_AUDCAP_ENDPOINT_ID, endpoint ) );
CComPtr<IMFMediaSource> source;
HRESULT hr = MFCreateDeviceSource( attrs, &source );
if( FAILED( hr ) )
{
logErrorHr( hr, u8"MFCreateDeviceSource" );
return hr;
}
// TODO: implement IMFSourceReaderCallback, pass into MF_SOURCE_READER_ASYNC_CALLBACK attribute
// This is to support cancellation
hr = MFCreateSourceReaderFromMediaSource( source, nullptr, &reader );
if( FAILED( hr ) )
{
logErrorHr( hr, u8"MFCreateSourceReaderFromMediaSource" );
return hr;
}
captureParams = cp;
mediaFoundation = owner;
return S_OK;
}
}
HRESULT __stdcall Whisper::captureOpen( iMediaFoundation* owner, const wchar_t* endpoint, const sCaptureParams& captureParams, iAudioCapture** pp ) noexcept
{
if( nullptr == endpoint || nullptr == pp )
return E_POINTER;
ComLight::CComPtr<ComLight::Object<Capture>> res;
CHECK( ComLight::Object<Capture>::create( res ) );
CHECK( res->open( owner, endpoint, captureParams ) );
res.detach( pp );
return S_OK;
}
| 4,693
|
C++
|
.cpp
| 143
| 30.041958
| 156
| 0.725425
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,578
|
MediaFoundation.cpp
|
Const-me_Whisper/Whisper/MF/MediaFoundation.cpp
|
#include "stdafx.h"
#include "../API/iMediaFoundation.cl.h"
#include "mfStartup.h"
#include "../ComLightLib/comLightServer.h"
#include "loadAudioFile.h"
#include <mfidl.h>
#include <mfreadwrite.h>
#include "mfUtils.h"
#include "AudioCapture.h"
#include <mfapi.h>
#include <shlwapi.h>
namespace Whisper
{
class AudioReader : public ComLight::ObjectRoot<iAudioReader>
{
CComPtr<IMFSourceReader> reader;
bool wantStereo;
CComPtr<iMediaFoundation> mediaFoundation;
mutable int64_t preciseSamplesCount = 0;
HRESULT COMLIGHTCALL getReader( IMFSourceReader** pp ) const noexcept override final
{
if( pp == nullptr )
return E_POINTER;
CComPtr<IMFSourceReader> res = reader;
*pp = res.Detach();;
return S_OK;
}
HRESULT COMLIGHTCALL requestedStereo() const noexcept override final
{
return wantStereo ? S_OK : S_FALSE;
}
HRESULT COMLIGHTCALL getDuration( int64_t& rdi ) const noexcept override final
{
if( reader )
{
if( 0 == preciseSamplesCount )
return getStreamDuration( reader, rdi );
else
{
rdi = MFllMulDiv( preciseSamplesCount, 10'000'000, SAMPLE_RATE, 0 );
return S_OK;
}
}
return OLE_E_BLANK;
}
public:
HRESULT open( iMediaFoundation* owner, LPCTSTR path, bool stereo )
{
HRESULT hr = MFCreateSourceReaderFromURL( path, nullptr, &reader );
if( FAILED( hr ) )
{
logError16( L"Unable to decode audio file \"%s\", MFCreateSourceReaderFromURL failed", path );
return hr;
}
wantStereo = stereo;
mediaFoundation = owner;
logDebug16( L"Created source reader from the file \"%s\"", path );
return S_OK;
}
HRESULT open( iMediaFoundation* owner, IMFByteStream* stream, bool stereo )
{
HRESULT hr = MFCreateSourceReaderFromByteStream( stream, nullptr, &reader );
if( FAILED( hr ) )
{
logErrorHr( hr, u8"MFCreateSourceReaderFromByteStream failed" );
return hr;
}
wantStereo = stereo;
mediaFoundation = owner;
logDebug( u8"Created source reader from the byte stream" );
return S_OK;
}
void setPreciseSamplesCount( int64_t count ) const
{
preciseSamplesCount = count;
}
};
void setPreciseSamplesCount( const iAudioReader* ar, int64_t count )
{
const AudioReader* r = static_cast<const AudioReader*>( ar );
r->setPreciseSamplesCount( count );
}
class MediaFoundation : public ComLight::ObjectRoot<iMediaFoundation>
{
MfStartupRaii raii;
DWORD tid = ~(DWORD)0;
HRESULT COMLIGHTCALL loadAudioFile( LPCTSTR path, bool stereo, iAudioBuffer** pp ) const noexcept override final
{
return Whisper::loadAudioFile( path, stereo, pp );
}
HRESULT COMLIGHTCALL openAudioFile( LPCTSTR path, bool stereo, iAudioReader** pp ) noexcept override final
{
if( nullptr == path || nullptr == pp )
return E_POINTER;
ComLight::CComPtr<ComLight::Object<AudioReader>> res;
CHECK( ComLight::Object<AudioReader>::create( res ) );
CHECK( res->open( this, path, stereo ) );
res.detach( pp );
return S_OK;
}
HRESULT COMLIGHTCALL loadAudioFileData( const void* data, uint64_t size, bool stereo, iAudioReader** pp ) noexcept override final
{
if( nullptr == data || nullptr == pp )
return E_POINTER;
if( 0 != ( size >> 32 ) )
return DISP_E_OVERFLOW; // SHCreateMemStream is limited to 4GB, it seems
CComPtr<IStream> comStream;
// Microsoft neglected to document their API, but Wine returns a new stream with reference counter = 1
// See shstream_create() function there https://source.winehq.org/source/dlls/shcore/main.c#0832
// That's why we need the Attach(), as opposed to an assignment
comStream.Attach( SHCreateMemStream( (const BYTE*)data, (UINT)size ) );
if( !comStream )
{
logError( u8"SHCreateMemStream failed" );
return E_FAIL;
}
CComPtr<IMFByteStream> mfStream;
CHECK( MFCreateMFByteStreamOnStream( comStream, &mfStream ) );
ComLight::CComPtr<ComLight::Object<AudioReader>> res;
CHECK( ComLight::Object<AudioReader>::create( res ) );
CHECK( res->open( this, mfStream, stereo ) );
res.detach( pp );
return S_OK;
}
HRESULT COMLIGHTCALL listCaptureDevices( pfnFoundCaptureDevices pfn, void* pv ) noexcept override final
{
return captureDeviceList( pfn, pv );
}
HRESULT COMLIGHTCALL openCaptureDevice( LPCTSTR endpoint, const sCaptureParams& captureParams, iAudioCapture** pp ) noexcept override final
{
return captureOpen( this, endpoint, captureParams, pp );
}
protected:
HRESULT FinalConstruct()
{
CHECK( raii.startup() );
tid = GetCurrentThreadId();
return S_OK;
}
public:
~MediaFoundation() override
{
assert( tid == GetCurrentThreadId() );
}
};
}
HRESULT COMLIGHTCALL Whisper::initMediaFoundation( iMediaFoundation** pp )
{
if( nullptr == pp )
return E_POINTER;
ComLight::CComPtr<ComLight::Object<MediaFoundation>> obj;
CHECK( ComLight::Object<MediaFoundation>::create( obj ) );
obj.detach( pp );
return S_OK;
}
| 4,947
|
C++
|
.cpp
| 155
| 28.529032
| 141
| 0.719732
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,579
|
loadAudioFile.cpp
|
Const-me_Whisper/Whisper/MF/loadAudioFile.cpp
|
#include "stdafx.h"
#include "../ComLightLib/comLightServer.h"
#include "loadAudioFile.h"
#include "mfUtils.h"
#include "AudioBuffer.h"
#include <mfidl.h>
#include <mfreadwrite.h>
#include <mfapi.h>
#pragma comment(lib, "Mfreadwrite.lib")
#pragma comment(lib, "mfuuid.lib")
namespace Whisper
{
class MediaFileBuffer : public ComLight::ObjectRoot<iAudioBuffer>
{
AudioBuffer pcm;
uint32_t channels = 0;
uint32_t COMLIGHTCALL countSamples() const noexcept override final
{
return (uint32_t)( pcm.mono.size() );
}
const float* COMLIGHTCALL getPcmMono() const noexcept override final
{
if( !pcm.mono.empty() )
return pcm.mono.data();
return nullptr;
}
const float* COMLIGHTCALL getPcmStereo() const noexcept override final
{
if( !pcm.stereo.empty() )
return pcm.stereo.data();
return nullptr;
}
HRESULT COMLIGHTCALL getTime( int64_t& rdi ) const noexcept override final
{
rdi = 0;
return S_OK;
}
public:
HRESULT load( LPCTSTR path, bool stereo );
};
// Decode the complete media file into the in-memory PCM buffers.
// Sets up a Media Foundation source reader over the first audio stream, requests FP32 PCM output,
// and appends every decoded sample through the function pointer picked by appendSamplesFunc().
HRESULT MediaFileBuffer::load( LPCTSTR path, bool stereo )
{
	CComPtr<IMFSourceReader> reader;
	HRESULT hr = MFCreateSourceReaderFromURL( path, nullptr, &reader );
	if( FAILED( hr ) )
	{
		logError16( L"Unable to decode audio file \"%s\", MFCreateSourceReaderFromURL failed", path );
		return hr;
	}
	// Select only the first audio stream
	CHECK( reader->SetStreamSelection( MF_SOURCE_READER_ALL_STREAMS, FALSE ) );
	CHECK( reader->SetStreamSelection( MF_SOURCE_READER_FIRST_AUDIO_STREAM, TRUE ) );
	CComPtr<IMFMediaType> mtNative;
	CHECK( reader->GetNativeMediaType( MF_SOURCE_READER_FIRST_AUDIO_STREAM, MF_SOURCE_READER_CURRENT_TYPE_INDEX, &mtNative ) );
	UINT32 numChannels;
	CHECK( mtNative->GetUINT32( MF_MT_AUDIO_NUM_CHANNELS, &numChannels ) );
	const bool sourceMono = numChannels == 1;
	// Pick the append method matching the source / requested channel layouts
	const AudioBuffer::pfnAppendSamples pfn = AudioBuffer::appendSamplesFunc( sourceMono, stereo );
	channels = ( stereo && !sourceMono ) ? 2 : 1;
	CComPtr<IMFMediaType> mt;
	CHECK( createMediaType( !sourceMono, &mt ) );
	CHECK( reader->SetCurrentMediaType( MF_SOURCE_READER_FIRST_AUDIO_STREAM, nullptr, mt ) );
	while( true )
	{
		DWORD dwFlags = 0;
		CComPtr<IMFSample> sample;
		// Read the next sample.
		hr = reader->ReadSample( (DWORD)MF_SOURCE_READER_FIRST_AUDIO_STREAM, 0, nullptr, &dwFlags, nullptr, &sample );
		if( FAILED( hr ) )
		{
			logErrorHr( hr, u8"IMFSourceReader.ReadSample" );
			return hr;
		}
		if( dwFlags & MF_SOURCE_READERF_CURRENTMEDIATYPECHANGED )
		{
			logError( u8"Media type changes ain’t supported by the library." );
			return E_UNEXPECTED;
		}
		if( dwFlags & MF_SOURCE_READERF_ENDOFSTREAM )
			break;
		if( !sample )
		{
			// printf( "No sample\n" );
			continue;
		}
		// Get a pointer to the audio data in the sample.
		CComPtr<IMFMediaBuffer> buffer;
		hr = sample->ConvertToContiguousBuffer( &buffer );
		if( FAILED( hr ) )
			return hr;
		const float* pAudioData = nullptr;
		DWORD cbBuffer;
		hr = buffer->Lock( (BYTE**)&pAudioData, nullptr, &cbBuffer );
		if( FAILED( hr ) )
			return hr;
		try
		{
			const size_t countFloats = cbBuffer / sizeof( float );
			( pcm.*pfn )( pAudioData, countFloats );
		}
		catch( const std::bad_alloc& )
		{
			// Fixed: unlock the media buffer before bailing out, otherwise the lock leaks.
			// This matches the error handling of the same loop in PcmReader::readNextSample
			buffer->Unlock();
			return E_OUTOFMEMORY;
		}
		// Unlock the buffer
		hr = buffer->Unlock();
		if( FAILED( hr ) )
			return hr;
	}
	const size_t len = pcm.mono.size();
	if( len == 0 )
	{
		logError16( L"The audio file \"%s\" has no samples", path );
		return E_INVALIDARG;
	}
	if( len < SAMPLE_RATE / 2 )
		logError16( L"The file \"%s\" only has %zu samples, less than 0.5 seconds of audio", path, len );
	else
	{
		// Fixed: cast to double instead of int; the old (int)len cast would overflow for audio longer than ~2^31 samples
		logDebug16( L"Loaded audio file from \"%s\": %zu samples, %g seconds", path, len, (double)len * ( 1.0 / SAMPLE_RATE ) );
	}
	return S_OK;
}
}
// Decode a complete audio file into memory, returning an iAudioBuffer which owns the PCM data
HRESULT COMLIGHTCALL Whisper::loadAudioFile( LPCTSTR path, bool stereo, iAudioBuffer** pp )
{
	// Validate both pointers before doing any work
	if( nullptr == path || nullptr == pp )
		return E_POINTER;
	// Create the implementation object, decode the file into it, then hand the ownership to the caller
	ComLight::CComPtr<ComLight::Object<MediaFileBuffer>> result;
	CHECK( ComLight::Object<MediaFileBuffer>::create( result ) );
	CHECK( result->load( path, stereo ) );
	result.detach( pp );
	return S_OK;
}
| 4,129
|
C++
|
.cpp
| 131
| 28.114504
| 125
| 0.693256
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,580
|
mfStartup.cpp
|
Const-me_Whisper/Whisper/MF/mfStartup.cpp
|
#include "stdafx.h"
#include "mfStartup.h"
#include <atlbase.h>
#include <mfapi.h>
#pragma comment(lib, "Mfplat.lib")
namespace
{
	struct sCoInitStatus
	{
		// Possible state:
		// -1 is the initial state, coInitialize never called
		// S_OK - CoInitializeEx succeeded, in this state the counter tracks the count of coInitialize() for the current thread
		// S_FALSE - CoInitializeEx failed with RPC_E_CHANGED_MODE, or did nothing because already initialized for the current thread
		// Error status - CoInitializeEx failed for some other reason
		HRESULT code = -1;
		uint32_t counter = 0;
	};
	// Per-thread state: COM apartments are a per-thread concept
	thread_local sCoInitStatus coInitStatus;
	// Ref-counted COM initialization for the current thread.
	// Returns S_OK when this call performed the actual CoInitializeEx (the caller owns teardown via coUninitialize),
	// S_FALSE when COM was already initialized (the counter is incremented only in the S_OK state),
	// or an error code when CoInitializeEx failed earlier on this thread.
	static HRESULT coInitialize()
	{
		sCoInitStatus& cis = coInitStatus;
		HRESULT hr = cis.code;
		if( SUCCEEDED( hr ) )
		{
			if( S_OK == hr )
				cis.counter++;
			return S_FALSE;
		}
		if( hr == HRESULT( -1 ) )
		{
			// First call on this thread
			hr = CoInitializeEx( nullptr, COINIT_MULTITHREADED );
			if( S_OK == hr )
			{
				cis.counter = 1;
				return cis.code = S_OK;
			}
			if( S_FALSE == hr || RPC_E_CHANGED_MODE == hr )
			{
				// Someone else initialized COM on this thread; we don't own the teardown
				return cis.code = S_FALSE;
			}
			// Genuine failure; remember the code so subsequent calls fail the same way
			cis.code = hr;
			return hr;
		}
		return hr;
	}
	// Decrement the per-thread COM counter; calls CoUninitialize() when it drops to zero.
	// No-op unless this thread is in the S_OK state, i.e. this library owns the COM initialization.
	static void coUninitialize()
	{
		sCoInitStatus& cis = coInitStatus;
		if( cis.code == S_OK )
		{
			assert( cis.counter > 0 );
			cis.counter--;
			if( 0 == cis.counter )
				CoUninitialize();
		}
	}
	// Process-wide lock guarding the Media Foundation startup counter below
	static CComAutoCriticalSection s_lock;
#define LOCK() CComCritSecLock<CComAutoCriticalSection> lock{ s_lock }
	// Count of outstanding successful MFStartup() registrations across the process
	static uint32_t mfStartupCounter = 0;
	// Bits of MfStartupRaii::successFlags
	constexpr uint8_t FlagCOM = 1;
	constexpr uint8_t FlagMF = 0x10;
}
using namespace Whisper;
// Undo whatever startup() accomplished: release the Media Foundation reference,
// shutting MF down when this was the last one, then release this thread's COM reference.
MfStartupRaii::~MfStartupRaii()
{
	if( 0 != ( successFlags & FlagMF ) )
	{
		LOCK();
		assert( mfStartupCounter > 0 );
		mfStartupCounter--;
		// Fixed: the old code returned early when other MF users remained,
		// which skipped both clearing FlagMF and the COM cleanup below, leaking a COM reference
		if( 0 == mfStartupCounter )
			MFShutdown();
		successFlags &= ~FlagMF;
	}
	if( 0 != ( successFlags & FlagCOM ) )
	{
		coUninitialize();
		successFlags &= ~FlagCOM;
	}
}
// Initialize COM for the current thread and Media Foundation for the process, both ref-counted.
// Returns S_OK when this call did the actual MFStartup, S_FALSE when MF was already running,
// ERROR_ALREADY_INITIALIZED when this RAII object was already started, or a failure code.
HRESULT MfStartupRaii::startup()
{
	if( 0 != ( successFlags & FlagMF ) )
		return HRESULT_FROM_WIN32( ERROR_ALREADY_INITIALIZED );
	HRESULT hr = coInitialize();
	CHECK( hr );
	// Fixed: register for COM cleanup on any success (S_OK or S_FALSE), not just S_OK.
	// coInitialize() increments the per-thread counter for every nested S_FALSE call in the S_OK state,
	// so every such call must be paired with a coUninitialize() in the destructor;
	// coUninitialize() is a safe no-op when this thread is in the S_FALSE state.
	if( SUCCEEDED( hr ) )
		successFlags |= FlagCOM;
	LOCK();
	if( 0 == mfStartupCounter )
	{
		// Renamed from `hr` to avoid shadowing the COM status above
		const HRESULT hrMF = MFStartup( MF_VERSION, MFSTARTUP_LITE );
		if( SUCCEEDED( hrMF ) )
		{
			mfStartupCounter = 1;
			successFlags |= FlagMF;
			return S_OK;
		}
		// MFStartup failed: roll back the COM reference taken above
		if( 0 != ( successFlags & FlagCOM ) )
		{
			coUninitialize();
			successFlags &= ~FlagCOM;
		}
		return hrMF;
	}
	else
	{
		mfStartupCounter++;
		successFlags |= FlagMF;
		return S_FALSE;
	}
}
| 2,530
|
C++
|
.cpp
| 113
| 19.530973
| 127
| 0.67875
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,581
|
PcmReader.cpp
|
Const-me_Whisper/Whisper/MF/PcmReader.cpp
|
#include "stdafx.h"
#include "PcmReader.h"
#include <mfapi.h>
#include <Mferror.h>
#include "mfUtils.h"
namespace Whisper
{
	// Strategy interface abstracting the channel layout (mono source, downmixed stereo, true stereo),
	// so PcmReader's inner loops don't branch on the layout
	__interface iSampleHandler
	{
		// Copy one complete FFT_STEP-sized chunk from the buffer; mono handlers ignore pStereo
		void copyChunk( PcmMonoChunk* pMono, const AudioBuffer& rsi, size_t sourceOffset, PcmStereoChunk* pStereo ) const;
		// Discard the first `amount` samples of the buffer, moving the remainder to the front
		void moveBufferData( AudioBuffer& rdi, size_t amount ) const;
		// Append decoded FP32 samples to the buffer
		void appendPcm( AudioBuffer& rdi, const float* rsi, size_t countFloats ) const;
		// Copy a possibly-incomplete chunk of `samples` samples, zero-padding the tail up to FFT_STEP
		void copyChunk( PcmMonoChunk* pMono, const AudioBuffer& rsi, size_t sourceOffset, size_t samples, PcmStereoChunk* pStereo ) const;
		// Count of channels this handler expects from the source reader, 1 or 2
		uint32_t readerChannelsCount() const;
	};
}
namespace
{
using namespace Whisper;
	// Copy `samples` mono samples into the chunk, zero-padding the tail up to FFT_STEP samples
	__forceinline void copyMono( PcmMonoChunk* rdi, const AudioBuffer& rsi, size_t sourceOffset, size_t samples )
	{
		assert( sourceOffset + samples <= rsi.mono.size() );
		// The magic number 4 is sizeof( float )
		memcpy( rdi->mono.data(), &rsi.mono[ sourceOffset ], samples * 4 );
		if( samples < FFT_STEP )
			memset( rdi->mono.data() + samples, 0, ( FFT_STEP - samples ) * 4 );
	}
__forceinline void copyStereo( PcmStereoChunk* rdi, const AudioBuffer& rsi, size_t sourceOffset, size_t samples )
{
memcpy( rdi->stereo.data(), &rsi.stereo[ sourceOffset * 2 ], samples * 8 );
if( samples < FFT_STEP )
memset( rdi->stereo.data() + samples * 2, 0, ( FFT_STEP - samples ) * 8 );
}
	// Handler for a mono source producing mono output: only the `mono` vector of AudioBuffer is used
	struct HandlerMono : iSampleHandler
	{
		void appendPcm( AudioBuffer& rdi, const float* rsi, size_t countFloats ) const override
		{
			rdi.appendMono( rsi, countFloats );
		}
		void copyChunk( PcmMonoChunk* pMono, const AudioBuffer& rsi, size_t sourceOffset, PcmStereoChunk* pStereo ) const override final
		{
			copyMono( pMono, rsi, sourceOffset, FFT_STEP );
		}
		void copyChunk( PcmMonoChunk* pMono, const AudioBuffer& rsi, size_t sourceOffset, size_t samples, PcmStereoChunk* pStereo ) const override final
		{
			copyMono( pMono, rsi, sourceOffset, samples );
		}
		void moveBufferData( AudioBuffer& rdi, size_t amount ) const override final
		{
			const size_t len = rdi.mono.size();
			assert( amount <= len );
			if( amount < len )
			{
				// memmove because source and destination ranges may overlap; 4 = sizeof( float )
				const size_t block = len - amount;
				memmove( rdi.mono.data(), rdi.mono.data() + amount, block * 4 );
				rdi.mono.resize( block );
			}
			else
				rdi.mono.clear();
		}
		uint32_t readerChannelsCount() const override { return 1; }
	};
	// Handler for a stereo source downmixed to mono output:
	// reads 2 channels from the reader, stores averaged mono samples only
	struct HandlerDownmixedStereo : HandlerMono
	{
		void appendPcm( AudioBuffer& rdi, const float* rsi, size_t countFloats ) const override final
		{
			rdi.appendDownmixedStereo( rsi, countFloats );
		}
		uint32_t readerChannelsCount() const override final { return 2; }
	};
	// Handler for a stereo source keeping stereo output:
	// maintains both the downmixed `mono` vector (for the transcription pipeline)
	// and the interleaved `stereo` vector, which must stay in lock-step
	struct HandlerStereo : iSampleHandler
	{
		void appendPcm( AudioBuffer& rdi, const float* rsi, size_t countFloats ) const override final
		{
			rdi.appendStereo( rsi, countFloats );
		}
		void copyChunk( PcmMonoChunk* pMono, const AudioBuffer& rsi, size_t sourceOffset, PcmStereoChunk* pStereo ) const override final
		{
			copyMono( pMono, rsi, sourceOffset, FFT_STEP );
			copyStereo( pStereo, rsi, sourceOffset, FFT_STEP );
		}
		void copyChunk( PcmMonoChunk* pMono, const AudioBuffer& rsi, size_t sourceOffset, size_t samples, PcmStereoChunk* pStereo ) const override final
		{
			copyMono( pMono, rsi, sourceOffset, samples );
			copyStereo( pStereo, rsi, sourceOffset, samples );
		}
		void moveBufferData( AudioBuffer& rdi, size_t amount ) const override final
		{
			const size_t len = rdi.mono.size();
			assert( amount <= len );
			if( amount < len )
			{
				// Shift both vectors by `amount` samples; the stereo vector holds 2 floats per sample
				const size_t block = len - amount;
				memmove( rdi.mono.data(), rdi.mono.data() + amount, block * 4 );
				rdi.mono.resize( block );
				memmove( rdi.stereo.data(), rdi.stereo.data() + amount * 2, block * 8 );
				rdi.stereo.resize( block * 2 );
			}
			else
			{
				rdi.mono.clear();
				rdi.stereo.clear();
			}
		}
		uint32_t readerChannelsCount() const override final { return 2; }
	};
	// Stateless handler singletons; PcmReader keeps a pointer to one of these
	static const HandlerMono s_mono;
	static const HandlerDownmixedStereo s_downmix;
	static const HandlerStereo s_stereo;
__forceinline __m128i load( const GUID& guid )
{
return _mm_loadu_si128( ( const __m128i* )( &guid ) );
}
	// Find audio decoder MFT, query MF_MT_SUBTYPE attribute of the current input media type of that MFT.
	// Returns S_OK with the subtype GUID in `rdi`, or S_FALSE (with rdi zeroed) when no decoder MFT is present.
	HRESULT getDecoderInputSubtype( IMFSourceReader* reader, __m128i& rdi )
	{
		store16( &rdi, _mm_setzero_si128() );
		CComPtr<IMFSourceReaderEx> readerEx;
		CHECK( reader->QueryInterface( &readerEx ) );
		constexpr uint32_t stream = MF_SOURCE_READER_FIRST_AUDIO_STREAM;
		const __m128i decGuid = load( MFT_CATEGORY_AUDIO_DECODER );
		alignas( 16 ) GUID category;
		// Enumerate all transforms in the reader's topology for the audio stream
		for( DWORD i = 0; true; i++ )
		{
			CComPtr<IMFTransform> mft;
			HRESULT hr = readerEx->GetTransformForStream( stream, i, &category, &mft );
			if( hr == MF_E_INVALIDINDEX )
			{
				// This happens for *.wav input files
				// They don't have any MFT_CATEGORY_AUDIO_DECODER MFTs in the source reader, and it's not an error
				return S_FALSE;
			}
			if( FAILED( hr ) )
				return hr;
			// Skip transforms which are not audio decoders, e.g. resamplers
			const __m128i cat = _mm_load_si128( ( const __m128i* ) & category );
			if( !vectorEqual( decGuid, cat ) )
				continue;
			CComPtr<IMFMediaType> mt;
			CHECK( mft->GetInputCurrentType( 0, &mt ) );
			CHECK( mt->GetGUID( MF_MT_SUBTYPE, (GUID*)&rdi ) );
			return S_OK;
		}
	}
// S_OK when the reader has an MP3 decoder for the first audio stream, S_FALSE otherwise
HRESULT isMp3Decoder( IMFSourceReader* reader )
{
__m128i subtype;
CHECK( getDecoderInputSubtype( reader, subtype ) );
const bool res = vectorEqual( subtype, load( MFAudioFormat_MP3 ) );
return res ? S_OK : S_FALSE;
}
	// Workaround for a Microsoft's bug in Media Foundation MP3 decoder: https://github.com/Const-me/Whisper/issues/4
	// Media Foundation is reporting incorrect media duration = 12.54. Windows Media Player does the same.
	// Winamp and Media Player Classic are reporting 12:35, VLC reports 12:36.
	// Decodes the complete stream once, counting samples, then rewinds the reader to the start.
	// `length` receives the count of complete FFT_STEP chunks.
	HRESULT getPreciseDuration( IMFSourceReader* reader, size_t& length, bool mono, const iAudioReader* iar )
	{
		size_t samples = 0;
		// Decode the complete stream, counting samples
		while( true )
		{
			DWORD dwFlags = 0;
			CComPtr<IMFSample> sample;
			// Read the next sample
			HRESULT hr = reader->ReadSample( (DWORD)MF_SOURCE_READER_FIRST_AUDIO_STREAM, 0, nullptr, &dwFlags, nullptr, &sample );
			if( FAILED( hr ) )
			{
				logErrorHr( hr, u8"IMFSourceReader.ReadSample" );
				return hr;
			}
			if( dwFlags & MF_SOURCE_READERF_CURRENTMEDIATYPECHANGED )
			{
				// logError( u8"Media type changes ain’t supported by the library." );
				// return E_UNEXPECTED;
				// This happens for some video files at the very start of the reading, with Dolby AC3 audio track.
				// Instead of failing the transcribe process, verify the important attributes (FP32 samples, sample rate, count of channels) haven’t changed.
				CHECK( validateCurrentMediaType( reader, mono ? 1 : 2 ) );
			}
			if( dwFlags & MF_SOURCE_READERF_ENDOFSTREAM )
				break;
			if( !sample )
			{
				// printf( "No sample\n" );
				continue;
			}
			// Get a pointer to the audio data in the sample.
			CComPtr<IMFMediaBuffer> buffer;
			hr = sample->ConvertToContiguousBuffer( &buffer );
			if( FAILED( hr ) )
				return hr;
			const float* pAudioData = nullptr;
			DWORD cbBuffer;
			hr = buffer->Lock( (BYTE**)&pAudioData, nullptr, &cbBuffer );
			if( FAILED( hr ) )
				return hr;
			assert( 0 == ( cbBuffer % sizeof( float ) ) );
			const size_t countFloats = cbBuffer / sizeof( float );
			// Stereo streams interleave 2 floats per sample
			if( mono )
				samples += countFloats;
			else
			{
				assert( 0 == countFloats % 2 );
				samples += countFloats / 2;
			}
			// Unlock the buffer
			hr = buffer->Unlock();
			if( FAILED( hr ) )
				return hr;
		}
		// Rewind the stream to beginning
		PROPVARIANT pv;
		PropVariantInit( &pv );
		pv.vt = VT_I8;
		pv.hVal.QuadPart = 0;
		CHECK( reader->SetCurrentPosition( GUID_NULL, pv ) );
		// Make the output value
		length = samples / FFT_STEP;
		// Store the actual samples count in the reader
		// This way the iAudioReader.getDuration() API returns correct value to the user
		setPreciseSamplesCount( iar, samples );
		return S_OK;
	}
HRESULT getDuration( IMFSourceReader* reader, size_t& length, bool mono, const iAudioReader* iar )
{
HRESULT hr = isMp3Decoder( reader );
if( SUCCEEDED( hr ) )
{
if( S_OK == hr )
{
return getPreciseDuration( reader, length, mono, iar );
}
}
else
logWarningHr( hr, u8"isMp3Decoder" );
// Find out the length
int64_t durationTicks;
CHECK( getStreamDuration( reader, durationTicks ) );
// Convert length to chunks
// Seconds = Ticks / 10^7
// Samples = Seconds * SAMPLE_RATE = Ticks * SAMPLE_RATE / 10^7
// Chunks = Samples / FFT_STEP = Ticks * SAMPLE_RATE / ( FFT_STEP * 10^7 ), and we want that integer rounded down
constexpr __int64 mul = SAMPLE_RATE;
constexpr __int64 div = (__int64)FFT_STEP * 10'000'000;
length = (size_t)MFllMulDiv( durationTicks, mul, div, 0 );
return S_OK;
}
}
// Set up the source reader for FP32 PCM output, pick the sample handler matching the channel
// layout, and compute the stream length in chunks. Throws HRESULT on failure (via check()).
PcmReader::PcmReader( const iAudioReader* iar )
{
	if( nullptr == iar )
		throw E_POINTER;
	check( iar->getReader( &reader ) );
	const bool stereo = iar->requestedStereo() == S_OK;
	// Set up media type, and figure out sample handler
	check( reader->SetStreamSelection( MF_SOURCE_READER_ALL_STREAMS, FALSE ) );
	check( reader->SetStreamSelection( MF_SOURCE_READER_FIRST_AUDIO_STREAM, TRUE ) );
	CComPtr<IMFMediaType> mtNative;
	check( reader->GetNativeMediaType( MF_SOURCE_READER_FIRST_AUDIO_STREAM, MF_SOURCE_READER_CURRENT_TYPE_INDEX, &mtNative ) );
	UINT32 numChannels;
	check( mtNative->GetUINT32( MF_MT_AUDIO_NUM_CHANNELS, &numChannels ) );
	const bool sourceMono = numChannels < 2;
	// Mono source => mono handler; stereo source => downmix unless the caller asked for stereo output
	if( sourceMono )
		sampleHandler = &s_mono;
	else if( !stereo )
		sampleHandler = &s_downmix;
	else
	{
		sampleHandler = &s_stereo;
		m_stereoOutput = true;
	}
	CComPtr<IMFMediaType> mt;
	check( createMediaType( !sourceMono, &mt ) );
	check( reader->SetCurrentMediaType( MF_SOURCE_READER_FIRST_AUDIO_STREAM, nullptr, mt ) );
	// Find out the length.
	// Sadly, broken Microsoft's MP3 decoder MFT made this much harder than necessary:
	// https://github.com/Const-me/Whisper/issues/4
	check( getDuration( reader, m_length, sourceMono, iar ) );
}
// Pull one more IMFSample from the reader and append its PCM data to the buffer.
// Compacts the buffer first (drops the already-consumed prefix) and resets bufferReadOffset to 0.
// Returns S_OK on success, E_EOF at end of stream, or a failure code.
HRESULT PcmReader::readNextSample()
{
	const size_t off = bufferReadOffset;
	const size_t availableSamples = pcm.mono.size() - off;
	// If needed, move the remaining PCM data to the start of these vectors
	if( availableSamples > 0 )
	{
		if( 0 != off )
			sampleHandler->moveBufferData( pcm, off );
	}
	else
		pcm.clear();
	bufferReadOffset = 0;
	while( true )
	{
		DWORD dwFlags = 0;
		CComPtr<IMFSample> sample;
		// Read the next sample
		HRESULT hr = reader->ReadSample( (DWORD)MF_SOURCE_READER_FIRST_AUDIO_STREAM, 0, nullptr, &dwFlags, nullptr, &sample );
		if( FAILED( hr ) )
		{
			logErrorHr( hr, u8"IMFSourceReader.ReadSample" );
			return hr;
		}
		if( dwFlags & MF_SOURCE_READERF_CURRENTMEDIATYPECHANGED )
		{
			// logError( u8"Media type changes ain’t supported by the library." );
			// return E_UNEXPECTED;
			// This happens for some video files at the very start of the reading, with Dolby AC3 audio track.
			// Instead of failing the transcribe process, verify the important attributes (FP32 samples, sample rate, count of channels) haven’t changed.
			CHECK( validateCurrentMediaType( reader, sampleHandler->readerChannelsCount() ) );
		}
		if( dwFlags & MF_SOURCE_READERF_ENDOFSTREAM )
			return E_EOF;
		if( !sample )
		{
			// printf( "No sample\n" );
			continue;
		}
		// Get a pointer to the audio data in the sample.
		CComPtr<IMFMediaBuffer> buffer;
		hr = sample->ConvertToContiguousBuffer( &buffer );
		if( FAILED( hr ) )
			return hr;
		const float* pAudioData = nullptr;
		DWORD cbBuffer;
		hr = buffer->Lock( (BYTE**)&pAudioData, nullptr, &cbBuffer );
		if( FAILED( hr ) )
			return hr;
		try
		{
			assert( 0 == ( cbBuffer % sizeof( float ) ) );
			const size_t countFloats = cbBuffer / sizeof( float );
			sampleHandler->appendPcm( pcm, pAudioData, countFloats );
		}
		catch( const std::bad_alloc& )
		{
			// Unlock before bailing out, so the media buffer isn't leaked in a locked state
			buffer->Unlock();
			return E_OUTOFMEMORY;
		}
		// Unlock the buffer
		hr = buffer->Unlock();
		if( FAILED( hr ) )
			return hr;
		return S_OK;
	}
}
// Produce the next FFT_STEP-sized chunk of PCM, reading more data from the source when needed.
// The final chunk of the stream may be shorter; it's zero-padded by the handler.
// `stereo` is only written by the stereo handler; pass nullptr for mono output.
// Returns S_OK, E_EOF when the stream is fully consumed, or a failure code.
HRESULT PcmReader::readChunk( PcmMonoChunk& mono, PcmStereoChunk* stereo )
{
	while( true )
	{
		const size_t off = bufferReadOffset;
		const size_t availableSamples = pcm.mono.size() - off;
		if( availableSamples >= FFT_STEP )
		{
			// We have enough data in the buffer
			sampleHandler->copyChunk( &mono, pcm, off, stereo );
			bufferReadOffset = off + FFT_STEP;
			return S_OK;
		}
		if( !m_readerEndOfFile )
		{
			// We don't have enough data, but the stream has not ended yet, can load moar samples from the reader
			HRESULT hr = readNextSample();
			if( SUCCEEDED( hr ) )
				continue;
			if( hr != E_EOF )
				return hr;
			m_readerEndOfFile = true;
		}
		if( availableSamples > 0 )
		{
			// We have reached the end of stream of the reader, but the buffer still has a few samples.
			// Return the final incomplete chunk padded with zeros
			sampleHandler->copyChunk( &mono, pcm, off, availableSamples, stereo );
			bufferReadOffset = off + availableSamples;
			return S_OK;
		}
		return E_EOF;
	}
}
| 13,138
|
C++
|
.cpp
| 377
| 31.535809
| 146
| 0.700685
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,582
|
AudioBuffer.cpp
|
Const-me_Whisper/Whisper/MF/AudioBuffer.cpp
|
#include "stdafx.h"
#include "AudioBuffer.h"
using namespace Whisper;
// Bulk-append `countFloats` mono samples to the PCM buffer, unchanged
void AudioBuffer::appendMono( const float* rsi, size_t countFloats )
{
	const float* const rsiEnd = rsi + countFloats;
	mono.insert( mono.end(), rsi, rsiEnd );
}
// Append interleaved stereo samples: store them verbatim in `stereo`,
// and store the per-sample average of the two channels in `mono`.
// `countFloats` counts floats, i.e. 2 floats per stereo sample.
void AudioBuffer::appendStereo( const float* rsi, size_t countFloats )
{
	assert( 0 == ( countFloats % 2 ) );
	const size_t countSamples = countFloats / 2;
	const size_t oldLength = mono.size();
	// Invariant: the stereo vector always holds exactly 2 floats per mono sample
	assert( oldLength * 2 == stereo.size() );
	mono.resize( oldLength + countSamples );
	stereo.resize( ( oldLength + countSamples ) * 2 );
	// Vectorized main loop handles 4 samples (8 floats) per iteration; the tail loop below does the rest
	const float* const rsiEnd = rsi + countSamples * 2;
	const float* const rsiEndAligned = rsiEnd - ( countSamples * 2 ) % 8;
	float* rdiStereo = &stereo[ oldLength * 2 ];
	float* rdiMono = &mono[ oldLength ];
	const __m128 half = _mm_set1_ps( 0.5f );
	for( ; rsi < rsiEndAligned; rsi += 8, rdiStereo += 8, rdiMono += 4 )
	{
		// Load 4 samples = 8 floats
		__m128 v0 = _mm_loadu_ps( rsi ); // L0, R0, L1, R1
		__m128 v1 = _mm_loadu_ps( rsi + 4 );// L2, R2, L3, R3
		// Store into the stereo PCM vector
		_mm_storeu_ps( rdiStereo, v0 );
		_mm_storeu_ps( rdiStereo + 4, v1 );
		// Compute and store the average of these channels
		__m128 left = _mm_shuffle_ps( v0, v1, _MM_SHUFFLE( 2, 0, 2, 0 ) );
		__m128 right = _mm_shuffle_ps( v0, v1, _MM_SHUFFLE( 3, 1, 3, 1 ) );
		__m128 sum = _mm_add_ps( left, right );
		sum = _mm_mul_ps( sum, half );
		_mm_storeu_ps( rdiMono, sum );
	}
	// Scalar-ish tail loop, 1 sample (2 floats) per iteration
#pragma loop (no_vector)
	for( ; rsi < rsiEnd; rsi += 2, rdiStereo += 2, rdiMono++ )
	{
		__m128 vec = _mm_castpd_ps( _mm_load_sd( (const double*)rsi ) );
		_mm_store_sd( (double*)rdiStereo, _mm_castps_pd( vec ) );
		vec = _mm_add_ss( vec, _mm_movehdup_ps( vec ) );
		vec = _mm_mul_ss( vec, half );
		_mm_store_ss( rdiMono, vec );
	}
}
// Append interleaved stereo samples downmixed to mono: only the per-sample average
// of the two channels is stored; the stereo vector is untouched.
// `countFloats` counts floats, i.e. 2 floats per stereo sample.
void AudioBuffer::appendDownmixedStereo( const float* rsi, size_t countFloats )
{
	assert( 0 == ( countFloats % 2 ) );
	const size_t countSamples = countFloats / 2;
	const size_t oldLength = mono.size();
	mono.resize( oldLength + countSamples );
	// Vectorized main loop handles 4 samples (8 floats) per iteration; the tail loop below does the rest
	const float* const rsiEnd = rsi + countSamples * 2;
	const float* const rsiEndAligned = rsiEnd - ( countSamples * 2 ) % 8;
	float* rdiMono = &mono[ oldLength ];
	const __m128 half = _mm_set1_ps( 0.5f );
	for( ; rsi < rsiEndAligned; rsi += 8, rdiMono += 4 )
	{
		// Load 4 samples = 8 floats
		__m128 v0 = _mm_loadu_ps( rsi ); // L0, R0, L1, R1
		__m128 v1 = _mm_loadu_ps( rsi + 4 );// L2, R2, L3, R3
		// Compute and store the average of these channels
		__m128 left = _mm_shuffle_ps( v0, v1, _MM_SHUFFLE( 2, 0, 2, 0 ) );
		__m128 right = _mm_shuffle_ps( v0, v1, _MM_SHUFFLE( 3, 1, 3, 1 ) );
		__m128 sum = _mm_add_ps( left, right );
		sum = _mm_mul_ps( sum, half );
		_mm_storeu_ps( rdiMono, sum );
	}
	// Scalar-ish tail loop, 1 sample (2 floats) per iteration
#pragma loop (no_vector)
	for( ; rsi < rsiEnd; rsi += 2, rdiMono++ )
	{
		__m128 vec = _mm_castpd_ps( _mm_load_sd( (const double*)rsi ) );
		vec = _mm_add_ss( vec, _mm_movehdup_ps( vec ) );
		vec = _mm_mul_ss( vec, half );
		_mm_store_ss( rdiMono, vec );
	}
}
| 2,967
|
C++
|
.cpp
| 76
| 36.592105
| 79
| 0.629565
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,584
|
KeyValueDownloader.cpp
|
Const-me_Whisper/Whisper/Hybrid/KeyValueDownloader.cpp
|
#include "stdafx.h"
#include "KeyValueDownloader.h"
// Create the two CPU-readable staging buffers matching the size of the GPU cross-attention K/V buffers.
// n_elements * 2 bytes per buffer — presumably 2 bytes per element i.e. FP16; confirm against KeyValueBuffers
HRESULT KeyValueDownloader::create( const Whisper::sModelParams& mp )
{
	// Fixed: removed the unused local `n_audio_ctx`; the expression below reads mp.n_audio_ctx directly
	const uint32_t n_mem = mp.n_text_layer * mp.n_audio_ctx;
	const uint32_t n_elements = mp.n_text_state * n_mem;
	CD3D11_BUFFER_DESC desc{ n_elements * 2, 0, D3D11_USAGE_STAGING, D3D11_CPU_ACCESS_READ };
	ID3D11Device* dev = DirectCompute::device();
	CHECK( dev->CreateBuffer( &desc, nullptr, &keys ) );
	CHECK( dev->CreateBuffer( &desc, nullptr, &values ) );
	length = n_elements;
	return S_OK;
}
// Copy the GPU key/value buffers into the staging buffers, making them readable by the CPU
HRESULT KeyValueDownloader::download( const DirectCompute::KeyValueBuffers& source )
{
	ID3D11DeviceContext* const context = DirectCompute::context();
	context->CopyResource( keys, source.keys.getBuffer() );
	context->CopyResource( values, source.values.getBuffer() );
	return S_OK;
}
// Map both staging buffers for CPU reads; throws HRESULT (via check) when mapping fails
KeyValueDownloader::ReadMap::ReadMap( KeyValueDownloader& owner ) :
	length( owner.length )
{
	check( mappedKeys.map( owner.keys, true ) );
	check( mappedValues.map( owner.values, true ) );
}
| 1,030
|
C++
|
.cpp
| 27
| 36.407407
| 90
| 0.742743
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,585
|
HybridContext.cpp
|
Const-me_Whisper/Whisper/Hybrid/HybridContext.cpp
|
#include "stdafx.h"
#include <immintrin.h>
#include <optional>
#include "HybridContext.h"
#include "../Utils/Trace/tracing.h"
#if BUILD_HYBRID_VERSION
#ifndef __AVX__
#error Hybrid version requires AVX build, and ideally AVX2 CPU
#endif // !__AVX__
namespace
{
	// Resolve the compute threads count: 0 = use all hardware threads, otherwise clamp to at least 1.
	// Debug builds are forced single-threaded, which simplifies stepping through the compute code.
	int threadsCount( int t )
	{
#ifdef NDEBUG
		if( t == 0 )
		{
			SYSTEM_INFO si;
			GetSystemInfo( &si );
			return (int)si.dwNumberOfProcessors;
		}
		if( t <= 1 )
			return 1;
		return t;
#else
		return 1;
#endif
	}
	constexpr size_t MB = 1u << 20;
}
// Wires up references to the model; the compute arenas are allocated later, in create()
HybridContext::HybridContext( const Whisper::WhisperModel& wm ) :
	ml( threadsCount( 0 ) ),
	model( wm.shared->hybridTensors ),
	whisperModel( wm )
{ }
namespace
{
	// Whisper model size, detected from the count of encoder layers
	enum struct eModelType : uint8_t
	{
		Tiny = 0,
		Base = 1,
		Small = 2,
		Medium = 3,
		Large = 4,
	};
	// Classify the model by n_audio_layer; fails for layer counts not used by any published Whisper model
	static HRESULT detectModelType( const Whisper::sModelParams& modelParams, eModelType& mt )
	{
		switch( modelParams.n_audio_layer )
		{
		case 4:
			mt = eModelType::Tiny;
			return S_OK;
		case 6:
			mt = eModelType::Base;
			return S_OK;
		case 12:
			mt = eModelType::Small;
			return S_OK;
		case 24:
			mt = eModelType::Medium;
			return S_OK;
		case 32:
			mt = eModelType::Large;
			return S_OK;
		}
		logError( u8"Unrecognized model" );
		return E_INVALIDARG;
	}
	// Per-model arena sizes in megabytes: decoder arena and per-layer decoder arena
	struct alignas( 2 ) RamMB
	{
		uint8_t dec, decLayer;
		constexpr RamMB( uint8_t d, uint8_t dl ) : dec( d ), decLayer( dl ) { }
		// Returns { dec, decLayer } scaled to bytes, as two int64 lanes of an SSE vector
		__m128i loadBytes() const
		{
			__m128i v = _mm_loadu_si16( this );
			// Upcast bytes to int64_t. That instruction can load directly from memory, too bad VC++ optimized doesn't care
			v = _mm_cvtepu8_epi64( v );
			// Scale from megabytes into bytes, the multiplier is obviously 2^20
			v = _mm_slli_epi64( v, 20 );
			return v;
		}
	};
	// The magic numbers are from MEM_REQ_DECODE and MEM_REQ_DECODE_LAYER red/black maps in the reference version,
	// near the top of whisper.cpp source file
	static const std::array<RamMB, 5> s_memRequirements =
	{
		RamMB{ 200, 32 }, // Tiny
		RamMB{ 202, 44 }, // Base
		RamMB{ 204, 64 }, // Small
		RamMB{ 206, 84 }, // Medium
		RamMB{ 208, 110 }, // Large
	};
}
// Allocate the memory arenas and key/value buffers, sized according to the detected model type
HRESULT HybridContext::create()
{
	// Allocate buffers for compute
	// We know they're large, so bypassing the heap
	eModelType modelType;
	CHECK( detectModelType( whisperModel.parameters, modelType ) );
	// Lane 0 = decoder arena size, lane 1 = per-layer arena size, both in bytes
	const __m128i bytes = s_memRequirements.at( (uint8_t)modelType ).loadBytes();
	CHECK( allocCompute.create( _mm_cvtsi128_si64( bytes ) ) );
	CHECK( allocComputeLayer.create( _mm_extract_epi64( bytes, 1 ) ) );
	// Create staging buffers to download output from encoder stage,
	// in the reference version they're named memory_cross_k / memory_cross_v
	CHECK( kvCross.create( whisperModel.parameters ) );
	// Create RAM buffers for memory_k / memory_v
	CHECK( kv.create( whisperModel.parameters ) );
	return S_OK;
}
// RAII guard which redirects `ml` tensor allocations to the given arena;
// the destructor restores the previous allocator and resets that arena
class HybridContext::SetAllocatorRaii
{
	HybridContext& context;
	CpuCompute::iMemoryAllocator* prevAlloc;
	CpuCompute::iArenaAllocator* newAlloc;
public:
	SetAllocatorRaii( HybridContext* owner, CpuCompute::iArenaAllocator& a ) :
		context( *owner )
	{
		prevAlloc = context.ml.setAllocator( &a );
		newAlloc = &a;
	}
	~SetAllocatorRaii()
	{
		context.ml.setAllocator( prevAlloc );
		newAlloc->resetArena();
	}
};
// Run the complete text decoder on the CPU for the given tokens, producing per-vocab probabilities.
// CPU port of whisper_decode() from the reference whisper.cpp; the encoder output is consumed
// through the kvCross staging buffers, and the self-attention K/V cache persists in `kv` across calls.
HRESULT HybridContext::decode( const int* tokens, const int n_tokens, const int n_past, const sDecParams& dp, std::vector<float>& probs )
{
	CHECK( ml.setThreadsCount( dp.n_threads ) );
	// whisper_decode
	const auto& hparams = whisperModel.parameters;
	const uint32_t n_vocab = hparams.n_vocab;
	const uint32_t n_ctx = hparams.n_text_ctx;
	const uint32_t n_state = hparams.n_text_state;
	const uint32_t n_head = hparams.n_text_head;
	const uint32_t n_layer = hparams.n_text_layer;
	const uint32_t N = n_tokens;
	const uint32_t M = dp.M;
	// Non-layer tensors live in the main compute arena for the duration of this call
	SetAllocatorRaii ac{ this, allocCompute };
	using namespace CpuCompute;
	// Token + positional embeddings
	Tensor cur = ml.addRows( model.tokenEmbedding, model.positionalEmbedding, tokens, n_tokens, n_past );
	Tracing::tensor( "dec-rows", cur );
	Tensor inpL = cur;
	auto kvCross = this->kvCross.map();
	for( uint32_t il = 0; il < n_layer; il++ )
	{
		if( 0 == il ) Tracing::tensor( "dec-inpL", inpL );
		const auto& layer = model.layers[ il ];
		// Per-layer temporaries come from the layer arena, reset when this iteration ends
		SetAllocatorRaii acLayer{ this, allocComputeLayer };
		// norm
		Tensor cur = ml.norm( inpL );
		ml.fmaRepeat( cur, layer.attnLn0 );
		if( 0 == il ) Tracing::tensor( "dec-norm", cur );
		// self-attention
		{
			Tensor Qcur = ml.mulMat( layer.attnQuery.w, cur );
			if( 0 == il ) Tracing::tensor( "dec-Qcur-0", Qcur );
			const float scaling = computeScaling( (int)n_state, (int)n_head );
			ml.addRepeatScale( Qcur, layer.attnQuery.b, scaling );
			if( 0 == il ) Tracing::tensor( "dec-Qcur-1", Qcur );
			// note: no bias for Key
			Tensor Kcur = ml.mulMat( layer.attnKey, cur );
			ml.scale( Kcur, scaling );
			if( 0 == il ) Tracing::tensor( "dec-Kcur", Kcur );
			Tensor Vcur = ml.mulMat( layer.attnValue.w, cur );
			ml.addRepeat( Vcur, layer.attnValue.b );
			if( 0 == il ) Tracing::tensor( "dec-Vcur", Vcur );
			// store key and value to memory
			{
				const uint32_t len = N * n_state;
				const uint32_t off = n_state * ( (uint32_t)il * n_ctx + n_past );
				Tensor k = kv.keysView( len, off );
				Tensor v = kv.valuesView( len, off );
				CHECK( ml.copyImpl( k, Kcur ) );
				CHECK( ml.copyImpl( v, Vcur ) );
			}
			// ------
			Tensor Q = ml.permute( ml.copy( Qcur, eDataType::FP32, { n_state / n_head, n_head, N } ), 0, 2, 1, 3 );
			Tensor K = ml.permute( kv.keysView( ( n_past + N ) * n_state, (uint32_t)il * n_ctx * n_state )
				.reshape3d( n_state / n_head, n_head, n_past + N ),
				0, 2, 1, 3 );
			Tensor KQ = ml.mulMat( K, Q );
			if( 0 == il ) Tracing::tensor( "dec-KQ-0", KQ );
			// Causal mask: tokens can't attend to positions after their own
			ml.diagMaskInf( KQ, n_past );
			if( 0 == il ) Tracing::tensor( "dec-KQ-1", KQ );
			ml.softMax( KQ );
			if( 0 == il ) Tracing::tensor( "dec-KQ-2", KQ );
			Tensor V_trans = ml.permute(
				kv.valuesView( ( n_past + N ) * n_state, (uint32_t)il * n_ctx * n_state )
				.reshape3d( n_state / n_head, n_head, n_past + N ),
				1, 2, 0, 3 );
			Tensor KQV = ml.mulMat( V_trans, KQ );
			if( 0 == il ) Tracing::tensor( "dec-KQV", KQV );
			Tensor KQV_merged = ml.permute( KQV, 0, 2, 1, 3 );
			ml.copyInPlace( cur, KQV_merged, eDataType::FP32, { n_state, N } );
		}
		{
			cur = ml.mulMat( layer.attnLn1.w, cur );
			ml.addRepeat( cur, layer.attnLn1.b );
		}
		// add the input
		Tensor inpCA = ml.add( cur, inpL );
		// norm
		{
			cur = ml.norm( inpCA );
			ml.fmaRepeat( cur, layer.crossAttnLn0 );
		}
		// cross-attention
		{
			Tensor Qcur = ml.mulMat( layer.crossAttnQuery.w, cur );
			ml.addRepeatScale( Qcur, layer.crossAttnQuery.b, computeScaling( (int)n_state, (int)n_head ) );
			// Kcross is already scaled
			const uint32_t len = M * n_state;
			const uint32_t off = (uint32_t)il * len;
			Tensor Kcross = kvCross.keysView( len, off ).reshape3d( n_state / n_head, n_head, M );
			Tensor Vcross = kvCross.valuesView( len, off ).reshape3d( n_state / n_head, n_head, M );
			// ------
			Tensor Q = ml.permute( ml.copy( Qcur, eDataType::FP32, { n_state / n_head, n_head, N } ), 0, 2, 1, 3 );
			Tensor K = ml.permute( Kcross, 0, 2, 1, 3 );
			Tensor KQ = ml.mulMat( K, Q );
			ml.softMax( KQ );
			Tensor V_trans = ml.permute( Vcross, 1, 2, 0, 3 );
			Tensor KQV = ml.mulMat( V_trans, KQ );
			if( 0 == il ) Tracing::tensor( "dec-KQV", KQV );
			Tensor KQV_merged = ml.permute( KQV, 0, 2, 1, 3 );
			ml.copyInPlace( cur, KQV_merged, eDataType::FP32, { n_state, N } );
		}
		// projection
		{
			cur = ml.mulMat( layer.crossAttnLn1.w, cur );
			ml.addRepeat( cur, layer.crossAttnLn1.b );
		}
		// add the input
		ml.addInPlace( cur, inpCA );
		Tensor inpFF = cur;
		// feed-forward network
		{
			// norm
			cur = ml.norm( inpFF );
			ml.fmaRepeat( cur, layer.mlpLn );
			cur = ml.mulMat( layer.mlp0.w, cur );
			ml.addRepeatGelu( cur, layer.mlp0.b );
			// The mulMat() below creates a tensor for the output of this layer.
			// We have a special memory storage for these tensors, that's how they survive resets of per-layer arenas
			allocLayerOutput.resetArena();
			ml.setAllocator( &allocLayerOutput );
			// projection
			cur = ml.mulMat( layer.mlp1.w, cur );
			ml.addRepeat( cur, layer.mlp1.b );
		}
		// output from this layer
		ml.addInPlace( cur, inpFF );
		inpL = cur;
	}
	// norm
	cur = ml.norm( inpL );
	ml.fmaRepeat( cur, model.ln );
	cur = ml.mulMat( model.tokenEmbedding, cur );
	// logits -> probs
	ml.softMax( cur );
	const float* rsi = cur.fp32();
	probs.assign( rsi, rsi + cur.countElements() );
	Tracing::vector( "probs", probs );
	return S_OK;
}
// Allocate storage for exactly one tensor, reusing the cached buffer when large enough.
// Throws HRESULT on allocation failure, or E_UNEXPECTED on a second allocation before resetArena().
// NOTE(review): the `align` argument is ignored — presumably buffer.allocate() already returns
// sufficiently aligned memory; confirm against the buffer implementation
void* HybridContext::AllocSingle::allocate( size_t cb, size_t align )
{
	if( !allocated )
	{
		allocated = true;
		if( cb <= capacity )
		{
			// The cached buffer is large enough, reuse it
			CpuCompute::dbgMarkUninitializedMemory( buffer.pointer(), capacity );
			return buffer.pointer();
		}
		else
		{
			HRESULT hr = buffer.allocate( cb );
			if( SUCCEEDED( hr ) )
			{
				capacity = cb;
				CpuCompute::dbgMarkUninitializedMemory( buffer.pointer(), capacity );
				return buffer.pointer();
			}
			logErrorHr( hr, u8"HybridContext.AllocSingle.allocate" );
			throw hr;
		}
	}
	else
	{
		logError( u8"HybridContext.AllocSingle only supports 1 tensor" );
		throw E_UNEXPECTED;
	}
}
// Allow the next allocate() call; the buffer itself is kept and reused
void HybridContext::AllocSingle::resetArena()
{
	allocated = false;
	if( capacity > 0 )
		CpuCompute::dbgMarkFreedMemory( buffer.pointer(), capacity );
}
#endif
| 9,408
|
C++
|
.cpp
| 301
| 28.199336
| 137
| 0.662102
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,587
|
ProfileCollection.cpp
|
Const-me_Whisper/Whisper/Utils/ProfileCollection.cpp
|
#include "stdafx.h"
#include "ProfileCollection.h"
#include "GpuProfiler.h"
#include "../Whisper/WhisperModel.h"
#include "../D3D/shaderNames.h"
using namespace Whisper;
// Measure slot for a GPU profiler block; key space 0x2xxxx.
// NOTE(review): unlike the eCpuBlock overload this one doesn't lock critSec —
// presumably only called from the single thread which submits GPU work; confirm
ProfileCollection::Measure& ProfileCollection::measure( DirectCompute::eProfilerBlock which )
{
	uint32_t key = (uint16_t)which;
	key |= 0x20000;
	return measures[ key ];
}
// Measure slot for a compute shader; key space 0x3xxxx.
// NOTE(review): no critSec lock here either — confirm single-threaded callers
ProfileCollection::Measure& ProfileCollection::measure( DirectCompute::eComputeShader which )
{
	uint32_t key = (uint16_t)which;
	key |= 0x30000;
	return measures[ key ];
}
// Measure slot for a CPU block; key space 0x1xxxx. Locked, CPU blocks may be timed from multiple threads
ProfileCollection::Measure& ProfileCollection::measure( eCpuBlock which )
{
	uint32_t key = (uint8_t)which;
	key |= 0x10000;
	CComCritSecLock<CComAutoCriticalSection> lock{ critSec };
	return measures[ key ];
}
#if PROFILER_COLLECT_TAGS
// Measure slot for a ( compute shader, tag ) pair.
// Key layout: shader ID in the upper 16 bits, caller-supplied tag in the lower 16;
// TaggedShaderCmp below compares against the upper 16 bits.
ProfileCollection::Measure& ProfileCollection::measure( DirectCompute::eComputeShader which, uint16_t tag )
{
	// Fixed: cast to uint16_t instead of uint8_t; the 8-bit cast truncated the shader ID,
	// colliding distinct shaders once the enum exceeds 255. The other overloads cast to uint16_t.
	uint32_t key = (uint16_t)which;
	key = key << 16;
	key |= tag;
	CComCritSecLock<CComAutoCriticalSection> lock{ critSec };
	return taggedShaders[ key ];
}
#endif
namespace
{
	// Prints one enum value as a string; returns nullptr for unknown values
	using pfnPrintEnum = const char* ( * )( uint16_t val );
	static const char* printCpuBlock( uint16_t id )
	{
		const eCpuBlock which = (eCpuBlock)id;
		switch( which )
		{
#define V(x) case eCpuBlock::x: return #x
			V( LoadModel );
			V( RunComplete );
			V( Run );
			V( Callbacks );
			V( Spectrogram );
			V( Sample );
			V( VAD );
			V( Encode );
			V( Decode );
			V( DecodeStep );
			V( DecodeLayer );
#undef V
		}
		assert( false );
		return nullptr;
	}
	static const char* printGpuBlock( uint16_t id )
	{
		using DirectCompute::eProfilerBlock;
		const eProfilerBlock which = (eProfilerBlock)id;
		switch( which )
		{
#define V(x) case eProfilerBlock::x: return #x
			V( LoadModel );
			V( Run );
			V( Encode );
			V( EncodeLayer );
			V( Decode );
			V( DecodeStep );
			V( DecodeLayer );
#undef V
		}
		assert( false );
		return nullptr;
	}
	static const char* printShader( uint16_t id )
	{
		return DirectCompute::computeShaderName( (DirectCompute::eComputeShader)id );
	}
	// Print the section header for the given key space (see the measure() overloads),
	// and return the enum printer for items of that section
	static pfnPrintEnum printSectionStart( uint16_t type )
	{
		switch( type )
		{
		case 1:
			logInfo( u8" CPU Tasks" );
			return &printCpuBlock;
		case 2:
			logInfo( u8" GPU Tasks" );
			return &printGpuBlock;
		case 3:
			logInfo( u8" Compute Shaders" );
			return &printShader;
		default:
			return nullptr;
		}
	}
	// A time value scaled to a human-friendly unit; the input is in 100-nanosecond ticks
	struct PrintedTime
	{
		double value;
		const char* unit;
		PrintedTime( double dbl )
		{
			if( dbl >= 10'000'000 )
			{
				value = dbl / 1.0E+7;
				unit = "seconds";
			}
			else if( dbl >= 10'000 )
			{
				value = dbl / 1.0E+4;
				unit = "milliseconds";
			}
			else
			{
				value = dbl / 1.0E+1;
				unit = "microseconds";
			}
		}
		// Fixed: deduplicated the integer version — it was a byte-for-byte copy of the FP64 logic above.
		// The delegation only differs for tick counts >= 2^63, i.e. many millennia, which can't happen.
		PrintedTime( uint64_t ticks ) : PrintedTime{ (double)(int64_t)ticks } { }
	};
}
void ProfileCollection::Measure::print( const char* name ) const
{
PrintedTime total{ totalTicks };
if( 1 == count )
logInfo( u8"%s\t%g %s", name, total.value, total.unit );
else
{
PrintedTime avg = (double)totalTicks / (double)(int64_t)count;
logInfo( u8"%s\t%g %s, %zu calls, %g %s average", name, total.value, total.unit, count, avg.value, avg.unit );
}
}
#if PROFILER_COLLECT_TAGS
struct TaggedShaderCmp
{
bool operator()( uint16_t cs, uint32_t key ) const
{
return cs < key >> 16;
}
bool operator()( uint32_t key, uint16_t cs ) const
{
return key >> 16 < cs;
}
};
void ProfileCollection::TaggedTemp::print() const
{
PrintedTime total{ ticks };
if( 1 == count )
logInfo( u8" %s\t%g %s", name, total.value, total.unit );
else
{
PrintedTime avg = (double)ticks / (double)(int64_t)count;
logInfo( u8" %s\t%g %s, %zu calls, %g %s average", name, total.value, total.unit, count, avg.value, avg.unit );
}
}
#endif
void ProfileCollection::print()
{
keysTemp.clear();
for( POSITION pos = measures.GetStartPosition(); nullptr != pos; )
{
auto* p = measures.GetNext( pos );
if( p->m_value.count == 0 )
continue;
keysTemp.push_back( p->m_key );
}
std::sort( keysTemp.begin(), keysTemp.end() );
auto it = std::lower_bound( keysTemp.begin(), keysTemp.end(), 0x30000u );
if( it != keysTemp.end() )
{
auto lambda = [ this ]( uint32_t a, uint32_t b )
{
const uint64_t ta = measures.Lookup( a )->m_value.totalTicks;
const uint64_t tb = measures.Lookup( b )->m_value.totalTicks;
return ta > tb;
};
std::stable_sort( it, keysTemp.end(), lambda );
}
#if PROFILER_COLLECT_TAGS
taggedKeysTemp.clear();
for( POSITION pos = taggedShaders.GetStartPosition(); nullptr != pos; )
{
auto* p = taggedShaders.GetNext( pos );
if( p->m_value.count == 0 )
continue;
taggedKeysTemp.push_back( p->m_key );
}
std::sort( taggedKeysTemp.begin(), taggedKeysTemp.end() );
#endif
uint16_t prevKeyType = 0;
pfnPrintEnum pfn = nullptr;
for( uint32_t k : keysTemp )
{
const uint16_t type = (uint16_t)( k >> 16 );
if( type != prevKeyType )
{
prevKeyType = type;
pfn = printSectionStart( type );
}
if( pfn == nullptr )
continue;
const auto* p = measures.Lookup( k );
assert( nullptr != p );
p->m_value.print( pfn( (uint16_t)k ) );
#if PROFILER_COLLECT_TAGS
if( type == 3 )
{
// Compute shader
auto range = std::equal_range( taggedKeysTemp.begin(), taggedKeysTemp.end(), (uint16_t)k, TaggedShaderCmp{} );
if( range.first != range.second )
{
// We have at least 1 tag for that compute shader
taggedTimes.clear();
uint64_t totalTicks = 0;
size_t totalCount = 0;
for( auto it = range.first; it != range.second; it++ )
{
const uint32_t key = *it;
const uint16_t tagId = (uint16_t)key;
assert( 0 != tagId );
const auto* p = taggedShaders.Lookup( key );
assert( nullptr != p );
auto& rdi = taggedTimes.emplace_back();
rdi.ticks = p->m_value.totalTicks;
totalTicks += p->m_value.totalTicks;
rdi.count = p->m_value.count;
totalCount += p->m_value.count;
rdi.name = tagNames[ tagId ];
}
assert( totalCount <= p->m_value.count );
if( totalCount < p->m_value.count )
{
auto& rdi = taggedTimes.emplace_back();
rdi.ticks = p->m_value.totalTicks - totalTicks;
rdi.count = p->m_value.count - totalCount;
rdi.name = tagNames[ 0 ];
}
std::stable_sort( taggedTimes.begin(), taggedTimes.end() );
for( const auto& e : taggedTimes )
e.print();
}
}
#endif
}
}
void ProfileCollection::reset()
{
for( POSITION pos = measures.GetStartPosition(); nullptr != pos; )
measures.GetNextValue( pos ).reset();
}
ProfileCollection::ProfileCollection( const WhisperModel& model )
{
const __m128i vals = model.getLoadTimes();
uint64_t s = (uint64_t)_mm_cvtsi128_si64( vals );
measure( eCpuBlock::LoadModel ).add( s );
s = (uint64_t)_mm_extract_epi64( vals, 1 );
measure( DirectCompute::eProfilerBlock::LoadModel ).add( s );
#if PROFILER_COLLECT_TAGS
// Tag ID 0 means no tag at all. makeTagId() method returns 0 for nullptr name, and starts numbering with 1 for non-empoty tag names
// Push the tag name corresponding to ID = 0, this way we can index directly with tag IDs.
tagNames.push_back( "<untagged>" );
#endif
}
uint16_t ProfileCollection::makeTagId( const char* tag )
{
#if PROFILER_COLLECT_TAGS
if( nullptr == tag )
return 0;
auto p = tagIDs.Lookup( tag );
if( nullptr != p )
return p->m_value;
const size_t newTag = tagIDs.GetCount() + 1;
if( newTag <= 0xFFFF )
{
tagIDs.SetAt( tag, (uint16_t)newTag );
tagNames.push_back( tag );
return (uint16_t)newTag;
}
throw DISP_E_OVERFLOW;
#else
return 0;
#endif
}
| 7,794
|
C++
|
.cpp
| 305
| 22.613115
| 133
| 0.665997
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,588
|
miscUtils.cpp
|
Const-me_Whisper/Whisper/Utils/miscUtils.cpp
|
#include "stdafx.h"
#include "miscUtils.h"
#include <cmath>
void setCurrentThreadName( const char* threadName )
{
const DWORD dwThreadID = GetCurrentThreadId();
// https://stackoverflow.com/a/10364541/126995
#pragma pack(push,8)
typedef struct tagTHREADNAME_INFO
{
DWORD dwType; // Must be 0x1000.
LPCSTR szName; // Pointer to name (in user addr space).
DWORD dwThreadID; // Thread ID (-1=caller thread).
DWORD dwFlags; // Reserved for future use, must be zero.
} THREADNAME_INFO;
#pragma pack(pop)
THREADNAME_INFO info;
info.dwType = 0x1000;
info.szName = threadName;
info.dwThreadID = dwThreadID;
info.dwFlags = 0;
constexpr DWORD MS_VC_EXCEPTION = 0x406D1388;
__try
{
RaiseException( MS_VC_EXCEPTION, 0, sizeof( info ) / sizeof( ULONG_PTR ), (ULONG_PTR*)&info );
}
__except( EXCEPTION_EXECUTE_HANDLER )
{
}
}
float computeScaling( int mul, int div )
{
#ifdef _DEBUG
const float ref = (float)std::pow( (double)mul / (double)div, -0.25 );
#endif
// Make int32 vector with both numbers
__m128i iv = _mm_cvtsi32_si128( mul );
iv = _mm_insert_epi32( iv, div, 1 );
// Convert both numbers to FP64
__m128d v = _mm_cvtepi32_pd( iv );
// Compute mul / div
v = _mm_div_sd( v, _mm_unpackhi_pd( v, v ) );
// Square root
v = _mm_sqrt_sd( v, v );
// 4-th root
v = _mm_sqrt_sd( v, v );
// Invert the value
v = _mm_div_sd( _mm_set_sd( 1.0 ), v );
// Downcast to FP32, and return the result
__m128 f32 = _mm_cvtsd_ss( _mm_setzero_ps(), v );
return _mm_cvtss_f32( f32 );
}
| 1,520
|
C++
|
.cpp
| 52
| 27.307692
| 96
| 0.677596
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,589
|
Logger.cpp
|
Const-me_Whisper/Whisper/Utils/Logger.cpp
|
#include "stdafx.h"
#include "Logger.h"
#include "../API/iContext.cl.h"
#include <cstdarg>
#include <atlstr.h>
namespace
{
wchar_t* formatMessage( HRESULT hr )
{
wchar_t* err;
if( FormatMessage( FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM,
NULL,
hr,
MAKELANGID( LANG_NEUTRAL, SUBLANG_DEFAULT ),
(LPTSTR)&err,
0,
nullptr ) )
return err;
return nullptr;
}
class Utf
{
CStringA utf8;
CStringW utf16;
void appendError( HRESULT hr )
{
const wchar_t* err = formatMessage( hr );
if( nullptr != err )
{
utf16 += err;
LocalFree( (HLOCAL)err );
utf16.TrimRight();
}
else
utf16.AppendFormat( L"error code %i (0x%08X)", hr, hr );
}
public:
const char* print( const char* pszFormat, std::va_list va )
{
utf8.FormatV( pszFormat, va );
return utf8;
}
const wchar_t* print( const wchar_t* pszFormat, std::va_list va )
{
utf16.FormatV( pszFormat, va );
return utf16;
}
const wchar_t* upcast( const char* message, int len )
{
int count = MultiByteToWideChar( CP_UTF8, 0, message, len, nullptr, 0 );
if( count == 0 )
return nullptr;
wchar_t* b = utf16.GetBufferSetLength( len + 1 );
count = MultiByteToWideChar( CP_UTF8, 0, message, len, b, len );
utf16.ReleaseBuffer( count );
return utf16;
}
int utf8Length() const
{
return utf8.GetLength();
}
const wchar_t* printError( HRESULT hr, const char* pszFormat, std::va_list va )
{
print( pszFormat, va );
upcast( utf8, utf8.GetLength() );
utf16 += L": ";
appendError( hr );
return utf16;
}
const char* downcast()
{
int count = WideCharToMultiByte( CP_UTF8, 0, utf16, utf16.GetLength(), nullptr, 0, nullptr, nullptr );
char* s = utf8.GetBufferSetLength( count + 1 );
count = WideCharToMultiByte( CP_UTF8, 0, utf16, utf16.GetLength(), s, count, nullptr, nullptr );
utf8.ReleaseBufferSetLength( count );
return utf8;
}
};
thread_local Utf ts_utf;
using Whisper::eLoggerFlags;
class Logger : Whisper::sLoggerSetup
{
inline bool hasFlag( eLoggerFlags bit ) const
{
return 0 != ( (uint8_t)flags & (uint8_t)bit );
}
bool useStdError() const
{
return hasFlag( eLoggerFlags::UseStandardError );
}
static void writeStdError( Whisper::eLogLevel lvl, const char* message, int len )
{
const wchar_t* w = ts_utf.upcast( message, len );
if( nullptr != w )
fwprintf( stderr, L"%s\n", w );
}
public:
Logger()
{
memset( this, 0, sizeof( Logger ) );
}
bool willLog( Whisper::eLogLevel lvl ) const
{
if( (uint8_t)lvl > (uint8_t)level )
return false;
if( useStdError() )
return true;
return nullptr != sink;
}
void message( Whisper::eLogLevel lvl, const char8_t* pszFormat, std::va_list va ) const
{
const char* s = ts_utf.print( (const char*)pszFormat, va );
auto pfn = sink;
if( nullptr != pfn )
pfn( context, lvl, s );
if( useStdError() )
writeStdError( lvl, s, ts_utf.utf8Length() );
}
void message( Whisper::eLogLevel lvl, const wchar_t* pszFormat, std::va_list va ) const
{
Utf& u = ts_utf;
const wchar_t* w = u.print( pszFormat, va );
auto pfn = sink;
if( nullptr != pfn )
pfn( context, lvl, u.downcast() );
if( useStdError() )
fwprintf( stderr, L"%s\n", w );
}
void message( Whisper::eLogLevel lvl, HRESULT hr, const char* pszFormat, std::va_list va ) const
{
if( hasFlag( eLoggerFlags::SkipFormatMessage ) )
{
message( lvl, (const char8_t*)pszFormat, va );
return;
}
Utf& u = ts_utf;
const wchar_t* w = ts_utf.printError( hr, (const char*)pszFormat, va );
auto pfn = sink;
if( nullptr != pfn )
pfn( context, lvl, u.downcast() );
if( useStdError() )
fwprintf( stderr, L"%s\n", w );
}
void operator=( const sLoggerSetup& rsi )
{
sink = rsi.sink;
context = rsi.context;
level = rsi.level;
flags = rsi.flags;
}
};
static Logger s_logger;
}
bool willLogMessage( Whisper::eLogLevel lvl )
{
return s_logger.willLog( lvl );
}
using Whisper::eLogLevel;
#define LOG_MESSAGE_IMPL( lvl ) \
if( !s_logger.willLog( lvl ) ) \
return; \
std::va_list args; \
va_start( args, pszFormat ); \
s_logger.message( lvl, pszFormat, args ); \
va_end( args );
void logError( const char8_t* pszFormat, ... )
{
LOG_MESSAGE_IMPL( eLogLevel::Error );
}
void logError16( const wchar_t* pszFormat, ... )
{
LOG_MESSAGE_IMPL( eLogLevel::Error );
}
void logWarning( const char8_t* pszFormat, ... )
{
LOG_MESSAGE_IMPL( eLogLevel::Warning );
}
void logWarning16( const wchar_t* pszFormat, ... )
{
LOG_MESSAGE_IMPL( eLogLevel::Warning );
}
void logInfo( const char8_t* pszFormat, ... )
{
LOG_MESSAGE_IMPL( eLogLevel::Info );
}
void logInfo16( const wchar_t* pszFormat, ... )
{
LOG_MESSAGE_IMPL( eLogLevel::Info );
}
void logDebug( const char8_t* pszFormat, ... )
{
LOG_MESSAGE_IMPL( eLogLevel::Debug );
}
void logDebug16( const wchar_t* pszFormat, ... )
{
LOG_MESSAGE_IMPL( eLogLevel::Debug );
}
#undef LOG_MESSAGE_IMPL
#define LOG_MESSAGE_IMPL( lvl ) \
if( !s_logger.willLog( lvl ) ) \
return; \
std::va_list args; \
va_start( args, pszFormat ); \
s_logger.message( lvl, hr, (const char*)pszFormat, args ); \
va_end( args );
void logErrorHr( long hr, const char8_t* pszFormat, ... )
{
LOG_MESSAGE_IMPL( eLogLevel::Error );
}
void logWarningHr( long hr, const char8_t* pszFormat, ... )
{
LOG_MESSAGE_IMPL( eLogLevel::Warning );
}
#undef LOG_MESSAGE_IMPL
// DLL entry point
HRESULT COMLIGHTCALL Whisper::setupLogger( const sLoggerSetup& setup )
{
s_logger = setup;
return S_OK;
}
| 5,782
|
C++
|
.cpp
| 220
| 23.390909
| 105
| 0.640808
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,590
|
parallelFor.cpp
|
Const-me_Whisper/Whisper/Utils/parallelFor.cpp
|
#include "stdafx.h"
#include "parallelFor.h"
namespace
{
class alignas( 64 ) ParallelForContext
{
volatile long threadIndex;
volatile HRESULT status;
alignas( 64 ) void* const context;
const Whisper::pfnParallelForCallback pfn;
static void __stdcall callbackStatic( PTP_CALLBACK_INSTANCE Instance, PVOID pv, PTP_WORK Work );
public:
ParallelForContext( void* ctx, Whisper::pfnParallelForCallback pfn );
PTP_WORK createWork();
HRESULT getStatus() const;
};
ParallelForContext::ParallelForContext( void* ctx, Whisper::pfnParallelForCallback callback ) :
threadIndex( 1 ),
status( S_FALSE ),
context( ctx ),
pfn( callback )
{ }
PTP_WORK ParallelForContext::createWork()
{
return CreateThreadpoolWork( &callbackStatic, this, nullptr );
}
void __stdcall ParallelForContext::callbackStatic( PTP_CALLBACK_INSTANCE Instance, PVOID pv, PTP_WORK Work )
{
ParallelForContext& context = *(ParallelForContext*)pv;
int ith = InterlockedIncrement( &context.threadIndex );
ith--;
const HRESULT hr = context.pfn( ith, context.context );
if( SUCCEEDED( hr ) )
return;
InterlockedCompareExchange( &context.status, hr, S_FALSE );
}
HRESULT ParallelForContext::getStatus() const
{
const HRESULT hr = status;
if( SUCCEEDED( hr ) )
return S_OK;
return hr;
}
}
namespace Whisper
{
HRESULT parallelFor( pfnParallelForCallback pfn, int threadsCount, void* ctx )
{
if( threadsCount < 1 )
return E_BOUNDS;
if( threadsCount == 1 )
return pfn( 0, ctx );
ParallelForContext context{ ctx, pfn };
PTP_WORK const pw = context.createWork();
if( nullptr == pw )
return getLastHr();
for( int i = 1; i < threadsCount; i++ )
SubmitThreadpoolWork( pw );
const HRESULT hr0 = pfn( 0, ctx );
WaitForThreadpoolWorkCallbacks( pw, FALSE );
CloseThreadpoolWork( pw );
if( FAILED( hr0 ) )
return hr0;
return context.getStatus();
}
}
using namespace Whisper;
ThreadPoolWork::~ThreadPoolWork()
{
if( nullptr != work )
{
CloseThreadpoolWork( work );
work = nullptr;
}
}
HRESULT ThreadPoolWork::create()
{
if( nullptr == work )
{
work = CreateThreadpoolWork( &callbackStatic, this, nullptr );
if( nullptr != work )
return S_OK;
return getLastHr();
}
return HRESULT_FROM_WIN32( ERROR_ALREADY_INITIALIZED );
}
HRESULT ThreadPoolWork::parallelFor( int threadsCount ) noexcept
{
if( nullptr != work )
{
if( threadsCount <= 1 )
return threadPoolCallback( 0 );
threadIndex = 1;
status = S_FALSE;
for( int i = 1; i < threadsCount; i++ )
SubmitThreadpoolWork( work );
const HRESULT hr0 = threadPoolCallback( 0 );
WaitForThreadpoolWorkCallbacks( work, FALSE );
if( FAILED( hr0 ) )
return hr0;
if( SUCCEEDED( status ) )
return S_OK;
return status;
}
return OLE_E_BLANK;
}
void __stdcall ThreadPoolWork::callbackStatic( PTP_CALLBACK_INSTANCE Instance, PVOID pv, PTP_WORK Work )
{
ThreadPoolWork* tpw = (ThreadPoolWork*)pv;
int ith = InterlockedIncrement( &tpw->threadIndex );
ith--;
const HRESULT hr = tpw->threadPoolCallback( ith );
if( SUCCEEDED( hr ) )
return;
InterlockedCompareExchange( &tpw->status, hr, S_FALSE );
}
| 3,154
|
C++
|
.cpp
| 116
| 24.517241
| 109
| 0.726005
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,591
|
GpuProfiler.cpp
|
Const-me_Whisper/Whisper/Utils/GpuProfiler.cpp
|
#include "stdafx.h"
#include "GpuProfiler.h"
#include "GpuProfilerSimple.h"
using namespace DirectCompute;
inline void GpuProfiler::sProfilerData::reset()
{
_mm_storeu_si128( ( __m128i* ) & callsPending, _mm_setzero_si128() );
}
inline void GpuProfiler::sProfilerData::addPending( int64_t time )
{
callsPending++;
timePending += time;
}
inline void GpuProfiler::sProfilerData::dropPending()
{
callsPending = 0;
timePending = 0;
}
inline void GpuProfiler::sProfilerData::makeTime( uint64_t freq )
{
dest->count += callsPending;
dest->totalTicks += ::makeTime( timePending, freq );
callsPending = 0;
timePending = 0;
}
HRESULT GpuProfiler::Queue::create()
{
ID3D11Device* const dev = device();
CD3D11_QUERY_DESC desc{ D3D11_QUERY_TIMESTAMP };
for( Entry& e : queue )
{
CHECK( dev->CreateQuery( &desc, &e.query ) );
e.block = nullptr;
e.event = eEvent::None;
e.shader = EmptyShader;
}
return S_OK;
}
namespace
{
static uint64_t getTimestamp( ID3D11Query* query, const DelayExecution& delay )
{
ID3D11DeviceContext* const ctx = context();
uint64_t res = 0;
while( true )
{
const HRESULT hr = ctx->GetData( query, &res, sizeof( uint64_t ), 0 );
check( hr );
if( S_OK == hr )
return res;
delay.delay();
}
}
static D3D11_QUERY_DATA_TIMESTAMP_DISJOINT waitForDisjointData( ID3D11Query* query )
{
ID3D11DeviceContext* const ctx = context();
ctx->End( query );
D3D11_QUERY_DATA_TIMESTAMP_DISJOINT res;
while( true )
{
const HRESULT hr = ctx->GetData( query, &res, sizeof( D3D11_QUERY_DATA_TIMESTAMP_DISJOINT ), 0 );
check( hr );
if( S_OK == hr )
return res;
Sleep( 1 );
}
}
}
void GpuProfiler::Queue::Entry::join( GpuProfiler& owner )
{
assert( nullptr != block );
uint64_t res = getTimestamp( query, owner.delay );
#if PROFILER_COLLECT_TAGS
block->haveTimestamp( event, shader, tag, res, owner );
#else
block->haveTimestamp( event, shader, 0, res, owner );
#endif
block = nullptr;
event = eEvent::None;
shader = EmptyShader;
}
void GpuProfiler::Queue::submit( BlockState* block, eEvent evt, uint16_t shader, uint16_t tag )
{
// if( evt == GpuProfiler::eEvent::Shader && shader == 0 ) __debugbreak();
assert( nullptr != block );
Entry& e = queue[ nextEntry ];
if( nullptr != e.block )
e.join( owner );
e.block = block;
e.event = evt;
e.shader = shader;
#if PROFILER_COLLECT_TAGS
e.tag = tag;
#endif
context()->End( e.query );
nextEntry = ( nextEntry + 1 ) % queueLength;
}
void GpuProfiler::Queue::join()
{
while( true )
{
Entry& e = queue[ nextEntry ];
if( nullptr == e.block )
return;
e.join( owner );
nextEntry = ( nextEntry + 1 ) % queueLength;
}
}
static inline uint32_t makeTagKey( uint16_t cs, uint16_t tag )
{
uint32_t r = cs;
r = r << 16;
r |= tag;
return r;
}
void GpuProfiler::BlockState::completePrevShader( uint64_t time, GpuProfiler& profiler )
{
if( shaderStart == -1 )
return;
assert( prevShader != EmptyShader );
const int64_t elapsed = (int64_t)time - shaderStart;
sProfilerData* dest = nullptr;
auto* p = profiler.results.Lookup( prevShader );
if( nullptr != p )
dest = &p->m_value;
else
{
sProfilerData& res = profiler.results[ prevShader ];
res.dest = &profiler.dest.measure( (eComputeShader)prevShader );
dest = &res;
}
dest->addPending( elapsed );
#if PROFILER_COLLECT_TAGS
if( 0 != prevShaderTag )
{
const uint32_t key = makeTagKey( prevShader, prevShaderTag );
auto* pt = profiler.resultsTagged.Lookup( key );
if( nullptr != pt )
dest = &pt->m_value;
else
{
sProfilerData& res = profiler.resultsTagged[ key ];
res.dest = &profiler.dest.measure( (eComputeShader)prevShader, prevShaderTag );
dest = &res;
}
dest->addPending( elapsed );
}
#endif
prevShader = EmptyShader;
prevShaderTag = 0;
shaderStart = -1;
}
void GpuProfiler::BlockState::haveTimestamp( eEvent evt, uint16_t cs, uint16_t tag, uint64_t time, GpuProfiler& profiler )
{
switch( evt )
{
case eEvent::BlockStart:
assert( -1 == timeStart );
assert( -1 == shaderStart );
assert( cs == EmptyShader );
timeStart = (int64_t)time;
if( nullptr != parentBlock )
parentBlock->completePrevShader( time, profiler );
return;
case eEvent::BlockEnd:
assert( -1 != timeStart );
assert( cs == EmptyShader );
completePrevShader( time, profiler );
destBlock->addPending( (int64_t)time - timeStart );
timeStart = -1;
return;
case eEvent::Shader:
assert( cs != EmptyShader );
// if( cs == (uint16_t)0 ) __debugbreak();
completePrevShader( time, profiler );
prevShader = cs;
prevShaderTag = tag;
shaderStart = (int64_t)time;
return;
}
assert( false );
}
HRESULT GpuProfiler::create( size_t maxDepth )
{
CD3D11_QUERY_DESC desc{ D3D11_QUERY_TIMESTAMP_DISJOINT };
CHECK( device()->CreateQuery( &desc, &disjoint ) );
CHECK( queries.create() );
stack.reserve( maxDepth );
return S_OK;
}
void GpuProfiler::blockStart( eProfilerBlock which )
{
BlockState* parentBlock;
if( stack.empty() )
{
context()->Begin( disjoint );
parentBlock = nullptr;
}
else
parentBlock = *stack.rbegin();
BlockState* bs = nullptr;
auto p = blockStates.Lookup( which );
if( nullptr != p )
bs = &p->m_value;
else
{
BlockState& block = blockStates[ which ];
block.destBlock = &results[ (uint16_t)which ];
block.destBlock->dest = &dest.measure( which );
bs = █
}
bs->parentBlock = parentBlock;
queries.submit( bs, eEvent::BlockStart );
stack.push_back( bs );
}
void GpuProfiler::blockEnd()
{
assert( !stack.empty() );
BlockState* const bs = *stack.rbegin();
queries.submit( bs, eEvent::BlockEnd );
stack.pop_back();
if( !stack.empty() )
return;
const D3D11_QUERY_DATA_TIMESTAMP_DISJOINT dtsd = waitForDisjointData( disjoint );
queries.join();
if( !dtsd.Disjoint )
{
// Fortunately, these timers appear to be relatively high resolution.
// Specifically, on the iGPU inside Ryzen 7 5700G that frequency is 1E+8 = 100 MHz
// On nVidia 1080Ti, that frequency is 1E+9 = 1 GHz
const uint64_t freq = dtsd.Frequency;
resultsMakeTime( freq );
}
else
{
// Something occurred in between the query's ID3D11DeviceContext::Begin and ID3D11DeviceContext::End calls
// that caused the timestamp counter to become discontinuous or disjoint, such as unplugging the AC cord on a laptop, overheating, or throttling up/down due to laptop savings events.
// The timestamp returned by ID3D11DeviceContext::GetData for a timestamp query is only reliable if Disjoint is FALSE.
resultsDropPending();
}
}
void GpuProfiler::computeShader( eComputeShader cs )
{
assert( !stack.empty() );
if( !profileShaders )
return;
BlockState* const bs = *stack.rbegin();
#if PROFILER_COLLECT_TAGS
queries.submit( bs, eEvent::Shader, (uint16_t)cs, m_nextTag );
m_nextTag = 0;
#else
queries.submit( bs, eEvent::Shader, (uint16_t)cs );
#endif
}
void GpuProfiler::resultsDropPending()
{
for( POSITION pos = results.GetStartPosition(); nullptr != pos; )
results.GetNextValue( pos ).dropPending();
#if PROFILER_COLLECT_TAGS
for( POSITION pos = resultsTagged.GetStartPosition(); nullptr != pos; )
resultsTagged.GetNextValue( pos ).dropPending();
#endif
}
void GpuProfiler::resultsMakeTime( uint64_t freq )
{
for( POSITION pos = results.GetStartPosition(); nullptr != pos; )
results.GetNextValue( pos ).makeTime( freq );
#if PROFILER_COLLECT_TAGS
for( POSITION pos = resultsTagged.GetStartPosition(); nullptr != pos; )
resultsTagged.GetNextValue( pos ).makeTime( freq );
#endif
}
void GpuProfiler::resultsReset()
{
for( POSITION pos = results.GetStartPosition(); nullptr != pos; )
results.GetNextValue( pos ).reset();
#if PROFILER_COLLECT_TAGS
for( POSITION pos = resultsTagged.GetStartPosition(); nullptr != pos; )
resultsTagged.GetNextValue( pos ).reset();
#endif
}
#if PROFILER_COLLECT_TAGS
uint16_t __declspec( noinline ) GpuProfiler::setNextTag( const char* name )
{
uint16_t tag = dest.makeTagId( name );
m_nextTag = tag;
return tag;
}
#endif
HRESULT GpuProfilerSimple::create()
{
ID3D11Device* const dev = device();
CD3D11_QUERY_DESC desc{ D3D11_QUERY_TIMESTAMP_DISJOINT };
CHECK( dev->CreateQuery( &desc, &disjoint ) );
desc.Query = D3D11_QUERY_TIMESTAMP;
CHECK( dev->CreateQuery( &desc, &begin ) );
CHECK( dev->CreateQuery( &desc, &end ) );
context()->Begin( disjoint );
context()->End( begin );
return S_OK;
}
HRESULT GpuProfilerSimple::time( uint64_t& rdi ) const
{
context()->End( end );
try
{
const D3D11_QUERY_DATA_TIMESTAMP_DISJOINT dtsd = waitForDisjointData( disjoint );
const uint64_t t2 = getTimestamp( end, delay );
const uint64_t t1 = getTimestamp( begin, delay );
if( !dtsd.Disjoint )
{
rdi = makeTime( t2 - t1, dtsd.Frequency );
return S_OK;
}
else
{
// Something occurred in between the query's ID3D11DeviceContext::Begin and ID3D11DeviceContext::End calls
// that caused the timestamp counter to become discontinuous or disjoint, such as unplugging the AC cord on a laptop, overheating, or throttling up/down due to laptop savings events.
// The timestamp returned by ID3D11DeviceContext::GetData for a timestamp query is only reliable if Disjoint is FALSE.
rdi = -1;
return S_FALSE;
}
}
catch( HRESULT hr )
{
return hr;
}
}
| 9,243
|
C++
|
.cpp
| 328
| 25.911585
| 185
| 0.714592
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,592
|
CpuProfiler.cpp
|
Const-me_Whisper/Whisper/Utils/CpuProfiler.cpp
|
#include "stdafx.h"
#include "CpuProfiler.h"
namespace
{
using namespace Whisper;
inline int64_t qpcNow()
{
int64_t res;
QueryPerformanceCounter( (LARGE_INTEGER*)&res );
return res;
}
class CpuTimescale
{
uint64_t frequency = 0;
const int64_t tscStart;
const int64_t qpcStart;
uint64_t computeTscFrequency();
public:
CpuTimescale() :
tscStart( tscNow() ),
qpcStart( qpcNow() )
{ }
inline uint64_t computeTicks( uint64_t tsc )
{
uint64_t freq = frequency;
if( freq == 0 )
freq = computeTscFrequency();
return makeTime( tsc, freq );
}
};
uint64_t __declspec( noinline ) CpuTimescale::computeTscFrequency()
{
int64_t tsc = tscNow();
int64_t qpc = qpcNow();
tsc -= tscStart;
qpc -= qpcStart;
uint64_t qpcFreq;
QueryPerformanceFrequency( (LARGE_INTEGER*)&qpcFreq );
// Seconds = qpc / qpcFreq
// ticks per second = tsc / seconds = tsc * qpcFreq / qpc
uint64_t res = ( (uint64_t)tsc * qpcFreq + ( (uint64_t)qpc / 2 ) - 1 ) / (uint64_t)qpc;
frequency = res;
const double GHz = (double)(int64_t)res * 1.0E-9;
logDebug( u8"Computed CPU base frequency: %g GHz", GHz );
return res;
}
static CpuTimescale timescale;
}
uint64_t Whisper::ticksFromTsc( uint64_t tscDiff )
{
return timescale.computeTicks( tscDiff );
}
| 1,297
|
C++
|
.cpp
| 52
| 22.134615
| 89
| 0.689376
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,593
|
DelayExecution.cpp
|
Const-me_Whisper/Whisper/Utils/DelayExecution.cpp
|
#include "stdafx.h"
#include "DelayExecution.h"
namespace
{
constexpr bool useHighRezTimer = false;
constexpr int64_t sleepMicroseconds = 200;
inline HRESULT sleepImpl( HANDLE timer )
{
constexpr int64_t sleepTicks = sleepMicroseconds * 10;
LARGE_INTEGER li;
// Negative values indicate relative time
li.QuadPart = -sleepTicks;
if( !SetWaitableTimerEx( timer, &li, 0, nullptr, nullptr, nullptr, 0 ) )
return getLastHr();
const DWORD res = WaitForSingleObject( timer, 50 );
if( res == WAIT_OBJECT_0 )
return S_OK;
if( res == WAIT_FAILED )
return getLastHr();
return E_FAIL;
}
}
void DelayExecution::sleepOnTheTimer( const DelayExecution& delay )
{
HRESULT hr = sleepImpl( delay.timer );
if( SUCCEEDED( hr ) )
return;
logWarningHr( hr, u8"DelayExecution.sleepOnTheTimer" );
}
void DelayExecution::spinWait( const DelayExecution& )
{
for( size_t i = 0; i < 1024; i++ )
_mm_pause();
}
void DelayExecution::sleep( const DelayExecution& )
{
Sleep( 0 );
}
DelayExecution::DelayExecution()
{
if constexpr( useHighRezTimer )
{
constexpr DWORD flags = CREATE_WAITABLE_TIMER_HIGH_RESOLUTION;
HANDLE h = CreateWaitableTimerEx( nullptr, nullptr, flags, TIMER_ALL_ACCESS );
if( nullptr != h )
{
timer.Attach( h );
pfn = &sleepOnTheTimer;
return;
}
const HRESULT hr = getLastHr();
logWarningHr( hr, u8"CreateWaitableTimerEx" );
}
pfn = &spinWait;
// pfn = &sleep;
}
| 1,429
|
C++
|
.cpp
| 56
| 23.125
| 80
| 0.722141
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,594
|
TraceWriter.cpp
|
Const-me_Whisper/Whisper/Utils/Trace/TraceWriter.cpp
|
#include "stdafx.h"
#include "TraceWriter.h"
#include <atlfile.h>
#include <atlcoll.h>
#include <atlstr.h>
#include "TraceStructures.h"
#include "../../ML/Tensor.h"
#include "../../CPU/Tensor.h"
#include <Shlobj.h>
using namespace Tracing;
namespace
{
static HRESULT createDir( LPCTSTR pathFile )
{
LPCWSTR fn = PathFindFileName( pathFile );
if( fn == pathFile )
return E_FAIL;
const int cc = (int)( fn - pathFile );
CString dir{ pathFile, cc };
if( PathIsDirectory( dir ) )
return S_OK;
const int status = SHCreateDirectoryEx( nullptr, dir, nullptr );
if( 0 == status )
return S_OK;
return HRESULT_FROM_WIN32( status );
}
class TraceFileWriter
{
CAtlFile file;
// Concatenated strings, including the 0 terminators
std::vector<char> stringsData;
// Index = string ID, value = start offset into stringsData
std::vector<uint32_t> stringsIndex;
// Hash map to unduplicate these strings
CAtlMap<CStringA, uint32_t> stringsHash;
uint32_t addString( const CStringA& s )
{
auto p = stringsHash.Lookup( s );
if( p != nullptr )
return p->m_value;
const uint32_t off = (uint32_t)stringsData.size();
const char* rsi = s;
stringsData.insert( stringsData.end(), rsi, rsi + s.GetLength() + 1 );
stringsIndex.push_back( off );
const uint32_t newId = (uint32_t)stringsHash.GetCount();
stringsHash.SetAt( s, newId );
return newId;
}
void addString( sTraceItem& rdi, const ItemName& name )
{
rdi.countFormatArgs = name.countArgs;
rdi.stringIndex = addString( name.pointer );
rdi.formatArgs = name.args;
}
std::vector<sTraceItem> items;
uint64_t offset = 0;
public:
HRESULT create( LPCTSTR path )
{
CHECK( createDir( path ) );
CHECK( file.Create( path, GENERIC_WRITE, 0, CREATE_ALWAYS ) );
constexpr uint64_t cbHeader = sizeof( sFileHeader );
CHECK( file.SetSize( cbHeader ) );
CHECK( file.Seek( 0, SEEK_END ) );
offset = 0;
return S_OK;
}
HRESULT buffer( const ItemName& name, const void* rsi, size_t length, eDataType dt )
{
sTraceItem& rdi = items.emplace_back();
const uint64_t cb = rdi.buffer( offset, length, dt );
addString( rdi, name );
assert( cb <= UINT_MAX );
CHECK( file.Write( rsi, (DWORD)cb ) );
offset += cb;
return S_OK;
}
HRESULT tensor( const ItemName& name, const void* rsi, __m128i size, __m128i strides, eDataType dt )
{
sTraceItem& rdi = items.emplace_back();
const uint64_t cb = rdi.tensor( offset, size, strides, dt );
addString( rdi, name );
assert( cb <= UINT_MAX );
CHECK( file.Write( rsi, (DWORD)cb ) );
offset += cb;
return S_OK;
}
HRESULT close()
{
if( !file )
return S_FALSE;
const uint32_t cbStringsData = (uint32_t)stringsData.size();
const uint32_t cbStringsIndex = (uint32_t)( stringsIndex.size() * 4 );
if( !stringsIndex.empty() )
CHECK( file.Write( stringsIndex.data(), cbStringsIndex ) );
if( !stringsData.empty() )
CHECK( file.Write( stringsData.data(), cbStringsData ) );
const uint32_t cbItems = (uint32_t)items.size() * (uint32_t)sizeof( sTraceItem );
if( !items.empty() )
CHECK( file.Write( items.data(), cbItems ) );
CHECK( file.Seek( 0, FILE_BEGIN ) );
sFileHeader header;
memset( &header, 0, sizeof( header ) );
header.magic = header.correctMagic;
header.cbItem = sizeof( sTraceItem );
header.countItems = (uint32_t)items.size();
header.bytesPayload = offset;
header.countStrings = (uint32_t)stringsIndex.size();
header.bytesStrings = cbStringsData + cbStringsIndex;
CHECK( file.Write( &header, sizeof( header ) ) );
CHECK( file.Flush() );
file.Close();
return S_OK;
}
};
	// RAII adapter implementing the public iTraceWriter interface on top of
	// TraceFileWriter: the file is created in the constructor and finalized
	// in the destructor.
	class TraceWriter : public iTraceWriter
	{
		TraceFileWriter file;
		HRESULT buffer( const ItemName& name, const void* rsi, size_t length, eDataType dt ) override final
		{
			return file.buffer( name, rsi, length, dt );
		}
		HRESULT tensor( const ItemName& name, const void* rsi, __m128i size, __m128i strides, eDataType dt ) override final
		{
			return file.tensor( name, rsi, size, strides, dt );
		}
	public:
		TraceWriter( LPCTSTR path )
		{
			check( file.create( path ) );
		}
		// NOTE(review): check() appears to throw on failure; throwing from a
		// destructor is risky during unwinding — confirm the intended behavior
		~TraceWriter()
		{
			check( file.close() );
		}
	};
}
// Factory for the trace writer; the concrete implementation type stays private to this source file
std::unique_ptr<iTraceWriter> iTraceWriter::create( LPCTSTR path )
{
	std::unique_ptr<TraceWriter> res = std::make_unique<TraceWriter>( path );
	return res;
}
namespace
{
	// Scratch buffers reused across tensor downloads to avoid reallocations
	static std::vector<float> tempFp32;
	static std::vector<uint16_t> tempFp16;

	// Payload pointer of the vector, or nullptr when it holds no elements
	template<class E>
	inline const void* ptr( const std::vector<E>& vec )
	{
		if( vec.empty() )
			return nullptr;
		return vec.data();
	}
}
// Download a GPU tensor into a CPU-side scratch vector, then forward to the raw
// tensor() overload. Only FP32 and FP16 element types are supported.
HRESULT iTraceWriter::tensor( const ItemName& name, const DirectCompute::Tensor& source )
{
	const __m128i size = source.sizeVec();
	const __m128i strides = source.stridesVec();
	const eDataType dt = source.getType();
	if( dt == eDataType::FP32 )
	{
		source.download( tempFp32 );
		return tensor( name, ptr( tempFp32 ), size, strides, eDataType::FP32 );
	}
	else if( dt == eDataType::FP16 )
	{
		source.download( tempFp16 );
		return tensor( name, ptr( tempFp16 ), size, strides, eDataType::FP16 );
	}
	return E_NOTIMPL;
}
// Write a CPU tensor; dispatches on the element type, forwarding the raw data
// pointer. Only FP32 and FP16 payloads are supported.
HRESULT iTraceWriter::tensor( const ItemName& name, const CpuCompute::Tensor& source )
{
	const __m128i ne = source.sizeVec();
	const __m128i nb = source.stridesVec();
	switch( source.type() )
	{
	case eDataType::FP32:
		return tensor( name, source.fp32(), ne, nb, eDataType::FP32 );
	case eDataType::FP16:
		return tensor( name, source.fp16(), ne, nb, eDataType::FP16 );
	default:
		return E_NOTIMPL;
	}
}
#if BUILD_BOTH_VERSIONS
#include "../../source/ggml.h"
// Write a GGML tensor. ggml keeps up to 4 dimension sizes in `ne` and byte
// strides in `nb`; unused trailing dimensions are normalized to size 1 below.
HRESULT __declspec( noinline ) iTraceWriter::tensor( const ItemName& name, const ggml_tensor& source )
{
	__m128i size = load16( source.ne );
	// Only the low 32 bits of each stride are kept
	__m128i strides = _mm_setr_epi32(
		(int)(uint32_t)source.nb[ 0 ],
		(int)(uint32_t)source.nb[ 1 ],
		(int)(uint32_t)source.nb[ 2 ],
		(int)(uint32_t)source.nb[ 3 ] );
	const __m128i ones = _mm_set1_epi32( 1 );
	// Replace unused higher dimensions with 1. _mm_blend_epi16 selects 16-bit
	// lanes, so two mask bits cover one 32-bit size value.
	switch( source.n_dims )
	{
	case 0:
		size = ones;
		break;
	case 1:
		size = _mm_blend_epi16( size, ones, 0b11111100 );
		break;
	case 2:
		size = _mm_blend_epi16( size, ones, 0b11110000 );
		break;
	case 3:
		size = _mm_blend_epi16( size, ones, 0b11000000 );
		break;
	case 4:
		break;
	default:
		return E_INVALIDARG;
	}
	const ggml_type dt = source.type;
	// Convert strides from bytes to elements: shift right by log2 of element size
	switch( dt )
	{
	case GGML_TYPE_F16:
		strides = _mm_srli_epi32( strides, 1 );
		return tensor( name, source.data, size, strides, eDataType::FP16 );
	case GGML_TYPE_F32:
		strides = _mm_srli_epi32( strides, 2 );
		return tensor( name, source.data, size, strides, eDataType::FP32 );
	default:
		return E_NOTIMPL;
	}
}
#else
// GGML support not compiled into this build flavor
HRESULT iTraceWriter::tensor( const ItemName& name, const ggml_tensor& source )
{
	return E_NOTIMPL;
}
#endif
| 6,746
|
C++
|
.cpp
| 229
| 26.554585
| 117
| 0.686305
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,595
|
tracing.cpp
|
Const-me_Whisper/Whisper/Utils/Trace/tracing.cpp
|
#include "stdafx.h"
#include "tracing.h"
#include "../../source/ggml.h"
#include "../../ML/Tensor.h"
namespace Tracing
{
#if SAVE_DEBUG_TRACE
	// Process-wide trace writer, created by traceCreate() and owned here
	std::unique_ptr<iTraceWriter> s_writer;
	// Console control handler: finalize the trace file on Ctrl+C so the data
	// captured so far is not lost when the process is interrupted
	static BOOL __stdcall consoleHandler( DWORD dwCtrlType )
	{
		if( dwCtrlType == CTRL_C_EVENT )
			s_writer = nullptr;
		// Return TRUE if handled this message, further handler functions won't be called.
		// Return FALSE to pass this message to further handlers until default handler calls ExitProcess().
		return FALSE;
	}
	void traceCreate( LPCTSTR path )
	{
		s_writer = iTraceWriter::create( path );
		SetConsoleCtrlHandler( &consoleHandler, TRUE );
	}
	void traceClose()
	{
		// Destroying the writer finalizes and closes the trace file
		s_writer = nullptr;
	}
	iTraceWriter* getWriter()
	{
		return s_writer.get();
	}
	using Pair = std::pair<ItemName, ggml_tensor>;
	// Tensor descriptors queued by delayTensor(), flushed by writeDelayedTensors()
	static std::vector<Pair> delayed;
	void delayTensor( const ItemName& name, const ggml_tensor* tensor )
	{
		// Copies the descriptor only; the payload it points to must stay alive
		// until writeDelayedTensors() runs — TODO confirm callers guarantee that
		delayed.emplace_back( name, *tensor );
	}
	// Write all queued tensors to the current writer; clears the queue either
	// way. Returns S_FALSE when there was nothing to write or no writer.
	HRESULT writeDelayedTensors()
	{
		if( delayed.empty() )
			return S_FALSE;
		iTraceWriter* w = getWriter();
		if( nullptr == w )
		{
			delayed.clear();
			return S_FALSE;
		}
		for( const Pair& p : delayed )
			w->tensor( p.first, p.second );
		delayed.clear();
		return S_OK;
	}
#elif DBG_TEST_NAN
	// Debug flavor: instead of tracing, scan the tensor for NaN values and
	// break into the debugger when one is found
	HRESULT tensor( const ItemName& name, const DirectCompute::Tensor& tensor )
	{
		const bool found = scanTensorForNaN( tensor, tensor.countElements() );
		if( !found )
			return S_FALSE;
		__debugbreak();
		return S_FALSE;
	}
#endif
}
| 1,490
|
C++
|
.cpp
| 61
| 21.918033
| 101
| 0.70936
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,596
|
TraceStructures.cpp
|
Const-me_Whisper/Whisper/Utils/Trace/TraceStructures.cpp
|
#include "stdafx.h"
#include "TraceStructures.h"
using namespace Tracing;
// Describe a flat 1D buffer: record the payload offset, store the 64-bit
// element count across size[0..1], and zero the strides.
// Returns the payload size in bytes.
uint64_t sTraceItem::buffer( uint64_t off, size_t length, eDataType type )
{
	payloadOffset = off;
	payloadSize = length * DirectCompute::elementSize( type );
	// The 64-bit length spans two adjacent uint32 slots of the size array
	*(uint64_t*)( &size[ 0 ] ) = length;
	*(uint64_t*)( &size[ 2 ] ) = 0;
	_mm_storeu_si128( ( __m128i* )stride.data(), _mm_setzero_si128() );
	itemType = eItemType::Buffer;
	dataType = type;
	return payloadSize;
}
// Describe a 4D tensor: store size and stride vectors (4 uint32 lanes each)
// and compute the payload size from the product of non-zero dimensions.
// Returns the payload size in bytes.
uint64_t sTraceItem::tensor( uint64_t off, __m128i ne, __m128i nb, eDataType type )
{
	payloadOffset = off;
	_mm_storeu_si128( ( __m128i* )size.data(), ne );
	_mm_storeu_si128( ( __m128i* )stride.data(), nb );
	uint64_t count = 1;
	// Zero dimensions are skipped so a partially-filled shape still yields
	// the count of elements actually present
	for( uint32_t i : size )
		if( i != 0 )
			count *= i;
	payloadSize = count * DirectCompute::elementSize( type );
	itemType = eItemType::Tensor;
	dataType = type;
	return payloadSize;
}
| 865
|
C++
|
.cpp
| 28
| 29.035714
| 83
| 0.681437
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,597
|
Context.ops.cpp
|
Const-me_Whisper/Whisper/ML/Context.ops.cpp
|
#include "stdafx.h"
#include "MlContext.h"
#include "testUtils.h"
using namespace DirectCompute;
// Allocate a new GPU tensor of the given element type and 4D shape; throws on failure
Tensor MlContext::createTensor( eDataType type, const std::array<uint32_t, 4>& ne )
{
	Tensor res;
	check( res.create( type, ne ) );
	return res;
}
// Allocate a new GPU tensor from 1..4 dimension sizes; missing trailing
// dimensions default to 1. Throws E_INVALIDARG for 0 or more than 4 sizes.
Tensor MlContext::createTensor( eDataType type, std::initializer_list<uint32_t> ne )
{
	const size_t nDims = ne.size();
	if( 0 == nDims || nDims > 4 )
		throw E_INVALIDARG;
	// Pre-fill with 1s, then overwrite the leading entries with the requested sizes
	std::array<uint32_t, 4> sizes = { 1, 1, 1, 1 };
	uint32_t* rdi = sizes.data();
	for( uint32_t n : ne )
		*rdi++ = n;
	return createTensor( type, sizes );
}
// 1D convolution with stride 1; output shape is [ b.ne[0], a.ne[2] ] in FP32
Tensor MlContext::conv_1d_1s( const Tensor& a, const Tensor& b )
{
	assert( b.isMatrix() );
	assert( a.ne[ 1 ] == b.ne[ 1 ] );
	assert( a.ne[ 3 ] == 1 );
	Tensor res = createTensor( eDataType::FP32, { b.ne[ 0 ], a.ne[ 2 ] } );
	convolution( a, b, res );
	return res;
}
// 1D convolution with stride 2; output shape is [ b.ne[0] / 2, a.ne[2] ] in FP32
Tensor MlContext::conv_1d_2s( const Tensor& a, const Tensor& b )
{
	assert( b.isMatrix() );
	assert( a.ne[ 1 ] == b.ne[ 1 ] );
	assert( a.ne[ 3 ] == 1 );
	Tensor res = createTensor( eDataType::FP32, { b.ne[ 0 ] / 2, a.ne[ 2 ] } );
#if 0
	// Debug aid: log distinct argument shapes reaching this operation
	static PrintUniqueTensorSizes printSize( "conv_1d_2s" );
	printSize.print( a, b );
#endif
	convolution2( a, b, res );
	return res;
}
namespace
{
	// True when every dimension of t1 is an integer multiple of the matching
	// dimension of t0, i.e. t0 can be tiled to cover t1 exactly
	inline bool canRepeat( const TensorShape& t0, const TensorShape& t1 )
	{
		for( int i = 0; i < 4; i++ )
			if( 0 != ( t1.ne[ i ] % t0.ne[ i ] ) )
				return false;
		return true;
	}
}
// Element-wise binary operation over two same-shaped tensors; the compute
// shader `cs` selects the actual operation. Result has a's type and shape.
Tensor MlContext::cwiseBinary( const Tensor& a, const Tensor& b, eComputeShader cs )
{
	assert( isSameShape( a, b ) );
	Tensor res = createTensor( a.getType(), a.ne );
	cwiseBinary( a, b, res, cs );
	return res;
}
// Create a 2D view over an existing tensor without copying data; only zero
// offsets are implemented.
Tensor __declspec( noinline ) MlContext::view2d( const Tensor& a, uint32_t ne0, uint32_t ne1, uint32_t nb1, uint32_t offset )
{
	if( 0 != offset )
		throw E_NOTIMPL;
	Tensor res = a;
	res.ne = { ne0, ne1, 1, 1 };
	res.nb[ 1 ] = nb1;
	// The unused higher dimensions get the stride of the full 2D slice
	res.nb[ 2 ] = res.nb[ 3 ] = nb1 * ne1;
	return res;
}
// Transpose the two lower dimensions by swapping their sizes and strides;
// no data movement, the result aliases a's GPU buffer.
Tensor MlContext::transpose( const Tensor& a )
{
	Tensor result;
	// A magic number for _mm_shuffle_epi32 SSE2 instruction to swap two lower int32 lanes in a vector
	constexpr int swapXy = _MM_SHUFFLE( 3, 2, 0, 1 );
	__m128i v = a.sizeVec();
	v = _mm_shuffle_epi32( v, swapXy );
	store( result.ne, v );
	v = a.stridesVec();
	v = _mm_shuffle_epi32( v, swapXy );
	store( result.nb, v );
	result.setGpuViews( a, a );
	return result;
}
// Normalization of a into a new tensor of the same type and shape
Tensor MlContext::norm( const Tensor& a )
{
	Tensor res = createTensor( a.getType(), a.ne );
	norm( a, res );
	return res;
}
// Matrix multiplication; result is FP32 with shape [ a.ne[1], b.ne[1], a.ne[2], b.ne[3] ].
// Uses the tiled shader when inexact optimizations are enabled at compile time.
Tensor MlContext::mulMat( const Tensor& a, const Tensor& b )
{
	if( !canMulMat( a, b ) )
		throw E_INVALIDARG;
	Tensor res = createTensor( eDataType::FP32, { a.ne[ 1 ], b.ne[ 1 ], a.ne[ 2 ], b.ne[ 3 ] } );
	if constexpr( enableInexactOptimizations )
		mulMatTiled( a, b, res );
	else
		mulMat( a, b, res );
#if 0
	// Debug aid: compare the tiled shader's output against the reference path
	Tensor testTiled;
	check( testTiled.create( eDataType::FP32, res.ne ) );
	mulMatTiled( a, b, testTiled );
	std::vector<float> current, tiled;
	res.download( current );
	testTiled.download( tiled );
	sTensorDiff diff = computeDiff( current.data(), tiled.data(), current.size() );
	diff.print( "mulMatTiled" );
#endif
	return res;
}
// Matrix multiplication with a profiler tag; `a` must be pre-transposed
// (a.nb[0] == 0). Picks a shader based on the layout and width of `b`.
Tensor MlContext::mulMatEx( const Tensor& a, const Tensor& b, const char* tagName )
{
	if( !canMulMat( a, b ) )
		throw E_INVALIDARG;
	if( 0 != a.nb[ 0 ] )
		throw E_INVALIDARG; // The first argument is expected to be pre-transposed
	const uint16_t tag = profiler.setNextTag( tagName );
	if( b.ne[ 1 ] != 1 )
	{
		if( b.nb[ 0 ] != 0 )
		{
			// Reshape b into panels first, then restore the profiler tag
			// because reshapePanels may have consumed it
			Tensor rhs = reshapePanels( b );
			profiler.setNextTag( tag );
			return mulMatTiledEx( a, rhs );
		}
		else
		{
			// Second argument already reshaped into these panels
			return mulMatTiledEx( a, b );
		}
	}
	else
	{
		// Vector case: b has a single column
		if( 0 != b.nb[ 0 ] )
			return mulMatByRowTiledEx( a, b );
		// That shader requires classic VRAM layout of the second argument, gonna fail with pre-transposed one
		throw E_INVALIDARG;
	}
}
// Permute the axes of a tensor: axisN says where dimension N of `a` goes in
// the result. No data movement — only sizes and strides are rearranged.
// The axes must form a permutation of { 0, 1, 2, 3 }.
Tensor MlContext::permute( const Tensor& a, uint8_t axis0, uint8_t axis1, uint8_t axis2, uint8_t axis3 )
{
	assert( axis0 < 4 );
	assert( axis1 < 4 );
	assert( axis2 < 4 );
	assert( axis3 < 4 );
	assert( axis0 != axis1 );
	assert( axis0 != axis2 );
	assert( axis0 != axis3 );
	assert( axis1 != axis2 );
	assert( axis1 != axis3 );
	assert( axis2 != axis3 );
	Tensor res = a;
	res.ne[ axis0 ] = a.ne[ 0 ];
	res.ne[ axis1 ] = a.ne[ 1 ];
	res.ne[ axis2 ] = a.ne[ 2 ];
	res.ne[ axis3 ] = a.ne[ 3 ];
	res.nb[ axis0 ] = a.nb[ 0 ];
	res.nb[ axis1 ] = a.nb[ 1 ];
	res.nb[ axis2 ] = a.nb[ 2 ];
	res.nb[ axis3 ] = a.nb[ 3 ];
	return res;
}
// Attention block: when inexact optimizations are on and no masking is
// needed, computes softmax( Kᵀ·Q / sqrt(d) ) · V with separate shaders;
// otherwise runs the dedicated flashAttention shader.
Tensor MlContext::flashAttention( const Tensor& q, const Tensor& k, const Tensor& v, bool masked )
{
	if( !canMulMat( k, q ) )
		throw E_INVALIDARG;
	if constexpr( enableInexactOptimizations )
	{
		if( !masked )
		{
			profiler.setNextTag( "flashAttn.1" );
			Tensor tmp = mulMat( k, q );
			profiler.setNextTag( "flashAttention" );
			// Scale by 1 / sqrt( head dimension )
			const float tempScale = (float)( 1.0 / sqrt( (double)(int)q.ne[ 0 ] ) );
			softMax( tmp, tempScale );
			profiler.setNextTag( "flashAttn.2" );
			return mulMat( v, tmp );
		}
	}
	Tensor res = createTensor( eDataType::FP32, q.ne );
	flashAttention( q, k, v, res, masked );
#if 0
	// Debug aid: compare against the separate-shaders formulation
	Tensor tmpMat = mulMat( k, q );
	float scale = (float)( 1.0 / sqrt( (double)(int)q.ne[ 0 ] ) );
	softMax( tmpMat, scale );
	Tensor testRes = mulMat( v, tmpMat );
	computeDiff( res, testRes ).print( "flashAttention mulmat" );
#endif
	return res;
}
// Copy / reshape a tensor into the requested type and shape.
// When the source is dense and the type is unchanged this is a zero-cost
// reshape (no shader dispatch); otherwise a compute shader performs the
// copy and optional FP32→FP16 conversion.
// Throws E_BOUNDS for an invalid dimension count, E_INVALIDARG when the
// requested shape has a different element count than the source.
Tensor MlContext::copy( const Tensor& a, eDataType type, std::initializer_list<uint32_t> size )
{
	const size_t dims = size.size();
	if( 0 == dims || dims > 4 )
		throw E_BOUNDS;
	// The requested shape must contain exactly as many elements as the source
	size_t nRequested = 1;
	for( size_t i = 0; i < dims; i++ )
	{
		uint32_t n = size.begin()[ i ];
		nRequested *= n;
	}
	if( nRequested != a.countElements() )
		throw E_INVALIDARG;
	const eDataType st = a.getType();
	Tensor res;
	if( a.isContinuous() && st == type )
	{
		// Same type, and it's dense - no need to call any compute shaders, equal to reshape
		res = a;
		for( size_t i = 0; i < dims; i++ )
			res.ne[ i ] = size.begin()[ i ];
		for( size_t i = dims; i < 4; i++ )
			res.ne[ i ] = 1;
		res.setDenseStrides();
	}
	else
	{
		// Either converting non-continuous to continuous, or converting types
		res = createTensor( type, size );
		copyImpl( a, res, st == eDataType::FP32 && type == eDataType::FP16 );
	}
	return res;
}
// Copy `a` into an existing destination tensor, reshaping the destination to
// the requested size first. Element counts of source, destination and the
// requested shape must all match; `type` must equal dest's type.
void MlContext::copyInPlace( Tensor& dest, const Tensor& a, eDataType type, std::initializer_list<uint32_t> size )
{
	assert( type == dest.getType() );
	const size_t dims = size.size();
	if( 0 == dims || dims > 4 )
		throw E_BOUNDS;
	size_t nRequested = 1;
	for( size_t i = 0; i < dims; i++ )
	{
		uint32_t n = size.begin()[ i ];
		nRequested *= n;
	}
	if( nRequested != a.countElements() || nRequested != dest.countElements() )
		throw E_INVALIDARG;
	// Reshape the destination
	for( size_t i = 0; i < dims; i++ )
		dest.ne[ i ] = size.begin()[ i ];
	for( size_t i = dims; i < 4; i++ )
		dest.ne[ i ] = 1;
	dest.setDenseStrides();
	// Call the shader
	const eDataType st = a.getType();
	copyImpl( a, dest, st == eDataType::FP32 && type == eDataType::FP16 );
}
| 7,158
|
C++
|
.cpp
| 249
| 26.485944
| 125
| 0.640256
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,598
|
ConstantBuffer.cpp
|
Const-me_Whisper/Whisper/ML/ConstantBuffer.cpp
|
#include "stdafx.h"
#include "ConstantBuffer.h"
#include "../D3D/MappedResource.h"
using namespace DirectCompute;
// Lazily create the D3D11 constant buffer: 16*3*2 = 96 bytes, enough for up
// to three 32-byte shape records (a size vector plus a stride vector each).
// Returns ERROR_ALREADY_INITIALIZED when the buffer already exists.
HRESULT ConstantBuffer::create()
{
	if( nullptr == buffer )
	{
		CD3D11_BUFFER_DESC desc{ 16 * 3 * 2, D3D11_BIND_CONSTANT_BUFFER, D3D11_USAGE_DYNAMIC, D3D11_CPU_ACCESS_WRITE };
		return device()->CreateBuffer( &desc, nullptr, &buffer );
	}
	return HRESULT_FROM_WIN32( ERROR_ALREADY_INITIALIZED );
}
namespace
{
	// Write one tensor shape (size vector + stride vector, 32 bytes total)
	// to the mapped constant-buffer memory at rdi
	__forceinline void copy32( __m128i* rdi, const TensorShape& ts )
	{
		_mm_storeu_si128( rdi, ts.sizeVec() );
		_mm_storeu_si128( rdi + 1, ts.stridesVec() );
	}
}
// Upload one tensor shape into the constant buffer (32 bytes)
HRESULT ConstantBuffer::update( const TensorShape& t0 )
{
	MappedResource mapped;
	CHECK( mapped.map( buffer, false ) );
	__m128i* const rdi = ( __m128i* )mapped.data();
	copy32( rdi, t0 );
	return S_OK;
}
// Upload two tensor shapes, 32 bytes apart
HRESULT ConstantBuffer::update( const TensorShape& t0, const TensorShape& t1 )
{
	MappedResource mapped;
	CHECK( mapped.map( buffer, false ) );
	__m128i* const rdi = ( __m128i* )mapped.data();
	copy32( rdi, t0 );
	copy32( rdi + 2, t1 );
	return S_OK;
}
// Upload three tensor shapes; fills the entire 96-byte buffer
HRESULT ConstantBuffer::update( const TensorShape& t0, const TensorShape& t1, const TensorShape& t2 )
{
	MappedResource mapped;
	CHECK( mapped.map( buffer, false ) );
	__m128i* const rdi = ( __m128i* )mapped.data();
	copy32( rdi, t0 );
	copy32( rdi + 2, t1 );
	copy32( rdi + 4, t2 );
	return S_OK;
}
// Bind this constant buffer to slot 0 of the compute-shader stage
void ConstantBuffer::bind() const
{
	ID3D11Buffer* p = buffer;
	assert( nullptr != p );
	context()->CSSetConstantBuffers( 0, 1, &p );
}
| 1,491
|
C++
|
.cpp
| 54
| 25.796296
| 113
| 0.701889
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,599
|
TensorEx.cpp
|
Const-me_Whisper/Whisper/ML/TensorEx.cpp
|
#include "stdafx.h"
#include "TensorEx.h"
#include "../D3D/createBuffer.h"
#include "../source/ggml.h"
#include "../D3D/MappedResource.h"
using namespace DirectCompute;
// Create a GPU tensor from a GGML descriptor: allocates the VRAM buffer (and
// a staging buffer when download support is requested), optionally uploads
// the CPU-side data, and creates the shader views.
// Only FP16 and FP32 element types are supported.
HRESULT TensorEx::create( const ggml_tensor& ggml, eBufferUse usage, bool uploadData )
{
	// Release any previous state before recreating
	TensorGpuViews::clear();
	buffer = nullptr;
	stagingBuffer = nullptr;
	CHECK( TensorShape::create( ggml ) );
	const ggml_type dataType = ggml.type;
	const uint32_t cbElement = (uint32_t)ggml_type_size( dataType );
	const size_t totalBytes = ggml_nbytes( &ggml );
	if( totalBytes > INT_MAX )
		return DISP_E_OVERFLOW;
	const uint32_t countElements = (uint32_t)( totalBytes / cbElement );
	{
		const void* const rsi = uploadData ? ggml.data : nullptr;
		// A staging buffer is only needed when the caller wants CPU readback
		ID3D11Buffer** ppStagingBuffer = ( usage == eBufferUse::ReadWriteDownload ) ? &stagingBuffer : nullptr;
		CHECK( createBuffer( usage, totalBytes, &buffer, rsi, ppStagingBuffer ) );
	}
	DXGI_FORMAT format;
	switch( dataType )
	{
	case GGML_TYPE_F16:
		format = DXGI_FORMAT_R16_FLOAT;
		break;
	case GGML_TYPE_F32:
		format = DXGI_FORMAT_R32_FLOAT;
		break;
	default:
		return E_NOTIMPL;
	}
	// A UAV is only needed when the GPU will write to the tensor
	const bool makeUav = usage == eBufferUse::ReadWrite || usage == eBufferUse::ReadWriteDownload;
	return TensorGpuViews::create( buffer, format, totalBytes / cbElement, makeUav );
}
// Create an empty GPU tensor of the given type and 4D size; allocates a
// staging buffer only when CPU readback is requested.
HRESULT TensorEx::create( eDataType type, eBufferUse usage, const std::array<uint32_t, 4>& sizeElements )
{
	TensorGpuViews::clear();
	buffer = nullptr;
	stagingBuffer = nullptr;
	std::initializer_list<uint32_t> il( sizeElements.data(), sizeElements.data() + 4 );
	ID3D11Buffer** ppStaging = ( usage == eBufferUse::ReadWriteDownload ) ? &stagingBuffer : nullptr;
	return Tensor::create( type, il, usage, buffer, nullptr, ppStaging );
}
// Query element size and element count from the shader resource view's
// descriptor. Returns OLE_E_BLANK when the view was never created.
HRESULT TensorEx::getViewSize( uint32_t& cbElement, uint32_t& countElements ) const
{
	ID3D11ShaderResourceView* const srv = *this;
	if( nullptr == srv )
		return OLE_E_BLANK;
	D3D11_SHADER_RESOURCE_VIEW_DESC viewDesc;
	srv->GetDesc( &viewDesc );
	cbElement = dxgiSizeof( viewDesc.Format );
	// This project only creates buffer views that start at element 0
	assert( viewDesc.ViewDimension == D3D_SRV_DIMENSION_BUFFER );
	assert( viewDesc.Buffer.FirstElement == 0 );
	countElements = viewDesc.Buffer.NumElements;
	return S_OK;
}
// Copy `cb` bytes of tensor data from VRAM to system memory via the staging
// buffer; fails when the tensor was created without download support.
HRESULT TensorEx::download( void* rdi, size_t cb ) const
{
	if( nullptr == stagingBuffer )
		return HRESULT_FROM_WIN32( ERROR_GPIO_OPERATION_DENIED ); // The requested operation is not supported for the specified handle.
	ID3D11DeviceContext* const ctx = context();
	// GPU → staging copy, then map the staging buffer for CPU reads
	ctx->CopyResource( stagingBuffer, buffer );
	MappedResource mapped;
	CHECK( mapped.map( stagingBuffer, true ) );
	memcpy( rdi, mapped.data(), cb );
	return S_OK;
}
// Download the complete tensor; the byte count is derived from the SRV descriptor
HRESULT TensorEx::download( void* rdi ) const
{
	uint32_t cbElement, numElements;
	CHECK( getViewSize( cbElement, numElements ) );
	size_t cb = (size_t)cbElement * numElements;
	return download( rdi, cb );
}
| 2,836
|
C++
|
.cpp
| 78
| 34.25641
| 129
| 0.74708
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,600
|
testUtils.cpp
|
Const-me_Whisper/Whisper/ML/testUtils.cpp
|
#include "stdafx.h"
#include "testUtils.h"
#include <immintrin.h>
#include <atlfile.h>
#include <atlpath.h>
namespace
{
using DirectCompute::sTensorDiff;
__forceinline __m256 load( const float* rsi )
{
return _mm256_loadu_ps( rsi );
}
__forceinline __m256 load( const uint16_t* rsi )
{
const __m128i iv = _mm_load_si128( ( const __m128i* )rsi );
return _mm256_cvtph_ps( iv );
}
	// Load `count` (1..7) FP16 elements from both x and y, converting them to
	// FP32; the remaining vector lanes are zeroed. Handles the tail of
	// diffVectors, assembling each partial vector from 2/4/8-byte pieces.
	__forceinline void loadPartial( const uint16_t* x, const uint16_t* y, size_t count, __m256& fx, __m256& fy )
	{
		__m128i ix, iy;
		switch( count )
		{
		case 1:	// load 2 bytes
			ix = _mm_cvtsi32_si128( *x );
			iy = _mm_cvtsi32_si128( *y );
			break;
		case 2:	// load 4 bytes
			ix = _mm_cvtsi32_si128( *(const int*)x );
			iy = _mm_cvtsi32_si128( *(const int*)y );
			break;
		case 3:	// load 6 bytes
			ix = _mm_cvtsi32_si128( *(const int*)x );
			iy = _mm_cvtsi32_si128( *(const int*)y );
			ix = _mm_insert_epi16( ix, x[ 2 ], 2 );
			iy = _mm_insert_epi16( iy, y[ 2 ], 2 );
			break;
		case 4:	// load 8 bytes
			ix = _mm_cvtsi64_si128( *(const int64_t*)x );
			iy = _mm_cvtsi64_si128( *(const int64_t*)y );
			break;
		case 5:	// load 10 bytes
			ix = _mm_cvtsi64_si128( *(const int64_t*)x );
			iy = _mm_cvtsi64_si128( *(const int64_t*)y );
			ix = _mm_insert_epi16( ix, x[ 4 ], 4 );
			iy = _mm_insert_epi16( iy, y[ 4 ], 4 );
			break;
		case 6:	// load 12 bytes
			ix = _mm_cvtsi64_si128( *(const int64_t*)x );
			iy = _mm_cvtsi64_si128( *(const int64_t*)y );
			ix = _mm_insert_epi32( ix, *(const int*)( x + 4 ), 2 );
			iy = _mm_insert_epi32( iy, *(const int*)( y + 4 ), 2 );
			break;
		case 7:	// load 14 bytes
			ix = _mm_cvtsi64_si128( *(const int64_t*)x );
			iy = _mm_cvtsi64_si128( *(const int64_t*)y );
			ix = _mm_insert_epi32( ix, *(const int*)( x + 4 ), 2 );
			iy = _mm_insert_epi32( iy, *(const int*)( y + 4 ), 2 );
			ix = _mm_insert_epi16( ix, x[ 6 ], 6 );
			iy = _mm_insert_epi16( iy, y[ 6 ], 6 );
			break;
		default:
			// count == 0 or out of range: produce zero vectors
			fx = fy = _mm256_setzero_ps();
			return;
		}
		fx = _mm256_cvtph_ps( ix );
		fy = _mm256_cvtph_ps( iy );
	}
	// Load 2 floats into the lower lanes of a vector, upper lanes zeroed
	inline __m128 loadFloat2( const float* rsi )
	{
		return _mm_castpd_ps( _mm_load_sd( (const double*)rsi ) );
	}
	// Load 3 floats into the lower lanes of a vector, upper lane zeroed
	inline __m128 loadFloat3( const float* rsi )
	{
		__m128 f = loadFloat2( rsi );
		f = _mm_insert_ps( f, _mm_load_ss( rsi + 2 ), 0x20 );
		return f;
	}
	// Load `count` (1..7) FP32 elements from both x and y; the remaining
	// vector lanes are zeroed. Handles the tail of diffVectors.
	__forceinline void loadPartial( const float* x, const float* y, size_t count, __m256& fx, __m256& fy )
	{
		// Low and high 128-bit halves of each result, assembled separately
		__m128 low1, high1;
		__m128 low2, high2;
		high1 = high2 = _mm_setzero_ps();
		switch( count )
		{
		case 1:
			low1 = _mm_load_ss( x );
			low2 = _mm_load_ss( y );
			break;
		case 2:
			low1 = loadFloat2( x );
			low2 = loadFloat2( y );
			break;
		case 3:
			low1 = loadFloat3( x );
			low2 = loadFloat3( y );
			break;
		case 4:
			low1 = _mm_loadu_ps( x );
			low2 = _mm_loadu_ps( y );
			break;
		case 5:
			low1 = _mm_loadu_ps( x );
			low2 = _mm_loadu_ps( y );
			high1 = _mm_load_ss( x + 4 );
			high2 = _mm_load_ss( y + 4 );
			break;
		case 6:
			low1 = _mm_loadu_ps( x );
			low2 = _mm_loadu_ps( y );
			high1 = loadFloat2( x + 4 );
			high2 = loadFloat2( y + 4 );
			break;
		case 7:	// load 14 bytes
			low1 = _mm_loadu_ps( x );
			low2 = _mm_loadu_ps( y );
			high1 = loadFloat3( x + 4 );
			high2 = loadFloat3( y + 4 );
			break;
		default:
			// count == 0 or out of range: produce zero vectors
			fx = fy = _mm256_setzero_ps();
			return;
		}
		fx = _mm256_setr_m128( low1, high1 );
		fy = _mm256_setr_m128( low2, high2 );
	}
	// Horizontal maximum of the 8 floats in the vector
	__forceinline float horizontalMaximum( __m256 v )
	{
		__m128 s = _mm256_extractf128_ps( v, 1 );
		s = _mm_max_ps( s, _mm256_castps256_ps128( v ) );
		s = _mm_max_ps( s, _mm_movehl_ps( s, s ) );
		s = _mm_max_ss( s, _mm_movehdup_ps( s ) );
		return _mm_cvtss_f32( s );
	}
	// Horizontal sum of the 8 floats, accumulated in FP64 for extra precision
	__forceinline double horizontalSum( __m256 v )
	{
		__m256d d = _mm256_cvtps_pd( _mm256_extractf128_ps( v, 1 ) );
		d = _mm256_add_pd( d, _mm256_cvtps_pd( _mm256_castps256_ps128( v ) ) );
		__m128d s = _mm256_extractf128_pd( d, 1 );
		s = _mm_add_pd( s, _mm256_castpd256_pd128( d ) );
		s = _mm_add_sd( s, _mm_unpackhi_pd( s, s ) );
		return _mm_cvtsd_f64( s );
	}
	// Zero the difference in lanes where a and b have bit-identical values.
	// This neutralizes equal NaN/Inf encodings, which would otherwise poison
	// the difference statistics (NaN - NaN == NaN).
	__m256 maskInfNan( __m256 diff, __m256 a, __m256 b )
	{
		__m256i ai = _mm256_castps_si256( a );
		__m256i bi = _mm256_castps_si256( b );
		__m256i eqi = _mm256_cmpeq_epi32( ai, bi );
		__m256 eq = _mm256_castsi256_ps( eqi );
		return _mm256_andnot_ps( eq, diff );
	}
	// Accumulates per-lane difference statistics over a stream of 8-wide
	// vectors: maximum absolute difference and sum of squared differences.
	class DiffAcc
	{
		__m256 maxAbs = _mm256_setzero_ps();
		__m256 sumSquares = _mm256_setzero_ps();
	public:
		// Fold one pair of 8-float vectors into the accumulators
		__forceinline void add( __m256 a, __m256 b )
		{
			const __m256 neg0 = _mm256_set1_ps( -0.0f );
			__m256 diff = _mm256_sub_ps( b, a );
			diff = maskInfNan( diff, a, b );
			sumSquares = _mm256_fmadd_ps( diff, diff, sumSquares );
			// andnot with -0.0f clears the sign bit => absolute value
			const __m256 absDiff = _mm256_andnot_ps( neg0, diff );
			maxAbs = _mm256_max_ps( maxAbs, absDiff );
		}
		// Reduce the vector accumulators to scalar statistics over `count` elements
		__forceinline sTensorDiff reduce( size_t count )
		{
			sTensorDiff res;
			res.maxAbsDiff = horizontalMaximum( maxAbs );
			res.avgDiffSquared = (float)( horizontalSum( sumSquares ) / (double)(int64_t)count );
			res.length = count;
			return res;
		}
	};
	// Compare two vectors of `length` elements (FP32 or FP16 via the load()
	// overloads): full 8-wide iterations, then a masked partial load for the tail.
	template<class E>
	static sTensorDiff __declspec( noinline ) diffVectors( const E* a, const E* b, size_t length )
	{
		// const E* const aEnd = a + length;
		const E* const aEndAligned = a + ( length / 8 ) * 8;
		const size_t remainder = length % 8;
		DiffAcc acc;
		for( ; a < aEndAligned; a += 8, b += 8 )
			acc.add( load( a ), load( b ) );
		if( remainder != 0 )
		{
			// Tail: zero-padded lanes contribute 0 difference, so stats stay correct
			__m256 va, vb;
			loadPartial( a, b, remainder, va, vb );
			acc.add( va, vb );
		}
		return acc.reduce( length );
	}
}
// Difference statistics between two FP32 arrays
sTensorDiff DirectCompute::computeDiff( const float* a, const float* b, size_t length )
{
	return diffVectors( a, b, length );
}
// Difference statistics between two FP16 arrays
sTensorDiff DirectCompute::computeDiff( const uint16_t* a, const uint16_t* b, size_t length )
{
	return diffVectors( a, b, length );
}
// Log the statistics with a caller-supplied label
void DirectCompute::sTensorDiff::print( const char* what ) const
{
	logDebug( u8"%s: length %zu, maxAbsDiff = %g, avgDiffSquared = %g", what, length, maxAbsDiff, avgDiffSquared );
}
// Log the statistics without a label
void DirectCompute::sTensorDiff::print() const
{
	logDebug( u8"%zu elements, maxAbsDiff = %g, avgDiffSquared = %g", length, maxAbsDiff, avgDiffSquared );
}
// Debug-only helper: dump a memory block to a file under a hard-coded
// developer-machine directory. Not intended for production use.
HRESULT DirectCompute::dbgWriteBinaryFile( LPCTSTR fileName, const void* rsi, size_t cb )
{
	CPath path;
	path.m_strPath = LR"(C:\Temp\2remove\Whisper)";
	path.Append( fileName );
	CAtlFile file;
	CHECK( file.Create( path, GENERIC_WRITE, 0, CREATE_ALWAYS ) );
	CHECK( file.Write( rsi, (DWORD)cb ) );
	CHECK( file.Flush() );
	return S_OK;
}
#include "Tensor.h"
// Download both GPU tensors and compute difference statistics on the CPU.
// Only implemented for FP32 tensors; throws E_NOTIMPL otherwise.
sTensorDiff DirectCompute::computeDiff( const Tensor& a, const Tensor& b )
{
	assert( isSameShapeAndLayout( a, b ) );
	const eDataType dt = a.getType();
	assert( dt == b.getType() );
	switch( dt )
	{
	case eDataType::FP32:
	{
		std::vector<float> v1, v2;
		a.download( v1 );
		b.download( v2 );
		assert( v1.size() == v2.size() );
#if 0
		// Debug aid: locate the first zero and the largest-difference element
		const size_t firstZero = std::find( v2.begin(), v2.end(), 0.0f ) - v2.begin();
		std::vector<float> delta;
		delta.resize( v1.size() );
		for( size_t i = 0; i < v1.size(); i++ )
			delta[ i ] = std::abs( v1[ i ] - v2[ i ] );
		const size_t maxIndex = std::max_element( delta.begin(), delta.end() ) - delta.begin();
#endif
		return computeDiff( v1.data(), v2.data(), v1.size() );
	}
	}
	throw E_NOTIMPL;
}
using namespace DirectCompute;
// Log a size pair only the first time it is seen; the 8 uint32 values are the
// two size vectors packed together. When the second half is all zero, only
// the first shape is printed.
void PrintUniqueTensorSizes::printImpl( const std::array<uint32_t, 8>& a )
{
	auto pair = set.emplace( a );
	if( !pair.second )
		return; // was already there
	const __m128i rhs = _mm_loadu_si128( ( const __m128i* ) ( &a[ 4 ] ) );
	if( _mm_testz_si128( rhs, rhs ) )
	{
		logDebug( u8"%s: [ %i, %i, %i, %i ]", what,
			a[ 0 ], a[ 1 ], a[ 2 ], a[ 3 ] );
	}
	else
	{
		logDebug( u8"%s: [ %i, %i, %i, %i ], [ %i, %i, %i, %i ]", what,
			a[ 0 ], a[ 1 ], a[ 2 ], a[ 3 ], a[ 4 ], a[ 5 ], a[ 6 ], a[ 7 ] );
	}
}
// Log a unique pair of tensor shapes
void PrintUniqueTensorSizes::print( const Tensor& lhs, const Tensor& rhs )
{
	std::array<uint32_t, 8> arr;
	__m128i* const rdi = ( __m128i* )arr.data();
	_mm_storeu_si128( rdi, lhs.sizeVec() );
	_mm_storeu_si128( rdi + 1, rhs.sizeVec() );
	printImpl( arr );
}
// Log a unique pair of raw 4-int shapes
void PrintUniqueTensorSizes::print( const int* lhs, const int* rhs )
{
	std::array<uint32_t, 8> arr;
	__m128i* const rdi = ( __m128i* )arr.data();
	_mm_storeu_si128( rdi, load16( lhs ) );
	_mm_storeu_si128( rdi + 1, load16( rhs ) );
	printImpl( arr );
}
// Log a unique single tensor shape; the second half is zeroed so printImpl
// prints only one shape
void PrintUniqueTensorSizes::print( const Tensor& lhs )
{
	std::array<uint32_t, 8> arr;
	__m128i* const rdi = ( __m128i* )arr.data();
	_mm_storeu_si128( rdi, lhs.sizeVec() );
	_mm_storeu_si128( rdi + 1, _mm_setzero_si128() );
	printImpl( arr );
}
#include "testUtilsC.h"
// C-callable entry point: keeps one PrintUniqueTensorSizes instance per
// distinct name, so each call site logs its own unique shapes
void printUniqueTensorSize( const char* name, const int* lhs, const int* rhs )
{
	using TS = DirectCompute::PrintUniqueTensorSizes;
	static std::unordered_map<std::string, TS> map;
	TS& ts = map.try_emplace( name, name ).first->second;
	ts.print( lhs, rhs );
}
| 8,858
|
C++
|
.cpp
| 295
| 27.223729
| 112
| 0.61246
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,601
|
LookupTablesData.cpp
|
Const-me_Whisper/Whisper/ML/LookupTablesData.cpp
|
#include "stdafx.h"
#include "LookupTablesData.h"
#include <immintrin.h>
#include <atlfile.h>
#include <Utils/LZ4/lz4.h>
using namespace DirectCompute;
namespace
{
	// Convert a single FP16 value to FP32 using the F16C instruction
	inline float fp32( uint16_t f16 )
	{
		__m128i i = _mm_cvtsi32_si128( f16 );
		__m128 f = _mm_cvtph_ps( i );
		return _mm_cvtss_f32( f );
	}
	// Convert a single FP32 value to FP16, rounding mode 0 = round-to-nearest-even
	inline uint16_t fp16( float fp32 )
	{
		__m128 f = _mm_set_ss( fp32 );
		__m128i i = _mm_cvtps_ph( f, 0 );
		uint32_t res = (uint32_t)_mm_cvtsi128_si32( i );
		return (uint16_t)res;
	}
constexpr double GELU_COEF_A = 0.044715;
constexpr double SQRT_2_OVER_PI = 0.79788456080286535587989211986876;
inline float computeGelu( float x )
{
return (float)( 0.5 * x * ( 1.0 + tanh( SQRT_2_OVER_PI * x * ( 1.0 + GELU_COEF_A * x * x ) ) ) );
}
}
#ifndef __AVX__
namespace
{
// Compressed data of these two lookup tables
#include "LookupTablesData.inl"
}
#endif
// Build the 64k-entry GELU and exponent lookup tables (FP16 in, FP16 out).
// AVX builds compute them at startup with F16C; non-AVX builds decompress
// pre-built tables embedded in the DLL.
LookupTablesData::LookupTablesData()
{
#ifdef __AVX__
	// When compiling for AVX, we assume the CPU also has F16C
	// https://en.wikipedia.org/wiki/F16C
	for( int i = 0; i < 0x10000; i++ )
	{
		const float f = fp32( i );
		gelu[ i ] = fp16( computeGelu( f ) );
		exponent[ i ] = fp16( (float)exp( f ) );
	}
#else
	// When compiling without AVX, use the data compiled into the DLL
	constexpr int cbThis = (int)( sizeof( *this ) );
	const int lz4Status = LZ4_decompress_safe( (const char*)s_tableData.data(), (char*)this, (int)s_tableData.size(), cbThis );
	// LZ4_decompress_safe returns the number of bytes produced; anything else is corruption
	if( lz4Status != cbThis )
	{
		logError( u8"LZ4_decompress_safe failed with status %i", lz4Status );
		throw PLA_E_CABAPI_FAILURE;
	}
#endif
#if false
	// Temporary code to save the content of these lookup tables
	CAtlFile tempFile;
	tempFile.Create( LR"(C:\Temp\2remove\Whisper\tables.bin)", GENERIC_WRITE, 0, CREATE_ALWAYS );
	tempFile.Write( this, (DWORD)sizeof( *this ) );
#endif
}
| 1,814
|
C++
|
.cpp
| 63
| 26.857143
| 124
| 0.684814
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,602
|
Device.cpp
|
Const-me_Whisper/Whisper/ML/Device.cpp
|
#include "stdafx.h"
#include "Device.h"
#include "../D3D/createDevice.h"
#include "../D3D/shaders.h"
#include "../D3D/device.h"
#include "mlUtils.h"
#include "../D3D/MappedResource.h"
using namespace DirectCompute;
// Create the D3D11 device and everything that depends on it: compute
// shaders, lookup tables, and the small 16-byte dynamic constant buffer.
HRESULT Device::create( uint32_t flags, const std::wstring& adapter )
{
	CHECK( validateFlags( flags ) );
	CHECK( createDevice( adapter, &device, &context ) );
	CHECK( queryDeviceInfo( gpuInfo, device, flags ) );
	CHECK( createComputeShaders( shaders ) );
	CHECK( lookupTables.create() );
	{
		// 16 bytes = one __m128i of shader constants, see updateSmallCb()
		CD3D11_BUFFER_DESC desc{ 16, D3D11_BIND_CONSTANT_BUFFER, D3D11_USAGE_DYNAMIC, D3D11_CPU_ACCESS_WRITE };
		CHECK( device->CreateBuffer( &desc, nullptr, &smallCb ) );
	}
#if DBG_TEST_NAN
	CHECK( nanTestBuffers.create() );
#endif
	return S_OK;
}
// Create a second Device sharing the underlying GPU of `source`: clones the
// D3D11 device, copies the GPU info, and re-creates per-device resources.
HRESULT Device::createClone( const Device& source )
{
	CHECK( cloneDevice( source.device, &device, &context ) );
	gpuInfo = source.gpuInfo;
	CHECK( createComputeShaders( shaders ) );
	CHECK( lookupTables.createClone( source.lookupTables ) );
	{
		// Same small constant buffer as in create()
		CD3D11_BUFFER_DESC desc{ 16, D3D11_BIND_CONSTANT_BUFFER, D3D11_USAGE_DYNAMIC, D3D11_CPU_ACCESS_WRITE };
		CHECK( device->CreateBuffer( &desc, nullptr, &smallCb ) );
	}
#if DBG_TEST_NAN
	CHECK( nanTestBuffers.create() );
#endif
	return S_OK;
}
// Release all GPU resources; the device itself is released last
void Device::destroy()
{
#if DBG_TEST_NAN
	nanTestBuffers.destroy();
#endif
	smallCb = nullptr;
	shaders.clear();
	context = nullptr;
	device = nullptr;
}
// Byte size of a D3D11 buffer, packed into one of the two 64-bit halves of
// the returned vector: high half for VRAM buffers, low half for staging
// (system-memory) buffers. Returns zero for a null buffer.
__m128i __declspec( noinline ) DirectCompute::bufferMemoryUsage( ID3D11Buffer* buffer )
{
	if( nullptr != buffer )
	{
		D3D11_BUFFER_DESC desc;
		buffer->GetDesc( &desc );
		if( desc.Usage != D3D11_USAGE_STAGING )
			return setHigh_size( desc.ByteWidth );
		else
			return setLow_size( desc.ByteWidth );
	}
	return _mm_setzero_si128();
}
// Memory usage of the resource behind a shader resource view; expects the
// resource to be a buffer, since this project never creates texture views.
__m128i __declspec( noinline ) DirectCompute::resourceMemoryUsage( ID3D11ShaderResourceView* srv )
{
	if( nullptr != srv )
	{
		CComPtr<ID3D11Resource> res;
		srv->GetResource( &res );
		CComPtr<ID3D11Buffer> buff;
		if( SUCCEEDED( res.QueryInterface( &buff ) ) )
			return bufferMemoryUsage( buff );
		assert( false ); // We don't use textures in this project
	}
	return _mm_setzero_si128();
}
// Current device of the calling thread, bound by Device::ThreadSetupRaii.
// All the accessors below throw OLE_E_BLANK when no device is bound.
static thread_local const Device* ts_device = nullptr;
ID3D11Device* DirectCompute::device()
{
	const Device* dev = ts_device;
	if( nullptr != dev )
		return dev->device;
	throw OLE_E_BLANK;
}
ID3D11DeviceContext* DirectCompute::context()
{
	const Device* dev = ts_device;
	if( nullptr != dev )
		return dev->context;
	throw OLE_E_BLANK;
}
const sGpuInfo& DirectCompute::gpuInfo()
{
	const Device* dev = ts_device;
	if( nullptr != dev )
		return dev->gpuInfo;
	throw OLE_E_BLANK;
}
const LookupTables& DirectCompute::lookupTables()
{
	const Device* dev = ts_device;
	if( nullptr != dev )
		return dev->lookupTables;
	throw OLE_E_BLANK;
}
// Set the compute shader identified by the enum on the current thread's
// immediate context; throws OLE_E_BLANK when no device is bound.
void DirectCompute::bindShader( eComputeShader shader )
{
	const Device* dev = ts_device;
	if( nullptr != dev )
	{
		ID3D11ComputeShader* cs = dev->shaders[ (uint16_t)shader ];
		dev->context->CSSetShader( cs, nullptr, 0 );
		return;
	}
	throw OLE_E_BLANK;
}
// Upload 16 bytes of shader constants into the per-device small constant
// buffer and return it for binding; throws OLE_E_BLANK without a device.
ID3D11Buffer* __vectorcall DirectCompute::updateSmallCb( __m128i cbData )
{
	const Device* dev = ts_device;
	if( nullptr != dev && nullptr != dev->smallCb )
	{
		ID3D11Buffer* cb = dev->smallCb;
		MappedResource mapped;
		check( mapped.map( cb, false ) );
		store16( mapped.data(), cbData );
		return cb;
	}
	throw OLE_E_BLANK;
}
#if DBG_TEST_NAN
// Debug-only accessor for the NaN-scan scratch buffers of the current device
const DbgNanTest& DirectCompute::getNanTestBuffers()
{
	const Device* dev = ts_device;
	if( nullptr != dev )
		return dev->nanTestBuffers;
	throw OLE_E_BLANK;
}
#endif
// Publish the device pointer in the thread-local variable, enabling DirectCompute::device() etc. on this thread.
// The thread must not already have a device set up.
Device::ThreadSetupRaii::ThreadSetupRaii( const Device* dev )
{
	assert( ts_device == nullptr );
	ts_device = dev;
	setup = true;
}
// Clear the thread-local device pointer, but only if this instance actually set it up
Device::ThreadSetupRaii::~ThreadSetupRaii()
{
	if( setup )
	{
		assert( ts_device != nullptr );
		ts_device = nullptr;
	}
}
| 3,823
|
C++
|
.cpp
| 151
| 23.317881
| 105
| 0.725802
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,603
|
TensorsArena.cpp
|
Const-me_Whisper/Whisper/ML/TensorsArena.cpp
|
#include "stdafx.h"
#include "TensorsArena.h"
#include "../D3D/createBuffer.h"
#include "mlUtils.h"
// Round up the integer to the next power of 2; the argument must be greater than 1
static inline uint32_t roundUpPower2( uint32_t x )
{
	// std::bit_ceil from C++/20 standard library implements runtime dispatch, uses LZCNT when AVX2 is available, otherwise BSR
	// That's not what we want.
	// BSR is only slightly slower than LZCNT: same speed on Intel, on AMD it's 3 versus 1 cycles.
	// defaultNewCapacity function is only called occasionally, that branch is therefore unpredictable.
	assert( x > 1 );	// _BitScanReverse output is undefined when the input ( x - 1 ) is zero
	unsigned long idx;
	_BitScanReverse( &idx, x - 1 );
	return 2u << idx;
}
uint32_t DirectCompute::defaultNewCapacity( uint32_t current, uint32_t requested )
{
// Implement some reasonable tactics to compute capacity of these buffers
constexpr uint32_t minAlloc = 1024; // 1k elements = 4kb of VRAM for FP32 tensors
constexpr uint32_t allocGranularity = 1u << 14; // 16k elements = 64kb of VRAM for FP32 tensors
if( requested > minAlloc )
{
const uint32_t roundedUpPowerOf2 = roundUpPower2( requested );
constexpr uint32_t lowMask = allocGranularity - 1;
constexpr uint32_t highMask = ~lowMask;
const uint32_t roundedUpGranularity = ( requested + lowMask ) & highMask;
const uint32_t res = std::min( roundedUpPowerOf2, roundedUpGranularity );
assert( res >= requested );
return res;
}
return minAlloc;
}
using namespace DirectCompute;

// dataType: the single element type stored by this arena (FP16 or FP32)
// config: initial count of pooled tensor slots, plus an optional override for the capacity growth policy
TensorsArena::ArenaImpl::ArenaImpl( eDataType dataType, const sArenaConfig& config ) :
	type( dataType ),
	pfnNewCap( nullptr != config.pfnCapInner ? config.pfnCapInner : &defaultNewCapacity )
{
	pool.reserve( config.initialCapOuter );
}
// Make a tensor of the requested 4D size, backed by the pooled GPU buffer.
// When the current buffer is too small, the old views are dropped and a new buffer is created
// with capacity chosen by the pfnNewCap policy; the returned tensor aliases the pooled views.
Tensor PooledTensor::tensor( eDataType type, const std::array<uint32_t, 4>& ne, pfnNewCapacity pfnNewCap )
{
	const uint32_t p1 = ne[ 0 ] * ne[ 1 ];
	const uint32_t p2 = ne[ 2 ] * ne[ 3 ];
	const uint32_t count = p1 * p2;	// total element count; NOTE(review): assumes the product fits in 32 bits
	if( count > capacity )
	{
		views.clear();
		const uint32_t newCap = pfnNewCap( capacity, count );
		assert( newCap >= count );
		const size_t cb = elementSize( type ) * newCap;
		CComPtr<ID3D11Buffer> buffer;
		check( createBuffer( eBufferUse::ReadWrite, cb, &buffer, nullptr, nullptr ) );
		check( views.create( buffer, viewFormat( type ), newCap, true ) );
		capacity = newCap;
	}
	// Dense strides, computed from the requested size
	TensorShape shape;
	shape.ne = ne;
	shape.setDenseStrides();
	Tensor res{ shape, views };
	res.dbgSetType( type );
#if DBG_TEST_NAN
	fillTensorWithNaN( res );
#endif
	return res;
}
// Make a tensor from the next pooled slot, growing the pool when all current slots are in use
Tensor TensorsArena::ArenaImpl::tensor( const std::array<uint32_t, 4>& ne )
{
	const size_t slot = index;
	index++;
	if( slot < pool.size() )
		return pool[ slot ].tensor( type, ne, pfnNewCap );

	// The cursor never skips slots, so it can only be exactly one past the end here
	assert( slot == pool.size() );
	PooledTensor& added = pool.emplace_back();
	return added.tensor( type, ne, pfnNewCap );
}
// Create the two per-type arenas; the array is indexed by eDataType, so the order must match the enum values
TensorsArena::TensorsArena( const sArenaConfigs& configs ) :
	arenas{ ArenaImpl{ eDataType::FP16, configs.fp16 }, ArenaImpl{ eDataType::FP32, configs.fp32 } }
{
	static_assert( 0 == (uint8_t)eDataType::FP16 );
	static_assert( 1 == (uint8_t)eDataType::FP32 );
}
// Make a tensor of the requested type and size, using the arena matching that data type
Tensor TensorsArena::tensor( eDataType type, const std::array<uint32_t, 4>& ne )
{
	return arenas[ (uint8_t)type ].tensor( ne );
}
// Reset every arena; ArenaImpl::reset is defined in the header — presumably rewinds the slot
// cursor so pooled buffers get reused, without releasing GPU memory. TODO confirm against the header.
void TensorsArena::reset()
{
	for( ArenaImpl& a : arenas )
		a.reset();
}
// Clear every arena; ArenaImpl::clear is defined in the header — presumably releases
// the pooled tensors and their GPU buffers. TODO confirm against the header.
void TensorsArena::clear()
{
	for( ArenaImpl& a : arenas )
		a.clear();
}
__m128i TensorsArena::ArenaImpl::getMemoryUse() const
{
const size_t cbElement = elementSize( type );
size_t countElts = 0;
for( const auto& t : pool )
countElts += t.getCapacity();
const size_t cbVideo = cbElement * countElts;
const size_t cbSystem = vectorMemoryUse( pool );
return setr_size( cbSystem, cbVideo );
}
__m128i TensorsArena::getMemoryUse() const
{
__m128i res = _mm_setzero_si128();
for( const auto& a : arenas )
res = _mm_add_epi64( res, a.getMemoryUse() );
return res;
}
// Fill the pooled GPU buffer with zeros.
// Returns S_FALSE when the buffer was never allocated, S_OK on success, or the HRESULT of a GPU failure.
HRESULT PooledTensor::zeroMemory()
{
	if( 0 == capacity )
		return S_FALSE;
	try
	{
		// The DirectCompute helpers report errors by throwing HRESULT values; translate back into a return code
		DirectCompute::zeroMemory( views, capacity );
		return S_OK;
	}
	catch( HRESULT hr )
	{
		return hr;
	}
}
// Zero out all pooled GPU buffers of this arena; stops and propagates the error at the first failure
HRESULT TensorsArena::ArenaImpl::zeroMemory()
{
	for( PooledTensor& e : pool )
		CHECK( e.zeroMemory() );
	return S_OK;
}
// Zero out all pooled GPU buffers of all arenas; stops and propagates the error at the first failure
HRESULT TensorsArena::zeroMemory()
{
	for( ArenaImpl& e : arenas )
		CHECK( e.zeroMemory() );
	return S_OK;
}
| 4,204
|
C++
|
.cpp
| 142
| 27.570423
| 124
| 0.721259
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,604
|
MlContext.dbg.cpp
|
Const-me_Whisper/Whisper/ML/MlContext.dbg.cpp
|
#include "stdafx.h"
#include "MlContext.h"
#include "../source/ggml.h"
#include "testUtils.h"
using namespace DirectCompute;
#define E_TYPE HRESULT_FROM_WIN32( ERROR_DATATYPE_MISMATCH )
// Print two vectors side by side, interpreting each __m128i as 4 int32 lanes.
// Used to report size / stride mismatches between the reference ggml tensor and the GPU tensor.
static void dbgPrintSizeDiff( const char* what, __m128i ref, __m128i gpu )
{
	std::array<int, 8> a;
	_mm_storeu_si128( ( __m128i* ) & a[ 0 ], ref );
	_mm_storeu_si128( ( __m128i* ) & a[ 4 ], gpu );
	printf( "%s; reference [ %i, %i, %i, %i ], GPGPU [ %i, %i, %i, %i ]\n",
		what,
		a[ 0 ], a[ 1 ], a[ 2 ], a[ 3 ],
		a[ 4 ], a[ 5 ], a[ 6 ], a[ 7 ] );
}
// Debug helper: download the GPU tensor, compare element-wise against the reference ggml tensor,
// and print the difference statistics. Also warns when the tensor sizes disagree.
// Throws E_TYPE on data type mismatch, E_NOTIMPL for ggml types other than F32 / F16.
// trapToDebugger: when true, executes a breakpoint after printing.
void MlContext::dbgPrintDifference( const ggml_tensor* reference, const Tensor& gpu, const char* what, bool trapToDebugger )
{
	sTensorDiff diff;
	const __m128i gpuSize = gpu.sizeVec();
	const __m128i gpuStrides = gpu.stridesVec();
	__m128i expectedStrides;
	if( reference->type == GGML_TYPE_F32 )
	{
		if( gpu.getType() != eDataType::FP32 )
			throw E_TYPE;
		// GPU strides are in elements, ggml strides in bytes; FP32 elements are 4 bytes
		expectedStrides = _mm_slli_epi32( gpuStrides, 2 );
		std::vector<float> v;
		gpu.download( v );
		diff = computeDiff( v.data(), (const float*)reference->data, v.size() );
	}
	else if( reference->type == GGML_TYPE_F16 )
	{
		if( gpu.getType() != eDataType::FP16 )
			throw E_TYPE;
		expectedStrides = _mm_slli_epi32( gpuStrides, 1 );
		std::vector<uint16_t> v;
		gpu.download( v );
		diff = computeDiff( v.data(), (const uint16_t*)reference->data, v.size() );
	}
	else
		throw E_NOTIMPL;
	// NOTE(review): these 16-byte loads assume ggml_tensor::ne and nb are arrays of 32-bit integers —
	// verify against the ggml version in use, newer versions widened these fields to 64 bits
	const __m128i ggmlSize = _mm_loadu_si128( ( const __m128i* ) & reference->ne[ 0 ] );
	const __m128i ggmlStrides = _mm_loadu_si128( ( const __m128i* ) & reference->nb[ 0 ] );
	if( !vectorEqual( gpuSize, ggmlSize ) )
		dbgPrintSizeDiff( "dbgPrintDifference - size is different", ggmlSize, gpuSize );
	// if( !vectorEqual( expectedStrides, ggmlStrides ) ) dbgPrintSizeDiff( "dbgPrintDifference - stride is different", ggmlStrides, expectedStrides );
	diff.print( what );
	if( trapToDebugger )
		__debugbreak();
}
| 1,903
|
C++
|
.cpp
| 51
| 35.019608
| 148
| 0.663957
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,605
|
MlContext.cpp
|
Const-me_Whisper/Whisper/ML/MlContext.cpp
|
#include "stdafx.h"
#include "MlContext.h"
#include "../D3D/shaderNames.h"
#include "LookupTables.h"
#include "../D3D/shaders.h"
#include "../D3D/Binder.h"
#include "../D3D/MappedResource.h"
#include "../D3D/downloadBuffer.h"
#include "testUtils.h"
#include "reshapedMultiply.h"
using namespace DirectCompute;
// TODO: change this to a field, and set to false when the GPU doesn't support FP64 math
// Most notably, Intel has dropped the support recently:
// https://www.intel.com/content/www/us/en/developer/articles/guide/lp-api-developer-optimization-guide.html#inpage-nav-3-8-undefined
// "To improve power and performance", LOL
constexpr bool usePreciseComputeShaders = true;
// profileColl: destination collection for the GPU time measurements produced by this context
MlContext::MlContext( Whisper::ProfileCollection& profileColl ) :
	profiler( profileColl )
{
	check( cb.create() );
	check( profiler.create() );
}
// Bind a compute shader for the following Dispatch calls, and tell the profiler which shader is now measured
void MlContext::bindShader( eComputeShader cs )
{
	DirectCompute::bindShader( cs );
	profiler.computeShader( cs );
}
// Matrix product strategy for row-major src0: a reshape pass converts src1 into a temporary FP16
// buffer, then the main pass computes dot products of src0 rows with the reshaped src1 columns.
// Throws E_UNEXPECTED when src1 has more than one slice in the 4-th dimension.
void MlContext::mulMatDot( const Tensor& src0, const Tensor& src1, Tensor& res )
{
	const auto& size1 = src1.ne;
	if( 1 != size1[ 3 ] )
		throw E_UNEXPECTED;
	const size_t tempLength = size1[ 0 ] * size1[ 1 ] * size1[ 2 ] * size1[ 3 ];
	const TensorGpuViews& tempBuffer = temp.fp16( tempLength );
	cb.bind();
	// Pass 1: reshape src1 into the temporary buffer, one thread group per [ column, layer ]
	bindShader( eComputeShader::mulMatDotReshape );
	cb.update( src1 );
	Binder bind;
	bind.bind( src1, tempBuffer );
	context()->Dispatch( size1[ 1 ], size1[ 2 ], 1 );
	// Pass 2: the dot products, one thread group per [ src1 column, src0 row ]
	bindShader( eComputeShader::mulMatDotMain );
	cb.update( src0, src1, res );
	bind.bind( src0, tempBuffer, res );
	const auto& size0 = src0.ne;
	// total rows in src0
	const uint32_t nr = size0[ 1 ] * size0[ 2 ] * size0[ 3 ];
	context()->Dispatch( size1[ 1 ], nr, 1 );
}
// Matrix product strategy for column-major src0, accumulating partial products into a temporary buffer.
// Throws E_INVALIDARG when tensor a is neither FP16 nor FP32.
void MlContext::mulMatMad( const Tensor& a, const Tensor& b, Tensor& res )
{
	// CaptureRaii renderDoc;
	const uint32_t resultElts = res.countElements();
	// Count of partial accumulators per output element; presumably matches a constant in the HLSL — verify there
	constexpr uint32_t nth = 4;
	uint32_t fp16;
	TensorGpuViews tempBuffer;
	const eDataType dataType = a.getType();
	if( dataType == eDataType::FP16 )
	{
		fp16 = TRUE;
		tempBuffer = temp.fp16( resultElts * nth );
	}
	else if( dataType == eDataType::FP32 )
	{
		fp16 = FALSE;
		tempBuffer = temp.fp32( resultElts * nth );
	}
	else
		throw E_INVALIDARG;
	// Smuggle two extra scalars to the shader through the strides of the result shape:
	// nb[ 0 ] = FP16 flag, nb[ 1 ] = count of elements in the result
	TensorShape resultShape = res;
	resultShape.nb = { fp16, resultElts, 0, 0 };
	cb.update( a, b, resultShape );
	bindShader( eComputeShader::mulMatMadMain );
	cb.bind();
	Binder bind;
	bind.bind( { a, b }, { res, tempBuffer } );
	context()->Dispatch( b.ne[ 1 ], b.ne[ 2 ], b.ne[ 3 ] );
}
// Matrix product with the shader picked from the shape of tensor B:
// single scalar, single row (sequential or tiled over rows of A), or the general 32×32 tiled shader.
void MlContext::mulMatTiled( const Tensor& a, const Tensor& b, Tensor& res )
{
	cb.update( a, b, res );
	cb.bind();
	Binder bind;
	bind.bind( a, b, res );
	if( b.ne[ 1 ] == 1 )
	{
		if( b.ne[ 0 ] != 1 )
		{
#if 0
			static PrintUniqueTensorSizes printSize( "mulMatByRow" );
			printSize.print( a, b );
#endif
			// Tensor B is a single row, we have optimized compute shaders for that use case
			// Even 2 of them, tiled and sequential. Select between these two shaders.
			constexpr uint32_t minHeightToTile = 2;
			if( a.ne[ 1 ] < minHeightToTile )
			{
				bindShader( eComputeShader::mulMatByRow );
				context()->Dispatch( a.ne[ 1 ], a.ne[ 2 ], a.ne[ 3 ] );
			}
			else
			{
				// One thread group per TILE_Y rows of tensor A
				bindShader( eComputeShader::mulMatByRowTiled );
				constexpr uint32_t TILE_Y = 64;
				const uint32_t groupsX = ( a.ne[ 1 ] + TILE_Y - 1 ) / TILE_Y;
				context()->Dispatch( groupsX, a.ne[ 2 ], a.ne[ 3 ] );
			}
		}
		else
		{
			// Tensor B is a single element: we have an optimized shader for that as well
			bindShader( eComputeShader::mulMatByScalar );
			context()->Dispatch( a.ne[ 2 ], a.ne[ 3 ], 1 );
		}
	}
	else
	{
		// According to visual studio debugger, when the second argument of this method is a 2D matrix, the first argument is 2D as well.
		// Assuming both arguments are 2D matrices.
		// For optimal VRAM bandwidth utilization, we compute such matrix products in square tiles, a tile is 32x32 elements.
		// Dispatching one thread group for each tile of the output matrix.
		bindShader( eComputeShader::mulMatTiled );
		// These compute shaders correctly handle partial tiles on the right and bottom edges of the output matrix, that's why rounding up
		constexpr uint32_t TILE_SIZE = 32;
		const uint32_t x = ( res.ne[ 0 ] + TILE_SIZE - 1 ) / TILE_SIZE;
		const uint32_t y = ( res.ne[ 1 ] + TILE_SIZE - 1 ) / TILE_SIZE;
		const uint32_t z = res.ne[ 2 ] * res.ne[ 3 ];
		context()->Dispatch( x, y, z );
	}
}
void MlContext::mulMat( const Tensor& src0, const Tensor& src1, Tensor& res )
{
const uint32_t nb00 = src0.nb[ 0 ];
const uint32_t nb01 = src0.nb[ 1 ];
if( nb01 >= nb00 )
mulMatDot( src0, src1, res );
else
mulMatMad( src0, src1, res );
}
namespace
{
	// Must match the HLSL in flashAttention.hlsl
	struct sFlashAttentionConstants
	{
		TensorShape q, k, v, res;
		BOOL masked;
		float scale;
		uint32_t tempBufferStride;
		uint32_t zzPadding;	// pads the structure; D3D11 constant buffers require a size multiple of 16 bytes
	};
	// Values computed along with the constant buffer, needed to dispatch the flash attention shaders
	struct sFlashAttnDispatchInfo
	{
		uint32_t tempStride;	// row stride of the FP32 temporary buffer, aligned up to 8 elements
		uint32_t groupsCount;	// count of thread groups to dispatch
	};
	// Fill the constant buffer for the flash attention shaders, creating the D3D buffer on first use.
	sFlashAttnDispatchInfo makeFlashAttentionConstants( CComPtr<ID3D11Buffer>& buffer, const Tensor& q, const Tensor& k, const Tensor& v, Tensor& res, bool masked )
	{
		if( nullptr == buffer )
		{
			CD3D11_BUFFER_DESC desc{ sizeof( sFlashAttentionConstants ), D3D11_BIND_CONSTANT_BUFFER, D3D11_USAGE_DYNAMIC, D3D11_CPU_ACCESS_WRITE };
			check( device()->CreateBuffer( &desc, nullptr, &buffer ) );
		}
		sFlashAttnDispatchInfo result;
		sFlashAttentionConstants cb;
		cb.q = q;
		cb.k = k;
		cb.v = v;
		cb.res = res;
		cb.masked = masked ? TRUE : FALSE;
		// Scale factor = 1 / sqrt( length of a query row )
		const int neq0 = (int)cb.q.ne[ 0 ];
		const int D = neq0;
		cb.scale = (float)( 1.0 / sqrt( (double)(int)D ) );
		const uint32_t nek1 = cb.k.ne[ 1 ];
		// Align the temp buffer stride to 32 bytes = 8 FP32 elements
		constexpr uint32_t align = 32 / 4;
		result.tempStride = ( ( nek1 + align - 1 ) / align ) * align;
		cb.tempBufferStride = result.tempStride;
		cb.zzPadding = 0;
		// One thread group per query row, across all layers
		result.groupsCount = cb.q.ne[ 1 ] * cb.q.ne[ 2 ] * cb.q.ne[ 3 ];
		MappedResource mapped;
		check( mapped.map( buffer, false ) );
		memcpy( mapped.data(), &cb, sizeof( cb ) );
		return result;
	}
}
// Fused attention over the q, k, v tensors; the masked flag is forwarded to the shaders.
// Either a single-pass shader (fast), or a 3-pass version which closely matches the reference CPU implementation.
void MlContext::flashAttention( const Tensor& q, const Tensor& k, const Tensor& v, Tensor& res, bool masked )
{
	sFlashAttnDispatchInfo di = makeFlashAttentionConstants( flashAttentionConstants, q, k, v, res, masked );
	const uint32_t tempLength = di.tempStride * di.groupsCount;
	const TensorGpuViews& tb = temp.fp32( tempLength );
	csSetCB( flashAttentionConstants );
	ID3D11DeviceContext* const ctx = context();
	Binder bind;
	bind.bind( { q, k, v, lookupTables().exponent()}, {res, tb});
	if constexpr( usePreciseComputeShaders && !enableInexactOptimizations )
	{
		bindShader( eComputeShader::flashAttentionCompat1 );
		ctx->Dispatch( di.groupsCount, 1, 1 );
		bindShader( eComputeShader::flashAttentionCompat2 );
		ctx->Dispatch( ( di.groupsCount + 31 ) / 32, 1, 1 );
		bindShader( eComputeShader::flashAttentionCompat3 );
		ctx->Dispatch( di.groupsCount, 1, 1 );
	}
	else
	{
		// This version is not too bad, e.g. maxAbsDiff = 2.7895e-05, avgDiffSquared = 1.61783e-14
		// And probably much faster.
		// But still, it does not deliver bitwise equality with the reference CPU version
		bindShader( eComputeShader::flashAttention );
		ctx->Dispatch( di.groupsCount, 1, 1 );
	}
}
namespace
{
	// Round up the number to be a multiple of 32
	inline uint32_t roundUp32( uint32_t x )
	{
		return ( ( x + 31u ) / 32u ) * 32u;
	}
}
// 1D convolution: two prep passes reshape the arguments into FP16 temporaries, then the main pass convolves.
// a: presumably the kernel tensor, b: the input tensor — verify against the convolution HLSL.
// is2: when true, the output has half the length of the input (stride-2 variant).
// Fixed a copy-paste bug in the disabled debug dump: the arg2 dump used lenTemp1 instead of lenTemp2.
void MlContext::convolutionImpl( const Tensor& a, const Tensor& b, Tensor& res, bool is2 )
{
	const uint32_t ne00 = a.ne[ 0 ];
	const uint32_t ne01 = a.ne[ 1 ];
	const uint32_t ne02 = a.ne[ 2 ];
	const uint32_t ne10 = b.ne[ 0 ];
	const uint32_t ne11 = b.ne[ 1 ];
	const uint32_t nb00 = a.nb[ 0 ];
	const uint32_t nb01 = a.nb[ 1 ];
	const uint32_t nb02 = a.nb[ 2 ];
	const uint32_t nb10 = b.nb[ 0 ];
	const uint32_t nb11 = b.nb[ 1 ];
	const uint32_t nb1 = res.nb[ 1 ];
	// Row width aligned up to 32 elements
	const uint32_t ew0 = roundUp32( ne01 );
	const uint32_t nk = ne00;
	const uint32_t nh = nk / 2;
	const uint32_t lenTemp1 = ne02 * ew0 * ne00;
	const uint32_t lenTemp2 = ( ne10 + ne00 ) * ew0;
	const TensorGpuViews& temp1 = temp.fp16( lenTemp1, true );
	const TensorGpuViews& temp2 = temp.fp16_2( lenTemp2, true );
	cb.bind();
	// Pass 1: reshape tensor a into temp1
	bindShader( eComputeShader::convolutionPrep1 );
	cb.update( a );
	Binder bind;
	bind.bind( a, temp1 );
	context()->Dispatch( ne01, ne02, 1 );
	// Pass 2: reshape tensor b into temp2
	bindShader( eComputeShader::convolutionPrep2 );
	cb.update( a, b );
	bind.bind( b, temp2 );
	context()->Dispatch( ne11, 1, 1 );
	// Pass 3: the convolution itself, reading both temporaries
	cb.update( a, b, res );
	bind.bind( temp1, temp2, res );
	if( is2 )
	{
		if constexpr( enableInexactOptimizations )
		{
			// Specialized shader for the kernel size used by the model, processing TILE_Y outputs per group
			constexpr uint32_t KERNEL = 3;
			constexpr uint32_t TILE_Y = 8;
			if( a.ne[ 0 ] == KERNEL )
			{
				const uint32_t x = ( ( ne10 / 2 ) + TILE_Y - 1 ) / TILE_Y;
				bindShader( eComputeShader::convolutionMain2Fixed );
				context()->Dispatch( x, ne02, 1 );
				return;
			}
		}
		bindShader( eComputeShader::convolutionMain2 );
		context()->Dispatch( ne10 / 2, ne02, 1 );
	}
	else
	{
		bindShader( eComputeShader::convolutionMain );
		context()->Dispatch( ne10, ne02, 1 );
	}
#if 0
	std::vector<uint16_t> tmp;
	downloadBuffer( temp1, tmp );
	dbgWriteBinaryFile( L"conv-gpu-arg1.bin", tmp.data(), lenTemp1 * 2 );
	downloadBuffer( temp2, tmp );
	// Was lenTemp1 * 2 — a copy-paste bug, temp2 has lenTemp2 elements
	dbgWriteBinaryFile( L"conv-gpu-arg2.bin", tmp.data(), lenTemp2 * 2 );
	res.download( tempVector );
	dbgWriteBinaryFile( L"conv-gpu-result.bin", tempVector.data(), tempVector.size() * 4 );
#endif
}
// Normalize the rows of tensor a into res; the shader is chosen by precision settings and row length.
void MlContext::norm( const Tensor& a, Tensor& res )
{
	const uint32_t ne01 = a.ne[ 1 ];
	const uint32_t ne02 = a.ne[ 2 ];
	const uint32_t ne03 = a.ne[ 3 ];
	cb.bind();
	cb.update( a, res );
	Binder bind;
	bind.bind( a, res );
	if constexpr( usePreciseComputeShaders && !enableInexactOptimizations )
	{
		// The compat shader dispatches one thread group per 32 rows
		bindShader( eComputeShader::normCompat );
		context()->Dispatch( ( ne01 + 31 ) / 32, ne02, ne03 );
	}
	else
	{
		// Specialized shader for the fixed row size used by the models, one thread group per row
		constexpr uint32_t FIXED_ROW_SIZE = 1024;
		eComputeShader cs = ( a.ne[ 0 ] == FIXED_ROW_SIZE ) ? eComputeShader::normFixed : eComputeShader::norm;
		bindShader( cs );
		context()->Dispatch( ne01, ne02, ne03 );
	}
}
// Dispatch an element-wise binary compute shader over two same-shaped tensors, one thread group per row.
// cs: the shader implementing the actual operation; res: destination tensor of the same shape.
void MlContext::cwiseBinary( const Tensor& a, const Tensor& b, Tensor& res, eComputeShader cs )
{
	assert( isSameShape( a, b ) );
	assert( isSameShape( a, res ) );
	bindShader( cs );
	cb.bind();
	check( cb.update( a, b, res ) );
	Binder bind;
	bind.bind( a, b, res );
	uint32_t rows = a.countRows();
	context()->Dispatch( rows, 1, 1 );
}
// Element-wise sum of two tensors, returned as a new tensor.
// NOTE(review): this forwards to a 3-argument, Tensor-returning cwiseBinary overload declared elsewhere —
// the 4-argument overload above returns void.
Tensor MlContext::add( const Tensor& a, const Tensor& b )
{
	return cwiseBinary( a, b, eComputeShader::add );
}
// a += b, element-wise and in place; both tensors must have the same shape, and a must be FP32.
void MlContext::addInPlace( Tensor& a, const Tensor& b )
{
	if( !isSameShape( a, b ) )
		throw E_INVALIDARG;
	assert( a.getType() == eDataType::FP32 );
	check( cb.update( a, b ) );
	bindShader( eComputeShader::addInPlace );
	cb.bind();
	Binder bind;
	bind.bind( b, a );
	context()->Dispatch( a.ne[ 1 ], a.ne[ 2 ], a.ne[ 3 ] );
}
// Copy tensor a into the dense destination res, converting element type as needed.
// A non-continuous source uses the transposing shader; downcastFp32 enables rounding while downcasting.
void MlContext::copyImpl( const Tensor& a, Tensor& res, bool downcastFp32 )
{
	assert( res.isContinuous() );
	const eComputeShader cs = a.isContinuous() ? eComputeShader::copyConvert : eComputeShader::copyTranspose;
	bindShader( cs );
	cb.bind();
	// These two shaders don't need the shape of the destination because it's dense, but they want a boolean flag telling whether to apply rounding while downcasting
	TensorShape dummyShape;
	dummyShape.setZero();
	dummyShape.ne[ 0 ] = downcastFp32 ? TRUE : FALSE;
	check( cb.update( a, dummyShape ) );
	Binder bind;
	bind.bind( a, res );
	context()->Dispatch( a.ne[ 1 ], a.ne[ 2 ], a.ne[ 3 ] );
}
namespace
{
	// Reinterpret the bits of an FP32 value as uint32_t.
	// memcpy is the standard portable idiom for type punning; compilers optimize it into a single
	// register move, and unlike the previous version it doesn't require x86 SSE intrinsics.
	uint32_t bitcast( float val )
	{
		uint32_t res;
		static_assert( sizeof( res ) == sizeof( val ) );
		memcpy( &res, &val, sizeof( res ) );
		return res;
	}
}
// Multiply all elements of the tensor by a constant, in place; the tensor must be continuous.
void MlContext::scale( Tensor& a, float mul )
{
	if( !a.isContinuous() )
		throw E_INVALIDARG;
	bindShader( eComputeShader::scaleInPlace );
	cb.bind();
	// Pass the multiplier to the shader through ne[ 0 ] of the second shape, as raw FP32 bits
	TensorShape dummyShape;
	dummyShape.setZero();
	dummyShape.ne[ 0 ] = bitcast( mul );
	check( cb.update( a, dummyShape ) );
	Binder bind;
	bind.bind( a );
	context()->Dispatch( a.countRows(), 1, 1 );
}
// Add tensor b to tensor a in place, repeating b as needed to cover the larger tensor a —
// the broadcast semantics are implemented in the addRepeat HLSL, verify there.
void MlContext::addRepeat( Tensor& a, const Tensor& b )
{
	check( cb.update( a, b ) );
	bindShader( eComputeShader::addRepeat );
	cb.bind();
	Binder bind;
	bind.bind( b, a );
	context()->Dispatch( a.ne[ 1 ], a.ne[ 2 ], a.ne[ 3 ] );
}
// Fused equivalent of addRepeat( a, b ) followed by scale( a, scale ),
// as shown by the disabled two-pass reference implementation below.
void MlContext::addRepeatScale( Tensor& a, const Tensor& b, float scale )
{
#if 0
	addRepeat( a, b );
	this->scale( a, scale );
	return;
#endif
	// Pass the scale to the shader through ne[ 0 ] of the extra shape, as raw FP32 bits
	TensorShape dummyShape;
	dummyShape.setZero();
	dummyShape.ne[ 0 ] = bitcast( scale );
	check( cb.update( a, b, dummyShape ) );
	bindShader( eComputeShader::addRepeatScale );
	cb.bind();
	Binder bind;
	bind.bind( b, a );
	context()->Dispatch( a.ne[ 1 ], a.ne[ 2 ], a.ne[ 3 ] );
}
// In-place fused multiply-add with repeated (broadcast) mul and add tensors —
// presumably a = a * repeat( mul ) + repeat( add ), verify in the fmaRepeat HLSL.
// When mul and add share shape and layout, a cheaper single-pattern shader variant is used.
void MlContext::fmaRepeat( Tensor& a, const Tensor& mul, const Tensor& add )
{
	eComputeShader cs;
	if( isSameShapeAndLayout( mul, add ) )
	{
		cs = eComputeShader::fmaRepeat1;
		check( cb.update( a, mul ) );
	}
	else
	{
		cs = eComputeShader::fmaRepeat2;
		check( cb.update( a, mul, add ) );
	}
	bindShader( cs );
	cb.bind();
	Binder bind;
	bind.bind( mul, add, a );
	context()->Dispatch( a.ne[ 1 ], a.ne[ 2 ], a.ne[ 3 ] );
}
// In-place diagonal masking, offset by n_past columns — presumably sets elements past the shifted
// diagonal to -inf, matching ggml_diag_mask_inf; verify in the diagMaskInf HLSL.
// The tensor must be continuous.
void MlContext::diagMaskInf( Tensor& a, uint32_t n_past )
{
	if( !a.isContinuous() )
		throw E_INVALIDARG;
	bindShader( eComputeShader::diagMaskInf );
	// Pass n_past to the shader through ne[ 0 ] of the second shape
	TensorShape dummyShape;
	dummyShape.setZero();
	dummyShape.ne[ 0 ] = n_past;
	cb.bind();
	check( cb.update( a, dummyShape ) );
	Binder bind;
	bind.bind( a );
	const uint32_t n = a.countRows();
	const uint32_t nr = a.ne[ 1 ];
	const uint32_t nz = n / nr;
	context()->Dispatch( nr, nz, 1 );
}
// In-place softmax over the rows of the tensor; inputScale multiplies the inputs first (fast path only).
// The precise path uses a 32-rows-per-group compat shader which matches the CPU reference;
// the fast path picks a shader specialized by row length. The tensor must be continuous.
void MlContext::softMax( Tensor& a, float inputScale )
{
	if( !a.isContinuous() )
		throw E_INVALIDARG;
	if constexpr( usePreciseComputeShaders && !enableInexactOptimizations )
	{
		// The compat shader doesn't implement input scaling
		assert( inputScale == 1.0f );
		bindShader( eComputeShader::softMaxCompat );
		const uint32_t nr = a.countRows();
		// Pass the row count to the shader through ne[ 0 ] of the second shape
		TensorShape dummyShape;
		dummyShape.setZero();
		dummyShape.ne[ 0 ] = nr;
		cb.bind();
		check( cb.update( a, dummyShape ) );
		Binder bind;
		bind.bind( lookupTables().exponent(), a);
		context()->Dispatch( ( nr + 31 ) / 32, 1, 1 );
	}
	else
	{
#if 0
		static PrintUniqueTensorSizes printSizes( "softMax" );
		printSizes.print( a );
#endif
		// Specialized shaders: one for the fixed row size used by the models, one for very long rows
		constexpr uint32_t FIXED_ROW_SIZE = 1500;
		eComputeShader cs;
		if( a.ne[ 0 ] == FIXED_ROW_SIZE )
			cs = eComputeShader::softMaxFixed;
		else if( a.ne[ 0 ] >= ( 1024 * 4 ) )
			cs = eComputeShader::softMaxLong;
		else
			cs = eComputeShader::softMax;
		bindShader( cs );
		const uint32_t nr = a.countRows();
		// ne[ 0 ] = row count, ne[ 1 ] = the input scale as raw FP32 bits
		TensorShape dummyShape;
		dummyShape.setZero();
		dummyShape.ne[ 0 ] = nr;
		dummyShape.ne[ 1 ] = bitcast( inputScale );
		cb.bind();
		check( cb.update( a, dummyShape ) );
		Binder bind;
		bind.bind( lookupTables().exponent(), a);
		context()->Dispatch( nr, 1, 1 );
	}
}
// Fused in-place operation: add the repeated (broadcast) tensor b to a, then apply the GELU
// activation using the pre-computed gelu lookup table — verify the exact semantics in the HLSL.
void MlContext::addRepeatGelu( Tensor& a, const Tensor& b )
{
	check( cb.update( a, b ) );
	bindShader( eComputeShader::addRepeatGelu );
	cb.bind();
	Binder bind;
	bind.bind( b, lookupTables().gelu(), a);
	context()->Dispatch( a.ne[ 1 ], a.ne[ 2 ], a.ne[ 3 ] );
}
namespace
{
	// True when rows of tokenEmbedding can be summed with rows of positionalEmbedding
	// for the range of tokens described by embd and pastTokensCount
	inline bool canAddRows( const Tensor& tokenEmbedding, const Tensor& positionalEmbedding, const Tensor& embd, uint32_t pastTokensCount )
	{
		// Row lengths of the two embedding matrices must agree
		const bool sameRowLength = tokenEmbedding.ne[ 0 ] == positionalEmbedding.ne[ 0 ];
		// positionalEmbedding must have enough rows for the requested token range
		const bool enoughRows = embd.ne[ 0 ] + pastTokensCount <= positionalEmbedding.ne[ 1 ];
		return sameRowLength && enoughRows;
	}
}
// Sum rows of the token embedding matrix with rows of the positional embedding matrix,
// producing a new FP32 matrix with one row per element of embd.
// NOTE(review): embd presumably holds token indices consumed by the shader — verify in the addRows HLSL.
// pastTokensCount offsets the rows taken from positionalEmbedding.
Tensor MlContext::addRows( const Tensor& tokenEmbedding, const Tensor& positionalEmbedding, const Tensor& embd, uint32_t pastTokensCount )
{
	if( !canAddRows( tokenEmbedding, positionalEmbedding, embd, pastTokensCount ) )
		throw E_INVALIDARG;
	const uint32_t rowLength = tokenEmbedding.ne[ 0 ];
	const uint32_t rows = embd.ne[ 0 ];
	Tensor result = createTensor( eDataType::FP32, { rowLength, rows } );
	// The constants are packed into a TensorShape structure, fields documented below
	TensorShape constants;
	// rowLength
	constants.ne[ 0 ] = rowLength;
	// pastTokensCount
	constants.ne[ 1 ] = pastTokensCount;
	// outputRowStride
	constants.ne[ 2 ] = result.nb[ 1 ];
	// embStrides
	constants.nb[ 0 ] = tokenEmbedding.nb[ 0 ];
	constants.nb[ 1 ] = tokenEmbedding.nb[ 1 ];
	// posStrides
	constants.nb[ 2 ] = positionalEmbedding.nb[ 0 ];
	constants.nb[ 3 ] = positionalEmbedding.nb[ 1 ];
	check( cb.update( constants ) );
	bindShader( eComputeShader::addRows );
	cb.bind();
	Binder bind;
	bind.bind( { tokenEmbedding, positionalEmbedding, embd }, { result } );
	context()->Dispatch( rows, 1, 1 );
	return result;
}
// Reshape a matrix into column-major horizontal panels of TILE_SIZE height, for the mulMatTiledEx shader.
// Returns a new tensor whose strides are set up as that shader expects: nb[ 0 ] = 0 marks the panel layout,
// nb[ 1 ] = elements per panel. Also removed a stray second semicolon on the ne[ 1 ] assignment.
Tensor MlContext::reshapePanels( const Tensor& a )
{
	constexpr uint32_t TILE_SIZE = ReshapedMultiply::TILE_SIZE;
	const eDataType dataType = a.getType();
	// Reshaping into column major horizontal panels, height = TILE_SIZE, width = width of the source matrix
	// Round height to multiple of tile size
	std::array<uint32_t, 4> ne = a.ne;
	// Dispatch a group of threads thread per panel
	const uint32_t groupsX = ( ne[ 1 ] + TILE_SIZE - 1 ) / TILE_SIZE;
	ne[ 1 ] = groupsX * TILE_SIZE;
	// Each panel has [ size.x, TILE_SIZE ] elements
	const uint32_t panelSize = ne[ 0 ] * TILE_SIZE;
	Tensor result = createTensor( dataType, ne );
	TensorShape constants;
	constants.setZero();
	// uint panelSize : packoffset( c2.y );
	constants.ne[ 1 ] = panelSize;
	// uint2 layerStrides: packoffset( c2.z );
	constants.ne[ 2 ] = result.nb[ 2 ];
	constants.ne[ 3 ] = result.nb[ 3 ];
	check( cb.update( a, constants ) );
	bindShader( eComputeShader::matReshapePanels );
	cb.bind();
	Binder bind;
	bind.bind( a, result );
	context()->Dispatch( groupsX, a.ne[ 2 ], a.ne[ 3 ] );
#if 0
	if( dataType == eDataType::FP32 )
	{
		std::vector<float> v1, v2;
		a.download( v1 );
		result.download( v2 );
		__debugbreak();
	}
	else if( dataType == eDataType::FP16 )
	{
		std::vector<uint16_t> v1, v2;
		a.download( v1 );
		result.download( v2 );
		__debugbreak();
	}
#endif
	// Set up size and stride expected by the mulMatTiledEx compute shader
	result.ne = a.ne;
	result.nb[ 0 ] = 0;
	result.nb[ 1 ] = panelSize;
	return result;
}
// Matrix product of two tensors which were pre-transposed into panels by reshapePanels;
// returns a new FP32 tensor. Throws E_INVALIDARG on size mismatch, or when either tensor
// isn't in the panel layout ( marked by nb[ 0 ] == 0 ).
Tensor MlContext::mulMatTiledEx( const Tensor& a, const Tensor& b )
{
	constexpr uint32_t TILE_SIZE = ReshapedMultiply::TILE_SIZE;
	if( !canMulMat( a, b ) )
		throw E_INVALIDARG; // Wrong size
	if( 0 != ( a.nb[ 0 ] | b.nb[ 0 ] ) )
		throw E_INVALIDARG; // Both tensors are expected to be pre-transposed into these panels
	Tensor res = createTensor( eDataType::FP32, { a.ne[ 1 ], b.ne[ 1 ], a.ne[ 2 ], b.ne[ 3 ] } );
	check( cb.update( a, b, res ) );
	bindShader( eComputeShader::mulMatTiledEx );
	cb.bind();
	Binder bind;
	bind.bind( a, b, res );
	// One thread group per TILE_SIZE × TILE_SIZE tile of the output; rounding up for partial edge tiles
	const uint32_t x = ( res.ne[ 0 ] + TILE_SIZE - 1 ) / TILE_SIZE;
	const uint32_t y = ( res.ne[ 1 ] + TILE_SIZE - 1 ) / TILE_SIZE;
	const uint32_t z = res.ne[ 2 ] * res.ne[ 3 ];
	context()->Dispatch( x, y, z );
	return res;
}
// Matrix product specialized for the case when tensor b is a single row; a is expected
// in the panel layout produced by reshapePanels. Returns a new FP32 tensor.
Tensor MlContext::mulMatByRowTiledEx( const Tensor& a, const Tensor& b )
{
	constexpr uint32_t TILE_SIZE = ReshapedMultiply::TILE_SIZE;
	assert( canMulMat( a, b ) );
	assert( b.ne[ 1 ] == 1 );
	Tensor res = createTensor( eDataType::FP32, { a.ne[ 1 ], 1, a.ne[ 2 ], b.ne[ 3 ] } );
	check( cb.update( a, b, res ) );
	bindShader( eComputeShader::mulMatByRowTiledEx );
	cb.bind();
	Binder bind;
	bind.bind( a, b, res );
	// One thread group per TILE_SIZE elements of the single output row, per layer
	const uint32_t x = ( res.ne[ 0 ] + TILE_SIZE - 1 ) / TILE_SIZE;
	const uint32_t y = res.ne[ 2 ];
	const uint32_t z = res.ne[ 3 ];
	context()->Dispatch( x, y, z );
	return res;
}
// Fused in-place add: presumably dest += repeat( pattern ) + finalAdd — verify in the addRepeatEx HLSL.
// dest and finalAdd must have the same shape; dest must be FP32.
void MlContext::addRepeatEx( Tensor& dest, const Tensor& pattern, const Tensor& finalAdd )
{
	if( !isSameShape( dest, finalAdd ) )
		throw E_INVALIDARG;
	assert( dest.getType() == eDataType::FP32 );
	check( cb.update( dest, pattern, finalAdd ) );
	bindShader( eComputeShader::addRepeatEx );
	cb.bind();
	Binder bind;
	bind.bind( pattern, finalAdd, dest );
	context()->Dispatch( dest.ne[ 1 ], dest.ne[ 2 ], dest.ne[ 3 ] );
}
// Total memory used by the context: constant buffers, temporary buffers, the flash attention
// constants, and the lookup tables. Packed as two 64-bit byte counters in a vector.
__m128i MlContext::getMemoryUse() const
{
	__m128i acc = _mm_add_epi64( cb.getMemoryUse(), temp.getMemoryUse() );
	acc = _mm_add_epi64( acc, lookupTables().getMemoryUsage() );
	return _mm_add_epi64( acc, bufferMemoryUsage( flashAttentionConstants ) );
}
| 20,097
|
C++
|
.cpp
| 629
| 29.600954
| 161
| 0.686305
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
18,606
|
Reshaper.cpp
|
Const-me_Whisper/Whisper/ML/Reshaper.cpp
|
#include "stdafx.h"
#include "Reshaper.h"
#include "../D3D/MappedResource.h"
#include "../D3D/Binder.h"
#include "../D3D/shaders.h"
#include "reshapedMultiply.h"
namespace
{
	using namespace DirectCompute;
	// Constant buffer data for the matReshapePanels compute shader;
	// the field layout presumably must match the cbuffer declared in the HLSL — verify there
	struct Constants
	{
		// Size and strides of the source tensor
		TensorShape arg0;
		uint32_t zzPadding;
		// Count of elements per panel
		uint32_t panelSize;
		// Layer strides of the output matrix
		std::array<uint32_t, 2> layerStrides;
	};
}
// Create the dynamic constant buffer for the reshape shader, sized for the Constants structure
HRESULT DirectCompute::Reshaper::createConstants()
{
	constexpr uint32_t cb = sizeof( Constants );
	CD3D11_BUFFER_DESC desc{ cb, D3D11_BIND_CONSTANT_BUFFER, D3D11_USAGE_DYNAMIC, D3D11_CPU_ACCESS_WRITE };
	return device()->CreateBuffer( &desc, nullptr, &constantBuffer );
}
// Reshape the matrix in place into column-major horizontal panels of TILE_SIZE height:
// runs the matReshapePanels shader into a new GPU buffer, then redirects the tensor to it,
// with the strides the reshaped-multiply shaders expect ( nb[ 0 ] = 0 marks the panel layout ).
// dataType: element type for the new buffer. Also removed a stray second semicolon on ne[ 1 ].
HRESULT DirectCompute::Reshaper::makePanels( Tensor& tensor, eDataType dataType )
{
	if( !constantBuffer )
		CHECK( createConstants() );
	constexpr uint32_t TILE_SIZE = ReshapedMultiply::TILE_SIZE;
	// Reshaping into column major horizontal panels, height = TILE_SIZE, width = width of the source matrix
	std::array<uint32_t, 4> ne = tensor.ne;
	const uint32_t groupsX = ( ne[ 1 ] + TILE_SIZE - 1 ) / TILE_SIZE;
	// Round the height up to a multiple of the tile size
	ne[ 1 ] = groupsX * TILE_SIZE;
	// Each panel has [ size.x, TILE_SIZE ] elements
	const uint32_t panelSize = ne[ 0 ] * TILE_SIZE;
	Tensor result;
	result.create( dataType, ne, true );
	{
		// Fill the constant buffer for the reshape shader
		MappedResource mapped;
		CHECK( mapped.map( constantBuffer, false ) );
		Constants& cb = *(Constants*)mapped.data();
		store( cb.arg0.ne, tensor.sizeVec() );
		store( cb.arg0.nb, tensor.stridesVec() );
		cb.panelSize = panelSize;
		cb.layerStrides[ 0 ] = result.nb[ 2 ];
		cb.layerStrides[ 1 ] = result.nb[ 3 ];
	}
	csSetCB( constantBuffer );
	{
		// One thread group per panel, per layer
		Binder bind;
		bind.bind( tensor, result );
		bindShader( eComputeShader::matReshapePanels );
		context()->Dispatch( groupsX, tensor.ne[ 2 ], tensor.ne[ 3 ] );
	}
	// Redirect the source tensor to the reshaped buffer, keeping its logical size
	tensor.nb[ 0 ] = 0;
	tensor.nb[ 1 ] = panelSize;
	tensor.nb[ 2 ] = result.nb[ 2 ];
	tensor.nb[ 3 ] = result.nb[ 3 ];
	tensor.setGpuViews( result );
	return S_OK;
}
// Unbind the constant buffer from the compute stage of the pipeline, in case it's still bound there
DirectCompute::Reshaper::~Reshaper()
{
	if( constantBuffer )
		csSetCB( nullptr );
}
| 2,102
|
C++
|
.cpp
| 68
| 28.691176
| 105
| 0.711672
|
Const-me/Whisper
| 8,145
| 691
| 142
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.