id
int64
0
755k
file_name
stringlengths
3
109
file_path
stringlengths
13
185
content
stringlengths
31
9.38M
size
int64
31
9.38M
language
stringclasses
1 value
extension
stringclasses
11 values
total_lines
int64
1
340k
avg_line_length
float64
2.18
149k
max_line_length
int64
7
2.22M
alphanum_fraction
float64
0
1
repo_name
stringlengths
6
65
repo_stars
int64
100
47.3k
repo_forks
int64
0
12k
repo_open_issues
int64
0
3.4k
repo_license
stringclasses
9 values
repo_extraction_date
stringclasses
92 values
exact_duplicates_redpajama
bool
2 classes
near_duplicates_redpajama
bool
2 classes
exact_duplicates_githubcode
bool
2 classes
exact_duplicates_stackv2
bool
1 class
exact_duplicates_stackv1
bool
2 classes
near_duplicates_githubcode
bool
2 classes
near_duplicates_stackv1
bool
2 classes
near_duplicates_stackv2
bool
1 class
5,106
game_compatibility.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/game_compatibility.cpp
// game_compatibility.cpp
// Maintains RPCS3's game compatibility database: downloads it from rpcs3.net,
// caches it to disk, parses the JSON payload into an in-memory map, and exposes
// per-title compatibility/patchset lookups for the game list and PKG installer.

#include "game_compatibility.h"
#include "gui_settings.h"
#include "downloader.h"
#include "localized.h"
#include "Crypto/unpkg.h"
#include "Loader/PSF.h"

#include <QApplication>
#include <QMessageBox>
#include <QJsonArray>
#include <QJsonDocument>
// NOTE(review): QFile, QIODevice and QFileInfo are used below — presumably pulled in
// transitively via one of the headers above; consider including them directly.

LOG_CHANNEL(compat_log, "Compat");

// Shorthand: convert a std::string to QString.
constexpr auto qstr = QString::fromStdString;

// Constructs the compatibility manager, kicks off an initial database request
// (from the local cache, since RequestCompatibility() defaults to offline mode —
// TODO confirm the default argument in the header) and wires up downloader signals.
game_compatibility::game_compatibility(std::shared_ptr<gui_settings> gui_settings, QWidget* parent)
	: QObject(parent)
	, m_gui_settings(std::move(gui_settings))
{
	// Cached database location inside the settings directory
	m_filepath = m_gui_settings->GetSettingsDir() + "/compat_database.dat";

	m_downloader = new downloader(parent);

	RequestCompatibility();

	connect(m_downloader, &downloader::signal_download_error, this, &game_compatibility::handle_download_error);
	connect(m_downloader, &downloader::signal_download_finished, this, &game_compatibility::handle_download_finished);
	connect(m_downloader, &downloader::signal_download_canceled, this, &game_compatibility::handle_download_canceled);
}

// Forwards a downloader error message to whoever listens to DownloadError.
void game_compatibility::handle_download_error(const QString& error)
{
	Q_EMIT DownloadError(error);
}

// Called when the database download completes. Parses the payload and, if it is
// a valid database, persists the raw bytes to the cache file. Always emits
// DownloadFinished so the game list refreshes (even if parsing failed).
void game_compatibility::handle_download_finished(const QByteArray& content)
{
	compat_log.notice("Database download finished");

	// Create new map from database and write database to file if database was valid
	if (ReadJSON(QJsonDocument::fromJson(content).object(), true))
	{
		// Write database to file
		QFile file(m_filepath);

		if (file.exists())
		{
			compat_log.notice("Database file found: %s", m_filepath);
		}

		if (!file.open(QIODevice::WriteOnly))
		{
			compat_log.error("Database Error - Could not write database to file: %s", m_filepath);
			return;
		}

		file.write(content);
		file.close();

		compat_log.success("Wrote database to file: %s", m_filepath);
	}

	// We have a new database in map, therefore refresh gamelist to new state
	Q_EMIT DownloadFinished();
}

// Forwards a downloader cancellation to whoever listens to DownloadCanceled.
void game_compatibility::handle_download_canceled()
{
	Q_EMIT DownloadCanceled();
}

// Parses a compatibility database JSON object into m_compat_database.
// json_data:       root object of the database document
// after_download:  true when parsing a freshly downloaded payload (server error
//                  codes are then surfaced to the user via DownloadError)
// Returns true if the document was a valid database; on success the previous
// map contents are fully replaced.
bool game_compatibility::ReadJSON(const QJsonObject& json_data, bool after_download)
{
	// Negative return codes signal a server-side error in the payload itself.
	const int return_code = json_data["return_code"].toInt();

	if (return_code < 0)
	{
		if (after_download)
		{
			std::string error_message;
			switch (return_code)
			{
			case -1: error_message = "Server Error - Internal Error"; break;
			case -2: error_message = "Server Error - Maintenance Mode"; break;
			default: error_message = "Server Error - Unknown Error"; break;
			}
			compat_log.error("%s: return code %d", error_message, return_code);
			Q_EMIT DownloadError(qstr(error_message) + " " + QString::number(return_code));
		}
		else
		{
			compat_log.error("Database Error - Invalid: return code %d", return_code);
		}
		return false;
	}

	if (!json_data["results"].isObject())
	{
		compat_log.error("Database Error - No Results found");
		return false;
	}

	// Replace the old database wholesale.
	m_compat_database.clear();

	QJsonObject json_results = json_data["results"].toObject();

	// Retrieve status data for every valid entry (key = title id)
	for (const auto& key : json_results.keys())
	{
		if (!json_results[key].isObject())
		{
			compat_log.error("Database Error - Unusable object %s", key);
			continue;
		}

		QJsonObject json_result = json_results[key].toObject();

		// Retrieve compatibility information from json.
		// Unknown/missing status strings map to the "NoResult" entry.
		compat::status status = ::at32(Status_Data, json_result.value("status").toString("NoResult"));

		// Add date if possible
		status.date = json_result.value("date").toString();

		// Add latest version if possible
		status.latest_version = json_result.value("update").toString();

		// Add patchsets if possible
		if (const QJsonValue patchsets_value = json_result.value("patchsets"); patchsets_value.isArray())
		{
			for (const QJsonValue& patch_set : patchsets_value.toArray())
			{
				compat::pkg_patchset set;
				set.tag_id = patch_set["tag_id"].toString().toStdString();
				set.popup = patch_set["popup"].toBool();
				set.signoff = patch_set["signoff"].toBool();
				set.popup_delay = patch_set["popup_delay"].toInt();
				set.min_system_ver = patch_set["min_system_ver"].toString().toStdString();

				if (const QJsonValue packages_value = patch_set["packages"]; packages_value.isArray())
				{
					for (const QJsonValue& package : packages_value.toArray())
					{
						compat::pkg_package pkg;
						pkg.version = package["version"].toString().toStdString();
						pkg.size = package["size"].toInt();
						pkg.sha1sum = package["sha1sum"].toString().toStdString();
						pkg.ps3_system_ver = package["ps3_system_ver"].toString().toStdString();
						pkg.drm_type = package["drm_type"].toString().toStdString();

						// Optional per-package localized changelogs
						if (const QJsonValue changelogs_value = package["changelogs"]; changelogs_value.isArray())
						{
							for (const QJsonValue& changelog : changelogs_value.toArray())
							{
								compat::pkg_changelog chl;
								chl.type = changelog["type"].toString().toStdString();
								chl.content = changelog["content"].toString().toStdString();
								pkg.changelogs.push_back(std::move(chl));
							}
						}

						// Optional per-package localized titles
						if (const QJsonValue titles_value = package["titles"]; titles_value.isArray())
						{
							for (const QJsonValue& title : titles_value.toArray())
							{
								compat::pkg_title ttl;
								ttl.type = title["type"].toString().toStdString();
								ttl.title = title["title"].toString().toStdString();
								pkg.titles.push_back(std::move(ttl));
							}
						}

						set.packages.push_back(std::move(pkg));
					}
				}

				status.patch_sets.push_back(std::move(set));
			}
		}

		// Add status to map
		m_compat_database.emplace(key.toStdString(), std::move(status));
	}

	return true;
}

// Loads the database either from the local cache file (online == false) or by
// starting an asynchronous download (online == true). The offline path parses
// synchronously; the online path completes in handle_download_finished().
void game_compatibility::RequestCompatibility(bool online)
{
	if (!online)
	{
		// Retrieve database from file
		QFile file(m_filepath);

		if (!file.exists())
		{
			compat_log.notice("Database file not found: %s", m_filepath);
			return;
		}

		if (!file.open(QIODevice::ReadOnly))
		{
			compat_log.error("Database Error - Could not read database from file: %s", m_filepath);
			return;
		}

		const QByteArray content = file.readAll();
		file.close();

		compat_log.notice("Finished reading database from file: %s", m_filepath);

		// Create new map from database
		ReadJSON(QJsonDocument::fromJson(content).object(), online);

		return;
	}

	const std::string url = "https://rpcs3.net/compatibility?api=v1&export";
	compat_log.notice("Beginning compatibility database download from: %s", url);

	m_downloader->start(url, true, true, tr("Downloading Database"));

	// We want to retrieve a new database, therefore refresh gamelist and indicate that
	Q_EMIT DownloadStarted();
}

// Looks up the compatibility status for a title id.
// Returns the "NoData" placeholder when no database is loaded at all, and
// "NoResult" when the database is loaded but does not contain the title.
compat::status game_compatibility::GetCompatibility(const std::string& title_id)
{
	if (m_compat_database.empty())
	{
		return ::at32(Status_Data, "NoData");
	}

	if (const auto it = m_compat_database.find(title_id); it != m_compat_database.cend())
	{
		return it->second;
	}

	return ::at32(Status_Data, "NoResult");
}

// Returns the static status metadata for a given status key.
// Precondition: the key must exist in Status_Data (::at32 enforces this).
compat::status game_compatibility::GetStatusData(const QString& status) const
{
	return ::at32(Status_Data, status);
}

// Extracts display information from a PKG file's PARAM.SFO and, when a
// compatibility manager is supplied, enriches it with localized title/changelog
// data from the first matching patch set. Returns an invalid info on a
// malformed package.
compat::package_info game_compatibility::GetPkgInfo(const QString& pkg_path, game_compatibility* compat)
{
	compat::package_info info;

	const package_reader reader(pkg_path.toStdString());
	if (!reader.is_valid())
	{
		info.is_valid = false;
		return info;
	}

	const psf::registry psf = reader.get_psf();

	// TODO: localization of title and changelog
	const std::string title_key = "TITLE";
	const std::string changelog_key = "paramhip";

	info.path = pkg_path;
	info.title = qstr(std::string(psf::get_string(psf, title_key))); // Let's read this from the psf first
	info.title_id = qstr(std::string(psf::get_string(psf, "TITLE_ID")));
	info.category = qstr(std::string(psf::get_string(psf, "CATEGORY")));
	info.version = qstr(std::string(psf::get_string(psf, "APP_VER")));

	if (!info.category.isEmpty())
	{
		// Resolve the raw category code into a human-readable localized name.
		const Localized localized;

		if (const auto boot_cat = localized.category.cat_boot.find(info.category); boot_cat != localized.category.cat_boot.end())
		{
			info.local_cat = boot_cat->second;
		}
		else if (const auto data_cat = localized.category.cat_data.find(info.category); data_cat != localized.category.cat_data.end())
		{
			info.local_cat = data_cat->second;
		}

		if (info.category == "GD")
		{
			// For now let's assume that PS3 Game Data packages are always updates or DLC.
			// Update packages always seem to have an APP_VER, so let's say it's a DLC otherwise.
			// Ideally this would simply be the package content type, but I am too lazy to implement this right now.
			if (info.version.isEmpty())
			{
				info.type = compat::package_type::dlc;
			}
			else
			{
				info.type = compat::package_type::update;
			}
		}
	}

	if (info.version.isEmpty())
	{
		// Fallback to VERSION
		info.version = qstr(std::string(psf::get_string(psf, "VERSION")));
	}

	if (compat)
	{
		compat::status stat = compat->GetCompatibility(info.title_id.toStdString());

		if (!stat.patch_sets.empty())
		{
			// We currently only handle the first patch set
			for (const auto& package : stat.patch_sets.front().packages)
			{
				if (info.version.toStdString() == package.version)
				{
					if (const std::string localized_title = package.get_title(title_key); !localized_title.empty())
					{
						info.title = qstr(localized_title);
					}

					if (const std::string localized_changelog = package.get_changelog(changelog_key); !localized_changelog.empty())
					{
						info.changelog = qstr(localized_changelog);
					}

					// This should be an update since it was found in a patch set
					info.type = compat::package_type::update;
					break;
				}
			}
		}
	}

	// Last resort: if the PSF had no title, fall back to the PKG file name.
	if (info.title.isEmpty())
	{
		const QFileInfo file_info(pkg_path);
		info.title = file_info.fileName();
	}

	return info;
}
9,825
C++
.cpp
284
31.014085
128
0.696317
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
5,107
uuid.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/uuid.cpp
// uuid.cpp
// Manages a persistent per-installation UUID: generated via QUuid, validated,
// stored in a file under the config directory, and logged as "Installation ID".

#include "stdafx.h"
#include "uuid.h"
#include "Utilities/StrUtil.h"

#include <QUuid>
#include <QRegularExpressionValidator>

LOG_CHANNEL(uuid_log, "UUID");

namespace gui
{
	namespace utils
	{
		// Returns the path of the uuid file.
		// On Windows the file lives in <config>/config/ (creating that directory
		// if needed); elsewhere it sits directly in the config directory.
		std::string get_uuid_path()
		{
#ifdef _WIN32
			const std::string config_dir = fs::get_config_dir() + "config/";
			const std::string uuid_path = config_dir + "uuid";

			if (!fs::create_path(config_dir))
			{
				uuid_log.error("Could not create path: %s (%s)", uuid_path, fs::g_tls_error);
			}

			return uuid_path;
#else
			return fs::get_config_dir() + "uuid";
#endif
		}

		// Generates a fresh UUID string (QUuid format, braces included, e.g. "{...}").
		std::string make_uuid()
		{
			return QUuid::createUuid().toString().toStdString();
		}

		// Reads and validates the stored UUID.
		// Returns an empty string if the file is missing, unreadable, or its
		// contents fail validate_uuid().
		std::string load_uuid()
		{
			const std::string uuid_path = get_uuid_path();

			if (!fs::is_file(uuid_path))
			{
				uuid_log.notice("File does not exist: %s (%s)", uuid_path, fs::g_tls_error);
				return {};
			}

			if (fs::file uuid_file = fs::file(uuid_path); uuid_file)
			{
				// Trim whitespace/newlines that may surround the stored value
				const std::string uuid = fmt::trim(uuid_file.to_string());

				if (!validate_uuid(uuid))
				{
					uuid_log.error("Invalid uuid '%s' found in file: %s", uuid, uuid_path);
					return {};
				}

				return uuid;
			}

			uuid_log.error("Could not open file: %s (%s)", uuid_path, fs::g_tls_error);
			return {};
		}

		// Checks that the string looks like a braced UUID: non-empty, wrapped in
		// "{...}", and consisting only of hex digits, braces and dashes.
		// NOTE(review): the character-class regex does not enforce the exact
		// 8-4-4-4-12 layout — any ordering of the allowed characters passes.
		bool validate_uuid(const std::string& uuid)
		{
			const QRegularExpressionValidator validator(QRegularExpression("^[a-fA-F0-9{}-]*$"));
			QString test_string = QString::fromStdString(uuid);
			int pos = 0;

			if (uuid.empty() || !uuid.starts_with("{") || !uuid.ends_with("}") || validator.validate(test_string, pos) == QValidator::State::Invalid)
			{
				return false;
			}

			return true;
		}

		// Validates and writes the UUID to the uuid file (overwriting any
		// previous contents). Returns false on validation or I/O failure.
		bool save_uuid(const std::string& uuid)
		{
			if (!validate_uuid(uuid))
			{
				uuid_log.error("Can not save invalid uuid '%s'", uuid);
				return false;
			}

			const std::string uuid_path = get_uuid_path();

			if (fs::file uuid_file(uuid_path, fs::rewrite); !uuid_file || !uuid_file.write(uuid))
			{
				uuid_log.error("Could not write file: %s (%s)", uuid_path, fs::g_tls_error);
				return false;
			}

			uuid_log.notice("Wrote to file: %s", uuid_path);
			return true;
		}

		// Generates a new UUID into 'uuid' and persists it.
		// Returns false if generation produced an empty string or saving failed.
		bool create_new_uuid(std::string& uuid)
		{
			uuid = make_uuid();

			if (uuid.empty())
			{
				uuid_log.error("Empty uuid");
				return false;
			}

			if (!save_uuid(uuid))
			{
				uuid_log.error("Failed to save uuid");
				return false;
			}

			return true;
		}

		// Loads (or lazily creates) the installation UUID and logs it.
		void log_uuid()
		{
			std::string uuid = load_uuid();

			if (uuid.empty())
			{
				if (!create_new_uuid(uuid))
				{
					return;
				}
			}

			uuid_log.notice("Installation ID: %s", uuid);
		}
	}
}
2,639
C++
.cpp
105
21.238095
140
0.624402
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
5,108
vfs_dialog_usb_input.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/vfs_dialog_usb_input.cpp
// vfs_dialog_usb_input.cpp
// Dialog for editing one emulated USB device slot (usb000..usb007): its mapped
// file system path plus VID / PID / serial identifiers.

#include "vfs_dialog_usb_input.h"
#include "gui_settings.h"

// Fix: include what we use instead of relying on transitive includes.
// QMessageBox, QLineEdit, QLabel, QHBoxLayout and QRegularExpressionValidator
// were all used below without a direct include.
#include <QDialogButtonBox>
#include <QHBoxLayout>
#include <QLabel>
#include <QLineEdit>
#include <QMessageBox>
#include <QPushButton>
#include <QRegularExpressionValidator>
#include <QVBoxLayout>

#include "Emu/vfs_config.h"

// Constructs the dialog.
// name:          device name, must end in '0'..'7' (the slot digit)
// default_info:  values restored by "Reset All"
// info:          device configuration that is written back on Apply; must
//                outlive the dialog since the button handler captures it
vfs_dialog_usb_input::vfs_dialog_usb_input(const QString& name, const cfg::device_info& default_info, cfg::device_info* info, std::shared_ptr<gui_settings> _gui_settings, QWidget* parent)
	: QDialog(parent), m_gui_settings(std::move(_gui_settings)), m_gui_save(gui::fs_dev_usb_list)
{
	ensure(!!info);
	ensure(!name.isEmpty());
	ensure(name.back() >= '0' && name.back() <= '7');

	setWindowTitle(tr("Edit %0").arg(name));
	setObjectName("vfs_dialog_usb_input");

	// The saved-settings key is a template containing 'X'; substitute the slot digit.
	m_gui_save.name.replace('X', name.back());

	// Create path widget
	m_path_widget = new vfs_dialog_path_widget(name, QString::fromStdString(info->path), QString::fromStdString(default_info.path), m_gui_save, m_gui_settings);
	m_path_widget->layout()->setContentsMargins(0, 0, 0, 0);

	// Create buttons
	QDialogButtonBox* buttons = new QDialogButtonBox(QDialogButtonBox::Cancel | QDialogButtonBox::Apply | QDialogButtonBox::RestoreDefaults);
	buttons->button(QDialogButtonBox::RestoreDefaults)->setText(tr("Reset All"));
	buttons->button(QDialogButtonBox::Apply)->setDefault(true);

	// The line edits referenced here are created after this connect; that is
	// safe because the lambda only runs on user interaction, long after the
	// constructor has finished.
	connect(buttons, &QDialogButtonBox::clicked, this, [this, buttons, info](QAbstractButton* button)
	{
		if (button == buttons->button(QDialogButtonBox::Apply))
		{
			// Persist the directory list and copy the edited values back.
			m_gui_settings->SetValue(m_gui_save, m_path_widget->get_dir_list());
			info->path = m_path_widget->get_selected_path();
			info->vid = m_vid_edit->text().toStdString();
			info->pid = m_pid_edit->text().toStdString();
			info->serial = m_serial_edit->text().toStdString();
			accept();
		}
		else if (button == buttons->button(QDialogButtonBox::RestoreDefaults))
		{
			if (QMessageBox::question(this, tr("Confirm Reset"), tr("Reset all entries and file system directories?")) != QMessageBox::Yes)
				return;

			m_path_widget->reset();
			m_vid_edit->setText("");
			m_pid_edit->setText("");
			m_serial_edit->setText("");
		}
		else if (button == buttons->button(QDialogButtonBox::Cancel))
		{
			reject();
		}
	});

	m_vid_edit = new QLineEdit;
	m_vid_edit->setMaxLength(4);
	m_vid_edit->setValidator(new QRegularExpressionValidator(QRegularExpression("^[a-fA-F0-9]*$"), this)); // HEX only
	m_vid_edit->setText(QString::fromStdString(info->vid));

	m_pid_edit = new QLineEdit;
	m_pid_edit->setMaxLength(4);
	m_pid_edit->setValidator(new QRegularExpressionValidator(QRegularExpression("^[a-fA-F0-9]*$"), this)); // HEX only
	m_pid_edit->setText(QString::fromStdString(info->pid));

	m_serial_edit = new QLineEdit;
	m_serial_edit->setMaxLength(64); // Max length defined in sys_fs
	m_serial_edit->setText(QString::fromStdString(info->serial));

	// Two-column form: labels on the left, editors on the right.
	QVBoxLayout* vbox_left = new QVBoxLayout;
	vbox_left->addWidget(new QLabel(tr("Vendor ID:")));
	vbox_left->addWidget(new QLabel(tr("Product ID:")));
	vbox_left->addWidget(new QLabel(tr("Serial:")));

	QVBoxLayout* vbox_right = new QVBoxLayout;
	vbox_right->addWidget(m_vid_edit);
	vbox_right->addWidget(m_pid_edit);
	vbox_right->addWidget(m_serial_edit);

	QHBoxLayout* hbox = new QHBoxLayout;
	hbox->addLayout(vbox_left);
	hbox->addLayout(vbox_right);

	QVBoxLayout* vbox = new QVBoxLayout;
	vbox->addWidget(m_path_widget);
	vbox->addLayout(hbox);
	vbox->addWidget(buttons);

	setLayout(vbox);

	buttons->button(QDialogButtonBox::Apply)->setFocus();
}
3,392
C++
.cpp
76
42.013158
187
0.723908
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
5,109
breakpoint_list.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/breakpoint_list.cpp
// breakpoint_list.cpp
// Debugger widget listing active breakpoints. PPU breakpoints go through
// m_ppu_breakpoint_handler; SPU breakpoints are stored directly in the SPU
// thread's local atomic bitfield.

#include "breakpoint_list.h"
#include "breakpoint_handler.h"

#include "Emu/CPU/CPUDisAsm.h"
#include "Emu/Cell/PPUThread.h"
#include "Emu/Cell/SPUThread.h"

#include <QMenu>
#include <QMessageBox>
#include <QMouseEvent>

extern bool is_using_interpreter(thread_class t_class);

// Sets up the list widget: read-only items, custom context menu, multi-select,
// and a widget-scoped Delete shortcut.
breakpoint_list::breakpoint_list(QWidget* parent, breakpoint_handler* handler) : QListWidget(parent), m_ppu_breakpoint_handler(handler)
{
	setEditTriggers(QAbstractItemView::NoEditTriggers);
	setContextMenuPolicy(Qt::CustomContextMenu);
	setSelectionMode(QAbstractItemView::ExtendedSelection);

	connect(this, &QListWidget::itemDoubleClicked, this, &breakpoint_list::OnBreakpointListDoubleClicked);
	connect(this, &QListWidget::customContextMenuRequested, this, &breakpoint_list::OnBreakpointListRightClicked);

	m_delete_action = new QAction(tr("&Delete"), this);
	m_delete_action->setShortcut(Qt::Key_Delete);
	m_delete_action->setShortcutContext(Qt::WidgetShortcut);
	connect(m_delete_action, &QAction::triggered, this, &breakpoint_list::OnBreakpointListDelete);
	addAction(m_delete_action);

	// Hide until used in order to allow as much space for registers panel as possible
	hide();
}

/**
 * It's unfortunate I need a method like this to sync these. Should ponder a cleaner way to do this.
 */
void breakpoint_list::UpdateCPUData(std::shared_ptr<CPUDisAsm> disasm)
{
	m_disasm = std::move(disasm);
}

// Removes every breakpoint (list entry + handler state) and hides the widget.
void breakpoint_list::ClearBreakpoints()
{
	while (count())
	{
		auto* currentItem = takeItem(0);
		// Breakpoint address is stored in the item's UserRole data
		const u32 loc = currentItem->data(Qt::UserRole).value<u32>();
		m_ppu_breakpoint_handler->RemoveBreakpoint(loc);
		delete currentItem;
	}

	hide();
}

// Removes the breakpoint at 'addr' from the handler and from the list (first
// matching item only). Hides the widget when the list becomes empty.
void breakpoint_list::RemoveBreakpoint(u32 addr)
{
	m_ppu_breakpoint_handler->RemoveBreakpoint(addr);

	for (int i = 0; i < count(); i++)
	{
		QListWidgetItem* currentItem = item(i);

		if (currentItem->data(Qt::UserRole).value<u32>() == addr)
		{
			delete takeItem(i);
			break;
		}
	}

	if (!count())
	{
		hide();
	}
}

// Registers a PPU breakpoint at 'pc' and appends a disassembly line for it.
// Returns false if the handler rejected the breakpoint (e.g. already set).
bool breakpoint_list::AddBreakpoint(u32 pc)
{
	if (!m_ppu_breakpoint_handler->AddBreakpoint(pc))
	{
		return false;
	}

	m_disasm->disasm(pc);

	QString text = QString::fromStdString(m_disasm->last_opcode);
	// Strip 13 characters starting at index 10 from the disasm line —
	// presumably the raw opcode column; TODO confirm against CPUDisAsm output format.
	text.remove(10, 13);

	QListWidgetItem* breakpoint_item = new QListWidgetItem(text);
	breakpoint_item->setForeground(m_text_color_bp);
	breakpoint_item->setBackground(m_color_bp);
	breakpoint_item->setData(Qt::UserRole, pc);
	addItem(breakpoint_item);

	show();

	return true;
}

/**
 * If breakpoint exists, we remove it, else add new one. Yeah, it'd be nicer from a code logic to have it be set/reset. But, that logic has to happen somewhere anyhow.
 */
void breakpoint_list::HandleBreakpointRequest(u32 loc, bool only_add)
{
	const auto cpu = m_disasm ? m_disasm->get_cpu() : nullptr;

	if (!cpu || cpu->state & cpu_flag::exit)
	{
		return;
	}

	// Breakpoints only work with interpreter decoders (not recompilers)
	if (!is_using_interpreter(cpu->get_class()))
	{
		QMessageBox::warning(this, tr("Interpreters-Only Feature!"), tr("Cannot set breakpoints on non-interpreter decoders."));
		return;
	}

	switch (cpu->get_class())
	{
	case thread_class::spu:
	{
		// SPU breakpoints must be word-aligned and inside local storage
		if (loc >= SPU_LS_SIZE || loc % 4)
		{
			QMessageBox::warning(this, tr("Invalid Memory For Breakpoints!"), tr("Cannot set breakpoints on non-SPU executable memory!"));
			return;
		}

		const auto spu = static_cast<spu_thread*>(cpu);

		// One bit per instruction word, packed into atomic bytes
		auto& list = spu->local_breakpoints;
		const u32 pos_at = loc / 4;
		const u32 pos_bit = 1u << (pos_at % 8);

		// Toggle the bit; fetch_xor returns the previous value, so a set bit
		// means the breakpoint existed and has just been removed.
		if (list[pos_at / 8].fetch_xor(pos_bit) & pos_bit)
		{
			// Last breakpoint removed? Then clear the thread's fast-path flag.
			if (std::none_of(list.begin(), list.end(), [](auto& val){ return val.load(); }))
			{
				spu->has_active_local_bps = false;
			}
		}
		else
		{
			// First breakpoint added: notify the SPU thread via its state flags
			if (!spu->has_active_local_bps.exchange(true))
			{
				spu->state.atomic_op([](bs_t<cpu_flag>& flags)
				{
					if (flags & cpu_flag::pending)
					{
						flags += cpu_flag::pending_recheck;
					}
					else
					{
						flags += cpu_flag::pending;
					}
				});
			}
		}
		return;
	}
	case thread_class::ppu:
		break;
	default:
		QMessageBox::warning(this, tr("Unimplemented Breakpoints For Thread Type!"), tr("Cannot set breakpoints on a thread not an PPU/SPU currently, sorry."));
		return;
	}

	// PPU path: the target address must be executable guest memory
	if (!vm::check_addr(loc, vm::page_executable))
	{
		QMessageBox::warning(this, tr("Invalid Memory For Breakpoints!"), tr("Cannot set breakpoints on non-executable memory!"));
		return;
	}

	if (m_ppu_breakpoint_handler->HasBreakpoint(loc))
	{
		if (!only_add)
		{
			// Toggle off an existing breakpoint
			RemoveBreakpoint(loc);
		}
	}
	else
	{
		if (!AddBreakpoint(loc))
		{
			QMessageBox::warning(this, tr("Unknown error while setting breakpoint!"), tr("Failed to set breakpoints."));
			return;
		}
	}
}

// Double-click on an entry scrolls the debugger to its address.
void breakpoint_list::OnBreakpointListDoubleClicked()
{
	if (QListWidgetItem* item = currentItem())
	{
		const u32 address = item->data(Qt::UserRole).value<u32>();
		Q_EMIT RequestShowAddress(address);
	}
}

// Context menu: Rename (single selection only) and Delete.
void breakpoint_list::OnBreakpointListRightClicked(const QPoint &pos)
{
	if (!itemAt(pos))
	{
		return;
	}

	m_context_menu = new QMenu();

	if (selectedItems().count() == 1)
	{
		QAction* rename_action = m_context_menu->addAction(tr("&Rename"));
		connect(rename_action, &QAction::triggered, this, [this]()
		{
			QListWidgetItem* current_item = selectedItems().first();
			current_item->setFlags(current_item->flags() | Qt::ItemIsEditable);
			editItem(current_item);
		});
		m_context_menu->addSeparator();
	}

	m_context_menu->addAction(m_delete_action);
	m_context_menu->exec(viewport()->mapToGlobal(pos));
	m_context_menu->deleteLater();
	m_context_menu = nullptr;
}

// Deletes all selected breakpoints (iterating backwards so indices stay valid).
// NOTE(review): selectedItems() is re-queried every iteration while items are
// being removed — consider snapshotting the list once before the loop.
void breakpoint_list::OnBreakpointListDelete()
{
	for (int i = selectedItems().count() - 1; i >= 0; i--)
	{
		RemoveBreakpoint(::at32(selectedItems(), i)->data(Qt::UserRole).value<u32>());
	}

	if (m_context_menu)
	{
		m_context_menu->close();
	}
}

void breakpoint_list::mouseDoubleClickEvent(QMouseEvent* ev)
{
	if (!ev) return;

	// Qt's itemDoubleClicked signal doesn't distinguish between mouse buttons and there is no simple way to get the pressed button.
	// So we have to ignore this event when another button is pressed.
	if (ev->button() != Qt::LeftButton)
	{
		ev->ignore();
		return;
	}

	QListWidget::mouseDoubleClickEvent(ev);
}
6,116
C++
.cpp
211
26.407583
168
0.716502
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
5,110
flow_widget_item.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/flow_widget_item.cpp
// flow_widget_item.cpp
// A focusable, hoverable item widget used inside a flow layout. Supports
// stylesheet-driven painting, keyboard navigation signals, and a one-shot
// callback fired the first time the item becomes visible.

#include "flow_widget_item.h"

#include <QStyle>
#include <QStyleOption>
#include <QPainter>

flow_widget_item::flow_widget_item(QWidget* parent) : QWidget(parent)
{
	// We need to enable the hover attribute to ensure that hover events are handled.
	setAttribute(Qt::WA_Hover);
}

flow_widget_item::~flow_widget_item()
{
}

// Force the style engine to re-evaluate our stylesheet after a dynamic
// property (focus/hover) changed.
void flow_widget_item::polish_style()
{
	style()->unpolish(this);
	style()->polish(this);
}

void flow_widget_item::paintEvent(QPaintEvent* /*event*/)
{
	// Route painting through the style engine so stylesheets apply to plain QWidgets.
	QStyleOption opt;
	opt.initFrom(this);
	QPainter p(this);
	style()->drawPrimitive(QStyle::PE_Widget, &opt, &p, this);

	// Fire the one-shot visibility callback the first time this item is
	// actually painted within the parent's visible region.
	if (got_visible || !cb_on_first_visibility)
	{
		return;
	}

	if (QWidget* parent_widget = static_cast<QWidget*>(parent()))
	{
		if (parent_widget->visibleRegion().intersects(geometry()))
		{
			got_visible = true;
			cb_on_first_visibility();
		}
	}
}

void flow_widget_item::focusInEvent(QFocusEvent* event)
{
	QWidget::focusInEvent(event);

	// Re-apply any stylesheet changes that depend on the focus property.
	polish_style();

	Q_EMIT focused();
}

void flow_widget_item::focusOutEvent(QFocusEvent* event)
{
	QWidget::focusOutEvent(event);

	// Re-apply any stylesheet changes that depend on the focus property.
	polish_style();
}

// Translate navigation keys into navigate() signals; everything else falls
// through to the default QWidget handling.
void flow_widget_item::keyPressEvent(QKeyEvent* event)
{
	if (!event)
	{
		return;
	}

	const int key = event->key();

	if (key == Qt::Key_Left)
	{
		Q_EMIT navigate(flow_navigation::left);
		return;
	}
	if (key == Qt::Key_Right)
	{
		Q_EMIT navigate(flow_navigation::right);
		return;
	}
	if (key == Qt::Key_Up)
	{
		Q_EMIT navigate(flow_navigation::up);
		return;
	}
	if (key == Qt::Key_Down)
	{
		Q_EMIT navigate(flow_navigation::down);
		return;
	}
	if (key == Qt::Key_Home)
	{
		Q_EMIT navigate(flow_navigation::home);
		return;
	}
	if (key == Qt::Key_End)
	{
		Q_EMIT navigate(flow_navigation::end);
		return;
	}
	if (key == Qt::Key_PageUp)
	{
		Q_EMIT navigate(flow_navigation::page_up);
		return;
	}
	if (key == Qt::Key_PageDown)
	{
		Q_EMIT navigate(flow_navigation::page_down);
		return;
	}

	QWidget::keyPressEvent(event);
}

bool flow_widget_item::event(QEvent* event)
{
	// Mirror hover enter/leave into a custom "hover" property.
	// :hover does not work if we add descendants in the qss, so we need to use a custom property.
	const QEvent::Type type = event->type();

	if (type == QEvent::HoverEnter || type == QEvent::HoverLeave)
	{
		if (setProperty("hover", type == QEvent::HoverEnter ? "true" : "false"))
		{
			// Re-apply any stylesheet changes for the custom hover property.
			polish_style();
		}
	}

	return QWidget::event(event);
}
2,684
C++
.cpp
107
22.794393
110
0.732317
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
5,111
save_data_info_dialog.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/save_data_info_dialog.cpp
// save_data_info_dialog.cpp
// Read-only dialog displaying the properties of one save-data entry
// (user id, title, subtitle, details, and icon when available) in a table.

#include "save_data_info_dialog.h"

#include <QPushButton>
#include <QHBoxLayout>
#include <QHeaderView>

#include "Emu/System.h"

save_data_info_dialog::save_data_info_dialog(SaveDataEntry save, QWidget* parent)
	: QDialog(parent)
	, m_entry(std::move(save))
{
	setWindowTitle(tr("Save Data Information"));

	// Two-column read-only table: property name / property value.
	// (A table_item_delegate could be set here to hide selection rectangles.)
	m_list = new QTableWidget(this);
	m_list->setSelectionBehavior(QAbstractItemView::SelectRows); // select whole rows, not single cells
	m_list->setEditTriggers(QAbstractItemView::NoEditTriggers);
	m_list->setColumnCount(2);
	m_list->setHorizontalHeaderLabels(QStringList() << tr("Name") << tr("Detail"));

	// Close button, placed in the bottom-right corner of the dialog
	QPushButton* btn_close = new QPushButton(tr("&Close"), this);
	connect(btn_close, &QAbstractButton::clicked, this, &save_data_info_dialog::close);

	QHBoxLayout* actions_layout = new QHBoxLayout();
	actions_layout->addStretch();
	actions_layout->addWidget(btn_close);

	QVBoxLayout* main_layout = new QVBoxLayout();
	main_layout->addWidget(m_list, 1);
	main_layout->addLayout(actions_layout, 0);
	main_layout->setAlignment(Qt::AlignCenter);
	setLayout(main_layout);

	UpdateData();

	m_list->horizontalHeader()->resizeSections(QHeaderView::ResizeToContents);
	m_list->verticalHeader()->resizeSections(QHeaderView::ResizeToContents);

	// Size the dialog so the table fits exactly; no minimum size is needed
	// because the table size and row count are always the same.
	const QSize table_size(
		m_list->verticalHeader()->width() + m_list->horizontalHeader()->length() + m_list->frameWidth() * 2,
		m_list->horizontalHeader()->height() + m_list->verticalHeader()->length() + m_list->frameWidth() * 2);

	resize(sizeHint() - m_list->sizeHint() + table_size);
}

// Writes the save-data entry's fields into the table.
void save_data_info_dialog::UpdateData()
{
	m_list->clearContents();

	// One row per displayed member of the entry (keep in sync with the rows below)
	const int row_count = 4;
	m_list->setRowCount(row_count);

	// Helper: fill one name/value row
	const auto set_row = [this](int row, const QString& name, const std::string& value)
	{
		m_list->setItem(row, 0, new QTableWidgetItem(name));
		m_list->setItem(row, 1, new QTableWidgetItem(QString::fromStdString(value)));
	};

	set_row(0, tr("User ID"), Emu.GetUsr());
	set_row(1, tr("Title"), m_entry.title);
	set_row(2, tr("Subtitle"), m_entry.subtitle);
	set_row(3, tr("Detail"), m_entry.details);

	// Prepend an icon row when the entry carries a decodable PNG icon buffer
	QImage icon;
	if (!m_entry.iconBuf.empty() && icon.loadFromData(m_entry.iconBuf.data(), static_cast<int>(m_entry.iconBuf.size()), "PNG"))
	{
		m_list->insertRow(0);
		QTableWidgetItem* icon_item = new QTableWidgetItem();
		icon_item->setData(Qt::DecorationRole, QPixmap::fromImage(icon));
		m_list->setItem(0, 0, new QTableWidgetItem(tr("Icon")));
		m_list->setItem(0, 1, icon_item);
	}
}
3,166
C++
.cpp
66
45.787879
133
0.742459
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
5,112
emu_settings.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/emu_settings.cpp
#include "emu_settings.h" #include "config_adapter.h" #include <QMessageBox> #include <QLineEdit> #include <QTimer> #include <QCalendarWidget> #include "Emu/System.h" #include "Emu/system_config.h" #include "Emu/vfs_config.h" #include "Emu/system_utils.hpp" #include "Emu/Cell/Modules/cellSysutil.h" #include "Emu/Io/Keyboard.h" #include "util/yaml.hpp" #include "Utilities/File.h" #include "Utilities/Config.h" LOG_CHANNEL(cfg_log, "CFG"); extern std::string g_cfg_defaults; //! Default settings grabbed from Utilities/Config.h // Emit sorted YAML namespace { static NEVER_INLINE void emit_data(YAML::Emitter& out, const YAML::Node& node) { // TODO out << node; } // Incrementally load YAML static NEVER_INLINE void operator +=(YAML::Node& left, const YAML::Node& node) { if (node && !node.IsNull()) { if (node.IsMap()) { for (const auto& pair : node) { if (pair.first.IsScalar()) { auto&& lhs = left[pair.first.Scalar()]; lhs += pair.second; } else { // Exotic case (TODO: probably doesn't work) auto&& lhs = left[YAML::Clone(pair.first)]; lhs += pair.second; } } } else if (node.IsScalar() || node.IsSequence()) { // Scalars and sequences are replaced completely, but this may change in future. // This logic may be overwritten by custom demands of every specific cfg:: node. left = node; } } } } emu_settings::emu_settings() : QObject() { } bool emu_settings::Init() { m_render_creator = new render_creator(this); if (m_render_creator->abort_requested) { return false; } // Make Vulkan default setting if it is supported if (m_render_creator->Vulkan.supported && !m_render_creator->Vulkan.adapters.empty()) { const std::string adapter = ::at32(m_render_creator->Vulkan.adapters, 0).toStdString(); cfg_log.notice("Setting the default renderer to Vulkan. 
Default GPU: '%s'", adapter); Emu.SetDefaultRenderer(video_renderer::vulkan); Emu.SetDefaultGraphicsAdapter(adapter); } return true; } void emu_settings::LoadSettings(const std::string& title_id, bool create_config_from_global) { m_title_id = title_id; // Create config path if necessary fs::create_path(title_id.empty() ? fs::get_config_dir() : rpcs3::utils::get_custom_config_dir()); // Load default config auto [default_config, default_error] = yaml_load(g_cfg_defaults); if (default_error.empty()) { m_default_settings = default_config; m_current_settings = YAML::Clone(default_config); } else { cfg_log.fatal("Failed to load default config:\n%s", default_error); QMessageBox::critical(nullptr, tr("Config Error"), tr("Failed to load default config:\n%0") .arg(QString::fromStdString(default_error)), QMessageBox::Ok); } if (create_config_from_global) { // Add global config const std::string global_config_path = fs::get_config_dir() + "config.yml"; fs::g_tls_error = fs::error::ok; fs::file config(global_config_path, fs::read + fs::create); auto [global_config, global_error] = yaml_load(config ? config.to_string() : ""); if (config && global_error.empty()) { m_current_settings += global_config; } else { config.close(); cfg_log.fatal("Failed to load global config %s:\n%s (%s)", global_config_path, global_error, fs::g_tls_error); QMessageBox::critical(nullptr, tr("Config Error"), tr("Failed to load global config:\nFile: %0\nError: %1") .arg(QString::fromStdString(global_config_path)).arg(QString::fromStdString(global_error)), QMessageBox::Ok); } } // Add game config if (!title_id.empty()) { // Remove obsolete settings of the global config before adding the custom settings. // Otherwise we'll always trigger the "obsolete settings dialog" when editing custom configs. 
ValidateSettings(true); std::string custom_config_path; if (std::string config_path = rpcs3::utils::get_custom_config_path(m_title_id); fs::is_file(config_path)) { custom_config_path = std::move(config_path); } if (!custom_config_path.empty()) { if (fs::file config{custom_config_path}) { auto [custom_config, custom_error] = yaml_load(config.to_string()); config.close(); if (custom_error.empty()) { m_current_settings += custom_config; } else { cfg_log.fatal("Failed to load custom config %s:\n%s", custom_config_path, custom_error); QMessageBox::critical(nullptr, tr("Config Error"), tr("Failed to load custom config:\nFile: %0\nError: %1") .arg(QString::fromStdString(custom_config_path)).arg(QString::fromStdString(custom_error)), QMessageBox::Ok); } } else if (fs::g_tls_error != fs::error::noent) { cfg_log.fatal("Failed to load custom config %s (file error: %s)", custom_config_path, fs::g_tls_error); QMessageBox::critical(nullptr, tr("Config Error"), tr("Failed to load custom config:\nFile: %0\nError: %1") .arg(QString::fromStdString(custom_config_path)).arg(QString::fromStdString(fmt::format("%s", fs::g_tls_error))), QMessageBox::Ok); } } } } bool emu_settings::ValidateSettings(bool cleanup) { bool is_clean = true; std::function<void(int, YAML::Node&, std::vector<std::string>&, cfg::_base*)> search_level; search_level = [&search_level, &is_clean, &cleanup, this](int level, YAML::Node& yml_node, std::vector<std::string>& keys, cfg::_base* cfg_base) { if (!yml_node || !yml_node.IsMap()) { return; } const int next_level = level + 1; for (const auto& yml_entry : yml_node) { const std::string& key = yml_entry.first.Scalar(); cfg::_base* cfg_node = nullptr; keys.resize(next_level); keys[level] = key; if (cfg_base && cfg_base->get_type() == cfg::type::node) { for (const auto& node : static_cast<const cfg::node*>(cfg_base)->get_nodes()) { if (node->get_name() == keys[level]) { cfg_node = node; break; } } } if (cfg_node) { // Ignore every node in Log subsection if (level == 0 
&& cfg_node->get_name() == "Log") { continue; } YAML::Node next_node = yml_node[key]; search_level(next_level, next_node, keys, cfg_node); } else { const auto get_full_key = [&keys](const std::string& separator) -> std::string { std::string full_key; for (usz i = 0; i < keys.size(); i++) { full_key += keys[i]; if (i < keys.size() - 1) full_key += separator; } return full_key; }; is_clean = false; if (cleanup) { if (!yml_node.remove(key)) { cfg_log.error("Could not remove config entry: %s", get_full_key(": ")); is_clean = true; // abort return; } // Let's only remove one entry at a time. I got some weird issues when doing all at once. return; } else { cfg_log.warning("Unknown config entry found: %s", get_full_key(": ")); } } } }; std::unique_ptr<cfg_root> root = std::make_unique<cfg_root>(); std::vector<std::string> keys; do { is_clean = true; search_level(0, m_current_settings, keys, root.get()); } while (cleanup && !is_clean); return is_clean; } void emu_settings::RestoreDefaults() { m_current_settings = YAML::Clone(m_default_settings); Q_EMIT RestoreDefaultsSignal(); } void emu_settings::SaveSettings() const { YAML::Emitter out; emit_data(out, m_current_settings); Emulator::SaveSettings(out.c_str(), m_title_id); } void emu_settings::EnhanceComboBox(QComboBox* combobox, emu_settings_type type, bool is_ranged, bool use_max, int max, bool sorted, bool strict) { if (!combobox) { cfg_log.fatal("EnhanceComboBox '%s' was used with an invalid object", cfg_adapter::get_setting_name(type)); return; } if (is_ranged) { if (sorted) { cfg_log.warning("EnhanceCombobox '%s': ignoring sorting request on ranged combo box", cfg_adapter::get_setting_name(type)); } const QStringList range = GetQStringSettingOptions(type); ensure(!range.empty()); const int max_item = use_max ? 
max : range.last().toInt(); for (int i = range.first().toInt(); i <= max_item; i++) { combobox->addItem(QString::number(i), i); } } else { const QStringList settings = GetQStringSettingOptions(type); for (int i = 0; i < settings.count(); i++) { const QString localized_setting = GetLocalizedSetting(settings[i], type, combobox->count(), strict); combobox->addItem(localized_setting, QVariant({settings[i], i})); } if (sorted) { combobox->model()->sort(0, Qt::AscendingOrder); } } // Since the QComboBox has localised strings, we can't just findText / findData, so we need to manually iterate through it to find our index const auto find_index = [](QComboBox* combobox, const QString& value) { if (!combobox) { return -1; } for (int i = 0; i < combobox->count(); i++) { const QVariantList var_list = combobox->itemData(i).toList(); if (var_list.size() != 2 || !var_list[0].canConvert<QString>()) { fmt::throw_exception("Invalid data found in combobox entry %d (text='%s', listsize=%d, itemcount=%d)", i, combobox->itemText(i), var_list.size(), combobox->count()); } if (value == var_list[0].toString()) { return i; } } return -1; }; const std::string def = GetSettingDefault(type); const std::string selected = GetSetting(type); const QString selected_q = QString::fromStdString(selected); int index; if (is_ranged) { index = combobox->findData(selected_q); } else { index = find_index(combobox, selected_q); } if (index == -1) { cfg_log.error("EnhanceComboBox '%s' tried to set an invalid value: %s. 
Setting to default: %s", cfg_adapter::get_setting_name(type), selected, def); if (is_ranged) { index = combobox->findData(QString::fromStdString(def)); } else { index = find_index(combobox, QString::fromStdString(def)); } m_broken_types.insert(type); } combobox->setCurrentIndex(index); connect(combobox, QOverload<int>::of(&QComboBox::currentIndexChanged), combobox, [this, is_ranged, combobox, type](int index) { if (index < 0) return; if (is_ranged) { SetSetting(type, combobox->itemData(index).toString().toStdString()); } else { const QVariantList var_list = combobox->itemData(index).toList(); if (var_list.size() != 2 || !var_list[0].canConvert<QString>()) { fmt::throw_exception("Invalid data found in combobox entry %d (text='%s', listsize=%d, itemcount=%d)", index, combobox->itemText(index), var_list.size(), combobox->count()); } SetSetting(type, var_list[0].toString().toStdString()); } }); connect(this, &emu_settings::RestoreDefaultsSignal, combobox, [def, combobox, is_ranged, find_index]() { if (is_ranged) { combobox->setCurrentIndex(combobox->findData(QString::fromStdString(def))); } else { combobox->setCurrentIndex(find_index(combobox, QString::fromStdString(def))); } }); } void emu_settings::EnhanceCheckBox(QCheckBox* checkbox, emu_settings_type type) { if (!checkbox) { cfg_log.fatal("EnhanceCheckBox '%s' was used with an invalid object", cfg_adapter::get_setting_name(type)); return; } std::string def = GetSettingDefault(type); std::transform(def.begin(), def.end(), def.begin(), ::tolower); if (def != "true" && def != "false") { cfg_log.fatal("EnhanceCheckBox '%s' was used with an invalid emu_settings_type", cfg_adapter::get_setting_name(type)); return; } std::string selected = GetSetting(type); std::transform(selected.begin(), selected.end(), selected.begin(), ::tolower); if (selected == "true") { checkbox->setChecked(true); } else if (selected != "false") { cfg_log.error("EnhanceCheckBox '%s' tried to set an invalid value: %s. 
Setting to default: %s", cfg_adapter::get_setting_name(type), selected, def); checkbox->setChecked(def == "true"); m_broken_types.insert(type); } connect(checkbox, &QCheckBox::checkStateChanged, this, [type, this](Qt::CheckState val) { const std::string str = val != Qt::Unchecked ? "true" : "false"; SetSetting(type, str); }); connect(this, &emu_settings::RestoreDefaultsSignal, checkbox, [def, checkbox]() { checkbox->setChecked(def == "true"); }); } void emu_settings::EnhanceDateTimeEdit(QDateTimeEdit* date_time_edit, emu_settings_type type, const QString& format, bool use_calendar, bool as_offset_from_now, int offset_update_time) { if (!date_time_edit) { cfg_log.fatal("EnhanceDateTimeEdit '%s' was used with an invalid object", cfg_adapter::get_setting_name(type)); return; } date_time_edit->setDisplayFormat(format); date_time_edit->setCalendarPopup(use_calendar); if (as_offset_from_now) { // If using offset from now, then we disable the keyboard tracking to reduce the numebr of events that occur (since for each event we will lose focus) date_time_edit->setKeyboardTracking(false); const QStringList range = GetQStringSettingOptions(type); ensure(!range.empty()); bool ok_def = false, ok_min = false, ok_max = false; const s64 def = QString::fromStdString(GetSettingDefault(type)).toLongLong(&ok_def); const s64 min = range.first().toLongLong(&ok_min); const s64 max = range.last().toLongLong(&ok_max); if (!ok_def || !ok_min || !ok_max) { cfg_log.fatal("EnhanceDateTimeEdit '%s' was used with an invalid emu_settings_type", cfg_adapter::get_setting_name(type)); return; } bool ok_sel = false; s64 val = QString::fromStdString(GetSetting(type)).toLongLong(&ok_sel); if (!ok_sel || val < min || val > max) { cfg_log.error("EnhanceDateTimeEdit '%s' tried to set an invalid value: %d. Setting to default: %d. 
Allowed range: [%d, %d]", cfg_adapter::get_setting_name(type), val, def, min, max); val = def; m_broken_types.insert(type); SetSetting(type, std::to_string(def)); } // we'll capture the DateTime once, and apply the min/max and offset against it here. const QDateTime now = QDateTime::currentDateTime(); // we set the allowed limits date_time_edit->setDateTimeRange(now.addSecs(min), now.addSecs(max)); // we add the offset, and set the control to have this datetime value const QDateTime date_time = now.addSecs(val); date_time_edit->setDateTime(date_time); // if we have an invalid update time then we won't run the update timer if (offset_update_time > 0) { QTimer* console_time_update = new QTimer(date_time_edit); connect(console_time_update, &QTimer::timeout, date_time_edit, [this, date_time_edit, min, max]() { if (!date_time_edit->hasFocus() && (!date_time_edit->calendarPopup() || !date_time_edit->calendarWidget()->hasFocus())) { const QDateTime now = QDateTime::currentDateTime(); const s64 offset = QString::fromStdString(GetSetting(emu_settings_type::ConsoleTimeOffset)).toLongLong(); date_time_edit->setDateTime(now.addSecs(offset)); date_time_edit->setDateTimeRange(now.addSecs(min), now.addSecs(max)); } }); console_time_update->start(offset_update_time); } connect(this, &emu_settings::RestoreDefaultsSignal, date_time_edit, [def, date_time_edit]() { date_time_edit->setDateTime(QDateTime::currentDateTime().addSecs(def)); }); } else { const QStringList range = GetQStringSettingOptions(type); ensure(!range.empty()); QString str = QString::fromStdString(GetSettingDefault(type)); const QDateTime def = QDateTime::fromString(str, Qt::ISODate); const QDateTime min = QDateTime::fromString(range.first(), Qt::ISODate); const QDateTime max = QDateTime::fromString(range.last(), Qt::ISODate); if (!def.isValid() || !min.isValid() || !max.isValid()) { cfg_log.fatal("EnhanceDateTimeEdit '%s' was used with an invalid emu_settings_type", cfg_adapter::get_setting_name(type)); return; } 
str = QString::fromStdString(GetSetting(type)); QDateTime val = QDateTime::fromString(str, Qt::ISODate); if (!val.isValid() || val < min || val > max) { cfg_log.error("EnhanceDateTimeEdit '%s' tried to set an invalid value: %s. Setting to default: %s Allowed range: [%s, %s]", cfg_adapter::get_setting_name(type), val.toString(Qt::ISODate), def.toString(Qt::ISODate), min.toString(Qt::ISODate), max.toString(Qt::ISODate)); val = def; m_broken_types.insert(type); SetSetting(type, def.toString(Qt::ISODate).toStdString()); } // we set the allowed limits date_time_edit->setDateTimeRange(min, max); // set the date_time value to the control date_time_edit->setDateTime(val); connect(this, &emu_settings::RestoreDefaultsSignal, date_time_edit, [def, date_time_edit]() { date_time_edit->setDateTime(def); }); } connect(date_time_edit, &QDateTimeEdit::dateTimeChanged, this, [date_time_edit, type, as_offset_from_now, this](const QDateTime& datetime) { if (as_offset_from_now) { // offset will be applied in seconds const s64 offset = QDateTime::currentDateTime().secsTo(datetime); SetSetting(type, std::to_string(offset)); // HACK: We are only looking at whether the control has focus to prevent the time from updating dynamically, so we // clear the focus, so that this dynamic updating isn't suppressed. 
if (date_time_edit) { date_time_edit->clearFocus(); } } else { // date time will be written straight into settings SetSetting(type, datetime.toString(Qt::ISODate).toStdString()); } }); } void emu_settings::EnhanceSlider(QSlider* slider, emu_settings_type type) { if (!slider) { cfg_log.fatal("EnhanceSlider '%s' was used with an invalid object", cfg_adapter::get_setting_name(type)); return; } const QStringList range = GetQStringSettingOptions(type); ensure(!range.empty()); bool ok_def, ok_sel, ok_min, ok_max; const int def = QString::fromStdString(GetSettingDefault(type)).toInt(&ok_def); const int min = range.first().toInt(&ok_min); const int max = range.last().toInt(&ok_max); if (!ok_def || !ok_min || !ok_max) { cfg_log.fatal("EnhanceSlider '%s' was used with an invalid emu_settings_type", cfg_adapter::get_setting_name(type)); return; } const QString selected = QString::fromStdString(GetSetting(type)); int val = selected.toInt(&ok_sel); if (!ok_sel || val < min || val > max) { cfg_log.error("EnhanceSlider '%s' tried to set an invalid value: %d. Setting to default: %d. 
Allowed range: [%d, %d]", cfg_adapter::get_setting_name(type), val, def, min, max); val = def; m_broken_types.insert(type); } slider->setRange(min, max); slider->setValue(val); connect(slider, &QSlider::valueChanged, this, [type, this](int value) { SetSetting(type, QString::number(value).toStdString()); }); connect(this, &emu_settings::RestoreDefaultsSignal, slider, [def, slider]() { slider->setValue(def); }); } void emu_settings::EnhanceSpinBox(QSpinBox* spinbox, emu_settings_type type, const QString& prefix, const QString& suffix) { if (!spinbox) { cfg_log.fatal("EnhanceSpinBox '%s' was used with an invalid object", cfg_adapter::get_setting_name(type)); return; } const QStringList range = GetQStringSettingOptions(type); ensure(!range.empty()); bool ok_def, ok_sel, ok_min, ok_max; const int def = QString::fromStdString(GetSettingDefault(type)).toInt(&ok_def); const int min = range.first().toInt(&ok_min); const int max = range.last().toInt(&ok_max); if (!ok_def || !ok_min || !ok_max) { cfg_log.fatal("EnhanceSpinBox '%s' was used with an invalid type", cfg_adapter::get_setting_name(type)); return; } const std::string selected = GetSetting(type); int val = QString::fromStdString(selected).toInt(&ok_sel); if (!ok_sel || val < min || val > max) { cfg_log.error("EnhanceSpinBox '%s' tried to set an invalid value: %d. Setting to default: %d. 
Allowed range: [%d, %d]", cfg_adapter::get_setting_name(type), selected, def, min, max); val = def; m_broken_types.insert(type); } spinbox->setPrefix(prefix); spinbox->setSuffix(suffix); spinbox->setRange(min, max); spinbox->setValue(val); connect(spinbox, &QSpinBox::textChanged, this, [type, spinbox, this](const QString& /* text*/) { if (!spinbox) return; SetSetting(type, spinbox->cleanText().toStdString()); }); connect(this, &emu_settings::RestoreDefaultsSignal, spinbox, [def, spinbox]() { spinbox->setValue(def); }); } void emu_settings::EnhanceDoubleSpinBox(QDoubleSpinBox* spinbox, emu_settings_type type, const QString& prefix, const QString& suffix) { if (!spinbox) { cfg_log.fatal("EnhanceDoubleSpinBox '%s' was used with an invalid object", cfg_adapter::get_setting_name(type)); return; } const std::vector<std::string> range = GetSettingOptions(type); ensure(!range.empty()); const std::string def_s = GetSettingDefault(type); const std::string val_s = GetSetting(type); const std::string& min_s = range.front(); const std::string& max_s = range.back(); // cfg::_float range is in s32 constexpr s32 min_value = ::std::numeric_limits<s32>::min(); constexpr s32 max_value = ::std::numeric_limits<s32>::max(); f64 val, def, min, max; const bool ok_sel = try_to_float(&val, val_s, min_value, max_value); const bool ok_def = try_to_float(&def, def_s, min_value, max_value); const bool ok_min = try_to_float(&min, min_s, min_value, max_value); const bool ok_max = try_to_float(&max, max_s, min_value, max_value); if (!ok_def || !ok_min || !ok_max) { cfg_log.fatal("EnhanceDoubleSpinBox '%s' was used with an invalid type. (val='%s', def='%s', min_s='%s', max_s='%s')", cfg_adapter::get_setting_name(type), val_s, def_s, min_s, max_s); return; } if (!ok_sel || val < min || val > max) { cfg_log.error("EnhanceDoubleSpinBox '%s' tried to set an invalid value: %f. Setting to default: %f. 
Allowed range: [%f, %f]", cfg_adapter::get_setting_name(type), val, def, min, max); val = def; m_broken_types.insert(type); } spinbox->setPrefix(prefix); spinbox->setSuffix(suffix); spinbox->setRange(min, max); spinbox->setValue(val); connect(spinbox, &QDoubleSpinBox::textChanged, this, [type, spinbox, this](const QString& /* text*/) { if (!spinbox) return; SetSetting(type, spinbox->cleanText().toStdString()); }); connect(this, &emu_settings::RestoreDefaultsSignal, spinbox, [def, spinbox]() { spinbox->setValue(def); }); } void emu_settings::EnhanceLineEdit(QLineEdit* edit, emu_settings_type type) { if (!edit) { cfg_log.fatal("EnhanceEdit '%s' was used with an invalid object", cfg_adapter::get_setting_name(type)); return; } const std::string set_text = GetSetting(type); edit->setText(QString::fromStdString(set_text)); connect(edit, &QLineEdit::textChanged, this, [type, this](const QString &text) { const QString trimmed = text.trimmed(); if (trimmed.size() != text.size()) { cfg_log.warning("EnhanceLineEdit '%s' input was trimmed", cfg_adapter::get_setting_name(type)); } SetSetting(type, trimmed.toStdString()); }); connect(this, &emu_settings::RestoreDefaultsSignal, edit, [this, edit, type]() { edit->setText(QString::fromStdString(GetSettingDefault(type))); }); } void emu_settings::EnhanceRadioButton(QButtonGroup* button_group, emu_settings_type type) { if (!button_group) { cfg_log.fatal("EnhanceRadioButton '%s' was used with an invalid object", cfg_adapter::get_setting_name(type)); return; } const QString selected = QString::fromStdString(GetSetting(type)); const QString def = QString::fromStdString(GetSettingDefault(type)); const QStringList options = GetQStringSettingOptions(type); if (button_group->buttons().count() < options.size()) { cfg_log.fatal("EnhanceRadioButton '%s': wrong button count", cfg_adapter::get_setting_name(type)); return; } bool found = false; int def_pos = -1; for (int i = 0; i < options.count(); i++) { const QString& option = options[i]; const 
QString localized_setting = GetLocalizedSetting(option, type, i, true); QAbstractButton* button = button_group->button(i); button->setText(localized_setting); if (!found && option == selected) { found = true; button->setChecked(true); } if (def_pos == -1 && option == def) { def_pos = i; } connect(button, &QAbstractButton::toggled, this, [this, type, val = option.toStdString()](bool checked) { if (checked) { SetSetting(type, val); } }); } if (!found) { ensure(def_pos >= 0); cfg_log.error("EnhanceRadioButton '%s' tried to set an invalid value: %s. Setting to default: %s.", cfg_adapter::get_setting_name(type), selected, def); m_broken_types.insert(type); // Select the default option on invalid setting string button_group->button(def_pos)->setChecked(true); } connect(this, &emu_settings::RestoreDefaultsSignal, button_group, [button_group, def_pos]() { if (button_group && button_group->button(def_pos)) { button_group->button(def_pos)->setChecked(true); } }); } std::vector<std::string> emu_settings::GetLibrariesControl() { return m_current_settings["Core"]["Libraries Control"].as<std::vector<std::string>, std::initializer_list<std::string>>({}); } void emu_settings::SaveSelectedLibraries(const std::vector<std::string>& libs) { m_current_settings["Core"]["Libraries Control"] = libs; } std::vector<std::string> emu_settings::GetSettingOptions(emu_settings_type type) { return cfg_adapter::get_options(::at32(settings_location, type)); } QStringList emu_settings::GetQStringSettingOptions(emu_settings_type type) { QStringList values; for (const std::string& value : cfg_adapter::get_options(::at32(settings_location, type))) { values.append(QString::fromStdString(value)); } return values; } std::string emu_settings::GetSettingDefault(emu_settings_type type) const { if (const auto node = cfg_adapter::get_node(m_default_settings, ::at32(settings_location, type)); node && node.IsScalar()) { return node.Scalar(); } cfg_log.fatal("GetSettingDefault(type=%d) could not retrieve the 
requested node", static_cast<int>(type)); return ""; } std::string emu_settings::GetSetting(emu_settings_type type) const { if (const auto node = cfg_adapter::get_node(m_current_settings, ::at32(settings_location, type)); node && node.IsScalar()) { return node.Scalar(); } cfg_log.fatal("GetSetting(type=%d) could not retrieve the requested node", static_cast<int>(type)); return ""; } void emu_settings::SetSetting(emu_settings_type type, const std::string& val) const { cfg_adapter::get_node(m_current_settings, ::at32(settings_location, type)) = val; } emu_settings_type emu_settings::FindSettingsType(const cfg::_base* node) const { // Add key and value to static map on first use static std::map<u32, emu_settings_type> id_to_type; static std::mutex mtx; std::lock_guard lock(mtx); if (!node) [[unlikely]] { // Provoke error. Don't use ensure or we will get a nullptr deref warning in VS return ::at32(id_to_type, umax); } std::vector<std::string> node_location; if (!id_to_type.contains(node->get_id())) { for (const cfg::_base* n = node; n; n = n->get_parent()) { if (!n->get_name().empty()) { node_location.push_back(n->get_name()); } } std::reverse(node_location.begin(), node_location.end()); for (const auto& [type, loc]: settings_location) { if (node_location.size() != loc.size()) { continue; } bool is_match = true; for (usz i = 0; i < node_location.size(); i++) { if (node_location[i] != loc[i]) { is_match = false; break; } } if (is_match && !id_to_type.try_emplace(node->get_id(), type).second) { cfg_log.error("'%s' already exists", loc.back()); } } } if (!id_to_type.contains(node->get_id())) { fmt::throw_exception("Node '%s' not represented in emu_settings_type", node->get_name()); } return ::at32(id_to_type, node->get_id()); } void emu_settings::OpenCorrectionDialog(QWidget* parent) { if (!m_broken_types.empty() && QMessageBox::question(parent, tr("Fix invalid settings?"), tr( "Your config file contained one or more unrecognized values for settings.\n" "Their default 
value will be used until they are corrected.\n" "Consider that a correction might render them invalid for other versions of RPCS3.\n" "\n" "Do you wish to let the program correct them for you?\n" "This change will only be final when you save the config." ), QMessageBox::Yes | QMessageBox::No, QMessageBox::No) == QMessageBox::Yes) { for (const auto& type : m_broken_types) { const std::string def = GetSettingDefault(type); const std::string old = GetSetting(type); SetSetting(type, def); cfg_log.success("The config entry '%s' was corrected from '%s' to '%s'", cfg_adapter::get_setting_name(type), old, def); } m_broken_types.clear(); cfg_log.success("You need to save the settings in order to make these changes permanent!"); } } QString emu_settings::GetLocalizedSetting(const QString& original, emu_settings_type type, int index, bool strict) const { switch (type) { case emu_settings_type::SPUBlockSize: switch (static_cast<spu_block_size_type>(index)) { case spu_block_size_type::safe: return tr("Safe", "SPU block size"); case spu_block_size_type::mega: return tr("Mega", "SPU block size"); case spu_block_size_type::giga: return tr("Giga", "SPU block size"); } break; case emu_settings_type::ThreadSchedulerMode: switch (static_cast<thread_scheduler_mode>(index)) { case thread_scheduler_mode::old: return tr("RPCS3 Scheduler", "Thread Scheduler Mode"); case thread_scheduler_mode::alt: return tr("RPCS3 Alternative Scheduler", "Thread Scheduler Mode"); case thread_scheduler_mode::os: return tr("Operating System", "Thread Scheduler Mode"); } break; case emu_settings_type::EnableTSX: switch (static_cast<tsx_usage>(index)) { case tsx_usage::disabled: return tr("Disabled", "Enable TSX"); case tsx_usage::enabled: return tr("Enabled", "Enable TSX"); case tsx_usage::forced: return tr("Forced", "Enable TSX"); } break; case emu_settings_type::Renderer: switch (static_cast<video_renderer>(index)) { case video_renderer::null: return tr("Disable Video Output", "Video renderer"); case 
video_renderer::opengl: return tr("OpenGL", "Video renderer"); case video_renderer::vulkan: return tr("Vulkan", "Video renderer"); } break; case emu_settings_type::ShaderMode: switch (static_cast<shader_mode>(index)) { case shader_mode::recompiler: return tr("Legacy (single threaded)", "Shader Mode"); case shader_mode::async_recompiler: return tr("Async (multi threaded)", "Shader Mode"); case shader_mode::async_with_interpreter: return tr("Async with Shader Interpreter", "Shader Mode"); case shader_mode::interpreter_only: return tr("Shader Interpreter only", "Shader Mode"); } break; case emu_settings_type::Resolution: switch (static_cast<video_resolution>(index)) { case video_resolution::_1080p: return tr("1080p", "Resolution"); case video_resolution::_1080i: return tr("1080i", "Resolution"); case video_resolution::_720p: return tr("720p", "Resolution"); case video_resolution::_480p: return tr("480p", "Resolution"); case video_resolution::_480i: return tr("480i", "Resolution"); case video_resolution::_576p: return tr("576p", "Resolution"); case video_resolution::_576i: return tr("576i", "Resolution"); case video_resolution::_1600x1080p: return tr("1600x1080p", "Resolution"); case video_resolution::_1440x1080p: return tr("1440x1080p", "Resolution"); case video_resolution::_1280x1080p: return tr("1280x1080p", "Resolution"); case video_resolution::_960x1080p: return tr("960x1080p", "Resolution"); } break; case emu_settings_type::FrameLimit: switch (static_cast<frame_limit_type>(index)) { case frame_limit_type::none: return tr("Off", "Frame limit"); case frame_limit_type::_30: return tr("30", "Frame limit"); case frame_limit_type::_50: return tr("50", "Frame limit"); case frame_limit_type::_60: return tr("60", "Frame limit"); case frame_limit_type::_120: return tr("120", "Frame limit"); case frame_limit_type::display_rate: return tr("Display", "Frame limit"); case frame_limit_type::_auto: return tr("Auto", "Frame limit"); case frame_limit_type::_ps3: return tr("PS3 
Native", "Frame limit"); case frame_limit_type::infinite: return tr("Infinite", "Frame limit"); } break; case emu_settings_type::MSAA: switch (static_cast<msaa_level>(index)) { case msaa_level::none: return tr("Disabled", "MSAA"); case msaa_level::_auto: return tr("Auto", "MSAA"); } break; case emu_settings_type::ShaderPrecisionQuality: switch (static_cast<gpu_preset_level>(index)) { case gpu_preset_level::_auto: return tr("Auto", "Shader Precision"); case gpu_preset_level::ultra: return tr("Ultra", "Shader Precision"); case gpu_preset_level::high: return tr("High", "Shader Precision"); case gpu_preset_level::low: return tr("Low", "Shader Precision"); } break; case emu_settings_type::OutputScalingMode: switch (static_cast<output_scaling_mode>(index)) { case output_scaling_mode::nearest: return tr("Nearest", "Output Scaling Mode"); case output_scaling_mode::bilinear: return tr("Bilinear", "Output Scaling Mode"); case output_scaling_mode::fsr: return tr("FidelityFX Super Resolution", "Output Scaling Mode"); } break; case emu_settings_type::AudioRenderer: switch (static_cast<audio_renderer>(index)) { case audio_renderer::null: return tr("Disable Audio Output", "Audio renderer"); #ifdef _WIN32 case audio_renderer::xaudio: return tr("XAudio2", "Audio renderer"); #endif case audio_renderer::cubeb: return tr("Cubeb", "Audio renderer"); #ifdef HAVE_FAUDIO case audio_renderer::faudio: return tr("FAudio", "Audio renderer"); #endif } break; case emu_settings_type::MicrophoneType: switch (static_cast<microphone_handler>(index)) { case microphone_handler::null: return tr("Disabled", "Microphone handler"); case microphone_handler::standard: return tr("Standard", "Microphone handler"); case microphone_handler::singstar: return tr("SingStar", "Microphone handler"); case microphone_handler::real_singstar: return tr("Real SingStar", "Microphone handler"); case microphone_handler::rocksmith: return tr("Rocksmith", "Microphone handler"); } break; case 
emu_settings_type::KeyboardHandler: switch (static_cast<keyboard_handler>(index)) { case keyboard_handler::null: return tr("Null", "Keyboard handler"); case keyboard_handler::basic: return tr("Basic", "Keyboard handler"); } break; case emu_settings_type::MouseHandler: switch (static_cast<mouse_handler>(index)) { case mouse_handler::null: return tr("Null", "Mouse handler"); case mouse_handler::basic: return tr("Basic", "Mouse handler"); case mouse_handler::raw: return tr("Raw", "Mouse handler"); } break; case emu_settings_type::CameraType: switch (static_cast<fake_camera_type>(index)) { case fake_camera_type::unknown: return tr("Unknown", "Camera type"); case fake_camera_type::eyetoy: return tr("EyeToy", "Camera type"); case fake_camera_type::eyetoy2: return tr("PS Eye", "Camera type"); case fake_camera_type::uvc1_1: return tr("UVC 1.1", "Camera type"); } break; case emu_settings_type::CameraFlip: switch (static_cast<camera_flip>(index)) { case camera_flip::none: return tr("No", "Camera flip"); case camera_flip::horizontal: return tr("Flip horizontally", "Camera flip"); case camera_flip::vertical: return tr("Flip vertically", "Camera flip"); case camera_flip::both: return tr("Flip both axes", "Camera flip"); } break; case emu_settings_type::Camera: switch (static_cast<camera_handler>(index)) { case camera_handler::null: return tr("Null", "Camera handler"); case camera_handler::fake: return tr("Fake", "Camera handler"); case camera_handler::qt: return tr("Qt", "Camera handler"); } break; case emu_settings_type::MusicHandler: switch (static_cast<music_handler>(index)) { case music_handler::null: return tr("Null", "Music handler"); case music_handler::qt: return tr("Qt", "Music handler"); } break; case emu_settings_type::PadHandlerMode: switch (static_cast<pad_handler_mode>(index)) { case pad_handler_mode::single_threaded: return tr("Single-threaded", "Pad handler mode"); case pad_handler_mode::multi_threaded: return tr("Multi-threaded", "Pad handler mode"); } break; 
case emu_settings_type::Move: switch (static_cast<move_handler>(index)) { case move_handler::null: return tr("Null", "Move handler"); case move_handler::fake: return tr("Fake", "Move handler"); case move_handler::mouse: return tr("Mouse", "Move handler"); case move_handler::raw_mouse: return tr("Raw Mouse", "Move handler"); #ifdef HAVE_LIBEVDEV case move_handler::gun: return tr("Gun", "Gun handler"); #endif } break; case emu_settings_type::Buzz: switch (static_cast<buzz_handler>(index)) { case buzz_handler::null: return tr("Null (use real Buzzers)", "Buzz handler"); case buzz_handler::one_controller: return tr("1 controller (1-4 players)", "Buzz handler"); case buzz_handler::two_controllers: return tr("2 controllers (5-7 players)", "Buzz handler"); } break; case emu_settings_type::Turntable: switch (static_cast<turntable_handler>(index)) { case turntable_handler::null: return tr("Null", "Turntable handler"); case turntable_handler::one_controller: return tr("1 controller", "Turntable handler"); case turntable_handler::two_controllers: return tr("2 controllers", "Turntable handler"); } break; case emu_settings_type::GHLtar: switch (static_cast<ghltar_handler>(index)) { case ghltar_handler::null: return tr("Null", "GHLtar handler"); case ghltar_handler::one_controller: return tr("1 controller", "GHLtar handler"); case ghltar_handler::two_controllers: return tr("2 controllers", "GHLtar handler"); } break; case emu_settings_type::InternetStatus: switch (static_cast<np_internet_status>(index)) { case np_internet_status::disabled: return tr("Disconnected", "Internet Status"); case np_internet_status::enabled: return tr("Connected", "Internet Status"); } break; case emu_settings_type::PSNStatus: switch (static_cast<np_psn_status>(index)) { case np_psn_status::disabled: return tr("Disconnected", "PSN Status"); case np_psn_status::psn_fake: return tr("Simulated", "PSN Status"); case np_psn_status::psn_rpcn: return tr("RPCN", "PSN Status"); } break; case 
emu_settings_type::SleepTimersAccuracy: switch (static_cast<sleep_timers_accuracy_level>(index)) { case sleep_timers_accuracy_level::_as_host: return tr("As Host", "Sleep timers accuracy"); case sleep_timers_accuracy_level::_usleep: return tr("Usleep Only", "Sleep timers accuracy"); case sleep_timers_accuracy_level::_all_timers: return tr("All Timers", "Sleep timers accuracy"); } break; case emu_settings_type::FIFOAccuracy: switch (static_cast<rsx_fifo_mode>(index)) { case rsx_fifo_mode::fast: return tr("Fast", "RSX FIFO Accuracy"); case rsx_fifo_mode::atomic: return tr("Atomic", "RSX FIFO Accuracy"); case rsx_fifo_mode::atomic_ordered: return tr("Ordered & Atomic", "RSX FIFO Accuracy"); case rsx_fifo_mode::as_ps3: return tr("PS3", "RSX FIFO Accuracy"); } break; case emu_settings_type::PerfOverlayDetailLevel: switch (static_cast<detail_level>(index)) { case detail_level::none: return tr("None", "Detail Level"); case detail_level::minimal: return tr("Minimal", "Detail Level"); case detail_level::low: return tr("Low", "Detail Level"); case detail_level::medium: return tr("Medium", "Detail Level"); case detail_level::high: return tr("High", "Detail Level"); } break; case emu_settings_type::PerfOverlayFramerateDetailLevel: case emu_settings_type::PerfOverlayFrametimeDetailLevel: switch (static_cast<perf_graph_detail_level>(index)) { case perf_graph_detail_level::minimal: return tr("Minimal", "Perf Graph Detail Level"); case perf_graph_detail_level::show_min_max: return tr("Show Min And Max", "Perf Graph Detail Level"); case perf_graph_detail_level::show_one_percent_avg: return tr("Show 1% Low And Average", "Perf Graph Detail Level"); case perf_graph_detail_level::show_all: return tr("Show All", "Perf Graph Detail Level"); } break; case emu_settings_type::PerfOverlayPosition: switch (static_cast<screen_quadrant>(index)) { case screen_quadrant::top_left: return tr("Top Left", "Performance overlay position"); case screen_quadrant::top_right: return tr("Top Right", 
"Performance overlay position"); case screen_quadrant::bottom_left: return tr("Bottom Left", "Performance overlay position"); case screen_quadrant::bottom_right: return tr("Bottom Right", "Performance overlay position"); } break; case emu_settings_type::PPUDecoder: switch (static_cast<ppu_decoder_type>(index)) { case ppu_decoder_type::_static: return tr("Interpreter (static)", "PPU decoder"); case ppu_decoder_type::llvm: return tr("Recompiler (LLVM)", "PPU decoder"); } break; case emu_settings_type::SPUDecoder: switch (static_cast<spu_decoder_type>(index)) { case spu_decoder_type::_static: return tr("Interpreter (static)", "SPU decoder"); case spu_decoder_type::dynamic: return tr("Interpreter (dynamic)", "SPU decoder"); case spu_decoder_type::asmjit: return tr("Recompiler (ASMJIT)", "SPU decoder"); case spu_decoder_type::llvm: return tr("Recompiler (LLVM)", "SPU decoder"); } break; case emu_settings_type::EnterButtonAssignment: switch (static_cast<enter_button_assign>(index)) { case enter_button_assign::circle: return tr("Enter with circle", "Enter button assignment"); case enter_button_assign::cross: return tr("Enter with cross", "Enter button assignment"); } break; case emu_settings_type::AudioFormat: switch (static_cast<audio_format>(index)) { case audio_format::stereo: return tr("Stereo", "Audio format"); case audio_format::surround_5_1: return tr("Surround 5.1", "Audio format"); case audio_format::surround_7_1: return tr("Surround 7.1", "Audio format"); case audio_format::manual: return tr("Manual", "Audio format"); case audio_format::automatic: return tr("Automatic", "Audio format"); } break; case emu_settings_type::AudioFormats: switch (static_cast<audio_format_flag>(index)) { case audio_format_flag::lpcm_2_48khz: return tr("Linear PCM 2 Ch. 48 kHz", "Audio format flag"); case audio_format_flag::lpcm_5_1_48khz: return tr("Linear PCM 5.1 Ch. 48 kHz", "Audio format flag"); case audio_format_flag::lpcm_7_1_48khz: return tr("Linear PCM 7.1 Ch. 
48 kHz", "Audio format flag"); case audio_format_flag::ac3: return tr("Dolby Digital 5.1 Ch.", "Audio format flag"); case audio_format_flag::dts: return tr("DTS 5.1 Ch.", "Audio format flag"); } break; case emu_settings_type::AudioProvider: switch (static_cast<audio_provider>(index)) { case audio_provider::none: return tr("None", "Audio Provider"); case audio_provider::cell_audio: return tr("CellAudio", "Audio Provider"); case audio_provider::rsxaudio: return tr("RSXAudio", "Audio Provider"); } break; case emu_settings_type::AudioAvport: switch (static_cast<audio_avport>(index)) { case audio_avport::hdmi_0: return tr("HDMI 0", "Audio Avport"); case audio_avport::hdmi_1: return tr("HDMI 1", "Audio Avport"); case audio_avport::avmulti: return tr("AV multiout", "Audio Avport"); case audio_avport::spdif_0: return tr("SPDIF 0", "Audio Avport"); case audio_avport::spdif_1: return tr("SPDIF 1", "Audio Avport"); } break; case emu_settings_type::AudioChannelLayout: switch (static_cast<audio_channel_layout>(index)) { case audio_channel_layout::automatic: return tr("Auto", "Audio Channel Layout"); case audio_channel_layout::mono: return tr("Mono", "Audio Channel Layout"); case audio_channel_layout::stereo: return tr("Stereo", "Audio Channel Layout"); case audio_channel_layout::stereo_lfe: return tr("Stereo LFE", "Audio Channel Layout"); case audio_channel_layout::quadraphonic: return tr("Quadraphonic", "Audio Channel Layout"); case audio_channel_layout::quadraphonic_lfe: return tr("Quadraphonic LFE", "Audio Channel Layout"); case audio_channel_layout::surround_5_1: return tr("Surround 5.1", "Audio Channel Layout"); case audio_channel_layout::surround_7_1: return tr("Surround 7.1", "Audio Channel Layout"); } break; case emu_settings_type::LicenseArea: switch (static_cast<CellSysutilLicenseArea>(index)) { case CellSysutilLicenseArea::CELL_SYSUTIL_LICENSE_AREA_J: return tr("Japan", "License Area"); case CellSysutilLicenseArea::CELL_SYSUTIL_LICENSE_AREA_A: return tr("America", 
"License Area"); case CellSysutilLicenseArea::CELL_SYSUTIL_LICENSE_AREA_E: return tr("Europe, Oceania, Middle East, Russia", "License Area"); case CellSysutilLicenseArea::CELL_SYSUTIL_LICENSE_AREA_H: return tr("Southeast Asia", "License Area"); case CellSysutilLicenseArea::CELL_SYSUTIL_LICENSE_AREA_K: return tr("Korea", "License Area"); case CellSysutilLicenseArea::CELL_SYSUTIL_LICENSE_AREA_C: return tr("China", "License Area"); case CellSysutilLicenseArea::CELL_SYSUTIL_LICENSE_AREA_OTHER: return tr("Other", "License Area"); } break; case emu_settings_type::VulkanAsyncSchedulerDriver: switch (static_cast<vk_gpu_scheduler_mode>(index)) { case vk_gpu_scheduler_mode::safe: return tr("Safe", "Asynchronous Queue Scheduler"); case vk_gpu_scheduler_mode::fast: return tr("Fast", "Asynchronous Queue Scheduler"); } break; case emu_settings_type::Language: switch (static_cast<CellSysutilLang>(index)) { case CELL_SYSUTIL_LANG_JAPANESE: return tr("Japanese", "System Language"); case CELL_SYSUTIL_LANG_ENGLISH_US: return tr("English (US)", "System Language"); case CELL_SYSUTIL_LANG_FRENCH: return tr("French", "System Language"); case CELL_SYSUTIL_LANG_SPANISH: return tr("Spanish", "System Language"); case CELL_SYSUTIL_LANG_GERMAN: return tr("German", "System Language"); case CELL_SYSUTIL_LANG_ITALIAN: return tr("Italian", "System Language"); case CELL_SYSUTIL_LANG_DUTCH: return tr("Dutch", "System Language"); case CELL_SYSUTIL_LANG_PORTUGUESE_PT: return tr("Portuguese (Portugal)", "System Language"); case CELL_SYSUTIL_LANG_RUSSIAN: return tr("Russian", "System Language"); case CELL_SYSUTIL_LANG_KOREAN: return tr("Korean", "System Language"); case CELL_SYSUTIL_LANG_CHINESE_T: return tr("Chinese (Traditional)", "System Language"); case CELL_SYSUTIL_LANG_CHINESE_S: return tr("Chinese (Simplified)", "System Language"); case CELL_SYSUTIL_LANG_FINNISH: return tr("Finnish", "System Language"); case CELL_SYSUTIL_LANG_SWEDISH: return tr("Swedish", "System Language"); case 
CELL_SYSUTIL_LANG_DANISH: return tr("Danish", "System Language"); case CELL_SYSUTIL_LANG_NORWEGIAN: return tr("Norwegian", "System Language"); case CELL_SYSUTIL_LANG_POLISH: return tr("Polish", "System Language"); case CELL_SYSUTIL_LANG_ENGLISH_GB: return tr("English (UK)", "System Language"); case CELL_SYSUTIL_LANG_PORTUGUESE_BR: return tr("Portuguese (Brazil)", "System Language"); case CELL_SYSUTIL_LANG_TURKISH: return tr("Turkish", "System Language"); default: break; } break; case emu_settings_type::KeyboardType: switch (static_cast<CellKbMappingType>(index)) { case CELL_KB_MAPPING_101: return tr("English keyboard (US standard)", "Keyboard Type"); case CELL_KB_MAPPING_106: return tr("Japanese keyboard", "Keyboard Type"); case CELL_KB_MAPPING_106_KANA: return tr("Japanese keyboard (Kana state)", "Keyboard Type"); case CELL_KB_MAPPING_GERMAN_GERMANY: return tr("German keyboard", "Keyboard Type"); case CELL_KB_MAPPING_SPANISH_SPAIN: return tr("Spanish keyboard", "Keyboard Type"); case CELL_KB_MAPPING_FRENCH_FRANCE: return tr("French keyboard", "Keyboard Type"); case CELL_KB_MAPPING_ITALIAN_ITALY: return tr("Italian keyboard", "Keyboard Type"); case CELL_KB_MAPPING_DUTCH_NETHERLANDS: return tr("Dutch keyboard", "Keyboard Type"); case CELL_KB_MAPPING_PORTUGUESE_PORTUGAL: return tr("Portuguese keyboard (Portugal)", "Keyboard Type"); case CELL_KB_MAPPING_RUSSIAN_RUSSIA: return tr("Russian keyboard", "Keyboard Type"); case CELL_KB_MAPPING_ENGLISH_UK: return tr("English keyboard (UK standard)", "Keyboard Type"); case CELL_KB_MAPPING_KOREAN_KOREA: return tr("Korean keyboard", "Keyboard Type"); case CELL_KB_MAPPING_NORWEGIAN_NORWAY: return tr("Norwegian keyboard", "Keyboard Type"); case CELL_KB_MAPPING_FINNISH_FINLAND: return tr("Finnish keyboard", "Keyboard Type"); case CELL_KB_MAPPING_DANISH_DENMARK: return tr("Danish keyboard", "Keyboard Type"); case CELL_KB_MAPPING_SWEDISH_SWEDEN: return tr("Swedish keyboard", "Keyboard Type"); case CELL_KB_MAPPING_CHINESE_TRADITIONAL: 
return tr("Chinese keyboard (Traditional)", "Keyboard Type"); case CELL_KB_MAPPING_CHINESE_SIMPLIFIED: return tr("Chinese keyboard (Simplified)", "Keyboard Type"); case CELL_KB_MAPPING_SWISS_FRENCH_SWITZERLAND: return tr("French keyboard (Switzerland)", "Keyboard Type"); case CELL_KB_MAPPING_SWISS_GERMAN_SWITZERLAND: return tr("German keyboard (Switzerland)", "Keyboard Type"); case CELL_KB_MAPPING_CANADIAN_FRENCH_CANADA: return tr("French keyboard (Canada)", "Keyboard Type"); case CELL_KB_MAPPING_BELGIAN_BELGIUM: return tr("French keyboard (Belgium)", "Keyboard Type"); case CELL_KB_MAPPING_POLISH_POLAND: return tr("Polish keyboard", "Keyboard Type"); case CELL_KB_MAPPING_PORTUGUESE_BRAZIL: return tr("Portuguese keyboard (Brazil)", "Keyboard Type"); case CELL_KB_MAPPING_TURKISH_TURKEY: return tr("Turkish keyboard", "Keyboard Type"); } break; case emu_settings_type::ExclusiveFullscreenMode: switch (static_cast<vk_exclusive_fs_mode>(index)) { case vk_exclusive_fs_mode::unspecified: return tr("Automatic (Default)", "Exclusive Fullscreen Mode"); case vk_exclusive_fs_mode::disable: return tr("Prefer borderless fullscreen", "Exclusive Fullscreen Mode"); case vk_exclusive_fs_mode::enable: return tr("Prefer exclusive fullscreen", "Exclusive Fullscreen Mode"); } break; case emu_settings_type::StereoRenderMode: switch (static_cast<stereo_render_mode_options>(index)) { case stereo_render_mode_options::disabled: return tr("Disabled", "3D Display Mode"); case stereo_render_mode_options::side_by_side: return tr("Side-by-side", "3D Display Mode"); case stereo_render_mode_options::over_under: return tr("Over-under", "3D Display Mode"); case stereo_render_mode_options::interlaced: return tr("Interlaced", "3D Display Mode"); case stereo_render_mode_options::anaglyph_red_green: return tr("Anaglyph Red-Green", "3D Display Mode"); case stereo_render_mode_options::anaglyph_red_blue: return tr("Anaglyph Red-Blue", "3D Display Mode"); case stereo_render_mode_options::anaglyph_red_cyan: 
return tr("Anaglyph Red-Cyan", "3D Display Mode"); case stereo_render_mode_options::anaglyph_magenta_cyan: return tr("Anaglyph Magenta-Cyan", "3D Display Mode"); case stereo_render_mode_options::anaglyph_trioscopic: return tr("Anaglyph Green-Magenta (Trioscopic)", "3D Display Mode"); case stereo_render_mode_options::anaglyph_amber_blue: return tr("Anaglyph Amber-Blue (ColorCode 3D)", "3D Display Mode"); } break; case emu_settings_type::MidiDevices: switch (static_cast<midi_device_type>(index)) { case midi_device_type::guitar: return tr("Guitar (17 frets)", "Midi Device Type"); case midi_device_type::guitar_22fret: return tr("Guitar (22 frets)", "Midi Device Type"); case midi_device_type::keyboard: return tr("Keyboard", "Midi Device Type"); case midi_device_type::drums: return tr("Drums", "Midi Device Type"); } break; case emu_settings_type::XFloatAccuracy: switch (static_cast<xfloat_accuracy>(index)) { case xfloat_accuracy::accurate: return tr("Accurate XFloat"); case xfloat_accuracy::approximate: return tr("Approximate XFloat"); case xfloat_accuracy::relaxed: return tr("Relaxed XFloat"); case xfloat_accuracy::inaccurate: return tr("Inaccurate XFloat"); } break; default: break; } if (strict) { std::string type_string; if (const auto it = settings_location.find(type); it != settings_location.cend()) { for (const char* loc : it->second) { if (!type_string.empty()) type_string += ": "; type_string += loc; } } fmt::throw_exception("Missing translation for emu setting (original=%s, type='%s'=%d, index=%d)", original, type_string.empty() ? "?" 
: type_string, static_cast<int>(type), index); } return original; } std::string emu_settings::GetLocalizedSetting(const std::string& original, emu_settings_type type, int index, bool strict) const { return GetLocalizedSetting(QString::fromStdString(original), type, index, strict).toStdString(); } std::string emu_settings::GetLocalizedSetting(const cfg::_base* node, u32 index) const { const emu_settings_type type = FindSettingsType(node); const std::vector<std::string> settings = GetSettingOptions(type); return GetLocalizedSetting(::at32(settings, index), type, index, true); }
52,967
C++
.cpp
1,312
37.382622
186
0.709684
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
5,113
fatal_error_dialog.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/fatal_error_dialog.cpp
#include "fatal_error_dialog.h"

#include <QLayout>
#include <QTextDocument>
#include <QIcon>

// Rich-text template shown when the support-link footer is included.
// Placeholders: %1 = error text, %2 = report header, %3 = closing remark.
const QString document_with_help_text = R"(
<style>
	p {white-space: nowrap;}
</style>
<p>
	%1<br>
	%2<br>
	<a href='https://github.com/RPCS3/rpcs3/wiki/How-to-ask-for-Support'>https://github.com/RPCS3/rpcs3/wiki/How-to-ask-for-Support</a><br>
	%3<br>
</p>
)";

// Rich-text template without the support footer. Placeholder: %1 = error text.
const QString document_without_help_text = R"(
<style>
	p {white-space: nowrap;}
</style>
<p>
	%1<br>
</p>
)";

// Modal message box used to report unrecoverable errors.
// @param text              Error text in UTF-8. Used verbatim if is_html is set,
//                          otherwise converted from plain text to HTML first.
// @param is_html           Whether 'text' is already HTML markup.
// @param include_help_text Whether to append the "how to report errors" footer.
fatal_error_dialog::fatal_error_dialog(std::string_view text, bool is_html, bool include_help_text) : QMessageBox()
{
	const QString qstr = QString::fromUtf8(text.data(), text.size());
	const QString msg = is_html ? qstr : Qt::convertFromPlainText(qstr);

	QString document_body;

	if (include_help_text) [[likely]]
	{
		// Substitute all placeholders in a single pass. Unlike chained .arg()
		// calls, the multi-argument overload never re-scans already inserted
		// text, so a stray "%2"/"%3" inside the error message cannot be
		// misinterpreted as a placeholder for the next argument.
		document_body = document_with_help_text.arg(
			msg,
			tr("HOW TO REPORT ERRORS:"),
			tr("Please, don't send incorrect reports. Thanks for understanding."));
	}
	else
	{
		document_body = document_without_help_text.arg(msg);
	}

#ifndef __APPLE__
	setWindowIcon(QIcon(":/rpcs3.ico"));
#endif
	setWindowTitle(tr("RPCS3: Fatal Error"));
	setIcon(QMessageBox::Icon::Critical);
	setTextFormat(Qt::TextFormat::RichText);
	setText(document_body);
	layout()->setSizeConstraint(QLayout::SetFixedSize);
}
1,297
C++
.cpp
48
25.166667
136
0.721641
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
5,114
osk_dialog_frame.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/osk_dialog_frame.cpp
// Fallback on-screen-keyboard dialog implemented with plain Qt widgets.

#include "osk_dialog_frame.h"
#include "custom_dialog.h"
#include "Emu/Cell/Modules/cellMsgDialog.h"
#include "util/asm.hpp"

#include <QDialogButtonBox>
#include <QLabel>
#include <QLineEdit>
#include <QTextEdit>
#include <QHBoxLayout>
#include <QFormLayout>
#include <QRegularExpressionValidator>

// Defer deletion through the event loop: the dialog may still be processing
// signals when the frame itself is destroyed.
osk_dialog_frame::~osk_dialog_frame()
{
	if (m_dialog)
	{
		m_dialog->deleteLater();
	}
}

// Builds and shows the OSK dialog.
// Depending on params.prohibit_flags a single-line (CELL_OSKDIALOG_NO_RETURN)
// or multi-line edit field is created. params.charlimit is enforced on input,
// and whitespace is rejected/stripped when CELL_OSKDIALOG_NO_SPACE is set.
void osk_dialog_frame::Create(const osk_params& params)
{
	state = OskDialogState::Open;

	// Fixed width shared by both edit widget types, measured once (static);
	// lifetime extension keeps the temporary lambda alive for the reference.
	static const auto& lineEditWidth = []() {return QLabel("This is the very length of the lineedit due to hidpi reasons.").sizeHint().width(); };

	// Discard any previous dialog instance before building a new one
	if (m_dialog)
	{
		m_dialog->close();
		delete m_dialog;
	}

	m_dialog = new custom_dialog(false);
	m_dialog->setModal(true);

	// Title
	m_dialog->setWindowTitle(QString::fromStdString(params.title));

	// Message
	QLabel* message_label = new QLabel(QString::fromStdU16String(params.message));

	// Text Input Counter label ("current/maximum")
	const QString input_text = QString::fromStdU16String(std::u16string(params.init_text));
	QLabel* input_count_label = new QLabel(QString("%1/%2").arg(input_text.length()).arg(params.charlimit));

	// Button Layout
	QDialogButtonBox* button_box = new QDialogButtonBox(QDialogButtonBox::Ok);

	// Input Layout
	QHBoxLayout* inputLayout = new QHBoxLayout;
	inputLayout->setAlignment(Qt::AlignHCenter);

	// Text Input
	if (params.prohibit_flags & CELL_OSKDIALOG_NO_RETURN)
	{
		// Single-line edit field: the return key confirms instead of inserting a newline
		QLineEdit* input = new QLineEdit(m_dialog);
		input->setFixedWidth(lineEditWidth());
		input->setMaxLength(params.charlimit);
		input->setText(input_text);
		input->setFocus();

		if (params.panel_flag & CELL_OSKDIALOG_PANELMODE_PASSWORD)
		{
			input->setEchoMode(QLineEdit::Password); // Let's assume that games only use the password mode with single-line edit fields
		}

		if (params.prohibit_flags & CELL_OSKDIALOG_NO_SPACE)
		{
			// Reject any input that contains whitespace
			input->setValidator(new QRegularExpressionValidator(QRegularExpression("^\\S*$"), this));
		}

		connect(input, &QLineEdit::textChanged, input_count_label, [input_count_label, charlimit = params.charlimit, this](const QString& text)
		{
			// Keep the counter label and the shared OSK text buffer in sync
			input_count_label->setText(QString("%1/%2").arg(text.length()).arg(charlimit));
			SetOskText(text);
			// if (on_osk_key_input_entered) on_osk_key_input_entered({}); // Not applicable
		});
		connect(input, &QLineEdit::returnPressed, m_dialog, &QDialog::accept);

		inputLayout->addWidget(input);
	}
	else
	{
		// Multi-line edit field. QTextEdit has no max-length property, so the
		// character limit is enforced manually in the textChanged handler below.
		QTextEdit* input = new QTextEdit(m_dialog);
		input->setFixedWidth(lineEditWidth());
		input->setText(input_text);
		input->setFocus();
		input->moveCursor(QTextCursor::End);
		m_text_old = input_text;

		connect(input, &QTextEdit::textChanged, [=, this]()
		{
			QString text = input->toPlainText();

			if (text == m_text_old)
			{
				return;
			}

			QTextCursor cursor = input->textCursor();
			const int cursor_pos_new = cursor.position();
			// Cursor position before this edit, derived from the length delta
			const int cursor_pos_old = cursor_pos_new + m_text_old.length() - text.length();

			// Reset to old state if character limit was reached
			if (m_text_old.length() >= static_cast<int>(params.charlimit) && text.length() > static_cast<int>(params.charlimit))
			{
				input->blockSignals(true);
				input->setPlainText(m_text_old);
				cursor.setPosition(cursor_pos_old);
				input->setTextCursor(cursor);
				input->blockSignals(false);
				return;
			}

			int cursor_pos = cursor.position();

			// Clear text of spaces if necessary
			if (params.prohibit_flags & CELL_OSKDIALOG_NO_SPACE)
			{
				int trim_len = text.length();
				text.remove(QRegularExpression("\\s+"));
				trim_len -= text.length();
				cursor_pos -= trim_len; // move the cursor back by the number of removed characters
			}

			// Crop if more than one character was pasted and the character limit was exceeded
			// (QString::chop ignores non-positive lengths, so this is a no-op while under the limit)
			text.chop(text.length() - params.charlimit);

			// Set new text and block signals to prevent infinite loop
			input->blockSignals(true);
			input->setPlainText(text);
			cursor.setPosition(cursor_pos);
			input->setTextCursor(cursor);
			input->blockSignals(false);

			m_text_old = text;

			input_count_label->setText(QString("%1/%2").arg(text.length()).arg(params.charlimit));
			SetOskText(text);
			// if (on_osk_key_input_entered) on_osk_key_input_entered({}); // Not applicable
		});

		inputLayout->addWidget(input);
	}

	inputLayout->addWidget(input_count_label);

	QFormLayout* layout = new QFormLayout(m_dialog);
	layout->setFormAlignment(Qt::AlignHCenter);
	layout->addRow(message_label);
	layout->addRow(inputLayout);
	layout->addWidget(button_box);
	m_dialog->setLayout(layout);

	// Events
	connect(button_box, &QDialogButtonBox::accepted, m_dialog, &QDialog::accept);

	// Map the Qt dialog result onto the cell OSK close codes
	connect(m_dialog, &QDialog::finished, [this](int result)
	{
		switch (result)
		{
		case QDialog::Accepted: on_osk_close(CELL_OSKDIALOG_CLOSE_CONFIRM); break;
		case QDialog::Rejected: on_osk_close(CELL_OSKDIALOG_CLOSE_CANCEL); break;
		default: on_osk_close(result); break;
		}
	});

	// Fix size
	m_dialog->layout()->setSizeConstraint(QLayout::SetFixedSize);
	m_dialog->show();
}

// Copies the UTF-16 text (including its null terminator) into the shared
// osk_text buffer, truncated to the buffer capacity.
// NOTE(review): if the text fills the entire buffer the terminator is not
// written — presumably consumers treat a full buffer as implicitly
// terminated; verify against the OSK readers.
void osk_dialog_frame::SetOskText(const QString& text)
{
	std::memcpy(osk_text.data(), utils::bless<char16_t>(text.constData()), std::min<usz>(osk_text.size(), text.size() + usz{1}) * sizeof(char16_t));
}

// Closes the dialog, translating the cell OSK close code to a Qt dialog result.
void osk_dialog_frame::Close(s32 status)
{
	if (m_dialog)
	{
		switch (status)
		{
		case CELL_OSKDIALOG_CLOSE_CONFIRM: m_dialog->done(QDialog::Accepted); break;
		case CELL_OSKDIALOG_CLOSE_CANCEL: m_dialog->done(QDialog::Rejected); break;
		default: m_dialog->done(status); break;
		}
	}
}

// Empties the shared OSK text buffer (only when a full clear is requested).
void osk_dialog_frame::Clear(bool clear_all_data)
{
	if (m_dialog && clear_all_data)
	{
		SetOskText("");
	}
}

// Replaces the shared OSK text buffer with the given text.
void osk_dialog_frame::SetText(const std::u16string& text)
{
	if (m_dialog)
	{
		SetOskText(QString::fromStdU16String(text));
	}
}

// Overwrites the shared OSK text buffer; true insertion is not implemented.
void osk_dialog_frame::Insert(const std::u16string& text)
{
	if (m_dialog)
	{
		// TODO: Correct position (will probably never get implemented because this dialog is just a fallback)
		SetOskText(QString::fromStdU16String(text));
	}
}
5,972
C++
.cpp
190
28.515789
145
0.721623
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
5,115
shortcut_handler.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/shortcut_handler.cpp
#include "stdafx.h"
#include "shortcut_handler.h"
#include "Emu/System.h"

LOG_CHANNEL(shortcut_log, "Shortcuts");

// Creates one QShortcut per configured shortcut that belongs to this handler
// and wires its activation signals to handle_shortcut().
shortcut_handler::shortcut_handler(gui::shortcuts::shortcut_handler_id handler_id, QObject* parent, const std::shared_ptr<gui_settings>& gui_settings)
	: QObject(parent), m_handler_id(handler_id), m_gui_settings(gui_settings)
{
	shortcut_settings settings{};

	for (const auto& [sc_key, sc_info] : settings.shortcut_map)
	{
		// Only register shortcuts that were meant for this handler
		if (sc_info.handler_id != m_handler_id)
		{
			continue;
		}

		const QKeySequence sequence = settings.get_key_sequence(sc_info, m_gui_settings);

		QShortcut* qt_shortcut = new QShortcut(sequence, parent);
		qt_shortcut->setAutoRepeat(false);

		// Remember the shortcut together with its current key sequence
		shortcut_key_info entry{};
		entry.shortcut = qt_shortcut;
		entry.info = sc_info;
		entry.key_sequence = sequence;
		m_shortcuts[sc_key] = entry;

		connect(qt_shortcut, &QShortcut::activated, this, [this, key = sc_key]()
		{
			handle_shortcut(key, m_shortcuts[key].key_sequence);
		});

		connect(qt_shortcut, &QShortcut::activatedAmbiguously, this, [this, key = sc_key]()
		{
			// TODO: do not allow same shortcuts and remove this connect
			// activatedAmbiguously will trigger if you have the same key sequence for several shortcuts
			const QKeySequence& key_sequence = m_shortcuts[key].key_sequence;
			shortcut_log.error("%s: Shortcut activated ambiguously: %s (%s)", m_handler_id, key, key_sequence.toString());
			handle_shortcut(key, key_sequence);
		});
	}
}

// Re-reads the configured key sequences and applies them to the existing
// QShortcut objects of this handler.
void shortcut_handler::update()
{
	shortcut_log.notice("%s: Updating shortcuts", m_handler_id);

	shortcut_settings settings{};

	for (const auto& [sc_key, sc_info] : settings.shortcut_map)
	{
		// Skip shortcuts of other handlers
		if (sc_info.handler_id != m_handler_id)
		{
			continue;
		}

		// Skip shortcuts that were never registered here
		if (!m_shortcuts.contains(sc_key))
		{
			continue;
		}

		const QKeySequence sequence = settings.get_key_sequence(sc_info, m_gui_settings);

		shortcut_key_info& entry = m_shortcuts[sc_key];
		entry.key_sequence = sequence;

		if (entry.shortcut)
		{
			entry.shortcut->setKey(sequence);
		}
	}
}

// Logs the activation and forwards it to whoever listens on shortcut_activated.
void shortcut_handler::handle_shortcut(gui::shortcuts::shortcut shortcut_key, const QKeySequence& key_sequence)
{
	shortcut_log.notice("%s: Shortcut pressed: %s (%s)", m_handler_id, shortcut_key, key_sequence.toString());
	Q_EMIT shortcut_activated(shortcut_key, key_sequence);
}
2,482
C++
.cpp
63
36.650794
150
0.738046
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
5,116
vfs_dialog_usb_tab.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/vfs_dialog_usb_tab.cpp
#include "vfs_dialog_usb_tab.h"
#include "vfs_dialog_usb_input.h"
#include "table_item_delegate.h"
#include "Utilities/Config.h"

#include <QVBoxLayout>
#include <QHeaderView>
#include <QScrollBar>
#include <QMenu>
#include <QMouseEvent>

// Number of configurable usb devices (/dev_usb000 .. /dev_usb007)
constexpr int max_usb_devices = 8;

// Returns the device info stored under device_name, or a default-constructed
// entry if the map has no such device.
const auto get_device_info = [](const QString& device_name, const cfg::map_of_type<cfg::device_info>& device_map) -> cfg::device_info
{
	if (auto it = device_map.find(device_name.toStdString()); it != device_map.cend())
	{
		return it->second;
	}
	return {};
};

// Builds the mount point name for device slot i, e.g. "/dev_usb002"
const auto get_device_name = [](int i) -> QString
{
	return QString("/dev_usb00%0").arg(i);
};

// Table column indices
enum usb_column : int
{
	usb_name = 0,
	usb_path = 1,
	usb_vid = 2,
	usb_pid = 3,
	usb_serial = 4
};

// Tab that lists the usb device mappings of the VFS dialog and lets the user
// edit each slot through a context menu or a double click.
vfs_dialog_usb_tab::vfs_dialog_usb_tab(cfg::device_entry* cfg_node, std::shared_ptr<gui_settings> _gui_settings, QWidget* parent)
	: QWidget(parent), m_cfg_node(cfg_node), m_gui_settings(std::move(_gui_settings))
{
	m_usb_table = new QTableWidget(this);
	m_usb_table->setItemDelegate(new table_item_delegate(this, false));
	m_usb_table->setShowGrid(false);
	m_usb_table->setSelectionBehavior(QAbstractItemView::SelectRows);
	m_usb_table->setEditTriggers(QAbstractItemView::NoEditTriggers);
	m_usb_table->setContextMenuPolicy(Qt::CustomContextMenu);
	m_usb_table->setVerticalScrollMode(QAbstractItemView::ScrollPerPixel);
	m_usb_table->setHorizontalScrollMode(QAbstractItemView::ScrollPerPixel);
	m_usb_table->verticalScrollBar()->setSingleStep(20);
	m_usb_table->horizontalScrollBar()->setSingleStep(10);
	m_usb_table->setColumnCount(5);
	m_usb_table->setHorizontalHeaderLabels(QStringList() << tr("Device") << tr("Path") << tr("Vendor ID") << tr("Product ID") << tr("Serial"));
	// Use the column enum instead of a magic 0 for consistency with the rest of the file
	m_usb_table->horizontalHeader()->setSectionResizeMode(usb_column::usb_name, QHeaderView::Fixed);
	m_usb_table->horizontalHeader()->setStretchLastSection(true);
	m_usb_table->setRowCount(max_usb_devices);

	// Populate one row per device slot from the current config
	for (int i = 0; i < max_usb_devices; i++)
	{
		const QString device_name = get_device_name(i);
		const cfg::device_info info = get_device_info(device_name, m_cfg_node->get_map());
		m_usb_table->setItem(i, usb_column::usb_name, new QTableWidgetItem(device_name));
		m_usb_table->setItem(i, usb_column::usb_path, new QTableWidgetItem(QString::fromStdString(info.path)));
		m_usb_table->setItem(i, usb_column::usb_vid, new QTableWidgetItem(QString::fromStdString(info.vid)));
		m_usb_table->setItem(i, usb_column::usb_pid, new QTableWidgetItem(QString::fromStdString(info.pid)));
		m_usb_table->setItem(i, usb_column::usb_serial, new QTableWidgetItem(QString::fromStdString(info.serial)));
	}

	m_usb_table->resizeColumnsToContents();

	connect(m_usb_table, &QTableWidget::customContextMenuRequested, this, &vfs_dialog_usb_tab::show_context_menu);
	connect(m_usb_table, &QTableWidget::itemDoubleClicked, this, &vfs_dialog_usb_tab::double_clicked_slot);

	QVBoxLayout* vbox = new QVBoxLayout;
	vbox->addWidget(m_usb_table);
	setLayout(vbox);
}

// Writes the table contents back into the config node.
void vfs_dialog_usb_tab::set_settings() const
{
	cfg::map_of_type<cfg::device_info> device_map{};

	for (int i = 0; i < max_usb_devices; i++)
	{
		cfg::device_info info{};
		info.path = m_usb_table->item(i, usb_column::usb_path)->text().toStdString();
		info.vid = m_usb_table->item(i, usb_column::usb_vid)->text().toStdString();
		info.pid = m_usb_table->item(i, usb_column::usb_pid)->text().toStdString();
		info.serial = m_usb_table->item(i, usb_column::usb_serial)->text().toStdString();
		device_map.emplace(get_device_name(i).toStdString(), std::move(info));
	}

	m_cfg_node->set_map(std::move(device_map));
}

// Restores every row to the config node's default values.
void vfs_dialog_usb_tab::reset() const
{
	for (int i = 0; i < max_usb_devices; i++)
	{
		const QString device_name = get_device_name(i);
		const cfg::device_info info = get_device_info(device_name, m_cfg_node->get_default());
		m_usb_table->item(i, usb_column::usb_path)->setText(QString::fromStdString(info.path));
		m_usb_table->item(i, usb_column::usb_vid)->setText(QString::fromStdString(info.vid));
		m_usb_table->item(i, usb_column::usb_pid)->setText(QString::fromStdString(info.pid));
		m_usb_table->item(i, usb_column::usb_serial)->setText(QString::fromStdString(info.serial));
	}
}

// Opens the edit dialog for device slot 'index' and applies the result to the
// table row if the user confirmed.
void vfs_dialog_usb_tab::show_usb_input_dialog(int index)
{
	if (index < 0 || index >= max_usb_devices)
	{
		return;
	}

	const QString device_name = get_device_name(index);
	const cfg::device_info default_info = get_device_info(device_name, m_cfg_node->get_default());

	cfg::device_info info{};
	info.path = m_usb_table->item(index, usb_column::usb_path)->text().toStdString();
	info.vid = m_usb_table->item(index, usb_column::usb_vid)->text().toStdString();
	info.pid = m_usb_table->item(index, usb_column::usb_pid)->text().toStdString();
	info.serial = m_usb_table->item(index, usb_column::usb_serial)->text().toStdString();

	// Stack allocation is the idiomatic pattern for modal dialogs: the dialog
	// is destroyed as soon as it is dismissed and cannot leak on an exception,
	// unlike the previous new + deleteLater() pattern.
	vfs_dialog_usb_input input_dialog(device_name, default_info, &info, m_gui_settings, this);

	if (input_dialog.exec() == QDialog::Accepted)
	{
		m_usb_table->item(index, usb_column::usb_path)->setText(QString::fromStdString(info.path));
		m_usb_table->item(index, usb_column::usb_vid)->setText(QString::fromStdString(info.vid));
		m_usb_table->item(index, usb_column::usb_pid)->setText(QString::fromStdString(info.pid));
		m_usb_table->item(index, usb_column::usb_serial)->setText(QString::fromStdString(info.serial));
	}
}

// Context menu with a single "Edit" action for the clicked row.
void vfs_dialog_usb_tab::show_context_menu(const QPoint& pos)
{
	const int row = m_usb_table->indexAt(pos).row();

	if (row < 0 || row >= max_usb_devices)
	{
		return;
	}

	QMenu menu{};
	QAction* edit = menu.addAction(tr("&Edit"));
	connect(edit, &QAction::triggered, this, [this, row]() { show_usb_input_dialog(row); });
	menu.exec(m_usb_table->viewport()->mapToGlobal(pos));
}

// Opens the edit dialog when a table item is double clicked.
void vfs_dialog_usb_tab::double_clicked_slot(QTableWidgetItem* item)
{
	if (!item)
	{
		return;
	}

	show_usb_input_dialog(item->row());
}

void vfs_dialog_usb_tab::mouseDoubleClickEvent(QMouseEvent* ev)
{
	if (!ev) return;

	// Qt's itemDoubleClicked signal doesn't distinguish between mouse buttons and there is no simple way to get the pressed button.
	// So we have to ignore this event when another button is pressed.
	if (ev->button() != Qt::LeftButton)
	{
		ev->ignore();
		return;
	}

	QWidget::mouseDoubleClickEvent(ev);
}
6,318
C++
.cpp
149
40.241611
140
0.722412
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
5,117
debugger_frame.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/debugger_frame.cpp
#include "debugger_frame.h" #include "register_editor_dialog.h" #include "instruction_editor_dialog.h" #include "memory_viewer_panel.h" #include "elf_memory_dumping_dialog.h" #include "gui_settings.h" #include "debugger_list.h" #include "breakpoint_list.h" #include "breakpoint_handler.h" #include "call_stack_list.h" #include "input_dialog.h" #include "qt_utils.h" #include "Emu/System.h" #include "Emu/IdManager.h" #include "Emu/RSX/RSXThread.h" #include "Emu/RSX/RSXDisAsm.h" #include "Emu/Cell/PPUAnalyser.h" #include "Emu/Cell/PPUDisAsm.h" #include "Emu/Cell/PPUThread.h" #include "Emu/Cell/SPUDisAsm.h" #include "Emu/Cell/SPUThread.h" #include "Emu/CPU/CPUThread.h" #include "Emu/CPU/CPUDisAsm.h" #include <QKeyEvent> #include <QScrollBar> #include <QFontDatabase> #include <QCompleter> #include <QVBoxLayout> #include <QTimer> #include <QCheckBox> #include <QMessageBox> #include <algorithm> #include <functional> #include "util/asm.hpp" constexpr auto qstr = QString::fromStdString; constexpr auto s_pause_flags = cpu_flag::dbg_pause + cpu_flag::dbg_global_pause; extern atomic_t<bool> g_debugger_pause_all_threads_on_bp; extern const ppu_decoder<ppu_itype> g_ppu_itype; extern bool is_using_interpreter(thread_class t_class) { switch (t_class) { case thread_class::ppu: return g_cfg.core.ppu_decoder != ppu_decoder_type::llvm; case thread_class::spu: return g_cfg.core.spu_decoder != spu_decoder_type::asmjit && g_cfg.core.spu_decoder != spu_decoder_type::llvm; default: return true; } } extern std::shared_ptr<CPUDisAsm> make_disasm(const cpu_thread* cpu, std::shared_ptr<cpu_thread> handle) { if (!handle) { switch (cpu->get_class()) { case thread_class::ppu: handle = idm::get<named_thread<ppu_thread>>(cpu->id); break; case thread_class::spu: handle = idm::get<named_thread<spu_thread>>(cpu->id); break; default: break; } } std::shared_ptr<CPUDisAsm> result; switch (cpu->get_class()) { case thread_class::ppu: result = std::make_shared<PPUDisAsm>(cpu_disasm_mode::interpreter, 
vm::g_sudo_addr); break; case thread_class::spu: result = std::make_shared<SPUDisAsm>(cpu_disasm_mode::interpreter, static_cast<const spu_thread*>(cpu)->ls); break; case thread_class::rsx: result = std::make_shared<RSXDisAsm>(cpu_disasm_mode::interpreter, vm::g_sudo_addr, 0, cpu); break; default: return result; } result->set_cpu_handle(std::move(handle)); return result; } debugger_frame::debugger_frame(std::shared_ptr<gui_settings> gui_settings, QWidget *parent) : custom_dock_widget(tr("Debugger [Press F1 for Help]"), parent) , m_gui_settings(std::move(gui_settings)) { setContentsMargins(0, 0, 0, 0); m_update = new QTimer(this); connect(m_update, &QTimer::timeout, this, &debugger_frame::UpdateUI); m_mono = QFontDatabase::systemFont(QFontDatabase::FixedFont); m_mono.setPointSize(9); QVBoxLayout* vbox_p_main = new QVBoxLayout(); vbox_p_main->setContentsMargins(5, 5, 5, 5); QHBoxLayout* hbox_b_main = new QHBoxLayout(); hbox_b_main->setContentsMargins(0, 0, 0, 0); m_ppu_breakpoint_handler = new breakpoint_handler(); m_breakpoint_list = new breakpoint_list(this, m_ppu_breakpoint_handler); m_debugger_list = new debugger_list(this, m_gui_settings, m_ppu_breakpoint_handler); m_debugger_list->installEventFilter(this); m_call_stack_list = new call_stack_list(this); m_choice_units = new QComboBox(this); m_choice_units->setSizeAdjustPolicy(QComboBox::AdjustToContents); m_choice_units->setMaxVisibleItems(30); m_choice_units->setMaximumWidth(500); m_choice_units->setEditable(true); m_choice_units->setInsertPolicy(QComboBox::NoInsert); m_choice_units->lineEdit()->setPlaceholderText(tr("Choose a thread")); m_choice_units->completer()->setCompletionMode(QCompleter::PopupCompletion); m_choice_units->completer()->setMaxVisibleItems(30); m_choice_units->completer()->setFilterMode(Qt::MatchContains); m_choice_units->installEventFilter(this); m_go_to_addr = new QPushButton(tr("Go To Address"), this); m_go_to_pc = new QPushButton(tr("Go To PC"), this); m_btn_step = new 
QPushButton(tr("Step"), this); m_btn_step_over = new QPushButton(tr("Step Over"), this); m_btn_run = new QPushButton(RunString, this); EnableButtons(false); ChangeColors(); hbox_b_main->addWidget(m_go_to_addr); hbox_b_main->addWidget(m_go_to_pc); hbox_b_main->addWidget(m_btn_step); hbox_b_main->addWidget(m_btn_step_over); hbox_b_main->addWidget(m_btn_run); hbox_b_main->addWidget(m_choice_units); hbox_b_main->addStretch(); // Misc state m_misc_state = new QPlainTextEdit(this); m_misc_state->setLineWrapMode(QPlainTextEdit::NoWrap); m_misc_state->setTextInteractionFlags(Qt::TextSelectableByMouse | Qt::TextSelectableByKeyboard); // Registers m_regs = new QPlainTextEdit(this); m_regs->setLineWrapMode(QPlainTextEdit::NoWrap); m_regs->setTextInteractionFlags(Qt::TextSelectableByMouse | Qt::TextSelectableByKeyboard); m_debugger_list->setFont(m_mono); m_misc_state->setFont(m_mono); m_regs->setFont(m_mono); m_call_stack_list->setFont(m_mono); m_right_splitter = new QSplitter(this); m_right_splitter->setOrientation(Qt::Vertical); m_right_splitter->addWidget(m_misc_state); m_right_splitter->addWidget(m_regs); m_right_splitter->addWidget(m_call_stack_list); m_right_splitter->addWidget(m_breakpoint_list); // Set relative sizes for widgets m_right_splitter->setStretchFactor(0, 2); // misc state m_right_splitter->setStretchFactor(1, 8); // registers m_right_splitter->setStretchFactor(2, 3); // call stack m_right_splitter->setStretchFactor(3, 1); // breakpoint list m_splitter = new QSplitter(this); m_splitter->addWidget(m_debugger_list); m_splitter->addWidget(m_right_splitter); QHBoxLayout* hbox_w_list = new QHBoxLayout(); hbox_w_list->addWidget(m_splitter); vbox_p_main->addLayout(hbox_b_main); vbox_p_main->addLayout(hbox_w_list); QWidget* body = new QWidget(this); body->setLayout(vbox_p_main); setWidget(body); connect(m_go_to_addr, &QAbstractButton::clicked, this, &debugger_frame::ShowGotoAddressDialog); connect(m_go_to_pc, &QAbstractButton::clicked, this, [this]() { ShowPC(true); 
}); connect(m_btn_step, &QAbstractButton::clicked, this, &debugger_frame::DoStep); connect(m_btn_step_over, &QAbstractButton::clicked, [this]() { DoStep(true); }); connect(m_btn_run, &QAbstractButton::clicked, this, &debugger_frame::RunBtnPress); connect(m_choice_units->lineEdit(), &QLineEdit::editingFinished, [&] { m_choice_units->clearFocus(); }); connect(m_choice_units, QOverload<int>::of(&QComboBox::currentIndexChanged), this, [&](){ m_is_spu_disasm_mode = false; OnSelectUnit(); }); connect(this, &QDockWidget::visibilityChanged, this, &debugger_frame::EnableUpdateTimer); connect(m_debugger_list, &debugger_list::BreakpointRequested, m_breakpoint_list, &breakpoint_list::HandleBreakpointRequest); connect(m_breakpoint_list, &breakpoint_list::RequestShowAddress, m_debugger_list, &debugger_list::ShowAddress); connect(this, &debugger_frame::CallStackUpdateRequested, m_call_stack_list, &call_stack_list::HandleUpdate); connect(m_call_stack_list, &call_stack_list::RequestShowAddress, m_debugger_list, &debugger_list::ShowAddress); m_debugger_list->RefreshView(); m_choice_units->clear(); m_choice_units->addItem(NoThreadString); } void debugger_frame::SaveSettings() const { m_gui_settings->SetValue(gui::d_splitterState, m_splitter->saveState()); } void debugger_frame::ChangeColors() const { if (m_debugger_list) { const QColor color = gui::utils::get_foreground_color(); m_debugger_list->m_color_bp = m_breakpoint_list->m_color_bp = gui::utils::get_label_color("debugger_frame_breakpoint", Qt::yellow, Qt::darkYellow, QPalette::Window); m_debugger_list->m_color_pc = gui::utils::get_label_color("debugger_frame_pc", Qt::green, Qt::darkGreen, QPalette::Window); m_debugger_list->m_text_color_bp = m_breakpoint_list->m_text_color_bp = gui::utils::get_label_color("debugger_frame_breakpoint", color, color); m_debugger_list->m_text_color_pc = gui::utils::get_label_color("debugger_frame_pc", color, color); } } bool debugger_frame::eventFilter(QObject* object, QEvent* event) { // There's 
no overlap between keys so returning true wouldn't matter. if (object == m_debugger_list && event->type() == QEvent::KeyPress) { keyPressEvent(static_cast<QKeyEvent*>(event)); event->accept(); // Restore accepted state return false; } if (object == m_choice_units && event->type() == QEvent::FocusOut) { if (int index = m_choice_units->currentIndex(); index >= 0) { // Restore item text automatically on focus-out after search m_choice_units->setCurrentText(m_choice_units->itemText(index)); } } return false; } void debugger_frame::closeEvent(QCloseEvent* event) { SaveSettings(); QDockWidget::closeEvent(event); Q_EMIT DebugFrameClosed(); } void debugger_frame::showEvent(QShowEvent* event) { // resize splitter widgets if (!m_splitter->restoreState(m_gui_settings->GetValue(gui::d_splitterState).toByteArray())) { const int width_right = width() / 3; const int width_left = width() - width_right; m_splitter->setSizes({width_left, width_right}); } QDockWidget::showEvent(event); } void debugger_frame::hideEvent(QHideEvent* event) { // save splitter state or it will resume its initial state on next show m_gui_settings->SetValue(gui::d_splitterState, m_splitter->saveState()); QDockWidget::hideEvent(event); } void debugger_frame::open_breakpoints_settings() { QDialog* dlg = new QDialog(this); dlg->setWindowTitle(tr("Breakpoint Settings")); dlg->setModal(true); QCheckBox* check_box = new QCheckBox(tr("Pause All Threads On Hit"), dlg); check_box->setCheckable(true); check_box->setChecked(g_debugger_pause_all_threads_on_bp.load()); check_box->setToolTip(tr("When set: a breakpoint hit will pause the emulation instead of the current thread." 
"\nApplies on all breakpoints in all threads regardless if set before or after changing this setting.")); connect(check_box, &QCheckBox::clicked, dlg, [](bool checked) { g_debugger_pause_all_threads_on_bp = checked; }); QPushButton* button_ok = new QPushButton(tr("OK"), dlg); connect(button_ok, &QAbstractButton::clicked, dlg, &QDialog::accept); QHBoxLayout* hbox_layout = new QHBoxLayout(dlg); hbox_layout->addWidget(check_box); hbox_layout->addWidget(button_ok); dlg->setLayout(hbox_layout); dlg->setAttribute(Qt::WA_DeleteOnClose); dlg->open(); } void debugger_frame::keyPressEvent(QKeyEvent* event) { if (!isActiveWindow()) { event->ignore(); return; } const auto cpu = get_cpu(); const int row = m_debugger_list->currentRow(); switch (event->key()) { case Qt::Key_F1: { if (event->isAutoRepeat()) { event->ignore(); return; } QDialog* dlg = new QDialog(this); dlg->setAttribute(Qt::WA_DeleteOnClose); dlg->setWindowTitle(tr("Debugger Guide & Shortcuts")); QLabel* l = new QLabel(tr( "Keys Ctrl+G: Go to typed address." "\nKeys Ctrl+B: Open breakpoints settings." "\nKeys Ctrl+C: Copy instruction contents." "\nKeys Ctrl+F: Find thread." "\nKeys Alt+S: Capture SPU images of selected SPU or generalized form when used from PPU." "\nKeys Alt+S: Launch a memory viewer pointed to the current RSX semaphores location when used from RSX." "\nKeys Alt+R: Load last saved SPU state capture." "\nKeys Alt+F5: Show the SPU disassmebler dialog." "\nKey D: SPU MFC commands logger, MFC debug setting must be enabled." "\nKey D: Also PPU calling history logger, interpreter and non-zero call history size must be used." "\nKey E: Instruction Editor: click on the instruction you want to modify, then press E." "\nKey F: Dedicated floating point mode switch for SPU threads." "\nKey R: Registers Editor for selected thread." "\nKey N: Show next instruction the thread will execute after marked instruction, does nothing if target is not predictable." 
"\nKey M: Show the Memory Viewer with initial address pointing to the marked instruction." "\nKey I: Show RSX method detail." "\nKey F10: Perform step-over on instructions. (skip function calls)" "\nKey F11: Perform single-stepping on instructions." "\nKey F1: Show this help dialog." "\nKey Up: Scroll one instruction upwards. (address is decremented)" "\nKey Down: Scroll one instruction downwards. (address is incremented)" "\nKey Page-Up: Scroll upwards with steps count equal to the viewed instruction count." "\nKey Page-Down: Scroll downwards with steps count equal to the viewed instruction count." "\nDouble-click: Set breakpoints.")); gui::utils::set_font_size(*l, 9); QVBoxLayout* layout = new QVBoxLayout(); layout->addWidget(l); dlg->setLayout(layout); dlg->setFixedSize(dlg->sizeHint()); dlg->move(QCursor::pos()); dlg->open(); return; } default: break; } if (event->modifiers() == Qt::ControlModifier) { switch (const auto key = event->key()) { case Qt::Key_PageUp: case Qt::Key_PageDown: { if (event->isAutoRepeat()) { event->ignore(); break; } const int count = m_choice_units->count(); const int cur_index = m_choice_units->currentIndex(); if (count && cur_index >= 0) { // Wrap around // Adding count so the result would not be negative, that would alter the remainder operation m_choice_units->setCurrentIndex((cur_index + count + (key == Qt::Key_PageUp ? -1 : 1)) % count); } return; } case Qt::Key_F: { m_choice_units->clearEditText(); m_choice_units->setFocus(); return; } default: break; } } if (!cpu) { event->ignore(); return; } const u32 address_limits = (cpu->get_class() == thread_class::spu ? 0x3fffc : ~3); const u32 pc = (m_debugger_list->m_pc & address_limits); const u32 selected = (m_debugger_list->m_showing_selected_instruction ? 
m_debugger_list->m_selected_instruction : cpu->get_pc()) & address_limits; const auto modifiers = event->modifiers(); if (modifiers == Qt::ControlModifier) { if (event->isAutoRepeat()) { event->ignore(); return; } switch (event->key()) { case Qt::Key_G: { ShowGotoAddressDialog(); return; } case Qt::Key_B: { open_breakpoints_settings(); return; } default: break; } } else { switch (event->key()) { case Qt::Key_D: { if (event->isAutoRepeat()) { break; } auto get_max_allowed = [&](QString title, QString description, u32 limit) -> u32 { input_dialog dlg(4, "", title, description.arg(limit), QString::number(limit), this); QFont mono = QFontDatabase::systemFont(QFontDatabase::FixedFont); mono.setPointSize(8); dlg.set_input_font(mono, false); dlg.set_clear_button_enabled(false); dlg.set_button_enabled(QDialogButtonBox::StandardButton::Ok, false); dlg.set_validator(new QRegularExpressionValidator(QRegularExpression("^[1-9][0-9]*$"), &dlg)); u32 max = 0; connect(&dlg, &input_dialog::text_changed, [&](const QString& changed) { bool ok = false; const u32 dummy = changed.toUInt(&ok, 10); ok = ok && dummy && dummy <= limit; dlg.set_button_enabled(QDialogButtonBox::StandardButton::Ok, ok); if (ok) { max = dummy; } }); if (dlg.exec() != QDialog::Accepted) { max = 0; } return max; }; auto copy_overlapping_list = [&] <typename T> (u64& index, u64 max, const std::vector<T>& in, std::vector<T>& out, bool& emptied) { max = std::min<u64>(max, in.size()); const u64 current_pos = index % in.size(); const u64 last_elements = std::min<u64>(current_pos, max); const u64 overlapped_old_elements = std::min<u64>(index, max) - last_elements; out.resize(overlapped_old_elements + last_elements); // Save list contents (only the relavant parts) std::copy(in.end() - overlapped_old_elements, in.end(), out.begin()); std::copy_n(in.begin() + current_pos - last_elements, last_elements, out.begin() + overlapped_old_elements); // Check if max elements to log is larger/equal to current list size if ((emptied 
= index && max >= index)) { // Empty list when possible (further calls' history logging will not log any call before this) index = 0; } }; if (cpu->get_class() == thread_class::spu && g_cfg.core.mfc_debug) { const u32 max = get_max_allowed(tr("Max MFC cmds logged"), tr("Decimal only, max allowed is %0."), spu_thread::max_mfc_dump_idx); // Preallocate in order to save execution time when inside suspend_all. std::vector<mfc_cmd_dump> copy(max); bool emptied = false; cpu_thread::suspend_all(nullptr, {}, [&] { const auto spu = static_cast<spu_thread*>(cpu); copy_overlapping_list(spu->mfc_dump_idx, max, spu->mfc_history, copy, emptied); }); std::string ret; u32 i = 0; for (auto it = copy.rbegin(); it != copy.rend(); it++, i++) { auto& dump = *it; const u32 pc = std::exchange(dump.cmd.eah, 0); fmt::append(ret, "\n(%d) PC 0x%05x: [%s] (%s)", i, pc, dump.cmd, spu_block_hash{dump.block_hash}); if (dump.cmd.cmd == MFC_PUTLLC_CMD) { fmt::append(ret, " %s", dump.cmd.tag == MFC_PUTLLC_SUCCESS ? "(passed)" : "(failed)"); } auto load = [&](usz index) { be_t<u32> data{}; std::memcpy(&data, dump.data + index * sizeof(data), sizeof(data)); return data; }; for (usz i = 0; i < utils::aligned_div(std::min<u32>(dump.cmd.size, 128), 4); i += 4) { fmt::append(ret, "\n[0x%02x] %08x %08x %08x %08x", i * sizeof(be_t<u32>) , load(i + 0), load(i + 1), load(i + 2), load(i + 3)); } } if (ret.empty()) { ret = "No MFC commands have been logged"; } if (emptied) { ret += "\nPrevious MFC history has been emptied!"; } spu_log.success("SPU MFC dump of '%s': %s", cpu->get_name(), ret); } else if (cpu->get_class() == thread_class::ppu && g_cfg.core.ppu_call_history) { const u32 max = get_max_allowed(tr("Max PPU calls logged"), tr("Decimal only, max allowed is %0."), ppu_thread::call_history_max_size); // Preallocate in order to save execution time when inside suspend_all. 
std::vector<u32> copy(max); std::vector<typename ppu_thread::syscall_history_t::entry_t> sys_copy(ppu_thread::syscall_history_max_size); std::array<bool, 2> emptied{}; cpu_thread::suspend_all(nullptr, {}, [&] { auto& list = static_cast<ppu_thread*>(cpu)->call_history; auto& sys_list = static_cast<ppu_thread*>(cpu)->syscall_history; copy_overlapping_list(list.index, max, list.data, copy, emptied[0]); copy_overlapping_list(sys_list.index, max, sys_list.data, sys_copy, emptied[1]); }); std::string ret; PPUDisAsm dis_asm(cpu_disasm_mode::normal, vm::g_sudo_addr); u32 i = 0; for (auto it = copy.rbegin(); it != copy.rend(); it++, i++) { dis_asm.disasm(*it); fmt::append(ret, "\n(%u) 0x%08x: %s", i, *it, dis_asm.last_opcode); } i = 0; for (auto it = sys_copy.rbegin(); it != sys_copy.rend(); it++, i++) { fmt::append(ret, "\n(%u) 0x%08x: %s, 0x%x, r3=0x%x, r4=0x%x, r5=0x%x, r6=0x%x", i, it->cia, it->func_name, it->error, it->args[0], it->args[1], it->args[2], it->args[3]); } if (ret.empty()) { ret = "No PPU calls have been logged"; } if (emptied[0]) { ret += "\nPrevious call history has been emptied!"; } if (emptied[1]) { ret += "\nPrevious HLE call history has been emptied!"; } ppu_log.success("PPU calling history dump of '%s': %s", cpu->get_name(), ret); } return; } case Qt::Key_E: { if (event->isAutoRepeat()) { break; } if (cpu->get_class() == thread_class::ppu || cpu->get_class() == thread_class::spu) { if (!m_inst_editor) { m_inst_editor = new instruction_editor_dialog(this, selected, m_disasm.get(), make_check_cpu(cpu)); connect(m_inst_editor, &QDialog::finished, this, [this]() { m_inst_editor = nullptr; }); m_inst_editor->show(); } } return; } case Qt::Key_F: { if (event->isAutoRepeat()) { break; } if (cpu->get_class() == thread_class::ppu) { static_cast<ppu_thread*>(cpu)->debugger_mode.atomic_op([](ppu_debugger_mode& mode) { mode = static_cast<ppu_debugger_mode>((static_cast<u32>(mode) + 1) % static_cast<u32>(ppu_debugger_mode::max_mode)); }); return; } if 
(cpu->get_class() == thread_class::spu) { static_cast<spu_thread*>(cpu)->debugger_mode.atomic_op([](spu_debugger_mode& mode) { mode = static_cast<spu_debugger_mode>((static_cast<u32>(mode) + 1) % static_cast<u32>(spu_debugger_mode::max_mode)); }); return; } break; } case Qt::Key_R: { if (event->isAutoRepeat()) { break; } if (cpu->get_class() == thread_class::ppu || cpu->get_class() == thread_class::spu) { if (cpu->get_class() == thread_class::spu && modifiers & Qt::AltModifier) { static_cast<spu_thread*>(cpu)->try_load_debug_capture(); return; } if (!m_reg_editor) { m_reg_editor = new register_editor_dialog(this, m_disasm.get(), make_check_cpu(cpu)); connect(m_reg_editor, &QDialog::finished, this, [this]() { m_reg_editor = nullptr; }); m_reg_editor->show(); } } return; } case Qt::Key_S: { if (event->isAutoRepeat()) { break; } if (modifiers & Qt::AltModifier) { if (cpu->get_class() == thread_class::rsx) { if (u32 addr = static_cast<rsx::thread*>(cpu)->label_addr) { // Memory viewer pointing to RSX semaphores idm::make<memory_viewer_handle>(this, m_disasm, addr, make_check_cpu(nullptr)); } return; } if (cpu->get_class() == thread_class::ppu) { new elf_memory_dumping_dialog(pc, m_gui_settings, this); return; } if (cpu->get_class() != thread_class::spu) { return; } if (!cpu->state.all_of(cpu_flag::wait + cpu_flag::dbg_pause)) { QMessageBox::warning(this, QObject::tr("Pause the SPU Thread!"), QObject::tr("Cannot perform SPU capture due to the thread needing manual pausing!")); return; } static_cast<spu_thread*>(cpu)->capture_state(); return; } break; } case Qt::Key_N: { // Next instruction according to code flow // Known branch targets are selected over next PC for conditional branches // Indirect branches (unknown targets, such as function return) do not proceed to any instruction std::array<u32, 2> res{umax, umax}; const u32 selected = (m_debugger_list->m_showing_selected_instruction ? 
m_debugger_list->m_selected_instruction : cpu->get_pc()) & address_limits; switch (cpu->get_class()) { case thread_class::spu: { res = op_branch_targets(selected, spu_opcode_t{static_cast<spu_thread*>(cpu)->_ref<u32>(selected)}); break; } case thread_class::ppu: { be_t<ppu_opcode_t> op{}; if (vm::check_addr(selected, vm::page_executable) && vm::try_access(selected, &op, 4, false)) res = op_branch_targets(selected, op); break; } default: break; } if (auto it = std::find_if(res.rbegin(), res.rend(), FN(x != umax)); it != res.rend()) m_debugger_list->ShowAddress(*it - std::max(row, 0) * 4, true); return; } case Qt::Key_M: { if (event->isAutoRepeat()) { break; } if (m_disasm && cpu->get_class() == thread_class::spu) { // Save shared pointer to shared memory handle, ensure the destructor will not be called until the SPUDisAsm is destroyed static_cast<SPUDisAsm*>(m_disasm.get())->set_shm(static_cast<const spu_thread*>(cpu)->shm); } // Memory viewer idm::make<memory_viewer_handle>(this, m_disasm, pc, make_check_cpu(cpu)); return; } case Qt::Key_F10: { DoStep(true); return; } case Qt::Key_F11: { DoStep(false); return; } case Qt::Key_F5: { if (modifiers & Qt::AltModifier) { OnSelectSPUDisassembler(); return; } break; } default: break; } } event->ignore(); } cpu_thread* debugger_frame::get_cpu() { if (m_emu_state == system_state::stopped) { m_rsx = nullptr; m_cpu.reset(); return nullptr; } // Wait flag is raised by the thread itself, acknowledging exit if (m_cpu) { if (m_cpu->state.all_of(cpu_flag::wait + cpu_flag::exit)) { m_cpu.reset(); return nullptr; } return m_cpu.get(); } // m_rsx is raw pointer, when emulation is stopped it won't be cleared // Therefore need to do invalidation checks manually if (m_rsx) { if (g_fxo->try_get<rsx::thread>() != m_rsx || !m_rsx->ctrl || m_rsx->state.all_of(cpu_flag::wait + cpu_flag::exit)) { m_rsx = nullptr; return nullptr; } } return m_rsx; } std::function<cpu_thread*()> debugger_frame::make_check_cpu(cpu_thread* cpu, bool unlocked) { 
constexpr cpu_thread* null_cpu = nullptr; if (Emu.IsStopped()) { return []() { return null_cpu; }; } const auto type = cpu ? cpu->get_class() : thread_class::general; std::shared_ptr<cpu_thread> shared; if (g_fxo->is_init<id_manager::id_map<named_thread<ppu_thread>>>() && g_fxo->is_init<id_manager::id_map<named_thread<spu_thread>>>()) { if (unlocked) { if (type == thread_class::ppu) { shared = idm::get_unlocked<named_thread<ppu_thread>>(cpu->id); } else if (type == thread_class::spu) { shared = idm::get_unlocked<named_thread<spu_thread>>(cpu->id); } } else { if (type == thread_class::ppu) { shared = idm::get<named_thread<ppu_thread>>(cpu->id); } else if (type == thread_class::spu) { shared = idm::get<named_thread<spu_thread>>(cpu->id); } } } if (type == thread_class::rsx) { if (g_fxo->try_get<rsx::thread>() != cpu) { return []() { return null_cpu; }; } } else if (!shared || shared.get() != cpu) { return []() { return null_cpu; }; } return [cpu, type, shared = std::move(shared), emulation_id = Emu.GetEmulationIdentifier()]() mutable -> cpu_thread* { if (emulation_id != Emu.GetEmulationIdentifier() || Emu.IsStopped()) { // Invalidate all data after Emu.Kill() shared.reset(); cpu = nullptr; return nullptr; } if (type == thread_class::ppu || type == thread_class::spu) { // SPU and PPU if (!shared || shared->state.all_of(cpu_flag::exit + cpu_flag::wait)) { shared.reset(); return nullptr; } return shared.get(); } // RSX const auto rsx = g_fxo->try_get<rsx::thread>(); if (cpu) { if (rsx != cpu || !rsx->ctrl || rsx->state.all_of(cpu_flag::wait + cpu_flag::exit)) { cpu = nullptr; return nullptr; } } return cpu; }; } void debugger_frame::UpdateUI() { const auto cpu = get_cpu(); // Refresh at a high rate during initialization (looks weird otherwise) if (m_ui_update_ctr % (cpu || m_ui_update_ctr < 200 || m_debugger_list->m_dirty_flag ? 
5 : 50) == 0) { // If no change to instruction position happened, update instruction list at 20hz ShowPC(); } if (m_ui_update_ctr % 20 == 0 && !m_thread_list_pending_update) { // Update threads list at 5hz (low priority) UpdateUnitList(); } if (!cpu) { if (m_last_pc != umax || !m_last_query_state.empty()) { if (m_ui_update_ctr % 20 && !m_thread_list_pending_update) { // Update threads list (thread exited) UpdateUnitList(); } ShowPC(); m_last_query_state.clear(); m_last_pc = -1; DoUpdate(); } } else if (m_ui_update_ctr % 5 == 0 || m_ui_update_ctr < m_ui_fast_update_permission_deadline) { const auto cia = cpu->get_pc(); const auto size_context = cpu->get_class() == thread_class::ppu ? sizeof(ppu_thread) : cpu->get_class() == thread_class::spu ? sizeof(spu_thread) : sizeof(cpu_thread); if (m_last_pc != cia || m_last_query_state.size() != size_context || std::memcmp(m_last_query_state.data(), static_cast<void *>(cpu), size_context)) { // Copy thread data m_last_query_state.resize(size_context); std::memcpy(m_last_query_state.data(), static_cast<void *>(cpu), size_context); m_last_pc = cia; DoUpdate(); const bool paused = !!(cpu->state & s_pause_flags); if (paused) { m_btn_run->setText(RunString); } else { m_btn_run->setText(PauseString); } if (m_ui_update_ctr % 5) { // Call if it hasn't been called before ShowPC(); } if (is_using_interpreter(cpu->get_class())) { m_btn_step->setEnabled(paused); m_btn_step_over->setEnabled(paused); } } } m_ui_update_ctr++; } void debugger_frame::UpdateUnitList() { const u64 emulation_id = static_cast<std::underlying_type_t<Emulator::stop_counter_t>>(Emu.GetEmulationIdentifier()); const u64 threads_created = cpu_thread::g_threads_created; const u64 threads_deleted = cpu_thread::g_threads_deleted; const system_state emu_state = Emu.GetStatus(); std::unique_lock<shared_mutex> lock{id_manager::g_mutex, std::defer_lock}; if (emulation_id == m_emulation_id && threads_created == m_threads_created && threads_deleted == m_threads_deleted && 
emu_state == m_emu_state) { // Nothing to do m_thread_list_pending_update = false; return; } const cpu_thread* old_cpu_ptr = get_cpu(); if (!lock.try_lock()) { m_thread_list_pending_update = true; QTimer::singleShot(5, [this]() { UpdateUnitList(); }); return; } std::vector<std::pair<QString, std::function<cpu_thread*()>>> cpu_list; cpu_list.reserve(threads_created >= threads_deleted ? 0 : threads_created - threads_deleted); usz reselected_index = umax; const auto on_select = [&](u32 id, cpu_thread& cpu) { std::function<cpu_thread*()> func_cpu = make_check_cpu(std::addressof(cpu), true); // Space at the end is to pad a gap on the right cpu_list.emplace_back(qstr((id >> 24 == 0x55 ? "RSX[0x55555555]" : cpu.get_name()) + ' '), std::move(func_cpu)); if (old_cpu_ptr == std::addressof(cpu)) { reselected_index = cpu_list.size() - 1; } }; if (emu_state != system_state::stopped) { idm::select<named_thread<ppu_thread>>(on_select, idm::unlocked); idm::select<named_thread<spu_thread>>(on_select, idm::unlocked); if (const auto render = g_fxo->try_get<rsx::thread>(); render && render->ctrl) { on_select(render->id, *render); } } lock.unlock(); m_emulation_id = emulation_id; m_threads_created = threads_created; m_threads_deleted = threads_deleted; m_emu_state = emu_state; m_thread_list_pending_update = false; { const QSignalBlocker blocker(m_choice_units); m_threads_info.clear(); m_choice_units->clear(); m_threads_info.emplace_back(make_check_cpu(nullptr)); m_choice_units->addItem(NoThreadString); for (auto&& [thread_name, func_cpu] : cpu_list) { m_threads_info.emplace_back(std::move(func_cpu)); m_choice_units->addItem(std::move(thread_name)); } if (reselected_index != umax) { // Include no-thread at index 0 m_choice_units->setCurrentIndex(::narrow<s32>(reselected_index + 1)); } } // Close dialogs which are tied to the specific thread selected if (reselected_index == umax) { if (m_reg_editor) m_reg_editor->close(); if (m_inst_editor) m_inst_editor->close(); if (m_goto_dialog) 
m_goto_dialog->close(); } if (emu_state == system_state::stopped) { ClearBreakpoints(); ClearCallStack(); } OnSelectUnit(); m_choice_units->update(); } void debugger_frame::OnSelectUnit() { if (m_is_spu_disasm_mode) { return; } cpu_thread* selected = nullptr; if (m_emu_state != system_state::stopped) { if (int index = m_choice_units->currentIndex(); index >= 0 && index + 0u < m_threads_info.size()) { selected = ::at32(m_threads_info, index)(); } if (selected && m_cpu.get() == selected) { // They match, nothing to do. return; } if (selected && m_rsx == selected) { return; } if (!selected && !m_rsx && !m_cpu) { return; } } m_disasm.reset(); m_cpu.reset(); m_rsx = nullptr; m_spu_disasm_memory.reset(); if (selected) { const u32 cpu_id = selected->id; switch (cpu_id >> 24) { case 1: { m_cpu = idm::get<named_thread<ppu_thread>>(cpu_id); if (selected == m_cpu.get()) { m_disasm = make_disasm(selected, m_cpu); } break; } case 2: { m_cpu = idm::get<named_thread<spu_thread>>(cpu_id); if (selected == m_cpu.get()) { m_disasm = make_disasm(selected, m_cpu); } break; } case 0x55: { m_rsx = static_cast<rsx::thread*>(selected); if (get_cpu()) { m_disasm = make_disasm(m_rsx, nullptr); } break; } default: break; } } if (!m_disasm) { m_cpu.reset(); m_rsx = nullptr; } EnableButtons(true); m_debugger_list->UpdateCPUData(m_disasm); m_breakpoint_list->UpdateCPUData(m_disasm); ShowPC(true); DoUpdate(); UpdateUI(); } void debugger_frame::OnSelectSPUDisassembler() { if (m_spu_disasm_dialog) { m_spu_disasm_dialog->move(QCursor::pos()); m_spu_disasm_dialog->show(); m_spu_disasm_dialog->setFocus(); return; } m_spu_disasm_dialog = new QDialog(this); m_spu_disasm_dialog->setWindowTitle(tr("SPU Disassmebler Properties")); // Panels QVBoxLayout* vbox_panel(new QVBoxLayout()); QHBoxLayout* hbox_expression_input_panel = new QHBoxLayout(); QHBoxLayout* hbox_button_panel(new QHBoxLayout()); // Address expression input QLineEdit* source_eal(new QLineEdit(m_spu_disasm_dialog)); QLineEdit* start_pc(new 
QLineEdit(m_spu_disasm_dialog)); source_eal->setFont(m_mono); source_eal->setMaxLength(12); source_eal->setValidator(new QRegularExpressionValidator(QRegularExpression("^(0[xX])?0*[a-fA-F0-9]{0,8}$"), this)); start_pc->setFont(m_mono); start_pc->setMaxLength(7); start_pc->setValidator(new QRegularExpressionValidator(QRegularExpression("^(0[xX])?0*[a-fA-F0-9]{0,5}$"), this)); // Ok/Cancel QPushButton* button_ok = new QPushButton(tr("OK")); QPushButton* button_cancel = new QPushButton(tr("Cancel")); hbox_expression_input_panel->addWidget(new QLabel(tr("Source Address: "))); hbox_expression_input_panel->addWidget(source_eal); hbox_expression_input_panel->addSpacing(10); hbox_expression_input_panel->addWidget(new QLabel(tr("Load PC: "))); hbox_expression_input_panel->addWidget(start_pc); hbox_button_panel->addWidget(button_ok); hbox_button_panel->addWidget(button_cancel); vbox_panel->addLayout(hbox_expression_input_panel); vbox_panel->addSpacing(8); vbox_panel->addLayout(hbox_button_panel); m_spu_disasm_dialog->setLayout(vbox_panel); const QFont font = source_eal->font(); source_eal->setPlaceholderText(QString::fromStdString(fmt::format("0x%08x", 0))); start_pc->setPlaceholderText(QString::fromStdString(fmt::format("0x%05x", 0))); source_eal->setFixedWidth(gui::utils::get_label_width(source_eal->placeholderText(), &font) + 5); start_pc->setFixedWidth(gui::utils::get_label_width(start_pc->placeholderText(), &font) + 5); if (m_spu_disasm_origin_eal) { source_eal->setText(QString::fromStdString(fmt::format("0x%08x", m_spu_disasm_origin_eal))); start_pc->setText(QString::fromStdString(fmt::format("0x%05x", m_spu_disasm_pc))); } connect(button_ok, &QAbstractButton::clicked, m_spu_disasm_dialog, &QDialog::accept); connect(button_cancel, &QAbstractButton::clicked, m_spu_disasm_dialog, &QDialog::reject); m_spu_disasm_dialog->move(QCursor::pos()); m_spu_disasm_dialog->setAttribute(Qt::WA_DeleteOnClose); connect(m_spu_disasm_dialog, &QDialog::finished, this, [this, source_eal, 
start_pc](int result) { m_spu_disasm_dialog = nullptr; if (result != QDialog::Accepted) { return; } const u64 spu_base = EvaluateExpression(start_pc->text()); if (spu_base > SPU_LS_SIZE - 4 || spu_base % 4) { return; } const u64 spu_addr = EvaluateExpression(source_eal->text()); if (spu_addr == umax || !spu_addr) { return; } // Try to load as much memory as possible until SPU local memory ends // Because I don't think there is a need for a size argument // The user probably does not know the exact size of the SPU code either u32 spu_size = SPU_LS_SIZE - spu_base; for (u32 passed = spu_base; passed < SPU_LS_SIZE; passed += 4096) { if (!vm::check_addr(spu_addr + passed)) { if (passed == spu_base) { return; } spu_size = passed - spu_base - (spu_addr + passed) % 4096; break; } if (4096 > ~(spu_addr + passed)) { // For overflow spu_size = std::min<u32>(SPU_LS_SIZE, 0 - spu_addr); break; } } m_disasm.reset(); m_cpu.reset(); m_rsx = nullptr; m_spu_disasm_memory = std::make_shared<utils::shm>(SPU_LS_SIZE); m_spu_disasm_memory->map_self(); m_is_spu_disasm_mode = true; std::memset(m_spu_disasm_memory->get(), 0, spu_base); std::memcpy(m_spu_disasm_memory->get() + spu_base, vm::get_super_ptr(spu_addr), spu_size); std::memset(m_spu_disasm_memory->get() + spu_base + spu_size, 0, SPU_LS_SIZE - (spu_base + spu_size)); m_spu_disasm_pc = spu_base; m_spu_disasm_origin_eal = spu_addr; m_disasm = std::make_shared<SPUDisAsm>(cpu_disasm_mode::interpreter, m_spu_disasm_memory->get()); EnableButtons(true); m_debugger_list->UpdateCPUData(m_disasm); m_breakpoint_list->UpdateCPUData(m_disasm); ShowPC(true); DoUpdate(); UpdateUI(); }); m_spu_disasm_dialog->show(); } void debugger_frame::DoUpdate() { // Check if we need to disable a step over bp if (const auto cpu0 = get_cpu(); cpu0 && m_last_step_over_breakpoint != umax && cpu0->get_pc() == m_last_step_over_breakpoint) { m_ppu_breakpoint_handler->RemoveBreakpoint(m_last_step_over_breakpoint); m_last_step_over_breakpoint = -1; } WritePanels(); 
} void debugger_frame::WritePanels() { const auto cpu = get_cpu(); if (!cpu) { m_misc_state->clear(); m_regs->clear(); ClearCallStack(); return; } int loc = m_misc_state->verticalScrollBar()->value(); int hloc = m_misc_state->horizontalScrollBar()->value(); m_misc_state->clear(); m_misc_state->setPlainText(qstr(cpu->dump_misc())); m_misc_state->verticalScrollBar()->setValue(loc); m_misc_state->horizontalScrollBar()->setValue(hloc); loc = m_regs->verticalScrollBar()->value(); hloc = m_regs->horizontalScrollBar()->value(); m_regs->clear(); m_last_reg_state.clear(); cpu->dump_regs(m_last_reg_state, m_dump_reg_func_data); m_regs->setPlainText(qstr(m_last_reg_state)); m_regs->verticalScrollBar()->setValue(loc); m_regs->horizontalScrollBar()->setValue(hloc); Q_EMIT CallStackUpdateRequested(cpu->dump_callstack_list()); } void debugger_frame::ShowGotoAddressDialog() { if (m_goto_dialog) { m_goto_dialog->move(QCursor::pos()); m_goto_dialog->show(); m_goto_dialog->setFocus(); return; } m_goto_dialog = new QDialog(this); m_goto_dialog->setWindowTitle(tr("Go To Address")); // Panels QVBoxLayout* vbox_panel(new QVBoxLayout()); QHBoxLayout* hbox_expression_input_panel = new QHBoxLayout(); QHBoxLayout* hbox_button_panel(new QHBoxLayout()); // Address expression input QLineEdit* expression_input(new QLineEdit(m_goto_dialog)); expression_input->setFont(m_mono); expression_input->setMaxLength(18); if (const auto thread = get_cpu(); !thread || thread->get_class() != thread_class::spu) { expression_input->setValidator(new QRegularExpressionValidator(QRegularExpression("^(0[xX])?0*[a-fA-F0-9]{0,8}$"), this)); } else { expression_input->setValidator(new QRegularExpressionValidator(QRegularExpression("^(0[xX])?0*[a-fA-F0-9]{0,5}$"), this)); } // Ok/Cancel QPushButton* button_ok = new QPushButton(tr("OK")); QPushButton* button_cancel = new QPushButton(tr("Cancel")); hbox_expression_input_panel->addWidget(expression_input); hbox_button_panel->addWidget(button_ok); 
hbox_button_panel->addWidget(button_cancel); vbox_panel->addLayout(hbox_expression_input_panel); vbox_panel->addSpacing(8); vbox_panel->addLayout(hbox_button_panel); m_goto_dialog->setLayout(vbox_panel); const auto cpu_check = make_check_cpu(get_cpu()); const auto cpu = cpu_check(); const QFont font = expression_input->font(); // -1 from get_pc() turns into 0 const u32 pc = cpu ? utils::align<u32>(cpu->get_pc(), 4) : 0; expression_input->setPlaceholderText(QString("0x%1").arg(pc, 16, 16, QChar('0'))); expression_input->setFixedWidth(gui::utils::get_label_width(expression_input->placeholderText(), &font) + 5); connect(button_ok, &QAbstractButton::clicked, m_goto_dialog, &QDialog::accept); connect(button_cancel, &QAbstractButton::clicked, m_goto_dialog, &QDialog::reject); m_goto_dialog->move(QCursor::pos()); m_goto_dialog->setAttribute(Qt::WA_DeleteOnClose); connect(m_goto_dialog, &QDialog::finished, this, [this, cpu, cpu_check, expression_input](int result) { // Check if the thread has not been destroyed and is still the focused since // This also works if no thread is selected and has been selected before if (result == QDialog::Accepted && cpu == get_cpu() && cpu == cpu_check()) { PerformGoToRequest(expression_input->text()); } m_goto_dialog = nullptr; }); m_goto_dialog->show(); } void debugger_frame::PerformGoToRequest(const QString& text_argument) { const bool asterisk_prefixed = text_argument.startsWith(QChar('*')); const u64 address = EvaluateExpression(asterisk_prefixed ? 
text_argument.sliced(1, text_argument.size() - 1) : text_argument); if (address != umax) { // Try to read from OPD entry if prefixed by asterisk if (asterisk_prefixed) { if (auto cpu = get_cpu()) { if (cpu->try_get<ppu_thread>()) { const vm::ptr<u32> func_ptr = vm::cast(static_cast<u32>(address)); be_t<u32> code_addr{}; if (func_ptr.try_read(code_addr)) { m_debugger_list->ShowAddress(code_addr, true); } } } return; } m_debugger_list->ShowAddress(static_cast<u32>(address), true); } } void debugger_frame::PerformGoToThreadRequest(const QString& text_argument) { const u64 thread_id = EvaluateExpression(text_argument); if (thread_id != umax) { for (usz i = 0; i < m_threads_info.size(); i++) { if (cpu_thread* ptr = m_threads_info[i](); ptr && ptr->id == thread_id) { // Success m_choice_units->setCurrentIndex(::narrow<s32>(i)); return; } } } } void debugger_frame::PerformAddBreakpointRequest(u32 addr) { m_debugger_list->BreakpointRequested(addr, true); } u64 debugger_frame::EvaluateExpression(const QString& expression) { bool ok = false; // Parse expression (or at least used to, was nuked to remove the need for QtJsEngine) const QString fixed_expression = QRegularExpression(QRegularExpression::anchoredPattern("a .*|^[A-Fa-f0-9]+$")).match(expression).hasMatch() ? "0x" + expression : expression; const u64 res = static_cast<u64>(fixed_expression.toULong(&ok, 16)); if (ok) return res; if (const auto thread = get_cpu(); thread && expression.isEmpty()) return thread->get_pc(); return umax; } void debugger_frame::ClearBreakpoints() const { m_breakpoint_list->ClearBreakpoints(); } void debugger_frame::ClearCallStack() { Q_EMIT CallStackUpdateRequested({}); } void debugger_frame::ShowPC(bool user_requested) { const auto cpu0 = get_cpu(); const u32 pc = (cpu0 ? cpu0->get_pc() : (m_is_spu_disasm_mode ? 
m_spu_disasm_pc : 0)); if (user_requested) { m_debugger_list->EnableThreadFollowing(); } m_debugger_list->ShowAddress(pc, false); } void debugger_frame::DoStep(bool step_over) { if (const auto cpu = get_cpu()) { bool should_step_over = step_over && cpu->get_class() == thread_class::ppu; // If stepping over, lay at the same spot and wait for the thread to finish the call // If not, fixate on the current pointed instruction m_debugger_list->EnableThreadFollowing(!should_step_over); if (should_step_over) { m_debugger_list->ShowAddress(cpu->get_pc() + 4, false); } if (step_over && cpu->get_class() == thread_class::rsx) { const bool was_paused = cpu->is_paused(); static_cast<rsx::thread*>(cpu)->pause_on_draw = true; if (was_paused) { RunBtnPress(); m_debugger_list->EnableThreadFollowing(true); } return; } if (const auto _state = +cpu->state; _state & s_pause_flags && _state & cpu_flag::wait && !(_state & cpu_flag::dbg_step)) { if (should_step_over && cpu->get_class() == thread_class::ppu) { const u32 current_instruction_pc = cpu->get_pc(); vm::ptr<u32> inst_ptr = vm::cast(current_instruction_pc); be_t<u32> result{}; if (inst_ptr.try_read(result)) { ppu_opcode_t ppu_op{result}; const ppu_itype::type itype = g_ppu_itype.decode(ppu_op.opcode); should_step_over = (itype & ppu_itype::branch && ppu_op.lk); } } if (should_step_over) { const u32 current_instruction_pc = cpu->get_pc(); // Set breakpoint on next instruction const u32 next_instruction_pc = current_instruction_pc + 4; m_ppu_breakpoint_handler->AddBreakpoint(next_instruction_pc); // Undefine previous step over breakpoint if it hasn't been already // This can happen when the user steps over a branch that doesn't return to itself if (m_last_step_over_breakpoint != umax) { m_ppu_breakpoint_handler->RemoveBreakpoint(next_instruction_pc); } m_last_step_over_breakpoint = next_instruction_pc; } cpu->state.atomic_op([&](bs_t<cpu_flag>& state) { state -= s_pause_flags; if (!should_step_over) { if (u32* ptr = cpu->get_pc2()) 
{ state += cpu_flag::dbg_step; *ptr = cpu->get_pc(); } } }); cpu->state.notify_one(); } } // Tighten up, put the debugger on a wary watch over any thread info changes if there aren't any // This allows responsive debugger interaction m_ui_fast_update_permission_deadline = m_ui_update_ctr + 5; } void debugger_frame::EnableUpdateTimer(bool enable) const { if (m_update->isActive() == enable) { return; } enable ? m_update->start(10) : m_update->stop(); } void debugger_frame::RunBtnPress() { if (const auto cpu = get_cpu()) { // If paused, unpause. // If not paused, add dbg_pause. const auto old = cpu->state.fetch_op([](bs_t<cpu_flag>& state) { if (state & s_pause_flags) { state -= s_pause_flags; } else { state += cpu_flag::dbg_pause; } }); // Notify only if no pause flags are set after this change if (old & s_pause_flags) { if (g_debugger_pause_all_threads_on_bp && Emu.IsPaused() && (old & s_pause_flags) == s_pause_flags) { // Resume all threads were paused by this breakpoint Emu.Resume(); } cpu->state.notify_one(); m_debugger_list->EnableThreadFollowing(); } } // Tighten up, put the debugger on a wary watch over any thread info changes if there aren't any // This allows responsive debugger interaction m_ui_fast_update_permission_deadline = m_ui_update_ctr + 5; } void debugger_frame::EnableButtons(bool enable) { const auto cpu = get_cpu(); if (!cpu) enable = false; const bool step = enable && is_using_interpreter(cpu->get_class()); m_go_to_addr->setEnabled(enable); m_go_to_pc->setEnabled(enable); m_btn_step->setEnabled(step); m_btn_step_over->setEnabled(step); m_btn_run->setEnabled(enable); }
47,123
C++
.cpp
1,399
30.340243
175
0.681868
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
5,118
shortcut_utils.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/shortcut_utils.cpp
#include "stdafx.h" #include "shortcut_utils.h" #include "qt_utils.h" #include "Emu/system_utils.hpp" #include "Emu/VFS.h" #include "Emu/vfs_config.h" #include "Utilities/File.h" #include "Utilities/StrUtil.h" #ifdef _WIN32 #include <Windows.h> #include <shlobj.h> #include <winnls.h> #include <shobjidl.h> #include <objbase.h> #include <objidl.h> #include <shlguid.h> #include <comdef.h> #else #include <sys/stat.h> #include <errno.h> #endif #include <QFile> #include <QImageWriter> #include <QPixmap> #include <QStandardPaths> LOG_CHANNEL(sys_log, "SYS"); namespace gui::utils { bool create_square_shortcut_icon_file(const std::string& src_icon_path, const std::string& target_icon_dir, std::string& target_icon_path, const std::string& extension, int size) { if (src_icon_path.empty() || target_icon_dir.empty() || extension.empty()) { sys_log.error("Failed to create shortcut. Icon parameters empty."); return false; } QPixmap icon(QString::fromStdString(src_icon_path)); if (!gui::utils::create_square_pixmap(icon, size)) { sys_log.error("Failed to create shortcut. Icon empty."); return false; } target_icon_path = target_icon_dir + "shortcut." 
+ fmt::to_lower(extension); QFile icon_file(QString::fromStdString(target_icon_path)); if (!icon_file.open(QFile::OpenModeFlag::ReadWrite | QFile::OpenModeFlag::Truncate)) { sys_log.error("Failed to create icon file '%s': %s", target_icon_path, icon_file.errorString()); return false; } // Use QImageWriter instead of QPixmap::save in order to be able to log errors if (QImageWriter writer(&icon_file, fmt::to_upper(extension).c_str()); !writer.write(icon.toImage())) { sys_log.error("Failed to write icon file '%s': %s", target_icon_path, writer.errorString()); return false; } icon_file.close(); return true; } bool create_shortcut(const std::string& name, [[maybe_unused]] const std::string& serial, [[maybe_unused]] const std::string& target_cli_args, [[maybe_unused]] const std::string& description, [[maybe_unused]] const std::string& src_icon_path, [[maybe_unused]] const std::string& target_icon_dir, shortcut_location location) { if (name.empty()) { sys_log.error("Cannot create shortcuts without a name"); return false; } // Remove illegal characters from filename const std::string simple_name = QString::fromStdString(vfs::escape(name, true)).simplified().toStdString(); if (simple_name.empty() || simple_name == "." || simple_name == "..") { sys_log.error("Failed to create shortcut: Cleaned file name empty or not allowed"); return false; } std::string link_path; if (location == shortcut_location::desktop) { link_path = QStandardPaths::writableLocation(QStandardPaths::StandardLocation::DesktopLocation).toStdString(); } else if (location == shortcut_location::applications) { link_path = QStandardPaths::writableLocation(QStandardPaths::StandardLocation::ApplicationsLocation).toStdString(); } #ifdef _WIN32 else if (location == shortcut_location::rpcs3_shortcuts) { link_path = rpcs3::utils::get_games_dir() + "/shortcuts/"; fs::create_dir(link_path); } #endif if (!fs::is_dir(link_path) && !fs::create_dir(link_path)) { sys_log.error("Failed to create shortcut. 
Folder does not exist: %s", link_path); return false; } if (location == shortcut_location::applications) { link_path += "/RPCS3"; if (!fs::create_path(link_path)) { sys_log.error("Failed to create shortcut. Could not create path: %s (%s)", link_path, fs::g_tls_error); return false; } } #ifdef _WIN32 const auto str_error = [](HRESULT hr) -> std::string { _com_error err(hr); const TCHAR* errMsg = err.ErrorMessage(); return fmt::format("%s [%d]", wchar_to_utf8(errMsg), hr); }; fmt::append(link_path, "/%s.lnk", simple_name); sys_log.notice("Creating shortcut '%s' with arguments '%s' and .ico dir '%s'", link_path, target_cli_args, target_icon_dir); // https://stackoverflow.com/questions/3906974/how-to-programmatically-create-a-shortcut-using-win32 HRESULT res = CoInitialize(NULL); if (FAILED(res)) { sys_log.error("Failed to create shortcut: CoInitialize failed (%s)", str_error(res)); return false; } IShellLink* pShellLink = nullptr; IPersistFile* pPersistFile = nullptr; const auto cleanup = [&](bool return_value, const std::string& fail_reason) -> bool { if (!return_value) sys_log.error("Failed to create shortcut: %s", fail_reason); if (pPersistFile) pPersistFile->Release(); if (pShellLink) pShellLink->Release(); CoUninitialize(); return return_value; }; res = CoCreateInstance(CLSID_ShellLink, NULL, CLSCTX_INPROC_SERVER, IID_PPV_ARGS(&pShellLink)); if (FAILED(res)) return cleanup(false, "CoCreateInstance failed"); const std::string working_dir{ fs::get_executable_dir() }; const std::string rpcs3_path{ working_dir + "rpcs3.exe" }; const std::wstring w_target_file = utf8_to_wchar(rpcs3_path); res = pShellLink->SetPath(w_target_file.c_str()); if (FAILED(res)) return cleanup(false, fmt::format("SetPath failed (%s)", str_error(res))); const std::wstring w_working_dir = utf8_to_wchar(working_dir); res = pShellLink->SetWorkingDirectory(w_working_dir.c_str()); if (FAILED(res)) return cleanup(false, fmt::format("SetWorkingDirectory failed (%s)", str_error(res))); if 
(!target_cli_args.empty()) { const std::wstring w_target_cli_args = utf8_to_wchar(target_cli_args); res = pShellLink->SetArguments(w_target_cli_args.c_str()); if (FAILED(res)) return cleanup(false, fmt::format("SetArguments failed (%s)", str_error(res))); } if (!description.empty()) { const std::wstring w_descpription = utf8_to_wchar(description); res = pShellLink->SetDescription(w_descpription.c_str()); if (FAILED(res)) return cleanup(false, fmt::format("SetDescription failed (%s)", str_error(res))); } if (!src_icon_path.empty() && !target_icon_dir.empty()) { std::string target_icon_path; if (!create_square_shortcut_icon_file(src_icon_path, target_icon_dir, target_icon_path, "ico", 512)) return cleanup(false, ".ico creation failed"); const std::wstring w_icon_path = utf8_to_wchar(target_icon_path); res = pShellLink->SetIconLocation(w_icon_path.c_str(), 0); if (FAILED(res)) return cleanup(false, fmt::format("SetIconLocation failed (%s)", str_error(res))); } // Use the IPersistFile object to save the shell link res = pShellLink->QueryInterface(IID_PPV_ARGS(&pPersistFile)); if (FAILED(res)) return cleanup(false, fmt::format("QueryInterface failed (%s)", str_error(res))); // Save shortcut const std::wstring w_link_file = utf8_to_wchar(link_path); res = pPersistFile->Save(w_link_file.c_str(), TRUE); if (FAILED(res)) { if (location == shortcut_location::desktop) { return cleanup(false, fmt::format("Saving file to desktop failed (%s)", str_error(res))); } else { return cleanup(false, fmt::format("Saving file to start menu failed (%s)", str_error(res))); } } return cleanup(true, {}); #elif defined(__APPLE__) fmt::append(link_path, "/%s.app", simple_name); const std::string contents_dir = link_path + "/Contents/"; const std::string macos_dir = contents_dir + "MacOS/"; const std::string resources_dir = contents_dir + "Resources/"; if (!fs::create_path(contents_dir) || !fs::create_path(macos_dir) || !fs::create_path(resources_dir)) { sys_log.error("Failed to create shortcut. 
Could not create app bundle structure. (%s)", fs::g_tls_error); return false; } const std::string plist_path = contents_dir + "Info.plist"; const std::string launcher_path = macos_dir + "launcher"; std::string launcher_content; fmt::append(launcher_content, "#!/bin/bash\nopen -b net.rpcs3.rpcs3 --args %s", target_cli_args); fs::file launcher_file(launcher_path, fs::read + fs::rewrite); if (!launcher_file) { sys_log.error("Failed to create launcher file: %s (%s)", launcher_path, fs::g_tls_error); return false; } if (launcher_file.write(launcher_content.data(), launcher_content.size()) != launcher_content.size()) { sys_log.error("Failed to write launcher file: %s", launcher_path); return false; } launcher_file.close(); if (chmod(launcher_path.c_str(), S_IRWXU) != 0) { sys_log.error("Failed to change file permissions for launcher file: %s (%d)", strerror(errno), errno); return false; } const std::string plist_content = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" "<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n" "<plist version=\"1.0\">\n" "<dict>\n" "\t<key>CFBundleExecutable</key>\n" "\t<string>launcher</string>\n" "\t<key>CFBundleIconFile</key>\n" "\t<string>shortcut.icns</string>\n" "\t<key>CFBundleInfoDictionaryVersion</key>\n" "\t<string>1.0</string>\n" "\t<key>CFBundlePackageType</key>\n" "\t<string>APPL</string>\n" "\t<key>CFBundleSignature</key>\n" "\t<string>\?\?\?\?</string>\n" #if defined(ARCH_ARM64) "\t<key>CFBundleIdentifier</key>\n" "\t<string>net.rpcs3" + (serial.empty() ? "" : ("." 
+ serial)) + "</string>\n" "\t<key>LSArchitecturePriority</key>\n" "\t<array>\n" "\t\t<string>arm64</string>\n" "\t</array>\n" "\t<key>LSRequiresNativeExecution</key>\n" "\t<true/>\n" #endif "</dict>\n" "</plist>\n"; fs::file plist_file(plist_path, fs::read + fs::rewrite); if (!plist_file) { sys_log.error("Failed to create plist file: %s (%s)", plist_path, fs::g_tls_error); return false; } if (plist_file.write(plist_content.data(), plist_content.size()) != plist_content.size()) { sys_log.error("Failed to write plist file: %s", plist_path); return false; } plist_file.close(); if (!src_icon_path.empty()) { std::string target_icon_path = resources_dir; if (!create_square_shortcut_icon_file(src_icon_path, resources_dir, target_icon_path, "icns", 512)) { // Error is logged in create_square_shortcut_icon_file return false; } } return true; #else const std::string exe_path = fs::get_executable_path(); if (exe_path.empty()) { sys_log.error("Failed to create shortcut. Executable path empty."); return false; } fmt::append(link_path, "/%s.desktop", simple_name); std::string file_content; fmt::append(file_content, "[Desktop Entry]\n"); fmt::append(file_content, "Encoding=UTF-8\n"); fmt::append(file_content, "Version=1.0\n"); fmt::append(file_content, "Type=Application\n"); fmt::append(file_content, "Terminal=false\n"); fmt::append(file_content, "Exec=\"%s\" %s\n", exe_path, target_cli_args); fmt::append(file_content, "Name=%s\n", name); fmt::append(file_content, "Categories=Application;Game\n"); if (!description.empty()) { fmt::append(file_content, "Comment=%s\n", QString::fromStdString(description).simplified()); } if (!src_icon_path.empty() && !target_icon_dir.empty()) { std::string target_icon_path; if (!create_square_shortcut_icon_file(src_icon_path, target_icon_dir, target_icon_path, "png", 512)) { // Error is logged in create_square_shortcut_icon_file return false; } fmt::append(file_content, "Icon=%s\n", target_icon_path); } fs::file shortcut_file(link_path, fs::read + 
fs::rewrite); if (!shortcut_file) { sys_log.error("Failed to create .desktop file: %s (%s)", link_path, fs::g_tls_error); return false; } if (shortcut_file.write(file_content.data(), file_content.size()) != file_content.size()) { sys_log.error("Failed to write .desktop file: %s", link_path); return false; } shortcut_file.close(); if (location == shortcut_location::desktop) { if (chmod(link_path.c_str(), S_IRWXU) != 0) // enables user to execute file { // Simply log failure. At least we have the file. sys_log.error("Failed to change file permissions for .desktop file: %s (%d)", strerror(errno), errno); } } return true; #endif } }
12,319
C++
.cpp
323
34.105263
179
0.671579
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
5,119
memory_viewer_panel.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/memory_viewer_panel.cpp
#include "Utilities/mutex.h" #include "Emu/Memory/vm_locking.h" #include "Emu/Memory/vm.h" #include "memory_viewer_panel.h" #include "Emu/Cell/SPUThread.h" #include "Emu/CPU/CPUDisAsm.h" #include "Emu/Cell/SPUDisAsm.h" #include "Emu/RSX/RSXThread.h" #include "Emu/RSX/rsx_utils.h" #include "Emu/IdManager.h" #include <QVBoxLayout> #include <QPushButton> #include <QSpinBox> #include <QGroupBox> #include <QTextEdit> #include <QComboBox> #include <QCheckBox> #include <QWheelEvent> #include <QHoverEvent> #include <QMouseEvent> #include <QTimer> #include <QThread> #include <QKeyEvent> #include "util/logs.hpp" #include "util/asm.hpp" #include "util/vm.hpp" LOG_CHANNEL(gui_log, "GUI"); constexpr auto qstr = QString::fromStdString; memory_viewer_panel::memory_viewer_panel(QWidget* parent, std::shared_ptr<CPUDisAsm> disasm, u32 addr, std::function<cpu_thread*()> func) : QDialog(parent) , m_addr(addr) , m_get_cpu(std::move(func)) , m_type([&]() { const auto cpu = m_get_cpu(); if (!cpu) return thread_class::general; thread_class type = cpu->get_class(); switch (type) { case thread_class::ppu: case thread_class::spu: case thread_class::rsx: break; default: fmt::throw_exception("Unknown CPU type (0x%x)", cpu->id_type()); } return type; }()) , m_rsx(m_type == thread_class::rsx ? static_cast<rsx::thread*>(m_get_cpu()) : nullptr) , m_spu_shm([&]() { const auto cpu = m_get_cpu(); return cpu && m_type == thread_class::spu ? static_cast<spu_thread*>(cpu)->shm : nullptr; }()) , m_addr_mask(m_type == thread_class::spu ? SPU_LS_SIZE - 1 : ~0) , m_disasm(std::move(disasm)) { const auto cpu = m_get_cpu(); setWindowTitle( cpu && m_type == thread_class::spu ? tr("Memory Viewer Of %0").arg(qstr(cpu->get_name())) : cpu && m_type == thread_class::rsx ? 
tr("Memory Viewer Of RSX[0x55555555]") : tr("Memory Viewer")); setObjectName("memory_viewer"); m_colcount = 4; m_rowcount = 1; const int pSize = 10; // Font QFont mono = QFontDatabase::systemFont(QFontDatabase::FixedFont); mono.setPointSize(pSize); m_fontMetrics = new QFontMetrics(mono); // Layout: QVBoxLayout* vbox_panel = new QVBoxLayout(this); // Tools QHBoxLayout* hbox_tools = new QHBoxLayout(this); // Tools: Memory Viewer Options QGroupBox* tools_mem = new QGroupBox(tr("Memory Viewer Options"), this); QHBoxLayout* hbox_tools_mem = new QHBoxLayout(this); // Tools: Memory Viewer Options: Address QGroupBox* tools_mem_addr = new QGroupBox(tr("Address"), this); QHBoxLayout* hbox_tools_mem_addr = new QHBoxLayout(this); m_addr_line = new QLineEdit(this); m_addr_line->setPlaceholderText("00000000"); m_addr_line->setFont(mono); m_addr_line->setMaxLength(18); m_addr_line->setFixedWidth(75); m_addr_line->setFocus(); m_addr_line->setValidator(new QRegularExpressionValidator(QRegularExpression(m_type == thread_class::spu ? 
"^(0[xX])?0*[a-fA-F0-9]{0,5}$" : "^(0[xX])?0*[a-fA-F0-9]{0,8}$"), this)); hbox_tools_mem_addr->addWidget(m_addr_line); tools_mem_addr->setLayout(hbox_tools_mem_addr); // Tools: Memory Viewer Options: Words QGroupBox* tools_mem_words = new QGroupBox(tr("Words"), this); QHBoxLayout* hbox_tools_mem_words = new QHBoxLayout(); class words_spin_box : public QSpinBox { public: words_spin_box(QWidget* parent = nullptr) : QSpinBox(parent) {} ~words_spin_box() override {} private: int valueFromText(const QString &text) const override { return std::countr_zero(text.toULong()); } QString textFromValue(int value) const override { return tr("%0").arg(1 << value); } }; words_spin_box* sb_words = new words_spin_box(this); sb_words->setRange(0, 2); sb_words->setValue(2); hbox_tools_mem_words->addWidget(sb_words); tools_mem_words->setLayout(hbox_tools_mem_words); // Tools: Memory Viewer Options: Control QGroupBox* tools_mem_buttons = new QGroupBox(tr("Control")); QHBoxLayout* hbox_tools_mem_buttons = new QHBoxLayout(this); QPushButton* b_fprev = new QPushButton("<<", this); QPushButton* b_prev = new QPushButton("<", this); QPushButton* b_next = new QPushButton(">", this); QPushButton* b_fnext = new QPushButton(">>", this); b_fprev->setFixedWidth(20); b_prev->setFixedWidth(20); b_next->setFixedWidth(20); b_fnext->setFixedWidth(20); b_fprev->setAutoDefault(false); b_prev->setAutoDefault(false); b_next->setAutoDefault(false); b_fnext->setAutoDefault(false); hbox_tools_mem_buttons->addWidget(b_fprev); hbox_tools_mem_buttons->addWidget(b_prev); hbox_tools_mem_buttons->addWidget(b_next); hbox_tools_mem_buttons->addWidget(b_fnext); tools_mem_buttons->setLayout(hbox_tools_mem_buttons); QGroupBox* tools_mem_refresh = new QGroupBox(tr("Refresh")); QHBoxLayout* hbox_tools_mem_refresh = new QHBoxLayout(this); QPushButton* button_auto_refresh = new QPushButton(QStringLiteral(" "), this); button_auto_refresh->setFixedWidth(20); button_auto_refresh->setAutoDefault(false); 
hbox_tools_mem_refresh->addWidget(button_auto_refresh); tools_mem_refresh->setLayout(hbox_tools_mem_refresh); // Merge Tools: Memory Viewer hbox_tools_mem->addWidget(tools_mem_addr); hbox_tools_mem->addWidget(tools_mem_words); hbox_tools_mem->addWidget(tools_mem_buttons); hbox_tools_mem->addWidget(tools_mem_refresh); tools_mem->setLayout(hbox_tools_mem); // Tools: Raw Image Preview Options QGroupBox* tools_img = new QGroupBox(tr("Raw Image Preview Options"), this); QHBoxLayout* hbox_tools_img = new QHBoxLayout(this); // Tools: Raw Image Preview Options : Size QGroupBox* tools_img_size = new QGroupBox(tr("Size"), this); QHBoxLayout* hbox_tools_img_size = new QHBoxLayout(this); QLabel* l_x = new QLabel(" x "); QSpinBox* sb_img_size_x = new QSpinBox(this); QSpinBox* sb_img_size_y = new QSpinBox(this); sb_img_size_x->setRange(1, m_type == thread_class::spu ? 256 : 4096); sb_img_size_y->setRange(1, m_type == thread_class::spu ? 256 : 4096); sb_img_size_x->setValue(256); sb_img_size_y->setValue(256); hbox_tools_img_size->addWidget(sb_img_size_x); hbox_tools_img_size->addWidget(l_x); hbox_tools_img_size->addWidget(sb_img_size_y); tools_img_size->setLayout(hbox_tools_img_size); // Tools: Raw Image Preview Options: Mode QGroupBox* tools_img_mode = new QGroupBox(tr("Mode"), this); QHBoxLayout* hbox_tools_img_mode = new QHBoxLayout(this); QComboBox* cbox_img_mode = new QComboBox(this); cbox_img_mode->addItem("RGB", QVariant::fromValue(color_format::RGB)); cbox_img_mode->addItem("ARGB", QVariant::fromValue(color_format::ARGB)); cbox_img_mode->addItem("RGBA", QVariant::fromValue(color_format::RGBA)); cbox_img_mode->addItem("ABGR", QVariant::fromValue(color_format::ABGR)); cbox_img_mode->addItem("G8", QVariant::fromValue(color_format::G8)); cbox_img_mode->addItem("G32MAX", QVariant::fromValue(color_format::G32MAX)); cbox_img_mode->setCurrentIndex(1); //ARGB hbox_tools_img_mode->addWidget(cbox_img_mode); tools_img_mode->setLayout(hbox_tools_img_mode); // Merge Tools: Raw Image 
Preview Options hbox_tools_img->addWidget(tools_img_size); hbox_tools_img->addWidget(tools_img_mode); tools_img->setLayout(hbox_tools_img); // Tools: Tool Buttons QGroupBox* tools_buttons = new QGroupBox(tr("Tools"), this); QVBoxLayout* hbox_tools_buttons = new QVBoxLayout(this); QPushButton* b_img = new QPushButton(tr("View\nimage"), this); b_img->setAutoDefault(false); hbox_tools_buttons->addWidget(b_img); tools_buttons->setLayout(hbox_tools_buttons); // Merge Tools = Memory Viewer Options + Raw Image Preview Options + Tool Buttons hbox_tools->addSpacing(20); hbox_tools->addWidget(tools_mem); hbox_tools->addWidget(tools_img); hbox_tools->addWidget(tools_buttons); hbox_tools->addSpacing(20); // Memory Panel: m_hbox_mem_panel = new QHBoxLayout(this); // Memory Panel: Address Panel m_mem_addr = new QLabel(""); QSizePolicy sp_retain = m_mem_addr->sizePolicy(); sp_retain.setRetainSizeWhenHidden(false); m_mem_addr->setSizePolicy(sp_retain); m_mem_addr->setObjectName("memory_viewer_address_panel"); m_mem_addr->setFont(mono); m_mem_addr->setAutoFillBackground(true); m_mem_addr->setTextInteractionFlags(Qt::TextSelectableByMouse | Qt::TextSelectableByKeyboard); m_mem_addr->ensurePolished(); // Memory Panel: Hex Panel m_mem_hex = new QLabel(""); m_mem_hex->setSizePolicy(sp_retain); m_mem_hex->setObjectName("memory_viewer_hex_panel"); m_mem_hex->setFont(mono); m_mem_hex->setAutoFillBackground(true); m_mem_hex->setTextInteractionFlags(Qt::TextSelectableByMouse | Qt::TextSelectableByKeyboard); m_mem_hex->ensurePolished(); // Memory Panel: ASCII Panel m_mem_ascii = new QLabel(""); m_mem_ascii->setSizePolicy(sp_retain); m_mem_ascii->setObjectName("memory_viewer_ascii_panel"); m_mem_ascii->setFont(mono); m_mem_ascii->setAutoFillBackground(true); m_mem_ascii->setTextInteractionFlags(Qt::TextSelectableByMouse | Qt::TextSelectableByKeyboard); m_mem_ascii->ensurePolished(); // Merge Memory Panel: m_hbox_mem_panel->setAlignment(Qt::AlignTop | Qt::AlignHCenter); 
m_hbox_mem_panel->addSpacing(20); m_hbox_mem_panel->addWidget(m_mem_addr); m_hbox_mem_panel->addSpacing(10); m_hbox_mem_panel->addWidget(m_mem_hex); m_hbox_mem_panel->addSpacing(10); m_hbox_mem_panel->addWidget(m_mem_ascii); m_hbox_mem_panel->addSpacing(20); QHBoxLayout* hbox_memory_search = new QHBoxLayout(this); // Set Margins to adjust WindowSize vbox_panel->setContentsMargins(0, 0, 0, 0); hbox_tools->setContentsMargins(0, 0, 0, 0); tools_mem_addr->setContentsMargins(0, 5, 0, 0); tools_mem_words->setContentsMargins(0, 5, 0, 0); tools_mem_buttons->setContentsMargins(0, 5, 0, 0); tools_img_mode->setContentsMargins(0, 5, 0, 0); tools_img_size->setContentsMargins(0, 5, 0, 0); tools_mem->setContentsMargins(0, 5, 0, 0); tools_img->setContentsMargins(0, 5, 0, 0); tools_buttons->setContentsMargins(0, 5, 0, 0); m_hbox_mem_panel->setContentsMargins(0, 0, 0, 0); hbox_memory_search->setContentsMargins(0, 0, 0, 0); if (m_disasm) { // Extract memory view from the disassembler std::tie(m_ptr, m_size) = m_disasm->get_memory_span(); } QGroupBox* group_search = new QGroupBox(tr("Memory Search"), this); QPushButton* button_collapse_viewer = new QPushButton(reinterpret_cast<const char*>(u8"Ʌ"), group_search); button_collapse_viewer->setFixedWidth(QLabel(button_collapse_viewer->text()).sizeHint().width() * 3); m_search_line = new QLineEdit(group_search); m_search_line->setFixedWidth(QLabel(QString("This is the very length of the lineedit due to hidpi reasons.").chopped(4)).sizeHint().width()); m_search_line->setPlaceholderText(tr("Search...")); m_search_line->setMaxLength(4096); QPushButton* button_search = new QPushButton(tr("Search"), group_search); button_search->setEnabled(false); m_chkbox_case_insensitive = new QCheckBox(tr("Case Insensitive"), group_search); m_chkbox_case_insensitive->setCheckable(true); m_chkbox_case_insensitive->setToolTip(tr("When using string mode, the characters' case will not matter both in string and in memory." 
"\nWarning: this may reduce performance of the search.")); m_cbox_input_mode = new QComboBox(group_search); m_cbox_input_mode->addItem(tr("Select search mode(s).."), QVariant::fromValue(+no_mode)); m_cbox_input_mode->addItem(tr("Deselect All Modes"), QVariant::fromValue(+clear_modes)); m_cbox_input_mode->addItem(tr("String"), QVariant::fromValue(+as_string)); m_cbox_input_mode->addItem(tr("HEX bytes/integer"), QVariant::fromValue(+as_hex)); m_cbox_input_mode->addItem(tr("Double"), QVariant::fromValue(+as_f64)); m_cbox_input_mode->addItem(tr("Float"), QVariant::fromValue(+as_f32)); m_cbox_input_mode->addItem(tr("Instruction"), QVariant::fromValue(+as_inst)); m_cbox_input_mode->addItem(tr("RegEx Instruction"), QVariant::fromValue(+as_regex_inst)); QString tooltip = tr("String: search the memory for the specified string." "\nHEX bytes/integer: search the memory for hexadecimal values. Spaces, commas, \"0x\", \"0X\", \"\\x\", \"h\", \"H\" ensure separation of bytes but they are not mandatory." "\nDouble: reinterpret the string as 64-bit precision floating point value. Values are searched for exact representation, meaning -0 != 0." "\nFloat: reinterpret the string as 32-bit precision floating point value. Values are searched for exact representation, meaning -0 != 0." "\nInstruction: search an instruction contains the text of the string." "\nRegEx: search an instruction containing text that matches the regular expression input."); if (m_size != 0x40000/*SPU_LS_SIZE*/) { m_cbox_input_mode->addItem("SPU Instruction", QVariant::fromValue(+as_fake_spu_inst)); m_cbox_input_mode->addItem(tr("SPU RegEx-Instruction"), QVariant::fromValue(+as_regex_fake_spu_inst)); tooltip.append(tr("\nSPU Instruction: Search an SPU instruction contains the text of the string. 
For searching instructions within embedded SPU images.\nTip: SPU floats are commented along forming instructions.")); } connect(m_cbox_input_mode, QOverload<int>::of(&QComboBox::currentIndexChanged), group_search, [this, button_search](int index) { if (index < 1 || m_rsx) { return; } if ((1u << index) == clear_modes) { m_modes = {}; } else { m_modes = search_mode{m_modes | (1 << index)}; } const s32 count = std::popcount(+m_modes); if (count == 0) { button_search->setEnabled(false); m_cbox_input_mode->setItemText(0, tr("Select search mode(s)..")); } else { button_search->setEnabled(true); m_cbox_input_mode->setItemText(0, tr("%0 mode(s) selected").arg(count)); } for (u32 i = search_mode_last / 2; i > clear_modes; i /= 2) { if (i & m_modes && count > 1) { m_cbox_input_mode->setItemText(std::countr_zero<u32>(i), qstr(fmt::format("* %s", search_mode{i}))); } else { m_cbox_input_mode->setItemText(std::countr_zero<u32>(i), qstr(fmt::format("%s", search_mode{i}))); } } if (count != 1) { m_cbox_input_mode->setCurrentIndex(0); } }); m_cbox_input_mode->setToolTip(tooltip); QVBoxLayout* vbox_search_layout = new QVBoxLayout(group_search); QHBoxLayout* hbox_search_panel = new QHBoxLayout(group_search); QHBoxLayout* hbox_search_modes = new QHBoxLayout(group_search); hbox_search_panel->addWidget(button_collapse_viewer); hbox_search_panel->addWidget(m_search_line); hbox_search_panel->addWidget(m_cbox_input_mode); hbox_search_panel->addWidget(m_chkbox_case_insensitive); hbox_search_panel->addWidget(button_search); vbox_search_layout->addLayout(hbox_search_panel); vbox_search_layout->addLayout(hbox_search_modes); group_search->setLayout(vbox_search_layout); hbox_memory_search->setAlignment(Qt::AlignHCenter | Qt::AlignBottom); hbox_memory_search->addSpacing(20); hbox_memory_search->addWidget(group_search); hbox_memory_search->addSpacing(20); // Merge and display everything vbox_panel->addSpacing(10); auto get_row = [row = 0]() mutable { return row++; }; 
vbox_panel->addLayout(hbox_tools, get_row()); vbox_panel->addSpacing(5); vbox_panel->addLayout(m_hbox_mem_panel, get_row()); // TODO: RSX memory searcher if (!m_rsx) { vbox_panel->addLayout(hbox_memory_search, get_row()); vbox_panel->addSpacing(15); } else { group_search->deleteLater(); } vbox_panel->setSizeConstraint(QLayout::SetNoConstraint); setLayout(vbox_panel); // Events connect(m_addr_line, &QLineEdit::returnPressed, [this]() { bool ok = false; const QString text = m_addr_line->text(); const u32 addr = (text.startsWith("0x", Qt::CaseInsensitive) ? text.right(text.size() - 2) : text).toULong(&ok, 16); if (ok) m_addr = addr; scroll(0); // Refresh }); connect(sb_words, static_cast<void (QSpinBox::*)(int)>(&QSpinBox::valueChanged), [=, this]() { m_colcount = 1 << sb_words->value(); ShowMemory(); }); connect(b_prev, &QAbstractButton::clicked, [this]() { scroll(-1); }); connect(b_next, &QAbstractButton::clicked, [this]() { scroll(1); }); connect(b_fprev, &QAbstractButton::clicked, [this]() { scroll(m_rowcount * -1); }); connect(b_fnext, &QAbstractButton::clicked, [this]() { scroll(m_rowcount); }); connect(b_img, &QAbstractButton::clicked, [=, this]() { const color_format format = cbox_img_mode->currentData().value<color_format>(); const int sizex = sb_img_size_x->value(); const int sizey = sb_img_size_y->value(); ShowImage(this, m_addr, format, sizex, sizey, false); }); QTimer* auto_refresh_timer = new QTimer(this); connect(auto_refresh_timer, &QTimer::timeout, this, [this]() { ShowMemory(); }); connect(button_auto_refresh, &QAbstractButton::clicked, this, [=, this]() { const bool is_checked = button_auto_refresh->text() != " "; if (auto_refresh_timer->isActive() != is_checked) { return; } if (is_checked) { button_auto_refresh->setText(QStringLiteral(" ")); auto_refresh_timer->stop(); } else { button_auto_refresh->setText(reinterpret_cast<const char*>(u8"█")); ShowMemory(); auto_refresh_timer->start(16); } }); if (!m_rsx) { connect(button_search, 
&QAbstractButton::clicked, this, [this]() { if (m_search_thread && m_search_thread->isRunning()) { // Prevent spamming (search is costly on performance) return; } if (m_search_thread) { m_search_thread->deleteLater(); m_search_thread = nullptr; } std::string wstr = m_search_line->text().toStdString(); if (wstr.empty() || wstr.size() >= 4096u) { gui_log.error("String is empty or too long (size=%u)", wstr.size()); return; } m_search_thread = QThread::create([this, wstr, m_modes = m_modes]() { gui_log.notice("Searching for %s (mode: %s)", wstr, m_modes); u64 found = 0; for (int modes = m_modes; modes; modes &= modes - 1) { found += OnSearch(wstr, modes & ~(modes - 1)); } gui_log.success("Search completed (found %u matches)", +found); }); m_search_thread->start(); }); connect(button_collapse_viewer, &QAbstractButton::clicked, this, [this, button_collapse_viewer, m_previous_row_count = -1]() mutable { const bool is_collapsing = button_collapse_viewer->text() == reinterpret_cast<const char*>(u8"Ʌ"); button_collapse_viewer->setText(is_collapsing ? 
"V" : reinterpret_cast<const char*>(u8"Ʌ")); if (is_collapsing) { m_previous_row_count = std::exchange(m_rowcount, 0); setMinimumHeight(0); } else { m_rowcount = std::exchange(m_previous_row_count, 0); setMaximumHeight(16777215); // Default Qt value } ShowMemory(); QTimer::singleShot(0, this, [this, button_collapse_viewer]() { const bool is_collapsing = button_collapse_viewer->text() != reinterpret_cast<const char*>(u8"Ʌ"); // singleShot to evaluate properly after the event const int height_hint = sizeHint().height(); resize(size().width(), height_hint); if (is_collapsing) { setMinimumHeight(height_hint); setMaximumHeight(height_hint + 1); } else { setMinimumHeight(m_min_height); } }); }); } // Set the minimum height of one row m_rowcount = 1; ShowMemory(); m_min_height = sizeHint().height(); setMinimumHeight(m_min_height); m_rowcount = 16; ShowMemory(); setFixedWidth(sizeHint().width()); // Fill the QTextEdits scroll(0); // Show by default show(); // Expected to be created by IDM, emulation stop will close it const u32 id = idm::last_id(); auto handle_ptr = idm::get_unlocked<memory_viewer_handle>(id); connect(this, &memory_viewer_panel::finished, [handle_ptr = std::move(handle_ptr), id, this](int) { if (m_search_thread) { m_search_thread->wait(); m_search_thread->deleteLater(); m_search_thread = nullptr; } idm::remove_verify<memory_viewer_handle>(id, handle_ptr); }); } memory_viewer_panel::~memory_viewer_panel() { } void memory_viewer_panel::wheelEvent(QWheelEvent *event) { // Set some scrollspeed modifiers: u32 step_size = 1; if (event->modifiers().testFlag(Qt::ControlModifier)) step_size *= m_rowcount; const QPoint num_steps = event->angleDelta() / 8 / 15; // http://doc.qt.io/qt-5/qwheelevent.html#pixelDelta scroll(step_size * (0 - num_steps.y())); } void memory_viewer_panel::scroll(s32 steps) { m_addr += m_colcount * 4 * steps; // Add steps m_addr &= m_addr_mask; // Mask it m_addr -= m_addr % (m_colcount * 4); // Align by amount of bytes in a row 
m_addr_line->setText(qstr(fmt::format("%08x", m_addr))); ShowMemory(); } void memory_viewer_panel::resizeEvent(QResizeEvent *event) { QDialog::resizeEvent(event); const int font_height = m_fontMetrics->height(); const QMargins margins = layout()->contentsMargins(); int free_height = event->size().height() - (layout()->count() * (margins.top() + margins.bottom())) - c_pad_memory_labels; for (int i = 0; i < layout()->count(); i++) { const auto it = layout()->itemAt(i); if (it != m_hbox_mem_panel) // Do not take our memory layout into account free_height -= it->sizeHint().height(); } const u32 new_row_count = std::max(0, free_height) / font_height; if (m_rowcount != new_row_count) { m_rowcount = new_row_count; QTimer::singleShot(0, [this]() { // Prevent recursion of events ShowMemory(); }); } } std::string memory_viewer_panel::getHeaderAtAddr(u32 addr) const { if (m_type == thread_class::spu) return {}; // Check if its an SPU Local Storage beginning const u32 spu_boundary = utils::align<u32>(addr, SPU_LS_SIZE); if (spu_boundary <= addr + m_colcount * 4 - 1) { std::shared_ptr<named_thread<spu_thread>> spu; if (const u32 raw_spu_index = (spu_boundary - RAW_SPU_BASE_ADDR) / SPU_LS_SIZE; raw_spu_index < 5) { spu = idm::get<named_thread<spu_thread>>(spu_thread::find_raw_spu(raw_spu_index)); if (spu && spu->get_type() == spu_type::threaded) { spu.reset(); } } else if (const u32 spu_index = (spu_boundary - SPU_FAKE_BASE_ADDR) / SPU_LS_SIZE; spu_index < spu_thread::id_count) { spu = idm::get<named_thread<spu_thread>>(spu_thread::id_base | spu_index); if (spu && spu->get_type() != spu_type::threaded) { spu.reset(); } } if (spu) { return spu->get_name(); } } return {}; } void* memory_viewer_panel::to_ptr(u32 addr, u32 size) const { if (m_type >= thread_class::spu && !m_get_cpu()) { return nullptr; } if (!size) { return nullptr; } switch (m_type) { case thread_class::general: case thread_class::ppu: { if (vm::check_addr(addr, 0, size)) { return vm::get_super_ptr(addr); } break; 
} case thread_class::spu: { if (size <= SPU_LS_SIZE && SPU_LS_SIZE - size >= (addr % SPU_LS_SIZE)) { return m_spu_shm->map_self() + (addr % SPU_LS_SIZE); } break; } case thread_class::rsx: { u32 final_addr = 0; constexpr u32 local_mem = rsx::constants::local_mem_base; if (size > 0x2000'0000 || local_mem + 0x1000'0000 - size < addr) { break; } for (u32 i = addr; i >> 20 <= (addr + size - 1) >> 20; i += 0x100000) { const u32 temp = rsx::get_address(i - (i >= local_mem ? local_mem : 0), i < local_mem ? CELL_GCM_LOCATION_MAIN : CELL_GCM_LOCATION_LOCAL, true); if (!temp) { // Failure final_addr = 0; break; } if (!final_addr) { // First time, save starting address for later checks final_addr = temp; } else if (final_addr != temp - (i - addr)) { // TODO: Non-contiguous memory final_addr = 0; break; } } if (vm::check_addr(final_addr, 0, size)) { return vm::get_super_ptr(final_addr); } break; } } return nullptr; } void memory_viewer_panel::ShowMemory() { QString t_mem_addr_str; QString t_mem_hex_str; QString t_mem_ascii_str; for (u32 row = 0, spu_passed = 0; row < m_rowcount; row++) { if (row) { t_mem_addr_str += "\r\n"; t_mem_hex_str += "\r\n"; t_mem_ascii_str += "\r\n"; } { // Check if this address contains potential header const u32 addr = (m_addr + (row - spu_passed) * m_colcount * 4) & m_addr_mask; const std::string header = getHeaderAtAddr(addr); if (!header.empty()) { // Create an SPU header at the beginning of local storage // Like so: // ======================================= // SPU[0x0000100] CellSpursKernel0 // ======================================= bool _break = false; for (u32 i = 0; i < 3; i++) { t_mem_addr_str += qstr(fmt::format("%08x", addr)); std::string str(i == 1 ? header : ""); const u32 expected_str_size = m_colcount * 13 - 2; // Truncate or enlarge string to a fixed size str.resize(expected_str_size); std::replace(str.begin(), str.end(), '\0', i == 1 ? 
' ' : '='); t_mem_hex_str += qstr(str); spu_passed++; row++; if (row >= m_rowcount) { _break = true; break; } t_mem_addr_str += "\r\n"; t_mem_hex_str += "\r\n"; t_mem_ascii_str += "\r\n"; } if (_break) { break; } } t_mem_addr_str += qstr(fmt::format("%08x", (m_addr + (row - spu_passed) * m_colcount * 4) & m_addr_mask)); } for (u32 col = 0; col < m_colcount; col++) { if (col) { t_mem_hex_str += " "; } const u32 addr = (m_addr + (row - spu_passed) * m_colcount * 4 + col * 4) & m_addr_mask; if (const auto ptr = this->to_ptr(addr)) { const be_t<u32> rmem = read_from_ptr<be_t<u32>>(static_cast<const u8*>(ptr)); t_mem_hex_str += qstr(fmt::format("%02x %02x %02x %02x", static_cast<u8>(rmem >> 24), static_cast<u8>(rmem >> 16), static_cast<u8>(rmem >> 8), static_cast<u8>(rmem >> 0))); std::string str{reinterpret_cast<const char*>(&rmem), 4}; for (auto& ch : str) { if (!std::isprint(static_cast<u8>(ch))) ch = '.'; } t_mem_ascii_str += qstr(std::move(str)); } else { t_mem_hex_str += "?? ?? ?? ??"; t_mem_ascii_str += "????"; } } } m_mem_addr->setVisible(m_rowcount != 0); m_mem_hex->setVisible(m_rowcount != 0); m_mem_ascii->setVisible(m_rowcount != 0); if (t_mem_addr_str != m_mem_addr->text()) m_mem_addr->setText(t_mem_addr_str); if (t_mem_hex_str != m_mem_hex->text()) m_mem_hex->setText(t_mem_hex_str); if (t_mem_ascii_str != m_mem_ascii->text()) m_mem_ascii->setText(t_mem_ascii_str); auto mask_height = [&](int height) { return m_rowcount != 0 ? 
height + c_pad_memory_labels : 0; }; // Adjust Text Boxes (also helps with window resize) QSize textSize = m_fontMetrics->size(0, m_mem_addr->text()); m_mem_addr->setFixedSize(textSize.width() + 10, mask_height(textSize.height())); textSize = m_fontMetrics->size(0, m_mem_hex->text()); m_mem_hex->setFixedSize(textSize.width() + 10, mask_height(textSize.height())); textSize = m_fontMetrics->size(0, m_mem_ascii->text()); m_mem_ascii->setFixedSize(textSize.width() + 10, mask_height(textSize.height())); } void memory_viewer_panel::SetPC(const uint pc) { m_addr = pc; } void memory_viewer_panel::keyPressEvent(QKeyEvent* event) { if (!isActiveWindow()) { QDialog::keyPressEvent(event); return; } if (event->modifiers() == Qt::ControlModifier) { switch (const auto key = event->key()) { case Qt::Key_PageUp: case Qt::Key_PageDown: { scroll(key == Qt::Key_PageDown ? m_rowcount : 0u - m_rowcount); break; } case Qt::Key_F5: { if (event->isAutoRepeat()) { break; } // Single refresh ShowMemory(); break; } case Qt::Key_F: { m_addr_line->setFocus(); break; } default: break; } } QDialog::keyPressEvent(event); } void memory_viewer_panel::ShowImage(QWidget* parent, u32 addr, color_format format, u32 width, u32 height, bool flipv) const { u32 texel_bytes = 4; switch (format) { case color_format::RGB: { texel_bytes = 3; break; } case color_format::G8: { texel_bytes = 1; break; } default: break; } // If exceeds 32-bits it is invalid as well, UINT32_MAX always fails checks const u32 memsize = utils::mul_saturate<u32>(utils::mul_saturate<u32>(texel_bytes, width), height); if (memsize == 0) { return; } const auto originalBuffer = static_cast<u8*>(this->to_ptr(addr, memsize)); if (!originalBuffer) { return; } const auto convertedBuffer = new (std::nothrow) u8[memsize / texel_bytes * u64{4}]; if (!convertedBuffer) { // OOM or invalid memory address, give up return; } switch (format) { case color_format::RGB: { const u32 pitch = width * 3; const u32 pitch_new = width * 4; for (u32 y = 0; y < 
height; y++) { const u32 offset = y * pitch; const u32 offset_new = y * pitch_new; for (u32 x = 0, x_new = 0; x < pitch; x += 3, x_new += 4) { convertedBuffer[offset_new + x_new + 0] = originalBuffer[offset + x + 2]; convertedBuffer[offset_new + x_new + 1] = originalBuffer[offset + x + 1]; convertedBuffer[offset_new + x_new + 2] = originalBuffer[offset + x + 0]; convertedBuffer[offset_new + x_new + 3] = 255; } } break; } case color_format::ARGB: { const u32 pitch = width * 4; for (u32 y = 0; y < height; y++) { const u32 offset = y * pitch; for (u32 x = 0; x < pitch; x += 4) { convertedBuffer[offset + x + 0] = originalBuffer[offset + x + 3]; convertedBuffer[offset + x + 1] = originalBuffer[offset + x + 2]; convertedBuffer[offset + x + 2] = originalBuffer[offset + x + 1]; convertedBuffer[offset + x + 3] = originalBuffer[offset + x + 0]; } } break; } case color_format::RGBA: { const u32 pitch = width * 4; for (u32 y = 0; y < height; y++) { const u32 offset = y * pitch; for (u32 x = 0; x < pitch; x += 4) { convertedBuffer[offset + x + 0] = originalBuffer[offset + x + 2]; convertedBuffer[offset + x + 1] = originalBuffer[offset + x + 1]; convertedBuffer[offset + x + 2] = originalBuffer[offset + x + 0]; convertedBuffer[offset + x + 3] = originalBuffer[offset + x + 3]; } } break; } case color_format::ABGR: { const u32 pitch = width * 4; for (u32 y = 0; y < height; y++) { const u32 offset = y * pitch; for (u32 x = 0; x < pitch; x += 4) { convertedBuffer[offset + x + 0] = originalBuffer[offset + x + 1]; convertedBuffer[offset + x + 1] = originalBuffer[offset + x + 2]; convertedBuffer[offset + x + 2] = originalBuffer[offset + x + 3]; convertedBuffer[offset + x + 3] = originalBuffer[offset + x + 0]; } } break; } case color_format::G8: { const u32 pitch = width * 1; const u32 pitch_new = width * 4; for (u32 y = 0; y < height; y++) { const u32 offset = y * pitch; const u32 offset_new = y * pitch_new; for (u32 x = 0; x < pitch; x++) { const u8 color = originalBuffer[offset + x]; 
convertedBuffer[offset_new + x * 4 + 0] = color; convertedBuffer[offset_new + x * 4 + 1] = color; convertedBuffer[offset_new + x * 4 + 2] = color; convertedBuffer[offset_new + x * 4 + 3] = 255; } } break; } case color_format::G32MAX: { // Special: whitens as 4-byte groups tend to have a higher value, in order to perceive memory contents // May be used to search for instructions or floats for example const u32 pitch = width * 4; for (u32 y = 0; y < height; y++) { const u32 offset = y * pitch; for (u32 x = 0; x < pitch; x += 4) { const u8 color = std::max({originalBuffer[offset + x + 0], originalBuffer[offset + x + 1], originalBuffer[offset + x + 2], originalBuffer[offset + x + 3]}); convertedBuffer[offset + x + 0] = color; convertedBuffer[offset + x + 1] = color; convertedBuffer[offset + x + 2] = color; convertedBuffer[offset + x + 3] = 255; } } break; } } // Flip vertically if (flipv && height > 1 && memsize > 1) { const u32 pitch = width * 4; for (u32 y = 0; y < height / 2; y++) { const u32 offset = y * pitch; const u32 flip_offset = (height - y - 1) * pitch; for (u32 x = 0; x < pitch; x++) { const u8 tmp = convertedBuffer[offset + x]; convertedBuffer[offset + x] = convertedBuffer[flip_offset + x]; convertedBuffer[flip_offset + x] = tmp; } } } std::unique_ptr<QImage> image = std::make_unique<QImage>(convertedBuffer, width, height, QImage::Format_ARGB32, [](void* buffer){ delete[] static_cast<u8*>(buffer); }, convertedBuffer); if (image->isNull()) return; QLabel* canvas = new QLabel(); canvas->setFixedSize(width, height); canvas->setAttribute(Qt::WA_Hover); canvas->setPixmap(QPixmap::fromImage(*image)); QLabel* image_title = new QLabel(); QVBoxLayout* layout = new QVBoxLayout(); layout->setContentsMargins(0, 0, 0, 0); layout->addWidget(image_title); layout->addWidget(canvas); struct image_viewer : public QDialog { QLabel* const m_canvas; QLabel* const m_image_title; const std::unique_ptr<QImage> m_image; const u32 m_addr; const int m_addr_scale = 1; const u32 
m_pitch; const u32 m_width; const u32 m_height; int m_canvas_scale = 1; image_viewer(QWidget* parent, QLabel* canvas, QLabel* image_title, std::unique_ptr<QImage> image, u32 addr, u32 addr_scale, u32 pitch, u32 width, u32 height) noexcept : QDialog(parent) , m_canvas(canvas) , m_image_title(image_title) , m_image(std::move(image)) , m_addr(addr) , m_addr_scale(addr_scale) , m_pitch(pitch) , m_width(width) , m_height(height) { } bool eventFilter(QObject* object, QEvent* event) override { if (object == m_canvas && (event->type() == QEvent::HoverMove || event->type() == QEvent::HoverEnter || event->type() == QEvent::HoverLeave)) { const QPointF xy = static_cast<QHoverEvent*>(event)->position() / m_canvas_scale; set_window_name_by_coordinates(xy.x(), xy.y()); return false; } if (object == m_canvas && event->type() == QEvent::MouseButtonDblClick && static_cast<QMouseEvent*>(event)->button() == Qt::LeftButton) { QLineEdit* addr_line = static_cast<memory_viewer_panel*>(parent())->m_addr_line; const QPointF xy = static_cast<QMouseEvent*>(event)->position() / m_canvas_scale; addr_line->setText(qstr(fmt::format("%08x", get_pointed_addr(xy.x(), xy.y())))); Q_EMIT addr_line->returnPressed(); close(); return false; } return QDialog::eventFilter(object, event); } u32 get_pointed_addr(u32 x, u32 y) const { return m_addr + m_addr_scale * (y * m_pitch + x) / m_canvas_scale; } void set_window_name_by_coordinates(int x, int y) { if (x < 0 || y < 0) { m_image_title->setText(qstr(fmt::format("[-, -]: NA"))); return; } m_image_title->setText(qstr(fmt::format("[x:%d, y:%d]: 0x%x", x, y, get_pointed_addr(x, y)))); } void keyPressEvent(QKeyEvent* event) override { if (!isActiveWindow()) { QDialog::keyPressEvent(event); return; } if (event->modifiers() == Qt::ControlModifier) { switch (const auto key = event->key()) { case Qt::Key_Equal: // Also plus case Qt::Key_Plus: case Qt::Key_Minus: { m_canvas_scale = std::clamp(m_canvas_scale + (key == Qt::Key_Minus ? 
-1 : 1), 1, 5); const QSize fixed_size(m_width * m_canvas_scale, m_height * m_canvas_scale); // Fast transformation makes it not blurry, does not use bilinear filtering m_canvas->setPixmap(QPixmap::fromImage(m_image->scaled(fixed_size.width(), fixed_size.height(), Qt::KeepAspectRatio, Qt::FastTransformation))); m_canvas->setFixedSize(fixed_size); QTimer::singleShot(0, this, [this]() { // sizeHint() evaluates properly after events have been processed setFixedSize(sizeHint()); }); break; } } } QDialog::keyPressEvent(event); } }; image_viewer* f_image_viewer = new image_viewer(parent, canvas, image_title, std::move(image), addr, texel_bytes, width, width, height); canvas->installEventFilter(f_image_viewer); f_image_viewer->setWindowTitle(qstr(fmt::format("Raw Image @ 0x%x", addr))); f_image_viewer->setLayout(layout); f_image_viewer->setAttribute(Qt::WA_DeleteOnClose); f_image_viewer->show(); QTimer::singleShot(0, f_image_viewer, [f_image_viewer]() { // sizeHint() evaluates properly after events have been processed f_image_viewer->setFixedSize(f_image_viewer->sizeHint()); }); }
36,116
C++
.cpp
1,052
31.191065
216
0.675004
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
5,120
game_list_base.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/game_list_base.cpp
#include "stdafx.h" #include "game_list_base.h" #include "localized.h" #include <QDir> #include <QPainter> #include <cmath> #include <unordered_set> LOG_CHANNEL(game_list_log, "GameList"); game_list_base::game_list_base() { } void game_list_base::repaint_icons(std::vector<game_info>& game_data, const QColor& icon_color, const QSize& icon_size, qreal device_pixel_ratio) { m_icon_size = icon_size; m_icon_color = icon_color; QPixmap placeholder(icon_size * device_pixel_ratio); placeholder.setDevicePixelRatio(device_pixel_ratio); placeholder.fill(Qt::transparent); for (game_info& game : game_data) { game->pxmap = placeholder; if (movie_item_base* item = game->item) { item->set_icon_load_func([this, game, device_pixel_ratio, cancel = item->icon_loading_aborted()](int) { IconLoadFunction(game, device_pixel_ratio, cancel); }); item->call_icon_func(); } } } void game_list_base::IconLoadFunction(game_info game, qreal device_pixel_ratio, std::shared_ptr<atomic_t<bool>> cancel) { if (cancel && cancel->load()) { return; } static std::unordered_set<std::string> warn_once_list; static shared_mutex s_mtx; if (game->icon.isNull() && (game->info.icon_path.empty() || !game->icon.load(QString::fromStdString(game->info.icon_path)))) { if (game_list_log.warning) { bool logged = false; { std::lock_guard lock(s_mtx); logged = !warn_once_list.emplace(game->info.icon_path).second; } if (!logged) { game_list_log.warning("Could not load image from path %s", QDir(QString::fromStdString(game->info.icon_path)).absolutePath()); } } } if (!game->item || (cancel && cancel->load())) { return; } const QColor color = GetGridCompatibilityColor(game->compat.color); { std::lock_guard lock(game->item->pixmap_mutex); game->pxmap = PaintedPixmap(game->icon, device_pixel_ratio, game->hasCustomConfig, game->hasCustomPadConfig, color); } if (!cancel || !cancel->load()) { if (m_icon_ready_callback) m_icon_ready_callback(game); } } QPixmap game_list_base::PaintedPixmap(const QPixmap& icon, qreal 
device_pixel_ratio, bool paint_config_icon, bool paint_pad_config_icon, const QColor& compatibility_color) const { QSize canvas_size(320, 176); QSize icon_size(icon.size()); QPoint target_pos; if (!icon.isNull()) { // Let's upscale the original icon to at least fit into the outer rect of the size of PS3's ICON0.PNG if (icon_size.width() < 320 || icon_size.height() < 176) { icon_size.scale(320, 176, Qt::KeepAspectRatio); } canvas_size = icon_size; // Calculate the centered size and position of the icon on our canvas. if (icon_size.width() != 320 || icon_size.height() != 176) { ensure(icon_size.height() > 0); constexpr double target_ratio = 320.0 / 176.0; // aspect ratio 20:11 if ((icon_size.width() / static_cast<double>(icon_size.height())) > target_ratio) { canvas_size.setHeight(std::ceil(icon_size.width() / target_ratio)); } else { canvas_size.setWidth(std::ceil(icon_size.height() * target_ratio)); } target_pos.setX(std::max<int>(0, (canvas_size.width() - icon_size.width()) / 2.0)); target_pos.setY(std::max<int>(0, (canvas_size.height() - icon_size.height()) / 2.0)); } } // Create a canvas large enough to fit our entire scaled icon QPixmap canvas(canvas_size * device_pixel_ratio); canvas.setDevicePixelRatio(device_pixel_ratio); canvas.fill(m_icon_color); // Create a painter for our canvas QPainter painter(&canvas); painter.setRenderHint(QPainter::SmoothPixmapTransform); // Draw the icon onto our canvas if (!icon.isNull()) { painter.drawPixmap(target_pos.x(), target_pos.y(), icon_size.width(), icon_size.height(), icon); } // Draw config icons if necessary if (!m_is_list_layout && (paint_config_icon || paint_pad_config_icon)) { const int width = canvas_size.width() * 0.2; const QPoint origin = QPoint(canvas_size.width() - width, 0); QString icon_path; if (paint_config_icon && paint_pad_config_icon) { icon_path = ":/Icons/combo_config_bordered.png"; } else if (paint_config_icon) { icon_path = ":/Icons/custom_config.png"; } else if (paint_pad_config_icon) { icon_path 
= ":/Icons/controllers.png"; } QPixmap custom_config_icon(icon_path); custom_config_icon.setDevicePixelRatio(device_pixel_ratio); painter.drawPixmap(origin, custom_config_icon.scaled(QSize(width, width) * device_pixel_ratio, Qt::KeepAspectRatio, Qt::TransformationMode::SmoothTransformation)); } // Draw game compatibility icons if necessary if (compatibility_color.isValid()) { const int size = canvas_size.height() * 0.2; const int spacing = canvas_size.height() * 0.05; QColor copyColor = QColor(compatibility_color); copyColor.setAlpha(215); // ~85% opacity painter.setRenderHint(QPainter::Antialiasing); painter.setBrush(QBrush(copyColor)); painter.setPen(QPen(Qt::black, std::max(canvas_size.width() / 320, canvas_size.height() / 176))); painter.drawEllipse(spacing, spacing, size, size); } // Finish the painting painter.end(); // Scale and return our final image return canvas.scaled(m_icon_size * device_pixel_ratio, Qt::KeepAspectRatio, Qt::TransformationMode::SmoothTransformation); } QColor game_list_base::GetGridCompatibilityColor(const QString& string) const { if (m_draw_compat_status_to_grid && !m_is_list_layout) { return QColor(string); } return QColor(); } std::string game_list_base::GetGameVersion(const game_info& game) { if (game->info.app_ver == Localized().category.unknown.toStdString()) { // Fall back to Disc/Pkg Revision return game->info.version; } return game->info.app_ver; } QIcon game_list_base::GetCustomConfigIcon(const game_info& game) { if (!game) return {}; static const QIcon icon_combo_config_bordered(":/Icons/combo_config_bordered.png"); static const QIcon icon_custom_config(":/Icons/custom_config.png"); static const QIcon icon_controllers(":/Icons/controllers.png"); if (game->hasCustomConfig && game->hasCustomPadConfig) { return icon_combo_config_bordered; } if (game->hasCustomConfig) { return icon_custom_config; } if (game->hasCustomPadConfig) { return icon_controllers; } return {}; }
6,241
C++
.cpp
188
30.494681
177
0.716234
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
5,121
elf_memory_dumping_dialog.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/elf_memory_dumping_dialog.cpp
#include "elf_memory_dumping_dialog.h" #include "Utilities/Config.h" #include "Emu/Cell/SPUThread.h" #include "qt_utils.h" #include <QFileDialog> #include <QCoreApplication> #include <QFontDatabase> #include <QHBoxLayout> #include <QPushButton> #include <QMessageBox> #include <QLineEdit> #include <QLabel> LOG_CHANNEL(gui_log, "GUI"); Q_DECLARE_METATYPE(spu_memory_segment_dump_data); elf_memory_dumping_dialog::elf_memory_dumping_dialog(u32 ppu_debugger_addr, std::shared_ptr<gui_settings> _gui_settings, QWidget* parent) : QDialog(parent), m_gui_settings(std::move(_gui_settings)) { setWindowTitle(tr("SPU ELF Dumper")); setAttribute(Qt::WA_DeleteOnClose); m_seg_list = new QListWidget(); // Font const int pSize = 10; QFont mono = QFontDatabase::systemFont(QFontDatabase::FixedFont); mono.setPointSize(pSize); m_seg_list->setMinimumWidth(gui::utils::get_label_width(tr("PPU Address: 0x00000000, LS Address: 0x00000, Segment Size: 0x00000, Flags: 0x0"))); // Address expression input auto make_hex_edit = [this, mono](u32 max_digits) { QLineEdit* le = new QLineEdit(); le->setFont(mono); le->setMaxLength(max_digits + 2); le->setPlaceholderText("0x" + QStringLiteral("0").repeated(max_digits)); le->setValidator(new QRegularExpressionValidator(QRegularExpression(QStringLiteral("^(0[xX])?0*[a-fA-F0-9]{0,%1}$").arg(max_digits)), this)); return le; }; m_segment_size_input = make_hex_edit(5); m_ppu_address_input = make_hex_edit(8); m_ls_address_input = make_hex_edit(5); m_segment_flags_input = make_hex_edit(1); m_segment_flags_input->setText("0x7"); // READ WRITE EXEC m_ppu_address_input->setText(QStringLiteral("0x%1").arg(ppu_debugger_addr & -0x10000, 1, 16)); // SPU code segments are usually 128 bytes aligned, let's make it even 64k so the user would have to type himself the lower part to avoid human errors. 
QPushButton* add_segment_button = new QPushButton(QStringLiteral("+")); add_segment_button->setToolTip(tr("Add new segment")); add_segment_button->setFixedWidth(add_segment_button->sizeHint().height()); // Make button square connect(add_segment_button, &QAbstractButton::clicked, this, &elf_memory_dumping_dialog::add_new_segment); QPushButton* remove_segment_button = new QPushButton(QStringLiteral("-")); remove_segment_button->setToolTip(tr("Remove segment")); remove_segment_button->setFixedWidth(remove_segment_button->sizeHint().height()); // Make button square remove_segment_button->setEnabled(false); connect(remove_segment_button, &QAbstractButton::clicked, this, &elf_memory_dumping_dialog::remove_segment); QPushButton* save_to_file = new QPushButton(tr("Save To ELF")); save_to_file->setToolTip(tr("Save To An ELF file")); connect(save_to_file, &QAbstractButton::clicked, this, &elf_memory_dumping_dialog::save_to_file); QHBoxLayout* hbox_input = new QHBoxLayout; hbox_input->addWidget(new QLabel(tr("Segment Size:"))); hbox_input->addWidget(m_segment_size_input); hbox_input->addSpacing(5); hbox_input->addWidget(new QLabel(tr("PPU Address:"))); hbox_input->addWidget(m_ppu_address_input); hbox_input->addSpacing(5); hbox_input->addWidget(new QLabel(tr("LS Address:"))); hbox_input->addWidget(m_ls_address_input); hbox_input->addSpacing(5); hbox_input->addWidget(new QLabel(tr("Flags:"))); hbox_input->addWidget(m_segment_flags_input); QHBoxLayout* hbox_save_and_edit = new QHBoxLayout; hbox_save_and_edit->addStretch(2); hbox_save_and_edit->addWidget(add_segment_button); hbox_save_and_edit->addSpacing(4); hbox_save_and_edit->addWidget(remove_segment_button); hbox_save_and_edit->addSpacing(4); hbox_save_and_edit->addWidget(save_to_file); QVBoxLayout* vbox = new QVBoxLayout(); vbox->addLayout(hbox_input); vbox->addSpacing(5); vbox->addWidget(m_seg_list); vbox->addSpacing(5); vbox->addLayout(hbox_save_and_edit); setLayout(vbox); connect(m_seg_list, 
&QListWidget::currentRowChanged, this, [this, remove_segment_button](int row) { remove_segment_button->setEnabled(row >= 0 && m_seg_list->item(row)); }); show(); } void elf_memory_dumping_dialog::add_new_segment() { QStringList errors; auto interpret = [&](QString text, QString error_field) -> u32 { bool ok = false; // Parse expression (or at least used to, was nuked to remove the need for QtJsEngine) const QString fixed_expression = QRegularExpression(QRegularExpression::anchoredPattern("a .*|^[A-Fa-f0-9]+$")).match(text).hasMatch() ? "0x" + text : text; const u32 res = static_cast<u32>(fixed_expression.toULong(&ok, 16)); if (!ok) { errors << error_field; return umax; } return res; }; spu_memory_segment_dump_data data{}; data.segment_size = interpret(m_segment_size_input->text(), tr("Segment Size")); data.src_addr = vm::get_super_ptr(interpret(m_ppu_address_input->text(), tr("PPU Address"))); data.ls_addr = interpret(m_ls_address_input->text(), tr("LS Address")); data.flags = interpret(m_segment_flags_input->text(), tr("Segment Flags")); if (!errors.isEmpty()) { QMessageBox::warning(this, tr("Failed To Add Segment"), tr("Segment parameters are incorrect:\n%1").arg(errors.join('\n'))); return; } if (data.segment_size % 4) { QMessageBox::warning(this, tr("Failed To Add Segment"), tr("SPU segment size must be 4 bytes aligned.")); return; } if (data.segment_size + data.ls_addr > SPU_LS_SIZE || data.segment_size == 0 || data.segment_size % 4) { QMessageBox::warning(this, tr("Failed To Add Segment"), tr("SPU segment range is invalid.")); return; } if (!vm::check_addr(vm::try_get_addr(data.src_addr).first, vm::page_readable, data.segment_size)) { QMessageBox::warning(this, tr("Failed To Add Segment"), tr("PPU address range is not accessible.")); return; } for (int i = 0; i < m_seg_list->count(); ++i) { ensure(m_seg_list->item(i)->data(Qt::UserRole).canConvert<spu_memory_segment_dump_data>()); const auto seg_stored = 
m_seg_list->item(i)->data(Qt::UserRole).value<spu_memory_segment_dump_data>(); const auto stored_max = seg_stored.src_addr + seg_stored.segment_size; const auto data_max = data.src_addr + data.segment_size; if (seg_stored.src_addr < data_max && data.src_addr < stored_max) { QMessageBox::warning(this, tr("Failed To Add Segment"), tr("SPU segment overlaps with previous SPU segment(s)\n")); return; } } auto item = new QListWidgetItem(tr("PPU Address: 0x%0, LS Address: 0x%1, Segment Size: 0x%2, Flags: 0x%3").arg(+vm::try_get_addr(data.src_addr).first, 5, 16).arg(data.ls_addr, 2, 16).arg(data.segment_size, 2, 16).arg(data.flags, 2, 16), m_seg_list); item->setData(Qt::UserRole, QVariant::fromValue(data)); m_seg_list->setCurrentItem(item); } void elf_memory_dumping_dialog::remove_segment() { const int row = m_seg_list->currentRow(); if (row >= 0) { QListWidgetItem* item = m_seg_list->takeItem(row); delete item; } } void elf_memory_dumping_dialog::save_to_file() { std::vector<spu_memory_segment_dump_data> segs; segs.reserve(m_seg_list->count()); for (int i = 0; i < m_seg_list->count(); ++i) { ensure(m_seg_list->item(i)->data(Qt::UserRole).canConvert<spu_memory_segment_dump_data>()); const auto seg_stored = m_seg_list->item(i)->data(Qt::UserRole).value<spu_memory_segment_dump_data>(); segs.emplace_back(seg_stored); } if (segs.empty()) { return; } const QString path_last_elf = m_gui_settings->GetValue(gui::fd_save_elf).toString(); const QString qpath = QFileDialog::getSaveFileName(this, tr("Capture"), path_last_elf, "SPU ELF (*.elf)" ); const std::string path = qpath.toStdString(); if (!path.empty()) { const auto result = spu_thread::capture_memory_as_elf({segs.data(), segs.size()}).save(); if (!result.empty() && fs::write_file(path, fs::rewrite, result)) { gui_log.success("Saved ELF at %s", path); m_gui_settings->SetValue(gui::fd_save_elf, qpath); } else { QMessageBox::warning(this, tr("Save Failure"), tr("Failed to save SPU ELF.")); } } }
7,954
C++
.cpp
182
41.302198
250
0.719793
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
5,122
basic_mouse_settings_dialog.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/basic_mouse_settings_dialog.cpp
#include "stdafx.h" #include "basic_mouse_settings_dialog.h" #include "localized_emu.h" #include "Input/basic_mouse_handler.h" #include "Input/keyboard_pad_handler.h" #include "Emu/Io/mouse_config.h" #include "util/asm.hpp" #include <QGroupBox> #include <QMessageBox> #include <QVBoxLayout> LOG_CHANNEL(cfg_log, "CFG"); enum button_role { button_name = Qt::UserRole, button_code }; basic_mouse_settings_dialog::basic_mouse_settings_dialog(QWidget* parent) : QDialog(parent) { setObjectName("basic_mouse_settings_dialog"); setWindowTitle(tr("Configure Basic Mouse Handler")); setAttribute(Qt::WA_DeleteOnClose); setAttribute(Qt::WA_StyledBackground); setModal(true); QVBoxLayout* v_layout = new QVBoxLayout(this); m_button_box = new QDialogButtonBox(this); m_button_box->setStandardButtons(QDialogButtonBox::Apply | QDialogButtonBox::Cancel | QDialogButtonBox::Save | QDialogButtonBox::RestoreDefaults); connect(m_button_box, &QDialogButtonBox::clicked, this, [this](QAbstractButton* button) { if (button == m_button_box->button(QDialogButtonBox::Apply)) { g_cfg_mouse.save(); } else if (button == m_button_box->button(QDialogButtonBox::Save)) { g_cfg_mouse.save(); accept(); } else if (button == m_button_box->button(QDialogButtonBox::RestoreDefaults)) { if (QMessageBox::question(this, tr("Confirm Reset"), tr("Reset all settings?")) != QMessageBox::Yes) return; reset_config(); } else if (button == m_button_box->button(QDialogButtonBox::Cancel)) { // Restore config if (!g_cfg_mouse.load()) { cfg_log.notice("Could not restore mouse config. Using defaults."); } reject(); } }); if (!g_cfg_mouse.load()) { cfg_log.notice("Could not load basic mouse config. 
Using defaults."); } m_buttons = new QButtonGroup(this); connect(m_buttons, &QButtonGroup::idClicked, this, &basic_mouse_settings_dialog::on_button_click); connect(&m_remap_timer, &QTimer::timeout, this, [this]() { auto button = m_buttons->button(m_button_id); if (--m_seconds <= 0) { if (button) { if (const int button_id = m_buttons->id(button)) { const std::string name = g_cfg_mouse.get_button(button_id).to_string(); button->setText(name.empty() ? QStringLiteral("-") : QString::fromStdString(name)); } } reactivate_buttons(); return; } if (button) { button->setText(tr("[ Waiting %1 ]").arg(m_seconds)); } }); const auto insert_button = [this](int id, QPushButton* button) { m_buttons->addButton(button, id); button->installEventFilter(this); }; constexpr u32 button_count = 8; constexpr u32 max_items_per_column = 4; int rows = button_count; for (u32 cols = 1; utils::aligned_div(button_count, cols) > max_items_per_column;) { rows = utils::aligned_div(button_count, ++cols); } QWidget* widget = new QWidget(this); QGridLayout* grid_layout = new QGridLayout(this); for (int i = 0, row = 0, col = 0; i < static_cast<int>(button_count); i++, row++) { const int cell_code = get_mouse_button_code(i); const QString translated_cell_button = localized_emu::translated_mouse_button(cell_code); QHBoxLayout* h_layout = new QHBoxLayout(this); QGroupBox* gb = new QGroupBox(translated_cell_button, this); QPushButton* pb = new QPushButton(this); insert_button(cell_code, pb); const std::string saved_btn = g_cfg_mouse.get_button(cell_code); pb->setText(saved_btn.empty() ? 
QStringLiteral("-") : QString::fromStdString(saved_btn)); if (row >= rows) { row = 0; col++; } m_push_buttons[cell_code] = pb; h_layout->addWidget(pb); gb->setLayout(h_layout); grid_layout->addWidget(gb, row, col); } widget->setLayout(grid_layout); v_layout->addWidget(widget); v_layout->addWidget(m_button_box); setLayout(v_layout); m_palette = m_push_buttons[CELL_MOUSE_BUTTON_1]->palette(); // save normal palette } void basic_mouse_settings_dialog::reset_config() { g_cfg_mouse.from_default(); for (auto& [cell_code, pb] : m_push_buttons) { if (!pb) continue; const QString text = QString::fromStdString(g_cfg_mouse.get_button(cell_code).def); pb->setText(text.isEmpty() ? QStringLiteral("-") : text); } } void basic_mouse_settings_dialog::on_button_click(int id) { if (id < 0) { return; } for (auto but : m_buttons->buttons()) { but->setEnabled(false); but->setFocusPolicy(Qt::ClickFocus); } m_button_box->setEnabled(false); for (auto but : m_button_box->buttons()) { but->setFocusPolicy(Qt::ClickFocus); } m_button_id = id; if (auto button = m_buttons->button(m_button_id)) { button->setText(tr("[ Waiting %1 ]").arg(MAX_SECONDS)); button->setPalette(QPalette(Qt::blue)); button->grabMouse(); } m_remap_timer.start(1000); } void basic_mouse_settings_dialog::mouseReleaseEvent(QMouseEvent* event) { if (m_button_id < 0) { // We are not remapping a button, so pass the event to the base class. 
QDialog::mouseReleaseEvent(event); return; } const std::string name = keyboard_pad_handler::GetMouseName(event); g_cfg_mouse.get_button(m_button_id).from_string(name); if (auto button = m_buttons->button(m_button_id)) { button->setText(QString::fromStdString(name)); } reactivate_buttons(); } bool basic_mouse_settings_dialog::eventFilter(QObject* object, QEvent* event) { switch (event->type()) { case QEvent::MouseButtonRelease: { // On right click clear binding if we are not remapping pad button if (m_button_id < 0) { QMouseEvent* mouse_event = static_cast<QMouseEvent*>(event); if (const auto button = qobject_cast<QPushButton*>(object); button && button->isEnabled() && mouse_event->button() == Qt::RightButton) { if (const int button_id = m_buttons->id(button)) { button->setText(QStringLiteral("-")); g_cfg_mouse.get_button(button_id).from_string(""); return true; } } } // Disabled buttons should not absorb mouseclicks event->ignore(); break; } default: { break; } } return QDialog::eventFilter(object, event); } void basic_mouse_settings_dialog::reactivate_buttons() { m_remap_timer.stop(); m_seconds = MAX_SECONDS; if (m_button_id < 0) { return; } if (auto button = m_buttons->button(m_button_id)) { button->setPalette(m_palette); button->releaseMouse(); } m_button_id = -1; // Enable all buttons m_button_box->setEnabled(true); for (auto but : m_button_box->buttons()) { but->setFocusPolicy(Qt::StrongFocus); } for (auto but : m_buttons->buttons()) { but->setEnabled(true); but->setFocusPolicy(Qt::StrongFocus); } }
6,559
C++
.cpp
230
25.769565
147
0.701958
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
5,123
midi_creator.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/midi_creator.cpp
#include "stdafx.h" #include "midi_creator.h" #include "Utilities/StrFmt.h" #include "Utilities/StrUtil.h" #include <rtmidi_c.h> LOG_CHANNEL(cfg_log, "CFG"); midi_creator::midi_creator() { setObjectName("midi_creator"); } // We need to recreate the localized string because the midi creator is currently only created once. QString midi_creator::get_none() { return tr("None", "MIDI device"); } void midi_creator::refresh_list() { m_midi_list.clear(); m_midi_list.append(get_none()); const auto deleter = [](RtMidiWrapper* ptr) { if (ptr) rtmidi_in_free(ptr); }; std::unique_ptr<RtMidiWrapper, decltype(deleter)> midi_in(rtmidi_in_create_default()); ensure(midi_in); if (!midi_in->ok) { cfg_log.error("Could not get MIDI in ptr: %s", midi_in->msg); return; } const RtMidiApi api = rtmidi_in_get_current_api(midi_in.get()); if (!midi_in->ok) { cfg_log.error("Could not get MIDI api: %s", midi_in->msg); return; } if (const char* api_name = rtmidi_api_name(api)) { cfg_log.notice("MIDI: Using %s api", api_name); } else { cfg_log.warning("Could not get MIDI api name"); } const u32 port_count = rtmidi_get_port_count(midi_in.get()); if (!midi_in->ok || port_count == umax) { cfg_log.error("Could not get MIDI port count: %s", midi_in->msg); return; } for (u32 port_number = 0; port_number < port_count; port_number++) { char buf[128]{}; s32 size = sizeof(buf); if (rtmidi_get_port_name(midi_in.get(), port_number, buf, &size) == -1 || !midi_in->ok) { cfg_log.error("Error getting MIDI port name for port %d: %s", port_number, midi_in->msg); continue; } cfg_log.notice("Found MIDI device with name: %s", buf); m_midi_list.append(QString::fromUtf8(buf)); } } QStringList midi_creator::get_midi_list() const { return m_midi_list; } std::array<midi_device, max_midi_devices> midi_creator::get_selection_list() const { return m_sel_list; } std::string midi_creator::set_device(u32 num, const midi_device& device) { ensure(num < m_sel_list.size()); m_sel_list[num] = device; if (device.name == 
get_none().toStdString()) { m_sel_list[num].name.clear(); } std::string result; for (const midi_device& device : m_sel_list) { fmt::append(result, "%s@@@", device); } return result; } void midi_creator::parse_devices(const std::string& list) { m_sel_list = {}; const std::vector<std::string> devices_list = fmt::split(list, { "@@@" }); for (usz index = 0; index < std::min(m_sel_list.size(), devices_list.size()); index++) { m_sel_list[index] = midi_device::from_string(devices_list[index]); } }
2,592
C++
.cpp
92
25.967391
100
0.683232
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
5,124
qt_utils.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/qt_utils.cpp
#include "stdafx.h" #include "qt_utils.h" #include "gui_settings.h" #include <QApplication> #include <QBitmap> #include <QDesktopServices> #include <QFontMetrics> #include <QPainter> #include <QProcess> #include <QScreen> #include <QUrl> #include "Emu/system_utils.hpp" #include "Utilities/File.h" #include <cmath> LOG_CHANNEL(gui_log, "GUI"); constexpr auto qstr = QString::fromStdString; namespace gui { namespace utils { QRect create_centered_window_geometry(const QScreen* screen, const QRect& base, s32 target_width, s32 target_height) { ensure(screen); // Get minimum virtual screen x & y for clamping the // window x & y later while taking the width and height // into account, so they don't go offscreen const QRect screen_geometry = screen->availableGeometry(); const s32 min_screen_x = screen_geometry.x(); const s32 max_screen_x = screen_geometry.x() + screen_geometry.width() - target_width; const s32 min_screen_y = screen_geometry.y(); const s32 max_screen_y = screen_geometry.y() + screen_geometry.height() - target_height; const s32 frame_x_raw = base.left() + ((base.width() - target_width) / 2); const s32 frame_y_raw = base.top() + ((base.height() - target_height) / 2); const s32 frame_x = std::clamp(frame_x_raw, min_screen_x, max_screen_x); const s32 frame_y = std::clamp(frame_y_raw, min_screen_y, max_screen_y); return QRect(frame_x, frame_y, target_width, target_height); } bool create_square_pixmap(QPixmap& pixmap, int target_size) { if (pixmap.isNull()) return false; QSize canvas_size(target_size, target_size); QSize pixmap_size(pixmap.size()); QPoint target_pos; // Let's upscale the original pixmap to at least fit into the outer rect. if (pixmap_size.width() < target_size || pixmap_size.height() < target_size) { pixmap_size.scale(target_size, target_size, Qt::KeepAspectRatio); } canvas_size = pixmap_size; // Calculate the centered size and position of the icon on our canvas. 
if (pixmap_size.width() != target_size || pixmap_size.height() != target_size) { ensure(pixmap_size.height() > 0); constexpr double target_ratio = 1.0; // square icon if ((pixmap_size.width() / static_cast<double>(pixmap_size.height())) > target_ratio) { canvas_size.setHeight(std::ceil(pixmap_size.width() / target_ratio)); } else { canvas_size.setWidth(std::ceil(pixmap_size.height() * target_ratio)); } target_pos.setX(std::max<int>(0, (canvas_size.width() - pixmap_size.width()) / 2.0)); target_pos.setY(std::max<int>(0, (canvas_size.height() - pixmap_size.height()) / 2.0)); } // Create a canvas large enough to fit our entire scaled icon QPixmap canvas(canvas_size); canvas.fill(Qt::transparent); // Create a painter for our canvas QPainter painter(&canvas); painter.setRenderHint(QPainter::SmoothPixmapTransform); // Draw the icon onto our canvas painter.drawPixmap(target_pos.x(), target_pos.y(), pixmap_size.width(), pixmap_size.height(), pixmap); // Finish the painting painter.end(); pixmap = canvas; return true; } QPixmap get_colorized_pixmap(const QPixmap& old_pixmap, const QColor& old_color, const QColor& new_color, bool use_special_masks, bool colorize_all) { QPixmap pixmap = old_pixmap; if (colorize_all) { const QBitmap mask = pixmap.createMaskFromColor(Qt::transparent, Qt::MaskInColor); pixmap.fill(new_color); pixmap.setMask(mask); return pixmap; } const QBitmap mask = pixmap.createMaskFromColor(old_color, Qt::MaskOutColor); pixmap.fill(new_color); pixmap.setMask(mask); // special masks for disc icon and others if (use_special_masks) { // Example usage for an icon with multiple shades of the same color //auto saturatedColor = [](const QColor& col, float sat /* must be < 1 */) //{ // int r = col.red() + sat * (255 - col.red()); // int g = col.green() + sat * (255 - col.green()); // int b = col.blue() + sat * (255 - col.blue()); // return QColor(r, g, b, col.alpha()); //}; //QColor test_color(0, 173, 246, 255); //QPixmap test_pixmap = old_pixmap; //QBitmap 
test_mask = test_pixmap.createMaskFromColor(test_color, Qt::MaskOutColor); //test_pixmap.fill(saturatedColor(new_color, 0.6f)); //test_pixmap.setMask(test_mask); const QColor white_color(Qt::white); QPixmap white_pixmap = old_pixmap; const QBitmap white_mask = white_pixmap.createMaskFromColor(white_color, Qt::MaskOutColor); white_pixmap.fill(white_color); white_pixmap.setMask(white_mask); QPainter painter(&pixmap); painter.setRenderHint(QPainter::SmoothPixmapTransform); painter.drawPixmap(QPoint(0, 0), white_pixmap); //painter.drawPixmap(QPoint(0, 0), test_pixmap); painter.end(); } return pixmap; } QIcon get_colorized_icon(const QIcon& old_icon, const QColor& old_color, const QColor& new_color, bool use_special_masks, bool colorize_all) { return QIcon(get_colorized_pixmap(old_icon.pixmap(::at32(old_icon.availableSizes(), 0)), old_color, new_color, use_special_masks, colorize_all)); } QIcon get_colorized_icon(const QIcon& old_icon, const QColor& old_color, const std::map<QIcon::Mode, QColor>& new_colors, bool use_special_masks, bool colorize_all) { QIcon icon{}; const QPixmap old_pixmap = old_icon.pixmap(::at32(old_icon.availableSizes(), 0)); for (const auto& [mode, color] : new_colors) { icon.addPixmap(get_colorized_pixmap(old_pixmap, old_color, color, use_special_masks, colorize_all), mode); } return icon; } QStringList get_dir_entries(const QDir& dir, const QStringList& name_filters, bool full_path) { QFileInfoList entries = dir.entryInfoList(name_filters, QDir::Files); QStringList res; for (const QFileInfo& entry : entries) { res.append(full_path ? 
entry.absoluteFilePath() : entry.baseName()); } return res; } QColor get_foreground_color() { QLabel dummy_color; dummy_color.ensurePolished(); return dummy_color.palette().color(QPalette::ColorRole::WindowText); } QColor get_background_color() { QLabel dummy_color; dummy_color.ensurePolished(); return dummy_color.palette().color(QPalette::ColorRole::Window); } QColor get_label_color(const QString& object_name, const QColor& fallback_light, const QColor& fallback_dark, QPalette::ColorRole color_role) { if (!gui::custom_stylesheet_active || !gui::stylesheet.contains(object_name)) { return dark_mode_active() ? fallback_dark : fallback_light; } QLabel dummy_color; dummy_color.setObjectName(object_name); dummy_color.ensurePolished(); return dummy_color.palette().color(color_role); } QFont get_label_font(const QString& object_name) { QLabel dummy_font; dummy_font.setObjectName(object_name); dummy_font.ensurePolished(); return dummy_font.font(); } int get_label_width(const QString& text, const QFont* font) { QLabel l(text); if (font) l.setFont(*font); return l.sizeHint().width(); } QColor get_link_color(const QString& name) { return gui::utils::get_label_color(name, QColor(0, 116, 231), QColor(135, 206, 250)); } QString get_link_color_string(const QString& name) { return get_link_color(name).name(); } QString get_link_style(const QString& name) { return QString("style=\"color: %0;\"").arg(get_link_color_string(name)); } QPixmap get_centered_pixmap(QPixmap pixmap, const QSize& icon_size, int offset_x, int offset_y, qreal device_pixel_ratio, Qt::TransformationMode mode) { // Create empty canvas for expanded image QPixmap exp_img(icon_size); exp_img.setDevicePixelRatio(device_pixel_ratio); exp_img.fill(Qt::transparent); // Load scaled pixmap pixmap = pixmap.scaled(icon_size, Qt::KeepAspectRatio, mode); // Define offset for raw image placement QPoint offset(offset_x + icon_size.width() / 2 - pixmap.width() / 2, offset_y + icon_size.height() / 2 - pixmap.height() / 2); // 
Place raw image inside expanded image QPainter painter(&exp_img); painter.setRenderHint(QPainter::SmoothPixmapTransform); painter.drawPixmap(offset, pixmap); painter.end(); return exp_img; } QPixmap get_centered_pixmap(const QString& path, const QSize& icon_size, int offset_x, int offset_y, qreal device_pixel_ratio, Qt::TransformationMode mode) { return get_centered_pixmap(QPixmap(path), icon_size, offset_x, offset_y, device_pixel_ratio, mode); } QImage get_opaque_image_area(const QString& path) { QImage image = QImage(path); int w_min = 0; int w_max = image.width(); int h_min = 0; int h_max = image.height(); for (int y = 0; y < image.height(); ++y) { const QRgb* row = reinterpret_cast<const QRgb*>(image.constScanLine(y)); bool row_filled = false; for (int x = 0; x < image.width(); ++x) { if (qAlpha(row[x])) { row_filled = true; w_min = std::max(w_min, x); if (w_max > x) { w_max = x; x = w_min; } } } if (row_filled) { h_max = std::min(h_max, y); h_min = y; } } return image.copy(QRect(QPoint(w_max, h_max), QPoint(w_min, h_min))); } // taken from https://stackoverflow.com/a/30818424/8353754 // because size policies won't work as expected (see similar bugs in Qt bugtracker) void resize_combo_box_view(QComboBox* combo) { int max_width = 0; const QFontMetrics font_metrics(combo->font()); for (int i = 0; i < combo->count(); ++i) { max_width = std::max(max_width, font_metrics.horizontalAdvance(combo->itemText(i))); } if (combo->view()->minimumWidth() < max_width) { // add scrollbar width and margin max_width += combo->style()->pixelMetric(QStyle::PM_ScrollBarExtent); max_width += combo->view()->autoScrollMargin(); combo->view()->setMinimumWidth(max_width); } } void update_table_item_count(QTableWidget* table) { if (!table) return; int item_count = table->rowCount(); const bool is_empty = item_count < 1; if (is_empty) table->insertRow(0); const int item_height = table->rowHeight(0); if (is_empty) { table->clearContents(); table->setRowCount(0); } const int available_height 
= table->rect().height() - table->horizontalHeader()->height() - table->frameWidth() * 2; if (available_height < item_height || item_height < 1) return; const int new_item_count = available_height / item_height; if (new_item_count == item_count) return; item_count = new_item_count; table->clearContents(); table->setRowCount(0); for (int i = 0; i < item_count; ++i) table->insertRow(i); if (table->horizontalScrollBar()) table->removeRow(--item_count); } void show_windowed_image(const QImage& img, const QString& title) { if (img.isNull()) return; QLabel* canvas = new QLabel(); canvas->setWindowTitle(title); canvas->setObjectName("windowed_image"); canvas->setPixmap(QPixmap::fromImage(img)); canvas->setFixedSize(img.size()); canvas->ensurePolished(); canvas->show(); } // Loads the app icon from path and embeds it centered into an empty square icon QIcon get_app_icon_from_path(const std::string& path, const std::string& title_id) { // Try to find custom icon first std::string icon_path = fs::get_config_dir() + "/Icons/game_icons/" + title_id + "/ICON0.PNG"; bool found_file = fs::is_file(icon_path); if (!found_file) { // Get Icon for the gs_frame from path. this handles presumably all possible use cases const QString qpath = qstr(path); const std::string path_list[] = { path, qpath.section("/", 0, -2, QString::SectionIncludeTrailingSep).toStdString(), qpath.section("/", 0, -3, QString::SectionIncludeTrailingSep).toStdString() }; for (const std::string& pth : path_list) { if (!fs::is_dir(pth)) { continue; } const std::string sfo_dir = rpcs3::utils::get_sfo_dir_from_game_path(pth, title_id); icon_path = sfo_dir + "/ICON0.PNG"; found_file = fs::is_file(icon_path); if (found_file) { break; } } } if (found_file) { // load the image from path. 
It will most likely be a rectangle const QImage source = QImage(qstr(icon_path)); const int edge_max = std::max(source.width(), source.height()); // create a new transparent image with square size and same format as source (maybe handle other formats than RGB32 as well?) const QImage::Format format = source.format() == QImage::Format_RGB32 ? QImage::Format_ARGB32 : source.format(); QImage dest(edge_max, edge_max, format); dest.fill(Qt::transparent); // get the location to draw the source image centered within the dest image. const QPoint dest_pos = source.width() > source.height() ? QPoint(0, (source.width() - source.height()) / 2) : QPoint((source.height() - source.width()) / 2, 0); // Paint the source into/over the dest QPainter painter(&dest); painter.setRenderHint(QPainter::SmoothPixmapTransform); painter.drawImage(dest_pos, source); painter.end(); return QIcon(QPixmap::fromImage(dest)); } // if nothing was found reset the icon to default return QApplication::windowIcon(); } void open_dir(const std::string& spath) { QString path = qstr(spath); if (fs::is_file(spath)) { // open directory and select file // https://stackoverflow.com/questions/3490336/how-to-reveal-in-finder-or-show-in-explorer-with-qt #ifdef _WIN32 // Remove double slashes and convert to native separators. Double slashes don't seem to work with the explorer call. 
path.replace(QRegularExpression("[\\\\|/]+"), QDir::separator()); gui_log.notice("gui::utils::open_dir: About to open file path '%s' (original: '%s')", path, spath); if (!QProcess::startDetached("explorer.exe", {"/select,", path})) { gui_log.error("gui::utils::open_dir: Failed to start explorer process"); } #elif defined(__APPLE__) gui_log.notice("gui::utils::open_dir: About to open file path '%s'", spath); QProcess::execute("/usr/bin/osascript", { "-e", "tell application \"Finder\" to reveal POSIX file \"" + path + "\"" }); QProcess::execute("/usr/bin/osascript", { "-e", "tell application \"Finder\" to activate" }); #else // open parent directory const QUrl url = QUrl::fromLocalFile(qstr(fs::get_parent_dir(spath))); const std::string url_path = url.toString().toStdString(); gui_log.notice("gui::utils::open_dir: About to open parent dir url '%s' for path '%s'", url_path, spath); if (!QDesktopServices::openUrl(url)) { gui_log.error("gui::utils::open_dir: Failed to open parent dir url '%s' for path '%s'", url_path, spath); } #endif return; } if (!fs::is_dir(spath) && !fs::create_path(spath)) { gui_log.error("gui::utils::open_dir: Failed to create path '%s' (%s)", spath, fs::g_tls_error); return; } const QUrl url = QUrl::fromLocalFile(path); const std::string url_path = url.toString().toStdString(); gui_log.notice("gui::utils::open_dir: About to open dir url '%s' for path '%s'", url_path, spath); if (!QDesktopServices::openUrl(url)) { gui_log.error("gui::utils::open_dir: Failed to open dir url '%s' for path '%s'", url_path, spath); } } void open_dir(const QString& path) { open_dir(path.toStdString()); } QTreeWidgetItem* find_child(QTreeWidgetItem* parent, const QString& text) { if (parent) { for (int i = 0; i < parent->childCount(); i++) { if (parent->child(i)->text(0) == text) { return parent->child(i); } } } return nullptr; } void find_children_by_data(QTreeWidgetItem* parent, std::vector<QTreeWidgetItem*>& children, const std::vector<std::pair<int /*role*/, 
QVariant /*data*/>>& criteria, bool recursive) { if (parent) { for (int i = 0; i < parent->childCount(); i++) { if (auto item = parent->child(i)) { bool match = true; for (const auto& [role, data] : criteria) { if (item->data(0, role) != data) { match = false; break; } } if (match) { children.push_back(item); } if (recursive) { find_children_by_data(item, children, criteria, recursive); } } } } } QTreeWidgetItem* add_child(QTreeWidgetItem *parent, const QString& text, int column) { if (parent) { QTreeWidgetItem *tree_item = new QTreeWidgetItem(); tree_item->setText(column, text); parent->addChild(tree_item); return tree_item; } return nullptr; }; void remove_children(QTreeWidgetItem* parent) { if (parent) { for (int i = parent->childCount() - 1; i >= 0; i--) { parent->removeChild(parent->child(i)); } } } void remove_children(QTreeWidgetItem* parent, const std::vector<std::pair<int /*role*/, QVariant /*data*/>>& criteria, bool recursive) { if (parent) { for (int i = parent->childCount() - 1; i >= 0; i--) { if (const auto item = parent->child(i)) { bool match = true; for (const auto& [role, data] : criteria) { if (item->data(0, role) != data) { match = false; break; } } if (!match) { parent->removeChild(item); } else if (recursive) { remove_children(item, criteria, recursive); } } } } } void sort_tree_item(QTreeWidgetItem* item, Qt::SortOrder sort_order, bool recursive) { if (item) { item->sortChildren(0, sort_order); if (recursive) { for (int i = 0; i < item->childCount(); i++) { sort_tree_item(item->child(i), sort_order, recursive); } } } } void sort_tree(QTreeWidget* tree, Qt::SortOrder sort_order, bool recursive) { if (tree) { tree->sortByColumn(0, sort_order); if (recursive) { for (int i = 0; i < tree->topLevelItemCount(); i++) { sort_tree_item(tree->topLevelItem(i), sort_order, recursive); } } } } QString format_byte_size(usz size) { usz byte_unit = 0; usz divisor = 1; static const QString s_units[]{"B", "KB", "MB", "GB", "TB", "PB"}; while (byte_unit < 
std::size(s_units) - 1 && size / divisor >= 1024) { byte_unit++; divisor *= 1024; } return QStringLiteral("%0 %1").arg(QString::number((size + 0.) / divisor, 'f', 2)).arg(s_units[byte_unit]); } } // utils } // gui
18,746
C++
.cpp
536
30.339552
183
0.657369
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
5,125
gui_application.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/gui_application.cpp
#include "stdafx.h" #include "gui_application.h" #include "qt_utils.h" #include "welcome_dialog.h" #include "main_window.h" #include "emu_settings.h" #include "gui_settings.h" #include "persistent_settings.h" #include "gs_frame.h" #include "gl_gs_frame.h" #include "localized_emu.h" #include "qt_camera_handler.h" #include "qt_music_handler.h" #include "rpcs3_version.h" #ifdef WITH_DISCORD_RPC #include "_discord_utils.h" #endif #include "Emu/Io/Null/null_camera_handler.h" #include "Emu/Io/Null/null_music_handler.h" #include "Emu/vfs_config.h" #include "util/init_mutex.hpp" #include "util/console.h" #include "trophy_notification_helper.h" #include "save_data_dialog.h" #include "msg_dialog_frame.h" #include "osk_dialog_frame.h" #include "recvmessage_dialog_frame.h" #include "sendmessage_dialog_frame.h" #include "stylesheets.h" #include "progress_dialog.h" #include <QScreen> #include <QFontDatabase> #include <QLayout> #include <QLibraryInfo> #include <QDirIterator> #include <QFileInfo> #include <QMessageBox> #include <QTextDocument> #include <QStyleFactory> #include <QStyleHints> #include <clocale> #include "Emu/RSX/Null/NullGSRender.h" #include "Emu/RSX/GL/GLGSRender.h" #if defined(HAVE_VULKAN) #include "Emu/RSX/VK/VKGSRender.h" #endif #ifdef _WIN32 #include "Windows.h" #endif LOG_CHANNEL(gui_log, "GUI"); std::unique_ptr<raw_mouse_handler> g_raw_mouse_handler; [[noreturn]] void report_fatal_error(std::string_view text, bool is_html = false, bool include_help_text = true); gui_application::gui_application(int& argc, char** argv) : QApplication(argc, argv) { std::setlocale(LC_NUMERIC, "C"); // On linux Qt changes to system locale while initializing QCoreApplication } gui_application::~gui_application() { #ifdef WITH_DISCORD_RPC discord::shutdown(); #endif } bool gui_application::Init() { #ifndef __APPLE__ setWindowIcon(QIcon(":/rpcs3.ico")); #endif if (!rpcs3::is_release_build() && !rpcs3::is_local_build()) { const std::string_view branch_name = rpcs3::get_full_branch(); 
gui_log.warning("Experimental Build Warning! Build origin: %s", branch_name); QMessageBox msg; msg.setWindowModality(Qt::WindowModal); msg.setWindowTitle(tr("Experimental Build Warning")); msg.setIcon(QMessageBox::Critical); msg.setTextFormat(Qt::RichText); msg.setStandardButtons(QMessageBox::Yes | QMessageBox::No); msg.setDefaultButton(QMessageBox::No); msg.setText(tr( R"( <p style="white-space: nowrap;"> Please understand that this build is not an official RPCS3 release.<br> This build contains changes that may break games, or even <b>damage</b> your data.<br> We recommend to download and use the official build from the <a %0 href='https://rpcs3.net/download'>RPCS3 website</a>.<br><br> Build origin: %1<br> Do you wish to use this build anyway? </p> )" ).arg(gui::utils::get_link_style()).arg(Qt::convertFromPlainText(branch_name.data()))); msg.layout()->setSizeConstraint(QLayout::SetFixedSize); if (msg.exec() == QMessageBox::No) { return false; } } m_emu_settings = std::make_shared<emu_settings>(); m_gui_settings = std::make_shared<gui_settings>(); m_persistent_settings = std::make_shared<persistent_settings>(); if (!m_emu_settings->Init()) { return false; } if (m_gui_settings->GetValue(gui::m_attachCommandLine).toBool()) { utils::attach_console(utils::console_stream::std_err, true); } else { m_gui_settings->SetValue(gui::m_attachCommandLine, false); } // The user might be set by cli arg. If not, set another user. 
if (m_active_user.empty()) { // Get active user with standard user as fallback m_active_user = m_persistent_settings->GetCurrentUser("00000001").toStdString(); } // Force init the emulator InitializeEmulator(m_active_user, m_show_gui); // Create the main window if (m_show_gui) { m_main_window = new main_window(m_gui_settings, m_emu_settings, m_persistent_settings, nullptr); const auto codes = GetAvailableLanguageCodes(); const auto language = m_gui_settings->GetValue(gui::loc_language).toString(); const auto index = codes.indexOf(language); LoadLanguage(index < 0 ? QLocale(QLocale::English).bcp47Name() : ::at32(codes, index)); } // Create callbacks from the emulator, which reference the handlers. InitializeCallbacks(); // Create connects to propagate events throughout Gui. InitializeConnects(); if (m_gui_settings->GetValue(gui::ib_show_welcome).toBool()) { welcome_dialog* welcome = new welcome_dialog(m_gui_settings, false); bool use_dark_theme = false; connect(welcome, &QDialog::accepted, this, [&]() { use_dark_theme = welcome->does_user_want_dark_theme(); }); welcome->exec(); if (use_dark_theme) { m_gui_settings->SetValue(gui::m_currentStylesheet, "Darker Style by TheMitoSan"); } } // Check maxfiles if (utils::get_maxfiles() < 4096) { QMessageBox::warning(nullptr, tr("Warning"), tr("The current limit of maximum file descriptors is too low.\n" "Some games will crash.\n" "\n" "Please increase the limit before running RPCS3.")); } if (m_main_window && !m_main_window->Init(m_with_cli_boot)) { return false; } #ifdef WITH_DISCORD_RPC // Discord Rich Presence Integration if (m_gui_settings->GetValue(gui::m_richPresence).toBool()) { discord::initialize(); } #endif // Install native event filter #ifdef _WIN32 // Currently only needed for raw mouse input on windows installNativeEventFilter(&m_native_event_filter); #endif return true; } void gui_application::SwitchTranslator(QTranslator& translator, const QString& filename, const QString& language_code) { // remove the old 
translator removeTranslator(&translator); const QString lang_path = QLibraryInfo::path(QLibraryInfo::TranslationsPath) + QStringLiteral("/"); const QString file_path = lang_path + filename; if (QFileInfo(file_path).isFile()) { // load the new translator if (translator.load(file_path)) { installTranslator(&translator); } } else if (const QString default_code = QLocale(QLocale::English).bcp47Name(); language_code != default_code) { // show error, but ignore default case "en", since it is handled in source code gui_log.error("No translation file found in: %s", file_path); // reset current language to default "en" m_language_code = default_code; } } void gui_application::LoadLanguage(const QString& language_code) { if (m_language_code == language_code) { return; } m_language_code = language_code; const QLocale locale = QLocale(language_code); const QString locale_name = QLocale::languageToString(locale.language()); QLocale::setDefault(locale); // Idk if this is overruled by the QLocale default, so I'll change it here just to be sure. 
// As per QT recommendations to avoid conflicts for POSIX functions std::setlocale(LC_NUMERIC, "C"); SwitchTranslator(m_translator, QStringLiteral("rpcs3_%1.qm").arg(language_code), language_code); if (m_main_window) { const QString default_code = QLocale(QLocale::English).bcp47Name(); QStringList language_codes = GetAvailableLanguageCodes(); if (!language_codes.contains(default_code)) { language_codes.prepend(default_code); } m_main_window->RetranslateUI(language_codes, m_language_code); } m_gui_settings->SetValue(gui::loc_language, m_language_code); gui_log.notice("Current language changed to %s (%s)", locale_name, language_code); } QStringList gui_application::GetAvailableLanguageCodes() { QStringList language_codes; const QString language_path = QLibraryInfo::path(QLibraryInfo::TranslationsPath); if (QFileInfo(language_path).isDir()) { const QDir dir(language_path); const QStringList filenames = dir.entryList(QStringList("rpcs3_*.qm")); for (const QString& filename : filenames) { QString language_code = filename; // "rpcs3_en.qm" language_code.truncate(language_code.lastIndexOf('.')); // "rpcs3_en" language_code.remove(0, language_code.indexOf('_') + 1); // "en" if (language_codes.contains(language_code)) { gui_log.error("Found duplicate language '%s' (%s)", language_code, filename); } else { language_codes << language_code; } } } return language_codes; } void gui_application::InitializeConnects() { connect(&m_timer, &QTimer::timeout, this, &gui_application::UpdatePlaytime); connect(this, &gui_application::OnEmulatorRun, this, &gui_application::StartPlaytime); connect(this, &gui_application::OnEmulatorStop, this, &gui_application::StopPlaytime); connect(this, &gui_application::OnEmulatorPause, this, &gui_application::StopPlaytime); connect(this, &gui_application::OnEmulatorResume, this, &gui_application::StartPlaytime); connect(this, &QGuiApplication::applicationStateChanged, this, &gui_application::OnAppStateChanged); if (m_main_window) { 
connect(m_main_window, &main_window::RequestLanguageChange, this, &gui_application::LoadLanguage); connect(m_main_window, &main_window::RequestGlobalStylesheetChange, this, &gui_application::OnChangeStyleSheetRequest); connect(m_main_window, &main_window::NotifyEmuSettingsChange, this, [this](){ OnEmuSettingsChange(); }); connect(m_main_window, &main_window::NotifyShortcutHandlers, this, &gui_application::OnShortcutChange); connect(this, &gui_application::OnEmulatorRun, m_main_window, &main_window::OnEmuRun); connect(this, &gui_application::OnEmulatorStop, m_main_window, &main_window::OnEmuStop); connect(this, &gui_application::OnEmulatorPause, m_main_window, &main_window::OnEmuPause); connect(this, &gui_application::OnEmulatorResume, m_main_window, &main_window::OnEmuResume); connect(this, &gui_application::OnEmulatorReady, m_main_window, &main_window::OnEmuReady); connect(this, &gui_application::OnEnableDiscEject, m_main_window, &main_window::OnEnableDiscEject); connect(this, &gui_application::OnEnableDiscInsert, m_main_window, &main_window::OnEnableDiscInsert); connect(QGuiApplication::styleHints(), &QStyleHints::colorSchemeChanged, this, [this](){ OnChangeStyleSheetRequest(); }); } #ifdef WITH_DISCORD_RPC connect(this, &gui_application::OnEmulatorRun, [this](bool /*start_playtime*/) { // Discord Rich Presence Integration if (m_gui_settings->GetValue(gui::m_richPresence).toBool()) { discord::update_presence(Emu.GetTitleID(), Emu.GetTitle()); } }); connect(this, &gui_application::OnEmulatorStop, [this]() { // Discord Rich Presence Integration if (m_gui_settings->GetValue(gui::m_richPresence).toBool()) { discord::update_presence(m_gui_settings->GetValue(gui::m_discordState).toString().toStdString()); } }); #endif qRegisterMetaType<std::function<void()>>("std::function<void()>"); connect(this, &gui_application::RequestCallFromMainThread, this, &gui_application::CallFromMainThread); } std::unique_ptr<gs_frame> gui_application::get_gs_frame() { extern const 
std::unordered_map<video_resolution, std::pair<int, int>, value_hash<video_resolution>> g_video_out_resolution_map; auto [w, h] = ::at32(g_video_out_resolution_map, g_cfg.video.resolution); const bool resize_game_window = m_gui_settings->GetValue(gui::gs_resize).toBool(); if (resize_game_window) { if (m_gui_settings->GetValue(gui::gs_resize_manual).toBool()) { w = m_gui_settings->GetValue(gui::gs_width).toInt(); h = m_gui_settings->GetValue(gui::gs_height).toInt(); } else { const qreal device_pixel_ratio = devicePixelRatio(); w /= device_pixel_ratio; h /= device_pixel_ratio; } } QScreen* screen = nullptr; QRect base_geometry{}; // Use screen index set by CLI argument int screen_index = m_game_screen_index; const int last_screen_index = m_gui_settings->GetValue(gui::gs_screen).toInt(); // Use last used screen if no CLI index was set if (screen_index < 0) { screen_index = last_screen_index; } // Try to find the specified screen if (screen_index >= 0) { const QList<QScreen*> available_screens = screens(); if (screen_index < available_screens.count()) { screen = ::at32(available_screens, screen_index); if (screen) { base_geometry = screen->geometry(); } } if (!screen) { gui_log.error("The selected game screen with index %d is not available (available screens: %d)", screen_index, available_screens.count()); } } // Fallback to the screen of the main window. Use the primary screen as last resort. if (!screen) { screen = m_main_window ? m_main_window->screen() : primaryScreen(); base_geometry = m_main_window ? m_main_window->frameGeometry() : primaryScreen()->geometry(); } // Use saved geometry if possible. Ignore this if the last used screen is different than the requested screen. QRect frame_geometry = screen_index != last_screen_index ? 
QRect{} : m_gui_settings->GetValue(gui::gs_geometry).value<QRect>(); if (frame_geometry.isNull() || frame_geometry.isEmpty()) { // Center above main window or inside screen if the saved geometry is invalid frame_geometry = gui::utils::create_centered_window_geometry(screen, base_geometry, w, h); } else if (resize_game_window) { // Apply size override to our saved geometry if needed frame_geometry.setSize(QSize(w, h)); } // Load AppIcon const QIcon app_icon = m_main_window ? m_main_window->GetAppIcon() : gui::utils::get_app_icon_from_path(Emu.GetBoot(), Emu.GetTitleID()); gs_frame* frame = nullptr; switch (g_cfg.video.renderer.get()) { case video_renderer::opengl: { frame = new gl_gs_frame(screen, frame_geometry, app_icon, m_gui_settings, m_start_games_fullscreen); break; } case video_renderer::null: case video_renderer::vulkan: { frame = new gs_frame(screen, frame_geometry, app_icon, m_gui_settings, m_start_games_fullscreen); break; } } m_game_window = frame; return std::unique_ptr<gs_frame>(frame); } /** RPCS3 emulator has functions it desires to call from the GUI at times. Initialize them in here. 
*/ void gui_application::InitializeCallbacks() { EmuCallbacks callbacks = CreateCallbacks(); callbacks.try_to_quit = [this](bool force_quit, std::function<void()> on_exit) -> bool { // Close rpcs3 if closed in no-gui mode if (force_quit || !m_main_window) { if (on_exit) { on_exit(); } if (m_main_window) { // Close main window in order to save its window state m_main_window->close(); } quit(); return true; } return false; }; callbacks.call_from_main_thread = [this](std::function<void()> func, atomic_t<u32>* wake_up) { RequestCallFromMainThread(std::move(func), wake_up); }; callbacks.init_gs_render = [](utils::serial* ar) { switch (g_cfg.video.renderer.get()) { case video_renderer::null: { g_fxo->init<rsx::thread, named_thread<NullGSRender>>(ar); break; } case video_renderer::opengl: { #if not defined(__APPLE__) g_fxo->init<rsx::thread, named_thread<GLGSRender>>(ar); #endif break; } case video_renderer::vulkan: { #if defined(HAVE_VULKAN) g_fxo->init<rsx::thread, named_thread<VKGSRender>>(ar); #endif break; } } }; callbacks.get_camera_handler = []() -> std::shared_ptr<camera_handler_base> { switch (g_cfg.io.camera.get()) { case camera_handler::null: case camera_handler::fake: { return std::make_shared<null_camera_handler>(); } case camera_handler::qt: { return std::make_shared<qt_camera_handler>(); } } return nullptr; }; callbacks.get_music_handler = []() -> std::shared_ptr<music_handler_base> { switch (g_cfg.audio.music.get()) { case music_handler::null: { return std::make_shared<null_music_handler>(); } case music_handler::qt: { return std::make_shared<qt_music_handler>(); } } return nullptr; }; callbacks.get_gs_frame = [this]() -> std::unique_ptr<GSFrameBase> { return get_gs_frame(); }; callbacks.get_msg_dialog = [this]() -> std::shared_ptr<MsgDialogBase> { return m_show_gui ? std::make_shared<msg_dialog_frame>() : nullptr; }; callbacks.get_osk_dialog = [this]() -> std::shared_ptr<OskDialogBase> { return m_show_gui ? 
std::make_shared<osk_dialog_frame>() : nullptr; }; callbacks.get_save_dialog = []() -> std::unique_ptr<SaveDialogBase> { return std::make_unique<save_data_dialog>(); }; callbacks.get_sendmessage_dialog = [this]() -> std::shared_ptr<SendMessageDialogBase> { return std::make_shared<sendmessage_dialog_frame>(); }; callbacks.get_recvmessage_dialog = [this]() -> std::shared_ptr<RecvMessageDialogBase> { return std::make_shared<recvmessage_dialog_frame>(); }; callbacks.get_trophy_notification_dialog = [this]() -> std::unique_ptr<TrophyNotificationBase> { return std::make_unique<trophy_notification_helper>(m_game_window); }; callbacks.on_run = [this](bool start_playtime) { OnEmulatorRun(start_playtime); }; callbacks.on_pause = [this]() { OnEmulatorPause(); }; callbacks.on_resume = [this]() { OnEmulatorResume(true); }; callbacks.on_stop = [this]() { OnEmulatorStop(); }; callbacks.on_ready = [this]() { OnEmulatorReady(); }; callbacks.enable_disc_eject = [this](bool enabled) { Emu.CallFromMainThread([this, enabled]() { OnEnableDiscEject(enabled); }); }; callbacks.enable_disc_insert = [this](bool enabled) { Emu.CallFromMainThread([this, enabled]() { OnEnableDiscInsert(enabled); }); }; callbacks.on_missing_fw = [this]() { if (!m_main_window) return false; return m_main_window->OnMissingFw(); }; callbacks.handle_taskbar_progress = [this](s32 type, s32 value) { if (m_game_window) { switch (type) { case 0: static_cast<gs_frame*>(m_game_window)->progress_reset(value); break; case 1: static_cast<gs_frame*>(m_game_window)->progress_increment(value); break; case 2: static_cast<gs_frame*>(m_game_window)->progress_set_limit(value); break; case 3: static_cast<gs_frame*>(m_game_window)->progress_set_value(value); break; default: gui_log.fatal("Unknown type in handle_taskbar_progress(type=%d, value=%d)", type, value); break; } } }; callbacks.get_localized_string = [](localized_string_id id, const char* args) -> std::string { return localized_emu::get_string(id, args); }; 
callbacks.get_localized_u32string = [](localized_string_id id, const char* args) -> std::u32string { return localized_emu::get_u32string(id, args); }; callbacks.get_localized_setting = [this](const cfg::_base* node, u32 enum_index) -> std::string { ensure(!!m_emu_settings); return m_emu_settings->GetLocalizedSetting(node, enum_index); }; callbacks.play_sound = [this](const std::string& path) { Emu.CallFromMainThread([this, path]() { if (fs::is_file(path)) { // Allow to play 3 sound effects at the same time while (m_sound_effects.size() >= 3) { m_sound_effects.pop_front(); } // Create a new sound effect. Re-using the same object seems to be broken for some users starting with Qt 6.6.3. std::unique_ptr<QSoundEffect> sound_effect = std::make_unique<QSoundEffect>(); sound_effect->setSource(QUrl::fromLocalFile(QString::fromStdString(path))); sound_effect->setVolume(g_cfg.audio.volume * 0.01f); sound_effect->play(); m_sound_effects.push_back(std::move(sound_effect)); } }); }; if (m_show_gui) // If this is false, we already have a fallback in the main_application. { callbacks.on_install_pkgs = [this](const std::vector<std::string>& pkgs) { ensure(m_main_window); ensure(!pkgs.empty()); QStringList pkg_list; for (const std::string& pkg : pkgs) { pkg_list << QString::fromStdString(pkg); } return m_main_window->InstallPackages(pkg_list, true); }; } callbacks.on_emulation_stop_no_response = [this](std::shared_ptr<atomic_t<bool>> closed_successfully, int seconds_waiting_already) { const std::string terminate_message = tr("Stopping emulator took too long." "\nSome thread has probably deadlocked. 
Aborting.").toStdString(); if (!closed_successfully) { report_fatal_error(terminate_message); } Emu.CallFromMainThread([this, closed_successfully, seconds_waiting_already, terminate_message] { const auto seconds = std::make_shared<int>(seconds_waiting_already); QMessageBox* mb = new QMessageBox(); mb->setWindowTitle(tr("PS3 Game/Application Is Unresponsive")); mb->setIcon(QMessageBox::Critical); mb->setStandardButtons(QMessageBox::Yes | QMessageBox::No); mb->setDefaultButton(QMessageBox::No); mb->button(QMessageBox::Yes)->setText(tr("Terminate RPCS3")); mb->button(QMessageBox::No)->setText(tr("Keep Waiting")); QString text_base = tr("Waiting for %0 second(s) already to stop emulation without success." "\nKeep waiting or terminate RPCS3 unsafely at your own risk?"); mb->setText(text_base.arg(10)); mb->layout()->setSizeConstraint(QLayout::SetFixedSize); mb->setAttribute(Qt::WA_DeleteOnClose); QTimer* update_timer = new QTimer(mb); connect(update_timer, &QTimer::timeout, [mb, seconds, text_base, closed_successfully]() { *seconds += 1; mb->setText(text_base.arg(*seconds)); if (*closed_successfully) { mb->reject(); } }); connect(mb, &QDialog::accepted, mb, [closed_successfully, terminate_message] { if (!*closed_successfully) { report_fatal_error(terminate_message); } }); mb->open(); update_timer->start(1000); }); }; callbacks.on_save_state_progress = [this](std::shared_ptr<atomic_t<bool>> closed_successfully, stx::shared_ptr<utils::serial> ar_ptr, stx::atomic_ptr<std::string>* code_location, std::shared_ptr<void> init_mtx) { Emu.CallFromMainThread([this, closed_successfully, ar_ptr, code_location, init_mtx] { const auto half_seconds = std::make_shared<int>(1); progress_dialog* pdlg = new progress_dialog(tr("Creating Save-State / Do Not Close RPCS3"), tr("Please wait..."), tr("Hide Progress"), 0, 100, true, m_main_window); pdlg->setAutoReset(false); pdlg->setAutoClose(true); pdlg->show(); QString text_base = tr("%0 written, %1 second(s) passed%2"); 
pdlg->setLabelText(text_base.arg("0B").arg(1).arg("")); pdlg->setAttribute(Qt::WA_DeleteOnClose); QTimer* update_timer = new QTimer(pdlg); connect(update_timer, &QTimer::timeout, [pdlg, ar_ptr, half_seconds, text_base, closed_successfully , code_location, init_mtx, old_written = usz{0}, repeat_count = u32{0}]() mutable { std::string verbose_message; usz bytes_written = 0; while (true) { auto mtx = static_cast<stx::init_mutex*>(init_mtx.get()); auto init = mtx->access(); if (!init) { // Try to wait for the abort process to complete auto fake_reset = mtx->reset(); if (!fake_reset) { // End of emulation termination pdlg->reject(); return; } fake_reset.set_init(); // Now ar_ptr contains a null file descriptor continue; } if (auto str_ptr = code_location->load()) { verbose_message = "\n" + *str_ptr; } bytes_written = ar_ptr->is_writing() ? std::max<usz>(ar_ptr->get_size(), old_written) : old_written; break; } *half_seconds += 1; if (old_written == bytes_written) { if (repeat_count == 60) { if (verbose_message.empty()) { verbose_message += "\n"; } else { verbose_message += ". "; } verbose_message += "If Stuck, Report To Developers"; } else { repeat_count++; } } else { repeat_count = 0; } old_written = bytes_written; pdlg->setLabelText(text_base.arg(gui::utils::format_byte_size(bytes_written)).arg(*half_seconds / 2).arg(QString::fromStdString(verbose_message))); // 300MB -> 50%, 600MB -> 75%, 1200MB -> 87.5% etc const int percent = std::clamp(static_cast<int>(100. - 100. / std::pow(2., std::fmax(0.01, bytes_written * 1. / (300 * 1024 * 1024)))), 2, 100); // Add a third of the remaining progress when the keyword is found pdlg->setValue(verbose_message.find("Finalizing") != umax ? 
100 - ((100 - percent) * 2 / 3) : percent); if (*closed_successfully) { pdlg->reject(); } }); pdlg->open(); update_timer->start(500); }); }; callbacks.add_breakpoint = [this](u32 addr) { Emu.BlockingCallFromMainThread([this, addr]() { m_main_window->OnAddBreakpoint(addr); }); }; Emu.SetCallbacks(std::move(callbacks)); } void gui_application::StartPlaytime(bool start_playtime = true) { if (!start_playtime) { return; } const QString serial = QString::fromStdString(Emu.GetTitleID()); if (serial.isEmpty()) { return; } m_persistent_settings->SetLastPlayed(serial, QDateTime::currentDateTime().toString(gui::persistent::last_played_date_format), true); m_timer_playtime.start(); m_timer.start(10000); // Update every 10 seconds in case the emulation crashes } void gui_application::UpdatePlaytime() { if (!m_timer_playtime.isValid()) { m_timer.stop(); return; } const QString serial = QString::fromStdString(Emu.GetTitleID()); if (serial.isEmpty()) { m_timer_playtime.invalidate(); m_timer.stop(); return; } m_persistent_settings->AddPlaytime(serial, m_timer_playtime.restart(), false); m_persistent_settings->SetLastPlayed(serial, QDateTime::currentDateTime().toString(gui::persistent::last_played_date_format), true); } void gui_application::StopPlaytime() { m_timer.stop(); if (!m_timer_playtime.isValid()) return; const QString serial = QString::fromStdString(Emu.GetTitleID()); if (serial.isEmpty()) { m_timer_playtime.invalidate(); return; } m_persistent_settings->AddPlaytime(serial, m_timer_playtime.restart(), false); m_persistent_settings->SetLastPlayed(serial, QDateTime::currentDateTime().toString(gui::persistent::last_played_date_format), true); m_timer_playtime.invalidate(); } /* * Handle a request to change the stylesheet based on the current entry in the settings. 
*/ void gui_application::OnChangeStyleSheetRequest() { // skip stylesheets on first repaint if a style was set from command line if (m_use_cli_style && gui::stylesheet.isEmpty()) { gui::stylesheet = styleSheet().isEmpty() ? "/* style set by command line arg */" : styleSheet(); if (m_main_window) { m_main_window->RepaintGui(); } return; } // Remove old fonts QFontDatabase::removeAllApplicationFonts(); const QString stylesheet_name = m_gui_settings->GetValue(gui::m_currentStylesheet).toString(); // Determine default style if (m_default_style.isEmpty()) { #ifdef _WIN32 // On windows, the custom stylesheets don't seem to work properly unless we use the windowsvista style as default if (QStyleFactory::keys().contains("windowsvista")) { m_default_style = "windowsvista"; gui_log.notice("Using '%s' as default style", m_default_style); } #endif // Use the initial style as default style if (const QStyle* style = m_default_style.isEmpty() ? QApplication::style() : nullptr) { m_default_style = style->name(); gui_log.notice("Determined '%s' as default style", m_default_style); } // Fallback to the first style, which is supposed to be the default style according to the Qt docs. if (m_default_style.isEmpty()) { if (const QStringList styles = QStyleFactory::keys(); !styles.empty()) { m_default_style = styles.front(); gui_log.notice("Determined '%s' as default style (first style available)", m_default_style); } } } // Reset style to default before doing anything else, or we will get unexpected effects in custom stylesheets. 
if (QStyle* style = QStyleFactory::create(m_default_style)) { setStyle(style); } const auto match_native_style = [&stylesheet_name]() -> QString { // Search for "native (<style>)" static const QRegularExpression expr(gui::NativeStylesheet + " \\((?<style>.*)\\)"); const QRegularExpressionMatch match = expr.match(stylesheet_name); if (match.hasMatch()) { return match.captured("style"); } return {}; }; gui_log.notice("Changing stylesheet to '%s'", stylesheet_name); gui::custom_stylesheet_active = false; if (stylesheet_name.isEmpty() || stylesheet_name == gui::DefaultStylesheet) { gui_log.notice("Using default stylesheet"); setStyleSheet(gui::stylesheets::default_style_sheet); gui::custom_stylesheet_active = true; } else if (stylesheet_name == gui::NoStylesheet) { gui_log.notice("Using empty style"); setStyleSheet("/* none */"); } else if (const QString native_style = match_native_style(); !native_style.isEmpty()) { if (QStyle* style = QStyleFactory::create(native_style)) { gui_log.notice("Using native style '%s'", native_style); setStyleSheet("/* none */"); setStyle(style); } else { gui_log.error("Failed to set stylesheet: Native style '%s' not available", native_style); } } else { QString stylesheet_path; QString stylesheet_dir; std::vector<QDir> locs; locs.push_back(m_gui_settings->GetSettingsDir()); #if !defined(_WIN32) #ifdef __APPLE__ locs.push_back(QCoreApplication::applicationDirPath() + "/../Resources/GuiConfigs/"); #else #ifdef DATADIR const QString data_dir = (DATADIR); locs.push_back(data_dir + "/GuiConfigs/"); #endif locs.push_back(QCoreApplication::applicationDirPath() + "/../share/rpcs3/GuiConfigs/"); #endif locs.push_back(QCoreApplication::applicationDirPath() + "/GuiConfigs/"); #endif for (QDir& loc : locs) { QFileInfo file_info(loc.absoluteFilePath(stylesheet_name + QStringLiteral(".qss"))); if (file_info.exists()) { loc.cdUp(); stylesheet_dir = loc.absolutePath(); stylesheet_path = file_info.absoluteFilePath(); break; } } if (QFile 
file(stylesheet_path); !stylesheet_path.isEmpty() && file.open(QIODevice::ReadOnly | QIODevice::Text)) { const QString config_dir = QString::fromStdString(fs::get_config_dir()); // Add PS3 fonts QDirIterator ps3_font_it(QString::fromStdString(g_cfg_vfs.get_dev_flash() + "data/font/"), QStringList() << "*.ttf", QDir::Files, QDirIterator::Subdirectories); while (ps3_font_it.hasNext()) QFontDatabase::addApplicationFont(ps3_font_it.next()); // Add custom fonts QDirIterator custom_font_it(config_dir + "fonts/", QStringList() << "*.ttf", QDir::Files, QDirIterator::Subdirectories); while (custom_font_it.hasNext()) QFontDatabase::addApplicationFont(custom_font_it.next()); // Replace relative paths with absolute paths. Since relative paths should always be the same, we can just use simple string replacement. // Another option would be to use QDir::setCurrent, but that changes current working directory for the whole process (We don't want that). QString stylesheet = file.readAll(); stylesheet.replace(QStringLiteral("url(\"GuiConfigs/"), QStringLiteral("url(\"") + stylesheet_dir + QStringLiteral("/GuiConfigs/")); setStyleSheet(stylesheet); file.close(); } else { gui_log.error("Could not find stylesheet '%s'. Using default.", stylesheet_name); setStyleSheet(gui::stylesheets::default_style_sheet); } gui::custom_stylesheet_active = true; } gui::stylesheet = styleSheet(); if (m_main_window) { m_main_window->RepaintGui(); } } void gui_application::OnShortcutChange() { if (m_game_window) { static_cast<gs_frame*>(m_game_window)->update_shortcuts(); } } /** * Using connects avoids timers being unable to be used in a non-qt thread. So, even if this looks stupid to just call func, it's succinct. 
*/ void gui_application::CallFromMainThread(const std::function<void()>& func, atomic_t<u32>* wake_up) { func(); if (wake_up) { *wake_up = true; wake_up->notify_one(); } } void gui_application::OnAppStateChanged(Qt::ApplicationState state) { // Invalidate previous delayed pause call (even when the setting is off because it is dynamic) m_pause_delayed_tag++; if (!g_cfg.misc.autopause) { return; } const auto emu_state = Emu.GetStatus(); const bool is_active = state == Qt::ApplicationActive; if (emu_state != system_state::paused && emu_state != system_state::running) { return; } const bool is_paused = emu_state == system_state::paused; if (is_active != is_paused) { // Nothing to do (either paused and this is focus-out event or running and this is a focus-in event) // Invalidate data m_is_pause_on_focus_loss_active = false; m_emu_focus_out_emulation_id = Emulator::stop_counter_t{}; return; } if (is_paused) { // Check if Emu.Resume() or Emu.Kill() has not been called since if (m_is_pause_on_focus_loss_active && m_pause_amend_time_on_focus_loss == Emu.GetPauseTime() && m_emu_focus_out_emulation_id == Emu.GetEmulationIdentifier()) { m_is_pause_on_focus_loss_active = false; Emu.Resume(); } return; } // Gather validation data m_emu_focus_out_emulation_id = Emu.GetEmulationIdentifier(); auto pause_callback = [this, delayed_tag = m_pause_delayed_tag]() { // Check if Emu.Kill() has not been called since if (applicationState() != Qt::ApplicationActive && Emu.IsRunning() && m_emu_focus_out_emulation_id == Emu.GetEmulationIdentifier() && delayed_tag == m_pause_delayed_tag && !m_is_pause_on_focus_loss_active) { if (Emu.Pause()) { // Gather validation data m_pause_amend_time_on_focus_loss = Emu.GetPauseTime(); m_emu_focus_out_emulation_id = Emu.GetEmulationIdentifier(); m_is_pause_on_focus_loss_active = true; } } }; if (state == Qt::ApplicationSuspended) { // Must be invoked now (otherwise it may not happen later) pause_callback(); return; } // Delay pause so it won't immediately 
pause the emulated application QTimer::singleShot(1000, this, pause_callback); } bool gui_application::native_event_filter::nativeEventFilter([[maybe_unused]] const QByteArray& eventType, [[maybe_unused]] void* message, [[maybe_unused]] qintptr* result) { #ifdef _WIN32 if (!Emu.IsRunning() && !g_raw_mouse_handler) { return false; } if (eventType == "windows_generic_MSG") { if (MSG* msg = static_cast<MSG*>(message); msg && msg->message == WM_INPUT) { if (auto* handler = g_fxo->try_get<MouseHandlerBase>(); handler && handler->type == mouse_handler::raw) { static_cast<raw_mouse_handler*>(handler)->handle_native_event(*msg); } if (g_raw_mouse_handler) { g_raw_mouse_handler->handle_native_event(*msg); } } } #endif return false; }
34,410
C++
.cpp
982
31.925662
211
0.708002
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
5,126
find_dialog.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/find_dialog.cpp
#include "find_dialog.h" #include <QVBoxLayout> find_dialog::find_dialog(QPlainTextEdit* edit, QWidget *parent, Qt::WindowFlags f) : QDialog(parent, f), m_text_edit(edit) { setWindowTitle(tr("Find string")); m_find_bar = new QLineEdit(); m_find_bar->setPlaceholderText(tr("Search...")); m_label_count_lines = new QLabel(tr("Counted in lines: -")); m_label_count_total = new QLabel(tr("Counted in total: -")); m_find_first = new QPushButton(tr("First")); m_find_last = new QPushButton(tr("Last")); m_find_next = new QPushButton(tr("Next")); m_find_previous = new QPushButton(tr("Previous")); QHBoxLayout* count_layout = new QHBoxLayout(); count_layout->addWidget(m_label_count_lines); count_layout->addWidget(m_label_count_total); QHBoxLayout* button_layout = new QHBoxLayout(); button_layout->addWidget(m_find_first); button_layout->addWidget(m_find_last); button_layout->addWidget(m_find_previous); button_layout->addWidget(m_find_next); QVBoxLayout* layout = new QVBoxLayout(); layout->addWidget(m_find_bar); layout->addLayout(count_layout); layout->addLayout(button_layout); setLayout(layout); connect(m_find_first, &QPushButton::clicked, this, &find_dialog::find_first); connect(m_find_last, &QPushButton::clicked, this, &find_dialog::find_last); connect(m_find_next, &QPushButton::clicked, this, &find_dialog::find_next); connect(m_find_previous, &QPushButton::clicked, this, &find_dialog::find_previous); m_find_next->setDefault(true); show(); } int find_dialog::count_all() { m_count_lines = 0; m_count_total = 0; if (!m_text_edit || m_find_bar->text().isEmpty()) { show_count(); return 0; } const QTextCursor old_cursor = m_text_edit->textCursor(); m_text_edit->moveCursor(QTextCursor::Start); int old_line_index = -1; while (m_text_edit->find(m_find_bar->text())) { m_count_total++; const int new_line_index = m_text_edit->textCursor().blockNumber(); if (new_line_index != old_line_index) { m_count_lines++; old_line_index = new_line_index; } } m_text_edit->setTextCursor(old_cursor); 
show_count(); return m_count_total; } void find_dialog::find_first() { if (count_all() <= 0) return; m_text_edit->moveCursor(QTextCursor::Start); m_text_edit->find(m_find_bar->text()); } void find_dialog::find_last() { if (count_all() <= 0) return; m_text_edit->moveCursor(QTextCursor::End); m_text_edit->find(m_find_bar->text(), QTextDocument::FindBackward); } void find_dialog::find_next() { if (count_all() <= 0) return; m_text_edit->find(m_find_bar->text()); } void find_dialog::find_previous() { if (count_all() <= 0) return; m_text_edit->find(m_find_bar->text(), QTextDocument::FindBackward); } void find_dialog::show_count() const { m_label_count_lines->setText(tr("Counted in lines: %0").arg(m_count_lines)); m_label_count_total->setText(tr("Counted in total: %0").arg(m_count_total)); }
2,899
C++
.cpp
90
30
122
0.711359
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
5,127
cg_disasm_window.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/cg_disasm_window.cpp
#include "cg_disasm_window.h" #include "gui_settings.h" #include "syntax_highlighter.h" #include <QSplitter> #include <QMenu> #include <QFileDialog> #include <QHBoxLayout> #include <QFontDatabase> #include <QMimeData> #include "Emu/RSX/Program/CgBinaryProgram.h" LOG_CHANNEL(gui_log, "GUI"); cg_disasm_window::cg_disasm_window(std::shared_ptr<gui_settings> gui_settings) : m_gui_settings(std::move(gui_settings)) { setWindowTitle(tr("Cg Disasm")); setObjectName("cg_disasm"); setAttribute(Qt::WA_DeleteOnClose); setAttribute(Qt::WA_StyledBackground); setAcceptDrops(true); setMinimumSize(QSize(200, 150)); // seems fine on win 10 resize(QSize(620, 395)); m_path_last = m_gui_settings->GetValue(gui::fd_cg_disasm).toString(); m_disasm_text = new QTextEdit(this); m_disasm_text->setReadOnly(true); m_disasm_text->setWordWrapMode(QTextOption::NoWrap); m_disasm_text->setFont(QFontDatabase::systemFont(QFontDatabase::FixedFont)); m_glsl_text = new QTextEdit(this); m_glsl_text->setReadOnly(true); m_glsl_text->setWordWrapMode(QTextOption::NoWrap); m_glsl_text->setFont(QFontDatabase::systemFont(QFontDatabase::FixedFont)); // m_disasm_text syntax highlighter sh_asm = new AsmHighlighter(m_disasm_text->document()); // m_glsl_text syntax highlighter sh_glsl = new GlslHighlighter(m_glsl_text->document()); QSplitter* splitter = new QSplitter(); splitter->addWidget(m_disasm_text); splitter->addWidget(m_glsl_text); QHBoxLayout* layout = new QHBoxLayout(); layout->addWidget(splitter); setLayout(layout); m_disasm_text->setContextMenuPolicy(Qt::CustomContextMenu); m_glsl_text->setContextMenuPolicy(Qt::CustomContextMenu); connect(m_disasm_text, &QWidget::customContextMenuRequested, this, &cg_disasm_window::ShowContextMenu); connect(m_glsl_text, &QWidget::customContextMenuRequested, this, &cg_disasm_window::ShowContextMenu); ShowDisasm(); } void cg_disasm_window::ShowContextMenu(const QPoint &pos) { QMenu menu; QAction* clear = new QAction(tr("&Clear")); QAction* open = new QAction(tr("Open &Cg 
binary program")); menu.addAction(open); menu.addSeparator(); menu.addAction(clear); connect(clear, &QAction::triggered, [this]() { m_disasm_text->clear(); m_glsl_text->clear(); }); connect(open, &QAction::triggered, [this]() { const QString file_path = QFileDialog::getOpenFileName(this, tr("Select Cg program object"), m_path_last, tr("Cg program objects (*.fpo;*.vpo);;")); if (file_path.isEmpty()) return; m_path_last = file_path; ShowDisasm(); }); const auto obj = qobject_cast<QTextEdit*>(sender()); QPoint origin; if (obj == m_disasm_text) { origin = m_disasm_text->viewport()->mapToGlobal(pos); } else if (obj == m_glsl_text) { origin = m_glsl_text->viewport()->mapToGlobal(pos); } else { origin = mapToGlobal(pos); } menu.exec(origin); } void cg_disasm_window::ShowDisasm() const { if (QFileInfo(m_path_last).isFile()) { CgBinaryDisasm disasm(m_path_last.toStdString()); disasm.BuildShaderBody(); m_disasm_text->setText(QString::fromStdString(disasm.GetArbShader())); m_glsl_text->setText(QString::fromStdString(disasm.GetGlslShader())); m_gui_settings->SetValue(gui::fd_cg_disasm, m_path_last); } else if (!m_path_last.isEmpty()) { gui_log.error("CgDisasm: Failed to open %s", m_path_last); } } bool cg_disasm_window::IsValidFile(const QMimeData& md, bool save) { const QList<QUrl> urls = md.urls(); if (urls.count() > 1) { return false; } const QString suff = QFileInfo(urls[0].fileName()).suffix().toLower(); if (suff == "fpo" || suff == "vpo") { if (save) { m_path_last = urls[0].toLocalFile(); } return true; } return false; } void cg_disasm_window::dropEvent(QDropEvent* ev) { if (IsValidFile(*ev->mimeData(), true)) { ShowDisasm(); } } void cg_disasm_window::dragEnterEvent(QDragEnterEvent* ev) { if (IsValidFile(*ev->mimeData())) { ev->accept(); } } void cg_disasm_window::dragMoveEvent(QDragMoveEvent* ev) { if (IsValidFile(*ev->mimeData())) { ev->accept(); } } void cg_disasm_window::dragLeaveEvent(QDragLeaveEvent* ev) { ev->accept(); }
4,117
C++
.cpp
141
27.035461
150
0.732066
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
5,128
movie_item_base.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/movie_item_base.cpp
#include "stdafx.h" #include "movie_item_base.h" #include <QFile> movie_item_base::movie_item_base() { init_pointers(); } movie_item_base::~movie_item_base() { if (m_movie) { m_movie->stop(); } if (m_media_player) { m_media_player->stop(); } wait_for_icon_loading(true); wait_for_size_on_disk_loading(true); } void movie_item_base::init_pointers() { m_icon_loading_aborted.reset(new atomic_t<bool>(false)); m_size_on_disk_loading_aborted.reset(new atomic_t<bool>(false)); } void movie_item_base::set_active(bool active) { if (!std::exchange(m_active, active) && active) { init_movie(); if (m_movie) { m_movie->jumpToFrame(1); m_movie->start(); } if (m_media_player) { m_media_player->play(); } } } void movie_item_base::init_movie() { if (m_movie || m_media_player) { // Already initialized return; } if (!m_icon_callback || m_movie_path.isEmpty() || !QFile::exists(m_movie_path)) { m_movie_path.clear(); return; } const QString lower = m_movie_path.toLower(); if (lower.endsWith(".gif")) { m_movie.reset(new QMovie(m_movie_path)); m_movie_path.clear(); if (!m_movie->isValid()) { m_movie.reset(); return; } QObject::connect(m_movie.get(), &QMovie::frameChanged, m_movie.get(), [this](int) { m_icon_callback({}); }); return; } if (lower.endsWith(".pam")) { // We can't set PAM files as source of the video player, so we have to feed them as raw data. 
QFile file(m_movie_path); if (!file.open(QFile::OpenModeFlag::ReadOnly)) { return; } // TODO: Decode the pam properly before pushing it to the player m_movie_data = file.readAll(); if (m_movie_data.isEmpty()) { return; } m_movie_buffer.reset(new QBuffer(&m_movie_data)); m_movie_buffer->open(QIODevice::ReadOnly); } m_video_sink.reset(new QVideoSink()); QObject::connect(m_video_sink.get(), &QVideoSink::videoFrameChanged, m_video_sink.get(), [this](const QVideoFrame& frame) { m_icon_callback(frame); }); m_media_player.reset(new QMediaPlayer()); m_media_player->setVideoSink(m_video_sink.get()); m_media_player->setLoops(QMediaPlayer::Infinite); if (m_movie_buffer) { m_media_player->setSourceDevice(m_movie_buffer.get()); } else { m_media_player->setSource(m_movie_path); } } void movie_item_base::stop_movie() { if (m_movie) { m_movie->stop(); } m_video_sink.reset(); m_media_player.reset(); m_movie_buffer.reset(); m_movie_data.clear(); } QPixmap movie_item_base::get_movie_image(const QVideoFrame& frame) const { if (!m_active) { return {}; } if (m_movie) { return m_movie->currentPixmap(); } if (!frame.isValid()) { return {}; } // Get image. This usually also converts the image to ARGB32. 
return QPixmap::fromImage(frame.toImage()); } void movie_item_base::call_icon_func() const { if (m_icon_callback) { m_icon_callback({}); } } void movie_item_base::set_icon_func(const icon_callback_t& func) { m_icon_callback = func; } void movie_item_base::call_icon_load_func(int index) { if (!m_icon_load_callback || m_icon_loading || m_icon_loading_aborted->load()) { return; } wait_for_icon_loading(true); *m_icon_loading_aborted = false; m_icon_loading = true; m_icon_load_thread.reset(QThread::create([this, index]() { if (m_icon_load_callback) { m_icon_load_callback(index); } })); m_icon_load_thread->start(); } void movie_item_base::set_icon_load_func(const icon_load_callback_t& func) { wait_for_icon_loading(true); m_icon_loading = false; m_icon_load_callback = func; *m_icon_loading_aborted = false; } void movie_item_base::call_size_calc_func() { if (!m_size_calc_callback || m_size_on_disk_loading || m_size_on_disk_loading_aborted->load()) { return; } wait_for_size_on_disk_loading(true); *m_size_on_disk_loading_aborted = false; m_size_on_disk_loading = true; m_size_calc_thread.reset(QThread::create([this]() { if (m_size_calc_callback) { m_size_calc_callback(); } })); m_size_calc_thread->start(); } void movie_item_base::set_size_calc_func(const size_calc_callback_t& func) { m_size_on_disk_loading = false; m_size_calc_callback = func; *m_size_on_disk_loading_aborted = false; } void movie_item_base::wait_for_icon_loading(bool abort) { *m_icon_loading_aborted = abort; if (m_icon_load_thread && m_icon_load_thread->isRunning()) { m_icon_load_thread->wait(); m_icon_load_thread.reset(); } } void movie_item_base::wait_for_size_on_disk_loading(bool abort) { *m_size_on_disk_loading_aborted = abort; if (m_size_calc_thread && m_size_calc_thread->isRunning()) { m_size_calc_thread->wait(); m_size_calc_thread.reset(); } }
4,664
C++
.cpp
209
19.990431
122
0.68978
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
5,129
input_dialog.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/input_dialog.cpp
#include "input_dialog.h" #include "qt_utils.h" #include <QVBoxLayout> #include <QDialogButtonBox> #include <QLineEdit> #include <QPushButton> #include <QValidator> #include <QLabel> input_dialog::input_dialog(int max_length, const QString& text, const QString& title, const QString& label, const QString& placeholder, QWidget *parent, Qt::WindowFlags f) : QDialog(parent, f) { setWindowTitle(title); m_label = new QLabel(label); m_input = new QLineEdit(); m_input->setPlaceholderText(placeholder); m_input->setText(text); m_input->setMaxLength(max_length); m_input->setClearButtonEnabled(true); connect(m_input, &QLineEdit::textChanged, this, &input_dialog::text_changed); m_button_box = new QDialogButtonBox(QDialogButtonBox::Ok | QDialogButtonBox::Cancel); connect(m_button_box, &QDialogButtonBox::accepted, this, &QDialog::accept); connect(m_button_box, &QDialogButtonBox::rejected, this, &QDialog::reject); QVBoxLayout* layout = new QVBoxLayout(); layout->addWidget(m_label); layout->addWidget(m_input); layout->addWidget(m_button_box); setLayout(layout); setFixedHeight(sizeHint().height()); } input_dialog::~input_dialog() { } void input_dialog::set_clear_button_enabled(bool enabled) const { m_input->setClearButtonEnabled(enabled); } void input_dialog::set_input_font(const QFont& font, bool fix_width, char sample) const { if (const int max = m_input->maxLength(); max > 0 && fix_width && std::isprint(static_cast<uchar>(sample))) { const QString str = QString(max, sample); m_input->setFixedWidth(gui::utils::get_label_width(str, &font)); } m_input->setFont(font); } void input_dialog::set_validator(const QValidator* validator) const { m_input->setValidator(validator); } void input_dialog::set_button_enabled(QDialogButtonBox::StandardButton id, bool enabled) const { if (QPushButton* button = m_button_box->button(id)) { button->setEnabled(enabled); } } void input_dialog::set_label_text(const QString& text) const { m_label->setText(text); } QString input_dialog::get_input_text() 
const { return m_input->text(); }
2,080
C++
.cpp
64
30.71875
171
0.75912
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
5,130
user_manager_dialog.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/user_manager_dialog.cpp
#include <QRegularExpressionValidator> #include <QInputDialog> #include <QKeyEvent> #include <QMouseEvent> #include <QEvent> #include <QScreen> #include <QHeaderView> #include <QHBoxLayout> #include <QPushButton> #include <QMenu> #include <QDesktopServices> #include <QMessageBox> #include <QGuiApplication> #include "user_manager_dialog.h" #include "table_item_delegate.h" #include "main_application.h" #include "gui_settings.h" #include "persistent_settings.h" #include "qt_utils.h" #include "Emu/System.h" #include "Emu/system_utils.hpp" #include "Utilities/StrUtil.h" #include "Utilities/File.h" #include "util/logs.hpp" constexpr auto qstr = QString::fromStdString; LOG_CHANNEL(gui_log, "GUI"); user_manager_dialog::user_manager_dialog(std::shared_ptr<gui_settings> gui_settings, std::shared_ptr<persistent_settings> persistent_settings, QWidget* parent) : QDialog(parent) , m_gui_settings(std::move(gui_settings)) , m_persistent_settings(std::move(persistent_settings)) { setWindowTitle(tr("User Manager")); setMinimumSize(QSize(500, 400)); setModal(true); Init(); } void user_manager_dialog::Init() { // Table m_table = new QTableWidget(this); m_table->setItemDelegate(new table_item_delegate(this)); // to get rid of cell selection rectangles m_table->setSelectionMode(QAbstractItemView::SelectionMode::SingleSelection); m_table->setSelectionBehavior(QAbstractItemView::SelectRows); m_table->setContextMenuPolicy(Qt::CustomContextMenu); m_table->setColumnCount(2); m_table->setCornerButtonEnabled(false); m_table->setAlternatingRowColors(true); m_table->setHorizontalHeaderLabels(QStringList() << tr("User ID") << tr("User Name")); m_table->horizontalHeader()->setDefaultAlignment(Qt::AlignLeft); m_table->horizontalHeader()->setStretchLastSection(true); m_table->horizontalHeader()->setDefaultSectionSize(150); m_table->installEventFilter(this); QPushButton* push_remove_user = new QPushButton(tr("&Delete User"), this); push_remove_user->setAutoDefault(false); QPushButton* 
push_create_user = new QPushButton(tr("&Create User"), this); push_create_user->setAutoDefault(false); QPushButton* push_login_user = new QPushButton(tr("&Log In User"), this); push_login_user->setAutoDefault(false); QPushButton* push_rename_user = new QPushButton(tr("&Rename User"), this); push_rename_user->setAutoDefault(false); QPushButton* push_close = new QPushButton(tr("&Close"), this); push_close->setAutoDefault(false); // Button Layout QHBoxLayout* hbox_buttons = new QHBoxLayout(); hbox_buttons->addWidget(push_create_user); hbox_buttons->addWidget(push_login_user); hbox_buttons->addWidget(push_rename_user); hbox_buttons->addWidget(push_remove_user); hbox_buttons->addStretch(); hbox_buttons->addWidget(push_close); // Main Layout QVBoxLayout* vbox_main = new QVBoxLayout(); vbox_main->setAlignment(Qt::AlignCenter); vbox_main->addWidget(m_table); vbox_main->addLayout(hbox_buttons); setLayout(vbox_main); // Get the active user m_active_user = m_persistent_settings->GetCurrentUser("00000001").toStdString(); // Get the real active user (might differ, set by cli) if (m_active_user != Emu.GetUsr()) { m_active_user = Emu.GetUsr(); } UpdateTable(); restoreGeometry(m_gui_settings->GetValue(gui::um_geometry).toByteArray()); // Use this in multiple connects to protect the current user from deletion/rename. 
const auto enable_buttons = [=, this]() { const u32 key = GetUserKey(); if (key == 0) { push_login_user->setEnabled(false); push_rename_user->setEnabled(false); push_remove_user->setEnabled(false); return; } const bool enable = m_user_list[key].GetUserId() != m_active_user; push_login_user->setEnabled(enable); push_rename_user->setEnabled(true); push_remove_user->setEnabled(enable); }; enable_buttons(); // Connects and events connect(push_close, &QAbstractButton::clicked, this, &user_manager_dialog::close); connect(push_remove_user, &QAbstractButton::clicked, this, &user_manager_dialog::OnUserRemove); connect(push_rename_user, &QAbstractButton::clicked, this, &user_manager_dialog::OnUserRename); connect(push_create_user, &QAbstractButton::clicked, this, &user_manager_dialog::OnUserCreate); connect(push_login_user, &QAbstractButton::clicked, this, &user_manager_dialog::OnUserLogin); connect(this, &user_manager_dialog::OnUserLoginSuccess, this, enable_buttons); connect(m_table->horizontalHeader(), &QHeaderView::sectionClicked, this, &user_manager_dialog::OnSort); connect(m_table, &QTableWidget::customContextMenuRequested, this, &user_manager_dialog::ShowContextMenu); connect(m_table, &QTableWidget::itemDoubleClicked, this, &user_manager_dialog::OnUserLogin); connect(m_table, &QTableWidget::itemSelectionChanged, this, enable_buttons); } void user_manager_dialog::UpdateTable(bool mark_only) { // For indicating logged-in user. 
QFont bold_font; bold_font.setBold(true); if (mark_only) { const QString active_user = qstr(m_active_user); for (int i = 0; i < m_table->rowCount(); i++) { QTableWidgetItem* user_id_item = m_table->item(i, 0); QTableWidgetItem* username_item = m_table->item(i, 1); // Compare current config value with the one in this user if (active_user == user_id_item->text()) { user_id_item->setFont(bold_font); username_item->setFont(bold_font); } else { user_id_item->setFont(QFont()); username_item->setFont(QFont()); } } return; } // Get the user folders in the home directory and the currently logged in user. m_user_list.clear(); m_user_list = user_account::GetUserAccounts(rpcs3::utils::get_hdd0_dir() + "home"); // Clear and then repopulate the table with the list gathered above. m_table->setRowCount(static_cast<int>(m_user_list.size())); int row = 0; for (auto& [id, account] : m_user_list) { QTableWidgetItem* user_id_item = new QTableWidgetItem(qstr(account.GetUserId())); user_id_item->setData(Qt::UserRole, id); // For sorting to work properly user_id_item->setFlags(user_id_item->flags() & ~Qt::ItemIsEditable); m_table->setItem(row, 0, user_id_item); QTableWidgetItem* username_item = new QTableWidgetItem(qstr(account.GetUsername())); username_item->setData(Qt::UserRole, id); // For sorting to work properly username_item->setFlags(username_item->flags() & ~Qt::ItemIsEditable); m_table->setItem(row, 1, username_item); // Compare current config value with the one in this user (only 8 digits in userId) if (m_active_user.starts_with(account.GetUserId())) { user_id_item->setFont(bold_font); username_item->setFont(bold_font); } ++row; } // GUI resizing m_table->horizontalHeader()->resizeSections(QHeaderView::ResizeToContents); m_table->verticalHeader()->resizeSections(QHeaderView::ResizeToContents); const QSize table_size( m_table->verticalHeader()->width() + m_table->horizontalHeader()->length() + m_table->frameWidth() * 2, m_table->horizontalHeader()->height() + 
m_table->verticalHeader()->length() + m_table->frameWidth() * 2); const QSize preferred_size = minimumSize().expandedTo(sizeHint() - m_table->sizeHint() + table_size).expandedTo(size()); const QSize max_size(preferred_size.width(), static_cast<int>(QGuiApplication::primaryScreen()->size().height() * 0.6)); resize(preferred_size.boundedTo(max_size)); } // Remove a user folder, needs to be confirmed. void user_manager_dialog::OnUserRemove() { const u32 key = GetUserKey(); if (key == 0) { return; } const QString username = qstr(m_user_list[key].GetUsername()); const QString user_id = qstr(m_user_list[key].GetUserId()); const std::string user_dir = m_user_list[key].GetUserDir(); if (QMessageBox::question(this, tr("Delete Confirmation"), tr("Are you sure you want to delete the following user?\n\nUser ID: %0\nUsername: %1\n\n" "This will remove all files in:\n%2").arg(user_id).arg(username).arg(qstr(user_dir)), QMessageBox::Yes, QMessageBox::No) == QMessageBox::Yes) { gui_log.warning("Deleting user: %s", user_dir); fs::remove_all(user_dir); UpdateTable(); } } void user_manager_dialog::GenerateUser(const std::string& user_id, const std::string& username) { ensure(rpcs3::utils::check_user(user_id) > 0); // Create user folders and such. const std::string home_dir = rpcs3::utils::get_hdd0_dir() + "home/"; const std::string user_dir = home_dir + user_id; fs::create_dir(home_dir); fs::create_dir(user_dir + "/"); fs::create_dir(user_dir + "/exdata/"); fs::create_dir(user_dir + "/savedata/"); fs::create_dir(user_dir + "/trophy/"); fs::write_file(user_dir + "/localusername", fs::create + fs::excl + fs::write, username); } bool user_manager_dialog::ValidateUsername(const QString& text_to_validate) { // "Entire string (^...$) must be between 3 and 16 characters // and only consist of letters, numbers, underscores, and hyphens." 
const QRegularExpressionValidator validator(QRegularExpression("^[A-Za-z0-9_-]{3,16}$")); int pos = 0; QString text = text_to_validate; return (validator.validate(text, pos) == QValidator::Acceptable); } void user_manager_dialog::OnUserRename() { const u32 key = GetUserKey(); if (key == 0) { return; } const std::string user_id = m_user_list[key].GetUserId(); const std::string username = m_user_list[key].GetUsername(); const QString q_username = qstr(username); QInputDialog* dialog = new QInputDialog(this); dialog->setWindowTitle(tr("Rename User")); dialog->setLabelText(tr("User Id: %0\nOld Username: %1\n\nNew Username: ").arg(qstr(user_id)).arg(q_username)); dialog->setTextValue(q_username); dialog->resize(200, 100); while (dialog->exec() != QDialog::Rejected) { dialog->resize(200, 100); const QString text_to_validate = dialog->textValue(); if (!ValidateUsername(text_to_validate)) { QMessageBox::warning(this, tr("Error"), tr("Name must be between 3 and 16 characters and only consist of letters, numbers, underscores, and hyphens.")); continue; } const std::string username_file = rpcs3::utils::get_hdd0_dir() + "home/" + user_id + "/localusername"; const std::string new_username = text_to_validate.toStdString(); if (fs::write_file(username_file, fs::rewrite, new_username)) { gui_log.success("Renamed user %s with id %s to %s", username, user_id, new_username); } else { gui_log.fatal("Could not rename user %s with id %s to %s", username, user_id, new_username); } UpdateTable(); break; } } void user_manager_dialog::OnUserCreate() { // Take the smallest user id > 0, then reformat the result into an 8-digit string. 
u32 smallest = 1; for (auto it = m_user_list.begin(); it != m_user_list.end(); ++it) { if (it->first > smallest) { break; } smallest++; } if (smallest >= 100000000) // Only 8 digits allowed { QMessageBox::warning(this, tr("Error"), tr("Cannot add more users.")); return; } const std::string next_user_id = fmt::format("%08d", smallest); ensure(rpcs3::utils::check_user(next_user_id) > 0); QInputDialog* dialog = new QInputDialog(this); dialog->setWindowTitle(tr("New User")); dialog->setLabelText(tr("New User ID: %0\n\nNew Username: ").arg(qstr(next_user_id))); dialog->resize(200, 100); while (dialog->exec() != QDialog::Rejected) { dialog->resize(200, 100); QString text_to_validate = dialog->textValue(); if (!ValidateUsername(text_to_validate)) { QMessageBox::warning(this, tr("Error"), tr("Name must be between 3 and 16 characters and only consist of letters, numbers, underscores, and hyphens.")); continue; } GenerateUser(next_user_id, text_to_validate.toStdString()); UpdateTable(); break; } } void user_manager_dialog::OnUserLogin() { if (!Emu.IsStopped()) { if (QMessageBox::question(this, tr("Stop emulator?"), tr("In order to change the user you have to stop the emulator first.\n\nStop the emulator now?"), QMessageBox::Yes | QMessageBox::Abort) != QMessageBox::Yes) { return; } gui_log.notice("Stopping current emulation in order to change the current user."); Emu.GracefulShutdown(false); } const u32 key = GetUserKey(); const std::string new_user = m_user_list[key].GetUserId(); main_application::InitializeEmulator(new_user, Emu.HasGui()); m_active_user = new_user; m_persistent_settings->SetValue(gui::persistent::active_user, qstr(m_active_user)); UpdateTable(true); Q_EMIT OnUserLoginSuccess(); } void user_manager_dialog::OnSort(int logicalIndex) { if (logicalIndex < 0) { return; } else if (logicalIndex == m_sort_column) { m_sort_ascending ^= true; } else { m_sort_ascending = true; } m_sort_column = logicalIndex; m_table->sortByColumn(m_sort_column, m_sort_ascending ? 
Qt::AscendingOrder : Qt::DescendingOrder); } void user_manager_dialog::ShowContextMenu(const QPoint &pos) { const u32 key = GetUserKey(); if (key == 0) { return; } QMenu* menu = new QMenu(); // Create submenu for sort options. QMenu* sort_menu = menu->addMenu(tr("&Sort By")); QAction* user_id_act = sort_menu->addAction(tr("User ID")); QAction* username_act = sort_menu->addAction(tr("User Name")); QAction* remove_act = menu->addAction(tr("&Remove")); QAction* rename_act = menu->addAction(tr("&Rename")); QAction* login_act = menu->addAction(tr("&Login")); QAction* show_dir_act = menu->addAction(tr("&Open User Directory")); // Only enable actions if selected user is not logged in user. const bool enable = m_user_list[key].GetUserId() != m_active_user; remove_act->setEnabled(enable); rename_act->setEnabled(enable); // Connects and Events connect(remove_act, &QAction::triggered, this, &user_manager_dialog::OnUserRemove); connect(rename_act, &QAction::triggered, this, &user_manager_dialog::OnUserRename); connect(login_act, &QAction::triggered, this, &user_manager_dialog::OnUserLogin); connect(show_dir_act, &QAction::triggered, this, [this, key]() { const QString path = qstr(m_user_list[key].GetUserDir()); gui::utils::open_dir(path); }); connect(user_id_act, &QAction::triggered, this, [this] {OnSort(0); }); connect(username_act, &QAction::triggered, this, [this] {OnSort(1); }); menu->exec(m_table->viewport()->mapToGlobal(pos)); } // Returns the current user's key > 0. 
if no user is selected, return 0 u32 user_manager_dialog::GetUserKey() const { const int idx = m_table->currentRow(); if (idx < 0) { return 0; } const QTableWidgetItem* item = m_table->item(idx, 0); if (!item) { return 0; } const u32 idx_real = item->data(Qt::UserRole).toUInt(); if (!m_user_list.contains(idx_real)) { return 0; } return idx_real; } void user_manager_dialog::closeEvent(QCloseEvent *event) { m_gui_settings->SetValue(gui::um_geometry, saveGeometry()); QDialog::closeEvent(event); } bool user_manager_dialog::eventFilter(QObject *object, QEvent *event) { const u32 key = GetUserKey(); if (key == 0 || object != m_table || m_user_list[key].GetUserId() == m_active_user) { return QDialog::eventFilter(object, event); } if (event->type() == QEvent::KeyRelease) { const QKeyEvent* key_event = static_cast<QKeyEvent*>(event); switch (key_event->key()) { case Qt::Key_F2: OnUserRename(); break; case Qt::Key_Delete: OnUserRemove(); break; case Qt::Key_Return: case Qt::Key_Enter: OnUserLogin(); break; default: break; } } return QDialog::eventFilter(object, event); } void user_manager_dialog::mouseDoubleClickEvent(QMouseEvent* ev) { if (!ev) return; // Qt's itemDoubleClicked signal doesn't distinguish between mouse buttons and there is no simple way to get the pressed button. // So we have to ignore this event when another button is pressed. if (ev->button() != Qt::LeftButton) { ev->ignore(); return; } QDialog::mouseDoubleClickEvent(ev); }
15,755
C++
.cpp
426
34.558685
159
0.72633
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
5,131
save_data_list_dialog.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/save_data_list_dialog.cpp
#include "save_data_list_dialog.h" #include "save_data_info_dialog.h" #include "gui_settings.h" #include "persistent_settings.h" #include <QPushButton> #include <QHBoxLayout> #include <QHeaderView> #include <QGuiApplication> #include <QScreen> #include <QMouseEvent> LOG_CHANNEL(cellSaveData); //Show up the savedata list, either to choose one to save/load or to manage saves. //I suggest to use function callbacks to give save data list or get save data entry. (Not implemented or stubbed) save_data_list_dialog::save_data_list_dialog(const std::vector<SaveDataEntry>& entries, s32 focusedEntry, u32 op, vm::ptr<CellSaveDataListSet> listSet, QWidget* parent) : QDialog(parent) , m_save_entries(entries) { cellSaveData.notice("Creating Qt save_data_list_dialog (entries=%d, focusedEntry=%d, op=0x%x, listSet=*0x%x)", entries.size(), focusedEntry, op, listSet); if (op >= 8) { setWindowTitle(tr("Save Data Interface (Delete)")); } else if (op & 1) { setWindowTitle(tr("Save Data Interface (Load)")); } else { setWindowTitle(tr("Save Data Interface (Save)")); } setMinimumSize(QSize(400, 400)); m_gui_settings.reset(new gui_settings()); m_persistent_settings.reset(new persistent_settings()); // Table m_list = new QTableWidget(this); //m_list->setItemDelegate(new table_item_delegate(this)); // to get rid of cell selection rectangles include "table_item_delegate.h" m_list->setSelectionMode(QAbstractItemView::SelectionMode::SingleSelection); m_list->setSelectionBehavior(QAbstractItemView::SelectRows); m_list->setContextMenuPolicy(Qt::CustomContextMenu); m_list->setColumnCount(4); m_list->setHorizontalHeaderLabels(QStringList() << tr("Title") << tr("Subtitle") << tr("Save ID") << tr("Entry Notes")); // Button Layout QHBoxLayout* hbox_action = new QHBoxLayout(); if (!entries.empty()) { // If there are no entries, don't add the selection widget or the selection label to the UI. 
QPushButton *push_select = new QPushButton(tr("&Select Entry"), this); connect(push_select, &QAbstractButton::clicked, this, &save_data_list_dialog::accept); push_select->setAutoDefault(true); push_select->setDefault(true); hbox_action->addWidget(push_select); m_entry_label = new QLabel(this); UpdateSelectionLabel(); } if (listSet && listSet->newData) { QPushButton *saveNewEntry = new QPushButton(tr("Save New Entry"), this); connect(saveNewEntry, &QAbstractButton::clicked, this, [&]() { m_entry = rsx::overlays::user_interface::selection_code::new_save; accept(); }); hbox_action->addWidget(saveNewEntry); } hbox_action->addStretch(); QPushButton *push_cancel = new QPushButton(tr("&Cancel"), this); push_cancel->setAutoDefault(false); hbox_action->addWidget(push_cancel); // events connect(push_cancel, &QAbstractButton::clicked, this, &save_data_list_dialog::close); connect(m_list, &QTableWidget::itemDoubleClicked, this, &save_data_list_dialog::OnEntryInfo); connect(m_list, &QTableWidget::currentCellChanged, this, [&](int cr, int cc, int pr, int pc) { m_entry = cr; UpdateSelectionLabel(); Q_UNUSED(cc) Q_UNUSED(pr) Q_UNUSED(pc) }); connect(m_list->horizontalHeader(), &QHeaderView::sectionClicked, this, &save_data_list_dialog::OnSort); // main layout QVBoxLayout* vbox_main = new QVBoxLayout(); vbox_main->setAlignment(Qt::AlignCenter); vbox_main->addWidget(m_list); if (m_entry_label != nullptr) { vbox_main->addWidget(m_entry_label); } vbox_main->addLayout(hbox_action); setLayout(vbox_main); UpdateList(); connect(m_list, &QTableWidget::cellChanged, [&](int row, int col) { const int original_index = m_list->item(row, 0)->data(Qt::UserRole).toInt(); const SaveDataEntry original_entry = m_save_entries[original_index]; const QString original_dir_name = QString::fromStdString(original_entry.dirName); QVariantMap notes = m_persistent_settings->GetValue(gui::persistent::save_notes).toMap(); notes[original_dir_name] = m_list->item(row, col)->text(); 
m_persistent_settings->SetValue(gui::persistent::save_notes, notes); }); m_list->setCurrentCell(focusedEntry, 0); } void save_data_list_dialog::UpdateSelectionLabel() { if (m_entry_label != nullptr) { if (m_list->currentRow() == -1) { m_entry_label->setText(tr("Currently Selected: None")); } else { const int entry = m_list->item(m_list->currentRow(), 0)->data(Qt::UserRole).toInt(); m_entry_label->setText(tr("Currently Selected: ") + QString::fromStdString(m_save_entries[entry].dirName)); } } } s32 save_data_list_dialog::GetSelection() const { if (result() == QDialog::Accepted) { if (m_entry == rsx::overlays::user_interface::selection_code::new_save) { // Save new entry return rsx::overlays::user_interface::selection_code::new_save; } return m_list->item(m_entry, 0)->data(Qt::UserRole).toInt(); } // Cancel is pressed. May figure out proper cellsavedata code to use later. return rsx::overlays::user_interface::selection_code::canceled; } void save_data_list_dialog::OnSort(int logicalIndex) { if (logicalIndex >= 0) { if (logicalIndex == m_sort_column) { m_sort_ascending ^= true; } else { m_sort_ascending = true; } m_sort_column = logicalIndex; const Qt::SortOrder sort_order = m_sort_ascending ? Qt::AscendingOrder : Qt::DescendingOrder; m_list->sortByColumn(m_sort_column, sort_order); } } //Display info dialog directly. 
void save_data_list_dialog::OnEntryInfo() { if (const int idx = m_list->currentRow(); idx != -1) { save_data_info_dialog* infoDialog = new save_data_info_dialog(m_save_entries[idx], this); infoDialog->setModal(true); infoDialog->show(); } } void save_data_list_dialog::UpdateList() { m_list->clearContents(); m_list->setRowCount(::narrow<int>(m_save_entries.size())); const QVariantMap notes = m_persistent_settings->GetValue(gui::persistent::save_notes).toMap(); int row = 0; for (const SaveDataEntry& entry: m_save_entries) { const QString title = QString::fromStdString(entry.title); const QString subtitle = QString::fromStdString(entry.subtitle); const QString dirName = QString::fromStdString(entry.dirName); QTableWidgetItem* titleItem = new QTableWidgetItem(title); titleItem->setData(Qt::UserRole, row); // For sorting to work properly titleItem->setFlags(titleItem->flags() & ~Qt::ItemIsEditable); m_list->setItem(row, 0, titleItem); QTableWidgetItem* subtitleItem = new QTableWidgetItem(subtitle); subtitleItem->setFlags(subtitleItem->flags() & ~Qt::ItemIsEditable); m_list->setItem(row, 1, subtitleItem); QTableWidgetItem* dirNameItem = new QTableWidgetItem(dirName); dirNameItem->setFlags(dirNameItem->flags() & ~Qt::ItemIsEditable); m_list->setItem(row, 2, dirNameItem); QTableWidgetItem* noteItem = new QTableWidgetItem(); noteItem->setFlags(noteItem->flags() | Qt::ItemIsEditable); if (notes.contains(dirName)) { noteItem->setText(notes[dirName].toString()); } m_list->setItem(row, 3, noteItem); ++row; } m_list->horizontalHeader()->resizeSections(QHeaderView::ResizeToContents); m_list->verticalHeader()->resizeSections(QHeaderView::ResizeToContents); const QSize table_size ( m_list->verticalHeader()->width() + m_list->horizontalHeader()->length() + m_list->frameWidth() * 2, m_list->horizontalHeader()->height() + m_list->verticalHeader()->length() + m_list->frameWidth() * 2 ); const QSize preferred_size = minimumSize().expandedTo(sizeHint() - m_list->sizeHint() + table_size); 
const QSize max_size(preferred_size.width(), static_cast<int>(QGuiApplication::primaryScreen()->geometry().height() * 0.6)); resize(preferred_size.boundedTo(max_size)); } void save_data_list_dialog::mouseDoubleClickEvent(QMouseEvent* ev) { if (!ev) return; // Qt's itemDoubleClicked signal doesn't distinguish between mouse buttons and there is no simple way to get the pressed button. // So we have to ignore this event when another button is pressed. if (ev->button() != Qt::LeftButton) { ev->ignore(); return; } QDialog::mouseDoubleClickEvent(ev); }
8,045
C++
.cpp
207
36.400966
168
0.735634
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
5,132
sendmessage_dialog_frame.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/sendmessage_dialog_frame.cpp
#include <QVBoxLayout> #include <QHBoxLayout> #include <QPushButton> #include <QMessageBox> #include <QTimer> #include "sendmessage_dialog_frame.h" #include "Emu/IdManager.h" #include "Emu/System.h" #include "util/logs.hpp" LOG_CHANNEL(sendmessage_dlg_log, "sendmessage dlg"); void sendmessage_friend_callback(void* param, rpcn::NotificationType ntype, const std::string& username, bool status) { auto* dlg = static_cast<sendmessage_dialog_frame*>(param); dlg->callback_handler(ntype, username, status); } sendmessage_dialog_frame::~sendmessage_dialog_frame() { if (m_dialog) { m_dialog->deleteLater(); } } error_code sendmessage_dialog_frame::Exec(message_data& msg_data, std::set<std::string>& npids) { if (m_dialog) { m_dialog->close(); delete m_dialog; } m_dialog = new custom_dialog(false); m_dialog->setModal(true); m_dialog->setWindowTitle(tr("Choose friend to message:")); m_rpcn = rpcn::rpcn_client::get_instance(true); QVBoxLayout* vbox_global = new QVBoxLayout(); m_lst_friends = new QListWidget(); vbox_global->addWidget(m_lst_friends); QHBoxLayout* hbox_btns = new QHBoxLayout(); hbox_btns->addStretch(); QPushButton* btn_ok = new QPushButton(tr("Ok")); QPushButton* btn_cancel = new QPushButton(tr("Cancel")); hbox_btns->addWidget(btn_ok); hbox_btns->addWidget(btn_cancel); vbox_global->addLayout(hbox_btns); m_dialog->setLayout(vbox_global); connect(this, &sendmessage_dialog_frame::signal_add_friend, this, &sendmessage_dialog_frame::slot_add_friend); connect(this, &sendmessage_dialog_frame::signal_remove_friend, this, &sendmessage_dialog_frame::slot_remove_friend); error_code result = CELL_CANCEL; connect(btn_ok, &QAbstractButton::clicked, this, [this, &msg_data, &npids, &result]() { // Check one target is selected auto selected = m_lst_friends->selectedItems(); if (selected.empty()) { QMessageBox::critical(m_dialog, tr("Error sending a message!"), tr("You must select a friend!"), QMessageBox::Ok); return; } npids.insert(selected[0]->text().toStdString()); // Send the 
message if (m_rpcn->send_message(msg_data, npids)) { result = CELL_OK; } m_dialog->close(); }); connect(btn_cancel, &QAbstractButton::clicked, m_dialog, &custom_dialog::close); rpcn::friend_data data; m_rpcn->get_friends_and_register_cb(data, sendmessage_friend_callback, this); for (const auto& fr : data.friends) { // Only add online friends to the list if (fr.second.online) { add_friend(m_lst_friends, QString::fromStdString(fr.first)); } } auto& nps = g_fxo->get<np_state>(); QTimer timer; connect(&timer, &QTimer::timeout, this, [this, &nps, &timer]() { bool abort = Emu.IsStopped(); if (!abort && nps.abort_gui_flag.exchange(false)) { sendmessage_dlg_log.warning("Aborted by sceNp!"); abort = true; } if (abort) { if (m_dialog) { m_dialog->close(); } timer.stop(); } }); timer.start(10ms); m_dialog->exec(); m_rpcn->remove_friend_cb(sendmessage_friend_callback, this); return result; } void sendmessage_dialog_frame::add_friend(QListWidget* list, const QString& name) { if (auto found = list->findItems(name, Qt::MatchExactly); !found.empty()) { return; } list->addItem(new QListWidgetItem(name)); } void sendmessage_dialog_frame::remove_friend(QListWidget* list, const QString& name) { if (auto found = list->findItems(name, Qt::MatchExactly); !found.empty()) { delete list->takeItem(list->row(found[0])); } } void sendmessage_dialog_frame::slot_add_friend(QString name) { add_friend(m_lst_friends, name); } void sendmessage_dialog_frame::slot_remove_friend(QString name) { remove_friend(m_lst_friends, name); } void sendmessage_dialog_frame::callback_handler(u16 ntype, const std::string& username, bool status) { QString qtr_username = QString::fromStdString(username); switch (ntype) { case rpcn::NotificationType::FriendQuery: // Other user sent a friend request case rpcn::NotificationType::FriendPresenceChanged: break; case rpcn::NotificationType::FriendNew: // Add a friend to the friendlist(either accepted a friend request or friend accepted it) { if (status) { Q_EMIT 
signal_add_friend(qtr_username); } break; } case rpcn::NotificationType::FriendLost: // Remove friend from the friendlist(user removed friend or friend removed friend) { Q_EMIT signal_remove_friend(qtr_username); break; } case rpcn::NotificationType::FriendStatus: // Set status of friend to Offline or Online { if (status) { Q_EMIT signal_add_friend(qtr_username); } else { Q_EMIT signal_remove_friend(qtr_username); } break; } default: { sendmessage_dlg_log.fatal("An unhandled notification type was received by the sendmessage dialog callback!"); break; } } }
4,801
C++
.cpp
162
27.08642
130
0.727628
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
5,133
pad_settings_dialog.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/pad_settings_dialog.cpp
#include <QCheckBox> #include <QPushButton> #include <QPainter> #include <QPainterPath> #include <QInputDialog> #include <QMessageBox> #include <QSvgRenderer> #include "qt_utils.h" #include "pad_settings_dialog.h" #include "pad_led_settings_dialog.h" #include "pad_motion_settings_dialog.h" #include "ui_pad_settings_dialog.h" #include "tooltips.h" #include "gui_settings.h" #include "Emu/System.h" #include "Emu/system_utils.hpp" #include "Emu/Io/Null/NullPadHandler.h" #include "Utilities/File.h" #include "Input/pad_thread.h" #include "Input/product_info.h" #include "Input/keyboard_pad_handler.h" #include <thread> LOG_CHANNEL(cfg_log, "CFG"); inline std::string sstr(const QString& _in) { return _in.toStdString(); } constexpr auto qstr = QString::fromStdString; cfg_input_configurations g_cfg_input_configs; inline bool CreateConfigFile(const QString& dir, const QString& name) { if (!QDir().mkpath(dir)) { cfg_log.fatal("Failed to create dir %s", dir); return false; } const QString filename = dir + name + ".yml"; QFile new_file(filename); if (!new_file.open(QIODevice::WriteOnly)) { cfg_log.fatal("Failed to create file %s", filename); return false; } new_file.close(); return true; } void pad_settings_dialog::pad_button::insert_key(const std::string& key, bool append_key) { std::vector<std::string> buttons; if (append_key) { buttons = cfg_pad::get_buttons(keys); } buttons.push_back(key); keys = cfg_pad::get_buttons(std::move(buttons)); text = QString::fromStdString(keys).replace(",", ", "); } pad_settings_dialog::pad_settings_dialog(std::shared_ptr<gui_settings> gui_settings, QWidget* parent, const GameInfo* game) : QDialog(parent) , ui(new Ui::pad_settings_dialog) , m_gui_settings(std::move(gui_settings)) { pad::set_enabled(false); ui->setupUi(this); if (game) { m_title_id = game->serial; setWindowTitle(tr("Gamepad Settings: [%0] %1").arg(qstr(game->serial)).arg(qstr(game->name).simplified())); } else { setWindowTitle(tr("Gamepad Settings")); } // Load input configs 
g_cfg_input_configs.load(); if (m_title_id.empty()) { const QString input_config_dir = qstr(rpcs3::utils::get_input_config_dir(m_title_id)); QStringList config_files = gui::utils::get_dir_entries(QDir(input_config_dir), QStringList() << "*.yml"); QString active_config_file = qstr(g_cfg_input_configs.active_configs.get_value(g_cfg_input_configs.global_key)); if (!config_files.contains(active_config_file)) { const QString default_config_file = qstr(g_cfg_input_configs.default_config); if (!config_files.contains(default_config_file) && CreateConfigFile(input_config_dir, default_config_file)) { config_files.prepend(default_config_file); } active_config_file = default_config_file; } for (const QString& profile : config_files) { ui->chooseConfig->addItem(profile); } ui->chooseConfig->setCurrentText(active_config_file); } else { ui->chooseConfig->addItem(qstr(m_title_id)); ui->gb_config_files->setEnabled(false); } // Create tab widget for 7 players for (int i = 1; i < 8; i++) { const QString tab_title = tr("Player %0").arg(i); if (i == 1) { ui->tabWidget->setTabText(0, tab_title); } else { ui->tabWidget->addTab(new QWidget, tab_title); } } // On tab change: move the layout to the new tab and refresh connect(ui->tabWidget, &QTabWidget::currentChanged, this, &pad_settings_dialog::OnTabChanged); // Combobox: Input type connect(ui->chooseHandler, &QComboBox::currentTextChanged, this, &pad_settings_dialog::ChangeHandler); // Combobox: Devices connect(ui->chooseDevice, QOverload<int>::of(&QComboBox::currentIndexChanged), this, &pad_settings_dialog::ChangeDevice); // Combobox: Configs connect(ui->chooseConfig, &QComboBox::currentTextChanged, this, &pad_settings_dialog::ChangeConfig); // Pushbutton: Add config file connect(ui->b_addConfig, &QAbstractButton::clicked, this, &pad_settings_dialog::AddConfigFile); ui->buttonBox->button(QDialogButtonBox::Reset)->setText(tr("Filter Noise")); connect(ui->buttonBox, &QDialogButtonBox::clicked, this, [this](QAbstractButton* button) { if 
(button == ui->buttonBox->button(QDialogButtonBox::Save)) { SaveExit(); } else if (button == ui->buttonBox->button(QDialogButtonBox::Cancel)) { CancelExit(); } else if (button == ui->buttonBox->button(QDialogButtonBox::Reset)) { OnPadButtonClicked(button_ids::id_blacklist); } else if (button == ui->buttonBox->button(QDialogButtonBox::RestoreDefaults)) { OnPadButtonClicked(button_ids::id_reset_parameters); } }); // Refresh Button connect(ui->b_refresh, &QPushButton::clicked, this, &pad_settings_dialog::RefreshHandlers); ui->chooseClass->addItem(tr("Standard (Pad)"), u32{CELL_PAD_PCLASS_TYPE_STANDARD}); ui->chooseClass->addItem(tr("Guitar"), u32{CELL_PAD_PCLASS_TYPE_GUITAR}); ui->chooseClass->addItem(tr("Drum"), u32{CELL_PAD_PCLASS_TYPE_DRUM}); ui->chooseClass->addItem(tr("DJ"), u32{CELL_PAD_PCLASS_TYPE_DJ}); ui->chooseClass->addItem(tr("Dance Mat"), u32{CELL_PAD_PCLASS_TYPE_DANCEMAT}); ui->chooseClass->addItem(tr("PS Move Navigation"), u32{CELL_PAD_PCLASS_TYPE_NAVIGATION}); ui->chooseClass->addItem(tr("Skateboard"), u32{CELL_PAD_PCLASS_TYPE_SKATEBOARD}); ui->chooseClass->addItem(tr("GunCon 3"), u32{CELL_PAD_FAKE_TYPE_GUNCON3}); ui->chooseClass->addItem(tr("Top Shot Elite"), u32{CELL_PAD_FAKE_TYPE_TOP_SHOT_ELITE}); ui->chooseClass->addItem(tr("Top Shot Fearmaster"),u32{CELL_PAD_FAKE_TYPE_TOP_SHOT_FEARMASTER}); ui->chooseClass->addItem(tr("uDraw GameTablet"), u32{CELL_PAD_FAKE_TYPE_GAMETABLET}); connect(ui->chooseClass, QOverload<int>::of(&QComboBox::currentIndexChanged), this, [this](int index) { if (index < 0) return; HandleDeviceClassChange(ui->chooseClass->currentData().toUInt()); }); ui->chb_show_emulated_values->setChecked(m_gui_settings->GetValue(gui::pads_show_emulated).toBool()); connect(ui->chb_show_emulated_values, &QCheckBox::clicked, [this](bool checked) { m_gui_settings->SetValue(gui::pads_show_emulated, checked); const cfg_pad& cfg = GetPlayerConfig(); RepaintPreviewLabel(ui->preview_stick_left, ui->slider_stick_left->value(), 
ui->anti_deadzone_slider_stick_left->value(), ui->slider_stick_left->size().width(), m_lx, m_ly, cfg.lpadsquircling, cfg.lstickmultiplier / 100.0); RepaintPreviewLabel(ui->preview_stick_right, ui->slider_stick_right->value(), ui->anti_deadzone_slider_stick_right->value(), ui->slider_stick_right->size().width(), m_rx, m_ry, cfg.rpadsquircling, cfg.rstickmultiplier / 100.0); }); ui->mouse_movement->addItem(tr("Relative"), static_cast<int>(mouse_movement_mode::relative)); ui->mouse_movement->addItem(tr("Absolute"), static_cast<int>(mouse_movement_mode::absolute)); // Initialize configurable buttons InitButtons(); // Initialize tooltips SubscribeTooltips(); // Repaint controller image QSvgRenderer renderer(QStringLiteral(":/Icons/DualShock_3.svg")); QPixmap controller_pixmap(renderer.defaultSize() * 10); controller_pixmap.fill(Qt::transparent); QPainter painter(&controller_pixmap); painter.setRenderHints(QPainter::TextAntialiasing | QPainter::Antialiasing | QPainter::SmoothPixmapTransform); renderer.render(&painter, controller_pixmap.rect()); const QColor color = gui::utils::get_foreground_color(); ui->l_controller->setPixmap(gui::utils::get_colorized_pixmap(controller_pixmap, QColor(), gui::utils::get_label_color("l_controller", color, color), false, true)); // Show default widgets first in order to calculate the required size for the scroll area (see pad_settings_dialog::ResizeDialog) ui->left_stack->setCurrentIndex(0); ui->right_stack->setCurrentIndex(0); // Set up first tab OnTabChanged(0); ChangeConfig(ui->chooseConfig->currentText()); } void pad_settings_dialog::closeEvent(QCloseEvent* event) { m_gui_settings->SetValue(gui::pads_geometry, saveGeometry()); QDialog::closeEvent(event); } pad_settings_dialog::~pad_settings_dialog() { if (m_input_thread) { m_input_thread_state = input_thread_state::pausing; *m_input_thread = thread_state::finished; } if (!Emu.IsStopped()) { pad::reset(Emu.GetTitleID()); } pad::set_enabled(true); } void 
pad_settings_dialog::showEvent(QShowEvent* event) { RepaintPreviewLabel(ui->preview_stick_left, ui->slider_stick_left->value(), ui->anti_deadzone_slider_stick_left->value(), ui->slider_stick_left->size().width(), 0, 0, 0, 0); RepaintPreviewLabel(ui->preview_stick_right, ui->slider_stick_right->value(), ui->anti_deadzone_slider_stick_right->value(), ui->slider_stick_right->size().width(), 0, 0, 0, 0); // Resize in order to fit into our scroll area if (!restoreGeometry(m_gui_settings->GetValue(gui::pads_geometry).toByteArray())) { ResizeDialog(); } QDialog::showEvent(event); } void pad_settings_dialog::InitButtons() { m_pad_buttons = new QButtonGroup(this); m_palette = ui->b_left->palette(); // save normal palette const auto insert_button = [this](int id, QPushButton* button) { m_pad_buttons->addButton(button, id); button->installEventFilter(this); }; insert_button(button_ids::id_pad_lstick_left, ui->b_lstick_left); insert_button(button_ids::id_pad_lstick_down, ui->b_lstick_down); insert_button(button_ids::id_pad_lstick_right, ui->b_lstick_right); insert_button(button_ids::id_pad_lstick_up, ui->b_lstick_up); insert_button(button_ids::id_pad_left, ui->b_left); insert_button(button_ids::id_pad_down, ui->b_down); insert_button(button_ids::id_pad_right, ui->b_right); insert_button(button_ids::id_pad_up, ui->b_up); insert_button(button_ids::id_pad_l1, ui->b_shift_l1); insert_button(button_ids::id_pad_l2, ui->b_shift_l2); insert_button(button_ids::id_pad_l3, ui->b_shift_l3); insert_button(button_ids::id_pad_start, ui->b_start); insert_button(button_ids::id_pad_select, ui->b_select); insert_button(button_ids::id_pad_ps, ui->b_ps); insert_button(button_ids::id_pad_r1, ui->b_shift_r1); insert_button(button_ids::id_pad_r2, ui->b_shift_r2); insert_button(button_ids::id_pad_r3, ui->b_shift_r3); insert_button(button_ids::id_pad_square, ui->b_square); insert_button(button_ids::id_pad_cross, ui->b_cross); insert_button(button_ids::id_pad_circle, ui->b_circle); 
insert_button(button_ids::id_pad_triangle, ui->b_triangle); insert_button(button_ids::id_pad_rstick_left, ui->b_rstick_left); insert_button(button_ids::id_pad_rstick_down, ui->b_rstick_down); insert_button(button_ids::id_pad_rstick_right, ui->b_rstick_right); insert_button(button_ids::id_pad_rstick_up, ui->b_rstick_up); insert_button(button_ids::id_pressure_intensity, ui->b_pressure_intensity); insert_button(button_ids::id_analog_limiter, ui->b_analog_limiter); m_pad_buttons->addButton(ui->b_refresh, button_ids::id_refresh); m_pad_buttons->addButton(ui->b_addConfig, button_ids::id_add_config_file); connect(m_pad_buttons, &QButtonGroup::idClicked, this, &pad_settings_dialog::OnPadButtonClicked); connect(&m_remap_timer, &QTimer::timeout, this, [this]() { if (--m_seconds <= 0) { ReactivateButtons(); return; } if (auto button = m_pad_buttons->button(m_button_id)) { button->setText(tr("[ Waiting %1 ]").arg(m_seconds)); } }); connect(ui->chb_vibration_large, &QCheckBox::clicked, this, [this](bool checked) { if (!checked) { return; } ui->chb_vibration_switch->isChecked() ? SetPadData(m_min_force, m_max_force) : SetPadData(m_max_force, m_min_force); QTimer::singleShot(300, [this]() { SetPadData(m_min_force, m_min_force); }); }); connect(ui->chb_vibration_small, &QCheckBox::clicked, this, [this](bool checked) { if (!checked) { return; } ui->chb_vibration_switch->isChecked() ? SetPadData(m_max_force, m_min_force) : SetPadData(m_min_force, m_max_force); QTimer::singleShot(300, [this]() { SetPadData(m_min_force, m_min_force); }); }); connect(ui->chb_vibration_switch, &QCheckBox::clicked, this, [this](bool checked) { checked ? SetPadData(m_min_force, m_max_force) : SetPadData(m_max_force, m_min_force); QTimer::singleShot(200, [this, checked]() { checked ? 
SetPadData(m_max_force, m_min_force) : SetPadData(m_min_force, m_max_force); QTimer::singleShot(200, [this]() { SetPadData(m_min_force, m_min_force); }); }); }); connect(ui->slider_stick_left, &QSlider::valueChanged, this, [&](int value) { RepaintPreviewLabel(ui->preview_stick_left, value, ui->anti_deadzone_slider_stick_left->value(), ui->slider_stick_left->size().width(), m_lx, m_ly, ui->squircle_left->value(), ui->stick_multi_left->value()); }); connect(ui->slider_stick_right, &QSlider::valueChanged, this, [&](int value) { RepaintPreviewLabel(ui->preview_stick_right, value, ui->anti_deadzone_slider_stick_right->value(), ui->slider_stick_right->size().width(), m_rx, m_ry, ui->squircle_right->value(), ui->stick_multi_right->value()); }); connect(ui->anti_deadzone_slider_stick_left, &QSlider::valueChanged, this, [&](int value) { RepaintPreviewLabel(ui->preview_stick_left, ui->slider_stick_left->value(), value, ui->slider_stick_left->size().width(), m_lx, m_ly, ui->squircle_left->value(), ui->stick_multi_left->value()); }); connect(ui->anti_deadzone_slider_stick_right, &QSlider::valueChanged, this, [&](int value) { RepaintPreviewLabel(ui->preview_stick_right, ui->slider_stick_right->value(), value, ui->slider_stick_right->size().width(), m_rx, m_ry, ui->squircle_right->value(), ui->stick_multi_right->value()); }); // Open LED settings connect(ui->b_led_settings, &QPushButton::clicked, this, [this]() { // Allow LED battery indication while the dialog is open ensure(m_handler); const cfg_pad& cfg = GetPlayerConfig(); SetPadData(0, 0, cfg.led_battery_indicator.get()); pad_led_settings_dialog dialog(this, cfg.colorR, cfg.colorG, cfg.colorB, m_handler->has_rgb(), m_handler->has_player_led(), cfg.player_led_enabled.get(), m_handler->has_battery(), m_handler->has_battery_led(), cfg.led_low_battery_blink.get(), cfg.led_battery_indicator.get(), cfg.led_battery_indicator_brightness); connect(&dialog, &pad_led_settings_dialog::pass_led_settings, this, [this](const 
pad_led_settings_dialog::led_settings& settings) { ensure(m_handler); cfg_pad& cfg = GetPlayerConfig(); cfg.colorR.set(settings.color_r); cfg.colorG.set(settings.color_g); cfg.colorB.set(settings.color_b); cfg.led_battery_indicator.set(settings.battery_indicator); cfg.led_battery_indicator_brightness.set(settings.battery_indicator_brightness); cfg.led_low_battery_blink.set(settings.low_battery_blink); cfg.player_led_enabled.set(settings.player_led_enabled); SetPadData(0, 0, settings.battery_indicator); }); dialog.exec(); SetPadData(0, 0); }); // Open Motion settings connect(ui->b_motion_controls, &QPushButton::clicked, this, [this]() { if (m_timer_input.isActive()) { m_timer_input.stop(); } if (m_timer_pad_refresh.isActive()) { m_timer_pad_refresh.stop(); } pause_input_thread(); pad_motion_settings_dialog dialog(this, m_handler, g_cfg_input.player[GetPlayerIndex()]); dialog.exec(); if (ui->chooseDevice->isEnabled() && ui->chooseDevice->currentIndex() >= 0) { start_input_thread(); m_timer_input.start(10); m_timer_pad_refresh.start(1000); } }); // Use timer to display button input connect(&m_timer_input, &QTimer::timeout, this, [this]() { input_callback_data data; { std::lock_guard lock(m_input_mutex); data = m_input_callback_data; m_input_callback_data.has_new_data = false; } if (!data.has_new_data) { return; } const auto update_preview = [this](const std::string& pad_name, bool is_connected, int battery_level, int trigger_left, int trigger_right, int lx, int ly, int rx, int ry) { SwitchPadInfo(pad_name, is_connected); if (is_connected != m_enable_buttons && (!is_connected || !m_remap_timer.isActive())) { SwitchButtons(is_connected); } ui->pb_battery->setValue(m_enable_battery ? 
battery_level : 0); if (m_handler->has_deadzones()) { ui->preview_trigger_left->setValue(trigger_left); ui->preview_trigger_right->setValue(trigger_right); if (m_lx != lx || m_ly != ly) { m_lx = lx; m_ly = ly; RepaintPreviewLabel(ui->preview_stick_left, ui->slider_stick_left->value(), ui->anti_deadzone_slider_stick_left->value(), ui->slider_stick_left->size().width(), m_lx, m_ly, ui->squircle_left->value(), ui->stick_multi_left->value()); } if (m_rx != rx || m_ry != ry) { m_rx = rx; m_ry = ry; RepaintPreviewLabel(ui->preview_stick_right, ui->slider_stick_right->value(), ui->anti_deadzone_slider_stick_right->value(), ui->slider_stick_right->size().width(), m_rx, m_ry, ui->squircle_right->value(), ui->stick_multi_right->value()); } } }; if (data.status == PadHandlerBase::connection::disconnected) { // Disable Button Remapping update_preview(data.pad_name, false, 0, 0, 0, 0, 0, 0, 0); return; } // Enable Button Remapping update_preview(data.pad_name, true, data.battery_level, data.preview_values[0], data.preview_values[1], data.preview_values[2], data.preview_values[3], data.preview_values[4], data.preview_values[5]); if (data.val <= 0 || data.status == PadHandlerBase::connection::no_data) { return; } cfg_log.notice("get_next_button_press: %s device %s button %s pressed with value %d", m_handler->m_type, data.pad_name, data.name, data.val); if (m_button_id > button_ids::id_pad_begin && m_button_id < button_ids::id_pad_end && m_button_id == data.button_id) { m_cfg_entries[m_button_id].insert_key(data.name, m_enable_multi_binding); ReactivateButtons(); } }); // Use timer to refresh pad connection status connect(&m_timer_pad_refresh, &QTimer::timeout, this, &pad_settings_dialog::RefreshPads); // Use thread to get button input m_input_thread = std::make_unique<named_thread<std::function<void()>>>("Pad Settings Thread", [this]() { u32 button_id = button_ids::id_pad_begin; // Used to check if this is the first call during a remap while (thread_ctrl::state() != 
thread_state::aborting) { thread_ctrl::wait_for(1000); if (m_input_thread_state != input_thread_state::active) { if (m_input_thread_state == input_thread_state::pausing) { std::lock_guard lock(m_input_mutex); m_input_callback_data = {}; m_input_thread_state = input_thread_state::paused; } continue; } std::lock_guard lock(m_handler_mutex); const std::vector<std::string> buttons = { m_cfg_entries[button_ids::id_pad_l2].keys, m_cfg_entries[button_ids::id_pad_r2].keys, m_cfg_entries[button_ids::id_pad_lstick_left].keys, m_cfg_entries[button_ids::id_pad_lstick_right].keys, m_cfg_entries[button_ids::id_pad_lstick_down].keys, m_cfg_entries[button_ids::id_pad_lstick_up].keys, m_cfg_entries[button_ids::id_pad_rstick_left].keys, m_cfg_entries[button_ids::id_pad_rstick_right].keys, m_cfg_entries[button_ids::id_pad_rstick_down].keys, m_cfg_entries[button_ids::id_pad_rstick_up].keys }; // Check if this is the first call during a remap const u32 new_button_id = m_button_id; const bool is_mapping = new_button_id > button_ids::id_pad_begin && new_button_id < button_ids::id_pad_end; const bool first_call = std::exchange(button_id, new_button_id) != button_id && is_mapping; const PadHandlerBase::gui_call_type call_type = first_call ? 
PadHandlerBase::gui_call_type::reset_input : PadHandlerBase::gui_call_type::normal; const PadHandlerBase::connection status = m_handler->get_next_button_press(m_device_name, [this, button_id](u16 val, std::string name, std::string pad_name, u32 battery_level, pad_preview_values preview_values) { std::lock_guard lock(m_input_mutex); m_input_callback_data.val = val; m_input_callback_data.name = std::move(name); m_input_callback_data.pad_name = std::move(pad_name); m_input_callback_data.battery_level = battery_level; m_input_callback_data.preview_values = std::move(preview_values); m_input_callback_data.has_new_data = true; m_input_callback_data.status = PadHandlerBase::connection::connected; m_input_callback_data.button_id = button_id; }, [this, button_id](std::string pad_name) { std::lock_guard lock(m_input_mutex); m_input_callback_data.pad_name = std::move(pad_name); m_input_callback_data.has_new_data = true; m_input_callback_data.status = PadHandlerBase::connection::disconnected; m_input_callback_data.button_id = button_id; }, call_type, buttons); if (status == PadHandlerBase::connection::no_data) { std::lock_guard lock(m_input_mutex); m_input_callback_data.pad_name = m_device_name; m_input_callback_data.has_new_data = true; m_input_callback_data.status = status; m_input_callback_data.button_id = button_id; } } }); } void pad_settings_dialog::RefreshPads() { for (int i = 0; i < ui->chooseDevice->count(); i++) { pad_device_info info = get_pad_info(ui->chooseDevice, i); if (info.name.empty()) { continue; } std::lock_guard lock(m_handler_mutex); const PadHandlerBase::connection status = m_handler->get_next_button_press(info.name, nullptr, nullptr, PadHandlerBase::gui_call_type::get_connection, {}); switch_pad_info(i, info, status != PadHandlerBase::connection::disconnected); } } void pad_settings_dialog::SetPadData(u32 large_motor, u32 small_motor, bool led_battery_indicator) { ensure(m_handler); const cfg_pad& cfg = GetPlayerConfig(); std::lock_guard 
lock(m_handler_mutex); m_handler->SetPadData(m_device_name, GetPlayerIndex(), large_motor, small_motor, cfg.colorR, cfg.colorG, cfg.colorB, cfg.player_led_enabled.get(), led_battery_indicator, cfg.led_battery_indicator_brightness); } pad_device_info pad_settings_dialog::get_pad_info(QComboBox* combo, int index) { if (!combo || index < 0) { cfg_log.fatal("get_pad_info: Invalid combo box or index (combo=%d, index=%d)", !!combo, index); return {}; } const QVariant user_data = combo->itemData(index); if (!user_data.canConvert<pad_device_info>()) { cfg_log.fatal("get_pad_info: Cannot convert itemData for index %d and itemText %s", index, combo->itemText(index)); return {}; } return user_data.value<pad_device_info>(); } void pad_settings_dialog::switch_pad_info(int index, pad_device_info info, bool is_connected) { if (index >= 0 && info.is_connected != is_connected) { info.is_connected = is_connected; ui->chooseDevice->setItemData(index, QVariant::fromValue(info)); ui->chooseDevice->setItemText(index, is_connected ? 
info.localized_name : (info.localized_name + Disconnected_suffix)); } if (!is_connected && m_remap_timer.isActive() && ui->chooseDevice->currentIndex() == index) { ReactivateButtons(); } } void pad_settings_dialog::SwitchPadInfo(const std::string& pad_name, bool is_connected) { for (int i = 0; i < ui->chooseDevice->count(); i++) { if (pad_device_info info = get_pad_info(ui->chooseDevice, i); info.name == pad_name) { switch_pad_info(i, info, is_connected); break; } } } void pad_settings_dialog::ReloadButtons() { m_cfg_entries.clear(); auto updateButton = [this](int id, QPushButton* button, cfg::string* cfg_text) { const QString text = qstr(*cfg_text); m_cfg_entries.insert(std::make_pair(id, pad_button{cfg_text, *cfg_text, text})); button->setText(text); }; cfg_pad& cfg = GetPlayerConfig(); updateButton(button_ids::id_pad_lstick_left, ui->b_lstick_left, &cfg.ls_left); updateButton(button_ids::id_pad_lstick_down, ui->b_lstick_down, &cfg.ls_down); updateButton(button_ids::id_pad_lstick_right, ui->b_lstick_right, &cfg.ls_right); updateButton(button_ids::id_pad_lstick_up, ui->b_lstick_up, &cfg.ls_up); updateButton(button_ids::id_pad_left, ui->b_left, &cfg.left); updateButton(button_ids::id_pad_down, ui->b_down, &cfg.down); updateButton(button_ids::id_pad_right, ui->b_right, &cfg.right); updateButton(button_ids::id_pad_up, ui->b_up, &cfg.up); updateButton(button_ids::id_pad_l1, ui->b_shift_l1, &cfg.l1); updateButton(button_ids::id_pad_l2, ui->b_shift_l2, &cfg.l2); updateButton(button_ids::id_pad_l3, ui->b_shift_l3, &cfg.l3); updateButton(button_ids::id_pad_start, ui->b_start, &cfg.start); updateButton(button_ids::id_pad_select, ui->b_select, &cfg.select); updateButton(button_ids::id_pad_ps, ui->b_ps, &cfg.ps); updateButton(button_ids::id_pad_r1, ui->b_shift_r1, &cfg.r1); updateButton(button_ids::id_pad_r2, ui->b_shift_r2, &cfg.r2); updateButton(button_ids::id_pad_r3, ui->b_shift_r3, &cfg.r3); updateButton(button_ids::id_pad_square, ui->b_square, &cfg.square); 
updateButton(button_ids::id_pad_cross, ui->b_cross, &cfg.cross); updateButton(button_ids::id_pad_circle, ui->b_circle, &cfg.circle); updateButton(button_ids::id_pad_triangle, ui->b_triangle, &cfg.triangle); updateButton(button_ids::id_pad_rstick_left, ui->b_rstick_left, &cfg.rs_left); updateButton(button_ids::id_pad_rstick_down, ui->b_rstick_down, &cfg.rs_down); updateButton(button_ids::id_pad_rstick_right, ui->b_rstick_right, &cfg.rs_right); updateButton(button_ids::id_pad_rstick_up, ui->b_rstick_up, &cfg.rs_up); updateButton(button_ids::id_pressure_intensity, ui->b_pressure_intensity, &cfg.pressure_intensity_button); updateButton(button_ids::id_analog_limiter, ui->b_analog_limiter, &cfg.analog_limiter_button); UpdateLabels(true); } void pad_settings_dialog::ReactivateButtons() { m_remap_timer.stop(); m_seconds = MAX_SECONDS; m_enable_multi_binding = false; if (m_button_id == button_ids::id_pad_begin) { return; } if (auto button = m_pad_buttons->button(m_button_id)) { button->setPalette(m_palette); button->releaseMouse(); } m_button_id = button_ids::id_pad_begin; UpdateLabels(); SwitchButtons(true); for (auto but : m_pad_buttons->buttons()) { but->setFocusPolicy(Qt::StrongFocus); } for (auto but : ui->buttonBox->buttons()) { but->setFocusPolicy(Qt::StrongFocus); } ui->tabWidget->setFocusPolicy(Qt::TabFocus); ui->scrollArea->setFocusPolicy(Qt::StrongFocus); ui->chooseConfig->setFocusPolicy(Qt::WheelFocus); ui->chooseHandler->setFocusPolicy(Qt::WheelFocus); ui->chooseDevice->setFocusPolicy(Qt::WheelFocus); ui->chooseClass->setFocusPolicy(Qt::WheelFocus); ui->chooseProduct->setFocusPolicy(Qt::WheelFocus); } void pad_settings_dialog::RepaintPreviewLabel(QLabel* label, int deadzone, int anti_deadzone, int desired_width, int x, int y, int squircle, double multiplier) const { desired_width = 100; // Let's keep a fixed size for these labels for now const qreal deadzone_max = m_handler ? m_handler->thumb_max : 255; // 255 used as fallback. 
	// The deadzone circle shall be small.
	constexpr qreal relative_size = 0.9;
	const qreal device_pixel_ratio = devicePixelRatioF();
	const qreal scaled_width = desired_width * device_pixel_ratio;
	const qreal origin = desired_width / 2.0;
	const qreal outer_circle_diameter = relative_size * desired_width;
	const qreal deadzone_circle_diameter = outer_circle_diameter * deadzone / deadzone_max;
	const qreal anti_deadzone_circle_diameter = outer_circle_diameter * anti_deadzone / deadzone_max;
	const qreal outer_circle_radius = outer_circle_diameter / 2.0;
	const qreal deadzone_circle_radius = deadzone_circle_diameter / 2.0;
	const qreal anti_deadzone_circle_radius = anti_deadzone_circle_diameter / 2.0;

	// Raw stick position scaled into the preview circle. y is negated because screen coordinates grow downwards.
	const qreal stick_x = std::clamp(outer_circle_radius * x * multiplier / deadzone_max, -outer_circle_radius, outer_circle_radius);
	const qreal stick_y = std::clamp(outer_circle_radius * -y * multiplier / deadzone_max, -outer_circle_radius, outer_circle_radius);

	const bool show_emulated_values = ui->chb_show_emulated_values->isChecked();

	qreal ingame_x = 0.0;
	qreal ingame_y = 0.0;

	// Set up the canvas for our work of art
	QPixmap pixmap(scaled_width, scaled_width);
	pixmap.setDevicePixelRatio(device_pixel_ratio);
	pixmap.fill(Qt::transparent);

	// Configure the painter and set its origin
	QPainter painter(&pixmap);
	painter.setRenderHint(QPainter::Antialiasing, true);
	painter.setRenderHint(QPainter::TextAntialiasing, true);
	painter.translate(origin, origin);
	painter.setBrush(QBrush(Qt::white));

	if (show_emulated_values)
	{
		u16 real_x = 0;
		u16 real_y = 0;

		if (m_handler)
		{
			// Convert the multiplier back to the config's percent representation (caller divided by 100.0)
			const int m_in = multiplier * 100.0;
			const u16 normal_x = m_handler->NormalizeStickInput(static_cast<u16>(std::abs(x)), deadzone, m_in, true);
			const u16 normal_y = m_handler->NormalizeStickInput(static_cast<u16>(std::abs(y)), deadzone, m_in, true);
			const s32 x_in = x >= 0 ? normal_x : 0 - normal_x;
			const s32 y_in = y >= 0 ? normal_y : 0 - normal_y;
			m_handler->convert_stick_values(real_x, real_y, x_in, y_in, deadzone, anti_deadzone, squircle);
		}

		// NOTE(review): 126 appears to be the axis center of the emulated values — confirm against convert_stick_values
		constexpr qreal real_max = 126;

		// Emulated in-game position, centered and scaled into the preview circle (y inverted as above)
		ingame_x = std::clamp(outer_circle_radius * (static_cast<qreal>(real_x) - real_max) / real_max, -outer_circle_radius, outer_circle_radius);
		ingame_y = std::clamp(outer_circle_radius * -(static_cast<qreal>(real_y) - real_max) / real_max, -outer_circle_radius, outer_circle_radius);

		// Draw a black outer squircle that roughly represents the DS3's max values
		QPainterPath path;
		path.addRoundedRect(QRectF(-outer_circle_radius, -outer_circle_radius, outer_circle_diameter, outer_circle_diameter), 5, 5, Qt::SizeMode::RelativeSize);
		painter.setPen(QPen(Qt::black, 1.0));
		painter.drawPath(path);
	}

	// Draw a black outer circle that represents the maximum for the deadzone
	painter.setPen(QPen(Qt::black, 1.0));
	painter.drawEllipse(QRectF(-outer_circle_radius, -outer_circle_radius, outer_circle_diameter, outer_circle_diameter));

	painter.setBrush(QBrush(Qt::transparent));

	// Draw a red inner circle that represents the current deadzone
	painter.setPen(QPen(Qt::red, 1.0));
	painter.drawEllipse(QRectF(-deadzone_circle_radius, -deadzone_circle_radius, deadzone_circle_diameter, deadzone_circle_diameter));

	// Draw a green inner circle that represents the current anti-deadzone
	painter.setPen(QPen(Qt::green, 1.0));
	painter.drawEllipse(QRectF(-anti_deadzone_circle_radius, -anti_deadzone_circle_radius, anti_deadzone_circle_diameter, anti_deadzone_circle_diameter));

	// Draw a blue dot that represents the current stick orientation
	painter.setPen(QPen(Qt::blue, 2.0));
	painter.drawEllipse(QRectF(stick_x - 0.5, stick_y - 0.5, 1.0, 1.0));

	if (show_emulated_values)
	{
		// Draw a red dot that represents the current ingame stick orientation
		painter.setPen(QPen(Qt::red, 2.0));
		painter.drawEllipse(QRectF(ingame_x - 0.5, ingame_y - 0.5, 1.0, 1.0));
	}

	painter.end();

	label->setPixmap(pixmap);
}

// Binds a keyboard key to the button currently being remapped (keyboard handler only).
void pad_settings_dialog::keyPressEvent(QKeyEvent *keyEvent)
{
	if (m_button_id == button_ids::id_pad_begin)
	{
		// We are not remapping a button, so pass the event to the base class.
		QDialog::keyPressEvent(keyEvent);
		return;
	}

	if (m_handler->m_type != pad_handler::keyboard)
	{
		// Do nothing, we don't want to interfere with the ongoing remapping.
		return;
	}

	// Ignore OS key-repeat events so holding a key doesn't re-trigger the binding
	if (keyEvent->isAutoRepeat())
	{
		return;
	}

	if (m_button_id <= button_ids::id_pad_begin || m_button_id >= button_ids::id_pad_end)
	{
		cfg_log.error("Pad Settings: Handler Type: %d, Unknown button ID: %d", static_cast<int>(m_handler->m_type), m_button_id.load());
	}
	else
	{
		m_cfg_entries[m_button_id].insert_key(keyboard_pad_handler::GetKeyName(keyEvent, false), m_enable_multi_binding);
	}

	ReactivateButtons();
}

// Binds a mouse button to the button currently being remapped (keyboard handler only).
void pad_settings_dialog::mouseReleaseEvent(QMouseEvent* event)
{
	if (m_button_id == button_ids::id_pad_begin)
	{
		// We are not remapping a button, so pass the event to the base class.
		QDialog::mouseReleaseEvent(event);
		return;
	}

	if (m_handler->m_type != pad_handler::keyboard)
	{
		// Do nothing, we don't want to interfere with the ongoing remapping.
		return;
	}

	if (m_button_id <= button_ids::id_pad_begin || m_button_id >= button_ids::id_pad_end)
	{
		cfg_log.error("Pad Settings: Handler Type: %d, Unknown button ID: %d", static_cast<int>(m_handler->m_type), m_button_id.load());
	}
	else
	{
		m_cfg_entries[m_button_id].insert_key((static_cast<keyboard_pad_handler*>(m_handler.get()))->GetMouseName(event), m_enable_multi_binding);
	}

	ReactivateButtons();
}

// Binds a mouse-wheel direction to the button currently being remapped (keyboard handler only).
void pad_settings_dialog::wheelEvent(QWheelEvent *event)
{
	if (m_button_id == button_ids::id_pad_begin)
	{
		// We are not remapping a button, so pass the event to the base class.
		QDialog::wheelEvent(event);
		return;
	}

	if (m_handler->m_type != pad_handler::keyboard)
	{
		// Do nothing, we don't want to interfere with the ongoing remapping.
		return;
	}

	if (m_button_id <= button_ids::id_pad_begin || m_button_id >= button_ids::id_pad_end)
	{
		cfg_log.error("Pad Settings: Handler Type: %d, Unknown button ID: %d", static_cast<int>(m_handler->m_type), m_button_id.load());
		return;
	}

	const QPoint direction = event->angleDelta();

	if (direction.isNull())
	{
		// Scrolling started/ended event, no direction given
		return;
	}

	// Map the dominant wheel axis (x first, then y) to one of the four wheel pseudo-keys,
	// honoring the platform's "natural scrolling" inversion.
	u32 key;

	if (const int x = direction.x())
	{
		if (event->inverted() ? x < 0 : x > 0)
		{
			key = mouse::wheel_left;
		}
		else
		{
			key = mouse::wheel_right;
		}
	}
	else
	{
		const int y = direction.y();

		if (event->inverted() ? y < 0 : y > 0)
		{
			key = mouse::wheel_up;
		}
		else
		{
			key = mouse::wheel_down;
		}
	}

	m_cfg_entries[m_button_id].insert_key((static_cast<keyboard_pad_handler*>(m_handler.get()))->GetMouseName(key), m_enable_multi_binding);
	ReactivateButtons();
}

// Binds a mouse-movement direction to the button currently being remapped (keyboard handler only).
void pad_settings_dialog::mouseMoveEvent(QMouseEvent* event)
{
	if (m_button_id == button_ids::id_pad_begin)
	{
		// We are not remapping a button, so pass the event to the base class.
		QDialog::mouseMoveEvent(event);
		return;
	}

	if (m_handler->m_type != pad_handler::keyboard)
	{
		// Do nothing, we don't want to interfere with the ongoing remapping.
return; } if (m_button_id <= button_ids::id_pad_begin || m_button_id >= button_ids::id_pad_end) { cfg_log.error("Pad Settings: Handler Type: %d, Unknown button ID: %d", static_cast<int>(m_handler->m_type), m_button_id.load()); } else { const QPoint mouse_pos = QCursor::pos(); const int delta_x = mouse_pos.x() - m_last_pos.x(); const int delta_y = mouse_pos.y() - m_last_pos.y(); u32 key = 0; if (delta_x > 100) { key = mouse::move_right; } else if (delta_x < -100) { key = mouse::move_left; } else if (delta_y > 100) { key = mouse::move_down; } else if (delta_y < -100) { key = mouse::move_up; } if (key != 0) { m_cfg_entries[m_button_id].insert_key((static_cast<keyboard_pad_handler*>(m_handler.get()))->GetMouseName(key), m_enable_multi_binding); ReactivateButtons(); } } } bool pad_settings_dialog::eventFilter(QObject* object, QEvent* event) { switch (event->type()) { case QEvent::MouseButtonRelease: { // On right click clear binding if we are not remapping pad button if (m_button_id == button_ids::id_pad_begin) { QMouseEvent* mouse_event = static_cast<QMouseEvent*>(event); if (const auto button = qobject_cast<QPushButton*>(object); button && button->isEnabled() && mouse_event->button() == Qt::RightButton) { if (const int button_id = m_pad_buttons->id(button); m_cfg_entries.contains(button_id)) { pad_button& button = m_cfg_entries[button_id]; button.keys.clear(); button.text.clear(); UpdateLabels(); return true; } } } // Disabled buttons should not absorb mouseclicks event->ignore(); break; } case QEvent::MouseMove: { mouseMoveEvent(static_cast<QMouseEvent*>(event)); break; } case QEvent::Enter: { if (ui->l_description && m_descriptions.contains(object)) { // Check for visibility when entering a widget (needed in case of overlapping widgets in a QStackedWidget for example) if (const auto widget = qobject_cast<QWidget*>(object); widget && widget->isVisible()) { ui->l_description->setText(m_descriptions[object]); } } break; } case QEvent::Leave: { if (ui->l_description && 
m_descriptions.contains(object)) { ui->l_description->setText(m_description); } break; } default: { break; } } return QDialog::eventFilter(object, event); } void pad_settings_dialog::UpdateLabels(bool is_reset) { if (is_reset) { const cfg_pad& cfg = GetPlayerConfig(); // Update device class const int index = ui->chooseClass->findData(cfg.device_class_type.get()); ui->chooseClass->setCurrentIndex(index); // Trigger the change manually in case that the class dropdown didn't fire an event HandleDeviceClassChange(cfg.device_class_type); const auto products = input::get_products_by_class(cfg.device_class_type); for (usz i = 0; i < products.size(); i++) { if (products[i].vendor_id == cfg.vendor_id && products[i].product_id == cfg.product_id) { ui->chooseProduct->setCurrentIndex(static_cast<int>(i)); break; } } ui->chb_vibration_large->setChecked(cfg.enable_vibration_motor_large.get()); ui->chb_vibration_small->setChecked(cfg.enable_vibration_motor_small.get()); ui->chb_vibration_switch->setChecked(cfg.switch_vibration_motors.get()); // Update Trigger Thresholds ui->preview_trigger_left->setRange(0, m_handler->trigger_max); ui->slider_trigger_left->setRange(0, m_handler->trigger_max); ui->slider_trigger_left->setValue(cfg.ltriggerthreshold); ui->preview_trigger_right->setRange(0, m_handler->trigger_max); ui->slider_trigger_right->setRange(0, m_handler->trigger_max); ui->slider_trigger_right->setValue(cfg.rtriggerthreshold); // Update Stick Deadzones ui->slider_stick_left->setRange(0, m_handler->thumb_max); ui->slider_stick_left->setValue(cfg.lstickdeadzone); ui->slider_stick_right->setRange(0, m_handler->thumb_max); ui->slider_stick_right->setValue(cfg.rstickdeadzone); ui->anti_deadzone_slider_stick_left->setRange(0, m_handler->thumb_max); ui->anti_deadzone_slider_stick_left->setValue(cfg.lstick_anti_deadzone); ui->anti_deadzone_slider_stick_right->setRange(0, m_handler->thumb_max); ui->anti_deadzone_slider_stick_right->setValue(cfg.rstick_anti_deadzone); 
		// Reused buffer: each cfg entry's to_list() yields [min, ..., max] as strings
		std::vector<std::string> range;

		// Update Mouse Movement Mode
		const int mouse_movement_index = ui->mouse_movement->findData(static_cast<int>(cfg.mouse_move_mode.get()));
		ensure(mouse_movement_index >= 0);
		ui->mouse_movement->setCurrentIndex(mouse_movement_index);

		// Update Mouse Deadzones
		range = cfg.mouse_deadzone_x.to_list();
		ui->mouse_dz_x->setRange(std::stoi(range.front()), std::stoi(range.back()));
		ui->mouse_dz_x->setValue(cfg.mouse_deadzone_x);

		range = cfg.mouse_deadzone_y.to_list();
		ui->mouse_dz_y->setRange(std::stoi(range.front()), std::stoi(range.back()));
		ui->mouse_dz_y->setValue(cfg.mouse_deadzone_y);

		// Update Mouse Acceleration (config stores percent, UI shows a factor — hence / 100.0)
		range = cfg.mouse_acceleration_x.to_list();
		ui->mouse_accel_x->setRange(std::stod(range.front()) / 100.0, std::stod(range.back()) / 100.0);
		ui->mouse_accel_x->setValue(cfg.mouse_acceleration_x / 100.0);

		range = cfg.mouse_acceleration_y.to_list();
		ui->mouse_accel_y->setRange(std::stod(range.front()) / 100.0, std::stod(range.back()) / 100.0);
		ui->mouse_accel_y->setValue(cfg.mouse_acceleration_y / 100.0);

		// Update Stick Lerp Factors
		range = cfg.l_stick_lerp_factor.to_list();
		ui->left_stick_lerp->setRange(std::stod(range.front()) / 100.0, std::stod(range.back()) / 100.0);
		ui->left_stick_lerp->setValue(cfg.l_stick_lerp_factor / 100.0);

		range = cfg.r_stick_lerp_factor.to_list();
		ui->right_stick_lerp->setRange(std::stod(range.front()) / 100.0, std::stod(range.back()) / 100.0);
		ui->right_stick_lerp->setValue(cfg.r_stick_lerp_factor / 100.0);

		// Update Stick Multipliers
		range = cfg.lstickmultiplier.to_list();
		ui->stick_multi_left->setRange(std::stod(range.front()) / 100.0, std::stod(range.back()) / 100.0);
		ui->stick_multi_left->setValue(cfg.lstickmultiplier / 100.0);

		range = cfg.rstickmultiplier.to_list();
		ui->stick_multi_right->setRange(std::stod(range.front()) / 100.0, std::stod(range.back()) / 100.0);
		ui->stick_multi_right->setValue(cfg.rstickmultiplier / 100.0);

		// Update Squircle Factors
		range = cfg.lpadsquircling.to_list();
		ui->squircle_left->setRange(std::stoi(range.front()), std::stoi(range.back()));
		ui->squircle_left->setValue(cfg.lpadsquircling);

		range = cfg.rpadsquircling.to_list();
		ui->squircle_right->setRange(std::stoi(range.front()), std::stoi(range.back()));
		ui->squircle_right->setValue(cfg.rpadsquircling);

		// Redraw both stick previews with the freshly loaded values
		RepaintPreviewLabel(ui->preview_stick_left, ui->slider_stick_left->value(), ui->anti_deadzone_slider_stick_left->value(), ui->slider_stick_left->size().width(), m_lx, m_ly, cfg.lpadsquircling, cfg.lstickmultiplier / 100.0);
		RepaintPreviewLabel(ui->preview_stick_right, ui->slider_stick_right->value(), ui->anti_deadzone_slider_stick_right->value(), ui->slider_stick_right->size().width(), m_rx, m_ry, cfg.rpadsquircling, cfg.rstickmultiplier / 100.0);

		// Update analog limiter toggle mode
		ui->cb_analog_limiter_toggle_mode->setChecked(cfg.analog_limiter_toggle_mode.get());

		// Update pressure sensitivity factors
		range = cfg.pressure_intensity.to_list();
		ui->sb_pressure_intensity->setRange(std::stoi(range.front()), std::stoi(range.back()));
		ui->sb_pressure_intensity->setValue(cfg.pressure_intensity);

		// Update pressure sensitivity toggle mode
		ui->cb_pressure_intensity_toggle_mode->setChecked(cfg.pressure_intensity_toggle_mode.get());

		// Update pressure sensitivity deadzone
		range = cfg.pressure_intensity_deadzone.to_list();
		ui->pressure_intensity_deadzone->setRange(std::stoi(range.front()), std::stoi(range.back()));
		ui->pressure_intensity_deadzone->setValue(cfg.pressure_intensity_deadzone.get());

		// Apply stored/default LED settings to the device
		m_enable_led = m_handler->has_led();
		m_enable_battery_led = m_handler->has_battery_led();
		SetPadData(0, 0);

		// Enable battery and LED group box
		m_enable_battery = m_handler->has_battery();
		ui->gb_battery->setVisible(m_enable_battery || m_enable_led);
	}

	for (auto& [id, button] : m_cfg_entries)
	{
		if (is_reset)
		{
			button.keys = *button.cfg_text;
			button.text = qstr(button.keys);
		}

		// The button has to contain at least one character, because it
		// would be square'ish otherwise
		if (auto btn = m_pad_buttons->button(id))
		{
			btn->setText(button.text.isEmpty() ? QStringLiteral("-") : button.text);
		}
	}
}

// Enables/disables the configuration widgets. Capability-dependent widgets stay
// disabled unless the current handler supports them (rumble, motion, deadzones, ...).
void pad_settings_dialog::SwitchButtons(bool is_enabled)
{
	m_enable_buttons = is_enabled;

	ui->chb_show_emulated_values->setEnabled(is_enabled);
	ui->stick_multi_left->setEnabled(is_enabled);
	ui->stick_multi_right->setEnabled(is_enabled);
	ui->squircle_left->setEnabled(is_enabled);
	ui->squircle_right->setEnabled(is_enabled);
	ui->gb_pressure_intensity_deadzone->setEnabled(is_enabled);
	ui->gb_pressure_intensity->setEnabled(is_enabled && m_enable_pressure_intensity_button);
	ui->gb_analog_limiter->setEnabled(is_enabled && m_enable_analog_limiter_button);
	ui->gb_vibration->setEnabled(is_enabled && m_enable_rumble);
	ui->gb_motion_controls->setEnabled(is_enabled && m_enable_motion);
	ui->gb_stick_deadzones->setEnabled(is_enabled && m_enable_deadzones);
	ui->gb_stick_anti_deadzones->setEnabled(is_enabled && m_enable_deadzones);
	ui->gb_triggers->setEnabled(is_enabled && m_enable_deadzones);
	ui->gb_battery->setEnabled(is_enabled && (m_enable_battery || m_enable_led));
	ui->pb_battery->setEnabled(is_enabled && m_enable_battery);
	ui->b_led_settings->setEnabled(is_enabled && m_enable_led);
	// Mouse-related settings only apply to the keyboard handler
	ui->gb_mouse_movement->setEnabled(is_enabled && m_handler->m_type == pad_handler::keyboard);
	ui->gb_mouse_accel->setEnabled(is_enabled && m_handler->m_type == pad_handler::keyboard);
	ui->gb_mouse_dz->setEnabled(is_enabled && m_handler->m_type == pad_handler::keyboard);
	ui->gb_stick_lerp->setEnabled(is_enabled && m_handler->m_type == pad_handler::keyboard);

	ui->chooseClass->setEnabled(is_enabled && ui->chooseClass->count() > 0);
	ui->chooseProduct->setEnabled(is_enabled && ui->chooseProduct->count() > 0);
	ui->buttonBox->button(QDialogButtonBox::Reset)->setEnabled(is_enabled && m_handler->m_type != pad_handler::keyboard);

	// Toggle every remappable pad button
	for (int i = button_ids::id_pad_begin + 1; i < button_ids::id_pad_end; i++)
	{
		if (auto button = m_pad_buttons->button(i))
		{
			button->setEnabled(is_enabled);
		}
	}
}

// Handles a click on one of the dialog's buttons. Special ids are dispatched
// directly; any remappable pad button enters remap mode: the UI is locked,
// focus policies are downgraded and a countdown timer starts.
void pad_settings_dialog::OnPadButtonClicked(int id)
{
	switch (id)
	{
	case button_ids::id_led:
	case button_ids::id_pad_begin:
	case button_ids::id_pad_end:
	case button_ids::id_add_config_file:
	case button_ids::id_refresh:
		return;
	case button_ids::id_reset_parameters:
		ReactivateButtons();
		GetPlayerConfig().from_default();
		UpdateLabels(true);
		return;
	case button_ids::id_blacklist:
	{
		std::lock_guard lock(m_handler_mutex);
		[[maybe_unused]] const PadHandlerBase::connection status = m_handler->get_next_button_press(m_device_name, nullptr, nullptr, PadHandlerBase::gui_call_type::blacklist, {});
		return;
	}
	default:
		break;
	}

	// On shift+click or shift+space enable multi key binding
	if (QApplication::keyboardModifiers() & Qt::KeyboardModifier::ShiftModifier)
	{
		m_enable_multi_binding = true;
	}

	// On alt+click or alt+space allow to handle triggers as the entire stick axis
	m_handler->set_trigger_recognition_mode((QApplication::keyboardModifiers() & Qt::KeyboardModifier::AltModifier) ?
		PadHandlerBase::trigger_recognition_mode::two_directional : PadHandlerBase::trigger_recognition_mode::one_directional);

	// Downgrade focus policies so Space/Enter etc. can be captured during remapping
	// (ReactivateButtons restores the defaults)
	for (auto but : m_pad_buttons->buttons())
	{
		but->setFocusPolicy(Qt::ClickFocus);
	}
	for (auto but : ui->buttonBox->buttons())
	{
		but->setFocusPolicy(Qt::ClickFocus);
	}
	ui->tabWidget->setFocusPolicy(Qt::ClickFocus);
	ui->scrollArea->setFocusPolicy(Qt::ClickFocus);
	ui->chooseConfig->setFocusPolicy(Qt::ClickFocus);
	ui->chooseHandler->setFocusPolicy(Qt::ClickFocus);
	ui->chooseDevice->setFocusPolicy(Qt::ClickFocus);
	ui->chooseClass->setFocusPolicy(Qt::ClickFocus);
	ui->chooseProduct->setFocusPolicy(Qt::ClickFocus);

	// Remember the cursor position as reference for mouse-movement bindings
	m_last_pos = QCursor::pos();

	m_button_id = id;

	if (auto button = m_pad_buttons->button(m_button_id))
	{
		button->setText(tr("[ Waiting %1 ]").arg(MAX_SECONDS));
		button->setPalette(QPalette(Qt::blue));
		button->grabMouse();
	}

	SwitchButtons(false); // disable all buttons, needed for using Space, Enter and other specific buttons

	m_remap_timer.start(1000);
}

// Called when the player tab changes: persists the previous player's settings
// and rebuilds the UI for the newly selected player.
void pad_settings_dialog::OnTabChanged(int index)
{
	// Apply current config
	ApplyCurrentPlayerConfig(index);

	// Move layout to the new tab
	ui->tabWidget->widget(index)->setLayout(ui->mainLayout);

	// Refresh handlers
	RefreshHandlers();
}

// Applies the selected handler to the current player: (re)creates the handler,
// updates the description text, capability flags and the device dropdown, and
// restarts the input polling timers if a device can be configured.
void pad_settings_dialog::ChangeHandler()
{
	// Pause input thread. This means we don't have to lock the handler mutex here.
	pause_input_thread();

	bool force_enable = false; // enable configs even with disconnected devices
	const u32 player = GetPlayerIndex();
	const bool is_ldd_pad = GetIsLddPad(player);

	cfg_player* player_config = g_cfg_input.player[player];

	std::string handler;
	std::string device;
	std::string buddy_device;

	if (is_ldd_pad)
	{
		// LDD pads are application-controlled; force the null handler
		handler = fmt::format("%s", pad_handler::null);
	}
	else
	{
		handler = sstr(ui->chooseHandler->currentData().toString());
		device = player_config->device.to_string();
		buddy_device = player_config->buddy_device.to_string();
	}

	cfg_pad& cfg = player_config->config;

	// Change and get this player's current handler.
	if (auto& cfg_handler = player_config->handler; handler != cfg_handler.to_string())
	{
		if (!cfg_handler.from_string(handler))
		{
			cfg_log.error("Failed to convert input string: %s", handler);
			return;
		}

		// Initialize the new pad config's defaults
		m_handler = pad_thread::GetHandler(player_config->handler);
		pad_thread::InitPadConfig(cfg, cfg_handler, m_handler);
	}
	else
	{
		m_handler = pad_thread::GetHandler(player_config->handler);
	}

	ensure(m_handler);

	// Get the handler's currently available devices.
	const std::vector<pad_list_entry> device_list = m_handler->list_devices();

	// Localized tooltips
	const Tooltips tooltips;

	// Change the description
	switch (m_handler->m_type)
	{
	case pad_handler::null:
		GetPlayerConfig().from_default();
		if (is_ldd_pad)
			m_description = tooltips.gamepad_settings.ldd_pad;
		else
			m_description = tooltips.gamepad_settings.null;
		break;
	case pad_handler::keyboard: m_description = tooltips.gamepad_settings.keyboard; break;
	case pad_handler::skateboard: m_description = tooltips.gamepad_settings.skateboard; break;
#ifdef _WIN32
	case pad_handler::xinput: m_description = tooltips.gamepad_settings.xinput; break;
	case pad_handler::mm: m_description = tooltips.gamepad_settings.mmjoy; break;
	case pad_handler::ds3: m_description = tooltips.gamepad_settings.ds3_windows; break;
	case pad_handler::ds4: m_description = tooltips.gamepad_settings.ds4_windows; break;
	case pad_handler::dualsense: m_description = tooltips.gamepad_settings.dualsense_windows; break;
#elif __linux__
	case pad_handler::ds3: m_description = tooltips.gamepad_settings.ds3_linux; break;
	case pad_handler::ds4: m_description = tooltips.gamepad_settings.ds4_linux; break;
	case pad_handler::dualsense: m_description = tooltips.gamepad_settings.dualsense_linux; break;
#else
	case pad_handler::ds3: m_description = tooltips.gamepad_settings.ds3_other; break;
	case pad_handler::ds4: m_description = tooltips.gamepad_settings.ds4_other; break;
	case pad_handler::dualsense: m_description =
		tooltips.gamepad_settings.dualsense_other; break;
#endif
#ifdef HAVE_SDL2
	case pad_handler::sdl: m_description = tooltips.gamepad_settings.sdl; break;
#endif
#ifdef HAVE_LIBEVDEV
	case pad_handler::evdev: m_description = tooltips.gamepad_settings.evdev; break;
#endif
	}
	ui->l_description->setText(m_description);

	// Update parameters
	m_min_force = 0;
	m_max_force = 255;

	// Reset parameters
	m_lx = 0;
	m_ly = 0;
	m_rx = 0;
	m_ry = 0;

	// Enable Vibration Checkboxes
	m_enable_rumble = m_handler->has_rumble();

	// Enable Motion Settings
	m_enable_motion = m_handler->has_motion();

	// Enable Deadzone Settings
	m_enable_deadzones = m_handler->has_deadzones();

	// Enable Pressure Sensitivity Settings
	m_enable_pressure_intensity_button = m_handler->has_pressure_intensity_button();

	// Enable Analog Limiter Settings
	m_enable_analog_limiter_button = m_handler->has_analog_limiter_button();

	// Change our contextual widgets
	ui->left_stack->setCurrentIndex((m_handler->m_type == pad_handler::keyboard) ? 1 : 0);
	ui->right_stack->setCurrentIndex((m_handler->m_type == pad_handler::keyboard) ? 1 : 0);
	ui->gb_pressure_intensity->setVisible(m_handler->has_pressure_intensity_button());
	ui->gb_analog_limiter->setVisible(m_handler->has_analog_limiter_button());

	// Update device dropdown and block signals while doing so
	ui->chooseDevice->blockSignals(true);
	ui->chooseDevice->clear();

	// Refill the device combobox with currently available devices
	switch (m_handler->m_type)
	{
#ifdef _WIN32
	case pad_handler::xinput:
	case pad_handler::mm:
#endif
	case pad_handler::ds3:
	case pad_handler::ds4:
	case pad_handler::dualsense:
	case pad_handler::skateboard:
	{
		// These handlers expose a fixed number of numbered slots instead of enumerated devices
		const QString name_string = qstr(m_handler->name_string());
		for (usz i = 1; i <= m_handler->max_devices(); i++) // Controllers 1-n in GUI
		{
			const QString device_name = name_string + QString::number(i);
			const QString device_name_localized = GetLocalizedPadName(m_handler->m_type, device_name, i);
			ui->chooseDevice->addItem(device_name_localized, QVariant::fromValue(pad_device_info{ sstr(device_name), device_name_localized, true }));
		}
		force_enable = true;
		break;
	}
	case pad_handler::null:
	{
		if (is_ldd_pad)
		{
			ui->chooseDevice->setPlaceholderText(tr("Custom Controller"));
			break;
		}
		[[fallthrough]];
	}
	default:
	{
		for (usz i = 0; i < device_list.size(); i++)
		{
			const pad_list_entry& device = ::at32(device_list, i);

			if (!device.is_buddy_only)
			{
				const QString device_name_localized = GetLocalizedPadName(m_handler->m_type, QString::fromStdString(device.name), i);
				const QVariant user_data = QVariant::fromValue(pad_device_info{ device.name, device_name_localized, true });

				ui->chooseDevice->addItem(device_name_localized, user_data);
			}
		}
		break;
	}
	}

	// Re-enable signals for device dropdown
	ui->chooseDevice->blockSignals(false);

	// Handle empty device list
	const bool config_enabled = force_enable || (m_handler->m_type != pad_handler::null && ui->chooseDevice->count() > 0);

	if (config_enabled)
	{
		RefreshPads();

		// Reselect the previously configured device if it is still present
		for (int i = 0; i < ui->chooseDevice->count(); i++)
		{
			if (pad_device_info info = get_pad_info(ui->chooseDevice, i); info.name ==
			    device)
			{
				ui->chooseDevice->setCurrentIndex(i);
				break;
			}
		}

		// Fall back to the first device if the configured one is gone
		if (ui->chooseDevice->currentIndex() < 0 && ui->chooseDevice->count() > 0)
		{
			ui->chooseDevice->setCurrentIndex(0);
		}

		// Force Refresh
		ChangeDevice(ui->chooseDevice->currentIndex());
	}
	else
	{
		if (ui->chooseDevice->count() == 0)
		{
			ui->chooseDevice->setPlaceholderText(tr("No Device Detected"));
		}

		// Keep the configured device name
		m_device_name = GetDeviceName();
	}

	// Handle running timers
	if (m_remap_timer.isActive())
	{
		ReactivateButtons();
	}
	if (m_timer_input.isActive())
	{
		m_timer_input.stop();
	}
	if (m_timer_pad_refresh.isActive())
	{
		m_timer_pad_refresh.stop();
	}

	// Reload the buttons with the new handler
	ReloadButtons();

	// Enable configuration if possible
	SwitchButtons(config_enabled && m_handler->m_type == pad_handler::keyboard);

	ui->buttonBox->button(QDialogButtonBox::RestoreDefaults)->setEnabled(!is_ldd_pad);
	ui->chooseDevice->setEnabled(config_enabled && ui->chooseDevice->count() > 0);
	ui->chooseHandler->setEnabled(!is_ldd_pad && ui->chooseHandler->count() > 0);

	// Re-enable input timer
	if (ui->chooseDevice->isEnabled() && ui->chooseDevice->currentIndex() >= 0)
	{
		start_input_thread();
		m_timer_input.start(10);
		m_timer_pad_refresh.start(1000);
	}
}

// Loads the given config file, normalizes per-handler defaults and selects the
// matching handler in the dropdown before forcing a handler refresh.
void pad_settings_dialog::ChangeConfig(const QString& config_file)
{
	if (config_file.isEmpty())
		return;

	m_config_file = sstr(config_file);

	// Load in order to get the pad handlers
	if (!g_cfg_input.load(m_title_id, m_config_file, true))
	{
		cfg_log.notice("Loaded empty pad config");
	}

	// Adjust to the different pad handlers
	for (usz i = 0; i < g_cfg_input.player.size(); i++)
	{
		std::shared_ptr<PadHandlerBase> handler;
		pad_thread::InitPadConfig(g_cfg_input.player[i]->config, g_cfg_input.player[i]->handler, handler);
	}

	// Reload with proper defaults
	if (!g_cfg_input.load(m_title_id, m_config_file, true))
	{
		cfg_log.notice("Reloaded empty pad config");
	}

	const u32 player_id = GetPlayerIndex();
	const std::string handler = fmt::format("%s", g_cfg_input.player[player_id]->handler.get());

	if (const QString q_handler = qstr(handler); ui->chooseHandler->findText(q_handler) >= 0)
	{
		ui->chooseHandler->setCurrentText(q_handler);
	}
	else
	{
		cfg_log.error("Handler '%s' not found in handler dropdown.", handler);
	}

	// Force Refresh
	ChangeHandler();
}

// Stores the device selected in the dropdown as the current device name.
void pad_settings_dialog::ChangeDevice(int index)
{
	if (index < 0)
		return;

	const QVariant user_data = ui->chooseDevice->itemData(index);

	if (!user_data.canConvert<pad_device_info>())
	{
		cfg_log.fatal("ChangeDevice: Cannot convert itemData for index %d and itemText %s", index, ui->chooseDevice->itemText(index));
		return;
	}

	const pad_device_info info = user_data.value<pad_device_info>();
	SetDeviceName(info.name);
}

// Repopulates the product dropdown with all products belonging to the given device class.
void pad_settings_dialog::HandleDeviceClassChange(u32 class_id) const
{
	ui->chooseProduct->clear();

	for (const input::product_info& product : input::get_products_by_class(class_id))
	{
		switch (product.type)
		{
		case input::product_type::playstation_3_controller:
		{
			ui->chooseProduct->addItem(tr("PS3 Controller", "PlayStation 3 Controller"), static_cast<int>(product.type));
			break;
		}
		case input::product_type::dance_dance_revolution_mat:
		{
			ui->chooseProduct->addItem(tr("Dance Dance Revolution", "Dance Dance Revolution Mat"), static_cast<int>(product.type));
			break;
		}
		case input::product_type::dj_hero_turntable:
		{
			ui->chooseProduct->addItem(tr("DJ Hero Turntable", "DJ Hero Turntable"), static_cast<int>(product.type));
			break;
		}
		case input::product_type::harmonix_rockband_drum_kit:
		{
			ui->chooseProduct->addItem(tr("Rock Band", "Harmonix Rock Band Drum Kit"), static_cast<int>(product.type));
			break;
		}
		case input::product_type::harmonix_rockband_drum_kit_2:
		{
			ui->chooseProduct->addItem(tr("Rock Band Pro", "Harmonix Rock Band Pro-Drum Kit"), static_cast<int>(product.type));
			break;
		}
		case input::product_type::harmonix_rockband_guitar:
		{
			ui->chooseProduct->addItem(tr("Rock Band", "Harmonix Rock Band Guitar"), static_cast<int>(product.type));
			break;
		}
		case
		input::product_type::red_octane_gh_drum_kit:
		{
			ui->chooseProduct->addItem(tr("Guitar Hero", "RedOctane Guitar Hero Drum Kit"), static_cast<int>(product.type));
			break;
		}
		case input::product_type::red_octane_gh_guitar:
		{
			ui->chooseProduct->addItem(tr("Guitar Hero", "RedOctane Guitar Hero Guitar"), static_cast<int>(product.type));
			break;
		}
		case input::product_type::rock_revolution_drum_kit:
		{
			ui->chooseProduct->addItem(tr("Rock Revolution", "Rock Revolution Drum Controller"), static_cast<int>(product.type));
			break;
		}
		case input::product_type::ps_move_navigation:
		{
			ui->chooseProduct->addItem(tr("PS Move Navigation", "PS Move Navigation Controller"), static_cast<int>(product.type));
			break;
		}
		case input::product_type::ride_skateboard:
		{
			ui->chooseProduct->addItem(tr("RIDE Skateboard", "Tony Hawk RIDE Skateboard Controller"), static_cast<int>(product.type));
			break;
		}
		case input::product_type::guncon_3:
		{
			ui->chooseProduct->addItem(tr("GunCon 3", "GunCon 3 Controller"), static_cast<int>(product.type));
			break;
		}
		case input::product_type::top_shot_elite:
		{
			ui->chooseProduct->addItem(tr("Top Shot Elite", "Top Shot Elite Controller"), static_cast<int>(product.type));
			break;
		}
		case input::product_type::top_shot_fearmaster:
		{
			ui->chooseProduct->addItem(tr("Top Shot Fearmaster", "Top Shot Fearmaster Controller"), static_cast<int>(product.type));
			break;
		}
		case input::product_type::udraw_gametablet:
		{
			ui->chooseProduct->addItem(tr("uDraw GameTablet", "uDraw GameTablet Controller"), static_cast<int>(product.type));
			break;
		}
		}
	}
}

// Prompts the user for a new config name, validates it (non-empty, no '.',
// not already present) and creates/selects the new config file on success.
void pad_settings_dialog::AddConfigFile()
{
	QInputDialog* dialog = new QInputDialog(this);
	dialog->setWindowTitle(tr("Choose a unique name"));
	dialog->setLabelText(tr("Configuration Name: "));
	dialog->setFixedSize(500, 100);

	// Re-prompt until the user cancels or enters a valid name
	while (dialog->exec() != QDialog::Rejected)
	{
		const QString config_name = dialog->textValue();

		if (config_name.isEmpty())
		{
			QMessageBox::warning(this, tr("Error"), tr("Name cannot be empty"));
			continue;
		}
		if (config_name.contains("."))
		{
			QMessageBox::warning(this, tr("Error"), tr("Must choose a name without '.'"));
			continue;
		}
		if (ui->chooseConfig->findText(config_name) != -1)
		{
			QMessageBox::warning(this, tr("Error"), tr("Please choose a non-existing name"));
			continue;
		}
		if (CreateConfigFile(qstr(rpcs3::utils::get_input_config_dir(m_title_id)), config_name))
		{
			ui->chooseConfig->addItem(config_name);
			ui->chooseConfig->setCurrentText(config_name);
		}
		break;
	}
}

// Rebuilds the handler dropdown for the current player (a single "Reserved"
// entry for LDD pads) and forces a handler change.
void pad_settings_dialog::RefreshHandlers()
{
	const u32 player_id = GetPlayerIndex();

	// Set the current input type from config. Disable signal to have ChangeHandler always executed exactly once
	ui->chooseHandler->blockSignals(true);
	ui->chooseHandler->clear();

	if (GetIsLddPad(player_id))
	{
		ui->chooseHandler->addItem(tr("Reserved"));
	}
	else
	{
		const std::vector<std::string> str_inputs = g_cfg_input.player[0]->handler.to_list();
		for (usz i = 0; i < str_inputs.size(); i++)
		{
			const QString item_data = qstr(str_inputs[i]);
			ui->chooseHandler->addItem(GetLocalizedPadHandler(item_data, static_cast<pad_handler>(i)), QVariant(item_data));
		}

		const auto& handler = g_cfg_input.player[player_id]->handler;
		ui->chooseHandler->setCurrentText(GetLocalizedPadHandler(qstr(handler.to_string()), handler));
	}

	ui->chooseHandler->blockSignals(false);

	// Force Change
	ChangeHandler();
}

// Writes the UI state of the previously shown player back into g_cfg_input and
// records duplicate button assignments for the SaveExit warning.
void pad_settings_dialog::ApplyCurrentPlayerConfig(int new_player_id)
{
	if (!m_handler || new_player_id < 0 || static_cast<u32>(new_player_id) >= g_cfg_input.player.size())
	{
		return;
	}

	m_duplicate_buttons[m_last_player_id].clear();

	auto& player = g_cfg_input.player[m_last_player_id];
	m_last_player_id = new_player_id;

	// Check for duplicate button choices
	if (m_handler->m_type != pad_handler::null)
	{
		std::set<std::string> unique_keys;
		for (const auto& [id, button] : m_cfg_entries)
		{
			// Let's ignore special keys, unless we're using a keyboard
			if ((id == button_ids::id_pressure_intensity || id == button_ids::id_analog_limiter) && m_handler->m_type != pad_handler::keyboard)
				continue;

			for
			(const std::string& key : cfg_pad::get_buttons(button.keys))
			{
				// insert returns ok == false if the key was already taken by another button
				if (const auto& [it, ok] = unique_keys.insert(key); !ok)
				{
					m_duplicate_buttons[m_last_player_id] = key;
					break;
				}
			}
		}
	}

	// Apply buttons
	for (const auto& entry : m_cfg_entries)
	{
		entry.second.cfg_text->from_string(entry.second.keys);
	}

	// Apply rest of config (UI factors are stored as percent, hence * 100)
	auto& cfg = player->config;
	cfg.lstickmultiplier.set(ui->stick_multi_left->value() * 100);
	cfg.rstickmultiplier.set(ui->stick_multi_right->value() * 100);
	cfg.lpadsquircling.set(ui->squircle_left->value());
	cfg.rpadsquircling.set(ui->squircle_right->value());

	if (m_handler->has_rumble())
	{
		cfg.enable_vibration_motor_large.set(ui->chb_vibration_large->isChecked());
		cfg.enable_vibration_motor_small.set(ui->chb_vibration_small->isChecked());
		cfg.switch_vibration_motors.set(ui->chb_vibration_switch->isChecked());
	}

	if (m_handler->has_deadzones())
	{
		cfg.ltriggerthreshold.set(ui->slider_trigger_left->value());
		cfg.rtriggerthreshold.set(ui->slider_trigger_right->value());
		cfg.lstickdeadzone.set(ui->slider_stick_left->value());
		cfg.rstickdeadzone.set(ui->slider_stick_right->value());
		cfg.lstick_anti_deadzone.set(ui->anti_deadzone_slider_stick_left->value());
		cfg.rstick_anti_deadzone.set(ui->anti_deadzone_slider_stick_right->value());
	}

	if (m_handler->has_analog_limiter_button())
	{
		cfg.analog_limiter_toggle_mode.set(ui->cb_analog_limiter_toggle_mode->isChecked());
	}

	if (m_handler->has_pressure_intensity_button())
	{
		cfg.pressure_intensity.set(ui->sb_pressure_intensity->value());
		cfg.pressure_intensity_toggle_mode.set(ui->cb_pressure_intensity_toggle_mode->isChecked());
	}

	cfg.pressure_intensity_deadzone.set(ui->pressure_intensity_deadzone->value());

	if (m_handler->m_type == pad_handler::keyboard)
	{
		const int mouse_move_mode = ui->mouse_movement->currentData().toInt();
		ensure(mouse_move_mode >= 0 && mouse_move_mode <= 1);
		cfg.mouse_move_mode.set(static_cast<mouse_movement_mode>(mouse_move_mode));
		cfg.mouse_acceleration_x.set(ui->mouse_accel_x->value() * 100);
		cfg.mouse_acceleration_y.set(ui->mouse_accel_y->value() * 100);
		cfg.mouse_deadzone_x.set(ui->mouse_dz_x->value());
		cfg.mouse_deadzone_y.set(ui->mouse_dz_y->value());
		cfg.l_stick_lerp_factor.set(ui->left_stick_lerp->value() * 100);
		cfg.r_stick_lerp_factor.set(ui->right_stick_lerp->value() * 100);
	}

	cfg.device_class_type.set(ui->chooseClass->currentData().toUInt());

	const auto info = input::get_product_info(static_cast<input::product_type>(ui->chooseProduct->currentData().toInt()));

	cfg.vendor_id.set(info.vendor_id);
	cfg.product_id.set(info.product_id);
}

// Persists the current player's settings, warns about duplicate bindings
// (user may abort), writes the active config selection and accepts the dialog.
void pad_settings_dialog::SaveExit()
{
	ApplyCurrentPlayerConfig(m_last_player_id);

	for (const auto& [player_id, key] : m_duplicate_buttons)
	{
		if (!key.empty())
		{
			int result = QMessageBox::Yes;
			m_gui_settings->ShowConfirmationBox(
				tr("Warning!"),
				tr("The %0 button <b>%1</b> of <b>Player %2</b> was assigned at least twice.<br>Please consider adjusting the configuration.<br><br>Continue anyway?<br>")
					.arg(qstr(g_cfg_input.player[player_id]->handler.to_string())).arg(qstr(key)).arg(player_id + 1),
				gui::ib_same_buttons, &result, this);

			if (result == QMessageBox::No)
				return;

			break;
		}
	}

	const std::string config_file_key = m_title_id.empty() ?
g_cfg_input_configs.global_key : m_title_id; g_cfg_input_configs.active_configs.set_value(config_file_key, m_config_file); g_cfg_input_configs.save(); g_cfg_input.save(m_title_id, m_config_file); QDialog::accept(); } void pad_settings_dialog::CancelExit() { // Reloads configs from file or defaults g_cfg_input_configs.load(); g_cfg_input.from_default(); QDialog::reject(); } QString pad_settings_dialog::GetLocalizedPadHandler(const QString& original, pad_handler handler) { switch (handler) { case pad_handler::null: return tr("Null"); case pad_handler::keyboard: return tr("Keyboard"); case pad_handler::ds3: return tr("DualShock 3"); case pad_handler::ds4: return tr("DualShock 4"); case pad_handler::dualsense: return tr("DualSense"); case pad_handler::skateboard: return tr("Skateboard"); #ifdef _WIN32 case pad_handler::xinput: return tr("XInput"); case pad_handler::mm: return tr("MMJoystick"); #endif #ifdef HAVE_SDL2 case pad_handler::sdl: return tr("SDL"); #endif #ifdef HAVE_LIBEVDEV case pad_handler::evdev: return tr("Evdev"); #endif } return original; } QString pad_settings_dialog::GetLocalizedPadName(pad_handler handler, const QString& original, usz index) { switch (handler) { case pad_handler::null: return tr("Default Null Device"); case pad_handler::keyboard: return tr("Keyboard"); case pad_handler::ds3: return tr("DS3 Pad #%0").arg(index); case pad_handler::ds4: return tr("DS4 Pad #%0").arg(index); case pad_handler::dualsense: return tr("DualSense Pad #%0").arg(index); case pad_handler::skateboard: return tr("Skateboard #%0").arg(index); #ifdef _WIN32 case pad_handler::xinput: return tr("XInput Pad #%0").arg(index); case pad_handler::mm: return tr("Joystick #%0").arg(index); #endif #ifdef HAVE_SDL2 case pad_handler::sdl: break; // Localization not feasible. Names differ for each device. #endif #ifdef HAVE_LIBEVDEV case pad_handler::evdev: break; // Localization not feasible. Names differ for each device. 
#endif } return original; } bool pad_settings_dialog::GetIsLddPad(u32 index) const { // We only check for ldd pads if the current dialog may affect the running application. // To simplify this we include the global pad config indiscriminately as well as the relevant custom pad config. if (!Emu.IsStopped() && (m_title_id.empty() || m_title_id == Emu.GetTitleID())) { std::lock_guard lock(pad::g_pad_mutex); if (const auto handler = pad::get_current_handler(true)) { ensure(index < handler->GetPads().size()); if (const std::shared_ptr<Pad> pad = ::at32(handler->GetPads(), index)) { return pad->ldd; } } } return false; } u32 pad_settings_dialog::GetPlayerIndex() const { const int player_id = ui->tabWidget->currentIndex(); ensure(player_id >= 0 && static_cast<u32>(player_id) < g_cfg_input.player.size()); return static_cast<u32>(player_id); } cfg_pad& pad_settings_dialog::GetPlayerConfig() const { return g_cfg_input.player[GetPlayerIndex()]->config; } std::string pad_settings_dialog::GetDeviceName() const { return g_cfg_input.player[GetPlayerIndex()]->device.to_string(); } void pad_settings_dialog::SetDeviceName(const std::string& name) { m_device_name = name; if (!g_cfg_input.player[GetPlayerIndex()]->device.from_string(m_device_name)) { cfg_log.error("Failed to convert device string: %s", m_device_name); } } void pad_settings_dialog::ResizeDialog() { // Widgets const QSize buttons_size(0, ui->buttonBox->sizeHint().height()); const QSize tabwidget_size = ui->tabWidget->sizeHint(); // Spacing const int nr_of_spacings = 1; // Number of widgets - 1 const QSize spacing_size(0, layout()->spacing() * nr_of_spacings); // Margins const auto margins = layout()->contentsMargins(); const QSize margin_size(margins.left() + margins.right(), margins.top() + margins.bottom()); resize(tabwidget_size + buttons_size + margin_size + spacing_size); } void pad_settings_dialog::SubscribeTooltip(QObject* object, const QString& tooltip) { ensure(!!object); m_descriptions[object] = tooltip; 
object->installEventFilter(this); } void pad_settings_dialog::SubscribeTooltips() { // Localized tooltips const Tooltips tooltips; SubscribeTooltip(ui->gb_analog_limiter, tooltips.gamepad_settings.analog_limiter); SubscribeTooltip(ui->gb_pressure_intensity, tooltips.gamepad_settings.pressure_intensity); SubscribeTooltip(ui->gb_pressure_intensity_deadzone, tooltips.gamepad_settings.pressure_deadzone); SubscribeTooltip(ui->gb_squircle, tooltips.gamepad_settings.squircle_factor); SubscribeTooltip(ui->gb_stick_multi, tooltips.gamepad_settings.stick_multiplier); SubscribeTooltip(ui->gb_vibration, tooltips.gamepad_settings.vibration); SubscribeTooltip(ui->gb_motion_controls, tooltips.gamepad_settings.motion_controls); SubscribeTooltip(ui->gb_stick_deadzones, tooltips.gamepad_settings.stick_deadzones); SubscribeTooltip(ui->gb_stick_anti_deadzones, tooltips.gamepad_settings.stick_deadzones); SubscribeTooltip(ui->gb_stick_preview, tooltips.gamepad_settings.emulated_preview); SubscribeTooltip(ui->gb_triggers, tooltips.gamepad_settings.trigger_deadzones); SubscribeTooltip(ui->gb_stick_lerp, tooltips.gamepad_settings.stick_lerp); SubscribeTooltip(ui->gb_mouse_accel, tooltips.gamepad_settings.mouse_acceleration); SubscribeTooltip(ui->gb_mouse_dz, tooltips.gamepad_settings.mouse_deadzones); SubscribeTooltip(ui->gb_mouse_movement, tooltips.gamepad_settings.mouse_movement); for (int i = button_ids::id_pad_begin + 1; i < button_ids::id_pad_end; i++) { SubscribeTooltip(m_pad_buttons->button(i), tooltips.gamepad_settings.button_assignment); } } void pad_settings_dialog::start_input_thread() { m_input_thread_state = input_thread_state::active; } void pad_settings_dialog::pause_input_thread() { if (m_input_thread) { m_input_thread_state = input_thread_state::pausing; while (m_input_thread_state != input_thread_state::paused) { std::this_thread::sleep_for(1ms); } } }
70,680
C++
.cpp
1,776
36.976351
316
0.718592
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
5,134
qt_camera_handler.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/qt_camera_handler.cpp
#include "stdafx.h"
#include "qt_camera_handler.h"
#include "Emu/system_config.h"
#include "Emu/System.h"
#include "Emu/Io/camera_config.h"

#include <QMediaDevices>

#if QT_CONFIG(permissions)
#include <QGuiApplication>
#include <QPermissions>
#endif

LOG_CHANNEL(camera_log, "Camera");

// Qt-based camera backend. Logs available devices and loads the camera config.
qt_camera_handler::qt_camera_handler() : camera_handler_base()
{
	// List available cameras
	for (const QCameraDevice& camera_device : QMediaDevices::videoInputs())
	{
		camera_log.success("Found camera: id=%s, description=%s", camera_device.id().toStdString(), camera_device.description());
	}

	if (!g_cfg_camera.load())
	{
		camera_log.notice("Could not load camera config. Using defaults.");
	}
}

// Qt objects must be torn down on the main thread, hence the blocking call.
qt_camera_handler::~qt_camera_handler()
{
	Emu.BlockingCallFromMainThread([&]()
	{
		close_camera();
		reset();
	});
}

// Drops the camera, the video sink and the capture session.
void qt_camera_handler::reset()
{
	m_camera.reset();
	m_video_sink.reset();
	m_media_capture_session.reset();
}

// (Re)creates camera, capture session and video sink for 'camera_info' and
// wires them together. Resets everything if 'camera_info' is null.
void qt_camera_handler::set_camera(const QCameraDevice& camera_info)
{
	if (camera_info.isNull())
	{
		reset();
		return;
	}

	// Determine if the camera is front facing, in which case we will need to flip the image horizontally.
	const bool front_facing = camera_info.position() == QCameraDevice::Position::FrontFace;

	camera_log.success("Using camera: id=\"%s\", description=\"%s\", front_facing=%d", camera_info.id().toStdString(), camera_info.description(), front_facing);

	// Create camera and video surface
	m_media_capture_session.reset(new QMediaCaptureSession(nullptr));
	m_video_sink.reset(new qt_camera_video_sink(front_facing, nullptr));
	m_camera.reset(new QCamera(camera_info));

	connect(m_camera.get(), &QCamera::activeChanged, this, &qt_camera_handler::handle_camera_active);
	connect(m_camera.get(), &QCamera::errorOccurred, this, &qt_camera_handler::handle_camera_error);

	// Setup video sink
	m_media_capture_session->setCamera(m_camera.get());
	m_media_capture_session->setVideoSink(m_video_sink.get());

	// Update the settings
	update_camera_settings();
}

// Slot for QCamera::activeChanged: mirrors the Qt active flag into m_state.
void qt_camera_handler::handle_camera_active(bool is_active)
{
	camera_log.notice("Camera active status changed to %d", is_active);

	if (is_active)
	{
		m_state = camera_handler_state::running;
	}
	else
	{
		m_state = camera_handler_state::closed;
	}
}

// Slot for QCamera::errorOccurred: log only; m_state is not changed here.
void qt_camera_handler::handle_camera_error(QCamera::Error error, const QString& errorString)
{
	camera_log.error("Error event: \"%s\" (error=%d)", errorString, static_cast<int>(error));
}

// Selects the camera configured in g_cfg.io.camera_id (stopping a previously
// selected one first), creates the Qt objects and sets m_state to open/closed.
void qt_camera_handler::open_camera()
{
	camera_log.notice("Loading camera");

	if (const std::string camera_id = g_cfg.io.camera_id.to_string();
		m_camera_id != camera_id)
	{
		camera_log.notice("Switching camera from %s to %s", m_camera_id, camera_id);
		camera_log.notice("Stopping old camera...");
		if (m_camera) m_camera->stop();
		m_camera_id = camera_id;
	}

	QCameraDevice selected_camera{};

	if (m_camera_id == g_cfg.io.camera_id.def)
	{
		// "Default" entry: let Qt pick the system default device.
		selected_camera = QMediaDevices::defaultVideoInput();
	}
	else if (!m_camera_id.empty())
	{
		const QString camera_id = QString::fromStdString(m_camera_id);
		for (const QCameraDevice& camera_device : QMediaDevices::videoInputs())
		{
			if (camera_id == camera_device.id())
			{
				selected_camera = camera_device;
				break;
			}
		}
	}

	set_camera(selected_camera);

	if (!m_camera)
	{
		// Empty id means the camera is deliberately disabled; anything else means lookup failed.
		if (m_camera_id.empty()) camera_log.notice("Camera disabled");
		else camera_log.error("No camera found");
		m_state = camera_handler_state::closed;
		return;
	}

	if (m_camera->isActive())
	{
		camera_log.notice("Camera already active");
		return;
	}

	// List all supported formats for debugging
	for (const QCameraFormat& format : m_camera->cameraDevice().videoFormats())
	{
		camera_log.notice("Supported format: pixelformat=%s, resolution=%dx%d framerate=%f-%f", format.pixelFormat(), format.resolution().width(), format.resolution().height(), format.minFrameRate(), format.maxFrameRate());
	}

	// Update camera and view finder settings
	update_camera_settings();

	m_state = camera_handler_state::open;
}

// Stops the camera if one is selected; otherwise just marks the state closed.
void qt_camera_handler::close_camera()
{
	camera_log.notice("Unloading camera");

	if (!m_camera)
	{
		if (m_camera_id.empty()) camera_log.notice("Camera disabled");
		else camera_log.error("No camera found");
		m_state = camera_handler_state::closed;
		return;
	}

	// Unload/close camera
	m_camera->stop();
}

// Starts frame delivery. On platforms with a permission system this may defer
// itself: the permission callback re-invokes start_camera() once granted.
void qt_camera_handler::start_camera()
{
	camera_log.notice("Starting camera");

	if (!m_camera)
	{
		if (m_camera_id.empty()) camera_log.notice("Camera disabled");
		else camera_log.error("No camera found");
		m_state = camera_handler_state::closed;
		return;
	}

	if (m_camera->isActive())
	{
		camera_log.notice("Camera already started");
		return;
	}

#if QT_CONFIG(permissions)
	const QCameraPermission permission;
	switch (qApp->checkPermission(permission))
	{
	case Qt::PermissionStatus::Undetermined:
		camera_log.notice("Requesting camera permission");
		qApp->requestPermission(permission, this, [this]()
		{
			start_camera();
		});
		return;
	case Qt::PermissionStatus::Denied:
		camera_log.error("RPCS3 has no permissions to access cameras on this device.");
		return;
	case Qt::PermissionStatus::Granted:
		camera_log.notice("Camera permission granted");
		break;
	}
#endif

	// Start camera. We will start receiving frames now.
	m_camera->start();
}

// Stops frame delivery without tearing down the camera objects.
void qt_camera_handler::stop_camera()
{
	camera_log.notice("Stopping camera");

	if (!m_camera)
	{
		if (m_camera_id.empty()) camera_log.notice("Camera disabled");
		else camera_log.error("No camera found");
		m_state = camera_handler_state::closed;
		return;
	}

	if (!m_camera->isActive())
	{
		camera_log.notice("Camera already stopped");
		return;
	}

	// Stop camera. The camera will still be drawing power.
	m_camera->stop();
}

// Caches the requested pixel format/bytesize and forwards them to the sink if present.
void qt_camera_handler::set_format(s32 format, u32 bytesize)
{
	m_format = format;
	m_bytesize = bytesize;

	if (m_video_sink)
	{
		m_video_sink->set_format(m_format, m_bytesize);
	}
}

// Caches the requested frame rate (not forwarded anywhere here; presumably
// consumed by update_camera_settings or callers — confirm in the header).
void qt_camera_handler::set_frame_rate(u32 frame_rate)
{
	m_frame_rate = frame_rate;
}

// Caches the requested resolution and forwards it to the sink if present.
void qt_camera_handler::set_resolution(u32 width, u32 height)
{
	m_width = width;
	m_height = height;

	if (m_video_sink)
	{
		m_video_sink->set_resolution(m_width, m_height);
	}
}

// Caches the mirror flag and forwards it to the sink if present.
void qt_camera_handler::set_mirrored(bool mirrored)
{
	m_mirrored = mirrored;

	if (m_video_sink)
	{
		m_video_sink->set_mirrored(m_mirrored);
	}
}

// Returns the sink's current frame number, or 0 if no sink exists.
u64 qt_camera_handler::frame_number() const
{
	return m_video_sink ? m_video_sink->frame_number() : 0;
}

// Copies the newest frame into 'buf' and reports its metadata through the out
// parameters. Returns the handler state; only 'running' implies data was copied.
camera_handler_base::camera_handler_state qt_camera_handler::get_image(u8* buf, u64 size, u32& width, u32& height, u64& frame_number, u64& bytes_read)
{
	width = 0;
	height = 0;
	frame_number = 0;
	bytes_read = 0;

	// A changed config id means a camera switch is pending; report closed.
	if (const std::string camera_id = g_cfg.io.camera_id.to_string();
		m_camera_id != camera_id)
	{
		camera_log.notice("Switching cameras");
		m_state = camera_handler_state::closed;
		return camera_handler_state::closed;
	}

	if (m_camera_id.empty())
	{
		camera_log.notice("Camera disabled");
		m_state = camera_handler_state::closed;
		return camera_handler_state::closed;
	}

	if (!m_camera || !m_video_sink)
	{
		camera_log.fatal("Error: camera invalid");
		m_state = camera_handler_state::closed;
		return camera_handler_state::closed;
	}

	// Backup current state. State may change through events.
	const camera_handler_state current_state = m_state;

	if (current_state == camera_handler_state::running)
	{
		// Copy latest image into out buffer.
		m_video_sink->get_image(buf, size, width, height, frame_number, bytes_read);
	}
	else
	{
		camera_log.error("Camera not running (m_state=%d)", static_cast<int>(current_state));
	}

	return current_state;
}

// Applies the camera format saved in the per-camera config (matched by
// resolution, fps range and pixel format with a small float epsilon) and pushes
// the cached resolution/format/mirror settings into the video sink.
void qt_camera_handler::update_camera_settings()
{
	// Update camera if possible. We can only do this if it is already loaded.
	if (m_camera && m_camera->isAvailable())
	{
		// Get camera id. Use camera id of Qt default if the "Default" camera is selected.
		const std::string camera_id = (m_camera_id == g_cfg.io.camera_id.def)
			? QMediaDevices::defaultVideoInput().id().toStdString()
			: m_camera_id;

		// Load selected settings from config file
		bool success = false;
		cfg_camera::camera_setting cfg_setting = g_cfg_camera.get_camera_setting(camera_id, success);

		if (success)
		{
			camera_log.notice("Found config entry for camera \"%s\" (m_camera_id='%s')", camera_id, m_camera_id);

			// List all available settings and choose the proper value if possible.
			const double epsilon = 0.001;
			success = false;

			for (const QCameraFormat& supported_setting : m_camera->cameraDevice().videoFormats())
			{
				if (supported_setting.resolution().width() == cfg_setting.width &&
					supported_setting.resolution().height() == cfg_setting.height &&
					supported_setting.minFrameRate() >= (cfg_setting.min_fps - epsilon) &&
					supported_setting.minFrameRate() <= (cfg_setting.min_fps + epsilon) &&
					supported_setting.maxFrameRate() >= (cfg_setting.max_fps - epsilon) &&
					supported_setting.maxFrameRate() <= (cfg_setting.max_fps + epsilon) &&
					supported_setting.pixelFormat() == static_cast<QVideoFrameFormat::PixelFormat>(cfg_setting.format))
				{
					// Apply settings.
					camera_log.notice("Setting view finder settings: frame_rate=%f, width=%d, height=%d, pixel_format=%s",
						supported_setting.maxFrameRate(), supported_setting.resolution().width(), supported_setting.resolution().height(), supported_setting.pixelFormat());
					m_camera->setCameraFormat(supported_setting);
					success = true;
					break;
				}
			}

			if (!success)
			{
				camera_log.warning("No matching camera setting available for the camera config: max_fps=%f, width=%d, height=%d, format=%d",
					cfg_setting.max_fps, cfg_setting.width, cfg_setting.height, cfg_setting.format);
			}
		}

		if (!success)
		{
			camera_log.notice("Using default view finder settings");
		}
	}

	// Update video surface if possible
	if (m_video_sink)
	{
		m_video_sink->set_resolution(m_width, m_height);
		m_video_sink->set_format(m_format, m_bytesize);
		m_video_sink->set_mirrored(m_mirrored);
	}
}
10,008
C++
.cpp
318
28.91195
217
0.719285
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
5,135
trophy_manager_dialog.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/trophy_manager_dialog.cpp
#include "stdafx.h"
#include "trophy_manager_dialog.h"
#include "custom_table_widget_item.h"
#include "game_list_delegate.h"
#include "qt_utils.h"
#include "game_list.h"
#include "gui_settings.h"
#include "progress_dialog.h"
#include "persistent_settings.h"
#include "util/logs.hpp"
#include "Utilities/StrUtil.h"
#include "Utilities/File.h"
#include "Emu/VFS.h"
#include "Emu/System.h"
#include "Emu/system_utils.hpp"
#include "Emu/Cell/Modules/sceNpTrophy.h"
#include "Emu/Cell/Modules/cellRtc.h"

#include <QApplication>
#include <QClipboard>
#include <QtConcurrent>
#include <QFutureWatcher>
#include <QHeaderView>
#include <QVBoxLayout>
#include <QCheckBox>
#include <QGroupBox>
#include <QPixmap>
#include <QDir>
#include <QMenu>
#include <QDesktopServices>
#include <QScrollBar>
#include <QWheelEvent>
#include <QGuiApplication>
#include <QScreen>
#include <QTimeZone>

LOG_CHANNEL(gui_log, "GUI");

// Custom Qt item-data roles used on the game table's icon items to cache
// the trophy-db index and the lazily loaded game icon pixmap.
enum GameUserRole
{
	GameIndex = Qt::UserRole,
	GamePixmapLoaded,
	GamePixmap
};

// Builds the whole trophy manager UI: loads persisted view options, creates the
// game and trophy tables, the filter checkboxes and icon-size sliders, wires up
// all signal connections, then kicks off the asynchronous trophy folder scan.
trophy_manager_dialog::trophy_manager_dialog(std::shared_ptr<gui_settings> gui_settings)
	: QWidget()
	, m_gui_settings(std::move(gui_settings))
{
	// Nonspecific widget settings
	setWindowTitle(tr("Trophy Manager"));
	setObjectName("trophy_manager");
	setAttribute(Qt::WA_DeleteOnClose);
	setAttribute(Qt::WA_StyledBackground);

	// Restore the persisted view options (icon sizes and filter states).
	m_game_icon_size_index = m_gui_settings->GetValue(gui::tr_game_iconSize).toInt();
	m_icon_height = m_gui_settings->GetValue(gui::tr_icon_height).toInt();
	m_show_locked_trophies = m_gui_settings->GetValue(gui::tr_show_locked).toBool();
	m_show_unlocked_trophies = m_gui_settings->GetValue(gui::tr_show_unlocked).toBool();
	m_show_hidden_trophies = m_gui_settings->GetValue(gui::tr_show_hidden).toBool();
	m_show_bronze_trophies = m_gui_settings->GetValue(gui::tr_show_bronze).toBool();
	m_show_silver_trophies = m_gui_settings->GetValue(gui::tr_show_silver).toBool();
	m_show_gold_trophies = m_gui_settings->GetValue(gui::tr_show_gold).toBool();
	m_show_platinum_trophies = m_gui_settings->GetValue(gui::tr_show_platinum).toBool();

	// Make sure the directory is mounted
	vfs::mount("/dev_hdd0", rpcs3::utils::get_hdd0_dir())
;
	// Get the currently selected user's trophy path.
	m_trophy_dir = "/dev_hdd0/home/" + Emu.GetUsr() + "/trophy/";

	// Game chooser combo box
	m_game_combo = new QComboBox();
	m_game_combo->setSizeAdjustPolicy(QComboBox::AdjustToMinimumContentsLengthWithIcon);

	// Game progression label
	m_game_progress = new QLabel(tr("Progress: %1% (%2/%3)").arg(0).arg(0).arg(0));

	// Games Table
	m_game_table = new game_list();
	m_game_table->setObjectName("trophy_manager_game_table");
	m_game_table->setShowGrid(false);
	m_game_table->setVerticalScrollMode(QAbstractItemView::ScrollPerPixel);
	m_game_table->setHorizontalScrollMode(QAbstractItemView::ScrollPerPixel);
	m_game_table->verticalScrollBar()->installEventFilter(this);
	m_game_table->verticalScrollBar()->setSingleStep(20);
	m_game_table->horizontalScrollBar()->setSingleStep(20);
	m_game_table->setItemDelegate(new game_list_delegate(m_game_table));
	m_game_table->setSelectionBehavior(QAbstractItemView::SelectRows);
	m_game_table->setSelectionMode(QAbstractItemView::SingleSelection);
	m_game_table->setEditTriggers(QAbstractItemView::NoEditTriggers);
	m_game_table->setColumnCount(static_cast<int>(gui::trophy_game_list_columns::count));
	m_game_table->horizontalHeader()->setDefaultAlignment(Qt::AlignLeft);
	m_game_table->horizontalHeader()->setStretchLastSection(true);
	m_game_table->verticalHeader()->setSectionResizeMode(QHeaderView::Fixed);
	m_game_table->setContextMenuPolicy(Qt::CustomContextMenu);
	m_game_table->verticalHeader()->setVisible(false);
	m_game_table->setAlternatingRowColors(true);
	m_game_table->installEventFilter(this);

	// Registers a game-table column header plus its "show column" context action.
	auto add_game_column = [this](gui::trophy_game_list_columns col, const QString& header_text, const QString& action_text)
	{
		m_game_table->setHorizontalHeaderItem(static_cast<int>(col), new QTableWidgetItem(header_text));
		m_game_column_acts.append(new QAction(action_text, this));
	};

	add_game_column(gui::trophy_game_list_columns::icon, tr("Icon"), tr("Show Icons"));
	add_game_column(gui::trophy_game_list_columns::name, tr("Game"), tr("Show Games"));
	add_game_column(gui::trophy_game_list_columns::progress, tr("Progress"), tr("Show Progress"));
	add_game_column(gui::trophy_game_list_columns::trophies, tr("Trophies"), tr("Show Trophies"));

	// Trophy Table
	m_trophy_table = new game_list();
	m_trophy_table->setObjectName("trophy_manager_trophy_table");
	m_trophy_table->setShowGrid(false);
	m_trophy_table->setVerticalScrollMode(QAbstractItemView::ScrollPerPixel);
	m_trophy_table->setHorizontalScrollMode(QAbstractItemView::ScrollPerPixel);
	m_trophy_table->verticalScrollBar()->installEventFilter(this);
	m_trophy_table->verticalScrollBar()->setSingleStep(20);
	m_trophy_table->horizontalScrollBar()->setSingleStep(20);
	m_trophy_table->setItemDelegate(new game_list_delegate(m_trophy_table));
	m_trophy_table->setSelectionBehavior(QAbstractItemView::SelectRows);
	m_trophy_table->setEditTriggers(QAbstractItemView::NoEditTriggers);
	m_trophy_table->setColumnCount(static_cast<int>(gui::trophy_list_columns::count));
	m_trophy_table->horizontalHeader()->setDefaultAlignment(Qt::AlignLeft);
	m_trophy_table->horizontalHeader()->setStretchLastSection(true);
	m_trophy_table->horizontalHeader()->setSectionResizeMode(static_cast<int>(gui::trophy_list_columns::icon), QHeaderView::Fixed);
	m_trophy_table->verticalHeader()->setVisible(false);
	m_trophy_table->verticalHeader()->setSectionResizeMode(QHeaderView::Fixed);
	m_trophy_table->setContextMenuPolicy(Qt::CustomContextMenu);
	m_trophy_table->setAlternatingRowColors(true);
	m_trophy_table->installEventFilter(this);

	// Registers a trophy-table column header plus its "show column" context action.
	auto add_trophy_column = [this](gui::trophy_list_columns col, const QString& header_text, const QString& action_text)
	{
		m_trophy_table->setHorizontalHeaderItem(static_cast<int>(col), new QTableWidgetItem(header_text));
		m_trophy_column_acts.append(new QAction(action_text, this));
	};

	add_trophy_column(gui::trophy_list_columns::icon, tr("Icon"), tr("Show Icons"));
	add_trophy_column(gui::trophy_list_columns::name, tr("Name"), tr("Show Names"));
	add_trophy_column(gui::trophy_list_columns::description, tr("Description"), tr("Show Descriptions"));
	add_trophy_column(gui::trophy_list_columns::type, tr("Type"), tr("Show Types"));
	add_trophy_column(gui::trophy_list_columns::is_unlocked, tr("Status"), tr("Show Status"));
	add_trophy_column(gui::trophy_list_columns::id, tr("ID"), tr("Show IDs"));
	add_trophy_column(gui::trophy_list_columns::platinum_link, tr("Platinum Relevant"), tr("Show Platinum Relevant"));
	add_trophy_column(gui::trophy_list_columns::time_unlocked, tr("Time Unlocked"), tr("Show Time Unlocked"));

	m_splitter = new QSplitter();
	m_splitter->addWidget(m_game_table);
	m_splitter->addWidget(m_trophy_table);

	m_game_icon_size = gui_settings::SizeFromSlider(m_game_icon_size_index);

	// Checkboxes to control dialog
	QCheckBox* check_lock_trophy = new QCheckBox(tr("Show Not Earned Trophies"));
	check_lock_trophy->setCheckable(true);
	check_lock_trophy->setChecked(m_show_locked_trophies);

	QCheckBox* check_unlock_trophy = new QCheckBox(tr("Show Earned Trophies"));
	check_unlock_trophy->setCheckable(true);
	check_unlock_trophy->setChecked(m_show_unlocked_trophies);

	QCheckBox* check_hidden_trophy = new QCheckBox(tr("Show Hidden Trophies"));
	check_hidden_trophy->setCheckable(true);
	check_hidden_trophy->setChecked(m_show_hidden_trophies);

	QCheckBox* check_bronze_trophy = new QCheckBox(tr("Show Bronze Trophies"));
	check_bronze_trophy->setCheckable(true);
	check_bronze_trophy->setChecked(m_show_bronze_trophies);

	QCheckBox* check_silver_trophy = new QCheckBox(tr("Show Silver Trophies"));
	check_silver_trophy->setCheckable(true);
	check_silver_trophy->setChecked(m_show_silver_trophies);

	QCheckBox* check_gold_trophy = new QCheckBox(tr("Show Gold Trophies"));
	check_gold_trophy->setCheckable(true);
	check_gold_trophy->setChecked(m_show_gold_trophies);

	QCheckBox* check_platinum_trophy = new QCheckBox(tr("Show Platinum Trophies"));
	check_platinum_trophy->setCheckable(true);
	check_platinum_trophy->setChecked(m_show_platinum_trophies);

	QLabel* trophy_slider_label = new QLabel();
	trophy_slider_label->setText(tr("Trophy Icon Size: %0x%1").arg(m_icon_height).arg(m_icon_height));

	QLabel* game_slider_label = new QLabel();
	game_slider_label->setText(tr("Game Icon Size: %0x%1").arg(m_game_icon_size.width()).arg(m_game_icon_size.height()));

	m_icon_slider = new QSlider(Qt::Horizontal);
	m_icon_slider->setRange(25, 225);
	m_icon_slider->setValue(m_icon_height);

	m_game_icon_slider = new QSlider(Qt::Horizontal);
	m_game_icon_slider->setRange(0, gui::gl_max_slider_pos);
	m_game_icon_slider->setValue(m_game_icon_size_index);

	// LAYOUTS
	QGroupBox* choose_game = new QGroupBox(tr("Choose Game"));
	QVBoxLayout* choose_layout = new QVBoxLayout();
	choose_layout->addWidget(m_game_combo);
	choose_game->setLayout(choose_layout);

	QGroupBox* trophy_info = new QGroupBox(tr("Trophy Info"));
	QVBoxLayout* info_layout = new QVBoxLayout();
	info_layout->addWidget(m_game_progress);
	trophy_info->setLayout(info_layout);

	QGroupBox* show_settings = new QGroupBox(tr("Trophy View Options"));
	QVBoxLayout* settings_layout = new QVBoxLayout();
	settings_layout->addWidget(check_lock_trophy);
	settings_layout->addWidget(check_unlock_trophy);
	settings_layout->addWidget(check_hidden_trophy);
	settings_layout->addWidget(check_bronze_trophy);
	settings_layout->addWidget(check_silver_trophy);
	settings_layout->addWidget(check_gold_trophy);
	settings_layout->addWidget(check_platinum_trophy);
	show_settings->setLayout(settings_layout);

	QGroupBox* icon_settings = new QGroupBox(tr("Icon Options"));
	QVBoxLayout* slider_layout = new QVBoxLayout();
	slider_layout->addWidget(trophy_slider_label);
	slider_layout->addWidget(m_icon_slider);
	slider_layout->addWidget(game_slider_label);
	slider_layout->addWidget(m_game_icon_slider);
	icon_settings->setLayout(slider_layout);

	QVBoxLayout* options_layout = new QVBoxLayout();
	options_layout->addWidget(choose_game);
	options_layout->addWidget(trophy_info);
	options_layout->addWidget(show_settings);
	options_layout->addWidget(icon_settings);
	options_layout->addStretch();

	QHBoxLayout* all_layout = new QHBoxLayout(this);
	all_layout->addLayout(options_layout);
	all_layout->addWidget(m_splitter);
	all_layout->setStretch(1, 1);
	setLayout(all_layout);

	// Make connects
	connect(m_icon_slider, &QSlider::valueChanged, this, [this, trophy_slider_label](int val)
	{
		m_icon_height = val;
		if (trophy_slider_label)
		{
			trophy_slider_label->setText(tr("Trophy Icon Size: %0x%1").arg(val).arg(val));
		}
		ResizeTrophyIcons();
		if (m_save_icon_height)
		{
			m_save_icon_height = false;
			m_gui_settings->SetValue(gui::tr_icon_height, val);
		}
	});

	connect(m_icon_slider, &QSlider::sliderReleased, this, [this]()
	{
		m_gui_settings->SetValue(gui::tr_icon_height, m_icon_slider->value());
	});

	connect(m_icon_slider, &QSlider::actionTriggered, this, [this](int action)
	{
		if (action != QAbstractSlider::SliderNoAction && action != QAbstractSlider::SliderMove)
		{
			// we only want to save on mouseclicks or slider release (the other connect handles this)
			m_save_icon_height = true; // actionTriggered happens before the value was changed
		}
	});

	connect(m_game_icon_slider, &QSlider::valueChanged, this, [this, game_slider_label](int val)
	{
		m_game_icon_size_index = val;
		m_game_icon_size = gui_settings::SizeFromSlider(val);
		if (game_slider_label)
		{
			game_slider_label->setText(tr("Game Icon Size: %0x%1").arg(m_game_icon_size.width()).arg(m_game_icon_size.height()));
		}
		ResizeGameIcons();
		if (m_save_game_icon_size)
		{
			m_save_game_icon_size = false;
			m_gui_settings->SetValue(gui::tr_game_iconSize, val);
		}
	});

	connect(m_game_icon_slider, &QSlider::sliderReleased, this, [this]()
	{
		m_gui_settings->SetValue(gui::tr_game_iconSize, m_game_icon_slider->value());
	});

	connect(m_game_icon_slider, &QSlider::actionTriggered, this, [this](int action)
	{
		if (action != QAbstractSlider::SliderNoAction && action != QAbstractSlider::SliderMove)
		{
			// we only want to save on mouseclicks or slider release (the other connect handles this)
			m_save_game_icon_size = true; // actionTriggered happens before the value was changed
		}
	});

	// Each filter checkbox updates its flag, re-applies the filter, and persists the choice.
	connect(check_lock_trophy, &QCheckBox::clicked, this, [this](bool checked)
	{
		m_show_locked_trophies = checked;
		ApplyFilter();
		m_gui_settings->SetValue(gui::tr_show_locked, checked);
	});

	connect(check_unlock_trophy, &QCheckBox::clicked, this, [this](bool checked)
	{
		m_show_unlocked_trophies = checked;
		ApplyFilter();
		m_gui_settings->SetValue(gui::tr_show_unlocked, checked);
	});

	connect(check_hidden_trophy, &QCheckBox::clicked, this, [this](bool checked)
	{
		m_show_hidden_trophies = checked;
		ApplyFilter();
		m_gui_settings->SetValue(gui::tr_show_hidden, checked);
	});

	connect(check_bronze_trophy, &QCheckBox::clicked, this, [this](bool checked)
	{
		m_show_bronze_trophies = checked;
		ApplyFilter();
		m_gui_settings->SetValue(gui::tr_show_bronze, checked);
	});

	connect(check_silver_trophy, &QCheckBox::clicked, this, [this](bool checked)
	{
		m_show_silver_trophies = checked;
		ApplyFilter();
		m_gui_settings->SetValue(gui::tr_show_silver, checked);
	});

	connect(check_gold_trophy, &QCheckBox::clicked, this, [this](bool checked)
	{
		m_show_gold_trophies = checked;
		ApplyFilter();
		m_gui_settings->SetValue(gui::tr_show_gold, checked);
	});

	connect(check_platinum_trophy, &QCheckBox::clicked, this, [this](bool checked)
	{
		m_show_platinum_trophies = checked;
		ApplyFilter();
		m_gui_settings->SetValue(gui::tr_show_platinum, checked);
	});

	connect(m_trophy_table, &QTableWidget::customContextMenuRequested, this, &trophy_manager_dialog::ShowTrophyTableContextMenu);

	// Changing the selected game repopulates and re-filters the trophy table.
	connect(m_game_combo, &QComboBox::currentTextChanged, this, [this]
	{
		PopulateTrophyTable();
		ApplyFilter();
	});

	connect(m_game_table, &QTableWidget::customContextMenuRequested, this, &trophy_manager_dialog::ShowGameTableContextMenu);

	// Selecting a row in the game table drives the game combo box (which in turn
	// repopulates the trophy table via the connection above).
	connect(m_game_table, &QTableWidget::itemSelectionChanged, this, [this]
	{
		if (m_game_table->selectedItems().isEmpty())
		{
			return;
		}
		QTableWidgetItem* item = m_game_table->item(m_game_table->selectedItems().first()->row(), static_cast<int>(gui::trophy_game_list_columns::name));
		if (!item)
		{
			return;
		}
		m_game_combo->setCurrentText(item->text());
	});

	// Icons are loaded asynchronously; these queued signals apply the finished pixmaps.
	connect(this, &trophy_manager_dialog::TrophyIconReady, this, [this](int index, const QPixmap& pixmap)
	{
		if (QTableWidgetItem* icon_item = m_trophy_table->item(index, static_cast<int>(gui::trophy_list_columns::icon)))
		{
			icon_item->setData(Qt::DecorationRole, pixmap);
		}
	});

	connect(this, &trophy_manager_dialog::GameIconReady, this, [this](int index, const QPixmap& pixmap)
	{
		if (QTableWidgetItem* icon_item = m_game_table->item(index, static_cast<int>(gui::trophy_game_list_columns::icon)))
		{
			icon_item->setData(Qt::DecorationRole, pixmap);
		}
	});

	// Hook the column-visibility actions to the persisted per-column settings.
	m_trophy_table->create_header_actions(m_trophy_column_acts,
		[this](int col) { return m_gui_settings->GetTrophylistColVisibility(static_cast<gui::trophy_list_columns>(col)); },
		[this](int col, bool visible) { m_gui_settings->SetTrophylistColVisibility(static_cast<gui::trophy_list_columns>(col), visible); });

	m_game_table->create_header_actions(m_game_column_acts,
		[this](int col) { return m_gui_settings->GetTrophyGamelistColVisibility(static_cast<gui::trophy_game_list_columns>(col)); },
		[this](int col, bool visible) { m_gui_settings->SetTrophyGamelistColVisibility(static_cast<gui::trophy_game_list_columns>(col), visible); });

	RepaintUI(true);

	StartTrophyLoadThreads();
}

// Stop any in-flight asynchronous icon loads before the widget is destroyed.
trophy_manager_dialog::~trophy_manager_dialog()
{
	WaitAndAbortGameRepaintThreads();
	WaitAndAbortTrophyRepaintThreads();
}

// Parses one folder under m_trophy_dir (TROPUSR.DAT + TROPCONF.SFM) into a
// GameTrophiesData entry and appends it to m_trophies_db under the db mutex.
// Returns false when the folder is unreadable, unparsable, or reports zero trophies.
// Called concurrently from QtConcurrent::map in StartTrophyLoadThreads.
bool trophy_manager_dialog::LoadTrophyFolderToDB(const std::string& trop_name)
{
	const std::string trophy_path = m_trophy_dir + trop_name;
	const std::string vfs_path = vfs::get(trophy_path + "/");

	if (vfs_path.empty())
	{
		gui_log.error("Failed to load trophy database for %s. Path empty!", trop_name);
		return false;
	}

	// Populate GameTrophiesData
	std::unique_ptr<GameTrophiesData> game_trophy_data = std::make_unique<GameTrophiesData>();
	game_trophy_data->path = vfs_path;
	game_trophy_data->trop_usr.reset(new TROPUSRLoader());
	const std::string tropusr_path = trophy_path + "/TROPUSR.DAT";
	const std::string tropconf_path = trophy_path + "/TROPCONF.SFM";
	const bool success = game_trophy_data->trop_usr->Load(tropusr_path, tropconf_path).success;

	fs::file config(vfs::get(tropconf_path));

	if (!success || !config)
	{
		gui_log.error("Failed to load trophy database for %s", trop_name);
		return false;
	}

	const u32 trophy_count = game_trophy_data->trop_usr->GetTrophiesCount();

	if (trophy_count == 0)
	{
		gui_log.error("Warning game %s in trophy folder %s usr file reports zero trophies. Cannot load in trophy manager.", game_trophy_data->game_name, game_trophy_data->path);
		return false;
	}

	for (u32 trophy_id = 0; trophy_id < trophy_count; ++trophy_id)
	{
		// A trophy icon has 3 digits from 000 to 999, for example TROP001.PNG
		game_trophy_data->trophy_image_paths[trophy_id] = QString::fromStdString(fmt::format("%sTROP%03d.PNG", game_trophy_data->path, trophy_id));
	}

	// Get game name
	pugi::xml_parse_result res = game_trophy_data->trop_config.Read(config.to_string());
	if (!res)
	{
		gui_log.error("Failed to read trophy xml: %s", tropconf_path);
		return false;
	}

	std::shared_ptr<rXmlNode> trophy_base = game_trophy_data->trop_config.GetRoot();
	if (!trophy_base)
	{
		gui_log.error("Failed to read trophy xml (root is null): %s", tropconf_path);
		return false;
	}

	for (std::shared_ptr<rXmlNode> n = trophy_base->GetChildren(); n; n = n->GetNext())
	{
		if (n->GetName() == "title-name")
		{
			game_trophy_data->game_name = n->GetNodeContent();
			break;
		}
	}

	{
		// m_trophies_db is filled from worker threads, so guard the push.
		std::scoped_lock lock(m_trophies_db_mtx);
		m_trophies_db.push_back(std::move(game_trophy_data));
	}

	config.release();
	return true;
}

// Repopulates both tables from m_trophies_db and (optionally) restores the saved
// window geometry, splitter state and header states, then shows the dialog.
void trophy_manager_dialog::RepaintUI(bool restore_layout)
{
	if
(m_gui_settings->GetValue(gui::m_enableUIColors).toBool())
	{
		// User-defined icon background color takes precedence when custom UI colors are on.
		m_game_icon_color = m_gui_settings->GetValue(gui::tr_icon_color).value<QColor>();
	}
	else
	{
		m_game_icon_color = gui::utils::get_label_color("trophy_manager_icon_background_color", Qt::transparent, Qt::transparent);
	}

	PopulateGameTable();

	if (restore_layout && !restoreGeometry(m_gui_settings->GetValue(gui::tr_geometry).toByteArray()))
	{
		// No saved geometry yet: default to 70% of the primary screen.
		resize(QGuiApplication::primaryScreen()->availableSize() * 0.7);
	}

	if (restore_layout && !m_splitter->restoreState(m_gui_settings->GetValue(gui::tr_splitterState).toByteArray()))
	{
		// No saved splitter state: 40/60 split between game and trophy tables.
		const int width_left = m_splitter->width() * 0.4;
		const int width_right = m_splitter->width() - width_left;
		m_splitter->setSizes({ width_left, width_right });
	}

	PopulateTrophyTable();

	const QByteArray game_table_state = m_gui_settings->GetValue(gui::tr_games_state).toByteArray();

	if (restore_layout && !m_game_table->horizontalHeader()->restoreState(game_table_state) && m_game_table->rowCount())
	{
		// If no settings exist, resize to contents. (disabled)
		//m_game_table->verticalHeader()->resizeSections(QHeaderView::ResizeMode::ResizeToContents);
		//m_game_table->horizontalHeader()->resizeSections(QHeaderView::ResizeMode::ResizeToContents);
	}

	const QByteArray trophy_table_state = m_gui_settings->GetValue(gui::tr_trophy_state).toByteArray();

	if (restore_layout && !m_trophy_table->horizontalHeader()->restoreState(trophy_table_state) && m_trophy_table->rowCount())
	{
		// If no settings exist, resize to contents. (disabled)
		//m_trophy_table->verticalHeader()->resizeSections(QHeaderView::ResizeMode::ResizeToContents);
		//m_trophy_table->horizontalHeader()->resizeSections(QHeaderView::ResizeMode::ResizeToContents);
	}

	if (restore_layout)
	{
		// Make sure the actions and the headers are synced
		m_game_table->sync_header_actions(m_game_column_acts, [this](int col) { return m_gui_settings->GetTrophyGamelistColVisibility(static_cast<gui::trophy_game_list_columns>(col)); });
		m_trophy_table->sync_header_actions(m_trophy_column_acts, [this](int col) { return m_gui_settings->GetTrophylistColVisibility(static_cast<gui::trophy_list_columns>(col)); });
	}

	ApplyFilter();

	// Show dialog and then paint gui in order to adjust headers correctly
	show();
	ReadjustGameTable();
	ReadjustTrophyTable();
}

// Re-runs RepaintUI while preserving the current window size, splitter state and
// table header layouts (e.g. after a theme/settings change).
void trophy_manager_dialog::HandleRepaintUiRequest()
{
	const QSize window_size = size();
	const QByteArray splitter_state = m_splitter->saveState();
	const QByteArray game_table_state = m_game_table->horizontalHeader()->saveState();
	const QByteArray trophy_table_state = m_trophy_table->horizontalHeader()->saveState();

	RepaintUI(false);

	m_splitter->restoreState(splitter_state);
	m_game_table->horizontalHeader()->restoreState(game_table_state);
	m_trophy_table->horizontalHeader()->restoreState(trophy_table_state);

	// Make sure the actions and the headers are synced
	m_game_table->sync_header_actions(m_game_column_acts, [this](int col) { return m_gui_settings->GetTrophyGamelistColVisibility(static_cast<gui::trophy_game_list_columns>(col)); });
	m_trophy_table->sync_header_actions(m_trophy_column_acts, [this](int col) { return m_gui_settings->GetTrophylistColVisibility(static_cast<gui::trophy_list_columns>(col)); });

	resize(window_size);
}

// Replaces every game icon with a transparent placeholder at the new size, then
// schedules asynchronous per-row icon loads; results arrive via GameIconReady.
void trophy_manager_dialog::ResizeGameIcons()
{
	if (m_game_combo->count() <= 0)
		return;

	WaitAndAbortGameRepaintThreads();

	QPixmap placeholder(m_game_icon_size);
	placeholder.fill(Qt::transparent);

	qRegisterMetaType<QVector<int>>("QVector<int>");

	for (int i = 0; i < m_game_table->rowCount(); ++i)
	{
		if (QTableWidgetItem* icon_item = m_game_table->item(i, static_cast<int>(gui::trophy_game_list_columns::icon)))
		{
			icon_item->setData(Qt::DecorationRole, placeholder);
		}
	}

	ReadjustGameTable();

	for (int i = 0; i < m_game_table->rowCount(); ++i)
	{
		if (movie_item* item = static_cast<movie_item*>(m_game_table->item(i, static_cast<int>(gui::trophy_game_list_columns::icon))))
		{
			const qreal dpr = devicePixelRatioF();
			const int trophy_index = item->data(GameUserRole::GameIndex).toInt();
			const std::string icon_path = m_trophies_db[trophy_index]->path + "ICON0.PNG";

			// The loader runs off the GUI thread; 'cancel' aborts it when a newer resize supersedes this one.
			item->set_icon_load_func([this, icon_path, trophy_index, cancel = item->icon_loading_aborted(), dpr](int index)
			{
				if (cancel && cancel->load())
				{
					return;
				}

				QPixmap icon;

				if (movie_item* item = static_cast<movie_item*>(m_game_table->item(index, static_cast<int>(gui::trophy_game_list_columns::icon))))
				{
					if (!item->data(GameUserRole::GamePixmapLoaded).toBool())
					{
						// Load game icon
						const std::string icon_path = m_trophies_db[trophy_index]->path + "ICON0.PNG";
						if (!icon.load(QString::fromStdString(icon_path)))
						{
							gui_log.warning("Could not load trophy game icon from path %s", icon_path);
						}
						// Cache the pixmap on the item so later resizes skip the disk read.
						item->setData(GameUserRole::GamePixmapLoaded, true);
						item->setData(GameUserRole::GamePixmap, icon);
					}
					else
					{
						icon = item->data(GameUserRole::GamePixmap).value<QPixmap>();
					}
				}

				if (cancel && cancel->load())
				{
					return;
				}

				// Composite the icon onto the configured background color at device pixel ratio.
				QPixmap new_icon(icon.size() * dpr);
				new_icon.setDevicePixelRatio(dpr);
				new_icon.fill(m_game_icon_color);

				if (!icon.isNull())
				{
					QPainter painter(&new_icon);
					painter.setRenderHint(QPainter::SmoothPixmapTransform);
					painter.drawPixmap(QPoint(0, 0), icon);
					painter.end();
				}

				new_icon = new_icon.scaled(m_game_icon_size * dpr, Qt::KeepAspectRatio, Qt::TransformationMode::SmoothTransformation);

				if (!cancel || !cancel->load())
				{
					Q_EMIT GameIconReady(index, new_icon);
				}
			});
		}
	}
}

// Same scheme as ResizeGameIcons, but for the trophy table of the currently
// selected game; loaded pixmaps are cached in the db entry under the db mutex.
void trophy_manager_dialog::ResizeTrophyIcons()
{
	if (m_game_combo->count() <= 0)
		return;

	WaitAndAbortTrophyRepaintThreads();

	const int db_pos = m_game_combo->currentData().toInt();
	const qreal dpr = devicePixelRatioF();
	const int new_height = m_icon_height * dpr;

	QPixmap placeholder(m_icon_height, m_icon_height);
	placeholder.fill(Qt::transparent);

	for (int i = 0; i < m_trophy_table->rowCount(); ++i)
	{
		if (QTableWidgetItem* icon_item = m_trophy_table->item(i, static_cast<int>(gui::trophy_list_columns::icon)))
		{
			icon_item->setData(Qt::DecorationRole, placeholder);
		}
	}

	ReadjustTrophyTable();

	for (int i = 0; i < m_trophy_table->rowCount(); ++i)
	{
		if (QTableWidgetItem* id_item = m_trophy_table->item(i, static_cast<int>(gui::trophy_list_columns::id)))
		{
			if (movie_item* item = static_cast<movie_item*>(m_trophy_table->item(i, static_cast<int>(gui::trophy_list_columns::icon))))
			{
				item->set_icon_load_func([this, data = ::at32(m_trophies_db, db_pos).get(), trophy_id = id_item->text().toInt(), cancel = item->icon_loading_aborted(), dpr, new_height](int index)
				{
					if (cancel && cancel->load())
					{
						return;
					}

					QPixmap icon;

					if (data)
					{
						bool found_icon{};
						QString path;
						{
							// Check the cache (and fetch the path) under the lock; load from disk outside it.
							std::scoped_lock lock(m_trophies_db_mtx);
							found_icon = data->trophy_images.contains(trophy_id);
							if (found_icon)
							{
								icon = data->trophy_images[trophy_id];
							}
							else
							{
								path = data->trophy_image_paths[trophy_id];
							}
						}
						if (!found_icon)
						{
							if (icon.load(path))
							{
								std::scoped_lock lock(m_trophies_db_mtx);
								data->trophy_images[trophy_id] = icon;
							}
							else
							{
								gui_log.error("Failed to load trophy icon for trophy %d (icon='%s')", trophy_id, path);
							}
						}
					}

					if (cancel && cancel->load())
					{
						return;
					}

					// Composite onto the background color, then scale to the configured row height.
					QPixmap new_icon(icon.size() * dpr);
					new_icon.setDevicePixelRatio(dpr);
					new_icon.fill(m_game_icon_color);

					if (!icon.isNull())
					{
						QPainter painter(&new_icon);
						painter.setRenderHint(QPainter::SmoothPixmapTransform);
						painter.drawPixmap(QPoint(0, 0), icon);
						painter.end();
					}

					new_icon = new_icon.scaledToHeight(new_height, Qt::SmoothTransformation);

					if (!cancel || !cancel->load())
					{
						Q_EMIT TrophyIconReady(index, new_icon);
					}
				});
			}
		}
	}
}

void
trophy_manager_dialog::ApplyFilter()
{
	// Hides/shows trophy rows of the selected game according to the
	// locked/unlocked/hidden and grade (bronze..platinum) filter flags.
	if (!m_game_combo || m_game_combo->count() <= 0)
		return;

	const int db_pos = m_game_combo->currentData().toInt();
	if (db_pos < 0 || static_cast<usz>(db_pos) >= m_trophies_db.size() || !m_trophies_db[db_pos])
		return;

	const TROPUSRLoader* trop_usr = m_trophies_db[db_pos]->trop_usr.get();
	if (!trop_usr)
		return;

	for (int i = 0; i < m_trophy_table->rowCount(); ++i)
	{
		QTableWidgetItem* item = m_trophy_table->item(i, static_cast<int>(gui::trophy_list_columns::id));
		QTableWidgetItem* type_item = m_trophy_table->item(i, static_cast<int>(gui::trophy_list_columns::type));
		QTableWidgetItem* icon_item = m_trophy_table->item(i, static_cast<int>(gui::trophy_list_columns::icon));

		if (!item || !type_item || !icon_item)
		{
			continue;
		}

		const int trophy_id = item->text().toInt();
		const int trophy_type = type_item->data(Qt::UserRole).toInt();

		// I could use boolean logic and reduce this to something much shorter and also much more confusing...
		const bool hidden = icon_item->data(Qt::UserRole).toBool();
		const bool trophy_unlocked = trop_usr->GetTrophyUnlockState(trophy_id);

		bool hide = false;

		// Special override to show *just* hidden trophies.
		if (!m_show_unlocked_trophies && !m_show_locked_trophies && m_show_hidden_trophies)
		{
			hide = !hidden;
		}
		else if ((trophy_unlocked && !m_show_unlocked_trophies) ||
			(!trophy_unlocked && !m_show_locked_trophies) ||
			(hidden && !trophy_unlocked && !m_show_hidden_trophies) ||
			(trophy_type == SCE_NP_TROPHY_GRADE_BRONZE && !m_show_bronze_trophies) ||
			(trophy_type == SCE_NP_TROPHY_GRADE_SILVER && !m_show_silver_trophies) ||
			(trophy_type == SCE_NP_TROPHY_GRADE_GOLD && !m_show_gold_trophies) ||
			(trophy_type == SCE_NP_TROPHY_GRADE_PLATINUM && !m_show_platinum_trophies))
		{
			hide = true;
		}

		m_trophy_table->setRowHidden(i, hide);
	}

	ReadjustTrophyTable();
}

// Context menu for a trophy row: open the trophy directory, copy name/description,
// and (while the emulator is not running) lock/unlock the trophy on disk.
void trophy_manager_dialog::ShowTrophyTableContextMenu(const QPoint& pos)
{
	const int row = m_trophy_table->currentRow();

	if (!m_trophy_table->item(row, static_cast<int>(gui::trophy_list_columns::icon)))
	{
		return;
	}

	QMenu* menu = new QMenu();
	QAction* show_trophy_dir = new QAction(tr("&Open Trophy Directory"), menu);

	const int db_ind = m_game_combo->currentData().toInt();

	connect(show_trophy_dir, &QAction::triggered, this, [this, db_ind]()
	{
		const QString path = QString::fromStdString(m_trophies_db[db_ind]->path);
		gui::utils::open_dir(path);
	});

	menu->addAction(show_trophy_dir);

	const QTableWidgetItem* name_item = m_trophy_table->item(row, static_cast<int>(gui::trophy_list_columns::name));
	const QTableWidgetItem* desc_item = m_trophy_table->item(row, static_cast<int>(gui::trophy_list_columns::description));

	const QString name = name_item ? name_item->text() : "";
	const QString desc = desc_item ? desc_item->text() : "";

	if (!name.isEmpty() || !desc.isEmpty())
	{
		QMenu* copy_menu = new QMenu(tr("&Copy Info"), menu);

		if (!name.isEmpty() && !desc.isEmpty())
		{
			QAction* copy_both = new QAction(tr("&Copy Name + Description"), copy_menu);
			connect(copy_both, &QAction::triggered, this, [this, name, desc]()
			{
				QApplication::clipboard()->setText(name % QStringLiteral("\n\n") % desc);
			});
			copy_menu->addAction(copy_both);
		}

		if (!name.isEmpty())
		{
			QAction* copy_name = new QAction(tr("&Copy Name"), copy_menu);
			connect(copy_name, &QAction::triggered, this, [this, name]()
			{
				QApplication::clipboard()->setText(name);
			});
			copy_menu->addAction(copy_name);
		}

		if (!desc.isEmpty())
		{
			QAction* copy_desc = new QAction(tr("&Copy Description"), copy_menu);
			connect(copy_desc, &QAction::triggered, this, [this, desc]()
			{
				QApplication::clipboard()->setText(desc);
			});
			copy_menu->addAction(copy_desc);
		}

		menu->addMenu(copy_menu);
	}

	const QTableWidgetItem* id_item = m_trophy_table->item(row, static_cast<int>(gui::trophy_list_columns::id));
	const QTableWidgetItem* type_item = m_trophy_table->item(row, static_cast<int>(gui::trophy_list_columns::type));

	// Manual lock/unlock is only offered while the emulator is idle (the game could
	// otherwise be writing the same TROPUSR.DAT).
	if (id_item && type_item && !Emu.IsRunning())
	{
		const int type = type_item->data(Qt::UserRole).toInt();
		const int trophy_id = id_item->text().toInt();
		const bool is_unlocked = m_trophies_db[db_ind]->trop_usr->GetTrophyUnlockState(trophy_id);

		QAction* lock_unlock_trophy = new QAction(is_unlocked ? tr("&Lock Trophy") : tr("&Unlock Trophy"), menu);
		connect(lock_unlock_trophy, &QAction::triggered, this, [this, db_ind, trophy_id, is_unlocked, row, type]()
		{
			if (type == SCE_NP_TROPHY_GRADE_PLATINUM && !is_unlocked)
			{
				QMessageBox::information(this, tr("Action not permitted."), tr("Platinum trophies can only be unlocked ingame."), QMessageBox::Ok);
				return;
			}

			auto& db = m_trophies_db[db_ind];
			const std::string path = vfs::retrieve(db->path);
			const std::string tropusr_path = path + "/TROPUSR.DAT";
			const std::string tropconf_path = path + "/TROPCONF.SFM";

			// Reload trophy file just make sure it hasn't changed
			if (!db->trop_usr->Load(tropusr_path, tropconf_path).success)
			{
				gui_log.error("Failed to load trophy file");
				return;
			}

			u64 tick = 0;

			if (is_unlocked)
			{
				if (!db->trop_usr->LockTrophy(trophy_id))
				{
					gui_log.error("Failed to lock trophy %d", trophy_id);
					return;
				}
			}
			else
			{
				tick = DateTimeToTick(QDateTime::currentDateTime());
				if (!db->trop_usr->UnlockTrophy(trophy_id, tick, tick))
				{
					gui_log.error("Failed to unlock trophy %d", trophy_id);
					return;
				}
			}

			if (!db->trop_usr->Save(tropusr_path))
			{
				gui_log.error("Failed to save '%s': error=%s", path, fs::g_tls_error);
				return;
			}

			// Reflect the new unlock state and timestamp in the table row.
			if (QTableWidgetItem* lock_item = m_trophy_table->item(row, static_cast<int>(gui::trophy_list_columns::is_unlocked)))
			{
				lock_item->setText(db->trop_usr->GetTrophyUnlockState(trophy_id) ? tr("Earned") : tr("Not Earned"));
			}

			if (QTableWidgetItem* date_item = m_trophy_table->item(row, static_cast<int>(gui::trophy_list_columns::time_unlocked)))
			{
				date_item->setText(tick ? QLocale().toString(TickToDateTime(tick), gui::persistent::last_played_date_with_time_of_day_format) : tr("Unknown"));
				date_item->setData(Qt::UserRole, QVariant::fromValue<qulonglong>(tick));
			}
		});

		menu->addSeparator();
		menu->addAction(lock_unlock_trophy);
	}

	menu->exec(m_trophy_table->viewport()->mapToGlobal(pos));
}

// Context menu for a game row: delete the game's trophy folder (with confirmation),
// open the trophy directory, or copy the game name.
void trophy_manager_dialog::ShowGameTableContextMenu(const QPoint& pos)
{
	const int row = m_game_table->currentRow();

	if (!m_game_table->item(row, static_cast<int>(gui::trophy_game_list_columns::icon)))
	{
		return;
	}

	QMenu* menu = new QMenu();
	QAction* remove_trophy_dir = new QAction(tr("&Remove"), this);
	QAction* show_trophy_dir = new QAction(tr("&Open Trophy Directory"), menu);

	const int db_ind = m_game_combo->currentData().toInt();
	const QTableWidgetItem* name_item = m_game_table->item(row, static_cast<int>(gui::trophy_game_list_columns::name));
	const QString name = name_item ? name_item->text() : "";

	connect(remove_trophy_dir, &QAction::triggered, this, [this, name, db_ind]()
	{
		if (QMessageBox::question(this, tr("Delete Confirmation"), tr("Are you sure you want to delete the trophies for:\n%1?").arg(name), QMessageBox::Yes, QMessageBox::No) == QMessageBox::Yes)
		{
			const std::string path = m_trophies_db[db_ind]->path;
			ensure(path != vfs::get(m_trophy_dir)); // Make sure we aren't deleting the root path by accident
			fs::remove_all(path + "/"); // Remove the game's trophy folder
			StartTrophyLoadThreads(); // Reload the trophy list
		}
	});

	connect(show_trophy_dir, &QAction::triggered, this, [this, db_ind]()
	{
		const QString path = QString::fromStdString(m_trophies_db[db_ind]->path);
		gui::utils::open_dir(path);
	});

	menu->addAction(remove_trophy_dir);
	menu->addAction(show_trophy_dir);

	if (!name.isEmpty())
	{
		QAction* copy_name = new QAction(tr("&Copy Name"), menu);
		connect(copy_name, &QAction::triggered, this, [this, name]()
		{
			QApplication::clipboard()->setText(name);
		});
		menu->addAction(copy_name);
	}

	menu->exec(m_game_table->viewport()->mapToGlobal(pos));
}

void
trophy_manager_dialog::StartTrophyLoadThreads() { WaitAndAbortGameRepaintThreads(); WaitAndAbortTrophyRepaintThreads(); m_trophies_db.clear(); const QString trophy_path = QString::fromStdString(vfs::get(m_trophy_dir)); if (trophy_path.isEmpty()) { gui_log.error("Cannot load trophy dir. Path empty!"); RepaintUI(true); return; } const QDir trophy_dir(trophy_path); const QStringList folder_list = trophy_dir.entryList(QDir::Dirs | QDir::NoDotAndDotDot); const int count = folder_list.count(); if (count <= 0) { RepaintUI(true); return; } qRegisterMetaType<QVector<int>>("QVector<int>"); QList<int> indices; for (int i = 0; i < count; ++i) indices.append(i); QFutureWatcher<void> futureWatcher; progress_dialog progressDialog(tr("Loading trophies"), tr("Loading trophy data, please wait..."), tr("Cancel"), 0, 1, false, this, Qt::Dialog | Qt::WindowTitleHint | Qt::CustomizeWindowHint); connect(&futureWatcher, &QFutureWatcher<void>::progressRangeChanged, &progressDialog, &QProgressDialog::setRange); connect(&futureWatcher, &QFutureWatcher<void>::progressValueChanged, &progressDialog, &QProgressDialog::setValue); connect(&futureWatcher, &QFutureWatcher<void>::finished, this, [this]() { RepaintUI(true); }); connect(&progressDialog, &QProgressDialog::canceled, this, [this, &futureWatcher]() { futureWatcher.cancel(); close(); // It's pointless to show an empty window }); atomic_t<usz> error_count{}; futureWatcher.setFuture(QtConcurrent::map(indices, [this, &error_count, &folder_list](const int& i) { const std::string dir_name = folder_list.value(i).toStdString(); gui_log.trace("Loading trophy dir: %s", dir_name); if (!LoadTrophyFolderToDB(dir_name)) { // TODO: add a way of showing the number of corrupted/invalid folders in UI somewhere. 
gui_log.error("Error occurred while parsing folder %s for trophies.", dir_name); error_count++; } })); progressDialog.exec(); futureWatcher.waitForFinished(); if (error_count != 0) { gui_log.error("Failed to load %d of %d trophy folders!", error_count.load(), count); } } void trophy_manager_dialog::PopulateGameTable() { WaitAndAbortGameRepaintThreads(); m_game_table->setSortingEnabled(false); // Disable sorting before using setItem calls m_game_table->clearContents(); m_game_table->setRowCount(static_cast<int>(m_trophies_db.size())); m_game_combo->clear(); m_game_combo->blockSignals(true); qRegisterMetaType<QVector<int>>("QVector<int>"); QList<int> indices; for (usz i = 0; i < m_trophies_db.size(); ++i) indices.append(static_cast<int>(i)); QPixmap placeholder(m_game_icon_size); placeholder.fill(Qt::transparent); for (int i = 0; i < indices.count(); ++i) { const int all_trophies = m_trophies_db[i]->trop_usr->GetTrophiesCount(); const int unlocked_trophies = m_trophies_db[i]->trop_usr->GetUnlockedTrophiesCount(); const int percentage = 100 * unlocked_trophies / all_trophies; const QString progress = tr("%0% (%1/%2)").arg(percentage).arg(unlocked_trophies).arg(all_trophies); const QString name = QString::fromStdString(m_trophies_db[i]->game_name).simplified(); custom_table_widget_item* icon_item = new custom_table_widget_item; icon_item->setData(Qt::DecorationRole, placeholder); icon_item->setData(GameUserRole::GameIndex, i); icon_item->setData(GameUserRole::GamePixmapLoaded, false); icon_item->setData(GameUserRole::GamePixmap, QPixmap()); m_game_table->setItem(i, static_cast<int>(gui::trophy_game_list_columns::icon), icon_item); m_game_table->setItem(i, static_cast<int>(gui::trophy_game_list_columns::name), new custom_table_widget_item(name)); m_game_table->setItem(i, static_cast<int>(gui::trophy_game_list_columns::progress), new custom_table_widget_item(progress, Qt::UserRole, percentage)); m_game_table->setItem(i, 
static_cast<int>(gui::trophy_game_list_columns::trophies), new custom_table_widget_item(QString::number(all_trophies), Qt::UserRole, all_trophies)); m_game_combo->addItem(name, i); } m_game_combo->model()->sort(0, Qt::AscendingOrder); m_game_combo->blockSignals(false); m_game_combo->setCurrentIndex(0); m_game_table->setSortingEnabled(true); // Enable sorting only after using setItem calls ResizeGameIcons(); gui::utils::resize_combo_box_view(m_game_combo); } void trophy_manager_dialog::PopulateTrophyTable() { if (m_game_combo->count() <= 0) return; auto& data = m_trophies_db[m_game_combo->currentData().toInt()]; gui_log.trace("Populating Trophy Manager UI with %s %s", data->game_name, data->path); const int all_trophies = data->trop_usr->GetTrophiesCount(); const int unlocked_trophies = data->trop_usr->GetUnlockedTrophiesCount(); const int percentage = 100 * unlocked_trophies / all_trophies; m_game_progress->setText(tr("Progress: %1% (%2/%3)").arg(percentage).arg(unlocked_trophies).arg(all_trophies)); m_trophy_table->clear_list(); m_trophy_table->setRowCount(all_trophies); m_trophy_table->setSortingEnabled(false); // Disable sorting before using setItem calls QPixmap placeholder(m_icon_height, m_icon_height); placeholder.fill(Qt::transparent); const QLocale locale{}; std::shared_ptr<rXmlNode> trophy_base = data->trop_config.GetRoot(); if (!trophy_base) { gui_log.error("Populating Trophy Manager UI failed (root is null): %s %s", data->game_name, data->path); } int i = 0; for (std::shared_ptr<rXmlNode> n = trophy_base ? trophy_base->GetChildren() : nullptr; n; n = n->GetNext()) { // Only show trophies. 
if (n->GetName() != "trophy") { continue; } // Get data (stolen graciously from sceNpTrophy.cpp) SceNpTrophyDetails details{}; // Get trophy id const s32 trophy_id = atoi(n->GetAttribute("id").c_str()); details.trophyId = trophy_id; // Get platinum link id (we assume there only exists one platinum trophy per game for now) const s32 platinum_link_id = atoi(n->GetAttribute("pid").c_str()); const QString platinum_relevant = platinum_link_id < 0 ? tr("No") : tr("Yes"); // Get trophy type QString trophy_type; switch (n->GetAttribute("ttype")[0]) { case 'B': details.trophyGrade = SCE_NP_TROPHY_GRADE_BRONZE; trophy_type = tr("Bronze", "Trophy type"); break; case 'S': details.trophyGrade = SCE_NP_TROPHY_GRADE_SILVER; trophy_type = tr("Silver", "Trophy type"); break; case 'G': details.trophyGrade = SCE_NP_TROPHY_GRADE_GOLD; trophy_type = tr("Gold", "Trophy type"); break; case 'P': details.trophyGrade = SCE_NP_TROPHY_GRADE_PLATINUM; trophy_type = tr("Platinum", "Trophy type"); break; default: gui_log.warning("Unknown trophy grade %s", n->GetAttribute("ttype")); break; } // Get hidden state const bool hidden = n->GetAttribute("hidden")[0] == 'y'; details.hidden = hidden; // Get name and detail for (std::shared_ptr<rXmlNode> n2 = n->GetChildren(); n2; n2 = n2->GetNext()) { if (n2->GetName() == "name") { strcpy_trunc(details.name, n2->GetNodeContent()); } if (n2->GetName() == "detail") { strcpy_trunc(details.description, n2->GetNodeContent()); } } // Get timestamp const u64 tick = data->trop_usr->GetTrophyTimestamp(trophy_id); const QString datetime = tick ? locale.toString(TickToDateTime(tick), gui::persistent::last_played_date_with_time_of_day_format) : tr("Unknown"); const QString unlockstate = data->trop_usr->GetTrophyUnlockState(trophy_id) ? 
tr("Earned") : tr("Not Earned"); custom_table_widget_item* icon_item = new custom_table_widget_item(); icon_item->setData(Qt::UserRole, hidden, true); icon_item->setData(Qt::DecorationRole, placeholder); custom_table_widget_item* type_item = new custom_table_widget_item(trophy_type); type_item->setData(Qt::UserRole, static_cast<uint>(details.trophyGrade), true); m_trophy_table->setItem(i, static_cast<int>(gui::trophy_list_columns::icon), icon_item); m_trophy_table->setItem(i, static_cast<int>(gui::trophy_list_columns::name), new custom_table_widget_item(QString::fromStdString(details.name))); m_trophy_table->setItem(i, static_cast<int>(gui::trophy_list_columns::description), new custom_table_widget_item(QString::fromStdString(details.description))); m_trophy_table->setItem(i, static_cast<int>(gui::trophy_list_columns::type), type_item); m_trophy_table->setItem(i, static_cast<int>(gui::trophy_list_columns::is_unlocked), new custom_table_widget_item(unlockstate)); m_trophy_table->setItem(i, static_cast<int>(gui::trophy_list_columns::id), new custom_table_widget_item(QString::number(trophy_id), Qt::UserRole, trophy_id)); m_trophy_table->setItem(i, static_cast<int>(gui::trophy_list_columns::platinum_link), new custom_table_widget_item(platinum_relevant, Qt::UserRole, platinum_link_id)); m_trophy_table->setItem(i, static_cast<int>(gui::trophy_list_columns::time_unlocked), new custom_table_widget_item(datetime, Qt::UserRole, QVariant::fromValue<qulonglong>(tick))); ++i; } m_trophy_table->setSortingEnabled(true); // Re-enable sorting after using setItem calls ResizeTrophyIcons(); } void trophy_manager_dialog::ReadjustGameTable() const { // Fixate vertical header and row height m_game_table->verticalHeader()->setMinimumSectionSize(m_game_icon_size.height()); m_game_table->verticalHeader()->setMaximumSectionSize(m_game_icon_size.height()); m_game_table->resizeRowsToContents(); // Resize and fixate icon column 
m_game_table->resizeColumnToContents(static_cast<int>(gui::trophy_game_list_columns::icon)); m_game_table->horizontalHeader()->setSectionResizeMode(static_cast<int>(gui::trophy_game_list_columns::icon), QHeaderView::Fixed); // Shorten the last section to remove horizontal scrollbar if possible m_game_table->resizeColumnToContents(static_cast<int>(gui::trophy_game_list_columns::count) - 1); } void trophy_manager_dialog::ReadjustTrophyTable() const { // Fixate vertical header and row height m_trophy_table->verticalHeader()->setMinimumSectionSize(m_icon_height); m_trophy_table->verticalHeader()->setMaximumSectionSize(m_icon_height); m_trophy_table->resizeRowsToContents(); // Resize and fixate icon column m_trophy_table->resizeColumnToContents(static_cast<int>(gui::trophy_list_columns::icon)); // Shorten the last section to remove horizontal scrollbar if possible m_trophy_table->resizeColumnToContents(static_cast<int>(gui::trophy_list_columns::count) - 1); } bool trophy_manager_dialog::eventFilter(QObject *object, QEvent *event) { const bool is_trophy_scroll = object == m_trophy_table->verticalScrollBar(); const bool is_trophy_table = object == m_trophy_table; const bool is_game_scroll = object == m_game_table->verticalScrollBar(); const bool is_game_table = object == m_game_table; int zoom_val = 0; switch (event->type()) { case QEvent::Wheel: { QWheelEvent *wheelEvent = static_cast<QWheelEvent *>(event); if (wheelEvent->modifiers() & Qt::ControlModifier && (is_trophy_scroll || is_game_scroll)) { const QPoint numSteps = wheelEvent->angleDelta() / 8 / 15; // http://doc.qt.io/qt-5/qwheelevent.html#pixelDelta zoom_val = numSteps.y(); } break; } case QEvent::KeyPress: { QKeyEvent *keyEvent = static_cast<QKeyEvent *>(event); if (keyEvent && keyEvent->modifiers() == Qt::ControlModifier && (is_trophy_table || is_game_table)) { if (keyEvent->key() == Qt::Key_Plus) { zoom_val = 1; } else if (keyEvent->key() == Qt::Key_Minus) { zoom_val = -1; } } break; } default: break; } if 
(zoom_val != 0) { if (m_icon_slider && (is_trophy_table || is_trophy_scroll)) { m_save_icon_height = true; m_icon_slider->setSliderPosition(zoom_val + m_icon_slider->value()); } else if (m_game_icon_slider && (is_game_table || is_game_scroll)) { m_save_game_icon_size = true; m_game_icon_slider->setSliderPosition(zoom_val + m_game_icon_slider->value()); } return true; } return QWidget::eventFilter(object, event); } void trophy_manager_dialog::closeEvent(QCloseEvent *event) { // Save gui settings m_gui_settings->SetValue(gui::tr_geometry, saveGeometry(), false); m_gui_settings->SetValue(gui::tr_splitterState, m_splitter->saveState(), false); m_gui_settings->SetValue(gui::tr_games_state, m_game_table->horizontalHeader()->saveState(), false); m_gui_settings->SetValue(gui::tr_trophy_state, m_trophy_table->horizontalHeader()->saveState(), true); QWidget::closeEvent(event); } void trophy_manager_dialog::WaitAndAbortGameRepaintThreads() { for (int i = 0; i < m_game_table->rowCount(); i++) { if (movie_item* item = static_cast<movie_item*>(m_game_table->item(i, static_cast<int>(gui::trophy_game_list_columns::icon)))) { item->wait_for_icon_loading(true); } } } void trophy_manager_dialog::WaitAndAbortTrophyRepaintThreads() { for (int i = 0; i < m_trophy_table->rowCount(); i++) { if (movie_item* item = static_cast<movie_item*>(m_trophy_table->item(i, static_cast<int>(gui::trophy_list_columns::icon)))) { item->wait_for_icon_loading(true); } } } QDateTime trophy_manager_dialog::TickToDateTime(u64 tick) { const CellRtcDateTime rtc_date = tick_to_date_time(tick); const QDateTime datetime( QDate(rtc_date.year, rtc_date.month, rtc_date.day), QTime(rtc_date.hour, rtc_date.minute, rtc_date.second, rtc_date.microsecond / 1000), QTimeZone::UTC); return datetime.toLocalTime(); } u64 trophy_manager_dialog::DateTimeToTick(QDateTime date_time) { const QDateTime utc = date_time.toUTC(); const QDate date = utc.date(); const QTime time = utc.time(); const CellRtcDateTime rtc_date = { .year = 
static_cast<u16>(date.year()), .month = static_cast<u16>(date.month()), .day = static_cast<u16>(date.day()), .hour = static_cast<u16>(time.hour()), .minute = static_cast<u16>(time.minute()), .second = static_cast<u16>(time.second()), .microsecond = static_cast<u32>(time.msec() * 1000), }; return date_time_to_tick(rtc_date); }
48,734
C++
.cpp
1,133
39.980583
192
0.717295
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
5,136
infinity_dialog.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/infinity_dialog.cpp
#include "stdafx.h" #include "Utilities/File.h" #include "Crypto/md5.h" #include "Crypto/aes.h" #include "Crypto/sha1.h" #include "infinity_dialog.h" #include "Emu/Io/Infinity.h" #include "util/asm.hpp" #include <locale> #include <QLabel> #include <QGroupBox> #include <QFileDialog> #include <QVBoxLayout> #include <QMessageBox> #include <QComboBox> #include <QPushButton> #include <QStringList> #include <QCompleter> infinity_dialog* infinity_dialog::inst = nullptr; std::array<std::optional<u32>, 9> infinity_dialog::figure_slots = {}; static QString s_last_figure_path; LOG_CHANNEL(infinity_log, "infinity"); static constexpr std::array<u8, 16> BLANK_BLOCK = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; static constexpr std::array<u8, 32> SHA1_CONSTANT = { 0xAF, 0x62, 0xD2, 0xEC, 0x04, 0x91, 0x96, 0x8C, 0xC5, 0x2A, 0x1A, 0x71, 0x65, 0xF8, 0x65, 0xFE, 0x28, 0x63, 0x29, 0x20, 0x44, 0x69, 0x73, 0x6e, 0x65, 0x79, 0x20, 0x32, 0x30, 0x31, 0x33}; const std::map<const u32, const std::pair<const u8, const std::string>> list_figures = { {0x0F4241, {1, "Mr. Incredible"}}, {0x0F4242, {1, "Sulley"}}, {0x0F4243, {1, "Jack Sparrow"}}, {0x0F4244, {1, "Lone Ranger"}}, {0x0F4245, {1, "Tonto"}}, {0x0F4246, {1, "Lightning McQueen"}}, {0x0F4247, {1, "Holley Shiftwell"}}, {0x0F4248, {1, "Buzz Lightyear"}}, {0x0F4249, {1, "Jessie"}}, {0x0F424A, {1, "Mike"}}, {0x0F424B, {1, "Mrs. 
Incredible"}}, {0x0F424C, {1, "Hector Barbossa"}}, {0x0F424D, {1, "Davy Jones"}}, {0x0F424E, {1, "Randy"}}, {0x0F424F, {1, "Syndrome"}}, {0x0F4250, {1, "Woody"}}, {0x0F4251, {1, "Mater"}}, {0x0F4252, {1, "Dash"}}, {0x0F4253, {1, "Violet"}}, {0x0F4254, {1, "Francesco Bernoulli"}}, {0x0F4255, {1, "Sorcerer's Apprentice Mickey"}}, {0x0F4256, {1, "Jack Skellington"}}, {0x0F4257, {1, "Rapunzel"}}, {0x0F4258, {1, "Anna"}}, {0x0F4259, {1, "Elsa"}}, {0x0F425A, {1, "Phineas"}}, {0x0F425B, {1, "Agent P"}}, {0x0F425C, {1, "Wreck-It Ralph"}}, {0x0F425D, {1, "Vanellope"}}, {0x0F425E, {1, "Mr. Incredible (Crystal)"}}, {0x0F425F, {1, "Jack Sparrow (Crystal)"}}, {0x0F4260, {1, "Sulley (Crystal)"}}, {0x0F4261, {1, "Lightning McQueen (Crystal)"}}, {0x0F4262, {1, "Lone Ranger (Crystal)"}}, {0x0F4263, {1, "Buzz Lightyear (Crystal)"}}, {0x0F4264, {1, "Agent P (Crystal)"}}, {0x0F4265, {1, "Sorcerer's Apprentice Mickey (Crystal)"}}, {0x0F4266, {1, "Buzz Lightyear (Glowing)"}}, {0x0F42A4, {2, "Captain America"}}, {0x0F42A5, {2, "Hulk"}}, {0x0F42A6, {2, "Iron Man"}}, {0x0F42A7, {2, "Thor"}}, {0x0F42A8, {2, "Groot"}}, {0x0F42A9, {2, "Rocket Raccoon"}}, {0x0F42AA, {2, "Star-Lord"}}, {0x0F42AB, {2, "Spider-Man"}}, {0x0F42AC, {2, "Nick Fury"}}, {0x0F42AD, {2, "Black Widow"}}, {0x0F42AE, {2, "Hawkeye"}}, {0x0F42AF, {2, "Drax"}}, {0x0F42B0, {2, "Gamora"}}, {0x0F42B1, {2, "Iron Fist"}}, {0x0F42B2, {2, "Nova"}}, {0x0F42B3, {2, "Venom"}}, {0x0F42B4, {2, "Donald Duck"}}, {0x0F42B5, {2, "Aladdin"}}, {0x0F42B6, {2, "Stitch"}}, {0x0F42B7, {2, "Merida"}}, {0x0F42B8, {2, "Tinker Bell"}}, {0x0F42B9, {2, "Maleficent"}}, {0x0F42BA, {2, "Hiro"}}, {0x0F42BB, {2, "Baymax"}}, {0x0F42BC, {2, "Loki"}}, {0x0F42BD, {2, "Ronan"}}, {0x0F42BE, {2, "Green Goblin"}}, {0x0F42BF, {2, "Falcon"}}, {0x0F42C0, {2, "Yondu"}}, {0x0F42C1, {2, "Jasmine"}}, {0x0F42C6, {2, "Black Suit Spider-Man"}}, {0x0F42D6, {3, "Sam Flynn"}}, {0x0F42D7, {3, "Quorra"}}, {0x0F4308, {3, "Anakin Skywalker"}}, {0x0F4309, {3, "Obi-Wan Kenobi"}}, 
{0x0F430A, {3, "Yoda"}}, {0x0F430B, {3, "Ahsoka Tano"}}, {0x0F430C, {3, "Darth Maul"}}, {0x0F430E, {3, "Luke Skywalker"}}, {0x0F430F, {3, "Han Solo"}}, {0x0F4310, {3, "Princess Leia"}}, {0x0F4311, {3, "Chewbacca"}}, {0x0F4312, {3, "Darth Vader"}}, {0x0F4313, {3, "Boba Fett"}}, {0x0F4314, {3, "Ezra Bridger"}}, {0x0F4315, {3, "Kanan Jarrus"}}, {0x0F4316, {3, "Sabine Wren"}}, {0x0F4317, {3, "Zeb Orrelios"}}, {0x0F4318, {3, "Joy"}}, {0x0F4319, {3, "Anger"}}, {0x0F431A, {3, "Fear"}}, {0x0F431B, {3, "Sadness"}}, {0x0F431C, {3, "Disgust"}}, {0x0F431D, {3, "Mickey Mouse"}}, {0x0F431E, {3, "Minnie Mouse"}}, {0x0F431F, {3, "Mulan"}}, {0x0F4320, {3, "Olaf"}}, {0x0F4321, {3, "Vision"}}, {0x0F4322, {3, "Ultron"}}, {0x0F4323, {3, "Ant-Man"}}, {0x0F4325, {3, "Captain America - The First Avenger"}}, {0x0F4326, {3, "Finn"}}, {0x0F4327, {3, "Kylo Ren"}}, {0x0F4328, {3, "Poe Dameron"}}, {0x0F4329, {3, "Rey"}}, {0x0F432B, {3, "Spot"}}, {0x0F432C, {3, "Nick Wilde"}}, {0x0F432D, {3, "Judy Hopps"}}, {0x0F432E, {3, "Hulkbuster"}}, {0x0F432F, {3, "Anakin Skywalker (Light FX)"}}, {0x0F4330, {3, "Obi-Wan Kenobi (Light FX)"}}, {0x0F4331, {3, "Yoda (Light FX)"}}, {0x0F4332, {3, "Luke Skywalker (Light FX)"}}, {0x0F4333, {3, "Darth Vader (Light FX)"}}, {0x0F4334, {3, "Kanan Jarrus (Light FX)"}}, {0x0F4335, {3, "Kylo Ren (Light FX)"}}, {0x0F4336, {3, "Black Panther"}}, {0x0F436C, {3, "Nemo"}}, {0x0F436D, {3, "Dory"}}, {0x0F436E, {3, "Baloo"}}, {0x0F436F, {3, "Alice"}}, {0x0F4370, {3, "Mad Hatter"}}, {0x0F4371, {3, "Time"}}, {0x0F4372, {3, "Peter Pan"}}, {0x1E8481, {1, "Starter Play Set"}}, {0x1E8482, {1, "Lone Ranger Play Set"}}, {0x1E8483, {1, "Cars Play Set"}}, {0x1E8484, {1, "Toy Story in Space Play Set"}}, {0x1E84E4, {2, "Marvel's The Avengers Play Set"}}, {0x1E84E5, {2, "Marvel's Spider-Man Play Set"}}, {0x1E84E6, {2, "Marvel's Guardians of the Galaxy Play Set"}}, {0x1E84E7, {2, "Assault on Asgard"}}, {0x1E84E8, {2, "Escape from the Kyln"}}, {0x1E84E9, {2, "Stitch's Tropical Rescue"}}, 
{0x1E84EA, {2, "Brave Forest Siege"}}, {0x1E8548, {3, "Inside Out Play Set"}}, {0x1E8549, {3, "Star Wars: Twilight of the Republic Play Set"}}, {0x1E854A, {3, "Star Wars: Rise Against the Empire Play Set"}}, {0x1E854B, {3, "Star Wars: The Force Awakens Play Set"}}, {0x1E854C, {3, "Marvel Battlegrounds Play Set"}}, {0x1E854D, {3, "Toy Box Speedway"}}, {0x1E854E, {3, "Toy Box Takeover"}}, {0x1E85AC, {3, "Finding Dory Play Set"}}, {0x2DC6C3, {1, "Bolt's Super Strength"}}, {0x2DC6C4, {1, "Ralph's Power of Destruction"}}, {0x2DC6C5, {1, "Chernabog's Power"}}, {0x2DC6C6, {1, "C.H.R.O.M.E. Damage Increaser"}}, {0x2DC6C7, {1, "Dr. Doofenshmirtz's Damage-Inator!"}}, {0x2DC6C8, {1, "Electro-Charge"}}, {0x2DC6C9, {1, "Fix-It Felix's Repair Power"}}, {0x2DC6CA, {1, "Rapunzel's Healing"}}, {0x2DC6CB, {1, "C.H.R.O.M.E. Armor Shield"}}, {0x2DC6CC, {1, "Star Command Shield"}}, {0x2DC6CD, {1, "Violet's Force Field"}}, {0x2DC6CE, {1, "Pieces of Eight"}}, {0x2DC6CF, {1, "Scrooge McDuck's Lucky Dime"}}, {0x2DC6D0, {1, "User Control"}}, {0x2DC6D1, {1, "Sorcerer Mickey's Hat"}}, {0x2DC6FE, {1, "Emperor Zurg's Wrath"}}, {0x2DC6FF, {1, "Merlin's Summon"}}, {0x2DC765, {2, "Enchanted Rose"}}, {0x2DC766, {2, "Mulan's Training Uniform"}}, {0x2DC767, {2, "Flubber"}}, {0x2DC768, {2, "S.H.I.E.L.D. Helicarrier Strike"}}, {0x2DC769, {2, "Zeus' Thunderbolts"}}, {0x2DC76A, {2, "King Louie's Monkeys"}}, {0x2DC76B, {2, "Infinity Gauntlet"}}, {0x2DC76D, {2, "Sorcerer Supreme"}}, {0x2DC76E, {2, "Maleficent's Spell Cast"}}, {0x2DC76F, {2, "Chernabog's Spirit Cyclone"}}, {0x2DC770, {2, "Marvel Team-Up: Capt. 
Marvel"}}, {0x2DC771, {2, "Marvel Team-Up: Iron Patriot"}}, {0x2DC772, {2, "Marvel Team-Up: Ant-Man"}}, {0x2DC773, {2, "Marvel Team-Up: White Tiger"}}, {0x2DC774, {2, "Marvel Team-Up: Yondu"}}, {0x2DC775, {2, "Marvel Team-Up: Winter Soldier"}}, {0x2DC776, {2, "Stark Arc Reactor"}}, {0x2DC777, {2, "Gamma Rays"}}, {0x2DC778, {2, "Alien Symbiote"}}, {0x2DC779, {2, "All for One"}}, {0x2DC77A, {2, "Sandy Claws Surprise"}}, {0x2DC77B, {2, "Glory Days"}}, {0x2DC77C, {2, "Cursed Pirate Gold"}}, {0x2DC77D, {2, "Sentinel of Liberty"}}, {0x2DC77E, {2, "The Immortal Iron Fist"}}, {0x2DC77F, {2, "Space Armor"}}, {0x2DC780, {2, "Rags to Riches"}}, {0x2DC781, {2, "Ultimate Falcon"}}, {0x2DC788, {3, "Tomorrowland Time Bomb"}}, {0x2DC78E, {3, "Galactic Team-Up: Mace Windu"}}, {0x2DC791, {3, "Luke's Rebel Alliance Flight Suit Costume"}}, {0x2DC798, {3, "Finn's Stormtrooper Costume"}}, {0x2DC799, {3, "Poe's Resistance Jacket"}}, {0x2DC79A, {3, "Resistance Tactical Strike"}}, {0x2DC79E, {3, "Officer Nick Wilde"}}, {0x2DC79F, {3, "Meter Maid Judy"}}, {0x2DC7A2, {3, "Darkhawk's Blast"}}, {0x2DC7A3, {3, "Cosmic Cube Blast"}}, {0x2DC7A4, {3, "Princess Leia's Boushh Disguise"}}, {0x2DC7A6, {3, "Nova Corps Strike"}}, {0x2DC7A7, {3, "King Mickey"}}, {0x3D0912, {1, "Mickey's Car"}}, {0x3D0913, {1, "Cinderella's Coach"}}, {0x3D0914, {1, "Electric Mayhem Bus"}}, {0x3D0915, {1, "Cruella De Vil's Car"}}, {0x3D0916, {1, "Pizza Planet Delivery Truck"}}, {0x3D0917, {1, "Mike's New Car"}}, {0x3D0919, {1, "Parking Lot Tram"}}, {0x3D091A, {1, "Captain Hook's Ship"}}, {0x3D091B, {1, "Dumbo"}}, {0x3D091C, {1, "Calico Helicopter"}}, {0x3D091D, {1, "Maximus"}}, {0x3D091E, {1, "Angus"}}, {0x3D091F, {1, "Abu the Elephant"}}, {0x3D0920, {1, "Headless Horseman's Horse"}}, {0x3D0921, {1, "Phillipe"}}, {0x3D0922, {1, "Khan"}}, {0x3D0923, {1, "Tantor"}}, {0x3D0924, {1, "Dragon Firework Cannon"}}, {0x3D0925, {1, "Stitch's Blaster"}}, {0x3D0926, {1, "Toy Story Mania Blaster"}}, {0x3D0927, {1, "Flamingo Croquet 
Mallet"}}, {0x3D0928, {1, "Carl Fredricksen's Cane"}}, {0x3D0929, {1, "Hangin' Ten Stitch With Surfboard"}}, {0x3D092A, {1, "Condorman Glider"}}, {0x3D092B, {1, "WALL-E's Fire Extinguisher"}}, {0x3D092C, {1, "On the Grid"}}, {0x3D092D, {1, "WALL-E's Collection"}}, {0x3D092E, {1, "King Candy's Dessert Toppings"}}, {0x3D0930, {1, "Victor's Experiments"}}, {0x3D0931, {1, "Jack's Scary Decorations"}}, {0x3D0933, {1, "Frozen Flourish"}}, {0x3D0934, {1, "Rapunzel's Kingdom"}}, {0x3D0935, {1, "TRON Interface"}}, {0x3D0936, {1, "Buy N Large Atmosphere"}}, {0x3D0937, {1, "Sugar Rush Sky"}}, {0x3D0939, {1, "New Holland Skyline"}}, {0x3D093A, {1, "Halloween Town Sky"}}, {0x3D093C, {1, "Chill in the Air"}}, {0x3D093D, {1, "Rapunzel's Birthday Sky"}}, {0x3D0940, {1, "Astro Blasters Space Cruiser"}}, {0x3D0941, {1, "Marlin's Reef"}}, {0x3D0942, {1, "Nemo's Seascape"}}, {0x3D0943, {1, "Alice's Wonderland"}}, {0x3D0944, {1, "Tulgey Wood"}}, {0x3D0945, {1, "Tri-State Area Terrain"}}, {0x3D0946, {1, "Danville Sky"}}, {0x3D0965, {2, "Stark Tech"}}, {0x3D0966, {2, "Spider-Streets"}}, {0x3D0967, {2, "World War Hulk"}}, {0x3D0968, {2, "Gravity Falls Forest"}}, {0x3D0969, {2, "Neverland"}}, {0x3D096A, {2, "Simba's Pridelands"}}, {0x3D096C, {2, "Calhoun's Command"}}, {0x3D096D, {2, "Star-Lord's Galaxy"}}, {0x3D096E, {2, "Dinosaur World"}}, {0x3D096F, {2, "Groot's Roots"}}, {0x3D0970, {2, "Mulan's Countryside"}}, {0x3D0971, {2, "The Sands of Agrabah"}}, {0x3D0974, {2, "A Small World"}}, {0x3D0975, {2, "View from the Suit"}}, {0x3D0976, {2, "Spider-Sky"}}, {0x3D0977, {2, "World War Hulk Sky"}}, {0x3D0978, {2, "Gravity Falls Sky"}}, {0x3D0979, {2, "Second Star to the Right"}}, {0x3D097A, {2, "The King's Domain"}}, {0x3D097C, {2, "CyBug Swarm"}}, {0x3D097D, {2, "The Rip"}}, {0x3D097E, {2, "Forgotten Skies"}}, {0x3D097F, {2, "Groot's View"}}, {0x3D0980, {2, "The Middle Kingdom"}}, {0x3D0984, {2, "Skies of the World"}}, {0x3D0985, {2, "S.H.I.E.L.D. 
Containment Truck"}}, {0x3D0986, {2, "Main Street Electrical Parade Float"}}, {0x3D0987, {2, "Mr. Toad's Motorcar"}}, {0x3D0988, {2, "Le Maximum"}}, {0x3D0989, {2, "Alice in Wonderland's Caterpillar"}}, {0x3D098A, {2, "Eglantine's Motorcycle"}}, {0x3D098B, {2, "Medusa's Swamp Mobile"}}, {0x3D098C, {2, "Hydra Motorcycle"}}, {0x3D098D, {2, "Darkwing Duck's Ratcatcher"}}, {0x3D098F, {2, "The USS Swinetrek"}}, {0x3D0991, {2, "Spider-Copter"}}, {0x3D0992, {2, "Aerial Area Rug"}}, {0x3D0993, {2, "Jack-O-Lantern's Glider"}}, {0x3D0994, {2, "Spider-Buggy"}}, {0x3D0995, {2, "Jack Skellington's Reindeer"}}, {0x3D0996, {2, "Fantasyland Carousel Horse"}}, {0x3D0997, {2, "Odin's Horse"}}, {0x3D0998, {2, "Gus the Mule"}}, {0x3D099A, {2, "Darkwing Duck's Grappling Gun"}}, {0x3D099C, {2, "Ghost Rider's Chain Whip"}}, {0x3D099D, {2, "Lew Zealand's Boomerang Fish"}}, {0x3D099E, {2, "Sergeant Calhoun's Blaster"}}, {0x3D09A0, {2, "Falcon's Wings"}}, {0x3D09A1, {2, "Mabel's Kittens for Fists"}}, {0x3D09A2, {2, "Jim Hawkins' Solar Board"}}, {0x3D09A3, {2, "Black Panther's Vibranium Knives"}}, {0x3D09A4, {2, "Cloak of Levitation"}}, {0x3D09A5, {2, "Aladdin's Magic Carpet"}}, {0x3D09A6, {2, "Honey Lemon's Ice Capsules"}}, {0x3D09A7, {2, "Jasmine's Palace View"}}, {0x3D09C1, {2, "Lola"}}, {0x3D09C2, {2, "Spider-Cycle"}}, {0x3D09C3, {2, "The Avenjet"}}, {0x3D09C4, {2, "Spider-Glider"}}, {0x3D09C5, {2, "Light Cycle"}}, {0x3D09C6, {2, "Light Jet"}}, {0x3D09C9, {3, "Retro Ray Gun"}}, {0x3D09CA, {3, "Tomorrowland Futurescape"}}, {0x3D09CB, {3, "Tomorrowland Stratosphere"}}, {0x3D09CC, {3, "Skies Over Felucia"}}, {0x3D09CD, {3, "Forests of Felucia"}}, {0x3D09CF, {3, "General Grievous' Wheel Bike"}}, {0x3D09D2, {3, "Slave I Flyer"}}, {0x3D09D3, {3, "Y-Wing Fighter"}}, {0x3D09D4, {3, "Arlo"}}, {0x3D09D5, {3, "Nash"}}, {0x3D09D6, {3, "Butch"}}, {0x3D09D7, {3, "Ramsey"}}, {0x3D09DC, {3, "Stars Over Sahara Square"}}, {0x3D09DD, {3, "Sahara Square Sands"}}, {0x3D09E0, {3, "Ghost Rider's Motorcycle"}}, 
{0x3D09E5, {3, "Quad Jumper"}}}; u32 infinity_crc32(u16 init_value, const u8* buffer, u32 size) { const std::array<u32, 256> CRC32_TABLE = { 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59, 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433, 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01, 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65, 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f, 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 
0x10da7a5a, 0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b, 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d, 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713, 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777, 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9, 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d}; u32 crc = init_value; for (u32 i = 0; i < size; i++) { u8 index = u8(crc & 0xFF) ^ buffer[i]; crc = ((crc >> 8) ^ CRC32_TABLE[index]); } return crc; } figure_creator_dialog::figure_creator_dialog(QWidget* parent, u8 slot) : QDialog(parent) { setWindowTitle(tr("Figure Creator")); setObjectName("figure_creator"); setMinimumSize(QSize(500, 150)); QVBoxLayout* vbox_panel = new QVBoxLayout(); QComboBox* combo_figlist = new QComboBox(); QStringList filterlist; u32 first_entry = 0; for (const auto& [figure, entry] : list_figures) { // Only display entry if it is a piece appropriate for the slot if ((slot == 0 && ((figure > 0x1E8480 && figure < 0x2DC6BF) || (figure > 0x3D0900 && figure < 0x4C4B3F))) || ((slot == 1 || slot == 2) && (figure > 0x3D0900 && figure < 0x4C4B3F)) 
|| ((slot == 3 || slot == 6) && figure < 0x1E847F) || ((slot == 4 || slot == 5 || slot == 7 || slot == 8) && (figure > 0x2DC6C0 && figure < 0x3D08FF))) { const auto& [num, figure_name] = entry; const u32 qnum = (figure << 8) | num; QString name = QString::fromStdString(figure_name); combo_figlist->addItem(name, QVariant(qnum)); filterlist << std::move(name); if (first_entry == 0) { first_entry = figure; } } } combo_figlist->addItem(tr("--Unknown--"), QVariant(0xFFFFFFFF)); combo_figlist->setEditable(true); combo_figlist->setInsertPolicy(QComboBox::NoInsert); combo_figlist->model()->sort(0, Qt::AscendingOrder); QCompleter* co_compl = new QCompleter(filterlist, this); co_compl->setCaseSensitivity(Qt::CaseInsensitive); co_compl->setCompletionMode(QCompleter::PopupCompletion); co_compl->setFilterMode(Qt::MatchContains); combo_figlist->setCompleter(co_compl); vbox_panel->addWidget(combo_figlist); QFrame* line = new QFrame(); line->setFrameShape(QFrame::HLine); line->setFrameShadow(QFrame::Sunken); vbox_panel->addWidget(line); QHBoxLayout* hbox_number = new QHBoxLayout(); QLabel* label_number = new QLabel(tr("Figure Number:")); QLineEdit* edit_number = new QLineEdit(QString::number(first_entry)); QLabel* label_series = new QLabel(tr("Series:")); QLineEdit* edit_series = new QLineEdit("1"); QRegularExpressionValidator* rxv = new QRegularExpressionValidator(QRegularExpression("\\d*"), this); QIntValidator* valid_series = new QIntValidator(1, 3, this); edit_number->setValidator(rxv); edit_series->setValidator(valid_series); hbox_number->addWidget(label_number); hbox_number->addWidget(edit_number); hbox_number->addWidget(label_series); hbox_number->addWidget(edit_series); vbox_panel->addLayout(hbox_number); QHBoxLayout* hbox_buttons = new QHBoxLayout(); QPushButton* btn_create = new QPushButton(tr("Create"), this); QPushButton* btn_cancel = new QPushButton(tr("Cancel"), this); hbox_buttons->addStretch(); hbox_buttons->addWidget(btn_create); 
hbox_buttons->addWidget(btn_cancel); vbox_panel->addLayout(hbox_buttons); setLayout(vbox_panel); connect(combo_figlist, QOverload<int>::of(&QComboBox::currentIndexChanged), [=](int index) { const u32 fig_info = combo_figlist->itemData(index).toUInt(); if (fig_info != 0xFFFFFFFF) { const u32 fig_num = fig_info >> 8; const u8 series = fig_info & 0xFF; edit_number->setText(QString::number(fig_num)); edit_series->setText(QString::number(series)); } }); connect(btn_create, &QAbstractButton::clicked, this, [=, this]() { bool ok_num = false, ok_series = false; const u32 fig_num = edit_number->text().toULong(&ok_num); if (!ok_num) { QMessageBox::warning(this, tr("Error converting value"), tr("Figure number entered is invalid!"), QMessageBox::Ok); return; } const u8 series = edit_series->text().toUShort(&ok_series); if (!ok_series || series > 3 || series < 1) { QMessageBox::warning(this, tr("Error converting value"), tr("Series number entered is invalid!"), QMessageBox::Ok); return; } const auto found_figure = list_figures.find(fig_num); if (found_figure != list_figures.cend()) { s_last_figure_path += QString::fromStdString(found_figure->second.second + ".bin"); } else { s_last_figure_path += QString("Unknown(%1 %2).bin").arg(fig_num).arg(series); } m_file_path = QFileDialog::getSaveFileName(this, tr("Create Figure File"), s_last_figure_path, tr("Infinity Figure (*.bin);;")); if (m_file_path.isEmpty()) { return; } if (!create_blank_figure(fig_num, series)) { QMessageBox::warning(this, tr("Failed to create figure file!"), tr("Failed to create figure file:\n%1").arg(m_file_path), QMessageBox::Ok); return; } s_last_figure_path = QFileInfo(m_file_path).absolutePath() + "/"; accept(); }); connect(btn_cancel, &QAbstractButton::clicked, this, &QDialog::reject); connect(co_compl, QOverload<const QString&>::of(&QCompleter::activated), [=](const QString& text) { combo_figlist->setCurrentIndex(combo_figlist->findText(text)); }); } bool figure_creator_dialog::create_blank_figure(u32 
character, u8 series) { infinity_log.trace("File path: %s Character: %d Series: %d", m_file_path, character, series); fs::file inf_file(m_file_path.toStdString(), fs::read + fs::write + fs::create); if (!inf_file) { return false; } // Create a 320 byte file with standard NFC read/write permissions std::array<u8, 0x14 * 0x10> file_data{}; u32 first_block = 0x17878E; u32 other_blocks = 0x778788; for (u8 i = 0; i < 3; i++) { file_data[0x36 + i] = u8((first_block >> (2 - i) * 8) & 0xFF); } for (u32 index = 1; index < 5; index++) { for (u8 i = 0; i < 3; i++) { file_data[((index * 0x40) + 0x36) + i] = u8((other_blocks >> (2 - i) * 8) & 0xFF); } } // Create the vector to calculate the SHA1 hash with std::vector<u8> sha1_calc = {SHA1_CONSTANT.begin(), SHA1_CONSTANT.end() - 1}; // Generate random UID, used for AES encrypt/decrypt std::array<u8, 16> uid_data = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x89, 0x44, 0x00, 0xC2}; for (u8 i = 0; i < 7; i++) { u8 random = rand() % 255; sha1_calc.push_back(random); uid_data[i] = random; } std::array<u8, 16> figure_data = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, series, 0xD1, 0x1F}; // Figure Number, input by end user figure_data[1] = u8((character >> 16) & 0xFF); figure_data[2] = u8((character >> 8) & 0xFF); figure_data[3] = u8(character & 0xFF); // Manufacture date, formatted as YY/MM/DD. 
Set to release date of figure's series if (series == 1) { figure_data[4] = 0x0D; figure_data[5] = 0x08; figure_data[6] = 0x12; } else if (series == 2) { figure_data[4] = 0x0E; figure_data[5] = 0x09; figure_data[6] = 0x12; } else if (series == 3) { figure_data[4] = 0x0F; figure_data[5] = 0x08; figure_data[6] = 0x1C; } u32 checksum = infinity_crc32(0, figure_data.data(), 12); for (s8 i = 0; i < 4; i++) { figure_data[12 + i] = u8((checksum >> (3 - i) * 8) & 0xFF); } if (figure_data[1] == 0) return false; sha1_context ctx; u8 output[20]; sha1_starts(&ctx); sha1_update(&ctx, sha1_calc.data(), sha1_calc.size()); sha1_finish(&ctx, output); u8 key[0x10]; for (int i = 0; i < 4; i++) { for (int x = 0; x < 4; x++) { key[x + (i * 4)] = output[(3 - x) + (i * 4)]; } } // Create AES Encrypt context based on AES key, use this to encrypt the character data and 4 blank // blocks aes_context aes; aes_setkey_enc(&aes, key, 128); std::array<u8, 16> encrypted_block{}; std::array<u8, 16> encrypted_blank{}; aes_crypt_ecb(&aes, AES_ENCRYPT, figure_data.data(), encrypted_block.data()); aes_crypt_ecb(&aes, AES_ENCRYPT, BLANK_BLOCK.data(), encrypted_blank.data()); // Copy encrypted data and UID data to the Figure File memcpy(&file_data[0], uid_data.data(), uid_data.size()); memcpy(&file_data[16], encrypted_block.data(), encrypted_block.size()); memcpy(&file_data[16 * 0x04], encrypted_blank.data(), encrypted_blank.size()); memcpy(&file_data[16 * 0x08], encrypted_blank.data(), encrypted_blank.size()); memcpy(&file_data[16 * 0x0C], encrypted_blank.data(), encrypted_blank.size()); memcpy(&file_data[16 * 0x0D], encrypted_blank.data(), encrypted_blank.size()); inf_file.write(file_data.data(), file_data.size()); inf_file.close(); return true; } QString figure_creator_dialog::get_file_path() const { return m_file_path; } infinity_dialog::infinity_dialog(QWidget* parent) : QDialog(parent) { setWindowTitle(tr("Infinity Manager")); setObjectName("infinity_manager"); setAttribute(Qt::WA_DeleteOnClose); 
setMinimumSize(QSize(700, 200)); QVBoxLayout* vbox_panel = new QVBoxLayout(); auto add_line = [](QVBoxLayout* vbox) { QFrame* line = new QFrame(); line->setFrameShape(QFrame::HLine); line->setFrameShadow(QFrame::Sunken); vbox->addWidget(line); }; QGroupBox* group_figures = new QGroupBox(tr("Active Infinity Figures:")); QVBoxLayout* vbox_group = new QVBoxLayout(); add_figure_slot(vbox_group, QString(tr("Play Set/Power Disc")), 0); add_line(vbox_group); add_figure_slot(vbox_group, QString(tr("Power Disc Two")), 1); add_line(vbox_group); add_figure_slot(vbox_group, QString(tr("Power Disc Three")), 2); add_line(vbox_group); add_figure_slot(vbox_group, QString(tr("Player One")), 3); add_line(vbox_group); add_figure_slot(vbox_group, QString(tr("Player One Ability One")), 4); add_line(vbox_group); add_figure_slot(vbox_group, QString(tr("Player One Ability Two")), 5); add_line(vbox_group); add_figure_slot(vbox_group, QString(tr("Player Two")), 6); add_line(vbox_group); add_figure_slot(vbox_group, QString(tr("Player Two Ability One")), 7); add_line(vbox_group); add_figure_slot(vbox_group, QString(tr("Player Two Ability Two")), 8); group_figures->setLayout(vbox_group); vbox_panel->addWidget(group_figures); setLayout(vbox_panel); } infinity_dialog::~infinity_dialog() { inst = nullptr; } infinity_dialog* infinity_dialog::get_dlg(QWidget* parent) { if (inst == nullptr) inst = new infinity_dialog(parent); return inst; } void infinity_dialog::add_figure_slot(QVBoxLayout* vbox_group, QString name, u8 slot) { ensure(slot < figure_slots.size()); QHBoxLayout* hbox_infinity = new QHBoxLayout(); QLabel* label_figname = new QLabel(name); QPushButton* clear_btn = new QPushButton(tr("Clear")); QPushButton* create_btn = new QPushButton(tr("Create")); QPushButton* load_btn = new QPushButton(tr("Load")); m_edit_figures[slot] = new QLineEdit(); m_edit_figures[slot]->setEnabled(false); if (figure_slots[slot]) { const auto found_figure = list_figures.find(figure_slots[slot].value()); if 
(found_figure != list_figures.cend()) { m_edit_figures[slot]->setText(QString::fromStdString(found_figure->second.second)); } else { m_edit_figures[slot]->setText(tr("Unknown Figure")); } } else { m_edit_figures[slot]->setText(tr("None")); } connect(clear_btn, &QAbstractButton::clicked, this, [this, slot] { clear_figure(slot); }); connect(create_btn, &QAbstractButton::clicked, this, [this, slot] { create_figure(slot); }); connect(load_btn, &QAbstractButton::clicked, this, [this, slot] { load_figure(slot); }); hbox_infinity->addWidget(label_figname); hbox_infinity->addWidget(m_edit_figures[slot]); hbox_infinity->addWidget(clear_btn); hbox_infinity->addWidget(create_btn); hbox_infinity->addWidget(load_btn); vbox_group->addLayout(hbox_infinity); } void infinity_dialog::clear_figure(u8 slot) { ensure(slot < figure_slots.size()); if (figure_slots[slot]) { g_infinitybase.remove_figure(slot); figure_slots[slot] = 0; m_edit_figures[slot]->setText(tr("None")); } } void infinity_dialog::create_figure(u8 slot) { ensure(slot < figure_slots.size()); figure_creator_dialog create_dlg(this, slot); if (create_dlg.exec() == Accepted) { load_figure_path(slot, create_dlg.get_file_path()); } } void infinity_dialog::load_figure(u8 slot) { ensure(slot < figure_slots.size()); const QString file_path = QFileDialog::getOpenFileName(this, tr("Select Infinity File"), s_last_figure_path, tr("Infinity Figure (*.bin);;")); if (file_path.isEmpty()) { return; } s_last_figure_path = QFileInfo(file_path).absolutePath() + "/"; load_figure_path(slot, file_path); } void infinity_dialog::load_figure_path(u8 slot, const QString& path) { fs::file inf_file(path.toStdString(), fs::read + fs::write + fs::lock); if (!inf_file) { QMessageBox::warning(this, tr("Failed to open the figure file!"), tr("Failed to open the figure file(%1)!\nFile may already be in use on the base.").arg(path), QMessageBox::Ok); return; } std::array<u8, 0x14 * 0x10> data; if (inf_file.read(data.data(), data.size()) != data.size()) { 
QMessageBox::warning(this, tr("Failed to read the figure file!"), tr("Failed to read the figure file(%1)!\nFile was too small.").arg(path), QMessageBox::Ok); return; } clear_figure(slot); const u32 fignum = g_infinitybase.load_figure(data, std::move(inf_file), slot); const auto name = list_figures.find(fignum); if (name != list_figures.cend()) { m_edit_figures[slot]->setText(QString::fromStdString(name->second.second)); } else { m_edit_figures[slot]->setText(tr("Unknown Figure")); } figure_slots[slot] = fignum; }
29,799
C++
.cpp
770
36.37013
179
0.677314
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
5,137
downloader.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/downloader.cpp
#include <QApplication>
#include <QThread>

#include "downloader.h"
#include "curl_handle.h"
#include "progress_dialog.h"

#include "util/logs.hpp"

LOG_CHANNEL(network_log, "NET");

// Free-function curl write callback: forwards each received chunk to the
// downloader instance passed through CURLOPT_WRITEDATA (userdata).
// The size parameter is ignored; only nmemb is forwarded (per the
// CURLOPT_WRITEFUNCTION contract, size is documented as always 1 — see refs).
usz curl_write_cb_compat(char* ptr, usz /*size*/, usz nmemb, void* userdata)
{
	downloader* download = static_cast<downloader*>(userdata);
	return download->update_buffer(ptr, nmemb);
}

downloader::downloader(QWidget* parent)
	: QObject(parent)
	, m_parent(parent)
	, m_curl(new rpcs3::curl::curl_handle())
{
}

// Destructor: if a download thread is still running, request abort and block
// until the thread has finished so it never outlives this object.
downloader::~downloader()
{
	if (m_thread && m_thread->isRunning())
	{
		m_curl_abort = true;
		m_thread->wait();
	}
}

// Starts (or restarts) a download on a worker thread.
// url                       : target URL handed to curl
// follow_location           : whether curl should follow HTTP redirects
// show_progress_dialog      : create/update a progress dialog for this download
// progress_dialog_title     : window title for that dialog
// keep_progress_dialog_open : don't auto-close the dialog when the download succeeds
// expected_size             : progress range hint; 100 is used if not positive
void downloader::start(const std::string& url, bool follow_location, bool show_progress_dialog, const QString& progress_dialog_title, bool keep_progress_dialog_open, int expected_size)
{
	network_log.notice("Starting download from URL: %s", url);

	// Tear down any previous worker thread before reusing the curl handle.
	if (m_thread)
	{
		if (m_thread->isRunning())
		{
			m_curl_abort = true;
			m_thread->wait();
		}
		m_thread->deleteLater();
	}

	m_keep_progress_dialog_open = keep_progress_dialog_open;
	m_curl_buf.clear();
	m_curl_abort = false;

	// Configure the curl handle. Each setopt failure is logged but not fatal.
	CURLcode err = curl_easy_setopt(m_curl->get_curl(), CURLOPT_URL, url.c_str());
	if (err != CURLE_OK) network_log.error("curl_easy_setopt(CURLOPT_URL, %s) error: %s", url, curl_easy_strerror(err));

	err = curl_easy_setopt(m_curl->get_curl(), CURLOPT_WRITEFUNCTION, curl_write_cb_compat);
	if (err != CURLE_OK) network_log.error("curl_easy_setopt(CURLOPT_WRITEFUNCTION, curl_write_cb_compat) error: %s", curl_easy_strerror(err));

	err = curl_easy_setopt(m_curl->get_curl(), CURLOPT_WRITEDATA, this);
	if (err != CURLE_OK) network_log.error("curl_easy_setopt(CURLOPT_WRITEDATA) error: %s", curl_easy_strerror(err));

	err = curl_easy_setopt(m_curl->get_curl(), CURLOPT_FOLLOWLOCATION, follow_location ? 1 : 0);
	if (err != CURLE_OK) network_log.error("curl_easy_setopt(CURLOPT_FOLLOWLOCATION, %d) error: %s", follow_location, curl_easy_strerror(err));

	// The blocking curl transfer runs on this worker thread; curl invokes
	// curl_write_cb_compat (and thus update_buffer) from here.
	m_thread = QThread::create([this]
	{
		// Reset error buffer before we call curl_easy_perform
		m_curl->reset_error_buffer();

		const CURLcode result = curl_easy_perform(m_curl->get_curl());
		m_curl_success = result == CURLE_OK;

		if (!m_curl_success && !m_curl_abort)
		{
			const std::string error = fmt::format("curl_easy_perform(): %s", m_curl->get_verbose_error(result));
			network_log.error("%s", error);
			Q_EMIT signal_download_error(QString::fromStdString(error));
		}
	});

	// Runs on the GUI thread once the worker finishes: close the dialog and
	// publish the buffer on success; do nothing if the user aborted.
	connect(m_thread, &QThread::finished, this, [this]()
	{
		if (m_curl_abort)
		{
			network_log.notice("Download aborted");
			return;
		}

		if (!m_keep_progress_dialog_open || !m_curl_success)
		{
			close_progress_dialog();
		}

		if (m_curl_success)
		{
			network_log.notice("Download finished");
			Q_EMIT signal_download_finished(m_curl_buf);
		}
	});

	// The downloader's signals are expected to be disconnected and customized before start is called.
	// Therefore we need to (re)connect its signal(s) here and not in the constructor.
	connect(this, &downloader::signal_buffer_update, this, &downloader::handle_buffer_update);

	if (show_progress_dialog)
	{
		// Fall back to a 0..100 range when no expected size was supplied.
		const int maximum = expected_size > 0 ? expected_size : 100;

		if (m_progress_dialog)
		{
			// Reuse the existing dialog for this new download.
			m_progress_dialog->setWindowTitle(progress_dialog_title);
			m_progress_dialog->setAutoClose(!m_keep_progress_dialog_open);
			m_progress_dialog->SetRange(0, maximum);
		}
		else
		{
			m_progress_dialog = new progress_dialog(progress_dialog_title, tr("Please wait..."), tr("Abort"), 0, maximum, true, m_parent);
			m_progress_dialog->setAutoReset(false);
			m_progress_dialog->setAutoClose(!m_keep_progress_dialog_open);
			m_progress_dialog->show();

			// Handle abort
			connect(m_progress_dialog, &QProgressDialog::canceled, this, [this]()
			{
				m_curl_abort = true;
				m_progress_dialog = nullptr; // The progress dialog deletes itself on close
				Q_EMIT signal_download_canceled();
			});
			connect(m_progress_dialog, &QProgressDialog::finished, this, [this]()
			{
				m_progress_dialog = nullptr; // The progress dialog deletes itself on close
			});
		}
	}

	m_thread->setObjectName("Download Thread");
	m_thread->setParent(this);
	m_thread->start();
}

// Updates the progress dialog's window title, if a dialog is currently shown.
void downloader::update_progress_dialog(const QString& title) const
{
	if (m_progress_dialog)
	{
		m_progress_dialog->setWindowTitle(title);
	}
}

// Accepts and forgets the progress dialog (it deletes itself on close).
void downloader::close_progress_dialog()
{
	if (m_progress_dialog)
	{
		m_progress_dialog->accept();
		m_progress_dialog = nullptr;
	}
}

progress_dialog* downloader::get_progress_dialog() const
{
	return m_progress_dialog;
}

// Called from the worker thread (via the curl write callback): appends the
// received chunk to the buffer and emits a queued progress update for the GUI.
// Returning 0 (on abort) makes curl cancel the transfer.
usz downloader::update_buffer(char* data, usz size)
{
	if (m_curl_abort)
	{
		return 0;
	}

	const auto old_size = m_curl_buf.size();
	const auto new_size = old_size + size;
	m_curl_buf.resize(static_cast<int>(new_size));
	memcpy(m_curl_buf.data() + old_size, data, size);

	int max = 0;

	// Query the total download size once (m_actual_download_size starts negative).
	if (m_actual_download_size < 0)
	{
		if (curl_easy_getinfo(m_curl->get_curl(), CURLINFO_CONTENT_LENGTH_DOWNLOAD_T, &m_actual_download_size) == CURLE_OK && m_actual_download_size > 0)
		{
			max = static_cast<int>(m_actual_download_size);
		}
	}

	Q_EMIT signal_buffer_update(static_cast<int>(new_size), max);

	return size;
}

// GUI-thread slot for signal_buffer_update: pushes progress into the dialog.
void downloader::handle_buffer_update(int size, int max) const
{
	if (m_curl_abort)
	{
		return;
	}

	if (m_progress_dialog)
	{
		// Keep the previous maximum if curl did not report a content length.
		m_progress_dialog->SetRange(0, max > 0 ? max : m_progress_dialog->maximum());
		m_progress_dialog->SetValue(size);
		QApplication::processEvents();
	}
}
5,448
C++
.cpp
167
30.005988
184
0.710065
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
5,138
msg_dialog_frame.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/msg_dialog_frame.cpp
#include "msg_dialog_frame.h"
#include "custom_dialog.h"

#include <QCoreApplication>
#include <QPushButton>
#include <QFormLayout>

// Builds and shows the message dialog: text, optional progress bars and the
// button set selected by `type`. Any previously open dialog is closed first.
void msg_dialog_frame::Create(const std::string& msg, const std::string& title)
{
	state = MsgDialogState::Open;

	// Fixed progress bar width derived from a reference label, so the bar
	// scales with font/hidpi settings instead of using a hard-coded pixel count.
	static const auto& barWidth = [](){return QLabel("This is the very length of the progressbar due to hidpi reasons.").sizeHint().width();};

	// Close (and accept) any dialog left over from a previous Create call.
	Close(true);

	m_dialog = new custom_dialog(type.disable_cancel);
	m_dialog->setWindowTitle(title.empty() ? (type.se_normal ? tr("Normal dialog") : tr("Error dialog")) : QString::fromStdString(title));
	m_dialog->setWindowOpacity(type.bg_invisible ? 1. : 0.75);

	m_text = new QLabel(QString::fromStdString(msg));
	m_text->setAlignment(Qt::AlignCenter);

	// Layout
	QFormLayout* layout = new QFormLayout(m_dialog);
	layout->setFormAlignment(Qt::AlignHCenter);
	layout->addRow(m_text);

	// Helper: creates one progress bar plus its caption label and adds both
	// (centered) to the form layout. Writes the widgets into the out-params.
	auto l_AddGauge = [this, layout](QProgressBar* &bar, QLabel* &text)
	{
		text = new QLabel("", m_dialog);
		bar = new QProgressBar(m_dialog);
		bar->setRange(0, 100);
		bar->setValue(0);
		bar->setFixedWidth(barWidth());
		bar->setAlignment(Qt::AlignCenter);

		QHBoxLayout* barLayout = new QHBoxLayout;
		barLayout->addStretch();
		barLayout->addWidget(bar);
		barLayout->addStretch();

		QHBoxLayout* textLayout = new QHBoxLayout;
		textLayout->setAlignment(Qt::AlignCenter);
		textLayout->addWidget(text);

		layout->addRow(textLayout);
		layout->addRow(barLayout);
	};

	if (type.progress_bar_count >= 1)
	{
		l_AddGauge(m_gauge1, m_text1);

		// The OS-level progress indicator (taskbar/dock) mirrors the first gauge.
		m_progress_indicator = std::make_unique<progress_indicator>(0, 100);
	}

	if (type.progress_bar_count >= 2)
	{
		l_AddGauge(m_gauge2, m_text2);
	}

	// Yes/No button pair
	if (type.button_type.unshifted() == CELL_MSGDIALOG_TYPE_BUTTON_TYPE_YESNO)
	{
		m_dialog->setModal(true);

		QPushButton* m_button_yes = new QPushButton(tr("&Yes"), m_dialog);
		QPushButton* m_button_no = new QPushButton(tr("&No"), m_dialog);

		QHBoxLayout* hBoxButtons = new QHBoxLayout;
		hBoxButtons->setAlignment(Qt::AlignCenter);
		hBoxButtons->addWidget(m_button_yes);
		hBoxButtons->addWidget(m_button_no);
		layout->addRow(hBoxButtons);

		// default_cursor selects which button starts focused.
		if (type.default_cursor == 1)
		{
			m_button_no->setFocus();
		}
		else
		{
			m_button_yes->setFocus();
		}

		connect(m_button_yes, &QAbstractButton::clicked, [this]()
		{
			if (on_close) on_close(CELL_MSGDIALOG_BUTTON_YES);
			m_dialog->accept();
		});
		connect(m_button_no, &QAbstractButton::clicked, [this]()
		{
			if (on_close) on_close(CELL_MSGDIALOG_BUTTON_NO);
			m_dialog->accept();
		});
	}

	// Single OK button
	if (type.button_type.unshifted() == CELL_MSGDIALOG_TYPE_BUTTON_TYPE_OK)
	{
		m_dialog->setModal(true);

		QPushButton* m_button_ok = new QPushButton(tr("&OK"), m_dialog);
		m_button_ok->setFixedWidth(50);

		QHBoxLayout* hBoxButtons = new QHBoxLayout;
		hBoxButtons->setAlignment(Qt::AlignCenter);
		hBoxButtons->addWidget(m_button_ok);
		layout->addRow(hBoxButtons);

		if (type.default_cursor == 0)
		{
			m_button_ok->setFocus();
		}

		connect(m_button_ok, &QAbstractButton::clicked, [this]()
		{
			if (on_close) on_close(CELL_MSGDIALOG_BUTTON_OK);
			m_dialog->accept();
		});
	}

	m_dialog->setLayout(layout);

	// Escape/reject only reports a result when cancelling is allowed.
	connect(m_dialog, &QDialog::rejected, [this]()
	{
		if (!type.disable_cancel)
		{
			if (on_close) on_close(CELL_MSGDIALOG_BUTTON_ESCAPE);
		}
	});

	// Fix size
	m_dialog->layout()->setSizeConstraint(QLayout::SetFixedSize);
	m_dialog->show();

	// if we do this before, the QWinTaskbarProgress won't show
	if (m_progress_indicator) m_progress_indicator->show(m_dialog->windowHandle());
}

// Closes the dialog with Accepted/Rejected depending on `success` and
// schedules it for deletion.
void msg_dialog_frame::Close(bool success)
{
	if (m_dialog)
	{
		m_dialog->done(success ? QDialog::Accepted : QDialog::Rejected);
		m_dialog->deleteLater();
	}
}

msg_dialog_frame::~msg_dialog_frame()
{
	if (m_dialog)
	{
		m_dialog->deleteLater();
	}
}

// Replaces the main message text of the open dialog.
void msg_dialog_frame::SetMsg(const std::string& msg)
{
	if (m_dialog)
	{
		m_text->setText(QString::fromStdString(msg));
	}
}

// Sets the caption above progress bar `index` (0 or 1).
void msg_dialog_frame::ProgressBarSetMsg(u32 index, const std::string& msg)
{
	if (m_dialog)
	{
		if (index == 0)
		{
			if (m_text1)
			{
				m_text1->setText(QString::fromStdString(msg));
			}
		}
		else if (index == 1)
		{
			if (m_text2)
			{
				m_text2->setText(QString::fromStdString(msg));
			}
		}
	}
}

// Resets progress bar `index` to zero; also resets the OS progress indicator
// when that index is the taskbar one. (`+ 0u` promotes the comparison to unsigned.)
void msg_dialog_frame::ProgressBarReset(u32 index)
{
	if (!m_dialog)
	{
		return;
	}

	if (index == 0)
	{
		if (m_gauge1)
		{
			m_gauge1->setValue(0);
		}
	}
	else if (index == 1)
	{
		if (m_gauge2)
		{
			m_gauge2->setValue(0);
		}
	}

	if (index == taskbar_index + 0u)
	{
		if (m_progress_indicator)
		{
			m_progress_indicator->reset();
		}
	}
}

// Increments progress bar `index` by `delta`, clamped to the bar's maximum.
// taskbar_index == -1 means the OS indicator aggregates all bars.
void msg_dialog_frame::ProgressBarInc(u32 index, u32 delta)
{
	if (!m_dialog)
	{
		return;
	}

	if (index == 0)
	{
		if (m_gauge1)
		{
			m_gauge1->setValue(std::min(m_gauge1->value() + static_cast<int>(delta), m_gauge1->maximum()));
		}
	}
	else if (index == 1)
	{
		if (m_gauge2)
		{
			m_gauge2->setValue(std::min(m_gauge2->value() + static_cast<int>(delta), m_gauge2->maximum()));
		}
	}

	if (index == taskbar_index + 0u || taskbar_index == -1)
	{
		if (m_progress_indicator)
		{
			m_progress_indicator->set_value(m_progress_indicator->value() + static_cast<int>(delta));
		}
	}
}

// Sets progress bar `index` to an absolute value, clamped to the bar's maximum.
void msg_dialog_frame::ProgressBarSetValue(u32 index, u32 value)
{
	if (!m_dialog)
	{
		return;
	}

	if (index == 0)
	{
		if (m_gauge1)
		{
			m_gauge1->setValue(std::min(static_cast<int>(value), m_gauge1->maximum()));
		}
	}
	else if (index == 1)
	{
		if (m_gauge2)
		{
			m_gauge2->setValue(std::min(static_cast<int>(value), m_gauge2->maximum()));
		}
	}

	if (index == taskbar_index + 0u || taskbar_index == -1)
	{
		if (m_progress_indicator)
		{
			m_progress_indicator->set_value(static_cast<int>(value));
		}
	}
}

// Sets the maximum of progress bar `index`. The OS indicator's range is the
// tracked bar's limit, or the sum of all limits when taskbar_index == -1.
void msg_dialog_frame::ProgressBarSetLimit(u32 index, u32 limit)
{
	if (!m_dialog)
	{
		return;
	}

	if (index == 0)
	{
		if (m_gauge1)
		{
			m_gauge1->setMaximum(limit);
		}
	}
	else if (index == 1)
	{
		if (m_gauge2)
		{
			m_gauge2->setMaximum(limit);
		}
	}

	[[maybe_unused]] bool set_taskbar_limit = false;

	if (index == taskbar_index + 0u)
	{
		m_gauge_max = limit;
		set_taskbar_limit = true;
	}
	else if (taskbar_index == -1)
	{
		m_gauge_max += limit;
		set_taskbar_limit = true;
	}

	if (set_taskbar_limit && m_progress_indicator)
	{
		m_progress_indicator->set_range(0, m_gauge_max);
	}
}
6,299
C++
.cpp
270
20.659259
139
0.68004
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
5,139
screenshot_preview.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/screenshot_preview.cpp
#include "screenshot_preview.h"
#include "qt_utils.h"

#include <QAction>
#include <QApplication>
#include <QClipboard>
#include <QImage>
#include <QImageReader>
#include <QMenu>
#include <QResizeEvent>

// Standalone viewer window for a single screenshot file.
// filepath: path of the image to display; parent: optional owning widget.
screenshot_preview::screenshot_preview(const QString& filepath, QWidget* parent)
	: QLabel(parent)
	, m_filepath(filepath)
{
	QImageReader reader(filepath);
	reader.setAutoTransform(true); // honor the image's orientation metadata
	m_image = reader.read();

	setWindowTitle(tr("Screenshot Viewer"));
	setObjectName("screenshot_preview");
	setContextMenuPolicy(Qt::CustomContextMenu);
	setAttribute(Qt::WA_DeleteOnClose);
	setAlignment(Qt::AlignHCenter | Qt::AlignVCenter);
	setPixmap(QPixmap::fromImage(m_image));
	setMinimumSize(160, 90);

	connect(this, &screenshot_preview::customContextMenuRequested, this, &screenshot_preview::show_context_menu);
}

// Right-click context menu: copy image, open containing folder, resize options.
void screenshot_preview::show_context_menu(const QPoint& pos)
{
	// Bugfix: the menu used to be heap-allocated without a parent and never
	// deleted, leaking one QMenu per invocation. A stack-allocated menu is
	// destroyed when this handler returns (exec() blocks until it closes).
	QMenu menu;

	menu.addAction(tr("&Copy"), [this]() { QGuiApplication::clipboard()->setImage(m_image); });

	menu.addSeparator();

	menu.addAction(tr("&Open file location"), [this]() { gui::utils::open_dir(m_filepath); });

	menu.addSeparator();

	// Only enabled while the displayed pixmap differs from the native size.
	QAction* reset_act = menu.addAction(tr("To &Normal Size"), [this]() { scale(m_image.size()); });
	reset_act->setEnabled(pixmap(Qt::ReturnByValue).size() != m_image.size());

	// Toggles between keep-aspect and stretch-to-window scaling.
	QAction* stretch_act = menu.addAction(tr("&Stretch to size"), [this]() { m_stretch = !m_stretch; scale(size()); });
	stretch_act->setCheckable(true);
	stretch_act->setChecked(m_stretch);

	menu.addSeparator();

	menu.addAction(tr("E&xit"), this, &QLabel::close);

	menu.exec(mapToGlobal(pos));
}

// Resizes the window (if needed) and re-renders the image at new_size,
// stretching or keeping aspect ratio depending on m_stretch.
void screenshot_preview::scale(const QSize& new_size)
{
	if (new_size != size())
	{
		resize(new_size);
	}

	setPixmap(QPixmap::fromImage(m_image.scaled(new_size, m_stretch ? Qt::IgnoreAspectRatio : Qt::KeepAspectRatio, Qt::SmoothTransformation)));
}

// Keep the rendered pixmap in sync with the window size.
void screenshot_preview::resizeEvent(QResizeEvent* event)
{
	scale(event->size());
	event->ignore();
}
1,975
C++
.cpp
54
34.722222
140
0.740178
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
true
false
false
true
false
false
5,140
vfs_dialog_path_widget.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/vfs_dialog_path_widget.cpp
#include "vfs_dialog_path_widget.h"
#include "Utilities/Config.h"

#include <QFileDialog>
#include <QCoreApplication>
#include <QHBoxLayout>
#include <QPushButton>

// Widget that manages the list of directories configured for one VFS entry.
// The saved directories are loaded from gui_settings and the currently used
// path is always present and selected.
vfs_dialog_path_widget::vfs_dialog_path_widget(const QString& name, const QString& current_path, QString default_path, gui_save list_location, std::shared_ptr<gui_settings> _gui_settings, QWidget* parent)
	: QWidget(parent), m_default_path(std::move(default_path)), m_list_location(std::move(list_location)), m_gui_settings(std::move(_gui_settings))
{
	m_dir_list = new QListWidget(this);

	// Populate the list from the stored settings, remembering the entry that
	// matches the directory currently in use.
	QListWidgetItem* active_item = nullptr;

	for (const QString& saved_dir : m_gui_settings->GetValue(m_list_location).toStringList())
	{
		QListWidgetItem* entry = new QListWidgetItem(saved_dir, m_dir_list);

		if (saved_dir == current_path)
		{
			active_item = entry;
		}
	}

	// We must show the currently selected config.
	if (!active_item)
	{
		active_item = new QListWidgetItem(current_path, m_dir_list);
	}

	active_item->setSelected(true);

	m_dir_list->setMinimumWidth(m_dir_list->sizeHintForColumn(0));

	QPushButton* add_directory_button = new QPushButton(QStringLiteral("+"));
	add_directory_button->setToolTip(tr("Add new directory"));
	add_directory_button->setFixedWidth(add_directory_button->sizeHint().height()); // Make button square
	connect(add_directory_button, &QAbstractButton::clicked, this, &vfs_dialog_path_widget::add_new_directory);

	QPushButton* button_remove_dir = new QPushButton(QStringLiteral("-"));
	button_remove_dir->setToolTip(tr("Remove directory"));
	button_remove_dir->setFixedWidth(button_remove_dir->sizeHint().height()); // Make button square
	button_remove_dir->setEnabled(false);
	connect(button_remove_dir, &QAbstractButton::clicked, this, &vfs_dialog_path_widget::remove_directory);

	m_selected_config_label = new QLabel(current_path.isEmpty() ? EmptyPath : current_path);

	QHBoxLayout* selected_config_layout = new QHBoxLayout;
	selected_config_layout->addWidget(new QLabel(tr("Used %0 directory:").arg(name)));
	selected_config_layout->addWidget(m_selected_config_label);
	selected_config_layout->addStretch();
	selected_config_layout->addWidget(add_directory_button);
	selected_config_layout->addWidget(button_remove_dir);

	QVBoxLayout* vbox = new QVBoxLayout;
	vbox->addWidget(m_dir_list);
	vbox->addLayout(selected_config_layout);

	setLayout(vbox);

	// Keep the label and the remove button in sync with the current row.
	// The first row can never be removed.
	connect(m_dir_list, &QListWidget::currentRowChanged, this, [this, button_remove_dir](int row)
	{
		QListWidgetItem* item = m_dir_list->item(row);
		m_selected_config_label->setText((item && !item->text().isEmpty()) ? item->text() : EmptyPath);
		button_remove_dir->setEnabled(item && row > 0);
	});
}

// Clears all entries and re-adds only the default directory as current.
void vfs_dialog_path_widget::reset() const
{
	m_dir_list->clear();
	m_dir_list->setCurrentItem(new QListWidgetItem(m_default_path, m_dir_list));
}

// Asks the user for a directory, normalizes it to end with '/', and makes it
// the current entry of the list.
void vfs_dialog_path_widget::add_new_directory() const
{
	QString dir = QFileDialog::getExistingDirectory(nullptr, tr("Choose a directory"), QCoreApplication::applicationDirPath(), QFileDialog::DontResolveSymlinks);

	if (dir.isEmpty())
	{
		return;
	}

	if (!dir.endsWith("/"))
	{
		dir += '/';
	}

	m_dir_list->setCurrentItem(new QListWidgetItem(dir, m_dir_list));
}

// Removes the current entry. The first row (row 0) is protected, matching the
// enable condition of the remove button.
void vfs_dialog_path_widget::remove_directory() const
{
	const int row = m_dir_list->currentRow();

	if (row > 0)
	{
		delete m_dir_list->takeItem(row);
	}
}

// Returns every directory currently shown in the list, in display order.
QStringList vfs_dialog_path_widget::get_dir_list() const
{
	QStringList all_dirs;

	for (int i = 0; i < m_dir_list->count(); ++i)
	{
		all_dirs += m_dir_list->item(i)->text();
	}

	return all_dirs;
}

// Returns the currently used path, or an empty string for the placeholder.
std::string vfs_dialog_path_widget::get_selected_path() const
{
	const QString path = m_selected_config_label->text();
	return path == EmptyPath ? "" : path.toStdString();
}
3,747
C++
.cpp
86
41.44186
204
0.748557
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
5,141
custom_dialog.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/custom_dialog.cpp
#include "custom_dialog.h"

// Dialog that can optionally suppress every user-initiated way of cancelling
// it (Escape key, titlebar close button, window-manager close).
custom_dialog::custom_dialog(bool disableCancel, QWidget* parent)
	: QDialog(parent), m_disable_cancel(disableCancel)
{
	if (m_disable_cancel)
	{
		// Remove the titlebar close button when cancelling is forbidden.
		setWindowFlags(windowFlags() & ~Qt::WindowCloseButtonHint);
	}
}

void custom_dialog::keyPressEvent(QKeyEvent* event)
{
	// this won't work with Alt+F4, the window still closes
	if (m_disable_cancel && event->key() == Qt::Key_Escape)
	{
		event->ignore();
		return;
	}

	QDialog::keyPressEvent(event);
}

void custom_dialog::closeEvent(QCloseEvent* event)
{
	// spontaneous: don't close on external system level events like Alt+F4
	if (m_disable_cancel && event->spontaneous())
	{
		event->ignore();
		return;
	}

	QDialog::closeEvent(event);
}
725
C++
.cpp
33
20.030303
72
0.731495
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
true
false
false
true
false
false
5,142
table_item_delegate.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/table_item_delegate.cpp
#include "table_item_delegate.h"
#include "gui_settings.h"

// Item delegate shared by the table views; optionally applies special
// rendering rules to an icon column.
table_item_delegate::table_item_delegate(QObject* parent, bool has_icons)
	: QStyledItemDelegate(parent), m_has_icons(has_icons)
{
}

void table_item_delegate::initStyleOption(QStyleOptionViewItem *option, const QModelIndex &index) const
{
	// Remove the focus frame around selected items
	option->state &= ~QStyle::State_HasFocus;

	const bool icon_column = m_has_icons && index.column() == 0;

	if (icon_column)
	{
		// Don't highlight icons
		option->state &= ~QStyle::State_Selected;

		// Center icons
		option->decorationAlignment = Qt::AlignCenter;
		option->decorationPosition = QStyleOptionViewItem::Top;
	}

	QStyledItemDelegate::initStyleOption(option, index);
}

void table_item_delegate::paint(QPainter *painter, const QStyleOptionViewItem &option, const QModelIndex &index) const
{
	const bool selected_icon_cell = m_has_icons
		&& index.column() == static_cast<int>(gui::game_list_columns::icon)
		&& (option.state & QStyle::State_Selected);

	if (selected_icon_cell)
	{
		// Add background highlight color to icons
		painter->fillRect(option.rect, option.palette.color(QPalette::Highlight));
	}

	QStyledItemDelegate::paint(painter, option, index);
}
1,127
C++
.cpp
29
36.758621
126
0.764436
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
5,143
game_list.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/game_list.cpp
#include "stdafx.h" #include "game_list.h" #include "movie_item.h" #include <QApplication> #include <QHeaderView> #include <QMenu> game_list::game_list() : QTableWidget(), game_list_base() { m_icon_ready_callback = [this](const game_info& game) { Q_EMIT IconReady(game); }; } void game_list::sync_header_actions(QList<QAction*>& actions, std::function<bool(int)> get_visibility) { ensure(get_visibility); bool is_dirty = false; for (int col = 0; col < actions.count(); ++col) { const bool is_hidden = !get_visibility(col); actions[col]->setChecked(!is_hidden); if (isColumnHidden(col) != is_hidden) { setColumnHidden(col, is_hidden); is_dirty = true; } } if (is_dirty) { fix_narrow_columns(); } } void game_list::create_header_actions(QList<QAction*>& actions, std::function<bool(int)> get_visibility, std::function<void(int, bool)> set_visibility) { ensure(get_visibility); ensure(set_visibility); horizontalHeader()->setContextMenuPolicy(Qt::CustomContextMenu); connect(horizontalHeader(), &QHeaderView::customContextMenuRequested, this, [this, &actions](const QPoint& pos) { QMenu* configure = new QMenu(this); configure->addActions(actions); configure->exec(horizontalHeader()->viewport()->mapToGlobal(pos)); }); for (int col = 0; col < actions.count(); ++col) { actions[col]->setCheckable(true); connect(actions[col], &QAction::triggered, this, [this, &actions, get_visibility, set_visibility, col](bool checked) { if (!checked) // be sure to have at least one column left so you can call the context menu at all time { int c = 0; for (int i = 0; i < actions.count(); ++i) { if (get_visibility(i) && ++c > 1) break; } if (c < 2) { actions[col]->setChecked(true); // re-enable the checkbox if we don't change the actual state return; } } setColumnHidden(col, !checked); // Negate because it's a set col hidden and we have menu say show. 
set_visibility(col, checked); if (checked) // handle hidden columns that have zero width after showing them (stuck between others) { fix_narrow_columns(); } }); } sync_header_actions(actions, get_visibility); } void game_list::clear_list() { m_last_hover_item = nullptr; clearSelection(); clearContents(); } void game_list::fix_narrow_columns() { QApplication::processEvents(); // handle columns (other than the icon column) that have zero width after showing them (stuck between others) for (int col = 1; col < columnCount(); ++col) { if (isColumnHidden(col)) { continue; } if (columnWidth(col) <= horizontalHeader()->minimumSectionSize()) { setColumnWidth(col, horizontalHeader()->minimumSectionSize()); } } } void game_list::mousePressEvent(QMouseEvent* event) { if (QTableWidgetItem* item = itemAt(event->pos()); !item || !item->data(Qt::UserRole).isValid()) { clearSelection(); setCurrentItem(nullptr); // Needed for currentItemChanged } QTableWidget::mousePressEvent(event); } void game_list::mouseMoveEvent(QMouseEvent* event) { movie_item* new_item = static_cast<movie_item*>(itemAt(event->pos())); if (new_item != m_last_hover_item) { if (m_last_hover_item) { m_last_hover_item->set_active(false); } if (new_item) { new_item->set_active(true); } } m_last_hover_item = new_item; } void game_list::mouseDoubleClickEvent(QMouseEvent* ev) { if (!ev) return; // Qt's itemDoubleClicked signal doesn't distinguish between mouse buttons and there is no simple way to get the pressed button. // So we have to ignore this event when another button is pressed. 
if (ev->button() != Qt::LeftButton) { ev->ignore(); return; } QTableWidget::mouseDoubleClickEvent(ev); } void game_list::keyPressEvent(QKeyEvent* event) { const auto modifiers = event->modifiers(); if (modifiers == Qt::ControlModifier && event->key() == Qt::Key_F && !event->isAutoRepeat()) { Q_EMIT FocusToSearchBar(); return; } QTableWidget::keyPressEvent(event); } void game_list::leaveEvent(QEvent* /*event*/) { if (m_last_hover_item) { m_last_hover_item->set_active(false); m_last_hover_item = nullptr; } } void game_list::FocusAndSelectFirstEntryIfNoneIs() { if (QTableWidgetItem* item = itemAt(0, 0); item && selectedIndexes().isEmpty()) { setCurrentItem(item); } setFocus(); }
4,339
C++
.cpp
157
25.019108
151
0.706024
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
5,144
register_editor_dialog.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/register_editor_dialog.cpp
#include "register_editor_dialog.h" #include "Emu/Cell/PPUThread.h" #include "Emu/Cell/SPUThread.h" #include "Emu/CPU/CPUThread.h" #include "Emu/CPU/CPUDisAsm.h" #include "Emu/Memory/vm_reservation.h" #include "Emu/Cell/lv2/sys_ppu_thread.h" #include <QLabel> #include <QHBoxLayout> #include <QPushButton> #include <QMessageBox> #include <QCompleter> #include <charconv> #include "util/v128.hpp" #include "util/asm.hpp" constexpr auto qstr = QString::fromStdString; inline std::string sstr(const QString& _in) { return _in.toStdString(); } inline std::string sstr(const QVariant& _in) { return sstr(_in.toString()); } enum registers : int { ppu_r0, ppu_r31 = ppu_r0 + 31, ppu_f0, ppu_f31 = ppu_f0 + 31, ppu_ff0, ppu_ff31 = ppu_ff0 + 31, ppu_v0, ppu_v31 = ppu_v0 + 31, spu_r0 = utils::align(ppu_v31 + 1u, 128), spu_r127 = spu_r0 + 127, PPU_CR, PPU_LR, PPU_CTR, PPU_XER, PPU_VSCR, PPU_PRIO, PPU_PRIO2, // sys_mutex special priority protocol stuff PPU_FPSCR, PPU_VRSAVE, MFC_PEVENTS, MFC_EVENTS_MASK, MFC_EVENTS_COUNT, MFC_TAG_UPD, MFC_TAG_MASK, MFC_ATOMIC_STAT, SPU_SRR0, SPU_SNR1, SPU_SNR2, SPU_OUT_MBOX, SPU_OUT_INTR_MBOX, SPU_FPSCR, RESERVATION_LOST, PC, }; register_editor_dialog::register_editor_dialog(QWidget *parent, CPUDisAsm* _disasm, std::function<cpu_thread*()> func) : QDialog(parent) , m_disasm(_disasm) , m_get_cpu(std::move(func)) { setWindowTitle(tr("Edit registers")); setAttribute(Qt::WA_DeleteOnClose); QVBoxLayout* vbox_panel = new QVBoxLayout(); QHBoxLayout* hbox_panel = new QHBoxLayout(); QVBoxLayout* vbox_left_panel = new QVBoxLayout(); QVBoxLayout* vbox_right_panel = new QVBoxLayout(); QHBoxLayout* hbox_button_panel = new QHBoxLayout(); QLabel* t1_text = new QLabel(tr("Register: "), this); QLabel* t2_text = new QLabel(tr("Value (Hex):"), this); QPushButton* button_ok = new QPushButton(tr("&Ok")); QPushButton* button_cancel = new QPushButton(tr("&Cancel")); button_ok->setFixedWidth(80); button_cancel->setFixedWidth(80); m_register_combo = new QComboBox(this); 
m_register_combo->setMaxVisibleItems(20); m_register_combo->setEditable(true); m_register_combo->setInsertPolicy(QComboBox::NoInsert); m_register_combo->lineEdit()->setPlaceholderText(tr("Search a register")); m_register_combo->completer()->setCompletionMode(QCompleter::PopupCompletion); m_register_combo->completer()->setMaxVisibleItems(20); m_register_combo->completer()->setFilterMode(Qt::MatchContains); m_value_line = new QLineEdit(this); m_value_line->setFixedWidth(200); // Layouts vbox_left_panel->addWidget(t1_text); vbox_left_panel->addWidget(t2_text); vbox_right_panel->addWidget(m_register_combo); vbox_right_panel->addWidget(m_value_line); hbox_button_panel->addWidget(button_ok); hbox_button_panel->addWidget(button_cancel); hbox_button_panel->setAlignment(Qt::AlignCenter); if (const auto cpu = m_get_cpu()) { if (cpu->get_class() == thread_class::ppu) { for (int i = ppu_r0; i <= ppu_r31; i++) m_register_combo->addItem(qstr(fmt::format("r%d", i % 32)), i); for (int i = ppu_f0; i <= ppu_f31; i++) m_register_combo->addItem(qstr(fmt::format("f%d", i % 32)), i); for (int i = ppu_ff0; i <= ppu_ff31; i++) m_register_combo->addItem(qstr(fmt::format("ff%d", i % 32)), i); for (int i = ppu_v0; i <= ppu_v31; i++) m_register_combo->addItem(qstr(fmt::format("v%d", i % 32)), i); m_register_combo->addItem("CR", +PPU_CR); m_register_combo->addItem("LR", +PPU_LR); m_register_combo->addItem("CTR", PPU_CTR); m_register_combo->addItem("VRSAVE", +PPU_VRSAVE); //m_register_combo->addItem("XER", +PPU_XER); //m_register_combo->addItem("FPSCR", +PPU_FPSCR); //m_register_combo->addItem("VSCR", +PPU_VSCR); m_register_combo->addItem("Priority", +PPU_PRIO); //m_register_combo->addItem("Priority 2", +PPU_PRIO2); } else if (cpu->get_class() == thread_class::spu) { for (int i = spu_r0; i <= spu_r127; i++) m_register_combo->addItem(qstr(fmt::format("r%d", i % 128)), i); m_register_combo->addItem("MFC Pending Events", +MFC_PEVENTS); m_register_combo->addItem("MFC Events Mask", 
+MFC_EVENTS_MASK); m_register_combo->addItem("MFC Events Count", +MFC_EVENTS_COUNT); m_register_combo->addItem("MFC Tag Mask", +MFC_TAG_MASK); //m_register_combo->addItem("MFC Tag Update", +MFC_TAG_UPD); //m_register_combo->addItem("MFC Atomic Status", +MFC_ATOMIC_STAT); m_register_combo->addItem("SPU SNR1", +SPU_SNR1); m_register_combo->addItem("SPU SNR2", +SPU_SNR2); m_register_combo->addItem("SPU Out Mailbox", +SPU_OUT_MBOX); m_register_combo->addItem("SPU Out-Intr Mailbox", +SPU_OUT_INTR_MBOX); m_register_combo->addItem("SRR0", +SPU_SRR0); } m_register_combo->addItem("Reservation Clear", +RESERVATION_LOST); m_register_combo->addItem("PC", +PC); } // Main Layout hbox_panel->addLayout(vbox_left_panel); hbox_panel->addSpacing(10); hbox_panel->addLayout(vbox_right_panel); vbox_panel->addLayout(hbox_panel); vbox_panel->addSpacing(10); vbox_panel->addLayout(hbox_button_panel); setLayout(vbox_panel); // Events connect(button_ok, &QAbstractButton::clicked, this, [this](){ OnOkay(); accept(); }); connect(button_cancel, &QAbstractButton::clicked, this, &register_editor_dialog::reject); connect(m_register_combo, &QComboBox::currentTextChanged, this, [this](const QString&) { if (const auto qvar = m_register_combo->currentData(); qvar.canConvert<int>()) { updateRegister(qvar.toInt()); } }); updateRegister(m_register_combo->currentData().toInt()); } void register_editor_dialog::updateRegister(int reg) const { std::string str = sstr(tr("Error parsing register value!")); const auto cpu = m_get_cpu(); if (!cpu) { } else if (cpu->get_class() == thread_class::ppu) { const auto& ppu = *static_cast<const ppu_thread*>(cpu); if (reg >= ppu_r0 && reg <= ppu_v31) { const u32 reg_index = reg % 32; if (reg >= ppu_r0 && reg <= ppu_r31) str = fmt::format("%016llx", ppu.gpr[reg_index]); else if (reg >= ppu_ff0 && reg <= ppu_ff31) str = fmt::format("%g", ppu.fpr[reg_index]); else if (reg >= ppu_f0 && reg <= ppu_f31) str = fmt::format("%016llx", std::bit_cast<u64>(ppu.fpr[reg_index])); else 
if (reg >= ppu_v0 && reg <= ppu_v31) { const v128 r = ppu.vr[reg_index]; str = !r._u ? fmt::format("%08x$", r._u32[0]) : fmt::format("%08x %08x %08x %08x", r.u32r[0], r.u32r[1], r.u32r[2], r.u32r[3]); } } else if (reg == PPU_CR) str = fmt::format("%08x", ppu.cr.pack()); else if (reg == PPU_LR) str = fmt::format("%016llx", ppu.lr); else if (reg == PPU_CTR) str = fmt::format("%016llx", ppu.ctr); else if (reg == PPU_VRSAVE) str = fmt::format("%08x", ppu.vrsave); else if (reg == PPU_PRIO) str = fmt::format("%08x", ppu.prio.load().prio); else if (reg == RESERVATION_LOST) str = sstr(ppu.raddr ? tr("Lose reservation on OK") : tr("Reservation is inactive")); else if (reg == PC) str = fmt::format("%08x", ppu.cia); } else if (cpu->get_class() == thread_class::spu) { const auto& spu = *static_cast<const spu_thread*>(cpu); if (reg >= spu_r0 && reg <= spu_r127) { const u32 reg_index = reg % 128; const v128 r = spu.gpr[reg_index]; str = !r._u ? fmt::format("%08x$", r._u32[0]) : fmt::format("%08x %08x %08x %08x", r.u32r[0], r.u32r[1], r.u32r[2], r.u32r[3]); } else if (reg == MFC_PEVENTS) str = fmt::format("%08x", +spu.ch_events.load().events); else if (reg == MFC_EVENTS_MASK) str = fmt::format("%08x", +spu.ch_events.load().mask); else if (reg == MFC_EVENTS_COUNT) str = fmt::format("%u", +spu.ch_events.load().count); else if (reg == MFC_TAG_MASK) str = fmt::format("%08x", spu.ch_tag_mask); else if (reg == SPU_SRR0) str = fmt::format("%08x", spu.srr0); else if (reg == SPU_SNR1) str = fmt::format("%s", spu.ch_snr1); else if (reg == SPU_SNR2) str = fmt::format("%s", spu.ch_snr2); else if (reg == SPU_OUT_MBOX) str = fmt::format("%s", spu.ch_out_mbox); else if (reg == SPU_OUT_INTR_MBOX) str = fmt::format("%s", spu.ch_out_intr_mbox); else if (reg == RESERVATION_LOST) str = sstr(spu.raddr ? 
tr("Lose reservation on OK") : tr("Reservation is inactive")); else if (reg == PC) str = fmt::format("%08x", spu.pc); } m_value_line->setText(qstr(str)); } void register_editor_dialog::OnOkay() { const int reg = m_register_combo->currentData().toInt(); std::string value = sstr(m_value_line->text()); auto check_res = [](std::from_chars_result res, const char* end) { return res.ec == std::errc() && res.ptr == end; }; auto pad = [&](u32 size) { if (value.empty() || value.size() > size) { value.clear(); return; } value.insert(0, size - value.size(), '0'); }; if (!(reg >= ppu_ff0 && reg <= ppu_ff31)) { if (value.starts_with("0x") || value.starts_with("0X")) { value = value.substr(2); } } const auto cpu = m_get_cpu(); if (!cpu || value.empty()) { if (!cpu) { close(); } } else if (cpu->get_class() == thread_class::ppu) { auto& ppu = *static_cast<ppu_thread*>(cpu); if (reg >= ppu_r0 && reg <= ppu_v31) { const u32 reg_index = reg % 32; if ((reg >= ppu_r0 && reg <= ppu_r31) || (reg >= ppu_f0 && reg <= ppu_f31)) { pad(16); if (u64 reg_value; check_res(std::from_chars(value.c_str(), value.c_str() + 16, reg_value, 16), value.c_str() + 16)) { if (reg >= ppu_r0 && reg <= ppu_r31) ppu.gpr[reg_index] = reg_value; else if (reg >= ppu_f0 && reg <= ppu_f31) ppu.fpr[reg_index] = std::bit_cast<f64>(reg_value); return; } } else if (reg >= ppu_ff0 && reg <= ppu_ff31) { char* end{}; if (const double reg_value = std::strtod(value.c_str(), &end); end != value.c_str()) { ppu.fpr[reg_index] = static_cast<f64>(reg_value); return; } } else if (reg >= ppu_v0 && reg <= ppu_v31) { if (value.ends_with("$")) { pad(9); if (u32 broadcast; check_res(std::from_chars(value.c_str(), value.c_str() + 8, broadcast, 16), value.c_str() + 8)) { ppu.vr[reg_index] = v128::from32p(broadcast); return; } } value.erase(std::remove_if(value.begin(), value.end(), [](uchar c){ return std::isspace(c); }), value.end()); pad(32); u64 reg_value0, reg_value1; if (check_res(std::from_chars(value.c_str(), value.c_str() + 16, 
reg_value0, 16), value.c_str() + 16) && check_res(std::from_chars(value.c_str() + 16, value.c_str() + 32, reg_value1, 16), value.c_str() + 32)) { ppu.vr[reg_index] = v128::from64(reg_value0, reg_value1); return; } } } else if (reg == PPU_LR || reg == PPU_CTR) { pad(16); if (u64 reg_value; check_res(std::from_chars(value.c_str(), value.c_str() + 16, reg_value, 16), value.c_str() + 16)) { if (reg == PPU_LR) ppu.lr = reg_value; else if (reg == PPU_CTR) ppu.ctr = reg_value; return; } } else if (reg == PPU_CR || reg == PPU_VRSAVE || reg == PPU_PRIO || reg == PC) { pad(8); if (u32 reg_value; check_res(std::from_chars(value.c_str(), value.c_str() + 8, reg_value, 16), value.c_str() + 8)) { bool ok = true; if (reg == PPU_CR) ppu.cr.unpack(reg_value); else if (reg == PPU_VRSAVE) ppu.vrsave = reg_value; else if (reg == PPU_PRIO && !sys_ppu_thread_set_priority(ppu, ppu.id, reg_value)) {} else if (reg == PC && reg_value % 4 == 0 && vm::check_addr(reg_value, vm::page_executable)) ppu.cia = reg_value & -4; else ok = false; if (ok) return; } } else if (reg == RESERVATION_LOST) { if (const u32 raddr = ppu.raddr) vm::reservation_update(raddr); return; } } else if (cpu->get_class() == thread_class::spu) { auto& spu = *static_cast<spu_thread*>(cpu); if (reg >= spu_r0 && reg <= spu_r127) { const u32 reg_index = reg % 128; if (value.ends_with("$")) { pad(9); if (u32 broadcast; check_res(std::from_chars(value.c_str(), value.c_str() + 8, broadcast, 16), value.c_str() + 8)) { spu.gpr[reg_index] = v128::from32p(broadcast); return; } } value.erase(std::remove_if(value.begin(), value.end(), [](uchar c){ return std::isspace(c); }), value.end()); pad(32); u64 reg_value0, reg_value1; if (check_res(std::from_chars(value.c_str(), value.c_str() + 16, reg_value0, 16), value.c_str() + 16) && check_res(std::from_chars(value.c_str() + 16, value.c_str() + 32, reg_value1, 16), value.c_str() + 32)) { spu.gpr[reg_index] = v128::from64(reg_value0, reg_value1); return; } } else if (reg == MFC_PEVENTS || reg 
== MFC_EVENTS_MASK || reg == MFC_TAG_MASK || reg == SPU_SRR0 || reg == PC) { if (u32 reg_value; check_res(std::from_chars(value.c_str() + 24, value.c_str() + 32, reg_value, 16), value.c_str() + 32)) { bool ok = true; if (reg == MFC_PEVENTS && !(reg_value & ~SPU_EVENT_IMPLEMENTED)) spu.ch_events.atomic_op([&](spu_thread::ch_events_t& events){ events.events = reg_value; }); else if (reg == MFC_EVENTS_MASK && !(reg_value & ~SPU_EVENT_IMPLEMENTED)) spu.ch_events.atomic_op([&](spu_thread::ch_events_t& events){ events.mask = reg_value; }); else if (reg == MFC_EVENTS_COUNT && reg_value <= 1u) spu.ch_events.atomic_op([&](spu_thread::ch_events_t& events){ events.count = reg_value; }); else if (reg == MFC_TAG_MASK) spu.ch_tag_mask = reg_value; else if (reg == SPU_SRR0 && !(reg_value & ~0x3fffc)) spu.srr0 = reg_value; else if (reg == PC && !(reg_value & ~0x3fffc)) spu.pc = reg_value; else ok = false; if (ok) return; } } else if (reg == SPU_SNR1 || reg == SPU_SNR2 || reg == SPU_OUT_MBOX || reg == SPU_OUT_INTR_MBOX) { const bool count = (value != "empty"); if (count) { pad(8); } if (u32 reg_value = 0; !count || check_res(std::from_chars(value.c_str(), value.c_str() + 8, reg_value, 16), value.c_str() + 8)) { if (reg == SPU_SNR1) spu.ch_snr1.set_value(reg_value, count); else if (reg == SPU_SNR2) spu.ch_snr2.set_value(reg_value, count); else if (reg == SPU_OUT_MBOX) spu.ch_out_mbox.set_value(reg_value, count); else if (reg == SPU_OUT_INTR_MBOX) spu.ch_out_intr_mbox.set_value(reg_value, count); return; } } else if (reg == RESERVATION_LOST) { if (const u32 raddr = spu.raddr) vm::reservation_update(raddr); return; } } QMessageBox::critical(this, tr("Error"), tr("This value could not be converted.\nNo changes were made.")); }
14,367
C++
.cpp
373
35.217158
168
0.635268
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
5,145
screenshot_manager_dialog.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/screenshot_manager_dialog.cpp
#include "stdafx.h" #include "screenshot_manager_dialog.h" #include "screenshot_preview.h" #include "screenshot_item.h" #include "flow_widget.h" #include "qt_utils.h" #include "Utilities/File.h" #include "Emu/VFS.h" #include "Emu/system_utils.hpp" #include <QApplication> #include <QDir> #include <QDirIterator> #include <QScreen> #include <QVBoxLayout> #include <QtConcurrent> LOG_CHANNEL(gui_log, "GUI"); screenshot_manager_dialog::screenshot_manager_dialog(QWidget* parent) : QDialog(parent) { setWindowTitle(tr("Screenshots")); setAttribute(Qt::WA_DeleteOnClose); m_icon_size = QSize(160, 90); m_flow_widget = new flow_widget(this); m_flow_widget->setObjectName("flow_widget"); m_placeholder = QPixmap(m_icon_size); m_placeholder.fill(Qt::gray); connect(this, &screenshot_manager_dialog::signal_icon_preview, this, &screenshot_manager_dialog::show_preview); connect(this, &screenshot_manager_dialog::signal_entry_parsed, this, &screenshot_manager_dialog::add_entry); QVBoxLayout* layout = new QVBoxLayout; layout->setContentsMargins(0, 0, 0, 0); layout->addWidget(m_flow_widget); setLayout(layout); resize(QGuiApplication::primaryScreen()->availableSize() * 3 / 5); } screenshot_manager_dialog::~screenshot_manager_dialog() { m_abort_parsing = true; gui::utils::stop_future_watcher(m_parsing_watcher, true); } void screenshot_manager_dialog::add_entry(const QString& path) { screenshot_item* item = new screenshot_item(m_flow_widget); ensure(item->label); item->setToolTip(path); item->installEventFilter(this); item->label->setPixmap(m_placeholder); item->icon_path = path; item->icon_size = m_icon_size; connect(item, &screenshot_item::signal_icon_update, this, &screenshot_manager_dialog::update_icon); m_flow_widget->add_widget(item); } void screenshot_manager_dialog::show_preview(const QString& path) { screenshot_preview* preview = new screenshot_preview(path); preview->show(); } void screenshot_manager_dialog::update_icon(const QPixmap& pixmap) { if (screenshot_item* item = 
static_cast<screenshot_item*>(QObject::sender())) { if (item->label) { item->label->setPixmap(pixmap); } } } void screenshot_manager_dialog::reload() { m_abort_parsing = true; gui::utils::stop_future_watcher(m_parsing_watcher, true); const std::string screenshot_path_qt = fs::get_config_dir() + "screenshots/"; const std::string screenshot_path_cell = rpcs3::utils::get_hdd0_dir() + "/photo/"; m_flow_widget->clear(); m_abort_parsing = false; m_parsing_watcher.setFuture(QtConcurrent::map(m_parsing_threads, [this, screenshot_path_qt, screenshot_path_cell](int index) { if (index != 0) { return; } const QStringList filter{ QStringLiteral("*.png") }; for (const std::string& path : { screenshot_path_qt, screenshot_path_cell }) { if (m_abort_parsing) { return; } if (path.empty()) { gui_log.error("Screenshot manager: Trying to load screenshots from empty path!"); continue; } QDirIterator dir_iter(QString::fromStdString(path), filter, QDir::Files | QDir::NoDotAndDotDot, QDirIterator::Subdirectories); while (dir_iter.hasNext() && !m_abort_parsing) { Q_EMIT signal_entry_parsed(dir_iter.next()); } } })); } void screenshot_manager_dialog::showEvent(QShowEvent* event) { QDialog::showEvent(event); reload(); } bool screenshot_manager_dialog::eventFilter(QObject* watched, QEvent* event) { if (event && event->type() == QEvent::MouseButtonDblClick && static_cast<QMouseEvent*>(event)->button() == Qt::LeftButton) { if (screenshot_item* item = static_cast<screenshot_item*>(watched)) { Q_EMIT signal_icon_preview(item->icon_path); return true; } } return false; }
3,708
C++
.cpp
116
29.646552
129
0.736827
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
5,146
about_dialog.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/about_dialog.cpp
#include "about_dialog.h" #include "ui_about_dialog.h" #include "rpcs3_version.h" #include "qt_utils.h" #include <QDesktopServices> #include <QUrl> #include <QSvgWidget> about_dialog::about_dialog(QWidget* parent) : QDialog(parent), ui(new Ui::about_dialog) { ui->setupUi(this); ui->close->setDefault(true); ui->icon->load(QStringLiteral(":/rpcs3.svg")); ui->version->setText(tr("RPCS3 Version: %1").arg(QString::fromStdString(rpcs3::get_verbose_version()))); ui->description->setText(tr( R"( <p style="white-space: nowrap;"> RPCS3 is an open-source Sony PlayStation 3 emulator and debugger.<br> It is written in C++ for Windows, Linux, FreeBSD and MacOS funded with <a %0 href="https://rpcs3.net/patreon">Patreon</a>.<br> Our developers and contributors are always working hard to ensure this project is the best that it can be.<br> There are still plenty of implementations to make and optimizations to do. </p> )" ).arg(gui::utils::get_link_style())); // Events connect(ui->gitHub, &QPushButton::clicked, [] { QDesktopServices::openUrl(QUrl("https://www.github.com/RPCS3")); }); connect(ui->website, &QPushButton::clicked, [] { QDesktopServices::openUrl(QUrl("https://rpcs3.net")); }); connect(ui->forum, &QPushButton::clicked, [] { QDesktopServices::openUrl(QUrl("https://forums.rpcs3.net")); }); connect(ui->patreon, &QPushButton::clicked, [] { QDesktopServices::openUrl(QUrl("https://rpcs3.net/patreon")); }); connect(ui->discord, &QPushButton::clicked, [] { QDesktopServices::openUrl(QUrl("https://discord.me/RPCS3")); }); connect(ui->wiki, &QPushButton::clicked, [] { QDesktopServices::openUrl(QUrl("https://wiki.rpcs3.net/index.php?title=Main_Page")); }); connect(ui->close, &QPushButton::clicked, this, &QWidget::close); } about_dialog::~about_dialog() { }
1,813
C++
.cpp
35
49.485714
135
0.715011
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
5,147
vfs_dialog.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/vfs_dialog.cpp
#include "vfs_dialog.h" #include "vfs_dialog_tab.h" #include "vfs_dialog_usb_tab.h" #include "gui_settings.h" #include <QTabWidget> #include <QDialogButtonBox> #include <QPushButton> #include <QMessageBox> #include <QVBoxLayout> #include "Emu/System.h" #include "Emu/vfs_config.h" vfs_dialog::vfs_dialog(std::shared_ptr<gui_settings> _gui_settings, QWidget* parent) : QDialog(parent), m_gui_settings(std::move(_gui_settings)) { setWindowTitle(tr("Virtual File System")); setObjectName("vfs_dialog"); QTabWidget* tabs = new QTabWidget(); tabs->setUsesScrollButtons(false); g_cfg_vfs.load(); // Create tabs vfs_dialog_tab* emulator_tab = new vfs_dialog_tab("$(EmulatorDir)", gui::fs_emulator_dir_list, &g_cfg_vfs.emulator_dir, m_gui_settings, this); vfs_dialog_tab* dev_hdd0_tab = new vfs_dialog_tab("dev_hdd0", gui::fs_dev_hdd0_list, &g_cfg_vfs.dev_hdd0, m_gui_settings, this); vfs_dialog_tab* dev_hdd1_tab = new vfs_dialog_tab("dev_hdd1", gui::fs_dev_hdd1_list, &g_cfg_vfs.dev_hdd1, m_gui_settings, this); vfs_dialog_tab* dev_flash_tab = new vfs_dialog_tab("dev_flash", gui::fs_dev_flash_list, &g_cfg_vfs.dev_flash, m_gui_settings, this); vfs_dialog_tab* dev_flash2_tab = new vfs_dialog_tab("dev_flash2", gui::fs_dev_flash2_list, &g_cfg_vfs.dev_flash2, m_gui_settings, this); vfs_dialog_tab* dev_flash3_tab = new vfs_dialog_tab("dev_flash3", gui::fs_dev_flash3_list, &g_cfg_vfs.dev_flash3, m_gui_settings, this); vfs_dialog_tab* dev_bdvd_tab = new vfs_dialog_tab("dev_bdvd", gui::fs_dev_bdvd_list, &g_cfg_vfs.dev_bdvd, m_gui_settings, this); vfs_dialog_usb_tab* dev_usb_tab = new vfs_dialog_usb_tab(&g_cfg_vfs.dev_usb, m_gui_settings, this); vfs_dialog_tab* games_tab = new vfs_dialog_tab("games", gui::fs_games_list, &g_cfg_vfs.games_dir, m_gui_settings, this); tabs->addTab(emulator_tab, "$(EmulatorDir)"); tabs->addTab(dev_hdd0_tab, "dev_hdd0"); tabs->addTab(dev_hdd1_tab, "dev_hdd1"); tabs->addTab(dev_flash_tab, "dev_flash"); tabs->addTab(dev_flash2_tab, "dev_flash2"); 
tabs->addTab(dev_flash3_tab, "dev_flash3"); tabs->addTab(dev_bdvd_tab, "dev_bdvd"); tabs->addTab(dev_usb_tab, "dev_usb"); tabs->addTab(games_tab, "games"); // Create buttons QDialogButtonBox* buttons = new QDialogButtonBox(QDialogButtonBox::Close | QDialogButtonBox::Save | QDialogButtonBox::RestoreDefaults); buttons->button(QDialogButtonBox::RestoreDefaults)->setText(tr("Reset Directories")); buttons->button(QDialogButtonBox::Save)->setDefault(true); connect(buttons, &QDialogButtonBox::clicked, this, [this, buttons, tabs](QAbstractButton* button) { if (button == buttons->button(QDialogButtonBox::RestoreDefaults)) { if (QMessageBox::question(this, tr("Confirm Reset"), tr("Reset all file system directories?")) != QMessageBox::Yes) return; for (int i = 0; i < tabs->count(); ++i) { if (tabs->tabText(i) == "dev_usb") { static_cast<vfs_dialog_usb_tab*>(tabs->widget(i))->reset(); } else { static_cast<vfs_dialog_tab*>(tabs->widget(i))->reset(); } } } else if (button == buttons->button(QDialogButtonBox::Save)) { for (int i = 0; i < tabs->count(); ++i) { if (tabs->tabText(i) == "dev_usb") { static_cast<vfs_dialog_usb_tab*>(tabs->widget(i))->set_settings(); } else { static_cast<vfs_dialog_tab*>(tabs->widget(i))->set_settings(); } } g_cfg_vfs.save(); // Recreate folder structure for new VFS paths if (Emu.IsStopped()) { Emu.Init(); } accept(); } else if (button == buttons->button(QDialogButtonBox::Close)) { reject(); } }); QVBoxLayout* vbox = new QVBoxLayout; vbox->addWidget(tabs); vbox->addWidget(buttons); setLayout(vbox); buttons->button(QDialogButtonBox::Save)->setFocus(); }
3,756
C++
.cpp
92
37.771739
143
0.693805
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
5,148
curl_handle.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/curl_handle.cpp
#include "stdafx.h" #include "curl_handle.h" #include "Emu/system_utils.hpp" #include "util/logs.hpp" #ifdef _WIN32 #include "Utilities/StrUtil.h" #endif LOG_CHANNEL(network_log, "NET"); namespace rpcs3::curl { curl_handle::curl_handle() { reset_error_buffer(); m_curl = curl_easy_init(); CURLcode err = curl_easy_setopt(m_curl, CURLOPT_ERRORBUFFER, m_error_buffer.data()); if (err != CURLE_OK) network_log.error("curl_easy_setopt(CURLOPT_ERRORBUFFER): %s", curl_easy_strerror(err)); m_uses_error_buffer = err == CURLE_OK; err = curl_easy_setopt(m_curl, CURLOPT_VERBOSE, g_curl_verbose); if (err != CURLE_OK) network_log.error("curl_easy_setopt(CURLOPT_VERBOSE, %d): %s", g_curl_verbose, curl_easy_strerror(err)); #ifdef _WIN32 // Tell curl to use the native CA store for certificate verification err = curl_easy_setopt(m_curl, CURLOPT_SSL_OPTIONS, CURLSSLOPT_NATIVE_CA); if (err != CURLE_OK) network_log.error("curl_easy_setopt(CURLOPT_SSL_OPTIONS): %s", curl_easy_strerror(err)); #endif } curl_handle::~curl_handle() { curl_easy_cleanup(m_curl); } CURL* curl_handle::get_curl() const { return m_curl; } void curl_handle::reset_error_buffer() { ensure(m_error_buffer.size() == CURL_ERROR_SIZE); m_error_buffer[0] = 0; } std::string curl_handle::get_verbose_error(CURLcode code) const { if (m_uses_error_buffer) { ensure(m_error_buffer.size() == CURL_ERROR_SIZE); if (m_error_buffer[0]) { return fmt::format("Curl error (%d): %s\nDetails: %s", static_cast<int>(code), curl_easy_strerror(code), m_error_buffer.data()); } } return fmt::format("Curl error (%d): %s", static_cast<int>(code), curl_easy_strerror(code)); } } #ifdef _WIN32 // Functions exported from our user_settings.h in WolfSSL, implemented in RPCS3 extern "C" { FILE* wolfSSL_fopen_utf8(const char* name, const char* mode) { return _wfopen(utf8_to_wchar(name).c_str(), utf8_to_wchar(mode).c_str()); } int wolfSSL_stat_utf8(const char* path, struct _stat* buffer) { return _wstat(utf8_to_wchar(path).c_str(), buffer); } } #endif
2,042
C++
.cpp
65
29.646154
131
0.719101
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
5,149
custom_table_widget_item.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/custom_table_widget_item.cpp
#include "custom_table_widget_item.h" #include "Utilities/StrFmt.h" #include <QDateTime> custom_table_widget_item::custom_table_widget_item(const std::string& text, int sort_role, const QVariant& sort_value) : movie_item(QString::fromStdString(text).simplified()) // simplified() forces single line text { if (sort_role != Qt::DisplayRole) { setData(sort_role, sort_value, true); } } custom_table_widget_item::custom_table_widget_item(const QString& text, int sort_role, const QVariant& sort_value) : movie_item(text.simplified()) // simplified() forces single line text { if (sort_role != Qt::DisplayRole) { setData(sort_role, sort_value, true); } } bool custom_table_widget_item::operator<(const QTableWidgetItem& other) const { if (m_sort_role == Qt::DisplayRole) { return QTableWidgetItem::operator<(other); } const QVariant data_l = data(m_sort_role); const QVariant data_r = other.data(m_sort_role); const int type_l = data_l.metaType().id(); const int type_r = data_r.metaType().id(); ensure(type_l == type_r); switch (type_l) { case QMetaType::Type::Bool: case QMetaType::Type::Int: return data_l.toInt() < data_r.toInt(); case QMetaType::Type::UInt: return data_l.toUInt() < data_r.toUInt(); case QMetaType::Type::LongLong: return data_l.toLongLong() < data_r.toLongLong(); case QMetaType::Type::ULongLong: return data_l.toULongLong() < data_r.toULongLong(); case QMetaType::Type::Double: return data_l.toDouble() < data_r.toDouble(); case QMetaType::Type::QDate: return data_l.toDate() < data_r.toDate(); case QMetaType::Type::QTime: return data_l.toTime() < data_r.toTime(); case QMetaType::Type::QDateTime: return data_l.toDateTime() < data_r.toDateTime(); case QMetaType::Type::Char: case QMetaType::Type::QString: return data_l.toString() < data_r.toString(); default: fmt::throw_exception("Unimplemented type %s", QMetaType(type_l).name()); } } void custom_table_widget_item::setData(int role, const QVariant& value, bool assign_sort_role) { if (assign_sort_role) { m_sort_role = 
role; } QTableWidgetItem::setData(role, value); }
2,134
C++
.cpp
64
31.140625
118
0.71969
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
5,150
shortcut_settings.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/shortcut_settings.cpp
#include "shortcut_settings.h" using namespace gui::shortcuts; template <> void fmt_class_string<shortcut>::format(std::string& out, u64 arg) { format_enum(out, arg, [](shortcut value) { switch (value) { case shortcut::mw_start: return "mw_start"; case shortcut::mw_stop: return "mw_stop"; case shortcut::mw_pause: return "mw_pause"; case shortcut::mw_restart: return "mw_restart"; case shortcut::mw_toggle_fullscreen: return "mw_toggle_fullscreen"; case shortcut::mw_exit_fullscreen: return "mw_exit_fullscreen"; case shortcut::mw_refresh: return "mw_refresh"; case shortcut::gw_toggle_fullscreen: return "gw_toggle_fullscreen"; case shortcut::gw_exit_fullscreen: return "gw_exit_fullscreen"; case shortcut::gw_log_mark: return "gw_log_mark"; case shortcut::gw_mouse_lock: return "gw_mouse_lock"; case shortcut::gw_screenshot: return "gw_screenshot"; case shortcut::gw_toggle_recording: return "gw_toggle_recording"; case shortcut::gw_pause_play: return "gw_pause_play"; case shortcut::gw_savestate: return "gw_savestate"; case shortcut::gw_restart: return "gw_restart"; case shortcut::gw_rsx_capture: return "gw_rsx_capture"; case shortcut::gw_frame_limit: return "gw_frame_limit"; case shortcut::gw_toggle_mouse_and_keyboard: return "gw_toggle_mouse_and_keyboard"; case shortcut::gw_home_menu: return "gw_home_menu"; case shortcut::count: return "count"; } return unknown; }); } template <> void fmt_class_string<shortcut_handler_id>::format(std::string& out, u64 arg) { format_enum(out, arg, [](gui::shortcuts::shortcut_handler_id value) { switch (value) { case shortcut_handler_id::main_window: return "main_window"; case shortcut_handler_id::game_window: return "game_window"; } return unknown; }); } shortcut_settings::shortcut_settings() : shortcut_map({ { shortcut::mw_start, shortcut_info{ "main_window_start", tr("Start"), "Ctrl+E", shortcut_handler_id::main_window } }, { shortcut::mw_stop, shortcut_info{ "main_window_stop", tr("Stop"), "Ctrl+S", shortcut_handler_id::main_window } }, { 
shortcut::mw_pause, shortcut_info{ "main_window_pause", tr("Pause"), "Ctrl+P", shortcut_handler_id::main_window } }, { shortcut::mw_restart, shortcut_info{ "main_window_restart", tr("Restart"), "Ctrl+R", shortcut_handler_id::main_window } }, { shortcut::mw_toggle_fullscreen, shortcut_info{ "main_window_toggle_fullscreen", tr("Toggle Fullscreen"), "Alt+Return", shortcut_handler_id::main_window } }, { shortcut::mw_exit_fullscreen, shortcut_info{ "main_window_exit_fullscreen", tr("Exit Fullscreen"), "Esc", shortcut_handler_id::main_window } }, { shortcut::mw_refresh, shortcut_info{ "main_window_refresh", tr("Refresh"), "Ctrl+F5", shortcut_handler_id::main_window } }, { shortcut::gw_toggle_fullscreen, shortcut_info{ "game_window_toggle_fullscreen", tr("Toggle Fullscreen"), "Alt+Return", shortcut_handler_id::game_window } }, { shortcut::gw_exit_fullscreen, shortcut_info{ "game_window_exit_fullscreen", tr("Exit Fullscreen"), "Esc", shortcut_handler_id::game_window } }, { shortcut::gw_log_mark, shortcut_info{ "game_window_log_mark", tr("Add Log Mark"), "Alt+L", shortcut_handler_id::game_window } }, { shortcut::gw_mouse_lock, shortcut_info{ "game_window_mouse_lock", tr("Mouse lock"), "Ctrl+L", shortcut_handler_id::game_window } }, { shortcut::gw_toggle_recording, shortcut_info{ "game_window_toggle_recording", tr("Start/Stop Recording"), "F11", shortcut_handler_id::game_window } }, { shortcut::gw_screenshot, shortcut_info{ "game_window_screenshot", tr("Screenshot"), "F12", shortcut_handler_id::game_window } }, { shortcut::gw_pause_play, shortcut_info{ "game_window_pause_play", tr("Pause/Play"), "Ctrl+P", shortcut_handler_id::game_window } }, { shortcut::gw_savestate, shortcut_info{ "game_window_savestate", tr("Savestate"), "Ctrl+S", shortcut_handler_id::game_window } }, { shortcut::gw_restart, shortcut_info{ "game_window_restart", tr("Restart"), "Ctrl+R", shortcut_handler_id::game_window } }, { shortcut::gw_rsx_capture, shortcut_info{ "game_window_rsx_capture", tr("RSX 
Capture"), "Alt+C", shortcut_handler_id::game_window } }, { shortcut::gw_frame_limit, shortcut_info{ "game_window_frame_limit", tr("Toggle Framelimit"), "Ctrl+F10", shortcut_handler_id::game_window } }, { shortcut::gw_toggle_mouse_and_keyboard, shortcut_info{ "game_window_toggle_mouse_and_keyboard", tr("Toggle Keyboard"), "Ctrl+F11", shortcut_handler_id::game_window } }, { shortcut::gw_home_menu, shortcut_info{ "gw_home_menu", tr("Open Home Menu"), "Shift+F10", shortcut_handler_id::game_window } }, }) { } shortcut_settings::~shortcut_settings() { } gui_save shortcut_settings::get_shortcut_gui_save(const QString& shortcut_name) { const auto it = std::find_if(shortcut_map.begin(), shortcut_map.end(), [&](const auto& entry) { return entry.second.name == shortcut_name; }); if (it != shortcut_map.cend()) { return gui_save(gui::sc, it->second.name, it->second.key_sequence); } return gui_save(); } QKeySequence shortcut_settings::get_key_sequence(const shortcut_info& entry, const std::shared_ptr<gui_settings>& gui_settings) { if (!gui_settings) return {}; const QString saved_value = gui_settings->GetValue(get_shortcut_gui_save(entry.name)).toString(); QKeySequence key_sequence = QKeySequence::fromString(saved_value); if (key_sequence.isEmpty()) { // Use the default shortcut if no shortcut was configured key_sequence = QKeySequence::fromString(entry.key_sequence); } return key_sequence; }
5,545
C++
.cpp
97
54.649485
172
0.720228
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
5,151
log_viewer.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/log_viewer.cpp
#include "stdafx.h" #include "log_viewer.h" #include "gui_settings.h" #include "syntax_highlighter.h" #include "config_checker.h" #include "Crypto/unzip.h" #include <QActionGroup> #include <QApplication> #include <QClipboard> #include <QMenu> #include <QFile> #include <QFileDialog> #include <QInputDialog> #include <QTextStream> #include <QHBoxLayout> #include <QFontDatabase> #include <QMimeData> #include <QScrollBar> #include <QMessageBox> #include <deque> #include <map> LOG_CHANNEL(gui_log, "GUI"); log_viewer::log_viewer(std::shared_ptr<gui_settings> gui_settings) : m_gui_settings(std::move(gui_settings)) { setWindowTitle(tr("Log Viewer")); setObjectName("log_viewer"); setAttribute(Qt::WA_DeleteOnClose); setAttribute(Qt::WA_StyledBackground); setAcceptDrops(true); setMinimumSize(QSize(200, 150)); // seems fine on win 10 resize(QSize(620, 395)); m_path_last = m_gui_settings->GetValue(gui::fd_log_viewer).toString(); m_show_timestamps = m_gui_settings->GetValue(gui::lv_show_timestamps).toBool(); m_show_threads = m_gui_settings->GetValue(gui::lv_show_threads).toBool(); m_log_levels = std::bitset<32>(m_gui_settings->GetValue(gui::lv_log_levels).toUInt()); m_log_text = new QPlainTextEdit(this); m_log_text->setReadOnly(true); m_log_text->setContextMenuPolicy(Qt::CustomContextMenu); m_log_text->setWordWrapMode(QTextOption::NoWrap); m_log_text->setFont(QFontDatabase::systemFont(QFontDatabase::FixedFont)); m_log_text->installEventFilter(this); // m_log_text syntax highlighter m_log_highlighter = new LogHighlighter(m_log_text->document()); QHBoxLayout* layout = new QHBoxLayout(); layout->addWidget(m_log_text); setLayout(layout); connect(m_log_text, &QWidget::customContextMenuRequested, this, &log_viewer::show_context_menu); } void log_viewer::show_context_menu(const QPoint& pos) { QMenu menu; QAction* clear = new QAction(tr("&Clear")); QAction* copy = new QAction(tr("&Copy")); QAction* open = new QAction(tr("&Open log file")); QAction* save = new QAction(tr("&Save filtered 
log")); QAction* filter = new QAction(tr("&Filter log")); QAction* config = new QAction(tr("&Check config")); QAction* timestamps = new QAction(tr("&Show Timestamps")); timestamps->setCheckable(true); timestamps->setChecked(m_show_timestamps); QAction* threads = new QAction(tr("&Show Threads")); threads->setCheckable(true); threads->setChecked(m_show_threads); QAction* last_actions_only = new QAction(tr("&Last actions only")); last_actions_only->setCheckable(true); last_actions_only->setChecked(m_last_actions_only); QActionGroup* log_level_acts = new QActionGroup(this); QAction* fatal_act = new QAction(tr("Fatal"), log_level_acts); QAction* error_act = new QAction(tr("Error"), log_level_acts); QAction* todo_act = new QAction(tr("Todo"), log_level_acts); QAction* success_act = new QAction(tr("Success"), log_level_acts); QAction* warning_act = new QAction(tr("Warning"), log_level_acts); QAction* notice_act = new QAction(tr("Notice"), log_level_acts); QAction* trace_act = new QAction(tr("Trace"), log_level_acts); log_level_acts->setExclusive(false); auto init_action = [this](QAction* act, logs::level logLevel) { act->setCheckable(true); act->setChecked(m_log_levels.test(static_cast<u32>(logLevel))); // This sets the log level properly when the action is triggered. 
connect(act, &QAction::triggered, this, [this, logLevel](bool checked) { m_log_levels.set(static_cast<u32>(logLevel), checked); m_gui_settings->SetValue(gui::lv_log_levels, ::narrow<u32>(m_log_levels.to_ulong())); filter_log(); }); }; init_action(fatal_act, logs::level::fatal); init_action(error_act, logs::level::error); init_action(todo_act, logs::level::todo); init_action(success_act, logs::level::success); init_action(warning_act, logs::level::warning); init_action(notice_act, logs::level::notice); init_action(trace_act, logs::level::trace); menu.addAction(copy); menu.addSeparator(); menu.addAction(open); menu.addSeparator(); menu.addAction(save); menu.addSeparator(); menu.addAction(config); menu.addSeparator(); menu.addAction(filter); menu.addSeparator(); menu.addAction(timestamps); menu.addSeparator(); menu.addAction(threads); menu.addSeparator(); menu.addAction(last_actions_only); menu.addSeparator(); menu.addActions(log_level_acts->actions()); menu.addSeparator(); menu.addAction(clear); connect(copy, &QAction::triggered, this, [this]() { m_log_text->copy(); }); connect(clear, &QAction::triggered, this, [this]() { m_log_text->clear(); m_full_log.clear(); }); connect(open, &QAction::triggered, this, [this]() { const QString file_path = QFileDialog::getOpenFileName(this, tr("Select log file"), m_path_last, tr("Log files (*.log *.gz);;All files (*.*)")); if (file_path.isEmpty()) return; m_path_last = file_path; show_log(); }); connect(save, &QAction::triggered, this, [this]() { const QString file_path = QFileDialog::getSaveFileName(this, tr("Save to file"), m_path_last, tr("Log files (*.log *.gz);;All files (*.*)")); if (file_path.isEmpty()) return; if (fs::file log_file; log_file.open(file_path.toStdString(), fs::rewrite)) { const QByteArray bytes = m_log_text->toPlainText().toUtf8(); if (file_path.endsWith(".gz")) { if (!zip(bytes.constData(), bytes.size(), log_file)) { gui_log.error("Failed to zip filtered log to file '%s'", file_path); return; } } else { 
log_file.write(bytes.constData(), bytes.size()); } gui_log.success("Exported filtered log to file '%s'", file_path); } else { gui_log.error("Failed to export filtered log to file '%s'", file_path); } }); connect(config, &QAction::triggered, this, [this]() { config_checker* dlg = new config_checker(this, m_full_log, true); dlg->exec(); }); connect(filter, &QAction::triggered, this, [this]() { m_filter_term = QInputDialog::getText(this, tr("Filter log"), tr("Enter text"), QLineEdit::EchoMode::Normal, m_filter_term); filter_log(); }); connect(threads, &QAction::toggled, this, [this](bool checked) { m_show_threads = checked; m_gui_settings->SetValue(gui::lv_show_threads, m_show_threads); filter_log(); }); connect(timestamps, &QAction::toggled, this, [this](bool checked) { m_show_timestamps = checked; m_gui_settings->SetValue(gui::lv_show_timestamps, m_show_timestamps); filter_log(); }); connect(last_actions_only, &QAction::toggled, this, [this](bool checked) { m_last_actions_only = checked; filter_log(); }); const auto obj = qobject_cast<QPlainTextEdit*>(sender()); QPoint origin; if (obj == m_log_text) { origin = m_log_text->viewport()->mapToGlobal(pos); } else { origin = mapToGlobal(pos); } menu.exec(origin); } void log_viewer::show_log() { if (m_path_last.isEmpty()) { return; } m_full_log.clear(); m_log_text->clear(); m_log_text->setPlainText(tr("Loading file...")); QApplication::processEvents(); bool failed = false; if (m_path_last.endsWith(".gz")) { if (fs::file file{m_path_last.toStdString()}) { const std::vector<u8> decompressed = unzip(file.to_vector<u8>()); m_full_log = QString::fromUtf8(reinterpret_cast<const char*>(decompressed.data()), decompressed.size()); } else { failed = true; } } else if (QFile file(m_path_last); file.exists() && file.open(QIODevice::ReadOnly | QIODevice::Text)) { // TODO: Due to a bug in Qt 6.5.2 QTextStream::readAll is ridiculously slow to the point where it gets stuck on large files. 
// In Qt 5.15.2 this was much faster than QFile::readAll. Use QTextStream again once this bug is fixed upstream. //QTextStream stream(&file); //m_full_log = stream.readAll(); m_full_log = file.readAll(); } else { failed = true; } if (failed) { gui_log.error("log_viewer: Failed to open %s", m_path_last); m_log_text->setPlainText(tr("Failed to open '%0'").arg(m_path_last)); } else { m_gui_settings->SetValue(gui::fd_log_viewer, m_path_last); m_full_log.replace('\0', '0'); } filter_log(); } void log_viewer::set_text_and_keep_position(const QString& text) { m_log_text->setPlainText(tr("Pasting...")); QApplication::processEvents(); const int pos = m_log_text->verticalScrollBar()->value(); m_log_text->setPlainText(text); m_log_text->verticalScrollBar()->setValue(pos); } void log_viewer::filter_log() { if (m_full_log.isEmpty()) { set_text_and_keep_position(m_full_log); return; } m_log_text->setPlainText(tr("Filtering...")); std::vector<QString> excluded_log_levels; if (!m_log_levels.test(static_cast<u32>(logs::level::fatal))) excluded_log_levels.push_back("·F "); if (!m_log_levels.test(static_cast<u32>(logs::level::error))) excluded_log_levels.push_back("·E "); if (!m_log_levels.test(static_cast<u32>(logs::level::todo))) excluded_log_levels.push_back("·U "); if (!m_log_levels.test(static_cast<u32>(logs::level::success))) excluded_log_levels.push_back("·S "); if (!m_log_levels.test(static_cast<u32>(logs::level::warning))) excluded_log_levels.push_back("·W "); if (!m_log_levels.test(static_cast<u32>(logs::level::notice))) excluded_log_levels.push_back("·! 
"); if (!m_log_levels.test(static_cast<u32>(logs::level::trace))) excluded_log_levels.push_back("·T "); if (m_filter_term.isEmpty() && excluded_log_levels.empty() && m_show_timestamps && m_show_threads && !m_last_actions_only) { set_text_and_keep_position(m_full_log); return; } QString result; QTextStream stream(&m_full_log); const QRegularExpression thread_regexp("\\{.*\\} "); const QRegularExpression timestamp_regexp("\\d?\\d:\\d\\d:\\d\\d\\.\\d\\d\\d\\d\\d\\d "); const auto add_line = [this, &result, &excluded_log_levels, &timestamp_regexp, &thread_regexp](QString& line) { bool exclude_line = false; for (const QString& log_level_prefix : excluded_log_levels) { if (line.startsWith(log_level_prefix)) { exclude_line = true; break; } } if (exclude_line) { return; } if (m_filter_term.isEmpty() || line.contains(m_filter_term)) { if (line.isEmpty()) { result += "\n"; return; } if (!m_show_timestamps) { line.remove(timestamp_regexp); } if (!m_show_threads) { line.remove(thread_regexp); } if (!line.isEmpty()) { result += line + "\n"; } } }; if (m_last_actions_only) { if (const int start_pos = m_full_log.lastIndexOf("LDR: Used configuration:"); start_pos >= 0) { if (!stream.seek(start_pos)) // TODO: is this correct ? 
{ gui_log.error("Log viewer failed to seek to pos %d of log.", start_pos); } std::map<QString, std::deque<QString>> all_thread_actions; for (QString line = stream.readLine(); !line.isNull(); line = stream.readLine()) { if (const QRegularExpressionMatch match = thread_regexp.match(line); match.hasMatch()) { if (const QString thread_name = match.captured(); !thread_name.isEmpty()) { std::deque<QString>& actions = all_thread_actions[thread_name]; actions.push_back(line); if (actions.size() > 10) { actions.pop_front(); } } } } for (auto& [thread_name, actions] : all_thread_actions) { for (QString& line : actions) { add_line(line); } } set_text_and_keep_position(result); return; } QMessageBox::information(this, tr("Ooops!"), tr("Cannot find any game boot!")); // Pass through to regular log filter } if (!stream.seek(0)) { gui_log.error("Log viewer failed to seek to beginning of log."); } for (QString line = stream.readLine(); !line.isNull(); line = stream.readLine()) { add_line(line); }; set_text_and_keep_position(result); } bool log_viewer::is_valid_file(const QMimeData& md, bool save) { const QList<QUrl> urls = md.urls(); if (urls.count() > 1) { return false; } const QString suffix = QFileInfo(urls[0].fileName()).suffix().toLower(); if (suffix == "log" || suffix == "gz") { if (save) { m_path_last = urls[0].toLocalFile(); } return true; } return false; } void log_viewer::dropEvent(QDropEvent* ev) { if (is_valid_file(*ev->mimeData(), true)) { show_log(); } } void log_viewer::dragEnterEvent(QDragEnterEvent* ev) { if (is_valid_file(*ev->mimeData())) { ev->accept(); } } void log_viewer::dragMoveEvent(QDragMoveEvent* ev) { if (is_valid_file(*ev->mimeData())) { ev->accept(); } } void log_viewer::dragLeaveEvent(QDragLeaveEvent* ev) { ev->accept(); } bool log_viewer::eventFilter(QObject* object, QEvent* event) { if (object != m_log_text) { return QWidget::eventFilter(object, event); } if (event->type() == QEvent::KeyPress) { QKeyEvent* e = static_cast<QKeyEvent*>(event); if (e 
&& !e->isAutoRepeat() && e->modifiers() == Qt::ControlModifier && e->key() == Qt::Key_F) { if (m_find_dialog && m_find_dialog->isVisible()) m_find_dialog->close(); m_find_dialog.reset(new find_dialog(static_cast<QPlainTextEdit*>(object), this)); } } return QWidget::eventFilter(object, event); }
13,093
C++
.cpp
421
28.32304
146
0.688409
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
5,152
pkg_install_dialog.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/pkg_install_dialog.cpp
#include "pkg_install_dialog.h" #include "game_compatibility.h" #include "numbered_widget_item.h" #include "richtext_item_delegate.h" #include <QDialogButtonBox> #include <QPushButton> #include <QFileInfo> #include <QHBoxLayout> #include <QLabel> #include <QToolButton> enum Roles { FullPathRole = Qt::UserRole + 0, ChangelogRole = Qt::UserRole + 1, TitleRole = Qt::UserRole + 2, TitleIdRole = Qt::UserRole + 3, VersionRole = Qt::UserRole + 4, }; pkg_install_dialog::pkg_install_dialog(const QStringList& paths, game_compatibility* compat, QWidget* parent) : QDialog(parent) { m_dir_list = new QListWidget(this); m_dir_list->setItemDelegate(new richtext_item_delegate(m_dir_list->itemDelegate())); for (const QString& path : paths) { const compat::package_info info = game_compatibility::GetPkgInfo(path, compat); if (!info.is_valid) { continue; } const QFileInfo file_info(path); // We have to build our complicated localized string in some annoying manner QString accumulated_info; QString tooltip; const auto append_comma = [&accumulated_info]() { if (!accumulated_info.isEmpty()) { accumulated_info += ", "; } }; if (!info.title_id.isEmpty()) { accumulated_info = info.title_id; } if (info.type != compat::package_type::other) { append_comma(); if (info.type == compat::package_type::dlc) { accumulated_info += tr("DLC", "Package type info (DLC)"); } else { accumulated_info += tr("Update", "Package type info (Update)"); } } else if (!info.local_cat.isEmpty()) { append_comma(); accumulated_info += tr("%0", "Package type info").arg(info.local_cat); } if (!info.version.isEmpty()) { append_comma(); accumulated_info += tr("v.%0", "Version info").arg(info.version); } if (info.changelog.isEmpty()) { tooltip = tr("No info", "Changelog info placeholder"); } else { tooltip = tr("Changelog:\n\n%0", "Changelog info").arg(info.changelog); } append_comma(); accumulated_info += file_info.fileName(); const QString text = tr("<b>%0</b> (%2)", "Package 
text").arg(info.title.simplified()).arg(accumulated_info); QListWidgetItem* item = new numbered_widget_item(text, m_dir_list); item->setData(Roles::FullPathRole, info.path); item->setData(Roles::ChangelogRole, info.changelog); item->setData(Roles::TitleRole, info.title); item->setData(Roles::TitleIdRole, info.title_id); item->setData(Roles::VersionRole, info.version); item->setToolTip(tooltip); item->setFlags(item->flags() | Qt::ItemIsUserCheckable); item->setCheckState(Qt::Checked); } m_dir_list->sortItems(); m_dir_list->setCurrentRow(0); m_dir_list->setMinimumWidth((m_dir_list->sizeHintForColumn(0) * 125) / 100); // Create buttons QDialogButtonBox* buttons = new QDialogButtonBox(QDialogButtonBox::Cancel | QDialogButtonBox::Ok); buttons->button(QDialogButtonBox::Ok)->setText(tr("Install")); buttons->button(QDialogButtonBox::Ok)->setDefault(true); connect(buttons, &QDialogButtonBox::clicked, this, [this, buttons](QAbstractButton* button) { if (button == buttons->button(QDialogButtonBox::Ok)) { accept(); } else if (button == buttons->button(QDialogButtonBox::Cancel)) { reject(); } }); connect(m_dir_list, &QListWidget::itemChanged, this, [this, buttons](QListWidgetItem*) { bool any_checked = false; for (int i = 0; i < m_dir_list->count(); i++) { if (m_dir_list->item(i)->checkState() == Qt::Checked) { any_checked = true; break; } } buttons->button(QDialogButtonBox::Ok)->setEnabled(any_checked); }); QToolButton* move_up = new QToolButton; move_up->setArrowType(Qt::UpArrow); move_up->setToolTip(tr("Move selected item up")); connect(move_up, &QToolButton::clicked, this, [this]() { MoveItem(-1); }); QToolButton* move_down = new QToolButton; move_down->setArrowType(Qt::DownArrow); move_down->setToolTip(tr("Move selected item down")); connect(move_down, &QToolButton::clicked, this, [this]() { MoveItem(1); }); QHBoxLayout* hbox = new QHBoxLayout; hbox->addStretch(); hbox->addWidget(move_up); hbox->addWidget(move_down); QLabel* description = new QLabel(tr("You are about to 
install multiple packages.\nReorder and/or exclude them if needed, then click \"Install\" to proceed.")); QVBoxLayout* vbox = new QVBoxLayout; vbox->addWidget(description); vbox->addLayout(hbox); vbox->addWidget(m_dir_list); vbox->addWidget(buttons); setLayout(vbox); setWindowTitle(tr("Batch PKG Installation")); setObjectName("pkg_install_dialog"); } void pkg_install_dialog::MoveItem(int offset) const { const int src_index = m_dir_list->currentRow(); const int dest_index = src_index + offset; if (src_index >= 0 && src_index < m_dir_list->count() && dest_index >= 0 && dest_index < m_dir_list->count()) { QListWidgetItem* item = m_dir_list->takeItem(src_index); m_dir_list->insertItem(dest_index, item); m_dir_list->setCurrentItem(item); } } std::vector<compat::package_info> pkg_install_dialog::GetPathsToInstall() const { std::vector<compat::package_info> result; for (int i = 0; i < m_dir_list->count(); i++) { const QListWidgetItem* item = m_dir_list->item(i); if (item && item->checkState() == Qt::Checked) { compat::package_info info; info.path = item->data(Roles::FullPathRole).toString(); info.title = item->data(Roles::TitleRole).toString(); info.title_id = item->data(Roles::TitleIdRole).toString(); info.changelog = item->data(Roles::ChangelogRole).toString(); info.version = item->data(Roles::VersionRole).toString(); result.push_back(info); } } return result; }
5,675
C++
.cpp
172
30.156977
160
0.699196
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
true
false
false
true
false
false
5,153
recvmessage_dialog_frame.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/recvmessage_dialog_frame.cpp
#include <QVBoxLayout> #include <QHBoxLayout> #include <QPushButton> #include <QMessageBox> #include <QTimer> #include "recvmessage_dialog_frame.h" #include "Emu/IdManager.h" #include "Emu/System.h" #include "util/logs.hpp" LOG_CHANNEL(recvmessage_dlg_log, "recvmessage dlg"); void recvmessage_callback(void* param, std::shared_ptr<std::pair<std::string, message_data>> new_msg, u64 msg_id) { auto* dlg = static_cast<recvmessage_dialog_frame*>(param); dlg->callback_handler(std::move(new_msg), msg_id); } recvmessage_dialog_frame::~recvmessage_dialog_frame() { if (m_dialog) { m_dialog->deleteLater(); } } error_code recvmessage_dialog_frame::Exec(SceNpBasicMessageMainType type, SceNpBasicMessageRecvOptions options, SceNpBasicMessageRecvAction& recv_result, u64& chosen_msg_id) { qRegisterMetaType<recvmessage_signal_struct>(); if (m_dialog) { m_dialog->close(); delete m_dialog; } m_dialog = new custom_dialog(false); m_dialog->setModal(true); m_dialog->setWindowTitle(tr("Choose message:")); m_rpcn = rpcn::rpcn_client::get_instance(true); QVBoxLayout* vbox_global = new QVBoxLayout(); m_lst_messages = new QListWidget(); vbox_global->addWidget(m_lst_messages); QHBoxLayout* hbox_btns = new QHBoxLayout(); hbox_btns->addStretch(); QPushButton* btn_accept = new QPushButton(tr("Accept")); QPushButton* btn_deny = new QPushButton(tr("Deny")); QPushButton* btn_cancel = new QPushButton(tr("Cancel")); hbox_btns->addWidget(btn_accept); hbox_btns->addWidget(btn_deny); hbox_btns->addWidget(btn_cancel); vbox_global->addLayout(hbox_btns); m_dialog->setLayout(vbox_global); error_code result = CELL_CANCEL; const bool preserve = options & SCE_NP_BASIC_RECV_MESSAGE_OPTIONS_PRESERVE; const bool include_bootable = options & SCE_NP_BASIC_RECV_MESSAGE_OPTIONS_INCLUDE_BOOTABLE; auto accept_or_deny = [preserve, this, &result, &recv_result, &chosen_msg_id](SceNpBasicMessageRecvAction result_from_action) { auto selected = m_lst_messages->selectedItems(); if (selected.empty()) { 
QMessageBox::critical(m_dialog, tr("Error receiving a message!"), tr("You must select a message!"), QMessageBox::Ok); return; } chosen_msg_id = selected[0]->data(Qt::UserRole).toULongLong(); recv_result = result_from_action; result = CELL_OK; if (!preserve) { m_rpcn->mark_message_used(chosen_msg_id); } m_dialog->close(); }; connect(btn_accept, &QAbstractButton::clicked, this, [&accept_or_deny]() { accept_or_deny(SCE_NP_BASIC_MESSAGE_ACTION_ACCEPT); }); connect(btn_deny, &QAbstractButton::clicked, this, [&accept_or_deny]() { accept_or_deny(SCE_NP_BASIC_MESSAGE_ACTION_DENY); }); connect(btn_cancel, &QAbstractButton::clicked, this, [this]() { m_dialog->close(); }); connect(this, &recvmessage_dialog_frame::signal_new_message, this, &recvmessage_dialog_frame::slot_new_message); // Get list of messages const auto messages = m_rpcn->get_messages_and_register_cb(type, include_bootable, recvmessage_callback, this); for (const auto& [id, message] : messages) { add_message(message, id); } auto& nps = g_fxo->get<np_state>(); QTimer timer; connect(&timer, &QTimer::timeout, this, [this, &nps, &timer]() { bool abort = Emu.IsStopped(); if (!abort && nps.abort_gui_flag.exchange(false)) { recvmessage_dlg_log.warning("Aborted by sceNp!"); abort = true; } if (abort) { if (m_dialog) { m_dialog->close(); } timer.stop(); } }); timer.start(10ms); m_dialog->exec(); m_rpcn->remove_message_cb(recvmessage_callback, this); return result; } void recvmessage_dialog_frame::add_message(const std::shared_ptr<std::pair<std::string, message_data>>& msg, u64 msg_id) { ensure(msg); auto new_item = new QListWidgetItem(QString::fromStdString(msg->first)); new_item->setData(Qt::UserRole, static_cast<qulonglong>(msg_id)); m_lst_messages->addItem(new_item); } void recvmessage_dialog_frame::slot_new_message(recvmessage_signal_struct msg_and_id) { add_message(msg_and_id.msg, msg_and_id.msg_id); } void recvmessage_dialog_frame::callback_handler(std::shared_ptr<std::pair<std::string, message_data>> new_msg, u64 
msg_id) { recvmessage_signal_struct signal_struct = { .msg = new_msg, .msg_id = msg_id, }; Q_EMIT signal_new_message(signal_struct); }
4,316
C++
.cpp
123
32.658537
173
0.72476
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
5,154
gui_settings.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/gui_settings.cpp
#include "gui_settings.h" #include "qt_utils.h" #include "localized.h" #include "Emu/System.h" #include <QCheckBox> #include <QCoreApplication> #include <QMessageBox> #include <thread> LOG_CHANNEL(cfg_log, "CFG"); namespace gui { QString stylesheet; bool custom_stylesheet_active = false; QString get_game_list_column_name(game_list_columns col) { switch (col) { case game_list_columns::icon: return "column_icon"; case game_list_columns::name: return "column_name"; case game_list_columns::serial: return "column_serial"; case game_list_columns::firmware: return "column_firmware"; case game_list_columns::version: return "column_version"; case game_list_columns::category: return "column_category"; case game_list_columns::path: return "column_path"; case game_list_columns::move: return "column_move"; case game_list_columns::resolution: return "column_resolution"; case game_list_columns::sound: return "column_sound"; case game_list_columns::parental: return "column_parental"; case game_list_columns::last_play: return "column_last_play"; case game_list_columns::playtime: return "column_playtime"; case game_list_columns::compat: return "column_compat"; case game_list_columns::dir_size: return "column_dir_size"; case game_list_columns::count: return ""; } fmt::throw_exception("get_game_list_column_name: Invalid column"); } QString get_trophy_list_column_name(trophy_list_columns col) { switch (col) { case trophy_list_columns::icon: return "trophy_column_icon"; case trophy_list_columns::name: return "trophy_column_name"; case trophy_list_columns::description: return "trophy_column_description"; case trophy_list_columns::type: return "trophy_column_type"; case trophy_list_columns::is_unlocked: return "trophy_column_is_unlocked"; case trophy_list_columns::id: return "trophy_column_id"; case trophy_list_columns::platinum_link: return "trophy_column_platinum_link"; case trophy_list_columns::time_unlocked: return "trophy_column_time_unlocked"; case trophy_list_columns::count: return 
""; } fmt::throw_exception("get_trophy_list_column_name: Invalid column"); } QString get_trophy_game_list_column_name(trophy_game_list_columns col) { switch (col) { case trophy_game_list_columns::icon: return "trophy_game_column_icon"; case trophy_game_list_columns::name: return "trophy_game_column_name"; case trophy_game_list_columns::progress: return "trophy_game_column_progress"; case trophy_game_list_columns::trophies: return "trophy_game_column_trophies"; case trophy_game_list_columns::count: return ""; } fmt::throw_exception("get_trophy_game_list_column_name: Invalid column"); } } gui_settings::gui_settings(QObject* parent) : settings(parent) { m_settings = std::make_unique<QSettings>(ComputeSettingsDir() + gui::Settings + ".ini", QSettings::Format::IniFormat, parent); } QStringList gui_settings::GetGameListCategoryFilters(bool is_list_mode) const { QStringList filterList; if (GetCategoryVisibility(Category::HDD_Game, is_list_mode)) filterList.append(cat::cat_hdd_game); if (GetCategoryVisibility(Category::Disc_Game, is_list_mode)) filterList.append(cat::cat_disc_game); if (GetCategoryVisibility(Category::PS1_Game, is_list_mode)) filterList.append(cat::cat_ps1_game); if (GetCategoryVisibility(Category::PS2_Game, is_list_mode)) filterList.append(cat::ps2_games); if (GetCategoryVisibility(Category::PSP_Game, is_list_mode)) filterList.append(cat::psp_games); if (GetCategoryVisibility(Category::Home, is_list_mode)) filterList.append(cat::cat_home); if (GetCategoryVisibility(Category::Media, is_list_mode)) filterList.append(cat::media); if (GetCategoryVisibility(Category::Data, is_list_mode)) filterList.append(cat::data); if (GetCategoryVisibility(Category::Unknown_Cat, is_list_mode)) filterList.append(cat::cat_unknown); if (GetCategoryVisibility(Category::Others, is_list_mode)) filterList.append(cat::others); return filterList; } bool gui_settings::GetCategoryVisibility(int cat, bool is_list_mode) const { const gui_save value = GetGuiSaveForCategory(cat, 
is_list_mode); return GetValue(value).toBool(); } void gui_settings::SetCategoryVisibility(int cat, bool val, bool is_list_mode) const { const gui_save value = GetGuiSaveForCategory(cat, is_list_mode); SetValue(value, val); } void gui_settings::ShowBox(QMessageBox::Icon icon, const QString& title, const QString& text, const gui_save& entry, int* result = nullptr, QWidget* parent = nullptr, bool always_on_top = false) { const std::string dialog_type = icon != QMessageBox::Information ? "Confirmation" : "Info"; const bool has_gui_setting = !entry.name.isEmpty(); if (has_gui_setting && !GetValue(entry).toBool()) { cfg_log.notice("%s Dialog for Entry %s was ignored", dialog_type, entry.name); return; } const QFlags<QMessageBox::StandardButton> buttons = icon != QMessageBox::Information ? QMessageBox::Yes | QMessageBox::No : QMessageBox::Ok; QMessageBox mb(icon, title, text, buttons, parent, Qt::Dialog | Qt::MSWindowsFixedSizeDialogHint | (always_on_top ? Qt::WindowStaysOnTopHint : Qt::Widget)); mb.setTextFormat(Qt::RichText); if (has_gui_setting && icon != QMessageBox::Critical) { mb.setCheckBox(new QCheckBox(tr("Don't show again"))); } connect(&mb, &QMessageBox::finished, [&](int res) { if (result) { *result = res; } const auto checkBox = mb.checkBox(); if (checkBox && checkBox->isChecked()) { SetValue(entry, false); cfg_log.notice("%s Dialog for Entry %s is now disabled", dialog_type, entry.name); } }); mb.exec(); } void gui_settings::ShowConfirmationBox(const QString& title, const QString& text, const gui_save& entry, int* result = nullptr, QWidget* parent = nullptr) { ShowBox(QMessageBox::Question, title, text, entry, result, parent, true); } void gui_settings::ShowInfoBox(const QString& title, const QString& text, const gui_save& entry, QWidget* parent = nullptr) { ShowBox(QMessageBox::Information, title, text, entry, nullptr, parent, false); } bool gui_settings::GetBootConfirmation(QWidget* parent, const gui_save& gui_save_entry) { while (Emu.GetStatus(false) == 
system_state::stopping) { QCoreApplication::processEvents(); std::this_thread::sleep_for(16ms); } if (!Emu.IsStopped()) { QString title = tr("Close Running Game?"); QString message = tr("Performing this action will close the current game.<br>Do you really want to continue?<br><br>Any unsaved progress will be lost!<br>"); if (gui_save_entry == gui::ib_confirm_boot) { message = tr("Booting another game will close the current game.<br>Do you really want to boot another game?<br><br>Any unsaved progress will be lost!<br>"); } else if (gui_save_entry == gui::ib_confirm_exit) { title = tr("Exit RPCS3?"); message = tr("A game is currently running. Do you really want to close RPCS3?<br><br>Any unsaved progress will be lost!<br>"); } int result = QMessageBox::Yes; ShowBox(QMessageBox::Question, title, message, gui_save_entry, &result, parent); if (result != QMessageBox::Yes) { return false; } cfg_log.notice("User accepted to stop the current emulation."); } return true; } void gui_settings::SetTrophyGamelistColVisibility(gui::trophy_game_list_columns col, bool val) const { SetValue(GetGuiSaveForTrophyGameColumn(col), val); } void gui_settings::SetTrophylistColVisibility(gui::trophy_list_columns col, bool val) const { SetValue(GetGuiSaveForTrophyColumn(col), val); } void gui_settings::SetGamelistColVisibility(gui::game_list_columns col, bool val) const { SetValue(GetGuiSaveForGameColumn(col), val); } void gui_settings::SetCustomColor(int col, const QColor& val) const { SetValue(gui_save(gui::meta, "CustomColor" + QString::number(col), gui::gl_icon_color), val); } logs::level gui_settings::GetLogLevel() const { return logs::level(GetValue(gui::l_level).toUInt()); } bool gui_settings::GetTrophyGamelistColVisibility(gui::trophy_game_list_columns col) const { return GetValue(GetGuiSaveForTrophyGameColumn(col)).toBool(); } bool gui_settings::GetTrophylistColVisibility(gui::trophy_list_columns col) const { return GetValue(GetGuiSaveForTrophyColumn(col)).toBool(); } bool 
gui_settings::GetGamelistColVisibility(gui::game_list_columns col) const { return GetValue(GetGuiSaveForGameColumn(col)).toBool(); } QColor gui_settings::GetCustomColor(int col) const { return GetValue(gui_save(gui::meta, "CustomColor" + QString::number(col), gui::gl_icon_color)).value<QColor>(); } QStringList gui_settings::GetStylesheetEntries() const { const QStringList name_filter = QStringList("*.qss"); QStringList res = gui::utils::get_dir_entries(m_settings_dir, name_filter); #if !defined(_WIN32) // Makes stylesheets load if using AppImage (App Bundle) or installed to /usr/bin #ifdef __APPLE__ QDir platformStylesheetDir = QCoreApplication::applicationDirPath() + "/../Resources/GuiConfigs/"; #else QDir platformStylesheetDir = QCoreApplication::applicationDirPath() + "/../share/rpcs3/GuiConfigs/"; #ifdef DATADIR const QString data_dir = (DATADIR); res.append(gui::utils::get_dir_entries(data_dir + "/GuiConfigs/", name_filter)); #endif #endif res.append(gui::utils::get_dir_entries(QCoreApplication::applicationDirPath() + "/GuiConfigs/", name_filter)); res.append(gui::utils::get_dir_entries(platformStylesheetDir, name_filter)); res.removeDuplicates(); #endif res.sort(); return res; } QSize gui_settings::SizeFromSlider(int pos) { return gui::gl_icon_size_min + (gui::gl_icon_size_max - gui::gl_icon_size_min) * (1.f * pos / gui::gl_max_slider_pos); } gui_save gui_settings::GetGuiSaveForTrophyGameColumn(gui::trophy_game_list_columns col) { return gui_save{ gui::trophy, "visibility_" + gui::get_trophy_game_list_column_name(col), true }; } gui_save gui_settings::GetGuiSaveForTrophyColumn(gui::trophy_list_columns col) { return gui_save{ gui::trophy, "visibility_" + gui::get_trophy_list_column_name(col), true }; } gui_save gui_settings::GetGuiSaveForGameColumn(gui::game_list_columns col) { // hide sound format, parental level, firmware version and path by default const bool show = col != gui::game_list_columns::sound && col != gui::game_list_columns::parental && col != 
gui::game_list_columns::firmware && col != gui::game_list_columns::path; return gui_save{ gui::game_list, "visibility_" + gui::get_game_list_column_name(col), show }; } gui_save gui_settings::GetGuiSaveForCategory(int cat, bool is_list_mode) { switch (cat) { case Category::HDD_Game: return is_list_mode ? gui::cat_hdd_game : gui::grid_cat_hdd_game; case Category::Disc_Game: return is_list_mode ? gui::cat_disc_game : gui::grid_cat_disc_game; case Category::Home: return is_list_mode ? gui::cat_home : gui::grid_cat_home; case Category::PS1_Game: return is_list_mode ? gui::cat_ps1_game : gui::grid_cat_ps1_game; case Category::PS2_Game: return is_list_mode ? gui::cat_ps2_game : gui::grid_cat_ps2_game; case Category::PSP_Game: return is_list_mode ? gui::cat_psp_game : gui::grid_cat_psp_game; case Category::Media: return is_list_mode ? gui::cat_audio_video : gui::grid_cat_audio_video; case Category::Data: return is_list_mode ? gui::cat_game_data : gui::grid_cat_game_data; case Category::Unknown_Cat: return is_list_mode ? gui::cat_unknown : gui::grid_cat_unknown; case Category::Others: return is_list_mode ? gui::cat_other : gui::grid_cat_other; default: cfg_log.warning("GetGuiSaveForCategory: wrong cat <%d>", cat); return {}; } }
11,615
C++
.cpp
288
37.944444
194
0.746451
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
5,155
save_data_dialog.cpp
RPCS3_rpcs3/rpcs3/rpcs3qt/save_data_dialog.cpp
#include "save_data_dialog.h" #include "save_data_list_dialog.h" #include "Emu/System.h" #include "Emu/IdManager.h" #include "Emu/Io/interception.h" #include "../Emu/RSX/Overlays/overlay_manager.h" #include "Emu/RSX/Overlays/overlay_save_dialog.h" #include "Emu/Cell/Modules/cellSysutil.h" #include "Utilities/Thread.h" #include "util/logs.hpp" LOG_CHANNEL(cellSaveData); s32 save_data_dialog::ShowSaveDataList(std::vector<SaveDataEntry>& save_entries, s32 focused, u32 op, vm::ptr<CellSaveDataListSet> listSet, bool enable_overlay) { cellSaveData.notice("ShowSaveDataList(save_entries=%d, focused=%d, op=0x%x, listSet=*0x%x, enable_overlay=%d)", save_entries.size(), focused, op, listSet, enable_overlay); // TODO: Implement proper error checking in savedata_op? const bool use_end = sysutil_send_system_cmd(CELL_SYSUTIL_DRAWING_BEGIN, 0) >= 0; if (!use_end) { cellSaveData.error("ShowSaveDataList(): Not able to notify DRAWING_BEGIN callback because one has already been sent!"); } // TODO: Install native shell as an Emu callback if (auto manager = g_fxo->try_get<rsx::overlays::display_manager>()) { cellSaveData.notice("ShowSaveDataList: Showing native UI dialog"); const s32 result = manager->create<rsx::overlays::save_dialog>()->show(save_entries, focused, op, listSet, enable_overlay); if (result != rsx::overlays::user_interface::selection_code::error) { cellSaveData.notice("ShowSaveDataList: Native UI dialog returned with selection %d", result); if (use_end) sysutil_send_system_cmd(CELL_SYSUTIL_DRAWING_END, 0); return result; } cellSaveData.error("ShowSaveDataList: Native UI dialog returned error"); } if (!Emu.HasGui()) { cellSaveData.notice("ShowSaveDataList(): Aborting: Emulation has no GUI attached"); if (use_end) sysutil_send_system_cmd(CELL_SYSUTIL_DRAWING_END, 0); return -2; } // Fall back to front-end GUI cellSaveData.notice("ShowSaveDataList(): Using fallback GUI"); atomic_t<s32> selection = 0; input::SetIntercepted(true); Emu.BlockingCallFromMainThread([&]() { 
save_data_list_dialog sdid(save_entries, focused, op, listSet); sdid.exec(); selection = sdid.GetSelection(); }); input::SetIntercepted(false); if (use_end) sysutil_send_system_cmd(CELL_SYSUTIL_DRAWING_END, 0); return selection.load(); }
2,305
C++
.cpp
53
41.09434
172
0.750447
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
5,156
cache_utils.cpp
RPCS3_rpcs3/rpcs3/Emu/cache_utils.cpp
#include "stdafx.h" #include "cache_utils.hpp" #include "system_utils.hpp" #include "system_config.h" #include "IdManager.h" #include "Emu/Cell/PPUAnalyser.h" #include "Emu/Cell/PPUThread.h" LOG_CHANNEL(sys_log, "SYS"); namespace rpcs3::cache { std::string get_ppu_cache() { const auto _main = g_fxo->try_get<main_ppu_module>(); if (!_main || _main->cache.empty()) { ppu_log.error("PPU Cache location not initialized."); return {}; } return _main->cache; } void limit_cache_size() { const std::string cache_location = rpcs3::utils::get_hdd1_dir() + "/caches"; if (!fs::is_dir(cache_location)) { sys_log.warning("Cache does not exist (%s)", cache_location); return; } const u64 size = fs::get_dir_size(cache_location); if (size == umax) { sys_log.error("Could not calculate cache directory '%s' size (%s)", cache_location, fs::g_tls_error); return; } const u64 max_size = static_cast<u64>(g_cfg.vfs.cache_max_size) * 1024 * 1024; if (max_size == 0) // Everything must go, so no need to do checks { fs::remove_all(cache_location, false); sys_log.success("Cleared disk cache"); return; } if (size <= max_size) { sys_log.trace("Cache size below limit: %llu/%llu", size, max_size); return; } sys_log.success("Cleaning disk cache..."); std::vector<fs::dir_entry> file_list{}; fs::dir cache_dir(cache_location); if (!cache_dir) { sys_log.error("Could not open cache directory '%s' (%s)", cache_location, fs::g_tls_error); return; } // retrieve items to delete for (const auto &item : cache_dir) { if (item.name != "." 
&& item.name != "..") file_list.push_back(item); } cache_dir.close(); // sort oldest first std::sort(file_list.begin(), file_list.end(), FN(x.mtime < y.mtime)); // keep removing until cache is empty or enough bytes have been cleared // cache is cleared down to 80% of limit to increase interval between clears const u64 to_remove = static_cast<u64>(size - max_size * 0.8); u64 removed = 0; for (const auto &item : file_list) { const std::string &name = cache_location + "/" + item.name; const bool is_dir = fs::is_dir(name); const u64 item_size = is_dir ? fs::get_dir_size(name) : item.size; if (is_dir && item_size == umax) { sys_log.error("Failed to calculate '%s' item '%s' size (%s)", cache_location, item.name, fs::g_tls_error); break; } if (is_dir ? !fs::remove_all(name, true, true) : !fs::remove_file(name)) { sys_log.error("Could not remove cache directory '%s' item '%s' (%s)", cache_location, item.name, fs::g_tls_error); break; } removed += item_size; if (removed >= to_remove) break; } sys_log.success("Cleaned disk cache, removed %.2f MB", size / 1024.0 / 1024.0); } }
2,801
C++
.cpp
89
28.146067
118
0.650446
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
true
false
5,157
title.cpp
RPCS3_rpcs3/rpcs3/Emu/title.cpp
#include "stdafx.h" #include "title.h" #include "rpcs3_version.h" #include "util/sysinfo.hpp" namespace rpcs3 { std::string get_formatted_title(const title_format_data& title_data) { // Parse title format string std::string title_string; for (usz i = 0; i < title_data.format.size();) { const char c1 = title_data.format[i]; if (c1 == '\0') { break; } switch (c1) { case '%': { const char c2 = title_data.format[i + 1]; if (c2 == '\0') { title_string += '%'; i++; continue; } switch (c2) { case '%': { title_string += '%'; break; } case 'T': { title_string += title_data.title; break; } case 't': { title_string += title_data.title_id; break; } case 'R': { fmt::append(title_string, "%s", title_data.renderer); break; } case 'V': { static const std::string version = rpcs3::get_version_and_branch(); title_string += version; break; } case 'F': { fmt::append(title_string, "%.2f", title_data.fps); break; } case 'G': { title_string += title_data.vulkan_adapter; break; } case 'C': { static const std::string brand = utils::get_cpu_brand(); title_string += brand; break; } case 'c': { fmt::append(title_string, "%d", utils::get_thread_count()); break; } case 'M': { fmt::append(title_string, "%.2f", utils::get_total_memory() / (1024.0f * 1024 * 1024)); break; } default: { title_string += '%'; title_string += c2; break; } } i += 2; break; } default: { title_string += c1; i += 1; break; } } } return title_string; } }
1,805
C++
.cpp
103
12.76699
92
0.526875
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
true
false
false
true
true
false
5,158
system_progress.cpp
RPCS3_rpcs3/rpcs3/Emu/system_progress.cpp
#include "stdafx.h" #include "system_progress.hpp" #include "Emu/Cell/Modules/cellMsgDialog.h" #include "Emu/RSX/RSXThread.h" #include "Emu/RSX/Overlays/overlay_manager.h" #include "Emu/RSX/Overlays/overlay_message_dialog.h" #include "Emu/RSX/Overlays/overlay_message.h" #include "Emu/RSX/Overlays/overlay_compile_notification.h" #include "Emu/System.h" #include "util/asm.hpp" LOG_CHANNEL(sys_log, "SYS"); // Progress display server synchronization variables atomic_t<progress_dialog_string_t> g_progr_text{}; atomic_t<u32> g_progr_ftotal{0}; atomic_t<u32> g_progr_fdone{0}; atomic_t<u64> g_progr_ftotal_bits{0}; atomic_t<u64> g_progr_fknown_bits{0}; atomic_t<u32> g_progr_ptotal{0}; atomic_t<u32> g_progr_pdone{0}; // For Batch PPU Compilation atomic_t<bool> g_system_progress_canceled{false}; // For showing feedback while stopping emulation atomic_t<bool> g_system_progress_stopping{false}; namespace rsx::overlays { class progress_dialog : public message_dialog { public: using message_dialog::message_dialog; }; } // namespace rsx::overlays void progress_dialog_server::operator()() { std::shared_ptr<rsx::overlays::progress_dialog> native_dlg; g_system_progress_stopping = false; const auto get_state = []() { auto whole_state = std::make_tuple(+g_progr_text.load(), +g_progr_ftotal, +g_progr_fdone, +g_progr_ftotal_bits, +g_progr_fknown_bits, +g_progr_ptotal, +g_progr_pdone); while (true) { auto new_state = std::make_tuple(+g_progr_text.load(), +g_progr_ftotal, +g_progr_fdone, +g_progr_ftotal_bits, +g_progr_fknown_bits, +g_progr_ptotal, +g_progr_pdone); if (new_state == whole_state) { // Only leave while it has a complete (atomic) state return whole_state; } whole_state = std::move(new_state); } return whole_state; }; while (!g_system_progress_stopping && thread_ctrl::state() != thread_state::aborting) { // Wait for the start condition const char* text0 = g_progr_text.load(); while (!text0) { if (g_system_progress_stopping || thread_ctrl::state() == thread_state::aborting) { 
break; } if (g_progr_ftotal || g_progr_fdone || g_progr_ptotal || g_progr_pdone) { const auto& [text_new, ftotal, fdone, ftotal_bits, fknown_bits, ptotal, pdone] = get_state(); if (text_new) { text0 = text_new; break; } if ((ftotal || ptotal) && ftotal == fdone && ptotal == pdone) { // Cleanup (missed message but do not cry over spilt milk) g_progr_fdone -= fdone; g_progr_pdone -= pdone; g_progr_ftotal_bits -= ftotal_bits; g_progr_fknown_bits -= fknown_bits; g_progr_ftotal -= ftotal; g_progr_ptotal -= ptotal; g_progr_ptotal.notify_all(); } } thread_ctrl::wait_for(5000); text0 = g_progr_text.load(); } if (g_system_progress_stopping || thread_ctrl::state() == thread_state::aborting) { break; } g_system_progress_canceled = false; // Initialize message dialog bool show_overlay_message = false; // Only show an overlay message after initial loading is done. std::shared_ptr<MsgDialogBase> dlg; if (const auto renderer = rsx::get_current_renderer()) { // Some backends like OpenGL actually initialize a lot of driver objects in the "on_init" method. // Wait for init to complete within reasonable time. Abort just in case we have hardware/driver issues. 
renderer->is_initialized.wait(0, atomic_wait_timeout(5 * 1000000000ull)); auto manager = g_fxo->try_get<rsx::overlays::display_manager>(); show_overlay_message = g_fxo->get<progress_dialog_workaround>().show_overlay_message_only; if (manager && !show_overlay_message) { MsgDialogType type{}; type.se_mute_on = true; type.se_normal = true; type.bg_invisible = true; type.disable_cancel = true; type.progress_bar_count = 1; native_dlg = manager->create<rsx::overlays::progress_dialog>(true); native_dlg->show(false, text0, type, msg_dialog_source::sys_progress, nullptr); native_dlg->progress_bar_set_message(0, get_localized_string(localized_string_id::PROGRESS_DIALOG_PLEASE_WAIT)); } } if (!show_overlay_message && !native_dlg && (dlg = Emu.GetCallbacks().get_msg_dialog())) { dlg->type.se_normal = true; dlg->type.bg_invisible = true; dlg->type.progress_bar_count = 1; dlg->on_close = [](s32 /*status*/) { Emu.CallFromMainThread([]() { // Abort everything sys_log.notice("Aborted progress dialog"); Emu.GracefulShutdown(false, true); }); g_system_progress_canceled = true; }; Emu.CallFromMainThread([dlg, text0]() { dlg->Create(text0, text0); }); } u32 ftotal = 0; u32 fdone = 0; u64 fknown_bits = 0; u64 ftotal_bits = 0; u32 ptotal = 0; u32 pdone = 0; const char* text1 = nullptr; const u64 start_time = get_system_time(); u64 wait_no_update_count = 0; std::shared_ptr<atomic_t<u32>> ppu_cue_refs; std::vector<std::pair<u64, u64>> time_left_queue(1024); usz time_left_queue_idx = 0; // Update progress for (u64 sleep_until = get_system_time(), sleep_for = 500; !g_system_progress_stopping && thread_ctrl::state() != thread_state::aborting; thread_ctrl::wait_until(&sleep_until, std::exchange(sleep_for, 500))) { const auto& [text_new, ftotal_new, fdone_new, ftotal_bits_new, fknown_bits_new, ptotal_new, pdone_new] = get_state(); // Force-update every 20 seconds to update remaining time if (wait_no_update_count == 100u * 20 || ftotal != ftotal_new || fdone != fdone_new || fknown_bits != 
fknown_bits_new || ftotal_bits != ftotal_bits_new || ptotal != ptotal_new || pdone != pdone_new || text_new != text1) { wait_no_update_count = 0; ftotal = ftotal_new; fdone = fdone_new; ftotal_bits = ftotal_bits_new; fknown_bits = fknown_bits_new; ptotal = ptotal_new; pdone = pdone_new; const bool text_changed = text_new && text_new != text1; if (text_new) { text1 = text_new; } if (!text1) { // Cannot do anything continue; } if (show_overlay_message) { // Show a message instead (if compilation period is estimated to be lengthy) if (pdone < ptotal && g_cfg.misc.show_ppu_compilation_hint) { const u64 passed_usec = (get_system_time() - start_time); const u64 remaining_usec = pdone ? utils::rational_mul<u64>(passed_usec, static_cast<u64>(ptotal) - pdone, pdone) : (passed_usec * ptotal); // Only show compile notification if we estimate at least 100ms if (remaining_usec >= 100'000ULL) { if (!ppu_cue_refs || !*ppu_cue_refs) { ppu_cue_refs = rsx::overlays::show_ppu_compile_notification(); } // Make sure to update any pending messages. PPU compilation may freeze the image. rsx::overlays::refresh_message_queue(); } } if (pdone >= ptotal) { if (ppu_cue_refs) { *ppu_cue_refs = 0; ppu_cue_refs.reset(); rsx::overlays::refresh_message_queue(); } } sleep_for = 10000; continue; } // Compute new progress in percents // Assume not all programs were found if files were not compiled (as it may contain more) const bool use_bits = fknown_bits && ftotal_bits; const u64 known_files = use_bits ? fknown_bits : ftotal; const u64 total = utils::rational_mul<u64>(std::max<u64>(ptotal, 1), std::max<u64>(use_bits ? ftotal_bits : ftotal, 1), std::max<u64>(known_files, 1)); const u64 done = pdone; const u32 value = static_cast<u32>(done >= total ? 
100 : done * 100 / total); std::string progr; if (ftotal || ptotal) { progr = get_localized_string(localized_string_id::PROGRESS_DIALOG_PROGRESS); if (ftotal) fmt::append(progr, " %s %u %s %u%s", get_localized_string(localized_string_id::PROGRESS_DIALOG_FILE), fdone, get_localized_string(localized_string_id::PROGRESS_DIALOG_OF), ftotal, ptotal ? "," : ""); if (ptotal) fmt::append(progr, " %s %u %s %u", get_localized_string(localized_string_id::PROGRESS_DIALOG_MODULE), pdone, get_localized_string(localized_string_id::PROGRESS_DIALOG_OF), ptotal); const u32 of_1000 = static_cast<u32>(done >= total ? 1000 : done * 1000 / total); if (of_1000 >= 2) { const u64 passed = (get_system_time() - start_time); const u64 total = utils::rational_mul<u64>(passed, 1000, of_1000); const u64 remaining = total - passed; // Stabilize the result by using the maximum one from the recent history // This is a very simple approach yet appears to solve most inconsistencies u64 max_remaining = remaining; for (usz i = 0; i < time_left_queue.size(); i++) { const auto& sample = time_left_queue[(time_left_queue.size() + time_left_queue_idx - i) % time_left_queue.size()]; const u64 sample_age = passed - sample.first; if (passed - sample.first >= 4'000'000) { // Ignore old samples break; } max_remaining = std::max<u64>(max_remaining, sample.second >= sample_age ? 
sample.second - sample_age : 0); } if (auto new_val = std::make_pair(passed, remaining); time_left_queue[time_left_queue_idx] != new_val) { time_left_queue_idx = (time_left_queue_idx + 1) % time_left_queue.size(); time_left_queue[time_left_queue_idx] = new_val; } const u64 max_seconds_remaining = max_remaining / 1'000'000; const u64 seconds = max_seconds_remaining % 60; const u64 minutes = (max_seconds_remaining / 60) % 60; const u64 hours = (max_seconds_remaining / 3600); if (passed < 4'000'000) { // Cannot rely on such small duration of time for estimation } else if (done >= total) { fmt::append(progr, " (%s)", get_localized_string(localized_string_id::PROGRESS_DIALOG_DONE)); } else if (hours) { fmt::append(progr, " (%uh %2um %s)", hours, minutes, get_localized_string(localized_string_id::PROGRESS_DIALOG_REMAINING)); } else if (minutes >= 2) { fmt::append(progr, " (%um %s)", minutes, get_localized_string(localized_string_id::PROGRESS_DIALOG_REMAINING)); } else if (minutes == 0) { fmt::append(progr, " (%us %s)", std::max<u64>(seconds, 1), get_localized_string(localized_string_id::PROGRESS_DIALOG_REMAINING)); } else { fmt::append(progr, " (%um %2us %s)", minutes, seconds, get_localized_string(localized_string_id::PROGRESS_DIALOG_REMAINING)); } } } else { progr = get_localized_string(localized_string_id::PROGRESS_DIALOG_PROGRESS_ANALYZING); } // Changes detected, send update if (native_dlg) { if (text_changed) { native_dlg->set_text(text1); } native_dlg->progress_bar_set_message(0, std::move(progr)); native_dlg->progress_bar_set_value(0, static_cast<f32>(value)); } else if (dlg) { Emu.CallFromMainThread([=]() { if (text_changed) { dlg->SetMsg(text1); } dlg->ProgressBarSetMsg(0, progr); dlg->ProgressBarSetValue(0, value); }); } } if (show_overlay_message) { // Make sure to update any pending messages. PPU compilation may freeze the image. 
rsx::overlays::refresh_message_queue(); } // Leave only if total count is equal to done count if (ftotal == fdone && ptotal == pdone && !text_new) { // Complete state, empty message: close dialog break; } sleep_for = 10'000; wait_no_update_count++; } if (ppu_cue_refs) { *ppu_cue_refs = 0; } if (g_system_progress_stopping || thread_ctrl::state() == thread_state::aborting) { break; } if (show_overlay_message) { // Do nothing } else if (native_dlg) { native_dlg->close(false, false); } else if (dlg) { Emu.CallFromMainThread([=]() { dlg->Close(true); }); } // Cleanup g_progr_fdone -= fdone; g_progr_pdone -= pdone; g_progr_ftotal_bits -= ftotal_bits; g_progr_fknown_bits -= fknown_bits; g_progr_ftotal -= ftotal; g_progr_ptotal -= ptotal; g_progr_ptotal.notify_all(); } if (native_dlg && g_system_progress_stopping) { native_dlg->set_text(get_localized_string(localized_string_id::PROGRESS_DIALOG_STOPPING_PLEASE_WAIT)); native_dlg->refresh(); } if (g_progr_ptotal.exchange(0)) { g_progr_ptotal.notify_all(); } } progress_dialog_server::~progress_dialog_server() { g_progr_ftotal.release(0); g_progr_fdone.release(0); g_progr_ftotal_bits.release(0); g_progr_fknown_bits.release(0); g_progr_ptotal.release(0); g_progr_pdone.release(0); g_progr_text.release(progress_dialog_string_t{}); }
12,805
C++
.cpp
363
30.53719
205
0.652522
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
5,159
IPC_socket.cpp
RPCS3_rpcs3/rpcs3/Emu/IPC_socket.cpp
#include "stdafx.h" #include "System.h" #include "Emu/IPC_config.h" #include "IPC_socket.h" #include "rpcs3_version.h" namespace IPC_socket { const u8& IPC_impl::read8(u32 addr) { return vm::read8(addr); } void IPC_impl::write8(u32 addr, u8 value) { vm::write8(addr, value); } const be_t<u16>& IPC_impl::read16(u32 addr) { return vm::read16(addr); } void IPC_impl::write16(u32 addr, be_t<u16> value) { vm::write16(addr, value); } const be_t<u32>& IPC_impl::read32(u32 addr) { return vm::read32(addr); } void IPC_impl::write32(u32 addr, be_t<u32> value) { vm::write32(addr, value); } const be_t<u64>& IPC_impl::read64(u32 addr) { return vm::read64(addr); } void IPC_impl::write64(u32 addr, be_t<u64> value) { vm::write64(addr, value); } int IPC_impl::get_port() { return g_cfg_ipc.get_port(); } pine::EmuStatus IPC_impl::get_status() { switch (Emu.GetStatus()) { case system_state::running: return pine::EmuStatus::Running; case system_state::paused: return pine::EmuStatus::Paused; default: return pine::EmuStatus::Shutdown; } } const std::string& IPC_impl::get_title() { return Emu.GetTitle(); } const std::string& IPC_impl::get_title_ID() { return Emu.GetTitleID(); } const std::string& IPC_impl::get_executable_hash() { return Emu.GetExecutableHash(); } const std::string& IPC_impl::get_app_version() { return Emu.GetAppVersion(); } std::string IPC_impl::get_version_and_branch() { return rpcs3::get_version_and_branch(); } IPC_impl& IPC_impl::operator=(thread_state) { return *this; } IPC_server_manager::IPC_server_manager(bool enabled) { // Enable IPC if needed set_server_enabled(enabled); } void IPC_server_manager::set_server_enabled(bool enabled) { if (enabled) { int port = g_cfg_ipc.get_port(); if (!m_ipc_server || port != m_old_port) { IPC.notice("Starting server with port %d", port); m_ipc_server = std::make_unique<IPC_server>(); m_old_port = port; } } else if (m_ipc_server) { IPC.notice("Stopping server"); m_ipc_server.reset(); } } }
2,124
C++
.cpp
103
17.970874
58
0.677822
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
5,160
games_config.cpp
RPCS3_rpcs3/rpcs3/Emu/games_config.cpp
#include "stdafx.h" #include "games_config.h" #include "util/logs.hpp" #include "util/yaml.hpp" #include "Utilities/File.h" LOG_CHANNEL(cfg_log, "CFG"); games_config::games_config() { load(); } games_config::~games_config() { if (m_dirty) { save(); } } const std::map<std::string, std::string> games_config::get_games() const { std::lock_guard lock(m_mutex); return m_games; } std::string games_config::get_path(const std::string& title_id) const { if (title_id.empty()) { return {}; } std::lock_guard lock(m_mutex); if (const auto it = m_games.find(title_id); it != m_games.cend()) { return it->second; } return {}; } games_config::result games_config::add_game(const std::string& key, const std::string& path) { std::lock_guard lock(m_mutex); // Access or create node if does not exist if (auto it = m_games.find(key); it != m_games.end()) { if (it->second == path) { // Nothing to do return result::exists; } it->second = path; } else { m_games.emplace(key, path); } m_dirty = true; if (m_save_on_dirty && !save_nl()) { return result::failure; } return result::success; } games_config::result games_config::add_external_hdd_game(const std::string& key, std::string& path) { // Don't use the C00 subdirectory in our game list if (path.ends_with("/C00") || path.ends_with("\\C00")) { path = path.substr(0, path.size() - 4); } const result res = add_game(key, path); switch (res) { case result::failure: cfg_log.error("Failed to save HG game location of title '%s' (error=%s)", key, fs::g_tls_error); break; case result::success: cfg_log.notice("Registered HG game directory for title '%s': %s", key, path); break; case result::exists: break; } return res; } games_config::result games_config::remove_game(const std::string& key) { std::lock_guard lock(m_mutex); // Remove node if (m_games.erase(key) == 0) // If node not found { // Nothing to do return result::success; } m_dirty = true; if (m_save_on_dirty && !save_nl()) { return result::failure; } return result::success; } bool 
games_config::save_nl() { YAML::Emitter out; out << m_games; fs::pending_file temp(fs::get_config_dir() + "/games.yml"); if (temp.file && temp.file.write(out.c_str(), out.size()) >= out.size() && temp.commit()) { m_dirty = false; return true; } cfg_log.error("Failed to save games.yml: %s", fs::g_tls_error); return false; } bool games_config::save() { std::lock_guard lock(m_mutex); return save_nl(); } void games_config::load() { std::lock_guard lock(m_mutex); m_games.clear(); if (fs::file f{fs::get_config_dir() + "/games.yml", fs::read + fs::create}) { auto [result, error] = yaml_load(f.to_string()); if (!error.empty()) { cfg_log.error("Failed to load games.yml: %s", error); } if (!result.IsMap()) { if (!result.IsNull()) { cfg_log.error("Failed to load games.yml: type %d not a map", result.Type()); } return; } for (const auto& entry : result) { if (!entry.first.Scalar().empty() && entry.second.IsScalar() && !entry.second.Scalar().empty()) { m_games.emplace(entry.first.Scalar(), entry.second.Scalar()); } } } }
3,203
C++
.cpp
142
20.176056
99
0.658738
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
5,161
system_config_types.cpp
RPCS3_rpcs3/rpcs3/Emu/system_config_types.cpp
#include "stdafx.h" #include "system_config_types.h" template <> void fmt_class_string<mouse_handler>::format(std::string& out, u64 arg) { format_enum(out, arg, [](mouse_handler value) { switch (value) { case mouse_handler::null: return "Null"; case mouse_handler::basic: return "Basic"; case mouse_handler::raw: return "Raw"; } return unknown; }); } template <> void fmt_class_string<video_renderer>::format(std::string& out, u64 arg) { format_enum(out, arg, [](video_renderer value) { switch (value) { case video_renderer::null: return "Null"; case video_renderer::opengl: return "OpenGL"; case video_renderer::vulkan: return "Vulkan"; } return unknown; }); } template <> void fmt_class_string<video_resolution>::format(std::string& out, u64 arg) { format_enum(out, arg, [](video_resolution value) { switch (value) { case video_resolution::_1080p: return "1920x1080"; case video_resolution::_1080i: return "1920x1080i"; case video_resolution::_720p: return "1280x720"; case video_resolution::_480p: return "720x480"; case video_resolution::_480i: return "720x480i"; case video_resolution::_576p: return "720x576"; case video_resolution::_576i: return "720x576i"; case video_resolution::_1600x1080p: return "1600x1080"; case video_resolution::_1440x1080p: return "1440x1080"; case video_resolution::_1280x1080p: return "1280x1080"; case video_resolution::_960x1080p: return "960x1080"; } return unknown; }); } template <> void fmt_class_string<video_aspect>::format(std::string& out, u64 arg) { format_enum(out, arg, [](video_aspect value) { switch (value) { case video_aspect::_4_3: return "4:3"; case video_aspect::_16_9: return "16:9"; } return unknown; }); } template <> void fmt_class_string<msaa_level>::format(std::string& out, u64 arg) { format_enum(out, arg, [](msaa_level value) { switch (value) { case msaa_level::none: return "Disabled"; case msaa_level::_auto: return "Auto"; } return unknown; }); } template <> void fmt_class_string<keyboard_handler>::format(std::string& out, u64 
arg) { format_enum(out, arg, [](keyboard_handler value) { switch (value) { case keyboard_handler::null: return "Null"; case keyboard_handler::basic: return "Basic"; } return unknown; }); } template <> void fmt_class_string<audio_renderer>::format(std::string& out, u64 arg) { format_enum(out, arg, [](audio_renderer value) { switch (value) { case audio_renderer::null: return "Null"; #ifdef _WIN32 case audio_renderer::xaudio: return "XAudio2"; #endif case audio_renderer::cubeb: return "Cubeb"; #ifdef HAVE_FAUDIO case audio_renderer::faudio: return "FAudio"; #endif } return unknown; }); } template <> void fmt_class_string<audio_channel_layout>::format(std::string& out, u64 arg) { format_enum(out, arg, [](audio_channel_layout value) { switch (value) { case audio_channel_layout::automatic: return "Automatic"; case audio_channel_layout::mono: return "Mono"; case audio_channel_layout::stereo: return "Stereo"; case audio_channel_layout::stereo_lfe: return "Stereo LFE"; case audio_channel_layout::quadraphonic: return "Quadraphonic"; case audio_channel_layout::quadraphonic_lfe: return "Quadraphonic LFE"; case audio_channel_layout::surround_5_1: return "Surround 5.1"; case audio_channel_layout::surround_7_1: return "Surround 7.1"; } return unknown; }); } template <> void fmt_class_string<detail_level>::format(std::string& out, u64 arg) { format_enum(out, arg, [](detail_level value) { switch (value) { case detail_level::none: return "None"; case detail_level::minimal: return "Minimal"; case detail_level::low: return "Low"; case detail_level::medium: return "Medium"; case detail_level::high: return "High"; } return unknown; }); } template <> void fmt_class_string<perf_graph_detail_level>::format(std::string& out, u64 arg) { format_enum(out, arg, [](perf_graph_detail_level value) { switch (value) { case perf_graph_detail_level::minimal: return "Minimal"; case perf_graph_detail_level::show_min_max: return "Show min and max"; case perf_graph_detail_level::show_one_percent_avg: 
return "Show 1% and average"; case perf_graph_detail_level::show_all: return "All"; } return unknown; }); } template <> void fmt_class_string<screen_quadrant>::format(std::string& out, u64 arg) { format_enum(out, arg, [](screen_quadrant value) { switch (value) { case screen_quadrant::top_left: return "Top Left"; case screen_quadrant::top_right: return "Top Right"; case screen_quadrant::bottom_left: return "Bottom Left"; case screen_quadrant::bottom_right: return "Bottom Right"; } return unknown; }); } template <> void fmt_class_string<tsx_usage>::format(std::string& out, u64 arg) { format_enum(out, arg, [](tsx_usage value) { switch (value) { case tsx_usage::disabled: return "Disabled"; case tsx_usage::enabled: return "Enabled"; case tsx_usage::forced: return "Forced"; } return unknown; }); } template <> void fmt_class_string<rsx_fifo_mode>::format(std::string& out, u64 arg) { format_enum(out, arg, [](rsx_fifo_mode value) { switch (value) { case rsx_fifo_mode::fast: return "Fast"; case rsx_fifo_mode::atomic: return "Atomic"; case rsx_fifo_mode::atomic_ordered: return "Ordered & Atomic"; case rsx_fifo_mode::as_ps3: return "PS3"; } return unknown; }); } template <> void fmt_class_string<sleep_timers_accuracy_level>::format(std::string& out, u64 arg) { format_enum(out, arg, [](sleep_timers_accuracy_level value) { switch (value) { case sleep_timers_accuracy_level::_as_host: return "As Host"; case sleep_timers_accuracy_level::_usleep: return "Usleep Only"; case sleep_timers_accuracy_level::_all_timers: return "All Timers"; } return unknown; }); } template <> void fmt_class_string<enter_button_assign>::format(std::string& out, u64 arg) { format_enum(out, arg, [](enter_button_assign value) { switch (value) { case enter_button_assign::circle: return "Enter with circle"; case enter_button_assign::cross: return "Enter with cross"; } return unknown; }); } template <> void fmt_class_string<np_internet_status>::format(std::string& out, u64 arg) { format_enum(out, arg, 
[](np_internet_status value) { switch (value) { case np_internet_status::disabled: return "Disconnected"; case np_internet_status::enabled: return "Connected"; } return unknown; }); } template <> void fmt_class_string<np_psn_status>::format(std::string& out, u64 arg) { format_enum(out, arg, [](np_psn_status value) { switch (value) { case np_psn_status::disabled: return "Disconnected"; case np_psn_status::psn_fake: return "Simulated"; case np_psn_status::psn_rpcn: return "RPCN"; } return unknown; }); } template <> void fmt_class_string<spu_decoder_type>::format(std::string& out, u64 arg) { format_enum(out, arg, [](spu_decoder_type type) { switch (type) { case spu_decoder_type::_static: return "Interpreter (static)"; case spu_decoder_type::dynamic: return "Interpreter (dynamic)"; case spu_decoder_type::asmjit: return "Recompiler (ASMJIT)"; case spu_decoder_type::llvm: return "Recompiler (LLVM)"; } return unknown; }); } template <> void fmt_class_string<spu_block_size_type>::format(std::string& out, u64 arg) { format_enum(out, arg, [](spu_block_size_type type) { switch (type) { case spu_block_size_type::safe: return "Safe"; case spu_block_size_type::mega: return "Mega"; case spu_block_size_type::giga: return "Giga"; } return unknown; }); } template <> void fmt_class_string<frame_limit_type>::format(std::string& out, u64 arg) { format_enum(out, arg, [](frame_limit_type value) { switch (value) { case frame_limit_type::none: return "Off"; case frame_limit_type::_30: return "30"; case frame_limit_type::_50: return "50"; case frame_limit_type::_60: return "60"; case frame_limit_type::_120: return "120"; case frame_limit_type::display_rate: return "Display"; case frame_limit_type::_auto: return "Auto"; case frame_limit_type::_ps3: return "PS3 Native"; case frame_limit_type::infinite: return "Infinite"; } return unknown; }); } template <> void fmt_class_string<microphone_handler>::format(std::string& out, u64 arg) { format_enum(out, arg, [](auto value) { switch (value) { 
case microphone_handler::null: return "Null"; case microphone_handler::standard: return "Standard"; case microphone_handler::singstar: return "SingStar"; case microphone_handler::real_singstar: return "Real SingStar"; case microphone_handler::rocksmith: return "Rocksmith"; } return unknown; }); } template <> void fmt_class_string<camera_handler>::format(std::string& out, u64 arg) { format_enum(out, arg, [](auto value) { switch (value) { case camera_handler::null: return "Null"; case camera_handler::fake: return "Fake"; case camera_handler::qt: return "Qt"; } return unknown; }); } template <> void fmt_class_string<music_handler>::format(std::string& out, u64 arg) { format_enum(out, arg, [](auto value) { switch (value) { case music_handler::null: return "Null"; case music_handler::qt: return "Qt"; } return unknown; }); } template <> void fmt_class_string<fake_camera_type>::format(std::string& out, u64 arg) { format_enum(out, arg, [](auto value) { switch (value) { case fake_camera_type::unknown: return "Unknown"; case fake_camera_type::eyetoy: return "EyeToy"; case fake_camera_type::eyetoy2: return "PS Eye"; case fake_camera_type::uvc1_1: return "UVC 1.1"; } return unknown; }); } template <> void fmt_class_string<camera_flip>::format(std::string& out, u64 arg) { format_enum(out, arg, [](auto value) { switch (value) { case camera_flip::none: return "None"; case camera_flip::horizontal: return "Horizontal"; case camera_flip::vertical: return "Vertical"; case camera_flip::both: return "Both"; } return unknown; }); } template <> void fmt_class_string<move_handler>::format(std::string& out, u64 arg) { format_enum(out, arg, [](auto value) { switch (value) { case move_handler::null: return "Null"; case move_handler::fake: return "Fake"; case move_handler::mouse: return "Mouse"; case move_handler::raw_mouse: return "Raw Mouse"; #ifdef HAVE_LIBEVDEV case move_handler::gun: return "Gun"; #endif } return unknown; }); } template <> void 
fmt_class_string<pad_handler_mode>::format(std::string& out, u64 arg) { format_enum(out, arg, [](auto value) { switch (value) { case pad_handler_mode::single_threaded: return "Single-threaded"; case pad_handler_mode::multi_threaded: return "Multi-threaded"; } return unknown; }); } template <> void fmt_class_string<buzz_handler>::format(std::string& out, u64 arg) { format_enum(out, arg, [](auto value) { switch (value) { case buzz_handler::null: return "Null"; case buzz_handler::one_controller: return "1 controller"; case buzz_handler::two_controllers: return "2 controllers"; } return unknown; }); } template <> void fmt_class_string<turntable_handler>::format(std::string& out, u64 arg) { format_enum(out, arg, [](auto value) { switch (value) { case turntable_handler::null: return "Null"; case turntable_handler::one_controller: return "1 controller"; case turntable_handler::two_controllers: return "2 controllers"; } return unknown; }); } template <> void fmt_class_string<ghltar_handler>::format(std::string& out, u64 arg) { format_enum(out, arg, [](auto value) { switch (value) { case ghltar_handler::null: return "Null"; case ghltar_handler::one_controller: return "1 controller"; case ghltar_handler::two_controllers: return "2 controllers"; } return unknown; }); } template <> void fmt_class_string<ppu_decoder_type>::format(std::string& out, u64 arg) { format_enum(out, arg, [](ppu_decoder_type type) { switch (type) { case ppu_decoder_type::_static: return "Interpreter (static)"; case ppu_decoder_type::llvm: return "Recompiler (LLVM)"; } return unknown; }); } template <> void fmt_class_string<shader_mode>::format(std::string& out, u64 arg) { format_enum(out, arg, [](shader_mode value) { switch (value) { case shader_mode::recompiler: return "Shader Recompiler"; case shader_mode::async_recompiler: return "Async Shader Recompiler"; case shader_mode::async_with_interpreter: return "Async with Shader Interpreter"; case shader_mode::interpreter_only: return "Shader Interpreter 
only"; } return unknown; }); } template <> void fmt_class_string<audio_provider>::format(std::string& out, u64 arg) { format_enum(out, arg, [](audio_provider value) { switch (value) { case audio_provider::none: return "None"; case audio_provider::cell_audio: return "CellAudio"; case audio_provider::rsxaudio: return "RSXAudio"; } return unknown; }); } template <> void fmt_class_string<audio_avport>::format(std::string& out, u64 arg) { format_enum(out, arg, [](audio_avport value) { switch (value) { case audio_avport::hdmi_0: return "HDMI 0"; case audio_avport::hdmi_1: return "HDMI 1"; case audio_avport::avmulti: return "AV multiout"; case audio_avport::spdif_0: return "SPDIF 0"; case audio_avport::spdif_1: return "SPDIF 1"; } return unknown; }); } template <> void fmt_class_string<audio_format>::format(std::string& out, u64 arg) { format_enum(out, arg, [](audio_format value) { switch (value) { case audio_format::stereo: return "Stereo"; case audio_format::surround_5_1: return "Surround 5.1"; case audio_format::surround_7_1: return "Surround 7.1"; case audio_format::automatic: return "Automatic"; case audio_format::manual: return "Manual"; } return unknown; }); } template <> void fmt_class_string<vk_gpu_scheduler_mode>::format(std::string& out, u64 arg) { format_enum(out, arg, [](vk_gpu_scheduler_mode value) { switch (value) { case vk_gpu_scheduler_mode::safe: return "Safe"; case vk_gpu_scheduler_mode::fast: return "Fast"; } return unknown; }); } template <> void fmt_class_string<thread_scheduler_mode>::format(std::string& out, u64 arg) { format_enum(out, arg, [](thread_scheduler_mode value) { switch (value) { case thread_scheduler_mode::old: return "RPCS3 Scheduler"; case thread_scheduler_mode::alt: return "RPCS3 Alternative Scheduler"; case thread_scheduler_mode::os: return "Operating System"; } return unknown; }); } template <> void fmt_class_string<gpu_preset_level>::format(std::string& out, u64 arg) { format_enum(out, arg, [](gpu_preset_level value) { switch 
(value) { case gpu_preset_level::_auto: return "Auto"; case gpu_preset_level::ultra: return "Ultra"; case gpu_preset_level::high: return "High"; case gpu_preset_level::low: return "Low"; } return unknown; }); } template <> void fmt_class_string<vk_exclusive_fs_mode>::format(std::string& out, u64 arg) { format_enum(out, arg, [](vk_exclusive_fs_mode value) { switch (value) { case vk_exclusive_fs_mode::unspecified: return "Automatic"; case vk_exclusive_fs_mode::disable: return "Disable"; case vk_exclusive_fs_mode::enable: return "Enable"; } return unknown; }); } template <> void fmt_class_string<stereo_render_mode_options>::format(std::string& out, u64 arg) { format_enum(out, arg, [](stereo_render_mode_options value) { switch (value) { case stereo_render_mode_options::disabled: return "Disabled"; case stereo_render_mode_options::side_by_side: return "Side-by-Side"; case stereo_render_mode_options::over_under: return "Over-Under"; case stereo_render_mode_options::interlaced: return "Interlaced"; case stereo_render_mode_options::anaglyph_red_green: return "Anaglyph Red-Green"; case stereo_render_mode_options::anaglyph_red_blue: return "Anaglyph Red-Blue"; case stereo_render_mode_options::anaglyph_red_cyan: return "Anaglyph Red-Cyan"; case stereo_render_mode_options::anaglyph_magenta_cyan: return "Anaglyph Magenta-Cyan"; case stereo_render_mode_options::anaglyph_trioscopic: return "Anaglyph Trioscopic"; case stereo_render_mode_options::anaglyph_amber_blue: return "Anaglyph Amber-Blue"; } return unknown; }); } template <> void fmt_class_string<output_scaling_mode>::format(std::string& out, u64 arg) { format_enum(out, arg, [](output_scaling_mode value) { switch (value) { case output_scaling_mode::nearest: return "Nearest"; case output_scaling_mode::bilinear: return "Bilinear"; case output_scaling_mode::fsr: return "FidelityFX Super Resolution"; } return unknown; }); } template <> void fmt_class_string<xfloat_accuracy>::format(std::string& out, u64 arg) { format_enum(out, 
arg, [](xfloat_accuracy value) { switch (value) { case xfloat_accuracy::accurate: return "Accurate"; case xfloat_accuracy::approximate: return "Approximate"; case xfloat_accuracy::relaxed: return "Relaxed"; case xfloat_accuracy::inaccurate: return "Inaccurate"; } return unknown; }); }
17,106
C++
.cpp
633
24.606635
90
0.714321
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
5,162
perf_meter.cpp
RPCS3_rpcs3/rpcs3/Emu/perf_meter.cpp
#include "stdafx.h" #include "perf_meter.hpp" #include "util/sysinfo.hpp" #include "util/fence.hpp" #include "util/tsc.hpp" #include "Utilities/Thread.h" #include <map> #include <mutex> void perf_stat_base::push(u64 ns[66]) noexcept { if (!ns[0]) { return; } for (u32 i = 0; i < 66; i++) { m_log[i] += atomic_storage<u64>::exchange(ns[i], 0); } } void perf_stat_base::print(const char* name) const noexcept { if (u64 num_total = m_log[0].load()) { perf_log.notice(u8"Perf stats for %s: total events: %u (total time %.4fs, avg %.4fµs)", name, num_total, m_log[65].load() / 1000'000'000., m_log[65].load() / 1000. / num_total); for (u32 i = 0; i < 13; i++) { if (u64 count = m_log[i + 1].load()) { perf_log.notice(u8"Perf stats for %s: events < %.3fµs: %u", name, std::pow(2., i) / 1000., count); } } for (u32 i = 14; i < 23; i++) { if (u64 count = m_log[i + 1].load()) [[unlikely]] { perf_log.notice("Perf stats for %s: events < %.3fms: %u", name, std::pow(2., i) / 1000'000., count); } } for (u32 i = 24; i < 33; i++) { if (u64 count = m_log[i + 1].load()) [[unlikely]] { perf_log.notice("Perf stats for %s: events < %.3fs: %u", name, std::pow(2., i) / 1000'000'000., count); } } for (u32 i = 34; i < 43; i++) { if (u64 count = m_log[i + 1].load()) [[unlikely]] { perf_log.notice("Perf stats for %s: events < %.0f SEC: %u", name, std::pow(2., i) / 1000'000'000., count); } } for (u32 i = 44; i < 63; i++) { if (u64 count = m_log[i + 1].load()) [[unlikely]] { perf_log.notice("Perf stats for %s: events < %.0f MIN: %u", name, std::pow(2., i) / 60'000'000'000., count); } } } } SAFE_BUFFERS(void) perf_stat_base::push(u64 data[66], u64 start_time, const char* name) noexcept { // Event end const u64 end_time = (utils::lfence(), utils::get_tsc()); // Compute difference in seconds const f64 diff = (end_time - start_time) * 1. 
/ utils::get_tsc_freq(); // Register perf stat in nanoseconds const u64 ns = static_cast<u64>(diff * 1000'000'000.); // Print in microseconds if (static_cast<u64>(diff * 1000'000.) >= g_cfg.core.perf_report_threshold) { perf_log.notice(u8"%s: %.3fµs", name, diff * 1000'000.); } data[0] += ns != 0; data[64 - std::countl_zero(ns)]++; data[65] += ns; } static shared_mutex s_perf_mutex; static std::map<std::string, perf_stat_base> s_perf_acc; static std::multimap<std::string, u64*> s_perf_sources; void perf_stat_base::add(u64 ns[66], const char* name) noexcept { // Don't attempt to register some foreign/unnamed threads if (!thread_ctrl::get_current()) { return; } std::lock_guard lock(s_perf_mutex); s_perf_sources.emplace(name, ns); s_perf_acc[name]; } void perf_stat_base::remove(u64 ns[66], const char* name) noexcept { if (!thread_ctrl::get_current()) { return; } std::lock_guard lock(s_perf_mutex); const auto found = s_perf_sources.equal_range(name); for (auto it = found.first; it != found.second; it++) { if (it->second == ns) { s_perf_acc[name].push(ns); s_perf_sources.erase(it); break; } } } void perf_stat_base::report() noexcept { std::lock_guard lock(s_perf_mutex); perf_log.notice("Performance report begin (%u src, %u acc):", s_perf_sources.size(), s_perf_acc.size()); for (auto& [name, ns] : s_perf_sources) { s_perf_acc[name].push(ns); } for (auto& [name, data] : s_perf_acc) { data.print(name.c_str()); } s_perf_acc.clear(); perf_log.notice("Performance report end."); }
3,529
C++
.cpp
125
25.552
179
0.623107
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
true
false
5,163
System.cpp
RPCS3_rpcs3/rpcs3/Emu/System.cpp
#include "stdafx.h" #include "VFS.h" #include "Utilities/bin_patch.h" #include "Emu/Memory/vm.h" #include "Emu/System.h" #include "Emu/system_progress.hpp" #include "Emu/system_utils.hpp" #include "Emu/perf_meter.hpp" #include "Emu/perf_monitor.hpp" #include "Emu/vfs_config.h" #include "Emu/IPC_config.h" #include "Emu/savestate_utils.hpp" #include "Emu/Cell/ErrorCodes.h" #include "Emu/Cell/PPUThread.h" #include "Emu/Cell/PPUDisAsm.h" #include "Emu/Cell/PPUAnalyser.h" #include "Emu/Cell/SPUThread.h" #include "Emu/Cell/SPURecompiler.h" #include "Emu/RSX/RSXThread.h" #include "Emu/Cell/lv2/sys_process.h" #include "Emu/Cell/lv2/sys_sync.h" #include "Emu/Cell/lv2/sys_prx.h" #include "Emu/Cell/lv2/sys_overlay.h" #include "Emu/Cell/lv2/sys_spu.h" #include "Emu/Cell/Modules/cellGame.h" #include "Emu/Cell/Modules/cellSysutil.h" #include "Emu/title.h" #include "Emu/IdManager.h" #include "Emu/RSX/Capture/rsx_replay.h" #include "Emu/RSX/Overlays/overlay_message.h" #include "Loader/PSF.h" #include "Loader/TAR.h" #include "Loader/ELF.h" #include "Loader/disc.h" #include "rpcs3_version.h" #include "Utilities/StrUtil.h" #include "../Crypto/unself.h" #include "../Crypto/unzip.h" #include "util/logs.hpp" #include "util/init_mutex.hpp" #include <fstream> #include <memory> #include <regex> #include <optional> #include "Utilities/JIT.h" #include "display_sleep_control.h" #include "Emu/IPC_socket.h" #if defined(HAVE_VULKAN) #include "Emu/RSX/VK/VulkanAPI.h" #endif LOG_CHANNEL(sys_log, "SYS"); // Preallocate 32 MiB stx::manual_typemap<void, 0x20'00000, 128> g_fixed_typemap; bool g_log_all_errors = false; bool g_use_rtm = false; u64 g_rtm_tx_limit1 = 0; u64 g_rtm_tx_limit2 = 0; std::string g_cfg_defaults; atomic_t<u64> g_watchdog_hold_ctr{0}; extern bool ppu_load_exec(const ppu_exec_object&, bool virtual_load, const std::string&, utils::serial* = nullptr); extern void spu_load_exec(const spu_exec_object&); extern void spu_load_rel_exec(const spu_rel_object&); extern void 
ppu_precompile(std::vector<std::string>& dir_queue, std::vector<ppu_module*>* loaded_prx); extern bool ppu_initialize(const ppu_module&, bool check_only = false, u64 file_size = 0); extern void ppu_finalize(const ppu_module&); extern void ppu_unload_prx(const lv2_prx&); extern std::shared_ptr<lv2_prx> ppu_load_prx(const ppu_prx_object&, bool virtual_load, const std::string&, s64 = 0, utils::serial* = nullptr); extern std::pair<std::shared_ptr<lv2_overlay>, CellError> ppu_load_overlay(const ppu_exec_object&, bool virtual_load, const std::string& path, s64 = 0, utils::serial* = nullptr); extern bool ppu_load_rel_exec(const ppu_rel_object&); extern void send_close_home_menu_cmds(); extern void check_microphone_permissions(); extern void signal_system_cache_can_stay(); fs::file make_file_view(const fs::file& file, u64 offset, u64 size); extern std::string get_syscache_state_corruption_indicator_file_path(std::string_view dir_path); fs::file g_tty; atomic_t<s64> g_tty_size{0}; std::array<std::deque<std::string>, 16> g_tty_input; std::mutex g_tty_mutex; thread_local std::string_view g_tls_serialize_name; extern thread_local std::string(*g_tls_log_prefix)(); extern f64 get_cpu_program_usage_percent(u64 hash); // Report error and call std::abort(), defined in main.cpp [[noreturn]] void report_fatal_error(std::string_view text, bool is_html = false, bool include_help_text = true); void initialize_timebased_time(u64 timebased_init, bool reset = false); namespace atomic_wait { extern void parse_hashtable(bool(*cb)(u64 id, u32 refs, u64 ptr, u32 max_coll)); } namespace rsx { void set_native_ui_flip(); } template<> void fmt_class_string<game_boot_result>::format(std::string& out, u64 arg) { format_enum(out, arg, [](game_boot_result value) { switch (value) { case game_boot_result::no_errors: return "No errors"; case game_boot_result::generic_error: return "Generic error"; case game_boot_result::nothing_to_boot: return "Nothing to boot"; case 
game_boot_result::wrong_disc_location: return "Wrong disc location"; case game_boot_result::invalid_file_or_folder: return "Invalid file or folder"; case game_boot_result::invalid_bdvd_folder: return "Invalid dev_bdvd folder"; case game_boot_result::install_failed: return "Game install failed"; case game_boot_result::decryption_error: return "Failed to decrypt content"; case game_boot_result::file_creation_error: return "Could not create important files"; case game_boot_result::firmware_missing: return "Firmware is missing"; case game_boot_result::unsupported_disc_type: return "This disc type is not supported yet"; case game_boot_result::savestate_corrupted: return "Savestate data is corrupted or it's not an RPCS3 savestate"; case game_boot_result::savestate_version_unsupported: return "Savestate versioning data differs from your RPCS3 build.\nTry to use an older or newer RPCS3 build.\nEspecially if you know the build that created the savestate."; case game_boot_result::still_running: return "Game is still running"; case game_boot_result::already_added: return "Game was already added"; } return unknown; }); } template<> void fmt_class_string<cfg_mode>::format(std::string& out, u64 arg) { format_enum(out, arg, [](cfg_mode value) { switch (value) { case cfg_mode::custom: return "custom config"; case cfg_mode::custom_selection: return "custom config selection"; case cfg_mode::global: return "global config"; case cfg_mode::config_override: return "config override"; case cfg_mode::continuous: return "continuous config"; case cfg_mode::default_config: return "default config"; } return unknown; }); } void Emulator::CallFromMainThread(std::function<void()>&& func, atomic_t<u32>* wake_up, bool track_emu_state, u64 stop_ctr, std::source_location src_loc) const { std::function<void()> final_func = [this, before = IsStopped(), track_emu_state, thread_name = thread_ctrl::get_name(), src = src_loc , count = (stop_ctr == umax ? 
+m_stop_ctr : stop_ctr), func = std::move(func)] { const bool call_it = (!track_emu_state || (count == m_stop_ctr && before == IsStopped())); sys_log.trace("Callback from thread '%s' at [%s] is %s", thread_name, src, call_it ? "called" : "skipped"); if (call_it) { func(); } }; m_cb.call_from_main_thread(std::move(final_func), wake_up); } void Emulator::BlockingCallFromMainThread(std::function<void()>&& func, std::source_location src_loc) const { atomic_t<u32> wake_up = 0; sys_log.trace("Blocking Callback from thread '%s' at [%s] is queued", thread_ctrl::get_name(), src_loc); CallFromMainThread(std::move(func), &wake_up, true, umax, src_loc); bool logged = false; while (!wake_up) { if (!logged && !thread_ctrl::get_current()) { logged = true; sys_log.error("Calling thread of BlockingCallFromMainThread is not of named_thread<>, calling from %s", src_loc); } wake_up.wait(0); } } // This function ensures constant initialization order between different compilers and builds void init_fxo_for_exec(utils::serial* ar, bool full = false) { g_fxo->init<main_ppu_module>(); void init_ppu_functions(utils::serial* ar, bool full); if (full) { init_ppu_functions(ar, true); } Emu.ConfigurePPUCache(); g_fxo->init(false, ar, [](){ Emu.ExecPostponedInitCode(); }); Emu.GetCallbacks().init_gs_render(ar); Emu.GetCallbacks().init_pad_handler(Emu.GetTitleID()); Emu.GetCallbacks().init_kb_handler(); Emu.GetCallbacks().init_mouse_handler(); usz pos = 0; if (ar) { pos = ar->pos; } // TODO: Remove second call when possible Emu.ExecPostponedInitCode(); if (ar) { ensure(pos == ar->pos); (*ar)(Emu.m_savestate_extension_flags1); const usz advance = (Emu.m_savestate_extension_flags1 & Emulator::SaveStateExtentionFlags1::SupportsMenuOpenResume ? 32 : 31); // Reserved area if (!load_and_check_reserved(*ar, advance)) { sys_log.error("Potential failure to load savestate: padding buyes are not 0. 
%s", *ar); } } } // Some settings are not allowed in certain PPU decoders static void fixup_settings(const psf::registry* _psf) { if (g_cfg.core.ppu_decoder != ppu_decoder_type::_static) { if (g_cfg.core.ppu_use_nj_bit) { sys_log.todo("The setting '%s' is currently not supported with PPU decoder type '%s' and will therefore be disabled during emulation.", g_cfg.core.ppu_use_nj_bit.get_name(), g_cfg.core.ppu_decoder.get()); g_cfg.core.ppu_use_nj_bit.set(false); } if (g_cfg.core.ppu_set_vnan) { sys_log.todo("The setting '%s' is currently not supported with PPU decoder type '%s' and will therefore be disabled during emulation.", g_cfg.core.ppu_set_vnan.get_name(), g_cfg.core.ppu_decoder.get()); g_cfg.core.ppu_set_vnan.set(false); } if (g_cfg.core.ppu_set_fpcc) { sys_log.todo("The setting '%s' is currently not supported with PPU decoder type '%s' and will therefore be disabled during emulation.", g_cfg.core.ppu_set_fpcc.get_name(), g_cfg.core.ppu_decoder.get()); g_cfg.core.ppu_set_fpcc.set(false); } } if (const u32 psf_resolution = _psf ? 
psf::get_integer(*_psf, "RESOLUTION", 0) : 0) { const std::map<video_resolution, u32> resolutions { { video_resolution::_480p, psf::resolution_flag::_480 | psf::resolution_flag::_480_16_9 }, { video_resolution::_480i, psf::resolution_flag::_480 | psf::resolution_flag::_480_16_9 }, { video_resolution::_576p, psf::resolution_flag::_576 | psf::resolution_flag::_576_16_9 }, { video_resolution::_576i, psf::resolution_flag::_576 | psf::resolution_flag::_576_16_9 }, { video_resolution::_720p, psf::resolution_flag::_720 }, { video_resolution::_1080p, psf::resolution_flag::_1080 }, { video_resolution::_1080i, psf::resolution_flag::_1080 }, { video_resolution::_1600x1080p, psf::resolution_flag::_1080 }, { video_resolution::_1440x1080p, psf::resolution_flag::_1080 }, { video_resolution::_1280x1080p, psf::resolution_flag::_1080 }, { video_resolution::_960x1080p, psf::resolution_flag::_1080 }, }; const video_resolution resolution = g_cfg.video.resolution; constexpr video_resolution new_resolution = video_resolution::_720p; if (!resolutions.contains(resolution) || !(psf_resolution & resolutions.at(resolution))) { sys_log.error("The game does not support a resolution of %s, so we are forcing the resolution to %s.", resolution, new_resolution); g_cfg.video.resolution.set(new_resolution); } } } extern void dump_executable(std::span<const u8> data, const ppu_module* _module, std::string_view title_id) { std::string_view filename = _module->path; filename = filename.substr(filename.find_last_of('/') + 1); const std::string lower = fmt::to_lower(filename); // Format filename and directory name // Make each directory for each file so tools like IDA can work on it cleanly const std::string dir_path = fs::get_cache_dir() + "ppu_progs/" + std::string{!title_id.empty() ? title_id : "untitled"} + fmt::format("-%s-%s", fmt::base57(_module->sha1), filename) + '/'; const std::string file_path = dir_path + (lower.ends_with(".prx") || lower.ends_with(".sprx") ? 
"prog.prx" : "exec.elf"); if (fs::create_dir(dir_path) || fs::g_tls_error == fs::error::exist) { if (fs::file out{file_path, fs::create + fs::write}) { if (out.size() == data.size()) { // Risky optimization: assume if file size match they are equal and does not need to rewrite it // But it is a debug option and if there are problems the user/developer can remove the previous file } else { out.trunc(0); out.write(data.data(), data.size()); } } else { sys_log.error("Failed to save decrypted executable of \"%s\": Failure to create file \"%s\" (%s)", Emu.GetBoot(), filename, fs::g_tls_error); } } else { sys_log.error("Failed to save decrypted executable of \"%s\": Failure to create directory \"%s\" (%s)", Emu.GetBoot(), dir_path, fs::g_tls_error); } } void Emulator::Init() { jit_runtime::initialize(); if (!g_tty) { const auto tty_path = fs::get_cache_dir() + "TTY.log"; g_tty.open(tty_path, fs::rewrite + fs::append); if (!g_tty) { sys_log.fatal("Failed to create TTY log: %s (%s)", tty_path, fs::g_tls_error); } } g_fxo->reset(); // Reset defaults, cache them g_cfg_vfs.from_default(); g_cfg.from_default(); g_cfg.name.clear(); // Not all renderers are known at compile time, so set a provided default if possible if (m_default_renderer == video_renderer::vulkan && !m_default_graphics_adapter.empty()) { g_cfg.video.renderer.set(m_default_renderer); g_cfg.video.vk.adapter.from_string(m_default_graphics_adapter); } g_cfg_defaults = g_cfg.to_string(); const std::string cfg_path = fs::get_config_dir() + "/config.yml"; // Save new global config if it doesn't exist or is empty if (fs::stat_t info{}; !fs::get_stat(cfg_path, info) || info.size == 0) { Emulator::SaveSettings(g_cfg_defaults, {}); } // Load VFS config g_cfg_vfs.load(); sys_log.notice("Using VFS config:\n%s", g_cfg_vfs.to_string()); // Mount all devices const std::string emu_dir = rpcs3::utils::get_emu_dir(); const std::string elf_dir = fs::get_parent_dir(m_path); const std::string dev_bdvd = 
g_cfg_vfs.get(g_cfg_vfs.dev_bdvd, emu_dir); // Only used for make_path const std::string dev_hdd0 = g_cfg_vfs.get(g_cfg_vfs.dev_hdd0, emu_dir); const std::string dev_hdd1 = g_cfg_vfs.get(g_cfg_vfs.dev_hdd1, emu_dir); const std::string dev_flash = g_cfg_vfs.get_dev_flash(); const std::string dev_flash2 = g_cfg_vfs.get_dev_flash2(); const std::string dev_flash3 = g_cfg_vfs.get_dev_flash3(); vfs::mount("/dev_hdd0", dev_hdd0); vfs::mount("/dev_flash", dev_flash); vfs::mount("/dev_flash2", dev_flash2); vfs::mount("/dev_flash3", dev_flash3); vfs::mount("/app_home", g_cfg_vfs.app_home.to_string().empty() ? elf_dir + '/' : g_cfg_vfs.get(g_cfg_vfs.app_home, emu_dir)); std::string dev_usb; for (const auto& [key, value] : g_cfg_vfs.dev_usb.get_map()) { const cfg::device_info usb_info = g_cfg_vfs.get_device(g_cfg_vfs.dev_usb, key, emu_dir); if (key.size() != 11 || !key.starts_with("/dev_usb00"sv) || key.back() < '0' || key.back() > '7') { sys_log.error("Trying to mount unsupported usb device: %s", key); continue; } if (fs::is_dir(usb_info.path)) vfs::mount(key, usb_info.path); if (key == "/dev_usb000"sv) { dev_usb = usb_info.path; } } ensure(!dev_usb.empty()); if (!hdd1.empty()) { vfs::mount("/dev_hdd1", hdd1); sys_log.notice("Hdd1: %s", vfs::get("/dev_hdd1")); } const bool is_exitspawn = m_config_mode == cfg_mode::continuous; // Load config file if (m_config_mode == cfg_mode::config_override) { if (const fs::file cfg_file{m_config_path, fs::read + fs::create}) { sys_log.notice("Applying config override: %s", m_config_path); if (!g_cfg.from_string(cfg_file.to_string())) { sys_log.fatal("Failed to apply config: %s. Proceeding with regular configuration.", m_config_path); m_config_path.clear(); m_config_mode = cfg_mode::custom; } else { sys_log.success("Applied config override: %s", m_config_path); g_cfg.name = m_config_path; } } else { sys_log.fatal("Failed to access config: %s (%s). 
Proceeding with regular configuration.", m_config_path, fs::g_tls_error); m_config_path.clear(); m_config_mode = cfg_mode::custom; } } // Reload global configuration if (m_config_mode != cfg_mode::config_override && m_config_mode != cfg_mode::default_config) { if (const fs::file cfg_file{cfg_path, fs::read + fs::create}) { sys_log.notice("Applying global config: %s", cfg_path); if (!g_cfg.from_string(cfg_file.to_string())) { sys_log.fatal("Failed to apply global config: %s", cfg_path); } g_cfg.name = cfg_path; } else { sys_log.fatal("Failed to access global config: %s (%s)", cfg_path, fs::g_tls_error); } } // Disable incompatible settings fixup_settings(nullptr); // Backup config g_backup_cfg.from_string(g_cfg.to_string()); // Create directories (can be disabled if necessary) auto make_path_verbose = [&](const std::string& path, bool must_exist_outside_emu_dir) { if (fs::is_dir(path)) { return true; } if (must_exist_outside_emu_dir) { const std::string parent = fs::get_parent_dir(path); const std::string emu_dir_no_delim = emu_dir.substr(0, emu_dir.find_last_not_of(fs::delim) + 1); if (parent != emu_dir_no_delim && GetCallbacks().resolve_path(parent) != GetCallbacks().resolve_path(emu_dir_no_delim)) { sys_log.fatal("Cannot use '%s' for Virtual File System because it does not exist.\nPlease specify an existing and writable directory path in Toolbar -> Manage -> Virtual File System.", path); return false; } } if (!fs::create_path(path)) { sys_log.fatal("Failed to create path: %s (%s)", path, fs::g_tls_error); return false; } return true; }; const std::string save_path = dev_hdd0 + "home/" + m_usr + "/savedata/"; const std::string user_path = dev_hdd0 + "home/" + m_usr + "/localusername"; if (g_cfg.vfs.init_dirs) { make_path_verbose(dev_bdvd, true); make_path_verbose(dev_flash, true); make_path_verbose(dev_flash2, true); make_path_verbose(dev_flash3, true); if (make_path_verbose(dev_usb, true)) { make_path_verbose(dev_usb + "MUSIC/", false); make_path_verbose(dev_usb 
+ "VIDEO/", false); make_path_verbose(dev_usb + "PICTURE/", false); make_path_verbose(dev_usb + "PS3/EXPORT/PSV/", false); // PS1 and PS2 Saves go here make_path_verbose(dev_usb + "PS3/SAVEDATA", false); make_path_verbose(dev_usb + "PS3/THEME", false); make_path_verbose(dev_usb + "PS3/UPDATE", false); } if (make_path_verbose(dev_hdd1, true)) { make_path_verbose(dev_hdd1 + "caches/", false); } if (make_path_verbose(dev_hdd0, true)) { make_path_verbose(dev_hdd0 + "game/", false); make_path_verbose(dev_hdd0 + reinterpret_cast<const char*>(u8"game/$locks/"), false); make_path_verbose(dev_hdd0 + "game/TEST12345/USRDIR/", false); // Some test elfs rely on this make_path_verbose(dev_hdd0 + "home/", false); make_path_verbose(dev_hdd0 + "home/" + m_usr + "/", false); make_path_verbose(dev_hdd0 + "home/" + m_usr + "/exdata/", false); make_path_verbose(save_path, false); make_path_verbose(dev_hdd0 + "home/" + m_usr + "/trophy/", false); if (!fs::write_file(user_path, fs::create + fs::excl + fs::write, "User"s)) { if (fs::g_tls_error != fs::error::exist) { sys_log.fatal("Failed to create file: %s (%s)", user_path, fs::g_tls_error); } } make_path_verbose(dev_hdd0 + "savedata/", false); make_path_verbose(dev_hdd0 + "savedata/vmc/", false); make_path_verbose(dev_hdd0 + "photo/", false); make_path_verbose(dev_hdd0 + "music/", false); make_path_verbose(dev_hdd0 + "theme/", false); make_path_verbose(dev_hdd0 + "video/", false); make_path_verbose(dev_hdd0 + "drm/", false); make_path_verbose(dev_hdd0 + "vsh/", false); make_path_verbose(dev_hdd0 + "crash_report/", false); make_path_verbose(dev_hdd0 + "tmp/", false); make_path_verbose(dev_hdd0 + "mms/", false); //multimedia server for vsh, created from rebuilding the database make_path_verbose(dev_hdd0 + "data/", false); make_path_verbose(dev_hdd0 + "vm/", false); } const std::string games_common_dir = g_cfg_vfs.get(g_cfg_vfs.games_dir, emu_dir); if (make_path_verbose(games_common_dir, true)) { fs::write_file(games_common_dir + "/Disc 
Games Can Be Put Here For Automatic Detection.txt", fs::create + fs::excl + fs::write, ""s); #ifdef _WIN32 if (const std::string rpcs3_shortcuts = games_common_dir + "/shortcuts"; make_path_verbose(rpcs3_shortcuts, false)) { fs::write_file(rpcs3_shortcuts + "/Copyable Shortcuts For Installed Games Would Be Added Here.txt", fs::create + fs::excl + fs::write, ""s); } #endif } } make_path_verbose(fs::get_cache_dir() + "shaderlog/", false); make_path_verbose(fs::get_cache_dir() + "spu_progs/", false); make_path_verbose(fs::get_cache_dir() + "ppu_progs/", false); make_path_verbose(fs::get_parent_dir(get_savestate_file("NO_ID", "/NO_FILE", -1, -1)), false); make_path_verbose(fs::get_config_dir() + "captures/", false); make_path_verbose(fs::get_config_dir() + "sounds/", false); make_path_verbose(patch_engine::get_patches_path(), false); // Log user if (m_usr.empty()) { sys_log.fatal("No user configured"); } else { std::string username; if (const fs::file file = fs::file(user_path)) { if (const std::string localusername = file.to_string(); !localusername.empty()) { username = localusername; } else { sys_log.warning("Empty username in file: '%s'. 
Consider setting a username for user '%s' in the user manager.", user_path, m_usr); } } else { sys_log.error("Could not read file: '%s'", user_path); } if (username.empty()) { sys_log.notice("Logged in as user '%s'", m_usr); } else { sys_log.notice("Logged in as user '%s' with the username '%s'", m_usr, username); } } if (is_exitspawn) { // Actions not taken during exitspawn return; } // Fixup savedata for (const auto& entry : fs::dir(save_path)) { if (entry.is_directory && entry.name.starts_with(".backup_")) { const std::string desired = entry.name.substr(8); const std::string pending = save_path + ".working_" + desired; if (fs::is_dir(pending)) { // Finalize interrupted saving if (!fs::rename(pending, save_path + desired, false)) { sys_log.fatal("Failed to fix save data: %s (%s)", pending, fs::g_tls_error); continue; } sys_log.success("Fixed save data: %s", desired); } // Remove pending backup data if (!fs::remove_all(save_path + entry.name)) { sys_log.fatal("Failed to remove save data backup: %s%s (%s)", save_path, entry.name, fs::g_tls_error); } else { sys_log.success("Removed save data backup: %s%s", save_path, entry.name); } } } // Limit cache size if (g_cfg.vfs.limit_cache_size) { rpcs3::cache::limit_cache_size(); } // Wipe clean VSH's temporary directory of choice if (g_cfg.vfs.empty_hdd0_tmp && !fs::remove_all(dev_hdd0 + "tmp/", false, true)) { sys_log.error("Could not clean /dev_hdd0/tmp/ (%s)", fs::g_tls_error); } // Remove temporary game data that would have been removed when cellGame has been properly shut for (const auto& entry : fs::dir(dev_hdd0 + "game/")) { if (entry.name.starts_with("_GDATA_") && fs::is_dir(dev_hdd0 + "game/" + entry.name + "/USRDIR/")) { const std::string target = dev_hdd0 + "game/" + entry.name; if (!fs::remove_all(target, true, true)) { sys_log.error("Could not clean \"%s\" (%s)", target, fs::g_tls_error); } } } // Load IPC config g_cfg_ipc.load(); sys_log.notice("Using IPC config:\n%s", g_cfg_ipc.to_string()); // Create and 
start IPC server only if needed if (g_cfg_ipc.get_server_enabled()) { g_fxo->init<IPC_socket::IPC_server_manager>(true); } } void Emulator::SetUsr(const std::string& user) { sys_log.notice("Setting user ID '%s'", user); const u32 id = rpcs3::utils::check_user(user); if (id == 0) { fmt::throw_exception("Failed to set user ID '%s'", user); } m_usrid = id; m_usr = user; } std::string Emulator::GetBackgroundPicturePath() const { // Try to find a custom icon first std::string path = fs::get_config_dir() + "/Icons/game_icons/" + GetTitleID() + "/PIC1.PNG"; if (fs::is_file(path)) { return path; } std::string disc_dir = vfs::get("/dev_bdvd/PS3_GAME"); if (m_sfo_dir == disc_dir) { disc_dir.clear(); } constexpr auto search_barrier = "barrier"; std::initializer_list<std::string> testees = { m_sfo_dir + "/PIC0.PNG", m_sfo_dir + "/PIC1.PNG", m_sfo_dir + "/PIC2.PNG", m_sfo_dir + "/PIC3.PNG", search_barrier, !disc_dir.empty() ? (disc_dir + "/PIC0.PNG") : disc_dir, !disc_dir.empty() ? (disc_dir + "/PIC1.PNG") : disc_dir, !disc_dir.empty() ? (disc_dir + "/PIC2.PNG") : disc_dir, !disc_dir.empty() ? (disc_dir + "/PIC3.PNG") : disc_dir, search_barrier, m_sfo_dir + "/ICON0.PNG", search_barrier, !disc_dir.empty() ? 
(disc_dir + "/ICON0.PNG") : disc_dir, }; // Try to return the picture with the highest resultion // Be naive and assume that its the one that spans over the most bytes usz max_file_size = 0; usz index_of_largest_file = umax; for (usz index = 0; index < testees.size(); index++) { const std::string& path = testees.begin()[index]; fs::stat_t file_stat{}; if (path == search_barrier) { if (index_of_largest_file != umax) { // Found a file in the preferred image group break; } continue; } if (path.empty() || !fs::get_stat(path, file_stat) || file_stat.is_directory) { continue; } if (max_file_size < file_stat.size) { max_file_size = file_stat.size; index_of_largest_file = index; } } if (index_of_largest_file == umax) { return {}; } return testees.begin()[index_of_largest_file]; } bool Emulator::BootRsxCapture(const std::string& path) { if (m_state != system_state::stopped) { return false; } fs::file in_file(path); if (!in_file) { return false; } std::unique_ptr<rsx::frame_capture_data> frame = std::make_unique<rsx::frame_capture_data>(); utils::serial load; load.set_reading_state(); const std::string lower = fmt::to_lower(path); if (lower.ends_with(".gz") || lower.ends_with(".zst")) { if (lower.ends_with(".gz")) { load.m_file_handler = make_compressed_serialization_file_handler(std::move(in_file)); } else { load.m_file_handler = make_compressed_zstd_serialization_file_handler(std::move(in_file)); } // Forcefully read some data to check validity load.pop<uchar>(); load.pos -= sizeof(uchar); if (load.data.empty()) { sys_log.error("Failed to unzip rsx capture file!"); return false; } } else { load.m_file_handler = make_uncompressed_serialization_file_handler(std::move(in_file)); } load(*frame); if (frame->magic != rsx::c_fc_magic) { sys_log.error("Invalid rsx capture file!"); return false; } if (frame->version != rsx::c_fc_version) { sys_log.error("Rsx capture file version not supported! 
Expected %d, found %d", +rsx::c_fc_version, frame->version); return false; } if (frame->LE_format != u32{std::endian::little == std::endian::native}) { static constexpr std::string_view machines[2]{"Big-Endian", "Little-Endian"}; sys_log.error("Rsx capture byte endianness not supported! Expected %s format, found %s format" , machines[frame->LE_format ^ 1], machines[frame->LE_format]); return false; } Init(); g_cfg.video.disable_on_disk_shader_cache.set(true); vm::init(); g_fxo->init(false); // Initialize progress dialog g_fxo->init<named_thread<progress_dialog_server>>(); // Initialize performance monitor g_fxo->init<named_thread<perf_monitor>>(); // PS3 'executable' m_state = system_state::ready; GetCallbacks().on_ready(); GetCallbacks().init_gs_render(nullptr); GetCallbacks().init_pad_handler(""); GetCallbacks().on_run(false); m_state = system_state::starting; ensure(g_fxo->init<named_thread<rsx::rsx_replay_thread>>("RSX Replay", std::move(frame))); return true; } game_boot_result Emulator::GetElfPathFromDir(std::string& elf_path, const std::string& path) { if (!fs::is_dir(path)) { return game_boot_result::invalid_file_or_folder; } static const char* boot_list[] = { "/EBOOT.BIN", "/USRDIR/EBOOT.BIN", "/USRDIR/ISO.BIN.EDAT", "/PS3_GAME/USRDIR/EBOOT.BIN", }; for (std::string elf : boot_list) { elf = path + elf; if (fs::is_file(elf)) { elf_path = elf; return game_boot_result::no_errors; } } return game_boot_result::invalid_file_or_folder; } game_boot_result Emulator::BootGame(const std::string& path, const std::string& title_id, bool direct, cfg_mode config_mode, const std::string& config_path) { auto save_args = std::make_tuple(m_path, m_path_original, argv, envp, data, disc, klic, hdd1, m_config_mode, m_config_path); auto restore_on_no_boot = [&](game_boot_result result) { if (IsStopped() || result != game_boot_result::no_errors) { std::tie(m_path, m_path_original, argv, envp, data, disc, klic, hdd1, m_config_mode, m_config_path) = std::move(save_args); } return 
result; }; if (m_path_original.empty() || config_mode != cfg_mode::continuous) { m_path_original = m_path; } m_path_old = m_path; m_config_mode = config_mode; m_config_path = config_path; // Handle files and special paths inside Load unmodified if (direct || !fs::is_dir(path)) { m_path = path; return restore_on_no_boot(Load(title_id)); } game_boot_result result = game_boot_result::nothing_to_boot; std::string elf; if (const game_boot_result res = GetElfPathFromDir(elf, path); res == game_boot_result::no_errors) { ensure(!elf.empty()); m_path = elf; result = Load(title_id); } return restore_on_no_boot(result); } void Emulator::SetForceBoot(bool force_boot) { m_force_boot = force_boot; } game_boot_result Emulator::Load(const std::string& title_id, bool is_disc_patch, usz recursion_count) { if (m_state != system_state::stopped) { return game_boot_result::still_running; } // Enable logging rpcs3::utils::configure_logs(true); m_ar.reset(); { if (m_config_mode == cfg_mode::continuous) { // The program is being booted from another running program // CELL_GAME_GAMETYPE_GAMEDATA is not used as boot type if (m_cat == "DG"sv) { m_boot_source_type = CELL_GAME_GAMETYPE_DISC; } else if (m_cat == "HM"sv) { m_boot_source_type = CELL_GAME_GAMETYPE_HOME; } else { m_boot_source_type = CELL_GAME_GAMETYPE_HDD; } } else { fs::file save{m_path, fs::isfile + fs::read}; if (m_path.ends_with(".SAVESTAT") && save && save.size() >= 8 && save.read<u64>() == "RPCS3SAV"_u64) { m_ar = std::make_shared<utils::serial>(); m_ar->set_reading_state(); m_ar->m_file_handler = make_uncompressed_serialization_file_handler(std::move(save)); } else if (save && m_path.ends_with(".zst")) { m_ar = std::make_shared<utils::serial>(); m_ar->set_reading_state(); m_ar->m_file_handler = make_compressed_zstd_serialization_file_handler(std::move(save)); if (m_ar->try_read<u64>().second != "RPCS3SAV"_u64) { m_ar.reset(); } else { m_ar->pos = 0; } } else if (save && m_path.ends_with(".gz")) { m_ar = 
std::make_shared<utils::serial>(); m_ar->set_reading_state(); m_ar->m_file_handler = make_compressed_serialization_file_handler(std::move(save)); if (m_ar->try_read<u64>().second != "RPCS3SAV"_u64) { m_ar.reset(); } else { m_ar->pos = 0; } } m_boot_source_type = CELL_GAME_GAMETYPE_SYS; } } if (!title_id.empty()) { m_title_id = title_id; } sys_log.notice("Selected config: mode=%s, path=\"%s\"", m_config_mode, m_config_path); sys_log.notice("Path: %s", m_path); struct cleanup_t { Emulator* _this; bool cleanup = true; ~cleanup_t() { if (cleanup && _this->IsStopped()) { _this->Kill(false); } } } cleanup{this}; std::string inherited_ps3_game_path; { Init(); m_state_inspection_savestate = g_cfg.savestate.state_inspection_mode.get(); m_savestate_extension_flags1 = {}; bool resolve_path_as_vfs_path = false; const bool from_dev_flash = IsPathInsideDir(m_path, g_cfg_vfs.get_dev_flash()); std::string savestate_build_version; std::string savestate_creation_date; std::string savestate_app_title; if (m_ar) { struct file_header { ENABLE_BITWISE_SERIALIZATION; nse_t<u64, 1> magic; bool LE_format; bool state_inspection_support; nse_t<u64, 1> offset; b8 flag_versions_is_following_data; }; const auto header = m_ar->try_read<file_header>().second; if (header.magic != "RPCS3SAV"_u64) { return game_boot_result::savestate_corrupted; } if (header.LE_format != (std::endian::native == std::endian::little) || header.offset >= m_ar->get_size(header.offset)) { return game_boot_result::savestate_corrupted; } g_cfg.savestate.state_inspection_mode.set(header.state_inspection_support); bool is_incompatible = false; if (header.flag_versions_is_following_data) { ensure(header.offset == m_ar->pos); if (!is_savestate_version_compatible(m_ar->pop<std::vector<version_entry>>(), true)) { is_incompatible = true; } } else { // Read data on another container to keep the existing data utils::serial ar_temp; ar_temp.set_reading_state({}, true); ar_temp.swap_handler(*m_ar); ar_temp.seek_pos(header.offset); if 
(!is_savestate_version_compatible(ar_temp.pop<std::vector<version_entry>>(), true)) { is_incompatible = true; } // Restore file handler ar_temp.swap_handler(*m_ar); } const bool contains_version = m_ar->pop<b8>(); if (contains_version) { savestate_build_version = m_ar->pop<std::string>(); savestate_creation_date = m_ar->pop<std::string>(); savestate_app_title = m_ar->pop<std::string>(); m_ar->pop<std::string>(); // User note (unused) (is_incompatible ? sys_log.error : sys_log.success)("Savestate information: creation time: %s, RPCS3 build: \"%s\"\nGame/Title: \"%s\"", savestate_creation_date, savestate_build_version, savestate_app_title); } if (is_incompatible) { return game_boot_result::savestate_version_unsupported; } usz reserved_count = 32; reserved_count -= (header.flag_versions_is_following_data ? 0 : 1); reserved_count -= (contains_version ? 0 : 1); if (!load_and_check_reserved(*m_ar, reserved_count)) { return game_boot_result::savestate_version_unsupported; } argv.clear(); klic.clear(); std::string disc_info; m_ar->serialize(argv.emplace_back(), disc_info, klic.emplace_back(), m_game_dir, hdd1); if (!klic[0]) { klic.clear(); } if (!disc_info.empty() && disc_info[0] != '/') { // Restore disc path for disc games (must exist in games.yml i.e. your game library) m_title_id = disc_info; // Load /dev_bdvd/ from game list if available if (std::string game_path = m_games_config.get_path(m_title_id); !game_path.empty()) { if (game_path.ends_with("/./")) { // Marked as PS3_GAME directory inherited_ps3_game_path = std::move(game_path).substr(0, game_path.size() - 3); } else { disc = std::move(game_path); } } else if (!g_cfg.savestate.state_inspection_mode) { sys_log.fatal("Disc directory not found. Savestate cannot be loaded. 
('%s')", m_title_id); return game_boot_result::invalid_file_or_folder; } } auto load_tar = [&](const std::string& path, const std::string& special_file) { const usz size = m_ar->pop<usz>(); const usz max_data_size = m_ar->get_size(utils::add_saturate<usz>(size, m_ar->pos)); if (size % 512 || max_data_size < size || max_data_size - size < m_ar->pos) { fmt::throw_exception("TAR desrialization failed: Invalid size. TAR size: 0x%x, path='%s', ar: %s", size, path, *m_ar); } fs::remove_all(path, size == 0); if (!special_file.empty()) { fs::write_file<true>(special_file, fs::write_new); } if (size) { m_ar->breathe(true); m_ar->m_max_data = m_ar->pos + size; ensure(tar_object(*m_ar).extract(path)); if (m_ar->m_max_data != m_ar->pos) { fmt::throw_exception("TAR desrialization failed: read bytes: 0x%x, expected: 0x%x, path='%s', ar: %s", m_ar->pos - (m_ar->m_max_data - size), size, path, *m_ar); } m_ar->m_max_data = umax; m_ar->breathe(); } }; if (!hdd1.empty()) { hdd1 = rpcs3::utils::get_hdd1_dir() + "caches/" + hdd1 + "/"; load_tar(hdd1, get_syscache_state_corruption_indicator_file_path(hdd1)); } for (const std::string hdd0_game = rpcs3::utils::get_hdd0_dir() + "game/";;) { const std::string game_data = m_ar->pop<std::string>(); if (game_data.empty()) { break; } if (game_data.find_first_of('\0') != umax || !sysutil_check_name_string(game_data.c_str(), 1, CELL_GAME_DIRNAME_SIZE)) { const std::span<const u8> dirname{reinterpret_cast<const u8*>(game_data.data()), game_data.size()}; fmt::throw_exception("HDD0 deserialization failed: Invalid directory name: %s, ar=%s", dirname.subspan(0, CELL_GAME_DIRNAME_SIZE + 1), *m_ar); } load_tar(hdd0_game + game_data, ""); } // Reserved area if (!load_and_check_reserved(*m_ar, 32)) { return game_boot_result::savestate_version_unsupported; } if (disc_info.starts_with("/"sv)) { // Restore SFO directory for PSN games if (disc_info.starts_with("/dev_hdd0"sv)) { disc = rpcs3::utils::get_hdd0_dir(); disc += 
std::string_view(disc_info).substr(9); } else if (disc_info.starts_with("/host_root"sv)) { sys_log.error("Host root has been used in savestates!"); disc = disc_info.substr(9); } else { sys_log.error("Unknown source for game SFO directory: %s", disc_info); } m_cat.clear(); } m_path_old = m_path; resolve_path_as_vfs_path = true; } else if (m_path.starts_with(vfs_boot_prefix)) { m_path = m_path.substr(vfs_boot_prefix.size()); if (!m_path.empty() && m_path[0] != '/') { // Make valid for VFS m_path.insert(0, "/"); } if (!argv.empty()) { argv[0] = m_path; } else { argv.emplace_back(m_path); } resolve_path_as_vfs_path = true; } else if (m_path.starts_with(game_id_boot_prefix)) { // Try to boot a game through game ID only m_title_id = m_path.substr(game_id_boot_prefix.size()); m_title_id = m_title_id.substr(0, m_title_id.find_first_of(fs::delim)); if (m_title_id.size() < 3 && m_title_id.find_first_not_of('.') == umax) { // Do not allow if TITLE_ID result in path redirection sys_log.fatal("Game directory not found using GAMEID token. ('%s')", m_title_id); return game_boot_result::invalid_file_or_folder; } std::string tail = m_path.substr(game_id_boot_prefix.size() + m_title_id.size()); if (tail.find_first_not_of(fs::delim) == umax) { // Treat slashes-only trail as if game ID only was provided tail.clear(); } bool ok = false; std::string title_path; // const overload does not create new node on failure if (std::string game_path = m_games_config.get_path(m_title_id); !game_path.empty()) { title_path = std::move(game_path); } for (std::string test_path : { rpcs3::utils::get_hdd0_dir() + "game/" + m_title_id + "/USRDIR/EBOOT.BIN" , tail.empty() ? "" : title_path + tail + "/USRDIR/EBOOT.BIN" , title_path + "/PS3_GAME/USRDIR/EBOOT.BIN" , title_path + "/USRDIR/EBOOT.BIN" }) { if (!test_path.empty() && fs::is_file(test_path)) { m_path = std::move(test_path); ok = true; break; } } if (!ok) { sys_log.fatal("Game directory not found using GAMEID token. 
('%s')", m_title_id + tail); return game_boot_result::invalid_file_or_folder; } } if (resolve_path_as_vfs_path) { if (argv[0].starts_with("/dev_hdd0"sv)) { m_path = rpcs3::utils::get_hdd0_dir(); m_path += std::string_view(argv[0]).substr(9); constexpr auto game0_path = "/dev_hdd0/game/"sv; if (argv[0].starts_with(game0_path) && !fs::is_file(vfs::get(argv[0]))) { std::string dirname = argv[0].substr(game0_path.size()); dirname = dirname.substr(0, dirname.find_first_of('/')); // Try to load game directory from list if available if (std::string game_path = m_games_config.get_path(m_title_id); !game_path.empty()) { disc = std::move(game_path); m_path = disc + argv[0].substr(game0_path.size() + dirname.size()); } } } else if (argv[0].starts_with("/dev_flash"sv)) { m_path = g_cfg_vfs.get_dev_flash(); m_path += std::string_view(argv[0]).substr(10); } else if (argv[0].starts_with("/dev_bdvd"sv)) { m_path = disc; m_path += std::string_view(argv[0]).substr(9); } else if (argv[0].starts_with("/host_root/"sv)) { sys_log.error("Host root has been used in path redirection!"); m_path = argv[0].substr(11); } else if (argv[0].starts_with("/dev_hdd1"sv)) { sys_log.error("HDD1 has been used to store executable in path redirection!"); m_path = rpcs3::utils::get_hdd1_dir(); m_path += std::string_view(argv[0]).substr(9); } else { sys_log.error("Unknown source for path redirection: %s", argv[0]); } if (argv.size() == 1) { // Resolve later properly as if booted through host path argv.clear(); } sys_log.notice("Restored executable path: \'%s\'", m_path); } const std::string resolved_path = GetCallbacks().resolve_path(m_path); const std::string elf_dir = fs::get_parent_dir(m_path); // Mount /app_home again since m_path might have changed due to savestates. vfs::mount("/app_home", g_cfg_vfs.app_home.to_string().empty() ? 
elf_dir + '/' : g_cfg_vfs.get(g_cfg_vfs.app_home, rpcs3::utils::get_emu_dir())); // Load PARAM.SFO (TODO) { if (fs::is_dir(m_path)) { // Special case (directory scan) m_sfo_dir = rpcs3::utils::get_sfo_dir_from_game_path(m_path, m_title_id); } else if (!disc.empty()) { // Check previously used category before it's overwritten if (m_cat == "DG") { m_sfo_dir = disc + "/" + m_game_dir; } else if (m_cat == "GD") { m_sfo_dir = rpcs3::utils::get_hdd0_dir() + "game/" + m_title_id; } else { m_sfo_dir = rpcs3::utils::get_sfo_dir_from_game_path(disc, m_title_id); } } else { m_sfo_dir = rpcs3::utils::get_sfo_dir_from_game_path(fs::get_parent_dir(elf_dir), m_title_id); } } const psf::registry _psf = psf::load_object(m_sfo_dir + "/PARAM.SFO"); m_title = std::string(psf::get_string(_psf, "TITLE", std::string_view(m_path).substr(m_path.find_last_of(fs::delim) + 1))); m_title_id = std::string(psf::get_string(_psf, "TITLE_ID")); m_cat = std::string(psf::get_string(_psf, "CATEGORY")); const auto version_app = psf::get_string(_psf, "APP_VER", "Unknown"); const auto version_disc = psf::get_string(_psf, "VERSION", "Unknown"); m_app_version = version_app == "Unknown" ? version_disc : version_app; if (!_psf.empty() && m_cat.empty()) { sys_log.fatal("Corrupted PARAM.SFO found! 
Try reinstalling the game."); return game_boot_result::invalid_file_or_folder; } sys_log.notice("Title: %s", GetTitle()); sys_log.notice("Serial: %s", GetTitleID()); sys_log.notice("Category: %s", GetCat()); sys_log.notice("Version: APP_VER=%s VERSION=%s", version_app, version_disc); { if (m_config_mode == cfg_mode::custom_selection || (m_config_mode == cfg_mode::continuous && !m_config_path.empty())) { if (fs::file cfg_file{ m_config_path }) { sys_log.notice("Applying %s: %s", m_config_mode, m_config_path); if (g_cfg.from_string(cfg_file.to_string())) { g_cfg.name = m_config_path; } else { sys_log.fatal("Failed to apply %s: %s", m_config_mode, m_config_path); } } else { sys_log.fatal("Failed to access %s: %s", m_config_mode, m_config_path); } } else if (m_config_mode == cfg_mode::custom) { // Load custom configs for (std::string config_path : { m_path + ".yml", rpcs3::utils::get_custom_config_path(from_dev_flash ? m_path.substr(m_path.find_last_of(fs::delim) + 1) : m_title_id), }) { if (config_path.empty()) { continue; } if (fs::file cfg_file{config_path}) { sys_log.notice("Applying custom config: %s", config_path); if (g_cfg.from_string(cfg_file.to_string())) { g_cfg.name = config_path; m_config_path = config_path; break; } sys_log.fatal("Failed to apply custom config: %s", config_path); } } } // Disable incompatible settings fixup_settings(&_psf); // Force audio provider if (m_path.ends_with("vsh.self"sv)) { g_cfg.audio.provider.set(audio_provider::rsxaudio); } else { g_cfg.audio.provider.set(audio_provider::cell_audio); } // Backup config g_backup_cfg.from_string(g_cfg.to_string()); } // Set RTM usage g_use_rtm = utils::has_rtm() && (((utils::has_mpx() && !utils::has_tsx_force_abort()) && g_cfg.core.enable_TSX == tsx_usage::enabled) || g_cfg.core.enable_TSX == tsx_usage::forced); { // Log some extra info in case of boot #if defined(HAVE_VULKAN) if (g_cfg.video.renderer == video_renderer::vulkan) { sys_log.notice("Vulkan SDK Revision: %d", VK_HEADER_VERSION); } 
#endif sys_log.notice("Used configuration:\n%s\n", g_cfg.to_string()); if (g_use_rtm && (!utils::has_mpx() || utils::has_tsx_force_abort())) { sys_log.warning("TSX forced by User"); } // Initialize patch engine g_fxo->need<patch_engine>(); // Load patches from different locations g_fxo->get<patch_engine>().append_global_patches(); g_fxo->get<patch_engine>().append_title_patches(m_title_id); } if (g_use_rtm) { // Update supplementary settings const f64 _1ns = utils::get_tsc_freq() / 1000'000'000.; g_rtm_tx_limit1 = static_cast<u64>(g_cfg.core.tx_limit1_ns * _1ns); g_rtm_tx_limit2 = static_cast<u64>(g_cfg.core.tx_limit2_ns * _1ns); } // Set bdvd_dir std::string bdvd_dir = g_cfg_vfs.get(g_cfg_vfs.dev_bdvd, rpcs3::utils::get_emu_dir()); { if (!bdvd_dir.empty()) { if (bdvd_dir.back() != fs::delim[0] && bdvd_dir.back() != fs::delim[1]) { bdvd_dir.push_back('/'); } if (!fs::is_file(bdvd_dir + "PS3_DISC.SFB")) { if (fs::get_dir_size(bdvd_dir) == 0) { // Ignore empty dir. We will need it later for disc games in dev_hdd0. 
sys_log.notice("Ignoring empty vfs BDVD directory: '%s'", bdvd_dir); } else { // Unuse if invalid sys_log.error("Failed to use custom BDVD directory: '%s'", bdvd_dir); } bdvd_dir.clear(); } } } // Special boot mode (directory scan) if (fs::is_dir(m_path)) { m_state = system_state::ready; GetCallbacks().on_ready(); g_fxo->init<main_ppu_module>(); vm::init(); m_force_boot = false; // Force LLVM recompiler g_cfg.core.ppu_decoder.from_default(); // Force SPU cache and precompilation g_cfg.core.llvm_precompilation.set(true); g_cfg.core.spu_cache.set(true); // Disable incompatible settings fixup_settings(&_psf); // Force LLE lib loading mode g_cfg.core.libraries_control.set_set([]() { std::set<std::string> set; extern const std::map<std::string_view, int> g_prx_list; for (const auto& lib : g_prx_list) { set.emplace(std::string(lib.first) + ":lle"); } return set; }()); // Fake arg (workaround) argv.resize(1); argv[0] = "/dev_bdvd/PS3_GAME/USRDIR/EBOOT.BIN"; m_dir = "/dev_bdvd/PS3_GAME/"; std::string path; std::vector<std::string> dir_queue; dir_queue.emplace_back(m_path + '/'); init_fxo_for_exec(nullptr, true); { if (m_title_id.empty()) { // Check if we are trying to scan vsh/module const std::string vsh_path = g_cfg_vfs.get_dev_flash() + "vsh/module"; if (IsPathInsideDir(m_path, vsh_path)) { // Memorize path to vsh.self path = vsh_path + "/vsh.self"; } } else { // Find game update to use EBOOT.BIN from it, also add its directory to scan if (m_cat == "DG") { const std::string hdd0_path = vfs::get("/dev_hdd0/game/") + m_title_id; if (fs::is_file(hdd0_path + "/USRDIR/EBOOT.BIN")) { m_path = hdd0_path; } dir_queue.emplace_back(hdd0_path + '/'); } // Memorize path to EBOOT.BIN path = m_path + "/USRDIR/EBOOT.BIN"; // Try to add all related directories const std::set<std::string> dirs = GetGameDirs(); dir_queue.insert(std::end(dir_queue), std::begin(dirs), std::end(dirs)); } if (fs::is_file(path)) { // Compile binary first ppu_log.notice("Trying to load binary: %s", path); 
fs::file src{path}; src = decrypt_self(std::move(src)); const ppu_exec_object obj = src; if (obj == elf_error::ok && ppu_load_exec(obj, true, path)) { ensure(g_fxo->try_get<main_ppu_module>())->path = path; } else { sys_log.error("Failed to load binary '%s' (%s)", path, obj.get_error()); } } } g_fxo->init<named_thread>("SPRX Loader"sv, [this, dir_queue]() mutable { if (auto& _main = *ensure(g_fxo->try_get<main_ppu_module>()); !_main.path.empty()) { if (!_main.analyse(0, _main.elf_entry, _main.seg0_code_end, _main.applied_patches, std::vector<u32>{}, [](){ return Emu.IsStopped(); })) { return; } Emu.ConfigurePPUCache(); ppu_initialize(_main); } if (Emu.IsStopped()) { return; } ppu_precompile(dir_queue, nullptr); if (Emu.IsStopped()) { return; } spu_cache::initialize(false); // Exit "process" CallFromMainThread([this] { Emu.Kill(false); m_path = m_path_old; // Reset m_path to fix boot from gui }); }); Run(false); return game_boot_result::no_errors; } // Check microphone permissions if (g_cfg.audio.microphone_type != microphone_handler::null) { if (const std::vector<std::string> device_list = fmt::split(g_cfg.audio.microphone_devices.to_string(), {"@@@"}); !device_list.empty()) { check_microphone_permissions(); } } // Detect boot location const std::string hdd0_game = vfs::get("/dev_hdd0/game/"); const bool from_hdd0_game = IsPathInsideDir(m_path, hdd0_game); if (game_boot_result error = VerifyPathCasing(m_path, hdd0_game, from_hdd0_game); error != game_boot_result::no_errors) { return error; } if (game_boot_result error = VerifyPathCasing(m_path, g_cfg_vfs.get_dev_flash(), from_dev_flash); error != game_boot_result::no_errors) { return error; } // Mount /dev_bdvd/ if necessary if (bdvd_dir.empty() && disc.empty()) { std::string sfb_dir; GetBdvdDir(bdvd_dir, sfb_dir, m_game_dir, elf_dir); if (!sfb_dir.empty() && from_hdd0_game) { // Booting disc game from wrong location sys_log.error("Disc game %s found at invalid location /dev_hdd0/game/", m_title_id); const 
std::string games_common = rpcs3::utils::get_games_dir(); const std::string dst_dir = games_common + sfb_dir.substr(hdd0_game.size()); // Move and retry from correct location if (fs::create_path(fs::get_parent_dir(dst_dir)) && fs::rename(sfb_dir, dst_dir, false)) { sys_log.success("Disc game %s moved to special location '%s'", m_title_id, dst_dir); m_path = games_common + m_path.substr(hdd0_game.size()); return Load(m_title_id); } sys_log.error("Failed to move disc game %s to '%s' (%s)", m_title_id, dst_dir, fs::g_tls_error); return game_boot_result::wrong_disc_location; } } if (bdvd_dir.empty() && disc.empty() && !is_disc_patch) { // Reset original disc game dir if this is neither disc nor disc patch m_game_dir = "PS3_GAME"; } // Booting patch data if ((is_disc_patch || m_cat == "GD") && bdvd_dir.empty() && disc.empty()) { // Load /dev_bdvd/ from game list if available if (std::string game_path = m_games_config.get_path(m_title_id); !game_path.empty()) { if (game_path.ends_with("/./")) { // Marked as PS3_GAME directory inherited_ps3_game_path = std::move(game_path).substr(0, game_path.size() - 3); } else { bdvd_dir = std::move(game_path); } } else { sys_log.fatal("Disc directory not found. Try to run the game from the actual game disc directory."); return game_boot_result::invalid_file_or_folder; } } // Check /dev_bdvd/ if (disc.empty() && !bdvd_dir.empty() && fs::is_dir(bdvd_dir)) { vfs::mount("/dev_bdvd", bdvd_dir); sys_log.notice("Disc: %s", vfs::get("/dev_bdvd")); vfs::mount("/dev_bdvd/PS3_GAME", bdvd_dir + m_game_dir + "/"); sys_log.notice("Game: %s", vfs::get("/dev_bdvd/PS3_GAME")); if (const std::string sfb_path = vfs::get("/dev_bdvd/PS3_DISC.SFB"); !IsValidSfb(sfb_path)) { sys_log.error("Invalid disc directory for the disc game %s. 
(%s)", m_title_id, sfb_path); return game_boot_result::invalid_file_or_folder; } const auto game_psf = psf::load_object(vfs::get("/dev_bdvd/PS3_GAME/PARAM.SFO")); const auto bdvd_title_id = psf::get_string(game_psf, "TITLE_ID"); if (m_title_id.empty()) { // We are most likely booting a binary inside a disc directory sys_log.error("Booting binary without TITLE_ID inside disc dir of '%s'", bdvd_title_id); } else { if (bdvd_title_id != m_title_id) { // Not really an error just an odd situation sys_log.error("Unexpected PARAM.SFO found in disc directory '%s' (found '%s')", m_title_id, bdvd_title_id); } // Store /dev_bdvd/ location if (games_config::result res = m_games_config.add_game(m_title_id, bdvd_dir); res == games_config::result::success) { sys_log.notice("Registered BDVD game directory for title '%s': %s", m_title_id, bdvd_dir); } else if (res == games_config::result::failure) { sys_log.error("Failed to save BDVD location of title '%s' (error=%s)", m_title_id, fs::g_tls_error); } } } else if (m_cat == "1P" && from_hdd0_game) { // PS1 Classic located in dev_hdd0/game sys_log.notice("PS1 Game: %s, %s", m_title_id, m_title); const std::string tail = m_path.substr(hdd0_game.size()); const std::string dirname = fmt::trim_front(tail, fs::delim).substr(0, tail.find_first_of(fs::delim)); const std::string game_path = "/dev_hdd0/game/" + dirname; argv.resize(9); argv[0] = "/dev_flash/ps1emu/ps1_newemu.self"; argv[1] = m_title_id + "_mc1.VM1"; // virtual mc 1 /dev_hdd0/savedata/vmc/%argv[1]% argv[2] = m_title_id + "_mc2.VM1"; // virtual mc 2 /dev_hdd0/savedata/vmc/%argv[2]% argv[3] = "0082"; // region target argv[4] = "1600"; // ??? arg4 600 / 1200 / 1600, resolution scale? (purely a guess, the numbers seem to match closely to resolutions tho) argv[5] = game_path; // ps1 game folder path (not the game serial) argv[6] = "1"; // ??? arg6 1 ? argv[7] = "2"; // ??? arg7 2 -- full screen on/off 2/1 ? argv[8] = "1"; // ??? arg8 2 -- smoothing on/off = 1/0 ? 
// TODO, this seems like it would normally be done by sysutil etc // Basically make 2 128KB memory cards 0 filled and let the games handle formatting. fs::file card_1_file(vfs::get("/dev_hdd0/savedata/vmc/" + argv[1]), fs::write + fs::create); card_1_file.trunc(128 * 1024); fs::file card_2_file(vfs::get("/dev_hdd0/savedata/vmc/" + argv[2]), fs::write + fs::create); card_2_file.trunc(128 * 1024); } else if (m_cat == "PE" && from_hdd0_game) { // PSP Remaster located in dev_hdd0/game sys_log.notice("PSP Remaster Game: %s, %s", m_title_id, m_title); const std::string tail = m_path.substr(hdd0_game.size()); const std::string dirname = fmt::trim_front(tail, fs::delim).substr(0, tail.find_first_of(fs::delim)); const std::string game_path = "/dev_hdd0/game/" + dirname; argv.resize(2); argv[0] = "/dev_flash/pspemu/psp_emulator.self"; argv[1] = game_path; } else if (m_cat != "DG" && m_cat != "GD") { // Don't need /dev_bdvd if (!m_title_id.empty() && !from_hdd0_game && m_cat == "HG") { std::string game_dir = m_sfo_dir; // Add HG games not in HDD0 to games.yml [[maybe_unused]] const games_config::result res = m_games_config.add_external_hdd_game(m_title_id, game_dir); const std::string dir = fmt::trim(game_dir.substr(fs::get_parent_dir_view(game_dir).size() + 1), fs::delim); vfs::mount("/dev_hdd0/game/" + dir, game_dir + '/'); } } else if (!inherited_ps3_game_path.empty() || (from_hdd0_game && m_cat == "DG" && disc.empty())) { // Disc game located in dev_hdd0/game bdvd_dir = g_cfg_vfs.get(g_cfg_vfs.dev_bdvd, rpcs3::utils::get_emu_dir()); if (fs::get_dir_size(bdvd_dir)) { sys_log.error("Failed to load disc game from dev_hdd0. The virtual bdvd_dir path does not exist or the directory is not empty: '%s'", bdvd_dir); return game_boot_result::invalid_bdvd_folder; } // TODO: Verify timestamps and error codes with sys_fs vfs::mount("/dev_bdvd", bdvd_dir); vfs::mount("/dev_bdvd/PS3_GAME", inherited_ps3_game_path.empty() ? 
hdd0_game + m_path.substr(hdd0_game.size(), 10) : inherited_ps3_game_path); const std::string new_ps3_game = vfs::get("/dev_bdvd/PS3_GAME"); sys_log.notice("Game: %s", new_ps3_game); // Store /dev_bdvd/PS3_GAME location if (games_config::result res = m_games_config.add_game(m_title_id, new_ps3_game + "/./"); res == games_config::result::success) { sys_log.notice("Registered BDVD/PS3_GAME game directory for title '%s': %s", m_title_id, new_ps3_game); } else if (res == games_config::result::failure) { sys_log.error("Failed to save BDVD/PS3_GAME location of title '%s' (error=%s)", m_title_id, fs::g_tls_error); } } else if (disc.empty()) { sys_log.error("Failed to mount disc directory for the disc game %s", m_title_id); return game_boot_result::invalid_file_or_folder; } else { // Disc game bdvd_dir = disc; vfs::mount("/dev_bdvd", bdvd_dir); vfs::mount("/dev_bdvd/PS3_GAME", bdvd_dir + m_game_dir); sys_log.notice("Disk: %s, Dir: %s", vfs::get("/dev_bdvd"), m_game_dir); } // Initialize progress dialog g_fxo->init<named_thread<progress_dialog_server>>(); // Initialize performance monitor g_fxo->init<named_thread<perf_monitor>>(); // Set title to actual disc title if necessary const std::string disc_sfo_dir = vfs::get("/dev_bdvd/PS3_GAME/PARAM.SFO"); const auto disc_psf_obj = psf::load_object(disc_sfo_dir); // Install PKGDIR, INSDIR, PS3_EXTRA if (!bdvd_dir.empty()) { std::string ins_dir = vfs::get("/dev_bdvd/PS3_GAME/INSDIR/"); std::string pkg_dir = vfs::get("/dev_bdvd/PS3_GAME/PKGDIR/"); std::string extra_dir = vfs::get("/dev_bdvd/PS3_EXTRA/"); fs::file lock_file; for (const auto path_ptr : {&ins_dir, &pkg_dir, &extra_dir}) { if (!fs::is_dir(*path_ptr)) { path_ptr->clear(); } } const std::string lock_file_path = fmt::format("%s%s%s_v%s", hdd0_game, u8"$locks/", m_title_id, psf::get_string(disc_psf_obj, "APP_VER")); if (!ins_dir.empty() || !pkg_dir.empty() || !extra_dir.empty()) { // For backwards compatibility if (!lock_file.open(hdd0_game + ".locks/" + m_title_id)) { // 
Check if already installed lock_file.open(lock_file_path); } } std::vector<std::string> pkgs; if (!lock_file && !ins_dir.empty()) { sys_log.notice("Found INSDIR: %s", ins_dir); for (auto&& entry : fs::dir{ins_dir}) { const std::string pkg_file = ins_dir + entry.name; if (!entry.is_directory && entry.name.ends_with(".PKG")) { pkgs.push_back(pkg_file); } } } if (!lock_file && !pkg_dir.empty()) { sys_log.notice("Found PKGDIR: %s", pkg_dir); for (auto&& entry : fs::dir{pkg_dir}) { if (entry.is_directory && entry.name.starts_with("PKG")) { const std::string pkg_file = pkg_dir + entry.name + "/INSTALL.PKG"; if (fs::is_file(pkg_file)) { pkgs.push_back(pkg_file); } } } } if (!lock_file && !extra_dir.empty()) { sys_log.notice("Found PS3_EXTRA: %s", extra_dir); for (auto&& entry : fs::dir{extra_dir}) { if (entry.is_directory && entry.name[0] == 'D') { const std::string pkg_file = extra_dir + entry.name + "/DATA000.PKG"; if (fs::is_file(pkg_file)) { pkgs.push_back(pkg_file); } } } } if (!pkgs.empty()) { bool install_success = true; BlockingCallFromMainThread([this, &pkgs, &install_success]() { if (!GetCallbacks().on_install_pkgs(pkgs)) { install_success = false; } }); if (!install_success) { sys_log.error("Failed to install packages"); return game_boot_result::install_failed; } } if (!lock_file) { // Create lock file to prevent double installation // Do it after installation to prevent false positives when RPCS3 closed in the middle of the operation lock_file.open(lock_file_path, fs::read + fs::create + fs::excl); } } // Check game updates if (const std::string hdd0_boot = hdd0_game + m_title_id + "/USRDIR/EBOOT.BIN"; !m_ar && recursion_count == 0 && disc.empty() && !bdvd_dir.empty() && !m_title_id.empty() && resolved_path == GetCallbacks().resolve_path(vfs::get("/dev_bdvd/PS3_GAME/USRDIR/EBOOT.BIN")) && resolved_path != GetCallbacks().resolve_path(hdd0_boot) && fs::is_file(hdd0_boot)) { if (const psf::registry update_sfo = psf::load(hdd0_game + m_title_id + 
"/PARAM.SFO").sfo; psf::get_string(update_sfo, "TITLE_ID") == m_title_id && psf::get_string(update_sfo, "CATEGORY") == "GD") { // Booting game update sys_log.success("Updates found at /dev_hdd0/game/%s/", m_title_id); m_path = hdd0_boot; const game_boot_result boot_result = Load(m_title_id, true, recursion_count + 1); if (boot_result == game_boot_result::no_errors) { return game_boot_result::no_errors; } sys_log.error("Failed to boot update at \"%s\", game update may be corrupted! Consider uninstalling or reinstalling it. (reason: %s)", m_path, boot_result); return boot_result; } } if (!disc_psf_obj.empty()) { const auto bdvd_title = psf::get_string(disc_psf_obj, "TITLE"); if (!bdvd_title.empty() && bdvd_title != m_title) { sys_log.notice("Title was set from %s to %s", m_title, bdvd_title); m_title = bdvd_title; } } for (auto& c : m_title) { // Replace newlines with spaces if (c == '\n') c = ' '; } // Mount /host_root/ if necessary (special value) if (g_cfg.vfs.host_root) { vfs::mount("/host_root", "/"); } // Open SELF or ELF std::string elf_path = m_path; if (m_cat == "1P" || m_cat == "PE") { // Use emulator path elf_path = vfs::get(argv[0]); } if (m_ar) { g_tls_log_prefix = []() { return fmt::format("Emu State Load Thread: '%s'", g_tls_serialize_name); }; } fs::file elf_file(elf_path); if (!elf_file) { sys_log.error("Failed to open executable: %s", elf_path); if (m_ar) { sys_log.warning("State Inspection Savestate Mode!"); vm::init(); vm::load(*m_ar); if (!hdd1.empty()) { vfs::mount("/dev_hdd1", hdd1); sys_log.notice("Hdd1: %s", hdd1); } init_fxo_for_exec(DeserialManager(), true); return game_boot_result::no_errors; } return game_boot_result::invalid_file_or_folder; } bool had_been_decrypted = false; // Check SELF header if (elf_file.size() >= 4 && elf_file.read<u32>() == "SCE\0"_u32) { // Decrypt SELF had_been_decrypted = true; elf_file = decrypt_self(std::move(elf_file), klic.empty() ? 
nullptr : reinterpret_cast<u8*>(&klic[0]), &g_ps3_process_info.self_info); } else { g_ps3_process_info.self_info.valid = false; } if (!elf_file) { sys_log.error("Failed to decrypt SELF: %s", elf_path); return game_boot_result::decryption_error; } m_state = system_state::ready; ppu_exec_object ppu_exec; ppu_prx_object ppu_prx; ppu_rel_object ppu_rel; spu_exec_object spu_exec; spu_rel_object spu_rel; vm::init(); if (m_ar) { vm::load(*m_ar); } if (!hdd1.empty()) { vfs::mount("/dev_hdd1", hdd1); sys_log.notice("Hdd1: %s", vfs::get("/dev_hdd1")); } if (ppu_exec.open(elf_file) == elf_error::ok) { // PS3 executable GetCallbacks().on_ready(); if (argv.empty()) { argv.resize(1); } if (argv[0].empty()) { auto unescape = [](std::string_view path) { // Unescape from host FS std::vector<std::string> escaped = fmt::split(path, {std::string_view{&fs::delim[0], 1}, std::string_view{&fs::delim[1], 1}}); std::vector<std::string> result; for (auto& sv : escaped) result.emplace_back(vfs::unescape(sv)); return fmt::merge(result, "/"); }; const std::string resolved_hdd0 = GetCallbacks().resolve_path(hdd0_game) + '/'; if (from_hdd0_game && m_cat == "DG") { const std::string tail = resolved_path.substr(resolved_hdd0.size()); const std::string tail_usrdir = tail.substr(tail.find_first_of(fs::delim) + 1); const std::string dirname = tail.substr(0, tail.find_first_of(fs::delim)); argv[0] = "/dev_bdvd/PS3_GAME/" + unescape(tail_usrdir); m_dir = "/dev_hdd0/game/" + dirname + "/"; sys_log.notice("Disc path: %s", m_dir); } else if (from_hdd0_game) { const std::string tail = resolved_path.substr(resolved_hdd0.size()); const std::string dirname = tail.substr(0, tail.find_first_of(fs::delim)); argv[0] = "/dev_hdd0/game/" + unescape(tail); m_dir = "/dev_hdd0/game/" + dirname + "/"; sys_log.notice("Boot path: %s", m_dir); } else if (!bdvd_dir.empty() && fs::is_dir(bdvd_dir)) { // Disc games are on /dev_bdvd/ const usz pos = resolved_path.rfind(m_game_dir); argv[0] = "/dev_bdvd/PS3_GAME/" + 
unescape(resolved_path.substr(pos + m_game_dir.size() + 1)); m_dir = "/dev_bdvd/PS3_GAME/"; } else if (from_dev_flash) { // Firmware executables argv[0] = "/dev_flash" + resolved_path.substr(GetCallbacks().resolve_path(g_cfg_vfs.get_dev_flash()).size()); m_dir = fs::get_parent_dir(argv[0]) + '/'; } else if (!m_title_id.empty() && m_cat == "HG") { std::string game_dir = m_sfo_dir; // Remove the C00 suffix if (game_dir.ends_with("/C00") || game_dir.ends_with("\\C00")) { game_dir = game_dir.substr(0, game_dir.size() - 4); } const std::string dir = fmt::trim(game_dir.substr(fs::get_parent_dir_view(game_dir).size() + 1), fs::delim); m_dir = "/dev_hdd0/game/" + dir + '/'; argv[0] = m_dir + unescape(resolved_path.substr(GetCallbacks().resolve_path(game_dir).size())); sys_log.notice("Boot path: %s", m_dir); } else if (g_cfg.vfs.host_root) { // For homebrew argv[0] = "/host_root/" + resolved_path; m_dir = "/host_root/" + elf_dir + '/'; } else { // Use /app_home if /host_root is disabled argv[0] = "/app_home/" + resolved_path.substr(resolved_path.find_last_of(fs::delim) + 1); m_dir = "/app_home/"; } sys_log.notice("Elf path: %s", argv[0]); } if (!argv[0].starts_with("/dev_hdd0/game"sv) && m_cat == "HG"sv) { sys_log.error("Booting HG category outside of HDD0!"); } const auto _main = g_fxo->init<main_ppu_module>(); if (ppu_load_exec(ppu_exec, false, m_path, DeserialManager())) { if (g_cfg.core.ppu_debug && had_been_decrypted) { // Auto-dump decrypted binaries if PPU debug is enabled const auto exec_bin = elf_file.to_vector<u8>(); dump_executable({exec_bin.data(), exec_bin.size()}, _main, GetTitleID()); } } // Overlay (OVL) executable (only load it) else { GetCallbacks().on_ready(); g_fxo->init(false); if (!vm::map(0x3000'0000, 0x1000'0000, 0x200) || !ppu_load_overlay(ppu_exec, false, m_path).first) { ppu_exec.set_error(elf_error::header_type); } else { // Preserve emulation state for OVL executable Pause(true); } } if (ppu_exec != elf_error::ok) { Kill(false); 
sys_log.error("Invalid or unsupported PPU executable format: %s", elf_path); return game_boot_result::invalid_file_or_folder; } } else if (ppu_prx.open(elf_file) == elf_error::ok) { // PPU PRX GetCallbacks().on_ready(); g_fxo->init(false); ppu_load_prx(ppu_prx, false, m_path); Pause(true); } else if (spu_exec.open(elf_file) == elf_error::ok) { // SPU executable GetCallbacks().on_ready(); g_fxo->init(false); spu_load_exec(spu_exec); Pause(true); } else if (spu_rel.open(elf_file) == elf_error::ok) { // SPU linker file GetCallbacks().on_ready(); g_fxo->init(false); spu_load_rel_exec(spu_rel); Pause(true); } else if (ppu_rel.open(elf_file) == elf_error::ok) { // PPU linker file GetCallbacks().on_ready(); g_fxo->init(false); ppu_load_rel_exec(ppu_rel); Pause(true); } else { sys_log.error("Invalid or unsupported file format: %s", elf_path); sys_log.warning("** ppu_exec -> %s", ppu_exec.get_error()); sys_log.warning("** ppu_prx -> %s", ppu_prx.get_error()); sys_log.warning("** spu_exec -> %s", spu_exec.get_error()); sys_log.warning("** spu_rel -> %s", spu_rel.get_error()); sys_log.warning("** ppu_rel -> %s", ppu_rel.get_error()); Kill(false); return game_boot_result::invalid_file_or_folder; } if (ppu_exec == elf_error::ok && !fs::is_file(g_cfg_vfs.get_dev_flash() + "sys/external/liblv2.sprx")) { const auto& libs = g_cfg.core.libraries_control.get_set(); extern const std::map<std::string_view, int> g_prx_list; // Check if there are any firmware SPRX which may be LLEd during emulation // Don't prompt GUI confirmation if there aren't any if (std::any_of(g_prx_list.begin(), g_prx_list.end(), [&libs](auto& lib) { return libs.count(std::string(lib.first) + ":lle") || (!lib.second && !libs.count(std::string(lib.first) + ":hle")); })) { Kill(false); CallFromMainThread([this]() { GetCallbacks().on_missing_fw(); }); return game_boot_result::firmware_missing; } } const bool autostart = m_ar || (std::exchange(m_force_boot, false) || g_cfg.misc.autostart); if (IsReady()) { if 
(autostart)
	{
		Run(true);
	}
}

return game_boot_result::no_errors;
}
}

// Transition the emulator from ready (or frozen) into the 'starting' state:
// notifies the GUI run callback, resets pause-time accounting, records the
// current TTY log position and optionally inhibits display sleep.
void Emulator::Run(bool start_playtime)
{
	ensure(IsReady() || GetStatus(false) == system_state::frozen);

	GetCallbacks().on_run(start_playtime);

	// Reset pause bookkeeping for the new run
	m_pause_start_time = 0;
	m_pause_amend_time = 0;

	// Remember where the TTY file currently ends (umax when no TTY is open)
	m_tty_file_init_pos = g_tty ? g_tty.pos() : usz{umax};

	rpcs3::utils::configure_logs();

	m_state = system_state::starting;

	if (g_cfg.misc.prevent_display_sleep)
	{
		disable_display_sleep();
	}
}

// Release PPU threads from the 'stop' state to begin guest execution.
// If no thread needed signalling (savestate resume path), finalize the run
// request directly from this thread.
void Emulator::RunPPU()
{
	ensure(IsStarting());

	bool signalled_thread = false;

	// Run main thread
	idm::select<named_thread<ppu_thread>>([&](u32, named_thread<ppu_thread>& cpu)
	{
		// Thread already scheduled for release elsewhere: skip it
		if (std::exchange(cpu.stop_flag_removal_protection, false))
		{
			return;
		}

		ensure(cpu.state.test_and_reset(cpu_flag::stop));
		cpu.state.notify_one();
		signalled_thread = true;
	});

	if (!signalled_thread)
	{
		FixGuestTime();
		FinalizeRunRequest();
	}

	// Wake the RSX capture replay thread if one exists
	if (auto thr = g_fxo->try_get<named_thread<rsx::rsx_replay_thread>>())
	{
		thr->state -= cpu_flag::stop;
		thr->state.notify_one();
	}
}

// Restore guest time-base: from the savestate (m_ar) when resuming one,
// otherwise initialize it from zero. When resuming with suspend_emu enabled,
// also hides the consumed savestate file(s) by prefixing them with "used_".
void Emulator::FixGuestTime()
{
	if (m_ar)
	{
		initialize_timebased_time(m_ar->pop<u64>());

		g_cfg.savestate.state_inspection_mode.set(m_state_inspection_savestate);

		CallFromMainThread([this]
		{
			// Mark a known savestate location and the one we try to boot (in case we boot a moved/copied savestate)
			if (g_cfg.savestate.suspend_emu)
			{
				for (std::string old_path : std::initializer_list<std::string>{m_ar ? m_path_old : "", m_title_id.empty() ? "" : get_savestate_file(m_title_id, m_path_old, 0, 0)})
				{
					if (old_path.empty())
					{
						continue;
					}

					// Strip trailing path delimiters
					std::string new_path = old_path.substr(0, old_path.find_last_not_of(fs::delim) + 1);

					const usz insert_pos = new_path.find_last_of(fs::delim) + 1;
					const auto prefix = "used_"sv;

					// Only rename if the filename is not already prefixed
					if (new_path.compare(insert_pos, prefix.size(), prefix) != 0)
					{
						new_path.insert(insert_pos, prefix);

						if (fs::rename(old_path, new_path, true))
						{
							sys_log.success("Savestate has been moved (hidden) to path='%s'", new_path);
						}
					}
				}
			}

			g_tls_log_prefix = []()
			{
				return std::string();
			};
		});
	}
	else
	{
		initialize_timebased_time(0);
	}
}

// Complete the run request: release SPU/PPU threads (keeping them globally
// paused unless autostart applies), make the LV2 scheduler ready, and move
// the state machine from 'starting' to 'running'.
void Emulator::FinalizeRunRequest()
{
	const bool autostart = !m_ar || !!g_cfg.misc.autostart;

	bs_t<cpu_flag> add_flags = cpu_flag::dbg_global_pause;

	if (autostart)
	{
		add_flags -= cpu_flag::dbg_global_pause;
	}

	auto spu_select = [&](u32, spu_thread& spu)
	{
		bs_t<cpu_flag> sub_flags = cpu_flag::stop;

		// Keep 'stop' on the group's waiter SPU or threads flagged for protected release
		if (spu.group && spu.index == spu.group->waiter_spu_index)
		{
			sub_flags -= cpu_flag::stop;
		}
		else if (std::exchange(spu.stop_flag_removal_protection, false))
		{
			sub_flags -= cpu_flag::stop;
		}

		spu.add_remove_flags(add_flags, sub_flags);
	};

	auto ppu_select = [&](u32, ppu_thread& ppu)
	{
		ppu.state += add_flags;
	};

	if (auto rsx = g_fxo->try_get<rsx::thread>())
	{
		static_cast<cpu_thread*>(rsx)->add_remove_flags(add_flags, cpu_flag::suspend);
	}

	if (m_savestate_extension_flags1 & SaveStateExtentionFlags1::ShouldCloseMenu)
	{
		g_fxo->get<SysutilMenuOpenStatus>().active = true;
	}

	idm::select<named_thread<spu_thread>>(spu_select);
	idm::select<named_thread<ppu_thread>>(ppu_select);

	lv2_obj::make_scheduler_ready();

	m_state.compare_and_swap_test(system_state::starting, system_state::running);

	// The savestate archive is no longer needed once the run is finalized
	m_ar.reset();

	if (!autostart)
	{
		Pause();
	}

	// The savestate was captured with the home menu open: ask it to close shortly after resume
	if (m_savestate_extension_flags1 & SaveStateExtentionFlags1::ShouldCloseMenu)
	{
		std::thread([this, info = GetEmulationIdentifier()]()
		{
			thread_base::set_name("Close Home Menu");
			std::this_thread::sleep_for(2s);

			CallFromMainThread([this]()
			{
				send_close_home_menu_cmds();
			},
info);
		}).detach();
	}
}

// Pause (or, with freeze_emulation, freeze) the emulation.
// Returns false when no valid state transition was possible; a frozen state
// can also be entered from 'ready' or 'paused', a pause only from 'running'.
bool Emulator::Pause(bool freeze_emulation, bool show_resume_message)
{
	const u64 start = get_system_time();

	const system_state pause_state = freeze_emulation ? system_state::frozen : system_state::paused;

	// Try to pause
	const auto [old_state, done] = m_state.fetch_op([&](system_state& state)
	{
		if (state == system_state::running)
		{
			state = pause_state;
			return true;
		}

		if (!freeze_emulation)
		{
			return false;
		}

		if (state == system_state::ready || state == system_state::paused)
		{
			state = pause_state;
			return true;
		}

		return false;
	});

	if (!done)
	{
		return false;
	}

	if (old_state == system_state::ready || old_state == system_state::paused)
	{
		// Perform the side effects of Resume here when transforming paused to frozen state
		BlockingCallFromMainThread([this]()
		{
			for (auto& ref : m_pause_msgs_refs)
			{
				// Delete the message queued on pause
				*ref = 0;
			}

			m_pause_msgs_refs.clear();
		});
	}

	// Signal profilers to print results (if enabled)
	cpu_thread::flush_profilers();

	auto on_select = [](u32, cpu_thread& cpu)
	{
		cpu.state += cpu_flag::dbg_global_pause;
	};

	idm::select<named_thread<ppu_thread>>(on_select);
	idm::select<named_thread<spu_thread>>(on_select);

	if (auto rsx = g_fxo->try_get<rsx::thread>())
	{
		rsx->state += cpu_flag::dbg_global_pause;
	}

	GetCallbacks().on_pause();

	// Queue a "paused/frozen" overlay message and keep the native UI refreshing
	// from a dedicated thread while the message is alive
	BlockingCallFromMainThread([this, show_resume_message]()
	{
		const auto status = Emu.GetStatus(false);

		if (!show_resume_message || (status != system_state::paused && status != system_state::frozen))
		{
			return;
		}

		auto msg_ref = std::make_shared<atomic_t<u32>>(1);

		// No timeout
		rsx::overlays::queue_message(status == system_state::paused ? localized_string_id::EMULATION_PAUSED_RESUME_WITH_START : localized_string_id::EMULATION_FROZEN, -1, msg_ref);
		m_pause_msgs_refs.emplace_back(msg_ref);

		auto refresh_l = [this, msg_ref, status]()
		{
			while (*msg_ref && GetStatus(false) == status)
			{
				// Refresh Native UI
				rsx::set_native_ui_flip();
				thread_ctrl::wait_for(33'000);
			}

			msg_ref->release(0);
		};

		struct thread_t
		{
			std::unique_ptr<named_thread<decltype(refresh_l)>> m_thread;
		};

		g_fxo->get<thread_t>().m_thread.reset();
		g_fxo->get<thread_t>().m_thread = std::make_unique<named_thread<decltype(refresh_l)>>("Pause Message Thread"sv, std::move(refresh_l));
	});

	static atomic_t<u32> pause_mark = 0;

	if (freeze_emulation)
	{
		sys_log.warning("Emulation has been frozen! You can either use debugger tools to inspect current emulation state or terminate it.");
	}
	else
	{
		sys_log.success("Emulation is being paused... (mark=%u)", pause_mark++);
	}

	// Update pause start time; a non-zero previous value indicates a race
	if (m_pause_start_time.exchange(start))
	{
		sys_log.error("Emulator::Pause() error: concurrent access");
	}

	// Always enable display sleep, not only if it was prevented.
	enable_display_sleep();
	return true;
}

// Resume emulation from the paused state: release the global debug-pause flag
// on all CPU threads and account the time spent paused.
void Emulator::Resume()
{
	if (m_state != system_state::paused)
	{
		return;
	}

	// Print and reset debug data collected
	if (g_cfg.core.ppu_decoder == ppu_decoder_type::_static && g_cfg.core.ppu_debug)
	{
		PPUDisAsm dis_asm(cpu_disasm_mode::dump, vm::g_sudo_addr);

		std::string dump;

		for (u32 i = 0x10000; i < 0xE0000000;)
		{
			if (vm::check_addr(i, vm::page_executable))
			{
				// Dump and reset per-instruction hit counters
				if (auto& data = *reinterpret_cast<be_t<u32>*>(vm::g_stat_addr + i))
				{
					dis_asm.disasm(i);
					fmt::append(dump, "\n\t'%08X' %s", data, dis_asm.last_opcode);
					data = 0;
				}

				i += sizeof(u32);
			}
			else
			{
				// Skip the whole non-executable page
				i += 4096;
			}
		}

		if (!dump.empty())
		{
			ppu_log.notice("[RESUME] Dumping instruction stats:%s", dump);
		}
	}

	// Try to resume
	if (!m_state.compare_and_swap_test(system_state::paused, system_state::running))
	{
		return;
	}

	// Get pause start time
	const u64 time = m_pause_start_time.exchange(0);

	// Try to increment summary pause time
	if (time)
	{
		m_pause_amend_time += get_system_time() - time;
	}
	else
	{
		sys_log.error("Emulator::Resume() error: concurrent access");
	}

	perf_stat_base::report();

	auto on_select = [](u32, cpu_thread& cpu)
	{
		cpu.state -= cpu_flag::dbg_global_pause;
		cpu.state.notify_one();
	};

	idm::select<named_thread<ppu_thread>>(on_select);
	idm::select<named_thread<spu_thread>>(on_select);

	if (auto rsx = g_fxo->try_get<rsx::thread>())
	{
		// TODO: notify?
rsx->state -= cpu_flag::dbg_global_pause;
	}

	GetCallbacks().on_resume();

	sys_log.success("Emulation has been resumed!");

	BlockingCallFromMainThread([this]()
	{
		for (auto& ref : m_pause_msgs_refs)
		{
			// Delete the message queued on pause
			*ref = 0;
		}

		m_pause_msgs_refs.clear();
	});

	if (g_cfg.misc.prevent_display_sleep)
	{
		disable_display_sleep();
	}
}

u64 get_sysutil_cb_manager_read_count();

void process_qt_events();

// Politely request the guest to exit via the sysutil EXITGAME callback and
// wait for it to comply; falls back to Kill() when the request cannot be
// delivered (or when frozen / taking a savestate).
void Emulator::GracefulShutdown(bool allow_autoexit, bool async_op, bool savestate)
{
	const auto old_state = m_state.load();

	// Already stopping/stopped: just wait for completion in synchronous mode
	if (old_state == system_state::stopped || old_state == system_state::stopping)
	{
		while (!async_op && m_state != system_state::stopped)
		{
			process_qt_events();
			std::this_thread::sleep_for(16ms);
		}

		return;
	}

	// A savestate-close is already pending: wait for it instead of racing it
	if (!savestate && m_emu_state_close_pending)
	{
		while (!async_op && m_state != system_state::stopped)
		{
			process_qt_events();
			std::this_thread::sleep_for(16ms);
		}

		return;
	}

	// The guest cannot process the exit callback while paused
	if (old_state == system_state::paused)
	{
		Resume();
	}

	const u64 read_counter = get_sysutil_cb_manager_read_count();

	if (old_state == system_state::frozen || savestate || !sysutil_send_system_cmd(0x0101 /* CELL_SYSUTIL_REQUEST_EXITGAME */, 0))
	{
		// The callback has been rudely ignored, we have no other option but to force termination
		Kill(allow_autoexit && !savestate, savestate);

		while (!async_op && m_state != system_state::stopped)
		{
			process_qt_events();
			std::this_thread::sleep_for(16ms);
		}

		return;
	}

	// Poll for the guest reading the exit callback, then force Kill on timeout
	auto perform_kill = [read_counter, allow_autoexit, this, info = GetEmulationIdentifier()]()
	{
		bool read_sysutil_signal = false;

		for (u32 i = 100; i < 140; i++)
		{
			std::this_thread::sleep_for(50ms);

			// TODO: Prevent pausing by other threads while in this loop
			CallFromMainThread([this]()
			{
				Resume();
			}, nullptr, true, read_counter);

			process_qt_events(); // Is nullified when performed on non-main thread

			if (!read_sysutil_signal && read_counter != get_sysutil_cb_manager_read_count())
			{
				i -= 100; // Grant 5 seconds (if the signal is not read, force kill after two seconds)
				read_sysutil_signal = true;
			}

			// A different emulation session has started meanwhile: abandon this kill
			if (static_cast<u64>(info) != m_stop_ctr)
			{
				return;
			}
		}

		// An inevitable attempt to terminate the *current* emulation course will be issued after 7s
		CallFromMainThread([allow_autoexit, this]()
		{
			Kill(allow_autoexit);
		}, info);
	};

	if (async_op)
	{
		std::thread{perform_kill}.detach();
	}
	else
	{
		perform_kill();

		while (m_state != system_state::stopped)
		{
			process_qt_events();
			std::this_thread::sleep_for(16ms);
		}
	}
}

extern bool check_if_vdec_contexts_exist();

extern bool try_lock_spu_threads_in_a_state_compatible_with_savestates(bool revert_lock = false, std::vector<std::pair<std::shared_ptr<named_thread<spu_thread>>, u32>>* out_list = nullptr);

// Terminate emulation. When 'savestate' is set and preparation has not run yet,
// first spawns a "Savestate Prepare Thread" that locks SPU threads into a
// savestate-compatible state before re-entering Kill with a prepared stage.
void Emulator::Kill(bool allow_autoexit, bool savestate, savestate_stage* save_stage)
{
	// Helper: wrap a raw owning pointer into a shared_ptr of the deduced type
	static const auto make_ptr = [](auto ptr)
	{
		return std::shared_ptr<std::remove_pointer_t<decltype(ptr)>>(ptr);
	};

	if (!IsStopped() && savestate)
	{
		if (!save_stage || !save_stage->prepared)
		{
			// Only one savestate-close may be in flight
			if (m_emu_state_close_pending.exchange(true))
			{
				return;
			}

			// Self-referencing holder so the thread object can be joined/released from the main thread
			std::shared_ptr<std::shared_ptr<void>> pause_thread = std::make_shared<std::shared_ptr<void>>();

			*pause_thread = make_ptr(new named_thread("Savestate Prepare Thread"sv, [pause_thread, allow_autoexit, this]() mutable
			{
				std::vector<std::pair<std::shared_ptr<named_thread<spu_thread>>, u32>> paused_spus;

				if (!try_lock_spu_threads_in_a_state_compatible_with_savestates(false, &paused_spus))
				{
					sys_log.error("Failed to savestate: failed to lock SPU threads execution.");

					if (!g_cfg.savestate.compatible_mode)
					{
						rsx::overlays::queue_message(localized_string_id::SAVESTATE_FAILED_DUE_TO_MISSING_SPU_SETTING);
						sys_log.error("Enabling SPU Savestates-Compatible Mode in Advanced tab may fix this.");
					}
					else
					{
						rsx::overlays::queue_message(localized_string_id::SAVESTATE_FAILED_DUE_TO_SPU);
					}

					m_emu_state_close_pending = false;

					CallFromMainThread([pause = std::move(pause_thread)]()
					{
						// Join thread
					}, nullptr, false);

					return;
				}

				bool savedata_error = false;
				bool vdec_error = false;

				if
(!g_fxo->get<hle_locks_t>().try_finalize([&]() { // List of conditions required for emulation to save properly vdec_error = check_if_vdec_contexts_exist(); return !vdec_error; })) { // Unlock SPUs try_lock_spu_threads_in_a_state_compatible_with_savestates(true); savedata_error = !vdec_error; // For now it is implied a savedata error if (vdec_error) { rsx::overlays::queue_message(localized_string_id::SAVESTATE_FAILED_DUE_TO_VDEC); sys_log.error("Failed to savestate: HLE VDEC (video decoder) context(s) exist." "\nLLE libvdec.sprx by selecting it in Advanced tab -> Firmware Libraries." "\nYou need to close the game for it to take effect." "\nIf you cannot close the game due to losing important progress, your best chance is to skip the current cutscenes if any are played and retry."); } if (savedata_error) { rsx::overlays::queue_message(localized_string_id::SAVESTATE_FAILED_DUE_TO_SAVEDATA); sys_log.error("Failed to savestate: Savedata operation is active." "\nYour best chance is to wait for the current game saving operation to finish and retry." 
"\nThe game is probably displaying a saving cicrle or other gesture to indicate that it is saving."); } m_emu_state_close_pending = false; CallFromMainThread([pause = std::move(pause_thread)]() { // Join thread }, nullptr, false); return; } CallFromMainThread([allow_autoexit, this, paused_spus]() { savestate_stage stage{}; stage.prepared = true; stage.paused_spus = paused_spus; Kill(allow_autoexit, true, &stage); }); CallFromMainThread([pause = std::move(pause_thread)]() { // Join thread }, nullptr, false); })); return; } } g_tls_log_prefix = []() { return std::string(); }; if (save_stage && save_stage->prepared) { // } else if (m_emu_state_close_pending.exchange(true)) { return; } if (system_state old_state = m_state.fetch_op([](system_state& state) { if (state == system_state::stopping || state == system_state::stopped) { return false; } state = system_state::stopping; return true; }).first; old_state <= system_state::stopping) { if (old_state == system_state::stopping) { // Termination is in progress return; } // Ensure clean state m_ar.reset(); argv.clear(); envp.clear(); data.clear(); disc.clear(); klic.clear(); hdd1.clear(); init_mem_containers = nullptr; after_kill_callback = nullptr; m_config_path.clear(); m_config_mode = cfg_mode::custom; read_used_savestate_versions(); m_savestate_extension_flags1 = {}; m_emu_state_close_pending = false; // Enable logging rpcs3::utils::configure_logs(true); return; } // Enable logging rpcs3::utils::configure_logs(true); sys_log.notice("Stopping emulator..."); { // Show visual feedback to the user in case that stopping takes a while. // This needs to be done before actually stopping, because otherwise the necessary threads will be terminated before we can show an image. if (auto progress_dialog = g_fxo->try_get<named_thread<progress_dialog_server>>(); progress_dialog && g_progr_text.load()) { // We are currently showing a progress dialog. Notify it that we are going to stop emulation. 
g_system_progress_stopping = true; std::this_thread::sleep_for(20ms); // Enough for one frame to be rendered } } // Signal threads if (auto rsx = g_fxo->try_get<rsx::thread>()) { *static_cast<cpu_thread*>(rsx) = thread_state::aborting; } for (const auto& [type, data] : *g_fxo) { if (type.thread_op) { type.thread_op(data, thread_state::aborting); } } sys_log.notice("All emulation threads have been signaled."); // Wait fot newly created cpu_thread to see that emulation has been stopped id_manager::g_mutex.lock_unlock(); // Type-less smart pointer container for thread (cannot know its type with this approach) // There is no race condition because it is only accessed by the same thread std::shared_ptr<std::shared_ptr<void>> join_thread = std::make_shared<std::shared_ptr<void>>(); *join_thread = make_ptr(new named_thread("Emulation Join Thread"sv, [join_thread, savestate, allow_autoexit, save_stage = save_stage ? *save_stage : savestate_stage{}, this]() mutable { fs::pending_file file; auto verbose_message = std::make_shared<atomic_ptr<std::string>>(); auto init_mtx = std::make_shared<stx::init_mutex>(); auto join_ended = std::make_shared<bool>(false); auto to_ar = std::make_shared<atomic_ptr<utils::serial>>(); auto stop_watchdog = make_ptr(new named_thread("Stop Watchdog"sv, [to_ar, init_mtx, join_ended, verbose_message, this]() { const auto closed_sucessfully = std::make_shared<atomic_t<bool>>(false); bool is_being_held_longer = false; for (int i = 0; !*join_ended && thread_ctrl::state() != thread_state::aborting;) { if (g_watchdog_hold_ctr) { is_being_held_longer = true; } // We don't need accurate timekeeping, using clocks may interfere with debugging if (i >= (is_being_held_longer ? 5000 : 2000)) { // Total amount of waiting: about 10s GetCallbacks().on_emulation_stop_no_response(closed_sucessfully, is_being_held_longer ? 
25 : 10); while (thread_ctrl::state() != thread_state::aborting) { thread_ctrl::wait_for(5'000); } break; } thread_ctrl::wait_for(5'000); } while (thread_ctrl::state() != thread_state::aborting) { if (auto ar_ptr = to_ar->load()) { // Total amount of waiting: about 10s GetCallbacks().on_save_state_progress(closed_sucessfully, ar_ptr, verbose_message.get(), init_mtx); while (thread_ctrl::state() != thread_state::aborting) { thread_ctrl::wait_for(5'000); } break; } thread_ctrl::wait_for(5'000); } *closed_sucessfully = true; })); // Join threads for (const auto& [type, data] : *g_fxo) { if (type.thread_op) { type.thread_op(data, thread_state::finished); } } for (const auto& spu : save_stage.paused_spus) { if (spu.first->pc != spu.second || spu.first->unsavable) { std::string dump; spu.first->dump_all(dump); sys_log.error("SPU thread continued after being paused. (old_pc=0x%x, pc=0x%x, unsavable=%d)", spu.second, spu.first->pc, spu.first->unsavable); spu_log.notice("SPU thread context:\n%s", dump); } } // Save it first for maximum timing accuracy const u64 timestamp = get_timebased_time(); const u64 start_time = get_system_time(); sys_log.notice("All threads have been stopped."); std::string path; static_cast<void>(init_mtx->init()); auto set_progress_message = [&](std::string_view text) { *verbose_message = stx::make_single<std::string>(text); }; while (savestate) { set_progress_message("Creating File"); path = get_savestate_file(m_title_id, m_path, 0, 0); // The function is meant for reading files, so if there is no ZST file it would not return compressed file path // So this is the only place where the result is edited if need to be constexpr std::string_view save = ".SAVESTAT"; path.resize(path.rfind(save) + save.size()); path += ".zst"; if (!fs::create_path(fs::get_parent_dir(path))) { sys_log.error("Failed to create savestate directory! 
(path='%s', %s)", fs::get_parent_dir(path), fs::g_tls_error); savestate = false; break; } if (!file.open(path)) { sys_log.error("Failed to create savestate temporary file! (path='%s', %s)", file.get_temp_path(), fs::g_tls_error); savestate = false; break; } auto serial_ptr = stx::make_single<utils::serial>(); serial_ptr->m_file_handler = make_compressed_zstd_serialization_file_handler(file.file); *to_ar = std::move(serial_ptr); signal_system_cache_can_stay(); break; } *join_ended = true; if (savestate) { // Savestate thread named_thread emu_state_cap_thread("Emu State Capture Thread", [&]() { g_tls_log_prefix = []() { return fmt::format("Emu State Capture Thread: '%s'", g_tls_serialize_name); }; auto& ar = *to_ar->load(); read_used_savestate_versions(); // Reset version data USING_SERIALIZATION_VERSION(global_version); // Avoid duplicating TAR object memory because it can be very large auto save_tar = [&](const std::string& path) { if (!fs::is_dir(path)) { ar(usz{}); return; } // Cached file list from the first call std::vector<fs::dir_entry> dir_entries; // Calculate memory requirements utils::serial ar_null; ar_null.m_file_handler = make_null_serialization_file_handler(); tar_object::save_directory(path, ar_null, {}, std::move(dir_entries), false); ar(ar_null.pos); ar.breathe(); const usz old_pos = ar.seek_end(); tar_object::save_directory(path, ar, {}, std::move(dir_entries), true); const usz new_pos = ar.seek_end(); const usz tar_size = new_pos - old_pos; if (tar_size % 512 || tar_size != ar_null.pos) { fmt::throw_exception("Unexpected TAR entry size (size=0x%x, expected=0x%x, entries=0x%x)", tar_size, ar_null.pos, dir_entries.size()); } sys_log.success("Saved the contents of directory '%s' (size=0x%x)", path, tar_size); }; auto save_hdd1 = [&]() { const std::string _path = vfs::get("/dev_hdd1"); std::string_view path = _path; path = path.substr(0, path.find_last_not_of(fs::delim) + 1); ar(std::string(path.substr(path.find_last_of(fs::delim) + 1))); if 
(!_path.empty()) { save_tar(_path); } }; auto save_hdd0 = [&]() { if (g_cfg.savestate.save_disc_game_data) { const std::string path = vfs::get("/dev_hdd0/game/"); for (auto& entry : fs::dir(path)) { if (entry.is_directory && entry.name != "." && entry.name != "..") { if (auto res = psf::load(path + entry.name + "/PARAM.SFO"); res && /*!m_title_id.empty() &&*/ psf::get_string(res.sfo, "TITLE_ID") == m_title_id && psf::get_string(res.sfo, "CATEGORY") == "GD") { ar(entry.name); save_tar(path + entry.name); } } } } ar(std::string{}); }; set_progress_message("Creating Header"); ar("RPCS3SAV"_u64); ar(std::endian::native == std::endian::little); ar(g_cfg.savestate.state_inspection_mode.get()); ar(usz{10 + sizeof(usz) + sizeof(u8)}); // Offset of versioning data (fixed to the following data) { // Gather versions because with compressed format going back and patching offset is not efficient utils::serial ar_temp; ar_temp.m_file_handler = make_null_serialization_file_handler(); g_fxo->save(ar_temp); ar(u8{1}); ar(read_used_savestate_versions()); } ar(u8{1}); ar(rpcs3::get_verbose_version()); ar(fmt::format("%s", std::chrono::system_clock::now())); ar(GetTitleAndTitleID()); ar(std::string{}); // Possible user note ar(std::array<u8, 32>{}); // Reserved for future use if (auto dir = vfs::get("/dev_bdvd/PS3_GAME"); fs::is_dir(dir) && !fs::is_file(fs::get_parent_dir(dir) + "/PS3_DISC.SFB")) { // Fake /dev_bdvd/PS3_GAME detected, use HDD0 for m_path restoration ensure(vfs::unmount("/dev_bdvd/PS3_GAME")); ar(vfs::retrieve(m_path)); ar(vfs::retrieve(disc)); ensure(vfs::mount("/dev_bdvd/PS3_GAME", dir)); } else { ar(vfs::retrieve(m_path)); ar(!m_title_id.empty() && !vfs::get("/dev_bdvd").empty() ? m_title_id : vfs::retrieve(disc)); } ar(klic.empty() ? 
std::array<u8, 16>{} : std::bit_cast<std::array<u8, 16>>(klic[0])); ar(m_game_dir); set_progress_message("Saving HDD1"); save_hdd1(); set_progress_message("Saving HDD0"); save_hdd0(); ar(std::array<u8, 32>{}); // Reserved for future use set_progress_message("Saving VMemory"); vm::save(ar); set_progress_message("Saving FXO"); g_fxo->save(ar); set_progress_message("Finalizing File"); bs_t<SaveStateExtentionFlags1> extension_flags{SaveStateExtentionFlags1::SupportsMenuOpenResume}; if (g_fxo->get<SysutilMenuOpenStatus>().active) { extension_flags += SaveStateExtentionFlags1::ShouldCloseMenu; } ar(extension_flags); ar(std::array<u8, 32>{}); // Reserved for future use ar(timestamp); // Final file write, the file is ready to be committed ar.seek_end(); ar.m_file_handler->finalize(ar); }); // Join it emu_state_cap_thread(); if (emu_state_cap_thread == thread_state::errored) { sys_log.error("Saving savestate failed due to fatal error!"); to_ar->reset(); savestate = false; } } if (savestate) { fs::stat_t file_stat{}; set_progress_message("Commiting File"); { auto& ar = *to_ar->load(); auto reset = init_mtx->reset(); ar = {}; ar.set_reading_state(); // Guard against using it reset.set_init(); } if (!file.commit() || !fs::get_stat(path, file_stat)) { sys_log.error("Failed to write savestate to file! (path='%s', %s)", path, fs::g_tls_error); savestate = false; } else { std::string old_path = path.substr(0, path.find_last_not_of(fs::delim)); std::string old_path2 = old_path; old_path2.insert(old_path.find_last_of(fs::delim) + 1, "old-"sv); old_path.insert(old_path.find_last_of(fs::delim) + 1, "used_"sv); if (fs::remove_file(old_path)) { sys_log.success("Old savestate has been removed: path='%s'", old_path); } // For backwards compatibility - avoid having loose files if (fs::remove_file(old_path2)) { sys_log.success("Old savestate has been removed: path='%s'", old_path2); } sys_log.success("Saved savestate! 
path='%s' (file_size=0x%x, time_to_save=%gs)", path, file_stat.size, (get_system_time() - start_time) / 1000000.); if (!g_cfg.savestate.suspend_emu) { // Allow to reboot from GUI m_path = path; } } } // Log additional debug information - do not do it on the main thread due to the concern of halting UI events if (g_tty && sys_log.notice) { // Write merged TTY output after emulation has been safely stopped if (usz attempted_read_size = utils::sub_saturate<usz>(g_tty.pos(), m_tty_file_init_pos)) { if (fs::file tty_read_fd{fs::get_cache_dir() + "TTY.log"}) { // Enforce an arbitrary limit for now to avoid OOM in case the guest code has bombarded TTY // 3MB, this should be enough constexpr usz c_max_tty_spill_size = 0x30'0000; std::string tty_buffer(std::min<usz>(attempted_read_size, c_max_tty_spill_size), '\0'); tty_buffer.resize(tty_read_fd.read_at(m_tty_file_init_pos, tty_buffer.data(), tty_buffer.size())); tty_read_fd.close(); if (!tty_buffer.empty()) { // Mark start and end very clearly with RPCS3 put in it sys_log.notice("\nAccumulated RPCS3 TTY:\n\n\n%s\n\n\nEnd RPCS3 TTY Section.\n", tty_buffer); } } } } if (g_cfg.core.spu_debug && sys_log.notice) { const std::string cache_path = rpcs3::cache::get_ppu_cache(); if (fs::file spu_log{cache_path + "/spu.log"}) { // 96MB limit, this may be a lot but this only has an effect when enabling the debug option constexpr usz c_max_spu_log_spill_size = 0x600'0000; const usz total_size = spu_log.size(); std::string log_buffer(std::min<usz>(spu_log.size(), c_max_spu_log_spill_size), '\0'); log_buffer.resize(spu_log.read(log_buffer.data(), log_buffer.size())); spu_log.close(); if (!log_buffer.empty()) { usz to_remove = 0; usz part_ctr = 1; for (std::string_view not_logged = log_buffer; !not_logged.empty(); part_ctr++, not_logged.remove_prefix(to_remove)) { std::string_view to_log = not_logged; to_log = to_log.substr(0, 0x8000); to_log = to_log.substr(0, utils::add_saturate<usz>(to_log.rfind("\n========== SPU BLOCK"sv), 1)); 
to_remove = to_log.size(); std::string new_log(to_log); for (usz iter = 0, out_added = 0; iter < to_log.size();) { const usz index = to_log.find(") ==========", iter); if (index == umax) { break; } const std::string_view until = to_log.substr(0, index); const usz seperator = until.rfind(", "); if (seperator == umax) { iter = index + 1; continue; } const std::string_view prog_hash = until.substr(seperator + 2); if (prog_hash.empty()) { iter = index + 1; continue; } const fmt::base57_result result = fmt::base57_result::from_string(prog_hash); if (result.size < sizeof(be_t<u64>)) { iter = index + 1; continue; } const u64 hash_val = read_from_ptr<be_t<u64>>(result.data) & -65536; const f64 usage = get_cpu_program_usage_percent(hash_val); if (usage == 0) { iter = index + 1; continue; } const std::string text_append = fmt::format("usage %%%g, ", usage); new_log.insert(new_log.begin() + seperator + out_added + 2, text_append.begin(), text_append.end()); out_added += text_append.size(); iter = index + 1; } // Cannot log it all at once due to technical reasons, split it to 8MB at maximum of whole functions // Assume the block prefix exists because it is created by RPCS3 (or log it in an ugly manner if it does not exist) sys_log.notice("Logging spu.log #%u:\n\n%s\n", part_ctr, new_log); } sys_log.notice("End spu.log (%u bytes)", total_size); } } } set_progress_message("Resetting Objects"); // Final termination from main thread (move the last ownership of join thread in order to destroy it) CallFromMainThread([join_thread = std::move(join_thread), verbose_message, stop_watchdog, init_mtx, allow_autoexit, this]() { cpu_thread::cleanup(); lv2_obj::cleanup(); g_fxo->reset(); sys_log.notice("Objects cleared..."); vm::close(); *stop_watchdog = thread_state::finished; static_cast<void>(init_mtx->reset()); jit_runtime::finalize(); perf_stat_base::report(); static u64 aw_refs = 0; static u64 aw_colm = 0; static u64 aw_colc = 0; static u64 aw_used = 0; aw_refs = 0; aw_colm = 0; 
aw_colc = 0; aw_used = 0; atomic_wait::parse_hashtable([](u64 /*id*/, u32 refs, u64 ptr, u32 maxc) -> bool { aw_refs += refs != 0; aw_used += ptr != 0; aw_colm = std::max<u64>(aw_colm, maxc); aw_colc += maxc != 0; return false; }); sys_log.notice("Atomic wait hashtable stats: [in_use=%u, used=%u, max_collision_weight=%u, total_collisions=%u]", aw_refs, aw_used, aw_colm, aw_colc); m_stop_ctr++; m_stop_ctr.notify_all(); // Boot arg cleanup (preserved in the case restarting) argv.clear(); envp.clear(); data.clear(); disc.clear(); klic.clear(); hdd1.clear(); init_mem_containers = nullptr; m_config_path.clear(); m_config_mode = cfg_mode::custom; m_ar.reset(); read_used_savestate_versions(); m_savestate_extension_flags1 = {}; m_emu_state_close_pending = false; initialize_timebased_time(0, true); // Complete the operation m_state = system_state::stopped; GetCallbacks().on_stop(); // Always Enable display sleep, not only if it was prevented. enable_display_sleep(); if (allow_autoexit) { Quit(g_cfg.misc.autoexit.get()); } if (after_kill_callback) { // Make after_kill_callback empty before call const auto callback = std::move(after_kill_callback); callback(); } }); })); } game_boot_result Emulator::Restart(bool graceful) { if (m_state == system_state::stopping) { // Emulation stop is in progress return game_boot_result::still_running; } Emu.after_kill_callback = [this] { // Reload with prior configs. 
if (const auto error = Load(m_title_id); error != game_boot_result::no_errors) { sys_log.error("Restart failed: %s", error); } }; if (!IsStopped()) { auto save_args = std::make_tuple(argv, envp, data, disc, klic, hdd1, m_config_mode, m_config_path); if (graceful) GracefulShutdown(false, false); else Kill(false); std::tie(argv, envp, data, disc, klic, hdd1, m_config_mode, m_config_path) = std::move(save_args); } else { // Execute and empty the callback ::as_rvalue(std::move(Emu.after_kill_callback))(); } return game_boot_result::no_errors; } bool Emulator::Quit(bool force_quit) { m_force_boot = false; // The callback is only used if we actually quit RPCS3 const auto on_exit = []() { Emu.CleanUp(); }; if (GetCallbacks().try_to_quit) { return GetCallbacks().try_to_quit(force_quit, on_exit); } on_exit(); return true; } void Emulator::CleanUp() { // Deinitialize object manager to prevent any hanging objects at program exit g_fxo->clear(); } std::string Emulator::GetFormattedTitle(double fps) const { rpcs3::title_format_data title_data; title_data.format = g_cfg.misc.title_format.to_string(); title_data.title = GetTitle(); title_data.title_id = GetTitleID(); title_data.renderer = g_cfg.video.renderer.to_string(); title_data.vulkan_adapter = g_cfg.video.vk.adapter.to_string(); title_data.fps = fps; return rpcs3::get_formatted_title(title_data); } s32 error_code::error_report(s32 result, const logs::message* channel, const char* fmt, const fmt_type_info* sup, const u64* args) { static thread_local std::string g_tls_error_str; static thread_local std::unordered_map<std::string, usz> g_tls_error_stats; if (!channel) { channel = &sys_log.error; } if (!sup && !args) { if (!fmt) { // Report and clean error state for (auto&& pair : g_tls_error_stats) { if (pair.second > 3) { channel->operator()("Stat: %s [x%u]", pair.first, pair.second); } } g_tls_error_stats.clear(); return 0; } } ensure(fmt); const char* func = "Unknown function"; if (auto ppu = 
get_current_cpu_thread<ppu_thread>()) { if (auto current = ppu->current_function) { func = current; } } else if (auto spu = get_current_cpu_thread<spu_thread>()) { if (auto current = spu->current_func; current && spu->start_time) { func = current; } } // Format log message (use preallocated buffer) g_tls_error_str.clear(); fmt::append(g_tls_error_str, "'%s' failed with 0x%08x", func, result); // Add spacer between error and fmt if necessary if (fmt[0] != ' ') g_tls_error_str += " : "; fmt::raw_append(g_tls_error_str, fmt, sup, args); // Update stats and check log threshold if (g_log_all_errors) [[unlikely]] { if (!g_tls_error_stats.empty()) { // Report and clean error state error_report(0, nullptr, nullptr, nullptr, nullptr); } channel->operator()("%s", g_tls_error_str); } else { const auto stat = ++g_tls_error_stats[g_tls_error_str]; if (stat <= 3) { channel->operator()("%s [%u]", g_tls_error_str, stat); } } return result; } void Emulator::ConfigurePPUCache() const { auto& _main = g_fxo->get<main_ppu_module>(); _main.cache = rpcs3::utils::get_cache_dir(_main.path); fmt::append(_main.cache, "ppu-%s-%s/", fmt::base57(_main.sha1), _main.path.substr(_main.path.find_last_of('/') + 1)); if (!fs::create_path(_main.cache)) { sys_log.error("Failed to create cache directory: %s (%s)", _main.cache, fs::g_tls_error); } else { sys_log.notice("Cache: %s", _main.cache); } } std::set<std::string> Emulator::GetGameDirs() const { std::set<std::string> dirs; // Add boot directory. // For installed titles and disc titles with updates this is usually /dev_hdd0/game/<title_id>/ // For disc titles without updates this is /dev_bdvd/PS3_GAME/ if (const std::string dir = vfs::get(GetDir()); !dir.empty()) { dirs.insert(dir + '/'); } // Add more paths for disc titles. if (const std::string dev_bdvd = vfs::get("/dev_bdvd/PS3_GAME"); !dev_bdvd.empty() && !GetTitleID().empty()) { // Add the dev_bdvd dir if available. This is necessary for disc titles with installed updates. 
dirs.insert(dev_bdvd + '/'); // Naive search for all matching game data dirs. const std::string game_dir = vfs::get("/dev_hdd0/game/"); for (auto&& entry : fs::dir(game_dir)) { if (entry.is_directory && entry.name.starts_with(GetTitleID())) { const std::string sfo_dir = game_dir + entry.name + '/'; const std::string sfo_path = sfo_dir + "PARAM.SFO"; const fs::file sfo_file(sfo_path); if (!sfo_file) { continue; } const psf::registry psf = psf::load_object(sfo_file, sfo_path); const std::string title_id = std::string(psf::get_string(psf, "TITLE_ID", "")); if (title_id == GetTitleID()) { dirs.insert(sfo_dir); } } } } return dirs; } u32 Emulator::AddGamesFromDir(const std::string& path) { u32 games_added = 0; m_games_config.set_save_on_dirty(false); // search dropped path first or else the direct parent to an elf is wrongly skipped if (const game_boot_result error = AddGame(path); error == game_boot_result::no_errors) { games_added++; } process_qt_events(); // search direct subdirectories, that way we can drop one folder containing all games for (auto&& dir_entry : fs::dir(path)) { if (!dir_entry.is_directory || dir_entry.name == "." 
|| dir_entry.name == "..") { continue; } const std::string dir_path = path + '/' + dir_entry.name; if (const game_boot_result error = AddGame(dir_path); error == game_boot_result::no_errors) { games_added++; } process_qt_events(); } m_games_config.set_save_on_dirty(true); if (m_games_config.is_dirty() && !m_games_config.save()) { sys_log.error("Failed to save games.yml after adding games"); } return games_added; } game_boot_result Emulator::AddGame(const std::string& path) { // Handle files directly if (!fs::is_dir(path)) { return AddGameToYml(path); } game_boot_result result = game_boot_result::nothing_to_boot; bool result_set = false; std::string elf; if (const game_boot_result res = GetElfPathFromDir(elf, path); res == game_boot_result::no_errors) { ensure(!elf.empty()); result = AddGameToYml(elf); result_set = true; } for (auto&& entry : fs::dir{ path }) { if (entry.name == "." || entry.name == "..") { continue; } if (entry.is_directory && std::regex_match(entry.name, std::regex("^PS3_GM[[:digit:]]{2}$"))) { const std::string elf = path + "/" + entry.name + "/USRDIR/EBOOT.BIN"; if (fs::is_file(elf)) { if (const auto err = AddGameToYml(elf); err != game_boot_result::no_errors) { if (err != game_boot_result::already_added || !result_set) { result = err; result_set = true; } } } } } return result; } game_boot_result Emulator::AddGameToYml(const std::string& path) { // Detect boot location const auto is_invalid_path = [this](std::string_view path, std::string_view dir) -> game_boot_result { if (IsPathInsideDir(path, dir)) { sys_log.error("Adding games from dev_flash is not allowed."); return game_boot_result::invalid_file_or_folder; } return VerifyPathCasing(path, dir, false); }; if (game_boot_result error = is_invalid_path(path, rpcs3::utils::get_hdd0_dir()); error != game_boot_result::no_errors) { sys_log.error("Adding games from dev_hdd0 is not allowed."); return error; } if (game_boot_result error = is_invalid_path(path, g_cfg_vfs.get_dev_flash()); error != 
game_boot_result::no_errors) { sys_log.error("Adding games from dev_flash is not allowed."); return error; } // Load PARAM.SFO const std::string elf_dir = fs::get_parent_dir(path); std::string sfo_dir = rpcs3::utils::get_sfo_dir_from_game_path(fs::get_parent_dir(elf_dir)); const psf::registry _psf = psf::load_object(sfo_dir + "/PARAM.SFO"); const std::string title_id = std::string(psf::get_string(_psf, "TITLE_ID")); const std::string cat = std::string(psf::get_string(_psf, "CATEGORY")); if (!_psf.empty() && cat.empty()) { sys_log.fatal("Corrupted PARAM.SFO found! Try reinstalling the game."); return game_boot_result::invalid_file_or_folder; } if (title_id.empty()) { sys_log.notice("Can not add binary without TITLE_ID to games.yml. (path=%s, category=%s)", path, cat); return game_boot_result::invalid_file_or_folder; } if (cat == "GD") { sys_log.notice("Can not add game data to games.yml. (path=%s, title_id=%s, category=%s)", path, title_id, cat); return game_boot_result::invalid_file_or_folder; } // Set bdvd_dir std::string bdvd_dir; std::string game_dir; std::string sfb_dir; GetBdvdDir(bdvd_dir, sfb_dir, game_dir, elf_dir); // Check /dev_bdvd/ if (bdvd_dir.empty()) { // Add HG games not in HDD0 to games.yml if (cat == "HG") { switch (m_games_config.add_external_hdd_game(title_id, sfo_dir)) { case games_config::result::failure: return game_boot_result::generic_error; case games_config::result::success: return game_boot_result::no_errors; case games_config::result::exists: return game_boot_result::already_added; } return game_boot_result::generic_error; } } else if (fs::is_dir(bdvd_dir)) { if (const std::string sfb_path = bdvd_dir + "/PS3_DISC.SFB"; !IsValidSfb(sfb_path)) { sys_log.error("Invalid disc directory for the disc game %s. 
(%s)", title_id, sfb_path); return game_boot_result::invalid_file_or_folder; } // Store /dev_bdvd/ location switch (m_games_config.add_game(title_id, bdvd_dir)) { case games_config::result::failure: { sys_log.error("Failed to save BDVD location of title '%s' (error=%s)", title_id, fs::g_tls_error); return game_boot_result::generic_error; } case games_config::result::success: { sys_log.notice("Registered BDVD game directory for title '%s': %s", title_id, bdvd_dir); return game_boot_result::no_errors; } case games_config::result::exists: { return game_boot_result::already_added; } } return game_boot_result::generic_error; } sys_log.notice("Nothing to add in path %s (title_id=%s, category=%s)", path, title_id, cat); return game_boot_result::invalid_file_or_folder; } u32 Emulator::RemoveGames(const std::vector<std::string>& title_id_list, bool save_on_disk) { if (title_id_list.empty()) { return 0; } u32 games_removed = 0; m_games_config.set_save_on_dirty(false); for (const std::string& title_id : title_id_list) { if (RemoveGameFromYml(title_id) == game_boot_result::no_errors) { games_removed++; } } m_games_config.set_save_on_dirty(true); if (save_on_disk && m_games_config.is_dirty() && !m_games_config.save()) { sys_log.error("Failed to save games.yml after removing games"); } return games_removed; } game_boot_result Emulator::RemoveGameFromYml(const std::string& title_id) { // Remove title from games.yml switch (m_games_config.remove_game(title_id)) { case games_config::result::failure: { sys_log.error("Failed to remove title '%s' (error=%s)", title_id, fs::g_tls_error); return game_boot_result::generic_error; } case games_config::result::success: case games_config::result::exists: // not applicable for m_games_config.remove_game(). Added just to avoid compilation warnings! 
{ sys_log.notice("Removed title '%s'", title_id); return game_boot_result::no_errors; } } return game_boot_result::generic_error; } bool Emulator::IsPathInsideDir(std::string_view path, std::string_view dir) const { const std::string dir_path = GetCallbacks().resolve_path(dir); const std::string resolved_path = GetCallbacks().resolve_path(path); return !dir_path.empty() && !resolved_path.empty() && (resolved_path + '/').starts_with((dir_path.back() == '/') ? dir_path : (dir_path + '/')); } game_boot_result Emulator::VerifyPathCasing( [[maybe_unused]] std::string_view path, [[maybe_unused]] std::string_view dir, [[maybe_unused]] bool from_dir) const { #ifdef _WIN32 // path might be passed from command line with differences in uppercase/lowercase on windows. if (!from_dir && IsPathInsideDir(fmt::to_lower(path), fmt::to_lower(dir))) { // Let's just abort to prevent errors down the line. sys_log.error("The path seems to contain incorrectly cased characters. Please adjust the path and try again."); return game_boot_result::invalid_file_or_folder; } #endif return game_boot_result::no_errors; } const std::string& Emulator::GetFakeCat() const { if (m_cat == "DG") { const std::string mount_point = vfs::get("/dev_bdvd"); if (mount_point.empty() || !IsPathInsideDir(m_path, mount_point)) { static const std::string s_hg = "HG"; return s_hg; } } return m_cat; } const std::string Emulator::GetSfoDir(bool prefer_disc_sfo) const { if (prefer_disc_sfo) { const std::string sfo_dir = vfs::get("/dev_bdvd/PS3_GAME"); if (!sfo_dir.empty()) { return sfo_dir; } } return m_sfo_dir; } void Emulator::GetBdvdDir(std::string& bdvd_dir, std::string& sfb_dir, std::string& game_dir, const std::string& elf_dir) { // Find disc directory by searching a valid PS3_DISC.SFB closest to root directory std::string main_dir; std::string_view main_dir_name; std::string parent_dir; for (std::string search_dir = elf_dir.substr(0, elf_dir.find_last_not_of(fs::delim) + 1);; search_dir = std::move(parent_dir)) { 
parent_dir = fs::get_parent_dir(search_dir); if (parent_dir.size() == search_dir.size()) { // Keep looking until root directory is reached break; } std::string_view dir_name = std::string_view{search_dir}.substr(search_dir.find_last_of(fs::delim) + 1); if (dir_name.size() != ("PS3_GAME"sv).size()) { continue; } if (dir_name == "PS3_GAME"sv || std::regex_match(dir_name.begin(), dir_name.end(), std::regex("^PS3_GM[[:digit:]]{2}$"))) { if (IsValidSfb(parent_dir + "/PS3_DISC.SFB")) { // Remember valid disc directory main_dir_name = {}; // Remove old string reference main_dir = search_dir; sfb_dir = parent_dir; main_dir_name = std::string_view{main_dir}.substr(main_dir.find_last_of(fs::delim) + 1); } } } if (!sfb_dir.empty()) { bdvd_dir = sfb_dir + "/"; game_dir = std::string{main_dir_name}; } } void Emulator::EjectDisc() { if (!Emu.IsRunning()) { sys_log.error("Can not eject disc if the Emulator is not running!"); return; } if (vfs::get("/dev_bdvd").empty() && vfs::get("/dev_ps2disc").empty()) { sys_log.error("Can not eject disc if both dev_bdvd and dev_ps2disc are not mounted!"); return; } sys_log.notice("Ejecting disc..."); m_sfo_dir.clear(); if (g_fxo->is_init<disc_change_manager>()) { g_fxo->get<disc_change_manager>().eject_disc(); } } game_boot_result Emulator::InsertDisc(const std::string& path) { if (!Emu.IsRunning()) { sys_log.error("Can not insert disc if the Emulator is not running!"); return game_boot_result::generic_error; } sys_log.notice("Inserting disc... (path='%s')", path); const std::string hdd0_game = vfs::get("/dev_hdd0/game/"); const bool from_hdd0_game = IsPathInsideDir(path, hdd0_game); if (from_hdd0_game) { sys_log.error("Inserting disc failed: Can not mount discs from '/dev_hdd0/game/'. 
(path='%s')", path); return game_boot_result::wrong_disc_location; } std::string disc_root; std::string ps3_game_dir; const disc::disc_type disc_type = disc::get_disc_type(path, disc_root, ps3_game_dir); if (disc_type == disc::disc_type::invalid) { sys_log.error("Inserting disc failed: not a disc (path='%s')", path); return game_boot_result::wrong_disc_location; } ensure(!disc_root.empty()); u32 type = CELL_GAME_DISCTYPE_OTHER; std::string title_id; if (disc_type == disc::disc_type::ps3) { type = CELL_GAME_DISCTYPE_PS3; // Double check PARAM.SFO const std::string sfo_dir = rpcs3::utils::get_sfo_dir_from_game_path(disc_root); const psf::registry _psf = psf::load_object(sfo_dir + "/PARAM.SFO"); if (_psf.empty()) { sys_log.error("Inserting disc failed: Corrupted PARAM.SFO found! (path='%s/PARAM.SFO')", sfo_dir); return game_boot_result::invalid_file_or_folder; } title_id = std::string(psf::get_string(_psf, "TITLE_ID")); if (title_id.empty()) { sys_log.error("Inserting disc failed: Corrupted PARAM.SFO found! TITLE_ID empty (path='%s/PARAM.SFO')", sfo_dir); return game_boot_result::invalid_file_or_folder; } m_sfo_dir = sfo_dir; m_game_dir = ps3_game_dir; sys_log.notice("New sfo dir: %s", m_sfo_dir); sys_log.notice("New game dir: %s", m_game_dir); ensure(vfs::mount("/dev_bdvd", disc_root)); ensure(vfs::mount("/dev_bdvd/PS3_GAME", disc_root + m_game_dir + "/")); } else if (disc_type == disc::disc_type::ps2) { type = CELL_GAME_DISCTYPE_PS2; ensure(vfs::mount("/dev_ps2disc", disc_root)); } else { // TODO: find out where other discs are mounted sys_log.todo("Mounting non-ps2/ps3 disc in dev_bdvd. Is this correct? 
(path='%s')", disc_root); ensure(vfs::mount("/dev_bdvd", disc_root)); } if (g_fxo->is_init<disc_change_manager>()) { g_fxo->get<disc_change_manager>().insert_disc(type, std::move(title_id)); } return game_boot_result::no_errors; } utils::serial* Emulator::DeserialManager() const { ensure(!m_ar || !m_ar->is_writing()); return m_ar.get(); } bool Emulator::IsVsh() { return g_ps3_process_info.self_info.valid && (g_ps3_process_info.self_info.prog_id_hdr.program_authority_id >> 36 == 0x1070000); // Not only VSH but also most CoreOS LV2 SELFs need the special treatment } bool Emulator::IsValidSfb(const std::string& path) { fs::file sfb_file{path, fs::read + fs::isfile}; if (sfb_file) { if (sfb_file.size() < 4 || sfb_file.read<u32>() != ".SFB"_u32) { sys_log.error("PS3_DISC.SFB file may be truncated or corrupted. (path='%s')", path); return false; } return true; } return false; } void Emulator::SaveSettings(const std::string& settings, const std::string& title_id) { std::string config_name; if (title_id.empty()) { config_name = fs::get_config_dir() + "/config.yml"; } else { config_name = rpcs3::utils::get_custom_config_path(title_id); } // Save config atomically fs::pending_file temp(config_name); if (!temp.file) { sys_log.error("Could not save config to %s (failed to create temporary file) (error=%s)", config_name, fs::g_tls_error); } else { temp.file.write(settings.c_str(), settings.size()); if (!temp.commit()) { sys_log.error("Could not save config to %s (failed to commit) (error=%s)", config_name, fs::g_tls_error); } } // Check if the running config/title is the same as the edited config/title. if (config_name == g_cfg.name || title_id == Emu.GetTitleID()) { // Update current config if (!g_cfg.from_string({settings.c_str(), settings.size()}, !Emu.IsStopped())) { sys_log.fatal("Failed to update configuration"); } else if (!Emu.IsStopped()) // Don't spam the log while emulation is stopped. The config will be logged on boot anyway. 
{ sys_log.notice("Updated configuration:\n%s\n", g_cfg.to_string()); } } // Backup config g_backup_cfg.from_string(g_cfg.to_string()); } Emulator Emu;
122,210
C++
.cpp
3,636
29.774477
227
0.651335
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
5,164
system_utils.cpp
RPCS3_rpcs3/rpcs3/Emu/system_utils.cpp
#include "stdafx.h" #include "system_utils.hpp" #include "system_config.h" #include "vfs_config.h" #include "Emu/Io/pad_config.h" #include "Emu/System.h" #include "util/sysinfo.hpp" #include "Utilities/File.h" #include "Utilities/StrUtil.h" #include "Utilities/Thread.h" #include "Crypto/unpkg.h" #include "Crypto/unself.h" #include "Crypto/unedat.h" #include <charconv> #include <thread> LOG_CHANNEL(sys_log, "SYS"); namespace rpcs3::utils { u32 get_max_threads() { const u32 max_threads = static_cast<u32>(g_cfg.core.llvm_threads); const u32 hw_threads = ::utils::get_thread_count(); const u32 thread_count = max_threads > 0 ? std::min(max_threads, hw_threads) : hw_threads; return thread_count; } void configure_logs(bool force_enable) { static bool was_silenced = false; const bool silenced = g_cfg.misc.silence_all_logs.get() && !force_enable; if (silenced) { if (!was_silenced) { sys_log.always()("Disabling logging! Do not create issues on GitHub or on the forums while logging is disabled."); } logs::silence(); } else { logs::reset(); logs::set_channel_levels(g_cfg.log.get_map()); if (was_silenced) { sys_log.success("Logging enabled"); } } was_silenced = silenced; } u32 check_user(const std::string& user) { u32 id = 0; if (user.size() == 8) { std::from_chars(&user.front(), &user.back() + 1, id); } return id; } bool install_pkg(const std::string& path) { sys_log.success("Installing package: %s", path); int int_progress = 0; std::deque<package_reader> reader; reader.emplace_back(path); // Run PKG unpacking asynchronously named_thread worker("PKG Installer", [&] { std::deque<std::string> bootables; const package_install_result result = package_reader::extract_data(reader, bootables); return result.error == package_install_result::error_type::no_error; }); // Wait for the completion while (std::this_thread::sleep_for(5ms), worker <= thread_state::aborting) { // TODO: update unified progress dialog const int pval = reader[0].get_progress(100); if (pval > int_progress) { 
int_progress = pval; sys_log.success("... %u%%", int_progress); } } return worker(); } std::string get_emu_dir() { const std::string& emu_dir_ = g_cfg_vfs.emulator_dir; return emu_dir_.empty() ? fs::get_config_dir() : emu_dir_; } std::string get_games_dir() { return g_cfg_vfs.get(g_cfg_vfs.games_dir, get_emu_dir()); } std::string get_hdd0_dir() { return g_cfg_vfs.get(g_cfg_vfs.dev_hdd0, get_emu_dir()); } std::string get_hdd1_dir() { return g_cfg_vfs.get(g_cfg_vfs.dev_hdd1, get_emu_dir()); } std::string get_cache_dir() { return fs::get_cache_dir() + "cache/"; } std::string get_cache_dir(std::string_view module_path) { std::string cache_dir = get_cache_dir(); const std::string dev_flash = g_cfg_vfs.get_dev_flash(); const bool in_dev_flash = Emu.IsPathInsideDir(module_path, dev_flash); if (in_dev_flash && !Emu.IsPathInsideDir(module_path, dev_flash + "sys/external/")) { // Add prefix for vsh cache_dir += "vsh/"; } else if (!in_dev_flash && !Emu.GetTitleID().empty() && Emu.GetCat() != "1P") { // Add prefix for anything except dev_flash files, standalone elfs or PS1 classics cache_dir += Emu.GetTitleID(); cache_dir += '/'; } return cache_dir; } std::string get_rap_file_path(const std::string_view& rap) { const std::string home_dir = get_hdd0_dir() + "home"; std::string rap_path; for (auto&& entry : fs::dir(home_dir)) { if (entry.is_directory && check_user(entry.name)) { rap_path = fmt::format("%s/%s/exdata/%s.rap", home_dir, entry.name, rap); if (fs::is_file(rap_path)) { return rap_path; } } } // Return a sample path tested for logging purposes return rap_path; } std::string get_c00_unlock_edat_path(const std::string_view& content_id) { const std::string home_dir = get_hdd0_dir() + "home"; std::string edat_path; for (auto&& entry : fs::dir(home_dir)) { if (entry.is_directory && check_user(entry.name)) { edat_path = fmt::format("%s/%s/exdata/%s.edat", home_dir, entry.name, content_id); if (fs::is_file(edat_path)) { return edat_path; } } } // Return a sample path tested 
for logging purposes return edat_path; } bool verify_c00_unlock_edat(const std::string_view& content_id, bool fast) { const std::string edat_path = rpcs3::utils::get_c00_unlock_edat_path(content_id); // Check if user has unlock EDAT installed fs::file enc_file(edat_path); if (!enc_file) { sys_log.notice("verify_c00_unlock_edat(): '%s' not found", edat_path); return false; } // Use simple check for GUI if (fast) return true; u128 k_licensee = get_default_self_klic(); NPD_HEADER npd; if (!VerifyEDATHeaderWithKLicense(enc_file, edat_path, reinterpret_cast<u8*>(&k_licensee), &npd)) { sys_log.error("verify_c00_unlock_edat(): Failed to verify npd file '%s'", edat_path); return false; } std::string edat_content_id = npd.content_id; if (edat_content_id != content_id) { sys_log.error("verify_c00_unlock_edat(): Content ID mismatch in npd header of '%s'", edat_path); return false; } // Decrypt EDAT and verify its contents fs::file dec_file = DecryptEDAT(enc_file, edat_path, 8, reinterpret_cast<u8*>(&k_licensee)); if (!dec_file) { sys_log.error("verify_c00_unlock_edat(): Failed to decrypt '%s'", edat_path); return false; } u32 magic{}; dec_file.read<u32>(magic); if (magic != "GOMA"_u32) { sys_log.error("verify_c00_unlock_edat(): Bad header magic in unlock EDAT '%s'", edat_path); return false; } // Read null-terminated string dec_file.seek(0x10); dec_file.read(edat_content_id, 0x30); edat_content_id.resize(std::min<usz>(0x30, edat_content_id.find_first_of('\0'))); if (edat_content_id != content_id) { sys_log.error("verify_c00_unlock_edat(): Content ID mismatch in unlock EDAT '%s'", edat_path); return false; } // Game has been purchased and EDAT is verified return true; } std::string get_sfo_dir_from_game_path(const std::string& game_path, const std::string& title_id) { if (fs::is_file(game_path + "/PS3_DISC.SFB")) { // This is a disc game. if (!title_id.empty()) { for (auto&& entry : fs::dir{game_path}) { if (entry.name == "." 
|| entry.name == "..") { continue; } const std::string sfo_path = game_path + "/" + entry.name + "/PARAM.SFO"; if (entry.is_directory && fs::is_file(sfo_path)) { const auto psf = psf::load_object(sfo_path); const auto serial = psf::get_string(psf, "TITLE_ID"); if (serial == title_id) { return game_path + "/" + entry.name; } } } } return game_path + "/PS3_GAME"; } const auto psf = psf::load_object(game_path + "/PARAM.SFO"); const auto category = psf::get_string(psf, "CATEGORY"); const auto content_id = psf::get_string(psf, "CONTENT_ID"); if (category == "HG" && !content_id.empty()) { // This is a trial game. Check if the user has EDAT file to unlock it. const auto c00_title_id = psf::get_string(psf, "TITLE_ID"); if (fs::is_file(game_path + "/C00/PARAM.SFO") && verify_c00_unlock_edat(content_id, true)) { // Load full game data. sys_log.notice("Found EDAT file %s.edat for trial game %s", content_id, c00_title_id); return game_path + "/C00"; } } return game_path; } std::string get_custom_config_dir() { #ifdef _WIN32 return fs::get_config_dir() + "config/custom_configs/"; #else return fs::get_config_dir() + "custom_configs/"; #endif } std::string get_custom_config_path(const std::string& identifier) { if (identifier.empty()) { return {}; } return get_custom_config_dir() + "config_" + identifier + ".yml"; } std::string get_input_config_root() { #ifdef _WIN32 return fs::get_config_dir() + "config/input_configs/"; #else return fs::get_config_dir() + "input_configs/"; #endif } std::string get_input_config_dir(const std::string& title_id) { return get_input_config_root() + (title_id.empty() ? "global" : title_id) + "/"; } std::string get_custom_input_config_path(const std::string& title_id) { if (title_id.empty()) return ""; return get_input_config_dir(title_id) + g_cfg_input_configs.default_config + ".yml"; } }
8,448
C++
.cpp
287
26.083624
118
0.659546
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
true
false
5,165
perf_monitor.cpp
RPCS3_rpcs3/rpcs3/Emu/perf_monitor.cpp
#include "stdafx.h" #include "perf_monitor.hpp" #include "Emu/System.h" #include "util/cpu_stats.hpp" #include "Utilities/Thread.h" LOG_CHANNEL(perf_log, "PERF"); void perf_monitor::operator()() { constexpr u64 update_interval_us = 1000000; // Update every second constexpr u64 log_interval_us = 10000000; // Log every 10 seconds u64 elapsed_us = 0; utils::cpu_stats stats; stats.init_cpu_query(); u32 logged_pause = 0; u64 last_pause_time = umax; std::vector<double> per_core_usage; std::string msg; for (u64 sleep_until = get_system_time(); thread_ctrl::state() != thread_state::aborting;) { thread_ctrl::wait_until(&sleep_until, update_interval_us); elapsed_us += update_interval_us; if (thread_ctrl::state() == thread_state::aborting) { break; } double total_usage = 0.0; stats.get_per_core_usage(per_core_usage, total_usage); if (elapsed_us >= log_interval_us) { elapsed_us = 0; const bool is_paused = Emu.IsPaused(); const u64 pause_time = Emu.GetPauseTime(); if (!is_paused || last_pause_time != pause_time) { // Resumed or not paused since last check logged_pause = 0; last_pause_time = pause_time; } if (is_paused) { if (logged_pause >= 2) { // Let's not spam the log when emulation is paused // But still emit the message two times so even paused state can be debugged and inspected continue; } logged_pause++; } msg.clear(); fmt::append(msg, "CPU Usage: Total: %.1f%%", total_usage); if (!per_core_usage.empty()) { fmt::append(msg, ", Cores:"); } for (usz i = 0; i < per_core_usage.size(); i++) { fmt::append(msg, "%s %.1f%%", i > 0 ? "," : "", per_core_usage[i]); } perf_log.notice("%s", msg); } } } perf_monitor::~perf_monitor() { }
1,808
C++
.cpp
65
24.353846
95
0.650029
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
5,166
system_config.cpp
RPCS3_rpcs3/rpcs3/Emu/system_config.cpp
#include "stdafx.h" #include "system_config.h" #include "util/sysinfo.hpp" cfg_root g_cfg{}; cfg_root g_backup_cfg{}; bool cfg_root::node_core::enable_tsx_by_default() { return utils::has_rtm() && utils::has_mpx() && !utils::has_tsx_force_abort(); } std::string cfg_root::node_sys::get_random_system_name() { std::srand(static_cast<u32>(std::time(nullptr))); return "RPCS3-" + std::to_string(100 + std::rand() % 899); }
427
C++
.cpp
14
29
78
0.687042
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
5,167
localized_string.cpp
RPCS3_rpcs3/rpcs3/Emu/localized_string.cpp
#include "stdafx.h" #include "localized_string.h" #include "System.h" std::string get_localized_string(localized_string_id id, const char* args) { return Emu.GetCallbacks().get_localized_string(id, args); } std::u32string get_localized_u32string(localized_string_id id, const char* args) { return Emu.GetCallbacks().get_localized_u32string(id, args); }
357
C++
.cpp
11
31.090909
80
0.77907
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
true
false
true
true
true
false
5,168
VFS.cpp
RPCS3_rpcs3/rpcs3/Emu/VFS.cpp
#include "stdafx.h" #include "IdManager.h" #include "System.h" #include "VFS.h" #include "Cell/lv2/sys_fs.h" #include "Utilities/mutex.h" #include "Utilities/StrUtil.h" #ifdef _WIN32 #include <Windows.h> #endif #include <thread> #include <map> LOG_CHANNEL(vfs_log, "VFS"); struct vfs_directory { // Real path (empty if root or not exists) std::string path; // Virtual subdirectories std::map<std::string, std::unique_ptr<vfs_directory>> dirs; }; struct vfs_manager { shared_mutex mutex{}; // VFS root vfs_directory root{}; }; bool vfs::mount(std::string_view vpath, std::string_view path, bool is_dir) { if (vpath.empty()) { // Empty relative path, should set relative path base; unsupported vfs_log.error("Cannot mount empty path to \"%s\"", path); return false; } // Initialize vfs_manager if not yet initialized (e.g. g_fxo->reset() was previously invoked) g_fxo->need<vfs_manager>(); auto& table = g_fxo->get<vfs_manager>(); // TODO: scan roots of mounted devices for undeleted vfs::host::unlink remnants, and try to delete them (_WIN32 only) std::lock_guard lock(table.mutex); const std::string_view vpath_backup = vpath; for (std::vector<vfs_directory*> list{&table.root};;) { // Skip one or more '/' const auto pos = vpath.find_first_not_of('/'); if (pos == 0) { // Mounting relative path is not supported vfs_log.error("Cannot mount relative path \"%s\" to \"%s\"", vpath_backup, path); return false; } if (pos == umax) { // Mounting completed; fixup for directories due to resolve_path messing with trailing / list.back()->path = Emu.GetCallbacks().resolve_path(path); if (list.back()->path.empty()) list.back()->path = std::string(path); // Fallback when resolving failed if (is_dir && !list.back()->path.ends_with('/')) list.back()->path += '/'; if (!is_dir && list.back()->path.ends_with('/')) vfs_log.error("File mounted with trailing /."); if (path == "/") // Special list.back()->path = "/"; vfs_log.notice("Mounted path \"%s\" to \"%s\"", vpath_backup, list.back()->path); return 
true; } // Get fragment name const auto name = vpath.substr(pos, vpath.find_first_of('/', pos) - pos); vpath.remove_prefix(name.size() + pos); if (name == ".") { // Keep current continue; } if (name == "..") { // Root parent is root if (list.size() == 1) { continue; } // Go back one level list.pop_back(); continue; } // Find or add vfs_directory* last = list.back(); for (auto& [path, dir] : last->dirs) { if (path == name) { list.push_back(dir.get()); break; } } if (last == list.back()) { // Add new entry std::unique_ptr<vfs_directory> new_entry = std::make_unique<vfs_directory>(); list.push_back(new_entry.get()); last->dirs.emplace(name, std::move(new_entry)); } } } bool vfs::unmount(std::string_view vpath) { if (vpath.empty()) { vfs_log.error("Cannot unmount empty path"); return false; } const std::vector<std::string> entry_list = fmt::split(vpath, {"/"}); if (entry_list.empty()) { vfs_log.error("Cannot unmount path: '%s'", vpath); return false; } vfs_log.notice("About to unmount '%s'", vpath); if (!g_fxo->is_init<vfs_manager>()) { return false; } auto& table = g_fxo->get<vfs_manager>(); std::lock_guard lock(table.mutex); // Search entry recursively and remove it (including all children) std::function<void(vfs_directory&, usz)> unmount_children; unmount_children = [&entry_list, &unmount_children](vfs_directory& dir, usz depth) -> void { if (depth >= entry_list.size()) { return; } // Get the current name based on the depth const std::string& name = ::at32(entry_list, depth); // Go through all children of this node for (auto it = dir.dirs.begin(); it != dir.dirs.end();) { // Find the matching node if (it->first == name) { // Remove the matching node if we reached the maximum depth if (depth + 1 == entry_list.size()) { vfs_log.notice("Unmounting '%s' = '%s'", it->first, it->second->path); it = dir.dirs.erase(it); continue; } // Otherwise continue searching in the next level of depth unmount_children(*it->second, depth + 1); } ++it; } }; unmount_children(table.root, 
0); return true; } std::string vfs::get(std::string_view vpath, std::vector<std::string>* out_dir, std::string* out_path) { // Just to make the code more robust. // It should never happen because we take care to initialize Emu (and so also vfs_manager) with Emu.Init() before this function is invoked if (!g_fxo->is_init<vfs_manager>()) { fmt::throw_exception("vfs_manager not initialized"); } auto& table = g_fxo->get<vfs_manager>(); reader_lock lock(table.mutex); // Resulting path fragments: decoded ones std::vector<std::string_view> result; result.reserve(vpath.size() / 2); // Mounted path std::string_view result_base; if (vpath.empty()) { // Empty relative path (reuse further return) vpath = "."; } // Fragments for out_path std::vector<std::string_view> name_list; if (out_path) { name_list.reserve(vpath.size() / 2); } for (std::vector<const vfs_directory*> list{&table.root};;) { // Skip one or more '/' const auto pos = vpath.find_first_not_of('/'); if (pos == 0) { // Relative path: point to non-existent location return fs::get_config_dir() + "delete_this_dir.../delete_this..."; } if (pos == umax) { // Absolute path: finalize for (auto it = list.rbegin(), rend = list.rend(); it != rend; it++) { if (auto* dir = *it; dir && (!dir->path.empty() || list.size() == 1)) { // Save latest valid mount path result_base = dir->path; // Erase unnecessary path fragments result.erase(result.begin(), result.begin() + (std::distance(it, rend) - 1)); // Extract mounted subdirectories (TODO) if (out_dir) { for (auto& pair : dir->dirs) { if (!pair.second->path.empty()) { out_dir->emplace_back(pair.first); } } } break; } } if (!vpath.empty()) { // Finalize path with '/' result.emplace_back(""); } break; } // Get fragment name const auto name = vpath.substr(pos, vpath.find_first_of('/', pos) - pos); vpath.remove_prefix(name.size() + pos); // Process special directories if (name == ".") { // Keep current continue; } if (name == "..") { // Root parent is root if (list.size() == 1) { 
continue; } // Go back one level if (out_path) { name_list.pop_back(); } list.pop_back(); result.pop_back(); continue; } const auto last = list.back(); list.push_back(nullptr); if (out_path) { name_list.push_back(name); } result.push_back(name); if (!last) { continue; } for (auto& [path, dir] : last->dirs) { if (path == name) { list.back() = dir.get(); if (dir->path == "/"sv) { if (vpath.size() <= 1) { return fs::get_config_dir() + "delete_this_dir.../delete_this..."; } // Handle /host_root (not escaped, not processed) if (out_path) { out_path->clear(); *out_path += '/'; *out_path += fmt::merge(name_list, "/"); *out_path += vpath; } return std::string{vpath.substr(1)}; } break; } } } if (result_base.empty()) { // Not mounted return {}; } // Merge path fragments if (out_path) { out_path->clear(); *out_path += '/'; *out_path += fmt::merge(name_list, "/"); } // Escape for host FS std::vector<std::string> escaped; escaped.reserve(result.size()); for (auto& sv : result) escaped.emplace_back(vfs::escape(sv)); return std::string{result_base} + fmt::merge(escaped, "/"); } using char2 = char8_t; std::string vfs::retrieve(std::string_view path, const vfs_directory* node, std::vector<std::string_view>* mount_path) { // Just to make the code more robust. 
// It should never happen because we take care to initialize Emu (and so also vfs_manager) with Emu.Init() before this function is invoked if (!g_fxo->is_init<vfs_manager>()) { fmt::throw_exception("vfs_manager not initialized"); } auto& table = g_fxo->get<vfs_manager>(); if (!node) { if (path.starts_with(".") || path.empty()) { return {}; } reader_lock lock(table.mutex); std::vector<std::string_view> mount_path_empty; const std::string rpath = Emu.GetCallbacks().resolve_path(path); if (!rpath.empty()) { if (std::string res = vfs::retrieve(rpath, &table.root, &mount_path_empty); !res.empty()) { return res; } } mount_path_empty.clear(); return vfs::retrieve(path, &table.root, &mount_path_empty); } mount_path->emplace_back(); // Try to extract host root mount point name (if exists) std::string_view host_root_name; std::string result; std::string result_dir; for (const auto& [name, dir] : node->dirs) { mount_path->back() = name; if (std::string res = vfs::retrieve(path, dir.get(), mount_path); !res.empty()) { // Avoid app_home // Prefer dev_bdvd over dev_hdd0 if (result.empty() || (name == "app_home") < (result_dir == "app_home") || (name == "dev_bdvd") > (result_dir == "dev_bdvd")) { result = std::move(res); result_dir = name; } } if (dir->path == "/"sv) { host_root_name = name; } } if (!result.empty()) { return result; } mount_path->pop_back(); if (node->path.size() > 1 && path.starts_with(node->path)) { auto unescape_path = [](std::string_view path) { // Unescape from host FS std::vector<std::string> escaped = fmt::split(path, {std::string_view{&fs::delim[0], 1}, std::string_view{&fs::delim[1], 1}}); std::vector<std::string> result; for (auto& sv : escaped) result.emplace_back(vfs::unescape(sv)); return fmt::merge(result, "/"); }; std::string result{"/"}; for (const auto& name : *mount_path) { result += name; result += '/'; } result += unescape_path(path.substr(node->path.size())); return result; } if (!host_root_name.empty()) { // If failed to find mount point for 
path and /host_root is mounted // Prepend "/host_root" to path and return the constructed string result.clear(); result += '/'; for (const auto& name : *mount_path) { result += name; result += '/'; } result += host_root_name; result += '/'; result += path; return result; } return result; } std::string vfs::escape(std::string_view name, bool escape_slash) { std::string result; if (name.size() <= 2 && name.find_first_not_of('.') == umax) { // Return . or .. as is result = name; return result; } // Emulate NTS (limited) auto get_char = [&](usz pos) -> char2 { if (pos < name.size()) { return name[pos]; } else { return '\0'; } }; // Escape NUL, LPT ant other trash if (name.size() > 2) { // Pack first 3 characters const u32 triple = std::bit_cast<le_t<u32>, u32>(toupper(name[0]) | toupper(name[1]) << 8 | toupper(name[2]) << 16); switch (triple) { case "COM"_u32: case "LPT"_u32: { if (name.size() >= 4 && name[3] >= '1' && name[3] <= '9') { if (name.size() == 4 || name[4] == '.') { // Escape first character (C or L) result = reinterpret_cast<const char*>(u8"!"); } } break; } case "NUL"_u32: case "CON"_u32: case "AUX"_u32: case "PRN"_u32: { if (name.size() == 3 || name[3] == '.') { result = reinterpret_cast<const char*>(u8"!"); } break; } } } result.reserve(result.size() + name.size()); for (usz i = 0, s = name.size(); i < s; i++) { switch (char2 c = name[i]) { case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7: case 8: case 9: { result += reinterpret_cast<const char*>(u8"0"); result.back() += c; break; } case 10: case 11: case 12: case 13: case 14: case 15: case 16: case 17: case 18: case 19: case 20: case 21: case 22: case 23: case 24: case 25: case 26: case 27: case 28: case 29: case 30: case 31: { result += reinterpret_cast<const char*>(u8"A"); result.back() += c; result.back() -= 10; break; } case '<': { result += reinterpret_cast<const char*>(u8"<"); break; } case '>': { result += reinterpret_cast<const char*>(u8">"); break; } case ':': { result += 
reinterpret_cast<const char*>(u8":"); break; } case '"': { result += reinterpret_cast<const char*>(u8"""); break; } case '\\': { result += reinterpret_cast<const char*>(u8"\"); break; } case '|': { result += reinterpret_cast<const char*>(u8"|"); break; } case '?': { result += reinterpret_cast<const char*>(u8"?"); break; } case '*': { result += reinterpret_cast<const char*>(u8"*"); break; } case '/': { if (escape_slash) { result += reinterpret_cast<const char*>(u8"/"); break; } result += c; break; } case '.': case ' ': { if (!get_char(i + 1)) { switch (c) { // Directory name ended with a space or a period, not allowed on Windows. case '.': result += reinterpret_cast<const char*>(u8"."); break; case ' ': result += reinterpret_cast<const char*>(u8"_"); break; } break; } result += c; break; } case char2{u8"!"[0]}: { // Escape full-width characters 0xFF01..0xFF5e with ! (0xFF01) switch (get_char(i + 1)) { case char2{u8"!"[1]}: { const uchar c3 = get_char(i + 2); if (c3 >= 0x81 && c3 <= 0xbf) { result += reinterpret_cast<const char*>(u8"!"); } break; } case char2{u8"`"[1]}: { const uchar c3 = get_char(i + 2); if (c3 >= 0x80 && c3 <= 0x9e) { result += reinterpret_cast<const char*>(u8"!"); } break; } default: break; } result += c; break; } default: { result += c; break; } } } return result; } std::string vfs::unescape(std::string_view name) { std::string result; result.reserve(name.size()); // Emulate NTS auto get_char = [&](usz pos) -> char2 { if (pos < name.size()) { return name[pos]; } else { return '\0'; } }; for (usz i = 0, s = name.size(); i < s; i++) { switch (char2 c = name[i]) { case char2{u8"!"[0]}: { switch (get_char(i + 1)) { case char2{u8"!"[1]}: { const uchar c3 = get_char(i + 2); if (c3 >= 0x81 && c3 <= 0xbf) { switch (static_cast<char2>(c3)) { case char2{u8"0"[2]}: case char2{u8"1"[2]}: case char2{u8"2"[2]}: case char2{u8"3"[2]}: case char2{u8"4"[2]}: case char2{u8"5"[2]}: case char2{u8"6"[2]}: case char2{u8"7"[2]}: case char2{u8"8"[2]}: case 
char2{u8"9"[2]}: { result += static_cast<char>(c3); result.back() -= u8"0"[2]; continue; } case char2{u8"A"[2]}: case char2{u8"B"[2]}: case char2{u8"C"[2]}: case char2{u8"D"[2]}: case char2{u8"E"[2]}: case char2{u8"F"[2]}: case char2{u8"G"[2]}: case char2{u8"H"[2]}: case char2{u8"I"[2]}: case char2{u8"J"[2]}: case char2{u8"K"[2]}: case char2{u8"L"[2]}: case char2{u8"M"[2]}: case char2{u8"N"[2]}: case char2{u8"O"[2]}: case char2{u8"P"[2]}: case char2{u8"Q"[2]}: case char2{u8"R"[2]}: case char2{u8"S"[2]}: case char2{u8"T"[2]}: case char2{u8"U"[2]}: case char2{u8"V"[2]}: { result += static_cast<char>(c3); result.back() -= u8"A"[2]; result.back() += 10; continue; } case char2{u8"!"[2]}: { if (const char2 c4 = get_char(i + 3)) { // Escape anything but null character result += c4; } else { return result; } i += 3; continue; } case char2{u8"_"[2]}: { result += ' '; break; } case char2{u8"."[2]}: { result += '.'; break; } case char2{u8"<"[2]}: { result += '<'; break; } case char2{u8">"[2]}: { result += '>'; break; } case char2{u8":"[2]}: { result += ':'; break; } case char2{u8"""[2]}: { result += '"'; break; } case char2{u8"\"[2]}: { result += '\\'; break; } case char2{u8"?"[2]}: { result += '?'; break; } case char2{u8"*"[2]}: { result += '*'; break; } case char2{u8"$"[2]}: { if (i == 0) { // Special case: filename starts with full-width $ likely created by vfs::host::unlink result.resize(1, '.'); return result; } [[fallthrough]]; } default: { // Unrecognized character (ignored) break; } } i += 2; } else { result += c; } break; } case char2{u8"`"[1]}: { const uchar c3 = get_char(i + 2); if (c3 >= 0x80 && c3 <= 0x9e) { switch (static_cast<char2>(c3)) { case char2{u8"|"[2]}: { result += '|'; break; } default: { // Unrecognized character (ignored) break; } } i += 2; } else { result += c; } break; } default: { result += c; break; } } break; } case 0: { // NTS detected return result; } default: { result += c; break; } } } return result; } std::string vfs::host::hash_path(const 
std::string& path, const std::string& dev_root, std::string_view prefix) { return fmt::format(u8"%s/$%s%s%s", dev_root, fmt::base57(std::hash<std::string>()(path)), prefix, fmt::base57(utils::get_unique_tsc())); } bool vfs::host::rename(const std::string& from, const std::string& to, const lv2_fs_mount_point* mp, bool overwrite, bool lock) { // Lock mount point, close file descriptors, retry const auto from0 = std::string_view(from).substr(0, from.find_last_not_of(fs::delim) + 1); std::vector<std::pair<std::shared_ptr<lv2_file>, std::string>> escaped_real; std::unique_lock mp_lock(mp->mutex, std::defer_lock); if (lock) { mp_lock.lock(); } if (fs::rename(from, to, overwrite)) { return true; } if (fs::g_tls_error != fs::error::acces) { return false; } const auto escaped_from = Emu.GetCallbacks().resolve_path(from); auto check_path = [&](std::string_view path) { return path.starts_with(from) && (path.size() == from.size() || path[from.size()] == fs::delim[0] || path[from.size()] == fs::delim[1]); }; idm::select<lv2_fs_object, lv2_file>([&](u32 id, lv2_file& file) { if (file.mp != mp) { return; } std::string escaped = Emu.GetCallbacks().resolve_path(file.real_path); if (check_path(escaped)) { if (!file.file) { return; } file.restore_data.seek_pos = file.file.pos(); file.file.close(); // Actually close it! escaped_real.emplace_back(ensure(idm::get_unlocked<lv2_fs_object, lv2_file>(id)), std::move(escaped)); } }); bool res = false; for (;; std::this_thread::yield()) { if (fs::rename(from, to, overwrite)) { res = true; break; } if (Emu.IsStopped() || fs::g_tls_error != fs::error::acces) { res = false; break; } } const auto fs_error = fs::g_tls_error; for (const auto& [file_ptr, real_path] : escaped_real) { lv2_file& file = *file_ptr; { // Update internal path if (res) { file.real_path = to + (real_path != escaped_from ? 
'/' + file.real_path.substr(from0.size()) : ""s); } // Reopen with ignored TRUNC, APPEND, CREATE and EXCL flags auto res0 = lv2_file::open_raw(file.real_path, file.flags & CELL_FS_O_ACCMODE, file.mode, file.type, file.mp); file.file = std::move(res0.file); ensure(file.file.operator bool()); file.file.seek(file.restore_data.seek_pos); } } fs::g_tls_error = fs_error; return res; } bool vfs::host::unlink(const std::string& path, [[maybe_unused]] const std::string& dev_root) { #ifdef _WIN32 if (auto device = fs::get_virtual_device(path)) { return device->remove(path); } else { // Rename to special dummy name which will be ignored by VFS (but opened file handles can still read or write it) std::string dummy = hash_path(path, dev_root, "file"); while (true) { if (fs::rename(path, dummy, false)) { break; } if (fs::g_tls_error != fs::error::exist) { return false; } dummy = hash_path(path, dev_root, "file"); } if (fs::file f{dummy, fs::read + fs::write}) { // Set to delete on close on last handle FILE_DISPOSITION_INFO disp; disp.DeleteFileW = true; SetFileInformationByHandle(f.get_handle(), FileDispositionInfo, &disp, sizeof(disp)); return true; } // TODO: what could cause this and how to handle it return true; } #else return fs::remove_file(path); #endif } bool vfs::host::remove_all(const std::string& path, [[maybe_unused]] const std::string& dev_root, [[maybe_unused]] const lv2_fs_mount_point* mp, [[maybe_unused]] bool remove_root, [[maybe_unused]] bool lock, [[maybe_unused]] bool force_atomic) { #ifndef _WIN32 if (!force_atomic) { return fs::remove_all(path, remove_root); } #endif if (remove_root) { // Rename to special dummy folder which will be ignored by VFS (but opened file handles can still read or write it) std::string dummy = hash_path(path, dev_root, "dir"); while (true) { if (vfs::host::rename(path, dummy, mp, false, lock)) { break; } if (fs::g_tls_error != fs::error::exist) { return false; } dummy = hash_path(path, dev_root, "dir"); } if 
(!vfs::host::remove_all(dummy, dev_root, mp, false, lock)) { return false; } if (!fs::remove_dir(dummy)) { return false; } } else { const auto root_dir = fs::dir(path); if (!root_dir) { return false; } for (const auto& entry : root_dir) { if (entry.name == "." || entry.name == "..") { continue; } if (!entry.is_directory) { if (!vfs::host::unlink(path + '/' + entry.name, dev_root)) { return false; } } else { if (!vfs::host::remove_all(path + '/' + entry.name, dev_root, mp, true, lock)) { return false; } } } } return true; }
22,552
C++
.cpp
989
18.793731
243
0.590749
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
5,169
savestate_utils.cpp
RPCS3_rpcs3/rpcs3/Emu/savestate_utils.cpp
#include "stdafx.h" #include "util/types.hpp" #include "util/logs.hpp" #include "util/asm.hpp" #include "util/v128.hpp" #include "util/simd.hpp" #include "Utilities/File.h" #include "Utilities/StrFmt.h" #include "system_config.h" #include "savestate_utils.hpp" #include "System.h" #include <set> #include <any> #include <span> LOG_CHANNEL(sys_log, "SYS"); struct serial_ver_t { std::string ver_name; bool used = false; u16 current_version = 0; std::set<u16> compatible_versions; }; static std::array<serial_ver_t, 27> s_serial_versions; #define SERIALIZATION_VER(name, identifier, ...) \ \ const bool s_##name##_serialization_fill = []() { auto& e = ::s_serial_versions[identifier]; if (e.compatible_versions.empty()) { e.compatible_versions = {__VA_ARGS__}; e.ver_name = #name; } return true; }();\ \ extern void using_##name##_serialization()\ {\ ensure(Emu.IsStopped());\ ::s_serial_versions[identifier].used = true;\ }\ \ extern s32 get_##name##_serialization_version()\ {\ return ::s_serial_versions[identifier].current_version;\ } SERIALIZATION_VER(global_version, 0, 16) // For stuff not listed here SERIALIZATION_VER(ppu, 1, 1, 2/*PPU sleep order*/, 3/*PPU FNID and module*/) SERIALIZATION_VER(spu, 2, 1) SERIALIZATION_VER(lv2_sync, 3, 1) SERIALIZATION_VER(lv2_vm, 4, 1) SERIALIZATION_VER(lv2_net, 5, 1, 2/*TCP Feign conection loss*/) SERIALIZATION_VER(lv2_fs, 6, 1, 2/*NPDRM key saving*/) SERIALIZATION_VER(lv2_prx_overlay, 7, 1) SERIALIZATION_VER(lv2_memory, 8, 1) SERIALIZATION_VER(lv2_config, 9, 1) namespace rsx { SERIALIZATION_VER(rsx, 10, 1, 2/*Pending flip*/, 3/*avconf scan_mode*/) } namespace np { SERIALIZATION_VER(sceNp, 11, 1) } #ifdef _MSC_VER // Compiler bug, lambda function body does seem to inherit used namespace atleast for function declaration SERIALIZATION_VER(rsx, 10) SERIALIZATION_VER(sceNp, 11) #endif SERIALIZATION_VER(cellVdec, 12, 1) SERIALIZATION_VER(cellAudio, 13, 1) SERIALIZATION_VER(cellCamera, 14, 1) SERIALIZATION_VER(cellGem, 15, 1) 
SERIALIZATION_VER(sceNpTrophy, 16, 1) SERIALIZATION_VER(cellMusic, 17, 1) SERIALIZATION_VER(cellVoice, 18, 1) SERIALIZATION_VER(cellGcm, 19, 1) SERIALIZATION_VER(sysPrxForUser, 20, 1) SERIALIZATION_VER(cellSaveData, 21, 1) SERIALIZATION_VER(cellAudioOut, 22, 1) SERIALIZATION_VER(sys_io, 23, 2) // Misc versions for HLE/LLE not included so main version would not invalidated SERIALIZATION_VER(LLE, 24, 1) SERIALIZATION_VER(HLE, 25, 1) SERIALIZATION_VER(cellSysutil, 26, 1, 2/*AVC2 Muting,Volume*/) template <> void fmt_class_string<std::remove_cvref_t<decltype(s_serial_versions)>>::format(std::string& out, u64 arg) { bool is_first = true; const auto& serials = get_object(arg); out += "{ "; for (auto& entry : serials) { if (entry.current_version) { if (!is_first) { out += ", "; } is_first = false; fmt::append(out, "%s=%d", entry.ver_name, entry.current_version); } } out += " }"; } std::vector<version_entry> get_savestate_versioning_data(fs::file&& file, std::string_view filepath) { if (!file) { return {}; } file.seek(0); utils::serial ar; ar.set_reading_state({}, true); if (filepath.ends_with(".zst")) { ar.m_file_handler = make_compressed_zstd_serialization_file_handler(std::move(file)); } else if (filepath.ends_with(".gz")) { ar.m_file_handler = make_compressed_serialization_file_handler(std::move(file)); } else { ar.m_file_handler = make_uncompressed_serialization_file_handler(std::move(file)); } if (u64 r = 0; ar.try_read(r) != 0 || r != "RPCS3SAV"_u64) { return {}; } ar.pos = 10; u64 offs = ar.try_read<u64>().second; const usz fsize = ar.get_size(offs); if (!offs || fsize <= offs) { return {}; } ar.seek_pos(offs); ar.breathe(true); std::vector<version_entry> ver_data = ar.pop<std::vector<version_entry>>(); return ver_data; } bool is_savestate_version_compatible(const std::vector<version_entry>& data, bool is_boot_check) { if (data.empty()) { return false; } bool ok = true; if (is_boot_check) { for (auto& entry : s_serial_versions) { // Version 0 means that the entire 
constructor using the version should be skipped entry.current_version = 0; } } auto& channel = (is_boot_check ? sys_log.error : sys_log.trace); for (const auto& entry : data) { if (entry.type >= s_serial_versions.size()) { channel("Savestate version identifier is unknown! (category=%u, version=%u)", entry.type, entry.version); ok = false; // Log all mismatches } else if (!s_serial_versions[entry.type].compatible_versions.count(entry.version)) { channel("Savestate version is not supported. (category=%u, version=%u)", entry.type, entry.version); ok = false; } else if (is_boot_check) { s_serial_versions[entry.type].current_version = entry.version; } } if (is_boot_check) { if (ok) { sys_log.success("Savestate versions: %s", s_serial_versions); } else { for (auto& entry : s_serial_versions) { entry.current_version = 0; } } } return ok; } std::string get_savestate_file(std::string_view title_id, std::string_view boot_path, s64 abs_id, s64 rel_id) { const std::string title = std::string{title_id.empty() ? 
boot_path.substr(boot_path.find_last_of(fs::delim) + 1) : title_id}; if (abs_id == -1 && rel_id == -1) { // Return directory return fs::get_config_dir() + "savestates/" + title + "/"; } ensure(rel_id < 0 || abs_id >= 0, "Unimplemented!"); const std::string save_id = fmt::format("%d", abs_id); // Make sure that savestate file with higher IDs are placed at the bottom of "by name" file ordering in directory view by adding a single character prefix which tells the ID length // While not needing to keep a 59 chars long suffix at all times for this purpose const char prefix = ::at32("0123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"sv, save_id.size()); std::string path = fs::get_config_dir() + "/savestates/" + title + "/" + title + '_' + prefix + '_' + save_id + ".SAVESTAT"; if (std::string path_compressed = path + ".zst"; fs::is_file(path_compressed)) { return path_compressed; } if (std::string path_compressed = path + ".gz"; fs::is_file(path_compressed)) { return path_compressed; } return path; } bool is_savestate_compatible(fs::file&& file, std::string_view filepath) { return is_savestate_version_compatible(get_savestate_versioning_data(std::move(file), filepath), false); } std::vector<version_entry> read_used_savestate_versions() { std::vector<version_entry> used_serial; used_serial.reserve(s_serial_versions.size()); for (serial_ver_t& ver : s_serial_versions) { if (std::exchange(ver.used, false)) { used_serial.push_back(version_entry{static_cast<u16>(&ver - s_serial_versions.data()), *ver.compatible_versions.rbegin()}); } ver.current_version = 0; } return used_serial; } bool boot_last_savestate(bool testing) { if (!g_cfg.savestate.suspend_emu && !Emu.GetTitleID().empty() && (Emu.IsRunning() || Emu.GetStatus() == system_state::paused)) { const std::string save_dir = get_savestate_file(Emu.GetTitleID(), Emu.GetBoot(), -1, -1); std::string savestate_path; s64 mtime = smin; for (auto&& entry : fs::dir(save_dir)) { if (entry.is_directory) { continue; } // Find 
the latest savestate file compatible with the game (TODO: Check app version and anything more) if (entry.name.find(Emu.GetTitleID()) != umax && mtime <= entry.mtime) { if (std::string path = save_dir + entry.name + ".zst"; is_savestate_compatible(fs::file(path), path)) { savestate_path = std::move(path); mtime = entry.mtime; } else if (std::string path = save_dir + entry.name + ".gz"; is_savestate_compatible(fs::file(path), path)) { savestate_path = std::move(path); mtime = entry.mtime; } else if (std::string path = save_dir + entry.name; is_savestate_compatible(fs::file(path), path)) { savestate_path = std::move(path); mtime = entry.mtime; } } } const bool result = fs::is_file(savestate_path); if (testing) { sys_log.trace("boot_last_savestate(true) returned %s.", result); return result; } if (result) { sys_log.success("Booting the most recent savestate \'%s\' using the Reload shortcut.", savestate_path); Emu.GracefulShutdown(false); if (game_boot_result error = Emu.BootGame(savestate_path, "", true); error != game_boot_result::no_errors) { sys_log.error("Failed to boot savestate \'%s\' using the Reload shortcut. 
(error: %s)", savestate_path, error); } else { return true; } } sys_log.error("No compatible savestate file found in \'%s\''", save_dir); } return false; } bool load_and_check_reserved(utils::serial& ar, usz size) { u8 bytes[4096]; ensure(size < std::size(bytes)); std::memset(&bytes[size & (0 - sizeof(v128))], 0, sizeof(v128)); const usz old_pos = ar.pos; ar(std::span<u8>(bytes, size)); // Check if all are 0 for (usz i = 0; i < size; i += sizeof(v128)) { if (v128::loadu(&bytes[i]) != v128{}) { return false; } } return old_pos + size == ar.pos; } namespace stx { extern u16 serial_breathe_and_tag(utils::serial& ar, std::string_view name, bool tag_bit) { thread_local std::string_view s_tls_object_name = "none"; thread_local u64 s_tls_call_count = 1, s_tls_current_pos = 0; if (s_tls_current_pos >= ar.pos) { // Reset, probably a new utils::serial object s_tls_call_count = 1; } s_tls_current_pos = ar.pos; constexpr u16 data_mask = 0x7fff; if (ar.m_file_handler && ar.m_file_handler->is_null()) { return (tag_bit ? data_mask + 1 : 0); } u16 tag = static_cast<u16>((static_cast<u16>(ar.pos / 2) & data_mask) | (tag_bit ? 
data_mask + 1 : 0)); u16 saved = tag; ar(saved); sys_log.trace("serial_breathe_and_tag(%u): %s, object: '%s', next-object: '%s', expected/tag: 0x%x == 0x%x", s_tls_call_count, ar, s_tls_object_name, name, tag, saved); if ((saved ^ tag) & data_mask) { ensure(!ar.is_writing()); fmt::throw_exception("serial_breathe_and_tag(%u): %s, object: '%s', next-object: '%s', expected/tag: 0x%x != 0x%x,", s_tls_call_count, ar, s_tls_object_name, name, tag, saved); } s_tls_object_name = name; s_tls_call_count++; ar.breathe(); return saved; } } // MSVC bug workaround, see above similar case extern u16 serial_breathe_and_tag(utils::serial& ar, std::string_view name, bool tag_bit) { return ::stx::serial_breathe_and_tag(ar, name, tag_bit); } [[noreturn]] void hle_locks_t::lock() { // Unreachable fmt::throw_exception("Unreachable"); } bool hle_locks_t::try_lock() { while (true) { auto [old, success] = lock_val.fetch_op([](s64& value) { if (value >= 0) { value++; return true; } return false; }); if (success) { return true; } if (old == finalized) { break; } lock_val.wait(old); } return false; } void hle_locks_t::unlock() { lock_val--; } bool hle_locks_t::try_finalize(std::function<bool()> test) { if (!test()) { return false; } if (!lock_val.compare_and_swap_test(0, waiting_for_evaluation)) { return false; } if (!test()) { // Failed ensure(lock_val.compare_and_swap_test(waiting_for_evaluation, 0)); return false; } ensure(lock_val.compare_and_swap_test(waiting_for_evaluation, finalized)); // Sanity check when debugging (the result is not expected to change after finalization) //ensure(test()); lock_val.notify_all(); return true; }
12,378
C++
.cpp
390
29.117949
210
0.634644
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
5,170
GDB.cpp
RPCS3_rpcs3/rpcs3/Emu/GDB.cpp
#include "stdafx.h" #include "GDB.h" #include "util/logs.hpp" #include "Utilities/StrUtil.h" #include "Emu/Memory/vm.h" #include "Emu/System.h" #include "Emu/system_config.h" #include "Emu/IdManager.h" #include "Emu/CPU/CPUThread.h" #include "Emu/Cell/PPUThread.h" #ifdef _WIN32 #include <WinSock2.h> #include <WS2tcpip.h> #include <afunix.h> // sockaddr_un #else #ifdef __clang__ #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wold-style-cast" #endif #include <errno.h> #include <sys/types.h> #include <sys/socket.h> #include <netinet/in.h> #include <netinet/tcp.h> #include <netdb.h> #include <arpa/inet.h> #include <unistd.h> #include <fcntl.h> #include <sys/un.h> // sockaddr_un #ifdef __clang__ #pragma GCC diagnostic pop #endif #endif #include <charconv> #include <regex> #include <string_view> extern bool ppu_breakpoint(u32 addr, bool is_adding); LOG_CHANNEL(GDB); #ifndef _WIN32 int closesocket(int s) { return close(s); } void set_nonblocking(int s) { fcntl(s, F_SETFL, fcntl(s, F_GETFL) | O_NONBLOCK); } #define sscanf_s sscanf #else void set_nonblocking(int s) { u_long mode = 1; ioctlsocket(s, FIONBIO, &mode); } #endif struct gdb_cmd { std::string cmd{}; std::string data{}; u8 checksum{}; }; bool check_errno_again() { #ifdef _WIN32 int err = GetLastError(); return (err == WSAEWOULDBLOCK); #else int err = errno; return (err == EAGAIN) || (err == EWOULDBLOCK); #endif } std::string u32_to_hex(u32 i) { return fmt::format("%x", i); } std::string u64_to_padded_hex(u64 value) { return fmt::format("%.16x", value); } std::string u32_to_padded_hex(u32 value) { return fmt::format("%.8x", value); } template <typename T> T hex_to(std::string_view val) { T result; auto [ptr, err] = std::from_chars(val.data(), val.data() + val.size(), result, 16); if (err != std::errc()) { fmt::throw_exception("Failed to read hex string: %s", std::make_error_code(err).message()); } return result; } constexpr auto& hex_to_u8 = hex_to<u8>; constexpr auto& hex_to_u32 = hex_to<u32>; 
constexpr auto& hex_to_u64 = hex_to<u64>; void gdb_thread::start_server() { // IPv4 address:port in format 127.0.0.1:2345 static const std::regex ipv4_regex("^([0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3})\\:([0-9]{1,5})$"); auto [sname, sshared] = g_cfg.misc.gdb_server.get(); if (sname[0] == '\0') { // Empty string or starts with null: GDB server disabled GDB.notice("GDB Server is disabled."); return; } // Try to detect socket type std::smatch match; if (std::regex_match(sname, match, ipv4_regex)) { struct addrinfo hints{}; struct addrinfo* info; hints.ai_flags = AI_PASSIVE; hints.ai_socktype = SOCK_STREAM; std::string bind_addr = match[1].str(); std::string bind_port = match[2].str(); if (getaddrinfo(bind_addr.c_str(), bind_port.c_str(), &hints, &info) == 0) { server_socket = static_cast<int>(socket(info->ai_family, info->ai_socktype, info->ai_protocol)); if (server_socket == -1) { GDB.error("Error creating IP socket for '%s'.", sname); freeaddrinfo(info); return; } set_nonblocking(server_socket); if (bind(server_socket, info->ai_addr, static_cast<int>(info->ai_addrlen)) != 0) { GDB.error("Failed to bind socket on '%s'.", sname); freeaddrinfo(info); return; } freeaddrinfo(info); if (listen(server_socket, 1) != 0) { GDB.error("Failed to listen on '%s'.", sname); return; } GDB.notice("Started listening on '%s'.", sname); return; } } // Fallback to UNIX socket server_socket = static_cast<int>(socket(AF_UNIX, SOCK_STREAM, 0)); if (server_socket == -1) { GDB.error("Failed to create Unix socket. Possibly unsupported."); return; } // Delete existing socket (TODO?) 
fs::remove_file(sname); set_nonblocking(server_socket); sockaddr_un unix_saddr; unix_saddr.sun_family = AF_UNIX; strcpy_trunc(unix_saddr.sun_path, sname); if (bind(server_socket, reinterpret_cast<struct sockaddr*>(&unix_saddr), sizeof(unix_saddr)) != 0) { GDB.error("Failed to bind Unix socket '%s'.", sname); return; } if (listen(server_socket, 1) != 0) { GDB.error("Failed to listen on Unix socket '%s'.", sname); return; } GDB.notice("Started listening on Unix socket '%s'.", sname); } int gdb_thread::read(void* buf, int cnt) const { while (thread_ctrl::state() != thread_state::aborting) { const int result = recv(client_socket, static_cast<char*>(buf), cnt, 0); if (result == -1) { if (check_errno_again()) { thread_ctrl::wait_for(5000); continue; } GDB.error("Error during socket read."); fmt::throw_exception("Error during socket read"); } return result; } return 0; } char gdb_thread::read_char() { char result; int cnt = read(&result, 1); if (!cnt) { fmt::throw_exception("Tried to read char, but no data was available"); } return result; } u8 gdb_thread::read_hexbyte() { std::string s; s += read_char(); s += read_char(); return hex_to_u8(s); } bool gdb_thread::try_read_cmd(gdb_cmd& out_cmd) { char c = read_char(); //interrupt if (c == 0x03) [[unlikely]] { out_cmd.cmd = '\x03'; out_cmd.data = ""; out_cmd.checksum = 0; return true; } if (c != '$') [[unlikely]] { //gdb starts conversation with + for some reason if (c == '+') { c = read_char(); } if (c != '$') { fmt::throw_exception("Expected start of packet character '$', got '%c' instead", c); } } //clear packet data out_cmd.cmd = ""; out_cmd.data = ""; out_cmd.checksum = 0; bool cmd_part = true; u8 checksum = 0; while (true) { c = read_char(); if (c == '#') { break; } checksum = (checksum + reinterpret_cast<u8&>(c)) % 256; //escaped char if (c == '}') { c = read_char() ^ 0x20; checksum = (checksum + reinterpret_cast<u8&>(c)) % 256; } //cmd-data splitters if (cmd_part && ((c == ':') || (c == '.') || (c == ';'))) { 
cmd_part = false; } if (cmd_part) { out_cmd.cmd += c; //only q and v commands can have multi-char command if ((out_cmd.cmd.length() == 1) && (c != 'q') && (c != 'v')) { cmd_part = false; } } else { out_cmd.data += c; } } out_cmd.checksum = read_hexbyte(); return out_cmd.checksum == checksum; } bool gdb_thread::read_cmd(gdb_cmd& out_cmd) { while (true) { if (try_read_cmd(out_cmd)) { ack(true); return true; } ack(false); } } void gdb_thread::send(const char* buf, int cnt) const { GDB.trace("Sending %s (%d bytes).", buf, cnt); while (thread_ctrl::state() != thread_state::aborting) { int res = ::send(client_socket, buf, cnt, 0); if (res == -1) { if (check_errno_again()) { thread_ctrl::wait_for(5000); continue; } GDB.error("Failed sending %d bytes.", cnt); return; } return; } } void gdb_thread::send_char(char c) { send(&c, 1); } void gdb_thread::ack(bool accepted) { send_char(accepted ? '+' : '-'); } void gdb_thread::send_cmd(const std::string& cmd) { u8 checksum = 0; std::string buf; buf.reserve(cmd.length() + 4); buf += "$"; for (usz i = 0; i < cmd.length(); ++i) { checksum = (checksum + append_encoded_char(cmd[i], buf)) % 256; } buf += "#"; buf += to_hexbyte(checksum); send(buf.c_str(), static_cast<int>(buf.length())); } bool gdb_thread::send_cmd_ack(const std::string& cmd) { while (true) { send_cmd(cmd); char c = read_char(); if (c == '+') [[likely]] return true; if (c != '-') [[unlikely]] { GDB.error("Wrong acknowledge character received: '%c'.", c); return false; } GDB.warning("Client rejected our cmd."); } } u8 gdb_thread::append_encoded_char(char c, std::string& str) { u8 checksum = 0; if ((c == '#') || (c == '$') || (c == '}')) [[unlikely]] { str += '}'; c ^= 0x20; checksum = '}'; } checksum = (checksum + reinterpret_cast<u8&>(c)) % 256; str += c; return checksum; } std::string gdb_thread::to_hexbyte(u8 i) { std::string result = "00"; u8 i1 = i & 0xF; u8 i2 = i >> 4; result[0] = i2 > 9 ? 'a' + i2 - 10 : '0' + i2; result[1] = i1 > 9 ? 
'a' + i1 - 10 : '0' + i1; return result; } bool gdb_thread::select_thread(u64 id) { //in case we have none at all selected_thread.reset(); const auto on_select = [&](u32, cpu_thread& cpu) { return (id == ALL_THREADS) || (id == ANY_THREAD) || (cpu.id == id); }; if (auto ppu = idm::select<named_thread<ppu_thread>>(on_select)) { selected_thread = ppu.ptr; return true; } GDB.warning("Unable to select thread! Is the emulator running?"); return false; } std::string gdb_thread::get_reg(ppu_thread* thread, u32 rid) { //ids from gdb/features/rs6000/powerpc-64.c //pc switch (rid) { case 64: return u64_to_padded_hex(thread->cia); //msr? case 65: return std::string(16, 'x'); case 66: return u32_to_padded_hex(thread->cr.pack()); case 67: return u64_to_padded_hex(thread->lr); case 68: return u64_to_padded_hex(thread->ctr); //xer case 69: return std::string(8, 'x'); //fpscr case 70: return std::string(8, 'x'); default: if (rid > 70) return ""; return (rid > 31) ? u64_to_padded_hex(std::bit_cast<u64>(thread->fpr[rid - 32])) //fpr : u64_to_padded_hex(thread->gpr[rid]); //gpr } } bool gdb_thread::set_reg(ppu_thread* thread, u32 rid, const std::string& value) { switch (rid) { case 64: thread->cia = static_cast<u32>(hex_to_u64(value)); return true; //msr? 
case 65: return true; case 66: thread->cr.unpack(hex_to_u32(value)); return true; case 67: thread->lr = hex_to_u64(value); return true; case 68: thread->ctr = hex_to_u64(value); return true; //xer case 69: return true; //fpscr case 70: return true; default: if (rid > 70) return false; if (rid > 31) { const u64 val = hex_to_u64(value); thread->fpr[rid - 32] = std::bit_cast<f64>(val); } else { thread->gpr[rid] = hex_to_u64(value); } return true; } } u32 gdb_thread::get_reg_size(ppu_thread*, u32 rid) { switch (rid) { case 66: case 69: case 70: return 4; default: if (rid > 70) { return 0; } return 8; } } bool gdb_thread::send_reason() { return send_cmd_ack("S05"); } void gdb_thread::wait_with_interrupts() { char c; while (!paused) { int result = recv(client_socket, &c, 1, 0); if (result == -1) { if (check_errno_again()) { thread_ctrl::wait_for(5000); continue; } GDB.error("Error during socket read."); fmt::throw_exception("Error during socket read"); } else if (c == 0x03) { paused = true; } } } bool gdb_thread::cmd_extended_mode(gdb_cmd&) { return send_cmd_ack("OK"); } bool gdb_thread::cmd_reason(gdb_cmd&) { return send_reason(); } bool gdb_thread::cmd_supported(gdb_cmd&) { return send_cmd_ack("PacketSize=1200"); } bool gdb_thread::cmd_thread_info(gdb_cmd&) { std::string result; const auto on_select = [&](u32, cpu_thread& cpu) { if (!result.empty()) { result += ","; } result += u64_to_padded_hex(static_cast<u64>(cpu.id)); }; idm::select<named_thread<ppu_thread>>(on_select); //idm::select<named_thread<spu_thread>>(on_select); //todo: this may exceed max command length result = "m" + result + "l"; return send_cmd_ack(result); } bool gdb_thread::cmd_current_thread(gdb_cmd&) { return send_cmd_ack(selected_thread.expired() ? 
"" : ("QC" + u64_to_padded_hex(selected_thread.lock()->id))); } bool gdb_thread::cmd_read_register(gdb_cmd& cmd) { if (!select_thread(general_ops_thread_id)) { return send_cmd_ack("E02"); } auto th = selected_thread.lock(); if (auto ppu = th->try_get<named_thread<ppu_thread>>()) { u32 rid = hex_to_u32(cmd.data); std::string result = get_reg(ppu, rid); if (result.empty()) { GDB.warning("Wrong register id %d.", rid); return send_cmd_ack("E01"); } return send_cmd_ack(result); } GDB.warning("Unimplemented thread type %d.", th->id_type()); return send_cmd_ack(""); } bool gdb_thread::cmd_write_register(gdb_cmd& cmd) { if (!select_thread(general_ops_thread_id)) { return send_cmd_ack("E02"); } auto th = selected_thread.lock(); if (th->get_class() == thread_class::ppu) { auto ppu = static_cast<named_thread<ppu_thread>*>(th.get()); usz eq_pos = cmd.data.find('='); if (eq_pos == umax) { GDB.warning("Wrong write_register cmd data '%s'.", cmd.data); return send_cmd_ack("E02"); } u32 rid = hex_to_u32(cmd.data.substr(0, eq_pos)); std::string value = cmd.data.substr(eq_pos + 1); if (!set_reg(ppu, rid, value)) { GDB.warning("Wrong register id %d.", rid); return send_cmd_ack("E01"); } return send_cmd_ack("OK"); } GDB.warning("Unimplemented thread type %d.", th->id_type()); return send_cmd_ack(""); } bool gdb_thread::cmd_read_memory(gdb_cmd& cmd) { usz s = cmd.data.find(','); u32 addr = hex_to_u32(cmd.data.substr(0, s)); u32 len = hex_to_u32(cmd.data.substr(s + 1)); std::string result; result.reserve(len * 2); for (u32 i = 0; i < len; ++i) { if (vm::check_addr(addr)) { result += to_hexbyte(vm::read8(addr + i)); } else { break; //result += "xx"; } } if (len && result.empty()) { //nothing read return send_cmd_ack("E01"); } return send_cmd_ack(result); } bool gdb_thread::cmd_write_memory(gdb_cmd& cmd) { usz s = cmd.data.find(','); usz s2 = cmd.data.find(':'); if ((s == umax) || (s2 == umax)) { GDB.warning("Malformed write memory request received: '%s'.", cmd.data); return 
send_cmd_ack("E01"); } u32 addr = hex_to_u32(cmd.data.substr(0, s)); u32 len = hex_to_u32(cmd.data.substr(s + 1, s2 - s - 1)); const char* data_ptr = (cmd.data.c_str()) + s2 + 1; for (u32 i = 0; i < len; ++i) { if (vm::check_addr(addr + i, vm::page_writable)) { u8 val; int res = sscanf_s(data_ptr, "%02hhX", &val); if (!res) { GDB.warning("Couldn't read u8 from string '%s'.", data_ptr); return send_cmd_ack("E02"); } data_ptr += 2; vm::write8(addr + i, val); } else { return send_cmd_ack("E03"); } } return send_cmd_ack("OK"); } bool gdb_thread::cmd_read_all_registers(gdb_cmd&) { std::string result; select_thread(general_ops_thread_id); auto th = selected_thread.lock(); if (th->get_class() == thread_class::ppu) { auto ppu = static_cast<named_thread<ppu_thread>*>(th.get()); //68 64-bit registers, and 3 32-bit result.reserve(68*16 + 3*8); for (int i = 0; i < 71; ++i) { result += get_reg(ppu, i); } return send_cmd_ack(result); } GDB.warning("Unimplemented thread type %d.", th->id_type()); return send_cmd_ack(""); } bool gdb_thread::cmd_write_all_registers(gdb_cmd& cmd) { select_thread(general_ops_thread_id); auto th = selected_thread.lock(); if (th->get_class() == thread_class::ppu) { auto ppu = static_cast<named_thread<ppu_thread>*>(th.get()); int ptr = 0; for (int i = 0; i < 71; ++i) { int sz = get_reg_size(ppu, i); set_reg(ppu, i, cmd.data.substr(ptr, sz * 2)); ptr += sz * 2; } return send_cmd_ack("OK"); } GDB.warning("Unimplemented thread type %d.", th->id_type()); return send_cmd_ack("E01"); } bool gdb_thread::cmd_set_thread_ops(gdb_cmd& cmd) { char type = cmd.data[0]; std::string thread = cmd.data.substr(1); u64 id = thread == "-1" ? ALL_THREADS : hex_to_u64(thread); if (type == 'c') { continue_ops_thread_id = id; } else { general_ops_thread_id = id; } if (select_thread(id)) { return send_cmd_ack("OK"); } GDB.warning("Client asked to use thread 0x%x for %s, but no matching thread was found.", id, type == 'c' ? 
"continue ops" : "general ops"); return send_cmd_ack("E01"); } bool gdb_thread::cmd_attached_to_what(gdb_cmd&) { //creating processes from client is not available yet return send_cmd_ack("1"); } bool gdb_thread::cmd_kill(gdb_cmd&) { GDB.notice("Kill command issued"); Emu.CallFromMainThread([](){ Emu.GracefulShutdown(); }); return true; } bool gdb_thread::cmd_continue_support(gdb_cmd&) { return send_cmd_ack("vCont;c;s;C;S"); } bool gdb_thread::cmd_vcont(gdb_cmd& cmd) { //todo: handle multiple actions and thread ids this->from_breakpoint = false; if (cmd.data[1] == 'c' || cmd.data[1] == 's') { select_thread(continue_ops_thread_id); auto ppu = std::static_pointer_cast<named_thread<ppu_thread>>(selected_thread.lock()); paused = false; if (cmd.data[1] == 's') { ppu->state += cpu_flag::dbg_step; } ppu->state -= cpu_flag::dbg_pause; //special case if app didn't start yet (only loaded) if (Emu.IsReady()) { Emu.Run(true); } if (Emu.IsPaused()) { Emu.Resume(); } else { ppu->state.notify_one(); } wait_with_interrupts(); //we are in all-stop mode Emu.Pause(); select_thread(pausedBy); // we have to remove dbg_pause from thread that paused execution, otherwise // it will be paused forever (Emu.Resume only removes dbg_global_pause) ppu = std::static_pointer_cast<named_thread<ppu_thread>>(selected_thread.lock()); if (ppu) ppu->state -= cpu_flag::dbg_pause; return send_reason(); } return send_cmd_ack(""); } static const u32 INVALID_PTR = 0xffffffff; bool gdb_thread::cmd_set_breakpoint(gdb_cmd& cmd) { char type = cmd.data[0]; //software breakpoint if (type == '0') { u32 addr = INVALID_PTR; if (cmd.data.find(';') != umax) { GDB.warning("Received request to set breakpoint with condition, but they are not supported."); return send_cmd_ack("E01"); } sscanf_s(cmd.data.c_str(), "0,%x", &addr); if (addr == INVALID_PTR) { GDB.warning("Can't parse breakpoint request, data: '%s'.", cmd.data); return send_cmd_ack("E02"); } ppu_breakpoint(addr, true); return send_cmd_ack("OK"); } //other 
breakpoint types are not supported return send_cmd_ack(""); } bool gdb_thread::cmd_remove_breakpoint(gdb_cmd& cmd) { char type = cmd.data[0]; //software breakpoint if (type == '0') { u32 addr = INVALID_PTR; sscanf_s(cmd.data.c_str(), "0,%x", &addr); if (addr == INVALID_PTR) { GDB.warning("Can't parse breakpoint remove request, data: '%s'.", cmd.data); return send_cmd_ack("E01"); } ppu_breakpoint(addr, false); return send_cmd_ack("OK"); } //other breakpoint types are not supported return send_cmd_ack(""); } #define PROCESS_CMD(cmds,handler) if (cmd.cmd == cmds) { if (!handler(cmd)) break; else continue; } gdb_thread::gdb_thread() noexcept { } gdb_thread::~gdb_thread() { if (server_socket != -1) { closesocket(server_socket); } if (client_socket != -1) { closesocket(client_socket); } } void gdb_thread::operator()() { start_server(); for (u64 sleep_until = get_system_time(); server_socket != -1 && thread_ctrl::state() != thread_state::aborting;) { sockaddr_in client; socklen_t client_len = sizeof(client); client_socket = static_cast<int>(accept(server_socket, reinterpret_cast<struct sockaddr*>(&client), &client_len)); if (client_socket == -1) { if (check_errno_again()) { thread_ctrl::wait_until(&sleep_until, 5000); continue; } GDB.error("Could not establish new connection."); return; } //stop immediately if (Emu.IsRunning()) { Emu.Pause(); } { char hostbuf[32]; inet_ntop(client.sin_family, reinterpret_cast<void*>(&client.sin_addr), hostbuf, 32); GDB.success("Got connection to GDB debug server from %s:%d.", hostbuf, client.sin_port); gdb_cmd cmd; while (thread_ctrl::state() != thread_state::aborting) { if (!read_cmd(cmd)) { break; } GDB.trace("Command %s with data %s received.", cmd.cmd, cmd.data); PROCESS_CMD("!", cmd_extended_mode); PROCESS_CMD("?", cmd_reason); PROCESS_CMD("qSupported", cmd_supported); PROCESS_CMD("qfThreadInfo", cmd_thread_info); PROCESS_CMD("qC", cmd_current_thread); PROCESS_CMD("p", cmd_read_register); PROCESS_CMD("P", cmd_write_register); 
PROCESS_CMD("m", cmd_read_memory); PROCESS_CMD("M", cmd_write_memory); PROCESS_CMD("g", cmd_read_all_registers); PROCESS_CMD("G", cmd_write_all_registers); PROCESS_CMD("H", cmd_set_thread_ops); PROCESS_CMD("qAttached", cmd_attached_to_what); PROCESS_CMD("k", cmd_kill); PROCESS_CMD("vCont?", cmd_continue_support); PROCESS_CMD("vCont", cmd_vcont); PROCESS_CMD("z", cmd_remove_breakpoint); PROCESS_CMD("Z", cmd_set_breakpoint); GDB.trace("Unsupported command received: '%s'.", cmd.cmd); if (!send_cmd_ack("")) { break; } } } } } #undef PROCESS_CMD void gdb_thread::pause_from(cpu_thread* t) { if (paused) { return; } paused = true; pausedBy = t->id; thread_ctrl::notify(*static_cast<gdb_server*>(this)); } #ifndef _WIN32 #undef sscanf_s #endif #undef HEX_U32 #undef HEX_U64
20,587
C++
.cpp
879
20.886234
140
0.646333
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
true
false
5,171
IPC_config.cpp
RPCS3_rpcs3/rpcs3/Emu/IPC_config.cpp
#include "stdafx.h" #include "IPC_config.h" cfg_ipc g_cfg_ipc; LOG_CHANNEL(IPC); void cfg_ipc::load() { const std::string path = cfg_ipc::get_path(); fs::file cfg_file(path, fs::read); if (cfg_file) { IPC.notice("Loading IPC config. Path: %s", path); from_string(cfg_file.to_string()); } else { IPC.notice("IPC config missing. Using default settings. Path: %s", path); from_default(); } } void cfg_ipc::save() const { #ifdef _WIN32 const std::string path_to_cfg = fs::get_config_dir() + "config/"; if (!fs::create_path(path_to_cfg)) { IPC.error("Could not create path: %s", path_to_cfg); } #endif const std::string path = cfg_ipc::get_path(); if (!cfg::node::save(path)) { IPC.error("Could not save config: %s (error=%s)", path, fs::g_tls_error); } } std::string cfg_ipc::get_path() { #ifdef _WIN32 return fs::get_config_dir() + "config/ipc.yml"; #else return fs::get_config_dir() + "ipc.yml"; #endif } bool cfg_ipc::get_server_enabled() const { return ipc_server_enabled.get(); } int cfg_ipc::get_port() const { return ipc_port; } void cfg_ipc::set_server_enabled(const bool enabled) { this->ipc_server_enabled.set(enabled); } void cfg_ipc::set_port(const int port) { this->ipc_port.set(port); }
1,243
C++
.cpp
58
19.637931
75
0.683717
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
5,172
IdManager.cpp
RPCS3_rpcs3/rpcs3/Emu/IdManager.cpp
#include "stdafx.h" #include "IdManager.h" #include "Utilities/Thread.h" shared_mutex id_manager::g_mutex; namespace id_manager { thread_local u32 g_id = 0; } template <> bool serialize<std::shared_ptr<utils::serial>>(utils::serial& ar, std::shared_ptr<utils::serial>& o) { if (!o) { o = std::make_shared<utils::serial>(); } if (!ar.is_writing()) { o->set_reading_state(); } ar(o->data); return true; } std::vector<std::pair<u128, id_manager::typeinfo>>& id_manager::get_typeinfo_map() { // Magic static static std::vector<std::pair<u128, id_manager::typeinfo>> s_map; return s_map; } idm::map_data* idm::allocate_id(std::vector<map_data>& vec, u32 type_id, u32 dst_id, u32 base, u32 step, u32 count, bool uses_lowest_id, std::pair<u32, u32> invl_range) { if (dst_id != (base ? 0 : u32{umax})) { // Fixed position construction const u32 index = id_manager::get_index(dst_id, base, step, count, invl_range); ensure(index < count); vec.resize(std::max<usz>(vec.size(), index + 1)); if (vec[index].second) { return nullptr; } id_manager::g_id = dst_id; vec[index] = {id_manager::id_key(dst_id, type_id), nullptr}; return &vec[index]; } if (uses_lowest_id) { // Disable the optimization below (hurts accuracy for known cases) vec.resize(count); } else if (vec.size() < count) { // Try to emplace back const u32 _next = base + step * ::size32(vec); id_manager::g_id = _next; vec.emplace_back(id_manager::id_key(_next, type_id), nullptr); return &vec.back(); } // Check all IDs starting from "next id" (TODO) for (u32 i = 0, next = base; i < count; i++, next += step) { const auto ptr = &vec[i]; // Look for free ID if (!ptr->second) { // Incremenet ID invalidation counter const u32 id = next | ((ptr->first + (1u << invl_range.first)) & (invl_range.second ? (((1u << invl_range.second) - 1) << invl_range.first) : 0)); id_manager::g_id = id; ptr->first = id_manager::id_key(id, type_id); return ptr; } } // Out of IDs return nullptr; }
2,039
C++
.cpp
74
25.108108
168
0.655561
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
5,173
vfs_config.cpp
RPCS3_rpcs3/rpcs3/Emu/vfs_config.cpp
#include "stdafx.h" #include "vfs_config.h" #include "Utilities/StrUtil.h" #include "Utilities/StrFmt.h" LOG_CHANNEL(vfs_log, "VFS"); cfg_vfs g_cfg_vfs{}; std::string cfg_vfs::get(const cfg::string& _cfg, std::string_view emu_dir) const { return get(_cfg.to_string(), _cfg.def, emu_dir); } std::string cfg_vfs::get(const std::string& _cfg, const std::string& def, std::string_view emu_dir) const { std::string path = _cfg; // Fallback if (path.empty()) { if (def.empty()) { vfs_log.notice("VFS config called with empty path and empty default"); return {}; } path = def; } std::string _emu_dir; // Storage only if (emu_dir.empty()) { // Optimization if provided arg _emu_dir = emulator_dir; if (_emu_dir.empty()) { _emu_dir = fs::get_config_dir() + '/'; } // Check if path does not end with a delimiter else if (_emu_dir.back() != fs::delim[0] && _emu_dir.back() != fs::delim[1]) { _emu_dir += '/'; } emu_dir = _emu_dir; } path = fmt::replace_all(path, "$(EmulatorDir)", emu_dir); // Check if path does not end with a delimiter if (path.empty()) { vfs_log.error("VFS config path empty (_cfg='%s', def='%s', emu_dir='%s')", _cfg, def, emu_dir); } else if (path.back() != fs::delim[0] && path.back() != fs::delim[1]) { path += '/'; } return path; } cfg::device_info cfg_vfs::get_device(const cfg::device_entry& _cfg, std::string_view key, std::string_view emu_dir) const { return get_device_info(_cfg, key, emu_dir); } cfg::device_info cfg_vfs::get_device_info(const cfg::device_entry& _cfg, std::string_view key, std::string_view emu_dir) const { const auto& device_map = _cfg.get_map(); if (auto it = device_map.find(key); it != device_map.cend()) { // Make sure the path is properly resolved cfg::device_info info = it->second; const auto& def_map = _cfg.get_default(); std::string def_path; if (auto def_it = def_map.find(key); def_it != def_map.cend()) { def_path = def_it->second.path; } if (info.path.empty() && def_path.empty()) { return info; } info.path = get(info.path, def_path, emu_dir); 
return info; } return {}; } void cfg_vfs::load() { const std::string path = cfg_vfs::get_path(); if (fs::file cfg_file{path, fs::read}) { vfs_log.notice("Loading VFS config. Path: %s", path); from_string(cfg_file.to_string()); } else { vfs_log.notice("VFS config missing. Using default settings. Path: %s", path); from_default(); } } void cfg_vfs::save() const { #ifdef _WIN32 const std::string path_to_cfg = fs::get_config_dir() + "config/"; if (!fs::create_path(path_to_cfg)) { vfs_log.error("Could not create path: %s", path_to_cfg); } #endif fs::pending_file temp(cfg_vfs::get_path()); if (!temp.file) { vfs_log.error("Could not save config: \"%s\" (error=%s)", cfg_vfs::get_path(), fs::g_tls_error); return; } temp.file.write(to_string()); if (!temp.commit()) { vfs_log.error("Could not save config: \"%s\" (error=%s)", cfg_vfs::get_path(), fs::g_tls_error); } } std::string cfg_vfs::get_path() { #ifdef _WIN32 return fs::get_config_dir() + "config/vfs.yml"; #else return fs::get_config_dir() + "vfs.yml"; #endif }
3,182
C++
.cpp
120
24.15
126
0.646341
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
5,174
PPUInterpreter.cpp
RPCS3_rpcs3/rpcs3/Emu/Cell/PPUInterpreter.cpp
#include "stdafx.h" #include "PPUInterpreter.h" #include "Emu/Memory/vm_reservation.h" #include "Emu/system_config.h" #include "PPUThread.h" #include "Emu/Cell/Common.h" #include "Emu/Cell/PPUFunction.h" #include "Emu/Cell/PPUAnalyser.h" #include "Emu/Cell/timers.hpp" #include "Emu/IdManager.h" #include <bit> #include <cmath> #include <climits> #include "util/asm.hpp" #include "util/v128.hpp" #include "util/simd.hpp" #include "util/sysinfo.hpp" #include "Utilities/JIT.h" #if !defined(_MSC_VER) #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wold-style-cast" #pragma GCC diagnostic ignored "-Wuninitialized" #endif extern bool is_debugger_present(); extern const ppu_decoder<ppu_itype> g_ppu_itype; extern const ppu_decoder<ppu_iname> g_ppu_iname; enum class ppu_exec_bit : u64 { has_oe, has_rc, set_sat, use_nj, fix_nj, set_vnan, fix_vnan, set_fpcc, use_dfma, set_cr_stats, set_call_history, use_feed_data, __bitset_enum_max }; using enum ppu_exec_bit; // Helper for combining only used subset of exec flags at compile time template <ppu_exec_bit... Flags0> struct ppu_exec_select { template <ppu_exec_bit Flag, ppu_exec_bit... Flags, typename F> static ppu_intrp_func_t select(bs_t<ppu_exec_bit> selected, F func) { // Make sure there is no flag duplication, otherwise skip flag if constexpr (((Flags0 != Flag) && ...)) { // Test only relevant flags at runtime initialization (compile both variants) if (selected & Flag) { // In this branch, selected flag is added to Flags0 return ppu_exec_select<Flags0..., Flag>::template select<Flags...>(selected, func); } } return ppu_exec_select<Flags0...>::template select<Flags...>(selected, func); } template <typename F> static ppu_intrp_func_t select(bs_t<ppu_exec_bit>, F func) { // Instantiate interpreter function with required set of flags return func.template operator()<Flags0...>(); } template <ppu_exec_bit... 
Flags1> static auto select() { #ifndef __INTELLISENSE__ return [](bs_t<ppu_exec_bit> selected, auto func) { return ppu_exec_select::select<Flags1...>(selected, func); }; #endif } }; // Switch between inlined interpreter invocation (exec) and builder function #if defined(ARCH_X64) #define RETURN(...) \ if constexpr (Build == 0) { \ static_cast<void>(exec); \ static const ppu_intrp_func_t f = build_function_asm<ppu_intrp_func_t, asmjit::ppu_builder>("ppu_"s + __func__, [&](asmjit::ppu_builder& c, native_args&) { \ static ppu_opcode_t op{}; \ static ppu_abstract_t ppu; \ exec(__VA_ARGS__); \ c.ppu_ret(); \ return !c.fail_flag; \ }); \ if (f) return f; \ ppu_log.error("Can't build instruction %s", __func__); \ RETURN_(__VA_ARGS__); \ } #else #define RETURN RETURN_ #endif #define RETURN_(...) \ if constexpr (Build == 0) { \ static_cast<void>(exec); \ if (is_debugger_present()) return +[](ppu_thread& ppu, ppu_opcode_t op, be_t<u32>* this_op, ppu_intrp_func* next_fn) { \ exec(__VA_ARGS__); \ const auto next_op = this_op + 1; \ const auto fn = atomic_storage<ppu_intrp_func_t>::load(next_fn->fn); \ ppu.cia = vm::get_addr(next_op); \ return fn(ppu, {*next_op}, next_op, next_fn + 1); \ }; \ return +[](ppu_thread& ppu, ppu_opcode_t op, be_t<u32>* this_op, ppu_intrp_func* next_fn) { \ exec(__VA_ARGS__); \ const auto next_op = this_op + 1; \ const auto fn = atomic_storage<ppu_intrp_func_t>::observe(next_fn->fn); \ return fn(ppu, {*next_op}, next_op, next_fn + 1); \ }; \ } #ifdef ARCH_X64 static constexpr ppu_opcode_t s_op{}; #endif namespace asmjit { #if defined(ARCH_X64) struct ppu_builder : vec_builder { using base = vec_builder; #ifdef _WIN32 static constexpr x86::Gp arg_ppu = x86::rcx; static constexpr x86::Gp arg_op = x86::edx; static constexpr x86::Gp arg_this_op = x86::r8; static constexpr x86::Gp arg_next_fn = x86::r9; #else static constexpr x86::Gp arg_ppu = x86::rdi; static constexpr x86::Gp arg_op = x86::esi; static constexpr x86::Gp arg_this_op = x86::rdx; static 
constexpr x86::Gp arg_next_fn = x86::rcx; #endif u32 xmm_count = 0; u32 ppu_base = 0; x86::Xmm tmp; ppu_builder(CodeHolder* ch) : base(ch) { } // Indexed offset to ppu.member template <auto MPtr, u32 Size = sizeof((std::declval<ppu_thread&>().*MPtr)[0]), uint I, uint N> x86::Mem ppu_mem(const bf_t<u32, I, N>&, bool last = false) { // Required index shift for array indexing constexpr u32 Shift = std::countr_zero(sizeof((std::declval<ppu_thread&>().*MPtr)[0])); const u32 offset = ::offset32(MPtr); auto tmp_r32 = x86::eax; auto reg_ppu = arg_ppu; if (last) { tmp_r32 = arg_op.r32(); } else { base::mov(tmp_r32, arg_op); if (offset % 16 == 0 && ppu_base != offset) { // Optimistically precompute offset to avoid [ppu + tmp*x + offset] addressing base::lea(x86::r10, x86::qword_ptr(arg_ppu, static_cast<s32>(offset))); ppu_base = offset; } } if (ppu_base == offset) { reg_ppu = x86::r10; } // Use max possible index shift constexpr u32 X86Shift = Shift > 3 ? 3 : Shift; constexpr u32 AddShift = Shift - X86Shift; constexpr u32 AndMask = (1u << N) - 1; if constexpr (I >= AddShift) { if constexpr (I != AddShift) base::shr(tmp_r32, I - AddShift); base::and_(tmp_r32, AndMask << AddShift); } else { base::and_(tmp_r32, AndMask << I); base::shl(tmp_r32, I + AddShift); } return x86::ptr(reg_ppu, tmp_r32.r64(), X86Shift, static_cast<s32>(offset - ppu_base), Size); } // Generic offset to ppu.member template <auto MPtr, u32 Size = sizeof(std::declval<ppu_thread&>().*MPtr)> x86::Mem ppu_mem() { return x86::ptr(arg_ppu, static_cast<s32>(::offset32(MPtr)), Size); } template <u32 Size = 16, uint I, uint N> x86::Mem ppu_vr(const bf_t<u32, I, N>& bf, bool last = false) { return ppu_mem<&ppu_thread::vr, Size>(bf, last); } x86::Mem ppu_sat() { return ppu_mem<&ppu_thread::sat>(); } void ppu_ret(bool last = true) { // Initialize pointer to next function base::mov(x86::rax, x86::qword_ptr(arg_next_fn)); base::add(arg_this_op, 4); if (is_debugger_present()) base::mov(ppu_mem<&ppu_thread::cia>(), 
arg_this_op.r32()); base::mov(arg_op, x86::dword_ptr(arg_this_op)); base::bswap(arg_op); base::add(arg_next_fn, 8); base::jmp(x86::rax); // Embed constants (TODO: after last return) if (last) base::emit_consts(); } }; #elif defined(ARCH_ARM64) struct ppu_builder : a64::Assembler { }; #else struct ppu_builder { }; #endif } struct ppu_abstract_t { struct abstract_vr { template <uint I, uint N> struct lazy_vr : asmjit::mem_lazy { const asmjit::Operand& eval(bool is_lv) { if (is_lv && !this->isReg()) { Operand::operator=(g_vc->vec_alloc()); #if defined(ARCH_X64) g_vc->emit(asmjit::x86::Inst::kIdMovaps, *this, static_cast<asmjit::ppu_builder*>(g_vc)->ppu_vr(bf_t<u32, I, N>{}, false)); #endif } if (!is_lv) { if (this->isReg()) { g_vc->vec_dealloc(asmjit::vec_type{this->id()}); } else { #if defined(ARCH_X64) Operand::operator=(static_cast<asmjit::ppu_builder*>(g_vc)->ppu_vr(bf_t<u32, I, N>{}, false)); #endif } } return *this; } template <typename T> void operator=([[maybe_unused]] T&& _val) const { FOR_X64(store_op, kIdMovaps, kIdVmovaps, static_cast<asmjit::ppu_builder*>(g_vc)->ppu_vr(bf_t<u32, I, N>{}, true), std::forward<T>(_val)); } }; template <uint I, uint N> lazy_vr<I, N> operator[](const bf_t<u32, I, N>&) const { return {}; } } vr; struct abstract_sat : asmjit::mem_type { abstract_sat() #if defined(ARCH_X64) : asmjit::mem_type(static_cast<asmjit::ppu_builder*>(g_vc)->ppu_sat()) #endif { } template <typename T> void operator=([[maybe_unused]] T&& _val) const { FOR_X64(store_op, kIdMovaps, kIdVmovaps, *this, std::forward<T>(_val)); } } sat{}; }; extern void do_cell_atomic_128_store(u32 addr, const void* to_write); inline u64 dup32(u32 x) { return x | static_cast<u64>(x) << 32; } // Write values to CR field inline void ppu_cr_set(ppu_thread& ppu, u32 field, bool le, bool gt, bool eq, bool so) { ppu.cr[field * 4 + 0] = le; ppu.cr[field * 4 + 1] = gt; ppu.cr[field * 4 + 2] = eq; ppu.cr[field * 4 + 3] = so; if (g_cfg.core.ppu_debug) [[unlikely]] { 
*reinterpret_cast<u32*>(vm::g_stat_addr + ppu.cia) |= *reinterpret_cast<u32*>(ppu.cr.bits + field * 4); } } // Write comparison results to CR field template<typename T> inline void ppu_cr_set(ppu_thread& ppu, u32 field, const T& a, const T& b) { ppu_cr_set(ppu, field, a < b, a > b, a == b, ppu.xer.so); } // TODO template <ppu_exec_bit... Flags> void ppu_set_cr(ppu_thread& ppu, u32 field, bool le, bool gt, bool eq, bool so) { ppu.cr[field * 4 + 0] = le; ppu.cr[field * 4 + 1] = gt; ppu.cr[field * 4 + 2] = eq; ppu.cr[field * 4 + 3] = so; if constexpr (((Flags == set_cr_stats) || ...)) { *reinterpret_cast<u32*>(vm::g_stat_addr + ppu.cia) |= *reinterpret_cast<u32*>(ppu.cr.bits + field * 4); } } // Set XER.OV bit (overflow) inline void ppu_ov_set(ppu_thread& ppu, bool bit) { ppu.xer.ov = bit; ppu.xer.so |= bit; } // Write comparison results to FPCC field with optional CR field update template <ppu_exec_bit... Flags> void ppu_set_fpcc(ppu_thread& ppu, f64 a, f64 b, u64 cr_field = 1) { if constexpr (((Flags == set_fpcc || Flags == has_rc) || ...)) { static_assert(std::endian::native == std::endian::little, "Not implemented"); bool fpcc[4]; #if defined(ARCH_X64) && !defined(_M_X64) __asm__("comisd %[b], %[a]\n" : "=@ccb" (fpcc[0]) , "=@cca" (fpcc[1]) , "=@ccz" (fpcc[2]) , "=@ccp" (fpcc[3]) : [a] "x" (a) , [b] "x" (b) : "cc"); if (fpcc[3]) [[unlikely]] { fpcc[0] = fpcc[1] = fpcc[2] = false; } #else const auto cmp = a <=> b; fpcc[0] = cmp == std::partial_ordering::less; fpcc[1] = cmp == std::partial_ordering::greater; fpcc[2] = cmp == std::partial_ordering::equivalent; fpcc[3] = cmp == std::partial_ordering::unordered; #endif const u32 data = std::bit_cast<u32>(fpcc); // Write FPCC ppu.fpscr.fields[4] = data; if constexpr (((Flags == has_rc) || ...)) { // Previous behaviour was throwing an exception; TODO ppu.cr.fields[cr_field] = data; if (g_cfg.core.ppu_debug) [[unlikely]] { *reinterpret_cast<u32*>(vm::g_stat_addr + ppu.cia) |= data; } } } } // Validate read data in case 
does not match reservation template <typename T, ppu_exec_bit... Flags> auto ppu_feed_data(ppu_thread& ppu, u64 addr) { static_assert(sizeof(T) <= 128, "Incompatible type-size, break down into smaller loads"); auto value = vm::_ref<T>(vm::cast(addr)); if constexpr (!((Flags == use_feed_data) || ...)) { return value; } if (!ppu.use_full_rdata) { return value; } const u32 raddr = ppu.raddr; if (addr / 128 > raddr / 128 || (addr + sizeof(T) - 1) / 128 < raddr / 128) { // Out of range or reservation does not exist return value; } if (sizeof(T) == 1 || addr / 128 == (addr + sizeof(T) - 1) / 128) { // Optimized comparison if (std::memcmp(&value, &ppu.rdata[addr & 127], sizeof(T))) { // Reservation was lost ppu.raddr = 0; } } else { alignas(16) std::byte buffer[sizeof(T)]; std::memcpy(buffer, &value, sizeof(value)); // Put in memory explicitly (ensure the compiler won't do it beforehand) const std::byte* src; u32 size; u32 offs = 0; if (raddr / 128 == addr / 128) src = &ppu.rdata[addr & 127], size = std::min<u32>(128 - (addr % 128), sizeof(T)); else src = &ppu.rdata[0], size = (addr + u32{sizeof(T)}) % 127, offs = sizeof(T) - size; if (std::memcmp(buffer + offs, src, size)) { ppu.raddr = 0; } } return value; } // Push called address to custom call history for debugging template <ppu_exec_bit... 
Flags> u32 ppu_record_call(ppu_thread& ppu, u32 new_cia, ppu_opcode_t op, bool indirect = false) { if constexpr (!((Flags == set_call_history) || ...)) { return new_cia; } if (auto& history = ppu.call_history; !history.data.empty()) { if (!op.lk) { if (indirect) { // Register LLE exported function trampolines // Trampolines do not change the stack pointer, and ones to exported functions change RTOC if (ppu.gpr[1] == history.last_r1 && ppu.gpr[2] != history.last_r2) { // Cancel condition history.last_r1 = umax; history.last_r2 = ppu.gpr[2]; // Register trampolie with TOC history.data[history.index++ % ppu.call_history_max_size] = new_cia; } } return new_cia; } history.data[history.index++ % ppu.call_history_max_size] = new_cia; history.last_r1 = ppu.gpr[1]; history.last_r2 = ppu.gpr[2]; } return new_cia; } template<typename T> struct add_flags_result_t { T result; bool carry; add_flags_result_t() = default; // Straighforward ADD with flags add_flags_result_t(T a, T b) : result(a + b) , carry(result < a) { } // Straighforward ADC with flags add_flags_result_t(T a, T b, bool c) : add_flags_result_t(a, b) { add_flags_result_t r(result, c); result = r.result; carry |= r.carry; } }; static add_flags_result_t<u64> add64_flags(u64 a, u64 b) { return{ a, b }; } static add_flags_result_t<u64> add64_flags(u64 a, u64 b, bool c) { return{ a, b, c }; } extern void ppu_execute_syscall(ppu_thread& ppu, u64 code); extern u32 ppu_lwarx(ppu_thread& ppu, u32 addr); extern u64 ppu_ldarx(ppu_thread& ppu, u32 addr); extern bool ppu_stwcx(ppu_thread& ppu, u32 addr, u32 reg_value); extern bool ppu_stdcx(ppu_thread& ppu, u32 addr, u64 reg_value); extern void ppu_trap(ppu_thread& ppu, u64 addr); // NaNs production precedence: NaN from Va, Vb, Vc // and lastly the result of the operation in case none of the operands is a NaN // Signaling NaNs are 'quieted' (MSB of fraction is set) with other bits of data remain the same inline v128 ppu_select_vnan(v128 a) { return a; } inline v128 
ppu_select_vnan(v128 a, v128 b) { return gv_selectfs(gv_eqfs(a, a), b, a | gv_bcst32(0x7fc00000u)); } inline v128 ppu_select_vnan(v128 a, v128 b, Vector128 auto... args) { return ppu_select_vnan(a, ppu_select_vnan(b, args...)); } // Flush denormals to zero if NJ is 1 template <bool Result = false, ppu_exec_bit... Flags> inline v128 ppu_flush_denormal(const v128& mask, const v128& a) { if constexpr (((Flags == use_nj) || ...) || (Result && ((Flags == fix_nj) || ...))) { return gv_andn(gv_shr32(gv_eq32(mask & a, gv_bcst32(0)), 1), a); } else { return a; } } inline v128 ppu_fix_vnan(v128 r) { return gv_selectfs(gv_eqfs(r, r), r, gv_bcst32(0x7fc00000u)); } template <ppu_exec_bit... Flags> inline v128 ppu_set_vnan(v128 r, Vector128 auto... args) { if constexpr (((Flags == set_vnan) || ...) && sizeof...(args) > 0) { // Full propagation return ppu_select_vnan(args..., ppu_fix_vnan(r)); } else if constexpr (((Flags == fix_vnan) || ...)) { // Only fix the result return ppu_fix_vnan(r); } else { // Return as is return r; } } template <u32 Build, ppu_exec_bit... Flags> auto MFVSCR() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<set_sat>(); static const auto exec = [](auto&& d, auto&& sat, auto&& nj) { u32 sat_bit = 0; if constexpr (((Flags == set_sat) || ...)) sat_bit = !gv_testz(sat); //!!sat._u; d._u64[0] = 0; d._u64[1] = u64(sat_bit | (u32{nj} << 16)) << 32; }; RETURN_(ppu.vr[op.vd], ppu.sat, ppu.nj); } template <u32 Build, ppu_exec_bit... Flags> auto MTVSCR() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<set_sat, use_nj, fix_nj>(); static const auto exec = [](auto&& sat, auto&& nj, auto&& jm_mask, auto&& b) { const u32 vscr = b._u32[3]; if constexpr (((Flags == set_sat) || ...)) sat._u = vscr & 1; if constexpr (((Flags == use_nj || Flags == fix_nj) || ...)) jm_mask = (vscr & 0x10000) ? 
0x7f80'0000 : 0x7fff'ffff; nj = (vscr & 0x10000) != 0; }; RETURN_(ppu.sat, ppu.nj, ppu.jm_mask, ppu.vr[op.vb]); } template <u32 Build, ppu_exec_bit... Flags> auto VADDCUW() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](auto&& d, auto&& a, auto&& b) { // ~a is how much can be added to a without carry d = gv_sub32(gv_geu32(gv_not32(std::move(a)), std::move(b)), gv_bcst32(-1)); }; RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]); } template <u32 Build, ppu_exec_bit... Flags> auto VADDFP() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_nj, fix_nj, set_vnan, fix_vnan>(); static const auto exec = [](auto&& d, auto&& a_, auto&& b_, auto&& jm_mask) { auto m = gv_bcst32(jm_mask, &ppu_thread::jm_mask); auto a = ppu_flush_denormal<false, Flags...>(m, std::move(a_)); auto b = ppu_flush_denormal<false, Flags...>(m, std::move(b_)); d = ppu_flush_denormal<true, Flags...>(std::move(m), ppu_set_vnan<Flags...>(gv_addfs(a, b), a, b)); }; RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb], ppu.jm_mask); } template <u32 Build, ppu_exec_bit... Flags> auto VADDSBS() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<set_sat>(); static const auto exec = [](auto&& d, auto&& a, auto&& b, auto&& sat) { if constexpr (((Flags == set_sat) || ...)) { auto r = gv_adds_s8(a, b); sat = gv_or32(gv_xor32(gv_add8(std::move(a), std::move(b)), r), std::move(sat)); d = std::move(r); } else { d = gv_adds_s8(std::move(a), std::move(b)); } }; RETURN(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb], ppu.sat); } template <u32 Build, ppu_exec_bit... 
Flags> auto VADDSHS() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<set_sat>(); static const auto exec = [](auto&& d, auto&& a, auto&& b, auto&& sat) { if constexpr (((Flags == set_sat) || ...)) { auto r = gv_adds_s16(a, b); sat = gv_or32(gv_xor32(gv_add16(std::move(a), std::move(b)), r), std::move(sat)); d = std::move(r); } else { d = gv_adds_s16(std::move(a), std::move(b)); } }; RETURN(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb], ppu.sat); } template <u32 Build, ppu_exec_bit... Flags> auto VADDSWS() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<set_sat>(); static const auto exec = [](auto&& d, auto&& a, auto&& b, auto&& sat) { if constexpr (((Flags == set_sat) || ...)) { auto r = gv_adds_s32(a, b); sat = gv_or32(gv_xor32(gv_add32(std::move(a), std::move(b)), r), std::move(sat)); d = std::move(r); } else { d = gv_adds_s32(std::move(a), std::move(b)); } }; RETURN(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb], ppu.sat); } template <u32 Build, ppu_exec_bit... Flags> auto VADDUBM() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](auto&& d, auto&& a, auto&& b) { d = gv_add8(std::move(a), std::move(b)); }; RETURN(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]); } template <u32 Build, ppu_exec_bit... Flags> auto VADDUBS() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<set_sat>(); static const auto exec = [](auto&& d, auto&& a, auto&& b, auto&& sat) { if constexpr (((Flags == set_sat) || ...)) { auto r = gv_addus_u8(a, b); sat = gv_or32(gv_xor32(gv_add8(std::move(a), std::move(b)), r), std::move(sat)); d = std::move(r); } else { d = gv_addus_u8(std::move(a), std::move(b)); } }; RETURN(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb], ppu.sat); } template <u32 Build, ppu_exec_bit... 
Flags> auto VADDUHM() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](auto&& d, auto&& a, auto&& b) { d = gv_add16(std::move(a), std::move(b)); }; RETURN(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]); } template <u32 Build, ppu_exec_bit... Flags> auto VADDUHS() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<set_sat>(); static const auto exec = [](auto&& d, auto&& a, auto&& b, auto&& sat) { if constexpr (((Flags == set_sat) || ...)) { auto r = gv_addus_u16(a, b); sat = gv_or32(gv_xor32(gv_add16(std::move(a), std::move(b)), r), std::move(sat)); d = std::move(r); } else { d = gv_addus_u16(std::move(a), std::move(b)); } }; RETURN(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb], ppu.sat); } template <u32 Build, ppu_exec_bit... Flags> auto VADDUWM() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](auto&& d, auto&& a, auto&& b) { d = gv_add32(std::move(a), std::move(b)); }; RETURN(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]); } template <u32 Build, ppu_exec_bit... Flags> auto VADDUWS() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<set_sat>(); static const auto exec = [](auto&& d, auto&& a, auto&& b, auto&& sat) { if constexpr (((Flags == set_sat) || ...)) { auto r = gv_addus_u32(a, b); sat = gv_or32(gv_xor32(gv_add32(std::move(a), std::move(b)), r), std::move(sat)); d = std::move(r); } else { d = gv_addus_u32(std::move(a), std::move(b)); } }; RETURN(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb], ppu.sat); } template <u32 Build, ppu_exec_bit... Flags> auto VAND() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](auto&& d, auto&& a, auto&& b) { d = gv_andfs(std::move(a), std::move(b)); }; RETURN(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]); } template <u32 Build, ppu_exec_bit... 
Flags> auto VANDC() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](auto&& d, auto&& a, auto&& b) { d = gv_andnfs(std::move(b), std::move(a)); }; RETURN(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]); } template <u32 Build, ppu_exec_bit... Flags> auto VAVGSB() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](auto&& d, auto&& a, auto&& b) { d = gv_avgs8(std::move(a), std::move(b)); }; RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]); } template <u32 Build, ppu_exec_bit... Flags> auto VAVGSH() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](auto&& d, auto&& a, auto&& b) { d = gv_avgs16(std::move(a), std::move(b)); }; RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]); } template <u32 Build, ppu_exec_bit... Flags> auto VAVGSW() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](auto&& d, auto&& a, auto&& b) { d = gv_avgs32(std::move(a), std::move(b)); }; RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]); } template <u32 Build, ppu_exec_bit... Flags> auto VAVGUB() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](auto&& d, auto&& a, auto&& b) { d = gv_avgu8(std::move(a), std::move(b)); }; RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]); } template <u32 Build, ppu_exec_bit... Flags> auto VAVGUH() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](auto&& d, auto&& a, auto&& b) { d = gv_avgu16(std::move(a), std::move(b)); }; RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]); } template <u32 Build, ppu_exec_bit... 
Flags> auto VAVGUW() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](auto&& d, auto&& a, auto&& b) { d = gv_avgu32(std::move(a), std::move(b)); }; RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]); } template <u32 Build, ppu_exec_bit... Flags> auto VCFSX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](auto&& d, auto&& b, u32 i) { d = gv_subus_u16(gv_cvts32_tofs(std::move(b)), gv_bcst32(i)); }; RETURN_(ppu.vr[op.vd], ppu.vr[op.vb], op.vuimm << 23); } template <u32 Build, ppu_exec_bit... Flags> auto VCFUX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](auto&& d, auto&& b, u32 i) { d = gv_subus_u16(gv_cvtu32_tofs(std::move(b)), gv_bcst32(i)); }; RETURN_(ppu.vr[op.vd], ppu.vr[op.vb], op.vuimm << 23); } template <u32 Build, ppu_exec_bit... Flags> auto VCMPBFP() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, auto&& d, auto&& a, auto&& b) { auto sign = gv_bcstfs(-0.); auto cmp1 = gv_nlefs(a, b); auto cmp2 = gv_ngefs(a, b ^ sign); auto r = (std::move(cmp1) & sign) | gv_shr32(std::move(cmp2) & sign, 1); if constexpr (((Flags == has_oe) || ...)) ppu_cr_set(ppu, 6, false, false, gv_testz(r), false); d = std::move(r); }; RETURN_(ppu, ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]); } template <u32 Build, ppu_exec_bit... Flags> auto VCMPEQFP() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, auto&& d, auto&& a, auto&& b) { auto r = gv_eqfs(std::move(a), std::move(b)); if constexpr (((Flags == has_oe) || ...)) ppu_cr_set(ppu, 6, gv_testall1(r), false, gv_testall0(r), false); d = r; }; RETURN_(ppu, ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]); } template <u32 Build, ppu_exec_bit... 
Flags>
// VCMPEQUB: Vector Compare Equal Unsigned Byte — each result byte is all-ones
// where a == b, else zero. When the has_oe flag is selected (record form),
// CR field 6 is set from "all lanes compared true" / "no lanes compared true".
// Build == 0xf1a6 is the flag-query pass: it only reports which ppu_exec_bit
// flags this handler reacts to.
auto VCMPEQUB()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, auto&& d, auto&& a, auto&& b)
	{
		auto r = gv_eq8(std::move(a), std::move(b));
		if constexpr (((Flags == has_oe) || ...))
			ppu_cr_set(ppu, 6, gv_testall1(r), false, gv_testall0(r), false);
		d = r;
	};

	RETURN_(ppu, ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VCMPEQUH: as VCMPEQUB, per 16-bit halfword.
auto VCMPEQUH()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, auto&& d, auto&& a, auto&& b)
	{
		auto r = gv_eq16(std::move(a), std::move(b));
		if constexpr (((Flags == has_oe) || ...))
			ppu_cr_set(ppu, 6, gv_testall1(r), false, gv_testall0(r), false);
		d = r;
	};

	RETURN_(ppu, ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VCMPEQUW: as VCMPEQUB, per 32-bit word.
auto VCMPEQUW()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, auto&& d, auto&& a, auto&& b)
	{
		auto r = gv_eq32(std::move(a), std::move(b));
		if constexpr (((Flags == has_oe) || ...))
			ppu_cr_set(ppu, 6, gv_testall1(r), false, gv_testall0(r), false);
		d = r;
	};

	RETURN_(ppu, ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VCMPGEFP: Vector Compare Greater-Than-or-Equal Floating-Point (per float lane).
auto VCMPGEFP()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, auto&& d, auto&& a, auto&& b)
	{
		auto r = gv_gefs(std::move(a), std::move(b));
		if constexpr (((Flags == has_oe) || ...))
			ppu_cr_set(ppu, 6, gv_testall1(r), false, gv_testall0(r), false);
		d = r;
	};

	RETURN_(ppu, ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VCMPGTFP: Vector Compare Greater-Than Floating-Point (per float lane).
auto VCMPGTFP()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, auto&& d, auto&& a, auto&& b)
	{
		auto r = gv_gtfs(std::move(a), std::move(b));
		if constexpr (((Flags == has_oe) || ...))
			ppu_cr_set(ppu, 6, gv_testall1(r), false, gv_testall0(r), false);
		d = r;
	};

	RETURN_(ppu, ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VCMPGTSB: Vector Compare Greater-Than Signed Byte.
auto VCMPGTSB()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, auto&& d, auto&& a, auto&& b)
	{
		auto r = gv_gts8(std::move(a), std::move(b));
		if constexpr (((Flags == has_oe) || ...))
			ppu_cr_set(ppu, 6, gv_testall1(r), false, gv_testall0(r), false);
		d = r;
	};

	RETURN_(ppu, ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VCMPGTSH: Vector Compare Greater-Than Signed Halfword.
auto VCMPGTSH()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, auto&& d, auto&& a, auto&& b)
	{
		auto r = gv_gts16(std::move(a), std::move(b));
		if constexpr (((Flags == has_oe) || ...))
			ppu_cr_set(ppu, 6, gv_testall1(r), false, gv_testall0(r), false);
		d = r;
	};

	RETURN_(ppu, ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VCMPGTSW: Vector Compare Greater-Than Signed Word.
auto VCMPGTSW()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, auto&& d, auto&& a, auto&& b)
	{
		auto r = gv_gts32(std::move(a), std::move(b));
		if constexpr (((Flags == has_oe) || ...))
			ppu_cr_set(ppu, 6, gv_testall1(r), false, gv_testall0(r), false);
		d = r;
	};

	RETURN_(ppu, ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VCMPGTUB: Vector Compare Greater-Than Unsigned Byte.
auto VCMPGTUB()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, auto&& d, auto&& a, auto&& b)
	{
		auto r = gv_gtu8(std::move(a), std::move(b));
		if constexpr (((Flags == has_oe) || ...))
			ppu_cr_set(ppu, 6, gv_testall1(r), false, gv_testall0(r), false);
		d = r;
	};

	RETURN_(ppu, ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VCMPGTUH: Vector Compare Greater-Than Unsigned Halfword.
auto VCMPGTUH()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, auto&& d, auto&& a, auto&& b)
	{
		auto r = gv_gtu16(std::move(a), std::move(b));
		if constexpr (((Flags == has_oe) || ...))
			ppu_cr_set(ppu, 6, gv_testall1(r), false, gv_testall0(r), false);
		d = r;
	};

	RETURN_(ppu, ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VCMPGTUW: Vector Compare Greater-Than Unsigned Word.
auto VCMPGTUW()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, auto&& d, auto&& a, auto&& b)
	{
		auto r = gv_gtu32(std::move(a), std::move(b));
		if constexpr (((Flags == has_oe) || ...))
			ppu_cr_set(ppu, 6, gv_testall1(r), false, gv_testall0(r), false);
		d = r;
	};

	RETURN_(ppu, ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit...
Flags> auto VCTSXS() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<fix_vnan, set_sat>(); static const auto exec = [](auto&& d, auto&& b, auto&& sat, u32 i) { auto r = gv_mulfs(b, gv_bcst32(i)); auto l = gv_ltfs(r, gv_bcstfs(-2147483648.)); auto h = gv_gefs(r, gv_bcstfs(2147483648.)); #if !defined(ARCH_X64) && !defined(ARCH_ARM64) r = gv_selectfs(l, gv_bcstfs(-2147483648.), std::move(r)); #endif r = gv_cvtfs_tos32(std::move(r)); #if !defined(ARCH_ARM64) r = gv_select32(h, gv_bcst32(0x7fffffff), std::move(r)); #endif if constexpr (((Flags == fix_vnan) || ...)) r = gv_and32(std::move(r), gv_eqfs(b, b)); if constexpr (((Flags == set_sat) || ...)) sat = gv_or32(gv_or32(std::move(l), std::move(h)), std::move(sat)); d = std::move(r); }; RETURN_(ppu.vr[op.vd], ppu.vr[op.vb], ppu.sat, (op.vuimm + 127) << 23); } template <u32 Build, ppu_exec_bit... Flags> auto VCTUXS() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<fix_vnan, set_sat>(); static const auto exec = [](auto&& d, auto&& b, auto&& sat, u32 i) { auto r = gv_mulfs(b, gv_bcst32(i)); auto l = gv_ltfs(r, gv_bcstfs(0.)); auto h = gv_gefs(r, gv_bcstfs(4294967296.)); r = gv_cvtfs_tou32(std::move(r)); #if !defined(ARCH_ARM64) r = gv_andn32(l, std::move(r)); // saturate to zero #endif #if !defined(__AVX512VL__) && !defined(ARCH_ARM64) r = gv_or32(std::move(r), h); // saturate to 0xffffffff #endif if constexpr (((Flags == fix_vnan) || ...)) r = gv_and32(std::move(r), gv_eqfs(b, b)); if constexpr (((Flags == set_sat) || ...)) sat = gv_or32(gv_or32(std::move(l), std::move(h)), std::move(sat)); d = std::move(r); }; RETURN_(ppu.vr[op.vd], ppu.vr[op.vb], ppu.sat, (op.vuimm + 127) << 23); } template <u32 Build, ppu_exec_bit... 
Flags> auto VEXPTEFP() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<fix_vnan>(); static const auto exec = [](auto&& d, auto&& b) { // for (u32 i = 0; i < 4; i++) d._f[i] = std::exp2f(b._f[i]); d = ppu_set_vnan<Flags...>(gv_exp2_approxfs(std::move(b))); }; RETURN_(ppu.vr[op.vd], ppu.vr[op.vb]); } template <u32 Build, ppu_exec_bit... Flags> auto VLOGEFP() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<fix_vnan>(); static const auto exec = [](auto&& d, auto&& b) { // for (u32 i = 0; i < 4; i++) d._f[i] = std::log2f(b._f[i]); d = ppu_set_vnan<Flags...>(gv_log2_approxfs(std::move(b))); }; RETURN_(ppu.vr[op.vd], ppu.vr[op.vb]); } template <u32 Build, ppu_exec_bit... Flags> auto VMADDFP() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_nj, fix_nj, set_vnan, fix_vnan>(); static const auto exec = [](auto&& d, auto&& a_, auto&& b_, auto&& c_, auto&& jm_mask) { auto m = gv_bcst32(jm_mask, &ppu_thread::jm_mask); auto a = ppu_flush_denormal<false, Flags...>(m, std::move(a_)); auto b = ppu_flush_denormal<false, Flags...>(m, std::move(b_)); auto c = ppu_flush_denormal<false, Flags...>(m, std::move(c_)); d = ppu_flush_denormal<true, Flags...>(std::move(m), ppu_set_vnan<Flags...>(gv_fmafs(a, c, b))); }; RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb], ppu.vr[op.vc], ppu.jm_mask); } template <u32 Build, ppu_exec_bit... Flags> auto VMAXFP() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_nj, fix_nj, set_vnan, fix_vnan>(); static const auto exec = [](auto&& d, auto&& a, auto&& b, auto&& jm_mask) { d = ppu_flush_denormal<true, Flags...>(gv_bcst32(jm_mask, &ppu_thread::jm_mask), ppu_set_vnan<Flags...>(gv_maxfs(a, b), a, b)); }; RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb], ppu.jm_mask); } template <u32 Build, ppu_exec_bit... 
Flags> auto VMAXSB() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](auto&& d, auto&& a, auto&& b) { d = gv_maxs8(std::move(a), std::move(b)); }; RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]); } template <u32 Build, ppu_exec_bit... Flags> auto VMAXSH() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](auto&& d, auto&& a, auto&& b) { d = gv_maxs16(std::move(a), std::move(b)); }; RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]); } template <u32 Build, ppu_exec_bit... Flags> auto VMAXSW() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](auto&& d, auto&& a, auto&& b) { d = gv_maxs32(std::move(a), std::move(b)); }; RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]); } template <u32 Build, ppu_exec_bit... Flags> auto VMAXUB() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](auto&& d, auto&& a, auto&& b) { d = gv_maxu8(std::move(a), std::move(b)); }; RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]); } template <u32 Build, ppu_exec_bit... Flags> auto VMAXUH() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](auto&& d, auto&& a, auto&& b) { d = gv_maxu16(std::move(a), std::move(b)); }; RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]); } template <u32 Build, ppu_exec_bit... Flags> auto VMAXUW() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](auto&& d, auto&& a, auto&& b) { d = gv_maxu32(std::move(a), std::move(b)); }; RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]); } template <u32 Build, ppu_exec_bit... 
Flags> auto VMHADDSHS() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<set_sat>(); static const auto exec = [](auto&& d, auto&& a, auto&& b, auto&& c, auto&& sat) { auto m = gv_muls_hds16(a, b); auto f = gv_gts16(gv_bcst16(0), c); auto x = gv_eq16(gv_maxs16(std::move(a), std::move(b)), gv_bcst16(0x8000)); auto r = gv_sub16(gv_adds_s16(m, c), gv_and32(x, f)); auto s = gv_add16(std::move(m), std::move(c)); if constexpr (((Flags == set_sat) || ...)) sat = gv_or32(gv_or32(gv_andn32(std::move(f), x), gv_andn32(x, gv_xor32(std::move(s), r))), sat); d = std::move(r); }; RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb], ppu.vr[op.vc], ppu.sat); } template <u32 Build, ppu_exec_bit... Flags> auto VMHRADDSHS() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<set_sat>(); static const auto exec = [](auto&& d, auto&& a, auto&& b, auto&& c, auto&& sat) { if constexpr (((Flags != set_sat) && ...)) { d = gv_rmuladds_hds16(std::move(a), std::move(b), std::move(c)); } else { auto m = gv_rmuls_hds16(a, b); auto f = gv_gts16(gv_bcst16(0), c); auto x = gv_eq16(gv_maxs16(std::move(a), std::move(b)), gv_bcst16(0x8000)); auto r = gv_sub16(gv_adds_s16(m, c), gv_and32(x, f)); auto s = gv_add16(std::move(m), std::move(c)); if constexpr (((Flags == set_sat) || ...)) sat = gv_or32(gv_or32(gv_andn32(std::move(f), x), gv_andn32(x, gv_xor32(std::move(s), r))), sat); d = std::move(r); } }; RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb], ppu.vr[op.vc], ppu.sat); } template <u32 Build, ppu_exec_bit... 
Flags>
// VMINFP: Vector Minimum Floating-Point (per lane), with NaN handling and
// output denormal flushing. Note: unlike VMAXFP, use_nj is not among the
// selected flags here — presumably intentional; confirm against the
// interpreter's flag tables if touching this.
auto VMINFP()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<fix_nj, set_vnan, fix_vnan>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b, auto&& jm_mask)
	{
		d = ppu_flush_denormal<true, Flags...>(gv_bcst32(jm_mask, &ppu_thread::jm_mask), ppu_set_vnan<Flags...>(gv_minfs(a, b), a, b));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb], ppu.jm_mask);
}

template <u32 Build, ppu_exec_bit... Flags>
// VMINSB: per-lane signed byte minimum.
auto VMINSB()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b)
	{
		d = gv_mins8(std::move(a), std::move(b));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VMINSH: per-lane signed halfword minimum.
auto VMINSH()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b)
	{
		d = gv_mins16(std::move(a), std::move(b));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VMINSW: per-lane signed word minimum.
auto VMINSW()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b)
	{
		d = gv_mins32(std::move(a), std::move(b));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VMINUB: per-lane unsigned byte minimum.
auto VMINUB()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b)
	{
		d = gv_minu8(std::move(a), std::move(b));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VMINUH: per-lane unsigned halfword minimum.
auto VMINUH()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b)
	{
		d = gv_minu16(std::move(a), std::move(b));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VMINUW: per-lane unsigned word minimum.
auto VMINUW()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b)
	{
		d = gv_minu32(std::move(a), std::move(b));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VMLADDUHM: Vector Multiply-Low and Add Unsigned Halfword Modulo:
// d = (a * b + c) mod 2^16 per halfword.
auto VMLADDUHM()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b, auto&& c)
	{
		d = gv_muladd16(std::move(a), std::move(b), std::move(c));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb], ppu.vr[op.vc]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VMRGHB: merge high bytes of a and b (interleaved); operand order is
// swapped because gv_unpackhi* interleaves little-endian style.
auto VMRGHB()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b)
	{
		d = gv_unpackhi8(std::move(b), std::move(a));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VMRGHH: merge high halfwords of a and b.
auto VMRGHH()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b)
	{
		d = gv_unpackhi16(std::move(b), std::move(a));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VMRGHW: merge high words of a and b.
auto VMRGHW()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b)
	{
		d = gv_unpackhi32(std::move(b), std::move(a));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VMRGLB: merge low bytes of a and b.
auto VMRGLB()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b)
	{
		d = gv_unpacklo8(std::move(b), std::move(a));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit...
Flags>
// VMRGLH: merge low halfwords of a and b (gv_unpacklo* interleaves
// little-endian style, hence the swapped operand order).
auto VMRGLH()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b)
	{
		d = gv_unpacklo16(std::move(b), std::move(a));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VMRGLW: merge low words of a and b.
auto VMRGLW()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b)
	{
		d = gv_unpacklo32(std::move(b), std::move(a));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VMSUMMBM: Vector Multiply-Sum Mixed Byte Modulo — dot products of signed
// bytes of a with unsigned bytes of b (4 per word), accumulated into c.
// gv_dotu8s8x4 takes (unsigned, signed, acc), hence b before a.
auto VMSUMMBM()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b, auto&& c)
	{
		d = gv_dotu8s8x4(std::move(b), std::move(a), std::move(c));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb], ppu.vr[op.vc]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VMSUMSHM: Vector Multiply-Sum Signed Halfword Modulo — pairwise s16 dot
// products accumulated into c, modulo 2^32.
auto VMSUMSHM()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b, auto&& c)
	{
		d = gv_dots16x2(std::move(a), std::move(b), std::move(c));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb], ppu.vr[op.vc]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VMSUMSHS: saturating variant of VMSUMSHM; saturation is detected by
// comparing the saturating result r against the wrapping result s.
auto VMSUMSHS()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<set_sat>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b, auto&& c, auto&& sat)
	{
		auto r = gv_dots_s16x2(a, b, c);
		auto s = gv_dots16x2(std::move(a), std::move(b), std::move(c));
		if constexpr (((Flags == set_sat) || ...))
			sat = gv_or32(gv_xor32(std::move(s), r), std::move(sat));
		d = std::move(r);
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb], ppu.vr[op.vc], ppu.sat);
}

template <u32 Build, ppu_exec_bit... Flags>
// VMSUMUBM: Vector Multiply-Sum Unsigned Byte Modulo — u8 dot products
// (4 per word) accumulated into c.
auto VMSUMUBM()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b, auto&& c)
	{
		d = gv_dotu8x4(std::move(a), std::move(b), std::move(c));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb], ppu.vr[op.vc]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VMSUMUHM: Vector Multiply-Sum Unsigned Halfword Modulo — pairwise u16 dot
// products plus c, modulo 2^32.
auto VMSUMUHM()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b, auto&& c)
	{
		d = gv_add32(std::move(c), gv_dotu16x2(std::move(a), std::move(b)));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb], ppu.vr[op.vc]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VMSUMUHS: saturating variant of VMSUMUHM. Unsigned additions are chained
// with carry-out detection: x1/x2 mark lanes whose sum wrapped; saturated
// lanes are forced to all-ones and recorded in ppu.sat.
auto VMSUMUHS()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<set_sat>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b, auto&& c, auto&& sat)
	{
		auto m1 = gv_mul_even_u16(a, b);
		auto m2 = gv_mul_odds_u16(std::move(a), std::move(b));
		auto s1 = gv_add32(m1, m2);
		auto x1 = gv_gtu32(m1, s1); // carry out of m1 + m2
		auto s2 = gv_or32(gv_add32(s1, std::move(c)), x1);
		auto x2 = gv_gtu32(std::move(s1), s2); // carry out of s1 + c
		if constexpr (((Flags == set_sat) || ...))
			sat = gv_or32(gv_or32(std::move(x1), x2), std::move(sat));
		d = gv_or32(std::move(s2), std::move(x2));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb], ppu.vr[op.vc], ppu.sat);
}

template <u32 Build, ppu_exec_bit... Flags>
// VMULESB: Vector Multiply Even Signed Byte — sign-extends the even (high)
// byte of each halfword via arithmetic shift, then 16-bit multiply.
auto VMULESB()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b)
	{
		d = gv_mul16(gv_sar16(std::move(a), 8), gv_sar16(std::move(b), 8));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit...
Flags>
// VMULESH: Vector Multiply Even Signed Halfword (s16 x s16 -> s32).
auto VMULESH()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b)
	{
		d = gv_mul_odds_s16(std::move(a), std::move(b));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VMULEUB: Vector Multiply Even Unsigned Byte — zero-extends the even (high)
// byte of each halfword via logical shift, then 16-bit multiply.
auto VMULEUB()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b)
	{
		d = gv_mul16(gv_shr16(std::move(a), 8), gv_shr16(std::move(b), 8));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VMULEUH: Vector Multiply Even Unsigned Halfword (u16 x u16 -> u32).
auto VMULEUH()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b)
	{
		d = gv_mul_odds_u16(std::move(a), std::move(b));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VMULOSB: Vector Multiply Odd Signed Byte — isolates the odd (low) byte of
// each halfword with shl+sar sign extension, then 16-bit multiply.
auto VMULOSB()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b)
	{
		d = gv_mul16(gv_sar16(gv_shl16(std::move(a), 8), 8), gv_sar16(gv_shl16(std::move(b), 8), 8));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VMULOSH: Vector Multiply Odd Signed Halfword (s16 x s16 -> s32).
auto VMULOSH()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b)
	{
		d = gv_mul_even_s16(std::move(a), std::move(b));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VMULOUB: Vector Multiply Odd Unsigned Byte — masks the odd (low) byte of
// each halfword, then 16-bit multiply.
auto VMULOUB()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b)
	{
		auto mask = gv_bcst16(0x00ff);
		d = gv_mul16(gv_and32(std::move(a), mask), gv_and32(std::move(b), mask));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VMULOUH: Vector Multiply Odd Unsigned Halfword (u16 x u16 -> u32).
auto VMULOUH()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b)
	{
		d = gv_mul_even_u16(std::move(a), std::move(b));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VNMSUBFP: Vector Negative Multiply-Subtract Floating-Point:
// d = -(a * c - b), implemented as sign-flips around an FMA so the rounding
// matches hardware.
auto VNMSUBFP()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<use_nj, fix_nj, set_vnan, fix_vnan>();

	static const auto exec = [](auto&& d, auto&& a_, auto&& b_, auto&& c_, auto&& jm_mask)
	{
		// An odd case with (FLT_MIN, FLT_MIN, FLT_MIN) produces FLT_MIN instead of 0
		auto s = gv_bcstfs(-0.0f);
		auto m = gv_bcst32(jm_mask, &ppu_thread::jm_mask);
		auto a = ppu_flush_denormal<false, Flags...>(m, std::move(a_));
		auto b = ppu_flush_denormal<false, Flags...>(m, std::move(b_));
		auto c = ppu_flush_denormal<false, Flags...>(m, std::move(c_));
		auto r = gv_xorfs(std::move(s), gv_fmafs(std::move(a), std::move(c), gv_xorfs(std::move(b), s)));
		d = ppu_flush_denormal<true, Flags...>(std::move(m), ppu_set_vnan<Flags...>(std::move(r)));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb], ppu.vr[op.vc], ppu.jm_mask);
}

template <u32 Build, ppu_exec_bit... Flags>
// VNOR: bitwise NOR of a and b. Note this uses the RETURN macro (not
// RETURN_) — the two differ in how the dispatch is built; keep as-is.
auto VNOR()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b)
	{
		d = gv_notfs(gv_orfs(std::move(a), std::move(b)));
	};

	RETURN(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit...
Flags> auto VOR() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](auto&& d, auto&& a, auto&& b) { d = gv_orfs(std::move(a), std::move(b)); }; RETURN(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]); } template <u32 Build, ppu_exec_bit... Flags> auto VPERM() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); #if defined (ARCH_X64) if constexpr (Build == 0) { static const ppu_intrp_func_t f = build_function_asm<ppu_intrp_func_t, asmjit::ppu_builder>("ppu_VPERM", [&](asmjit::ppu_builder& c, native_args&) { const auto [v0, v1, v2, v3] = c.vec_alloc<4>(); c.movdqa(v0, c.ppu_vr(s_op.vc)); c.pandn(v0, c.get_const(v128::from8p(0x1f))); c.movdqa(v1, v0); c.pcmpgtb(v1, c.get_const(v128::from8p(0xf))); c.movdqa(v2, c.ppu_vr(s_op.va)); c.movdqa(v3, c.ppu_vr(s_op.vb)); c.pshufb(v2, v0); c.pshufb(v3, v0); c.pand(v2, v1); c.pandn(v1, v3); c.por(v1, v2); c.movdqa(c.ppu_vr(s_op.vd), v1); c.ppu_ret(); }); if (utils::has_ssse3()) { return f; } } #endif static const auto exec = [](auto&& d, auto&& a, auto&& b, auto&& c) { #if defined(ARCH_ARM64) uint8x16x2_t ab; ab.val[0] = b; ab.val[1] = a; d = vqtbl2q_u8(ab, vbicq_u8(vdupq_n_u8(0x1f), c)); #else u8 ab[32]; std::memcpy(ab + 0, &b, 16); std::memcpy(ab + 16, &a, 16); for (u32 i = 0; i < 16; i++) { d._u8[i] = ab[~c._u8[i] & 0x1f]; } #endif }; RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb], ppu.vr[op.vc]); } template <u32 Build, ppu_exec_bit... 
Flags>
// VPKPX: Vector Pack Pixel — packs eight 32-bit pixels (from a then b) into
// eight 1:5:5:5 halfwords. The shl/sar pairs position and sign-extend each
// channel so gv_packss_s32 can narrow without clipping the wanted bits;
// the final masks/shifts assemble the 0xfc00 / 0x7c<<3 / 0x1f fields.
auto VPKPX()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b)
	{
		auto a1 = gv_sar32(gv_shl32(a, 7), 7 + 9);
		auto b1 = gv_sar32(gv_shl32(b, 7), 7 + 9);
		auto a2 = gv_sar32(gv_shl32(a, 16), 16 + 3);
		auto b2 = gv_sar32(gv_shl32(b, 16), 16 + 3);
		auto p1 = gv_packss_s32(b1, a1);
		auto p2 = gv_packss_s32(b2, a2);
		d = gv_or32(gv_or32(gv_and32(p1, gv_bcst16(0xfc00)), gv_shl16(gv_and32(p1, gv_bcst16(0x7c)), 3)), gv_and32(p2, gv_bcst16(0x1f)));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VPKSHSS: Pack Signed Halfword -> Signed Byte Saturate. The sat update
// detects lanes outside [-0x80, 0x7f]: adding 0x80 leaves a nonzero high
// byte exactly when the halfword doesn't fit in a signed byte.
auto VPKSHSS()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<set_sat>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b, auto&& sat)
	{
		if constexpr (((Flags == set_sat) || ...))
			sat = gv_or32(gv_shr16(gv_add16(a, gv_bcst16(0x80)) | gv_add16(b, gv_bcst16(0x80)), 8), std::move(sat));
		d = gv_packss_s16(std::move(b), std::move(a));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb], ppu.sat);
}

template <u32 Build, ppu_exec_bit... Flags>
// VPKSHUS: Pack Signed Halfword -> Unsigned Byte Saturate. A nonzero high
// byte in either source halfword means the value is outside [0, 0xff].
auto VPKSHUS()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<set_sat>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b, auto&& sat)
	{
		if constexpr (((Flags == set_sat) || ...))
			sat = gv_or32(gv_shr16(a | b, 8), std::move(sat));
		d = gv_packus_s16(std::move(b), std::move(a));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb], ppu.sat);
}

template <u32 Build, ppu_exec_bit... Flags>
// VPKSWSS: Pack Signed Word -> Signed Halfword Saturate (32-bit analog of
// VPKSHSS, bias 0x8000, check via the high 16 bits).
auto VPKSWSS()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<set_sat>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b, auto&& sat)
	{
		if constexpr (((Flags == set_sat) || ...))
			sat = gv_or32(gv_shr32(gv_add32(a, gv_bcst32(0x8000)) | gv_add32(b, gv_bcst32(0x8000)), 16), std::move(sat));
		d = gv_packss_s32(std::move(b), std::move(a));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb], ppu.sat);
}

template <u32 Build, ppu_exec_bit... Flags>
// VPKSWUS: Pack Signed Word -> Unsigned Halfword Saturate.
auto VPKSWUS()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<set_sat>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b, auto&& sat)
	{
		if constexpr (((Flags == set_sat) || ...))
			sat = gv_or32(gv_shr32(a | b, 16), std::move(sat));
		d = gv_packus_s32(std::move(b), std::move(a));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb], ppu.sat);
}

template <u32 Build, ppu_exec_bit... Flags>
// VPKUHUM: Pack Unsigned Halfword -> Unsigned Byte Modulo (truncating).
auto VPKUHUM()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b)
	{
		d = gv_packtu16(std::move(b), std::move(a));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VPKUHUS: Pack Unsigned Halfword -> Unsigned Byte Saturate.
auto VPKUHUS()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<set_sat>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b, auto&& sat)
	{
		if constexpr (((Flags == set_sat) || ...))
			sat = gv_or32(gv_shr16(a | b, 8), std::move(sat));
		d = gv_packus_u16(std::move(b), std::move(a));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb], ppu.sat);
}

template <u32 Build, ppu_exec_bit... Flags>
// VPKUWUM: Pack Unsigned Word -> Unsigned Halfword Modulo (truncating).
auto VPKUWUM()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b)
	{
		d = gv_packtu32(std::move(b), std::move(a));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit...
Flags>
// VPKUWUS: Pack Unsigned Word -> Unsigned Halfword Saturate; a nonzero high
// halfword in either source word marks saturation.
auto VPKUWUS()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<set_sat>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b, auto&& sat)
	{
		if constexpr (((Flags == set_sat) || ...))
			sat = gv_or32(gv_shr32(a | b, 16), std::move(sat));
		d = gv_packus_u32(std::move(b), std::move(a));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb], ppu.sat);
}

template <u32 Build, ppu_exec_bit... Flags>
// VREFP: Vector Reciprocal Estimate Floating-Point, computed here as a full
// 1.0 / b division, with denormal flushing and NaN handling.
auto VREFP()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<use_nj, fix_nj, set_vnan, fix_vnan>();

	static const auto exec = [](auto&& d, auto&& b_, auto&& jm_mask)
	{
		auto m = gv_bcst32(jm_mask, &ppu_thread::jm_mask);
		auto b = ppu_flush_denormal<false, Flags...>(m, std::move(b_));
		d = ppu_flush_denormal<true, Flags...>(std::move(m), ppu_set_vnan<Flags...>(gv_divfs(gv_bcstfs(1.0f), b), b));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.vb], ppu.jm_mask);
}

template <u32 Build, ppu_exec_bit... Flags>
// VRFIM: Vector Round to FP Integer toward Minus Infinity (floor), with
// input/output denormal flushing.
auto VRFIM()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<use_nj, fix_nj, set_vnan, fix_vnan>();

	static const auto exec = [](auto&& d, auto&& b_, auto&& jm_mask)
	{
		auto m = gv_bcst32(jm_mask, &ppu_thread::jm_mask);
		auto b = ppu_flush_denormal<false, Flags...>(m, std::move(b_));
		d = ppu_flush_denormal<true, Flags...>(std::move(m), ppu_set_vnan<Flags...>(gv_roundfs_floor(b), b));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.vb], ppu.jm_mask);
}

template <u32 Build, ppu_exec_bit... Flags>
// VRFIN: Round to Nearest (ties to even). Note: no use_nj flag and no input
// flush here, unlike VRFIM/VRFIP — presumably because rounding of a denormal
// to nearest is exact; confirm against the flag tables if touching this.
auto VRFIN()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<fix_nj, set_vnan, fix_vnan>();

	static const auto exec = [](auto&& d, auto&& b, auto&& jm_mask)
	{
		auto m = gv_bcst32(jm_mask, &ppu_thread::jm_mask);
		d = ppu_flush_denormal<true, Flags...>(std::move(m), ppu_set_vnan<Flags...>(gv_roundfs_even(b), b));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.vb], ppu.jm_mask);
}

template <u32 Build, ppu_exec_bit... Flags>
// VRFIP: Round toward Plus Infinity (ceil), with input/output flush.
auto VRFIP()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<use_nj, fix_nj, set_vnan, fix_vnan>();

	static const auto exec = [](auto&& d, auto&& b_, auto&& jm_mask)
	{
		auto m = gv_bcst32(jm_mask, &ppu_thread::jm_mask);
		auto b = ppu_flush_denormal<false, Flags...>(m, std::move(b_));
		d = ppu_flush_denormal<true, Flags...>(std::move(m), ppu_set_vnan<Flags...>(gv_roundfs_ceil(b), b));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.vb], ppu.jm_mask);
}

template <u32 Build, ppu_exec_bit... Flags>
// VRFIZ: Round toward Zero (truncate); like VRFIN, no input flush.
auto VRFIZ()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<fix_nj, set_vnan, fix_vnan>();

	static const auto exec = [](auto&& d, auto&& b, auto&& jm_mask)
	{
		auto m = gv_bcst32(jm_mask, &ppu_thread::jm_mask);
		d = ppu_flush_denormal<true, Flags...>(std::move(m), ppu_set_vnan<Flags...>(gv_roundfs_trunc(b), b));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.vb], ppu.jm_mask);
}

template <u32 Build, ppu_exec_bit... Flags>
// VRLB: per-byte rotate left by the low bits of b.
auto VRLB()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b)
	{
		d = gv_rol8(std::move(a), std::move(b));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VRLH: per-halfword rotate left.
auto VRLH()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b)
	{
		d = gv_rol16(std::move(a), std::move(b));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VRLW: per-word rotate left.
auto VRLW()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b)
	{
		d = gv_rol32(std::move(a), std::move(b));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit...
Flags>
// VRSQRTEFP: Vector Reciprocal Square Root Estimate Floating-Point, computed
// as a full 1.0 / sqrt(b), with denormal flushing and NaN handling.
auto VRSQRTEFP()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<use_nj, fix_nj, set_vnan, fix_vnan>();

	static const auto exec = [](auto&& d, auto&& b_, auto&& jm_mask)
	{
		auto m = gv_bcst32(jm_mask, &ppu_thread::jm_mask);
		auto b = ppu_flush_denormal<false, Flags...>(m, std::move(b_));
		d = ppu_flush_denormal<true, Flags...>(std::move(m), ppu_set_vnan<Flags...>(gv_divfs(gv_bcstfs(1.0f), gv_sqrtfs(b)), b));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.vb], ppu.jm_mask);
}

template <u32 Build, ppu_exec_bit... Flags>
// VSEL: bitwise select — d = (b & c) | (a & ~c); c chooses b where set,
// a where clear. Uses RETURN (not RETURN_).
auto VSEL()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b, auto&& c)
	{
		auto x = gv_andfs(std::move(b), c);
		d = gv_orfs(std::move(x), gv_andnfs(std::move(c), std::move(a)));
	};

	RETURN(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb], ppu.vr[op.vc]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VSL: Vector Shift Left (whole 128-bit value, by the bit count in b),
// implemented as a per-byte funnel shift with the vector shifted left one
// byte as the carry-in source.
auto VSL()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b)
	{
		d = gv_fshl8(std::move(a), gv_shuffle_left<1>(a), std::move(b));
	};

	RETURN(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VSLB: per-byte logical shift left.
auto VSLB()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b)
	{
		d = gv_shl8(std::move(a), std::move(b));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

// VSLDOI: Vector Shift Left Double by Octet Immediate — the byte count is a
// compile-time template parameter (Count), so each shift amount gets its own
// instantiation; d = high 16 bytes of (a:b) << (Count bytes).
template <u32 Count>
struct VSLDOI
{
	template <ppu_exec_bit... Flags>
	static auto select(bs_t<ppu_exec_bit> selected, auto func)
	{
		return ppu_exec_select<>::select<Flags...>(selected, func);
	}

	template <u32 Build, ppu_exec_bit... Flags>
	static auto impl()
	{
		static const auto exec = [](auto&& d, auto&& a, auto&& b)
		{
			d = gv_or32(gv_shuffle_left<Count>(std::move(a)), gv_shuffle_right<16 - Count>(std::move(b)));
		};

		RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
	}
};

template <u32 Build, ppu_exec_bit... Flags>
// VSLH: per-halfword logical shift left.
auto VSLH()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b)
	{
		d = gv_shl16(std::move(a), std::move(b));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VSLO: Vector Shift Left by Octet — shifts the whole 128-bit value left by
// the byte count encoded in bits 1..4 of b's first byte (mask 0x78 keeps the
// count pre-multiplied by 8 bits).
auto VSLO()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b)
	{
		d._u = a._u << (b._u8[0] & 0x78);
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VSLW: per-word logical shift left.
auto VSLW()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b)
	{
		d = gv_shl32(std::move(a), std::move(b));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VSPLTB: splat byte — broadcasts the byte selected by the immediate
// (u8r = big-endian-indexed accessor) to all lanes.
auto VSPLTB()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& b, auto&& imm)
	{
		d = gv_bcst8(b.u8r[imm & 15]);
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.vb], op.vuimm);
}

template <u32 Build, ppu_exec_bit... Flags>
// VSPLTH: splat halfword selected by the immediate.
auto VSPLTH()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& b, auto&& imm)
	{
		d = gv_bcst16(b.u16r[imm & 7]);
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.vb], op.vuimm);
}

template <u32 Build, ppu_exec_bit...
Flags>
// VSPLTISB: splat signed 5-bit immediate to every byte.
auto VSPLTISB()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& imm)
	{
		d = gv_bcst8(imm);
	};

	RETURN_(ppu.vr[op.vd], op.vsimm);
}

template <u32 Build, ppu_exec_bit... Flags>
// VSPLTISH: splat signed immediate to every halfword.
auto VSPLTISH()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& imm)
	{
		d = gv_bcst16(imm);
	};

	RETURN_(ppu.vr[op.vd], op.vsimm);
}

template <u32 Build, ppu_exec_bit... Flags>
// VSPLTISW: splat signed immediate to every word.
auto VSPLTISW()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& imm)
	{
		d = gv_bcst32(imm);
	};

	RETURN_(ppu.vr[op.vd], op.vsimm);
}

template <u32 Build, ppu_exec_bit... Flags>
// VSPLTW: splat the word selected by the immediate (u32r = big-endian-indexed
// accessor).
auto VSPLTW()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& b, auto&& imm)
	{
		d = gv_bcst32(b.u32r[imm & 3]);
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.vb], op.vuimm);
}

template <u32 Build, ppu_exec_bit... Flags>
// VSR: Vector Shift Right (whole 128-bit value, by the bit count in b) —
// per-byte funnel shift with the vector shifted right one byte as carry-in.
// Uses RETURN (not RETURN_).
auto VSR()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b)
	{
		d = gv_fshr8(gv_shuffle_right<1>(a), std::move(a), std::move(b));
	};

	RETURN(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VSRAB: per-byte arithmetic shift right.
auto VSRAB()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b)
	{
		d = gv_sar8(std::move(a), std::move(b));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VSRAH: per-halfword arithmetic shift right.
auto VSRAH()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b)
	{
		d = gv_sar16(std::move(a), std::move(b));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VSRAW: per-word arithmetic shift right.
auto VSRAW()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b)
	{
		d = gv_sar32(std::move(a), std::move(b));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VSRB: per-byte logical shift right.
auto VSRB()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b)
	{
		d = gv_shr8(std::move(a), std::move(b));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VSRH: per-halfword logical shift right.
auto VSRH()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b)
	{
		d = gv_shr16(std::move(a), std::move(b));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VSRO: Vector Shift Right by Octet — whole-vector shift by the byte count
// in bits 1..4 of b's first byte (mask 0x78 = count pre-multiplied by 8).
auto VSRO()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b)
	{
		d._u = a._u >> (b._u8[0] & 0x78);
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit... Flags>
// VSRW: per-word logical shift right.
auto VSRW()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b)
	{
		d = gv_shr32(std::move(a), std::move(b));
	};

	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

template <u32 Build, ppu_exec_bit...
Flags> auto VSUBCUW()
{
	// Vector Subtract and Write Carry-out Unsigned Word: 1 if a >= b (no borrow), else 0, per 32-bit lane.
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b) { d = gv_shr32(gv_geu32(std::move(a), std::move(b)), 31); };
	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

// Vector Subtract Floating-Point. Optionally flushes denormal inputs/outputs (non-Java mode)
// and patches NaN results, controlled by the use_nj/fix_nj/set_vnan/fix_vnan exec flags.
template <u32 Build, ppu_exec_bit... Flags>
auto VSUBFP()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<use_nj, fix_nj, set_vnan, fix_vnan>();

	static const auto exec = [](auto&& d, auto&& a_, auto&& b_, auto&& jm_mask)
	{
		// jm_mask broadcast selects Java/non-Java denormal handling per thread state.
		auto m = gv_bcst32(jm_mask, &ppu_thread::jm_mask);
		auto a = ppu_flush_denormal<false, Flags...>(m, std::move(a_));
		auto b = ppu_flush_denormal<false, Flags...>(m, std::move(b_));
		d = ppu_flush_denormal<true, Flags...>(std::move(m), ppu_set_vnan<Flags...>(gv_subfs(a, b), a, b));
	};
	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb], ppu.jm_mask);
}

// Vector Subtract Signed Byte Saturate. When set_sat is requested, accumulates the
// saturation indicator into ppu.sat (wrapping-minus-saturating XOR is nonzero iff saturated).
template <u32 Build, ppu_exec_bit... Flags>
auto VSUBSBS()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<set_sat>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b, auto&& sat)
	{
		if constexpr (((Flags == set_sat) || ...))
		{
			auto r = gv_subs_s8(a, b);
			sat = gv_or32(gv_xor32(gv_sub8(std::move(a), std::move(b)), r), std::move(sat));
			d = std::move(r);
		}
		else
		{
			d = gv_subs_s8(std::move(a), std::move(b));
		}
	};
	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb], ppu.sat);
}

// Vector Subtract Signed Halfword Saturate (same saturation-tracking scheme as VSUBSBS).
template <u32 Build, ppu_exec_bit... Flags>
auto VSUBSHS()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<set_sat>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b, auto&& sat)
	{
		if constexpr (((Flags == set_sat) || ...))
		{
			auto r = gv_subs_s16(a, b);
			sat = gv_or32(gv_xor32(gv_sub16(std::move(a), std::move(b)), r), std::move(sat));
			d = std::move(r);
		}
		else
		{
			d = gv_subs_s16(std::move(a), std::move(b));
		}
	};
	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb], ppu.sat);
}

// Vector Subtract Signed Word Saturate.
template <u32 Build, ppu_exec_bit... Flags>
auto VSUBSWS()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<set_sat>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b, auto&& sat)
	{
		if constexpr (((Flags == set_sat) || ...))
		{
			auto r = gv_subs_s32(a, b);
			sat = gv_or32(gv_xor32(gv_sub32(std::move(a), std::move(b)), r), std::move(sat));
			d = std::move(r);
		}
		else
		{
			d = gv_subs_s32(std::move(a), std::move(b));
		}
	};
	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb], ppu.sat);
}

// Vector Subtract Unsigned Byte Modulo (wrapping).
template <u32 Build, ppu_exec_bit... Flags>
auto VSUBUBM()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b) { d = gv_sub8(std::move(a), std::move(b)); };
	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

// Vector Subtract Unsigned Byte Saturate (clamps at 0).
template <u32 Build, ppu_exec_bit... Flags>
auto VSUBUBS()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<set_sat>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b, auto&& sat)
	{
		if constexpr (((Flags == set_sat) || ...))
		{
			auto r = gv_subus_u8(a, b);
			sat = gv_or32(gv_xor32(gv_sub8(std::move(a), std::move(b)), r), std::move(sat));
			d = std::move(r);
		}
		else
		{
			d = gv_subus_u8(std::move(a), std::move(b));
		}
	};
	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb], ppu.sat);
}

// Vector Subtract Unsigned Halfword Modulo (wrapping).
template <u32 Build, ppu_exec_bit... Flags>
auto VSUBUHM()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b) { d = gv_sub16(std::move(a), std::move(b)); };
	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

// Vector Subtract Unsigned Halfword Saturate.
template <u32 Build, ppu_exec_bit... Flags>
auto VSUBUHS()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<set_sat>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b, auto&& sat)
	{
		if constexpr (((Flags == set_sat) || ...))
		{
			auto r = gv_subus_u16(a, b);
			sat = gv_or32(gv_xor32(gv_sub16(std::move(a), std::move(b)), r), std::move(sat));
			d = std::move(r);
		}
		else
		{
			d = gv_subus_u16(std::move(a), std::move(b));
		}
	};
	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb], ppu.sat);
}

// Vector Subtract Unsigned Word Modulo (wrapping).
template <u32 Build, ppu_exec_bit... Flags>
auto VSUBUWM()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b) { d = gv_sub32(std::move(a), std::move(b)); };
	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

// Vector Subtract Unsigned Word Saturate.
template <u32 Build, ppu_exec_bit... Flags>
auto VSUBUWS()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<set_sat>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b, auto&& sat)
	{
		if constexpr (((Flags == set_sat) || ...))
		{
			auto r = gv_subus_u32(a, b);
			sat = gv_or32(gv_xor32(gv_sub32(std::move(a), std::move(b)), r), std::move(sat));
			d = std::move(r);
		}
		else
		{
			d = gv_subus_u32(std::move(a), std::move(b));
		}
	};
	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb], ppu.sat);
}

// Vector Sum Across Signed Word Saturate: b[0] + sum(a[0..3]) with signed-32 clamping;
// the result occupies the low word (d._u assignment zeroes the rest of the register).
template <u32 Build, ppu_exec_bit... Flags>
auto VSUMSWS()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<set_sat>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b, auto&& sat)
	{
		s64 sum = s64{b._s32[0]} + a._s32[0] + a._s32[1] + a._s32[2] + a._s32[3];

		if (sum > INT32_MAX)
		{
			sum = u32(INT32_MAX);

			if constexpr (((Flags == set_sat) || ...))
				sat._bytes[0] = 1;
		}
		else if (sum < INT32_MIN)
		{
			sum = u32(INT32_MIN);

			if constexpr (((Flags == set_sat) || ...))
				sat._bytes[0] = 1;
		}
		else
		{
			sum = static_cast<u32>(sum);
		}

		d._u = sum;
	};
	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb], ppu.sat);
}

template <u32 Build, ppu_exec_bit...
Flags> auto VSUM2SWS()
{
	// Vector Sum Across Partial (1/2) Signed Word Saturate: per 64-bit half,
	// a_even + a_odd + b_even, saturated to s32, placed in the even word of each half.
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<set_sat>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b, auto&& sat)
	{
#if defined(__AVX512VL__)
		// Widen words to s64, add pairwise, then saturating-narrow back to s32.
		const auto x = gv_add64(gv_sar64(gv_shl64(a, 32), 32), gv_sar64(a, 32));
		const auto y = gv_add64(x, gv_sar64(gv_shl64(b, 32), 32));
		const auto r = _mm_unpacklo_epi32(_mm_cvtsepi64_epi32(y), _mm_setzero_si128());
#elif defined(ARCH_ARM64)
		const auto x = vaddl_s32(vget_low_s32(vuzp1q_s32(a, a)), vget_low_s32(vuzp2q_s32(a, a)));
		const auto y = vaddw_s32(x, vget_low_s32(vuzp1q_s32(b, b)));
		const auto r = vmovl_u32(uint32x2_t(vqmovn_s64(y)));
#else
		// Portable fallback: exact 64-bit sums, manual s32 clamping.
		v128 y{};
		y._s64[0] = s64{a._s32[0]} + a._s32[1] + b._s32[0];
		y._s64[1] = s64{a._s32[2]} + a._s32[3] + b._s32[2];
		v128 r{};
		r._u64[0] = y._s64[0] > INT32_MAX ? INT32_MAX : y._s64[0] < INT32_MIN ? u32(INT32_MIN) : static_cast<u32>(y._s64[0]);
		r._u64[1] = y._s64[1] > INT32_MAX ? INT32_MAX : y._s64[1] < INT32_MIN ? u32(INT32_MIN) : static_cast<u32>(y._s64[1]);
#endif

		// Saturation detection: (y + 0x80000000) >> 32 is nonzero iff y left s32 range.
		if constexpr (((Flags == set_sat) || ...))
			sat = gv_or32(gv_shr64(gv_add64(y, gv_bcst64(0x80000000u)), 32), std::move(sat));

		d = r;
	};
	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb], ppu.sat);
}

// Vector Sum Across Partial (1/4) Signed Byte Saturate: per word, sum of 4 signed bytes + b word.
template <u32 Build, ppu_exec_bit... Flags>
auto VSUM4SBS()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<set_sat>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b, auto&& sat)
	{
		// Dot product against broadcast 1 performs the 4-byte horizontal add with saturation.
		auto r = gv_dots_u8s8x4(gv_bcst8(1), a, b);

		if constexpr (((Flags == set_sat) || ...))
			sat = gv_or32(gv_xor32(gv_hadds8x4(std::move(a), std::move(b)), r), std::move(sat));

		d = std::move(r);
	};
	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb], ppu.sat);
}

// Vector Sum Across Partial (1/4) Signed Halfword Saturate: per word, sum of 2 halfwords + b word.
template <u32 Build, ppu_exec_bit... Flags>
auto VSUM4SHS()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<set_sat>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b, auto&& sat)
	{
		auto r = gv_dots_s16x2(a, gv_bcst16(1), b);

		if constexpr (((Flags == set_sat) || ...))
			sat = gv_or32(gv_xor32(gv_hadds16x2(std::move(a), std::move(b)), r), std::move(sat));

		d = std::move(r);
	};
	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb], ppu.sat);
}

// Vector Sum Across Partial (1/4) Unsigned Byte Saturate.
template <u32 Build, ppu_exec_bit... Flags>
auto VSUM4UBS()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<set_sat>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b, auto&& sat)
	{
		auto x = gv_haddu8x4(a);
		auto r = gv_addus_u32(x, b);

		if constexpr (((Flags == set_sat) || ...))
			sat = gv_or32(gv_xor32(gv_add32(std::move(x), std::move(b)), r), std::move(sat));

		d = std::move(r);
	};
	RETURN_(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb], ppu.sat);
}

// Vector Unpack High Pixel: expand 1:5:5:5 pixels from the high half into 8:8:8:8 words.
template <u32 Build, ppu_exec_bit... Flags>
auto VUPKHPX()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& b)
	{
		auto x = gv_extend_hi_s16(std::move(b));
		auto y = gv_or32(gv_and32(gv_shl32(x, 6), gv_bcst32(0x1f0000)), gv_and32(gv_shl32(x, 3), gv_bcst32(0x1f00)));
		d = gv_or32(std::move(y), gv_and32(std::move(x), gv_bcst32(0xff00001f)));
	};
	RETURN(ppu.vr[op.vd], ppu.vr[op.vb]);
}

// Vector Unpack High Signed Byte: sign-extend high 8 bytes to halfwords.
template <u32 Build, ppu_exec_bit... Flags>
auto VUPKHSB()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& b) { d = gv_extend_hi_s8(std::move(b)); };
	RETURN(ppu.vr[op.vd], ppu.vr[op.vb]);
}

// Vector Unpack High Signed Halfword: sign-extend high 4 halfwords to words.
template <u32 Build, ppu_exec_bit... Flags>
auto VUPKHSH()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& b) { d = gv_extend_hi_s16(std::move(b)); };
	RETURN(ppu.vr[op.vd], ppu.vr[op.vb]);
}

// Vector Unpack Low Pixel (low-half counterpart of VUPKHPX).
template <u32 Build, ppu_exec_bit... Flags>
auto VUPKLPX()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& b)
	{
		auto x = gv_extend_lo_s16(std::move(b));
		auto y = gv_or32(gv_and32(gv_shl32(x, 6), gv_bcst32(0x1f0000)), gv_and32(gv_shl32(x, 3), gv_bcst32(0x1f00)));
		d = gv_or32(std::move(y), gv_and32(std::move(x), gv_bcst32(0xff00001f)));
	};
	RETURN(ppu.vr[op.vd], ppu.vr[op.vb]);
}

// Vector Unpack Low Signed Byte.
template <u32 Build, ppu_exec_bit... Flags>
auto VUPKLSB()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& b) { d = gv_extend_lo_s8(std::move(b)); };
	RETURN(ppu.vr[op.vd], ppu.vr[op.vb]);
}

// Vector Unpack Low Signed Halfword.
template <u32 Build, ppu_exec_bit... Flags>
auto VUPKLSH()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& b) { d = gv_extend_lo_s16(std::move(b)); };
	RETURN(ppu.vr[op.vd], ppu.vr[op.vb]);
}

// Vector Logical XOR.
template <u32 Build, ppu_exec_bit... Flags>
auto VXOR()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& d, auto&& a, auto&& b) { d = gv_xorfs(std::move(a), std::move(b)); };
	RETURN(ppu.vr[op.vd], ppu.vr[op.va], ppu.vr[op.vb]);
}

// Trap Doubleword Immediate: trap if any enabled (op.bo) signed/unsigned comparison
// of gpr[ra] against the sign-extended immediate holds; otherwise fall through.
template <u32 Build, ppu_exec_bit... Flags>
auto TDI()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	if constexpr (Build == 0)
		return +[](ppu_thread& ppu, ppu_opcode_t op, be_t<u32>* this_op, ppu_intrp_func* next_fn)
	{
		const s64 a = ppu.gpr[op.ra], b = op.simm16;
		const u64 a_ = a, b_ = b;

		if (((op.bo & 0x10) && a < b) || ((op.bo & 0x8) && a > b) || ((op.bo & 0x4) && a == b) || ((op.bo & 0x2) && a_ < b_) || ((op.bo & 0x1) && a_ > b_))
		{
			[[unlikely]] ppu_trap(ppu, vm::get_addr(this_op));
			return;
		}

		return next_fn->fn(ppu, {this_op[1]}, this_op + 1, next_fn + 1);
	};
}

template <u32 Build, ppu_exec_bit...
Flags> auto TWI()
{
	// Trap Word Immediate: 32-bit variant of TDI.
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	if constexpr (Build == 0)
		return +[](ppu_thread& ppu, ppu_opcode_t op, be_t<u32>* this_op, ppu_intrp_func* next_fn)
	{
		const s32 a = static_cast<u32>(ppu.gpr[op.ra]), b = op.simm16;
		const u32 a_ = a, b_ = b;

		if (((op.bo & 0x10) && a < b) || ((op.bo & 0x8) && a > b) || ((op.bo & 0x4) && a == b) || ((op.bo & 0x2) && a_ < b_) || ((op.bo & 0x1) && a_ > b_))
		{
			[[unlikely]] ppu_trap(ppu, vm::get_addr(this_op));
			return;
		}

		return next_fn->fn(ppu, {this_op[1]}, this_op + 1, next_fn + 1);
	};
}

// Multiply Low Immediate: rd = (s64)ra * simm16 (low 64 bits).
template <u32 Build, ppu_exec_bit... Flags>
auto MULLI()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		ppu.gpr[op.rd] = static_cast<s64>(ppu.gpr[op.ra]) * op.simm16;
	};
	RETURN_(ppu, op);
}

// Subtract From Immediate Carrying: rd = simm16 - ra, setting XER.CA (computed as ~ra + simm16 + 1).
template <u32 Build, ppu_exec_bit... Flags>
auto SUBFIC()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		const u64 a = ppu.gpr[op.ra];
		const s64 i = op.simm16;
		const auto r = add64_flags(~a, i, 1);
		ppu.gpr[op.rd] = r.result;
		ppu.xer.ca = r.carry;
	};
	RETURN_(ppu, op);
}

// Compare Logical Immediate: unsigned compare, 64-bit when L=1, else 32-bit; result into CR field crfd.
template <u32 Build, ppu_exec_bit... Flags>
auto CMPLI()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		if (op.l10)
		{
			ppu_cr_set<u64>(ppu, op.crfd, ppu.gpr[op.ra], op.uimm16);
		}
		else
		{
			ppu_cr_set<u32>(ppu, op.crfd, static_cast<u32>(ppu.gpr[op.ra]), op.uimm16);
		}
	};
	RETURN_(ppu, op);
}

// Compare Immediate: signed compare, 64-bit when L=1, else 32-bit.
template <u32 Build, ppu_exec_bit... Flags>
auto CMPI()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		if (op.l10)
		{
			ppu_cr_set<s64>(ppu, op.crfd, ppu.gpr[op.ra], op.simm16);
		}
		else
		{
			ppu_cr_set<s32>(ppu, op.crfd, static_cast<u32>(ppu.gpr[op.ra]), op.simm16);
		}
	};
	RETURN_(ppu, op);
}

// Add Immediate Carrying (and Record when the opcode's low bit is set): sets XER.CA, optionally CR0.
template <u32 Build, ppu_exec_bit... Flags>
auto ADDIC()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		const s64 a = ppu.gpr[op.ra];
		const s64 i = op.simm16;
		const auto r = add64_flags(a, i);
		ppu.gpr[op.rd] = r.result;
		ppu.xer.ca = r.carry;

		// op.main & 1 distinguishes ADDIC. (record form) from plain ADDIC.
		if (op.main & 1) [[unlikely]]
			ppu_cr_set<s64>(ppu, 0, r.result, 0);
	};
	RETURN_(ppu, op);
}

// Add Immediate: ra == 0 means literal zero operand (li rd, simm16).
template <u32 Build, ppu_exec_bit... Flags>
auto ADDI()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		ppu.gpr[op.rd] = op.ra ? ppu.gpr[op.ra] + op.simm16 : op.simm16;
	};
	RETURN_(ppu, op);
}

// Add Immediate Shifted: immediate scaled by 65536 (lis when ra == 0).
template <u32 Build, ppu_exec_bit... Flags>
auto ADDIS()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		ppu.gpr[op.rd] = op.ra ? ppu.gpr[op.ra] + (op.simm16 * 65536) : (op.simm16 * 65536);
	};
	RETURN_(ppu, op);
}

template <u32 Build, ppu_exec_bit...
Flags> auto BC()
{
	// Branch Conditional: decrements CTR unless BO[2], tests CTR and/or the CR bit per BO,
	// and either branches (updating cia) or tail-calls the next decoded handler.
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<set_call_history>();

	if constexpr (Build == 0)
		return +[](ppu_thread& ppu, ppu_opcode_t op, be_t<u32>* this_op, ppu_intrp_func* next_fn)
	{
		const bool bo0 = (op.bo & 0x10) != 0;
		const bool bo1 = (op.bo & 0x08) != 0;
		const bool bo2 = (op.bo & 0x04) != 0;
		const bool bo3 = (op.bo & 0x02) != 0;

		ppu.ctr -= (bo2 ^ true);

		const u32 link = vm::get_addr(this_op) + 4;
		if (op.lk) ppu.lr = link;

		const bool ctr_ok = bo2 | ((ppu.ctr != 0) ^ bo3);
		const bool cond_ok = bo0 | (!!(ppu.cr[op.bi]) ^ (bo1 ^ true));

		const u32 old_cia = ppu.cia;

		if (ctr_ok && cond_ok)
		{
			ppu.cia = vm::get_addr(this_op);

			// Provide additional information by using the origin of the call
			// Because this is a fixed target branch there's no ambiguity about it
			ppu_record_call<Flags...>(ppu, ppu.cia, op);

			ppu.cia = (op.aa ? 0 : ppu.cia) + op.bt14;
		}
		else if (!ppu.state) [[likely]]
		{
			// Fall through to the next instruction without returning to the dispatcher.
			return next_fn->fn(ppu, {this_op[1]}, this_op + 1, next_fn + 1);
		}
		else
		{
			ppu.cia = link;
		}

		ppu.exec_bytes += link - old_cia;
	};
}

// System Call: validates the opcode encoding, then dispatches via r11 as syscall number.
template <u32 Build, ppu_exec_bit... Flags>
auto SC()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	if constexpr (Build == 0)
	{
		return +[](ppu_thread& ppu, ppu_opcode_t op, be_t<u32>* this_op, ppu_intrp_func*)
		{
			const u32 old_cia = ppu.cia;
			ppu.cia = vm::get_addr(this_op);
			ppu.exec_bytes += ppu.cia - old_cia;

			if (op.opcode != ppu_instructions::SC(0))
			{
				fmt::throw_exception("Unknown/Illegal SC: 0x%08x", op.opcode);
			}

			ppu_execute_syscall(ppu, ppu.gpr[11]);
		};
	}
}

// Branch (unconditional), absolute or relative, with optional link register update.
template <u32 Build, ppu_exec_bit... Flags>
auto B()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<set_call_history>();

	if constexpr (Build == 0)
		return +[](ppu_thread& ppu, ppu_opcode_t op, be_t<u32>* this_op, ppu_intrp_func*)
	{
		const u32 old_cia = ppu.cia;
		const u32 link = (ppu.cia = vm::get_addr(this_op)) + 4;

		// Provide additional information by using the origin of the call
		// Because this is a fixed target branch there's no ambiguity about it
		ppu_record_call<Flags...>(ppu, ppu.cia, op);

		ppu.cia = (op.aa ? 0 : ppu.cia) + op.bt24;
		if (op.lk) ppu.lr = link;

		ppu.exec_bytes += link - old_cia;
	};
}

// Move Condition Register Field: copy one 4-bit CR field to another.
template <u32 Build, ppu_exec_bit... Flags>
auto MCRF()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		CHECK_SIZE(ppu_thread::cr, 32);
		ppu.cr.fields[op.crfd] = ppu.cr.fields[op.crfs];
	};
	RETURN_(ppu, op);
}

// Branch Conditional to Link Register. Note: target is latched from LR *before*
// op.lk may overwrite LR with the return address (order is significant).
template <u32 Build, ppu_exec_bit... Flags>
auto BCLR()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<set_call_history>();

	if constexpr (Build == 0)
		return +[](ppu_thread& ppu, ppu_opcode_t op, be_t<u32>* this_op, ppu_intrp_func* next_fn)
	{
		const bool bo0 = (op.bo & 0x10) != 0;
		const bool bo1 = (op.bo & 0x08) != 0;
		const bool bo2 = (op.bo & 0x04) != 0;
		const bool bo3 = (op.bo & 0x02) != 0;

		ppu.ctr -= (bo2 ^ true);

		const bool ctr_ok = bo2 | ((ppu.ctr != 0) ^ bo3);
		const bool cond_ok = bo0 | (!!(ppu.cr[op.bi]) ^ (bo1 ^ true));

		const u32 target = static_cast<u32>(ppu.lr) & ~3;
		const u32 link = vm::get_addr(this_op) + 4;
		if (op.lk) ppu.lr = link;

		const u32 old_cia = ppu.cia;

		if (ctr_ok && cond_ok)
		{
			ppu_record_call<Flags...>(ppu, target, op, true);
			ppu.cia = target;
		}
		else if (!ppu.state) [[likely]]
		{
			return next_fn->fn(ppu, {this_op[1]}, this_op + 1, next_fn + 1);
		}
		else
		{
			ppu.cia = link;
		}

		ppu.exec_bytes += link - old_cia;
	};
}

template <u32 Build, ppu_exec_bit...
Flags> auto CRNOR()
{
	// Condition Register NOR: crbd = ~(crba | crbb).
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		ppu.cr[op.crbd] = (ppu.cr[op.crba] | ppu.cr[op.crbb]) ^ true;
	};
	RETURN_(ppu, op);
}

// Condition Register AND with Complement: crbd = crba & ~crbb.
template <u32 Build, ppu_exec_bit... Flags>
auto CRANDC()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		ppu.cr[op.crbd] = ppu.cr[op.crba] & (ppu.cr[op.crbb] ^ true);
	};
	RETURN_(ppu, op);
}

// Instruction Synchronize: modeled as an acquire fence (no pipeline to flush here).
template <u32 Build, ppu_exec_bit... Flags>
auto ISYNC()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&&, auto)
	{
		atomic_fence_acquire();
	};
	RETURN_(ppu, op);
}

// Condition Register XOR.
template <u32 Build, ppu_exec_bit... Flags>
auto CRXOR()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		ppu.cr[op.crbd] = ppu.cr[op.crba] ^ ppu.cr[op.crbb];
	};
	RETURN_(ppu, op);
}

// Condition Register NAND.
template <u32 Build, ppu_exec_bit... Flags>
auto CRNAND()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		ppu.cr[op.crbd] = (ppu.cr[op.crba] & ppu.cr[op.crbb]) ^ true;
	};
	RETURN_(ppu, op);
}

// Condition Register AND.
template <u32 Build, ppu_exec_bit... Flags>
auto CRAND()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		ppu.cr[op.crbd] = ppu.cr[op.crba] & ppu.cr[op.crbb];
	};
	RETURN_(ppu, op);
}

// Condition Register Equivalent: crbd = ~(crba ^ crbb).
template <u32 Build, ppu_exec_bit... Flags>
auto CREQV()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		ppu.cr[op.crbd] = (ppu.cr[op.crba] ^ ppu.cr[op.crbb]) ^ true;
	};
	RETURN_(ppu, op);
}

// Condition Register OR with Complement: crbd = crba | ~crbb.
template <u32 Build, ppu_exec_bit... Flags>
auto CRORC()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		ppu.cr[op.crbd] = ppu.cr[op.crba] | (ppu.cr[op.crbb] ^ true);
	};
	RETURN_(ppu, op);
}

// Condition Register OR.
template <u32 Build, ppu_exec_bit... Flags>
auto CROR()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		ppu.cr[op.crbd] = ppu.cr[op.crba] | ppu.cr[op.crbb];
	};
	RETURN_(ppu, op);
}

// Branch Conditional to Count Register (CTR is not decremented for bcctr forms).
template <u32 Build, ppu_exec_bit... Flags>
auto BCCTR()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<set_call_history>();

	if constexpr (Build == 0)
		return +[](ppu_thread& ppu, ppu_opcode_t op, be_t<u32>* this_op, ppu_intrp_func* next_fn)
	{
		const u32 link = vm::get_addr(this_op) + 4;
		if (op.lk) ppu.lr = link;

		const u32 old_cia = ppu.cia;

		if (op.bo & 0x10 || ppu.cr[op.bi] == ((op.bo & 0x8) != 0))
		{
			const u32 target = static_cast<u32>(ppu.ctr) & ~3;
			ppu_record_call<Flags...>(ppu, target, op, true);
			ppu.cia = target;
		}
		else if (!ppu.state) [[likely]]
		{
			return next_fn->fn(ppu, {this_op[1]}, this_op + 1, next_fn + 1);
		}
		else
		{
			ppu.cia = link;
		}

		ppu.exec_bytes += link - old_cia;
	};
}

// Rotate Left Word Immediate then Mask Insert: inserts rotated rs into ra under mask.
// dup32 mirrors the 32-bit result into both halves for the 64-bit rotate semantics.
template <u32 Build, ppu_exec_bit... Flags>
auto RLWIMI()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		const u64 mask = ppu_rotate_mask(32 + op.mb32, 32 + op.me32);
		ppu.gpr[op.ra] = (ppu.gpr[op.ra] & ~mask) | (dup32(utils::rol32(static_cast<u32>(ppu.gpr[op.rs]), op.sh32)) & mask);

		if constexpr (((Flags == has_rc) || ...))
			ppu_cr_set<s64>(ppu, 0, ppu.gpr[op.ra], 0);
	};
	RETURN_(ppu, op);
}

template <u32 Build, ppu_exec_bit...
Flags> auto RLWINM()
{
	// Rotate Left Word Immediate then AND with Mask.
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		ppu.gpr[op.ra] = dup32(utils::rol32(static_cast<u32>(ppu.gpr[op.rs]), op.sh32)) & ppu_rotate_mask(32 + op.mb32, 32 + op.me32);

		if constexpr (((Flags == has_rc) || ...))
			ppu_cr_set<s64>(ppu, 0, ppu.gpr[op.ra], 0);
	};
	RETURN_(ppu, op);
}

// Rotate Left Word then AND with Mask (shift amount from rb, low 5 bits).
template <u32 Build, ppu_exec_bit... Flags>
auto RLWNM()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		ppu.gpr[op.ra] = dup32(utils::rol32(static_cast<u32>(ppu.gpr[op.rs]), ppu.gpr[op.rb] & 0x1f)) & ppu_rotate_mask(32 + op.mb32, 32 + op.me32);

		if constexpr (((Flags == has_rc) || ...))
			ppu_cr_set<s64>(ppu, 0, ppu.gpr[op.ra], 0);
	};
	RETURN_(ppu, op);
}

// OR Immediate.
template <u32 Build, ppu_exec_bit... Flags>
auto ORI()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		ppu.gpr[op.ra] = ppu.gpr[op.rs] | op.uimm16;
	};
	RETURN_(ppu, op);
}

// OR Immediate Shifted.
template <u32 Build, ppu_exec_bit... Flags>
auto ORIS()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		ppu.gpr[op.ra] = ppu.gpr[op.rs] | (u64{op.uimm16} << 16);
	};
	RETURN_(ppu, op);
}

// XOR Immediate.
template <u32 Build, ppu_exec_bit... Flags>
auto XORI()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		ppu.gpr[op.ra] = ppu.gpr[op.rs] ^ op.uimm16;
	};
	RETURN_(ppu, op);
}

// XOR Immediate Shifted.
template <u32 Build, ppu_exec_bit... Flags>
auto XORIS()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		ppu.gpr[op.ra] = ppu.gpr[op.rs] ^ (u64{op.uimm16} << 16);
	};
	RETURN_(ppu, op);
}

// AND Immediate (record form — always updates CR0 per the ISA).
template <u32 Build, ppu_exec_bit... Flags>
auto ANDI()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		ppu.gpr[op.ra] = ppu.gpr[op.rs] & op.uimm16;
		ppu_cr_set<s64>(ppu, 0, ppu.gpr[op.ra], 0);
	};
	RETURN_(ppu, op);
}

// AND Immediate Shifted (record form).
template <u32 Build, ppu_exec_bit... Flags>
auto ANDIS()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		ppu.gpr[op.ra] = ppu.gpr[op.rs] & (u64{op.uimm16} << 16);
		ppu_cr_set<s64>(ppu, 0, ppu.gpr[op.ra], 0);
	};
	RETURN_(ppu, op);
}

// Rotate Left Doubleword Immediate then Clear Left: mask is mbe64..63.
template <u32 Build, ppu_exec_bit... Flags>
auto RLDICL()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		ppu.gpr[op.ra] = utils::rol64(ppu.gpr[op.rs], op.sh64) & (~0ull >> op.mbe64);

		if constexpr (((Flags == has_rc) || ...))
			ppu_cr_set<s64>(ppu, 0, ppu.gpr[op.ra], 0);
	};
	RETURN_(ppu, op);
}

// Rotate Left Doubleword Immediate then Clear Right: mask is 0..mbe64 (me encoded; x ^ 63 flips to shift count).
template <u32 Build, ppu_exec_bit... Flags>
auto RLDICR()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		ppu.gpr[op.ra] = utils::rol64(ppu.gpr[op.rs], op.sh64) & (~0ull << (op.mbe64 ^ 63));

		if constexpr (((Flags == has_rc) || ...))
			ppu_cr_set<s64>(ppu, 0, ppu.gpr[op.ra], 0);
	};
	RETURN_(ppu, op);
}

// Rotate Left Doubleword Immediate then Clear: mask runs mb..(63 - sh).
template <u32 Build, ppu_exec_bit... Flags>
auto RLDIC()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		ppu.gpr[op.ra] = utils::rol64(ppu.gpr[op.rs], op.sh64) & ppu_rotate_mask(op.mbe64, op.sh64 ^ 63);

		if constexpr (((Flags == has_rc) || ...))
			ppu_cr_set<s64>(ppu, 0, ppu.gpr[op.ra], 0);
	};
	RETURN_(ppu, op);
}

// Rotate Left Doubleword Immediate then Mask Insert.
template <u32 Build, ppu_exec_bit... Flags>
auto RLDIMI()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		const u64 mask = ppu_rotate_mask(op.mbe64, op.sh64 ^ 63);
		ppu.gpr[op.ra] = (ppu.gpr[op.ra] & ~mask) | (utils::rol64(ppu.gpr[op.rs], op.sh64) & mask);

		if constexpr (((Flags == has_rc) || ...))
			ppu_cr_set<s64>(ppu, 0, ppu.gpr[op.ra], 0);
	};
	RETURN_(ppu, op);
}

// Rotate Left Doubleword then Clear Left (rotate amount from rb).
template <u32 Build, ppu_exec_bit... Flags>
auto RLDCL()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		ppu.gpr[op.ra] = utils::rol64(ppu.gpr[op.rs], ppu.gpr[op.rb] & 0x3f) & (~0ull >> op.mbe64);

		if constexpr (((Flags == has_rc) || ...))
			ppu_cr_set<s64>(ppu, 0, ppu.gpr[op.ra], 0);
	};
	RETURN_(ppu, op);
}

// Rotate Left Doubleword then Clear Right (rotate amount from rb).
template <u32 Build, ppu_exec_bit... Flags>
auto RLDCR()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		ppu.gpr[op.ra] = utils::rol64(ppu.gpr[op.rs], ppu.gpr[op.rb] & 0x3f) & (~0ull << (op.mbe64 ^ 63));

		if constexpr (((Flags == has_rc) || ...))
			ppu_cr_set<s64>(ppu, 0, ppu.gpr[op.ra], 0);
	};
	RETURN_(ppu, op);
}

template <u32 Build, ppu_exec_bit...
Flags> auto CMP()
{
	// Compare (register-register): signed, 64-bit when L=1, else 32-bit.
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		if (op.l10)
		{
			ppu_cr_set<s64>(ppu, op.crfd, ppu.gpr[op.ra], ppu.gpr[op.rb]);
		}
		else
		{
			ppu_cr_set<s32>(ppu, op.crfd, static_cast<u32>(ppu.gpr[op.ra]), static_cast<u32>(ppu.gpr[op.rb]));
		}
	};
	RETURN_(ppu, op);
}

// Trap Word (register-register): trap if any enabled signed/unsigned comparison holds.
template <u32 Build, ppu_exec_bit... Flags>
auto TW()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	if constexpr (Build == 0)
		return +[](ppu_thread& ppu, ppu_opcode_t op, be_t<u32>* this_op, ppu_intrp_func* next_fn)
	{
		s32 a = static_cast<s32>(ppu.gpr[op.ra]);
		s32 b = static_cast<s32>(ppu.gpr[op.rb]);

		if ((a < b && (op.bo & 0x10)) || (a > b && (op.bo & 0x8)) || (a == b && (op.bo & 0x4)) || (static_cast<u32>(a) < static_cast<u32>(b) && (op.bo & 0x2)) || (static_cast<u32>(a) > static_cast<u32>(b) && (op.bo & 0x1)))
		{
			[[unlikely]] ppu_trap(ppu, vm::get_addr(this_op));
			return;
		}

		return next_fn->fn(ppu, {this_op[1]}, this_op + 1, next_fn + 1);
	};
}

// Precomputed LVSL permute-control vectors: base pattern 0..15 (big-endian order),
// one table entry per possible address alignment offset.
const v128 s_lvsl_base = v128::from64r(0x0001020304050607, 0x08090a0b0c0d0e0f);

const v128 s_lvsl_consts[16] =
{
	gv_add8(s_lvsl_base, gv_bcst8(0)),
	gv_add8(s_lvsl_base, gv_bcst8(1)),
	gv_add8(s_lvsl_base, gv_bcst8(2)),
	gv_add8(s_lvsl_base, gv_bcst8(3)),
	gv_add8(s_lvsl_base, gv_bcst8(4)),
	gv_add8(s_lvsl_base, gv_bcst8(5)),
	gv_add8(s_lvsl_base, gv_bcst8(6)),
	gv_add8(s_lvsl_base, gv_bcst8(7)),
	gv_add8(s_lvsl_base, gv_bcst8(8)),
	gv_add8(s_lvsl_base, gv_bcst8(9)),
	gv_add8(s_lvsl_base, gv_bcst8(10)),
	gv_add8(s_lvsl_base, gv_bcst8(11)),
	gv_add8(s_lvsl_base, gv_bcst8(12)),
	gv_add8(s_lvsl_base, gv_bcst8(13)),
	gv_add8(s_lvsl_base, gv_bcst8(14)),
	gv_add8(s_lvsl_base, gv_bcst8(15)),
};

// Load Vector for Shift Left: permute-control vector selected by address alignment.
template <u32 Build, ppu_exec_bit... Flags>
auto LVSL()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		const u64 addr = op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb];
		ppu.vr[op.vd] = s_lvsl_consts[addr % 16];
	};
	RETURN_(ppu, op);
}

// Load Vector Element Byte Indexed: loads the full aligned 16-byte quadword
// (address masked to 16-byte alignment), per this implementation's handling.
template <u32 Build, ppu_exec_bit... Flags>
auto LVEBX()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<use_feed_data>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		const u64 addr = (op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb]) & ~0xfull;
		ppu.vr[op.vd] = ppu_feed_data<v128, Flags...>(ppu, addr);
	};
	RETURN_(ppu, op);
}

// Subtract From Carrying: rd = rb - ra (computed ~ra + rb + 1), sets XER.CA; optional OV/CR0.
template <u32 Build, ppu_exec_bit... Flags>
auto SUBFC()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		const u64 RA = ppu.gpr[op.ra];
		const u64 RB = ppu.gpr[op.rb];
		const auto r = add64_flags(~RA, RB, 1);
		ppu.gpr[op.rd] = r.result;
		ppu.xer.ca = r.carry;

		// Signed overflow: operands (~RA, RB) share sign but result differs.
		if constexpr (((Flags == has_oe) || ...))
			ppu_ov_set(ppu, (~RA >> 63 == RB >> 63) && (~RA >> 63 != ppu.gpr[op.rd] >> 63));

		if constexpr (((Flags == has_rc) || ...))
			ppu_cr_set<s64>(ppu, 0, r.result, 0);
	};
	RETURN_(ppu, op);
}

// Multiply High Doubleword Unsigned: rd = high 64 bits of ra * rb.
template <u32 Build, ppu_exec_bit... Flags>
auto MULHDU()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		ppu.gpr[op.rd] = utils::umulh64(ppu.gpr[op.ra], ppu.gpr[op.rb]);

		if constexpr (((Flags == has_rc) || ...))
			ppu_cr_set<s64>(ppu, 0, ppu.gpr[op.rd], 0);
	};
	RETURN_(ppu, op);
}

template <u32 Build, ppu_exec_bit...
Flags> auto ADDC() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 RA = ppu.gpr[op.ra]; const u64 RB = ppu.gpr[op.rb]; const auto r = add64_flags(RA, RB); ppu.gpr[op.rd] = r.result; ppu.xer.ca = r.carry; if constexpr (((Flags == has_oe) || ...)) ppu_ov_set(ppu, (RA >> 63 == RB >> 63) && (RA >> 63 != ppu.gpr[op.rd] >> 63)); if constexpr (((Flags == has_rc) || ...)) ppu_cr_set<s64>(ppu, 0, r.result, 0); }; RETURN_(ppu, op); } template <u32 Build, ppu_exec_bit... Flags> auto MULHWU() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { u32 a = static_cast<u32>(ppu.gpr[op.ra]); u32 b = static_cast<u32>(ppu.gpr[op.rb]); ppu.gpr[op.rd] = (u64{a} * b) >> 32; if constexpr (((Flags == has_rc) || ...)) ppu_cr_set<s64>(ppu, 0, ppu.gpr[op.rd], 0); }; RETURN_(ppu, op); } template <u32 N> struct MFOCRF { template <ppu_exec_bit... Flags> static auto select(bs_t<ppu_exec_bit> selected, auto func) { return ppu_exec_select<>::select<Flags...>(selected, func); } template <u32 Build, ppu_exec_bit... Flags> static auto impl() { static const auto exec = [](ppu_thread& ppu, auto&& d) { const u32 p = N * 4; const u32 v = ppu.cr[p + 0] << 3 | ppu.cr[p + 1] << 2 | ppu.cr[p + 2] << 1 | ppu.cr[p + 3] << 0; d = v << (p ^ 0x1c); }; RETURN_(ppu, ppu.gpr[op.rd]); } }; template <u32 Build, ppu_exec_bit... 
Flags> auto MFCR() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, auto&& d) { #if defined(ARCH_X64) be_t<v128> lane0, lane1; std::memcpy(&lane0, ppu.cr.bits, sizeof(v128)); std::memcpy(&lane1, ppu.cr.bits + 16, sizeof(v128)); const u32 mh = _mm_movemask_epi8(_mm_slli_epi64(lane0.value(), 7)); const u32 ml = _mm_movemask_epi8(_mm_slli_epi64(lane1.value(), 7)); d = (mh << 16) | ml; #else d = ppu.cr.pack(); #endif }; RETURN_(ppu, ppu.gpr[op.rd]); } template <u32 Build, ppu_exec_bit... Flags> auto LWARX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb]; ppu.gpr[op.rd] = ppu_lwarx(ppu, vm::cast(addr)); }; RETURN_(ppu, op); } template <u32 Build, ppu_exec_bit... Flags> auto LDX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_feed_data>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb]; ppu.gpr[op.rd] = ppu_feed_data<u64, Flags...>(ppu, addr); }; RETURN_(ppu, op); } template <u32 Build, ppu_exec_bit... Flags> auto LWZX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_feed_data>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb]; ppu.gpr[op.rd] = ppu_feed_data<u32, Flags...>(ppu, addr); }; RETURN_(ppu, op); } template <u32 Build, ppu_exec_bit... 
// (continuation: the `template <u32 Build, ppu_exec_bit...` header of SLW is on the previous line)
// SLW: shift left word — uses a 6-bit shift count, so counts 32..63 naturally produce 0 after the u32 truncation.
Flags> auto SLW() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { ppu.gpr[op.ra] = static_cast<u32>(ppu.gpr[op.rs] << (ppu.gpr[op.rb] & 0x3f)); if constexpr (((Flags == has_rc) || ...)) ppu_cr_set<s64>(ppu, 0, ppu.gpr[op.ra], 0); }; RETURN_(ppu, op); }
// CNTLZW: count leading zeros of the low 32 bits of rs.
template <u32 Build, ppu_exec_bit... Flags> auto CNTLZW() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { ppu.gpr[op.ra] = std::countl_zero(static_cast<u32>(ppu.gpr[op.rs])); if constexpr (((Flags == has_rc) || ...)) ppu_cr_set<s64>(ppu, 0, ppu.gpr[op.ra], 0); }; RETURN_(ppu, op); }
// SLD: shift left doubleword — 7-bit count; bit 0x40 set means the result is 0.
template <u32 Build, ppu_exec_bit... Flags> auto SLD() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u32 n = ppu.gpr[op.rb] & 0x7f; ppu.gpr[op.ra] = n & 0x40 ? 0 : ppu.gpr[op.rs] << n; if constexpr (((Flags == has_rc) || ...)) ppu_cr_set<s64>(ppu, 0, ppu.gpr[op.ra], 0); }; RETURN_(ppu, op); }
// AND: ra = rs & rb.
template <u32 Build, ppu_exec_bit... Flags> auto AND() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { ppu.gpr[op.ra] = ppu.gpr[op.rs] & ppu.gpr[op.rb]; if constexpr (((Flags == has_rc) || ...)) ppu_cr_set<s64>(ppu, 0, ppu.gpr[op.ra], 0); }; RETURN_(ppu, op); }
// CMPL: unsigned compare of gpr[ra] vs gpr[rb] into CR field crfd; op.l10 selects 64-bit (u64) vs 32-bit (u32).
template <u32 Build, ppu_exec_bit... Flags> auto CMPL() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { if (op.l10) { ppu_cr_set<u64>(ppu, op.crfd, ppu.gpr[op.ra], ppu.gpr[op.rb]); } else { ppu_cr_set<u32>(ppu, op.crfd, static_cast<u32>(ppu.gpr[op.ra]), static_cast<u32>(ppu.gpr[op.rb])); } }; RETURN_(ppu, op); }
// Precomputed LVSR results: base vector plus broadcast offset 16 down to 1, indexed by EA low nibble.
const v128 s_lvsr_consts[16] = { gv_add8(s_lvsl_base, gv_bcst8(16)), gv_add8(s_lvsl_base, gv_bcst8(15)), gv_add8(s_lvsl_base, gv_bcst8(14)), gv_add8(s_lvsl_base, gv_bcst8(13)), gv_add8(s_lvsl_base, gv_bcst8(12)), gv_add8(s_lvsl_base, gv_bcst8(11)), gv_add8(s_lvsl_base, gv_bcst8(10)), gv_add8(s_lvsl_base, gv_bcst8(9)), gv_add8(s_lvsl_base, gv_bcst8(8)), gv_add8(s_lvsl_base, gv_bcst8(7)), gv_add8(s_lvsl_base, gv_bcst8(6)), gv_add8(s_lvsl_base, gv_bcst8(5)), gv_add8(s_lvsl_base, gv_bcst8(4)), gv_add8(s_lvsl_base, gv_bcst8(3)), gv_add8(s_lvsl_base, gv_bcst8(2)), gv_add8(s_lvsl_base, gv_bcst8(1)), };
// LVSR: load vector for shift right — vr[vd] = permute table entry selected by EA % 16.
template <u32 Build, ppu_exec_bit... Flags> auto LVSR() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb]; ppu.vr[op.vd] = s_lvsr_consts[addr % 16]; }; RETURN_(ppu, op); }
// LVEHX: loads the whole 16-byte-aligned vector containing EA into vr[vd] (same full-vector fetch as LVEBX).
template <u32 Build, ppu_exec_bit... Flags> auto LVEHX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_feed_data>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = (op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb]) & ~0xfull; ppu.vr[op.vd] = ppu_feed_data<v128, Flags...>(ppu, addr); }; RETURN_(ppu, op); }
// SUBF: rd = rb - ra; optional OE/RC updates.
template <u32 Build, ppu_exec_bit... Flags> auto SUBF() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 RA = ppu.gpr[op.ra]; const u64 RB = ppu.gpr[op.rb]; ppu.gpr[op.rd] = RB - RA; if constexpr (((Flags == has_oe) || ...)) ppu_ov_set(ppu, (~RA >> 63 == RB >> 63) && (~RA >> 63 != ppu.gpr[op.rd] >> 63)); if constexpr (((Flags == has_rc) || ...)) ppu_cr_set<s64>(ppu, 0, ppu.gpr[op.rd], 0); }; RETURN_(ppu, op); }
// LDUX: load doubleword with update — ra receives the effective address afterwards.
template <u32 Build, ppu_exec_bit... Flags> auto LDUX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_feed_data>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = ppu.gpr[op.ra] + ppu.gpr[op.rb]; ppu.gpr[op.rd] = ppu_feed_data<u64, Flags...>(ppu, addr); ppu.gpr[op.ra] = addr; }; RETURN_(ppu, op); }
// DCBST: data cache block store — no-op under emulation.
template <u32 Build, ppu_exec_bit... Flags> auto DCBST() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](auto&&, auto) { }; RETURN_(ppu, op); }
// LWZUX: load word and zero with update.
template <u32 Build, ppu_exec_bit... Flags> auto LWZUX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_feed_data>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = ppu.gpr[op.ra] + ppu.gpr[op.rb]; ppu.gpr[op.rd] = ppu_feed_data<u32, Flags...>(ppu, addr); ppu.gpr[op.ra] = addr; }; RETURN_(ppu, op); }
// CNTLZD: count leading zeros of the full 64-bit rs.
template <u32 Build, ppu_exec_bit... Flags> auto CNTLZD() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { ppu.gpr[op.ra] = std::countl_zero(ppu.gpr[op.rs]); if constexpr (((Flags == has_rc) || ...)) ppu_cr_set<s64>(ppu, 0, ppu.gpr[op.ra], 0); }; RETURN_(ppu, op); }
// ANDC: ra = rs & ~rb.
template <u32 Build, ppu_exec_bit... Flags> auto ANDC() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { ppu.gpr[op.ra] = ppu.gpr[op.rs] & ~ppu.gpr[op.rb]; if constexpr (((Flags == has_rc) || ...)) ppu_cr_set<s64>(ppu, 0, ppu.gpr[op.ra], 0); }; RETURN_(ppu, op); }
// TD: trap doubleword — 64-bit analogue of TW, same op.bo condition bits.
template <u32 Build, ppu_exec_bit... Flags> auto TD() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); if constexpr (Build == 0) return +[](ppu_thread& ppu, ppu_opcode_t op, be_t<u32>* this_op, ppu_intrp_func* next_fn) { const s64 a = ppu.gpr[op.ra], b = ppu.gpr[op.rb]; const u64 a_ = a, b_ = b; if (((op.bo & 0x10) && a < b) || ((op.bo & 0x8) && a > b) || ((op.bo & 0x4) && a == b) || ((op.bo & 0x2) && a_ < b_) || ((op.bo & 0x1) && a_ > b_)) { [[unlikely]] ppu_trap(ppu, vm::get_addr(this_op)); return; } return next_fn->fn(ppu, {this_op[1]}, this_op + 1, next_fn + 1); }; }
// LVEWX: loads the whole 16-byte-aligned vector containing EA into vr[vd].
template <u32 Build, ppu_exec_bit... Flags> auto LVEWX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_feed_data>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = (op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb]) & ~0xfull; ppu.vr[op.vd] = ppu_feed_data<v128, Flags...>(ppu, addr); }; RETURN_(ppu, op); }
// MULHD: high 64 bits of the signed 64x64 product.
template <u32 Build, ppu_exec_bit... Flags> auto MULHD() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { ppu.gpr[op.rd] = utils::mulh64(ppu.gpr[op.ra], ppu.gpr[op.rb]); if constexpr (((Flags == has_rc) || ...)) ppu_cr_set<s64>(ppu, 0, ppu.gpr[op.rd], 0); }; RETURN_(ppu, op); }
// (continued on the next line: header of MULHW)
template <u32 Build, ppu_exec_bit...
// (continuation: the `template <u32 Build, ppu_exec_bit...` header of MULHW is on the previous line)
// MULHW: high 32 bits of the signed 32x32 product.
Flags> auto MULHW() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { s32 a = static_cast<s32>(ppu.gpr[op.ra]); s32 b = static_cast<s32>(ppu.gpr[op.rb]); ppu.gpr[op.rd] = (s64{a} * b) >> 32; if constexpr (((Flags == has_rc) || ...)) ppu_cr_set<s64>(ppu, 0, ppu.gpr[op.rd], 0); }; RETURN_(ppu, op); }
// LDARX: load doubleword and reserve — ppu_ldarx establishes the reservation paired with STDCX.
template <u32 Build, ppu_exec_bit... Flags> auto LDARX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb]; ppu.gpr[op.rd] = ppu_ldarx(ppu, vm::cast(addr)); }; RETURN_(ppu, op); }
// DCBF: data cache block flush — no-op under emulation.
template <u32 Build, ppu_exec_bit... Flags> auto DCBF() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](auto&&, auto) { }; RETURN_(ppu, op); }
// LBZX: load byte and zero indexed.
template <u32 Build, ppu_exec_bit... Flags> auto LBZX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_feed_data>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb]; ppu.gpr[op.rd] = ppu_feed_data<u8, Flags...>(ppu, addr); }; RETURN_(ppu, op); }
// LVX: load vector indexed — EA forced to 16-byte alignment.
template <u32 Build, ppu_exec_bit... Flags> auto LVX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_feed_data>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = (op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb]) & ~0xfull; ppu.vr[op.vd] = ppu_feed_data<v128, Flags...>(ppu, addr); }; RETURN_(ppu, op); }
// NEG: rd = -ra; OE overflows only for the single value INT64_MIN (1ull << 63).
template <u32 Build, ppu_exec_bit... Flags> auto NEG() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 RA = ppu.gpr[op.ra]; ppu.gpr[op.rd] = 0 - RA; if constexpr (((Flags == has_oe) || ...)) ppu_ov_set(ppu, RA == (1ull << 63)); if constexpr (((Flags == has_rc) || ...)) ppu_cr_set<s64>(ppu, 0, ppu.gpr[op.rd], 0); }; RETURN_(ppu, op); }
// LBZUX: load byte and zero with update.
template <u32 Build, ppu_exec_bit... Flags> auto LBZUX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_feed_data>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = ppu.gpr[op.ra] + ppu.gpr[op.rb]; ppu.gpr[op.rd] = ppu_feed_data<u8, Flags...>(ppu, addr); ppu.gpr[op.ra] = addr; }; RETURN_(ppu, op); }
// NOR: ra = ~(rs | rb).
template <u32 Build, ppu_exec_bit... Flags> auto NOR() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { ppu.gpr[op.ra] = ~(ppu.gpr[op.rs] | ppu.gpr[op.rb]); if constexpr (((Flags == has_rc) || ...)) ppu_cr_set<s64>(ppu, 0, ppu.gpr[op.ra], 0); }; RETURN_(ppu, op); }
// STVEBX: store vector element byte — v128 lanes are big-endian, hence the 15 - eb index flip.
template <u32 Build, ppu_exec_bit... Flags> auto STVEBX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb]; const u8 eb = addr & 0xf; vm::write8(vm::cast(addr), ppu.vr[op.vs]._u8[15 - eb]); }; RETURN_(ppu, op); }
// SUBFE: rd = ~ra + rb + XER.CA (subtract from extended); carry-out to XER.CA.
template <u32 Build, ppu_exec_bit... Flags> auto SUBFE() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 RA = ppu.gpr[op.ra]; const u64 RB = ppu.gpr[op.rb]; const auto r = add64_flags(~RA, RB, ppu.xer.ca); ppu.gpr[op.rd] = r.result; ppu.xer.ca = r.carry; if constexpr (((Flags == has_oe) || ...)) ppu_ov_set(ppu, (~RA >> 63 == RB >> 63) && (~RA >> 63 != ppu.gpr[op.rd] >> 63)); if constexpr (((Flags == has_rc) || ...)) ppu_cr_set<s64>(ppu, 0, r.result, 0); }; RETURN_(ppu, op); }
// ADDE: rd = ra + rb + XER.CA (add extended); carry-out to XER.CA.
template <u32 Build, ppu_exec_bit... Flags> auto ADDE() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 RA = ppu.gpr[op.ra]; const u64 RB = ppu.gpr[op.rb]; const auto r = add64_flags(RA, RB, ppu.xer.ca); ppu.gpr[op.rd] = r.result; ppu.xer.ca = r.carry; if constexpr (((Flags == has_oe) || ...)) ppu_ov_set(ppu, (RA >> 63 == RB >> 63) && (RA >> 63 != ppu.gpr[op.rd] >> 63)); if constexpr (((Flags == has_rc) || ...)) ppu_cr_set<s64>(ppu, 0, r.result, 0); }; RETURN_(ppu, op); }
// MTOCRF/MTCRF: move to CR fields. s_table expands a 4-bit nibble to four bool
// bytes; op.l11 distinguishes the single-field form (MTOCRF) from the masked
// multi-field form (MTCRF, one bit of op.crm per CR field).
template <u32 Build, ppu_exec_bit... Flags> auto MTOCRF() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { alignas(4) static const u8 s_table[16][4] { {0, 0, 0, 0}, {0, 0, 0, 1}, {0, 0, 1, 0}, {0, 0, 1, 1}, {0, 1, 0, 0}, {0, 1, 0, 1}, {0, 1, 1, 0}, {0, 1, 1, 1}, {1, 0, 0, 0}, {1, 0, 0, 1}, {1, 0, 1, 0}, {1, 0, 1, 1}, {1, 1, 0, 0}, {1, 1, 0, 1}, {1, 1, 1, 0}, {1, 1, 1, 1}, }; const u64 s = ppu.gpr[op.rs]; if (op.l11) { // MTOCRF
const u32 n = std::countl_zero<u32>(op.crm) & 7; const u64 v = (s >> ((n * 4) ^ 0x1c)) & 0xf; ppu.cr.fields[n] = *reinterpret_cast<const u32*>(s_table + v); } else { // MTCRF
for (u32 i = 0; i < 8; i++) { if (op.crm & (128 >> i)) { const u64 v = (s >> ((i * 4) ^ 0x1c)) & 0xf; ppu.cr.fields[i] = *reinterpret_cast<const u32*>(s_table + v); } } } }; RETURN_(ppu, op); }
// STDX: store doubleword indexed.
template <u32 Build, ppu_exec_bit... Flags> auto STDX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb]; vm::write64(vm::cast(addr), ppu.gpr[op.rs]); }; RETURN_(ppu, op); }
// STWCX: store word conditional — CR0 is set from the ppu_stwcx success flag plus XER.SO.
template <u32 Build, ppu_exec_bit... Flags> auto STWCX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb]; ppu_cr_set(ppu, 0, false, false, ppu_stwcx(ppu, vm::cast(addr), static_cast<u32>(ppu.gpr[op.rs])), ppu.xer.so); }; RETURN_(ppu, op); }
// STWX: store word indexed.
template <u32 Build, ppu_exec_bit... Flags> auto STWX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb]; vm::write32(vm::cast(addr), static_cast<u32>(ppu.gpr[op.rs])); }; RETURN_(ppu, op); }
// STVEHX: store vector element halfword — EA aligned to 2; big-endian lane flip (7 - eb).
template <u32 Build, ppu_exec_bit... Flags> auto STVEHX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = (op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb]) & ~1ULL; const u8 eb = (addr & 0xf) >> 1; vm::write16(vm::cast(addr), ppu.vr[op.vs]._u16[7 - eb]); }; RETURN_(ppu, op); }
// STDUX: store doubleword with update.
template <u32 Build, ppu_exec_bit... Flags> auto STDUX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = ppu.gpr[op.ra] + ppu.gpr[op.rb]; vm::write64(vm::cast(addr), ppu.gpr[op.rs]); ppu.gpr[op.ra] = addr; }; RETURN_(ppu, op); }
// STWUX: store word with update.
template <u32 Build, ppu_exec_bit... Flags> auto STWUX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = ppu.gpr[op.ra] + ppu.gpr[op.rb]; vm::write32(vm::cast(addr), static_cast<u32>(ppu.gpr[op.rs])); ppu.gpr[op.ra] = addr; }; RETURN_(ppu, op); }
// STVEWX: store vector element word — EA aligned to 4; big-endian lane flip (3 - eb).
template <u32 Build, ppu_exec_bit... Flags> auto STVEWX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = (op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb]) & ~3ULL; const u8 eb = (addr & 0xf) >> 2; vm::write32(vm::cast(addr), ppu.vr[op.vs]._u32[3 - eb]); }; RETURN_(ppu, op); }
// (continued on the next line: header of SUBFZE)
template <u32 Build, ppu_exec_bit...
// (continuation: the `template <u32 Build, ppu_exec_bit...` header of SUBFZE is on the previous line)
// SUBFZE: rd = ~ra + 0 + XER.CA (subtract from zero extended); carry-out to XER.CA.
Flags> auto SUBFZE() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 RA = ppu.gpr[op.ra]; const auto r = add64_flags(~RA, 0, ppu.xer.ca); ppu.gpr[op.rd] = r.result; ppu.xer.ca = r.carry; if constexpr (((Flags == has_oe) || ...)) ppu_ov_set(ppu, (~RA >> 63 == 0) && (~RA >> 63 != ppu.gpr[op.rd] >> 63)); if constexpr (((Flags == has_rc) || ...)) ppu_cr_set<s64>(ppu, 0, r.result, 0); }; RETURN_(ppu, op); }
// ADDZE: rd = ra + 0 + XER.CA (add to zero extended); carry-out to XER.CA.
template <u32 Build, ppu_exec_bit... Flags> auto ADDZE() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 RA = ppu.gpr[op.ra]; const auto r = add64_flags(RA, 0, ppu.xer.ca); ppu.gpr[op.rd] = r.result; ppu.xer.ca = r.carry; if constexpr (((Flags == has_oe) || ...)) ppu_ov_set(ppu, (RA >> 63 == 0) && (RA >> 63 != ppu.gpr[op.rd] >> 63)); if constexpr (((Flags == has_rc) || ...)) ppu_cr_set<s64>(ppu, 0, r.result, 0); }; RETURN_(ppu, op); }
// STDCX: store doubleword conditional — CR0 set from ppu_stdcx success plus XER.SO.
template <u32 Build, ppu_exec_bit... Flags> auto STDCX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb]; ppu_cr_set(ppu, 0, false, false, ppu_stdcx(ppu, vm::cast(addr), ppu.gpr[op.rs]), ppu.xer.so); }; RETURN_(ppu, op); }
// STBX: store byte indexed.
template <u32 Build, ppu_exec_bit... Flags> auto STBX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb]; vm::write8(vm::cast(addr), static_cast<u8>(ppu.gpr[op.rs])); }; RETURN_(ppu, op); }
// STVX: store vector indexed — EA forced to 16-byte alignment.
template <u32 Build, ppu_exec_bit... Flags> auto STVX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = (op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb]) & ~0xfull; vm::_ref<v128>(vm::cast(addr)) = ppu.vr[op.vs]; }; RETURN_(ppu, op); }
// MULLD: rd = low 64 bits of ra * rb; OE checked dynamically (op.oe) by comparing
// the true high half (mulh64) against the result's sign extension.
template <u32 Build, ppu_exec_bit... Flags> auto MULLD() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const s64 RA = ppu.gpr[op.ra]; const s64 RB = ppu.gpr[op.rb]; ppu.gpr[op.rd] = RA * RB; if (op.oe) [[unlikely]] { const s64 high = utils::mulh64(RA, RB); ppu_ov_set(ppu, high != s64(ppu.gpr[op.rd]) >> 63); } if constexpr (((Flags == has_rc) || ...)) ppu_cr_set<s64>(ppu, 0, ppu.gpr[op.rd], 0); }; RETURN_(ppu, op); }
// SUBFME: rd = ~ra + ~0 + XER.CA (subtract from minus one extended); carry-out to XER.CA.
template <u32 Build, ppu_exec_bit... Flags> auto SUBFME() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 RA = ppu.gpr[op.ra]; const auto r = add64_flags(~RA, ~0ull, ppu.xer.ca); ppu.gpr[op.rd] = r.result; ppu.xer.ca = r.carry; if constexpr (((Flags == has_oe) || ...)) ppu_ov_set(ppu, (~RA >> 63 == 1) && (~RA >> 63 != ppu.gpr[op.rd] >> 63)); if constexpr (((Flags == has_rc) || ...)) ppu_cr_set<s64>(ppu, 0, r.result, 0); }; RETURN_(ppu, op); }
// ADDME: rd = ra + ~0 + XER.CA (add to minus one extended); carry-out to XER.CA.
template <u32 Build, ppu_exec_bit... Flags> auto ADDME() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const s64 RA = ppu.gpr[op.ra]; const auto r = add64_flags(RA, ~0ull, ppu.xer.ca); ppu.gpr[op.rd] = r.result; ppu.xer.ca = r.carry; if constexpr (((Flags == has_oe) || ...)) ppu_ov_set(ppu, (u64(RA) >> 63 == 1) && (u64(RA) >> 63 != ppu.gpr[op.rd] >> 63)); if constexpr (((Flags == has_rc) || ...)) ppu_cr_set<s64>(ppu, 0, r.result, 0); }; RETURN_(ppu, op); }
// MULLW: rd = full signed 32x32 product (64-bit result); OE when it exceeds s32 range.
template <u32 Build, ppu_exec_bit... Flags> auto MULLW() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { ppu.gpr[op.rd] = s64{static_cast<s32>(ppu.gpr[op.ra])} * static_cast<s32>(ppu.gpr[op.rb]); if constexpr (((Flags == has_oe) || ...)) ppu_ov_set(ppu, s64(ppu.gpr[op.rd]) < INT32_MIN || s64(ppu.gpr[op.rd]) > INT32_MAX); if constexpr (((Flags == has_rc) || ...)) ppu_cr_set<s64>(ppu, 0, ppu.gpr[op.rd], 0); }; RETURN_(ppu, op); }
// DCBTST: data cache block touch for store — no-op under emulation.
template <u32 Build, ppu_exec_bit... Flags> auto DCBTST() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](auto&&, auto) { }; RETURN_(ppu, op); }
// STBUX: store byte with update.
template <u32 Build, ppu_exec_bit... Flags> auto STBUX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = ppu.gpr[op.ra] + ppu.gpr[op.rb]; vm::write8(vm::cast(addr), static_cast<u8>(ppu.gpr[op.rs])); ppu.gpr[op.ra] = addr; }; RETURN_(ppu, op); }
// ADD: rd = ra + rb; optional OE/RC updates.
template <u32 Build, ppu_exec_bit... Flags> auto ADD() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 RA = ppu.gpr[op.ra]; const u64 RB = ppu.gpr[op.rb]; ppu.gpr[op.rd] = RA + RB; if constexpr (((Flags == has_oe) || ...)) ppu_ov_set(ppu, (RA >> 63 == RB >> 63) && (RA >> 63 != ppu.gpr[op.rd] >> 63)); if constexpr (((Flags == has_rc) || ...)) ppu_cr_set<s64>(ppu, 0, ppu.gpr[op.rd], 0); }; RETURN_(ppu, op); }
// DCBT: data cache block touch — no-op under emulation.
template <u32 Build, ppu_exec_bit... Flags> auto DCBT() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](auto&&, auto) { }; RETURN_(ppu, op); }
// LHZX: load halfword and zero indexed.
template <u32 Build, ppu_exec_bit... Flags> auto LHZX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_feed_data>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb]; ppu.gpr[op.rd] = ppu_feed_data<u16, Flags...>(ppu, addr); }; RETURN_(ppu, op); }
// EQV: ra = ~(rs ^ rb) (equivalence).
template <u32 Build, ppu_exec_bit... Flags> auto EQV() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { ppu.gpr[op.ra] = ~(ppu.gpr[op.rs] ^ ppu.gpr[op.rb]); if constexpr (((Flags == has_rc) || ...)) ppu_cr_set<s64>(ppu, 0, ppu.gpr[op.ra], 0); }; RETURN_(ppu, op); }
// ECIWX: external control in word — unimplemented, always throws.
template <u32 Build, ppu_exec_bit... Flags> auto ECIWX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](auto&&, auto) { fmt::throw_exception("ECIWX"); }; RETURN_(ppu, op); }
// LHZUX: load halfword and zero with update (body continues on the next line of this collapsed chunk).
template <u32 Build, ppu_exec_bit... Flags> auto LHZUX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_feed_data>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = op.ra ?
ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb]; ppu.gpr[op.rd] = ppu_feed_data<u16, Flags...>(ppu, addr); ppu.gpr[op.ra] = addr; }; RETURN_(ppu, op); } template <u32 Build, ppu_exec_bit... Flags> auto XOR() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { ppu.gpr[op.ra] = ppu.gpr[op.rs] ^ ppu.gpr[op.rb]; if constexpr (((Flags == has_rc) || ...)) ppu_cr_set<s64>(ppu, 0, ppu.gpr[op.ra], 0); }; RETURN_(ppu, op); } template <u32 Build, ppu_exec_bit... Flags> auto MFSPR() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u32 n = (op.spr >> 5) | ((op.spr & 0x1f) << 5); switch (n) { case 0x001: ppu.gpr[op.rd] = u32{ppu.xer.so} << 31 | ppu.xer.ov << 30 | ppu.xer.ca << 29 | ppu.xer.cnt; break; case 0x008: ppu.gpr[op.rd] = ppu.lr; break; case 0x009: ppu.gpr[op.rd] = ppu.ctr; break; case 0x100: ppu.gpr[op.rd] = ppu.vrsave; break; case 0x10C: ppu.gpr[op.rd] = get_timebased_time(); break; case 0x10D: ppu.gpr[op.rd] = get_timebased_time() >> 32; break; default: fmt::throw_exception("MFSPR 0x%x", n); } }; RETURN_(ppu, op); } template <u32 Build, ppu_exec_bit... Flags> auto LWAX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_feed_data>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb]; ppu.gpr[op.rd] = ppu_feed_data<s32, Flags...>(ppu, addr); }; RETURN_(ppu, op); } template <u32 Build, ppu_exec_bit... Flags> auto DST() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](auto&&, auto) { }; RETURN_(ppu, op); } template <u32 Build, ppu_exec_bit... 
Flags> auto LHAX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_feed_data>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb]; ppu.gpr[op.rd] = ppu_feed_data<s16, Flags...>(ppu, addr); }; RETURN_(ppu, op); } template <u32 Build, ppu_exec_bit... Flags> auto LVXL() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_feed_data>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = (op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb]) & ~0xfull; ppu.vr[op.vd] = ppu_feed_data<v128, Flags...>(ppu, addr); }; RETURN_(ppu, op); } template <u32 Build, ppu_exec_bit... Flags> auto MFTB() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u32 n = (op.spr >> 5) | ((op.spr & 0x1f) << 5); switch (n) { case 0x10C: ppu.gpr[op.rd] = get_timebased_time(); break; case 0x10D: ppu.gpr[op.rd] = get_timebased_time() >> 32; break; default: fmt::throw_exception("MFTB 0x%x", n); } }; RETURN_(ppu, op); } template <u32 Build, ppu_exec_bit... Flags> auto LWAUX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_feed_data>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb]; ppu.gpr[op.rd] = ppu_feed_data<s32, Flags...>(ppu, addr); ppu.gpr[op.ra] = addr; }; RETURN_(ppu, op); } template <u32 Build, ppu_exec_bit... Flags> auto DSTST() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](auto&&, auto) { }; RETURN_(ppu, op); } template <u32 Build, ppu_exec_bit... 
Flags> auto LHAUX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_feed_data>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb]; ppu.gpr[op.rd] = ppu_feed_data<s16, Flags...>(ppu, addr); ppu.gpr[op.ra] = addr; }; RETURN_(ppu, op); } template <u32 Build, ppu_exec_bit... Flags> auto STHX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb]; vm::write16(vm::cast(addr), static_cast<u16>(ppu.gpr[op.rs])); }; RETURN_(ppu, op); } template <u32 Build, ppu_exec_bit... Flags> auto ORC() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { ppu.gpr[op.ra] = ppu.gpr[op.rs] | ~ppu.gpr[op.rb]; if constexpr (((Flags == has_rc) || ...)) ppu_cr_set<s64>(ppu, 0, ppu.gpr[op.ra], 0); }; RETURN_(ppu, op); } template <u32 Build, ppu_exec_bit... Flags> auto ECOWX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](auto&&, auto) { fmt::throw_exception("ECOWX"); }; RETURN_(ppu, op); } template <u32 Build, ppu_exec_bit... Flags> auto STHUX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = ppu.gpr[op.ra] + ppu.gpr[op.rb]; vm::write16(vm::cast(addr), static_cast<u16>(ppu.gpr[op.rs])); ppu.gpr[op.ra] = addr; }; RETURN_(ppu, op); } template <u32 Build, ppu_exec_bit... 
Flags> auto OR() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { ppu.gpr[op.ra] = ppu.gpr[op.rs] | ppu.gpr[op.rb]; if constexpr (((Flags == has_rc) || ...)) ppu_cr_set<s64>(ppu, 0, ppu.gpr[op.ra], 0); }; RETURN_(ppu, op); } template <u32 Build, ppu_exec_bit... Flags> auto DIVDU() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 RA = ppu.gpr[op.ra]; const u64 RB = ppu.gpr[op.rb]; ppu.gpr[op.rd] = RB == 0 ? 0 : RA / RB; if constexpr (((Flags == has_oe) || ...)) ppu_ov_set(ppu, RB == 0); if constexpr (((Flags == has_rc) || ...)) ppu_cr_set<s64>(ppu, 0, ppu.gpr[op.rd], 0); }; RETURN_(ppu, op); } template <u32 Build, ppu_exec_bit... Flags> auto DIVWU() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u32 RA = static_cast<u32>(ppu.gpr[op.ra]); const u32 RB = static_cast<u32>(ppu.gpr[op.rb]); ppu.gpr[op.rd] = RB == 0 ? 0 : RA / RB; if constexpr (((Flags == has_oe) || ...)) ppu_ov_set(ppu, RB == 0); if constexpr (((Flags == has_rc) || ...)) ppu_cr_set<s64>(ppu, 0, ppu.gpr[op.rd], 0); }; RETURN_(ppu, op); } template <u32 Build, ppu_exec_bit... 
// (completes the template header begun on the previous line)
// mtspr — Move To Special-Purpose Register. The 10-bit SPR field is encoded split/swapped in
// the opcode, hence the 5-bit halves exchange below. Only XER (1), LR (8), CTR (9) and
// VRSAVE (256) are supported; anything else throws.
Flags> auto MTSPR() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u32 n = (op.spr >> 5) | ((op.spr & 0x1f) << 5); switch (n) { case 0x001: { const u64 value = ppu.gpr[op.rs]; ppu.xer.so = (value & 0x80000000) != 0; ppu.xer.ov = (value & 0x40000000) != 0; ppu.xer.ca = (value & 0x20000000) != 0; ppu.xer.cnt = value & 0x7f; break; } case 0x008: ppu.lr = ppu.gpr[op.rs]; break; case 0x009: ppu.ctr = ppu.gpr[op.rs]; break; case 0x100: ppu.vrsave = static_cast<u32>(ppu.gpr[op.rs]); break; default: fmt::throw_exception("MTSPR 0x%x", n); } }; RETURN_(ppu, op); }
// dcbi — Data Cache Block Invalidate: no-op here (no data cache is emulated).
template <u32 Build, ppu_exec_bit... Flags> auto DCBI() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](auto&&, auto) { }; RETURN_(ppu, op); }
// nand — ra = ~(rs & rb); CR0 update when has_rc.
template <u32 Build, ppu_exec_bit... Flags> auto NAND() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { ppu.gpr[op.ra] = ~(ppu.gpr[op.rs] & ppu.gpr[op.rb]); if constexpr (((Flags == has_rc) || ...)) ppu_cr_set<s64>(ppu, 0, ppu.gpr[op.ra], 0); }; RETURN_(ppu, op); }
// stvxl — Store Vector Indexed LRU: EA forced to 16-byte alignment, whole v128 stored.
template <u32 Build, ppu_exec_bit... Flags> auto STVXL() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = (op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb]) & ~0xfull; vm::_ref<v128>(vm::cast(addr)) = ppu.vr[op.vs]; }; RETURN_(ppu, op); }
// divd — signed 64-bit divide. Overflow cases (RB == 0, or INT64_MIN / -1 which traps on x86)
// yield 0 and, with has_oe, set OV.
template <u32 Build, ppu_exec_bit... Flags> auto DIVD() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const s64 RA = ppu.gpr[op.ra]; const s64 RB = ppu.gpr[op.rb]; const bool o = RB == 0 || (RA == INT64_MIN && RB == -1); ppu.gpr[op.rd] = o ? 0 : RA / RB; if constexpr (((Flags == has_oe) || ...)) ppu_ov_set(ppu, o); if constexpr (((Flags == has_rc) || ...)) ppu_cr_set<s64>(ppu, 0, ppu.gpr[op.rd], 0); }; RETURN_(ppu, op); }
// divw — signed 32-bit divide on the low words, same overflow handling as divd (INT32_MIN / -1).
template <u32 Build, ppu_exec_bit... Flags> auto DIVW() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const s32 RA = static_cast<s32>(ppu.gpr[op.ra]); const s32 RB = static_cast<s32>(ppu.gpr[op.rb]); const bool o = RB == 0 || (RA == INT32_MIN && RB == -1); ppu.gpr[op.rd] = o ? 0 : static_cast<u32>(RA / RB); if constexpr (((Flags == has_oe) || ...)) ppu_ov_set(ppu, o); if constexpr (((Flags == has_rc) || ...)) ppu_cr_set<s64>(ppu, 0, ppu.gpr[op.rd], 0); }; RETURN_(ppu, op); }
// lvlx — Load Vector Left Indexed: reads the aligned 16 bytes containing EA, then shifts left
// by the misalignment so the byte at EA lands in the vector's most significant position.
template <u32 Build, ppu_exec_bit... Flags> auto LVLX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_feed_data>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb]; const u128 data = ppu_feed_data<u128, Flags...>(ppu, addr & -16); ppu.vr[op.vd] = data << ((addr & 15) * 8); }; RETURN_(ppu, op); }
// ldbrx — Load Doubleword Byte-Reverse Indexed: little-endian 64-bit load via le_t.
template <u32 Build, ppu_exec_bit... Flags> auto LDBRX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_feed_data>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb]; ppu.gpr[op.rd] = ppu_feed_data<le_t<u64>, Flags...>(ppu, addr); }; RETURN_(ppu, op); }
// lswx — Load String Word Indexed: XER[cnt] bytes into consecutive GPRs (wrapping 31 -> 0);
// note `op` is a by-value copy, so bumping op.rd here is a local register cursor only.
// A trailing partial word is packed big-endian-style into the high bytes (the 3 ^ byte swizzle).
template <u32 Build, ppu_exec_bit... Flags> auto LSWX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_feed_data>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { u64 addr = op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb]; u32 count = ppu.xer.cnt & 0x7f; for (; count >= 4; count -= 4, addr += 4, op.rd = (op.rd + 1) & 31) { ppu.gpr[op.rd] = ppu_feed_data<u32, Flags...>(ppu, addr); } if (count) { u32 value = 0; for (u32 byte = 0; byte < count; byte++) { u32 byte_value = ppu_feed_data<u8, Flags...>(ppu, addr + byte); value |= byte_value << ((3 ^ byte) * 8); } ppu.gpr[op.rd] = value; } }; RETURN_(ppu, op); }
// lwbrx — Load Word Byte-Reverse Indexed: little-endian 32-bit load.
template <u32 Build, ppu_exec_bit... Flags> auto LWBRX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_feed_data>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb]; ppu.gpr[op.rd] = ppu_feed_data<le_t<u32>, Flags...>(ppu, addr); }; RETURN_(ppu, op); }
// lfsx — Load Floating-point Single Indexed: f32 load widened into the f64 FPR.
template <u32 Build, ppu_exec_bit... Flags> auto LFSX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_feed_data>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb]; ppu.fpr[op.frd] = ppu_feed_data<f32, Flags...>(ppu, addr); }; RETURN_(ppu, op); }
// srw — Shift Right Word: low 32 bits of rs shifted by rb[0:5]; shifts of 32..63 yield 0
// because the 64-bit value is masked to 32 bits before the (up to 63-bit) shift.
template <u32 Build, ppu_exec_bit... Flags> auto SRW() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { ppu.gpr[op.ra] = (ppu.gpr[op.rs] & 0xffffffff) >> (ppu.gpr[op.rb] & 0x3f); if constexpr (((Flags == has_rc) || ...)) ppu_cr_set<s64>(ppu, 0, ppu.gpr[op.ra], 0); }; RETURN_(ppu, op); }
// srd — Shift Right Doubleword: 7-bit shift amount; bit 6 set (n >= 64) forces a zero result.
template <u32 Build, ppu_exec_bit... Flags> auto SRD() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u32 n = ppu.gpr[op.rb] & 0x7f; ppu.gpr[op.ra] = n & 0x40 ? 0 : ppu.gpr[op.rs] >> n; if constexpr (((Flags == has_rc) || ...)) ppu_cr_set<s64>(ppu, 0, ppu.gpr[op.ra], 0); }; RETURN_(ppu, op); }
// lvrx — Load Vector Right Indexed: complement of lvlx; the double shift (>> then >> 8)
// avoids an undefined 128-bit shift by 128 when (addr & 15) == 0.
template <u32 Build, ppu_exec_bit... Flags> auto LVRX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_feed_data>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb];
// Read from instruction address if offset is 0, this prevents accessing potentially bad memory from addr (because no actual memory is dereferenced)
const u128 data = ppu_feed_data<u128, Flags...>(ppu, ((addr & 15) == 0 ? ppu.cia : addr) & -16); ppu.vr[op.vd] = data >> ((~addr & 15) * 8) >> 8; }; RETURN_(ppu, op); }
// lswi — Load String Word Immediate: like lswx but count comes from the opcode (0 means 32),
// base is ra|0, and the trailing partial word fills from the most significant byte down.
template <u32 Build, ppu_exec_bit... Flags> auto LSWI() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_feed_data>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { u64 addr = op.ra ? ppu.gpr[op.ra] : 0; u64 N = op.rb ? op.rb : 32; u8 reg = op.rd; while (N > 0) { if (N > 3) { ppu.gpr[reg] = ppu_feed_data<u32, Flags...>(ppu, addr); addr += 4; N -= 4; } else { u32 buf = 0; u32 i = 3; while (N > 0) { N = N - 1; buf |= ppu_feed_data<u8, Flags...>(ppu, addr) << (i * 8); addr++; i--; } ppu.gpr[reg] = buf; } reg = (reg + 1) % 32; } }; RETURN_(ppu, op); }
// lfsux — Load Floating-point Single with Update Indexed: f32 load, then ra = EA.
template <u32 Build, ppu_exec_bit... Flags> auto LFSUX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_feed_data>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = ppu.gpr[op.ra] + ppu.gpr[op.rb]; ppu.fpr[op.frd] = ppu_feed_data<f32, Flags...>(ppu, addr); ppu.gpr[op.ra] = addr; }; RETURN_(ppu, op); }
// (template header continues on the next line)
template <u32 Build, ppu_exec_bit...
// (completes the template header begun on the previous line)
// sync — memory barrier; mapped to a host sequentially-consistent fence.
Flags> auto SYNC() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](auto&&, auto) { atomic_fence_seq_cst(); }; RETURN_(ppu, op); }
// lfdx — Load Floating-point Double Indexed.
template <u32 Build, ppu_exec_bit... Flags> auto LFDX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_feed_data>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb]; ppu.fpr[op.frd] = ppu_feed_data<f64, Flags...>(ppu, addr); }; RETURN_(ppu, op); }
// lfdux — Load Floating-point Double with Update Indexed: ra = EA afterwards.
template <u32 Build, ppu_exec_bit... Flags> auto LFDUX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_feed_data>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = ppu.gpr[op.ra] + ppu.gpr[op.rb]; ppu.fpr[op.frd] = ppu_feed_data<f64, Flags...>(ppu, addr); ppu.gpr[op.ra] = addr; }; RETURN_(ppu, op); }
// stvlx — Store Vector Left Indexed: writes the first (16 - misalignment) bytes of the vector
// starting at EA; u8r looks like a byte-reversed view of the vector — NOTE(review): confirm
// u8r ordering against the v128 definition.
template <u32 Build, ppu_exec_bit... Flags> auto STVLX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](auto&& s, ppu_opcode_t op, auto&& a, auto&& b) { const u64 addr = op.ra ? a + b : b; const u32 tail = u32(addr & 15); u8* ptr = vm::_ptr<u8>(addr); for (u32 j = 0; j < 16 - tail; j++) ptr[j] = s.u8r[j]; }; RETURN_(ppu.vr[op.vs], op, ppu.gpr[op.ra], ppu.gpr[op.rb]); }
// stdbrx — Store Doubleword Byte-Reverse Indexed: little-endian 64-bit store via le_t.
template <u32 Build, ppu_exec_bit... Flags> auto STDBRX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb]; vm::_ref<le_t<u64>>(vm::cast(addr)) = ppu.gpr[op.rs]; }; RETURN_(ppu, op); }
// stswx — Store String Word Indexed: mirror of lswx; `op` is a by-value copy so op.rs acts as
// a local register cursor; trailing bytes come from the value's high bytes first.
template <u32 Build, ppu_exec_bit... Flags> auto STSWX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { u64 addr = op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb]; u32 count = ppu.xer.cnt & 0x7F; for (; count >= 4; count -= 4, addr += 4, op.rs = (op.rs + 1) & 31) { vm::write32(vm::cast(addr), static_cast<u32>(ppu.gpr[op.rs])); } if (count) { u32 value = static_cast<u32>(ppu.gpr[op.rs]); for (u32 byte = 0; byte < count; byte++) { u8 byte_value = static_cast<u8>(value >> ((3 ^ byte) * 8)); vm::write8(vm::cast(addr + byte), byte_value); } } }; RETURN_(ppu, op); }
// stwbrx — Store Word Byte-Reverse Indexed: little-endian 32-bit store.
template <u32 Build, ppu_exec_bit... Flags> auto STWBRX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb]; vm::_ref<le_t<u32>>(vm::cast(addr)) = static_cast<u32>(ppu.gpr[op.rs]); }; RETURN_(ppu, op); }
// stfsx — Store Floating-point Single Indexed: f64 FPR narrowed to f32 and stored.
template <u32 Build, ppu_exec_bit... Flags> auto STFSX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb]; vm::_ref<f32>(vm::cast(addr)) = static_cast<float>(ppu.fpr[op.frs]); }; RETURN_(ppu, op); }
// stvrx — Store Vector Right Indexed: writes the last `tail` vector bytes into the 16 bytes
// preceding EA (ptr indexes from addr - 16).
template <u32 Build, ppu_exec_bit... Flags> auto STVRX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](auto&& s, ppu_opcode_t op, auto&& a, auto&& b) { const u64 addr = op.ra ? a + b : b; const u32 tail = u32(addr & 15); u8* ptr = vm::_ptr<u8>(addr - 16); for (u32 i = 15; i > 15 - tail; i--) ptr[i] = s.u8r[i]; }; RETURN_(ppu.vr[op.vs], op, ppu.gpr[op.ra], ppu.gpr[op.rb]); }
// (template header continues on the next line)
template <u32 Build, ppu_exec_bit...
// (completes the template header begun on the previous line)
// stfsux — Store Floating-point Single with Update Indexed: narrow to f32, store, ra = EA.
Flags> auto STFSUX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = ppu.gpr[op.ra] + ppu.gpr[op.rb]; vm::_ref<f32>(vm::cast(addr)) = static_cast<float>(ppu.fpr[op.frs]); ppu.gpr[op.ra] = addr; }; RETURN_(ppu, op); }
// stswi — Store String Word Immediate: count from opcode (0 means 32), base ra|0; trailing
// bytes are emitted most-significant first by left-shifting the buffer.
template <u32 Build, ppu_exec_bit... Flags> auto STSWI() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { u64 addr = op.ra ? ppu.gpr[op.ra] : 0; u64 N = op.rb ? op.rb : 32; u8 reg = op.rd; while (N > 0) { if (N > 3) { vm::write32(vm::cast(addr), static_cast<u32>(ppu.gpr[reg])); addr += 4; N -= 4; } else { u32 buf = static_cast<u32>(ppu.gpr[reg]); while (N > 0) { N = N - 1; vm::write8(vm::cast(addr), (0xFF000000 & buf) >> 24); buf <<= 8; addr++; } } reg = (reg + 1) % 32; } }; RETURN_(ppu, op); }
// stfdx — Store Floating-point Double Indexed.
template <u32 Build, ppu_exec_bit... Flags> auto STFDX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb]; vm::_ref<f64>(vm::cast(addr)) = ppu.fpr[op.frs]; }; RETURN_(ppu, op); }
// stfdux — Store Floating-point Double with Update Indexed: ra = EA afterwards.
template <u32 Build, ppu_exec_bit... Flags> auto STFDUX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = ppu.gpr[op.ra] + ppu.gpr[op.rb]; vm::_ref<f64>(vm::cast(addr)) = ppu.fpr[op.frs]; ppu.gpr[op.ra] = addr; }; RETURN_(ppu, op); }
// lvlxl — LRU variant of lvlx; cache hints are not emulated so it simply aliases lvlx.
template <u32 Build, ppu_exec_bit... Flags> auto LVLXL() { return LVLX<Build, Flags...>(); }
// lhbrx — Load Halfword Byte-Reverse Indexed: little-endian 16-bit load.
template <u32 Build, ppu_exec_bit... Flags> auto LHBRX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_feed_data>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb]; ppu.gpr[op.rd] = ppu_feed_data<le_t<u16>, Flags...>(ppu, addr); }; RETURN_(ppu, op); }
// sraw — Shift Right Algebraic Word: shifts >= 32 replicate the sign bit (0 - (RS < 0) is
// 0 or all-ones); XER[CA] is set only when a negative value sheds 1-bits (inexact shift).
template <u32 Build, ppu_exec_bit... Flags> auto SRAW() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { s32 RS = static_cast<s32>(ppu.gpr[op.rs]); u8 shift = ppu.gpr[op.rb] & 63; if (shift > 31) { ppu.gpr[op.ra] = 0 - (RS < 0); ppu.xer.ca = (RS < 0); } else { ppu.gpr[op.ra] = RS >> shift; ppu.xer.ca = (RS < 0) && ((ppu.gpr[op.ra] << shift) != static_cast<u64>(RS)); } if constexpr (((Flags == has_rc) || ...)) ppu_cr_set<s64>(ppu, 0, ppu.gpr[op.ra], 0); }; RETURN_(ppu, op); }
// srad — 64-bit counterpart of sraw (7-bit shift amount, >= 64 replicates the sign bit).
template <u32 Build, ppu_exec_bit... Flags> auto SRAD() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { s64 RS = ppu.gpr[op.rs]; u8 shift = ppu.gpr[op.rb] & 127; if (shift > 63) { ppu.gpr[op.ra] = 0 - (RS < 0); ppu.xer.ca = (RS < 0); } else { ppu.gpr[op.ra] = RS >> shift; ppu.xer.ca = (RS < 0) && ((ppu.gpr[op.ra] << shift) != static_cast<u64>(RS)); } if constexpr (((Flags == has_rc) || ...)) ppu_cr_set<s64>(ppu, 0, ppu.gpr[op.ra], 0); }; RETURN_(ppu, op); }
// lvrxl — LRU variant of lvrx; aliases lvrx (no cache hints emulated).
template <u32 Build, ppu_exec_bit... Flags> auto LVRXL() { return LVRX<Build, Flags...>(); }
// dss — Data Stream Stop: stream prefetch hints are ignored; no-op.
template <u32 Build, ppu_exec_bit... Flags> auto DSS() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](auto&&, auto) { }; RETURN_(ppu, op); }
// (template header continues on the next line)
template <u32 Build, ppu_exec_bit...
// (completes the template header begun on the previous line)
// srawi — Shift Right Algebraic Word Immediate; CA set when a negative value loses 1-bits.
Flags> auto SRAWI() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { s32 RS = static_cast<u32>(ppu.gpr[op.rs]); ppu.gpr[op.ra] = RS >> op.sh32; ppu.xer.ca = (RS < 0) && (static_cast<u32>(ppu.gpr[op.ra] << op.sh32) != static_cast<u32>(RS)); if constexpr (((Flags == has_rc) || ...)) ppu_cr_set<s64>(ppu, 0, ppu.gpr[op.ra], 0); }; RETURN_(ppu, op); }
// sradi — Shift Right Algebraic Doubleword Immediate (6-bit sh field).
template <u32 Build, ppu_exec_bit... Flags> auto SRADI() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { auto sh = op.sh64; s64 RS = ppu.gpr[op.rs]; ppu.gpr[op.ra] = RS >> sh; ppu.xer.ca = (RS < 0) && ((ppu.gpr[op.ra] << sh) != static_cast<u64>(RS)); if constexpr (((Flags == has_rc) || ...)) ppu_cr_set<s64>(ppu, 0, ppu.gpr[op.ra], 0); }; RETURN_(ppu, op); }
// eieio — Enforce In-order Execution of I/O: mapped to a host seq-cst fence.
template <u32 Build, ppu_exec_bit... Flags> auto EIEIO() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](auto&&, auto) { atomic_fence_seq_cst(); }; RETURN_(ppu, op); }
// stvlxl — LRU variant of stvlx; aliases stvlx (no cache hints emulated).
template <u32 Build, ppu_exec_bit... Flags> auto STVLXL() { return STVLX<Build, Flags...>(); }
// sthbrx — Store Halfword Byte-Reverse Indexed: little-endian 16-bit store.
template <u32 Build, ppu_exec_bit... Flags> auto STHBRX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb]; vm::_ref<le_t<u16>>(vm::cast(addr)) = static_cast<u16>(ppu.gpr[op.rs]); }; RETURN_(ppu, op); }
// (template header continues on the next line)
template <u32 Build, ppu_exec_bit...
// (completes the template header begun on the previous line)
// extsh — Extend Sign Halfword: ra = sign-extended low 16 bits of rs.
Flags> auto EXTSH() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { ppu.gpr[op.ra] = static_cast<s16>(ppu.gpr[op.rs]); if constexpr (((Flags == has_rc) || ...)) ppu_cr_set<s64>(ppu, 0, ppu.gpr[op.ra], 0); }; RETURN_(ppu, op); }
// stvrxl — LRU variant of stvrx; aliases stvrx (no cache hints emulated).
template <u32 Build, ppu_exec_bit... Flags> auto STVRXL() { return STVRX<Build, Flags...>(); }
// extsb — Extend Sign Byte: ra = sign-extended low 8 bits of rs.
template <u32 Build, ppu_exec_bit... Flags> auto EXTSB() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { ppu.gpr[op.ra] = static_cast<s8>(ppu.gpr[op.rs]); if constexpr (((Flags == has_rc) || ...)) ppu_cr_set<s64>(ppu, 0, ppu.gpr[op.ra], 0); }; RETURN_(ppu, op); }
// stfiwx — Store Floating-point as Integer Word Indexed: stores the low 32 raw bits of the FPR.
template <u32 Build, ppu_exec_bit... Flags> auto STFIWX() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb]; vm::write32(vm::cast(addr), static_cast<u32>(std::bit_cast<u64>(ppu.fpr[op.frs]))); }; RETURN_(ppu, op); }
// extsw — Extend Sign Word: ra = sign-extended low 32 bits of rs.
template <u32 Build, ppu_exec_bit... Flags> auto EXTSW() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { ppu.gpr[op.ra] = static_cast<s32>(ppu.gpr[op.rs]); if constexpr (((Flags == has_rc) || ...)) ppu_cr_set<s64>(ppu, 0, ppu.gpr[op.ra], 0); }; RETURN_(ppu, op); }
// icbi — Instruction Cache Block Invalidate: no-op (no instruction cache emulated).
template <u32 Build, ppu_exec_bit... Flags> auto ICBI() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](auto&&, auto) { }; RETURN_(ppu, op); }
// dcbz — Data Cache Block Zero: clears the 128-byte line containing EA; with accurate cache
// line stores enabled it goes through the atomic 128-byte store path instead of memset.
template <u32 Build, ppu_exec_bit... Flags> auto DCBZ() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = op.ra ? ppu.gpr[op.ra] + ppu.gpr[op.rb] : ppu.gpr[op.rb]; const u32 addr0 = vm::cast(addr) & ~127; if (g_cfg.core.accurate_cache_line_stores) { alignas(64) static constexpr u8 zero_buf[128]{}; do_cell_atomic_128_store(addr0, zero_buf); return; } std::memset(vm::base(addr0), 0, 128); }; RETURN_(ppu, op); }
// lwz — Load Word and Zero (D-form). NOTE(review): `op.ra || 1` is always true, so the EA is
// always gpr[ra] + simm16 even for ra == 0 where the ISA specifies a base of 0; the same
// pattern recurs across the D-form handlers below, so it appears deliberate — confirm.
template <u32 Build, ppu_exec_bit... Flags> auto LWZ() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_feed_data>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = op.ra || 1 ? ppu.gpr[op.ra] + op.simm16 : op.simm16; ppu.gpr[op.rd] = ppu_feed_data<u32, Flags...>(ppu, addr); }; RETURN_(ppu, op); }
// lwzu — Load Word and Zero with Update: ra = EA afterwards.
template <u32 Build, ppu_exec_bit... Flags> auto LWZU() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_feed_data>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = ppu.gpr[op.ra] + op.simm16; ppu.gpr[op.rd] = ppu_feed_data<u32, Flags...>(ppu, addr); ppu.gpr[op.ra] = addr; }; RETURN_(ppu, op); }
// lbz — Load Byte and Zero (same always-true `op.ra || 1` pattern as lwz).
template <u32 Build, ppu_exec_bit... Flags> auto LBZ() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_feed_data>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = op.ra || 1 ? ppu.gpr[op.ra] + op.simm16 : op.simm16; ppu.gpr[op.rd] = ppu_feed_data<u8, Flags...>(ppu, addr); }; RETURN_(ppu, op); }
// lbzu — Load Byte and Zero with Update: ra = EA afterwards.
template <u32 Build, ppu_exec_bit... Flags> auto LBZU() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_feed_data>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = ppu.gpr[op.ra] + op.simm16; ppu.gpr[op.rd] = ppu_feed_data<u8, Flags...>(ppu, addr); ppu.gpr[op.ra] = addr; }; RETURN_(ppu, op); }
// (template header continues on the next line)
template <u32 Build, ppu_exec_bit...
// (completes the template header begun on the previous line)
// stw — Store Word (D-form; same always-true `op.ra || 1` EA pattern as lwz — see note there).
Flags> auto STW() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = op.ra || 1 ? ppu.gpr[op.ra] + op.simm16 : op.simm16; const u32 value = static_cast<u32>(ppu.gpr[op.rs]); vm::write32(vm::cast(addr), value);
// Insomniac engine v3 & v4 (newer R&C, Fuse, Resistance 3): a plain store of the magic value
// 0xAAAAAAAA also bumps the reservation so spinning SPU/PPU waiters observe the update.
if (value == 0xAAAAAAAA) [[unlikely]] { vm::reservation_update(vm::cast(addr)); } }; RETURN_(ppu, op); }
// stwu — Store Word with Update: ra = EA afterwards.
template <u32 Build, ppu_exec_bit... Flags> auto STWU() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = ppu.gpr[op.ra] + op.simm16; vm::write32(vm::cast(addr), static_cast<u32>(ppu.gpr[op.rs])); ppu.gpr[op.ra] = addr; }; RETURN_(ppu, op); }
// stb — Store Byte (D-form).
template <u32 Build, ppu_exec_bit... Flags> auto STB() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = op.ra || 1 ? ppu.gpr[op.ra] + op.simm16 : op.simm16; vm::write8(vm::cast(addr), static_cast<u8>(ppu.gpr[op.rs])); }; RETURN_(ppu, op); }
// stbu — Store Byte with Update: ra = EA afterwards.
template <u32 Build, ppu_exec_bit... Flags> auto STBU() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = ppu.gpr[op.ra] + op.simm16; vm::write8(vm::cast(addr), static_cast<u8>(ppu.gpr[op.rs])); ppu.gpr[op.ra] = addr; }; RETURN_(ppu, op); }
// lhz — Load Halfword and Zero (D-form).
template <u32 Build, ppu_exec_bit... Flags> auto LHZ() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_feed_data>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = op.ra || 1 ? ppu.gpr[op.ra] + op.simm16 : op.simm16; ppu.gpr[op.rd] = ppu_feed_data<u16, Flags...>(ppu, addr); }; RETURN_(ppu, op); }
// (template header continues on the next line)
template <u32 Build, ppu_exec_bit...
// (completes the template header begun on the previous line)
// lhzu — Load Halfword and Zero with Update: ra = EA afterwards.
Flags> auto LHZU() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_feed_data>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = ppu.gpr[op.ra] + op.simm16; ppu.gpr[op.rd] = ppu_feed_data<u16, Flags...>(ppu, addr); ppu.gpr[op.ra] = addr; }; RETURN_(ppu, op); }
// lha — Load Halfword Algebraic: sign-extending 16-bit load (always-true `op.ra || 1` pattern).
template <u32 Build, ppu_exec_bit... Flags> auto LHA() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_feed_data>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = op.ra || 1 ? ppu.gpr[op.ra] + op.simm16 : op.simm16; ppu.gpr[op.rd] = ppu_feed_data<s16, Flags...>(ppu, addr); }; RETURN_(ppu, op); }
// lhau — Load Halfword Algebraic with Update: ra = EA afterwards.
template <u32 Build, ppu_exec_bit... Flags> auto LHAU() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_feed_data>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = ppu.gpr[op.ra] + op.simm16; ppu.gpr[op.rd] = ppu_feed_data<s16, Flags...>(ppu, addr); ppu.gpr[op.ra] = addr; }; RETURN_(ppu, op); }
// sth — Store Halfword (D-form).
template <u32 Build, ppu_exec_bit... Flags> auto STH() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = op.ra || 1 ? ppu.gpr[op.ra] + op.simm16 : op.simm16; vm::write16(vm::cast(addr), static_cast<u16>(ppu.gpr[op.rs])); }; RETURN_(ppu, op); }
// sthu — Store Halfword with Update: ra = EA afterwards.
template <u32 Build, ppu_exec_bit... Flags> auto STHU() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = ppu.gpr[op.ra] + op.simm16; vm::write16(vm::cast(addr), static_cast<u16>(ppu.gpr[op.rs])); ppu.gpr[op.ra] = addr; }; RETURN_(ppu, op); }
// (template header continues on the next line)
template <u32 Build, ppu_exec_bit...
// (completes the template header begun on the previous line)
// lmw — Load Multiple Word: loads gpr[rd..31] from consecutive words starting at ra|0 + simm16.
Flags> auto LMW() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_feed_data>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { u64 addr = op.ra ? ppu.gpr[op.ra] + op.simm16 : op.simm16; for (u32 i = op.rd; i<32; ++i, addr += 4) { ppu.gpr[i] = ppu_feed_data<u32, Flags...>(ppu, addr); } }; RETURN_(ppu, op); }
// stmw — Store Multiple Word: stores gpr[rs..31] to consecutive words starting at ra|0 + simm16.
template <u32 Build, ppu_exec_bit... Flags> auto STMW() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { u64 addr = op.ra ? ppu.gpr[op.ra] + op.simm16 : op.simm16; for (u32 i = op.rs; i<32; ++i, addr += 4) { vm::write32(vm::cast(addr), static_cast<u32>(ppu.gpr[i])); } }; RETURN_(ppu, op); }
// lfs — Load Floating-point Single (D-form; always-true `op.ra || 1` EA pattern — see lwz note).
template <u32 Build, ppu_exec_bit... Flags> auto LFS() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_feed_data>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = op.ra || 1 ? ppu.gpr[op.ra] + op.simm16 : op.simm16; ppu.fpr[op.frd] = ppu_feed_data<f32, Flags...>(ppu, addr); }; RETURN_(ppu, op); }
// lfsu — Load Floating-point Single with Update: ra = EA afterwards.
template <u32 Build, ppu_exec_bit... Flags> auto LFSU() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_feed_data>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = ppu.gpr[op.ra] + op.simm16; ppu.fpr[op.frd] = ppu_feed_data<f32, Flags...>(ppu, addr); ppu.gpr[op.ra] = addr; }; RETURN_(ppu, op); }
// lfd — Load Floating-point Double (D-form).
template <u32 Build, ppu_exec_bit... Flags> auto LFD() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_feed_data>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = op.ra || 1 ? ppu.gpr[op.ra] + op.simm16 : op.simm16; ppu.fpr[op.frd] = ppu_feed_data<f64, Flags...>(ppu, addr); }; RETURN_(ppu, op); }
// (template header continues on the next line)
template <u32 Build, ppu_exec_bit...
// (completes the template header begun on the previous line)
// lfdu — Load Floating-point Double with Update: ra = EA afterwards.
Flags> auto LFDU() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_feed_data>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = ppu.gpr[op.ra] + op.simm16; ppu.fpr[op.frd] = ppu_feed_data<f64, Flags...>(ppu, addr); ppu.gpr[op.ra] = addr; }; RETURN_(ppu, op); }
// stfs — Store Floating-point Single: FPR narrowed to f32 (always-true `op.ra || 1` pattern).
template <u32 Build, ppu_exec_bit... Flags> auto STFS() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = op.ra || 1 ? ppu.gpr[op.ra] + op.simm16 : op.simm16; vm::_ref<f32>(vm::cast(addr)) = static_cast<float>(ppu.fpr[op.frs]); }; RETURN_(ppu, op); }
// stfsu — Store Floating-point Single with Update: ra = EA afterwards.
template <u32 Build, ppu_exec_bit... Flags> auto STFSU() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = ppu.gpr[op.ra] + op.simm16; vm::_ref<f32>(vm::cast(addr)) = static_cast<float>(ppu.fpr[op.frs]); ppu.gpr[op.ra] = addr; }; RETURN_(ppu, op); }
// stfd — Store Floating-point Double (D-form).
template <u32 Build, ppu_exec_bit... Flags> auto STFD() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = op.ra || 1 ? ppu.gpr[op.ra] + op.simm16 : op.simm16; vm::_ref<f64>(vm::cast(addr)) = ppu.fpr[op.frs]; }; RETURN_(ppu, op); }
// stfdu — Store Floating-point Double with Update: ra = EA afterwards.
template <u32 Build, ppu_exec_bit... Flags> auto STFDU() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = ppu.gpr[op.ra] + op.simm16; vm::_ref<f64>(vm::cast(addr)) = ppu.fpr[op.frs]; ppu.gpr[op.ra] = addr; }; RETURN_(ppu, op); }
// (template header continues on the next line)
template <u32 Build, ppu_exec_bit...
// (completes the template header begun on the previous line)
// ld — Load Doubleword (DS-form: the low two displacement bits are opcode bits, hence & ~3;
// unlike the D-form group above, ra == 0 correctly contributes a base of 0 here).
Flags> auto LD() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_feed_data>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = (op.simm16 & ~3) + (op.ra ? ppu.gpr[op.ra] : 0); ppu.gpr[op.rd] = ppu_feed_data<u64, Flags...>(ppu, addr); }; RETURN_(ppu, op); }
// ldu — Load Doubleword with Update: ra = EA afterwards (ISA requires ra != 0).
template <u32 Build, ppu_exec_bit... Flags> auto LDU() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_feed_data>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = ppu.gpr[op.ra] + (op.simm16 & ~3); ppu.gpr[op.rd] = ppu_feed_data<u64, Flags...>(ppu, addr); ppu.gpr[op.ra] = addr; }; RETURN_(ppu, op); }
// lwa — Load Word Algebraic (DS-form): sign-extending 32-bit load.
template <u32 Build, ppu_exec_bit... Flags> auto LWA() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_feed_data>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = (op.simm16 & ~3) + (op.ra ? ppu.gpr[op.ra] : 0); ppu.gpr[op.rd] = ppu_feed_data<s32, Flags...>(ppu, addr); }; RETURN_(ppu, op); }
// std — Store Doubleword (DS-form).
template <u32 Build, ppu_exec_bit... Flags> auto STD() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = (op.simm16 & ~3) + (op.ra ? ppu.gpr[op.ra] : 0); vm::write64(vm::cast(addr), ppu.gpr[op.rs]); }; RETURN_(ppu, op); }
// stdu — Store Doubleword with Update: ra = EA afterwards.
template <u32 Build, ppu_exec_bit... Flags> auto STDU() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const u64 addr = ppu.gpr[op.ra] + (op.simm16 & ~3); vm::write64(vm::cast(addr), ppu.gpr[op.rs]); ppu.gpr[op.ra] = addr; }; RETURN_(ppu, op); }
// (template header continues on the next line)
template <u32 Build, ppu_exec_bit...
Flags> auto FDIVS() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<set_fpcc>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { ppu.fpr[op.frd] = f32(ppu.fpr[op.fra] / ppu.fpr[op.frb]); ppu_set_fpcc<Flags...>(ppu, ppu.fpr[op.frd], 0.); }; RETURN_(ppu, op); } template <u32 Build, ppu_exec_bit... Flags> auto FSUBS() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<set_fpcc>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { ppu.fpr[op.frd] = f32(ppu.fpr[op.fra] - ppu.fpr[op.frb]); ppu_set_fpcc<Flags...>(ppu, ppu.fpr[op.frd], 0.); }; RETURN_(ppu, op); } template <u32 Build, ppu_exec_bit... Flags> auto FADDS() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<set_fpcc>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { ppu.fpr[op.frd] = f32(ppu.fpr[op.fra] + ppu.fpr[op.frb]); ppu_set_fpcc<Flags...>(ppu, ppu.fpr[op.frd], 0.); }; RETURN_(ppu, op); } template <u32 Build, ppu_exec_bit... Flags> auto FSQRTS() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<set_fpcc>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { ppu.fpr[op.frd] = f32(std::sqrt(ppu.fpr[op.frb])); ppu_set_fpcc<Flags...>(ppu, ppu.fpr[op.frd], 0.); }; RETURN_(ppu, op); } template <u32 Build, ppu_exec_bit... Flags> auto FRES() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<set_fpcc>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { const f64 a = ppu.fpr[op.frb]; const u64 b = std::bit_cast<u64>(a); const u64 e = (b >> 52) & 0x7ff; // double exp const u64 i = (b >> 45) & 0x7f; // mantissa LUT index const u64 r = e >= (0x3ff + 0x80) ? 0 : (0x7ff - 2 - e) << 52 | u64{ppu_fres_mantissas[i]} << (32 - 3); ppu.fpr[op.frd] = f32(std::bit_cast<f64>(a == a ? 
(b & 0x8000'0000'0000'0000) | r : (0x8'0000'0000'0000 | b))); ppu_set_fpcc<Flags...>(ppu, ppu.fpr[op.frd], 0.); }; RETURN_(ppu, op); } template <u32 Build, ppu_exec_bit... Flags> auto FMULS() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<set_fpcc>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { ppu.fpr[op.frd] = f32(ppu.fpr[op.fra] * ppu.fpr[op.frc]); ppu_set_fpcc<Flags...>(ppu, ppu.fpr[op.frd], 0.); }; RETURN_(ppu, op); } template <u32 Build, ppu_exec_bit... Flags> auto FMADDS() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<set_fpcc>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { if constexpr (((Flags == use_dfma) || ...)) ppu.fpr[op.frd] = f32(std::fma(ppu.fpr[op.fra], ppu.fpr[op.frc], ppu.fpr[op.frb])); else ppu.fpr[op.frd] = f32(ppu.fpr[op.fra] * ppu.fpr[op.frc] + ppu.fpr[op.frb]); ppu_set_fpcc<Flags...>(ppu, ppu.fpr[op.frd], 0.); }; RETURN_(ppu, op); } template <u32 Build, ppu_exec_bit... Flags> auto FMSUBS() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<set_fpcc, use_dfma>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { if constexpr (((Flags == use_dfma) || ...)) ppu.fpr[op.frd] = f32(std::fma(ppu.fpr[op.fra], ppu.fpr[op.frc], -ppu.fpr[op.frb])); else ppu.fpr[op.frd] = f32(ppu.fpr[op.fra] * ppu.fpr[op.frc] - ppu.fpr[op.frb]); ppu_set_fpcc<Flags...>(ppu, ppu.fpr[op.frd], 0.); }; RETURN_(ppu, op); } template <u32 Build, ppu_exec_bit... 
Flags>
auto FNMSUBS()
{
	// Floating Negative Multiply-Subtract Single: frd = f32(-(fra * frc - frb))
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<set_fpcc, use_dfma>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		// use_dfma: single-rounding fused path matching real hardware accuracy
		if constexpr (((Flags == use_dfma) || ...))
			ppu.fpr[op.frd] = f32(-std::fma(ppu.fpr[op.fra], ppu.fpr[op.frc], -ppu.fpr[op.frb]));
		else
			ppu.fpr[op.frd] = f32(-(ppu.fpr[op.fra] * ppu.fpr[op.frc] - ppu.fpr[op.frb]));

		ppu_set_fpcc<Flags...>(ppu, ppu.fpr[op.frd], 0.);
	};
	RETURN_(ppu, op);
}

// Floating Negative Multiply-Add Single: frd = f32(-(fra * frc + frb))
template <u32 Build, ppu_exec_bit... Flags>
auto FNMADDS()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<set_fpcc, use_dfma>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		if constexpr (((Flags == use_dfma) || ...))
			ppu.fpr[op.frd] = f32(-std::fma(ppu.fpr[op.fra], ppu.fpr[op.frc], ppu.fpr[op.frb]));
		else
			ppu.fpr[op.frd] = f32(-(ppu.fpr[op.fra] * ppu.fpr[op.frc] + ppu.fpr[op.frb]));

		ppu_set_fpcc<Flags...>(ppu, ppu.fpr[op.frd], 0.);
	};
	RETURN_(ppu, op);
}

// Move To FPSCR Bit 1: sets a single FPSCR bit
template <u32 Build, ppu_exec_bit... Flags>
auto MTFSB1()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		const u32 bit = op.crbd;
		// Only the FPCC bits (16..19) are actually emulated here; warn otherwise
		if (bit < 16 || bit > 19) ppu_log.warning("MTFSB1(%d)", bit);
		ppu.fpscr.bits[bit] = 1;

		// Rc form mirrors the FPCC bits into CR1
		if constexpr (((Flags == has_rc) || ...))
			ppu_cr_set(ppu, 1, ppu.fpscr.fg, ppu.fpscr.fl, ppu.fpscr.fe, ppu.fpscr.fu);
	};
	RETURN_(ppu, op);
}

// Move to Condition Register from FPSCR field
template <u32 Build, ppu_exec_bit... Flags>
auto MCRFS()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		// Only field 4 (FPCC) is emulated; other fields are copied as-is with a warning
		if (op.crfs != 4) ppu_log.warning("MCRFS(%d)", op.crfs);
		ppu.cr.fields[op.crfd] = ppu.fpscr.fields[op.crfs];
	};
	RETURN_(ppu, op);
}

// Move To FPSCR Bit 0: clears a single FPSCR bit
template <u32 Build, ppu_exec_bit... Flags>
auto MTFSB0()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		const u32 bit = op.crbd;
		if (bit < 16 || bit > 19) ppu_log.warning("MTFSB0(%d)", bit);
		ppu.fpscr.bits[bit] = 0;

		if constexpr (((Flags == has_rc) || ...))
			ppu_cr_set(ppu, 1, ppu.fpscr.fg, ppu.fpscr.fl, ppu.fpscr.fe, ppu.fpscr.fu);
	};
	RETURN_(ppu, op);
}

// Move To FPSCR Field Immediate
template <u32 Build, ppu_exec_bit... Flags>
auto MTFSFI()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		const u32 bf = op.crfd;

		if (bf != 4)
		{
			// Do nothing on non-FPCC field (TODO)
			ppu_log.warning("MTFSFI(%d)", op.crfd);
		}
		else
		{
			// Precomputed expansion of the 4-bit immediate into the
			// byte-per-flag layout used by fpscr.fields
			static constexpr std::array<u32, 16> all_values = []() -> std::array<u32, 16>
			{
				std::array<u32, 16> values{};

				for (u32 i = 0; i < values.size(); i++)
				{
					u32 value = 0, im = i;
					value |= (im & 1) << (8 * 3); im >>= 1;
					value |= (im & 1) << (8 * 2); im >>= 1;
					value |= (im & 1) << (8 * 1); im >>= 1;
					value |= (im & 1) << (8 * 0);
					values[i] = value;
				}

				return values;
			}();

			ppu.fpscr.fields[bf] = all_values[op.i];
		}

		if constexpr (((Flags == has_rc) || ...))
			ppu_cr_set(ppu, 1, ppu.fpscr.fg, ppu.fpscr.fl, ppu.fpscr.fe, ppu.fpscr.fu);
	};
	RETURN_(ppu, op);
}

// Move From FPSCR: only the FPCC bits are materialized into frd
template <u32 Build, ppu_exec_bit... Flags>
auto MFFS()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		ppu_log.warning("MFFS");
		ppu.fpr[op.frd] = std::bit_cast<f64>(u64{ppu.fpscr.fl} << 15 | u64{ppu.fpscr.fg} << 14 | u64{ppu.fpscr.fe} << 13 | u64{ppu.fpscr.fu} << 12);

		if constexpr (((Flags == has_rc) || ...))
			ppu_cr_set(ppu, 1, ppu.fpscr.fg, ppu.fpscr.fl, ppu.fpscr.fe, ppu.fpscr.fu);
	};
	RETURN_(ppu, op);
}

template <u32 Build, ppu_exec_bit...
Flags>
auto MTFSF()
{
	// Move To FPSCR Fields — not emulated beyond the optional CR1 update (warning only)
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](auto&& ppu, auto)
	{
		ppu_log.warning("MTFSF");

		if constexpr (((Flags == has_rc) || ...))
			ppu_cr_set(ppu, 1, ppu.fpscr.fg, ppu.fpscr.fl, ppu.fpscr.fe, ppu.fpscr.fu);
	};
	RETURN_(ppu, op);
}

// Floating Compare Unordered: compares fra with frb into CR field crfd (and FPCC)
template <u32 Build, ppu_exec_bit... Flags>
auto FCMPU()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		const f64 a = ppu.fpr[op.fra];
		const f64 b = ppu.fpr[op.frb];
		ppu_set_fpcc<set_fpcc, has_rc, Flags...>(ppu, a, b, op.crfd);
	};
	RETURN_(ppu, op);
}

// Floating Convert To Integer Word (current rounding), saturating
template <u32 Build, ppu_exec_bit... Flags>
auto FCTIW()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<set_fpcc>();

	static const auto exec = [](ppu_thread& ppu, auto&& d, auto&& b)
	{
#if defined(ARCH_X64)
		// XOR with the (val >= 2^31) mask turns the x86 overflow result
		// (0x80000000) into the PPC positive saturation value (0x7fffffff)
		const auto val = _mm_set_sd(b);
		const auto res = _mm_xor_si128(_mm_cvtpd_epi32(val), _mm_castpd_si128(_mm_cmpge_pd(val, _mm_set1_pd(0x80000000))));
		d = std::bit_cast<f64, s64>(_mm_cvtsi128_si32(res));
#elif defined(ARCH_ARM64)
		// NOTE(review): this path bit_casts the rounded value to f64 before the
		// saturating narrow, unlike FCTIWZ below which uses s64 — verify upstream.
		d = std::bit_cast<f64, s64>(!(b == b) ? INT32_MIN : vqmovnd_s64(std::bit_cast<f64>(vrndi_f64(std::bit_cast<float64x1_t>(b)))));
#endif
		ppu_set_fpcc<Flags...>(ppu, 0., 0.); // undefined (TODO)
	};
	RETURN_(ppu, ppu.fpr[op.frd], ppu.fpr[op.frb]);
}

// Floating Convert To Integer Word with round toward Zero, saturating
template <u32 Build, ppu_exec_bit... Flags>
auto FCTIWZ()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<set_fpcc>();

	static const auto exec = [](ppu_thread& ppu, auto&& d, auto&& b)
	{
#if defined(ARCH_X64)
		const auto val = _mm_set_sd(b);
		const auto res = _mm_xor_si128(_mm_cvttpd_epi32(val), _mm_castpd_si128(_mm_cmpge_pd(val, _mm_set1_pd(0x80000000))));
		d = std::bit_cast<f64, s64>(_mm_cvtsi128_si32(res));
#elif defined(ARCH_ARM64)
		// NaN (b != b) saturates to INT32_MIN; otherwise truncate and narrow with saturation
		d = std::bit_cast<f64, s64>(!(b == b) ? INT32_MIN : vqmovnd_s64(std::bit_cast<s64>(vcvt_s64_f64(std::bit_cast<float64x1_t>(b)))));
#endif
		ppu_set_fpcc<Flags...>(ppu, 0., 0.); // undefined (TODO)
	};
	RETURN_(ppu, ppu.fpr[op.frd], ppu.fpr[op.frb]);
}

// Floating Round to Single Precision
template <u32 Build, ppu_exec_bit... Flags>
auto FRSP()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<set_fpcc>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		ppu.fpr[op.frd] = f32(ppu.fpr[op.frb]);
		ppu_set_fpcc<Flags...>(ppu, ppu.fpr[op.frd], 0.);
	};
	RETURN_(ppu, op);
}

// Floating Divide (double): frd = fra / frb
template <u32 Build, ppu_exec_bit... Flags>
auto FDIV()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<set_fpcc>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		ppu.fpr[op.frd] = ppu.fpr[op.fra] / ppu.fpr[op.frb];
		ppu_set_fpcc<Flags...>(ppu, ppu.fpr[op.frd], 0.);
	};
	RETURN_(ppu, op);
}

// Floating Subtract (double): frd = fra - frb
template <u32 Build, ppu_exec_bit... Flags>
auto FSUB()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<set_fpcc>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		ppu.fpr[op.frd] = ppu.fpr[op.fra] - ppu.fpr[op.frb];
		ppu_set_fpcc<Flags...>(ppu, ppu.fpr[op.frd], 0.);
	};
	RETURN_(ppu, op);
}

// Floating Add (double): frd = fra + frb
template <u32 Build, ppu_exec_bit... Flags>
auto FADD()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<set_fpcc>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		ppu.fpr[op.frd] = ppu.fpr[op.fra] + ppu.fpr[op.frb];
		ppu_set_fpcc<Flags...>(ppu, ppu.fpr[op.frd], 0.);
	};
	RETURN_(ppu, op);
}

// Floating Square Root (double): frd = sqrt(frb)
template <u32 Build, ppu_exec_bit... Flags>
auto FSQRT()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<set_fpcc>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		ppu.fpr[op.frd] = std::sqrt(ppu.fpr[op.frb]);
		ppu_set_fpcc<Flags...>(ppu, ppu.fpr[op.frd], 0.);
	};
	RETURN_(ppu, op);
}

template <u32 Build, ppu_exec_bit...
Flags>
auto FSEL()
{
	// Floating Select: frd = (fra >= 0.0) ? frc : frb — no FPCC update
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		ppu.fpr[op.frd] = ppu.fpr[op.fra] >= 0.0 ? ppu.fpr[op.frc] : ppu.fpr[op.frb];

		// Rc form mirrors the FPCC bits into CR1
		if constexpr (((Flags == has_rc) || ...))
			ppu_cr_set(ppu, 1, ppu.fpscr.fg, ppu.fpscr.fl, ppu.fpscr.fe, ppu.fpscr.fu);
	};
	RETURN_(ppu, op);
}

// Floating Multiply (double): frd = fra * frc
template <u32 Build, ppu_exec_bit... Flags>
auto FMUL()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<set_fpcc>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		ppu.fpr[op.frd] = ppu.fpr[op.fra] * ppu.fpr[op.frc];
		ppu_set_fpcc<Flags...>(ppu, ppu.fpr[op.frd], 0.);
	};
	RETURN_(ppu, op);
}

// Floating Reciprocal Square Root Estimate: LUT indexed by the top 15 bits of frb
template <u32 Build, ppu_exec_bit... Flags>
auto FRSQRTE()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<set_fpcc>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		const u64 b = std::bit_cast<u64>(ppu.fpr[op.frb]);
		ppu.fpr[op.frd] = std::bit_cast<f64>(u64{ppu_frqrte_lut.data[b >> 49]} << 32);
		ppu_set_fpcc<Flags...>(ppu, ppu.fpr[op.frd], 0.);
	};
	RETURN_(ppu, op);
}

// Floating Multiply-Subtract (double): frd = fra * frc - frb
template <u32 Build, ppu_exec_bit... Flags>
auto FMSUB()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<set_fpcc, use_dfma>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		// use_dfma: single-rounding fused path matching real hardware accuracy
		if constexpr (((Flags == use_dfma) || ...))
			ppu.fpr[op.frd] = std::fma(ppu.fpr[op.fra], ppu.fpr[op.frc], -ppu.fpr[op.frb]);
		else
			ppu.fpr[op.frd] = ppu.fpr[op.fra] * ppu.fpr[op.frc] - ppu.fpr[op.frb];

		ppu_set_fpcc<Flags...>(ppu, ppu.fpr[op.frd], 0.);
	};
	RETURN_(ppu, op);
}

template <u32 Build, ppu_exec_bit...
Flags> auto FMADD() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<set_fpcc, use_dfma>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { if constexpr (((Flags == use_dfma) || ...)) ppu.fpr[op.frd] = std::fma(ppu.fpr[op.fra], ppu.fpr[op.frc], ppu.fpr[op.frb]); else ppu.fpr[op.frd] = ppu.fpr[op.fra] * ppu.fpr[op.frc] + ppu.fpr[op.frb]; ppu_set_fpcc<Flags...>(ppu, ppu.fpr[op.frd], 0.); }; RETURN_(ppu, op); } template <u32 Build, ppu_exec_bit... Flags> auto FNMSUB() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<set_fpcc, use_dfma>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { if constexpr (((Flags == use_dfma) || ...)) ppu.fpr[op.frd] = -std::fma(ppu.fpr[op.fra], ppu.fpr[op.frc], -ppu.fpr[op.frb]); else ppu.fpr[op.frd] = -(ppu.fpr[op.fra] * ppu.fpr[op.frc] - ppu.fpr[op.frb]); ppu_set_fpcc<Flags...>(ppu, ppu.fpr[op.frd], 0.); }; RETURN_(ppu, op); } template <u32 Build, ppu_exec_bit... Flags> auto FNMADD() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<set_fpcc, use_dfma>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { if constexpr (((Flags == use_dfma) || ...)) ppu.fpr[op.frd] = -std::fma(ppu.fpr[op.fra], ppu.fpr[op.frc], ppu.fpr[op.frb]); else ppu.fpr[op.frd] = -(ppu.fpr[op.fra] * ppu.fpr[op.frc] + ppu.fpr[op.frb]); ppu_set_fpcc<Flags...>(ppu, ppu.fpr[op.frd], 0.); }; RETURN_(ppu, op); } template <u32 Build, ppu_exec_bit... Flags> auto FCMPO() { return FCMPU<Build, Flags...>(); } template <u32 Build, ppu_exec_bit... Flags> auto FNEG() { if constexpr (Build == 0xf1a6) return ppu_exec_select<Flags...>::template select<use_dfma>(); static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op) { ppu.fpr[op.frd] = -ppu.fpr[op.frb]; if constexpr (((Flags == has_rc) || ...)) ppu_cr_set(ppu, 1, ppu.fpscr.fg, ppu.fpscr.fl, ppu.fpscr.fe, ppu.fpscr.fu); }; RETURN_(ppu, op); } template <u32 Build, ppu_exec_bit... 
Flags>
auto FMR()
{
	// Floating Move Register: frd = frb
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		ppu.fpr[op.frd] = ppu.fpr[op.frb];

		// Rc form mirrors the FPCC bits into CR1
		if constexpr (((Flags == has_rc) || ...))
			ppu_cr_set(ppu, 1, ppu.fpscr.fg, ppu.fpscr.fl, ppu.fpscr.fe, ppu.fpscr.fu);
	};
	RETURN_(ppu, op);
}

// Floating Negative Absolute Value: frd = -|frb|
template <u32 Build, ppu_exec_bit... Flags>
auto FNABS()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		ppu.fpr[op.frd] = -std::fabs(ppu.fpr[op.frb]);

		if constexpr (((Flags == has_rc) || ...))
			ppu_cr_set(ppu, 1, ppu.fpscr.fg, ppu.fpscr.fl, ppu.fpscr.fe, ppu.fpscr.fu);
	};
	RETURN_(ppu, op);
}

// Floating Absolute Value: frd = |frb|
template <u32 Build, ppu_exec_bit... Flags>
auto FABS()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
	{
		ppu.fpr[op.frd] = std::fabs(ppu.fpr[op.frb]);

		if constexpr (((Flags == has_rc) || ...))
			ppu_cr_set(ppu, 1, ppu.fpscr.fg, ppu.fpscr.fl, ppu.fpscr.fe, ppu.fpscr.fu);
	};
	RETURN_(ppu, op);
}

// Floating Convert To Integer Doubleword (current rounding), saturating
template <u32 Build, ppu_exec_bit... Flags>
auto FCTID()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<set_fpcc>();

	static const auto exec = [](ppu_thread& ppu, auto&& d, auto&& b)
	{
#if defined(ARCH_X64)
		// XOR with the (val >= 2^63) mask converts the x86 overflow result into
		// the PPC positive saturation value
		const auto val = _mm_set_sd(b);
		const auto res = _mm_xor_si128(_mm_set1_epi64x(_mm_cvtsd_si64(val)), _mm_castpd_si128(_mm_cmpge_pd(val, _mm_set1_pd(f64(1ull << 63)))));
		d = std::bit_cast<f64>(_mm_cvtsi128_si64(res));
#elif defined(ARCH_ARM64)
		// NaN (b != b) saturates to INT64_MIN; otherwise round to integral then convert
		d = std::bit_cast<f64, s64>(!(b == b) ? f64{INT64_MIN} : std::bit_cast<f64>(vrndi_f64(std::bit_cast<float64x1_t>(b))));
#endif
		ppu_set_fpcc<Flags...>(ppu, 0., 0.); // undefined (TODO)
	};
	RETURN_(ppu, ppu.fpr[op.frd], ppu.fpr[op.frb]);
}

// Floating Convert To Integer Doubleword with round toward Zero, saturating
template <u32 Build, ppu_exec_bit... Flags>
auto FCTIDZ()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<set_fpcc>();

	static const auto exec = [](ppu_thread& ppu, auto&& d, auto&& b)
	{
#if defined(ARCH_X64)
		const auto val = _mm_set_sd(b);
		const auto res = _mm_xor_si128(_mm_set1_epi64x(_mm_cvttsd_si64(val)), _mm_castpd_si128(_mm_cmpge_pd(val, _mm_set1_pd(f64(1ull << 63)))));
		d = std::bit_cast<f64>(_mm_cvtsi128_si64(res));
#elif defined(ARCH_ARM64)
		d = std::bit_cast<f64>(!(b == b) ? int64x1_t{INT64_MIN} : vcvt_s64_f64(std::bit_cast<float64x1_t>(b)));
#endif
		ppu_set_fpcc<Flags...>(ppu, 0., 0.); // undefined (TODO)
	};
	RETURN_(ppu, ppu.fpr[op.frd], ppu.fpr[op.frb]);
}

// Floating Convert From Integer Doubleword: frd = f64(s64(frb bits))
template <u32 Build, ppu_exec_bit... Flags>
auto FCFID()
{
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<set_fpcc>();

	static const auto exec = [](ppu_thread& ppu, auto&& d, auto&& b)
	{
		f64 r = static_cast<f64>(std::bit_cast<s64>(b));
		d = r;
		ppu_set_fpcc<Flags...>(ppu, r, 0.);
	};
	RETURN_(ppu, ppu.fpr[op.frd], ppu.fpr[op.frb]);
}

template <u32 Build, ppu_exec_bit...
Flags>
auto UNK()
{
	// Fallback for unknown/illegal opcodes. At Build == 0 the returned function
	// also services HLE entries: addresses inside the ppu_function_manager area
	// (cia % 8 == 4) dispatch to the registered HLE function instead of faulting.
	if constexpr (Build == 0xf1a6)
		return ppu_exec_select<Flags...>::template select<>();

	if constexpr (Build == 0)
	{
		return +[](ppu_thread& ppu, ppu_opcode_t op, be_t<u32>* this_op, ppu_intrp_func*)
		{
			const u32 old_cia = ppu.cia;
			ppu.cia = vm::get_addr(this_op);
			ppu.exec_bytes += ppu.cia - old_cia;

			// HLE function index
			const u32 index = (ppu.cia - g_fxo->get<ppu_function_manager>().addr) / 8;
			const auto& hle_funcs = ppu_function_manager::get();

			if (ppu.cia % 8 == 4 && index < hle_funcs.size())
			{
				return hle_funcs[index](ppu, op, this_op, nullptr);
			}

			fmt::throw_exception("Unknown/Illegal opcode: 0x%08x at 0x%x", op.opcode, ppu.cia);
		};
	}
}

// Table of interpreter entry points, one member per decoded instruction form.
// IT is the function-pointer/handler type; trailing '_' marks Rc (dot) forms,
// 'O' marks OE (overflow) forms. NOTE: this struct continues past the end of
// this chunk's line range (closed further below).
template <typename IT>
struct ppu_interpreter_t
{
	// Vector (VMX/AltiVec) handlers
	IT MFVSCR; IT MTVSCR; IT VADDCUW; IT VADDFP; IT VADDSBS; IT VADDSHS; IT VADDSWS; IT VADDUBM; IT VADDUBS; IT VADDUHM;
	IT VADDUHS; IT VADDUWM; IT VADDUWS; IT VAND; IT VANDC; IT VAVGSB; IT VAVGSH; IT VAVGSW; IT VAVGUB; IT VAVGUH;
	IT VAVGUW; IT VCFSX; IT VCFUX; IT VCMPBFP; IT VCMPBFP_; IT VCMPEQFP; IT VCMPEQFP_; IT VCMPEQUB; IT VCMPEQUB_;
	IT VCMPEQUH; IT VCMPEQUH_; IT VCMPEQUW; IT VCMPEQUW_; IT VCMPGEFP; IT VCMPGEFP_; IT VCMPGTFP; IT VCMPGTFP_;
	IT VCMPGTSB; IT VCMPGTSB_; IT VCMPGTSH; IT VCMPGTSH_; IT VCMPGTSW; IT VCMPGTSW_; IT VCMPGTUB; IT VCMPGTUB_;
	IT VCMPGTUH; IT VCMPGTUH_; IT VCMPGTUW; IT VCMPGTUW_; IT VCTSXS; IT VCTUXS; IT VEXPTEFP; IT VLOGEFP; IT VMADDFP;
	IT VMAXFP; IT VMAXSB; IT VMAXSH; IT VMAXSW; IT VMAXUB; IT VMAXUH; IT VMAXUW; IT VMHADDSHS; IT VMHRADDSHS;
	IT VMINFP; IT VMINSB; IT VMINSH; IT VMINSW; IT VMINUB; IT VMINUH; IT VMINUW; IT VMLADDUHM; IT VMRGHB; IT VMRGHH;
	IT VMRGHW; IT VMRGLB; IT VMRGLH; IT VMRGLW; IT VMSUMMBM; IT VMSUMSHM; IT VMSUMSHS; IT VMSUMUBM; IT VMSUMUHM;
	IT VMSUMUHS; IT VMULESB; IT VMULESH; IT VMULEUB; IT VMULEUH; IT VMULOSB; IT VMULOSH; IT VMULOUB; IT VMULOUH;
	IT VNMSUBFP; IT VNOR; IT VOR; IT VPERM; IT VPKPX; IT VPKSHSS; IT VPKSHUS; IT VPKSWSS; IT VPKSWUS; IT VPKUHUM;
	IT VPKUHUS; IT VPKUWUM; IT VPKUWUS; IT VREFP; IT VRFIM; IT VRFIN; IT
	VRFIP; IT VRFIZ; IT VRLB; IT VRLH; IT VRLW; IT VRSQRTEFP; IT VSEL; IT VSL; IT VSLB;
	// VSLDOI_ is specialized per shift amount; VSLDOI itself stays unused (zero-init)
	IT VSLDOI{}; IT VSLDOI_[16];
	IT VSLH; IT VSLO; IT VSLW; IT VSPLTB; IT VSPLTH; IT VSPLTISB; IT VSPLTISH; IT VSPLTISW; IT VSPLTW; IT VSR;
	IT VSRAB; IT VSRAH; IT VSRAW; IT VSRB; IT VSRH; IT VSRO; IT VSRW; IT VSUBCUW; IT VSUBFP; IT VSUBSBS;
	IT VSUBSHS; IT VSUBSWS; IT VSUBUBM; IT VSUBUBS; IT VSUBUHM; IT VSUBUHS; IT VSUBUWM; IT VSUBUWS; IT VSUMSWS;
	IT VSUM2SWS; IT VSUM4SBS; IT VSUM4SHS; IT VSUM4UBS; IT VUPKHPX; IT VUPKHSB; IT VUPKHSH; IT VUPKLPX;
	IT VUPKLSB; IT VUPKLSH; IT VXOR;
	// Scalar fixed-point, branch, CR and memory handlers
	IT TDI; IT TWI; IT MULLI; IT SUBFIC; IT CMPLI; IT CMPI; IT ADDIC; IT ADDI; IT ADDIS; IT BC; IT SC; IT B;
	IT MCRF; IT BCLR; IT CRNOR; IT CRANDC; IT ISYNC; IT CRXOR; IT CRNAND; IT CRAND; IT CREQV; IT CRORC; IT CROR;
	IT BCCTR; IT RLWIMI; IT RLWINM; IT RLWNM; IT ORI; IT ORIS; IT XORI; IT XORIS; IT ANDI; IT ANDIS; IT RLDICL;
	IT RLDICR; IT RLDIC; IT RLDIMI; IT RLDCL; IT RLDCR; IT CMP; IT TW; IT LVSL; IT LVEBX; IT SUBFC; IT ADDC;
	IT MULHDU; IT MULHWU;
	// MFOCRF_ is specialized per CR field index; MFOCRF itself stays unused (zero-init)
	IT MFOCRF{}; IT MFOCRF_[8]; IT MFCR; //+
	IT LWARX; IT LDX; IT LWZX; IT SLW; IT CNTLZW; IT SLD; IT AND; IT CMPL; IT LVSR; IT LVEHX; IT SUBF; IT LDUX;
	IT DCBST; IT LWZUX; IT CNTLZD; IT ANDC; IT TD; IT LVEWX; IT MULHD; IT MULHW; IT LDARX; IT DCBF; IT LBZX;
	IT LVX; IT NEG; IT LBZUX; IT NOR; IT STVEBX; IT SUBFE; IT ADDE; IT MTOCRF; IT STDX; IT STWCX; IT STWX;
	IT STVEHX; IT STDUX; IT STWUX; IT STVEWX; IT SUBFZE; IT ADDZE; IT STDCX; IT STBX; IT STVX; IT SUBFME;
	IT MULLD; IT ADDME; IT MULLW; IT DCBTST; IT STBUX; IT ADD; IT DCBT; IT LHZX; IT EQV; IT ECIWX; IT LHZUX;
	IT XOR; IT MFSPR; IT LWAX; IT DST; IT LHAX; IT LVXL; IT MFTB; IT LWAUX; IT DSTST; IT LHAUX; IT STHX; IT ORC;
	IT ECOWX; IT STHUX; IT OR; IT DIVDU; IT DIVWU; IT MTSPR; IT DCBI; IT NAND; IT STVXL; IT DIVD; IT DIVW;
	IT LVLX; IT LDBRX; IT LSWX; IT LWBRX; IT LFSX; IT SRW; IT SRD; IT LVRX; IT LSWI; IT LFSUX; IT SYNC; IT LFDX;
	IT LFDUX; IT STVLX; IT STDBRX; IT STSWX; IT STWBRX; IT STFSX; IT STVRX; IT STFSUX; IT STSWI; IT STFDX;
	IT STFDUX; IT LVLXL; IT LHBRX; IT SRAW; IT SRAD; IT LVRXL; IT DSS; IT SRAWI; IT SRADI; IT EIEIO; IT STVLXL;
	IT STHBRX; IT EXTSH; IT STVRXL; IT EXTSB; IT STFIWX; IT EXTSW; IT ICBI; IT DCBZ; IT LWZ; IT LWZU; IT LBZ;
	IT LBZU; IT STW; IT STWU; IT STB; IT STBU; IT LHZ; IT LHZU; IT LHA; IT LHAU; IT STH; IT STHU; IT LMW;
	IT STMW; IT LFS; IT LFSU; IT LFD; IT LFDU; IT STFS; IT STFSU; IT STFD; IT STFDU; IT LD; IT LDU; IT LWA;
	IT STD; IT STDU;
	// Floating-point handlers
	IT FDIVS; IT FSUBS; IT FADDS; IT FSQRTS; IT FRES; IT FMULS; IT FMADDS; IT FMSUBS; IT FNMSUBS; IT FNMADDS;
	IT MTFSB1; IT MCRFS; IT MTFSB0; IT MTFSFI; IT MFFS; IT MTFSF; IT FCMPU; IT FRSP; IT FCTIW; IT FCTIWZ;
	IT FDIV; IT FSUB; IT FADD; IT FSQRT; IT FSEL; IT FMUL; IT FRSQRTE; IT FMSUB; IT FMADD; IT FNMSUB; IT FNMADD;
	IT FCMPO; IT FNEG; IT FMR; IT FNABS; IT FABS; IT FCTID; IT FCTIDZ; IT FCFID; IT UNK;
	// OE (overflow-enabled) forms
	IT SUBFCO; IT ADDCO; IT SUBFO; IT NEGO; IT SUBFEO; IT ADDEO; IT SUBFZEO; IT ADDZEO; IT SUBFMEO; IT MULLDO;
	IT ADDMEO; IT MULLWO; IT ADDO; IT DIVDUO; IT DIVWUO; IT DIVDO; IT DIVWO;
	// OE + Rc forms
	IT SUBFCO_; IT ADDCO_; IT SUBFO_; IT NEGO_; IT SUBFEO_; IT ADDEO_; IT SUBFZEO_; IT ADDZEO_; IT SUBFMEO_;
	IT MULLDO_; IT ADDMEO_; IT MULLWO_; IT ADDO_; IT DIVDUO_; IT DIVWUO_; IT DIVDO_; IT DIVWO_;
	// Rc (dot) forms
	IT RLWIMI_; IT RLWINM_; IT RLWNM_; IT RLDICL_; IT RLDICR_; IT RLDIC_; IT RLDIMI_; IT RLDCL_; IT RLDCR_;
	IT SUBFC_; IT MULHDU_; IT ADDC_; IT MULHWU_; IT SLW_; IT CNTLZW_; IT SLD_; IT AND_; IT SUBF_; IT CNTLZD_;
	IT ANDC_; IT MULHD_; IT MULHW_; IT NEG_; IT NOR_; IT SUBFE_; IT ADDE_; IT SUBFZE_; IT ADDZE_; IT MULLD_;
	IT SUBFME_; IT ADDME_; IT MULLW_; IT ADD_; IT EQV_; IT XOR_; IT ORC_; IT OR_; IT DIVDU_; IT DIVWU_;
	IT NAND_; IT DIVD_; IT DIVW_; IT SRW_; IT SRD_; IT SRAW_; IT SRAD_; IT SRAWI_; IT SRADI_; IT EXTSH_;
	IT EXTSB_; IT EXTSW_; IT FDIVS_; IT FSUBS_; IT FADDS_; IT FSQRTS_; IT FRES_; IT FMULS_; IT FMADDS_;
	IT FMSUBS_; IT FNMSUBS_; IT FNMADDS_; IT MTFSB1_; IT MTFSB0_; IT MTFSFI_; IT MFFS_; IT MTFSF_; IT FRSP_;
	IT FCTIW_; IT FCTIWZ_; IT FDIV_; IT FSUB_; IT FADD_; IT
	FSQRT_; IT FSEL_; IT FMUL_; IT FRSQRTE_; IT FMSUB_; IT FMADD_; IT FNMSUB_; IT FNMADD_; IT FNEG_; IT FMR_;
	IT FNABS_; IT FABS_; IT FCTID_; IT FCTIDZ_; IT FCFID_;
	/* Optimized variants */
};

// Builds the handler table: reads the emulator accuracy settings, converts them
// into a ppu_exec_bit mask, then instantiates every handler with exactly the
// flags it declares support for (via the Build == 0xf1a6 capability query).
ppu_interpreter_rt_base::ppu_interpreter_rt_base() noexcept
{
	// Obtain required set of flags from settings
	bs_t<ppu_exec_bit> selected{};
	if (g_cfg.core.ppu_set_sat_bit)
		selected += set_sat;
	if (g_cfg.core.ppu_use_nj_bit)
		selected += use_nj + fix_nj;
	if (g_cfg.core.ppu_llvm_nj_fixup)
		selected += fix_nj;
	if (g_cfg.core.ppu_set_vnan)
		selected += set_vnan + fix_vnan;
	if (g_cfg.core.ppu_fix_vnan)
		selected += fix_vnan;
	if (g_cfg.core.ppu_set_fpcc)
		selected += set_fpcc;
	if (g_cfg.core.use_accurate_dfma)
		selected += use_dfma;
	if (g_cfg.core.ppu_debug)
		selected += set_cr_stats; // TODO
	if (g_cfg.core.ppu_call_history)
		selected += set_call_history;
	if (g_cfg.core.ppu_128_reservations_loop_max_length != 0)
		selected += use_feed_data;

	if (selected & use_nj)
		ppu_log.success("Enabled: Accurate Non-Java Mode");
	else if (selected & fix_nj)
		ppu_log.success("Enabled: Non-Java Mode Fixup");
	if (selected & set_vnan)
		ppu_log.success("Enabled: Accurate VNAN");
	else if (selected & fix_vnan)
		ppu_log.success("Enabled: VNAN Fixup");
	if (selected & set_sat)
		ppu_log.success("Enabled: Accurate SAT");
	if (selected & set_fpcc)
		ppu_log.success("Enabled: Accurate FPCC");

	ptrs = std::make_unique<decltype(ptrs)::element_type>();

#ifndef __INTELLISENSE__

// Vector compare: base form + Rc (dot) form
#define INIT_VCMP(name) \
	ptrs->name = ::name<0>(); \
	ptrs->name##_ = ::name<0, has_oe>(); \

// Base form + OE (overflow) form
#define INIT_OV(name) \
	ptrs->name = ::name<0>(); \
	ptrs->name##O = ::name<0, has_oe>(); \

// Base form + Rc form, selecting supported flags at runtime
#define INIT_RC(name) \
	ptrs->name = ::name<0xf1a6>()(selected, []<ppu_exec_bit... Flags>() { \
		return ::name<0, Flags...>(); \
	}); \
	ptrs->name##_ = ::name<0xf1a6, set_fpcc>()(selected, []<ppu_exec_bit... Flags>() { \
		/* Minor optimization: has_rc implies set_fpcc so don't compile has_rc alone */ \
		return ::name<0, has_rc, Flags...>(); \
	}); \

// Base + OE + Rc + OE.Rc forms
#define INIT_RC_OV(name) \
	ptrs->name = ::name<0>(); \
	ptrs->name##O = ::name<0, has_oe>(); \
	ptrs->name##_ = ::name<0, has_rc>(); \
	ptrs->name##O_ = ::name<0, has_oe, has_rc>(); \

// Initialize instructions with their own sets of supported flags (except INIT_VCMP, INIT_OV, INIT_RC_OV)
#define INIT(name) \
	ptrs->name = ::name<0xf1a6>()(selected, []<ppu_exec_bit... Flags>() { \
		return ::name<0, Flags...>(); \
	}); \

// One entry of a per-immediate specialization array (VSLDOI_/MFOCRF_)
#define INIT_ONE(name, bits) \
	ptrs->name##_[0b##bits] = ::name<0b##bits>::select(selected, []<ppu_exec_bit... Flags>() { \
		return ::name<0b##bits>::impl<0, Flags...>(); \
	}); \

#define INIT_PACK2(name, bits) \
	INIT_ONE(name, bits##0) \
	INIT_ONE(name, bits##1) \

#define INIT_PACK4(name, bits) \
	INIT_PACK2(name, bits##0) \
	INIT_PACK2(name, bits##1) \

#define INIT_PACK8(name, bits) \
	INIT_PACK4(name, bits##0) \
	INIT_PACK4(name, bits##1) \

#define INIT_PACK16(name, bits) \
	INIT_PACK8(name, bits##0) \
	INIT_PACK8(name, bits##1) \

	// Vector (VMX) handlers
	INIT(MFVSCR); INIT(MTVSCR); INIT(VADDCUW); INIT(VADDFP); INIT(VADDSBS); INIT(VADDSHS); INIT(VADDSWS);
	INIT(VADDUBM); INIT(VADDUBS); INIT(VADDUHM); INIT(VADDUHS); INIT(VADDUWM); INIT(VADDUWS); INIT(VAND);
	INIT(VANDC); INIT(VAVGSB); INIT(VAVGSH); INIT(VAVGSW); INIT(VAVGUB); INIT(VAVGUH); INIT(VAVGUW);
	INIT(VCFSX); INIT(VCFUX); INIT_VCMP(VCMPBFP); INIT_VCMP(VCMPEQFP); INIT_VCMP(VCMPEQUB); INIT_VCMP(VCMPEQUH);
	INIT_VCMP(VCMPEQUW); INIT_VCMP(VCMPGEFP); INIT_VCMP(VCMPGTFP); INIT_VCMP(VCMPGTSB); INIT_VCMP(VCMPGTSH);
	INIT_VCMP(VCMPGTSW); INIT_VCMP(VCMPGTUB); INIT_VCMP(VCMPGTUH); INIT_VCMP(VCMPGTUW); INIT(VCTSXS);
	INIT(VCTUXS); INIT(VEXPTEFP); INIT(VLOGEFP); INIT(VMADDFP); INIT(VMAXFP); INIT(VMAXSB); INIT(VMAXSH);
	INIT(VMAXSW); INIT(VMAXUB); INIT(VMAXUH); INIT(VMAXUW); INIT(VMHADDSHS); INIT(VMHRADDSHS); INIT(VMINFP);
	INIT(VMINSB); INIT(VMINSH); INIT(VMINSW); INIT(VMINUB); INIT(VMINUH); INIT(VMINUW);
	INIT(VMLADDUHM); INIT(VMRGHB); INIT(VMRGHH); INIT(VMRGHW); INIT(VMRGLB); INIT(VMRGLH); INIT(VMRGLW);
	INIT(VMSUMMBM); INIT(VMSUMSHM); INIT(VMSUMSHS); INIT(VMSUMUBM); INIT(VMSUMUHM); INIT(VMSUMUHS);
	INIT(VMULESB); INIT(VMULESH); INIT(VMULEUB); INIT(VMULEUH); INIT(VMULOSB); INIT(VMULOSH); INIT(VMULOUB);
	INIT(VMULOUH); INIT(VNMSUBFP); INIT(VNOR); INIT(VOR); INIT(VPERM); INIT(VPKPX); INIT(VPKSHSS);
	INIT(VPKSHUS); INIT(VPKSWSS); INIT(VPKSWUS); INIT(VPKUHUM); INIT(VPKUHUS); INIT(VPKUWUM); INIT(VPKUWUS);
	INIT(VREFP); INIT(VRFIM); INIT(VRFIN); INIT(VRFIP); INIT(VRFIZ); INIT(VRLB); INIT(VRLH); INIT(VRLW);
	INIT(VRSQRTEFP); INIT(VSEL); INIT(VSL); INIT(VSLB); INIT_PACK16(VSLDOI,); INIT(VSLH); INIT(VSLO);
	INIT(VSLW); INIT(VSPLTB); INIT(VSPLTH); INIT(VSPLTISB); INIT(VSPLTISH); INIT(VSPLTISW); INIT(VSPLTW);
	INIT(VSR); INIT(VSRAB); INIT(VSRAH); INIT(VSRAW); INIT(VSRB); INIT(VSRH); INIT(VSRO); INIT(VSRW);
	INIT(VSUBCUW); INIT(VSUBFP); INIT(VSUBSBS); INIT(VSUBSHS); INIT(VSUBSWS); INIT(VSUBUBM); INIT(VSUBUBS);
	INIT(VSUBUHM); INIT(VSUBUHS); INIT(VSUBUWM); INIT(VSUBUWS); INIT(VSUMSWS); INIT(VSUM2SWS); INIT(VSUM4SBS);
	INIT(VSUM4SHS); INIT(VSUM4UBS); INIT(VUPKHPX); INIT(VUPKHSB); INIT(VUPKHSH); INIT(VUPKLPX); INIT(VUPKLSB);
	INIT(VUPKLSH); INIT(VXOR);
	// Scalar fixed-point, branch, CR and memory handlers
	INIT(TDI); INIT(TWI); INIT(MULLI); INIT(SUBFIC); INIT(CMPLI); INIT(CMPI); INIT(ADDIC); INIT(ADDI);
	INIT(ADDIS); INIT(BC); INIT(SC); INIT(B); INIT(MCRF); INIT(BCLR); INIT(CRNOR); INIT(CRANDC); INIT(ISYNC);
	INIT(CRXOR); INIT(CRNAND); INIT(CRAND); INIT(CREQV); INIT(CRORC); INIT(CROR); INIT(BCCTR); INIT_RC(RLWIMI);
	INIT_RC(RLWINM); INIT_RC(RLWNM); INIT(ORI); INIT(ORIS); INIT(XORI); INIT(XORIS); INIT(ANDI); INIT(ANDIS);
	INIT_RC(RLDICL); INIT_RC(RLDICR); INIT_RC(RLDIC); INIT_RC(RLDIMI); INIT_RC(RLDCL); INIT_RC(RLDCR);
	INIT(CMP); INIT(TW); INIT(LVSL); INIT(LVEBX); INIT_RC_OV(SUBFC); INIT_RC_OV(ADDC); INIT_RC(MULHDU);
	INIT_RC(MULHWU); INIT_PACK8(MFOCRF,); INIT(MFCR); //+
	INIT(LWARX); INIT(LDX); INIT(LWZX); INIT_RC(SLW); INIT_RC(CNTLZW); INIT_RC(SLD);
	INIT_RC(AND); INIT(CMPL); INIT(LVSR); INIT(LVEHX); INIT_RC_OV(SUBF); INIT(LDUX); INIT(DCBST); INIT(LWZUX);
	INIT_RC(CNTLZD); INIT_RC(ANDC); INIT(TD); INIT(LVEWX); INIT_RC(MULHD); INIT_RC(MULHW); INIT(LDARX);
	INIT(DCBF); INIT(LBZX); INIT(LVX); INIT_RC_OV(NEG); INIT(LBZUX); INIT_RC(NOR); INIT(STVEBX);
	INIT_OV(SUBFE); INIT_OV(ADDE); INIT(MTOCRF); INIT(STDX); INIT(STWCX); INIT(STWX); INIT(STVEHX);
	INIT(STDUX); INIT(STWUX); INIT(STVEWX); INIT_RC_OV(SUBFZE); INIT_RC_OV(ADDZE); INIT(STDCX); INIT(STBX);
	INIT(STVX); INIT_RC_OV(SUBFME); INIT_RC_OV(MULLD); INIT_RC_OV(ADDME); INIT_RC_OV(MULLW); INIT(DCBTST);
	INIT(STBUX); INIT_RC_OV(ADD); INIT(DCBT); INIT(LHZX); INIT_RC(EQV); INIT(ECIWX); INIT(LHZUX);
	INIT_RC(XOR); INIT(MFSPR); INIT(LWAX); INIT(DST); INIT(LHAX); INIT(LVXL); INIT(MFTB); INIT(LWAUX);
	INIT(DSTST); INIT(LHAUX); INIT(STHX); INIT_RC(ORC); INIT(ECOWX); INIT(STHUX); INIT_RC(OR);
	INIT_RC_OV(DIVDU); INIT_RC_OV(DIVWU); INIT(MTSPR); INIT(DCBI); INIT_RC(NAND); INIT(STVXL);
	INIT_RC_OV(DIVD); INIT_RC_OV(DIVW); INIT(LVLX); INIT(LDBRX); INIT(LSWX); INIT(LWBRX); INIT(LFSX);
	INIT_RC(SRW); INIT_RC(SRD); INIT(LVRX); INIT(LSWI); INIT(LFSUX); INIT(SYNC); INIT(LFDX); INIT(LFDUX);
	INIT(STVLX); INIT(STDBRX); INIT(STSWX); INIT(STWBRX); INIT(STFSX); INIT(STVRX); INIT(STFSUX); INIT(STSWI);
	INIT(STFDX); INIT(STFDUX); INIT(LVLXL); INIT(LHBRX); INIT_RC(SRAW); INIT_RC(SRAD); INIT(LVRXL); INIT(DSS);
	INIT_RC(SRAWI); INIT_RC(SRADI); INIT(EIEIO); INIT(STVLXL); INIT(STHBRX); INIT_RC(EXTSH); INIT(STVRXL);
	INIT_RC(EXTSB); INIT(STFIWX); INIT_RC(EXTSW); INIT(ICBI); INIT(DCBZ); INIT(LWZ); INIT(LWZU); INIT(LBZ);
	INIT(LBZU); INIT(STW); INIT(STWU); INIT(STB); INIT(STBU); INIT(LHZ); INIT(LHZU); INIT(LHA); INIT(LHAU);
	INIT(STH); INIT(STHU); INIT(LMW); INIT(STMW); INIT(LFS); INIT(LFSU); INIT(LFD); INIT(LFDU); INIT(STFS);
	INIT(STFSU); INIT(STFD); INIT(STFDU); INIT(LD); INIT(LDU); INIT(LWA); INIT(STD); INIT(STDU);
	// Floating-point handlers
	INIT_RC(FDIVS); INIT_RC(FSUBS); INIT_RC(FADDS); INIT_RC(FSQRTS); INIT_RC(FRES); INIT_RC(FMULS);
	INIT_RC(FMADDS); INIT_RC(FMSUBS); INIT_RC(FNMSUBS); INIT_RC(FNMADDS); INIT_RC(MTFSB1); INIT(MCRFS);
	INIT_RC(MTFSB0); INIT_RC(MTFSFI); INIT_RC(MFFS); INIT_RC(MTFSF); INIT(FCMPU); INIT_RC(FRSP);
	INIT_RC(FCTIW); INIT_RC(FCTIWZ); INIT_RC(FDIV); INIT_RC(FSUB); INIT_RC(FADD); INIT_RC(FSQRT);
	INIT_RC(FSEL); INIT_RC(FMUL); INIT_RC(FRSQRTE); INIT_RC(FMSUB); INIT_RC(FMADD); INIT_RC(FNMSUB);
	INIT_RC(FNMADD); INIT(FCMPO); INIT_RC(FNEG); INIT_RC(FMR); INIT_RC(FNABS); INIT_RC(FABS); INIT_RC(FCTID);
	INIT_RC(FCTIDZ); INIT_RC(FCFID); INIT(UNK);
#endif
}

ppu_interpreter_rt_base::~ppu_interpreter_rt_base()
{
}

ppu_interpreter_rt::ppu_interpreter_rt() noexcept
	: ppu_interpreter_rt_base()
	, table(*ptrs)
{
}

// Resolves one raw opcode to its interpreter entry, handling the few forms that
// need operand-dependent dispatch (per-shift VSLDOI, MFOCRF field selection)
// or early rejection of encodings that can never be valid.
ppu_intrp_func_t ppu_interpreter_rt::decode(u32 opv) const noexcept
{
	const auto op = ppu_opcode_t{opv};

	switch (g_ppu_itype.decode(opv))
	{
	case ppu_itype::LWZ:
	case ppu_itype::LBZ:
	case ppu_itype::STW:
	case ppu_itype::STB:
	case ppu_itype::LHZ:
	case ppu_itype::LHA:
	case ppu_itype::STH:
	case ppu_itype::LFS:
	case ppu_itype::LFD:
	case ppu_itype::STFS:
	case ppu_itype::STFD:
	{
		// Minor optimization: 16-bit absolute addressing never points to a valid memory
		if (!op.ra)
		{
			return [](ppu_thread&, ppu_opcode_t op, be_t<u32>*, ppu_intrp_func*)
			{
				fmt::throw_exception("Invalid instruction: %s r%d,0x%016x(r0)", g_ppu_iname.decode(op.opcode), op.rd, op.simm16);
			};
		}

		break;
	}
	case ppu_itype::VSLDOI:
		// Dispatch on the shift immediate
		return ptrs->VSLDOI_[op.vsh];
	case ppu_itype::MFOCRF:
	{
		if (op.l11)
		{
			// One-hot CRM mask selects a single CR field
			const u32 n = std::countl_zero<u32>(op.crm) & 7;

			if (0x80u >> n != op.crm)
			{
				return [](ppu_thread&, ppu_opcode_t op, be_t<u32>*, ppu_intrp_func*)
				{
					fmt::throw_exception("Invalid instruction: MFOCRF with bits 0x%x", op.crm);
				};
			}

			return ptrs->MFOCRF_[n];
		}

		return ptrs->MFCR;
	}
	default: break;
	}

	return table.decode(opv);
}
196,680
C++
.cpp
6,754
26.919455
159
0.63573
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
5,175
SPUThread.cpp
RPCS3_rpcs3/rpcs3/Emu/Cell/SPUThread.cpp
#include "stdafx.h" #include "Utilities/JIT.h" #include "Utilities/date_time.h" #include "Emu/Memory/vm.h" #include "Emu/Memory/vm_ptr.h" #include "Emu/Memory/vm_reservation.h" #include "Loader/ELF.h" #include "Emu/VFS.h" #include "Emu/IdManager.h" #include "Emu/perf_meter.hpp" #include "Emu/Cell/PPUThread.h" #include "Emu/Cell/ErrorCodes.h" #include "Emu/Cell/lv2/sys_spu.h" #include "Emu/Cell/lv2/sys_event_flag.h" #include "Emu/Cell/lv2/sys_event.h" #include "Emu/Cell/lv2/sys_interrupt.h" #include "Emu/Cell/SPUDisAsm.h" #include "Emu/Cell/SPUAnalyser.h" #include "Emu/Cell/SPUThread.h" #include "Emu/Cell/SPURecompiler.h" #include "Emu/Cell/timers.hpp" #include "Emu/RSX/Core/RSXReservationLock.hpp" #include "Emu/RSX/RSXThread.h" #include <cmath> #include <cfenv> #include <thread> #include <shared_mutex> #include <span> #include "util/vm.hpp" #include "util/asm.hpp" #include "util/v128.hpp" #include "util/simd.hpp" #include "util/sysinfo.hpp" #include "util/serialization.hpp" #if defined(ARCH_X64) #ifdef _MSC_VER #include <intrin.h> #include <immintrin.h> #else #include <x86intrin.h> #endif #endif // LUTs for SPU instructions const u32 spu_frest_fraction_lut[32] = { 0x7FFBE0, 0x7F87A6, 0x70EF72, 0x708B40, 0x638B12, 0x633AEA, 0x5792C4, 0x574AA0, 0x4CCA7E, 0x4C9262, 0x430A44, 0x42D62A, 0x3A2E12, 0x39FDFA, 0x3215E4, 0x31F1D2, 0x2AA9BE, 0x2A85AC, 0x23D59A, 0x23BD8E, 0x1D8576, 0x1D8576, 0x17AD5A, 0x17AD5A, 0x124543, 0x124543, 0x0D392D, 0x0D392D, 0x08851A, 0x08851A, 0x041D07, 0x041D07 }; const u32 spu_frest_exponent_lut[256] = { 0x7F800000, 0x7E000000, 0x7D800000, 0x7D000000, 0x7C800000, 0x7C000000, 0x7B800000, 0x7B000000, 0x7A800000, 0x7A000000, 0x79800000, 0x79000000, 0x78800000, 0x78000000, 0x77800000, 0x77000000, 0x76800000, 0x76000000, 0x75800000, 0x75000000, 0x74800000, 0x74000000, 0x73800000, 0x73000000, 0x72800000, 0x72000000, 0x71800000, 0x71000000, 0x70800000, 0x70000000, 0x6F800000, 0x6F000000, 0x6E800000, 0x6E000000, 0x6D800000, 0x6D000000, 0x6C800000, 
0x6C000000, 0x6B800000, 0x6B000000, 0x6A800000, 0x6A000000, 0x69800000, 0x69000000, 0x68800000, 0x68000000, 0x67800000, 0x67000000, 0x66800000, 0x66000000, 0x65800000, 0x65000000, 0x64800000, 0x64000000, 0x63800000, 0x63000000, 0x62800000, 0x62000000, 0x61800000, 0x61000000, 0x60800000, 0x60000000, 0x5F800000, 0x5F000000, 0x5E800000, 0x5E000000, 0x5D800000, 0x5D000000, 0x5C800000, 0x5C000000, 0x5B800000, 0x5B000000, 0x5A800000, 0x5A000000, 0x59800000, 0x59000000, 0x58800000, 0x58000000, 0x57800000, 0x57000000, 0x56800000, 0x56000000, 0x55800000, 0x55000000, 0x54800000, 0x54000000, 0x53800000, 0x53000000, 0x52800000, 0x52000000, 0x51800000, 0x51000000, 0x50800000, 0x50000000, 0x4F800000, 0x4F000000, 0x4E800000, 0x4E000000, 0x4D800000, 0x4D000000, 0x4C800000, 0x4C000000, 0x4B800000, 0x4B000000, 0x4A800000, 0x4A000000, 0x49800000, 0x49000000, 0x48800000, 0x48000000, 0x47800000, 0x47000000, 0x46800000, 0x46000000, 0x45800000, 0x45000000, 0x44800000, 0x44000000, 0x43800000, 0x43000000, 0x42800000, 0x42000000, 0x41800000, 0x41000000, 0x40800000, 0x40000000, 0x3F800000, 0x3F000000, 0x3E800000, 0x3E000000, 0x3D800000, 0x3D000000, 0x3C800000, 0x3C000000, 0x3B800000, 0x3B000000, 0x3A800000, 0x3A000000, 0x39800000, 0x39000000, 0x38800000, 0x38000000, 0x37800000, 0x37000000, 0x36800000, 0x36000000, 0x35800000, 0x35000000, 0x34800000, 0x34000000, 0x33800000, 0x33000000, 0x32800000, 0x32000000, 0x31800000, 0x31000000, 0x30800000, 0x30000000, 0x2F800000, 0x2F000000, 0x2E800000, 0x2E000000, 0x2D800000, 0x2D000000, 0x2C800000, 0x2C000000, 0x2B800000, 0x2B000000, 0x2A800000, 0x2A000000, 0x29800000, 0x29000000, 0x28800000, 0x28000000, 0x27800000, 0x27000000, 0x26800000, 0x26000000, 0x25800000, 0x25000000, 0x24800000, 0x24000000, 0x23800000, 0x23000000, 0x22800000, 0x22000000, 0x21800000, 0x21000000, 0x20800000, 0x20000000, 0x1F800000, 0x1F000000, 0x1E800000, 0x1E000000, 0x1D800000, 0x1D000000, 0x1C800000, 0x1C000000, 0x1B800000, 0x1B000000, 0x1A800000, 0x1A000000, 0x19800000, 
0x19000000, 0x18800000, 0x18000000, 0x17800000, 0x17000000, 0x16800000, 0x16000000, 0x15800000, 0x15000000, 0x14800000, 0x14000000, 0x13800000, 0x13000000, 0x12800000, 0x12000000, 0x11800000, 0x11000000, 0x10800000, 0x10000000, 0x0F800000, 0x0F000000, 0x0E800000, 0x0E000000, 0x0D800000, 0x0D000000, 0x0C800000, 0x0C000000, 0x0B800000, 0x0B000000, 0x0A800000, 0x0A000000, 0x09800000, 0x09000000, 0x08800000, 0x08000000, 0x07800000, 0x07000000, 0x06800000, 0x06000000, 0x05800000, 0x05000000, 0x04800000, 0x04000000, 0x03800000, 0x03000000, 0x02800000, 0x02000000, 0x01800000, 0x01000000, 0x00800000, 0x00000000, 0x00000000, 0x00000000 }; const u32 spu_frsqest_fraction_lut[64] = { 0x350160, 0x34E954, 0x2F993D, 0x2F993D, 0x2AA523, 0x2AA523, 0x26190D, 0x26190D, 0x21E4F9, 0x21E4F9, 0x1E00E9, 0x1E00E9, 0x1A5CD9, 0x1A5CD9, 0x16F8CB, 0x16F8CB, 0x13CCC0, 0x13CCC0, 0x10CCB3, 0x10CCB3, 0x0E00AA, 0x0E00AA, 0x0B58A1, 0x0B58A1, 0x08D498, 0x08D498, 0x067491, 0x067491, 0x043089, 0x043089, 0x020C83, 0x020C83, 0x7FFDF4, 0x7FD1DE, 0x7859C8, 0x783DBA, 0x71559C, 0x71559C, 0x6AE57C, 0x6AE57C, 0x64F561, 0x64F561, 0x5F7149, 0x5F7149, 0x5A4D33, 0x5A4D33, 0x55811F, 0x55811F, 0x51050F, 0x51050F, 0x4CC8FE, 0x4CC8FE, 0x48D0F0, 0x48D0F0, 0x4510E4, 0x4510E4, 0x4180D7, 0x4180D7, 0x3E24CC, 0x3E24CC, 0x3AF4C3, 0x3AF4C3, 0x37E8BA, 0x37E8BA }; const u32 spu_frsqest_exponent_lut[256] = { 0x7F800000, 0x5E800000, 0x5E800000, 0x5E000000, 0x5E000000, 0x5D800000, 0x5D800000, 0x5D000000, 0x5D000000, 0x5C800000, 0x5C800000, 0x5C000000, 0x5C000000, 0x5B800000, 0x5B800000, 0x5B000000, 0x5B000000, 0x5A800000, 0x5A800000, 0x5A000000, 0x5A000000, 0x59800000, 0x59800000, 0x59000000, 0x59000000, 0x58800000, 0x58800000, 0x58000000, 0x58000000, 0x57800000, 0x57800000, 0x57000000, 0x57000000, 0x56800000, 0x56800000, 0x56000000, 0x56000000, 0x55800000, 0x55800000, 0x55000000, 0x55000000, 0x54800000, 0x54800000, 0x54000000, 0x54000000, 0x53800000, 0x53800000, 0x53000000, 0x53000000, 0x52800000, 0x52800000, 0x52000000, 
0x52000000, 0x51800000, 0x51800000, 0x51000000, 0x51000000, 0x50800000, 0x50800000, 0x50000000, 0x50000000, 0x4F800000, 0x4F800000, 0x4F000000, 0x4F000000, 0x4E800000, 0x4E800000, 0x4E000000, 0x4E000000, 0x4D800000, 0x4D800000, 0x4D000000, 0x4D000000, 0x4C800000, 0x4C800000, 0x4C000000, 0x4C000000, 0x4B800000, 0x4B800000, 0x4B000000, 0x4B000000, 0x4A800000, 0x4A800000, 0x4A000000, 0x4A000000, 0x49800000, 0x49800000, 0x49000000, 0x49000000, 0x48800000, 0x48800000, 0x48000000, 0x48000000, 0x47800000, 0x47800000, 0x47000000, 0x47000000, 0x46800000, 0x46800000, 0x46000000, 0x46000000, 0x45800000, 0x45800000, 0x45000000, 0x45000000, 0x44800000, 0x44800000, 0x44000000, 0x44000000, 0x43800000, 0x43800000, 0x43000000, 0x43000000, 0x42800000, 0x42800000, 0x42000000, 0x42000000, 0x41800000, 0x41800000, 0x41000000, 0x41000000, 0x40800000, 0x40800000, 0x40000000, 0x40000000, 0x3F800000, 0x3F800000, 0x3F000000, 0x3F000000, 0x3E800000, 0x3E800000, 0x3E000000, 0x3E000000, 0x3D800000, 0x3D800000, 0x3D000000, 0x3D000000, 0x3C800000, 0x3C800000, 0x3C000000, 0x3C000000, 0x3B800000, 0x3B800000, 0x3B000000, 0x3B000000, 0x3A800000, 0x3A800000, 0x3A000000, 0x3A000000, 0x39800000, 0x39800000, 0x39000000, 0x39000000, 0x38800000, 0x38800000, 0x38000000, 0x38000000, 0x37800000, 0x37800000, 0x37000000, 0x37000000, 0x36800000, 0x36800000, 0x36000000, 0x36000000, 0x35800000, 0x35800000, 0x35000000, 0x35000000, 0x34800000, 0x34800000, 0x34000000, 0x34000000, 0x33800000, 0x33800000, 0x33000000, 0x33000000, 0x32800000, 0x32800000, 0x32000000, 0x32000000, 0x31800000, 0x31800000, 0x31000000, 0x31000000, 0x30800000, 0x30800000, 0x30000000, 0x30000000, 0x2F800000, 0x2F800000, 0x2F000000, 0x2F000000, 0x2E800000, 0x2E800000, 0x2E000000, 0x2E000000, 0x2D800000, 0x2D800000, 0x2D000000, 0x2D000000, 0x2C800000, 0x2C800000, 0x2C000000, 0x2C000000, 0x2B800000, 0x2B800000, 0x2B000000, 0x2B000000, 0x2A800000, 0x2A800000, 0x2A000000, 0x2A000000, 0x29800000, 0x29800000, 0x29000000, 0x29000000, 0x28800000, 
0x28800000, 0x28000000, 0x28000000, 0x27800000, 0x27800000, 0x27000000, 0x27000000, 0x26800000, 0x26800000, 0x26000000, 0x26000000, 0x25800000, 0x25800000, 0x25000000, 0x25000000, 0x24800000, 0x24800000, 0x24000000, 0x24000000, 0x23800000, 0x23800000, 0x23000000, 0x23000000, 0x22800000, 0x22800000, 0x22000000, 0x22000000, 0x21800000, 0x21800000, 0x21000000, 0x21000000, 0x20800000, 0x20800000, 0x20000000, 0x20000000, 0x1F800000, 0x1F800000, 0x1F000000 }; using spu_rdata_t = decltype(spu_thread::rdata); template <> void fmt_class_string<mfc_atomic_status>::format(std::string& out, u64 arg) { format_enum(out, arg, [](mfc_atomic_status arg) { switch (arg) { case MFC_PUTLLC_SUCCESS: return "PUTLLC"; case MFC_PUTLLC_FAILURE: return "PUTLLC-FAIL"; case MFC_PUTLLUC_SUCCESS: return "PUTLLUC"; case MFC_GETLLAR_SUCCESS: return "GETLLAR"; } return unknown; }); } template <> void fmt_class_string<mfc_tag_update>::format(std::string& out, u64 arg) { format_enum(out, arg, [](mfc_tag_update arg) { switch (arg) { case MFC_TAG_UPDATE_IMMEDIATE: return "empty"; case MFC_TAG_UPDATE_ANY: return "ANY"; case MFC_TAG_UPDATE_ALL: return "ALL"; } return unknown; }); } template <> void fmt_class_string<spu_type>::format(std::string& out, u64 arg) { format_enum(out, arg, [](spu_type arg) { switch (arg) { case spu_type::threaded: return "Threaded"; case spu_type::raw: return "Raw"; case spu_type::isolated: return "Isolated"; } return unknown; }); } template <> void fmt_class_string<spu_block_hash>::format(std::string& out, u64 arg) { fmt::append(out, "%s", fmt::base57(be_t<u64>{arg})); // Print only 7 hash characters out of 11 (which covers roughly 48 bits) out.resize(out.size() - 4); // Print chunk address from lowest 16 bits fmt::append(out, "...chunk-0x%05x", (arg & 0xffff) * 4); } enum class spu_block_hash_short : u64{}; template <> void fmt_class_string<spu_block_hash_short>::format(std::string& out, u64 arg) { fmt::append(out, "%s", fmt::base57(be_t<u64>{arg})); // Print only 7 hash 
characters out of 11 (which covers roughly 48 bits) out.resize(out.size() - 4); } // Verify AVX availability for TSX transactions static const bool s_tsx_avx = utils::has_avx(); // Threshold for when rep mosvb is expected to outperform simd copies // The threshold will be 0xFFFFFFFF when the performance of rep movsb is expected to be bad static const u32 s_rep_movsb_threshold = utils::get_rep_movsb_threshold(); #if defined(_M_X64) extern "C" void __movsb(uchar*, const uchar*, size_t); #elif defined(ARCH_X64) static FORCE_INLINE void __movsb(unsigned char * Dst, const unsigned char * Src, size_t Size) { __asm__ __volatile__ ( "rep; movsb" : [Dst] "=D" (Dst), [Src] "=S" (Src), [Size] "=c" (Size) : "[Dst]" (Dst), "[Src]" (Src), "[Size]" (Size) ); } #else #define s_rep_movsb_threshold umax #define __movsb std::memcpy #endif #if defined(ARCH_X64) static FORCE_INLINE bool cmp_rdata_avx(const __m256i* lhs, const __m256i* rhs) { #if defined(_MSC_VER) || defined(__AVX__) // Interleave 2 cache line accesses (optimization) const __m256 x0 = _mm256_xor_ps(_mm256_castsi256_ps(_mm256_loadu_si256(lhs + 0)), _mm256_castsi256_ps(_mm256_loadu_si256(rhs + 0))); const __m256 x2 = _mm256_xor_ps(_mm256_castsi256_ps(_mm256_loadu_si256(lhs + 2)), _mm256_castsi256_ps(_mm256_loadu_si256(rhs + 2))); const __m256 x1 = _mm256_xor_ps(_mm256_castsi256_ps(_mm256_loadu_si256(lhs + 1)), _mm256_castsi256_ps(_mm256_loadu_si256(rhs + 1))); const __m256 x3 = _mm256_xor_ps(_mm256_castsi256_ps(_mm256_loadu_si256(lhs + 3)), _mm256_castsi256_ps(_mm256_loadu_si256(rhs + 3))); const __m256 c0 = _mm256_or_ps(x0, x1); const __m256 c1 = _mm256_or_ps(x2, x3); const __m256 c2 = _mm256_or_ps(c0, c1); return _mm256_testz_si256(_mm256_castps_si256(c2), _mm256_castps_si256(c2)) != 0; #else bool result = 0; __asm__( "vmovups 0*32(%[lhs]), %%ymm0;" // load "vmovups 2*32(%[lhs]), %%ymm2;" "vmovups 1*32(%[lhs]), %%ymm1;" "vmovups 3*32(%[lhs]), %%ymm3;" "vxorps 0*32(%[rhs]), %%ymm0, %%ymm0;" // compare "vxorps 
2*32(%[rhs]), %%ymm2, %%ymm2;" "vxorps 1*32(%[rhs]), %%ymm1, %%ymm1;" "vxorps 3*32(%[rhs]), %%ymm3, %%ymm3;" "vorps %%ymm0, %%ymm1, %%ymm0;" // merge "vorps %%ymm2, %%ymm3, %%ymm2;" "vorps %%ymm0, %%ymm2, %%ymm0;" "vptest %%ymm0, %%ymm0;" // test "vzeroupper" : "=@ccz" (result) : [lhs] "r" (lhs) , [rhs] "r" (rhs) : "cc" // Clobber flags , "xmm0" // Clobber registers ymm0-ymm3 (see mov_rdata_avx) , "xmm1" , "xmm2" , "xmm3" ); return result; #endif } #endif #ifdef _MSC_VER __forceinline #endif extern bool cmp_rdata(const spu_rdata_t& _lhs, const spu_rdata_t& _rhs) { #if defined(ARCH_X64) #ifndef __AVX__ if (s_tsx_avx) [[likely]] #endif { return cmp_rdata_avx(reinterpret_cast<const __m256i*>(_lhs), reinterpret_cast<const __m256i*>(_rhs)); } #endif const auto lhs = reinterpret_cast<const v128*>(_lhs); const auto rhs = reinterpret_cast<const v128*>(_rhs); const v128 a = (lhs[0] ^ rhs[0]) | (lhs[1] ^ rhs[1]); const v128 c = (lhs[4] ^ rhs[4]) | (lhs[5] ^ rhs[5]); const v128 b = (lhs[2] ^ rhs[2]) | (lhs[3] ^ rhs[3]); const v128 d = (lhs[6] ^ rhs[6]) | (lhs[7] ^ rhs[7]); const v128 r = (a | b) | (c | d); return gv_testz(r); } #if defined(ARCH_X64) static FORCE_INLINE void mov_rdata_avx(__m256i* dst, const __m256i* src) { #ifdef _MSC_VER _mm256_storeu_si256(dst + 0, _mm256_loadu_si256(src + 0)); _mm256_storeu_si256(dst + 2, _mm256_loadu_si256(src + 2)); _mm256_storeu_si256(dst + 1, _mm256_loadu_si256(src + 1)); _mm256_storeu_si256(dst + 3, _mm256_loadu_si256(src + 3)); #else __asm__( "vmovdqu 0*32(%[src]), %%ymm0;" // load "vmovdqu %%ymm0, 0*32(%[dst]);" // store "vmovdqu 2*32(%[src]), %%ymm0;" "vmovdqu %%ymm0, 2*32(%[dst]);" "vmovdqu 1*32(%[src]), %%ymm0;" "vmovdqu %%ymm0, 1*32(%[dst]);" "vmovdqu 3*32(%[src]), %%ymm0;" "vmovdqu %%ymm0, 3*32(%[dst]);" #ifndef __AVX__ "vzeroupper" // Don't need in AVX mode (should be emitted automatically) #endif : : [src] "r" (src) , [dst] "r" (dst) #ifdef __AVX__ : "ymm0" // Clobber ymm0 register (acknowledge its modification) #else : 
"xmm0" // ymm0 is "unknown" if not compiled in AVX mode, so clobber xmm0 only #endif ); #endif } #endif #ifdef _MSC_VER __forceinline #endif extern void mov_rdata(spu_rdata_t& _dst, const spu_rdata_t& _src) { #if defined(ARCH_X64) #ifndef __AVX__ if (s_tsx_avx) [[likely]] #endif { mov_rdata_avx(reinterpret_cast<__m256i*>(_dst), reinterpret_cast<const __m256i*>(_src)); return; } { const __m128i v0 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(_src + 0)); const __m128i v1 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(_src + 16)); const __m128i v2 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(_src + 32)); const __m128i v3 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(_src + 48)); _mm_storeu_si128(reinterpret_cast<__m128i*>(_dst + 0), v0); _mm_storeu_si128(reinterpret_cast<__m128i*>(_dst + 16), v1); _mm_storeu_si128(reinterpret_cast<__m128i*>(_dst + 32), v2); _mm_storeu_si128(reinterpret_cast<__m128i*>(_dst + 48), v3); } const __m128i v0 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(_src + 64)); const __m128i v1 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(_src + 80)); const __m128i v2 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(_src + 96)); const __m128i v3 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(_src + 112)); _mm_storeu_si128(reinterpret_cast<__m128i*>(_dst + 64), v0); _mm_storeu_si128(reinterpret_cast<__m128i*>(_dst + 80), v1); _mm_storeu_si128(reinterpret_cast<__m128i*>(_dst + 96), v2); _mm_storeu_si128(reinterpret_cast<__m128i*>(_dst + 112), v3); #else std::memcpy(_dst, _src, 128); #endif } #if defined(ARCH_X64) static FORCE_INLINE void mov_rdata_nt_avx(__m256i* dst, const __m256i* src) { #ifdef _MSC_VER _mm256_stream_si256(dst + 0, _mm256_load_si256(src + 0)); _mm256_stream_si256(dst + 2, _mm256_load_si256(src + 2)); _mm256_stream_si256(dst + 1, _mm256_load_si256(src + 1)); _mm256_stream_si256(dst + 3, _mm256_load_si256(src + 3)); #else __asm__( "vmovdqa 0*32(%[src]), %%ymm0;" // load "vmovntdq %%ymm0, 
0*32(%[dst]);" // store "vmovdqa 2*32(%[src]), %%ymm0;" "vmovntdq %%ymm0, 2*32(%[dst]);" "vmovdqa 1*32(%[src]), %%ymm0;" "vmovntdq %%ymm0, 1*32(%[dst]);" "vmovdqa 3*32(%[src]), %%ymm0;" "vmovntdq %%ymm0, 3*32(%[dst]);" #ifndef __AVX__ "vzeroupper" // Don't need in AVX mode (should be emitted automatically) #endif : : [src] "r" (src) , [dst] "r" (dst) #ifdef __AVX__ : "ymm0" // Clobber ymm0 register (acknowledge its modification) #else : "xmm0" // ymm0 is "unknown" if not compiled in AVX mode, so clobber xmm0 only #endif ); #endif } #endif extern void mov_rdata_nt(spu_rdata_t& _dst, const spu_rdata_t& _src) { #if defined(ARCH_X64) #ifndef __AVX__ if (s_tsx_avx) [[likely]] #endif { mov_rdata_nt_avx(reinterpret_cast<__m256i*>(_dst), reinterpret_cast<const __m256i*>(_src)); return; } { const __m128i v0 = _mm_load_si128(reinterpret_cast<const __m128i*>(_src + 0)); const __m128i v1 = _mm_load_si128(reinterpret_cast<const __m128i*>(_src + 16)); const __m128i v2 = _mm_load_si128(reinterpret_cast<const __m128i*>(_src + 32)); const __m128i v3 = _mm_load_si128(reinterpret_cast<const __m128i*>(_src + 48)); _mm_stream_si128(reinterpret_cast<__m128i*>(_dst + 0), v0); _mm_stream_si128(reinterpret_cast<__m128i*>(_dst + 16), v1); _mm_stream_si128(reinterpret_cast<__m128i*>(_dst + 32), v2); _mm_stream_si128(reinterpret_cast<__m128i*>(_dst + 48), v3); } const __m128i v0 = _mm_load_si128(reinterpret_cast<const __m128i*>(_src + 64)); const __m128i v1 = _mm_load_si128(reinterpret_cast<const __m128i*>(_src + 80)); const __m128i v2 = _mm_load_si128(reinterpret_cast<const __m128i*>(_src + 96)); const __m128i v3 = _mm_load_si128(reinterpret_cast<const __m128i*>(_src + 112)); _mm_stream_si128(reinterpret_cast<__m128i*>(_dst + 64), v0); _mm_stream_si128(reinterpret_cast<__m128i*>(_dst + 80), v1); _mm_stream_si128(reinterpret_cast<__m128i*>(_dst + 96), v2); _mm_stream_si128(reinterpret_cast<__m128i*>(_dst + 112), v3); #else std::memcpy(_dst, _src, 128); #endif } #if defined(_MSC_VER) #define 
mwaitx_func #define waitpkg_func #else #define mwaitx_func __attribute__((__target__("mwaitx"))) #define waitpkg_func __attribute__((__target__("waitpkg"))) #endif #if defined(ARCH_X64) // Waits for a number of TSC clock cycles in power optimized state // Cstate is represented in bits [7:4]+1 cstate. So C0 requires bits [7:4] to be set to 0xf, C1 requires bits [7:4] to be set to 0. template <typename T, typename... Args> mwaitx_func static void __mwaitx(u32 cycles, u32 cstate, const void* cline, const Args&... args) { constexpr u32 timer_enable = 0x2; // monitorx will wake if the cache line is written to, use it for reservations which fits it almost perfectly _mm_monitorx(const_cast<void*>(cline), 0, 0); // Use static function to force inline if (T::needs_wait(args...)) { _mm_mwaitx(timer_enable, cstate, cycles); } } // First bit indicates cstate, 0x0 for C.02 state (lower power) or 0x1 for C.01 state (higher power) waitpkg_func static void __tpause(u32 cycles, u32 cstate) { const u64 tsc = utils::get_tsc() + cycles; _tpause(cstate, tsc); } #endif namespace vm { std::array<atomic_t<reservation_waiter_t>, 512> g_resrv_waiters_count{}; } void do_cell_atomic_128_store(u32 addr, const void* to_write); extern thread_local u64 g_tls_fault_spu; const spu_decoder<spu_itype> s_spu_itype; namespace vm { extern atomic_t<u64, 64> g_range_lock_set[64]; // Defined here for performance reasons writer_lock::~writer_lock() noexcept { if (range_lock) { g_range_lock_bits[1] &= ~(1ull << (range_lock - g_range_lock_set)); range_lock->release(0); return; } g_range_lock_bits[1].release(0); } } namespace spu { namespace scheduler { std::array<atomic_t<u8>, 65536> atomic_instruction_table = {}; constexpr u32 native_jiffy_duration_us = 1500; //About 1ms resolution with a half offset void acquire_pc_address(spu_thread& spu, u32 pc, u32 timeout_ms, u32 max_concurrent_instructions) { const u32 pc_offset = pc >> 2; if (atomic_instruction_table[pc_offset].observe() >= 
max_concurrent_instructions) { spu.state += cpu_flag::wait + cpu_flag::temp; if (timeout_ms > 0) { const u64 timeout = timeout_ms * 1000u; //convert to microseconds const u64 start = get_system_time(); auto remaining = timeout; while (atomic_instruction_table[pc_offset].observe() >= max_concurrent_instructions) { if (remaining >= native_jiffy_duration_us) std::this_thread::sleep_for(1ms); else std::this_thread::yield(); const auto now = get_system_time(); const auto elapsed = now - start; if (elapsed > timeout) break; remaining = timeout - elapsed; } } else { //Slight pause if function is overburdened const auto count = atomic_instruction_table[pc_offset].observe() * 100ull; busy_wait(count); } ensure(!spu.check_state()); } atomic_instruction_table[pc_offset]++; } void release_pc_address(u32 pc) { const u32 pc_offset = pc >> 2; atomic_instruction_table[pc_offset]--; } struct concurrent_execution_watchdog { u32 pc = 0; bool active = false; concurrent_execution_watchdog(spu_thread& spu) :pc(spu.pc) { if (const u32 max_concurrent_instructions = g_cfg.core.preferred_spu_threads) { acquire_pc_address(spu, pc, g_cfg.core.spu_delay_penalty, max_concurrent_instructions); active = true; } } ~concurrent_execution_watchdog() { if (active) release_pc_address(pc); } }; } } std::array<u32, 2> op_branch_targets(u32 pc, spu_opcode_t op) { std::array<u32, 2> res{spu_branch_target(pc + 4), umax}; switch (const auto type = s_spu_itype.decode(op.opcode)) { case spu_itype::BR: case spu_itype::BRA: case spu_itype::BRNZ: case spu_itype::BRZ: case spu_itype::BRHNZ: case spu_itype::BRHZ: case spu_itype::BRSL: case spu_itype::BRASL: { const int index = (type == spu_itype::BR || type == spu_itype::BRA || type == spu_itype::BRSL || type == spu_itype::BRASL ? 0 : 1); res[index] = (spu_branch_target(type == spu_itype::BRASL || type == spu_itype::BRA ? 
0 : pc, op.i16)); if (res[0] == res[1]) { res[1] = umax; } break; } case spu_itype::IRET: case spu_itype::BI: case spu_itype::BISLED: case spu_itype::BISL: case spu_itype::BIZ: case spu_itype::BINZ: case spu_itype::BIHZ: case spu_itype::BIHNZ: // TODO (detect constant address branches, such as for interrupts enable/disable pattern) case spu_itype::UNK: { res[0] = umax; break; } default: break; } return res; } const auto spu_putllc_tx = build_function_asm<u64(*)(u32 raddr, u64 rtime, void* _old, const void* _new)>("spu_putllc_tx", [](native_asm& c, auto& args) { using namespace asmjit; #if defined(ARCH_X64) Label fall = c.newLabel(); Label fail = c.newLabel(); Label _ret = c.newLabel(); Label load = c.newLabel(); //if (utils::has_avx() && !s_tsx_avx) //{ // c.vzeroupper(); //} // Create stack frame if necessary (Windows ABI has only 6 volatile vector registers) #ifdef _WIN32 c.sub(x86::rsp, 168); if (s_tsx_avx) { c.vmovups(x86::oword_ptr(x86::rsp, 0), x86::xmm6); c.vmovups(x86::oword_ptr(x86::rsp, 16), x86::xmm7); } else { c.movups(x86::oword_ptr(x86::rsp, 0), x86::xmm6); c.movups(x86::oword_ptr(x86::rsp, 16), x86::xmm7); c.movups(x86::oword_ptr(x86::rsp, 32), x86::xmm8); c.movups(x86::oword_ptr(x86::rsp, 48), x86::xmm9); c.movups(x86::oword_ptr(x86::rsp, 64), x86::xmm10); c.movups(x86::oword_ptr(x86::rsp, 80), x86::xmm11); c.movups(x86::oword_ptr(x86::rsp, 96), x86::xmm12); c.movups(x86::oword_ptr(x86::rsp, 112), x86::xmm13); c.movups(x86::oword_ptr(x86::rsp, 128), x86::xmm14); c.movups(x86::oword_ptr(x86::rsp, 144), x86::xmm15); } #endif // Prepare registers build_swap_rdx_with(c, args, x86::r10); c.mov(args[1], x86::qword_ptr(reinterpret_cast<u64>(&vm::g_sudo_addr))); c.lea(args[1], x86::qword_ptr(args[1], args[0])); c.prefetchw(x86::byte_ptr(args[1], 0)); c.prefetchw(x86::byte_ptr(args[1], 64)); c.and_(args[0].r32(), 0xff80); c.shr(args[0].r32(), 1); c.lea(x86::r11, x86::qword_ptr(reinterpret_cast<u64>(+vm::g_reservations), args[0])); // Prepare data if 
(s_tsx_avx) { c.vmovups(x86::ymm0, x86::ymmword_ptr(args[2], 0)); c.vmovups(x86::ymm1, x86::ymmword_ptr(args[2], 32)); c.vmovups(x86::ymm2, x86::ymmword_ptr(args[2], 64)); c.vmovups(x86::ymm3, x86::ymmword_ptr(args[2], 96)); c.vmovups(x86::ymm4, x86::ymmword_ptr(args[3], 0)); c.vmovups(x86::ymm5, x86::ymmword_ptr(args[3], 32)); c.vmovups(x86::ymm6, x86::ymmword_ptr(args[3], 64)); c.vmovups(x86::ymm7, x86::ymmword_ptr(args[3], 96)); } else { c.movaps(x86::xmm0, x86::oword_ptr(args[2], 0)); c.movaps(x86::xmm1, x86::oword_ptr(args[2], 16)); c.movaps(x86::xmm2, x86::oword_ptr(args[2], 32)); c.movaps(x86::xmm3, x86::oword_ptr(args[2], 48)); c.movaps(x86::xmm4, x86::oword_ptr(args[2], 64)); c.movaps(x86::xmm5, x86::oword_ptr(args[2], 80)); c.movaps(x86::xmm6, x86::oword_ptr(args[2], 96)); c.movaps(x86::xmm7, x86::oword_ptr(args[2], 112)); c.movaps(x86::xmm8, x86::oword_ptr(args[3], 0)); c.movaps(x86::xmm9, x86::oword_ptr(args[3], 16)); c.movaps(x86::xmm10, x86::oword_ptr(args[3], 32)); c.movaps(x86::xmm11, x86::oword_ptr(args[3], 48)); c.movaps(x86::xmm12, x86::oword_ptr(args[3], 64)); c.movaps(x86::xmm13, x86::oword_ptr(args[3], 80)); c.movaps(x86::xmm14, x86::oword_ptr(args[3], 96)); c.movaps(x86::xmm15, x86::oword_ptr(args[3], 112)); } // Alloc args[0] to stamp0 const auto stamp0 = args[0]; build_get_tsc(c, stamp0); Label fail2 = c.newLabel(); Label tx1 = build_transaction_enter(c, fall, [&]() { c.add(x86::qword_ptr(args[2], ::offset32(&spu_thread::ftx) - ::offset32(&spu_thread::rdata)), 1); build_get_tsc(c); c.sub(x86::rax, stamp0); c.cmp(x86::rax, x86::qword_ptr(reinterpret_cast<u64>(&g_rtm_tx_limit2))); c.jae(fall); }); // Check pause flag c.bt(x86::dword_ptr(args[2], ::offset32(&spu_thread::state) - ::offset32(&spu_thread::rdata)), static_cast<u32>(cpu_flag::pause)); c.jc(fall); c.xbegin(tx1); if (s_tsx_avx) { c.vxorps(x86::ymm0, x86::ymm0, x86::ymmword_ptr(args[1], 0)); c.vxorps(x86::ymm1, x86::ymm1, x86::ymmword_ptr(args[1], 32)); c.vxorps(x86::ymm2, x86::ymm2, 
x86::ymmword_ptr(args[1], 64)); c.vxorps(x86::ymm3, x86::ymm3, x86::ymmword_ptr(args[1], 96)); c.vorps(x86::ymm0, x86::ymm0, x86::ymm1); c.vorps(x86::ymm1, x86::ymm2, x86::ymm3); c.vorps(x86::ymm0, x86::ymm1, x86::ymm0); c.vptest(x86::ymm0, x86::ymm0); } else { c.xorps(x86::xmm0, x86::oword_ptr(args[1], 0)); c.xorps(x86::xmm1, x86::oword_ptr(args[1], 16)); c.xorps(x86::xmm2, x86::oword_ptr(args[1], 32)); c.xorps(x86::xmm3, x86::oword_ptr(args[1], 48)); c.xorps(x86::xmm4, x86::oword_ptr(args[1], 64)); c.xorps(x86::xmm5, x86::oword_ptr(args[1], 80)); c.xorps(x86::xmm6, x86::oword_ptr(args[1], 96)); c.xorps(x86::xmm7, x86::oword_ptr(args[1], 112)); c.orps(x86::xmm0, x86::xmm1); c.orps(x86::xmm2, x86::xmm3); c.orps(x86::xmm4, x86::xmm5); c.orps(x86::xmm6, x86::xmm7); c.orps(x86::xmm0, x86::xmm2); c.orps(x86::xmm4, x86::xmm6); c.orps(x86::xmm0, x86::xmm4); c.ptest(x86::xmm0, x86::xmm0); } c.jnz(fail); if (s_tsx_avx) { c.vmovaps(x86::ymmword_ptr(args[1], 0), x86::ymm4); c.vmovaps(x86::ymmword_ptr(args[1], 32), x86::ymm5); c.vmovaps(x86::ymmword_ptr(args[1], 64), x86::ymm6); c.vmovaps(x86::ymmword_ptr(args[1], 96), x86::ymm7); } else { c.movaps(x86::oword_ptr(args[1], 0), x86::xmm8); c.movaps(x86::oword_ptr(args[1], 16), x86::xmm9); c.movaps(x86::oword_ptr(args[1], 32), x86::xmm10); c.movaps(x86::oword_ptr(args[1], 48), x86::xmm11); c.movaps(x86::oword_ptr(args[1], 64), x86::xmm12); c.movaps(x86::oword_ptr(args[1], 80), x86::xmm13); c.movaps(x86::oword_ptr(args[1], 96), x86::xmm14); c.movaps(x86::oword_ptr(args[1], 112), x86::xmm15); } c.xend(); c.lock().add(x86::qword_ptr(x86::r11), 64); c.add(x86::qword_ptr(args[2], ::offset32(&spu_thread::stx) - ::offset32(&spu_thread::rdata)), 1); build_get_tsc(c); c.sub(x86::rax, stamp0); c.jmp(_ret); // XABORT is expensive so try to finish with xend instead c.bind(fail); // Load previous data to store back to rdata if (s_tsx_avx) { c.vmovaps(x86::ymm0, x86::ymmword_ptr(args[1], 0)); c.vmovaps(x86::ymm1, x86::ymmword_ptr(args[1], 
32)); c.vmovaps(x86::ymm2, x86::ymmword_ptr(args[1], 64)); c.vmovaps(x86::ymm3, x86::ymmword_ptr(args[1], 96)); } else { c.movaps(x86::xmm0, x86::oword_ptr(args[1], 0)); c.movaps(x86::xmm1, x86::oword_ptr(args[1], 16)); c.movaps(x86::xmm2, x86::oword_ptr(args[1], 32)); c.movaps(x86::xmm3, x86::oword_ptr(args[1], 48)); c.movaps(x86::xmm4, x86::oword_ptr(args[1], 64)); c.movaps(x86::xmm5, x86::oword_ptr(args[1], 80)); c.movaps(x86::xmm6, x86::oword_ptr(args[1], 96)); c.movaps(x86::xmm7, x86::oword_ptr(args[1], 112)); } c.xend(); c.add(x86::qword_ptr(args[2], ::offset32(&spu_thread::stx) - ::offset32(&spu_thread::rdata)), 1); c.jmp(fail2); c.bind(fall); c.mov(x86::rax, -1); c.jmp(_ret); c.bind(fail2); c.lock().sub(x86::qword_ptr(x86::r11), 64); c.bind(load); // Store previous data back to rdata if (s_tsx_avx) { c.vmovaps(x86::ymmword_ptr(args[2], 0), x86::ymm0); c.vmovaps(x86::ymmword_ptr(args[2], 32), x86::ymm1); c.vmovaps(x86::ymmword_ptr(args[2], 64), x86::ymm2); c.vmovaps(x86::ymmword_ptr(args[2], 96), x86::ymm3); } else { c.movaps(x86::oword_ptr(args[2], 0), x86::xmm0); c.movaps(x86::oword_ptr(args[2], 16), x86::xmm1); c.movaps(x86::oword_ptr(args[2], 32), x86::xmm2); c.movaps(x86::oword_ptr(args[2], 48), x86::xmm3); c.movaps(x86::oword_ptr(args[2], 64), x86::xmm4); c.movaps(x86::oword_ptr(args[2], 80), x86::xmm5); c.movaps(x86::oword_ptr(args[2], 96), x86::xmm6); c.movaps(x86::oword_ptr(args[2], 112), x86::xmm7); } c.mov(x86::rax, -1); c.mov(x86::qword_ptr(args[2], ::offset32(&spu_thread::last_ftime) - ::offset32(&spu_thread::rdata)), x86::rax); c.xor_(x86::eax, x86::eax); //c.jmp(_ret); c.bind(_ret); #ifdef _WIN32 if (s_tsx_avx) { c.vmovups(x86::xmm6, x86::oword_ptr(x86::rsp, 0)); c.vmovups(x86::xmm7, x86::oword_ptr(x86::rsp, 16)); } else { c.movups(x86::xmm6, x86::oword_ptr(x86::rsp, 0)); c.movups(x86::xmm7, x86::oword_ptr(x86::rsp, 16)); c.movups(x86::xmm8, x86::oword_ptr(x86::rsp, 32)); c.movups(x86::xmm9, x86::oword_ptr(x86::rsp, 48)); c.movups(x86::xmm10, 
x86::oword_ptr(x86::rsp, 64));
		c.movups(x86::xmm11, x86::oword_ptr(x86::rsp, 80));
		c.movups(x86::xmm12, x86::oword_ptr(x86::rsp, 96));
		c.movups(x86::xmm13, x86::oword_ptr(x86::rsp, 112));
		c.movups(x86::xmm14, x86::oword_ptr(x86::rsp, 128));
		c.movups(x86::xmm15, x86::oword_ptr(x86::rsp, 144));
	}

	c.add(x86::rsp, 168);
#endif

	if (s_tsx_avx)
	{
		c.vzeroupper();
	}

	maybe_flush_lbr(c);
	c.ret();
#else
	UNUSED(args);
	c.brk(Imm(0x42));
	c.ret(a64::x30);
#endif
});

// Build a routine that stores one full 128-byte reservation line atomically via a hardware (RTM) transaction.
// args: raddr = line EA, rdata = 128-byte source, _stx/_ftx = success/failure transaction counters.
// On success returns the elapsed TSC delta; returns 0 if the transaction path gave up (caller falls back).
const auto spu_putlluc_tx = build_function_asm<u64(*)(u32 raddr, const void* rdata, u64* _stx, u64* _ftx)>("spu_putlluc_tx", [](native_asm& c, auto& args)
{
	using namespace asmjit;

#if defined(ARCH_X64)
	Label fall = c.newLabel();
	Label _ret = c.newLabel();

	//if (utils::has_avx() && !s_tsx_avx)
	//{
	//	c.vzeroupper();
	//}

	// Create stack frame if necessary (Windows ABI has only 6 volatile vector registers)
#ifdef _WIN32
	c.sub(x86::rsp, 40);
	if (!s_tsx_avx)
	{
		c.movups(x86::oword_ptr(x86::rsp, 0), x86::xmm6);
		c.movups(x86::oword_ptr(x86::rsp, 16), x86::xmm7);
	}
#endif

	// Prepare registers
	// r11 <- sudo (writable) mirror of the target line; prefetch both cache lines for write
	build_swap_rdx_with(c, args, x86::r10);
	c.mov(x86::r11, x86::qword_ptr(reinterpret_cast<u64>(&vm::g_sudo_addr)));
	c.lea(x86::r11, x86::qword_ptr(x86::r11, args[0]));
	c.prefetchw(x86::byte_ptr(x86::r11, 0));
	c.prefetchw(x86::byte_ptr(x86::r11, 64));

	// Prepare data: load all 128 bytes into vector registers before entering the transaction
	if (s_tsx_avx)
	{
		c.vmovups(x86::ymm0, x86::ymmword_ptr(args[1], 0));
		c.vmovups(x86::ymm1, x86::ymmword_ptr(args[1], 32));
		c.vmovups(x86::ymm2, x86::ymmword_ptr(args[1], 64));
		c.vmovups(x86::ymm3, x86::ymmword_ptr(args[1], 96));
	}
	else
	{
		c.movaps(x86::xmm0, x86::oword_ptr(args[1], 0));
		c.movaps(x86::xmm1, x86::oword_ptr(args[1], 16));
		c.movaps(x86::xmm2, x86::oword_ptr(args[1], 32));
		c.movaps(x86::xmm3, x86::oword_ptr(args[1], 48));
		c.movaps(x86::xmm4, x86::oword_ptr(args[1], 64));
		c.movaps(x86::xmm5, x86::oword_ptr(args[1], 80));
		c.movaps(x86::xmm6, x86::oword_ptr(args[1], 96));
		c.movaps(x86::xmm7, x86::oword_ptr(args[1], 112));
	}

	// args[1] <- address of the reservation counter for this line
	c.and_(args[0].r32(), 0xff80);
	c.shr(args[0].r32(), 1);
	c.lea(args[1], x86::qword_ptr(reinterpret_cast<u64>(+vm::g_reservations), args[0]));

	// Alloc args[0] to stamp0
	const auto stamp0 = args[0];
	build_get_tsc(c, stamp0);

	Label tx1 = build_transaction_enter(c, fall, [&]()
	{
		// ftx++;
		c.add(x86::qword_ptr(args[3]), 1);
		build_get_tsc(c);
		c.sub(x86::rax, stamp0);
		c.cmp(x86::rax, x86::qword_ptr(reinterpret_cast<u64>(&g_rtm_tx_limit2)));
		c.jae(fall);
	});

	c.xbegin(tx1);

	// Transactionally store the whole line
	if (s_tsx_avx)
	{
		c.vmovaps(x86::ymmword_ptr(x86::r11, 0), x86::ymm0);
		c.vmovaps(x86::ymmword_ptr(x86::r11, 32), x86::ymm1);
		c.vmovaps(x86::ymmword_ptr(x86::r11, 64), x86::ymm2);
		c.vmovaps(x86::ymmword_ptr(x86::r11, 96), x86::ymm3);
	}
	else
	{
		c.movaps(x86::oword_ptr(x86::r11, 0), x86::xmm0);
		c.movaps(x86::oword_ptr(x86::r11, 16), x86::xmm1);
		c.movaps(x86::oword_ptr(x86::r11, 32), x86::xmm2);
		c.movaps(x86::oword_ptr(x86::r11, 48), x86::xmm3);
		c.movaps(x86::oword_ptr(x86::r11, 64), x86::xmm4);
		c.movaps(x86::oword_ptr(x86::r11, 80), x86::xmm5);
		c.movaps(x86::oword_ptr(x86::r11, 96), x86::xmm6);
		c.movaps(x86::oword_ptr(x86::r11, 112), x86::xmm7);
	}

	c.xend();
	// Bump the reservation counter after commit
	c.lock().add(x86::qword_ptr(args[1]), 32);

	// stx++
	c.add(x86::qword_ptr(args[2]), 1);
	build_get_tsc(c);
	c.sub(x86::rax, stamp0);
	c.jmp(_ret);

	// Fallback path: report 0 to the caller
	c.bind(fall);
	c.xor_(x86::eax, x86::eax);
	//c.jmp(_ret);

	c.bind(_ret);

#ifdef _WIN32
	if (!s_tsx_avx)
	{
		c.movups(x86::xmm6, x86::oword_ptr(x86::rsp, 0));
		c.movups(x86::xmm7, x86::oword_ptr(x86::rsp, 16));
	}

	c.add(x86::rsp, 40);
#endif

	if (s_tsx_avx)
	{
		c.vzeroupper();
	}

	maybe_flush_lbr(c);
	c.ret();
#else
	// Non-x86: not implemented, trap if reached
	UNUSED(args);
	c.brk(Imm(0x42));
	c.ret(a64::x30);
#endif
});

// Build a routine that reads one full 128-byte reservation line atomically via a hardware (RTM) transaction.
// args: raddr = line EA, rdata = 128-byte destination, _cpu = owner thread (for stx/ftx counters and pause flag), rtime = expected reservation time.
// On success returns the elapsed TSC delta; returns 0 if the transaction path gave up (caller falls back).
const auto spu_getllar_tx = build_function_asm<u64(*)(u32 raddr, void* rdata, cpu_thread* _cpu, u64 rtime)>("spu_getllar_tx", [](native_asm& c, auto& args)
{
	using namespace asmjit;

#if defined(ARCH_X64)
	Label fall = c.newLabel();
	Label _ret = c.newLabel();

	//if (utils::has_avx() && !s_tsx_avx)
	//{
	//	c.vzeroupper();
	//}

	// Create stack frame if necessary (Windows ABI has only 6 volatile vector registers)
	c.push(x86::rbp);
	c.push(x86::rbx);
	c.sub(x86::rsp, 40);
#ifdef _WIN32
	if (!s_tsx_avx)
	{
		c.movups(x86::oword_ptr(x86::rsp, 0), x86::xmm6);
		c.movups(x86::oword_ptr(x86::rsp, 16), x86::xmm7);
	}
#endif

	// Prepare registers
	// rbp <- sudo mirror of the line, r11 <- its reservation counter address
	build_swap_rdx_with(c, args, x86::r10);
	c.mov(x86::rbp, x86::qword_ptr(reinterpret_cast<u64>(&vm::g_sudo_addr)));
	c.lea(x86::rbp, x86::qword_ptr(x86::rbp, args[0]));
	c.and_(args[0].r32(), 0xff80);
	c.shr(args[0].r32(), 1);
	c.lea(x86::r11, x86::qword_ptr(reinterpret_cast<u64>(+vm::g_reservations), args[0]));

	// Alloc args[0] to stamp0
	const auto stamp0 = args[0];
	build_get_tsc(c, stamp0);

	// Begin transaction
	Label tx0 = build_transaction_enter(c, fall, [&]()
	{
		c.add(x86::qword_ptr(args[2], ::offset32(&spu_thread::ftx)), 1);
		build_get_tsc(c);
		c.sub(x86::rax, stamp0);
		c.cmp(x86::rax, x86::qword_ptr(reinterpret_cast<u64>(&g_rtm_tx_limit1)));
		c.jae(fall);
	});

	// Check pause flag
	c.bt(x86::dword_ptr(args[2], ::offset32(&cpu_thread::state)), static_cast<u32>(cpu_flag::pause));
	c.jc(fall);
	// Verify the reservation time (low 7 lock bits masked off) still matches the caller's rtime
	c.mov(x86::rax, x86::qword_ptr(x86::r11));
	c.and_(x86::rax, -128);
	c.cmp(x86::rax, args[3]);
	c.jne(fall);
	c.xbegin(tx0);

	// Just read data to registers
	if (s_tsx_avx)
	{
		c.vmovups(x86::ymm0, x86::ymmword_ptr(x86::rbp, 0));
		c.vmovups(x86::ymm1, x86::ymmword_ptr(x86::rbp, 32));
		c.vmovups(x86::ymm2, x86::ymmword_ptr(x86::rbp, 64));
		c.vmovups(x86::ymm3, x86::ymmword_ptr(x86::rbp, 96));
	}
	else
	{
		c.movaps(x86::xmm0, x86::oword_ptr(x86::rbp, 0));
		c.movaps(x86::xmm1, x86::oword_ptr(x86::rbp, 16));
		c.movaps(x86::xmm2, x86::oword_ptr(x86::rbp, 32));
		c.movaps(x86::xmm3, x86::oword_ptr(x86::rbp, 48));
		c.movaps(x86::xmm4, x86::oword_ptr(x86::rbp, 64));
		c.movaps(x86::xmm5, x86::oword_ptr(x86::rbp, 80));
		c.movaps(x86::xmm6, x86::oword_ptr(x86::rbp, 96));
		c.movaps(x86::xmm7, x86::oword_ptr(x86::rbp, 112));
	}

	c.xend();
	c.add(x86::qword_ptr(args[2], ::offset32(&spu_thread::stx)), 1);
	build_get_tsc(c);
	c.sub(x86::rax, stamp0);

	// Store data
	if (s_tsx_avx)
	{
c.vmovaps(x86::ymmword_ptr(args[1], 0), x86::ymm0);
		c.vmovaps(x86::ymmword_ptr(args[1], 32), x86::ymm1);
		c.vmovaps(x86::ymmword_ptr(args[1], 64), x86::ymm2);
		c.vmovaps(x86::ymmword_ptr(args[1], 96), x86::ymm3);
	}
	else
	{
		c.movaps(x86::oword_ptr(args[1], 0), x86::xmm0);
		c.movaps(x86::oword_ptr(args[1], 16), x86::xmm1);
		c.movaps(x86::oword_ptr(args[1], 32), x86::xmm2);
		c.movaps(x86::oword_ptr(args[1], 48), x86::xmm3);
		c.movaps(x86::oword_ptr(args[1], 64), x86::xmm4);
		c.movaps(x86::oword_ptr(args[1], 80), x86::xmm5);
		c.movaps(x86::oword_ptr(args[1], 96), x86::xmm6);
		c.movaps(x86::oword_ptr(args[1], 112), x86::xmm7);
	}

	c.jmp(_ret);

	// Fallback path: report 0 to the caller
	c.bind(fall);
	c.xor_(x86::eax, x86::eax);
	//c.jmp(_ret);

	c.bind(_ret);

#ifdef _WIN32
	if (!s_tsx_avx)
	{
		c.movups(x86::xmm6, x86::oword_ptr(x86::rsp, 0));
		c.movups(x86::xmm7, x86::oword_ptr(x86::rsp, 16));
	}
#endif

	if (s_tsx_avx)
	{
		c.vzeroupper();
	}

	c.add(x86::rsp, 40);
	c.pop(x86::rbx);
	c.pop(x86::rbp);
	maybe_flush_lbr(c);
	c.ret();
#else
	// Non-x86: not implemented, trap if reached
	UNUSED(args);
	c.brk(Imm(0x42));
	c.ret(a64::x30);
#endif
});

// Raise interrupt bits on this control block and wake the attached handler thread (if any).
void spu_int_ctrl_t::set(u64 ints)
{
	// leave only enabled interrupts
	ints &= mask;

	// notify if at least 1 bit was set
	if (ints && ~stat.fetch_or(ints) & ints)
	{
		std::shared_lock rlock(id_manager::g_mutex);

		if (lv2_obj::check(tag))
		{
			if (auto handler = tag->handler; lv2_obj::check(handler))
			{
				rlock.unlock();
				thread_ctrl::notify(*handler->thread);
			}
		}
	}
}

const spu_imm_table_t g_spu_imm;

// Precompute 2^i scale factors (i in [-155, 173]) as packed floats.
spu_imm_table_t::scale_table_t::scale_table_t()
{
	for (s32 i = -155; i < 174; i++)
	{
		m_data[i + 155] = v128::fromf32p(static_cast<float>(std::exp2(i)));
	}
}

// Precompute pshufb control vectors for the quadword shift-left/shift-right/rotate-left helpers.
spu_imm_table_t::spu_imm_table_t()
{
	for (u32 i = 0; i < std::size(sldq_pshufb); i++)
	{
		for (u32 j = 0; j < 16; j++)
		{
			sldq_pshufb[i]._u8[j] = static_cast<u8>(j - i);
		}
	}

	for (u32 i = 0; i < std::size(srdq_pshufb); i++)
	{
		const u32 im = (0u - i) & 0x1f;

		for (u32 j = 0; j < 16; j++)
		{
			// 0xff selects zero in pshufb (bytes shifted in from the right)
			srdq_pshufb[i]._u8[j] = (j + im > 15) ? 0xff : static_cast<u8>(j + im);
		}
	}

	for (u32 i = 0; i < std::size(rldq_pshufb); i++)
	{
		for (u32 j = 0; j < 16; j++)
		{
			rldq_pshufb[i]._u8[j] = static_cast<u8>((j - i) & 0xf);
		}
	}
}

// Append a formatted dump of all 128 GPRs, channels and reservation state to 'ret' (debugger register view).
void spu_thread::dump_regs(std::string& ret, std::any& /*custom_data*/) const
{
	const system_state emu_state = Emu.GetStatus(false);
	const bool is_stopped_or_frozen = state & cpu_flag::exit || emu_state == system_state::frozen || emu_state <= system_state::stopping;
	const spu_debugger_mode mode = debugger_mode.load();
	const bool floats_only = !is_stopped_or_frozen && mode == spu_debugger_mode::is_float;
	const bool is_decimal = !is_stopped_or_frozen && mode == spu_debugger_mode::is_decimal;

	SPUDisAsm dis_asm(cpu_disasm_mode::normal, ls);

	for (u32 i = 0; i < 128; i++, ret += '\n')
	{
		const auto r = gpr[i];

		auto [is_const, const_value] = dis_asm.try_get_const_value(i, pc);

		if (const_value != r)
		{
			// Expectation of predictable code path has not been met (such as a branch directly to the instruction)
			is_const = false;
		}

		fmt::append(ret, "%s%s ", spu_reg_name[i], is_const ? "©" : ":");

		if (auto [size, dst, src] = SPUDisAsm::try_get_insert_mask_info(r); size)
		{
			// Special: insertion masks
			const std::string_view type =
				size == 1 ? "byte" :
				size == 2 ? "half" :
				size == 4 ? "word" :
				size == 8 ? "dword" : "error";

			if ((size >= 4u && !src) || (size == 2u && src == 1u) || (size == 1u && src == 3u))
			{
				fmt::append(ret, "insert -> %s[%u]", type, dst);
				continue;
			}
		}

		// Decode raw bits as an SPU single-precision value (extended exponent range, denormals flushed to 0)
		auto to_f64 = [](u32 bits)
		{
			const u32 abs = bits & 0x7fff'ffff;
			constexpr u32 scale = (1 << 23);
			return std::copysign(abs < scale ? 0 : std::ldexp((scale + (abs % scale)) / f64{scale}, static_cast<int>(abs >> 23) - 127), bits >> 31 ?
-1 : 1);
		};

		const double array[]{to_f64(r.u32r[0]), to_f64(r.u32r[1]), to_f64(r.u32r[2]), to_f64(r.u32r[3])};

		const u32 i3 = r._u32[3];
		// All four words equal: print the register as a single scalar
		const bool is_packed = v128::from32p(i3) == r;

		if (floats_only)
		{
			fmt::append(ret, "%g, %g, %g, %g", array[0], array[1], array[2], array[3]);
			continue;
		}

		if (is_packed)
		{
			bool printed_error = false;

			if (i3 >> 31)
			{
				const usz old_size = ret.size();
				fmt::append(ret, "%s (0x%x)", CellError{i3}, i3);

				// Test if failed to format (appended " 0x8".. in such case)
				if (ret[old_size] == '0')
				{
					// Failed
					ret.resize(old_size);
				}
				else
				{
					printed_error = true;
				}
			}

			if (!printed_error)
			{
				// Shorthand formatting
				if (is_decimal)
				{
					fmt::append(ret, "%-11d", i3);
				}
				else
				{
					fmt::append(ret, "%08x", i3);
				}
			}
		}
		else
		{
			if (is_decimal)
			{
				fmt::append(ret, "%-11d %-11d %-11d %-11d", r.u32r[0], r.u32r[1], r.u32r[2], r.u32r[3]);
			}
			else
			{
				fmt::append(ret, "%08x %08x %08x %08x", r.u32r[0], r.u32r[1], r.u32r[2], r.u32r[3]);
			}
		}

		// If the preferred word looks like a code address, show its disassembly
		if (i3 >= 0x80 && is_exec_code(i3, { ls, SPU_LS_SIZE }))
		{
			dis_asm.disasm(i3);
			fmt::append(ret, " -> %s", dis_asm.last_opcode);
		}

		if (std::any_of(std::begin(array), std::end(array), [](f64 v){ return v != 0; }))
		{
			if (is_packed)
			{
				fmt::append(ret, " (%g)", array[0]);
			}
			else
			{
				fmt::append(ret, " (%g, %g, %g, %g)", array[0], array[1], array[2], array[3]);
			}
		}
	}

	const auto events = ch_events.load();

	fmt::append(ret, "\nEvent Stat: 0x%x\n", events.events);
	fmt::append(ret, "Event Mask: 0x%x\n", events.mask);
	fmt::append(ret, "Event Count: %u\n", events.count);
	fmt::append(ret, "SRR0: 0x%05x\n", srr0);
	fmt::append(ret, "Stall Stat: %s\n", ch_stall_stat);
	fmt::append(ret, "Stall Mask: 0x%x\n", ch_stall_mask);
	fmt::append(ret, "Tag Stat: %s\n", ch_tag_stat);
	fmt::append(ret, "Tag Update: %s\n", mfc_tag_update{ch_tag_upd});
	fmt::append(ret, "Atomic Stat: %s\n", ch_atomic_stat); // TODO: use mfc_atomic_status formatting
	fmt::append(ret, "Interrupts: %s\n", interrupts_enabled ? "Enabled" : "Disabled");
	fmt::append(ret, "Inbound Mailbox: %s\n", ch_in_mbox);
	fmt::append(ret, "Out Mailbox: %s\n", ch_out_mbox);
	fmt::append(ret, "Out Interrupts Mailbox: %s\n", ch_out_intr_mbox);
	fmt::append(ret, "SNR config: 0x%llx\n", snr_config);
	fmt::append(ret, "SNR1: %s\n", ch_snr1);
	fmt::append(ret, "SNR2: %s\n", ch_snr2);

	if (get_type() != spu_type::threaded)
	{
		// RawSPU/isolated only: interrupt controllers and MFC proxy state
		for (usz i = 0; i < int_ctrl.size(); i++)
		{
			fmt::append(ret, "Int Ctrl[%u]: stat=0x%x, mask=0x%x\n", i, +int_ctrl[i].stat, +int_ctrl[i].mask);
		}

		fmt::append(ret, "MFC Prxy Cmd: [%s written: %x]\n", mfc_prxy_cmd, mfc_prxy_write_state.all);
		fmt::append(ret, "MFC Prxy Mask: 0x%x\n", +mfc_prxy_mask);
	}

	const u32 addr = raddr;
	const u64 rtime0 = rtime;

	if (vm::check_addr(addr))
	{
		fmt::append(ret, "Reservation Addr: 0x%x\n", addr);
		fmt::append(ret, "Reservation Time: 0x%x\n", rtime0 & 0xff'ff'ff);
	}
	else if (addr)
	{
		fmt::append(ret, "Reservation Addr: 0x%x (unmapped)\n", addr);
		fmt::append(ret, "Reservation Time: 0x%x\n", rtime0 & 0xff'ff'ff);
	}
	else
	{
		fmt::append(ret, "Reservation Addr: N/A\n");
		fmt::append(ret, "Reservation Time: N/A\n");
	}

	fmt::append(ret, "Reservation Data:\n");

	be_t<u32> data[32]{};
	std::memcpy(data, rdata, sizeof(rdata)); // Show the data even if the reservation was lost inside the atomic loop

	for (usz i = 0; i < std::size(data); i += 4)
	{
		fmt::append(ret, "[0x%02x] %08x %08x %08x %08x\n", i * sizeof(data[0]), data[i + 0], data[i + 1], data[i + 2], data[i + 3]);
	}
}

// Render the reconstructed call stack as human-readable text.
std::string spu_thread::dump_callstack() const
{
	std::string ret;

	fmt::append(ret, "Call stack:\n=========\n0x%08x (0x0) called\n", pc);

	for (const auto& sp : dump_callstack_list())
	{
		// TODO: function addresses too
		fmt::append(ret, "> from 0x%08x (sp=0x%08x)\n", sp.first, sp.second);
	}

	return ret;
}

// Walk the SPU stack (frame pointer chain at GPR1) and heuristically recover (return address, sp) pairs.
std::vector<std::pair<u32, u32>> spu_thread::dump_callstack_list() const
{
	std::vector<std::pair<u32, u32>> call_stack_list;

	bool first = true;

	const v128 gpr0 = gpr[0];
	const u32 _pc = pc;

	// Declare first 128-bytes as invalid for stack (common values such as 0 do not make sense here)
	for (u32 sp = gpr[1]._u32[3]; (sp & 0xF) == 0u && sp >= 0x80u && sp <= 0x3FFE0u; first = false)
	{
		v128 lr = _ref<v128>(sp + 16);

		// Reject values that cannot be a saved link register
		auto is_invalid = [this](v128 v)
		{
			const u32 addr = v._u32[3] & 0x3FFFC;

			if (v != v128::from32r(addr))
			{
				// 1) Non-zero lower words are invalid (because BRSL-like instructions generate only zeroes)
				// 2) Bits normally masked out by indirect branches are considered invalid
				return true;
			}

			return !addr || !is_exec_code(addr, { ls, SPU_LS_SIZE });
		};

		if (first && lr._u32[3] != gpr0._u32[3] && !is_invalid(gpr0))
		{
			// Detect functions with no stack or before LR has been stored
			// Scan forward from PC, following branch targets, to decide whether GPR0 still holds the caller's LR
			std::vector<bool> passed(_pc / 4);
			// Start with PC
			std::vector<u32> start_points{_pc};

			bool is_ok = false;
			bool all_failed = false;

			for (usz start = 0; !all_failed && start < start_points.size(); start++)
			{
				for (u32 i = start_points[start]; i < SPU_LS_SIZE;)
				{
					if (i / 4 >= passed.size())
					{
						passed.resize(i / 4 + 1);
					}
					else if (passed[i / 4])
					{
						// Already passed
						break;
					}

					passed[i / 4] = true;

					const spu_opcode_t op{_ref<u32>(i)};

					const auto type = s_spu_itype.decode(op.opcode);

					if (start == 0 && type == spu_itype::STQD && op.ra == 1u && op.rt == 0u)
					{
						// Saving LR to stack: this is indeed a new function (ok because LR has not been saved yet)
						is_ok = true;
						break;
					}

					if (type == spu_itype::LQD && op.rt == 0u)
					{
						// Loading LR from stack: this is not a leaf function
						all_failed = true;
						break;
					}

					if (type == spu_itype::UNK)
					{
						// Ignore for now
						break;
					}

					if ((type == spu_itype::BRSL || type == spu_itype::BRASL || type == spu_itype::BISL) && op.rt == 0u)
					{
						// Gave up on link before saving
						all_failed = true;
						break;
					}

					if (type & spu_itype::branch && type >= spu_itype::BI && op.ra == 0u)
					{
						// Returned
						is_ok = true;
						break;
					}

					const auto results = op_branch_targets(i, op);

					bool proceeded = false;

					for (usz res_i = 0; res_i < results.size(); res_i++)
					{
						const u32 route_pc = results[res_i];

						if (route_pc >= SPU_LS_SIZE)
						{
continue;
						}

						if (route_pc / 4 >= passed.size())
						{
							passed.resize(route_pc / 4 + 1);
						}

						if (!passed[route_pc / 4])
						{
							if (proceeded)
							{
								// Remember next route start point
								start_points.push_back(route_pc);
							}
							else
							{
								// Next PC
								i = route_pc;
								proceeded = true;
							}
						}
					}
				}
			}

			if (is_ok && !all_failed)
			{
				// Same stack as far as we know (for now)
				call_stack_list.emplace_back(gpr0._u32[3], sp);
			}
		}

		if (!is_invalid(lr))
		{
			// TODO: function addresses too
			call_stack_list.emplace_back(lr._u32[3], sp);
		}
		else if (!first)
		{
			break;
		}

		const u32 temp_sp = _ref<u32>(sp);

		if (temp_sp <= sp)
		{
			// Ensure ascending stack frame pointers
			break;
		}

		sp = temp_sp;
	}

	return call_stack_list;
}

// Append miscellaneous status (block stats, LS range, MFC queue) to the base cpu_thread dump.
std::string spu_thread::dump_misc() const
{
	std::string ret = cpu_thread::dump_misc();

	fmt::append(ret, "Block Weight: %u (Retreats: %u)", block_counter, block_failure);

	if (g_cfg.core.spu_prof)
	{
		// Get short function hash and position in chunk
		fmt::append(ret, "\nCurrent block: %s", spu_block_hash{atomic_storage<u64>::load(block_hash)});
	}

	const u32 offset = group ? SPU_FAKE_BASE_ADDR + (id & 0xffffff) * SPU_LS_SIZE : RAW_SPU_BASE_ADDR + index * RAW_SPU_OFFSET;

	fmt::append(ret, "\n[%s]", ch_mfc_cmd);
	fmt::append(ret, "\nLocal Storage: 0x%08x..0x%08x", offset, offset + 0x3ffff);

	if (const u64 _time = start_time)
	{
		if (const auto func = current_func)
		{
			ret += "\nIn function: ";
			ret += func;
		}
		else
		{
			ret += '\n';
		}

		fmt::append(ret, "\nWaiting: %fs", (get_system_time() - _time) / 1000000.);
	}
	else
	{
		ret += "\n\n";
	}

	fmt::append(ret, "\nTag Mask: 0x%08x", ch_tag_mask);
	fmt::append(ret, "\nMFC Queue Size: %u", mfc_size);

	for (u32 i = 0; i < 16; i++)
	{
		if (i < mfc_size)
		{
			fmt::append(ret, "\n%s", mfc_queue[i]);
		}
		else
		{
			break;
		}
	}

	return ret;
}

// Called when the thread is being stopped: log aborted HLE function, optionally dump context, report perf counters.
void spu_thread::cpu_on_stop()
{
	if (current_func && is_stopped(state - cpu_flag::stop))
	{
		if (start_time)
		{
			ppu_log.warning("'%s' aborted (%fs)", current_func, (get_system_time() - start_time) / 1000000.);
		}
		else
		{
			ppu_log.warning("'%s' aborted", current_func);
		}

		current_func = {};
	}

	// TODO: More conditions
	if (Emu.IsStopped() && g_cfg.core.spu_debug)
	{
		std::string ret;
		dump_all(ret);
		spu_log.notice("thread context: %s", ret);
	}

	if (is_stopped(state - cpu_flag::stop))
	{
		if (stx == 0 && ftx == 0 && last_succ == 0 && last_fail == 0)
		{
			perf_log.notice("SPU thread perf stats are not available.");
		}
		else
		{
			perf_log.notice("Perf stats for transactions: success %u, failure %u", stx, ftx);
			perf_log.notice("Perf stats for PUTLLC reload: success %u, failure %u", last_succ, last_fail);
		}
	}
}

// Reset all architectural and emulation state to power-on defaults.
void spu_thread::cpu_init()
{
	std::memset(gpr.data(), 0, gpr.size() * sizeof(gpr[0]));
	fpscr.Reset();

	ch_mfc_cmd = {};

	srr0 = 0;
	mfc_size = 0;
	mfc_barrier = 0;
	mfc_fence = 0;
	ch_tag_upd = 0;
	ch_tag_mask = 0;
	ch_tag_stat.data.raw() = {};
	ch_stall_mask = 0;
	ch_stall_stat.data.raw() = {};
	ch_atomic_stat.data.raw() = {};

	ch_out_mbox.data.raw() = {};
	ch_out_intr_mbox.data.raw() = {};

	ch_events.raw() = {};
	interrupts_enabled = false;
	raddr = 0;

	ch_dec_start_timestamp = get_timebased_time();
	ch_dec_value = option &
SYS_SPU_THREAD_OPTION_DEC_SYNC_TB_ENABLE ? ~static_cast<u32>(ch_dec_start_timestamp) : 0;
	is_dec_frozen = false;

	if (get_type() >= spu_type::raw)
	{
		// RawSPU/isolated-only state
		ch_in_mbox.clear();
		ch_snr1.data.raw() = {};
		ch_snr2.data.raw() = {};
		snr_config = 0;
		mfc_prxy_mask.raw() = 0;
		mfc_prxy_write_state = {};
	}

	status_npc.raw() = {get_type() == spu_type::isolated ? SPU_STATUS_IS_ISOLATED : 0, 0};
	run_ctrl.raw() = 0;

	spurs_last_task_timestamp = 0;
	spurs_wait_duration_last = 0;
	spurs_average_task_duration = 0;
	spurs_waited = false;
	spurs_entered_wait = false;

	int_ctrl[0].clear();
	int_ctrl[1].clear();
	int_ctrl[2].clear();

	gpr[1]._u32[3] = 0x3FFF0; // initial stack frame pointer
}

// Executed when the thread leaves its run loop: publish stop status (RawSPU)
// or perform thread-group teardown/joiner notification (threaded SPU).
void spu_thread::cpu_return()
{
	if (get_type() >= spu_type::raw)
	{
		if (status_npc.fetch_op([this](status_npc_sync_var& state)
		{
			if (state.status & SPU_STATUS_RUNNING)
			{
				// Save next PC and current SPU Interrupt Status
				// Used only by RunCtrl stop requests
				state.status &= ~SPU_STATUS_RUNNING;
				state.npc = pc | +interrupts_enabled;
				return true;
			}

			return false;
		}).second)
		{
			status_npc.notify_one();
		}
	}
	else if (is_stopped())
	{
		ch_in_mbox.clear();

		// Last running thread in the group performs group state transition
		if (ensure(group->running)-- == 1)
		{
			u32 last_stop = 0;

			{
				lv2_obj::notify_all_t notify;
				std::lock_guard lock(group->mutex);
				group->run_state = SPU_THREAD_GROUP_STATUS_INITIALIZED;

				if (!group->join_state)
				{
					group->join_state = SYS_SPU_THREAD_GROUP_JOIN_ALL_THREADS_EXIT;
				}

				for (const auto& thread : group->threads)
				{
					if (thread && thread.get() != this && thread->status_npc.load().status >> 16 == SYS_SPU_THREAD_STOP_THREAD_EXIT)
					{
						// Wait for all threads to have error codes if exited by sys_spu_thread_exit
						for (u32 status; !thread->exit_status.try_read(status) || status != thread->last_exit_status;)
						{
							utils::pause();
						}
					}
				}

				if (status_npc.load().status >> 16 == SYS_SPU_THREAD_STOP_THREAD_EXIT)
				{
					// Set exit status now, in conjunction with group state changes
					exit_status.set_value(last_exit_status);
				}

				last_stop = group->stop_count;

				if (last_stop == umax)
				{
					// Restart with some high count to preserve some meaning
					group->stop_count = 1000;
				}
				else
				{
					group->stop_count++;
				}

				if (const auto ppu = std::exchange(group->waiter, nullptr))
				{
					// Send exit status directly to the joining thread
					ppu->gpr[4] = group->join_state;
					ppu->gpr[5] = group->exit_status;
					group->join_state.release(0);
					lv2_obj::awake(ppu);
				}
			}

			// Notify on last thread stopped
			group->stop_count.notify_all();

			// Wait for terminators manually if needed (ensuring they quit before value-wrapping)
			while (last_stop == umax && group->wait_term_count)
			{
				std::this_thread::yield();
			}
		}
		else if (status_npc.load().status >> 16 == SYS_SPU_THREAD_STOP_THREAD_EXIT)
		{
			exit_status.set_value(last_exit_status);
		}
	}
}

extern thread_local std::string(*g_tls_log_prefix)();

// Main entry of the SPU thread: set up FP environment and log prefix, then run the JIT or interpreter loop.
void spu_thread::cpu_task()
{
#ifdef __APPLE__
	pthread_jit_write_protect_np(true);
#endif

	start_time = 0;

	// Get next PC and SPU Interrupt status
	pc = status_npc.load().npc;

	// Note: works both on RawSPU and threaded SPU!
	set_interrupt_status((pc & 1) != 0);

	pc &= 0x3fffc;

	std::fesetround(FE_TOWARDZERO);
	gv_set_zeroing_denormals();

	g_tls_log_prefix = []
	{
		const auto cpu = static_cast<spu_thread*>(get_current_cpu_thread());

		static thread_local shared_ptr<std::string> name_cache;

		if (!cpu->spu_tname.is_equal(name_cache)) [[unlikely]]
		{
			cpu->spu_tname.peek_op([&](const shared_ptr<std::string>& ptr)
			{
				if (ptr != name_cache)
				{
					name_cache = ptr;
				}
			});
		}

		const auto type = cpu->get_type();

		if (u64 hash = cpu->block_hash)
		{
			return fmt::format("%sSPU[0x%07x] Thread (%s) [0x%05x: %s]", type >= spu_type::raw ? type == spu_type::isolated ? "Iso" : "Raw" : "", cpu->lv2_id, *name_cache.get(), cpu->pc, spu_block_hash_short{atomic_storage<u64>::load(hash)});
		}

		return fmt::format("%sSPU[0x%07x] Thread (%s) [0x%05x]", type >= spu_type::raw ? type == spu_type::isolated ?
"Iso" : "Raw" : "", cpu->lv2_id, *name_cache.get(), cpu->pc);
	};

	constexpr u32 invalid_spurs = 0u - 0x80;

	if (spurs_addr == 0)
	{
		// Evaluate it
		if (!group)
		{
			spurs_addr = invalid_spurs; // Some invalid non-0 address
		}
		else
		{
			const u32 arg = static_cast<u32>(group->args[index][1]);

			if (group->name.ends_with("CellSpursKernelGroup"sv) && vm::check_addr(arg))
			{
				spurs_addr = arg;
				group->spurs_running++;
			}
			else
			{
				spurs_addr = invalid_spurs;
			}
		}
	}

	if (jit)
	{
		// Recompiler main loop
		while (true)
		{
			if (state) [[unlikely]]
			{
				if (check_state())
					break;
			}

			if (_ref<u32>(pc) == 0x0u)
			{
				// Null instruction: treat as STOP 0x0
				if (spu_thread::stop_and_signal(0x0))
					pc += 4;

				continue;
			}

			spu_runtime::g_gateway(*this, _ptr<u8>(0), nullptr);
		}

		if (unsavable && is_stopped(state - cpu_flag::stop))
		{
			spu_log.warning("Aborting unsaveable state");
		}

		// Print some stats
		(!group || group->stop_count < 5 ? spu_log.notice : spu_log.trace)("Stats: Block Weight: %u (Retreats: %u);", block_counter, block_failure);
	}
	else
	{
		// Interpreter main loop
		ensure(spu_runtime::g_interpreter);

		allow_interrupts_in_cpu_work = true;

		while (true)
		{
			if (state) [[unlikely]]
			{
				if (check_state())
					break;
			}

			spu_runtime::g_interpreter(*this, _ptr<u8>(0), nullptr);
		}

		allow_interrupts_in_cpu_work = false;
	}

	if (spurs_addr != invalid_spurs)
	{
		if (group->spurs_running.exchange(0))
		{
			group->spurs_running.notify_all();
		}
	}
}

// Drain deferred work (breakpoints, shuffled MFC commands, event/interrupt checks) while the pending flag is set.
void spu_thread::cpu_work()
{
	// Guard against reentrancy
	if (std::exchange(in_cpu_work, true))
	{
		return;
	}

	const u32 old_iter_count = cpu_work_iteration_count++;

	bool work_left = false;

	if (has_active_local_bps)
	{
		const u32 pos_at = pc / 4;
		const u32 pos_bit = 1u << (pos_at % 8);

		if (local_breakpoints[pos_at / 8] & pos_bit)
		{
			// Ignore repetitions until a different instruction is issued
			if (pc != current_bp_pc)
			{
				// Breakpoint hit
				state += cpu_flag::dbg_pause;
			}
		}

		current_bp_pc = pc;
		work_left = true;
	}
	else
	{
		current_bp_pc = umax;
	}

	const auto timeout = +g_cfg.core.mfc_transfers_timeout;

	if (u32 shuffle_count = g_cfg.core.mfc_transfers_shuffling)
	{
		// If either MFC size exceeds limit or timeout has been reached execute pending MFC commands
		if (mfc_size > shuffle_count || (timeout && get_system_time() - mfc_last_timestamp >= timeout))
		{
			work_left = do_mfc(false, false);
		}
		else
		{
			work_left = mfc_size != 0; // TODO: Optimize
		}
	}

	bool gen_interrupt = false;

	// Check interrupts every 16 iterations
	if (!(old_iter_count % 16) && allow_interrupts_in_cpu_work)
	{
		if (u32 mask = ch_events.load().mask & SPU_EVENT_INTR_BUSY_CHECK)
		{
			// LR check is expensive, do it once in a while
			if (old_iter_count /*% 256*/)
			{
				mask &= ~SPU_EVENT_LR;
			}

			get_events(mask);
		}

		gen_interrupt = check_mfc_interrupts(pc);
		work_left |= interrupts_enabled;
	}

	in_cpu_work = false;

	if (!work_left)
	{
		// No more pending work
		state.atomic_op([](bs_t<cpu_flag>& flags)
		{
			if (flags & cpu_flag::pending_recheck)
			{
				// Do not really remove ::pending because external thread may have pushed more pending work
				flags -= cpu_flag::pending_recheck;
			}
			else
			{
				flags -= cpu_flag::pending;
			}
		});
	}

	if (gen_interrupt)
	{
		// Interrupt! escape everything and restart execution
		spu_runtime::g_escape(this);
	}
}

// Fixed-object that resets global RawSPU ID bookkeeping on emulator shutdown.
struct raw_spu_cleanup
{
	raw_spu_cleanup() = default;

	raw_spu_cleanup(const raw_spu_cleanup&) = delete;

	raw_spu_cleanup& operator =(const raw_spu_cleanup&) = delete;

	~raw_spu_cleanup()
	{
		std::memset(spu_thread::g_raw_spu_id, 0, sizeof(spu_thread::g_raw_spu_id));
		spu_thread::g_raw_spu_ctr = 0;
		g_fxo->get<raw_spu_cleanup>(); // Register destructor
	}
};

// Release all per-thread emulator resources and join the underlying host thread.
void spu_thread::cleanup()
{
	// Deallocate local storage
	ensure(vm::dealloc(vm_offset(), vm::spu, &shm));

	// Deallocate RawSPU ID
	if (get_type() >= spu_type::raw)
	{
		g_raw_spu_id[index] = 0;
		g_raw_spu_ctr--;
	}

	// Free range lock (and signals cleanup was called to the destructor)
	vm::free_range_lock(range_lock);

	// Terminate and join thread
	static_cast<named_thread<spu_thread>&>(*this) = thread_state::finished;
}

spu_thread::~spu_thread()
{
	// Unmap LS and its mirrors
	shm->unmap(ls + SPU_LS_SIZE);
	shm->unmap(ls);
	shm->unmap(ls - SPU_LS_SIZE);
	utils::memory_release(ls - SPU_LS_SIZE * 2,
SPU_LS_SIZE * 5);
}

// Map the LS shared memory three times (below, at, above 'ls') so wrapping accesses stay valid.
// If 'ptr' is null, reserves a fresh 5*LS-sized region and maps into its middle.
u8* spu_thread::map_ls(utils::shm& shm, void* ptr)
{
	vm::writer_lock mlock;

	const auto ls = ptr ? static_cast<u8*>(ptr) : static_cast<u8*>(ensure(utils::memory_reserve(SPU_LS_SIZE * 5, nullptr, true))) + SPU_LS_SIZE * 2;
	ensure(shm.map_critical(ls - SPU_LS_SIZE).first && shm.map_critical(ls).first && shm.map_critical(ls + SPU_LS_SIZE).first);
	return ls;
}

// Normal construction: allocate LS, pick the configured recompiler, initialize raw SPUs immediately.
spu_thread::spu_thread(lv2_spu_group* group, u32 index, std::string_view name, u32 lv2_id, bool is_isolated, u32 option)
	: cpu_thread(idm::last_id())
	, group(group)
	, index(index)
	, thread_type(group ? spu_type::threaded : is_isolated ? spu_type::isolated : spu_type::raw)
	, shm(std::make_shared<utils::shm>(SPU_LS_SIZE))
	, ls(static_cast<u8*>(utils::memory_reserve(SPU_LS_SIZE * 5, nullptr, true)) + SPU_LS_SIZE * 2)
	, option(option)
	, lv2_id(lv2_id)
	, spu_tname(make_single<std::string>(name))
{
	if (g_cfg.core.spu_decoder == spu_decoder_type::asmjit)
	{
		jit = spu_recompiler_base::make_asmjit_recompiler();
	}
	else if (g_cfg.core.spu_decoder == spu_decoder_type::llvm)
	{
#if defined(ARCH_X64)
		jit = spu_recompiler_base::make_fast_llvm_recompiler();
#elif defined(ARCH_ARM64)
		jit = spu_recompiler_base::make_llvm_recompiler();
#else
#error "Unimplemented"
#endif
	}

	if (g_cfg.core.mfc_debug)
	{
		utils::memory_commit(vm::g_stat_addr + vm_offset(), SPU_LS_SIZE);
		mfc_history.resize(max_mfc_dump_idx);
	}

	if (g_cfg.core.spu_decoder == spu_decoder_type::asmjit || g_cfg.core.spu_decoder == spu_decoder_type::llvm)
	{
		if (g_cfg.core.spu_block_size != spu_block_size_type::safe)
		{
			// Initialize stack mirror
			std::memset(stack_mirror.data(), 0xff, sizeof(stack_mirror));
		}
	}

	if (get_type() >= spu_type::raw)
	{
		cpu_init();
	}

	range_lock = vm::alloc_range_lock();

	memset(&hv_ctx, 0, sizeof(hv_ctx));
}

// (De)serialize the state shared between save() and the savestate constructor.
// NOTE(review): field order here is the savestate wire format — do not reorder.
void spu_thread::serialize_common(utils::serial& ar)
{
	ar(gpr, pc, ch_mfc_cmd, mfc_size, mfc_barrier, mfc_fence, mfc_prxy_cmd, mfc_prxy_mask, mfc_prxy_write_state.all
		, srr0, ch_tag_upd, ch_tag_mask, ch_tag_stat.data, ch_stall_mask, ch_stall_stat.data, ch_atomic_stat.data
		, ch_out_mbox.data, ch_out_intr_mbox.data, snr_config, ch_snr1.data, ch_snr2.data, ch_events.raw().all, interrupts_enabled
		, run_ctrl, exit_status.data, status_npc.raw().status, ch_dec_start_timestamp, ch_dec_value, is_dec_frozen);

	ar(std::span(mfc_queue, mfc_size));

	u32 vals[4]{};

	if (ar.is_writing())
	{
		const u8 count = ch_in_mbox.try_read(vals);
		ar(count, std::span(vals, count));
	}
	else
	{
		const u8 count = ar;
		ar(std::span(vals, count));
		ch_in_mbox.set_values(count, vals[0], vals[1], vals[2], vals[3]);
	}
}

// Savestate construction: restore all state written by save(), re-acquire the reservation,
// and resolve event queue / interrupt tag IDs back to objects.
spu_thread::spu_thread(utils::serial& ar, lv2_spu_group* group)
	: cpu_thread(idm::last_id())
	, group(group)
	, index(ar)
	, thread_type(group ? spu_type::threaded : ar.pop<u8>() ? spu_type::isolated : spu_type::raw)
	, shm(ensure(vm::get(vm::spu)->peek(vm_offset()).second))
	, ls(map_ls(*this->shm))
	, option(ar)
	, lv2_id(ar)
	, spu_tname(make_single<std::string>(ar.operator std::string()))
{
	if (g_cfg.core.spu_decoder == spu_decoder_type::asmjit)
	{
		jit = spu_recompiler_base::make_asmjit_recompiler();
	}
	else if (g_cfg.core.spu_decoder == spu_decoder_type::llvm)
	{
#if defined(ARCH_X64)
		jit = spu_recompiler_base::make_fast_llvm_recompiler();
#elif defined(ARCH_ARM64)
		jit = spu_recompiler_base::make_llvm_recompiler();
#else
#error "Unimplemented"
#endif
	}

	if (g_cfg.core.mfc_debug)
	{
		utils::memory_commit(vm::g_stat_addr + vm_offset(), SPU_LS_SIZE);
		mfc_history.resize(max_mfc_dump_idx);
	}

	if (g_cfg.core.spu_decoder != spu_decoder_type::_static && g_cfg.core.spu_decoder != spu_decoder_type::dynamic)
	{
		if (g_cfg.core.spu_block_size != spu_block_size_type::safe)
		{
			// Initialize stack mirror
			std::memset(stack_mirror.data(), 0xff, sizeof(stack_mirror));
		}
	}

	range_lock = vm::alloc_range_lock();

	serialize_common(ar);

	raddr = ::narrow<u32>(ar.pop<u64>());

	if (raddr)
	{
		// Acquire reservation
		if (!vm::check_addr(raddr))
		{
			fmt::throw_exception("SPU Serialization: Reservation address is not accessible! (addr=0x%x)", raddr);
		}

		rtime = vm::reservation_acquire(raddr);
		mov_rdata(rdata, *vm::get_super_ptr<spu_rdata_t>(raddr));
	}

	status_npc.raw().npc = pc | u8{interrupts_enabled};

	if (get_type() == spu_type::threaded)
	{
		for (auto& pair : spuq)
		{
			ar(pair.first);
			pair.second = idm::get_unlocked<lv2_obj, lv2_event_queue>(ar.pop<u32>());
		}

		for (auto& q : spup)
		{
			q = idm::get_unlocked<lv2_obj, lv2_event_queue>(ar.pop<u32>());
		}
	}
	else
	{
		for (spu_int_ctrl_t& ctrl : int_ctrl)
		{
			ar(ctrl.mask, ctrl.stat);
			ctrl.tag = idm::get_unlocked<lv2_obj, lv2_int_tag>(ar.pop<u32>());
		}

		g_raw_spu_ctr++;
		g_raw_spu_id[index] = id;
	}

	ar(stop_flag_removal_protection);
}

// Write this thread's state to a savestate. Must mirror the savestate constructor above exactly.
void spu_thread::save(utils::serial& ar)
{
	USING_SERIALIZATION_VERSION(spu);

	if (raddr)
	{
		// Last check for reservation-lost event
		get_events(SPU_EVENT_LR);
	}

	ar(index);

	if (get_type() != spu_type::threaded)
	{
		ar(u8{get_type() == spu_type::isolated});
	}

	ar(option, lv2_id, *spu_tname.load());

	serialize_common(ar);

	// Let's save it as u64 for future proofing
	ar(u64{raddr});

	if (get_type() == spu_type::threaded)
	{
		for (const auto& [key, q] : spuq)
		{
			ar(key);
			ar(lv2_obj::check(q) ? q->id : 0);
		}

		for (auto& p : spup)
		{
			ar(lv2_obj::check(p) ? p->id : 0);
		}
	}
	else
	{
		for (const spu_int_ctrl_t& ctrl : int_ctrl)
		{
			ar(ctrl.mask, ctrl.stat, lv2_obj::check(ctrl.tag) ? ctrl.tag->id : 0);
		}
	}

	ar(!!(state & cpu_flag::stop));
}

// Write a value to SNR1/SNR2 (number selects the register), honoring the OR-mode
// configuration bit, and raise the matching S1/S2 event.
void spu_thread::push_snr(u32 number, u32 value)
{
	// Get channel
	const auto channel = number & 1 ? &ch_snr2 : &ch_snr1;

	// Prepare some data
	const u32 event_bit = SPU_EVENT_S1 >> (number & 1);
	const bool bitor_bit = !!((snr_config >> number) & 1);

	// Redundant, g_use_rtm is checked inside tx_start now.
if (g_use_rtm && false)
	{
		// Dead code path (disabled by '&& false'): transactional SNR push
		bool channel_notify = false;
		bool thread_notify = false;

		const bool ok = utils::tx_start([&]
		{
			channel_notify = (channel->data.raw() == spu_channel::bit_wait);
			thread_notify = (channel->data.raw() & spu_channel::bit_count) == 0;

			if (channel_notify)
			{
				ensure(channel->jostling_value.raw() == spu_channel::bit_wait);
				channel->jostling_value.raw() = value;
				channel->data.raw() = 0;
			}
			else if (bitor_bit)
			{
				channel->data.raw() &= ~spu_channel::bit_wait;
				channel->data.raw() |= spu_channel::bit_count | value;
			}
			else
			{
				channel->data.raw() = spu_channel::bit_count | value;
			}

			if (thread_notify)
			{
				ch_events.raw().events |= event_bit;

				if (ch_events.raw().mask & event_bit)
				{
					ch_events.raw().count = 1;
					thread_notify = ch_events.raw().waiting != 0;
				}
				else
				{
					thread_notify = false;
				}
			}
		});

		if (ok)
		{
			if (channel_notify)
				channel->data.notify_one();
			if (thread_notify)
				this->notify();

			return;
		}
	}

	// Lock event channel in case it needs event notification
	ch_events.atomic_op([](ch_events_t& ev)
	{
		ev.locks++;
	});

	// Check corresponding SNR register settings
	auto push_state = channel->push(value, bitor_bit);

	if (push_state.old_count < push_state.count)
	{
		set_events(event_bit);
	}
	else if (!push_state.op_done)
	{
		// Push could not complete: only legal while stopping; mark the current thread to retry
		ensure(is_stopped());

		if (auto cpu = cpu_thread::get_current())
		{
			cpu->state += cpu_flag::again;
		}
	}

	ch_events.atomic_op([](ch_events_t& ev)
	{
		ev.locks--;
	});
}

// Execute a GET/PUT (or SDCRZ) MFC transfer between local storage and main memory.
// Handles SPU thread group MMIO (LS mirrors, SNR writes) and RawSPU MMIO specially.
// '_this' may be null when called outside an SPU thread context.
void spu_thread::do_dma_transfer(spu_thread* _this, const spu_mfc_cmd& args, u8* ls)
{
	perf_meter<"DMA"_u32> perf_;

	const bool is_get = (args.cmd & ~(MFC_BARRIER_MASK | MFC_FENCE_MASK | MFC_START_MASK)) == MFC_GET_CMD;

	u32 eal = args.eal;
	u32 lsa = args.lsa & 0x3ffff;

	// Keep src point to const
	u8* dst = nullptr;
	const u8* src = nullptr;

	std::tie(dst, src) = [&]() -> std::pair<u8*, const u8*>
	{
		u8* dst = vm::_ptr<u8>(eal);
		u8* src = ls + lsa;

		if (is_get)
		{
			std::swap(src, dst);
		}

		return {dst, src};
	}();

	// SPU Thread Group MMIO (LS and SNR) and RawSPU MMIO
	if (_this && eal >= RAW_SPU_BASE_ADDR)
	{
		if (g_cfg.core.mfc_debug && _this)
		{
			// TODO
		}

		const u32 index = (eal - SYS_SPU_THREAD_BASE_LOW) / SYS_SPU_THREAD_OFFSET; // thread number in group
		const u32 offset = (eal - SYS_SPU_THREAD_BASE_LOW) % SYS_SPU_THREAD_OFFSET; // LS offset or MMIO register

		if (eal < SYS_SPU_THREAD_BASE_LOW)
		{
			// RawSPU MMIO
			auto thread = idm::get<named_thread<spu_thread>>(find_raw_spu((eal - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET));

			if (!thread)
			{
				// Access Violation
			}
			else if ((eal - RAW_SPU_BASE_ADDR) % RAW_SPU_OFFSET + args.size - 1 < SPU_LS_SIZE) // LS access
			{
			}
			else if (u32 value; args.size == 4 && is_get && thread->read_reg(eal, value))
			{
				_this->_ref<u32>(lsa) = value;
				return;
			}
			else if (args.size == 4 && !is_get && thread->write_reg(eal, args.cmd != MFC_SDCRZ_CMD ? +_this->_ref<u32>(lsa) : 0))
			{
				return;
			}
			else
			{
				fmt::throw_exception("Invalid RawSPU MMIO offset (cmd=[%s])", args);
			}
		}
		else if (_this->get_type() >= spu_type::raw)
		{
			// Access Violation
		}
		else if (_this->group && _this->group->threads_map[index] != -1)
		{
			auto& spu = static_cast<spu_thread&>(*_this->group->threads[_this->group->threads_map[index]]);

			if (offset + args.size <= SPU_LS_SIZE) // LS access
			{
				// redirect access
				if (auto ptr = spu.ls + offset; is_get)
					src = ptr;
				else
					dst = ptr;
			}
			else if (!is_get && args.size == 4 && (offset == SYS_SPU_THREAD_SNR1 || offset == SYS_SPU_THREAD_SNR2))
			{
				spu.push_snr(SYS_SPU_THREAD_SNR2 == offset, args.cmd != MFC_SDCRZ_CMD ? +_this->_ref<u32>(lsa) : 0);
				return;
			}
			else
			{
				fmt::throw_exception("Invalid MMIO offset (cmd=[%s])", args);
			}
		}
		else
		{
			// Access Violation
		}
	}

	// Cleanup: if PUT or GET happens after PUTLLC failure, it's too complicated and it's easier to just give up
	if (_this)
	{
		_this->last_faddr = 0;
	}

	// It is so rare that optimizations are not implemented (TODO)
	alignas(64) static constexpr u8 zero_buf[0x10000]{};

	if (args.cmd == MFC_SDCRZ_CMD)
	{
		src = zero_buf;
	}

	rsx::reservation_lock<false, 1> rsx_lock(eal, args.size, !is_get && (g_cfg.video.strict_rendering_mode || (g_cfg.core.rsx_fifo_accuracy && !g_cfg.core.spu_accurate_dma && eal < rsx::constants::local_mem_base)));

	if ((!g_use_rtm && !is_get) || g_cfg.core.spu_accurate_dma) [[unlikely]]
	{
		// Accurate/slow path: per-cache-line copies synchronized with the reservation system
		perf_meter<"ADMA_GET"_u64> perf_get = perf_;
		perf_meter<"ADMA_PUT"_u64> perf_put = perf_;

		cpu_thread* _cpu = _this ? _this : get_current_cpu_thread();

		atomic_t<u64, 64>* range_lock = nullptr;

		if (!_this) [[unlikely]]
		{
			if (_cpu->get_class() == thread_class::spu)
			{
				// Use range_lock of current SPU thread for range locks
				range_lock = static_cast<spu_thread*>(_cpu)->range_lock;
			}
			else
			{
				goto plain_access;
			}
		}
		else
		{
			range_lock = _this->range_lock;
		}

		utils::prefetch_write(range_lock);

		// GET: copy line by line, retrying until the reservation time is stable across the copy
		for (u32 size = args.size, size0; is_get; size -= size0, dst += size0, src += size0, eal += size0)
		{
			size0 = std::min<u32>(128 - (eal & 127), std::min<u32>(size, 128));

			for (u64 i = 0;; [&]()
			{
				if (_cpu->state)
				{
					_cpu->check_state();
				}
				else if (++i < 25) [[likely]]
				{
					busy_wait(300);
				}
				else
				{
					_cpu->state += cpu_flag::wait + cpu_flag::temp;
					std::this_thread::yield();
					_cpu->check_state();
				}
			}())
			{
				const u64 time0 = vm::reservation_acquire(eal);

				if (time0 & 127)
				{
					continue;
				}

				const auto cpu = get_current_cpu_thread<spu_thread>();

				alignas(64) u8 temp[128];

				// If reading our own reserved line, stage into 'temp' to validate against rdata
				u8* dst0 = cpu && (eal & -128) == cpu->raddr ? temp : dst;

				if (dst0 == +temp && time0 != cpu->rtime)
				{
					// Validate rtime for read data
					cpu->set_events(SPU_EVENT_LR);
					cpu->raddr = 0;
				}

				switch (size0)
				{
				case 1:
				{
					*reinterpret_cast<u8*>(dst0) = *reinterpret_cast<const u8*>(src);
					break;
				}
				case 2:
				{
					*reinterpret_cast<u16*>(dst0) = *reinterpret_cast<const u16*>(src);
					break;
				}
				case 4:
				{
					*reinterpret_cast<u32*>(dst0) = *reinterpret_cast<const u32*>(src);
					break;
				}
				case 8:
				{
					*reinterpret_cast<u64*>(dst0) = *reinterpret_cast<const u64*>(src);
					break;
				}
				case 128:
				{
					mov_rdata(*reinterpret_cast<spu_rdata_t*>(dst0), *reinterpret_cast<const spu_rdata_t*>(src));
					break;
				}
				default:
				{
					auto dst1 = dst0;
					auto src1 = src;
					auto size1 = size0;

					while (size1)
					{
						*reinterpret_cast<v128*>(dst1) = *reinterpret_cast<const v128*>(src1);

						dst1 += 16;
						src1 += 16;
						size1 -= 16;
					}

					break;
				}
				}

				if (time0 != vm::reservation_acquire(eal) || (size0 == 128 && !cmp_rdata(*reinterpret_cast<spu_rdata_t*>(dst0), *reinterpret_cast<const spu_rdata_t*>(src))))
				{
					// Reservation changed mid-copy: retry this line
					continue;
				}

				if (dst0 == +temp)
				{
					// Write to LS
					std::memcpy(dst, dst0, size0);

					// Validate data
					if (std::memcmp(dst0, &cpu->rdata[eal & 127], size0) != 0)
					{
						cpu->set_events(SPU_EVENT_LR);
						cpu->raddr = 0;
					}
				}

				break;
			}

			if (size == size0)
			{
				if (g_cfg.core.mfc_debug && _this)
				{
					auto& dump = _this->mfc_history[_this->mfc_dump_idx++ % spu_thread::max_mfc_dump_idx];
					dump.cmd = args;
					dump.cmd.eah = _this->pc;
					dump.block_hash = _this->block_hash;
					std::memcpy(dump.data, is_get ? dst : src, std::min<u32>(args.size, 128));
				}

				return;
			}
		}

		if (g_cfg.core.spu_accurate_dma) [[unlikely]]
		{
			// Accurate PUT: lock per-byte bits in reservation memory while writing each line
			for (u32 size0, size = args.size;; size -= size0, dst += size0, src += size0, eal += size0)
			{
				size0 = std::min<u32>(128 - (eal & 127), std::min<u32>(size, 128));

				if (size0 == 128u && g_cfg.core.accurate_cache_line_stores)
				{
					// As atomic as PUTLLUC
					do_cell_atomic_128_store(eal, src);

					if (size == size0)
					{
						break;
					}

					continue;
				}

				// Lock each cache line
				auto& res = vm::reservation_acquire(eal);

				// Lock each bit corresponding to a byte being written, using some free space in reservation memory
				auto* bits = utils::bless<atomic_t<u128>>(vm::g_reservations + ((eal & 0xff80) / 2 + 16));

				// Get writing mask
				const u128 wmask = (~u128{} << (eal & 127)) & (~u128{} >> (127 - ((eal + size0 - 1) & 127)));
				//const u64 start = (eal & 127) / 2;
				//const u64 _end_ = ((eal + size0 - 1) & 127) / 2;
				//const u64 wmask = (UINT64_MAX << start) & (UINT64_MAX >> (63 - _end_));

				u128 old = 0;

				for (u64 i = 0; i != umax; [&]()
				{
					if (_cpu->state & cpu_flag::pause)
					{
						const bool ok = cpu_thread::if_suspended<0>(_cpu, {dst, dst + 64, &res}, [&]
						{
							std::memcpy(dst, src, size0);
							res += 128;
						});

						if (ok)
						{
							// Exit loop and function
							i = -1;
							bits = nullptr;
							return;
						}
					}

					if (true || ++i < 10)
					{
						busy_wait(500);
					}
					else
					{
						// Wait
						_cpu->state += cpu_flag::wait + cpu_flag::temp;
						// bits->wait(old, wmask);
						_cpu->check_state();
					}
				}())
				{
					// Completed in suspend_all()
					if (!bits)
					{
						break;
					}

					bool ok = false;

					std::tie(old, ok) = bits->fetch_op([&](u128& v)
					{
						if (v & wmask)
						{
							return false;
						}

						v |= wmask;
						return true;
					});

					if (ok) [[likely]]
					{
						break;
					}
				}

				if (!bits)
				{
					if (size == size0)
					{
						break;
					}

					continue;
				}

				// Lock reservation (shared)
				auto [_oldd, _ok] = res.fetch_op([&](u64& r)
				{
					if (r & vm::rsrv_unique_lock)
					{
						return false;
					}

					r += 1;
					return true;
				});

				if (!_ok)
				{
					vm::reservation_shared_lock_internal(res);
				}

				// Obtain range lock as normal store
				vm::range_lock(range_lock, eal, size0);

				switch (size0)
				{
				case 1:
				{
*reinterpret_cast<u8*>(dst) = *reinterpret_cast<const u8*>(src); break; } case 2: { *reinterpret_cast<u16*>(dst) = *reinterpret_cast<const u16*>(src); break; } case 4: { *reinterpret_cast<u32*>(dst) = *reinterpret_cast<const u32*>(src); break; } case 8: { *reinterpret_cast<u64*>(dst) = *reinterpret_cast<const u64*>(src); break; } case 128: { mov_rdata(*reinterpret_cast<spu_rdata_t*>(dst), *reinterpret_cast<const spu_rdata_t*>(src)); break; } default: { auto _dst = dst; auto _src = src; auto _size = size0; while (_size) { *reinterpret_cast<v128*>(_dst) = *reinterpret_cast<const v128*>(_src); _dst += 16; _src += 16; _size -= 16; } break; } } range_lock->release(0); res += 127; // Release bits and notify bits->atomic_op([&](u128& v) { v &= ~wmask; }); // bits->notify_all(wmask); if (size == size0) { break; } } //atomic_fence_seq_cst(); if (g_cfg.core.mfc_debug && _this) { auto& dump = _this->mfc_history[_this->mfc_dump_idx++ % spu_thread::max_mfc_dump_idx]; dump.cmd = args; dump.cmd.eah = _this->pc; dump.block_hash = _this->block_hash; std::memcpy(dump.data, is_get ? 
dst : src, std::min<u32>(args.size, 128)); } return; } else { perf_put.reset(); perf_get.reset(); } perf_meter<"DMA_PUT"_u64> perf2 = perf_; switch (u32 size = args.size) { case 1: { vm::range_lock<1>(range_lock, eal, 1); *reinterpret_cast<u8*>(dst) = *reinterpret_cast<const u8*>(src); range_lock->release(0); break; } case 2: { vm::range_lock<2>(range_lock, eal, 2); *reinterpret_cast<u16*>(dst) = *reinterpret_cast<const u16*>(src); range_lock->release(0); break; } case 4: { vm::range_lock<4>(range_lock, eal, 4); *reinterpret_cast<u32*>(dst) = *reinterpret_cast<const u32*>(src); range_lock->release(0); break; } case 8: { vm::range_lock<8>(range_lock, eal, 8); *reinterpret_cast<u64*>(dst) = *reinterpret_cast<const u64*>(src); range_lock->release(0); break; } default: { if (eal >> 28 == rsx::constants::local_mem_base >> 28) { if (size > s_rep_movsb_threshold) { __movsb(dst, src, size); } else { // Avoid unaligned stores in mov_rdata_avx if (reinterpret_cast<u64>(dst) & 0x10) { *reinterpret_cast<v128*>(dst) = *reinterpret_cast<const v128*>(src); dst += 16; src += 16; size -= 16; } while (size >= 128) { mov_rdata(*reinterpret_cast<spu_rdata_t*>(dst), *reinterpret_cast<const spu_rdata_t*>(src)); dst += 128; src += 128; size -= 128; } while (size) { *reinterpret_cast<v128*>(dst) = *reinterpret_cast<const v128*>(src); dst += 16; src += 16; size -= 16; } } break; } if (((eal & 127) + size) <= 128) { vm::range_lock(range_lock, eal, size); while (size) { *reinterpret_cast<v128*>(dst) = *reinterpret_cast<const v128*>(src); dst += 16; src += 16; size -= 16; } range_lock->release(0); break; } u32 range_addr = eal & -128; u32 range_end = utils::align(eal + size, 128); // Handle the case of crossing 64K page borders (TODO: maybe split in 4K fragments?) 
if (range_addr >> 16 != (range_end - 1) >> 16) { u32 nexta = range_end & -65536; u32 size0 = nexta - eal; size -= size0; // Split locking + transfer in two parts (before 64K border, and after it) vm::range_lock(range_lock, range_addr, size0); if (size > s_rep_movsb_threshold) { __movsb(dst, src, size0); dst += size0; src += size0; } else { // Avoid unaligned stores in mov_rdata_avx if (reinterpret_cast<u64>(dst) & 0x10) { *reinterpret_cast<v128*>(dst) = *reinterpret_cast<const v128*>(src); dst += 16; src += 16; size0 -= 16; } while (size0 >= 128) { mov_rdata(*reinterpret_cast<spu_rdata_t*>(dst), *reinterpret_cast<const spu_rdata_t*>(src)); dst += 128; src += 128; size0 -= 128; } while (size0) { *reinterpret_cast<v128*>(dst) = *reinterpret_cast<const v128*>(src); dst += 16; src += 16; size0 -= 16; } } range_lock->release(0); range_addr = nexta; } vm::range_lock(range_lock, range_addr, range_end - range_addr); if (size > s_rep_movsb_threshold) { __movsb(dst, src, size); } else { // Avoid unaligned stores in mov_rdata_avx if (reinterpret_cast<u64>(dst) & 0x10) { *reinterpret_cast<v128*>(dst) = *reinterpret_cast<const v128*>(src); dst += 16; src += 16; size -= 16; } while (size >= 128) { mov_rdata(*reinterpret_cast<spu_rdata_t*>(dst), *reinterpret_cast<const spu_rdata_t*>(src)); dst += 128; src += 128; size -= 128; } while (size) { *reinterpret_cast<v128*>(dst) = *reinterpret_cast<const v128*>(src); dst += 16; src += 16; size -= 16; } } range_lock->release(0); break; } } if (g_cfg.core.mfc_debug && _this) { auto& dump = _this->mfc_history[_this->mfc_dump_idx++ % spu_thread::max_mfc_dump_idx]; dump.cmd = args; dump.cmd.eah = _this->pc; dump.block_hash = _this->block_hash; std::memcpy(dump.data, is_get ? 
dst : src, std::min<u32>(args.size, 128)); } return; } plain_access: switch (u32 size = args.size) { case 1: { *reinterpret_cast<u8*>(dst) = *reinterpret_cast<const u8*>(src); break; } case 2: { *reinterpret_cast<u16*>(dst) = *reinterpret_cast<const u16*>(src); break; } case 4: { *reinterpret_cast<u32*>(dst) = *reinterpret_cast<const u32*>(src); break; } case 8: { *reinterpret_cast<u64*>(dst) = *reinterpret_cast<const u64*>(src); break; } default: { if (size > s_rep_movsb_threshold) { __movsb(dst, src, size); } else { // Avoid unaligned stores in mov_rdata_avx if (reinterpret_cast<u64>(dst) & 0x10) { *reinterpret_cast<v128*>(dst) = *reinterpret_cast<const v128*>(src); dst += 16; src += 16; size -= 16; } while (size >= 128) { mov_rdata(*reinterpret_cast<spu_rdata_t*>(dst), *reinterpret_cast<const spu_rdata_t*>(src)); dst += 128; src += 128; size -= 128; } while (size) { *reinterpret_cast<v128*>(dst) = *reinterpret_cast<const v128*>(src); dst += 16; src += 16; size -= 16; } } break; } } if (g_cfg.core.mfc_debug && _this) { auto& dump = _this->mfc_history[_this->mfc_dump_idx++ % spu_thread::max_mfc_dump_idx]; dump.cmd = args; dump.cmd.eah = _this->pc; dump.block_hash = _this->block_hash; std::memcpy(dump.data, is_get ? 
dst : src, std::min<u32>(args.size, 128));
	}
}

// Check whether MFC command 'args' may execute now with respect to the
// current per-tag barrier/fence masks (mfc_barrier/mfc_fence).
// Returns true when the command is not blocked by a preceding barrier or by
// its own fence/barrier flag against a fenced tag.
bool spu_thread::do_dma_check(const spu_mfc_cmd& args)
{
	// One bit per tag in the masks
	const u32 mask = utils::rol32(1, args.tag);

	if (mfc_barrier & mask || (args.cmd & (MFC_BARRIER_MASK | MFC_FENCE_MASK) && mfc_fence & mask)) [[unlikely]]
	{
		// Check for special value combination (normally impossible)
		if (false)
		{
			// Update barrier/fence masks if necessary
			mfc_barrier = 0;
			mfc_fence = 0;

			for (u32 i = 0; i < mfc_size; i++)
			{
				if ((mfc_queue[i].cmd & ~0xc) == MFC_BARRIER_CMD)
				{
					// Barrier-class command blocks every tag
					mfc_barrier |= -1;
					mfc_fence |= utils::rol32(1, mfc_queue[i].tag);
					continue;
				}

				if (true)
				{
					const u32 _mask = utils::rol32(1u, mfc_queue[i].tag);

					// A command with barrier hard blocks that tag until it's been dealt with
					if (mfc_queue[i].cmd & MFC_BARRIER_MASK)
					{
						mfc_barrier |= _mask;
					}

					// A new command that has a fence can't be executed until the stalled list has been dealt with
					mfc_fence |= _mask;
				}
			}

			if (mfc_barrier & mask || (args.cmd & MFC_FENCE_MASK && mfc_fence & mask))
			{
				return false;
			}

			return true;
		}

		return false;
	}

	return true;
}

// Execute an MFC list command: walk the 8-byte list elements
// {stall-bit, transfer-size, effective-address} located in LS and perform one
// transfer per element. Returns false when execution stalls on an element
// with the stall-and-notify bit set (resume state is saved back into 'args').
bool spu_thread::do_list_transfer(spu_mfc_cmd& args)
{
	perf_meter<"MFC_LIST"_u64> perf0;

	// Amount of elements to fetch in one go
	constexpr u32 fetch_size = 6;

	struct alignas(8) list_element
	{
		u8 sb; // Stall-and-Notify bit (0x80)
		u8 pad;
		be_t<u16> ts; // List Transfer Size
		be_t<u32> ea; // External Address Low
	};

	alignas(16) list_element items[fetch_size];

	static_assert(sizeof(v128) % sizeof(list_element) == 0);

	// Template command reused for each element's transfer (tag/cmd fixed; eal/lsa/size filled per element)
	spu_mfc_cmd transfer;
	transfer.eah = 0;
	transfer.tag = args.tag;
	transfer.cmd = MFC{static_cast<u8>(args.cmd & ~0xf)};

	// Start at fetch_size to force an element fetch on the first iteration
	u32 index = fetch_size;

	auto item_ptr = _ptr<const list_element>(args.eal & 0x3fff8);

	u32 arg_lsa = args.lsa & 0x3fff0;
	u32 arg_size = args.size;

	// Non-zero only for plain GET/PUT list commands; cleared below when
	// tracing/accurate DMA/MFC debug disables the inlined fast paths
	u8 optimization_compatible = transfer.cmd & (MFC_GET_CMD | MFC_PUT_CMD);

	if (spu_log.trace || g_cfg.core.spu_accurate_dma || g_cfg.core.mfc_debug)
	{
		optimization_compatible = 0;
	}

	rsx::reservation_lock<false, 1> rsx_lock(0, 128, optimization_compatible == MFC_PUT_CMD &&
(g_cfg.video.strict_rendering_mode || (g_cfg.core.rsx_fifo_accuracy && !g_cfg.core.spu_accurate_dma)));

	constexpr u32 ts_mask = 0x7fff;

	// Assume called with size greater than 0
	while (true)
	{
		// Check if fetching is needed
		if (index == fetch_size)
		{
			const v128 data0 = v128::loadu(item_ptr, 0);
			const v128 data1 = v128::loadu(item_ptr, 1);
			const v128 data2 = v128::loadu(item_ptr, 2);

			// In a perfect world this would not be needed until after the if but relying on the compiler to keep the elements in SSE registers through it all is unrealistic
			std::memcpy(&items[sizeof(v128) / sizeof(list_element) * 0], &data0, sizeof(v128));
			std::memcpy(&items[sizeof(v128) / sizeof(list_element) * 1], &data1, sizeof(v128));
			std::memcpy(&items[sizeof(v128) / sizeof(list_element) * 2], &data2, sizeof(v128));

			// Raw (still big-endian) transfer size of element 0; byteswap is postponed
			u32 s_size = data0._u32[0];

			// We need to verify matching between odd and even elements (vector test is position independent)
			// 0-5 is the most unlikely couple match for many reasons so it skips the entire check very efficiently in most cases
			// Assumes padding bits should match
			if (optimization_compatible == MFC_GET_CMD && s_size == data0._u32[2] && arg_size >= fetch_size * 8)
			{
				const v128 ored = (data0 | data1 | data2) & v128::from64p(std::bit_cast<be_t<u64>>(1ull << 63 | (u64{ts_mask} << 32) | 0xe000'0000));
				const v128 anded = (data0 & data1 & data2) & v128::from64p(std::bit_cast<be_t<u64>>(0xe000'0000 | (u64{ts_mask} << 32)));

				// Tests:
				// 1. Unset stall-and-notify bit on all 6 elements
				// 2. Equality of transfer size across all 6 elements
				// 3. Be in the same 512mb region, this is because this case is not expected to be broken usually and we need to ensure MMIO is not involved in any of the transfers (assumes MMIO to be so rare that this is the last check)
				if (ored == anded && items[0].ea < RAW_SPU_BASE_ADDR && items[1].ea < RAW_SPU_BASE_ADDR)
				{
					// Execute the postponed byteswapping and masking
					s_size = std::bit_cast<be_t<u32>>(s_size) & ts_mask;

					u8* src = vm::_ptr<u8>(0);
					u8* dst = this->ls + arg_lsa;

					// Assume success, prepare the next elements
					arg_lsa += fetch_size * utils::align<u32>(s_size, 16);
					item_ptr += fetch_size;
					arg_size -= fetch_size * 8;

					// Type which is friendly for fused address calculations
					constexpr usz _128 = 128;

					// This whole function relies on many constraints to be met (crashes real MFC), we can have a minor optimization assuming EA alignment to be +16 with +16 byte transfers
#define MOV_T(type, index, _ea) { const usz ea = _ea; *reinterpret_cast<type*>(dst + index * utils::align<u32>(sizeof(type), 16) + ea % (sizeof(type) < 16 ? 16 : 1)) = *reinterpret_cast<const type*>(src + ea); } void()
#define MOV_128(index, ea) mov_rdata(*reinterpret_cast<decltype(rdata)*>(dst + index * _128), *reinterpret_cast<const decltype(rdata)*>(src + (ea)))

					// Unrolled batch copy of all 6 fetched elements, dispatched on the common transfer size
					switch (s_size)
					{
					case 0:
					{
						if (!arg_size)
						{
							return true;
						}

						continue;
					}
					case 1:
					{
						MOV_T(u8, 0, items[0].ea);
						MOV_T(u8, 1, items[1].ea);
						MOV_T(u8, 2, items[2].ea);
						MOV_T(u8, 3, items[3].ea);
						MOV_T(u8, 4, items[4].ea);
						MOV_T(u8, 5, items[5].ea);

						if (!arg_size)
						{
							return true;
						}

						continue;
					}
					case 2:
					{
						MOV_T(u16, 0, items[0].ea);
						MOV_T(u16, 1, items[1].ea);
						MOV_T(u16, 2, items[2].ea);
						MOV_T(u16, 3, items[3].ea);
						MOV_T(u16, 4, items[4].ea);
						MOV_T(u16, 5, items[5].ea);

						if (!arg_size)
						{
							return true;
						}

						continue;
					}
					case 4:
					{
						MOV_T(u32, 0, items[0].ea);
						MOV_T(u32, 1, items[1].ea);
						MOV_T(u32, 2, items[2].ea);
						MOV_T(u32, 3, items[3].ea);
						MOV_T(u32, 4, items[4].ea);
						MOV_T(u32, 5, items[5].ea);

						if (!arg_size)
						{
							return true;
						}

						continue;
					}
					case 8:
					{
						MOV_T(u64, 0, items[0].ea);
						MOV_T(u64, 1, items[1].ea);
						MOV_T(u64, 2, items[2].ea);
						MOV_T(u64, 3, items[3].ea);
						MOV_T(u64, 4, items[4].ea);
						MOV_T(u64, 5, items[5].ea);

						if (!arg_size)
						{
							return true;
						}

						continue;
					}
					case 16:
					{
						MOV_T(v128, 0, items[0].ea);
						MOV_T(v128, 1, items[1].ea);
						MOV_T(v128, 2, items[2].ea);
						MOV_T(v128, 3, items[3].ea);
						MOV_T(v128, 4, items[4].ea);
						MOV_T(v128, 5, items[5].ea);

						if (!arg_size)
						{
							return true;
						}

						continue;
					}
					case 32:
					{
						struct mem { v128 a[2]; };

						MOV_T(mem, 0, items[0].ea);
						MOV_T(mem, 1, items[1].ea);
						MOV_T(mem, 2, items[2].ea);
						MOV_T(mem, 3, items[3].ea);
						MOV_T(mem, 4, items[4].ea);
						MOV_T(mem, 5, items[5].ea);

						if (!arg_size)
						{
							return true;
						}

						continue;
					}
					case 48:
					{
						struct mem { v128 a[3]; };

						MOV_T(mem, 0, items[0].ea);
						MOV_T(mem, 1, items[1].ea);
						MOV_T(mem, 2, items[2].ea);
						MOV_T(mem, 3, items[3].ea);
						MOV_T(mem, 4, items[4].ea);
						MOV_T(mem, 5, items[5].ea);

						if (!arg_size)
						{
							return true;
						}

						continue;
					}
					case 64:
					{
						struct mem { v128 a[4]; };

						// TODO: Optimize (4 16-bytes movings is bad)
						MOV_T(mem, 0, items[0].ea);
						MOV_T(mem, 1, items[1].ea);
						MOV_T(mem, 2, items[2].ea);
						MOV_T(mem, 3, items[3].ea);
						MOV_T(mem, 4, items[4].ea);
						MOV_T(mem, 5, items[5].ea);

						if (!arg_size)
						{
							return true;
						}

						continue;
					}
					case 128:
					{
						MOV_128(0, items[0].ea);
						MOV_128(1, items[1].ea);
						MOV_128(2, items[2].ea);
						MOV_128(3, items[3].ea);
						MOV_128(4, items[4].ea);
						MOV_128(5, items[5].ea);

						if (!arg_size)
						{
							return true;
						}

						continue;
					}
					case 256:
					{
						const usz ea0 = items[0].ea;
						MOV_128(0, ea0 + 0);
						MOV_128(1, ea0 + _128);
						const usz ea1 = items[1].ea;
						MOV_128(2, ea1 + 0);
						MOV_128(3, ea1 + _128);
						const usz ea2 = items[2].ea;
						MOV_128(4, ea2 + 0);
						MOV_128(5, ea2 + _128);
						const usz ea3 = items[3].ea;
						MOV_128(6, ea3 + 0);
						MOV_128(7, ea3 + _128);
						const usz ea4 = items[4].ea;
						MOV_128(8, ea4 + 0);
						MOV_128(9, ea4 + _128);
						const usz ea5 = items[5].ea;
						MOV_128(10, ea5 + 0);
						MOV_128(11, ea5 + _128);

						if (!arg_size)
						{
							return true;
						}

						continue;
					}
					case 512:
					{
						const usz ea0 = items[0].ea;
						MOV_128(0 , ea0 + _128 * 0);
						MOV_128(1 , ea0 + _128 * 1);
						MOV_128(2 , ea0 + _128 * 2);
						MOV_128(3 , ea0 + _128 * 3);
						const usz ea1 = items[1].ea;
						MOV_128(4 , ea1 + _128 * 0);
						MOV_128(5 , ea1 + _128 * 1);
						MOV_128(6 , ea1 + _128 * 2);
						MOV_128(7 , ea1 + _128 * 3);
						const usz ea2 = items[2].ea;
						MOV_128(8 , ea2 + _128 * 0);
						MOV_128(9 , ea2 + _128 * 1);
						MOV_128(10, ea2 + _128 * 2);
						MOV_128(11, ea2 + _128 * 3);
						const usz ea3 = items[3].ea;
						MOV_128(12, ea3 + _128 * 0);
						MOV_128(13, ea3 + _128 * 1);
						MOV_128(14, ea3 + _128 * 2);
						MOV_128(15, ea3 + _128 * 3);
						const usz ea4 = items[4].ea;
						MOV_128(16, ea4 + _128 * 0);
						MOV_128(17, ea4 + _128 * 1);
						MOV_128(18, ea4 + _128 * 2);
						MOV_128(19, ea4 + _128 * 3);
						const usz ea5 = items[5].ea;
						MOV_128(20, ea5 + _128 * 0);
						MOV_128(21, ea5 + _128 * 1);
						MOV_128(22, ea5 + _128 * 2);
						MOV_128(23, ea5 + _128 * 3);

						if (!arg_size)
						{
							return true;
						}

						continue;
					}
					default:
					{
						// TODO: Are more cases common enough? (in the range of less than 512 bytes because for more than that the optimization is doubtful)
						break;
					}
					}
#undef MOV_T
#undef MOV_128

					// Optimization miss, revert changes
					arg_lsa -= fetch_size * utils::align<u32>(s_size, 16);
					item_ptr -= fetch_size;
					arg_size += fetch_size * 8;
				}
			}

			// Reset to elements array head
			index = 0;
		}

		const u32 size = items[index].ts & ts_mask;
		const u32 addr = items[index].ea;

		// Try to inline the transfer
		if (addr < RAW_SPU_BASE_ADDR && size && optimization_compatible == MFC_GET_CMD)
		{
			// Inlined GET: copy from main memory into LS
			const u8* src = vm::_ptr<u8>(addr);
			u8* dst = this->ls + arg_lsa + (addr & 0xf);

			switch (u32 _size = size)
			{
			case 1:
			{
				*reinterpret_cast<u8*>(dst) = *reinterpret_cast<const u8*>(src);
				break;
			}
			case 2:
			{
				*reinterpret_cast<u16*>(dst) = *reinterpret_cast<const u16*>(src);
				break;
			}
			case 4:
			{
				*reinterpret_cast<u32*>(dst) = *reinterpret_cast<const u32*>(src);
				break;
			}
			case 8:
			{
				*reinterpret_cast<u64*>(dst) = *reinterpret_cast<const u64*>(src);
				break;
			}
			default:
			{
				if (_size > s_rep_movsb_threshold)
				{
					__movsb(dst, src, _size);
				}
				else
				{
					// Avoid unaligned stores in mov_rdata_avx
					if (reinterpret_cast<u64>(dst) & 0x10)
					{
						*reinterpret_cast<v128*>(dst) = *reinterpret_cast<const v128*>(src);
						dst += 16;
						src += 16;
						_size -= 16;
					}

					while (_size >= 128)
					{
						mov_rdata(*reinterpret_cast<spu_rdata_t*>(dst), *reinterpret_cast<const spu_rdata_t*>(src));
						dst += 128;
						src += 128;
						_size -= 128;
					}

					while (_size)
					{
						*reinterpret_cast<v128*>(dst) = *reinterpret_cast<const v128*>(src);
						dst += 16;
						src += 16;
						_size -= 16;
					}
				}

				break;
			}
			}

			arg_lsa += utils::align<u32>(size, 16);
		}
		// Avoid inlining huge transfers because it intentionally drops range lock unlock
		else if (optimization_compatible == MFC_PUT_CMD && ((addr >> 28 == rsx::constants::local_mem_base >> 28) || (addr < RAW_SPU_BASE_ADDR && size - 1 <= 0x400 - 1 && (addr % 0x10000 + (size - 1)) < 0x10000)))
		{
			if (addr >> 28 != rsx::constants::local_mem_base >> 28)
			{
				rsx_lock.update_if_enabled(addr, size, range_lock);

				if (!g_use_rtm)
				{
					vm::range_lock(range_lock, addr & -128, utils::align<u32>(addr + size, 128) - (addr & -128));
				}
			}
			else
			{
				range_lock->release(0);
				rsx_lock.unlock();
			}

			// Inlined PUT: copy from LS into main memory (or RSX local memory)
			u8* dst = vm::_ptr<u8>(addr);
			const u8* src = this->ls + arg_lsa + (addr & 0xf);

			switch (u32 _size = size)
			{
			case 1:
			{
				*reinterpret_cast<u8*>(dst) = *reinterpret_cast<const u8*>(src);
				break;
			}
			case 2:
			{
				*reinterpret_cast<u16*>(dst) = *reinterpret_cast<const u16*>(src);
				break;
			}
			case 4:
			{
				*reinterpret_cast<u32*>(dst) = *reinterpret_cast<const u32*>(src);
				break;
			}
			case 8:
			{
				*reinterpret_cast<u64*>(dst) = *reinterpret_cast<const u64*>(src);
				break;
			}
			default:
			{
				if (_size > s_rep_movsb_threshold)
				{
					__movsb(dst, src, _size);
				}
				else
				{
					// Avoid unaligned stores in mov_rdata_avx
					if (reinterpret_cast<u64>(dst) & 0x10)
					{
						*reinterpret_cast<v128*>(dst) = *reinterpret_cast<const v128*>(src);
						dst += 16;
						src += 16;
						_size -= 16;
					}

					while (_size >= 128)
					{
						mov_rdata(*reinterpret_cast<spu_rdata_t*>(dst), *reinterpret_cast<const spu_rdata_t*>(src));
						dst += 128;
						src += 128;
						_size -= 128;
					}

					while (_size)
					{
						*reinterpret_cast<v128*>(dst) = *reinterpret_cast<const v128*>(src);
						dst += 16;
						src += 16;
						_size -= 16;
					}
				}

				break;
			}
			}

			arg_lsa += utils::align<u32>(size, 16);
		}
		else if (size)
		{
			// Generic fallback: delegate this element to do_dma_transfer
			range_lock->release(0);
			rsx_lock.unlock();
			spu_log.trace("LIST: item=0x%016x, lsa=0x%05x", std::bit_cast<be_t<u64>>(items[index]), arg_lsa | (addr & 0xf));

			transfer.eal = addr;
			transfer.lsa = arg_lsa | (addr & 0xf);
			transfer.size = size;
			arg_lsa += utils::align<u32>(size, 16);
			do_dma_transfer(this, transfer, ls);
		}

		arg_size -= 8;

		if (!arg_size)
		{
			// No more elements
			break;
		}

		item_ptr++;

		if (items[index].sb & 0x80) [[unlikely]]
		{
			// Stall-and-notify: raise the SN event and suspend list execution,
			// saving resume position back into 'args'
			range_lock->release(0);
			ch_stall_mask |= utils::rol32(1, args.tag);

			if (!ch_stall_stat.get_count())
			{
				set_events(SPU_EVENT_SN);
			}

			ch_stall_stat.set_value(utils::rol32(1, args.tag) | ch_stall_stat.get_value());

			args.tag |= 0x80; // Set stalled status
			args.eal = ::narrow<u32>(reinterpret_cast<const u8*>(item_ptr) - this->ls);
			args.lsa =
arg_lsa;
			args.size = arg_size;
			return false;
		}

		index++;
	}

	range_lock->release(0);
	return true;
}

// PUTLLC: conditionally store the 128-byte line at the reserved address.
// Succeeds only if the reservation acquired by GETLLAR is still intact.
// Returns true on success; on failure the LR event may be raised.
bool spu_thread::do_putllc(const spu_mfc_cmd& args)
{
	perf_meter<"PUTLLC-"_u64> perf0;
	perf_meter<"PUTLLC+"_u64> perf1 = perf0;

	// Store conditionally
	const u32 addr = args.eal & -128;

	if ([&]()
	{
		perf_meter<"PUTLLC."_u64> perf2 = perf0;

		if (raddr != addr)
		{
			return false;
		}

		const auto& to_write = _ref<spu_rdata_t>(args.lsa & 0x3ff80);
		auto& res = vm::reservation_acquire(addr);

		// TODO: Limit scope!!
		rsx::reservation_lock rsx_lock(addr, 128);

		if (rtime != res)
		{
			// Reservation time changed; still succeed if the data to write is unchanged (non-accurate mode)
			if (!g_cfg.core.spu_accurate_reservations && cmp_rdata(to_write, rdata))
			{
				raddr = 0;
				return true;
			}

			return false;
		}

		if (cmp_rdata(to_write, rdata))
		{
			if (!g_cfg.core.spu_accurate_reservations)
			{
				raddr = 0;
				return true;
			}

			// Writeback of unchanged data. Only check memory change
			if (cmp_rdata(rdata, vm::_ref<spu_rdata_t>(addr)) && res.compare_and_swap_test(rtime, rtime + 128))
			{
				raddr = 0;

				// Disable notification
				return true;
			}

			return false;
		}

		// Try to take the unique lock on the reservation (fails if stamped or already locked)
		auto [_oldd, _ok] = res.fetch_op([&](u64& r)
		{
			if ((r & -128) != rtime || (r & 127))
			{
				return false;
			}

			r += vm::rsrv_unique_lock;
			return true;
		});

		if (!_ok)
		{
			// Already locked or updated: give up
			return false;
		}

		if (!g_cfg.core.spu_accurate_reservations)
		{
			// Fast path near the SPURS kernel area — presumably safe here; see spurs_addr usage elsewhere
			if (addr - spurs_addr <= 0x80)
			{
				mov_rdata(vm::_ref<spu_rdata_t>(addr), to_write);
				res += 64;
				return true;
			}
		}
		else if (!g_use_rtm)
		{
			// Touch the page to provoke a fault outside the critical section
			vm::_ref<atomic_t<u32>>(addr) += 0;
		}

		if (g_use_rtm) [[likely]]
		{
			switch (u64 count = spu_putllc_tx(addr, rtime, rdata, to_write))
			{
			case umax:
			{
				// Transaction failed repeatedly: fall back to suspend-all arbitration
				auto& data = *vm::get_super_ptr<spu_rdata_t>(addr);

				const bool ok = cpu_thread::suspend_all<+3>(this, {data, data + 64, &res}, [&]()
				{
					if ((res & -128) == rtime)
					{
						if (cmp_rdata(rdata, data))
						{
							mov_rdata(data, to_write);
							res += 64;
							return true;
						}
					}

					// Save previous data
					mov_rdata_nt(rdata, data);
					res -= 64;
					return false;
				});

				const u64 count2 = utils::get_tsc() - perf2.get();

				if (count2 > 20000 && g_cfg.core.perf_report) [[unlikely]]
				{
					perf_log.warning(u8"PUTLLC: took too long: %.3fµs (%u c) (addr=0x%x) (S)", count2 / (utils::get_tsc_freq() / 1000'000.), count2, addr);
				}

				if (ok)
				{
					break;
				}

				last_ftime = -1;
				[[fallthrough]];
			}
			case 0:
			{
				// Data comparison failed
				if (addr == last_faddr)
				{
					last_fail++;
				}

				if (last_ftime != umax)
				{
					last_faddr = 0;
					return false;
				}

				utils::prefetch_read(rdata);
				utils::prefetch_read(rdata + 64);
				last_faddr = addr;
				last_ftime = res.load() & -128;
				last_ftsc = utils::get_tsc();
				return false;
			}
			default:
			{
				if (count > 20000 && g_cfg.core.perf_report) [[unlikely]]
				{
					perf_log.warning(u8"PUTLLC: took too long: %.3fµs (%u c) (addr = 0x%x)", count / (utils::get_tsc_freq() / 1000'000.), count, addr);
				}

				break;
			}
			}

			if (addr == last_faddr)
			{
				last_succ++;
			}

			last_faddr = 0;
			return true;
		}

		auto& super_data = *vm::get_super_ptr<spu_rdata_t>(addr);
		const bool success = [&]()
		{
			// Full lock (heavyweight)
			// TODO: vm::check_addr
			vm::writer_lock lock(addr, range_lock);

			if (cmp_rdata(rdata, super_data))
			{
				mov_rdata(super_data, to_write);
				return true;
			}

			return false;
		}();

		// +64 commits the store (lock released, time advanced), -64 rolls the lock back
		res += success ? 64 : 0 - 64;
		return success;
	}())
	{
		if (raddr)
		{
			if (raddr != spurs_addr || pc != 0x11e4)
			{
				vm::reservation_notifier_notify(addr);
			}
			else
			{
				// SPURS-specific heuristic: only notify when a thread's state bit
				// flipped from running to idle — NOTE(review): 0x73/0x100 offsets
				// presumably mirror the SPURS kernel layout; confirm against SPURS
				const u32 thread_bit_mask = (1u << index);
				constexpr usz SPU_IDLE = 0x73;

				const bool switched_from_running_to_idle = (static_cast<u8>(rdata[SPU_IDLE]) & thread_bit_mask) == 0 && (_ref<u8>(0x100 + SPU_IDLE) & thread_bit_mask) != 0;

				if (switched_from_running_to_idle)
				{
					vm::reservation_notifier_notify(addr);
				}
			}

			raddr = 0;
		}

		perf0.reset();
		return true;
	}
	else
	{
		if (raddr)
		{
			// Last check for event before we clear the reservation
			if (~ch_events.load().events & SPU_EVENT_LR)
			{
				if (raddr == addr)
				{
					set_events(SPU_EVENT_LR);
				}
				else
				{
					get_events(SPU_EVENT_LR);
				}
			}
		}

		if (!vm::check_addr(addr, vm::page_writable))
		{
			utils::trigger_write_page_fault(vm::base(addr));
		}

		raddr = 0;
		perf1.reset();
		return false;
	}
}

// Atomically store one 128-byte cache line at 'addr' (PUTLLUC semantics),
// taking the unique lock on the corresponding reservation entry for the
// duration of the store and bumping the reservation time afterwards.
void do_cell_atomic_128_store(u32 addr, const void* to_write)
{
	perf_meter<"STORE128"_u64> perf0;

	const auto cpu = get_current_cpu_thread();
	rsx::reservation_lock rsx_lock(addr, 128);

	u64 shared_mem = vm::g_shmem[addr >> 16];

	if (!shared_mem)
	{
		shared_mem = addr;
	}

	{
		auto& sdata = *vm::get_super_ptr<spu_rdata_t>(addr);
		auto& res = *utils::bless<atomic_t<u128>>(vm::g_reservations + (addr & 0xff80) / 2);

		// Acquire the unique+putunc lock on the reservation, publishing our shmem id
		for (u64 j = 0;; j++)
		{
			auto [_oldd, _ok] = res.fetch_op([&](u128& r)
			{
				if (r & 127)
				{
					return false;
				}

				r &= static_cast<u64>(r);
				r |= u128{shared_mem} << 64;
				r |= u128{vm::rsrv_unique_lock | vm::rsrv_putunc_flag};
				return true;
			});

			if (_ok)
			{
				break;
			}

			if (static_cast<u64>(_oldd) & vm::rsrv_putunc_flag && static_cast<u64>(_oldd >> 64) == shared_mem)
			{
				// Abandon store
				for (u64 k = 0;; k++)
				{
					if (res ^ _oldd)
					{
						break;
					}

					if (auto cpu = get_current_cpu_thread(); cpu && cpu->state)
					{
						cpu->check_state();
					}
					else if (k < 15)
					{
						busy_wait(500);
					}
					else
					{
						std::this_thread::yield();
					}
				}

				return static_cast<void>(cpu->test_stopped());
			}

			if (auto cpu = get_current_cpu_thread(); cpu && cpu->state)
			{
				cpu->check_state();
			}
			else if (j < 15)
			{
				busy_wait(500);
			}
			else
			{
				std::this_thread::yield();
			}
		}

		u64 result = 1;

		if (!g_cfg.core.spu_accurate_reservations)
		{
			mov_rdata(sdata, *static_cast<const spu_rdata_t*>(to_write));
			vm::reservation_acquire(addr) += 32;
		}
		else if (cpu->state & cpu_flag::pause)
		{
			result = 0;
		}
		else if (!g_use_rtm)
		{
			// Provoke page fault
			utils::trigger_write_page_fault(vm::base(addr));

			// Hard lock
			auto spu = cpu ? cpu->try_get<spu_thread>() : nullptr;
			vm::writer_lock lock(addr, spu ? spu->range_lock : nullptr);
			mov_rdata(sdata, *static_cast<const spu_rdata_t*>(to_write));
			vm::reservation_acquire(addr) += 32;
		}
		else if (cpu->get_class() != thread_class::spu)
		{
			u64 stx, ftx;
			result = spu_putlluc_tx(addr, to_write, &stx, &ftx);
		}
		else
		{
			auto _spu = static_cast<spu_thread*>(cpu);
			result = spu_putlluc_tx(addr, to_write, &_spu->stx, &_spu->ftx);
		}

		if (result == 0)
		{
			// Transaction path failed: arbitrate via suspend_all
			cpu_thread::suspend_all<+2>(cpu, {}, [&]
			{
				mov_rdata(sdata, *static_cast<const spu_rdata_t*>(to_write));
			});

			vm::reservation_acquire(addr) += 32;
			result = utils::get_tsc() - perf0.get();
		}

		if (result > 20000 && g_cfg.core.perf_report) [[unlikely]]
		{
			perf_log.warning(u8"STORE128: took too long: %.3fµs (%u c) (addr=0x%x)", result / (utils::get_tsc_freq() / 1000'000.), result, addr);
		}

		static_cast<void>(cpu->test_stopped());
	}
}

// PUTLLUC: unconditional atomic 128-byte store, then notify reservation waiters.
void spu_thread::do_putlluc(const spu_mfc_cmd& args)
{
	perf_meter<"PUTLLUC"_u64> perf0;

	const u32 addr = args.eal & -128;

	if (raddr && addr == raddr && g_cfg.core.spu_accurate_reservations)
	{
		// Try to process PUTLLUC using PUTLLC when a reservation is active:
		// If it fails the reservation is cleared, LR event is set and we fallback to the main implementation
		// All of this is done atomically in PUTLLC
		if (!(ch_events.load().events & SPU_EVENT_LR) && do_putllc(args))
		{
			// Success, return as our job was done here
			return;
		}

		// Failure, fallback to the main implementation
		raddr = 0;
	}

	do_cell_atomic_128_store(addr, _ptr<spu_rdata_t>(args.lsa & 0x3ff80));
	vm::reservation_notifier_notify(addr);
}

bool
spu_thread::do_mfc(bool can_escape, bool must_finish) { u32 removed = 0; u32 barrier = 0; u32 fence = 0; u16 exec_mask = 0; bool pending = false; auto process_command = [&](spu_mfc_cmd& args) { // Select tag bit in the tag mask or the stall mask const u32 mask = utils::rol32(1, args.tag); if ((args.cmd & ~0xc) == MFC_BARRIER_CMD) { if (&args - mfc_queue <= removed) { // Remove barrier-class command if it's the first in the queue atomic_fence_seq_cst(); removed++; return true; } // Block all tags barrier |= -1; fence |= mask; return false; } if (barrier & mask) { fence |= mask; return false; } if (args.cmd & (MFC_BARRIER_MASK | MFC_FENCE_MASK) && fence & mask) { if (args.cmd & MFC_BARRIER_MASK) { barrier |= mask; } return false; } // If command is not enabled in execution mask, execute it later if (!(exec_mask & (1u << (&args - mfc_queue)))) { if (args.cmd & MFC_BARRIER_MASK) { barrier |= mask; } // Fence is set for any command fence |= mask; pending = true; return false; } if (args.cmd & MFC_LIST_MASK) { if (!(args.tag & 0x80)) { if (do_list_transfer(args)) { removed++; return true; } } if (args.cmd & MFC_BARRIER_MASK) { barrier |= mask; } fence |= mask; return false; } if (args.cmd == MFC_PUTQLLUC_CMD) { if (fence & mask) { return false; } do_putlluc(args); } else if (args.size) { do_dma_transfer(this, args, ls); } removed++; return true; }; auto get_exec_mask = [&size = mfc_size] { // Get commands' execution mask // Mask bits are always set when mfc_transfers_shuffling is 0 return static_cast<u16>((0 - (1u << std::min<u32>(g_cfg.core.mfc_transfers_shuffling, size))) | utils::get_tsc()); }; // Process enqueued commands while (true) { removed = 0; barrier = 0; fence = 0; // Shuffle commands execution (if enabled), explicit barriers are obeyed pending = false; exec_mask = get_exec_mask(); static_cast<void>(std::remove_if(mfc_queue + 0, mfc_queue + mfc_size, process_command)); mfc_size -= removed; mfc_barrier = barrier; mfc_fence = fence; if (removed && ch_tag_upd) { 
const u32 completed = get_mfc_completed();

// Satisfy a pending MFC_WrTagUpdate request:
// "any" mode fires once at least one unmasked tag group has completed,
// "all" mode only when every tag selected by ch_tag_mask has completed.
if (completed && ch_tag_upd == MFC_TAG_UPDATE_ANY)
{
	ch_tag_stat.set_value(completed);
	ch_tag_upd = MFC_TAG_UPDATE_IMMEDIATE;
}
else if (completed == ch_tag_mask && ch_tag_upd == MFC_TAG_UPDATE_ALL)
{
	ch_tag_stat.set_value(completed);
	ch_tag_upd = MFC_TAG_UPDATE_IMMEDIATE;
}
}

// Deliver a pending interrupt by escaping from compiled SPU code (if allowed)
if (can_escape && check_mfc_interrupts(pc + 4))
{
	spu_runtime::g_escape(this);
}

// No commands were deferred by the shuffling execution mask: queue is drained
if (!pending)
{
	break;
}

if (!must_finish && g_cfg.core.mfc_shuffling_in_steps)
{
	// Exit early, not all pending commands have to be executed at a single iteration
	// Update last timestamp so the next MFC timeout check will use the current time
	mfc_last_timestamp = get_system_time();
	return true;
}
}

return false;
}

// Check for a pending masked SPU event while interrupts are enabled.
// On success: disables interrupts, saves the return address in SRR0 and
// redirects execution to the handler at address 0 (honoring a BR/BRA
// placed there — the two are equivalent when pc is zero).
// Returns true if the interrupt branch was taken.
bool spu_thread::check_mfc_interrupts(u32 next_pc)
{
	if (ch_events.load().count && std::exchange(interrupts_enabled, false))
	{
		srr0 = next_pc;

		// Test for BR/BRA instructions (they are equivalent at zero pc)
		const u32 br = _ref<u32>(0);
		pc = (br & 0xfd80007f) == 0x30000000 ? (br >> 5) & 0x3fffc : 0;
		return true;
	}

	return false;
}

// Heuristically validate that LS memory at addr looks like executable SPU code.
// Follows up to 40 instructions, tracking branches; ls_ptr is a view of LS
// starting at base_addr. When avoid_dead_code is set, patterns that decode but
// are almost certainly not real code (unknown STOP codes, trivial infinite
// loops) are rejected as well.
bool spu_thread::is_exec_code(u32 addr, std::span<const u8> ls_ptr, u32 base_addr, bool avoid_dead_code)
{
	bool had_conditional = false;

	for (u32 i = 0; i < 40; i++)
	{
		// Address must be word-aligned and within the 256KB local storage
		if (addr & ~0x3FFFC)
		{
			return false;
		}

		// Address must fall within the provided LS view
		if (addr < base_addr || addr >= base_addr + ls_ptr.size())
		{
			return false;
		}

		const u32 addr0 = spu_branch_target(addr);
		const spu_opcode_t op{read_from_ptr<be_t<u32>>(ls_ptr, addr0 - base_addr)};
		const auto type = s_spu_itype.decode(op.opcode);

		// Undecodable or all-zero instruction: not code
		if (type == spu_itype::UNK || !op.opcode)
		{
			return false;
		}

		if (type == spu_itype::STOP)
		{
			if (op.rb)
			{
				return false;
			}

			if (avoid_dead_code)
			{
				// Only well-known syscall stop codes are accepted as live code
				switch (op.opcode)
				{
				case SYS_SPU_THREAD_STOP_YIELD:
				case SYS_SPU_THREAD_STOP_GROUP_EXIT:
				case SYS_SPU_THREAD_STOP_THREAD_EXIT:
				case SYS_SPU_THREAD_STOP_RECEIVE_EVENT:
				case SYS_SPU_THREAD_STOP_TRY_RECEIVE_EVENT:
				case SYS_SPU_THREAD_STOP_SWITCH_SYSTEM_MODULE:
				{
					break;
				}
				default:
				{
					return false;
				}
				}
			}
		}

		if (type == spu_itype::STOPD && !had_conditional)
{
	// STOPD before any conditional branch was seen: accept only when
	// dead-code avoidance is disabled
	return !avoid_dead_code;
}

if (i != 0 && type == spu_itype::STOPD)
{
	return true;
}

if (type & spu_itype::branch)
{
	// NOTE(review): BR with a non-zero rt other than 127 looks like it is
	// treated as a non-compiler-generated encoding and rejected
	if (type == spu_itype::BR && op.rt && op.rt != 127u)
	{
		return false;
	}

	auto results = op_branch_targets(addr, op);

	if (results[0] == umax)
	{
		// Unknown (indirect) primary target: only the conditional indirect
		// branches can fall through to the next instruction
		switch (type)
		{
		case spu_itype::BIZ:
		case spu_itype::BINZ:
		case spu_itype::BIHZ:
		case spu_itype::BIHNZ:
		{
			results[0] = addr + 4;
			break;
		}
		default:
		{
			break;
		}
		}

		if (results[0] == umax)
		{
			break;
		}
	}
	else
	{
		switch (type)
		{
		case spu_itype::BR:
		case spu_itype::BRNZ:
		case spu_itype::BRZ:
		case spu_itype::BRHNZ:
		case spu_itype::BRHZ:
		case spu_itype::BRSL:
		{
			// Sign-extended 18-bit relative displacement (i16 << 2)
			const s32 rel = bf_t<s32, 0, 18>::extract(static_cast<s32>(u32{op.i16} << 2));

			if (rel == 0 && !had_conditional && avoid_dead_code)
			{
				// Infinite loop 100%, detect that as invalid code
				return false;
			}

			// Detect "invalid" relative branches
			// Branch offsets that, although are the only way to get X code address using relative address
			// Rely on overflow/underflow of SPU memory bounds
			// Thus they would behave differently if SPU LS memory size was to increase (evolving the CELL architecture was the original plan)
			// Making them highly unlikely to be valid code
			if (rel < 0)
			{
				if (addr < 0u - rel)
				{
					return false;
				}
			}
			else if (SPU_LS_SIZE - addr <= rel + 0u)
			{
				return false;
			}

			if (type == spu_itype::BRSL)
			{
				// Insert a virtual return-to-next, because it is usually a call
				results[1] = addr + 4;
				std::swap(results[1], results[0]);
			}

			break;
		}
		default:
		{
			break;
		}
		}
	}

	// Validate the secondary (optional) branch targets
	for (usz res_i = 1; res_i < results.size(); res_i++)
	{
		const u32 route_pc = results[res_i];

		if (route_pc >= SPU_LS_SIZE)
		{
			continue;
		}

		if (route_pc < base_addr || route_pc >= base_addr + ls_ptr.size())
		{
			return false;
		}

		// Test the validity of a single instruction of the optional target
		// This function can't be too slow and is unlikely to improve results by a great deal
		const u32 op0 = read_from_ptr<be_t<u32>>(ls_ptr, route_pc - base_addr);
		const spu_itype::type type0 = s_spu_itype.decode(op0);

		if (type0 ==
spu_itype::UNK || !op0) { return false; } had_conditional = true; } addr = spu_branch_target(results[0]); continue; } addr += 4; } return true; } u32 spu_thread::get_mfc_completed() const { return ch_tag_mask & ~mfc_fence; } bool spu_thread::process_mfc_cmd() { // Stall infinitely if MFC queue is full while (mfc_size >= 16) [[unlikely]] { // Reset MFC timestamp in the case of full queue mfc_last_timestamp = 0; if (test_stopped()) { return false; } // Process MFC commands do_mfc(); if (mfc_size < 16) { break; } auto old = state.add_fetch(cpu_flag::wait); if (is_stopped(old)) { return false; } thread_ctrl::wait_on(state, old); } spu::scheduler::concurrent_execution_watchdog watchdog(*this); spu_log.trace("DMAC: (%s)", ch_mfc_cmd); switch (ch_mfc_cmd.cmd) { case MFC_SDCRT_CMD: case MFC_SDCRTST_CMD: return true; case MFC_GETLLAR_CMD: { perf_meter<"GETLLAR"_u64> perf0; const u32 addr = ch_mfc_cmd.eal & -128; const auto& data = vm::_ref<spu_rdata_t>(addr); if (addr == last_faddr) { // TODO: make this configurable and possible to disable spu_log.trace(u8"GETLLAR after fail: addr=0x%x, time=%u c", last_faddr, (perf0.get() - last_ftsc)); } if (addr == last_faddr && perf0.get() - last_ftsc < 1000 && (vm::reservation_acquire(addr) & -128) == last_ftime) { rtime = last_ftime; raddr = last_faddr; last_ftime = 0; mov_rdata(_ref<spu_rdata_t>(ch_mfc_cmd.lsa & 0x3ff80), rdata); ch_atomic_stat.set_value(MFC_GETLLAR_SUCCESS); return true; } else { // Silent failure last_faddr = 0; } if (raddr) { if (raddr != addr) { // Last check for event before we replace the reservation with a new one if (reservation_check(raddr, rdata)) { set_events(SPU_EVENT_LR); } } else { // Check if we can reuse our existing reservation auto& res = vm::reservation_acquire(addr); const u64 this_time = res; if (this_time % 128 == 0 && cmp_rdata(rdata, data)) { mov_rdata(_ref<spu_rdata_t>(ch_mfc_cmd.lsa & 0x3ff80), rdata); ch_atomic_stat.set_value(MFC_GETLLAR_SUCCESS); // Need to check twice for it to be 
accurate, the code is before and not after this check for: // 1. Reduce time between reservation accesses so TSX panelty would be lowered // 2. Increase the chance of change detection: if GETLLAR has been called again new data is probably wanted if (this_time == res && cmp_rdata(rdata, data)) { if (this_time != rtime) { // Reservation was lost but the data itself remains unchanged so try to ignore it set_events(SPU_EVENT_LR); rtime = this_time; } if ([&]() -> bool { // Validation that it is indeed GETLLAR spinning (large time window is intentional) if (last_getllar_addr != addr || last_getllar_gpr1 != gpr[1]._u32[3] || perf0.get() - last_gtsc >= 5'000 || (interrupts_enabled && ch_events.load().mask)) { // Seemingly not getllar_busy_waiting_switch = umax; getllar_spin_count = 0; return true; } getllar_spin_count = std::min<u32>(getllar_spin_count + 1, u16{umax}); static atomic_t<usz> g_ok = 0, g_fail = 0; if (getllar_busy_waiting_switch == umax && getllar_spin_count == 4) { const u32 percent = g_cfg.core.spu_getllar_busy_waiting_percentage; // Hidden value to force busy waiting (100 to 1 are dynamically adjusted, 0 is not) if (percent != 101) { // Predict whether or not to use operating system sleep based on history auto& stats = getllar_wait_time[(addr % SPU_LS_SIZE) / 128]; const auto old_stats = stats; std::array<u8, 4> new_stats{}; // Rotate history (prepare newest entry) new_stats[0] = 0; new_stats[1] = old_stats[0]; new_stats[2] = old_stats[1]; new_stats[3] = old_stats[2]; stats = new_stats; u32 total_wait = 0; u32 zero_count = 0; // Try to ignore major inconsistencies for (u8 val : old_stats) { total_wait += val; zero_count += (val == 0 ? 1 : 0); } // Add to chance if previous wait was long enough const u32 add_count = zero_count == 3 && total_wait >= 40 ? (total_wait - 39) * 40 : zero_count == 2 && total_wait >= 11 ? (total_wait - 10) * 40 : zero_count == 1 && total_wait >= 8 ? (total_wait - 7) * 40 : zero_count == 0 && total_wait >= 6 ? 
(total_wait - 5) * 40 : 0; // Evalute its value (shift-right to ensure its randomness with different CPUs) getllar_busy_waiting_switch = ((perf0.get() >> 8) % 100 + add_count < percent) ? 1 : 0; getllar_evaluate_time = perf0.get(); if (getllar_busy_waiting_switch) { g_fail++; } else { g_ok++; } if ((g_ok + g_fail) % 200 == 0 && !getllar_busy_waiting_switch) spu_log.trace("SPU wait: count=%d. switch=%d, spin=%d, fail=%d, ok=%d, {%d, %d, %d, %d}", total_wait, getllar_busy_waiting_switch, getllar_spin_count, +g_fail, +g_ok, old_stats[0], old_stats[1], old_stats[2], old_stats[3] ); } else { getllar_busy_waiting_switch = 1; } } // Don't be stubborn, force operating sleep if too much time has passed else if (getllar_busy_waiting_switch == 1 && perf0.get() > getllar_evaluate_time && perf0.get() - getllar_evaluate_time >= 400'000) { const u32 percent = g_cfg.core.spu_getllar_busy_waiting_percentage; // Hidden value to force busy waiting if (percent != 101) { spu_log.trace("SPU wait for 0x%x", addr); getllar_wait_time[(addr % SPU_LS_SIZE) / 128].front() = 1; getllar_busy_waiting_switch = 0; } } // Either 1 or umax return getllar_busy_waiting_switch != 0; }()) { if (g_cfg.core.mfc_debug) { auto& dump = mfc_history[mfc_dump_idx++ % spu_thread::max_mfc_dump_idx]; dump.cmd = ch_mfc_cmd; dump.cmd.eah = pc; dump.block_hash = block_hash; std::memcpy(dump.data, rdata, 128); } last_getllar = pc; last_getllar_gpr1 = gpr[1]._u32[3]; if (getllar_busy_waiting_switch == 1) { getllar_wait_time[(addr % SPU_LS_SIZE) / 128].front() = 0; #if defined(ARCH_X64) if (utils::has_um_wait()) { if (utils::has_waitpkg()) { __tpause(std::min<u32>(getllar_spin_count, 10) * 500, 0x1); } else { struct check_wait_t { static FORCE_INLINE bool needs_wait(u64 rtime, const atomic_t<u64>& mem_rtime) noexcept { return rtime == mem_rtime; } }; // Provide the first X64 cache line of the reservation to be tracked __mwaitx<check_wait_t>(std::min<u32>(getllar_spin_count, 17) * 500, 0xf0, std::addressof(data), +rtime, 
vm::reservation_acquire(addr)); } } else #endif { busy_wait(300); } // Reset perf perf0.restart(); } last_gtsc = perf0.get(); return true; } // Spinning, might as well yield cpu resources state += cpu_flag::wait; if (auto wait_var = vm::reservation_notifier_begin_wait(addr, rtime)) { utils::bless<atomic_t<u32>>(&wait_var->raw().wait_flag)->wait(1, atomic_wait_timeout{100'000}); vm::reservation_notifier_end_wait(*wait_var); } static_cast<void>(test_stopped()); // Quick check if there were reservation changes const u64 new_time = res; if (new_time % 128 == 0 && cmp_rdata(rdata, data) && res == new_time && cmp_rdata(rdata, data)) { if (g_cfg.core.mfc_debug) { auto& dump = mfc_history[mfc_dump_idx++ % spu_thread::max_mfc_dump_idx]; dump.cmd = ch_mfc_cmd; dump.cmd.eah = pc; dump.block_hash = block_hash; std::memcpy(dump.data, rdata, 128); } if (new_time != rtime) { // Reservation was lost but the data itself remains unchanged so try to ignore it set_events(SPU_EVENT_LR); rtime = new_time; } u8& val = getllar_wait_time[(addr % SPU_LS_SIZE) / 128].front(); val = static_cast<u8>(std::min<u32>(val + 1, u8{umax})); // Reset perf perf0.restart(); last_gtsc = perf0.get(); return true; } static atomic_t<u32> g_changed, g_unchanged; if (new_time == this_time && res == this_time) { spu_log.trace("RTIME unchanged on address 0x%x", addr); g_unchanged++; // Notify threads manually, memory data has likely changed and broke the reservation for others if (vm::reservation_notifier_count(addr) && res == new_time) { vm::reservation_notifier_notify(addr); } } else { g_changed++; } if ((g_changed + g_unchanged) % 200 == 0) { spu_log.trace("SPU GETLLAR wait on RTIME stats: unchanged=%d, changed=%d", +g_unchanged, +g_changed); } } } if (this_time == rtime) { // Notify threads manually, memory data has likely changed and broke the reservation for others if (vm::reservation_notifier_count(addr) && res == this_time) { vm::reservation_notifier_notify(addr); } } // We can't, LR needs to be set now 
set_events(SPU_EVENT_LR); static_cast<void>(test_stopped()); // Reset perf perf0.restart(); } last_getllar = pc; last_gtsc = perf0.get(); } last_getllar_addr = addr; getllar_spin_count = 0; getllar_busy_waiting_switch = umax; u64 ntime = 0; rsx::reservation_lock rsx_lock(addr, 128); for (u64 i = 0; i != umax; [&]() { if (state & cpu_flag::pause) { auto& sdata = *vm::get_super_ptr<spu_rdata_t>(addr); const bool ok = cpu_thread::if_suspended<0>(this, {&ntime}, [&] { // Guaranteed success ntime = vm::reservation_acquire(addr); mov_rdata_nt(rdata, sdata); }); // Exit loop if (ok && (ntime & 127) == 0) { atomic_fence_seq_cst(); i = -1; return; } } if (i < 24) [[likely]] { i++; busy_wait(300); } else { state += cpu_flag::wait + cpu_flag::temp; std::this_thread::yield(); static_cast<void>(check_state()); } }()) { ntime = vm::reservation_acquire(addr); if (ntime & vm::rsrv_unique_lock) { // There's an on-going reservation store, wait continue; } u64 test_mask = -1; if (ntime & 127) { // Try to use TSX to obtain data atomically if (!g_use_rtm || !spu_getllar_tx(addr, rdata, this, ntime & -128)) { // See previous ntime check. 
continue; } } else { mov_rdata(rdata, data); } if (u64 time0 = vm::reservation_acquire(addr); (ntime & test_mask) != (time0 & test_mask)) { // Reservation data has been modified recently if (time0 & vm::rsrv_unique_lock) i += 12; continue; } if (!cmp_rdata(rdata, data)) { i += 2; continue; } if (i >= 15 && g_cfg.core.perf_report) [[unlikely]] { perf_log.warning("GETLLAR: took too long: %u", i); } break; } raddr = addr; rtime = ntime; mov_rdata(_ref<spu_rdata_t>(ch_mfc_cmd.lsa & 0x3ff80), rdata); ch_atomic_stat.set_value(MFC_GETLLAR_SUCCESS); if (g_cfg.core.mfc_debug) { auto& dump = mfc_history[mfc_dump_idx++ % spu_thread::max_mfc_dump_idx]; dump.cmd = ch_mfc_cmd; dump.cmd.eah = pc; dump.block_hash = block_hash; std::memcpy(dump.data, rdata, 128); } return true; } case MFC_PUTLLC_CMD: { // Avoid logging useless commands if there is no reservation const bool dump = g_cfg.core.mfc_debug && raddr; const bool is_spurs_task_wait = pc == 0x11e4 && spurs_addr != 0u - 0x80u; if (!is_spurs_task_wait || spurs_addr != raddr || spurs_waited) { // } else if ((_ref<u8>(0x100 + 0x73) & (1u << index)) == 0 && (static_cast<u8>(rdata[0x73]) & (1u << index)) != 0) { // Wait for other threads to complete their tasks (temporarily) u32 max_run = group->max_run; auto [prev_running, ok] = spurs_entered_wait ? 
std::make_pair(+group->spurs_running, false) : group->spurs_running.fetch_op([max_run, num = group->max_num](u32& x) { if (x >= max_run && max_run < num) { x--; return true; } return false; }); if (ok || spurs_entered_wait) { lv2_obj::prepare_for_sleep(*this); if (ok) { if (prev_running == max_run) { group->spurs_running.notify_one(); if (group->spurs_running == max_run - 1) { // Try to let another thread slip in and take over execution thread_ctrl::wait_for(300); // Update value prev_running = group->spurs_running + 1; } } // Restore state prev_running--; } const u64 before = get_system_time(); u64 current = before; spurs_waited = true; spurs_entered_wait = true; // Wait the duration of one and a half tasks const u64 spurs_wait_time = std::clamp<u64>(spurs_average_task_duration / spurs_task_count_to_calculate * 3 / 2, 10'000, 100'000); spurs_wait_duration_last = spurs_wait_time; if (spurs_last_task_timestamp) { const u64 avg_entry = spurs_average_task_duration / spurs_task_count_to_calculate; spurs_average_task_duration -= avg_entry; spurs_average_task_duration += std::min<u64>(45'000, current - spurs_last_task_timestamp); spu_log.trace("duration: %d, avg=%d", current - spurs_last_task_timestamp, spurs_average_task_duration / spurs_task_count_to_calculate); spurs_last_task_timestamp = 0; } while (true) { if (is_stopped() || current - before >= spurs_wait_time) { // Timed-out group->spurs_running++; break; } if (prev_running >= max_run) { thread_ctrl::wait_on(group->spurs_running, prev_running, spurs_wait_time - (current - before)); } max_run = group->max_run; prev_running = group->spurs_running.fetch_op([max_run](u32& x) { if (x < max_run) { x++; return true; } return false; }).first; if (prev_running < max_run) { break; } current = get_system_time(); } state += cpu_flag::temp; static_cast<void>(test_stopped()); } } if (do_putllc(ch_mfc_cmd)) { ch_atomic_stat.set_value(MFC_PUTLLC_SUCCESS); if (is_spurs_task_wait) { const bool is_idle = (_ref<u8>(0x100 + 0x73) & 
(1u << index)) != 0; const bool was_idle = (static_cast<u8>(rdata[0x73]) & (1u << index)) != 0; if (!was_idle && is_idle) { const u32 prev_running = group->spurs_running.fetch_op([](u32& x) { if (x) { x--; return true; } return false; }).first; if (prev_running) { spurs_entered_wait = true; } if (prev_running == group->max_run && prev_running < group->max_num) { group->spurs_running.notify_one(); } } else if (was_idle && !is_idle) { // Cleanup const u64 current = get_system_time(); if (spurs_last_task_timestamp) { const u64 avg_entry = spurs_average_task_duration / spurs_task_count_to_calculate; spurs_average_task_duration -= avg_entry; spurs_average_task_duration += std::min<u64>(45'000, current - spurs_last_task_timestamp); spu_log.trace("duration: %d, avg=%d", current - spurs_last_task_timestamp, spurs_average_task_duration / spurs_task_count_to_calculate); spurs_last_task_timestamp = 0; } spurs_last_task_timestamp = current; spurs_waited = false; spurs_entered_wait = false; } } } else { ch_atomic_stat.set_value(MFC_PUTLLC_FAILURE); } if (dump) { auto& dump = mfc_history[mfc_dump_idx++ % max_mfc_dump_idx]; dump.cmd = ch_mfc_cmd; dump.cmd.eah = pc; dump.cmd.tag = static_cast<u32>(ch_atomic_stat.get_value()); // Use tag as atomic status dump.block_hash = block_hash; std::memcpy(dump.data, _ptr<u8>(ch_mfc_cmd.lsa & 0x3ff80), 128); } static_cast<void>(test_stopped()); return true; } case MFC_PUTLLUC_CMD: { if (g_cfg.core.mfc_debug) { auto& dump = mfc_history[mfc_dump_idx++ % max_mfc_dump_idx]; dump.cmd = ch_mfc_cmd; dump.cmd.eah = pc; dump.block_hash = block_hash; std::memcpy(dump.data, _ptr<u8>(ch_mfc_cmd.lsa & 0x3ff80), 128); } do_putlluc(ch_mfc_cmd); ch_atomic_stat.set_value(MFC_PUTLLUC_SUCCESS); static_cast<void>(test_stopped()); return true; } case MFC_PUTQLLUC_CMD: { if (g_cfg.core.mfc_debug) { auto& dump = mfc_history[mfc_dump_idx++ % max_mfc_dump_idx]; dump.cmd = ch_mfc_cmd; dump.cmd.eah = pc; dump.block_hash = block_hash; std::memcpy(dump.data, 
_ptr<u8>(ch_mfc_cmd.lsa & 0x3ff80), 128); } const u32 mask = utils::rol32(1, ch_mfc_cmd.tag); if ((mfc_barrier | mfc_fence) & mask) [[unlikely]] { mfc_queue[mfc_size++] = ch_mfc_cmd; mfc_fence |= mask; } else { do_putlluc(ch_mfc_cmd); } return true; } case MFC_SNDSIG_CMD: case MFC_SNDSIGB_CMD: case MFC_SNDSIGF_CMD: { if (ch_mfc_cmd.size != 4) { break; } [[fallthrough]]; } case MFC_PUT_CMD: case MFC_PUTB_CMD: case MFC_PUTF_CMD: case MFC_PUTR_CMD: case MFC_PUTRB_CMD: case MFC_PUTRF_CMD: case MFC_GET_CMD: case MFC_GETB_CMD: case MFC_GETF_CMD: case MFC_SDCRZ_CMD: { if (ch_mfc_cmd.size <= 0x4000) [[likely]] { if (do_dma_check(ch_mfc_cmd)) [[likely]] { if (!g_cfg.core.mfc_transfers_shuffling) { if (ch_mfc_cmd.size) { do_dma_transfer(this, ch_mfc_cmd, ls); } return true; } if (!state.test_and_set(cpu_flag::pending)) mfc_last_timestamp = get_system_time(); } mfc_queue[mfc_size++] = ch_mfc_cmd; mfc_fence |= utils::rol32(1, ch_mfc_cmd.tag); if (ch_mfc_cmd.cmd & MFC_BARRIER_MASK) { mfc_barrier |= utils::rol32(1, ch_mfc_cmd.tag); } return true; } break; } case MFC_PUTL_CMD: case MFC_PUTLB_CMD: case MFC_PUTLF_CMD: case MFC_PUTRL_CMD: case MFC_PUTRLB_CMD: case MFC_PUTRLF_CMD: case MFC_GETL_CMD: case MFC_GETLB_CMD: case MFC_GETLF_CMD: { if (ch_mfc_cmd.size <= 0x4000) [[likely]] { auto& cmd = mfc_queue[mfc_size]; cmd = ch_mfc_cmd; //if (g_cfg.core.mfc_debug) //{ // TODO: This needs a disambiguator with list elements dumping // auto& dump = mfc_history[mfc_dump_idx++ % max_mfc_dump_idx]; // dump.cmd = ch_mfc_cmd; // dump.cmd.eah = pc; // std::memcpy(dump.data, _ptr<u8>(ch_mfc_cmd.eah & 0x3fff0), std::min<u32>(ch_mfc_cmd.size, 128)); //} if (do_dma_check(cmd)) [[likely]] { if (!g_cfg.core.mfc_transfers_shuffling) { if (!cmd.size || do_list_transfer(cmd)) [[likely]] { return true; } } else { if (!state.test_and_set(cpu_flag::pending)) mfc_last_timestamp = get_system_time(); } } mfc_size++; mfc_fence |= utils::rol32(1, cmd.tag); if (cmd.cmd & MFC_BARRIER_MASK) { mfc_barrier |= 
utils::rol32(1, cmd.tag);
}

// Deliver a pending interrupt immediately (escapes from compiled SPU code)
if (check_mfc_interrupts(pc + 4))
{
	do_mfc(false);
	spu_runtime::g_escape(this);
}

return true;
}

break;
}
case MFC_BARRIER_CMD:
case MFC_EIEIO_CMD:
case MFC_SYNC_CMD:
{
	if (mfc_size == 0)
	{
		// Empty queue: the barrier degenerates to a plain memory fence
		atomic_fence_seq_cst();
	}
	else
	{
		// Enqueue the barrier; it blocks all tags until it drains
		mfc_queue[mfc_size++] = ch_mfc_cmd;
		mfc_barrier |= -1;
		mfc_fence |= utils::rol32(1, ch_mfc_cmd.tag);
	}

	return true;
}
default:
{
	break;
}
}

fmt::throw_exception("Unknown command (cmd=%s, lsa=0x%x, ea=0x%llx, tag=0x%x, size=0x%x)",
	ch_mfc_cmd.cmd, ch_mfc_cmd.lsa, ch_mfc_cmd.eal, ch_mfc_cmd.tag, ch_mfc_cmd.size);
}

// Test whether the active reservation has been lost.
// Returns true when the reservation at addr is no longer valid: either its
// timestamp changed or the underlying 128-byte line differs from data.
bool spu_thread::reservation_check(u32 addr, const decltype(rdata)& data) const
{
	if (!addr)
	{
		// No reservation to be lost in the first place
		return false;
	}

	// Timestamp mismatch means the line was written since GETLLAR
	if ((vm::reservation_acquire(addr) & -128) != rtime)
	{
		return true;
	}

	if ((addr >> 28) < 2 || (addr >> 28) == 0xd)
	{
		// Always-allocated memory does not need strict checking (vm::main or vm::stack)
		return !cmp_rdata(data, *vm::get_super_ptr<decltype(rdata)>(addr));
	}

	// Ensure data is allocated (HACK: would raise LR event if not)
	// Set range_lock first optimistically
	range_lock->store(u64{128} << 32 | addr);

	u64 lock_val = *std::prev(std::end(vm::g_range_lock_set));
	u64 old_lock = 0;

	while (lock_val != old_lock)
	{
		// Since we want to read data, let's check readability first
		if (!(lock_val & vm::range_readable))
		{
			// Only one abnormal operation is "unreadable"
			if ((lock_val >> vm::range_pos) == (vm::range_locked >> vm::range_pos))
			{
				// All page flags are untouched and can be read safely
				if (!vm::check_addr(addr))
				{
					// Assume our memory is being (de)allocated
					range_lock->release(0);
					break;
				}

				// g_shmem values are unchanged too
				const u64 is_shmem = vm::g_shmem[addr >> 16];

				const u64 test_addr = is_shmem ?
(is_shmem | static_cast<u16>(addr)) / 128 : u64{addr} / 128;
const u64 lock_addr = lock_val / 128;

if (test_addr == lock_addr)
{
	// Our reservation is locked
	range_lock->release(0);
	break;
}

break;
}
}

// Fallback to normal range check
const u64 lock_addr = static_cast<u32>(lock_val);
const u32 lock_size = static_cast<u32>(lock_val << 3 >> 35);

if (lock_addr + lock_size <= addr || lock_addr >= addr + 128)
{
	// We are outside locked range, so page flags are unaffected
	if (!vm::check_addr(addr))
	{
		range_lock->release(0);
		break;
	}
}
else if (!(lock_val & vm::range_readable))
{
	range_lock->release(0);
	break;
}

// Re-sample the global range lock and retry if it changed meanwhile
old_lock = std::exchange(lock_val, *std::prev(std::end(vm::g_range_lock_set)));
}

if (!range_lock->load()) [[unlikely]]
{
	// Our optimistic range lock was released above: report the reservation lost
	return true;
}

const bool res = cmp_rdata(data, vm::_ref<decltype(rdata)>(addr));

range_lock->release(0);
return !res;
}

// Read the SPU decrementer: returns {current 32-bit value, upper 32 bits}.
// The second element is non-zero when the extended 64-bit count underflowed
// (used by the caller to detect the decrementer event).
std::pair<u32, u32> spu_thread::read_dec() const
{
	const u64 res = ch_dec_value - (is_dec_frozen ? 0 : (get_timebased_time() - ch_dec_start_timestamp));

	return {static_cast<u32>(res), static_cast<u32>(res >> 32)};
}

// Poll and collect SPU events. mask_hint limits which event sources are
// probed; waiting marks the channel as waited-on when no event is pending;
// reading consumes the channel count like an SPU_RdEventStat read would.
spu_thread::ch_events_t spu_thread::get_events(u64 mask_hint, bool waiting, bool reading)
{
	if (auto mask1 = ch_events.load().mask; mask1 & ~SPU_EVENT_IMPLEMENTED)
	{
		fmt::throw_exception("SPU Events not implemented (mask=0x%x)", mask1);
	}

retry:
	u32 collect = 0;

	// Check reservation status and set SPU_EVENT_LR if lost
	if (mask_hint & SPU_EVENT_LR)
	{
		if (reservation_check(raddr, rdata))
		{
			collect |= SPU_EVENT_LR;
			raddr = 0;
		}
	}

	// SPU Decrementer Event on underflow (use the upper 32-bits to determine it)
	if (mask_hint & SPU_EVENT_TM)
	{
		if (const u64 res = read_dec().second)
		{
			// Set next event to the next time the decrementer underflows
			ch_dec_start_timestamp -= res << 32;
			collect |= SPU_EVENT_TM;
		}
	}

	if (collect)
	{
		set_events(collect);
	}

	// Atomically read the event channel, optionally consuming the count
	auto [res, ok] = ch_events.fetch_op([&](ch_events_t& events)
	{
		if (!reading)
			return false;
		if (waiting)
			events.waiting = !events.count;
		events.count = false;
		return true;
	});

	if
(reading && res.locks && mask_hint & (SPU_EVENT_S1 | SPU_EVENT_S2))
{
	// NOTE(review): res.locks appears to flag a concurrent channel update
	// in progress - spin briefly and re-poll
	busy_wait(100);
	goto retry;
}

return res;
}

// Raise event bits. If any raised bit is in the current event mask, the
// channel count is set; the thread is woken only for signal-notification
// events while it is flagged as waiting.
void spu_thread::set_events(u32 bits)
{
	if (ch_events.atomic_op([&](ch_events_t& events)
	{
		events.events |= bits;

		// If one masked event was fired, set the channel count (even if the event bit was already 1)
		if (events.mask & bits)
		{
			events.count = true;
			return !!events.waiting && (bits & (SPU_EVENT_S1 | SPU_EVENT_S2));
		}

		return false;
	}))
	{
		notify();
	}
}

// Enable or disable SPU interrupts. When enabling with busy-check events
// masked, interrupt processing falls back to polling in cpu_work().
void spu_thread::set_interrupt_status(bool enable)
{
	if (enable)
	{
		// Detect enabling interrupts with events masked
		if (auto mask = ch_events.load().mask; mask & SPU_EVENT_INTR_BUSY_CHECK)
		{
			if (g_cfg.core.spu_decoder != spu_decoder_type::_static && g_cfg.core.spu_decoder != spu_decoder_type::dynamic)
			{
				fmt::throw_exception("SPU Interrupts not implemented (mask=0x%x): Use [%s] SPU decoder", mask, spu_decoder_type::dynamic);
			}

			spu_log.trace("SPU Interrupts (mask=0x%x) are using CPU busy checking mode", mask);

			// Process interrupts in cpu_work()
			if (state.none_of(cpu_flag::pending))
			{
				state += cpu_flag::pending;
			}
		}
	}

	interrupts_enabled = enable;
}

// RCHCNT: return the channel count for channel ch (reads available for
// read channels, free slots for write channels).
u32 spu_thread::get_ch_count(u32 ch)
{
	if (ch < 128)
		spu_log.trace("get_ch_count(ch=%s)", spu_ch_name[ch]);

	switch (ch)
	{
	// Outbound mailboxes report free slots (count xor 1)
	case SPU_WrOutMbox: return ch_out_mbox.get_count() ^ 1;
	case SPU_WrOutIntrMbox: return ch_out_intr_mbox.get_count() ^ 1;
	case SPU_RdInMbox: return ch_in_mbox.get_count();
	case MFC_RdTagStat: return ch_tag_stat.get_count();
	case MFC_RdListStallStat: return ch_stall_stat.get_count();
	case MFC_WrTagUpdate: return 1;
	case SPU_RdSigNotify1: return ch_snr1.get_count();
	case SPU_RdSigNotify2: return ch_snr2.get_count();
	case MFC_RdAtomicStat: return ch_atomic_stat.get_count();
	case SPU_RdEventStat: return static_cast<u32>(get_events().count);
	// MFC command queue has 16 slots
	case MFC_Cmd: return 16 - mfc_size;

	// Channels with a constant count of 1:
	case SPU_WrEventMask:
	case SPU_WrEventAck:
	case SPU_WrDec:
	case SPU_RdDec:
	case SPU_RdEventMask:
	case SPU_RdMachStat:
	case
SPU_WrSRR0:
case SPU_RdSRR0:
case SPU_Set_Bkmk_Tag:
case SPU_PM_Start_Ev:
case SPU_PM_Stop_Ev:
case MFC_RdTagMask:
case MFC_LSA:
case MFC_EAH:
case MFC_EAL:
case MFC_Size:
case MFC_TagID:
case MFC_WrTagMask:
case MFC_WrListStallAck:
	return 1;
default: break;
}

ensure(ch < 128u);

spu_log.error("Unknown/illegal channel in RCHCNT (ch=%s)", spu_ch_name[ch]);
return 0; // Default count

}

// RDCH: read a value from channel ch, blocking where the channel semantics
// require it. Returns -1 if the thread is stopped while waiting on the
// inbound mailbox (see SPU_RdInMbox path).
s64 spu_thread::get_ch_value(u32 ch)
{
	if (ch < 128)
		spu_log.trace("get_ch_value(ch=%s)", spu_ch_name[ch]);

	// Common helper: blocking pop from a plain SPU channel, flushing any
	// pending MFC work first
	auto read_channel = [&](spu_channel& channel) -> s64
	{
		if (channel.get_count() == 0)
		{
			state += cpu_flag::wait + cpu_flag::temp;
		}

		if (state & cpu_flag::pending)
		{
			do_mfc();
		}

		// Reset GETLLAR metadata
		last_getllar_addr = umax;

		const s64 out = channel.pop_wait(*this);

		if (state & cpu_flag::wait)
		{
			wakeup_delay();
		}

		static_cast<void>(test_stopped());
		return out;
	};

	switch (ch)
	{
	case SPU_RdSRR0:
	{
		return srr0;
	}
	case SPU_RdInMbox:
	{
		if (ch_in_mbox.get_count() == 0)
		{
			state += cpu_flag::wait;
		}

		while (true)
		{
			if (state & cpu_flag::pending)
			{
				do_mfc();
			}

			const auto [old_count, out] = ch_in_mbox.pop_wait(*this);

			if (old_count)
			{
				// Popping from a full mailbox raises the threshold interrupt
				if (old_count == 4 /* SPU_IN_MBOX_THRESHOLD */) // TODO: check this
				{
					int_ctrl[2].set(SPU_INT2_STAT_SPU_MAILBOX_THRESHOLD_INT);
				}

				check_state();
				return out;
			}

			auto old = +state;

			if (is_stopped(old))
			{
				return -1;
			}

			thread_ctrl::wait_on(state, old);
		}

		fmt::throw_exception("Unreachable"); // Fix unannotated fallthrough warning
	}
	case MFC_RdTagStat:
	{
		if (state & cpu_flag::pending)
		{
			do_mfc();
		}

		// Fast path: status already available
		if (u32 out; ch_tag_stat.try_read(out))
		{
			ch_tag_stat.set_value(0, false);
			return out;
		}

		return read_channel(ch_tag_stat);
	}
	case MFC_RdTagMask:
	{
		return ch_tag_mask;
	}
	case SPU_RdSigNotify1:
	{
		return read_channel(ch_snr1);
	}
	case SPU_RdSigNotify2:
	{
		return read_channel(ch_snr2);
	}
	case MFC_RdAtomicStat:
	{
		if (u32 out; ch_atomic_stat.try_read(out))
		{
			ch_atomic_stat.set_value(0, false);
			return out;
		}

		// Will stall infinitely
		return read_channel(ch_atomic_stat);
	}
	case
MFC_RdListStallStat: { if (u32 out; ch_stall_stat.try_read(out)) { ch_stall_stat.set_value(0, false); return out; } // Will stall infinitely return read_channel(ch_stall_stat); } case SPU_RdDec: { u32 out = read_dec().first; //Polling: We might as well hint to the scheduler to slot in another thread since this one is counting down if (g_cfg.core.spu_loop_detection && out > spu::scheduler::native_jiffy_duration_us) { state += cpu_flag::wait; std::this_thread::yield(); } return out; } case SPU_RdEventMask: { return ch_events.load().mask; } case SPU_RdEventStat: { const u32 mask1 = ch_events.load().mask; auto events = get_events(mask1, false, true); if (events.count) { return events.events & mask1; } spu_function_logger logger(*this, "MFC Events read"); lv2_obj::prepare_for_sleep(*this); using resrv_ptr = std::add_pointer_t<const decltype(rdata)>; resrv_mem = vm::get_super_ptr<decltype(rdata)>(raddr); std::shared_ptr<utils::shm> rdata_shm; const u32 old_raddr = raddr; // Does not need to safe-access reservation if LR is the only event masked // Because it's either an access violation or a live-lock if an invalid memory is passed if (raddr && mask1 > SPU_EVENT_LR) { auto area = vm::get(vm::any, raddr); if (area && (area->flags & vm::preallocated)) { if (!vm::check_addr(raddr)) { resrv_mem = nullptr; } } else if (area) { // Ensure possession over reservation memory so it won't be de-allocated auto [base_addr, shm_] = area->peek(raddr); if (shm_) { const u32 data_offs = raddr - base_addr; rdata_shm = std::move(shm_); resrv_mem = reinterpret_cast<resrv_ptr>(rdata_shm->get() + data_offs); } } if (!resrv_mem) { spu_log.error("A dangling reservation address has been found while reading SPU_RdEventStat channel. 
(addr=0x%x, events_mask=0x%x)", raddr, mask1); raddr = 0; set_events(SPU_EVENT_LR); } } const usz seed = (utils::get_tsc() >> 8) % 100; #ifdef __linux__ const bool reservation_busy_waiting = false; #else const bool reservation_busy_waiting = (seed + ((raddr == spurs_addr) ? 50u : 0u)) < g_cfg.core.spu_reservation_busy_waiting_percentage; #endif for (; !events.count; events = get_events(mask1 & ~SPU_EVENT_LR, true, true)) { const auto old = +state; if (is_stopped(old)) { return -1; } // Optimized check if (raddr) { bool set_lr = false; if (!vm::check_addr(raddr) || rtime != vm::reservation_acquire(raddr)) { set_lr = true; } else if (!cmp_rdata(rdata, *resrv_mem)) { // Notify threads manually, memory data has likely changed and broke the reservation for others if (vm::reservation_notifier_count(raddr) && vm::reservation_acquire(raddr) == rtime) { vm::reservation_notifier_notify(raddr); } set_lr = true; spu_log.trace("SPU Reservation Wait: Did not encounter notification (raddr=0x%x)", raddr); } if (set_lr) { raddr = 0; set_events(SPU_EVENT_LR); continue; } } if (raddr && (mask1 & ~SPU_EVENT_TM) == SPU_EVENT_LR) { if (u32 max_threads = std::min<u32>(g_cfg.core.max_spurs_threads, group ? group->max_num : u32{umax}); group && group->max_run != max_threads) { constexpr std::string_view spurs_suffix = "CellSpursKernelGroup"sv; if (group->name.ends_with(spurs_suffix) && !group->name.substr(0, group->name.size() - spurs_suffix.size()).ends_with("_libsail")) { // Hack: don't run more SPURS threads than specified. 
if (u32 old = atomic_storage<u32>::exchange(group->max_run, max_threads); old > max_threads) { spu_log.success("HACK: '%s' (0x%x) limited to %u threads.", group->name, group->id, max_threads); } else if (u32 running = group->spurs_running; old < max_threads && running >= old && running < max_threads) { group->spurs_running.notify_all(); } } } // Don't busy-wait with TSX - memory is sensitive if (g_use_rtm || !reservation_busy_waiting) { if (u32 work_count = g_spu_work_count) { const u32 true_free = utils::sub_saturate<u32>(utils::get_thread_count(), 10); if (work_count > true_free) { // SPU thread count estimation const u32 thread_count = (group ? g_raw_spu_ctr + group->max_num : g_raw_spu_ctr + 3); if (thread_count && seed % thread_count < work_count - true_free) { // Make the SPU wait longer for other threads to do the work thread_ctrl::wait_for(200); continue; } } } if (raddr - spurs_addr <= 0x80 && !g_cfg.core.spu_accurate_reservations && mask1 == SPU_EVENT_LR) { // Wait with extended timeout, in this situation we have notifications for nearly all writes making it possible // Abort notifications are handled specially for performance reasons if (auto wait_var = vm::reservation_notifier_begin_wait(raddr, rtime)) { utils::bless<atomic_t<u32>>(&wait_var->raw().wait_flag)->wait(1, atomic_wait_timeout{300'000}); vm::reservation_notifier_end_wait(*wait_var); } continue; } const u32 _raddr = this->raddr; #ifdef __linux__ if (auto wait_var = vm::reservation_notifier_begin_wait(_raddr, rtime)) { utils::bless<atomic_t<u32>>(&wait_var->raw().wait_flag)->wait(1, atomic_wait_timeout{50'000}); vm::reservation_notifier_end_wait(*wait_var); } #else static thread_local bool s_tls_try_notify = false; s_tls_try_notify = false; const auto wait_cb = mask1 != SPU_EVENT_LR ? 
nullptr : +[](u64 attempts) -> bool { const auto _this = static_cast<spu_thread*>(cpu_thread::get_current()); AUDIT(_this->get_class() == thread_class::spu); const auto old = +_this->state; if (is_stopped(old)) { return false; } if (!attempts) { // Skip checks which have been done already return true; } bool set_lr = false; const u32 raddr = _this->raddr; if (!raddr) { return true; } if (!vm::check_addr(raddr)) { set_lr = true; } else if (!cmp_rdata(_this->rdata, *_this->resrv_mem)) { // Notify threads manually, memory data has likely changed and broke the reservation for others if (vm::reservation_notifier_count(raddr) >= 2 && vm::reservation_acquire(raddr) == _this->rtime) { s_tls_try_notify = true; } set_lr = true; } if (set_lr) { _this->raddr = 0; _this->set_events(SPU_EVENT_LR); return false; } return true; }; if (auto wait_var = vm::reservation_notifier_begin_wait(_raddr, rtime)) { atomic_wait_engine::set_one_time_use_wait_callback(wait_cb); utils::bless<atomic_t<u32>>(&wait_var->raw().wait_flag)->wait(1, atomic_wait_timeout{80'000}); vm::reservation_notifier_end_wait(*wait_var); } if (s_tls_try_notify && vm::reservation_notifier_count(_raddr) && vm::reservation_acquire(_raddr) == rtime) { vm::reservation_notifier_notify(_raddr); } #endif } else { busy_wait(); } continue; } thread_ctrl::wait_on(state, old, 100); } wakeup_delay(); if (is_paused(state - cpu_flag::suspend)) { if (!raddr && old_raddr) { // Restore reservation address temporarily for debugging use raddr = old_raddr; check_state(); raddr = 0; } } check_state(); return events.events & mask1; } case SPU_RdMachStat: { // Return SPU Interrupt status in LSB return u32{interrupts_enabled} | (u32{get_type() == spu_type::isolated} << 1); } } fmt::throw_exception("Unknown/illegal channel in RDCH (ch=%d [%s])", ch, ch < 128 ? 
spu_ch_name[ch] : "???"); } bool spu_thread::set_ch_value(u32 ch, u32 value) { if (ch < 128) spu_log.trace("set_ch_value(ch=%s, value=0x%x)", spu_ch_name[ch], value); switch (ch) { case SPU_WrSRR0: { srr0 = value & 0x3fffc; return true; } case SPU_WrOutIntrMbox: { // Reset GETLLAR metadata last_getllar_addr = umax; if (get_type() >= spu_type::raw) { if (state & cpu_flag::pending) { do_mfc(); } if (ch_out_intr_mbox.get_count()) { state += cpu_flag::wait; } if (!ch_out_intr_mbox.push_wait(*this, value)) { return false; } int_ctrl[2].set(SPU_INT2_STAT_MAILBOX_INT); wakeup_delay(); check_state(); return true; } state += cpu_flag::wait; lv2_obj::notify_all_t notify; const u32 code = value >> 24; { if (code < 64) { /* ===== sys_spu_thread_send_event (used by spu_printf) ===== */ u32 spup = code & 63; u32 data = 0; if (!ch_out_mbox.try_pop(data)) { fmt::throw_exception("sys_spu_thread_send_event(value=0x%x, spup=%d): Out_MBox is empty", value, spup); } spu_log.trace("sys_spu_thread_send_event(spup=%d, data0=0x%x, data1=0x%x)", spup, value & 0x00ffffff, data); spu_function_logger logger(*this, "sys_spu_thread_send_event"); std::shared_ptr<lv2_event_queue> queue; { std::lock_guard lock(group->mutex); if (ch_in_mbox.get_count()) { // TODO: Check this spu_log.error("sys_spu_thread_send_event(spup=%d, data0=0x%x, data1=0x%x): In_MBox is not empty (%d)", spup, (value & 0x00ffffff), data, ch_in_mbox.get_count()); ch_in_mbox.set_values(1, CELL_EBUSY); return true; } // Reserve a place in the inbound mailbox ch_in_mbox.set_values(1, CELL_OK); queue = this->spup[spup]; } const auto res = !queue ? 
CELL_ENOTCONN : queue->send(SYS_SPU_THREAD_EVENT_USER_KEY, lv2_id, (u64{spup} << 32) | (value & 0x00ffffff), data); if (res == CELL_ENOTCONN) { spu_log.warning("sys_spu_thread_send_event(spup=%d, data0=0x%x, data1=0x%x): error (%s)", spup, (value & 0x00ffffff), data, res); } if (res == CELL_EAGAIN) { // Restore mailboxes state ch_out_mbox.set_value(data); ch_in_mbox.try_pop(data); return false; } atomic_storage<u32>::release(ch_in_mbox.values.raw().value0, res); return true; } else if (code < 128) { /* ===== sys_spu_thread_throw_event ===== */ u32 spup = code & 63; u32 data = 0; if (!ch_out_mbox.try_pop(data)) { fmt::throw_exception("sys_spu_thread_throw_event(value=0x%x, spup=%d): Out_MBox is empty", value, spup); } spu_log.trace("sys_spu_thread_throw_event(spup=%d, data0=0x%x, data1=0x%x)", spup, value & 0x00ffffff, data); spu_function_logger logger(*this, "sys_spu_thread_throw_event"); std::shared_ptr<lv2_event_queue> queue; { std::lock_guard lock{group->mutex}; queue = this->spup[spup]; } // TODO: check passing spup value if (auto res = queue ? 
queue->send(SYS_SPU_THREAD_EVENT_USER_KEY, lv2_id, (u64{spup} << 32) | (value & 0x00ffffff), data) : CELL_ENOTCONN) { if (res == CELL_EAGAIN) { ch_out_mbox.set_value(data); return false; } spu_log.warning("sys_spu_thread_throw_event(spup=%d, data0=0x%x, data1=0x%x) failed (error=%s)", spup, (value & 0x00ffffff), data, res); } return true; } else if (code == 128) { /* ===== sys_event_flag_set_bit ===== */ u32 flag = value & 0xffffff; u32 data = 0; if (!ch_out_mbox.try_pop(data)) { fmt::throw_exception("sys_event_flag_set_bit(value=0x%x (flag=%d)): Out_MBox is empty", value, flag); } spu_log.trace("sys_event_flag_set_bit(id=%d, value=0x%x (flag=%d))", data, value, flag); spu_function_logger logger(*this, "sys_event_flag_set_bit"); { std::lock_guard lock(group->mutex); if (ch_in_mbox.get_count()) { // TODO: Check this spu_log.error("sys_event_flag_set_bit(value=0x%x (flag=%d)): In_MBox is not empty (%d)", value, flag, ch_in_mbox.get_count()); ch_in_mbox.set_values(1, CELL_EBUSY); return true; } // Reserve a place in the inbound mailbox ch_in_mbox.set_values(1, CELL_OK); } // Use the syscall to set flag const auto res = 0u + sys_event_flag_set(*this, data, 1ull << flag); if (res == CELL_EAGAIN) { // Restore mailboxes state ch_out_mbox.set_value(data); ch_in_mbox.try_pop(data); return false; } atomic_storage<u32>::release(ch_in_mbox.values.raw().value0, res); return true; } else if (code == 192) { /* ===== sys_event_flag_set_bit_impatient ===== */ u32 flag = value & 0xffffff; u32 data = 0; if (!ch_out_mbox.try_pop(data)) { fmt::throw_exception("sys_event_flag_set_bit_impatient(value=0x%x (flag=%d)): Out_MBox is empty", value, flag); } spu_log.trace("sys_event_flag_set_bit_impatient(id=%d, value=0x%x (flag=%d))", data, value, flag); spu_function_logger logger(*this, "sys_event_flag_set_bit_impatient"); // Use the syscall to set flag if (sys_event_flag_set(*this, data, 1ull << flag) + 0u == CELL_EAGAIN) { ch_out_mbox.set_value(data); return false; } return true; } else { 
fmt::throw_exception("SPU_WrOutIntrMbox: unknown data (value=0x%x, Out_MBox=%s)", value, ch_out_mbox); } } } case SPU_WrOutMbox: { if (state & cpu_flag::pending) { do_mfc(); } if (ch_out_mbox.get_count()) { state += cpu_flag::wait; } if (!ch_out_mbox.push_wait(*this, value)) { return false; } check_state(); return true; } case MFC_WrTagMask: { ch_tag_mask = value; if (ch_tag_upd) { const u32 completed = get_mfc_completed(); if (completed && ch_tag_upd == MFC_TAG_UPDATE_ANY) { ch_tag_stat.set_value(completed); ch_tag_upd = MFC_TAG_UPDATE_IMMEDIATE; } else if (completed == value && ch_tag_upd == MFC_TAG_UPDATE_ALL) { ch_tag_stat.set_value(completed); ch_tag_upd = MFC_TAG_UPDATE_IMMEDIATE; } } return true; } case MFC_WrTagUpdate: { if (value > MFC_TAG_UPDATE_ALL) { break; } const u32 completed = get_mfc_completed(); if (!value) { ch_tag_upd = MFC_TAG_UPDATE_IMMEDIATE; ch_tag_stat.set_value(completed); } else if (completed && value == MFC_TAG_UPDATE_ANY) { ch_tag_upd = MFC_TAG_UPDATE_IMMEDIATE; ch_tag_stat.set_value(completed); } else if (completed == ch_tag_mask && value == MFC_TAG_UPDATE_ALL) { ch_tag_upd = MFC_TAG_UPDATE_IMMEDIATE; ch_tag_stat.set_value(completed); } else { ch_tag_upd = value; } return true; } case MFC_LSA: { ch_mfc_cmd.lsa = value; return true; } case MFC_EAH: { ch_mfc_cmd.eah = value; return true; } case MFC_EAL: { ch_mfc_cmd.eal = value; return true; } case MFC_Size: { ch_mfc_cmd.size = static_cast<u16>(std::min<u32>(value, 0xffff)); return true; } case MFC_TagID: { ch_mfc_cmd.tag = value & 0x1f; return true; } case MFC_Cmd: { ch_mfc_cmd.cmd = MFC(value & 0xff); return process_mfc_cmd(); } case MFC_WrListStallAck: { value &= 0x1f; // Reset stall status for specified tag const u32 tag_mask = utils::rol32(1, value); if (ch_stall_mask & tag_mask) { ch_stall_mask &= ~tag_mask; for (u32 i = 0; i < mfc_size; i++) { if (mfc_queue[i].tag == (value | 0x80)) { // Unset stall bit mfc_queue[i].tag &= 0x7f; } } do_mfc(true); } return true; } case SPU_WrDec: { 
get_events(SPU_EVENT_TM); // Don't discard possibly occured old event ch_dec_start_timestamp = get_timebased_time(); ch_dec_value = value; is_dec_frozen = false; return true; } case SPU_WrEventMask: { get_events(value | static_cast<u32>(ch_events.load().mask)); if (ch_events.atomic_op([&](ch_events_t& events) { events.mask = value; if (events.events & events.mask) { events.count = true; return true; } return !!events.count; })) { // Check interrupts in case count is 1 if (check_mfc_interrupts(pc + 4)) { spu_runtime::g_escape(this); } } return true; } case SPU_WrEventAck: { // "Collect" events before final acknowledgment get_events(value | static_cast<u32>(ch_events.load().mask)); bool freeze_dec = false; const bool check_intr = ch_events.atomic_op([&](ch_events_t& events) { events.events &= ~value; freeze_dec = !!((value & SPU_EVENT_TM) & ~events.mask); if (events.events & events.mask) { events.count = true; return true; } return !!events.count; }); if (!is_dec_frozen && freeze_dec) { // Save current time, this will be the reported value until the decrementer resumes ch_dec_value = read_dec().first; is_dec_frozen = true; } if (check_intr) { // Check interrupts in case count is 1 if (check_mfc_interrupts(pc + 4)) { spu_runtime::g_escape(this); } } return true; } case SPU_Set_Bkmk_Tag: case SPU_PM_Start_Ev: case SPU_PM_Stop_Ev: { return true; } } fmt::throw_exception("Unknown/illegal channel in WRCH (ch=%d [%s], value=0x%x)", ch, ch < 128 ? 
spu_ch_name[ch] : "???", value); } extern void resume_spu_thread_group_from_waiting(spu_thread& spu) { const auto group = spu.group; std::lock_guard lock(group->mutex); if (group->run_state == SPU_THREAD_GROUP_STATUS_WAITING) { group->run_state = SPU_THREAD_GROUP_STATUS_RUNNING; } else if (group->run_state == SPU_THREAD_GROUP_STATUS_WAITING_AND_SUSPENDED) { group->run_state = SPU_THREAD_GROUP_STATUS_SUSPENDED; spu.state += cpu_flag::signal; spu.state.notify_one(); return; } for (auto& thread : group->threads) { if (thread) { if (thread.get() == &spu) { constexpr auto flags = cpu_flag::suspend + cpu_flag::signal; ensure(((thread->state ^= flags) & flags) == cpu_flag::signal); } else { thread->state -= cpu_flag::suspend; } thread->state.notify_one(); } } } bool spu_thread::stop_and_signal(u32 code) { spu_log.trace("stop_and_signal(code=0x%x)", code); auto set_status_npc = [&]() { status_npc.atomic_op([&](status_npc_sync_var& state) { state.status = (state.status & 0xffff) | (code << 16); state.status |= SPU_STATUS_STOPPED_BY_STOP; state.status &= ~SPU_STATUS_RUNNING; state.npc = (pc + 4) | +interrupts_enabled; }); }; if (get_type() >= spu_type::raw) { // Save next PC and current SPU Interrupt Status state += cpu_flag::stop + cpu_flag::wait + cpu_flag::ret; set_status_npc(); status_npc.notify_one(); int_ctrl[2].set(SPU_INT2_STAT_SPU_STOP_AND_SIGNAL_INT); check_state(); return true; } auto get_queue = [this](u32 spuq) -> const std::shared_ptr<lv2_event_queue>& { for (auto& v : this->spuq) { if (spuq == v.first) { if (lv2_obj::check(v.second)) { return v.second; } } } static const std::shared_ptr<lv2_event_queue> empty; return empty; }; switch (code) { case 0x001: { state += cpu_flag::wait; std::this_thread::sleep_for(1ms); // hack check_state(); return true; } case 0x002: { state += cpu_flag::ret; return true; } case SYS_SPU_THREAD_STOP_RECEIVE_EVENT: { /* ===== sys_spu_thread_receive_event ===== */ u32 spuq = 0; if (!ch_out_mbox.try_read(spuq)) { 
fmt::throw_exception("sys_spu_thread_receive_event(): Out_MBox is empty"); } if (u32 count = ch_in_mbox.get_count()) { spu_log.error("sys_spu_thread_receive_event(): In_MBox is not empty (%d)", count); return ch_in_mbox.set_values(1, CELL_EBUSY), true; } struct clear_mbox { spu_thread& _this; ~clear_mbox() noexcept { if (cpu_flag::again - _this.state) { u32 val = 0; _this.ch_out_mbox.try_pop(val); } } } clear{*this}; spu_log.trace("sys_spu_thread_receive_event(spuq=0x%x)", spuq); if (!group->has_scheduler_context /*|| group->type & 0xf00*/) { spu_log.error("sys_spu_thread_receive_event(): Incompatible group type = 0x%x", group->type); return ch_in_mbox.set_values(1, CELL_EINVAL), true; } lv2_obj::prepare_for_sleep(*this); spu_function_logger logger(*this, "sys_spu_thread_receive_event"); std::shared_ptr<lv2_event_queue> queue; while (true) { // Check group status (by actually checking thread status), wait if necessary while (true) { const auto old = +state; if (is_stopped(old)) { state += cpu_flag::again; return false; } if (!is_paused(old)) { // The group is not suspended (anymore) break; } thread_ctrl::wait_on(state, old); } reader_lock{group->mutex}, queue = get_queue(spuq); if (!queue) { return ch_in_mbox.set_values(1, CELL_EINVAL), true; } // Lock queue's mutex first, then group's mutex std::scoped_lock lock(queue->mutex, group->mutex); if (is_stopped()) { state += cpu_flag::again; return false; } if (Emu.IsStarting()) { // Deregister lv2_obj::g_to_sleep entry (savestates related) lv2_obj::sleep(*this); } if (group->run_state >= SPU_THREAD_GROUP_STATUS_WAITING && group->run_state <= SPU_THREAD_GROUP_STATUS_WAITING_AND_SUSPENDED) { // Try again ensure(state & cpu_flag::suspend); continue; } if (queue != get_queue(spuq)) { // Try again continue; } if (!queue->exists) { return ch_in_mbox.set_values(1, CELL_EINVAL), true; } if (queue->events.empty()) { lv2_obj::emplace(queue->sq, this); group->run_state = SPU_THREAD_GROUP_STATUS_WAITING; group->waiter_spu_index = 
index; for (auto& thread : group->threads) { if (thread) { thread->state += cpu_flag::suspend; } } // Wait break; } else { // Return the event immediately const auto event = queue->events.front(); const auto data1 = static_cast<u32>(std::get<1>(event)); const auto data2 = static_cast<u32>(std::get<2>(event)); const auto data3 = static_cast<u32>(std::get<3>(event)); ch_in_mbox.set_values(4, CELL_OK, data1, data2, data3); queue->events.pop_front(); return true; } } while (auto old = +state) { if (old & cpu_flag::signal && state.test_and_reset(cpu_flag::signal)) { break; } if (is_stopped(old)) { std::lock_guard qlock(queue->mutex); old = state.fetch_sub(cpu_flag::signal); if (old & cpu_flag::signal) { break; } state += cpu_flag::again; return false; } thread_ctrl::wait_on(state, old); } wakeup_delay(); return true; } case SYS_SPU_THREAD_STOP_TRY_RECEIVE_EVENT: { /* ===== sys_spu_thread_tryreceive_event ===== */ u32 spuq = 0; if (!ch_out_mbox.try_pop(spuq)) { fmt::throw_exception("sys_spu_thread_tryreceive_event(): Out_MBox is empty"); } if (u32 count = ch_in_mbox.get_count()) { spu_log.error("sys_spu_thread_tryreceive_event(): In_MBox is not empty (%d)", count); return ch_in_mbox.set_values(1, CELL_EBUSY), true; } spu_log.trace("sys_spu_thread_tryreceive_event(spuq=0x%x)", spuq); std::shared_ptr<lv2_event_queue> queue; reader_lock{group->mutex}, queue = get_queue(spuq); std::unique_lock<shared_mutex> qlock, group_lock; while (true) { if (!queue) { return ch_in_mbox.set_values(1, CELL_EINVAL), true; } // Lock queue's mutex first, then group's mutex qlock = std::unique_lock{queue->mutex}; group_lock = std::unique_lock{group->mutex}; if (const auto& queue0 = get_queue(spuq); queue != queue0) { // Keep atleast one reference of the pointer so mutex unlock can work const auto old_ref = std::exchange(queue, queue0); group_lock.unlock(); qlock.unlock(); } else { break; } } if (!queue->exists) { return ch_in_mbox.set_values(1, CELL_EINVAL), true; } if (queue->events.empty()) { 
return ch_in_mbox.set_values(1, CELL_EBUSY), true; } const auto event = queue->events.front(); const auto data1 = static_cast<u32>(std::get<1>(event)); const auto data2 = static_cast<u32>(std::get<2>(event)); const auto data3 = static_cast<u32>(std::get<3>(event)); ch_in_mbox.set_values(4, CELL_OK, data1, data2, data3); queue->events.pop_front(); return true; } case SYS_SPU_THREAD_STOP_SWITCH_SYSTEM_MODULE: { fmt::throw_exception("SYS_SPU_THREAD_STOP_SWITCH_SYSTEM_MODULE (op=0x%x, Out_MBox=%s)", code, _ref<u32>(pc), ch_out_mbox); return true; } case SYS_SPU_THREAD_STOP_YIELD: { // SPU thread group yield (TODO) if (ch_out_mbox.get_count()) { fmt::throw_exception("STOP code 0x100: Out_MBox is not empty"); } atomic_fence_seq_cst(); return true; } case SYS_SPU_THREAD_STOP_GROUP_EXIT: { /* ===== sys_spu_thread_group_exit ===== */ state += cpu_flag::wait; u32 value = 0; if (!ch_out_mbox.try_pop(value)) { fmt::throw_exception("sys_spu_thread_group_exit(): Out_MBox is empty"); } spu_log.trace("sys_spu_thread_group_exit(status=0x%x)", value); spu_function_logger logger(*this, "sys_spu_thread_group_exit"); while (true) { // Check group status (by actually checking thread status), wait if necessary while (true) { const auto old = +state; if (is_stopped(old)) { ch_out_mbox.set_value(value); state += cpu_flag::again; return false; } if (!is_paused(old)) { // The group is not suspended (anymore) break; } thread_ctrl::wait_on(state, old); } std::lock_guard lock(group->mutex); if (auto _state = +group->run_state; _state >= SPU_THREAD_GROUP_STATUS_WAITING && _state <= SPU_THREAD_GROUP_STATUS_WAITING_AND_SUSPENDED) { // We can't exit while we are waiting on an SPU event continue; } if (std::exchange(group->set_terminate, true)) { // Whoever terminated first decides the error status + cause return true; } for (auto& thread : group->threads) { if (thread) { thread->state.fetch_op([](bs_t<cpu_flag>& flags) { if (flags & cpu_flag::stop) { // In case the thread raised the ret flag itself 
at some point do not raise it again return false; } flags += cpu_flag::stop + cpu_flag::ret; return true; }); } } group->exit_status = value; group->join_state = SYS_SPU_THREAD_GROUP_JOIN_GROUP_EXIT; set_status_npc(); break; } u32 prev_resv = 0; for (auto& thread : group->threads) { if (thread) { // Notify threads, guess which threads need a notification by checking cpu_flag::ret (redundant notification can only occur if thread has called an exit syscall itself as well) if (thread.get() != this && thread->state & cpu_flag::ret) { thread_ctrl::notify(*thread); if (u32 resv = atomic_storage<u32>::load(thread->raddr)) { if (prev_resv && prev_resv != resv) { // Batch reservation notifications if possible vm::reservation_notifier_notify(prev_resv); } prev_resv = resv; } } } } if (prev_resv) { vm::reservation_notifier_notify(prev_resv); } check_state(); return true; } case SYS_SPU_THREAD_STOP_THREAD_EXIT: { /* ===== sys_spu_thread_exit ===== */ state += cpu_flag::wait; u32 value; if (!ch_out_mbox.try_pop(value)) { fmt::throw_exception("sys_spu_thread_exit(): Out_MBox is empty"); } spu_function_logger logger(*this, "sys_spu_thread_exit"); spu_log.trace("sys_spu_thread_exit(status=0x%x)", value); last_exit_status.release(value); set_status_npc(); state += cpu_flag::stop + cpu_flag::ret; check_state(); return true; } } fmt::throw_exception("Unknown STOP code: 0x%x (op=0x%x, Out_MBox=%s)", code, _ref<u32>(pc), ch_out_mbox); } void spu_thread::halt() { spu_log.trace("halt()"); if (get_type() >= spu_type::raw) { state += cpu_flag::stop + cpu_flag::wait; status_npc.atomic_op([this](status_npc_sync_var& state) { state.status |= SPU_STATUS_STOPPED_BY_HALT; state.status &= ~SPU_STATUS_RUNNING; state.npc = pc | +interrupts_enabled; }); status_npc.notify_one(); int_ctrl[2].set(SPU_INT2_STAT_SPU_HALT_OR_STEP_INT); spu_runtime::g_escape(this); } spu_log.fatal("Halt"); spu_runtime::g_escape(this); } void spu_thread::fast_call(u32 ls_addr) { // LS:0x0: this is originally the entry point 
of the interrupt handler, but interrupts are not implemented _ref<u32>(0) = 0x00000002; // STOP 2 auto old_pc = pc; auto old_lr = gpr[0]._u32[3]; auto old_stack = gpr[1]._u32[3]; // only saved and restored (may be wrong) pc = ls_addr; gpr[0]._u32[3] = 0x0; cpu_task(); state -= cpu_flag::ret; pc = old_pc; gpr[0]._u32[3] = old_lr; gpr[1]._u32[3] = old_stack; } spu_exec_object spu_thread::capture_memory_as_elf(std::span<spu_memory_segment_dump_data> segs, u32 pc_hint) { spu_exec_object spu_exec; spu_exec.set_error(elf_error::ok); std::vector<u8> all_data(SPU_LS_SIZE); for (auto& seg : segs) { std::vector<uchar> data(seg.segment_size); if (auto [vm_addr, ok] = vm::try_get_addr(seg.src_addr); ok) { if (!vm::try_access(vm_addr, data.data(), seg.segment_size, false)) { spu_log.error("capture_memory_as_elf(): Failed to read {0x%x..0x%x}, aborting capture.", +vm_addr, vm_addr + seg.segment_size - 1); spu_exec.set_error(elf_error::stream_data); return spu_exec; } } else { std::memcpy(data.data(), seg.src_addr, data.size()); } std::memcpy(all_data.data() + seg.ls_addr, data.data(), data.size()); auto& prog = spu_exec.progs.emplace_back(SYS_SPU_SEGMENT_TYPE_COPY, seg.flags & 0x7, seg.ls_addr, seg.segment_size, 8, std::move(data)); prog.p_paddr = prog.p_vaddr; spu_log.success("Segment: p_type=0x%x, p_vaddr=0x%x, p_filesz=0x%x, p_memsz=0x%x", prog.p_type, prog.p_vaddr, prog.p_filesz, prog.p_memsz); } u32 pc0 = pc_hint; if (pc_hint != umax) { for (pc0 = pc_hint; pc0; pc0 -= 4) { const u32 op = read_from_ptr<be_t<u32>>(all_data, pc0 - 4); // Try to find function entry (if they are placed sequentially search for BI $LR of previous function) if (!op || op == 0x35000000u || s_spu_itype.decode(op) == spu_itype::UNK) { if (is_exec_code(pc0, { all_data.data(), SPU_LS_SIZE })) break; } } } else { for (pc0 = 0; pc0 < SPU_LS_SIZE; pc0 += 4) { // Try to find a function entry (very basic) if (is_exec_code(pc0, { all_data.data(), SPU_LS_SIZE })) break; } } spu_exec.header.e_entry = pc0; 
return spu_exec; } bool spu_thread::capture_state() { ensure(state & cpu_flag::wait); // Save data as an executable segment, even the SPU stack // In the past, an optimization was made here to save only non-zero chunks of data // But Ghidra didn't like accessing memory out of chunks (pretty common) // So it has been reverted spu_memory_segment_dump_data single_seg{.ls_addr = 0, .src_addr = ls, .segment_size = SPU_LS_SIZE}; spu_exec_object spu_exec = capture_memory_as_elf({&single_seg, 1}, pc); std::string name; if (get_type() == spu_type::threaded) { name = *spu_tname.load(); if (name.empty()) { // TODO: Maybe add thread group name here fmt::append(name, "SPU.0x%07x", lv2_id); } } else { fmt::append(name, "RawSPU.%u", lv2_id); } name = vfs::escape(name, true); std::replace(name.begin(), name.end(), ' ', '_'); auto get_filename = [&]() -> std::string { return fs::get_cache_dir() + "spu_progs/" + Emu.GetTitleID() + "_" + vfs::escape(name, true) + '_' + date_time::current_time_narrow() + "_capture.elf"; }; auto elf_path = get_filename(); if (fs::exists(elf_path)) { // Wait 1 second so current_time_narrow() will return a different string std::this_thread::sleep_for(1s); if (elf_path = get_filename(); fs::exists(elf_path)) { spu_log.error("Failed to create '%s' (error=%s)", elf_path, fs::g_tls_error); return false; } } fs::file temp(elf_path, fs::rewrite); if (!temp) { spu_log.error("Failed to create file '%s' (error=%s)", elf_path, fs::g_tls_error); return false; } auto data = spu_exec.save(); if (temp.write(data.data(), data.size()) != data.size()) { spu_log.error("Failed to write file '%s' (error=%s)", elf_path, fs::g_tls_error); return false; } spu_log.success("SPU Local Storage image saved to '%s'", elf_path); if (g_cfg.core.spu_decoder == spu_decoder_type::asmjit || g_cfg.core.spu_decoder == spu_decoder_type::llvm) { return true; } auto& rewind = rewind_captures[current_rewind_capture_idx++ % rewind_captures.size()]; if (rewind) { spu_log.error("Due to resource 
limits the 16th SPU rewind capture is being overwritten with a new one. (free the most recent by loading it)"); rewind.reset(); } rewind = std::make_shared<utils::serial>(); (*rewind)(std::span(spu_exec.progs[0].bin.data(), spu_exec.progs[0].bin.size())); // span serialization doesn't remember size which is what we need serialize_common(*rewind); // TODO: Save and restore decrementer state properly spu_log.success("SPU rewind image has been saved in memory. (%d free slots left)", std::count_if(rewind_captures.begin(), rewind_captures.end(), FN(!x.operator bool()))); return true; } bool spu_thread::try_load_debug_capture() { if (cpu_flag::wait - state) { return false; } auto rewind = std::move(rewind_captures[(current_rewind_capture_idx - 1) % rewind_captures.size()]); if (!rewind) { return false; } rewind->set_reading_state(); (*rewind)(std::span(ls, SPU_LS_SIZE)); // span serialization doesn't remember size which is what we need serialize_common(*rewind); current_rewind_capture_idx--; spu_log.success("Last SPU rewind image has been loaded."); return true; } void spu_thread::wakeup_delay(u32 div) const { if (g_cfg.core.spu_wakeup_delay_mask & (1u << index)) thread_ctrl::wait_for_accurate(utils::aligned_div(+g_cfg.core.spu_wakeup_delay, div)); } spu_function_logger::spu_function_logger(spu_thread& spu, const char* func) noexcept : spu(spu) { spu.current_func = func; spu.start_time = get_system_time(); } spu_thread::thread_name_t::operator std::string() const { std::string full_name = fmt::format("%s[0x%07x]", [](spu_type type) -> std::string_view { switch (type) { case spu_type::threaded: return "SPU"sv; case spu_type::raw: return "RawSPU"sv; case spu_type::isolated: return "Iso"sv; default: fmt::throw_exception("Unreachable"); } }(_this->get_type()), _this->lv2_id); if (const std::string name = *_this->spu_tname.load(); !name.empty()) { fmt::append(full_name, " %s", name); } return full_name; } spu_thread::spu_prio_t spu_thread::priority_t::load() const { if 
(_this->get_type() != spu_type::threaded || !_this->group->has_scheduler_context) { spu_thread::spu_prio_t prio{}; prio.prio = s32{smax}; return prio; } return _this->group->prio; } s64 spu_channel::pop_wait(cpu_thread& spu, bool pop) { u64 old = data.fetch_op([&](u64& data) { if (data & bit_count) [[likely]] { if (pop) { data = 0; return true; } return false; } data = (pop ? bit_occupy : 0) | bit_wait; if (pop) { jostling_value.release(bit_occupy); } return true; }).first; if (old & bit_count) { return static_cast<u32>(old); } for (int i = 0; i < 10; i++) { busy_wait(); if (!(data & bit_wait)) { return static_cast<u32>(pop ? jostling_value.exchange(0) : +data); } } const u32 wait_on_val = static_cast<u32>(((pop ? bit_occupy : 0) | bit_wait) >> 32); while (true) { thread_ctrl::wait_on(utils::bless<atomic_t<u32>>(&data)[1], wait_on_val); old = data; if (!(old & bit_wait)) { return static_cast<u32>(pop ? jostling_value.exchange(0) : +data); } if (spu.is_stopped()) { // Abort waiting and test if a value has been received if (pop) { if (u64 v = jostling_value.exchange(0); !(v & bit_occupy)) { return static_cast<u32>(v); } ensure(data.fetch_and(~(bit_wait | bit_occupy)) & bit_wait); } else { data.bit_test_reset(off_wait); } return -1; } } } // Waiting for channel push state availability, actually pushing if specified bool spu_channel::push_wait(cpu_thread& spu, u32 value, bool push) { u64 state{}; data.fetch_op([&](u64& data) { if (data & bit_count) [[unlikely]] { jostling_value.release(push ? (bit_occupy | value) : static_cast<u32>(data)); data |= (push ? 
bit_occupy : 0) | bit_wait; } else if (push) { data = bit_count | value; } else { state = data; return false; } state = data; return true; }); for (int i = 0; i < 10; i++) { if (!(state & bit_wait)) { return true; } busy_wait(); state = data; } while (true) { if (!(state & bit_wait)) { return true; } if (spu.is_stopped()) { return !data.bit_test_reset(off_wait); } thread_ctrl::wait_on(utils::bless<atomic_t<u32>>(&data)[1], u32(state >> 32)); state = data; } } std::pair<u32, u32> spu_channel_4_t::pop_wait(cpu_thread& spu, bool pop_value) { auto old = values.fetch_op([&](sync_var_t& data) { if (data.count != 0) { if (!pop_value) { return; } data.waiting = 0; data.count--; data.value0 = data.value1; data.value1 = data.value2; data.value2 = this->value3; } else { data.waiting = (pop_value ? bit_occupy : 0) | bit_wait; jostling_value.release(pop_value ? jostling_flag : 0); } }); if (old.count) { return {old.count, old.value0}; } old.waiting = (pop_value ? bit_occupy : 0) | bit_wait; for (int i = 0; i < 10; i++) { busy_wait(); if (!atomic_storage<u8>::load(values.raw().waiting)) { return {1, static_cast<u32>(pop_value ? jostling_value.exchange(0) : 0)}; } } while (true) { thread_ctrl::wait_on(utils::bless<atomic_t<u32>>(&values)[0], u32(u64(std::bit_cast<u128>(old)))); old = values; if (~old.waiting & bit_wait) { // Count of 1 because a value has been inserted and popped in the same step. return {1, static_cast<u32>(pop_value ? jostling_value.exchange(0) : 0)}; } if (spu.is_stopped()) { if (pop_value) { // Abort waiting and test if a value has been received if (u64 v = jostling_value.exchange(0); !(v & jostling_flag)) { return {1, static_cast<u32>(v)}; } } if (~atomic_storage<u8>::exchange(values.raw().waiting, 0) & bit_wait) { // Count of 1 because a value has been inserted and popped in the same step. return {1, static_cast<u32>(pop_value ? 
jostling_value.exchange(0) : 0)}; } return {}; } } } spu_channel_op_state spu_channel_4_t::push(u32 value, bool postpone_notify) { while (true) { value3.release(value); const auto [old, pushed_to_data] = values.fetch_op([&](sync_var_t& data) { if (data.waiting & bit_occupy) { return false; } switch (data.count++) { case 0: data.value0 = value; break; case 1: data.value1 = value; break; case 2: data.value2 = value; break; default: { data.count = 4; data.value3_inval++; // Ensure the SPU reads the most recent value3 write in try_pop by re-loading break; } } return true; }); if (!pushed_to_data) { // Insert the pending value in special storage for waiting SPUs, leave no time in which the channel has data if (!jostling_value.compare_and_swap_test(jostling_flag, value)) { // Other thread has inserted a value through jostling_value, retry continue; } } if (old.waiting & bit_wait) { // Turn off waiting bit manually (must succeed because waiting bit can only be resetted by the thread pushing to jostling_value) if (~atomic_storage<u8>::exchange(values.raw().waiting, 0) & bit_wait) { // Could be fatal or at emulation stopping, to be checked by the caller return { old.count, old.count, false, false }; } if (!postpone_notify) { utils::bless<atomic_t<u32>>(&values)[0].notify_one(); } } return { old.count, std::min<u8>(static_cast<u8>(old.count + 1), 4), !!(old.waiting & bit_wait), true }; } } template <> void fmt_class_string<spu_channel>::format(std::string& out, u64 arg) { const auto& ch = get_object(arg); u32 data = 0; if (ch.try_read(data)) { fmt::append(out, "0x%08x", data); } else { out += "empty"; } } template <> void fmt_class_string<spu_channel_4_t>::format(std::string& out, u64 arg) { const auto& ch = get_object(arg); u32 vals[4]{}; const uint count = ch.try_read(vals); fmt::append(out, "count = %d, data:\n", count); out += "{ "; for (u32 i = 0; i < count;) { fmt::append(out, "0x%x", vals[i]); if (++i != count) { out += ", "; } } out += " }\n"; } 
DECLARE(spu_thread::g_raw_spu_ctr){}; DECLARE(spu_thread::g_raw_spu_id){}; DECLARE(spu_thread::g_spu_work_count){};
181,066
C++
.cpp
6,251
24.959526
234
0.624785
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
5,176
SPUASMJITRecompiler.cpp
RPCS3_rpcs3/rpcs3/Emu/Cell/SPUASMJITRecompiler.cpp
#include "stdafx.h" #include "SPUASMJITRecompiler.h" #include "Emu/system_config.h" #include "Emu/IdManager.h" #include "Emu/Cell/timers.hpp" #include "SPUDisAsm.h" #include "SPUThread.h" #include "SPUInterpreter.h" #include "PPUAnalyser.h" #include "Crypto/sha1.h" #include "util/asm.hpp" #include "util/v128.hpp" #include "util/simd.hpp" #include "util/sysinfo.hpp" #include <cmath> #include <thread> #define SPU_OFF_128(x, ...) asmjit::x86::oword_ptr(*cpu, offset32(&spu_thread::x, ##__VA_ARGS__)) #define SPU_OFF_64(x, ...) asmjit::x86::qword_ptr(*cpu, offset32(&spu_thread::x, ##__VA_ARGS__)) #define SPU_OFF_32(x, ...) asmjit::x86::dword_ptr(*cpu, offset32(&spu_thread::x, ##__VA_ARGS__)) #define SPU_OFF_16(x, ...) asmjit::x86::word_ptr(*cpu, offset32(&spu_thread::x, ##__VA_ARGS__)) #define SPU_OFF_8(x, ...) asmjit::x86::byte_ptr(*cpu, offset32(&spu_thread::x, ##__VA_ARGS__)) const spu_decoder<spu_recompiler> s_spu_decoder; std::unique_ptr<spu_recompiler_base> spu_recompiler_base::make_asmjit_recompiler() { return std::make_unique<spu_recompiler>(); } spu_recompiler::spu_recompiler() { } void spu_recompiler::init() { // Initialize if necessary if (!m_spurt) { m_spurt = &g_fxo->get<spu_runtime>(); } } spu_function_t spu_recompiler::compile(spu_program&& _func) { const u32 start0 = _func.entry_point; const auto add_loc = m_spurt->add_empty(std::move(_func)); if (!add_loc) { return nullptr; } if (add_loc->compiled) { return add_loc->compiled; } const spu_program& func = add_loc->data; if (func.entry_point != start0) { // Wait for the duplicate while (!add_loc->compiled) { add_loc->compiled.wait(nullptr); } return add_loc->compiled; } if (auto& cache = g_fxo->get<spu_cache>(); cache && g_cfg.core.spu_cache && !add_loc->cached.exchange(1)) { cache.add(func); } { sha1_context ctx; u8 output[20]; sha1_starts(&ctx); sha1_update(&ctx, reinterpret_cast<const u8*>(func.data.data()), func.data.size() * 4); sha1_finish(&ctx, output); be_t<u64> hash_start; std::memcpy(&hash_start, 
output, sizeof(hash_start)); m_hash_start = hash_start; } using namespace asmjit; StringLogger logger; logger.addFlags(FormatFlags::kMachineCode); std::string log; CodeHolder code; code.init(m_asmrt.environment()); x86::Assembler compiler(&code); compiler.addEncodingOptions(EncodingOptions::kOptimizedAlign); this->c = &compiler; if (g_cfg.core.spu_debug && !add_loc->logged.exchange(1)) { // Dump analyser data this->dump(func, log); fs::write_file(m_spurt->get_cache_path() + "spu.log", fs::write + fs::append, log); // Set logger code.setLogger(&logger); } // Initialize args this->cpu = &x86::r13; this->ls = &x86::rbp; this->rip = &x86::r12; this->pc0 = &x86::r15; this->addr = &x86::eax; #ifdef _WIN32 this->arg0 = &x86::rcx; this->arg1 = &x86::rdx; this->qw0 = &x86::r8; this->qw1 = &x86::r9; #else this->arg0 = &x86::rdi; this->arg1 = &x86::rsi; this->qw0 = &x86::rdx; this->qw1 = &x86::rcx; #endif const std::array<const x86::Xmm*, 16> vec_vars { &x86::xmm0, &x86::xmm1, &x86::xmm2, &x86::xmm3, &x86::xmm4, &x86::xmm5, &x86::xmm6, &x86::xmm7, &x86::xmm8, &x86::xmm9, &x86::xmm10, &x86::xmm11, &x86::xmm12, &x86::xmm13, &x86::xmm14, &x86::xmm15, }; for (u32 i = 0; i < vec_vars.size(); i++) { vec[i] = vec_vars[i]; } label_stop = c->newLabel(); Label label_diff = c->newLabel(); Label label_code = c->newLabel(); std::vector<u32> words; u32 words_align = 8; // Start compilation m_pos = func.lower_bound; m_base = func.entry_point; m_size = ::size32(func.data) * 4; const u32 start = m_pos; const u32 end = start + m_size; // Create block labels for (u32 i = 0; i < func.data.size(); i++) { if (func.data[i] && m_block_info[i + start / 4]) { instr_labels[i * 4 + start] = c->newLabel(); } } // Load actual PC and check status c->sub(x86::rsp, 0x28); c->mov(pc0->r32(), SPU_OFF_32(pc)); c->cmp(SPU_OFF_32(state), 0); c->jnz(label_stop); if (g_cfg.core.spu_prof && g_cfg.core.spu_verification) { c->mov(x86::rax, m_hash_start & -0xffff); c->mov(SPU_OFF_64(block_hash), x86::rax); } if 
(utils::has_avx()) { // How to check dirty AVX state //c->pxor(x86::xmm0, x86::xmm0); //c->vptest(x86::ymm0, x86::ymm0); //c->jnz(label_stop); } // Get bit mask of valid code words for a given range (up to 128 bytes) auto get_code_mask = [&](u32 starta, u32 enda) -> u32 { u32 result = 0; for (u32 addr = starta, m = 1; addr < enda && m; addr += 4, m <<= 1) { // Filter out if out of range, or is a hole if (addr >= start && addr < end && func.data[(addr - start) / 4]) { result |= m; } } return result; }; // Check code u32 starta = start; // Skip holes at the beginning (giga only) for (u32 j = start; j < end; j += 4) { if (!func.data[(j - start) / 4]) { starta += 4; } else { break; } } auto get_pc_ptr = [&]() { // Get data start address if (starta != m_base) { c->lea(x86::rax, get_pc(starta)); c->and_(x86::eax, 0x3fffc); return x86::qword_ptr(*ls, x86::rax); } else { return x86::qword_ptr(*ls, *pc0); } }; if (!g_cfg.core.spu_verification) { // Disable check (unsafe) if (utils::has_avx()) { c->vzeroupper(); } } else if (m_size == 8) { c->mov(x86::rax, static_cast<u64>(func.data[1]) << 32 | func.data[0]); c->cmp(x86::rax, x86::qword_ptr(*ls, *pc0)); c->jnz(label_diff); if (utils::has_avx()) { c->vzeroupper(); } } else if (m_size == 4) { c->cmp(x86::dword_ptr(*ls, *pc0), func.data[0]); c->jnz(label_diff); if (utils::has_avx()) { c->vzeroupper(); } } else if (utils::has_avx512() && false) { // AVX-512 optimized check using 512-bit registers (disabled) words_align = 64; const u32 starta = start & -64; const u32 enda = utils::align(end, 64); const u32 sizea = (enda - starta) / 64; ensure(sizea); // Initialize pointers c->lea(x86::rax, x86::qword_ptr(label_code)); u32 code_off = 0; u32 ls_off = -8192; for (u32 j = starta; j < enda; j += 64) { const u32 cmask = get_code_mask(j, j + 64); if (cmask == 0) [[unlikely]] { continue; } const bool first = ls_off == u32{} - 8192; // Ensure small distance for disp8*N if (j - ls_off >= 8192) { c->lea(*qw1, x86::qword_ptr(*ls, j)); ls_off 
= j; } if (code_off >= 8192) { c->lea(x86::rax, x86::qword_ptr(x86::rax, 8192)); code_off -= 8192; } if (cmask != 0xffff) { // Generate k-mask for the block Label label = c->newLabel(); c->kmovw(x86::k7, x86::word_ptr(label)); consts.emplace_back([=, this] { c->bind(label); c->dq(cmask); }); c->setExtraReg(x86::k7); c->z().vmovdqa32(x86::zmm0, x86::zmmword_ptr(*qw1, j - ls_off)); } else { c->vmovdqa32(x86::zmm0, x86::zmmword_ptr(*qw1, j - ls_off)); } if (first) { c->vpcmpud(x86::k1, x86::zmm0, x86::zmmword_ptr(x86::rax, code_off), 4); } else { c->vpcmpud(x86::k3, x86::zmm0, x86::zmmword_ptr(x86::rax, code_off), 4); c->korw(x86::k1, x86::k3, x86::k1); } for (u32 i = j; i < j + 64; i += 4) { words.push_back(i >= start && i < end ? func.data[(i - start) / 4] : 0); } code_off += 64; } c->ktestw(x86::k1, x86::k1); c->jnz(label_diff); c->vzeroupper(); } else if (0 && utils::has_avx512()) { // AVX-512 optimized check using 256-bit registers words_align = 32; const u32 starta = start & -32; const u32 enda = utils::align(end, 32); const u32 sizea = (enda - starta) / 32; ensure(sizea); if (sizea == 1) { const u32 cmask = get_code_mask(starta, enda); if (cmask == 0xff) { c->vmovdqa(x86::ymm0, x86::ymmword_ptr(*ls, starta)); } else { c->vpxor(x86::ymm0, x86::ymm0, x86::ymm0); c->vpblendd(x86::ymm0, x86::ymm0, x86::ymmword_ptr(*ls, starta), cmask); } c->vpxor(x86::ymm0, x86::ymm0, x86::ymmword_ptr(label_code)); c->vptest(x86::ymm0, x86::ymm0); c->jnz(label_diff); for (u32 i = starta; i < enda; i += 4) { words.push_back(i >= start && i < end ? 
func.data[(i - start) / 4] : 0); } } else if (sizea == 2 && (end - start) <= 32) { const u32 cmask0 = get_code_mask(starta, starta + 32); const u32 cmask1 = get_code_mask(starta + 32, enda); c->vpxor(x86::ymm0, x86::ymm0, x86::ymm0); c->vpblendd(x86::ymm0, x86::ymm0, x86::ymmword_ptr(*ls, starta), cmask0); c->vpblendd(x86::ymm0, x86::ymm0, x86::ymmword_ptr(*ls, starta + 32), cmask1); c->vpxor(x86::ymm0, x86::ymm0, x86::ymmword_ptr(label_code)); c->vptest(x86::ymm0, x86::ymm0); c->jnz(label_diff); for (u32 i = starta; i < starta + 32; i += 4) { words.push_back(i >= start ? func.data[(i - start) / 4] : i + 32 < end ? func.data[(i + 32 - start) / 4] : 0); } } else { bool xmm2z = false; // Initialize pointers c->lea(x86::rax, x86::qword_ptr(label_code)); u32 code_off = 0; u32 ls_off = -4096; for (u32 j = starta; j < enda; j += 32) { const u32 cmask = get_code_mask(j, j + 32); if (cmask == 0) [[unlikely]] { continue; } const bool first = ls_off == u32{0} - 4096; // Ensure small distance for disp8*N if (j - ls_off >= 4096) { c->lea(*qw1, x86::qword_ptr(*ls, j)); ls_off = j; } if (code_off >= 4096) { c->lea(x86::rax, x86::qword_ptr(x86::rax, 4096)); code_off -= 4096; } if (cmask != 0xff) { if (!xmm2z) { c->vpxor(x86::xmm2, x86::xmm2, x86::xmm2); xmm2z = true; } c->vpblendd(x86::ymm1, x86::ymm2, x86::ymmword_ptr(*qw1, j - ls_off), cmask); } else { c->vmovdqa32(x86::ymm1, x86::ymmword_ptr(*qw1, j - ls_off)); } // Perform bitwise comparison and accumulate if (first) { c->vpxor(x86::ymm0, x86::ymm1, x86::ymmword_ptr(x86::rax, code_off)); } else { c->vpternlogd(x86::ymm0, x86::ymm1, x86::ymmword_ptr(x86::rax, code_off), 0xf6 /* orAxorBC */); } for (u32 i = j; i < j + 32; i += 4) { words.push_back(i >= start && i < end ? 
func.data[(i - start) / 4] : 0); } code_off += 32; } c->vptest(x86::ymm0, x86::ymm0); c->jnz(label_diff); } c->vzeroupper(); } else if (0 && utils::has_avx()) { // Mainstream AVX words_align = 32; const u32 starta = start & -32; const u32 enda = utils::align(end, 32); const u32 sizea = (enda - starta) / 32; ensure(sizea); if (sizea == 1) { const u32 cmask = get_code_mask(starta, enda); if (cmask == 0xff) { c->vmovaps(x86::ymm0, x86::ymmword_ptr(*ls, starta)); } else { c->vxorps(x86::ymm0, x86::ymm0, x86::ymm0); c->vblendps(x86::ymm0, x86::ymm0, x86::ymmword_ptr(*ls, starta), cmask); } c->vxorps(x86::ymm0, x86::ymm0, x86::ymmword_ptr(label_code)); c->vptest(x86::ymm0, x86::ymm0); c->jnz(label_diff); for (u32 i = starta; i < enda; i += 4) { words.push_back(i >= start && i < end ? func.data[(i - start) / 4] : 0); } } else if (sizea == 2 && (end - start) <= 32) { const u32 cmask0 = get_code_mask(starta, starta + 32); const u32 cmask1 = get_code_mask(starta + 32, enda); c->vxorps(x86::ymm0, x86::ymm0, x86::ymm0); c->vblendps(x86::ymm0, x86::ymm0, x86::ymmword_ptr(*ls, starta), cmask0); c->vblendps(x86::ymm0, x86::ymm0, x86::ymmword_ptr(*ls, starta + 32), cmask1); c->vxorps(x86::ymm0, x86::ymm0, x86::ymmword_ptr(label_code)); c->vptest(x86::ymm0, x86::ymm0); c->jnz(label_diff); for (u32 i = starta; i < starta + 32; i += 4) { words.push_back(i >= start ? func.data[(i - start) / 4] : i + 32 < end ? func.data[(i + 32 - start) / 4] : 0); } } else { bool xmm2z = false; // Initialize pointers c->add(*ls, starta); c->lea(x86::rax, x86::qword_ptr(label_code)); u32 code_off = 0; u32 ls_off = starta; u32 order0 = 0; u32 order1 = 0; for (u32 j = starta; j < enda; j += 32) { const u32 cmask = get_code_mask(j, j + 32); if (cmask == 0) [[unlikely]] { continue; } // Interleave two threads auto& order = order0 > order1 ? order1 : order0; const auto& reg0 = order0 > order1 ? x86::ymm3 : x86::ymm0; const auto& reg1 = order0 > order1 ? 
x86::ymm4 : x86::ymm1; // Ensure small distance for disp8 if (j - ls_off >= 256) { c->add(*ls, j - ls_off); ls_off = j; } else if (j - ls_off >= 128) { c->sub(*ls, -128); ls_off += 128; } if (code_off >= 128) { c->sub(x86::rax, -128); code_off -= 128; } if (cmask != 0xff) { if (!xmm2z) { c->vxorps(x86::xmm2, x86::xmm2, x86::xmm2); xmm2z = true; } c->vblendps(reg1, x86::ymm2, x86::ymmword_ptr(*ls, j - ls_off), cmask); } else { c->vmovaps(reg1, x86::ymmword_ptr(*ls, j - ls_off)); } // Perform bitwise comparison and accumulate if (!order++) { c->vxorps(reg0, reg1, x86::ymmword_ptr(x86::rax, code_off)); } else { c->vxorps(reg1, reg1, x86::ymmword_ptr(x86::rax, code_off)); c->vorps(reg0, reg1, reg0); } for (u32 i = j; i < j + 32; i += 4) { words.push_back(i >= start && i < end ? func.data[(i - start) / 4] : 0); } code_off += 32; } c->sub(*ls, ls_off); if (order1) { c->vorps(x86::ymm0, x86::ymm3, x86::ymm0); } c->vptest(x86::ymm0, x86::ymm0); c->jnz(label_diff); } c->vzeroupper(); } else { if (utils::has_avx()) { c->vzeroupper(); } // Compatible SSE2 words_align = 16; // Initialize pointers c->lea(x86::rcx, get_pc_ptr()); c->lea(x86::rax, x86::qword_ptr(label_code)); u32 code_off = 0; u32 ls_off = starta; u32 order0 = 0; u32 order1 = 0; for (u32 j = starta; j < end; j += 16) { const u32 cmask = get_code_mask(j, j + 16); if (cmask == 0) [[unlikely]] { continue; } // Interleave two threads auto& order = order0 > order1 ? order1 : order0; const auto& reg0 = order0 > order1 ? x86::xmm3 : x86::xmm0; const auto& reg1 = order0 > order1 ? 
x86::xmm4 : x86::xmm1; // Ensure small distance for disp8 if (j - ls_off >= 256) { c->add(x86::rcx, j - ls_off); ls_off = j; } else if (j - ls_off >= 128) { c->sub(x86::rcx, -128); ls_off += 128; } if (code_off >= 128) { c->sub(x86::rax, -128); code_off -= 128; } // Determine which value will be duplicated at hole positions const u32 w3 = ::at32(func.data, (j - start + ~static_cast<u32>(std::countl_zero(cmask)) % 4 * 4) / 4); words.push_back(cmask & 1 ? func.data[(j - start + 0) / 4] : w3); words.push_back(cmask & 2 ? func.data[(j - start + 4) / 4] : w3); words.push_back(cmask & 4 ? func.data[(j - start + 8) / 4] : w3); words.push_back(w3); // PSHUFD immediate table for all possible hole mask values, holes repeat highest valid word static constexpr s32 s_pshufd_imm[16] { -1, // invalid index 0b00000000, // copy 0 0b01010101, // copy 1 0b01010100, // copy 1 0b10101010, // copy 2 0b10101000, // copy 2 0b10100110, // copy 2 0b10100100, // copy 2 0b11111111, // copy 3 0b11111100, // copy 3 0b11110111, // copy 3 0b11110100, // copy 3 0b11101111, // copy 3 0b11101100, // copy 3 0b11100111, // copy 3 0b11100100, // full }; const bool first = !order++; const auto& dest = first ? 
reg0 : reg1; // Load unaligned code block from LS if (cmask != 0xf) { if (utils::has_avx()) { c->vpshufd(dest, x86::dqword_ptr(x86::rcx, j - ls_off), s_pshufd_imm[cmask]); } else { c->movups(dest, x86::dqword_ptr(x86::rcx, j - ls_off)); c->pshufd(dest, dest, s_pshufd_imm[cmask]); } } else { c->movups(dest, x86::dqword_ptr(x86::rcx, j - ls_off)); } // Perform bitwise comparison and accumulate c->xorps(dest, x86::dqword_ptr(x86::rax, code_off)); if (!first) { c->orps(reg0, dest); } code_off += 16; } if (order1) { c->orps(x86::xmm0, x86::xmm3); } if (utils::has_sse41()) { c->ptest(x86::xmm0, x86::xmm0); c->jnz(label_diff); } else { c->packssdw(x86::xmm0, x86::xmm0); c->movq(x86::rax, x86::xmm0); c->test(x86::rax, x86::rax); c->jne(label_diff); } } // Acknowledge success and add statistics c->add(SPU_OFF_64(block_counter), ::size32(words) / (words_align / 4)); // Set block hash for profiling (if enabled) if (g_cfg.core.spu_prof) { c->mov(x86::rax, m_hash_start | 0xffff); c->mov(SPU_OFF_64(block_hash), x86::rax); } if (m_pos != start) { // Jump to the entry point if necessary c->jmp(instr_labels[m_pos]); m_pos = -1; } for (u32 i = 0; i < func.data.size(); i++) { const u32 pos = start + i * 4; const u32 op = std::bit_cast<be_t<u32>>(func.data[i]); if (!op) { // Ignore hole if (m_pos + 1) { spu_log.error("Unexpected fallthrough to 0x%x", pos); branch_fixed(spu_branch_target(pos)); m_pos = -1; } continue; } // Update position m_pos = pos; // Bind instruction label if necessary const auto found = instr_labels.find(pos); if (found != instr_labels.end()) { if (m_preds.count(pos)) { c->align(AlignMode::kCode, 16); } c->bind(found->second); } if (g_cfg.core.spu_debug) { // Write the instruction address inside the ASMJIT log compiler.comment(fmt::format("[0x%05x]", m_pos).c_str()); } // Tracing //c->lea(x86::r14, get_pc(m_pos)); // Execute recompiler function (this->*s_spu_decoder.decode(op))({op}); // Collect allocated xmm vars for (u32 i = 0; i < vec_vars.size(); i++) { vec[i] 
= vec_vars[i]; } } // Make fallthrough if necessary if (m_pos + 1) { branch_fixed(spu_branch_target(end)); } // Simply return c->align(AlignMode::kCode, 16); c->bind(label_stop); c->add(x86::rsp, 0x28); c->ret(); if (g_cfg.core.spu_verification) { // Dispatch c->align(AlignMode::kCode, 16); c->bind(label_diff); c->inc(SPU_OFF_64(block_failure)); c->add(x86::rsp, 0x28); c->jmp(spu_runtime::tr_dispatch); } for (auto&& work : ::as_rvalue(std::move(after))) { work(); } // Build instruction dispatch table if (instr_table.isValid()) { c->align(AlignMode::kData, 8); c->bind(instr_table); // Get actual instruction table bounds const u32 start = instr_labels.begin()->first; const u32 end = instr_labels.rbegin()->first + 4; for (u32 addr = start; addr < end; addr += 4) { const auto found = instr_labels.find(addr); if (found != instr_labels.end()) { c->embedLabel(found->second); } else { c->embedLabel(label_stop); } } } c->align(AlignMode::kData, words_align); c->bind(label_code); for (u32 d : words) c->dd(d); for (auto&& work : ::as_rvalue(std::move(consts))) { work(); } label_stop.reset(); instr_table.reset(); instr_labels.clear(); xmm_consts.clear(); // Compile and get function address spu_function_t fn = reinterpret_cast<spu_function_t>(m_asmrt._add(&code)); if (!fn) { spu_log.fatal("Failed to build a function"); } else { jit_announce(fn, code.codeSize(), fmt::format("spu-b-%s", fmt::base57(be_t<u64>(m_hash_start)))); } // Install compiled function pointer const bool added = !add_loc->compiled && add_loc->compiled.compare_and_swap_test(nullptr, fn); // Rebuild trampoline if necessary if (!m_spurt->rebuild_ubertrampoline(func.data[0])) { return nullptr; } if (added) { add_loc->compiled.notify_all(); } if (g_cfg.core.spu_debug && added) { // Add ASMJIT logs fmt::append(log, "Address: %p\n\n", fn); log.append(logger._content.data(), logger._content.size()); log += "\n\n\n"; // Append log file fs::write_file(m_spurt->get_cache_path() + "spu-ir.log", fs::write + fs::append, 
log); } return fn; } spu_recompiler::XmmLink spu_recompiler::XmmAlloc() // get empty xmm register { for (auto& v : vec) { if (v) return{ v }; } fmt::throw_exception("Out of Xmm Vars"); } spu_recompiler::XmmLink spu_recompiler::XmmGet(s8 reg, XmmType type) // get xmm register with specific SPU reg { XmmLink result = XmmAlloc(); switch (type) { case XmmType::Int: c->movdqa(result, SPU_OFF_128(gpr, reg)); break; case XmmType::Float: c->movaps(result, SPU_OFF_128(gpr, reg)); break; case XmmType::Double: c->movapd(result, SPU_OFF_128(gpr, reg)); break; default: fmt::throw_exception("Invalid XmmType"); } return result; } inline asmjit::x86::Mem spu_recompiler::XmmConst(const v128& data) { // Find existing const auto& xmm_label = xmm_consts[std::make_pair(data._u64[0], data._u64[1])]; if (!xmm_label.isValid()) { xmm_label = c->newLabel(); consts.emplace_back([=, this] { c->align(asmjit::AlignMode::kData, 16); c->bind(xmm_label); c->dq(data._u64[0]); c->dq(data._u64[1]); }); } return asmjit::x86::oword_ptr(xmm_label); } inline asmjit::x86::Mem spu_recompiler::get_pc(u32 addr) { return asmjit::x86::qword_ptr(*pc0, addr - m_base); } static void check_state(spu_thread* _spu) { if (_spu->state && _spu->check_state()) { spu_runtime::g_escape(_spu); } } void spu_recompiler::branch_fixed(u32 target, bool absolute) { using namespace asmjit; // Check local branch const auto local = instr_labels.find(target); if (local != instr_labels.end() && local->second.isValid()) { Label fail; if (absolute) { fail = c->newLabel(); c->cmp(pc0->r32(), m_base); c->jne(fail); } c->cmp(SPU_OFF_32(state), 0); c->jz(local->second); c->lea(addr->r64(), get_pc(target)); c->and_(*addr, 0x3fffc); c->mov(SPU_OFF_32(pc), *addr); c->mov(*arg0, *cpu); c->call(&check_state); c->jmp(local->second); if (absolute) { c->bind(fail); } else { return; } } const auto ppptr = !g_cfg.core.spu_verification ? 
nullptr : m_spurt->make_branch_patchpoint(); if (absolute) { c->mov(SPU_OFF_32(pc), target); } else { c->lea(addr->r64(), get_pc(target)); c->and_(*addr, 0x3fffc); c->mov(SPU_OFF_32(pc), *addr); } c->xor_(rip->r32(), rip->r32()); c->cmp(SPU_OFF_32(state), 0); c->jnz(label_stop); if (ppptr) { c->add(x86::rsp, 0x28); c->jmp(ppptr); } else { c->jmp(label_stop); } } void spu_recompiler::branch_indirect(spu_opcode_t op, bool jt, bool ret) { using namespace asmjit; // Initialize third arg to zero c->xor_(rip->r32(), rip->r32()); if (op.d) { c->mov(SPU_OFF_8(interrupts_enabled), 0); } else if (op.e) { auto _throw = [](spu_thread* _spu) { _spu->state += cpu_flag::dbg_pause; spu_log.fatal("SPU Interrupts not implemented (mask=0x%x)", +_spu->ch_events.load().mask); spu_runtime::g_escape(_spu); }; Label no_intr = c->newLabel(); Label intr = c->newLabel(); Label fail = c->newLabel(); c->mov(*qw1, SPU_OFF_64(ch_events)); c->ror(*qw1, 32); c->test(qw1->r32(), ~SPU_EVENT_INTR_IMPLEMENTED); c->ror(*qw1, 32); c->jnz(fail); c->mov(SPU_OFF_8(interrupts_enabled), 1); c->bt(qw1->r32(), 31); c->jc(intr); c->jmp(no_intr); c->bind(fail); c->mov(SPU_OFF_32(pc), *addr); c->mov(*arg0, *cpu); c->add(x86::rsp, 0x28); c->jmp(+_throw); // Save addr in srr0 and disable interrupts c->bind(intr); c->mov(SPU_OFF_8(interrupts_enabled), 0); c->mov(SPU_OFF_32(srr0), *addr); // Test for BR/BRA instructions (they are equivalent at zero pc) c->mov(*addr, x86::dword_ptr(*ls)); c->and_(*addr, 0xfffffffd); c->xor_(*addr, 0x30); c->bswap(*addr); c->test(*addr, 0xff80007f); c->cmovnz(*addr, rip->r32()); c->shr(*addr, 5); c->align(AlignMode::kCode, 16); c->bind(no_intr); } c->mov(SPU_OFF_32(pc), *addr); c->cmp(SPU_OFF_32(state), 0); c->jnz(label_stop); if (g_cfg.core.spu_block_size != spu_block_size_type::safe && ret) { // Get stack pointer, try to use native return address (check SPU return address) Label fail = c->newLabel(); c->mov(qw1->r32(), SPU_OFF_32(gpr, 1, &v128::_u32, 3)); c->and_(qw1->r32(), 
0x3fff0); c->lea(*qw1, x86::qword_ptr(*cpu, *qw1, 0, ::offset32(&spu_thread::stack_mirror))); c->cmp(x86::dword_ptr(*qw1, 8), *addr); c->jne(fail); c->mov(pc0->r32(), x86::dword_ptr(*qw1, 12)); c->jmp(x86::qword_ptr(*qw1)); c->bind(fail); } if (jt || g_cfg.core.spu_block_size == spu_block_size_type::giga) { if (!instr_table.isValid()) { // Request instruction table instr_table = c->newLabel(); } // Get actual instruction table bounds const u32 start = instr_labels.begin()->first; const u32 end = instr_labels.rbegin()->first + 4; // Load local indirect jump address, check local bounds ensure(start == m_base); Label fail = c->newLabel(); c->mov(qw1->r32(), *addr); c->sub(qw1->r32(), pc0->r32()); c->cmp(qw1->r32(), end - start); c->jae(fail); c->lea(addr->r64(), x86::qword_ptr(instr_table)); c->jmp(x86::qword_ptr(addr->r64(), *qw1, 1, 0)); c->bind(fail); } // Simply external call (return or indirect call) const auto ppptr = !g_cfg.core.spu_verification ? nullptr : m_spurt->make_branch_patchpoint(); if (ppptr) { c->add(x86::rsp, 0x28); c->jmp(ppptr); } else { c->jmp(label_stop); } } void spu_recompiler::branch_set_link(u32 target) { using namespace asmjit; if (g_cfg.core.spu_block_size != spu_block_size_type::safe) { // Find instruction at target const auto local = instr_labels.find(target); if (local != instr_labels.end() && local->second.isValid()) { Label ret = c->newLabel(); // Get stack pointer, write native and SPU return addresses into the stack mirror c->mov(qw1->r32(), SPU_OFF_32(gpr, 1, &v128::_u32, 3)); c->and_(qw1->r32(), 0x3fff0); c->lea(*qw1, x86::qword_ptr(*cpu, *qw1, 0, ::offset32(&spu_thread::stack_mirror))); c->lea(x86::r10, x86::qword_ptr(ret)); c->mov(x86::qword_ptr(*qw1, 0), x86::r10); c->lea(x86::r10, get_pc(target)); c->and_(x86::r10d, 0x3fffc); c->mov(x86::dword_ptr(*qw1, 8), x86::r10d); c->mov(x86::dword_ptr(*qw1, 12), pc0->r32()); after.emplace_back([=, this, target = local->second] { // Clear return info after use c->align(AlignMode::kCode, 
16); c->bind(ret); c->mov(qw1->r32(), SPU_OFF_32(gpr, 1, &v128::_u32, 3)); c->and_(qw1->r32(), 0x3fff0); c->pcmpeqd(x86::xmm0, x86::xmm0); c->movdqa(x86::dqword_ptr(*cpu, *qw1, 0, ::offset32(&spu_thread::stack_mirror)), x86::xmm0); // Set block hash for profiling (if enabled) if (g_cfg.core.spu_prof) { c->mov(x86::rax, m_hash_start | 0xffff); c->mov(SPU_OFF_64(block_hash), x86::rax); } c->jmp(target); }); } } } void spu_recompiler::fall(spu_opcode_t op) { auto gate = [](spu_thread* _spu, u32 opcode, spu_intrp_func_t _func) { if (!_func(*_spu, {opcode})) { _spu->state += cpu_flag::dbg_pause; spu_log.fatal("spu_recompiler::fall(): unexpected interpreter call (op=0x%08x)", opcode); spu_runtime::g_escape(_spu); } }; c->lea(addr->r64(), get_pc(m_pos)); c->and_(*addr, 0x3fffc); c->mov(SPU_OFF_32(pc), *addr); c->mov(arg1->r32(), op.opcode); c->mov(*qw0, g_fxo->get<spu_interpreter_rt>().decode(op.opcode)); c->mov(*arg0, *cpu); c->call(+gate); } void spu_recompiler::UNK(spu_opcode_t op) { auto gate = [](spu_thread* _spu, u32 op) { _spu->state += cpu_flag::dbg_pause; spu_log.fatal("Unknown/Illegal instruction (0x%08x)", op); spu_runtime::g_escape(_spu); }; c->lea(addr->r64(), get_pc(m_pos)); c->and_(*addr, 0x3fffc); c->mov(SPU_OFF_32(pc), *addr); c->mov(arg1->r32(), op.opcode); c->mov(*arg0, *cpu); c->add(asmjit::x86::rsp, 0x28); c->jmp(+gate); m_pos = -1; } void spu_stop(spu_thread* _spu, u32 code) { if (!_spu->stop_and_signal(code) || _spu->state & cpu_flag::again) { spu_runtime::g_escape(_spu); } if (_spu->test_stopped()) { _spu->pc += 4; spu_runtime::g_escape(_spu); } } void spu_recompiler::STOP(spu_opcode_t op) { using namespace asmjit; Label ret = c->newLabel(); c->lea(addr->r64(), get_pc(m_pos)); c->and_(*addr, 0x3fffc); c->mov(SPU_OFF_32(pc), *addr); c->mov(arg1->r32(), op.opcode & 0x3fff); c->mov(*arg0, *cpu); c->call(spu_stop); c->align(AlignMode::kCode, 16); c->bind(ret); c->add(SPU_OFF_32(pc), 4); if (g_cfg.core.spu_block_size == spu_block_size_type::safe) { 
c->jmp(label_stop); m_pos = -1; } } void spu_recompiler::LNOP(spu_opcode_t) { } void spu_recompiler::SYNC(spu_opcode_t) { // This instruction must be used following a store instruction that modifies the instruction stream. c->lock().or_(asmjit::x86::dword_ptr(asmjit::x86::rsp), 0); if (g_cfg.core.spu_block_size == spu_block_size_type::safe) { c->lea(addr->r64(), get_pc(m_pos + 4)); c->and_(*addr, 0x3fffc); c->mov(SPU_OFF_32(pc), *addr); c->jmp(label_stop); m_pos = -1; } } void spu_recompiler::DSYNC(spu_opcode_t) { // This instruction forces all earlier load, store, and channel instructions to complete before proceeding. c->lock().or_(asmjit::x86::dword_ptr(asmjit::x86::rsp), 0); } void spu_recompiler::MFSPR(spu_opcode_t op) { // Check SPUInterpreter for notes. const XmmLink& vr = XmmAlloc(); c->pxor(vr, vr); c->movdqa(SPU_OFF_128(gpr, op.rt), vr); } static u32 spu_rdch(spu_thread* _spu, u32 ch) { const s64 result = _spu->get_ch_value(ch); if (result < 0 || _spu->state & cpu_flag::again) { spu_runtime::g_escape(_spu); } if (_spu->test_stopped()) { _spu->pc += 4; spu_runtime::g_escape(_spu); } return static_cast<u32>(result & 0xffffffff); } void spu_recompiler::RDCH(spu_opcode_t op) { using namespace asmjit; auto read_channel = [&](x86::Mem channel_ptr, bool sync = true) { Label wait = c->newLabel(); Label again = c->newLabel(); Label ret = c->newLabel(); c->mov(addr->r64(), channel_ptr); c->xor_(qw0->r32(), qw0->r32()); c->align(AlignMode::kCode, 16); c->bind(again); c->bt(addr->r64(), spu_channel::off_count); c->jnc(wait); after.emplace_back([=, this, pos = m_pos] { c->bind(wait); c->lea(addr->r64(), get_pc(pos)); c->and_(*addr, 0x3fffc); c->mov(SPU_OFF_32(pc), *addr); c->mov(arg1->r32(), +op.ra); c->mov(*arg0, *cpu); c->call(spu_rdch); c->jmp(ret); }); if (sync) { // Channel is externally accessible c->lock().cmpxchg(channel_ptr, *qw0); c->jnz(again); } else { // Just write zero c->mov(channel_ptr, *qw0); } c->bind(ret); c->movd(x86::xmm0, *addr); 
c->pslldq(x86::xmm0, 12); c->movdqa(SPU_OFF_128(gpr, op.rt), x86::xmm0); }; switch (op.ra) { case SPU_RdSRR0: { const XmmLink& vr = XmmAlloc(); c->movd(vr, SPU_OFF_32(srr0)); c->pslldq(vr, 12); c->movdqa(SPU_OFF_128(gpr, op.rt), vr); return; } case SPU_RdInMbox: { // TODO break; } case MFC_RdTagStat: { read_channel(SPU_OFF_64(ch_tag_stat), false); return; } case MFC_RdTagMask: { const XmmLink& vr = XmmAlloc(); c->movd(vr, SPU_OFF_32(ch_tag_mask)); c->pslldq(vr, 12); c->movdqa(SPU_OFF_128(gpr, op.rt), vr); return; } case SPU_RdSigNotify1: { read_channel(SPU_OFF_64(ch_snr1)); return; } case SPU_RdSigNotify2: { read_channel(SPU_OFF_64(ch_snr2)); return; } case MFC_RdAtomicStat: { read_channel(SPU_OFF_64(ch_atomic_stat), false); return; } case MFC_RdListStallStat: { read_channel(SPU_OFF_64(ch_stall_stat), false); return; } case SPU_RdDec: { spu_log.warning("[0x%x] RDCH: RdDec", m_pos); auto sub1 = [](spu_thread* _spu, v128* _res) { const u32 out = _spu->ch_dec_value - static_cast<u32>(get_timebased_time() - _spu->ch_dec_start_timestamp); if (out > 1500) { _spu->state += cpu_flag::wait; std::this_thread::yield(); if (_spu->test_stopped()) { _spu->pc += 4; spu_runtime::g_escape(_spu); } } *_res = v128::from32r(out); }; auto sub2 = [](spu_thread* _spu, v128* _res) { const u32 out = _spu->read_dec().first; *_res = v128::from32r(out); }; c->lea(addr->r64(), get_pc(m_pos)); c->and_(*addr, 0x3fffc); c->mov(SPU_OFF_32(pc), *addr); c->lea(*arg1, SPU_OFF_128(gpr, op.rt)); c->mov(*arg0, *cpu); c->call(g_cfg.core.spu_loop_detection ? 
+sub1 : +sub2); return; } case SPU_RdEventMask: { const XmmLink& vr = XmmAlloc(); c->movq(vr, SPU_OFF_64(ch_events)); c->psrldq(vr, 4); c->pslldq(vr, 12); c->movdqa(SPU_OFF_128(gpr, op.rt), vr); return; } case SPU_RdEventStat: { spu_log.warning("[0x%x] RDCH: RdEventStat", m_pos); break; // TODO } case SPU_RdMachStat: { const XmmLink& vr = XmmAlloc(); c->movzx(*addr, SPU_OFF_8(interrupts_enabled)); c->mov(arg1->r32(), SPU_OFF_32(thread_type)); c->and_(arg1->r32(), 2); c->or_(addr->r32(), arg1->r32()); c->movd(vr, *addr); c->pslldq(vr, 12); c->movdqa(SPU_OFF_128(gpr, op.rt), vr); return; } default: break; } c->lea(addr->r64(), get_pc(m_pos)); c->and_(*addr, 0x3fffc); c->mov(SPU_OFF_32(pc), *addr); c->mov(arg1->r32(), +op.ra); c->mov(*arg0, *cpu); c->call(spu_rdch); c->movd(x86::xmm0, *addr); c->pslldq(x86::xmm0, 12); c->movdqa(SPU_OFF_128(gpr, op.rt), x86::xmm0); } static u32 spu_rchcnt(spu_thread* _spu, u32 ch) { return _spu->get_ch_count(ch); } void spu_recompiler::RCHCNT(spu_opcode_t op) { using namespace asmjit; auto ch_cnt = [&](x86::Mem channel_ptr, bool inv = false) { // Load channel count const XmmLink& vr = XmmAlloc(); c->movq(vr, channel_ptr); c->psrlq(vr, spu_channel::off_count); if (inv) c->pxor(vr, XmmConst(v128::from32p(1))); c->pslldq(vr, 12); c->movdqa(SPU_OFF_128(gpr, op.rt), vr); }; switch (op.ra) { case SPU_WrOutMbox: return ch_cnt(SPU_OFF_64(ch_out_mbox), true); case SPU_WrOutIntrMbox: return ch_cnt(SPU_OFF_64(ch_out_intr_mbox), true); case MFC_RdTagStat: return ch_cnt(SPU_OFF_64(ch_tag_stat)); case MFC_RdListStallStat: return ch_cnt(SPU_OFF_64(ch_stall_stat)); case SPU_RdSigNotify1: return ch_cnt(SPU_OFF_64(ch_snr1)); case SPU_RdSigNotify2: return ch_cnt(SPU_OFF_64(ch_snr2)); case MFC_RdAtomicStat: return ch_cnt(SPU_OFF_64(ch_atomic_stat)); case MFC_WrTagUpdate: { const XmmLink& vr = XmmAlloc(); c->mov(addr->r32(), 1); c->movd(vr, addr->r32()); c->pslldq(vr, 12); c->movdqa(SPU_OFF_128(gpr, op.rt), vr); return; } case MFC_Cmd: { const XmmLink& vr 
// NOTE(review): this chunk opens mid-definition — below is the tail of the
// RCHCNT (read channel count) handler whose start lies before this view.
= XmmAlloc();
		const XmmLink& v1 = XmmAlloc();
		c->movdqa(vr, XmmConst(v128::from32p(16)));
		c->movd(v1, SPU_OFF_32(mfc_size));
		c->psubd(vr, v1);
		c->pslldq(vr, 12);
		c->movdqa(SPU_OFF_128(gpr, op.rt), vr);
		return;
	}
	case SPU_RdInMbox:
	{
		const XmmLink& vr = XmmAlloc();
		c->movdqa(vr, SPU_OFF_128(ch_in_mbox));
		c->pslldq(vr, 14);
		c->psrldq(vr, 3);
		c->movdqa(SPU_OFF_128(gpr, op.rt), vr);
		return;
	}
	// Channels with a constant count of 1:
	case SPU_WrEventMask:
	case SPU_WrEventAck:
	case SPU_WrDec:
	case SPU_RdDec:
	case SPU_RdEventMask:
	case SPU_RdMachStat:
	case SPU_WrSRR0:
	case SPU_RdSRR0:
	case SPU_Set_Bkmk_Tag:
	case SPU_PM_Start_Ev:
	case SPU_PM_Stop_Ev:
	case MFC_RdTagMask:
	case MFC_LSA:
	case MFC_EAH:
	case MFC_EAL:
	case MFC_Size:
	case MFC_TagID:
	case MFC_WrTagMask:
	case MFC_WrListStallAck:
	{
		// Store the constant 1 in word element 3 of rt
		const XmmLink& vr = XmmAlloc();
		c->mov(addr->r32(), 1);
		c->movd(vr, addr->r32());
		c->pslldq(vr, 12);
		c->movdqa(SPU_OFF_128(gpr, op.rt), vr);
		return;
	}
	case SPU_RdEventStat:
	{
		spu_log.warning("[0x%x] RCHCNT: RdEventStat", m_pos);
		[[fallthrough]]; // fallback
	}
	default:
	{
		// Generic path: sync pc and call the C++ helper
		c->lea(addr->r64(), get_pc(m_pos));
		c->and_(*addr, 0x3fffc);
		c->mov(SPU_OFF_32(pc), *addr);
		c->mov(arg1->r32(), +op.ra);
		c->mov(*arg0, *cpu);
		c->call(spu_rchcnt);
		break;
	}
	}

	// Use result from the third argument
	c->movd(x86::xmm0, *addr);
	c->pslldq(x86::xmm0, 12);
	c->movdqa(SPU_OFF_128(gpr, op.rt), x86::xmm0);
}

// SF: subtract from word — rt = rb - ra per 32-bit lane
void spu_recompiler::SF(spu_opcode_t op)
{
	// sub from
	const XmmLink& vb = XmmGet(op.rb, XmmType::Int);
	c->psubd(vb, SPU_OFF_128(gpr, op.ra));
	c->movdqa(SPU_OFF_128(gpr, op.rt), vb);
}

// OR: rt = ra | rb (bitwise)
void spu_recompiler::OR(spu_opcode_t op)
{
	const XmmLink& vb = XmmGet(op.rb, XmmType::Int);
	c->por(vb, SPU_OFF_128(gpr, op.ra));
	c->movdqa(SPU_OFF_128(gpr, op.rt), vb);
}

// BG: borrow generate for rb - ra per 32-bit lane (result is 0 or 1)
void spu_recompiler::BG(spu_opcode_t op)
{
	// compare if-greater-than
	const XmmLink& va = XmmGet(op.ra, XmmType::Int);
	const XmmLink& vi = XmmAlloc();

	if (utils::has_avx512())
	{
		const XmmLink& vb = XmmGet(op.rb, XmmType::Int);
		c->vpsubd(vi, vb, va);
		c->vpternlogd(va, vb, vi, 0x4d /* B?nandAC:norAC */);
		c->psrld(va, 31);
		c->movdqa(SPU_OFF_128(gpr, op.rt), va);
		return;
	}

	// SSE fallback: unsigned compare via sign-bit flip + signed pcmpgtd
	c->movdqa(vi, XmmConst(v128::from32p(0x80000000)));
	c->pxor(va, vi);
	c->pxor(vi, SPU_OFF_128(gpr, op.rb));
	c->pcmpgtd(va, vi);
	c->paddd(va, XmmConst(v128::from32p(1)));
	c->movdqa(SPU_OFF_128(gpr, op.rt), va);
}

// SFH: subtract from halfword — rt = rb - ra per 16-bit lane
void spu_recompiler::SFH(spu_opcode_t op)
{
	// sub from (halfword)
	const XmmLink& vb = XmmGet(op.rb, XmmType::Int);
	c->psubw(vb, SPU_OFF_128(gpr, op.ra));
	c->movdqa(SPU_OFF_128(gpr, op.rt), vb);
}

// NOR: rt = ~(ra | rb)
void spu_recompiler::NOR(spu_opcode_t op)
{
	const XmmLink& va = XmmGet(op.ra, XmmType::Int);

	if (utils::has_avx512())
	{
		c->vpternlogd(va, va, SPU_OFF_128(gpr, op.rb), 0x11 /* norCB */);
		c->movdqa(SPU_OFF_128(gpr, op.rt), va);
		return;
	}

	c->por(va, SPU_OFF_128(gpr, op.rb));
	c->pxor(va, XmmConst(v128::from32p(0xffffffff)));
	c->movdqa(SPU_OFF_128(gpr, op.rt), va);
}

// ABSDB: absolute difference of bytes — |ra - rb| per 8-bit lane
void spu_recompiler::ABSDB(spu_opcode_t op)
{
	const XmmLink& va = XmmGet(op.ra, XmmType::Int);
	const XmmLink& vb = XmmGet(op.rb, XmmType::Int);
	const XmmLink& vm = XmmAlloc();
	// |a - b| == max(a, b) - min(a, b) for unsigned bytes
	c->movdqa(vm, va);
	c->pmaxub(va, vb);
	c->pminub(vb, vm);
	c->psubb(va, vb);
	c->movdqa(SPU_OFF_128(gpr, op.rt), va);
}

// ROT: rotate left word by per-lane variable amount from rb
void spu_recompiler::ROT(spu_opcode_t op)
{
	if (utils::has_avx512())
	{
		const XmmLink& va = XmmGet(op.ra, XmmType::Int);
		const XmmLink& vb = XmmGet(op.rb, XmmType::Int);
		const XmmLink& vt = XmmAlloc();
		c->vprolvd(vt, va, vb);
		c->movdqa(SPU_OFF_128(gpr, op.rt), vt);
		return;
	}

	if (utils::has_avx2())
	{
		// rot(x, n) == (x << n) | (x >> ((32 - n) & 31)), counts masked to 0..31
		const XmmLink& va = XmmGet(op.ra, XmmType::Int);
		const XmmLink& vb = XmmGet(op.rb, XmmType::Int);
		const XmmLink& vt = XmmAlloc();
		const XmmLink& v4 = XmmAlloc();
		c->movdqa(v4, XmmConst(v128::from32p(0x1f)));
		c->pand(vb, v4);
		c->vpsllvd(vt, va, vb);
		c->psubd(vb, XmmConst(v128::from32p(1)));
		c->pandn(vb, v4);
		c->vpsrlvd(va, va, vb);
		c->por(vt, va);
		c->movdqa(SPU_OFF_128(gpr, op.rt), vt);
		return;
	}

	if (utils::has_xop())
	{
		const XmmLink& va = XmmGet(op.ra, XmmType::Int);
		const XmmLink& vb = XmmGet(op.rb, XmmType::Int);
		const XmmLink& vt = XmmAlloc();
		c->vprotd(vt, va, vb);
		c->movdqa(SPU_OFF_128(gpr, op.rt), vt);
		return;
	}

	for (u32 i = 0; i < 4; i++) // unrolled loop
	{
		c->mov(qw0->r32(), SPU_OFF_32(gpr, op.ra, &v128::_u32, i));
		c->mov(asmjit::x86::ecx, SPU_OFF_32(gpr, op.rb, &v128::_u32, i));
		c->rol(qw0->r32(), asmjit::x86::cl);
		c->mov(SPU_OFF_32(gpr, op.rt, &v128::_u32, i), qw0->r32());
	}
}

// ROTM: logical shift right word by (0 - rb) & 0x3f per lane
void spu_recompiler::ROTM(spu_opcode_t op)
{
	if (utils::has_avx2())
	{
		const XmmLink& va = XmmGet(op.ra, XmmType::Int);
		const XmmLink& vb = XmmGet(op.rb, XmmType::Int);
		const XmmLink& vt = XmmAlloc();
		// (0 - n) & 0x3f computed as (~(n - 1)) & 0x3f; counts >= 32 shift to zero
		c->psubd(vb, XmmConst(v128::from32p(1)));
		c->pandn(vb, XmmConst(v128::from32p(0x3f)));
		c->vpsrlvd(vt, va, vb);
		c->movdqa(SPU_OFF_128(gpr, op.rt), vt);
		return;
	}

	if (utils::has_xop())
	{
		const XmmLink& va = XmmGet(op.ra, XmmType::Int);
		const XmmLink& vb = XmmGet(op.rb, XmmType::Int);
		const XmmLink& vt = XmmAlloc();
		c->psubd(vb, XmmConst(v128::from32p(1)));
		c->pandn(vb, XmmConst(v128::from32p(0x3f)));
		c->pxor(vt, vt);
		c->psubd(vt, vb);
		// XOP vpshld uses signed counts; zero lanes whose count exceeds 31
		c->pcmpgtd(vb, XmmConst(v128::from32p(31)));
		c->vpshld(vt, va, vt);
		c->vpandn(vt, vb, vt);
		c->movdqa(SPU_OFF_128(gpr, op.rt), vt);
		return;
	}

	for (u32 i = 0; i < 4; i++) // unrolled loop
	{
		c->mov(qw0->r32(), SPU_OFF_32(gpr, op.ra, &v128::_u32, i));
		c->mov(asmjit::x86::ecx, SPU_OFF_32(gpr, op.rb, &v128::_u32, i));
		c->neg(asmjit::x86::ecx);
		c->shr(*qw0, asmjit::x86::cl);
		c->mov(SPU_OFF_32(gpr, op.rt, &v128::_u32, i), qw0->r32());
	}
}

// ROTMA: arithmetic shift right word by (0 - rb) & 0x3f per lane
void spu_recompiler::ROTMA(spu_opcode_t op)
{
	if (utils::has_avx2())
	{
		const XmmLink& va = XmmGet(op.ra, XmmType::Int);
		const XmmLink& vb = XmmGet(op.rb, XmmType::Int);
		const XmmLink& vt = XmmAlloc();
		c->psubd(vb, XmmConst(v128::from32p(1)));
		c->pandn(vb, XmmConst(v128::from32p(0x3f)));
		c->vpsravd(vt, va, vb);
		c->movdqa(SPU_OFF_128(gpr, op.rt), vt);
		return;
	}

	if (utils::has_xop())
	{
		const XmmLink& va = XmmGet(op.ra, XmmType::Int);
		const XmmLink& vb = XmmGet(op.rb, XmmType::Int);
		const XmmLink& vt = XmmAlloc();
		c->psubd(vb, XmmConst(v128::from32p(1)));
		c->pandn(vb, XmmConst(v128::from32p(0x3f)));
		c->pxor(vt, vt);
		// Clamp count to 31: arithmetic shift saturates at the sign bit
		c->pminud(vb, XmmConst(v128::from32p(31)));
		c->psubd(vt, vb);
		c->vpshad(vt, va, vt);
		c->movdqa(SPU_OFF_128(gpr, op.rt), vt);
		return;
	}

	for (u32 i = 0; i < 4; i++) // unrolled loop
	{
		c->movsxd(*qw0, SPU_OFF_32(gpr, op.ra, &v128::_u32, i));
		c->mov(asmjit::x86::ecx, SPU_OFF_32(gpr, op.rb, &v128::_u32, i));
		c->neg(asmjit::x86::ecx);
		c->sar(*qw0, asmjit::x86::cl);
		c->mov(SPU_OFF_32(gpr, op.rt, &v128::_u32, i), qw0->r32());
	}
}

// SHL: logical shift left word by rb & 0x3f per lane
void spu_recompiler::SHL(spu_opcode_t op)
{
	if (utils::has_avx2())
	{
		const XmmLink& va = XmmGet(op.ra, XmmType::Int);
		const XmmLink& vb = XmmGet(op.rb, XmmType::Int);
		const XmmLink& vt = XmmAlloc();
		c->pand(vb, XmmConst(v128::from32p(0x3f)));
		c->vpsllvd(vt, va, vb);
		c->movdqa(SPU_OFF_128(gpr, op.rt), vt);
		return;
	}

	if (utils::has_xop())
	{
		const XmmLink& va = XmmGet(op.ra, XmmType::Int);
		const XmmLink& vb = XmmGet(op.rb, XmmType::Int);
		const XmmLink& vt = XmmAlloc();
		c->pand(vb, XmmConst(v128::from32p(0x3f)));
		// Zero lanes with count > 31 (vpshld wraps the count)
		c->vpcmpgtd(vt, vb, XmmConst(v128::from32p(31)));
		c->vpshld(vb, va, vb);
		c->pandn(vt, vb);
		c->movdqa(SPU_OFF_128(gpr, op.rt), vt);
		return;
	}

	for (u32 i = 0; i < 4; i++) // unrolled loop
	{
		c->mov(qw0->r32(), SPU_OFF_32(gpr, op.ra, &v128::_u32, i));
		c->mov(asmjit::x86::ecx, SPU_OFF_32(gpr, op.rb, &v128::_u32, i));
		c->shl(*qw0, asmjit::x86::cl);
		c->mov(SPU_OFF_32(gpr, op.rt, &v128::_u32, i), qw0->r32());
	}
}

// ROTH: rotate left halfword by per-lane variable amount from rb
void spu_recompiler::ROTH(spu_opcode_t op) //nf
{
	if (utils::has_avx512())
	{
		// No 16-bit variable rotate: widen odd/even halfwords to dwords,
		// rotate both streams with vprolvd, then blend the results back
		const XmmLink& va = XmmGet(op.ra, XmmType::Int);
		const XmmLink& vb = XmmGet(op.rb, XmmType::Int);
		const XmmLink& vt = XmmAlloc();
		const XmmLink& v4 = XmmAlloc();
		c->vmovdqa(v4, XmmConst(v128::from32r(0x0d0c0d0c, 0x09080908, 0x05040504, 0x01000100)));
		c->vpshufb(vt, va, v4); // duplicate low word
		c->vpsrld(va, va, 16);
		c->vpshufb(va, va, v4);
		c->vpsrld(v4, vb, 16);
		c->vprolvd(va, va, v4);
		c->vprolvd(vb, vt, vb);
		c->vpblendw(vt, vb, va, 0xaa);
		c->vmovdqa(SPU_OFF_128(gpr, op.rt),
vt);
		return;
	}

	if (utils::has_xop())
	{
		const XmmLink& va = XmmGet(op.ra, XmmType::Int);
		const XmmLink& vb = XmmGet(op.rb, XmmType::Int);
		const XmmLink& vt = XmmAlloc();
		c->vprotw(vt, va, vb);
		c->movdqa(SPU_OFF_128(gpr, op.rt), vt);
		return;
	}

	for (u32 i = 0; i < 8; i++) // unrolled loop
	{
		c->movzx(qw0->r32(), SPU_OFF_16(gpr, op.ra, &v128::_u16, i));
		c->movzx(asmjit::x86::ecx, SPU_OFF_16(gpr, op.rb, &v128::_u16, i));
		c->rol(qw0->r16(), asmjit::x86::cl);
		c->mov(SPU_OFF_16(gpr, op.rt, &v128::_u16, i), qw0->r16());
	}
}

// ROTHM: logical shift right halfword by (0 - rb) & 0x1f per lane
void spu_recompiler::ROTHM(spu_opcode_t op)
{
	if (utils::has_avx512())
	{
		const XmmLink& va = XmmGet(op.ra, XmmType::Int);
		const XmmLink& vb = XmmGet(op.rb, XmmType::Int);
		const XmmLink& vt = XmmAlloc();
		// (0 - n) & 0x1f as (~(n - 1)) & 0x1f; counts >= 16 shift to zero
		c->psubw(vb, XmmConst(v128::from16p(1)));
		c->pandn(vb, XmmConst(v128::from16p(0x1f)));
		c->vpsrlvw(vt, va, vb);
		c->movdqa(SPU_OFF_128(gpr, op.rt), vt);
		return;
	}

	if (utils::has_avx2())
	{
		// No 16-bit variable shift below AVX-512: shift odd/even halfword
		// streams with 32-bit vpsrlvd and blend the two results
		const XmmLink& va = XmmGet(op.ra, XmmType::Int);
		const XmmLink& vb = XmmGet(op.rb, XmmType::Int);
		const XmmLink& vt = XmmAlloc();
		const XmmLink& v4 = XmmAlloc();
		const XmmLink& v5 = XmmAlloc();
		c->psubw(vb, XmmConst(v128::from16p(1)));
		c->pandn(vb, XmmConst(v128::from16p(0x1f)));
		c->movdqa(vt, XmmConst(v128::from32p(0xffff0000))); // mask: select high words
		c->vpsrld(v4, vb, 16);
		c->vpsubusw(v5, vb, vt); // clear high words (using saturation sub for throughput)
		c->vpandn(vb, vt, va); // clear high words
		c->vpsrlvd(va, va, v4);
		c->vpsrlvd(vb, vb, v5);
		c->vpblendw(vt, vb, va, 0xaa); // can use vpblendvb with 0xffff0000 mask (vt)
		c->movdqa(SPU_OFF_128(gpr, op.rt), vt);
		return;
	}

	if (utils::has_xop())
	{
		const XmmLink& va = XmmGet(op.ra, XmmType::Int);
		const XmmLink& vb = XmmGet(op.rb, XmmType::Int);
		const XmmLink& vt = XmmAlloc();
		c->psubw(vb, XmmConst(v128::from16p(1)));
		c->pandn(vb, XmmConst(v128::from16p(0x1f)));
		c->pxor(vt, vt);
		c->psubw(vt, vb);
		// Zero lanes whose count exceeds 15 (vpshlw wraps the count)
		c->pcmpgtw(vb, XmmConst(v128::from16p(15)));
		c->vpshlw(vt, va, vt);
		c->vpandn(vt, vb, vt);
		c->movdqa(SPU_OFF_128(gpr, op.rt), vt);
		return;
	}

	for (u32 i = 0; i < 8; i++) // unrolled loop
	{
		c->movzx(qw0->r32(), SPU_OFF_16(gpr, op.ra, &v128::_u16, i));
		c->movzx(asmjit::x86::ecx, SPU_OFF_16(gpr, op.rb, &v128::_u16, i));
		c->neg(asmjit::x86::ecx);
		c->shr(qw0->r32(), asmjit::x86::cl);
		c->mov(SPU_OFF_16(gpr, op.rt, &v128::_u16, i), qw0->r16());
	}
}

// ROTMAH: arithmetic shift right halfword by (0 - rb) & 0x1f per lane
void spu_recompiler::ROTMAH(spu_opcode_t op)
{
	if (utils::has_avx512())
	{
		const XmmLink& va = XmmGet(op.ra, XmmType::Int);
		const XmmLink& vb = XmmGet(op.rb, XmmType::Int);
		const XmmLink& vt = XmmAlloc();
		c->psubw(vb, XmmConst(v128::from16p(1)));
		c->pandn(vb, XmmConst(v128::from16p(0x1f)));
		c->vpsravw(vt, va, vb);
		c->movdqa(SPU_OFF_128(gpr, op.rt), vt);
		return;
	}

	if (utils::has_avx2())
	{
		// Emulate 16-bit arithmetic shift with two 32-bit vpsravd streams
		const XmmLink& va = XmmGet(op.ra, XmmType::Int);
		const XmmLink& vb = XmmGet(op.rb, XmmType::Int);
		const XmmLink& vt = XmmAlloc();
		const XmmLink& v4 = XmmAlloc();
		const XmmLink& v5 = XmmAlloc();
		c->psubw(vb, XmmConst(v128::from16p(1)));
		c->movdqa(vt, XmmConst(v128::from16p(0x1f)));
		c->vpandn(v4, vb, vt);
		c->vpand(v5, vb, vt);
		c->movdqa(vt, XmmConst(v128::from32p(0x2f)));
		c->vpsrld(v4, v4, 16);
		c->vpsubusw(v5, vt, v5); // clear high word and add 16 to low word
		c->vpslld(vb, va, 16);
		c->vpsravd(va, va, v4);
		c->vpsravd(vb, vb, v5);
		c->vpblendw(vt, vb, va, 0xaa);
		c->movdqa(SPU_OFF_128(gpr, op.rt), vt);
		return;
	}

	if (utils::has_xop())
	{
		const XmmLink& va = XmmGet(op.ra, XmmType::Int);
		const XmmLink& vb = XmmGet(op.rb, XmmType::Int);
		const XmmLink& vt = XmmAlloc();
		c->psubw(vb, XmmConst(v128::from16p(1)));
		c->pandn(vb, XmmConst(v128::from16p(0x1f)));
		c->pxor(vt, vt);
		// Clamp count to 15: arithmetic shift saturates at the sign bit
		c->pminuw(vb, XmmConst(v128::from16p(15)));
		c->psubw(vt, vb);
		c->vpshaw(vt, va, vt);
		c->movdqa(SPU_OFF_128(gpr, op.rt), vt);
		return;
	}

	for (u32 i = 0; i < 8; i++) // unrolled loop
	{
		c->movsx(qw0->r32(), SPU_OFF_16(gpr, op.ra, &v128::_u16, i));
		c->movzx(asmjit::x86::ecx, SPU_OFF_16(gpr, op.rb, &v128::_u16, i));
		c->neg(asmjit::x86::ecx);
		c->sar(qw0->r32(), asmjit::x86::cl);
		c->mov(SPU_OFF_16(gpr, op.rt, &v128::_u16, i), qw0->r16());
	}
}

// SHLH: logical shift left halfword by rb & 0x1f per lane
void spu_recompiler::SHLH(spu_opcode_t op)
{
	if (utils::has_avx512())
	{
		const XmmLink& va = XmmGet(op.ra, XmmType::Int);
		const XmmLink& vb = XmmGet(op.rb, XmmType::Int);
		const XmmLink& vt = XmmAlloc();
		c->pand(vb, XmmConst(v128::from16p(0x1f)));
		c->vpsllvw(vt, va, vb);
		c->movdqa(SPU_OFF_128(gpr, op.rt), vt);
		return;
	}

	if (utils::has_avx2())
	{
		// Emulate 16-bit variable shift with two 32-bit vpsllvd streams
		const XmmLink& va = XmmGet(op.ra, XmmType::Int);
		const XmmLink& vb = XmmGet(op.rb, XmmType::Int);
		const XmmLink& vt = XmmAlloc();
		const XmmLink& v4 = XmmAlloc();
		const XmmLink& v5 = XmmAlloc();
		c->pand(vb, XmmConst(v128::from16p(0x1f)));
		c->movdqa(vt, XmmConst(v128::from32p(0xffff0000))); // mask: select high words
		c->vpsrld(v4, vb, 16);
		c->vpsubusw(v5, vb, vt); // clear high words (using saturation sub for throughput)
		c->vpand(vb, vt, va); // clear low words
		c->vpsllvd(va, va, v5);
		c->vpsllvd(vb, vb, v4);
		c->vpblendw(vt, vb, va, 0x55);
		c->movdqa(SPU_OFF_128(gpr, op.rt), vt);
		return;
	}

	if (utils::has_xop())
	{
		const XmmLink& va = XmmGet(op.ra, XmmType::Int);
		const XmmLink& vb = XmmGet(op.rb, XmmType::Int);
		const XmmLink& vt = XmmAlloc();
		c->pand(vb, XmmConst(v128::from16p(0x1f)));
		c->vpcmpgtw(vt, vb, XmmConst(v128::from16p(15)));
		c->vpshlw(vb, va, vb);
		c->pandn(vt, vb);
		c->movdqa(SPU_OFF_128(gpr, op.rt), vt);
		return;
	}

	for (u32 i = 0; i < 8; i++) // unrolled loop
	{
		c->movzx(qw0->r32(), SPU_OFF_16(gpr, op.ra, &v128::_u16, i));
		c->movzx(asmjit::x86::ecx, SPU_OFF_16(gpr, op.rb, &v128::_u16, i));
		c->shl(qw0->r32(), asmjit::x86::cl);
		c->mov(SPU_OFF_16(gpr, op.rt, &v128::_u16, i), qw0->r16());
	}
}

// ROTI: rotate left word by immediate (i7 & 0x1f)
void spu_recompiler::ROTI(spu_opcode_t op)
{
	// rotate left
	const int s = op.i7 & 0x1f;

	if (utils::has_avx512())
	{
		const XmmLink& va = XmmGet(op.ra, XmmType::Int);
		c->vprold(va, va, s);
		c->movdqa(SPU_OFF_128(gpr, op.rt), va);
		return;
	}

	if (utils::has_xop())
	{
		const XmmLink& va = XmmGet(op.ra, XmmType::Int);
		c->vprotd(va, va, s);
		c->movdqa(SPU_OFF_128(gpr, op.rt), va);
		return;
	}

	// SSE fallback: (x << s) | (x >> (32 - s))
	const XmmLink& va = XmmGet(op.ra,
XmmType::Int);
	const XmmLink& v1 = XmmAlloc();
	c->movdqa(v1, va);
	c->pslld(va, s);
	c->psrld(v1, 32 - s);
	c->por(va, v1);
	c->movdqa(SPU_OFF_128(gpr, op.rt), va);
}

// ROTMI: logical shift right word by immediate
void spu_recompiler::ROTMI(spu_opcode_t op)
{
	// shift right logical
	const int s = (0 - op.i7) & 0x3f;
	const XmmLink& va = XmmGet(op.ra, XmmType::Int);
	c->psrld(va, s);
	c->movdqa(SPU_OFF_128(gpr, op.rt), va);
}

// ROTMAI: arithmetic shift right word by immediate
void spu_recompiler::ROTMAI(spu_opcode_t op)
{
	// shift right arithmetical
	const int s = (0 - op.i7) & 0x3f;
	const XmmLink& va = XmmGet(op.ra, XmmType::Int);
	c->psrad(va, s);
	c->movdqa(SPU_OFF_128(gpr, op.rt), va);
}

// SHLI: logical shift left word by immediate
void spu_recompiler::SHLI(spu_opcode_t op)
{
	// shift left
	const int s = op.i7 & 0x3f;
	const XmmLink& va = XmmGet(op.ra, XmmType::Int);
	c->pslld(va, s);
	c->movdqa(SPU_OFF_128(gpr, op.rt), va);
}

// ROTHI: rotate left halfword by immediate
void spu_recompiler::ROTHI(spu_opcode_t op)
{
	// rotate left (halfword)
	const int s = op.i7 & 0xf;
	const XmmLink& va = XmmGet(op.ra, XmmType::Int);
	const XmmLink& v1 = XmmAlloc();
	c->movdqa(v1, va);
	c->psllw(va, s);
	c->psrlw(v1, 16 - s);
	c->por(va, v1);
	c->movdqa(SPU_OFF_128(gpr, op.rt), va);
}

// ROTHMI: logical shift right halfword by immediate
void spu_recompiler::ROTHMI(spu_opcode_t op)
{
	// shift right logical
	const int s = (0 - op.i7) & 0x1f;
	const XmmLink& va = XmmGet(op.ra, XmmType::Int);
	c->psrlw(va, s);
	c->movdqa(SPU_OFF_128(gpr, op.rt), va);
}

// ROTMAHI: arithmetic shift right halfword by immediate
void spu_recompiler::ROTMAHI(spu_opcode_t op)
{
	// shift right arithmetical (halfword)
	const int s = (0 - op.i7) & 0x1f;
	const XmmLink& va = XmmGet(op.ra, XmmType::Int);
	c->psraw(va, s);
	c->movdqa(SPU_OFF_128(gpr, op.rt), va);
}

// SHLHI: logical shift left halfword by immediate
void spu_recompiler::SHLHI(spu_opcode_t op)
{
	// shift left (halfword)
	const int s = op.i7 & 0x1f;
	const XmmLink& va = XmmGet(op.ra, XmmType::Int);
	c->psllw(va, s);
	c->movdqa(SPU_OFF_128(gpr, op.rt), va);
}

// A: add word — rt = ra + rb per 32-bit lane
void spu_recompiler::A(spu_opcode_t op)
{
	const XmmLink& vb = XmmGet(op.rb, XmmType::Int);
	c->paddd(vb, SPU_OFF_128(gpr, op.ra));
	c->movdqa(SPU_OFF_128(gpr, op.rt), vb);
}

// AND: rt = ra & rb (bitwise)
void spu_recompiler::AND(spu_opcode_t op)
{
	// and
	const XmmLink& vb = XmmGet(op.rb, XmmType::Int);
	c->pand(vb, SPU_OFF_128(gpr, op.ra));
	c->movdqa(SPU_OFF_128(gpr, op.rt), vb);
}

// CG: carry generate for ra + rb per 32-bit lane (result is 0 or 1)
void spu_recompiler::CG(spu_opcode_t op)
{
	const XmmLink& va = XmmGet(op.ra, XmmType::Int);
	const XmmLink& vb = XmmGet(op.rb, XmmType::Int);
	const XmmLink& vi = XmmAlloc();

	if (utils::has_avx512())
	{
		c->vpaddd(vi, vb, va);
		c->vpternlogd(vi, va, vb, 0x8e /* A?andBC:orBC */);
		c->psrld(vi, 31);
		c->movdqa(SPU_OFF_128(gpr, op.rt), vi);
		return;
	}

	// SSE fallback: carry out iff (a + b) < a, unsigned compare via sign flip
	c->movdqa(vi, XmmConst(v128::from32p(0x80000000)));
	c->paddd(vb, va);
	c->pxor(va, vi);
	c->pxor(vb, vi);
	c->pcmpgtd(va, vb);
	c->psrld(va, 31);
	c->movdqa(SPU_OFF_128(gpr, op.rt), va);
}

// AH: add halfword — rt = ra + rb per 16-bit lane
void spu_recompiler::AH(spu_opcode_t op)
{
	const XmmLink& va = XmmGet(op.ra, XmmType::Int);
	c->paddw(va, SPU_OFF_128(gpr, op.rb));
	c->movdqa(SPU_OFF_128(gpr, op.rt), va);
}

// NAND: rt = ~(ra & rb)
void spu_recompiler::NAND(spu_opcode_t op)
{
	// nand
	const XmmLink& va = XmmGet(op.ra, XmmType::Int);

	if (utils::has_avx512())
	{
		c->vpternlogd(va, va, SPU_OFF_128(gpr, op.rb), 0x77 /* nandCB */);
		c->movdqa(SPU_OFF_128(gpr, op.rt), va);
		return;
	}

	c->pand(va, SPU_OFF_128(gpr, op.rb));
	c->pxor(va, XmmConst(v128::from32p(0xffffffff)));
	c->movdqa(SPU_OFF_128(gpr, op.rt), va);
}

// AVGB: average of bytes with rounding (pavgb semantics)
void spu_recompiler::AVGB(spu_opcode_t op)
{
	const XmmLink& vb = XmmGet(op.rb, XmmType::Int);
	c->pavgb(vb, SPU_OFF_128(gpr, op.ra));
	c->movdqa(SPU_OFF_128(gpr, op.rt), vb);
}

void spu_recompiler::MTSPR(spu_opcode_t)
{
	// Check SPUInterpreter for notes.
}

// Helper called from generated code: write a value to SPU channel `ch`;
// escapes to the dispatcher if the write fails or the thread must yield/stop.
static void spu_wrch(spu_thread* _spu, u32 ch, u32 value)
{
	if (!_spu->set_ch_value(ch, value) || _spu->state & cpu_flag::again)
	{
		spu_runtime::g_escape(_spu);
	}

	if (_spu->test_stopped())
	{
		_spu->pc += 4;
		spu_runtime::g_escape(_spu);
	}
}

// Helper called from generated code: execute the queued MFC command;
// escapes to the dispatcher on failure or when the thread must yield/stop.
static void spu_wrch_mfc(spu_thread* _spu)
{
	if (!_spu->process_mfc_cmd() || _spu->state & cpu_flag::again)
	{
		spu_runtime::g_escape(_spu);
	}

	if (_spu->test_stopped())
	{
		_spu->pc += 4;
		spu_runtime::g_escape(_spu);
	}
}

// WRCH: write rt (word element 3) to channel op.ra. Common channels get
// inline fast paths; everything else syncs pc and calls spu_wrch.
void spu_recompiler::WRCH(spu_opcode_t op)
{
	using namespace asmjit;

	switch (op.ra)
	{
	case SPU_WrSRR0:
	{
		c->mov(*addr, SPU_OFF_32(gpr, op.rt, &v128::_u32, 3));
		c->and_(*addr, 0x3fffc);
		c->mov(SPU_OFF_32(srr0), *addr);
		return;
	}
	case SPU_WrOutIntrMbox:
	{
		// Can't seemingly be optimized
		break;
	}
	case SPU_WrOutMbox:
	{
		Label wait = c->newLabel();
		Label again = c->newLabel();
		Label ret = c->newLabel();
		c->mov(qw0->r32(), SPU_OFF_32(gpr, op.rt, &v128::_u32, 3));
		c->mov(addr->r64(), SPU_OFF_64(ch_out_mbox));
		c->align(AlignMode::kCode, 16);
		c->bind(again);
		c->mov(qw0->r32(), qw0->r32()); // 32-bit mov zeroes the upper half of qw0
		c->bt(addr->r64(), spu_channel::off_count);
		c->jc(wait);

		after.emplace_back([=, this, pos = m_pos]
		{
			// Out-of-line slow path: mailbox still full, use the C++ helper
			c->bind(wait);
			c->lea(addr->r64(), get_pc(pos));
			c->and_(*addr, 0x3fffc);
			c->mov(SPU_OFF_32(pc), *addr);
			c->mov(arg1->r32(), +op.ra);
			c->mov(*arg0, *cpu);
			c->call(spu_wrch);
			c->jmp(ret);
		});

		c->bts(*qw0, spu_channel::off_count);
		c->lock().cmpxchg(SPU_OFF_64(ch_out_mbox), *qw0);
		c->jnz(again);
		c->bind(ret);
		return;
	}
	case MFC_WrTagMask:
	{
		Label upd = c->newLabel();
		Label ret = c->newLabel();
		c->mov(qw0->r32(), SPU_OFF_32(gpr, op.rt, &v128::_u32, 3));
		c->mov(SPU_OFF_32(ch_tag_mask), qw0->r32());
		c->cmp(SPU_OFF_32(ch_tag_upd), MFC_TAG_UPDATE_IMMEDIATE);
		c->jnz(upd);

		after.emplace_back([=, this, pos = m_pos]
		{
			// Out-of-line: a tag update is pending, delegate to spu_wrch
			c->bind(upd);
			c->lea(addr->r64(), get_pc(pos));
			c->and_(*addr, 0x3fffc);
			c->mov(SPU_OFF_32(pc), *addr);
			c->mov(arg1->r32(), MFC_WrTagMask);
			c->mov(*arg0, *cpu);
			c->call(spu_wrch);
			c->jmp(ret);
		});

		c->bind(ret);
		return;
	}
	case
MFC_WrTagUpdate:
	{
		Label fail = c->newLabel();
		Label zero = c->newLabel();
		Label ret = c->newLabel();
		c->mov(qw0->r32(), SPU_OFF_32(gpr, op.rt, &v128::_u32, 3));
		c->cmp(qw0->r32(), 2);
		c->ja(fail);

		after.emplace_back([=, this, pos = m_pos]
		{
			// Out-of-line: invalid update mode -> generic helper;
			// `zero` path just stores the requested update mode
			c->bind(fail);
			c->lea(addr->r64(), get_pc(pos));
			c->and_(*addr, 0x3fffc);
			c->mov(SPU_OFF_32(pc), *addr);
			c->mov(arg1->r32(), +op.ra);
			c->mov(*arg0, *cpu);
			c->call(spu_wrch);
			c->jmp(ret);
			c->bind(zero);
			c->mov(SPU_OFF_32(ch_tag_upd), qw0->r32());
			c->jmp(ret);
		});

		// addr = completed mask, will be compared with qw1
		c->mov(*addr, SPU_OFF_32(mfc_fence));
		c->not_(*addr);
		c->and_(*addr, SPU_OFF_32(ch_tag_mask));
		c->mov(qw1->r32(), *addr);
		c->test(*addr, *addr);
		c->cmovz(qw1->r32(), qw0->r32());
		c->cmp(qw0->r32(), 1);
		c->cmovb(qw1->r32(), *addr);
		c->cmova(qw1->r32(), SPU_OFF_32(ch_tag_mask));
		c->cmp(*addr, qw1->r32());
		c->jne(zero);
		c->bts(addr->r64(), spu_channel::off_count);
		c->mov(SPU_OFF_32(ch_tag_upd), 0);
		c->mov(SPU_OFF_64(ch_tag_stat), addr->r64());
		c->bind(ret);
		return;
	}
	case MFC_LSA:
	{
		c->mov(*addr, SPU_OFF_32(gpr, op.rt, &v128::_u32, 3));
		c->mov(SPU_OFF_32(ch_mfc_cmd, &spu_mfc_cmd::lsa), *addr);
		return;
	}
	case MFC_EAH:
	{
		c->mov(*addr, SPU_OFF_32(gpr, op.rt, &v128::_u32, 3));
		c->mov(SPU_OFF_32(ch_mfc_cmd, &spu_mfc_cmd::eah), *addr);
		return;
	}
	case MFC_EAL:
	{
		c->mov(*addr, SPU_OFF_32(gpr, op.rt, &v128::_u32, 3));
		c->mov(SPU_OFF_32(ch_mfc_cmd, &spu_mfc_cmd::eal), *addr);
		return;
	}
	case MFC_Size:
	{
		c->mov(*addr, SPU_OFF_32(gpr, op.rt, &v128::_u32, 3));
		c->and_(*addr, 0x7fff);
		c->mov(SPU_OFF_16(ch_mfc_cmd, &spu_mfc_cmd::size), addr->r16());
		return;
	}
	case MFC_TagID:
	{
		c->mov(*addr, SPU_OFF_32(gpr, op.rt, &v128::_u32, 3));
		c->and_(*addr, 0x1f);
		c->mov(SPU_OFF_8(ch_mfc_cmd, &spu_mfc_cmd::tag), addr->r8());
		return;
	}
	case MFC_Cmd:
	{
		// Store the command opcode, sync pc and run the MFC command now
		c->mov(*addr, SPU_OFF_32(gpr, op.rt, &v128::_u32, 3));
		c->mov(SPU_OFF_8(ch_mfc_cmd, &spu_mfc_cmd::cmd), addr->r8());
		c->lea(addr->r64(), get_pc(m_pos));
		c->and_(*addr, 0x3fffc);
		c->mov(SPU_OFF_32(pc), *addr);
		c->mov(*arg0, *cpu);
		c->call(spu_wrch_mfc);
		return;
	}
	case MFC_WrListStallAck:
	{
		auto sub = [](spu_thread* _spu, u32 tag)
		{
			for (u32 i = 0; i < _spu->mfc_size; i++)
			{
				if (_spu->mfc_queue[i].tag == (tag | 0x80))
				{
					// Unset stall bit
					_spu->mfc_queue[i].tag &= 0x7f;
				}
			}

			_spu->do_mfc(true);
		};

		Label ret = c->newLabel();
		c->mov(arg1->r32(), SPU_OFF_32(gpr, op.rt, &v128::_u32, 3));
		c->and_(arg1->r32(), 0x1f);
		c->btr(SPU_OFF_32(ch_stall_mask), arg1->r32());
		c->jnc(ret); // bit was not set: nothing stalled on this tag
		c->mov(*arg0, *cpu);
		c->call(+sub);
		c->bind(ret);
		return;
	}
	case SPU_WrDec:
	{
		auto sub = [](spu_thread* _spu)
		{
			_spu->get_events(SPU_EVENT_TM);
			_spu->ch_dec_start_timestamp = get_timebased_time();
		};

		c->mov(*arg0, *cpu);
		c->call(+sub);
		c->mov(qw0->r32(), SPU_OFF_32(gpr, op.rt, &v128::_u32, 3));
		c->mov(SPU_OFF_32(ch_dec_value), qw0->r32());
		c->mov(SPU_OFF_8(is_dec_frozen), 0);
		return;
	}
	case SPU_WrEventMask:
	{
		// TODO
		break;
	}
	case SPU_WrEventAck:
	{
		// TODO
		break;
	}
	case SPU_Set_Bkmk_Tag:
	case SPU_PM_Start_Ev:
	case SPU_PM_Stop_Ev:
	{
		return;
	}
	default: break;
	}

	// Generic path: sync pc and call the C++ helper
	c->lea(addr->r64(), get_pc(m_pos));
	c->and_(*addr, 0x3fffc);
	c->mov(SPU_OFF_32(pc), *addr);
	c->mov(arg1->r32(), +op.ra);
	c->mov(qw0->r32(), SPU_OFF_32(gpr, op.rt, &v128::_u32, 3));
	c->mov(*arg0, *cpu);
	c->call(spu_wrch);
}

// BIZ: branch indirect to ra if rt (word element 3) is zero
void spu_recompiler::BIZ(spu_opcode_t op)
{
	asmjit::Label branch_label = c->newLabel();
	c->cmp(SPU_OFF_32(gpr, op.rt, &v128::_u32, 3), 0);
	c->je(branch_label);

	after.emplace_back([=, this, jt = m_targets[m_pos].size() > 1]
	{
		c->align(asmjit::AlignMode::kCode, 16);
		c->bind(branch_label);
		c->mov(*addr, SPU_OFF_32(gpr, op.ra, &v128::_u32, 3));
		c->and_(*addr, 0x3fffc);
		branch_indirect(op, jt);
	});
}

// BINZ: branch indirect to ra if rt (word element 3) is not zero
void spu_recompiler::BINZ(spu_opcode_t op)
{
	asmjit::Label branch_label = c->newLabel();
	c->cmp(SPU_OFF_32(gpr, op.rt, &v128::_u32, 3), 0);
	c->jne(branch_label);

	after.emplace_back([=, this, jt = m_targets[m_pos].size() > 1]
	{
		c->align(asmjit::AlignMode::kCode, 16);
		c->bind(branch_label);
		c->mov(*addr, SPU_OFF_32(gpr, op.ra, &v128::_u32, 3));
		c->and_(*addr, 0x3fffc);
		branch_indirect(op, jt);
	});
}

// BIHZ: branch indirect to ra if rt halfword element 6 is zero
void spu_recompiler::BIHZ(spu_opcode_t op)
{
	asmjit::Label branch_label = c->newLabel();
	c->cmp(SPU_OFF_16(gpr, op.rt, &v128::_u16, 6), 0);
	c->je(branch_label);

	after.emplace_back([=, this, jt = m_targets[m_pos].size() > 1]
	{
		c->align(asmjit::AlignMode::kCode, 16);
		c->bind(branch_label);
		c->mov(*addr, SPU_OFF_32(gpr, op.ra, &v128::_u32, 3));
		c->and_(*addr, 0x3fffc);
		branch_indirect(op, jt);
	});
}

// BIHNZ: branch indirect to ra if rt halfword element 6 is not zero
void spu_recompiler::BIHNZ(spu_opcode_t op)
{
	asmjit::Label branch_label = c->newLabel();
	c->cmp(SPU_OFF_16(gpr, op.rt, &v128::_u16, 6), 0);
	c->jne(branch_label);

	after.emplace_back([=, this, jt = m_targets[m_pos].size() > 1]
	{
		c->align(asmjit::AlignMode::kCode, 16);
		c->bind(branch_label);
		c->mov(*addr, SPU_OFF_32(gpr, op.ra, &v128::_u32, 3));
		c->and_(*addr, 0x3fffc);
		branch_indirect(op, jt);
	});
}

// STOPD: treated as STOP with code 0x3fff
void spu_recompiler::STOPD(spu_opcode_t)
{
	STOP(spu_opcode_t{0x3fff});
}

// STQX: store rt quadword (big-endian) to LS at (ra + rb) & 0x3fff0
void spu_recompiler::STQX(spu_opcode_t op)
{
	c->mov(*addr, SPU_OFF_32(gpr, op.ra, &v128::_u32, 3));
	c->add(*addr, SPU_OFF_32(gpr, op.rb, &v128::_u32, 3));
	c->and_(*addr, 0x3fff0);

	if (utils::has_ssse3())
	{
		// Byte-swap the whole quadword with one pshufb
		const XmmLink& vt = XmmGet(op.rt, XmmType::Int);
		c->pshufb(vt, XmmConst(v128::from32r(0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f)));
		c->movdqa(asmjit::x86::oword_ptr(*ls, addr->r64()), vt);
	}
	else
	{
		// Fallback: two 64-bit bswap stores (halves swapped)
		c->mov(*qw0, SPU_OFF_64(gpr, op.rt, &v128::_u64, 0));
		c->mov(*qw1, SPU_OFF_64(gpr, op.rt, &v128::_u64, 1));
		c->bswap(*qw0);
		c->bswap(*qw1);
		c->mov(asmjit::x86::qword_ptr(*ls, addr->r64(), 0, 0), *qw1);
		c->mov(asmjit::x86::qword_ptr(*ls, addr->r64(), 0, 8), *qw0);
	}
}

// BI: branch indirect to ra (ends the block)
void spu_recompiler::BI(spu_opcode_t op)
{
	const auto found = m_targets.find(m_pos);
	const auto is_jt = found == m_targets.end() || found->second.size() > 1;

	if (found == m_targets.end())
	{
		spu_log.error("[0x%x] BI: no targets", m_pos);
	}
	else if (op.d && found->second.size() == 1 && found->second[0] == spu_branch_target(m_pos, 1))
	{
		// Interrupts-disable pattern
		c->mov(SPU_OFF_8(interrupts_enabled), 0);
		return;
	}

	c->mov(*addr, SPU_OFF_32(gpr, op.ra, &v128::_u32, 3));
	c->and_(*addr, 0x3fffc);
	branch_indirect(op, is_jt, !is_jt);
	m_pos = -1;
}

// BISL: branch indirect to ra, store link (pc + 4) in rt word element 3
void spu_recompiler::BISL(spu_opcode_t op)
{
	c->mov(*addr, SPU_OFF_32(gpr, op.ra, &v128::_u32, 3));
	c->and_(*addr, 0x3fffc);

	const XmmLink& vr = XmmAlloc();
	c->lea(*qw0, get_pc(m_pos + 4));
	c->and_(qw0->r32(), 0x3fffc);
	c->movd(vr, qw0->r32());
	c->pslldq(vr, 12);
	c->movdqa(SPU_OFF_128(gpr, op.rt), vr);
	branch_set_link(m_pos + 4);
	branch_indirect(op, true, false);
	m_pos = -1;
}

// IRET: branch indirect to the address saved in SRR0
void spu_recompiler::IRET(spu_opcode_t op)
{
	c->mov(*addr, SPU_OFF_32(srr0));
	branch_indirect(op);
	m_pos = -1;
}

// BISLED: set link in rt, branch to ra only if events are pending
void spu_recompiler::BISLED(spu_opcode_t op)
{
	auto get_events = [](spu_thread* _spu) -> u32
	{
		return _spu->get_events(_spu->ch_events.load().mask).count;
	};

	c->mov(*addr, SPU_OFF_32(gpr, op.ra, &v128::_u32, 3));

	const XmmLink& vr = XmmAlloc();
	c->lea(*qw0, get_pc(m_pos + 4));
	c->movd(vr, qw0->r32());
	c->pand(vr, XmmConst(v128::from32p(0x3fffc)));
	c->pslldq(vr, 12);
	c->movdqa(SPU_OFF_128(gpr, op.rt), vr);

	asmjit::Label branch_label = c->newLabel();
	c->mov(*arg0, *cpu);
	c->call(+get_events);
	c->test(*addr, 1);
	c->jne(branch_label);

	after.emplace_back([=, this]()
	{
		c->align(asmjit::AlignMode::kCode, 16);
		c->bind(branch_label);
		c->and_(*addr, 0x3fffc);
		branch_indirect(op, true, false);
	});
}

// HBR: branch hint — no code generated
void spu_recompiler::HBR([[maybe_unused]] spu_opcode_t op)
{
}

// GB: gather the LSB of each word into a 4-bit mask in rt halfword element 6
void spu_recompiler::GB(spu_opcode_t op)
{
	const XmmLink& va = XmmGet(op.ra, XmmType::Int);
	c->pslld(va, 31);
	c->movmskps(*addr, va);
	c->pxor(va, va);
	c->pinsrw(va, *addr, 6);
	c->movdqa(SPU_OFF_128(gpr, op.rt), va);
}

// GBH: gather the LSB of each halfword into rt halfword element 6
void spu_recompiler::GBH(spu_opcode_t op)
{
	const XmmLink& va = XmmGet(op.ra, XmmType::Int);
	c->psllw(va, 15);
	c->packsswb(va, XmmConst({}));
	c->pmovmskb(*addr, va);
	c->pxor(va, va);
	c->pinsrw(va, *addr, 6);
	c->movdqa(SPU_OFF_128(gpr, op.rt), va);
}

// GBB: gather the LSB of each byte into rt halfword element 6
void spu_recompiler::GBB(spu_opcode_t op)
{
	const XmmLink& va = XmmGet(op.ra, XmmType::Int);
	c->psllq(va, 7);
	c->pmovmskb(*addr, va);
	c->pxor(va, va);
	c->pinsrw(va, *addr, 6);
	c->movdqa(SPU_OFF_128(gpr, op.rt), va);
}

// FSM: form select mask for words — word i of rt is all-ones iff
// bit i (0..3) of ra word element 3 is set
void spu_recompiler::FSM(spu_opcode_t op)
{
	const XmmLink& va = XmmGet(op.ra, XmmType::Int);
	const XmmLink& vm = XmmAlloc();
	c->pshufd(va, va, 0xff);
	c->movdqa(vm, XmmConst(v128::from32r(8, 4, 2, 1)));
	c->pand(va, vm);
	c->pcmpeqd(va, vm);
	c->movdqa(SPU_OFF_128(gpr, op.rt), va);
}

// FSMH: form select mask for halfwords from 8 bits of ra
void spu_recompiler::FSMH(spu_opcode_t op)
{
	const XmmLink& va = XmmGet(op.ra, XmmType::Int);
	const XmmLink& vm = XmmAlloc();
	c->punpckhwd(va, va);
	c->pshufd(va, va, 0xaa);
	c->movdqa(vm, XmmConst(v128::from64r(0x0080004000200010, 0x0008000400020001)));
	c->pand(va, vm);
	c->pcmpeqw(va, vm);
	c->movdqa(SPU_OFF_128(gpr, op.rt), va);
}

// FSMB: form select mask for bytes from 16 bits of ra
void spu_recompiler::FSMB(spu_opcode_t op)
{
	const XmmLink& va = XmmGet(op.ra, XmmType::Int);
	const XmmLink& vm = XmmAlloc();

	if (utils::has_ssse3())
	{
		// Broadcast the two source bytes with a single pshufb
		c->pshufb(va, XmmConst(v128::from64r(0x0d0d0d0d0d0d0d0d, 0x0c0c0c0c0c0c0c0c)));
	}
	else
	{
		c->punpckhbw(va, va);
		c->pshufhw(va, va, 0x50);
		c->pshufd(va, va, 0xfa);
	}

	c->movdqa(vm, XmmConst(v128::from64p(0x8040201008040201)));
	c->pand(va, vm);
	c->pcmpeqb(va, vm);
	c->movdqa(SPU_OFF_128(gpr, op.rt), va);
}

// FREST: reciprocal estimate — combines sign, and table lookups indexed by
// exponent (8 bits) and top fraction bits (5 bits) of each float lane
void spu_recompiler::FREST(spu_opcode_t op)
{
	const XmmLink& va = XmmGet(op.ra, XmmType::Float);
	const XmmLink& v_fraction = XmmAlloc();
	const XmmLink& v_exponent = XmmAlloc();
	const XmmLink& v_sign = XmmAlloc();
	c->movdqa(v_fraction, va);
	c->movdqa(v_exponent, va);
	c->movdqa(v_sign, va);
	c->psrld(v_fraction, 18);
	c->psrld(v_exponent, 23);
	c->andps(v_fraction, XmmConst(v128::from32p(0x1F)));
	c->andps(v_exponent, XmmConst(v128::from32p(0xFF)));
	c->andps(v_sign, XmmConst(v128::from32p(0x80000000)));

	const u64 fraction_lut_addr = reinterpret_cast<u64>(spu_frest_fraction_lut);
	const u64 exponent_lut_addr = reinterpret_cast<u64>(spu_frest_exponent_lut);

	// Scalar gather: replace each lane with its table entry (scale 2 = *4)
	for (u32 index = 0; index < 4; index++)
	{
		c->pextrd(*qw0, v_fraction, index);
		c->mov(*qw1, asmjit::x86::dword_ptr(fraction_lut_addr, *qw0, 2));
		c->pinsrd(v_fraction, *qw1, index);
		c->pextrd(*qw0, v_exponent, index);
		c->mov(*qw1, asmjit::x86::dword_ptr(exponent_lut_addr, *qw0, 2));
		c->pinsrd(v_exponent, *qw1, index);
	}

	// AVX2(not working?)
	// c->mov(qw1->r64(),spu_frest_fraction_lut);
	// c->vpgatherdd(v_fraction, asmjit::x86::dword_ptr(*qw1));
	// c->mov(qw0->r64(),spu_frest_exponent_lut);
	// c->vpgatherdd(v_exponent, asmjit::x86::dword_ptr(*qw0));

	c->orps(v_fraction, v_exponent);
	c->orps(v_sign, v_fraction);
	c->movaps(SPU_OFF_128(gpr, op.rt), v_sign);
}

// FRSQEST: reciprocal square root estimate via lookup tables
// (6 fraction bits — includes the exponent LSB; sign is discarded)
void spu_recompiler::FRSQEST(spu_opcode_t op)
{
	const XmmLink& va = XmmGet(op.ra, XmmType::Float);
	const XmmLink& v_fraction = XmmAlloc();
	const XmmLink& v_exponent = XmmAlloc();
	c->movdqa(v_fraction, va);
	c->movdqa(v_exponent, va);
	c->psrld(v_fraction, 18);
	c->psrld(v_exponent, 23);
	c->andps(v_fraction, XmmConst(v128::from32p(0x3F)));
	c->andps(v_exponent, XmmConst(v128::from32p(0xFF)));

	const u64 fraction_lut_addr = reinterpret_cast<u64>(spu_frsqest_fraction_lut);
	const u64 exponent_lut_addr = reinterpret_cast<u64>(spu_frsqest_exponent_lut);

	for (u32 index = 0; index < 4; index++)
	{
		c->pextrd(*qw0, v_fraction, index);
		c->mov(*qw1, asmjit::x86::dword_ptr(fraction_lut_addr, *qw0, 2));
		c->pinsrd(v_fraction, *qw1, index);
		c->pextrd(*qw0, v_exponent, index);
		c->mov(*qw1, asmjit::x86::dword_ptr(exponent_lut_addr, *qw0, 2));
		c->pinsrd(v_exponent, *qw1, index);
	}

	c->orps(v_fraction, v_exponent);
	c->movaps(SPU_OFF_128(gpr, op.rt), v_fraction);
}

// LQX: load quadword (big-endian) from LS at (ra + rb) & 0x3fff0 into rt
void spu_recompiler::LQX(spu_opcode_t op)
{
	c->mov(*addr, SPU_OFF_32(gpr, op.ra, &v128::_u32, 3));
	c->add(*addr, SPU_OFF_32(gpr, op.rb, &v128::_u32, 3));
	c->and_(*addr, 0x3fff0);

	if (utils::has_ssse3())
	{
		const XmmLink& vt = XmmAlloc();
		c->movdqa(vt, asmjit::x86::oword_ptr(*ls, addr->r64()));
		c->pshufb(vt, XmmConst(v128::from32r(0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f)));
		c->movdqa(SPU_OFF_128(gpr, op.rt), vt);
	}
	else
	{
		// Fallback: two 64-bit bswap loads (halves swapped)
		c->mov(*qw0, asmjit::x86::qword_ptr(*ls, addr->r64(), 0, 0));
		c->mov(*qw1, asmjit::x86::qword_ptr(*ls, addr->r64(), 0, 8));
		c->bswap(*qw0);
		c->bswap(*qw1);
		c->mov(SPU_OFF_64(gpr, op.rt, &v128::_u64, 0), *qw1);
		c->mov(SPU_OFF_64(gpr, op.rt, &v128::_u64, 1), *qw0);
	}
}

// ROTQBYBI: rotate quadword left by bytes, count taken from rb bits 3..6
// (uses the precomputed rldq_pshufb shuffle-control table)
void spu_recompiler::ROTQBYBI(spu_opcode_t op)
{
	if (!utils::has_ssse3())
	{
		return fall(op);
	}

	const XmmLink& va = XmmGet(op.ra, XmmType::Int);
	c->mov(*qw0, +g_spu_imm.rldq_pshufb);
	c->mov(*addr, SPU_OFF_32(gpr, op.rb, &v128::_u32, 3));
	c->and_(*addr, 0xf << 3);
	c->pshufb(va, asmjit::x86::oword_ptr(*qw0, addr->r64(), 1));
	c->movdqa(SPU_OFF_128(gpr, op.rt), va);
}

// ROTQMBYBI: shift quadword right by bytes via the srdq_pshufb table
void spu_recompiler::ROTQMBYBI(spu_opcode_t op)
{
	if (!utils::has_ssse3())
	{
		return fall(op);
	}

	const XmmLink& va = XmmGet(op.ra, XmmType::Int);
	c->mov(*qw0, +g_spu_imm.srdq_pshufb);
	c->mov(*addr, SPU_OFF_32(gpr, op.rb, &v128::_u32, 3));
	c->and_(*addr, 0x1f << 3);
	c->pshufb(va, asmjit::x86::oword_ptr(*qw0, addr->r64(), 1));
	c->movdqa(SPU_OFF_128(gpr, op.rt), va);
}

// SHLQBYBI: shift quadword left by bytes via the sldq_pshufb table
void spu_recompiler::SHLQBYBI(spu_opcode_t op)
{
	if (!utils::has_ssse3())
	{
		return fall(op);
	}

	const XmmLink& va = XmmGet(op.ra, XmmType::Int);
	c->mov(*qw0, +g_spu_imm.sldq_pshufb);
	c->mov(*addr, SPU_OFF_32(gpr, op.rb, &v128::_u32, 3));
	c->and_(*addr, 0x1f << 3);
	c->pshufb(va, asmjit::x86::oword_ptr(*qw0, addr->r64(), 1));
	c->movdqa(SPU_OFF_128(gpr, op.rt), va);
}

// CBX: generate controls for byte insertion at address (ra + rb):
// rt = identity shuffle pattern with 0x03 patched at the byte slot
void spu_recompiler::CBX(spu_opcode_t op)
{
	c->mov(*addr, SPU_OFF_32(gpr, op.rb, &v128::_u32, 3));
	c->add(*addr, SPU_OFF_32(gpr, op.ra, &v128::_u32, 3));
	c->not_(*addr);
	c->and_(*addr, 0xf);

	const XmmLink& vr = XmmAlloc();
	c->movdqa(vr, XmmConst(v128::from32r(0x10111213, 0x14151617, 0x18191a1b, 0x1c1d1e1f)));
	c->movdqa(SPU_OFF_128(gpr, op.rt), vr);
	// Patch the marker byte directly in the stored register
	c->mov(asmjit::x86::byte_ptr(*cpu, addr->r64(), 0, offset32(&spu_thread::gpr, op.rt)), 0x03);
}

// CHX: generate controls for halfword insertion at address (ra + rb)
void spu_recompiler::CHX(spu_opcode_t op)
{
	c->mov(*addr, SPU_OFF_32(gpr, op.rb, &v128::_u32, 3));
	c->add(*addr, SPU_OFF_32(gpr, op.ra, &v128::_u32, 3));
	c->not_(*addr);
	c->and_(*addr, 0xe);

	const XmmLink& vr = XmmAlloc();
	c->movdqa(vr,
XmmConst(v128::from32r(0x10111213, 0x14151617, 0x18191a1b, 0x1c1d1e1f))); c->movdqa(SPU_OFF_128(gpr, op.rt), vr); c->mov(asmjit::x86::word_ptr(*cpu, addr->r64(), 0, offset32(&spu_thread::gpr, op.rt)), 0x0203); } void spu_recompiler::CWX(spu_opcode_t op) { c->mov(*addr, SPU_OFF_32(gpr, op.rb, &v128::_u32, 3)); c->add(*addr, SPU_OFF_32(gpr, op.ra, &v128::_u32, 3)); c->not_(*addr); c->and_(*addr, 0xc); const XmmLink& vr = XmmAlloc(); c->movdqa(vr, XmmConst(v128::from32r(0x10111213, 0x14151617, 0x18191a1b, 0x1c1d1e1f))); c->movdqa(SPU_OFF_128(gpr, op.rt), vr); c->mov(asmjit::x86::dword_ptr(*cpu, addr->r64(), 0, offset32(&spu_thread::gpr, op.rt)), 0x00010203); } void spu_recompiler::CDX(spu_opcode_t op) { c->mov(*addr, SPU_OFF_32(gpr, op.rb, &v128::_u32, 3)); c->add(*addr, SPU_OFF_32(gpr, op.ra, &v128::_u32, 3)); c->not_(*addr); c->and_(*addr, 0x8); const XmmLink& vr = XmmAlloc(); c->movdqa(vr, XmmConst(v128::from32r(0x10111213, 0x14151617, 0x18191a1b, 0x1c1d1e1f))); c->movdqa(SPU_OFF_128(gpr, op.rt), vr); c->mov(*qw0, asmjit::Imm(0x0001020304050607ull)); c->mov(asmjit::x86::qword_ptr(*cpu, addr->r64(), 0, offset32(&spu_thread::gpr, op.rt)), *qw0); } void spu_recompiler::ROTQBI(spu_opcode_t op) { const XmmLink& va = XmmGet(op.ra, XmmType::Int); const XmmLink& vb = XmmGet(op.rb, XmmType::Int); const XmmLink& vt = XmmAlloc(); const XmmLink& v4 = XmmAlloc(); c->psrldq(vb, 12); c->pand(vb, XmmConst(v128::from64r(0, 7))); c->movdqa(v4, XmmConst(v128::from64r(0, 64))); c->pshufd(vt, va, 0x4e); c->psubq(v4, vb); c->psllq(va, vb); c->psrlq(vt, v4); c->por(vt, va); c->movdqa(SPU_OFF_128(gpr, op.rt), vt); } void spu_recompiler::ROTQMBI(spu_opcode_t op) { const XmmLink& va = XmmGet(op.ra, XmmType::Int); const XmmLink& vb = XmmAlloc(); const XmmLink& vt = XmmGet(op.rb, XmmType::Int); const XmmLink& v4 = XmmAlloc(); c->psrldq(vt, 12); c->pxor(vb, vb); c->psubq(vb, vt); c->pand(vb, XmmConst(v128::from64r(0, 7))); c->movdqa(v4, XmmConst(v128::from64r(0, 64))); c->movdqa(vt, va); 
// (tail of ROTQMBI: combine the shifted halves and store the result)
c->psrldq(vt, 8);
    c->psubq(v4, vb);
    c->psrlq(va, vb);
    c->psllq(vt, v4);
    c->por(vt, va);
    c->movdqa(SPU_OFF_128(gpr, op.rt), vt);
}

// SHLQBI: shift quadword left by 0..7 bits (count in rb.w3 & 7); carries bits from
// the low 64-bit half into the high half via a byte-shifted copy.
void spu_recompiler::SHLQBI(spu_opcode_t op)
{
    const XmmLink& va = XmmGet(op.ra, XmmType::Int);
    const XmmLink& vb = XmmGet(op.rb, XmmType::Int);
    const XmmLink& vt = XmmAlloc();
    const XmmLink& v4 = XmmAlloc();
    c->psrldq(vb, 12);
    c->pand(vb, XmmConst(v128::from64r(0, 7)));
    c->movdqa(v4, XmmConst(v128::from64r(0, 64)));
    c->movdqa(vt, va);
    c->pslldq(vt, 8);
    c->psubq(v4, vb);
    c->psllq(va, vb);
    c->psrlq(vt, v4);
    c->por(vt, va);
    c->movdqa(SPU_OFF_128(gpr, op.rt), vt);
}

// ROTQBY: rotate quadword left by bytes (count = rb.w3 & 0xf); (count * 16) indexes
// a pshufb mask in g_spu_imm.rldq_pshufb.
void spu_recompiler::ROTQBY(spu_opcode_t op)
{
    if (!utils::has_ssse3())
    {
        return fall(op);
    }

    const XmmLink& va = XmmGet(op.ra, XmmType::Int);
    c->mov(*qw0, +g_spu_imm.rldq_pshufb);
    c->mov(*addr, SPU_OFF_32(gpr, op.rb, &v128::_u32, 3));
    c->and_(*addr, 0xf);
    c->shl(*addr, 4); // each mask entry is 16 bytes
    c->pshufb(va, asmjit::x86::oword_ptr(*qw0, addr->r64()));
    c->movdqa(SPU_OFF_128(gpr, op.rt), va);
}

// ROTQMBY: shift quadword right by bytes; 5-bit (negated-count) field, mask table
// g_spu_imm.srdq_pshufb.
void spu_recompiler::ROTQMBY(spu_opcode_t op)
{
    if (!utils::has_ssse3())
    {
        return fall(op);
    }

    const XmmLink& va = XmmGet(op.ra, XmmType::Int);
    c->mov(*qw0, +g_spu_imm.srdq_pshufb);
    c->mov(*addr, SPU_OFF_32(gpr, op.rb, &v128::_u32, 3));
    c->and_(*addr, 0x1f);
    c->shl(*addr, 4);
    c->pshufb(va, asmjit::x86::oword_ptr(*qw0, addr->r64()));
    c->movdqa(SPU_OFF_128(gpr, op.rt), va);
}

// SHLQBY: shift quadword left by bytes via the g_spu_imm.sldq_pshufb mask table.
void spu_recompiler::SHLQBY(spu_opcode_t op)
{
    if (!utils::has_ssse3())
    {
        return fall(op);
    }

    const XmmLink& va = XmmGet(op.ra, XmmType::Int);
    c->mov(*qw0, +g_spu_imm.sldq_pshufb);
    c->mov(*addr, SPU_OFF_32(gpr, op.rb, &v128::_u32, 3));
    c->and_(*addr, 0x1f);
    c->shl(*addr, 4);
    c->pshufb(va, asmjit::x86::oword_ptr(*qw0, addr->r64()));
    c->movdqa(SPU_OFF_128(gpr, op.rt), va);
}

// ORX: OR the four words of ra together; the result is placed in the preferred slot
// (word 0) of rt with the remaining words zeroed (pslldq 12 after the reduction).
void spu_recompiler::ORX(spu_opcode_t op)
{
    const XmmLink& va = XmmGet(op.ra, XmmType::Int);
    const XmmLink& v1 = XmmAlloc();
    c->pshufd(v1, va, 0xb1); // swap word pairs
    c->por(va, v1);
    c->pshufd(v1, va, 0x4e); // swap 64-bit halves
    c->por(va, v1);
    c->pslldq(va, 12);
    c->movdqa(SPU_OFF_128(gpr, op.rt), va);
}

void
// CBD: generate controls for byte insertion (d-form, immediate offset): identity
// pattern into rt, then byte at ~(ra.w3 + i7) & 0xf patched with 0x03.
spu_recompiler::CBD(spu_opcode_t op)
{
    //if (op.ra == 1)
    //{
    //    // assuming that SP % 16 is always zero
    //    const XmmLink& vr = XmmAlloc();
    //    v128 value = v128::fromV(v128::from32r(0x10111213, 0x14151617, 0x18191a1b, 0x1c1d1e1f));
    //    value.u8r[op.i7 & 0xf] = 0x03;
    //    c->movdqa(vr, XmmConst(value));
    //    c->movdqa(SPU_OFF_128(gpr, op.rt), vr);
    //    return;
    //}

    c->mov(*addr, SPU_OFF_32(gpr, op.ra, &v128::_u32, 3));
    if (op.i7) c->add(*addr, +op.i7);
    c->not_(*addr);
    c->and_(*addr, 0xf);
    const XmmLink& vr = XmmAlloc();
    c->movdqa(vr, XmmConst(v128::from32r(0x10111213, 0x14151617, 0x18191a1b, 0x1c1d1e1f)));
    c->movdqa(SPU_OFF_128(gpr, op.rt), vr);
    c->mov(asmjit::x86::byte_ptr(*cpu, addr->r64(), 0, offset32(&spu_thread::gpr, op.rt)), 0x03);
}

// CHD: generate controls for halfword insertion (d-form) — halfword index (& 0xe),
// marker 0x0203.
void spu_recompiler::CHD(spu_opcode_t op)
{
    //if (op.ra == 1)
    //{
    //    // assuming that SP % 16 is always zero
    //    const XmmLink& vr = XmmAlloc();
    //    v128 value = v128::fromV(v128::from32r(0x10111213, 0x14151617, 0x18191a1b, 0x1c1d1e1f));
    //    value.u16r[(op.i7 >> 1) & 0x7] = 0x0203;
    //    c->movdqa(vr, XmmConst(value));
    //    c->movdqa(SPU_OFF_128(gpr, op.rt), vr);
    //    return;
    //}

    c->mov(*addr, SPU_OFF_32(gpr, op.ra, &v128::_u32, 3));
    if (op.i7) c->add(*addr, +op.i7);
    c->not_(*addr);
    c->and_(*addr, 0xe);
    const XmmLink& vr = XmmAlloc();
    c->movdqa(vr, XmmConst(v128::from32r(0x10111213, 0x14151617, 0x18191a1b, 0x1c1d1e1f)));
    c->movdqa(SPU_OFF_128(gpr, op.rt), vr);
    c->mov(asmjit::x86::word_ptr(*cpu, addr->r64(), 0, offset32(&spu_thread::gpr, op.rt)), 0x0203);
}

// CWD: generate controls for word insertion (d-form) — word index (& 0xc),
// marker 0x00010203. (Continues on the next line.)
void spu_recompiler::CWD(spu_opcode_t op)
{
    //if (op.ra == 1)
    //{
    //    // assuming that SP % 16 is always zero
    //    const XmmLink& vr = XmmAlloc();
    //    v128 value = v128::fromV(v128::from32r(0x10111213, 0x14151617, 0x18191a1b, 0x1c1d1e1f));
    //    value.u32r[(op.i7 >> 2) & 0x3] = 0x00010203;
    //    c->movdqa(vr, XmmConst(value));
    //    c->movdqa(SPU_OFF_128(gpr, op.rt), vr);
    //    return;
    //}

    c->mov(*addr, SPU_OFF_32(gpr, op.ra, &v128::_u32, 3));
    if (op.i7) c->add(*addr, +op.i7);
    c->not_(*addr);
    c->and_(*addr, 0xc);
// (tail of CWD: store the identity pattern, then patch the selected word)
const XmmLink& vr = XmmAlloc();
    c->movdqa(vr, XmmConst(v128::from32r(0x10111213, 0x14151617, 0x18191a1b, 0x1c1d1e1f)));
    c->movdqa(SPU_OFF_128(gpr, op.rt), vr);
    c->mov(asmjit::x86::dword_ptr(*cpu, addr->r64(), 0, offset32(&spu_thread::gpr, op.rt)), 0x00010203);
}

// CDD: generate controls for doubleword insertion (d-form) — dword-pair index (& 0x8),
// marker 0x0001020304050607.
void spu_recompiler::CDD(spu_opcode_t op)
{
    //if (op.ra == 1)
    //{
    //    // assuming that SP % 16 is always zero
    //    const XmmLink& vr = XmmAlloc();
    //    v128 value = v128::fromV(v128::from32r(0x10111213, 0x14151617, 0x18191a1b, 0x1c1d1e1f));
    //    value.u64r[(op.i7 >> 3) & 0x1] = 0x0001020304050607ull;
    //    c->movdqa(vr, XmmConst(value));
    //    c->movdqa(SPU_OFF_128(gpr, op.rt), vr);
    //    return;
    //}

    c->mov(*addr, SPU_OFF_32(gpr, op.ra, &v128::_u32, 3));
    if (op.i7) c->add(*addr, +op.i7);
    c->not_(*addr);
    c->and_(*addr, 0x8);
    const XmmLink& vr = XmmAlloc();
    c->movdqa(vr, XmmConst(v128::from32r(0x10111213, 0x14151617, 0x18191a1b, 0x1c1d1e1f)));
    c->movdqa(SPU_OFF_128(gpr, op.rt), vr);
    c->mov(*qw0, asmjit::Imm(0x0001020304050607ull));
    c->mov(asmjit::x86::qword_ptr(*cpu, addr->r64(), 0, offset32(&spu_thread::gpr, op.rt)), *qw0);
}

// ROTQBII: rotate quadword left by an immediate 0..7 bit count (i7 & 7).
// Note: when the count is 0, psrlq by 64 zeroes vt, leaving va unchanged — correct.
void spu_recompiler::ROTQBII(spu_opcode_t op)
{
    const XmmLink& va = XmmGet(op.ra, XmmType::Int);
    const XmmLink& vt = XmmAlloc();
    c->pshufd(vt, va, 0x4e); // swap 64-bit parts
    c->psllq(va, (op.i7 & 0x7));
    c->psrlq(vt, 64 - (op.i7 & 0x7));
    c->por(vt, va);
    c->movdqa(SPU_OFF_128(gpr, op.rt), vt);
}

// ROTQMBII: shift quadword right by an immediate (negated-count encoding (0 - i7) & 7).
void spu_recompiler::ROTQMBII(spu_opcode_t op)
{
    const XmmLink& va = XmmGet(op.ra, XmmType::Int);
    const XmmLink& vt = XmmAlloc();
    c->movdqa(vt, va);
    c->psrldq(vt, 8); // high half carried down into the low half
    c->psrlq(va, ((0 - op.i7) & 0x7));
    c->psllq(vt, 64 - ((0 - op.i7) & 0x7));
    c->por(vt, va);
    c->movdqa(SPU_OFF_128(gpr, op.rt), vt);
}

// SHLQBII: shift quadword left by an immediate 0..7 bit count (i7 & 7).
void spu_recompiler::SHLQBII(spu_opcode_t op)
{
    const XmmLink& va = XmmGet(op.ra, XmmType::Int);
    const XmmLink& vt = XmmAlloc();
    c->movdqa(vt, va);
    c->pslldq(vt, 8); // low half carried up into the high half
    c->psllq(va, (op.i7 & 0x7));
    c->psrlq(vt, 64 - (op.i7 & 0x7));
    c->por(vt, va);
    c->movdqa(SPU_OFF_128(gpr, op.rt), vt);
}

void
// ROTQBYI: rotate quadword left by an immediate byte count (i7 & 0xf); picks the
// cheapest emulation — pshufd for word-multiples, palignr on SSSE3, shift/or otherwise.
spu_recompiler::ROTQBYI(spu_opcode_t op)
{
    const int s = op.i7 & 0xf;
    const XmmLink& va = XmmGet(op.ra, XmmType::Int);
    const XmmLink& v2 = XmmAlloc();

    if (s == 0)
    {
        // rotate by zero: va already holds the result
    }
    else if (s == 4 || s == 8 || s == 12)
    {
        c->pshufd(va, va, utils::rol8(0xE4, s / 2)); // rotate the identity selector 0xE4
    }
    else if (utils::has_ssse3())
    {
        c->palignr(va, va, 16 - s);
    }
    else
    {
        c->movdqa(v2, va);
        c->psrldq(va, 16 - s);
        c->pslldq(v2, s);
        c->por(va, v2);
    }

    c->movdqa(SPU_OFF_128(gpr, op.rt), va);
}

// ROTQMBYI: shift quadword right by an immediate byte count ((0 - i7) & 0x1f);
// psrldq with a count >= 16 yields zero, matching the 5-bit field semantics.
void spu_recompiler::ROTQMBYI(spu_opcode_t op)
{
    const int s = (0 - op.i7) & 0x1f;
    const XmmLink& va = XmmGet(op.ra, XmmType::Int);
    c->psrldq(va, s);
    c->movdqa(SPU_OFF_128(gpr, op.rt), va);
}

// SHLQBYI: shift quadword left by an immediate byte count (i7 & 0x1f).
void spu_recompiler::SHLQBYI(spu_opcode_t op)
{
    const int s = op.i7 & 0x1f;
    const XmmLink& va = XmmGet(op.ra, XmmType::Int);
    c->pslldq(va, s);
    c->movdqa(SPU_OFF_128(gpr, op.rt), va);
}

// NOP: no operation — nothing emitted.
void spu_recompiler::NOP(spu_opcode_t)
{
}

// CGT: signed compare greater-than, per 32-bit word.
void spu_recompiler::CGT(spu_opcode_t op)
{
    const XmmLink& va = XmmGet(op.ra, XmmType::Int);
    c->pcmpgtd(va, SPU_OFF_128(gpr, op.rb));
    c->movdqa(SPU_OFF_128(gpr, op.rt), va);
}

void spu_recompiler::XOR(spu_opcode_t op) // xor
{
    const XmmLink& va = XmmGet(op.ra, XmmType::Int);
    c->pxor(va, SPU_OFF_128(gpr, op.rb));
    c->movdqa(SPU_OFF_128(gpr, op.rt), va);
}

// CGTH: signed compare greater-than, per 16-bit halfword.
void spu_recompiler::CGTH(spu_opcode_t op)
{
    const XmmLink& va = XmmGet(op.ra, XmmType::Int);
    c->pcmpgtw(va, SPU_OFF_128(gpr, op.rb));
    c->movdqa(SPU_OFF_128(gpr, op.rt), va);
}

// EQV: equivalence, rt = ~(ra ^ rb); single vpternlogd on AVX-512, else xor with
// all-ones followed by xor with ra.
void spu_recompiler::EQV(spu_opcode_t op)
{
    const XmmLink& vb = XmmGet(op.rb, XmmType::Int);

    if (utils::has_avx512())
    {
        c->vpternlogd(vb, vb, SPU_OFF_128(gpr, op.ra), 0x99 /* xnorCB */);
        c->movdqa(SPU_OFF_128(gpr, op.rt), vb);
        return;
    }

    c->pxor(vb, XmmConst(v128::from32p(0xffffffff)));
    c->pxor(vb, SPU_OFF_128(gpr, op.ra));
    c->movdqa(SPU_OFF_128(gpr, op.rt), vb);
}

// CGTB: signed compare greater-than, per byte.
void spu_recompiler::CGTB(spu_opcode_t op)
{
    const XmmLink& va = XmmGet(op.ra, XmmType::Int);
    c->pcmpgtb(va, SPU_OFF_128(gpr, op.rb));
    c->movdqa(SPU_OFF_128(gpr, op.rt), va);
}

// SUMB: sum the four bytes of each word of rb and ra into halfword sums packed into
// rt. (Continues on the next line.)
void spu_recompiler::SUMB(spu_opcode_t op)
{
    const XmmLink&
va = XmmGet(op.ra, XmmType::Int); const XmmLink& vb = XmmGet(op.rb, XmmType::Int); const XmmLink& v1 = XmmAlloc(); const XmmLink& v2 = XmmAlloc(); c->movdqa(v2, XmmConst(v128::from16p(0xff))); c->movdqa(v1, va); c->psrlw(va, 8); c->pand(v1, v2); c->pand(v2, vb); c->psrlw(vb, 8); c->paddw(va, v1); c->paddw(vb, v2); c->movdqa(v2, XmmConst(v128::from32p(0xffff))); c->movdqa(v1, va); c->psrld(va, 16); c->pand(v1, v2); c->pandn(v2, vb); c->pslld(vb, 16); c->paddw(va, v1); c->paddw(vb, v2); c->por(va, vb); c->movdqa(SPU_OFF_128(gpr, op.rt), va); } void spu_recompiler::HGT(spu_opcode_t op) { c->mov(*addr, SPU_OFF_32(gpr, op.ra, &v128::_s32, 3)); c->cmp(*addr, SPU_OFF_32(gpr, op.rb, &v128::_s32, 3)); asmjit::Label label = c->newLabel(); asmjit::Label ret = c->newLabel(); c->jg(label); after.emplace_back([=, this, pos = m_pos] { c->bind(label); c->lea(addr->r64(), get_pc(pos)); c->and_(*addr, 0x3fffc); c->mov(SPU_OFF_32(pc), *addr); c->mov(addr->r64(), reinterpret_cast<u64>(vm::base(0xffdead00))); c->mov(asmjit::x86::dword_ptr(addr->r64()), "HALT"_u32); c->jmp(ret); }); } void spu_recompiler::CLZ(spu_opcode_t op) { if (utils::has_avx512()) { const XmmLink& va = XmmGet(op.ra, XmmType::Int); const XmmLink& vt = XmmAlloc(); c->vplzcntd(vt, va); c->movdqa(SPU_OFF_128(gpr, op.rt), vt); return; } c->mov(qw0->r32(), 32 + 31); for (u32 i = 0; i < 4; i++) // unrolled loop { c->bsr(*addr, SPU_OFF_32(gpr, op.ra, &v128::_u32, i)); c->cmovz(*addr, qw0->r32()); c->xor_(*addr, 31); c->mov(SPU_OFF_32(gpr, op.rt, &v128::_u32, i), *addr); } } void spu_recompiler::XSWD(spu_opcode_t op) { c->movsxd(*qw0, SPU_OFF_32(gpr, op.ra, &v128::_s32, 0)); c->movsxd(*qw1, SPU_OFF_32(gpr, op.ra, &v128::_s32, 2)); c->mov(SPU_OFF_64(gpr, op.rt, &v128::_s64, 0), *qw0); c->mov(SPU_OFF_64(gpr, op.rt, &v128::_s64, 1), *qw1); } void spu_recompiler::XSHW(spu_opcode_t op) { const XmmLink& va = XmmGet(op.ra, XmmType::Int); c->pslld(va, 16); c->psrad(va, 16); c->movdqa(SPU_OFF_128(gpr, op.rt), va); } void 
// CNTB: population count per byte using the classic SWAR bit-pair/nibble reduction
// (masks 0x55, 0x33, 0x0f).
spu_recompiler::CNTB(spu_opcode_t op)
{
    const XmmLink& va = XmmGet(op.ra, XmmType::Int);
    const XmmLink& v1 = XmmAlloc();
    const XmmLink& vm = XmmAlloc();
    c->movdqa(vm, XmmConst(v128::from8p(0x55)));
    c->movdqa(v1, va);
    c->pand(va, vm);
    c->psrlq(v1, 1);
    c->pand(v1, vm);
    c->paddb(va, v1);
    c->movdqa(vm, XmmConst(v128::from8p(0x33)));
    c->movdqa(v1, va);
    c->pand(va, vm);
    c->psrlq(v1, 2);
    c->pand(v1, vm);
    c->paddb(va, v1);
    c->movdqa(vm, XmmConst(v128::from8p(0x0f)));
    c->movdqa(v1, va);
    c->pand(va, vm);
    c->psrlq(v1, 4);
    c->pand(v1, vm);
    c->paddb(va, v1);
    c->movdqa(SPU_OFF_128(gpr, op.rt), va);
}

// XSBH: extend sign byte to halfword.
void spu_recompiler::XSBH(spu_opcode_t op)
{
    const XmmLink& va = XmmGet(op.ra, XmmType::Int);
    c->psllw(va, 8);
    c->psraw(va, 8);
    c->movdqa(SPU_OFF_128(gpr, op.rt), va);
}

// CLGT: unsigned compare greater-than per word, done as signed pcmpgtd after
// biasing both operands by 0x80000000.
void spu_recompiler::CLGT(spu_opcode_t op) // compare if-greater-than
{
    const XmmLink& va = XmmGet(op.ra, XmmType::Int);
    const XmmLink& vi = XmmAlloc();
    c->movdqa(vi, XmmConst(v128::from32p(0x80000000)));
    c->pxor(va, vi);
    c->pxor(vi, SPU_OFF_128(gpr, op.rb));
    c->pcmpgtd(va, vi);
    c->movdqa(SPU_OFF_128(gpr, op.rt), va);
}

// ANDC: rt = ra & ~rb (pandn computes ~first & second).
void spu_recompiler::ANDC(spu_opcode_t op) // and not
{
    const XmmLink& vb = XmmGet(op.rb, XmmType::Int);
    c->pandn(vb, SPU_OFF_128(gpr, op.ra));
    c->movdqa(SPU_OFF_128(gpr, op.rt), vb);
}

// FCGT: floating compare greater-than with SPU single-precision quirks: extended
// values (NaN/Inf) are "lowered" and denormals flushed to zero before the compare.
// (Continues on the next line.)
void spu_recompiler::FCGT(spu_opcode_t op)
{
    const auto last_exp_bit = XmmConst(v128::from32p(0x00800000));
    const auto all_exp_bits = XmmConst(v128::from32p(0x7f800000));

    const XmmLink& tmp0 = XmmAlloc();
    const XmmLink& tmp1 = XmmAlloc();
    const XmmLink& tmp2 = XmmAlloc();
    const XmmLink& tmp3 = XmmAlloc();
    const XmmLink& tmpv = XmmAlloc();

    c->pxor(tmp0, tmp0);
    c->pxor(tmp1, tmp1);
    c->cmpps(tmp0, SPU_OFF_128(gpr, op.ra), 3); //tmp0 is true if a is extended (nan/inf)
    c->cmpps(tmp1, SPU_OFF_128(gpr, op.rb), 3); //tmp1 is true if b is extended (nan/inf)

    //compute lower a and b
    c->movaps(tmp2, last_exp_bit);
    c->movaps(tmp3, last_exp_bit);
    c->pandn(tmp2, SPU_OFF_128(gpr, op.ra)); //tmp2 = lowered_a
    c->pandn(tmp3,
// (tail of FCGT: lower extended operands, flush denormals, compare lowered_b < lowered_a)
SPU_OFF_128(gpr, op.rb)); //tmp3 = lowered_b

    //lower a if extended
    c->movaps(tmpv, tmp0);
    c->pand(tmpv, tmp2);
    c->pandn(tmp0, SPU_OFF_128(gpr, op.ra));
    c->orps(tmp0, tmpv);

    //lower b if extended
    c->movaps(tmpv, tmp1);
    c->pand(tmpv, tmp3);
    c->pandn(tmp1, SPU_OFF_128(gpr, op.rb));
    c->orps(tmp1, tmpv);

    //flush to 0 if denormalized
    c->pxor(tmpv, tmpv);
    c->movaps(tmp2, SPU_OFF_128(gpr, op.ra));
    c->movaps(tmp3, SPU_OFF_128(gpr, op.rb));
    c->andps(tmp2, all_exp_bits);
    c->andps(tmp3, all_exp_bits);
    c->cmpps(tmp2, tmpv, 0);
    c->cmpps(tmp3, tmpv, 0);
    c->pandn(tmp2, tmp0);
    c->pandn(tmp3, tmp1);

    c->cmpps(tmp3, tmp2, 1); // predicate 1: less-than, i.e. b < a
    c->movaps(SPU_OFF_128(gpr, op.rt), tmp3);
}

// DFCGT: double compare greater-than — not implemented in this recompiler.
void spu_recompiler::DFCGT(spu_opcode_t op)
{
    UNK(op);
}

// FA: floating add per word.
void spu_recompiler::FA(spu_opcode_t op)
{
    const XmmLink& va = XmmGet(op.ra, XmmType::Float);
    c->addps(va, SPU_OFF_128(gpr, op.rb));
    c->movaps(SPU_OFF_128(gpr, op.rt), va);
}

// FS: floating subtract per word.
void spu_recompiler::FS(spu_opcode_t op)
{
    const XmmLink& va = XmmGet(op.ra, XmmType::Float);
    c->subps(va, SPU_OFF_128(gpr, op.rb));
    c->movaps(SPU_OFF_128(gpr, op.rt), va);
}

// FM: floating multiply with SPU semantics — denormal inputs/results are flushed to
// zero, and an overflowed (extended) result keeps the xor of the operand signs.
void spu_recompiler::FM(spu_opcode_t op)
{
    const auto sign_bits = XmmConst(v128::from32p(0x80000000));
    const auto all_exp_bits = XmmConst(v128::from32p(0x7f800000));

    const XmmLink& tmp0 = XmmAlloc();
    const XmmLink& tmp1 = XmmAlloc();
    const XmmLink& tmp2 = XmmAlloc();
    const XmmLink& tmp3 = XmmAlloc();
    const XmmLink& tmp4 = XmmGet(op.ra, XmmType::Float);
    const XmmLink& tmp5 = XmmGet(op.rb, XmmType::Float);

    //check denormals
    c->pxor(tmp0, tmp0);
    c->movaps(tmp1, all_exp_bits);
    c->movaps(tmp2, all_exp_bits);
    c->andps(tmp1, tmp4);
    c->andps(tmp2, tmp5);
    c->cmpps(tmp1, tmp0, 0);
    c->cmpps(tmp2, tmp0, 0);
    c->orps(tmp1, tmp2); //denormal operand mask

    //compute result with flushed denormal inputs
    c->movaps(tmp2, tmp4);
    c->mulps(tmp2, tmp5); //primary result
    c->movaps(tmp3, tmp2);
    c->andps(tmp3, all_exp_bits);
    c->cmpps(tmp3, tmp0, 0); //denom mask from result
    c->orps(tmp3, tmp1);
    c->andnps(tmp3, tmp2); //flushed result

    //compute results for the extended path
    c->andps(tmp2, all_exp_bits);
    c->cmpps(tmp2, all_exp_bits, 0); //extended mask
    c->movaps(tmp4, sign_bits);
    c->movaps(tmp5, sign_bits);
    c->movaps(tmp0, sign_bits);
    c->andps(tmp4, SPU_OFF_128(gpr, op.ra));
    c->andps(tmp5, SPU_OFF_128(gpr, op.rb));
    c->xorps(tmp4, tmp5); //sign mask
    c->pandn(tmp0, tmp2);
    c->orps(tmp4, tmp0); //add result sign back to original extended value
    c->movaps(tmp5, tmp1); //denormal mask (operands)
    c->andnps(tmp5, tmp4); //max_float with sign bit (nan/-nan) where not denormal or zero

    //select result
    c->movaps(tmp0, tmp2);
    c->andnps(tmp0, tmp3);
    c->andps(tmp2, tmp5);
    c->orps(tmp0, tmp2);
    c->movaps(SPU_OFF_128(gpr, op.rt), tmp0);
}

// CLGTH: unsigned compare greater-than per halfword (bias by 0x8000, then signed compare).
void spu_recompiler::CLGTH(spu_opcode_t op) // compare if-greater-than
{
    const XmmLink& va = XmmGet(op.ra, XmmType::Int);
    const XmmLink& vi = XmmAlloc();
    c->movdqa(vi, XmmConst(v128::from16p(0x8000)));
    c->pxor(va, vi);
    c->pxor(vi, SPU_OFF_128(gpr, op.rb));
    c->pcmpgtw(va, vi);
    c->movdqa(SPU_OFF_128(gpr, op.rt), va);
}

// ORC: rt = ra | ~rb; single vpternlogd on AVX-512, else invert rb then or with ra.
void spu_recompiler::ORC(spu_opcode_t op)
{
    const XmmLink& vb = XmmGet(op.rb, XmmType::Int);

    if (utils::has_avx512())
    {
        c->vpternlogd(vb, vb, SPU_OFF_128(gpr, op.ra), 0xbb /* orC!B */);
        c->movdqa(SPU_OFF_128(gpr, op.rt), vb);
        return;
    }

    c->pxor(vb, XmmConst(v128::from32p(0xffffffff)));
    c->por(vb, SPU_OFF_128(gpr, op.ra));
    c->movdqa(SPU_OFF_128(gpr, op.rt), vb);
}

// FCMGT: floating compare magnitude greater-than. (Continues on the next line.)
void spu_recompiler::FCMGT(spu_opcode_t op)
{
    // reverted less-than
    // since comparison is absolute, a > b if a is extended and b is not extended
    // flush denormals to zero to make zero == zero work
    const auto all_exp_bits = XmmConst(v128::from32p(0x7f800000));
    const auto remove_sign_bits = XmmConst(v128::from32p(0x7fffffff));

    const XmmLink& tmp0 = XmmAlloc();
    const XmmLink& tmp1 = XmmAlloc();
    const XmmLink& tmp2 = XmmAlloc();
    const XmmLink& tmp3 = XmmAlloc();
    const XmmLink& tmpv = XmmAlloc();

    c->pxor(tmp0, tmp0);
    c->pxor(tmp1, tmp1);
    c->cmpps(tmp0, SPU_OFF_128(gpr, op.ra), 3); //tmp0 is true if a is extended (nan/inf)
// (tail of FCMGT: flush denormals, drop sign bits, compare magnitudes)
c->cmpps(tmp1, SPU_OFF_128(gpr, op.rb), 3); //tmp1 is true if b is extended (nan/inf)

    //flush to 0 if denormalized
    c->pxor(tmpv, tmpv);
    c->movaps(tmp2, SPU_OFF_128(gpr, op.ra));
    c->movaps(tmp3, SPU_OFF_128(gpr, op.rb));
    c->andps(tmp2, all_exp_bits);
    c->andps(tmp3, all_exp_bits);
    c->cmpps(tmp2, tmpv, 0);
    c->cmpps(tmp3, tmpv, 0);
    c->pandn(tmp2, SPU_OFF_128(gpr, op.ra));
    c->pandn(tmp3, SPU_OFF_128(gpr, op.rb));

    //Set tmp1 to true where a is extended but b is not extended
    //This is a simplification since absolute values remove necessity of lowering
    c->xorps(tmp0, tmp1); //tmp0 is true when either a or b is extended
    c->pandn(tmp1, tmp0); //tmp1 is true if b is not extended and a is extended

    c->andps(tmp2, remove_sign_bits);
    c->andps(tmp3, remove_sign_bits);
    c->cmpps(tmp3, tmp2, 1); // |b| < |a|
    c->orps(tmp3, tmp1); //Force result to all true if a is extended but b is not
    c->movaps(SPU_OFF_128(gpr, op.rt), tmp3);
}

// DFCMGT: double compare magnitude greater-than — not implemented in this recompiler.
void spu_recompiler::DFCMGT(spu_opcode_t op)
{
    UNK(op);
}

// DFA: double floating add.
void spu_recompiler::DFA(spu_opcode_t op)
{
    const XmmLink& va = XmmGet(op.ra, XmmType::Double);
    c->addpd(va, SPU_OFF_128(gpr, op.rb));
    c->movapd(SPU_OFF_128(gpr, op.rt), va);
}

// DFS: double floating subtract.
void spu_recompiler::DFS(spu_opcode_t op)
{
    const XmmLink& va = XmmGet(op.ra, XmmType::Double);
    c->subpd(va, SPU_OFF_128(gpr, op.rb));
    c->movapd(SPU_OFF_128(gpr, op.rt), va);
}

// DFM: double floating multiply.
void spu_recompiler::DFM(spu_opcode_t op)
{
    const XmmLink& va = XmmGet(op.ra, XmmType::Double);
    c->mulpd(va, SPU_OFF_128(gpr, op.rb));
    c->movapd(SPU_OFF_128(gpr, op.rt), va);
}

// CLGTB: unsigned compare greater-than per byte (bias by 0x80, then signed compare).
void spu_recompiler::CLGTB(spu_opcode_t op) // compare if-greater-than
{
    const XmmLink& va = XmmGet(op.ra, XmmType::Int);
    const XmmLink& vi = XmmAlloc();
    c->movdqa(vi, XmmConst(v128::from8p(0x80)));
    c->pxor(va, vi);
    c->pxor(vi, SPU_OFF_128(gpr, op.rb));
    c->pcmpgtb(va, vi);
    c->movdqa(SPU_OFF_128(gpr, op.rt), va);
}

// HLGT: halt-if-logically-greater-than (unsigned ra.w3 > rb.w3).
// (Continues on the next line.)
void spu_recompiler::HLGT(spu_opcode_t op)
{
    c->mov(*addr, SPU_OFF_32(gpr, op.ra, &v128::_u32, 3));
    c->cmp(*addr, SPU_OFF_32(gpr, op.rb, &v128::_u32, 3));

    asmjit::Label label =
c->newLabel(); asmjit::Label ret = c->newLabel(); c->ja(label); after.emplace_back([=, this, pos = m_pos] { c->bind(label); c->lea(addr->r64(), get_pc(pos)); c->and_(*addr, 0x3fffc); c->mov(SPU_OFF_32(pc), *addr); c->mov(addr->r64(), reinterpret_cast<u64>(vm::base(0xffdead00))); c->mov(asmjit::x86::dword_ptr(addr->r64()), "HALT"_u32); c->jmp(ret); }); } void spu_recompiler::DFMA(spu_opcode_t op) { const XmmLink& vr = XmmGet(op.rt, XmmType::Double); const XmmLink& va = XmmGet(op.ra, XmmType::Double); c->mulpd(va, SPU_OFF_128(gpr, op.rb)); c->addpd(vr, va); c->movapd(SPU_OFF_128(gpr, op.rt), vr); } void spu_recompiler::DFMS(spu_opcode_t op) { const XmmLink& va = XmmGet(op.ra, XmmType::Double); const XmmLink& vt = XmmGet(op.rt, XmmType::Double); c->mulpd(va, SPU_OFF_128(gpr, op.rb)); c->subpd(va, vt); c->movapd(SPU_OFF_128(gpr, op.rt), va); } void spu_recompiler::DFNMS(spu_opcode_t op) { const XmmLink& vr = XmmGet(op.rt, XmmType::Double); const XmmLink& va = XmmGet(op.ra, XmmType::Double); c->mulpd(va, SPU_OFF_128(gpr, op.rb)); c->subpd(vr, va); c->movapd(SPU_OFF_128(gpr, op.rt), vr); } void spu_recompiler::DFNMA(spu_opcode_t op) { const XmmLink& va = XmmGet(op.ra, XmmType::Double); const XmmLink& vt = XmmGet(op.rt, XmmType::Double); c->mulpd(va, SPU_OFF_128(gpr, op.rb)); c->addpd(va, vt); c->xorpd(va, XmmConst(v128::from64p(0x8000000000000000))); c->movapd(SPU_OFF_128(gpr, op.rt), va); } void spu_recompiler::CEQ(spu_opcode_t op) { const XmmLink& va = XmmGet(op.ra, XmmType::Int); c->pcmpeqd(va, SPU_OFF_128(gpr, op.rb)); c->movdqa(SPU_OFF_128(gpr, op.rt), va); } void spu_recompiler::MPYHHU(spu_opcode_t op) { const XmmLink& va = XmmGet(op.ra, XmmType::Int); const XmmLink& vb = XmmGet(op.rb, XmmType::Int); const XmmLink& va2 = XmmAlloc(); c->movdqa(va2, va); c->pmulhuw(va, vb); c->pmullw(va2, vb); c->pand(va, XmmConst(v128::from32p(0xffff0000))); c->psrld(va2, 16); c->por(va, va2); c->movdqa(SPU_OFF_128(gpr, op.rt), va); } void spu_recompiler::ADDX(spu_opcode_t op) { 
const XmmLink& vt = XmmGet(op.rt, XmmType::Int); c->pand(vt, XmmConst(v128::from32p(1))); c->paddd(vt, SPU_OFF_128(gpr, op.ra)); c->paddd(vt, SPU_OFF_128(gpr, op.rb)); c->movdqa(SPU_OFF_128(gpr, op.rt), vt); } void spu_recompiler::SFX(spu_opcode_t op) { const XmmLink& vt = XmmGet(op.rt, XmmType::Int); const XmmLink& vb = XmmGet(op.rb, XmmType::Int); c->pandn(vt, XmmConst(v128::from32p(1))); c->psubd(vb, SPU_OFF_128(gpr, op.ra)); c->psubd(vb, vt); c->movdqa(SPU_OFF_128(gpr, op.rt), vb); } void spu_recompiler::CGX(spu_opcode_t op) //nf { const XmmLink& vt = XmmGet(op.rt, XmmType::Int); const XmmLink& va = XmmGet(op.ra, XmmType::Int); const XmmLink& vb = XmmGet(op.rb, XmmType::Int); const XmmLink& res = XmmAlloc(); const XmmLink& sign = XmmAlloc(); c->pslld(vt, 31); c->psrad(vt, 31); if (utils::has_avx()) { c->vpaddd(res, va, vb); } else { c->movdqa(res, va); c->paddd(res, vb); } c->movdqa(sign, XmmConst(v128::from32p(0x80000000))); c->pxor(va, sign); c->pxor(res, sign); c->pcmpgtd(va, res); c->pxor(res, sign); c->pcmpeqd(res, vt); c->pand(res, vt); c->por(res, va); c->psrld(res, 31); c->movdqa(SPU_OFF_128(gpr, op.rt), res); } void spu_recompiler::BGX(spu_opcode_t op) //nf { const XmmLink& vt = XmmGet(op.rt, XmmType::Int); const XmmLink& va = XmmGet(op.ra, XmmType::Int); const XmmLink& vb = XmmGet(op.rb, XmmType::Int); const XmmLink& temp = XmmAlloc(); const XmmLink& sign = XmmAlloc(); c->pslld(vt, 31); if (utils::has_avx()) { c->vpcmpeqd(temp, vb, va); } else { c->movdqa(temp, vb); c->pcmpeqd(temp, va); } c->pand(vt, temp); c->movdqa(sign, XmmConst(v128::from32p(0x80000000))); c->pxor(va, sign); c->pxor(vb, sign); c->pcmpgtd(vb, va); c->por(vt, vb); c->psrld(vt, 31); c->movdqa(SPU_OFF_128(gpr, op.rt), vt); } void spu_recompiler::MPYHHA(spu_opcode_t op) { const XmmLink& vt = XmmGet(op.rt, XmmType::Int); const XmmLink& va = XmmGet(op.ra, XmmType::Int); const XmmLink& vb = XmmGet(op.rb, XmmType::Int); c->psrld(va, 16); c->psrld(vb, 16); c->pmaddwd(va, vb); c->paddd(vt, 
va); c->movdqa(SPU_OFF_128(gpr, op.rt), vt); } void spu_recompiler::MPYHHAU(spu_opcode_t op) { const XmmLink& vt = XmmGet(op.rt, XmmType::Int); const XmmLink& va = XmmGet(op.ra, XmmType::Int); const XmmLink& vb = XmmGet(op.rb, XmmType::Int); const XmmLink& va2 = XmmAlloc(); c->movdqa(va2, va); c->pmulhuw(va, vb); c->pmullw(va2, vb); c->pand(va, XmmConst(v128::from32p(0xffff0000))); c->psrld(va2, 16); c->paddd(vt, va); c->paddd(vt, va2); c->movdqa(SPU_OFF_128(gpr, op.rt), vt); } void spu_recompiler::FSCRRD(spu_opcode_t op) { // zero (hack) const XmmLink& v0 = XmmAlloc(); c->pxor(v0, v0); c->movdqa(SPU_OFF_128(gpr, op.rt), v0); } void spu_recompiler::FESD(spu_opcode_t op) { const XmmLink& va = XmmGet(op.ra, XmmType::Float); c->shufps(va, va, 0x8d); // _f[0] = _f[1]; _f[1] = _f[3]; c->cvtps2pd(va, va); c->movapd(SPU_OFF_128(gpr, op.rt), va); } void spu_recompiler::FRDS(spu_opcode_t op) { const XmmLink& va = XmmGet(op.ra, XmmType::Double); c->cvtpd2ps(va, va); c->shufps(va, va, 0x72); // _f[1] = _f[0]; _f[3] = _f[1]; _f[0] = _f[2] = 0; c->movaps(SPU_OFF_128(gpr, op.rt), va); } void spu_recompiler::FSCRWR(spu_opcode_t /*op*/) { // nop (not implemented) } void spu_recompiler::DFTSV(spu_opcode_t op) { UNK(op); } void spu_recompiler::FCEQ(spu_opcode_t op) { // compare equal const XmmLink& vb = XmmGet(op.rb, XmmType::Float); c->cmpps(vb, SPU_OFF_128(gpr, op.ra), 0); c->movaps(SPU_OFF_128(gpr, op.rt), vb); } void spu_recompiler::DFCEQ(spu_opcode_t op) { UNK(op); } void spu_recompiler::MPY(spu_opcode_t op) { const XmmLink& va = XmmGet(op.ra, XmmType::Int); const XmmLink& vb = XmmGet(op.rb, XmmType::Int); const XmmLink& vi = XmmAlloc(); c->movdqa(vi, XmmConst(v128::from32p(0xffff))); c->pand(va, vi); c->pand(vb, vi); c->pmaddwd(va, vb); c->movdqa(SPU_OFF_128(gpr, op.rt), va); } void spu_recompiler::MPYH(spu_opcode_t op) { const XmmLink& va = XmmGet(op.ra, XmmType::Int); const XmmLink& vb = XmmGet(op.rb, XmmType::Int); c->psrld(va, 16); c->pmullw(va, vb); c->pslld(va, 16); 
c->movdqa(SPU_OFF_128(gpr, op.rt), va); } void spu_recompiler::MPYHH(spu_opcode_t op) { const XmmLink& va = XmmGet(op.ra, XmmType::Int); const XmmLink& vb = XmmGet(op.rb, XmmType::Int); c->psrld(va, 16); c->psrld(vb, 16); c->pmaddwd(va, vb); c->movdqa(SPU_OFF_128(gpr, op.rt), va); } void spu_recompiler::MPYS(spu_opcode_t op) { const XmmLink& va = XmmGet(op.ra, XmmType::Int); const XmmLink& vb = XmmGet(op.rb, XmmType::Int); c->pmulhw(va, vb); c->pslld(va, 16); c->psrad(va, 16); c->movdqa(SPU_OFF_128(gpr, op.rt), va); } void spu_recompiler::CEQH(spu_opcode_t op) { const XmmLink& va = XmmGet(op.ra, XmmType::Int); c->pcmpeqw(va, SPU_OFF_128(gpr, op.rb)); c->movdqa(SPU_OFF_128(gpr, op.rt), va); } void spu_recompiler::FCMEQ(spu_opcode_t op) { const XmmLink& vb = XmmGet(op.rb, XmmType::Float); const XmmLink& vi = XmmAlloc(); c->movaps(vi, XmmConst(v128::from32p(0x7fffffff))); c->andps(vb, vi); // abs c->andps(vi, SPU_OFF_128(gpr, op.ra)); c->cmpps(vb, vi, 0); // == c->movaps(SPU_OFF_128(gpr, op.rt), vb); } void spu_recompiler::DFCMEQ(spu_opcode_t op) { UNK(op); } void spu_recompiler::MPYU(spu_opcode_t op) { const XmmLink& va = XmmGet(op.ra, XmmType::Int); const XmmLink& vb = XmmGet(op.rb, XmmType::Int); const XmmLink& va2 = XmmAlloc(); c->movdqa(va2, va); c->pmulhuw(va, vb); c->pmullw(va2, vb); c->pslld(va, 16); c->pand(va2, XmmConst(v128::from32p(0xffff))); c->por(va, va2); c->movdqa(SPU_OFF_128(gpr, op.rt), va); } void spu_recompiler::CEQB(spu_opcode_t op) { const XmmLink& va = XmmGet(op.ra, XmmType::Int); c->pcmpeqb(va, SPU_OFF_128(gpr, op.rb)); c->movdqa(SPU_OFF_128(gpr, op.rt), va); } void spu_recompiler::FI(spu_opcode_t op) { // Floating Interpolate const XmmLink& va = XmmGet(op.ra, XmmType::Float); const XmmLink& vb = XmmGet(op.rb, XmmType::Float); const XmmLink& vb_base = XmmAlloc(); const XmmLink& ymul = XmmAlloc(); const XmmLink& temp_reg = XmmAlloc(); const XmmLink& temp_reg2 = XmmAlloc(); c->movdqa(vb_base, vb); c->movdqa(ymul, vb); c->movdqa(temp_reg, va); 
c->pand(vb_base, XmmConst(v128::from32p(0x007ffc00u))); c->pslld(vb_base, 9); c->pand(temp_reg, XmmConst(v128::from32p(0x7ffff))); // va_fraction c->pand(ymul, XmmConst(v128::from32p(0x3ff))); c->pmulld(ymul, temp_reg); c->movdqa(temp_reg, vb_base); c->psubd(temp_reg, ymul); // Makes signed comparison unsigned and determines if we need to adjust exponent auto xor_const = XmmConst(v128::from32p(0x80000000)); c->pxor(ymul, xor_const); c->pxor(vb_base, xor_const); c->pcmpgtd(ymul, vb_base); c->movdqa(vb_base, ymul); c->movdqa(temp_reg2, temp_reg); c->pand(temp_reg2, vb_base); c->psrld(temp_reg2, 8); // only shift right by 8 if exponent is adjusted c->xorps(vb_base, XmmConst(v128::from32p(0xFFFFFFFF))); // Invert the mask c->pand(temp_reg, vb_base); c->psrld(temp_reg, 9); // shift right by 9 if not adjusted c->por(temp_reg, temp_reg2); c->pand(vb, XmmConst(v128::from32p(0xff800000u))); c->pand(temp_reg, XmmConst(v128::from32p(~0xff800000u))); c->por(vb, temp_reg); c->pand(ymul, XmmConst(v128::from32p(1 << 23))); c->psubd(vb, ymul); c->movaps(SPU_OFF_128(gpr, op.rt), vb); } void spu_recompiler::HEQ(spu_opcode_t op) { c->mov(*addr, SPU_OFF_32(gpr, op.ra, &v128::_s32, 3)); c->cmp(*addr, SPU_OFF_32(gpr, op.rb, &v128::_s32, 3)); asmjit::Label label = c->newLabel(); asmjit::Label ret = c->newLabel(); c->je(label); after.emplace_back([=, this, pos = m_pos] { c->bind(label); c->lea(addr->r64(), get_pc(pos)); c->and_(*addr, 0x3fffc); c->mov(SPU_OFF_32(pc), *addr); c->mov(addr->r64(), reinterpret_cast<u64>(vm::base(0xffdead00))); c->mov(asmjit::x86::dword_ptr(addr->r64()), "HALT"_u32); c->jmp(ret); }); } void spu_recompiler::CFLTS(spu_opcode_t op) { const XmmLink& va = XmmGet(op.ra, XmmType::Float); const XmmLink& vi = XmmAlloc(); if (op.i8 != 173) c->mulps(va, XmmConst(v128::fromf32p(std::exp2(static_cast<float>(static_cast<s16>(173 - op.i8)))))); // scale c->movaps(vi, XmmConst(v128::fromf32p(std::exp2(31.f)))); c->cmpps(vi, va, 2); c->cvttps2dq(va, va); // convert to ints 
with truncation c->pxor(va, vi); // fix result saturation (0x80000000 -> 0x7fffffff) c->movdqa(SPU_OFF_128(gpr, op.rt), va); } void spu_recompiler::CFLTU(spu_opcode_t op) { const XmmLink& va = XmmGet(op.ra, XmmType::Float); const XmmLink& vs = XmmAlloc(); const XmmLink& vs2 = XmmAlloc(); const XmmLink& vs3 = XmmAlloc(); if (op.i8 != 173) c->mulps(va, XmmConst(v128::fromf32p(std::exp2(static_cast<float>(static_cast<s16>(173 - op.i8)))))); // scale if (utils::has_avx512()) { c->vcvttps2udq(vs, va); c->psrad(va, 31); c->pandn(va, vs); c->movdqa(SPU_OFF_128(gpr, op.rt), va); return; } c->movdqa(vs, va); c->psrad(va, 31); c->andnps(va, vs); c->movaps(vs, va); // copy scaled value c->movaps(vs2, va); c->movaps(vs3, XmmConst(v128::fromf32p(std::exp2(31.f)))); c->subps(vs2, vs3); c->cmpps(vs3, vs, 2); c->andps(vs2, vs3); c->cvttps2dq(va, va); c->cmpps(vs, XmmConst(v128::fromf32p(std::exp2(32.f))), 5); c->cvttps2dq(vs2, vs2); c->por(va, vs); c->por(va, vs2); c->movdqa(SPU_OFF_128(gpr, op.rt), va); } void spu_recompiler::CSFLT(spu_opcode_t op) { const XmmLink& va = XmmGet(op.ra, XmmType::Int); c->cvtdq2ps(va, va); // convert to floats if (op.i8 != 155) c->mulps(va, XmmConst(v128::fromf32p(std::exp2(static_cast<float>(static_cast<s16>(op.i8 - 155)))))); // scale c->movaps(SPU_OFF_128(gpr, op.rt), va); } void spu_recompiler::CUFLT(spu_opcode_t op) { const XmmLink& va = XmmGet(op.ra, XmmType::Int); const XmmLink& v1 = XmmAlloc(); if (utils::has_avx512()) { c->vcvtudq2ps(va, va); } else { c->movdqa(v1, va); c->pand(va, XmmConst(v128::from32p(0x7fffffff))); c->cvtdq2ps(va, va); // convert to floats c->psrad(v1, 31); // generate mask from sign bit c->andps(v1, XmmConst(v128::fromf32p(std::exp2(31.f)))); // generate correction component c->addps(va, v1); // add correction component } if (op.i8 != 155) c->mulps(va, XmmConst(v128::fromf32p(std::exp2(static_cast<float>(static_cast<s16>(op.i8 - 155)))))); // scale c->movaps(SPU_OFF_128(gpr, op.rt), va); } void 
spu_recompiler::BRZ(spu_opcode_t op) { const u32 target = spu_branch_target(m_pos, op.i16); if (target == m_pos + 4) { return; } asmjit::Label branch_label = c->newLabel(); c->cmp(SPU_OFF_32(gpr, op.rt, &v128::_u32, 3), 0); c->je(branch_label); after.emplace_back([=, this]() { c->align(asmjit::AlignMode::kCode, 16); c->bind(branch_label); branch_fixed(target); }); } void spu_recompiler::STQA(spu_opcode_t op) { if (utils::has_ssse3()) { const XmmLink& vt = XmmGet(op.rt, XmmType::Int); c->pshufb(vt, XmmConst(v128::from32r(0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f))); c->movdqa(asmjit::x86::oword_ptr(*ls, spu_ls_target(0, op.i16)), vt); } else { c->mov(*qw0, SPU_OFF_64(gpr, op.rt, &v128::_u64, 0)); c->mov(*qw1, SPU_OFF_64(gpr, op.rt, &v128::_u64, 1)); c->bswap(*qw0); c->bswap(*qw1); c->mov(asmjit::x86::qword_ptr(*ls, spu_ls_target(0, op.i16) + 0), *qw1); c->mov(asmjit::x86::qword_ptr(*ls, spu_ls_target(0, op.i16) + 8), *qw0); } } void spu_recompiler::BRNZ(spu_opcode_t op) { const u32 target = spu_branch_target(m_pos, op.i16); if (target == m_pos + 4) { return; } asmjit::Label branch_label = c->newLabel(); c->cmp(SPU_OFF_32(gpr, op.rt, &v128::_u32, 3), 0); c->jne(branch_label); after.emplace_back([=, this]() { c->align(asmjit::AlignMode::kCode, 16); c->bind(branch_label); branch_fixed(target); }); } void spu_recompiler::BRHZ(spu_opcode_t op) { const u32 target = spu_branch_target(m_pos, op.i16); if (target == m_pos + 4) { return; } asmjit::Label branch_label = c->newLabel(); c->cmp(SPU_OFF_16(gpr, op.rt, &v128::_u16, 6), 0); c->je(branch_label); after.emplace_back([=, this]() { c->align(asmjit::AlignMode::kCode, 16); c->bind(branch_label); branch_fixed(target); }); } void spu_recompiler::BRHNZ(spu_opcode_t op) { const u32 target = spu_branch_target(m_pos, op.i16); if (target == m_pos + 4) { return; } asmjit::Label branch_label = c->newLabel(); c->cmp(SPU_OFF_16(gpr, op.rt, &v128::_u16, 6), 0); c->jne(branch_label); after.emplace_back([=, this]() { 
c->align(asmjit::AlignMode::kCode, 16); c->bind(branch_label); branch_fixed(target); }); } void spu_recompiler::STQR(spu_opcode_t op) { c->lea(addr->r64(), get_pc(spu_ls_target(m_pos, op.i16))); c->and_(*addr, 0x3fff0); if (utils::has_ssse3()) { const XmmLink& vt = XmmGet(op.rt, XmmType::Int); c->pshufb(vt, XmmConst(v128::from32r(0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f))); c->movdqa(asmjit::x86::oword_ptr(*ls, addr->r64()), vt); } else { c->mov(*qw0, SPU_OFF_64(gpr, op.rt, &v128::_u64, 0)); c->mov(*qw1, SPU_OFF_64(gpr, op.rt, &v128::_u64, 1)); c->bswap(*qw0); c->bswap(*qw1); c->mov(asmjit::x86::qword_ptr(*ls, addr->r64(), 0, 0), *qw1); c->mov(asmjit::x86::qword_ptr(*ls, addr->r64(), 0, 8), *qw0); } } void spu_recompiler::BRA(spu_opcode_t op) { const u32 target = spu_branch_target(0, op.i16); branch_fixed(target, true); m_pos = -1; } void spu_recompiler::LQA(spu_opcode_t op) { if (utils::has_ssse3()) { const XmmLink& vt = XmmAlloc(); c->movdqa(vt, asmjit::x86::oword_ptr(*ls, spu_ls_target(0, op.i16))); c->pshufb(vt, XmmConst(v128::from32r(0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f))); c->movdqa(SPU_OFF_128(gpr, op.rt), vt); } else { c->mov(*qw0, asmjit::x86::qword_ptr(*ls, spu_ls_target(0, op.i16) + 0)); c->mov(*qw1, asmjit::x86::qword_ptr(*ls, spu_ls_target(0, op.i16) + 8)); c->bswap(*qw0); c->bswap(*qw1); c->mov(SPU_OFF_64(gpr, op.rt, &v128::_u64, 0), *qw1); c->mov(SPU_OFF_64(gpr, op.rt, &v128::_u64, 1), *qw0); } } void spu_recompiler::BRASL(spu_opcode_t op) { const u32 target = spu_branch_target(0, op.i16); const XmmLink& vr = XmmAlloc(); c->lea(addr->r64(), get_pc(m_pos + 4)); c->and_(*addr, 0x3fffc); c->movd(vr, *addr); c->pslldq(vr, 12); c->movdqa(SPU_OFF_128(gpr, op.rt), vr); branch_set_link(m_pos + 4); branch_fixed(target, true); m_pos = -1; } void spu_recompiler::BR(spu_opcode_t op) { const u32 target = spu_branch_target(m_pos, op.i16); if (target != m_pos + 4) { branch_fixed(target); m_pos = -1; } } void spu_recompiler::FSMBI(spu_opcode_t op) { 
v128 data; for (u32 i = 0; i < 16; i++) data._u8[i] = op.i16 & (1u << i) ? 0xff : 0; const XmmLink& vr = XmmAlloc(); c->movdqa(vr, XmmConst(data)); c->movdqa(SPU_OFF_128(gpr, op.rt), vr); } void spu_recompiler::BRSL(spu_opcode_t op) { const u32 target = spu_branch_target(m_pos, op.i16); const XmmLink& vr = XmmAlloc(); c->lea(addr->r64(), get_pc(m_pos + 4)); c->and_(*addr, 0x3fffc); c->movd(vr, *addr); c->pslldq(vr, 12); c->movdqa(SPU_OFF_128(gpr, op.rt), vr); if (target != m_pos + 4) { branch_set_link(m_pos + 4); branch_fixed(target); m_pos = -1; } } void spu_recompiler::LQR(spu_opcode_t op) { c->lea(addr->r64(), get_pc(spu_ls_target(m_pos, op.i16))); c->and_(*addr, 0x3fff0); if (utils::has_ssse3()) { const XmmLink& vt = XmmAlloc(); c->movdqa(vt, asmjit::x86::oword_ptr(*ls, addr->r64())); c->pshufb(vt, XmmConst(v128::from32r(0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f))); c->movdqa(SPU_OFF_128(gpr, op.rt), vt); } else { c->mov(*qw0, asmjit::x86::qword_ptr(*ls, addr->r64(), 0, 0)); c->mov(*qw1, asmjit::x86::qword_ptr(*ls, addr->r64(), 0, 8)); c->bswap(*qw0); c->bswap(*qw1); c->mov(SPU_OFF_64(gpr, op.rt, &v128::_u64, 0), *qw1); c->mov(SPU_OFF_64(gpr, op.rt, &v128::_u64, 1), *qw0); } } void spu_recompiler::IL(spu_opcode_t op) { const XmmLink& vr = XmmAlloc(); c->movdqa(vr, XmmConst(v128::from32p(op.si16))); c->movdqa(SPU_OFF_128(gpr, op.rt), vr); } void spu_recompiler::ILHU(spu_opcode_t op) { const XmmLink& vr = XmmAlloc(); c->movdqa(vr, XmmConst(v128::from32p(op.i16 << 16))); c->movdqa(SPU_OFF_128(gpr, op.rt), vr); } void spu_recompiler::ILH(spu_opcode_t op) { const XmmLink& vr = XmmAlloc(); c->movdqa(vr, XmmConst(v128::from16p(op.i16))); c->movdqa(SPU_OFF_128(gpr, op.rt), vr); } void spu_recompiler::IOHL(spu_opcode_t op) { const XmmLink& vt = XmmGet(op.rt, XmmType::Int); c->por(vt, XmmConst(v128::from32p(op.i16))); c->movdqa(SPU_OFF_128(gpr, op.rt), vt); } void spu_recompiler::ORI(spu_opcode_t op) { const XmmLink& va = XmmGet(op.ra, XmmType::Int); if (op.si10) 
c->por(va, XmmConst(v128::from32p(op.si10))); c->movdqa(SPU_OFF_128(gpr, op.rt), va); } void spu_recompiler::ORHI(spu_opcode_t op) { const XmmLink& va = XmmGet(op.ra, XmmType::Int); c->por(va, XmmConst(v128::from16p(op.si10))); c->movdqa(SPU_OFF_128(gpr, op.rt), va); } void spu_recompiler::ORBI(spu_opcode_t op) { const XmmLink& va = XmmGet(op.ra, XmmType::Int); c->por(va, XmmConst(v128::from8p(op.si10))); c->movdqa(SPU_OFF_128(gpr, op.rt), va); } void spu_recompiler::SFI(spu_opcode_t op) { const XmmLink& vr = XmmAlloc(); c->movdqa(vr, XmmConst(v128::from32p(op.si10))); c->psubd(vr, SPU_OFF_128(gpr, op.ra)); c->movdqa(SPU_OFF_128(gpr, op.rt), vr); } void spu_recompiler::SFHI(spu_opcode_t op) { const XmmLink& vr = XmmAlloc(); c->movdqa(vr, XmmConst(v128::from16p(op.si10))); c->psubw(vr, SPU_OFF_128(gpr, op.ra)); c->movdqa(SPU_OFF_128(gpr, op.rt), vr); } void spu_recompiler::ANDI(spu_opcode_t op) { const XmmLink& va = XmmGet(op.ra, XmmType::Int); c->pand(va, XmmConst(v128::from32p(op.si10))); c->movdqa(SPU_OFF_128(gpr, op.rt), va); } void spu_recompiler::ANDHI(spu_opcode_t op) { const XmmLink& va = XmmGet(op.ra, XmmType::Int); c->pand(va, XmmConst(v128::from16p(op.si10))); c->movdqa(SPU_OFF_128(gpr, op.rt), va); } void spu_recompiler::ANDBI(spu_opcode_t op) { const XmmLink& va = XmmGet(op.ra, XmmType::Int); c->pand(va, XmmConst(v128::from8p(op.si10))); c->movdqa(SPU_OFF_128(gpr, op.rt), va); } void spu_recompiler::AI(spu_opcode_t op) { // add const XmmLink& va = XmmGet(op.ra, XmmType::Int); c->paddd(va, XmmConst(v128::from32p(op.si10))); c->movdqa(SPU_OFF_128(gpr, op.rt), va); } void spu_recompiler::AHI(spu_opcode_t op) { // add const XmmLink& va = XmmGet(op.ra, XmmType::Int); c->paddw(va, XmmConst(v128::from16p(op.si10))); c->movdqa(SPU_OFF_128(gpr, op.rt), va); } void spu_recompiler::STQD(spu_opcode_t op) { c->mov(*addr, SPU_OFF_32(gpr, op.ra, &v128::_u32, 3)); if (op.si10) c->add(*addr, op.si10 * 16); c->and_(*addr, 0x3fff0); if (utils::has_ssse3()) { const 
XmmLink& vt = XmmGet(op.rt, XmmType::Int); c->pshufb(vt, XmmConst(v128::from32r(0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f))); c->movdqa(asmjit::x86::oword_ptr(*ls, addr->r64()), vt); } else { c->mov(*qw0, SPU_OFF_64(gpr, op.rt, &v128::_u64, 0)); c->mov(*qw1, SPU_OFF_64(gpr, op.rt, &v128::_u64, 1)); c->bswap(*qw0); c->bswap(*qw1); c->mov(asmjit::x86::qword_ptr(*ls, addr->r64(), 0, 0), *qw1); c->mov(asmjit::x86::qword_ptr(*ls, addr->r64(), 0, 8), *qw0); } } void spu_recompiler::LQD(spu_opcode_t op) { c->mov(*addr, SPU_OFF_32(gpr, op.ra, &v128::_u32, 3)); if (op.si10) c->add(*addr, op.si10 * 16); c->and_(*addr, 0x3fff0); if (utils::has_ssse3()) { const XmmLink& vt = XmmAlloc(); c->movdqa(vt, asmjit::x86::oword_ptr(*ls, addr->r64())); c->pshufb(vt, XmmConst(v128::from32r(0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f))); c->movdqa(SPU_OFF_128(gpr, op.rt), vt); } else { c->mov(*qw0, asmjit::x86::qword_ptr(*ls, addr->r64(), 0, 0)); c->mov(*qw1, asmjit::x86::qword_ptr(*ls, addr->r64(), 0, 8)); c->bswap(*qw0); c->bswap(*qw1); c->mov(SPU_OFF_64(gpr, op.rt, &v128::_u64, 0), *qw1); c->mov(SPU_OFF_64(gpr, op.rt, &v128::_u64, 1), *qw0); } } void spu_recompiler::XORI(spu_opcode_t op) { const XmmLink& va = XmmGet(op.ra, XmmType::Int); c->pxor(va, XmmConst(v128::from32p(op.si10))); c->movdqa(SPU_OFF_128(gpr, op.rt), va); } void spu_recompiler::XORHI(spu_opcode_t op) { const XmmLink& va = XmmGet(op.ra, XmmType::Int); c->pxor(va, XmmConst(v128::from16p(op.si10))); c->movdqa(SPU_OFF_128(gpr, op.rt), va); } void spu_recompiler::XORBI(spu_opcode_t op) { const XmmLink& va = XmmGet(op.ra, XmmType::Int); c->pxor(va, XmmConst(v128::from8p(op.si10))); c->movdqa(SPU_OFF_128(gpr, op.rt), va); } void spu_recompiler::CGTI(spu_opcode_t op) { const XmmLink& va = XmmGet(op.ra, XmmType::Int); c->pcmpgtd(va, XmmConst(v128::from32p(op.si10))); c->movdqa(SPU_OFF_128(gpr, op.rt), va); } void spu_recompiler::CGTHI(spu_opcode_t op) { const XmmLink& va = XmmGet(op.ra, XmmType::Int); c->pcmpgtw(va, 
XmmConst(v128::from16p(op.si10))); c->movdqa(SPU_OFF_128(gpr, op.rt), va); } void spu_recompiler::CGTBI(spu_opcode_t op) { const XmmLink& va = XmmGet(op.ra, XmmType::Int); c->pcmpgtb(va, XmmConst(v128::from8p(op.si10))); c->movdqa(SPU_OFF_128(gpr, op.rt), va); } void spu_recompiler::HGTI(spu_opcode_t op) { c->cmp(SPU_OFF_32(gpr, op.ra, &v128::_s32, 3), +op.si10); asmjit::Label label = c->newLabel(); asmjit::Label ret = c->newLabel(); c->jg(label); after.emplace_back([=, this, pos = m_pos] { c->bind(label); c->lea(addr->r64(), get_pc(pos)); c->and_(*addr, 0x3fffc); c->mov(SPU_OFF_32(pc), *addr); c->mov(addr->r64(), reinterpret_cast<u64>(vm::base(0xffdead00))); c->mov(asmjit::x86::dword_ptr(addr->r64()), "HALT"_u32); c->jmp(ret); }); } void spu_recompiler::CLGTI(spu_opcode_t op) { const XmmLink& va = XmmGet(op.ra, XmmType::Int); c->pxor(va, XmmConst(v128::from32p(0x80000000))); c->pcmpgtd(va, XmmConst(v128::from32p(op.si10 - 0x80000000))); c->movdqa(SPU_OFF_128(gpr, op.rt), va); } void spu_recompiler::CLGTHI(spu_opcode_t op) { const XmmLink& va = XmmGet(op.ra, XmmType::Int); c->pxor(va, XmmConst(v128::from16p(0x8000))); c->pcmpgtw(va, XmmConst(v128::from16p(op.si10 - 0x8000))); c->movdqa(SPU_OFF_128(gpr, op.rt), va); } void spu_recompiler::CLGTBI(spu_opcode_t op) { const XmmLink& va = XmmGet(op.ra, XmmType::Int); c->psubb(va, XmmConst(v128::from8p(0x80))); c->pcmpgtb(va, XmmConst(v128::from8p(op.si10 - 0x80))); c->movdqa(SPU_OFF_128(gpr, op.rt), va); } void spu_recompiler::HLGTI(spu_opcode_t op) { c->cmp(SPU_OFF_32(gpr, op.ra, &v128::_u32, 3), +op.si10); asmjit::Label label = c->newLabel(); asmjit::Label ret = c->newLabel(); c->ja(label); after.emplace_back([=, this, pos = m_pos] { c->bind(label); c->lea(addr->r64(), get_pc(pos)); c->and_(*addr, 0x3fffc); c->mov(SPU_OFF_32(pc), *addr); c->mov(addr->r64(), reinterpret_cast<u64>(vm::base(0xffdead00))); c->mov(asmjit::x86::dword_ptr(addr->r64()), "HALT"_u32); c->jmp(ret); }); } void spu_recompiler::MPYI(spu_opcode_t op) 
{ const XmmLink& va = XmmGet(op.ra, XmmType::Int); c->pmaddwd(va, XmmConst(v128::from32p(op.si10 & 0xffff))); c->movdqa(SPU_OFF_128(gpr, op.rt), va); } void spu_recompiler::MPYUI(spu_opcode_t op) { const XmmLink& va = XmmGet(op.ra, XmmType::Int); const XmmLink& vi = XmmAlloc(); const XmmLink& va2 = XmmAlloc(); c->movdqa(va2, va); c->movdqa(vi, XmmConst(v128::from32p(op.si10 & 0xffff))); c->pmulhuw(va, vi); c->pmullw(va2, vi); c->pslld(va, 16); c->por(va, va2); c->movdqa(SPU_OFF_128(gpr, op.rt), va); } void spu_recompiler::CEQI(spu_opcode_t op) { const XmmLink& va = XmmGet(op.ra, XmmType::Int); c->pcmpeqd(va, XmmConst(v128::from32p(op.si10))); c->movdqa(SPU_OFF_128(gpr, op.rt), va); } void spu_recompiler::CEQHI(spu_opcode_t op) { const XmmLink& va = XmmGet(op.ra, XmmType::Int); c->pcmpeqw(va, XmmConst(v128::from16p(op.si10))); c->movdqa(SPU_OFF_128(gpr, op.rt), va); } void spu_recompiler::CEQBI(spu_opcode_t op) { const XmmLink& va = XmmGet(op.ra, XmmType::Int); c->pcmpeqb(va, XmmConst(v128::from8p(op.si10))); c->movdqa(SPU_OFF_128(gpr, op.rt), va); } void spu_recompiler::HEQI(spu_opcode_t op) { c->cmp(SPU_OFF_32(gpr, op.ra, &v128::_u32, 3), +op.si10); asmjit::Label label = c->newLabel(); asmjit::Label ret = c->newLabel(); c->je(label); after.emplace_back([=, this, pos = m_pos] { c->bind(label); c->lea(addr->r64(), get_pc(pos)); c->and_(*addr, 0x3fffc); c->mov(SPU_OFF_32(pc), *addr); c->mov(addr->r64(), reinterpret_cast<u64>(vm::base(0xffdead00))); c->mov(asmjit::x86::dword_ptr(addr->r64()), "HALT"_u32); c->jmp(ret); }); } void spu_recompiler::HBRA([[maybe_unused]] spu_opcode_t op) { } void spu_recompiler::HBRR([[maybe_unused]] spu_opcode_t op) { } void spu_recompiler::ILA(spu_opcode_t op) { const XmmLink& vr = XmmAlloc(); c->movdqa(vr, XmmConst(v128::from32p(op.i18))); c->movdqa(SPU_OFF_128(gpr, op.rt), vr); } void spu_recompiler::SELB(spu_opcode_t op) { const XmmLink& vb = XmmGet(op.rb, XmmType::Int); const XmmLink& vc = XmmGet(op.rc, XmmType::Int); if 
(utils::has_avx512()) { c->vpternlogd(vc, vb, SPU_OFF_128(gpr, op.ra), 0xca /* A?B:C */); c->movdqa(SPU_OFF_128(gpr, op.rt4), vc); return; } if (utils::has_xop()) { c->vpcmov(vc, vb, SPU_OFF_128(gpr, op.ra), vc); c->movdqa(SPU_OFF_128(gpr, op.rt4), vc); return; } c->pand(vb, vc); c->pandn(vc, SPU_OFF_128(gpr, op.ra)); c->por(vb, vc); c->movdqa(SPU_OFF_128(gpr, op.rt4), vb); } void spu_recompiler::SHUFB(spu_opcode_t op) { if (0 && utils::has_avx512()) { // Deactivated due to poor performance of mask merge ops. const XmmLink& va = XmmGet(op.ra, XmmType::Int); const XmmLink& vb = XmmGet(op.rb, XmmType::Int); const XmmLink& vc = XmmGet(op.rc, XmmType::Int); const XmmLink& vt = XmmAlloc(); const XmmLink& vm = XmmAlloc(); c->vpcmpub(asmjit::x86::k1, vc, XmmConst(v128::from8p(-0x40)), 5 /* GE */); c->vpxor(vm, vc, XmmConst(v128::from8p(0xf))); c->setExtraReg(asmjit::x86::k1); c->z().vpblendmb(vc, vc, XmmConst(v128::from8p(-1))); // {k1} c->vpcmpub(asmjit::x86::k2, vm, XmmConst(v128::from8p(-0x20)), 5 /* GE */); c->vptestmb(asmjit::x86::k1, vm, XmmConst(v128::from8p(0x10))); c->vpshufb(vt, va, vm); c->setExtraReg(asmjit::x86::k2); c->z().vpblendmb(va, va, XmmConst(v128::from8p(0x7f))); // {k2} c->setExtraReg(asmjit::x86::k1); c->vpshufb(vt, vb, vm); // {k1} c->vpternlogd(vt, va, vc, 0xf6 /* orAxorBC */); c->movdqa(SPU_OFF_128(gpr, op.rt4), vt); return; } if (!utils::has_ssse3()) { return fall(op); } const XmmLink& va = XmmGet(op.ra, XmmType::Int); const XmmLink& vb = XmmGet(op.rb, XmmType::Int); const XmmLink& vc = XmmGet(op.rc, XmmType::Int); const XmmLink& vt = XmmAlloc(); const XmmLink& vm = XmmAlloc(); const XmmLink& v5 = XmmAlloc(); c->movdqa(vm, XmmConst(v128::from8p(static_cast<s8>(0xc0)))); if (utils::has_avx()) { c->vpand(v5, vc, XmmConst(v128::from8p(static_cast<s8>(0xe0)))); c->vpxor(vc, vc, XmmConst(v128::from8p(0xf))); c->vpshufb(va, va, vc); c->vpslld(vt, vc, 3); c->vpcmpeqb(v5, v5, vm); c->vpshufb(vb, vb, vc); c->vpand(vc, vc, vm); c->vpblendvb(vb, va, vb, 
vt); c->vpcmpeqb(vt, vc, vm); c->vpavgb(vt, vt, v5); c->vpor(vt, vt, vb); } else { c->movdqa(v5, vc); c->pand(v5, XmmConst(v128::from8p(static_cast<s8>(0xe0)))); c->movdqa(vt, vc); c->pand(vt, vm); c->pxor(vc, XmmConst(v128::from8p(0xf))); c->pshufb(va, vc); c->pshufb(vb, vc); c->pslld(vc, 3); c->pcmpeqb(v5, vm); // If true, result should become 0xFF c->pcmpeqb(vt, vm); // If true, result should become either 0xFF or 0x80 c->pcmpeqb(vm, vm); c->pcmpgtb(vc, vm); c->pand(va, vc); c->pandn(vc, vb); c->por(vc, va); // Select result value from va or vb c->pavgb(vt, v5); // Generate result constant: AVG(0xff, 0x00) == 0x80 c->por(vt, vc); } c->movdqa(SPU_OFF_128(gpr, op.rt4), vt); } void spu_recompiler::MPYA(spu_opcode_t op) { const XmmLink& va = XmmGet(op.ra, XmmType::Int); const XmmLink& vb = XmmGet(op.rb, XmmType::Int); const XmmLink& vi = XmmAlloc(); c->movdqa(vi, XmmConst(v128::from32p(0xffff))); c->pand(va, vi); c->pand(vb, vi); c->pmaddwd(va, vb); c->paddd(va, SPU_OFF_128(gpr, op.rc)); c->movdqa(SPU_OFF_128(gpr, op.rt4), va); } void spu_recompiler::FNMS(spu_opcode_t op) { const XmmLink& va = XmmGet(op.ra, XmmType::Float); const XmmLink& vb = XmmGet(op.rb, XmmType::Float); const XmmLink& mask = XmmAlloc(); const XmmLink& v1 = XmmAlloc(); const XmmLink& v2 = XmmAlloc(); c->movaps(mask, XmmConst(v128::from32p(0x7f800000))); c->movaps(v1, va); c->movaps(v2, vb); c->andps(va, mask); c->andps(vb, mask); c->cmpps(va, mask, 4); // va = ra == extended c->cmpps(vb, mask, 4); // vb = rb == extended c->andps(va, v1); // va = ra & ~ra_extended c->andps(vb, v2); // vb = rb & ~rb_extended c->mulps(va, vb); c->movaps(vb, SPU_OFF_128(gpr, op.rc)); c->subps(vb, va); c->movaps(SPU_OFF_128(gpr, op.rt4), vb); } void spu_recompiler::FMA(spu_opcode_t op) { const XmmLink& va = XmmGet(op.ra, XmmType::Float); const XmmLink& vb = XmmGet(op.rb, XmmType::Float); const XmmLink& mask = XmmAlloc(); const XmmLink& v1 = XmmAlloc(); const XmmLink& v2 = XmmAlloc(); c->movaps(mask, 
XmmConst(v128::from32p(0x7f800000))); c->movaps(v1, va); c->movaps(v2, vb); c->andps(va, mask); c->andps(vb, mask); c->cmpps(va, mask, 4); // va = ra == extended c->cmpps(vb, mask, 4); // vb = rb == extended c->andps(va, v1); // va = ra & ~ra_extended c->andps(vb, v2); // vb = rb & ~rb_extended c->mulps(va, vb); c->addps(va, SPU_OFF_128(gpr, op.rc)); c->movaps(SPU_OFF_128(gpr, op.rt4), va); } void spu_recompiler::FMS(spu_opcode_t op) { const XmmLink& va = XmmGet(op.ra, XmmType::Float); const XmmLink& vb = XmmGet(op.rb, XmmType::Float); const XmmLink& mask = XmmAlloc(); const XmmLink& v1 = XmmAlloc(); const XmmLink& v2 = XmmAlloc(); c->movaps(mask, XmmConst(v128::from32p(0x7f800000))); c->movaps(v1, va); c->movaps(v2, vb); c->andps(va, mask); c->andps(vb, mask); c->cmpps(va, mask, 4); // va = ra == extended c->cmpps(vb, mask, 4); // vb = rb == extended c->andps(va, v1); // va = ra & ~ra_extended c->andps(vb, v2); // vb = rb & ~rb_extended c->mulps(va, vb); c->subps(va, SPU_OFF_128(gpr, op.rc)); c->movaps(SPU_OFF_128(gpr, op.rt4), va); }
122,034
C++
.cpp
4,265
26.091676
129
0.633515
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
5,177
SPUAnalyser.cpp
RPCS3_rpcs3/rpcs3/Emu/Cell/SPUAnalyser.cpp
#include "stdafx.h" #include "SPUAnalyser.h" #include "SPUOpcodes.h" const extern spu_decoder<spu_itype> g_spu_itype{}; const extern spu_decoder<spu_iname> g_spu_iname{}; const extern spu_decoder<spu_iflag> g_spu_iflag{};
223
C++
.cpp
6
36
50
0.763889
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
true
false
false
true
false
false
5,178
SPULLVMRecompiler.cpp
RPCS3_rpcs3/rpcs3/Emu/Cell/SPULLVMRecompiler.cpp
#include "stdafx.h" #include "SPURecompiler.h" #include "Emu/System.h" #include "Emu/system_config.h" #include "Emu/IdManager.h" #include "Emu/Cell/timers.hpp" #include "Emu/Memory/vm_reservation.h" #include "Emu/RSX/Core/RSXReservationLock.hpp" #include "Crypto/sha1.h" #include "Utilities/JIT.h" #include "SPUThread.h" #include "SPUAnalyser.h" #include "SPUInterpreter.h" #include <algorithm> #include <thread> #include "util/v128.hpp" #include "util/simd.hpp" #include "util/sysinfo.hpp" const extern spu_decoder<spu_itype> g_spu_itype; const extern spu_decoder<spu_iname> g_spu_iname; const extern spu_decoder<spu_iflag> g_spu_iflag; #ifdef LLVM_AVAILABLE #include "Emu/CPU/CPUTranslator.h" #ifdef _MSC_VER #pragma warning(push, 0) #else #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wold-style-cast" #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wmissing-noreturn" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #endif #include <llvm/ADT/PostOrderIterator.h> #include <llvm/Analysis/PostDominators.h> #include <llvm/IR/InlineAsm.h> #include <llvm/IR/Verifier.h> #include <llvm/TargetParser/Host.h> #include <llvm/Transforms/Utils/BasicBlockUtils.h> #if LLVM_VERSION_MAJOR < 17 #include <llvm/IR/LegacyPassManager.h> #include <llvm/Transforms/Scalar.h> #include <llvm/Analysis/AliasAnalysis.h> #else #include <llvm/Analysis/CGSCCPassManager.h> #include <llvm/Analysis/LoopAnalysisManager.h> #include <llvm/IR/PassManager.h> #include <llvm/Passes/PassBuilder.h> #include <llvm/Transforms/Scalar/ADCE.h> #include <llvm/Transforms/Scalar/DeadStoreElimination.h> #include <llvm/Transforms/Scalar/EarlyCSE.h> #include <llvm/Transforms/Scalar/LICM.h> #include <llvm/Transforms/Scalar/LoopPassManager.h> #include <llvm/Transforms/Scalar/SimplifyCFG.h> #endif #ifdef _MSC_VER #pragma warning(pop) #else #pragma GCC diagnostic pop #endif #ifdef ARCH_ARM64 #include "Emu/CPU/Backends/AArch64/AArch64JIT.h" #endif class spu_llvm_recompiler : public 
spu_recompiler_base, public cpu_translator { // JIT Instance jit_compiler m_jit{{}, jit_compiler::cpu(g_cfg.core.llvm_cpu)}; // Interpreter table size power const u8 m_interp_magn; // Constant opcode bits u32 m_op_const_mask = -1; // Current function chunk entry point u32 m_entry; // Main entry point offset u32 m_base; // Module name std::string m_hash; // Patchpoint unique id u32 m_pp_id = 0; // Next opcode u32 m_next_op = 0; // Current function (chunk) llvm::Function* m_function; llvm::Value* m_thread; llvm::Value* m_lsptr; llvm::Value* m_interp_op; llvm::Value* m_interp_pc; llvm::Value* m_interp_table; llvm::Value* m_interp_7f0; llvm::Value* m_interp_regs; // Helpers llvm::Value* m_base_pc; llvm::Value* m_interp_pc_next; llvm::BasicBlock* m_interp_bblock; // i8*, contains constant vm::g_base_addr value llvm::Value* m_memptr; // Pointers to registers in the thread context std::array<llvm::Value*, s_reg_max> m_reg_addr; // Global variable (function table) llvm::GlobalVariable* m_function_table{}; // Global LUTs llvm::GlobalVariable* m_spu_frest_fraction_lut{}; llvm::GlobalVariable* m_spu_frsqest_fraction_lut{}; llvm::GlobalVariable* m_spu_frsqest_exponent_lut{}; // Helpers (interpreter) llvm::GlobalVariable* m_scale_float_to{}; llvm::GlobalVariable* m_scale_to_float{}; // Function for check_state execution llvm::Function* m_test_state{}; // Chunk for external tail call (dispatch) llvm::Function* m_dispatch{}; llvm::MDNode* m_md_unlikely; llvm::MDNode* m_md_likely; struct block_info { // Pointer to the analyser spu_recompiler_base::block_info* bb{}; // Current block's entry block llvm::BasicBlock* block; // Final block (for PHI nodes, set after completion) llvm::BasicBlock* block_end{}; // Additional blocks for sinking instructions after block_end: std::unordered_map<u32, llvm::BasicBlock*, value_hash<u32, 2>> block_edges; // Current register values std::array<llvm::Value*, s_reg_max> reg{}; // PHI nodes created for this block (if any) std::array<llvm::PHINode*, 
s_reg_max> phi{}; // Store instructions std::array<llvm::StoreInst*, s_reg_max> store{}; // Store reordering/elimination protection std::array<usz, s_reg_max> store_context_last_id = fill_array<usz>(0); // Protects against illegal forward ordering std::array<usz, s_reg_max> store_context_first_id = fill_array<usz>(usz{umax}); // Protects against illegal past store elimination (backwards ordering is not implemented) std::array<usz, s_reg_max> store_context_ctr = fill_array<usz>(1); // Store barrier counter bool has_gpr_memory_barriers = false; // Summarizes whether GPR barriers exist this block (as if checking all store_context_ctr entries) bool does_gpr_barrier_proceed_last_store(u32 i) const noexcept { const usz counter = store_context_ctr[i]; return counter != 1 && counter > store_context_last_id[i]; } bool does_gpr_barrier_preceed_first_store(u32 i) const noexcept { const usz counter = store_context_ctr[i]; const usz first_id = store_context_first_id[i]; return counter != 1 && first_id != umax && counter < first_id; } }; struct function_info { // Standard callable chunk llvm::Function* chunk{}; // Callable function llvm::Function* fn{}; // Registers possibly loaded in the entry block std::array<llvm::Value*, s_reg_max> load{}; }; // Current block block_info* m_block; // Current function or chunk function_info* m_finfo; // All blocks in the current function chunk std::unordered_map<u32, block_info, value_hash<u32, 2>> m_blocks; // Block list for processing std::vector<u32> m_block_queue; // All function chunks in current SPU compile unit std::unordered_map<u32, function_info, value_hash<u32, 2>> m_functions; // Function chunk list for processing std::vector<u32> m_function_queue; // Add or get the function chunk function_info* add_function(u32 addr) { // Enqueue if necessary const auto empl = m_functions.try_emplace(addr); if (!empl.second) { return &empl.first->second; } // Chunk function type // 0. Result (tail call target) // 1. Thread context // 2. 
Local storage pointer // 3. #if 0 const auto chunk_type = get_ftype<u8*, u8*, u8*, u32>(); #else const auto chunk_type = get_ftype<void, u8*, u8*, u32>(); #endif // Get function chunk name const std::string name = fmt::format("__spu-cx%05x-%s", addr, fmt::base57(be_t<u64>{m_hash_start})); llvm::Function* result = llvm::cast<llvm::Function>(m_module->getOrInsertFunction(name, chunk_type).getCallee()); // Set parameters result->setLinkage(llvm::GlobalValue::InternalLinkage); result->addParamAttr(0, llvm::Attribute::NoAlias); result->addParamAttr(1, llvm::Attribute::NoAlias); #if 1 result->setCallingConv(llvm::CallingConv::GHC); #endif empl.first->second.chunk = result; if (g_cfg.core.spu_block_size == spu_block_size_type::giga) { // Find good real function const auto ffound = m_funcs.find(addr); if (ffound != m_funcs.end() && ffound->second.good) { // Real function type (not equal to chunk type) // 4. $SP // 5. $3 const auto func_type = get_ftype<u32[4], u8*, u8*, u32, u32[4], u32[4]>(); const std::string fname = fmt::format("__spu-fx%05x-%s", addr, fmt::base57(be_t<u64>{m_hash_start})); llvm::Function* fn = llvm::cast<llvm::Function>(m_module->getOrInsertFunction(fname, func_type).getCallee()); fn->setLinkage(llvm::GlobalValue::InternalLinkage); fn->addParamAttr(0, llvm::Attribute::NoAlias); fn->addParamAttr(1, llvm::Attribute::NoAlias); #if 1 fn->setCallingConv(llvm::CallingConv::GHC); #endif empl.first->second.fn = fn; } } // Enqueue m_function_queue.push_back(addr); return &empl.first->second; } // Create tail call to the function chunk (non-tail calls are just out of question) void tail_chunk(llvm::FunctionCallee callee, llvm::Value* base_pc = nullptr) { if (!callee && !g_cfg.core.spu_verification) { // Disable patchpoints if verification is disabled callee = m_dispatch; } else if (!callee) { // Create branch patchpoint if chunk == nullptr ensure(m_finfo && (!m_finfo->fn || m_function == m_finfo->chunk)); // Register under a unique linkable name const 
std::string ppname = fmt::format("%s-pp-%u", m_hash, m_pp_id++); m_engine->updateGlobalMapping(ppname, reinterpret_cast<u64>(m_spurt->make_branch_patchpoint())); // Create function with not exactly correct type const auto ppfunc = llvm::cast<llvm::Function>(m_module->getOrInsertFunction(ppname, m_finfo->chunk->getFunctionType()).getCallee()); ppfunc->setCallingConv(m_finfo->chunk->getCallingConv()); if (m_finfo->chunk->getReturnType() != get_type<void>()) { m_ir->CreateRet(ppfunc); return; } callee = ppfunc; base_pc = m_ir->getInt32(0); } ensure(callee); auto call = m_ir->CreateCall(callee, {m_thread, m_lsptr, base_pc ? base_pc : m_base_pc}); auto func = m_finfo ? m_finfo->chunk : llvm::dyn_cast<llvm::Function>(callee.getCallee()); call->setCallingConv(func->getCallingConv()); call->setTailCall(); if (func->getReturnType() == get_type<void>()) { m_ir->CreateRetVoid(); } else { m_ir->CreateRet(call); } } // Call the real function void call_function(llvm::Function* fn, bool tail = false) { llvm::Value* lr{}; llvm::Value* sp{}; llvm::Value* r3{}; if (!m_finfo->fn && !m_block) { lr = m_ir->CreateLoad(get_type<u32>(), spu_ptr<u32>(&spu_thread::gpr, +s_reg_lr, &v128::_u32, 3)); sp = m_ir->CreateLoad(get_type<u32[4]>(), spu_ptr<u32[4]>(&spu_thread::gpr, +s_reg_sp)); r3 = m_ir->CreateLoad(get_type<u32[4]>(), spu_ptr<u32[4]>(&spu_thread::gpr, 3)); } else { lr = m_ir->CreateExtractElement(get_reg_fixed<u32[4]>(s_reg_lr).value, 3); sp = get_reg_fixed<u32[4]>(s_reg_sp).value; r3 = get_reg_fixed<u32[4]>(3).value; } const auto _call = m_ir->CreateCall(ensure(fn), {m_thread, m_lsptr, m_base_pc, sp, r3}); _call->setCallingConv(fn->getCallingConv()); // Tail call using loaded LR value (gateway from a chunk) if (!m_finfo->fn) { lr = m_ir->CreateAnd(lr, 0x3fffc); m_ir->CreateStore(lr, spu_ptr<u32>(&spu_thread::pc)); m_ir->CreateStore(_call, spu_ptr<u32[4]>(&spu_thread::gpr, 3)); m_ir->CreateBr(add_block_indirect({}, value<u32>(lr))); } else if (tail) { _call->setTailCall(); 
m_ir->CreateRet(_call); } else { // TODO: initialize $LR with a constant for (u32 i = 0; i < s_reg_max; i++) { if (i != s_reg_lr && i != s_reg_sp && (i < s_reg_80 || i > s_reg_127)) { m_block->reg[i] = m_ir->CreateLoad(get_reg_type(i), init_reg_fixed(i)); } } // Set result m_block->reg[3] = _call; } } // Emit return from the real function void ret_function() { m_ir->CreateRet(get_reg_fixed<u32[4]>(3).value); } void set_function(llvm::Function* func) { m_function = func; m_thread = func->getArg(0); m_lsptr = func->getArg(1); m_base_pc = func->getArg(2); m_reg_addr.fill(nullptr); m_block = nullptr; m_finfo = nullptr; m_blocks.clear(); m_block_queue.clear(); m_ir->SetInsertPoint(llvm::BasicBlock::Create(m_context, "", m_function)); m_memptr = m_ir->CreateLoad(get_type<u8*>(), spu_ptr<u8*>(&spu_thread::memory_base_addr)); } // Add block with current block as a predecessor llvm::BasicBlock* add_block(u32 target, bool absolute = false) { // Check the predecessor const bool pred_found = m_block_info[target / 4] && std::find(m_preds[target].begin(), m_preds[target].end(), m_pos) != m_preds[target].end(); if (m_blocks.empty()) { // Special case: first block, proceed normally if (auto fn = std::exchange(m_finfo->fn, nullptr)) { // Create a gateway call_function(fn, true); m_finfo->fn = fn; m_function = fn; m_thread = fn->getArg(0); m_lsptr = fn->getArg(1); m_base_pc = fn->getArg(2); m_ir->SetInsertPoint(llvm::BasicBlock::Create(m_context, "", fn)); m_memptr = m_ir->CreateLoad(get_type<u8*>(), spu_ptr<u8*>(&spu_thread::memory_base_addr)); // Load registers at the entry chunk for (u32 i = 0; i < s_reg_max; i++) { if (i >= s_reg_80 && i <= s_reg_127) { // TODO //m_finfo->load[i] = llvm::UndefValue::get(get_reg_type(i)); } m_finfo->load[i] = m_ir->CreateLoad(get_reg_type(i), init_reg_fixed(i)); } // Load $SP m_finfo->load[s_reg_sp] = fn->getArg(3); // Load first args m_finfo->load[3] = fn->getArg(4); } } else if (m_block_info[target / 4] && m_entry_info[target / 4] && 
!(pred_found && m_entry == target) && (!m_finfo->fn || !m_ret_info[target / 4])) { // Generate a tail call to the function chunk const auto cblock = m_ir->GetInsertBlock(); const auto result = llvm::BasicBlock::Create(m_context, "", m_function); m_ir->SetInsertPoint(result); const auto pfinfo = add_function(target); if (absolute) { ensure(!m_finfo->fn); const auto next = llvm::BasicBlock::Create(m_context, "", m_function); const auto fail = llvm::BasicBlock::Create(m_context, "", m_function); m_ir->CreateCondBr(m_ir->CreateICmpEQ(m_base_pc, m_ir->getInt32(m_base)), next, fail); m_ir->SetInsertPoint(fail); m_ir->CreateStore(m_ir->getInt32(target), spu_ptr<u32>(&spu_thread::pc)); tail_chunk(nullptr); m_ir->SetInsertPoint(next); } if (pfinfo->fn) { // Tail call to the real function call_function(pfinfo->fn, true); if (!result->getTerminator()) ret_function(); } else { // Just a boring tail call to another chunk update_pc(target); tail_chunk(pfinfo->chunk); } m_ir->SetInsertPoint(cblock); return result; } else if (!pred_found || !m_block_info[target / 4]) { if (m_block_info[target / 4]) { spu_log.error("[%s] [0x%x] Predecessor not found for target 0x%x (chunk=0x%x, entry=0x%x, size=%u)", m_hash, m_pos, target, m_entry, m_function_queue[0], m_size / 4); } const auto cblock = m_ir->GetInsertBlock(); const auto result = llvm::BasicBlock::Create(m_context, "", m_function); m_ir->SetInsertPoint(result); if (absolute) { ensure(!m_finfo->fn); m_ir->CreateStore(m_ir->getInt32(target), spu_ptr<u32>(&spu_thread::pc)); } else { update_pc(target); } tail_chunk(nullptr); m_ir->SetInsertPoint(cblock); return result; } auto& result = m_blocks[target].block; if (!result) { result = llvm::BasicBlock::Create(m_context, fmt::format("b-0x%x", target), m_function); // Add the block to the queue m_block_queue.push_back(target); } else if (m_block && m_blocks[target].block_end) { // Connect PHI nodes if necessary for (u32 i = 0; i < s_reg_max; i++) { if (const auto phi = 
m_blocks[target].phi[i]) { const auto typ = phi->getType() == get_type<f64[4]>() ? get_type<f64[4]>() : get_reg_type(i); phi->addIncoming(get_reg_fixed(i, typ), m_block->block_end); } } } return result; } template <typename T = u8> llvm::Value* _ptr(llvm::Value* base, u32 offset) { return m_ir->CreateGEP(get_type<u8>(), base, m_ir->getInt64(offset)); } template <typename T = u8> llvm::Value* _ptr(llvm::Value* base, llvm::Value* offset) { return m_ir->CreateGEP(get_type<u8>(), base, offset); } template <typename T, typename... Args> llvm::Value* spu_ptr(Args... offset_args) { return _ptr<T>(m_thread, ::offset32(offset_args...)); } template <typename T, typename... Args> llvm::Value* spu_ptr(value_t<u64> add, Args... offset_args) { const auto off = m_ir->CreateGEP(get_type<u8>(), m_thread, m_ir->getInt64(::offset32(offset_args...))); return m_ir->CreateAdd(off, add.value); } // Return default register type llvm::Type* get_reg_type(u32 index) { if (index < 128) { return get_type<u32[4]>(); } switch (index) { case s_reg_mfc_eal: case s_reg_mfc_lsa: return get_type<u32>(); case s_reg_mfc_tag: return get_type<u8>(); case s_reg_mfc_size: return get_type<u16>(); default: fmt::throw_exception("get_reg_type(%u): invalid register index", index); } } u32 get_reg_offset(u32 index) { if (index < 128) { return ::offset32(&spu_thread::gpr, index); } switch (index) { case s_reg_mfc_eal: return ::offset32(&spu_thread::ch_mfc_cmd, &spu_mfc_cmd::eal); case s_reg_mfc_lsa: return ::offset32(&spu_thread::ch_mfc_cmd, &spu_mfc_cmd::lsa); case s_reg_mfc_tag: return ::offset32(&spu_thread::ch_mfc_cmd, &spu_mfc_cmd::tag); case s_reg_mfc_size: return ::offset32(&spu_thread::ch_mfc_cmd, &spu_mfc_cmd::size); default: fmt::throw_exception("get_reg_offset(%u): invalid register index", index); } } llvm::Value* init_reg_fixed(u32 index) { if (!m_block) { return _ptr<u8>(m_thread, get_reg_offset(index)); } auto& ptr = ::at32(m_reg_addr, index); if (!ptr) { // Save and restore current insert point if 
necessary const auto block_cur = m_ir->GetInsertBlock(); // Emit register pointer at the beginning of the function chunk m_ir->SetInsertPoint(m_function->getEntryBlock().getTerminator()); ptr = _ptr<u8>(m_thread, get_reg_offset(index)); m_ir->SetInsertPoint(block_cur); } return ptr; } // Get pointer to the vector register (interpreter only) template <typename T, uint I> llvm::Value* init_vr(const bf_t<u32, I, 7>&) { if (!m_interp_magn) { m_interp_7f0 = m_ir->getInt32(0x7f0); m_interp_regs = _ptr(m_thread, get_reg_offset(0)); } // Extract reg index const auto isl = I >= 4 ? m_interp_op : m_ir->CreateShl(m_interp_op, u64{4 - I}); const auto isr = I <= 4 ? m_interp_op : m_ir->CreateLShr(m_interp_op, u64{I - 4}); const auto idx = m_ir->CreateAnd(I > 4 ? isr : isl, m_interp_7f0); // Pointer to the register return m_ir->CreateGEP(get_type<u8>(), m_interp_regs, m_ir->CreateZExt(idx, get_type<u64>())); } llvm::Value* double_as_uint64(llvm::Value* val) { return bitcast<u64[4]>(val); } llvm::Value* uint64_as_double(llvm::Value* val) { return bitcast<f64[4]>(val); } llvm::Value* double_to_xfloat(llvm::Value* val) { ensure(val && val->getType() == get_type<f64[4]>()); const auto d = double_as_uint64(val); const auto s = m_ir->CreateAnd(m_ir->CreateLShr(d, 32), 0x80000000); const auto m = m_ir->CreateXor(m_ir->CreateLShr(d, 29), 0x40000000); const auto r = m_ir->CreateOr(m_ir->CreateAnd(m, 0x7fffffff), s); return m_ir->CreateTrunc(m_ir->CreateSelect(m_ir->CreateIsNotNull(d), r, splat<u64[4]>(0).eval(m_ir)), get_type<u32[4]>()); } llvm::Value* xfloat_to_double(llvm::Value* val) { ensure(val && val->getType() == get_type<u32[4]>()); const auto x = m_ir->CreateZExt(val, get_type<u64[4]>()); const auto s = m_ir->CreateShl(m_ir->CreateAnd(x, 0x80000000), 32); const auto a = m_ir->CreateAnd(x, 0x7fffffff); const auto m = m_ir->CreateShl(m_ir->CreateAdd(a, splat<u64[4]>(0x1c0000000).eval(m_ir)), 29); const auto r = m_ir->CreateSelect(m_ir->CreateICmpSGT(a, 
splat<u64[4]>(0x7fffff).eval(m_ir)), m, splat<u64[4]>(0).eval(m_ir)); const auto f = m_ir->CreateOr(s, r); return uint64_as_double(f); } // Clamp double values to ±Smax, flush values smaller than ±Smin to positive zero llvm::Value* xfloat_in_double(llvm::Value* val) { ensure(val && val->getType() == get_type<f64[4]>()); const auto smax = uint64_as_double(splat<u64[4]>(0x47ffffffe0000000).eval(m_ir)); const auto smin = uint64_as_double(splat<u64[4]>(0x3810000000000000).eval(m_ir)); const auto d = double_as_uint64(val); const auto s = m_ir->CreateAnd(d, 0x8000000000000000); const auto a = uint64_as_double(m_ir->CreateAnd(d, 0x7fffffffe0000000)); const auto n = m_ir->CreateFCmpOLT(a, smax); const auto z = m_ir->CreateFCmpOLT(a, smin); const auto c = double_as_uint64(m_ir->CreateSelect(n, a, smax)); return m_ir->CreateSelect(z, fsplat<f64[4]>(0.).eval(m_ir), uint64_as_double(m_ir->CreateOr(c, s))); } // Expand 32-bit mask for xfloat values to 64-bit, 29 least significant bits are always zero llvm::Value* conv_xfloat_mask(llvm::Value* val) { const auto d = m_ir->CreateZExt(val, get_type<u64[4]>()); const auto s = m_ir->CreateShl(m_ir->CreateAnd(d, 0x80000000), 32); const auto e = m_ir->CreateLShr(m_ir->CreateAShr(m_ir->CreateShl(d, 33), 4), 1); return m_ir->CreateOr(s, e); } llvm::Value* get_reg_raw(u32 index) { if (!m_block || index >= m_block->reg.size()) { return nullptr; } return m_block->reg[index]; } llvm::Value* get_reg_fixed(u32 index, llvm::Type* type) { llvm::Value* dummy{}; auto& reg = *(m_block ? &::at32(m_block->reg, index) : &dummy); if (!reg) { // Load register value if necessary reg = m_finfo && m_finfo->load[index] ? 
m_finfo->load[index] : m_ir->CreateLoad(get_reg_type(index), init_reg_fixed(index)); } if (reg->getType() == get_type<f64[4]>()) { if (type == reg->getType()) { return reg; } return bitcast(double_to_xfloat(reg), type); } if (type == get_type<f64[4]>()) { return xfloat_to_double(bitcast<u32[4]>(reg)); } return bitcast(reg, type); } template <typename T = u32[4]> value_t<T> get_reg_fixed(u32 index) { value_t<T> r; r.value = get_reg_fixed(index, get_type<T>()); return r; } template <typename T = u32[4], uint I> value_t<T> get_vr(const bf_t<u32, I, 7>& index) { value_t<T> r; if ((m_op_const_mask & index.data_mask()) != index.data_mask()) { // Update const mask if necessary if (I >= (32u - m_interp_magn)) { m_op_const_mask |= index.data_mask(); } // Load reg if (get_type<T>() == get_type<f64[4]>()) { r.value = xfloat_to_double(m_ir->CreateLoad(get_type<u32[4]>(), init_vr<u32[4]>(index))); } else { r.value = m_ir->CreateLoad(get_type<T>(), init_vr<T>(index)); } } else { r.value = get_reg_fixed(index, get_type<T>()); } return r; } template <typename U, uint I> auto get_vr_as(U&&, const bf_t<u32, I, 7>& index) { return get_vr<typename llvm_expr_t<U>::type>(index); } template <typename T = u32[4], typename... Args> std::tuple<std::conditional_t<false, Args, value_t<T>>...> get_vrs(const Args&... args) { return {get_vr<T>(args)...}; } template <typename T = u32[4], uint I> llvm_match_t<T> match_vr(const bf_t<u32, I, 7>& index) { llvm_match_t<T> r; if (m_block) { auto v = ::at32(m_block->reg, index); if (v && v->getType() == get_type<T>()) { r.value = v; return r; } } return r; } template <typename U, uint I> auto match_vr_as(U&&, const bf_t<u32, I, 7>& index) { return match_vr<typename llvm_expr_t<U>::type>(index); } template <typename... Types, uint I, typename F> bool match_vr(const bf_t<u32, I, 7>& index, F&& pred) { return (( match_vr<Types>(index) ? pred(match_vr<Types>(index), match<Types>()) : false ) || ...); } template <typename T = u32[4], typename... 
Args> std::tuple<std::conditional_t<false, Args, llvm_match_t<T>>...> match_vrs(const Args&... args) { return {match_vr<T>(args)...}; } // Extract scalar value from the preferred slot template <typename T> auto get_scalar(value_t<T> value) { using e_type = std::remove_extent_t<T>; static_assert(sizeof(T) == 16 || std::is_same_v<f64[4], T>, "Unknown vector type"); if (auto [ok, v] = match_expr(value, vsplat<T>(match<e_type>())); ok) { return eval(v); } if constexpr (sizeof(e_type) == 1) { return eval(extract(value, 12)); } else if constexpr (sizeof(e_type) == 2) { return eval(extract(value, 6)); } else if constexpr (sizeof(e_type) == 4 || sizeof(T) == 32) { return eval(extract(value, 3)); } else { return eval(extract(value, 1)); } } // Splat scalar value from the preferred slot template <typename T> auto splat_scalar(T&& arg) { using VT = std::remove_extent_t<typename std::decay_t<T>::type>; if constexpr (sizeof(VT) == 1) { return zshuffle(std::forward<T>(arg), 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12); } else if constexpr (sizeof(VT) == 2) { return zshuffle(std::forward<T>(arg), 6, 6, 6, 6, 6, 6, 6, 6); } else if constexpr (sizeof(VT) == 4) { return zshuffle(std::forward<T>(arg), 3, 3, 3, 3); } else if constexpr (sizeof(VT) == 8) { return zshuffle(std::forward<T>(arg), 1, 1); } else { static_assert(sizeof(VT) == 16); return std::forward<T>(arg); } } void set_reg_fixed(u32 index, llvm::Value* value, bool fixup = true) { llvm::StoreInst* dummy{}; // Check ensure(!m_block || m_regmod[m_pos / 4] == index); // Test for special case const bool is_xfloat = value->getType() == get_type<f64[4]>(); // Clamp value if necessary const auto saved_value = is_xfloat && fixup ? 
xfloat_in_double(value) : value; // Set register value if (m_block) { #ifndef _WIN32 if (g_cfg.core.spu_debug) value->setName(fmt::format("result_0x%05x", m_pos)); #endif ::at32(m_block->reg, index) = saved_value; } // Get register location const auto addr = init_reg_fixed(index); auto& _store = *(m_block ? &m_block->store[index] : &dummy); // Erase previous dead store instruction if necessary if (_store) { if (m_block->store_context_last_id[index] == m_block->store_context_ctr[index]) { // Erase store of it is not preserved by ensure_gpr_stores() _store->eraseFromParent(); } } if (m_block) { // Keep the store's location in history of gpr preservaions m_block->store_context_last_id[index] = m_block->store_context_ctr[index]; m_block->store_context_first_id[index] = std::min<usz>(m_block->store_context_first_id[index], m_block->store_context_ctr[index]); } if (m_finfo && m_finfo->fn) { if (index <= 3 || (index >= s_reg_80 && index <= s_reg_127)) { // Don't save some registers in true functions return; } } // Write register to the context _store = m_ir->CreateStore(is_xfloat ? double_to_xfloat(saved_value) : m_ir->CreateBitCast(value, get_reg_type(index)), addr); } template <typename T, uint I> void set_vr(const bf_t<u32, I, 7>& index, T expr, std::function<llvm::KnownBits()> vr_assume = nullptr, bool fixup = true) { // Process expression const auto value = expr.eval(m_ir); // Test for special case const bool is_xfloat = value->getType() == get_type<f64[4]>(); if ((m_op_const_mask & index.data_mask()) != index.data_mask()) { // Update const mask if necessary if (I >= (32u - m_interp_magn)) { m_op_const_mask |= index.data_mask(); } // Clamp value if necessary const auto saved_value = is_xfloat && fixup ? xfloat_in_double(value) : value; // Store value m_ir->CreateStore(is_xfloat ? 
double_to_xfloat(saved_value) : m_ir->CreateBitCast(value, get_type<u32[4]>()), init_vr<u32[4]>(index)); return; } if (vr_assume) { } set_reg_fixed(index, value, fixup); } template <typename T = u32[4], uint I, uint N> value_t<T> get_imm(const bf_t<u32, I, N>& imm, bool mask = true) { if ((m_op_const_mask & imm.data_mask()) != imm.data_mask()) { // Update const mask if necessary if (I >= (32u - m_interp_magn)) { m_op_const_mask |= imm.data_mask(); } // Extract unsigned immediate (skip AND if mask == false or truncated anyway) value_t<T> r; r.value = m_interp_op; r.value = I == 0 ? r.value : m_ir->CreateLShr(r.value, u64{I}); r.value = !mask || N >= r.esize ? r.value : m_ir->CreateAnd(r.value, imm.data_mask() >> I); if constexpr (r.esize != 32) { r.value = m_ir->CreateZExtOrTrunc(r.value, get_type<T>()->getScalarType()); } if (r.is_vector) { r.value = m_ir->CreateVectorSplat(r.is_vector, r.value); } return r; } return eval(splat<T>(imm)); } template <typename T = u32[4], uint I, uint N> value_t<T> get_imm(const bf_t<s32, I, N>& imm) { if ((m_op_const_mask & imm.data_mask()) != imm.data_mask()) { // Update const mask if necessary if (I >= (32u - m_interp_magn)) { m_op_const_mask |= imm.data_mask(); } // Extract signed immediate (skip sign ext if truncated anyway) value_t<T> r; r.value = m_interp_op; r.value = I + N == 32 || N >= r.esize ? r.value : m_ir->CreateShl(r.value, u64{32u - I - N}); r.value = N == 32 || N >= r.esize ? r.value : m_ir->CreateAShr(r.value, u64{32u - N}); r.value = I == 0 || N < r.esize ? 
r.value : m_ir->CreateLShr(r.value, u64{I}); if constexpr (r.esize != 32) { r.value = m_ir->CreateSExtOrTrunc(r.value, get_type<T>()->getScalarType()); } if (r.is_vector) { r.value = m_ir->CreateVectorSplat(r.is_vector, r.value); } return r; } return eval(splat<T>(imm)); } // Get PC for given instruction address llvm::Value* get_pc(u32 addr) { return m_ir->CreateAdd(m_base_pc, m_ir->getInt32(addr - m_base)); } // Update PC for current or explicitly specified instruction address void update_pc(u32 target = -1) { m_ir->CreateStore(m_ir->CreateAnd(get_pc(target + 1 ? target : m_pos), 0x3fffc), spu_ptr<u32>(&spu_thread::pc))->setVolatile(true); } // Call cpu_thread::check_state if necessary and return or continue (full check) void check_state(u32 addr, bool may_be_unsafe_for_savestate = true) { const auto pstate = spu_ptr<u32>(&spu_thread::state); const auto _body = llvm::BasicBlock::Create(m_context, "", m_function); const auto check = llvm::BasicBlock::Create(m_context, "", m_function); m_ir->CreateCondBr(m_ir->CreateICmpEQ(m_ir->CreateLoad(get_type<u32>(), pstate, true), m_ir->getInt32(0)), _body, check, m_md_likely); m_ir->SetInsertPoint(check); update_pc(addr); if (may_be_unsafe_for_savestate && m_block && m_block->bb->preds.empty()) { may_be_unsafe_for_savestate = false; } if (may_be_unsafe_for_savestate) { m_ir->CreateStore(m_ir->getInt8(1), spu_ptr<u8>(&spu_thread::unsavable))->setVolatile(true); } m_ir->CreateCall(m_test_state, {m_thread}); if (may_be_unsafe_for_savestate) { m_ir->CreateStore(m_ir->getInt8(0), spu_ptr<u8>(&spu_thread::unsavable))->setVolatile(true); } m_ir->CreateBr(_body); m_ir->SetInsertPoint(_body); } void putllc16_pattern(const spu_program& /*prog*/, utils::address_range range) { // Prevent store elimination m_block->store_context_ctr[s_reg_mfc_eal]++; m_block->store_context_ctr[s_reg_mfc_lsa]++; m_block->store_context_ctr[s_reg_mfc_tag]++; m_block->store_context_ctr[s_reg_mfc_size]++; static const auto on_fail = [](spu_thread* _spu, u32 
addr) { if (const u32 raddr = _spu->raddr) { // Last check for event before we clear the reservation if (~_spu->ch_events.load().events & SPU_EVENT_LR) { if (raddr == addr) { _spu->set_events(SPU_EVENT_LR); } else { _spu->get_events(SPU_EVENT_LR); } } _spu->raddr = 0; } }; const union putllc16_info { u32 data; bf_t<u32, 30, 2> type; bf_t<u32, 29, 1> runtime16_select; bf_t<u32, 28, 1> no_notify; bf_t<u32, 18, 8> reg; bf_t<u32, 0, 18> off18; bf_t<u32, 0, 8> reg2; } info = std::bit_cast<putllc16_info>(range.end); enum : u32 { v_const = 0, v_relative = 1, v_reg_offs = 2, v_reg2 = 3, }; const auto _raddr_match = llvm::BasicBlock::Create(m_context, "__raddr_match", m_function); const auto _lock_success = llvm::BasicBlock::Create(m_context, "__putllc16_lock", m_function); const auto _begin_op = llvm::BasicBlock::Create(m_context, "__putllc16_begin", m_function); const auto _repeat_lock = llvm::BasicBlock::Create(m_context, "__putllc16_repeat", m_function); const auto _repeat_lock_fail = llvm::BasicBlock::Create(m_context, "__putllc16_lock_fail", m_function); const auto _success = llvm::BasicBlock::Create(m_context, "__putllc16_success", m_function); const auto _inc_res = llvm::BasicBlock::Create(m_context, "__putllc16_inc_resv", m_function); const auto _inc_res_unlocked = llvm::BasicBlock::Create(m_context, "__putllc16_inc_resv_unlocked", m_function); const auto _success_and_unlock = llvm::BasicBlock::Create(m_context, "__putllc16_succ_unlock", m_function); const auto _fail = llvm::BasicBlock::Create(m_context, "__putllc16_fail", m_function); const auto _fail_and_unlock = llvm::BasicBlock::Create(m_context, "__putllc16_unlock", m_function); const auto _final = llvm::BasicBlock::Create(m_context, "__putllc16_final", m_function); const auto _eal = (get_reg_fixed<u32>(s_reg_mfc_eal) & -128).eval(m_ir); const auto _raddr = m_ir->CreateLoad(get_type<u32>(), spu_ptr<u32>(&spu_thread::raddr)); m_ir->CreateCondBr(m_ir->CreateAnd(m_ir->CreateICmpEQ(_eal, _raddr), 
m_ir->CreateIsNotNull(_raddr)), _raddr_match, _fail, m_md_likely); m_ir->SetInsertPoint(_raddr_match); value_t<u32> eal_val; eal_val.value = _eal; auto get_reg32 = [&](u32 reg) { if (get_reg_type(reg) != get_type<u32[4]>()) { return get_reg_fixed(reg, get_type<u32>()); } return extract(get_reg_fixed(reg), 3).eval(m_ir); }; const auto _lsa = (get_reg_fixed<u32>(s_reg_mfc_lsa) & 0x3ff80).eval(m_ir); llvm::Value* dest{}; if (info.type == v_const) { dest = m_ir->getInt32(info.off18); } else if (info.type == v_relative) { dest = m_ir->CreateAnd(get_pc(spu_branch_target(info.off18 + m_base)), 0x3fff0); } else if (info.type == v_reg_offs) { dest = m_ir->CreateAnd(m_ir->CreateAdd(get_reg32(info.reg), m_ir->getInt32(info.off18)), 0x3fff0); } else { dest = m_ir->CreateAnd(m_ir->CreateAdd(get_reg32(info.reg), get_reg32(info.reg2)), 0x3fff0); } if (g_cfg.core.rsx_accurate_res_access) { const auto success = call("spu_putllc16_rsx_res", +[](spu_thread* _spu, u32 ls_dst, u32 lsa, u32 eal, u32 notify) -> bool { const u32 raddr = eal; const v128 rdata = read_from_ptr<v128>(_spu->rdata, ls_dst % 0x80); const v128 to_write = _spu->_ref<const nse_t<v128>>(ls_dst); const auto dest = raddr | (ls_dst & 127); const auto _dest = vm::get_super_ptr<atomic_t<nse_t<v128>>>(dest); if (rdata == to_write || ((lsa ^ ls_dst) & (SPU_LS_SIZE - 128))) { vm::reservation_update(raddr); _spu->ch_atomic_stat.set_value(MFC_PUTLLC_SUCCESS); _spu->raddr = 0; return true; } auto& res = vm::reservation_acquire(eal); if (res % 128) { return false; } { rsx::reservation_lock rsx_lock(raddr, 128); // Touch memory utils::trigger_write_page_fault(vm::base(dest ^ (4096 / 2))); auto [old_res, ok] = res.fetch_op([&](u64& rval) { if (rval % 128) { return false; } rval |= 127; return true; }); if (!ok) { return false; } if (!_dest->compare_and_swap_test(rdata, to_write)) { res.release(old_res); return false; } // Success res.release(old_res + 128); } _spu->ch_atomic_stat.set_value(MFC_PUTLLC_SUCCESS); _spu->raddr = 0; if 
(notify) { res.notify_all(); } return true; }, m_thread, dest, _lsa, _eal, m_ir->getInt32(!info.no_notify)); m_ir->CreateCondBr(success, _final, _fail); m_ir->SetInsertPoint(_fail); call("PUTLLC16_fail", +on_fail, m_thread, _eal); m_ir->CreateStore(m_ir->getInt64(spu_channel::bit_count | MFC_PUTLLC_FAILURE), spu_ptr<u64>(&spu_thread::ch_atomic_stat)); m_ir->CreateBr(_final); m_ir->SetInsertPoint(_final); return; } const auto diff = m_ir->CreateZExt(m_ir->CreateSub(dest, _lsa), get_type<u64>()); const auto _new = m_ir->CreateAlignedLoad(get_type<u128>(), _ptr<u128>(m_lsptr, dest), llvm::MaybeAlign{16}); const auto _rdata = m_ir->CreateAlignedLoad(get_type<u128>(), _ptr<u128>(spu_ptr<u8>(&spu_thread::rdata), m_ir->CreateAnd(diff, 0x70)), llvm::MaybeAlign{16}); const bool is_accurate_op = !!g_cfg.core.spu_accurate_reservations; const auto compare_data_change_res = is_accurate_op ? m_ir->getTrue() : m_ir->CreateICmpNE(_new, _rdata); if (info.runtime16_select) { m_ir->CreateCondBr(m_ir->CreateAnd(m_ir->CreateICmpULT(diff, m_ir->getInt64(128)), compare_data_change_res), _begin_op, _inc_res, m_md_likely); } else { m_ir->CreateCondBr(compare_data_change_res, _begin_op, _inc_res, m_md_unlikely); } m_ir->SetInsertPoint(_begin_op); // Touch memory (on the opposite side of the page) m_ir->CreateAtomicRMW(llvm::AtomicRMWInst::Or, _ptr<u8>(m_memptr, m_ir->CreateXor(_eal, 4096 / 2)), m_ir->getInt8(0), llvm::MaybeAlign{16}, llvm::AtomicOrdering::SequentiallyConsistent); const auto rptr = _ptr<u64>(m_ir->CreateLoad(get_type<u8*>(), spu_ptr<u8*>(&spu_thread::reserv_base_addr)), ((eal_val & 0xff80) >> 1).eval(m_ir)); const auto rtime = m_ir->CreateLoad(get_type<u64>(), spu_ptr<u64>(&spu_thread::rtime)); m_ir->CreateBr(_repeat_lock); m_ir->SetInsertPoint(_repeat_lock); const auto rval = m_ir->CreatePHI(get_type<u64>(), 2); rval->addIncoming(rtime, _begin_op); // Lock reservation const auto cmp_res = m_ir->CreateAtomicCmpXchg(rptr, rval, m_ir->CreateOr(rval, 0x7f), 
llvm::MaybeAlign{16}, llvm::AtomicOrdering::SequentiallyConsistent, llvm::AtomicOrdering::SequentiallyConsistent); m_ir->CreateCondBr(m_ir->CreateExtractValue(cmp_res, 1), _lock_success, _repeat_lock_fail, m_md_likely); m_ir->SetInsertPoint(_repeat_lock_fail); const auto last_rval = m_ir->CreateExtractValue(cmp_res, 0); rval->addIncoming(last_rval, _repeat_lock_fail); m_ir->CreateCondBr(is_accurate_op ? m_ir->CreateICmpEQ(last_rval, rval) : m_ir->CreateIsNull(m_ir->CreateAnd(last_rval, 0x7f)), _repeat_lock, _fail); m_ir->SetInsertPoint(_lock_success); // Commit 16 bytes compare-exchange const auto sudo_ptr = _ptr<u8>(m_ir->CreateLoad(get_type<u8*>(), spu_ptr<u8*>(&spu_thread::memory_sudo_addr)), _eal); m_ir->CreateCondBr( m_ir->CreateExtractValue(m_ir->CreateAtomicCmpXchg(_ptr<u128>(sudo_ptr, diff), _rdata, _new, llvm::MaybeAlign{16}, llvm::AtomicOrdering::SequentiallyConsistent, llvm::AtomicOrdering::SequentiallyConsistent), 1) , _success_and_unlock , _fail_and_unlock); // Unlock and notify m_ir->SetInsertPoint(_success_and_unlock); m_ir->CreateAlignedStore(m_ir->CreateAdd(rval, m_ir->getInt64(128)), rptr, llvm::MaybeAlign{8}); if (!info.no_notify) { call("atomic_wait_engine::notify_all", static_cast<void(*)(const void*)>(atomic_wait_engine::notify_all), rptr); } m_ir->CreateBr(_success); // Perform unlocked vm::reservation_update if no physical memory changes needed m_ir->SetInsertPoint(_inc_res); const auto rptr2 = _ptr<u64>(m_ir->CreateLoad(get_type<u8*>(), spu_ptr<u8*>(&spu_thread::reserv_base_addr)), ((eal_val & 0xff80) >> 1).eval(m_ir)); llvm::Value* old_val{}; if (true || is_accurate_op) { old_val = m_ir->CreateLoad(get_type<u64>(), spu_ptr<u64>(&spu_thread::rtime)); } else { old_val = m_ir->CreateAlignedLoad(get_type<u64>(), rptr2, llvm::MaybeAlign{8}); m_ir->CreateCondBr(m_ir->CreateIsNotNull(m_ir->CreateAnd(old_val, 0x7f)), _success, _inc_res_unlocked); m_ir->SetInsertPoint(_inc_res_unlocked); } const auto cmp_res2 = m_ir->CreateAtomicCmpXchg(rptr2, 
// --- tail of putllc16_pattern (function begins above this chunk) ---
// Completes the unlocked rtime-increment cmpxchg started above.
old_val, m_ir->CreateAdd(old_val, m_ir->getInt64(128)), llvm::MaybeAlign{16}, llvm::AtomicOrdering::SequentiallyConsistent, llvm::AtomicOrdering::SequentiallyConsistent);

if (true || is_accurate_op)
{
	// NOTE(review): condition is deliberately forced on ("true ||"), so the unconditional-success
	// branch below is currently dead code kept for reference.
	m_ir->CreateCondBr(m_ir->CreateExtractValue(cmp_res2, 1), _success, _fail);
}
else
{
	m_ir->CreateBr(_success);
}

// Success path: publish MFC_PUTLLC_SUCCESS on the atomic channel and clear the reservation address
m_ir->SetInsertPoint(_success);
m_ir->CreateStore(m_ir->getInt64(spu_channel::bit_count | MFC_PUTLLC_SUCCESS), spu_ptr<u64>(&spu_thread::ch_atomic_stat));
m_ir->CreateStore(m_ir->getInt32(0), spu_ptr<u32>(&spu_thread::raddr));
m_ir->CreateBr(_final);

// Failure while the reservation is still locked: restore the original rtime value before failing
m_ir->SetInsertPoint(_fail_and_unlock);
m_ir->CreateAlignedStore(rval, rptr, llvm::MaybeAlign{8});
m_ir->CreateBr(_fail);

// Common failure path: raise LR event bookkeeping via helper, then publish MFC_PUTLLC_FAILURE
m_ir->SetInsertPoint(_fail);
call("PUTLLC16_fail", +on_fail, m_thread, _eal);
m_ir->CreateStore(m_ir->getInt64(spu_channel::bit_count | MFC_PUTLLC_FAILURE), spu_ptr<u64>(&spu_thread::ch_atomic_stat));
m_ir->CreateBr(_final);

m_ir->SetInsertPoint(_final);
}

// Emit an optimized PUTLLC sequence for the pattern where no reservation data needs to be
// compared or written back: only the reservation "time" counter is bumped atomically.
// Reads the MFC EAL register, compares it against the current reservation address (raddr),
// and reports MFC_PUTLLC_SUCCESS / MFC_PUTLLC_FAILURE on ch_atomic_stat accordingly.
void putllc0_pattern(const spu_program& /*prog*/, utils::address_range /*range*/)
{
	// Prevent store elimination: mark the MFC argument registers as having extra uses so the
	// dead-store passes in compile() do not erase their stores.
	m_block->store_context_ctr[s_reg_mfc_eal]++;
	m_block->store_context_ctr[s_reg_mfc_lsa]++;
	m_block->store_context_ctr[s_reg_mfc_tag]++;
	m_block->store_context_ctr[s_reg_mfc_size]++;

	// Out-of-line failure handler: sets (or schedules) the lost-reservation event and clears raddr
	static const auto on_fail = [](spu_thread* _spu, u32 addr)
	{
		if (const u32 raddr = _spu->raddr)
		{
			// Last check for event before we clear the reservation
			if (~_spu->ch_events.load().events & SPU_EVENT_LR)
			{
				if (raddr == addr)
				{
					_spu->set_events(SPU_EVENT_LR);
				}
				else
				{
					_spu->get_events(SPU_EVENT_LR);
				}
			}

			_spu->raddr = 0;
		}
	};

	const auto _next = llvm::BasicBlock::Create(m_context, "", m_function);
	const auto _next0 = llvm::BasicBlock::Create(m_context, "", m_function);
	const auto _fail = llvm::BasicBlock::Create(m_context, "", m_function);
	const auto _final = llvm::BasicBlock::Create(m_context, "", m_function);

	// EAL aligned down to the 128-byte reservation granule
	const auto _eal = (get_reg_fixed<u32>(s_reg_mfc_eal) & -128).eval(m_ir);
	const auto _raddr = m_ir->CreateLoad(get_type<u32>(), spu_ptr<u32>(&spu_thread::raddr));

	// Fast path requires an active reservation matching EAL
	m_ir->CreateCondBr(m_ir->CreateAnd(m_ir->CreateICmpEQ(_eal, _raddr), m_ir->CreateIsNotNull(_raddr)), _next, _fail, m_md_likely);
	m_ir->SetInsertPoint(_next);

	value_t<u32> eal_val;
	eal_val.value = _eal;

	// Pointer into the reservation table (reserv_base_addr + (eal & 0xff80) >> 1)
	const auto rptr = _ptr<u64>(m_ir->CreateLoad(get_type<u8*>(), spu_ptr<u8*>(&spu_thread::reserv_base_addr)), ((eal_val & 0xff80) >> 1).eval(m_ir));
	const auto rval = m_ir->CreateLoad(get_type<u64>(), spu_ptr<u64>(&spu_thread::rtime));

	// Try to advance the reservation counter by one step (128); with accurate reservations a
	// cmpxchg failure means the reservation was lost, otherwise success is assumed anyway.
	m_ir->CreateCondBr(
		m_ir->CreateExtractValue(m_ir->CreateAtomicCmpXchg(rptr, rval, m_ir->CreateAdd(rval, m_ir->getInt64(128)), llvm::MaybeAlign{16}, llvm::AtomicOrdering::SequentiallyConsistent, llvm::AtomicOrdering::SequentiallyConsistent), 1)
		, _next0
		, g_cfg.core.spu_accurate_reservations ? _fail : _next0); // Succeed unconditionally
	m_ir->SetInsertPoint(_next0);
	//call("atomic_wait_engine::notify_all", static_cast<void(*)(const void*)>(atomic_wait_engine::notify_all), rptr);
	m_ir->CreateStore(m_ir->getInt64(spu_channel::bit_count | MFC_PUTLLC_SUCCESS), spu_ptr<u64>(&spu_thread::ch_atomic_stat));
	m_ir->CreateBr(_final);

	m_ir->SetInsertPoint(_fail);
	call("PUTLLC0_fail", +on_fail, m_thread, _eal);
	m_ir->CreateStore(m_ir->getInt64(spu_channel::bit_count | MFC_PUTLLC_FAILURE), spu_ptr<u64>(&spu_thread::ch_atomic_stat));
	m_ir->CreateBr(_final);

	// Either way the reservation is gone afterwards
	m_ir->SetInsertPoint(_final);
	m_ir->CreateStore(m_ir->getInt32(0), spu_ptr<u32>(&spu_thread::raddr));
}

public:
	// interp_magn: non-zero selects interpreter compilation mode (see compile())
	spu_llvm_recompiler(u8 interp_magn = 0)
		: spu_recompiler_base()
		, cpu_translator(nullptr, false)
		, m_interp_magn(interp_magn)
	{
	}

	// One-time setup of the shared SPU runtime, branch-weight metadata and transform passes
	virtual void init() override
	{
		// Initialize if necessary
		if (!m_spurt)
		{
			m_spurt = &g_fxo->get<spu_runtime>();
			cpu_translator::initialize(m_jit.get_context(), m_jit.get_engine());

			const auto md_name = llvm::MDString::get(m_context, "branch_weights");
			const auto md_low = llvm::ValueAsMetadata::get(llvm::ConstantInt::get(GetType<u32>(), 1));
			const auto md_high = llvm::ValueAsMetadata::get(llvm::ConstantInt::get(GetType<u32>(),
			999)); // Metadata for branch weights

			// likely = taken-branch weighted 999:1, unlikely = the reverse
			m_md_likely = llvm::MDTuple::get(m_context, {md_name, md_high, md_low});
			m_md_unlikely = llvm::MDTuple::get(m_context, {md_name, md_low, md_high});

			// Initialize transform passes
			clear_transforms();
#ifdef ARCH_ARM64
			{
				// Do not fix up frames of helper functions generated by this recompiler itself
				auto should_exclude_function = [](const std::string& fn_name)
				{
					return fn_name.starts_with("spu_") || fn_name.starts_with("tr_");
				};

				aarch64::GHC_frame_preservation_pass::config_t config =
				{
					.debug_info = false,         // Set to "true" to insert debug frames on x27
					.use_stack_frames = false,   // We don't need this since the SPU GW allocates global scratch on the stack
					.hypervisor_context_offset = ::offset32(&spu_thread::hv_ctx),
					.exclusion_callback = should_exclude_function,
					.base_register_lookup = {}   // Unused, always x19 on SPU
				};

				// Create transform pass
				std::unique_ptr<translator_pass> ghc_fixup_pass = std::make_unique<aarch64::GHC_frame_preservation_pass>(config);

				// Register it
				register_transform_pass(ghc_fixup_pass);
			}
#endif
		}

		reset_transforms();
	}

	// Create the constant lookup tables used by FREST/FRSQEST emulation as private
	// module-level globals (one set per compiled module).
	void init_luts()
	{
		// LUTs for some instructions
		m_spu_frest_fraction_lut = new llvm::GlobalVariable(*m_module, llvm::ArrayType::get(GetType<u32>(), 32), true, llvm::GlobalValue::PrivateLinkage, llvm::ConstantDataArray::get(m_context, spu_frest_fraction_lut));
		m_spu_frsqest_fraction_lut = new llvm::GlobalVariable(*m_module, llvm::ArrayType::get(GetType<u32>(), 64), true, llvm::GlobalValue::PrivateLinkage, llvm::ConstantDataArray::get(m_context, spu_frsqest_fraction_lut));
		m_spu_frsqest_exponent_lut = new llvm::GlobalVariable(*m_module, llvm::ArrayType::get(GetType<u32>(), 256), true, llvm::GlobalValue::PrivateLinkage, llvm::ConstantDataArray::get(m_context, spu_frsqest_exponent_lut));
	}

	// Compile one SPU program into native code via LLVM and install it in the runtime.
	// Returns the entry pointer, or nullptr if the ubertrampoline rebuild failed.
	// An empty program in interpreter mode compiles the interpreter instead.
	virtual spu_function_t compile(spu_program&& _func) override
	{
		if (_func.data.empty() && m_interp_magn)
		{
			return compile_interpreter();
		}

		const u32 start0 = _func.entry_point;
		const usz func_size = _func.data.size();

		// Register the program with the runtime (may discover a duplicate already in flight)
		const auto add_loc = m_spurt->add_empty(std::move(_func));

		if
		(!add_loc)
		{
			return nullptr;
		}

		const spu_program& func = add_loc->data;

		if (func.entry_point != start0)
		{
			// Wait for the duplicate
			while (!add_loc->compiled)
			{
				add_loc->compiled.wait(nullptr);
			}

			return add_loc->compiled;
		}

		std::string log;
		bool add_to_file = false;

		// Mark for persistent caching exactly once (see the cache.add() calls at the end)
		if (auto& cache = g_fxo->get<spu_cache>(); cache && g_cfg.core.spu_cache && !add_loc->cached.exchange(1))
		{
			add_to_file = true;
		}

		{
			// Name the module/function after a SHA1 of the program contents
			sha1_context ctx;
			u8 output[20];

			sha1_starts(&ctx);
			sha1_update(&ctx, reinterpret_cast<const u8*>(func.data.data()), func.data.size() * 4);
			sha1_finish(&ctx, output);

			m_hash.clear();
			fmt::append(m_hash, "__spu-0x%05x-%s", func.entry_point, fmt::base57(output));

			be_t<u64> hash_start;
			std::memcpy(&hash_start, output, sizeof(hash_start));
			m_hash_start = hash_start;
		}

		spu_log.notice("Building function 0x%x... (size %u, %s)", func.entry_point, func.data.size(), m_hash);

		m_pos = func.lower_bound;
		m_base = func.entry_point;
		m_size = ::size32(func.data) * 4;
		const u32 start = m_pos;
		const u32 end = start + m_size;
		m_pp_id = 0;

		if (g_cfg.core.spu_debug && !add_loc->logged.exchange(1))
		{
			this->dump(func, log);
			fs::write_file(m_spurt->get_cache_path() + "spu.log", fs::write + fs::append, log);
		}

		using namespace llvm;

		m_engine->clearAllGlobalMappings();

		// Create LLVM module
		std::unique_ptr<Module> _module = std::make_unique<Module>(m_hash + ".obj", m_context);
		_module->setTargetTriple(jit_compiler::triple2());
		_module->setDataLayout(m_jit.get_engine().getTargetMachine()->createDataLayout());
		m_module = _module.get();

		// Initialize IR Builder
		IRBuilder<> irb(m_context);
		m_ir = &irb;

		// Add entry function (contains only state/code check)
		const auto main_func = llvm::cast<llvm::Function>(m_module->getOrInsertFunction(m_hash, get_ftype<void, u8*, u8*, u64>()).getCallee());
		const auto main_arg2 = main_func->getArg(2);
		main_func->setCallingConv(CallingConv::GHC);
		set_function(main_func);

		init_luts();

		// Start compilation
		const auto label_test = BasicBlock::Create(m_context, "", m_function);
		const auto label_diff = BasicBlock::Create(m_context, "", m_function);
		const auto label_body = BasicBlock::Create(m_context, "", m_function);
		const auto label_stop = BasicBlock::Create(m_context, "", m_function);

		// Load PC, which will be the actual value of 'm_base'
		m_base_pc = m_ir->CreateLoad(get_type<u32>(), spu_ptr<u32>(&spu_thread::pc));

		// Emit state check: any pending state flags divert to label_stop (escape)
		const auto pstate = spu_ptr<u32>(&spu_thread::state);
		m_ir->CreateCondBr(m_ir->CreateICmpNE(m_ir->CreateLoad(get_type<u32>(), pstate), m_ir->getInt32(0)), label_stop, label_test, m_md_unlikely);

		// Emit code check: verify LS contents still match the compiled program
		u32 check_iterations = 0;
		m_ir->SetInsertPoint(label_test);

		// Set block hash for profiling (if enabled)
		if (g_cfg.core.spu_prof && g_cfg.core.spu_verification)
			m_ir->CreateStore(m_ir->getInt64((m_hash_start & -65536)), spu_ptr<u64>(&spu_thread::block_hash));

		if (!g_cfg.core.spu_verification)
		{
			// Disable check (unsafe)
			m_ir->CreateBr(label_body);
		}
		else if (func.data.size() == 1)
		{
			// Single-word program: one scalar 32-bit compare suffices
			const auto pu32 = m_ir->CreateGEP(get_type<u8>(), m_lsptr, m_base_pc);
			const auto cond = m_ir->CreateICmpNE(m_ir->CreateLoad(get_type<u32>(), pu32), m_ir->getInt32(func.data[0]));
			m_ir->CreateCondBr(cond, label_diff, label_body, m_md_unlikely);
		}
		else if (func.data.size() == 2)
		{
			// Two-word program: one 64-bit compare (words packed little-end-first)
			const auto pu64 = m_ir->CreateGEP(get_type<u8>(), m_lsptr, m_base_pc);
			const auto cond = m_ir->CreateICmpNE(m_ir->CreateLoad(get_type<u64>(), pu64), m_ir->getInt64(static_cast<u64>(func.data[1]) << 32 | func.data[0]));
			m_ir->CreateCondBr(cond, label_diff, label_body, m_md_unlikely);
		}
		else
		{
			// General case: vectorized XOR-and-accumulate compare over the whole program
			u32 starta = start;

			// Skip holes at the beginning (giga only)
			for (u32 j = start; j < end; j += 4)
			{
				if (!func.data[(j - start) / 4])
				{
					starta += 4;
				}
				else
				{
					break;
				}
			}

			// Vector width selection: 64-byte AVX-512, 32-byte AVX, or 16-byte baseline
			u32 stride;
			u32 elements;
			u32 dwords;

			if (m_use_avx512 && g_cfg.core.full_width_avx512)
			{
				stride = 64;
				elements = 16;
				dwords = 8;
			}
			else if (m_use_avx)
			{
				stride = 32;
				elements = 8;
				dwords = 4;
			}
			else
			{
				stride = 16;
				elements = 4;
				dwords = 2;
			}

			// Get actual pc corresponding to the found beginning of the data
			llvm::Value* starta_pc = m_ir->CreateAnd(get_pc(starta), 0x3fffc);
			llvm::Value* data_addr = m_ir->CreateGEP(get_type<u8>(), m_lsptr, starta_pc);

			llvm::Value* acc = nullptr;

			for (u32 j = starta; j < end; j += stride)
			{
				int indices[16];
				bool holes = false;
				bool data = false;

				// Build a shuffle mask: out-of-range or zero (hole) words select the zero vector
				for (u32 i = 0; i < elements; i++)
				{
					const u32 k = j + i * 4;

					if (k < start || k >= end || !func.data[(k - start) / 4])
					{
						indices[i] = elements;
						holes = true;
					}
					else
					{
						indices[i] = i;
						data = true;
					}
				}

				if (!data)
				{
					// Skip full-sized holes
					continue;
				}

				llvm::Value* vls = nullptr;

				// Load unaligned code block from LS
				if (m_use_avx512 && g_cfg.core.full_width_avx512)
				{
					vls = m_ir->CreateAlignedLoad(get_type<u32[16]>(), _ptr<u32[16]>(data_addr, j - starta), llvm::MaybeAlign{4});
				}
				else if (m_use_avx)
				{
					vls = m_ir->CreateAlignedLoad(get_type<u32[8]>(), _ptr<u32[8]>(data_addr, j - starta), llvm::MaybeAlign{4});
				}
				else
				{
					vls = m_ir->CreateAlignedLoad(get_type<u32[4]>(), _ptr<u32[4]>(data_addr, j - starta), llvm::MaybeAlign{4});
				}

				// Mask if necessary
				if (holes)
				{
					vls = m_ir->CreateShuffleVector(vls, ConstantAggregateZero::get(vls->getType()), llvm::ArrayRef(indices, elements));
				}

				// Perform bitwise comparison and accumulate
				u32 words[16];

				for (u32 i = 0; i < elements; i++)
				{
					const u32 k = j + i * 4;
					words[i] = k >= start && k < end ? func.data[(k - start) / 4] : 0;
				}

				vls = m_ir->CreateXor(vls, ConstantDataVector::get(m_context, llvm::ArrayRef(words, elements)));
				acc = acc ?
				m_ir->CreateOr(acc, vls) : vls;
				check_iterations++;
			}

			// Pattern for PTEST: reduce the accumulated XOR result to a single non-zero test
			if (m_use_avx512 && g_cfg.core.full_width_avx512)
			{
				acc = m_ir->CreateBitCast(acc, get_type<u64[8]>());
			}
			else if (m_use_avx)
			{
				acc = m_ir->CreateBitCast(acc, get_type<u64[4]>());
			}
			else
			{
				acc = m_ir->CreateBitCast(acc, get_type<u64[2]>());
			}

			llvm::Value* elem = m_ir->CreateExtractElement(acc, u64{0});

			for (u32 i = 1; i < dwords; i++)
			{
				elem = m_ir->CreateOr(elem, m_ir->CreateExtractElement(acc, i));
			}

			// Compare result with zero
			const auto cond = m_ir->CreateICmpNE(elem, m_ir->getInt64(0));
			m_ir->CreateCondBr(cond, label_diff, label_body, m_md_unlikely);
		}

		// Increase block counter with statistics
		m_ir->SetInsertPoint(label_body);
		const auto pbcount = spu_ptr<u64>(&spu_thread::block_counter);
		m_ir->CreateStore(m_ir->CreateAdd(m_ir->CreateLoad(get_type<u64>(), pbcount), m_ir->getInt64(check_iterations)), pbcount);

		// Call the entry function chunk
		const auto entry_chunk = add_function(m_pos);
		const auto entry_call = m_ir->CreateCall(entry_chunk->chunk, {m_thread, m_lsptr, m_base_pc});
		entry_call->setCallingConv(entry_chunk->chunk->getCallingConv());

		// Global dispatcher symbol mapped to spu_runtime::tr_all
		const auto dispatcher = llvm::cast<llvm::Function>(m_module->getOrInsertFunction("spu_dispatcher", main_func->getType()).getCallee());
		m_engine->updateGlobalMapping("spu_dispatcher", reinterpret_cast<u64>(spu_runtime::tr_all));
		dispatcher->setCallingConv(main_func->getCallingConv());

		// Proceed to the next code
		if (entry_chunk->chunk->getReturnType() != get_type<void>())
		{
			// Chunk returned a continuation function: tail-call it
			const auto next_call = m_ir->CreateCall(main_func->getFunctionType(), entry_call, {m_thread, m_lsptr, m_ir->getInt64(0)});
			next_call->setCallingConv(main_func->getCallingConv());
			next_call->setTailCall();
		}
		else
		{
			entry_call->setTailCall();
		}

		m_ir->CreateRetVoid();

		// label_stop: escape to the dispatcher loop (pending thread state)
		m_ir->SetInsertPoint(label_stop);
		call("spu_escape", spu_runtime::g_escape, m_thread)->setTailCall();
		m_ir->CreateRetVoid();

		// label_diff: LS contents no longer match this compilation
		m_ir->SetInsertPoint(label_diff);

		if (g_cfg.core.spu_verification)
		{
			const auto pbfail = spu_ptr<u64>(&spu_thread::block_failure);
			m_ir->CreateStore(m_ir->CreateAdd(m_ir->CreateLoad(get_type<u64>(), pbfail), m_ir->getInt64(1)), pbfail);
			const auto dispci = call("spu_dispatch", spu_runtime::tr_dispatch, m_thread, m_lsptr, main_arg2);
			dispci->setCallingConv(CallingConv::GHC);
			dispci->setTailCall();
			m_ir->CreateRetVoid();
		}
		else
		{
			// Without verification this block is never reached
			m_ir->CreateUnreachable();
		}

		// Null-dispatch stub used for function table slots without a compiled chunk
		m_dispatch = cast<Function>(_module->getOrInsertFunction("__spu-null", entry_chunk->chunk->getFunctionType()).getCallee());
		m_dispatch->setLinkage(llvm::GlobalValue::InternalLinkage);
		m_dispatch->setCallingConv(entry_chunk->chunk->getCallingConv());
		set_function(m_dispatch);

		if (entry_chunk->chunk->getReturnType() == get_type<void>())
		{
			const auto next_call = m_ir->CreateCall(main_func->getFunctionType(), dispatcher, {m_thread, m_lsptr, m_ir->getInt64(0)});
			next_call->setCallingConv(main_func->getCallingConv());
			next_call->setTailCall();
			m_ir->CreateRetVoid();
		}
		else
		{
			m_ir->CreateRet(dispatcher);
		}

		// Function that executes check_state and escapes if necessary
		m_test_state = llvm::cast<llvm::Function>(m_module->getOrInsertFunction("spu_test_state", get_ftype<void, u8*>()).getCallee());
		m_test_state->setLinkage(GlobalValue::InternalLinkage);
#ifdef ARCH_ARM64
		// LLVM doesn't support PreserveAll on arm64.
		m_test_state->setCallingConv(CallingConv::PreserveMost);
#else
		m_test_state->setCallingConv(CallingConv::PreserveAll);
#endif
		// Body of spu_test_state: escape if exec_check_state reports a stop condition
		m_ir->SetInsertPoint(BasicBlock::Create(m_context, "", m_test_state));
		const auto escape_yes = BasicBlock::Create(m_context, "", m_test_state);
		const auto escape_no = BasicBlock::Create(m_context, "", m_test_state);
		m_ir->CreateCondBr(call("spu_exec_check_state", &exec_check_state, m_test_state->getArg(0)), escape_yes, escape_no);
		m_ir->SetInsertPoint(escape_yes);
		call("spu_escape", spu_runtime::g_escape, m_test_state->getArg(0));
		m_ir->CreateRetVoid();
		m_ir->SetInsertPoint(escape_no);
		m_ir->CreateRetVoid();

		// Create function table (uninitialized); filled in later only if it gets used
		m_function_table = new llvm::GlobalVariable(*m_module, llvm::ArrayType::get(entry_chunk->chunk->getType(), m_size / 4), true, llvm::GlobalValue::InternalLinkage, nullptr);

		// Create function chunks (m_function_queue may grow while iterating)
		for (usz fi = 0; fi < m_function_queue.size(); fi++)
		{
			// Initialize function info
			m_entry = m_function_queue[fi];
			set_function(m_functions[m_entry].chunk);

			// Set block hash for profiling (if enabled)
			if (g_cfg.core.spu_prof)
				m_ir->CreateStore(m_ir->getInt64((m_hash_start & -65536) | (m_entry >> 2)), spu_ptr<u64>(&spu_thread::block_hash));

			m_finfo = &m_functions[m_entry];
			m_ir->CreateBr(add_block(m_entry));

			// Emit instructions for basic blocks (m_block_queue may also grow while iterating)
			for (usz bi = 0; bi < m_block_queue.size(); bi++)
			{
				// Initialize basic block info
				const u32 baddr = m_block_queue[bi];
				m_block = &m_blocks[baddr];
				m_ir->SetInsertPoint(m_block->block);
				auto& bb = ::at32(m_bbs, baddr);
				bool need_check = false;
				m_block->bb = &bb;

				if (!bb.preds.empty())
				{
					// Initialize registers and build PHI nodes if necessary
					for (u32 i = 0; i < s_reg_max; i++)
					{
						// src encodes the register's origin: > 0x40000 means "build a PHI",
						// < 0x40000 is a block address to pass the value through from
						const u32 src = m_finfo->fn ? bb.reg_origin_abs[i] : bb.reg_origin[i];

						if (src > 0x40000)
						{
							// Use the xfloat hint to create 256-bit (4x double) PHI
							llvm::Type* type = g_cfg.core.spu_xfloat_accuracy == xfloat_accuracy::accurate && bb.reg_maybe_xf[i] ? get_type<f64[4]>() : get_reg_type(i);

							const auto _phi = m_ir->CreatePHI(type, ::size32(bb.preds), fmt::format("phi0x%05x_r%u", baddr, i));
							m_block->phi[i] = _phi;
							m_block->reg[i] = _phi;

							for (u32 pred : bb.preds)
							{
								const auto bfound = m_blocks.find(pred);

								if (bfound != m_blocks.end() && bfound->second.block_end)
								{
									auto& value = bfound->second.reg[i];

									if (!value || value->getType() != _phi->getType())
									{
										const auto regptr = init_reg_fixed(i);
										const auto cblock = m_ir->GetInsertBlock();
										// Materialize/convert the incoming value at the end of the predecessor block
										m_ir->SetInsertPoint(bfound->second.block_end->getTerminator());

										if (!value)
										{
											// Value hasn't been loaded yet
											value = m_finfo && m_finfo->load[i] ? m_finfo->load[i] : m_ir->CreateLoad(get_reg_type(i), regptr);
										}

										// Convert between the xfloat (f64[4]) and regular representations as needed
										if (value->getType() == get_type<f64[4]>() && type != get_type<f64[4]>())
										{
											value = double_to_xfloat(value);
										}
										else if (value->getType() != get_type<f64[4]>() && type == get_type<f64[4]>())
										{
											value = xfloat_to_double(bitcast<u32[4]>(value));
										}
										else
										{
											value = bitcast(value, _phi->getType());
										}

										m_ir->SetInsertPoint(cblock);
										ensure(bfound->second.block_end->getTerminator());
									}

									_phi->addIncoming(value, bfound->second.block_end);
								}
							}

							if (baddr == m_entry)
							{
								// Load value at the function chunk's entry block if necessary
								const auto regptr = init_reg_fixed(i);
								const auto cblock = m_ir->GetInsertBlock();
								m_ir->SetInsertPoint(m_function->getEntryBlock().getTerminator());
								const auto value = m_finfo && m_finfo->load[i] ? m_finfo->load[i] : m_ir->CreateLoad(get_reg_type(i), regptr);
								m_ir->SetInsertPoint(cblock);
								_phi->addIncoming(value, &m_function->getEntryBlock());
							}
						}
						else if (src < 0x40000)
						{
							// Passthrough register value
							const auto bfound = m_blocks.find(src);

							if (bfound != m_blocks.end())
							{
								m_block->reg[i] = bfound->second.reg[i];
							}
							else
							{
								spu_log.error("[0x%05x] Value not found ($%u from 0x%05x)", baddr, i, src);
							}
						}
						else
						{
							m_block->reg[i] = m_finfo->load[i];
						}
					}

					// Emit state check if necessary (TODO: more conditions)
					for (u32 pred : bb.preds)
					{
						if (pred >= baddr)
						{
							// If this block is a target of a backward branch (possibly loop), emit a check
							need_check = true;
							break;
						}
					}
				}

				// State check at the beginning of the chunk
				if (need_check || (bi == 0 && g_cfg.core.spu_block_size != spu_block_size_type::safe))
				{
					check_state(baddr);
				}

				// Emit instructions
				for (m_pos = baddr; m_pos >= start && m_pos < end && !m_ir->GetInsertBlock()->getTerminator(); m_pos += 4)
				{
					if (m_pos != baddr && m_block_info[m_pos / 4])
					{
						break;
					}

					const u32 op = std::bit_cast<be_t<u32>>(func.data[(m_pos - start) / 4]);

					if (!op)
					{
						spu_log.error("[%s] Unexpected fallthrough to 0x%x (chunk=0x%x, entry=0x%x)", m_hash, m_pos, m_entry, m_function_queue[0]);
						break;
					}

					// Set variable for set_link()
					if (m_pos + 4 >= end)
						m_next_op = 0;
					else
						m_next_op = func.data[(m_pos - start) / 4 + 1];

					// Pattern-matched MFC sequences are emitted as dedicated IR instead of per-op decoding
					switch (m_inst_attrs[(m_pos - start) / 4])
					{
					case inst_attr::putllc0:
					{
						putllc0_pattern(func, m_patterns.at(m_pos - start).range);
						continue;
					}
					case inst_attr::putllc16:
					{
						putllc16_pattern(func, m_patterns.at(m_pos - start).range);
						continue;
					}
					case inst_attr::omit:
					{
						// TODO
						continue;
					}
					default: break;
					}

					// Execute recompiler function (TODO)
					(this->*decode(op))({op});
				}

				// Finalize block with fallthrough if necessary
				if (!m_ir->GetInsertBlock()->getTerminator())
				{
					const u32 target = m_pos == baddr ?
					baddr : m_pos & 0x3fffc;

					if (m_pos != baddr)
					{
						m_pos -= 4;

						if (target >= start && target < end)
						{
							// Sanity check: the fallthrough target should be a registered branch target
							const auto tfound = m_targets.find(m_pos);

							if (tfound == m_targets.end() || std::find(tfound->second.begin(), tfound->second.end(), target) == tfound->second.end())
							{
								spu_log.error("[%s] Unregistered fallthrough to 0x%x (chunk=0x%x, entry=0x%x)", m_hash, target, m_entry, m_function_queue[0]);
							}
						}
					}

					m_block->block_end = m_ir->GetInsertBlock();
					m_ir->CreateBr(add_block(target));
				}

				ensure(m_block->block_end);
			}

			// Work on register stores.
			// 1. Remove stores which are overwritten later.
			// 2. Sink stores to post-dominating blocks.
			llvm::PostDominatorTree pdt(*m_function);
			llvm::DominatorTree dt(*m_function);

			// Post-order indices
			std::unordered_map<llvm::BasicBlock*, usz> pois;
			{
				usz i = 0;
				for (auto* bb : llvm::post_order(m_function))
					pois[bb] = i++;
			}

			// Basic block to block_info
			std::unordered_map<llvm::BasicBlock*, block_info*> bb_to_info;

			std::vector<std::pair<u32, block_info*>> block_q;
			block_q.reserve(m_blocks.size());

			bool has_gpr_memory_barriers = false;

			for (auto& [a, b] : m_blocks)
			{
				block_q.emplace_back(a, &b);
				bb_to_info[b.block] = &b;
				has_gpr_memory_barriers |= b.has_gpr_memory_barriers;
			}

			// Pass 1: dead-store elimination across blocks
			for (usz bi = 0; bi < block_q.size(); bi++)
			{
				auto bqbi = block_q[bi].second;

				// TODO: process all registers up to s_reg_max
				for (u32 i = 0; i <= s_reg_127; i++)
				{
					// Check if the store is beyond the last barrier
					if (auto& bs = bqbi->store[i]; bs && !bqbi->does_gpr_barrier_proceed_last_store(i))
					{
						for (auto& [a, b] : m_blocks)
						{
							// Check if the store occurs before any barrier in the block
							if (b.store[i] && b.store[i] != bs && b.store_context_first_id[i] == 1)
							{
								if (pdt.dominates(b.store[i], bs))
								{
									// A later store on every path overwrites bs: it is dead
									spu_log.trace("Erased r%u store from block 0x%x (simple)", i, block_q[bi].first);
									bs->eraseFromParent();
									bs = nullptr;
									break;
								}
							}
						}

						if (!bs)
							continue;

						// Set of store instructions which overwrite bs
						std::vector<llvm::BasicBlock*> killers;

						for (auto& [a, b] : m_blocks)
						{
							const auto si = b.store[i];

							if (si && si != bs)
							{
								if (pois[bs->getParent()] > pois[si->getParent()])
								{
									killers.emplace_back(si->getParent());
								}
								else
								{
									// Reset: store is not the first in the set
									killers.clear();
									break;
								}
							}
						}

						if (killers.empty())
							continue;

						// Find nearest common post-dominator
						llvm::BasicBlock* common_pdom = killers[0];

						if (has_gpr_memory_barriers)
						{
							// Cannot optimize block walk-through, need to inspect all possible memory barriers in the way
							common_pdom = nullptr;
						}

						for (auto* bbb : llvm::drop_begin(killers))
						{
							if (!common_pdom)
							{
								break;
							}

							common_pdom = pdt.findNearestCommonDominator(common_pdom, bbb);
						}

						// Shortcut
						if (common_pdom && !pdt.dominates(common_pdom, bs->getParent()))
						{
							common_pdom = nullptr;
						}

						// Look for possibly-dead store in CFG starting from the exit nodes
						llvm::SetVector<llvm::BasicBlock*> work_list;
						std::unordered_map<llvm::BasicBlock*, bool> worked_on;

						if (!common_pdom || std::none_of(killers.begin(), killers.end(), [common_pdom](const llvm::BasicBlock* block){ return block == common_pdom;}))
						{
							if (common_pdom)
							{
								// Shortcut
								work_list.insert(common_pdom);
								worked_on[common_pdom] = true;
							}
							else
							{
								// Check all exits
								for (auto* r : pdt.roots())
								{
									worked_on[r] = true;
									work_list.insert(r);
								}
							}
						}

						// Walk backwards from the exits: if bs's block is reachable without passing
						// through a killer store, bs is live and must be kept
						// bool flag indicates the presence of a memory barrier before the killer store
						std::vector<std::pair<llvm::BasicBlock*, bool>> work2_list;

						for (usz wi = 0; wi < work_list.size(); wi++)
						{
							auto* cur = work_list[wi];

							if (std::any_of(killers.begin(), killers.end(), [cur](const llvm::BasicBlock* block){ return block == cur; }))
							{
								work2_list.emplace_back(cur, bb_to_info[cur] && bb_to_info[cur]->does_gpr_barrier_preceed_first_store(i));
								continue;
							}

							if (cur == bs->getParent())
							{
								// Reset: store is not dead
								killers.clear();
								break;
							}

							for (auto* p : llvm::predecessors(cur))
							{
								if (!worked_on[p])
								{
									worked_on[p] = true;
									work_list.insert(p);
								}
							}
						}

						if (killers.empty())
							continue;

						worked_on.clear();

						for (usz wi = 0; wi < work2_list.size(); wi++)
						{
							worked_on[work2_list[wi].first] = true;
						}

						// Need to treat tails differently: do not require checking barrier (checked before in a suitable manner)
						const usz work_list_tail_blocks_max_index = work2_list.size();

						// Second backwards walk from the killer blocks toward bs, tracking barrier usage
						for (usz wi = 0; wi < work2_list.size(); wi++)
						{
							auto [cur, found_user] = work2_list[wi];
							ensure(cur != bs->getParent());

							if (!found_user && wi >= work_list_tail_blocks_max_index)
							{
								if (auto info = bb_to_info[cur])
								{
									if (info->store_context_ctr[i] != 1)
									{
										found_user = true;
									}
								}
							}

							for (auto* p : llvm::predecessors(cur))
							{
								if (p == bs->getParent())
								{
									if (found_user)
									{
										// Reset: store is being used and preserved by ensure_gpr_stores()
										killers.clear();
										break;
									}

									continue;
								}

								if (!worked_on[p])
								{
									worked_on[p] = true;
									work2_list.push_back(std::make_pair(p, found_user));
								}
								// Enqueue a second iteration for found_user=true if only found with found_user=false
								else if (found_user && !std::find_if(work2_list.rbegin(), work2_list.rend(), [&](auto& it){ return it.first == p; })->second)
								{
									work2_list.push_back(std::make_pair(p, true));
								}
							}

							if (killers.empty())
							{
								break;
							}
						}

						// Finally erase the dead store
						if (!killers.empty())
						{
							spu_log.trace("Erased r%u store from block 0x%x (reversed)", i, block_q[bi].first);

							bs->eraseFromParent();
							bs = nullptr;

							// Run the loop from the start
							bi = 0;
						}
					}
				}
			}

			block_q.clear();

			for (auto& [a, b] : m_blocks)
			{
				block_q.emplace_back(a, &b);
			}

			// Pass 2: sink surviving stores into successor blocks when profitable
			for (usz bi = 0; bi < block_q.size(); bi++)
			{
				auto bqbi = block_q[bi].second;

				std::vector<std::pair<u32, bool>> work_list;
				std::map<u32, block_info*, std::greater<>> sucs;
				std::unordered_map<u32, bool> worked_on;

				for (u32 i = 0; i <= s_reg_127; i++)
				{
					if (i == s_reg_sp)
					{
						// If we postpone R1 store we lose effortless meta-analytical capabilities for little gain
						continue;
					}

					// If store isn't erased, try to sink it
					if (auto& bs = bqbi->store[i]; bs && bqbi->bb->targets.size() > 1 && !bqbi->does_gpr_barrier_proceed_last_store(i))
					{
						if (sucs.empty())
						{
							for (u32 tj : bqbi->bb->targets)
							{
								auto b2it =
								m_blocks.find(tj);

								if (b2it != m_blocks.end())
								{
									sucs.emplace(tj, &b2it->second);
								}
							}
						}

						// Reset
						work_list.clear();

						for (auto& [_, worked] : worked_on)
						{
							worked = false;
						}

						bool has_gpr_barriers_in_the_way = false;

						// Seed the walk with the direct successors; a self-loop with a barrier blocks sinking
						for (auto [a2, b2] : sucs)
						{
							if (a2 == block_q[bi].first)
							{
								if (bqbi->store_context_ctr[i] != 1)
								{
									has_gpr_barriers_in_the_way = true;
									break;
								}

								continue;
							}

							if (!worked_on[a2])
							{
								work_list.emplace_back(a2, b2->store_context_ctr[i] != 1);
								worked_on[a2] = true;
							}
						}

						if (has_gpr_barriers_in_the_way)
						{
							// Cannot sink store, has barriers in the way
							continue;
						}

						// Forward walk over reachable blocks: abort if a barrier can reach back to this block
						for (usz wi = 0; wi < work_list.size(); wi++)
						{
							auto [cur, found_barrier] = work_list[wi];

							if (!found_barrier)
							{
								if (const auto it = m_blocks.find(cur); it != m_blocks.cend())
								{
									if (it->second.store_context_ctr[i] != 1)
									{
										found_barrier = true;
									}
								}
							}

							if (cur == block_q[bi].first)
							{
								if (found_barrier)
								{
									has_gpr_barriers_in_the_way = true;
									break;
								}

								continue;
							}

							for (u32 target : m_bbs[cur].targets)
							{
								if (!m_block_info[target / 4])
								{
									continue;
								}

								if (m_blocks.find(target) == m_blocks.end())
								{
									continue;
								}

								if (!worked_on[target])
								{
									worked_on[target] = true;
									work_list.emplace_back(target, found_barrier);
								}
								// Enqueue a second iteration for found_barrier=true if only found with found_barrier=false
								else if (found_barrier && !std::find_if(work_list.rbegin(), work_list.rend(), [&](auto& it){ return it.first == target; })->second)
								{
									work_list.emplace_back(target, true);
								}
							}
						}

						if (has_gpr_barriers_in_the_way)
						{
							// Cannot sink store, has barriers in the way
							continue;
						}

						// Clone the store into each successor (splitting critical edges when needed)
						for (auto [a2, b2] : sucs)
						{
							if (b2 != bqbi)
							{
								auto ins = b2->block->getFirstNonPHI();

								if (b2->bb->preds.size() == 1)
								{
									// Single-predecessor successor: insert directly at its head
									if (!dt.dominates(bs->getOperand(0), ins))
										continue;
									if (!pdt.dominates(ins, bs))
										continue;

									m_ir->SetInsertPoint(ins);
									auto si = llvm::cast<StoreInst>(m_ir->Insert(bs->clone()));

									if (b2->store[i] == nullptr)
									{
										// Protect against backwards ordering now
										b2->store[i] = si;
										b2->store_context_last_id[i] = 0;
										b2->store_context_first_id[i] = b2->store_context_ctr[i] + 1;

										if (std::none_of(block_q.begin() + bi, block_q.end(), [b_info = b2](auto&& a) { return a.second == b_info; }))
										{
											// Sunk store can be checked again
											block_q.emplace_back(a2, b2);
										}
									}

									spu_log.trace("Postponed r%u store from block 0x%x (single)", i, block_q[bi].first);
								}
								else
								{
									// Initialize additional block between two basic blocks
									auto& edge = bqbi->block_edges[a2];

									if (!edge)
									{
										const auto succ_range = llvm::successors(bqbi->block_end);

										// Find the actual CFG successor corresponding to b2->block
										// (bounded backwards search through up to 32 predecessors)
										auto succ = b2->block;

										llvm::SmallSetVector<llvm::BasicBlock*, 32> succ_q;
										succ_q.insert(b2->block);

										for (usz j = 0; j < 32 && j < succ_q.size(); j++)
										{
											if (!llvm::count(succ_range, (succ = succ_q[j])))
											{
												for (auto pred : llvm::predecessors(succ))
												{
													succ_q.insert(pred);
												}
											}
											else
											{
												break;
											}
										}

										if (!llvm::count(succ_range, succ))
										{
											// TODO: figure this out
											spu_log.notice("[%s] Failed successor to 0x%05x", fmt::base57(be_t<u64>{m_hash_start}), a2);
											continue;
										}

										edge = llvm::SplitEdge(bqbi->block_end, succ);
										pdt.recalculate(*m_function);
										dt.recalculate(*m_function);
										spu_log.trace("Postponed r%u store from block 0x%x (multiple)", i, block_q[bi].first);
									}

									ins = edge->getTerminator();

									if (!dt.dominates(bs->getOperand(0), ins))
										continue;
									if (!pdt.dominates(ins, bs))
										continue;

									m_ir->SetInsertPoint(ins);
									m_ir->Insert(bs->clone());
								}

								// Original store removed after cloning; dominator info must be refreshed
								bs->eraseFromParent();
								bs = nullptr;

								pdt.recalculate(*m_function);
								dt.recalculate(*m_function);
								break;
							}
						}
					}
				}
			}
		}

		// Create function table if necessary
		if (m_function_table->getNumUses())
		{
			std::vector<llvm::Constant*> chunks;
			chunks.reserve(m_size / 4);

			for (u32 i = start; i < end; i += 4)
			{
				const auto found = m_functions.find(i);

				if (found == m_functions.end())
				{
					// NOTE: "false &&" keeps the patchpoint variant disabled
					if (false && g_cfg.core.spu_verification)
					{
						const std::string ppname = fmt::format("%s-chunkpp-0x%05x", m_hash, i);
						m_engine->updateGlobalMapping(ppname, reinterpret_cast<u64>(m_spurt->make_branch_patchpoint(i / 4)));

						const auto ppfunc = llvm::cast<llvm::Function>(m_module->getOrInsertFunction(ppname,
m_finfo->chunk->getFunctionType()).getCallee()); ppfunc->setCallingConv(m_finfo->chunk->getCallingConv()); chunks.push_back(ppfunc); continue; } chunks.push_back(m_dispatch); continue; } chunks.push_back(found->second.chunk); } m_function_table->setInitializer(llvm::ConstantArray::get(llvm::ArrayType::get(entry_chunk->chunk->getType(), m_size / 4), chunks)); } else { m_function_table->eraseFromParent(); } #if LLVM_VERSION_MAJOR < 17 // Initialize pass manager legacy::FunctionPassManager pm(_module.get()); // Basic optimizations pm.add(createEarlyCSEPass()); pm.add(createCFGSimplificationPass()); //pm.add(createNewGVNPass()); pm.add(createDeadStoreEliminationPass()); pm.add(createLICMPass()); pm.add(createAggressiveDCEPass()); pm.add(createDeadCodeEliminationPass()); //pm.add(createLintPass()); // Check #else // Create the analysis managers. // These must be declared in this order so that they are destroyed in the // correct order due to inter-analysis-manager references. LoopAnalysisManager lam; FunctionAnalysisManager fam; CGSCCAnalysisManager cgam; ModuleAnalysisManager mam; // Create the new pass manager builder. // Take a look at the PassBuilder constructor parameters for more // customization, e.g. specifying a TargetMachine or various debugging // options. PassBuilder pb; // Register all the basic analyses with the managers. pb.registerModuleAnalyses(mam); pb.registerCGSCCAnalyses(cgam); pb.registerFunctionAnalyses(fam); pb.registerLoopAnalyses(lam); pb.crossRegisterProxies(lam, fam, cgam, mam); FunctionPassManager fpm; // Basic optimizations fpm.addPass(EarlyCSEPass(true)); fpm.addPass(SimplifyCFGPass()); fpm.addPass(DSEPass()); fpm.addPass(createFunctionToLoopPassAdaptor(LICMPass(LICMOptions()), true)); fpm.addPass(ADCEPass()); #endif for (auto& f : *m_module) { run_transforms(f); } for (const auto& func : m_functions) { const auto f = func.second.fn ? 
func.second.fn : func.second.chunk; #if LLVM_VERSION_MAJOR < 17 pm.run(*f); #else fpm.run(*f, fam); #endif } // Clear context (TODO) m_blocks.clear(); m_block_queue.clear(); m_functions.clear(); m_function_queue.clear(); m_function_table = nullptr; raw_string_ostream out(log); if (g_cfg.core.spu_debug) { fmt::append(log, "LLVM IR at 0x%x:\n", func.entry_point); out << *_module; // print IR out << "\n\n"; } if (verifyModule(*_module, &out)) { out.flush(); spu_log.error("LLVM: Verification failed at 0x%x:\n%s", func.entry_point, log); if (g_cfg.core.spu_debug) { fs::write_file(m_spurt->get_cache_path() + "spu-ir.log", fs::write + fs::append, log); } if (auto& cache = g_fxo->get<spu_cache>()) { if (add_to_file) { cache.add(func); } } fmt::throw_exception("Compilation failed"); } #if defined(__APPLE__) pthread_jit_write_protect_np(false); #endif if (g_cfg.core.spu_debug) { // Testing only m_jit.add(std::move(_module), m_spurt->get_cache_path() + "llvm/"); } else { m_jit.add(std::move(_module)); } m_jit.fin(); // Register function pointer const spu_function_t fn = reinterpret_cast<spu_function_t>(m_jit.get_engine().getPointerToFunction(main_func)); // Install unconditionally, possibly replacing existing one from spu_fast add_loc->compiled = fn; // Rebuild trampoline if necessary if (!m_spurt->rebuild_ubertrampoline(func.data[0])) { if (auto& cache = g_fxo->get<spu_cache>()) { if (add_to_file) { cache.add(func); } } return nullptr; } add_loc->compiled.notify_all(); if (g_cfg.core.spu_debug) { out.flush(); fs::write_file(m_spurt->get_cache_path() + "spu-ir.log", fs::create + fs::write + fs::append, log); } #if defined(__APPLE__) pthread_jit_write_protect_np(true); #endif #if defined(ARCH_ARM64) // Flush all cache lines after potentially writing executable code asm("ISB"); asm("DSB ISH"); #endif if (auto& cache = g_fxo->get<spu_cache>()) { if (add_to_file) { cache.add(func); } spu_log.success("New SPU block compiled successfully (size=%u)", func_size); } return fn; } 
// Debug helper: cross-checks state against the reference (table) interpreter.
// Called with after=false before an instruction (runs the reference interpreter
// and stashes the resulting GPR file), then with after=true to compare.
static void interp_check(spu_thread* _spu, bool after)
{
	// GPR snapshot taken by the "before" pass (one per host thread)
	static thread_local std::array<v128, 128> s_gpr;

	if (!after)
	{
		// Preserve reg state
		s_gpr = _spu->gpr;

		// Execute interpreter instruction
		const u32 op = *reinterpret_cast<const be_t<u32>*>(_spu->_ptr<u8>(0) + _spu->pc);

		if (!g_fxo->get<spu_interpreter_rt>().decode(op)(*_spu, {op}))
			spu_log.fatal("Bad instruction");

		// Swap state
		for (u32 i = 0; i < s_gpr.size(); ++i)
			std::swap(_spu->gpr[i], s_gpr[i]);
	}
	else
	{
		// Check saved state
		for (u32 i = 0; i < s_gpr.size(); ++i)
		{
			if (_spu->gpr[i] != s_gpr[i])
			{
				spu_log.fatal("Register mismatch: $%u\n%s\n%s", i, _spu->gpr[i], s_gpr[i]);
				_spu->state += cpu_flag::dbg_pause;
			}
		}
	}
}

// Builds and JIT-compiles the LLVM-based SPU interpreter: one small GHC-cc
// function per opcode pattern, chained via tail calls through a
// (1 << m_interp_magn)-entry dispatch table indexed by the top opcode bits.
// Publishes the entry point in spu_runtime::g_interpreter (also returned;
// null on failure) and per-itype functions in g_interpreter_table.
spu_function_t compile_interpreter()
{
	using namespace llvm;

	m_engine->clearAllGlobalMappings();

	// Create LLVM module
	std::unique_ptr<Module> _module = std::make_unique<Module>("spu_interpreter.obj", m_context);
	_module->setTargetTriple(jit_compiler::triple2());
	_module->setDataLayout(m_jit.get_engine().getTargetMachine()->createDataLayout());
	m_module = _module.get();

	// Initialize IR Builder
	IRBuilder<> irb(m_context);
	m_ir = &irb;

	// Create interpreter table (filled with function pointers at the end)
	const auto if_type = get_ftype<void, u8*, u8*, u32, u32, u8*, u32, u8*>();
	m_function_table = new GlobalVariable(*m_module, ArrayType::get(if_type->getPointerTo(), 1ull << m_interp_magn), true, GlobalValue::InternalLinkage, nullptr);

	init_luts();

	// Add return function (terminates an interpreter tail-call chain)
	const auto ret_func = cast<Function>(_module->getOrInsertFunction("spu_ret", if_type).getCallee());
	ret_func->setCallingConv(CallingConv::GHC);
	ret_func->setLinkage(GlobalValue::InternalLinkage);
	m_ir->SetInsertPoint(BasicBlock::Create(m_context, "", ret_func));
	m_thread = ret_func->getArg(1);
	m_interp_pc = ret_func->getArg(2);
	m_ir->CreateRetVoid();

	// Add entry function, serves as a trampoline
	const auto main_func = llvm::cast<Function>(m_module->getOrInsertFunction("spu_interpreter", get_ftype<void, u8*, u8*, u8*>()).getCallee());
#ifdef _WIN32
	main_func->setCallingConv(CallingConv::Win64);
#endif
	set_function(main_func);

	// Load pc and opcode
	m_interp_pc = m_ir->CreateLoad(get_type<u32>(), spu_ptr<u32>(&spu_thread::pc));
	m_interp_op = m_ir->CreateLoad(get_type<u32>(), m_ir->CreateGEP(get_type<u8>(), m_lsptr, m_ir->CreateZExt(m_interp_pc, get_type<u64>())));
	// SPU local storage is big-endian; byteswap the fetched opcode
	m_interp_op = m_ir->CreateCall(get_intrinsic<u32>(Intrinsic::bswap), {m_interp_op});

	// Pinned constant, address of interpreter table
	m_interp_table = m_ir->CreateGEP(m_function_table->getValueType(), m_function_table, {m_ir->getInt64(0), m_ir->getInt64(0)});

	// Pinned constant, mask for shifted register index
	m_interp_7f0 = m_ir->getInt32(0x7f0);

	// Pinned constant, address of first register
	m_interp_regs = _ptr(m_thread, get_reg_offset(0));

	// Save host thread's stack pointer
	const auto native_sp = spu_ptr<u64>(&spu_thread::hv_ctx, &rpcs3::hypervisor_context_t::regs);
#if defined(ARCH_X64)
	const auto rsp_name = MetadataAsValue::get(m_context, MDNode::get(m_context, {MDString::get(m_context, "rsp")}));
#elif defined(ARCH_ARM64)
	const auto rsp_name = MetadataAsValue::get(m_context, MDNode::get(m_context, {MDString::get(m_context, "sp")}));
#endif
	m_ir->CreateStore(m_ir->CreateCall(get_intrinsic<u64>(Intrinsic::read_register), {rsp_name}), native_sp);

	// Decode (shift) and load function pointer
	const auto first = m_ir->CreateLoad(if_type->getPointerTo(), m_ir->CreateGEP(if_type->getPointerTo(), m_interp_table, m_ir->CreateLShr(m_interp_op, 32u - m_interp_magn)));
	const auto call0 = m_ir->CreateCall(if_type, first, {m_lsptr, m_thread, m_interp_pc, m_interp_op, m_interp_table, m_interp_7f0, m_interp_regs});
	call0->setCallingConv(CallingConv::GHC);
	m_ir->CreateRetVoid();

	// Create helper globals (float<->int scale tables used by FP instructions)
	{
		std::vector<llvm::Constant*> float_to;
		std::vector<llvm::Constant*> to_float;
		float_to.reserve(256);
		to_float.reserve(256);

		for (int i = 0; i < 256; ++i)
		{
			float_to.push_back(ConstantFP::get(get_type<f32>(), std::exp2(173 - i)));
			to_float.push_back(ConstantFP::get(get_type<f32>(), std::exp2(i - 155)));
		}

		const auto atype = ArrayType::get(get_type<f32>(), 256);
		m_scale_float_to = new GlobalVariable(*m_module, atype, true, GlobalValue::InternalLinkage, ConstantArray::get(atype, float_to));
		m_scale_to_float = new GlobalVariable(*m_module, atype, true, GlobalValue::InternalLinkage, ConstantArray::get(atype, to_float));
	}

	// Fill interpreter table
	std::array<llvm::Function*, 256> ifuncs{};
	std::vector<llvm::Constant*> iptrs;
	iptrs.reserve(1ull << m_interp_magn);

	m_block = nullptr;

	auto last_itype = spu_itype::type{255};

	for (u32 i = 0; i < 1u << m_interp_magn;)
	{
		// Fake opcode
		const u32 op = i << (32u - m_interp_magn);

		// Instruction type
		const auto itype = g_spu_itype.decode(op);

		// Function name
		std::string fname = fmt::format("spu_%s", g_spu_iname.decode(op));

		if (last_itype != itype)
		{
			// Trigger automatic information collection (probing)
			m_op_const_mask = 0;
		}
		else
		{
			// Inject const mask into function name
			fmt::append(fname, "_%X", (i & (m_op_const_mask >> (32u - m_interp_magn))) | (1u << m_interp_magn));
		}

		// Decode instruction name, access function
		const auto f = cast<Function>(_module->getOrInsertFunction(fname, if_type).getCallee());

		// Build if necessary
		if (f->empty())
		{
			if (last_itype != itype)
			{
				ifuncs[static_cast<usz>(itype)] = f;
			}

			f->setCallingConv(CallingConv::GHC);

			m_function = f;
			m_lsptr = f->getArg(0);
			m_thread = f->getArg(1);
			m_interp_pc = f->getArg(2);
			m_interp_op = f->getArg(3);
			m_interp_table = f->getArg(4);
			m_interp_7f0 = f->getArg(5);
			m_interp_regs = f->getArg(6);

			m_ir->SetInsertPoint(BasicBlock::Create(m_context, "", f));
			m_memptr = m_ir->CreateLoad(get_type<u8*>(), spu_ptr<u8*>(&spu_thread::memory_base_addr));

			switch (itype)
			{
			case spu_itype::UNK:
			case spu_itype::DFCEQ:
			case spu_itype::DFCMEQ:
			case spu_itype::DFCGT:
			case spu_itype::DFCMGT:
			case spu_itype::DFTSV:
			case spu_itype::STOP:
			case spu_itype::STOPD:
			case spu_itype::RDCH:
			case spu_itype::WRCH:
			{
				// Invalid or abortable instruction. Save current address.
				m_ir->CreateStore(m_interp_pc, spu_ptr<u32>(&spu_thread::pc));
				[[fallthrough]];
			}
			default:
			{
				break;
			}
			}

			{
				m_interp_bblock = nullptr;

				// Next instruction (no wraparound at the end of LS)
				m_interp_pc_next = m_ir->CreateAdd(m_interp_pc, m_ir->getInt32(4));

				bool check = false;

				// NOTE(review): both the initializer and this branch leave 'check'
				// false, so the interp_check() instrumentation below is currently
				// disabled — confirm this is intentional before changing.
				if (itype == spu_itype::WRCH ||
					itype == spu_itype::RDCH ||
					itype == spu_itype::RCHCNT ||
					itype == spu_itype::STOP ||
					itype == spu_itype::STOPD ||
					itype & spu_itype::floating ||
					itype & spu_itype::branch)
				{
					check = false;
				}

				if (itype & spu_itype::branch)
				{
					// Instruction changes pc - change order.
					(this->*decode(op))({op});

					if (m_interp_bblock)
					{
						m_ir->SetInsertPoint(m_interp_bblock);
						m_interp_bblock = nullptr;
					}
				}

				if (!m_ir->GetInsertBlock()->getTerminator())
				{
					if (check)
					{
						m_ir->CreateStore(m_interp_pc, spu_ptr<u32>(&spu_thread::pc));
					}

					// Decode next instruction.
					const auto next_pc = itype & spu_itype::branch ? m_interp_pc : m_interp_pc_next;
					const auto be32_op = m_ir->CreateLoad(get_type<u32>(), m_ir->CreateGEP(get_type<u8>(), m_lsptr, m_ir->CreateZExt(next_pc, get_type<u64>())));
					const auto next_op = m_ir->CreateCall(get_intrinsic<u32>(Intrinsic::bswap), {be32_op});
					const auto next_if = m_ir->CreateLoad(if_type->getPointerTo(), m_ir->CreateGEP(if_type->getPointerTo(), m_interp_table, m_ir->CreateLShr(next_op, 32u - m_interp_magn)));
					// Volatile keeps the speculative next-handler load alive during probing
					llvm::cast<LoadInst>(next_if)->setVolatile(true);

					if (!(itype & spu_itype::branch))
					{
						if (check)
						{
							call("spu_interp_check", &interp_check, m_thread, m_ir->getFalse());
						}

						// Normal instruction.
						(this->*decode(op))({op});

						if (check && !m_ir->GetInsertBlock()->getTerminator())
						{
							call("spu_interp_check", &interp_check, m_thread, m_ir->getTrue());
						}

						m_interp_pc = m_interp_pc_next;
					}

					if (last_itype != itype)
					{
						// Reset to discard dead code
						llvm::cast<LoadInst>(next_if)->setVolatile(false);

						if (itype & spu_itype::branch)
						{
							// State check before continuing at the branch target
							const auto _stop = BasicBlock::Create(m_context, "", f);
							const auto _next = BasicBlock::Create(m_context, "", f);
							m_ir->CreateCondBr(m_ir->CreateIsNotNull(m_ir->CreateLoad(get_type<u32>(), spu_ptr<u32>(&spu_thread::state))), _stop, _next, m_md_unlikely);
							m_ir->SetInsertPoint(_stop);
							m_ir->CreateStore(m_interp_pc, spu_ptr<u32>(&spu_thread::pc));

							const auto escape_yes = BasicBlock::Create(m_context, "", f);
							const auto escape_no = BasicBlock::Create(m_context, "", f);
							m_ir->CreateCondBr(call("spu_exec_check_state", &exec_check_state, m_thread), escape_yes, escape_no);
							m_ir->SetInsertPoint(escape_yes);
							call("spu_escape", spu_runtime::g_escape, m_thread);
							m_ir->CreateBr(_next);
							m_ir->SetInsertPoint(escape_no);
							m_ir->CreateBr(_next);
							m_ir->SetInsertPoint(_next);
						}

						llvm::Value* fret = m_interp_table;

						if (itype == spu_itype::WRCH ||
							itype == spu_itype::RDCH ||
							itype == spu_itype::RCHCNT ||
							itype == spu_itype::STOP ||
							itype == spu_itype::STOPD ||
							itype == spu_itype::UNK ||
							itype == spu_itype::DFCMEQ ||
							itype == spu_itype::DFCMGT ||
							itype == spu_itype::DFCGT ||
							itype == spu_itype::DFCEQ ||
							itype == spu_itype::DFTSV)
						{
							// Restore pinned constants and return to caller
							m_interp_7f0 = m_ir->getInt32(0x7f0);
							m_interp_regs = _ptr(m_thread, get_reg_offset(0));
							fret = ret_func;
						}
						else if (!(itype & spu_itype::branch))
						{
							// Hack: inline ret instruction before final jmp; this is not reliable.
#ifdef ARCH_X64
							m_ir->CreateCall(InlineAsm::get(get_ftype<void>(), "ret", "", true, false, InlineAsm::AD_Intel));
#else
							m_ir->CreateCall(InlineAsm::get(get_ftype<void>(), "ret", "", true, false));
#endif
							fret = ret_func;
						}

						const auto arg3 = UndefValue::get(get_type<u32>());
						const auto _ret = m_ir->CreateCall(if_type, fret, {m_lsptr, m_thread, m_interp_pc, arg3, m_interp_table, m_interp_7f0, m_interp_regs});
						_ret->setCallingConv(CallingConv::GHC);
						_ret->setTailCall();
						m_ir->CreateRetVoid();
					}

					if (!m_ir->GetInsertBlock()->getTerminator())
					{
						// Call next instruction.
						const auto _stop = BasicBlock::Create(m_context, "", f);
						const auto _next = BasicBlock::Create(m_context, "", f);
						m_ir->CreateCondBr(m_ir->CreateIsNotNull(m_ir->CreateLoad(get_type<u32>(), spu_ptr<u32>(&spu_thread::state))), _stop, _next, m_md_unlikely);
						m_ir->SetInsertPoint(_next);

						if (itype == spu_itype::WRCH ||
							itype == spu_itype::RDCH ||
							itype == spu_itype::RCHCNT ||
							itype == spu_itype::STOP ||
							itype == spu_itype::STOPD)
						{
							m_interp_7f0 = m_ir->getInt32(0x7f0);
							m_interp_regs = _ptr(m_thread, get_reg_offset(0));
						}

						// Tail-call into the next instruction's handler
						const auto ncall = m_ir->CreateCall(if_type, next_if, {m_lsptr, m_thread, m_interp_pc, next_op, m_interp_table, m_interp_7f0, m_interp_regs});
						ncall->setCallingConv(CallingConv::GHC);
						ncall->setTailCall();
						m_ir->CreateRetVoid();
						m_ir->SetInsertPoint(_stop);
						m_ir->CreateStore(m_interp_pc, spu_ptr<u32>(&spu_thread::pc));
						call("spu_escape", spu_runtime::g_escape, m_thread)->setTailCall();
						m_ir->CreateRetVoid();
					}
				}
			}
		}

		if (last_itype != itype && g_cfg.core.spu_decoder != spu_decoder_type::llvm)
		{
			// Repeat after probing
			last_itype = itype;
		}
		else
		{
			// Add to the table
			iptrs.push_back(f);
			i++;
		}
	}

	m_function_table->setInitializer(ConstantArray::get(ArrayType::get(if_type->getPointerTo(), 1ull << m_interp_magn), iptrs));
	m_function_table = nullptr;

	for (auto& f : *_module)
	{
		run_transforms(f);
	}

	std::string log;

	raw_string_ostream out(log);

	if (g_cfg.core.spu_debug)
	{
		fmt::append(log, "LLVM IR (interpreter):\n");
		out << *_module; // print IR
		out << "\n\n";
	}

	if (verifyModule(*_module, &out))
	{
		out.flush();
		spu_log.error("LLVM: Verification failed:\n%s", log);

		if (g_cfg.core.spu_debug)
		{
			fs::write_file(m_spurt->get_cache_path() + "spu-ir.log", fs::create + fs::write + fs::append, log);
		}

		fmt::throw_exception("Compilation failed");
	}

	if (g_cfg.core.spu_debug)
	{
		// Testing only
		m_jit.add(std::move(_module), m_spurt->get_cache_path() + "llvm/");
	}
	else
	{
		m_jit.add(std::move(_module));
	}

	m_jit.fin();

	// Register interpreter entry point
	spu_runtime::g_interpreter = reinterpret_cast<spu_function_t>(m_jit.get_engine().getPointerToFunction(main_func));

	for (u32 i = 0; i < spu_runtime::g_interpreter_table.size(); i++)
	{
		// Fill exported interpreter table
		spu_runtime::g_interpreter_table[i] = ifuncs[i] ? reinterpret_cast<u64>(m_jit.get_engine().getPointerToFunction(ifuncs[i])) : 0;
	}

	if (!spu_runtime::g_interpreter)
	{
		return nullptr;
	}

	if (g_cfg.core.spu_debug)
	{
		out.flush();
		fs::write_file(m_spurt->get_cache_path() + "spu-ir.log", fs::create + fs::write + fs::append, log);
	}

	return spu_runtime::g_interpreter;
}

// Runtime callout: cooperatively processes pending cpu_flag state
static bool exec_check_state(spu_thread* _spu)
{
	return _spu->check_state();
}

// Runtime fallback: execute one instruction via interpreter function F,
// advancing pc only when F reports normal completion
template <spu_intrp_func_t F>
static void exec_fall(spu_thread* _spu, spu_opcode_t op)
{
	if (F(*_spu, op))
	{
		_spu->pc += 4;
	}
}

// Emit a call to interpreter function F instead of compiling the instruction
template <spu_intrp_func_t F>
void fall(spu_opcode_t op)
{
	std::string name = fmt::format("spu_%s", g_spu_iname.decode(op.opcode));

	if (m_interp_magn)
	{
		call(name, F, m_thread, m_interp_op);
		return;
	}

	update_pc();
	call(name, &exec_fall<F>, m_thread, m_ir->getInt32(op.opcode));
}

[[noreturn]] static void exec_unk(spu_thread*, u32 op)
{
	fmt::throw_exception("Unknown/Illegal instruction (0x%08x)", op);
}

// Unknown/illegal opcode: emits a call that always throws at runtime
void UNK(spu_opcode_t op_unk)
{
	if (m_interp_magn)
	{
		m_ir->CreateStore(m_interp_pc, spu_ptr<u32>(&spu_thread::pc));
		call("spu_unknown", &exec_unk, m_thread, m_ir->getInt32(op_unk.opcode));
		return;
	}

	m_block->block_end = m_ir->GetInsertBlock();
	update_pc();
	call("spu_unknown", &exec_unk, m_thread, m_ir->getInt32(op_unk.opcode));
}

// Runtime callout for STOP: performs stop-and-signal; escapes the compiled
// code if the syscall failed or must be repeated (cpu_flag::again)
static void exec_stop(spu_thread* _spu, u32 code)
{
	if (!_spu->stop_and_signal(code) || _spu->state & cpu_flag::again)
	{
		spu_runtime::g_escape(_spu);
	}

	if (_spu->test_stopped())
	{
		_spu->pc += 4;
		spu_runtime::g_escape(_spu);
	}
}

void STOP(spu_opcode_t op) //
{
	if (m_interp_magn)
	{
		call("spu_syscall", &exec_stop, m_thread, m_ir->CreateAnd(m_interp_op, m_ir->getInt32(0x3fff)));
		return;
	}

	update_pc();
	ensure_gpr_stores();
	call("spu_syscall", &exec_stop, m_thread, m_ir->getInt32(op.opcode & 0x3fff));

	if (g_cfg.core.spu_block_size == spu_block_size_type::safe)
	{
		// Safe block size: terminate the block right after the syscall
		m_block->block_end = m_ir->GetInsertBlock();
		update_pc(m_pos + 4);
		tail_chunk(m_dispatch);
		return;
	}
}

// STOPD is compiled as STOP with code 0x3fff
void STOPD(spu_opcode_t) //
{
	if (m_interp_magn)
	{
		call("spu_syscall", &exec_stop, m_thread, m_ir->getInt32(0x3fff));
		return;
	}

	STOP(spu_opcode_t{0x3fff});
}

// Runtime callout: blocking channel read; escapes on failure or 'again'
static u32 exec_rdch(spu_thread* _spu, u32 ch)
{
	const s64 result = _spu->get_ch_value(ch);

	if (result < 0 || _spu->state & cpu_flag::again)
	{
		spu_runtime::g_escape(_spu);
	}

	static_cast<void>(_spu->test_stopped());
	return static_cast<u32>(result & 0xffffffff);
}

static u32 exec_read_in_mbox(spu_thread* _spu)
{
	// TODO
	return exec_rdch(_spu, SPU_RdInMbox);
}

// Runtime callout: reads the decrementer; yields when a busy-wait loop
// is suspected (large remaining count + loop detection enabled)
static u32 exec_read_dec(spu_thread* _spu)
{
	const u32 res = _spu->read_dec().first;

	if (res > 1500 && g_cfg.core.spu_loop_detection)
	{
		_spu->state += cpu_flag::wait;
		std::this_thread::yield();
		static_cast<void>(_spu->test_stopped());
	}

	return res;
}

static u32 exec_read_events(spu_thread* _spu)
{
	// TODO
	return exec_rdch(_spu, SPU_RdEventStat);
}

// GPR store barrier: prevents previously emitted register stores from being
// reordered past (or eliminated across) this point, e.g. around syscalls
void ensure_gpr_stores()
{
	if (m_block)
	{
		// Make previous stores not able to be reordered beyond this point or be deleted
		std::for_each(m_block->store_context_ctr.begin(), m_block->store_context_ctr.end(), FN(x++));
		m_block->has_gpr_memory_barriers = true;
	}
}

// Emits an inline fast path for a channel read at byte offset 'off': grabs the
// 64-bit channel word (value plus count flag in the sign bit — presumably; see
// the SLT test below) and clears it; if no value was pending, falls back to the
// blocking exec_rdch callout. Returns the merged u32 result (PHI).
llvm::Value* get_rdch(spu_opcode_t op, u32 off, bool atomic)
{
	const auto ptr = _ptr<u64>(m_thread, off);
	llvm::Value* val0;

	if (atomic)
	{
		// Atomically exchange the whole channel word with 0
		const auto val = m_ir->CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, ptr, m_ir->getInt64(0), llvm::MaybeAlign{8}, llvm::AtomicOrdering::Acquire);
		val0 = val;
	}
	else
	{
		// Non-atomic variant: acquire-load then release-store of zero
		const auto val = m_ir->CreateLoad(get_type<u64>(), ptr);
		val->setAtomic(llvm::AtomicOrdering::Acquire);
		m_ir->CreateStore(m_ir->getInt64(0), ptr)->setAtomic(llvm::AtomicOrdering::Release);
		val0 = val;
	}

	const auto _cur = m_ir->GetInsertBlock();
	const auto done = llvm::BasicBlock::Create(m_context, "", m_function);
	const auto wait = llvm::BasicBlock::Create(m_context, "", m_function);
	// Sign bit set => a value was pending; take the fast path
	const auto cond = m_ir->CreateICmpSLT(val0, m_ir->getInt64(0));
	val0 = m_ir->CreateTrunc(val0, get_type<u32>());
	m_ir->CreateCondBr(cond, done, wait);
	m_ir->SetInsertPoint(wait);
	update_pc();
	const auto val1 = call("spu_read_channel", &exec_rdch, m_thread, m_ir->getInt32(op.ra));
	m_ir->CreateBr(done);
	m_ir->SetInsertPoint(done);
	const auto rval = m_ir->CreatePHI(get_type<u32>(), 2);
	rval->addIncoming(val0, _cur);
	rval->addIncoming(val1, wait);
	return rval;
}

// Read Channel: writes the channel value into element 3 (preferred slot)
// of $rt, with the other elements zeroed. Channels with cheap inline state
// are read directly; the rest go through runtime callouts.
void RDCH(spu_opcode_t op) //
{
	value_t<u32> res;

	if (m_interp_magn)
	{
		res.value = call("spu_read_channel", &exec_rdch, m_thread, get_imm<u32>(op.ra).value);
		set_vr(op.rt, insert(splat<u32[4]>(0), 3, res));
		return;
	}

	switch (op.ra)
	{
	case SPU_RdSRR0:
	{
		res.value = m_ir->CreateLoad(get_type<u32>(), spu_ptr<u32>(&spu_thread::srr0));
		break;
	}
	case SPU_RdInMbox:
	{
		update_pc();
		ensure_gpr_stores();
		res.value = call("spu_read_in_mbox", &exec_read_in_mbox, m_thread);
		break;
	}
	case MFC_RdTagStat:
	{
		res.value = get_rdch(op, ::offset32(&spu_thread::ch_tag_stat), false);
		break;
	}
	case MFC_RdTagMask:
	{
		res.value = m_ir->CreateLoad(get_type<u32>(), spu_ptr<u32>(&spu_thread::ch_tag_mask));
		break;
	}
	case SPU_RdSigNotify1:
	{
		update_pc();
		ensure_gpr_stores();
		res.value = get_rdch(op, ::offset32(&spu_thread::ch_snr1), true);
		break;
	}
	case SPU_RdSigNotify2:
	{
		update_pc();
		ensure_gpr_stores();
		res.value = get_rdch(op, ::offset32(&spu_thread::ch_snr2), true);
		break;
	}
	case MFC_RdAtomicStat:
	{
		res.value = get_rdch(op, ::offset32(&spu_thread::ch_atomic_stat), false);
		break;
	}
	case MFC_RdListStallStat:
	{
		res.value = get_rdch(op, ::offset32(&spu_thread::ch_stall_stat), false);
		break;
	}
	case SPU_RdDec:
	{
#if defined(ARCH_X64)
		// Fast path: derive the decrementer from the host TSC, rescaled to the
		// 80 MHz timebase, when the TSC frequency is known and no scaling or
		// loop detection interferes
		if (utils::get_tsc_freq() && !(g_cfg.core.spu_loop_detection) && (g_cfg.core.clocks_scale == 100))
		{
			const auto timestamp = m_ir->CreateLoad(get_type<u64>(), spu_ptr<u64>(&spu_thread::ch_dec_start_timestamp));
			const auto dec_value = m_ir->CreateLoad(get_type<u32>(), spu_ptr<u32>(&spu_thread::ch_dec_value));
			const auto tsc = m_ir->CreateCall(get_intrinsic(llvm::Intrinsic::x86_rdtsc));
			// Split div/mod to avoid overflow while converting tsc -> timebase units
			const auto tscx = m_ir->CreateMul(m_ir->CreateUDiv(tsc, m_ir->getInt64(utils::get_tsc_freq())), m_ir->getInt64(80000000));
			const auto tscm = m_ir->CreateUDiv(m_ir->CreateMul(m_ir->CreateURem(tsc, m_ir->getInt64(utils::get_tsc_freq())), m_ir->getInt64(80000000)), m_ir->getInt64(utils::get_tsc_freq()));
			const auto tsctb = m_ir->CreateAdd(tscx, tscm);
			const auto frz = m_ir->CreateLoad(get_type<u8>(), spu_ptr<u8>(&spu_thread::is_dec_frozen));
			const auto frzev = m_ir->CreateICmpEQ(frz, m_ir->getInt8(0));
			const auto delta = m_ir->CreateTrunc(m_ir->CreateSub(tsctb, timestamp), get_type<u32>());
			// Frozen decrementer: elapsed time does not count
			const auto deltax = m_ir->CreateSelect(frzev, delta, m_ir->getInt32(0));
			res.value = m_ir->CreateSub(dec_value, deltax);
			break;
		}
#endif
		res.value = call("spu_read_decrementer", &exec_read_dec, m_thread);
		break;
	}
	case SPU_RdEventMask:
	{
		// Event mask lives in the upper 32 bits of ch_events
		const auto value = m_ir->CreateLoad(get_type<u64>(), spu_ptr<u64>(&spu_thread::ch_events));
		value->setAtomic(llvm::AtomicOrdering::Acquire);
		res.value = m_ir->CreateTrunc(m_ir->CreateLShr(value, 32), get_type<u32>());
		break;
	}
	case SPU_RdEventStat:
	{
		update_pc();

		if (g_cfg.savestate.compatible_mode)
		{
			ensure_gpr_stores();
		}
		else
		{
			// Mark the thread unsavable for the duration of the callout
			m_ir->CreateStore(m_ir->getInt8(1), spu_ptr<u8>(&spu_thread::unsavable));
		}

		res.value = call("spu_read_events", &exec_read_events, m_thread);

		if (!g_cfg.savestate.compatible_mode)
		{
			m_ir->CreateStore(m_ir->getInt8(0), spu_ptr<u8>(&spu_thread::unsavable));
		}

		break;
	}
	case SPU_RdMachStat:
	{
		res.value = m_ir->CreateZExt(m_ir->CreateLoad(get_type<u8>(), spu_ptr<u8>(&spu_thread::interrupts_enabled)), get_type<u32>());
		res.value = m_ir->CreateOr(res.value, m_ir->CreateAnd(m_ir->CreateLoad(get_type<u32>(), spu_ptr<u32>(&spu_thread::thread_type)), m_ir->getInt32(2)));
		break;
	}
	default:
	{
		update_pc();
		ensure_gpr_stores();
		res.value = call("spu_read_channel", &exec_rdch, m_thread, m_ir->getInt32(op.ra));
		break;
	}
	}

	set_vr(op.rt, insert(splat<u32[4]>(0), 3, res));
}

// Runtime callout: channel count query
static u32 exec_rchcnt(spu_thread* _spu, u32 ch)
{
	return _spu->get_ch_count(ch);
}

static u32 exec_get_events(spu_thread* _spu, u32 mask)
{
	return _spu->get_events(mask).count;
}

// Emits an inline channel-count read: count flag is extracted from the 64-bit
// channel word; 'inv' optionally inverts it (for write channels, where
// "count" means free space rather than pending data)
llvm::Value* get_rchcnt(u32 off, u64 inv = 0)
{
	const auto val = m_ir->CreateLoad(get_type<u64>(), _ptr<u64>(m_thread, off));
	val->setAtomic(llvm::AtomicOrdering::Acquire);
	const auto shv = m_ir->CreateLShr(val, spu_channel::off_count);
	return m_ir->CreateTrunc(m_ir->CreateXor(shv, inv), get_type<u32>());
}

// Like get_rchcnt, but blocks on the channel first (used for detected
// RCHCNT polling loops to avoid busy-waiting)
llvm::Value* wait_rchcnt(u32 off, u32 inv = 0)
{
	auto wait_on_channel = [](spu_thread* _spu, spu_channel* ch, u32 is_read) -> u32
	{
		if (is_read)
		{
			ch->pop_wait(*_spu, false);
		}
		else
		{
			ch->push_wait(*_spu, 0, false);
		}

		return ch->get_count();
	};

	return m_ir->CreateXor(call("wait_on_spu_channel", +wait_on_channel, m_thread, _ptr<u64>(m_thread, off), m_ir->getInt32(inv == 0u)), m_ir->getInt32(inv));
}

// Read Channel Count: writes the count into element 3 of $rt (rest zeroed)
void RCHCNT(spu_opcode_t op) //
{
	value_t<u32> res{};

	if (m_interp_magn)
	{
		res.value = call("spu_read_channel_count", &exec_rchcnt, m_thread, get_imm<u32>(op.ra).value);
		set_vr(op.rt, insert(splat<u32[4]>(0), 3, res));
		return;
	}

	switch (op.ra)
	{
	case SPU_WrOutMbox:
	case SPU_WrOutIntrMbox:
	case SPU_RdSigNotify1:
	case SPU_RdSigNotify2:
	case SPU_RdInMbox:
	case SPU_RdEventStat:
	{
		bool loop_is_likely = op.ra == SPU_RdSigNotify1 || op.ra == SPU_RdSigNotify2;

		for (u32 block_start : m_block->bb->preds)
		{
			if (block_start >=
			    m_pos)
			{
				// A predecessor at/after the current address implies a loop
				loop_is_likely = true;
				break;
			}
		}

		if (loop_is_likely || g_cfg.savestate.compatible_mode)
		{
			ensure_gpr_stores();
			check_state(m_pos, false);
		}

		break;
	}
	default:
	{
		break;
	}
	}

	// Detected RCHCNT polling loop: block on the channel instead of spinning
	if (m_inst_attrs[(m_pos - m_base) / 4] == inst_attr::rchcnt_loop)
	{
		switch (op.ra)
		{
		case SPU_WrOutMbox:
		{
			res.value = wait_rchcnt(::offset32(&spu_thread::ch_out_mbox), true);
			break;
		}
		case SPU_WrOutIntrMbox:
		{
			res.value = wait_rchcnt(::offset32(&spu_thread::ch_out_intr_mbox), true);
			break;
		}
		case SPU_RdSigNotify1:
		{
			res.value = wait_rchcnt(::offset32(&spu_thread::ch_snr1));
			break;
		}
		case SPU_RdSigNotify2:
		{
			res.value = wait_rchcnt(::offset32(&spu_thread::ch_snr2));
			break;
		}
		case SPU_RdInMbox:
		{
			auto wait_inbox = [](spu_thread* _spu, spu_channel_4_t* ch) -> u32
			{
				return ch->pop_wait(*_spu, false), ch->get_count();
			};

			res.value = call("wait_spu_inbox", +wait_inbox, m_thread, spu_ptr<void*>(&spu_thread::ch_in_mbox));
			break;
		}
		default: break;
		}

		if (res.value)
		{
			set_vr(op.rt, insert(splat<u32[4]>(0), 3, res));
			return;
		}
	}

	switch (op.ra)
	{
	case SPU_WrOutMbox:
	{
		res.value = get_rchcnt(::offset32(&spu_thread::ch_out_mbox), true);
		break;
	}
	case SPU_WrOutIntrMbox:
	{
		res.value = get_rchcnt(::offset32(&spu_thread::ch_out_intr_mbox), true);
		break;
	}
	case MFC_RdTagStat:
	{
		res.value = get_rchcnt(::offset32(&spu_thread::ch_tag_stat));
		break;
	}
	case MFC_RdListStallStat:
	{
		res.value = get_rchcnt(::offset32(&spu_thread::ch_stall_stat));
		break;
	}
	case SPU_RdSigNotify1:
	{
		res.value = get_rchcnt(::offset32(&spu_thread::ch_snr1));
		break;
	}
	case SPU_RdSigNotify2:
	{
		res.value = get_rchcnt(::offset32(&spu_thread::ch_snr2));
		break;
	}
	case MFC_RdAtomicStat:
	{
		res.value = get_rchcnt(::offset32(&spu_thread::ch_atomic_stat));
		break;
	}
	case MFC_WrTagUpdate:
	{
		res.value = m_ir->getInt32(1);
		break;
	}
	case MFC_Cmd:
	{
		// MFC queue capacity is 16; count = free slots
		res.value = m_ir->CreateLoad(get_type<u32>(), spu_ptr<u32>(&spu_thread::mfc_size));
		res.value = m_ir->CreateSub(m_ir->getInt32(16), res.value);
		break;
	}
	case SPU_RdInMbox:
	{
		// Inbox count is packed in bits [10:8] of the channel word
		const auto value = m_ir->CreateLoad(get_type<u32>(), spu_ptr<u32>(&spu_thread::ch_in_mbox));
		value->setAtomic(llvm::AtomicOrdering::Acquire);
		res.value = value;
		res.value = m_ir->CreateLShr(res.value, 8);
		res.value = m_ir->CreateAnd(res.value, 7);
		break;
	}
	case SPU_RdEventStat:
	{
		const auto mask = m_ir->CreateTrunc(m_ir->CreateLShr(m_ir->CreateLoad(get_type<u64>(), spu_ptr<u64>(&spu_thread::ch_events)), 32), get_type<u32>());
		res.value = call("spu_get_events", &exec_get_events, m_thread, mask);
		break;
	}

	// Channels with a constant count of 1:
	case SPU_WrEventMask:
	case SPU_WrEventAck:
	case SPU_WrDec:
	case SPU_RdDec:
	case SPU_RdEventMask:
	case SPU_RdMachStat:
	case SPU_WrSRR0:
	case SPU_RdSRR0:
	case SPU_Set_Bkmk_Tag:
	case SPU_PM_Start_Ev:
	case SPU_PM_Stop_Ev:
	case MFC_RdTagMask:
	case MFC_LSA:
	case MFC_EAH:
	case MFC_EAL:
	case MFC_Size:
	case MFC_TagID:
	case MFC_WrTagMask:
	case MFC_WrListStallAck:
	{
		res.value = m_ir->getInt32(1);
		break;
	}
	default:
	{
		res.value = call("spu_read_channel_count", &exec_rchcnt, m_thread, m_ir->getInt32(op.ra));
		break;
	}
	}

	set_vr(op.rt, insert(splat<u32[4]>(0), 3, res));
}

// Runtime callout: blocking channel write; escapes on failure or 'again'
static void exec_wrch(spu_thread* _spu, u32 ch, u32 value)
{
	if (!_spu->set_ch_value(ch, value) || _spu->state & cpu_flag::again)
	{
		spu_runtime::g_escape(_spu);
	}

	static_cast<void>(_spu->test_stopped());
}

// Runtime callout: clears the stall bit (0x80) for all queued list commands
// with the given tag, then resumes MFC processing
static void exec_list_unstall(spu_thread* _spu, u32 tag)
{
	for (u32 i = 0; i < _spu->mfc_size; i++)
	{
		if (_spu->mfc_queue[i].tag == (tag | 0x80))
		{
			_spu->mfc_queue[i].tag &= 0x7f;
		}
	}

	_spu->do_mfc();
}

// Runtime callout: executes the pending MFC command. The non-Saveable
// variant marks the thread unsavable around the command.
template <bool Saveable>
static void exec_mfc_cmd(spu_thread* _spu)
{
	if constexpr (!Saveable)
	{
		_spu->unsavable = true;
	}

	if (!_spu->process_mfc_cmd() || _spu->state & cpu_flag::again)
	{
		fmt::throw_exception("exec_mfc_cmd(): Should not abort!");
	}

	static_cast<void>(_spu->test_stopped());

	if constexpr (!Saveable)
	{
		_spu->unsavable = false;
	}
}

// Write Channel: takes the value from element 3 (preferred slot) of $rt.
// Many MFC channels are tracked in fixed registers instead of calling out.
void WRCH(spu_opcode_t op) //
{
	const auto val = eval(extract(get_vr(op.rt), 3));

	if (m_interp_magn)
	{
		call("spu_write_channel", &exec_wrch, m_thread,
{ clsa = ci->getZExtValue(); } u32 stride = 16; if (m_use_avx && csize >= 32 && !(clsa % 32)) { vtype = get_type<u8[32]>(); stride = 32; } if (csize > 0 && csize <= 16) { // Generate single copy operation m_ir->CreateStore(m_ir->CreateLoad(vtype, src), dst); } else if (csize <= stride * 16 && !(csize % 32)) { // Generate fixed sequence of copy operations for (u32 i = 0; i < csize; i += stride) { const auto _src = m_ir->CreateGEP(get_type<u8>(), src, m_ir->getInt32(i)); const auto _dst = m_ir->CreateGEP(get_type<u8>(), dst, m_ir->getInt32(i)); if (csize - i < stride) { m_ir->CreateStore(m_ir->CreateLoad(get_type<u8[16]>(), _src), _dst); } else { m_ir->CreateAlignedStore(m_ir->CreateAlignedLoad(vtype, _src, llvm::MaybeAlign{16}), _dst, llvm::MaybeAlign{16}); } } } else if (csize) { // TODO auto spu_memcpy = [](u8* dst, const u8* src, u32 size) { std::memcpy(dst, src, size); }; call("spu_memcpy", +spu_memcpy, dst, src, zext<u32>(size).eval(m_ir)); } // Disable certain thing m_ir->CreateStore(m_ir->getInt32(0), spu_ptr<u32>(&spu_thread::last_faddr)); m_ir->CreateBr(next); break; } case MFC_BARRIER_CMD: case MFC_EIEIO_CMD: case MFC_SYNC_CMD: { const auto cond = m_ir->CreateIsNull(m_ir->CreateLoad(get_type<u32>(), spu_ptr<u32>(&spu_thread::mfc_size))); m_ir->CreateCondBr(cond, exec, fail, m_md_likely); m_ir->SetInsertPoint(exec); m_ir->CreateFence(llvm::AtomicOrdering::SequentiallyConsistent); m_ir->CreateBr(next); break; } default: { // TODO spu_log.error("[0x%x] MFC_Cmd: unknown command (0x%x)", m_pos, cmd); m_ir->CreateBr(next); m_ir->SetInsertPoint(exec); m_ir->CreateUnreachable(); break; } } // Fallback: enqueue the command m_ir->SetInsertPoint(fail); // Get MFC slot, redirect to invalid memory address const auto slot = m_ir->CreateLoad(get_type<u32>(), spu_ptr<u32>(&spu_thread::mfc_size)); const auto off0 = m_ir->CreateAdd(m_ir->CreateMul(slot, m_ir->getInt32(sizeof(spu_mfc_cmd))), m_ir->getInt32(::offset32(&spu_thread::mfc_queue))); const auto ptr0 = 
m_ir->CreateGEP(get_type<u8>(), m_thread, m_ir->CreateZExt(off0, get_type<u64>())); const auto ptr1 = m_ir->CreateGEP(get_type<u8>(), m_memptr, m_ir->getInt64(0xffdeadf0)); const auto pmfc = m_ir->CreateSelect(m_ir->CreateICmpULT(slot, m_ir->getInt32(16)), ptr0, ptr1); m_ir->CreateStore(ci, _ptr<u8>(pmfc, ::offset32(&spu_mfc_cmd::cmd))); switch (u64 cmd = ci->getZExtValue()) { case MFC_GETLLAR_CMD: case MFC_PUTLLC_CMD: case MFC_PUTLLUC_CMD: case MFC_PUTQLLUC_CMD: { break; } case MFC_PUTL_CMD: case MFC_PUTLB_CMD: case MFC_PUTLF_CMD: case MFC_PUTRL_CMD: case MFC_PUTRLB_CMD: case MFC_PUTRLF_CMD: case MFC_GETL_CMD: case MFC_GETLB_CMD: case MFC_GETLF_CMD: { break; } case MFC_SDCRZ_CMD: { break; } case MFC_SNDSIG_CMD: case MFC_SNDSIGB_CMD: case MFC_SNDSIGF_CMD: case MFC_PUT_CMD: case MFC_PUTB_CMD: case MFC_PUTF_CMD: case MFC_PUTR_CMD: case MFC_PUTRB_CMD: case MFC_PUTRF_CMD: case MFC_GET_CMD: case MFC_GETB_CMD: case MFC_GETF_CMD: { m_ir->CreateStore(tag.value, _ptr<u8>(pmfc, ::offset32(&spu_mfc_cmd::tag))); m_ir->CreateStore(size.value, _ptr<u16>(pmfc, ::offset32(&spu_mfc_cmd::size))); m_ir->CreateStore(lsa.value, _ptr<u32>(pmfc, ::offset32(&spu_mfc_cmd::lsa))); m_ir->CreateStore(eal.value, _ptr<u32>(pmfc, ::offset32(&spu_mfc_cmd::eal))); m_ir->CreateStore(m_ir->CreateOr(m_ir->CreateLoad(get_type<u32>(), pf), mask), pf); if (cmd & MFC_BARRIER_MASK) m_ir->CreateStore(m_ir->CreateOr(m_ir->CreateLoad(get_type<u32>(), pb), mask), pb); break; } case MFC_BARRIER_CMD: case MFC_EIEIO_CMD: case MFC_SYNC_CMD: { m_ir->CreateStore(m_ir->getInt32(-1), pb); m_ir->CreateStore(m_ir->CreateOr(m_ir->CreateLoad(get_type<u32>(), pf), mask), pf); break; } default: { m_ir->CreateUnreachable(); break; } } m_ir->CreateStore(m_ir->CreateAdd(slot, m_ir->getInt32(1)), spu_ptr<u32>(&spu_thread::mfc_size)); m_ir->CreateBr(next); m_ir->SetInsertPoint(next); return; } // Fallback to unoptimized WRCH implementation (TODO) spu_log.warning("[0x%x] MFC_Cmd: $%u is not a constant", m_pos, +op.rt); break; } 
case MFC_WrListStallAck: { const auto mask = eval(splat<u32>(1) << (val & 0x1f)); const auto _ptr = spu_ptr<u32>(&spu_thread::ch_stall_mask); const auto _old = m_ir->CreateLoad(get_type<u32>(), _ptr); const auto _new = m_ir->CreateAnd(_old, m_ir->CreateNot(mask.value)); m_ir->CreateStore(_new, _ptr); const auto next = llvm::BasicBlock::Create(m_context, "", m_function); const auto _mfc = llvm::BasicBlock::Create(m_context, "", m_function); m_ir->CreateCondBr(m_ir->CreateICmpNE(_old, _new), _mfc, next); m_ir->SetInsertPoint(_mfc); ensure_gpr_stores(); update_pc(); call("spu_list_unstall", &exec_list_unstall, m_thread, eval(val & 0x1f).value); m_ir->CreateBr(next); m_ir->SetInsertPoint(next); return; } case SPU_WrDec: { call("spu_get_events", &exec_get_events, m_thread, m_ir->getInt32(SPU_EVENT_TM)); #if defined(ARCH_X64) if (utils::get_tsc_freq() && !(g_cfg.core.spu_loop_detection) && (g_cfg.core.clocks_scale == 100)) { const auto tsc = m_ir->CreateCall(get_intrinsic(llvm::Intrinsic::x86_rdtsc)); const auto tscx = m_ir->CreateMul(m_ir->CreateUDiv(tsc, m_ir->getInt64(utils::get_tsc_freq())), m_ir->getInt64(80000000)); const auto tscm = m_ir->CreateUDiv(m_ir->CreateMul(m_ir->CreateURem(tsc, m_ir->getInt64(utils::get_tsc_freq())), m_ir->getInt64(80000000)), m_ir->getInt64(utils::get_tsc_freq())); const auto tsctb = m_ir->CreateAdd(tscx, tscm); m_ir->CreateStore(tsctb, spu_ptr<u64>(&spu_thread::ch_dec_start_timestamp)); } else #endif { m_ir->CreateStore(call("get_timebased_time", &get_timebased_time), spu_ptr<u64>(&spu_thread::ch_dec_start_timestamp)); } m_ir->CreateStore(val.value, spu_ptr<u32>(&spu_thread::ch_dec_value)); m_ir->CreateStore(m_ir->getInt8(0), spu_ptr<u8>(&spu_thread::is_dec_frozen)); return; } case SPU_Set_Bkmk_Tag: case SPU_PM_Start_Ev: case SPU_PM_Stop_Ev: { return; } default: break; } update_pc(); ensure_gpr_stores(); call("spu_write_channel", &exec_wrch, m_thread, m_ir->getInt32(op.ra), val.value); } void LNOP(spu_opcode_t) // { } void 
NOP(spu_opcode_t) //
{
	// No operation (execute slot).
}

void SYNC(spu_opcode_t) //
{
	// This instruction must be used following a store instruction that modifies the instruction stream.
	m_ir->CreateFence(llvm::AtomicOrdering::SequentiallyConsistent);

	if (g_cfg.core.spu_block_size == spu_block_size_type::safe && !m_interp_magn)
	{
		// In safe block mode, end the block and re-dispatch so newly written code is picked up.
		m_block->block_end = m_ir->GetInsertBlock();
		update_pc(m_pos + 4);
		tail_chunk(m_dispatch);
	}
}

void DSYNC(spu_opcode_t) //
{
	// This instruction forces all earlier load, store, and channel instructions to complete before proceeding.
	m_ir->CreateFence(llvm::AtomicOrdering::SequentiallyConsistent);
}

void MFSPR(spu_opcode_t op) //
{
	// Check SPUInterpreter for notes. Reads as zero.
	set_vr(op.rt, splat<u32[4]>(0));
}

void MTSPR(spu_opcode_t) //
{
	// Check SPUInterpreter for notes. Writes are ignored.
}

// Helper: high-halfword multiply expression ((a >> 16) * b) << 16, used by MPYH and the mpy32 pattern in A().
template <typename TA, typename TB>
auto mpyh(TA&& a, TB&& b)
{
	return bitcast<u32[4]>(bitcast<u16[8]>((std::forward<TA>(a) >> 16)) * bitcast<u16[8]>(std::forward<TB>(b))) << 16;
}

// Helper: unsigned 16x16 -> 32 multiply of the low halfwords of each element.
template <typename TA, typename TB>
auto mpyu(TA&& a, TB&& b)
{
	return (std::forward<TA>(a) << 16 >> 16) * (std::forward<TB>(b) << 16 >> 16);
}

// SF: rt = rb - ra
void SF(spu_opcode_t op)
{
	set_vr(op.rt, get_vr(op.rb) - get_vr(op.ra));
}

// OR: rt = ra | rb
void OR(spu_opcode_t op)
{
	set_vr(op.rt, get_vr(op.ra) | get_vr(op.rb));
}

// BG (borrow generate): rt = (a <= b) ? 1 : 0 per u32 element.
void BG(spu_opcode_t op)
{
	const auto [a, b] = get_vrs<u32[4]>(op.ra, op.rb);
	set_vr(op.rt, zext<u32[4]>(a <= b));
}

// SFH: halfword rt = rb - ra
void SFH(spu_opcode_t op)
{
	set_vr(op.rt, get_vr<u16[8]>(op.rb) - get_vr<u16[8]>(op.ra));
}

// NOR: rt = ~(ra | rb)
void NOR(spu_opcode_t op)
{
	set_vr(op.rt, ~(get_vr(op.ra) | get_vr(op.rb)));
}

// ABSDB: absolute difference of bytes.
void ABSDB(spu_opcode_t op)
{
	const auto [a, b] = get_vrs<u8[16]>(op.ra, op.rb);
	set_vr(op.rt, absd(a, b));
}

// ROT: rotate left each u32 element by the corresponding element of rb.
void ROT(spu_opcode_t op)
{
	const auto [a, b] = get_vrs<u32[4]>(op.ra, op.rb);
	set_vr(op.rt, rol(a, b));
}

// ROTM: logical shift right by (-b); uses known-bits info to pick a plain shift when the count fits.
void ROTM(spu_opcode_t op)
{
	const auto [a, b] = get_vrs<u32[4]>(op.ra, op.rb);
	auto minusb = eval(-b);

	// If b is already a negation, use its operand directly as the shift count.
	if (auto [ok, x] = match_expr(b, -match<u32[4]>()); ok)
	{
		minusb = eval(x);
	}

	// Bit 5 of the count known zero -> count < 32, safe to use a native shift.
	if (auto k = get_known_bits(minusb); !!(k.Zero & 32))
	{
		set_vr(op.rt, a >> (minusb & 31));
		return;
	}

	// Otherwise use the saturating ("infinite") shift helper, which yields 0 for counts >= 32.
	set_vr(op.rt, inf_lshr(a, minusb & 63));
}

// ROTMA: arithmetic shift right by (-b); same known-bits strategy as ROTM.
void ROTMA(spu_opcode_t op)
{
	const auto [a, b] = get_vrs<s32[4]>(op.ra, op.rb);
	auto minusb = eval(-b);

	if (auto [ok, x] = match_expr(b, -match<s32[4]>()); ok)
	{
		minusb = eval(x);
	}

	if (auto k = get_known_bits(minusb); !!(k.Zero & 32))
	{
		set_vr(op.rt, a >> (minusb & 31));
		return;
	}

	set_vr(op.rt, inf_ashr(a, minusb & 63));
}

// SHL: shift left word; native shift when count provably < 32, else saturating helper.
void SHL(spu_opcode_t op)
{
	const auto [a, b] = get_vrs<u32[4]>(op.ra, op.rb);

	if (auto k = get_known_bits(b); !!(k.Zero & 32))
	{
		set_vr(op.rt, a << (b & 31));
		return;
	}

	set_vr(op.rt, inf_shl(a, b & 63));
}

// ROTH: rotate left each u16 element.
void ROTH(spu_opcode_t op)
{
	const auto [a, b] = get_vrs<u16[8]>(op.ra, op.rb);
	set_vr(op.rt, rol(a, b));
}

// ROTHM: halfword logical shift right by (-b); see ROTM for the pattern.
void ROTHM(spu_opcode_t op)
{
	const auto [a, b] = get_vrs<u16[8]>(op.ra, op.rb);
	auto minusb = eval(-b);

	if (auto [ok, x] = match_expr(b, -match<u16[8]>()); ok)
	{
		minusb = eval(x);
	}

	if (auto k = get_known_bits(minusb); !!(k.Zero & 16))
	{
		set_vr(op.rt, a >> (minusb & 15));
		return;
	}

	set_vr(op.rt, inf_lshr(a, minusb & 31));
}

// ROTMAH: halfword arithmetic shift right by (-b); see ROTMA.
void ROTMAH(spu_opcode_t op)
{
	const auto [a, b] = get_vrs<s16[8]>(op.ra, op.rb);
	auto minusb = eval(-b);

	if (auto [ok, x] = match_expr(b, -match<s16[8]>()); ok)
	{
		minusb = eval(x);
	}

	if (auto k = get_known_bits(minusb); !!(k.Zero & 16))
	{
		set_vr(op.rt, a >> (minusb & 15));
		return;
	}

	set_vr(op.rt, inf_ashr(a, minusb & 31));
}

// SHLH: halfword shift left; see SHL.
void SHLH(spu_opcode_t op)
{
	const auto [a, b] = get_vrs<u16[8]>(op.ra, op.rb);

	if (auto k = get_known_bits(b); !!(k.Zero & 16))
	{
		set_vr(op.rt, a << (b & 15));
		return;
	}

	set_vr(op.rt, inf_shl(a, b & 31));
}

// ROTI: rotate word left by immediate.
void ROTI(spu_opcode_t op)
{
	const auto a = get_vr<u32[4]>(op.ra);
	const auto i = get_imm<u32[4]>(op.i7, false);
	set_vr(op.rt, rol(a, i));
}

// ROTMI: logical shift right word by (-i7).
void ROTMI(spu_opcode_t op)
{
	const auto a = get_vr<u32[4]>(op.ra);
	const auto i = get_imm<u32[4]>(op.i7, false);
	set_vr(op.rt, inf_lshr(a, -i & 63));
}

// ROTMAI: arithmetic shift right word by (-i7).
void ROTMAI(spu_opcode_t op)
{
	const auto a = get_vr<s32[4]>(op.ra);
	const auto i = get_imm<s32[4]>(op.i7,
false);
	set_vr(op.rt, inf_ashr(a, -i & 63));
}

// SHLI: shift left word by immediate.
void SHLI(spu_opcode_t op)
{
	const auto a = get_vr<u32[4]>(op.ra);
	const auto i = get_imm<u32[4]>(op.i7, false);
	set_vr(op.rt, inf_shl(a, i & 63));
}

// ROTHI: rotate halfword left by immediate.
void ROTHI(spu_opcode_t op)
{
	const auto a = get_vr<u16[8]>(op.ra);
	const auto i = get_imm<u16[8]>(op.i7, false);
	set_vr(op.rt, rol(a, i));
}

// ROTHMI: logical shift right halfword by (-i7).
void ROTHMI(spu_opcode_t op)
{
	const auto a = get_vr<u16[8]>(op.ra);
	const auto i = get_imm<u16[8]>(op.i7, false);
	set_vr(op.rt, inf_lshr(a, -i & 31));
}

// ROTMAHI: arithmetic shift right halfword by (-i7).
void ROTMAHI(spu_opcode_t op)
{
	const auto a = get_vr<s16[8]>(op.ra);
	const auto i = get_imm<s16[8]>(op.i7, false);
	set_vr(op.rt, inf_ashr(a, -i & 31));
}

// SHLHI: shift left halfword by immediate.
void SHLHI(spu_opcode_t op)
{
	const auto a = get_vr<u16[8]>(op.ra);
	const auto i = get_imm<u16[8]>(op.i7, false);
	set_vr(op.rt, inf_shl(a, i & 31));
}

// A: word add; also recognizes the mpyh(a,b)+mpyh(b,a)+mpyu(a,b) idiom and fuses it into a full 32-bit multiply.
void A(spu_opcode_t op)
{
	if (auto [a, b] = match_vrs<u32[4]>(op.ra, op.rb); a && b)
	{
		static const auto MP = match<u32[4]>();

		if (auto [ok, a0, b0, b1, a1] = match_expr(a, mpyh(MP, MP) + mpyh(MP, MP)); ok)
		{
			if (auto [ok, a2, b2] = match_expr(b, mpyu(MP, MP)); ok && a2.eq(a0, a1) && b2.eq(b0, b1))
			{
				// 32-bit multiplication
				spu_log.notice("mpy32 in %s at 0x%05x", m_hash, m_pos);
				set_vr(op.rt, a0 * b0);
				return;
			}
		}
	}

	set_vr(op.rt, get_vr(op.ra) + get_vr(op.rb));
}

// AND: bitwise and; tries to reuse an operand's existing vector type to avoid extra bitcasts.
void AND(spu_opcode_t op)
{
	if (match_vr<u8[16], u16[8], u64[2]>(op.ra, [&](auto a, auto /*MP1*/)
	{
		if (auto b = match_vr_as(a, op.rb))
		{
			set_vr(op.rt, a & b);
			return true;
		}

		return match_vr<u8[16], u16[8], u64[2]>(op.rb, [&](auto /*b*/, auto /*MP2*/)
		{
			set_vr(op.rt, a & get_vr_as(a, op.rb));
			return true;
		});
	}))
	{
		return;
	}

	set_vr(op.rt, get_vr(op.ra) & get_vr(op.rb));
}

// CG (carry generate): rt = (a + b overflows) ? 1 : 0 per u32 element.
void CG(spu_opcode_t op)
{
	const auto [a, b] = get_vrs<u32[4]>(op.ra, op.rb);
	set_vr(op.rt, zext<u32[4]>(a + b < a));
}

// AH: halfword add.
void AH(spu_opcode_t op)
{
	set_vr(op.rt, get_vr<u16[8]>(op.ra) + get_vr<u16[8]>(op.rb));
}

// NAND: rt = ~(ra & rb)
void NAND(spu_opcode_t op)
{
	set_vr(op.rt, ~(get_vr(op.ra) & get_vr(op.rb)));
}

// AVGB: average of bytes.
void AVGB(spu_opcode_t op)
{
	set_vr(op.rt, avg(get_vr<u8[16]>(op.ra), get_vr<u8[16]>(op.rb)));
}

void GB(spu_opcode_t op)
{
	// GFNI trick to extract selected bit from bytes
	// By treating the first input as constant, and the second input as variable,
	// with only 1 bit set in our constant, gf2p8affineqb will extract that selected bit
	// from each byte of the second operand
	if (m_use_gfni)
	{
		const auto a = get_vr<u8[16]>(op.ra);
		const auto as = zshuffle(a, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 12, 8, 4, 0);
		set_vr(op.rt, gf2p8affineqb(build<u8[16]>(0x0, 0x0, 0x0, 0x0, 0x01, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x01, 0x0, 0x0, 0x0), as, 0x0));
		return;
	}

	// Generic path: gather bit 0 of each word into a 4-bit mask placed in the preferred slot.
	const auto a = get_vr<s32[4]>(op.ra);
	const auto m = zext<u32>(bitcast<i4>(trunc<bool[4]>(a)));
	set_vr(op.rt, insert(splat<u32[4]>(0), 3, eval(m)));
}

// GBH: gather bit 0 of each halfword; GFNI fast path plus generic mask path (see GB).
void GBH(spu_opcode_t op)
{
	if (m_use_gfni)
	{
		const auto a = get_vr<u8[16]>(op.ra);
		const auto as = zshuffle(a, 16, 16, 16, 16, 16, 16, 16, 16, 14, 12, 10, 8, 6, 4, 2, 0);
		set_vr(op.rt, gf2p8affineqb(build<u8[16]>(0x0, 0x0, 0x0, 0x0, 0x01, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x01, 0x0, 0x0, 0x0), as, 0x0));
		return;
	}

	const auto a = get_vr<s16[8]>(op.ra);
	const auto m = zext<u32>(bitcast<u8>(trunc<bool[8]>(a)));
	set_vr(op.rt, insert(splat<u32[4]>(0), 3, eval(m)));
}

// GBB: gather bit 0 of each byte; GFNI fast path plus generic mask path (see GB).
void GBB(spu_opcode_t op)
{
	const auto a = get_vr<u8[16]>(op.ra);

	if (m_use_gfni)
	{
		const auto as = zshuffle(a, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
		const auto m = gf2p8affineqb(build<u8[16]>(0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x01, 0x01, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0), as, 0x0);
		set_vr(op.rt, zshuffle(m, 16, 16, 16, 16, 16, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
		return;
	}

	const auto m = zext<u32>(bitcast<u16>(trunc<bool[16]>(a)));
	set_vr(op.rt, insert(splat<u32[4]>(0), 3, eval(m)));
}

void FSM(spu_opcode_t op)
{
	// FSM following a comparison instruction
	if (match_vr<s8[16], s16[8], s32[4], s64[2]>(op.ra, [&](auto c, auto MP)
	{
		using VT = typename decltype(MP)::type;

		if (auto [ok, x] = match_expr(c,
sext<VT>(match<bool[std::extent_v<VT>]>())); ok)
		{
			// ra is already a sign-extended comparison mask: broadcasting its preferred slot gives the FSM result.
			set_vr(op.rt, (splat_scalar(c)));
			return true;
		}

		return false;
	}))
	{
		return;
	}

	// Generic path: expand the low 4 bits of the preferred word into per-word masks.
	const auto v = extract(get_vr(op.ra), 3);
	const auto m = bitcast<bool[4]>(trunc<i4>(v));
	set_vr(op.rt, sext<s32[4]>(m));
}

// FSMH: expand the low 8 bits of the preferred word into per-halfword masks.
void FSMH(spu_opcode_t op)
{
	const auto v = extract(get_vr(op.ra), 3);
	const auto m = bitcast<bool[8]>(trunc<u8>(v));
	set_vr(op.rt, sext<s16[8]>(m));
}

// FSMB: expand the low 16 bits of the preferred word into per-byte masks.
void FSMB(spu_opcode_t op)
{
	const auto v = extract(get_vr(op.ra), 3);
	const auto m = bitcast<bool[16]>(trunc<u16>(v));
	set_vr(op.rt, sext<s8[16]>(m));
}

// Helper: full 16-byte reversal, used to recognize values loaded with swapped endianness.
template <typename TA>
static auto byteswap(TA&& a)
{
	return zshuffle(std::forward<TA>(a), 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
}

// ROTQBYBI: rotate quadword left by (rb >> 3) bytes, via a byte shuffle.
void ROTQBYBI(spu_opcode_t op)
{
	const auto a = get_vr<u8[16]>(op.ra);

	// Data with swapped endian from a load instruction
	if (auto [ok, as] = match_expr(a, byteswap(match<u8[16]>())); ok)
	{
		const auto sc = build<u8[16]>(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
		const auto sh = sc + (splat_scalar(get_vr<u8[16]>(op.rb)) >> 3);

		if (m_use_avx512_icl)
		{
			set_vr(op.rt, vpermb(as, sh));
			return;
		}
		set_vr(op.rt, pshufb(as, (sh & 0xf)));
		return;
	}

	const auto sc = build<u8[16]>(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
	const auto sh = sc - (splat_scalar(get_vr<u8[16]>(op.rb)) >> 3);

	if (m_use_avx512_icl)
	{
		set_vr(op.rt, vpermb(a, sh));
		return;
	}
	set_vr(op.rt, pshufb(a, (sh & 0xf)));
}

// ROTQMBYBI: shift quadword right by -(rb >> 3) bytes, via a byte shuffle (out-of-range indices zero-fill).
void ROTQMBYBI(spu_opcode_t op)
{
	const auto a = get_vr<u8[16]>(op.ra);
	const auto b = get_vr<s32[4]>(op.rb);
	auto minusb = eval(-(b >> 3));

	// Recognize the common (7 - x) count construction and use (x >> 3) directly.
	if (auto [ok, v0, v1] = match_expr(b, match<s32[4]>() - match<s32[4]>()); ok)
	{
		if (auto [ok1, data] = get_const_vector(v0.value, m_pos); ok1)
		{
			if (data == v128::from32p(7))
			{
				minusb = eval(v1 >> 3);
			}
		}
	}

	const auto minusbx = eval(bitcast<u8[16]>(minusb) & 0x1f);

	// Data with swapped endian from a load instruction
	if (auto [ok, as] = match_expr(a, byteswap(match<u8[16]>())); ok)
	{
		const auto sc = build<u8[16]>(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
		const auto sh = sc - splat_scalar(minusbx);
		set_vr(op.rt, pshufb(as, sh));
		return;
	}

	const auto sc = build<u8[16]>(112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127);
	const auto sh = sc + splat_scalar(minusbx);
	set_vr(op.rt, pshufb(a, sh));
}

// SHLQBYBI: shift quadword left by (rb >> 3) bytes, via a byte shuffle.
void SHLQBYBI(spu_opcode_t op)
{
	const auto a = get_vr<u8[16]>(op.ra);
	const auto b = get_vr<u8[16]>(op.rb);

	// Data with swapped endian from a load instruction
	if (auto [ok, as] = match_expr(a, byteswap(match<u8[16]>())); ok)
	{
		const auto sc = build<u8[16]>(127, 126, 125, 124, 123, 122, 121, 120, 119, 118, 117, 116, 115, 114, 113, 112);
		const auto sh = sc + (splat_scalar(b) >> 3);
		set_vr(op.rt, pshufb(as, sh));
		return;
	}

	const auto sc = build<u8[16]>(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
	const auto sh = sc - (splat_scalar(b) >> 3);
	set_vr(op.rt, pshufb(a, sh));
}

// Helper for the generate-controls-for-insertion ops (CBX/CHX/CWX/CDX and CBD/CHD/CWD/CDD):
// builds the identity control mask and inserts the element marker at the given index.
template <typename RT, typename T>
auto spu_get_insertion_shuffle_mask(T&& index)
{
	const auto c = bitcast<RT>(build<u8[16]>(0x1f, 0x1e, 0x1d, 0x1c, 0x1b, 0x1a, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10));
	using e_type = std::remove_extent_t<RT>;
	const auto v = splat<e_type>(static_cast<e_type>(sizeof(e_type) == 8 ? 0x01020304050607ull : 0x010203ull));
	return insert(c, std::forward<T>(index), v);
}

// CBX: generate controls for byte insertion at (ra + rb).
void CBX(spu_opcode_t op)
{
	if (m_finfo && m_finfo->fn && op.ra == s_reg_sp)
	{
		// Optimization with aligned stack assumption. Strange because SPU code could use CBD instead, but encountered in wild.
		set_vr(op.rt, spu_get_insertion_shuffle_mask<u8[16]>(~get_scalar(get_vr(op.rb)) & 0xf));
		return;
	}

	const auto s = get_scalar(get_vr(op.ra)) + get_scalar(get_vr(op.rb));
	set_vr(op.rt, spu_get_insertion_shuffle_mask<u8[16]>(~s & 0xf));
}

// CHX: generate controls for halfword insertion at (ra + rb).
void CHX(spu_opcode_t op)
{
	if (m_finfo && m_finfo->fn && op.ra == s_reg_sp)
	{
		// See CBX.
set_vr(op.rt, spu_get_insertion_shuffle_mask<u16[8]>(~get_scalar(get_vr(op.rb)) >> 1 & 0x7));
		return;
	}

	const auto s = get_scalar(get_vr(op.ra)) + get_scalar(get_vr(op.rb));
	set_vr(op.rt, spu_get_insertion_shuffle_mask<u16[8]>(~s >> 1 & 0x7));
}

// CWX: generate controls for word insertion at (ra + rb).
void CWX(spu_opcode_t op)
{
	if (m_finfo && m_finfo->fn && op.ra == s_reg_sp)
	{
		// See CBX.
		set_vr(op.rt, spu_get_insertion_shuffle_mask<u32[4]>(~get_scalar(get_vr(op.rb)) >> 2 & 0x3));
		return;
	}

	const auto s = get_scalar(get_vr(op.ra)) + get_scalar(get_vr(op.rb));
	set_vr(op.rt, spu_get_insertion_shuffle_mask<u32[4]>(~s >> 2 & 0x3));
}

// CDX: generate controls for doubleword insertion at (ra + rb).
void CDX(spu_opcode_t op)
{
	if (m_finfo && m_finfo->fn && op.ra == s_reg_sp)
	{
		// See CBX.
		set_vr(op.rt, spu_get_insertion_shuffle_mask<u64[2]>(~get_scalar(get_vr(op.rb)) >> 3 & 0x1));
		return;
	}

	const auto s = get_scalar(get_vr(op.ra)) + get_scalar(get_vr(op.rb));
	set_vr(op.rt, spu_get_insertion_shuffle_mask<u64[2]>(~s >> 3 & 0x1));
}

// ROTQBI: rotate quadword left by (rb & 7) bits, via funnel shift across lanes.
void ROTQBI(spu_opcode_t op)
{
	const auto a = get_vr(op.ra);
	const auto b = splat_scalar(get_vr(op.rb) & 0x7);
	set_vr(op.rt, fshl(a, zshuffle(a, 3, 0, 1, 2), b));
}

// ROTQMBI: shift quadword right by (-rb & 7) bits, via funnel shift (lane 4 index zero-fills).
void ROTQMBI(spu_opcode_t op)
{
	const auto [a, b] = get_vrs<u32[4]>(op.ra, op.rb);
	auto minusb = eval(-b);

	// If b is already a negation, use its operand directly as the shift count.
	if (auto [ok, x] = match_expr(b, -match<u32[4]>()); ok)
	{
		minusb = eval(x);
	}

	const auto bx = splat_scalar(minusb) & 0x7;
	set_vr(op.rt, fshr(zshuffle(a, 1, 2, 3, 4), a, bx));
}

// SHLQBI: shift quadword left by (rb & 7) bits, via funnel shift (lane 4 index zero-fills).
void SHLQBI(spu_opcode_t op)
{
	const auto a = get_vr(op.ra);
	const auto b = splat_scalar(get_vr(op.rb) & 0x7);
	set_vr(op.rt, fshl(a, zshuffle(a, 4, 0, 1, 2), b));
}

#if defined(ARCH_X64)
// Interpreter fallback for ROTQBY when SSSE3 pshufb is unavailable:
// rotate by loading from a doubled copy of the vector at offset (16 - (b & 0xf)).
static __m128i exec_rotqby(__m128i a, u8 b)
{
	alignas(32) const __m128i buf[2]{a, a};
	return _mm_loadu_si128(reinterpret_cast<const __m128i*>(reinterpret_cast<const u8*>(buf) + (16 - (b & 0xf))));
}
#elif defined(ARCH_ARM64)
#else
#error "Unimplemented"
#endif

// ROTQBY: rotate quadword left by (rb & 0xf) bytes, via byte shuffle (vpermb/pshufb) or C++ fallback.
void ROTQBY(spu_opcode_t op)
{
	const auto a = get_vr<u8[16]>(op.ra);
	const auto b = get_vr<u8[16]>(op.rb);

#if defined(ARCH_X64)
	if (!m_use_ssse3)
	{
		value_t<u8[16]> r;
		r.value = call<u8[16]>("spu_rotqby", &exec_rotqby, a.value, eval(extract(b, 12)).value);
		set_vr(op.rt, r);
		return;
	}
#endif

	// Data with swapped endian from a load instruction
	if (auto [ok, as] = match_expr(a, byteswap(match<u8[16]>())); ok)
	{
		const auto sc = build<u8[16]>(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
		const auto sh = eval(sc + splat_scalar(b));

		if (m_use_avx512_icl)
		{
			set_vr(op.rt, vpermb(as, sh));
			return;
		}
		set_vr(op.rt, pshufb(as, (sh & 0xf)));
		return;
	}

	const auto sc = build<u8[16]>(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
	const auto sh = eval(sc - splat_scalar(b));

	if (m_use_avx512_icl)
	{
		set_vr(op.rt, vpermb(a, sh));
		return;
	}
	set_vr(op.rt, pshufb(a, (sh & 0xf)));
}

// ROTQMBY: shift quadword right by (-rb & 0x1f) bytes, via byte shuffle (out-of-range indices zero-fill).
void ROTQMBY(spu_opcode_t op)
{
	const auto a = get_vr<u8[16]>(op.ra);
	const auto b = get_vr<u32[4]>(op.rb);
	auto minusb = eval(-b);

	if (auto [ok, x] = match_expr(b, -match<u32[4]>()); ok)
	{
		minusb = eval(x);
	}

	const auto minusbx = bitcast<u8[16]>(minusb);

	// Data with swapped endian from a load instruction
	if (auto [ok, as] = match_expr(a, byteswap(match<u8[16]>())); ok)
	{
		const auto sc = build<u8[16]>(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
		const auto sh = sc - (splat_scalar(minusbx) & 0x1f);
		set_vr(op.rt, pshufb(as, sh));
		return;
	}

	const auto sc = build<u8[16]>(112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127);
	const auto sh = sc + (splat_scalar(minusbx) & 0x1f);
	set_vr(op.rt, pshufb(a, sh));
}

// SHLQBY: shift quadword left by (rb & 0x1f) bytes, via byte shuffle (out-of-range indices zero-fill).
void SHLQBY(spu_opcode_t op)
{
	const auto a = get_vr<u8[16]>(op.ra);
	const auto b = get_vr<u8[16]>(op.rb);

	// Data with swapped endian from a load instruction
	if (auto [ok, as] = match_expr(a, byteswap(match<u8[16]>())); ok)
	{
		const auto sc = build<u8[16]>(127, 126, 125, 124, 123, 122, 121, 120, 119, 118, 117, 116, 115, 114, 113, 112);
		const auto sh = sc + (splat_scalar(b) & 0x1f);
		set_vr(op.rt, pshufb(as, sh));
		return;
	}

	const auto sc = build<u8[16]>(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
	const auto sh = sc -
(splat_scalar(b) & 0x1f);
	set_vr(op.rt, pshufb(a, sh));
}

// Helper: named intrinsic wrapper for ORX so the reduction can be pattern-matched later.
template <typename T>
static llvm_calli<u32[4], T> orx(T&& a)
{
	return {"spu_orx", {std::forward<T>(a)}};
}

// ORX: OR all four words of ra together into the preferred word of rt (other words zero).
void ORX(spu_opcode_t op)
{
	register_intrinsic("spu_orx", [&](llvm::CallInst* ci)
	{
		// Pairwise OR-reduction, then move the result into the preferred slot.
		const auto a = value<u32[4]>(ci->getOperand(0));
		const auto x = zshuffle(a, 2, 3, 0, 1) | a;
		const auto y = zshuffle(x, 1, 0, 3, 2) | x;
		return zshuffle(y, 4, 4, 4, 3);
	});

	set_vr(op.rt, orx(get_vr(op.ra)));
}

// CBD: generate controls for byte insertion at (ra + i7).
void CBD(spu_opcode_t op)
{
	if (m_finfo && m_finfo->fn && op.ra == s_reg_sp)
	{
		// Known constant with aligned stack assumption (optimization).
		set_vr(op.rt, spu_get_insertion_shuffle_mask<u8[16]>(~get_imm<u32>(op.i7) & 0xf));
		return;
	}

	const auto a = get_scalar(get_vr(op.ra)) + get_imm<u32>(op.i7);
	set_vr(op.rt, spu_get_insertion_shuffle_mask<u8[16]>(~a & 0xf));
}

// CHD: generate controls for halfword insertion at (ra + i7).
void CHD(spu_opcode_t op)
{
	if (m_finfo && m_finfo->fn && op.ra == s_reg_sp)
	{
		// See CBD.
		set_vr(op.rt, spu_get_insertion_shuffle_mask<u16[8]>(~get_imm<u32>(op.i7) >> 1 & 0x7));
		return;
	}

	const auto a = get_scalar(get_vr(op.ra)) + get_imm<u32>(op.i7);
	set_vr(op.rt, spu_get_insertion_shuffle_mask<u16[8]>(~a >> 1 & 0x7));
}

// CWD: generate controls for word insertion at (ra + i7).
void CWD(spu_opcode_t op)
{
	if (m_finfo && m_finfo->fn && op.ra == s_reg_sp)
	{
		// See CBD.
		set_vr(op.rt, spu_get_insertion_shuffle_mask<u32[4]>(~get_imm<u32>(op.i7) >> 2 & 0x3));
		return;
	}

	const auto a = get_scalar(get_vr(op.ra)) + get_imm<u32>(op.i7);
	set_vr(op.rt, spu_get_insertion_shuffle_mask<u32[4]>(~a >> 2 & 0x3));
}

// CDD: generate controls for doubleword insertion at (ra + i7).
void CDD(spu_opcode_t op)
{
	if (m_finfo && m_finfo->fn && op.ra == s_reg_sp)
	{
		// See CBD.
		set_vr(op.rt, spu_get_insertion_shuffle_mask<u64[2]>(~get_imm<u32>(op.i7) >> 3 & 0x1));
		return;
	}

	const auto a = get_scalar(get_vr(op.ra)) + get_imm<u32>(op.i7);
	set_vr(op.rt, spu_get_insertion_shuffle_mask<u64[2]>(~a >> 3 & 0x1));
}

// ROTQBII: rotate quadword left by (i7 & 7) bits.
void ROTQBII(spu_opcode_t op)
{
	const auto a = get_vr(op.ra);
	const auto b = eval(get_imm(op.i7, false) & 0x7);
	set_vr(op.rt, fshl(a, zshuffle(a, 3, 0, 1, 2), b));
}

// ROTQMBII: shift quadword right by (-i7 & 7) bits.
void ROTQMBII(spu_opcode_t op)
{
	const auto a = get_vr(op.ra);
	const auto b = eval(-get_imm(op.i7, false) & 0x7);
	set_vr(op.rt, fshr(zshuffle(a, 1, 2, 3, 4), a, b));
}

// SHLQBII: shift quadword left by (i7 & 7) bits.
void SHLQBII(spu_opcode_t op)
{
	const auto a = get_vr(op.ra);
	const auto b = eval(get_imm(op.i7, false) & 0x7);
	set_vr(op.rt, fshl(a, zshuffle(a, 4, 0, 1, 2), b));
}

// ROTQBYI: rotate quadword left by immediate byte count.
void ROTQBYI(spu_opcode_t op)
{
	const auto a = get_vr<u8[16]>(op.ra);
	const auto sc = build<u8[16]>(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
	const auto sh = (sc - get_imm<u8[16]>(op.i7, false)) & 0xf;
	set_vr(op.rt, pshufb(a, sh));
}

// ROTQMBYI: shift quadword right by (-i7) bytes (out-of-range indices zero-fill).
void ROTQMBYI(spu_opcode_t op)
{
	const auto a = get_vr<u8[16]>(op.ra);
	const auto sc = build<u8[16]>(112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127);
	const auto sh = sc + (-get_imm<u8[16]>(op.i7, false) & 0x1f);
	set_vr(op.rt, pshufb(a, sh));
}

// SHLQBYI: shift quadword left by immediate byte count; zero shift forwards the raw register.
void SHLQBYI(spu_opcode_t op)
{
	if (get_reg_raw(op.ra) && !op.i7) return set_reg_fixed(op.rt, get_reg_raw(op.ra), false); // For expressions matching

	const auto a = get_vr<u8[16]>(op.ra);
	const auto sc = build<u8[16]>(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
	const auto sh = sc - (get_imm<u8[16]>(op.i7, false) & 0x1f);
	set_vr(op.rt, pshufb(a, sh));
}

// CGT: signed word compare greater-than, full-width mask result.
void CGT(spu_opcode_t op)
{
	set_vr(op.rt, sext<s32[4]>(get_vr<s32[4]>(op.ra) > get_vr<s32[4]>(op.rb)));
}

// XOR: rt = ra ^ rb
void XOR(spu_opcode_t op)
{
	set_vr(op.rt, get_vr(op.ra) ^ get_vr(op.rb));
}

// CGTH: signed halfword compare greater-than.
void CGTH(spu_opcode_t op)
{
	set_vr(op.rt, sext<s16[8]>(get_vr<s16[8]>(op.ra) > get_vr<s16[8]>(op.rb)));
}

// EQV: rt = ~(ra ^ rb)
void EQV(spu_opcode_t op)
{
	set_vr(op.rt, ~(get_vr(op.ra) ^ get_vr(op.rb)));
}

void
CGTB(spu_opcode_t op) { set_vr(op.rt, sext<s8[16]>(get_vr<s8[16]>(op.ra) > get_vr<s8[16]>(op.rb))); } void SUMB(spu_opcode_t op) { if (m_use_avx512) { const auto [a, b] = get_vrs<u8[16]>(op.ra, op.rb); const auto zeroes = splat<u8[16]>(0); if (op.ra == op.rb && !m_interp_magn) { set_vr(op.rt, vdbpsadbw(a, zeroes, 0)); return; } const auto ax = vdbpsadbw(a, zeroes, 0); const auto bx = vdbpsadbw(b, zeroes, 0); set_vr(op.rt, shuffle2(ax, bx, 0, 9, 2, 11, 4, 13, 6, 15)); return; } if (m_use_vnni) { const auto [a, b] = get_vrs<u32[4]>(op.ra, op.rb); const auto zeroes = splat<u32[4]>(0); const auto ones = splat<u32[4]>(0x01010101); const auto ax = bitcast<u16[8]>(vpdpbusd(zeroes, a, ones)); const auto bx = bitcast<u16[8]>(vpdpbusd(zeroes, b, ones)); set_vr(op.rt, shuffle2(ax, bx, 0, 8, 2, 10, 4, 12, 6, 14)); return; } const auto [a, b] = get_vrs<u16[8]>(op.ra, op.rb); const auto ahs = eval((a >> 8) + (a & 0xff)); const auto bhs = eval((b >> 8) + (b & 0xff)); const auto lsh = shuffle2(ahs, bhs, 0, 9, 2, 11, 4, 13, 6, 15); const auto hsh = shuffle2(ahs, bhs, 1, 8, 3, 10, 5, 12, 7, 14); set_vr(op.rt, lsh + hsh); } void CLZ(spu_opcode_t op) { set_vr(op.rt, ctlz(get_vr(op.ra))); } void XSWD(spu_opcode_t op) { set_vr(op.rt, get_vr<s64[2]>(op.ra) << 32 >> 32); } void XSHW(spu_opcode_t op) { set_vr(op.rt, get_vr<s32[4]>(op.ra) << 16 >> 16); } void CNTB(spu_opcode_t op) { set_vr(op.rt, ctpop(get_vr<u8[16]>(op.ra))); } void XSBH(spu_opcode_t op) { set_vr(op.rt, get_vr<s16[8]>(op.ra) << 8 >> 8); } void CLGT(spu_opcode_t op) { set_vr(op.rt, sext<s32[4]>(get_vr(op.ra) > get_vr(op.rb))); } void ANDC(spu_opcode_t op) { set_vr(op.rt, get_vr(op.ra) & ~get_vr(op.rb)); } void CLGTH(spu_opcode_t op) { set_vr(op.rt, sext<s16[8]>(get_vr<u16[8]>(op.ra) > get_vr<u16[8]>(op.rb))); } void ORC(spu_opcode_t op) { set_vr(op.rt, get_vr(op.ra) | ~get_vr(op.rb)); } void CLGTB(spu_opcode_t op) { set_vr(op.rt, sext<s8[16]>(get_vr<u8[16]>(op.ra) > get_vr<u8[16]>(op.rb))); } void CEQ(spu_opcode_t op) { 
set_vr(op.rt, sext<s32[4]>(get_vr(op.ra) == get_vr(op.rb))); } void MPYHHU(spu_opcode_t op) { set_vr(op.rt, (get_vr(op.ra) >> 16) * (get_vr(op.rb) >> 16)); } void ADDX(spu_opcode_t op) { set_vr(op.rt, llvm_sum{get_vr(op.ra), get_vr(op.rb), get_vr(op.rt) & 1}); } void SFX(spu_opcode_t op) { set_vr(op.rt, get_vr(op.rb) - get_vr(op.ra) - (~get_vr(op.rt) & 1)); } void CGX(spu_opcode_t op) { const auto [a, b] = get_vrs<u32[4]>(op.ra, op.rb); const auto x = (get_vr<s32[4]>(op.rt) << 31) >> 31; const auto s = eval(a + b); set_vr(op.rt, noncast<u32[4]>(sext<s32[4]>(s < a) | (sext<s32[4]>(s == noncast<u32[4]>(x)) & x)) >> 31); } void BGX(spu_opcode_t op) { const auto [a, b] = get_vrs<u32[4]>(op.ra, op.rb); const auto c = get_vr<s32[4]>(op.rt) << 31; set_vr(op.rt, noncast<u32[4]>(sext<s32[4]>(b > a) | (sext<s32[4]>(a == b) & c)) >> 31); } void MPYHHA(spu_opcode_t op) { set_vr(op.rt, (get_vr<s32[4]>(op.ra) >> 16) * (get_vr<s32[4]>(op.rb) >> 16) + get_vr<s32[4]>(op.rt)); } void MPYHHAU(spu_opcode_t op) { set_vr(op.rt, (get_vr(op.ra) >> 16) * (get_vr(op.rb) >> 16) + get_vr(op.rt)); } void MPY(spu_opcode_t op) { set_vr(op.rt, (get_vr<s32[4]>(op.ra) << 16 >> 16) * (get_vr<s32[4]>(op.rb) << 16 >> 16)); } void MPYH(spu_opcode_t op) { set_vr(op.rt, mpyh(get_vr(op.ra), get_vr(op.rb))); } void MPYHH(spu_opcode_t op) { set_vr(op.rt, (get_vr<s32[4]>(op.ra) >> 16) * (get_vr<s32[4]>(op.rb) >> 16)); } void MPYS(spu_opcode_t op) { set_vr(op.rt, (get_vr<s32[4]>(op.ra) << 16 >> 16) * (get_vr<s32[4]>(op.rb) << 16 >> 16) >> 16); } void CEQH(spu_opcode_t op) { set_vr(op.rt, sext<s16[8]>(get_vr<u16[8]>(op.ra) == get_vr<u16[8]>(op.rb))); } void MPYU(spu_opcode_t op) { set_vr(op.rt, mpyu(get_vr(op.ra), get_vr(op.rb))); } void CEQB(spu_opcode_t op) { set_vr(op.rt, sext<s8[16]>(get_vr<u8[16]>(op.ra) == get_vr<u8[16]>(op.rb))); } void FSMBI(spu_opcode_t op) { const auto m = bitcast<bool[16]>(get_imm<u16>(op.i16)); set_vr(op.rt, sext<s8[16]>(m)); } void IL(spu_opcode_t op) { set_vr(op.rt, 
		get_imm<s32[4]>(op.si16)); // IL continued: sign-extended 16-bit immediate splatted to each word
}

// Immediate Load Halfword Upper.
void ILHU(spu_opcode_t op)
{
	set_vr(op.rt, get_imm<u32[4]>(op.i16) << 16);
}

// Immediate Load Halfword.
void ILH(spu_opcode_t op)
{
	set_vr(op.rt, get_imm<u16[8]>(op.i16));
}

// Immediate Or Halfword Lower.
void IOHL(spu_opcode_t op)
{
	set_vr(op.rt, get_vr(op.rt) | get_imm(op.i16));
}

// OR Word Immediate. A zero immediate is forwarded as a plain register copy.
void ORI(spu_opcode_t op)
{
	if (get_reg_raw(op.ra) && !op.si10)
		return set_reg_fixed(op.rt, get_reg_raw(op.ra), false); // For expressions matching

	set_vr(op.rt, get_vr<s32[4]>(op.ra) | get_imm<s32[4]>(op.si10));
}

// OR Halfword Immediate.
void ORHI(spu_opcode_t op)
{
	if (get_reg_raw(op.ra) && !op.si10)
		return set_reg_fixed(op.rt, get_reg_raw(op.ra), false);

	set_vr(op.rt, get_vr<s16[8]>(op.ra) | get_imm<s16[8]>(op.si10));
}

// OR Byte Immediate.
void ORBI(spu_opcode_t op)
{
	if (get_reg_raw(op.ra) && !op.si10)
		return set_reg_fixed(op.rt, get_reg_raw(op.ra), false);

	set_vr(op.rt, get_vr<s8[16]>(op.ra) | get_imm<s8[16]>(op.si10));
}

// Subtract From Word Immediate: imm - ra.
void SFI(spu_opcode_t op)
{
	set_vr(op.rt, get_imm<s32[4]>(op.si10) - get_vr<s32[4]>(op.ra));
}

// Subtract From Halfword Immediate.
void SFHI(spu_opcode_t op)
{
	set_vr(op.rt, get_imm<s16[8]>(op.si10) - get_vr<s16[8]>(op.ra));
}

// AND Word Immediate. An all-ones immediate forwards ra unchanged.
void ANDI(spu_opcode_t op)
{
	if (get_reg_raw(op.ra) && op.si10 == -1)
		return set_reg_fixed(op.rt, get_reg_raw(op.ra), false);

	set_vr(op.rt, get_vr<s32[4]>(op.ra) & get_imm<s32[4]>(op.si10));
}

// AND Halfword Immediate.
void ANDHI(spu_opcode_t op)
{
	if (get_reg_raw(op.ra) && op.si10 == -1)
		return set_reg_fixed(op.rt, get_reg_raw(op.ra), false);

	set_vr(op.rt, get_vr<s16[8]>(op.ra) & get_imm<s16[8]>(op.si10));
}

// AND Byte Immediate (all-ones is checked on the truncated byte).
void ANDBI(spu_opcode_t op)
{
	if (get_reg_raw(op.ra) && static_cast<s8>(op.si10) == -1)
		return set_reg_fixed(op.rt, get_reg_raw(op.ra), false);

	set_vr(op.rt, get_vr<s8[16]>(op.ra) & get_imm<s8[16]>(op.si10));
}

// Add Word Immediate. A zero immediate forwards ra unchanged.
void AI(spu_opcode_t op)
{
	if (get_reg_raw(op.ra) && !op.si10)
		return set_reg_fixed(op.rt, get_reg_raw(op.ra), false);

	set_vr(op.rt, get_vr<s32[4]>(op.ra) + get_imm<s32[4]>(op.si10));
}

// Add Halfword Immediate (expression continues on the next source line).
void AHI(spu_opcode_t op)
{
	if (get_reg_raw(op.ra) && !op.si10)
		return set_reg_fixed(op.rt, get_reg_raw(op.ra), false);

	set_vr(op.rt, get_vr<s16[8]>(op.ra) +
		get_imm<s16[8]>(op.si10)); // AHI continued: add sign-extended immediate to each halfword
}

// XOR Word Immediate. A zero immediate forwards ra unchanged.
void XORI(spu_opcode_t op)
{
	if (get_reg_raw(op.ra) && !op.si10)
		return set_reg_fixed(op.rt, get_reg_raw(op.ra), false);

	set_vr(op.rt, get_vr<s32[4]>(op.ra) ^ get_imm<s32[4]>(op.si10));
}

// XOR Halfword Immediate.
void XORHI(spu_opcode_t op)
{
	if (get_reg_raw(op.ra) && !op.si10)
		return set_reg_fixed(op.rt, get_reg_raw(op.ra), false);

	set_vr(op.rt, get_vr<s16[8]>(op.ra) ^ get_imm<s16[8]>(op.si10));
}

// XOR Byte Immediate.
void XORBI(spu_opcode_t op)
{
	if (get_reg_raw(op.ra) && !op.si10)
		return set_reg_fixed(op.rt, get_reg_raw(op.ra), false);

	set_vr(op.rt, get_vr<s8[16]>(op.ra) ^ get_imm<s8[16]>(op.si10));
}

// Compare Greater Than Word Immediate (signed).
void CGTI(spu_opcode_t op)
{
	set_vr(op.rt, sext<s32[4]>(get_vr<s32[4]>(op.ra) > get_imm<s32[4]>(op.si10)));
}

// Compare Greater Than Halfword Immediate (signed).
void CGTHI(spu_opcode_t op)
{
	set_vr(op.rt, sext<s16[8]>(get_vr<s16[8]>(op.ra) > get_imm<s16[8]>(op.si10)));
}

// Compare Greater Than Byte Immediate (signed).
void CGTBI(spu_opcode_t op)
{
	set_vr(op.rt, sext<s8[16]>(get_vr<s8[16]>(op.ra) > get_imm<s8[16]>(op.si10)));
}

// Compare Logical Greater Than Word Immediate (unsigned).
void CLGTI(spu_opcode_t op)
{
	set_vr(op.rt, sext<s32[4]>(get_vr(op.ra) > get_imm(op.si10)));
}

// Compare Logical Greater Than Halfword Immediate.
void CLGTHI(spu_opcode_t op)
{
	set_vr(op.rt, sext<s16[8]>(get_vr<u16[8]>(op.ra) > get_imm<u16[8]>(op.si10)));
}

// Compare Logical Greater Than Byte Immediate.
void CLGTBI(spu_opcode_t op)
{
	set_vr(op.rt, sext<s8[16]>(get_vr<u8[16]>(op.ra) > get_imm<u8[16]>(op.si10)));
}

// Multiply Immediate (signed lower halfwords).
void MPYI(spu_opcode_t op)
{
	set_vr(op.rt, (get_vr<s32[4]>(op.ra) << 16 >> 16) * get_imm<s32[4]>(op.si10));
}

// Multiply Unsigned Immediate.
void MPYUI(spu_opcode_t op)
{
	set_vr(op.rt, (get_vr(op.ra) << 16 >> 16) * (get_imm(op.si10) & 0xffff));
}

// Compare Equal Word Immediate.
void CEQI(spu_opcode_t op)
{
	set_vr(op.rt, sext<s32[4]>(get_vr(op.ra) == get_imm(op.si10)));
}

// Compare Equal Halfword Immediate.
void CEQHI(spu_opcode_t op)
{
	set_vr(op.rt, sext<s16[8]>(get_vr<u16[8]>(op.ra) == get_imm<u16[8]>(op.si10)));
}

// Compare Equal Byte Immediate.
void CEQBI(spu_opcode_t op)
{
	set_vr(op.rt, sext<s8[16]>(get_vr<u8[16]>(op.ra) == get_imm<u8[16]>(op.si10)));
}

// Immediate Load Address (18-bit immediate).
void ILA(spu_opcode_t op)
{
	set_vr(op.rt, get_imm(op.i18));
}

// Select Bits: rt4 = (rb & rc) | (ra & ~rc), with several pattern-matched fast paths.
void SELB(spu_opcode_t op)
{
	if (match_vr<s8[16], s16[8], s32[4], s64[2]>(op.rc, [&](auto c, auto MP)
	{
		using VT = typename decltype(MP)::type;

		// If the control mask comes from a comparison instruction, replace SELB with select
		if (auto [ok, x] = match_expr(c, sext<VT>(match<bool[std::extent_v<VT>]>())); ok)
		{
			if constexpr (std::extent_v<VT> == 2) // u64[2]
			{
				// Try to select floats as floats if a OR b is typed as f64[2]
				if (auto [a, b] = match_vrs<f64[2]>(op.ra, op.rb); a || b)
				{
					set_vr(op.rt4, select(x, get_vr<f64[2]>(op.rb), get_vr<f64[2]>(op.ra)));
					return true;
				}
			}

			if constexpr (std::extent_v<VT> == 4) // u32[4]
			{
				// Match division (adjusted) (TODO)
				if (auto a = match_vr<f32[4]>(op.ra))
				{
					static const auto MT = match<f32[4]>();

					if (auto [div_ok, diva, divb] = match_expr(a, MT / MT); div_ok)
					{
						if (auto b = match_vr<s32[4]>(op.rb))
						{
							if (auto [add1_ok] = match_expr(b, bitcast<s32[4]>(a) + splat<s32[4]>(1)); add1_ok)
							{
								if (auto [fm_ok, a1, b1] = match_expr(x, bitcast<s32[4]>(fm(MT, MT)) > splat<s32[4]>(-1)); fm_ok)
								{
									if (auto [fnma_ok] = match_expr(a1, fnms(divb, bitcast<f32[4]>(b), diva)); fnma_ok)
									{
										if (fabs(b1).eval(m_ir) == fsplat<f32[4]>(1.0).eval(m_ir))
										{
											set_vr(op.rt4, diva / divb);
											return true;
										}

										if (auto [sel_ok] = match_expr(b1, bitcast<f32[4]>((bitcast<u32[4]>(diva) & 0x80000000) | 0x3f800000)); sel_ok)
										{
											set_vr(op.rt4, diva / divb);
											return true;
										}
									}
								}
							}
						}
					}
				}

				if (auto [a, b] = match_vrs<f64[4]>(op.ra, op.rb); a || b)
				{
					set_vr(op.rt4, select(x, get_vr<f64[4]>(op.rb), get_vr<f64[4]>(op.ra)));
					return true;
				}

				if (auto [a, b] = match_vrs<f32[4]>(op.ra, op.rb); a || b)
				{
					set_vr(op.rt4, select(x, get_vr<f32[4]>(op.rb), get_vr<f32[4]>(op.ra)));
					return true;
				}
			}

			if (auto [ok, y] = match_expr(x, bitcast<bool[std::extent_v<VT>]>(match<get_int_vt<std::extent_v<VT>>>())); ok)
			{
				// Don't ruin FSMB/FSM/FSMH instructions
				return false;
			}

			set_vr(op.rt4, select(x, get_vr<VT>(op.rb), get_vr<VT>(op.ra)));
			return true;
		}

		return false;
	}))
	{
		return;
	}

	const auto c = get_vr(op.rc);

	// Check if the constant mask doesn't require bit granularity
	if (auto [ok, mask] = get_const_vector(c.value, m_pos); ok)
	{
		bool sel_32 = true;
		// SELB continued: widen the constant bit mask to element-level selects where possible.
		for (u32 i = 0; i < 4; i++)
		{
			if (mask._u32[i] && mask._u32[i] != 0xFFFFFFFF)
			{
				sel_32 = false;
				break;
			}
		}

		if (sel_32)
		{
			if (auto [a, b] = match_vrs<f64[4]>(op.ra, op.rb); a || b)
			{
				set_vr(op.rt4, select(noncast<s32[4]>(c) != 0, get_vr<f64[4]>(op.rb), get_vr<f64[4]>(op.ra)));
				return;
			}
			else if (auto [a, b] = match_vrs<f32[4]>(op.ra, op.rb); a || b)
			{
				set_vr(op.rt4, select(noncast<s32[4]>(c) != 0, get_vr<f32[4]>(op.rb), get_vr<f32[4]>(op.ra)));
				return;
			}

			set_vr(op.rt4, select(noncast<s32[4]>(c) != 0, get_vr<u32[4]>(op.rb), get_vr<u32[4]>(op.ra)));
			return;
		}

		bool sel_16 = true;
		for (u32 i = 0; i < 8; i++)
		{
			if (mask._u16[i] && mask._u16[i] != 0xFFFF)
			{
				sel_16 = false;
				break;
			}
		}

		if (sel_16)
		{
			set_vr(op.rt4, select(bitcast<s16[8]>(c) != 0, get_vr<u16[8]>(op.rb), get_vr<u16[8]>(op.ra)));
			return;
		}

		bool sel_8 = true;
		for (u32 i = 0; i < 16; i++)
		{
			if (mask._u8[i] && mask._u8[i] != 0xFF)
			{
				sel_8 = false;
				break;
			}
		}

		if (sel_8)
		{
			set_vr(op.rt4, select(bitcast<s8[16]>(c) != 0,get_vr<u8[16]>(op.rb), get_vr<u8[16]>(op.ra)));
			return;
		}
	}

	const auto op1 = get_reg_raw(op.rb);
	const auto op2 = get_reg_raw(op.ra);

	if ((op1 && op1->getType() == get_type<f64[4]>()) || (op2 && op2->getType() == get_type<f64[4]>()))
	{
		// Optimization: keep xfloat values in doubles even if the mask is unpredictable (hard way)
		const auto c = get_vr<u32[4]>(op.rc);
		const auto b = get_vr<f64[4]>(op.rb);
		const auto a = get_vr<f64[4]>(op.ra);
		const auto m = conv_xfloat_mask(c.value);
		const auto x = m_ir->CreateAnd(double_as_uint64(b.value), m);
		const auto y = m_ir->CreateAnd(double_as_uint64(a.value), m_ir->CreateNot(m));
		set_reg_fixed(op.rt4, uint64_as_double(m_ir->CreateOr(x, y)));
		return;
	}

	// Generic bitwise select fallback.
	set_vr(op.rt4, (get_vr(op.rb) & c) | (get_vr(op.ra) & ~c));
}

// Shuffle Bytes: general byte permutation of ra:rb under the rc control mask.
void SHUFB(spu_opcode_t op) //
{
	if (match_vr<u8[16], u16[8], u32[4], u64[2]>(op.rc, [&](auto c, auto MP)
	{
		using VT = typename decltype(MP)::type;

		// If the mask comes from a constant generation instruction, replace SHUFB with insert
		if (auto [ok, i] =
			match_expr(c, spu_get_insertion_shuffle_mask<VT>(match<u32>())); ok)
		{
			set_vr(op.rt4, insert(get_vr<VT>(op.rb), i, get_scalar(get_vr<VT>(op.ra))));
			return true;
		}

		return false;
	}))
	{
		return;
	}

	const auto c = get_vr<u8[16]>(op.rc);

	if (auto [ok, mask] = get_const_vector(c.value, m_pos); ok)
	{
		// Optimization: SHUFB with constant mask
		if (((mask._u64[0] | mask._u64[1]) & 0xe0e0e0e0e0e0e0e0) == 0)
		{
			// Trivial insert or constant shuffle (TODO)
			static constexpr struct mask_info
			{
				u64 i1;
				u64 i0;
				decltype(&cpu_translator::get_type<void>) type;
				u64 extract_from;
				u64 insert_to;
			} s_masks[30]
			{
				{ 0x0311121314151617, 0x18191a1b1c1d1e1f, &cpu_translator::get_type<u8[16]>, 12, 15 },
				{ 0x1003121314151617, 0x18191a1b1c1d1e1f, &cpu_translator::get_type<u8[16]>, 12, 14 },
				{ 0x1011031314151617, 0x18191a1b1c1d1e1f, &cpu_translator::get_type<u8[16]>, 12, 13 },
				{ 0x1011120314151617, 0x18191a1b1c1d1e1f, &cpu_translator::get_type<u8[16]>, 12, 12 },
				{ 0x1011121303151617, 0x18191a1b1c1d1e1f, &cpu_translator::get_type<u8[16]>, 12, 11 },
				{ 0x1011121314031617, 0x18191a1b1c1d1e1f, &cpu_translator::get_type<u8[16]>, 12, 10 },
				{ 0x1011121314150317, 0x18191a1b1c1d1e1f, &cpu_translator::get_type<u8[16]>, 12, 9 },
				{ 0x1011121314151603, 0x18191a1b1c1d1e1f, &cpu_translator::get_type<u8[16]>, 12, 8 },
				{ 0x1011121314151617, 0x03191a1b1c1d1e1f, &cpu_translator::get_type<u8[16]>, 12, 7 },
				{ 0x1011121314151617, 0x18031a1b1c1d1e1f, &cpu_translator::get_type<u8[16]>, 12, 6 },
				{ 0x1011121314151617, 0x1819031b1c1d1e1f, &cpu_translator::get_type<u8[16]>, 12, 5 },
				{ 0x1011121314151617, 0x18191a031c1d1e1f, &cpu_translator::get_type<u8[16]>, 12, 4 },
				{ 0x1011121314151617, 0x18191a1b031d1e1f, &cpu_translator::get_type<u8[16]>, 12, 3 },
				{ 0x1011121314151617, 0x18191a1b1c031e1f, &cpu_translator::get_type<u8[16]>, 12, 2 },
				{ 0x1011121314151617, 0x18191a1b1c1d031f, &cpu_translator::get_type<u8[16]>, 12, 1 },
				{ 0x1011121314151617, 0x18191a1b1c1d1e03, &cpu_translator::get_type<u8[16]>, 12, 0 },
				{ 0x0203121314151617, 0x18191a1b1c1d1e1f, &cpu_translator::get_type<u16[8]>, 6, 7 },
				{ 0x1011020314151617, 0x18191a1b1c1d1e1f, &cpu_translator::get_type<u16[8]>, 6, 6 },
				{ 0x1011121302031617, 0x18191a1b1c1d1e1f, &cpu_translator::get_type<u16[8]>, 6, 5 },
				{ 0x1011121314150203, 0x18191a1b1c1d1e1f, &cpu_translator::get_type<u16[8]>, 6, 4 },
				{ 0x1011121314151617, 0x02031a1b1c1d1e1f, &cpu_translator::get_type<u16[8]>, 6, 3 },
				{ 0x1011121314151617, 0x181902031c1d1e1f, &cpu_translator::get_type<u16[8]>, 6, 2 },
				{ 0x1011121314151617, 0x18191a1b02031e1f, &cpu_translator::get_type<u16[8]>, 6, 1 },
				{ 0x1011121314151617, 0x18191a1b1c1d0203, &cpu_translator::get_type<u16[8]>, 6, 0 },
				{ 0x0001020314151617, 0x18191a1b1c1d1e1f, &cpu_translator::get_type<u32[4]>, 3, 3 },
				{ 0x1011121300010203, 0x18191a1b1c1d1e1f, &cpu_translator::get_type<u32[4]>, 3, 2 },
				{ 0x1011121314151617, 0x000102031c1d1e1f, &cpu_translator::get_type<u32[4]>, 3, 1 },
				{ 0x1011121314151617, 0x18191a1b00010203, &cpu_translator::get_type<u32[4]>, 3, 0 },
				{ 0x0001020304050607, 0x18191a1b1c1d1e1f, &cpu_translator::get_type<u64[2]>, 1, 1 },
				{ 0x1011121303151617, 0x0001020304050607, &cpu_translator::get_type<u64[2]>, 1, 0 },
			};

			// Check important constants from CWD-like constant generation instructions
			for (const auto& cm : s_masks)
			{
				if (mask._u64[0] == cm.i0 && mask._u64[1] == cm.i1)
				{
					const auto t = (this->*cm.type)();
					const auto a = get_reg_fixed(op.ra, t);
					const auto b = get_reg_fixed(op.rb, t);
					const auto e = m_ir->CreateExtractElement(a, cm.extract_from);
					set_reg_fixed(op.rt4, m_ir->CreateInsertElement(b, e, cm.insert_to));
					return;
				}
			}
		}

		// Adjusted shuffle mask
		v128 smask = ~mask & v128::from8p(op.ra == op.rb ?
			0xf : 0x1f);

		// Blend mask for encoded constants
		v128 bmask{};

		for (u32 i = 0; i < 16; i++)
		{
			if (mask._bytes[i] >= 0xe0)
				bmask._bytes[i] = 0x80;
			else if (mask._bytes[i] >= 0xc0)
				bmask._bytes[i] = 0xff;
		}

		const auto a = get_vr<u8[16]>(op.ra);
		const auto b = get_vr<u8[16]>(op.rb);
		const auto c = make_const_vector(smask, get_type<u8[16]>());
		const auto d = make_const_vector(bmask, get_type<u8[16]>());

		llvm::Value* r = d;

		if ((~mask._u64[0] | ~mask._u64[1]) & 0x8080808080808080) [[likely]]
		{
			r = m_ir->CreateShuffleVector(b.value, op.ra == op.rb ? b.value : a.value, m_ir->CreateZExt(c, get_type<u32[16]>()));

			if ((mask._u64[0] | mask._u64[1]) & 0x8080808080808080)
			{
				r = m_ir->CreateSelect(m_ir->CreateICmpSLT(make_const_vector(mask, get_type<u8[16]>()), llvm::ConstantInt::get(get_type<u8[16]>(), 0)), d, r);
			}
		}

		set_reg_fixed(op.rt4, r);
		return;
	}

	// Check whether shuffle mask doesn't contain fixed value selectors
	bool perm_only = false;

	if (auto k = get_known_bits(c); !!(k.Zero & 0x80))
	{
		perm_only = true;
	}

	const auto a = get_vr<u8[16]>(op.ra);
	const auto b = get_vr<u8[16]>(op.rb);

	// Data with swapped endian from a load instruction
	if (auto [ok, as] = match_expr(a, byteswap(match<u8[16]>())); ok)
	{
		if (auto [ok, bs] = match_expr(b, byteswap(match<u8[16]>())); ok)
		{
			// Undo endian swapping, and rely on pshufb/vperm2b to re-reverse endianness
			if (m_use_avx512_icl && (op.ra != op.rb))
			{
				if (perm_only)
				{
					set_vr(op.rt4, vperm2b(as, bs, c));
					return;
				}

				const auto m = gf2p8affineqb(c, build<u8[16]>(0x40, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x40, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20), 0x7f);
				const auto mm = select(noncast<s8[16]>(m) >= 0, splat<u8[16]>(0), m);
				const auto ab = vperm2b(as, bs, c);
				set_vr(op.rt4, select(noncast<s8[16]>(c) >= 0, ab, mm));
				return;
			}

			const auto x = pshufb(build<u8[16]>(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x80, 0x80), (c >> 4));
			const auto ax = pshufb(as, c);
			const auto bx = pshufb(bs, c);

			if (perm_only)
				set_vr(op.rt4, select_by_bit4(c, ax, bx));
			else
				set_vr(op.rt4, select_by_bit4(c, ax, bx) | x);
			return;
		}

		if (auto [ok, data] = get_const_vector(b.value, m_pos); ok)
		{
			if (data == v128::from8p(data._u8[0]))
			{
				if (m_use_avx512_icl)
				{
					if (perm_only)
					{
						set_vr(op.rt4, vperm2b256to128(as, b, c));
						return;
					}

					const auto m = gf2p8affineqb(c, build<u8[16]>(0x40, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x40, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20), 0x7f);
					const auto mm = select(noncast<s8[16]>(m) >= 0, splat<u8[16]>(0), m);
					const auto ab = vperm2b256to128(as, b, c);
					set_vr(op.rt4, select(noncast<s8[16]>(c) >= 0, ab, mm));
					return;
				}

				// See above
				const auto x = pshufb(build<u8[16]>(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x80, 0x80), (c >> 4));
				const auto ax = pshufb(as, c);

				if (perm_only)
					set_vr(op.rt4, select_by_bit4(c, ax, b));
				else
					set_vr(op.rt4, select_by_bit4(c, ax, b) | x);
				return;
			}
		}
	}

	if (auto [ok, bs] = match_expr(b, byteswap(match<u8[16]>())); ok)
	{
		if (auto [ok, data] = get_const_vector(a.value, m_pos); ok)
		{
			if (data == v128::from8p(data._u8[0]))
			{
				// See above
				const auto x = pshufb(build<u8[16]>(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x80, 0x80), (c >> 4));
				const auto bx = pshufb(bs, c);

				if (perm_only)
					set_vr(op.rt4, select_by_bit4(c, a, bx));
				else
					set_vr(op.rt4, select_by_bit4(c, a, bx) | x);
				return;
			}
		}
	}

	if (m_use_avx512_icl && (op.ra != op.rb || m_interp_magn))
	{
		if (auto [ok, data] = get_const_vector(b.value, m_pos); ok)
		{
			if (data == v128::from8p(data._u8[0]))
			{
				if (perm_only)
				{
					set_vr(op.rt4, vperm2b256to128(a, b, eval(c ^ 0xf)));
					return;
				}

				const auto m = gf2p8affineqb(c, build<u8[16]>(0x40, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x40, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20), 0x7f);
				const auto mm = select(noncast<s8[16]>(m) >= 0, splat<u8[16]>(0), m);
				const auto ab = vperm2b256to128(a, b, eval(c ^ 0xf));
				set_vr(op.rt4, select(noncast<s8[16]>(c) >= 0, ab, mm));
				return;
			}
		}

		if (auto [ok, data] = get_const_vector(a.value, m_pos); ok)
		{
			if (data == v128::from8p(data._u8[0]))
			{
				if (perm_only)
				{
					set_vr(op.rt4, vperm2b256to128(b, a, eval(c ^ 0x1f)));
					return;
				}

				const auto m = gf2p8affineqb(c, build<u8[16]>(0x40, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x40, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20), 0x7f);
				const auto mm = select(noncast<s8[16]>(m) >= 0, splat<u8[16]>(0), m);
				const auto ab = vperm2b256to128(b, a, eval(c ^ 0x1f));
				set_vr(op.rt4, select(noncast<s8[16]>(c) >= 0, ab, mm));
				return;
			}
		}

		if (perm_only)
		{
			set_vr(op.rt4, vperm2b(a, b, eval(c ^ 0xf)));
			return;
		}

		const auto m = gf2p8affineqb(c, build<u8[16]>(0x40, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x40, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20), 0x7f);
		const auto mm = select(noncast<s8[16]>(m) >= 0, splat<u8[16]>(0), m);
		const auto cr = eval(c ^ 0xf);
		const auto ab = vperm2b(a, b, cr);
		set_vr(op.rt4, select(noncast<s8[16]>(c) >= 0, ab, mm));
		return;
	}

	// Generic PSHUFB fallback.
	const auto x = pshufb(build<u8[16]>(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x80, 0x80), (c >> 4));
	const auto cr = eval(c ^ 0xf);
	const auto ax = pshufb(a, cr);
	const auto bx = pshufb(b, cr);

	if (perm_only)
		set_vr(op.rt4, select_by_bit4(cr, ax, bx));
	else
		set_vr(op.rt4, select_by_bit4(cr, ax, bx) | x);
}

// Multiply and Add: signed halfword product plus rc.
void MPYA(spu_opcode_t op)
{
	set_vr(op.rt4, (get_vr<s32[4]>(op.ra) << 16 >> 16) * (get_vr<s32[4]>(op.rb) << 16 >> 16) + get_vr<s32[4]>(op.rc));
}

void FSCRRD(spu_opcode_t op) //
{
	// Hack
	set_vr(op.rt, splat<u32[4]>(0));
}

void FSCRWR(spu_opcode_t /*op*/) //
{
	// Hack
}

void DFCGT(spu_opcode_t op) //
{
	return UNK(op);
}

void DFCEQ(spu_opcode_t op) //
{
	return UNK(op);
}

void DFCMGT(spu_opcode_t op) //
{
	return UNK(op);
}

void DFCMEQ(spu_opcode_t op) //
{
	return UNK(op);
}

void DFTSV(spu_opcode_t op) //
{
	return UNK(op);
}

// Double Floating Add.
void DFA(spu_opcode_t op)
{
	set_vr(op.rt, get_vr<f64[2]>(op.ra) + get_vr<f64[2]>(op.rb));
}

// Double Floating Subtract (expression continues on the next source line).
void DFS(spu_opcode_t op)
{
	set_vr(op.rt,
		get_vr<f64[2]>(op.ra) - get_vr<f64[2]>(op.rb)); // DFS continued
}

// Double Floating Multiply.
void DFM(spu_opcode_t op)
{
	set_vr(op.rt, get_vr<f64[2]>(op.ra) * get_vr<f64[2]>(op.rb));
}

// Double Floating Multiply and Add; config toggles fused vs separate multiply-add.
void DFMA(spu_opcode_t op)
{
	const auto [a, b, c] = get_vrs<f64[2]>(op.ra, op.rb, op.rt);

	if (g_cfg.core.use_accurate_dfma)
		set_vr(op.rt, fmuladd(a, b, c, true));
	else
		set_vr(op.rt, a * b + c);
}

// Double Floating Multiply and Subtract.
void DFMS(spu_opcode_t op)
{
	const auto [a, b, c] = get_vrs<f64[2]>(op.ra, op.rb, op.rt);

	if (g_cfg.core.use_accurate_dfma)
		set_vr(op.rt, fmuladd(a, b, -c, true));
	else
		set_vr(op.rt, a * b - c);
}

// Double Floating Negative Multiply and Subtract: c - a * b.
void DFNMS(spu_opcode_t op)
{
	const auto [a, b, c] = get_vrs<f64[2]>(op.ra, op.rb, op.rt);

	if (g_cfg.core.use_accurate_dfma)
		set_vr(op.rt, fmuladd(-a, b, c, true));
	else
		set_vr(op.rt, c - (a * b));
}

// Double Floating Negative Multiply and Add: -(a * b + c).
void DFNMA(spu_opcode_t op)
{
	const auto [a, b, c] = get_vrs<f64[2]>(op.ra, op.rb, op.rt);

	if (g_cfg.core.use_accurate_dfma)
		set_vr(op.rt, -fmuladd(a, b, c, true));
	else
		set_vr(op.rt, -(a * b + c));
}

// Returns true when the expression is provably non-negative (matches an x * x pattern).
bool is_input_positive(value_t<f32[4]> a)
{
	if (auto [ok, v0, v1] = match_expr(a, match<f32[4]>() * match<f32[4]>()); ok && v0.eq(v1))
	{
		return true;
	}

	return false;
}

// clamping helpers
value_t<f32[4]> clamp_positive_smax(value_t<f32[4]> v)
{
	return eval(bitcast<f32[4]>(min(bitcast<s32[4]>(v),splat<s32[4]>(0x7f7fffff))));
}

value_t<f32[4]> clamp_negative_smax(value_t<f32[4]> v)
{
	if (is_input_positive(v))
	{
		return v;
	}

	return eval(bitcast<f32[4]>(min(bitcast<u32[4]>(v),splat<u32[4]>(0xff7fffff))));
}

// Clamp both signs; on AVX-512 a single VRANGEPS can replace the two-step clamp.
value_t<f32[4]> clamp_smax(value_t<f32[4]> v)
{
	if (m_use_avx512)
	{
		if (is_input_positive(v))
		{
			return eval(clamp_positive_smax(v));
		}

		if (auto [ok, data] = get_const_vector(v.value, m_pos); ok)
		{
			// Avoid pessimization when input is constant
			return eval(clamp_positive_smax(clamp_negative_smax(v)));
		}

		return eval(vrangeps(v, fsplat<f32[4]>(std::bit_cast<f32, u32>(0x7f7fffff)), 0x2, 0xff));
	}

	return eval(clamp_positive_smax(clamp_negative_smax(v)));
}

// FMA favouring zeros
value_t<f32[4]> xmuladd(value_t<f32[4]> a, value_t<f32[4]> b, value_t<f32[4]> c)
{
	const auto ma = eval(sext<s32[4]>(fcmp_uno(a != fsplat<f32[4]>(0.))));
	const auto mb = eval(sext<s32[4]>(fcmp_uno(b != fsplat<f32[4]>(0.))));
	const auto ca = eval(bitcast<f32[4]>(bitcast<s32[4]>(a) & mb));
	const auto cb = eval(bitcast<f32[4]>(bitcast<s32[4]>(b) & ma));
	return eval(fmuladd(ca, cb, c));
}

// Checks for positive and negative zero, or denormal (treated as zero)
// If sign is +-1, check equality against all sign bits
bool is_spu_float_zero(v128 a, int sign = 0)
{
	for (u32 i = 0; i < 4; i++)
	{
		const u32 exponent = a._u32[i] & 0x7f800000u;

		if (exponent || (sign && (sign >= 0) != (a._s32[i] >= 0)))
		{
			// Normalized number
			return false;
		}
	}

	return true;
}

template <typename T>
static llvm_calli<f32[4], T> frest(T&& a)
{
	return {"spu_frest", {std::forward<T>(a)}};
}

// Floating Reciprocal Estimate: fraction is produced via the m_spu_frest_fraction_lut table.
void FREST(spu_opcode_t op)
{
	register_intrinsic("spu_frest", [&](llvm::CallInst* ci)
	{
		const auto a = bitcast<u32[4]>(value<f32[4]>(ci->getOperand(0)));
		const auto a_fraction = (a >> splat<u32[4]>(18)) & splat<u32[4]>(0x1F);
		const auto a_exponent = (a & splat<u32[4]>(0x7F800000u));
		const auto r_exponent = sub_sat(build<u16[8]>(0000, 0x7E80, 0000, 0x7E80, 0000, 0x7E80, 0000, 0x7E80), bitcast<u16[8]>(a_exponent));
		const auto fix_exponent = select((a_exponent > 0), bitcast<u32[4]>(r_exponent), splat<u32[4]>(0x7F800000u));
		const auto a_sign = (a & splat<u32[4]>(0x80000000));
		value_t<u32[4]> final_result = eval(splat<u32[4]>(0));

		for (u32 i = 0; i < 4; i++)
		{
			const auto eval_fraction = eval(extract(a_fraction, i));

			value_t<u32> r_fraction = load_const<u32>(m_spu_frest_fraction_lut, eval_fraction);

			final_result = eval(insert(final_result, i, r_fraction));
		}

		return bitcast<f32[4]>(bitcast<u32[4]>(final_result | bitcast<u32[4]>(fix_exponent) | a_sign));
	});

	set_vr(op.rt, frest(get_vr<f32[4]>(op.ra)));
}

template <typename T>
static llvm_calli<f32[4], T> frsqest(T&& a)
{
	return {"spu_frsqest", {std::forward<T>(a)}};
}

// Floating Reciprocal Square Root Estimate (body continues on the next source line).
void FRSQEST(spu_opcode_t op)
{
	register_intrinsic("spu_frsqest", [&](llvm::CallInst*
		ci)
	{
		// FRSQEST continued: fraction/exponent looked up per lane from LUTs.
		const auto a = bitcast<u32[4]>(value<f32[4]>(ci->getOperand(0)));
		const auto a_fraction = (a >> splat<u32[4]>(18)) & splat<u32[4]>(0x3F);
		const auto a_exponent = (a >> splat<u32[4]>(23)) & splat<u32[4]>(0xFF);
		value_t<u32[4]> final_result = eval(splat<u32[4]>(0));

		for (u32 i = 0; i < 4; i++)
		{
			const auto eval_fraction = eval(extract(a_fraction, i));
			const auto eval_exponent = eval(extract(a_exponent, i));

			value_t<u32> r_fraction = load_const<u32>(m_spu_frsqest_fraction_lut, eval_fraction);
			value_t<u32> r_exponent = load_const<u32>(m_spu_frsqest_exponent_lut, eval_exponent);

			final_result = eval(insert(final_result, i, eval(r_fraction | r_exponent)));
		}

		return bitcast<f32[4]>(final_result);
	});

	set_vr(op.rt, frsqest(get_vr<f32[4]>(op.ra)));
}

template <typename T, typename U>
static llvm_calli<s32[4], T, U> fcgt(T&& a, U&& b)
{
	return {"spu_fcgt", {std::forward<T>(a), std::forward<U>(b)}};
}

// Floating Compare Greater Than.
void FCGT(spu_opcode_t op)
{
	if (g_cfg.core.spu_xfloat_accuracy == xfloat_accuracy::accurate)
	{
		set_vr(op.rt, sext<s32[4]>(fcmp_ord(get_vr<f64[4]>(op.ra) > get_vr<f64[4]>(op.rb))));
		return;
	}

	register_intrinsic("spu_fcgt", [&](llvm::CallInst* ci)
	{
		const auto a = value<f32[4]>(ci->getOperand(0));
		const auto b = value<f32[4]>(ci->getOperand(1));

		const value_t<f32[4]> ab[2]{a, b};

		std::bitset<2> safe_int_compare(0);
		std::bitset<2> safe_nonzero_compare(0);

		for (u32 i = 0; i < 2; i++)
		{
			if (auto [ok, data] = get_const_vector(ab[i].value, m_pos, __LINE__ + i); ok)
			{
				safe_int_compare.set(i);
				safe_nonzero_compare.set(i);

				for (u32 j = 0; j < 4; j++)
				{
					const u32 value = data._u32[j];
					const u8 exponent = static_cast<u8>(value >> 23);

					if (value >= 0x7f7fffffu || !exponent)
					{
						// Positive or negative zero, denormal (treated as zero), negative constant, or normalized number with exponent +127
						// Cannot use signed integer compare safely
						// Note: Technically this optimization is accurate for any positive value, but due to the fact that
						// we don't produce "extended range" values the same way as real hardware, it's not safe to apply
						// this optimization for values outside of the range of x86 floating point hardware.
						safe_int_compare.reset(i);
						if (!exponent) safe_nonzero_compare.reset(i);
					}
				}
			}
		}

		if (safe_int_compare.any())
		{
			return eval(sext<s32[4]>(bitcast<s32[4]>(a) > bitcast<s32[4]>(b)));
		}

		const auto ai = eval(bitcast<s32[4]>(a));
		const auto bi = eval(bitcast<s32[4]>(b));

		if (!safe_nonzero_compare.any())
		{
			return eval(sext<s32[4]>(fcmp_uno(a != b) & select((ai & bi) >= 0, ai > bi, ai < bi)));
		}
		else
		{
			return eval(sext<s32[4]>(select((ai & bi) >= 0, ai > bi, ai < bi)));
		}
	});

	set_vr(op.rt, fcgt(get_vr<f32[4]>(op.ra), get_vr<f32[4]>(op.rb)));
}

template <typename T, typename U>
static llvm_calli<s32[4], T, U> fcmgt(T&& a, U&& b)
{
	return {"spu_fcmgt", {std::forward<T>(a), std::forward<U>(b)}};
}

// Floating Compare Magnitude Greater Than (body continues on the next source line).
void FCMGT(spu_opcode_t op)
{
	if (g_cfg.core.spu_xfloat_accuracy == xfloat_accuracy::accurate)
	{
		set_vr(op.rt, sext<s32[4]>(fcmp_ord(fabs(get_vr<f64[4]>(op.ra)) > fabs(get_vr<f64[4]>(op.rb)))));
		return;
	}

	register_intrinsic("spu_fcmgt", [&](llvm::CallInst* ci)
	{
		const auto a = value<f32[4]>(ci->getOperand(0));
		const auto b = value<f32[4]>(ci->getOperand(1));

		const value_t<f32[4]> ab[2]{a, b};

		std::bitset<2> safe_int_compare(0);

		for (u32 i = 0; i < 2; i++)
		{
			if (auto [ok, data] = get_const_vector(ab[i].value, m_pos, __LINE__ + i); ok)
			{
				safe_int_compare.set(i);

				for (u32 j = 0; j < 4; j++)
				{
					const u32 value = data._u32[j];
					const u8 exponent = static_cast<u8>(value >> 23);

					if ((value & 0x7fffffffu) >= 0x7f7fffffu || !exponent)
					{
						// See above
						safe_int_compare.reset(i);
					}
				}
			}
		}

		const auto ma = eval(fabs(a));
		const auto mb = eval(fabs(b));

		const auto mai = eval(bitcast<s32[4]>(ma));
		const auto mbi = eval(bitcast<s32[4]>(mb));

		if (safe_int_compare.any())
		{
			return eval(sext<s32[4]>(mai > mbi));
		}

		if (g_cfg.core.spu_xfloat_accuracy == xfloat_accuracy::approximate)
		{
			return eval(sext<s32[4]>(fcmp_uno(ma > mb) & (mai > mbi)));
		}
		else
		{
			return
				eval(sext<s32[4]>(fcmp_ord(ma > mb))); // FCMGT continued: ordered compare of magnitudes
		}
	});

	set_vr(op.rt, fcmgt(get_vr<f32[4]>(op.ra), get_vr<f32[4]>(op.rb)));
}

template <typename T, typename U>
static llvm_calli<f32[4], T, U> fa(T&& a, U&& b)
{
	return {"spu_fa", {std::forward<T>(a), std::forward<U>(b)}};
}

// Floating Add.
void FA(spu_opcode_t op)
{
	if (g_cfg.core.spu_xfloat_accuracy == xfloat_accuracy::accurate)
	{
		set_vr(op.rt, get_vr<f64[4]>(op.ra) + get_vr<f64[4]>(op.rb));
		return;
	}

	register_intrinsic("spu_fa", [&](llvm::CallInst* ci)
	{
		const auto a = value<f32[4]>(ci->getOperand(0));
		const auto b = value<f32[4]>(ci->getOperand(1));

		return a + b;
	});

	set_vr(op.rt, fa(get_vr<f32[4]>(op.ra), get_vr<f32[4]>(op.rb)));
}

template <typename T, typename U>
static llvm_calli<f32[4], T, U> fs(T&& a, U&& b)
{
	return {"spu_fs", {std::forward<T>(a), std::forward<U>(b)}};
}

// Floating Subtract.
void FS(spu_opcode_t op)
{
	if (g_cfg.core.spu_xfloat_accuracy == xfloat_accuracy::accurate)
	{
		set_vr(op.rt, get_vr<f64[4]>(op.ra) - get_vr<f64[4]>(op.rb));
		return;
	}

	register_intrinsic("spu_fs", [&](llvm::CallInst* ci)
	{
		const auto a = value<f32[4]>(ci->getOperand(0));
		const auto b = value<f32[4]>(ci->getOperand(1));

		if (g_cfg.core.spu_xfloat_accuracy == xfloat_accuracy::approximate)
		{
			const auto bc = clamp_smax(b); // for #4478
			return eval(a - bc);
		}
		else
		{
			return eval(a - b);
		}
	});

	set_vr(op.rt, fs(get_vr<f32[4]>(op.ra), get_vr<f32[4]>(op.rb)));
}

template <typename T, typename U>
static llvm_calli<f32[4], T, U> fm(T&& a, U&& b)
{
	return llvm_calli<f32[4], T, U>{"spu_fm", {std::forward<T>(a), std::forward<U>(b)}}.set_order_equality_hint(1, 1);
}

// Floating Multiply.
void FM(spu_opcode_t op)
{
	if (g_cfg.core.spu_xfloat_accuracy == xfloat_accuracy::accurate)
	{
		set_vr(op.rt, get_vr<f64[4]>(op.ra) * get_vr<f64[4]>(op.rb));
		return;
	}

	register_intrinsic("spu_fm", [&](llvm::CallInst* ci)
	{
		const auto a = value<f32[4]>(ci->getOperand(0));
		const auto b = value<f32[4]>(ci->getOperand(1));

		if (g_cfg.core.spu_xfloat_accuracy == xfloat_accuracy::approximate)
		{
			if (a.value == b.value)
			{
				return eval(a * b);
			}

			// Force the product to zero when either factor is zero/denormal.
			const auto ma = sext<s32[4]>(fcmp_uno(a != fsplat<f32[4]>(0.)));
			const auto mb = sext<s32[4]>(fcmp_uno(b != fsplat<f32[4]>(0.)));
			return eval(bitcast<f32[4]>(bitcast<s32[4]>(a * b) & ma & mb));
		}
		else
		{
			return eval(a * b);
		}
	});

	if (op.ra == op.rb && !m_interp_magn)
	{
		const auto a = get_vr<f32[4]>(op.ra);
		set_vr(op.rt, fm(a, a));
		return;
	}

	const auto [a, b] = get_vrs<f32[4]>(op.ra, op.rb);

	// This causes issues in LBP 1(first platform on first temple level doesn't come down when grabbed)
	// Presumably 1/x might result in Zero/NaN when a/x doesn't
	if (g_cfg.core.spu_xfloat_accuracy == xfloat_accuracy::relaxed)
	{
		auto full_fm_accurate = [&](const auto& a, const auto& div)
		{
			const auto div_result = a / div;
			const auto result_and = bitcast<u32[4]>(div_result) & 0x7fffffffu;
			const auto result_cmp_inf = sext<s32[4]>(result_and == splat<u32[4]>(0x7F800000u));
			const auto result_cmp_nan = sext<s32[4]>(result_and <= splat<u32[4]>(0x7F800000u));

			const auto and_mask = bitcast<u32[4]>(result_cmp_nan) & splat<u32[4]>(0xFFFFFFFFu);
			const auto or_mask = bitcast<u32[4]>(result_cmp_inf) & splat<u32[4]>(0xFFFFFFFu);
			set_vr(op.rt, bitcast<f32[4]>((bitcast<u32[4]>(div_result) & and_mask) | or_mask));
		};

		// FM(a, re_accurate(div))
		if (const auto [ok_re_acc, div, one] = match_expr(b, re_accurate(match<f32[4]>(), match<f32[4]>())); ok_re_acc)
		{
			full_fm_accurate(a, div);
			erase_stores(one, b);
			return;
		}

		// FM(re_accurate(div), b)
		if (const auto [ok_re_acc, div, one] = match_expr(a, re_accurate(match<f32[4]>(), match<f32[4]>())); ok_re_acc)
		{
			full_fm_accurate(b, div);
			erase_stores(one, a);
			return;
		}
	}

	set_vr(op.rt, fm(a, b));
}

template <typename T>
static llvm_calli<f64[2], T> fesd(T&& a)
{
	return {"spu_fesd", {std::forward<T>(a)}};
}

// Floating Extend Single to Double (body continues on the next source line).
void FESD(spu_opcode_t op)
{
	if (g_cfg.core.spu_xfloat_accuracy == xfloat_accuracy::accurate)
	{
		const auto r = zshuffle(get_vr<f64[4]>(op.ra), 1, 3);
		const auto d = bitcast<s64[2]>(r);
		const auto a = eval(d & 0x7fffffffffffffff);
		const auto s =
			eval(d & 0x8000000000000000); // FESD continued: isolated sign bits
		const auto i = select(a == 0x47f0000000000000, eval(s | 0x7ff0000000000000), d);
		const auto n = select(a > 0x47f0000000000000, splat<s64[2]>(0x7ff8000000000000), i);
		set_vr(op.rt, bitcast<f64[2]>(n));
		return;
	}

	register_intrinsic("spu_fesd", [&](llvm::CallInst* ci)
	{
		const auto a = value<f32[4]>(ci->getOperand(0));

		return fpcast<f64[2]>(zshuffle(a, 1, 3));
	});

	set_vr(op.rt, fesd(get_vr<f32[4]>(op.ra)));
}

template <typename T>
static llvm_calli<f32[4], T> frds(T&& a)
{
	return {"spu_frds", {std::forward<T>(a)}};
}

// Floating Round Double to Single.
void FRDS(spu_opcode_t op)
{
	if (g_cfg.core.spu_xfloat_accuracy == xfloat_accuracy::accurate)
	{
		const auto r = get_vr<f64[2]>(op.ra);
		const auto d = bitcast<s64[2]>(r);
		const auto a = eval(d & 0x7fffffffffffffff);
		const auto s = eval(d & 0x8000000000000000);
		const auto i = select(a > 0x47f0000000000000, eval(s | 0x47f0000000000000), d);
		const auto n = select(a > 0x7ff0000000000000, splat<s64[2]>(0x47f8000000000000), i);
		const auto z = select(a < 0x3810000000000000, s, n);
		set_vr(op.rt, zshuffle(bitcast<f64[2]>(z), 2, 0, 3, 1), nullptr, false);
		return;
	}

	register_intrinsic("spu_frds", [&](llvm::CallInst* ci)
	{
		const auto a = value<f64[2]>(ci->getOperand(0));

		return zshuffle(fpcast<f32[2]>(a), 2, 0, 3, 1);
	});

	set_vr(op.rt, frds(get_vr<f64[2]>(op.ra)));
}

template <typename T, typename U>
static llvm_calli<s32[4], T, U> fceq(T&& a, U&& b)
{
	return {"spu_fceq", {std::forward<T>(a), std::forward<U>(b)}};
}

// Floating Compare Equal.
void FCEQ(spu_opcode_t op)
{
	if (g_cfg.core.spu_xfloat_accuracy == xfloat_accuracy::accurate)
	{
		set_vr(op.rt, sext<s32[4]>(fcmp_ord(get_vr<f64[4]>(op.ra) == get_vr<f64[4]>(op.rb))));
		return;
	}

	register_intrinsic("spu_fceq", [&](llvm::CallInst* ci)
	{
		const auto a = value<f32[4]>(ci->getOperand(0));
		const auto b = value<f32[4]>(ci->getOperand(1));

		const value_t<f32[4]> ab[2]{a, b};

		std::bitset<2> safe_float_compare(0);
		std::bitset<2> safe_int_compare(0);

		for (u32 i = 0; i < 2; i++)
		{
			if (auto [ok, data] = get_const_vector(ab[i].value, m_pos, __LINE__ + i); ok)
			{
				safe_float_compare.set(i);
				safe_int_compare.set(i);

				for (u32 j = 0; j < 4; j++)
				{
					const u32 value = data._u32[j];
					const u8 exponent = static_cast<u8>(value >> 23);

					// unsafe if nan
					if (exponent == 255)
					{
						safe_float_compare.reset(i);
					}

					// unsafe if denormal or 0
					if (!exponent)
					{
						safe_int_compare.reset(i);
					}
				}
			}
		}

		if (safe_float_compare.any())
		{
			return eval(sext<s32[4]>(fcmp_ord(a == b)));
		}

		if (safe_int_compare.any())
		{
			return eval(sext<s32[4]>(bitcast<s32[4]>(a) == bitcast<s32[4]>(b)));
		}

		if (g_cfg.core.spu_xfloat_accuracy == xfloat_accuracy::approximate)
		{
			return eval(sext<s32[4]>(fcmp_ord(a == b)) | sext<s32[4]>(bitcast<s32[4]>(a) == bitcast<s32[4]>(b)));
		}
		else
		{
			return eval(sext<s32[4]>(fcmp_ord(a == b)));
		}
	});

	set_vr(op.rt, fceq(get_vr<f32[4]>(op.ra), get_vr<f32[4]>(op.rb)));
}

template <typename T, typename U>
static llvm_calli<s32[4], T, U> fcmeq(T&& a, U&& b)
{
	return {"spu_fcmeq", {std::forward<T>(a), std::forward<U>(b)}};
}

// Floating Compare Magnitude Equal (body continues on the next source line).
void FCMEQ(spu_opcode_t op)
{
	if (g_cfg.core.spu_xfloat_accuracy == xfloat_accuracy::accurate)
	{
		set_vr(op.rt, sext<s32[4]>(fcmp_ord(fabs(get_vr<f64[4]>(op.ra)) == fabs(get_vr<f64[4]>(op.rb)))));
		return;
	}

	register_intrinsic("spu_fcmeq", [&](llvm::CallInst* ci)
	{
		const auto a = value<f32[4]>(ci->getOperand(0));
		const auto b = value<f32[4]>(ci->getOperand(1));

		const value_t<f32[4]> ab[2]{a, b};

		std::bitset<2> safe_float_compare(0);
		std::bitset<2> safe_int_compare(0);

		for (u32 i = 0; i < 2; i++)
		{
			if (auto [ok, data] = get_const_vector(ab[i].value, m_pos, __LINE__ + i); ok)
			{
				safe_float_compare.set(i);
				safe_int_compare.set(i);

				for (u32 j = 0; j < 4; j++)
				{
					const u32 value = data._u32[j];
					const u8 exponent = static_cast<u8>(value >> 23);

					// unsafe if nan
					if (exponent == 255)
					{
						safe_float_compare.reset(i);
					}

					// unsafe if denormal or 0
					if (!exponent)
					{
						safe_int_compare.reset(i);
					}
				}
			}
		}

		const auto fa = eval(fabs(a));
		const auto fb = eval(fabs(b));

		if (safe_float_compare.any())
		{
return eval(sext<s32[4]>(fcmp_ord(fa == fb)));
		}

		if (safe_int_compare.any())
		{
			return eval(sext<s32[4]>(bitcast<s32[4]>(fa) == bitcast<s32[4]>(fb)));
		}

		if (g_cfg.core.spu_xfloat_accuracy == xfloat_accuracy::approximate)
		{
			// Accept either the float or the bitwise magnitude match.
			return eval(sext<s32[4]>(fcmp_ord(fa == fb)) | sext<s32[4]>(bitcast<s32[4]>(fa) == bitcast<s32[4]>(fb)));
		}
		else
		{
			return eval(sext<s32[4]>(fcmp_ord(fa == fb)));
		}
	});

	set_vr(op.rt, fcmeq(get_vr<f32[4]>(op.ra), get_vr<f32[4]>(op.rb)));
}

// Emit a*b+c on f32[4]. Uses constant-folding shortcuts, a hardware FMA when available,
// and otherwise falls back to a widened f64 multiply-add to preserve intermediate precision.
value_t<f32[4]> fma32x4(value_t<f32[4]> a, value_t<f32[4]> b, value_t<f32[4]> c)
{
	// Optimization: Emit only a floating multiply if the addend is zero
	// This is odd since SPU code could just use the FM instruction, but it seems common enough
	if (auto [ok, data] = get_const_vector(c.value, m_pos); ok)
	{
		if (is_spu_float_zero(data, 0))
		{
			return eval(a * b);
		}
	}

	// If either multiplicand is a constant +-0, the product contributes nothing.
	if ([&]()
	{
		if (auto [ok, data] = get_const_vector(a.value, m_pos); ok)
		{
			if (is_spu_float_zero(data, 0))
			{
				return true;
			}
		}
		if (auto [ok, data] = get_const_vector(b.value, m_pos); ok)
		{
			if (is_spu_float_zero(data, 0))
			{
				return true;
			}
		}
		return false;
	}())
	{
		// Just return the added value if either a or b are +-0
		return c;
	}

	if (m_use_fma)
	{
		return eval(fmuladd(a, b, c, true));
	}

	// Convert to doubles
	const auto xa = fpcast<f64[4]>(a);
	const auto xb = fpcast<f64[4]>(b);
	const auto xc = fpcast<f64[4]>(c);
	const auto xr = fmuladd(xa, xb, xc, false);
	return eval(fpcast<f32[4]>(xr));
}

// Deferred-intrinsic builder for FNMS, named "spu_fnms".
// The order-equality hint marks the first two operands as commutative for pattern matching.
template <typename T, typename U, typename V>
static llvm_calli<f32[4], T, U, V> fnms(T&& a, U&& b, V&& c)
{
	return llvm_calli<f32[4], T, U, V>{"spu_fnms", {std::forward<T>(a), std::forward<U>(b), std::forward<V>(c)}}.set_order_equality_hint(1, 1, 0);
}

// FNMS: rt4 = c - a*b (negative multiply-subtract).
void FNMS(spu_opcode_t op)
{
	// See FMA.
if (g_cfg.core.spu_xfloat_accuracy == xfloat_accuracy::accurate)
	{
		// Accurate: compute in f64 via fused multiply-add of the negated multiplicand.
		const auto [a, b, c] = get_vrs<f64[4]>(op.ra, op.rb, op.rc);
		set_vr(op.rt4, fmuladd(-a, b, c));
		return;
	}

	register_intrinsic("spu_fnms", [&](llvm::CallInst* ci)
	{
		const auto a = value<f32[4]>(ci->getOperand(0));
		const auto b = value<f32[4]>(ci->getOperand(1));
		const auto c = value<f32[4]>(ci->getOperand(2));
		// clamp_smax bounds the operands before the multiply (xfloat has no Inf/NaN).
		return fma32x4(eval(-clamp_smax(a)), clamp_smax(b), c);
	});

	set_vr(op.rt4, fnms(get_vr<f32[4]>(op.ra), get_vr<f32[4]>(op.rb), get_vr<f32[4]>(op.rc)));
}

// Deferred-intrinsic builder for FMA, named "spu_fma"; first two operands commutative.
template <typename T, typename U, typename V>
static llvm_calli<f32[4], T, U, V> fma(T&& a, U&& b, V&& c)
{
	return llvm_calli<f32[4], T, U, V>{"spu_fma", {std::forward<T>(a), std::forward<U>(b), std::forward<V>(c)}}.set_order_equality_hint(1, 1, 0);
}

// Deferred-intrinsic builder for the recognized "accurate reciprocal" sequence, named "spu_re_acc".
template <typename T, typename U>
static llvm_calli<f32[4], T, U> re_accurate(T&& a, U&& b)
{
	return {"spu_re_acc", {std::forward<T>(a), std::forward<U>(b)}};
}

// FMA: rt4 = a*b + c. Also the anchor point for recognizing multi-instruction
// sqrt / reciprocal / division sequences emitted by SPU compilers (see pattern matchers below).
void FMA(spu_opcode_t op)
{
	// Hardware FMA produces the same result as multiple + add on the limited double range (xfloat).
if (g_cfg.core.spu_xfloat_accuracy == xfloat_accuracy::accurate)
	{
		const auto [a, b, c] = get_vrs<f64[4]>(op.ra, op.rb, op.rc);
		set_vr(op.rt4, fmuladd(a, b, c));
		return;
	}

	register_intrinsic("spu_fma", [&](llvm::CallInst* ci)
	{
		const auto a = value<f32[4]>(ci->getOperand(0));
		const auto b = value<f32[4]>(ci->getOperand(1));
		const auto c = value<f32[4]>(ci->getOperand(2));

		if (g_cfg.core.spu_xfloat_accuracy == xfloat_accuracy::approximate)
		{
			// Zero out each multiplicand wherever the other one is +-0, so 0 * anything == 0
			// (ma/mb are all-ones masks where the operand is nonzero or NaN).
			const auto ma = sext<s32[4]>(fcmp_uno(a != fsplat<f32[4]>(0.)));
			const auto mb = sext<s32[4]>(fcmp_uno(b != fsplat<f32[4]>(0.)));
			const auto ca = bitcast<f32[4]>(bitcast<s32[4]>(a) & mb);
			const auto cb = bitcast<f32[4]>(bitcast<s32[4]>(b) & ma);
			return fma32x4(eval(ca), eval(cb), c);
		}
		else
		{
			return fma32x4(a, b, c);
		}
	});

	if (m_use_avx512)
	{
		register_intrinsic("spu_re_acc", [&](llvm::CallInst* ci)
		{
			const auto div = value<f32[4]>(ci->getOperand(0));
			const auto the_one = value<f32[4]>(ci->getOperand(1));

			const auto div_result = the_one / div;

			// vfixupimmps applies the Inf/NaN fixups (table 0x11001188) in a single instruction.
			return vfixupimmps(bitcast<f32[4]>(splat<u32[4]>(0xFFFFFFFFu)), div_result, splat<u32[4]>(0x11001188u), 0, 0xff);
		});
	}
	else
	{
		register_intrinsic("spu_re_acc", [&](llvm::CallInst* ci)
		{
			const auto div = value<f32[4]>(ci->getOperand(0));
			const auto the_one = value<f32[4]>(ci->getOperand(1));

			const auto div_result = the_one / div;

			// from ps3 hardware testing: Inf => NaN and NaN => Zero
			const auto result_and = bitcast<u32[4]>(div_result) & 0x7fffffffu;
			const auto result_cmp_inf = sext<s32[4]>(result_and == splat<u32[4]>(0x7F800000u));
			const auto result_cmp_nan = sext<s32[4]>(result_and <= splat<u32[4]>(0x7F800000u));

			// NaN lanes (and_mask == 0) are cleared to zero; Inf lanes get mantissa bits OR'ed in,
			// turning the Inf pattern into a NaN.
			const auto and_mask = bitcast<u32[4]>(result_cmp_nan) & splat<u32[4]>(0xFFFFFFFFu);
			const auto or_mask = bitcast<u32[4]>(result_cmp_inf) & splat<u32[4]>(0xFFFFFFFu);
			return bitcast<f32[4]>((bitcast<u32[4]>(div_result) & and_mask) | or_mask);
		});
	}

	const auto [a, b, c] = get_vrs<f32[4]>(op.ra, op.rb, op.rc);

	// Wildcard matcher used by all the pattern matchers below.
	static const auto MT = match<f32[4]>();

	auto check_sqrt_pattern_for_float
= [&](f32 float_value) -> bool { auto match_fnms = [&](f32 float_value) { auto res = match_expr(a, fnms(MT, MT, fsplat<f32[4]>(float_value))); if (std::get<0>(res)) return res; return match_expr(b, fnms(MT, MT, fsplat<f32[4]>(float_value))); }; auto match_fm_half = [&]() { auto res = match_expr(a, fm(MT, fsplat<f32[4]>(0.5))); if (std::get<0>(res)) return res; res = match_expr(a, fm(fsplat<f32[4]>(0.5), MT)); if (std::get<0>(res)) return res; res = match_expr(b, fm(MT, fsplat<f32[4]>(0.5))); if (std::get<0>(res)) return res; return match_expr(b, fm(fsplat<f32[4]>(0.5), MT)); }; if (auto [ok_fnma, a1, b1] = match_fnms(float_value); ok_fnma) { if (auto [ok_fm2, fm_half_mul] = match_fm_half(); ok_fm2 && fm_half_mul.eq(b1)) { if (fm_half_mul.eq(b1)) { if (auto [ok_fm1, a3, b3] = match_expr(c, fm(MT, MT)); ok_fm1 && a3.eq(a1)) { if (auto [ok_sqrte, src] = match_expr(a3, spu_rsqrte(MT)); ok_sqrte && src.eq(b3)) { erase_stores(a, b, c, a3); set_vr(op.rt4, fsqrt(fabs(src))); return true; } } else if (auto [ok_fm1, a3, b3] = match_expr(c, fm(MT, MT)); ok_fm1 && b3.eq(a1)) { if (auto [ok_sqrte, src] = match_expr(b3, spu_rsqrte(MT)); ok_sqrte && src.eq(a3)) { erase_stores(a, b, c, b3); set_vr(op.rt4, fsqrt(fabs(src))); return true; } } } else if (fm_half_mul.eq(a1)) { if (auto [ok_fm1, a3, b3] = match_expr(c, fm(MT, MT)); ok_fm1 && a3.eq(b1)) { if (auto [ok_sqrte, src] = match_expr(a3, spu_rsqrte(MT)); ok_sqrte && src.eq(b3)) { erase_stores(a, b, c, a3); set_vr(op.rt4, fsqrt(fabs(src))); return true; } } else if (auto [ok_fm1, a3, b3] = match_expr(c, fm(MT, MT)); ok_fm1 && b3.eq(b1)) { if (auto [ok_sqrte, src] = match_expr(b3, spu_rsqrte(MT)); ok_sqrte && src.eq(a3)) { erase_stores(a, b, c, b3); set_vr(op.rt4, fsqrt(fabs(src))); return true; } } } } } return false; }; if (check_sqrt_pattern_for_float(1.0f)) return; if (check_sqrt_pattern_for_float(std::bit_cast<f32>(std::bit_cast<u32>(1.0f) + 1))) return; auto check_accurate_reciprocal_pattern_for_float = [&](f32 float_value) 
-> bool
	{
		// Recognizes the Newton-Raphson reciprocal refinement around FREST (spu_re):
		// FMA(FNMS(div, est, float_value), est, est) in its four operand arrangements,
		// and replaces it with the "accurate reciprocal" intrinsic spu_re_acc.
		// float_value is the constant used by the FNMS (1.0f or its ULP-above twin).

		// FMA(FNMS(div, spu_re(div), float_value), spu_re(div), spu_re(div))
		if (auto [ok_fnms, div] = match_expr(a, fnms(MT, b, fsplat<f32[4]>(float_value))); ok_fnms && op.rb == op.rc)
		{
			if (auto [ok_re] = match_expr(b, spu_re(div)); ok_re)
			{
				erase_stores(a, b, c);
				set_vr(op.rt4, re_accurate(div, fsplat<f32[4]>(float_value)));
				return true;
			}
		}

		// FMA(FNMS(spu_re(div), div, float_value), spu_re(div), spu_re(div))
		if (auto [ok_fnms, div] = match_expr(a, fnms(b, MT, fsplat<f32[4]>(float_value))); ok_fnms && op.rb == op.rc)
		{
			if (auto [ok_re] = match_expr(b, spu_re(div)); ok_re)
			{
				erase_stores(a, b, c);
				set_vr(op.rt4, re_accurate(div, fsplat<f32[4]>(float_value)));
				return true;
			}
		}

		// FMA(spu_re(div), FNMS(div, spu_re(div), float_value), spu_re(div))
		if (auto [ok_fnms, div] = match_expr(b, fnms(MT, a, fsplat<f32[4]>(float_value))); ok_fnms && op.ra == op.rc)
		{
			if (auto [ok_re] = match_expr(a, spu_re(div)); ok_re)
			{
				erase_stores(a, b, c);
				set_vr(op.rt4, re_accurate(div, fsplat<f32[4]>(float_value)));
				return true;
			}
		}

		// FMA(spu_re(div), FNMS(spu_re(div), div, float_value), spu_re(div))
		if (auto [ok_fnms, div] = match_expr(b, fnms(a, MT, fsplat<f32[4]>(float_value))); ok_fnms && op.ra == op.rc)
		{
			if (auto [ok_re] = match_expr(a, spu_re(div)); ok_re)
			{
				erase_stores(a, b, c);
				set_vr(op.rt4, re_accurate(div, fsplat<f32[4]>(float_value)));
				return true;
			}
		}

		return false;
	};

	if (check_accurate_reciprocal_pattern_for_float(1.0f))
		return;

	if (check_accurate_reciprocal_pattern_for_float(std::bit_cast<f32>(std::bit_cast<u32>(1.0f) + 1)))
		return;

	// NFS Most Wanted doesn't like this
	if (g_cfg.core.spu_xfloat_accuracy == xfloat_accuracy::relaxed)
	{
		// Those patterns are not safe vs non optimization as inaccuracy from spu_re will spread with early fm before the accuracy is improved
		// Match division (fast)
		// FMA(FNMS(fm(diva<*> spu_re(divb)), divb, diva), spu_re(divb), fm(diva<*> spu_re(divb)))
		if (auto [ok_fnma, divb, diva] = match_expr(a, fnms(c, MT, MT)); ok_fnma)
		{
			if (auto
[ok_fm, fm1, fm2] = match_expr(c, fm(MT, MT)); ok_fm && ((fm1.eq(diva) && fm2.eq(b)) || (fm1.eq(b) && fm2.eq(diva))))
			{
				if (auto [ok_re] = match_expr(b, spu_re(divb)); ok_re)
				{
					// Whole refinement sequence recognized: emit a plain division.
					erase_stores(b, c);
					set_vr(op.rt4, diva / divb);
					return;
				}
			}
		}

		// FMA(spu_re(divb), FNMS(fm(diva <*> spu_re(divb)), divb, diva), fm(diva <*> spu_re(divb)))
		if (auto [ok_fnma, divb, diva] = match_expr(b, fnms(c, MT, MT)); ok_fnma)
		{
			if (auto [ok_fm, fm1, fm2] = match_expr(c, fm(MT, MT)); ok_fm && ((fm1.eq(diva) && fm2.eq(a)) || (fm1.eq(a) && fm2.eq(diva))))
			{
				if (auto [ok_re] = match_expr(a, spu_re(divb)); ok_re)
				{
					erase_stores(a, c);
					set_vr(op.rt4, diva / divb);
					return;
				}
			}
		}
	}

	// Not all patterns can be simplified because of block scope
	// Those todos don't necessarily imply a missing pattern
	if (auto [ok_re, mystery] = match_expr(a, spu_re(MT)); ok_re)
	{
		spu_log.todo("[%s:0x%05x] Unmatched spu_re(a) found in FMA", m_hash, m_pos);
	}

	if (auto [ok_re, mystery] = match_expr(b, spu_re(MT)); ok_re)
	{
		spu_log.todo("[%s:0x%05x] Unmatched spu_re(b) found in FMA", m_hash, m_pos);
	}

	if (auto [ok_resq, mystery] = match_expr(c, spu_rsqrte(MT)); ok_resq)
	{
		spu_log.todo("[%s:0x%05x] Unmatched spu_rsqrte(c) found in FMA", m_hash, m_pos);
	}

	// No pattern matched: emit the generic FMA intrinsic.
	set_vr(op.rt4, fma(a, b, c));
}

// Deferred-intrinsic builder for FMS, named "spu_fms"; first two operands commutative.
template <typename T, typename U, typename V>
static llvm_calli<f32[4], T, U, V> fms(T&& a, U&& b, V&& c)
{
	return llvm_calli<f32[4], T, U, V>{"spu_fms", {std::forward<T>(a), std::forward<U>(b), std::forward<V>(c)}}.set_order_equality_hint(1, 1, 0);
}

// FMS: rt4 = a*b - c.
void FMS(spu_opcode_t op)
{
	// See FMA.
if (g_cfg.core.spu_xfloat_accuracy == xfloat_accuracy::accurate)
	{
		// Accurate: compute in f64 as a*b + (-c).
		const auto [a, b, c] = get_vrs<f64[4]>(op.ra, op.rb, op.rc);
		set_vr(op.rt4, fmuladd(a, b, -c));
		return;
	}

	register_intrinsic("spu_fms", [&](llvm::CallInst* ci)
	{
		const auto a = value<f32[4]>(ci->getOperand(0));
		const auto b = value<f32[4]>(ci->getOperand(1));
		const auto c = value<f32[4]>(ci->getOperand(2));

		if (g_cfg.core.spu_xfloat_accuracy == xfloat_accuracy::approximate)
		{
			// Bound the multiplicands first (xfloat has no Inf/NaN).
			return fma32x4(clamp_smax(a), clamp_smax(b), eval(-c));
		}
		else
		{
			return fma32x4(a, b, eval(-c));
		}
	});

	set_vr(op.rt4, fms(get_vr<f32[4]>(op.ra), get_vr<f32[4]>(op.rb), get_vr<f32[4]>(op.rc)));
}

// Deferred-intrinsic builder for FI (interpolate), named "spu_fi".
template <typename T, typename U>
static llvm_calli<f32[4], T, U> fi(T&& a, U&& b)
{
	return {"spu_fi", {std::forward<T>(a), std::forward<U>(b)}};
}

// Deferred-intrinsic builder for a recognized FREST+FI reciprocal estimate, named "spu_re".
template <typename T>
static llvm_calli<f32[4], T> spu_re(T&& a)
{
	return {"spu_re", {std::forward<T>(a)}};
}

// Deferred-intrinsic builder for a recognized FRSQEST+FI rsqrt estimate, named "spu_rsqrte".
template <typename T>
static llvm_calli<f32[4], T> spu_rsqrte(T&& a)
{
	return {"spu_rsqrte", {std::forward<T>(a)}};
}

// FI: refine the lookup-table estimate in rb using the original operand in ra.
// Also recognizes FREST/FRSQEST feeding FI and folds the pair into spu_re/spu_rsqrte.
void FI(spu_opcode_t op)
{
	register_intrinsic("spu_fi", [&](llvm::CallInst* ci)
	{
		// TODO: adjustment for denormals(for accurate xfloat only?)
		const auto a = bitcast<u32[4]>(value<f32[4]>(ci->getOperand(0)));
		const auto b = bitcast<u32[4]>(value<f32[4]>(ci->getOperand(1)));

		const auto base = (b & 0x007ffc00u) << 9; // Base fraction
		const auto ymul = (b & 0x3ff) * (a & 0x7ffff); // Step fraction * Y fraction (fixed point at 2^-32)
		const auto comparison = (ymul > base); // Should exponent be adjusted?
const auto bnew = (base - ymul) >> (zext<u32[4]>(comparison) ^ 9); // Shift one less bit if exponent is adjusted
		const auto base_result = (b & 0xff800000u) | (bnew & ~0xff800000u); // Inject old sign and exponent
		const auto adjustment = bitcast<u32[4]>(sext<s32[4]>(comparison)) & (1 << 23); // exponent adjustement for negative bnew
		return clamp_smax(eval(bitcast<f32[4]>(base_result - adjustment)));
	});

	const auto [a, b] = get_vrs<f32[4]>(op.ra, op.rb);

	switch (g_cfg.core.spu_xfloat_accuracy)
	{
	case xfloat_accuracy::approximate:
	{
		// For approximate, create a pattern but do not optimize yet
		register_intrinsic("spu_re", [&](llvm::CallInst* ci)
		{
			// FREST table lookup: 5 fraction bits index the LUT, exponent is negated
			// via saturating subtraction, sign passes through.
			const auto a = bitcast<u32[4]>(value<f32[4]>(ci->getOperand(0)));
			const auto a_fraction = (a >> splat<u32[4]>(18)) & splat<u32[4]>(0x1F);
			const auto a_exponent = (a & splat<u32[4]>(0x7F800000u));
			const auto r_exponent = sub_sat(build<u16[8]>(0000, 0x7E80, 0000, 0x7E80, 0000, 0x7E80, 0000, 0x7E80), bitcast<u16[8]>(a_exponent));
			// Zero exponent (input zero/denormal) yields an Inf-pattern exponent instead.
			const auto fix_exponent = select((a_exponent > 0), bitcast<u32[4]>(r_exponent), splat<u32[4]>(0x7F800000u));
			const auto a_sign = (a & splat<u32[4]>(0x80000000));
			value_t<u32[4]> b = eval(splat<u32[4]>(0));

			// Per-lane LUT fetch of the estimate fraction.
			for (u32 i = 0; i < 4; i++)
			{
				const auto eval_fraction = eval(extract(a_fraction, i));

				value_t<u32> r_fraction = load_const<u32>(m_spu_frest_fraction_lut, eval_fraction);

				b = eval(insert(b, i, r_fraction));
			}

			b = eval(b | fix_exponent | a_sign);

			// Inline the spu_fi refinement step on the freshly built estimate (same math as above).
			const auto base = (b & 0x007ffc00u) << 9; // Base fraction
			const auto ymul = (b & 0x3ff) * (a & 0x7ffff); // Step fraction * Y fraction (fixed point at 2^-32)
			const auto comparison = (ymul > base); // Should exponent be adjusted?
			const auto bnew = (base - ymul) >> (zext<u32[4]>(comparison) ^ 9); // Shift one less bit if exponent is adjusted
			const auto base_result = (b & 0xff800000u) | (bnew & ~0xff800000u); // Inject old sign and exponent
			const auto adjustment = bitcast<u32[4]>(sext<s32[4]>(comparison)) & (1 << 23); // exponent adjustement for negative bnew
			return clamp_smax(eval(bitcast<f32[4]>(base_result - adjustment)));
		});

		register_intrinsic("spu_rsqrte", [&](llvm::CallInst* ci)
		{
			// FRSQEST table lookup: 6 fraction bits (incl. exponent parity) index the fraction LUT,
			// the full exponent byte indexes the exponent LUT.
			const auto a = bitcast<u32[4]>(value<f32[4]>(ci->getOperand(0)));
			const auto a_fraction = (a >> splat<u32[4]>(18)) & splat<u32[4]>(0x3F);
			const auto a_exponent = (a >> splat<u32[4]>(23)) & splat<u32[4]>(0xFF);
			value_t<u32[4]> b = eval(splat<u32[4]>(0));

			for (u32 i = 0; i < 4; i++)
			{
				const auto eval_fraction = eval(extract(a_fraction, i));
				const auto eval_exponent = eval(extract(a_exponent, i));

				value_t<u32> r_fraction = load_const<u32>(m_spu_frsqest_fraction_lut, eval_fraction);
				value_t<u32> r_exponent = load_const<u32>(m_spu_frsqest_exponent_lut, eval_exponent);

				b = eval(insert(b, i, eval(r_fraction | r_exponent)));
			}

			// Inline the spu_fi refinement step (same math as above).
			const auto base = (b & 0x007ffc00u) << 9; // Base fraction
			const auto ymul = (b & 0x3ff) * (a & 0x7ffff); // Step fraction * Y fraction (fixed point at 2^-32)
			const auto comparison = (ymul > base); // Should exponent be adjusted?
			const auto bnew = (base - ymul) >> (zext<u32[4]>(comparison) ^ 9); // Shift one less bit if exponent is adjusted
			const auto base_result = (b & 0xff800000u) | (bnew & ~0xff800000u); // Inject old sign and exponent
			const auto adjustment = bitcast<u32[4]>(sext<s32[4]>(comparison)) & (1 << 23); // exponent adjustement for negative bnew
			return clamp_smax(eval(bitcast<f32[4]>(base_result - adjustment)));
		});
		break;
	}
	case xfloat_accuracy::relaxed:
	{
		// For relaxed, agressively optimize and use intrinsics, those make the results vary per cpu
		register_intrinsic("spu_re", [&](llvm::CallInst* ci)
		{
			const auto a = value<f32[4]>(ci->getOperand(0));
			return fre(a);
		});

		register_intrinsic("spu_rsqrte", [&](llvm::CallInst* ci)
		{
			const auto a = value<f32[4]>(ci->getOperand(0));
			return frsqe(a);
		});
		break;
	}
	default:
		break;
	}

	// Do not pattern match for accurate
	if (g_cfg.core.spu_xfloat_accuracy == xfloat_accuracy::approximate || g_cfg.core.spu_xfloat_accuracy == xfloat_accuracy::relaxed)
	{
		// FREST(x) feeding FI(x, est): fold the pair into spu_re(x).
		if (const auto [ok, mb] = match_expr(b, frest(match<f32[4]>())); ok && mb.eq(a))
		{
			erase_stores(b);
			set_vr(op.rt, spu_re(a));
			return;
		}

		// FRSQEST(x) feeding FI(x, est): fold the pair into spu_rsqrte(x).
		if (const auto [ok, mb] = match_expr(b, frsqest(match<f32[4]>())); ok && mb.eq(a))
		{
			erase_stores(b);
			set_vr(op.rt, spu_rsqrte(a));
			return;
		}
	}

	const auto r = eval(fi(a, b));
	if (!m_interp_magn && g_cfg.core.spu_xfloat_accuracy != xfloat_accuracy::accurate)
		spu_log.todo("[%s:0x%05x] Unmatched spu_fi found", m_hash, m_pos);
	set_vr(op.rt, r);
}

// CFLTS: convert each float element to a signed 32-bit integer with saturation,
// scaling by 2^(173 - i8) first (i8 == 173 means no scaling).
void CFLTS(spu_opcode_t op)
{
	if (g_cfg.core.spu_xfloat_accuracy == xfloat_accuracy::accurate)
	{
		value_t<f64[4]> a = get_vr<f64[4]>(op.ra);
		value_t<f64[4]> s;

		if (m_interp_magn)
			s = eval(vsplat<f64[4]>(bitcast<f64>(((1023 + 173) - get_imm<u64>(op.i8)) << 52)));
		else
			s = eval(fsplat<f64[4]>(std::exp2(static_cast<int>(173 - op.i8))));

		if (op.i8 != 173 || m_interp_magn)
			a = eval(a * s);

		value_t<s32[4]> r;

		// Constant input: fold the conversion at compile time.
		if (auto ca = llvm::dyn_cast<llvm::ConstantDataVector>(a.value))
		{
			const f64 data[4]
			{
				ca->getElementAsDouble(0),
ca->getElementAsDouble(1),
					ca->getElementAsDouble(2),
					ca->getElementAsDouble(3)
				};

				v128 result;

				// Saturate each lane to [smin, smax] just like the runtime path would
				for (u32 i = 0; i < 4; i++)
				{
					if (data[i] >= std::exp2(31.f))
					{
						result._s32[i] = smax;
					}
					else if (data[i] < std::exp2(-31.f))
					{
						result._s32[i] = smin;
					}
					else
					{
						result._s32[i] = static_cast<s32>(data[i]);
					}
				}

				r.value = make_const_vector(result, get_type<s32[4]>());
				set_vr(op.rt, r);
				return;
			}

			if (llvm::isa<llvm::ConstantAggregateZero>(a.value))
			{
				set_vr(op.rt, splat<u32[4]>(0));
				return;
			}

			r.value = m_ir->CreateFPToSI(a.value, get_type<s32[4]>());
			// XOR with the overflow mask turns the wrapped result into smax on positive overflow
			set_vr(op.rt, r ^ sext<s32[4]>(fcmp_ord(a >= fsplat<f64[4]>(std::exp2(31.f)))));
		}
		else
		{
			value_t<f32[4]> a = get_vr<f32[4]>(op.ra);
			value_t<f32[4]> s;
			if (m_interp_magn)
				s = eval(vsplat<f32[4]>(load_const<f32>(m_scale_float_to, get_imm<u8>(op.i8))));
			else
				s = eval(fsplat<f32[4]>(std::exp2(static_cast<float>(static_cast<s16>(173 - op.i8)))));
			if (op.i8 != 173 || m_interp_magn)
				a = eval(a * s);
			value_t<s32[4]> r;
			r.value = m_ir->CreateFPToSI(a.value, get_type<s32[4]>());
			// Compare raw float bits against the largest value below 2^31 to detect overflow
			set_vr(op.rt, r ^ sext<s32[4]>(bitcast<s32[4]>(a) > splat<s32[4]>(((31 + 127) << 23) - 1)));
		}
	}

	// CFLTU: convert float to unsigned int with scale 2^(173-i8), saturating (CreateFPToUI + clamping)
	void CFLTU(spu_opcode_t op)
	{
		if (g_cfg.core.spu_xfloat_accuracy == xfloat_accuracy::accurate)
		{
			value_t<f64[4]> a = get_vr<f64[4]>(op.ra);
			value_t<f64[4]> s;
			if (m_interp_magn)
				s = eval(vsplat<f64[4]>(bitcast<f64>(((1023 + 173) - get_imm<u64>(op.i8)) << 52)));
			else
				s = eval(fsplat<f64[4]>(std::exp2(static_cast<int>(173 - op.i8))));
			if (op.i8 != 173 || m_interp_magn)
				a = eval(a * s);

			value_t<s32[4]> r;

			// Constant folding: compute the saturated conversion at compile time
			if (auto ca = llvm::dyn_cast<llvm::ConstantDataVector>(a.value))
			{
				const f64 data[4]
				{
					ca->getElementAsDouble(0),
					ca->getElementAsDouble(1),
					ca->getElementAsDouble(2),
					ca->getElementAsDouble(3)
				};

				v128 result;

				// Saturate each lane to [0, umax]
				for (u32 i = 0; i < 4; i++)
				{
					if (data[i] >= std::exp2(32.f))
					{
						result._u32[i] = umax;
					}
					else if (data[i] < 0.)
					{
						result._u32[i] = 0;
					}
					else
					{
						result._u32[i] = static_cast<u32>(data[i]);
					}
				}

				r.value = make_const_vector(result, get_type<s32[4]>());
				set_vr(op.rt, r);
				return;
			}

			if (llvm::isa<llvm::ConstantAggregateZero>(a.value))
			{
				set_vr(op.rt, splat<u32[4]>(0));
				return;
			}

			r.value = m_ir->CreateFPToUI(a.value, get_type<s32[4]>());
			// Select umax on positive overflow; mask to zero for negative inputs
			set_vr(op.rt, select(fcmp_ord(a >= fsplat<f64[4]>(std::exp2(32.f))), splat<s32[4]>(-1), r & sext<s32[4]>(fcmp_ord(a >= fsplat<f64[4]>(0.)))));
		}
		else
		{
			value_t<f32[4]> a = get_vr<f32[4]>(op.ra);
			value_t<f32[4]> s;
			if (m_interp_magn)
				s = eval(vsplat<f32[4]>(load_const<f32>(m_scale_float_to, get_imm<u8>(op.i8))));
			else
				s = eval(fsplat<f32[4]>(std::exp2(static_cast<float>(static_cast<s16>(173 - op.i8)))));
			if (op.i8 != 173 || m_interp_magn)
				a = eval(a * s);
			value_t<s32[4]> r;

			if (m_use_avx512)
			{
				// AVX-512 FPToUI already saturates the high side; only negative inputs need clamping to zero
				const auto sc = eval(bitcast<f32[4]>(max(bitcast<s32[4]>(a), splat<s32[4]>(0x0))));
				r.value = m_ir->CreateFPToUI(sc.value, get_type<s32[4]>());
				set_vr(op.rt, r);
				return;
			}

			r.value = m_ir->CreateFPToUI(a.value, get_type<s32[4]>());
			// Select -1 (umax) on overflow; arithmetic shift of the sign bit zeroes negative inputs
			set_vr(op.rt, select(bitcast<s32[4]>(a) > splat<s32[4]>(((32 + 127) << 23) - 1), splat<s32[4]>(-1), r & ~(bitcast<s32[4]>(a) >> 31)));
		}
	}

	// CSFLT: convert signed int to float with scale 2^(i8-155) (CreateSIToFP)
	void CSFLT(spu_opcode_t op)
	{
		if (g_cfg.core.spu_xfloat_accuracy == xfloat_accuracy::accurate)
		{
			value_t<s32[4]> a = get_vr<s32[4]>(op.ra);
			value_t<f64[4]> r;

			// Constant folding of the int -> double conversion
			if (auto [ok, data] = get_const_vector(a.value, m_pos); ok)
			{
				r.value = build<f64[4]>(data._s32[0], data._s32[1], data._s32[2], data._s32[3]).eval(m_ir);
			}
			else
			{
				r.value = m_ir->CreateSIToFP(a.value, get_type<f64[4]>());
			}

			value_t<f64[4]> s;
			if (m_interp_magn)
				s = eval(vsplat<f64[4]>(bitcast<f64>((get_imm<u64>(op.i8) + (1023 - 155)) << 52)));
			else
				s = eval(fsplat<f64[4]>(std::exp2(static_cast<int>(op.i8 - 155))));
			if (op.i8 != 155 || m_interp_magn)
				r = eval(r * s);
			set_vr(op.rt, r);
		}
		else
		{
			value_t<f32[4]> r;
			r.value = m_ir->CreateSIToFP(get_vr<s32[4]>(op.ra).value, get_type<f32[4]>());
			value_t<f32[4]> s;
			if (m_interp_magn)
				s = eval(vsplat<f32[4]>(load_const<f32>(m_scale_to_float, get_imm<u8>(op.i8))));
			else
				s = eval(fsplat<f32[4]>(std::exp2(static_cast<float>(static_cast<s16>(op.i8 - 155)))));
			if (op.i8 != 155 || m_interp_magn)
				r = eval(r * s);
			set_vr(op.rt, r);
		}
	}

	// CUFLT: convert unsigned int to float with scale 2^(i8-155) (CreateUIToFP)
	void CUFLT(spu_opcode_t op)
	{
		if (g_cfg.core.spu_xfloat_accuracy == xfloat_accuracy::accurate)
		{
			value_t<s32[4]> a = get_vr<s32[4]>(op.ra);
			value_t<f64[4]> r;

			// Constant folding of the uint -> double conversion
			if (auto [ok, data] = get_const_vector(a.value, m_pos); ok)
			{
				r.value = build<f64[4]>(data._u32[0], data._u32[1], data._u32[2], data._u32[3]).eval(m_ir);
			}
			else
			{
				r.value = m_ir->CreateUIToFP(a.value, get_type<f64[4]>());
			}

			value_t<f64[4]> s;
			if (m_interp_magn)
				s = eval(vsplat<f64[4]>(bitcast<f64>((get_imm<u64>(op.i8) + (1023 - 155)) << 52)));
			else
				s = eval(fsplat<f64[4]>(std::exp2(static_cast<int>(op.i8 - 155))));
			if (op.i8 != 155 || m_interp_magn)
				r = eval(r * s);
			set_vr(op.rt, r);
		}
		else
		{
			value_t<f32[4]> r;
			r.value = m_ir->CreateUIToFP(get_vr<s32[4]>(op.ra).value, get_type<f32[4]>());
			value_t<f32[4]> s;
			if (m_interp_magn)
				s = eval(vsplat<f32[4]>(load_const<f32>(m_scale_to_float, get_imm<u8>(op.i8))));
			else
				s = eval(fsplat<f32[4]>(std::exp2(static_cast<float>(static_cast<s16>(op.i8 - 155)))));
			if (op.i8 != 155 || m_interp_magn)
				r = eval(r * s);
			set_vr(op.rt, r);
		}
	}

	// Store a 16-byte register to local storage at addr, byteswapping to big-endian layout
	void make_store_ls(value_t<u64> addr, value_t<u8[16]> data)
	{
		const auto bswapped = byteswap(data);
		m_ir->CreateStore(bswapped.eval(m_ir), m_ir->CreateGEP(get_type<u8>(), m_lsptr, addr.value));
	}

	// Load 16 bytes from local storage at addr, byteswapping from big-endian layout
	auto make_load_ls(value_t<u64> addr)
	{
		value_t<u8[16]> data;
		data.value = m_ir->CreateLoad(get_type<u8[16]>(), m_ir->CreateGEP(get_type<u8>(), m_lsptr, addr.value));
		return byteswap(data);
	}

	// STQX: store quadword, address = (ra + rb) & 0x3fff0; folds one constant operand when possible
	void STQX(spu_opcode_t op)
	{
		const auto a = get_vr(op.ra);
		const auto b = get_vr(op.rb);

		// Try both operand orders so either constant side can be folded into the address
		for (auto pair : std::initializer_list<std::pair<value_t<u32[4]>, value_t<u32[4]>>>{{a, b}, {b, a}})
		{
			if (auto [ok, data] = get_const_vector(pair.first.value, m_pos); ok)
			{
				data._u32[3] %= SPU_LS_SIZE;

				if (data._u32[3] % 0x10 == 0)
				{
value_t<u64> addr = eval(splat<u64>(data._u32[3]) + zext<u64>(extract(pair.second, 3) & 0x3fff0));
					make_store_ls(addr, get_vr<u8[16]>(op.rt));
					return;
				}
			}
		}

		// Generic path: sum both preferred-slot words and mask to a 16-byte-aligned LS address
		value_t<u64> addr = eval(zext<u64>((extract(a, 3) + extract(b, 3)) & 0x3fff0));
		make_store_ls(addr, get_vr<u8[16]>(op.rt));
	}

	// LQX: load quadword, address = (ra + rb) & 0x3fff0; folds one constant operand when possible
	void LQX(spu_opcode_t op)
	{
		const auto a = get_vr(op.ra);
		const auto b = get_vr(op.rb);

		// Try both operand orders so either constant side can be folded into the address
		for (auto pair : std::initializer_list<std::pair<value_t<u32[4]>, value_t<u32[4]>>>{{a, b}, {b, a}})
		{
			if (auto [ok, data] = get_const_vector(pair.first.value, m_pos); ok)
			{
				data._u32[3] %= SPU_LS_SIZE;

				if (data._u32[3] % 0x10 == 0)
				{
					value_t<u64> addr = eval(splat<u64>(data._u32[3]) + zext<u64>(extract(pair.second, 3) & 0x3fff0));
					set_vr(op.rt, make_load_ls(addr));
					return;
				}
			}
		}

		value_t<u64> addr = eval(zext<u64>((extract(a, 3) + extract(b, 3)) & 0x3fff0));
		set_vr(op.rt, make_load_ls(addr));
	}

	// STQA: store quadword to absolute address (i16 << 2) & 0x3fff0
	void STQA(spu_opcode_t op)
	{
		value_t<u64> addr = eval((get_imm<u64>(op.i16, false) << 2) & 0x3fff0);
		make_store_ls(addr, get_vr<u8[16]>(op.rt));
	}

	// LQA: load quadword from absolute address (i16 << 2) & 0x3fff0
	void LQA(spu_opcode_t op)
	{
		value_t<u64> addr = eval((get_imm<u64>(op.i16, false) << 2) & 0x3fff0);
		set_vr(op.rt, make_load_ls(addr));
	}

	// Compute the runtime PC for a compiled address as base_pc + (addr - m_base), widened to u64
	llvm::Value* get_pc_as_u64(u32 addr)
	{
		return m_ir->CreateAdd(m_ir->CreateZExt(m_base_pc, get_type<u64>()), m_ir->getInt64(addr - m_base));
	}

	// STQR: store quadword PC-relative
	void STQR(spu_opcode_t op) //
	{
		value_t<u64> addr;
		addr.value = m_interp_magn ? m_ir->CreateZExt(m_interp_pc, get_type<u64>()) : get_pc_as_u64(m_pos);
		addr = eval(((get_imm<u64>(op.i16, false) << 2) + addr) & (m_interp_magn ? 0x3fff0 : ~0xf));
		make_store_ls(addr, get_vr<u8[16]>(op.rt));
	}

	// LQR: load quadword PC-relative
	void LQR(spu_opcode_t op) //
	{
		value_t<u64> addr;
		addr.value = m_interp_magn ? m_ir->CreateZExt(m_interp_pc, get_type<u64>()) : get_pc_as_u64(m_pos);
		addr = eval(((get_imm<u64>(op.i16, false) << 2) + addr) & (m_interp_magn ? 0x3fff0 : ~0xf));
		set_vr(op.rt, make_load_ls(addr));
	}

	// STQD: store quadword with d-form displacement; skips dead register spills in true functions
	void STQD(spu_opcode_t op)
	{
		if (m_finfo && m_finfo->fn)
		{
			if (op.rt <= s_reg_sp || (op.rt >= s_reg_80 && op.rt <= s_reg_127))
			{
				// The register still holds the value loaded at function entry: the save is redundant
				if (m_block->bb->reg_save_dom[op.rt] && get_reg_raw(op.rt) == m_finfo->load[op.rt])
				{
					return;
				}
			}
		}

		value_t<u64> addr = eval(zext<u64>(extract(get_vr(op.ra), 3) & 0x3fff0) + (get_imm<u64>(op.si10) << 4));
		make_store_ls(addr, get_vr<u8[16]>(op.rt));
	}

	// LQD: load quadword with d-form displacement
	void LQD(spu_opcode_t op)
	{
		value_t<u64> addr = eval(zext<u64>(extract(get_vr(op.ra), 3) & 0x3fff0) + (get_imm<u64>(op.si10) << 4));
		set_vr(op.rt, make_load_ls(addr));
	}

	// Emit a conditional halt: when cond is true, publish the PC and fault via a store to 0xffdead00
	void make_halt(value_t<bool> cond)
	{
		const auto next = llvm::BasicBlock::Create(m_context, "", m_function);
		const auto halt = llvm::BasicBlock::Create(m_context, "", m_function);
		m_ir->CreateCondBr(cond.value, halt, next, m_md_unlikely);
		m_ir->SetInsertPoint(halt);
		if (m_interp_magn)
			m_ir->CreateStore(m_function->getArg(2), spu_ptr<u32>(&spu_thread::pc));
		else
			update_pc();
		const auto ptr = _ptr<u32>(m_memptr, 0xffdead00);
		m_ir->CreateStore(m_ir->getInt32("HALT"_u32), ptr);
		m_ir->CreateBr(next);
		m_ir->SetInsertPoint(next);
	}

	// HGT: halt if signed ra > rb (preferred slot)
	void HGT(spu_opcode_t op)
	{
		const auto cond = eval(extract(get_vr<s32[4]>(op.ra), 3) > extract(get_vr<s32[4]>(op.rb), 3));
		make_halt(cond);
	}

	// HEQ: halt if ra == rb (preferred slot)
	void HEQ(spu_opcode_t op)
	{
		const auto cond = eval(extract(get_vr(op.ra), 3) == extract(get_vr(op.rb), 3));
		make_halt(cond);
	}

	// HLGT: halt if unsigned ra > rb (preferred slot)
	void HLGT(spu_opcode_t op)
	{
		const auto cond = eval(extract(get_vr(op.ra), 3) > extract(get_vr(op.rb), 3));
		make_halt(cond);
	}

	// HGTI: halt if signed ra > immediate
	void HGTI(spu_opcode_t op)
	{
		const auto cond = eval(extract(get_vr<s32[4]>(op.ra), 3) > get_imm<s32>(op.si10));
		make_halt(cond);
	}

	// HEQI: halt if ra == immediate
	void HEQI(spu_opcode_t op)
	{
		const auto cond = eval(extract(get_vr(op.ra), 3) == get_imm<u32>(op.si10));
		make_halt(cond);
	}

	// HLGTI: halt if unsigned ra > immediate
	void HLGTI(spu_opcode_t op)
	{
		const auto cond = eval(extract(get_vr(op.ra), 3) > get_imm<u32>(op.si10));
		make_halt(cond);
	}

	// HBR: branch hint (currently ignored)
	void HBR([[maybe_unused]] spu_opcode_t op) //
	{
		// TODO: use the hint.
}

	// HBRA: branch hint, absolute form (currently ignored)
	void HBRA([[maybe_unused]] spu_opcode_t op) //
	{
		// TODO: use the hint.
	}

	// HBRR: branch hint, relative form (currently ignored)
	void HBRR([[maybe_unused]] spu_opcode_t op) //
	{
		// TODO: use the hint.
	}

	// TODO
	// Runtime helper for interrupt-enabled branches: enables interrupt status, and if events are
	// pending, disables interrupts, saves the return address to srr0 and redirects to the handler.
	static u32 exec_check_interrupts(spu_thread* _spu, u32 addr)
	{
		_spu->set_interrupt_status(true);

		if (_spu->ch_events.load().count)
		{
			_spu->interrupts_enabled = false;
			_spu->srr0 = addr;

			// Test for BR/BRA instructions (they are equivalent at zero pc)
			const u32 br = _spu->_ref<const u32>(0);

			if ((br & 0xfd80007f) == 0x30000000)
			{
				return (br >> 5) & 0x3fffc;
			}

			return 0;
		}

		return addr;
	}

	// Create the basic block implementing an indirect branch to addr, honoring the
	// e (enable interrupts) and d (disable interrupts) bits; when ret is true, also
	// attempts a fast function return via the stack mirror.
	llvm::BasicBlock* add_block_indirect(spu_opcode_t op, value_t<u32> addr, bool ret = true)
	{
		if (m_interp_magn)
		{
			m_interp_bblock = llvm::BasicBlock::Create(m_context, "", m_function);

			const auto cblock = m_ir->GetInsertBlock();
			const auto result = llvm::BasicBlock::Create(m_context, "", m_function);
			const auto e_exec = llvm::BasicBlock::Create(m_context, "", m_function);
			const auto d_test = llvm::BasicBlock::Create(m_context, "", m_function);
			const auto d_exec = llvm::BasicBlock::Create(m_context, "", m_function);
			const auto d_done = llvm::BasicBlock::Create(m_context, "", m_function);
			m_ir->SetInsertPoint(result);
			m_ir->CreateCondBr(get_imm<bool>(op.e).value, e_exec, d_test, m_md_unlikely);
			m_ir->SetInsertPoint(e_exec);
			const auto e_addr = call("spu_check_interrupts", &exec_check_interrupts, m_thread, addr.value);
			m_ir->CreateBr(d_test);
			m_ir->SetInsertPoint(d_test);
			const auto target = m_ir->CreatePHI(get_type<u32>(), 2);
			target->addIncoming(addr.value, result);
			target->addIncoming(e_addr, e_exec);
			m_ir->CreateCondBr(get_imm<bool>(op.d).value, d_exec, d_done, m_md_unlikely);
			m_ir->SetInsertPoint(d_exec);
			m_ir->CreateStore(m_ir->getFalse(), spu_ptr<bool>(&spu_thread::interrupts_enabled));
			m_ir->CreateBr(d_done);
			m_ir->SetInsertPoint(d_done);
			m_ir->CreateBr(m_interp_bblock);
			m_ir->SetInsertPoint(cblock);
			m_interp_pc = target;
			return result;
		}

		if (llvm::isa<llvm::Constant>(addr.value))
		{
			// Fixed branch excludes the possibility it's a function return (TODO)
			ret = false;
		}

		if (m_finfo && m_finfo->fn && op.opcode)
		{
			// Inside a true function: any indirect branch is treated as a return
			const auto cblock = m_ir->GetInsertBlock();
			const auto result = llvm::BasicBlock::Create(m_context, "", m_function);
			m_ir->SetInsertPoint(result);
			ret_function();
			m_ir->SetInsertPoint(cblock);
			return result;
		}

		// Load stack addr if necessary
		value_t<u32> sp;

		if (ret && g_cfg.core.spu_block_size != spu_block_size_type::safe)
		{
			if (op.opcode)
			{
				sp = eval(extract(get_reg_fixed(1), 3) & 0x3fff0);
			}
			else
			{
				sp.value = m_ir->CreateLoad(get_type<u32>(), spu_ptr<u32>(&spu_thread::gpr, 1, &v128::_u32, 3));
			}
		}

		const auto cblock = m_ir->GetInsertBlock();
		const auto result = llvm::BasicBlock::Create(m_context, "", m_function);
		m_ir->SetInsertPoint(result);

		if (op.e)
		{
			addr.value = call("spu_check_interrupts", &exec_check_interrupts, m_thread, addr.value);
		}

		if (op.d)
		{
			m_ir->CreateStore(m_ir->getFalse(), spu_ptr<bool>(&spu_thread::interrupts_enabled));
		}

		m_ir->CreateStore(addr.value, spu_ptr<u32>(&spu_thread::pc));

		if (ret && g_cfg.core.spu_block_size >= spu_block_size_type::mega)
		{
			// Compare address stored in stack mirror with addr
			const auto stack0 = eval(zext<u64>(sp) + ::offset32(&spu_thread::stack_mirror));
			const auto stack1 = eval(stack0 + 8);
			const auto _ret = m_ir->CreateLoad(get_type<u64>(), m_ir->CreateGEP(get_type<u8>(), m_thread, stack0.value));
			const auto link = m_ir->CreateLoad(get_type<u64>(), m_ir->CreateGEP(get_type<u8>(), m_thread, stack1.value));
			const auto fail = llvm::BasicBlock::Create(m_context, "", m_function);
			const auto done = llvm::BasicBlock::Create(m_context, "", m_function);
			const auto next = llvm::BasicBlock::Create(m_context, "", m_function);
			m_ir->CreateCondBr(m_ir->CreateICmpEQ(addr.value, m_ir->CreateTrunc(link, get_type<u32>())), next, fail, m_md_likely);
			m_ir->SetInsertPoint(next);
			// Also verify the instruction at the return address still matches what was recorded
			const auto cmp2 = m_ir->CreateLoad(get_type<u32>(), m_ir->CreateGEP(get_type<u8>(), m_lsptr, addr.value));
			m_ir->CreateCondBr(m_ir->CreateICmpEQ(cmp2, m_ir->CreateTrunc(_ret, get_type<u32>())), done, fail, m_md_likely);
			m_ir->SetInsertPoint(done);

			// Clear stack mirror and return by tail call to the provided return address
			m_ir->CreateStore(splat<u64[2]>(-1).eval(m_ir), m_ir->CreateGEP(get_type<u8>(), m_thread, stack0.value));
			const auto targ = m_ir->CreateAdd(m_ir->CreateLShr(_ret, 32), get_segment_base());
			const auto type = m_finfo->chunk->getFunctionType();
			const auto fval = m_ir->CreateIntToPtr(targ, type->getPointerTo());
			tail_chunk({type, fval}, m_ir->CreateTrunc(m_ir->CreateLShr(link, 32), get_type<u32>()));
			m_ir->SetInsertPoint(fail);
		}

		if (g_cfg.core.spu_block_size >= spu_block_size_type::mega)
		{
			// Try to load chunk address from the function table
			const auto fail = llvm::BasicBlock::Create(m_context, "", m_function);
			const auto done = llvm::BasicBlock::Create(m_context, "", m_function);
			const auto ad32 = m_ir->CreateSub(addr.value, m_base_pc);
			m_ir->CreateCondBr(m_ir->CreateICmpULT(ad32, m_ir->getInt32(m_size)), done, fail, m_md_likely);
			m_ir->SetInsertPoint(done);

			const auto ad64 = m_ir->CreateZExt(ad32, get_type<u64>());
			const auto pptr = dyn_cast<llvm::GetElementPtrInst>(m_ir->CreateGEP(m_function_table->getValueType(), m_function_table, {m_ir->getInt64(0), m_ir->CreateLShr(ad64, 2, "", true)}));
			tail_chunk({m_dispatch->getFunctionType(), m_ir->CreateLoad(pptr->getResultElementType(), pptr)});
			m_ir->SetInsertPoint(fail);
		}

		// Fall back to the external dispatcher
		tail_chunk(nullptr);
		m_ir->SetInsertPoint(cblock);
		return result;
	}

	// Return the basic block for the fall-through path (next instruction)
	llvm::BasicBlock* add_block_next()
	{
		if (m_interp_magn)
		{
			const auto cblock = m_ir->GetInsertBlock();
			m_ir->SetInsertPoint(m_interp_bblock);
			const auto target = m_ir->CreatePHI(get_type<u32>(), 2);
			target->addIncoming(m_interp_pc_next, cblock);
			target->addIncoming(m_interp_pc, m_interp_bblock->getSinglePredecessor());
			m_ir->SetInsertPoint(cblock);
			m_interp_pc = target;
			return m_interp_bblock;
		}

		return add_block(m_pos + 4);
	}

	// BIZ: branch indirect if rt (preferred slot) is zero
	void BIZ(spu_opcode_t op) //
	{
		if (m_block) m_block->block_end = m_ir->GetInsertBlock();

		const auto rt =
get_vr<u8[16]>(op.rt);

		// Checking for zero doesn't care about the order of the bytes,
		// so load the data before it's byteswapped
		if (auto [ok, as] = match_expr(rt, byteswap(match<u8[16]>())); ok)
		{
			m_block->block_end = m_ir->GetInsertBlock();
			const auto cond = eval(extract(bitcast<u32[4]>(as), 0) == 0);
			const auto addr = eval(extract(get_vr(op.ra), 3) & 0x3fffc);
			const auto target = add_block_indirect(op, addr);
			m_ir->CreateCondBr(cond.value, target, add_block_next());
			return;
		}

		const auto ox = get_vr<u32[4]>(op.rt);

		// Instead of extracting the value generated by orx, just test the input to orx with ptest
		if (auto [ok, as] = match_expr(ox, orx(match<u32[4]>())); ok)
		{
			m_block->block_end = m_ir->GetInsertBlock();
			const auto a = extract(bitcast<u64[2]>(as), 0);
			const auto b = extract(bitcast<u64[2]>(as), 1);
			const auto cond = eval((a | b) == 0);
			const auto addr = eval(extract(get_vr(op.ra), 3) & 0x3fffc);
			const auto target = add_block_indirect(op, addr);
			m_ir->CreateCondBr(cond.value, target, add_block_next());
			return;
		}

		// Check sign bit instead (optimization)
		if (match_vr<s32[4], s64[2]>(op.rt, [&](auto c, auto MP)
		{
			using VT = typename decltype(MP)::type;

			if (auto [ok, x] = match_expr(c, sext<VT>(match<bool[std::extent_v<VT>]>())); ok)
			{
				const auto a = get_vr<s8[16]>(op.rt);
				const auto cond = eval(bitcast<s16>(trunc<bool[16]>(a)) >= 0);
				const auto addr = eval(extract(get_vr(op.ra), 3) & 0x3fffc);
				const auto target = add_block_indirect(op, addr);
				m_ir->CreateCondBr(cond.value, target, add_block_next());
				return true;
			}

			return false;
		}))
		{
			return;
		}

		// Generic path: test the preferred-slot word directly
		const auto cond = eval(extract(get_vr(op.rt), 3) == 0);
		const auto addr = eval(extract(get_vr(op.ra), 3) & 0x3fffc);
		const auto target = add_block_indirect(op, addr);
		m_ir->CreateCondBr(cond.value, target, add_block_next());
	}

	// BINZ: branch indirect if rt (preferred slot) is non-zero
	void BINZ(spu_opcode_t op) //
	{
		if (m_block) m_block->block_end = m_ir->GetInsertBlock();

		const auto rt = get_vr<u8[16]>(op.rt);

		// Checking for zero doesn't care about the order of the bytes,
		// so load the data before it's byteswapped
		if (auto [ok, as] = match_expr(rt, byteswap(match<u8[16]>())); ok)
		{
			m_block->block_end = m_ir->GetInsertBlock();
			const auto cond = eval(extract(bitcast<u32[4]>(as), 0) != 0);
			const auto addr = eval(extract(get_vr(op.ra), 3) & 0x3fffc);
			const auto target = add_block_indirect(op, addr);
			m_ir->CreateCondBr(cond.value, target, add_block_next());
			return;
		}

		const auto ox = get_vr<u32[4]>(op.rt);

		// Instead of extracting the value generated by orx, just test the input to orx with ptest
		if (auto [ok, as] = match_expr(ox, orx(match<u32[4]>())); ok)
		{
			m_block->block_end = m_ir->GetInsertBlock();
			const auto a = extract(bitcast<u64[2]>(as), 0);
			const auto b = extract(bitcast<u64[2]>(as), 1);
			const auto cond = eval((a | b) != 0);
			const auto addr = eval(extract(get_vr(op.ra), 3) & 0x3fffc);
			const auto target = add_block_indirect(op, addr);
			m_ir->CreateCondBr(cond.value, target, add_block_next());
			return;
		}

		// Check sign bit instead (optimization)
		if (match_vr<s32[4], s64[2]>(op.rt, [&](auto c, auto MP)
		{
			using VT = typename decltype(MP)::type;

			if (auto [ok, x] = match_expr(c, sext<VT>(match<bool[std::extent_v<VT>]>())); ok)
			{
				const auto a = get_vr<s8[16]>(op.rt);
				const auto cond = eval(bitcast<s16>(trunc<bool[16]>(a)) < 0);
				const auto addr = eval(extract(get_vr(op.ra), 3) & 0x3fffc);
				const auto target = add_block_indirect(op, addr);
				m_ir->CreateCondBr(cond.value, target, add_block_next());
				return true;
			}

			return false;
		}))
		{
			return;
		}

		const auto cond = eval(extract(get_vr(op.rt), 3) != 0);
		const auto addr = eval(extract(get_vr(op.ra), 3) & 0x3fffc);
		const auto target = add_block_indirect(op, addr);
		m_ir->CreateCondBr(cond.value, target, add_block_next());
	}

	// BIHZ: branch indirect if rt halfword (slot 6) is zero
	void BIHZ(spu_opcode_t op) //
	{
		if (m_block) m_block->block_end = m_ir->GetInsertBlock();

		// Check sign bits of 2 vector elements (optimization)
		if (match_vr<s8[16], s16[8], s32[4], s64[2]>(op.rt, [&](auto c, auto MP)
		{
			using VT = typename decltype(MP)::type;

			if (auto [ok, x] = match_expr(c, sext<VT>(match<bool[std::extent_v<VT>]>())); ok)
			{
				const auto a = get_vr<s8[16]>(op.rt);
				const auto cond = eval((bitcast<s16>(trunc<bool[16]>(a)) & 0x3000) == 0);
				const auto addr = eval(extract(get_vr(op.ra), 3) & 0x3fffc);
				const auto target = add_block_indirect(op, addr);
				m_ir->CreateCondBr(cond.value, target, add_block_next());
				return true;
			}

			return false;
		}))
		{
			return;
		}

		const auto cond = eval(extract(get_vr<u16[8]>(op.rt), 6) == 0);
		const auto addr = eval(extract(get_vr(op.ra), 3) & 0x3fffc);
		const auto target = add_block_indirect(op, addr);
		m_ir->CreateCondBr(cond.value, target, add_block_next());
	}

	// BIHNZ: branch indirect if rt halfword (slot 6) is non-zero
	void BIHNZ(spu_opcode_t op) //
	{
		if (m_block) m_block->block_end = m_ir->GetInsertBlock();

		// Check sign bits of 2 vector elements (optimization)
		if (match_vr<s8[16], s16[8], s32[4], s64[2]>(op.rt, [&](auto c, auto MP)
		{
			using VT = typename decltype(MP)::type;

			if (auto [ok, x] = match_expr(c, sext<VT>(match<bool[std::extent_v<VT>]>())); ok)
			{
				const auto a = get_vr<s8[16]>(op.rt);
				const auto cond = eval((bitcast<s16>(trunc<bool[16]>(a)) & 0x3000) != 0);
				const auto addr = eval(extract(get_vr(op.ra), 3) & 0x3fffc);
				const auto target = add_block_indirect(op, addr);
				m_ir->CreateCondBr(cond.value, target, add_block_next());
				return true;
			}

			return false;
		}))
		{
			return;
		}

		const auto cond = eval(extract(get_vr<u16[8]>(op.rt), 6) != 0);
		const auto addr = eval(extract(get_vr(op.ra), 3) & 0x3fffc);
		const auto target = add_block_indirect(op, addr);
		m_ir->CreateCondBr(cond.value, target, add_block_next());
	}

	// BI: unconditional branch indirect; emits a switch/jump table when static targets are known
	void BI(spu_opcode_t op) //
	{
		if (m_block) m_block->block_end = m_ir->GetInsertBlock();

		const auto addr = eval(extract(get_vr(op.ra), 3) & 0x3fffc);

		if (m_interp_magn)
		{
			m_ir->CreateBr(add_block_indirect(op, addr));
			return;
		}

		// Create jump table if necessary (TODO)
		const auto tfound = m_targets.find(m_pos);

		if (op.d && tfound != m_targets.end() && tfound->second.size() == 1 && tfound->second[0] == spu_branch_target(m_pos, 1))
		{
			// Interrupts-disable pattern
m_ir->CreateStore(m_ir->getFalse(), spu_ptr<bool>(&spu_thread::interrupts_enabled));
			return;
		}

		if (!op.d && !op.e && tfound != m_targets.end() && tfound->second.size() > 1)
		{
			// Shift aligned address for switch
			const auto addrfx = m_ir->CreateSub(addr.value, m_base_pc);
			const auto sw_arg = m_ir->CreateLShr(addrfx, 2, "", true);

			// Initialize jump table targets
			std::map<u32, llvm::BasicBlock*> targets;

			for (u32 target : tfound->second)
			{
				if (m_block_info[target / 4])
				{
					targets.emplace(target, nullptr);
				}
			}

			// Initialize target basic blocks
			for (auto& pair : targets)
			{
				pair.second = add_block(pair.first);
			}

			if (targets.empty())
			{
				// Emergency exit
				spu_log.error("[%s] [0x%05x] No jump table targets at 0x%05x (%u)", m_hash, m_entry, m_pos, tfound->second.size());
				m_ir->CreateBr(add_block_indirect(op, addr));
				return;
			}

			// Get jump table bounds (optimization)
			const u32 start = targets.begin()->first;
			const u32 end = targets.rbegin()->first + 4;

			// Emit switch instruction aiming for a jumptable in the end (indirectbr could guarantee it)
			const auto sw = m_ir->CreateSwitch(sw_arg, llvm::BasicBlock::Create(m_context, "", m_function), (end - start) / 4);

			for (u32 pos = start; pos < end; pos += 4)
			{
				if (m_block_info[pos / 4] && targets.count(pos))
				{
					const auto found = targets.find(pos);

					if (found != targets.end())
					{
						sw->addCase(m_ir->getInt32(pos / 4 - m_base / 4), found->second);
						continue;
					}
				}

				sw->addCase(m_ir->getInt32(pos / 4 - m_base / 4), sw->getDefaultDest());
			}

			// Exit function on unexpected target
			m_ir->SetInsertPoint(sw->getDefaultDest());
			m_ir->CreateStore(addr.value, spu_ptr<u32>(&spu_thread::pc));

			if (m_finfo && m_finfo->fn)
			{
				// Can't afford external tail call in true functions
				m_ir->CreateStore(m_ir->getInt32("BIJT"_u32), _ptr<u32>(m_memptr, 0xffdead20));
				m_ir->CreateCall(m_test_state, {m_thread});
				m_ir->CreateBr(sw->getDefaultDest());
			}
			else
			{
				tail_chunk(nullptr);
			}
		}
		else
		{
			// Simple indirect branch
			m_ir->CreateBr(add_block_indirect(op, addr));
		}
	}

	// BISL: branch indirect and set link register
	void BISL(spu_opcode_t op) //
	{
		if (m_block) m_block->block_end = m_ir->GetInsertBlock();

		const auto addr = eval(extract(get_vr(op.ra), 3) & 0x3fffc);
		set_link(op);
		m_ir->CreateBr(add_block_indirect(op, addr, false));
	}

	// IRET: return from interrupt via srr0
	void IRET(spu_opcode_t op) //
	{
		if (m_block) m_block->block_end = m_ir->GetInsertBlock();

		value_t<u32> srr0;
		srr0.value = m_ir->CreateLoad(get_type<u32>(), spu_ptr<u32>(&spu_thread::srr0));
		m_ir->CreateBr(add_block_indirect(op, srr0));
	}

	// BISLED: branch indirect and set link if external events are pending
	void BISLED(spu_opcode_t op) //
	{
		if (m_block) m_block->block_end = m_ir->GetInsertBlock();

		const auto addr = eval(extract(get_vr(op.ra), 3) & 0x3fffc);
		set_link(op);
		const auto mask = m_ir->CreateTrunc(m_ir->CreateLShr(m_ir->CreateLoad(get_type<u64>(), spu_ptr<u64>(&spu_thread::ch_events), true), 32), get_type<u32>());
		const auto res = call("spu_get_events", &exec_get_events, m_thread, mask);
		const auto target = add_block_indirect(op, addr);
		m_ir->CreateCondBr(m_ir->CreateICmpNE(res, m_ir->getInt32(0)), target, add_block_next());
	}

	// BRZ: branch relative if rt (preferred slot) is zero
	void BRZ(spu_opcode_t op) //
	{
		if (m_interp_magn)
		{
			value_t<u32> target;
			target.value = m_interp_pc;
			target = eval((target + (get_imm<u32>(op.i16, false) << 2)) & 0x3fffc);
			m_interp_pc = m_ir->CreateSelect(eval(extract(get_vr(op.rt), 3) == 0).value, target.value, m_interp_pc_next);
			return;
		}

		const u32 target = spu_branch_target(m_pos, op.i16);

		const auto rt = get_vr<u8[16]>(op.rt);

		// Checking for zero doesn't care about the order of the bytes,
		// so load the data before it's byteswapped
		if (auto [ok, as] = match_expr(rt, byteswap(match<u8[16]>())); ok)
		{
			if (target != m_pos + 4)
			{
				m_block->block_end = m_ir->GetInsertBlock();
				const auto cond = eval(extract(bitcast<u32[4]>(as), 0) == 0);
				m_ir->CreateCondBr(cond.value, add_block(target), add_block(m_pos + 4));
				return;
			}
		}

		const auto ox = get_vr<u32[4]>(op.rt);

		// Instead of extracting the value generated by orx, just test the input to orx with ptest
		if (auto [ok, as] = match_expr(ox, orx(match<u32[4]>())); ok)
		{
			if (target != m_pos + 4)
			{
m_block->block_end = m_ir->GetInsertBlock();
				const auto a = extract(bitcast<u64[2]>(as), 0);
				const auto b = extract(bitcast<u64[2]>(as), 1);
				const auto cond = eval((a | b) == 0);
				m_ir->CreateCondBr(cond.value, add_block(target), add_block(m_pos + 4));
				return;
			}
		}

		// Check sign bit instead (optimization)
		if (match_vr<s32[4], s64[2]>(op.rt, [&](auto c, auto MP)
		{
			using VT = typename decltype(MP)::type;

			if (auto [ok, x] = match_expr(c, sext<VT>(match<bool[std::extent_v<VT>]>())); ok)
			{
				if (target != m_pos + 4)
				{
					m_block->block_end = m_ir->GetInsertBlock();
					const auto a = get_vr<s8[16]>(op.rt);
					const auto cond = eval(bitcast<s16>(trunc<bool[16]>(a)) >= 0);
					m_ir->CreateCondBr(cond.value, add_block(target), add_block(m_pos + 4));
					return true;
				}
			}

			return false;
		}))
		{
			return;
		}

		// Generic path: test the preferred-slot word directly
		if (target != m_pos + 4)
		{
			m_block->block_end = m_ir->GetInsertBlock();
			const auto cond = eval(extract(get_vr(op.rt), 3) == 0);
			m_ir->CreateCondBr(cond.value, add_block(target), add_block(m_pos + 4));
		}
	}

	// BRNZ: branch relative if rt (preferred slot) is non-zero
	void BRNZ(spu_opcode_t op) //
	{
		if (m_interp_magn)
		{
			value_t<u32> target;
			target.value = m_interp_pc;
			target = eval((target + (get_imm<u32>(op.i16, false) << 2)) & 0x3fffc);
			m_interp_pc = m_ir->CreateSelect(eval(extract(get_vr(op.rt), 3) != 0).value, target.value, m_interp_pc_next);
			return;
		}

		const u32 target = spu_branch_target(m_pos, op.i16);

		const auto rt = get_vr<u8[16]>(op.rt);

		// Checking for zero doesn't care about the order of the bytes,
		// so load the data before it's byteswapped
		if (auto [ok, as] = match_expr(rt, byteswap(match<u8[16]>())); ok)
		{
			if (target != m_pos + 4)
			{
				m_block->block_end = m_ir->GetInsertBlock();
				const auto cond = eval(extract(bitcast<u32[4]>(as), 0) != 0);
				m_ir->CreateCondBr(cond.value, add_block(target), add_block(m_pos + 4));
				return;
			}
		}

		const auto ox = get_vr<u32[4]>(op.rt);

		// Instead of extracting the value generated by orx, just test the input to orx with ptest
		if (auto [ok, as] = match_expr(ox, orx(match<u32[4]>())); ok)
		{
			if (target != m_pos + 4)
			{
				m_block->block_end = m_ir->GetInsertBlock();
				const auto a = extract(bitcast<u64[2]>(as), 0);
				const auto b = extract(bitcast<u64[2]>(as), 1);
				const auto cond = eval((a | b) != 0);
				m_ir->CreateCondBr(cond.value, add_block(target), add_block(m_pos + 4));
				return;
			}
		}

		// Check sign bit instead (optimization)
		if (match_vr<s32[4], s64[2]>(op.rt, [&](auto c, auto MP)
		{
			using VT = typename decltype(MP)::type;

			if (auto [ok, x] = match_expr(c, sext<VT>(match<bool[std::extent_v<VT>]>())); ok)
			{
				if (target != m_pos + 4)
				{
					m_block->block_end = m_ir->GetInsertBlock();
					const auto a = get_vr<s8[16]>(op.rt);
					const auto cond = eval(bitcast<s16>(trunc<bool[16]>(a)) < 0);
					m_ir->CreateCondBr(cond.value, add_block(target), add_block(m_pos + 4));
					return true;
				}
			}

			return false;
		}))
		{
			return;
		}

		if (target != m_pos + 4)
		{
			m_block->block_end = m_ir->GetInsertBlock();
			const auto cond = eval(extract(get_vr(op.rt), 3) != 0);
			m_ir->CreateCondBr(cond.value, add_block(target), add_block(m_pos + 4));
		}
	}

	// BRHZ: branch relative if rt halfword (slot 6) is zero
	void BRHZ(spu_opcode_t op) //
	{
		if (m_interp_magn)
		{
			value_t<u32> target;
			target.value = m_interp_pc;
			target = eval((target + (get_imm<u32>(op.i16, false) << 2)) & 0x3fffc);
			m_interp_pc = m_ir->CreateSelect(eval(extract(get_vr<u16[8]>(op.rt), 6) == 0).value, target.value, m_interp_pc_next);
			return;
		}

		const u32 target = spu_branch_target(m_pos, op.i16);

		// Check sign bits of 2 vector elements (optimization)
		if (match_vr<s8[16], s16[8], s32[4], s64[2]>(op.rt, [&](auto c, auto MP)
		{
			using VT = typename decltype(MP)::type;

			if (auto [ok, x] = match_expr(c, sext<VT>(match<bool[std::extent_v<VT>]>())); ok)
			{
				if (target != m_pos + 4)
				{
					m_block->block_end = m_ir->GetInsertBlock();
					const auto a = get_vr<s8[16]>(op.rt);
					const auto cond = eval((bitcast<s16>(trunc<bool[16]>(a)) & 0x3000) == 0);
					m_ir->CreateCondBr(cond.value, add_block(target), add_block(m_pos + 4));
					return true;
				}
			}

			return false;
		}))
		{
			return;
		}

		if (target != m_pos + 4)
		{
			m_block->block_end = m_ir->GetInsertBlock();
			const auto cond =
eval(extract(get_vr<u16[8]>(op.rt), 6) == 0);
			m_ir->CreateCondBr(cond.value, add_block(target), add_block(m_pos + 4));
		}
	}

	// BRHNZ: branch relative if rt halfword (slot 6) is non-zero
	void BRHNZ(spu_opcode_t op) //
	{
		if (m_interp_magn)
		{
			value_t<u32> target;
			target.value = m_interp_pc;
			target = eval((target + (get_imm<u32>(op.i16, false) << 2)) & 0x3fffc);
			m_interp_pc = m_ir->CreateSelect(eval(extract(get_vr<u16[8]>(op.rt), 6) != 0).value, target.value, m_interp_pc_next);
			return;
		}

		const u32 target = spu_branch_target(m_pos, op.i16);

		// Check sign bits of 2 vector elements (optimization)
		if (match_vr<s8[16], s16[8], s32[4], s64[2]>(op.rt, [&](auto c, auto MP)
		{
			using VT = typename decltype(MP)::type;

			if (auto [ok, x] = match_expr(c, sext<VT>(match<bool[std::extent_v<VT>]>())); ok)
			{
				if (target != m_pos + 4)
				{
					m_block->block_end = m_ir->GetInsertBlock();
					const auto a = get_vr<s8[16]>(op.rt);
					const auto cond = eval((bitcast<s16>(trunc<bool[16]>(a)) & 0x3000) != 0);
					m_ir->CreateCondBr(cond.value, add_block(target), add_block(m_pos + 4));
					return true;
				}
			}

			return false;
		}))
		{
			return;
		}

		if (target != m_pos + 4)
		{
			m_block->block_end = m_ir->GetInsertBlock();
			const auto cond = eval(extract(get_vr<u16[8]>(op.rt), 6) != 0);
			m_ir->CreateCondBr(cond.value, add_block(target), add_block(m_pos + 4));
		}
	}

	// BRA: branch absolute; falls back to an indirect branch if the code was relocated
	void BRA(spu_opcode_t op) //
	{
		if (m_interp_magn)
		{
			m_interp_pc = eval((get_imm<u32>(op.i16, false) << 2) & 0x3fffc).value;
			return;
		}

		const auto compiled_pos = m_ir->getInt32(m_pos);
		const u32 target = spu_branch_target(0, op.i16);

		m_block->block_end = m_ir->GetInsertBlock();
		const auto real_pos = get_pc(m_pos);
		value_t<u32> target_val;
		target_val.value = m_ir->getInt32(target);
		// Only take the direct edge if the code still runs at its compiled position
		m_ir->CreateCondBr(m_ir->CreateICmpEQ(real_pos, compiled_pos), add_block(target, true), add_block_indirect({}, target_val));
	}

	// BRASL: branch absolute and set link
	void BRASL(spu_opcode_t op) //
	{
		set_link(op);
		BRA(op);
	}

	// BR: unconditional branch relative
	void BR(spu_opcode_t op) //
	{
		if (m_interp_magn)
		{
			value_t<u32> target;
			target.value = m_interp_pc;
			target = eval((target + (get_imm<u32>(op.i16, false) << 2)) & 0x3fffc);
			m_interp_pc = target.value;
			return;
		}

		const u32 target = spu_branch_target(m_pos, op.i16);

		if (target != m_pos + 4)
		{
			m_block->block_end = m_ir->GetInsertBlock();
			m_ir->CreateBr(add_block(target));
		}
	}

	// BRSL: branch relative and set link; emits a direct call when the target is a known function
	void BRSL(spu_opcode_t op) //
	{
		set_link(op);

		const u32 target = spu_branch_target(m_pos, op.i16);

		if (m_finfo && m_finfo->fn && target != m_pos + 4)
		{
			if (auto fn = add_function(target)->fn)
			{
				call_function(fn);
				return;
			}
			else
			{
				spu_log.fatal("[0x%x] Can't add function 0x%x", m_pos, target);
				return;
			}
		}

		BR(op);
	}

	// Write the return address (pc + 4) into rt's preferred slot and, for mega blocks,
	// record the return chunk in the stack mirror for the fast-return path.
	void set_link(spu_opcode_t op)
	{
		if (m_interp_magn)
		{
			value_t<u32> next;
			next.value = m_interp_pc_next;
			set_vr(op.rt, insert(splat<u32[4]>(0), 3, next));
			return;
		}

		set_vr(op.rt, insert(splat<u32[4]>(0), 3, value<u32>(get_pc(m_pos + 4)) & 0x3fffc));

		if (m_finfo && m_finfo->fn)
		{
			return;
		}

		if (g_cfg.core.spu_block_size >= spu_block_size_type::mega && m_block_info[m_pos / 4 + 1] && m_entry_info[m_pos / 4 + 1])
		{
			// Store the return function chunk address at the stack mirror
			const auto pfunc = add_function(m_pos + 4);
			const auto stack0 = eval(zext<u64>(extract(get_reg_fixed(1), 3) & 0x3fff0) + ::offset32(&spu_thread::stack_mirror));
			const auto stack1 = eval(stack0 + 8);
			const auto rel_ptr = m_ir->CreateSub(m_ir->CreatePtrToInt(pfunc->chunk, get_type<u64>()), get_segment_base());
			const auto ptr_plus_op = m_ir->CreateOr(m_ir->CreateShl(rel_ptr, 32), m_ir->getInt64(m_next_op));
			const auto base_plus_pc = m_ir->CreateOr(m_ir->CreateShl(m_ir->CreateZExt(m_base_pc, get_type<u64>()), 32), m_ir->getInt64(m_pos + 4));
			m_ir->CreateStore(ptr_plus_op, m_ir->CreateGEP(get_type<u8>(), m_thread, stack0.value));
			m_ir->CreateStore(base_plus_pc, m_ir->CreateGEP(get_type<u8>(), m_thread, stack1.value));
		}
	}

	// Obtain the JIT segment base address as a u64 value (via a mapped dummy global)
	llvm::Value* get_segment_base()
	{
		const auto type = llvm::FunctionType::get(get_type<void>(), {}, false);
		const auto func = llvm::cast<llvm::Function>(m_module->getOrInsertFunction("spu_segment_base", type).getCallee());
		m_engine->updateGlobalMapping("spu_segment_base", reinterpret_cast<u64>(jit_runtime::alloc(0, 0)));
		return m_ir->CreatePtrToInt(func, get_type<u64>());
	}

	static decltype(&spu_llvm_recompiler::UNK) decode(u32 op);
};

// Factory: create an LLVM-based SPU recompiler (interpreter mode when magn != 0)
std::unique_ptr<spu_recompiler_base> spu_recompiler_base::make_llvm_recompiler(u8 magn)
{
	return std::make_unique<spu_llvm_recompiler>(magn);
}

const spu_decoder<spu_llvm_recompiler> s_spu_llvm_decoder;

decltype(&spu_llvm_recompiler::UNK) spu_llvm_recompiler::decode(u32 op)
{
	return s_spu_llvm_decoder.decode(op);
}

#else

// Fallback when LLVM support is compiled out: interpreter mode returns null, JIT mode throws
std::unique_ptr<spu_recompiler_base> spu_recompiler_base::make_llvm_recompiler(u8 magn)
{
	if (magn)
	{
		return nullptr;
	}

	fmt::throw_exception("LLVM is not available in this build.");
}

#endif // LLVM_AVAILABLE
240,580
C++
.cpp
7,084
29.688594
227
0.618636
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
5,179
PPUAnalyser.cpp
RPCS3_rpcs3/rpcs3/Emu/Cell/PPUAnalyser.cpp
#include "stdafx.h" #include "PPUAnalyser.h" #include "PPUOpcodes.h" #include "PPUThread.h" #include <unordered_set> #include "util/yaml.hpp" #include "util/asm.hpp" LOG_CHANNEL(ppu_validator); const ppu_decoder<ppu_itype> s_ppu_itype; template<> void fmt_class_string<ppu_attr>::format(std::string& out, u64 arg) { format_enum(out, arg, [](ppu_attr value) { switch (value) { case ppu_attr::known_addr: return "known_addr"; case ppu_attr::known_size: return "known_size"; case ppu_attr::no_return: return "no_return"; case ppu_attr::no_size: return "no_size"; case ppu_attr::has_mfvscr: return "has_mfvscr"; case ppu_attr::__bitset_enum_max: break; } return unknown; }); } template<> void fmt_class_string<bs_t<ppu_attr>>::format(std::string& out, u64 arg) { format_bitset(out, arg, "[", ",", "]", &fmt_class_string<ppu_attr>::format); } void ppu_module::validate(u32 reloc) { // Load custom PRX configuration if available if (fs::file yml{path + ".yml"}) { const auto [cfg, error] = yaml_load(yml.to_string()); if (!error.empty()) { ppu_validator.error("Failed to load %s.yml: %s", path, error); return; } u32 index = 0; // Validate detected functions using information provided for (const auto func : cfg["functions"]) { const u32 addr = func["addr"].as<u32>(-1); const u32 size = func["size"].as<u32>(0); if (addr != umax && index < funcs.size()) { u32 found = funcs[index].addr - reloc; while (addr > found && index + 1 < funcs.size()) { ppu_validator.warning("%s.yml : unexpected function at 0x%x (0x%x, 0x%x)", path, found, addr, size); index++; found = funcs[index].addr - reloc; } if (addr < found) { ppu_validator.error("%s.yml : function not found (0x%x, 0x%x)", path, addr, size); continue; } if (size && size != funcs[index].size) { if (size + 4 != funcs[index].size || get_ref<u32>(addr + size) != ppu_instructions::NOP()) { ppu_validator.error("%s.yml : function size mismatch at 0x%x(size=0x%x) (0x%x, 0x%x)", path, found, funcs[index].size, addr, size); } } index++; } else { 
ppu_validator.error("%s.yml : function not found at the end (0x%x, 0x%x)", path, addr, size); break; } } if (!index) { return; // ??? } while (index < funcs.size()) { if (funcs[index].size) { ppu_validator.error("%s.yml : function not covered at 0x%x (size=0x%x)", path, funcs[index].addr, funcs[index].size); } index++; } ppu_validator.success("%s.yml : validation completed", path); } } static u32 ppu_test(const be_t<u32>* ptr, const void* fend, ppu_pattern_array pat) { const be_t<u32>* cur = ptr; for (auto& p : pat) { if (cur >= fend) { return 0; } if (*cur == ppu_instructions::NOP()) { cur++; if (cur >= fend) { return 0; } } if ((*cur & p.mask) != p.opcode) { return 0; } cur++; } return ::narrow<u32>((cur - ptr) * sizeof(*ptr)); } static u32 ppu_test(const be_t<u32>* ptr, const void* fend, ppu_pattern_matrix pats) { for (auto pat : pats) { if (const u32 len = ppu_test(ptr, fend, pat)) { return len; } } return 0; } namespace ppu_patterns { using namespace ppu_instructions; const ppu_pattern abort1[] { { STDU(r1, r1, -0xc0) }, { MFLR(r0) }, { STD(r26, r1, 0x90) }, { STD(r27, r1, 0x98) }, { STD(r28, r1, 0xa0) }, { STD(r29, r1, 0xa8) }, { STD(r30, r1, 0xb0) }, { STD(r31, r1, 0xb8) }, { STD(r0, r1, 0xd0) }, { LI(r3, 4) }, { LI(r4, 0) }, { LI(r11, 0x3dc) }, { SC(0) }, { MR(r29, r1) }, { CLRLDI(r29, r29, 32) }, { LWZ(r4, r2, 0), 0xffff }, { ADDI(r31, r1, 0x70) }, { LI(r3, 1) }, { LI(r5, 0x19) }, { MR(r6, r31) }, { LWZ(r28, r29, 4) }, { LI(r11, 0x193) }, { SC(0) }, { ADDI(r26, r1, 0x78) }, { LD(r3, r28, 0x10) }, { MR(r4, r26) }, { B(0, false, true), 0x3fffffc }, // .hex2str { LI(r5, 0x10) }, { CLRLDI(r4, r3, 32) }, { MR(r6, r31) }, { LI(r3, 1) }, { LI(r11, 0x193) }, { SC(0) }, { LWZ(r27, r2, 0), 0xffff }, { LI(r3, 1) }, { LI(r5, 1) }, { MR(r4, r27) }, { MR(r6, r31) }, { LI(r11, 0x193) }, { SC(0) }, { LD(r28, r28, 0) }, { CMPDI(cr7, r28, 0) }, { BEQ(cr7, +0x6c) }, { LWZ(r30, r2, 0), 0xffff }, { LI(r3, 1) }, { MR(r4, r30) }, { LI(r5, 0x19) }, { MR(r6, r31) }, { LI(r11, 
0x193) }, { SC(0) }, { CLRLDI(r29, r28, 32) }, { CLRLDI(r4, r26, 32) }, { LD(r3, r29, 0x10) }, { 0, 0xffffffff }, // .hex2str { LI(r5, 0x10) }, { CLRLDI(r4, r3, 32) }, { MR(r6, r31) }, { LI(r3, 1) }, { LI(r11, 0x193) }, { SC(0) }, { LI(r3, 1) }, { MR(r4, r27) }, { LI(r5, 1) }, { MR(r6, r31) }, { LI(r11, 0x193) }, { SC(0) }, { LD(r28, r29, 0) }, { CMPDI(cr7, r28, 0) }, { BNE(cr7, -0x60) }, { LWZ(r4, r2, 0), 0xffff }, { MR(r6, r31) }, { LI(r3, 1) }, { LI(r5, 0x27) }, { LI(r11, 0x193) }, { SC(0) }, { LI(r3, 1) }, { B(0, false, true), 0x3fffffc }, // .sys_process_exit { LD(r2, r1, 0x28) }, { LI(r3, 1) }, { B(0, false, true), 0x3fffffc }, // .exit }; const ppu_pattern abort2[] { { STDU(r1, r1, -0xc0) }, { MFLR(r0) }, { STD(r27, r1, 0x98) }, { STD(r28, r1, 0xa0) }, { STD(r29, r1, 0xa8) }, { STD(r30, r1, 0xb0) }, { STD(r31, r1, 0xb8) }, { STD(r0, r1, 0xd0) }, { MR(r9, r1) }, { CLRLDI(r9, r9, 32) }, { LWZ(r4, r2, 0), 0xffff }, { ADDI(r31, r1, 0x70) }, { LI(r3, 1) }, { LI(r5, 0x19) }, { MR(r6, r31) }, { LWZ(r29, r9, 4) }, { LI(r11, 0x193) }, { SC(0) }, { ADDI(r27, r1, 0x78) }, { LD(r3, r29, 0x10) }, { MR(r4, r27) }, { B(0, false, true), 0x3fffffc }, // .hex2str { LI(r5, 0x10) }, { CLRLDI(r4, r3, 32) }, { MR(r6, r31) }, { LI(r3, 1) }, { LI(r11, 0x193) }, { SC(0) }, { LWZ(r28, r2, 0), 0xffff }, { LI(r3, 1) }, { LI(r5, 1) }, { MR(r4, r28) }, { MR(r6, r31) }, { LI(r11, 0x193) }, { SC(0) }, { LD(r29, r29, 0) }, { CMPDI(cr7, r29, 0) }, { BEQ(cr7, +0x6c) }, { LWZ(r30, r2, 0), 0xffff }, { LI(r3, 1) }, { MR(r4, r30) }, { LI(r5, 0x19) }, { MR(r6, r31) }, { LI(r11, 0x193) }, { SC(0) }, { CLRLDI(r29, r29, 32) }, { CLRLDI(r4, r27, 32) }, { LD(r3, r29, 0x10) }, { 0, 0xffffffff }, // .hex2str { LI(r5, 0x10) }, { CLRLDI(r4, r3, 32) }, { MR(r6, r31) }, { LI(r3, 1) }, { LI(r11, 0x193) }, { SC(0) }, { LI(r3, 1) }, { MR(r4, r28) }, { LI(r5, 1) }, { MR(r6, r31) }, { LI(r11, 0x193) }, { SC(0) }, { LD(r29, r29, 0) }, { CMPDI(cr7, r29, 0) }, { BNE(cr7, -0x60) }, { LWZ(r4, r2, 0), 0xffff }, { 
MR(r6, r31) }, { LI(r3, 1) }, { LI(r5, 0x27) }, { LI(r11, 0x193) }, { SC(0) }, { LI(r3, 1) }, { B(0, false, true), 0x3fffffc }, // .sys_process_exit { LD(r2, r1, 0x28) }, { LI(r3, 1) }, { B(0, false, true), 0x3fffffc }, // .exit }; const ppu_pattern_array abort[] { abort1, abort2, }; const ppu_pattern get_context[] { ADDI(r3, r3, 0xf), CLRRDI(r3, r3, 4), STD(r1, r3, 0), STD(r2, r3, 8), STD(r14, r3, 0x18), STD(r15, r3, 0x20), STD(r16, r3, 0x28), STD(r17, r3, 0x30), STD(r18, r3, 0x38), STD(r19, r3, 0x40), STD(r20, r3, 0x48), STD(r21, r3, 0x50), STD(r22, r3, 0x58), STD(r23, r3, 0x60), STD(r24, r3, 0x68), STD(r25, r3, 0x70), STD(r26, r3, 0x78), STD(r27, r3, 0x80), STD(r28, r3, 0x88), STD(r29, r3, 0x90), STD(r30, r3, 0x98), STD(r31, r3, 0xa0), MFLR(r0), STD(r0, r3, 0xa8), 0x7c000026, // mfcr r0 STD(r0, r3, 0xb0), STFD(f14, r3, 0xb8), STFD(f15, r3, 0xc0), STFD(F16, r3, 0xc8), STFD(f17, r3, 0xd0), STFD(f18, r3, 0xd8), STFD(f19, r3, 0xe0), STFD(f20, r3, 0xe8), STFD(f21, r3, 0xf0), STFD(f22, r3, 0xf8), STFD(f23, r3, 0x100), STFD(f24, r3, 0x108), STFD(f25, r3, 0x110), STFD(f26, r3, 0x118), STFD(f27, r3, 0x120), STFD(f28, r3, 0x128), STFD(f29, r3, 0x130), STFD(f30, r3, 0x138), STFD(f31, r3, 0x140), 0x7c0042A6, // mfspr r0, vrsave STD(r0, r3, 0x148), ADDI(r4, r3, 0x150), ADDI(r5, r3, 0x160), ADDI(r6, r3, 0x170), ADDI(r7, r3, 0x180), STVX(v20, r0, r4), STVX(v21, r0, r5), STVX(v22, r0, r6), STVX(v23, r0, r7), ADDI(r4, r4, 0x40), ADDI(r5, r5, 0x40), ADDI(r6, r6, 0x40), ADDI(r7, r7, 0x40), STVX(v24, r0, r4), STVX(v25, r0, r5), STVX(v26, r0, r6), STVX(v27, r0, r7), ADDI(r4, r4, 0x40), ADDI(r5, r5, 0x40), ADDI(r6, r6, 0x40), ADDI(r7, r7, 0x40), STVX(v28, r0, r4), STVX(v29, r0, r5), STVX(v30, r0, r6), STVX(v31, r0, r7), LI(r3, 0), BLR(), }; const ppu_pattern set_context[] { ADDI(r3, r3, 0xf), CLRRDI(r3, r3, 4), LD(r1, r3, 0), LD(r2, r3, 8), LD(r14, r3, 0x18), LD(r15, r3, 0x20), LD(r16, r3, 0x28), LD(r17, r3, 0x30), LD(r18, r3, 0x38), LD(r19, r3, 0x40), LD(r20, r3, 0x48), LD(r21, r3, 
0x50), LD(r22, r3, 0x58), LD(r23, r3, 0x60), LD(r24, r3, 0x68), LD(r25, r3, 0x70), LD(r26, r3, 0x78), LD(r27, r3, 0x80), LD(r28, r3, 0x88), LD(r29, r3, 0x90), LD(r30, r3, 0x98), LD(r31, r3, 0xa0), LD(r0, r3, 0xa8), MTLR(r0), LD(r0, r3, 0xb0), 0x7c101120, // mtocrf 1, r0 0x7c102120, // mtocrf 2, r0 0x7c104120, // mtocrf 4, r0 0x7c108120, // mtocrf 8, r0 0x7c110120, // mtocrf 0x10, r0 0x7c120120, // mtocrf 0x20, r0 0x7c140120, // mtocrf 0x40, r0 0x7c180120, // mtocrf 0x80, r0 LFD(f14, r3, 0xb8), LFD(f15, r3, 0xc0), LFD(F16, r3, 0xc8), LFD(f17, r3, 0xd0), LFD(f18, r3, 0xd8), LFD(f19, r3, 0xe0), LFD(f20, r3, 0xe8), LFD(f21, r3, 0xf0), LFD(f22, r3, 0xf8), LFD(f23, r3, 0x100), LFD(f24, r3, 0x108), LFD(f25, r3, 0x110), LFD(f26, r3, 0x118), LFD(f27, r3, 0x120), LFD(f28, r3, 0x128), LFD(f29, r3, 0x130), LFD(f30, r3, 0x138), LFD(f31, r3, 0x140), LD(r0, r3, 0x148), 0x7c0043A6, //mtspr vrsave, r0 ADDI(r5, r3, 0x150), ADDI(r6, r3, 0x160), ADDI(r7, r3, 0x170), ADDI(r8, r3, 0x180), LVX(v20, r0, r5), LVX(v21, r0, r6), LVX(v22, r0, r7), LVX(v23, r0, r8), ADDI(r5, r5, 0x40), ADDI(r6, r6, 0x40), ADDI(r7, r7, 0x40), ADDI(r8, r8, 0x40), LVX(v24, r0, r5), LVX(v25, r0, r6), LVX(v26, r0, r7), LVX(v27, r0, r8), ADDI(r5, r5, 0x40), ADDI(r6, r6, 0x40), ADDI(r7, r7, 0x40), ADDI(r8, r8, 0x40), LVX(v28, r0, r5), LVX(v29, r0, r6), LVX(v30, r0, r7), LVX(v31, r0, r8), LI(r3, 0), 0x7c041810, // subfc r0, r4, r3 0x7c640194, // addze r3, r4 BLR(), }; const ppu_pattern x26c[] { LI(r9, 0), STD(r9, r6, 0), MR(r1, r6), STDU(r1, r1, -0x70), STD(r9, r1, 0), CLRLDI(r7, r3, 32), LWZ(r0, r7, 0), MTCTR(r0), LWZ(r2, r7, 4), MR(r3, r4), MR(r4, r5), BCTRL(), }; const ppu_pattern x2a0[] { MR(r8, r1), 0x7d212850, // subf r9, r1, r5 0x7c21496a, // stdux r1, r1, r9 MFLR(r0), STD(r0, r8, 0x10), STD(r2, r1, 0x28), CLRLDI(r7, r3, 32), LWZ(r0, r7, 0), MTCTR(r0), LWZ(r2, r7, 4), MR(r3, r4), BCTRL(), LD(r2, r1, 0x28), LD(r9, r1, 0x0), LD(r0, r9, 0x10), MTLR(r0), MR(r1, r9), BLR(), }; } bool ppu_module::analyse(u32 lib_toc, 
u32 entry, const u32 sec_end, const std::vector<u32>& applied, const std::vector<u32>& exported_funcs, std::function<bool()> check_aborted) { if (segs.empty()) { return false; } // Assume first segment is executable const u32 start = segs[0].addr; // End of executable segment (may change) u32 end = sec_end ? sec_end : segs[0].addr + segs[0].size; // Known TOCs (usually only 1) std::unordered_set<u32> TOCs; // Known functions std::map<u32, ppu_function> fmap; std::set<u32> known_functions; // Function analysis workload std::vector<std::reference_wrapper<ppu_function>> func_queue; // Known references (within segs, addr and value alignment = 4) std::set<u32> addr_heap; if (entry) { addr_heap.emplace(entry); } auto verify_func = [&](u32 addr) { if (entry) { // Fixed addresses return true; } // Check if the storage address exists within relocations for (auto& rel : this->relocs) { if ((rel.addr & -8) == (addr & -8)) { if (rel.type != 38 && rel.type != 44 && (rel.addr & -4) != (addr & -4)) { continue; } return true; } } return false; }; auto can_trap_continue = [](ppu_opcode_t op, ppu_itype::type type) { if ((op.bo & 0x1c) == 0x1c || (op.bo & 0x7) == 0x7) { // All signed or unsigned <=>, must be true return false; } if (op.simm16 == 0 && (type == ppu_itype::TWI || type == ppu_itype::TDI) && (op.bo & 0x5) == 0x5) { // Logically greater or equal to 0 return false; } return true; }; // Register new function auto add_func = [&](u32 addr, u32 toc, u32 caller) -> ppu_function& { ppu_function& func = fmap[addr]; if (caller) { // Register caller func.callers.emplace(caller); } if (func.addr) { if (toc && func.toc && func.toc != umax && func.toc != toc) { func.toc = -1; } else if (toc && func.toc == 0) { // Must then update TOC recursively func.toc = toc; func_queue.emplace_back(func); } return func; } func_queue.emplace_back(func); func.addr = addr; func.toc = toc; ppu_log.trace("Function 0x%x added (toc=0x%x)", addr, toc); return func; }; static const auto advance = [](auto& 
_ptr, auto& ptr, u32 count) { const auto old_ptr = ptr; _ptr += count; ptr += count; return old_ptr; }; // Register new TOC and find basic set of functions auto add_toc = [&](u32 toc) { if (!toc || toc == umax || !TOCs.emplace(toc).second) { return; } // Grope for OPD section (TODO: optimization, better constraints) for (const auto& seg : segs) { if (seg.size < 8) continue; const vm::cptr<void> seg_end = vm::cast(seg.addr + seg.size - 8); vm::cptr<u32> _ptr = vm::cast(seg.addr); auto ptr = get_ptr<u32>(_ptr); for (; _ptr <= seg_end;) { if (ptr[1] == toc && FN(x >= start && x < end && x % 4 == 0)(ptr[0]) && verify_func(_ptr.addr())) { // New function ppu_log.trace("OPD*: [0x%x] 0x%x (TOC=0x%x)", _ptr, ptr[0], ptr[1]); add_func(*ptr, addr_heap.count(_ptr.addr()) ? toc : 0, 0); advance(_ptr, ptr, 2); } else { advance(_ptr, ptr, 1); } } } }; // Get next reliable function address auto get_limit = [&](u32 addr) -> u32 { auto it = known_functions.lower_bound(addr); return it == known_functions.end() ? 
end : *it; }; // Find references indiscriminately for (const auto& seg : segs) { if (seg.size < 4) continue; vm::cptr<u32> _ptr = vm::cast(seg.addr); const vm::cptr<void> seg_end = vm::cast(seg.addr + seg.size - 4); auto ptr = get_ptr<u32>(_ptr); for (; _ptr <= seg_end; advance(_ptr, ptr, 1)) { const u32 value = *ptr; if (value % 4) { continue; } for (const auto& _seg : segs) { if (!_seg.addr) continue; if (value >= start && value < end) { addr_heap.emplace(value); break; } } } } // Find OPD section for (const auto& sec : secs) { if (sec.size % 8) { continue; } vm::cptr<void> sec_end = vm::cast(sec.addr + sec.size); // Probe for (vm::cptr<u32> _ptr = vm::cast(sec.addr); _ptr < sec_end; _ptr += 2) { auto ptr = get_ptr<u32>(_ptr); if (_ptr + 6 <= sec_end && !ptr[0] && !ptr[2] && ptr[1] == ptr[4] && ptr[3] == ptr[5]) { // Special OPD format case (some homebrews) advance(_ptr, ptr, 4); } if (_ptr + 2 > sec_end) { sec_end.set(0); break; } const u32 addr = ptr[0]; const u32 _toc = ptr[1]; // Rough Table of Contents borders const u32 toc_begin = _toc - 0x8000; //const u32 toc_end = _toc + 0x7ffc; // TODO: improve TOC constraints if (toc_begin % 4 || !get_ptr<u8>(toc_begin) || toc_begin >= 0x40000000 || (toc_begin >= start && toc_begin < end)) { sec_end.set(0); break; } if (addr % 4 || addr < start || addr >= end || !verify_func(_ptr.addr())) { sec_end.set(0); break; } } if (sec_end) ppu_log.notice("Reading OPD section at 0x%x...", sec.addr); // Mine for (vm::cptr<u32> _ptr = vm::cast(sec.addr); _ptr < sec_end; _ptr += 2) { auto ptr = get_ptr<u32>(_ptr); // Special case: see "Probe" if (!ptr[0]) advance(_ptr, ptr, 4); // Add function and TOC const u32 addr = ptr[0]; const u32 toc = ptr[1]; ppu_log.trace("OPD: [0x%x] 0x%x (TOC=0x%x)", _ptr, addr, toc); TOCs.emplace(toc); auto& func = add_func(addr, addr_heap.count(_ptr.addr()) ? 
toc : 0, 0); func.attr += ppu_attr::known_addr; known_functions.emplace(addr); } } // Register TOC from entry point if (entry && !lib_toc) { lib_toc = get_ref<u32>(entry) ? get_ref<u32>(entry + 4) : get_ref<u32>(entry + 20); } // Secondary attempt if (TOCs.empty() && lib_toc) { add_toc(lib_toc); } // Clean TOCs for (auto&& pair : fmap) { if (pair.second.toc == umax) { pair.second.toc = 0; } } // Find .eh_frame section for (const auto& sec : secs) { if (sec.size % 4) { continue; } vm::cptr<void> sec_end = vm::cast(sec.addr + sec.size); // Probe for (vm::cptr<u32> _ptr = vm::cast(sec.addr); _ptr < sec_end;) { if (!_ptr.aligned() || _ptr.addr() < sec.addr || _ptr >= sec_end) { sec_end.set(0); break; } const auto ptr = get_ptr<u32>(_ptr); const u32 size = ptr[0] + 4; if (size == 4 && _ptr + 1 == sec_end) { // Null terminator break; } if (size % 4 || size < 0x10 || _ptr + size / 4 > sec_end) { sec_end.set(0); break; } if (ptr[1]) { const u32 cie_off = _ptr.addr() - ptr[1] + 4; if (cie_off % 4 || cie_off < sec.addr || cie_off >= sec_end.addr()) { sec_end.set(0); break; } } _ptr = vm::cast(_ptr.addr() + size); } if (sec_end && sec.size > 4) ppu_log.notice("Reading .eh_frame section at 0x%x...", sec.addr); // Mine for (vm::cptr<u32> _ptr = vm::cast(sec.addr); _ptr < sec_end; _ptr = vm::cast(_ptr.addr() + *get_ptr<u32>(_ptr) + 4)) { const auto ptr = get_ptr<u32>(_ptr); if (ptr[0] == 0u) { // Null terminator break; } if (ptr[1] == 0u) { // CIE ppu_log.trace(".eh_frame: [0x%x] CIE 0x%x", ptr, ptr[0]); } else { // Get associated CIE (currently unused) const vm::cptr<u32> cie = vm::cast(_ptr.addr() - ptr[1] + 4); u32 addr = 0; u32 size = 0; // TODO: 64 bit or 32 bit values (approximation) if (ptr[2] == 0u && ptr[3] == 0u) { size = ptr[5]; } else if ((ptr[2] + 1 == 0u || ptr[2] == 0u) && ptr[4] == 0u && ptr[5]) { addr = ptr[3]; size = ptr[5]; } else if (ptr[2] + 1 && ptr[3]) { addr = ptr[2]; size = ptr[3]; } else { ppu_log.error(".eh_frame: [0x%x] 0x%x, 0x%x, 0x%x, 0x%x, 0x%x", 
ptr, ptr[0], ptr[1], ptr[2], ptr[3], ptr[4]); continue; } // TODO: absolute/relative offset (approximation) if (addr > 0xc0000000) { addr += _ptr.addr() + 8; } ppu_log.trace(".eh_frame: [0x%x] FDE 0x%x (cie=*0x%x, addr=0x%x, size=0x%x)", ptr, ptr[0], cie, addr, size); // TODO: invalid offsets, zero offsets (removed functions?) if (addr % 4 || size % 4 || size > (end - start) || addr < start || addr + size > end) { if (addr) ppu_log.error(".eh_frame: Invalid function 0x%x", addr); continue; } //auto& func = add_func(addr, 0, 0); //func.attr += ppu_attr::known_addr; //func.attr += ppu_attr::known_size; //func.size = size; //known_functions.emplace(func); } } } bool used_fallback = false; if (func_queue.empty()) { for (u32 addr : exported_funcs) { const u32 faddr = get_ref<u32>(addr); if (addr < start || addr >= start + segs[0].size) { // TODO: Reverse engineer how it works (maybe some flag in exports) if (faddr < start || faddr >= start + segs[0].size) { ppu_log.notice("Export not usable at 0x%x / 0x%x (0x%x...0x%x)", addr, faddr, start, start + segs[0].size); continue; } addr = faddr; } ppu_log.trace("Enqueued exported PPU function 0x%x for analysis", addr); add_func(addr, 0, 0); used_fallback = true; } } if (func_queue.empty() && segs[0].size >= 4u) { // Fallback, identify functions using callers (no jumptable detection, tail calls etc) ppu_log.warning("Looking for PPU functions using callers. ('%s')", name); vm::cptr<u32> _ptr = vm::cast(start); const vm::cptr<void> seg_end = vm::cast(end - 4); for (auto ptr = get_ptr<u32>(_ptr); _ptr <= seg_end; advance(_ptr, ptr, 1)) { const u32 iaddr = _ptr.addr(); const ppu_opcode_t op{*ptr}; const ppu_itype::type type = s_ppu_itype.decode(op.opcode); if ((type == ppu_itype::B || type == ppu_itype::BC) && op.lk && (!op.aa || verify_func(iaddr))) { const u32 target = (op.aa ? 0 : iaddr) + (type == ppu_itype::B ? 
+op.bt24 : +op.bt14); if (target >= start && target < end && target != iaddr && target != iaddr + 4) { // TODO: Check full executability if (s_ppu_itype.decode(get_ref<u32>(target)) != ppu_itype::UNK) { ppu_log.trace("Enqueued PPU function 0x%x using a caller at 0x%x", target, iaddr); add_func(target, 0, 0); used_fallback = true; } } } } } // Main loop (func_queue may grow) for (usz i = 0; i < func_queue.size(); i++) { if (check_aborted && check_aborted()) { return false; } ppu_function& func = func_queue[i]; // Fixup TOCs if (func.toc && func.toc != umax) { for (u32 addr : func.callers) { ppu_function& caller = fmap[addr]; if (!caller.toc) { add_func(addr, func.toc - caller.trampoline, 0); } } for (u32 addr : func.calls) { ppu_function& callee = fmap[addr]; if (!callee.toc) { add_func(addr, func.toc + func.trampoline, 0); } } } if (func.blocks.empty()) { // Special function analysis const vm::cptr<u32> _ptr = vm::cast(func.addr); const vm::cptr<void> fend = vm::cast(end); const auto ptr = get_ptr<u32>(_ptr); using namespace ppu_instructions; if (_ptr + 1 <= fend && (ptr[0] & 0xfc000001) == B({}, {})) { // Simple trampoline const u32 target = (ptr[0] & 0x2 ? 
0 : _ptr.addr()) + ppu_opcode_t{ptr[0]}.bt24; if (target == func.addr) { // Special case func.size = 0x4; func.blocks.emplace(func.addr, func.size); func.attr += ppu_attr::no_return; continue; } if (target >= start && target < end && (~ptr[0] & 0x2 || verify_func(_ptr.addr()))) { auto& new_func = add_func(target, func.toc, func.addr); if (new_func.blocks.empty()) { func_queue.emplace_back(func); continue; } func.size = 0x4; func.blocks.emplace(func.addr, func.size); func.attr += new_func.attr & ppu_attr::no_return; func.calls.emplace(target); func.trampoline = 0; continue; } } if (_ptr + 0x4 <= fend && (ptr[0] & 0xffff0000) == LIS(r11, 0) && (ptr[1] & 0xffff0000) == ADDI(r11, r11, 0) && ptr[2] == MTCTR(r11) && ptr[3] == BCTR()) { // Simple trampoline const u32 target = (ptr[0] << 16) + ppu_opcode_t{ptr[1]}.simm16; if (target >= start && target < end && verify_func(_ptr.addr())) { auto& new_func = add_func(target, func.toc, func.addr); if (new_func.blocks.empty()) { func_queue.emplace_back(func); continue; } func.size = 0x10; func.blocks.emplace(func.addr, func.size); func.attr += new_func.attr & ppu_attr::no_return; func.calls.emplace(target); func.trampoline = 0; continue; } } if (_ptr + 0x7 <= fend && ptr[0] == STD(r2, r1, 0x28) && (ptr[1] & 0xffff0000) == ADDIS(r12, r2, {}) && (ptr[2] & 0xffff0000) == LWZ(r11, r12, {}) && (ptr[3] & 0xffff0000) == ADDIS(r2, r2, {}) && (ptr[4] & 0xffff0000) == ADDI(r2, r2, {}) && ptr[5] == MTCTR(r11) && ptr[6] == BCTR()) { func.toc = -1; func.size = 0x1C; func.blocks.emplace(func.addr, func.size); func.attr += ppu_attr::known_addr; func.attr += ppu_attr::known_size; // Look for another imports to fill gaps (hack) auto _p2 = _ptr + 7; auto p2 = get_ptr<u32>(_p2); while (_p2 + 0x7 <= fend && p2[0] == STD(r2, r1, 0x28) && (p2[1] & 0xffff0000) == ADDIS(r12, r2, {}) && (p2[2] & 0xffff0000) == LWZ(r11, r12, {}) && (p2[3] & 0xffff0000) == ADDIS(r2, r2, {}) && (p2[4] & 0xffff0000) == ADDI(r2, r2, {}) && p2[5] == MTCTR(r11) && p2[6] == 
BCTR()) { auto& next = add_func(_p2.addr(), -1, func.addr); next.size = 0x1C; next.blocks.emplace(next.addr, next.size); next.attr += ppu_attr::known_addr; next.attr += ppu_attr::known_size; advance(_p2, p2, 7); } continue; } if (_ptr + 0x7 <= fend && ptr[0] == STD(r2, r1, 0x28) && (ptr[1] & 0xffff0000) == ADDIS(r2, r2, {}) && (ptr[2] & 0xffff0000) == ADDI(r2, r2, {}) && (ptr[3] & 0xffff0000) == LIS(r11, {}) && (ptr[4] & 0xffff0000) == ADDI(r11, r11, {}) && ptr[5] == MTCTR(r11) && ptr[6] == BCTR()) { // Trampoline with TOC const u32 target = (ptr[3] << 16) + s16(ptr[4]); const u32 toc_add = (ptr[1] << 16) + s16(ptr[2]); if (target >= start && target < end && verify_func((_ptr + 3).addr())) { auto& new_func = add_func(target, 0, func.addr); if (func.toc && func.toc != umax && new_func.toc == 0) { const u32 toc = func.toc + toc_add; add_toc(toc); add_func(new_func.addr, toc, 0); } else if (new_func.toc && new_func.toc != umax && func.toc == 0) { const u32 toc = new_func.toc - toc_add; add_toc(toc); add_func(func.addr, toc, 0); } //else if (new_func.toc - func.toc != toc_add) //{ // func.toc = -1; // new_func.toc = -1; //} if (new_func.blocks.empty()) { func_queue.emplace_back(func); continue; } func.size = 0x1C; func.blocks.emplace(func.addr, func.size); func.attr += new_func.attr & ppu_attr::no_return; func.calls.emplace(target); func.trampoline = toc_add; continue; } } if (_ptr + 4 <= fend && ptr[0] == STD(r2, r1, 0x28) && (ptr[1] & 0xffff0000) == ADDIS(r2, r2, {}) && (ptr[2] & 0xffff0000) == ADDI(r2, r2, {}) && (ptr[3] & 0xfc000001) == B({}, {})) { // Trampoline with TOC const u32 toc_add = (ptr[1] << 16) + s16(ptr[2]); const u32 target = (ptr[3] & 0x2 ? 
0 : (_ptr + 3).addr()) + ppu_opcode_t{ptr[3]}.bt24; if (target >= start && target < end && (~ptr[3] & 0x2 || verify_func((_ptr + 3).addr()))) { auto& new_func = add_func(target, 0, func.addr); if (func.toc && func.toc != umax && new_func.toc == 0) { const u32 toc = func.toc + toc_add; add_toc(toc); add_func(new_func.addr, toc, 0); } else if (new_func.toc && new_func.toc != umax && func.toc == 0) { const u32 toc = new_func.toc - toc_add; add_toc(toc); add_func(func.addr, toc, 0); } //else if (new_func.toc - func.toc != toc_add) //{ // func.toc = -1; // new_func.toc = -1; //} if (new_func.blocks.empty()) { func_queue.emplace_back(func); continue; } func.size = 0x10; func.blocks.emplace(func.addr, func.size); func.attr += new_func.attr & ppu_attr::no_return; func.calls.emplace(target); func.trampoline = toc_add; continue; } } if (_ptr + 8 <= fend && (ptr[0] & 0xffff0000) == LI(r12, 0) && (ptr[1] & 0xffff0000) == ORIS(r12, r12, 0) && (ptr[2] & 0xffff0000) == LWZ(r12, r12, 0) && ptr[3] == STD(r2, r1, 0x28) && ptr[4] == LWZ(r0, r12, 0) && ptr[5] == LWZ(r2, r12, 4) && ptr[6] == MTCTR(r0) && ptr[7] == BCTR()) { // The most used simple import stub func.toc = -1; func.size = 0x20; func.blocks.emplace(func.addr, func.size); func.attr += ppu_attr::known_addr; known_functions.emplace(func.addr); func.attr += ppu_attr::known_size; // Look for another imports to fill gaps (hack) auto _p2 = _ptr + 8; auto p2 = get_ptr<u32>(_p2); while (_p2 + 8 <= fend && (p2[0] & 0xffff0000) == LI(r12, 0) && (p2[1] & 0xffff0000) == ORIS(r12, r12, 0) && (p2[2] & 0xffff0000) == LWZ(r12, r12, 0) && p2[3] == STD(r2, r1, 0x28) && p2[4] == LWZ(r0, r12, 0) && p2[5] == LWZ(r2, r12, 4) && p2[6] == MTCTR(r0) && p2[7] == BCTR()) { auto& next = add_func(_p2.addr(), -1, func.addr); next.size = 0x20; next.blocks.emplace(next.addr, next.size); next.attr += ppu_attr::known_addr; next.attr += ppu_attr::known_size; advance(_p2, p2, 8); known_functions.emplace(next.addr); } continue; } if (_ptr + 3 <= fend && ptr[0] 
== 0x7c0004acu && ptr[1] == 0x00000000u && ptr[2] == BLR()) { // Weird function (illegal instruction) func.size = 0xc; func.blocks.emplace(func.addr, func.size); //func.attr += ppu_attr::no_return; continue; } if (const u32 len = ppu_test(ptr, get_ptr<void>(fend), ppu_patterns::abort)) { // Function "abort" ppu_log.notice("Function [0x%x]: 'abort'", func.addr); func.attr += ppu_attr::no_return; func.attr += ppu_attr::known_size; func.size = len; } // TODO: detect no_return, scribe more TODOs // Acknowledge completion func.blocks.emplace(vm::cast(func.addr), 0); } // Get function limit const u32 func_end = std::min<u32>(get_limit(func.addr + 1), func.attr & ppu_attr::known_size ? func.addr + func.size : end); // Block analysis workload std::vector<std::reference_wrapper<std::pair<const u32, u32>>> block_queue; // Add new block for analysis auto add_block = [&](u32 addr) -> bool { if (addr < func.addr || addr >= func_end) { return false; } const auto _pair = func.blocks.emplace(addr, 0); if (_pair.second) { block_queue.emplace_back(*_pair.first); return true; } return false; }; for (auto& block : func.blocks) { if (!block.second && block.first < func_end) { block_queue.emplace_back(block); } } // TODO: lower priority? if (func.attr & ppu_attr::no_size) { // Get next function const auto _next = fmap.lower_bound(func.blocks.crbegin()->first + 1); // Get limit const u32 func_end2 = _next == fmap.end() ? 
func_end : std::min<u32>(_next->first, func_end); // Set more block entries std::for_each(addr_heap.lower_bound(func.addr), addr_heap.lower_bound(func_end2), add_block); } const bool was_empty = block_queue.empty(); // Block loop (block_queue may grow, may be aborted via clearing) for (usz j = 0; j < block_queue.size(); j++) { auto& block = block_queue[j].get(); vm::cptr<u32> _ptr = vm::cast(block.first); auto ptr = ensure(get_ptr<u32>(_ptr)); for (; _ptr.addr() < func_end;) { const u32 iaddr = _ptr.addr(); const ppu_opcode_t op{*advance(_ptr, ptr, 1)}; const ppu_itype::type type = s_ppu_itype.decode(op.opcode); if (type == ppu_itype::UNK) { // Invalid blocks will remain empty break; } else if (type == ppu_itype::B || type == ppu_itype::BC) { const u32 target = (op.aa ? 0 : iaddr) + (type == ppu_itype::B ? +op.bt24 : +op.bt14); if (target < start || target >= end) { ppu_log.warning("[0x%x] Invalid branch at 0x%x -> 0x%x", func.addr, iaddr, target); continue; } if (!op.aa && target == _ptr.addr() && _ptr.addr() < func_end) { ppu_log.notice("[0x%x] Branch to next at 0x%x -> 0x%x", func.addr, iaddr, target); } const bool is_call = op.lk && target != iaddr && target != _ptr.addr() && _ptr.addr() < func_end; const auto pfunc = is_call ? 
&add_func(target, 0, 0) : nullptr; if (pfunc && pfunc->blocks.empty()) { // Postpone analysis (no info) block_queue.clear(); break; } // Add next block if necessary if ((is_call && !(pfunc->attr & ppu_attr::no_return)) || (type == ppu_itype::BC && (op.bo & 0x14) != 0x14)) { add_block(_ptr.addr()); } if (is_call && pfunc->attr & ppu_attr::no_return) { // Nothing } else if (is_call || target < func.addr || target >= func_end) { // Add function call (including obvious tail call) add_func(target, 0, 0); } else { // Add block add_block(target); } block.second = _ptr.addr() - block.first; break; } else if (type == ppu_itype::BCLR) { if (op.lk || (op.bo & 0x14) != 0x14) { add_block(_ptr.addr()); } block.second = _ptr.addr() - block.first; break; } else if (type == ppu_itype::BCCTR) { if (op.lk || (op.bo & 0x10) != 0x10) { add_block(_ptr.addr()); } else { // Analyse jumptable (TODO) const u32 jt_addr = _ptr.addr(); const u32 jt_end = func_end; for (; _ptr.addr() < jt_end; advance(_ptr, ptr, 1)) { const u32 addr = jt_addr + *ptr; if (addr == jt_addr) { // TODO (cannot branch to jumptable itself) break; } if (addr % 4 || addr < func.addr || addr >= jt_end) { break; } add_block(addr); } if (jt_addr != jt_end && _ptr.addr() == jt_addr) { // Acknowledge jumptable detection failure if (!(func.attr & ppu_attr::no_size)) { ppu_log.warning("[0x%x] Jump table not found! 
0x%x-0x%x", func.addr, jt_addr, jt_end); } func.attr += ppu_attr::no_size; add_block(jt_addr); block_queue.clear(); } else { ppu_log.trace("[0x%x] Jump table found: 0x%x-0x%x", func.addr, jt_addr, _ptr); } } block.second = _ptr.addr() - block.first; break; } else if (type & ppu_itype::trap && op.bo) { if (can_trap_continue(op, type)) { add_block(_ptr.addr()); } block.second = _ptr.addr() - block.first; break; } else if (type == ppu_itype::SC) { add_block(_ptr.addr()); block.second = _ptr.addr() - block.first; break; } else if (type == ppu_itype::STDU && func.attr & ppu_attr::no_size && (op.opcode == *ptr || *ptr == ppu_instructions::BLR())) { // Hack ppu_log.success("[0x%x] Instruction repetition: 0x%08x", iaddr, op.opcode); add_block(_ptr.addr()); block.second = _ptr.addr() - block.first; break; } } } if (block_queue.empty() && !was_empty) { // Block aborted: abort function, postpone func_queue.emplace_back(func); continue; } // Finalization: determine function size if (!(func.attr & ppu_attr::known_size)) { const auto last = func.blocks.crbegin(); if (last != func.blocks.crend()) { func.size = last->first + last->second - func.addr; } } // Finalization: normalize blocks for (auto& block : func.blocks) { const auto next = func.blocks.upper_bound(block.first); // Normalize block if necessary if (next != func.blocks.end() && block.second > next->first - block.first) { block.second = next->first - block.first; } // Invalidate blocks out of the function const u32 fend = func.addr + func.size; const u32 bend = block.first + block.second; if (block.first >= fend) { block.second = 0; } else if (bend > fend) { block.second -= bend - fend; } } // Finalization: process remaining tail calls for (const auto& block : func.blocks) { for (vm::cptr<u32> _ptr = vm::cast(block.first); _ptr.addr() < block.first + block.second;) { const u32 iaddr = _ptr.addr(); const ppu_opcode_t op{get_ref<u32>(_ptr++)}; const ppu_itype::type type = s_ppu_itype.decode(op.opcode); if (type == 
ppu_itype::B || type == ppu_itype::BC) { const u32 target = (op.aa ? 0 : iaddr) + (type == ppu_itype::B ? +op.bt24 : +op.bt14); if (target >= start && target < end && (!op.aa || verify_func(iaddr))) { if (target < func.addr || target >= func.addr + func.size) { func.calls.emplace(target); add_func(target, func.toc ? func.toc + func.trampoline : 0, func.addr); } } } else if (type == ppu_itype::BCCTR && !op.lk) { // Jumptable (do not touch entries) break; } } } // Finalization: decrease known function size (TODO) if (func.attr & ppu_attr::known_size) { const auto last = func.blocks.crbegin(); if (last != func.blocks.crend()) { func.size = std::min<u32>(func.size, last->first + last->second - func.addr); } } } // Function shrinkage, disabled (TODO: it's potentially dangerous but improvable) for (auto& _pair : fmap) { auto& func = _pair.second; // Get next function addr const auto _next = fmap.lower_bound(_pair.first + 1); const u32 next = _next == fmap.end() ? end : _next->first; // Just ensure that functions don't overlap if (func.addr + func.size > next) { ppu_log.trace("Function overlap: [0x%x] 0x%x -> 0x%x", func.addr, func.size, next - func.addr); continue; //func.size = next - func.addr; // Also invalidate blocks for (auto& block : func.blocks) { if (block.first + block.second > next) { block.second = block.first >= next ? 
0 : next - block.first; } } } // Suspicious block start u32 start = func.addr + func.size; if (next == end) { continue; } // Analyse gaps between functions for (vm::cptr<u32> _ptr = vm::cast(start); _ptr.addr() < next;) { const u32 addr = _ptr.addr(); const ppu_opcode_t op{get_ref<u32>(_ptr++)}; const ppu_itype::type type = s_ppu_itype.decode(op.opcode); if (type == ppu_itype::UNK) { break; } if (addr == start && op.opcode == ppu_instructions::NOP()) { if (start == func.addr + func.size) { // Extend function with tail NOPs (hack) func.size += 4; } start += 4; continue; } if (type == ppu_itype::SC && op.opcode != ppu_instructions::SC(0)) { break; } if (addr == start && op.opcode == ppu_instructions::BLR()) { start += 4; continue; } if (type == ppu_itype::B || type == ppu_itype::BC) { const u32 target = (op.aa ? 0 : addr) + (type == ppu_itype::B ? +op.bt24 : +op.bt14); if (target == addr) { break; } _ptr.set(next); } else if (type == ppu_itype::BCLR || type == ppu_itype::BCCTR) { _ptr.set(next); } if (_ptr.addr() >= next) { ppu_log.trace("Function gap: [0x%x] 0x%x bytes at 0x%x", func.addr, next - start, start); break; } } } // Fill TOCs for trivial case if (TOCs.size() == 1) { lib_toc = *TOCs.begin(); for (auto&& pair : fmap) { if (pair.second.toc == 0) { pair.second.toc = lib_toc; } } } (fmap.empty() ? 
ppu_log.error : ppu_log.notice)("Function analysis: %zu functions (%zu enqueued)", fmap.size(), func_queue.size()); // Decompose functions to basic blocks if (!entry && !sec_end) { // Regenerate end from blocks end = 0; } u32 per_instruction_bytes = 0; for (auto&& [_, func] : as_rvalue(fmap)) { if (func.attr & ppu_attr::no_size && entry) { // Disabled for PRX for now const u32 lim = get_limit(func.addr); ppu_log.warning("Function 0x%x will be compiled on per-instruction basis (next=0x%x)", func.addr, lim); for (u32 addr = func.addr; addr < lim; addr += 4) { auto& block = fmap[addr]; block.addr = addr; block.size = 4; block.toc = func.toc; block.attr = ppu_attr::no_size; } per_instruction_bytes += utils::sub_saturate<u32>(lim, func.addr); continue; } for (const auto& [addr, size] : func.blocks) { if (!size) { continue; } auto& block = fmap[addr]; if (block.addr || block.size) { ppu_log.trace("Block __0x%x exists (size=0x%x)", block.addr, block.size); continue; } block.addr = addr; block.size = size; block.toc = func.toc; ppu_log.trace("Block __0x%x added (func=0x%x, size=0x%x, toc=0x%x)", block.addr, _, block.size, block.toc); if (!entry && !sec_end) { // Workaround for SPRX: update end to the last found function end = std::max<u32>(end, block.addr + block.size); } } } // Simple callable block analysis std::vector<std::pair<u32, u32>> block_queue; block_queue.reserve(128000); std::unordered_set<u32> block_set; // Check relocations which may involve block addresses (usually it's type 1) for (auto& rel : this->relocs) { // Disabled (TODO) //if (!vm::check_addr<4>(rel.addr)) { continue; } const u32 target = get_ref<u32>(rel.addr); if (target % 4 || target < start || target >= end) { continue; } switch (rel.type) { case 1: case 24: case 26: case 27: case 28: case 107: case 108: case 109: case 110: { ppu_log.trace("Added block from reloc: 0x%x (0x%x, %u) (heap=%d)", target, rel.addr, rel.type, addr_heap.count(target)); block_queue.emplace_back(target, 0); 
block_set.emplace(target); continue; } default: { continue; } } } u32 exp = start; u32 lim = end; // Start with full scan block_queue.emplace_back(exp, lim); // Add entries from patches (on per-instruction basis) for (u32 addr : applied) { if (addr % 4 == 0 && addr >= start && addr < segs[0].addr + segs[0].size && !block_set.count(addr)) { block_queue.emplace_back(addr, addr + 4); block_set.emplace(addr); } } // block_queue may grow for (usz i = 0; i < block_queue.size(); i++) { std::tie(exp, lim) = block_queue[i]; if (lim == 0) { // Find next function const auto found = fmap.upper_bound(exp); if (found != fmap.cend()) { lim = found->first; } ppu_log.trace("Block rescan: addr=0x%x, lim=0x%x", exp, lim); } while (exp < lim) { u32 i_pos = exp; u32 block_edges[16]; u32 edge_count = 0; bool is_good = true; bool is_fallback = true; for (; i_pos < lim; i_pos += 4) { const ppu_opcode_t op{get_ref<u32>(i_pos)}; switch (auto type = s_ppu_itype.decode(op.opcode)) { case ppu_itype::UNK: case ppu_itype::ECIWX: case ppu_itype::ECOWX: { // Seemingly bad instruction, skip this block is_good = false; break; } case ppu_itype::TDI: case ppu_itype::TWI: { if (op.bo && (op.ra == 1u || op.ra == 13u || op.ra == 2u)) { // Non-user registers, checking them against a constant value makes no sense is_good = false; break; } [[fallthrough]]; } case ppu_itype::TD: case ppu_itype::TW: { if (!op.bo) { continue; } if (!can_trap_continue(op, type)) { is_fallback = false; } [[fallthrough]]; } case ppu_itype::B: case ppu_itype::BC: { if (type == ppu_itype::B || (type == ppu_itype::BC && (op.bo & 0x14) == 0x14)) { is_fallback = false; } if (type == ppu_itype::B || type == ppu_itype::BC) { if (type == ppu_itype::BC && (op.bo & 0x14) == 0x14 && op.bo & 0xB) { // Invalid form is_good = false; break; } if (entry == 0 && op.aa) { // Ignore absolute branches in PIC (PRX) is_good = false; break; } const u32 target = (op.aa ? 0 : i_pos) + (type == ppu_itype::B ? 
+op.bt24 : +op.bt14); if (target < segs[0].addr || target >= segs[0].addr + segs[0].size) { // Sanity check is_good = false; break; } const ppu_opcode_t test_op{get_ref<u32>(target)}; const auto type0 = s_ppu_itype.decode(test_op.opcode); if (type0 == ppu_itype::UNK) { is_good = false; break; } // Test another instruction just in case (testing more is unlikely to improve results by much) if (type0 == ppu_itype::SC || (type0 & ppu_itype::trap && !can_trap_continue(test_op, type0))) { // May not be followed by a valid instruction } else if (!(type0 & ppu_itype::branch)) { if (target + 4 >= segs[0].addr + segs[0].size) { is_good = false; break; } const auto type1 = s_ppu_itype.decode(get_ref<u32>(target + 4)); if (type1 == ppu_itype::UNK) { is_good = false; break; } } else if (u32 target0 = (test_op.aa ? 0 : target) + (type0 == ppu_itype::B ? +test_op.bt24 : +test_op.bt14); (type0 == ppu_itype::B || type0 == ppu_itype::BC) && (target0 < segs[0].addr || target0 >= segs[0].addr + segs[0].size)) { // Sanity check is_good = false; break; } if (target != i_pos && !fmap.contains(target)) { if (block_set.count(target) == 0) { ppu_log.trace("Block target found: 0x%x (i_pos=0x%x)", target, i_pos); block_queue.emplace_back(target, 0); block_set.emplace(target); } } } [[fallthrough]]; } case ppu_itype::BCCTR: case ppu_itype::BCLR: case ppu_itype::SC: { if (type == ppu_itype::SC && op.opcode != ppu_instructions::SC(0) && op.opcode != ppu_instructions::SC(1)) { // Strict garbage filter is_good = false; break; } if (type == ppu_itype::BCCTR && op.opcode & 0xe000) { // Garbage filter is_good = false; break; } if (type == ppu_itype::BCLR && op.opcode & 0xe000) { // Garbage filter is_good = false; break; } if (type == ppu_itype::BCCTR || type == ppu_itype::BCLR) { is_fallback = false; } if ((type & ppu_itype::branch && op.lk) || is_fallback) { // if farther instructions are valid: register all blocks // Otherwise, register none (all or nothing) if (edge_count < std::size(block_edges) 
&& i_pos + 4 < lim) { block_edges[edge_count++] = i_pos + 4; is_fallback = true; // Reset value continue; } } // Good block terminator found, add single block break; } default: { // Normal instruction: keep scanning continue; } } break; } if (i_pos < lim) { i_pos += 4; } else if (is_good && is_fallback && lim < end) { // Register fallback target if (!fmap.contains(lim) && !block_set.contains(lim)) { ppu_log.trace("Block target found: 0x%x (i_pos=0x%x)", lim, i_pos); block_queue.emplace_back(lim, 0); block_set.emplace(lim); } } if (is_good) { for (u32 it = 0, prev_addr = exp; it <= edge_count; it++) { const u32 block_end = it < edge_count ? block_edges[it] : i_pos; const u32 block_begin = std::exchange(prev_addr, block_end); auto& block = fmap[block_begin]; if (!block.addr) { block.addr = block_begin; block.size = block_end - block_begin; ppu_log.trace("Block __0x%x added (size=0x%x)", block.addr, block.size); if (get_limit(block_begin) == end) { block.attr += ppu_attr::no_size; } } } } exp = i_pos; } } // Remove overlaps in blocks for (auto it = fmap.begin(), end = fmap.end(); it != fmap.end(); it++) { const auto next = std::next(it); if (next != end && next->first < it->first + it->second.size) { it->second.size = next->first - it->first; } } // Convert map to vector (destructive) for (auto&& [_, block] : as_rvalue(std::move(fmap))) { if (block.attr & ppu_attr::no_size && block.size > 4 && !used_fallback) { ppu_log.warning("Block 0x%x will be compiled on per-instruction basis (size=0x%x)", block.addr, block.size); for (u32 addr = block.addr; addr < block.addr + block.size; addr += 4) { auto& i = funcs.emplace_back(); i.addr = addr; i.size = 4; i.toc = block.toc; i.attr = ppu_attr::no_size; } per_instruction_bytes += block.size; continue; } funcs.emplace_back(std::move(block)); } if (per_instruction_bytes) { const bool error = per_instruction_bytes >= 200 && per_instruction_bytes / 4 >= utils::aligned_div<u32>(::size32(funcs), 128); (error ? 
ppu_log.error : ppu_log.notice)("%d instructions will be compiled on per-instruction basis in total", per_instruction_bytes / 4); } ppu_log.notice("Block analysis: %zu blocks (%zu enqueued)", funcs.size(), block_queue.size()); return true; } // Temporarily #ifndef _MSC_VER #pragma GCC diagnostic ignored "-Wunused-parameter" #endif void ppu_acontext::UNK(ppu_opcode_t op) { std::fill_n(gpr, 32, spec_gpr{}); ppu_log.error("Unknown/Illegal opcode: 0x%08x at 0x%x", op.opcode, cia); } void ppu_acontext::MFVSCR(ppu_opcode_t op) { } void ppu_acontext::MTVSCR(ppu_opcode_t op) { } void ppu_acontext::VADDCUW(ppu_opcode_t op) { } void ppu_acontext::VADDFP(ppu_opcode_t op) { } void ppu_acontext::VADDSBS(ppu_opcode_t op) { } void ppu_acontext::VADDSHS(ppu_opcode_t op) { } void ppu_acontext::VADDSWS(ppu_opcode_t op) { } void ppu_acontext::VADDUBM(ppu_opcode_t op) { } void ppu_acontext::VADDUBS(ppu_opcode_t op) { } void ppu_acontext::VADDUHM(ppu_opcode_t op) { } void ppu_acontext::VADDUHS(ppu_opcode_t op) { } void ppu_acontext::VADDUWM(ppu_opcode_t op) { } void ppu_acontext::VADDUWS(ppu_opcode_t op) { } void ppu_acontext::VAND(ppu_opcode_t op) { } void ppu_acontext::VANDC(ppu_opcode_t op) { } void ppu_acontext::VAVGSB(ppu_opcode_t op) { } void ppu_acontext::VAVGSH(ppu_opcode_t op) { } void ppu_acontext::VAVGSW(ppu_opcode_t op) { } void ppu_acontext::VAVGUB(ppu_opcode_t op) { } void ppu_acontext::VAVGUH(ppu_opcode_t op) { } void ppu_acontext::VAVGUW(ppu_opcode_t op) { } void ppu_acontext::VCFSX(ppu_opcode_t op) { } void ppu_acontext::VCFUX(ppu_opcode_t op) { } void ppu_acontext::VCMPBFP(ppu_opcode_t op) { } void ppu_acontext::VCMPEQFP(ppu_opcode_t op) { } void ppu_acontext::VCMPEQUB(ppu_opcode_t op) { } void ppu_acontext::VCMPEQUH(ppu_opcode_t op) { } void ppu_acontext::VCMPEQUW(ppu_opcode_t op) { } void ppu_acontext::VCMPGEFP(ppu_opcode_t op) { } void ppu_acontext::VCMPGTFP(ppu_opcode_t op) { } void ppu_acontext::VCMPGTSB(ppu_opcode_t op) { } void 
ppu_acontext::VCMPGTSH(ppu_opcode_t op) { }
// AltiVec/VMX handlers: value-range analysis is not implemented for the
// vector register file, so every handler below is an intentional no-op stub.
void ppu_acontext::VCMPGTSW(ppu_opcode_t op) { }
void ppu_acontext::VCMPGTUB(ppu_opcode_t op) { }
void ppu_acontext::VCMPGTUH(ppu_opcode_t op) { }
void ppu_acontext::VCMPGTUW(ppu_opcode_t op) { }
void ppu_acontext::VCTSXS(ppu_opcode_t op) { }
void ppu_acontext::VCTUXS(ppu_opcode_t op) { }
void ppu_acontext::VEXPTEFP(ppu_opcode_t op) { }
void ppu_acontext::VLOGEFP(ppu_opcode_t op) { }
void ppu_acontext::VMADDFP(ppu_opcode_t op) { }
void ppu_acontext::VMAXFP(ppu_opcode_t op) { }
void ppu_acontext::VMAXSB(ppu_opcode_t op) { }
void ppu_acontext::VMAXSH(ppu_opcode_t op) { }
void ppu_acontext::VMAXSW(ppu_opcode_t op) { }
void ppu_acontext::VMAXUB(ppu_opcode_t op) { }
void ppu_acontext::VMAXUH(ppu_opcode_t op) { }
void ppu_acontext::VMAXUW(ppu_opcode_t op) { }
void ppu_acontext::VMHADDSHS(ppu_opcode_t op) { }
void ppu_acontext::VMHRADDSHS(ppu_opcode_t op) { }
void ppu_acontext::VMINFP(ppu_opcode_t op) { }
void ppu_acontext::VMINSB(ppu_opcode_t op) { }
void ppu_acontext::VMINSH(ppu_opcode_t op) { }
void ppu_acontext::VMINSW(ppu_opcode_t op) { }
void ppu_acontext::VMINUB(ppu_opcode_t op) { }
void ppu_acontext::VMINUH(ppu_opcode_t op) { }
void ppu_acontext::VMINUW(ppu_opcode_t op) { }
void ppu_acontext::VMLADDUHM(ppu_opcode_t op) { }
void ppu_acontext::VMRGHB(ppu_opcode_t op) { }
void ppu_acontext::VMRGHH(ppu_opcode_t op) { }
void ppu_acontext::VMRGHW(ppu_opcode_t op) { }
void ppu_acontext::VMRGLB(ppu_opcode_t op) { }
void ppu_acontext::VMRGLH(ppu_opcode_t op) { }
void ppu_acontext::VMRGLW(ppu_opcode_t op) { }
void ppu_acontext::VMSUMMBM(ppu_opcode_t op) { }
void ppu_acontext::VMSUMSHM(ppu_opcode_t op) { }
void ppu_acontext::VMSUMSHS(ppu_opcode_t op) { }
void ppu_acontext::VMSUMUBM(ppu_opcode_t op) { }
void ppu_acontext::VMSUMUHM(ppu_opcode_t op) { }
void ppu_acontext::VMSUMUHS(ppu_opcode_t op) { }
void ppu_acontext::VMULESB(ppu_opcode_t op) { }
void ppu_acontext::VMULESH(ppu_opcode_t op) { }
void ppu_acontext::VMULEUB(ppu_opcode_t op) { }
void ppu_acontext::VMULEUH(ppu_opcode_t op) { }
void ppu_acontext::VMULOSB(ppu_opcode_t op) { }
void ppu_acontext::VMULOSH(ppu_opcode_t op) { }
void ppu_acontext::VMULOUB(ppu_opcode_t op) { }
void ppu_acontext::VMULOUH(ppu_opcode_t op) { }
void ppu_acontext::VNMSUBFP(ppu_opcode_t op) { }
void ppu_acontext::VNOR(ppu_opcode_t op) { }
void ppu_acontext::VOR(ppu_opcode_t op) { }
void ppu_acontext::VPERM(ppu_opcode_t op) { }
void ppu_acontext::VPKPX(ppu_opcode_t op) { }
void ppu_acontext::VPKSHSS(ppu_opcode_t op) { }
void ppu_acontext::VPKSHUS(ppu_opcode_t op) { }
void ppu_acontext::VPKSWSS(ppu_opcode_t op) { }
void ppu_acontext::VPKSWUS(ppu_opcode_t op) { }
void ppu_acontext::VPKUHUM(ppu_opcode_t op) { }
void ppu_acontext::VPKUHUS(ppu_opcode_t op) { }
void ppu_acontext::VPKUWUM(ppu_opcode_t op) { }
void ppu_acontext::VPKUWUS(ppu_opcode_t op) { }
void ppu_acontext::VREFP(ppu_opcode_t op) { }
void ppu_acontext::VRFIM(ppu_opcode_t op) { }
void ppu_acontext::VRFIN(ppu_opcode_t op) { }
void ppu_acontext::VRFIP(ppu_opcode_t op) { }
void ppu_acontext::VRFIZ(ppu_opcode_t op) { }
void ppu_acontext::VRLB(ppu_opcode_t op) { }
void ppu_acontext::VRLH(ppu_opcode_t op) { }
void ppu_acontext::VRLW(ppu_opcode_t op) { }
void ppu_acontext::VRSQRTEFP(ppu_opcode_t op) { }
void ppu_acontext::VSEL(ppu_opcode_t op) { }
void ppu_acontext::VSL(ppu_opcode_t op) { }
void ppu_acontext::VSLB(ppu_opcode_t op) { }
void ppu_acontext::VSLDOI(ppu_opcode_t op) { }
void ppu_acontext::VSLH(ppu_opcode_t op) { }
void ppu_acontext::VSLO(ppu_opcode_t op) { }
void ppu_acontext::VSLW(ppu_opcode_t op) { }
void ppu_acontext::VSPLTB(ppu_opcode_t op) { }
void ppu_acontext::VSPLTH(ppu_opcode_t op) { }
void ppu_acontext::VSPLTISB(ppu_opcode_t op) { }
void ppu_acontext::VSPLTISH(ppu_opcode_t op) { }
void ppu_acontext::VSPLTISW(ppu_opcode_t op) { }
void ppu_acontext::VSPLTW(ppu_opcode_t op) { }
void ppu_acontext::VSR(ppu_opcode_t op) { }
void ppu_acontext::VSRAB(ppu_opcode_t op) { }
void
ppu_acontext::VSRAH(ppu_opcode_t op) { } void ppu_acontext::VSRAW(ppu_opcode_t op) { } void ppu_acontext::VSRB(ppu_opcode_t op) { } void ppu_acontext::VSRH(ppu_opcode_t op) { } void ppu_acontext::VSRO(ppu_opcode_t op) { } void ppu_acontext::VSRW(ppu_opcode_t op) { } void ppu_acontext::VSUBCUW(ppu_opcode_t op) { } void ppu_acontext::VSUBFP(ppu_opcode_t op) { } void ppu_acontext::VSUBSBS(ppu_opcode_t op) { } void ppu_acontext::VSUBSHS(ppu_opcode_t op) { } void ppu_acontext::VSUBSWS(ppu_opcode_t op) { } void ppu_acontext::VSUBUBM(ppu_opcode_t op) { } void ppu_acontext::VSUBUBS(ppu_opcode_t op) { } void ppu_acontext::VSUBUHM(ppu_opcode_t op) { } void ppu_acontext::VSUBUHS(ppu_opcode_t op) { } void ppu_acontext::VSUBUWM(ppu_opcode_t op) { } void ppu_acontext::VSUBUWS(ppu_opcode_t op) { } void ppu_acontext::VSUMSWS(ppu_opcode_t op) { } void ppu_acontext::VSUM2SWS(ppu_opcode_t op) { } void ppu_acontext::VSUM4SBS(ppu_opcode_t op) { } void ppu_acontext::VSUM4SHS(ppu_opcode_t op) { } void ppu_acontext::VSUM4UBS(ppu_opcode_t op) { } void ppu_acontext::VUPKHPX(ppu_opcode_t op) { } void ppu_acontext::VUPKHSB(ppu_opcode_t op) { } void ppu_acontext::VUPKHSH(ppu_opcode_t op) { } void ppu_acontext::VUPKLPX(ppu_opcode_t op) { } void ppu_acontext::VUPKLSB(ppu_opcode_t op) { } void ppu_acontext::VUPKLSH(ppu_opcode_t op) { } void ppu_acontext::VXOR(ppu_opcode_t op) { } void ppu_acontext::TDI(ppu_opcode_t op) { } void ppu_acontext::TWI(ppu_opcode_t op) { } void ppu_acontext::MULLI(ppu_opcode_t op) { const s64 amin = gpr[op.ra].imin; const s64 amax = gpr[op.ra].imax; // Undef or mixed range (default) s64 min = 0; s64 max = -1; // Special cases like powers of 2 and their negations are not handled if (amin <= amax) { min = amin * op.simm16; max = amax * op.simm16; // Check overflow if (min >> 63 != utils::mulh64(amin, op.simm16) || max >> 63 != utils::mulh64(amax, op.simm16)) { min = 0; max = -1; } else if (min > max) { std::swap(min, max); } } gpr[op.rd] = spec_gpr::range(min, max, 
gpr[op.ra].tz() + std::countr_zero<u64>(op.simm16)); } void ppu_acontext::SUBFIC(ppu_opcode_t op) { gpr[op.rd] = ~gpr[op.ra] + spec_gpr::fixed(op.simm16) + spec_gpr::fixed(1); } void ppu_acontext::CMPLI(ppu_opcode_t op) { } void ppu_acontext::CMPI(ppu_opcode_t op) { } void ppu_acontext::ADDIC(ppu_opcode_t op) { gpr[op.rd] = gpr[op.ra] + spec_gpr::fixed(op.simm16); } void ppu_acontext::ADDI(ppu_opcode_t op) { gpr[op.rd] = op.ra ? gpr[op.ra] + spec_gpr::fixed(op.simm16) : spec_gpr::fixed(op.simm16); } void ppu_acontext::ADDIS(ppu_opcode_t op) { gpr[op.rd] = op.ra ? gpr[op.ra] + spec_gpr::fixed(op.simm16 * 65536) : spec_gpr::fixed(op.simm16 * 65536); } void ppu_acontext::BC(ppu_opcode_t op) { } void ppu_acontext::SC(ppu_opcode_t op) { } void ppu_acontext::B(ppu_opcode_t op) { } void ppu_acontext::MCRF(ppu_opcode_t op) { } void ppu_acontext::BCLR(ppu_opcode_t op) { } void ppu_acontext::CRNOR(ppu_opcode_t op) { } void ppu_acontext::CRANDC(ppu_opcode_t op) { } void ppu_acontext::ISYNC(ppu_opcode_t op) { } void ppu_acontext::CRXOR(ppu_opcode_t op) { } void ppu_acontext::CRNAND(ppu_opcode_t op) { } void ppu_acontext::CRAND(ppu_opcode_t op) { } void ppu_acontext::CREQV(ppu_opcode_t op) { } void ppu_acontext::CRORC(ppu_opcode_t op) { } void ppu_acontext::CROR(ppu_opcode_t op) { } void ppu_acontext::BCCTR(ppu_opcode_t op) { } void ppu_acontext::RLWIMI(ppu_opcode_t op) { const u64 mask = ppu_rotate_mask(32 + op.mb32, 32 + op.me32); u64 min = gpr[op.rs].bmin; u64 max = gpr[op.rs].bmax; if (op.mb32 <= op.me32) { // 32-bit op, including mnemonics: INSLWI, INSRWI (TODO) min = utils::rol32(static_cast<u32>(min), op.sh32) & mask; max = utils::rol32(static_cast<u32>(max), op.sh32) & mask; } else { // Full 64-bit op with duplication min = utils::rol64(static_cast<u32>(min) | min << 32, op.sh32) & mask; max = utils::rol64(static_cast<u32>(max) | max << 32, op.sh32) & mask; } if (mask != umax) { // Insertion min |= gpr[op.ra].bmin & ~mask; max |= gpr[op.ra].bmax & ~mask; } gpr[op.rs] = 
spec_gpr::approx(min, max); } void ppu_acontext::RLWINM(ppu_opcode_t op) { const u64 mask = ppu_rotate_mask(32 + op.mb32, 32 + op.me32); u64 min = gpr[op.rs].bmin; u64 max = gpr[op.rs].bmax; if (op.mb32 <= op.me32) { if (op.sh32 == 0) { // CLRLWI, CLRRWI mnemonics gpr[op.ra] = gpr[op.ra] & spec_gpr::fixed(mask); return; } else if (op.mb32 == 0 && op.me32 == 31) { // ROTLWI, ROTRWI mnemonics } else if (op.mb32 == 0 && op.sh32 == 31 - op.me32) { // SLWI mnemonic } else if (op.me32 == 31 && op.sh32 == 32 - op.mb32) { // SRWI mnemonic } else if (op.mb32 == 0 && op.sh32 < 31 - op.me32) { // EXTLWI and other possible mnemonics } else if (op.me32 == 31 && 32 - op.sh32 < op.mb32) { // EXTRWI and other possible mnemonics } min = utils::rol32(static_cast<u32>(min), op.sh32) & mask; max = utils::rol32(static_cast<u32>(max), op.sh32) & mask; } else { // Full 64-bit op with duplication min = utils::rol64(static_cast<u32>(min) | min << 32, op.sh32) & mask; max = utils::rol64(static_cast<u32>(max) | max << 32, op.sh32) & mask; } gpr[op.ra] = spec_gpr::approx(min, max); } void ppu_acontext::RLWNM(ppu_opcode_t op) { const u64 mask = ppu_rotate_mask(32 + op.mb32, 32 + op.me32); u64 min = gpr[op.rs].bmin; u64 max = gpr[op.rs].bmax; if (op.mb32 <= op.me32) { if (op.mb32 == 0 && op.me32 == 31) { // ROTLW mnemonic } // TODO min = 0; max = mask; } else { // Full 64-bit op with duplication min = 0; max = mask; } gpr[op.ra] = spec_gpr::approx(min, max); } void ppu_acontext::ORI(ppu_opcode_t op) { gpr[op.ra] = gpr[op.rs] | spec_gpr::fixed(op.uimm16); } void ppu_acontext::ORIS(ppu_opcode_t op) { gpr[op.ra] = gpr[op.rs] | spec_gpr::fixed(op.uimm16 << 16); } void ppu_acontext::XORI(ppu_opcode_t op) { gpr[op.ra] = gpr[op.rs] ^ spec_gpr::fixed(op.uimm16); } void ppu_acontext::XORIS(ppu_opcode_t op) { gpr[op.ra] = gpr[op.rs] ^ spec_gpr::fixed(op.uimm16 << 16); } void ppu_acontext::ANDI(ppu_opcode_t op) { gpr[op.ra] = gpr[op.rs] & spec_gpr::fixed(op.uimm16); } void ppu_acontext::ANDIS(ppu_opcode_t 
op) { gpr[op.ra] = gpr[op.rs] & spec_gpr::fixed(op.uimm16 << 16); } void ppu_acontext::RLDICL(ppu_opcode_t op) { const u32 sh = op.sh64; const u32 mb = op.mbe64; const u64 mask = ~0ull >> mb; u64 min = gpr[op.rs].bmin; u64 max = gpr[op.rs].bmax; if (64 - sh < mb) { // EXTRDI mnemonic } else if (64 - sh == mb) { // SRDI mnemonic } else if (sh == 0) { // CLRLDI mnemonic gpr[op.ra] = gpr[op.rs] & spec_gpr::fixed(mask); return; } min = utils::rol64(min, sh) & mask; max = utils::rol64(max, sh) & mask; gpr[op.ra] = spec_gpr::approx(min, max); } void ppu_acontext::RLDICR(ppu_opcode_t op) { const u32 sh = op.sh64; const u32 me = op.mbe64; const u64 mask = ~0ull << (63 - me); u64 min = gpr[op.rs].bmin; u64 max = gpr[op.rs].bmax; if (sh < 63 - me) { // EXTLDI mnemonic } else if (sh == 63 - me) { // SLDI mnemonic } else if (sh == 0) { // CLRRDI mnemonic gpr[op.ra] = gpr[op.rs] & spec_gpr::fixed(mask); return; } min = utils::rol64(min, sh) & mask; max = utils::rol64(max, sh) & mask; gpr[op.ra] = spec_gpr::approx(min, max); } void ppu_acontext::RLDIC(ppu_opcode_t op) { const u32 sh = op.sh64; const u32 mb = op.mbe64; const u64 mask = ppu_rotate_mask(mb, 63 - sh); u64 min = gpr[op.rs].bmin; u64 max = gpr[op.rs].bmax; if (mb == 0 && sh == 0) { gpr[op.ra] = gpr[op.rs]; return; } else if (mb <= 63 - sh) { // CLRLSLDI //gpr[op.ra] = (gpr[op.rs] & spec_gpr::fixed(ppu_rotate_mask(0, sh + mb))) << spec_gpr::fixed(sh); return; } min = utils::rol64(min, sh) & mask; max = utils::rol64(max, sh) & mask; gpr[op.ra] = spec_gpr::approx(min, max); } void ppu_acontext::RLDIMI(ppu_opcode_t op) { const u32 sh = op.sh64; const u32 mb = op.mbe64; const u64 mask = ppu_rotate_mask(mb, 63 - sh); u64 min = gpr[op.rs].bmin; u64 max = gpr[op.rs].bmax; if (mb == 0 && sh == 0) { // Copy } else if (mb <= 63 - sh) { // INSRDI mnemonic } min = utils::rol64(min, sh) & mask; max = utils::rol64(max, sh) & mask; if (mask != umax) { // Insertion min |= gpr[op.ra].bmin & ~mask; max |= gpr[op.ra].bmax & ~mask; } 
gpr[op.ra] = spec_gpr::approx(min, max); }

// RLDCL/RLDCR: variable rotate amount — range tracking is TODO, so the
// result is widened to the full masked range.
void ppu_acontext::RLDCL(ppu_opcode_t op)
{
	const u32 mb = op.mbe64;
	const u64 mask = ~0ull >> mb;

	u64 min = gpr[op.rs].bmin;
	u64 max = gpr[op.rs].bmax;

	// TODO
	min = 0;
	max = mask;
	gpr[op.ra] = spec_gpr::approx(min, max);
}

void ppu_acontext::RLDCR(ppu_opcode_t op)
{
	const u32 me = op.mbe64;
	const u64 mask = ~0ull << (63 - me);

	u64 min = gpr[op.rs].bmin;
	u64 max = gpr[op.rs].bmax;

	// TODO
	min = 0;
	max = mask;
	gpr[op.ra] = spec_gpr::approx(min, max);
}

void ppu_acontext::CMP(ppu_opcode_t op) { }
void ppu_acontext::TW(ppu_opcode_t op) { }
void ppu_acontext::LVSL(ppu_opcode_t op) { }
void ppu_acontext::LVEBX(ppu_opcode_t op) { }
// SUBFC: rd = rb - ra, modeled as ~ra + rb + 1
void ppu_acontext::SUBFC(ppu_opcode_t op) { gpr[op.rd] = ~gpr[op.ra] + gpr[op.rb] + spec_gpr::fixed(1); }
void ppu_acontext::ADDC(ppu_opcode_t op) { gpr[op.rd] = gpr[op.ra] + gpr[op.rb]; }
void ppu_acontext::MULHDU(ppu_opcode_t op) { gpr[op.rd].set_undef(); }
void ppu_acontext::MULHWU(ppu_opcode_t op) { gpr[op.rd].set_undef(); }
void ppu_acontext::MFOCRF(ppu_opcode_t op) { gpr[op.rd].set_undef(); }
// LWARX: 32-bit load, result constrained to [0, 0xffffffff]
void ppu_acontext::LWARX(ppu_opcode_t op) { gpr[op.rd] = spec_gpr::range(0, u32{umax}); }
void ppu_acontext::LDX(ppu_opcode_t op) { gpr[op.rd].set_undef(); }
void ppu_acontext::LWZX(ppu_opcode_t op) { gpr[op.rd].set_undef(); }
void ppu_acontext::SLW(ppu_opcode_t op) { gpr[op.ra].set_undef(); }
void ppu_acontext::CNTLZW(ppu_opcode_t op) { gpr[op.ra].set_undef(); }
void ppu_acontext::SLD(ppu_opcode_t op) { gpr[op.ra].set_undef(); }
void ppu_acontext::AND(ppu_opcode_t op) { gpr[op.ra] = gpr[op.rs] & gpr[op.rb]; }
void ppu_acontext::CMPL(ppu_opcode_t op) { }
void ppu_acontext::LVSR(ppu_opcode_t op) { }
void ppu_acontext::LVEHX(ppu_opcode_t op) { }
// SUBF: rd = rb - ra, modeled as ~ra + rb + 1
void ppu_acontext::SUBF(ppu_opcode_t op) { gpr[op.rd] = ~gpr[op.ra] + gpr[op.rb] + spec_gpr::fixed(1); }
// X-form update load: EA = (rA) + (rB); rA receives the new address
void ppu_acontext::LDUX(ppu_opcode_t op) { const auto addr = gpr[op.ra] + gpr[op.rb]; gpr[op.rd].set_undef(); gpr[op.ra] = addr; }
void
ppu_acontext::DCBST(ppu_opcode_t op) { }
void ppu_acontext::LWZUX(ppu_opcode_t op) { const auto addr = gpr[op.ra] + gpr[op.rb]; gpr[op.rd].set_undef(); gpr[op.ra] = addr; }
void ppu_acontext::CNTLZD(ppu_opcode_t op) { gpr[op.ra].set_undef(); }
void ppu_acontext::ANDC(ppu_opcode_t op) { gpr[op.ra] = gpr[op.rs] & ~gpr[op.rb]; }
void ppu_acontext::TD(ppu_opcode_t op) { }
void ppu_acontext::LVEWX(ppu_opcode_t op) { }
void ppu_acontext::MULHD(ppu_opcode_t op) { gpr[op.rd].set_undef(); }
void ppu_acontext::MULHW(ppu_opcode_t op) { gpr[op.rd].set_undef(); }
// NOTE(review): LDARX assigns a default-constructed spec_gpr while LWARX
// uses an explicit range — presumably spec_gpr{} means "unknown 64-bit
// value"; confirm against the spec_gpr definition.
void ppu_acontext::LDARX(ppu_opcode_t op) { gpr[op.rd] = {}; }
void ppu_acontext::DCBF(ppu_opcode_t op) { }
void ppu_acontext::LBZX(ppu_opcode_t op) { gpr[op.rd].set_undef(); }
void ppu_acontext::LVX(ppu_opcode_t op) { }
// NEG: rd = -ra, modeled as ~ra + 1
void ppu_acontext::NEG(ppu_opcode_t op) { gpr[op.rd] = ~gpr[op.ra] + spec_gpr::fixed(1); }
void ppu_acontext::LBZUX(ppu_opcode_t op) { const auto addr = gpr[op.ra] + gpr[op.rb]; gpr[op.rd].set_undef(); gpr[op.ra] = addr; }
void ppu_acontext::NOR(ppu_opcode_t op) { gpr[op.ra] = ~(gpr[op.rs] | gpr[op.rb]); }
void ppu_acontext::STVEBX(ppu_opcode_t op) { }
// Extended arithmetic: carry-in approximated as range(0, 1)
void ppu_acontext::SUBFE(ppu_opcode_t op) { gpr[op.rd] = ~gpr[op.ra] + gpr[op.rb] + spec_gpr::range(0, 1); }
void ppu_acontext::ADDE(ppu_opcode_t op) { gpr[op.rd] = gpr[op.ra] + gpr[op.rb] + spec_gpr::range(0, 1); }
void ppu_acontext::MTOCRF(ppu_opcode_t op) { }
void ppu_acontext::STDX(ppu_opcode_t op) { }
void ppu_acontext::STWCX(ppu_opcode_t op) { }
void ppu_acontext::STWX(ppu_opcode_t op) { }
void ppu_acontext::STVEHX(ppu_opcode_t op) { }
// X-form update stores: only rA changes (store data does not affect GPRs)
void ppu_acontext::STDUX(ppu_opcode_t op) { const auto addr = gpr[op.ra] + gpr[op.rb]; gpr[op.ra] = addr; }
void ppu_acontext::STWUX(ppu_opcode_t op) { const auto addr = gpr[op.ra] + gpr[op.rb]; gpr[op.ra] = addr; }
void ppu_acontext::STVEWX(ppu_opcode_t op) { }
void ppu_acontext::SUBFZE(ppu_opcode_t op) { gpr[op.rd] = ~gpr[op.ra] + spec_gpr::range(0, 1); }
void ppu_acontext::ADDZE(ppu_opcode_t op) { gpr[op.rd] =
gpr[op.ra] + spec_gpr::range(0, 1); } void ppu_acontext::STDCX(ppu_opcode_t op) { } void ppu_acontext::STBX(ppu_opcode_t op) { } void ppu_acontext::STVX(ppu_opcode_t op) { } void ppu_acontext::SUBFME(ppu_opcode_t op) { gpr[op.rd] = ~gpr[op.ra] + spec_gpr::fixed(-1) + spec_gpr::range(0, 1); } void ppu_acontext::MULLD(ppu_opcode_t op) { gpr[op.rd].set_undef(); } void ppu_acontext::ADDME(ppu_opcode_t op) { gpr[op.rd] = gpr[op.ra] + spec_gpr::fixed(-1) + spec_gpr::range(0, 1); } void ppu_acontext::MULLW(ppu_opcode_t op) { gpr[op.rd].set_undef(); } void ppu_acontext::DCBTST(ppu_opcode_t op) { } void ppu_acontext::STBUX(ppu_opcode_t op) { const auto addr = gpr[op.ra] + gpr[op.rb]; gpr[op.ra] = addr; } void ppu_acontext::ADD(ppu_opcode_t op) { gpr[op.rd] = gpr[op.ra] + gpr[op.rd]; } void ppu_acontext::DCBT(ppu_opcode_t op) { } void ppu_acontext::LHZX(ppu_opcode_t op) { gpr[op.rd].set_undef(); } void ppu_acontext::EQV(ppu_opcode_t op) { gpr[op.ra] = ~(gpr[op.rs] ^ gpr[op.rb]); } void ppu_acontext::ECIWX(ppu_opcode_t op) { gpr[op.rd].set_undef(); } void ppu_acontext::LHZUX(ppu_opcode_t op) { const auto addr = gpr[op.ra] + gpr[op.rb]; gpr[op.rd].set_undef(); gpr[op.ra] = addr; } void ppu_acontext::XOR(ppu_opcode_t op) { gpr[op.ra] = gpr[op.rs] ^ gpr[op.rb]; } void ppu_acontext::MFSPR(ppu_opcode_t op) { gpr[op.rd].set_undef(); } void ppu_acontext::LWAX(ppu_opcode_t op) { gpr[op.rd].set_undef(); } void ppu_acontext::DST(ppu_opcode_t op) { } void ppu_acontext::LHAX(ppu_opcode_t op) { gpr[op.rd].set_undef(); } void ppu_acontext::LVXL(ppu_opcode_t op) { } void ppu_acontext::MFTB(ppu_opcode_t op) { gpr[op.rd].set_undef(); } void ppu_acontext::LWAUX(ppu_opcode_t op) { const auto addr = gpr[op.ra] + gpr[op.rb]; gpr[op.rd].set_undef(); gpr[op.ra] = addr; } void ppu_acontext::DSTST(ppu_opcode_t op) { } void ppu_acontext::LHAUX(ppu_opcode_t op) { const auto addr = gpr[op.ra] + gpr[op.rb]; gpr[op.rd].set_undef(); gpr[op.ra] = addr; } void ppu_acontext::STHX(ppu_opcode_t op) { } void 
ppu_acontext::ORC(ppu_opcode_t op) { gpr[op.ra] = gpr[op.rs] | ~gpr[op.rb]; } void ppu_acontext::ECOWX(ppu_opcode_t op) { } void ppu_acontext::STHUX(ppu_opcode_t op) { const auto addr = gpr[op.ra] + gpr[op.rb]; gpr[op.ra] = addr; } void ppu_acontext::OR(ppu_opcode_t op) { gpr[op.ra] = gpr[op.rs] | gpr[op.rb]; } void ppu_acontext::DIVDU(ppu_opcode_t op) { gpr[op.rd].set_undef(); } void ppu_acontext::DIVWU(ppu_opcode_t op) { gpr[op.rd].set_undef(); } void ppu_acontext::MTSPR(ppu_opcode_t op) { } void ppu_acontext::DCBI(ppu_opcode_t op) { } void ppu_acontext::NAND(ppu_opcode_t op) { gpr[op.ra] = ~(gpr[op.rs] & gpr[op.rb]); } void ppu_acontext::STVXL(ppu_opcode_t op) { } void ppu_acontext::DIVD(ppu_opcode_t op) { gpr[op.rd].set_undef(); } void ppu_acontext::DIVW(ppu_opcode_t op) { gpr[op.rd].set_undef(); } void ppu_acontext::LVLX(ppu_opcode_t op) { } void ppu_acontext::LDBRX(ppu_opcode_t op) { gpr[op.rd].set_undef(); } void ppu_acontext::LSWX(ppu_opcode_t op) { } void ppu_acontext::LWBRX(ppu_opcode_t op) { gpr[op.rd].set_undef(); } void ppu_acontext::LFSX(ppu_opcode_t op) { } void ppu_acontext::SRW(ppu_opcode_t op) { gpr[op.ra].set_undef(); } void ppu_acontext::SRD(ppu_opcode_t op) { gpr[op.ra].set_undef(); } void ppu_acontext::LVRX(ppu_opcode_t op) { } void ppu_acontext::LSWI(ppu_opcode_t op) { std::fill_n(gpr, 32, spec_gpr{}); } void ppu_acontext::LFSUX(ppu_opcode_t op) { const auto addr = gpr[op.ra] + gpr[op.rb]; gpr[op.ra] = addr; } void ppu_acontext::SYNC(ppu_opcode_t op) { } void ppu_acontext::LFDX(ppu_opcode_t op) { } void ppu_acontext::LFDUX(ppu_opcode_t op) { const auto addr = gpr[op.ra] + gpr[op.rb]; gpr[op.ra] = addr; } void ppu_acontext::STVLX(ppu_opcode_t op) { } void ppu_acontext::STDBRX(ppu_opcode_t op) { } void ppu_acontext::STSWX(ppu_opcode_t op) { } void ppu_acontext::STWBRX(ppu_opcode_t op) { } void ppu_acontext::STFSX(ppu_opcode_t op) { } void ppu_acontext::STVRX(ppu_opcode_t op) { } void ppu_acontext::STFSUX(ppu_opcode_t op) { const auto addr = 
gpr[op.ra] + gpr[op.rb]; gpr[op.ra] = addr; } void ppu_acontext::STSWI(ppu_opcode_t op) { } void ppu_acontext::STFDX(ppu_opcode_t op) { } void ppu_acontext::STFDUX(ppu_opcode_t op) { const auto addr = gpr[op.ra] + gpr[op.rb]; gpr[op.ra] = addr; } void ppu_acontext::LVLXL(ppu_opcode_t op) { } void ppu_acontext::LHBRX(ppu_opcode_t op) { gpr[op.rd].set_undef(); } void ppu_acontext::SRAW(ppu_opcode_t op) { gpr[op.ra].set_undef(); } void ppu_acontext::SRAD(ppu_opcode_t op) { gpr[op.ra].set_undef(); } void ppu_acontext::LVRXL(ppu_opcode_t op) { } void ppu_acontext::DSS(ppu_opcode_t op) { } void ppu_acontext::SRAWI(ppu_opcode_t op) { gpr[op.ra].set_undef(); } void ppu_acontext::SRADI(ppu_opcode_t op) { gpr[op.ra].set_undef(); } void ppu_acontext::EIEIO(ppu_opcode_t op) { } void ppu_acontext::STVLXL(ppu_opcode_t op) { } void ppu_acontext::STHBRX(ppu_opcode_t op) { } void ppu_acontext::EXTSH(ppu_opcode_t op) { gpr[op.ra].set_undef(); } void ppu_acontext::STVRXL(ppu_opcode_t op) { } void ppu_acontext::EXTSB(ppu_opcode_t op) { gpr[op.ra].set_undef(); } void ppu_acontext::STFIWX(ppu_opcode_t op) { } void ppu_acontext::EXTSW(ppu_opcode_t op) { gpr[op.ra].set_undef(); } void ppu_acontext::ICBI(ppu_opcode_t op) { } void ppu_acontext::DCBZ(ppu_opcode_t op) { } void ppu_acontext::LWZ(ppu_opcode_t op) { gpr[op.rd].set_undef(); } void ppu_acontext::LWZU(ppu_opcode_t op) { const auto addr = gpr[op.ra] + gpr[op.rb]; gpr[op.rd].set_undef(); gpr[op.ra] = addr; } void ppu_acontext::LBZ(ppu_opcode_t op) { gpr[op.rd].set_undef(); } void ppu_acontext::LBZU(ppu_opcode_t op) { const auto addr = gpr[op.ra] + gpr[op.rb]; gpr[op.rd].set_undef(); gpr[op.ra] = addr; } void ppu_acontext::STW(ppu_opcode_t op) { } void ppu_acontext::STWU(ppu_opcode_t op) { const auto addr = gpr[op.ra] + gpr[op.rb]; gpr[op.ra] = addr; } void ppu_acontext::STB(ppu_opcode_t op) { } void ppu_acontext::STBU(ppu_opcode_t op) { const auto addr = gpr[op.ra] + gpr[op.rb]; gpr[op.ra] = addr; } void 
ppu_acontext::LHZ(ppu_opcode_t op) { gpr[op.rd].set_undef(); } void ppu_acontext::LHZU(ppu_opcode_t op) { const auto addr = gpr[op.ra] + gpr[op.rb]; gpr[op.rd].set_undef(); gpr[op.ra] = addr; } void ppu_acontext::LHA(ppu_opcode_t op) { gpr[op.rd].set_undef(); } void ppu_acontext::LHAU(ppu_opcode_t op) { const auto addr = gpr[op.ra] + gpr[op.rb]; gpr[op.rd].set_undef(); gpr[op.ra] = addr; } void ppu_acontext::STH(ppu_opcode_t op) { } void ppu_acontext::STHU(ppu_opcode_t op) { const auto addr = gpr[op.ra] + gpr[op.rb]; gpr[op.ra] = addr; } void ppu_acontext::LMW(ppu_opcode_t op) { std::fill_n(gpr, 32, spec_gpr{}); } void ppu_acontext::STMW(ppu_opcode_t op) { } void ppu_acontext::LFS(ppu_opcode_t op) { } void ppu_acontext::LFSU(ppu_opcode_t op) { const auto addr = gpr[op.ra] + gpr[op.rb]; gpr[op.ra] = addr; } void ppu_acontext::LFD(ppu_opcode_t op) { } void ppu_acontext::LFDU(ppu_opcode_t op) { const auto addr = gpr[op.ra] + gpr[op.rb]; gpr[op.ra] = addr; } void ppu_acontext::STFS(ppu_opcode_t op) { } void ppu_acontext::STFSU(ppu_opcode_t op) { const auto addr = gpr[op.ra] + gpr[op.rb]; gpr[op.ra] = addr; } void ppu_acontext::STFD(ppu_opcode_t op) { } void ppu_acontext::STFDU(ppu_opcode_t op) { const auto addr = gpr[op.ra] + gpr[op.rb]; gpr[op.ra] = addr; } void ppu_acontext::LD(ppu_opcode_t op) { gpr[op.rd].set_undef(); } void ppu_acontext::LDU(ppu_opcode_t op) { const auto addr = gpr[op.ra] + gpr[op.rb]; gpr[op.rd].set_undef(); gpr[op.ra] = addr; } void ppu_acontext::LWA(ppu_opcode_t op) { gpr[op.rd].set_undef(); } void ppu_acontext::STD(ppu_opcode_t op) { } void ppu_acontext::STDU(ppu_opcode_t op) { const auto addr = gpr[op.ra] + gpr[op.rb]; gpr[op.ra] = addr; } void ppu_acontext::FDIVS(ppu_opcode_t op) { } void ppu_acontext::FSUBS(ppu_opcode_t op) { } void ppu_acontext::FADDS(ppu_opcode_t op) { } void ppu_acontext::FSQRTS(ppu_opcode_t op) { } void ppu_acontext::FRES(ppu_opcode_t op) { } void ppu_acontext::FMULS(ppu_opcode_t op) { } void 
ppu_acontext::FMADDS(ppu_opcode_t op) { } void ppu_acontext::FMSUBS(ppu_opcode_t op) { } void ppu_acontext::FNMSUBS(ppu_opcode_t op) { } void ppu_acontext::FNMADDS(ppu_opcode_t op) { } void ppu_acontext::MTFSB1(ppu_opcode_t op) { } void ppu_acontext::MCRFS(ppu_opcode_t op) { } void ppu_acontext::MTFSB0(ppu_opcode_t op) { } void ppu_acontext::MTFSFI(ppu_opcode_t op) { } void ppu_acontext::MFFS(ppu_opcode_t op) { } void ppu_acontext::MTFSF(ppu_opcode_t op) { } void ppu_acontext::FCMPU(ppu_opcode_t op) { } void ppu_acontext::FRSP(ppu_opcode_t op) { } void ppu_acontext::FCTIW(ppu_opcode_t op) { } void ppu_acontext::FCTIWZ(ppu_opcode_t op) { } void ppu_acontext::FDIV(ppu_opcode_t op) { } void ppu_acontext::FSUB(ppu_opcode_t op) { } void ppu_acontext::FADD(ppu_opcode_t op) { } void ppu_acontext::FSQRT(ppu_opcode_t op) { } void ppu_acontext::FSEL(ppu_opcode_t op) { } void ppu_acontext::FMUL(ppu_opcode_t op) { } void ppu_acontext::FRSQRTE(ppu_opcode_t op) { } void ppu_acontext::FMSUB(ppu_opcode_t op) { } void ppu_acontext::FMADD(ppu_opcode_t op) { } void ppu_acontext::FNMSUB(ppu_opcode_t op) { } void ppu_acontext::FNMADD(ppu_opcode_t op) { } void ppu_acontext::FCMPO(ppu_opcode_t op) { } void ppu_acontext::FNEG(ppu_opcode_t op) { } void ppu_acontext::FMR(ppu_opcode_t op) { } void ppu_acontext::FNABS(ppu_opcode_t op) { } void ppu_acontext::FABS(ppu_opcode_t op) { } void ppu_acontext::FCTID(ppu_opcode_t op) { } void ppu_acontext::FCTIDZ(ppu_opcode_t op) { } void ppu_acontext::FCFID(ppu_opcode_t op) { } #include <random> const bool s_tes = []() { return true; std::mt19937_64 rnd{123}; for (u32 i = 0; i < 10000; i++) { ppu_acontext::spec_gpr r1, r2, r3; r1 = ppu_acontext::spec_gpr::approx(rnd(), rnd()); r2 = ppu_acontext::spec_gpr::range(rnd(), rnd()); r3 = r1 | r2; for (u32 j = 0; j < 10000; j++) { u64 v1 = rnd(), v2 = rnd(); v1 &= r1.mask(); v1 |= r1.ones(); if (!r2.test(v2)) { v2 = r2.imin; } if (r1.test(v1) && r2.test(v2)) { if (!r3.test(v1 | v2)) { auto exp = 
ppu_acontext::spec_gpr::approx(r1.ones() & r2.ones(), r1.mask() & r2.mask()); ppu_log.error("ppu_acontext failure:" "\n\tr1 = 0x%016x..0x%016x, 0x%016x:0x%016x" "\n\tr2 = 0x%016x..0x%016x, 0x%016x:0x%016x" "\n\tr3 = 0x%016x..0x%016x, 0x%016x:0x%016x" "\n\tex = 0x%016x..0x%016x" "\n\tv1 = 0x%016x, v2 = 0x%016x, v3 = 0x%016x", r1.imin, r1.imax, r1.bmin, r1.bmax, r2.imin, r2.imax, r2.bmin, r2.bmax, r3.imin, r3.imax, r3.bmin, r3.bmax, exp.imin, exp.imax, v1, v2, v1 | v2); break; } } } } ppu_acontext::spec_gpr r1; r1 = ppu_acontext::spec_gpr::range(0x13311, 0x1fe22); r1 = r1 ^ ppu_acontext::spec_gpr::approx(0x000, 0xf00); ppu_log.success("0x%x..0x%x", r1.imin, r1.imax); return true; }();
77,834
C++
.cpp
3,364
20.021106
177
0.609111
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
5,180
SPUDisAsm.cpp
RPCS3_rpcs3/rpcs3/Emu/Cell/SPUDisAsm.cpp
#include "stdafx.h" #include "SPUDisAsm.h" #include "SPUAnalyser.h" #include "SPUThread.h" const spu_decoder<SPUDisAsm> s_spu_disasm; const extern spu_decoder<spu_itype> g_spu_itype; const extern spu_decoder<spu_iname> g_spu_iname; const extern spu_decoder<spu_iflag> g_spu_iflag; #include "util/v128.hpp" #include "util/simd.hpp" u32 SPUDisAsm::disasm(u32 pc) { last_opcode.clear(); if (pc < m_start_pc || pc >= SPU_LS_SIZE) { return 0; } dump_pc = pc; be_t<u32> op; std::memcpy(&op, m_offset + pc, 4); m_op = op; (this->*(s_spu_disasm.decode(m_op)))({ m_op }); format_by_mode(); return 4; } std::pair<const void*, usz> SPUDisAsm::get_memory_span() const { return {m_offset + m_start_pc, SPU_LS_SIZE - m_start_pc}; } std::unique_ptr<CPUDisAsm> SPUDisAsm::copy_type_erased() const { return std::make_unique<SPUDisAsm>(*this); } std::pair<bool, v128> SPUDisAsm::try_get_const_value(u32 reg, u32 pc, u32 TTL) const { if (!TTL) { // Recursion limit (Time To Live) return {}; } if (pc == umax) { // Default arg: choose pc of previous instruction if (dump_pc == 0) { // Do not underflow return {}; } pc = dump_pc - 4; } // Scan LS backwards from this instruction (until PC=0) // Search for the first register modification or branch instruction for (s32 i = static_cast<s32>(pc); i >= static_cast<s32>(m_start_pc); i -= 4) { const u32 opcode = *reinterpret_cast<const be_t<u32>*>(m_offset + i); const spu_opcode_t op0{ opcode }; const auto type = g_spu_itype.decode(opcode); if (type & spu_itype::branch || type == spu_itype::UNK || !opcode) { if (reg < 80u) { return {}; } // We do not care about function calls if register is non-volatile if ((type != spu_itype::BRSL && type != spu_itype::BRASL && type != spu_itype::BISL) || op0.rt == reg) { return {}; } continue; } // Get constant register value #define GET_CONST_REG(var, reg) \ {\ /* Search for the constant value of the register*/\ const auto [is_const, value] = try_get_const_value(reg, i - 4, TTL - 1);\ \ if (!is_const)\ {\ /* Cannot compute 
constant value if register is not constant*/\ return {};\ }\ \ var = value;\ } void() /*<- Require a semicolon*/ //const auto flag = g_spu_iflag.decode(opcode); if (u32 dst = type & spu_itype::_quadrop ? +op0.rt4 : +op0.rt; dst == reg && !(type & spu_itype::zregmod)) { // Note: It's not 100% reliable because it won't detect branch targets within [i, dump_pc] range (e.g. if-else statement for command's value) switch (type) { case spu_itype::IL: { return { true, v128::from32p(op0.si16) }; } case spu_itype::ILA: { return { true, v128::from32p(op0.i18) }; } case spu_itype::ILHU: { return { true, v128::from32p(op0.i16 << 16) }; } case spu_itype::ILH: { return { true, v128::from16p(op0.i16) }; } case spu_itype::CBD: case spu_itype::CHD: case spu_itype::CWD: case spu_itype::CDD: { // Aligned stack assumption if (op0.ra == 1u) { u32 size = 0; switch (type) { case spu_itype::CBD: size = 1; break; case spu_itype::CHD: size = 2; break; case spu_itype::CWD: size = 4; break; case spu_itype::CDD: size = 8; break; default: fmt::throw_exception("Unreachable"); } const u32 index = (~op0.i7 & 0xf) / size; auto res = v128::from64(0x18191A1B1C1D1E1Full, 0x1011121314151617ull); switch (size) { case 1: res._u8[index] = 0x03; break; case 2: res._u16[index] = 0x0203; break; case 4: res._u32[index] = 0x00010203; break; case 8: res._u64[index] = 0x0001020304050607ull; break; default: fmt::throw_exception("Unreachable"); } return {true, res}; } return {}; } case spu_itype::FSMBI: { v128 res; for (s32 i = 0; i < 16; i++) { res._u8[i] = (op0.i16 & (1 << i)) ? 
0xFF : 0x00; } return { true, res }; } case spu_itype::IOHL: { v128 reg_val{}; // Search for ILHU+IOHL pattern (common pattern for 32-bit constants formation) // But don't limit to it GET_CONST_REG(reg_val, op0.rt); return { true, reg_val | v128::from32p(op0.i16) }; } case spu_itype::SHLQBYI: { if (op0.si7) { // Unimplemented, doubt needed return {}; } // Move register value operation v128 reg_val{}; GET_CONST_REG(reg_val, op0.ra); return { true, reg_val }; } case spu_itype::ORI: { v128 reg_val{}; GET_CONST_REG(reg_val, op0.ra); return { true, reg_val | v128::from32p(op0.si10) }; } default: return {}; } } } return {}; } SPUDisAsm::insert_mask_info SPUDisAsm::try_get_insert_mask_info(const v128& mask) { if ((mask & v128::from8p(0xe0)) != v128{}) { return {}; } s32 first = -1, src_first = 0, last = 16; auto access = [&](u32 index) -> u8 { return mask._u8[index ^ 0xf]; }; for (s32 i = 0; i < 16; i++) { if (access(i) & 0x10) { if ((access(i) & 0x0f) != i) { return {}; } if (first != -1 && last == 16) { last = i; } continue; } if (last != 16) { return {}; } if (first == -1) { src_first = access(i); first = i; } if (src_first + (i - first) != access(i)) { return {}; } } if (first == -1) { return {}; } const u32 size = last - first; if ((size | src_first | first) & (size - 1)) { return {}; } if (size == 16) { // 0x0, 0x1, 0x2, .. 
0xE, 0xF is not allowed return {}; } // [type size, dst index, src index] return {size, first / size, src_first / size}; } void SPUDisAsm::WRCH(spu_opcode_t op) { DisAsm("wrch", spu_ch_name[op.ra], spu_reg_name[op.rt]); const auto [is_const, value] = try_get_const_value(op.rt); if (is_const) { switch (op.ra) { case MFC_Cmd: { fmt::append(last_opcode, " #%s", MFC(value._u8[12])); return; } case MFC_WrListStallAck: case MFC_WrTagMask: { const u32 v = value._u32[3]; if (v && !(v & (v - 1))) fmt::append(last_opcode, " #%s (tag=%u)", SignedHex(v), std::countr_zero(v)); // Single-tag mask else fmt::append(last_opcode, " #%s", SignedHex(v)); // Multi-tag mask (or zero) return; } case MFC_EAH: { fmt::append(last_opcode, " #%s", SignedHex(value._u32[3])); return; } case MFC_Size: { fmt::append(last_opcode, " #%s", SignedHex(value._u16[6])); return; } case MFC_TagID: { fmt::append(last_opcode, " #%u", value._u8[12]); return; } case MFC_WrTagUpdate: { const auto upd = fmt::format("%s", mfc_tag_update(value._u32[3])); fmt::append(last_opcode, " #%s", upd == "empty" ? 
"IMMEDIATE" : upd); return; } case SPU_WrOutIntrMbox: { const u32 code = value._u32[3] >> 24; if (code == 128u) { last_opcode += " #sys_event_flag_set_bit"; } else if (code == 192u) { last_opcode += " #sys_event_flag_set_bit_impatient"; } else { fmt::append(last_opcode, " #%s", SignedHex(value._u32[3])); } return; } default: { fmt::append(last_opcode, " #%s", SignedHex(value._u32[3])); return; } } } } enum CellError : u32; void SPUDisAsm::IOHL(spu_opcode_t op) { DisAsm("iohl", spu_reg_name[op.rt], op.i16); const auto [is_const, value] = try_get_const_equal_value_array<u32>(op.rt); // Only print constant for a 4 equal 32-bit constants array if (is_const) { comment_constant(last_opcode, value | op.i16); } } void SPUDisAsm::SHUFB(spu_opcode_t op) { DisAsm("shufb", spu_reg_name[op.rt4], spu_reg_name[op.ra], spu_reg_name[op.rb], spu_reg_name[op.rc]); const auto [is_const, value] = try_get_const_value(op.rc); if (is_const) { const auto [size, dst, src] = try_get_insert_mask_info(value); if (size) { if ((size >= 4u && !src) || (size == 2u && src == 1u) || (size == 1u && src == 3u)) { // Comment insertion pattern for CWD-alike instruction fmt::append(last_opcode, " #i%u[%u]", size * 8, dst); return; } // Comment insertion pattern for unknown instruction formations fmt::append(last_opcode, " #i%u[%u] = [%u]", size * 8, dst, src); return; } } }
8,180
C++
.cpp
335
20.880597
144
0.604266
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
5,181
SPUCommonRecompiler.cpp
RPCS3_rpcs3/rpcs3/Emu/Cell/SPUCommonRecompiler.cpp
#include "stdafx.h" #include "SPURecompiler.h" #include "Emu/System.h" #include "Emu/system_config.h" #include "Emu/system_progress.hpp" #include "Emu/system_utils.hpp" #include "Emu/cache_utils.hpp" #include "Emu/IdManager.h" #include "Emu/localized_string.h" #include "Crypto/sha1.h" #include "Utilities/StrUtil.h" #include "Utilities/JIT.h" #include "util/init_mutex.hpp" #include "util/shared_ptr.hpp" #include "Emu/Cell/Modules/cellSync.h" #include "SPUThread.h" #include "SPUAnalyser.h" #include "SPUInterpreter.h" #include "SPUDisAsm.h" #include <algorithm> #include <cstring> #include <optional> #include <unordered_set> #include "util/v128.hpp" #include "util/simd.hpp" #include "util/sysinfo.hpp" const extern spu_decoder<spu_itype> g_spu_itype; const extern spu_decoder<spu_iname> g_spu_iname; const extern spu_decoder<spu_iflag> g_spu_iflag; constexpr u32 s_reg_max = spu_recompiler_base::s_reg_max; template<typename T> struct span_less { static int compare(const std::span<T>& lhs, const std::span<T>& rhs) noexcept { // TODO: Replace with std::lexicographical_compare_three_way when it becomes available to all compilers for (usz i = 0, last = std::min(lhs.size(), rhs.size()); i != last; i++) { const T vl = lhs[i]; const T vr = rhs[i]; if (vl != vr) { return vl < vr ? -1 : 1; } } if (lhs.size() != rhs.size()) { return lhs.size() < rhs.size() ? 
-1 : 1; } return 0; } bool operator()(const std::span<T>& lhs, const std::span<T>& rhs) const noexcept { return compare(lhs, rhs) < 0; } }; template <typename T> inline constexpr span_less<T> s_span_less{}; // Move 4 args for calling native function from a GHC calling convention function #if defined(ARCH_X64) static u8* move_args_ghc_to_native(u8* raw) { #ifdef _WIN32 // mov rcx, r13 // mov rdx, rbp // mov r8, r12 // mov r9, rbx std::memcpy(raw, "\x4C\x89\xE9\x48\x89\xEA\x4D\x89\xE0\x49\x89\xD9", 12); #else // mov rdi, r13 // mov rsi, rbp // mov rdx, r12 // mov rcx, rbx std::memcpy(raw, "\x4C\x89\xEF\x48\x89\xEE\x4C\x89\xE2\x48\x89\xD9", 12); #endif return raw + 12; } #elif defined(ARCH_ARM64) static void ghc_cpp_trampoline(u64 fn_target, native_asm& c, auto& args) { using namespace asmjit; c.mov(args[0], a64::x19); c.mov(args[1], a64::x20); c.mov(args[2], a64::x21); c.mov(args[3], a64::x22); c.mov(a64::x15, Imm(fn_target)); c.br(a64::x15); } #endif DECLARE(spu_runtime::tr_dispatch) = [] { #ifdef __APPLE__ pthread_jit_write_protect_np(false); #endif #if defined(ARCH_X64) // Generate a special trampoline to spu_recompiler_base::dispatch with pause instruction u8* const trptr = jit_runtime::alloc(32, 16); u8* raw = move_args_ghc_to_native(trptr); *raw++ = 0xf3; // pause *raw++ = 0x90; *raw++ = 0xff; // jmp [rip] *raw++ = 0x25; std::memset(raw, 0, 4); const u64 target = reinterpret_cast<u64>(&spu_recompiler_base::dispatch); std::memcpy(raw + 4, &target, 8); return reinterpret_cast<spu_function_t>(trptr); #elif defined(ARCH_ARM64) auto trptr = build_function_asm<spu_function_t>("tr_dispatch", [](native_asm& c, auto& args) { c.yield(); ghc_cpp_trampoline(reinterpret_cast<u64>(&spu_recompiler_base::dispatch), c, args); }); return trptr; #else #error "Unimplemented" #endif }(); DECLARE(spu_runtime::tr_branch) = [] { #if defined(ARCH_X64) // Generate a trampoline to spu_recompiler_base::branch u8* const trptr = jit_runtime::alloc(32, 16); u8* raw = 
move_args_ghc_to_native(trptr); *raw++ = 0xff; // jmp [rip] *raw++ = 0x25; std::memset(raw, 0, 4); const u64 target = reinterpret_cast<u64>(&spu_recompiler_base::branch); std::memcpy(raw + 4, &target, 8); return reinterpret_cast<spu_function_t>(trptr); #elif defined(ARCH_ARM64) auto trptr = build_function_asm<spu_function_t>("tr_branch", [](native_asm& c, auto& args) { ghc_cpp_trampoline(reinterpret_cast<u64>(&spu_recompiler_base::branch), c, args); }); return trptr; #else #error "Unimplemented" #endif }(); DECLARE(spu_runtime::tr_interpreter) = [] { #if defined(ARCH_X64) u8* const trptr = jit_runtime::alloc(32, 16); u8* raw = move_args_ghc_to_native(trptr); *raw++ = 0xff; // jmp [rip] *raw++ = 0x25; std::memset(raw, 0, 4); const u64 target = reinterpret_cast<u64>(&spu_recompiler_base::old_interpreter); std::memcpy(raw + 4, &target, 8); return reinterpret_cast<spu_function_t>(trptr); #elif defined(ARCH_ARM64) auto trptr = build_function_asm<spu_function_t>("tr_interpreter", [](native_asm& c, auto& args) { ghc_cpp_trampoline(reinterpret_cast<u64>(&spu_recompiler_base::old_interpreter), c, args); }); return trptr; #endif }(); DECLARE(spu_runtime::g_dispatcher) = [] { // Allocate 2^20 positions in data area const auto ptr = reinterpret_cast<std::remove_const_t<decltype(spu_runtime::g_dispatcher)>>(jit_runtime::alloc(sizeof(*g_dispatcher), 64, false)); for (auto& x : *ptr) { x.raw() = tr_dispatch; } return ptr; }(); DECLARE(spu_runtime::tr_all) = [] { #if defined(ARCH_X64) u8* const trptr = jit_runtime::alloc(32, 16); u8* raw = trptr; // Load PC: mov eax, [r13 + spu_thread::pc] *raw++ = 0x41; *raw++ = 0x8b; *raw++ = 0x45; *raw++ = ::narrow<s8>(::offset32(&spu_thread::pc)); // Get LS address starting from PC: lea rcx, [rbp + rax] *raw++ = 0x48; *raw++ = 0x8d; *raw++ = 0x4c; *raw++ = 0x05; *raw++ = 0x00; // mov eax, [rcx] *raw++ = 0x8b; *raw++ = 0x01; // shr eax, (32 - 20) *raw++ = 0xc1; *raw++ = 0xe8; *raw++ = 0x0c; // Load g_dispatcher to rdx *raw++ = 0x48; *raw++ = 
0x8d; *raw++ = 0x15; const s32 r32 = ::narrow<s32>(reinterpret_cast<u64>(g_dispatcher) - reinterpret_cast<u64>(raw) - 4); std::memcpy(raw, &r32, 4); raw += 4; // Update block_hash (set zero): mov [r13 + spu_thread::m_block_hash], 0 *raw++ = 0x49; *raw++ = 0xc7; *raw++ = 0x45; *raw++ = ::narrow<s8>(::offset32(&spu_thread::block_hash)); *raw++ = 0x00; *raw++ = 0x00; *raw++ = 0x00; *raw++ = 0x00; // jmp [rdx + rax * 8] *raw++ = 0xff; *raw++ = 0x24; *raw++ = 0xc2; return reinterpret_cast<spu_function_t>(trptr); #elif defined(ARCH_ARM64) auto trptr = build_function_asm<spu_function_t>("tr_all", [](native_asm& c, auto& args) { using namespace asmjit; // Args implicitly defined via registers UNUSED(args); // Inputs: // x19 = m_thread a.k.a arg[0] // x20 = ls_base // x21 - x22 = args[2 - 3] //ensure(::offset32(&spu_thread::pc) <= 32760); //ensure(::offset32(&spu_thread::block_hash) <= 32760); // Load PC c.ldr(a64::w1, arm::Mem(a64::x19, ::offset32(&spu_thread::pc))); // REG_Base + offset(spu_thread::pc) // Compute LS address = REG_Sp + PC, store into x7 (use later) c.add(a64::x7, a64::x20, a64::x1); // Load 32b from LS address c.ldr(a64::w3, arm::Mem(a64::x7)); // shr (32 - 20) c.lsr(a64::w3, a64::w3, Imm(32 - 20)); // Load g_dispatcher c.mov(a64::x4, Imm(reinterpret_cast<u64>(g_dispatcher))); // Update block hash c.mov(a64::x5, Imm(0)); c.str(a64::x5, arm::Mem(a64::x19, ::offset32(&spu_thread::block_hash))); // REG_Base + offset(spu_thread::block_hash) // Jump to [g_dispatcher + idx * 8] c.mov(a64::x6, Imm(8)); c.mul(a64::x6, a64::x3, a64::x6); c.add(a64::x4, a64::x4, a64::x6); c.ldr(a64::x4, arm::Mem(a64::x4)); c.br(a64::x4); // Unreachable guard c.brk(0x42); }); return trptr; #else #error "Unimplemented" #endif }(); DECLARE(spu_runtime::g_gateway) = build_function_asm<spu_function_t>("spu_gateway", [](native_asm& c, auto& args) { // Gateway for SPU dispatcher, converts from native to GHC calling convention, also saves RSP value for spu_escape using namespace asmjit; #if 
defined(ARCH_X64) #ifdef _WIN32 c.push(x86::r15); c.push(x86::r14); c.push(x86::r13); c.push(x86::r12); c.push(x86::rsi); c.push(x86::rdi); c.push(x86::rbp); c.push(x86::rbx); c.sub(x86::rsp, 0xa8); c.movaps(x86::oword_ptr(x86::rsp, 0x90), x86::xmm15); c.movaps(x86::oword_ptr(x86::rsp, 0x80), x86::xmm14); c.movaps(x86::oword_ptr(x86::rsp, 0x70), x86::xmm13); c.movaps(x86::oword_ptr(x86::rsp, 0x60), x86::xmm12); c.movaps(x86::oword_ptr(x86::rsp, 0x50), x86::xmm11); c.movaps(x86::oword_ptr(x86::rsp, 0x40), x86::xmm10); c.movaps(x86::oword_ptr(x86::rsp, 0x30), x86::xmm9); c.movaps(x86::oword_ptr(x86::rsp, 0x20), x86::xmm8); c.movaps(x86::oword_ptr(x86::rsp, 0x10), x86::xmm7); c.movaps(x86::oword_ptr(x86::rsp, 0), x86::xmm6); #else c.push(x86::rbp); c.push(x86::r15); c.push(x86::r14); c.push(x86::r13); c.push(x86::r12); c.push(x86::rbx); c.push(x86::rax); #endif // Save native stack pointer for longjmp emulation c.mov(x86::qword_ptr(args[0], ::offset32(&spu_thread::hv_ctx, &rpcs3::hypervisor_context_t::regs)), x86::rsp); // Move 4 args (despite spu_function_t def) c.mov(x86::r13, args[0]); c.mov(x86::rbp, args[1]); c.mov(x86::r12, args[2]); c.mov(x86::rbx, args[3]); if (utils::has_avx()) { c.vzeroupper(); } c.call(spu_runtime::tr_all); if (utils::has_avx()) { c.vzeroupper(); } #ifdef _WIN32 c.movaps(x86::xmm6, x86::oword_ptr(x86::rsp, 0)); c.movaps(x86::xmm7, x86::oword_ptr(x86::rsp, 0x10)); c.movaps(x86::xmm8, x86::oword_ptr(x86::rsp, 0x20)); c.movaps(x86::xmm9, x86::oword_ptr(x86::rsp, 0x30)); c.movaps(x86::xmm10, x86::oword_ptr(x86::rsp, 0x40)); c.movaps(x86::xmm11, x86::oword_ptr(x86::rsp, 0x50)); c.movaps(x86::xmm12, x86::oword_ptr(x86::rsp, 0x60)); c.movaps(x86::xmm13, x86::oword_ptr(x86::rsp, 0x70)); c.movaps(x86::xmm14, x86::oword_ptr(x86::rsp, 0x80)); c.movaps(x86::xmm15, x86::oword_ptr(x86::rsp, 0x90)); c.add(x86::rsp, 0xa8); c.pop(x86::rbx); c.pop(x86::rbp); c.pop(x86::rdi); c.pop(x86::rsi); c.pop(x86::r12); c.pop(x86::r13); c.pop(x86::r14); c.pop(x86::r15); 
#else c.add(x86::rsp, +8); c.pop(x86::rbx); c.pop(x86::r12); c.pop(x86::r13); c.pop(x86::r14); c.pop(x86::r15); c.pop(x86::rbp); #endif c.ret(); #elif defined(ARCH_ARM64) // Save non-volatile regs. We do this within the thread context instead of normal stack const u32 hv_regs_base = ::offset32(&spu_thread::hv_ctx, &rpcs3::hypervisor_context_t::regs); // NOTE: A64 gp-gp-imm add only takes immediates of upto 4095. Larger numbers can work, but need to be multiples of 2 for lowering to replace the instruction correctly // Unfortunately asmjit fails silently on these patterns which can generate incorrect code c.mov(a64::x15, args[0]); c.mov(a64::x14, Imm(hv_regs_base)); c.add(a64::x14, a64::x14, a64::x15); // Reg context offset // Return address of escape should jump to the restore block auto epilogue_addr = c.newLabel(); c.adr(a64::x15, epilogue_addr); c.mov(a64::x16, a64::sp); c.stp(a64::x15, a64::x16, arm::Mem(a64::x14)); c.stp(a64::x18, a64::x19, arm::Mem(a64::x14, 16)); c.stp(a64::x20, a64::x21, arm::Mem(a64::x14, 32)); c.stp(a64::x22, a64::x23, arm::Mem(a64::x14, 48)); c.stp(a64::x24, a64::x25, arm::Mem(a64::x14, 64)); c.stp(a64::x26, a64::x27, arm::Mem(a64::x14, 80)); c.stp(a64::x28, a64::x29, arm::Mem(a64::x14, 96)); c.str(a64::x30, arm::Mem(a64::x14, 112)); // Move 4 args (despite spu_function_t def) c.mov(a64::x19, args[0]); c.mov(a64::x20, args[1]); c.mov(a64::x21, args[2]); c.mov(a64::x22, args[3]); // Inject stack frame for scratchpad. Alternatively use per-function frames but that adds some overhead c.sub(a64::sp, a64::sp, Imm(8192)); c.mov(a64::x0, Imm(reinterpret_cast<u64>(spu_runtime::tr_all))); c.blr(a64::x0); // This is the return point for the far ret. 
Never jump back into host code without coming through this exit c.bind(epilogue_addr); // Cleanup scratchpad (not needed, we'll reload sp shortly) // c.add(a64::sp, a64::sp, Imm(8192)); // Restore thread context c.mov(a64::x14, Imm(hv_regs_base)); c.add(a64::x14, a64::x14, a64::x19); c.ldr(a64::x16, arm::Mem(a64::x14, 8)); c.ldp(a64::x18, a64::x19, arm::Mem(a64::x14, 16)); c.ldp(a64::x20, a64::x21, arm::Mem(a64::x14, 32)); c.ldp(a64::x22, a64::x23, arm::Mem(a64::x14, 48)); c.ldp(a64::x24, a64::x25, arm::Mem(a64::x14, 64)); c.ldp(a64::x26, a64::x27, arm::Mem(a64::x14, 80)); c.ldp(a64::x28, a64::x29, arm::Mem(a64::x14, 96)); c.ldr(a64::x30, arm::Mem(a64::x14, 112)); // Return c.mov(a64::sp, a64::x16); c.ret(a64::x30); #else #error "Unimplemented" #endif }); DECLARE(spu_runtime::g_escape) = build_function_asm<void(*)(spu_thread*)>("spu_escape", [](native_asm& c, auto& args) { using namespace asmjit; #if defined(ARCH_X64) // Restore native stack pointer (longjmp emulation) c.mov(x86::rsp, x86::qword_ptr(args[0], ::offset32(&spu_thread::hv_ctx, &rpcs3::hypervisor_context_t::regs))); // Return to the return location c.sub(x86::rsp, 8); c.ret(); #elif defined(ARCH_ARM64) // Far ret, jumps to gateway epilogue const u32 reg_base = ::offset32(&spu_thread::hv_ctx, &rpcs3::hypervisor_context_t::regs); c.mov(a64::x19, args[0]); c.mov(a64::x15, Imm(reg_base)); c.add(a64::x15, a64::x15, args[0]); c.ldr(a64::x30, arm::Mem(a64::x15)); c.ret(a64::x30); #else #error "Unimplemented" #endif }); DECLARE(spu_runtime::g_tail_escape) = build_function_asm<void(*)(spu_thread*, spu_function_t, u8*)>("spu_tail_escape", [](native_asm& c, auto& args) { using namespace asmjit; #if defined(ARCH_X64) // Restore native stack pointer (longjmp emulation) c.mov(x86::rsp, x86::qword_ptr(args[0], ::offset32(&spu_thread::hv_ctx, &rpcs3::hypervisor_context_t::regs))); // Adjust stack for initial call instruction in the gateway c.sub(x86::rsp, 16); // Tail call, GHC CC (second arg) c.mov(x86::r13, args[0]); 
// x64 epilogue of g_tail_escape: populate the GHC calling-convention registers
// (rbp = LS base, r12 = 3rd arg, rbx = 0) and "return" into the target function,
// whose address was pushed onto the freshly restored native stack above.
c.mov(x86::rbp, x86::qword_ptr(args[0], ::offset32(&spu_thread::ls)));
c.mov(x86::r12, args[2]);
c.xor_(x86::ebx, x86::ebx);
c.mov(x86::qword_ptr(x86::rsp), args[1]);
c.ret();
#elif defined(ARCH_ARM64)
// HV pointer
const u32 reg_base = ::offset32(&spu_thread::hv_ctx, &rpcs3::hypervisor_context_t::regs);

// Tail call, GHC CC
c.mov(a64::x19, args[0]); // REG_Base
c.mov(a64::x15, Imm(::offset32(&spu_thread::ls))); // SPU::ls offset cannot be correctly encoded for ldr as it is too large
c.ldr(a64::x20, arm::Mem(a64::x19, a64::x15)); // REG_Sp
c.mov(a64::x21, args[2]); // REG_Hp
c.mov(a64::x22, a64::xzr); // REG_R1

// Reset sp to patch leaks. Calls to tail escape may leave their stack "dirty" due to optimizations.
// reg_base + 8 holds the saved native sp (second slot of the hypervisor context pair).
c.mov(a64::x14, Imm(reg_base + 8));
c.ldr(a64::x15, arm::Mem(a64::x19, a64::x14));
c.mov(a64::sp, a64::x15);

// Push context. This gateway can be returned to normally through a ret chain.
// FIXME: Push the current PC and "this" as part of the pseudo-frame and return here directly.
c.sub(a64::sp, a64::sp, Imm(16));
c.str(args[0], arm::Mem(a64::sp));

// Allocate scratchpad. Not needed if using per-function frames, or if we just don't care about returning to C++ (jump to gw exit instead)
c.sub(a64::sp, a64::sp, Imm(8192));

// Make the far jump
c.mov(a64::x15, args[1]);
c.blr(a64::x15);

// Clear scratch allocation
c.add(a64::sp, a64::sp, Imm(8192));

// Restore context. Escape point expects the current thread pointer at x19
c.ldr(a64::x19, arm::Mem(a64::sp));
c.add(a64::sp, a64::sp, Imm(16));

// <Optional> We could technically just emit a return here, but we may not want to for now until support is more mature.
// Should we attempt a normal return after this point, we'd be going back to C++ code which we really don't want.
// We can't guarantee stack sanity for the C++ code and it's cookies since we're basically treating stack as a scratch playground since we entered the main gateway.
// Instead, just fall back to hypervisor here. It also makes debugging easier.
// Far exit through the saved hypervisor return address (first slot at reg_base),
// same mechanism as g_escape above.
c.mov(a64::x15, Imm(reg_base));
c.ldr(a64::x30, arm::Mem(a64::x19, a64::x15));
c.ret(a64::x30);
#else
#error "Unimplemented"
#endif
});

DECLARE(spu_runtime::g_interpreter_table) = {};

DECLARE(spu_runtime::g_interpreter) = nullptr;

// Open (or create) the on-disk SPU cache file in append mode.
spu_cache::spu_cache(const std::string& loc)
	: m_file(loc, fs::read + fs::write + fs::create + fs::append)
{
}

spu_cache::~spu_cache()
{
}

// Register an SPU data segment for LLVM precompilation: copies the segment,
// discovers function entry points inside it and queues the result on
// spu_cache::precompile_funcs. No-op unless LLVM precompilation is enabled and
// the cache is still collecting (collect_funcs_to_precompile).
extern void utilize_spu_data_segment(u32 vaddr, const void* ls_data_vaddr, u32 size)
{
	// Reject unaligned segment addresses (LS words are 4 bytes)
	if (vaddr % 4)
	{
		return;
	}

	// Truncate size to whole words
	size &= -4;

	if (!size || vaddr + size > SPU_LS_SIZE)
	{
		return;
	}

	if (!g_cfg.core.llvm_precompilation)
	{
		return;
	}

	g_fxo->need<spu_cache>();

	if (!g_fxo->get<spu_cache>().collect_funcs_to_precompile)
	{
		return;
	}

	// Snapshot the segment contents before queueing
	std::vector<u32> data(size / 4);
	std::memcpy(data.data(), ls_data_vaddr, size);

	spu_cache::precompile_data_t obj{vaddr, std::move(data)};

	obj.funcs = spu_thread::discover_functions(vaddr, { reinterpret_cast<const u8*>(ls_data_vaddr), size }, vaddr != 0, umax);

	if (obj.funcs.empty())
	{
		// Nothing to add
		return;
	}

	if (spu_log.notice)
	{
		// Format discovered entry points, 4 addresses per row
		std::string to_log;

		for (usz i = 0; i < obj.funcs.size(); i++)
		{
			if (i == 0 && obj.funcs.size() < 4)
			{
				// Skip newline in this case
				to_log += ' ';
			}
			else if (i % 4 == 0)
			{
				// NOTE(review): rows hold 4 entries but the row label divides by 8 — verify the intended row index
				fmt::append(to_log, "\n[%02u] ", i / 8);
			}
			else
			{
				to_log += ", ";
			}

			fmt::append(to_log, "0x%05x", obj.funcs[i]);
		}

		spu_log.notice("Found SPU function(s) at:%s", to_log);
	}

	spu_log.success("Found %u SPU function(s)", obj.funcs.size());

	g_fxo->get<spu_cache>().precompile_funcs.push(std::move(obj));
}

// For SPU cache validity check
// Byte-wise CRC-16 (init 0xFFFF; the x<<12 ^ x<<5 ^ x form corresponds to the
// CCITT polynomial 0x1021 — presumably CRC-16/CCITT-FALSE, verify if reused elsewhere).
static u16 calculate_crc16(const uchar* data, usz length)
{
	u16 crc = umax;

	while (length--)
	{
		u8 x = (crc >> 8) ^ *data++;
		x ^= (x >> 4);
		crc = static_cast<u16>((crc << 8) ^ (x << 12) ^ (x << 5) ^ x);
	}

	return crc;
}

// Load all cached SPU programs from the cache file. Returns an empty deque if
// the file is missing/unreadable; stops at the first truncated or invalid record.
std::deque<spu_program> spu_cache::get()
{
	std::deque<spu_program> result;

	if (!m_file)
	{
		return result;
	}

	m_file.seek(0);

	// TODO: signal truncated or otherwise broken file
	while (true)
	{
		// On-disk record header: crc16 of the body, size in words, entry address
		struct block_info_t
		{
			be_t<u16> crc;
			be_t<u16> size;
be_t<u32> addr; } block_info{}; if (!m_file.read(block_info)) { break; } const u32 crc = block_info.crc; const u32 size = block_info.size; const u32 addr = block_info.addr; if (utils::add_saturate<u32>(addr, size * 4) > SPU_LS_SIZE) { break; } std::vector<u32> func; if (!m_file.read(func, size)) { break; } if (!size || !func[0]) { // Skip old format Giga entries continue; } // CRC check is optional to be compatible with old format if (crc && std::max<u32>(calculate_crc16(reinterpret_cast<const uchar*>(func.data()), size * 4), 1) != crc) { // Invalid, but continue anyway continue; } spu_program res; res.entry_point = addr; res.lower_bound = addr; res.data = std::move(func); result.emplace_front(std::move(res)); } return result; } void spu_cache::add(const spu_program& func) { if (!m_file) { return; } be_t<u32> size = ::size32(func.data); be_t<u32> addr = func.entry_point; // Add CRC (forced non-zero) size |= std::max<u32>(calculate_crc16(reinterpret_cast<const uchar*>(func.data.data()), size * 4), 1) << 16; const fs::iovec_clone gather[3] { {&size, sizeof(size)}, {&addr, sizeof(addr)}, {func.data.data(), func.data.size() * 4} }; // Append data m_file.write_gather(gather, 3); } void spu_cache::initialize(bool build_existing_cache) { spu_runtime::g_interpreter = spu_runtime::g_gateway; if (g_cfg.core.spu_decoder == spu_decoder_type::_static || g_cfg.core.spu_decoder == spu_decoder_type::dynamic) { for (auto& x : *spu_runtime::g_dispatcher) { x.raw() = spu_runtime::tr_interpreter; } } const std::string ppu_cache = rpcs3::cache::get_ppu_cache(); if (ppu_cache.empty()) { return; } // SPU cache file (version + block size type) const std::string loc = ppu_cache + "spu-" + fmt::to_lower(g_cfg.core.spu_block_size.to_string()) + "-v1-tane.dat"; spu_cache cache(loc); if (!cache) { spu_log.error("Failed to initialize SPU cache at: %s", loc); return; } // Read cache auto func_list = cache.get(); atomic_t<usz> fnext{}; atomic_t<u8> fail_flag{0}; auto data_list = 
g_fxo->get<spu_cache>().precompile_funcs.pop_all(); g_fxo->get<spu_cache>().collect_funcs_to_precompile = false; usz total_precompile = 0; for (auto& sec : data_list) { total_precompile += sec.funcs.size(); } const bool spu_precompilation_enabled = func_list.empty() && g_cfg.core.spu_cache && g_cfg.core.llvm_precompilation; if (spu_precompilation_enabled) { // What compiles in this case goes straight to disk g_fxo->get<spu_cache>() = std::move(cache); } else if (!build_existing_cache) { return; } else { total_precompile = 0; data_list = {}; } atomic_t<usz> data_indexer = 0; if (g_cfg.core.spu_decoder == spu_decoder_type::dynamic || g_cfg.core.spu_decoder == spu_decoder_type::llvm) { if (auto compiler = spu_recompiler_base::make_llvm_recompiler(11)) { compiler->init(); if (compiler->compile({}) && spu_runtime::g_interpreter) { spu_log.success("SPU Runtime: Built the interpreter."); if (g_cfg.core.spu_decoder != spu_decoder_type::llvm) { return; } } else { spu_log.fatal("SPU Runtime: Failed to build the interpreter."); } } } u32 worker_count = 0; std::optional<scoped_progress_dialog> progress_dialog; u32 total_funcs = 0; if (g_cfg.core.spu_decoder == spu_decoder_type::asmjit || g_cfg.core.spu_decoder == spu_decoder_type::llvm) { const usz add_count = func_list.size() + total_precompile; if (add_count) { total_funcs = build_existing_cache ? 
::narrow<u32>(add_count) : 0; } worker_count = std::min<u32>(rpcs3::utils::get_max_threads(), ::narrow<u32>(add_count)); } atomic_t<u32> pending_progress = 0; atomic_t<bool> showing_progress = false; if (!g_progr_ptotal) { g_progr_ptotal += total_funcs; showing_progress.release(true); progress_dialog.emplace(get_localized_string(localized_string_id::PROGRESS_DIALOG_BUILDING_SPU_CACHE)); } named_thread_group workers("SPU Worker ", worker_count, [&]() -> uint { #ifdef __APPLE__ pthread_jit_write_protect_np(false); #endif // Set low priority thread_ctrl::scoped_priority low_prio(-1); // Initialize compiler instances for parallel compilation std::unique_ptr<spu_recompiler_base> compiler; if (g_cfg.core.spu_decoder == spu_decoder_type::asmjit) { compiler = spu_recompiler_base::make_asmjit_recompiler(); } else if (g_cfg.core.spu_decoder == spu_decoder_type::llvm) { compiler = spu_recompiler_base::make_llvm_recompiler(); } compiler->init(); // Counter for error reporting u32 logged_error = 0; // How much every thread compiled uint result = 0; // Fake LS std::vector<be_t<u32>> ls(0x10000); usz func_i = fnext++; // Ensure some actions are performed on a single thread const bool is_first_thread = func_i == 0; // Build functions for (; func_i < func_list.size(); func_i = fnext++, (showing_progress ? g_progr_pdone : pending_progress) += build_existing_cache ? 
1 : 0) { const spu_program& func = std::as_const(func_list)[func_i]; if (Emu.IsStopped() || fail_flag) { continue; } // Get data start const u32 start = func.lower_bound; const u32 size0 = ::size32(func.data); be_t<u64> hash_start; { sha1_context ctx; u8 output[20]; sha1_starts(&ctx); sha1_update(&ctx, reinterpret_cast<const u8*>(func.data.data()), func.data.size() * 4); sha1_finish(&ctx, output); std::memcpy(&hash_start, output, sizeof(hash_start)); } // Check hash against allowed bounds const bool inverse_bounds = g_cfg.core.spu_llvm_lower_bound > g_cfg.core.spu_llvm_upper_bound; if ((!inverse_bounds && (hash_start < g_cfg.core.spu_llvm_lower_bound || hash_start > g_cfg.core.spu_llvm_upper_bound)) || (inverse_bounds && (hash_start < g_cfg.core.spu_llvm_lower_bound && hash_start > g_cfg.core.spu_llvm_upper_bound))) { spu_log.error("[Debug] Skipped function %s", fmt::base57(hash_start)); result++; continue; } // Initialize LS with function data only for (u32 i = 0, pos = start; i < size0; i++, pos += 4) { ls[pos / 4] = std::bit_cast<be_t<u32>>(func.data[i]); } // Call analyser spu_program func2 = compiler->analyse(ls.data(), func.entry_point); if (func2 != func) { spu_log.error("[0x%05x] SPU Analyser failed, %u vs %u", func2.entry_point, func2.data.size(), size0); if (logged_error < 2) { std::string log; compiler->dump(func, log); spu_log.notice("[0x%05x] Function: %s", func.entry_point, log); logged_error++; } } else if (!compiler->compile(std::move(func2))) { // Likely, out of JIT memory. Signal to prevent further building. 
fail_flag |= 1; continue; } // Clear fake LS std::memset(ls.data() + start / 4, 0, 4 * (size0 - 1)); result++; if (is_first_thread && !showing_progress) { if (!g_progr_text.load() && !g_progr_ptotal && !g_progr_ftotal) { showing_progress = true; g_progr_pdone += pending_progress.exchange(0); g_progr_ptotal += total_funcs; progress_dialog.emplace(get_localized_string(localized_string_id::PROGRESS_DIALOG_BUILDING_SPU_CACHE)); } } else if (showing_progress && pending_progress) { // Cover missing progress due to a race g_progr_pdone += pending_progress.exchange(0); } } u32 last_sec_idx = umax; for (func_i = data_indexer++;; func_i = data_indexer++, (showing_progress ? g_progr_pdone : pending_progress) += build_existing_cache ? 1 : 0) { usz passed_count = 0; u32 func_addr = 0; u32 next_func = 0; u32 sec_addr = umax; u32 sec_idx = 0; std::span<const u32> inst_data; // Try to get the data this index points to for (auto& sec : data_list) { if (func_i < passed_count + sec.funcs.size()) { const usz func_idx = func_i - passed_count; sec_addr = sec.vaddr; func_addr = ::at32(sec.funcs, func_idx); inst_data = { sec.inst_data.data(), sec.inst_data.size() }; next_func = sec.funcs.size() >= func_idx ? 
::narrow<u32>(sec_addr + inst_data.size() * 4) : sec.funcs[func_idx]; break; } passed_count += sec.funcs.size(); sec_idx++; } if (sec_addr == umax) { // End of compilation for thread break; } if (Emu.IsStopped() || fail_flag) { continue; } if (last_sec_idx != sec_idx) { if (last_sec_idx != umax) { // Clear fake LS of previous section auto& sec = data_list[last_sec_idx]; std::memset(ls.data() + sec.vaddr / 4, 0, sec.inst_data.size() * 4); } // Initialize LS with the entire section data for (u32 i = 0, pos = sec_addr; i < inst_data.size(); i++, pos += 4) { ls[pos / 4] = std::bit_cast<be_t<u32>>(inst_data[i]); } last_sec_idx = sec_idx; } u32 block_addr = func_addr; std::map<u32, std::vector<u32>> targets; // Call analyser spu_program func2 = compiler->analyse(ls.data(), block_addr, &targets); while (!func2.data.empty()) { const u32 last_inst = std::bit_cast<be_t<u32>>(func2.data.back()); const u32 prog_size = ::size32(func2.data); if (!compiler->compile(std::move(func2))) { // Likely, out of JIT memory. Signal to prevent further building. 
fail_flag |= 1; break; } result++; const u32 start_new = block_addr + prog_size * 4; if (start_new >= next_func || (start_new == next_func - 4 && ls[start_new / 4] == 0x200000u)) { // Completed break; } if (auto type = g_spu_itype.decode(last_inst); type == spu_itype::BRSL || type == spu_itype::BRASL || type == spu_itype::BISL || type == spu_itype::SYNC) { if (ls[start_new / 4] && g_spu_itype.decode(ls[start_new / 4]) != spu_itype::UNK) { spu_log.notice("Precompiling fallthrough to 0x%05x", start_new); func2 = compiler->analyse(ls.data(), start_new, &targets); block_addr = start_new; continue; } } if (targets.empty()) { break; } const auto upper = targets.upper_bound(func_addr); if (upper == targets.begin()) { break; } u32 new_entry = umax; // Find the lowest target in the space in-between for (auto it = std::prev(upper); it != targets.end() && it->first < start_new && new_entry > start_new; it++) { for (u32 target : it->second) { if (target >= start_new && target < next_func) { if (target < new_entry) { new_entry = target; if (new_entry == start_new) { // Cannot go lower break; } } } } } if (new_entry != umax && !spu_thread::is_exec_code(new_entry, { reinterpret_cast<const u8*>(ls.data()), SPU_LS_SIZE }, 0, true)) { new_entry = umax; } if (new_entry == umax) { new_entry = start_new; while (new_entry < next_func && (ls[start_new / 4] < 0x3fffc || !spu_thread::is_exec_code(new_entry, { reinterpret_cast<const u8*>(ls.data()), SPU_LS_SIZE }, 0, true))) { new_entry += 4; } if (new_entry >= next_func || (new_entry == next_func - 4 && ls[new_entry / 4] == 0x200000u)) { // Completed break; } } spu_log.notice("Precompiling filler space at 0x%05x (next=0x%05x)", new_entry, next_func); func2 = compiler->analyse(ls.data(), new_entry, &targets); block_addr = new_entry; } if (is_first_thread && !showing_progress) { if (!g_progr_text.load() && !g_progr_ptotal && !g_progr_ftotal) { showing_progress = true; g_progr_pdone += pending_progress.exchange(0); g_progr_ptotal += 
total_funcs; progress_dialog.emplace(get_localized_string(localized_string_id::PROGRESS_DIALOG_BUILDING_SPU_CACHE)); } } else if (showing_progress && pending_progress) { // Cover missing progress due to a race g_progr_pdone += pending_progress.exchange(0); } } if (showing_progress && pending_progress) { // Cover missing progress due to a race g_progr_pdone += pending_progress.exchange(0); } return result; }); u32 built_total = 0; // Join (implicitly) and print individual results for (u32 i = 0; i < workers.size(); i++) { spu_log.notice("SPU Runtime: Worker %u built %u programs.", i + 1, workers[i]); built_total += workers[i]; } spu_log.notice("SPU Runtime: Workers built %u programs.", built_total); if (Emu.IsStopped()) { spu_log.error("SPU Runtime: Cache building aborted."); return; } if (fail_flag) { spu_log.fatal("SPU Runtime: Cache building failed (out of memory)."); return; } if ((g_cfg.core.spu_decoder == spu_decoder_type::asmjit || g_cfg.core.spu_decoder == spu_decoder_type::llvm) && !func_list.empty()) { spu_log.success("SPU Runtime: Built %u functions.", func_list.size()); if (g_cfg.core.spu_debug) { std::string dump; dump.reserve(10'000'000); std::map<std::span<u8>, spu_program*, span_less<u8>> sorted; for (auto&& f : func_list) { // Interpret as a byte string std::span<u8> data = {reinterpret_cast<u8*>(f.data.data()), f.data.size() * sizeof(u32)}; sorted[data] = &f; } std::unordered_set<u32> depth_n; u32 n_max = 0; for (auto&& [bytes, f] : sorted) { { sha1_context ctx; u8 output[20]; sha1_starts(&ctx); sha1_update(&ctx, bytes.data(), bytes.size()); sha1_finish(&ctx, output); fmt::append(dump, "\n\t[%s] ", fmt::base57(output)); } u32 depth_m = 0; for (auto&& [data, f2] : sorted) { u32 depth = 0; if (f2 == f) { continue; } for (u32 i = 0; i < bytes.size(); i++) { if (i < data.size() && data[i] == bytes[i]) { depth++; } else { break; } } depth_n.emplace(depth); depth_m = std::max(depth, depth_m); } fmt::append(dump, "c=%06d,d=%06d ", depth_n.size(), 
depth_m); bool sk = false; for (u32 i = 0; i < std::min<usz>(bytes.size(), std::max<usz>(256, depth_m)); i++) { if (depth_m == i) { dump += '|'; sk = true; } fmt::append(dump, "%02x", bytes[i]); if (i % 4 == 3) { if (sk) { sk = false; } else { dump += ' '; } dump += ' '; } } fmt::append(dump, "\n\t%49s", ""); for (u32 i = 0; i < std::min<usz>(f->data.size(), std::max<usz>(64, utils::aligned_div<u32>(depth_m, 4))); i++) { fmt::append(dump, "%-10s", g_spu_iname.decode(std::bit_cast<be_t<u32>>(f->data[i]))); } n_max = std::max(n_max, ::size32(depth_n)); depth_n.clear(); } spu_log.notice("SPU Cache Dump (max_c=%d): %s", n_max, dump); } } // Initialize global cache instance if (g_cfg.core.spu_cache && cache) { g_fxo->get<spu_cache>() = std::move(cache); } } bool spu_program::operator==(const spu_program& rhs) const noexcept { // TODO return entry_point - lower_bound == rhs.entry_point - rhs.lower_bound && data == rhs.data; } bool spu_program::operator<(const spu_program& rhs) const noexcept { const u32 lhs_offs = (entry_point - lower_bound) / 4; const u32 rhs_offs = (rhs.entry_point - rhs.lower_bound) / 4; // Select range for comparison std::span<const u32> lhs_data(data.data() + lhs_offs, data.size() - lhs_offs); std::span<const u32> rhs_data(rhs.data.data() + rhs_offs, rhs.data.size() - rhs_offs); const int cmp0 = span_less<const u32>::compare(lhs_data, rhs_data); if (cmp0 < 0) return true; else if (cmp0 > 0) return false; // Compare from address 0 to the point before the entry point (TODO: undesirable) lhs_data = {data.data(), lhs_offs}; rhs_data = {rhs.data.data(), rhs_offs}; const int cmp1 = span_less<const u32>::compare(lhs_data, rhs_data); if (cmp1 < 0) return true; else if (cmp1 > 0) return false; // TODO return lhs_offs < rhs_offs; } spu_runtime::spu_runtime() { // Clear LLVM output m_cache_path = rpcs3::cache::get_ppu_cache(); if (m_cache_path.empty()) { return; } if (g_cfg.core.spu_debug && g_cfg.core.spu_decoder != spu_decoder_type::dynamic && 
g_cfg.core.spu_decoder != spu_decoder_type::_static) { if (!fs::create_dir(m_cache_path + "llvm/")) { fs::remove_all(m_cache_path + "llvm/", false); } fs::file(m_cache_path + "spu.log", fs::rewrite); fs::file(m_cache_path + "spu-ir.log", fs::rewrite); } } spu_item* spu_runtime::add_empty(spu_program&& data) { if (data.data.empty()) { return nullptr; } // Store previous item if already added spu_item* prev = nullptr; //Try to add item that doesn't exist yet const auto ret = m_stuff[data.data[0] >> 12].push_if([&](spu_item& _new, spu_item& _old) { if (_new.data == _old.data) { prev = &_old; return false; } return true; }, std::move(data)); if (ret) { return ret; } return prev; } spu_function_t spu_runtime::rebuild_ubertrampoline(u32 id_inst) { // Prepare sorted list static thread_local std::vector<std::pair<std::span<const u32>, spu_function_t>> m_flat_list; // Remember top position auto stuff_it = ::at32(m_stuff, id_inst >> 12).begin(); auto stuff_end = ::at32(m_stuff, id_inst >> 12).end(); { if (stuff_it->trampoline) { return stuff_it->trampoline; } m_flat_list.clear(); for (auto it = stuff_it; it != stuff_end; ++it) { if (const auto ptr = it->compiled.load()) { std::span<const u32> range{it->data.data.data(), it->data.data.size()}; range = range.subspan((it->data.entry_point - it->data.lower_bound) / 4); m_flat_list.emplace_back(range, ptr); } else { // Pull oneself deeper (TODO) ++stuff_it; } } } std::sort(m_flat_list.begin(), m_flat_list.end(), FN(s_span_less<const u32>(x.first, y.first))); struct work { u32 size; u16 from; u16 level; u8* rel32; decltype(m_flat_list)::iterator beg; decltype(m_flat_list)::iterator end; }; // Scratch vector static thread_local std::vector<work> workload; // Generate a dispatcher (übertrampoline) const auto beg = m_flat_list.begin(); const auto _end = m_flat_list.end(); const u32 size0 = ::size32(m_flat_list); auto result = beg->second; if (size0 != 1) { #if defined(ARCH_ARM64) // Allocate some writable executable memory u8* const 
wxptr = jit_runtime::alloc(size0 * 128 + 16, 16); if (!wxptr) { return nullptr; } // Raw assembly pointer u8* raw = wxptr; auto make_jump = [&](asmjit::arm::CondCode op, auto target) { // 36 bytes // Fallback to dispatch if no target const u64 taddr = target ? reinterpret_cast<u64>(target) : reinterpret_cast<u64>(tr_dispatch); // ldr x9, #16 -> ldr x9, taddr *raw++ = 0x89; *raw++ = 0x00; *raw++ = 0x00; *raw++ = 0x58; if (op == asmjit::arm::CondCode::kAlways) { // br x9 *raw++ = 0x20; *raw++ = 0x01; *raw++ = 0x1F; *raw++ = 0xD6; // nop *raw++ = 0x1F; *raw++ = 0x20; *raw++ = 0x03; *raw++ = 0xD5; // nop *raw++ = 0x1F; *raw++ = 0x20; *raw++ = 0x03; *raw++ = 0xD5; } else { // b.COND #8 -> b.COND do_branch switch (op) { case asmjit::arm::CondCode::kUnsignedLT: *raw++ = 0x43; break; case asmjit::arm::CondCode::kUnsignedGT: *raw++ = 0x48; break; default: asm("brk 0x42"); } *raw++ = 0x00; *raw++ = 0x00; *raw++ = 0x54; // b #16 -> b cont *raw++ = 0x04; *raw++ = 0x00; *raw++ = 0x00; *raw++ = 0x14; // do_branch: br x9 *raw++ = 0x20; *raw++ = 0x01; *raw++ = 0x1f; *raw++ = 0xD6; } // taddr std::memcpy(raw, &taddr, 8); raw += 8; // cont: next instruction }; #elif defined(ARCH_X64) // Allocate some writable executable memory u8* const wxptr = jit_runtime::alloc(size0 * 22 + 14, 16); if (!wxptr) { return nullptr; } // Raw assembly pointer u8* raw = wxptr; // Write jump instruction with rel32 immediate auto make_jump = [&](u8 op, auto target) { ensure(raw + 8 <= wxptr + size0 * 22 + 16); // Fallback to dispatch if no target const u64 taddr = target ? reinterpret_cast<u64>(target) : reinterpret_cast<u64>(tr_dispatch); // Compute the distance const s64 rel = taddr - reinterpret_cast<u64>(raw) - (op != 0xe9 ? 
6 : 5); ensure(rel >= s32{smin} && rel <= s32{smax}); if (op != 0xe9) { // First jcc byte *raw++ = 0x0f; ensure((op >> 4) == 0x8); } *raw++ = op; const s32 r32 = static_cast<s32>(rel); std::memcpy(raw, &r32, 4); raw += 4; }; #endif workload.clear(); workload.reserve(size0); workload.emplace_back(); workload.back().size = size0; workload.back().level = 0; workload.back().from = -1; workload.back().rel32 = nullptr; workload.back().beg = beg; workload.back().end = _end; // LS address starting from PC is already loaded into rcx (see spu_runtime::tr_all) for (usz i = 0; i < workload.size(); i++) { // Get copy of the workload info auto w = workload[i]; // Split range in two parts auto it = w.beg; auto it2 = w.beg; u32 size1 = w.size / 2; u32 size2 = w.size - size1; std::advance(it2, w.size / 2); while (ensure(w.level < umax)) { it = it2; size1 = w.size - size2; if (w.level >= w.beg->first.size()) { // Cannot split: smallest function is a prefix of bigger ones (TODO) break; } const u32 x1 = ::at32(w.beg->first, w.level); if (!x1) { // Cannot split: some functions contain holes at this level w.level++; // Resort subrange starting from the new level std::stable_sort(w.beg, w.end, [&](const auto& a, const auto& b) { std::span<const u32> lhs = a.first; std::span<const u32> rhs = b.first; lhs = lhs.subspan(w.level); rhs = rhs.subspan(w.level); return s_span_less<const u32>(lhs, rhs); }); continue; } // Adjust ranges (forward) while (it != w.end && x1 == ::at32(it->first, w.level)) { it++; size1++; } if (it == w.end) { // Cannot split: words are identical within the range at this level w.level++; } else { size2 = w.size - size1; break; } } if (w.rel32) { #if defined(ARCH_X64) // Patch rel32 linking it to the current location if necessary const s32 r32 = ::narrow<s32>(raw - w.rel32); std::memcpy(w.rel32 - 4, &r32, 4); #elif defined(ARCH_ARM64) // Rewrite jump address { u64 raw64 = reinterpret_cast<u64>(raw); memcpy(w.rel32 - 8, &raw64, 8); } #else #error "Unimplemented" #endif } 
if (w.level >= w.beg->first.size() || w.level >= it->first.size()) { // If functions cannot be compared, assume smallest function spu_log.error("Trampoline simplified at ??? (level=%u)", w.level); #if defined(ARCH_X64) make_jump(0xe9, w.beg->second); // jmp rel32 #elif defined(ARCH_ARM64) u64 branch_target = reinterpret_cast<u64>(w.beg->second); make_jump(asmjit::arm::CondCode::kAlways, branch_target); #else #error "Unimplemented" #endif continue; } // Value for comparison const u32 x = ::at32(it->first, w.level); // Adjust ranges (backward) while (it != m_flat_list.begin()) { it--; if (w.level >= it->first.size()) { it = m_flat_list.end(); break; } if (::at32(it->first, w.level) != x) { it++; break; } ensure(it != w.beg); size1--; size2++; } if (it == m_flat_list.end()) { spu_log.error("Trampoline simplified (II) at ??? (level=%u)", w.level); #if defined(ARCH_X64) make_jump(0xe9, w.beg->second); // jmp rel32 #elif defined(ARCH_ARM64) u64 branch_target = reinterpret_cast<u64>(w.beg->second); make_jump(asmjit::arm::CondCode::kAlways, branch_target); #else #error "Unimplemented" #endif continue; } // Emit 32-bit comparison #if defined(ARCH_X64) ensure(raw + 12 <= wxptr + size0 * 22 + 16); // "Asm overflow" #elif defined(ARCH_ARM64) ensure(raw + (4 * 4) <= wxptr + size0 * 128 + 16); #else #error "Unimplemented" #endif if (w.from != w.level) { // If necessary (level has advanced), emit load: mov eax, [rcx + addr] const u32 cmp_lsa = w.level * 4u; #if defined(ARCH_X64) if (cmp_lsa < 0x80) { *raw++ = 0x8b; *raw++ = 0x41; *raw++ = ::narrow<s8>(cmp_lsa); } else { *raw++ = 0x8b; *raw++ = 0x81; std::memcpy(raw, &cmp_lsa, 4); raw += 4; } #elif defined(ARCH_ARM64) // ldr w9, #8 *raw++ = 0x49; *raw++ = 0x00; *raw++ = 0x00; *raw++ = 0x18; // b #8 *raw++ = 0x02; *raw++ = 0x00; *raw++ = 0x00; *raw++ = 0x14; // cmp_lsa std::memcpy(raw, &cmp_lsa, 4); raw += 4; // ldr w1, [x7, x9] *raw++ = 0xE1; *raw++ = 0x68; *raw++ = 0x69; *raw++ = 0xB8; #else #error "Unimplemented" #endif } // 
Emit comparison: cmp eax, imm32 #if defined(ARCH_X64) *raw++ = 0x3d; std::memcpy(raw, &x, 4); raw += 4; #elif defined(ARCH_ARM64) // ldr w9, #8 *raw++ = 0x49; *raw++ = 0x00; *raw++ = 0x00; *raw++ = 0x18; // b #8 *raw++ = 0x02; *raw++ = 0x00; *raw++ = 0x00; *raw++ = 0x14; // x std::memcpy(raw, &x, 4); raw += 4; // cmp w1, w9 *raw++ = 0x3f; *raw++ = 0x00; *raw++ = 0x09; *raw++ = 0x6B; #else #error "Unimplemented" #endif // Low subrange target if (size1 == 1) { #if defined(ARCH_X64) make_jump(0x82, w.beg->second); // jb rel32 #elif defined(ARCH_ARM64) u64 branch_target = reinterpret_cast<u64>(w.beg->second); make_jump(asmjit::arm::CondCode::kUnsignedLT, branch_target); #else #error "Unimplemented" #endif } else { #if defined(ARCH_X64) make_jump(0x82, raw); // jb rel32 (stub) #elif defined(ARCH_ARM64) make_jump(asmjit::arm::CondCode::kUnsignedLT, raw); #else #error "Unimplemented" #endif auto& to = workload.emplace_back(w); to.end = it; to.size = size1; to.rel32 = raw; to.from = w.level; } // Second subrange target if (size2 == 1) { #if defined(ARCH_X64) make_jump(0xe9, it->second); // jmp rel32 #elif defined(ARCH_ARM64) u64 branch_target = reinterpret_cast<u64>(it->second); make_jump(asmjit::arm::CondCode::kAlways, branch_target); #else #error "Unimplemented" #endif } else { it2 = it; // Select additional midrange for equality comparison while (it2 != w.end && ::at32(it2->first, w.level) == x) { size2--; it2++; } if (it2 != w.end) { // High subrange target if (size2 == 1) { #if defined(ARCH_X64) make_jump(0x87, it2->second); // ja rel32 #elif defined(ARCH_ARM64) u64 branch_target = reinterpret_cast<u64>(it2->second); make_jump(asmjit::arm::CondCode::kUnsignedGT, branch_target); #else #throw "Unimplemented" #endif } else { #if defined(ARCH_X64) make_jump(0x87, raw); // ja rel32 (stub) #elif defined(ARCH_ARM64) make_jump(asmjit::arm::CondCode::kUnsignedGT, raw); #else #error "Unimplemented" #endif auto& to = workload.emplace_back(w); to.beg = it2; to.size = size2; 
to.rel32 = raw; to.from = w.level; } const u32 size3 = w.size - size1 - size2; if (size3 == 1) { #if defined(ARCH_X64) make_jump(0xe9, it->second); // jmp rel32 #elif defined(ARCH_ARM64) u64 branch_target = reinterpret_cast<u64>(it->second); make_jump(asmjit::arm::CondCode::kAlways, branch_target); #else #error "Unimplemented" #endif } else { #if defined(ARCH_X64) make_jump(0xe9, raw); // jmp rel32 (stub) #elif defined(ARCH_ARM64) make_jump(asmjit::arm::CondCode::kAlways, raw); #else #error "Unimplemented" #endif auto& to = workload.emplace_back(w); to.beg = it; to.end = it2; to.size = size3; to.rel32 = raw; to.from = w.level; } } else { #if defined(ARCH_X64) make_jump(0xe9, raw); // jmp rel32 (stub) #elif defined(ARCH_ARM64) make_jump(asmjit::arm::CondCode::kAlways, raw); #else #error "Unimplemented" #endif auto& to = workload.emplace_back(w); to.beg = it; to.size = w.size - size1; to.rel32 = raw; to.from = w.level; } } } workload.clear(); result = reinterpret_cast<spu_function_t>(reinterpret_cast<u64>(wxptr)); std::string fname; fmt::append(fname, "__ub%u", m_flat_list.size()); jit_announce(wxptr, raw - wxptr, fname); } if (auto _old = stuff_it->trampoline.compare_and_swap(nullptr, result)) { return _old; } // Install ubertrampoline auto& insert_to = ::at32(*spu_runtime::g_dispatcher, id_inst >> 12); auto _old = insert_to.load(); do { // Make sure we are replacing an older ubertrampoline but not newer one if (_old != tr_dispatch) { bool ok = false; for (auto it = stuff_it; it != stuff_end; ++it) { if (it->trampoline == _old) { ok = true; break; } } if (!ok) { return result; } } } while (!insert_to.compare_exchange(_old, result)); return result; } spu_function_t spu_runtime::find(const u32* ls, u32 addr) const { const u32 index = ls[addr / 4] >> 12; for (const auto& item : ::at32(m_stuff, index)) { if (const auto ptr = item.compiled.load()) { std::span<const u32> range{item.data.data.data(), item.data.data.size()}; range = range.subspan((item.data.entry_point - 
item.data.lower_bound) / 4);

			// Reject candidates that would run past the end of the 256KB LS (0x10000 words)
			if (addr / 4 + range.size() > 0x10000)
			{
				continue;
			}

			// Verify the stored program matches LS contents word-for-word
			if (std::equal(range.begin(), range.end(), ls + addr / 4))
			{
				return ptr;
			}
		}
	}

	return nullptr;
}

// Emit a small trampoline that records its own location and jumps to tr_branch
// (spu_recompiler_base::branch). The 16-bit `data` value is stored in the last
// two bytes of the stub so branch() can read it back from the patchpoint address.
spu_function_t spu_runtime::make_branch_patchpoint(u16 data) const
{
#if defined(ARCH_X64)
	u8* const raw = jit_runtime::alloc(16, 16);

	if (!raw)
	{
		return nullptr;
	}

	// Save address of the following jmp (GHC CC 3rd argument)
	raw[0] = 0x4c; // lea r12, [rip+1]
	raw[1] = 0x8d;
	raw[2] = 0x25;
	raw[3] = 0x01;
	raw[4] = 0x00;
	raw[5] = 0x00;
	raw[6] = 0x00;

	raw[7] = 0x90; // nop

	// Jump to spu_recompiler_base::branch
	raw[8] = 0xe9;
	// Compute the distance
	const s64 rel = reinterpret_cast<u64>(tr_branch) - reinterpret_cast<u64>(raw + 8) - 5;
	std::memcpy(raw + 9, &rel, 4);
	raw[13] = 0xcc;
	raw[14] = data >> 8;
	raw[15] = data & 0xff;
	return reinterpret_cast<spu_function_t>(raw);
#elif defined(ARCH_ARM64)
#if defined(__APPLE__)
	// Allow writing to JIT memory (MAP_JIT) on Apple Silicon
	pthread_jit_write_protect_np(false);
#endif

	u8* const patch_fn = ensure(jit_runtime::alloc(36, 16));
	u8* raw = patch_fn;

	// adr x21, #16
	*raw++ = 0x95;
	*raw++ = 0x00;
	*raw++ = 0x00;
	*raw++ = 0x10;

	// nop x3
	for (int i = 0; i < 3; i++)
	{
		*raw++ = 0x1F;
		*raw++ = 0x20;
		*raw++ = 0x03;
		*raw++ = 0xD5;
	}

	// ldr x9, #8
	*raw++ = 0x49;
	*raw++ = 0x00;
	*raw++ = 0x00;
	*raw++ = 0x58;

	// br x9
	*raw++ = 0x20;
	*raw++ = 0x01;
	*raw++ = 0x1F;
	*raw++ = 0xD6;

	// Embedded absolute target for the ldr/br pair above
	u64 branch_target = reinterpret_cast<u64>(tr_branch);
	std::memcpy(raw, &branch_target, 8);
	raw += 8;

	// Trailing payload: `data`, read back by branch() at rip[16]/rip[17]
	*raw++ = static_cast<u8>(data >> 8);
	*raw++ = static_cast<u8>(data & 0xff);

#if defined(__APPLE__)
	pthread_jit_write_protect_np(true);
#endif

	// Flush all cache lines after potentially writing executable code
	asm("ISB");
	asm("DSB ISH");

	return reinterpret_cast<spu_function_t>(patch_fn);
#else
#error "Unimplemented"
#endif
}

spu_recompiler_base::spu_recompiler_base()
{
}

spu_recompiler_base::~spu_recompiler_base()
{
}

// Dispatcher fallback: called when no compiled function matched; recompiles and resumes.
void spu_recompiler_base::dispatch(spu_thread& spu, void*, u8* rip)
{
	// If code verification failed from a patched patchpoint, clear
	// it with a dispatcher jump
	if (rip)
	{
#if defined(ARCH_X64)
		const s64 rel = reinterpret_cast<u64>(spu_runtime::tr_all) - reinterpret_cast<u64>(rip - 8) - 5;

		union
		{
			u8 bytes[8];
			u64 result;
		};

		bytes[0] = 0xe9; // jmp rel32
		std::memcpy(bytes + 1, &rel, 4);
		bytes[5] = 0x66; // lnop (2 bytes)
		bytes[6] = 0x90;
		bytes[7] = 0x90;

		// Patch the 8 bytes atomically so concurrently executing threads see a coherent instruction
		atomic_storage<u64>::release(*reinterpret_cast<u64*>(rip - 8), result);
#elif defined(ARCH_ARM64)
		union
		{
			u8 bytes[16];
			u128 result;
		};

		// ldr x9, #8
		bytes[0] = 0x49;
		bytes[1] = 0x00;
		bytes[2] = 0x00;
		bytes[3] = 0x58;

		// br x9
		bytes[4] = 0x20;
		bytes[5] = 0x01;
		bytes[6] = 0x1F;
		bytes[7] = 0xD6;

		const u64 target = reinterpret_cast<u64>(spu_runtime::tr_all);
		std::memcpy(bytes + 8, &target, 8);
#if defined(__APPLE__)
		pthread_jit_write_protect_np(false);
#endif
		atomic_storage<u128>::release(*reinterpret_cast<u128*>(rip), result);
#if defined(__APPLE__)
		pthread_jit_write_protect_np(true);
#endif

		// Flush all cache lines after potentially writing executable code
		asm("ISB");
		asm("DSB ISH");
#else
#error "Unimplemented"
#endif
	}

	// Second attempt (recover from the recursion after repeated unsuccessful trampoline call)
	if (spu.block_counter != spu.block_recover && &dispatch != ::at32(*spu_runtime::g_dispatcher, spu._ref<nse_t<u32>>(spu.pc) >> 12))
	{
		spu.block_recover = spu.block_counter;
		return;
	}

	spu.jit->init();

	// Compile
	if (spu._ref<u32>(spu.pc) == 0u)
	{
		// Null instruction at PC: bail out to the runtime
		spu_runtime::g_escape(&spu);
		return;
	}

	const auto func = spu.jit->compile(spu.jit->analyse(spu._ptr<u32>(0), spu.pc));

	if (!func)
	{
		spu_log.fatal("[0x%05x] Compilation failed.", spu.pc);
		return;
	}

	// Diagnostic
	if (g_cfg.core.spu_block_size == spu_block_size_type::giga)
	{
		const v128 _info = spu.stack_mirror[(spu.gpr[1]._u32[3] & 0x3fff0) >> 4];

		if (_info._u64[0] + 1)
		{
			spu_log.trace("Called from 0x%x", _info._u32[2] - 4);
		}
	}

#if defined(__APPLE__)
	pthread_jit_write_protect_np(true);
#endif
#if defined(ARCH_ARM64)
	// Flush all cache lines after potentially writing executable code
	asm("ISB");
	asm("DSB ISH");
#endif

	// Tail-jump into the freshly compiled function
	spu_runtime::g_tail_escape(&spu, func, nullptr);
}

// Patchpoint handler: finds the compiled target and rewrites the calling
// patchpoint (at `rip`) into a direct jump to it.
void spu_recompiler_base::branch(spu_thread& spu, void*, u8* rip)
{
	// Read the 16-bit payload stored by make_branch_patchpoint
#if defined(ARCH_X64)
	if (const u32 ls_off = ((rip[6] << 8) | rip[7]) * 4)
#elif defined(ARCH_ARM64)
	if (const u32 ls_off = ((rip[16] << 8) | rip[17]) * 4) // See branch_patchpoint `data`
#else
#error "Unimplemented"
#endif
	{
		spu_log.todo("Special branch patchpoint hit.\nPlease report to the developer (0x%05x).", ls_off);
	}

	// Find function
	const auto func = spu.jit->get_runtime().find(static_cast<u32*>(spu._ptr<void>(0)), spu.pc);

	if (!func)
	{
		return;
	}

#if defined(ARCH_X64)
	// Overwrite jump to this function with jump to the compiled function
	const s64 rel = reinterpret_cast<u64>(func) - reinterpret_cast<u64>(rip) - 5;

	union
	{
		u8 bytes[8];
		u64 result;
	};

	if (rel >= s32{smin} && rel <= s32{smax})
	{
		const s64 rel8 = (rel + 5) - 2;

		if (rel8 >= s8{smin} && rel8 <= s8{smax})
		{
			bytes[0] = 0xeb; // jmp rel8
			bytes[1] = static_cast<s8>(rel8);
			std::memset(bytes + 2, 0xcc, 4);
		}
		else
		{
			bytes[0] = 0xe9; // jmp rel32
			std::memcpy(bytes + 1, &rel, 4);
			bytes[5] = 0xcc;
		}

		// Preserve the embedded 16-bit payload
		bytes[6] = rip[6];
		bytes[7] = rip[7];
	}
	else
	{
		fmt::throw_exception("Impossible far jump: %p -> %p", rip, func);
	}

	atomic_storage<u64>::release(*reinterpret_cast<u64*>(rip), result);
#elif defined(ARCH_ARM64)
	union
	{
		u8 bytes[16];
		u128 result;
	};

	// ldr x9, #8
	bytes[0] = 0x49;
	bytes[1] = 0x00;
	bytes[2] = 0x00;
	bytes[3] = 0x58;

	// br x9
	bytes[4] = 0x20;
	bytes[5] = 0x01;
	bytes[6] = 0x1F;
	bytes[7] = 0xD6;

	const u64 target = reinterpret_cast<u64>(func);
	std::memcpy(bytes + 8, &target, 8);
#if defined(__APPLE__)
	pthread_jit_write_protect_np(false);
#endif
	atomic_storage<u128>::release(*reinterpret_cast<u128*>(rip), result);
#if defined(__APPLE__)
	pthread_jit_write_protect_np(true);
#endif

	// Flush all cache lines after potentially writing executable code
	asm("ISB");
	asm("DSB ISH");
#else
#error "Unimplemented"
#endif

	spu_runtime::g_tail_escape(&spu, func, rip);
}

void
// Fallback static-interpreter loop: decodes and executes one instruction at a time.
spu_recompiler_base::old_interpreter(spu_thread& spu, void* ls, u8* /*rip*/)
{
	if (g_cfg.core.spu_decoder != spu_decoder_type::_static)
	{
		fmt::throw_exception("Invalid SPU decoder");
	}

	// Select opcode table
	const auto& table = g_fxo->get<spu_interpreter_rt>();

	// LS pointer
	const auto base = static_cast<const u8*>(ls);

	while (true)
	{
		if (spu.state) [[unlikely]]
		{
			if (spu.check_state())
				break;
		}

		const u32 op = *reinterpret_cast<const be_t<u32>*>(base + spu.pc);

		// Handler returns true to advance PC (branches handle PC themselves)
		if (table.decode(op)(spu, {op}))
			spu.pc += 4;
	}
}

// Scan LS for probable function entry points by locating call/branch
// instructions and filtering out invalid targets. Returns sorted addresses.
std::vector<u32> spu_thread::discover_functions(u32 base_addr, std::span<const u8> ls, bool is_known_addr, u32 /*entry*/)
{
	std::vector<u32> calls;
	std::vector<u32> branches;

	calls.reserve(100);

	// Discover functions
	// Use the most simple method: search for instructions that calls them
	// And then filter invalid cases
	// TODO: Does not detect jumptables or fixed-addr indirect calls

	// BRASL only considered when the load address is known (absolute targets)
	const v128 brasl_mask = is_known_addr ? v128::from32p(0x62u << 23) : v128::from32p(umax);

	for (u32 i = utils::align<u32>(base_addr, 0x10); i < std::min<u32>(base_addr + ::size32(ls), 0x3FFF0); i += 0x10)
	{
		// Search for BRSL LR and BRASL LR or BR
		// TODO: BISL
		// Process four instruction words at once with vector compares
		const v128 inst = read_from_ptr<be_t<v128>>(ls.data(), i - base_addr);
		const v128 cleared_i16 = gv_and32(inst, v128::from32p(utils::rol32(~0xffff, 7)));
		const v128 eq_brsl = gv_eq32(cleared_i16, v128::from32p(0x66u << 23));
		const v128 eq_brasl = gv_eq32(cleared_i16, brasl_mask);
		const v128 eq_br = gv_eq32(cleared_i16, v128::from32p(0x64u << 23));
		const v128 result = eq_brsl | eq_brasl;

		if (!gv_testz(result))
		{
			for (u32 j = 0; j < 4; j++)
			{
				if (result.u32r[j])
				{
					calls.push_back(i + j * 4);
				}
			}
		}

		if (!gv_testz(eq_br))
		{
			for (u32 j = 0; j < 4; j++)
			{
				if (eq_br.u32r[j])
				{
					branches.push_back(i + j * 4);
				}
			}
		}
	}

	calls.erase(std::remove_if(calls.begin(), calls.end(), [&](u32 caller)
	{
		// Check the validity of both the callee code and the following caller code
		return !is_exec_code(caller, ls, base_addr, true) || !is_exec_code(caller
+ 4, ls, base_addr, true);
	}), calls.end());

	branches.erase(std::remove_if(branches.begin(), branches.end(), [&](u32 caller)
	{
		// Check the validity of the callee code
		return !is_exec_code(caller, ls, base_addr, true);
	}), branches.end());

	std::vector<u32> addrs;

	for (u32 addr : calls)
	{
		const spu_opcode_t op{read_from_ptr<be_t<u32>>(ls, addr - base_addr)};

		const u32 func = op_branch_targets(addr, op)[0];

		// Skip invalid/self/duplicate targets
		if (func == umax || addr + 4 == func || func == addr || std::count(addrs.begin(), addrs.end(), func))
		{
			continue;
		}

		if (std::count(calls.begin(), calls.end(), func))
		{
			// Cannot call another call instruction (link is overwritten)
			continue;
		}

		addrs.push_back(func);

		// Detect an "arguments passing" block, possible queue another function
		// (scan up to 10 instructions after the callee entry)
		for (u32 next = func, it = 10; it && next >= base_addr && next < std::min<u32>(base_addr + ::size32(ls), 0x3FFF0); it--, next += 4)
		{
			const spu_opcode_t test_op{read_from_ptr<be_t<u32>>(ls, next - base_addr)};
			const auto type = g_spu_itype.decode(test_op.opcode);

			if (type & spu_itype::branch && type != spu_itype::BR)
			{
				break;
			}

			if (type == spu_itype::UNK || !test_op.opcode)
			{
				break;
			}

			if (type != spu_itype::BR)
			{
				continue;
			}

			const u32 target = op_branch_targets(next, op)[0];

			if (target == umax || addr + 4 == target || target == addr || std::count(addrs.begin(), addrs.end(), target))
			{
				break;
			}

			// Detect backwards branch to the block in examination
			if (target >= func && target <= next)
			{
				break;
			}

			if (!is_exec_code(target, ls, base_addr, true))
			{
				break;
			}

			addrs.push_back(target);
			break;
		}
	}

	for (u32 addr : branches)
	{
		const spu_opcode_t op{read_from_ptr<be_t<u32>>(ls, addr - base_addr)};

		const u32 func = op_branch_targets(addr, op)[0];

		if (func == umax || addr + 4 == func || func == addr || !addr)
		{
			continue;
		}

		// Search for AI R1, -x in the called code
		// Reasoning: AI R1, -x means stack frame creation, this is likely be a function
		for (u32 next = func, it = 10; it && next >= base_addr && next < std::min<u32>(base_addr + ::size32(ls), 0x3FFF0); it--, next += 4)
		{
			const spu_opcode_t test_op{read_from_ptr<be_t<u32>>(ls, next - base_addr)};
			const auto type = g_spu_itype.decode(test_op.opcode);

			if (type & spu_itype::branch)
			{
				break;
			}

			if (type == spu_itype::UNK || !test_op.opcode)
			{
				break;
			}

			bool is_func = false;

			if (type == spu_itype::AI && test_op.rt == 1u && test_op.ra == 1u)
			{
				if (test_op.si10 >= 0)
				{
					break;
				}

				is_func = true;
			}

			if (!is_func)
			{
				continue;
			}

			addr = SPU_LS_SIZE + 4; // Terminate the next condition, no further checks needed

			if (std::count(addrs.begin(), addrs.end(), func))
			{
				break;
			}

			addrs.push_back(func);
			break;
		}

		// Search for AI R1, +x or OR R3/4, Rx, 0 before the branch
		// Reasoning: AI R1, +x means stack pointer restoration, branch after that is likely a tail call
		// R3 and R4 are common function arguments because they are the first two
		for (u32 back = addr - 4, it = 10; it && back >= base_addr && back < std::min<u32>(base_addr + ::size32(ls), 0x3FFF0); it--, back -= 4)
		{
			const spu_opcode_t test_op{read_from_ptr<be_t<u32>>(ls, back - base_addr)};
			const auto type = g_spu_itype.decode(test_op.opcode);

			if (type & spu_itype::branch)
			{
				break;
			}

			bool is_tail = false;

			if (type == spu_itype::AI && test_op.rt == 1u && test_op.ra == 1u)
			{
				if (test_op.si10 <= 0)
				{
					break;
				}

				is_tail = true;
			}
			else if (!(type & spu_itype::zregmod))
			{
				const u32 op_rt = type & spu_itype::_quadrop ? +test_op.rt4 : +test_op.rt;

				if (op_rt >= 80u && (type != spu_itype::LQD || test_op.ra != 1u))
				{
					// Modifying non-volatile registers, not a call (and not context restoration)
					break;
				}

				//is_tail = op_rt == 3u || op_rt == 4u;
			}

			if (!is_tail)
			{
				continue;
			}

			if (std::count(addrs.begin(), addrs.end(), func))
			{
				break;
			}

			addrs.push_back(func);
			break;
		}
	}

	std::sort(addrs.begin(), addrs.end());
	return addrs;
}

using reg_state_t = spu_recompiler_base::reg_state_t;
using vf = spu_recompiler_base::vf;

// True if the register holds a known 32-bit constant
bool reg_state_t::is_const() const
{
	return !!(flag & vf::is_const);
}

bool reg_state_t::compare_tags(const reg_state_t& rhs) const
{
	// Compare by tag, address of instruction origin
	return tag == rhs.tag && origin == rhs.origin && is_instruction == rhs.is_instruction;
}

bool reg_state_t::operator&(vf to_test) const
{
	return this->flag.all_of(to_test);
}

// True if the value is provably less than imm (by constant or by known-zero bits)
bool reg_state_t::is_less_than(u32 imm) const
{
	if (flag & vf::is_const && value < imm)
	{
		return true;
	}

	if (~known_zeroes < imm)
	{
		// The highest number possible within the mask's limit is less than imm
		return true;
	}

	return false;
}

bool reg_state_t::operator==(const reg_state_t& r) const
{
	// Flags other than is_null/is_mask must match exactly
	if ((flag ^ r.flag) - (vf::is_null + vf::is_mask))
	{
		return false;
	}

	return (flag & vf::is_const ?
value == r.value : (compare_tags(r) && known_ones == r.known_ones && known_zeroes == r.known_zeroes));
}

bool reg_state_t::operator==(u32 imm) const
{
	return flag == vf::is_const && value == imm;
}

// Compare equality but try to ignore changes in unmasked bits
bool reg_state_t::compare_with_mask_indifference(const reg_state_t& r, u32 mask_bits) const
{
	if (!mask_bits)
	{
		return true;
	}

	if ((r.flag & flag) & vf::is_const)
	{
		// Simplified path for consts
		if (((value ^ r.value) & mask_bits) == 0)
		{
			return true;
		}

		return false;
	}

	const bool is_equal = *this == r;

	if (is_equal)
	{
		return true;
	}

	// Compare via known-bits form, restricted to the masked bits
	const auto _this = this->downgrade();
	const auto _r = r.downgrade();

	const bool is_mask_equal = (_this.compare_tags(_r) && _this.flag == _r.flag && !((_this.known_ones ^ _r.known_ones) & mask_bits) && !((_this.known_zeroes ^ _r.known_zeroes) & mask_bits));

	return is_mask_equal;
}

// Overload comparing against an immediate, only meaningful for const states
bool reg_state_t::compare_with_mask_indifference(u32 imm, u32 mask_bits) const
{
	if (!mask_bits)
	{
		return true;
	}

	if (flag & vf::is_const)
	{
		if (((value ^ imm) & mask_bits) == 0)
		{
			return true;
		}
	}

	return false;
}

// Ensure unequality but try to ignore changes in unmasked bits
bool reg_state_t::unequal_with_mask_indifference(const reg_state_t& r, u32 mask_bits) const
{
	if (!mask_bits)
	{
		return true;
	}

	if ((r.flag & flag) & vf::is_const)
	{
		// Simplified path for consts
		if ((value ^ r.value) & mask_bits)
		{
			return true;
		}

		return false;
	}

	const bool is_equal = *this == r;

	if (is_equal)
	{
		return false;
	}

	// Full path
	const auto _this = this->downgrade();
	const auto _r = r.downgrade();

	const bool is_base_value_equal = (_this.compare_tags(_r) && _this.flag == _r.flag);

	if (!is_base_value_equal)
	{
		// Cannot ascertain unequality if the value origin is different
		return false;
	}

	// Find at least one bit that is known to be X state at value 'r', and known to be X^1 state at the objects' value
	return (((_this.known_ones ^ _r.known_ones) & mask_bits) & ((_this.known_zeroes ^ _r.known_zeroes) & mask_bits)) != 0;
}

// Convert a const (or null) state to the weaker known-bits (mask) representation
reg_state_t reg_state_t::downgrade() const
{
	if (flag & vf::is_const)
	{
		return reg_state_t{vf::is_mask, 0, umax, this->value, ~this->value, this->origin};
	}

	if (!(flag - vf::is_null))
	{
		return reg_state_t{vf::is_mask, 0, this->tag, 0, 0, this->origin};
	}

	return *this;
}

// Converge two states arriving from different control-flow paths
reg_state_t reg_state_t::merge(const reg_state_t& rhs, u32 current_pc) const
{
	if (rhs == *this)
	{
		// Perfect state: no conflicts
		return rhs;
	}

	if ((rhs.flag + flag).all_of(vf::is_const + vf::is_mask))
	{
		// Try to downgrade to a known-bits type value
		const reg_state_t _rhs = rhs.downgrade();
		const reg_state_t _this = this->downgrade();

		if ((_rhs.flag & _this.flag) & vf::is_mask)
		{
			// Now it is possible to merge the two values
			reg_state_t res{vf::is_mask, 0, 0, _rhs.known_ones & _this.known_ones, _rhs.known_zeroes & _this.known_zeroes};

			if (res.known_zeroes | res.known_ones)
			{
				// Success (create new value tag)
				res.tag = reg_state_t::alloc_tag();
				res.origin = current_pc;
				res.is_instruction = false;
				return res;
			}
		}
	}

	// Nothing useful survives the merge: fall back to an unknown value
	return make_unknown(current_pc);
}

// Layer this (newer) state over rhs (older); null means "unmodified, keep rhs"
reg_state_t reg_state_t::build_on_top_of(const reg_state_t& rhs) const
{
	if (flag & vf::is_null)
	{
		// Value unmodified
		return rhs;
	}

	if (rhs == *this)
	{
		// Perfect state: no conflicts
		return rhs;
	}

	return *this;
}

u32 reg_state_t::get_known_zeroes() const
{
	if (flag & vf::is_const)
	{
		return ~value;
	}

	return known_zeroes;
}

u32 reg_state_t::get_known_ones() const
{
	if (flag & vf::is_const)
	{
		return value;
	}

	return known_ones;
}

// Construct a const state holding the given value
reg_state_t reg_state_t::from_value(u32 value) noexcept
{
	reg_state_t v{};
	v.value = value;
	v.flag = vf::is_const;
	return v;
}

// Thread-local monotonic tag allocator; reset=true restarts numbering
u32 reg_state_t::alloc_tag(bool reset) noexcept
{
	static thread_local u32 g_tls_tag = 0;

	if (reset)
	{
		g_tls_tag = 0;
		return 0;
	}

	return ++g_tls_tag;
}

// Give the value a fresh tag if it was created at current_pc (and is not a const)
void reg_state_t::invalidate_if_created(u32 current_pc)
{
	if (!is_const() && origin == current_pc)
	{
		tag = reg_state_t::alloc_tag();
	}
}

// Converge 2 register states to the same flow in execution
template <usz N>
static void merge(std::array<reg_state_t, N>&
result, const std::array<reg_state_t, N>& lhs, const std::array<reg_state_t, N>& rhs, u32 current_pc)
{
	usz index = umax;

	// Element-wise merge of the two register files
	for (reg_state_t& state : result)
	{
		index++;
		state = lhs[index].merge(rhs[index], current_pc);
	}
}

// Override RHS state with the newer LHS state
template <usz N>
static void build_on_top_of(std::array<reg_state_t, N>& result, const std::array<reg_state_t, N>& lhs, const std::array<reg_state_t, N>& rhs)
{
	usz index = umax;

	for (reg_state_t& state : result)
	{
		index++;
		state = lhs[index].build_on_top_of(rhs[index]);
	}
}

// Per-basic-block register state used by the analyser's dataflow walk
struct block_reg_info
{
	u32 pc = SPU_LS_SIZE; // Address

	std::array<reg_state_t, s_reg_max> local_state{};
	bool has_true_state = false;
	std::array<reg_state_t, s_reg_max> start_reg_state{};
	std::array<reg_state_t, s_reg_max> end_reg_state{};
	std::array<reg_state_t, s_reg_max> addend_reg_state{};
	std::array<reg_state_t, s_reg_max> walkby_state{}; // State that is made by merging state_predecessor and iterating over instructions for final instrucion walk

	usz next_nodes_count = 0;

	struct node_t
	{
		u32 prev_pc = umax;
	};

	std::vector<node_t> prev_nodes;

	// Create a node at pc with all registers marked null (unmodified)
	static std::unique_ptr<block_reg_info> create(u32 pc) noexcept
	{
		auto ptr = new block_reg_info{ pc, reg_state_t::make_unknown<s_reg_max>(pc) };

		for (reg_state_t& f : ptr->local_state)
		{
			f.flag += vf::is_null;
		}

		ptr->start_reg_state = ptr->local_state;
		return std::unique_ptr<block_reg_info>(ptr);
	}

	// Evaluate registers state
	std::array<reg_state_t, s_reg_max>& evaluate_start_state(const std::map<u32, std::unique_ptr<block_reg_info>>& map, bool extensive_evaluation);

	// This function creates new node if not found and links the proceeding node to the old node
	// In a manner in which no duplicate paths are formed
	static void create_node(u32 pc_rhs, u32 parent_pc, std::map<u32, std::unique_ptr<block_reg_info>>& map)
	{
		//ensure(parent_node != pc_rhs);

		ensure(map[parent_pc]);

		if (!map[pc_rhs])
		{
			map[pc_rhs] = create(pc_rhs);
		}

		node_t prev_node{parent_pc};
		map[parent_pc]->next_nodes_count++;
map[pc_rhs]->prev_nodes.emplace_back(prev_node); } }; spu_program spu_recompiler_base::analyse(const be_t<u32>* ls, u32 entry_point, std::map<u32, std::vector<u32>>* out_target_list) { // Result: addr + raw instruction data spu_program result; result.data.reserve(10000); result.entry_point = entry_point; result.lower_bound = entry_point; // Initialize block entries m_block_info.reset(); m_block_info.set(entry_point / 4); m_entry_info.reset(); m_entry_info.set(entry_point / 4); m_ret_info.reset(); // Simple block entry workload list workload.clear(); workload.push_back(entry_point); std::memset(m_regmod.data(), 0xff, sizeof(m_regmod)); m_use_ra.reset(); m_use_rb.reset(); m_use_rc.reset(); m_targets.clear(); m_preds.clear(); m_preds[entry_point]; m_bbs.clear(); m_chunks.clear(); m_funcs.clear(); m_inst_attrs.clear(); m_patterns.clear(); // SYNC instruction found bool sync = false; u32 hbr_loc = 0; u32 hbr_tg = -1; // Result bounds u32 lsa = entry_point; u32 limit = 0x40000; if (g_cfg.core.spu_block_size == spu_block_size_type::giga) { } // Weak constant propagation context (for guessing branch targets) std::array<bs_t<vf>, 128> vflags{}; // Associated constant values for 32-bit preferred slot std::array<u32, 128> values; for (u32 wi = 0, wa = workload[0]; wi < workload.size();) { const auto next_block = [&] { // Reset value information vflags.fill({}); sync = false; hbr_loc = 0; hbr_tg = -1; wi++; if (wi < workload.size()) { wa = workload[wi]; } }; const u32 pos = wa; const auto add_block = [&](u32 target) { // Validate new target (TODO) if (target >= lsa && target < limit) { // Check for redundancy if (!m_block_info[target / 4]) { m_block_info[target / 4] = true; workload.push_back(target); } // Add predecessor if (std::find(m_preds[target].begin(), m_preds[target].end(), pos) == m_preds[target].end()) { m_preds[target].push_back(pos); } } }; if (pos < lsa || pos >= limit) { // Don't analyse if already beyond the limit next_block(); continue; } const u32 data = 
ls[pos / 4]; const auto op = spu_opcode_t{data}; wa += 4; m_targets.erase(pos); // Fill register access info if (auto iflags = g_spu_iflag.decode(data)) { if (+iflags & +spu_iflag::use_ra) m_use_ra.set(pos / 4); if (+iflags & +spu_iflag::use_rb) m_use_rb.set(pos / 4); if (+iflags & +spu_iflag::use_rc) m_use_rc.set(pos / 4); } // Analyse instruction switch (const auto type = g_spu_itype.decode(data)) { case spu_itype::UNK: case spu_itype::DFCEQ: case spu_itype::DFCMEQ: case spu_itype::DFCGT: case spu_itype::DFCMGT: case spu_itype::DFTSV: { next_block(); continue; } case spu_itype::SYNC: case spu_itype::STOP: case spu_itype::STOPD: { if (data == 0) { // Stop before null data next_block(); continue; } if (g_cfg.core.spu_block_size == spu_block_size_type::safe) { // Stop on special instructions (TODO) m_targets[pos]; next_block(); break; } if (type == spu_itype::SYNC) { // Remember sync = true; } break; } case spu_itype::IRET: { if (op.d && op.e) { spu_log.error("[0x%x] Invalid interrupt flags (DE)", pos); } m_targets[pos]; next_block(); break; } case spu_itype::BI: case spu_itype::BISL: case spu_itype::BISLED: case spu_itype::BIZ: case spu_itype::BINZ: case spu_itype::BIHZ: case spu_itype::BIHNZ: { if (op.d && op.e) { spu_log.error("[0x%x] Invalid interrupt flags (DE)", pos); } const auto af = vflags[op.ra]; const auto av = values[op.ra]; const bool sl = type == spu_itype::BISL || type == spu_itype::BISLED; if (sl) { m_regmod[pos / 4] = op.rt; vflags[op.rt] = +vf::is_const; values[op.rt] = pos + 4; } if (af & vf::is_const) { const u32 target = spu_branch_target(av); spu_log.warning("[0x%x] At 0x%x: indirect branch to 0x%x%s", entry_point, pos, target, op.d ? " (D)" : op.e ? 
" (E)" : ""); if (type == spu_itype::BI && target == pos + 4 && op.d) { // Disable interrupts idiom break; } m_targets[pos].push_back(target); if (g_cfg.core.spu_block_size == spu_block_size_type::giga) { if (sync) { spu_log.notice("[0x%x] At 0x%x: ignoring %scall to 0x%x (SYNC)", entry_point, pos, sl ? "" : "tail ", target); if (target > entry_point) { limit = std::min<u32>(limit, target); } } else { m_entry_info[target / 4] = true; add_block(target); } } else if (target > entry_point) { limit = std::min<u32>(limit, target); } if (sl && g_cfg.core.spu_block_size != spu_block_size_type::safe) { m_ret_info[pos / 4 + 1] = true; m_entry_info[pos / 4 + 1] = true; m_targets[pos].push_back(pos + 4); add_block(pos + 4); } } else if (type == spu_itype::BI && g_cfg.core.spu_block_size != spu_block_size_type::safe && !op.d && !op.e && !sync) { // Analyse jump table (TODO) std::vector<u32> jt_abs; std::vector<u32> jt_rel; const u32 start = pos + 4; u64 dabs = 0; u64 drel = 0; for (u32 i = start; i < limit; i += 4) { const u32 target = ls[i / 4]; if (target == 0 || target % 4) { // Address cannot be misaligned: abort break; } if (target >= lsa && target < 0x40000) { // Possible jump table entry (absolute) jt_abs.push_back(target); } if (target + start >= lsa && target + start < 0x40000) { // Possible jump table entry (relative) jt_rel.push_back(target + start); } if (std::max(jt_abs.size(), jt_rel.size()) * 4 + start <= i) { // Neither type of jump table completes jt_abs.clear(); jt_rel.clear(); break; } } // Choose position after the jt as an anchor and compute the average distance for (u32 target : jt_abs) { dabs += std::abs(static_cast<s32>(target - start - jt_abs.size() * 4)); } for (u32 target : jt_rel) { drel += std::abs(static_cast<s32>(target - start - jt_rel.size() * 4)); } // Add detected jump table blocks if (jt_abs.size() >= 3 || jt_rel.size() >= 3) { if (jt_abs.size() == jt_rel.size()) { if (dabs < drel) { jt_rel.clear(); } if (dabs > drel) { jt_abs.clear(); } 
ensure(jt_abs.size() != jt_rel.size()); } if (jt_abs.size() >= jt_rel.size()) { const u32 new_size = (start - lsa) / 4 + ::size32(jt_abs); if (result.data.size() < new_size) { result.data.resize(new_size); } for (u32 i = 0; i < jt_abs.size(); i++) { add_block(jt_abs[i]); result.data[(start - lsa) / 4 + i] = std::bit_cast<u32, be_t<u32>>(jt_abs[i]); m_targets[start + i * 4]; } m_targets.emplace(pos, std::move(jt_abs)); } if (jt_rel.size() >= jt_abs.size()) { const u32 new_size = (start - lsa) / 4 + ::size32(jt_rel); if (result.data.size() < new_size) { result.data.resize(new_size); } for (u32 i = 0; i < jt_rel.size(); i++) { add_block(jt_rel[i]); result.data[(start - lsa) / 4 + i] = std::bit_cast<u32, be_t<u32>>(jt_rel[i] - start); m_targets[start + i * 4]; } m_targets.emplace(pos, std::move(jt_rel)); } } else if (start + 12 * 4 < limit && ls[start / 4 + 0] == 0x1ce00408u && ls[start / 4 + 1] == 0x24000389u && ls[start / 4 + 2] == 0x24004809u && ls[start / 4 + 3] == 0x24008809u && ls[start / 4 + 4] == 0x2400c809u && ls[start / 4 + 5] == 0x24010809u && ls[start / 4 + 6] == 0x24014809u && ls[start / 4 + 7] == 0x24018809u && ls[start / 4 + 8] == 0x1c200807u && ls[start / 4 + 9] == 0x2401c809u) { spu_log.warning("[0x%x] Pattern 1 detected (hbr=0x%x:0x%x)", pos, hbr_loc, hbr_tg); // Add 8 targets (TODO) for (u32 addr = start + 4; addr < start + 36; addr += 4) { m_targets[pos].push_back(addr); add_block(addr); } } else if (hbr_loc > start && hbr_loc < limit && hbr_tg == start) { spu_log.warning("[0x%x] No patterns detected (hbr=0x%x:0x%x)", pos, hbr_loc, hbr_tg); } } else if (type == spu_itype::BI && sync) { spu_log.notice("[0x%x] At 0x%x: ignoring indirect branch (SYNC)", entry_point, pos); } if (type == spu_itype::BI || sl) { if (type == spu_itype::BI || g_cfg.core.spu_block_size == spu_block_size_type::safe) { m_targets[pos]; } else { m_ret_info[pos / 4 + 1] = true; m_entry_info[pos / 4 + 1] = true; m_targets[pos].push_back(pos + 4); add_block(pos + 4); } } else { 
m_targets[pos].push_back(pos + 4); add_block(pos + 4); } next_block(); break; } case spu_itype::BRSL: case spu_itype::BRASL: { const u32 target = spu_branch_target(type == spu_itype::BRASL ? 0 : pos, op.i16); m_regmod[pos / 4] = op.rt; vflags[op.rt] = +vf::is_const; values[op.rt] = pos + 4; if (type == spu_itype::BRSL && target == pos + 4) { // Get next instruction address idiom break; } m_targets[pos].push_back(target); if (g_cfg.core.spu_block_size != spu_block_size_type::safe) { m_ret_info[pos / 4 + 1] = true; m_entry_info[pos / 4 + 1] = true; m_targets[pos].push_back(pos + 4); add_block(pos + 4); } if (g_cfg.core.spu_block_size == spu_block_size_type::giga && !sync) { m_entry_info[target / 4] = true; add_block(target); } else { if (g_cfg.core.spu_block_size == spu_block_size_type::giga) { spu_log.notice("[0x%x] At 0x%x: ignoring fixed call to 0x%x (SYNC)", entry_point, pos, target); } if (target > entry_point) { limit = std::min<u32>(limit, target); } } next_block(); break; } case spu_itype::BRA: { const u32 target = spu_branch_target(0, op.i16); if (g_cfg.core.spu_block_size == spu_block_size_type::giga && !sync) { m_entry_info[target / 4] = true; } else { m_targets[pos].push_back(target); if (g_cfg.core.spu_block_size == spu_block_size_type::giga) { spu_log.notice("[0x%x] At 0x%x: ignoring fixed tail call to 0x%x (SYNC)", entry_point, pos, target); } } add_block(target); next_block(); break; } case spu_itype::BR: case spu_itype::BRZ: case spu_itype::BRNZ: case spu_itype::BRHZ: case spu_itype::BRHNZ: { const u32 target = spu_branch_target(pos, op.i16); if (target == pos + 4) { // Nop break; } m_targets[pos].push_back(target); add_block(target); if (type != spu_itype::BR) { m_targets[pos].push_back(pos + 4); add_block(pos + 4); } next_block(); break; } case spu_itype::DSYNC: case spu_itype::HEQ: case spu_itype::HEQI: case spu_itype::HGT: case spu_itype::HGTI: case spu_itype::HLGT: case spu_itype::HLGTI: case spu_itype::LNOP: case spu_itype::NOP: case 
spu_itype::MTSPR: case spu_itype::FSCRWR: case spu_itype::STQA: case spu_itype::STQD: case spu_itype::STQR: case spu_itype::STQX: { // Do nothing break; } case spu_itype::WRCH: { switch (op.ra) { case MFC_EAL: { m_regmod[pos / 4] = s_reg_mfc_eal; break; } case MFC_LSA: { m_regmod[pos / 4] = s_reg_mfc_lsa; break; } case MFC_TagID: { m_regmod[pos / 4] = s_reg_mfc_tag; break; } case MFC_Size: { m_regmod[pos / 4] = s_reg_mfc_size; break; } default: break; } break; } case spu_itype::LQA: case spu_itype::LQD: case spu_itype::LQR: case spu_itype::LQX: { // Unconst m_regmod[pos / 4] = op.rt; vflags[op.rt] = {}; break; } case spu_itype::HBR: { hbr_loc = spu_branch_target(pos, op.roh << 7 | op.rt); hbr_tg = vflags[op.ra] & vf::is_const && !op.c ? values[op.ra] & 0x3fffc : -1; break; } case spu_itype::HBRA: { hbr_loc = spu_branch_target(pos, op.r0h << 7 | op.rt); hbr_tg = spu_branch_target(0x0, op.i16); break; } case spu_itype::HBRR: { hbr_loc = spu_branch_target(pos, op.r0h << 7 | op.rt); hbr_tg = spu_branch_target(pos, op.i16); break; } case spu_itype::IL: { m_regmod[pos / 4] = op.rt; vflags[op.rt] = +vf::is_const; values[op.rt] = op.si16; break; } case spu_itype::ILA: { m_regmod[pos / 4] = op.rt; vflags[op.rt] = +vf::is_const; values[op.rt] = op.i18; break; } case spu_itype::ILH: { m_regmod[pos / 4] = op.rt; vflags[op.rt] = +vf::is_const; values[op.rt] = op.i16 << 16 | op.i16; break; } case spu_itype::ILHU: { m_regmod[pos / 4] = op.rt; vflags[op.rt] = +vf::is_const; values[op.rt] = op.i16 << 16; break; } case spu_itype::IOHL: { m_regmod[pos / 4] = op.rt; values[op.rt] = values[op.rt] | op.i16; break; } case spu_itype::ORI: { m_regmod[pos / 4] = op.rt; vflags[op.rt] = vflags[op.ra] & vf::is_const; values[op.rt] = values[op.ra] | op.si10; break; } case spu_itype::OR: { m_regmod[pos / 4] = op.rt; vflags[op.rt] = vflags[op.ra] & vflags[op.rb] & vf::is_const; values[op.rt] = values[op.ra] | values[op.rb]; break; } case spu_itype::ANDI: { m_regmod[pos / 4] = op.rt; vflags[op.rt] 
= vflags[op.ra] & vf::is_const; values[op.rt] = values[op.ra] & op.si10; break; } case spu_itype::AND: { m_regmod[pos / 4] = op.rt; vflags[op.rt] = vflags[op.ra] & vflags[op.rb] & vf::is_const; values[op.rt] = values[op.ra] & values[op.rb]; break; } case spu_itype::AI: { m_regmod[pos / 4] = op.rt; vflags[op.rt] = vflags[op.ra] & vf::is_const; values[op.rt] = values[op.ra] + op.si10; break; } case spu_itype::A: { m_regmod[pos / 4] = op.rt; vflags[op.rt] = vflags[op.ra] & vflags[op.rb] & vf::is_const; values[op.rt] = values[op.ra] + values[op.rb]; break; } case spu_itype::SFI: { m_regmod[pos / 4] = op.rt; vflags[op.rt] = vflags[op.ra] & vf::is_const; values[op.rt] = op.si10 - values[op.ra]; break; } case spu_itype::SF: { m_regmod[pos / 4] = op.rt; vflags[op.rt] = vflags[op.ra] & vflags[op.rb] & vf::is_const; values[op.rt] = values[op.rb] - values[op.ra]; break; } case spu_itype::ROTMI: { m_regmod[pos / 4] = op.rt; if ((0 - op.i7) & 0x20) { vflags[op.rt] = +vf::is_const; values[op.rt] = 0; break; } vflags[op.rt] = vflags[op.ra] & vf::is_const; values[op.rt] = values[op.ra] >> ((0 - op.i7) & 0x1f); break; } case spu_itype::SHLI: { m_regmod[pos / 4] = op.rt; if (op.i7 & 0x20) { vflags[op.rt] = +vf::is_const; values[op.rt] = 0; break; } vflags[op.rt] = vflags[op.ra] & vf::is_const; values[op.rt] = values[op.ra] << (op.i7 & 0x1f); break; } default: { // Unconst const u32 op_rt = type & spu_itype::_quadrop ? 
+op.rt4 : +op.rt; m_regmod[pos / 4] = op_rt; vflags[op_rt] = {}; break; } } // Insert raw instruction value const u32 new_size = (pos - lsa) / 4; if (result.data.size() <= new_size) { if (result.data.size() < new_size) { result.data.resize(new_size); } result.data.emplace_back(std::bit_cast<u32, be_t<u32>>(data)); } else if (u32& raw_val = result.data[new_size]) { ensure(raw_val == std::bit_cast<u32, be_t<u32>>(data)); } else { raw_val = std::bit_cast<u32, be_t<u32>>(data); } } while (lsa > 0 || limit < 0x40000) { const u32 initial_size = ::size32(result.data); // Check unreachable blocks limit = std::min<u32>(limit, lsa + initial_size * 4); for (auto& pair : m_preds) { bool reachable = false; if (pair.first >= limit) { continue; } // All (direct and indirect) predecessors to check std::vector<u32> workload; // Bit array used to deduplicate workload list workload.push_back(pair.first); m_bits[pair.first / 4] = true; for (usz i = 0; !reachable && i < workload.size(); i++) { for (u32 j = workload[i];; j -= 4) { // Go backward from an address until the entry point is reached if (j == entry_point) { reachable = true; break; } const auto found = m_preds.find(j); bool had_fallthrough = false; if (found != m_preds.end()) { for (u32 new_pred : found->second) { // Check whether the predecessor is previous instruction if (new_pred == j - 4) { had_fallthrough = true; continue; } // Check whether in range and not already added if (new_pred >= lsa && new_pred < limit && !m_bits[new_pred / 4]) { workload.push_back(new_pred); m_bits[new_pred / 4] = true; } } } // Check for possible fallthrough predecessor if (!had_fallthrough) { if (::at32(result.data, (j - lsa) / 4 - 1) == 0 || m_targets.count(j - 4)) { break; } } if (i == 0) { // TODO } } } for (u32 pred : workload) { m_bits[pred / 4] = false; } if (!reachable && pair.first < limit) { limit = pair.first; } } result.data.resize((limit - lsa) / 4); // Check holes in safe mode (TODO) u32 valid_size = 0; for (u32 i = 0; i < 
result.data.size(); i++) { if (result.data[i] == 0) { const u32 pos = lsa + i * 4; const u32 data = ls[pos / 4]; // Allow only NOP or LNOP instructions in holes if (data == 0x200000 || (data & 0xffffff80) == 0x40200000) { continue; } if (g_cfg.core.spu_block_size != spu_block_size_type::giga) { result.data.resize(valid_size); break; } } else { valid_size = i + 1; } } // Even if NOP or LNOP, should be removed at the end result.data.resize(valid_size); // Repeat if blocks were removed if (result.data.size() == initial_size) { break; } } limit = std::min<u32>(limit, lsa + ::size32(result.data) * 4); m_inst_attrs.resize(result.data.size()); // Cleanup block info for (u32 i = 0; i < workload.size(); i++) { const u32 addr = workload[i]; if (addr < lsa || addr >= limit || !result.data[(addr - lsa) / 4]) { m_block_info[addr / 4] = false; m_entry_info[addr / 4] = false; m_ret_info[addr / 4] = false; m_preds.erase(addr); } } // Complete m_preds and associated m_targets for adjacent blocks for (auto it = m_preds.begin(); it != m_preds.end();) { if (it->first < lsa || it->first >= limit) { it = m_preds.erase(it); continue; } // Erase impossible predecessors const auto new_end = std::remove_if(it->second.begin(), it->second.end(), [&](u32 addr) { return addr < lsa || addr >= limit; }); it->second.erase(new_end, it->second.end()); // Don't add fallthrough target if all predecessors are removed if (it->second.empty() && !m_entry_info[it->first / 4]) { // If not an entry point, remove the block completely m_block_info[it->first / 4] = false; it = m_preds.erase(it); continue; } // Previous instruction address const u32 prev = (it->first - 4) & 0x3fffc; // TODO: check the correctness if (m_targets.count(prev) == 0 && prev >= lsa && prev < limit && result.data[(prev - lsa) / 4]) { // Add target and the predecessor m_targets[prev].push_back(it->first); it->second.push_back(prev); } it++; } if (out_target_list) { out_target_list->insert(m_targets.begin(), m_targets.end()); } // Remove 
unnecessary target lists for (auto it = m_targets.begin(); it != m_targets.end();) { if (it->first < lsa || it->first >= limit) { it = m_targets.erase(it); continue; } it++; } // Fill holes which contain only NOP and LNOP instructions (TODO: compile) for (u32 i = 0, nnop = 0, vsize = 0; i <= result.data.size(); i++) { if (i >= result.data.size() || result.data[i]) { if (nnop && nnop == i - vsize) { // Write only complete NOP sequence for (u32 j = vsize; j < i; j++) { result.data[j] = std::bit_cast<u32, be_t<u32>>(ls[lsa / 4 + j]); } } nnop = 0; vsize = i + 1; } else { const u32 pos = lsa + i * 4; const u32 data = ls[pos / 4]; if (data == 0x200000 || (data & 0xffffff80) == 0x40200000) { nnop++; } } } // Fill block info for (auto& pred : m_preds) { auto& block = m_bbs[pred.first]; // Copy predeccessors (wrong at this point, needs a fixup later) block.preds = pred.second; // Fill register usage info for (u32 ia = pred.first; ia < limit; ia += 4) { block.size++; // Decode instruction const spu_opcode_t op{std::bit_cast<be_t<u32>>(result.data[(ia - lsa) / 4])}; const auto type = g_spu_itype.decode(op.opcode); u8 reg_save = 255; if (type == spu_itype::STQD && op.ra == s_reg_sp && !block.reg_mod[op.rt] && !block.reg_use[op.rt]) { // Register saved onto the stack before use block.reg_save_dom[op.rt] = true; reg_save = op.rt; } for (auto _use : std::initializer_list<std::pair<u32, bool>>{{op.ra, m_use_ra.test(ia / 4)} , {op.rb, m_use_rb.test(ia / 4)}, {op.rc, m_use_rc.test(ia / 4)}}) { if (_use.second) { const u32 reg = _use.first; // Register reg use only if it happens before reg mod if (!block.reg_mod[reg]) { block.reg_use.set(reg); if (reg_save != reg && block.reg_save_dom[reg]) { // Register is still used after saving; probably not eligible for optimization block.reg_save_dom[reg] = false; } } } } if (type == spu_itype::WRCH && op.ra == MFC_Cmd) { // Expand MFC_Cmd reg use for (u8 reg : {s_reg_mfc_lsa, s_reg_mfc_tag, s_reg_mfc_size}) { if (!block.reg_mod[reg]) 
block.reg_use.set(reg); } } // Register reg modification if (u8 reg = m_regmod[ia / 4]; reg < s_reg_max) { block.reg_mod.set(reg); block.reg_mod_xf.set(reg, type & spu_itype::xfloat); if (type == spu_itype::SELB && (block.reg_mod_xf[op.ra] || block.reg_mod_xf[op.rb])) block.reg_mod_xf.set(reg); // Possible post-dominating register load if (type == spu_itype::LQD && op.ra == s_reg_sp) block.reg_load_mod[reg] = ia + 1; else block.reg_load_mod[reg] = 0; } // Find targets (also means end of the block) const auto tfound = m_targets.find(ia); if (tfound != m_targets.end()) { // Copy targets block.targets = tfound->second; // Assume that the call reads and modifies all volatile registers (TODO) bool is_call = false; bool is_tail = false; switch (type) { case spu_itype::BRSL: is_call = spu_branch_target(ia, op.i16) != ia + 4; break; case spu_itype::BRASL: is_call = spu_branch_target(0, op.i16) != ia + 4; break; case spu_itype::BISL: case spu_itype::BISLED: is_call = true; break; default: break; } if (is_call) { for (u32 i = 0; i < s_reg_max; ++i) { if (i == s_reg_lr || (i >= 2 && i < s_reg_80) || i > s_reg_127) { if (!block.reg_mod[i]) block.reg_use.set(i); if (!is_tail) { block.reg_mod.set(i); block.reg_mod_xf[i] = false; } } } } break; } } } // Fixup block predeccessors to point to basic blocks, not last instructions for (auto& bb : m_bbs) { const u32 addr = bb.first; for (u32& pred : bb.second.preds) { pred = std::prev(m_bbs.upper_bound(pred))->first; } if (m_entry_info[addr / 4] && g_cfg.core.spu_block_size == spu_block_size_type::giga) { // Register empty chunk m_chunks.push_back(addr); // Register function if necessary if (!m_ret_info[addr / 4]) { m_funcs[addr]; } } } // Ensure there is a function at the lowest address if (g_cfg.core.spu_block_size == spu_block_size_type::giga) { if (auto emp = m_funcs.try_emplace(m_bbs.begin()->first); emp.second) { const u32 addr = emp.first->first; spu_log.error("[0x%05x] Fixed first function at 0x%05x", entry_point, addr); 
m_entry_info[addr / 4] = true; m_ret_info[addr / 4] = false; } } // Split functions while (g_cfg.core.spu_block_size == spu_block_size_type::giga) { bool need_repeat = false; u32 start = 0; u32 limit = 0x40000; // Walk block list in ascending order for (auto& block : m_bbs) { const u32 addr = block.first; if (m_entry_info[addr / 4] && !m_ret_info[addr / 4]) { const auto upper = m_funcs.upper_bound(addr); start = addr; limit = upper == m_funcs.end() ? 0x40000 : upper->first; } // Find targets that exceed [start; limit) range and make new functions from them for (u32 target : block.second.targets) { const auto tfound = m_bbs.find(target); if (tfound == m_bbs.end()) { continue; } if (target < start || target >= limit) { if (!m_entry_info[target / 4] || m_ret_info[target / 4]) { // Create new function entry (likely a tail call) m_entry_info[target / 4] = true; m_ret_info[target / 4] = false; m_funcs.try_emplace(target); if (target < limit) { need_repeat = true; } } } } block.second.func = start; } if (!need_repeat) { break; } } if (!m_bbs.count(entry_point)) { // Invalid code spu_log.error("[0x%x] Invalid code", entry_point); return {}; } // Fill entry map while (true) { workload.clear(); workload.push_back(entry_point); ensure(m_bbs.count(entry_point)); std::vector<u32> new_entries; for (u32 wi = 0; wi < workload.size(); wi++) { const u32 addr = workload[wi]; auto& block = ::at32(m_bbs, addr); const u32 _new = block.chunk; if (!m_entry_info[addr / 4]) { // Check block predecessors for (u32 pred : block.preds) { const u32 _old = ::at32(m_bbs, pred).chunk; if (_old < 0x40000 && _old != _new) { // If block has multiple 'entry' points, it becomes an entry point itself new_entries.push_back(addr); } } } // Update chunk address block.chunk = m_entry_info[addr / 4] ? 
addr : _new; // Process block targets for (u32 target : block.targets) { const auto tfound = m_bbs.find(target); if (tfound == m_bbs.end()) { continue; } auto& tb = tfound->second; const u32 value = m_entry_info[target / 4] ? target : block.chunk; if (u32& tval = tb.chunk; tval < 0x40000) { // TODO: fix condition if (tval != value && !m_entry_info[target / 4]) { new_entries.push_back(target); } } else { tval = value; workload.emplace_back(target); } } } if (new_entries.empty()) { break; } for (u32 entry : new_entries) { m_entry_info[entry / 4] = true; // Acknowledge artificial (reversible) chunk entry point m_ret_info[entry / 4] = true; } for (auto& bb : m_bbs) { // Reset chunk info bb.second.chunk = 0x40000; } } workload.clear(); workload.push_back(entry_point); // Fill workload adding targets for (u32 wi = 0; wi < workload.size(); wi++) { const u32 addr = workload[wi]; auto& block = ::at32(m_bbs, addr); block.analysed = true; for (u32 target : block.targets) { const auto tfound = m_bbs.find(target); if (tfound == m_bbs.end()) { continue; } auto& tb = tfound->second; if (!tb.analysed) { workload.push_back(target); tb.analysed = true; } // Limited xfloat hint propagation (possibly TODO) if (tb.chunk == block.chunk) { tb.reg_maybe_xf &= block.reg_mod_xf; } else { tb.reg_maybe_xf.reset(); } } block.reg_origin.fill(0x80000000); block.reg_origin_abs.fill(0x80000000); } // Fill register origin info while (true) { bool must_repeat = false; for (u32 wi = 0; wi < workload.size(); wi++) { const u32 addr = workload[wi]; auto& block = ::at32(m_bbs, addr); // Initialize entry point with default value: unknown origin (requires load) if (m_entry_info[addr / 4]) { for (u32 i = 0; i < s_reg_max; i++) { if (block.reg_origin[i] == 0x80000000) block.reg_origin[i] = 0x40000; } } if (g_cfg.core.spu_block_size == spu_block_size_type::giga && m_entry_info[addr / 4] && !m_ret_info[addr / 4]) { for (u32 i = 0; i < s_reg_max; i++) { if (block.reg_origin_abs[i] == 0x80000000) 
block.reg_origin_abs[i] = 0x40000; else if (block.reg_origin_abs[i] + 1 == 0) block.reg_origin_abs[i] = -2; } } for (u32 target : block.targets) { const auto tfound = m_bbs.find(target); if (tfound == m_bbs.end()) { continue; } auto& tb = tfound->second; for (u32 i = 0; i < s_reg_max; i++) { if (tb.chunk == block.chunk && tb.reg_origin[i] + 1) { const u32 expected = block.reg_mod[i] ? addr : block.reg_origin[i]; if (tb.reg_origin[i] == 0x80000000) { tb.reg_origin[i] = expected; } else if (tb.reg_origin[i] != expected) { // Set -1 if multiple origins merged (requires PHI node) tb.reg_origin[i] = -1; must_repeat |= !tb.targets.empty(); } } if (g_cfg.core.spu_block_size == spu_block_size_type::giga && tb.func == block.func && tb.reg_origin_abs[i] + 2) { const u32 expected = block.reg_mod[i] ? addr : block.reg_origin_abs[i]; if (tb.reg_origin_abs[i] == 0x80000000) { tb.reg_origin_abs[i] = expected; } else if (tb.reg_origin_abs[i] != expected) { if (tb.reg_origin_abs[i] == 0x40000 || expected + 2 == 0 || expected == 0x40000) { // Set -2: sticky value indicating possible external reg origin (0x40000) tb.reg_origin_abs[i] = -2; must_repeat |= !tb.targets.empty(); } else if (tb.reg_origin_abs[i] + 1) { tb.reg_origin_abs[i] = -1; must_repeat |= !tb.targets.empty(); } } } } } } if (!must_repeat) { break; } for (u32 wi = 0; wi < workload.size(); wi++) { const u32 addr = workload[wi]; auto& block = ::at32(m_bbs, addr); // Reset values for the next attempt (keep negative values) for (u32 i = 0; i < s_reg_max; i++) { if (block.reg_origin[i] <= 0x40000) block.reg_origin[i] = 0x80000000; if (block.reg_origin_abs[i] <= 0x40000) block.reg_origin_abs[i] = 0x80000000; } } } // Fill more block info for (u32 wi = 0; wi < workload.size(); wi++) { if (g_cfg.core.spu_block_size != spu_block_size_type::giga) { break; } const u32 addr = workload[wi]; auto& bb = ::at32(m_bbs, addr); auto& func = ::at32(m_funcs, bb.func); // Update function size func.size = std::max<u16>(func.size, bb.size + 
(addr - bb.func) / 4); // Copy constants according to reg origin info for (u32 i = 0; i < s_reg_max; i++) { const u32 orig = bb.reg_origin_abs[i]; if (orig < 0x40000) { auto& src = ::at32(m_bbs, orig); bb.reg_const[i] = src.reg_const[i]; bb.reg_val32[i] = src.reg_val32[i]; } if (!bb.reg_save_dom[i] && bb.reg_use[i] && (orig == 0x40000 || orig + 2 == 0)) { // Destroy offset if external reg value is used func.reg_save_off[i] = -1; } } if (u32 orig = bb.reg_origin_abs[s_reg_sp]; orig < 0x40000) { auto& prologue = ::at32(m_bbs, orig); // Copy stack offset (from the assumed prologue) bb.stack_sub = prologue.stack_sub; } else if (orig > 0x40000) { // Unpredictable stack bb.stack_sub = 0x80000000; } spu_opcode_t op{}; auto last_inst = spu_itype::UNK; for (u32 ia = addr; ia < addr + bb.size * 4; ia += 4) { // Decode instruction again op.opcode = std::bit_cast<be_t<u32>>(result.data[(ia - lsa) / 41]); last_inst = g_spu_itype.decode(op.opcode); // Propagate some constants switch (last_inst) { case spu_itype::IL: { bb.reg_const[op.rt] = true; bb.reg_val32[op.rt] = op.si16; break; } case spu_itype::ILA: { bb.reg_const[op.rt] = true; bb.reg_val32[op.rt] = op.i18; break; } case spu_itype::ILHU: { bb.reg_const[op.rt] = true; bb.reg_val32[op.rt] = op.i16 << 16; break; } case spu_itype::ILH: { bb.reg_const[op.rt] = true; bb.reg_val32[op.rt] = op.i16 << 16 | op.i16; break; } case spu_itype::IOHL: { bb.reg_val32[op.rt] = bb.reg_val32[op.rt] | op.i16; break; } case spu_itype::ORI: { bb.reg_const[op.rt] = bb.reg_const[op.ra]; bb.reg_val32[op.rt] = bb.reg_val32[op.ra] | op.si10; break; } case spu_itype::OR: { bb.reg_const[op.rt] = bb.reg_const[op.ra] && bb.reg_const[op.rb]; bb.reg_val32[op.rt] = bb.reg_val32[op.ra] | bb.reg_val32[op.rb]; break; } case spu_itype::AI: { bb.reg_const[op.rt] = bb.reg_const[op.ra]; bb.reg_val32[op.rt] = bb.reg_val32[op.ra] + op.si10; break; } case spu_itype::A: { bb.reg_const[op.rt] = bb.reg_const[op.ra] && bb.reg_const[op.rb]; bb.reg_val32[op.rt] = 
bb.reg_val32[op.ra] + bb.reg_val32[op.rb]; break; } case spu_itype::SFI: { bb.reg_const[op.rt] = bb.reg_const[op.ra]; bb.reg_val32[op.rt] = op.si10 - bb.reg_val32[op.ra]; break; } case spu_itype::SF: { bb.reg_const[op.rt] = bb.reg_const[op.ra] && bb.reg_const[op.rb]; bb.reg_val32[op.rt] = bb.reg_val32[op.rb] - bb.reg_val32[op.ra]; break; } case spu_itype::STQD: { if (op.ra == s_reg_sp && bb.stack_sub != 0x80000000 && bb.reg_save_dom[op.rt]) { const u32 offset = 0x80000000 + op.si10 * 16 - bb.stack_sub; if (func.reg_save_off[op.rt] == 0) { // Store reg save offset func.reg_save_off[op.rt] = offset; } else if (func.reg_save_off[op.rt] != offset) { // Conflict of different offsets func.reg_save_off[op.rt] = -1; } } break; } case spu_itype::LQD: { if (op.ra == s_reg_sp && bb.stack_sub != 0x80000000 && bb.reg_load_mod[op.rt] == ia + 1) { // Adjust reg load offset bb.reg_load_mod[op.rt] = 0x80000000 + op.si10 * 16 - bb.stack_sub; } // Clear const bb.reg_const[op.rt] = false; break; } default: { // Clear const if reg is modified here if (u8 reg = m_regmod[ia / 4]; reg < s_reg_max) bb.reg_const[reg] = false; break; } } // $SP is modified if (m_regmod[ia / 4] == s_reg_sp) { if (bb.reg_const[s_reg_sp]) { // Making $SP a constant is a funny thing too. bb.stack_sub = 0x80000000; } if (bb.stack_sub != 0x80000000) { switch (last_inst) { case spu_itype::AI: { if (op.ra == s_reg_sp) bb.stack_sub -= op.si10; else bb.stack_sub = 0x80000000; break; } case spu_itype::A: { if (op.ra == s_reg_sp && bb.reg_const[op.rb]) bb.stack_sub -= bb.reg_val32[op.rb]; else if (op.rb == s_reg_sp && bb.reg_const[op.ra]) bb.stack_sub -= bb.reg_val32[op.ra]; else bb.stack_sub = 0x80000000; break; } case spu_itype::SF: { if (op.rb == s_reg_sp && bb.reg_const[op.ra]) bb.stack_sub += bb.reg_val32[op.ra]; else bb.stack_sub = 0x80000000; break; } default: { bb.stack_sub = 0x80000000; break; } } } // Check for funny values. 
if (bb.stack_sub >= 0x40000 || bb.stack_sub % 16) { bb.stack_sub = 0x80000000; } } } // Analyse terminator instruction const u32 tia = addr + bb.size * 4 - 4; switch (last_inst) { case spu_itype::BR: case spu_itype::BRNZ: case spu_itype::BRZ: case spu_itype::BRHNZ: case spu_itype::BRHZ: case spu_itype::BRSL: { const u32 target = spu_branch_target(tia, op.i16); if (target == tia + 4) { bb.terminator = term_type::fallthrough; } else if (last_inst != spu_itype::BRSL) { // No-op terminator or simple branch instruction bb.terminator = term_type::br; if (target == bb.func) { // Recursive tail call bb.terminator = term_type::ret; } } else if (op.rt == s_reg_lr) { bb.terminator = term_type::call; } else { bb.terminator = term_type::interrupt_call; } break; } case spu_itype::BRA: case spu_itype::BRASL: { bb.terminator = term_type::indirect_call; break; } case spu_itype::BI: { if (op.d || op.e || bb.targets.size() == 1) { bb.terminator = term_type::interrupt_call; } else if (bb.targets.size() > 1) { // Jump table bb.terminator = term_type::br; } else if (op.ra == s_reg_lr) { // Return (TODO) bb.terminator = term_type::ret; } else { // Indirect tail call (TODO) bb.terminator = term_type::interrupt_call; } break; } case spu_itype::BISLED: case spu_itype::IRET: { bb.terminator = term_type::interrupt_call; break; } case spu_itype::BISL: case spu_itype::BIZ: case spu_itype::BINZ: case spu_itype::BIHZ: case spu_itype::BIHNZ: { if (op.d || op.e || bb.targets.size() != 1) { bb.terminator = term_type::interrupt_call; } else if (last_inst != spu_itype::BISL && bb.targets[0] == tia + 4 && op.ra == s_reg_lr) { // Conditional return (TODO) bb.terminator = term_type::ret; } else if (last_inst == spu_itype::BISL) { // Indirect call bb.terminator = term_type::indirect_call; } else { // TODO bb.terminator = term_type::interrupt_call; } break; } default: { // Normal instruction bb.terminator = term_type::fallthrough; break; } } } // Check function blocks, verify and print some reasons for 
(auto& f : m_funcs) { if (g_cfg.core.spu_block_size != spu_block_size_type::giga) { break; } bool is_ok = true; u32 used_stack = 0; for (auto it = m_bbs.lower_bound(f.first); it != m_bbs.end() && it->second.func == f.first; ++it) { auto& bb = it->second; auto& func = ::at32(m_funcs, bb.func); const u32 addr = it->first; const u32 flim = bb.func + func.size * 4; used_stack |= bb.stack_sub; if (is_ok && bb.terminator >= term_type::indirect_call) { is_ok = false; } if (is_ok && bb.terminator == term_type::ret) { // Check $LR (alternative return registers are currently not supported) if (u32 lr_orig = bb.reg_mod[s_reg_lr] ? addr : bb.reg_origin_abs[s_reg_lr]; lr_orig < 0x40000) { auto& src = ::at32(m_bbs, lr_orig); if (src.reg_load_mod[s_reg_lr] != func.reg_save_off[s_reg_lr]) { spu_log.error("Function 0x%05x: [0x%05x] $LR mismatch (src=0x%x; 0x%x vs 0x%x)", f.first, addr, lr_orig, src.reg_load_mod[0], func.reg_save_off[0]); is_ok = false; } else if (src.reg_load_mod[s_reg_lr] == 0) { spu_log.error("Function 0x%05x: [0x%05x] $LR modified (src=0x%x)", f.first, addr, lr_orig); is_ok = false; } } else if (lr_orig > 0x40000) { spu_log.todo("Function 0x%05x: [0x%05x] $LR unpredictable (src=0x%x)", f.first, addr, lr_orig); is_ok = false; } // Check $80..$127 (should be restored or unmodified) for (u32 i = s_reg_80; is_ok && i <= s_reg_127; i++) { if (u32 orig = bb.reg_mod[i] ? 
addr : bb.reg_origin_abs[i]; orig < 0x40000) { auto& src = ::at32(m_bbs, orig); if (src.reg_load_mod[i] != func.reg_save_off[i]) { spu_log.error("Function 0x%05x: [0x%05x] $%u mismatch (src=0x%x; 0x%x vs 0x%x)", f.first, addr, i, orig, src.reg_load_mod[i], func.reg_save_off[i]); is_ok = false; } } else if (orig > 0x40000) { spu_log.todo("Function 0x%05x: [0x%05x] $%u unpredictable (src=0x%x)", f.first, addr, i, orig); is_ok = false; } if (func.reg_save_off[i] + 1 == 0) { spu_log.error("Function 0x%05x: [0x%05x] $%u used incorrectly", f.first, addr, i); is_ok = false; } } // Check $SP (should be restored or unmodified) if (bb.stack_sub != 0 && bb.stack_sub != 0x80000000) { spu_log.error("Function 0x%05x: [0x%05x] return with stack frame 0x%x", f.first, addr, bb.stack_sub); is_ok = false; } } if (is_ok && bb.terminator == term_type::call) { // Check call instruction (TODO) if (bb.stack_sub == 0) { // Call without a stack frame spu_log.error("Function 0x%05x: [0x%05x] frameless call", f.first, addr); is_ok = false; } } if (is_ok && bb.terminator == term_type::fallthrough) { // Can't just fall out of the function if (bb.targets.size() != 1 || bb.targets[0] >= flim) { spu_log.error("Function 0x%05x: [0x%05x] bad fallthrough to 0x%x", f.first, addr, bb.targets[0]); is_ok = false; } } if (is_ok && bb.stack_sub == 0x80000000) { spu_log.error("Function 0x%05x: [0x%05x] bad stack frame", f.first, addr); is_ok = false; } // Fill external function targets (calls, possibly tail calls) for (u32 target : bb.targets) { if (target < bb.func || target >= flim || (bb.terminator == term_type::call && target == bb.func)) { if (std::find(func.calls.begin(), func.calls.end(), target) == func.calls.end()) { func.calls.push_back(target); } } } } if (is_ok && used_stack && f.first == entry_point) { spu_log.error("Function 0x%05x: considered possible chunk", f.first); is_ok = false; } // if (is_ok && f.first > 0x1d240 && f.first < 0x1e000) // { // spu_log.error("Function 0x%05x: manually 
disabled", f.first); // is_ok = false; // } f.second.good = is_ok; } // Check function call graph while (g_cfg.core.spu_block_size == spu_block_size_type::giga) { bool need_repeat = false; for (auto& f : m_funcs) { if (!f.second.good) { continue; } for (u32 call : f.second.calls) { const auto ffound = std::as_const(m_funcs).find(call); if (ffound == m_funcs.cend() || ffound->second.good == false) { need_repeat = true; if (f.second.good) { spu_log.error("Function 0x%05x: calls bad function (0x%05x)", f.first, call); f.second.good = false; } } } } if (!need_repeat) { break; } } auto sort_breakig_reasons = [](const std::array<atomic_t<u64>, 128>& breaking_reason) { std::vector<std::pair<u32, u64>> map; for (u32 i = 0; i < static_cast<u32>(breaking_reason.size()); i++) { if (u64 v = breaking_reason[i]) { map.emplace_back(i, v); } } std::stable_sort(map.begin(), map.end(), FN(x.second > y.second)); return map; }; struct putllc16_statistics_t { atomic_t<u64> all = 0; atomic_t<u64> single = 0; atomic_t<u64> nowrite = 0; std::array<atomic_t<u64>, 128> breaking_reason{}; }; struct rchcnt_statistics_t { atomic_t<u64> all = 0; atomic_t<u64> single = 0; std::array<atomic_t<u64>, 128> breaking_reason{}; }; // Pattern structures struct atomic16_t { bool active = false; // GETLLAR happened u32 lsa_pc = SPU_LS_SIZE; // PC of first LSA write u32 lsa_last_pc = SPU_LS_SIZE; // PC of first LSA write u32 get_pc = SPU_LS_SIZE; // PC of GETLLAR u32 put_pc = SPU_LS_SIZE; // PC of PUTLLC reg_state_t ls{}; // state of LS load/store address register reg_state_t ls_offs = reg_state_t::from_value(0); // Added value to ls reg_state_t lsa{}; // state of LSA register on GETLLAR reg_state_t ls_reg[8]{}; // stores/loads using register bundles with offset reg_state_t ls_abs[8]{}; // stores/loads using absolute address u32 reg = s_reg_max; // Source of address register of LS load/store u32 reg2 = s_reg_max; // Source 2 of address register of LS load/store (STQX/LQX) //u32 ls_offs[8]{}; // LS offset 
from register (0 if const) bool ls_pc_rel = false; // For STQR/LQR bool ls_access = false; // LS accessed bool ls_write = false; // LS written bool ls_invalid = false; // From this point and on, any store will cancel the optimization bool select_16_or_0_at_runtime = false; bool put_active = false; // PUTLLC happened bool get_rdatomic = false; // True if MFC_RdAtomicStat was read after GETLLAR u32 mem_count = 0; // Return old state for error reporting atomic16_t discard() { const u32 pc = lsa_pc; const u32 last_pc = lsa_last_pc; const atomic16_t old = *this; *this = atomic16_t{}; // Keep some members lsa_pc = pc; lsa_last_pc = last_pc; return old; } // Conditional breakage (break if a full 128-byte reservation is needed) atomic16_t set_invalid_ls(bool write) { ls_invalid = true; ls_write |= write; if (write) { return discard(); } return atomic16_t{}; } }; struct rchcnt_loop_t { bool active = false; // RDCH/RCHCNT happened bool failed = false; // needc this flag to distinguish start of the pattern vs failed pattern (they begin and end of the same address) bool conditioned = false; // needc this flag to distinguish start of the pattern vs failed pattern (they begin and end of the same address) u32 channel = 128; u32 read_pc = SPU_LS_SIZE; // PC of RDCH or RCHCNT (that encloses the loop) reg_state_t ch_state{+vf::is_null}; // Channel stat, example: RCNCNT ch_state, MFC_Cmd reg_state_t ch_product{+vf::is_null}; // Optional comparison state for channl state, example: CEQI ch_product, ch_state, 1 bool product_test_negate = false; // Compare the opposite way, such as: CEQI ch_product, ch_state, 0 which turns 0 t -1 and 1 to 0 std::vector<u32> origins; u32 branch_pc = SPU_LS_SIZE; // Where the loop branch is located u32 branch_target = SPU_LS_SIZE; // The target of the loop branch // Return old state for error reporting rchcnt_loop_t discard() { const rchcnt_loop_t old = *this; *this = rchcnt_loop_t{}; return old; } }; // Reset tags reg_state_t::alloc_tag(true); 
std::map<u32, std::unique_ptr<block_reg_info>> infos; infos.emplace(entry_point, block_reg_info::create(entry_point)); struct block_reg_state_iterator { u32 pc{}; usz parent_iterator_index = umax; usz parent_target_index = 0; usz iterator_id = 0; usz temp_child_index = umax; usz temp_list_index = umax; // PUTLLC16 optimization analysis tracker atomic16_t atomic16{}; // RDCH/RCHCNT Loop analysis tracker rchcnt_loop_t rchcnt_loop{}; block_reg_state_iterator(u32 _pc, usz _parent_iterator_index = umax, usz _parent_target_index = 0) noexcept : pc(_pc) , parent_iterator_index(_parent_iterator_index) , parent_target_index(_parent_target_index) { } }; std::vector<block_reg_state_iterator> reg_state_it; std::map<u32, atomic16_t> atomic16_all; // RdAtomicStat location -> atomic loop optimization state std::map<u32, rchcnt_loop_t> rchcnt_loop_all; // RDCH/RCHCNT location -> channel read loop optimization state std::map<u32, bool> getllar_starts; // True for failed loops std::map<u32, bool> run_on_block; std::map<u32, bool> logged_block; std::array<reg_state_t, s_reg_max>* true_state_walkby = nullptr; atomic16_t dummy16{}; rchcnt_loop_t dummy_loop{}; bool likely_putllc_loop = false; bool had_putllc_evaluation = false; for (u32 i = 0, count = 0; i < result.data.size(); i++) { const u32 inst = std::bit_cast<be_t<u32>>(result.data[i]); if (spu_opcode_t{inst}.ra == MFC_RdAtomicStat && g_spu_itype.decode(inst) == spu_itype::RDCH) { count++; if (count == 2) { likely_putllc_loop = true; break; } } } usz target_count = 0; for (auto& [pc, loc] : m_targets) { target_count += loc.size(); } const bool should_search_patterns = target_count < 300u; // Treat start of function as an unknown value with tag (because it is) const reg_state_t start_program_count = reg_state_t::make_unknown(entry_point - 1); // Initialize reg_state_it.emplace_back(entry_point); run_on_block[entry_point / 4] = true; enum spu_addr_mask_t : u32 { SPU_LS_MASK_128 = (SPU_LS_SIZE - 1) & -128, SPU_LS_MASK_16 = 
(SPU_LS_SIZE - 1) & -16, SPU_LS_MASK_4 = (SPU_LS_SIZE - 1) & -4, SPU_LS_MASK_1 = (SPU_LS_SIZE - 1), }; u32 iterator_id_alloc = 0; for (u32 wf = 0, wi = 0, wa = entry_point, bpc = wa; wf <= 1;) { const bool is_form_block = wf == 0; const bool is_pattern_match = wf == 1; dummy16.active = false; dummy_loop.active = false; if (!is_form_block && wa == bpc) { if (wi == 0) { for (auto& [addr, block] : infos) { // Evaluate state for all blocks block->evaluate_start_state(infos, should_search_patterns); } } if (!should_search_patterns) { break; } if (!infos[bpc]) { std::string out = fmt::format("Blocks:"); for (auto& [pc, _] : infos) { if (!_) continue; fmt::append(out, " [0x%x]", pc); } out += '\n'; for (auto& [pc, bb] : m_bbs) { if (!m_block_info[pc / 4]) { continue; } out += fmt::format("\nTargets 0x%x:", pc); for (auto addr : bb.targets) { fmt::append(out, " [0x%x]", addr); } } spu_log.fatal("%s", out); } true_state_walkby = &ensure(infos[bpc])->evaluate_start_state(infos, should_search_patterns); for (reg_state_t& f : *true_state_walkby) { if (f.flag & vf::is_null) { // Evaluate locally f.flag -= vf::is_null; } } } auto& vregs = is_form_block ? infos[bpc]->local_state : *true_state_walkby; const auto atomic16 = is_pattern_match ? &::at32(reg_state_it, wi).atomic16 : &dummy16; const auto rchcnt_loop = is_pattern_match ? 
&::at32(reg_state_it, wi).rchcnt_loop : &dummy_loop; const u32 pos = wa; wa += 4; const auto break_putllc16 = [&](u32 cause, atomic16_t previous) { if (previous.active && likely_putllc_loop && getllar_starts.contains(previous.lsa_pc)) { const bool is_first = !std::exchange(getllar_starts[previous.lsa_pc], true); if (!is_first) { return; } had_putllc_evaluation = true; g_fxo->get<putllc16_statistics_t>().breaking_reason[cause]++; if (!spu_log.notice) { return; } std::string break_error = fmt::format("PUTLLC pattern breakage [%x mem=%d lsa_const=%d cause=%u] (lsa_pc=0x%x)", pos, previous.mem_count, u32{!previous.ls_offs.is_const()} * 2 + previous.lsa.is_const(), cause, previous.lsa_pc); const auto values = sort_breakig_reasons(g_fxo->get<putllc16_statistics_t>().breaking_reason); std::string tracing = "Top Breaking Reasons:"; usz i = 0; usz fail_count = 0; bool switched_to_minimal = false; for (auto it = values.begin(); it != values.end(); i++, it++) { fail_count += it->second; if (i >= 12) { continue; } if (i < 8 && it->second > 1) { fmt::append(tracing, " [cause=%u, n=%d]", it->first, it->second); } else { if (!std::exchange(switched_to_minimal, true)) { fmt::append(tracing, "; More:"); } fmt::append(tracing, " %u", it->first); } } fmt::append(tracing, " of %d failures", fail_count); spu_log.notice("%s\n%s", break_error, tracing); } }; const auto break_channel_pattern = [&](u32 cause, rchcnt_loop_t previous) { if (previous.active && rchcnt_loop_all.contains(previous.read_pc)) { const bool is_first = !std::exchange(rchcnt_loop_all[previous.read_pc].failed, true); if (!is_first) { return; } g_fxo->get<rchcnt_statistics_t>().breaking_reason[cause]++; if (!spu_log.notice) { return; } std::string break_error = fmt::format("Channel pattern breakage [%x cause=%u] (read_pc=0x%x)", pos, cause, previous.read_pc); const auto values = sort_breakig_reasons(g_fxo->get<rchcnt_statistics_t>().breaking_reason); std::string tracing = "Top Breaking Reasons:"; usz i = 0; usz 
fail_count = 0; bool switched_to_minimal = false; for (auto it = values.begin(); it != values.end(); i++, it++) { fail_count += it->second; if (i >= 12) { continue; } if (i < 8 && it->second > 1) { fmt::append(tracing, " [cause=%u, n=%d]", it->first, it->second); } else { if (!std::exchange(switched_to_minimal, true)) { fmt::append(tracing, "; More:"); } fmt::append(tracing, " %u", it->first); } } fmt::append(tracing, " of %d failures", fail_count); spu_log.notice("%s\n%s", break_error, tracing); } }; const auto break_all_patterns = [&](u32 cause) { break_putllc16(cause, atomic16->discard()); break_channel_pattern(cause, rchcnt_loop->discard()); }; const auto calculate_absolute_ls_difference = [](u32 addr1, u32 addr2) { addr1 &= SPU_LS_MASK_1; addr2 &= SPU_LS_MASK_1; const u32 abs_diff = (addr1 >= addr2 ? addr1 - addr2 : addr2 - addr1); // Because memory is wrapping-around, take the gap that is smaller return abs_diff >= SPU_LS_SIZE / 2 ? SPU_LS_SIZE - abs_diff : abs_diff; }; bool called_next = false; u32 data{}; const auto next_block = [&]() { if (called_next) { // Guard multiple calles to next_block() return; } called_next = true; if (wf == 0) { wi++; auto& block = infos[bpc]; if (pos == entry_point || (g_cfg.core.spu_block_size != spu_block_size_type::safe && (m_ret_info[bpc / 4] || m_entry_info[bpc / 4]))) { // Do not allow value passthrough for (reg_state_t& f : block->start_reg_state) { f.flag -= vf::is_null; } for (reg_state_t& f : block->local_state) { f.flag -= vf::is_null; } // Block has an external origin, discard all previous information block->end_reg_state = block->local_state; block->has_true_state = true; } block->addend_reg_state = block->local_state; } else { std::vector<usz> to_pop; usz stackframe_it = wi; u32 stackframe_pc = SPU_LS_SIZE; usz entry_index = umax; auto get_block_targets = [&](u32 pc) -> std::span<u32> { if (m_block_info[pc / 4] && m_bbs.count(pc)) { return m_bbs.at(pc).targets; } return {}; }; u32 target_pc = SPU_LS_SIZE; bool 
insert_entry = false; bool is_code_backdoor = false; while (true) { const auto state_it = &reg_state_it[stackframe_it]; stackframe_pc = state_it->pc; entry_index = state_it->parent_target_index; const auto targets = get_block_targets(stackframe_pc); const usz target_size = targets.size(); while (entry_index < target_size && (targets[entry_index] < lsa || targets[entry_index] >= limit || !m_block_info[targets[entry_index] / 4])) { state_it->parent_target_index++; entry_index = state_it->parent_target_index; } if (entry_index == target_size) { const usz parent_index = state_it->parent_iterator_index; to_pop.emplace_back(stackframe_it); if (parent_index != umax) { stackframe_it = parent_index; } else { // Final wi = 0; break; } } else { target_pc = ::at32(targets, entry_index); usz occurence_count = 0; std::array<usz, 16> duplicate_positions; // Virtual concept (there is no really such thing as loop connectors from the ccompiled-code level) // But it helps to simplify this process bool is_loop_connector = false; bool is_too_extensive = false; bool is_skipable = false; // Hack to avoid extensive analysis of all code paths possible: // Allow up to 4 occurences of the upper-most block // Because, loop "connectors" are usually backward in direction // The proper solution would be to add a precursry function analysis stage which identifies all loop "connectors" and allows duplicates based on it for (usz i = stackframe_it, count = 0;; count++) { auto& entry = ::at32(reg_state_it, i); const u32 entry_pc = entry.pc; if (count == (state_it->atomic16.active ? 
25 : 12)) { if (state_it->atomic16.active && !std::exchange(logged_block[target_pc / 4], true)) { spu_log.notice("SPU Blcok Analysis is too extensive at 0x%x", entry_pc); } is_too_extensive = true; break; } if (entry_pc == target_pc) { duplicate_positions[occurence_count++] = i; if (occurence_count == duplicate_positions.size()) { is_loop_connector = true; break; } } const usz parent_idx = entry.parent_iterator_index; if (parent_idx == umax) { break; } ensure(i != parent_idx); // Fill info for later auto& parent = ::at32(reg_state_it, parent_idx); parent.temp_child_index = i; parent.temp_list_index = count; i = parent_idx; } // Scan the code for "code flow" repetitions (entire sequences of blocks equal to each other) // If found, this is 100% a loop, shoulkd it start a third time ignore it if (occurence_count >= 2) { for (usz it_begin = 0; !is_skipable && it_begin < occurence_count - 1; it_begin++) { const usz block_start = duplicate_positions[it_begin + 1]; for (usz it_tail = 0; it_tail < it_begin + 1; it_tail++) { const usz block_tail = duplicate_positions[it_begin - it_tail]; // Check if the distance is precisely two times from the end if (reg_state_it.size() - block_start != utils::rol64(reg_state_it.size() - block_tail, 1)) { continue; } bool is_equal = true; for (usz j = 1; j < reg_state_it.size() - block_tail; j++) { if (reg_state_it[block_start + j].pc != reg_state_it[block_tail + j].pc) { is_equal = false; break; } } if (is_equal) { is_skipable = true; break; } } } } if (is_skipable) { if (!std::exchange(logged_block[target_pc / 4], true)) { spu_log.notice("SPU block is a loop at [0x%05x -> 0x%05x]", state_it->pc, target_pc); } state_it->parent_target_index++; continue; } if (is_loop_connector && !std::exchange(logged_block[target_pc / 4], true)) { spu_log.notice("SPU block analysis is too repetitive at [0x%05x -> 0x%05x]", state_it->pc, target_pc); } insert_entry = true; // Test if the code is an opening to external code (start of the function is always 
respected because it is already assumed to have no origin) is_code_backdoor = m_ret_info[target_pc / 4] || (m_entry_info[target_pc / 4] && target_pc != entry_point); if (run_on_block[target_pc / 4]) { insert_entry = false; } else if (is_code_backdoor || is_too_extensive || is_loop_connector) { if (reg_state_it[stackframe_it].atomic16.active) { break_putllc16(40, reg_state_it[stackframe_it].atomic16.discard()); } if (reg_state_it[stackframe_it].rchcnt_loop.active) { break_channel_pattern(40, reg_state_it[stackframe_it].rchcnt_loop.discard()); } // Allow the block to run only once, to avoid unnecessary iterations run_on_block[target_pc / 4] = true; } state_it->parent_target_index++; if (!insert_entry) { continue; } break; } } const u32 previous_pc = m_bbs.at(reg_state_it[stackframe_it].pc).size * 4 + reg_state_it[stackframe_it].pc - 4; bool may_return = previous_pc + 4 != entry_point + result.data.size() * 4 && (m_ret_info[(previous_pc / 4) + 1] || m_entry_info[previous_pc / 4]); if (!may_return) { const u32 branch_target = op_branch_targets(previous_pc, spu_opcode_t{data})[0]; if (branch_target == umax || branch_target >= entry_point + result.data.size() * 4 || branch_target < entry_point) { may_return = true; } } if (wi != stackframe_it || may_return || !insert_entry) { // Possible function end if (rchcnt_loop->active) { // Does not post-dominates channel reads auto& pair = rchcnt_loop_all[rchcnt_loop->read_pc]; pair.failed = true; pair.active = false; } } // Backup analyser information const auto atomic16_info = reg_state_it[stackframe_it].atomic16; const auto rchcnt_loop_info = reg_state_it[stackframe_it].rchcnt_loop; // Clean from the back possible because it does not affect old indices // Technically should always do a full cleanup at the moment // TODO: Proper cleanup with keeping old indices valid for (usz it : to_pop) { if (it == reg_state_it.size() - 1) { reg_state_it.pop_back(); } else { // Should not be reachable at the moment //ensure(false); 
spu_log.error("Failed to clean block analyis steps at block_id %d", reg_state_it[it].iterator_id); } } if (insert_entry) { const usz target_size = get_block_targets(stackframe_pc).size(); spu_log.trace("Emplacing: block_id=%d, pc=0x%x, target_it=%d/%d, new_pc=0x%x (has_it=%d)", reg_state_it[stackframe_it].iterator_id, stackframe_pc, entry_index + 1, target_size, target_pc, atomic16_info.active); auto& next = reg_state_it.emplace_back(target_pc, stackframe_it, 0); if (!is_code_backdoor) { // Restore analyser information (if not an entry) next.atomic16 = atomic16_info; if (previous_pc != rchcnt_loop_info.branch_pc || target_pc == rchcnt_loop_info.branch_target) next.rchcnt_loop = rchcnt_loop_info; } else { if (atomic16_info.active) { break_putllc16(39, atomic16_info); } if (rchcnt_loop_info.active) { // Does not post-dominates channel read auto& pair = rchcnt_loop_all[rchcnt_loop_info.read_pc]; pair.failed = true; pair.active = false; } } next.iterator_id = iterator_id_alloc++; wi = static_cast<u32>(stackframe_it + 1); ensure(stackframe_it + 1 == reg_state_it.size() - 1); } } if (wi >= reg_state_it.size()) { wf++; wi = 0; run_on_block.clear(); if (wf == 1) { reg_state_it.clear(); if (!infos.empty()) { reg_state_it.emplace_back(::at32(infos, entry_point)->pc).iterator_id = iterator_id_alloc++;; } } } if (wi < reg_state_it.size()) { wa = ::at32(reg_state_it, wi).pc; bpc = wa; } }; const auto get_reg = [&](u32 reg) -> const reg_state_t& { return vregs[reg]; }; const auto move_reg = [&](u32 dst, u32 src) { if (dst == src || vregs[src] == vregs[dst]) { return; } vregs[dst] = vregs[src]; // Register storage has changed vregs[dst].flag -= vf::is_null; }; const auto set_const_value = [&](u32 reg, u32 value) { vregs[reg] = reg_state_t::from_value(value); }; const auto inherit_const_value = [&](u32 reg, const reg_state_t& ra, const reg_state_t& rb, u32 value, u32 pos) { if (ra.origin != rb.origin) { pos = reg_state_it[wi].pc; } else { pos = ra.origin; } const bs_t<vf> flag = 
(ra.flag & rb.flag) - vf::is_null; vregs[reg] = reg_state_t{flag, value, flag & vf::is_const ? u32{umax} : reg_state_t::alloc_tag(), 0, 0, pos}; }; const auto inherit_const_mask_value = [&](u32 reg, reg_state_t state, u32 mask_ones, u32 mask_zeroes) { if ((mask_ones | mask_zeroes) == 0) { state.flag -= vf::is_null; vregs[reg] = state; return; } if (state.flag & vf::is_const) { vregs[reg] = reg_state_t::from_value((state.value | mask_ones) & ~mask_zeroes); return; } const u32 ones = (state.known_ones | mask_ones) & ~mask_zeroes; const u32 zeroes = (state.known_zeroes | mask_zeroes) & ~mask_ones; if ((ones ^ zeroes) == umax) { // Special case: create a constant from full masks vregs[reg] = reg_state_t::from_value(ones); return; } ensure(state.tag != umax); vregs[reg] = reg_state_t{vf::is_mask, 0, state.tag, ones, zeroes, state.origin}; }; const auto unconst = [&](u32 reg, u32 pc) { vregs[reg] = reg_state_t::make_unknown(pc, pos); }; const auto add_block = [&](u32 target) { if (!is_form_block) { return; } // Validate new target (TODO) if (target >= lsa && target < limit) { if (!infos[target]) { infos[target] = block_reg_info::create(target); } block_reg_info::create_node(target, bpc, infos); if (!run_on_block[target / 4]) { reg_state_it.emplace_back(target).iterator_id = iterator_id_alloc++; run_on_block[target / 4] = true; } } }; if (pos < lsa || pos >= limit) { // Don't analyse if already beyond the limit next_block(); continue; } if (bpc != pos && m_preds.count(pos)) { // End of block reached next_block(); continue; } if (g_cfg.core.spu_block_size != spu_block_size_type::safe && (m_ret_info[pos / 4] || m_entry_info[pos / 4] || pos == entry_point)) { ensure(bpc == pos); // Block has an external origin, discard all previous information // TODO: Make the optimizations conditional at runtime instead if (!is_form_block) { // Call for external code break_all_patterns(25); } } if (atomic16->active) { for (auto state : {&atomic16->lsa, &atomic16->ls, &atomic16->ls_offs}) { 
state->invalidate_if_created(pos); } } if (rchcnt_loop->active) { if (std::find(rchcnt_loop->origins.begin(), rchcnt_loop->origins.end(), pos) != rchcnt_loop->origins.end()) { rchcnt_loop->failed = true; rchcnt_loop->active = false; } } data = std::bit_cast<be_t<u32>>(::at32(result.data, (pos - lsa) / 4)); const auto op = spu_opcode_t{data}; const auto type = g_spu_itype.decode(data); // For debugging if (false && likely_putllc_loop && is_pattern_match) { SPUDisAsm dis_asm(cpu_disasm_mode::dump, reinterpret_cast<const u8*>(result.data.data()), result.lower_bound); dis_asm.disasm(pos); std::string consts; for (auto _use : std::initializer_list<std::pair<u32, bool>>{{op.ra, m_use_ra.test(pos / 4)} , {op.rb, m_use_rb.test(pos / 4)}, {op.rc, m_use_rc.test(pos / 4)}}) { if (!_use.second) { continue; } if (!consts.empty()) { consts += ','; } const u32 reg_file = _use.first; const auto& reg = get_reg(reg_file); if (reg.is_const()) { fmt::append(consts, " r%d=0x%x", reg_file, reg.value); } else { if (u32 mask = reg.known_zeroes | reg.known_ones) { fmt::append(consts, " r%d=#%d-&|0x%x", reg_file, reg.tag, mask); } else { fmt::append(consts, " r%d=#%d", reg_file, reg.tag); } } } if (!consts.empty()) { consts = " {" + consts + " }"; } if (dis_asm.last_opcode.ends_with('\n')) { dis_asm.last_opcode.pop_back(); } spu_log.always()("[SPU=0%x, it=%d] %s%s [%d]", pos, reg_state_it[wi].iterator_id, dis_asm.last_opcode, consts, atomic16->active); } // Analyse instruction switch (type) { case spu_itype::UNK: case spu_itype::DFCEQ: case spu_itype::DFCMEQ: case spu_itype::DFCGT: case spu_itype::DFCMGT: case spu_itype::DFTSV: { // Stop before invalid instructions (TODO) next_block(); continue; } case spu_itype::SYNC: case spu_itype::STOP: case spu_itype::STOPD: { if (data == 0) { // Stop before null data next_block(); continue; } if (g_cfg.core.spu_block_size == spu_block_size_type::safe) { // Stop on special instructions (TODO) next_block(); break; } if (type == spu_itype::SYNC) { // 
Remember sync = true; } break; } case spu_itype::IRET: case spu_itype::BI: case spu_itype::BISL: case spu_itype::BISLED: case spu_itype::BIZ: case spu_itype::BINZ: case spu_itype::BIHZ: case spu_itype::BIHNZ: { if (op.e || op.d) { break_all_patterns(27); } break; } case spu_itype::BRSL: case spu_itype::BRASL: { break; } case spu_itype::BRA: { break; } case spu_itype::BRZ: case spu_itype::BRNZ: { const u32 next_pc = spu_branch_target(pos, 1); const u32 target = spu_branch_target(pos, op.i16); if (rchcnt_loop->active) { const reg_state_t& rt = vregs[op.rt]; if (rt.is_instruction && (rchcnt_loop->ch_state.origin == rt.origin || rchcnt_loop->ch_product.origin == rt.origin)) { if (rchcnt_loop->conditioned) { // Let's not make it complicated, have a single branch determining the condition break_channel_pattern(54, rchcnt_loop->discard()); break; } rchcnt_loop->conditioned = true; rchcnt_loop->branch_pc = pos; rchcnt_loop->branch_target = rchcnt_loop->product_test_negate != (type == spu_itype::BRZ) ? 
target : next_pc; break; } } break; } case spu_itype::BR: case spu_itype::BRHZ: case spu_itype::BRHNZ: { break; } case spu_itype::DSYNC: case spu_itype::HEQ: case spu_itype::HEQI: case spu_itype::HGT: case spu_itype::HGTI: case spu_itype::HLGT: case spu_itype::HLGTI: case spu_itype::LNOP: case spu_itype::NOP: case spu_itype::MTSPR: case spu_itype::FSCRWR: { // Do nothing break; } case spu_itype::WRCH: { break_channel_pattern(56, rchcnt_loop->discard()); switch (op.ra) { case MFC_EAL: { move_reg(s_reg_mfc_eal, op.rt); break; } case MFC_LSA: { auto rt = get_reg(op.rt); inherit_const_mask_value(s_reg_mfc_lsa, rt, 0, ~SPU_LS_MASK_1); if (is_pattern_match) { atomic16->lsa_last_pc = pos; } break; } case MFC_TagID: { break; } case MFC_Size: { break; } case MFC_Cmd: { const auto [af, av, atagg, _3, _5, apc, ainst] = get_reg(op.rt); if (!is_pattern_match) { // } else if (af & vf::is_const) { switch (av) { case MFC_GETLLAR_CMD: { // Get LSA and apply mask for GETLLAR // TODO: Simplify this to be a value returning function auto old_lsa = get_reg(s_reg_mfc_lsa); inherit_const_mask_value(s_reg_mfc_lsa, old_lsa, 0, ~SPU_LS_MASK_128); // Restore LSA auto lsa = get_reg(s_reg_mfc_lsa); vregs[s_reg_mfc_lsa] = old_lsa; const u32 lsa_pc = atomic16->lsa_last_pc == SPU_LS_SIZE ? 
bpc : atomic16->lsa_last_pc; if (atomic16->active) { if (atomic16->lsa_pc != lsa_pc || atomic16->get_pc != pos || atomic16->lsa != lsa) { break_putllc16(30, atomic16->discard()); } } // If LSA write has not happened, use block start atomic16->lsa_pc = lsa_pc; atomic16->get_pc = pos; atomic16->active = true; atomic16->lsa = lsa; if (likely_putllc_loop) { // Register loop entry if (getllar_starts.emplace(atomic16->lsa_pc, false).second) { g_fxo->get<putllc16_statistics_t>().all++; spu_log.notice("[0x%05x] GETLLAR pattern entry point", pos); } } break; } case MFC_PUTLLC_CMD: { if (atomic16->active) { const auto _lsa = get_reg(s_reg_mfc_lsa); // Search the value of LS address stoire/load in latest register file if (atomic16->ls_access && atomic16->ls_write && !atomic16->ls_pc_rel && !atomic16->ls.is_const()) { usz reg_it = umax; u32 regs[2]{s_reg_max, s_reg_max}; for (auto val : {&atomic16->ls, &atomic16->ls_offs}) { reg_it++; if (val->is_const()) { regs[reg_it] = 0; continue; } if (vregs[s_reg_mfc_lsa].compare_with_mask_indifference(*val, SPU_LS_MASK_16)) { regs[reg_it] = s_reg_mfc_lsa; continue; } for (u32 i = 0; i <= s_reg_127; i++) { const auto& _reg = vregs[i]; if (_reg.compare_with_mask_indifference(*val, SPU_LS_MASK_16)) { regs[reg_it] = i; break; } } } if (regs[0] == s_reg_max || regs[1] == s_reg_max) { break_putllc16(3, atomic16->discard()); break; } atomic16->reg = regs[0]; if (!atomic16->ls_offs.is_const()) { atomic16->reg2 = regs[1]; } } if (atomic16->ls_access && atomic16->ls_write && !atomic16->lsa.compare_with_mask_indifference(_lsa, SPU_LS_MASK_128)) { // LSA latest value mismatches with the one written with GETLLAR if (atomic16->lsa.flag != _lsa.flag) { break_putllc16(1, atomic16->discard()); } else { break_putllc16(2, atomic16->discard()); } break; } if (atomic16->ls_access && atomic16->ls_write) { atomic16->select_16_or_0_at_runtime = false; bool ok = false; if (atomic16->ls_pc_rel || !atomic16->ls_offs.is_const()) { // } else if 
(atomic16->lsa.is_const()) { if (atomic16->ls.is_const()) { if (atomic16->ls_offs.value != 0) { // Rebase constant so we can get rid of ls_offs atomic16->ls.value = spu_ls_target(atomic16->ls_offs.value + atomic16->ls.value); atomic16->ls_offs = reg_state_t::from_value(0); } if (atomic16->ls.compare_with_mask_indifference(atomic16->lsa, SPU_LS_MASK_128)) { ok = true; } } else if (atomic16->ls_offs.compare_with_mask_indifference(atomic16->lsa, SPU_LS_MASK_128) && atomic16->ls.is_less_than(128 - (atomic16->ls_offs.value & 127))) { // Relative memory access with offset less than 128 bytes // Common around SPU utilities which have less strict restrictions about memory alignment ok = true; } } else if (atomic16->lsa.compare_with_mask_indifference(atomic16->ls, SPU_LS_MASK_128) && atomic16->ls_offs == 0) { // Unknown value with known offset of less than 128 bytes ok = true; } if (!ok) { // This is quite common.. let's try to select between putllc16 and putllc0 at runtime! // break_putllc16(100); // atomic16->discard(); // break; atomic16->select_16_or_0_at_runtime = true; } } if (!atomic16->get_rdatomic) { // MFC_RdAtomicStat must have been read, otherwise GETLLAR may not be executed (according to HW tests) break_putllc16(21, atomic16->discard()); } atomic16->put_pc = pos; atomic16->put_active = true; } break; } default: { break_putllc16(4, atomic16->discard()); break; } } } else { break_putllc16(5, atomic16->discard()); } if (!atomic16->active) { // Propagate failure for (auto& atm : atomic16_all) { if (atm.second.active && atm.second.put_pc == pos) { break_putllc16(31, atm.second.discard()); } } } break; } case MFC_EAH: case SPU_WrDec: case SPU_WrSRR0: case SPU_WrEventAck: case SPU_Set_Bkmk_Tag: case SPU_PM_Start_Ev: case SPU_PM_Stop_Ev: case MFC_WrTagMask: //case MFC_WrTagUpdate: // Technically correct to ignore but risky break; default: { break_all_patterns(6); break; } } break; } case spu_itype::RCHCNT: case spu_itype::RDCH: { const bool is_read = type == 
spu_itype::RDCH; bool invalidate = true; const auto it = rchcnt_loop_all.find(pos); if (it != rchcnt_loop_all.end()) { if (rchcnt_loop->failed || !rchcnt_loop->conditioned || rchcnt_loop->read_pc != pos) { // Propagate faiure it->second.failed = true; it->second.active = false; it->second.conditioned = false; } else { it->second.active = false; } rchcnt_loop->active = false; } if (rchcnt_loop->active) { if (rchcnt_loop->read_pc != pos) { break_channel_pattern(53, rchcnt_loop->discard()); } } switch (op.ra) { case MFC_RdAtomicStat: { if (!is_read) { break; } if (atomic16->active) { if (atomic16->put_active) { if (getllar_starts.contains(atomic16->lsa_pc) && getllar_starts[atomic16->lsa_pc]) { break_putllc16(24, atomic16->discard()); break; } const auto it = atomic16_all.find(pos); if (it == atomic16_all.end()) { // Fresh new pattern detected in a single code path atomic16_all.emplace(pos, *atomic16); } else if (it->second.active) { // Merge pattern attributes between different code paths, may cause detection of failures atomic16_t& existing = it->second; auto compare_tag_and_reg = [](std::pair<const reg_state_t*, u32> a, std::pair<const reg_state_t*, u32> b) { if (b.first->is_const() && a.first->is_const()) { return a.first->compare_with_mask_indifference(*b.first, SPU_LS_MASK_1); } // Compare register source return a.second == b.second; }; if (existing.lsa_pc != atomic16->lsa_pc || existing.put_pc != atomic16->put_pc || !existing.lsa.compare_with_mask_indifference(atomic16->lsa, SPU_LS_MASK_128)) { // Register twice break_putllc16(22, atomic16->discard()); break_putllc16(22, existing.discard()); } if (existing.active && existing.ls_access && atomic16->ls_access && (!compare_tag_and_reg({&existing.ls, existing.reg}, {&atomic16->ls, atomic16->reg}) || existing.ls_offs != atomic16->ls_offs || existing.reg2 != atomic16->reg2)) { // Conflicting loads with stores in more than one code path break_putllc16(27, atomic16->set_invalid_ls(existing.ls_write || 
atomic16->ls_write)); if (!atomic16->active) { existing.active = false; } } if (existing.active && (existing.ls_write || atomic16->ls_write) && (existing.ls_invalid || atomic16->ls_invalid)) { // Conflicting loads with stores in more than one code path break_putllc16(33, atomic16->discard()); existing.active = false; existing.ls_invalid = true; } if (existing.active && !existing.ls_access && atomic16->ls_access) { // Propagate LS access existing.ls = atomic16->ls; existing.reg = atomic16->reg; existing.reg2 = atomic16->reg2; existing.ls_offs = atomic16->ls_offs; } existing.ls_write |= atomic16->ls_write; existing.ls_invalid |= atomic16->ls_invalid; existing.ls_access |= atomic16->ls_access; existing.mem_count = std::max<u32>(existing.mem_count, atomic16->mem_count); existing.select_16_or_0_at_runtime |= atomic16->select_16_or_0_at_runtime; } atomic16->discard(); } else if (!atomic16->get_rdatomic) { atomic16->get_rdatomic = true; // Go above and beyond and also set the constant for it set_const_value(op.rt, MFC_GETLLAR_SUCCESS); invalidate = false; } } break; } // Let's be safe here and no not allow multi-threaded communications case SPU_WrOutMbox: case SPU_WrOutIntrMbox: case SPU_RdSigNotify1: case SPU_RdSigNotify2: case SPU_RdInMbox: //case SPU_RdEventStat: { if (is_read) { break_putllc16(28, atomic16->discard()); } else { break_putllc16(29, atomic16->discard()); } if (!is_pattern_match || is_read) { // } else if (!rchcnt_loop->active && it == rchcnt_loop_all.end()) { rchcnt_loop->read_pc = pos; rchcnt_loop->channel = op.ra; rchcnt_loop->active = true; unconst(op.rt, pos); rchcnt_loop->ch_state = vregs[op.rt]; invalidate = false; } else if (rchcnt_loop->active) { // Success rchcnt_loop->active = false; if (it == rchcnt_loop_all.end()) { rchcnt_loop_all.emplace(pos, *rchcnt_loop); } } break; } default: { break; } } if (invalidate) { unconst(op.rt, pos); } break; } case spu_itype::STQR: case spu_itype::LQR: { const bool is_store = type == spu_itype::STQR; if 
(atomic16->active) { atomic16->mem_count++; // Do not clear lower 16 bytes addressing because the program can move on 4-byte basis const u32 offs = spu_branch_target(pos - result.lower_bound, op.si16); if (atomic16->lsa.is_const() && [&]() { bool hack = false; if (offs % 16 == 0 && (pos - result.lower_bound + op.si16 * 4) == offs) { const u32 reservation_bound = (atomic16->lsa.value | 127); const u32 min_offs = offs; // Hack: assume there is no overflow in relative instruction offset // Thus, use instruction position + offset as a lower bound for reservation access if (min_offs > reservation_bound) { spu_log.success("STQR/LQR Atomic Loop Hack: abs_pos=0x%x, abs=0x%x, i16*4=0x%x, ls_bound=0x%x", offs, pos + op.si16 * 4, op.si16 * 4, reservation_bound); hack = true; } } return hack; }()) { // Ignore memory access in this case } else if (atomic16->ls_invalid && is_store) { break_putllc16(35, atomic16->set_invalid_ls(is_store)); } else if (atomic16->ls_access && atomic16->ls != start_program_count) { break_putllc16(7, atomic16->set_invalid_ls(is_store)); } else if (atomic16->ls_access && offs != atomic16->ls_offs) { if (atomic16->ls_offs.compare_with_mask_indifference(offs, SPU_LS_MASK_1)) { atomic16->ls_write |= is_store; } else { // Sad break_putllc16(8, atomic16->set_invalid_ls(is_store)); } } else { atomic16->ls = start_program_count; atomic16->ls_offs = reg_state_t::from_value(offs); atomic16->ls_pc_rel = true; atomic16->ls_write |= is_store; atomic16->ls_access = true; } // atomic16->ls_reg[offs % 128 / 16] = start_program_count; // atomic16->ls_offs[offs % 128 / 16] = offs; } if (is_store) { break; } // Unconst unconst(op.rt, pos); break; } case spu_itype::STQX: case spu_itype::LQX: { const bool is_store = type == spu_itype::STQX; if (atomic16->active) { atomic16->mem_count++; auto ra = get_reg(op.ra); ra.value &= SPU_LS_MASK_1; auto rb = get_reg(op.rb); rb.value &= SPU_LS_MASK_1; const u32 const_flags = u32{ra.is_const()} + u32{rb.is_const()}; switch 
(const_flags) { case 2: { auto add_res = ra; add_res.value += rb.value; add_res.value &= SPU_LS_MASK_16; add_res.tag = umax; if (atomic16->lsa.unequal_with_mask_indifference(add_res, SPU_LS_MASK_128)) { // Unrelated, ignore } else if (atomic16->ls_invalid && is_store) { break_putllc16(20, atomic16->set_invalid_ls(is_store)); } else if (atomic16->ls_access && add_res != atomic16->ls) { if (atomic16->ls.unequal_with_mask_indifference(add_res, SPU_LS_MASK_128) && atomic16->ls_offs == 0) { // Ok } else if (atomic16->ls_pc_rel) { break_putllc16(8, atomic16->set_invalid_ls(is_store)); } else { // Sad break_putllc16(9, atomic16->set_invalid_ls(is_store)); } } else { atomic16->ls = reg_state_t::from_value(add_res.value); atomic16->ls_offs = reg_state_t::from_value(0); atomic16->ls_pc_rel = false; atomic16->ls_write |= is_store; atomic16->ls_access = true; } break; } case 1: { const auto& state = ra.is_const() ? rb : ra; const auto& _lsa = atomic16->lsa; const u32 offs = (ra.is_const() ? ra.value : rb.value) & SPU_LS_MASK_1; const u32 abs_diff = calculate_absolute_ls_difference(offs, 0); if ((_lsa.unequal_with_mask_indifference(state, SPU_LS_MASK_128) && offs == 0) || (_lsa.compare_with_mask_indifference(state, SPU_LS_MASK_1) && abs_diff >= 128u) || (_lsa.compare_with_mask_indifference(state, SPU_LS_MASK_128) && abs_diff >= 256u) ) { // We already know it's an unrelated load/store // The reason for SPU_LS_SIZE - 128 check is that in case LSA is not aligned, it detects the possible wraparound } else if (atomic16->ls_invalid && is_store) { break_putllc16(23, atomic16->set_invalid_ls(is_store)); } else if (atomic16->ls_access && atomic16->ls != state) { if (atomic16->ls.unequal_with_mask_indifference(state, SPU_LS_MASK_128) && offs == 0) { // Ok } else if (atomic16->ls_pc_rel) { break_putllc16(36, atomic16->set_invalid_ls(is_store)); } else { // Sad break_putllc16(11, atomic16->set_invalid_ls(is_store)); } } else if (atomic16->ls_access) { ensure(!atomic16->ls.is_const()); if 
(atomic16->ls_offs.compare_with_mask_indifference(offs, SPU_LS_MASK_1)) { // Ok atomic16->ls_write |= is_store; } else if (atomic16->ls_offs.is_const() && atomic16->ls_offs.value / 16 == offs / 16 && state.get_known_zeroes() % 16 >= std::max<u32>(offs % 16, atomic16->ls_offs.value % 16)) { // For special case observed in games (offset cannot cause the address to roll over the next 16 bytes) atomic16->ls_write |= is_store; } else { break_putllc16(12, atomic16->set_invalid_ls(is_store)); } } else { atomic16->ls = state; atomic16->ls_offs = reg_state_t::from_value(offs); atomic16->ls_pc_rel = false; atomic16->ls_write |= is_store; atomic16->ls_access = true; } break; } case 0: { const bool is_ra_first = atomic16->ls_access ? ra == atomic16->ls : op.ra <= op.rb; const auto& state1 = is_ra_first ? ra : rb; const auto& state2 = is_ra_first ? rb : ra; if (atomic16->ls_access && (atomic16->ls != state1 || atomic16->ls_offs != state2)) { if (atomic16->ls_pc_rel) { break_putllc16(32, atomic16->set_invalid_ls(is_store)); } else { // Sad break_putllc16(13, atomic16->set_invalid_ls(is_store)); } } else { atomic16->ls = state1; atomic16->ls_offs = state2; atomic16->ls_pc_rel = false; atomic16->ls_write |= is_store; atomic16->ls_access = true; } break; } default: fmt::throw_exception("Unreachable!"); } } if (is_store) { break; } // Unconst unconst(op.rt, pos); break; } case spu_itype::STQA: case spu_itype::LQA: { const bool is_store = type == spu_itype::STQA; if (atomic16->active) { atomic16->mem_count++; const reg_state_t ca = reg_state_t::from_value(spu_ls_target(0, op.i16)); if (atomic16->lsa.unequal_with_mask_indifference(ca, SPU_LS_MASK_128)) { // We already know it's an unrelated load/store } else if (atomic16->ls_invalid && is_store) { break_putllc16(37, atomic16->set_invalid_ls(is_store)); } else if (atomic16->ls_access && ca != atomic16->ls) { if (atomic16->ls.unequal_with_mask_indifference(ca, SPU_LS_MASK_128) && atomic16->ls_offs == 0) { // Ok } else if 
(atomic16->ls_pc_rel) { break_putllc16(14, atomic16->set_invalid_ls(is_store)); } else { // Sad break_putllc16(15, atomic16->set_invalid_ls(is_store)); } } else { atomic16->ls = ca; atomic16->ls_offs = reg_state_t::from_value(0); atomic16->ls_pc_rel = false; atomic16->ls_write |= is_store; atomic16->ls_access = true; } } if (is_store) { break; } // Unconst unconst(op.rt, pos); break; } case spu_itype::STQD: case spu_itype::LQD: { const bool is_store = type == spu_itype::STQD; if (atomic16->active) { atomic16->mem_count++; auto ra = get_reg(op.ra); const auto& _lsa = atomic16->lsa; ra.value = ra.is_const() ? spu_ls_target(ra.value, op.si10 * 4) : 0; const u32 offs = ra.is_const() ? 0 : spu_ls_target(0, op.si10 * 4); const u32 abs_diff = calculate_absolute_ls_difference(offs, 0); if ((_lsa.unequal_with_mask_indifference(ra, SPU_LS_MASK_128) && offs == 0) || (_lsa.compare_with_mask_indifference(ra, SPU_LS_MASK_1) && abs_diff >= 128u) || (_lsa.compare_with_mask_indifference(ra, SPU_LS_MASK_128) && abs_diff >= 256u) ) { // We already know it's an unrelated load/store // The reason for SPU_LS_SIZE - 128 check is that in case LSA is not aligned, it detects the possible wraparound } else if (atomic16->ls_invalid && is_store) { break_putllc16(34, atomic16->set_invalid_ls(is_store)); } else if (atomic16->ls_access && atomic16->ls != ra) { if (atomic16->ls.unequal_with_mask_indifference(ra, SPU_LS_MASK_128) && (offs == 0 && atomic16->ls_offs == 0)) { // Ok } else if (atomic16->ls_pc_rel) { break_putllc16(16, atomic16->set_invalid_ls(is_store)); } else { // Sad break_putllc16(17, atomic16->set_invalid_ls(is_store)); } } else if (atomic16->ls_access) { if (atomic16->ls_offs.compare_with_mask_indifference(offs, SPU_LS_MASK_1)) { atomic16->ls_write |= is_store; } else if (atomic16->ls_offs.is_const() && atomic16->ls_offs.value / 16 == offs / 16 && ra.get_known_zeroes() % 16 >= std::max<u32>(offs % 16, atomic16->ls_offs.value % 16)) { // For special case observed in games (offset 
cannot cause the address to roll over the next 16 bytes) atomic16->ls_write |= is_store; } else { break_putllc16(18, atomic16->set_invalid_ls(is_store)); } } else { atomic16->ls = ra; atomic16->ls_offs = reg_state_t::from_value(offs); atomic16->ls_pc_rel = false; atomic16->ls_write |= is_store; atomic16->ls_access = true; } } if (type == spu_itype::STQD) { break; } // Unconst unconst(op.rt, pos); break; } case spu_itype::HBR: { hbr_loc = spu_branch_target(pos, op.roh << 7 | op.rt); const auto [af, av, at, ao, az, apc, ainst] = get_reg(op.ra); hbr_tg = af & vf::is_const && !op.c ? av & 0x3fffc : -1; break; } case spu_itype::HBRA: { hbr_loc = spu_branch_target(pos, op.r0h << 7 | op.rt); hbr_tg = spu_branch_target(0x0, op.i16); break; } case spu_itype::HBRR: { hbr_loc = spu_branch_target(pos, op.r0h << 7 | op.rt); hbr_tg = spu_branch_target(pos, op.i16); break; } case spu_itype::IL: { set_const_value(op.rt, op.si16); break; } case spu_itype::ILA: { set_const_value(op.rt, op.i18); break; } case spu_itype::ILH: { set_const_value(op.rt, op.i16 << 16 | op.i16); break; } case spu_itype::ILHU: { set_const_value(op.rt, op.i16 << 16); break; } case spu_itype::IOHL: { const auto rt = get_reg(op.rt); inherit_const_mask_value(op.rt, rt, op.i16, 0); break; } case spu_itype::ORI: { if (!op.si10) { move_reg(op.rt, op.ra); break; } const auto ra = get_reg(op.ra); inherit_const_mask_value(op.rt, ra, op.si10, 0); break; } case spu_itype::OR: { if (op.ra == op.rb) { move_reg(op.rt, op.ra); break; } const auto ra = get_reg(op.ra); const auto rb = get_reg(op.rb); const auto [af, av, at, ao, az, apc, ainst] = ra; const auto [bf, bv, bt, bo, bz, bpc, binst] = rb; inherit_const_value(op.rt, ra, rb, av | bv, pos); break; } case spu_itype::XORI: { if (!op.si10) { move_reg(op.rt, op.ra); break; } const auto ra = get_reg(op.ra); const auto [af, av, at, ao, az, apc, ainst] = ra; inherit_const_value(op.rt, ra, ra, av ^ op.si10, pos); break; } case spu_itype::XOR: { if (op.ra == op.rb) { 
set_const_value(op.rt, 0); break; } const auto ra = get_reg(op.ra); const auto rb = get_reg(op.rb); const auto [af, av, at, ao, az, apc, ainst] = ra; const auto [bf, bv, bt, bo, bz, bpc, binst] = rb; inherit_const_value(op.rt, ra, rb, bv ^ av, pos); break; } case spu_itype::NOR: { const auto ra = get_reg(op.ra); const auto rb = get_reg(op.rb); const auto [af, av, at, ao, az, apc, ainst] = ra; const auto [bf, bv, bt, bo, bz, bpc, binst] = rb; inherit_const_value(op.rt, ra, rb, ~(bv | av), pos); break; } case spu_itype::ANDI: { const auto ra = get_reg(op.ra); inherit_const_mask_value(op.rt, ra, 0, ~op.si10); break; } case spu_itype::AND: { if (op.ra == op.rb) { move_reg(op.rt, op.ra); break; } const auto ra = get_reg(op.ra); const auto rb = get_reg(op.rb); const auto [af, av, at, ao, az, apc, ainst] = ra; const auto [bf, bv, bt, bo, bz, bpc, binst] = rb; inherit_const_value(op.rt, ra, rb, bv & av, pos); break; } case spu_itype::AI: { if (!op.si10) { move_reg(op.rt, op.ra); break; } const auto ra = get_reg(op.ra); const auto [af, av, at, ao, az, apc, ainst] = ra; inherit_const_value(op.rt, ra, ra, av + op.si10, pos); if (u32 mask = ra.get_known_zeroes() & ~op.si10; mask & 1) { // Added zeroes are always zeroes which comes in handy later inherit_const_mask_value(op.rt, vregs[op.rt], 0, (1u << std::countr_one(mask)) - 1); } break; } case spu_itype::A: { const auto ra = get_reg(op.ra); const auto rb = get_reg(op.rb); const auto [af, av, at, ao, az, apc, ainst] = ra; const auto [bf, bv, bt, bo, bz, bpc, binst] = rb; inherit_const_value(op.rt, ra, rb, bv + av, pos); if (u32 mask = ra.get_known_zeroes() & rb.get_known_zeroes(); mask & 1) { // Added zeroes are always zeroes which comes in handy later inherit_const_mask_value(op.rt, vregs[op.rt], 0, (1u << std::countr_one(mask)) - 1); } break; } case spu_itype::SFI: { const auto ra = get_reg(op.ra); const auto [af, av, at, ao, az, apc, ainst] = get_reg(op.ra); inherit_const_value(op.rt, ra, ra, op.si10 - av, pos); break; } 
case spu_itype::SF: { const auto ra = get_reg(op.ra); const auto rb = get_reg(op.rb); const auto [af, av, at, ao, az, apc, ainst] = ra; const auto [bf, bv, bt, bo, bz, bpc, binst] = rb; inherit_const_value(op.rt, ra, rb, bv - av, pos); if (u32 mask = ra.get_known_zeroes() & rb.get_known_zeroes(); mask & 1) { // Subtracted zeroes are always zeroes which comes in handy later inherit_const_mask_value(op.rt, vregs[op.rt], 0, (1u << std::countr_one(mask)) - 1); } break; } case spu_itype::FSMBI: { const u32 mask = (op.i16 >> 12); const u32 value = (mask & 1 ? 0xff : 0) | (mask & 2 ? 0xff00 : 0) | (mask & 4 ? 0xff0000 : 0) | (mask & 8 ? 0xff000000u : 0); set_const_value(op.rt, value); break; } case spu_itype::ROTMI: { if ((0 - op.i7) & 0x20) { set_const_value(op.rt, 0); break; } if (!op.i7) { move_reg(op.rt, op.ra); break; } const auto ra = get_reg(op.ra); const auto [af, av, at, ao, az, apc, ainst] = get_reg(op.ra); inherit_const_value(op.rt, ra, ra, av >> ((0 - op.i7) & 0x1f), pos); break; } case spu_itype::SHLI: { if (op.i7 & 0x20) { set_const_value(op.rt, 0); break; } if (!op.i7) { move_reg(op.rt, op.ra); break; } const auto ra = get_reg(op.ra); const auto [af, av, at, ao, az, apc, ainst] = ra; inherit_const_value(op.rt, ra, ra, av << (op.i7 & 0x1f), pos); break; } case spu_itype::SELB: { const auto ra = get_reg(op.ra); const auto rb = get_reg(op.rb); // Ignore RC, perform a value merge which also respect bitwise information vregs[op.rt4] = ra.merge(rb, pos); break; } case spu_itype::CEQI: { const auto ra = get_reg(op.ra); const auto [af, av, at, ao, az, apc, ainst] = ra; inherit_const_value(op.rt, ra, ra, av == op.si10 + 0u, pos); if (rchcnt_loop->active) { if (ra.is_instruction && ra.origin == rchcnt_loop->ch_state.origin) { if (op.si10 != 0 && op.si10 != 1) { break_channel_pattern(55, rchcnt_loop->discard()); break; } rchcnt_loop->ch_product = vregs[op.rt]; rchcnt_loop->product_test_negate = op.si10 == 1; } } break; } case spu_itype::SHLQBYI: { if (op.i7 & 0x10) { 
set_const_value(op.rt, 0); break; } if (!op.i7) { move_reg(op.rt, op.ra); break; } [[fallthrough]]; } default: { // Make unknown value if (!(type & spu_itype::zregmod)) { const u32 op_rt = type & spu_itype::_quadrop ? +op.rt4 : +op.rt; u32 ra = s_reg_max, rb = s_reg_max, rc = s_reg_max; if (m_use_ra.test(pos / 4)) { ra = op.ra; } if (m_use_rb.test(pos / 4)) { rb = op.rb; } if (type & spu_itype::_quadrop && m_use_rc.test(pos / 4)) { rc = op.rc; } u32 reg_pos = SPU_LS_SIZE; for (u32 reg : {ra, rb, rc}) { if (reg != s_reg_max) { if (reg_pos == SPU_LS_SIZE) { reg = vregs[reg].origin; } else if (reg_pos != vregs[reg].origin) { const u32 block_start = reg_state_it[wi].pc; // if (vregs[reg].origin >= block_start && vregs[reg].origin <= pos) // { // reg_pos = std::max<u32>(vregs[reg].origin, reg_pos); // } reg_pos = block_start; break; } } } unconst(op_rt, reg_pos == SPU_LS_SIZE ? pos : reg_pos); if (rchcnt_loop->active) { if (std::find(rchcnt_loop->origins.begin(), rchcnt_loop->origins.end(), vregs[op_rt].origin) == rchcnt_loop->origins.end()) { rchcnt_loop->origins.push_back(vregs[op_rt].origin); } } } break; } } if (m_targets.count(pos)) { for (u32 next_target : ::at32(m_targets, pos)) { add_block(next_target); } next_block(); } } std::string func_hash; if (!result.data.empty()) { sha1_context ctx; u8 output[20]{}; sha1_starts(&ctx); sha1_update(&ctx, reinterpret_cast<const u8*>(result.data.data()), result.data.size() * 4); sha1_finish(&ctx, output); fmt::append(func_hash, "%s", fmt::base57(output)); } for (const auto& [pc_commited, pattern] : atomic16_all) { if (!pattern.active) { continue; } if (getllar_starts.contains(pattern.lsa_pc) && getllar_starts[pattern.lsa_pc]) { continue; } auto& stats = g_fxo->get<putllc16_statistics_t>(); had_putllc_evaluation = true; if (!pattern.ls_write) { spu_log.success("PUTLLC0 Pattern Detected! 
(put_pc=0x%x, %s) (putllc0=%d, putllc16+0=%d, all=%d)", pattern.put_pc, func_hash, ++stats.nowrite, ++stats.single, +stats.all); add_pattern(false, inst_attr::putllc0, pattern.put_pc - lsa); continue; } union putllc16_info { u32 data; bf_t<u32, 30, 2> type; bf_t<u32, 29, 1> runtime16_select; bf_t<u32, 28, 1> no_notify; bf_t<u32, 18, 8> reg; bf_t<u32, 0, 18> off18; bf_t<u32, 0, 8> reg2; } value{}; enum : u32 { v_const = 0, v_relative = 1, v_reg_offs = 2, v_reg2 = 3, }; for (auto it = infos.lower_bound(utils::sub_saturate<u32>(pattern.put_pc, 512)); it != infos.end() && it->first < pattern.put_pc + 512; it++) { for (auto& state : it->second->end_reg_state) { if (state.is_const() && (state.value & -0x20) == (CELL_SYNC_ERROR_ALIGN & -0x20)) { // Do not notify if it is a cellSync function value.no_notify = 1; spu_log.success("Detected cellSync function at 0x%x, disabling reservation notification.", pattern.put_pc); break; } } if (value.no_notify) { break; } } value.runtime16_select = pattern.select_16_or_0_at_runtime; value.reg = s_reg_max; if (pattern.ls.is_const()) { ensure(pattern.reg == s_reg_max && pattern.reg2 == s_reg_max && pattern.ls_offs.is_const(), "Unexpected register usage"); value.type = v_const; value.off18 = pattern.ls.value & SPU_LS_MASK_1; } else if (pattern.ls == start_program_count) { ensure(pattern.ls_offs.is_const(), "Unexpected register2 usage"); value.type = v_relative; value.off18 = pattern.ls_offs.value & SPU_LS_MASK_1; } else if (pattern.ls_offs.is_const()) { ensure(pattern.reg != s_reg_max, "Not found register usage"); value.type = v_reg_offs; value.reg = pattern.reg; value.off18 = pattern.ls_offs.value; } else { ensure(pattern.reg != s_reg_max, "Not found register usage"); ensure(pattern.reg2 != s_reg_max, "Not found register2 usage"); value.type = v_reg2; value.reg = pattern.reg; value.reg2 = pattern.reg2; } if (g_cfg.core.spu_accurate_reservations) { // Because enabling it is a hack, as it turns out continue; } add_pattern(false, 
inst_attr::putllc16, pattern.put_pc - result.entry_point, value.data); spu_log.success("PUTLLC16 Pattern Detected! (mem_count=%d, put_pc=0x%x, pc_rel=%d, offset=0x%x, const=%u, two_regs=%d, reg=%u, runtime=%d, 0x%x-%s) (putllc0=%d, putllc16+0=%d, all=%d)" , pattern.mem_count, pattern.put_pc, value.type == v_relative, value.off18, value.type == v_const, value.type == v_reg2, value.reg, value.runtime16_select, entry_point, func_hash, +stats.nowrite, ++stats.single, +stats.all); } for (const auto& [read_pc, pattern] : rchcnt_loop_all) { if (pattern.failed || pattern.read_pc == SPU_LS_SIZE) { continue; } if (pattern.active) { spu_log.error("Channel loop error! (get_pc=0x%x, 0x%x-%s)", read_pc, entry_point, func_hash); continue; } if (inst_attr attr = m_inst_attrs[(read_pc - entry_point) / 4]; attr == inst_attr::none) { add_pattern(false, inst_attr::rchcnt_loop, read_pc - result.entry_point); spu_log.error("Channel Loop Pattern Detected! Report to developers! (read_pc=0x%x, branch_pc=0x%x, branch_target=0x%x, 0x%x-%s)", read_pc, pattern.branch_pc, pattern.branch_target, entry_point, func_hash); } } if (likely_putllc_loop && !had_putllc_evaluation) { spu_log.notice("Likely missed PUTLLC16 patterns. 
(entry=0x%x)", entry_point);
	}

	if (result.data.empty())
	{
		// Blocks starting from 0x0 or invalid instruction won't be compiled, may need special interpreter fallback
	}

	return result;
}

// Append a human-readable dump of the analysed SPU program to 'out':
// a SHA-1 based identity header, per-basic-block disassembly with CFG
// annotations (predecessors/targets), and the detected function list.
void spu_recompiler_base::dump(const spu_program& result, std::string& out)
{
	SPUDisAsm dis_asm(cpu_disasm_mode::dump, reinterpret_cast<const u8*>(result.data.data()), result.lower_bound);

	std::string hash;

	if (!result.data.empty())
	{
		// Identify the program by the SHA-1 of its instruction words (base57-encoded)
		sha1_context ctx;
		u8 output[20];
		sha1_starts(&ctx);
		sha1_update(&ctx, reinterpret_cast<const u8*>(result.data.data()), result.data.size() * 4);
		sha1_finish(&ctx, output);
		fmt::append(hash, "%s", fmt::base57(output));
	}
	else
	{
		hash = "N/A";
	}

	fmt::append(out, "========== SPU BLOCK 0x%05x (size %u, %s) ==========\n\n", result.entry_point, result.data.size(), hash);

	// Disassemble every basic block, then print its CFG annotations
	for (auto& bb : m_bbs)
	{
		for (u32 pos = bb.first, end = bb.first + bb.second.size * 4; pos < end; pos += 4)
		{
			dis_asm.disasm(pos);

			if (!dis_asm.last_opcode.ends_with('\n'))
			{
				dis_asm.last_opcode += '\n';
			}

			fmt::append(out, ">%s", dis_asm.last_opcode);
		}

		out += '\n';

		if (m_block_info[bb.first / 4])
		{
			// Block kind: Chunk (entry+ret), Entry, or plain Block
			fmt::append(out, "A: [0x%05x] %s\n", bb.first, m_entry_info[bb.first / 4] ? (m_ret_info[bb.first / 4] ? "Chunk" : "Entry") : "Block");
			fmt::append(out, "\tF: 0x%05x\n", bb.second.func);

			for (u32 pred : bb.second.preds)
			{
				fmt::append(out, "\t<- 0x%05x\n", pred);
			}

			for (u32 target : bb.second.targets)
			{
				fmt::append(out, "\t-> 0x%05x%s\n", target, m_bbs.count(target) ? "" : " (null)");
			}
		}
		else
		{
			fmt::append(out, "A: [0x%05x] ?\n", bb.first);
		}

		out += '\n';
	}

	// Function list: end address and outgoing calls per function
	for (auto& f : m_funcs)
	{
		fmt::append(out, "F: [0x%05x]%s\n", f.first, f.second.good ? " (good)" : " (bad)");
		fmt::append(out, "\tN: 0x%05x\n", f.second.size * 4 + f.first);

		for (u32 call : f.second.calls)
		{
			fmt::append(out, "\t>> 0x%05x%s\n", call, m_funcs.count(call) ? "" : " (null)");
		}
	}

	out += '\n';
}

// Worker thread body: consumes (patch-address, program) pairs from the queue
// and compiles each program with a dedicated LLVM recompiler instance.
struct spu_llvm_worker
{
	lf_queue<std::pair<u64, const spu_program*>> registered;

	void operator()()
	{
		// SPU LLVM Recompiler instance
		std::unique_ptr<spu_recompiler_base> compiler;

		// Fake LS
		std::vector<be_t<u32>> ls;

		bool set_relax_flag = false;

		// The third for-clause is an immediately-invoked lambda: it advances the
		// queue slice, and when the slice is drained it drops the "work" flag and
		// sleeps until the queue is notified again.
		for (auto slice = registered.pop_all();; [&]
		{
			if (slice)
			{
				slice.pop_front();
			}

			if (slice || thread_ctrl::state() == thread_state::aborting)
			{
				return;
			}

			if (set_relax_flag)
			{
				spu_thread::g_spu_work_count--;
				set_relax_flag = false;
			}

			thread_ctrl::wait_on(utils::bless<atomic_t<u32>>(&registered)[1], 0);
			slice = registered.pop_all();
		}())
		{
			auto* prog = slice.get();

			if (thread_ctrl::state() == thread_state::aborting)
			{
				break;
			}

			if (!prog)
			{
				continue;
			}

			if (!prog->second)
			{
				// Null program acts as a poison value to stop the worker
				break;
			}

			if (!compiler)
			{
				// Postponed initialization
				compiler = spu_recompiler_base::make_llvm_recompiler();
				compiler->init();

				ls.resize(SPU_LS_SIZE / sizeof(be_t<u32>));
			}

			if (!set_relax_flag)
			{
				spu_thread::g_spu_work_count++;
				set_relax_flag = true;
			}

			const auto& func = *prog->second;

			// Get data start
			const u32 start = func.lower_bound;
			const u32 size0 = ::size32(func.data);

			// Initialize LS with function data only
			for (u32 i = 0, pos = start; i < size0; i++, pos += 4)
			{
				ls[pos / 4] = std::bit_cast<be_t<u32>>(func.data[i]);
			}

			// Call analyser
			spu_program func2 = compiler->analyse(ls.data(), func.entry_point);

			if (func2 != func)
			{
				spu_log.error("[0x%05x] SPU Analyser failed, %u vs %u", func2.entry_point, func2.data.size(), size0);
			}
			else if (const auto target = compiler->compile(std::move(func2)))
			{
				// Redirect old function (TODO: patch in multiple places)
				const s64 rel = reinterpret_cast<u64>(target) - prog->first - 5;

				union
				{
					u8 bytes[8];
					u64 result;
				};

				bytes[0] = 0xe9; // jmp rel32
				std::memcpy(bytes + 1, &rel, 4);
				bytes[5] = 0x90;
				bytes[6] = 0x90;
				bytes[7] = 0x90;

				// Publish the 8-byte patch atomically (jmp rel32 + NOP padding)
				atomic_storage<u64>::release(*reinterpret_cast<u64*>(prog->first), result);
			}
			else
			{
				spu_log.fatal("[0x%05x] Compilation failed.", func.entry_point);
				break;
			}

			// Clear fake LS
			// NOTE(review): clears 4 * (size0 - 1) bytes, i.e. all but the last word — confirm intentional
			std::memset(ls.data() + start / 4, 0, 4 * (size0 - 1));
		}

		if (set_relax_flag)
		{
			spu_thread::g_spu_work_count--;
			set_relax_flag = false;
		}
	}
};

// SPU LLVM recompiler thread context
struct spu_llvm
{
	// Workload
	lf_queue<std::pair<const u64, spu_item*>> registered;

	atomic_ptr<named_thread_group<spu_llvm_worker>> m_workers;

	spu_llvm()
	{
		// Dependency
		g_fxo->init<spu_cache>();
	}

	// Scheduler thread body: profiles running SPU threads, picks the hottest
	// enqueued program and dispatches it to one of the worker threads.
	void operator()()
	{
		if (g_cfg.core.spu_decoder != spu_decoder_type::llvm)
		{
			return;
		}

		while (!registered && thread_ctrl::state() != thread_state::aborting)
		{
			// Wait for the first SPU block before launching any thread
			thread_ctrl::wait_on(utils::bless<atomic_t<u32>>(&registered)[1], 0);
		}

		if (thread_ctrl::state() == thread_state::aborting)
		{
			return;
		}

		// To compile (hash -> item)
		std::unordered_multimap<u64, spu_item*, value_hash<u64>> enqueued;

		// Mini-profiler (hash -> number of occurrences)
		std::unordered_map<u64, atomic_t<u64>, value_hash<u64>> samples;

		// For synchronization with profiler thread
		stx::init_mutex prof_mutex;

		named_thread profiler("SPU LLVM Profiler"sv, [&]()
		{
			while (thread_ctrl::state() != thread_state::aborting)
			{
				{
					// Lock if enabled
					const auto lock = prof_mutex.access();

					if (!lock)
					{
						// Wait when the profiler is disabled
						prof_mutex.wait_for_initialized();
						continue;
					}

					// Collect profiling samples
					idm::select<named_thread<spu_thread>>([&](u32 /*id*/, spu_thread& spu)
					{
						const u64 name = atomic_storage<u64>::load(spu.block_hash);

						if (auto state = +spu.state; !::is_paused(state) && !::is_stopped(state) && cpu_flag::wait - state)
						{
							const auto found = std::as_const(samples).find(name);

							if (found != std::as_const(samples).end())
							{
								const_cast<atomic_t<u64>&>(found->second)++;
							}
						}
					});
				}

				// Sleep for a short period if enabled
				thread_ctrl::wait_for(20, false);
			}
		});

		// Scale worker count with the number of hardware threads
		u32 worker_count = 1;

		if (uint hc = utils::get_thread_count(); hc >= 12)
		{
			worker_count = hc - 12 + 3;
		}
		else if (hc >= 6)
		{
			worker_count = 2;
		}

		u32 worker_index = 0;
		u32 notify_compile_count = 0;
		u32 compile_pending = 0;
		std::vector<u8> notify_compile(worker_count);

		m_workers = make_single<named_thread_group<spu_llvm_worker>>("SPUW.", worker_count);
		auto workers_ptr = m_workers.load();
		auto& workers = *workers_ptr;

		while (thread_ctrl::state() != thread_state::aborting)
		{
			for (const auto& pair : registered.pop_all())
			{
				enqueued.emplace(pair);

				// Interrupt and kick profiler thread
				const auto lock = prof_mutex.init_always([&]{});

				// Register new blocks to collect samples
				samples.emplace(pair.first, 0);
			}

			if (enqueued.empty())
			{
				// Send pending notifications
				if (notify_compile_count)
				{
					for (usz i = 0; i < worker_count; i++)
					{
						if (notify_compile[i])
						{
							(workers.begin() + i)->registered.notify();
						}
					}
				}

				// Interrupt profiler thread and put it to sleep
				static_cast<void>(prof_mutex.reset());

				thread_ctrl::wait_on(utils::bless<atomic_t<u32>>(&registered)[1], 0);

				std::fill(notify_compile.begin(), notify_compile.end(), 0); // Reset notification flags
				notify_compile_count = 0;
				compile_pending = 0;
				continue;
			}

			// Find the most used enqueued item
			u64 sample_max = 0;
			auto found_it = enqueued.begin();

			for (auto it = enqueued.begin(), end = enqueued.end(); it != end; ++it)
			{
				const u64 cur = ::at32(std::as_const(samples), it->first);

				if (cur > sample_max)
				{
					sample_max = cur;
					found_it = it;
				}
			}

			// Start compiling
			const spu_program& func = found_it->second->data;

			// Old function pointer (pre-recompiled)
			const spu_function_t _old = found_it->second->compiled;

			// Remove item from the queue
			enqueued.erase(found_it);

			// Prefer using an inactive thread
			for (usz i = 0; i < worker_count && !!(workers.begin() + (worker_index % worker_count))->registered; i++)
			{
				worker_index++;
			}

			// Push the workload
			const bool notify = (workers.begin() + (worker_index % worker_count))->registered.template push<false>(reinterpret_cast<u64>(_old), &func);

			if (notify && !notify_compile[worker_index % worker_count])
			{
				notify_compile[worker_index % worker_count] = 1;
				notify_compile_count++;
			}
			compile_pending++;

			// Notify all before queue runs out if there is considerable excess
			// If there are many workers, notification happens soon;
			// if there are only a few workers, notifications are postponed until there is some more workload
			if (notify_compile_count && std::min<u32>(7, utils::aligned_div<u32>(worker_count * 2, 3) + 2) <= compile_pending)
			{
				for (usz i = 0; i < worker_count; i++)
				{
					if (notify_compile[i])
					{
						(workers.begin() + i)->registered.notify();
					}
				}

				std::fill(notify_compile.begin(), notify_compile.end(), 0); // Reset notification flags
				notify_compile_count = 0;
				compile_pending = 0;
			}

			worker_index++;
		}

		// Shutdown: clear samples under the profiler lock, then stop all workers
		static_cast<void>(prof_mutex.init_always([&]{ samples.clear(); }));

		m_workers.reset();

		for (u32 i = 0; i < worker_count; i++)
		{
			(workers.begin() + i)->operator=(thread_state::aborting);
		}
	}

	spu_llvm& operator=(thread_state)
	{
		// Propagate the aborting state to all worker threads
		if (const auto workers = m_workers.load())
		{
			for (u32 i = 0; i < workers->size(); i++)
			{
				(workers->begin() + i)->operator=(thread_state::aborting);
			}
		}

		return *this;
	}

	static constexpr auto thread_name = "SPU LLVM"sv;
};

using spu_llvm_thread = named_thread<spu_llvm>;

// "Fast" recompiler: emits raw x86-64 machine code that verifies the SPU
// program in-place and executes it through the interpreter table, while the
// real LLVM compilation proceeds asynchronously on the spu_llvm thread.
struct spu_fast : public spu_recompiler_base
{
	virtual void init() override
	{
		if (!m_spurt)
		{
			m_spurt = &g_fxo->get<spu_runtime>();
		}
	}

	virtual spu_function_t compile(spu_program&& _func) override
	{
#ifndef ARCH_X64
		fmt::throw_exception("Fast LLVM recompiler is unimplemented for architectures other than X86-64");
#endif
		const auto add_loc = m_spurt->add_empty(std::move(_func));

		if (!add_loc)
		{
			return nullptr;
		}

		if (add_loc->compiled)
		{
			// Already compiled by someone else
			return add_loc->compiled;
		}

		const spu_program& func = add_loc->data;

		if (g_cfg.core.spu_debug && !add_loc->logged.exchange(1))
		{
			std::string log;
			this->dump(func, log);
			fs::write_file(m_spurt->get_cache_path() + "spu.log", fs::create + fs::write + fs::append, log);
		}

		// Allocate executable area with necessary size
		const auto result = jit_runtime::alloc(22 + 1 + 9 + ::size32(func.data) * (16 + 16) + 36 + 47, 16);

		if (!result)
		{
			return nullptr;
		}

		m_pos = func.lower_bound;
		m_size = ::size32(func.data) * 4;

		{
			// Derive the 64-bit identity hash from the SHA-1 of the program
			sha1_context ctx;
			u8 output[20];
			sha1_starts(&ctx);
			sha1_update(&ctx, reinterpret_cast<const u8*>(func.data.data()), func.data.size() * 4);
			sha1_finish(&ctx, output);

			be_t<u64> hash_start;
			std::memcpy(&hash_start, output, sizeof(hash_start));
			m_hash_start = hash_start;
		}

		u8* raw = result;

		// 8-byte instruction for patching (long NOP)
		*raw++ = 0x0f;
		*raw++ = 0x1f;
		*raw++ = 0x84;
		*raw++ = 0;
		*raw++ = 0;
		*raw++ = 0;
		*raw++ = 0;
		*raw++ = 0;

		// mov rax, m_hash_start
		*raw++ = 0x48;
		*raw++ = 0xb8;
		std::memcpy(raw, &m_hash_start, sizeof(m_hash_start));
		raw += 8;

		// Update block_hash: mov [r13 + spu_thread::m_block_hash], rax
		*raw++ = 0x49;
		*raw++ = 0x89;
		*raw++ = 0x45;
		*raw++ = ::narrow<s8>(::offset32(&spu_thread::block_hash));

		// Load PC: mov eax, [r13 + spu_thread::pc]
		*raw++ = 0x41;
		*raw++ = 0x8b;
		*raw++ = 0x45;
		*raw++ = ::narrow<s8>(::offset32(&spu_thread::pc));

		// Get LS address starting from PC: lea rcx, [rbp + rax]
		*raw++ = 0x48;
		*raw++ = 0x8d;
		*raw++ = 0x4c;
		*raw++ = 0x05;
		*raw++ = 0x00;

		// Verification (slow): compare each non-zero instruction word in LS
		for (u32 i = 0; i < func.data.size(); i++)
		{
			if (!func.data[i])
			{
				continue;
			}

			// cmp dword ptr [rcx + off], opc
			*raw++ = 0x81;
			*raw++ = 0xb9;
			const u32 off = i * 4;
			const u32 opc = func.data[i];
			std::memcpy(raw + 0, &off, 4);
			std::memcpy(raw + 4, &opc, 4);
			raw += 8;

			// jne tr_dispatch
			const s64 rel = reinterpret_cast<u64>(spu_runtime::tr_dispatch) - reinterpret_cast<u64>(raw) - 6;
			*raw++ = 0x0f;
			*raw++ = 0x85;
			std::memcpy(raw + 0, &rel, 4);
			raw += 4;
		}

		// trap
		//*raw++ = 0xcc;

		// Secondary prologue: sub rsp,0x28
		*raw++ = 0x48;
		*raw++ = 0x83;
		*raw++ = 0xec;
		*raw++ = 0x28;

		// Fix args: xchg r13,rbp
		*raw++ = 0x49;
		*raw++ = 0x87;
		*raw++ = 0xed;

		// mov r12d, eax
		*raw++ = 0x41;
		*raw++ = 0x89;
		*raw++ = 0xc4;

		// mov esi, 0x7f0
		*raw++ = 0xbe;
		*raw++ = 0xf0;
		*raw++ = 0x07;
		*raw++ = 0x00;
		*raw++ = 0x00;

		// lea rdi, [rbp + spu_thread::gpr]
		*raw++ = 0x48;
		*raw++ = 0x8d;
		*raw++ = 0x7d;
		*raw++ =
::narrow<s8>(::offset32(&spu_thread::gpr));

		// Save base pc: mov [rbp + spu_thread::base_pc], eax
		*raw++ = 0x89;
		*raw++ = 0x45;
		*raw++ = ::narrow<s8>(::offset32(&spu_thread::base_pc));

		// inc block_counter
		*raw++ = 0x48;
		*raw++ = 0xff;
		*raw++ = 0x85;
		const u32 blc_off = ::offset32(&spu_thread::block_counter);
		std::memcpy(raw, &blc_off, 4);
		raw += 4;

		// lea r14, [local epilogue]
		*raw++ = 0x4c;
		*raw++ = 0x8d;
		*raw++ = 0x35;
		const u32 epi_off = ::size32(func.data) * 16;
		std::memcpy(raw, &epi_off, 4);
		raw += 4;

		// Instructions (each instruction occupies fixed number of bytes)
		for (u32 i = 0; i < func.data.size(); i++)
		{
			const u32 pos = m_pos + i * 4;

			if (!func.data[i])
			{
				// Zero word: emit "save pc + epilogue + ret", padded to the fixed slot size
				// Save pc: mov [rbp + spu_thread::pc], r12d
				*raw++ = 0x44;
				*raw++ = 0x89;
				*raw++ = 0x65;
				*raw++ = ::narrow<s8>(::offset32(&spu_thread::pc));

				// Epilogue: add rsp,0x28
				*raw++ = 0x48;
				*raw++ = 0x83;
				*raw++ = 0xc4;
				*raw++ = 0x28;

				// ret (TODO)
				*raw++ = 0xc3;
				std::memset(raw, 0xcc, 16 - 9);
				raw += 16 - 9;
				continue;
			}

			// Fix endianness
			const spu_opcode_t op{std::bit_cast<be_t<u32>>(func.data[i])};

			switch (auto type = g_spu_itype.decode(op.opcode))
			{
			case spu_itype::BRZ:
			case spu_itype::BRHZ:
			case spu_itype::BRNZ:
			case spu_itype::BRHNZ:
			{
				const u32 target = spu_branch_target(pos, op.i16);

				// NOTE(review): this direct-branch fast path is disabled by the "0 &&"
				if (0 && target >= m_pos && target < m_pos + m_size)
				{
					*raw++ = type == spu_itype::BRHZ || type == spu_itype::BRHNZ ? 0x66 : 0x90;
					*raw++ = 0x83;
					*raw++ = 0xbd;
					const u32 off = ::offset32(&spu_thread::gpr, op.rt) + 12;
					std::memcpy(raw, &off, 4);
					raw += 4;
					*raw++ = 0x00;
					*raw++ = 0x0f;
					*raw++ = type == spu_itype::BRZ || type == spu_itype::BRHZ ? 0x84 : 0x85;
					const u32 dif = (target - (pos + 4)) / 4 * 16 + 2;
					std::memcpy(raw, &dif, 4);
					raw += 4;
					*raw++ = 0x66;
					*raw++ = 0x90;
					break;
				}

				[[fallthrough]];
			}
			default:
			{
				// Ballast: mov r15d, pos
				*raw++ = 0x41;
				*raw++ = 0xbf;
				std::memcpy(raw, &pos, 4);
				raw += 4;

				// mov ebx, opc
				*raw++ = 0xbb;
				std::memcpy(raw, &op, 4);
				raw += 4;

				// call spu_* (specially built interpreter function)
				const s64 rel = spu_runtime::g_interpreter_table[static_cast<usz>(type)] - reinterpret_cast<u64>(raw) - 5;
				*raw++ = 0xe8;
				std::memcpy(raw, &rel, 4);
				raw += 4;
				break;
			}
			}
		}

		// Local dispatcher/epilogue: fix stack after branch instruction, then dispatch or return

		// add rsp, 8
		*raw++ = 0x48; *raw++ = 0x83; *raw++ = 0xc4; *raw++ = 0x08;

		// and rsp, -16
		*raw++ = 0x48; *raw++ = 0x83; *raw++ = 0xe4; *raw++ = 0xf0;

		// lea rax, [r12 - size]
		*raw++ = 0x49; *raw++ = 0x8d; *raw++ = 0x84; *raw++ = 0x24;
		const u32 msz = 0u - m_size;
		std::memcpy(raw, &msz, 4);
		raw += 4;

		// sub eax, [rbp + spu_thread::base_pc]
		*raw++ = 0x2b; *raw++ = 0x45; *raw++ = ::narrow<s8>(::offset32(&spu_thread::base_pc));

		// cmp eax, (0 - size)
		*raw++ = 0x3d;
		std::memcpy(raw, &msz, 4);
		raw += 4;

		// jb epilogue
		*raw++ = 0x72; *raw++ = +12;

		// movsxd rax, eax
		*raw++ = 0x48; *raw++ = 0x63; *raw++ = 0xc0;

		// shl rax, 2
		*raw++ = 0x48; *raw++ = 0xc1; *raw++ = 0xe0; *raw++ = 0x02;

		// add rax, r14
		*raw++ = 0x4c; *raw++ = 0x01; *raw++ = 0xf0;

		// jmp rax
		*raw++ = 0xff; *raw++ = 0xe0;

		// Save pc: mov [rbp + spu_thread::pc], r12d
		*raw++ = 0x44; *raw++ = 0x89; *raw++ = 0x65; *raw++ = ::narrow<s8>(::offset32(&spu_thread::pc));

		// Epilogue: add rsp,0x28 ; ret
		*raw++ = 0x48; *raw++ = 0x83; *raw++ = 0xc4; *raw++ = 0x28; *raw++ = 0xc3;

		const auto fn = reinterpret_cast<spu_function_t>(result);

		// Install pointer carefully
		const bool added = !add_loc->compiled && add_loc->compiled.compare_and_swap_test(nullptr, fn);

		// Check hash against allowed bounds
		const bool inverse_bounds = g_cfg.core.spu_llvm_lower_bound > g_cfg.core.spu_llvm_upper_bound;

		if
((!inverse_bounds && (m_hash_start < g_cfg.core.spu_llvm_lower_bound || m_hash_start > g_cfg.core.spu_llvm_upper_bound)) || (inverse_bounds && (m_hash_start < g_cfg.core.spu_llvm_lower_bound && m_hash_start > g_cfg.core.spu_llvm_upper_bound))) { spu_log.error("[Debug] Skipped function %s", fmt::base57(be_t<u64>{m_hash_start})); } else if (added) { // Send work to LLVM compiler thread g_fxo->get<spu_llvm_thread>().registered.push(m_hash_start, add_loc); } // Rebuild trampoline if necessary if (!m_spurt->rebuild_ubertrampoline(func.data[0])) { return nullptr; } if (added) { add_loc->compiled.notify_all(); } return fn; } }; std::unique_ptr<spu_recompiler_base> spu_recompiler_base::make_fast_llvm_recompiler() { return std::make_unique<spu_fast>(); } std::array<reg_state_t, s_reg_max>& block_reg_info::evaluate_start_state(const std::map<u32, std::unique_ptr<block_reg_info>>& map, bool extensive_evaluation) { if (!has_true_state) { std::array<reg_state_t, s_reg_max> temp; std::vector<u32> been_there; struct iterator_info { u32 block_pc = SPU_LS_SIZE; struct state_t { u32 block_pc = SPU_LS_SIZE; std::array<reg_state_t, s_reg_max> reg_state; bool disconnected = false; bool state_written = false; }; std::vector<state_t> state_prev; usz completed = 0; usz parent_iterator_index = umax; usz parent_state_index = umax; }; std::vector<iterator_info> info_queue; iterator_info first_entry{pc, {}, 0, umax, umax}; info_queue.emplace_back(std::move(first_entry)); // info_queue may grow for (usz qi = 0; qi < info_queue.size();) { const auto it = std::addressof(info_queue[qi]); ensure(qi == info_queue.size() - 1); auto& cur_node = ::at32(map, it->block_pc); ensure(it->parent_iterator_index == qi - 1); if (cur_node->has_true_state) { // Evaluted somewhen before if (qi != 0) { ensure(!been_there.empty()); been_there.pop_back(); info_queue.pop_back(); qi--; continue; } else { break; } } if (it->state_prev.empty()) { // Build the list here to avoid code duplication const usz real_size = 
cur_node->prev_nodes.size(); if (real_size) { it->state_prev.resize(real_size); for (usz i = 0; i < real_size; i++) { it->state_prev[i].block_pc = cur_node->prev_nodes[i].prev_pc; } } } const usz next_entry_idx = it->completed; if (next_entry_idx == it->state_prev.size()) { // Result merge from all predecessors // Flag to mark the state as resolved bool is_all_resolved = true; bool has_past_state = false; for (usz bi = 0; bi < it->state_prev.size(); bi++) { if (it->state_prev[bi].disconnected) { is_all_resolved = false; continue; } has_past_state = true; const u32 node_pc = it->state_prev[bi].block_pc; const auto& node = ::at32(map, node_pc); // Check if the node is resolved if (!node->has_true_state) { // Assume this block cannot be resolved at the moment is_all_resolved = false; break; } } if (qi == 0) { // TODO: First block is always resolved here, but this logic can be improved to detect more cases of opportunistic resolving is_all_resolved = true; } auto& res_state = is_all_resolved ? 
cur_node->start_reg_state : temp; for (usz bi = 0; bi < it->state_prev.size(); bi++) { if (it->state_prev[bi].disconnected) { // Loop state, even if not ignored for a million times the result would still be the same // So ignore it continue; } std::array<reg_state_t, s_reg_max>* arg_state{}; const auto& node = ::at32(map, it->state_prev[bi].block_pc); if (node->has_true_state) { // State is resolved, use the entry's state arg_state = std::addressof(node->end_reg_state); } else { // Use accumulated state from one path of code history arg_state = std::addressof(it->state_prev[bi].reg_state); ensure(it->state_prev[bi].state_written); } if (bi == 0) { res_state = *arg_state; } else { merge(res_state, res_state, *arg_state, it->block_pc); } } std::array<reg_state_t, s_reg_max>* result_storage{}; if (is_all_resolved) { // Complete state of this block result_storage = std::addressof(cur_node->end_reg_state); cur_node->has_true_state = true; } else { // Patch incomplete state into saved state entry of parent block ensure(it->parent_iterator_index != qi); ensure(it->parent_iterator_index != umax); auto& state_vec = ::at32(info_queue, it->parent_iterator_index).state_prev; auto& state = ::at32(state_vec, it->parent_state_index); ensure(state.block_pc == it->block_pc); result_storage = std::addressof(state.reg_state); ensure(!state.state_written); state.state_written = true; } // Stack the newer state on top of the old (if exists) if (has_past_state) { build_on_top_of(*result_storage, cur_node->addend_reg_state, res_state); } else { *result_storage = cur_node->addend_reg_state; } if (qi != 0) { ensure(!been_there.empty()); been_there.pop_back(); info_queue.pop_back(); qi--; } else { ensure(cur_node->has_true_state); break; } } else { const u32 prev_pc = cur_node->prev_nodes[it->completed++].prev_pc; const auto& prev_node = ::at32(map, prev_pc); // Queue for resolving if needed if (!prev_node->has_true_state) { // TODO: The true maximum occurence count need to depend on the 
amount of branching-outs passed through // Currently allow 2 for short-term code and 1 for long-term code const bool loop_terminator_detected = std::count(been_there.begin(), been_there.end(), prev_pc) >= (qi < 20 ? 2u : 1u); const bool avoid_extensive_analysis = qi >= (extensive_evaluation ? 22 : 16); if (!loop_terminator_detected && !avoid_extensive_analysis) { info_queue.emplace_back(iterator_info{prev_pc, {}, 0, qi, it->completed - 1}); been_there.push_back(prev_pc); qi++; } else { auto& state = ::at32(it->state_prev, it->completed - 1); // Loop state, even if not ignored for a million times the result would be the same // This is similar to multiplying zero a million times // This is true at least for now, that any register difference is considered an unknown state change // So ignore it ensure(state.block_pc == prev_pc); ensure(!state.disconnected); state.disconnected = true; // Repeat // qi += 0; } } else { // Repeat // qi += 0; } } } ensure(has_true_state); } walkby_state = start_reg_state; return walkby_state; } void spu_recompiler_base::add_pattern(bool fill_all, inst_attr attr, u32 start, u32 end) { if (end == umax) { end = start; } m_patterns[start] = pattern_info{utils::address_range::start_end(start, end)}; for (u32 i = start; i <= (fill_all ? end : start); i += 4) { m_inst_attrs[i / 4] = attr; } } extern std::string format_spu_func_info(u32 addr, cpu_thread* spu) { spu_thread* _spu = static_cast<spu_thread*>(spu); std::unique_ptr<spu_recompiler_base> compiler = spu_recompiler_base::make_asmjit_recompiler(); compiler->init(); auto func = compiler->analyse(reinterpret_cast<const be_t<u32>*>(_spu->ls), addr); std::string info; { sha1_context ctx; u8 output[20]; sha1_starts(&ctx); sha1_update(&ctx, reinterpret_cast<const u8*>(func.data.data()), func.data.size() * 4); sha1_finish(&ctx, output); fmt::append(info, "size=%d, end=0x%x, hash=%s", func.data.size(), addr + func.data.size() * 4, fmt::base57(output)); } return info; }
195,798
C++
.cpp
6,994
23.658422
236
0.606159
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
5,182
PPUThread.cpp
RPCS3_rpcs3/rpcs3/Emu/Cell/PPUThread.cpp
#include "stdafx.h" #include "Utilities/JIT.h" #include "Utilities/StrUtil.h" #include "util/serialization.hpp" #include "Crypto/sha1.h" #include "Crypto/unself.h" #include "Loader/ELF.h" #include "Loader/mself.hpp" #include "Emu/localized_string.h" #include "Emu/perf_meter.hpp" #include "Emu/Memory/vm_reservation.h" #include "Emu/Memory/vm_locking.h" #include "Emu/RSX/Core/RSXReservationLock.hpp" #include "Emu/VFS.h" #include "Emu/vfs_config.h" #include "Emu/system_progress.hpp" #include "Emu/system_utils.hpp" #include "PPUThread.h" #include "PPUInterpreter.h" #include "PPUAnalyser.h" #include "PPUModule.h" #include "PPUDisAsm.h" #include "SPURecompiler.h" #include "timers.hpp" #include "lv2/sys_sync.h" #include "lv2/sys_prx.h" #include "lv2/sys_overlay.h" #include "lv2/sys_process.h" #include "lv2/sys_spu.h" #ifdef LLVM_AVAILABLE #ifdef _MSC_VER #pragma warning(push, 0) #else #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wold-style-cast" #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wmissing-noreturn" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #endif #include <llvm/IR/Verifier.h> #include <llvm/Transforms/Utils/BasicBlockUtils.h> #if LLVM_VERSION_MAJOR < 17 #include <llvm/Support/FormattedStream.h> #include <llvm/TargetParser/Host.h> #include <llvm/Object/ObjectFile.h> #include <llvm/IR/InstIterator.h> #include <llvm/IR/LegacyPassManager.h> #include <llvm/Transforms/Scalar.h> #else #include <llvm/Analysis/CGSCCPassManager.h> #include <llvm/Analysis/LoopAnalysisManager.h> #include <llvm/Passes/PassBuilder.h> #include <llvm/Transforms/Scalar/EarlyCSE.h> #endif #ifdef _MSC_VER #pragma warning(pop) #else #pragma GCC diagnostic pop #endif #include "PPUTranslator.h" #endif #include <cfenv> #include <cctype> #include <span> #include <optional> #include "util/asm.hpp" #include "util/vm.hpp" #include "util/v128.hpp" #include "util/simd.hpp" #include "util/sysinfo.hpp" #ifdef __APPLE__ #include 
<libkern/OSCacheControl.h> #endif extern atomic_t<u64> g_watchdog_hold_ctr; // Should be of the same type using spu_rdata_t = decltype(ppu_thread::rdata); extern void mov_rdata(spu_rdata_t& _dst, const spu_rdata_t& _src); extern void mov_rdata_nt(spu_rdata_t& _dst, const spu_rdata_t& _src); extern bool cmp_rdata(const spu_rdata_t& _lhs, const spu_rdata_t& _rhs); // Verify AVX availability for TSX transactions static const bool s_tsx_avx = utils::has_avx(); template <> void fmt_class_string<ppu_join_status>::format(std::string& out, u64 arg) { format_enum(out, arg, [](ppu_join_status js) { switch (js) { case ppu_join_status::joinable: return "none"; case ppu_join_status::detached: return "detached"; case ppu_join_status::zombie: return "zombie"; case ppu_join_status::exited: return "exited"; case ppu_join_status::max: break; } return unknown; }); } template <> void fmt_class_string<ppu_thread_status>::format(std::string& out, u64 arg) { format_enum(out, arg, [](ppu_thread_status s) { switch (s) { case PPU_THREAD_STATUS_IDLE: return "IDLE"; case PPU_THREAD_STATUS_RUNNABLE: return "RUN"; case PPU_THREAD_STATUS_ONPROC: return "ONPROC"; case PPU_THREAD_STATUS_SLEEP: return "SLEEP"; case PPU_THREAD_STATUS_STOP: return "STOP"; case PPU_THREAD_STATUS_ZOMBIE: return "Zombie"; case PPU_THREAD_STATUS_DELETED: return "Deleted"; case PPU_THREAD_STATUS_UNKNOWN: break; } return unknown; }); } template <> void fmt_class_string<typename ppu_thread::call_history_t>::format(std::string& out, u64 arg) { const auto& history = get_object(arg); PPUDisAsm dis_asm(cpu_disasm_mode::normal, vm::g_sudo_addr); for (u64 count = 0, idx = history.index - 1; idx != umax && count < history.data.size(); count++, idx--) { const u32 pc = history.data[idx % history.data.size()]; dis_asm.disasm(pc); fmt::append(out, "\n(%u) 0x%08x: %s", count, pc, dis_asm.last_opcode); } } template <> void fmt_class_string<typename ppu_thread::syscall_history_t>::format(std::string& out, u64 arg) { const auto& history = 
get_object(arg); for (u64 count = 0, idx = history.index - 1; idx != umax && count < history.data.size(); count++, idx--) { const auto& entry = history.data[idx % history.data.size()]; fmt::append(out, "\n(%u) 0x%08x: %s, 0x%x, r3=0x%x, r4=0x%x, r5=0x%x, r6=0x%x", count, entry.cia, entry.func_name, entry.error, entry.args[0], entry.args[1], entry.args[2], entry.args[3]); } } extern const ppu_decoder<ppu_itype> g_ppu_itype{}; extern const ppu_decoder<ppu_iname> g_ppu_iname{}; template <> bool serialize<ppu_thread::cr_bits>(utils::serial& ar, typename ppu_thread::cr_bits& o) { if (ar.is_writing()) { ar(o.pack()); } else { o.unpack(ar); } return true; } extern void ppu_initialize(); extern void ppu_finalize(const ppu_module& info, bool force_mem_release = false); extern bool ppu_initialize(const ppu_module& info, bool check_only = false, u64 file_size = 0); static void ppu_initialize2(class jit_compiler& jit, const ppu_module& module_part, const std::string& cache_path, const std::string& obj_name, const ppu_module& whole_module); extern bool ppu_load_exec(const ppu_exec_object&, bool virtual_load, const std::string&, utils::serial* = nullptr); extern std::pair<std::shared_ptr<lv2_overlay>, CellError> ppu_load_overlay(const ppu_exec_object&, bool virtual_load, const std::string& path, s64 file_offset, utils::serial* = nullptr); extern void ppu_unload_prx(const lv2_prx&); extern std::shared_ptr<lv2_prx> ppu_load_prx(const ppu_prx_object&, bool virtual_load, const std::string&, s64 file_offset, utils::serial* = nullptr); extern void ppu_execute_syscall(ppu_thread& ppu, u64 code); static void ppu_break(ppu_thread&, ppu_opcode_t, be_t<u32>*, ppu_intrp_func*); extern void do_cell_atomic_128_store(u32 addr, const void* to_write); const auto ppu_gateway = build_function_asm<void(*)(ppu_thread*)>("ppu_gateway", [](native_asm& c, auto& args) { // Gateway for PPU, converts from native to GHC calling convention, also saves RSP value for escape using namespace asmjit; #if 
defined(ARCH_X64) #ifdef _WIN32 c.push(x86::r15); c.push(x86::r14); c.push(x86::r13); c.push(x86::r12); c.push(x86::rsi); c.push(x86::rdi); c.push(x86::rbp); c.push(x86::rbx); c.sub(x86::rsp, 0xa8); c.movaps(x86::oword_ptr(x86::rsp, 0x90), x86::xmm15); c.movaps(x86::oword_ptr(x86::rsp, 0x80), x86::xmm14); c.movaps(x86::oword_ptr(x86::rsp, 0x70), x86::xmm13); c.movaps(x86::oword_ptr(x86::rsp, 0x60), x86::xmm12); c.movaps(x86::oword_ptr(x86::rsp, 0x50), x86::xmm11); c.movaps(x86::oword_ptr(x86::rsp, 0x40), x86::xmm10); c.movaps(x86::oword_ptr(x86::rsp, 0x30), x86::xmm9); c.movaps(x86::oword_ptr(x86::rsp, 0x20), x86::xmm8); c.movaps(x86::oword_ptr(x86::rsp, 0x10), x86::xmm7); c.movaps(x86::oword_ptr(x86::rsp, 0), x86::xmm6); #else c.push(x86::rbp); c.push(x86::r15); c.push(x86::r14); c.push(x86::r13); c.push(x86::r12); c.push(x86::rbx); c.push(x86::rax); #endif // Save native stack pointer for longjmp emulation c.mov(x86::qword_ptr(args[0], ::offset32(&ppu_thread::hv_ctx, &rpcs3::hypervisor_context_t::regs)), x86::rsp); // Initialize args c.mov(x86::r13, x86::qword_ptr(reinterpret_cast<u64>(&vm::g_exec_addr))); c.mov(x86::rbp, args[0]); c.mov(x86::edx, x86::dword_ptr(x86::rbp, ::offset32(&ppu_thread::cia))); // Load PC c.mov(x86::rax, x86::qword_ptr(x86::r13, x86::edx, 1, 0)); // Load call target c.mov(x86::rdx, x86::rax); c.shl(x86::rax, 16); c.shr(x86::rax, 16); c.shr(x86::rdx, 48); c.shl(x86::edx, 13); c.mov(x86::r12d, x86::edx); // Load relocation base c.mov(x86::rbx, x86::qword_ptr(reinterpret_cast<u64>(&vm::g_base_addr))); c.mov(x86::r14, x86::qword_ptr(x86::rbp, ::offset32(&ppu_thread::gpr, 0))); // Load some registers c.mov(x86::rsi, x86::qword_ptr(x86::rbp, ::offset32(&ppu_thread::gpr, 1))); c.mov(x86::rdi, x86::qword_ptr(x86::rbp, ::offset32(&ppu_thread::gpr, 2))); if (utils::has_avx()) { c.vzeroupper(); } c.call(x86::rax); if (utils::has_avx()) { c.vzeroupper(); } #ifdef _WIN32 c.movaps(x86::xmm6, x86::oword_ptr(x86::rsp, 0)); c.movaps(x86::xmm7, 
x86::oword_ptr(x86::rsp, 0x10)); c.movaps(x86::xmm8, x86::oword_ptr(x86::rsp, 0x20)); c.movaps(x86::xmm9, x86::oword_ptr(x86::rsp, 0x30)); c.movaps(x86::xmm10, x86::oword_ptr(x86::rsp, 0x40)); c.movaps(x86::xmm11, x86::oword_ptr(x86::rsp, 0x50)); c.movaps(x86::xmm12, x86::oword_ptr(x86::rsp, 0x60)); c.movaps(x86::xmm13, x86::oword_ptr(x86::rsp, 0x70)); c.movaps(x86::xmm14, x86::oword_ptr(x86::rsp, 0x80)); c.movaps(x86::xmm15, x86::oword_ptr(x86::rsp, 0x90)); c.add(x86::rsp, 0xa8); c.pop(x86::rbx); c.pop(x86::rbp); c.pop(x86::rdi); c.pop(x86::rsi); c.pop(x86::r12); c.pop(x86::r13); c.pop(x86::r14); c.pop(x86::r15); #else c.add(x86::rsp, +8); c.pop(x86::rbx); c.pop(x86::r12); c.pop(x86::r13); c.pop(x86::r14); c.pop(x86::r15); c.pop(x86::rbp); #endif c.ret(); #else // See https://github.com/ghc/ghc/blob/master/rts/include/stg/MachRegs.h // for GHC calling convention definitions on Aarch64 // and https://developer.arm.com/documentation/den0024/a/The-ABI-for-ARM-64-bit-Architecture/Register-use-in-the-AArch64-Procedure-Call-Standard/Parameters-in-general-purpose-registers // for AArch64 calling convention // PPU function argument layout: // x19 = m_exec // x20 = m_thread, // x21 = seg0 // x22 = m_base // x23 - x25 = gpr[0] - gpr[3] // Push callee saved registers to the hv context // Assume our LLVM compiled code is unsafe and can clobber our stack. GHC on aarch64 treats stack as scratch. // We also want to store the register context at a fixed place so we can read the hypervisor state from any lcoation. // We need to save x18-x30 = 13 x 8B each + 8 bytes for 16B alignment = 112B // Pre-context save // Layout: // pc, sp // x18, x19...x30 // NOTE: Do not touch x19..x30 before saving the registers! 
const u64 hv_register_array_offset = ::offset32(&ppu_thread::hv_ctx, &rpcs3::hypervisor_context_t::regs); Label hv_ctx_pc = c.newLabel(); // Used to hold the far jump return address // Sanity ensure(hv_register_array_offset < 4096); // Imm10 c.mov(a64::x15, args[0]); c.add(a64::x14, a64::x15, Imm(hv_register_array_offset)); // Per-thread context save c.adr(a64::x15, hv_ctx_pc); // x15 = pc c.mov(a64::x13, a64::sp); // x16 = sp c.stp(a64::x15, a64::x13, arm::Mem(a64::x14)); c.stp(a64::x18, a64::x19, arm::Mem(a64::x14, 16)); c.stp(a64::x20, a64::x21, arm::Mem(a64::x14, 32)); c.stp(a64::x22, a64::x23, arm::Mem(a64::x14, 48)); c.stp(a64::x24, a64::x25, arm::Mem(a64::x14, 64)); c.stp(a64::x26, a64::x27, arm::Mem(a64::x14, 80)); c.stp(a64::x28, a64::x29, arm::Mem(a64::x14, 96)); c.str(a64::x30, arm::Mem(a64::x14, 112)); // Load REG_Base - use absolute jump target to bypass rel jmp range limits c.mov(a64::x19, Imm(reinterpret_cast<u64>(&vm::g_exec_addr))); c.ldr(a64::x19, arm::Mem(a64::x19)); // Load PPUThread struct base -> REG_Sp const arm::GpX ppu_t_base = a64::x20; c.mov(ppu_t_base, args[0]); // Load PC const arm::GpX pc = a64::x15; const arm::GpX cia_addr_reg = a64::x11; // Load offset value c.mov(cia_addr_reg, Imm(static_cast<u64>(::offset32(&ppu_thread::cia)))); // Load cia c.ldr(a64::w15, arm::Mem(ppu_t_base, cia_addr_reg)); // Multiply by 2 to index into ptr table const arm::GpX index_shift = a64::x12; c.mov(index_shift, Imm(2)); c.mul(pc, pc, index_shift); // Load call target const arm::GpX call_target = a64::x13; c.ldr(call_target, arm::Mem(a64::x19, pc)); // Compute REG_Hp const arm::GpX reg_hp = a64::x21; c.mov(reg_hp, call_target); c.lsr(reg_hp, reg_hp, 48); c.lsl(a64::w21, a64::w21, 13); // Zero top 16 bits of call target c.lsl(call_target, call_target, Imm(16)); c.lsr(call_target, call_target, Imm(16)); // Load registers c.mov(a64::x22, Imm(reinterpret_cast<u64>(&vm::g_base_addr))); c.ldr(a64::x22, arm::Mem(a64::x22)); const arm::GpX gpr_addr_reg = 
a64::x9; c.mov(gpr_addr_reg, Imm(static_cast<u64>(::offset32(&ppu_thread::gpr)))); c.add(gpr_addr_reg, gpr_addr_reg, ppu_t_base); c.ldr(a64::x23, arm::Mem(gpr_addr_reg)); c.ldr(a64::x24, arm::Mem(gpr_addr_reg, 8)); c.ldr(a64::x25, arm::Mem(gpr_addr_reg, 16)); // Thread context save. This is needed for PPU because different functions can switch between x19 and x20 for the base register. // We need a different solution to ensure that no matter which version, we get the right vaue on far return. c.mov(a64::x26, ppu_t_base); // Save thread pointer to stack. SP is the only register preserved across GHC calls. c.sub(a64::sp, a64::sp, Imm(16)); c.str(a64::x20, arm::Mem(a64::sp)); // GHC scratchpad mem. If managed correctly (i.e no returns ever), GHC functions should never require a stack frame. // We allocate a slab to use for all functions as they tail-call into each other. c.sub(a64::sp, a64::sp, Imm(8192)); // Execute LLE call c.blr(call_target); // Return address after far jump. Reset sp and start unwinding... 
c.bind(hv_ctx_pc); // Clear scratchpad allocation c.add(a64::sp, a64::sp, Imm(8192)); c.ldr(a64::x20, arm::Mem(a64::sp)); c.add(a64::sp, a64::sp, Imm(16)); // We either got here through normal "ret" which keeps our x20 intact, or we jumped here and the escape reset our x20 reg // Either way, x26 contains our thread base and we forcefully reset the stack pointer c.add(a64::x14, a64::x20, Imm(hv_register_array_offset)); // Per-thread context save c.ldr(a64::x15, arm::Mem(a64::x14, 8)); c.ldp(a64::x18, a64::x19, arm::Mem(a64::x14, 16)); c.ldp(a64::x20, a64::x21, arm::Mem(a64::x14, 32)); c.ldp(a64::x22, a64::x23, arm::Mem(a64::x14, 48)); c.ldp(a64::x24, a64::x25, arm::Mem(a64::x14, 64)); c.ldp(a64::x26, a64::x27, arm::Mem(a64::x14, 80)); c.ldp(a64::x28, a64::x29, arm::Mem(a64::x14, 96)); c.ldr(a64::x30, arm::Mem(a64::x14, 112)); // Return c.mov(a64::sp, a64::x15); c.ret(a64::x30); #endif }); const extern auto ppu_escape = build_function_asm<void(*)(ppu_thread*)>("ppu_escape", [](native_asm& c, auto& args) { using namespace asmjit; #if defined(ARCH_X64) // Restore native stack pointer (longjmp emulation) c.mov(x86::rsp, x86::qword_ptr(args[0], ::offset32(&ppu_thread::hv_ctx, &rpcs3::hypervisor_context_t::regs))); // Return to the return location c.sub(x86::rsp, 8); c.ret(); #else // We really shouldn't be using this, but an implementation shoudln't hurt // Far jump return. Only clobbers x30. 
	// Tail of the ARM64 path of ppu_escape: fetch the saved host return address
	// out of ppu_thread::hv_ctx.regs and perform a far return (clobbers only x30).
	const arm::GpX ppu_t_base = a64::x20;
	const u64 hv_register_array_offset = ::offset32(&ppu_thread::hv_ctx, &rpcs3::hypervisor_context_t::regs);
	c.mov(ppu_t_base, args[0]);
	c.mov(a64::x30, Imm(hv_register_array_offset));
	c.ldr(a64::x30, arm::Mem(ppu_t_base, a64::x30));
	c.ret(a64::x30);
#endif
});

// Interpreter-side fallback; forward-declared so the GHC trampoline below can tail-jump to it.
void ppu_recompiler_fallback(ppu_thread& ppu);

#if defined(ARCH_X64)
// Trampoline translating the GHC calling convention (thread context kept in RBP)
// into the regular ABI expected by ppu_recompiler_fallback.
const auto ppu_recompiler_fallback_ghc = build_function_asm<void(*)(ppu_thread& ppu)>("", [](native_asm& c, auto& args)
{
	using namespace asmjit;

	// Move the ppu_thread pointer from RBP into the first argument register, then tail-jump
	c.mov(args[0], x86::rbp);
	c.jmp(ppu_recompiler_fallback);
});
#elif defined(ARCH_ARM64)
const auto ppu_recompiler_fallback_ghc = &ppu_recompiler_fallback;
#endif

// Get pointer to executable cache (the cache reserves 2 bytes per guest byte: 8 bytes per 4-byte instruction)
static inline u8* ppu_ptr(u32 addr)
{
	return vm::g_exec_addr + u64{addr} * 2;
}

// Read the cached dispatch handler for the given guest address
static inline ppu_intrp_func_t ppu_read(u32 addr)
{
	return read_from_ptr<ppu_intrp_func_t>(ppu_ptr(addr));
}

// Get interpreter cache value: decode the instruction at addr with the static interpreter
static ppu_intrp_func_t ppu_cache(u32 addr)
{
	// Only meaningful for the static interpreter decoder
	if (g_cfg.core.ppu_decoder != ppu_decoder_type::_static)
	{
		fmt::throw_exception("Invalid PPU decoder");
	}

	return g_fxo->get<ppu_interpreter_rt>().decode(vm::read32(addr));
}

// Sentinel "next" handler: stops dispatch after one instruction (single-step execution)
static ppu_intrp_func ppu_ret = {[](ppu_thread& ppu, ppu_opcode_t, be_t<u32>* this_op, ppu_intrp_func*)
{
	// Fix PC and return (step execution)
	ppu.cia = vm::get_addr(this_op);
	return;
}};

// Lazy cache fill: decode the instruction once, store the handler, then execute it
static void ppu_fallback(ppu_thread& ppu, ppu_opcode_t op, be_t<u32>* this_op, ppu_intrp_func* next_fn)
{
	const auto _pc = vm::get_addr(this_op);
	const auto _fn = ppu_cache(_pc);
	write_to_ptr<ppu_intrp_func_t>(ppu_ptr(_pc), _fn);
	return _fn(ppu, op, this_op, next_fn);
}

// TODO: Make this a dispatch call
// Interpret instructions one at a time until a recompiled function shows up at CIA
void ppu_recompiler_fallback(ppu_thread& ppu)
{
	perf_meter<"PPUFALL1"_u64> perf0;

	if (g_cfg.core.ppu_debug)
	{
		ppu_log.error("Unregistered PPU Function (LR=0x%x)", ppu.lr);
	}

	const auto& table = g_fxo->get<ppu_interpreter_rt>();

	while (true)
	{
		// Compare only the low 48 bits: the upper bits of a cache entry carry segment info (see ppu_register_range)
		if (uptr func = uptr(ppu_read(ppu.cia)); (func << 16 >> 16) !=
reinterpret_cast<uptr>(ppu_recompiler_fallback_ghc))
		{
			// We found a recompiler function at cia, return
			break;
		}

		// Run one instruction in interpreter (TODO)
		const u32 op = vm::read32(ppu.cia);
		table.decode(op)(ppu, {op}, vm::_ptr<u32>(ppu.cia), &ppu_ret);

		if (ppu.test_stopped())
		{
			break;
		}
	}
}

// Interpret instructions one at a time until the thread leaves its reservation
// (only relevant while a full-rdata reservation is held — see the exit condition below)
void ppu_reservation_fallback(ppu_thread& ppu)
{
	perf_meter<"PPUFALL2"_u64> perf0;

	const auto& table = g_fxo->get<ppu_interpreter_rt>();

	while (true)
	{
		// Run one instruction in interpreter (TODO)
		const u32 op = vm::read32(ppu.cia);
		table.decode(op)(ppu, {op}, vm::_ptr<u32>(ppu.cia), &ppu_ret);

		if (!ppu.raddr || !ppu.use_full_rdata)
		{
			// We've escaped from reservation, return.
			return;
		}

		if (ppu.test_stopped())
		{
			return;
		}
	}
}

// Load a u32 from guest memory, routing RawSPU problem-state register reads through read_reg.
// Plain memory (including RawSPU LS) falls through to the direct read at the bottom.
u32 ppu_read_mmio_aware_u32(u8* vm_base, u32 eal)
{
	if (eal >= RAW_SPU_BASE_ADDR)
	{
		// RawSPU MMIO
		auto thread = idm::get<named_thread<spu_thread>>(spu_thread::find_raw_spu((eal - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET));

		if (!thread)
		{
			// Access Violation
		}
		else if ((eal - RAW_SPU_BASE_ADDR) % RAW_SPU_OFFSET + sizeof(u32) - 1 < SPU_LS_SIZE) // LS access
		{
			// Local storage: plain memory, handled by the fallthrough read below
		}
		else if (u32 value{}; thread->read_reg(eal, value))
		{
			return std::bit_cast<be_t<u32>>(value);
		}
		else
		{
			fmt::throw_exception("Invalid RawSPU MMIO offset (addr=0x%x)", eal);
		}
	}

	// Value is assumed to be swapped
	return read_from_ptr<u32>(vm_base + eal);
}

// Store a u32 to guest memory, routing RawSPU problem-state register writes through write_reg.
// Plain memory (including RawSPU LS) falls through to the direct write at the bottom.
void ppu_write_mmio_aware_u32(u8* vm_base, u32 eal, u32 value)
{
	if (eal >= RAW_SPU_BASE_ADDR)
	{
		// RawSPU MMIO
		auto thread = idm::get<named_thread<spu_thread>>(spu_thread::find_raw_spu((eal - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET));

		if (!thread)
		{
			// Access Violation
		}
		else if ((eal - RAW_SPU_BASE_ADDR) % RAW_SPU_OFFSET + sizeof(u32) - 1 < SPU_LS_SIZE) // LS access
		{
			// Local storage: plain memory, handled by the fallthrough write below
		}
		else if (thread->write_reg(eal, std::bit_cast<be_t<u32>>(value)))
		{
			return;
		}
		else
		{
			fmt::throw_exception("Invalid RawSPU MMIO offset (addr=0x%x)", eal);
		}
	}

	// Value is assumed swapped
	write_to_ptr<u32>(vm_base + eal, value);
}

extern bool ppu_test_address_may_be_mmio(std::span<const
be_t<u32>> insts) { std::set<u32> reg_offsets; bool found_raw_spu_base = false; bool found_spu_area_offset_element = false; for (u32 inst : insts) { // Common around MMIO (orders IO) if (inst == ppu_instructions::EIEIO()) { return true; } const u32 op_imm16 = (inst & 0xfc00ffff); // RawSPU MMIO base // 0xe00000000 is a common constant so try to find an ORIS 0x10 or ADDIS 0x10 nearby (for multiplying SPU ID by it) if (op_imm16 == ppu_instructions::ADDIS({}, {}, -0x2000) || op_imm16 == ppu_instructions::ORIS({}, {}, 0xe000) || op_imm16 == ppu_instructions::XORIS({}, {}, 0xe000)) { found_raw_spu_base = true; if (found_spu_area_offset_element) { // Found both return true; } } else if (op_imm16 == ppu_instructions::ORIS({}, {}, 0x10) || op_imm16 == ppu_instructions::ADDIS({}, {}, 0x10)) { found_spu_area_offset_element = true; if (found_raw_spu_base) { // Found both return true; } } // RawSPU MMIO base + problem state offset else if (op_imm16 == ppu_instructions::ADDIS({}, {}, -0x1ffc)) { return true; } else if (op_imm16 == ppu_instructions::ORIS({}, {}, 0xe004)) { return true; } else if (op_imm16 == ppu_instructions::XORIS({}, {}, 0xe004)) { return true; } // RawSPU MMIO base + problem state offset + 64k of SNR1 offset else if (op_imm16 == ppu_instructions::ADDIS({}, {}, -0x1ffb)) { return true; } else if (op_imm16 == ppu_instructions::ORIS({}, {}, 0xe005)) { return true; } else if (op_imm16 == ppu_instructions::XORIS({}, {}, 0xe005)) { return true; } // RawSPU MMIO base + problem state offset + 264k of SNR2 offset (STW allows 32K+- offset so in order to access SNR2 it needs to first add another 64k) // SNR2 is the only register currently implemented that has its 0x80000 bit is set so its the only one its hardcoded access is done this way else if (op_imm16 == ppu_instructions::ADDIS({}, {}, -0x1ffa)) { return true; } else if (op_imm16 == ppu_instructions::ORIS({}, {}, 0xe006)) { return true; } else if (op_imm16 == ppu_instructions::XORIS({}, {}, 0xe006)) { return true; 
} // Try to detect a function that receives RawSPU problem state base pointer as an argument else if ((op_imm16 & ~0xffff) == ppu_instructions::LWZ({}, {}, 0) || (op_imm16 & ~0xffff) == ppu_instructions::STW({}, {}, 0) || (op_imm16 & ~0xffff) == ppu_instructions::ADDI({}, {}, 0)) { const bool is_load = (op_imm16 & ~0xffff) == ppu_instructions::LWZ({}, {}, 0); const bool is_store = (op_imm16 & ~0xffff) == ppu_instructions::STW({}, {}, 0); const bool is_neither = !is_store && !is_load; const bool is_snr = (is_store || is_neither) && ((op_imm16 & 0xffff) == (SPU_RdSigNotify2_offs & 0xffff) || (op_imm16 & 0xffff) == (SPU_RdSigNotify1_offs & 0xffff)); if (is_snr || spu_thread::test_is_problem_state_register_offset(op_imm16 & 0xffff, is_load || is_neither, is_store || is_neither)) { reg_offsets.insert(op_imm16 & 0xffff); if (reg_offsets.size() >= 2) { // Assume high MMIO likelyhood if more than one offset appears in nearby code // Such as common IN_MBOX + OUT_MBOX return true; } } } } return false; } struct ppu_toc_manager { std::unordered_map<u32, u32> toc_map; shared_mutex mutex; }; static void ppu_check_toc(ppu_thread& ppu, ppu_opcode_t op, be_t<u32>* this_op, ppu_intrp_func* next_fn) { ppu.cia = vm::get_addr(this_op); { auto& toc_manager = g_fxo->get<ppu_toc_manager>(); reader_lock lock(toc_manager.mutex); auto& ppu_toc = toc_manager.toc_map; const auto found = ppu_toc.find(ppu.cia); if (found != ppu_toc.end()) { const u32 toc = atomic_storage<u32>::load(found->second); // Compare TOC with expected value if (toc != umax && ppu.gpr[2] != toc) { ppu_log.error("Unexpected TOC (0x%x, expected 0x%x)", ppu.gpr[2], toc); atomic_storage<u32>::exchange(found->second, u32{umax}); } } } // Fallback to the interpreter function return ppu_cache(ppu.cia)(ppu, op, this_op, next_fn); } extern void ppu_register_range(u32 addr, u32 size) { if (!size) { ppu_log.error("ppu_register_range(0x%x): empty range", addr); return; } size = utils::align(size + addr % 0x10000, 0x10000); addr &= 
-0x10000;

	// Register executable range at
	utils::memory_commit(ppu_ptr(addr), u64{size} * 2, utils::protection::rw);
	ensure(vm::page_protect(addr, size, 0, vm::page_executable));

	if (g_cfg.core.ppu_debug)
	{
		// Commit the per-address statistics area as well
		utils::memory_commit(vm::g_stat_addr + addr, size);
	}

	const u64 seg_base = addr;

	while (size)
	{
		if (g_cfg.core.ppu_decoder == ppu_decoder_type::llvm)
		{
			// Assume addr is the start of first segment of PRX
			// Pack the segment base into the upper bits of the fallback entry (see ppu_recompiler_fallback)
			const uptr entry_value = reinterpret_cast<uptr>(ppu_recompiler_fallback_ghc) | (seg_base << (32 + 3));
			write_to_ptr<uptr>(ppu_ptr(addr), entry_value);
		}
		else
		{
			write_to_ptr<ppu_intrp_func_t>(ppu_ptr(addr), ppu_fallback);
		}

		addr += 4;
		size -= 4;
	}
}

static void ppu_far_jump(ppu_thread&, ppu_opcode_t, be_t<u32>*, ppu_intrp_func*);

// Fill the executable cache for [addr, addr + size).
// If 'ptr' is given, install that single handler (preserving the upper tag bits of the entry);
// otherwise populate the range with decoded interpreter handlers, skipping breakpoints/far jumps.
extern void ppu_register_function_at(u32 addr, u32 size, ppu_intrp_func_t ptr = nullptr)
{
	// Initialize specific function
	if (ptr)
	{
		// Keep the upper 16 tag bits of the existing entry, replace only the pointer part
		write_to_ptr<uptr>(ppu_ptr(addr), (reinterpret_cast<uptr>(ptr) & 0xffff'ffff'ffffu) | (uptr(ppu_read(addr)) & ~0xffff'ffff'ffffu));
		return;
	}

	if (!size)
	{
		if (g_cfg.core.ppu_debug)
		{
			ppu_log.error("ppu_register_function_at(0x%x): empty range", addr);
		}

		return;
	}

	if (g_cfg.core.ppu_decoder == ppu_decoder_type::llvm)
	{
		// LLVM entries are managed elsewhere (ppu_register_range)
		return;
	}

	// Initialize interpreter cache
	while (size)
	{
		// Do not clobber installed breakpoints or far-jump stubs
		if (auto old = ppu_read(addr); old != ppu_break && old != ppu_far_jump)
		{
			write_to_ptr<ppu_intrp_func_t>(ppu_ptr(addr), ppu_cache(addr));
		}

		addr += 4;
		size -= 4;
	}
}

// Overload taking the handler as a raw integer (convenience for callers without the typed pointer)
extern void ppu_register_function_at(u32 addr, u32 size, u64 ptr)
{
	return ppu_register_function_at(addr, size, reinterpret_cast<ppu_intrp_func_t>(ptr));
}

u32 ppu_get_exported_func_addr(u32 fnid, const std::string& module_name);

// Restore LR/R2 saved by a far call with TOC and branch to the instruction after the call site
void ppu_return_from_far_jump(ppu_thread& ppu, ppu_opcode_t, be_t<u32>*, ppu_intrp_func*)
{
	auto& calls_info = ppu.hle_func_calls_with_toc_info;
	ensure(!calls_info.empty());

	// Branch to next instruction after far jump call entry with restored R2 and LR
	const auto restore_info = &calls_info.back();
	ppu.cia = restore_info->cia + 4;
	ppu.lr =
restore_info->saved_lr; ppu.gpr[2] = restore_info->saved_r2; calls_info.pop_back(); } static const bool s_init_return_far_jump_func = [] { REG_HIDDEN_FUNC_PURE(ppu_return_from_far_jump); return true; }(); struct ppu_far_jumps_t { struct all_info_t { u32 target; bool link; bool with_toc; std::string module_name; ppu_intrp_func_t func; u32 get_target(u32 pc, ppu_thread* ppu = nullptr) const { u32 direct_target = this->target; bool to_link = this->link; bool from_opd = this->with_toc; if (!this->module_name.empty()) { direct_target = ppu_get_exported_func_addr(direct_target, this->module_name); } if (from_opd && !vm::check_addr<sizeof(ppu_func_opd_t)>(direct_target)) { // Avoid reading unmapped memory under mutex from_opd = false; } if (from_opd) { auto& opd = vm::_ref<ppu_func_opd_t>(direct_target); direct_target = opd.addr; // We modify LR to custom values here to_link = false; if (ppu) { auto& calls_info = ppu->hle_func_calls_with_toc_info; // Save LR and R2 // Set LR to the this ppu_return_from_far_jump branch for restoration of registers // NOTE: In order to clean up this information all calls must return in order auto& saved_info = calls_info.emplace_back(); saved_info.cia = pc; saved_info.saved_lr = std::exchange(ppu->lr, g_fxo->get<ppu_function_manager>().func_addr(FIND_FUNC(ppu_return_from_far_jump), true)); saved_info.saved_r2 = std::exchange(ppu->gpr[2], opd.rtoc); } } if (to_link && ppu) { ppu->lr = pc + 4; } return direct_target; } }; ppu_far_jumps_t(int) noexcept {} std::map<u32, all_info_t> vals; ::jit_runtime rt; mutable shared_mutex mutex; // Get target address, 'ppu' is used in ppu_far_jump in order to modify registers u32 get_target(u32 pc, ppu_thread* ppu = nullptr) { reader_lock lock(mutex); if (auto it = vals.find(pc); it != vals.end()) { all_info_t& all_info = it->second; return all_info.get_target(pc, ppu); } return {}; } // Get function patches in range (entry -> target) std::vector<std::pair<u32, u32>> get_targets(u32 pc, u32 size) { 
std::vector<std::pair<u32, u32>> targets; reader_lock lock(mutex); auto it = vals.lower_bound(pc); if (it == vals.end()) { return targets; } if (it->first >= pc + size) { return targets; } for (auto end = vals.lower_bound(pc + size); it != end; it++) { all_info_t& all_info = it->second; if (u32 target = all_info.get_target(it->first)) { targets.emplace_back(it->first, target); } } return targets; } // Generate a mini-function which updates PC (for LLVM) and jumps to ppu_far_jump to handle redirections template <bool Locked = true> ppu_intrp_func_t gen_jump(u32 pc) { [[maybe_unused]] std::conditional_t<Locked, std::lock_guard<shared_mutex>, const shared_mutex&> lock(mutex); auto it = vals.find(pc); if (it == vals.end()) { return nullptr; } if (!it->second.func) { it->second.func = build_function_asm<ppu_intrp_func_t>("", [&](native_asm& c, auto& args) { using namespace asmjit; #ifdef ARCH_X64 c.mov(args[0], x86::rbp); c.mov(x86::dword_ptr(args[0], ::offset32(&ppu_thread::cia)), pc); c.jmp(ppu_far_jump); #else Label jmp_address = c.newLabel(); Label imm_address = c.newLabel(); c.ldr(args[1].w(), arm::ptr(imm_address)); c.str(args[1].w(), arm::Mem(args[0], ::offset32(&ppu_thread::cia))); c.ldr(args[1], arm::ptr(jmp_address)); c.br(args[1]); c.align(AlignMode::kCode, 16); c.bind(jmp_address); c.embedUInt64(reinterpret_cast<u64>(ppu_far_jump)); c.bind(imm_address); c.embedUInt32(pc); #endif }, &rt); } return it->second.func; } }; u32 ppu_get_far_jump(u32 pc) { if (!g_fxo->is_init<ppu_far_jumps_t>()) { return 0; } return g_fxo->get<ppu_far_jumps_t>().get_target(pc); } static void ppu_far_jump(ppu_thread& ppu, ppu_opcode_t, be_t<u32>*, ppu_intrp_func*) { const u32 cia = g_fxo->get<ppu_far_jumps_t>().get_target(ppu.cia, &ppu); if (!vm::check_addr(cia, vm::page_executable)) { fmt::throw_exception("PPU far jump failed! 
(returned cia = 0x%08x)", cia); } ppu.cia = cia; } bool ppu_form_branch_to_code(u32 entry, u32 target, bool link, bool with_toc, std::string module_name) { // Force align entry and target entry &= -4; // Exported functions are using target as FNID, must not be changed if (module_name.empty()) { target &= -4; u32 cia_target = target; if (with_toc) { ppu_func_opd_t opd{}; if (!vm::try_access(target, &opd, sizeof(opd), false)) { // Cannot access function descriptor return false; } // For now allow situations where OPD is changed later by patches or by the program itself //cia_target = opd.addr; // So force a valid target (executable, yet not equal to entry) cia_target = entry ^ 8; } // Target CIA must be aligned, executable and not equal with if (cia_target % 4 || entry == cia_target || !vm::check_addr(cia_target, vm::page_executable)) { return false; } } // Entry must be executable if (!vm::check_addr(entry, vm::page_executable)) { return false; } g_fxo->init<ppu_far_jumps_t>(0); if (!module_name.empty()) { // Always use function descriptor for exported functions with_toc = true; } if (with_toc) { // Always link for calls with function descriptor link = true; } // Register branch target in host memory, not guest memory auto& jumps = g_fxo->get<ppu_far_jumps_t>(); std::lock_guard lock(jumps.mutex); jumps.vals.insert_or_assign(entry, ppu_far_jumps_t::all_info_t{target, link, with_toc, std::move(module_name)}); ppu_register_function_at(entry, 4, g_cfg.core.ppu_decoder == ppu_decoder_type::_static ? 
&ppu_far_jump : ensure(g_fxo->get<ppu_far_jumps_t>().gen_jump<false>(entry)));

	return true;
}

// Convenience overload: branch without module name (direct target)
bool ppu_form_branch_to_code(u32 entry, u32 target, bool link, bool with_toc)
{
	return ppu_form_branch_to_code(entry, target, link, with_toc, std::string{});
}

// Convenience overload: branch without TOC handling
bool ppu_form_branch_to_code(u32 entry, u32 target, bool link)
{
	return ppu_form_branch_to_code(entry, target, link, false);
}

// Convenience overload: plain non-linking branch
bool ppu_form_branch_to_code(u32 entry, u32 target)
{
	return ppu_form_branch_to_code(entry, target, false);
}

// Erase all registered far jumps whose entry lies inside [addr, addr + size)
void ppu_remove_hle_instructions(u32 addr, u32 size)
{
	if (Emu.IsStopped() || !g_fxo->is_init<ppu_far_jumps_t>())
	{
		return;
	}

	auto& jumps = g_fxo->get<ppu_far_jumps_t>();
	std::lock_guard lock(jumps.mutex);

	for (auto it = jumps.vals.begin(); it != jumps.vals.end();)
	{
		// Inclusive upper bound (addr + size - 1); the trailing '&& size' guards the size == 0 case
		if (it->first >= addr && it->first <= addr + size - 1 && size)
		{
			it = jumps.vals.erase(it);
			continue;
		}

		it++;
	}
}

// When set, hitting any breakpoint pauses every emulated thread, not just the one that hit it
atomic_t<bool> g_debugger_pause_all_threads_on_bp = false;

// Breakpoint entry point
static void ppu_break(ppu_thread& ppu, ppu_opcode_t, be_t<u32>* this_op, ppu_intrp_func* next_fn)
{
	const bool pause_all = g_debugger_pause_all_threads_on_bp;
	const u32 old_cia = vm::get_addr(this_op);
	ppu.cia = old_cia;

	// Pause
	ppu.state.atomic_op([&](bs_t<cpu_flag>& state)
	{
		if (pause_all)
			state += cpu_flag::dbg_global_pause;
		if (pause_all || !(state & cpu_flag::dbg_step))
			state += cpu_flag::dbg_pause;
	});

	if (pause_all)
	{
		// Pause all other threads
		Emu.CallFromMainThread([]() { Emu.Pause(); });
	}

	if (ppu.check_state() || old_cia != atomic_storage<u32>::load(ppu.cia))
	{
		// Do not execute if PC changed
		return;
	}

	// Fallback to the interpreter function
	return ppu_cache(ppu.cia)(ppu, {*this_op}, this_op, ppu.state ?
&ppu_ret : next_fn); } // Set or remove breakpoint extern bool ppu_breakpoint(u32 addr, bool is_adding) { if (addr % 4 || !vm::check_addr(addr, vm::page_executable) || g_cfg.core.ppu_decoder == ppu_decoder_type::llvm) { return false; } // Remove breakpoint parameters ppu_intrp_func_t func_original = 0; ppu_intrp_func_t breakpoint = &ppu_break; if (u32 hle_addr{}; g_fxo->is_init<ppu_function_manager>() && (hle_addr = g_fxo->get<ppu_function_manager>().addr)) { // HLE function index const u32 index = (addr - hle_addr) / 8; if (addr % 8 == 4 && index < ppu_function_manager::get().size()) { // HLE function placement func_original = ppu_function_manager::get()[index]; } } if (!func_original) { // If not an HLE function use regular instruction function func_original = ppu_cache(addr); } if (is_adding) { if (ppu_read(addr) == ppu_fallback) { ppu_log.error("Unregistered instruction replaced with a breakpoint at 0x%08x", addr); func_original = ppu_fallback; } if (ppu_read(addr) != func_original) { return false; } write_to_ptr<ppu_intrp_func_t>(ppu_ptr(addr), breakpoint); return true; } if (ppu_read(addr) != breakpoint) { return false; } write_to_ptr<ppu_intrp_func_t>(ppu_ptr(addr), func_original); return true; } extern bool ppu_patch(u32 addr, u32 value) { if (addr % 4) { ppu_log.fatal("Patch failed at 0x%x: unanligned memory address.", addr); return false; } vm::writer_lock rlock; if (!vm::check_addr(addr)) { ppu_log.fatal("Patch failed at 0x%x: invalid memory address.", addr); return false; } const bool is_exec = vm::check_addr(addr, vm::page_executable); if (is_exec && g_cfg.core.ppu_decoder == ppu_decoder_type::llvm && !Emu.IsReady()) { // TODO: support recompilers ppu_log.fatal("Patch failed at 0x%x: LLVM recompiler is used.", addr); return false; } *vm::get_super_ptr<u32>(addr) = value; if (is_exec) { if (auto old = ppu_read(addr); old != ppu_break && old != ppu_fallback) { write_to_ptr<ppu_intrp_func_t>(ppu_ptr(addr), ppu_cache(addr)); } } return true; } 
std::array<u32, 2> op_branch_targets(u32 pc, ppu_opcode_t op) { std::array<u32, 2> res{pc + 4, umax}; if (u32 target = g_fxo->is_init<ppu_far_jumps_t>() ? g_fxo->get<ppu_far_jumps_t>().get_target(pc) : 0) { res[0] = target; return res; } switch (const auto type = g_ppu_itype.decode(op.opcode)) { case ppu_itype::B: case ppu_itype::BC: { res[type == ppu_itype::BC ? 1 : 0] = ((op.aa ? 0 : pc) + (type == ppu_itype::B ? +op.bt24 : +op.bt14)); break; } case ppu_itype::BCCTR: case ppu_itype::BCLR: case ppu_itype::UNK: { res[0] = umax; break; } default: break; } return res; } void ppu_thread::dump_regs(std::string& ret, std::any& custom_data) const { const system_state emu_state = Emu.GetStatus(false); const bool is_stopped_or_frozen = state & cpu_flag::exit || emu_state == system_state::frozen || emu_state <= system_state::stopping; const ppu_debugger_mode mode = debugger_mode.load(); const bool is_decimal = !is_stopped_or_frozen && mode == ppu_debugger_mode::is_decimal; struct dump_registers_data_t { u32 preferred_cr_field_index = 7; }; dump_registers_data_t* func_data = nullptr; func_data = std::any_cast<dump_registers_data_t>(&custom_data); if (!func_data) { custom_data.reset(); custom_data = std::make_any<dump_registers_data_t>(); func_data = ensure(std::any_cast<dump_registers_data_t>(&custom_data)); } PPUDisAsm dis_asm(cpu_disasm_mode::normal, vm::g_sudo_addr); for (uint i = 0; i < 32; ++i) { auto reg = gpr[i]; // Fixup for syscall arguments if (current_function && i >= 3 && i <= 10) reg = syscall_args[i - 3]; auto [is_const, const_value] = dis_asm.try_get_const_gpr_value(i, cia); if (const_value != reg) { // Expectation of predictable code path has not been met (such as a branch directly to the instruction) is_const = false; } fmt::append(ret, "r%d%s%s ", i, i <= 9 ? " " : "", is_const ? 
"©" : ":"); bool printed_error = false; if ((reg >> 31) == 0x1'ffff'ffff) { const usz old_size = ret.size(); fmt::append(ret, "%s (0x%x)", CellError{static_cast<u32>(reg)}, reg); // Test if failed to format (appended " 0x8".. in such case) if (ret[old_size] == '0') { // Failed ret.resize(old_size); } else { printed_error = true; } } if (!printed_error) { if (is_decimal) { fmt::append(ret, "%-11d", reg); } else { fmt::append(ret, "0x%-8llx", reg); } } constexpr u32 max_str_len = 32; constexpr u32 hex_count = 8; if (reg <= u32{umax} && vm::check_addr<max_str_len>(static_cast<u32>(reg))) { bool is_function = false; u32 toc = 0; auto is_exec_code = [&](u32 addr) { return addr % 4 == 0 && vm::check_addr(addr, vm::page_executable) && g_ppu_itype.decode(*vm::get_super_ptr<u32>(addr)) != ppu_itype::UNK; }; if (const u32 reg_ptr = *vm::get_super_ptr<be_t<u32, 1>>(static_cast<u32>(reg)); vm::check_addr<8>(reg_ptr) && !vm::check_addr(toc, vm::page_executable)) { // Check executability and alignment if (reg % 4 == 0 && is_exec_code(reg_ptr)) { toc = *vm::get_super_ptr<u32>(static_cast<u32>(reg + 4)); if (toc % 4 == 0 && (toc >> 29) == (reg_ptr >> 29) && vm::check_addr(toc) && !vm::check_addr(toc, vm::page_executable)) { is_function = true; reg = reg_ptr; } } } else if (is_exec_code(static_cast<u32>(reg))) { is_function = true; } const auto gpr_buf = vm::get_super_ptr<u8>(static_cast<u32>(reg)); std::string buf_tmp(gpr_buf, gpr_buf + max_str_len); std::string_view sv(buf_tmp.data(), std::min<usz>(buf_tmp.size(), buf_tmp.find_first_of("\0\n"sv))); if (is_function) { if (toc) { fmt::append(ret, " -> func(at=0x%x, toc=0x%x)", reg, toc); } else { dis_asm.disasm(static_cast<u32>(reg)); fmt::append(ret, " -> %s", dis_asm.last_opcode); } } // NTS: size of 3 and above is required // If ends with a newline, only one character is required else if ((sv.size() == buf_tmp.size() || (sv.size() >= (buf_tmp[sv.size()] == '\n' ? 
1 : 3))) && std::all_of(sv.begin(), sv.end(), [](u8 c){ return std::isprint(c); })) { fmt::append(ret, " -> \"%s\"", sv); } else { fmt::append(ret, " -> "); for (u32 j = 0; j < hex_count; ++j) { fmt::append(ret, "%02x ", buf_tmp[j]); } } } fmt::trim_back(ret); ret += '\n'; } const u32 current_cia = cia; const u32 cr_packed = cr.pack(); for (u32 addr : { current_cia, current_cia + 4, current_cia + 8, current_cia - 4, current_cia + 12, }) { dis_asm.disasm(addr); if (dis_asm.last_opcode.size() <= 4) { continue; } usz index = dis_asm.last_opcode.rfind(",cr"); if (index > dis_asm.last_opcode.size() - 4) { index = dis_asm.last_opcode.rfind(" cr"); } if (index <= dis_asm.last_opcode.size() - 4) { const char result = dis_asm.last_opcode[index + 3]; if (result >= '0' && result <= '7') { func_data->preferred_cr_field_index = result - '0'; break; } } if (dis_asm.last_opcode.find("stdcx.") != umax || dis_asm.last_opcode.find("stwcx.") != umax) { // Modifying CR0 func_data->preferred_cr_field_index = 0; break; } } const u32 displayed_cr_field = (cr_packed >> ((7 - func_data->preferred_cr_field_index) * 4)) & 0xf; fmt::append(ret, "CR: 0x%08x, CR%d: [LT=%u GT=%u EQ=%u SO=%u]\n", cr_packed, func_data->preferred_cr_field_index, displayed_cr_field >> 3, (displayed_cr_field >> 2) & 1, (displayed_cr_field >> 1) & 1, displayed_cr_field & 1); for (uint i = 0; i < 32; ++i) { const f64 r = fpr[i]; if (!std::bit_cast<u64>(r)) { fmt::append(ret, "f%d%s: %-12.6G [%-18s] (f32=0x%x)\n", i, i <= 9 ? " " : "", r, "", std::bit_cast<u32>(f32(r))); continue; } fmt::append(ret, "f%d%s: %-12.6G [0x%016x] (f32=0x%x)\n", i, i <= 9 ? " " : "", r, std::bit_cast<u64>(r), std::bit_cast<u32>(f32(r))); } for (uint i = 0; i < 32; ++i, ret += '\n') { fmt::append(ret, "v%d%s: ", i, i <= 9 ? 
" " : ""); const auto r = vr[i]; const u32 i3 = r.u32r[0]; if (v128::from32p(i3) == r) { // Shortand formatting fmt::append(ret, "%08x", i3); fmt::append(ret, " [x: %g]", r.fr[0]); } else { fmt::append(ret, "%08x %08x %08x %08x", r.u32r[0], r.u32r[1], r.u32r[2], r.u32r[3]); fmt::append(ret, " [x: %g y: %g z: %g w: %g]", r.fr[0], r.fr[1], r.fr[2], r.fr[3]); } } fmt::append(ret, "CIA: 0x%x\n", current_cia); fmt::append(ret, "LR: 0x%llx\n", lr); fmt::append(ret, "CTR: 0x%llx\n", ctr); fmt::append(ret, "VRSAVE: 0x%08x\n", vrsave); fmt::append(ret, "XER: [CA=%u | OV=%u | SO=%u | CNT=%u]\n", xer.ca, xer.ov, xer.so, xer.cnt); fmt::append(ret, "VSCR: [SAT=%u | NJ=%u]\n", sat, nj); fmt::append(ret, "FPSCR: [FL=%u | FG=%u | FE=%u | FU=%u]\n", fpscr.fl, fpscr.fg, fpscr.fe, fpscr.fu); const u32 addr = raddr; if (addr) fmt::append(ret, "Reservation Addr: 0x%x", addr); else fmt::append(ret, "Reservation Addr: none"); fmt::append(ret, "\nReservation Data (entire cache line):\n"); be_t<u32> data[32]{}; std::memcpy(data, rdata, sizeof(rdata)); // Show the data even if the reservation was lost inside the atomic loop if (addr && !use_full_rdata) { const u32 offset = addr & 0x78; fmt::append(ret, "[0x%02x] %08x %08x\n", offset, data[offset / sizeof(u32)], data[offset / sizeof(u32) + 1]); // Asterisk marks the offset of data that had been given to the guest PPU code *(&ret.back() - (addr & 4 ? 
9 : 18)) = '*'; } else { for (usz i = 0; i < std::size(data); i += 4) { fmt::append(ret, "[0x%02x] %08x %08x %08x %08x\n", i * sizeof(data[0]) , data[i + 0], data[i + 1], data[i + 2], data[i + 3]); } if (addr) { // See the note above *(&ret.back() - (4 - (addr % 16 / 4)) * 9 - (8 - (addr % 128 / 16)) * std::size("[0x00]"sv)) = '*'; } } } std::string ppu_thread::dump_callstack() const { std::string ret; fmt::append(ret, "Call stack:\n=========\n0x%08x (0x0) called\n", cia); for (const auto& sp : dump_callstack_list()) { // TODO: function addresses too fmt::append(ret, "> from 0x%08x (sp=0x%08x)\n", sp.first, sp.second); } return ret; } std::vector<std::pair<u32, u32>> ppu_thread::dump_callstack_list() const { //std::shared_lock rlock(vm::g_mutex); // Needs optimizations // Determine stack range const u64 r1 = gpr[1]; if (r1 > u32{umax} || r1 % 0x10) { return {}; } const u32 stack_ptr = static_cast<u32>(r1); if (!vm::check_addr(stack_ptr, vm::page_writable)) { // Normally impossible unless the code does not follow ABI rules return {}; } u32 stack_min = stack_ptr & ~0xfff; u32 stack_max = stack_min + 4096; while (stack_min && vm::check_addr(stack_min - 4096, vm::page_writable)) { stack_min -= 4096; } while (stack_max + 4096 && vm::check_addr(stack_max, vm::page_writable)) { stack_max += 4096; } std::vector<std::pair<u32, u32>> call_stack_list; bool is_first = true; bool skip_single_frame = false; const u64 _lr = this->lr; const u32 _cia = this->cia; const u64 gpr0 = this->gpr[0]; for ( u64 sp = r1; sp % 0x10 == 0u && sp >= stack_min && sp <= stack_max - ppu_stack_start_offset; is_first = false ) { auto is_invalid = [](u64 addr) { if (addr > u32{umax} || addr % 4 || !vm::check_addr(static_cast<u32>(addr), vm::page_executable)) { return true; } // Ignore HLE stop address return addr == g_fxo->get<ppu_function_manager>().func_addr(1, true); }; if (is_first && !is_invalid(_lr)) { // Detect functions with no stack or before LR has been stored // Tracking if instruction has 
already been passed through // Instead of using map or set, use two vectors relative to CIA and resize as needed std::vector<be_t<u32>> inst_neg; std::vector<be_t<u32>> inst_pos; auto get_inst = [&](u32 pos) -> be_t<u32>& { static be_t<u32> s_inst_empty{}; if (pos < _cia) { const u32 neg_dist = (_cia - pos - 4) / 4; if (neg_dist >= inst_neg.size()) { const u32 inst_bound = pos & -256; const usz old_size = inst_neg.size(); const usz new_size = neg_dist + (pos - inst_bound) / 4 + 1; if (new_size >= 0x8000) { // Gross lower limit for the function (if it is that size it is unlikely that it is even a leaf function) return s_inst_empty; } inst_neg.resize(new_size); if (!vm::try_access(inst_bound, &inst_neg[old_size], ::narrow<u32>((new_size - old_size) * sizeof(be_t<u32>)), false)) { // Failure (this would be detected as failure by zeroes) } // Reverse the array (because this buffer directs backwards in address) for (usz start = old_size, end = new_size - 1; start < end; start++, end--) { std::swap(inst_neg[start], inst_neg[end]); } } return inst_neg[neg_dist]; } const u32 pos_dist = (pos - _cia) / 4; if (pos_dist >= inst_pos.size()) { const u32 inst_bound = utils::align<u32>(pos, 256); const usz old_size = inst_pos.size(); const usz new_size = pos_dist + (inst_bound - pos) / 4 + 1; if (new_size >= 0x8000) { // Gross upper limit for the function (if it is that size it is unlikely that it is even a leaf function) return s_inst_empty; } inst_pos.resize(new_size); if (!vm::try_access(pos, &inst_pos[old_size], ::narrow<u32>((new_size - old_size) * sizeof(be_t<u32>)), false)) { // Failure (this would be detected as failure by zeroes) } } return inst_pos[pos_dist]; }; bool upper_abort = false; struct context_t { u32 start_point; bool maybe_leaf = false; // True if the function is leaf or at the very end/start of non-leaf bool non_leaf = false; // Absolutely not a leaf bool about_to_push_frame = false; // STDU incoming bool about_to_store_lr = false; // Link is about to be 
stored on stack bool about_to_pop_frame = false; // ADDI R1 is about to be issued bool about_to_load_link = false; // MTLR is about to be issued bool maybe_use_reg0_instead_of_lr = false; // Use R0 at the end of a non-leaf function if ADDI has been issued before MTLR }; // Start with CIA std::deque<context_t> workload{context_t{_cia}}; usz start = 0; for (; start < workload.size(); start++) { for (u32 wa = workload[start].start_point; vm::check_addr(wa, vm::page_executable);) { be_t<u32>& opcode = get_inst(wa); auto& [_, maybe_leaf, non_leaf, about_to_push_frame, about_to_store_lr, about_to_pop_frame, about_to_load_link, maybe_use_reg0_instead_of_lr] = workload[start]; if (!opcode) { // Already passed or failure of reading break; } const ppu_opcode_t op{opcode}; // Mark as passed through opcode = 0; const auto type = g_ppu_itype.decode(op.opcode); if (workload.size() == 1 && type == ppu_itype::STDU && op.rs == 1u && op.ra == 1u) { if (op.simm16 >= 0) { // Against ABI non_leaf = true; upper_abort = true; break; } // Saving LR to register: this is indeed a new function (ok because LR has not been saved yet) maybe_leaf = true; about_to_push_frame = true; about_to_pop_frame = false; upper_abort = true; break; } if (workload.size() == 1 && type == ppu_itype::STD && op.ra == 1u && op.rs == 0u) { bool found_matching_stdu = false; for (u32 back = 1; back < 20; back++) { be_t<u32>& opcode = get_inst(utils::sub_saturate<u32>(_cia, back * 4)); if (!opcode) { // Already passed or failure of reading break; } const ppu_opcode_t test_op{opcode}; const auto type = g_ppu_itype.decode(test_op.opcode); if (type == ppu_itype::BCLR) { break; } if (type == ppu_itype::STDU && test_op.rs == 1u && test_op.ra == 1u) { if (0 - (test_op.ds << 2) == (op.ds << 2) - 0x10) { found_matching_stdu = true; } break; } } if (found_matching_stdu) { // Saving LR to stack: this is indeed a new function (ok because LR has not been saved yet) maybe_leaf = true; about_to_store_lr = true; about_to_pop_frame = 
true; upper_abort = true; break; } } const u32 spr = ((op.spr >> 5) | ((op.spr & 0x1f) << 5)); // It can be placed before or after STDU, ignore for now // if (workload.size() == 1 && type == ppu_itype::MFSPR && op.rs == 0u && spr == 0x8) // { // // Saving LR to register: this is indeed a new function (ok because LR has not been saved yet) // maybe_leaf = true; // about_to_store_lr = true; // about_to_pop_frame = true; // } if (type == ppu_itype::MTSPR && spr == 0x8 && op.rs == 0u) { // Test for special case: if ADDI R1 is not found later in code, it means that LR is not restored and R0 should be used instead // Can also search for ADDI R1 backwards and pull the value from stack (needs more research if it is more reliable) maybe_use_reg0_instead_of_lr = true; } if (type == ppu_itype::UNK) { // Ignore for now break; } if ((type & ppu_itype::branch) && op.lk) { // Gave up on LR before saving non_leaf = true; about_to_pop_frame = true; upper_abort = true; break; } // Even if BCLR is conditional, it still counts because LR value is ready for return if (type == ppu_itype::BCLR) { // Returned maybe_leaf = true; upper_abort = true; break; } if (type == ppu_itype::ADDI && op.ra == 1u && op.rd == 1u) { if (op.simm16 < 0) { // Against ABI non_leaf = true; upper_abort = true; break; } else if (op.simm16 > 0) { // Remember that SP is about to be restored about_to_pop_frame = true; non_leaf = true; upper_abort = true; break; } } const auto results = op_branch_targets(wa, op); bool proceeded = false; for (usz res_i = 0; res_i < results.size(); res_i++) { const u32 route_pc = results[res_i]; if (route_pc == umax) { continue; } if (vm::check_addr(route_pc, vm::page_executable) && get_inst(route_pc)) { if (proceeded) { // Remember next route start point workload.push_back(context_t{route_pc}); } else { // Next PC wa = route_pc; proceeded = true; } } } } if (upper_abort) { break; } } const context_t& res = workload[std::min<usz>(start, workload.size() - 1)]; if (res.maybe_leaf && 
!res.non_leaf) { const u32 result = res.maybe_use_reg0_instead_of_lr ? static_cast<u32>(gpr0) : static_cast<u32>(_lr); // Same stack as far as we know call_stack_list.emplace_back(result, static_cast<u32>(sp)); if (res.about_to_store_lr) { // LR has yet to be stored on stack, ignore the stack value skip_single_frame = true; } } if (res.about_to_pop_frame || (res.maybe_leaf && !res.non_leaf)) { const u64 temp_sp = *vm::get_super_ptr<u64>(static_cast<u32>(sp)); if (temp_sp <= sp) { // Ensure inequality and that the old stack pointer is higher than current break; } // Read the first stack frame so caller addresses can be obtained sp = temp_sp; continue; } } u64 addr = *vm::get_super_ptr<u64>(static_cast<u32>(sp + 16)); if (skip_single_frame) { skip_single_frame = false; } else if (!is_invalid(addr)) { // TODO: function addresses too call_stack_list.emplace_back(static_cast<u32>(addr), static_cast<u32>(sp)); } else if (!is_first) { break; } const u64 temp_sp = *vm::get_super_ptr<u64>(static_cast<u32>(sp)); if (temp_sp <= sp) { // Ensure inequality and that the old stack pointer is higher than current break; } sp = temp_sp; is_first = false; } return call_stack_list; } std::string ppu_thread::dump_misc() const { std::string ret = cpu_thread::dump_misc(); if (ack_suspend) { if (ret.ends_with("\n")) { ret.pop_back(); } fmt::append(ret, " (LV2 suspended)\n"); } fmt::append(ret, "Priority: %d\n", prio.load().prio); fmt::append(ret, "Stack: 0x%x..0x%x\n", stack_addr, stack_addr + stack_size - 1); fmt::append(ret, "Joiner: %s\n", joiner.load()); if (const auto size = cmd_queue.size()) fmt::append(ret, "Commands: %u\n", size); const char* _func = current_function; if (_func) { ret += "In function: "; ret += _func; ret += '\n'; for (u32 i = 3; i <= 10; i++) if (u64 v = gpr[i]; v != syscall_args[i - 3]) fmt::append(ret, " ** r%d: 0x%llx\n", i, v); } else if (is_paused() || is_stopped()) { if (const auto last_func = last_function) { _func = last_func; ret += "Last function: "; 
ret += _func; ret += '\n'; } } if (const auto _time = start_time) { fmt::append(ret, "Waiting: %fs\n", (get_guest_system_time() - _time) / 1000000.); } else { ret += '\n'; } if (!_func) { ret += '\n'; } return ret; } void ppu_thread::dump_all(std::string& ret) const { cpu_thread::dump_all(ret); if (call_history.data.size() > 1) { ret += "\nCalling History:" "\n================"; fmt::append(ret, "%s", call_history); } if (syscall_history.data.size() > 1) { ret += "\nHLE/LV2 History:" "\n================"; fmt::append(ret, "%s", syscall_history); } } extern thread_local std::string(*g_tls_log_prefix)(); void ppu_thread::cpu_task() { std::fesetround(FE_TONEAREST); if (g_cfg.core.set_daz_and_ftz) { gv_set_zeroing_denormals(); } else { gv_unset_zeroing_denormals(); } // Execute cmd_queue while (cmd64 cmd = cmd_wait()) { const u32 arg = cmd.arg2<u32>(); // 32-bit arg extracted switch (auto type = cmd.arg1<ppu_cmd>()) { case ppu_cmd::opcode: { cmd_pop(), g_fxo->get<ppu_interpreter_rt>().decode(arg)(*this, {arg}, vm::_ptr<u32>(cia - 4), &ppu_ret); break; } case ppu_cmd::set_gpr: { if (arg >= 32) { fmt::throw_exception("Invalid ppu_cmd::set_gpr arg (0x%x)", arg); } gpr[arg % 32] = cmd_get(1).as<u64>(); cmd_pop(1); break; } case ppu_cmd::set_args: { if (arg > 8) { fmt::throw_exception("Unsupported ppu_cmd::set_args size (0x%x)", arg); } for (u32 i = 0; i < arg; i++) { gpr[i + 3] = cmd_get(1 + i).as<u64>(); } cmd_pop(arg); break; } case ppu_cmd::lle_call: { #ifdef __APPLE__ pthread_jit_write_protect_np(true); #endif const vm::ptr<u32> opd(arg < 32 ? 
vm::cast(gpr[arg]) : vm::cast(arg));
			// OPD layout: [0] = entry address, [1] = RTOC
			cmd_pop(), fast_call(opd[0], opd[1]);
			break;
		}
		case ppu_cmd::entry_call:
		{
#ifdef __APPLE__
			pthread_jit_write_protect_np(true);
#endif
			// Invoke the thread's own entry function descriptor
			cmd_pop(), fast_call(entry_func.addr, entry_func.rtoc, true);
			break;
		}
		case ppu_cmd::hle_call:
		{
			// arg indexes into the HLE function table
			cmd_pop(), ::at32(ppu_function_manager::get(), arg)(*this, {arg}, vm::_ptr<u32>(cia - 4), &ppu_ret);
			break;
		}
		case ppu_cmd::opd_call:
		{
#ifdef __APPLE__
			pthread_jit_write_protect_np(true);
#endif
			// Function descriptor is passed inline as the next queue entry
			const ppu_func_opd_t opd = cmd_get(1).as<ppu_func_opd_t>();
			cmd_pop(1), fast_call(opd.addr, opd.rtoc);
			break;
		}
		case ppu_cmd::ptr_call:
		{
			// Host function pointer passed inline as the next queue entry
			const ppu_intrp_func_t func = cmd_get(1).as<ppu_intrp_func_t>();
			cmd_pop(1), func(*this, {}, vm::_ptr<u32>(cia - 4), &ppu_ret);
			break;
		}
		case ppu_cmd::cia_call:
		{
			// Resume execution at the saved CIA (savestate path); CIA is consumed here
			loaded_from_savestate = true;
			cmd_pop(), fast_call(std::exchange(cia, 0), gpr[2], true);
			break;
		}
		case ppu_cmd::initialize:
		{
#ifdef __APPLE__
			pthread_jit_write_protect_np(false);
#endif
			cmd_pop();
			ppu_initialize();

			if (Emu.IsStopped())
			{
				return;
			}

			spu_cache::initialize();

#ifdef __APPLE__
			pthread_jit_write_protect_np(true);
#endif
#ifdef ARCH_ARM64
			// Flush all cache lines after potentially writing executable code
			asm("ISB");
			asm("DSB ISH");
#endif

			// Wait until the progress dialog is closed.
			// We don't want to open a cell dialog while a native progress dialog is still open.
			while (u32 v = g_progr_ptotal)
			{
				if (Emu.IsStopped())
				{
					return;
				}

				g_progr_ptotal.wait(v);
			}

			g_fxo->get<progress_dialog_workaround>().show_overlay_message_only = true;

			// Sadly we can't postpone initializing guest time because we need to run PPU threads
			// (the farther it's postponed, the less accuracy of guest time has been lost)
			Emu.FixGuestTime();

			// Run SPUs waiting on a syscall (savestates related)
			idm::select<named_thread<spu_thread>>([&](u32, named_thread<spu_thread>& spu)
			{
				if (spu.group && spu.index == spu.group->waiter_spu_index)
				{
					if (std::exchange(spu.stop_flag_removal_protection, false))
					{
						return;
					}

					ensure(spu.state.test_and_reset(cpu_flag::stop));
					spu.state.notify_one();
				}
			});

			// Check if this is the only PPU left to initialize (savestates related)
			if (lv2_obj::is_scheduler_ready())
			{
				if (Emu.IsStarting())
				{
					Emu.FinalizeRunRequest();
				}
			}

			break;
		}
		case ppu_cmd::sleep:
		{
			cmd_pop(), lv2_obj::sleep(*this);
			break;
		}
		case ppu_cmd::reset_stack:
		{
			// Reset SP (r1) to the top of the thread's stack region
			cmd_pop(), gpr[1] = stack_addr + stack_size - ppu_stack_start_offset;
			break;
		}
		default:
		{
			fmt::throw_exception("Unknown ppu_cmd(0x%x)", static_cast<u32>(type));
		}
		}
	}
}

// Prepare the thread for sleeping on the LV2 scheduler: drop the reservation,
// raise wait/memory flags and release any held vm lock slot.
void ppu_thread::cpu_sleep()
{
	// Clear reservation
	raddr = 0;

	// Setup wait flag and memory flags to relock itself
	state += g_use_rtm ?
cpu_flag::wait : cpu_flag::wait + cpu_flag::memory;

	// Release the per-thread vm lock slot if this thread holds it
	if (auto ptr = vm::g_tls_locked)
	{
		ptr->compare_and_swap(this, nullptr);
	}

	lv2_obj::awake(this);
}

// Called when the thread is stopping: log an aborted HLE call, optionally dump
// the full context, and report perf counters.
void ppu_thread::cpu_on_stop()
{
	if (current_function && is_stopped())
	{
		if (start_time)
		{
			ppu_log.warning("'%s' aborted (%fs)", current_function, (get_guest_system_time() - start_time) / 1000000.);
		}
		else
		{
			ppu_log.warning("'%s' aborted", current_function);
		}
	}

	current_function = {};

	// TODO: More conditions
	if (Emu.IsStopped() && g_cfg.core.ppu_debug)
	{
		std::string ret;
		dump_all(ret);
		ppu_log.notice("thread context: %s", ret);
	}

	if (is_stopped())
	{
		if (last_succ == 0 && last_fail == 0 && exec_bytes == 0)
		{
			perf_log.notice("PPU thread perf stats are not available.");
		}
		else
		{
			perf_log.notice("Perf stats for STCX reload: success %u, failure %u", last_succ, last_fail);
			perf_log.notice("Perf stats for instructions: total %u", exec_bytes / 4);
		}
	}
}

// Execute guest code: recompiler builds go through ppu_gateway, the static
// interpreter decodes and runs instructions one by one.
void ppu_thread::exec_task()
{
	if (g_cfg.core.ppu_decoder != ppu_decoder_type::_static)
	{
		// HVContext push to allow recursion. This happens with guest callback invocations.
		const auto old_hv_ctx = hv_ctx;

		while (true)
		{
			if (state) [[unlikely]]
			{
				if (check_state())
					break;
			}

			ppu_gateway(this);
		}

		// HVContext pop
		hv_ctx = old_hv_ctx;
		return;
	}

	const auto cache = vm::g_exec_addr;
	const auto mem_ = vm::g_base_addr;

	while (true)
	{
		if (test_stopped()) [[unlikely]]
		{
			return;
		}

		gv_zeroupper();

		// Execute instruction (may be step; execute only one instruction if state)
		const auto op = reinterpret_cast<be_t<u32>*>(mem_ + u64{cia});
		const auto fn = reinterpret_cast<ppu_intrp_func*>(cache + u64{cia} * 2);
		fn->fn(*this, {*op}, op, state ? &ppu_ret : fn + 1);
	}
}

ppu_thread::~ppu_thread()
{
}

// Fresh-thread constructor: detached > 0 = detached, 0 = joinable, < 0 = interrupt thread.
ppu_thread::ppu_thread(const ppu_thread_params& param, std::string_view name, u32 _prio, int detached)
	: cpu_thread(idm::last_id())
	, stack_size(param.stack_size)
	, stack_addr(param.stack_addr)
	, joiner(detached != 0 ?
ppu_join_status::detached : ppu_join_status::joinable)
	, entry_func(param.entry)
	, start_time(get_guest_system_time())
	, is_interrupt_thread(detached < 0)
	, ppu_tname(make_single<std::string>(name))
{
	prio.raw().prio = _prio;

	memset(&hv_ctx, 0, sizeof(hv_ctx));

	// r1 = stack pointer, r13 = TLS base per PPU conventions used here
	gpr[1] = stack_addr + stack_size - ppu_stack_start_offset;
	gpr[13] = param.tls_addr;

	if (detached >= 0)
	{
		// Initialize thread args
		gpr[3] = param.arg0;
		gpr[4] = param.arg1;
	}

	optional_savestate_state = std::make_shared<utils::serial>();

	// Trigger the scheduler
	state += cpu_flag::suspend;

	if (!g_use_rtm)
	{
		state += cpu_flag::memory;
	}

	// Size 1 disables history recording while keeping the buffers valid
	call_history.data.resize(g_cfg.core.ppu_call_history ? call_history_max_size : 1);
	syscall_history.data.resize(g_cfg.core.ppu_call_history ? syscall_history_max_size : 1);
	syscall_history.count_debug_arguments = static_cast<u32>(g_cfg.core.ppu_call_history ? std::size(syscall_history.data[0].args) : 0);

#ifdef __APPLE__
	pthread_jit_write_protect_np(true);
#endif
#ifdef ARCH_ARM64
	// Flush all cache lines after potentially writing executable code
	asm("ISB");
	asm("DSB ISH");
#endif
}

struct disable_precomp_t
{
	atomic_t<bool> disable = false;
};

void vdecEntry(ppu_thread& ppu, u32 vid);

// Whether this thread's state may be captured in a savestate.
bool ppu_thread::savable() const
{
	if (joiner == ppu_join_status::exited)
	{
		return false;
	}

	if (cia == g_fxo->get<ppu_function_manager>().func_addr(FIND_FUNC(vdecEntry)))
	{
		// Do not attempt to save the state of HLE VDEC threads
		return false;
	}

	return true;
}

// Register/context (de)serialization shared by save() and the savestate constructor.
void ppu_thread::serialize_common(utils::serial& ar)
{
	[[maybe_unused]] const s32 version = GET_OR_USE_SERIALIZATION_VERSION(ar.is_writing(), ppu);

	ar(gpr, fpr, cr, fpscr.bits, lr, ctr, vrsave, cia, xer, sat, nj, prio.raw().all);

	// Sanity: CIA must be aligned and within a plausible executable range
	if (cia % 4 || (cia >> 28) >= 0xCu)
	{
		fmt::throw_exception("Failed to serialize PPU thread ID=0x%x (cia=0x%x, ar=%s)", this->id, cia, ar);
	}

	if (ar.is_writing())
	{
		ppu_log.notice("Saving PPU Thread [0x%x: %s]: cia=0x%x, state=%s", id, *ppu_tname.load(), cia, +state);
	}

	ar(optional_savestate_state, vr);

	if
(!ar.is_writing())
	{
		if (optional_savestate_state->data.empty())
		{
			optional_savestate_state->clear();
		}

		optional_savestate_state->set_reading_state();
	}
}

struct save_lv2_tag
{
	atomic_t<bool> saved = false;
	atomic_t<bool> loaded = false;
};

// Savestate constructor: rebuilds a PPU thread from a serialized snapshot and
// queues the commands needed to resume it according to its saved LV2 status.
ppu_thread::ppu_thread(utils::serial& ar)
	: cpu_thread(idm::last_id()) // last_id() provides the thread ID that idm assigned during deserialization
	, stack_size(ar)
	, stack_addr(ar)
	, joiner(ar.pop<ppu_join_status>())
	, entry_func(std::bit_cast<ppu_func_opd_t, u64>(ar))
	, is_interrupt_thread(ar)
{
	[[maybe_unused]] const s32 version = GET_SERIALIZATION_VERSION(ppu);

	// Shared once-flag used to serialize PPU initialization across loaded threads
	struct init_pushed
	{
		bool pushed = false;
		atomic_t<u32> inited = false;
	};

	call_history.data.resize(g_cfg.core.ppu_call_history ? call_history_max_size : 1);
	syscall_history.data.resize(g_cfg.core.ppu_call_history ? syscall_history_max_size : 1);
	syscall_history.count_debug_arguments = static_cast<u32>(g_cfg.core.ppu_call_history ? std::size(syscall_history.data[0].args) : 0);

	// The priority order tag is stored once per savestate; only the first thread reads it
	if (version >= 2 && !g_fxo->get<save_lv2_tag>().loaded.exchange(true))
	{
		ar(lv2_obj::g_priority_order_tag);
	}

	if (version >= 3)
	{
		// Function and module for HLE function relocation
		// TODO: Use it
		ar.pop<std::string>();
		ar.pop<std::string>();
	}

	serialize_common(ar);

	// Restore jm_mask
	jm_mask = nj ?
0x7F800000 : 0x7fff'ffff;

	auto queue_intr_entry = [&]()
	{
		if (is_interrupt_thread)
		{
			void ppu_interrupt_thread_entry(ppu_thread&, ppu_opcode_t, be_t<u32>*, struct ppu_intrp_func*);

			cmd_list
			({
				{ ppu_cmd::ptr_call, 0 },
				std::bit_cast<u64>(&ppu_interrupt_thread_entry)
			});
		}
	};

	// Recreate scheduling/command state from the saved LV2 thread status
	switch (const u32 status = ar.pop<u32>())
	{
	case PPU_THREAD_STATUS_IDLE:
	{
		stop_flag_removal_protection = true;
		break;
	}
	case PPU_THREAD_STATUS_RUNNABLE:
	case PPU_THREAD_STATUS_ONPROC:
	{
		if (version >= 2)
		{
			const u32 order = ar.pop<u32>();

			struct awake_pushed
			{
				bool pushed = false;
				shared_mutex dummy;
				std::map<u32, ppu_thread*> awake_ppus;
			};

			// Collect all runnable PPUs; awaken them later in their saved order
			g_fxo->get<awake_pushed>().awake_ppus[order] = this;

			if (!std::exchange(g_fxo->get<awake_pushed>().pushed, true))
			{
				Emu.PostponeInitCode([this]()
				{
					u32 prev = umax;

					for (auto ppu : g_fxo->get<awake_pushed>().awake_ppus)
					{
						// Orders must form a contiguous sequence starting at 0
						ensure(prev + 1 == ppu.first);
						prev = ppu.first;
						lv2_obj::awake(ppu.second);
					}

					g_fxo->get<awake_pushed>().awake_ppus.clear();
				});
			}
		}
		else
		{
			lv2_obj::awake(this);
		}

		[[fallthrough]];
	}
	case PPU_THREAD_STATUS_SLEEP:
	{
		if (std::exchange(g_fxo->get<init_pushed>().pushed, true))
		{
			// Another thread performs initialization; wait for it to finish
			cmd_list
			({
				{ppu_cmd::ptr_call, 0}, +[](ppu_thread&) -> bool
				{
					while (!Emu.IsStopped() && !g_fxo->get<init_pushed>().inited)
					{
						thread_ctrl::wait_on(g_fxo->get<init_pushed>().inited, 0);
					}
					return false;
				}
			});
		}
		else
		{
			// First thread: run ppu_cmd::initialize, then signal the others
			g_fxo->init<disable_precomp_t>();
			g_fxo->get<disable_precomp_t>().disable = true;

			cmd_push({ppu_cmd::initialize, 0});
			cmd_list
			({
				{ppu_cmd::ptr_call, 0}, +[](ppu_thread&) -> bool
				{
					auto& inited = g_fxo->get<init_pushed>().inited;
					inited = 1;
					inited.notify_all();
					return true;
				}
			});
		}

		if (status == PPU_THREAD_STATUS_SLEEP)
		{
			// Re-execute the interrupted instruction (a syscall) to re-enter sleep
			cmd_list
			({
				{ppu_cmd::ptr_call, 0},

				+[](ppu_thread& ppu) -> bool
				{
					const u32 op = vm::read32(ppu.cia);
					const auto& table = g_fxo->get<ppu_interpreter_rt>();
					ppu.loaded_from_savestate = true;
					ppu.prio.raw().preserve_bit = 1;
					table.decode(op)(ppu, {op}, vm::_ptr<u32>(ppu.cia), &ppu_ret);
					ppu.prio.raw().preserve_bit = 0;
					ppu.optional_savestate_state->clear(); // Reset to writing state
					ppu.loaded_from_savestate = false;
					return true;
				}
			});

			lv2_obj::set_future_sleep(this);
		}

		queue_intr_entry();
		cmd_push({ppu_cmd::cia_call, 0});
		break;
	}
	case PPU_THREAD_STATUS_ZOMBIE:
	{
		state += cpu_flag::exit;
		break;
	}
	case PPU_THREAD_STATUS_STOP:
	{
		queue_intr_entry();
		break;
	}
	}

	// Trigger the scheduler
	state += cpu_flag::suspend;

	if (!g_use_rtm)
	{
		state += cpu_flag::memory;
	}

	ppu_tname = make_single<std::string>(ar.pop<std::string>());

	ppu_log.notice("Loading PPU Thread [0x%x: %s]: cia=0x%x, state=%s", id, *ppu_tname.load(), cia, +state);
}

// Serialize this thread into a savestate (counterpart of the utils::serial constructor).
void ppu_thread::save(utils::serial& ar)
{
	USING_SERIALIZATION_VERSION(ppu);

	const u64 entry = std::bit_cast<u64>(entry_func);

	ppu_join_status _joiner = joiner;
	if (_joiner >= ppu_join_status::max)
	{
		// Joining thread should recover this member properly
		_joiner = ppu_join_status::joinable;
	}

	ar(stack_size, stack_addr, _joiner, entry, is_interrupt_thread);

	const bool is_null = ar.m_file_handler && ar.m_file_handler->is_null();

	// Priority order tag is written once per savestate, by whichever thread saves first
	if (!is_null && !g_fxo->get<save_lv2_tag>().saved.exchange(true))
	{
		ar(lv2_obj::g_priority_order_tag);
	}

	if (current_module && current_module[0])
	{
		ar(std::string{current_module});
		ar(std::string{last_function});
	}
	else
	{
		ar(std::string{});
		ar(std::string{});
	}

	serialize_common(ar);

	auto [status, order] = lv2_obj::ppu_state(this, false);

	if (status == PPU_THREAD_STATUS_SLEEP && cpu_flag::again - state)
	{
		// Hack for sys_fs
		status = PPU_THREAD_STATUS_RUNNABLE;
	}

	ar(status);

	if (status == PPU_THREAD_STATUS_RUNNABLE || status == PPU_THREAD_STATUS_ONPROC)
	{
		ar(order);
	}

	ar(*ppu_tname.load());
}

// Compose a display name of the form "PPU[0xID] <name>".
ppu_thread::thread_name_t::operator std::string() const
{
	std::string thread_name = fmt::format("PPU[0x%x]", _this->id);

	if (const std::string name = *_this->ppu_tname.load(); !name.empty())
	{
		fmt::append(thread_name, " %s", name);
	}

	return thread_name;
}

// Enqueue a single command into the lock-free command queue.
void ppu_thread::cmd_push(cmd64 cmd)
{
	// Reserve queue space
	const u32 pos = cmd_queue.push_begin();

	// Write single command
	cmd_queue[pos] = cmd;
}

// Enqueue several commands atomically: the tail entries are written first and
// the head entry last, so the consumer never observes a partial list.
void ppu_thread::cmd_list(std::initializer_list<cmd64> list)
{
	// Reserve queue space
	const u32 pos = cmd_queue.push_begin(static_cast<u32>(list.size()));

	// Write command tail in relaxed manner
	for (u32 i = 1; i < list.size(); i++)
	{
		cmd_queue[pos + i].raw() = list.begin()[i];
	}

	// Write command head after all
	cmd_queue[pos] = *list.begin();
}

// Consume the current command plus `count` trailing payload entries.
void ppu_thread::cmd_pop(u32 count)
{
	// Get current position
	const u32 pos = cmd_queue.peek();

	// Clean command buffer for command tail
	for (u32 i = 1; i <= count; i++)
	{
		cmd_queue[pos + i].raw() = cmd64{};
	}

	// Free
	cmd_queue.pop_end(count + 1);
}

// Block until a command is available; returns an empty cmd64 if the thread is stopped.
cmd64 ppu_thread::cmd_wait()
{
	while (true)
	{
		// Claim the head entry by swapping it with an empty value
		if (cmd64 result = cmd_queue[cmd_queue.peek()].exchange(cmd64{}))
		{
			return result;
		}

		if (is_stopped())
		{
			return {};
		}

		thread_ctrl::wait_on(cmd_notify, 0);
		cmd_notify = 0;
	}
}

// Address of the i-th (1-based) stack-passed argument, rounded down to `align`.
be_t<u64>* ppu_thread::get_stack_arg(s32 i, u64 align)
{
	if (align != 1 && align != 2 && align != 4 && align != 8 && align != 16)
		fmt::throw_exception("Unsupported alignment: 0x%llx", align);

	return vm::_ptr<u64>(vm::cast((gpr[1] + 0x30 + 0x8 * (i - 1)) & (0 - align)));
}

// Call guest code at `addr` with table-of-contents `rtoc`, saving and restoring
// CIA/RTOC/LR and the log prefix around the call. LR is pointed at the HLE stop
// address so that returning from the callee exits exec_task().
void ppu_thread::fast_call(u32 addr, u64 rtoc, bool is_thread_entry)
{
	const auto old_cia = cia;
	const auto old_rtoc = gpr[2];
	const auto old_lr = lr;
	const auto old_func = current_function;
	const auto old_fmt = g_tls_log_prefix;

	interrupt_thread_executing = true;
	cia = addr;
	gpr[2] = rtoc;
	lr = g_fxo->get<ppu_function_manager>().func_addr(1, true); // HLE stop address
	current_function = nullptr;

	// Savestate resume: keep the restored LR instead of the stop address
	if (std::exchange(loaded_from_savestate, false))
	{
		lr = old_lr;
	}

	g_tls_log_prefix = []
	{
		const auto _this = static_cast<ppu_thread*>(get_current_cpu_thread());

		// Cache the thread name pointer per host thread to avoid repeated loads
		static thread_local shared_ptr<std::string> name_cache;

		if (!_this->ppu_tname.is_equal(name_cache)) [[unlikely]]
		{
			_this->ppu_tname.peek_op([&](const shared_ptr<std::string>& ptr)
			{
				if (ptr != name_cache)
				{
					name_cache = ptr;
				}
			});
		}

		const auto cia = _this->cia;

		if (_this->current_function &&
g_fxo->get<ppu_function_manager>().is_func(cia))
{
// Executing an HLE function: show both the HLE address and the guest LR
return fmt::format("PPU[0x%x] Thread (%s) [HLE:0x%08x, LR:0x%08x]", _this->id, *name_cache.get(), cia, _this->lr);
}

extern const char* get_prx_name_by_cia(u32 addr);

// Annotate the address with the owning PRX name when known
if (auto name = get_prx_name_by_cia(cia))
{
return fmt::format("PPU[0x%x] Thread (%s) [%s: 0x%08x]", _this->id, *name_cache.get(), name, cia);
}

return fmt::format("PPU[0x%x] Thread (%s) [0x%08x]", _this->id, *name_cache.get(), cia);
};

// Restore the caller context after exec_task returns (also handles savestate
// capture when interrupted at the thread entry function)
auto at_ret = [&]()
{
if (old_cia)
{
if (state & cpu_flag::again)
{
ppu_log.error("HLE callstack savestate is not implemented!");
}

cia = old_cia;
gpr[2] = old_rtoc;
lr = old_lr;
}
else if (state & cpu_flag::ret && cia == g_fxo->get<ppu_function_manager>().func_addr(1, true) + 4 && is_thread_entry)
{
std::string ret;
dump_all(ret);
ppu_log.error("Returning from the thread entry function! (func=0x%x)", entry_func.addr);
ppu_log.notice("Thread context: %s", ret);

lv2_obj::sleep(*this);

// For savestates
state += cpu_flag::again;
std::memcpy(syscall_args, &gpr[3], sizeof(syscall_args));
}

if (!old_cia && state & cpu_flag::again)
{
// Fixup argument registers and CIA for reloading
std::memcpy(&gpr[3], syscall_args, sizeof(syscall_args));
cia -= 4;
}

current_function = old_func;
g_tls_log_prefix = old_fmt;
state -= cpu_flag::ret;
};

exec_task();
at_ret();
}

// Reserve `size` bytes on the current PPU guest stack (aligned to align_v),
// zero-fill it, and return {address, bytes actually consumed}. Throws on
// stack overflow or when not called from a PPU thread.
std::pair<vm::addr_t, u32> ppu_thread::stack_push(u32 size, u32 align_v)
{
if (auto cpu = get_current_cpu_thread<ppu_thread>())
{
ppu_thread& context = static_cast<ppu_thread&>(*cpu);

const u32 old_pos = vm::cast(context.gpr[1]);
context.gpr[1] -= size; // room minimal possible size
context.gpr[1] &= ~(u64{align_v} - 1); // fix stack alignment

auto is_stack = [&](u64 addr)
{
return addr >= context.stack_addr && addr < context.stack_addr + context.stack_size;
};

// TODO: This check does not care about custom stack memory
if (is_stack(old_pos) != is_stack(context.gpr[1]))
{
fmt::throw_exception("Stack overflow (size=0x%x, align=0x%x, SP=0x%llx, stack=*0x%x)", size,
align_v, old_pos, context.stack_addr);
}
else
{
const u32 addr = static_cast<u32>(context.gpr[1]);

// Zero-initialize the reserved region before handing it out
std::memset(vm::base(addr), 0, size);

return {vm::cast(addr), old_pos - addr};
}
}

fmt::throw_exception("Invalid thread");
}

// Release `size` bytes pushed by stack_push. Logs (rather than throws) when the
// current SP does not match the expected address, or when not on a PPU thread.
void ppu_thread::stack_pop_verbose(u32 addr, u32 size) noexcept
{
if (auto cpu = get_current_cpu_thread<ppu_thread>())
{
ppu_thread& context = static_cast<ppu_thread&>(*cpu);

if (context.gpr[1] != addr)
{
ppu_log.error("Stack inconsistency (addr=0x%x, SP=0x%llx, size=0x%x)", addr, context.gpr[1], size);
return;
}

context.gpr[1] += size;
return;
}

ppu_log.error("Invalid thread");
}

extern ppu_intrp_func_t ppu_get_syscall(u64 code);

// Handle a guest trap instruction at `addr`. Depending on the "Stub PPU Traps"
// setting, either throw (fatal) or skip a configured number of instructions.
void ppu_trap(ppu_thread& ppu, u64 addr)
{
// Address must be a valid 32-bit, 4-byte-aligned value
ensure((addr & (~u64{0xffff'ffff} | 0x3)) == 0);
ppu.cia = static_cast<u32>(addr);

// Byte distance to skip (stub_ppu_traps is a signed instruction count)
u32 add = static_cast<u32>(g_cfg.core.stub_ppu_traps) * 4;

// If stubbing is enabled, check current instruction and the following
if (!add || !vm::check_addr(ppu.cia, vm::page_executable) || !vm::check_addr(ppu.cia + add, vm::page_executable))
{
fmt::throw_exception("PPU Trap! Sometimes tweaking the setting \"Stub PPU Traps\" can be a workaround to this crash.\nBest values depend on game code, if unsure try 1.");
}

ppu_log.error("PPU Trap: Stubbing %d instructions %s.", std::abs(static_cast<s32>(add) / 4), add >> 31 ?
"backwards" : "forwards");
ppu.cia += add; // Skip instructions, hope for valid code (interpreter may be invoked temporarily)
}

// Recompiler fallback entry: re-dispatch the instruction at `addr`.
static void ppu_error(ppu_thread& ppu, u64 addr, u32 /*op*/)
{
ppu.cia = ::narrow<u32>(addr);
ppu_recompiler_fallback(ppu);
}

// Periodic check point emitted into compiled code: updates CIA and polls for stop.
static void ppu_check(ppu_thread& ppu, u64 addr)
{
ppu.cia = ::narrow<u32>(addr);

if (ppu.test_stopped())
{
return;
}
}

static void ppu_trace(u64 addr)
{
ppu_log.notice("Trace: 0x%llx", addr);
}

// LWARX/LDARX implementation: acquire a reservation at `addr` and return the
// loaded value (T = u32 or u64). Depending on configuration, uses either a
// 64-bit reservation snapshot or a full 128-byte (cache line) snapshot.
template <typename T>
static T ppu_load_acquire_reservation(ppu_thread& ppu, u32 addr)
{
perf_meter<"LARX"_u32> perf0;

// Do not allow stores accessed from the same cache line to past reservation load
atomic_fence_seq_cst();

if (addr % sizeof(T))
{
fmt::throw_exception("PPU %s: Unaligned address: 0x%08x", sizeof(T) == 4 ? "LWARX" : "LDARX", addr);
}

// Always load aligned 64-bit value
auto& data = vm::_ref<const atomic_be_t<u64>>(addr & -8);
// Shift amounts used to extract the T-sized value from the aligned 64-bit word
const u64 size_off = (sizeof(T) * 8) & 63;
const u64 data_off = (addr & 7) * 8;
ppu.raddr = addr;

u32 addr_mask = -1;

if (const s32 max = g_cfg.core.ppu_128_reservations_loop_max_length)
{
// If we use it in HLE it means we want the accurate version
ppu.use_full_rdata = max < 0 || ppu.current_function || [&]()
{
const u32 cia = ppu.cia;

if ((cia & 0xffff) >= 0x10000u - max * 4)
{
// Do not cross 64k boundary
return false;
}

const auto inst = vm::_ptr<const nse_t<u32>>(cia);

// Search for STWCX or STDCX nearby (LDARX-STWCX and LWARX-STDCX loops will use accurate 128-byte reservations)
constexpr u32 store_cond = stx::se_storage<u32>::swap(sizeof(T) == 8 ?
0x7C00012D : 0x7C0001AD);
constexpr u32 mask = stx::se_storage<u32>::swap(0xFC0007FF);

const auto store_vec = v128::from32p(store_cond);
const auto mask_vec = v128::from32p(mask);

s32 i = 2;

// Vectorized scan, 4 instructions at a time
for (const s32 _max = max - 3; i < _max; i += 4)
{
const auto _inst = v128::loadu(inst + i) & mask_vec;

if (!gv_testz(gv_eq32(_inst, store_vec)))
{
return false;
}
}

// Scalar scan for the remaining instructions
for (; i < max; i++)
{
const u32 val = inst[i] & mask;

if (val == store_cond)
{
return false;
}
}

return true;
}();

if (ppu.use_full_rdata)
{
addr_mask = -128;
}
}
else
{
ppu.use_full_rdata = false;
}

if (ppu_log.trace && (addr & addr_mask) == (ppu.last_faddr & addr_mask))
{
ppu_log.trace(u8"LARX after fail: addr=0x%x, faddr=0x%x, time=%u c", addr, ppu.last_faddr, (perf0.get() - ppu.last_ftsc));
}

// Fast path: if we recently failed a STCX at this address and the reservation
// has not changed since, reuse the previously captured data/time
if ((addr & addr_mask) == (ppu.last_faddr & addr_mask) && (perf0.get() - ppu.last_ftsc) < 600 && (vm::reservation_acquire(addr) & -128) == ppu.last_ftime)
{
be_t<u64> rdata;
std::memcpy(&rdata, &ppu.rdata[addr & 0x78], 8);

if (rdata == data.load())
{
ppu.rtime = ppu.last_ftime;
ppu.raddr = ppu.last_faddr;
ppu.last_ftime = 0;
return static_cast<T>(rdata << data_off >> size_off);
}

ppu.last_fail++;
ppu.last_faddr = 0;
}
else
{
// Silent failure
ppu.last_faddr = 0;
}

ppu.rtime = vm::reservation_acquire(addr) & -128;

be_t<u64> rdata;

if (!ppu.use_full_rdata)
{
rdata = data.load();

// Store only 64 bits of reservation data
std::memcpy(&ppu.rdata[addr & 0x78], &rdata, 8);
}
else
{
// Capture the whole 128-byte line for accurate reservations
mov_rdata(ppu.rdata, vm::_ref<spu_rdata_t>(addr & -128));
atomic_fence_acquire();

// Load relevant 64 bits of reservation data
std::memcpy(&rdata, &ppu.rdata[addr & 0x78], 8);
}

return static_cast<T>(rdata << data_off >> size_off);
}

// LWARX (32-bit load-and-reserve)
extern u32 ppu_lwarx(ppu_thread& ppu, u32 addr)
{
return ppu_load_acquire_reservation<u32>(ppu, addr);
}

// LDARX (64-bit load-and-reserve)
extern u64 ppu_ldarx(ppu_thread& ppu, u32 addr)
{
return ppu_load_acquire_reservation<u64>(ppu, addr);
}

// JIT-assembled transactional (TSX/RTM) path for accurate STCX: compares the
// captured 128-byte line and conditionally stores 8 bytes inside a transaction.
// Returns elapsed TSC on success, 0 on data mismatch, -1 (umax) on transaction abort.
const auto ppu_stcx_accurate_tx = build_function_asm<u64(*)(u32 raddr, u64 rtime, const void* _old, u64
_new)>("ppu_stcx_accurate_tx", [](native_asm& c, auto& args)
{
using namespace asmjit;

#if defined(ARCH_X64)
Label fall = c.newLabel();
Label fail = c.newLabel();
Label _ret = c.newLabel();
Label load = c.newLabel();

//if (utils::has_avx() && !s_tsx_avx)
//{
//	c.vzeroupper();
//}

// Create stack frame if necessary (Windows ABI has only 6 volatile vector registers)
c.push(x86::rbp);
c.push(x86::r14);
c.sub(x86::rsp, 40);
#ifdef _WIN32
if (!s_tsx_avx)
{
c.movups(x86::oword_ptr(x86::rsp, 0), x86::xmm6);
c.movups(x86::oword_ptr(x86::rsp, 16), x86::xmm7);
}
#endif

// Prepare registers
build_swap_rdx_with(c, args, x86::r10);
// rbp = sudo pointer to the 128-byte-aligned cache line containing raddr
c.mov(x86::rbp, x86::qword_ptr(reinterpret_cast<u64>(&vm::g_sudo_addr)));
c.lea(x86::rbp, x86::qword_ptr(x86::rbp, args[0]));
c.and_(x86::rbp, -128);
c.prefetchw(x86::byte_ptr(x86::rbp, 0));
c.prefetchw(x86::byte_ptr(x86::rbp, 64));
// r11 = pointer to the reservation counter for this address
c.movzx(args[0].r32(), args[0].r16());
c.shr(args[0].r32(), 1);
c.lea(x86::r11, x86::qword_ptr(reinterpret_cast<u64>(+vm::g_reservations), args[0]));
c.and_(x86::r11, -128 / 2);
c.and_(args[0].r32(), 63);

// Prepare data (load the expected 128-byte snapshot from _old)
if (s_tsx_avx)
{
c.vmovups(x86::ymm0, x86::ymmword_ptr(args[2], 0));
c.vmovups(x86::ymm1, x86::ymmword_ptr(args[2], 32));
c.vmovups(x86::ymm2, x86::ymmword_ptr(args[2], 64));
c.vmovups(x86::ymm3, x86::ymmword_ptr(args[2], 96));
}
else
{
c.movaps(x86::xmm0, x86::oword_ptr(args[2], 0));
c.movaps(x86::xmm1, x86::oword_ptr(args[2], 16));
c.movaps(x86::xmm2, x86::oword_ptr(args[2], 32));
c.movaps(x86::xmm3, x86::oword_ptr(args[2], 48));
c.movaps(x86::xmm4, x86::oword_ptr(args[2], 64));
c.movaps(x86::xmm5, x86::oword_ptr(args[2], 80));
c.movaps(x86::xmm6, x86::oword_ptr(args[2], 96));
c.movaps(x86::xmm7, x86::oword_ptr(args[2], 112));
}

// Alloc r14 to stamp0
const auto stamp0 = x86::r14;
build_get_tsc(c, stamp0);

Label fail2 = c.newLabel();

// Retry loop; bail to `fall` when the elapsed TSC exceeds g_rtm_tx_limit2
Label tx1 = build_transaction_enter(c, fall, [&]()
{
build_get_tsc(c);
c.sub(x86::rax, stamp0);
c.cmp(x86::rax, x86::qword_ptr(reinterpret_cast<u64>(&g_rtm_tx_limit2)));
c.jae(fall);
});

// Check pause flag
c.bt(x86::dword_ptr(args[2], ::offset32(&ppu_thread::state) - ::offset32(&ppu_thread::rdata)), static_cast<u32>(cpu_flag::pause));
c.jc(fall);
c.xbegin(tx1);

// Compare the current cache line against the expected snapshot:
// XOR all 128 bytes with the snapshot and OR-reduce; non-zero => mismatch
if (s_tsx_avx)
{
c.vxorps(x86::ymm0, x86::ymm0, x86::ymmword_ptr(x86::rbp, 0));
c.vxorps(x86::ymm1, x86::ymm1, x86::ymmword_ptr(x86::rbp, 32));
c.vxorps(x86::ymm2, x86::ymm2, x86::ymmword_ptr(x86::rbp, 64));
c.vxorps(x86::ymm3, x86::ymm3, x86::ymmword_ptr(x86::rbp, 96));
c.vorps(x86::ymm0, x86::ymm0, x86::ymm1);
c.vorps(x86::ymm1, x86::ymm2, x86::ymm3);
c.vorps(x86::ymm0, x86::ymm1, x86::ymm0);
c.vptest(x86::ymm0, x86::ymm0);
}
else
{
c.xorps(x86::xmm0, x86::oword_ptr(x86::rbp, 0));
c.xorps(x86::xmm1, x86::oword_ptr(x86::rbp, 16));
c.xorps(x86::xmm2, x86::oword_ptr(x86::rbp, 32));
c.xorps(x86::xmm3, x86::oword_ptr(x86::rbp, 48));
c.xorps(x86::xmm4, x86::oword_ptr(x86::rbp, 64));
c.xorps(x86::xmm5, x86::oword_ptr(x86::rbp, 80));
c.xorps(x86::xmm6, x86::oword_ptr(x86::rbp, 96));
c.xorps(x86::xmm7, x86::oword_ptr(x86::rbp, 112));
c.orps(x86::xmm0, x86::xmm1);
c.orps(x86::xmm2, x86::xmm3);
c.orps(x86::xmm4, x86::xmm5);
c.orps(x86::xmm6, x86::xmm7);
c.orps(x86::xmm0, x86::xmm2);
c.orps(x86::xmm4, x86::xmm6);
c.orps(x86::xmm0, x86::xmm4);
c.ptest(x86::xmm0, x86::xmm0);
}

c.jnz(fail);

// Store 8 bytes
c.mov(x86::qword_ptr(x86::rbp, args[0], 1, 0), args[3]);

c.xend();
// Bump the reservation counter and return the elapsed TSC as the success value
c.lock().add(x86::qword_ptr(x86::r11), 64);
build_get_tsc(c);
c.sub(x86::rax, stamp0);
c.jmp(_ret);

// XABORT is expensive so try to finish with xend instead
c.bind(fail);

// Load old data to store back in rdata
if (s_tsx_avx)
{
c.vmovaps(x86::ymm0, x86::ymmword_ptr(x86::rbp, 0));
c.vmovaps(x86::ymm1, x86::ymmword_ptr(x86::rbp, 32));
c.vmovaps(x86::ymm2, x86::ymmword_ptr(x86::rbp, 64));
c.vmovaps(x86::ymm3, x86::ymmword_ptr(x86::rbp, 96));
}
else
{
c.movaps(x86::xmm0, x86::oword_ptr(x86::rbp, 0));
c.movaps(x86::xmm1, x86::oword_ptr(x86::rbp, 16));
c.movaps(x86::xmm2, x86::oword_ptr(x86::rbp, 32));
c.movaps(x86::xmm3,
x86::oword_ptr(x86::rbp, 48));
c.movaps(x86::xmm4, x86::oword_ptr(x86::rbp, 64));
c.movaps(x86::xmm5, x86::oword_ptr(x86::rbp, 80));
c.movaps(x86::xmm6, x86::oword_ptr(x86::rbp, 96));
c.movaps(x86::xmm7, x86::oword_ptr(x86::rbp, 112));
}

c.xend();
c.jmp(fail2);

// Transaction could not start/complete: return -1 (umax)
c.bind(fall);
c.mov(x86::rax, -1);
c.jmp(_ret);

c.bind(fail2);
// Undo the speculative reservation counter increment
c.lock().sub(x86::qword_ptr(x86::r11), 64);
c.bind(load);

// Store previous data back to rdata
if (s_tsx_avx)
{
c.vmovaps(x86::ymmword_ptr(args[2], 0), x86::ymm0);
c.vmovaps(x86::ymmword_ptr(args[2], 32), x86::ymm1);
c.vmovaps(x86::ymmword_ptr(args[2], 64), x86::ymm2);
c.vmovaps(x86::ymmword_ptr(args[2], 96), x86::ymm3);
}
else
{
c.movaps(x86::oword_ptr(args[2], 0), x86::xmm0);
c.movaps(x86::oword_ptr(args[2], 16), x86::xmm1);
c.movaps(x86::oword_ptr(args[2], 32), x86::xmm2);
c.movaps(x86::oword_ptr(args[2], 48), x86::xmm3);
c.movaps(x86::oword_ptr(args[2], 64), x86::xmm4);
c.movaps(x86::oword_ptr(args[2], 80), x86::xmm5);
c.movaps(x86::oword_ptr(args[2], 96), x86::xmm6);
c.movaps(x86::oword_ptr(args[2], 112), x86::xmm7);
}

// Record last_ftime = -1 and return 0 (data mismatch)
c.mov(x86::rax, -1);
c.mov(x86::qword_ptr(args[2], ::offset32(&ppu_thread::last_ftime) - ::offset32(&ppu_thread::rdata)), x86::rax);
c.xor_(x86::eax, x86::eax);
//c.jmp(_ret);

c.bind(_ret);

#ifdef _WIN32
if (!s_tsx_avx)
{
c.vmovups(x86::xmm6, x86::oword_ptr(x86::rsp, 0));
c.vmovups(x86::xmm7, x86::oword_ptr(x86::rsp, 16));
}
#endif

if (s_tsx_avx)
{
c.vzeroupper();
}

c.add(x86::rsp, 40);
c.pop(x86::r14);
c.pop(x86::rbp);
maybe_flush_lbr(c);
c.ret();
#else
UNUSED(args);

// Unimplemented should fail.
c.brk(Imm(0x42));
c.ret(a64::x30);
#endif
});

// STWCX/STDCX implementation: conditionally store `reg_value` (T = u32 or u64)
// at `addr` if the reservation acquired by the matching LARX is still intact.
// Returns true when the store succeeded.
template <typename T>
static bool ppu_store_reservation(ppu_thread& ppu, u32 addr, u64 reg_value)
{
perf_meter<"STCX"_u32> perf0;

if (addr % sizeof(T))
{
fmt::throw_exception("PPU %s: Unaligned address: 0x%08x", sizeof(T) == 4 ?
"STWCX" : "STDCX", addr);
}

auto& data = vm::_ref<atomic_be_t<u64>>(addr & -8);
auto& res = vm::reservation_acquire(addr);
const u64 rtime = ppu.rtime;

// Expected (old) 64-bit value captured at LARX time
be_t<u64> old_data = 0;
std::memcpy(&old_data, &ppu.rdata[addr & 0x78], sizeof(old_data));
be_t<u64> new_data = old_data;

if constexpr (sizeof(T) == sizeof(u32))
{
// Rebuild reg_value to be 32-bits of new data and 32-bits of old data
const be_t<u32> reg32 = static_cast<u32>(reg_value);
std::memcpy(reinterpret_cast<char*>(&new_data) + (addr & 4), &reg32, sizeof(u32));
}
else
{
new_data = reg_value;
}

// Test if store address is on the same aligned 8-bytes memory as load
if (const u32 raddr = std::exchange(ppu.raddr, 0); raddr / 8 != addr / 8)
{
// If not and it is on the same aligned 128-byte memory, proceed only if 128-byte reservations are enabled
// In realhw the store address can be at any address of the 128-byte cache line
if (raddr / 128 != addr / 128 || !ppu.use_full_rdata)
{
// Even when the reservation address does not match the target address must be valid
if (!vm::check_addr(addr, vm::page_writable))
{
// Deliberately touch the unwritable page to raise the access violation
data += 0;
}

return false;
}
}

// Fail fast when memory or reservation time already changed
if (old_data != data || rtime != (res & -128))
{
return false;
}

if ([&]()
{
if (ppu.use_full_rdata) [[unlikely]]
{
// Lock the reservation before attempting the accurate 128-byte store
auto [_oldd, _ok] = res.fetch_op([&](u64& r)
{
if ((r & -128) != rtime || (r & 127))
{
return false;
}

r += vm::rsrv_unique_lock;
return true;
});

if (!_ok)
{
// Already locked or updated: give up
return false;
}

if (g_use_rtm) [[likely]]
{
switch (u64 count = ppu_stcx_accurate_tx(addr & -8, rtime, ppu.rdata, std::bit_cast<u64>(new_data)))
{
case umax:
{
// Transaction kept aborting: fall back to suspending all threads
auto& all_data = *vm::get_super_ptr<spu_rdata_t>(addr & -128);
auto& sdata = *vm::get_super_ptr<atomic_be_t<u64>>(addr & -8);

const bool ok = cpu_thread::suspend_all<+3>(&ppu, {all_data, all_data + 64, &res}, [&]
{
if ((res & -128) == rtime && cmp_rdata(ppu.rdata, all_data))
{
sdata.release(new_data);
res += 64;
return true;
}

mov_rdata_nt(ppu.rdata, all_data);
res -= 64;
return false;
});

if
(ok)
{
break;
}

ppu.last_ftime = -1;
[[fallthrough]];
}
case 0:
{
// Data mismatch: record failure details for the LARX fast path
if (ppu.last_faddr == addr)
{
ppu.last_fail++;
}

if (ppu.last_ftime != umax)
{
ppu.last_faddr = 0;
return false;
}

utils::prefetch_read(ppu.rdata);
utils::prefetch_read(ppu.rdata + 64);
ppu.last_faddr = addr;
ppu.last_ftime = res.load() & -128;
ppu.last_ftsc = utils::get_tsc();
return false;
}
default:
{
if (count > 20000 && g_cfg.core.perf_report) [[unlikely]]
{
perf_log.warning(u8"STCX: took too long: %.3fµs (%u c)", count / (utils::get_tsc_freq() / 1000'000.), count);
}

break;
}
}

if (ppu.last_faddr == addr)
{
ppu.last_succ++;
}

ppu.last_faddr = 0;
return true;
}

// Non-TSX accurate path below

// Align address: we do not need the lower 7 bits anymore
addr &= -128;

// Cache line data
//auto& cline_data = vm::_ref<spu_rdata_t>(addr);

data += 0;
auto range_lock = vm::alloc_range_lock();

bool success = false;
{
rsx::reservation_lock rsx_lock(addr, 128);
auto& super_data = *vm::get_super_ptr<spu_rdata_t>(addr);
success = [&]()
{
// Full lock (heavyweight)
// TODO: vm::check_addr
vm::writer_lock lock(addr, range_lock);

if (cmp_rdata(ppu.rdata, super_data))
{
data.release(new_data);
res += 64;
return true;
}

res -= 64;
return false;
}();
}

vm::free_range_lock(range_lock);
return success;
}

// 64-bit reservation path

if (new_data == old_data)
{
// No visible change: just bump the reservation time
ppu.last_faddr = 0;
return res.compare_and_swap_test(rtime, rtime + 128);
}

// Aligned 8-byte reservations will be used here
addr &= -8;

const u64 lock_bits = vm::rsrv_unique_lock;

auto [_oldd, _ok] = res.fetch_op([&](u64& r)
{
if ((r & -128) != rtime || (r & 127))
{
return false;
}

r += lock_bits;
return true;
});

// Give up if reservation has been locked or updated
if (!_ok)
{
ppu.last_faddr = 0;
return false;
}

// Store previous value in old_data on failure
if (data.compare_exchange(old_data, new_data))
{
res += 128 - lock_bits;
return true;
}

const u64 old_rtime = res.fetch_sub(lock_bits);

// TODO: disabled with this setting on, since it's dangerous to mix
if (!g_cfg.core.ppu_128_reservations_loop_max_length)
{
// Store old_data on failure
if (ppu.last_faddr == addr)
{
ppu.last_fail++;
}

ppu.last_faddr = addr;
ppu.last_ftime = old_rtime & -128;
ppu.last_ftsc = utils::get_tsc();
std::memcpy(&ppu.rdata[addr & 0x78], &old_data, 8);
}

return false;
}())
{
// Successful store: handle reservation-waiter notification
extern atomic_t<u32> liblv2_begin, liblv2_end;

// Avoid notifications from lwmutex or sys_spinlock
if (new_data != old_data && (ppu.cia < liblv2_begin || ppu.cia >= liblv2_end))
{
const u32 notify = ppu.res_notify;

if (notify)
{
bool notified = false;

// Flush a previously postponed notification if it is still current
if (ppu.res_notify_time == (vm::reservation_acquire(notify) & -128))
{
ppu.state += cpu_flag::wait;
vm::reservation_notifier_notify(notify);
notified = true;
}

if (vm::reservation_notifier_count(addr))
{
if (!notified)
{
ppu.res_notify = addr;
ppu.res_notify_time = rtime + 128;
}
else if ((addr ^ notify) & -128)
{
vm::reservation_notifier_notify(addr);
ppu.res_notify = 0;
}
}
else
{
ppu.res_notify = 0;
}

static_cast<void>(ppu.test_stopped());
}
else
{
// Try to postpone notification to when PPU is asleep or join notifications on the same address
// This also optimizes a mutex - won't notify after lock is acquired (prolonging the critical section duration), only notifies on unlock
if (vm::reservation_notifier_count(addr))
{
ppu.res_notify = addr;
ppu.res_notify_time = rtime + 128;
}
}
}

if (addr == ppu.last_faddr)
{
ppu.last_succ++;
}

ppu.last_faddr = 0;
return true;
}

const u32 notify = ppu.res_notify;

// Do not risk postponing too much (because this is probably an indefinite loop)
// And on failure it has some time to do something else
if (notify && ((addr ^ notify) & -128))
{
if (ppu.res_notify_time == (vm::reservation_acquire(notify) & -128))
{
ppu.state += cpu_flag::wait;
vm::reservation_notifier_notify(notify);
static_cast<void>(ppu.test_stopped());
}

ppu.res_notify = 0;
}

return false;
}

// STWCX (32-bit store-conditional)
extern bool ppu_stwcx(ppu_thread& ppu, u32 addr, u32 reg_value)
{
return ppu_store_reservation<u32>(ppu, addr, reg_value);
}

// STDCX (64-bit store-conditional)
extern bool ppu_stdcx(ppu_thread& ppu, u32 addr, u64 reg_value)
{
return ppu_store_reservation<u64>(ppu, addr, reg_value);
}

// Limits the number of concurrent LLVM compilation threads via a shared semaphore.
struct jit_core_allocator
{
// Effective thread count: the llvm_threads setting clamped to the hardware limit
const s16 thread_count = g_cfg.core.llvm_threads ? std::min<s32>(g_cfg.core.llvm_threads, limit()) : limit();

// Initialize global semaphore with the max number of threads
::semaphore<0x7fff> sem{std::max<s16>(thread_count, 1)};

static s16 limit()
{
return static_cast<s16>(std::min<s32>(0x7fff, utils::get_thread_count()));
}
};

#ifdef LLVM_AVAILABLE
namespace
{
// Compiled PPU module info
struct jit_module
{
void(*symbol_resolver)(u8*, u64) = nullptr;
std::shared_ptr<jit_compiler> pjit;
bool init = false;
};

// Registry of compiled PPU modules, keyed by name and sharded into
// mutex-protected buckets to reduce lock contention.
struct jit_module_manager
{
struct bucket_t
{
shared_mutex mutex;
std::unordered_map<std::string, jit_module> map;
};

std::array<bucket_t, 30> buckets;

bucket_t& get_bucket(std::string_view sv)
{
return buckets[std::hash<std::string_view>()(sv) % std::size(buckets)];
}

// Get or create the entry for `name`
jit_module& get(const std::string& name)
{
bucket_t& bucket = get_bucket(name);
std::lock_guard lock(bucket.mutex);
return bucket.map.emplace(name, jit_module{}).first->second;
}

// Remove the entry for `name`; the compiler instance is moved into a local
// so its destruction happens via `to_destroy` rather than inside map::erase
void remove(const std::string& name) noexcept
{
bucket_t& bucket = get_bucket(name);

jit_module to_destroy{};

std::lock_guard lock(bucket.mutex);
const auto found = bucket.map.find(name);

if (found == bucket.map.end()) [[unlikely]]
{
ppu_log.error("Failed to remove module %s", name);

for (auto& buck : buckets)
{
for (auto& mod : buck.map)
{
ppu_log.notice("But there is module %s", mod.first);
}
}

return;
}

to_destroy.pjit = std::move(found->second.pjit);
bucket.map.erase(found);
}
};
}
#endif

namespace
{
// Read-only file view starting with specified offset (for MSELF)
struct file_view : fs::file_base
{
const fs::file m_storage; // Owned backing file (empty when viewing a borrowed file)
const fs::file& m_file;   // The file actually read from (borrowed or m_storage)
const u64 m_off;          // Base offset of the view within m_file
const u64 m_max_size;     // Maximum size exposed by the view
u64 m_pos;                // Current seek position within the view

// Borrowing constructor: the caller keeps ownership of _file
explicit file_view(const fs::file& _file, u64 offset, u64 max_size) noexcept
: m_storage(fs::file())
, m_file(_file)
, m_off(offset)
, m_max_size(max_size)
, m_pos(0)
{
}

// Owning constructor: the view takes ownership of _file
explicit file_view(fs::file&& _file, u64 offset, u64 max_size)
noexcept
: m_storage(std::move(_file))
, m_file(m_storage)
, m_off(offset)
, m_max_size(max_size)
, m_pos(0)
{
}

~file_view() override
{
}

// Stat of the view: size clamped to the window, always read-only
fs::stat_t get_stat() override
{
fs::stat_t stat = m_file.get_stat();
stat.size = std::min<u64>(utils::sub_saturate<u64>(stat.size, m_off), m_max_size);
stat.is_writable = false;
return stat;
}

bool trunc(u64) override
{
return false;
}

u64 read(void* buffer, u64 size) override
{
const u64 result = file_view::read_at(m_pos, buffer, size);
m_pos += result;
return result;
}

// Read relative to the view window, clamped so it cannot read past m_max_size
u64 read_at(u64 offset, void* buffer, u64 size) override
{
return m_file.read_at(offset + m_off, buffer, std::min<u64>(size, utils::sub_saturate<u64>(m_max_size, offset)));
}

// The view is read-only
u64 write(const void*, u64) override
{
return 0;
}

u64 seek(s64 offset, fs::seek_mode whence) override
{
const s64 new_pos =
whence == fs::seek_set ? offset :
whence == fs::seek_cur ? offset + m_pos :
whence == fs::seek_end ? offset + size() : -1;

if (new_pos < 0)
{
fs::g_tls_error = fs::error::inval;
return -1;
}

m_pos = new_pos;
return m_pos;
}

u64 size() override
{
return std::min<u64>(utils::sub_saturate<u64>(m_file.size(), m_off), m_max_size);
}
};
}

// Create a read-only view over a borrowed file; `_file` must outlive the view.
extern fs::file make_file_view(const fs::file& _file, u64 offset, u64 max_size = umax)
{
fs::file file;
file.reset(std::make_unique<file_view>(_file, offset, max_size));
return file;
}

// Create a read-only view that takes ownership of `_file`.
extern fs::file make_file_view(fs::file&& _file, u64 offset, u64 max_size = umax)
{
fs::file file;
file.reset(std::make_unique<file_view>(std::move(_file), offset, max_size));
return file;
}

// Release resources of a compiled PPU module (removes its entry from the JIT
// module manager). Skips HLE modules, the main module, "1P" titles and
// dev_flash system PRX unless force_mem_release is set.
extern void ppu_finalize(const ppu_module& info, bool force_mem_release)
{
if (info.segs.empty())
{
// HLEd modules
return;
}

if (!force_mem_release && info.name.empty())
{
// Don't remove main module from memory
return;
}

if (!force_mem_release && Emu.GetCat() == "1P")
{
return;
}

// "May be ELF" when the path does not end with "prx" (case-insensitive)
const bool may_be_elf = fmt::to_lower(info.path.substr(std::max<usz>(info.path.size(), 3) - 3)) != "prx";

const std::string dev_flash = vfs::get("/dev_flash/");

if (!may_be_elf)
{
if
(!force_mem_release && info.path.starts_with(dev_flash + "sys/external/"))
{
// Don't remove dev_flash prx from memory
return;
}
}

if (g_cfg.core.ppu_decoder != ppu_decoder_type::llvm)
{
return;
}

// Get cache path for this executable
std::string cache_path = rpcs3::utils::get_cache_dir(info.path);

// Add PPU hash and filename
fmt::append(cache_path, "ppu-%s-%s/", fmt::base57(info.sha1), info.path.substr(info.path.find_last_of('/') + 1));

#ifdef LLVM_AVAILABLE
// The manager key also encodes the first segment's base pointer
g_fxo->get<jit_module_manager>().remove(cache_path + "_" + std::to_string(std::bit_cast<usz>(info.segs[0].ptr)));
#endif
}

// Recursively scan the given directories and precompile PPU modules
// (PRX/SPRX, (S)ELF, and entries inside MSELF containers).
// NOTE(review): the body of this function continues past this chunk; comments
// here only cover the visible head.
extern void ppu_precompile(std::vector<std::string>& dir_queue, std::vector<ppu_module*>* loaded_modules)
{
// Precompilation only applies to the LLVM recompiler
if (g_cfg.core.ppu_decoder != ppu_decoder_type::llvm)
{
return;
}

if (auto dis = g_fxo->try_get<disable_precomp_t>(); dis && dis->disable)
{
return;
}

std::optional<scoped_progress_dialog> progress_dialog(std::in_place, get_localized_string(localized_string_id::PROGRESS_DIALOG_SCANNING_PPU_EXECUTABLE));

// Make sure we only have one '/' at the end and remove duplicates.
for (std::string& dir : dir_queue) { while (dir.back() == '/' || dir.back() == '\\') dir.pop_back(); dir += '/'; } std::sort(dir_queue.begin(), dir_queue.end()); dir_queue.erase(std::unique(dir_queue.begin(), dir_queue.end()), dir_queue.end()); const std::string firmware_sprx_path = vfs::get("/dev_flash/sys/external/"); struct file_info { std::string path; u64 offset; u64 file_size; file_info() noexcept = default; file_info(std::string _path, u64 offs, u64 size) noexcept : path(std::move(_path)) , offset(offs) , file_size(size) { } }; std::vector<file_info> file_queue; file_queue.reserve(2000); // Find all .sprx files recursively for (usz i = 0; i < dir_queue.size(); i++) { if (Emu.IsStopped()) { file_queue.clear(); break; } ppu_log.notice("Scanning directory: %s", dir_queue[i]); for (auto&& entry : fs::dir(dir_queue[i])) { if (Emu.IsStopped()) { file_queue.clear(); break; } if (entry.is_directory) { if (entry.name != "." && entry.name != "..") { dir_queue.emplace_back(dir_queue[i] + entry.name + '/'); } continue; } // SCE header size if (entry.size <= 0x20) { continue; } std::string upper = fmt::to_upper(entry.name); // Skip already loaded modules or HLEd ones auto is_ignored = [&](s64 /*offset*/) -> bool { if (dir_queue[i] != firmware_sprx_path) { return false; } if (loaded_modules) { if (std::any_of(loaded_modules->begin(), loaded_modules->end(), [&](ppu_module* obj) { return obj->name == entry.name; })) { return true; } } if (g_cfg.core.libraries_control.get_set().count(entry.name + ":lle")) { // Force LLE return false; } else if (g_cfg.core.libraries_control.get_set().count(entry.name + ":hle")) { // Force HLE return true; } extern const std::map<std::string_view, int> g_prx_list; // Use list return g_prx_list.count(entry.name) && ::at32(g_prx_list, entry.name) != 0; }; // Check PRX filename if (upper.ends_with(".PRX") || (upper.ends_with(".SPRX") && entry.name != "libfs_utility_init.sprx"sv)) { if (is_ignored(0)) { continue; } // Get full path 
file_queue.emplace_back(dir_queue[i] + entry.name, 0, entry.size); continue; } // Check ELF filename if ((upper.ends_with(".ELF") || upper.ends_with(".SELF")) && Emu.GetBoot() != dir_queue[i] + entry.name) { // Get full path file_queue.emplace_back(dir_queue[i] + entry.name, 0, entry.size); continue; } // Check .mself filename if (upper.ends_with(".MSELF")) { if (fs::file mself{dir_queue[i] + entry.name}) { mself_header hdr{}; if (mself.read(hdr) && hdr.get_count(mself.size())) { for (u32 j = 0; j < hdr.count; j++) { mself_record rec{}; std::set<u64> offs; if (mself.read(rec) && rec.get_pos(mself.size())) { if (rec.size <= 0x20) { continue; } if (!offs.emplace(rec.off).second) { // Duplicate continue; } // Read characters safely std::string name(sizeof(rec.name), '\0'); std::memcpy(name.data(), rec.name, name.size()); name = std::string(name.c_str()); upper = fmt::to_upper(name); if (upper.find(".SPRX") != umax || upper.find(".PRX") != umax) { // .sprx inside .mself found file_queue.emplace_back(dir_queue[i] + entry.name, rec.off, rec.size); continue; } if (upper.find(".SELF") != umax || upper.find(".ELF") != umax) { // .self inside .mself found file_queue.emplace_back(dir_queue[i] + entry.name, rec.off, rec.size); continue; } } else { ppu_log.error("MSELF file is possibly truncated"); break; } } } } } } } g_progr_ftotal += ::size32(file_queue); u64 total_files_size = 0; for (const file_info& info : file_queue) { total_files_size += info.file_size; } g_progr_ftotal_bits += total_files_size; *progress_dialog = get_localized_string(localized_string_id::PROGRESS_DIALOG_COMPILING_PPU_MODULES); atomic_t<usz> fnext = 0; lf_queue<file_info> possible_exec_file_paths; // Allow to allocate 2000 times the size of each file for the use of LLVM // This works very nicely with Metal Gear Solid 4 for example: // 2 7MB overlay files -> 14GB // The growth in memory requirements of LLVM is not linear with file size of course // But these estimates should hopefully protect RPCS3 in 
the coming years // Especially when thread count is on the rise with each CPU generation atomic_t<u32> file_size_limit = static_cast<u32>(std::clamp<u64>(utils::aligned_div<u64>(utils::get_total_memory(), 2000), 65536, u32{umax})); const u32 software_thread_limit = std::min<u32>(g_cfg.core.llvm_threads ? g_cfg.core.llvm_threads : u32{umax}, ::size32(file_queue)); const u32 cpu_thread_limit = utils::get_thread_count() > 8u ? std::max<u32>(utils::get_thread_count(), 2) - 1 : utils::get_thread_count(); // One LLVM thread less named_thread_group workers("SPRX Worker ", std::min<u32>(software_thread_limit, cpu_thread_limit), [&] { #ifdef __APPLE__ pthread_jit_write_protect_np(false); #endif // Set low priority thread_ctrl::scoped_priority low_prio(-1); u32 inc_fdone = 1; u32 restore_mem = 0; for (usz func_i = fnext++; func_i < file_queue.size(); func_i = fnext++, g_progr_fdone += std::exchange(inc_fdone, 1)) { if (Emu.IsStopped()) { continue; } if (restore_mem) { if (!file_size_limit.fetch_add(restore_mem)) { file_size_limit.notify_all(); } restore_mem = 0; } auto& [path, offset, file_size] = file_queue[func_i]; ppu_log.notice("Trying to load: %s", path); // Load MSELF, SPRX or SELF fs::file src{path}; if (!src) { ppu_log.error("Failed to open '%s' (%s)", path, fs::g_tls_error); continue; } if (u64 off = offset) { // Adjust offset for MSELF src = make_file_view(std::move(src), offset, file_size); // Adjust path for MSELF too fmt::append(path, "_x%x", off); } // Some files may fail to decrypt due to the lack of klic src = decrypt_self(std::move(src)); if (!src) { ppu_log.notice("Failed to decrypt '%s'", path); continue; } auto wait_for_memory = [&]() -> bool { // Try not to process too many files at once because it seems to reduce performance and cause RAM shortages // Concurrently compiling more OVL or huge PRX files does not have much theoretical benefit while (!file_size_limit.fetch_op([&](u32& value) { if (value) { // Allow at least one file, make 0 the "memory 
unavailable" sign value for atomic waiting efficiency const u32 new_val = static_cast<u32>(utils::sub_saturate<u64>(value, file_size)); restore_mem = value - new_val; value = new_val; return true; } // Resort to waiting restore_mem = 0; return false; }).second) { // Wait until not 0 file_size_limit.wait(0); } if (Emu.IsStopped()) { return false; } return true; }; elf_error prx_err{}, ovl_err{}; if (ppu_prx_object obj = src; (prx_err = obj, obj == elf_error::ok)) { if (!wait_for_memory()) { // Emulation stopped continue; } if (auto prx = ppu_load_prx(obj, true, path, offset)) { obj.clear(), src.close(); // Clear decrypted file and elf object memory ppu_initialize(*prx, false, file_size); ppu_finalize(*prx, true); continue; } // Log error prx_err = elf_error::header_type; } if (ppu_exec_object obj = src; (ovl_err = obj, obj == elf_error::ok)) { while (ovl_err == elf_error::ok) { if (Emu.IsStopped()) { break; } const auto [ovlm, error] = ppu_load_overlay(obj, true, path, offset); if (error) { if (error == CELL_CANCEL + 0u) { // Emulation stopped break; } // Abort ovl_err = elf_error::header_type; break; } if (!wait_for_memory()) { // Emulation stopped break; } // Participate in thread execution limitation (takes a long time) if (std::lock_guard lock(g_fxo->get<jit_core_allocator>().sem); !ovlm->analyse(0, ovlm->entry, ovlm->seg0_code_end, ovlm->applied_patches, std::vector<u32>{}, []() { return Emu.IsStopped(); })) { // Emulation stopped break; } obj.clear(), src.close(); // Clear decrypted file and elf object memory ppu_initialize(*ovlm, false, file_size); ppu_finalize(*ovlm, true); break; } if (ovl_err == elf_error::ok) { continue; } } ppu_log.notice("Failed to precompile '%s' (prx: %s, ovl: %s): Attempting compilation as executable file", path, prx_err, ovl_err); possible_exec_file_paths.push(path, offset, file_size); inc_fdone = 0; } if (restore_mem) { if (!file_size_limit.fetch_add(restore_mem)) { file_size_limit.notify_all(); } } }); // Join every thread 
workers.join(); named_thread exec_worker("PPU Exec Worker", [&] { if (!possible_exec_file_paths) { return; } #ifdef __APPLE__ pthread_jit_write_protect_np(false); #endif // Set low priority thread_ctrl::scoped_priority low_prio(-1); auto slice = possible_exec_file_paths.pop_all(); auto main_module = std::move(g_fxo->get<main_ppu_module>()); for (; slice; slice.pop_front(), g_progr_fdone++) { if (Emu.IsStopped()) { continue; } const auto& [path, _, file_size] = *slice; ppu_log.notice("Trying to load as executable: %s", path); // Load SELF fs::file src{path}; if (!src) { ppu_log.error("Failed to open '%s' (%s)", path, fs::g_tls_error); continue; } // Some files may fail to decrypt due to the lack of klic src = decrypt_self(std::move(src), nullptr, nullptr, true); if (!src) { ppu_log.notice("Failed to decrypt '%s'", path); continue; } elf_error exec_err{}; if (ppu_exec_object obj = src; (exec_err = obj, obj == elf_error::ok)) { while (exec_err == elf_error::ok) { main_ppu_module& _main = g_fxo->get<main_ppu_module>(); _main = {}; auto current_cache = std::move(g_fxo->get<spu_cache>()); if (!ppu_load_exec(obj, true, path)) { // Abort exec_err = elf_error::header_type; break; } if (std::memcmp(main_module.sha1, _main.sha1, sizeof(_main.sha1)) == 0) { g_fxo->get<spu_cache>() = std::move(current_cache); break; } if (!_main.analyse(0, _main.elf_entry, _main.seg0_code_end, _main.applied_patches, std::vector<u32>{}, [](){ return Emu.IsStopped(); })) { g_fxo->get<spu_cache>() = std::move(current_cache); break; } obj.clear(), src.close(); // Clear decrypted file and elf object memory _main.name = ' '; // Make ppu_finalize work Emu.ConfigurePPUCache(); ppu_initialize(_main, false, file_size); spu_cache::initialize(false); ppu_finalize(_main, true); _main = {}; g_fxo->get<spu_cache>() = std::move(current_cache); break; } if (exec_err == elf_error::ok) { continue; } } ppu_log.notice("Failed to precompile '%s' as executable (%s)", path, exec_err); } g_fxo->get<main_ppu_module>() = 
std::move(main_module); g_fxo->get<spu_cache>().collect_funcs_to_precompile = true; Emu.ConfigurePPUCache(); }); exec_worker(); } extern void ppu_initialize() { if (!g_fxo->is_init<main_ppu_module>()) { return; } if (Emu.IsStopped()) { return; } auto& _main = g_fxo->get<main_ppu_module>(); std::optional<scoped_progress_dialog> progress_dialog(std::in_place, get_localized_string(localized_string_id::PROGRESS_DIALOG_ANALYZING_PPU_EXECUTABLE)); // Analyse executable if (!_main.analyse(0, _main.elf_entry, _main.seg0_code_end, _main.applied_patches, std::vector<u32>{}, [](){ return Emu.IsStopped(); })) { return; } // Validate analyser results (not required) _main.validate(0); *progress_dialog = get_localized_string(localized_string_id::PROGRESS_DIALOG_SCANNING_PPU_MODULES); bool compile_main = false; // Check main module cache if (!_main.segs.empty()) { compile_main = ppu_initialize(_main, true); } std::vector<ppu_module*> module_list; const std::string firmware_sprx_path = vfs::get("/dev_flash/sys/external/"); // If empty we have no indication for firmware cache state, check everything bool compile_fw = !Emu.IsVsh(); idm::select<lv2_obj, lv2_prx>([&](u32, lv2_prx& _module) { if (_module.funcs.empty()) { return; } if (_module.path.starts_with(firmware_sprx_path)) { // Postpone testing compile_fw = false; } module_list.emplace_back(&_module); }); idm::select<lv2_obj, lv2_overlay>([&](u32, lv2_overlay& _module) { module_list.emplace_back(&_module); }); // Check preloaded libraries cache if (!compile_fw) { for (auto ptr : module_list) { if (ptr->path.starts_with(firmware_sprx_path)) { compile_fw |= ppu_initialize(*ptr, true); // Fixup for compatibility with old savestates if (Emu.DeserialManager() && ptr->name == "liblv2.sprx") { static_cast<lv2_prx*>(ptr)->state = PRX_STATE_STARTED; static_cast<lv2_prx*>(ptr)->load_exports(); } } } } std::vector<std::string> dir_queue; const std::string mount_point = vfs::get("/dev_flash/"); bool dev_flash_located = 
!Emu.GetCat().ends_with('P') && Emu.IsPathInsideDir(Emu.GetBoot(), mount_point) && g_cfg.core.llvm_precompilation; if (compile_fw || dev_flash_located) { if (dev_flash_located) { const std::string eseibrd = mount_point + "/vsh/module/eseibrd.sprx"; if (auto prx = ppu_load_prx(ppu_prx_object{decrypt_self(fs::file{eseibrd})}, true, eseibrd, 0)) { // Check if cache exists for this infinitesimally small prx dev_flash_located = ppu_initialize(*prx, true); } } const std::string firmware_sprx_path = vfs::get(dev_flash_located ? "/dev_flash/"sv : "/dev_flash/sys/external/"sv); dir_queue.emplace_back(firmware_sprx_path); } // Avoid compilation if main's cache exists or it is a standalone SELF with no PARAM.SFO if (compile_main && g_cfg.core.llvm_precompilation && !Emu.GetTitleID().empty() && !Emu.IsChildProcess()) { // Try to add all related directories const std::set<std::string> dirs = Emu.GetGameDirs(); dir_queue.insert(std::end(dir_queue), std::begin(dirs), std::end(dirs)); } progress_dialog.reset(); ppu_precompile(dir_queue, &module_list); if (Emu.IsStopped()) { return; } // Initialize main module cache if (!_main.segs.empty()) { ppu_initialize(_main); } // Initialize preloaded libraries for (auto ptr : module_list) { if (Emu.IsStopped()) { return; } ppu_initialize(*ptr); } } bool ppu_initialize(const ppu_module& info, bool check_only, u64 file_size) { if (g_cfg.core.ppu_decoder != ppu_decoder_type::llvm) { if (check_only || vm::base(info.segs[0].addr) != info.segs[0].ptr) { return false; } auto& toc_manager = g_fxo->get<ppu_toc_manager>(); std::lock_guard lock(toc_manager.mutex); auto& ppu_toc = toc_manager.toc_map; for (const auto& func : info.funcs) { if (func.size && func.blocks.empty()) { ppu_register_function_at(func.addr, func.size); } for (auto& block : func.blocks) { if (!block.second) { continue; } if (g_fxo->is_init<ppu_far_jumps_t>() && !g_fxo->get<ppu_far_jumps_t>().get_targets(block.first, block.second).empty()) { // Replace the block with ppu_far_jump 
continue; } ppu_register_function_at(block.first, block.second); } if (g_cfg.core.ppu_debug && func.size && func.toc != umax && !ppu_get_far_jump(func.addr)) { ppu_toc[func.addr] = func.toc; write_to_ptr<ppu_intrp_func_t>(ppu_ptr(func.addr), &ppu_check_toc); } } return false; } // Link table static const std::unordered_map<std::string, u64> s_link_table = []() { std::unordered_map<std::string, u64> link_table { { "sys_game_set_system_sw_version", reinterpret_cast<u64>(ppu_execute_syscall) }, { "__trap", reinterpret_cast<u64>(&ppu_trap) }, { "__error", reinterpret_cast<u64>(&ppu_error) }, { "__check", reinterpret_cast<u64>(&ppu_check) }, { "__trace", reinterpret_cast<u64>(&ppu_trace) }, { "__syscall", reinterpret_cast<u64>(ppu_execute_syscall) }, { "__get_tb", reinterpret_cast<u64>(get_timebased_time) }, { "__lwarx", reinterpret_cast<u64>(ppu_lwarx) }, { "__ldarx", reinterpret_cast<u64>(ppu_ldarx) }, { "__stwcx", reinterpret_cast<u64>(ppu_stwcx) }, { "__stdcx", reinterpret_cast<u64>(ppu_stdcx) }, { "__dcbz", reinterpret_cast<u64>(+[](u32 addr){ alignas(64) static constexpr u8 z[128]{}; do_cell_atomic_128_store(addr, z); }) }, { "__resupdate", reinterpret_cast<u64>(vm::reservation_update) }, { "__resinterp", reinterpret_cast<u64>(ppu_reservation_fallback) }, { "__escape", reinterpret_cast<u64>(+ppu_escape) }, { "__read_maybe_mmio32", reinterpret_cast<u64>(+ppu_read_mmio_aware_u32) }, { "__write_maybe_mmio32", reinterpret_cast<u64>(+ppu_write_mmio_aware_u32) }, }; for (u64 index = 0; index < 1024; index++) { if (ppu_get_syscall(index)) { link_table.emplace(fmt::format("%s", ppu_syscall_code(index)), reinterpret_cast<u64>(ppu_execute_syscall)); link_table.emplace(fmt::format("syscall_%u", index), reinterpret_cast<u64>(ppu_execute_syscall)); } } return link_table; }(); // Get cache path for this executable std::string cache_path; if (!info.cache.empty()) { cache_path = info.cache; } else { // New PPU cache location cache_path = rpcs3::utils::get_cache_dir(info.path); // 
Add PPU hash and filename fmt::append(cache_path, "ppu-%s-%s/", fmt::base57(info.sha1), info.path.substr(info.path.find_last_of('/') + 1)); if (!fs::create_path(cache_path)) { fmt::throw_exception("Failed to create cache directory: %s (%s)", cache_path, fs::g_tls_error); } } #ifdef LLVM_AVAILABLE std::optional<scoped_progress_dialog> progress_dialog; if (!check_only) { // Initialize progress dialog progress_dialog.emplace(get_localized_string(localized_string_id::PROGRESS_DIALOG_LOADING_PPU_MODULES)); } // Permanently loaded compiled PPU modules (name -> data) jit_module& jit_mod = g_fxo->get<jit_module_manager>().get(cache_path + "_" + std::to_string(std::bit_cast<usz>(info.segs[0].ptr))); // Compiler instance (deferred initialization) std::shared_ptr<jit_compiler>& jit = jit_mod.pjit; // Split module into fragments <= 1 MiB usz fpos = 0; // Difference between function name and current location const u32 reloc = info.relocs.empty() ? 0 : ::at32(info.segs, 0).addr; // Info sent to threads std::vector<std::pair<std::string, ppu_module>> workload; // Info to load to main JIT instance (true - compiled) std::vector<std::pair<std::string, bool>> link_workload; // Sync variable to acquire workloads atomic_t<u32> work_cv = 0; bool compiled_new = false; bool has_mfvscr = false; const bool is_being_used_in_emulation = vm::base(info.segs[0].addr) == info.segs[0].ptr; const cpu_thread* cpu = cpu_thread::get_current(); for (auto& func : info.funcs) { if (func.size == 0) { continue; } for (const auto& [addr, size] : func.blocks) { if (size == 0) { continue; } auto i_ptr = ensure(info.get_ptr<u32>(addr)); for (u32 i = addr; i < addr + size; i += 4, i_ptr++) { if (g_ppu_itype.decode(*i_ptr) == ppu_itype::MFVSCR) { ppu_log.warning("MFVSCR found"); has_mfvscr = true; break; } } if (has_mfvscr) { break; } } if (has_mfvscr) { break; } } u32 total_compile = 0; while (!jit_mod.init && fpos < info.funcs.size()) { // Initialize compiler instance if (!jit && is_being_used_in_emulation) { 
jit = std::make_shared<jit_compiler>(s_link_table, g_cfg.core.llvm_cpu); } // Copy module information (TODO: optimize) ppu_module part; part.copy_part(info); part.funcs.reserve(16000); // Overall block size in bytes usz bsize = 0; usz bcount = 0; while (fpos < info.funcs.size()) { auto& func = info.funcs[fpos]; if (!func.size) { fpos++; continue; } if (bsize + func.size > 100 * 1024 && bsize) { if (bcount >= 1000) { break; } } if (g_fxo->is_init<ppu_far_jumps_t>()) { auto targets = g_fxo->get<ppu_far_jumps_t>().get_targets(func.addr, func.size); for (auto [source, target] : targets) { auto far_jump = ensure(g_fxo->get<ppu_far_jumps_t>().gen_jump(source)); if (source == func.addr && jit) { jit->update_global_mapping(fmt::format("__0x%x", func.addr - reloc), reinterpret_cast<u64>(far_jump)); } ppu_register_function_at(source, 4, far_jump); } if (!targets.empty()) { // Replace the function with ppu_far_jump fpos++; continue; } } // Copy block or function entry ppu_function& entry = part.funcs.emplace_back(func); // Fixup some information entry.name = fmt::format("__0x%x", entry.addr - reloc); if (has_mfvscr && g_cfg.core.ppu_set_sat_bit) { // TODO entry.attr += ppu_attr::has_mfvscr; } if (entry.blocks.empty()) { entry.blocks.emplace(func.addr, func.size); } bsize += func.size; fpos++; bcount++; } // Compute module hash to generate (hopefully) unique object name std::string obj_name; { sha1_context ctx; u8 output[20]; sha1_starts(&ctx); int has_dcbz = !!g_cfg.core.accurate_cache_line_stores; for (const auto& func : part.funcs) { if (func.size == 0) { continue; } const be_t<u32> addr = func.addr - reloc; const be_t<u32> size = func.size; sha1_update(&ctx, reinterpret_cast<const u8*>(&addr), sizeof(addr)); sha1_update(&ctx, reinterpret_cast<const u8*>(&size), sizeof(size)); for (const auto& block : func.blocks) { if (block.second == 0 || reloc) { continue; } // Find relevant relocations auto low = std::lower_bound(part.relocs.cbegin(), part.relocs.cend(), block.first); 
auto high = std::lower_bound(low, part.relocs.cend(), block.first + block.second); auto addr = block.first; for (; low != high; ++low) { // Aligned relocation address const u32 roff = low->addr & ~3; if (roff > addr) { // Hash from addr to the beginning of the relocation sha1_update(&ctx, ensure(info.get_ptr<const u8>(addr)), roff - addr); } // Hash relocation type instead const be_t<u32> type = low->type; sha1_update(&ctx, reinterpret_cast<const u8*>(&type), sizeof(type)); // Set the next addr addr = roff + 4; } if (has_dcbz == 1) { auto i_ptr = ensure(info.get_ptr<u32>(addr)); for (u32 i = addr, end = block.second + block.first - 1; i <= end; i += 4, i_ptr++) { if (g_ppu_itype.decode(*i_ptr) == ppu_itype::DCBZ) { has_dcbz = 2; break; } } } // Hash from addr to the end of the block sha1_update(&ctx, ensure(info.get_ptr<const u8>(addr)), block.second - (addr - block.first)); } if (reloc) { continue; } if (has_dcbz == 1) { auto i_ptr = ensure(info.get_ptr<u32>(func.addr)); for (u32 i = func.addr, end = func.addr + func.size - 1; i <= end; i += 4, i_ptr++) { if (g_ppu_itype.decode(*i_ptr) == ppu_itype::DCBZ) { has_dcbz = 2; break; } } } sha1_update(&ctx, ensure(info.get_ptr<const u8>(func.addr)), func.size); } if (false) { const be_t<u64> forced_upd = 3; sha1_update(&ctx, reinterpret_cast<const u8*>(&forced_upd), sizeof(forced_upd)); } sha1_finish(&ctx, output); // Settings: should be populated by settings which affect codegen (TODO) enum class ppu_settings : u32 { platform_bit, accurate_dfma, fixup_vnan, fixup_nj_denormals, accurate_cache_line_stores, reservations_128_byte, greedy_mode, accurate_sat, accurate_fpcc, accurate_vnan, accurate_nj_mode, contains_symbol_resolver, __bitset_enum_max }; be_t<bs_t<ppu_settings>> settings{}; #if !defined(_WIN32) && !defined(__APPLE__) settings += ppu_settings::platform_bit; #endif if (g_cfg.core.use_accurate_dfma) settings += ppu_settings::accurate_dfma; if (g_cfg.core.ppu_fix_vnan) settings += ppu_settings::fixup_vnan; if 
(g_cfg.core.ppu_llvm_nj_fixup) settings += ppu_settings::fixup_nj_denormals; if (has_dcbz == 2) settings += ppu_settings::accurate_cache_line_stores; if (g_cfg.core.ppu_128_reservations_loop_max_length) settings += ppu_settings::reservations_128_byte; if (g_cfg.core.ppu_llvm_greedy_mode) settings += ppu_settings::greedy_mode; if (has_mfvscr && g_cfg.core.ppu_set_sat_bit) settings += ppu_settings::accurate_sat; if (g_cfg.core.ppu_set_fpcc) settings += ppu_settings::accurate_fpcc, fmt::throw_exception("FPCC Not implemented"); if (g_cfg.core.ppu_set_vnan) settings += ppu_settings::accurate_vnan, settings -= ppu_settings::fixup_vnan, fmt::throw_exception("VNAN Not implemented"); if (g_cfg.core.ppu_use_nj_bit) settings += ppu_settings::accurate_nj_mode, settings -= ppu_settings::fixup_nj_denormals, fmt::throw_exception("NJ Not implemented"); if (fpos >= info.funcs.size()) settings += ppu_settings::contains_symbol_resolver; // Avoid invalidating all modules for this purpose // Write version, hash, CPU, settings fmt::append(obj_name, "v6-kusa-%s-%s-%s.obj", fmt::base57(output, 16), fmt::base57(settings), jit_compiler::cpu(g_cfg.core.llvm_cpu)); } if (cpu ? 
cpu->state.all_of(cpu_flag::exit) : Emu.IsStopped()) { break; } if (!check_only) { total_compile++; link_workload.emplace_back(obj_name, false); } // Check object file if (jit_compiler::check(cache_path + obj_name)) { if (!jit && !check_only) { ppu_log.success("LLVM: Module exists: %s", obj_name); // Done already, revert total amount increase // Avoid incrementing "pdone" instead because it creates false appreciation for both the progress dialog and the user total_compile--; } continue; } if (check_only) { return true; } // Remember, used in ppu_initialize(void) compiled_new = true; // Adjust information (is_compiled) link_workload.back().second = true; // Fill workload list for compilation workload.emplace_back(std::move(obj_name), std::move(part)); } if (check_only) { return false; } // Update progress dialog if (total_compile) { g_progr_ptotal += total_compile; } if (g_progr_ftotal_bits && file_size) { g_progr_fknown_bits += file_size; } // Create worker threads for compilation if (!workload.empty()) { *progress_dialog = get_localized_string(localized_string_id::PROGRESS_DIALOG_COMPILING_PPU_MODULES); u32 thread_count = rpcs3::utils::get_max_threads(); if (workload.size() < thread_count) { thread_count = ::size32(workload); } struct thread_index_allocator { atomic_t<u64> index = 0; }; struct thread_op { atomic_t<u32>& work_cv; std::vector<std::pair<std::string, ppu_module>>& workload; const ppu_module& main_module; const std::string& cache_path; const cpu_thread* cpu; std::unique_lock<decltype(jit_core_allocator::sem)> core_lock; thread_op(atomic_t<u32>& work_cv, std::vector<std::pair<std::string, ppu_module>>& workload , const cpu_thread* cpu, const ppu_module& main_module, const std::string& cache_path, decltype(jit_core_allocator::sem)& sem) noexcept : work_cv(work_cv) , workload(workload) , main_module(main_module) , cache_path(cache_path) , cpu(cpu) { // Save mutex core_lock = std::unique_lock{sem, std::defer_lock}; } thread_op(const thread_op& other) 
noexcept : work_cv(other.work_cv) , workload(other.workload) , main_module(other.main_module) , cache_path(other.cache_path) , cpu(other.cpu) { if (auto mtx = other.core_lock.mutex()) { // Save mutex core_lock = std::unique_lock{*mtx, std::defer_lock}; } } thread_op(thread_op&& other) noexcept = default; void operator()() { // Set low priority thread_ctrl::scoped_priority low_prio(-1); #ifdef __APPLE__ pthread_jit_write_protect_np(false); #endif for (u32 i = work_cv++; i < workload.size(); i = work_cv++, g_progr_pdone++) { if (cpu ? cpu->state.all_of(cpu_flag::exit) : Emu.IsStopped()) { continue; } // Keep allocating workload const auto& [obj_name, part] = std::as_const(workload)[i]; ppu_log.warning("LLVM: Compiling module %s%s", cache_path, obj_name); // Use another JIT instance jit_compiler jit2({}, g_cfg.core.llvm_cpu, 0x1); ppu_initialize2(jit2, part, cache_path, obj_name, i == workload.size() - 1 ? main_module : part); ppu_log.success("LLVM: Compiled module %s", obj_name); } core_lock.unlock(); } }; // Prevent watchdog thread from terminating g_watchdog_hold_ctr++; named_thread_group threads(fmt::format("PPUW.%u.", ++g_fxo->get<thread_index_allocator>().index), thread_count , thread_op(work_cv, workload, cpu, info, cache_path, g_fxo->get<jit_core_allocator>().sem) , [&](u32 /*thread_index*/, thread_op& op) { // Allocate "core" op.core_lock.lock(); // Second check before creating another thread return work_cv < workload.size() && (cpu ? !cpu->state.all_of(cpu_flag::exit) : !Emu.IsStopped()); }); threads.join(); g_watchdog_hold_ctr--; } bool failed_to_load = false; { if (!is_being_used_in_emulation || (cpu ? cpu->state.all_of(cpu_flag::exit) : Emu.IsStopped())) { return compiled_new; } if (workload.size() < link_workload.size()) { // Only show this message if this task is relevant *progress_dialog = get_localized_string(localized_string_id::PROGRESS_DIALOG_LINKING_PPU_MODULES); } for (const auto& [obj_name, is_compiled] : link_workload) { if (cpu ? 
cpu->state.all_of(cpu_flag::exit) : Emu.IsStopped()) { break; } if (!failed_to_load && !jit->add(cache_path + obj_name)) { ppu_log.error("LLVM: Failed to load module %s", obj_name); failed_to_load = true; } if (failed_to_load) { if (!is_compiled) { g_progr_pdone++; } continue; } if (!is_compiled) { ppu_log.success("LLVM: Loaded module %s", obj_name); g_progr_pdone++; } } } if (failed_to_load || !is_being_used_in_emulation || (cpu ? cpu->state.all_of(cpu_flag::exit) : Emu.IsStopped())) { return compiled_new; } // Jit can be null if the loop doesn't ever enter. #ifdef __APPLE__ pthread_jit_write_protect_np(false); #endif // Try to patch all single and unregistered BLRs with the same function (TODO: Maybe generalize it into PIC code detection and patching) ppu_intrp_func_t BLR_func = nullptr; const bool showing_only_apply_stage = !g_progr_text.load() && !g_progr_ptotal && !g_progr_ftotal && g_progr_ptotal.compare_and_swap_test(0, 1); progress_dialog = get_localized_string(localized_string_id::PROGRESS_DIALOG_APPLYING_PPU_CODE); if (!jit) { // No functions - nothing to do ensure(info.funcs.empty()); return compiled_new; } const bool is_first = !jit_mod.init; if (is_first) { jit->fin(); } if (is_first) { jit_mod.symbol_resolver = reinterpret_cast<void(*)(u8*, u64)>(jit->get("__resolve_symbols")); ensure(jit_mod.symbol_resolver); } else { ensure(jit_mod.symbol_resolver); } #ifdef __APPLE__ // Symbol resolver is in JIT mem, so we must enable execution pthread_jit_write_protect_np(true); #endif jit_mod.symbol_resolver(vm::g_exec_addr, info.segs[0].addr); #ifdef __APPLE__ // Symbol resolver is in JIT mem, so we must enable execution pthread_jit_write_protect_np(false); #endif // Find a BLR-only function in order to copy it to all BLRs (some games need it) for (const auto& func : info.funcs) { if (func.size == 4 && *info.get_ptr<u32>(func.addr) == ppu_instructions::BLR()) { BLR_func = ppu_read(func.addr); break; } } if (is_first) { jit_mod.init = true; } if (BLR_func) { auto 
inst_ptr = info.get_ptr<u32>(info.segs[0].addr); for (u32 addr = info.segs[0].addr; addr < info.segs[0].addr + info.segs[0].size; addr += 4, inst_ptr++) { if (*inst_ptr == ppu_instructions::BLR() && (reinterpret_cast<uptr>(ppu_read(addr)) << 16 >> 16) == reinterpret_cast<uptr>(ppu_recompiler_fallback_ghc)) { write_to_ptr<ppu_intrp_func_t>(ppu_ptr(addr), BLR_func); } } } if (showing_only_apply_stage) { // Done g_progr_pdone++; } return compiled_new; #else fmt::throw_exception("LLVM is not available in this build."); #endif } static void ppu_initialize2(jit_compiler& jit, const ppu_module& module_part, const std::string& cache_path, const std::string& obj_name, const ppu_module& whole_module) { #ifdef LLVM_AVAILABLE using namespace llvm; // Create LLVM module std::unique_ptr<Module> _module = std::make_unique<Module>(obj_name, jit.get_context()); // Initialize target _module->setTargetTriple(jit_compiler::triple1()); _module->setDataLayout(jit.get_engine().getTargetMachine()->createDataLayout()); // Initialize translator PPUTranslator translator(jit.get_context(), _module.get(), module_part, jit.get_engine()); // Define some types const auto _func = FunctionType::get(translator.get_type<void>(), { translator.get_type<u8*>(), // Exec base translator.GetContextType()->getPointerTo(), // PPU context translator.get_type<u64>(), // Segment address (for PRX) translator.get_type<u8*>(), // Memory base translator.get_type<u64>(), // r0 translator.get_type<u64>(), // r1 translator.get_type<u64>(), // r2 }, false); // Initialize function list for (const auto& func : module_part.funcs) { if (func.size) { const auto f = cast<Function>(_module->getOrInsertFunction(func.name, _func).getCallee()); f->setCallingConv(CallingConv::GHC); f->addParamAttr(1, llvm::Attribute::NoAlias); f->addFnAttr(Attribute::NoUnwind); } } { if (g_cfg.core.ppu_debug) { translator.build_interpreter(); } #if LLVM_VERSION_MAJOR < 17 legacy::FunctionPassManager pm(_module.get()); // Basic optimizations 
//pm.add(createCFGSimplificationPass()); //pm.add(createPromoteMemoryToRegisterPass()); pm.add(createEarlyCSEPass()); //pm.add(createTailCallEliminationPass()); //pm.add(createInstructionCombiningPass()); //pm.add(createBasicAAWrapperPass()); //pm.add(new MemoryDependenceAnalysis()); //pm.add(createLICMPass()); //pm.add(createLoopInstSimplifyPass()); //pm.add(createNewGVNPass()); //pm.add(createDeadStoreEliminationPass()); //pm.add(createSCCPPass()); //pm.add(createReassociatePass()); //pm.add(createInstructionCombiningPass()); //pm.add(createInstructionSimplifierPass()); //pm.add(createAggressiveDCEPass()); //pm.add(createCFGSimplificationPass()); //pm.add(createLintPass()); // Check #else // Create the analysis managers. // These must be declared in this order so that they are destroyed in the // correct order due to inter-analysis-manager references. LoopAnalysisManager lam; FunctionAnalysisManager fam; CGSCCAnalysisManager cgam; ModuleAnalysisManager mam; // Create the new pass manager builder. // Take a look at the PassBuilder constructor parameters for more // customization, e.g. specifying a TargetMachine or various debugging // options. PassBuilder pb; // Register all the basic analyses with the managers. 
pb.registerModuleAnalyses(mam); pb.registerCGSCCAnalyses(cgam); pb.registerFunctionAnalyses(fam); pb.registerLoopAnalyses(lam); pb.crossRegisterProxies(lam, fam, cgam, mam); FunctionPassManager fpm; // Basic optimizations fpm.addPass(EarlyCSEPass()); #endif // Translate functions for (usz fi = 0, fmax = module_part.funcs.size(); fi < fmax; fi++) { if (Emu.IsStopped()) { ppu_log.success("LLVM: Translation cancelled"); return; } if (module_part.funcs[fi].size) { // Translate if (const auto func = translator.Translate(module_part.funcs[fi])) { #ifdef ARCH_X64 // TODO // Run optimization passes #if LLVM_VERSION_MAJOR < 17 pm.run(*func); #else fpm.run(*func, fam); #endif #endif // ARCH_X64 } else { Emu.Pause(); return; } } } // Run this only in one module for all functions if (&whole_module != &module_part) { if (const auto func = translator.GetSymbolResolver(whole_module)) { #ifdef ARCH_X64 // TODO // Run optimization passes #if LLVM_VERSION_MAJOR < 17 pm.run(*func); #else fpm.run(*func, fam); #endif #endif // ARCH_X64 } else { Emu.Pause(); return; } } //legacy::PassManager mpm; // Remove unused functions, structs, global variables, etc //mpm.add(createStripDeadPrototypesPass()); //mpm.add(createFunctionInliningPass()); //mpm.add(createDeadInstEliminationPass()); //mpm.run(*module); std::string result; raw_string_ostream out(result); if (g_cfg.core.llvm_logs) { out << *_module; // print IR fs::write_file(cache_path + obj_name + ".log", fs::rewrite, out.str()); result.clear(); } if (verifyModule(*_module, &out)) { out.flush(); ppu_log.error("LLVM: Verification failed for %s:\n%s", obj_name, result); Emu.CallFromMainThread([]{ Emu.GracefulShutdown(false, true); }); return; } ppu_log.notice("LLVM: %zu functions generated", _module->getFunctionList().size()); } // Load or compile module jit.add(std::move(_module), cache_path); #endif // LLVM_AVAILABLE }
134,572
C++
.cpp
4,449
26.699483
226
0.642997
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
5,183
MFC.cpp
RPCS3_rpcs3/rpcs3/Emu/Cell/MFC.cpp
#include "stdafx.h" #include "MFC.h" template <> void fmt_class_string<MFC>::format(std::string& out, u64 arg) { format_enum(out, arg, [](MFC cmd) { switch (cmd) { case MFC_PUT_CMD: return "PUT"; case MFC_PUTB_CMD: return "PUTB"; case MFC_PUTF_CMD: return "PUTF"; case MFC_PUTS_CMD: return "PUTS"; case MFC_PUTBS_CMD: return "PUTBS"; case MFC_PUTFS_CMD: return "PUTFS"; case MFC_PUTR_CMD: return "PUTR"; case MFC_PUTRB_CMD: return "PUTRB"; case MFC_PUTRF_CMD: return "PUTRF"; case MFC_GET_CMD: return "GET"; case MFC_GETB_CMD: return "GETB"; case MFC_GETF_CMD: return "GETF"; case MFC_GETS_CMD: return "GETS"; case MFC_GETBS_CMD: return "GETBS"; case MFC_GETFS_CMD: return "GETFS"; case MFC_PUTL_CMD: return "PUTL"; case MFC_PUTLB_CMD: return "PUTLB"; case MFC_PUTLF_CMD: return "PUTLF"; case MFC_PUTRL_CMD: return "PUTRL"; case MFC_PUTRLB_CMD: return "PUTRLB"; case MFC_PUTRLF_CMD: return "PUTRLF"; case MFC_GETL_CMD: return "GETL"; case MFC_GETLB_CMD: return "GETLB"; case MFC_GETLF_CMD: return "GETLF"; case MFC_GETLLAR_CMD: return "GETLLAR"; case MFC_PUTLLC_CMD: return "PUTLLC"; case MFC_PUTLLUC_CMD: return "PUTLLUC"; case MFC_PUTQLLUC_CMD: return "PUTQLLUC"; case MFC_SNDSIG_CMD: return "SNDSIG"; case MFC_SNDSIGB_CMD: return "SNDSIGB"; case MFC_SNDSIGF_CMD: return "SNDSIGF"; case MFC_BARRIER_CMD: return "BARRIER"; case MFC_EIEIO_CMD: return "EIEIO"; case MFC_SYNC_CMD: return "SYNC"; case MFC_SDCRT_CMD: return "SDCRT"; case MFC_SDCRTST_CMD: return "SDCRTST"; case MFC_SDCRZ_CMD: return "SDCRZ"; case MFC_SDCRS_CMD: return "SDCRS"; case MFC_SDCRF_CMD: return "SDCRF"; case MFC_BARRIER_MASK: case MFC_FENCE_MASK: case MFC_LIST_MASK: case MFC_START_MASK: case MFC_RESULT_MASK: break; } return unknown; }); } template <> void fmt_class_string<spu_mfc_cmd>::format(std::string& out, u64 arg) { const auto& cmd = get_object(arg); const u8 tag = cmd.tag; fmt::append(out, "%-8s #%02u 0x%05x:0x%08llx 0x%x%s", cmd.cmd, tag & 0x7f, cmd.lsa, u64{cmd.eah} << 32 | cmd.eal, cmd.size, (tag & 0x80) ? 
" (stalled)" : ""); }
2,134
C++
.cpp
65
30.076923
159
0.690291
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
5,184
PPUModule.cpp
RPCS3_rpcs3/rpcs3/Emu/Cell/PPUModule.cpp
#include "stdafx.h" #include "Emu/Cell/PPUModule.h" #include "Utilities/bin_patch.h" #include "Utilities/StrUtil.h" #include "Utilities/address_range.h" #include "util/serialization.hpp" #include "Crypto/sha1.h" #include "Crypto/unself.h" #include "Loader/ELF.h" #include "Emu/System.h" #include "Emu/system_config.h" #include "Emu/VFS.h" #include "Emu/Cell/PPUOpcodes.h" #include "Emu/Cell/SPUThread.h" #include "Emu/Cell/PPUAnalyser.h" #include "Emu/Cell/lv2/sys_process.h" #include "Emu/Cell/lv2/sys_prx.h" #include "Emu/Cell/lv2/sys_memory.h" #include "Emu/Cell/lv2/sys_overlay.h" #include "Emu/Cell/Modules/StaticHLE.h" #include <map> #include <span> #include <set> #include <algorithm> #include "util/asm.hpp" LOG_CHANNEL(ppu_loader); extern std::string ppu_get_function_name(const std::string& _module, u32 fnid); extern std::string ppu_get_variable_name(const std::string& _module, u32 vnid); extern void ppu_register_range(u32 addr, u32 size); extern void ppu_register_function_at(u32 addr, u32 size, ppu_intrp_func_t ptr); extern void sys_initialize_tls(ppu_thread&, u64, u32, u32, u32); std::unordered_map<std::string, ppu_static_module*>& ppu_module_manager::get() { // In C++ the order of static initialization is undefined if it happens in // separate compilation units, therefore we have to initialize the map on first use. 
static std::unordered_map<std::string, ppu_static_module*> s_module_map; return s_module_map; } // HLE function name cache std::vector<std::string> g_ppu_function_names; atomic_t<u32> liblv2_begin = 0, liblv2_end = 0; extern u32 ppu_generate_id(std::string_view name) { // Symbol name suffix constexpr auto suffix = "\x67\x59\x65\x99\x04\x25\x04\x90\x56\x64\x27\x49\x94\x89\x74\x1A"sv; sha1_context ctx; u8 output[20]; // Compute SHA-1 hash sha1_starts(&ctx); sha1_update(&ctx, reinterpret_cast<const u8*>(name.data()), name.size()); sha1_update(&ctx, reinterpret_cast<const u8*>(suffix.data()), suffix.size()); sha1_finish(&ctx, output); le_t<u32> result = 0; std::memcpy(&result, output, sizeof(result)); return result; } ppu_static_module::ppu_static_module(const char* name) : name(name) { ppu_module_manager::register_module(this); } void ppu_static_module::add_init_func(void(*func)(ppu_static_module*)) { m_on_init.emplace_back(func); } void ppu_static_module::initialize() { for (auto func : m_on_init) { func(this); } } void ppu_module_manager::register_module(ppu_static_module* _module) { ppu_module_manager::get().emplace(_module->name, _module); } ppu_static_function& ppu_module_manager::access_static_function(const char* _module, u32 fnid) { auto& res = ::at32(ppu_module_manager::get(), _module)->functions[fnid]; if (res.name) { fmt::throw_exception("PPU FNID duplication in module %s (%s, 0x%x)", _module, res.name, fnid); } return res; } ppu_static_variable& ppu_module_manager::access_static_variable(const char* _module, u32 vnid) { auto& res = ::at32(ppu_module_manager::get(), _module)->variables[vnid]; if (res.name) { fmt::throw_exception("PPU VNID duplication in module %s (%s, 0x%x)", _module, res.name, vnid); } return res; } const ppu_static_module* ppu_module_manager::get_module(const std::string& name) { const auto& map = ppu_module_manager::get(); const auto found = map.find(name); return found != map.end() ? 
found->second : nullptr; } void ppu_module_manager::initialize_modules() { for (auto& _module : ppu_module_manager::get()) { _module.second->initialize(); } } // Global linkage information struct ppu_linkage_info { ppu_linkage_info() = default; ppu_linkage_info(const ppu_linkage_info&) = delete; ppu_linkage_info& operator=(const ppu_linkage_info&) = delete; struct module_data { struct info { ppu_static_function* static_func = nullptr; ppu_static_variable* static_var = nullptr; u32 export_addr = 0; std::set<u32> imports{}; std::set<u32> frefss{}; }; // FNID -> (export; [imports...]) std::map<u32, info> functions{}; std::map<u32, info> variables{}; // Obsolete bool imported = false; }; // Module map std::map<std::string, module_data> modules{}; std::map<std::string, atomic_t<bool>, std::less<>> lib_lock; shared_mutex lib_lock_mutex; shared_mutex mutex; }; // Initialize static modules. static void ppu_initialize_modules(ppu_linkage_info* link, utils::serial* ar = nullptr) { if (!link->modules.empty()) { return; } ppu_module_manager::initialize_modules(); const std::initializer_list<const ppu_static_module*> registered { &ppu_module_manager::cellAdec, &ppu_module_manager::cellAtrac, &ppu_module_manager::cellAtracMulti, &ppu_module_manager::cellAtracXdec, &ppu_module_manager::cellAudio, &ppu_module_manager::cellAvconfExt, &ppu_module_manager::cellAuthDialogUtility, &ppu_module_manager::cellBGDL, &ppu_module_manager::cellCamera, &ppu_module_manager::cellCelp8Enc, &ppu_module_manager::cellCelpEnc, &ppu_module_manager::cellCrossController, &ppu_module_manager::cellDaisy, &ppu_module_manager::cellDmux, &ppu_module_manager::cellDtcpIpUtility, &ppu_module_manager::cellFiber, &ppu_module_manager::cellFont, &ppu_module_manager::cellFontFT, &ppu_module_manager::cell_FreeType2, &ppu_module_manager::cellFs, &ppu_module_manager::cellGame, &ppu_module_manager::cellGameExec, &ppu_module_manager::cellGcmSys, &ppu_module_manager::cellGem, &ppu_module_manager::cellGifDec, 
&ppu_module_manager::cellHttp, &ppu_module_manager::cellHttps, &ppu_module_manager::cellHttpUtil, &ppu_module_manager::cellImeJp, &ppu_module_manager::cellJpgDec, &ppu_module_manager::cellJpgEnc, &ppu_module_manager::cellKey2char, &ppu_module_manager::cellL10n, &ppu_module_manager::cellLibprof, &ppu_module_manager::cellMic, &ppu_module_manager::cellMusic, &ppu_module_manager::cellMusicDecode, &ppu_module_manager::cellMusicExport, &ppu_module_manager::cellNetAoi, &ppu_module_manager::cellNetCtl, &ppu_module_manager::cellOskDialog, &ppu_module_manager::cellOvis, &ppu_module_manager::cellPamf, &ppu_module_manager::cellPesmUtility, &ppu_module_manager::cellPhotoDecode, &ppu_module_manager::cellPhotoExport, &ppu_module_manager::cellPhotoImportUtil, &ppu_module_manager::cellPngDec, &ppu_module_manager::cellPngEnc, &ppu_module_manager::cellPrint, &ppu_module_manager::cellRec, &ppu_module_manager::cellRemotePlay, &ppu_module_manager::cellResc, &ppu_module_manager::cellRtc, &ppu_module_manager::cellRtcAlarm, &ppu_module_manager::cellRudp, &ppu_module_manager::cellSail, &ppu_module_manager::cellSailRec, &ppu_module_manager::cellSaveData, &ppu_module_manager::cellMinisSaveData, &ppu_module_manager::cellScreenShot, &ppu_module_manager::cellSearch, &ppu_module_manager::cellSheap, &ppu_module_manager::cellSpudll, &ppu_module_manager::cellSpurs, &ppu_module_manager::cellSpursJq, &ppu_module_manager::cellSsl, &ppu_module_manager::cellSubDisplay, &ppu_module_manager::cellSync, &ppu_module_manager::cellSync2, &ppu_module_manager::cellSysconf, &ppu_module_manager::cellSysmodule, &ppu_module_manager::cellSysutil, &ppu_module_manager::cellSysutilAp, &ppu_module_manager::cellSysutilAvc2, &ppu_module_manager::cellSysutilAvcExt, &ppu_module_manager::cellSysutilNpEula, &ppu_module_manager::cellSysutilMisc, &ppu_module_manager::cellUsbd, &ppu_module_manager::cellUsbPspcm, &ppu_module_manager::cellUserInfo, &ppu_module_manager::cellVdec, &ppu_module_manager::cellVideoExport, 
&ppu_module_manager::cellVideoPlayerUtility, &ppu_module_manager::cellVideoUpload, &ppu_module_manager::cellVoice, &ppu_module_manager::cellVpost, &ppu_module_manager::libad_async, &ppu_module_manager::libad_core, &ppu_module_manager::libfs_utility_init, &ppu_module_manager::libmedi, &ppu_module_manager::libmixer, &ppu_module_manager::libsnd3, &ppu_module_manager::libsynth2, &ppu_module_manager::sceNp, &ppu_module_manager::sceNp2, &ppu_module_manager::sceNpClans, &ppu_module_manager::sceNpCommerce2, &ppu_module_manager::sceNpMatchingInt, &ppu_module_manager::sceNpPlus, &ppu_module_manager::sceNpSns, &ppu_module_manager::sceNpTrophy, &ppu_module_manager::sceNpTus, &ppu_module_manager::sceNpUtil, &ppu_module_manager::sys_crashdump, &ppu_module_manager::sys_io, &ppu_module_manager::sys_net, &ppu_module_manager::sysPrxForUser, &ppu_module_manager::sys_libc, &ppu_module_manager::sys_lv2dbg, &ppu_module_manager::static_hle, &ppu_module_manager::hle_patches, }; // Initialize double-purpose fake OPD array for HLE functions const auto& hle_funcs = ppu_function_manager::get(g_cfg.core.ppu_decoder != ppu_decoder_type::_static); u32& hle_funcs_addr = g_fxo->get<ppu_function_manager>().addr; // Allocate memory for the array (must be called after fixed allocations) if (!hle_funcs_addr) hle_funcs_addr = vm::alloc(::size32(hle_funcs) * 8, vm::main); else vm::page_protect(hle_funcs_addr, utils::align(::size32(hle_funcs) * 8, 0x1000), 0, vm::page_writable); // Initialize as PPU executable code ppu_register_range(hle_funcs_addr, ::size32(hle_funcs) * 8); // Fill the array (visible data: self address and function index) for (u32 addr = hle_funcs_addr, index = 0; index < hle_funcs.size(); addr += 8, index++) { // Function address = next CIA, RTOC = 0 (vm::null) vm::write32(addr + 0, addr + 4); vm::write32(addr + 4, 0); // Register the HLE function directly ppu_register_function_at(addr + 0, 4, nullptr); ppu_register_function_at(addr + 4, 4, hle_funcs[index]); } // Set memory protection 
to read-only vm::page_protect(hle_funcs_addr, utils::align(::size32(hle_funcs) * 8, 0x1000), 0, 0, vm::page_writable); // Initialize function names const bool is_first = g_ppu_function_names.empty(); if (is_first) { g_ppu_function_names.resize(hle_funcs.size()); g_ppu_function_names[0] = "INVALID"; g_ppu_function_names[1] = "HLE RETURN"; } // For HLE variable allocation u32 alloc_addr = 0; // "Use" all the modules for correct linkage if (ppu_loader.trace) { for (auto& _module : registered) { ppu_loader.trace("Registered static module: %s", _module->name); } } struct hle_vars_save { hle_vars_save() = default; hle_vars_save(const hle_vars_save&) = delete; hle_vars_save& operator =(const hle_vars_save&) = delete; hle_vars_save(utils::serial& ar) { auto& manager = ppu_module_manager::get(); while (true) { const std::string name = ar.pop<std::string>(); if (name.empty()) { // Null termination break; } const auto _module = ::at32(manager, name); auto& variable = _module->variables; for (usz i = 0, end = ar.pop<usz>(); i < end; i++) { auto* ptr = &::at32(variable, ar.pop<u32>()); ptr->addr = ar.pop<u32>(); ensure(!!ptr->var); } } } void save(utils::serial& ar) { for (auto& pair : ppu_module_manager::get()) { const auto _module = pair.second; if (_module->variables.empty()) { continue; } ar(_module->name); ar(_module->variables.size()); for (auto& variable : _module->variables) { ar(variable.first, variable.second.addr); } } // Null terminator ar(std::string{}); } }; if (ar) { g_fxo->init<hle_vars_save>(*ar); } else { g_fxo->init<hle_vars_save>(); } for (auto& pair : ppu_module_manager::get()) { const auto _module = pair.second; auto& linkage = link->modules[_module->name]; for (auto& function : _module->functions) { ppu_loader.trace("** 0x%08X: %s", function.first, function.second.name); if (is_first) { g_ppu_function_names[function.second.index] = fmt::format("%s:%s", function.second.name, _module->name); } auto& flink = linkage.functions[function.first]; 
flink.static_func = &function.second; flink.export_addr = g_fxo->get<ppu_function_manager>().func_addr(function.second.index); function.second.export_addr = &flink.export_addr; } for (auto& variable : _module->variables) { ppu_loader.trace("** &0x%08X: %s (size=0x%x, align=0x%x)", variable.first, variable.second.name, variable.second.size, variable.second.align); // Allocate HLE variable if (ar) { // Already loaded } else if (variable.second.size >= 0x10000 || variable.second.align >= 0x10000) { variable.second.addr = vm::alloc(variable.second.size, vm::main, std::max<u32>(variable.second.align, 0x10000)); } else { const u32 next = utils::align(alloc_addr, variable.second.align); const u32 end = next + variable.second.size - 1; if (!next || (end >> 16 != alloc_addr >> 16)) { alloc_addr = vm::alloc(0x10000, vm::main); } else { alloc_addr = next; } variable.second.addr = alloc_addr; alloc_addr += variable.second.size; } *variable.second.var = variable.second.addr; ppu_loader.trace("Allocated HLE variable %s.%s at 0x%x", _module->name, variable.second.name, *variable.second.var); // Initialize HLE variable if (variable.second.init) { variable.second.init(); } if ((variable.second.flags & MFF_HIDDEN) == 0) { auto& vlink = linkage.variables[variable.first]; vlink.static_var = &variable.second; vlink.export_addr = variable.second.addr; variable.second.export_addr = &vlink.export_addr; } } } } // For the debugger (g_ppu_function_names shouldn't change, string_view should suffice) extern const std::unordered_map<u32, std::string_view>& get_exported_function_names_as_addr_indexed_map() { struct info_t { std::unordered_map<u32, std::string_view> res; u64 update_time = 0; }; static thread_local std::unique_ptr<info_t> info; if (!info) { info = std::make_unique<info_t>(); info->res.reserve(ppu_module_manager::get().size()); } auto& [res, update_time] = *info; const auto link = g_fxo->try_get<ppu_linkage_info>(); const auto hle_funcs = g_fxo->try_get<ppu_function_manager>(); if 
(!link || !hle_funcs) { res.clear(); return res; } const u64 current_time = get_system_time(); // Update list every >=0.1 seconds if (current_time - update_time < 100'000) { return res; } update_time = current_time; res.clear(); for (auto& pair : ppu_module_manager::get()) { const auto _module = pair.second; auto& linkage = link->modules[_module->name]; for (auto& function : _module->functions) { auto& flink = linkage.functions[function.first]; u32 addr = flink.export_addr; if (vm::check_addr<4>(addr, vm::page_readable) && addr != hle_funcs->func_addr(function.second.index)) { addr = vm::read32(addr); if (!(addr % 4) && vm::check_addr<4>(addr, vm::page_executable)) { res.try_emplace(addr, g_ppu_function_names[function.second.index]); } } } } return res; } // Resolve relocations for variable/function linkage. static void ppu_patch_refs(const ppu_module& _module, std::vector<ppu_reloc>* out_relocs, u32 fref, u32 faddr) { struct ref_t { be_t<u32> type; be_t<u32> addr; be_t<u32> addend; // Note: Treating it as addend seems to be correct for now, but still unknown if theres more in this variable }; for (const ref_t* ref = &_module.get_ref<ref_t>(fref); ref->type; fref += sizeof(ref_t), ref = &_module.get_ref<ref_t>(fref)) { if (ref->addend) ppu_loader.warning("**** REF(%u): Addend value(0x%x, 0x%x)", ref->type, ref->addr, ref->addend); const u32 raddr = ref->addr; const u32 rtype = ref->type; const u32 rdata = faddr + ref->addend; if (out_relocs) { // Register relocation with unpredictable target (data=0) ppu_reloc _rel; _rel.addr = raddr; _rel.type = rtype; _rel.data = 0; out_relocs->emplace_back(_rel); } // OPs must be similar to relocations switch (rtype) { case 1: { const u32 value = _module.get_ref<u32>(ref->addr) = rdata; ppu_loader.trace("**** REF(1): 0x%x <- 0x%x", ref->addr, value); break; } case 4: { const u16 value = _module.get_ref<u16>(ref->addr) = static_cast<u16>(rdata); ppu_loader.trace("**** REF(4): 0x%x <- 0x%04x (0x%llx)", ref->addr, value, faddr); 
break; } case 6: { const u16 value = _module.get_ref<u16>(ref->addr) = static_cast<u16>(rdata >> 16) + (rdata & 0x8000 ? 1 : 0); ppu_loader.trace("**** REF(6): 0x%x <- 0x%04x (0x%llx)", ref->addr, value, faddr); break; } case 57: { const u16 value = _module.get_ref<ppu_bf_t<be_t<u16>, 0, 14>>(ref->addr) = static_cast<u16>(rdata) >> 2; ppu_loader.trace("**** REF(57): 0x%x <- 0x%04x (0x%llx)", ref->addr, value, faddr); break; } default: ppu_loader.error("**** REF(%u): Unknown/Illegal type (0x%x, 0x%x)", rtype, raddr, ref->addend); } } } enum PRX_EXPORT_ATTRIBUTES : u16 { PRX_EXPORT_LIBRARY_FLAG = 1, PRX_EXPORT_PRX_MANAGEMENT_FUNCTIONS_FLAG = 0x8000, }; // Export or import module struct struct ppu_prx_module_info { u8 size; u8 unk0; be_t<u16> version; be_t<u16> attributes; be_t<u16> num_func; be_t<u16> num_var; be_t<u16> num_tlsvar; u8 info_hash; u8 info_tlshash; u8 unk1[2]; vm::bcptr<char> name; vm::bcptr<u32> nids; // Imported FNIDs, Exported NIDs vm::bptr<u32> addrs; vm::bcptr<u32> vnids; // Imported VNIDs vm::bcptr<u32> vstubs; be_t<u32> unk4; be_t<u32> unk5; }; bool ppu_form_branch_to_code(u32 entry, u32 target); extern u32 ppu_get_exported_func_addr(u32 fnid, const std::string& module_name) { return g_fxo->get<ppu_linkage_info>().modules[module_name].functions[fnid].export_addr; } extern bool ppu_register_library_lock(std::string_view libname, bool lock_lib) { auto link = g_fxo->try_get<ppu_linkage_info>(); if (!link || libname.empty()) { return false; } reader_lock lock(link->lib_lock_mutex); if (auto it = link->lib_lock.find(libname); it != link->lib_lock.cend()) { return lock_lib ? 
!it->second.test_and_set() : it->second.test_and_reset(); } if (!lock_lib) { // If lock hasn't been installed it wasn't locked in the first place return false; } lock.upgrade(); auto& lib_lock = link->lib_lock.emplace(std::string{libname}, false).first->second; return !lib_lock.test_and_set(); } // Load and register exports; return special exports found (nameless module) static auto ppu_load_exports(const ppu_module& _module, ppu_linkage_info* link, u32 exports_start, u32 exports_end, bool for_observing_callbacks = false, std::vector<u32>* funcs = nullptr, std::basic_string<char>* loaded_flags = nullptr) { std::unordered_map<u32, u32> result; // Flags were already provided meaning it's an unload operation const bool unload_exports = loaded_flags && !loaded_flags->empty(); std::lock_guard lock(link->mutex); usz unload_index = 0; ppu_prx_module_info lib{}; for (u32 addr = exports_start; addr < exports_end; unload_index++, addr += lib.size ? lib.size : sizeof(ppu_prx_module_info)) { std::memcpy(&lib, &_module.get_ref<ppu_prx_module_info>(addr), sizeof(lib)); const bool is_library = !!(lib.attributes & PRX_EXPORT_LIBRARY_FLAG); const bool is_management = !is_library && !!(lib.attributes & PRX_EXPORT_PRX_MANAGEMENT_FUNCTIONS_FLAG); if (loaded_flags && !unload_exports) { loaded_flags->push_back(false); } if (is_management) { // Set special exports for (u32 i = 0, end = lib.num_func + lib.num_var; i < end; i++) { const u32 nid = _module.get_ref<u32>(lib.nids, i); const u32 addr = _module.get_ref<u32>(lib.addrs, i); if (funcs) { funcs->emplace_back(addr); } if (i < lib.num_func) { ppu_loader.notice("** Special: [%s] at 0x%x [0x%x, 0x%x]", ppu_get_function_name({}, nid), addr, _module.get_ref<u32>(addr), _module.get_ref<u32>(addr + 4)); } else { ppu_loader.notice("** Special: &[%s] at 0x%x", ppu_get_variable_name({}, nid), addr); } result.emplace(nid, addr); } continue; } if (!is_library) { // Skipped if none of the flags is set continue; } const std::string 
module_name(&_module.get_ref<const char>(lib.name)); if (unload_exports) { if (::at32(*loaded_flags, unload_index)) { ppu_register_library_lock(module_name, false); } continue; } ppu_loader.notice("** Exported module '%s' (vnids=0x%x, vstubs=0x%x, version=0x%x, attributes=0x%x, unk4=0x%x, unk5=0x%x)", module_name, lib.vnids, lib.vstubs, lib.version, lib.attributes, lib.unk4, lib.unk5); if (lib.num_tlsvar) { ppu_loader.error("Unexpected num_tlsvar (%u)!", lib.num_tlsvar); } const bool should_load = for_observing_callbacks || ppu_register_library_lock(module_name, true); if (loaded_flags) { loaded_flags->back() = should_load; } if (!should_load) { ppu_loader.notice("** Skipped module '%s' (already loaded)", module_name); continue; } // Static module const auto _sm = ppu_module_manager::get_module(module_name); // Module linkage auto& mlink = link->modules[module_name]; const auto fnids = +lib.nids; const auto faddrs = +lib.addrs; // Get functions for (u32 i = 0, end = lib.num_func; i < end; i++) { const u32 fnid = _module.get_ref<u32>(fnids, i); const u32 faddr = _module.get_ref<u32>(faddrs, i); ppu_loader.notice("**** %s export: [%s] (0x%08x) at 0x%x [at:0x%x]", module_name, ppu_get_function_name(module_name, fnid), fnid, faddr, _module.get_ref<u32>(faddr)); if (funcs) { funcs->emplace_back(faddr); } if (for_observing_callbacks) { continue; } // Function linkage info auto& flink = mlink.functions[fnid]; if (flink.static_func && flink.export_addr == g_fxo->get<ppu_function_manager>().func_addr(flink.static_func->index)) { flink.export_addr = 0; } if (flink.export_addr) { ppu_loader.notice("Already linked function '%s' in module '%s'", ppu_get_function_name(module_name, fnid), module_name); } //else { // Static function const auto _sf = _sm && _sm->functions.count(fnid) ? 
&::at32(_sm->functions, fnid) : nullptr; if (_sf && (_sf->flags & MFF_FORCED_HLE)) { // Inject a branch to the HLE implementation const u32 target = g_fxo->get<ppu_function_manager>().func_addr(_sf->index, true); // Set exported function flink.export_addr = target - 4; if (auto ptr = _module.get_ptr<u32>(faddr); vm::try_get_addr(ptr).first) { ppu_form_branch_to_code(*ptr, target); } } else { // Set exported function flink.export_addr = faddr; // Fix imports for (const u32 addr : flink.imports) { _module.get_ref<u32>(addr) = faddr; //ppu_loader.warning("Exported function '%s' in module '%s'", ppu_get_function_name(module_name, fnid), module_name); } for (const u32 fref : flink.frefss) { ppu_patch_refs(_module, nullptr, fref, faddr); } } } } const auto vnids = lib.nids + lib.num_func; const auto vaddrs = lib.addrs + lib.num_func; // Get variables for (u32 i = 0, end = lib.num_var; i < end; i++) { const u32 vnid = _module.get_ref<u32>(vnids, i); const u32 vaddr = _module.get_ref<u32>(vaddrs, i); ppu_loader.notice("**** %s export: &[%s] at 0x%x", module_name, ppu_get_variable_name(module_name, vnid), vaddr); if (for_observing_callbacks) { continue; } // Variable linkage info auto& vlink = mlink.variables[vnid]; if (vlink.static_var && vlink.export_addr == vlink.static_var->addr) { vlink.export_addr = 0; } if (vlink.export_addr) { ppu_loader.error("Already linked variable '%s' in module '%s'", ppu_get_variable_name(module_name, vnid), module_name); } //else { // Set exported variable vlink.export_addr = vaddr; // Fix imports for (const auto vref : vlink.imports) { ppu_patch_refs(_module, nullptr, vref, vaddr); //ppu_loader.warning("Exported variable '%s' in module '%s'", ppu_get_variable_name(module_name, vnid), module_name); } } } } return result; } static auto ppu_load_imports(const ppu_module& _module, std::vector<ppu_reloc>& relocs, ppu_linkage_info* link, u32 imports_start, u32 imports_end) { std::unordered_map<u32, void*> result; std::lock_guard lock(link->mutex); 
for (u32 addr = imports_start; addr < imports_end;) { const auto& lib = _module.get_ref<const ppu_prx_module_info>(addr); const std::string module_name(&_module.get_ref<const char>(lib.name)); ppu_loader.notice("** Imported module '%s' (ver=0x%x, attr=0x%x, 0x%x, 0x%x) [0x%x]", module_name, lib.version, lib.attributes, lib.unk4, lib.unk5, addr); if (lib.num_tlsvar) { ppu_loader.error("Unexpected num_tlsvar (%u)!", lib.num_tlsvar); } // Static module //const auto _sm = ppu_module_manager::get_module(module_name); // Module linkage auto& mlink = link->modules[module_name]; const auto fnids = +lib.nids; const auto faddrs = +lib.addrs; for (u32 i = 0, end = lib.num_func; i < end; i++) { const u32 fnid = _module.get_ref<u32>(fnids, i); const u32 fstub = _module.get_ref<u32>(faddrs, i); const u32 faddr = (faddrs + i).addr(); ppu_loader.notice("**** %s import: [%s] (0x%08x) -> 0x%x", module_name, ppu_get_function_name(module_name, fnid), fnid, fstub); // Function linkage info auto& flink = link->modules[module_name].functions[fnid]; // Add new import result.emplace(faddr, &flink); flink.imports.emplace(faddr); mlink.imported = true; // Link address (special HLE function by default) const u32 link_addr = flink.export_addr ? flink.export_addr : g_fxo->get<ppu_function_manager>().addr; // Write import table _module.get_ref<u32>(faddr) = link_addr; // Patch refs if necessary (0x2000 seems to be correct flag indicating the presence of additional info) if (const u32 frefs = (lib.attributes & 0x2000) ? 
+_module.get_ref<u32>(fnids, i + lib.num_func) : 0) { result.emplace(frefs, &flink); flink.frefss.emplace(frefs); ppu_patch_refs(_module, &relocs, frefs, link_addr); } //ppu_loader.warning("Imported function '%s' in module '%s' (0x%x)", ppu_get_function_name(module_name, fnid), module_name, faddr); } const auto vnids = +lib.vnids; const auto vstubs = +lib.vstubs; for (u32 i = 0, end = lib.num_var; i < end; i++) { const u32 vnid = _module.get_ref<u32>(vnids, i); const u32 vref = _module.get_ref<u32>(vstubs, i); ppu_loader.notice("**** %s import: &[%s] (ref=*0x%x)", module_name, ppu_get_variable_name(module_name, vnid), vref); // Variable linkage info auto& vlink = link->modules[module_name].variables[vnid]; // Add new import result.emplace(vref, &vlink); vlink.imports.emplace(vref); mlink.imported = true; // Link if available ppu_patch_refs(_module, &relocs, vref, vlink.export_addr); //ppu_loader.warning("Imported variable '%s' in module '%s' (0x%x)", ppu_get_variable_name(module_name, vnid), module_name, vlink.first); } addr += lib.size ? 
lib.size : sizeof(ppu_prx_module_info); } return result; } // For _sys_prx_register_module void ppu_manual_load_imports_exports(u32 imports_start, u32 imports_size, u32 exports_start, u32 exports_size, std::basic_string<char>& loaded_flags) { auto& _main = g_fxo->get<main_ppu_module>(); auto& link = g_fxo->get<ppu_linkage_info>(); ppu_module vm_all_fake_module{}; vm_all_fake_module.segs.emplace_back(ppu_segment{0x10000, 0 - 0x10000u, 1 /*LOAD*/, 0, 0 - 0x1000u, vm::base(0x10000)}); vm_all_fake_module.addr_to_seg_index.emplace(0x10000, 0); ppu_load_exports(vm_all_fake_module, &link, exports_start, exports_start + exports_size, false, nullptr, &loaded_flags); if (!imports_size) { return; } ppu_load_imports(vm_all_fake_module, _main.relocs, &link, imports_start, imports_start + imports_size); } // For savestates extern bool is_memory_compatible_for_copy_from_executable_optimization(u32 addr, u32 size) { if (g_cfg.savestate.state_inspection_mode) { return false; } static ppu_exec_object s_ppu_exec; static std::vector<char> zeroes; if (!addr) { // A call for cleanup s_ppu_exec.clear(); zeroes = {}; return false; } if (s_ppu_exec != elf_error::ok) { if (s_ppu_exec != elf_error::stream) { // Failed before return false; } s_ppu_exec.open(decrypt_self(fs::file(Emu.GetBoot()), Emu.klic.empty() ? 
nullptr : reinterpret_cast<u8*>(&Emu.klic[0]))); if (s_ppu_exec != elf_error::ok) { return false; } } for (const auto& prog : s_ppu_exec.progs) { const u32 vaddr = static_cast<u32>(prog.p_vaddr); const u32 seg_size = static_cast<u32>(prog.p_filesz); const u32 aligned_vaddr = vaddr & -0x10000; const u32 vaddr_offs = vaddr & 0xffff; // Check if the address is a start of segment within the executable if (prog.p_type == 0x1u /* LOAD */ && seg_size && aligned_vaddr == addr && prog.p_vaddr == prog.p_paddr && vaddr_offs + seg_size <= size) { zeroes.resize(std::max<usz>({zeroes.size(), usz{addr + size - (vaddr + seg_size)}, usz{vaddr_offs}})); // Check if gaps between segment and allocation bounds are still zeroes-only if (!std::memcmp(vm::_ptr<char>(aligned_vaddr), zeroes.data(), vaddr_offs) && !std::memcmp(vm::_ptr<char>(vaddr + seg_size), zeroes.data(), (addr + size - (vaddr + seg_size)))) { // Test memory equality return !std::memcmp(prog.bin.data(), vm::base(vaddr), seg_size); } } } return false; } void init_ppu_functions(utils::serial* ar, bool full = false) { g_fxo->need<ppu_linkage_info>(); if (ar) { const u32 addr = g_fxo->init<ppu_function_manager>(*ar)->addr; if (addr % 0x1000 || !vm::check_addr(addr)) { fmt::throw_exception("init_ppu_functions(): Failure to initialize function manager. 
(addr=0x%x, %s)", addr, *ar); } } else g_fxo->init<ppu_function_manager>(); if (full) { // Initialize HLE modules ppu_initialize_modules(&g_fxo->get<ppu_linkage_info>(), ar); } } static void ppu_check_patch_spu_images(const ppu_module& mod, const ppu_segment& seg) { if (!seg.size) { return; } const bool is_firmware = mod.path.starts_with(vfs::get("/dev_flash/")); const auto _main = g_fxo->try_get<main_ppu_module>(); const std::string_view seg_view{ensure(mod.get_ptr<char>(seg.addr)), seg.size}; auto find_first_of_multiple = [](std::string_view data, std::initializer_list<std::string_view> values, usz index) { u32 pos = static_cast<u32>(data.size()); for (std::string_view value : values) { if (usz pos0 = data.substr(index, pos - index).find(value); pos0 != umax && pos0 + index < pos) { pos = static_cast<u32>(pos0 + index); } } return pos; }; extern void utilize_spu_data_segment(u32 vaddr, const void* ls_data_vaddr, u32 size); // Search for [stqd lr,0x10(sp)] instruction or ELF file signature, whichever comes first const std::initializer_list<std::string_view> prefixes = {"\177ELF"sv, "\x24\0\x40\x80"sv}; u32 prev_bound = 0; for (u32 i = find_first_of_multiple(seg_view, prefixes, 0); i < seg.size; i = find_first_of_multiple(seg_view, prefixes, utils::align<u32>(i + 1, 4))) { const auto elf_header = ensure(mod.get_ptr<u8>(seg.addr + i)); if (i % 4 == 0 && std::memcmp(elf_header, "\x24\0\x40\x80", 4) == 0) { bool next = true; const u32 old_i = i; u32 guid_start = umax, guid_end = umax; for (u32 search = i & -128, tries = 10; tries && search >= prev_bound; tries--, search = utils::sub_saturate<u32>(search, 128)) { if (seg_view[search] != 0x42 && seg_view[search] != 0x43) { continue; } const u32 inst1 = read_from_ptr<be_t<u32>>(seg_view, search); const u32 inst2 = read_from_ptr<be_t<u32>>(seg_view, search + 4); const u32 inst3 = read_from_ptr<be_t<u32>>(seg_view, search + 8); const u32 inst4 = read_from_ptr<be_t<u32>>(seg_view, search + 12); if ((inst1 & 0xfe'00'00'7f) 
!= 0x42000002 || (inst2 & 0xfe'00'00'7f) != 0x42000002 || (inst3 & 0xfe'00'00'7f) != 0x42000002 || (inst4 & 0xfe'00'00'7f) != 0x42000002) { continue; } guid_start = search + seg.addr; i = search; next = false; break; } if (next) { continue; } std::string_view ls_segment = seg_view.substr(i); // Bound to a bit less than LS size ls_segment = ls_segment.substr(0, 0x38000); for (u32 addr_last = 0, valid_count = 0, invalid_count = 0;;) { const u32 instruction = static_cast<u32>(ls_segment.find("\x24\0\x40\x80"sv, addr_last)); if (instruction != umax) { if (instruction % 4 != i % 4) { // Unaligned, continue addr_last = instruction + (i % 4 - instruction % 4) % 4; continue; } // FIXME: This seems to terminate SPU code prematurely in some cases // Likely due to absolute branches if (spu_thread::is_exec_code(instruction, {reinterpret_cast<const u8*>(ls_segment.data()), ls_segment.size()}, 0)) { addr_last = instruction + 4; valid_count++; invalid_count = 0; continue; } if (invalid_count == 0) { // Allow a single case of invalid data addr_last = instruction + 4; invalid_count++; continue; } addr_last = instruction; } if (addr_last >= 0x80 && valid_count >= 2) { const u32 begin = i & -128; u32 end = std::min<u32>(seg.size, utils::align<u32>(i + addr_last + 256, 128)); u32 guessed_ls_addr = 0; // Try to guess LS address by observing the pattern for disable/enable interrupts // ILA R2, PC + 8 // BIE/BID R2 for (u32 found = 0, last_vaddr = 0, it = begin + 16; it < end - 16; it += 4) { const u32 inst1 = read_from_ptr<be_t<u32>>(seg_view, it); const u32 inst2 = read_from_ptr<be_t<u32>>(seg_view, it + 4); const u32 inst3 = read_from_ptr<be_t<u32>>(seg_view, it + 8); const u32 inst4 = read_from_ptr<be_t<u32>>(seg_view, it + 12); if ((inst1 & 0xfe'00'00'7f) == 0x42000002 && (inst2 & 0xfe'00'00'7f) == 0x42000002 && (inst3 & 0xfe'00'00'7f) == 0x42000002 && (inst4 & 0xfe'00'00'7f) == 0x42000002) { // SPURS GUID pattern end = it; guid_end = end + seg.addr; break; } if ((inst1 >> 7) % 4 == 
0 && (inst1 & 0xfe'00'00'7f) == 0x42000002 && (inst2 == 0x35040100 || inst2 == 0x35080100)) { const u32 addr_inst = (inst1 >> 7) % 0x40000; if (u32 addr_seg = addr_inst - std::min<u32>(it + 8 - begin, addr_inst)) { if (last_vaddr != addr_seg) { guessed_ls_addr = 0; found = 0; } found++; last_vaddr = addr_seg; if (found >= 2) { // Good segment address guessed_ls_addr = last_vaddr; ppu_log.notice("Found IENABLE/IDSIABLE Pattern at 0x%05x", it + seg.addr); } } } } if (guessed_ls_addr) { end = begin + std::min<u32>(end - begin, SPU_LS_SIZE - guessed_ls_addr); } ppu_log.success("Found valid roaming SPU code at 0x%x..0x%x (guessed_ls_addr=0x%x, GUID=0x%05x..0x%05x)", seg.addr + begin, seg.addr + end, guessed_ls_addr, guid_start, guid_end); if (!is_firmware && _main == &mod) { // Siginify that the base address is unknown by passing 0 utilize_spu_data_segment(guessed_ls_addr ? guessed_ls_addr : 0x4000, seg_view.data() + begin, end - begin); } i = std::max<u32>(end, i + 4) - 4; prev_bound = i + 4; } else { i = old_i; } break; } continue; } // Try to load SPU image const spu_exec_object obj(fs::file(elf_header, seg.size - i)); if (obj != elf_error::ok) { // This address does not have an SPU elf continue; } // Segment info dump std::string name; std::string dump; std::vector<u32> applied; // Executable hash sha1_context sha2; sha1_starts(&sha2); u8 sha1_hash[20]; for (const auto& prog : obj.progs) { // Only hash the data, we are not loading it sha1_update(&sha2, reinterpret_cast<const uchar*>(&prog.p_vaddr), sizeof(prog.p_vaddr)); sha1_update(&sha2, reinterpret_cast<const uchar*>(&prog.p_memsz), sizeof(prog.p_memsz)); sha1_update(&sha2, reinterpret_cast<const uchar*>(&prog.p_filesz), sizeof(prog.p_filesz)); fmt::append(dump, "\n\tSegment: p_type=0x%x, p_vaddr=0x%llx, p_filesz=0x%llx, p_memsz=0x%llx, p_offset=0x%llx", prog.p_type, prog.p_vaddr, prog.p_filesz, prog.p_memsz, prog.p_offset); if (prog.p_type == 0x1u /* LOAD */ && prog.p_filesz > 0u) { if (prog.p_vaddr && 
!is_firmware && _main == &mod) { extern void utilize_spu_data_segment(u32 vaddr, const void* ls_data_vaddr, u32 size); utilize_spu_data_segment(prog.p_vaddr, (elf_header + prog.p_offset), prog.p_filesz); } sha1_update(&sha2, (elf_header + prog.p_offset), prog.p_filesz); } else if (prog.p_type == 0x4u /* NOTE */ && prog.p_filesz > 0u) { sha1_update(&sha2, (elf_header + prog.p_offset), prog.p_filesz); // We assume that the string SPUNAME exists 0x14 bytes into the NOTE segment name = ensure(mod.get_ptr<const char>(seg.addr + i + prog.p_offset + 0x14)); if (!name.empty()) { fmt::append(dump, "\n\tSPUNAME: '%s'", name); } } } fmt::append(dump, " (image addr: 0x%x, size: 0x%x)", seg.addr + i, obj.highest_offset); sha1_finish(&sha2, sha1_hash); // Format patch name std::string hash("SPU-0000000000000000000000000000000000000000"); for (u32 i = 0; i < sizeof(sha1_hash); i++) { constexpr auto pal = "0123456789abcdef"; hash[4 + i * 2] = pal[sha1_hash[i] >> 4]; hash[5 + i * 2] = pal[sha1_hash[i] & 15]; } if (g_cfg.core.spu_debug) { fs::file temp(fs::get_cache_dir() + "/spu_progs/" + vfs::escape(name.substr(name.find_last_of('/') + 1)) + '_' + hash.substr(4) + ".elf", fs::rewrite); if (!temp || !temp.write(obj.save())) { ppu_loader.error("Failed to dump SPU program from PPU executable: name='%s', hash=%s", name, hash); } } // Try to patch each segment, will only succeed if the address exists in SPU local storage for (const auto& prog : obj.progs) { if (Emu.DeserialManager()) { break; } // Apply the patch g_fxo->get<patch_engine>().apply(applied, hash, [&](u32 addr, u32 /*size*/) { return addr + elf_header + prog.p_offset; }, prog.p_filesz, prog.p_vaddr); if (!Emu.GetTitleID().empty()) { // Alternative patch g_fxo->get<patch_engine>().apply(applied, Emu.GetTitleID() + '-' + hash, [&](u32 addr, u32 /*size*/) { return addr + elf_header + prog.p_offset; }, prog.p_filesz, prog.p_vaddr); } } if (applied.empty()) { ppu_loader.warning("SPU executable hash: %s%s", hash, dump); } else { 
ppu_loader.success("SPU executable hash: %s (<- %u)%s", hash, applied.size(), dump); } i += ::narrow<u32>(obj.highest_offset) - 4; prev_bound = i + 4; } } void try_spawn_ppu_if_exclusive_program(const ppu_module& m) { // If only PRX/OVL has been loaded at Emu.BootGame(), launch a single PPU thread so its memory can be viewed if (Emu.IsReady() && g_fxo->get<main_ppu_module>().segs.empty() && !Emu.DeserialManager()) { ppu_thread_params p { .stack_addr = vm::cast(vm::alloc(SYS_PROCESS_PARAM_STACK_SIZE_MAX, vm::stack, 4096)), .stack_size = SYS_PROCESS_PARAM_STACK_SIZE_MAX, }; auto ppu = idm::make_ptr<named_thread<ppu_thread>>(p, "test_thread", 0); ppu->cia = m.funcs.empty() ? m.secs[0].addr : m.funcs[0].addr; // For kernel explorer g_fxo->init<lv2_memory_container>(4096); } } struct prx_names_table { shared_mutex mutex; std::set<std::string, std::less<>> registered; atomic_t<const char*> lut[0x1000'0000 / 0x1'0000]{}; SAVESTATE_INIT_POS(4.1); // Dependency on lv2_obj prx_names_table() noexcept { idm::select<lv2_obj, lv2_prx>([this](u32, lv2_prx& prx) { install(prx.name, prx); }); } void install(std::string_view name, lv2_prx& prx) { if (name.empty()) { return; } if (name.ends_with(".sprx"sv) && name.size() > (".sprx"sv).size()) { name = name.substr(0, name.size() - (".sprx"sv).size()); } std::lock_guard lock(mutex); const auto ptr = registered.emplace(name).first->c_str(); for (auto& seg : prx.segs) { if (!seg.size) { continue; } // Doesn't support addresses above 256MB because it wastes memory and is very unlikely (if somehow does occur increase it) const u32 max0 = (seg.addr + seg.size - 1) >> 16; const u32 max = std::min<u32>(::size32(lut), max0); if (max0 > max) { ppu_loader.error("Skipping PRX name registeration: %s, max=0x%x", name, max0 << 16); } for (u32 i = seg.addr >> 16; i <= max; i++) { lut[i].release(ptr); } } } }; const char* get_prx_name_by_cia(u32 addr) { if (auto t = g_fxo->try_get<prx_names_table>()) { addr >>= 16; if (addr < std::size(t->lut)) { 
return t->lut[addr];
		}
	}

	return nullptr;
}

// Load a PRX library image.
// virtual_load: analysis-only mode - segments go to host-heap buffers instead of emulated memory.
// path/file_offset: origin of the image, stored on the created object.
// ar: optional deserializer (savestate) - segment addresses are read back instead of allocated.
std::shared_ptr<lv2_prx> ppu_load_prx(const ppu_prx_object& elf, bool virtual_load, const std::string& path, s64 file_offset, utils::serial* ar)
{
	if (elf != elf_error::ok)
	{
		return nullptr;
	}

	// Create new PRX object (IDM-registered only for a real, non-savestate, non-virtual load)
	const auto prx = !ar && !virtual_load ? idm::make_ptr<lv2_obj, lv2_prx>() : std::make_shared<lv2_prx>();

	// Access linkage information object
	auto& link = g_fxo->get<ppu_linkage_info>();

	// Initialize HLE modules
	ppu_initialize_modules(&link);

	// Library hash
	sha1_context sha;
	sha1_starts(&sha);

	u32 end = 0; // Analysis limit, set from executable sections below
	u32 toc = 0;

	// 0x100000: Workaround for analyser glitches
	u32 allocating_address = 0x100000;

	for (const auto& prog : elf.progs)
	{
		ppu_loader.notice("** Segment: p_type=0x%x, p_vaddr=0x%llx, p_filesz=0x%llx, p_memsz=0x%llx, flags=0x%x", prog.p_type, prog.p_vaddr, prog.p_filesz, prog.p_memsz, prog.p_flags);

		// Hash big-endian values
		sha1_update(&sha, reinterpret_cast<const uchar*>(&prog.p_type), sizeof(prog.p_type));
		sha1_update(&sha, reinterpret_cast<const uchar*>(&prog.p_flags), sizeof(prog.p_flags));

		switch (const u32 p_type = prog.p_type)
		{
		case 0x1: // LOAD
		{
			auto& _seg = prx->segs.emplace_back();
			_seg.flags = prog.p_flags;
			_seg.type = p_type;

			if (prog.p_memsz)
			{
				const u32 mem_size = ::narrow<u32>(prog.p_memsz);
				const u32 file_size = ::narrow<u32>(prog.p_filesz);
				//const u32 init_addr = ::narrow<u32>(prog.p_vaddr);

				// Alloc segment memory
				// Or use saved address
				u32 addr = 0;

				if (virtual_load)
				{
					// Analysis-only: hand out fake addresses from a simple bump allocator
					addr = std::exchange(allocating_address, allocating_address + utils::align<u32>(mem_size, 0x10000));
				}
				else
				{
					addr = (!ar ? vm::alloc(mem_size, vm::main) : ar->operator u32());
				}

				_seg.ptr = vm::base(addr);

				if (virtual_load)
				{
					// Leave additional room for the analyser so it can safely access beyond limit a bit
					// Because with VM the address space is not really a limit so any u32 address is valid there, here it is UB to create pointer that goes beyond the boundaries
					// TODO: Use make_shared_for_overwrite when all compilers support it
					const usz alloc_size = utils::align<usz>(mem_size, 0x10000) + 4096;
					prx->allocations.push_back(std::shared_ptr<u8[]>(new u8[alloc_size]));
					_seg.ptr = prx->allocations.back().get();
					std::memset(static_cast<u8*>(_seg.ptr) + prog.bin.size(), 0, alloc_size - 4096 - prog.bin.size());
				}
				else if (!vm::check_addr(addr))
				{
					fmt::throw_exception("vm::alloc() failed (size=0x%x)", mem_size);
				}

				_seg.addr = addr;
				_seg.size = mem_size;
				_seg.filesz = file_size;
				prx->addr_to_seg_index.emplace(addr, ::size32(prx->segs) - 1);

				// Copy segment data (skipped on savestate load - memory is restored elsewhere)
				if (!ar) std::memcpy(ensure(prx->get_ptr<void>(addr)), prog.bin.data(), file_size);
				ppu_loader.warning("**** Loaded to 0x%x...0x%x (size=0x%x)", addr, addr + mem_size - 1, mem_size);

				// Hash segment
				sha1_update(&sha, reinterpret_cast<const uchar*>(&prog.p_vaddr), sizeof(prog.p_vaddr));
				sha1_update(&sha, reinterpret_cast<const uchar*>(&prog.p_memsz), sizeof(prog.p_memsz));
				sha1_update(&sha, prog.bin.data(), prog.bin.size());

				// Initialize executable code if necessary
				if (prog.p_flags & 0x1 && !virtual_load)
				{
					ppu_register_range(addr, mem_size);
				}
			}

			break;
		}

		case 0x700000a4: break; // Relocations (processed in a later pass below)

		default: ppu_loader.error("Unknown segment type! 0x%08x", p_type);
		}
	}

	for (const auto& s : elf.shdrs)
	{
		ppu_loader.notice("** Section: sh_type=0x%x, addr=0x%llx, size=0x%llx, flags=0x%x", std::bit_cast<u32>(s.sh_type), s.sh_addr, s.sh_size, s._sh_flags);

		if (s.sh_type != sec_type::sht_progbits) continue;

		const u32 addr = vm::cast(s.sh_addr);
		const u32 size = vm::cast(s.sh_size);

		if (addr && size) // TODO: some sections with addr=0 are valid
		{
			// Find the loaded segment containing this section and rebase it
			for (usz i = 0; i < prx->segs.size(); i++)
			{
				const u32 saddr = static_cast<u32>(elf.progs[i].p_vaddr);

				if (addr >= saddr && addr < saddr + elf.progs[i].p_memsz)
				{
					// "Relocate" section
					ppu_segment _sec;
					_sec.addr = addr - saddr + prx->segs[i].addr;
					_sec.size = size;
					_sec.type = std::bit_cast<u32>(s.sh_type);
					_sec.flags = static_cast<u32>(s._sh_flags & 7);
					_sec.filesz = 0;
					prx->secs.emplace_back(_sec);

					// Executable (SHF_EXECINSTR) sections of the first segment extend the analysis limit
					if (_sec.flags & 0x4 && i == 0)
					{
						end = std::max<u32>(end, _sec.addr + _sec.size);
					}

					break;
				}
			}
		}
	}

	// Do relocations
	for (auto& prog : elf.progs)
	{
		switch (prog.p_type)
		{
		case 0x700000a4:
		{
			// Relocation information of the SCE_PPURELA segment
			struct ppu_prx_relocation_info
			{
				be_t<u64> offset;
				be_t<u16> unk0;
				u8 index_value; // Segment supplying the symbol value (0xFF = absolute)
				u8 index_addr;  // Segment receiving the write
				be_t<u32> type;
				vm::bptr<void, u64> ptr;
			};

			for (uint i = 0; i < prog.p_filesz; i += sizeof(ppu_prx_relocation_info))
			{
				const auto& rel = reinterpret_cast<const ppu_prx_relocation_info&>(prog.bin[i]);

				if (rel.offset >= utils::align<u64>(::at32(prx->segs, rel.index_addr).size, 0x100))
				{
					fmt::throw_exception("Relocation offset out of segment memory! (offset=0x%x, index_addr=%u, seg_size=0x%x)", rel.offset, rel.index_addr, prx->segs[rel.index_addr].size);
				}

				const u32 data_base = rel.index_value == 0xFF ? 0 : ::at32(prx->segs, rel.index_value).addr;

				if (rel.index_value != 0xFF && !data_base)
				{
					fmt::throw_exception("Empty segment has been referenced for relocation data! (reloc_offset=0x%x, index_value=%u)", i, rel.index_value);
				}

				// Record the relocation even when not applying it (used by the analyser)
				ppu_reloc _rel;
				const u32 raddr = _rel.addr = vm::cast(::at32(prx->segs, rel.index_addr).addr + rel.offset);
				const u32 rtype = _rel.type = rel.type;
				const u64 rdata = _rel.data = data_base + rel.ptr.addr();
				prx->relocs.emplace_back(_rel);

				if (ar)
				{
					// Savestate load: do not write relocations (segment copy was skipped above too)
					continue;
				}

				switch (rtype)
				{
				case 1: // R_PPC64_ADDR32
				{
					const u32 value = *ensure(prx->get_ptr<u32>(raddr)) = static_cast<u32>(rdata);
					ppu_loader.trace("**** RELOCATION(1): 0x%x <- 0x%08x (0x%llx)", raddr, value, rdata);
					break;
				}

				case 4: //R_PPC64_ADDR16_LO
				{
					const u16 value = *ensure(prx->get_ptr<u16>(raddr)) = static_cast<u16>(rdata);
					ppu_loader.trace("**** RELOCATION(4): 0x%x <- 0x%04x (0x%llx)", raddr, value, rdata);
					break;
				}

				case 5: //R_PPC64_ADDR16_HI
				{
					const u16 value = *ensure(prx->get_ptr<u16>(raddr)) = static_cast<u16>(rdata >> 16);
					ppu_loader.trace("**** RELOCATION(5): 0x%x <- 0x%04x (0x%llx)", raddr, value, rdata);
					break;
				}

				case 6: //R_PPC64_ADDR16_HA
				{
					// High-adjusted: round up when the low half's sign bit would borrow
					const u16 value = *ensure(prx->get_ptr<u16>(raddr)) = static_cast<u16>(rdata >> 16) + (rdata & 0x8000 ?
1 : 0);
					ppu_loader.trace("**** RELOCATION(6): 0x%x <- 0x%04x (0x%llx)", raddr, value, rdata);
					break;
				}

				case 10: //R_PPC64_REL24
				{
					// PC-relative branch displacement, stored in instruction bits [6, 30)
					const u32 value = *ensure(prx->get_ptr<ppu_bf_t<be_t<u32>, 6, 24>>(raddr)) = static_cast<u32>(rdata - raddr) >> 2;
					ppu_loader.warning("**** RELOCATION(10): 0x%x <- 0x%06x (0x%llx)", raddr, value, rdata);
					break;
				}

				case 11: //R_PPC64_REL14
				{
					const u32 value = *ensure(prx->get_ptr<ppu_bf_t<be_t<u32>, 16, 14>>(raddr)) = static_cast<u32>(rdata - raddr) >> 2;
					ppu_loader.warning("**** RELOCATION(11): 0x%x <- 0x%06x (0x%llx)", raddr, value, rdata);
					break;
				}

				case 38: //R_PPC64_ADDR64
				{
					const u64 value = *ensure(prx->get_ptr<u64>(raddr)) = rdata;
					ppu_loader.trace("**** RELOCATION(38): 0x%x <- 0x%016llx (0x%llx)", raddr, value, rdata);
					break;
				}

				case 44: //R_PPC64_REL64
				{
					const u64 value = *ensure(prx->get_ptr<u64>(raddr)) = rdata - raddr;
					ppu_loader.trace("**** RELOCATION(44): 0x%x <- 0x%016llx (0x%llx)", raddr, value, rdata);
					break;
				}

				case 57: //R_PPC64_ADDR16_LO_DS
				{
					const u16 value = *ensure(prx->get_ptr<ppu_bf_t<be_t<u16>, 0, 14>>(raddr)) = static_cast<u16>(rdata) >> 2;
					ppu_loader.trace("**** RELOCATION(57): 0x%x <- 0x%04x (0x%llx)", raddr, value, rdata);
					break;
				}

				default: ppu_loader.error("**** RELOCATION(%u): Illegal/Unknown type! (addr=0x%x; 0x%llx)", rtype, raddr, rdata);
				}

				if (rdata == 0)
				{
					ppu_loader.todo("**** RELOCATION(%u): 0x%x <- (zero-based value)", rtype, raddr);
				}
			}

			break;
		}
		default: break;
		}
	}

	std::vector<u32> exported_funcs;

	if (!elf.progs.empty() && elf.progs[0].p_paddr)
	{
		// PRX module info header, located through the first program header's p_paddr
		struct ppu_prx_library_info
		{
			be_t<u16> attributes;
			u8 version[2];
			char name[28];
			be_t<u32> toc;
			be_t<u32> exports_start;
			be_t<u32> exports_end;
			be_t<u32> imports_start;
			be_t<u32> imports_end;
		};

		// Access library information (TODO)
		const auto lib_info = ensure(prx->get_ptr<const ppu_prx_library_info>(::narrow<u32>(prx->segs[0].addr + elf.progs[0].p_paddr - elf.progs[0].p_offset)));
		const std::string lib_name = lib_info->name;

		strcpy_trunc(prx->module_info_name, lib_name);
		prx->module_info_version[0] = lib_info->version[0];
		prx->module_info_version[1] = lib_info->version[1];
		prx->module_info_attributes = lib_info->attributes;

		prx->exports_start = lib_info->exports_start;
		prx->exports_end = lib_info->exports_end;

		// Count export entries (variable-sized: the first byte of each entry is its size,
		// 0 meaning the default sizeof(ppu_prx_module_info))
		for (u32 start = prx->exports_start, size = 0;; size++)
		{
			if (start >= prx->exports_end)
			{
				// Preallocate storage
				prx->m_external_loaded_flags.resize(size);
				break;
			}

			const u8 increment = *ensure(prx->get_ptr<u8>(start));
			start += increment ? increment : sizeof(ppu_prx_module_info);
		}

		ppu_loader.warning("Library %s (rtoc=0x%x):", lib_name, lib_info->toc);

		// In virtual (analysis-only) load, route linkage into a throwaway object
		ppu_linkage_info dummy{};

		prx->specials = ppu_load_exports(*prx, virtual_load ? &dummy : &link, prx->exports_start, prx->exports_end, true, &exported_funcs);
		prx->imports = ppu_load_imports(*prx, prx->relocs, virtual_load ?
&dummy : &link, lib_info->imports_start, lib_info->imports_end); if (virtual_load) { prx->imports.clear(); } std::stable_sort(prx->relocs.begin(), prx->relocs.end()); toc = lib_info->toc; } else { ppu_loader.error("Library %s: PRX library info not found"); } prx->start.set(prx->specials[0xbc9a0086]); prx->stop.set(prx->specials[0xab779874]); prx->exit.set(prx->specials[0x3ab9a95e]); prx->prologue.set(prx->specials[0x0d10fd3f]); prx->epilogue.set(prx->specials[0x330f7005]); prx->name = path.substr(path.find_last_of('/') + 1); prx->path = path; prx->offset = file_offset; g_fxo->need<prx_names_table>(); g_fxo->get<prx_names_table>().install(prx->name, *prx); sha1_finish(&sha, prx->sha1); // Format patch name std::string hash = fmt::format("PRX-%s", fmt::base57(prx->sha1)); if (prx->path.ends_with("sys/external/liblv2.sprx"sv)) { liblv2_begin = prx->segs[0].addr; liblv2_end = prx->segs[0].addr + prx->segs[0].size; } std::vector<u32> applied; for (usz i = Emu.DeserialManager() ? prx->segs.size() : 0; i < prx->segs.size(); i++) { const auto& seg = prx->segs[i]; if (!seg.size) continue; const std::string hash_seg = fmt::format("%s-%u", hash, i); // Apply the patch std::vector<u32> _applied; g_fxo->get<patch_engine>().apply(_applied, hash_seg, [&](u32 addr, u32 size) { return prx->get_ptr<u8>(addr + seg.addr, size); }, seg.size); if (!Emu.GetTitleID().empty()) { // Alternative patch g_fxo->get<patch_engine>().apply(_applied, Emu.GetTitleID() + '-' + hash_seg, [&](u32 addr, u32 size) { return prx->get_ptr<u8>(addr + seg.addr, size); }, seg.size); } // Rebase patch offsets std::for_each(_applied.begin(), _applied.end(), [&](u32& res) { if (res != umax) res += seg.addr; }); applied.insert(applied.end(), _applied.begin(), _applied.end()); if (_applied.empty()) { ppu_loader.warning("PRX hash of %s[%u]: %s", prx->name, i, hash_seg); } else { ppu_loader.success("PRX hash of %s[%u]: %s (<- %u)", prx->name, i, hash_seg, _applied.size()); } } // Disabled for PRX for now (problematic 
and does not seem to have any benefit) end = 0; if (!applied.empty() || ar) { // Compare memory changes in memory after executable code sections end if (end >= prx->segs[0].addr && end < prx->segs[0].addr + prx->segs[0].size) { for (const auto& prog : elf.progs) { // Find the first segment if (prog.p_type == 0x1u /* LOAD */ && prog.p_memsz) { std::span<const uchar> elf_memory{prog.bin.begin(), prog.bin.size()}; elf_memory = elf_memory.subspan(end - prx->segs[0].addr); const auto tmp = std::span<uchar>{&prx->get_ref<uchar>(end), elf_memory.size()}; if (!std::equal(elf_memory.begin(), elf_memory.end(), tmp.begin(), tmp.end())) { // There are changes, disable analysis optimization ppu_loader.notice("Disabling analysis optimization due to memory changes from original file"); end = 0; } break; } } } } // Embedded SPU elf patching for (const auto& seg : prx->segs) { ppu_check_patch_spu_images(*prx, seg); } prx->analyse(toc, 0, end, applied, exported_funcs); if (!ar && !virtual_load) { try_spawn_ppu_if_exclusive_program(*prx); } return prx; } void ppu_unload_prx(const lv2_prx& prx) { if (prx.segs.empty() || prx.segs[0].ptr != vm::base(prx.segs[0].addr)) { return; } std::unique_lock lock(g_fxo->get<ppu_linkage_info>().mutex, std::defer_lock); // Clean linkage info for (auto& imp : prx.imports) { if (!lock) { lock.lock(); } auto pinfo = static_cast<ppu_linkage_info::module_data::info*>(imp.second); pinfo->frefss.erase(imp.first); pinfo->imports.erase(imp.first); } //for (auto& exp : prx.exports) //{ // auto pinfo = static_cast<ppu_linkage_info::module_data::info*>(exp.second); // if (pinfo->static_func) // { // pinfo->export_addr = g_fxo->get<ppu_function_manager>().func_addr(pinfo->static_func->index); // } // else if (pinfo->static_var) // { // pinfo->export_addr = pinfo->static_var->addr; // } // else // { // pinfo->export_addr = 0; // } //} if (lock) { lock.unlock(); } if (prx.path.ends_with("sys/external/liblv2.sprx"sv)) { liblv2_begin = 0; liblv2_end = 0; } // Format 
patch name std::string hash = fmt::format("PRX-%s", fmt::base57(prx.sha1)); for (auto& seg : prx.segs) { if (!seg.size) continue; vm::dealloc(seg.addr, vm::main); const std::string hash_seg = fmt::format("%s-%u", hash, &seg - prx.segs.data()); // Deallocatte memory used for patches g_fxo->get<patch_engine>().unload(hash_seg); if (!Emu.GetTitleID().empty()) { // Alternative patch g_fxo->get<patch_engine>().unload(Emu.GetTitleID() + '-' + hash_seg); } } } bool ppu_load_exec(const ppu_exec_object& elf, bool virtual_load, const std::string& elf_path, utils::serial* ar) { if (elf != elf_error::ok) { return false; } // Check if it is a standalone executable first for (const auto& prog : elf.progs) { if (prog.p_type == 0x1u /* LOAD */ && prog.p_memsz) { using addr_range = utils::address_range; const addr_range r = addr_range::start_length(static_cast<u32>(prog.p_vaddr), static_cast<u32>(prog.p_memsz)); if ((prog.p_vaddr | prog.p_memsz) > u32{umax} || !r.valid() || !r.inside(addr_range::start_length(0x00000000, 0x30000000))) { return false; } } } init_ppu_functions(ar, false); // Set for delayed initialization in ppu_initialize() auto& _main = g_fxo->get<main_ppu_module>(); // Access linkage information object auto& link = g_fxo->get<ppu_linkage_info>(); // TLS information u32 tls_vaddr = 0; u32 tls_fsize = 0; u32 tls_vsize = 0; // Process information u32 sdk_version = SYS_PROCESS_PARAM_SDK_VERSION_UNKNOWN; s32 primary_prio = 1001; u32 primary_stacksize = SYS_PROCESS_PARAM_STACK_SIZE_MAX; u32 malloc_pagesize = SYS_PROCESS_PARAM_MALLOC_PAGE_SIZE_1M; u32 ppc_seg = 0; // Limit for analysis u32 end = 0; // Executable hash sha1_context sha; sha1_starts(&sha); struct on_fatal_error { ppu_module& _main; bool errored = true; ~on_fatal_error() { if (!errored) { return; } // Revert previous allocations on an error for (const auto& seg : _main.segs) { vm::dealloc(seg.addr); } } } error_handler{_main}; if (virtual_load) { // No need for cleanup error_handler.errored = false; } const 
auto old_process_info = g_ps3_process_info; // Allocate memory at fixed positions for (const auto& prog : elf.progs) { ppu_loader.notice("** Segment: p_type=0x%x, p_vaddr=0x%llx, p_filesz=0x%llx, p_memsz=0x%llx, flags=0x%x", prog.p_type, prog.p_vaddr, prog.p_filesz, prog.p_memsz, prog.p_flags); ppu_segment _seg; const u32 addr = _seg.addr = vm::cast(prog.p_vaddr); const u32 size = _seg.size = ::narrow<u32>(prog.p_memsz); const u32 type = _seg.type = prog.p_type; _seg.flags = prog.p_flags; _seg.filesz = ::narrow<u32>(prog.p_filesz); // Hash big-endian values sha1_update(&sha, reinterpret_cast<const uchar*>(&prog.p_type), sizeof(prog.p_type)); sha1_update(&sha, reinterpret_cast<const uchar*>(&prog.p_flags), sizeof(prog.p_flags)); if (type == 0x1 /* LOAD */ && prog.p_memsz) { if (prog.bin.size() > size || prog.bin.size() != prog.p_filesz) { ppu_loader.error("ppu_load_exec(): Invalid binary size (0x%llx, memsz=0x%x)", prog.bin.size(), size); return false; } const bool already_loaded = ar && vm::check_addr(addr, vm::page_readable, size); _seg.ptr = vm::base(addr); if (virtual_load) { // Leave additional room for the analyser so it can safely access beyond limit a bit // Because with VM the address sapce is not really a limit so any u32 address is valid there, here it is UB to create pointer that goes beyond the boundaries // TODO: Use make_shared_for_overwrite when all compilers support it const usz alloc_size = utils::align<usz>(size, 0x10000) + 4096; _main.allocations.push_back(std::shared_ptr<u8[]>(new u8[alloc_size])); _seg.ptr = _main.allocations.back().get(); std::memset(static_cast<u8*>(_seg.ptr) + prog.bin.size(), 0, alloc_size - 4096 - prog.bin.size()); } else if (already_loaded) { } else if (![&]() -> bool { // 1M pages if it is RSX shared const u32 area_flags = (_seg.flags >> 28) ? 
vm::page_size_1m : vm::page_size_64k; const u32 alloc_at = std::max<u32>(addr & -0x10000000, 0x10000); const auto area = vm::reserve_map(vm::any, std::max<u32>(addr & -0x10000000, 0x10000), 0x10000000, area_flags); if (!area) { return false; } if (area->addr != alloc_at || (area->flags & 0xf00) != area_flags) { ppu_loader.error("Failed to allocate memory at 0x%x - conflicting memory area exists: area->addr=0x%x, area->flags=0x%x", addr, area->addr, area->flags); return false; } return area->falloc(addr, size); }()) { ppu_loader.error("ppu_load_exec(): vm::falloc() failed (addr=0x%x, memsz=0x%x)", addr, size); return false; } // Store only LOAD segments (TODO) _main.segs.emplace_back(_seg); _main.addr_to_seg_index.emplace(addr, ::size32(_main.segs) - 1); // Copy segment data, hash it if (!already_loaded) { std::memcpy(_main.get_ptr<void>(addr), prog.bin.data(), prog.bin.size()); } else { // For backwards compatibility: already loaded memory will always be writable const u32 size0 = utils::align(size + addr % 0x10000, 0x10000); const u32 addr0 = addr & -0x10000; vm::page_protect(addr0, size0, 0, vm::page_writable | vm::page_readable, vm::page_executable); } sha1_update(&sha, reinterpret_cast<const uchar*>(&prog.p_vaddr), sizeof(prog.p_vaddr)); sha1_update(&sha, reinterpret_cast<const uchar*>(&prog.p_memsz), sizeof(prog.p_memsz)); sha1_update(&sha, prog.bin.data(), prog.bin.size()); // Initialize executable code if necessary if (prog.p_flags & 0x1 && !virtual_load) { ppu_register_range(addr, size); } } } // Load section list, used by the analyser for (const auto& s : elf.shdrs) { ppu_loader.notice("** Section: sh_type=0x%x, addr=0x%llx, size=0x%llx, flags=0x%x", std::bit_cast<u32>(s.sh_type), s.sh_addr, s.sh_size, s._sh_flags); if (s.sh_type != sec_type::sht_progbits) continue; ppu_segment _sec; const u32 addr = _sec.addr = vm::cast(s.sh_addr); const u32 size = _sec.size = vm::cast(s.sh_size); _sec.type = std::bit_cast<u32>(s.sh_type); _sec.flags = 
static_cast<u32>(s._sh_flags & 7); _sec.filesz = 0; if (addr && size) { _main.secs.emplace_back(_sec); if (_sec.flags & 0x4 && addr >= _main.segs[0].addr && addr + size <= _main.segs[0].addr + _main.segs[0].size) { end = std::max<u32>(end, addr + size); } } } sha1_finish(&sha, _main.sha1); // Format patch name std::string hash("PPU-0000000000000000000000000000000000000000"); for (u32 i = 0; i < 20; i++) { constexpr auto pal = "0123456789abcdef"; hash[4 + i * 2] = pal[_main.sha1[i] >> 4]; hash[5 + i * 2] = pal[_main.sha1[i] & 15]; } Emu.SetExecutableHash(hash); // Apply the patch std::vector<u32> applied; g_fxo->get<patch_engine>().apply(applied, !ar ? hash : std::string{}, [&](u32 addr, u32 size) { return _main.get_ptr<u8>(addr, size); }); if (!ar && !Emu.GetTitleID().empty()) { // Alternative patch g_fxo->get<patch_engine>().apply(applied, Emu.GetTitleID() + '-' + hash, [&](u32 addr, u32 size) { return _main.get_ptr<u8>(addr, size); }); } if (!applied.empty() || ar) { // Compare memory changes in memory after executable code sections end if (end >= _main.segs[0].addr && end < _main.segs[0].addr + _main.segs[0].size) { for (const auto& prog : elf.progs) { // Find the first segment if (prog.p_type == 0x1u /* LOAD */ && prog.p_memsz) { std::span<const uchar> elf_memory{prog.bin.begin(), prog.bin.size()}; elf_memory = elf_memory.subspan(end - _main.segs[0].addr); const auto tmp = std::span<uchar>{&_main.get_ref<u8>(end), elf_memory.size()}; if (!std::equal(elf_memory.begin(), elf_memory.end(), tmp.begin(), tmp.end())) { // There are changes, disable analysis optimization ppu_loader.notice("Disabling analysis optimization due to memory changes from original file"); end = 0; } break; } } } } if (applied.empty()) { ppu_loader.warning("PPU executable hash: %s", hash); } else { ppu_loader.success("PPU executable hash: %s (<- %u)", hash, applied.size()); } // Initialize HLE modules ppu_initialize_modules(&link, ar); // Embedded SPU elf patching for (const auto& seg : 
_main.segs) { ppu_check_patch_spu_images(_main, seg); } // Static HLE patching if (g_cfg.core.hook_functions && !virtual_load) { auto shle = g_fxo->init<statichle_handler>(0); for (u32 i = _main.segs[0].addr; i < (_main.segs[0].addr + _main.segs[0].size); i += 4) { vm::cptr<u8> _ptr = vm::cast(i); shle->check_against_patterns(_ptr, (_main.segs[0].addr + _main.segs[0].size) - i, i); } } // Read control flags (0 if doesn't exist) g_ps3_process_info.ctrl_flags1 = 0; if (bool not_found = g_ps3_process_info.self_info.valid) { for (const auto& ctrl : g_ps3_process_info.self_info.supplemental_hdr) { if (ctrl.type == 1) { if (!std::exchange(not_found, false)) { ppu_loader.error("More than one control flags header found! (flags1=0x%x)", ctrl.PS3_plaintext_capability_header.ctrl_flag1); break; } g_ps3_process_info.ctrl_flags1 |= ctrl.PS3_plaintext_capability_header.ctrl_flag1; } } ppu_loader.notice("SELF header information found: ctrl_flags1=0x%x, authid=0x%llx", g_ps3_process_info.ctrl_flags1, g_ps3_process_info.self_info.prog_id_hdr.program_authority_id); } // Load other programs for (auto& prog : elf.progs) { switch (const u32 p_type = prog.p_type) { case 0x00000001: break; // LOAD (already loaded) case 0x00000007: // TLS { ppu_loader.notice("TLS info segment found: tls-image=*0x%x, image-size=0x%x, tls-size=0x%x", prog.p_vaddr, prog.p_filesz, prog.p_memsz); if ((prog.p_vaddr | prog.p_filesz | prog.p_memsz) > u32{umax}) { ppu_loader.error("ppu_load_exec(): TLS segment is invalid!"); return false; } tls_vaddr = vm::cast(prog.p_vaddr); tls_fsize = ::narrow<u32>(prog.p_filesz); tls_vsize = ::narrow<u32>(prog.p_memsz); break; } case 0x60000001: // LOOS+1 { if (prog.p_filesz) { struct process_param_t { be_t<u32> size; be_t<u32> magic; be_t<u32> version; be_t<u32> sdk_version; be_t<s32> primary_prio; be_t<u32> primary_stacksize; be_t<u32> malloc_pagesize; be_t<u32> ppc_seg; //be_t<u32> crash_dump_param_addr; }; const auto& info = 
*ensure(_main.get_ptr<process_param_t>(vm::cast(prog.p_vaddr))); if (info.size < sizeof(process_param_t)) { ppu_loader.warning("Bad process_param size! [0x%x : 0x%x]", info.size, sizeof(process_param_t)); } if (info.magic != SYS_PROCESS_PARAM_MAGIC) { ppu_loader.error("Bad process_param magic! [0x%x]", info.magic); } else { sdk_version = info.sdk_version; if (s32 prio = info.primary_prio; prio < 3072 && (prio >= (g_ps3_process_info.debug_or_root() ? 0 : -512))) { primary_prio = prio; } primary_stacksize = info.primary_stacksize; malloc_pagesize = info.malloc_pagesize; ppc_seg = info.ppc_seg; ppu_loader.notice("*** sdk version: 0x%x", info.sdk_version); ppu_loader.notice("*** primary prio: %d", info.primary_prio); ppu_loader.notice("*** primary stacksize: 0x%x", info.primary_stacksize); ppu_loader.notice("*** malloc pagesize: 0x%x", info.malloc_pagesize); ppu_loader.notice("*** ppc seg: 0x%x", info.ppc_seg); //ppu_loader.notice("*** crash dump param addr: 0x%x", info.crash_dump_param_addr); } } break; } case 0x60000002: // LOOS+2 { if (prog.p_filesz) { struct ppu_proc_prx_param_t { be_t<u32> size; be_t<u32> magic; be_t<u32> version; be_t<u32> unk0; be_t<u32> libent_start; be_t<u32> libent_end; be_t<u32> libstub_start; be_t<u32> libstub_end; be_t<u16> ver; be_t<u16> unk1; be_t<u32> unk2; }; const auto& proc_prx_param = *ensure(_main.get_ptr<const ppu_proc_prx_param_t>(vm::cast(prog.p_vaddr))); ppu_loader.notice("* libent_start = *0x%x", proc_prx_param.libent_start); ppu_loader.notice("* libstub_start = *0x%x", proc_prx_param.libstub_start); ppu_loader.notice("* unk0 = 0x%x", proc_prx_param.unk0); ppu_loader.notice("* unk2 = 0x%x", proc_prx_param.unk2); if (proc_prx_param.magic != 0x1b434cecu) { ppu_loader.error("ppu_load_exec(): Bad magic! (0x%x)", proc_prx_param.magic); return false; } ppu_linkage_info dummy{}; ppu_load_exports(_main, virtual_load ? 
&dummy : &link, proc_prx_param.libent_start, proc_prx_param.libent_end); ppu_load_imports(_main, _main.relocs, virtual_load ? &dummy : &link, proc_prx_param.libstub_start, proc_prx_param.libstub_end); std::stable_sort(_main.relocs.begin(), _main.relocs.end()); } break; } default: { ppu_loader.error("Unknown phdr type (0x%08x)", p_type); } } } // Initialize memory stats (according to sdk version) u32 mem_size; if (Emu.IsVsh()) { // Because vsh.self comes before any generic application, more memory is available to it mem_size = 0xF000000; } else if (sdk_version > 0x0021FFFF) { mem_size = 0xD500000; } else if (sdk_version > 0x00192FFF) { mem_size = 0xD300000; } else if (sdk_version > 0x0018FFFF) { mem_size = 0xD100000; } else if (sdk_version > 0x0017FFFF) { mem_size = 0xD000000; } else if (sdk_version > 0x00154FFF) { mem_size = 0xCC00000; } else { mem_size = 0xC800000; } if (g_cfg.core.debug_console_mode) { // TODO: Check for all sdk versions mem_size += 0xC000000; } // Initialize process std::vector<std::shared_ptr<lv2_prx>> loaded_modules; // Module list to load at startup std::set<std::string> load_libs; if (g_cfg.core.libraries_control.get_set().count("liblv2.sprx:lle") || !g_cfg.core.libraries_control.get_set().count("liblv2.sprx:hle")) { // Will load libsysmodule.sprx internally load_libs.emplace("liblv2.sprx"); } else if (g_cfg.core.libraries_control.get_set().count("libsysmodule.sprx:lle") || !g_cfg.core.libraries_control.get_set().count("libsysmodule.sprx:hle")) { // Load only libsysmodule.sprx load_libs.emplace("libsysmodule.sprx"); } if (ar || Emu.IsVsh() || virtual_load) { // Cannot be used with vsh.self or savestates (they self-manage itself) load_libs.clear(); } const std::string lle_dir = vfs::get("/dev_flash/sys/external/"); if (!fs::is_file(lle_dir + "liblv2.sprx")) { ppu_loader.error("PS3 firmware is not installed or the installed firmware is invalid." "\nYou should install the PS3 Firmware (Menu: File -> Install Firmware)." 
"\nVisit https://rpcs3.net/ for Quickstart Guide and more information."); } // Program entry u32 entry = static_cast<u32>(elf.header.e_entry); // Run entry from elf (HLE) // Set path (TODO) _main.name.clear(); _main.path = elf_path; _main.elf_entry = static_cast<u32>(elf.header.e_entry); _main.seg0_code_end = end; _main.applied_patches = applied; if (!virtual_load) { // Set SDK version g_ps3_process_info.sdk_ver = sdk_version; // Set ppc fixed allocations segment permission g_ps3_process_info.ppc_seg = ppc_seg; if (Emu.init_mem_containers) { // Refer to sys_process_exit2 for explanation // Make init_mem_containers empty before call const auto callback = std::move(Emu.init_mem_containers); callback(mem_size); } else if (!ar) { g_fxo->init<id_manager::id_map<lv2_memory_container>>(); g_fxo->init<lv2_memory_container>(mem_size); } void init_fxo_for_exec(utils::serial* ar, bool full); init_fxo_for_exec(ar, false); liblv2_begin = 0; liblv2_end = 0; } else { g_ps3_process_info = old_process_info; } if (!load_libs.empty()) { for (const auto& name : load_libs) { const ppu_prx_object obj = decrypt_self(fs::file(lle_dir + name)); if (obj == elf_error::ok) { ppu_loader.warning("Loading library: %s", name); auto prx = ppu_load_prx(obj, false, lle_dir + name, 0, nullptr); prx->state = PRX_STATE_STARTED; prx->load_exports(); if (prx->funcs.empty()) { ppu_loader.error("Module %s has no functions!", name); } else { // TODO: fix arguments prx->validate(prx->funcs[0].addr); } if (name == "liblv2.sprx") { // Run liblv2.sprx entry point (TODO) entry = prx->start.addr(); } else { loaded_modules.emplace_back(std::move(prx)); } } else { ppu_loader.error("Failed to load /dev_flash/sys/external/%s: %s (forcing HLE implementation)", name, obj.get_error()); } } } if (ar || virtual_load) { error_handler.errored = false; return true; } if (ppc_seg != 0x0) { if (ppc_seg != 0x1) { ppu_loader.todo("Unknown ppc_seg flag value = 0x%x", ppc_seg); } // Additional segment for fixed allocations if 
(!vm::map(0x30000000, 0x10000000, 0x200)) { fmt::throw_exception("Failed to map ppc_seg's segment!"); } } // Fix primary stack size switch (u32 sz = primary_stacksize) { case SYS_PROCESS_PRIMARY_STACK_SIZE_32K: primary_stacksize = 32 * 1024; break; case SYS_PROCESS_PRIMARY_STACK_SIZE_64K: primary_stacksize = 64 * 1024; break; case SYS_PROCESS_PRIMARY_STACK_SIZE_96K: primary_stacksize = 96 * 1024; break; case SYS_PROCESS_PRIMARY_STACK_SIZE_128K: primary_stacksize = 128 * 1024; break; case SYS_PROCESS_PRIMARY_STACK_SIZE_256K: primary_stacksize = 256 * 1024; break; case SYS_PROCESS_PRIMARY_STACK_SIZE_512K: primary_stacksize = 512 * 1024; break; case SYS_PROCESS_PRIMARY_STACK_SIZE_1M: primary_stacksize = 1024 * 1024; break; default: { // According to elad335, the min value seems to be 64KB instead of the expected 4KB (SYS_PROCESS_PARAM_STACK_SIZE_MIN) primary_stacksize = utils::align<u32>(std::clamp<u32>(sz, 0x10000, SYS_PROCESS_PARAM_STACK_SIZE_MAX), 4096); break; } } // Initialize main thread ppu_thread_params p{}; p.stack_addr = vm::cast(vm::alloc(primary_stacksize, vm::stack, 4096)); p.stack_size = primary_stacksize; p.entry = vm::_ref<ppu_func_opd_t>(entry); auto ppu = idm::make_ptr<named_thread<ppu_thread>>(p, "main_thread", primary_prio, 1); // Write initial data (exitspawn) if (!Emu.data.empty()) { std::memcpy(vm::base(ppu->stack_addr + ppu->stack_size - ::size32(Emu.data)), Emu.data.data(), Emu.data.size()); ppu->gpr[1] -= utils::align<u32>(::size32(Emu.data), 0x10); } // Initialize process arguments // Calculate storage requirements on the stack const u32 pointers_storage_size = u32{sizeof(u64)} * utils::align<u32>(::size32(Emu.envp) + ::size32(Emu.argv) + 2, 2); u32 stack_alloc_size = pointers_storage_size; for (const auto& arg : Emu.argv) { stack_alloc_size += utils::align<u32>(::size32(arg) + 1, 0x10); } for (const auto& arg : Emu.envp) { stack_alloc_size += utils::align<u32>(::size32(arg) + 1, 0x10); } ensure(ppu->stack_size > stack_alloc_size); 
vm::ptr<u64> args = vm::cast(static_cast<u32>(ppu->stack_addr + ppu->stack_size - stack_alloc_size - utils::align<u32>(::size32(Emu.data), 0x10))); vm::ptr<u8> args_data = vm::cast(args.addr() + pointers_storage_size); const vm::ptr<u64> argv = args; for (const auto& arg : Emu.argv) { const u32 arg_size = ::size32(arg) + 1; std::memcpy(args_data.get_ptr(), arg.data(), arg_size); *args++ = args_data.addr(); args_data = vm::cast(args_data.addr() + utils::align<u32>(arg_size, 0x10)); } *args++ = 0; const vm::ptr<u64> envp = args; args = envp; for (const auto& arg : Emu.envp) { const u32 arg_size = ::size32(arg) + 1; std::memcpy(args_data.get_ptr(), arg.data(), arg_size); *args++ = args_data.addr(); args_data = vm::cast(args_data.addr() + utils::align<u32>(arg_size, 0x10)); } *args++ = 0; ppu->gpr[1] -= stack_alloc_size; ensure(g_fxo->get<lv2_memory_container>().take(primary_stacksize)); ppu->cmd_push({ppu_cmd::initialize, 0}); if (entry == static_cast<u32>(elf.header.e_entry) && !Emu.IsVsh()) { // Set TLS args, call sys_initialize_tls ppu->cmd_list ({ { ppu_cmd::set_args, 4 }, u64{ppu->id}, u64{tls_vaddr}, u64{tls_fsize}, u64{tls_vsize}, { ppu_cmd::hle_call, FIND_FUNC(sys_initialize_tls) }, }); } // Run start functions for (const auto& prx : loaded_modules) { if (!prx->start) { continue; } // Reset arguments, run module entry point function ppu->cmd_list ({ { ppu_cmd::set_args, 2 }, u64{0}, u64{0}, { ppu_cmd::lle_call, prx->start.addr() }, }); } // Set command line arguments, run entry function ppu->cmd_list ({ { ppu_cmd::set_args, 8 }, u64{Emu.argv.size()}, u64{argv.addr()}, u64{envp.addr()}, u64{Emu.envp.size()}, u64{ppu->id}, u64{tls_vaddr}, u64{tls_fsize}, u64{tls_vsize}, { ppu_cmd::set_gpr, 11 }, u64{elf.header.e_entry}, { ppu_cmd::set_gpr, 12 }, u64{malloc_pagesize}, { ppu_cmd::entry_call, 0 }, }); // Set actual memory protection (experimental) for (const auto& prog : elf.progs) { const u32 addr = static_cast<u32>(prog.p_vaddr); const u32 size = 
static_cast<u32>(prog.p_memsz); if (prog.p_type == 0x1u /* LOAD */ && prog.p_memsz && (prog.p_flags & 0x022000002) == 0u /* W */) { // Set memory protection to read-only when necessary (only if PPU-W, SPU-W, RSX-W are all disabled) ensure(vm::page_protect(addr, utils::align(size, 0x1000), 0, 0, vm::page_writable)); } } error_handler.errored = false; return true; } std::pair<std::shared_ptr<lv2_overlay>, CellError> ppu_load_overlay(const ppu_exec_object& elf, bool virtual_load, const std::string& path, s64 file_offset, utils::serial* ar) { if (elf != elf_error::ok) { return {nullptr, CELL_ENOENT}; } // Access linkage information object auto& link = g_fxo->get<ppu_linkage_info>(); // Executable hash sha1_context sha; sha1_starts(&sha); // Check if it is an overlay executable first for (const auto& prog : elf.progs) { if (prog.p_type == 0x1u /* LOAD */ && prog.p_memsz) { using addr_range = utils::address_range; const addr_range r = addr_range::start_length(::narrow<u32>(prog.p_vaddr), ::narrow<u32>(prog.p_memsz)); if (!r.valid() || !r.inside(addr_range::start_length(0x30000000, 0x10000000))) { // TODO: Check error and if there's a better way to error check return {nullptr, CELL_ENOEXEC}; } } } std::shared_ptr<lv2_overlay> ovlm = std::make_shared<lv2_overlay>(); // Set path (TODO) ovlm->name = path.substr(path.find_last_of('/') + 1); ovlm->path = path; ovlm->offset = file_offset; u32 end = 0; // Allocate memory at fixed positions for (const auto& prog : elf.progs) { ppu_loader.notice("** Segment: p_type=0x%x, p_vaddr=0x%llx, p_filesz=0x%llx, p_memsz=0x%llx, flags=0x%x", prog.p_type, prog.p_vaddr, prog.p_filesz, prog.p_memsz, prog.p_flags); ppu_segment _seg; const u32 addr = _seg.addr = vm::cast(prog.p_vaddr); const u32 size = _seg.size = ::narrow<u32>(prog.p_memsz); const u32 type = _seg.type = prog.p_type; _seg.flags = prog.p_flags; _seg.filesz = ::narrow<u32>(prog.p_filesz); // Hash big-endian values sha1_update(&sha, reinterpret_cast<const uchar*>(&prog.p_type), 
sizeof(prog.p_type)); sha1_update(&sha, reinterpret_cast<const uchar*>(&prog.p_flags), sizeof(prog.p_flags)); if (type == 0x1 /* LOAD */ && prog.p_memsz) { if (prog.bin.size() > size || prog.bin.size() != prog.p_filesz) fmt::throw_exception("Invalid binary size (0x%llx, memsz=0x%x)", prog.bin.size(), size); const bool already_loaded = !!ar; // Unimplemented optimization for savestates _seg.ptr = vm::base(addr); if (virtual_load) { // Leave additional room for the analyser so it can safely access beyond limit a bit // Because with VM the address sapce is not really a limit so any u32 address is valid there, here it is UB to create pointer that goes beyond the boundaries // TODO: Use make_shared_for_overwrite when all compilers support it const usz alloc_size = utils::align<usz>(size, 0x10000) + 4096; ovlm->allocations.push_back(std::shared_ptr<u8[]>(new u8[alloc_size])); _seg.ptr = ovlm->allocations.back().get(); std::memset(static_cast<u8*>(_seg.ptr) + prog.bin.size(), 0, alloc_size - 4096 - prog.bin.size()); } else if (already_loaded) { if (!vm::check_addr(addr, vm::page_readable, size)) { ppu_loader.error("ppu_load_overlay(): Archived PPU overlay memory has not been found! 
(addr=0x%x, memsz=0x%x)", addr, size); return {nullptr, CELL_EABORT}; } } else if (!vm::get(vm::any, 0x30000000)->falloc(addr, size)) { ppu_loader.error("ppu_load_overlay(): vm::falloc() failed (addr=0x%x, memsz=0x%x)", addr, size); // Revert previous allocations for (const auto& seg : ovlm->segs) { ensure(vm::dealloc(seg.addr)); } // TODO: Check error code, maybe disallow more than one overlay instance completely return {nullptr, CELL_EBUSY}; } // Store only LOAD segments (TODO) ovlm->segs.emplace_back(_seg); ovlm->addr_to_seg_index.emplace(addr, ::size32(ovlm->segs) - 1); // Copy segment data, hash it if (!already_loaded) std::memcpy(ensure(ovlm->get_ptr<void>(addr)), prog.bin.data(), prog.bin.size()); sha1_update(&sha, reinterpret_cast<const uchar*>(&prog.p_vaddr), sizeof(prog.p_vaddr)); sha1_update(&sha, reinterpret_cast<const uchar*>(&prog.p_memsz), sizeof(prog.p_memsz)); sha1_update(&sha, prog.bin.data(), prog.bin.size()); // Initialize executable code if necessary if (prog.p_flags & 0x1 && !virtual_load) { ppu_register_range(addr, size); } } } // Load section list, used by the analyser for (const auto& s : elf.shdrs) { ppu_loader.notice("** Section: sh_type=0x%x, addr=0x%llx, size=0x%llx, flags=0x%x", std::bit_cast<u32>(s.sh_type), s.sh_addr, s.sh_size, s._sh_flags); if (s.sh_type != sec_type::sht_progbits) continue; ppu_segment _sec; const u32 addr = _sec.addr = vm::cast(s.sh_addr); const u32 size = _sec.size = vm::cast(s.sh_size); _sec.type = std::bit_cast<u32>(s.sh_type); _sec.flags = static_cast<u32>(s._sh_flags & 7); _sec.filesz = 0; if (addr && size) { ovlm->secs.emplace_back(_sec); if (_sec.flags & 0x4 && addr >= ovlm->segs[0].addr && addr + size <= ovlm->segs[0].addr + ovlm->segs[0].size) { end = std::max<u32>(end, addr + size); } } } sha1_finish(&sha, ovlm->sha1); // Format patch name std::string hash("OVL-0000000000000000000000000000000000000000"); for (u32 i = 0; i < 20; i++) { constexpr auto pal = "0123456789abcdef"; hash[4 + i * 2] = 
pal[ovlm->sha1[i] >> 4]; hash[5 + i * 2] = pal[ovlm->sha1[i] & 15]; } // Apply the patch std::vector<u32> applied; g_fxo->get<patch_engine>().apply(applied, !Emu.DeserialManager() ? hash : std::string{}, [ovlm](u32 addr, u32 size) { return ovlm->get_ptr<u8>(addr, size); }); if (!Emu.DeserialManager() && !Emu.GetTitleID().empty()) { // Alternative patch g_fxo->get<patch_engine>().apply(applied, Emu.GetTitleID() + '-' + hash, [ovlm](u32 addr, u32 size) { return ovlm->get_ptr<u8>(addr, size); }); } if (!applied.empty() || ar) { // Compare memory changes in memory after executable code sections end if (end >= ovlm->segs[0].addr && end < ovlm->segs[0].addr + ovlm->segs[0].size) { for (const auto& prog : elf.progs) { // Find the first segment if (prog.p_type == 0x1u /* LOAD */ && prog.p_memsz) { std::span<const uchar> elf_memory{prog.bin.begin(), prog.bin.size()}; elf_memory = elf_memory.subspan(end - ovlm->segs[0].addr); if (!std::equal(elf_memory.begin(), elf_memory.end(), &ovlm->get_ref<u8>(end))) { // There are changes, disable analysis optimization ppu_loader.notice("Disabling analysis optimization due to memory changes from original file"); end = 0; } break; } } } } // Embedded SPU elf patching for (const auto& seg : ovlm->segs) { ppu_check_patch_spu_images(*ovlm, seg); } if (applied.empty()) { ppu_loader.warning("OVL hash of %s: %s", ovlm->name, hash); } else { ppu_loader.success("OVL hash of %s: %s (<- %u)", ovlm->name, hash, applied.size()); } // Load other programs for (auto& prog : elf.progs) { switch (const u32 p_type = prog.p_type) { case 0x00000001: break; // LOAD (already loaded) case 0x60000001: // LOOS+1 { if (prog.p_filesz) { struct process_param_t { be_t<u32> size; //0x60 be_t<u32> magic; //string OVLM be_t<u32> version; //0x17000 be_t<u32> sdk_version; //seems to be correct //string "stage_ovlm" //and a lot of zeros. 
}; const auto& info = *ensure(ovlm->get_ptr<process_param_t>(vm::cast(prog.p_vaddr))); if (info.size < sizeof(process_param_t)) { ppu_loader.warning("Bad process_param size! [0x%x : 0x%x]", info.size, u32{sizeof(process_param_t)}); } if (info.magic != 0x4f564c4du) //string "OVLM" { ppu_loader.error("Bad process_param magic! [0x%x]", info.magic); } else { ppu_loader.notice("*** sdk version: 0x%x", info.sdk_version); } } break; } case 0x60000002: // LOOS+2 seems to be 0x0 in size for overlay elfs, at least in known cases { if (prog.p_filesz) { struct ppu_proc_prx_param_t { be_t<u32> size; be_t<u32> magic; be_t<u32> version; be_t<u32> unk0; be_t<u32> libent_start; be_t<u32> libent_end; be_t<u32> libstub_start; be_t<u32> libstub_end; be_t<u16> ver; be_t<u16> unk1; be_t<u32> unk2; }; const auto& proc_prx_param = *ensure(ovlm->get_ptr<const ppu_proc_prx_param_t>(vm::cast(prog.p_vaddr))); ppu_loader.notice("* libent_start = *0x%x", proc_prx_param.libent_start); ppu_loader.notice("* libstub_start = *0x%x", proc_prx_param.libstub_start); ppu_loader.notice("* unk0 = 0x%x", proc_prx_param.unk0); ppu_loader.notice("* unk2 = 0x%x", proc_prx_param.unk2); if (proc_prx_param.magic != 0x1b434cecu) { fmt::throw_exception("Bad magic! (0x%x)", proc_prx_param.magic); } ppu_linkage_info dummy{}; ppu_load_exports(*ovlm, virtual_load ? &dummy : &link, proc_prx_param.libent_start, proc_prx_param.libent_end); ppu_load_imports(*ovlm, ovlm->relocs, virtual_load ? 
&dummy : &link, proc_prx_param.libstub_start, proc_prx_param.libstub_end); } break; } default: { ppu_loader.error("Unknown phdr type (0x%08x)", p_type); } } } ovlm->entry = static_cast<u32>(elf.header.e_entry); ovlm->seg0_code_end = end; ovlm->applied_patches = std::move(applied); const bool is_being_used_in_emulation = (vm::base(ovlm->segs[0].addr) == ovlm->segs[0].ptr); if (!is_being_used_in_emulation) { // Postpone to later return {std::move(ovlm), {}}; } const auto cpu = cpu_thread::get_current(); // Analyse executable (TODO) if (!ovlm->analyse(0, ovlm->entry, end, ovlm->applied_patches, std::vector<u32>{}, !cpu ? std::function<bool()>() : [cpu]() { return !!(cpu->state & cpu_flag::exit); })) { return {nullptr, CellError{CELL_CANCEL + 0u}}; } // Validate analyser results (not required) ovlm->validate(0); if (!ar && !virtual_load) { ensure(idm::import_existing<lv2_obj, lv2_overlay>(ovlm)); try_spawn_ppu_if_exclusive_program(*ovlm); } return {std::move(ovlm), {}}; } bool ppu_load_rel_exec(const ppu_rel_object& elf) { ppu_module relm{}; struct on_fatal_error { ppu_module& relm; bool errored = true; ~on_fatal_error() { if (!errored) { return; } // Revert previous allocations on an error for (const auto& seg : relm.secs) { vm::dealloc(seg.addr); } } } error_handler{relm}; u32 memsize = 0; for (const auto& s : elf.shdrs) { if (s.sh_type != sec_type::sht_progbits) { memsize = utils::align<u32>(memsize + vm::cast(s.sh_size), 128); } } u32 addr = vm::alloc(memsize, vm::main); if (!addr) { ppu_loader.error("ppu_load_rel_exec(): vm::alloc() failed (memsz=0x%x)", memsize); return false; } ppu_register_range(addr, memsize); // Copy references to sections for the purpose of sorting executable sections before non-executable ones std::vector<const elf_shdata<elf_be, u64>*> shdrs(elf.shdrs.size()); for (auto& ref : shdrs) { ref = &elf.shdrs[&ref - shdrs.data()]; } std::stable_sort(shdrs.begin(), shdrs.end(), [](auto& a, auto& b) -> bool { const bs_t<sh_flag> flags_a_has = 
a->sh_flags() - b->sh_flags(); return flags_a_has.all_of(sh_flag::shf_execinstr); }); // Load sections for (auto ptr : shdrs) { const auto& s = *ptr; ppu_loader.notice("** Section: sh_type=0x%x, addr=0x%llx, size=0x%llx, flags=0x%x", std::bit_cast<u32>(s.sh_type), s.sh_addr, s.sh_size, s._sh_flags); if (s.sh_type == sec_type::sht_progbits && s.sh_size && s.sh_flags().all_of(sh_flag::shf_alloc)) { ppu_segment _sec; const u32 size = _sec.size = vm::cast(s.sh_size); _sec.type = std::bit_cast<u32>(s.sh_type); _sec.flags = static_cast<u32>(s._sh_flags & 7); _sec.filesz = size; _sec.addr = addr; relm.secs.emplace_back(_sec); std::memcpy(vm::base(addr), s.get_bin().data(), size); addr = utils::align<u32>(addr + size, 128); } } try_spawn_ppu_if_exclusive_program(relm); error_handler.errored = false; return true; }
88,525
C++
.cpp
2,563
30.858759
237
0.654681
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
5,185
PPUTranslator.cpp
RPCS3_rpcs3/rpcs3/Emu/Cell/PPUTranslator.cpp
#include <bit> #ifdef LLVM_AVAILABLE #include "Emu/system_config.h" #include "Emu/Cell/Common.h" #include "PPUTranslator.h" #include "PPUThread.h" #include "SPUThread.h" #include "util/types.hpp" #include "util/endian.hpp" #include "util/logs.hpp" #include "util/v128.hpp" #include "util/simd.hpp" #include <algorithm> #include <unordered_set> #include <span> #ifdef ARCH_ARM64 #include "Emu/CPU/Backends/AArch64/AArch64JIT.h" #include "Emu/IdManager.h" #include "Utilities/ppu_patch.h" #endif using namespace llvm; const ppu_decoder<PPUTranslator> s_ppu_decoder; extern const ppu_decoder<ppu_itype> g_ppu_itype; extern const ppu_decoder<ppu_iname> g_ppu_iname; PPUTranslator::PPUTranslator(LLVMContext& context, Module* _module, const ppu_module& info, ExecutionEngine& engine) : cpu_translator(_module, false) , m_info(info) , m_pure_attr() { // Bind context cpu_translator::initialize(context, engine); // Initialize transform passes clear_transforms(); #ifdef ARCH_ARM64 { // Base reg table definition // Assume all functions named __0x... are PPU functions and take the m_exec as the first arg std::vector<std::pair<std::string, aarch64::gpr>> base_reg_lookup = { { "__0x", aarch64::x20 }, // PPU blocks { "__indirect", aarch64::x20 }, // Indirect jumps { "ppu_", aarch64::x19 }, // Fixed JIT helpers (e.g ppu_gateway) { "__", aarch64::x19 } // Probably link table entries }; // Build list of imposter functions built by the patch manager. 
g_fxo->need<ppu_patch_block_registry_t>(); std::vector<std::string> faux_functions_list; for (const auto& a : g_fxo->get<ppu_patch_block_registry_t>().block_addresses) { faux_functions_list.push_back(fmt::format("__0x%x", a)); } aarch64::GHC_frame_preservation_pass::config_t config = { .debug_info = false, // Set to "true" to insert debug frames on x27 .use_stack_frames = false, // We don't need this since the PPU GW allocates global scratch on the stack .hypervisor_context_offset = ::offset32(&ppu_thread::hv_ctx), .exclusion_callback = {}, // Unused, we don't have special exclusion functions on PPU .base_register_lookup = base_reg_lookup, .faux_function_list = std::move(faux_functions_list) }; // Create transform pass std::unique_ptr<translator_pass> ghc_fixup_pass = std::make_unique<aarch64::GHC_frame_preservation_pass>(config); // Register it register_transform_pass(ghc_fixup_pass); } #endif reset_transforms(); // Thread context struct (TODO: safer member access) const u32 off0 = offset32(&ppu_thread::state); const u32 off1 = offset32(&ppu_thread::gpr); std::vector<Type*> thread_struct; thread_struct.emplace_back(ArrayType::get(GetType<char>(), off0)); thread_struct.emplace_back(GetType<u32>()); // state thread_struct.emplace_back(ArrayType::get(GetType<char>(), off1 - off0 - 4)); thread_struct.insert(thread_struct.end(), 32, GetType<u64>()); // gpr[0..31] thread_struct.insert(thread_struct.end(), 32, GetType<f64>()); // fpr[0..31] thread_struct.insert(thread_struct.end(), 32, GetType<u32[4]>()); // vr[0..31] thread_struct.insert(thread_struct.end(), 32, GetType<bool>()); // cr[0..31] thread_struct.insert(thread_struct.end(), 32, GetType<bool>()); // fpscr thread_struct.insert(thread_struct.end(), 2, GetType<u64>()); // lr, ctr thread_struct.insert(thread_struct.end(), 2, GetType<u32>()); // vrsave, cia thread_struct.insert(thread_struct.end(), 3, GetType<bool>()); // so, ov, ca thread_struct.insert(thread_struct.end(), 1, GetType<u8>()); // cnt 
thread_struct.insert(thread_struct.end(), 1, GetType<bool>()); // nj thread_struct.emplace_back(ArrayType::get(GetType<char>(), 3)); // Padding thread_struct.insert(thread_struct.end(), 1, GetType<u32[4]>()); // sat thread_struct.insert(thread_struct.end(), 1, GetType<u32>()); // jm_mask m_thread_type = StructType::create(m_context, thread_struct, "context_t"); const auto md_name = MDString::get(m_context, "branch_weights"); const auto md_low = ValueAsMetadata::get(ConstantInt::get(GetType<u32>(), 1)); const auto md_high = ValueAsMetadata::get(ConstantInt::get(GetType<u32>(), 666)); // Metadata for branch weights m_md_likely = MDTuple::get(m_context, {md_name, md_high, md_low}); m_md_unlikely = MDTuple::get(m_context, {md_name, md_low, md_high}); // Sort relevant relocations (TODO) const auto caddr = m_info.segs[0].addr; const auto cend = caddr + m_info.segs[0].size; for (const auto& rel : m_info.relocs) { if (rel.addr >= caddr && rel.addr < cend) { // Check relocation type switch (rel.type) { // Ignore relative relocations, they are handled in emitted code // Comment out types we haven't confirmed as used and working case 10: case 11: // case 12: // case 13: // case 26: // case 28: { ppu_log.notice("Ignoring relative relocation at 0x%x (%u)", rel.addr, rel.type); continue; } // Ignore 64-bit relocations case 20: case 22: case 38: case 43: case 44: case 45: case 46: case 51: case 68: case 73: case 78: { ppu_log.error("Ignoring 64-bit relocation at 0x%x (%u)", rel.addr, rel.type); continue; } default: break; } // Align relocation address (TODO) if (!m_relocs.emplace(rel.addr & ~3, &rel).second) { ppu_log.error("Relocation repeated at 0x%x (%u)", rel.addr, rel.type); } } } if (!m_info.relocs.empty()) { m_reloc = &m_info.segs[0]; } const auto nan_v = v128::from32p(0x7FC00000u); nan_vec4 = make_const_vector(nan_v, get_type<f32[4]>()); } PPUTranslator::~PPUTranslator() { } Type* PPUTranslator::GetContextType() { return m_thread_type; } u32 ppu_get_far_jump(u32 pc); bool 
ppu_test_address_may_be_mmio(std::span<const be_t<u32>> insts); Function* PPUTranslator::Translate(const ppu_function& info) { m_function = m_module->getFunction(info.name); std::fill(std::begin(m_globals), std::end(m_globals), nullptr); std::fill(std::begin(m_locals), std::end(m_locals), nullptr); IRBuilder<> irb(BasicBlock::Create(m_context, "__entry", m_function)); m_ir = &irb; // Instruction address is (m_addr + base) const u64 base = m_reloc ? m_reloc->addr : 0; m_addr = info.addr - base; m_attr = info.attr; // Don't emit check in small blocks without terminator bool need_check = info.size >= 16; for (u64 addr = m_addr; addr < m_addr + info.size; addr += 4) { const u32 op = *ensure(m_info.get_ptr<u32>(::narrow<u32>(addr + base))); switch (g_ppu_itype.decode(op)) { case ppu_itype::UNK: case ppu_itype::ECIWX: case ppu_itype::ECOWX: case ppu_itype::TD: case ppu_itype::TDI: case ppu_itype::TW: case ppu_itype::TWI: case ppu_itype::B: case ppu_itype::BC: case ppu_itype::BCCTR: case ppu_itype::BCLR: case ppu_itype::SC: { need_check = true; break; } default: { break; } } } m_thread = m_function->getArg(1); m_base = m_function->getArg(3); m_exec = m_function->getArg(0); m_seg0 = m_function->getArg(2); m_gpr[0] = m_function->getArg(4); m_gpr[1] = m_function->getArg(5); m_gpr[2] = m_function->getArg(6); const auto body = BasicBlock::Create(m_context, "__body", m_function); //Call(GetType<void>(), "__trace", GetAddr()); if (need_check) { // Check status register in the entry block auto ptr = llvm::dyn_cast<GetElementPtrInst>(m_ir->CreateStructGEP(m_thread_type, m_thread, 1)); assert(ptr->getResultElementType() == GetType<u32>()); const auto vstate = m_ir->CreateLoad(ptr->getResultElementType(), ptr, true); const auto vcheck = BasicBlock::Create(m_context, "__test", m_function); m_ir->CreateCondBr(m_ir->CreateIsNull(vstate), body, vcheck, m_md_likely); m_ir->SetInsertPoint(vcheck); // Raise wait flag as soon as possible m_ir->CreateAtomicRMW(llvm::AtomicRMWInst::Or, ptr, 
m_ir->getInt32((+cpu_flag::wait).operator u32()), llvm::MaybeAlign{4}, llvm::AtomicOrdering::AcquireRelease); // Create tail call to the check function Call(GetType<void>(), "__check", m_thread, GetAddr())->setTailCall(); m_ir->CreateRetVoid(); } else { m_ir->CreateBr(body); } m_ir->SetInsertPoint(body); // Process blocks const auto block = std::make_pair(info.addr, info.size); { // Optimize BLR (prefetch LR) if (*ensure(m_info.get_ptr<u32>(block.first + block.second - 4)) == ppu_instructions::BLR()) { RegLoad(m_lr); } // Process the instructions for (m_addr = block.first - base; m_addr < block.first + block.second - base; m_addr += 4) { if (m_ir->GetInsertBlock()->getTerminator()) { break; } // Find the relocation at current address const auto rel_found = m_relocs.find(m_addr + base); if (rel_found != m_relocs.end()) { m_rel = rel_found->second; } else { m_rel = nullptr; } // Reset MMIO hint m_may_be_mmio = true; const u32 op = *ensure(m_info.get_ptr<u32>(::narrow<u32>(m_addr + base))); (this->*(s_ppu_decoder.decode(op)))({op}); if (m_rel) { // This is very bad. m_rel is normally set to nullptr after a relocation is handled (so it wasn't) ppu_log.error("LLVM: [0x%x] Unsupported relocation(%u) in '%s' (opcode=0x%x '%s'). Please report.", rel_found->first, m_rel->type, m_info.name, op, g_ppu_iname.decode(op)); return nullptr; } } // Finalize current block if necessary (create branch to the next address) if (!m_ir->GetInsertBlock()->getTerminator()) { FlushRegisters(); CallFunction(m_addr); } } run_transforms(*m_function); return m_function; } Function* PPUTranslator::GetSymbolResolver(const ppu_module& info) { m_function = cast<Function>(m_module->getOrInsertFunction("__resolve_symbols", FunctionType::get(get_type<void>(), { get_type<u8*>(), get_type<u64>() }, false)).getCallee()); IRBuilder<> irb(BasicBlock::Create(m_context, "__entry", m_function)); m_ir = &irb; // Instruction address is (m_addr + base) const u64 base = m_reloc ? 
m_reloc->addr : 0; m_exec = m_function->getArg(0); m_seg0 = m_function->getArg(1); const auto ftype = FunctionType::get(get_type<void>(), { get_type<u8*>(), // Exec base GetContextType()->getPointerTo(), // PPU context get_type<u64>(), // Segment address (for PRX) get_type<u8*>(), // Memory base get_type<u64>(), // r0 get_type<u64>(), // r1 get_type<u64>(), // r2 }, false); // Store function addresses in PPU jumptable using internal resolving instead of patching it externally. // Because, LLVM processed it extremely slow. (regression) // This is made in loop instead of inlined because it took tremendous amount of time to compile. std::vector<u32> vec_addrs; vec_addrs.reserve(info.funcs.size()); // Create an array of function pointers std::vector<llvm::Constant*> functions; for (const auto& f : info.funcs) { if (!f.size) { continue; } vec_addrs.push_back(static_cast<u32>(f.addr - base)); functions.push_back(cast<Function>(m_module->getOrInsertFunction(fmt::format("__0x%x", f.addr - base), ftype).getCallee())); } if (vec_addrs.empty()) { // Possible special case for no functions (allowing the do-while optimization) m_ir->CreateRetVoid(); run_transforms(*m_function); return m_function; } const auto addr_array_type = ArrayType::get(get_type<u32>(), vec_addrs.size()); const auto addr_array = new GlobalVariable(*m_module, addr_array_type, false, GlobalValue::PrivateLinkage, ConstantDataArray::get(m_context, vec_addrs)); // Create an array of function pointers const auto func_table_type = ArrayType::get(ftype->getPointerTo(), info.funcs.size()); const auto init_func_table = ConstantArray::get(func_table_type, functions); const auto func_table = new GlobalVariable(*m_module, func_table_type, false, GlobalVariable::PrivateLinkage, init_func_table); const auto loop_block = BasicBlock::Create(m_context, "__loop", m_function); const auto after_loop = BasicBlock::Create(m_context, "__after_loop", m_function); m_ir->CreateBr(loop_block); m_ir->SetInsertPoint(loop_block); const 
auto init_index_value = m_ir->getInt64(0); // Loop body const auto body_block = BasicBlock::Create(m_context, "__body", m_function); m_ir->CreateBr(body_block); // As do-while because vec_addrs is known to be more than 0 m_ir->SetInsertPoint(body_block); const auto index_value = m_ir->CreatePHI(get_type<u64>(), 2); index_value->addIncoming(init_index_value, loop_block); auto ptr_inst = dyn_cast<GetElementPtrInst>(m_ir->CreateGEP(addr_array->getValueType(), addr_array, {m_ir->getInt64(0), index_value})); assert(ptr_inst->getResultElementType() == get_type<u32>()); const auto func_pc = ZExt(m_ir->CreateLoad(ptr_inst->getResultElementType(), ptr_inst), get_type<u64>()); ptr_inst = dyn_cast<GetElementPtrInst>(m_ir->CreateGEP(func_table->getValueType(), func_table, {m_ir->getInt64(0), index_value})); assert(ptr_inst->getResultElementType() == ftype->getPointerTo()); const auto faddr = m_ir->CreateLoad(ptr_inst->getResultElementType(), ptr_inst); const auto faddr_int = m_ir->CreatePtrToInt(faddr, get_type<uptr>()); const auto fval = m_ir->CreateOr(m_ir->CreateShl(m_seg0, 32 + 3), faddr_int); const auto pos = m_ir->CreateShl(m_reloc ? 
m_ir->CreateAdd(func_pc, m_seg0) : func_pc, 1); const auto ptr = dyn_cast<GetElementPtrInst>(m_ir->CreateGEP(get_type<u8>(), m_exec, pos)); // Store to jumptable m_ir->CreateStore(fval, ptr); // Increment index and branch back to loop const auto post_add = m_ir->CreateAdd(index_value, m_ir->getInt64(1)); index_value->addIncoming(post_add, body_block); Value* index_check = m_ir->CreateICmpULT(post_add, m_ir->getInt64(vec_addrs.size())); m_ir->CreateCondBr(index_check, body_block, after_loop); // Set insertion point to afterloop_block m_ir->SetInsertPoint(after_loop); m_ir->CreateRetVoid(); run_transforms(*m_function); return m_function; } Value* PPUTranslator::VecHandleNan(Value* val) { const auto is_nan = m_ir->CreateFCmpUNO(val, val); val = m_ir->CreateSelect(is_nan, nan_vec4, val); return val; } Value* PPUTranslator::VecHandleDenormal(Value* val) { const auto type = val->getType(); const auto value = bitcast(val, GetType<u32[4]>()); const auto mask = SExt(m_ir->CreateICmpEQ(m_ir->CreateAnd(value, Broadcast(RegLoad(m_jm_mask), 4)), ConstantAggregateZero::get(value->getType())), GetType<s32[4]>()); const auto nz = m_ir->CreateLShr(mask, 1); const auto result = m_ir->CreateAnd(m_ir->CreateNot(nz), value); return bitcast(result, type); } Value* PPUTranslator::VecHandleResult(Value* val) { val = g_cfg.core.ppu_fix_vnan ? VecHandleNan(val) : val; val = g_cfg.core.ppu_llvm_nj_fixup ? 
VecHandleDenormal(val) : val; return val; } Value* PPUTranslator::GetAddr(u64 _add) { if (m_reloc) { // Load segment address from global variable, compute actual instruction address return m_ir->CreateAdd(m_ir->getInt64(m_addr + _add), m_seg0); } return m_ir->getInt64(m_addr + _add); } Type* PPUTranslator::ScaleType(Type* type, s32 pow2) { ensure(type->getScalarType()->isIntegerTy()); ensure(pow2 > -32 && pow2 < 32); uint scaled = type->getScalarSizeInBits(); ensure((scaled & (scaled - 1)) == 0); if (pow2 > 0) { scaled <<= pow2; } else if (pow2 < 0) { scaled >>= -pow2; } ensure(scaled); const auto new_type = m_ir->getIntNTy(scaled); const auto vec_type = dyn_cast<FixedVectorType>(type); return vec_type ? VectorType::get(new_type, vec_type->getNumElements(), false) : cast<Type>(new_type); } Value* PPUTranslator::DuplicateExt(Value* arg) { const auto extended = ZExt(arg); return m_ir->CreateOr(extended, m_ir->CreateShl(extended, arg->getType()->getScalarSizeInBits())); } Value* PPUTranslator::RotateLeft(Value* arg, u64 n) { return !n ? arg : m_ir->CreateOr(m_ir->CreateShl(arg, n), m_ir->CreateLShr(arg, arg->getType()->getScalarSizeInBits() - n)); } Value* PPUTranslator::RotateLeft(Value* arg, Value* n) { const u64 mask = arg->getType()->getScalarSizeInBits() - 1; return m_ir->CreateOr(m_ir->CreateShl(arg, m_ir->CreateAnd(n, mask)), m_ir->CreateLShr(arg, m_ir->CreateAnd(m_ir->CreateNeg(n), mask))); } void PPUTranslator::CallFunction(u64 target, Value* indirect) { const auto type = m_function->getFunctionType(); const auto block = m_ir->GetInsertBlock(); FunctionCallee callee; auto seg0 = m_seg0; if (!indirect) { const u64 base = m_reloc ? 
m_reloc->addr : 0; const u32 caddr = m_info.segs[0].addr; const u32 cend = caddr + m_info.segs[0].size - 1; const u64 _target = target + base; if (_target >= u32{umax}) { Call(GetType<void>(), "__error", m_thread, GetAddr(), m_ir->getInt32(*ensure(m_info.get_ptr<u32>(::narrow<u32>(m_addr + base))))); m_ir->CreateRetVoid(); return; } else if (_target >= caddr && _target <= cend) { u32 target_last = static_cast<u32>(_target); std::unordered_set<u32> passed_targets{target_last}; // Try to follow unconditional branches as long as there is no infinite loop while (target_last != _target) { const ppu_opcode_t op{*ensure(m_info.get_ptr<u32>(target_last))}; const ppu_itype::type itype = g_ppu_itype.decode(op.opcode); if (((itype == ppu_itype::BC && (op.bo & 0x14) == 0x14) || itype == ppu_itype::B) && !op.lk) { const u32 new_target = (op.aa ? 0 : target_last) + (itype == ppu_itype::B ? +op.bt24 : +op.bt14); if (target_last >= caddr && target_last <= cend) { if (passed_targets.emplace(new_target).second) { // Ok target_last = new_target; continue; } // Infinite loop detected target_last = static_cast<u32>(_target); } // Odd destination } else if (itype == ppu_itype::BCLR && (op.bo & 0x14) == 0x14 && !op.lk) { // Special case: empty function // In this case the branch can be treated as BCLR because previous CIA does not matter indirect = RegLoad(m_lr); } break; } if (!indirect) { callee = m_module->getOrInsertFunction(fmt::format("__0x%x", target_last - base), type); cast<Function>(callee.getCallee())->setCallingConv(CallingConv::GHC); } } else { indirect = m_reloc ? 
m_ir->CreateAdd(m_ir->getInt64(target), seg0) : m_ir->getInt64(target); } } if (indirect) { m_ir->CreateStore(Trunc(indirect, GetType<u32>()), m_ir->CreateStructGEP(m_thread_type, m_thread, static_cast<uint>(&m_cia - m_locals))); // Try to optimize if (auto inst = dyn_cast_or_null<Instruction>(indirect)) { if (auto next = inst->getNextNode()) { m_ir->SetInsertPoint(next); } } const auto pos = m_ir->CreateShl(indirect, 1); const auto ptr = dyn_cast<GetElementPtrInst>(m_ir->CreateGEP(get_type<u8>(), m_exec, pos)); const auto val = m_ir->CreateLoad(get_type<u64>(), ptr); callee = FunctionCallee(type, m_ir->CreateIntToPtr(m_ir->CreateAnd(val, 0xffff'ffff'ffff), type->getPointerTo())); // Load new segment address seg0 = m_ir->CreateShl(m_ir->CreateLShr(val, 48), 13); } m_ir->SetInsertPoint(block); const auto c = m_ir->CreateCall(callee, {m_exec, m_thread, seg0, m_base, GetGpr(0), GetGpr(1), GetGpr(2)}); c->setTailCallKind(llvm::CallInst::TCK_Tail); c->setCallingConv(CallingConv::GHC); m_ir->CreateRetVoid(); } Value* PPUTranslator::RegInit(Value*& local) { const auto index = ::narrow<uint>(&local - m_locals); if (auto old = cast_or_null<Instruction>(m_globals[index])) { old->eraseFromParent(); } // (Re)Initialize global, will be written in FlushRegisters m_globals[index] = m_ir->CreateStructGEP(m_thread_type, m_thread, index); return m_globals[index]; } Value* PPUTranslator::RegLoad(Value*& local) { const auto index = ::narrow<uint>(&local - m_locals); if (local) { // Simple load return local; } // Load from the global value auto ptr = llvm::dyn_cast<llvm::GetElementPtrInst>(m_ir->CreateStructGEP(m_thread_type, m_thread, index)); local = m_ir->CreateLoad(ptr->getResultElementType(), ptr); return local; } void PPUTranslator::RegStore(llvm::Value* value, llvm::Value*& local) { RegInit(local); local = value; } void PPUTranslator::FlushRegisters() { const auto block = m_ir->GetInsertBlock(); for (auto& local : m_locals) { const auto index = ::narrow<uint>(&local - m_locals); 
// Store value if necessary if (local && m_globals[index]) { if (auto next = cast<Instruction>(m_globals[index])->getNextNode()) { m_ir->SetInsertPoint(next); } else { m_ir->SetInsertPoint(block); } m_ir->CreateStore(local, m_globals[index]); m_globals[index] = nullptr; } } m_ir->SetInsertPoint(block); } Value* PPUTranslator::Solid(Value* value) { const u32 size = ::narrow<u32>(+value->getType()->getPrimitiveSizeInBits()); /* Workarounds (casting bool vectors directly may produce invalid code) */ if (value->getType() == GetType<bool[4]>()) { return bitcast(SExt(value, GetType<u32[4]>()), m_ir->getIntNTy(128)); } if (value->getType() == GetType<bool[8]>()) { return bitcast(SExt(value, GetType<u16[8]>()), m_ir->getIntNTy(128)); } if (value->getType() == GetType<bool[16]>()) { return bitcast(SExt(value, GetType<u8[16]>()), m_ir->getIntNTy(128)); } return bitcast(value, m_ir->getIntNTy(size)); } Value* PPUTranslator::IsZero(Value* value) { return m_ir->CreateIsNull(Solid(value)); } Value* PPUTranslator::IsNotZero(Value* value) { return m_ir->CreateIsNotNull(Solid(value)); } Value* PPUTranslator::IsOnes(Value* value) { value = Solid(value); return m_ir->CreateICmpEQ(value, ConstantInt::getSigned(value->getType(), -1)); } Value* PPUTranslator::IsNotOnes(Value* value) { value = Solid(value); return m_ir->CreateICmpNE(value, ConstantInt::getSigned(value->getType(), -1)); } Value* PPUTranslator::Broadcast(Value* value, u32 count) { if (const auto cv = dyn_cast<Constant>(value)) { return ConstantVector::getSplat(llvm::ElementCount::get(count, false), cv); } return m_ir->CreateVectorSplat(count, value); } Value* PPUTranslator::Shuffle(Value* left, Value* right, std::initializer_list<u32> indices) { const auto type = left->getType(); if (!right) { right = UndefValue::get(type); } if (!m_is_be) { std::vector<u32> data; data.reserve(indices.size()); const u32 mask = cast<FixedVectorType>(type)->getNumElements() - 1; // Transform indices (works for vectors with size 2^N) for (usz 
i = 0; i < indices.size(); i++) { data.push_back(*(indices.begin() + indices.size() - 1 - i) ^ mask); } return m_ir->CreateShuffleVector(left, right, ConstantDataVector::get(m_context, data)); } return m_ir->CreateShuffleVector(left, right, ConstantDataVector::get(m_context, { indices.begin(), indices.end() })); } Value* PPUTranslator::SExt(Value* value, Type* type) { type = type ? type : ScaleType(value->getType(), 1); return value->getType() != type ? m_ir->CreateSExt(value, type) : value; } Value* PPUTranslator::ZExt(Value* value, Type* type) { type = type ? type : ScaleType(value->getType(), 1); return value->getType() != type ? m_ir->CreateZExt(value, type) : value; } Value* PPUTranslator::Add(std::initializer_list<Value*> args) { Value* result{}; for (auto arg : args) { result = result ? m_ir->CreateAdd(result, arg) : arg; } return result; } Value* PPUTranslator::Trunc(Value* value, Type* type) { type = type ? type : ScaleType(value->getType(), -1); return type != value->getType() ? 
m_ir->CreateTrunc(value, type) : value; } void PPUTranslator::UseCondition(MDNode* hint, Value* cond) { FlushRegisters(); if (cond) { const auto local = BasicBlock::Create(m_context, "__cond", m_function); const auto next = BasicBlock::Create(m_context, "__next", m_function); m_ir->CreateCondBr(cond, local, next, hint); m_ir->SetInsertPoint(next); CallFunction(m_addr + 4); m_ir->SetInsertPoint(local); } } llvm::Value* PPUTranslator::GetMemory(llvm::Value* addr) { return m_ir->CreateGEP(get_type<u8>(), m_base, addr); } void PPUTranslator::TestAborted() { const auto body = BasicBlock::Create(m_context, fmt::format("__body_0x%x_%s", m_cia, m_ir->GetInsertBlock()->getName().str()), m_function); // Check status register in the entry block auto ptr = llvm::dyn_cast<GetElementPtrInst>(m_ir->CreateStructGEP(m_thread_type, m_thread, 1)); assert(ptr->getResultElementType() == GetType<u32>()); const auto vstate = m_ir->CreateLoad(ptr->getResultElementType(), ptr, true); const auto vcheck = BasicBlock::Create(m_context, fmt::format("__test_0x%x_%s", m_cia, m_ir->GetInsertBlock()->getName().str()), m_function); m_ir->CreateCondBr(m_ir->CreateIsNull(m_ir->CreateAnd(vstate, static_cast<u32>(cpu_flag::again + cpu_flag::exit))), body, vcheck, m_md_likely); m_ir->SetInsertPoint(vcheck); // Create tail call to the check function Call(GetType<void>(), "__check", m_thread, GetAddr())->setTailCall(); m_ir->CreateRetVoid(); m_ir->SetInsertPoint(body); } Value* PPUTranslator::ReadMemory(Value* addr, Type* type, bool is_be, u32 align) { const u32 size = ::narrow<u32>(+type->getPrimitiveSizeInBits()); if (m_may_be_mmio && size == 32) { // Test for MMIO patterns struct instructions_to_test { be_t<u32> insts[128]; }; m_may_be_mmio = false; if (auto ptr = m_info.get_ptr<instructions_to_test>(std::max<u32>(m_info.segs[0].addr, (m_reloc ? 
m_reloc->addr : 0) + utils::sub_saturate<u32>(::narrow<u32>(m_addr), sizeof(instructions_to_test) / 2)))) { if (ppu_test_address_may_be_mmio(std::span(ptr->insts))) { m_may_be_mmio = true; } } } if (is_be ^ m_is_be && size > 8) { llvm::Value* value{}; // Read, byteswap, bitcast const auto int_type = m_ir->getIntNTy(size); if (m_may_be_mmio && size == 32) { FlushRegisters(); RegStore(Trunc(GetAddr()), m_cia); ppu_log.notice("LLVM: Detected potential MMIO32 read at [0x%08x]", m_addr + (m_reloc ? m_reloc->addr : 0)); value = Call(GetType<u32>(), "__read_maybe_mmio32", m_base, addr); TestAborted(); } else { const auto inst = m_ir->CreateAlignedLoad(int_type, GetMemory(addr), llvm::MaybeAlign{align}); inst->setVolatile(true); value = inst; } return bitcast(Call(int_type, fmt::format("llvm.bswap.i%u", size), value), type); } if (m_may_be_mmio && size == 32) { FlushRegisters(); RegStore(Trunc(GetAddr()), m_cia); ppu_log.notice("LLVM: Detected potential MMIO32 read at [0x%08x]", m_addr + (m_reloc ? m_reloc->addr : 0)); Value* r = Call(GetType<u32>(), "__read_maybe_mmio32", m_base, addr); TestAborted(); return r; } // Read normally const auto r = m_ir->CreateAlignedLoad(type, GetMemory(addr), llvm::MaybeAlign{align}); r->setVolatile(true); return r; } void PPUTranslator::WriteMemory(Value* addr, Value* value, bool is_be, u32 align) { const auto type = value->getType(); const u32 size = ::narrow<u32>(+type->getPrimitiveSizeInBits()); if (is_be ^ m_is_be && size > 8) { // Bitcast, byteswap const auto int_type = m_ir->getIntNTy(size); value = Call(int_type, fmt::format("llvm.bswap.i%u", size), bitcast(value, int_type)); } if (m_may_be_mmio && size == 32) { // Test for MMIO patterns struct instructions_to_test { be_t<u32> insts[128]; }; if (auto ptr = m_info.get_ptr<instructions_to_test>(std::max<u32>(m_info.segs[0].addr, (m_reloc ? 
m_reloc->addr : 0) + utils::sub_saturate<u32>(::narrow<u32>(m_addr), sizeof(instructions_to_test) / 2)))) { if (ppu_test_address_may_be_mmio(std::span(ptr->insts))) { FlushRegisters(); RegStore(Trunc(GetAddr()), m_cia); ppu_log.notice("LLVM: Detected potential MMIO32 write at [0x%08x]", m_addr + (m_reloc ? m_reloc->addr : 0)); Call(GetType<void>(), "__write_maybe_mmio32", m_base, addr, value); TestAborted(); return; } } } // Write m_ir->CreateAlignedStore(value, GetMemory(addr), llvm::MaybeAlign{align})->setVolatile(true); } void PPUTranslator::CompilationError(const std::string& error) { ppu_log.error("LLVM: [0x%08x] Error: %s", m_addr + (m_reloc ? m_reloc->addr : 0), error); } void PPUTranslator::MFVSCR(ppu_opcode_t op) { const auto vsat = g_cfg.core.ppu_set_sat_bit ? ZExt(IsNotZero(RegLoad(m_sat)), GetType<u32>()) : m_ir->getInt32(0); const auto vscr = m_ir->CreateOr(vsat, m_ir->CreateShl(ZExt(RegLoad(m_nj), GetType<u32>()), 16)); SetVr(op.vd, m_ir->CreateInsertElement(ConstantAggregateZero::get(GetType<u32[4]>()), vscr, m_ir->getInt32(m_is_be ? 3 : 0))); } void PPUTranslator::MTVSCR(ppu_opcode_t op) { const auto vscr = m_ir->CreateExtractElement(GetVr(op.vb, VrType::vi32), m_ir->getInt32(m_is_be ? 
3 : 0)); const auto nj = Trunc(m_ir->CreateLShr(vscr, 16), GetType<bool>()); RegStore(nj, m_nj); if (g_cfg.core.ppu_llvm_nj_fixup) RegStore(m_ir->CreateSelect(nj, m_ir->getInt32(0x7f80'0000), m_ir->getInt32(0x7fff'ffff)), m_jm_mask); if (g_cfg.core.ppu_set_sat_bit) RegStore(m_ir->CreateInsertElement(ConstantAggregateZero::get(GetType<u32[4]>()), m_ir->CreateAnd(vscr, 1), m_ir->getInt32(0)), m_sat); } void PPUTranslator::VADDCUW(ppu_opcode_t op) { const auto [a, b] = get_vrs<u32[4]>(op.va, op.vb); set_vr(op.vd, zext<u32[4]>(a + b < a)); } void PPUTranslator::VADDFP(ppu_opcode_t op) { const auto [a, b] = get_vrs<f32[4]>(op.va, op.vb); set_vr(op.vd, vec_handle_result(a + b)); } void PPUTranslator::VADDSBS(ppu_opcode_t op) { const auto [a, b] = get_vrs<s8[16]>(op.va, op.vb); const auto r = add_sat(a, b); set_vr(op.vd, r); set_sat(r ^ (a + b)); } void PPUTranslator::VADDSHS(ppu_opcode_t op) { const auto [a, b] = get_vrs<s16[8]>(op.va, op.vb); const auto r = add_sat(a, b); set_vr(op.vd, r); set_sat(r ^ (a + b)); } void PPUTranslator::VADDSWS(ppu_opcode_t op) { const auto [a, b] = get_vrs<s32[4]>(op.va, op.vb); const auto r = add_sat(a, b); set_vr(op.vd, r); set_sat(r ^ (a + b)); } void PPUTranslator::VADDUBM(ppu_opcode_t op) { const auto [a, b] = get_vrs<u8[16]>(op.va, op.vb); set_vr(op.vd, a + b); } void PPUTranslator::VADDUBS(ppu_opcode_t op) { const auto [a, b] = get_vrs<u8[16]>(op.va, op.vb); const auto r = add_sat(a, b); set_vr(op.vd, r); set_sat(r ^ (a + b)); } void PPUTranslator::VADDUHM(ppu_opcode_t op) { const auto [a, b] = get_vrs<u16[8]>(op.va, op.vb); set_vr(op.vd, a + b); } void PPUTranslator::VADDUHS(ppu_opcode_t op) { const auto [a, b] = get_vrs<u16[8]>(op.va, op.vb); const auto r = add_sat(a, b); set_vr(op.vd, r); set_sat(r ^ (a + b)); } void PPUTranslator::VADDUWM(ppu_opcode_t op) { const auto [a, b] = get_vrs<u32[4]>(op.va, op.vb); set_vr(op.vd, a + b); } void PPUTranslator::VADDUWS(ppu_opcode_t op) { const auto [a, b] = get_vrs<u32[4]>(op.va, op.vb); 
// Tail of VADDSHS (definition starts above this chunk): write the saturated
// sum; SAT is set wherever the saturated result differs from the wrapping sum.
const auto r = add_sat(a, b);
	set_vr(op.vd, r);
	set_sat(r ^ (a + b));
}

// Vector bitwise AND.
void PPUTranslator::VAND(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u32[4]>(op.va, op.vb);
	set_vr(op.vd, a & b);
}

// Vector bitwise AND with complement of vb.
void PPUTranslator::VANDC(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u32[4]>(op.va, op.vb);
	set_vr(op.vd, a & ~b);
}

// Vector average — signed/unsigned, byte/halfword/word element variants.
void PPUTranslator::VAVGSB(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<s8[16]>(op.va, op.vb);
	set_vr(op.vd, avg(a, b));
}

void PPUTranslator::VAVGSH(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<s16[8]>(op.va, op.vb);
	set_vr(op.vd, avg(a, b));
}

void PPUTranslator::VAVGSW(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<s32[4]>(op.va, op.vb);
	set_vr(op.vd, avg(a, b));
}

void PPUTranslator::VAVGUB(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u8[16]>(op.va, op.vb);
	set_vr(op.vd, avg(a, b));
}

void PPUTranslator::VAVGUH(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u16[8]>(op.va, op.vb);
	set_vr(op.vd, avg(a, b));
}

void PPUTranslator::VAVGUW(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u32[4]>(op.va, op.vb);
	set_vr(op.vd, avg(a, b));
}

// Convert signed fixed-point words to float, scaled by 2^-vuimm.
void PPUTranslator::VCFSX(ppu_opcode_t op)
{
	const auto b = get_vr<s32[4]>(op.vb);
	set_vr(op.vd, fpcast<f32[4]>(b) * fsplat<f32[4]>(std::pow(2, -static_cast<int>(op.vuimm))));
}

// Convert unsigned fixed-point words to float, scaled by 2^-vuimm.
void PPUTranslator::VCFUX(ppu_opcode_t op)
{
	const auto b = get_vr<u32[4]>(op.vb);
	set_vr(op.vd, fpcast<f32[4]>(b) * fsplat<f32[4]>(std::pow(2, -static_cast<int>(op.vuimm))));
}

// Vector compare bounds float: bit 31 = !(a <= b), bit 30 = !(a >= -b).
// fcmp_uno is used so NaN operands set both "out of bounds" bits.
void PPUTranslator::VCMPBFP(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<f32[4]>(op.va, op.vb);
	const auto nle = sext<s32[4]>(fcmp_uno(a > b)) & 0x8000'0000;
	const auto nge = sext<s32[4]>(fcmp_uno(a < -b)) & 0x4000'0000;
	const auto r = eval(nle | nge);
	set_vr(op.vd, r);
	if (op.oe) SetCrField(6, m_ir->getFalse(), m_ir->getFalse(), IsZero(r.value), m_ir->getFalse());
}

// Vector compares below: each lane becomes an all-ones/all-zeros mask; with
// the record bit (op.oe) CR6 is set to (all lanes true, 0, all lanes false, 0).
void PPUTranslator::VCMPEQFP(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<f32[4]>(op.va, op.vb);
	const auto r = eval(sext<s32[4]>(fcmp_ord(a == b)));
	set_vr(op.vd, r);
	if (op.oe) SetCrField(6, IsOnes(r.value), m_ir->getFalse(), IsZero(r.value), m_ir->getFalse());
}

void PPUTranslator::VCMPEQUB(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u8[16]>(op.va, op.vb);
	const auto r = eval(sext<s8[16]>(a == b));
	set_vr(op.vd, r);
	if (op.oe) SetCrField(6, IsOnes(r.value), m_ir->getFalse(), IsZero(r.value), m_ir->getFalse());
}

void PPUTranslator::VCMPEQUH(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u16[8]>(op.va, op.vb);
	const auto r = eval(sext<s16[8]>(a == b));
	set_vr(op.vd, r);
	if (op.oe) SetCrField(6, IsOnes(r.value), m_ir->getFalse(), IsZero(r.value), m_ir->getFalse());
}

void PPUTranslator::VCMPEQUW(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u32[4]>(op.va, op.vb);
	const auto r = eval(sext<s32[4]>(a == b));
	set_vr(op.vd, r);
	if (op.oe) SetCrField(6, IsOnes(r.value), m_ir->getFalse(), IsZero(r.value), m_ir->getFalse());
}

void PPUTranslator::VCMPGEFP(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<f32[4]>(op.va, op.vb);
	const auto r = eval(sext<s32[4]>(fcmp_ord(a >= b)));
	set_vr(op.vd, r);
	if (op.oe) SetCrField(6, IsOnes(r.value), m_ir->getFalse(), IsZero(r.value), m_ir->getFalse());
}

void PPUTranslator::VCMPGTFP(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<f32[4]>(op.va, op.vb);
	const auto r = eval(sext<s32[4]>(fcmp_ord(a > b)));
	set_vr(op.vd, r);
	if (op.oe) SetCrField(6, IsOnes(r.value), m_ir->getFalse(), IsZero(r.value), m_ir->getFalse());
}

void PPUTranslator::VCMPGTSB(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<s8[16]>(op.va, op.vb);
	const auto r = eval(sext<s8[16]>(a > b));
	set_vr(op.vd, r);
	if (op.oe) SetCrField(6, IsOnes(r.value), m_ir->getFalse(), IsZero(r.value), m_ir->getFalse());
}

void PPUTranslator::VCMPGTSH(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<s16[8]>(op.va, op.vb);
	const auto r = eval(sext<s16[8]>(a > b));
	set_vr(op.vd, r);
	if (op.oe) SetCrField(6, IsOnes(r.value), m_ir->getFalse(), IsZero(r.value), m_ir->getFalse());
}

void PPUTranslator::VCMPGTSW(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<s32[4]>(op.va, op.vb);
	const auto r = eval(sext<s32[4]>(a > b));
	set_vr(op.vd, r);
	if (op.oe) SetCrField(6, IsOnes(r.value), m_ir->getFalse(), IsZero(r.value), m_ir->getFalse());
}

// Unsigned greater-than compares: lanes loaded unsigned, comparison is
// unsigned, mask sign-extended for the all-ones/all-zeros result.
void PPUTranslator::VCMPGTUB(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u8[16]>(op.va, op.vb);
	const auto r = eval(sext<s8[16]>(a > b));
	set_vr(op.vd, r);
	if (op.oe) SetCrField(6, IsOnes(r.value), m_ir->getFalse(), IsZero(r.value), m_ir->getFalse());
}

void PPUTranslator::VCMPGTUH(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u16[8]>(op.va, op.vb);
	const auto r = eval(sext<s16[8]>(a > b));
	set_vr(op.vd, r);
	if (op.oe) SetCrField(6, IsOnes(r.value), m_ir->getFalse(), IsZero(r.value), m_ir->getFalse());
}

void PPUTranslator::VCMPGTUW(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u32[4]>(op.va, op.vb);
	const auto r = eval(sext<s32[4]>(a > b));
	set_vr(op.vd, r);
	if (op.oe) SetCrField(6, IsOnes(r.value), m_ir->getFalse(), IsZero(r.value), m_ir->getFalse());
}

// Convert float to signed word with saturation, scaled by 2^vuimm.
// The low bound is clamped before fpcast to keep the cast in range; the high
// bound is patched afterwards with the max value.
void PPUTranslator::VCTSXS(ppu_opcode_t op)
{
	const auto b = get_vr<f32[4]>(op.vb);
	const auto scaled = b * fsplat<f32[4]>(std::pow(2, 0 + op.vuimm));
	const auto const1 = fsplat<f32[4]>(-std::pow(2, 31));
	const auto is_nan = fcmp_uno(b != b);
	const auto sat_l = fcmp_ord(scaled < const1);
	const auto sat_h = fcmp_ord(scaled >= fsplat<f32[4]>(std::pow(2, 31)));
	value_t<s32[4]> converted = eval(fpcast<s32[4]>(select(sat_l, const1, scaled)));

	if (g_cfg.core.ppu_fix_vnan)
		converted = eval(select(is_nan, splat<s32[4]>(0), converted)); // NaN -> 0

	set_vr(op.vd, select(sat_h, splat<s32[4]>(0x7fff'ffff), converted));
	set_sat(sext<s32[4]>(sat_l) | sext<s32[4]>(sat_h));
}

// Convert float to unsigned word with saturation, scaled by 2^vuimm.
void PPUTranslator::VCTUXS(ppu_opcode_t op)
{
	const auto b = get_vr<f32[4]>(op.vb);
	const auto scaled = b * fsplat<f32[4]>(std::pow(2, 0 + op.vuimm));
	const auto const0 = fsplat<f32[4]>(0.);
	const auto is_nan = fcmp_uno(b != b);
	const auto sat_l = fcmp_ord(scaled < const0);
	const auto sat_h = fcmp_ord(scaled >= fsplat<f32[4]>(std::pow(2, 32)));
	value_t<u32[4]> converted = eval(fpcast<u32[4]>(select(sat_l, const0, scaled)));

	if (g_cfg.core.ppu_fix_vnan)
		converted = eval(select(is_nan, splat<u32[4]>(0), converted)); // NaN -> 0

	set_vr(op.vd, select(sat_h, splat<u32[4]>(0xffff'ffff), converted));
	set_sat(sext<s32[4]>(sat_l) | sext<s32[4]>(sat_h));
}

// 2^x estimate, implemented exactly via llvm.exp2.
void PPUTranslator::VEXPTEFP(ppu_opcode_t op)
{
	const auto b = get_vr<f32[4]>(op.vb);
	set_vr(op.vd, vec_handle_result(llvm_calli<f32[4], decltype(b)>{"llvm.exp2.v4f32", {b}}));
}

// log2(x) estimate, implemented exactly via llvm.log2.
void PPUTranslator::VLOGEFP(ppu_opcode_t op)
{
	const auto b = get_vr<f32[4]>(op.vb);
	set_vr(op.vd, vec_handle_result(llvm_calli<f32[4], decltype(b)>{"llvm.log2.v4f32", {b}}));
}

// Vector multiply-add float: vd = va * vc + vb.
void PPUTranslator::VMADDFP(ppu_opcode_t op)
{
	auto [a, b, c] = get_vrs<f32[4]>(op.va, op.vb, op.vc);

	// Optimization: Emit only a floating multiply if the addend is zero
	if (auto [ok, data] = get_const_vector(b.value, ::narrow<u32>(m_addr)); ok)
	{
		if (data == v128::from32p(1u << 31))
		{
			set_vr(op.vd, vec_handle_result(a * c));
			ppu_log.notice("LLVM: VMADDFP with -0 addend at [0x%08x]", m_addr + (m_reloc ? m_reloc->addr : 0));
			return;
		}

		if (!m_use_fma && data == v128{})
		{
			set_vr(op.vd, vec_handle_result(a * c + fsplat<f32[4]>(0.f)));
			// NOTE(review): this branch matches a +0 addend (v128{}) but logs "-0" -
			// likely copy-paste from the branch above (compare VNMSUBFP); verify.
			ppu_log.notice("LLVM: VMADDFP with -0 addend at [0x%08x]", m_addr + (m_reloc ? m_reloc->addr : 0));
			return;
		}
	}

	if (m_use_fma)
	{
		set_vr(op.vd, vec_handle_result(fmuladd(a, c, b)));
		return;
	}

	// Emulated FMA via double precision (caution: out-of-lane algorithm)
	const auto xa = fpcast<f64[4]>(a);
	const auto xb = fpcast<f64[4]>(b);
	const auto xc = fpcast<f64[4]>(c);
	const auto xr = fmuladd(xa, xc, xb);
	set_vr(op.vd, vec_handle_result(fpcast<f32[4]>(xr)));
}

// Vector max float: fmax in both operand orders combined bitwise,
// presumably so ±0/NaN selection does not depend on operand order - confirm.
void PPUTranslator::VMAXFP(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<f32[4]>(op.va, op.vb);
	set_vr(op.vd, vec_handle_result(bitcast<f32[4]>(bitcast<u32[4]>(fmax(a, b)) & bitcast<u32[4]>(fmax(b, a)))));
}

// Vector integer max — signed/unsigned, byte/halfword/word variants.
void PPUTranslator::VMAXSB(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<s8[16]>(op.va, op.vb);
	set_vr(op.vd, max(a, b));
}

void PPUTranslator::VMAXSH(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<s16[8]>(op.va, op.vb);
	set_vr(op.vd, max(a, b));
}

void PPUTranslator::VMAXSW(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<s32[4]>(op.va, op.vb);
	set_vr(op.vd, max(a, b));
}

void PPUTranslator::VMAXUB(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u8[16]>(op.va, op.vb);
	set_vr(op.vd, max(a, b));
}

void PPUTranslator::VMAXUH(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u16[8]>(op.va, op.vb);
	set_vr(op.vd, max(a, b));
}

void PPUTranslator::VMAXUW(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u32[4]>(op.va, op.vb);
	set_vr(op.vd, max(a, b));
}

// Multiply-high and add signed halfword saturate: ((a*b)>>15)+c, widened to
// 32-bit lanes, clamped to s16 range; SAT from the overflow bits.
void PPUTranslator::VMHADDSHS(ppu_opcode_t op)
{
	// Caution: out-of-lane algorithm
	const auto [a, b, c] = get_vrs<s16[8]>(op.va, op.vb, op.vc);
	const auto m = ((sext<s32[8]>(a) * sext<s32[8]>(b)) >> 15) + sext<s32[8]>(c);
	const auto r = trunc<u16[8]>(min(max(m, splat<s32[8]>(-0x8000)), splat<s32[8]>(0x7fff)));
	set_vr(op.vd, r);
	set_sat(trunc<u16[8]>((m + 0x8000) >> 16));
}

// Same as VMHADDSHS but with rounding (+0x4000 before the >>15).
void PPUTranslator::VMHRADDSHS(ppu_opcode_t op)
{
	// Caution: out-of-lane algorithm
	const auto [a, b, c] = get_vrs<s16[8]>(op.va, op.vb, op.vc);
	const auto m = ((sext<s32[8]>(a) * sext<s32[8]>(b) + splat<s32[8]>(0x4000)) >> 15) + sext<s32[8]>(c);
	const auto r = trunc<u16[8]>(min(max(m, splat<s32[8]>(-0x8000)), splat<s32[8]>(0x7fff)));
	set_vr(op.vd, r);
	set_sat(trunc<u16[8]>((m + 0x8000) >> 16));
}

// Vector min float: fmin in both operand orders combined bitwise (see VMAXFP).
void PPUTranslator::VMINFP(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<f32[4]>(op.va, op.vb);
	set_vr(op.vd, vec_handle_result(bitcast<f32[4]>(bitcast<u32[4]>(fmin(a, b)) | bitcast<u32[4]>(fmin(b, a)))));
}

// Vector integer min — signed/unsigned, byte/halfword/word variants.
void PPUTranslator::VMINSB(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<s8[16]>(op.va, op.vb);
	set_vr(op.vd, min(a, b));
}

void PPUTranslator::VMINSH(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<s16[8]>(op.va, op.vb);
	set_vr(op.vd, min(a, b));
}

void PPUTranslator::VMINSW(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<s32[4]>(op.va, op.vb);
	set_vr(op.vd, min(a, b));
}

void PPUTranslator::VMINUB(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u8[16]>(op.va, op.vb);
	set_vr(op.vd, min(a, b));
}

void PPUTranslator::VMINUH(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u16[8]>(op.va, op.vb);
	set_vr(op.vd, min(a, b));
}

void PPUTranslator::VMINUW(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u32[4]>(op.va, op.vb);
	set_vr(op.vd, min(a, b));
}

// Multiply-low and add unsigned halfword modulo: vd = va * vb + vc (wrapping).
void PPUTranslator::VMLADDUHM(ppu_opcode_t op)
{
	const auto [a, b, c] = get_vrs<u16[8]>(op.va, op.vb, op.vc);
	set_vr(op.vd, a * b + c);
}

// Merge-high/low: interleave elements of va/vb; the index pairs encode the
// big-endian lane order used by this translator.
void PPUTranslator::VMRGHB(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u8[16]>(op.va, op.vb);
	set_vr(op.vd, shuffle2(a, b, 24, 8, 25, 9, 26, 10, 27, 11, 28, 12, 29, 13, 30, 14, 31, 15));
}

void PPUTranslator::VMRGHH(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u16[8]>(op.va, op.vb);
	set_vr(op.vd, shuffle2(a, b, 12, 4, 13, 5, 14, 6, 15, 7));
}

void PPUTranslator::VMRGHW(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u32[4]>(op.va, op.vb);
	set_vr(op.vd, shuffle2(a, b, 6, 2, 7, 3));
}

void PPUTranslator::VMRGLB(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u8[16]>(op.va, op.vb);
	set_vr(op.vd, shuffle2(a, b, 16, 0, 17, 1, 18, 2, 19, 3, 20, 4, 21, 5, 22, 6, 23, 7));
}

void PPUTranslator::VMRGLH(ppu_opcode_t op)
{
	const auto [a, b] =
get_vrs<u16[8]>(op.va, op.vb);
	set_vr(op.vd, shuffle2(a, b, 8, 0, 9, 1, 10, 2, 11, 3));
}

void PPUTranslator::VMRGLW(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u32[4]>(op.va, op.vb);
	set_vr(op.vd, shuffle2(a, b, 4, 0, 5, 1));
}

// Multiply-sum mixed bytes: signed va bytes * unsigned vb bytes, four products
// summed per word lane, plus vc. Odd/even byte pairs are isolated with
// shift pairs (<<8>>8 keeps the low byte sign/zero-extended, >>8 the high one).
void PPUTranslator::VMSUMMBM(ppu_opcode_t op)
{
	const auto a = get_vr<s16[8]>(op.va);
	const auto b = get_vr<u16[8]>(op.vb);
	const auto c = get_vr<s32[4]>(op.vc);
	const auto ml = bitcast<s32[4]>((a << 8 >> 8) * noncast<s16[8]>(b << 8 >> 8));
	const auto mh = bitcast<s32[4]>((a >> 8) * noncast<s16[8]>(b >> 8));
	set_vr(op.vd, ((ml << 16 >> 16) + (ml >> 16)) + ((mh << 16 >> 16) + (mh >> 16)) + c);
}

// Multiply-sum signed halfword modulo: two s16*s16 products per word lane + vc.
void PPUTranslator::VMSUMSHM(ppu_opcode_t op)
{
	const auto [a, b, c] = get_vrs<s32[4]>(op.va, op.vb, op.vc);
	const auto ml = (a << 16 >> 16) * (b << 16 >> 16);
	const auto mh = (a >> 16) * (b >> 16);
	set_vr(op.vd, ml + mh + c);
}

// Multiply-sum signed halfword saturate: like VMSUMSHM but with signed
// saturation of the final add; x is the per-lane overflow mask derived from
// the sign bits (bit-trick; mx canonicalizes the INT_MIN product sum).
void PPUTranslator::VMSUMSHS(ppu_opcode_t op)
{
	const auto [a, b, c] = get_vrs<s32[4]>(op.va, op.vb, op.vc);
	const auto ml = (a << 16 >> 16) * (b << 16 >> 16);
	const auto mh = (a >> 16) * (b >> 16);
	const auto m = eval(ml + mh);
	const auto s = eval(m + c);
	const auto z = eval((c >> 31) ^ 0x7fffffff);
	const auto mx = eval(m ^ sext<s32[4]>(m == 0x80000000u));
	const auto x = eval(((mx ^ s) & ~(c ^ mx)) >> 31);
	set_vr(op.vd, eval((z & x) | (s & ~x)));
	set_sat(x);
}

// Multiply-sum unsigned byte modulo: four u8*u8 products per word lane + vc.
void PPUTranslator::VMSUMUBM(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u16[8]>(op.va, op.vb);
	const auto c = get_vr<u32[4]>(op.vc);
	const auto ml = bitcast<u32[4]>((a << 8 >> 8) * (b << 8 >> 8));
	const auto mh = bitcast<u32[4]>((a >> 8) * (b >> 8));
	set_vr(op.vd, eval(((ml << 16 >> 16) + (ml >> 16)) + ((mh << 16 >> 16) + (mh >> 16)) + c));
}

// Multiply-sum unsigned halfword modulo.
void PPUTranslator::VMSUMUHM(ppu_opcode_t op)
{
	const auto [a, b, c] = get_vrs<u32[4]>(op.va, op.vb, op.vc);
	const auto ml = (a << 16 >> 16) * (b << 16 >> 16);
	const auto mh = (a >> 16) * (b >> 16);
	set_vr(op.vd, ml + mh + c);
}

// Multiply-sum unsigned halfword saturate: unsigned overflow detected by the
// "sum got smaller" comparisons; overflowed lanes become all-ones.
void PPUTranslator::VMSUMUHS(ppu_opcode_t op)
{
	const auto [a, b, c] = get_vrs<u32[4]>(op.va, op.vb, op.vc);
	const auto ml = (a << 16 >> 16) * (b << 16 >> 16);
	const auto mh = (a >> 16) * (b >> 16);
	const auto s = eval(ml + mh);
	const auto s2 = eval(s + c);
	const auto x = eval((s < ml) | (s2 < s));
	set_vr(op.vd, select(x, splat<u32[4]>(-1), s2));
	set_sat(x);
}

// Multiply even/odd elements into wider lanes. ">> N" selects the even
// (high-in-lane) element, "<< N >> N" the odd (low-in-lane) element.
void PPUTranslator::VMULESB(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<s16[8]>(op.va, op.vb);
	set_vr(op.vd, (a >> 8) * (b >> 8));
}

void PPUTranslator::VMULESH(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<s32[4]>(op.va, op.vb);
	set_vr(op.vd, (a >> 16) * (b >> 16));
}

void PPUTranslator::VMULEUB(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u16[8]>(op.va, op.vb);
	set_vr(op.vd, (a >> 8) * (b >> 8));
}

void PPUTranslator::VMULEUH(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u32[4]>(op.va, op.vb);
	set_vr(op.vd, (a >> 16) * (b >> 16));
}

void PPUTranslator::VMULOSB(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<s16[8]>(op.va, op.vb);
	set_vr(op.vd, (a << 8 >> 8) * (b << 8 >> 8));
}

void PPUTranslator::VMULOSH(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<s32[4]>(op.va, op.vb);
	set_vr(op.vd, (a << 16 >> 16) * (b << 16 >> 16));
}

void PPUTranslator::VMULOUB(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u16[8]>(op.va, op.vb);
	set_vr(op.vd, (a << 8 >> 8) * (b << 8 >> 8));
}

void PPUTranslator::VMULOUH(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u32[4]>(op.va, op.vb);
	set_vr(op.vd, (a << 16 >> 16) * (b << 16 >> 16));
}

// Vector negative multiply-subtract float: vd = -(va * vc - vb).
void PPUTranslator::VNMSUBFP(ppu_opcode_t op)
{
	auto [a, b, c] = get_vrs<f32[4]>(op.va, op.vb, op.vc);

	// Optimization: Emit only a floating multiply if the addend is zero
	if (const auto [ok, data] = get_const_vector(b.value, ::narrow<u32>(m_addr)); ok)
	{
		if (data == v128{})
		{
			set_vr(op.vd, vec_handle_result(-(a * c)));
			ppu_log.notice("LLVM: VNMSUBFP with 0 addend at [0x%08x]", m_addr + (m_reloc ? m_reloc->addr : 0));
			return;
		}

		if (!m_use_fma && data == v128::from32p(1u << 31))
		{
			set_vr(op.vd, vec_handle_result(-(a * c - fsplat<f32[4]>(0.f))));
			ppu_log.notice("LLVM: VNMSUBFP with -0 addend at [0x%08x]", m_addr + (m_reloc ? m_reloc->addr : 0));
			return;
		}
	}

	// Differs from the emulated path with regards to negative zero
	if (m_use_fma)
	{
		set_vr(op.vd, vec_handle_result(-fmuladd(a, c, -b)));
		return;
	}

	// Emulated FMA via double precision (caution: out-of-lane algorithm)
	const auto xa = fpcast<f64[4]>(a);
	const auto xb = fpcast<f64[4]>(b);
	const auto xc = fpcast<f64[4]>(c);
	const auto nr = xa * xc - xb;
	set_vr(op.vd, vec_handle_result(fpcast<f32[4]>(-nr)));
}

// Vector logical NOR / OR.
void PPUTranslator::VNOR(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u32[4]>(op.va, op.vb);
	set_vr(op.vd, ~(a | b));
}

void PPUTranslator::VOR(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u32[4]>(op.va, op.vb);
	set_vr(op.vd, a | b);
}

// Vector permute. Control bytes are complemented because host byte order is
// reversed relative to PPU lane numbering. Three code paths: single-source
// (va == vb, ra/rb alias the vector fields here), AVX-512 VBMI vperm2b, and
// a generic two-pshufb select.
void PPUTranslator::VPERM(ppu_opcode_t op)
{
	const auto [a, b, c] = get_vrs<u8[16]>(op.va, op.vb, op.vc);

	if (op.ra == op.rb)
	{
		set_vr(op.vd, pshufb(a, ~c & 0xf));
		return;
	}

	if (m_use_avx512_icl)
	{
		const auto i = eval(~c);
		set_vr(op.vd, vperm2b(b, a, i));
		return;
	}

	const auto i = eval(~c & 0x1f);
	set_vr(op.vd, select(noncast<s8[16]>(c << 3) >= 0, pshufb(a, i), pshufb(b, i)));
}

// Pack pixel: 8:8:8:8 words -> 1:5:5:5 halfwords (masks pick the bit fields).
void PPUTranslator::VPKPX(ppu_opcode_t op)
{
	// Caution: out-of-lane algorithm
	const auto [a, b] = get_vrs<u32[4]>(op.va, op.vb);
	const auto ab = shuffle2(b, a, 0, 1, 2, 3, 4, 5, 6, 7);
	const auto e1 = (ab & 0x01f80000) >> 9;
	const auto e2 = (ab & 0xf800) >> 6;
	const auto e3 = (ab & 0xf8) >> 3;
	set_vr(op.vd, trunc<u16[8]>(e1 | e2 | e3));
}

// Pack with saturation family: concatenate vb:va, clamp to the target range,
// truncate; SAT is derived from bits that would be lost.
void PPUTranslator::VPKSHSS(ppu_opcode_t op)
{
	// Caution: potentially out-of-lane algorithm
	const auto [a, b] = get_vrs<s16[8]>(op.va, op.vb);
	const auto ab = shuffle2(b, a, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
	const auto r = trunc<u8[16]>(min(max(ab, splat<s16[16]>(-0x80)), splat<s16[16]>(0x7f)));
	set_vr(op.vd, r);
	set_sat(bitcast<u16[8]>((a + 0x80) | (b + 0x80)) >> 8);
}

void PPUTranslator::VPKSHUS(ppu_opcode_t op)
{
	// Caution: potentially out-of-lane algorithm
	const auto [a, b] = get_vrs<s16[8]>(op.va, op.vb);
	const auto ab = shuffle2(b, a, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
	const auto r = trunc<u8[16]>(min(max(ab, splat<s16[16]>(0)), splat<s16[16]>(0xff)));
	set_vr(op.vd, r);
	set_sat(bitcast<u16[8]>(a | b) >> 8);
}

void PPUTranslator::VPKSWSS(ppu_opcode_t op)
{
	// Caution: potentially out-of-lane algorithm
	const auto [a, b] = get_vrs<s32[4]>(op.va, op.vb);
	const auto ab = shuffle2(b, a, 0, 1, 2, 3, 4, 5, 6, 7);
	const auto r = trunc<u16[8]>(min(max(ab, splat<s32[8]>(-0x8000)), splat<s32[8]>(0x7fff)));
	set_vr(op.vd, r);
	set_sat(bitcast<u32[4]>((a + 0x8000) | (b + 0x8000)) >> 16);
}

void PPUTranslator::VPKSWUS(ppu_opcode_t op)
{
	// Caution: potentially out-of-lane algorithm
	const auto [a, b] = get_vrs<s32[4]>(op.va, op.vb);
	const auto ab = shuffle2(b, a, 0, 1, 2, 3, 4, 5, 6, 7);
	const auto r = trunc<u16[8]>(min(max(ab, splat<s32[8]>(0)), splat<s32[8]>(0xffff)));
	set_vr(op.vd, r);
	set_sat(bitcast<u32[4]>(a | b) >> 16);
}

// Pack unsigned modulo: keep the low half of each element (even byte/halfword
// indices of the vb:va concatenation).
void PPUTranslator::VPKUHUM(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u8[16]>(op.va, op.vb);
	const auto r = shuffle2(b, a, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
	set_vr(op.vd, r);
}

void PPUTranslator::VPKUHUS(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u16[8]>(op.va, op.vb);
	const auto ta = bitcast<u8[16]>(min(a, splat<u16[8]>(0xff)));
	const auto tb = bitcast<u8[16]>(min(b, splat<u16[8]>(0xff)));
	const auto r = shuffle2(tb, ta, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
	set_vr(op.vd, r);
	set_sat((a | b) >> 8);
}

void PPUTranslator::VPKUWUM(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u16[8]>(op.va, op.vb);
	const auto r = shuffle2(b, a, 0, 2, 4, 6, 8, 10, 12, 14);
	set_vr(op.vd, r);
}

void PPUTranslator::VPKUWUS(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u32[4]>(op.va, op.vb);
	const auto ta = bitcast<u16[8]>(min(a, splat<u32[4]>(0xffff)));
	const auto tb = bitcast<u16[8]>(min(b, splat<u32[4]>(0xffff)));
	const auto r = shuffle2(tb, ta, 0, 2, 4, 6, 8, 10, 12, 14);
	set_vr(op.vd, r);
	set_sat((a | b) >> 16);
}

// Reciprocal estimate, implemented as an exact division.
void PPUTranslator::VREFP(ppu_opcode_t op)
{
	set_vr(op.vd, vec_handle_result(fsplat<f32[4]>(1.0) / get_vr<f32[4]>(op.vb)));
}

// Round to floating integer: toward -inf / nearest-even / +inf / zero.
void PPUTranslator::VRFIM(ppu_opcode_t op)
{
	set_vr(op.vd, vec_handle_result(callf<f32[4]>(get_intrinsic<f32[4]>(Intrinsic::floor), get_vr<f32[4]>(op.vb))));
}

void PPUTranslator::VRFIN(ppu_opcode_t op)
{
	set_vr(op.vd, vec_handle_result(callf<f32[4]>(get_intrinsic<f32[4]>(Intrinsic::roundeven), get_vr<f32[4]>(op.vb))));
}

void PPUTranslator::VRFIP(ppu_opcode_t op)
{
	set_vr(op.vd, vec_handle_result(callf<f32[4]>(get_intrinsic<f32[4]>(Intrinsic::ceil), get_vr<f32[4]>(op.vb))));
}

void PPUTranslator::VRFIZ(ppu_opcode_t op)
{
	set_vr(op.vd, vec_handle_result(callf<f32[4]>(get_intrinsic<f32[4]>(Intrinsic::trunc), get_vr<f32[4]>(op.vb))));
}

// Rotate left by element.
void PPUTranslator::VRLB(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u8[16]>(op.va, op.vb);
	set_vr(op.vd, rol(a, b));
}

void PPUTranslator::VRLH(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u16[8]>(op.va, op.vb);
	set_vr(op.vd, rol(a, b));
}

void PPUTranslator::VRLW(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u32[4]>(op.va, op.vb);
	set_vr(op.vd, rol(a, b));
}

// Reciprocal square-root estimate, implemented as exact 1/sqrt.
void PPUTranslator::VRSQRTEFP(ppu_opcode_t op)
{
	set_vr(op.vd, vec_handle_result(fsplat<f32[4]>(1.0) / callf<f32[4]>(get_intrinsic<f32[4]>(Intrinsic::sqrt), get_vr<f32[4]>(op.vb))));
}

// Vector select: vd = (vb & vc) | (va & ~vc). When the mask is a constant of
// uniform all-ones/all-zeros elements, emit a cheaper lane-wise select.
void PPUTranslator::VSEL(ppu_opcode_t op)
{
	const auto c = get_vr<u32[4]>(op.vc);

	// Check if the constant mask doesn't require bit granularity
	if (auto [ok, mask] = get_const_vector(c.value, ::narrow<u32>(m_addr)); ok)
	{
		bool sel_32 = true;
		for (u32 i = 0; i < 4; i++)
		{
			if (mask._u32[i] && mask._u32[i] != 0xFFFFFFFF)
			{
				sel_32 = false;
				break;
			}
		}

		if (sel_32)
		{
			set_vr(op.vd, select(noncast<s32[4]>(c) != 0, get_vr<u32[4]>(op.vb), get_vr<u32[4]>(op.va)));
			return;
		}

		bool sel_16 = true;
		for (u32 i = 0; i < 8; i++)
		{
			if (mask._u16[i] && mask._u16[i] != 0xFFFF)
			{
				sel_16 = false;
				break;
			}
		}

		if (sel_16)
		{
			set_vr(op.vd, select(bitcast<s16[8]>(c) != 0, get_vr<u16[8]>(op.vb), get_vr<u16[8]>(op.va)));
			return;
		}

		bool sel_8 = true;
		for (u32 i = 0; i < 16; i++)
		{
			if (mask._u8[i] && mask._u8[i] != 0xFF)
			{
				sel_8 = false;
				break;
			}
		}

		if (sel_8)
		{
			set_vr(op.vd, select(bitcast<s8[16]>(c) != 0, get_vr<u8[16]>(op.vb), get_vr<u8[16]>(op.va)));
			return;
		}
	}

	const auto [a, b] = get_vrs<u32[4]>(op.va, op.vb);
	set_vr(op.vd, eval((b & c) | (a & ~c)));
}

// Vector shift left (whole register, bit count from vb), via per-byte funnel
// shift pulling bits from the next-lower byte.
void PPUTranslator::VSL(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u8[16]>(op.va, op.vb);
	set_vr(op.vd, fshl(a, zshuffle(a, 16, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14), b));
}

void PPUTranslator::VSLB(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u8[16]>(op.va, op.vb);
	set_vr(op.vd, a << (b & 7));
}

// Shift left double by octet immediate: choose the widest element size the
// shift amount is aligned to; the ^x terms map PPU lane order to host order.
void PPUTranslator::VSLDOI(ppu_opcode_t op)
{
	if (op.vsh == 0)
	{
		set_vr(op.vd, get_vr<u32[4]>(op.va));
	}
	else if ((op.vsh % 4) == 0)
	{
		const auto [a, b] = get_vrs<u32[4]>(op.va, op.vb);
		const auto s = op.vsh / 4;
		const auto x = 7;
		set_vr(op.vd, shuffle2(b, a, (s + 3) ^ x, (s + 2) ^ x, (s + 1) ^ x, (s) ^ x));
	}
	else if ((op.vsh % 2) == 0)
	{
		const auto [a, b] = get_vrs<u16[8]>(op.va, op.vb);
		const auto s = op.vsh / 2;
		const auto x = 15;
		set_vr(op.vd, shuffle2(b, a, (s + 7) ^ x, (s + 6) ^ x, (s + 5) ^ x, (s + 4) ^ x, (s + 3) ^ x, (s + 2) ^ x, (s + 1) ^ x, (s) ^ x));
	}
	else
	{
		const auto [a, b] = get_vrs<u8[16]>(op.va, op.vb);
		const auto s = op.vsh;
		const auto x = 31;
		set_vr(op.vd, shuffle2(b, a, (s + 15) ^ x, (s + 14) ^ x, (s + 13) ^ x, (s + 12) ^ x, (s + 11) ^ x, (s + 10) ^ x, (s + 9) ^ x, (s + 8) ^ x, (s + 7) ^ x, (s + 6) ^ x, (s + 5) ^ x, (s + 4) ^ x, (s + 3) ^ x, (s + 2) ^ x, (s + 1) ^ x, (s) ^ x));
	}
}

void PPUTranslator::VSLH(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u16[8]>(op.va, op.vb);
	set_vr(op.vd, a << (b & 15));
}

void PPUTranslator::VSLO(ppu_opcode_t op)
{
	// TODO (rare)
	const auto [a, b] =
get_vrs<u128>(op.va, op.vb);
	set_vr(op.vd, a << (b & 0x78)); // Shift left by octet: whole-register shift by a multiple of 8 bits
}

void PPUTranslator::VSLW(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u32[4]>(op.va, op.vb);
	set_vr(op.vd, a << (b & 31));
}

// Splat element vuimm of vb across all lanes (~vuimm maps PPU lane order
// to host order).
void PPUTranslator::VSPLTB(ppu_opcode_t op)
{
	const u32 ui = ~op.vuimm & 0xf;
	set_vr(op.vd, zshuffle(get_vr<u8[16]>(op.vb), ui, ui, ui, ui, ui, ui, ui, ui, ui, ui, ui, ui, ui, ui, ui, ui));
}

void PPUTranslator::VSPLTH(ppu_opcode_t op)
{
	const u32 ui = ~op.vuimm & 0x7;
	set_vr(op.vd, zshuffle(get_vr<u16[8]>(op.vb), ui, ui, ui, ui, ui, ui, ui, ui));
}

// Splat sign-extended 5-bit immediate.
void PPUTranslator::VSPLTISB(ppu_opcode_t op)
{
	set_vr(op.vd, splat<u8[16]>(op.vsimm));
}

void PPUTranslator::VSPLTISH(ppu_opcode_t op)
{
	set_vr(op.vd, splat<u16[8]>(op.vsimm));
}

void PPUTranslator::VSPLTISW(ppu_opcode_t op)
{
	set_vr(op.vd, splat<u32[4]>(op.vsimm));
}

void PPUTranslator::VSPLTW(ppu_opcode_t op)
{
	const u32 ui = ~op.vuimm & 0x3;
	set_vr(op.vd, zshuffle(get_vr<u32[4]>(op.vb), ui, ui, ui, ui));
}

// Vector shift right (whole register), funnel shift pulling bits from the
// next-higher byte (index 16 injects zero for the top byte).
void PPUTranslator::VSR(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u8[16]>(op.va, op.vb);
	set_vr(op.vd, fshr(zshuffle(a, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16), a, b));
}

// Per-element arithmetic/logical right shifts.
void PPUTranslator::VSRAB(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<s8[16]>(op.va, op.vb);
	set_vr(op.vd, a >> (b & 7));
}

void PPUTranslator::VSRAH(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<s16[8]>(op.va, op.vb);
	set_vr(op.vd, a >> (b & 15));
}

void PPUTranslator::VSRAW(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<s32[4]>(op.va, op.vb);
	set_vr(op.vd, a >> (b & 31));
}

void PPUTranslator::VSRB(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u8[16]>(op.va, op.vb);
	set_vr(op.vd, a >> (b & 7));
}

void PPUTranslator::VSRH(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u16[8]>(op.va, op.vb);
	set_vr(op.vd, a >> (b & 15));
}

void PPUTranslator::VSRO(ppu_opcode_t op)
{
	// TODO (very rare)
	const auto [a, b] = get_vrs<u128>(op.va, op.vb);
	set_vr(op.vd, a >> (b & 0x78));
}

void PPUTranslator::VSRW(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u32[4]>(op.va, op.vb);
	set_vr(op.vd, a >> (b & 31));
}

// Subtract-carryout unsigned word: 1 when no borrow occurs (a >= b).
void PPUTranslator::VSUBCUW(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u32[4]>(op.va, op.vb);
	set_vr(op.vd, zext<u32[4]>(a >= b));
}

void PPUTranslator::VSUBFP(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<f32[4]>(op.va, op.vb);
	set_vr(op.vd, vec_handle_result(a - b));
}

// Saturating subtracts: SAT is set where the saturated result differs from
// the wrapping difference.
void PPUTranslator::VSUBSBS(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<s8[16]>(op.va, op.vb);
	const auto r = sub_sat(a, b);
	set_vr(op.vd, r);
	set_sat(r ^ (a - b));
}

void PPUTranslator::VSUBSHS(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<s16[8]>(op.va, op.vb);
	const auto r = sub_sat(a, b);
	set_vr(op.vd, r);
	set_sat(r ^ (a - b));
}

void PPUTranslator::VSUBSWS(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<s32[4]>(op.va, op.vb);
	const auto r = sub_sat(a, b);
	set_vr(op.vd, r);
	set_sat(r ^ (a - b));
}

void PPUTranslator::VSUBUBM(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u8[16]>(op.va, op.vb);
	set_vr(op.vd, eval(a - b));
}

void PPUTranslator::VSUBUBS(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u8[16]>(op.va, op.vb);
	const auto r = sub_sat(a, b);
	set_vr(op.vd, r);
	set_sat(r ^ (a - b));
}

void PPUTranslator::VSUBUHM(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u16[8]>(op.va, op.vb);
	set_vr(op.vd, eval(a - b));
}

void PPUTranslator::VSUBUHS(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u16[8]>(op.va, op.vb);
	const auto r = sub_sat(a, b);
	set_vr(op.vd, r);
	set_sat(r ^ (a - b));
}

void PPUTranslator::VSUBUWM(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u32[4]>(op.va, op.vb);
	set_vr(op.vd, eval(a - b));
}

void PPUTranslator::VSUBUWS(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<u32[4]>(op.va, op.vb);
	const auto r = sub_sat(a, b);
	set_vr(op.vd, r);
	set_sat(r ^ (a - b));
}

// Sum across signed words saturate: all four words of va plus one word of vb,
// accumulated in 64-bit lanes, then saturated to s32.
void PPUTranslator::VSUMSWS(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<s32[4]>(op.va, op.vb);
	const auto x = sext<s64[2]>(zshuffle(a, 0, 1));
	const auto y = sext<s64[2]>(zshuffle(a, 2, 3));
	const auto z = sext<s64[2]>(zshuffle(b, 0, 4));
	const auto s = eval(x + y + z);
	const auto r = min(max(zshuffle(s, 0, 2) + zshuffle(s, 1, 2), splat<s64[2]>(-0x8000'0000ll)), splat<s64[2]>(0x7fff'ffff));
	set_vr(op.vd, zshuffle(bitcast<u32[4]>(r), 0, 4, 4, 4));
	set_sat(bitcast<u64[2]>(r + 0x8000'0000) >> 32);
}

// Sum across half signed words saturate: word pairs within each doubleword.
void PPUTranslator::VSUM2SWS(ppu_opcode_t op)
{
	const auto [a, b] = get_vrs<s64[2]>(op.va, op.vb);
	const auto x = a << 32 >> 32;
	const auto y = a >> 32;
	const auto z = b >> 32;
	const auto r = min(max(x + y + z, splat<s64[2]>(-0x8000'0000ll)), splat<s64[2]>(0x7fff'ffff));
	set_vr(op.vd, zshuffle(bitcast<u32[4]>(r), 0, 4, 2, 4));
	set_sat(bitcast<u64[2]>(r + 0x8000'0000) >> 32);
}

// Sum across quarter elements saturate: four bytes (or two halfwords) per
// word lane, added to vb with saturation.
void PPUTranslator::VSUM4SBS(ppu_opcode_t op)
{
	const auto a = get_vr<s16[8]>(op.va);
	const auto b = get_vr<s32[4]>(op.vb);
	const auto x = eval(bitcast<s32[4]>((a << 8 >> 8) + (a >> 8)));
	const auto s = eval((x << 16 >> 16) + (x >> 16));
	const auto r = add_sat(s, b);
	set_vr(op.vd, r);
	set_sat(r ^ (s + b));
}

void PPUTranslator::VSUM4SHS(ppu_opcode_t op)
{
	const auto a = get_vr<s32[4]>(op.va);
	const auto b = get_vr<s32[4]>(op.vb);
	const auto s = eval((a << 16 >> 16) + (a >> 16));
	const auto r = add_sat(s, b);
	set_vr(op.vd, r);
	set_sat(r ^ (s + b));
}

void PPUTranslator::VSUM4UBS(ppu_opcode_t op)
{
	const auto a = get_vr<u16[8]>(op.va);
	const auto b = get_vr<u32[4]>(op.vb);
	const auto x = eval(bitcast<u32[4]>((a & 0xff) + (a >> 8)));
	const auto s = eval((x & 0xffff) + (x >> 16));
	const auto r = add_sat(s, b);
	set_vr(op.vd, r);
	set_sat(r ^ (s + b));
}

// Expand a 1:5:5:5 pixel halfword (sign-extended to a word) into 8:8:8:8.
#define UNPACK_PIXEL_OP(px) (px & 0xff00001f) | ((px << 6) & 0x1f0000) | ((px << 3) & 0x1f00)

void PPUTranslator::VUPKHPX(ppu_opcode_t op)
{
	// Caution: potentially out-of-lane algorithm
	const auto px = sext<s32[4]>(zshuffle(get_vr<s16[8]>(op.vb), 4, 5, 6, 7));
	set_vr(op.vd, UNPACK_PIXEL_OP(px));
}

void PPUTranslator::VUPKHSB(ppu_opcode_t op)
{
	// Caution: potentially out-of-lane algorithm
	const auto r = sext<s16[8]>(zshuffle(get_vr<s8[16]>(op.vb), 8, 9, 10, 11, 12, 13, 14, 15));
	set_vr(op.vd, r);
}

void PPUTranslator::VUPKHSH(ppu_opcode_t op)
{
	// Caution: potentially out-of-lane algorithm
	const auto r = sext<s32[4]>(zshuffle(get_vr<s16[8]>(op.vb), 4, 5, 6, 7));
	set_vr(op.vd, r);
}

void PPUTranslator::VUPKLPX(ppu_opcode_t op)
{
	// Caution: potentially out-of-lane algorithm
	const auto px = sext<s32[4]>(zshuffle(get_vr<s16[8]>(op.vb), 0, 1, 2, 3));
	set_vr(op.vd, UNPACK_PIXEL_OP(px));
}

void PPUTranslator::VUPKLSB(ppu_opcode_t op)
{
	// Caution: potentially out-of-lane algorithm
	const auto r = sext<s16[8]>(zshuffle(get_vr<s8[16]>(op.vb), 0, 1, 2, 3, 4, 5, 6, 7));
	set_vr(op.vd, r);
}

void PPUTranslator::VUPKLSH(ppu_opcode_t op)
{
	// Caution: potentially out-of-lane algorithm
	const auto r = sext<s32[4]>(zshuffle(get_vr<s16[8]>(op.vb), 0, 1, 2, 3));
	set_vr(op.vd, r);
}

// Vector XOR; the self-XOR idiom is recognized and emitted as a constant.
void PPUTranslator::VXOR(ppu_opcode_t op)
{
	if (op.va == op.vb)
	{
		// Assign zero, break dependencies
		set_vr(op.vd, splat<u32[4]>(0));
		return;
	}

	const auto [a, b] = get_vrs<u32[4]>(op.va, op.vb);
	set_vr(op.vd, a ^ b);
}

// Trap doubleword/word immediate: conditional trap, marked unlikely.
void PPUTranslator::TDI(ppu_opcode_t op)
{
	UseCondition(m_md_unlikely, CheckTrapCondition(op.bo, GetGpr(op.ra), m_ir->getInt64(op.simm16)));
	Trap();
}

void PPUTranslator::TWI(ppu_opcode_t op)
{
	UseCondition(m_md_unlikely, CheckTrapCondition(op.bo, GetGpr(op.ra, 32), m_ir->getInt32(op.simm16)));
	Trap();
}

// Multiply low immediate.
void PPUTranslator::MULLI(ppu_opcode_t op)
{
	SetGpr(op.rd, m_ir->CreateMul(GetGpr(op.ra), m_ir->getInt64(op.simm16)));
}

// Subtract from immediate carrying: rd = simm - ra; CA set when no borrow.
void PPUTranslator::SUBFIC(ppu_opcode_t op)
{
	const auto a = GetGpr(op.ra);
	const auto imm = m_ir->getInt64(op.simm16);
	const auto result = m_ir->CreateSub(imm, a);
	SetGpr(op.rd, result);
	SetCarry(m_ir->CreateICmpULE(result, imm));
}

// Compare logical/algebraic immediate; l10 selects 64-bit vs 32-bit compare.
void PPUTranslator::CMPLI(ppu_opcode_t op)
{
	SetCrFieldUnsignedCmp(op.crfd, GetGpr(op.ra, op.l10 ? 64 : 32), op.l10 ? m_ir->getInt64(op.uimm16) : m_ir->getInt32(op.uimm16));
}

void PPUTranslator::CMPI(ppu_opcode_t op)
{
	SetCrFieldSignedCmp(op.crfd, GetGpr(op.ra, op.l10 ? 64 : 32), op.l10 ?
0 : m_addr) + bt14; if (op.aa && m_reloc) { CompilationError("Branch with absolute address"); } if (op.lk) { m_ir->CreateStore(GetAddr(+4), m_ir->CreateStructGEP(m_thread_type, m_thread, static_cast<uint>(&m_lr - m_locals))); } UseCondition(CheckBranchProbability(op.bo), CheckBranchCondition(op.bo, op.bi)); CallFunction(target); } void PPUTranslator::SC(ppu_opcode_t op) { if (op.opcode != ppu_instructions::SC(0) && op.opcode != ppu_instructions::SC(1)) { return UNK(op); } const auto num = GetGpr(11); RegStore(Trunc(GetAddr()), m_cia); FlushRegisters(); if (!op.lev && isa<ConstantInt>(num)) { // Try to determine syscall using the constant value from r11 const u64 index = cast<ConstantInt>(num)->getZExtValue(); if (index < 1024) { Call(GetType<void>(), fmt::format("%s", ppu_syscall_code(index)), m_thread); //Call(GetType<void>(), "__escape", m_thread)->setTailCall(); m_ir->CreateRetVoid(); return; } } Call(GetType<void>(), op.lev ? "__lv1call" : "__syscall", m_thread, num); //Call(GetType<void>(), "__escape", m_thread)->setTailCall(); m_ir->CreateRetVoid(); } void PPUTranslator::B(ppu_opcode_t op) { const s32 bt24 = op.bt24; // Workaround for VS 16.5 const u64 target = (op.aa ? 
0 : m_addr) + bt24; if (op.aa && m_reloc) { CompilationError("Branch with absolute address"); } if (op.lk) { RegStore(GetAddr(+4), m_lr); } FlushRegisters(); CallFunction(target); } void PPUTranslator::MCRF(ppu_opcode_t op) { const auto le = GetCrb(op.crfs * 4 + 0); const auto ge = GetCrb(op.crfs * 4 + 1); const auto eq = GetCrb(op.crfs * 4 + 2); const auto so = GetCrb(op.crfs * 4 + 3); SetCrField(op.crfd, le, ge, eq, so); } void PPUTranslator::BCLR(ppu_opcode_t op) { const auto target = RegLoad(m_lr); if (op.lk) { m_ir->CreateStore(GetAddr(+4), m_ir->CreateStructGEP(m_thread_type, m_thread, static_cast<uint>(&m_lr - m_locals))); } UseCondition(CheckBranchProbability(op.bo), CheckBranchCondition(op.bo, op.bi)); CallFunction(0, target); } void PPUTranslator::CRNOR(ppu_opcode_t op) { SetCrb(op.crbd, m_ir->CreateNot(m_ir->CreateOr(GetCrb(op.crba), GetCrb(op.crbb)))); } void PPUTranslator::CRANDC(ppu_opcode_t op) { SetCrb(op.crbd, m_ir->CreateAnd(GetCrb(op.crba), m_ir->CreateNot(GetCrb(op.crbb)))); } void PPUTranslator::ISYNC(ppu_opcode_t) { m_ir->CreateFence(AtomicOrdering::Acquire); } void PPUTranslator::CRXOR(ppu_opcode_t op) { SetCrb(op.crbd, m_ir->CreateXor(GetCrb(op.crba), GetCrb(op.crbb))); } void PPUTranslator::DCBI(ppu_opcode_t) { } void PPUTranslator::CRNAND(ppu_opcode_t op) { SetCrb(op.crbd, m_ir->CreateNot(m_ir->CreateAnd(GetCrb(op.crba), GetCrb(op.crbb)))); } void PPUTranslator::CRAND(ppu_opcode_t op) { SetCrb(op.crbd, m_ir->CreateAnd(GetCrb(op.crba), GetCrb(op.crbb))); } void PPUTranslator::CREQV(ppu_opcode_t op) { SetCrb(op.crbd, m_ir->CreateNot(m_ir->CreateXor(GetCrb(op.crba), GetCrb(op.crbb)))); } void PPUTranslator::CRORC(ppu_opcode_t op) { SetCrb(op.crbd, m_ir->CreateOr(GetCrb(op.crba), m_ir->CreateNot(GetCrb(op.crbb)))); } void PPUTranslator::CROR(ppu_opcode_t op) { SetCrb(op.crbd, m_ir->CreateOr(GetCrb(op.crba), GetCrb(op.crbb))); } void PPUTranslator::BCCTR(ppu_opcode_t op) { const auto target = RegLoad(m_ctr); if (op.lk) { 
m_ir->CreateStore(GetAddr(+4), m_ir->CreateStructGEP(m_thread_type, m_thread, static_cast<uint>(&m_lr - m_locals))); } UseCondition(CheckBranchProbability(op.bo | 0x4), CheckBranchCondition(op.bo | 0x4, op.bi)); CallFunction(0, target); } void PPUTranslator::RLWIMI(ppu_opcode_t op) { const u64 mask = ppu_rotate_mask(32 + op.mb32, 32 + op.me32); Value* result; if (op.mb32 <= op.me32) { if (op.mb32 == 0 && op.me32 == 31) { result = RotateLeft(GetGpr(op.rs, 32), op.sh32); } else if (op.mb32 == 0 && op.sh32 == 31 - op.me32) { result = m_ir->CreateShl(GetGpr(op.rs, 32), op.sh32); } else if (op.me32 == 31 && op.sh32 == 32 - op.mb32) { result = m_ir->CreateLShr(GetGpr(op.rs, 32), 32 - op.sh32); } else if (op.mb32 == 0 && op.sh32 < 31 - op.me32) { // INSLWI and other possible mnemonics result = m_ir->CreateAnd(m_ir->CreateShl(GetGpr(op.rs, 32), op.sh32), mask); } else if (op.me32 == 31 && 32 - op.sh32 < op.mb32) { // INSRWI and other possible mnemonics result = m_ir->CreateAnd(m_ir->CreateLShr(GetGpr(op.rs, 32), 32 - op.sh32), mask); } else { // Generic op result = m_ir->CreateAnd(RotateLeft(GetGpr(op.rs, 32), op.sh32), mask); } // Extend 32-bit op result result = ZExt(result); } else { // Full 64-bit op with duplication result = m_ir->CreateAnd(RotateLeft(DuplicateExt(GetGpr(op.rs, 32)), op.sh32), mask); } if (mask != umax) { // Insertion result = m_ir->CreateOr(result, m_ir->CreateAnd(GetGpr(op.ra), ~mask)); } SetGpr(op.ra, result); if (op.rc) SetCrFieldSignedCmp(0, result, m_ir->getInt64(0)); } void PPUTranslator::RLWINM(ppu_opcode_t op) { const u64 mask = ppu_rotate_mask(32 + op.mb32, 32 + op.me32); Value* result; if (op.mb32 <= op.me32) { if (op.mb32 == 0 && op.me32 == 31) { // ROTLWI, ROTRWI mnemonics result = RotateLeft(GetGpr(op.rs, 32), op.sh32); } else if (op.mb32 == 0 && op.sh32 == 31 - op.me32) { // SLWI mnemonic result = m_ir->CreateShl(GetGpr(op.rs, 32), op.sh32); } else if (op.me32 == 31 && op.sh32 == 32 - op.mb32) { // SRWI mnemonic result = 
m_ir->CreateLShr(GetGpr(op.rs, 32), 32 - op.sh32); } else if (op.mb32 == 0 && op.sh32 < 31 - op.me32) {
// EXTLWI and other possible mnemonics
result = m_ir->CreateAnd(m_ir->CreateShl(GetGpr(op.rs, 32), op.sh32), mask); } else if (op.me32 == 31 && 32 - op.sh32 < op.mb32) {
// EXTRWI and other possible mnemonics
result = m_ir->CreateAnd(m_ir->CreateLShr(GetGpr(op.rs, 32), 32 - op.sh32), mask); } else {
// Generic op, including CLRLWI, CLRRWI mnemonics
result = m_ir->CreateAnd(RotateLeft(GetGpr(op.rs, 32), op.sh32), mask); }
// Extend 32-bit op result
result = ZExt(result); } else {
// Full 64-bit op with duplication
result = m_ir->CreateAnd(RotateLeft(DuplicateExt(GetGpr(op.rs, 32)), op.sh32), mask); } SetGpr(op.ra, result); if (op.rc) SetCrFieldSignedCmp(0, result, m_ir->getInt64(0)); }

// rlwnm: Rotate Left Word then AND with Mask; rotate amount comes from rb.
void PPUTranslator::RLWNM(ppu_opcode_t op) { const u64 mask = ppu_rotate_mask(32 + op.mb32, 32 + op.me32); Value* result; if (op.mb32 <= op.me32) { if (op.mb32 == 0 && op.me32 == 31) {
// ROTLW mnemonic
result = RotateLeft(GetGpr(op.rs, 32), GetGpr(op.rb, 32)); } else {
// Generic op
result = m_ir->CreateAnd(RotateLeft(GetGpr(op.rs, 32), GetGpr(op.rb, 32)), mask); }
// Extend 32-bit op result
result = ZExt(result); } else {
// Full 64-bit op with duplication
result = m_ir->CreateAnd(RotateLeft(DuplicateExt(GetGpr(op.rs, 32)), GetGpr(op.rb)), mask); } SetGpr(op.ra, result); if (op.rc) SetCrFieldSignedCmp(0, result, m_ir->getInt64(0)); }

// ori: OR immediate. A pending relocation of type 4-6 replaces the immediate
// with a 16-bit value read at the instruction address + 2, then is consumed.
void PPUTranslator::ORI(ppu_opcode_t op) { Value* imm = m_ir->getInt64(op.uimm16); if (m_rel && (m_rel->type >= 4u && m_rel->type <= 6u)) { imm = ZExt(ReadMemory(GetAddr(+2), GetType<u16>()), GetType<u64>()); m_rel = nullptr; } SetGpr(op.ra, m_ir->CreateOr(GetGpr(op.rs), imm)); }

// oris: OR immediate shifted; same relocation handling as ORI, shifted left 16.
void PPUTranslator::ORIS(ppu_opcode_t op) { Value* imm = m_ir->getInt64(op.uimm16 << 16); if (m_rel && (m_rel->type >= 4u && m_rel->type <= 6u)) { imm = m_ir->CreateShl(ZExt(ReadMemory(GetAddr(+2), GetType<u16>()), GetType<u64>()), 16); m_rel = nullptr; } SetGpr(op.ra,
m_ir->CreateOr(GetGpr(op.rs), imm)); }

// xori: XOR immediate.
void PPUTranslator::XORI(ppu_opcode_t op) { SetGpr(op.ra, m_ir->CreateXor(GetGpr(op.rs), op.uimm16)); }

// xoris: XOR immediate shifted left 16.
void PPUTranslator::XORIS(ppu_opcode_t op) { SetGpr(op.ra, m_ir->CreateXor(GetGpr(op.rs), op.uimm16 << 16)); }

// andi.: AND immediate; CR0 is always updated (record form is implicit).
void PPUTranslator::ANDI(ppu_opcode_t op) { const auto result = m_ir->CreateAnd(GetGpr(op.rs), op.uimm16); SetGpr(op.ra, result); SetCrFieldSignedCmp(0, result, m_ir->getInt64(0)); }

// andis.: AND immediate shifted; CR0 is always updated.
void PPUTranslator::ANDIS(ppu_opcode_t op) { const auto result = m_ir->CreateAnd(GetGpr(op.rs), op.uimm16 << 16); SetGpr(op.ra, result); SetCrFieldSignedCmp(0, result, m_ir->getInt64(0)); }

// rldicl: Rotate Left Doubleword Immediate then Clear Left.
void PPUTranslator::RLDICL(ppu_opcode_t op) { const u32 sh = op.sh64; const u32 mb = op.mbe64; const u64 mask = ~0ull >> mb; Value* result; if (64 - sh < mb) {
// EXTRDI and other possible mnemonics
result = m_ir->CreateAnd(m_ir->CreateLShr(GetGpr(op.rs), 64 - sh), mask); } else if (64 - sh == mb) {
// SRDI mnemonic
result = m_ir->CreateLShr(GetGpr(op.rs), 64 - sh); } else {
// Generic op, including CLRLDI mnemonic
result = m_ir->CreateAnd(RotateLeft(GetGpr(op.rs), sh), mask); } SetGpr(op.ra, result); if (op.rc) SetCrFieldSignedCmp(0, result, m_ir->getInt64(0)); }

// rldicr: Rotate Left Doubleword Immediate then Clear Right.
void PPUTranslator::RLDICR(ppu_opcode_t op) { const u32 sh = op.sh64; const u32 me = op.mbe64; const u64 mask = ~0ull << (63 - me); Value* result; if (sh < 63 - me) {
// EXTLDI and other possible mnemonics
result = m_ir->CreateAnd(m_ir->CreateShl(GetGpr(op.rs), sh), mask); } else if (sh == 63 - me) {
// SLDI mnemonic
result = m_ir->CreateShl(GetGpr(op.rs), sh); } else {
// Generic op, including CLRRDI mnemonic
result = m_ir->CreateAnd(RotateLeft(GetGpr(op.rs), sh), mask); } SetGpr(op.ra, result); if (op.rc) SetCrFieldSignedCmp(0, result, m_ir->getInt64(0)); }

// rldic: Rotate Left Doubleword Immediate then Clear.
void PPUTranslator::RLDIC(ppu_opcode_t op) { const u32 sh = op.sh64; const u32 mb = op.mbe64; const u64 mask = ppu_rotate_mask(mb, 63 - sh); Value* result; if (mb == 0 && sh == 0) { result = GetGpr(op.rs); } else if (mb <= 63 - sh) {
// CLRLSLDI and other possible mnemonics result = m_ir->CreateAnd(m_ir->CreateShl(GetGpr(op.rs), sh), mask); } else { // Generic op result = m_ir->CreateAnd(RotateLeft(GetGpr(op.rs), sh), mask); } SetGpr(op.ra, result); if (op.rc) SetCrFieldSignedCmp(0, result, m_ir->getInt64(0)); } void PPUTranslator::RLDIMI(ppu_opcode_t op) { const u32 sh = op.sh64; const u32 mb = op.mbe64; const u64 mask = ppu_rotate_mask(mb, 63 - sh); Value* result; if (mb == 0 && sh == 0) { result = GetGpr(op.rs); } else if (mb <= 63 - sh) { // INSRDI and other possible mnemonics result = m_ir->CreateAnd(m_ir->CreateShl(GetGpr(op.rs), sh), mask); } else { // Generic op result = m_ir->CreateAnd(RotateLeft(GetGpr(op.rs), sh), mask); } if (mask != umax) { // Insertion result = m_ir->CreateOr(result, m_ir->CreateAnd(GetGpr(op.ra), ~mask)); } SetGpr(op.ra, result); if (op.rc) SetCrFieldSignedCmp(0, result, m_ir->getInt64(0)); } void PPUTranslator::RLDCL(ppu_opcode_t op) { const u32 mb = op.mbe64; const u64 mask = ~0ull >> mb; const auto result = m_ir->CreateAnd(RotateLeft(GetGpr(op.rs), GetGpr(op.rb)), mask); SetGpr(op.ra, result); if (op.rc) SetCrFieldSignedCmp(0, result, m_ir->getInt64(0)); } void PPUTranslator::RLDCR(ppu_opcode_t op) { const u32 me = op.mbe64; const u64 mask = ~0ull << (63 - me); const auto result = m_ir->CreateAnd(RotateLeft(GetGpr(op.rs), GetGpr(op.rb)), mask); SetGpr(op.ra, result); if (op.rc) SetCrFieldSignedCmp(0, result, m_ir->getInt64(0)); } void PPUTranslator::CMP(ppu_opcode_t op) { SetCrFieldSignedCmp(op.crfd, GetGpr(op.ra, op.l10 ? 64 : 32), GetGpr(op.rb, op.l10 ? 64 : 32)); } void PPUTranslator::TW(ppu_opcode_t op) { if (op.opcode != ppu_instructions::TRAP()) { UseCondition(m_md_unlikely, CheckTrapCondition(op.bo, GetGpr(op.ra, 32), GetGpr(op.rb, 32))); } else { FlushRegisters(); } Trap(); } void PPUTranslator::LVSL(ppu_opcode_t op) { const auto addr = value<u64>(op.ra ? 
m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)) : GetGpr(op.rb)); set_vr(op.vd, build<u8[16]>(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) + vsplat<u8[16]>(trunc<u8>(addr & 0xf))); }

// lvebx: implemented as a full 16-byte aligned vector load, same as lvx.
void PPUTranslator::LVEBX(ppu_opcode_t op) { return LVX(op); }

// subfc: rd = rb - ra with carry out (carry = "no borrow", i.e. result <= rb);
// the OE form falls through to UNK.
void PPUTranslator::SUBFC(ppu_opcode_t op) { const auto a = GetGpr(op.ra); const auto b = GetGpr(op.rb); const auto result = m_ir->CreateSub(b, a); SetGpr(op.rd, result); SetCarry(m_ir->CreateICmpULE(result, b)); if (op.rc) SetCrFieldSignedCmp(0, result, m_ir->getInt64(0)); if (op.oe) UNK(op); }

// addc: rd = ra + rb with carry out; OE overflow derived from the sign bits.
void PPUTranslator::ADDC(ppu_opcode_t op) { const auto a = GetGpr(op.ra); const auto b = GetGpr(op.rb); const auto result = m_ir->CreateAdd(a, b); SetGpr(op.rd, result); SetCarry(m_ir->CreateICmpULT(result, b)); if (op.rc) SetCrFieldSignedCmp(0, result, m_ir->getInt64(0)); if (op.oe) {
//const auto s = m_ir->CreateCall(get_intrinsic<u64>(llvm::Intrinsic::sadd_with_overflow), {a, b});
//SetOverflow(m_ir->CreateExtractValue(s, {1}));
SetOverflow(m_ir->CreateICmpSLT(m_ir->CreateAnd(m_ir->CreateXor(a, m_ir->CreateNot(b)), m_ir->CreateXor(a, result)), m_ir->getInt64(0))); } }

// mulhdu: unsigned high 64 bits of the 128-bit product (via i128 widening).
void PPUTranslator::MULHDU(ppu_opcode_t op) { const auto a = ZExt(GetGpr(op.ra)); const auto b = ZExt(GetGpr(op.rb)); const auto result = Trunc(m_ir->CreateLShr(m_ir->CreateMul(a, b), 64)); SetGpr(op.rd, result); if (op.rc) SetCrFieldSignedCmp(0, result, m_ir->getInt64(0)); }

// mulhwu: unsigned high 32 bits of the 64-bit product.
void PPUTranslator::MULHWU(ppu_opcode_t op) { const auto a = ZExt(GetGpr(op.ra, 32)); const auto b = ZExt(GetGpr(op.rb, 32)); SetGpr(op.rd, m_ir->CreateLShr(m_ir->CreateMul(a, b), 32)); if (op.rc) SetCrFieldSignedCmp(0, GetGpr(op.rd), m_ir->getInt64(0)); }

// mfocrf/mfcr: move from one (or all) CR fields into rd. For MFOCRF the CRM mask
// must select exactly one field, otherwise the result is undefined.
void PPUTranslator::MFOCRF(ppu_opcode_t op) { if (op.l11) {
// MFOCRF
#if LLVM_VERSION_MAJOR < 17
const u64 pos = countLeadingZeros<u32>(op.crm) - 24;
#else
const u64 pos = countl_zero<u32>(op.crm) - 24;
#endif
if (pos >= 8 || 0x80u >> pos != op.crm) { SetGpr(op.rd, UndefValue::get(GetType<u64>())); return; } } else if
(std::none_of(m_cr + 0, m_cr + 32, [](auto* p) { return p; })) {
// MFCR (optimized): no pending CR bit values, so load both 16-byte halves of the
// CR bool array straight from the thread context and pack them into a 32-bit mask.
Value* ln0 = m_ir->CreateIntToPtr(m_ir->CreatePtrToInt(m_ir->CreateStructGEP(m_thread_type, m_thread, 99), GetType<uptr>()), GetType<u8[16]>()->getPointerTo()); Value* ln1 = m_ir->CreateIntToPtr(m_ir->CreatePtrToInt(m_ir->CreateStructGEP(m_thread_type, m_thread, 115), GetType<uptr>()), GetType<u8[16]>()->getPointerTo()); ln0 = m_ir->CreateLoad(GetType<u8[16]>(), ln0); ln1 = m_ir->CreateLoad(GetType<u8[16]>(), ln1); if (!m_is_be) { ln0 = Shuffle(ln0, nullptr, {15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0}); ln1 = Shuffle(ln1, nullptr, {15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0}); } const auto m0 = ZExt(bitcast<u16>(Trunc(ln0, GetType<bool[16]>()))); const auto m1 = ZExt(bitcast<u16>(Trunc(ln1, GetType<bool[16]>()))); SetGpr(op.rd, m_ir->CreateOr(m_ir->CreateShl(m0, 16), m1)); return; }
// Generic path: OR each selected CR bit into position 31 - b.
Value* result{}; for (u32 i = 0; i < 8; i++) { if (!op.l11 || op.crm & (128 >> i)) { for (u32 b = i * 4; b < i * 4 + 4; b++) { const auto value = m_ir->CreateShl(ZExt(GetCrb(b), GetType<u64>()), 31 - b); result = result ? m_ir->CreateOr(result, value) : value; } } } SetGpr(op.rd, result); }

// lwarx: load word and reserve. With 128-byte reservation loops enabled, control
// is handed to the interpreter helper (__resinterp) and the function returns early.
void PPUTranslator::LWARX(ppu_opcode_t op) { if (g_cfg.core.ppu_128_reservations_loop_max_length) { RegStore(Trunc(GetAddr()), m_cia); FlushRegisters(); Call(GetType<void>(), "__resinterp", m_thread);
//Call(GetType<void>(), "__escape", m_thread)->setTailCall();
m_ir->CreateRetVoid(); return; } SetGpr(op.rd, Call(GetType<u32>(), "__lwarx", m_thread, op.ra ? m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)) : GetGpr(op.rb))); }

// ldx: load doubleword indexed.
void PPUTranslator::LDX(ppu_opcode_t op) { SetGpr(op.rd, ReadMemory(op.ra ?
m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)) : GetGpr(op.rb), GetType<u64>())); }

// lwzx: load word zero-extended indexed.
void PPUTranslator::LWZX(ppu_opcode_t op) { m_may_be_mmio &= (op.ra != 1u && op.ra != 13u && op.rb != 1u && op.rb != 13u); // Stack register and TLS address register are unlikely to be used in MMIO address calculation
SetGpr(op.rd, ReadMemory(op.ra ? m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)) : GetGpr(op.rb), GetType<u32>())); }

// slw: shift left word. The 6-bit shift amount acts on the 64-bit value, so
// amounts >= 32 leave nothing after the final mask to 32 bits.
void PPUTranslator::SLW(ppu_opcode_t op) { const auto shift_num = m_ir->CreateAnd(GetGpr(op.rb), 0x3f); const auto shift_res = m_ir->CreateShl(GetGpr(op.rs), shift_num); const auto result = m_ir->CreateAnd(shift_res, 0xffffffff); SetGpr(op.ra, result); if (op.rc) SetCrFieldSignedCmp(0, result, m_ir->getInt64(0)); }

// cntlzw: count leading zeros (32-bit); llvm.ctlz with zero input defined (false flag).
void PPUTranslator::CNTLZW(ppu_opcode_t op) { const auto result = Call(GetType<u32>(), "llvm.ctlz.i32", GetGpr(op.rs, 32), m_ir->getFalse()); SetGpr(op.ra, result); if (op.rc) SetCrFieldSignedCmp(0, result, m_ir->getInt32(0)); }

// sld: shift left doubleword; widened to avoid undefined 64-bit shifts by >= 64.
void PPUTranslator::SLD(ppu_opcode_t op) { const auto shift_num = m_ir->CreateAnd(GetGpr(op.rb), 0x7f); const auto shift_arg = GetGpr(op.rs); const auto result = Trunc(m_ir->CreateShl(ZExt(shift_arg), ZExt(shift_num))); SetGpr(op.ra, result); if (op.rc) SetCrFieldSignedCmp(0, result, m_ir->getInt64(0)); }

// and: ra = rs & rb (rs == rb folds to a plain register copy).
void PPUTranslator::AND(ppu_opcode_t op) { const auto result = op.rs == op.rb ? GetGpr(op.rs) : m_ir->CreateAnd(GetGpr(op.rs), GetGpr(op.rb)); SetGpr(op.ra, result); if (op.rc) SetCrFieldSignedCmp(0, result, m_ir->getInt64(0)); }

// cmpl: unsigned compare; operand width selected by the L bit.
void PPUTranslator::CMPL(ppu_opcode_t op) { SetCrFieldUnsignedCmp(op.crfd, GetGpr(op.ra, op.l10 ? 64 : 32), GetGpr(op.rb, op.l10 ? 64 : 32)); }

// lvsr: load vector for shift right; permute control built from addr & 0xf.
void PPUTranslator::LVSR(ppu_opcode_t op) { const auto addr = value<u64>(op.ra ?
m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)) : GetGpr(op.rb)); set_vr(op.vd, build<u8[16]>(31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16) - vsplat<u8[16]>(trunc<u8>(addr & 0xf))); }

// lvehx: implemented as a full aligned vector load, same as lvx.
void PPUTranslator::LVEHX(ppu_opcode_t op) { return LVX(op); }

// subf: rd = rb - ra; OE overflow derived from the operand/result sign bits.
void PPUTranslator::SUBF(ppu_opcode_t op) { const auto a = GetGpr(op.ra); const auto b = GetGpr(op.rb); const auto result = m_ir->CreateSub(b, a); SetGpr(op.rd, result); if (op.rc) SetCrFieldSignedCmp(0, result, m_ir->getInt64(0)); if (op.oe) {
//const auto s = m_ir->CreateCall(get_intrinsic<u64>(llvm::Intrinsic::ssub_with_overflow), {b, m_ir->CreateNot(a)});
//SetOverflow(m_ir->CreateExtractValue(s, {1}));
SetOverflow(m_ir->CreateICmpSLT(m_ir->CreateAnd(m_ir->CreateXor(a, b), m_ir->CreateXor(m_ir->CreateNot(a), result)), m_ir->getInt64(0))); } }

// ldux: load doubleword with update (ra receives the effective address).
void PPUTranslator::LDUX(ppu_opcode_t op) { const auto addr = m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)); SetGpr(op.rd, ReadMemory(addr, GetType<u64>())); SetGpr(op.ra, addr); }

// dcbst: data cache block store - no-op here.
void PPUTranslator::DCBST(ppu_opcode_t) { }

// lwzux: load word zero-extended with update.
void PPUTranslator::LWZUX(ppu_opcode_t op) { m_may_be_mmio &= (op.ra != 1u && op.ra != 13u && op.rb != 1u && op.rb != 13u); // Stack register and TLS address register are unlikely to be used in MMIO address calculation
const auto addr = m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)); SetGpr(op.rd, ReadMemory(addr, GetType<u32>())); SetGpr(op.ra, addr); }

// cntlzd: count leading zeros (64-bit).
void PPUTranslator::CNTLZD(ppu_opcode_t op) { const auto result = Call(GetType<u64>(), "llvm.ctlz.i64", GetGpr(op.rs), m_ir->getFalse()); SetGpr(op.ra, result); if (op.rc) SetCrFieldSignedCmp(0, result, m_ir->getInt64(0)); }

// andc: ra = rs & ~rb.
void PPUTranslator::ANDC(ppu_opcode_t op) { const auto result = m_ir->CreateAnd(GetGpr(op.rs), m_ir->CreateNot(GetGpr(op.rb))); SetGpr(op.ra, result); if (op.rc) SetCrFieldSignedCmp(0, result, m_ir->getInt64(0)); }

// td: trap doubleword.
void PPUTranslator::TD(ppu_opcode_t op) { UseCondition(m_md_unlikely, CheckTrapCondition(op.bo, GetGpr(op.ra), GetGpr(op.rb))); Trap(); }

// lvewx: implemented as a full aligned vector load, same as lvx.
void
PPUTranslator::LVEWX(ppu_opcode_t op) { return LVX(op); }

// mulhd: signed high 64 bits of the 128-bit product.
void PPUTranslator::MULHD(ppu_opcode_t op) { const auto a = SExt(GetGpr(op.ra)); // i128
const auto b = SExt(GetGpr(op.rb)); const auto result = Trunc(m_ir->CreateLShr(m_ir->CreateMul(a, b), 64)); SetGpr(op.rd, result); if (op.rc) SetCrFieldSignedCmp(0, result, m_ir->getInt64(0)); }

// mulhw: signed high 32 bits of the 64-bit product.
void PPUTranslator::MULHW(ppu_opcode_t op) { const auto a = SExt(GetGpr(op.ra, 32)); const auto b = SExt(GetGpr(op.rb, 32)); SetGpr(op.rd, m_ir->CreateAShr(m_ir->CreateMul(a, b), 32)); if (op.rc) SetCrFieldSignedCmp(0, GetGpr(op.rd), m_ir->getInt64(0)); }

// ldarx: load doubleword and reserve; may defer to the interpreter helper like lwarx.
void PPUTranslator::LDARX(ppu_opcode_t op) { if (g_cfg.core.ppu_128_reservations_loop_max_length) { RegStore(Trunc(GetAddr()), m_cia); FlushRegisters(); Call(GetType<void>(), "__resinterp", m_thread);
//Call(GetType<void>(), "__escape", m_thread)->setTailCall();
m_ir->CreateRetVoid(); return; } SetGpr(op.rd, Call(GetType<u64>(), "__ldarx", m_thread, op.ra ? m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)) : GetGpr(op.rb))); }

// dcbf: data cache block flush - no-op here.
void PPUTranslator::DCBF(ppu_opcode_t) { }

// lbzx: load byte zero-extended indexed.
void PPUTranslator::LBZX(ppu_opcode_t op) { SetGpr(op.rd, ReadMemory(op.ra ? m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)) : GetGpr(op.rb), GetType<u8>())); }

// lvx: 16-byte aligned vector load; lanes byte-reversed when the host is little-endian.
void PPUTranslator::LVX(ppu_opcode_t op) { const auto addr = m_ir->CreateAnd(op.ra ? m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)) : GetGpr(op.rb), ~0xfull); const auto data = ReadMemory(addr, GetType<u8[16]>(), m_is_be, 16); SetVr(op.vd, m_is_be ?
data : Shuffle(data, nullptr, { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 })); }

// neg: two's complement negate; OE overflow only for the minimum 64-bit value.
void PPUTranslator::NEG(ppu_opcode_t op) { const auto reg = GetGpr(op.ra); const auto result = m_ir->CreateNeg(reg); SetGpr(op.rd, result); if (op.rc) SetCrFieldSignedCmp(0, result, m_ir->getInt64(0)); if (op.oe) SetOverflow(m_ir->CreateICmpEQ(result, m_ir->getInt64(1ull << 63))); }

// lbzux: load byte zero-extended with update.
void PPUTranslator::LBZUX(ppu_opcode_t op) { const auto addr = m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)); SetGpr(op.rd, ReadMemory(addr, GetType<u8>())); SetGpr(op.ra, addr); }

// nor: ra = ~(rs | rb); rs == rb is the NOT mnemonic.
void PPUTranslator::NOR(ppu_opcode_t op) { const auto result = m_ir->CreateNot(op.rs == op.rb ? GetGpr(op.rs) : m_ir->CreateOr(GetGpr(op.rs), GetGpr(op.rb))); SetGpr(op.ra, result); if (op.rc) SetCrFieldSignedCmp(0, result, m_ir->getInt64(0)); }

// stvebx: store the single vector byte selected by the effective address
// (lane index flipped on little-endian hosts).
void PPUTranslator::STVEBX(ppu_opcode_t op) { const auto addr = op.ra ? m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)) : GetGpr(op.rb); WriteMemory(addr, m_ir->CreateExtractElement(GetVr(op.vs, VrType::vi8), m_ir->CreateXor(m_ir->CreateAnd(addr, 15), m_is_be ?
0 : 15))); }

// subfe: rd = ~ra + rb + CA; carry out if either addition step wrapped.
void PPUTranslator::SUBFE(ppu_opcode_t op) { const auto a = m_ir->CreateNot(GetGpr(op.ra)); const auto b = GetGpr(op.rb); const auto c = GetCarry(); const auto r1 = m_ir->CreateAdd(a, b); const auto r2 = m_ir->CreateAdd(r1, ZExt(c, GetType<u64>())); SetGpr(op.rd, r2); SetCarry(m_ir->CreateOr(m_ir->CreateICmpULT(r1, a), m_ir->CreateICmpULT(r2, r1))); if (op.rc) SetCrFieldSignedCmp(0, r2, m_ir->getInt64(0)); if (op.oe) UNK(op); }

// adde: rd = ra + rb + CA; carry out if either addition step wrapped.
void PPUTranslator::ADDE(ppu_opcode_t op) { const auto a = GetGpr(op.ra); const auto b = GetGpr(op.rb); const auto c = GetCarry(); const auto r1 = m_ir->CreateAdd(a, b); const auto r2 = m_ir->CreateAdd(r1, ZExt(c, GetType<u64>())); SetGpr(op.rd, r2); SetCarry(m_ir->CreateOr(m_ir->CreateICmpULT(r1, a), m_ir->CreateICmpULT(r2, r1))); if (op.rc) SetCrFieldSignedCmp(0, r2, m_ir->getInt64(0)); if (op.oe) UNK(op); }

// mtocrf/mtcrf: move to CR field(s). Each selected 4-bit field is expanded to
// four bool bytes via a 64-entry lookup table that is memcpy'd into the context.
void PPUTranslator::MTOCRF(ppu_opcode_t op) { if (op.l11) {
// MTOCRF
#if LLVM_VERSION_MAJOR < 17
const u64 pos = countLeadingZeros<u32>(op.crm) - 24;
#else
const u64 pos = countl_zero<u32>(op.crm) - 24;
#endif
if (pos >= 8 || 0x80u >> pos != op.crm) { return; } } else {
// MTCRF
} static u8 s_table[64] { 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, }; if (!m_mtocr_table) { m_mtocr_table = new GlobalVariable(*m_module, ArrayType::get(GetType<u8>(), 64), true, GlobalValue::PrivateLinkage, ConstantDataArray::get(m_context, s_table)); } const auto value = GetGpr(op.rs, 32); for (u32 i = 0; i < 8; i++) { if (op.crm & (128 >> i)) {
// Discard pending values
std::fill_n(m_cr + i * 4, 4, nullptr); std::fill_n(m_g_cr + i * 4, 4, nullptr); const auto index = m_ir->CreateAnd(m_ir->CreateLShr(value, 28 - i * 4), 15); const auto src = m_ir->CreateGEP(m_mtocr_table->getValueType(), m_mtocr_table, {m_ir->getInt32(0), m_ir->CreateShl(index, 2)}); const auto dst =
m_ir->CreateStructGEP(m_thread_type, m_thread, static_cast<uint>(m_cr - m_locals) + i * 4); Call(GetType<void>(), "llvm.memcpy.p0.p0.i32", dst, src, m_ir->getInt32(4), m_ir->getFalse()); } } }

// stdx: store doubleword indexed.
void PPUTranslator::STDX(ppu_opcode_t op) { WriteMemory(op.ra ? m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)) : GetGpr(op.rb), GetGpr(op.rs)); }

// stwcx.: store word conditional; success flag goes into CR0.EQ.
void PPUTranslator::STWCX(ppu_opcode_t op) { const auto bit = Call(GetType<bool>(), "__stwcx", m_thread, op.ra ? m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)) : GetGpr(op.rb), GetGpr(op.rs, 32)); SetCrField(0, m_ir->getFalse(), m_ir->getFalse(), bit); }

// stwx: store word indexed.
void PPUTranslator::STWX(ppu_opcode_t op) { m_may_be_mmio &= (op.ra != 1u && op.ra != 13u && op.rb != 1u && op.rb != 13u); // Stack register and TLS address register are unlikely to be used in MMIO address calculation
WriteMemory(op.ra ? m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)) : GetGpr(op.rb), GetGpr(op.rs, 32)); }

// stvehx: store the vector halfword selected by the (2-byte aligned) address.
void PPUTranslator::STVEHX(ppu_opcode_t op) { const auto addr = m_ir->CreateAnd(op.ra ? m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)) : GetGpr(op.rb), -2); WriteMemory(addr, m_ir->CreateExtractElement(GetVr(op.vs, VrType::vi16), m_ir->CreateLShr(m_ir->CreateXor(m_ir->CreateAnd(addr, 15), m_is_be ? 0 : 15), 1)), true, 2); }

// stdux: store doubleword with update.
void PPUTranslator::STDUX(ppu_opcode_t op) { const auto addr = m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)); WriteMemory(addr, GetGpr(op.rs)); SetGpr(op.ra, addr); }

// stwux: store word with update.
void PPUTranslator::STWUX(ppu_opcode_t op) { const auto addr = m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)); m_may_be_mmio &= (op.ra != 1u && op.ra != 13u && op.rb != 1u && op.rb != 13u); // Stack register and TLS address register are unlikely to be used in MMIO address calculation
WriteMemory(addr, GetGpr(op.rs, 32)); SetGpr(op.ra, addr); }

// stvewx: store the vector word selected by the (4-byte aligned) address.
void PPUTranslator::STVEWX(ppu_opcode_t op) { const auto addr = m_ir->CreateAnd(op.ra ?
m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)) : GetGpr(op.rb), -4); WriteMemory(addr, m_ir->CreateExtractElement(GetVr(op.vs, VrType::vi32), m_ir->CreateLShr(m_ir->CreateXor(m_ir->CreateAnd(addr, 15), m_is_be ? 0 : 15), 2)), true, 4); }

// addze: rd = ra + CA with carry out; OE form falls through to UNK.
void PPUTranslator::ADDZE(ppu_opcode_t op) { const auto a = GetGpr(op.ra); const auto c = GetCarry(); const auto result = m_ir->CreateAdd(a, ZExt(c, GetType<u64>())); SetGpr(op.rd, result); SetCarry(m_ir->CreateICmpULT(result, a)); if (op.rc) SetCrFieldSignedCmp(0, result, m_ir->getInt64(0)); if (op.oe) UNK(op); }

// subfze: rd = ~ra + CA with carry out; OE form falls through to UNK.
void PPUTranslator::SUBFZE(ppu_opcode_t op) { const auto a = m_ir->CreateNot(GetGpr(op.ra)); const auto c = GetCarry(); const auto result = m_ir->CreateAdd(a, ZExt(c, GetType<u64>())); SetGpr(op.rd, result); SetCarry(m_ir->CreateICmpULT(result, a)); if (op.rc) SetCrFieldSignedCmp(0, result, m_ir->getInt64(0)); if (op.oe) UNK(op); }

// stdcx.: store doubleword conditional; success flag goes into CR0.EQ.
void PPUTranslator::STDCX(ppu_opcode_t op) { const auto bit = Call(GetType<bool>(), "__stdcx", m_thread, op.ra ? m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)) : GetGpr(op.rb), GetGpr(op.rs)); SetCrField(0, m_ir->getFalse(), m_ir->getFalse(), bit); }

// stbx: store byte indexed.
void PPUTranslator::STBX(ppu_opcode_t op) { WriteMemory(op.ra ? m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)) : GetGpr(op.rb), GetGpr(op.rs, 8)); }

// stvx: 16-byte aligned vector store; lanes byte-reversed on little-endian hosts.
void PPUTranslator::STVX(ppu_opcode_t op) { const auto value = GetVr(op.vs, VrType::vi8); const auto data = m_is_be ? value : Shuffle(value, nullptr, { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 }); WriteMemory(m_ir->CreateAnd(op.ra ?
m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)) : GetGpr(op.rb), -16), data, m_is_be, 16); }

// subfme: rd = ~ra + CA - 1, written as ~ra - !CA; OE form falls through to UNK.
void PPUTranslator::SUBFME(ppu_opcode_t op) { const auto a = m_ir->CreateNot(GetGpr(op.ra)); const auto c = GetCarry(); const auto result = m_ir->CreateSub(a, ZExt(m_ir->CreateNot(c), GetType<u64>())); SetGpr(op.rd, result); SetCarry(m_ir->CreateOr(c, IsNotZero(a))); if (op.rc) SetCrFieldSignedCmp(0, result, m_ir->getInt64(0)); if (op.oe) UNK(op); }

// mulld: low 64 bits of the product; OE form falls through to UNK.
void PPUTranslator::MULLD(ppu_opcode_t op) { const auto a = GetGpr(op.ra); const auto b = GetGpr(op.rb); const auto result = m_ir->CreateMul(a, b); SetGpr(op.rd, result); if (op.rc) SetCrFieldSignedCmp(0, result, m_ir->getInt64(0)); if (op.oe) UNK(op); }

// addme: rd = ra + CA - 1, written as ra - !CA; OE form falls through to UNK.
void PPUTranslator::ADDME(ppu_opcode_t op) { const auto a = GetGpr(op.ra); const auto c = GetCarry(); const auto result = m_ir->CreateSub(a, ZExt(m_ir->CreateNot(c), GetType<u64>())); SetGpr(op.rd, result); SetCarry(m_ir->CreateOr(c, IsNotZero(a))); if (op.rc) SetCrFieldSignedCmp(0, result, m_ir->getInt64(0)); if (op.oe) UNK(op); }

// mullw: 32x32 -> 64-bit signed multiply.
void PPUTranslator::MULLW(ppu_opcode_t op) { const auto a = SExt(GetGpr(op.ra, 32)); const auto b = SExt(GetGpr(op.rb, 32)); const auto result = m_ir->CreateMul(a, b); SetGpr(op.rd, result); if (op.rc) SetCrFieldSignedCmp(0, result, m_ir->getInt64(0)); if (op.oe) UNK(op); }

// dcbtst: data cache block touch for store - no-op here.
void PPUTranslator::DCBTST(ppu_opcode_t) { }

// stbux: store byte with update.
void PPUTranslator::STBUX(ppu_opcode_t op) { const auto addr = m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)); WriteMemory(addr, GetGpr(op.rs, 8)); SetGpr(op.ra, addr); }

// add: rd = ra + rb; OE overflow derived from the sign bits.
void PPUTranslator::ADD(ppu_opcode_t op) { const auto a = GetGpr(op.ra); const auto b = GetGpr(op.rb); const auto result = m_ir->CreateAdd(a, b); SetGpr(op.rd, result); if (op.oe) {
//const auto s = m_ir->CreateCall(get_intrinsic<u64>(llvm::Intrinsic::sadd_with_overflow), {a, b});
//SetOverflow(m_ir->CreateExtractValue(s, {1}));
SetOverflow(m_ir->CreateICmpSLT(m_ir->CreateAnd(m_ir->CreateXor(a, m_ir->CreateNot(b)), m_ir->CreateXor(a, result)), m_ir->getInt64(0))); } if
(op.rc) SetCrFieldSignedCmp(0, result, m_ir->getInt64(0)); }

// dcbt: data cache block touch - no-op here.
void PPUTranslator::DCBT(ppu_opcode_t) { }

// lhzx: load halfword zero-extended indexed.
void PPUTranslator::LHZX(ppu_opcode_t op) { SetGpr(op.rd, ReadMemory(op.ra ? m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)) : GetGpr(op.rb), GetType<u16>())); }

// eqv: ra = ~(rs ^ rb).
void PPUTranslator::EQV(ppu_opcode_t op) { const auto result = m_ir->CreateNot(m_ir->CreateXor(GetGpr(op.rs), GetGpr(op.rb))); SetGpr(op.ra, result); if (op.rc) SetCrFieldSignedCmp(0, result, m_ir->getInt64(0)); }

// eciwx: external control in word indexed - not supported.
void PPUTranslator::ECIWX(ppu_opcode_t op) { UNK(op); }

// lhzux: load halfword zero-extended with update.
void PPUTranslator::LHZUX(ppu_opcode_t op) { const auto addr = m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)); SetGpr(op.rd, ReadMemory(addr, GetType<u16>())); SetGpr(op.ra, addr); }

// xor: ra = rs ^ rb (rs == rb folds to constant zero).
void PPUTranslator::XOR(ppu_opcode_t op) { const auto result = op.rs == op.rb ? static_cast<Value*>(m_ir->getInt64(0)) : m_ir->CreateXor(GetGpr(op.rs), GetGpr(op.rb)); SetGpr(op.ra, result); if (op.rc) SetCrFieldSignedCmp(0, result, m_ir->getInt64(0)); }

// mfspr: move from special-purpose register; the split 10-bit SPR field is
// swapped back into its architectural number before dispatch.
void PPUTranslator::MFSPR(ppu_opcode_t op) { Value* result; switch (const u32 n = (op.spr >> 5) | ((op.spr & 0x1f) << 5)) { case 0x001: // MFXER
result = ZExt(RegLoad(m_cnt), GetType<u64>()); result = m_ir->CreateOr(result, m_ir->CreateShl(ZExt(RegLoad(m_so), GetType<u64>()), 29)); result = m_ir->CreateOr(result, m_ir->CreateShl(ZExt(RegLoad(m_ov), GetType<u64>()), 30)); result = m_ir->CreateOr(result, m_ir->CreateShl(ZExt(RegLoad(m_ca), GetType<u64>()), 31)); break; case 0x008: // MFLR
result = RegLoad(m_lr); break; case 0x009: // MFCTR
result = RegLoad(m_ctr); break; case 0x100: result = ZExt(RegLoad(m_vrsave)); break; case 0x10C: // MFTB
result = Call(GetType<u64>(), m_pure_attr, "__get_tb"); break; case 0x10D: // MFTBU
result = m_ir->CreateLShr(Call(GetType<u64>(), m_pure_attr, "__get_tb"), 32); break; default: result = Call(GetType<u64>(), fmt::format("__mfspr_%u", n)); break; } SetGpr(op.rd, result); }

// lwax: load word algebraic (sign-extended) indexed.
void PPUTranslator::LWAX(ppu_opcode_t op) { SetGpr(op.rd, SExt(ReadMemory(op.ra ?
m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)) : GetGpr(op.rb), GetType<s32>()))); }

// dst: data stream touch - no-op here.
void PPUTranslator::DST(ppu_opcode_t) { }

// lhax: load halfword algebraic (sign-extended) indexed.
void PPUTranslator::LHAX(ppu_opcode_t op) { SetGpr(op.rd, SExt(ReadMemory(op.ra ? m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)) : GetGpr(op.rb), GetType<s16>()), GetType<s64>())); }

// lvxl: same as lvx (no LRU hint emulation).
void PPUTranslator::LVXL(ppu_opcode_t op) { return LVX(op); }

// mftb: move from time base (or its upper half for MFTBU).
void PPUTranslator::MFTB(ppu_opcode_t op) { Value* result; switch (const u32 n = (op.spr >> 5) | ((op.spr & 0x1f) << 5)) { case 0x10C: // MFTB
result = Call(GetType<u64>(), m_pure_attr, "__get_tb"); break; case 0x10D: // MFTBU
result = m_ir->CreateLShr(Call(GetType<u64>(), m_pure_attr, "__get_tb"), 32); break; default: result = Call(GetType<u64>(), fmt::format("__mftb_%u", n)); break; } SetGpr(op.rd, result); }

// lwaux: load word algebraic with update.
void PPUTranslator::LWAUX(ppu_opcode_t op) { const auto addr = m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)); SetGpr(op.rd, SExt(ReadMemory(addr, GetType<s32>()))); SetGpr(op.ra, addr); }

// dstst: data stream touch for store - no-op here.
void PPUTranslator::DSTST(ppu_opcode_t) { }

// lhaux: load halfword algebraic with update.
void PPUTranslator::LHAUX(ppu_opcode_t op) { const auto addr = m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)); SetGpr(op.rd, SExt(ReadMemory(addr, GetType<s16>()), GetType<s64>())); SetGpr(op.ra, addr); }

// sthx: store halfword indexed.
void PPUTranslator::STHX(ppu_opcode_t op) { WriteMemory(op.ra ? m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)) : GetGpr(op.rb), GetGpr(op.rs, 16)); }

// orc: ra = rs | ~rb (rs == rb folds to all-ones).
void PPUTranslator::ORC(ppu_opcode_t op) { const auto result = op.rs == op.rb ? static_cast<Value*>(m_ir->getInt64(-1)) : m_ir->CreateOr(GetGpr(op.rs), m_ir->CreateNot(GetGpr(op.rb))); SetGpr(op.ra, result); if (op.rc) SetCrFieldSignedCmp(0, result, m_ir->getInt64(0)); }

// ecowx: external control out word indexed - not supported.
void PPUTranslator::ECOWX(ppu_opcode_t op) { UNK(op); }

// sthux: store halfword with update.
void PPUTranslator::STHUX(ppu_opcode_t op) { const auto addr = m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)); WriteMemory(addr, GetGpr(op.rs, 16)); SetGpr(op.ra, addr); }

// or: ra = rs | rb (rs == rb is the MR mnemonic, folded to a copy).
void PPUTranslator::OR(ppu_opcode_t op) { const auto result = op.rs == op.rb ?
GetGpr(op.rs) : m_ir->CreateOr(GetGpr(op.rs), GetGpr(op.rb)); SetGpr(op.ra, result); if (op.rc) SetCrFieldSignedCmp(0, result, m_ir->getInt64(0)); }

// divdu: unsigned 64-bit divide; selects keep the LLVM udiv well-defined and
// force a 0 result when the divisor is zero.
void PPUTranslator::DIVDU(ppu_opcode_t op) { const auto a = GetGpr(op.ra); const auto b = GetGpr(op.rb); const auto o = IsZero(b); const auto result = m_ir->CreateUDiv(a, m_ir->CreateSelect(o, m_ir->getInt64(-1), b)); SetGpr(op.rd, m_ir->CreateSelect(o, m_ir->getInt64(0), result)); if (op.rc) SetCrFieldSignedCmp(0, GetGpr(op.rd), m_ir->getInt64(0)); if (op.oe) SetOverflow(o); }

// divwu: unsigned 32-bit divide; division by zero yields 0.
void PPUTranslator::DIVWU(ppu_opcode_t op) { const auto a = GetGpr(op.ra, 32); const auto b = GetGpr(op.rb, 32); const auto o = IsZero(b); const auto result = m_ir->CreateUDiv(a, m_ir->CreateSelect(o, m_ir->getInt32(0xffffffff), b)); SetGpr(op.rd, m_ir->CreateSelect(o, m_ir->getInt32(0), result)); if (op.rc) SetCrFieldSignedCmp(0, GetGpr(op.rd), m_ir->getInt64(0)); if (op.oe) SetOverflow(o); }

// mtspr: move to special-purpose register; XER is decomposed into CA/OV/SO and
// the string count byte.
void PPUTranslator::MTSPR(ppu_opcode_t op) { const auto value = GetGpr(op.rs); switch (const u32 n = (op.spr >> 5) | ((op.spr & 0x1f) << 5)) { case 0x001: // MTXER
RegStore(Trunc(m_ir->CreateLShr(value, 31), GetType<bool>()), m_ca); RegStore(Trunc(m_ir->CreateLShr(value, 30), GetType<bool>()), m_ov); RegStore(Trunc(m_ir->CreateLShr(value, 29), GetType<bool>()), m_so); RegStore(Trunc(value, GetType<u8>()), m_cnt); break; case 0x008: // MTLR
RegStore(value, m_lr); break; case 0x009: // MTCTR
RegStore(value, m_ctr); break; case 0x100: RegStore(Trunc(value), m_vrsave); break; default: Call(GetType<void>(), fmt::format("__mtspr_%u", n), value); break; } }

// nand: ra = ~(rs & rb).
void PPUTranslator::NAND(ppu_opcode_t op) { const auto result = m_ir->CreateNot(op.rs == op.rb ?
GetGpr(op.rs) : m_ir->CreateAnd(GetGpr(op.rs), GetGpr(op.rb))); SetGpr(op.ra, result); if (op.rc) SetCrFieldSignedCmp(0, result, m_ir->getInt64(0)); }

// stvxl: same as stvx (no LRU hint emulation).
void PPUTranslator::STVXL(ppu_opcode_t op) { return STVX(op); }

// divd: signed 64-bit divide; divisor-zero and INT64_MIN / -1 both yield 0
// (the selects keep LLVM sdiv free of UB).
void PPUTranslator::DIVD(ppu_opcode_t op) { const auto a = GetGpr(op.ra); const auto b = GetGpr(op.rb); const auto o = m_ir->CreateOr(IsZero(b), m_ir->CreateAnd(m_ir->CreateICmpEQ(a, m_ir->getInt64(1ull << 63)), IsOnes(b))); const auto result = m_ir->CreateSDiv(a, m_ir->CreateSelect(o, m_ir->getInt64(1ull << 63), b)); SetGpr(op.rd, m_ir->CreateSelect(o, m_ir->getInt64(0), result)); if (op.rc) SetCrFieldSignedCmp(0, GetGpr(op.rd), m_ir->getInt64(0)); if (op.oe) SetOverflow(o); }

// divw: signed 32-bit divide; same special cases as divd.
void PPUTranslator::DIVW(ppu_opcode_t op) { const auto a = GetGpr(op.ra, 32); const auto b = GetGpr(op.rb, 32); const auto o = m_ir->CreateOr(IsZero(b), m_ir->CreateAnd(m_ir->CreateICmpEQ(a, m_ir->getInt32(s32{smin})), IsOnes(b))); const auto result = m_ir->CreateSDiv(a, m_ir->CreateSelect(o, m_ir->getInt32(s32{smin}), b)); SetGpr(op.rd, m_ir->CreateSelect(o, m_ir->getInt32(0), result)); if (op.rc) SetCrFieldSignedCmp(0, GetGpr(op.rd), m_ir->getInt64(0)); if (op.oe) SetOverflow(o); }

// lvlx: load vector left-indexed; the aligned 16-byte load is shifted into
// place with pshufb based on addr & 0xf.
void PPUTranslator::LVLX(ppu_opcode_t op) { const auto addr = op.ra ? m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)) : GetGpr(op.rb); const auto data = ReadMemory(m_ir->CreateAnd(addr, ~0xfull), GetType<u8[16]>(), m_is_be, 16); set_vr(op.vd, pshufb(value<u8[16]>(data), build<u8[16]>(127, 126, 125, 124, 123, 122, 121, 120, 119, 118, 117, 116, 115, 114, 113, 112) + vsplat<u8[16]>(trunc<u8>(value<u64>(addr) & 0xf)))); }

// ldbrx: load doubleword byte-reversed.
void PPUTranslator::LDBRX(ppu_opcode_t op) { SetGpr(op.rd, ReadMemory(op.ra ? m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)) : GetGpr(op.rb), GetType<u64>(), false)); }

// lswx: load string word indexed - delegated to an unsupported-op helper.
void PPUTranslator::LSWX(ppu_opcode_t op) { Call(GetType<void>(), "__lswx_not_supported", m_ir->getInt32(op.rd), RegLoad(m_cnt), op.ra ?
m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)) : GetGpr(op.rb)); }

// lwbrx: load word byte-reversed.
void PPUTranslator::LWBRX(ppu_opcode_t op) { SetGpr(op.rd, ReadMemory(op.ra ? m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)) : GetGpr(op.rb), GetType<u32>(), false)); }

// lfsx: load float single indexed.
void PPUTranslator::LFSX(ppu_opcode_t op) { m_may_be_mmio &= (op.ra != 1u && op.ra != 13u && op.rb != 1u && op.rb != 13u); // Stack register and TLS address register are unlikely to be used in MMIO address calculation
SetFpr(op.frd, ReadMemory(op.ra ? m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)) : GetGpr(op.rb), GetType<f32>())); }

// srw: shift right word; input masked to 32 bits first, 6-bit shift amount.
void PPUTranslator::SRW(ppu_opcode_t op) { const auto shift_num = m_ir->CreateAnd(GetGpr(op.rb), 0x3f); const auto shift_arg = m_ir->CreateAnd(GetGpr(op.rs), 0xffffffff); const auto result = m_ir->CreateLShr(shift_arg, shift_num); SetGpr(op.ra, result); if (op.rc) SetCrFieldSignedCmp(0, result, m_ir->getInt64(0)); }

// srd: shift right doubleword; widened to avoid undefined 64-bit shifts by >= 64.
void PPUTranslator::SRD(ppu_opcode_t op) { const auto shift_num = m_ir->CreateAnd(GetGpr(op.rb), 0x7f); const auto shift_arg = GetGpr(op.rs); const auto result = Trunc(m_ir->CreateLShr(ZExt(shift_arg), ZExt(shift_num))); SetGpr(op.ra, result); if (op.rc) SetCrFieldSignedCmp(0, result, m_ir->getInt64(0)); }

// lvrx: load vector right-indexed.
void PPUTranslator::LVRX(ppu_opcode_t op) { const auto addr = op.ra ? m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)) : GetGpr(op.rb); const auto offset = eval(trunc<u8>(value<u64>(addr) & 0xf));
// Read from instruction address if offset is 0, this prevents accessing potentially bad memory from addr (because no actual memory is dereferenced)
const auto data = ReadMemory(m_ir->CreateAnd(m_ir->CreateSelect(m_ir->CreateIsNull(offset.value), m_reloc ? m_seg0 : GetAddr(0), addr), ~0xfull), GetType<u8[16]>(), m_is_be, 16); set_vr(op.vd, pshufb(value<u8[16]>(data), build<u8[16]>(255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240) + vsplat<u8[16]>(offset))); }

// lswi: load string word immediate, unrolled at translation time. index counts
// bytes (op.rb == 0 means 32); partial trailing words are assembled byte by byte.
void PPUTranslator::LSWI(ppu_opcode_t op) { Value* addr = op.ra ?
GetGpr(op.ra) : m_ir->getInt64(0);

	// Load String Word Immediate: load "index" bytes (rb, or 32 when rb == 0) into
	// consecutive GPRs starting at rd, four bytes per register; register numbers
	// wrap from r31 back to r0.
	u32 index = op.rb ? op.rb : 32;
	u32 reg = op.rd;

	while (index)
	{
		if (index > 3)
		{
			// A full word remains: load it directly
			SetGpr(reg, ReadMemory(addr, GetType<u32>()));
			index -= 4;

			if (index)
			{
				addr = m_ir->CreateAdd(addr, m_ir->getInt64(4));
			}
		}
		else
		{
			// Tail of 1-3 bytes: assemble them into the high-order end of a 32-bit value
			// (first byte lands in bits 24..31, matching big-endian word layout)
			Value* buf = nullptr;
			u32 i = 3;

			while (index)
			{
				const auto byte = m_ir->CreateShl(ZExt(ReadMemory(addr, GetType<u8>()), GetType<u32>()), i * 8);
				buf = buf ? m_ir->CreateOr(buf, byte) : byte;

				if (--index)
				{
					addr = m_ir->CreateAdd(addr, m_ir->getInt64(1));
					i--;
				}
			}

			SetGpr(reg, buf);
		}

		reg = (reg + 1) % 32;
	}
}

// Load Floating-Point Single with Update Indexed: loads f32 from ra+rb and writes the address back to ra
void PPUTranslator::LFSUX(ppu_opcode_t op)
{
	const auto addr = m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb));
	SetFpr(op.frd, ReadMemory(addr, GetType<f32>()));
	SetGpr(op.ra, addr);
}

void PPUTranslator::SYNC(ppu_opcode_t op)
{
	// sync: Full seq cst barrier
	// lwsync: Acq/Release barrier (but not really it seems from observing libsre.sprx)
	// NOTE: the "&& false" deliberately disables the weaker ordering, so lwsync is
	// currently emitted as a sequentially consistent fence as well.
	m_ir->CreateFence(op.l10 && false ? AtomicOrdering::AcquireRelease : AtomicOrdering::SequentiallyConsistent);
}

// Load Floating-Point Double Indexed
void PPUTranslator::LFDX(ppu_opcode_t op)
{
	SetFpr(op.frd, ReadMemory(op.ra ? m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)) : GetGpr(op.rb), GetType<f64>()));
}

// Load Floating-Point Double with Update Indexed
void PPUTranslator::LFDUX(ppu_opcode_t op)
{
	const auto addr = m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb));
	SetFpr(op.frd, ReadMemory(addr, GetType<f64>()));
	SetGpr(op.ra, addr);
}

void PPUTranslator::STVLX(ppu_opcode_t op)
{
	const auto addr = op.ra ?
m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)) : GetGpr(op.rb); const auto data = pshufb(get_vr<u8[16]>(op.vs), build<u8[16]>(127, 126, 125, 124, 123, 122, 121, 120, 119, 118, 117, 116, 115, 114, 113, 112) + vsplat<u8[16]>(trunc<u8>(value<u64>(addr) & 0xf))); const auto mask = bitcast<bool[16]>(splat<u16>(0xffff) << trunc<u16>(value<u64>(addr) & 0xf)); const auto ptr = value<u8(*)[16]>(GetMemory(m_ir->CreateAnd(addr, ~0xfull))); const auto align = splat<u32>(16); eval(llvm_calli<void, decltype(data), decltype(ptr), decltype(align), decltype(mask)>{"llvm.masked.store.v16i8.p0", {data, ptr, align, mask}}); } void PPUTranslator::STDBRX(ppu_opcode_t op) { WriteMemory(op.ra ? m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)) : GetGpr(op.rb), GetGpr(op.rs), false); } void PPUTranslator::STSWX(ppu_opcode_t op) { Call(GetType<void>(), "__stswx_not_supported", m_ir->getInt32(op.rs), RegLoad(m_cnt), op.ra ? m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)) : GetGpr(op.rb)); } void PPUTranslator::STWBRX(ppu_opcode_t op) { WriteMemory(op.ra ? m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)) : GetGpr(op.rb), GetGpr(op.rs, 32), false); } void PPUTranslator::STFSX(ppu_opcode_t op) { WriteMemory(op.ra ? m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)) : GetGpr(op.rb), GetFpr(op.frs, 32)); } void PPUTranslator::STVRX(ppu_opcode_t op) { const auto addr = op.ra ? 
m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)) : GetGpr(op.rb); const auto data = pshufb(get_vr<u8[16]>(op.vs), build<u8[16]>(255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240) + vsplat<u8[16]>(trunc<u8>(value<u64>(addr) & 0xf))); const auto mask = bitcast<bool[16]>(trunc<u16>(splat<u64>(0xffff) << (value<u64>(addr) & 0xf) >> 16)); const auto ptr = value<u8(*)[16]>(GetMemory(m_ir->CreateAnd(addr, ~0xfull))); const auto align = splat<u32>(16); eval(llvm_calli<void, decltype(data), decltype(ptr), decltype(align), decltype(mask)>{"llvm.masked.store.v16i8.p0", {data, ptr, align, mask}}); } void PPUTranslator::STFSUX(ppu_opcode_t op) { const auto addr = m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)); WriteMemory(addr, GetFpr(op.frs, 32)); SetGpr(op.ra, addr); } void PPUTranslator::STSWI(ppu_opcode_t op) { Value* addr = op.ra ? GetGpr(op.ra) : m_ir->getInt64(0); u32 index = op.rb ? op.rb : 32; u32 reg = op.rd; while (index) { if (index > 3) { WriteMemory(addr, GetGpr(reg, 32)); index -= 4; if (index) { addr = m_ir->CreateAdd(addr, m_ir->getInt64(4)); } } else { Value* buf = GetGpr(reg, 32); while (index) { WriteMemory(addr, Trunc(m_ir->CreateLShr(buf, 24), GetType<u8>())); if (--index) { buf = m_ir->CreateShl(buf, 8); addr = m_ir->CreateAdd(addr, m_ir->getInt64(1)); } } } reg = (reg + 1) % 32; } } void PPUTranslator::STFDX(ppu_opcode_t op) { WriteMemory(op.ra ? m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)) : GetGpr(op.rb), GetFpr(op.frs)); } void PPUTranslator::STFDUX(ppu_opcode_t op) { const auto addr = m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)); WriteMemory(addr, GetFpr(op.frs)); SetGpr(op.ra, addr); } void PPUTranslator::LVLXL(ppu_opcode_t op) { return LVLX(op); } void PPUTranslator::LHBRX(ppu_opcode_t op) { SetGpr(op.rd, ReadMemory(op.ra ? 
m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)) : GetGpr(op.rb), GetType<u16>(), false)); } void PPUTranslator::SRAW(ppu_opcode_t op) { const auto shift_num = m_ir->CreateAnd(GetGpr(op.rb), 0x3f); const auto shift_arg = GetGpr(op.rs, 32); const auto arg_ext = SExt(shift_arg); const auto result = m_ir->CreateAShr(arg_ext, shift_num); SetGpr(op.ra, result); SetCarry(m_ir->CreateAnd(m_ir->CreateICmpSLT(shift_arg, m_ir->getInt32(0)), m_ir->CreateICmpNE(arg_ext, m_ir->CreateShl(result, shift_num)))); if (op.rc) SetCrFieldSignedCmp(0, result, m_ir->getInt64(0)); } void PPUTranslator::SRAD(ppu_opcode_t op) { const auto shift_num = ZExt(m_ir->CreateAnd(GetGpr(op.rb), 0x7f)); // i128 const auto shift_arg = GetGpr(op.rs); const auto arg_ext = SExt(shift_arg); // i128 const auto res_128 = m_ir->CreateAShr(arg_ext, shift_num); // i128 const auto result = Trunc(res_128); SetGpr(op.ra, result); SetCarry(m_ir->CreateAnd(m_ir->CreateICmpSLT(shift_arg, m_ir->getInt64(0)), m_ir->CreateICmpNE(arg_ext, m_ir->CreateShl(res_128, shift_num)))); if (op.rc) SetCrFieldSignedCmp(0, result, m_ir->getInt64(0)); } void PPUTranslator::LVRXL(ppu_opcode_t op) { return LVRX(op); } void PPUTranslator::DSS(ppu_opcode_t) { } void PPUTranslator::SRAWI(ppu_opcode_t op) { const auto shift_arg = GetGpr(op.rs, 32); const auto res_32 = m_ir->CreateAShr(shift_arg, op.sh32); const auto result = SExt(res_32); SetGpr(op.ra, result); SetCarry(m_ir->CreateAnd(m_ir->CreateICmpSLT(shift_arg, m_ir->getInt32(0)), m_ir->CreateICmpNE(shift_arg, m_ir->CreateShl(res_32, op.sh32)))); if (op.rc) SetCrFieldSignedCmp(0, result, m_ir->getInt64(0)); } void PPUTranslator::SRADI(ppu_opcode_t op) { const auto shift_arg = GetGpr(op.rs); const auto result = m_ir->CreateAShr(shift_arg, op.sh64); SetGpr(op.ra, result); SetCarry(m_ir->CreateAnd(m_ir->CreateICmpSLT(shift_arg, m_ir->getInt64(0)), m_ir->CreateICmpNE(shift_arg, m_ir->CreateShl(result, op.sh64)))); if (op.rc) SetCrFieldSignedCmp(0, result, m_ir->getInt64(0)); } void 
PPUTranslator::EIEIO(ppu_opcode_t) { // TODO m_ir->CreateFence(AtomicOrdering::SequentiallyConsistent); } void PPUTranslator::STVLXL(ppu_opcode_t op) { return STVLX(op); } void PPUTranslator::STHBRX(ppu_opcode_t op) { WriteMemory(op.ra ? m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)) : GetGpr(op.rb), GetGpr(op.rs, 16), false); } void PPUTranslator::EXTSH(ppu_opcode_t op) { const auto result = SExt(GetGpr(op.rs, 16), GetType<s64>()); SetGpr(op.ra, result); if (op.rc) SetCrFieldSignedCmp(0, result, m_ir->getInt64(0)); } void PPUTranslator::STVRXL(ppu_opcode_t op) { return STVRX(op); } void PPUTranslator::EXTSB(ppu_opcode_t op) { const auto result = SExt(GetGpr(op.rs, 8), GetType<s64>()); SetGpr(op.ra, result); if (op.rc) SetCrFieldSignedCmp(0, result, m_ir->getInt64(0)); } void PPUTranslator::STFIWX(ppu_opcode_t op) { WriteMemory(op.ra ? m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)) : GetGpr(op.rb), GetFpr(op.frs, 32, true)); } void PPUTranslator::EXTSW(ppu_opcode_t op) { const auto result = SExt(GetGpr(op.rs, 32)); SetGpr(op.ra, result); if (op.rc) SetCrFieldSignedCmp(0, result, m_ir->getInt64(0)); } void PPUTranslator::ICBI(ppu_opcode_t) { } void PPUTranslator::DCBZ(ppu_opcode_t op) { const auto addr = m_ir->CreateAnd(op.ra ? 
m_ir->CreateAdd(GetGpr(op.ra), GetGpr(op.rb)) : GetGpr(op.rb), -128); if (g_cfg.core.accurate_cache_line_stores) { Call(GetType<void>(), "__dcbz", addr); } else { Call(GetType<void>(), "llvm.memset.p0.i32", GetMemory(addr), m_ir->getInt8(0), m_ir->getInt32(128), m_ir->getFalse()); } } void PPUTranslator::LWZ(ppu_opcode_t op) { Value* imm = m_ir->getInt64(op.simm16); if (m_rel && (m_rel->type >= 4u && m_rel->type <= 6u)) { imm = SExt(ReadMemory(GetAddr(+2), GetType<u16>()), GetType<u64>()); m_rel = nullptr; } m_may_be_mmio &= (op.ra != 1u && op.ra != 13u); // Stack register and TLS address register are unlikely to be used in MMIO address calculation m_may_be_mmio &= op.simm16 == 0 || spu_thread::test_is_problem_state_register_offset(op.uimm16, true, false); // Either exact MMIO address or MMIO base with completing s16 address offset if (m_may_be_mmio) { struct instructions_data { be_t<u32> insts[3]; }; // Quick invalidation: expect exact MMIO address, so if the register is being reused with different offset than it's likely not MMIO if (auto ptr = m_info.get_ptr<instructions_data>(::narrow<u32>(m_addr + 4 + (m_reloc ? m_reloc->addr : 0)))) { for (u32 inst : ptr->insts) { ppu_opcode_t test_op{inst}; if (test_op.simm16 == op.simm16 || test_op.ra != op.ra) { // Same offset (at least according to this test) or different register continue; } if (op.simm16 && spu_thread::test_is_problem_state_register_offset(test_op.uimm16, true, false)) { // Found register reuse with different MMIO offset continue; } switch (g_ppu_itype.decode(inst)) { case ppu_itype::LWZ: case ppu_itype::STW: { // Not MMIO m_may_be_mmio = false; break; } default: break; } } } } SetGpr(op.rd, ReadMemory(op.ra ? 
m_ir->CreateAdd(GetGpr(op.ra), imm) : imm, GetType<u32>())); } void PPUTranslator::LWZU(ppu_opcode_t op) { Value* imm = m_ir->getInt64(op.simm16); if (m_rel && (m_rel->type >= 4u && m_rel->type <= 6u)) { imm = SExt(ReadMemory(GetAddr(+2), GetType<u16>()), GetType<u64>()); m_rel = nullptr; } m_may_be_mmio &= (op.ra != 1u && op.ra != 13u); // Stack register and TLS address register are unlikely to be used in MMIO address calculation m_may_be_mmio &= op.simm16 == 0 || spu_thread::test_is_problem_state_register_offset(op.uimm16, true, false); // Either exact MMIO address or MMIO base with completing s16 address offset const auto addr = m_ir->CreateAdd(GetGpr(op.ra), imm); SetGpr(op.rd, ReadMemory(addr, GetType<u32>())); SetGpr(op.ra, addr); } void PPUTranslator::LBZ(ppu_opcode_t op) { Value* imm = m_ir->getInt64(op.simm16); if (m_rel && (m_rel->type >= 4u && m_rel->type <= 6u)) { imm = SExt(ReadMemory(GetAddr(+2), GetType<u16>()), GetType<u64>()); m_rel = nullptr; } SetGpr(op.rd, ReadMemory(op.ra ? 
m_ir->CreateAdd(GetGpr(op.ra), imm) : imm, GetType<u8>())); } void PPUTranslator::LBZU(ppu_opcode_t op) { Value* imm = m_ir->getInt64(op.simm16); if (m_rel && (m_rel->type >= 4u && m_rel->type <= 6u)) { imm = SExt(ReadMemory(GetAddr(+2), GetType<u16>()), GetType<u64>()); m_rel = nullptr; } const auto addr = m_ir->CreateAdd(GetGpr(op.ra), imm); SetGpr(op.rd, ReadMemory(addr, GetType<u8>())); SetGpr(op.ra, addr); } void PPUTranslator::STW(ppu_opcode_t op) { Value* imm = m_ir->getInt64(op.simm16); if (m_rel && (m_rel->type >= 4u && m_rel->type <= 6u)) { imm = SExt(ReadMemory(GetAddr(+2), GetType<u16>()), GetType<u64>()); m_rel = nullptr; } m_may_be_mmio &= (op.ra != 1u && op.ra != 13u); // Stack register and TLS address register are unlikely to be used in MMIO address calculation m_may_be_mmio &= op.simm16 == 0 || spu_thread::test_is_problem_state_register_offset(op.uimm16, false, true); // Either exact MMIO address or MMIO base with completing s16 address offset if (m_may_be_mmio) { struct instructions_data { be_t<u32> insts[3]; }; // Quick invalidation: expect exact MMIO address, so if the register is being reused with different offset than it's likely not MMIO if (auto ptr = m_info.get_ptr<instructions_data>(::narrow<u32>(m_addr + 4 + (m_reloc ? m_reloc->addr : 0)))) { for (u32 inst : ptr->insts) { ppu_opcode_t test_op{inst}; if (test_op.simm16 == op.simm16 || test_op.ra != op.ra) { // Same offset (at least according to this test) or different register continue; } if (op.simm16 && spu_thread::test_is_problem_state_register_offset(test_op.uimm16, false, true)) { // Found register reuse with different MMIO offset continue; } switch (g_ppu_itype.decode(inst)) { case ppu_itype::LWZ: case ppu_itype::STW: { // Not MMIO m_may_be_mmio = false; break; } default: break; } } } } const auto value = GetGpr(op.rs, 32); const auto addr = op.ra ? 
m_ir->CreateAdd(GetGpr(op.ra), imm) : imm;
	WriteMemory(addr, value);

	// Insomniac engine v3 & v4 (newer R&C, Fuse, Resistance 3)
	// Storing the constant 0xAAAAAAAA triggers an engine-specific hook
	if (auto ci = llvm::dyn_cast<ConstantInt>(value))
	{
		if (ci->getZExtValue() == 0xAAAAAAAA)
		{
			Call(GetType<void>(), "__resupdate", addr, m_ir->getInt32(128));
		}
	}
}

// Store Word with Update: store low 32 bits of rs at ra+imm, then write the effective address back to ra
void PPUTranslator::STWU(ppu_opcode_t op)
{
	Value* imm = m_ir->getInt64(op.simm16);

	if (m_rel && (m_rel->type >= 4u && m_rel->type <= 6u))
	{
		// Relocated immediate: read the patched half-word from the instruction stream instead
		imm = SExt(ReadMemory(GetAddr(+2), GetType<u16>()), GetType<u64>());
		m_rel = nullptr;
	}

	m_may_be_mmio &= (op.ra != 1u && op.ra != 13u); // Stack register and TLS address register are unlikely to be used in MMIO address calculation
	m_may_be_mmio &= op.simm16 == 0 || spu_thread::test_is_problem_state_register_offset(op.uimm16, false, true); // Either exact MMIO address or MMIO base with completing s16 address offset

	const auto addr = m_ir->CreateAdd(GetGpr(op.ra), imm);
	WriteMemory(addr, GetGpr(op.rs, 32));
	SetGpr(op.ra, addr);
}

// Store Byte
void PPUTranslator::STB(ppu_opcode_t op)
{
	Value* imm = m_ir->getInt64(op.simm16);

	if (m_rel && (m_rel->type >= 4u && m_rel->type <= 6u))
	{
		imm = SExt(ReadMemory(GetAddr(+2), GetType<u16>()), GetType<u64>());
		m_rel = nullptr;
	}

	WriteMemory(op.ra ? m_ir->CreateAdd(GetGpr(op.ra), imm) : imm, GetGpr(op.rs, 8));
}

// Store Byte with Update
void PPUTranslator::STBU(ppu_opcode_t op)
{
	Value* imm = m_ir->getInt64(op.simm16);

	if (m_rel && (m_rel->type >= 4u && m_rel->type <= 6u))
	{
		imm = SExt(ReadMemory(GetAddr(+2), GetType<u16>()), GetType<u64>());
		m_rel = nullptr;
	}

	const auto addr = m_ir->CreateAdd(GetGpr(op.ra), imm);
	WriteMemory(addr, GetGpr(op.rs, 8));
	SetGpr(op.ra, addr);
}

// Load Half-Word and Zero
void PPUTranslator::LHZ(ppu_opcode_t op)
{
	Value* imm = m_ir->getInt64(op.simm16);

	if (m_rel && (m_rel->type >= 4u && m_rel->type <= 6u))
	{
		imm = SExt(ReadMemory(GetAddr(+2), GetType<u16>()), GetType<u64>());
		m_rel = nullptr;
	}

	SetGpr(op.rd, ReadMemory(op.ra ?
m_ir->CreateAdd(GetGpr(op.ra), imm) : imm, GetType<u16>()));
}

// Load Half-Word and Zero with Update
void PPUTranslator::LHZU(ppu_opcode_t op)
{
	Value* imm = m_ir->getInt64(op.simm16);

	if (m_rel && (m_rel->type >= 4u && m_rel->type <= 6u))
	{
		// Relocated immediate: read the patched half-word from the instruction stream instead
		imm = SExt(ReadMemory(GetAddr(+2), GetType<u16>()), GetType<u64>());
		m_rel = nullptr;
	}

	const auto addr = m_ir->CreateAdd(GetGpr(op.ra), imm);
	SetGpr(op.rd, ReadMemory(addr, GetType<u16>()));
	SetGpr(op.ra, addr);
}

// Load Half-Word Algebraic: sign-extends the 16-bit value to 64 bits
void PPUTranslator::LHA(ppu_opcode_t op)
{
	Value* imm = m_ir->getInt64(op.simm16);

	if (m_rel && (m_rel->type >= 4u && m_rel->type <= 6u))
	{
		imm = SExt(ReadMemory(GetAddr(+2), GetType<u16>()), GetType<u64>());
		m_rel = nullptr;
	}

	SetGpr(op.rd, SExt(ReadMemory(op.ra ? m_ir->CreateAdd(GetGpr(op.ra), imm) : imm, GetType<s16>()), GetType<s64>()));
}

// Load Half-Word Algebraic with Update
void PPUTranslator::LHAU(ppu_opcode_t op)
{
	Value* imm = m_ir->getInt64(op.simm16);

	if (m_rel && (m_rel->type >= 4u && m_rel->type <= 6u))
	{
		imm = SExt(ReadMemory(GetAddr(+2), GetType<u16>()), GetType<u64>());
		m_rel = nullptr;
	}

	const auto addr = m_ir->CreateAdd(GetGpr(op.ra), imm);
	SetGpr(op.rd, SExt(ReadMemory(addr, GetType<s16>()), GetType<s64>()));
	SetGpr(op.ra, addr);
}

// Store Half-Word
void PPUTranslator::STH(ppu_opcode_t op)
{
	Value* imm = m_ir->getInt64(op.simm16);

	if (m_rel && (m_rel->type >= 4u && m_rel->type <= 6u))
	{
		imm = SExt(ReadMemory(GetAddr(+2), GetType<u16>()), GetType<u64>());
		m_rel = nullptr;
	}

	WriteMemory(op.ra ?
m_ir->CreateAdd(GetGpr(op.ra), imm) : imm, GetGpr(op.rs, 16));
}

// Store Half-Word with Update
void PPUTranslator::STHU(ppu_opcode_t op)
{
	Value* imm = m_ir->getInt64(op.simm16);

	if (m_rel && (m_rel->type >= 4u && m_rel->type <= 6u))
	{
		// Relocated immediate: read the patched half-word from the instruction stream instead
		imm = SExt(ReadMemory(GetAddr(+2), GetType<u16>()), GetType<u64>());
		m_rel = nullptr;
	}

	const auto addr = m_ir->CreateAdd(GetGpr(op.ra), imm);
	WriteMemory(addr, GetGpr(op.rs, 16));
	SetGpr(op.ra, addr);
}

// Load Multiple Word: load GPRs rd..r31 from consecutive words at ra+simm16
void PPUTranslator::LMW(ppu_opcode_t op)
{
	m_may_be_mmio &= op.rd == 31u && (op.ra != 1u && op.ra != 13u); // Stack register and TLS address register are unlikely to be used in MMIO address calculation

	for (u32 i = 0; i < 32 - op.rd; i++)
	{
		SetGpr(i + op.rd, ReadMemory(op.ra ? m_ir->CreateAdd(m_ir->getInt64(op.simm16 + i * 4), GetGpr(op.ra)) : m_ir->getInt64(op.simm16 + i * 4), GetType<u32>()));
	}
}

// Store Multiple Word: store GPRs rs..r31 to consecutive words at ra+simm16
void PPUTranslator::STMW(ppu_opcode_t op)
{
	m_may_be_mmio &= op.rs == 31u && (op.ra != 1u && op.ra != 13u); // Stack register and TLS address register are unlikely to be used in MMIO address calculation

	for (u32 i = 0; i < 32 - op.rs; i++)
	{
		WriteMemory(op.ra ? m_ir->CreateAdd(m_ir->getInt64(op.simm16 + i * 4), GetGpr(op.ra)) : m_ir->getInt64(op.simm16 + i * 4), GetGpr(i + op.rs, 32));
	}
}

// Load Floating-Point Single
void PPUTranslator::LFS(ppu_opcode_t op)
{
	Value* imm = m_ir->getInt64(op.simm16);

	if (m_rel && (m_rel->type >= 4u && m_rel->type <= 6u))
	{
		imm = SExt(ReadMemory(GetAddr(+2), GetType<u16>()), GetType<u64>());
		m_rel = nullptr;
	}

	m_may_be_mmio &= (op.ra != 1u && op.ra != 13u); // Stack register and TLS address register are unlikely to be used in MMIO address calculation
	m_may_be_mmio &= op.simm16 == 0 || spu_thread::test_is_problem_state_register_offset(op.uimm16, true, false); // Either exact MMIO address or MMIO base with completing s16 address offset

	SetFpr(op.frd, ReadMemory(op.ra ?
m_ir->CreateAdd(GetGpr(op.ra), imm) : imm, GetType<f32>()));
}

// Load Floating-Point Single with Update
void PPUTranslator::LFSU(ppu_opcode_t op)
{
	Value* imm = m_ir->getInt64(op.simm16);

	if (m_rel && (m_rel->type >= 4u && m_rel->type <= 6u))
	{
		// Relocated immediate: read the patched half-word from the instruction stream instead
		imm = SExt(ReadMemory(GetAddr(+2), GetType<u16>()), GetType<u64>());
		m_rel = nullptr;
	}

	m_may_be_mmio &= (op.ra != 1u && op.ra != 13u); // Stack register and TLS address register are unlikely to be used in MMIO address calculation
	m_may_be_mmio &= op.simm16 == 0 || spu_thread::test_is_problem_state_register_offset(op.uimm16, true, false); // Either exact MMIO address or MMIO base with completing s16 address offset

	const auto addr = m_ir->CreateAdd(GetGpr(op.ra), imm);
	SetFpr(op.frd, ReadMemory(addr, GetType<f32>()));
	SetGpr(op.ra, addr);
}

// Load Floating-Point Double
void PPUTranslator::LFD(ppu_opcode_t op)
{
	Value* imm = m_ir->getInt64(op.simm16);

	if (m_rel && (m_rel->type >= 4u && m_rel->type <= 6u))
	{
		imm = SExt(ReadMemory(GetAddr(+2), GetType<u16>()), GetType<u64>());
		m_rel = nullptr;
	}

	SetFpr(op.frd, ReadMemory(op.ra ?
m_ir->CreateAdd(GetGpr(op.ra), imm) : imm, GetType<f64>()));
}

// Load Floating-Point Double with Update
void PPUTranslator::LFDU(ppu_opcode_t op)
{
	Value* imm = m_ir->getInt64(op.simm16);

	if (m_rel && (m_rel->type >= 4u && m_rel->type <= 6u))
	{
		// Relocated immediate: read the patched half-word from the instruction stream instead
		imm = SExt(ReadMemory(GetAddr(+2), GetType<u16>()), GetType<u64>());
		m_rel = nullptr;
	}

	const auto addr = m_ir->CreateAdd(GetGpr(op.ra), imm);
	SetFpr(op.frd, ReadMemory(addr, GetType<f64>()));
	SetGpr(op.ra, addr);
}

// Store Floating-Point Single
void PPUTranslator::STFS(ppu_opcode_t op)
{
	Value* imm = m_ir->getInt64(op.simm16);

	if (m_rel && (m_rel->type >= 4u && m_rel->type <= 6u))
	{
		imm = SExt(ReadMemory(GetAddr(+2), GetType<u16>()), GetType<u64>());
		m_rel = nullptr;
	}
	else
	{
		// The MMIO offset test only applies to a non-relocated immediate
		m_may_be_mmio &= op.simm16 == 0 || spu_thread::test_is_problem_state_register_offset(op.uimm16, false, true); // Either exact MMIO address or MMIO base with completing s16 address offset
	}

	m_may_be_mmio &= (op.ra != 1u && op.ra != 13u); // Stack register and TLS address register are unlikely to be used in MMIO address calculation

	WriteMemory(op.ra ?
m_ir->CreateAdd(GetGpr(op.ra), imm) : imm, GetFpr(op.frs, 32));
}

// Store Floating-Point Single with Update
void PPUTranslator::STFSU(ppu_opcode_t op)
{
	Value* imm = m_ir->getInt64(op.simm16);

	if (m_rel && (m_rel->type >= 4u && m_rel->type <= 6u))
	{
		// Relocated immediate: read the patched half-word from the instruction stream instead
		imm = SExt(ReadMemory(GetAddr(+2), GetType<u16>()), GetType<u64>());
		m_rel = nullptr;
	}
	else
	{
		// The MMIO offset test only applies to a non-relocated immediate
		m_may_be_mmio &= op.simm16 == 0 || spu_thread::test_is_problem_state_register_offset(op.uimm16, false, true); // Either exact MMIO address or MMIO base with completing s16 address offset
	}

	m_may_be_mmio &= (op.ra != 1u && op.ra != 13u); // Stack register and TLS address register are unlikely to be used in MMIO address calculation

	const auto addr = m_ir->CreateAdd(GetGpr(op.ra), imm);
	WriteMemory(addr, GetFpr(op.frs, 32));
	SetGpr(op.ra, addr);
}

// Store Floating-Point Double
void PPUTranslator::STFD(ppu_opcode_t op)
{
	Value* imm = m_ir->getInt64(op.simm16);

	if (m_rel && (m_rel->type >= 4u && m_rel->type <= 6u))
	{
		imm = SExt(ReadMemory(GetAddr(+2), GetType<u16>()), GetType<u64>());
		m_rel = nullptr;
	}

	WriteMemory(op.ra ? m_ir->CreateAdd(GetGpr(op.ra), imm) : imm, GetFpr(op.frs));
}

// Store Floating-Point Double with Update
void PPUTranslator::STFDU(ppu_opcode_t op)
{
	Value* imm = m_ir->getInt64(op.simm16);

	if (m_rel && (m_rel->type >= 4u && m_rel->type <= 6u))
	{
		imm = SExt(ReadMemory(GetAddr(+2), GetType<u16>()), GetType<u64>());
		m_rel = nullptr;
	}

	const auto addr = m_ir->CreateAdd(GetGpr(op.ra), imm);
	WriteMemory(addr, GetFpr(op.frs));
	SetGpr(op.ra, addr);
}

// Load Double-Word (DS-form: offset is ds << 2)
void PPUTranslator::LD(ppu_opcode_t op)
{
	Value* imm = m_ir->getInt64(op.ds << 2);

	if (m_rel && m_rel->type == 57)
	{
		// Relocated DS offset: read patched half-word and clear the low 2 bits
		imm = m_ir->CreateAnd(SExt(ReadMemory(GetAddr(+2), GetType<u16>()), GetType<u64>()), ~3);
		m_rel = nullptr;
	}

	SetGpr(op.rd, ReadMemory(op.ra ?
m_ir->CreateAdd(GetGpr(op.ra), imm) : imm, GetType<u64>())); } void PPUTranslator::LDU(ppu_opcode_t op) { Value* imm = m_ir->getInt64(op.ds << 2); if (m_rel && m_rel->type == 57) { imm = m_ir->CreateAnd(SExt(ReadMemory(GetAddr(+2), GetType<u16>()), GetType<u64>()), ~3); m_rel = nullptr; } const auto addr = m_ir->CreateAdd(GetGpr(op.ra), imm); SetGpr(op.rd, ReadMemory(addr, GetType<u64>())); SetGpr(op.ra, addr); } void PPUTranslator::LWA(ppu_opcode_t op) { Value* imm = m_ir->getInt64(op.ds << 2); if (m_rel && m_rel->type == 57) { imm = m_ir->CreateAnd(SExt(ReadMemory(GetAddr(+2), GetType<u16>()), GetType<u64>()), ~3); m_rel = nullptr; } SetGpr(op.rd, SExt(ReadMemory(op.ra ? m_ir->CreateAdd(GetGpr(op.ra), imm) : imm, GetType<s32>()))); } void PPUTranslator::STD(ppu_opcode_t op) { Value* imm = m_ir->getInt64(op.ds << 2); if (m_rel && m_rel->type == 57) { imm = m_ir->CreateAnd(SExt(ReadMemory(GetAddr(+2), GetType<u16>()), GetType<u64>()), ~3); m_rel = nullptr; } WriteMemory(op.ra ? m_ir->CreateAdd(GetGpr(op.ra), imm) : imm, GetGpr(op.rs)); } void PPUTranslator::STDU(ppu_opcode_t op) { Value* imm = m_ir->getInt64(op.ds << 2); if (m_rel && m_rel->type == 57) { imm = m_ir->CreateAnd(SExt(ReadMemory(GetAddr(+2), GetType<u16>()), GetType<u64>()), ~3); m_rel = nullptr; } const auto addr = m_ir->CreateAdd(GetGpr(op.ra), imm); WriteMemory(addr, GetGpr(op.rs)); SetGpr(op.ra, addr); } void PPUTranslator::FDIVS(ppu_opcode_t op) { const auto a = GetFpr(op.fra); const auto b = GetFpr(op.frb); const auto result = m_ir->CreateFPTrunc(m_ir->CreateFDiv(a, b), GetType<f32>()); SetFpr(op.frd, result); //SetFPSCR_FR(Call(GetType<bool>(), m_pure_attr, "__fdivs_get_fr", a, b)); //SetFPSCR_FI(Call(GetType<bool>(), m_pure_attr, "__fdivs_get_fi", a, b)); //SetFPSCRException(m_fpscr_ox, Call(GetType<bool>(), m_pure_attr, "__fdivs_get_ox", a, b)); //SetFPSCRException(m_fpscr_ux, Call(GetType<bool>(), m_pure_attr, "__fdivs_get_ux", a, b)); //SetFPSCRException(m_fpscr_zx, Call(GetType<bool>(), 
m_pure_attr, "__fdivs_get_zx", a, b)); //SetFPSCRException(m_fpscr_vxsnan, Call(GetType<bool>(), m_pure_attr, "__fdivs_get_vxsnan", a, b)); //SetFPSCRException(m_fpscr_vxidi, Call(GetType<bool>(), m_pure_attr, "__fdivs_get_vxidi", a, b)); //SetFPSCRException(m_fpscr_vxzdz, Call(GetType<bool>(), m_pure_attr, "__fdivs_get_vxzdz", a, b)); SetFPRF(result, op.rc != 0); } void PPUTranslator::FSUBS(ppu_opcode_t op) { const auto a = GetFpr(op.fra); const auto b = GetFpr(op.frb); const auto result = m_ir->CreateFPTrunc(m_ir->CreateFSub(a, b), GetType<f32>()); SetFpr(op.frd, result); //SetFPSCR_FR(Call(GetType<bool>(), m_pure_attr, "__fsubs_get_fr", a, b)); //SetFPSCR_FI(Call(GetType<bool>(), m_pure_attr, "__fsubs_get_fi", a, b)); //SetFPSCRException(m_fpscr_ox, Call(GetType<bool>(), m_pure_attr, "__fsubs_get_ox", a, b)); //SetFPSCRException(m_fpscr_ux, Call(GetType<bool>(), m_pure_attr, "__fsubs_get_ux", a, b)); //SetFPSCRException(m_fpscr_vxsnan, Call(GetType<bool>(), m_pure_attr, "__fsubs_get_vxsnan", a, b)); //SetFPSCRException(m_fpscr_vxisi, Call(GetType<bool>(), m_pure_attr, "__fsubs_get_vxisi", a, b)); SetFPRF(result, op.rc != 0); } void PPUTranslator::FADDS(ppu_opcode_t op) { const auto a = GetFpr(op.fra); const auto b = GetFpr(op.frb); const auto result = m_ir->CreateFPTrunc(m_ir->CreateFAdd(a, b), GetType<f32>()); SetFpr(op.frd, result); //SetFPSCR_FR(Call(GetType<bool>(), m_pure_attr, "__fadds_get_fr", a, b)); //SetFPSCR_FI(Call(GetType<bool>(), m_pure_attr, "__fadds_get_fi", a, b)); //SetFPSCRException(m_fpscr_ox, Call(GetType<bool>(), m_pure_attr, "__fadds_get_ox", a, b)); //SetFPSCRException(m_fpscr_ux, Call(GetType<bool>(), m_pure_attr, "__fadds_get_ux", a, b)); //SetFPSCRException(m_fpscr_vxsnan, Call(GetType<bool>(), m_pure_attr, "__fadds_get_vxsnan", a, b)); //SetFPSCRException(m_fpscr_vxisi, Call(GetType<bool>(), m_pure_attr, "__fadds_get_vxisi", a, b)); SetFPRF(result, op.rc != 0); } void PPUTranslator::FSQRTS(ppu_opcode_t op) { const auto b = 
GetFpr(op.frb); const auto result = m_ir->CreateFPTrunc(Call(GetType<f64>(), "llvm.sqrt.f64", b), GetType<f32>()); SetFpr(op.frd, result); //SetFPSCR_FR(Call(GetType<bool>(), m_pure_attr, "__fsqrts_get_fr", b)); //SetFPSCR_FI(Call(GetType<bool>(), m_pure_attr, "__fsqrts_get_fi", b)); //SetFPSCRException(m_fpscr_ox, Call(GetType<bool>(), m_pure_attr, "__fsqrts_get_ox", b)); //SetFPSCRException(m_fpscr_ux, Call(GetType<bool>(), m_pure_attr, "__fsqrts_get_ux", b)); //SetFPSCRException(m_fpscr_vxsnan, Call(GetType<bool>(), m_pure_attr, "__fsqrts_get_vxsnan", b)); //SetFPSCRException(m_fpscr_vxsqrt, Call(GetType<bool>(), m_pure_attr, "__fsqrts_get_vxsqrt", b)); SetFPRF(result, op.rc != 0); } void PPUTranslator::FRES(ppu_opcode_t op) { if (!m_fres_table) { m_fres_table = new GlobalVariable(*m_module, ArrayType::get(GetType<u32>(), 128), true, GlobalValue::PrivateLinkage, ConstantDataArray::get(m_context, ppu_fres_mantissas)); } const auto a = GetFpr(op.frb); const auto b = bitcast<u64>(a); const auto n = m_ir->CreateFCmpUNO(a, a); // test for NaN const auto e = m_ir->CreateAnd(m_ir->CreateLShr(b, 52), 0x7ff); // double exp const auto i = m_ir->CreateAnd(m_ir->CreateLShr(b, 45), 0x7f); // mantissa LUT index const auto ptr = dyn_cast<GetElementPtrInst>(m_ir->CreateGEP(m_fres_table->getValueType(), m_fres_table, {m_ir->getInt64(0), i})); assert(ptr->getResultElementType() == get_type<u32>()); const auto m = m_ir->CreateShl(ZExt(m_ir->CreateLoad(ptr->getResultElementType(), ptr)), 29); const auto c = m_ir->CreateICmpUGE(e, m_ir->getInt64(0x3ff + 0x80)); // test for INF const auto x = m_ir->CreateShl(m_ir->CreateSub(m_ir->getInt64(0x7ff - 2), e), 52); const auto s = m_ir->CreateSelect(c, m_ir->getInt64(0), m_ir->CreateOr(x, m)); const auto r = bitcast<f64>(m_ir->CreateSelect(n, m_ir->CreateOr(b, 0x8'0000'0000'0000), m_ir->CreateOr(s, m_ir->CreateAnd(b, 0x8000'0000'0000'0000)))); SetFpr(op.frd, m_ir->CreateFPTrunc(r, GetType<f32>())); //m_ir->CreateStore(GetUndef<bool>(), 
m_fpscr_fr); //m_ir->CreateStore(GetUndef<bool>(), m_fpscr_fi); //m_ir->CreateStore(GetUndef<bool>(), m_fpscr_xx); //SetFPSCRException(m_fpscr_ox, Call(GetType<bool>(), m_pure_attr, "__fres_get_ox", b)); //SetFPSCRException(m_fpscr_ux, Call(GetType<bool>(), m_pure_attr, "__fres_get_ux", b)); //SetFPSCRException(m_fpscr_zx, Call(GetType<bool>(), m_pure_attr, "__fres_get_zx", b)); //SetFPSCRException(m_fpscr_vxsnan, Call(GetType<bool>(), m_pure_attr, "__fres_get_vxsnan", b)); SetFPRF(r, op.rc != 0); } void PPUTranslator::FMULS(ppu_opcode_t op) { const auto a = GetFpr(op.fra); const auto c = GetFpr(op.frc); const auto result = m_ir->CreateFPTrunc(m_ir->CreateFMul(a, c), GetType<f32>()); SetFpr(op.frd, result); //SetFPSCR_FR(Call(GetType<bool>(), m_pure_attr, "__fmuls_get_fr", a, c)); //SetFPSCR_FI(Call(GetType<bool>(), m_pure_attr, "__fmuls_get_fi", a, c)); //SetFPSCRException(m_fpscr_ox, Call(GetType<bool>(), m_pure_attr, "__fmuls_get_ox", a, c)); //SetFPSCRException(m_fpscr_ux, Call(GetType<bool>(), m_pure_attr, "__fmuls_get_ux", a, c)); //SetFPSCRException(m_fpscr_vxsnan, Call(GetType<bool>(), m_pure_attr, "__fmuls_get_vxsnan", a, c)); //SetFPSCRException(m_fpscr_vximz, Call(GetType<bool>(), m_pure_attr, "__fmuls_get_vximz", a, c)); SetFPRF(result, op.rc != 0); } void PPUTranslator::FMADDS(ppu_opcode_t op) { const auto a = GetFpr(op.fra); const auto b = GetFpr(op.frb); const auto c = GetFpr(op.frc); llvm::Value* result; if (g_cfg.core.use_accurate_dfma) { result = m_ir->CreateCall(get_intrinsic<f64>(llvm::Intrinsic::fma), {a, c, b}); } else { result = m_ir->CreateFAdd(m_ir->CreateFMul(a, c), b); } SetFpr(op.frd, m_ir->CreateFPTrunc(result, GetType<f32>())); //SetFPSCR_FR(Call(GetType<bool>(), m_pure_attr, "__fmadds_get_fr", a, b, c)); //SetFPSCR_FI(Call(GetType<bool>(), m_pure_attr, "__fmadds_get_fi", a, b, c)); //SetFPSCRException(m_fpscr_ox, Call(GetType<bool>(), m_pure_attr, "__fmadds_get_ox", a, b, c)); //SetFPSCRException(m_fpscr_ux, Call(GetType<bool>(), 
m_pure_attr, "__fmadds_get_ux", a, b, c)); //SetFPSCRException(m_fpscr_vxsnan, Call(GetType<bool>(), m_pure_attr, "__fmadds_get_vxsnan", a, b, c)); //SetFPSCRException(m_fpscr_vxisi, Call(GetType<bool>(), m_pure_attr, "__fmadds_get_vxisi", a, b, c)); //SetFPSCRException(m_fpscr_vximz, Call(GetType<bool>(), m_pure_attr, "__fmadds_get_vximz", a, b, c)); SetFPRF(result, op.rc != 0); } void PPUTranslator::FMSUBS(ppu_opcode_t op) { const auto a = GetFpr(op.fra); const auto b = GetFpr(op.frb); const auto c = GetFpr(op.frc); llvm::Value* result; if (g_cfg.core.use_accurate_dfma) { result = m_ir->CreateCall(get_intrinsic<f64>(llvm::Intrinsic::fma), {a, c, m_ir->CreateFNeg(b)}); } else { result = m_ir->CreateFSub(m_ir->CreateFMul(a, c), b); } SetFpr(op.frd, m_ir->CreateFPTrunc(result, GetType<f32>())); //SetFPSCR_FR(Call(GetType<bool>(), m_pure_attr, "__fmadds_get_fr", a, b, c)); // TODO ??? //SetFPSCR_FI(Call(GetType<bool>(), m_pure_attr, "__fmadds_get_fi", a, b, c)); //SetFPSCRException(m_fpscr_ox, Call(GetType<bool>(), m_pure_attr, "__fmadds_get_ox", a, b, c)); //SetFPSCRException(m_fpscr_ux, Call(GetType<bool>(), m_pure_attr, "__fmadds_get_ux", a, b, c)); //SetFPSCRException(m_fpscr_vxsnan, Call(GetType<bool>(), m_pure_attr, "__fmadds_get_vxsnan", a, b, c)); //SetFPSCRException(m_fpscr_vxisi, Call(GetType<bool>(), m_pure_attr, "__fmadds_get_vxisi", a, b, c)); //SetFPSCRException(m_fpscr_vximz, Call(GetType<bool>(), m_pure_attr, "__fmadds_get_vximz", a, b, c)); SetFPRF(result, op.rc != 0); } void PPUTranslator::FNMSUBS(ppu_opcode_t op) { const auto a = GetFpr(op.fra); const auto b = GetFpr(op.frb); const auto c = GetFpr(op.frc); llvm::Value* result; if (g_cfg.core.use_accurate_dfma) { result = m_ir->CreateCall(get_intrinsic<f64>(llvm::Intrinsic::fma), {a, c, m_ir->CreateFNeg(b)}); } else { result = m_ir->CreateFSub(m_ir->CreateFMul(a, c), b); } SetFpr(op.frd, m_ir->CreateFPTrunc(m_ir->CreateFNeg(result), GetType<f32>())); //SetFPSCR_FR(Call(GetType<bool>(), m_pure_attr, 
"__fmadds_get_fr", a, b, c)); // TODO ??? //SetFPSCR_FI(Call(GetType<bool>(), m_pure_attr, "__fmadds_get_fi", a, b, c)); //SetFPSCRException(m_fpscr_ox, Call(GetType<bool>(), m_pure_attr, "__fmadds_get_ox", a, b, c)); //SetFPSCRException(m_fpscr_ux, Call(GetType<bool>(), m_pure_attr, "__fmadds_get_ux", a, b, c)); //SetFPSCRException(m_fpscr_vxsnan, Call(GetType<bool>(), m_pure_attr, "__fmadds_get_vxsnan", a, b, c)); //SetFPSCRException(m_fpscr_vxisi, Call(GetType<bool>(), m_pure_attr, "__fmadds_get_vxisi", a, b, c)); //SetFPSCRException(m_fpscr_vximz, Call(GetType<bool>(), m_pure_attr, "__fmadds_get_vximz", a, b, c)); SetFPRF(result, op.rc != 0); } void PPUTranslator::FNMADDS(ppu_opcode_t op) { const auto a = GetFpr(op.fra); const auto b = GetFpr(op.frb); const auto c = GetFpr(op.frc); llvm::Value* result; if (g_cfg.core.use_accurate_dfma) { result = m_ir->CreateCall(get_intrinsic<f64>(llvm::Intrinsic::fma), {a, c, b}); } else { result = m_ir->CreateFAdd(m_ir->CreateFMul(a, c), b); } SetFpr(op.frd, m_ir->CreateFPTrunc(m_ir->CreateFNeg(result), GetType<f32>())); //SetFPSCR_FR(Call(GetType<bool>(), m_pure_attr, "__fmadds_get_fr", a, b, c)); // TODO ??? 
//SetFPSCR_FI(Call(GetType<bool>(), m_pure_attr, "__fmadds_get_fi", a, b, c)); //SetFPSCRException(m_fpscr_ox, Call(GetType<bool>(), m_pure_attr, "__fmadds_get_ox", a, b, c)); //SetFPSCRException(m_fpscr_ux, Call(GetType<bool>(), m_pure_attr, "__fmadds_get_ux", a, b, c)); //SetFPSCRException(m_fpscr_vxsnan, Call(GetType<bool>(), m_pure_attr, "__fmadds_get_vxsnan", a, b, c)); //SetFPSCRException(m_fpscr_vxisi, Call(GetType<bool>(), m_pure_attr, "__fmadds_get_vxisi", a, b, c)); //SetFPSCRException(m_fpscr_vximz, Call(GetType<bool>(), m_pure_attr, "__fmadds_get_vximz", a, b, c)); SetFPRF(result, op.rc != 0); } void PPUTranslator::MTFSB1(ppu_opcode_t op) { SetFPSCRBit(op.crbd, m_ir->getTrue(), true); if (op.rc) SetCrFieldFPCC(1); } void PPUTranslator::MCRFS(ppu_opcode_t op) { const auto lt = GetFPSCRBit(op.crfs * 4 + 0); const auto gt = GetFPSCRBit(op.crfs * 4 + 1); const auto eq = GetFPSCRBit(op.crfs * 4 + 2); const auto un = GetFPSCRBit(op.crfs * 4 + 3); SetCrField(op.crfd, lt, gt, eq, un); } void PPUTranslator::MTFSB0(ppu_opcode_t op) { SetFPSCRBit(op.crbd, m_ir->getFalse(), false); if (op.rc) SetCrFieldFPCC(1); } void PPUTranslator::MTFSFI(ppu_opcode_t op) { SetFPSCRBit(op.crfd * 4 + 0, m_ir->getInt1((op.i & 8) != 0), false); if (op.crfd != 0) { SetFPSCRBit(op.crfd * 4 + 1, m_ir->getInt1((op.i & 4) != 0), false); SetFPSCRBit(op.crfd * 4 + 2, m_ir->getInt1((op.i & 2) != 0), false); } SetFPSCRBit(op.crfd * 4 + 3, m_ir->getInt1((op.i & 1) != 0), false); if (op.rc) SetCrFieldFPCC(1); } void PPUTranslator::MFFS(ppu_opcode_t op) { Value* result = m_ir->getInt64(0); for (u32 i = 16; i < 20; i++) { result = m_ir->CreateOr(result, m_ir->CreateShl(ZExt(RegLoad(m_fc[i]), GetType<u64>()), i ^ 31)); } SetFpr(op.frd, result); if (op.rc) SetCrFieldFPCC(1); } void PPUTranslator::MTFSF(ppu_opcode_t op) { const auto value = GetFpr(op.frb, 32, true); for (u32 i = 16; i < 20; i++) { if (i != 1 && i != 2 && (op.flm & (128 >> (i / 4))) != 0) { SetFPSCRBit(i, 
Trunc(m_ir->CreateLShr(value, i ^ 31), GetType<bool>()), false); } } if (op.rc) SetCrFieldFPCC(1); } void PPUTranslator::FCMPU(ppu_opcode_t op) { const auto a = GetFpr(op.fra); const auto b = GetFpr(op.frb); const auto lt = m_ir->CreateFCmpOLT(a, b); const auto gt = m_ir->CreateFCmpOGT(a, b); const auto eq = m_ir->CreateFCmpOEQ(a, b); const auto un = m_ir->CreateFCmpUNO(a, b); SetCrField(op.crfd, lt, gt, eq, un); SetFPCC(lt, gt, eq, un); //SetFPSCRException(m_fpscr_vxsnan, Call(GetType<bool>(), m_pure_attr, "__fcmpu_get_vxsnan", a, b)); } void PPUTranslator::FRSP(ppu_opcode_t op) { const auto b = GetFpr(op.frb); const auto result = m_ir->CreateFPTrunc(b, GetType<f32>()); SetFpr(op.frd, result); //SetFPSCR_FR(Call(GetType<bool>(), m_pure_attr, "__frsp_get_fr", b)); //SetFPSCR_FI(Call(GetType<bool>(), m_pure_attr, "__frsp_get_fi", b)); //SetFPSCRException(m_fpscr_ox, Call(GetType<bool>(), m_pure_attr, "__frsp_get_ox", b)); //SetFPSCRException(m_fpscr_ux, Call(GetType<bool>(), m_pure_attr, "__frsp_get_ux", b)); //SetFPSCRException(m_fpscr_vxsnan, Call(GetType<bool>(), m_pure_attr, "__frsp_get_vxsnan", b)); SetFPRF(result, op.rc != 0); } void PPUTranslator::FCTIW(ppu_opcode_t op) { const auto b = GetFpr(op.frb); const auto xormask = m_ir->CreateSExt(m_ir->CreateFCmpOGE(b, ConstantFP::get(GetType<f64>(), std::exp2l(31.))), GetType<s32>()); // fix result saturation (0x80000000 -> 0x7fffffff) #if defined(ARCH_X64) SetFpr(op.frd, m_ir->CreateXor(xormask, Call(GetType<s32>(), "llvm.x86.sse2.cvtsd2si", m_ir->CreateInsertElement(GetUndef<f64[2]>(), b, u64{0})))); #elif defined(ARCH_ARM64) SetFpr(op.frd, m_ir->CreateXor(xormask, Call(GetType<s32>(), "llvm.aarch64.neon.fcvtns.i32.f64", b))); #endif //SetFPSCR_FR(Call(GetType<bool>(), m_pure_attr, "__fctiw_get_fr", b)); //SetFPSCR_FI(Call(GetType<bool>(), m_pure_attr, "__fctiw_get_fi", b)); //SetFPSCRException(m_fpscr_vxsnan, Call(GetType<bool>(), m_pure_attr, "__fctiw_get_vxsnan", b)); //SetFPSCRException(m_fpscr_vxcvi, 
m_ir->CreateOr(sat_l, sat_h)); //m_ir->CreateStore(GetUndef<bool>(), m_fpscr_c); //SetFPCC(GetUndef<bool>(), GetUndef<bool>(), GetUndef<bool>(), GetUndef<bool>(), op.rc != 0); } void PPUTranslator::FCTIWZ(ppu_opcode_t op) { const auto b = GetFpr(op.frb); const auto xormask = m_ir->CreateSExt(m_ir->CreateFCmpOGE(b, ConstantFP::get(GetType<f64>(), std::exp2l(31.))), GetType<s32>()); // fix result saturation (0x80000000 -> 0x7fffffff) #if defined(ARCH_X64) SetFpr(op.frd, m_ir->CreateXor(xormask, Call(GetType<s32>(), "llvm.x86.sse2.cvttsd2si", m_ir->CreateInsertElement(GetUndef<f64[2]>(), b, u64{0})))); #elif defined(ARCH_ARM64) SetFpr(op.frd, m_ir->CreateXor(xormask, Call(GetType<s32>(), "llvm.aarch64.neon.fcvtzs.i32.f64", b))); #endif } void PPUTranslator::FDIV(ppu_opcode_t op) { const auto a = GetFpr(op.fra); const auto b = GetFpr(op.frb); const auto result = m_ir->CreateFDiv(a, b); SetFpr(op.frd, result); //SetFPSCR_FR(Call(GetType<bool>(), m_pure_attr, "__fdiv_get_fr", a, b)); //SetFPSCR_FI(Call(GetType<bool>(), m_pure_attr, "__fdiv_get_fi", a, b)); //SetFPSCRException(m_fpscr_ox, Call(GetType<bool>(), m_pure_attr, "__fdiv_get_ox", a, b)); //SetFPSCRException(m_fpscr_ux, Call(GetType<bool>(), m_pure_attr, "__fdiv_get_ux", a, b)); //SetFPSCRException(m_fpscr_zx, Call(GetType<bool>(), m_pure_attr, "__fdiv_get_zx", a, b)); //SetFPSCRException(m_fpscr_vxsnan, Call(GetType<bool>(), m_pure_attr, "__fdiv_get_vxsnan", a, b)); //SetFPSCRException(m_fpscr_vxidi, Call(GetType<bool>(), m_pure_attr, "__fdiv_get_vxidi", a, b)); //SetFPSCRException(m_fpscr_vxzdz, Call(GetType<bool>(), m_pure_attr, "__fdiv_get_vxzdz", a, b)); SetFPRF(result, op.rc != 0); } void PPUTranslator::FSUB(ppu_opcode_t op) { const auto a = GetFpr(op.fra); const auto b = GetFpr(op.frb); const auto result = m_ir->CreateFSub(a, b); SetFpr(op.frd, result); //SetFPSCR_FR(Call(GetType<bool>(), m_pure_attr, "__fsub_get_fr", a, b)); //SetFPSCR_FI(Call(GetType<bool>(), m_pure_attr, "__fsub_get_fi", a, b)); 
//SetFPSCRException(m_fpscr_ox, Call(GetType<bool>(), m_pure_attr, "__fsub_get_ox", a, b)); //SetFPSCRException(m_fpscr_ux, Call(GetType<bool>(), m_pure_attr, "__fsub_get_ux", a, b)); //SetFPSCRException(m_fpscr_vxsnan, Call(GetType<bool>(), m_pure_attr, "__fsub_get_vxsnan", a, b)); //SetFPSCRException(m_fpscr_vxisi, Call(GetType<bool>(), m_pure_attr, "__fsub_get_vxisi", a, b)); SetFPRF(result, op.rc != 0); } void PPUTranslator::FADD(ppu_opcode_t op) { const auto a = GetFpr(op.fra); const auto b = GetFpr(op.frb); const auto result = m_ir->CreateFAdd(a, b); SetFpr(op.frd, result); //SetFPSCR_FR(Call(GetType<bool>(), m_pure_attr, "__fadd_get_fr", a, b)); //SetFPSCR_FI(Call(GetType<bool>(), m_pure_attr, "__fadd_get_fi", a, b)); //SetFPSCRException(m_fpscr_ox, Call(GetType<bool>(), m_pure_attr, "__fadd_get_ox", a, b)); //SetFPSCRException(m_fpscr_ux, Call(GetType<bool>(), m_pure_attr, "__fadd_get_ux", a, b)); //SetFPSCRException(m_fpscr_vxsnan, Call(GetType<bool>(), m_pure_attr, "__fadd_get_vxsnan", a, b)); //SetFPSCRException(m_fpscr_vxisi, Call(GetType<bool>(), m_pure_attr, "__fadd_get_vxisi", a, b)); SetFPRF(result, op.rc != 0); } void PPUTranslator::FSQRT(ppu_opcode_t op) { const auto b = GetFpr(op.frb); const auto result = Call(GetType<f64>(), "llvm.sqrt.f64", b); SetFpr(op.frd, result); //SetFPSCR_FR(Call(GetType<bool>(), m_pure_attr, "__fsqrt_get_fr", b)); //SetFPSCR_FI(Call(GetType<bool>(), m_pure_attr, "__fsqrt_get_fi", b)); //SetFPSCRException(m_fpscr_ox, Call(GetType<bool>(), m_pure_attr, "__fsqrt_get_ox", b)); //SetFPSCRException(m_fpscr_ux, Call(GetType<bool>(), m_pure_attr, "__fsqrt_get_ux", b)); //SetFPSCRException(m_fpscr_vxsnan, Call(GetType<bool>(), m_pure_attr, "__fsqrt_get_vxsnan", b)); //SetFPSCRException(m_fpscr_vxsqrt, Call(GetType<bool>(), m_pure_attr, "__fsqrt_get_vxsqrt", b)); SetFPRF(result, op.rc != 0); } void PPUTranslator::FSEL(ppu_opcode_t op) { const auto a = GetFpr(op.fra); const auto b = GetFpr(op.frb); const auto c = GetFpr(op.frc); 
SetFpr(op.frd, m_ir->CreateSelect(m_ir->CreateFCmpOGE(a, ConstantFP::get(GetType<f64>(), 0.0)), c, b)); if (op.rc) SetCrFieldFPCC(1); } void PPUTranslator::FMUL(ppu_opcode_t op) { const auto a = GetFpr(op.fra); const auto c = GetFpr(op.frc); const auto result = m_ir->CreateFMul(a, c); SetFpr(op.frd, result); //SetFPSCR_FR(Call(GetType<bool>(), m_pure_attr, "__fmul_get_fr", a, c)); //SetFPSCR_FI(Call(GetType<bool>(), m_pure_attr, "__fmul_get_fi", a, c)); //SetFPSCRException(m_fpscr_ox, Call(GetType<bool>(), m_pure_attr, "__fmul_get_ox", a, c)); //SetFPSCRException(m_fpscr_ux, Call(GetType<bool>(), m_pure_attr, "__fmul_get_ux", a, c)); //SetFPSCRException(m_fpscr_vxsnan, Call(GetType<bool>(), m_pure_attr, "__fmul_get_vxsnan", a, c)); //SetFPSCRException(m_fpscr_vximz, Call(GetType<bool>(), m_pure_attr, "__fmul_get_vximz", a, c)); SetFPRF(result, op.rc != 0); } void PPUTranslator::FRSQRTE(ppu_opcode_t op) { if (!m_frsqrte_table) { m_frsqrte_table = new GlobalVariable(*m_module, ArrayType::get(GetType<u32>(), 0x8000), true, GlobalValue::PrivateLinkage, ConstantDataArray::get(m_context, ppu_frqrte_lut.data)); } const auto b = m_ir->CreateBitCast(GetFpr(op.frb), GetType<u64>()); const auto ptr = dyn_cast<GetElementPtrInst>(m_ir->CreateGEP(m_frsqrte_table->getValueType(), m_frsqrte_table, {m_ir->getInt64(0), m_ir->CreateLShr(b, 49)})); assert(ptr->getResultElementType() == get_type<u32>()); const auto v = m_ir->CreateLoad(ptr->getResultElementType(), ptr); const auto result = m_ir->CreateBitCast(m_ir->CreateShl(ZExt(v), 32), GetType<f64>()); SetFpr(op.frd, result); //m_ir->CreateStore(GetUndef<bool>(), m_fpscr_fr); //m_ir->CreateStore(GetUndef<bool>(), m_fpscr_fi); //m_ir->CreateStore(GetUndef<bool>(), m_fpscr_xx); //SetFPSCRException(m_fpscr_zx, Call(GetType<bool>(), m_pure_attr, "__frsqrte_get_zx", b)); //SetFPSCRException(m_fpscr_vxsnan, Call(GetType<bool>(), m_pure_attr, "__frsqrte_get_vxsnan", b)); //SetFPSCRException(m_fpscr_vxsqrt, Call(GetType<bool>(), 
m_pure_attr, "__frsqrte_get_vxsqrt", b)); SetFPRF(result, op.rc != 0); } void PPUTranslator::FMSUB(ppu_opcode_t op) { const auto a = GetFpr(op.fra); const auto b = GetFpr(op.frb); const auto c = GetFpr(op.frc); llvm::Value* result; if (g_cfg.core.use_accurate_dfma) { result = m_ir->CreateCall(get_intrinsic<f64>(llvm::Intrinsic::fma), {a, c, m_ir->CreateFNeg(b)}); } else { result = m_ir->CreateFSub(m_ir->CreateFMul(a, c), b); } SetFpr(op.frd, result); //SetFPSCR_FR(Call(GetType<bool>(), m_pure_attr, "__fmadd_get_fr", a, b, c)); // TODO ??? //SetFPSCR_FI(Call(GetType<bool>(), m_pure_attr, "__fmadd_get_fi", a, b, c)); //SetFPSCRException(m_fpscr_ox, Call(GetType<bool>(), m_pure_attr, "__fmadd_get_ox", a, b, c)); //SetFPSCRException(m_fpscr_ux, Call(GetType<bool>(), m_pure_attr, "__fmadd_get_ux", a, b, c)); //SetFPSCRException(m_fpscr_vxsnan, Call(GetType<bool>(), m_pure_attr, "__fmadd_get_vxsnan", a, b, c)); //SetFPSCRException(m_fpscr_vxisi, Call(GetType<bool>(), m_pure_attr, "__fmadd_get_vxisi", a, b, c)); //SetFPSCRException(m_fpscr_vximz, Call(GetType<bool>(), m_pure_attr, "__fmadd_get_vximz", a, b, c)); SetFPRF(result, op.rc != 0); } void PPUTranslator::FMADD(ppu_opcode_t op) { const auto a = GetFpr(op.fra); const auto b = GetFpr(op.frb); const auto c = GetFpr(op.frc); llvm::Value* result; if (g_cfg.core.use_accurate_dfma) { result = m_ir->CreateCall(get_intrinsic<f64>(llvm::Intrinsic::fma), { a, c, b }); } else { result = m_ir->CreateFAdd(m_ir->CreateFMul(a, c), b); } SetFpr(op.frd, result); //SetFPSCR_FR(Call(GetType<bool>(), m_pure_attr, "__fmadd_get_fr", a, b, c)); //SetFPSCR_FI(Call(GetType<bool>(), m_pure_attr, "__fmadd_get_fi", a, b, c)); //SetFPSCRException(m_fpscr_ox, Call(GetType<bool>(), m_pure_attr, "__fmadd_get_ox", a, b, c)); //SetFPSCRException(m_fpscr_ux, Call(GetType<bool>(), m_pure_attr, "__fmadd_get_ux", a, b, c)); //SetFPSCRException(m_fpscr_vxsnan, Call(GetType<bool>(), m_pure_attr, "__fmadd_get_vxsnan", a, b, c)); 
//SetFPSCRException(m_fpscr_vxisi, Call(GetType<bool>(), m_pure_attr, "__fmadd_get_vxisi", a, b, c)); //SetFPSCRException(m_fpscr_vximz, Call(GetType<bool>(), m_pure_attr, "__fmadd_get_vximz", a, b, c)); SetFPRF(result, op.rc != 0); } void PPUTranslator::FNMSUB(ppu_opcode_t op) { const auto a = GetFpr(op.fra); const auto b = GetFpr(op.frb); const auto c = GetFpr(op.frc); llvm::Value* result; if (g_cfg.core.use_accurate_dfma) { result = m_ir->CreateCall(get_intrinsic<f64>(llvm::Intrinsic::fma), {a, c, m_ir->CreateFNeg(b)}); } else { result = m_ir->CreateFSub(m_ir->CreateFMul(a, c), b); } SetFpr(op.frd, m_ir->CreateFNeg(result)); //SetFPSCR_FR(Call(GetType<bool>(), m_pure_attr, "__fmadd_get_fr", a, b, c)); // TODO ??? //SetFPSCR_FI(Call(GetType<bool>(), m_pure_attr, "__fmadd_get_fi", a, b, c)); //SetFPSCRException(m_fpscr_ox, Call(GetType<bool>(), m_pure_attr, "__fmadd_get_ox", a, b, c)); //SetFPSCRException(m_fpscr_ux, Call(GetType<bool>(), m_pure_attr, "__fmadd_get_ux", a, b, c)); //SetFPSCRException(m_fpscr_vxsnan, Call(GetType<bool>(), m_pure_attr, "__fmadd_get_vxsnan", a, b, c)); //SetFPSCRException(m_fpscr_vxisi, Call(GetType<bool>(), m_pure_attr, "__fmadd_get_vxisi", a, b, c)); //SetFPSCRException(m_fpscr_vximz, Call(GetType<bool>(), m_pure_attr, "__fmadd_get_vximz", a, b, c)); SetFPRF(result, op.rc != 0); } void PPUTranslator::FNMADD(ppu_opcode_t op) { const auto a = GetFpr(op.fra); const auto b = GetFpr(op.frb); const auto c = GetFpr(op.frc); llvm::Value* result; if (g_cfg.core.use_accurate_dfma) { result = m_ir->CreateCall(get_intrinsic<f64>(llvm::Intrinsic::fma), {a, c, b}); } else { result = m_ir->CreateFAdd(m_ir->CreateFMul(a, c), b); } SetFpr(op.frd, m_ir->CreateFNeg(result)); //SetFPSCR_FR(Call(GetType<bool>(), m_pure_attr, "__fmadd_get_fr", a, b, c)); // TODO ??? 
//SetFPSCR_FI(Call(GetType<bool>(), m_pure_attr, "__fmadd_get_fi", a, b, c)); //SetFPSCRException(m_fpscr_ox, Call(GetType<bool>(), m_pure_attr, "__fmadd_get_ox", a, b, c)); //SetFPSCRException(m_fpscr_ux, Call(GetType<bool>(), m_pure_attr, "__fmadd_get_ux", a, b, c)); //SetFPSCRException(m_fpscr_vxsnan, Call(GetType<bool>(), m_pure_attr, "__fmadd_get_vxsnan", a, b, c)); //SetFPSCRException(m_fpscr_vxisi, Call(GetType<bool>(), m_pure_attr, "__fmadd_get_vxisi", a, b, c)); //SetFPSCRException(m_fpscr_vximz, Call(GetType<bool>(), m_pure_attr, "__fmadd_get_vximz", a, b, c)); SetFPRF(result, op.rc != 0); } void PPUTranslator::FCMPO(ppu_opcode_t op) { const auto a = GetFpr(op.fra); const auto b = GetFpr(op.frb); const auto lt = m_ir->CreateFCmpOLT(a, b); const auto gt = m_ir->CreateFCmpOGT(a, b); const auto eq = m_ir->CreateFCmpOEQ(a, b); const auto un = m_ir->CreateFCmpUNO(a, b); SetCrField(op.crfd, lt, gt, eq, un); SetFPCC(lt, gt, eq, un); //SetFPSCRException(m_fpscr_vxsnan, Call(GetType<bool>(), m_pure_attr, "__fcmpo_get_vxsnan", a, b)); //SetFPSCRException(m_fpscr_vxvc, Call(GetType<bool>(), m_pure_attr, "__fcmpo_get_vxvc", a, b)); } void PPUTranslator::FNEG(ppu_opcode_t op) { const auto b = GetFpr(op.frb); SetFpr(op.frd, m_ir->CreateFNeg(b)); if (op.rc) SetCrFieldFPCC(1); } void PPUTranslator::FMR(ppu_opcode_t op) { SetFpr(op.frd, GetFpr(op.frb)); if (op.rc) SetCrFieldFPCC(1); } void PPUTranslator::FNABS(ppu_opcode_t op) { SetFpr(op.frd, m_ir->CreateFNeg(Call(GetType<f64>(), "llvm.fabs.f64", GetFpr(op.frb)))); if (op.rc) SetCrFieldFPCC(1); } void PPUTranslator::FABS(ppu_opcode_t op) { SetFpr(op.frd, Call(GetType<f64>(), "llvm.fabs.f64", GetFpr(op.frb))); if (op.rc) SetCrFieldFPCC(1); } void PPUTranslator::FCTID(ppu_opcode_t op) { const auto b = GetFpr(op.frb); const auto xormask = m_ir->CreateSExt(m_ir->CreateFCmpOGE(b, ConstantFP::get(GetType<f64>(), std::exp2l(63.))), GetType<s64>()); // fix result saturation (0x8000000000000000 -> 0x7fffffffffffffff) #if 
defined(ARCH_X64) SetFpr(op.frd, m_ir->CreateXor(xormask, Call(GetType<s64>(), "llvm.x86.sse2.cvtsd2si64", m_ir->CreateInsertElement(GetUndef<f64[2]>(), b, u64{0})))); #elif defined(ARCH_ARM64) SetFpr(op.frd, m_ir->CreateXor(xormask, Call(GetType<s64>(), "llvm.aarch64.neon.fcvtns.i64.f64", b))); #endif //SetFPSCR_FR(Call(GetType<bool>(), m_pure_attr, "__fctid_get_fr", b)); //SetFPSCR_FI(Call(GetType<bool>(), m_pure_attr, "__fctid_get_fi", b)); //SetFPSCRException(m_fpscr_vxsnan, Call(GetType<bool>(), m_pure_attr, "__fctid_get_vxsnan", b)); //SetFPSCRException(m_fpscr_vxcvi, m_ir->CreateOr(sat_l, sat_h)); //m_ir->CreateStore(GetUndef<bool>(), m_fpscr_c); //SetFPCC(GetUndef<bool>(), GetUndef<bool>(), GetUndef<bool>(), GetUndef<bool>(), op.rc != 0); } void PPUTranslator::FCTIDZ(ppu_opcode_t op) { const auto b = GetFpr(op.frb); const auto xormask = m_ir->CreateSExt(m_ir->CreateFCmpOGE(b, ConstantFP::get(GetType<f64>(), std::exp2l(63.))), GetType<s64>()); // fix result saturation (0x8000000000000000 -> 0x7fffffffffffffff) #if defined(ARCH_X64) SetFpr(op.frd, m_ir->CreateXor(xormask, Call(GetType<s64>(), "llvm.x86.sse2.cvttsd2si64", m_ir->CreateInsertElement(GetUndef<f64[2]>(), b, u64{0})))); #elif defined(ARCH_ARM64) SetFpr(op.frd, m_ir->CreateXor(xormask, Call(GetType<s64>(), "llvm.aarch64.neon.fcvtzs.i64.f64", b))); #endif } void PPUTranslator::FCFID(ppu_opcode_t op) { const auto b = GetFpr(op.frb, 64, true); const auto result = m_ir->CreateSIToFP(b, GetType<f64>()); SetFpr(op.frd, result); //SetFPSCR_FR(Call(GetType<bool>(), m_pure_attr, "__fcfid_get_fr", b)); //SetFPSCR_FI(Call(GetType<bool>(), m_pure_attr, "__fcfid_get_fi", b)); SetFPRF(result, op.rc != 0); } void PPUTranslator::UNK(ppu_opcode_t op) { FlushRegisters(); Call(GetType<void>(), "__error", m_thread, GetAddr(), m_ir->getInt32(op.opcode)); //Call(GetType<void>(), "__escape", m_thread)->setTailCall(); m_ir->CreateRetVoid(); } Value* PPUTranslator::GetGpr(u32 r, u32 num_bits) { return 
Trunc(RegLoad(m_gpr[r]), m_ir->getIntNTy(num_bits)); } void PPUTranslator::SetGpr(u32 r, Value* value) { RegStore(ZExt(value, GetType<u64>()), m_gpr[r]); } Value* PPUTranslator::GetFpr(u32 r, u32 bits, bool as_int) { const auto value = RegLoad(m_fpr[r]); if (!as_int && bits == 64) { return value; } else if (!as_int && bits == 32) { return m_ir->CreateFPTrunc(value, GetType<f32>()); } else { return Trunc(bitcast(value, GetType<u64>()), m_ir->getIntNTy(bits)); } } void PPUTranslator::SetFpr(u32 r, Value* val) { const auto f64_val = val->getType() == GetType<s32>() ? bitcast(SExt(val), GetType<f64>()) : val->getType() == GetType<s64>() ? bitcast(val, GetType<f64>()) : val->getType() == GetType<f32>() ? m_ir->CreateFPExt(val, GetType<f64>()) : val; RegStore(f64_val, m_fpr[r]); } Value* PPUTranslator::GetVr(u32 vr, VrType type) { const auto value = RegLoad(m_vr[vr]); llvm::Type* _type{}; switch (type) { case VrType::vi32: _type = GetType<u32[4]>(); break; case VrType::vi8 : _type = GetType<u8[16]>(); break; case VrType::vi16: _type = GetType<u16[8]>(); break; case VrType::vf : _type = GetType<f32[4]>(); break; case VrType::i128: _type = GetType<u128>(); break; default: ensure(false); } return bitcast(value, _type); } void PPUTranslator::SetVr(u32 vr, Value* value) { const auto type = value->getType(); const auto size = type->getPrimitiveSizeInBits(); if (type->isVectorTy() && size != 128) { if (type->getScalarType()->isIntegerTy(1)) { // Sign-extend bool values value = SExt(value, ScaleType(type, 7 - s32(std::log2(+size)))); } else if (size == 256 || size == 512) { // Truncate big vectors value = Trunc(value, ScaleType(type, 7 - s32(std::log2(+size)))); } } ensure(value->getType()->getPrimitiveSizeInBits() == 128); RegStore(value, m_vr[vr]); } Value* PPUTranslator::GetCrb(u32 crb) { return RegLoad(m_cr[crb]); } void PPUTranslator::SetCrb(u32 crb, Value* value) { RegStore(value, m_cr[crb]); } void PPUTranslator::SetCrField(u32 group, Value* lt, Value* gt, Value* eq, 
Value* so) { SetCrb(group * 4 + 0, lt ? lt : GetUndef<bool>()); SetCrb(group * 4 + 1, gt ? gt : GetUndef<bool>()); SetCrb(group * 4 + 2, eq ? eq : GetUndef<bool>()); SetCrb(group * 4 + 3, so ? so : RegLoad(m_so)); } void PPUTranslator::SetCrFieldSignedCmp(u32 n, Value* a, Value* b) { const auto lt = m_ir->CreateICmpSLT(a, b); const auto gt = m_ir->CreateICmpSGT(a, b); const auto eq = m_ir->CreateICmpEQ(a, b); SetCrField(n, lt, gt, eq); } void PPUTranslator::SetCrFieldUnsignedCmp(u32 n, Value* a, Value* b) { const auto lt = m_ir->CreateICmpULT(a, b); const auto gt = m_ir->CreateICmpUGT(a, b); const auto eq = m_ir->CreateICmpEQ(a, b); SetCrField(n, lt, gt, eq); } void PPUTranslator::SetCrFieldFPCC(u32 n) { SetCrField(n, GetFPSCRBit(16), GetFPSCRBit(17), GetFPSCRBit(18), GetFPSCRBit(19)); } void PPUTranslator::SetFPCC(Value* lt, Value* gt, Value* eq, Value* un, bool set_cr) { SetFPSCRBit(16, lt, false); SetFPSCRBit(17, gt, false); SetFPSCRBit(18, eq, false); SetFPSCRBit(19, un, false); if (set_cr) SetCrField(1, lt, gt, eq, un); } void PPUTranslator::SetFPRF(Value* value, bool /*set_cr*/) { //const bool is32 = value->getType()->isFloatTy() ? true : value->getType()->isDoubleTy() ? false : ensure(false); //const auto zero = ConstantFP::get(value->getType(), 0.0); //const auto is_nan = m_ir->CreateFCmpUNO(value, zero); //const auto is_inf = Call(GetType<bool>(), m_pure_attr, is32 ? "__is_inf32" : "__is_inf", value); // TODO //const auto is_denorm = Call(GetType<bool>(), m_pure_attr, is32 ? "__is_denorm32" : "__is_denorm", value); // TODO //const auto is_neg_zero = Call(GetType<bool>(), m_pure_attr, is32 ? 
"__is_neg_zero32" : "__is_neg_zero", value); // TODO //const auto cc = m_ir->CreateOr(is_nan, m_ir->CreateOr(is_denorm, is_neg_zero)); //const auto lt = m_ir->CreateFCmpOLT(value, zero); //const auto gt = m_ir->CreateFCmpOGT(value, zero); //const auto eq = m_ir->CreateFCmpOEQ(value, zero); //const auto un = m_ir->CreateOr(is_nan, is_inf); //m_ir->CreateStore(cc, m_fpscr_c); //SetFPCC(lt, gt, eq, un, set_cr); } void PPUTranslator::SetFPSCR_FR(Value* /*value*/) { //m_ir->CreateStore(value, m_fpscr_fr); } void PPUTranslator::SetFPSCR_FI(Value* /*value*/) { //m_ir->CreateStore(value, m_fpscr_fi); //SetFPSCRException(m_fpscr_xx, value); } void PPUTranslator::SetFPSCRException(Value* /*ptr*/, Value* /*value*/) { //m_ir->CreateStore(m_ir->CreateOr(m_ir->CreateLoad(ptr), value), ptr); //m_ir->CreateStore(m_ir->CreateOr(m_ir->CreateLoad(m_fpscr_fx), value), m_fpscr_fx); } Value* PPUTranslator::GetFPSCRBit(u32 n) { //if (n == 1 && m_fpscr[24]) //{ // // Floating-Point Enabled Exception Summary (FEX) 24-29 // Value* value = m_ir->CreateLoad(m_fpscr[24]); // for (u32 i = 25; i <= 29; i++) value = m_ir->CreateOr(value, m_ir->CreateLoad(m_fpscr[i])); // return value; //} //if (n == 2 && m_fpscr[7]) //{ // // Floating-Point Invalid Operation Exception Summary (VX) 7-12, 21-23 // Value* value = m_ir->CreateLoad(m_fpscr[7]); // for (u32 i = 8; i <= 12; i++) value = m_ir->CreateOr(value, m_ir->CreateLoad(m_fpscr[i])); // for (u32 i = 21; i <= 23; i++) value = m_ir->CreateOr(value, m_ir->CreateLoad(m_fpscr[i])); // return value; //} if (n < 16 || n > 19) { return nullptr; // ??? } // Get bit const auto value = RegLoad(m_fc[n]); //if (n == 0 || (n >= 3 && n <= 12) || (n >= 21 && n <= 23)) //{ // // Clear FX or exception bits // m_ir->CreateStore(m_ir->getFalse(), m_fpscr[n]); //} return value; } void PPUTranslator::SetFPSCRBit(u32 n, Value* value, bool /*update_fx*/) { if (n < 16 || n > 19) { //CompilationError("SetFPSCRBit(): inaccessible bit " + std::to_string(n)); return; // ??? 
} //if (update_fx) //{ // if ((n >= 3 && n <= 12) || (n >= 21 && n <= 23)) // { // // Update FX bit if necessary // m_ir->CreateStore(m_ir->CreateOr(m_ir->CreateLoad(m_fpscr_fx), value), m_fpscr_fx); // } //} //if (n >= 24 && n <= 28) CompilationError("SetFPSCRBit: exception enable bit " + std::to_string(n)); //if (n == 29) CompilationError("SetFPSCRBit: NI bit"); //if (n >= 30) CompilationError("SetFPSCRBit: RN bit"); // Store the bit RegStore(value, m_fc[n]); } Value* PPUTranslator::GetCarry() { return RegLoad(m_ca); } void PPUTranslator::SetCarry(Value* bit) { RegStore(bit, m_ca); } void PPUTranslator::SetOverflow(Value* bit) { RegStore(bit, m_ov); RegStore(m_ir->CreateOr(RegLoad(m_so), bit), m_so); } Value* PPUTranslator::CheckTrapCondition(u32 to, Value* left, Value* right) { if ((to & 0x3) == 0x3 || (to & 0x18) == 0x18) { // Not-equal check or always-true return to & 0x4 ? m_ir->getTrue() : m_ir->CreateICmpNE(left, right); } Value* trap_condition = nullptr; auto add_condition = [&](Value* cond) { if (!trap_condition) { trap_condition = cond; return; } trap_condition = m_ir->CreateOr(trap_condition, cond); }; if (to & 0x10) add_condition(m_ir->CreateICmpSLT(left, right)); if (to & 0x8) add_condition(m_ir->CreateICmpSGT(left, right)); if (to & 0x4) add_condition(m_ir->CreateICmpEQ(left, right)); if (to & 0x2) add_condition(m_ir->CreateICmpULT(left, right)); if (to & 0x1) add_condition(m_ir->CreateICmpUGT(left, right)); return trap_condition ? trap_condition : m_ir->getFalse(); } void PPUTranslator::Trap() { Call(GetType<void>(), "__trap", m_thread, GetAddr()); //Call(GetType<void>(), "__escape", m_thread)->setTailCall(); m_ir->CreateRetVoid(); } Value* PPUTranslator::CheckBranchCondition(u32 bo, u32 bi) { const bool bo0 = (bo & 0x10) != 0; const bool bo1 = (bo & 0x08) != 0; const bool bo2 = (bo & 0x04) != 0; const bool bo3 = (bo & 0x02) != 0; // Decrement counter if necessary const auto ctr = bo2 ? 
nullptr : m_ir->CreateSub(RegLoad(m_ctr), m_ir->getInt64(1)); // Store counter if necessary if (ctr) RegStore(ctr, m_ctr); // Generate counter condition const auto use_ctr = bo2 ? nullptr : m_ir->CreateICmp(bo3 ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE, ctr, m_ir->getInt64(0)); // Generate condition bit access const auto use_cond = bo0 ? nullptr : bo1 ? GetCrb(bi) : m_ir->CreateNot(GetCrb(bi)); if (use_ctr && use_cond) { // Combine conditions if necessary return m_ir->CreateAnd(use_ctr, use_cond); } return use_ctr ? use_ctr : use_cond; } MDNode* PPUTranslator::CheckBranchProbability(u32 bo) { const bool bo0 = (bo & 0x10) != 0; const bool bo1 = (bo & 0x08) != 0; const bool bo2 = (bo & 0x04) != 0; const bool bo3 = (bo & 0x02) != 0; const bool bo4 = (bo & 0x01) != 0; if ((bo0 && bo1) || (bo2 && bo3)) { return bo4 ? m_md_likely : m_md_unlikely; } return nullptr; } void PPUTranslator::build_interpreter() { #define BUILD_VEC_INST(i) { \ m_function = llvm::cast<llvm::Function>(m_module->getOrInsertFunction("op_" #i, get_type<void>(), m_thread_type->getPointerTo()).getCallee()); \ std::fill(std::begin(m_globals), std::end(m_globals), nullptr); \ std::fill(std::begin(m_locals), std::end(m_locals), nullptr); \ IRBuilder<> irb(BasicBlock::Create(m_context, "__entry", m_function)); \ m_ir = &irb; \ m_thread = m_function->getArg(0); \ ppu_opcode_t op{}; \ op.vd = 0; \ op.va = 1; \ op.vb = 2; \ op.vc = 3; \ this->i(op); \ FlushRegisters(); \ m_ir->CreateRetVoid(); \ run_transforms(*m_function); \ } BUILD_VEC_INST(VADDCUW); BUILD_VEC_INST(VADDFP); BUILD_VEC_INST(VADDSBS); BUILD_VEC_INST(VADDSHS); BUILD_VEC_INST(VADDSWS); BUILD_VEC_INST(VADDUBM); BUILD_VEC_INST(VADDUBS); BUILD_VEC_INST(VADDUHM); BUILD_VEC_INST(VADDUHS); BUILD_VEC_INST(VADDUWM); BUILD_VEC_INST(VADDUWS); BUILD_VEC_INST(VAND); BUILD_VEC_INST(VANDC); BUILD_VEC_INST(VAVGSB); BUILD_VEC_INST(VAVGSH); BUILD_VEC_INST(VAVGSW); BUILD_VEC_INST(VAVGUB); BUILD_VEC_INST(VAVGUH); BUILD_VEC_INST(VAVGUW); BUILD_VEC_INST(VCFSX); 
BUILD_VEC_INST(VCFUX); BUILD_VEC_INST(VCMPBFP); BUILD_VEC_INST(VCMPBFP_); BUILD_VEC_INST(VCMPEQFP); BUILD_VEC_INST(VCMPEQFP_); BUILD_VEC_INST(VCMPEQUB); BUILD_VEC_INST(VCMPEQUB_); BUILD_VEC_INST(VCMPEQUH); BUILD_VEC_INST(VCMPEQUH_); BUILD_VEC_INST(VCMPEQUW); BUILD_VEC_INST(VCMPEQUW_); BUILD_VEC_INST(VCMPGEFP); BUILD_VEC_INST(VCMPGEFP_); BUILD_VEC_INST(VCMPGTFP); BUILD_VEC_INST(VCMPGTFP_); BUILD_VEC_INST(VCMPGTSB); BUILD_VEC_INST(VCMPGTSB_); BUILD_VEC_INST(VCMPGTSH); BUILD_VEC_INST(VCMPGTSH_); BUILD_VEC_INST(VCMPGTSW); BUILD_VEC_INST(VCMPGTSW_); BUILD_VEC_INST(VCMPGTUB); BUILD_VEC_INST(VCMPGTUB_); BUILD_VEC_INST(VCMPGTUH); BUILD_VEC_INST(VCMPGTUH_); BUILD_VEC_INST(VCMPGTUW); BUILD_VEC_INST(VCMPGTUW_); BUILD_VEC_INST(VCTSXS); BUILD_VEC_INST(VCTUXS); BUILD_VEC_INST(VEXPTEFP); BUILD_VEC_INST(VLOGEFP); BUILD_VEC_INST(VMADDFP); BUILD_VEC_INST(VMAXFP); BUILD_VEC_INST(VMAXSB); BUILD_VEC_INST(VMAXSH); BUILD_VEC_INST(VMAXSW); BUILD_VEC_INST(VMAXUB); BUILD_VEC_INST(VMAXUH); BUILD_VEC_INST(VMAXUW); BUILD_VEC_INST(VMHADDSHS); BUILD_VEC_INST(VMHRADDSHS); BUILD_VEC_INST(VMINFP); BUILD_VEC_INST(VMINSB); BUILD_VEC_INST(VMINSH); BUILD_VEC_INST(VMINSW); BUILD_VEC_INST(VMINUB); BUILD_VEC_INST(VMINUH); BUILD_VEC_INST(VMINUW); BUILD_VEC_INST(VMLADDUHM); BUILD_VEC_INST(VMRGHB); BUILD_VEC_INST(VMRGHH); BUILD_VEC_INST(VMRGHW); BUILD_VEC_INST(VMRGLB); BUILD_VEC_INST(VMRGLH); BUILD_VEC_INST(VMRGLW); BUILD_VEC_INST(VMSUMMBM); BUILD_VEC_INST(VMSUMSHM); BUILD_VEC_INST(VMSUMSHS); BUILD_VEC_INST(VMSUMUBM); BUILD_VEC_INST(VMSUMUHM); BUILD_VEC_INST(VMSUMUHS); BUILD_VEC_INST(VMULESB); BUILD_VEC_INST(VMULESH); BUILD_VEC_INST(VMULEUB); BUILD_VEC_INST(VMULEUH); BUILD_VEC_INST(VMULOSB); BUILD_VEC_INST(VMULOSH); BUILD_VEC_INST(VMULOUB); BUILD_VEC_INST(VMULOUH); BUILD_VEC_INST(VNMSUBFP); BUILD_VEC_INST(VNOR); BUILD_VEC_INST(VOR); BUILD_VEC_INST(VPERM); BUILD_VEC_INST(VPKPX); BUILD_VEC_INST(VPKSHSS); BUILD_VEC_INST(VPKSHUS); BUILD_VEC_INST(VPKSWSS); BUILD_VEC_INST(VPKSWUS); BUILD_VEC_INST(VPKUHUM); 
BUILD_VEC_INST(VPKUHUS); BUILD_VEC_INST(VPKUWUM); BUILD_VEC_INST(VPKUWUS); BUILD_VEC_INST(VREFP); BUILD_VEC_INST(VRFIM); BUILD_VEC_INST(VRFIN); BUILD_VEC_INST(VRFIP); BUILD_VEC_INST(VRFIZ); BUILD_VEC_INST(VRLB); BUILD_VEC_INST(VRLH); BUILD_VEC_INST(VRLW); BUILD_VEC_INST(VRSQRTEFP); BUILD_VEC_INST(VSEL); BUILD_VEC_INST(VSL); BUILD_VEC_INST(VSLB); BUILD_VEC_INST(VSLDOI); BUILD_VEC_INST(VSLH); BUILD_VEC_INST(VSLO); BUILD_VEC_INST(VSLW); BUILD_VEC_INST(VSPLTB); BUILD_VEC_INST(VSPLTH); BUILD_VEC_INST(VSPLTISB); BUILD_VEC_INST(VSPLTISH); BUILD_VEC_INST(VSPLTISW); BUILD_VEC_INST(VSPLTW); BUILD_VEC_INST(VSR); BUILD_VEC_INST(VSRAB); BUILD_VEC_INST(VSRAH); BUILD_VEC_INST(VSRAW); BUILD_VEC_INST(VSRB); BUILD_VEC_INST(VSRH); BUILD_VEC_INST(VSRO); BUILD_VEC_INST(VSRW); BUILD_VEC_INST(VSUBCUW); BUILD_VEC_INST(VSUBFP); BUILD_VEC_INST(VSUBSBS); BUILD_VEC_INST(VSUBSHS); BUILD_VEC_INST(VSUBSWS); BUILD_VEC_INST(VSUBUBM); BUILD_VEC_INST(VSUBUBS); BUILD_VEC_INST(VSUBUHM); BUILD_VEC_INST(VSUBUHS); BUILD_VEC_INST(VSUBUWM); BUILD_VEC_INST(VSUBUWS); BUILD_VEC_INST(VSUMSWS); BUILD_VEC_INST(VSUM2SWS); BUILD_VEC_INST(VSUM4SBS); BUILD_VEC_INST(VSUM4SHS); BUILD_VEC_INST(VSUM4UBS); BUILD_VEC_INST(VUPKHPX); BUILD_VEC_INST(VUPKHSB); BUILD_VEC_INST(VUPKHSH); BUILD_VEC_INST(VUPKLPX); BUILD_VEC_INST(VUPKLSB); BUILD_VEC_INST(VUPKLSH); BUILD_VEC_INST(VXOR); #undef BUILD_VEC_INST } #endif
167,519
C++
.cpp
4,694
33.493396
242
0.662478
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
5,186
SPUInterpreter.cpp
RPCS3_rpcs3/rpcs3/Emu/Cell/SPUInterpreter.cpp
#include "stdafx.h"
#include "SPUInterpreter.h"
#include "Utilities/JIT.h"
#include "SPUThread.h"
#include "Emu/Cell/Common.h"
#include "Emu/Cell/SPUAnalyser.h"
#include "Emu/system_config.h"
#include "util/asm.hpp"
#include "util/v128.hpp"
#include "util/simd.hpp"
#include "util/sysinfo.hpp"

#include <cmath>
#include <cfenv>

#if !defined(_MSC_VER)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wold-style-cast"
#endif

#if defined(ARCH_ARM64)
#if !defined(_MSC_VER)
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#endif
// On ARM64 the x86 SSE2 intrinsics used below are provided by the sse2neon translation header.
#undef FORCE_INLINE
#include "Emu/CPU/sse2neon.h"
#endif

// Opcode decoders defined in another translation unit.
const extern spu_decoder<spu_itype> g_spu_itype;
const extern spu_decoder<spu_iname> g_spu_iname;
const extern spu_decoder<spu_iflag> g_spu_iflag;

// Compile-time feature flags used to specialize interpreter functions.
enum class spu_exec_bit : u64
{
	use_dfma,

	__bitset_enum_max
};

using enum spu_exec_bit;

// Recursively instantiates an interpreter function for the flag set selected at runtime:
// each flag in `selected` is promoted into the Flags0 template pack, compiling both variants.
template <spu_exec_bit... Flags0>
struct spu_exec_select
{
	template <spu_exec_bit Flag, spu_exec_bit... Flags, typename F>
	static spu_intrp_func_t select(bs_t<spu_exec_bit> selected, F func)
	{
		// Make sure there is no flag duplication, otherwise skip flag
		if constexpr (((Flags0 != Flag) && ...))
		{
			// Test only relevant flags at runtime (compiling both variants)
			if (selected & Flag)
			{
				// In this branch, selected flag is added to Flags0
				return spu_exec_select<Flags0..., Flag>::template select<Flags...>(selected, func);
			}
		}

		return spu_exec_select<Flags0...>::template select<Flags...>(selected, func);
	}

	template <typename F>
	static spu_intrp_func_t select(bs_t<spu_exec_bit>, F func)
	{
		// Instantiate interpreter function with required set of flags
		return func.template operator()<Flags0...>();
	}
};

#ifdef ARCH_X64
static constexpr spu_opcode_t s_op{};
#endif

namespace asmjit
{
	// Emits code loading (or storing, if `store`) the SPU GPR selected by a 7-bit opcode
	// bitfield into xmm register `x`. Register index is scaled by 16 (sizeof(v128)).
	template <uint I, uint N>
	static void build_spu_gpr_load(x86::Assembler& c, x86::Xmm x, const bf_t<u32, I, N>&, bool store = false)
	{
		static_assert(N == 7, "Invalid bitfield");

		// First two integer args differ by calling convention (Win64 vs System V).
#ifdef _WIN32
		const auto& spu = x86::rcx;
		const auto& op = x86::edx;
#else
		const auto& spu = x86::rdi;
		const auto& op = x86::esi;
#endif

		c.mov(x86::eax, op);

		// Extract the 7-bit field at bit I and multiply by 16 in one shift.
		if constexpr (I >= 4)
		{
			c.shr(x86::eax, I - 4);
			c.and_(x86::eax, 0x7f << 4);
		}
		else
		{
			c.and_(x86::eax, 0x7f);
			c.shl(x86::eax, I + 4);
		}

		const auto ptr = x86::oword_ptr(spu, x86::rax, 0, ::offset32(&spu_thread::gpr));

		if (utils::has_avx())
		{
			if (store)
				c.vmovdqa(ptr, x);
			else
				c.vmovdqa(x, ptr);
		}
		else
		{
			if (store)
				c.movdqa(ptr, x);
			else
				c.movdqa(x, ptr);
		}
	}

	// Store counterpart: same emitter with store = true.
	template <uint I, uint N>
	static void build_spu_gpr_store(x86::Assembler& c, x86::Xmm x, const bf_t<u32, I, N>&, bool store = true)
	{
		build_spu_gpr_load(c, x, bf_t<u32, I, N>{}, store);
	}
}

// Fallback handler for unknown/illegal opcodes; stops execution.
template <spu_exec_bit... Flags>
bool UNK(spu_thread&, spu_opcode_t op)
{
	spu_log.fatal("Unknown/Illegal instruction (0x%08x)", op.opcode);
	return false;
}

// Applies the interrupt enable (e) / disable (d) bits carried by branch instructions,
// then services pending MFC interrupts if any.
void spu_interpreter::set_interrupt_status(spu_thread& spu, spu_opcode_t op)
{
	if (op.e)
	{
		if (op.d)
		{
			// e and d set together has no defined meaning
			fmt::throw_exception("Undefined behaviour");
		}

		spu.set_interrupt_status(true);
	}
	else if (op.d)
	{
		spu.set_interrupt_status(false);
	}

	if (spu.check_mfc_interrupts(spu.pc) && spu.state & cpu_flag::pending)
	{
		spu.do_mfc();
	}
}

// STOP: signal with the 14-bit stop code; returns false when the PC must not auto-advance.
template <spu_exec_bit... Flags>
bool STOP(spu_thread& spu, spu_opcode_t op)
{
	// Suppress interrupts while inside the stop handler, restore afterwards
	const bool allow = std::exchange(spu.allow_interrupts_in_cpu_work, false);

	const bool advance_pc = spu.stop_and_signal(op.opcode & 0x3fff);

	spu.allow_interrupts_in_cpu_work = allow;

	if (!advance_pc)
	{
		return false;
	}

	if (spu.state)
	{
		// Advance manually and yield to state processing
		spu.pc += 4;
		return false;
	}

	return true;
}

template <spu_exec_bit... Flags>
bool LNOP(spu_thread&, spu_opcode_t)
{
	return true;
}

// This instruction must be used following a store instruction that modifies the instruction stream.
template <spu_exec_bit... Flags>
bool SYNC(spu_thread&, spu_opcode_t)
{
	atomic_fence_seq_cst();
	return true;
}

// This instruction forces all earlier load, store, and channel instructions to complete before proceeding.
template <spu_exec_bit... Flags>
bool DSYNC(spu_thread&, spu_opcode_t)
{
	atomic_fence_seq_cst();
	return true;
}

template <spu_exec_bit...
Flags> bool MFSPR(spu_thread& spu, spu_opcode_t op)
{
	spu.gpr[op.rt].clear(); // All SPRs read as zero. TODO: check it.
	return true;
}

// Read channel value; blocks/fails when the channel has no data (negative result).
template <spu_exec_bit... Flags>
bool RDCH(spu_thread& spu, spu_opcode_t op)
{
	const bool allow = std::exchange(spu.allow_interrupts_in_cpu_work, false);

	const s64 result = spu.get_ch_value(op.ra);

	spu.allow_interrupts_in_cpu_work = allow;

	if (result < 0)
	{
		return false;
	}

	// Channel reads place the value in the preferred (rightmost) word slot
	spu.gpr[op.rt] = v128::from32r(static_cast<u32>(result));

	if (spu.state)
	{
		spu.pc += 4;
		return false;
	}

	return true;
}

// Read channel count (number of available entries).
template <spu_exec_bit... Flags>
bool RCHCNT(spu_thread& spu, spu_opcode_t op)
{
	spu.gpr[op.rt] = v128::from32r(spu.get_ch_count(op.ra));
	return true;
}

// Subtract from: rt = rb - ra (per word).
template <spu_exec_bit... Flags>
bool SF(spu_thread& spu, spu_opcode_t op)
{
	spu.gpr[op.rt] = gv_sub32(spu.gpr[op.rb], spu.gpr[op.ra]);
	return true;
}

template <spu_exec_bit... Flags>
bool OR(spu_thread& spu, spu_opcode_t op)
{
	spu.gpr[op.rt] = spu.gpr[op.ra] | spu.gpr[op.rb];
	return true;
}

// Borrow generate: rt = (ra <=u rb) ? 1 : 0 per word (gtu mask is -1, +1 maps {-1,0} to {0,1}).
template <spu_exec_bit... Flags>
bool BG(spu_thread& spu, spu_opcode_t op)
{
	spu.gpr[op.rt] = _mm_add_epi32(gv_gtu32(spu.gpr[op.ra], spu.gpr[op.rb]), _mm_set1_epi32(1));
	return true;
}

// Subtract from halfword: rt = rb - ra.
template <spu_exec_bit... Flags>
bool SFH(spu_thread& spu, spu_opcode_t op)
{
	spu.gpr[op.rt] = gv_sub16(spu.gpr[op.rb], spu.gpr[op.ra]);
	return true;
}

template <spu_exec_bit... Flags>
bool NOR(spu_thread& spu, spu_opcode_t op)
{
	spu.gpr[op.rt] = ~(spu.gpr[op.ra] | spu.gpr[op.rb]);
	return true;
}

// Absolute difference of bytes: |a - b| computed as max - min to avoid overflow.
template <spu_exec_bit... Flags>
bool ABSDB(spu_thread& spu, spu_opcode_t op)
{
	const auto a = spu.gpr[op.ra];
	const auto b = spu.gpr[op.rb];
	spu.gpr[op.rt] = gv_sub8(gv_maxu8(a, b), gv_minu8(a, b));
	return true;
}

// Rotate left word by per-element variable count.
template <spu_exec_bit... Flags>
bool ROT(spu_thread& spu, spu_opcode_t op)
{
	const auto a = spu.gpr[op.ra];
	const auto b = spu.gpr[op.rb];

	for (u32 i = 0; i < 4; i++)
	{
		spu.gpr[op.rt]._u32[i] = utils::rol32(a._u32[i], b._u32[i]);
	}
	return true;
}

// Logical shift right word by (0 - count) & 0x3f; widening to u64 makes counts >= 32 yield zero.
template <spu_exec_bit... Flags>
bool ROTM(spu_thread& spu, spu_opcode_t op)
{
	const auto a = spu.gpr[op.ra];
	const auto b = spu.gpr[op.rb];

	for (u32 i = 0; i < 4; i++)
	{
		const u64 value = a._u32[i];
		spu.gpr[op.rt]._u32[i] = static_cast<u32>(value >> ((0 - b._u32[i]) & 0x3f));
	}
	return true;
}

// Arithmetic shift right word; s64 widening keeps sign fill for counts >= 32.
template <spu_exec_bit... Flags>
bool ROTMA(spu_thread& spu, spu_opcode_t op)
{
	const auto a = spu.gpr[op.ra];
	const auto b = spu.gpr[op.rb];

	for (u32 i = 0; i < 4; i++)
	{
		const s64 value = a._s32[i];
		spu.gpr[op.rt]._s32[i] = static_cast<s32>(value >> ((0 - b._u32[i]) & 0x3f));
	}
	return true;
}

// Shift left word; u64 widening makes counts >= 32 yield zero.
template <spu_exec_bit... Flags>
bool SHL(spu_thread& spu, spu_opcode_t op)
{
	const auto a = spu.gpr[op.ra];
	const auto b = spu.gpr[op.rb];

	for (u32 i = 0; i < 4; i++)
	{
		const u64 value = a._u32[i];
		spu.gpr[op.rt]._u32[i] = static_cast<u32>(value << (b._u32[i] & 0x3f));
	}
	return true;
}

// Rotate left halfword by per-element variable count.
template <spu_exec_bit... Flags>
bool ROTH(spu_thread& spu, spu_opcode_t op)
{
	const auto a = spu.gpr[op.ra];
	const auto b = spu.gpr[op.rb];

	for (u32 i = 0; i < 8; i++)
	{
		spu.gpr[op.rt]._u16[i] = utils::rol16(a._u16[i], b._u16[i]);
	}
	return true;
}

// Logical shift right halfword; u32 widening makes counts >= 16 yield zero.
template <spu_exec_bit... Flags>
bool ROTHM(spu_thread& spu, spu_opcode_t op)
{
	const auto a = spu.gpr[op.ra];
	const auto b = spu.gpr[op.rb];

	for (u32 i = 0; i < 8; i++)
	{
		const u32 value = a._u16[i];
		spu.gpr[op.rt]._u16[i] = static_cast<u16>(value >> ((0 - b._u16[i]) & 0x1f));
	}
	return true;
}

// Arithmetic shift right halfword; s32 widening keeps sign fill for counts >= 16.
template <spu_exec_bit... Flags>
bool ROTMAH(spu_thread& spu, spu_opcode_t op)
{
	const auto a = spu.gpr[op.ra];
	const auto b = spu.gpr[op.rb];

	for (u32 i = 0; i < 8; i++)
	{
		const s32 value = a._s16[i];
		spu.gpr[op.rt]._s16[i] = static_cast<s16>(value >> ((0 - b._u16[i]) & 0x1f));
	}
	return true;
}

// Shift left halfword; u32 widening makes counts >= 16 yield zero.
template <spu_exec_bit... Flags>
bool SHLH(spu_thread& spu, spu_opcode_t op)
{
	const auto a = spu.gpr[op.ra];
	const auto b = spu.gpr[op.rb];

	for (u32 i = 0; i < 8; i++)
	{
		const u32 value = a._u16[i];
		spu.gpr[op.rt]._u16[i] = static_cast<u16>(value << (b._u16[i] & 0x1f));
	}
	return true;
}

// Rotate left word by immediate (n == 0: srli by 32 yields zero, leaving a unchanged).
template <spu_exec_bit... Flags>
bool ROTI(spu_thread& spu, spu_opcode_t op)
{
	const auto a = spu.gpr[op.ra];
	const s32 n = op.i7 & 0x1f;
	spu.gpr[op.rt] = _mm_or_si128(_mm_slli_epi32(a, n), _mm_srli_epi32(a, 32 - n));
	return true;
}

// Logical shift right word by immediate (negated i7; counts >= 32 give zero per SSE semantics).
template <spu_exec_bit... Flags>
bool ROTMI(spu_thread& spu, spu_opcode_t op)
{
	spu.gpr[op.rt] = _mm_srli_epi32(spu.gpr[op.ra], (0-op.i7) & 0x3f);
	return true;
}

// Arithmetic shift right word by immediate (counts >= 32 saturate to sign fill per SSE semantics).
template <spu_exec_bit... Flags>
bool ROTMAI(spu_thread& spu, spu_opcode_t op)
{
	spu.gpr[op.rt] = _mm_srai_epi32(spu.gpr[op.ra], (0-op.i7) & 0x3f);
	return true;
}

// Shift left word by immediate.
template <spu_exec_bit... Flags>
bool SHLI(spu_thread& spu, spu_opcode_t op)
{
	spu.gpr[op.rt] = _mm_slli_epi32(spu.gpr[op.ra], op.i7 & 0x3f);
	return true;
}

// Rotate left halfword by immediate.
template <spu_exec_bit... Flags>
bool ROTHI(spu_thread& spu, spu_opcode_t op)
{
	const auto a = spu.gpr[op.ra];
	const s32 n = op.i7 & 0xf;
	spu.gpr[op.rt] = _mm_or_si128(_mm_slli_epi16(a, n), _mm_srli_epi16(a, 16 - n));
	return true;
}

// Logical shift right halfword by immediate.
template <spu_exec_bit... Flags>
bool ROTHMI(spu_thread& spu, spu_opcode_t op)
{
	spu.gpr[op.rt] = _mm_srli_epi16(spu.gpr[op.ra], (0-op.i7) & 0x1f);
	return true;
}

// Arithmetic shift right halfword by immediate.
template <spu_exec_bit... Flags>
bool ROTMAHI(spu_thread& spu, spu_opcode_t op)
{
	spu.gpr[op.rt] = _mm_srai_epi16(spu.gpr[op.ra], (0-op.i7) & 0x1f);
	return true;
}

// Shift left halfword by immediate.
template <spu_exec_bit... Flags>
bool SHLHI(spu_thread& spu, spu_opcode_t op)
{
	spu.gpr[op.rt] = _mm_slli_epi16(spu.gpr[op.ra], op.i7 & 0x1f);
	return true;
}

// Add word.
template <spu_exec_bit... Flags>
bool A(spu_thread& spu, spu_opcode_t op)
{
	spu.gpr[op.rt] = gv_add32(spu.gpr[op.ra], spu.gpr[op.rb]);
	return true;
}

template <spu_exec_bit...
Flags> bool AND(spu_thread& spu, spu_opcode_t op)
{
	spu.gpr[op.rt] = spu.gpr[op.ra] & spu.gpr[op.rb];
	return true;
}

// Carry generate: rt = 1 where (ra + rb) overflows 32 bits, computed with a signed
// compare after biasing both operands (xor trick avoids an unsigned add-with-carry).
template <spu_exec_bit... Flags>
bool CG(spu_thread& spu, spu_opcode_t op)
{
	const auto a = _mm_xor_si128(spu.gpr[op.ra], _mm_set1_epi32(0x7fffffff));
	const auto b = _mm_xor_si128(spu.gpr[op.rb], _mm_set1_epi32(0x80000000));
	spu.gpr[op.rt] = _mm_srli_epi32(_mm_cmpgt_epi32(b, a), 31);
	return true;
}

// Add halfword.
template <spu_exec_bit... Flags>
bool AH(spu_thread& spu, spu_opcode_t op)
{
	spu.gpr[op.rt] = gv_add16(spu.gpr[op.ra], spu.gpr[op.rb]);
	return true;
}

template <spu_exec_bit... Flags>
bool NAND(spu_thread& spu, spu_opcode_t op)
{
	spu.gpr[op.rt] = ~(spu.gpr[op.ra] & spu.gpr[op.rb]);
	return true;
}

// Average bytes (rounding up), matching _mm_avg_epu8 semantics.
template <spu_exec_bit... Flags>
bool AVGB(spu_thread& spu, spu_opcode_t op)
{
	spu.gpr[op.rt] = _mm_avg_epu8(spu.gpr[op.ra], spu.gpr[op.rb]);
	return true;
}

template <spu_exec_bit... Flags>
bool MTSPR(spu_thread&, spu_opcode_t)
{
	// SPR writes are ignored. TODO: check it.
	return true;
}

// Write channel; may block/fail (returns false when the PC must not auto-advance).
template <spu_exec_bit... Flags>
bool WRCH(spu_thread& spu, spu_opcode_t op)
{
	const bool allow = std::exchange(spu.allow_interrupts_in_cpu_work, false);

	const bool advance_pc = spu.set_ch_value(op.ra, spu.gpr[op.rt]._u32[3]);

	spu.allow_interrupts_in_cpu_work = allow;

	if (!advance_pc)
	{
		return false;
	}

	if (spu.state)
	{
		spu.pc += 4;
		return false;
	}

	return true;
}

// Branch indirect if zero (preferred word of rt).
template <spu_exec_bit... Flags>
bool BIZ(spu_thread& spu, spu_opcode_t op)
{
	if (spu.gpr[op.rt]._u32[3] == 0)
	{
		spu.pc = spu_branch_target(spu.gpr[op.ra]._u32[3]);
		spu_interpreter::set_interrupt_status(spu, op);
		return false;
	}
	return true;
}

// Branch indirect if not zero.
template <spu_exec_bit... Flags>
bool BINZ(spu_thread& spu, spu_opcode_t op)
{
	if (spu.gpr[op.rt]._u32[3] != 0)
	{
		spu.pc = spu_branch_target(spu.gpr[op.ra]._u32[3]);
		spu_interpreter::set_interrupt_status(spu, op);
		return false;
	}
	return true;
}

// Branch indirect if halfword zero (halfword 6 = low half of the preferred word).
template <spu_exec_bit... Flags>
bool BIHZ(spu_thread& spu, spu_opcode_t op)
{
	if (spu.gpr[op.rt]._u16[6] == 0)
	{
		spu.pc = spu_branch_target(spu.gpr[op.ra]._u32[3]);
		spu_interpreter::set_interrupt_status(spu, op);
		return false;
	}
	return true;
}

// Branch indirect if halfword not zero.
template <spu_exec_bit... Flags>
bool BIHNZ(spu_thread& spu, spu_opcode_t op)
{
	if (spu.gpr[op.rt]._u16[6] != 0)
	{
		spu.pc = spu_branch_target(spu.gpr[op.ra]._u32[3]);
		spu_interpreter::set_interrupt_status(spu, op);
		return false;
	}
	return true;
}

// STOPD: treated as stop with code 0x3fff. NOTE(review): unlike STOP, no
// interrupt suppression or pc handling here — presumably intentional; confirm.
template <spu_exec_bit... Flags>
bool STOPD(spu_thread& spu, spu_opcode_t)
{
	return spu.stop_and_signal(0x3fff);
}

// Store quadword (x-form); address is (ra + rb) masked to 16-byte aligned LS offset.
template <spu_exec_bit... Flags>
bool STQX(spu_thread& spu, spu_opcode_t op)
{
	spu._ref<v128>((spu.gpr[op.ra]._u32[3] + spu.gpr[op.rb]._u32[3]) & 0x3fff0) = spu.gpr[op.rt];
	return true;
}

// Branch indirect (unconditional).
template <spu_exec_bit... Flags>
bool BI(spu_thread& spu, spu_opcode_t op)
{
	spu.pc = spu_branch_target(spu.gpr[op.ra]._u32[3]);
	spu_interpreter::set_interrupt_status(spu, op);
	return false;
}

// Branch indirect and set link (rt = return address).
template <spu_exec_bit... Flags>
bool BISL(spu_thread& spu, spu_opcode_t op)
{
	const u32 target = spu_branch_target(spu.gpr[op.ra]._u32[3]);
	spu.gpr[op.rt] = v128::from32r(spu_branch_target(spu.pc + 4));
	spu.pc = target;
	spu_interpreter::set_interrupt_status(spu, op);
	return false;
}

// Interrupt return: jump to saved SRR0.
template <spu_exec_bit... Flags>
bool IRET(spu_thread& spu, spu_opcode_t op)
{
	spu.pc = spu.srr0;
	spu_interpreter::set_interrupt_status(spu, op);
	return false;
}

// Branch indirect and set link if external data (pending events) exists.
template <spu_exec_bit... Flags>
bool BISLED(spu_thread& spu, spu_opcode_t op)
{
	const u32 target = spu_branch_target(spu.gpr[op.ra]._u32[3]);
	spu.gpr[op.rt] = v128::from32r(spu_branch_target(spu.pc + 4));

	if (spu.get_events().count)
	{
		spu.pc = target;
		spu_interpreter::set_interrupt_status(spu, op);
		return false;
	}
	return true;
}

// Branch hint: no-op in the interpreter.
template <spu_exec_bit... Flags>
bool HBR(spu_thread&, spu_opcode_t)
{
	return true;
}

template <spu_exec_bit...
Flags> bool GB(spu_thread& spu, spu_opcode_t op)
{
	// Gather bits from words: LSB of each word packed into the preferred slot
	// (shift bit 0 into the sign position, then movemask).
	spu.gpr[op.rt] = v128::from32r(_mm_movemask_ps(_mm_castsi128_ps(_mm_slli_epi32(spu.gpr[op.ra], 31))));
	return true;
}

// Gather bits from halfwords (pack to bytes first so movemask_epi8 sees one bit per halfword).
template <spu_exec_bit... Flags>
bool GBH(spu_thread& spu, spu_opcode_t op)
{
	spu.gpr[op.rt] = v128::from32r(_mm_movemask_epi8(_mm_packs_epi16(_mm_slli_epi16(spu.gpr[op.ra], 15), _mm_setzero_si128())));
	return true;
}

// Gather bits from bytes (LSB of each byte shifted into the byte's sign bit).
template <spu_exec_bit... Flags>
bool GBB(spu_thread& spu, spu_opcode_t op)
{
	spu.gpr[op.rt] = v128::from32r(_mm_movemask_epi8(_mm_slli_epi64(spu.gpr[op.ra], 7)));
	return true;
}

// Form select mask for words from the 4 low bits of the preferred word.
template <spu_exec_bit... Flags>
bool FSM(spu_thread& spu, spu_opcode_t op)
{
	const auto bits = _mm_shuffle_epi32(spu.gpr[op.ra], 0xff);
	const auto mask = _mm_set_epi32(8, 4, 2, 1);
	spu.gpr[op.rt] = _mm_cmpeq_epi32(_mm_and_si128(bits, mask), mask);
	return true;
}

// Form select mask for halfwords from the 8 low bits of the preferred word.
template <spu_exec_bit... Flags>
bool FSMH(spu_thread& spu, spu_opcode_t op)
{
	const auto vsrc = spu.gpr[op.ra];
	const auto bits = _mm_shuffle_epi32(_mm_unpackhi_epi16(vsrc, vsrc), 0xaa);
	const auto mask = _mm_set_epi16(128, 64, 32, 16, 8, 4, 2, 1);
	spu.gpr[op.rt] = _mm_cmpeq_epi16(_mm_and_si128(bits, mask), mask);
	return true;
}

// Form select mask for bytes from the 16 low bits of the preferred word.
template <spu_exec_bit... Flags>
bool FSMB(spu_thread& spu, spu_opcode_t op)
{
	const auto vsrc = spu.gpr[op.ra];
	const auto bits = _mm_shuffle_epi32(_mm_shufflehi_epi16(_mm_unpackhi_epi8(vsrc, vsrc), 0x50), 0xfa);
	const auto mask = _mm_set_epi8(-128, 64, 32, 16, 8, 4, 2, 1, -128, 64, 32, 16, 8, 4, 2, 1);
	spu.gpr[op.rt] = _mm_cmpeq_epi8(_mm_and_si128(bits, mask), mask);
	return true;
}

// Float reciprocal estimate via fraction/exponent lookup tables, sign preserved.
template <spu_exec_bit... Flags>
bool FREST(spu_thread& spu, spu_opcode_t op)
{
	v128 fraction_index = v128(_mm_srli_epi32(spu.gpr[op.ra], 18)) & v128(_mm_set1_epi32(0x1F));
	v128 exponent_index = v128(_mm_srli_epi32(spu.gpr[op.ra], 23)) & v128(_mm_set1_epi32(0xFF));
	v128 sign = spu.gpr[op.ra] & _mm_set1_epi32(0x80000000);

	// AVX2
	// v128 fraction = _mm_i32gather_epi32(spu_frest_fraction_lut, fraction_index, 4);
	// v128 exponent = _mm_i32gather_epi32(spu_frest_exponent_lut, exponent_index, 4);

	v128 result;

	for (u32 index = 0; index < 4; index++)
	{
		u32 r = spu_frest_fraction_lut[fraction_index._u32[index]];
		r |= spu_frest_exponent_lut[exponent_index._u32[index]];
		r |= sign._u32[index];
		result._u32[index] = r;
	}

	spu.gpr[op.rt] = result;
	return true;
}

// Float reciprocal square root estimate via lookup tables (sign ignored: operand is |x|).
template <spu_exec_bit... Flags>
bool FRSQEST(spu_thread& spu, spu_opcode_t op)
{
	v128 fraction_index = v128(_mm_srli_epi32(spu.gpr[op.ra], 18)) & v128(_mm_set1_epi32(0x3F));
	v128 exponent_index = v128(_mm_srli_epi32(spu.gpr[op.ra], 23)) & v128(_mm_set1_epi32(0xFF));

	// AVX2
	// v128 fraction = _mm_i32gather_epi32(spu_frsqest_fraction_lut, fraction_index, 4);
	// v128 exponent = _mm_i32gather_epi32(spu_frsqest_exponent_lut, exponent_index, 4);

	v128 result;

	for (u32 index = 0; index < 4; index++)
	{
		u32 r = spu_frsqest_fraction_lut[fraction_index._u32[index]];
		r |= spu_frsqest_exponent_lut[exponent_index._u32[index]];
		result._u32[index] = r;
	}

	spu.gpr[op.rt] = result;
	return true;
}

// Load quadword (x-form); address is (ra + rb) masked to 16-byte aligned LS offset.
template <spu_exec_bit... Flags>
bool LQX(spu_thread& spu, spu_opcode_t op)
{
	spu.gpr[op.rt] = spu._ref<v128>((spu.gpr[op.ra]._u32[3] + spu.gpr[op.rb]._u32[3]) & 0x3fff0);
	return true;
}

// Rotate quadword left by bytes (count from bits 3..6 of rb): unaligned load from a doubled buffer.
template <spu_exec_bit... Flags>
bool ROTQBYBI(spu_thread& spu, spu_opcode_t op)
{
	const __m128i a = spu.gpr[op.ra];
	alignas(32) const __m128i buf[2]{a, a};
	spu.gpr[op.rt] = _mm_loadu_si128(reinterpret_cast<const __m128i*>(reinterpret_cast<const u8*>(buf) + (16 - (spu.gpr[op.rb]._u32[3] >> 3 & 0xf))));
	return true;
}

template <spu_exec_bit...
Flags> bool ROTQMBYBI(spu_thread& spu, spu_opcode_t op)
{
	// Shift quadword right by bytes (negated count); zero-padded buffer feeds the unaligned load.
	const __m128i a = spu.gpr[op.ra];
	alignas(64) const __m128i buf[3]{a, _mm_setzero_si128(), _mm_setzero_si128()};
	spu.gpr[op.rt] = _mm_loadu_si128(reinterpret_cast<const __m128i*>(reinterpret_cast<const u8*>(buf) + ((0 - (spu.gpr[op.rb]._u32[3] >> 3)) & 0x1f)));
	return true;
}

// Shift quadword left by bytes (count from bits 3..7 of rb).
template <spu_exec_bit... Flags>
bool SHLQBYBI(spu_thread& spu, spu_opcode_t op)
{
	const __m128i a = spu.gpr[op.ra];
	alignas(64) const __m128i buf[3]{_mm_setzero_si128(), _mm_setzero_si128(), a};
	spu.gpr[op.rt] = _mm_loadu_si128(reinterpret_cast<const __m128i*>(reinterpret_cast<const u8*>(buf) + (32 - (spu.gpr[op.rb]._u32[3] >> 3 & 0x1f))));
	return true;
}

// Generate controls for byte insertion: rt = shuffle pattern with 0x03 at the target byte.
template <spu_exec_bit... Flags>
bool CBX(spu_thread& spu, spu_opcode_t op)
{
	if (op.ra == 1 && (spu.gpr[1]._u32[3] & 0xF))
	{
		fmt::throw_exception("Unexpected SP value: LS:0x%05x", spu.gpr[1]._u32[3]);
	}

	const s32 t = ~(spu.gpr[op.rb]._u32[3] + spu.gpr[op.ra]._u32[3]) & 0xf;
	spu.gpr[op.rt] = v128::from64(0x18191A1B1C1D1E1Full, 0x1011121314151617ull);
	spu.gpr[op.rt]._u8[t] = 0x03;
	return true;
}

// Generate controls for halfword insertion.
template <spu_exec_bit... Flags>
bool CHX(spu_thread& spu, spu_opcode_t op)
{
	if (op.ra == 1 && (spu.gpr[1]._u32[3] & 0xF))
	{
		fmt::throw_exception("Unexpected SP value: LS:0x%05x", spu.gpr[1]._u32[3]);
	}

	const s32 t = (~(spu.gpr[op.rb]._u32[3] + spu.gpr[op.ra]._u32[3]) & 0xe) >> 1;
	spu.gpr[op.rt] = v128::from64(0x18191A1B1C1D1E1Full, 0x1011121314151617ull);
	spu.gpr[op.rt]._u16[t] = 0x0203;
	return true;
}

// Generate controls for word insertion.
template <spu_exec_bit... Flags>
bool CWX(spu_thread& spu, spu_opcode_t op)
{
	if (op.ra == 1 && (spu.gpr[1]._u32[3] & 0xF))
	{
		fmt::throw_exception("Unexpected SP value: LS:0x%05x", spu.gpr[1]._u32[3]);
	}

	const s32 t = (~(spu.gpr[op.rb]._u32[3] + spu.gpr[op.ra]._u32[3]) & 0xc) >> 2;
	spu.gpr[op.rt] = v128::from64(0x18191A1B1C1D1E1Full, 0x1011121314151617ull);
	spu.gpr[op.rt]._u32[t] = 0x00010203;
	return true;
}

// Generate controls for doubleword insertion.
template <spu_exec_bit... Flags>
bool CDX(spu_thread& spu, spu_opcode_t op)
{
	if (op.ra == 1 && (spu.gpr[1]._u32[3] & 0xF))
	{
		fmt::throw_exception("Unexpected SP value: LS:0x%05x", spu.gpr[1]._u32[3]);
	}

	const s32 t = (~(spu.gpr[op.rb]._u32[3] + spu.gpr[op.ra]._u32[3]) & 0x8) >> 3;
	spu.gpr[op.rt] = v128::from64(0x18191A1B1C1D1E1Full, 0x1011121314151617ull);
	spu.gpr[op.rt]._u64[t] = 0x0001020304050607ull;
	return true;
}

// Rotate quadword left by bits (0..7): bits rotated out of the high half re-enter via
// the word-swapped copy (n == 0: srli by 64 yields zero).
template <spu_exec_bit... Flags>
bool ROTQBI(spu_thread& spu, spu_opcode_t op)
{
	const auto a = spu.gpr[op.ra];
	const s32 n = spu.gpr[op.rb]._s32[3] & 0x7;
	spu.gpr[op.rt] = _mm_or_si128(_mm_slli_epi64(a, n), _mm_srli_epi64(_mm_shuffle_epi32(a, 0x4E), 64 - n));
	return true;
}

// Shift quadword right by bits (negated count 0..7).
template <spu_exec_bit... Flags>
bool ROTQMBI(spu_thread& spu, spu_opcode_t op)
{
	const auto a = spu.gpr[op.ra];
	const s32 n = -spu.gpr[op.rb]._s32[3] & 0x7;
	spu.gpr[op.rt] = _mm_or_si128(_mm_srli_epi64(a, n), _mm_slli_epi64(_mm_srli_si128(a, 8), 64 - n));
	return true;
}

// Shift quadword left by bits (0..7).
template <spu_exec_bit... Flags>
bool SHLQBI(spu_thread& spu, spu_opcode_t op)
{
	const auto a = spu.gpr[op.ra];
	const s32 n = spu.gpr[op.rb]._u32[3] & 0x7;
	spu.gpr[op.rt] = _mm_or_si128(_mm_slli_epi64(a, n), _mm_srli_epi64(_mm_slli_si128(a, 8), 64 - n));
	return true;
}

// Rotate quadword left by bytes (0..15).
template <spu_exec_bit... Flags>
bool ROTQBY(spu_thread& spu, spu_opcode_t op)
{
	const __m128i a = spu.gpr[op.ra];
	alignas(32) const __m128i buf[2]{a, a};
	spu.gpr[op.rt] = _mm_loadu_si128(reinterpret_cast<const __m128i*>(reinterpret_cast<const u8*>(buf) + (16 - (spu.gpr[op.rb]._u32[3] & 0xf))));
	return true;
}

// Shift quadword right by bytes (negated count 0..31; counts > 15 produce zero).
template <spu_exec_bit... Flags>
bool ROTQMBY(spu_thread& spu, spu_opcode_t op)
{
	const __m128i a = spu.gpr[op.ra];
	alignas(64) const __m128i buf[3]{a, _mm_setzero_si128(), _mm_setzero_si128()};
	spu.gpr[op.rt] = _mm_loadu_si128(reinterpret_cast<const __m128i*>(reinterpret_cast<const u8*>(buf) + ((0 - spu.gpr[op.rb]._u32[3]) & 0x1f)));
	return true;
}

// Shift quadword left by bytes (0..31; counts > 15 produce zero).
template <spu_exec_bit... Flags>
bool SHLQBY(spu_thread& spu, spu_opcode_t op)
{
	const __m128i a = spu.gpr[op.ra];
	alignas(64) const __m128i buf[3]{_mm_setzero_si128(), _mm_setzero_si128(), a};
	spu.gpr[op.rt] = _mm_loadu_si128(reinterpret_cast<const __m128i*>(reinterpret_cast<const u8*>(buf) + (32 - (spu.gpr[op.rb]._u32[3] & 0x1f))));
	return true;
}

// OR across: OR of all four words into the preferred slot.
template <spu_exec_bit... Flags>
bool ORX(spu_thread& spu, spu_opcode_t op)
{
	spu.gpr[op.rt] = v128::from32r(spu.gpr[op.ra]._u32[0] | spu.gpr[op.ra]._u32[1] | spu.gpr[op.ra]._u32[2] | spu.gpr[op.ra]._u32[3]);
	return true;
}

// Generate controls for byte insertion (d-form, immediate offset).
template <spu_exec_bit... Flags>
bool CBD(spu_thread& spu, spu_opcode_t op)
{
	if (op.ra == 1 && (spu.gpr[1]._u32[3] & 0xF))
	{
		fmt::throw_exception("Unexpected SP value: LS:0x%05x", spu.gpr[1]._u32[3]);
	}

	const s32 t = ~(op.i7 + spu.gpr[op.ra]._u32[3]) & 0xf;
	spu.gpr[op.rt] = v128::from64(0x18191A1B1C1D1E1Full, 0x1011121314151617ull);
	spu.gpr[op.rt]._u8[t] = 0x03;
	return true;
}

// Generate controls for halfword insertion (d-form).
template <spu_exec_bit... Flags>
bool CHD(spu_thread& spu, spu_opcode_t op)
{
	if (op.ra == 1 && (spu.gpr[1]._u32[3] & 0xF))
	{
		fmt::throw_exception("Unexpected SP value: LS:0x%05x", spu.gpr[1]._u32[3]);
	}

	const s32 t = (~(op.i7 + spu.gpr[op.ra]._u32[3]) & 0xe) >> 1;
	spu.gpr[op.rt] = v128::from64(0x18191A1B1C1D1E1Full, 0x1011121314151617ull);
	spu.gpr[op.rt]._u16[t] = 0x0203;
	return true;
}

// Generate controls for word insertion (d-form).
template <spu_exec_bit... Flags>
bool CWD(spu_thread& spu, spu_opcode_t op)
{
	if (op.ra == 1 && (spu.gpr[1]._u32[3] & 0xF))
	{
		fmt::throw_exception("Unexpected SP value: LS:0x%05x", spu.gpr[1]._u32[3]);
	}

	const s32 t = (~(op.i7 + spu.gpr[op.ra]._u32[3]) & 0xc) >> 2;
	spu.gpr[op.rt] = v128::from64(0x18191A1B1C1D1E1Full, 0x1011121314151617ull);
	spu.gpr[op.rt]._u32[t] = 0x00010203;
	return true;
}

template <spu_exec_bit...
Flags> bool CDD(spu_thread& spu, spu_opcode_t op)
{
	// Generate controls for doubleword insertion (d-form, immediate offset).
	if (op.ra == 1 && (spu.gpr[1]._u32[3] & 0xF))
	{
		fmt::throw_exception("Unexpected SP value: LS:0x%05x", spu.gpr[1]._u32[3]);
	}

	const s32 t = (~(op.i7 + spu.gpr[op.ra]._u32[3]) & 0x8) >> 3;
	spu.gpr[op.rt] = v128::from64(0x18191A1B1C1D1E1Full, 0x1011121314151617ull);
	spu.gpr[op.rt]._u64[t] = 0x0001020304050607ull;
	return true;
}

// Rotate quadword left by bits, immediate form.
template <spu_exec_bit... Flags>
bool ROTQBII(spu_thread& spu, spu_opcode_t op)
{
	const auto a = spu.gpr[op.ra];
	const s32 n = op.i7 & 0x7;
	spu.gpr[op.rt] = _mm_or_si128(_mm_slli_epi64(a, n), _mm_srli_epi64(_mm_shuffle_epi32(a, 0x4E), 64 - n));
	return true;
}

// Shift quadword right by bits, immediate form (negated i7).
template <spu_exec_bit... Flags>
bool ROTQMBII(spu_thread& spu, spu_opcode_t op)
{
	const auto a = spu.gpr[op.ra];
	const s32 n = (0-op.i7) & 0x7;
	spu.gpr[op.rt] = _mm_or_si128(_mm_srli_epi64(a, n), _mm_slli_epi64(_mm_srli_si128(a, 8), 64 - n));
	return true;
}

// Shift quadword left by bits, immediate form.
template <spu_exec_bit... Flags>
bool SHLQBII(spu_thread& spu, spu_opcode_t op)
{
	const auto a = spu.gpr[op.ra];
	const s32 n = op.i7 & 0x7;
	spu.gpr[op.rt] = _mm_or_si128(_mm_slli_epi64(a, n), _mm_srli_epi64(_mm_slli_si128(a, 8), 64 - n));
	return true;
}

// Rotate quadword left by bytes, immediate form.
template <spu_exec_bit... Flags>
bool ROTQBYI(spu_thread& spu, spu_opcode_t op)
{
	const __m128i a = spu.gpr[op.ra];
	alignas(32) const __m128i buf[2]{a, a};
	spu.gpr[op.rt] = _mm_loadu_si128(reinterpret_cast<const __m128i*>(reinterpret_cast<const u8*>(buf) + (16 - (op.i7 & 0xf))));
	return true;
}

// Shift quadword right by bytes, immediate form.
template <spu_exec_bit... Flags>
bool ROTQMBYI(spu_thread& spu, spu_opcode_t op)
{
	const __m128i a = spu.gpr[op.ra];
	alignas(64) const __m128i buf[3]{a, _mm_setzero_si128(), _mm_setzero_si128()};
	spu.gpr[op.rt] = _mm_loadu_si128(reinterpret_cast<const __m128i*>(reinterpret_cast<const u8*>(buf) + ((0 - op.i7) & 0x1f)));
	return true;
}

// Shift quadword left by bytes, immediate form.
template <spu_exec_bit... Flags>
bool SHLQBYI(spu_thread& spu, spu_opcode_t op)
{
	const __m128i a = spu.gpr[op.ra];
	alignas(64) const __m128i buf[3]{_mm_setzero_si128(), _mm_setzero_si128(), a};
	spu.gpr[op.rt] = _mm_loadu_si128(reinterpret_cast<const __m128i*>(reinterpret_cast<const u8*>(buf) + (32 - (op.i7 & 0x1f))));
	return true;
}

template <spu_exec_bit... Flags>
bool NOP(spu_thread&, spu_opcode_t)
{
	return true;
}

// Compare greater than word (signed).
template <spu_exec_bit... Flags>
bool CGT(spu_thread& spu, spu_opcode_t op)
{
	spu.gpr[op.rt] = _mm_cmpgt_epi32(spu.gpr[op.ra], spu.gpr[op.rb]);
	return true;
}

template <spu_exec_bit... Flags>
bool XOR(spu_thread& spu, spu_opcode_t op)
{
	spu.gpr[op.rt] = spu.gpr[op.ra] ^ spu.gpr[op.rb];
	return true;
}

// Compare greater than halfword (signed).
template <spu_exec_bit... Flags>
bool CGTH(spu_thread& spu, spu_opcode_t op)
{
	spu.gpr[op.rt] = _mm_cmpgt_epi16(spu.gpr[op.ra], spu.gpr[op.rb]);
	return true;
}

// Equivalence: rt = ~(ra ^ rb).
template <spu_exec_bit... Flags>
bool EQV(spu_thread& spu, spu_opcode_t op)
{
	spu.gpr[op.rt] = ~(spu.gpr[op.ra] ^ spu.gpr[op.rb]);
	return true;
}

// Compare greater than byte (signed).
template <spu_exec_bit... Flags>
bool CGTB(spu_thread& spu, spu_opcode_t op)
{
	spu.gpr[op.rt] = _mm_cmpgt_epi8(spu.gpr[op.ra], spu.gpr[op.rb]);
	return true;
}

// Sum bytes into halfwords: each result halfword holds the sum of 4 bytes,
// interleaving sums from rb (even halfwords) and ra (odd halfwords).
template <spu_exec_bit... Flags>
bool SUMB(spu_thread& spu, spu_opcode_t op)
{
	const auto m1 = _mm_set1_epi16(0xff);
	const auto m2 = _mm_set1_epi32(0xffff);
	const auto a = spu.gpr[op.ra];
	const auto b = spu.gpr[op.rb];
	// Split each operand into high/low bytes per halfword, then add pairwise
	const auto a1 = _mm_srli_epi16(a, 8);
	const auto a2 = _mm_and_si128(a, m1);
	const auto b1 = _mm_srli_epi16(b, 8);
	const auto b2 = _mm_and_si128(b, m1);
	const auto sa = _mm_add_epi16(a1, a2);
	const auto sb = _mm_add_epi16(b1, b2);
	const auto s2 = _mm_and_si128(sa, m2);
	const auto s1 = _mm_srli_epi32(sa, 16);
	const auto s4 = _mm_andnot_si128(m2, sb);
	const auto s3 = _mm_slli_epi32(sb, 16);
	spu.gpr[op.rt] = _mm_or_si128(_mm_add_epi16(s1, s2), _mm_add_epi16(s3, s4));
	return true;
}

template <spu_exec_bit...
Flags> bool HGT(spu_thread& spu, spu_opcode_t op)
{
	// Halt if greater than (signed, preferred words).
	if (spu.gpr[op.ra]._s32[3] > spu.gpr[op.rb]._s32[3])
	{
		spu.halt();
	}
	return true;
}

// Count leading zeros per word.
template <spu_exec_bit... Flags>
bool CLZ(spu_thread& spu, spu_opcode_t op)
{
	for (u32 i = 0; i < 4; i++)
	{
		spu.gpr[op.rt]._u32[i] = std::countl_zero(spu.gpr[op.ra]._u32[i]);
	}
	return true;
}

// Extend sign word to doubleword (from words 0 and 2).
template <spu_exec_bit... Flags>
bool XSWD(spu_thread& spu, spu_opcode_t op)
{
	spu.gpr[op.rt]._s64[0] = spu.gpr[op.ra]._s32[0];
	spu.gpr[op.rt]._s64[1] = spu.gpr[op.ra]._s32[2];
	return true;
}

// Extend sign halfword to word (shift left then arithmetic shift right).
template <spu_exec_bit... Flags>
bool XSHW(spu_thread& spu, spu_opcode_t op)
{
	spu.gpr[op.rt] = _mm_srai_epi32(_mm_slli_epi32(spu.gpr[op.ra], 16), 16);
	return true;
}

// Count ones in bytes: classic SWAR popcount (pairs, nibbles, bytes).
template <spu_exec_bit... Flags>
bool CNTB(spu_thread& spu, spu_opcode_t op)
{
	const auto a = spu.gpr[op.ra];
	const auto mask1 = _mm_set1_epi8(0x55);
	const auto sum1 = _mm_add_epi8(_mm_and_si128(_mm_srli_epi64(a, 1), mask1), _mm_and_si128(a, mask1));
	const auto mask2 = _mm_set1_epi8(0x33);
	const auto sum2 = _mm_add_epi8(_mm_and_si128(_mm_srli_epi64(sum1, 2), mask2), _mm_and_si128(sum1, mask2));
	const auto mask3 = _mm_set1_epi8(0x0f);
	const auto sum3 = _mm_add_epi8(_mm_and_si128(_mm_srli_epi64(sum2, 4), mask3), _mm_and_si128(sum2, mask3));
	spu.gpr[op.rt] = sum3;
	return true;
}

// Extend sign byte to halfword.
template <spu_exec_bit... Flags>
bool XSBH(spu_thread& spu, spu_opcode_t op)
{
	spu.gpr[op.rt] = _mm_srai_epi16(_mm_slli_epi16(spu.gpr[op.ra], 8), 8);
	return true;
}

// Compare logical (unsigned) greater than word.
template <spu_exec_bit... Flags>
bool CLGT(spu_thread& spu, spu_opcode_t op)
{
	spu.gpr[op.rt] = gv_gtu32(spu.gpr[op.ra], spu.gpr[op.rb]);
	return true;
}

// AND with complement: rt = ra & ~rb.
template <spu_exec_bit... Flags>
bool ANDC(spu_thread& spu, spu_opcode_t op)
{
	spu.gpr[op.rt] = gv_andn(spu.gpr[op.rb], spu.gpr[op.ra]);
	return true;
}

// Float compare greater than, with SPU extended-float/denormal semantics.
template <spu_exec_bit... Flags>
bool FCGT(spu_thread& spu, spu_opcode_t op)
{
	// IMPL NOTES:
	// if (v is inf) v = (inf - 1) i.e nearest normal value to inf with mantissa bits left intact
	// if (v is denormalized) v = 0 flush denormals
	// return v1 > v2
	// branching simulated using bitwise ops and_not+or

	const auto zero = _mm_set1_ps(0.f);
	const auto nan_check_a = _mm_cmpunord_ps(spu.gpr[op.ra], zero); //mask true where a is extended
	const auto nan_check_b = _mm_cmpunord_ps(spu.gpr[op.rb], zero); //mask true where b is extended

	//calculate lowered a and b. The mantissa bits are left untouched for now unless its proven they should be flushed
	const auto last_exp_bit = _mm_castsi128_ps(_mm_set1_epi32(0x00800000));
	const auto lowered_a = _mm_andnot_ps(last_exp_bit, spu.gpr[op.ra]); //a is lowered to largest unextended value with sign
	const auto lowered_b = _mm_andnot_ps(last_exp_bit, spu.gpr[op.rb]); //b is lowered to largest unextended value with sign

	//check if a and b are denormalized
	const auto all_exp_bits = _mm_castsi128_ps(_mm_set1_epi32(0x7f800000));
	const auto denorm_check_a = _mm_cmpeq_ps(zero, _mm_and_ps(all_exp_bits, spu.gpr[op.ra]));
	const auto denorm_check_b = _mm_cmpeq_ps(zero, _mm_and_ps(all_exp_bits, spu.gpr[op.rb]));

	//set a and b to their lowered values if they are extended
	const auto a_values_lowered = _mm_and_ps(nan_check_a, lowered_a);
	const auto original_a_masked = _mm_andnot_ps(nan_check_a, spu.gpr[op.ra]);
	const auto a_final1 = _mm_or_ps(a_values_lowered, original_a_masked);

	const auto b_values_lowered = _mm_and_ps(nan_check_b, lowered_b);
	const auto original_b_masked = _mm_andnot_ps(nan_check_b, spu.gpr[op.rb]);
	const auto b_final1 = _mm_or_ps(b_values_lowered, original_b_masked);

	//Flush denormals to zero
	const auto final_a = _mm_andnot_ps(denorm_check_a, a_final1);
	const auto final_b = _mm_andnot_ps(denorm_check_b, b_final1);

	spu.gpr[op.rt] = _mm_cmplt_ps(final_b, final_a);
	return true;
}

template <spu_exec_bit...
Flags> bool DFCGT(spu_thread&, spu_opcode_t)
{
	// Double compare greater than: not implemented in the interpreter.
	spu_log.fatal("DFCGT");
	return false;
}

// Float add.
template <spu_exec_bit... Flags>
bool FA(spu_thread& spu, spu_opcode_t op)
{
	spu.gpr[op.rt] = gv_addfs(spu.gpr[op.ra], spu.gpr[op.rb]);
	return true;
}

// Float subtract.
template <spu_exec_bit... Flags>
bool FS(spu_thread& spu, spu_opcode_t op)
{
	spu.gpr[op.rt] = gv_subfs(spu.gpr[op.ra], spu.gpr[op.rb]);
	return true;
}

// Float multiply with SPU semantics: denormal inputs/results flush to zero,
// overflow saturates to the "extended" value with the correct product sign.
template <spu_exec_bit... Flags>
bool FM(spu_thread& spu, spu_opcode_t op)
{
	const auto zero = _mm_set1_ps(0.f);
	const auto sign_bits = _mm_castsi128_ps(_mm_set1_epi32(0x80000000));
	const auto all_exp_bits = _mm_castsi128_ps(_mm_set1_epi32(0x7f800000));

	//check denormals
	const auto denorm_check_a = _mm_cmpeq_ps(zero, _mm_and_ps(all_exp_bits, spu.gpr[op.ra]));
	const auto denorm_check_b = _mm_cmpeq_ps(zero, _mm_and_ps(all_exp_bits, spu.gpr[op.rb]));
	const auto denorm_operand_mask = _mm_or_ps(denorm_check_a, denorm_check_b);

	//compute result with flushed denormal inputs
	const auto primary_result = _mm_mul_ps(spu.gpr[op.ra], spu.gpr[op.rb]);
	const auto denom_result_mask = _mm_cmpeq_ps(zero, _mm_and_ps(all_exp_bits, primary_result));
	const auto flushed_result = _mm_andnot_ps(_mm_or_ps(denom_result_mask, denorm_operand_mask), primary_result);

	//check for extended
	const auto nan_check = _mm_cmpeq_ps(_mm_and_ps(primary_result, all_exp_bits), all_exp_bits);
	const auto sign_mask = _mm_xor_ps(_mm_and_ps(sign_bits, spu.gpr[op.ra]), _mm_and_ps(sign_bits, spu.gpr[op.rb]));
	const auto extended_result = _mm_or_ps(sign_mask, _mm_andnot_ps(sign_bits, primary_result));
	const auto final_extended = _mm_andnot_ps(denorm_operand_mask, extended_result);

	//if nan, result = ext, else result = flushed
	const auto set1 = _mm_andnot_ps(nan_check, flushed_result);
	const auto set2 = _mm_and_ps(nan_check, final_extended);

	spu.gpr[op.rt] = _mm_or_ps(set1, set2);
	return true;
}

// Compare logical (unsigned) greater than halfword.
template <spu_exec_bit... Flags>
bool CLGTH(spu_thread& spu, spu_opcode_t op)
{
	spu.gpr[op.rt] = gv_gtu16(spu.gpr[op.ra], spu.gpr[op.rb]);
	return true;
}

// OR with complement: rt = ra | ~rb.
template <spu_exec_bit... Flags>
bool ORC(spu_thread& spu, spu_opcode_t op)
{
	spu.gpr[op.rt] = spu.gpr[op.ra] | ~spu.gpr[op.rb];
	return true;
}

// Float compare magnitude greater than (|a| > |b|), with extended/denormal handling.
template <spu_exec_bit... Flags>
bool FCMGT(spu_thread& spu, spu_opcode_t op)
{
	//IMPL NOTES: See FCGT

	const auto zero = _mm_set1_ps(0.f);
	const auto nan_check_a = _mm_cmpunord_ps(spu.gpr[op.ra], zero); //mask true where a is extended
	const auto nan_check_b = _mm_cmpunord_ps(spu.gpr[op.rb], zero); //mask true where b is extended

	//check if a and b are denormalized
	const auto all_exp_bits = _mm_castsi128_ps(_mm_set1_epi32(0x7f800000));
	const auto denorm_check_a = _mm_cmpeq_ps(zero, _mm_and_ps(all_exp_bits, spu.gpr[op.ra]));
	const auto denorm_check_b = _mm_cmpeq_ps(zero, _mm_and_ps(all_exp_bits, spu.gpr[op.rb]));

	//Flush denormals to zero
	const auto final_a = _mm_andnot_ps(denorm_check_a, spu.gpr[op.ra]);
	const auto final_b = _mm_andnot_ps(denorm_check_b, spu.gpr[op.rb]);

	//Mask to make a > b if a is extended but b is not (is this necessary on x86?)
	const auto nan_mask = _mm_andnot_ps(nan_check_b, _mm_xor_ps(nan_check_a, nan_check_b));

	const auto sign_mask = _mm_castsi128_ps(_mm_set1_epi32(0x7fffffff));
	const auto comparison = _mm_cmplt_ps(_mm_and_ps(final_b, sign_mask), _mm_and_ps(final_a, sign_mask));

	spu.gpr[op.rt] = _mm_or_ps(comparison, nan_mask);
	return true;
}

// Double compare magnitude greater than: not implemented in the interpreter.
template <spu_exec_bit... Flags>
bool DFCMGT(spu_thread&, spu_opcode_t)
{
	spu_log.fatal("DFCMGT");
	return false;
}

// Double add.
template <spu_exec_bit... Flags>
bool DFA(spu_thread& spu, spu_opcode_t op)
{
	spu.gpr[op.rt] = gv_addfd(spu.gpr[op.ra], spu.gpr[op.rb]);
	return true;
}

// Double subtract.
template <spu_exec_bit... Flags>
bool DFS(spu_thread& spu, spu_opcode_t op)
{
	spu.gpr[op.rt] = gv_subfd(spu.gpr[op.ra], spu.gpr[op.rb]);
	return true;
}

template <spu_exec_bit...
Flags> bool DFM(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = _mm_mul_pd(spu.gpr[op.ra], spu.gpr[op.rb]); return true; } template <spu_exec_bit... Flags> bool CLGTB(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = gv_gtu8(spu.gpr[op.ra], spu.gpr[op.rb]); return true; } template <spu_exec_bit... Flags> bool HLGT(spu_thread& spu, spu_opcode_t op) { if (spu.gpr[op.ra]._u32[3] > spu.gpr[op.rb]._u32[3]) { spu.halt(); } return true; } template <spu_exec_bit... Flags> bool DFMA(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = _mm_add_pd(_mm_mul_pd(spu.gpr[op.ra], spu.gpr[op.rb]), spu.gpr[op.rt]); return true; } template <spu_exec_bit... Flags> bool DFMS(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = _mm_sub_pd(_mm_mul_pd(spu.gpr[op.ra], spu.gpr[op.rb]), spu.gpr[op.rt]); return true; } template <spu_exec_bit... Flags> bool DFNMS(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = _mm_sub_pd(spu.gpr[op.rt], _mm_mul_pd(spu.gpr[op.ra], spu.gpr[op.rb])); return true; } template <spu_exec_bit... Flags> bool DFNMA(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = _mm_xor_pd(_mm_add_pd(_mm_mul_pd(spu.gpr[op.ra], spu.gpr[op.rb]), spu.gpr[op.rt]), _mm_set1_pd(-0.0)); return true; } template <spu_exec_bit... Flags> bool CEQ(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = _mm_cmpeq_epi32(spu.gpr[op.ra], spu.gpr[op.rb]); return true; } template <spu_exec_bit... Flags> bool MPYHHU(spu_thread& spu, spu_opcode_t op) { const auto a = spu.gpr[op.ra]; const auto b = spu.gpr[op.rb]; spu.gpr[op.rt] = _mm_or_si128(_mm_srli_epi32(_mm_mullo_epi16(a, b), 16), _mm_and_si128(_mm_mulhi_epu16(a, b), _mm_set1_epi32(0xffff0000))); return true; } template <spu_exec_bit... Flags> bool ADDX(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = gv_add32(gv_add32(spu.gpr[op.ra], spu.gpr[op.rb]), spu.gpr[op.rt] & v128::from32p(1)); return true; } template <spu_exec_bit... 
Flags>
// SFX: Subtract From Extended — rt = rb - ra - (1 - borrow), borrow taken from rt's per-word LSB.
bool SFX(spu_thread& spu, spu_opcode_t op)
{
	spu.gpr[op.rt] = gv_sub32(gv_sub32(spu.gpr[op.rb], spu.gpr[op.ra]), gv_andn(spu.gpr[op.rt], v128::from32p(1)));
	return true;
}

// CGX: Carry Generate Extended — per word, rt = carry-out of (carry_in + ra + rb),
// computed in 64 bits and extracted from bit 32.
template <spu_exec_bit... Flags>
bool CGX(spu_thread& spu, spu_opcode_t op)
{
	for (s32 i = 0; i < 4; i++)
	{
		const u64 carry = spu.gpr[op.rt]._u32[i] & 1;
		spu.gpr[op.rt]._u32[i] = (carry + spu.gpr[op.ra]._u32[i] + spu.gpr[op.rb]._u32[i]) >> 32;
	}
	return true;
}

// BGX: Borrow Generate Extended — per word, rt = 1 when rb - ra - (1 - borrow_in)
// does not underflow (no borrow generated).
template <spu_exec_bit... Flags>
bool BGX(spu_thread& spu, spu_opcode_t op)
{
	for (s32 i = 0; i < 4; i++)
	{
		const s64 result = u64{spu.gpr[op.rb]._u32[i]} - spu.gpr[op.ra]._u32[i] - (1 - (spu.gpr[op.rt]._u32[i] & 1));
		spu.gpr[op.rt]._u32[i] = result >= 0;
	}
	return true;
}

// MPYHHA: Multiply High High and Add — rt += product of the upper halfwords of each word pair.
template <spu_exec_bit... Flags>
bool MPYHHA(spu_thread& spu, spu_opcode_t op)
{
	spu.gpr[op.rt] = _mm_add_epi32(spu.gpr[op.rt], _mm_madd_epi16(_mm_srli_epi32(spu.gpr[op.ra], 16), _mm_srli_epi32(spu.gpr[op.rb], 16)));
	return true;
}

// MPYHHAU: Multiply High High and Add Unsigned — unsigned upper-halfword product accumulated into rt.
template <spu_exec_bit... Flags>
bool MPYHHAU(spu_thread& spu, spu_opcode_t op)
{
	const auto a = spu.gpr[op.ra];
	const auto b = spu.gpr[op.rb];
	spu.gpr[op.rt] = _mm_add_epi32(spu.gpr[op.rt], _mm_or_si128(_mm_srli_epi32(_mm_mullo_epi16(a, b), 16), _mm_and_si128(_mm_mulhi_epu16(a, b), _mm_set1_epi32(0xffff0000))));
	return true;
}

// FSCRRD: Floating-Point Status and Control Register Read — this interpreter
// models FPSCR as zero, so rt is simply cleared.
template <spu_exec_bit... Flags>
bool FSCRRD(spu_thread& spu, spu_opcode_t op)
{
	spu.gpr[op.rt].clear();
	return true;
}

// FESD: Floating Extend Single to Double — converts the two odd-indexed floats
// (selected by shuffle 0x8d) to doubles.
template <spu_exec_bit... Flags>
bool FESD(spu_thread& spu, spu_opcode_t op)
{
	const auto a = spu.gpr[op.ra];
	spu.gpr[op.rt] = _mm_cvtps_pd(_mm_shuffle_ps(a, a, 0x8d));
	return true;
}

// FRDS: Floating Round Double to Single — converts both doubles to floats and
// places them in the odd word slots (shuffle 0x72).
template <spu_exec_bit... Flags>
bool FRDS(spu_thread& spu, spu_opcode_t op)
{
	const auto t = _mm_cvtpd_ps(spu.gpr[op.ra]);
	spu.gpr[op.rt] = _mm_shuffle_ps(t, t, 0x72);
	return true;
}

// FSCRWR: Floating-Point Status and Control Register Write — ignored (FPSCR not modeled here).
template <spu_exec_bit... Flags>
bool FSCRWR(spu_thread&, spu_opcode_t)
{
	return true;
}

// DFTSV: Double Floating Test Special Value — unimplemented; aborts interpretation.
template <spu_exec_bit... Flags>
bool DFTSV(spu_thread&, spu_opcode_t)
{
	spu_log.fatal("DFTSV");
	return false;
}

template <spu_exec_bit...
Flags> bool FCEQ(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = _mm_cmpeq_ps(spu.gpr[op.rb], spu.gpr[op.ra]); return true; } template <spu_exec_bit... Flags> bool DFCEQ(spu_thread&, spu_opcode_t) { spu_log.fatal("DFCEQ"); return false; } template <spu_exec_bit... Flags> bool MPY(spu_thread& spu, spu_opcode_t op) { const auto mask = _mm_set1_epi32(0xffff); spu.gpr[op.rt] = _mm_madd_epi16(_mm_and_si128(spu.gpr[op.ra], mask), _mm_and_si128(spu.gpr[op.rb], mask)); return true; } template <spu_exec_bit... Flags> bool MPYH(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = _mm_slli_epi32(_mm_mullo_epi16(_mm_srli_epi32(spu.gpr[op.ra], 16), spu.gpr[op.rb]), 16); return true; } template <spu_exec_bit... Flags> bool MPYHH(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = _mm_madd_epi16(_mm_srli_epi32(spu.gpr[op.ra], 16), _mm_srli_epi32(spu.gpr[op.rb], 16)); return true; } template <spu_exec_bit... Flags> bool MPYS(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = _mm_srai_epi32(_mm_slli_epi32(_mm_mulhi_epi16(spu.gpr[op.ra], spu.gpr[op.rb]), 16), 16); return true; } template <spu_exec_bit... Flags> bool CEQH(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = _mm_cmpeq_epi16(spu.gpr[op.ra], spu.gpr[op.rb]); return true; } template <spu_exec_bit... Flags> bool FCMEQ(spu_thread& spu, spu_opcode_t op) { const auto mask = _mm_castsi128_ps(_mm_set1_epi32(0x7fffffff)); spu.gpr[op.rt] = _mm_cmpeq_ps(_mm_and_ps(spu.gpr[op.rb], mask), _mm_and_ps(spu.gpr[op.ra], mask)); return true; } template <spu_exec_bit... Flags> bool DFCMEQ(spu_thread&, spu_opcode_t) { spu_log.fatal("DFCMEQ"); return false; } template <spu_exec_bit... Flags> bool MPYU(spu_thread& spu, spu_opcode_t op) { const auto a = spu.gpr[op.ra]; const auto b = spu.gpr[op.rb]; spu.gpr[op.rt] = _mm_or_si128(_mm_slli_epi32(_mm_mulhi_epu16(a, b), 16), _mm_and_si128(_mm_mullo_epi16(a, b), _mm_set1_epi32(0xffff))); return true; } template <spu_exec_bit... 
Flags> bool CEQB(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = _mm_cmpeq_epi8(spu.gpr[op.ra], spu.gpr[op.rb]); return true; } template <spu_exec_bit... Flags> bool FI(spu_thread& spu, spu_opcode_t op) { // TODO const auto mask_se = _mm_castsi128_ps(_mm_set1_epi32(0xff800000)); // sign and exponent mask const auto mask_bf = _mm_castsi128_ps(_mm_set1_epi32(0x007ffc00)); // base fraction mask const auto mask_sf = _mm_set1_epi32(0x000003ff); // step fraction mask const auto mask_yf = _mm_set1_epi32(0x0007ffff); // Y fraction mask (bits 13..31) const auto base = _mm_or_ps(_mm_and_ps(spu.gpr[op.rb], mask_bf), _mm_castsi128_ps(_mm_set1_epi32(0x3f800000))); const auto step = _mm_mul_ps(_mm_cvtepi32_ps(_mm_and_si128(spu.gpr[op.rb], mask_sf)), _mm_set1_ps(std::exp2(-13.f))); const auto y = _mm_mul_ps(_mm_cvtepi32_ps(_mm_and_si128(spu.gpr[op.ra], mask_yf)), _mm_set1_ps(std::exp2(-19.f))); spu.gpr[op.rt] = _mm_or_ps(_mm_and_ps(mask_se, spu.gpr[op.rb]), _mm_andnot_ps(mask_se, _mm_sub_ps(base, _mm_mul_ps(step, y)))); return true; } template <spu_exec_bit... Flags> bool HEQ(spu_thread& spu, spu_opcode_t op) { if (spu.gpr[op.ra]._s32[3] == spu.gpr[op.rb]._s32[3]) { spu.halt(); } return true; } template <spu_exec_bit... Flags> bool CFLTS(spu_thread& spu, spu_opcode_t op) { const auto scaled = _mm_mul_ps(spu.gpr[op.ra], g_spu_imm.scale[173 - op.i8]); spu.gpr[op.rt] = _mm_xor_si128(_mm_cvttps_epi32(scaled), _mm_castps_si128(_mm_cmpge_ps(scaled, _mm_set1_ps(0x80000000)))); return true; } template <spu_exec_bit... 
Flags> bool CFLTU(spu_thread& spu, spu_opcode_t op) { const auto scaled1 = _mm_max_ps(_mm_mul_ps(spu.gpr[op.ra], g_spu_imm.scale[173 - op.i8]), _mm_set1_ps(0.0f)); const auto scaled2 = _mm_and_ps(_mm_sub_ps(scaled1, _mm_set1_ps(0x80000000)), _mm_cmpge_ps(scaled1, _mm_set1_ps(0x80000000))); spu.gpr[op.rt] = _mm_or_si128(_mm_or_si128(_mm_cvttps_epi32(scaled1), _mm_cvttps_epi32(scaled2)), _mm_castps_si128(_mm_cmpge_ps(scaled1, _mm_set1_ps(0x100000000)))); return true; } template <spu_exec_bit... Flags> bool CSFLT(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = _mm_mul_ps(_mm_cvtepi32_ps(spu.gpr[op.ra]), g_spu_imm.scale[op.i8 - 155]); return true; } template <spu_exec_bit... Flags> bool CUFLT(spu_thread& spu, spu_opcode_t op) { const auto a = spu.gpr[op.ra]; const auto fix = _mm_and_ps(_mm_castsi128_ps(_mm_srai_epi32(a, 31)), _mm_set1_ps(0x80000000)); spu.gpr[op.rt] = _mm_mul_ps(_mm_add_ps(_mm_cvtepi32_ps(_mm_and_si128(a, _mm_set1_epi32(0x7fffffff))), fix), g_spu_imm.scale[op.i8 - 155]); return true; } template <spu_exec_bit... Flags> bool BRZ(spu_thread& spu, spu_opcode_t op) { if (spu.gpr[op.rt]._u32[3] == 0) { spu.pc = spu_branch_target(spu.pc, op.i16); return false; } return true; } template <spu_exec_bit... Flags> bool STQA(spu_thread& spu, spu_opcode_t op) { spu._ref<v128>(spu_ls_target(0, op.i16)) = spu.gpr[op.rt]; return true; } template <spu_exec_bit... Flags> bool BRNZ(spu_thread& spu, spu_opcode_t op) { if (spu.gpr[op.rt]._u32[3] != 0) { spu.pc = spu_branch_target(spu.pc, op.i16); return false; } return true; } template <spu_exec_bit... Flags> bool BRHZ(spu_thread& spu, spu_opcode_t op) { if (spu.gpr[op.rt]._u16[6] == 0) { spu.pc = spu_branch_target(spu.pc, op.i16); return false; } return true; } template <spu_exec_bit... Flags> bool BRHNZ(spu_thread& spu, spu_opcode_t op) { if (spu.gpr[op.rt]._u16[6] != 0) { spu.pc = spu_branch_target(spu.pc, op.i16); return false; } return true; } template <spu_exec_bit... 
Flags> bool STQR(spu_thread& spu, spu_opcode_t op) { spu._ref<v128>(spu_ls_target(spu.pc, op.i16)) = spu.gpr[op.rt]; return true; } template <spu_exec_bit... Flags> bool BRA(spu_thread& spu, spu_opcode_t op) { spu.pc = spu_branch_target(0, op.i16); return false; } template <spu_exec_bit... Flags> bool LQA(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = spu._ref<v128>(spu_ls_target(0, op.i16)); return true; } template <spu_exec_bit... Flags> bool BRASL(spu_thread& spu, spu_opcode_t op) { const u32 target = spu_branch_target(0, op.i16); spu.gpr[op.rt] = v128::from32r(spu_branch_target(spu.pc + 4)); spu.pc = target; return false; } template <spu_exec_bit... Flags> bool BR(spu_thread& spu, spu_opcode_t op) { spu.pc = spu_branch_target(spu.pc, op.i16); return false; } template <spu_exec_bit... Flags> bool FSMBI(spu_thread& spu, spu_opcode_t op) { const auto vsrc = _mm_set_epi32(0, 0, 0, op.i16); const auto bits = _mm_shuffle_epi32(_mm_shufflelo_epi16(_mm_unpacklo_epi8(vsrc, vsrc), 0x50), 0x50); const auto mask = _mm_set_epi8(-128, 64, 32, 16, 8, 4, 2, 1, -128, 64, 32, 16, 8, 4, 2, 1); spu.gpr[op.rt] = _mm_cmpeq_epi8(_mm_and_si128(bits, mask), mask); return true; } template <spu_exec_bit... Flags> bool BRSL(spu_thread& spu, spu_opcode_t op) { const u32 target = spu_branch_target(spu.pc, op.i16); spu.gpr[op.rt] = v128::from32r(spu_branch_target(spu.pc + 4)); spu.pc = target; return false; } template <spu_exec_bit... Flags> bool LQR(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = spu._ref<v128>(spu_ls_target(spu.pc, op.i16)); return true; } template <spu_exec_bit... Flags> bool IL(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = _mm_set1_epi32(op.si16); return true; } template <spu_exec_bit... Flags> bool ILHU(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = _mm_set1_epi32(op.i16 << 16); return true; } template <spu_exec_bit... 
Flags> bool ILH(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = _mm_set1_epi16(op.i16); return true; } template <spu_exec_bit... Flags> bool IOHL(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = _mm_or_si128(spu.gpr[op.rt], _mm_set1_epi32(op.i16)); return true; } template <spu_exec_bit... Flags> bool ORI(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = _mm_or_si128(spu.gpr[op.ra], _mm_set1_epi32(op.si10)); return true; } template <spu_exec_bit... Flags> bool ORHI(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = _mm_or_si128(spu.gpr[op.ra], _mm_set1_epi16(op.si10)); return true; } template <spu_exec_bit... Flags> bool ORBI(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = _mm_or_si128(spu.gpr[op.ra], _mm_set1_epi8(op.i8)); return true; } template <spu_exec_bit... Flags> bool SFI(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = _mm_sub_epi32(_mm_set1_epi32(op.si10), spu.gpr[op.ra]); return true; } template <spu_exec_bit... Flags> bool SFHI(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = _mm_sub_epi16(_mm_set1_epi16(op.si10), spu.gpr[op.ra]); return true; } template <spu_exec_bit... Flags> bool ANDI(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = _mm_and_si128(spu.gpr[op.ra], _mm_set1_epi32(op.si10)); return true; } template <spu_exec_bit... Flags> bool ANDHI(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = _mm_and_si128(spu.gpr[op.ra], _mm_set1_epi16(op.si10)); return true; } template <spu_exec_bit... Flags> bool ANDBI(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = _mm_and_si128(spu.gpr[op.ra], _mm_set1_epi8(op.i8)); return true; } template <spu_exec_bit... Flags> bool AI(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = _mm_add_epi32(_mm_set1_epi32(op.si10), spu.gpr[op.ra]); return true; } template <spu_exec_bit... Flags> bool AHI(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = _mm_add_epi16(_mm_set1_epi16(op.si10), spu.gpr[op.ra]); return true; } template <spu_exec_bit... 
Flags> bool STQD(spu_thread& spu, spu_opcode_t op) { spu._ref<v128>((spu.gpr[op.ra]._s32[3] + (op.si10 * 16)) & 0x3fff0) = spu.gpr[op.rt]; return true; } template <spu_exec_bit... Flags> bool LQD(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = spu._ref<v128>((spu.gpr[op.ra]._s32[3] + (op.si10 * 16)) & 0x3fff0); return true; } template <spu_exec_bit... Flags> bool XORI(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = _mm_xor_si128(spu.gpr[op.ra], _mm_set1_epi32(op.si10)); return true; } template <spu_exec_bit... Flags> bool XORHI(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = _mm_xor_si128(spu.gpr[op.ra], _mm_set1_epi16(op.si10)); return true; } template <spu_exec_bit... Flags> bool XORBI(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = _mm_xor_si128(spu.gpr[op.ra], _mm_set1_epi8(op.i8)); return true; } template <spu_exec_bit... Flags> bool CGTI(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = _mm_cmpgt_epi32(spu.gpr[op.ra], _mm_set1_epi32(op.si10)); return true; } template <spu_exec_bit... Flags> bool CGTHI(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = _mm_cmpgt_epi16(spu.gpr[op.ra], _mm_set1_epi16(op.si10)); return true; } template <spu_exec_bit... Flags> bool CGTBI(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = _mm_cmpgt_epi8(spu.gpr[op.ra], _mm_set1_epi8(op.i8)); return true; } template <spu_exec_bit... Flags> bool HGTI(spu_thread& spu, spu_opcode_t op) { if (spu.gpr[op.ra]._s32[3] > op.si10) { spu.halt(); } return true; } template <spu_exec_bit... Flags> bool CLGTI(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = _mm_cmpgt_epi32(_mm_xor_si128(spu.gpr[op.ra], _mm_set1_epi32(0x80000000)), _mm_set1_epi32(op.si10 ^ 0x80000000)); return true; } template <spu_exec_bit... Flags> bool CLGTHI(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = _mm_cmpgt_epi16(_mm_xor_si128(spu.gpr[op.ra], _mm_set1_epi32(0x80008000)), _mm_set1_epi16(op.si10 ^ 0x8000)); return true; } template <spu_exec_bit... 
Flags> bool CLGTBI(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = _mm_cmpgt_epi8(_mm_xor_si128(spu.gpr[op.ra], _mm_set1_epi32(0x80808080)), _mm_set1_epi8(op.i8 ^ 0x80)); return true; } template <spu_exec_bit... Flags> bool HLGTI(spu_thread& spu, spu_opcode_t op) { if (spu.gpr[op.ra]._u32[3] > static_cast<u32>(op.si10)) { spu.halt(); } return true; } template <spu_exec_bit... Flags> bool MPYI(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = _mm_madd_epi16(spu.gpr[op.ra], _mm_set1_epi32(op.si10 & 0xffff)); return true; } template <spu_exec_bit... Flags> bool MPYUI(spu_thread& spu, spu_opcode_t op) { const auto a = spu.gpr[op.ra]; const auto i = _mm_set1_epi32(op.si10 & 0xffff); spu.gpr[op.rt] = _mm_or_si128(_mm_slli_epi32(_mm_mulhi_epu16(a, i), 16), _mm_mullo_epi16(a, i)); return true; } template <spu_exec_bit... Flags> bool CEQI(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = _mm_cmpeq_epi32(spu.gpr[op.ra], _mm_set1_epi32(op.si10)); return true; } template <spu_exec_bit... Flags> bool CEQHI(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = _mm_cmpeq_epi16(spu.gpr[op.ra], _mm_set1_epi16(op.si10)); return true; } template <spu_exec_bit... Flags> bool CEQBI(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = _mm_cmpeq_epi8(spu.gpr[op.ra], _mm_set1_epi8(op.i8)); return true; } template <spu_exec_bit... Flags> bool HEQI(spu_thread& spu, spu_opcode_t op) { if (spu.gpr[op.ra]._s32[3] == op.si10) { spu.halt(); } return true; } template <spu_exec_bit... Flags> bool HBRA(spu_thread&, spu_opcode_t) { return true; } template <spu_exec_bit... Flags> bool HBRR(spu_thread&, spu_opcode_t) { return true; } template <spu_exec_bit... Flags> bool ILA(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt] = _mm_set1_epi32(op.i18); return true; } template <spu_exec_bit... Flags> bool SELB(spu_thread& spu, spu_opcode_t op) { spu.gpr[op.rt4] = (spu.gpr[op.rc] & spu.gpr[op.rb]) | gv_andn(spu.gpr[op.rc], spu.gpr[op.ra]); return true; } template <spu_exec_bit... 
Flags> bool SHUFB(spu_thread& spu, spu_opcode_t op) { __m128i ab[2]{__m128i(spu.gpr[op.rb]), __m128i(spu.gpr[op.ra])}; v128 c = spu.gpr[op.rc]; v128 x = _mm_andnot_si128(c, _mm_set1_epi8(0x1f)); v128 res; // Select bytes for (int i = 0; i < 16; i++) { res._u8[i] = reinterpret_cast<u8*>(ab)[x._u8[i]]; } // Select special values const auto xc0 = _mm_set1_epi8(static_cast<s8>(0xc0)); const auto xe0 = _mm_set1_epi8(static_cast<s8>(0xe0)); const auto cmp0 = _mm_cmpgt_epi8(_mm_setzero_si128(), c); const auto cmp1 = _mm_cmpeq_epi8(_mm_and_si128(c, xc0), xc0); const auto cmp2 = _mm_cmpeq_epi8(_mm_and_si128(c, xe0), xc0); spu.gpr[op.rt4] = _mm_or_si128(_mm_andnot_si128(cmp0, res), _mm_avg_epu8(cmp1, cmp2)); return true; } #if defined(ARCH_X64) const spu_intrp_func_t optimized_shufb = build_function_asm<spu_intrp_func_t>("spu_shufb", [](asmjit::x86::Assembler& c, auto& /*args*/) { using namespace asmjit; const auto& va = x86::xmm0; const auto& vb = x86::xmm1; const auto& vc = x86::xmm2; const auto& vt = x86::xmm3; const auto& vm = x86::xmm4; const auto& v5 = x86::xmm5; Label xc0 = c.newLabel(); Label xe0 = c.newLabel(); Label x0f = c.newLabel(); build_spu_gpr_load(c, va, s_op.ra); build_spu_gpr_load(c, vb, s_op.rb); build_spu_gpr_load(c, vc, s_op.rc); if (utils::has_avx()) { c.vpand(v5, vc, x86::oword_ptr(xe0)); c.vpxor(vc, vc, x86::oword_ptr(x0f)); c.vpshufb(va, va, vc); c.vpslld(vt, vc, 3); c.vmovdqa(vm, x86::oword_ptr(xc0)); c.vpcmpeqb(v5, v5, vm); c.vpshufb(vb, vb, vc); c.vpand(vc, vc, vm); c.vpblendvb(vb, va, vb, vt); c.vpcmpeqb(vt, vc, vm); c.vpavgb(vt, vt, v5); c.vpor(vt, vt, vb); } else { c.movdqa(v5, vc); c.pand(v5, x86::oword_ptr(xe0)); c.movdqa(vt, vc); c.movdqa(vm, x86::oword_ptr(xc0)); c.pand(vt, vm); c.pxor(vc, x86::oword_ptr(x0f)); c.pshufb(va, vc); c.pshufb(vb, vc); c.pslld(vc, 3); c.pcmpeqb(v5, vm); c.pcmpeqb(vt, vm); c.pcmpeqb(vm, vm); c.pcmpgtb(vc, vm); c.pand(va, vc); c.pandn(vc, vb); c.por(vc, va); c.pavgb(vt, v5); c.por(vt, vc); } build_spu_gpr_store(c, 
vt, s_op.rt4); c.mov(x86::eax, 1); c.ret(); c.align(AlignMode::kData, 16); c.bind(xc0); c.dq(0xc0c0c0c0c0c0c0c0); c.dq(0xc0c0c0c0c0c0c0c0); c.bind(xe0); c.dq(0xe0e0e0e0e0e0e0e0); c.dq(0xe0e0e0e0e0e0e0e0); c.bind(x0f); c.dq(0x0f0f0f0f0f0f0f0f); c.dq(0x0f0f0f0f0f0f0f0f); }); #endif template <spu_exec_bit... Flags> bool MPYA(spu_thread& spu, spu_opcode_t op) { const auto mask = _mm_set1_epi32(0xffff); spu.gpr[op.rt4] = _mm_add_epi32(spu.gpr[op.rc], _mm_madd_epi16(_mm_and_si128(spu.gpr[op.ra], mask), _mm_and_si128(spu.gpr[op.rb], mask))); return true; } template <spu_exec_bit... Flags> bool FNMS(spu_thread& spu, spu_opcode_t op) { const u32 test_bits = 0x7f800000; auto mask = _mm_set1_ps(std::bit_cast<f32>(test_bits)); auto test_a = _mm_and_ps(spu.gpr[op.ra], mask); auto mask_a = _mm_cmpneq_ps(test_a, mask); auto test_b = _mm_and_ps(spu.gpr[op.rb], mask); auto mask_b = _mm_cmpneq_ps(test_b, mask); auto a = _mm_and_ps(spu.gpr[op.ra], mask_a); auto b = _mm_and_ps(spu.gpr[op.rb], mask_b); spu.gpr[op.rt4] = _mm_sub_ps(spu.gpr[op.rc], _mm_mul_ps(a, b)); return true; } template <spu_exec_bit... Flags> bool FMA(spu_thread& spu, spu_opcode_t op) { const u32 test_bits = 0x7f800000; auto mask = _mm_set1_ps(std::bit_cast<f32>(test_bits)); auto test_a = _mm_and_ps(spu.gpr[op.ra], mask); auto mask_a = _mm_cmpneq_ps(test_a, mask); auto test_b = _mm_and_ps(spu.gpr[op.rb], mask); auto mask_b = _mm_cmpneq_ps(test_b, mask); auto a = _mm_and_ps(spu.gpr[op.ra], mask_a); auto b = _mm_and_ps(spu.gpr[op.rb], mask_b); spu.gpr[op.rt4] = _mm_add_ps(_mm_mul_ps(a, b), spu.gpr[op.rc]); return true; } template <spu_exec_bit... 
Flags> bool FMS(spu_thread& spu, spu_opcode_t op) { const u32 test_bits = 0x7f800000; auto mask = _mm_set1_ps(std::bit_cast<f32>(test_bits)); auto test_a = _mm_and_ps(spu.gpr[op.ra], mask); auto mask_a = _mm_cmpneq_ps(test_a, mask); auto test_b = _mm_and_ps(spu.gpr[op.rb], mask); auto mask_b = _mm_cmpneq_ps(test_b, mask); auto a = _mm_and_ps(spu.gpr[op.ra], mask_a); auto b = _mm_and_ps(spu.gpr[op.rb], mask_b); spu.gpr[op.rt4] = _mm_sub_ps(_mm_mul_ps(a, b), spu.gpr[op.rc]); return true; } #if 0 static void SetHostRoundingMode(u32 rn) { switch (rn) { case FPSCR_RN_NEAR: fesetround(FE_TONEAREST); break; case FPSCR_RN_ZERO: fesetround(FE_TOWARDZERO); break; case FPSCR_RN_PINF: fesetround(FE_UPWARD); break; case FPSCR_RN_MINF: fesetround(FE_DOWNWARD); break; } } // Floating-point utility constants and functions static const u32 FLOAT_MAX_NORMAL_I = 0x7F7FFFFF; static const f32 FLOAT_MAX_NORMAL = std::bit_cast<f32>(FLOAT_MAX_NORMAL_I); static const u32 FLOAT_NAN_I = 0x7FC00000; static const f32 FLOAT_NAN = std::bit_cast<f32>(FLOAT_NAN_I); static const u64 DOUBLE_NAN_I = 0x7FF8000000000000ULL; static const f64 DOUBLE_NAN = std::bit_cast<f64>(DOUBLE_NAN_I); inline bool issnan(double x) { return std::isnan(x) && (std::bit_cast<s64>(x)) << 12 > 0; } inline bool issnan(float x) { return std::isnan(x) && (std::bit_cast<s32>(x)) << 9 > 0; } inline bool isextended(float x) { return fexpf(x) == 255; } inline float extended(bool sign, u32 mantissa) // returns -1^sign * 2^127 * (1.mantissa) { u32 bits = sign << 31 | 0x7F800000 | mantissa; return std::bit_cast<f32>(bits); } inline float ldexpf_extended(float x, int exp) // ldexpf() for extended values, assumes result is in range { u32 bits = std::bit_cast<u32>(x); if (bits << 1 != 0) bits += exp * 0x00800000; return std::bit_cast<f32>(bits); } inline bool isdenormal(float x) { return std::fpclassify(x) == FP_SUBNORMAL; } inline bool isdenormal(double x) { return std::fpclassify(x) == FP_SUBNORMAL; } bool 
spu_interpreter_precise::FREST(spu_thread& spu, spu_opcode_t op) { fesetround(FE_TOWARDZERO); const auto ra = spu.gpr[op.ra]; v128 res = _mm_rcp_ps(ra); for (int i = 0; i < 4; i++) { const auto a = ra._f[i]; const int exp = fexpf(a); if (exp == 0) { spu.fpscr.setDivideByZeroFlag(i); res._f[i] = extended(std::signbit(a), 0x7FFFFF); } else if (exp >= (0x7e800000 >> 23)) // Special case for values not handled properly in rcpps { res._f[i] = 0.0f; } } spu.gpr[op.rt] = res; return true; } bool spu_interpreter_precise::FRSQEST(spu_thread& spu, spu_opcode_t op) { fesetround(FE_TOWARDZERO); for (int i = 0; i < 4; i++) { const float a = spu.gpr[op.ra]._f[i]; float result; if (fexpf(a) == 0) { spu.fpscr.setDivideByZeroFlag(i); result = extended(0, 0x7FFFFF); } else if (isextended(a)) result = 0.5f / std::sqrt(std::fabs(ldexpf_extended(a, -2))); else result = 1 / std::sqrt(std::fabs(a)); spu.gpr[op.rt]._f[i] = result; } return true; } bool spu_interpreter_precise::FCGT(spu_thread& spu, spu_opcode_t op) { for (int i = 0; i < 4; i++) { const u32 a = spu.gpr[op.ra]._u32[i]; const u32 b = spu.gpr[op.rb]._u32[i]; const u32 abs_a = a & 0x7FFFFFFF; const u32 abs_b = b & 0x7FFFFFFF; const bool a_zero = (abs_a < 0x00800000); const bool b_zero = (abs_b < 0x00800000); bool pass; if (a_zero) pass = b >= 0x80800000; else if (b_zero) pass = static_cast<s32>(a) >= 0x00800000; else if (a >= 0x80000000) pass = (b >= 0x80000000 && a < b); else pass = (b >= 0x80000000 || a > b); spu.gpr[op.rt]._u32[i] = pass ? 0xFFFFFFFF : 0; } return true; } static void FA_FS(spu_thread& spu, spu_opcode_t op, bool sub) { fesetround(FE_TOWARDZERO); for (int w = 0; w < 4; w++) { const float a = spu.gpr[op.ra]._f[w]; const float b = sub ? 
-spu.gpr[op.rb]._f[w] : spu.gpr[op.rb]._f[w]; float result; if (isdenormal(a)) { spu.fpscr.setSinglePrecisionExceptionFlags(w, FPSCR_SDIFF); if (b == 0.0f || isdenormal(b)) result = +0.0f; else result = b; } else if (isdenormal(b)) { spu.fpscr.setSinglePrecisionExceptionFlags(w, FPSCR_SDIFF); if (a == 0.0f) result = +0.0f; else result = a; } else if (isextended(a) || isextended(b)) { spu.fpscr.setSinglePrecisionExceptionFlags(w, FPSCR_SDIFF); if (isextended(a) && fexpf(b) < 255 - 32) { if (std::signbit(a) != std::signbit(b)) { const u32 bits = std::bit_cast<u32>(a) - 1; result = std::bit_cast<f32>(bits); } else result = a; } else if (isextended(b) && fexpf(a) < 255 - 32) { if (std::signbit(a) != std::signbit(b)) { const u32 bits = std::bit_cast<u32>(b) - 1; result = std::bit_cast<f32>(bits); } else result = b; } else { feclearexcept(FE_ALL_EXCEPT); result = ldexpf_extended(a, -1) + ldexpf_extended(b, -1); result = ldexpf_extended(result, 1); if (fetestexcept(FE_OVERFLOW)) { spu.fpscr.setSinglePrecisionExceptionFlags(w, FPSCR_SOVF); result = extended(std::signbit(result), 0x7FFFFF); } } } else { result = a + b; if (result == std::copysign(FLOAT_MAX_NORMAL, result)) { result = ldexpf_extended(std::ldexp(a, -1) + std::ldexp(b, -1), 1); if (isextended(result)) spu.fpscr.setSinglePrecisionExceptionFlags(w, FPSCR_SDIFF); } else if (isdenormal(result)) { spu.fpscr.setSinglePrecisionExceptionFlags(w, FPSCR_SUNF | FPSCR_SDIFF); result = +0.0f; } else if (result == 0.0f) { if (std::fabs(a) != std::fabs(b)) spu.fpscr.setSinglePrecisionExceptionFlags(w, FPSCR_SUNF | FPSCR_SDIFF); result = +0.0f; } } spu.gpr[op.rt]._f[w] = result; } } bool spu_interpreter_precise::FA(spu_thread& spu, spu_opcode_t op) { FA_FS(spu, op, false); return true; } bool spu_interpreter_precise::FS(spu_thread& spu, spu_opcode_t op) { FA_FS(spu, op, true); return true; } bool spu_interpreter_precise::FM(spu_thread& spu, spu_opcode_t op) { fesetround(FE_TOWARDZERO); for (int w = 0; w < 4; w++) { const 
float a = spu.gpr[op.ra]._f[w]; const float b = spu.gpr[op.rb]._f[w]; float result; if (isdenormal(a) || isdenormal(b)) { spu.fpscr.setSinglePrecisionExceptionFlags(w, FPSCR_SDIFF); result = +0.0f; } else if (isextended(a) || isextended(b)) { spu.fpscr.setSinglePrecisionExceptionFlags(w, FPSCR_SDIFF); const bool sign = std::signbit(a) ^ std::signbit(b); if (a == 0.0f || b == 0.0f) { result = +0.0f; } else if ((fexpf(a) - 127) + (fexpf(b) - 127) >= 129) { spu.fpscr.setSinglePrecisionExceptionFlags(w, FPSCR_SOVF); result = extended(sign, 0x7FFFFF); } else { if (isextended(a)) result = ldexpf_extended(a, -1) * b; else result = a * ldexpf_extended(b, -1); if (result == std::copysign(FLOAT_MAX_NORMAL, result)) { spu.fpscr.setSinglePrecisionExceptionFlags(w, FPSCR_SOVF); result = extended(sign, 0x7FFFFF); } else result = ldexpf_extended(result, 1); } } else { result = a * b; if (result == std::copysign(FLOAT_MAX_NORMAL, result)) { feclearexcept(FE_ALL_EXCEPT); if (fexpf(a) > fexpf(b)) result = std::ldexp(a, -1) * b; else result = a * std::ldexp(b, -1); result = ldexpf_extended(result, 1); if (isextended(result)) spu.fpscr.setSinglePrecisionExceptionFlags(w, FPSCR_SDIFF); if (fetestexcept(FE_OVERFLOW)) spu.fpscr.setSinglePrecisionExceptionFlags(w, FPSCR_SOVF); } else if (isdenormal(result)) { spu.fpscr.setSinglePrecisionExceptionFlags(w, FPSCR_SUNF | FPSCR_SDIFF); result = +0.0f; } else if (result == 0.0f) { if (a != 0.0f && b != 0.0f) spu.fpscr.setSinglePrecisionExceptionFlags(w, FPSCR_SUNF | FPSCR_SDIFF); result = +0.0f; } } spu.gpr[op.rt]._f[w] = result; } return true; } bool spu_interpreter_precise::FCMGT(spu_thread& spu, spu_opcode_t op) { for (int i = 0; i < 4; i++) { const u32 a = spu.gpr[op.ra]._u32[i]; const u32 b = spu.gpr[op.rb]._u32[i]; const u32 abs_a = a & 0x7FFFFFFF; const u32 abs_b = b & 0x7FFFFFFF; const bool a_zero = (abs_a < 0x00800000); const bool b_zero = (abs_b < 0x00800000); bool pass; if (a_zero) pass = false; else if (b_zero) pass = !a_zero; else 
pass = abs_a > abs_b; spu.gpr[op.rt]._u32[i] = pass ? 0xFFFFFFFF : 0; } return true; } enum DoubleOp { DFASM_A, DFASM_S, DFASM_M, }; static void DFASM(spu_thread& spu, spu_opcode_t op, DoubleOp operation) { for (int i = 0; i < 2; i++) { double a = spu.gpr[op.ra]._d[i]; double b = spu.gpr[op.rb]._d[i]; if (isdenormal(a)) { spu.fpscr.setDoublePrecisionExceptionFlags(i, FPSCR_DDENORM); a = std::copysign(0.0, a); } if (isdenormal(b)) { spu.fpscr.setDoublePrecisionExceptionFlags(i, FPSCR_DDENORM); b = std::copysign(0.0, b); } double result; if (std::isnan(a) || std::isnan(b)) { spu.fpscr.setDoublePrecisionExceptionFlags(i, FPSCR_DNAN); if (issnan(a) || issnan(b)) spu.fpscr.setDoublePrecisionExceptionFlags(i, FPSCR_DINV); result = DOUBLE_NAN; } else { SetHostRoundingMode(spu.fpscr.checkSliceRounding(i)); feclearexcept(FE_ALL_EXCEPT); switch (operation) { case DFASM_A: result = a + b; break; case DFASM_S: result = a - b; break; case DFASM_M: result = a * b; break; } const u32 e = _mm_getcsr(); if (e & _MM_EXCEPT_INVALID) { spu.fpscr.setDoublePrecisionExceptionFlags(i, FPSCR_DINV); result = DOUBLE_NAN; } else { if (e & _MM_EXCEPT_OVERFLOW) spu.fpscr.setDoublePrecisionExceptionFlags(i, FPSCR_DOVF); if (e & _MM_EXCEPT_UNDERFLOW) spu.fpscr.setDoublePrecisionExceptionFlags(i, FPSCR_DUNF); if (e & _MM_EXCEPT_INEXACT) spu.fpscr.setDoublePrecisionExceptionFlags(i, FPSCR_DINX); } } spu.gpr[op.rt]._d[i] = result; } } bool spu_interpreter_precise::DFA(spu_thread& spu, spu_opcode_t op) { DFASM(spu, op, DFASM_A); return true; } bool spu_interpreter_precise::DFS(spu_thread& spu, spu_opcode_t op) { DFASM(spu, op, DFASM_S); return true; } bool spu_interpreter_precise::DFM(spu_thread& spu, spu_opcode_t op) { DFASM(spu, op, DFASM_M); return true; } static void DFMA(spu_thread& spu, spu_opcode_t op, bool neg, bool sub) { for (int i = 0; i < 2; i++) { double a = spu.gpr[op.ra]._d[i]; double b = spu.gpr[op.rb]._d[i]; double c = spu.gpr[op.rt]._d[i]; if (isdenormal(a)) { 
spu.fpscr.setDoublePrecisionExceptionFlags(i, FPSCR_DDENORM); a = std::copysign(0.0, a); } if (isdenormal(b)) { spu.fpscr.setDoublePrecisionExceptionFlags(i, FPSCR_DDENORM); b = std::copysign(0.0, b); } if (isdenormal(c)) { spu.fpscr.setDoublePrecisionExceptionFlags(i, FPSCR_DDENORM); c = std::copysign(0.0, c); } double result; if (std::isnan(a) || std::isnan(b) || std::isnan(c)) { spu.fpscr.setDoublePrecisionExceptionFlags(i, FPSCR_DNAN); if (issnan(a) || issnan(b) || issnan(c) || (std::isinf(a) && b == 0.0f) || (a == 0.0f && std::isinf(b))) spu.fpscr.setDoublePrecisionExceptionFlags(i, FPSCR_DINV); result = DOUBLE_NAN; } else { SetHostRoundingMode(spu.fpscr.checkSliceRounding(i)); feclearexcept(FE_ALL_EXCEPT); result = fma(a, b, sub ? -c : c); const u32 e = _mm_getcsr(); if (e & _MM_EXCEPT_INVALID) { spu.fpscr.setDoublePrecisionExceptionFlags(i, FPSCR_DINV); result = DOUBLE_NAN; } else { if (e & _MM_EXCEPT_OVERFLOW) spu.fpscr.setDoublePrecisionExceptionFlags(i, FPSCR_DOVF); if (e & _MM_EXCEPT_UNDERFLOW) spu.fpscr.setDoublePrecisionExceptionFlags(i, FPSCR_DUNF); if (e & _MM_EXCEPT_INEXACT) spu.fpscr.setDoublePrecisionExceptionFlags(i, FPSCR_DINX); if (neg) result = -result; } } spu.gpr[op.rt]._d[i] = result; } } bool spu_interpreter_precise::DFMA(spu_thread& spu, spu_opcode_t op) { ::DFMA(spu, op, false, false); return true; } bool spu_interpreter_precise::DFMS(spu_thread& spu, spu_opcode_t op) { ::DFMA(spu, op, false, true); return true; } bool spu_interpreter_precise::DFNMS(spu_thread& spu, spu_opcode_t op) { ::DFMA(spu, op, true, true); return true; } bool spu_interpreter_precise::DFNMA(spu_thread& spu, spu_opcode_t op) { ::DFMA(spu, op, true, false); return true; } bool spu_interpreter_precise::FSCRRD(spu_thread& spu, spu_opcode_t op) { spu.fpscr.Read(spu.gpr[op.rt]); return true; } bool spu_interpreter_precise::FESD(spu_thread& spu, spu_opcode_t op) { for (int i = 0; i < 2; i++) { const float a = spu.gpr[op.ra]._f[i * 2 + 1]; if (std::isnan(a)) { 
spu.fpscr.setDoublePrecisionExceptionFlags(i, FPSCR_DNAN); if (issnan(a)) spu.fpscr.setDoublePrecisionExceptionFlags(i, FPSCR_DINV); spu.gpr[op.rt]._d[i] = DOUBLE_NAN; } else if (isdenormal(a)) { spu.fpscr.setDoublePrecisionExceptionFlags(i, FPSCR_DDENORM); spu.gpr[op.rt]._d[i] = 0.0; } else { spu.gpr[op.rt]._d[i] = a; } } return true; } bool spu_interpreter_precise::FRDS(spu_thread& spu, spu_opcode_t op) { for (int i = 0; i < 2; i++) { SetHostRoundingMode(spu.fpscr.checkSliceRounding(i)); const double a = spu.gpr[op.ra]._d[i]; if (std::isnan(a)) { spu.fpscr.setDoublePrecisionExceptionFlags(i, FPSCR_DNAN); if (issnan(a)) spu.fpscr.setDoublePrecisionExceptionFlags(i, FPSCR_DINV); spu.gpr[op.rt]._f[i * 2 + 1] = FLOAT_NAN; } else { feclearexcept(FE_ALL_EXCEPT); spu.gpr[op.rt]._f[i * 2 + 1] = static_cast<float>(a); const u32 e = _mm_getcsr(); if (e & _MM_EXCEPT_OVERFLOW) spu.fpscr.setDoublePrecisionExceptionFlags(i, FPSCR_DOVF); if (e & _MM_EXCEPT_UNDERFLOW) spu.fpscr.setDoublePrecisionExceptionFlags(i, FPSCR_DUNF); if (e & _MM_EXCEPT_INEXACT) spu.fpscr.setDoublePrecisionExceptionFlags(i, FPSCR_DINX); } spu.gpr[op.rt]._u32[i * 2] = 0; } return true; } bool spu_interpreter_precise::FSCRWR(spu_thread& spu, spu_opcode_t op) { spu.fpscr.Write(spu.gpr[op.ra]); return true; } bool spu_interpreter_precise::FCEQ(spu_thread& spu, spu_opcode_t op) { for (int i = 0; i < 4; i++) { const u32 a = spu.gpr[op.ra]._u32[i]; const u32 b = spu.gpr[op.rb]._u32[i]; const u32 abs_a = a & 0x7FFFFFFF; const u32 abs_b = b & 0x7FFFFFFF; const bool a_zero = (abs_a < 0x00800000); const bool b_zero = (abs_b < 0x00800000); const bool pass = a == b || (a_zero && b_zero); spu.gpr[op.rt]._u32[i] = pass ? 
0xFFFFFFFF : 0; } return true; } bool spu_interpreter_precise::FCMEQ(spu_thread& spu, spu_opcode_t op) { for (int i = 0; i < 4; i++) { const u32 a = spu.gpr[op.ra]._u32[i]; const u32 b = spu.gpr[op.rb]._u32[i]; const u32 abs_a = a & 0x7FFFFFFF; const u32 abs_b = b & 0x7FFFFFFF; const bool a_zero = (abs_a < 0x00800000); const bool b_zero = (abs_b < 0x00800000); const bool pass = abs_a == abs_b || (a_zero && b_zero); spu.gpr[op.rt]._u32[i] = pass ? 0xFFFFFFFF : 0; } return true; } bool spu_interpreter_precise::FI(spu_thread& spu, spu_opcode_t op) { // TODO spu.gpr[op.rt] = spu.gpr[op.rb]; return true; } bool spu_interpreter_precise::CFLTS(spu_thread& spu, spu_opcode_t op) { const int scale = 173 - (op.i8 & 0xff); //unsigned immediate for (int i = 0; i < 4; i++) { const float a = spu.gpr[op.ra]._f[i]; float scaled; if ((fexpf(a) - 127) + scale >= 32) scaled = std::copysign(4294967296.0f, a); else scaled = std::ldexp(a, scale); s32 result; if (scaled >= 2147483648.0f) result = 0x7FFFFFFF; else if (scaled < -2147483648.0f) result = 0x80000000; else result = static_cast<s32>(scaled); spu.gpr[op.rt]._s32[i] = result; } return true; } bool spu_interpreter_precise::CFLTU(spu_thread& spu, spu_opcode_t op) { const int scale = 173 - (op.i8 & 0xff); //unsigned immediate for (int i = 0; i < 4; i++) { const float a = spu.gpr[op.ra]._f[i]; float scaled; if ((fexpf(a) - 127) + scale >= 32) scaled = std::copysign(4294967296.0f, a); else scaled = std::ldexp(a, scale); u32 result; if (scaled >= 4294967296.0f) result = 0xFFFFFFFF; else if (scaled < 0.0f) result = 0; else result = static_cast<u32>(scaled); spu.gpr[op.rt]._u32[i] = result; } return true; } bool spu_interpreter_precise::CSFLT(spu_thread& spu, spu_opcode_t op) { fesetround(FE_TOWARDZERO); const int scale = 155 - (op.i8 & 0xff); //unsigned immediate for (int i = 0; i < 4; i++) { const s32 a = spu.gpr[op.ra]._s32[i]; spu.gpr[op.rt]._f[i] = static_cast<float>(a); u32 exp = ((spu.gpr[op.rt]._u32[i] >> 23) & 0xff) - scale; if 
(exp > 255) //< 0 exp = 0; spu.gpr[op.rt]._u32[i] = (spu.gpr[op.rt]._u32[i] & 0x807fffff) | (exp << 23); if (isdenormal(spu.gpr[op.rt]._f[i]) || (spu.gpr[op.rt]._f[i] == 0.0f && a != 0)) { spu.fpscr.setSinglePrecisionExceptionFlags(i, FPSCR_SUNF | FPSCR_SDIFF); spu.gpr[op.rt]._f[i] = 0.0f; } } return true; } bool spu_interpreter_precise::CUFLT(spu_thread& spu, spu_opcode_t op) { fesetround(FE_TOWARDZERO); const int scale = 155 - (op.i8 & 0xff); //unsigned immediate for (int i = 0; i < 4; i++) { const u32 a = spu.gpr[op.ra]._u32[i]; spu.gpr[op.rt]._f[i] = static_cast<float>(a); u32 exp = ((spu.gpr[op.rt]._u32[i] >> 23) & 0xff) - scale; if (exp > 255) //< 0 exp = 0; spu.gpr[op.rt]._u32[i] = (spu.gpr[op.rt]._u32[i] & 0x807fffff) | (exp << 23); if (isdenormal(spu.gpr[op.rt]._f[i]) || (spu.gpr[op.rt]._f[i] == 0.0f && a != 0)) { spu.fpscr.setSinglePrecisionExceptionFlags(i, FPSCR_SUNF | FPSCR_SDIFF); spu.gpr[op.rt]._f[i] = 0.0f; } } return true; } static void FMA(spu_thread& spu, spu_opcode_t op, bool neg, bool sub) { fesetround(FE_TOWARDZERO); for (int w = 0; w < 4; w++) { float a = spu.gpr[op.ra]._f[w]; float b = neg ? -spu.gpr[op.rb]._f[w] : spu.gpr[op.rb]._f[w]; float c = (neg != sub) ? 
-spu.gpr[op.rc]._f[w] : spu.gpr[op.rc]._f[w]; if (isdenormal(a)) { spu.fpscr.setSinglePrecisionExceptionFlags(w, FPSCR_SDIFF); a = 0.0f; } if (isdenormal(b)) { spu.fpscr.setSinglePrecisionExceptionFlags(w, FPSCR_SDIFF); b = 0.0f; } if (isdenormal(c)) { spu.fpscr.setSinglePrecisionExceptionFlags(w, FPSCR_SDIFF); c = 0.0f; } const bool sign = std::signbit(a) ^ std::signbit(b); float result; if (isextended(a) || isextended(b)) { spu.fpscr.setSinglePrecisionExceptionFlags(w, FPSCR_SDIFF); if (a == 0.0f || b == 0.0f) { result = c; } else if ((fexpf(a) - 127) + (fexpf(b) - 127) >= 130) { spu.fpscr.setSinglePrecisionExceptionFlags(w, FPSCR_SOVF); result = extended(sign, 0x7FFFFF); } else { float new_a, new_b; if (isextended(a)) { new_a = ldexpf_extended(a, -2); new_b = b; } else { new_a = a; new_b = ldexpf_extended(b, -2); } if (fexpf(c) < 3) { result = new_a * new_b; if (c != 0.0f && std::signbit(c) != sign) { u32 bits = std::bit_cast<u32>(result) - 1; result = std::bit_cast<f32>(bits); } } else { result = std::fma(new_a, new_b, ldexpf_extended(c, -2)); } if (std::fabs(result) >= std::ldexp(1.0f, 127)) { spu.fpscr.setSinglePrecisionExceptionFlags(w, FPSCR_SOVF); result = extended(sign, 0x7FFFFF); } else { result = ldexpf_extended(result, 2); } } } else if (isextended(c)) { spu.fpscr.setSinglePrecisionExceptionFlags(w, FPSCR_SDIFF); if (a == 0.0f || b == 0.0f) { result = c; } else if ((fexpf(a) - 127) + (fexpf(b) - 127) < 96) { result = c; if (sign != std::signbit(c)) { u32 bits = std::bit_cast<u32>(result) - 1; result = std::bit_cast<f32>(bits); } } else { result = std::fma(std::ldexp(a, -1), std::ldexp(b, -1), ldexpf_extended(c, -2)); if (std::fabs(result) >= std::ldexp(1.0f, 127)) { spu.fpscr.setSinglePrecisionExceptionFlags(w, FPSCR_SOVF); result = extended(sign, 0x7FFFFF); } else { result = ldexpf_extended(result, 2); } } } else { feclearexcept(FE_ALL_EXCEPT); result = std::fma(a, b, c); if (fetestexcept(FE_OVERFLOW)) { spu.fpscr.setSinglePrecisionExceptionFlags(w, 
FPSCR_SDIFF); if (fexpf(a) > fexpf(b)) result = std::fma(std::ldexp(a, -2), b, std::ldexp(c, -2)); else result = std::fma(a, std::ldexp(b, -2), std::ldexp(c, -2)); if (fabsf(result) >= std::ldexp(1.0f, 127)) { spu.fpscr.setSinglePrecisionExceptionFlags(w, FPSCR_SOVF); result = extended(sign, 0x7FFFFF); } else { result = ldexpf_extended(result, 2); } } else if (fetestexcept(FE_UNDERFLOW)) { spu.fpscr.setSinglePrecisionExceptionFlags(w, FPSCR_SUNF | FPSCR_SDIFF); } } if (isdenormal(result)) { spu.fpscr.setSinglePrecisionExceptionFlags(w, FPSCR_SUNF | FPSCR_SDIFF); result = 0.0f; } else if (result == 0.0f) { result = +0.0f; } spu.gpr[op.rt4]._f[w] = result; } } bool spu_interpreter_precise::FNMS(spu_thread& spu, spu_opcode_t op) { ::FMA(spu, op, true, true); return true; } bool spu_interpreter_precise::FMA(spu_thread& spu, spu_opcode_t op) { ::FMA(spu, op, false, false); return true; } bool spu_interpreter_precise::FMS(spu_thread& spu, spu_opcode_t op) { ::FMA(spu, op, false, true); return true; } #endif /* __SSE2__ */ template <typename IT> struct spu_interpreter_t { IT UNK; IT HEQ; IT HEQI; IT HGT; IT HGTI; IT HLGT; IT HLGTI; IT HBR; IT HBRA; IT HBRR; IT STOP; IT STOPD; IT LNOP; IT NOP; IT SYNC; IT DSYNC; IT MFSPR; IT MTSPR; IT RDCH; IT RCHCNT; IT WRCH; IT LQD; IT LQX; IT LQA; IT LQR; IT STQD; IT STQX; IT STQA; IT STQR; IT CBD; IT CBX; IT CHD; IT CHX; IT CWD; IT CWX; IT CDD; IT CDX; IT ILH; IT ILHU; IT IL; IT ILA; IT IOHL; IT FSMBI; IT AH; IT AHI; IT A; IT AI; IT SFH; IT SFHI; IT SF; IT SFI; IT ADDX; IT CG; IT CGX; IT SFX; IT BG; IT BGX; IT MPY; IT MPYU; IT MPYI; IT MPYUI; IT MPYH; IT MPYS; IT MPYHH; IT MPYHHA; IT MPYHHU; IT MPYHHAU; IT CLZ; IT CNTB; IT FSMB; IT FSMH; IT FSM; IT GBB; IT GBH; IT GB; IT AVGB; IT ABSDB; IT SUMB; IT XSBH; IT XSHW; IT XSWD; IT AND; IT ANDC; IT ANDBI; IT ANDHI; IT ANDI; IT OR; IT ORC; IT ORBI; IT ORHI; IT ORI; IT ORX; IT XOR; IT XORBI; IT XORHI; IT XORI; IT NAND; IT NOR; IT EQV; IT MPYA; IT SELB; IT SHUFB; IT SHLH; IT SHLHI; IT SHL; IT 
SHLI; IT SHLQBI; IT SHLQBII; IT SHLQBY; IT SHLQBYI; IT SHLQBYBI; IT ROTH; IT ROTHI; IT ROT; IT ROTI; IT ROTQBY; IT ROTQBYI; IT ROTQBYBI; IT ROTQBI; IT ROTQBII; IT ROTHM; IT ROTHMI; IT ROTM; IT ROTMI; IT ROTQMBY; IT ROTQMBYI; IT ROTQMBYBI; IT ROTQMBI; IT ROTQMBII; IT ROTMAH; IT ROTMAHI; IT ROTMA; IT ROTMAI; IT CEQB; IT CEQBI; IT CEQH; IT CEQHI; IT CEQ; IT CEQI; IT CGTB; IT CGTBI; IT CGTH; IT CGTHI; IT CGT; IT CGTI; IT CLGTB; IT CLGTBI; IT CLGTH; IT CLGTHI; IT CLGT; IT CLGTI; IT BR; IT BRA; IT BRSL; IT BRASL; IT BI; IT IRET; IT BISLED; IT BISL; IT BRNZ; IT BRZ; IT BRHNZ; IT BRHZ; IT BIZ; IT BINZ; IT BIHZ; IT BIHNZ; IT FA; IT DFA; IT FS; IT DFS; IT FM; IT DFM; IT DFMA; IT DFNMS; IT DFMS; IT DFNMA; IT FREST; IT FRSQEST; IT FI; IT CSFLT; IT CFLTS; IT CUFLT; IT CFLTU; IT FRDS; IT FESD; IT FCEQ; IT FCMEQ; IT FCGT; IT FCMGT; IT FSCRWR; IT FSCRRD; IT DFCEQ; IT DFCMEQ; IT DFCGT; IT DFCMGT; IT DFTSV; IT FMA; IT FNMS; IT FMS; }; spu_interpreter_rt_base::spu_interpreter_rt_base() noexcept { // Obtain required set of flags from settings bs_t<spu_exec_bit> selected{}; if (g_cfg.core.use_accurate_dfma) selected += use_dfma; ptrs = std::make_unique<decltype(ptrs)::element_type>(); // Initialize instructions with their own sets of supported flags #define INIT(name, ...) \ ptrs->name = spu_exec_select<>::select<__VA_ARGS__>(selected, []<spu_exec_bit... 
Flags>(){ return &::name<Flags...>; }); \ using enum spu_exec_bit; INIT(UNK); INIT(HEQ); INIT(HEQI); INIT(HGT); INIT(HGTI); INIT(HLGT); INIT(HLGTI); INIT(HBR); INIT(HBRA); INIT(HBRR); INIT(STOP); INIT(STOPD); INIT(LNOP); INIT(NOP); INIT(SYNC); INIT(DSYNC); INIT(MFSPR); INIT(MTSPR); INIT(RDCH); INIT(RCHCNT); INIT(WRCH); INIT(LQD); INIT(LQX); INIT(LQA); INIT(LQR); INIT(STQD); INIT(STQX); INIT(STQA); INIT(STQR); INIT(CBD); INIT(CBX); INIT(CHD); INIT(CHX); INIT(CWD); INIT(CWX); INIT(CDD); INIT(CDX); INIT(ILH); INIT(ILHU); INIT(IL); INIT(ILA); INIT(IOHL); INIT(FSMBI); INIT(AH); INIT(AHI); INIT(A); INIT(AI); INIT(SFH); INIT(SFHI); INIT(SF); INIT(SFI); INIT(ADDX); INIT(CG); INIT(CGX); INIT(SFX); INIT(BG); INIT(BGX); INIT(MPY); INIT(MPYU); INIT(MPYI); INIT(MPYUI); INIT(MPYH); INIT(MPYS); INIT(MPYHH); INIT(MPYHHA); INIT(MPYHHU); INIT(MPYHHAU); INIT(CLZ); INIT(CNTB); INIT(FSMB); INIT(FSMH); INIT(FSM); INIT(GBB); INIT(GBH); INIT(GB); INIT(AVGB); INIT(ABSDB); INIT(SUMB); INIT(XSBH); INIT(XSHW); INIT(XSWD); INIT(AND); INIT(ANDC); INIT(ANDBI); INIT(ANDHI); INIT(ANDI); INIT(OR); INIT(ORC); INIT(ORBI); INIT(ORHI); INIT(ORI); INIT(ORX); INIT(XOR); INIT(XORBI); INIT(XORHI); INIT(XORI); INIT(NAND); INIT(NOR); INIT(EQV); INIT(MPYA); INIT(SELB); INIT(SHUFB); INIT(SHLH); INIT(SHLHI); INIT(SHL); INIT(SHLI); INIT(SHLQBI); INIT(SHLQBII); INIT(SHLQBY); INIT(SHLQBYI); INIT(SHLQBYBI); INIT(ROTH); INIT(ROTHI); INIT(ROT); INIT(ROTI); INIT(ROTQBY); INIT(ROTQBYI); INIT(ROTQBYBI); INIT(ROTQBI); INIT(ROTQBII); INIT(ROTHM); INIT(ROTHMI); INIT(ROTM); INIT(ROTMI); INIT(ROTQMBY); INIT(ROTQMBYI); INIT(ROTQMBYBI); INIT(ROTQMBI); INIT(ROTQMBII); INIT(ROTMAH); INIT(ROTMAHI); INIT(ROTMA); INIT(ROTMAI); INIT(CEQB); INIT(CEQBI); INIT(CEQH); INIT(CEQHI); INIT(CEQ); INIT(CEQI); INIT(CGTB); INIT(CGTBI); INIT(CGTH); INIT(CGTHI); INIT(CGT); INIT(CGTI); INIT(CLGTB); INIT(CLGTBI); INIT(CLGTH); INIT(CLGTHI); INIT(CLGT); INIT(CLGTI); INIT(BR); INIT(BRA); INIT(BRSL); INIT(BRASL); INIT(BI); INIT(IRET); INIT(BISLED); 
INIT(BISL); INIT(BRNZ); INIT(BRZ); INIT(BRHNZ); INIT(BRHZ); INIT(BIZ); INIT(BINZ); INIT(BIHZ); INIT(BIHNZ); INIT(FA); INIT(DFA); INIT(FS); INIT(DFS); INIT(FM); INIT(DFM); INIT(DFMA); INIT(DFNMS); INIT(DFMS); INIT(DFNMA); INIT(FREST); INIT(FRSQEST); INIT(FI); INIT(CSFLT); INIT(CFLTS); INIT(CUFLT); INIT(CFLTU); INIT(FRDS); INIT(FESD); INIT(FCEQ); INIT(FCMEQ); INIT(FCGT); INIT(FCMGT); INIT(FSCRWR); INIT(FSCRRD); INIT(DFCEQ); INIT(DFCMEQ); INIT(DFCGT); INIT(DFCMGT); INIT(DFTSV); INIT(FMA); INIT(FNMS); INIT(FMS); } spu_interpreter_rt_base::~spu_interpreter_rt_base() { } spu_interpreter_rt::spu_interpreter_rt() noexcept : spu_interpreter_rt_base() , table(*ptrs) { }
82,486
C++
.cpp
2,994
25.334001
171
0.664375
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
5,187
RawSPUThread.cpp
RPCS3_rpcs3/rpcs3/Emu/Cell/RawSPUThread.cpp
#include "stdafx.h" #include "Emu/IdManager.h" #include "Loader/ELF.h" #include "util/asm.hpp" #include "Emu/Cell/RawSPUThread.h" inline void try_start(spu_thread& spu) { bool notify = false; if (~spu.status_npc.load().status & SPU_STATUS_RUNNING) { reader_lock lock(spu.run_ctrl_mtx); if (spu.status_npc.fetch_op([](spu_thread::status_npc_sync_var& value) { if (value.status & SPU_STATUS_RUNNING) { return false; } value.status = SPU_STATUS_RUNNING | (value.status & SPU_STATUS_IS_ISOLATED); return true; }).second) { spu.state -= cpu_flag::stop; notify = true; } } if (notify) { spu.state.notify_one(); } }; bool spu_thread::read_reg(const u32 addr, u32& value) { const u32 offset = addr - (RAW_SPU_BASE_ADDR + RAW_SPU_OFFSET * index) - RAW_SPU_PROB_OFFSET; spu_log.trace("RawSPU[%u]: Read32(0x%x, offset=0x%x)", index, addr, offset); switch (offset) { case MFC_CMDStatus_offs: { spu_mfc_cmd cmd; // All arguments must be written for all command types, even for sync barriers if (std::scoped_lock lock(mfc_prxy_mtx); mfc_prxy_write_state.all == 0x1f) { cmd = mfc_prxy_cmd; mfc_prxy_write_state.all = 0; } else { value = MFC_PPU_DMA_CMD_SEQUENCE_ERROR; return true; } switch (cmd.cmd) { case MFC_SDCRT_CMD: case MFC_SDCRTST_CMD: { value = MFC_PPU_DMA_CMD_ENQUEUE_SUCCESSFUL; return true; } case MFC_SNDSIG_CMD: case MFC_SNDSIGB_CMD: case MFC_SNDSIGF_CMD: { if (cmd.size != 4) { // Invalid for MFC but may be different for MFC proxy (TODO) break; } [[fallthrough]]; } case MFC_PUT_CMD: case MFC_PUTB_CMD: case MFC_PUTF_CMD: case MFC_PUTS_CMD: case MFC_PUTBS_CMD: case MFC_PUTFS_CMD: case MFC_PUTR_CMD: case MFC_PUTRF_CMD: case MFC_PUTRB_CMD: case MFC_GET_CMD: case MFC_GETB_CMD: case MFC_GETF_CMD: case MFC_GETS_CMD: case MFC_GETBS_CMD: case MFC_GETFS_CMD: case MFC_SDCRZ_CMD: { if (cmd.size) { // Perform transfer immediately do_dma_transfer(nullptr, cmd, ls); } if (cmd.cmd & MFC_START_MASK) { try_start(*this); } value = MFC_PPU_DMA_CMD_ENQUEUE_SUCCESSFUL; return true; } case MFC_BARRIER_CMD: 
case MFC_EIEIO_CMD: case MFC_SYNC_CMD: { atomic_fence_seq_cst(); value = MFC_PPU_DMA_CMD_ENQUEUE_SUCCESSFUL; return true; } default: break; } break; } case MFC_QStatus_offs: { value = MFC_PROXY_COMMAND_QUEUE_EMPTY_FLAG | 8; return true; } case SPU_Out_MBox_offs: { value = ch_out_mbox.pop(); return true; } case SPU_MBox_Status_offs: { value = (ch_out_mbox.get_count() & 0xff) | ((4 - ch_in_mbox.get_count()) << 8 & 0xff00) | (ch_out_intr_mbox.get_count() << 16 & 0xff0000); return true; } case SPU_Status_offs: { value = status_npc.load().status; return true; } case Prxy_TagStatus_offs: { value = mfc_prxy_mask; return true; } case SPU_NPC_offs: { const auto current = status_npc.load(); value = !(current.status & SPU_STATUS_RUNNING) ? current.npc : 0; return true; } case SPU_RunCntl_offs: { value = run_ctrl; return true; } } spu_log.error("RawSPU[%u]: Read32(0x%x): unknown/illegal offset (0x%x)", index, addr, offset); return false; } bool spu_thread::write_reg(const u32 addr, const u32 value) { const u32 offset = addr - (RAW_SPU_BASE_ADDR + RAW_SPU_OFFSET * index) - RAW_SPU_PROB_OFFSET; spu_log.trace("RawSPU[%u]: Write32(0x%x, offset=0x%x, value=0x%x)", index, addr, offset, value); switch (offset) { case MFC_LSA_offs: { if (value >= SPU_LS_SIZE) { break; } std::lock_guard lock(mfc_prxy_mtx); mfc_prxy_cmd.lsa = value; mfc_prxy_write_state.lsa = true; return true; } case MFC_EAH_offs: { std::lock_guard lock(mfc_prxy_mtx); mfc_prxy_cmd.eah = value; mfc_prxy_write_state.eah = true; return true; } case MFC_EAL_offs: { std::lock_guard lock(mfc_prxy_mtx); mfc_prxy_cmd.eal = value; mfc_prxy_write_state.eal = true; return true; } case MFC_Size_Tag_offs: { std::lock_guard lock(mfc_prxy_mtx); mfc_prxy_cmd.tag = value & 0x1f; mfc_prxy_cmd.size = (value >> 16) & 0x7fff; mfc_prxy_write_state.tag_size = true; return true; } case MFC_Class_CMD_offs: { std::lock_guard lock(mfc_prxy_mtx); mfc_prxy_cmd.cmd = MFC(value & 0xff); mfc_prxy_write_state.cmd = true; return true; } case 
Prxy_QueryType_offs: { // TODO // 0 - no query requested; cancel previous request // 1 - set (interrupt) status upon completion of any enabled tag groups // 2 - set (interrupt) status upon completion of all enabled tag groups if (value > 2) { break; } if (value) { int_ctrl[2].set(SPU_INT2_STAT_DMA_TAG_GROUP_COMPLETION_INT); // TODO } return true; } case Prxy_QueryMask_offs: { mfc_prxy_mask = value; return true; } case SPU_In_MBox_offs: { if (!ch_in_mbox.push(value).op_done) { if (auto cpu = cpu_thread::get_current()) { cpu->state += cpu_flag::again; } } return true; } case SPU_RunCntl_offs: { run_ctrl = value; if (value == SPU_RUNCNTL_RUN_REQUEST) { try_start(*this); } else if (value == SPU_RUNCNTL_STOP_REQUEST) { if (get_current_cpu_thread() == this) { // TODO state += cpu_flag::stop + cpu_flag::ret; return true; } std::scoped_lock lock(run_ctrl_mtx); if (status_npc.load().status & SPU_STATUS_RUNNING) { state += cpu_flag::stop + cpu_flag::ret; for (status_npc_sync_var old; (old = status_npc).status & SPU_STATUS_RUNNING;) { utils::bless<atomic_t<u32>>(&status_npc)[0].wait(old.status); } } } else { break; } return true; } case SPU_NPC_offs: { status_npc.fetch_op([value = value & 0x3fffd](status_npc_sync_var& state) { if (!(state.status & SPU_STATUS_RUNNING)) { state.npc = value; return true; } return false; }); return true; } case SPU_RdSigNotify1_offs: { push_snr(0, value); return true; } case SPU_RdSigNotify2_offs: { push_snr(1, value); return true; } } spu_log.error("RawSPU[%u]: Write32(0x%x, value=0x%x): unknown/illegal offset (0x%x)", index, addr, value, offset); return false; } bool spu_thread::test_is_problem_state_register_offset(u32 offset, bool for_read, bool for_write) noexcept { if (for_read) { switch (offset) { case MFC_CMDStatus_offs: case MFC_QStatus_offs: case SPU_Out_MBox_offs: case SPU_MBox_Status_offs: case SPU_Status_offs: case Prxy_TagStatus_offs: case SPU_NPC_offs: case SPU_RunCntl_offs: return true; default: break; } } if (for_write) { switch 
(offset) { case MFC_LSA_offs: case MFC_EAH_offs: case MFC_EAL_offs: case MFC_Size_Tag_offs: case MFC_Class_CMD_offs: case Prxy_QueryType_offs: case Prxy_QueryMask_offs: case SPU_In_MBox_offs: case SPU_RunCntl_offs: case SPU_NPC_offs: case SPU_RdSigNotify1_offs: case SPU_RdSigNotify2_offs: case (SPU_RdSigNotify2_offs & 0xffff): // Fow now accept both (this is used for an optimization so it can be imperfect) return true; default: break; } } return false; } void spu_load_exec(const spu_exec_object& elf) { spu_thread::g_raw_spu_ctr++; auto spu = idm::make_ptr<named_thread<spu_thread>>(nullptr, 0, "test_spu", 0); ensure(vm::get(vm::spu)->falloc(spu->vm_offset(), SPU_LS_SIZE, &spu->shm, vm::page_size_64k)); spu->map_ls(*spu->shm, spu->ls); for (const auto& prog : elf.progs) { if (prog.p_type == 0x1u /* LOAD */ && prog.p_memsz) { std::memcpy(spu->_ptr<void>(prog.p_vaddr), prog.bin.data(), prog.p_filesz); } } spu_thread::g_raw_spu_id[0] = spu->id; spu->status_npc = {SPU_STATUS_RUNNING, elf.header.e_entry}; atomic_storage<u32>::release(spu->pc, elf.header.e_entry); const auto funcs = spu->discover_functions(0, { spu->ls , SPU_LS_SIZE }, true, umax); if (spu_log.notice && !funcs.empty()) { std::string to_log; for (usz i = 0; i < funcs.size(); i++) { if (i == 0 && funcs.size() < 4) { // Skip newline in this case to_log += ' '; } else if (i % 4 == 0) { fmt::append(to_log, "\n[%02u] ", i / 8); } else { to_log += ", "; } fmt::append(to_log, "0x%05x", funcs[i]); } spu_log.notice("Found SPU function(s) at:%s", to_log); } if (!funcs.empty()) { spu_log.success("Found %u SPU function(s)", funcs.size()); } } void spu_load_rel_exec(const spu_rel_object& elf) { spu_thread::g_raw_spu_ctr++; auto spu = idm::make_ptr<named_thread<spu_thread>>(nullptr, 0, "test_spu", 0); ensure(vm::get(vm::spu)->falloc(spu->vm_offset(), SPU_LS_SIZE, &spu->shm, vm::page_size_64k)); spu->map_ls(*spu->shm, spu->ls); u32 total_memsize = 0; // Compute executable data size for (const auto& shdr : elf.shdrs) { if 
(shdr.sh_type == sec_type::sht_progbits && shdr.sh_flags().all_of(sh_flag::shf_alloc)) { total_memsize = utils::align<u32>(total_memsize + shdr.sh_size, 4); } } // Place executable data in SPU local memory u32 offs = 0; for (const auto& shdr : elf.shdrs) { if (shdr.sh_type == sec_type::sht_progbits && shdr.sh_flags().all_of(sh_flag::shf_alloc)) { std::memcpy(spu->_ptr<void>(offs), shdr.get_bin().data(), shdr.sh_size); offs = utils::align<u32>(offs + shdr.sh_size, 4); } } spu_log.success("Loaded 0x%x of SPU relocatable executable data", total_memsize); spu_thread::g_raw_spu_id[0] = spu->id; spu->status_npc = {SPU_STATUS_RUNNING, elf.header.e_entry}; atomic_storage<u32>::release(spu->pc, elf.header.e_entry); }
9,514
C++
.cpp
401
20.700748
140
0.658186
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
5,188
PPUFunction.cpp
RPCS3_rpcs3/rpcs3/Emu/Cell/PPUFunction.cpp
#include "stdafx.h" #include "PPUFunction.h" #include "Utilities/JIT.h" #include "util/serialization.hpp" #include "PPUModule.h" #include "PPUInterpreter.h" // Get function name by FNID extern std::string ppu_get_function_name(const std::string& _module, u32 fnid) { if (_module.empty()) switch (fnid) { case 0x0d10fd3f: return "module_prologue"; case 0x330f7005: return "module_epilogue"; case 0x3ab9a95e: return "module_exit"; case 0xbc9a0086: return "module_start"; case 0xab779874: return "module_stop"; } // Check known FNIDs if (_module == "sys_libc" || _module == "sys_libm") switch (fnid) { case 0x00acf0e5: return "spu_printf_finalize"; case 0x00fb4a6b: return "spu_thread_sprintf"; case 0x0125b2ca: return "_rand_int32_TT800"; case 0x01508f24: return "raw_spu_write_float"; case 0x0264f468: return "_Wctomb"; case 0x02f4d325: return "spu_thread_read_double"; case 0x02f52a3c: return "_filep_close_it"; case 0x03becf3c: return "_Defloc"; case 0x04a183fc: return "strcpy"; case 0x04a1f19d: return "raw_spu_write_short"; case 0x05d821c4: return "_Stoullx"; case 0x077cdb23: return "btowc"; case 0x07c7971d: return "_Stoldx"; case 0x0871ffb0: return "mspace_malloc_usable_size"; case 0x0891a3fa: return "_Tlsfree"; case 0x09cbee1e: return "strxfrm"; case 0x0a1d4b00: return "spu_thread_read_uint"; case 0x0a4e2541: return "spu_thread_read_ldouble"; case 0x0ae275a4: return "_Stolx"; case 0x0b0d272f: return "_malloc_finalize"; case 0x0b9d04d0: return "_Getnloc"; case 0x0b9ecb98: return "toupper_ascii"; case 0x0cae547f: return "raw_spu_write_double"; case 0x0d2a593b: return "srand"; case 0x0d8a2de0: return "_CStrxfrm"; case 0x0df8809f: return "__call_functions_registered_with_atexit"; case 0x0f60eb63: return "vfwscanf"; case 0x0ff4722c: return "raw_spu_read_ushort"; case 0x1096f8f1: return "ispunct_ascii"; case 0x1098a99d: return "localeconv"; case 0x112ea8ea: return "strspn"; case 0x115e2f70: return "spu_thread_snprintf"; case 0x116cda13: return "wcstol"; case 0x118712ea: return 
"islower"; case 0x11d270d2: return "exitspawn"; case 0x126656b7: return "_Btowc"; case 0x128b334f: return "raw_spu_read_mem"; case 0x12a55fb7: return "mbrtowc"; case 0x130d20a5: return "towlower"; case 0x1365b52a: return "fcntl"; case 0x13808972: return "wcstok"; case 0x14052ae0: return "absi4"; case 0x14348b57: return "divi4"; case 0x145853cd: return "mspace_destroy"; case 0x15362bc9: return "spu_thread_read_long"; case 0x153b364a: return "mkdir"; case 0x15bdcc00: return "rand"; case 0x15c2e29d: return "isgraph_ascii"; case 0x17752bab: return "wcsftime"; case 0x17bc0136: return "_Lrv2d"; case 0x17c031d7: return "spu_thread_read_ulong"; case 0x1855b9b1: return "setlocale"; case 0x1895908d: return "mspace_realloc"; case 0x18e48b5d: return "wscanf"; case 0x18f7b77d: return "_Dnorm"; case 0x1970cd7e: return "getpid"; case 0x19ccbb81: return "mktime"; case 0x1ab01ea8: return "truncate"; case 0x1abd0985: return "div"; case 0x1ae06860: return "wcstoumax"; case 0x1b4c3ff0: return "atexit"; case 0x1c0e8ab6: return "vswscanf"; case 0x1c2ef212: return "getwc"; case 0x1cf4d80a: return "iswalpha"; case 0x1dcd8609: return "_Strxfrmx"; case 0x1dd0d4c5: return "spu_printf_attach_group"; case 0x1df4732e: return "_Getptolower"; case 0x1e9d2b4f: return "spu_thread_read_int"; case 0x1ecae195: return "_Vacopy"; case 0x1f913e8d: return "chmod"; case 0x1f925c41: return "_allocate_mapped_pages"; case 0x206612c4: return "spu_thread_read_ptr"; case 0x216984ed: return "spu_thread_write_long"; case 0x216fcd2a: return "_Atrealloc"; case 0x21807b8e: return "towctrans"; case 0x225702e1: return "_fs_initialize"; case 0x22b0e566: return "_Stollx"; case 0x23d3bca7: return "_Eadd"; case 0x242c603e: return "_Frprep"; case 0x243b52d8: return "_Mbtowcx"; case 0x24802244: return "iswcntrl"; case 0x24c9e021: return "abs"; case 0x24e230d2: return "_Wctob"; case 0x24f6cbdd: return "clock"; case 0x253b7210: return "_rand_real2_TT800"; case 0x25beee5a: return "__raw_spu_printf"; case 0x25da8fbb: return 
"iscntrl"; case 0x266311a0: return "localtime"; case 0x2677568c: return "putchar"; case 0x26f023d5: return "ftell"; case 0x273b9711: return "sprintf"; case 0x28b92ebf: return "raw_spu_read_uchar"; case 0x296bc72f: return "_FDunscale"; case 0x2b45cb34: return "wcsrtombs"; case 0x2b7ba4ca: return "_Tlsset"; case 0x2b81fb7f: return "readdir"; case 0x2bc9dee6: return "raw_spu_read_short"; case 0x2caea755: return "_Once"; case 0x2d067448: return "ftruncate64"; case 0x2d17ca7f: return "_Puttxt"; case 0x2eea9f25: return "_Esub"; case 0x2f45d39c: return "strlen"; case 0x2fecec13: return "getwchar"; case 0x30fb2899: return "_Getmem"; case 0x312be3b3: return "_malloc_init_lv2"; case 0x313f04ab: return "raw_spu_read_char"; case 0x329a4540: return "_WPrintf"; case 0x32e4a30a: return "_Mtxdst"; case 0x336b4191: return "_Getint"; case 0x33d6ae54: return "ferror"; case 0x344eca7e: return "_WGetstr"; case 0x34dd6650: return "_Getcloc"; case 0x34e7c97e: return "_Unlocksyslock"; case 0x3512ad38: return "tmpnam"; case 0x355fd1fd: return "mbtowc"; case 0x3574d37d: return "_Wcsxfrmx"; case 0x36c067c1: return "_Stoll"; case 0x36f2b4ed: return "strtoull"; case 0x36feb965: return "raw_spu_write_llong"; case 0x3704840e: return "_fs_finalize"; case 0x38426d25: return "_Wctombx"; case 0x3902363a: return "malloc_footprint"; case 0x39bf419c: return "valloc"; case 0x3a210c93: return "swscanf"; case 0x3a840ae3: return "snprintf"; case 0x3b22e88a: return "isxdigit"; case 0x3b8097ac: return "_WScanf"; case 0x3bce073b: return "putc"; case 0x3bd9ce0a: return "fsync"; case 0x3ca81c76: return "_Iswctype"; case 0x3d1460e9: return "_Strerror"; case 0x3d541975: return "atoi"; case 0x3d5fdea7: return "vfwprintf"; case 0x3d85d6f8: return "strcmp"; case 0x3dbc3bee: return "opendir"; case 0x3e57dfac: return "_Genld"; case 0x3ec99a66: return "_Getptimes"; case 0x3ee29d0b: return "_Stof"; case 0x3f125e2e: return "spu_thread_write_short"; case 0x3f4ccdc7: return "isdigit"; case 0x3f650700: return 
"mspace_is_heap_empty"; case 0x40a2599a: return "atol"; case 0x40d04e4e: return "fwide"; case 0x40e0ff25: return "_WGenld"; case 0x41283333: return "isdigit_ascii"; case 0x418bdfe1: return "_get_fd"; case 0x4217b4cf: return "difftime"; case 0x433fe2a9: return "fwscanf"; case 0x44115dd0: return "_Geterrno"; case 0x44796e5c: return "strerror"; case 0x449317ed: return "_Fopen"; case 0x44d7cae8: return "raw_spu_read_float"; case 0x4544c2de: return "spu_thread_write_mem"; case 0x4569518c: return "malloc_stats"; case 0x459072c3: return "_init_TT800"; case 0x4595c42b: return "wcsxfrm"; case 0x468b45dc: return "mspace_calloc"; case 0x4911ff9c: return "rand_int31_TT800"; case 0x498a5036: return "raw_spu_write_mem"; case 0x4a0049c6: return "_Getpctype"; case 0x4ab5fbe2: return "_Printf"; case 0x4b36c0e0: return "vfscanf"; case 0x4b6a4010: return "vswprintf"; case 0x4bb8e2b2: return "raw_spu_write_ushort"; case 0x4c3f5f29: return "_Getgloballocale"; case 0x4c7dc863: return "iswupper"; case 0x4d348427: return "fputs"; case 0x4e4be299: return "longjmp"; case 0x4e72f810: return "wmemchr"; case 0x4ffba189: return "feof"; case 0x508196b4: return "raw_spu_printf"; case 0x508e00c6: return "_Getloc"; case 0x51b28904: return "_Stodx"; case 0x526a496a: return "write"; case 0x532b03be: return "raw_spu_read_uint"; case 0x53eb43a1: return "_Getpmbstate"; case 0x54b383bc: return "_Locvar"; case 0x54c2844e: return "spu_raw_snprintf"; case 0x54f57626: return "rewind"; case 0x5516bbbf: return "iswctype"; case 0x55d4866e: return "fgetws"; case 0x5751acf9: return "_LDscale"; case 0x575fb268: return "wctrans"; case 0x57ff7dd7: return "_WStod"; case 0x58320830: return "_WLitob"; case 0x589b5314: return "strncat"; case 0x5909e3c4: return "memset"; case 0x59640bc6: return "raw_spu_read_ullong"; case 0x59c1bb1f: return "_Getpwcstate"; case 0x59e8dd58: return "strtoll"; case 0x5a74f774: return "spu_thread_read_float"; case 0x5b162b7f: return "memmove"; case 0x5b4b6d6d: return "wcspbrk"; case 
0x5cc71eee: return "raw_spu_write_ldouble"; case 0x5d43c1a3: return "_Mbtowc"; case 0x5dbceee3: return "rand_int32_TT800"; case 0x5e06c3fe: return "__getpid"; case 0x5e7888f0: return "bsearch"; case 0x5eb95641: return "_Stold"; case 0x5f922a30: return "_Dscale"; case 0x5f9a65c7: return "_WStold"; case 0x5fa1e497: return "_Unlockfilelock"; case 0x60627fb3: return "_LDunscale"; case 0x6075a3c6: return "_Ld2rv"; case 0x609080ec: return "isspace_ascii"; case 0x6137d196: return "memalign"; case 0x6287ac6a: return "iswdigit"; case 0x62bf1d6c: return "swprintf"; case 0x64aaf016: return "raw_spu_read_ldouble"; case 0x6514dbe5: return "wcstold"; case 0x6539ff6d: return "_Gentime"; case 0x6545b7de: return "fgetpos"; case 0x65e8d4d0: return "wcslen"; case 0x6660fc8d: return "TlsGetValue"; case 0x6687fba4: return "_Fgpos"; case 0x66b71b17: return "wcsspn"; case 0x67582370: return "spu_thread_write_double"; case 0x676e3e7a: return "raw_spu_write_ptr"; case 0x67d6334b: return "strtof"; case 0x6823c180: return "iswprint"; case 0x69106fd2: return "_init_by_array_TT800"; case 0x692b497f: return "perror"; case 0x6995f5e8: return "_Ldtob"; case 0x69c27c12: return "fopen"; case 0x69ff1b9b: return "fseek"; case 0x6ba10474: return "_Tlsalloc"; case 0x6cf78f3e: return "_Mtxunlock"; case 0x6d5115b0: return "wcsncmp"; case 0x6e988e5f: return "_rand_int31_TT800"; case 0x7028dea9: return "_Locksyslock"; case 0x703ec767: return "setvbuf"; case 0x70b0e833: return "mblen"; case 0x714c9618: return "__raw_spu_putfld"; case 0x717b2502: return "stat"; case 0x72236cbc: return "raw_spu_write_ullong"; case 0x72b84004: return "spu_printf_attach_thread"; case 0x73096858: return "wctob"; case 0x7345b4be: return "_WStoll"; case 0x73eae03d: return "strrchr"; case 0x744d2505: return "ispunct"; case 0x74fe4a7b: return "iswgraph"; case 0x759e0635: return "malloc"; case 0x75d4485c: return "rename"; case 0x75f98579: return "wcscoll"; case 0x76da0c84: return "ftruncate"; case 0x76ed4243: return "_Wcsftime"; case 
0x770bfaee: return "wctype"; case 0x77a602dd: return "free"; case 0x77c15441: return "_WGetfloat"; case 0x77e241bc: return "_Skip"; case 0x7817edf0: return "raw_spu_write_uint"; case 0x783636d1: return "spu_thread_read_char"; case 0x78429d81: return "putwchar"; case 0x79819dbf: return "fputc"; case 0x7994c28d: return "_FDtentox"; case 0x79eadf05: return "malloc_usable_size"; case 0x7aaab95c: return "iswblank"; case 0x7ae82e0f: return "vsprintf"; case 0x7aee5acd: return "_Lockfilelock"; case 0x7b5aac20: return "spu_thread_write_ptr"; case 0x7b7a687a: return "_WPutfld"; case 0x7b9c592e: return "spu_thread_read_ullong"; case 0x7c1bcf37: return "isalnum_ascii"; case 0x7c370679: return "_Foprep"; case 0x7cec7b39: return "_Putfld"; case 0x7d894764: return "_Readloc"; case 0x7e7017b1: return "rmdir"; case 0x7ea8d860: return "spu_printf_detach_group"; case 0x7efd420a: return "_Daysto"; case 0x7fd325c4: return "mspace_malloc_stats"; case 0x7fdcf73e: return "wcscat"; case 0x806fd281: return "isblank_ascii"; case 0x809a143f: return "kill"; case 0x813a9666: return "ungetwc"; case 0x814d8cb0: return "fflush"; case 0x81a0a858: return "_memset_int"; case 0x82a3cc30: return "wcschr"; case 0x82a4561a: return "_put_fd"; case 0x831d70a5: return "memcpy"; case 0x8342b757: return "utime"; case 0x84378ddc: return "wcsncpy"; case 0x86532174: return "imaxdiv"; case 0x867275d7: return "_Stoul"; case 0x86b4c669: return "tolower_ascii"; case 0x8713c859: return "link"; case 0x8725a1a7: return "_memset_vmx"; case 0x87e8f748: return "memset_vmx"; case 0x8809cdfd: return "_Getpwctytab"; case 0x882689f2: return "_Makeloc"; case 0x882e7760: return "raw_spu_write_uchar"; case 0x889d5804: return "_Dunscale"; case 0x88e009f5: return "vwprintf"; case 0x896e1bfd: return "spu_thread_write_uchar"; case 0x89b62f56: return "_Etentox"; case 0x89f6f026: return "time"; case 0x8a6830e7: return "abort"; case 0x8a71132c: return "remove"; case 0x8a847b51: return "tmpfile"; case 0x8ab0abc6: return "strncpy"; case 
0x8b439438: return "clearerr"; case 0x8b9d8dd2: return "iswpunct"; case 0x8cb6bfdc: return "_Locsum"; case 0x8d7ffaf1: return "_WStopfx"; case 0x8e2484f1: return "_Emul"; case 0x8ed71e8b: return "_WGetfld"; case 0x8ef85e47: return "_WPuttxt"; case 0x8f5dd179: return "_Nnl"; case 0x90010029: return "gets"; case 0x9027fd99: return "_WStoldx"; case 0x90457fe3: return "raw_spu_read_long"; case 0x90b27880: return "strtoumax"; case 0x9234f738: return "raw_spu_read_int"; case 0x93427cb9: return "setbuf"; case 0x938bfcf7: return "spu_thread_write_char"; case 0x93a3e3ac: return "tolower"; case 0x9439e4cd: return "wcsncat"; case 0x96b6baa6: return "spu_thread_read_mem"; case 0x96e6303b: return "_WStoxflt"; case 0x96ea4de6: return "wctomb"; case 0x97896359: return "isspace"; case 0x9800573c: return "_WLdtob"; case 0x980d3ea7: return "_Getfld"; case 0x9886810c: return "_FDnorm"; case 0x98f0eeab: return "raw_spu_write_ulong"; case 0x99782342: return "strncasecmp_ascii"; case 0x99a72146: return "vsnprintf"; case 0x99b38ce7: return "wmemmove"; case 0x9a87bb3a: return "_Getmbcurmax"; case 0x9abe8c74: return "wprintf"; case 0x9c7028a5: return "spu_thread_write_uint"; case 0x9c9d7b0d: return "strtold"; case 0x9cab08d1: return "spu_thread_write_int"; case 0x9d140351: return "_Destroytls"; case 0x9eb25e00: return "strcoll"; case 0x9eee5387: return "truncate64"; case 0x9ff08d57: return "_Clearlocks"; case 0xa0ab76d5: return "_absi4"; case 0xa0bc0efb: return "mallinfo"; case 0xa0ddba8e: return "_Stoulx"; case 0xa1dbb466: return "_Gettime"; case 0xa2945229: return "_WGetint"; case 0xa30d4797: return "wcstoll"; case 0xa3440924: return "closedir"; case 0xa3da58f6: return "rand_real1_TT800"; case 0xa45a0313: return "mspace_create"; case 0xa483d50d: return "_rv2d"; case 0xa53800c2: return "_malloc_finalize_lv2"; case 0xa568db82: return "spu_thread_read_ushort"; case 0xa57cc615: return "iswspace"; case 0xa5bc0e19: return "getchar"; case 0xa6463518: return "__rename"; case 0xa650df19: return 
"toupper"; case 0xa65886b8: return "_Findloc"; case 0xa72a7595: return "calloc"; case 0xa797790f: return "wcsstr"; case 0xa82d70da: return "_Tlsget"; case 0xa835be11: return "__cxa_atexit"; case 0xa874036a: return "wcstof"; case 0xa8a6f615: return "TlsSetValue"; case 0xa8b07f1b: return "wmemcpy"; case 0xa9f68eff: return "qsort"; case 0xaa1e687d: return "isgraph"; case 0xaa266d35: return "_malloc_init"; case 0xaa9635d7: return "strcat"; case 0xab4c7ca1: return "_CWcsxfrm"; case 0xab77019f: return "fstat"; case 0xabc27420: return "wcstoul"; case 0xac758d20: return "wmemcmp"; case 0xac893127: return "fgetc"; case 0xace90be4: return "_Dtentox"; case 0xad62a342: return "ldiv"; case 0xad8e9ad0: return "_Initlocks"; case 0xaec7c970: return "lseek"; case 0xaf002043: return "independent_comalloc"; case 0xaf44a615: return "fgets"; case 0xaf6bdcb0: return "_Nonfatal_Assert"; case 0xaf89fdbd: return "_Assert"; case 0xafa39179: return "_WPutstr"; case 0xb120f6ca: return "close"; case 0xb17b79d0: return "isalpha"; case 0xb18cc115: return "freopen"; case 0xb1cc43e3: return "_CStrftime"; case 0xb1f4779d: return "spu_thread_printf"; case 0xb24cb8d6: return "_Locterm"; case 0xb2702e15: return "wcrtomb"; case 0xb2748a9f: return "_Freeloc"; case 0xb30042ce: return "lldiv"; case 0xb37982ea: return "_Getstr"; case 0xb3c495bd: return "imaxabs"; case 0xb3d98d59: return "_rand_real1_TT800"; case 0xb400f226: return "isupper_ascii"; case 0xb4225825: return "mbsinit"; case 0xb43c25c7: return "wcstoull"; case 0xb49eea74: return "_init_malloc_lock0"; case 0xb4a54446: return "_Stofx"; case 0xb4fc7078: return "_close_all_FILE"; case 0xb529d259: return "isalnum"; case 0xb569849d: return "reallocalign"; case 0xb57bdf7b: return "iswxdigit"; case 0xb5d353e8: return "_LDtentox"; case 0xb6002508: return "_Putstr"; case 0xb6257e3d: return "strncasecmp"; case 0xb680e240: return "wcstombs"; case 0xb6af290e: return "_WFrprep"; case 0xb6d92ac3: return "strcasecmp"; case 0xb738027a: return "strtok_r"; case 
0xb794631e: return "_WStofx"; case 0xb7ab5127: return "wcsrchr"; case 0xb7b793ed: return "get_state_TT800"; case 0xb7ba4aeb: return "_WStoul"; case 0xb7d3427f: return "iscntrl_ascii"; case 0xb81cd66a: return "mbrlen"; case 0xb9ed25d4: return "raw_spu_read_ulong"; case 0xba62681f: return "mspace_memalign"; case 0xbb605c96: return "pvalloc"; case 0xbbd4582f: return "_Setloc"; case 0xbc1d69c5: return "atoll"; case 0xbc374779: return "_Getlname"; case 0xbc5af0b5: return "fgetwc"; case 0xbc7b4b8e: return "ctime"; case 0xbe11beaa: return "_wremove"; case 0xbe251a29: return "islower_ascii"; case 0xbe6e5c58: return "spu_thread_read_uchar"; case 0xbec43f86: return "raw_spu_read_ptr"; case 0xbf5bf5ea: return "lseek64"; case 0xbfcd1b3b: return "_Getdst"; case 0xc01d9f97: return "printf"; case 0xc08cc41d: return "wcstod"; case 0xc0e27b2c: return "_Makestab"; case 0xc155a73f: return "_WStoull"; case 0xc15e657e: return "spu_raw_sprintf"; case 0xc1a71972: return "_d2rv"; case 0xc1b4bbb9: return "raw_spu_write_char"; case 0xc1c8737c: return "_Getptoupper"; case 0xc291e698: return "exit"; case 0xc3c598e2: return "spu_printf_initialize"; case 0xc3e14cbe: return "memcmp"; case 0xc4178000: return "_rand_real3_TT800"; case 0xc41c6e5d: return "_Scanf"; case 0xc57337f8: return "_Fofind"; case 0xc5c09834: return "strstr"; case 0xc63c354f: return "_Exit"; case 0xc69b2427: return "labs"; case 0xc78df618: return "rand_real3_TT800"; case 0xc7b62ab8: return "spu_thread_write_ullong"; case 0xc9471fac: return "_Mtxinit"; case 0xc94b27e3: return "_WStof"; case 0xc95b20d3: return "fputwc"; case 0xc9607d35: return "_Stopfx"; case 0xc97a17d7: return "vsscanf"; case 0xcab654bf: return "_Once_ctor"; case 0xcb85ac70: return "mspace_malloc"; case 0xcb9c535b: return "strftime"; case 0xcbac7ad7: return "memchr"; case 0xcbdc3a6d: return "raw_spu_write_int"; case 0xcc5e0c72: return "_divi4"; case 0xcca68e9c: return "putwc"; case 0xce7a9e76: return "isprint_ascii"; case 0xcecbcdc4: return "_Frv2d"; case 
0xcf863219: return "_Fwprep"; case 0xcfbfb7a7: return "spu_printf_detach_thread"; case 0xd14ece90: return "strtol"; case 0xd1d69cb8: return "_Stod"; case 0xd20f6601: return "independent_calloc"; case 0xd2a99b1e: return "isprint"; case 0xd2ac48d7: return "iswalnum"; case 0xd360dcb4: return "fileno"; case 0xd3964a09: return "__spu_thread_putfld"; case 0xd40723d6: return "fread"; case 0xd417eeb5: return "_Stoull"; case 0xd4912ee3: return "_FDscale"; case 0xd5c8cb55: return "spu_thread_write_ushort"; case 0xd69c513d: return "_Wcscollx"; case 0xd784459d: return "isupper"; case 0xd7dc3a8f: return "strtod"; case 0xd8b4eb20: return "__spu_thread_puttxt"; case 0xd9674905: return "mspace_reallocalign"; case 0xd9a4f812: return "atoff"; case 0xda5a7eb8: return "strtoul"; case 0xdaeada07: return "mallopt"; case 0xddbac025: return "strcasecmp_ascii"; case 0xddc71a75: return "_SCE_Assert"; case 0xde1bb092: return "init_by_array_TT800"; case 0xde32a334: return "_Exitspawn"; case 0xde7aff7a: return "memcpy16"; case 0xdebee2af: return "strchr"; case 0xdef86a83: return "isxdigit_ascii"; case 0xdfb52083: return "_Stoxflt"; case 0xe03c7ab1: return "_Fspos"; case 0xe1858899: return "_Getpwctrtab"; case 0xe1bd3587: return "fclose"; case 0xe1e83c65: return "strncmp"; case 0xe2c5274a: return "_WStoflt"; case 0xe3812672: return "fdopen"; case 0xe3cc73f3: return "puts"; case 0xe3d91db3: return "raw_spu_read_double"; case 0xe40ba755: return "strtok"; case 0xe44bf0bf: return "atof"; case 0xe469fb20: return "_Atexit"; case 0xe48348e9: return "vprintf"; case 0xe4c51d4c: return "wcstoimax"; case 0xe5ea9e2b: return "_Isdst"; case 0xe5f09c80: return "llabs"; case 0xe60ee9e5: return "fputws"; case 0xe6a7de0a: return "ungetc"; case 0xe7def231: return "_Getfloat"; case 0xe89071ad: return "isalpha_ascii"; case 0xe9137453: return "fwprintf"; case 0xe9a2cc40: return "raw_spu_write_long"; case 0xe9b560a5: return "sscanf"; case 0xeb26298c: return "gmtime"; case 0xeb40c9ec: return "rand_real2_TT800"; case 
0xeb8abe73: return "vwscanf"; case 0xec9e7cb9: return "spu_thread_read_llong"; case 0xecddba69: return "_WStodx"; case 0xed6ec979: return "fsetpos"; case 0xeda48c80: return "malloc_trim"; case 0xeddcee2c: return "init_TT800"; case 0xedec777d: return "_Ttotm"; case 0xeeeb4f3e: return "_get_state_TT800"; case 0xeeffc9a6: return "_wrename"; case 0xef110b6b: return "unlink"; case 0xf06eed36: return "wmemset"; case 0xf0776a44: return "wcscmp"; case 0xf0e022c6: return "getc"; case 0xf2bbbee9: return "_Litob"; case 0xf2fca4b2: return "spu_thread_write_llong"; case 0xf356418c: return "open"; case 0xf3ef3678: return "wcscspn"; case 0xf41355f9: return "wcscpy"; case 0xf418ee84: return "_WFwprep"; case 0xf4207734: return "spu_thread_write_ulong"; case 0xf5a32994: return "_Getpcostate"; case 0xf5ef229c: return "_Getpwcostate"; case 0xf5f7dda8: return "towupper"; case 0xf68e2ac9: return "_init_malloc_lock"; case 0xf7583d67: return "vscanf"; case 0xf7908e27: return "strcspn"; case 0xf7a14a22: return "realloc"; case 0xf7d51596: return "scanf"; case 0xf7ddb471: return "_Setgloballocale"; case 0xf88f26c4: return "fwrite"; case 0xf8935fe3: return "spu_thread_write_float"; case 0xf89dc648: return "strpbrk"; case 0xf9dae72c: return "setjmp"; case 0xf9dba140: return "_Mtxlock"; case 0xf9e26b72: return "_Once_dtor"; case 0xfa00d211: return "read"; case 0xfae4b063: return "_Strcollx"; case 0xfaec8c60: return "fprintf"; case 0xfb0f0018: return "_Makewct"; case 0xfb2081fd: return "vfprintf"; case 0xfb81426d: return "iswlower"; case 0xfb8ea4d2: return "_Fd2rv"; case 0xfc0428a6: return "strdup"; case 0xfc60575c: return "__spu_thread_printf"; case 0xfc606237: return "mbsrtowcs"; case 0xfcac2e8e: return "mbstowcs"; case 0xfd0cb96d: return "spu_thread_read_short"; case 0xfd461e85: return "spu_thread_write_ldouble"; case 0xfd6a1ddb: return "raw_spu_read_llong"; case 0xfd81f6ca: return "_Stoflt"; case 0xfe0261aa: return "mspace_free"; case 0xfe630fd9: return "isblank"; case 0xfe88e97e: return 
"fscanf"; case 0xff689124: return "strtoimax"; case 0xffbae95e: return "asctime"; case 0xffbd876b: return "__raw_spu_puttxt"; case 0x003395d9: return "_Feraise"; case 0x00367be0: return "fminl"; case 0x007854f4: return "_FDclass"; case 0x00fde072: return "f_powf"; case 0x010818fc: return "asinf4"; case 0x012d0a91: return "_fminf4"; case 0x016556df: return "_sinf4"; case 0x01b84b27: return "llround"; case 0x01ecef7d: return "_FCbuild"; case 0x02e68d44: return "_f_fmodf"; case 0x032cc709: return "csin"; case 0x03593d2c: return "_f_expf"; case 0x03aea906: return "divf4"; case 0x0522d1af: return "_recipf4"; case 0x054aae63: return "_fdimf4"; case 0x05cb1718: return "f_fdimf"; case 0x05e27a13: return "log10f4fast"; case 0x05efc660: return "asin"; case 0x05f1dc9e: return "_FExp"; case 0x07274304: return "csinh"; case 0x07daed62: return "log2f4"; case 0x07f400e3: return "_LCbuild"; case 0x080414bd: return "conjl"; case 0x08139bd2: return "_fmaxf4"; case 0x0829a21d: return "asinhl"; case 0x0a242ed5: return "sinf4"; case 0x0b3f4e90: return "catanhf"; case 0x0bb036a6: return "_cosf4"; case 0x0c14cfcc: return "fesetenv"; case 0x0c9b8305: return "hypotf4"; case 0x0cbdae68: return "sinf"; case 0x0cf9b8bd: return "_Erfc"; case 0x0d86295d: return "_LCaddcr"; case 0x0e53319f: return "_asinf4"; case 0x0e8573dc: return "expm1l"; case 0x0f02f882: return "llrintl"; case 0x0f428f0f: return "rint"; case 0x0f721a9d: return "_LCsubcc"; case 0x10627248: return "f_fmodf"; case 0x11c51388: return "tgamma"; case 0x1225dd31: return "casinf"; case 0x12de4e46: return "_powf4"; case 0x12e04cd7: return "cimagl"; case 0x1313a420: return "acos"; case 0x137f7e77: return "expf4"; case 0x14208b00: return "_asinf4fast"; case 0x1498a072: return "_Cmulcr"; case 0x16bf208a: return "log10f"; case 0x17316bee: return "log2"; case 0x178d98dd: return "atanf4fast"; case 0x17cd5d87: return "_recipf4fast"; case 0x182cd542: return "tgammal"; case 0x18668ce3: return "exp"; case 0x18b26998: return "remainderl"; case 
0x18ec6099: return "rintl"; case 0x1988732d: return "clog10"; case 0x1a1adede: return "rsqrtf4fast"; case 0x1acb2b16: return "acosf4"; case 0x1bbdcd9f: return "expm1f4"; case 0x1bcdeb47: return "_LSinh"; case 0x1be996cc: return "_LCdivcc"; case 0x1c11885d: return "_floorf4"; case 0x1d35bfe4: return "_LLog"; case 0x1d5bf5d0: return "_modff4"; case 0x1e623f95: return "truncf4"; case 0x1e85ef02: return "f_atanf"; case 0x1e9fd6ba: return "_sinf4fast"; case 0x2033eeb7: return "csqrt"; case 0x2118fe46: return "cexpl"; case 0x21a37b3e: return "log1pf"; case 0x21e6d304: return "ceil"; case 0x22c3e308: return "_exp2f4"; case 0x238af59b: return "fegetenv"; case 0x23b985f7: return "floorf"; case 0x241f9337: return "_FCmulcr"; case 0x24497c52: return "cosf"; case 0x246ea8d0: return "f_sqrtf"; case 0x2627d6b2: return "erfc"; case 0x266d2473: return "_Caddcr"; case 0x26deed0b: return "cosl"; case 0x26ef50ed: return "asinh"; case 0x28faaa5a: return "ilogbf4"; case 0x29685118: return "_negatef4"; case 0x2a138d2b: return "truncf"; case 0x2a4dcbad: return "cacosl"; case 0x2a89ce33: return "llrintf"; case 0x2af4b73b: return "fmax"; case 0x2b282ebb: return "sqrtl"; case 0x2bb0f2c9: return "logb"; case 0x2c45fe6a: return "fmaxl"; case 0x2c601f3b: return "csinl"; case 0x2cbb6f53: return "f_hypotf"; case 0x2dcab6a4: return "nanl"; case 0x2df339bc: return "_f_floorf"; case 0x2e69bb2a: return "_FCosh"; case 0x2ec867b4: return "exp2f4fast"; case 0x30bc7a53: return "logf4"; case 0x315673f6: return "_Csubcc"; case 0x31be25c3: return "scalblnf"; case 0x31db8c89: return "atan2"; case 0x321c55de: return "nexttowardl"; case 0x3261de11: return "fesetexceptflag"; case 0x329ec019: return "rsqrtf4"; case 0x32f994a1: return "cosf4fast"; case 0x33e5929b: return "_LDsign"; case 0x33f27f25: return "_FCdivcr"; case 0x3436f008: return "csinhf"; case 0x3459748b: return "log10f4"; case 0x347c1ee1: return "atanf4"; case 0x34c0371e: return "powl"; case 0x358d7f93: return "_f_lrintf"; case 0x3593a445: return 
"clog"; case 0x35b6e70a: return "lrintl"; case 0x35d3f688: return "creal"; case 0x36778d1b: return "coshf"; case 0x373054d1: return "cpow"; case 0x37345541: return "log1pl"; case 0x376fb27f: return "sinhl"; case 0x3792b12d: return "lroundl"; case 0x38ba5590: return "ccosl"; case 0x38e69f09: return "pow"; case 0x398483aa: return "_expm1f4fast"; case 0x39ef81c9: return "f_fmaxf"; case 0x3ad203fa: return "lrint"; case 0x3adc01d7: return "f_frexpf"; case 0x3b802524: return "ldexpf4"; case 0x3c057fbd: return "atanf"; case 0x3c616743: return "_LDtest"; case 0x3cb818fa: return "_f_fdimf"; case 0x3d4efafb: return "atan2l"; case 0x3d549f2a: return "ctanhl"; case 0x3d901a10: return "_ceilf4"; case 0x3da55602: return "fabsf"; case 0x3dfa060f: return "scalbnl"; case 0x3e7eb58f: return "frexpf4"; case 0x3e919cba: return "scalbnf"; case 0x3ec9de23: return "_cbrtf4"; case 0x3eeedb0e: return "_Dclass"; case 0x3f6262b3: return "f_fminf"; case 0x3f701e78: return "_Poly"; case 0x4020f5ef: return "cbrt"; case 0x405f9727: return "_log1pf4fast"; case 0x40a2e212: return "_fabsf4"; case 0x4111b546: return "_LExp"; case 0x411434bb: return "asinf"; case 0x414c5ecc: return "_f_hypotf"; case 0x4152669c: return "scalbln"; case 0x417851ce: return "feholdexcept"; case 0x418036e3: return "_FTgamma"; case 0x4189a367: return "remquo"; case 0x41d1b236: return "_f_rintf"; case 0x430309a1: return "ldexpf"; case 0x434881a0: return "cacosf"; case 0x43d522f4: return "cabsl"; case 0x44cd6308: return "remainder"; case 0x44cf744b: return "tanhl"; case 0x45034943: return "nan"; case 0x452ac4bb: return "floorf4"; case 0x453f9e91: return "cbrtf"; case 0x46b66f76: return "csqrtl"; case 0x46cf72d9: return "fdimf"; case 0x47433144: return "expm1f4fast"; case 0x475d855b: return "trunc"; case 0x476b5591: return "fmaf"; case 0x48157605: return "_f_llrintf"; case 0x4826db61: return "fma"; case 0x4875601d: return "_exp2f4fast"; case 0x487bbd1c: return "tanf4"; case 0x488df791: return "cexp"; case 0x48d462a9: return 
"_FDint"; case 0x4930ac11: return "logbl"; case 0x4a5ae27d: return "f_exp2f"; case 0x4a6ca9a6: return "powf4"; case 0x4ab22a63: return "_Caddcc"; case 0x4add664c: return "feclearexcept"; case 0x4ae52dd3: return "exp2"; case 0x4b03d5b2: return "f_rintf"; case 0x4b584841: return "f_asinf"; case 0x4cb5fa99: return "nexttoward"; case 0x4d878773: return "remainderf4"; case 0x4ddb926b: return "powf"; case 0x4e010403: return "copysign"; case 0x4eb5eb51: return "sin"; case 0x4fa4f5ec: return "nexttowardf"; case 0x501c412f: return "cargf"; case 0x519ebb77: return "floor"; case 0x547fb4a7: return "sinf4fast"; case 0x54d2fb8c: return "rintf"; case 0x5516d621: return "acosl"; case 0x55c8a549: return "truncl"; case 0x56c573a8: return "log1p"; case 0x575e9b6e: return "asinl"; case 0x58eb9e57: return "fabs"; case 0x596ab55c: return "atanh"; case 0x5b18eded: return "clogl"; case 0x5b474c22: return "casinhl"; case 0x5bfd37be: return "_FCaddcc"; case 0x5e48dede: return "exp2f4"; case 0x5ee10a95: return "catanh"; case 0x5ee37927: return "_LErfc"; case 0x60e9ff3c: return "_expm1f4"; case 0x61250988: return "catanl"; case 0x6261c0b5: return "_log10f4"; case 0x63bbdfa6: return "_FCmulcc"; case 0x642e3d18: return "_frexpf4"; case 0x642f7d6b: return "f_copysignf"; case 0x645557bd: return "copysignl"; case 0x64abdb4d: return "csinhl"; case 0x657d0e83: return "divf4fast"; case 0x65935877: return "ilogbf"; case 0x659e011e: return "sqrt"; case 0x6636c4a5: return "frexpf"; case 0x664e04b9: return "negatef4"; case 0x6764c707: return "f_log2f"; case 0x683cacb3: return "sinh"; case 0x68a8957f: return "casinhf"; case 0x68f72416: return "nextafterl"; case 0x69040b9b: return "logbf4"; case 0x69725dce: return "lgamma"; case 0x6ad1c42b: return "_sincosf4"; case 0x6b660894: return "_acosf4fast"; case 0x6b6ab2a9: return "_LDclass"; case 0x6c009c56: return "f_log10f"; case 0x6c6285c6: return "acoshf"; case 0x6cc4bd13: return "casinh"; case 0x6ddd31b2: return "hypot"; case 0x6df35518: return "floorl"; 
case 0x6e9eb0dc: return "sincosf4fast"; case 0x6ef6b083: return "_FCsubcr"; case 0x6f5dd7d2: return "cexpf"; case 0x6f639afb: return "f_llroundf"; case 0x6fcc1e27: return "_FPoly"; case 0x70357b12: return "_atanf4fast"; case 0x7048396e: return "carg"; case 0x705d9e24: return "f_acosf"; case 0x70f71871: return "_FCdivcc"; case 0x71293b71: return "_FLog"; case 0x714adce1: return "log"; case 0x71f2bc56: return "_divf4fast"; case 0x728149e5: return "f_ldexpf"; case 0x729b7269: return "cproj"; case 0x72a3ed28: return "fesettrapenable"; case 0x72f1f64b: return "_logbf4"; case 0x734ca589: return "_f_cosf"; case 0x742f12b4: return "_Sin"; case 0x74902d4b: return "expf4fast"; case 0x749440f9: return "lgammal"; case 0x752fa85e: return "fmaxf4"; case 0x758f33dc: return "nearbyint"; case 0x75e3e2e9: return "nearbyintl"; case 0x76afaf04: return "_sqrtf4"; case 0x76e639ec: return "_atanf4"; case 0x772f1e4d: return "lround"; case 0x7793a86b: return "ctanf"; case 0x7831a2e0: return "hypotl"; case 0x78e4590a: return "acosh"; case 0x790c53bd: return "_Fpcomp"; case 0x7919f414: return "_f_nearbyintf"; case 0x79ba9b5c: return "expl"; case 0x7a893af1: return "_rsqrtf4"; case 0x7ab679da: return "f_cosf"; case 0x7c2eaeb5: return "fminf"; case 0x7d02a5ca: return "sqrtf4fast"; case 0x7d6191d0: return "_Cosh"; case 0x7f381837: return "frexp"; case 0x7f579e03: return "atan"; case 0x7f91cd41: return "tanf4fast"; case 0x812ed488: return "cabsf"; case 0x81daf880: return "_LCsubcr"; case 0x8217e783: return "cosh"; case 0x833e6b0e: return "cimag"; case 0x834f5917: return "ccosh"; case 0x842cb14d: return "_log1pf4"; case 0x8451edf0: return "sqrtf"; case 0x889cccb0: return "llroundl"; case 0x88fb4a66: return "recipf4fast"; case 0x892f2590: return "fegetround"; case 0x895cdb49: return "fmaxf"; case 0x89b507b3: return "catanhl"; case 0x89d1d168: return "_LAtan"; case 0x8b168769: return "fdiml"; case 0x8bd1deb2: return "_LTgamma"; case 0x8bd67efc: return "erf"; case 0x8c85369b: return "_f_fminf"; case 
0x8d5858db: return "_f_exp2f"; case 0x8e01379e: return "cacoshf"; case 0x8e258fa0: return "cacos"; case 0x8ecae294: return "nextafter"; case 0x8f2bcdb5: return "_logf4"; case 0x8f96319e: return "log10l"; case 0x8fb7bac7: return "_sqrtf4fast"; case 0x904e646b: return "cargl"; case 0x90f0242f: return "_f_sinf"; case 0x9110708a: return "modfl"; case 0x91cdfdb0: return "asinf4fast"; case 0x9232baea: return "_FDtest"; case 0x9245e01b: return "_divf4"; case 0x9379e36e: return "tanf"; case 0x938fb946: return "_tanf4fast"; case 0x947ae18e: return "_LHypot"; case 0x9558ed08: return "lrintf"; case 0x95dfecb1: return "_FCsubcc"; case 0x961688d1: return "f_nearbyintf"; case 0x9616e336: return "_FHypot"; case 0x964ac044: return "creall"; case 0x96d1b95e: return "log2f4fast"; case 0x9700d9cd: return "clogf"; case 0x970a3432: return "cacosh"; case 0x99a6c261: return "catanf"; case 0x99c228fc: return "roundl"; case 0x9a81e583: return "fmodf"; case 0x9af30eaf: return "casin"; case 0x9e289062: return "_f_ceilf"; case 0x9e3ada21: return "logl"; case 0x9e8130b6: return "ccos"; case 0x9f03dd3e: return "lgammaf"; case 0x9f0efc6e: return "exp2l"; case 0x9f46f5a4: return "tgammaf"; case 0x9f65bd34: return "fdimf4"; case 0x9f78f052: return "cos"; case 0x9fded78a: return "_acosf4"; case 0xa0160c30: return "_copysignf4"; case 0xa20827a8: return "ctanl"; case 0xa2c81938: return "_LSin"; case 0xa4578433: return "fmin"; case 0xa46a70a1: return "atanhl"; case 0xa4ca5cf2: return "llroundf"; case 0xa56557b6: return "catan"; case 0xa5d0b260: return "acoshl"; case 0xa713f8cf: return "modf"; case 0xa7658186: return "log1pf4"; case 0xa823836b: return "ilogb"; case 0xa8c16038: return "_FDsign"; case 0xa8d180e8: return "_Cbuild"; case 0xa92bcc85: return "cabs"; case 0xa9e039c4: return "erfcf"; case 0xaaa270dc: return "_LCdivcr"; case 0xab377381: return "log2f"; case 0xabdccc7a: return "f_atan2f"; case 0xacca2f83: return "copysignf"; case 0xad17e787: return "_Dint"; case 0xad3a093d: return "_LCosh"; case 
0xad5d3e57: return "_FLgamma"; case 0xaddce673: return "erfcl"; case 0xafa13040: return "f_llrintf"; case 0xafcfdad7: return "_Lgamma"; case 0xafd9a625: return "cimagf"; case 0xb0fa1592: return "clog10l"; case 0xb24bd2f8: return "logbf"; case 0xb348c5c2: return "_LLgamma"; case 0xb412a8dc: return "_LDint"; case 0xb4ef29d5: return "f_floorf"; case 0xb4f4513e: return "_Tgamma"; case 0xb54cc9a1: return "f_sinf"; case 0xb5961d4e: return "_sincosf4fast"; case 0xb598a495: return "fmodl"; case 0xb5e28191: return "_FSin"; case 0xb7696143: return "nextafterf"; case 0xb79012ba: return "modff"; case 0xb89863bc: return "_rsqrtf4fast"; case 0xb8aa984e: return "_expf4"; case 0xb94b9d13: return "_Dtest"; case 0xb9d2ad22: return "remquol"; case 0xba136594: return "csinf"; case 0xba84eab5: return "coshl"; case 0xbaf11866: return "ceilf"; case 0xbb165807: return "expm1f"; case 0xbb208b20: return "cbrtf4fast"; case 0xbb761c89: return "remquof"; case 0xbbaa300b: return "f_log1pf"; case 0xbbf7354e: return "fegetexceptflag"; case 0xbd7410d9: return "recipf4"; case 0xbd8bb75c: return "asinhf"; case 0xbf23f2e7: return "cprojl"; case 0xbfda6837: return "_f_log10f"; case 0xc0609820: return "nearbyintf"; case 0xc0bcf25e: return "_logf4fast"; case 0xc357b33a: return "frexpl"; case 0xc406dd09: return "cbrtf4"; case 0xc41f01db: return "fminf4"; case 0xc477c0f6: return "f_lroundf"; case 0xc4cccd1f: return "modff4"; case 0xc7369fce: return "_Atan"; case 0xc78ac9d0: return "scalbn"; case 0xc7b45a19: return "_LFpcomp"; case 0xc7f1d407: return "fmal"; case 0xc7fb73d6: return "f_lrintf"; case 0xc8910002: return "ilogbl"; case 0xc8dd9279: return "expm1"; case 0xc90f4bbc: return "_atan2f4"; case 0xc9481758: return "_tanf4"; case 0xc94fcc63: return "cbrtl"; case 0xc977e1ea: return "fetestexcept"; case 0xc984bf53: return "roundf"; case 0xc9c536ce: return "_ldexpf4"; case 0xca239640: return "fmodf4"; case 0xca463458: return "_Log"; case 0xcaaf7ae7: return "cprojf"; case 0xcac167a5: return "_Cmulcc"; case 
0xcb6599c0: return "exp2f"; case 0xcb6a147e: return "_cosf4fast"; case 0xcbdf9afb: return "_log10f4fast"; case 0xccc66f11: return "_FSinh"; case 0xce91ff18: return "nanf"; case 0xcfee82d8: return "_remainderf4"; case 0xd0fd3ca8: return "_hypotf4"; case 0xd125b89e: return "conjf"; case 0xd1a3574c: return "clog10f"; case 0xd231e30a: return "ldexpl"; case 0xd28ef6dd: return "_Hypot"; case 0xd2a666c9: return "ctanh"; case 0xd3a346a8: return "tanl"; case 0xd40f3f2c: return "erff"; case 0xd42904b7: return "fabsl"; case 0xd477852d: return "logf"; case 0xd48eaae1: return "scalblnl"; case 0xd4f37b9d: return "tanhf"; case 0xd50277ad: return "tan"; case 0xd54039cb: return "fegettrapenable"; case 0xd5adc4b2: return "cpowl"; case 0xd5d38552: return "_LCaddcc"; case 0xd612fa16: return "_Sinh"; case 0xd70df92a: return "_FCaddcr"; case 0xd7653782: return "sinhf"; case 0xd76a16da: return "_fmaf4"; case 0xd8270894: return "fdim"; case 0xd8c4096d: return "atan2f4"; case 0xd8d157f5: return "f_expf"; case 0xd8f79f4c: return "log10"; case 0xd97852b7: return "sinl"; case 0xd97ce5d4: return "fesetround"; case 0xda217d1f: return "atanl"; case 0xda31fc5d: return "_FFpcomp"; case 0xdc14974c: return "fmaf4"; case 0xdc151707: return "_f_log2f"; case 0xdd8660d2: return "atan2f4fast"; case 0xdd92118e: return "ceill"; case 0xdddabb32: return "remainderf"; case 0xde7833f2: return "_log2f4fast"; case 0xdece76a6: return "acosf"; case 0xdfd41734: return "_Exp"; case 0xdffb4e3c: return "casinl"; case 0xe1288c47: return "atanhf"; case 0xe1c71b05: return "ccoshl"; case 0xe2b596ec: return "ccosf"; case 0xe2de89e6: return "csqrtf"; case 0xe2f1d4b2: return "tanh"; case 0xe31cc0d3: return "_ilogbf4"; case 0xe3e379b8: return "_expf4fast"; case 0xe584836c: return "_LPoly"; case 0xe58fc9b5: return "erfl"; case 0xe5a0be9f: return "_powf4fast"; case 0xe5d2293f: return "_Force_raise"; case 0xe5ea65e8: return "feraiseexcept"; case 0xe6c1ff41: return "llrint"; case 0xe769e5cf: return "fmod"; case 0xe8fcf1f8: return 
"acosf4fast"; case 0xe913a166: return "logf4fast"; case 0xe92f3fb8: return "_f_fmaf"; case 0xe93abfca: return "ctan"; case 0xe9ac8223: return "_LCmulcr"; case 0xe9f501df: return "crealf"; case 0xea1e83e3: return "f_logf"; case 0xeac62795: return "_Cdivcc"; case 0xeac7ca2c: return "ceilf4"; case 0xebb4e08a: return "hypotf"; case 0xec43b983: return "_f_sqrtf"; case 0xec7da0c8: return "_atan2f4fast"; case 0xed05c265: return "sqrtf4"; case 0xed9d1ac5: return "f_tanf"; case 0xeda86c48: return "copysignf4"; case 0xee0db701: return "_Csubcr"; case 0xee204ac6: return "f_ceilf"; case 0xee303936: return "_Dsign"; case 0xeed82401: return "_f_logf"; case 0xf0947035: return "ctanhf"; case 0xf0ab77c1: return "ccoshf"; case 0xf16568af: return "_FAtan"; case 0xf19c5e94: return "sincosf4"; case 0xf1aaa2f8: return "conj"; case 0xf3bd7d08: return "_cbrtf4fast"; case 0xf3ec0258: return "round"; case 0xf4ad6ea8: return "ldexp"; case 0xf537d837: return "_truncf4"; case 0xf5cd1e19: return "cosf4"; case 0xf7844153: return "_f_fmaxf"; case 0xf83a372f: return "f_fmaf"; case 0xf95b7769: return "powf4fast"; case 0xf99da2fc: return "fabsf4"; case 0xfa28434b: return "log2l"; case 0xfa765d42: return "_Cdivcr"; case 0xfa97afbf: return "feupdateenv"; case 0xfae9e727: return "_f_copysignf"; case 0xfb6e6213: return "log1pf4fast"; case 0xfb932a56: return "atan2f"; case 0xfbb4047a: return "lroundf"; case 0xfbe88922: return "_FErfc"; case 0xfcedabc3: return "_fmodf4"; case 0xfcf08193: return "expf"; case 0xfdec16e1: return "cacoshl"; case 0xfe23dbe9: return "_log2f4"; case 0xff036800: return "cpowf"; case 0xfffe79bf: return "_LCmulcc"; } if (_module == "sys_libstdcxx") switch (fnid) { case 0x002c338b: return "_ZNKSt8time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE16do_get_monthnameES3_S3_RSt8ios_baseRNSt5_IosbIiE8_IostateEPSt2tm"; case 0x002e18d8: return "_ZNSt6locale7_LocimpD0Ev"; case 0x0091a3fd: return "_ZNKSt6locale9_GetfacetEj"; case 0x00c3975e: return 
"_ZNSt15basic_streambufIcSt11char_traitsIcEE5_LockEv"; case 0x00cf44f7: return "_ZNKSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE6do_putES3_RSt8ios_basecl"; case 0x01409785: return "_ZNKSt8time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE7_GetintERS3_S5_iiRi"; case 0x01aa0cef: return "_ZNKSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE6do_getES3_S3_RSt8ios_baseRNSt5_IosbIiE8_IostateERx"; case 0x01c4ef01: return "_ZNSt6localeC2ERKS_S1_i"; case 0x01d9b3f5: return "_ZNSt6localeC1EPKci"; case 0x01f81190: return "_ZNSt12codecvt_baseD1Ev"; case 0x020b22f3: return "_ZNKSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE6do_putES3_RSt8ios_basewPKv"; case 0x02e40598: return "_ZNSt8time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE5_InitERKSt8_Locinfo"; case 0x03217f6f: return "_ZNSt19istreambuf_iteratorIcSt11char_traitsIcEE5_PeekEv"; case 0x0339259c: return "_ZNKSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE6do_putES3_RSt8ios_basecPKv"; case 0x033c18f4: return "_ZNSt8time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEED1Ev"; case 0x03cca12f: return "_ZNSt6localeC1ERKS_PKci"; case 0x040c18ff: return "_ZNKSt7_MpunctIwE16do_decimal_pointEv"; case 0x045e124a: return "_ZdaPv"; case 0x0490855d: return "_ZNSt8numpunctIwE7_GetcatEPPKNSt6locale5facetE"; case 0x055c1462: return "_ZNSt15basic_streambufIcSt11char_traitsIcEED1Ev"; case 0x05903101: return "_ZNKSt7collateIcE7do_hashEPKcS2_"; case 0x05a9cef6: return "_ZNSt7_MpunctIcE5_InitERKSt8_Locinfo"; case 0x05ec37c8: return "_ZSt10_MaklocstrIwEPT_PKcS1_RKSt7_Cvtvec"; case 0x06bc5b51: return "_ZNKSt7_MpunctIwE16do_positive_signEv"; case 0x07a3bd16: return "_ZNSt6locale7_LocimpD1Ev"; case 0x07b6c924: return "_ZTv0_n12_NSt13basic_ostreamIwSt11char_traitsIwEED1Ev"; case 0x085bff4f: return "_ZNSt15basic_streambufIwSt11char_traitsIwEE5_LockEv"; case 0x08e1865c: return "_ZNKSt8numpunctIwE16do_thousands_sepEv"; case 0x09e73a2a: return "_ZNKSt7codecvtIwcSt9_MbstatetE11do_encodingEv"; case 
0x0ba5483c: return "_ZNKSt12codecvt_base11do_encodingEv"; case 0x0bc08c57: return "_ZNKSt7collateIwE7do_hashEPKwS2_"; case 0x0bcc1910: return "_ZNSt10ostrstreamD2Ev"; case 0x0d4290d2: return "_ZNSt12length_errorD0Ev"; case 0x0d644dca: return "_ZNKSt8time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE11do_get_dateES3_S3_RSt8ios_baseRNSt5_IosbIiE8_IostateEPSt2tm"; case 0x0e147a9d: return "_ZNSt13basic_filebufIwSt11char_traitsIwEE9pbackfailEi"; case 0x0e744ef5: return "_ZNSt9money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEED1Ev"; case 0x0e9698af: return "_ZNSt7codecvtIwcSt9_MbstatetED1Ev"; case 0x0e9a5554: return "_ZNSt13basic_istreamIwSt11char_traitsIwEED0Ev"; case 0x0f930fdd: return "_ZNSt13messages_baseD2Ev"; case 0x0ff264b9: return "_ZNSt15basic_streambufIcSt11char_traitsIcEE4syncEv"; case 0x10231873: return "_ZNSt13runtime_errorD1Ev"; case 0x10dc3f6c: return "_ZNSbIwSt11char_traitsIwESaIwEE6appendEjw"; case 0x113a515f: return "_ZNKSt8messagesIcE7do_openERKSsRKSt6locale"; case 0x114e9178: return "_ZNSt11logic_errorD0Ev"; case 0x128cd621: return "_ZNKSt5ctypeIwE10do_scan_isEsPKwS2_"; case 0x12de5772: return "_ZNKSt9money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE6do_getES3_S3_bRSt8ios_baseRNSt5_IosbIiE8_IostateERSs"; case 0x1374b8c8: return "_ZNSt10moneypunctIcLb0EED1Ev"; case 0x143048bf: return "_ZNSt15basic_streambufIcSt11char_traitsIcEE7seekoffElNSt5_IosbIiE8_SeekdirENS4_9_OpenmodeE"; case 0x1474ac53: return "_ZNKSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE6do_getES3_S3_RSt8ios_baseRNSt5_IosbIiE8_IostateERt"; case 0x14e3faa5: return "_ZNKSt5ctypeIwE9do_narrowEwc"; case 0x1527fe95: return "_ZNSt15basic_streambufIwSt11char_traitsIwEE4syncEv"; case 0x1692ae0c: return "_ZNSt6localeD1Ev"; case 0x16df5ecb: return "_ZNKSt12codecvt_base16do_always_noconvEv"; case 0x17dd0a4e: return "_ZNKSt7_MpunctIwE16do_negative_signEv"; case 0x18628537: return "_ZNKSt8numpunctIcE16do_decimal_pointEv"; case 0x186bcc94: return "_ZNSt8ios_base4InitD1Ev"; 
case 0x18a38254: return "_ZNSt10ctype_baseD1Ev"; case 0x197fc348: return "_ZNSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEED1Ev"; case 0x1989f59c: return "_ZNSt8ios_base17register_callbackEPFvNS_5eventERS_iEi"; case 0x19c901ce: return "_ZTv0_n12_NSt9strstreamD0Ev"; case 0x1a00f889: return "_ZNSt9exceptionD2Ev"; case 0x1a4f2fa6: return "_ZNSt8ios_base7failureD0Ev"; case 0x1a7f963c: return "_ZNKSt8numpunctIcE11do_truenameEv"; case 0x1b266c3d: return "_ZNSt9money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE7_GetcatEPPKNSt6locale5facetE"; case 0x1b6a7482: return "_ZNKSt7_MpunctIwE13do_neg_formatEv"; case 0x1b6ad260: return "_ZSt13resetiosflagsNSt5_IosbIiE9_FmtflagsE"; case 0x1b9b3b5c: return "_ZNSt8time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEED0Ev"; case 0x1bccd2ca: return "_ZNKSt8time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE7_GetintERS3_S5_iiRi"; case 0x1c3f1c4f: return "_ZNSt6_MutexD1Ev"; case 0x1c8083c5: return "_ZNSt12strstreambufD0Ev"; case 0x1c8405dc: return "_ZNSt7_MpunctIcEC2Ejb"; case 0x1cf6785d: return "_ZSt9use_facetISt5ctypeIwEERKT_RKSt6locale"; case 0x1d43fb44: return "_ZSt9use_facetISt8numpunctIwEERKT_RKSt6locale"; case 0x1ee13e83: return "_ZNSt6locale5facetD0Ev"; case 0x1f2e9f4e: return "_ZNSt15basic_streambufIwSt11char_traitsIwEE9underflowEv"; case 0x1f3a9ada: return "_ZNSt12strstreambuf7seekposESt4fposISt9_MbstatetENSt5_IosbIiE9_OpenmodeE"; case 0x2070a73d: return "_ZNSt6locale7_LocimpC1ERKS0_"; case 0x207b56fa: return "_ZNKSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE8_GetffldEPcRS3_S6_RKSt6locale"; case 0x20a02b6d: return "_ZNSt6locale2idcvjEv"; case 0x20f7e066: return "_ZNSt10moneypunctIwLb0EED0Ev"; case 0x21659e45: return "_ZNKSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE5_FputES3_RSt8ios_basecPKcjjjj"; case 0x22777290: return "_ZNSs7replaceEjjPKcj"; case 0x229a0963: return "_ZNKSt5ctypeIwE5do_isEsw"; case 0x2354ec0a: return "_ZNKSt7codecvtIwcSt9_MbstatetE10do_unshiftERS0_PcS3_RS3_"; case 
0x2356ef16: return "_ZnajRKSt9nothrow_t"; case 0x23a87483: return "_ZNKSt8time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE11do_get_timeES3_S3_RSt8ios_baseRNSt5_IosbIiE8_IostateEPSt2tm"; case 0x23ef7642: return "_ZNKSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE8_GetifldEPcRS3_S6_NSt5_IosbIiE9_FmtflagsERKSt6locale"; case 0x258359df: return "_ZNKSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE6do_putES3_RSt8ios_basece"; case 0x2670b433: return "_ZNKSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE8_GetffldEPcRS3_S6_RKSt6locale"; case 0x268c3ea5: return "_ZNKSt7_MpunctIwE13do_pos_formatEv"; case 0x26e8e1cf: return "_ZNKSt5ctypeIwE5do_isEPKwS2_Ps"; case 0x273be056: return "_ZNKSt9money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE8_PutmfldES3_bRSt8ios_basecbSs"; case 0x281f9107: return "_ZTv0_n12_NSiD1Ev"; case 0x294779fb: return "_ZNSt8ios_base4InitD2Ev"; case 0x2954d64d: return "_ZNSt13basic_filebufIwSt11char_traitsIwEE9underflowEv"; case 0x29c11f46: return "_ZNKSt7codecvtIccSt9_MbstatetE9do_lengthERKS0_PKcS5_j"; case 0x29c90b94: return "_ZNKSt8numpunctIcE16do_thousands_sepEv"; case 0x2a16469d: return "_ZNSt8ios_base5imbueERKSt6locale"; case 0x2ac890f4: return "_ZNKSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE6do_getES3_S3_RSt8ios_baseRNSt5_IosbIiE8_IostateERb"; case 0x2adccb1a: return "_ZNKSt7_MpunctIcE14do_frac_digitsEv"; case 0x2af79bd6: return "_ZNKSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE5_IputES3_RSt8ios_basecPcj"; case 0x2b05b95a: return "_ZNKSt7_MpunctIcE11do_groupingEv"; case 0x2b88f26e: return "_ZNSt15basic_streambufIwSt11char_traitsIwEED0Ev"; case 0x2c241d13: return "_ZnajjRKSt9nothrow_t"; case 0x2c6ce396: return "_ZNKSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE6do_getES3_S3_RSt8ios_baseRNSt5_IosbIiE8_IostateERd"; case 0x2cf8ea50: return "_ZNKSt7codecvtIwcSt9_MbstatetE16do_always_noconvEv"; case 0x2d489b47: return "_ZNSt15basic_streambufIcSt11char_traitsIcEE9underflowEv"; case 
0x2d50650f: return "_ZSt9use_facetISt10moneypunctIcLb1EEERKT_RKSt6locale"; case 0x2d8be7e8: return "_ZNKSt9exception6_RaiseEv"; case 0x2daa5a42: return "_ZTv0_n12_NSt9strstreamD1Ev"; case 0x2e2b80c8: return "_ZNKSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE6do_getES3_S3_RSt8ios_baseRNSt5_IosbIiE8_IostateERPv"; case 0x2e84ebb3: return "_ZNSt8_LocinfoC1EiPKc"; case 0x2eb5c13a: return "_ZNSt13basic_filebufIcSt11char_traitsIcEE4syncEv"; case 0x2f29da90: return "_ZNSt12strstreambuf5_TidyEv"; case 0x2ff8d101: return "_ZNSt6localeC1ERKS_S1_i"; case 0x30195cf5: return "_ZNKSt8numpunctIcE11do_groupingEv"; case 0x30ce43d4: return "_ZNSt8numpunctIcED0Ev"; case 0x30e297ea: return "_ZNSt7_MpunctIcEC2ERKSt8_Locinfojb"; case 0x316b7a34: return "_ZNSt9exceptionD1Ev"; case 0x31a81476: return "_ZdlPvj"; case 0x31b3e5cc: return "_ZNSs5_TidyEbj"; case 0x3286b855: return "_ZNSt8time_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE7_GetcatEPPKNSt6locale5facetE"; case 0x332f8409: return "_ZNSt8time_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE7_GetcatEPPKNSt6locale5facetE"; case 0x336e904e: return "_ZNSdD0Ev"; case 0x33e04d8e: return "_ZNKSt7collateIwE12do_transformEPKwS2_"; case 0x34b63588: return "_ZNKSt5ctypeIwE9_DonarrowEwc"; case 0x34edd72b: return "_ZNSt10moneypunctIwLb0EED1Ev"; case 0x360f8a4f: return "_ZNSt9money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEED1Ev"; case 0x3697bbd3: return "_ZNSt8ios_base5_InitEv"; case 0x36e7826a: return "_ZNSt7collateIcED1Ev"; case 0x3783acfa: return "_ZTv0_n12_NSt13basic_istreamIwSt11char_traitsIwEED1Ev"; case 0x38783beb: return "_ZNKSt8time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE11do_get_yearES3_S3_RSt8ios_baseRNSt5_IosbIiE8_IostateEPSt2tm"; case 0x3933645f: return "_ZNKSt7_MpunctIwE14do_frac_digitsEv"; case 0x3937f2f8: return "_ZNKSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE6do_putES3_RSt8ios_basecm"; case 0x39775ce9: return "_ZNSt11logic_errorD2Ev"; case 0x3ad12959: return 
"_ZNSt9basic_iosIcSt11char_traitsIcEE4initEPSt15basic_streambufIcS1_Eb"; case 0x3bac19dc: return "_ZThn8_NSdD0Ev"; case 0x3bda45a7: return "_ZNKSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE6do_putES3_RSt8ios_basecy"; case 0x3d32a7f4: return "_ZNSt6localeC2EPKci"; case 0x3da21a90: return "_ZNSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEED1Ev"; case 0x3e18602a: return "_ZNKSt12codecvt_base13do_max_lengthEv"; case 0x3eeb7167: return "_ZNSt13basic_filebufIcSt11char_traitsIcEE7_UnlockEv"; case 0x3f6a6e68: return "_ZNSt13basic_filebufIcSt11char_traitsIcEE9_EndwriteEv"; case 0x3f9cb259: return "_ZNKSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE6do_getES3_S3_RSt8ios_baseRNSt5_IosbIiE8_IostateERl"; case 0x3fc2324d: return "_ZNKSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE6do_putES3_RSt8ios_basecd"; case 0x409409af: return "_ZNSt13basic_filebufIcSt11char_traitsIcEE6setbufEPci"; case 0x411b923e: return "_ZSt9use_facetISt8numpunctIcEERKT_RKSt6locale"; case 0x4148e091: return "_ZNKSt9money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE8_GetmfldERS3_S5_bRSt8ios_base"; case 0x417f47af: return "_ZSt9use_facetISt10moneypunctIcLb0EEERKT_RKSt6locale"; case 0x42c40b2f: return "_ZNSt12out_of_rangeD0Ev"; case 0x45010630: return "_ZNSt10moneypunctIcLb1EED0Ev"; case 0x4520d6a2: return "_ZNSt13basic_filebufIcSt11char_traitsIcEE5_LockEv"; case 0x46034d2e: return "_ZNSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEED0Ev"; case 0x460e5cb7: return "_ZNSt13basic_filebufIcSt11char_traitsIcEE7seekposESt4fposISt9_MbstatetENSt5_IosbIiE9_OpenmodeE"; case 0x4761783a: return "_ZNSt13basic_filebufIcSt11char_traitsIcEE5imbueERKSt6locale"; case 0x47aab531: return "_ZNSt7_MpunctIcED0Ev"; case 0x47e5c318: return "_ZNSt8_LocinfoD2Ev"; case 0x4827e6be: return "_ZNKSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE6do_getES3_S3_RSt8ios_baseRNSt5_IosbIiE8_IostateERe"; case 0x48d101ef: return "_ZNKSt8ios_base7failure8_DoraiseEv"; case 0x493212da: 
return "_ZNSt8time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEED0Ev"; case 0x4952490e: return "_ZNSt8ios_base5clearENSt5_IosbIiE8_IostateEb"; case 0x496c6f50: return "_Getctyptab"; case 0x49d9ddaf: return "_ZNKSt8numpunctIwE12do_falsenameEv"; case 0x49da8c5f: return "_ZNKSt9money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE6do_getES3_S3_bRSt8ios_baseRNSt5_IosbIiE8_IostateERe"; case 0x49f7d434: return "_ZNSt8numpunctIwED0Ev"; case 0x4a40969d: return "_Fac_tidy"; case 0x4a799510: return "_ZNKSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE6do_getES3_S3_RSt8ios_baseRNSt5_IosbIiE8_IostateERl"; case 0x4aec14d5: return "_ZNSt12length_errorD1Ev"; case 0x4aff73cc: return "_ZSt14_Debug_messagePKcS0_"; case 0x4b1ad744: return "_ZdaPvjRKSt9nothrow_t"; case 0x4b5a8abc: return "_ZNSt13basic_filebufIwSt11char_traitsIwEE6setbufEPwi"; case 0x4bc193c7: return "_ZNSt10ostrstreamC2EPciNSt5_IosbIiE9_OpenmodeE"; case 0x4bda379a: return "_ZNSt8ios_base4InitC1Ev"; case 0x4bee7ba9: return "_ZNSt8ios_base7failureD1Ev"; case 0x4cb35e7d: return "_ZNSt9time_baseD1Ev"; case 0x4cdab0ba: return "_ZNSt7_MpunctIwED0Ev"; case 0x4daf3fcf: return "_ZNSt15basic_streambufIcSt11char_traitsIcEE6xsgetnEPci"; case 0x4e34cf83: return "_ZNSbIwSt11char_traitsIwESaIwEE5_GrowEjb"; case 0x4e5cd916: return "_ZNKSt8numpunctIwE11do_groupingEv"; case 0x4ec89bf8: return "_ZNSt7collateIcE7_GetcatEPPKNSt6locale5facetE"; case 0x4ef0eb8e: return "_ZNSt12strstreambuf7seekoffElNSt5_IosbIiE8_SeekdirENS1_9_OpenmodeE"; case 0x4fde96de: return "_ZNSt15basic_streambufIwSt11char_traitsIwEED1Ev"; case 0x5015b8d3: return "_ZSt7_FiopenPKwNSt5_IosbIiE9_OpenmodeEi"; case 0x50b34c09: return "_ZNKSt9exception4whatEv"; case 0x5102ac61: return "_ZNKSt7_MpunctIwE14do_curr_symbolEv"; case 0x5119680b: return "_ZNSt8_LocinfoD1Ev"; case 0x5127dcd1: return "_ZNSsC1Ev"; case 0x522b0457: return "_ZNSt10istrstreamD0Ev"; case 0x52330fbd: return "_ZNSt13runtime_errorD0Ev"; case 0x5298ef8e: return "_ZdaPvRKSt9nothrow_t"; case 
0x5333bdc9: return "_ZNKSt13runtime_error4whatEv"; case 0x53693d40: return "_ZSt11setiosflagsNSt5_IosbIiE9_FmtflagsE"; case 0x5438d7d8: return "_ZdaPvS_"; case 0x550255f7: return "_ZNKSt7codecvtIccSt9_MbstatetE10do_unshiftERS0_PcS3_RS3_"; case 0x55481e6f: return "_ZNSt15basic_streambufIwSt11char_traitsIwEE9showmanycEv"; case 0x5560c79e: return "_ZNSdD1Ev"; case 0x55b3ebf2: return "_ZNSt9strstreamC2EPciNSt5_IosbIiE9_OpenmodeE"; case 0x563fd2be: return "_ZNSt6localeC2ERKS_PKci"; case 0x5656ccff: return "_ZNKSt7collateIcE10do_compareEPKcS2_S2_S2_"; case 0x56d3d4f0: return "_ZNSt9bad_allocD1Ev"; case 0x56fac416: return "_ZNKSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE6do_getES3_S3_RSt8ios_baseRNSt5_IosbIiE8_IostateERe"; case 0x577c2695: return "_ZNSt6_Mutex5_LockEv"; case 0x57ef52f0: return "_ZNSt15basic_streambufIwSt11char_traitsIwEE7_UnlockEv"; case 0x581fc95b: return "_ZNSt5ctypeIcED0Ev"; case 0x58fad1c1: return "_ZNSt5ctypeIwE7_GetcatEPPKNSt6locale5facetE"; case 0x5949408e: return "_ZNSt8ios_base5_TidyEv"; case 0x59c77266: return "_ZNKSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE6do_getES3_S3_RSt8ios_baseRNSt5_IosbIiE8_IostateERd"; case 0x5a3ad4bd: return "_ZNKSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE6do_getES3_S3_RSt8ios_baseRNSt5_IosbIiE8_IostateERf"; case 0x5a5a9107: return "_ZNSt6localeC2Ev"; case 0x5a6e4e50: return "_ZNSt6locale7_Locimp9_MakewlocERKSt8_LocinfoiPS0_PKS_"; case 0x5a898327: return "_ZNSt15basic_streambufIwSt11char_traitsIwEE7seekoffElNSt5_IosbIiE8_SeekdirENS4_9_OpenmodeE"; case 0x5adf9060: return "_ZNKSt5ctypeIcE8do_widenEPKcS2_Pc"; case 0x5b71b85d: return "_ZNSt19istreambuf_iteratorIwSt11char_traitsIwEE4_IncEv"; case 0x5c15972f: return "_ZNSt13basic_ostreamIwSt11char_traitsIwEED1Ev"; case 0x5ca98e4a: return "_ZNSt13basic_filebufIcSt11char_traitsIcEED0Ev"; case 0x5e1f2d37: return "_ZNKSt9exception8_DoraiseEv"; case 0x5e55ab8c: return 
"_ZSt10_GetloctxtIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEiRT0_S5_jPKT_"; case 0x5ed4fb7a: return "_ZTv0_n12_NSt13basic_istreamIwSt11char_traitsIwEED0Ev"; case 0x604fec95: return "_ZNSt12out_of_rangeD1Ev"; case 0x605131d5: return "_ZNSt8_LocinfoC1EPKc"; case 0x6051c802: return "_ZNSt7codecvtIccSt9_MbstatetED0Ev"; case 0x608abbb5: return "_ZNSt13basic_filebufIcSt11char_traitsIcEE5uflowEv"; case 0x61119152: return "_ZNSt6locale5_InitEv"; case 0x61248c80: return "_ZNKSt8time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE13do_date_orderEv"; case 0x61a23009: return "_ZNKSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE5_FputES3_RSt8ios_basewPKcjjjj"; case 0x61f55c30: return "_ZNKSt5ctypeIcE8do_widenEc"; case 0x629b8531: return "_ZNKSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE5_IputES3_RSt8ios_basewPcj"; case 0x62d6bf82: return "_ZNSt8time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEED1Ev"; case 0x62f52bb0: return "_ZNSt7_MpunctIwEC2ERKSt8_Locinfojb"; case 0x635166c3: return "_ZNKSt9money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE6do_putES3_bRSt8ios_basewe"; case 0x63a2b2cc: return "_ZNKSt8messagesIcE8do_closeEi"; case 0x643235cf: return "_ZNSt9strstreamD1Ev"; case 0x6437a975: return "_ZNKSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE6do_getES3_S3_RSt8ios_baseRNSt5_IosbIiE8_IostateERt"; case 0x643e67f4: return "_ZNKSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE6do_getES3_S3_RSt8ios_baseRNSt5_IosbIiE8_IostateERPv"; case 0x6463d9ea: return "_ZNKSt8messagesIwE6do_getEiiiRKSbIwSt11char_traitsIwESaIwEE"; case 0x64ce0374: return "_ZNSbIwSt11char_traitsIwESaIwEE7replaceEjjPKwj"; case 0x64ed868e: return "_ZSt9terminatev"; case 0x6500d2d5: return "_ZNSt9money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE7_GetcatEPPKNSt6locale5facetE"; case 0x65f19631: return "_ZTv0_n12_NSiD0Ev"; case 0x660882e8: return "_ZNSt6localeC1Ev"; case 0x667d741b: return 
"_ZNSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEED0Ev"; case 0x668b31c6: return "_ZNSs5_GrowEjb"; case 0x66f39adb: return "_ZNSt8numpunctIwED1Ev"; case 0x66fcc6f4: return "_ZNSt8messagesIwE7_GetcatEPPKNSt6locale5facetE"; case 0x67948307: return "_ZNKSt7codecvtIwcSt9_MbstatetE9do_lengthERKS0_PKcS5_j"; case 0x67c09257: return "_ZNKSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE6do_getES3_S3_RSt8ios_baseRNSt5_IosbIiE8_IostateERx"; case 0x67edde2f: return "_ZdlPvjRKSt9nothrow_t"; case 0x67fbabf0: return "_ZNKSt8time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE14do_get_weekdayES3_S3_RSt8ios_baseRNSt5_IosbIiE8_IostateEPSt2tm"; case 0x683ca70a: return "_ZNKSt12_String_base5_XlenEv"; case 0x6863452e: return "_ZNSt6locale5facetD1Ev"; case 0x6929318d: return "_ZNSs6assignERKSsjj"; case 0x696b47f2: return "_ZNKSt7_MpunctIcE13do_neg_formatEv"; case 0x6a6b90c9: return "_ZSt15set_new_handlerPFvvE"; case 0x6adc320a: return "_ZNSt9money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEED0Ev"; case 0x6b493669: return "_ZSt7setbasei"; case 0x6b913d53: return "_ZNSs6insertEjjc"; case 0x6c19db26: return "_ZNKSt7_MpunctIcE16do_thousands_sepEv"; case 0x6c386f54: return "_ZNSt15basic_streambufIcSt11char_traitsIcEE7seekposESt4fposISt9_MbstatetENSt5_IosbIiE9_OpenmodeE"; case 0x6c8dc459: return "_ZNKSt8bad_cast4whatEv"; case 0x6cb1a335: return "_ZNSt6locale5facet7_DecrefEv"; case 0x6d483b7a: return "_ZNSt12strstreambuf9pbackfailEi"; case 0x6daed882: return "_ZNSt8ios_baseD0Ev"; case 0x6dbbb9de: return "_ZNKSt5ctypeIcE10do_toupperEc"; case 0x6e0bf85d: return "_ZTv0_n12_NSt10istrstreamD1Ev"; case 0x6e4a84c1: return "_ZNSt5ctypeIcED1Ev"; case 0x6e61426d: return "_ZNSt13basic_filebufIwSt11char_traitsIwEED1Ev"; case 0x6f1945fc: return "_ZNSoD1Ev"; case 0x6fe060a0: return "_ZNSt15basic_streambufIwSt11char_traitsIwEE7seekposESt4fposISt9_MbstatetENSt5_IosbIiE9_OpenmodeE"; case 0x7008e209: return "_ZNKSt5ctypeIwE10do_toupperEw"; case 0x708cf940: return 
"_ZNKSt8time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE11do_get_dateES3_S3_RSt8ios_baseRNSt5_IosbIiE8_IostateEPSt2tm"; case 0x709ab035: return "_ZNSt15basic_streambufIcSt11char_traitsIcEE6setbufEPci"; case 0x7142ad20: return "_ZNKSt7_MpunctIcE16do_decimal_pointEv"; case 0x718977c5: return "_ZNSt8time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE7_GetcatEPPKNSt6locale5facetE"; case 0x736c5f22: return "_ZNSoD0Ev"; case 0x74a39b4f: return "_ZThn8_NSt9strstreamD1Ev"; case 0x753c71db: return "_ZNKSt7_MpunctIcE13do_pos_formatEv"; case 0x75824de0: return "_ZNSt6_MutexC1Ev"; case 0x75975eb4: return "_ZNSsC1EPKc"; case 0x75a0617c: return "_ZNKSt7_MpunctIwE11do_groupingEv"; case 0x764ceaa4: return "_ZNSt10ostrstreamD0Ev"; case 0x767a4e70: return "_ZNSt6_WinitC2Ev"; case 0x76db6974: return "_ZNSt7codecvtIwcSt9_MbstatetED0Ev"; case 0x76de9b0f: return "_ZNSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEED1Ev"; case 0x76e846b2: return "_ZNSt15basic_streambufIwSt11char_traitsIwEE6xsputnEPKwi"; case 0x77c1d3a9: return "_ZNKSt13runtime_error8_DoraiseEv"; case 0x7882e64e: return "_ZNSt7collateIwED0Ev"; case 0x78a142d0: return "_ZSt7_FiopenPKcNSt5_IosbIiE9_OpenmodeEi"; case 0x79a415f8: return "_ZNSbIwSt11char_traitsIwESaIwEE6insertEjjw"; case 0x79ad3575: return "_ZTv0_n12_NSoD1Ev"; case 0x7a180518: return "_ZNSt10money_baseD0Ev"; case 0x7b1db41e: return "_ZNSt6locale7_AddfacEPNS_5facetEjj"; case 0x7b5fce95: return "_ZNSt15basic_streambufIwSt11char_traitsIwEE6setbufEPwi"; case 0x7c391411: return "_ZNSt10moneypunctIcLb0EED0Ev"; case 0x7cdbda48: return "_ZNSt7collateIcED0Ev"; case 0x7d23aa12: return "_ZNSt10moneypunctIwLb0EE7_GetcatEPPKNSt6locale5facetE"; case 0x7da7fdb1: return "_ZNKSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE6do_putES3_RSt8ios_basewy"; case 0x7e7ac30e: return "_ZNSt6locale5emptyEv"; case 0x7ebad3f0: return "_ZNKSt9money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE8_PutmfldES3_bRSt8ios_basewbSbIwS2_SaIwEE"; case 0x7fe08910: 
return "_ZNSt10moneypunctIcLb0EE7_GetcatEPPKNSt6locale5facetE"; case 0x7ff35597: return "_ZNSt9money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE7_GetcatEPPKNSt6locale5facetE"; case 0x8006c4ec: return "_ZNSt9money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEED0Ev"; case 0x8044f596: return "_ZNSt13basic_filebufIwSt11char_traitsIwEE5_LockEv"; case 0x81027e75: return "_ZNSt7_MpunctIwE5_InitERKSt8_Locinfo"; case 0x816aebc3: return "_ZNSt9bad_allocD0Ev"; case 0x823759d3: return "_ZNKSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE6do_putES3_RSt8ios_basewl"; case 0x8341b529: return "_ZNSt13basic_filebufIwSt11char_traitsIwEE8overflowEi"; case 0x83b2cc6f: return "_Znwj"; case 0x83bca135: return "_ZNKSt11logic_error4whatEv"; case 0x83cba890: return "_ZNSt6locale5facetD2Ev"; case 0x84023c03: return "_ZSt12setprecisioni"; case 0x854bc7c7: return "_ZNSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEED0Ev"; case 0x85b3c6da: return "_ZNKSt8_Locinfo7_GetcvtEv"; case 0x85ba062f: return "_ZNSt15basic_streambufIcSt11char_traitsIcEE7_UnlockEv"; case 0x867956a4: return "_ZNSt9basic_iosIcSt11char_traitsIcEED1Ev"; case 0x868531a3: return "_ZdaPvj"; case 0x86c66cfc: return "_ZNSsD1Ev"; case 0x871506ea: return "_ZNSbIwSt11char_traitsIwESaIwEE6assignERKS2_jj"; case 0x8729f617: return "_ZNSt10ostrstreamC1EPciNSt5_IosbIiE9_OpenmodeE"; case 0x87b1f5eb: return "_ZNSt9exceptionD0Ev"; case 0x88052736: return "_ZTv0_n12_NSt10ostrstreamD0Ev"; case 0x883e1f16: return "_ZNKSt11logic_error8_DoraiseEv"; case 0x884b021b: return "_ZNKSt5ctypeIwE8_DowidenEc"; case 0x8a665143: return "_ZNSt8_Locinfo8_AddcatsEiPKc"; case 0x8a85d688: return "_ZNKSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE6do_putES3_RSt8ios_basewx"; case 0x8bfd4395: return "_ZNSt9basic_iosIwSt11char_traitsIwEE4initEPSt15basic_streambufIwS1_Eb"; case 0x8c2e6d06: return "_ZNKSt8messagesIwE7do_openERKSsRKSt6locale"; case 0x8c3afd4c: return "_ZSt10unexpectedv"; case 0x8c6b8d39: return 
"_ZNSt13basic_filebufIcSt11char_traitsIcEED1Ev"; case 0x8cda1f3b: return "_ZSt10_GetloctxtIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEiRT0_S5_jPKT_"; case 0x8d4e266b: return "_ZNKSt8_Locinfo9_GetctypeEv"; case 0x8fa764f3: return "_ZNSt6_WinitC1Ev"; case 0x900d1fa4: return "_ZNSt8time_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEED0Ev"; case 0x903afa37: return "_ZTv0_n12_NSt13basic_ostreamIwSt11char_traitsIwEED0Ev"; case 0x904dbd32: return "_ZNSt6locale7_LocimpC1Eb"; case 0x9111ec36: return "_ZNSt13messages_baseD0Ev"; case 0x91959ed6: return "_ZNKSt5ctypeIcE9do_narrowEcc"; case 0x91b0e37e: return "_ZSt14set_unexpectedPFvvE"; case 0x9268d6e7: return "_ZNKSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE6do_getES3_S3_RSt8ios_baseRNSt5_IosbIiE8_IostateERj"; case 0x928fbe36: return "_ZTv0_n12_NSdD1Ev"; case 0x93c638e9: return "_ZNSt19istreambuf_iteratorIwSt11char_traitsIwEE5_PeekEv"; case 0x94c49383: return "_ZdlPvS_"; case 0x94fa1f5b: return "_ZNSt15basic_streambufIwSt11char_traitsIwEE5uflowEv"; case 0x95082493: return "_ZNKSt8messagesIcE6do_getEiiiRKSs"; case 0x95b43c9d: return "_ZNSt6locale7_LocimpD2Ev"; case 0x96634e42: return "_ZNKSt9bad_alloc4whatEv"; case 0x96bc2578: return "_Znajj"; case 0x97911f5f: return "_ZNSt15basic_streambufIcSt11char_traitsIcEE5uflowEv"; case 0x984ce3d7: return "_ZNSt8numpunctIcED1Ev"; case 0x9891bf45: return "_ZNKSt7_MpunctIwE16do_thousands_sepEv"; case 0x9a194306: return "_ZNSt8time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE5_InitERKSt8_Locinfo"; case 0x9a449047: return "_ZNSt7_MpunctIwEC2Ejb"; case 0x9aa7a8b3: return "_ZNSt10istrstreamD2Ev"; case 0x9afa5d71: return "_ZNSt10money_baseD2Ev"; case 0x9b5358f9: return "_ZNKSt7_MpunctIcE16do_positive_signEv"; case 0x9c40d1f9: return "_ZNKSt8numpunctIwE16do_decimal_pointEv"; case 0x9c486668: return "_ZNSt6locale7_Locimp9_MakexlocERKSt8_LocinfoiPS0_PKS_"; case 0x9cb73ee0: return "_ZSt6_ThrowRKSt9exception"; case 0x9cfc0eaf: return "_ZNSiD1Ev"; case 0x9d6a8167: return 
"_ZNSbIwSt11char_traitsIwESaIwEE5eraseEjj"; case 0x9dbbe07d: return "_ZNKSt9money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE8_GetmfldERS3_S5_bRSt8ios_base"; case 0x9dc040e4: return "_Deletegloballocale"; case 0x9dcb4bcb: return "_ZNKSt8time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE14do_get_weekdayES3_S3_RSt8ios_baseRNSt5_IosbIiE8_IostateEPSt2tm"; case 0x9e741d47: return "_ZNSsC1ERKSs"; case 0x9ec88ae6: return "_ZNKSt5ctypeIwE10do_tolowerEPwPKw"; case 0x9ef60bf3: return "_ZNKSt5ctypeIwE10do_tolowerEw"; case 0x9f528cd3: return "_ZNKSt7codecvtIccSt9_MbstatetE6do_outERS0_PKcS4_RS4_PcS6_RS6_"; case 0x9f959451: return "_ZNSt13basic_istreamIwSt11char_traitsIwEED1Ev"; case 0x9facb533: return "_ZNSt13messages_baseD1Ev"; case 0x9fd2eea9: return "_ZNSt8_LocinfoC2EiPKc"; case 0xa1c6fc55: return "_ZNSt13basic_filebufIwSt11char_traitsIwEE7_UnlockEv"; case 0xa1de25c2: return "_ZTv0_n12_NSt10ostrstreamD1Ev"; case 0xa22d5dda: return "_ZNSt8messagesIcED0Ev"; case 0xa2fd0ec5: return "_ZNSt13basic_filebufIcSt11char_traitsIcEE9pbackfailEi"; case 0xa35033e8: return "_ZNKSt5ctypeIwE8do_widenEPKcS2_Pw"; case 0xa37c3e51: return "_ZNKSt5ctypeIwE8do_widenEc"; case 0xa3f5c3b2: return "_ZNSt9strstreamD2Ev"; case 0xa433147a: return "_ZNSt8messagesIcE7_GetcatEPPKNSt6locale5facetE"; case 0xa464c70a: return "_ZNKSt9money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE6do_putES3_bRSt8ios_basewRKSbIwS2_SaIwEE"; case 0xa4f6a919: return "_ZThn8_NSdD1Ev"; case 0xa5306edb: return "_ZNSt10moneypunctIwLb1EED1Ev"; case 0xa562099c: return "_ZNSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEED1Ev"; case 0xa700bc7d: return "_ZNKSt7codecvtIwcSt9_MbstatetE6do_outERS0_PKwS4_RS4_PcS6_RS6_"; case 0xa74e5a27: return "_ZNKSt6localeeqERKS_"; case 0xa79c4516: return "_ZNSt15basic_streambufIcSt11char_traitsIcEED0Ev"; case 0xa8ece2e0: return "_ZSt9use_facetISt10moneypunctIwLb0EEERKT_RKSt6locale"; case 0xa8f64fdb: return "_ZNKSt5ctypeIcE10do_tolowerEPcPKc"; case 0xa90c4ff2: return 
"_ZNSt8time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE7_GetcatEPPKNSt6locale5facetE"; case 0xa9116516: return "_ZNSs6appendEjc"; case 0xa94be0fa: return "_ZNSt13basic_filebufIwSt11char_traitsIwEE9_EndwriteEv"; case 0xa957adcc: return "_ZNKSt5ctypeIcE9do_narrowEPKcS2_cPc"; case 0xa9e5bb16: return "_ZNKSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE6do_getES3_S3_RSt8ios_baseRNSt5_IosbIiE8_IostateERm"; case 0xaa520d9f: return "_ZNSt6locale7_Locimp7_AddfacEPNS_5facetEj"; case 0xaae64804: return "_ZNSt8ios_base8_FindarrEi"; case 0xab211d97: return "_ZNKSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE6do_putES3_RSt8ios_basecb"; case 0xab5832fd: return "_ZNSt10money_baseD1Ev"; case 0xabd92bcc: return "_ZNSt7collateIwE7_GetcatEPPKNSt6locale5facetE"; case 0xabdc2b49: return "_ZNSt9money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE7_GetcatEPPKNSt6locale5facetE"; case 0xac6c23c0: return "_ZNKSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE6do_getES3_S3_RSt8ios_baseRNSt5_IosbIiE8_IostateERy"; case 0xad3777a2: return "_ZNSt13basic_filebufIcSt11char_traitsIcEE8overflowEi"; case 0xad382a99: return "_ZdlPvRKSt9nothrow_t"; case 0xad6d839f: return "_ZNSt12codecvt_baseD0Ev"; case 0xad6dbac2: return "_ZNKSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE6do_getES3_S3_RSt8ios_baseRNSt5_IosbIiE8_IostateERm"; case 0xadc2263b: return "_ZNSt15basic_streambufIcSt11char_traitsIcEE6xsputnEPKci"; case 0xae7d042f: return "_ZNSt7codecvtIwcSt9_MbstatetE7_GetcatEPPKNSt6locale5facetE"; case 0xaea59ceb: return "_ZNSt10ctype_baseD0Ev"; case 0xb0c185b7: return "_ZNSt10moneypunctIcLb1EE7_GetcatEPPKNSt6locale5facetE"; case 0xb0e7c2f3: return "_ZNSiD0Ev"; case 0xb1550b3c: return "_ZNKSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE6do_putES3_RSt8ios_basecx"; case 0xb1ac1fa3: return "_ZNSt15basic_streambufIwSt11char_traitsIwEE6xsgetnEPwi"; case 0xb1d696f7: return "_ZNKSt8numpunctIcE12do_falsenameEv"; case 0xb326f699: return 
"_ZNKSt9money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE6do_putES3_bRSt8ios_basece"; case 0xb33ef042: return "_ZNSt8bad_castD0Ev"; case 0xb3f05af3: return "_ZNKSt7collateIcE12do_transformEPKcS2_"; case 0xb4352488: return "_ZNKSt9money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE6do_putES3_bRSt8ios_basecRKSs"; case 0xb4a8791f: return "_ZNSt8time_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEED1Ev"; case 0xb509ab64: return "_ZNSt10moneypunctIcLb1EED1Ev"; case 0xb53fa02e: return "_ZnwjjRKSt9nothrow_t"; case 0xb6a4d760: return "_ZNSt13basic_filebufIwSt11char_traitsIwEE7seekposESt4fposISt9_MbstatetENSt5_IosbIiE9_OpenmodeE"; case 0xb6a7ba7a: return "_ZNSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEED0Ev"; case 0xb74f7b8f: return "_ZNSt6locale7_LocimpC2ERKS0_"; case 0xb7dcbfdd: return "__Setgloballocale"; case 0xb80ca215: return "_ZNSt13basic_filebufIwSt11char_traitsIwEE5imbueERKSt6locale"; case 0xb87c4b43: return "_ZNSt12strstreambuf6freezeEb"; case 0xb8836b50: return "_ZNSt9exception18_Set_raise_handlerEPFvRKS_E"; case 0xb8ec13a5: return "_ZNKSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE6do_putES3_RSt8ios_basewb"; case 0xb9a2282d: return "_ZNSt15basic_streambufIcSt11char_traitsIcEE8overflowEi"; case 0xba0b6300: return "_ZNSt9basic_iosIwSt11char_traitsIwEED1Ev"; case 0xba85ce08: return "_ZNSt12strstreambufD2Ev"; case 0xbaa15803: return "_ZSt4setwi"; case 0xbb4599c5: return "_ZNSt11logic_errorD1Ev"; case 0xbb712718: return "_ZnwjRKSt9nothrow_t"; case 0xbc5ad91c: return "_ZNKSt7collateIwE10do_compareEPKwS2_S2_S2_"; case 0xbd140e12: return "_ZNKSt8time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE11do_get_timeES3_S3_RSt8ios_baseRNSt5_IosbIiE8_IostateEPSt2tm"; case 0xbd316983: return "_ZNSt8numpunctIcE5_InitERKSt8_Locinfo"; case 0xbd35830b: return "_ZdaPvjS_"; case 0xbd58ea5a: return "_ZNSt19ostreambuf_iteratorIwSt11char_traitsIwEEaSEw"; case 0xbda26024: return "_ZNSt9strstreamD0Ev"; case 0xbf9c3609: return 
"_ZNKSt5ctypeIwE10do_toupperEPwPKw"; case 0xc013acd8: return "_ZNSt8ios_base8_CallfnsENS_5eventE"; case 0xc06a4cd8: return "_ZNSt7_MpunctIwED1Ev"; case 0xc22cebd8: return "_ZNSt8messagesIwED1Ev"; case 0xc3d24eb3: return "_ZNSt9basic_iosIwSt11char_traitsIwEED0Ev"; case 0xc41d676d: return "_ZNSt9time_baseD2Ev"; case 0xc4c7993b: return "_ZNSbIwSt11char_traitsIwESaIwEE5_TidyEbj"; case 0xc53ab1c0: return "_ZNSt8numpunctIwE5_InitERKSt8_Locinfo"; case 0xc5977986: return "_ZNSt8ios_base7_AddstdEv"; case 0xc612a38e: return "_ZNSt6_WinitD1Ev"; case 0xc6e09225: return "_ZNSt13basic_filebufIwSt11char_traitsIwEE5_InitEPSt6_FiletNS2_7_InitflE"; case 0xc6ea0fd0: return "_ZNSt6locale7classicEv"; case 0xc6f18e84: return "_ZNKSt9money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE6do_getES3_S3_bRSt8ios_baseRNSt5_IosbIiE8_IostateERSbIwS2_SaIwEE"; case 0xc79278ec: return "_ZNSt15basic_streambufIcSt11char_traitsIcEE5imbueERKSt6locale"; case 0xc7931798: return "_ZNKSt12_String_base5_XranEv"; case 0xc7d0ee0c: return "_ZNKSt8time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE16do_get_monthnameES3_S3_RSt8ios_baseRNSt5_IosbIiE8_IostateEPSt2tm"; case 0xc862f7c8: return "_ZNSt12strstreambuf8overflowEi"; case 0xcac83a05: return "_ZNSt6locale7_LocimpC2Eb"; case 0xcb7d00a4: return "_ZNSt6_WinitD2Ev"; case 0xcb82e0dc: return "_ZSt13set_terminatePFvvE"; case 0xcbe74ad3: return "_ZNKSt8messagesIwE8do_closeEi"; case 0xcc79f55d: return "_ZNKSt7_MpunctIcE16do_negative_signEv"; case 0xccf14bd5: return "_ZNSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE7_GetcatEPPKNSt6locale5facetE"; case 0xcd33ed4f: return "_ZNSbIwSt11char_traitsIwESaIwEEC1Ev"; case 0xcdafdf19: return "_ZNSt15basic_streambufIcSt11char_traitsIcEE9showmanycEv"; case 0xce653b6c: return "_ZNSt6_MutexC2Ev"; case 0xce6705c3: return "_ZNKSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE8_GetifldEPcRS3_S6_NSt5_IosbIiE9_FmtflagsERKSt6locale"; case 0xce8c6abc: return "_ZNSt8ios_base4InitC2Ev"; case 0xcf9b4d80: return 
"_ZNSt10moneypunctIwLb1EED0Ev"; case 0xd05ea37c: return "_ZNKSt19istreambuf_iteratorIwSt11char_traitsIwEEdeEv"; case 0xd1b043b7: return "_ZSt10_MaklocchrIwET_cPS0_RKSt7_Cvtvec"; case 0xd1ee6195: return "_ZNKSt8time_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE6do_putES3_RSt8ios_basecPKSt2tmcc"; case 0xd2f9d93d: return "_ZNKSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE6do_putES3_RSt8ios_basewe"; case 0xd356aefd: return "_ZNSt6_Mutex7_UnlockEv"; case 0xd38f4018: return "_ZSt11_MaklocbyteIwEcT_RKSt7_Cvtvec"; case 0xd4838fbd: return "_ZNKSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE6do_putES3_RSt8ios_basewm"; case 0xd4ba5b31: return "_ZNSt8_LocinfoC2EPKc"; case 0xd5244a29: return "_ZNSt10moneypunctIwLb1EE7_GetcatEPPKNSt6locale5facetE"; case 0xd5c5ee3d: return "_ZNKSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE6do_getES3_S3_RSt8ios_baseRNSt5_IosbIiE8_IostateERj"; case 0xd6ee1090: return "_ZNKSt9money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE6do_getES3_S3_bRSt8ios_baseRNSt5_IosbIiE8_IostateERe"; case 0xd73321ed: return "_ZNSt10ostrstreamD1Ev"; case 0xd76b2e07: return "_ZNKSt7codecvtIwcSt9_MbstatetE13do_max_lengthEv"; case 0xd78efcc3: return "_ZNSt12strstreambuf9underflowEv"; case 0xd7bc220d: return "_ZNSt8time_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEED1Ev"; case 0xd7d92e51: return "_ZNSt9money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEED0Ev"; case 0xd830252c: return "_ZNSt12strstreambuf5_InitEiPcS0_i"; case 0xd84b3689: return "_ZdlPv"; case 0xd8aeb94a: return "_ZNSt8messagesIcED1Ev"; case 0xd8b23008: return "_ZNSt8ios_baseD2Ev"; case 0xd93d52b1: return "_ZNSt9money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEED1Ev"; case 0xd9a12c5e: return "_ZNKSt5ctypeIcE10do_toupperEPcPKc"; case 0xd9d8af82: return "_ZNSt15basic_streambufIwSt11char_traitsIwEE8overflowEi"; case 0xda1088ce: return "_ZNSt6locale5facet7_IncrefEv"; case 0xda1b159a: return "_ZNSt6_MutexD2Ev"; case 0xda5469b3: return "_ZNSt9time_baseD0Ev"; 
case 0xdab0a910: return "_ZNSt15basic_streambufIwSt11char_traitsIwEE9pbackfailEi"; case 0xdaf3996f: return "_ZNSt6locale6globalERKS_"; case 0xdb5eae26: return "_ZNSt13basic_filebufIcSt11char_traitsIcEE5_InitEPSt6_FiletNS2_7_InitflE"; case 0xdc0c889c: return "_ZNSt8ios_base7copyfmtERKS_"; case 0xdc4d7540: return "_ZNSt5ctypeIwED1Ev"; case 0xdc65ab00: return "_ZNSt9money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEED1Ev"; case 0xdc981b5f: return "_ZNSt13basic_filebufIcSt11char_traitsIcEE9underflowEv"; case 0xdd8b1d47: return "_ZNSs5eraseEjj"; case 0xdefe3230: return "_ZNSt8ios_baseD1Ev"; case 0xdf1e09e1: return "_ZNKSt5ctypeIwE9do_narrowEPKwS2_cPc"; case 0xdf7edb4d: return "_ZSt9use_facetISt10moneypunctIwLb1EEERKT_RKSt6locale"; case 0xe177fd02: return "_ZNSt7_MpunctIcED2Ev"; case 0xe196beab: return "_ZNSt9money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEED0Ev"; case 0xe206c08f: return "_ZNSt13basic_filebufIwSt11char_traitsIwEED0Ev"; case 0xe2b2ac5a: return "_ZNSt6locale5facet9_RegisterEv"; case 0xe3edd790: return "_ZNSt8bad_castD1Ev"; case 0xe528a368: return "_ZNKSt7_MpunctIcE14do_curr_symbolEv"; case 0xe54f1fe0: return "_ZNKSt9bad_alloc8_DoraiseEv"; case 0xe5e1dcbc: return "_ZNSt15basic_streambufIwSt11char_traitsIwEE5imbueERKSt6locale"; case 0xe6547e35: return "_ZNSt8messagesIwED0Ev"; case 0xe667985a: return "_ZNSt8time_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEED0Ev"; case 0xe75f6e21: return "_ZNKSt12length_error8_DoraiseEv"; case 0xe7d8449e: return "_ZdlPvjS_"; case 0xe82a422d: return "_ZNKSt8numpunctIwE11do_truenameEv"; case 0xe8691be5: return "_ZNSt5ctypeIwED0Ev"; case 0xe8c15f8a: return "_ZNSt7_MpunctIwED2Ev"; case 0xe9d7a4ae: return "_ZNKSt8time_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE6do_putES3_RSt8ios_basewPKSt2tmcc"; case 0xeb76301c: return "_ZNSt15basic_streambufIcSt11char_traitsIcEE9pbackfailEi"; case 0xebd4b51d: return 
"_ZNKSt8time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE11do_get_yearES3_S3_RSt8ios_baseRNSt5_IosbIiE8_IostateEPSt2tm"; case 0xece969c0: return "_ZTv0_n12_NSt10istrstreamD0Ev"; case 0xed3da02b: return "_Znwjj"; case 0xee853baf: return "_ZNSt13basic_filebufIwSt11char_traitsIwEE4syncEv"; case 0xef62751c: return "_ZNKSt8time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE13do_date_orderEv"; case 0xef6f90d8: return "_ZNKSt5ctypeIwE11do_scan_notEsPKwS2_"; case 0xef959a6d: return "_ZThn8_NSt9strstreamD0Ev"; case 0xf001a741: return "_ZNSt12strstreambufD1Ev"; case 0xf00401d2: return "_ZNSt9basic_iosIcSt11char_traitsIcEED0Ev"; case 0xf01deff8: return "_ZNKSt7codecvtIwcSt9_MbstatetE5do_inERS0_PKcS4_RS4_PwS6_RS6_"; case 0xf05df017: return "_ZNSt5ctypeIcE7_GetcatEPPKNSt6locale5facetE"; case 0xf127e816: return "_ZNSt10istrstreamD1Ev"; case 0xf1543f02: return "_ZNKSt8_Locinfo8_GetcollEv"; case 0xf1c86c92: return "_ZNKSt12out_of_range8_DoraiseEv"; case 0xf1cff87d: return "_ZNSt10ctype_baseD2Ev"; case 0xf2b9ab86: return "_ZNKSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE6do_getES3_S3_RSt8ios_baseRNSt5_IosbIiE8_IostateERf"; case 0xf30d3407: return "_ZNKSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE6do_putES3_RSt8ios_basewd"; case 0xf51dc289: return "_ZNSt7codecvtIccSt9_MbstatetED1Ev"; case 0xf53021e0: return "_ZNSt8bad_castC1Ev"; case 0xf5825c7d: return "_ZNSt7collateIwED1Ev"; case 0xf584de56: return "_ZNSt6locale7_Locimp8_MakelocERKSt8_LocinfoiPS0_PKS_"; case 0xf58e83a5: return "_Znaj"; case 0xf67a7e17: return "_ZNSt13basic_filebufIwSt11char_traitsIwEE5uflowEv"; case 0xf73f6afc: return "_ZNSt13basic_filebufIcSt11char_traitsIcEE7seekoffElNSt5_IosbIiE8_SeekdirENS4_9_OpenmodeE"; case 0xf7845d1c: return "_ZNSt7_MpunctIcED1Ev"; case 0xf7ba51fd: return "_ZNSt13basic_ostreamIwSt11char_traitsIwEED0Ev"; case 0xf83e8d95: return "_ZNKSt5ctypeIcE10do_tolowerEc"; case 0xf9ff46a1: return 
"_ZNSt13basic_filebufIwSt11char_traitsIwEE7seekoffElNSt5_IosbIiE8_SeekdirENS4_9_OpenmodeE"; case 0xfb36c588: return "_ZNSt9strstreamC1EPciNSt5_IosbIiE9_OpenmodeE"; case 0xfc563813: return "_ZNKSt7codecvtIccSt9_MbstatetE5do_inERS0_PKcS4_RS4_PcS6_RS6_"; case 0xfc825dda: return "_ZNSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE7_GetcatEPPKNSt6locale5facetE"; case 0xfe468b7a: return "_ZTv0_n12_NSdD0Ev"; case 0xfeb4107c: return "_ZNSt12codecvt_baseD2Ev"; case 0xfefd7d3a: return "_ZNKSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE6do_getES3_S3_RSt8ios_baseRNSt5_IosbIiE8_IostateERy"; case 0xffaf3218: return "_ZTv0_n12_NSoD0Ev"; case 0xfff6ef55: return "_ZNKSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE6do_getES3_S3_RSt8ios_baseRNSt5_IosbIiE8_IostateERb"; } if (_module == "sysPrxForUser") switch (fnid) { case 0x02e20ec1: return "__sys_printf_basename"; case 0x0341bb97: return "sys_prx_get_module_id_by_address"; case 0x04e83d2c: return "_sys_strncmp"; case 0x052d29a6: return "_sys_strcat"; case 0x05c65656: return "sys_mempool_try_allocate_block"; case 0x0618936b: return "_sys_vsnprintf"; case 0x06574237: return "_sys_snprintf"; case 0x1573dc3f: return "sys_lwmutex_lock"; case 0x191f0c4a: return "_sys_strrchr"; case 0x1ae10b92: return "_sys_spu_printf_attach_thread"; case 0x1bc200f4: return "sys_lwmutex_unlock"; case 0x1c9a942c: return "sys_lwcond_destroy"; case 0x1ca525a2: return "_sys_strncasecmp"; case 0x1ed454ce: return "sys_spu_elf_get_information"; case 0x24a1ea07: return "sys_ppu_thread_create"; case 0x25596f51: return "sys_mempool_get_count"; case 0x26090058: return "sys_prx_load_module"; case 0x27427742: return "_sys_memmove"; case 0x2a6d9d51: return "sys_lwcond_wait"; case 0x2c847572: return "_sys_process_atexitspawn"; case 0x2d36462b: return "_sys_strlen"; case 0x2f85c0ef: return "sys_lwmutex_create"; case 0x3172759d: return "sys_game_get_temperature"; case 0x318f17e1: return "_sys_memalign"; case 0x350d454e: return 
"sys_ppu_thread_get_id"; case 0x35168520: return "_sys_heap_malloc"; case 0x3bd53c7b: return "_sys_memchr"; case 0x3dd4a957: return "sys_ppu_thread_register_atexit"; case 0x3ef17f8c: return "__sys_look_ctype_table"; case 0x409ad939: return "sys_mmapper_free_memory"; case 0x4232b0db: return "_sys_panic"; case 0x42b23552: return "sys_prx_register_library"; case 0x44265c08: return "_sys_heap_memalign"; case 0x459b4393: return "_sys_strcmp"; case 0x45fe2fce: return "_sys_spu_printf_initialize"; case 0x4643ba6e: return "sys_mmapper_unmap_memory"; case 0x4a071d98: return "sys_interrupt_thread_disestablish"; case 0x4b2f301a: return "_sys_tolower"; case 0x4bbf59d0: return "_sys_net_eurus_post_command"; case 0x4f7172c9: return "sys_process_is_stack"; case 0x5267cb35: return "sys_spinlock_unlock"; case 0x52aadadf: return "sys_lwcond_signal_to"; case 0x5fdfb2fe: return "_sys_spu_printf_detach_group"; case 0x608212fc: return "sys_mempool_free_block"; case 0x620e35a7: return "sys_game_get_system_sw_version"; case 0x637bdaae: return "_proc_spawn"; case 0x67f9fedb: return "sys_game_process_exitspawn2"; case 0x68b9b011: return "_sys_memset"; case 0x6bf66ea7: return "_sys_memcpy"; case 0x6e05231d: return "sys_game_watchdog_stop"; case 0x70258515: return "sys_mmapper_allocate_memory_from_container"; case 0x71a8472a: return "sys_get_random_number"; case 0x722a0254: return "sys_spinlock_trylock"; case 0x74311398: return "sys_prx_get_my_module_id"; case 0x744680a2: return "sys_initialize_tls"; case 0x7498887b: return "_sys_strchr"; case 0x791b9219: return "_sys_vsprintf"; case 0x80fb0c19: return "sys_prx_stop_module"; case 0x8461e528: return "sys_time_get_system_time"; case 0x84bb6774: return "sys_prx_get_module_info"; case 0x893305fa: return "sys_raw_spu_load"; case 0x8985b5b6: return "_sys_heap_stats"; case 0x8a2f159b: return "console_getc"; case 0x8a561d92: return "_sys_heap_free"; case 0x8bb03ab8: return "sys_game_board_storage_write"; case 0x8c2bb498: return 
"sys_spinlock_initialize"; case 0x8cfef376: return "__tls_get_addr"; case 0x96328741: return "_sys_process_at_Exitspawn"; case 0x996f7cf8: return "_sys_strncat"; case 0x99c88692: return "_sys_strcpy"; case 0x9d2ec4ff: return "sys_process_spawn"; case 0x9d3c0f81: return "sys_mempool_destroy"; case 0x9e0623b5: return "sys_game_watchdog_start"; case 0x9f04f7af: return "_sys_printf"; case 0x9f18429d: return "sys_prx_start_module"; case 0x9f950780: return "sys_game_get_rtc_status"; case 0xa146a143: return "sys_mempool_allocate_block"; case 0xa1f9eafe: return "_sys_sprintf"; case 0xa285139d: return "sys_spinlock_lock"; case 0xa2c7ba64: return "sys_prx_exitspawn_with_level"; case 0xa330ad84: return "sys_prx_load_module_on_memcontainer_by_fd"; case 0xa3e3be68: return "sys_ppu_thread_once"; case 0xa5d06bf0: return "sys_prx_get_module_list"; case 0xaa6d9bff: return "sys_prx_load_module_on_memcontainer"; case 0xac6fc404: return "sys_ppu_thread_unregister_atexit"; case 0xacad8fb6: return "sys_game_watchdog_clear"; case 0xaeb78725: return "sys_lwmutex_trylock"; case 0xaede4b03: return "_sys_heap_delete_heap"; case 0xaff080a4: return "sys_ppu_thread_exit"; case 0xb257540b: return "sys_mmapper_allocate_memory"; case 0xb27c8ae7: return "sys_prx_load_module_list"; case 0xb2fcf2c8: return "_sys_heap_create_heap"; case 0xb3bbcf2a: return "_sys_spu_printf_detach_thread"; case 0xb6369393: return "_sys_heap_get_total_free_size"; case 0xb995662e: return "sys_raw_spu_image_load"; case 0xb9bf1078: return "_sys_heap_alloc_heap_memory"; case 0xbab62b99: return "_sys_process_wait_for_game_process"; case 0xbdb18f83: return "_sys_malloc"; case 0xbf8ee5bb: return "sys_process_spawn_with_memory_budget"; case 0xc3476d0c: return "sys_lwmutex_destroy"; case 0xc4fd6121: return "_sys_qsort"; case 0xca9a60bf: return "sys_mempool_create"; case 0xd0ea47a7: return "sys_prx_unregister_library"; case 0xd1ad4570: return "_sys_heap_get_mallinfo"; case 0xd3039d4d: return "_sys_strncpy"; case 0xda0eb71a: return 
"sys_lwcond_create"; case 0xdb6b3250: return "sys_spu_elf_get_segments"; case 0xdc578057: return "sys_mmapper_map_memory"; case 0xdd0c1e09: return "_sys_spu_printf_attach_group"; case 0xdd3b27ac: return "_sys_spu_printf_finalize"; case 0xde2f9c85: return "sys_process_atexit"; case 0xe0998dbf: return "sys_prx_get_module_id_by_name"; case 0xe0da8efd: return "sys_spu_image_close"; case 0xe66bac36: return "console_putc"; case 0xe6f2c1e7: return "sys_process_exit"; case 0xe76964f5: return "sys_game_board_storage_read"; case 0xe7ef3a80: return "sys_prx_load_module_list_on_memcontainer"; case 0xe95ffa0a: return "sys_process_wait_for_game_process"; case 0xe9a1bd84: return "sys_lwcond_signal_all"; case 0xebe5f72f: return "sys_spu_image_import"; case 0xeef75113: return "_sys_toupper"; case 0xef68c17c: return "sys_prx_load_module_by_fd"; case 0xef87a695: return "sys_lwcond_signal"; case 0xf0aece0d: return "sys_prx_unload_module"; case 0xf57e1d6f: return "console_write"; case 0xf7f7fb20: return "_sys_free"; case 0xfa7f693d: return "_sys_vprintf"; case 0xfb5db080: return "_sys_memcmp"; case 0xfc52a7a9: return "sys_game_process_exitspawn"; case 0xfecc05b6: return "__sys_time"; } // Check registered functions if (const auto sm = ppu_module_manager::get_module(_module)) { const auto found = sm->functions.find(fnid); if (found != sm->functions.end()) { return found->second.name; } } return fmt::format("0x%08X", fnid); } // Get variable name by VNID extern std::string ppu_get_variable_name(const std::string& _module, u32 vnid) { if (_module.empty()) switch (vnid) { // these arent the actual hash, but its close enough case 0xd7f43016: return "module_info"; } // Check known FNIDs if (_module == "sys_libc") switch (vnid) { case 0x071928b0: return "_LNan"; case 0x0a331920: return "_Clocale"; case 0x0b2e15ed: return "_malloc_limit"; case 0x0fbc732d: return "_Zero"; case 0x17667744: return "_LInf"; case 0x210b2f6e: return "_FNan"; case 0x2418f6c0: return "__TT800"; case 0x2470d3bc: return 
"_Hugeval"; case 0x26a34f81: return "_Flt"; case 0x277a84bb: return "_Mutex_attr"; case 0x29e76a6d: return "_LXbig"; case 0x2cf8b5d1: return "_Wctrans"; case 0x32e56b1a: return "_Stdin"; case 0x3916a06a: return "_FILE_P_Head"; case 0x45ec2df6: return "_LEps"; case 0x529d4301: return "_Denorm"; case 0x57dbcf27: return "_Inf"; case 0x5ff11eb4: return "_FZero"; case 0x620967c9: return "_Mbcurmax"; case 0x6524499e: return "_FInf"; case 0x67d1406b: return "__ctype_ptr"; case 0x6a09df41: return "_LRteps"; case 0x73898db8: return "environ"; case 0x76628efb: return "_FSnan"; case 0x790b0082: return "_Xbig"; case 0x7aff3242: return "_Snan"; case 0x7bc88211: return "_Tolotab"; case 0x7f456af2: return "_Rteps"; case 0x81acf7c1: return "_LZero"; case 0x8f87ed0c: return "_Times"; case 0x92c43f6d: return "_Eps"; case 0x96e1e748: return "tls_mutex_attr"; case 0x985fc057: return "_Dbl"; case 0x9c8454c9: return "_LSnan"; case 0xaa860d4c: return "_Wctype"; case 0xb5b84f80: return "_LDenorm"; case 0xb5d2f53b: return "_Touptab"; case 0xb6f5f98c: return "_FRteps"; case 0xd59c193c: return "_Nan"; case 0xd698385d: return "_Ldbl"; case 0xd97b0687: return "_Ctype"; case 0xe0bc8d86: return "_Loctab"; case 0xeace53d6: return "_FDenorm"; case 0xeca056df: return "_Locale"; case 0xef25075b: return "_FXbig"; case 0xfb2bd688: return "_Stdout"; case 0xfefbe065: return "_Stderr"; case 0xff2f0cc7: return "_FEps"; } if (_module == "sys_libm") switch (vnid) { case 0x1cf745bc: return "_LErf_one"; case 0x2259ef96: return "_LGamma_big"; case 0x3acad7f1: return "_Erf_small"; case 0x3fb8629d: return "_FErf_one"; case 0x42eb9508: return "_Fenv0"; case 0x4af28f31: return "_FErf_small"; case 0xa8d907ff: return "_LErf_small"; case 0xad443e79: return "_Erf_one"; case 0xe9892674: return "_FGamma_big"; case 0xf39005fc: return "_Gamma_big"; } // Check registered variables if (const auto sm = ppu_module_manager::get_module(_module)) { const auto found = sm->variables.find(vnid); if (found != sm->variables.end()) { 
return found->second.name; } } return fmt::format("0x%08X", vnid); } #if defined(ARCH_X64) auto gen_ghc_cpp_trampoline(ppu_intrp_func_t fn_target) { return [fn_target] (native_asm& c, auto& args) { using namespace asmjit; // Take second ghc arg c.mov(args[0], x86::rbp); c.mov(args[2].r32(), x86::dword_ptr(args[0], ::offset32(&ppu_thread::cia))); c.add(args[2], x86::qword_ptr(reinterpret_cast<u64>(&vm::g_base_addr))); c.jmp(fn_target); }; } #elif defined(ARCH_ARM64) auto gen_ghc_cpp_trampoline(ppu_intrp_func_t fn_target) { return [fn_target] (native_asm& c, auto& args) { using namespace asmjit; // Take second ghc arg c.mov(args[0], a64::x20); Label cia_offset = c.newLabel(); c.ldr(a64::x11, arm::Mem(cia_offset)); c.ldr(a64::x26, arm::Mem(args[0], a64::x11)); Label base_addr = c.newLabel(); c.ldr(a64::x22, arm::Mem(base_addr)); c.ldr(a64::x22, arm::Mem(a64::x22)); c.add(args[2], a64::x22, a64::x26); Label jmp_target = c.newLabel(); c.ldr(a64::x22, arm::Mem(jmp_target)); c.br(a64::x22); c.bind(base_addr); c.embedUInt64(reinterpret_cast<u64>(&vm::g_base_addr)); c.bind(cia_offset); c.embedUInt64(static_cast<u64>(::offset32(&ppu_thread::cia))); c.bind(jmp_target); c.embedUInt64(reinterpret_cast<u64>(fn_target)); }; } #else #error "Not implemented!" 
#endif ppu_function_manager::ppu_function_manager(utils::serial& ar) : addr(ar) { } void ppu_function_manager::save(utils::serial& ar) { ar(addr); } std::vector<ppu_intrp_func_t>& ppu_function_manager::access(bool ghc) { static std::vector<ppu_intrp_func_t> list { [](ppu_thread& ppu, ppu_opcode_t, be_t<u32>* this_op, ppu_intrp_func*) { ppu.cia = vm::get_addr(this_op); ppu_log.error("Unregistered function called (LR=0x%x)", ppu.lr); ppu.gpr[3] = 0; ppu.cia = static_cast<u32>(ppu.lr) & ~3; }, [](ppu_thread& ppu, ppu_opcode_t, be_t<u32>* this_op, ppu_intrp_func*) { ppu.state += cpu_flag::ret; ppu.cia = vm::get_addr(this_op) + 4; }, }; static std::vector<ppu_intrp_func_t> list_ghc { build_function_asm<ppu_intrp_func_t>("ppu_unregistered", gen_ghc_cpp_trampoline(list[0])), build_function_asm<ppu_intrp_func_t>("ppu_return", gen_ghc_cpp_trampoline(list[1])), }; return ghc ? list_ghc : list; } u32 ppu_function_manager::add_function(ppu_intrp_func_t function) { auto& list = access(); auto& list2 = access(true); list.push_back(function); list2.push_back(build_function_asm<ppu_intrp_func_t>("", gen_ghc_cpp_trampoline(function))); return ::size32(list) - 1; }
93,824
C++
.cpp
1,961
45.81744
156
0.793486
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
5,189
PPUDisAsm.cpp
RPCS3_rpcs3/rpcs3/Emu/Cell/PPUDisAsm.cpp
#include "stdafx.h" #include "PPUDisAsm.h" #include "PPUFunction.h" #include "PPUAnalyser.h" #include "Emu/IdManager.h" #include "util/asm.hpp" #include <cmath> const ppu_decoder<PPUDisAsm> s_ppu_disasm; const ppu_decoder<ppu_itype> s_ppu_itype; extern const std::unordered_map<u32, std::string_view>& get_exported_function_names_as_addr_indexed_map(); enum class ppu_syscall_code : u64; extern std::shared_ptr<CPUDisAsm> make_basic_ppu_disasm() { return std::make_shared<PPUDisAsm>(cpu_disasm_mode::normal, vm::g_sudo_addr); } u32 PPUDisAsm::disasm(u32 pc) { last_opcode.clear(); if (pc < m_start_pc) { return 0; } if (m_offset == vm::g_sudo_addr && !vm::check_addr(pc, vm::page_executable)) { return 0; } dump_pc = pc; be_t<u32> op{}; std::memcpy(&op, m_offset + pc, 4); m_op = op; (this->*(s_ppu_disasm.decode(m_op)))({ m_op }); if (m_offset != vm::g_sudo_addr) { // Exported functions lookup is not allowed in this case format_by_mode(); return 4; } const auto& map = get_exported_function_names_as_addr_indexed_map(); if (auto it = map.find(pc); it != map.end()) { last_opcode += " #"; last_opcode += it->second; } format_by_mode(); return 4; } std::pair<const void*, usz> PPUDisAsm::get_memory_span() const { return {m_offset + m_start_pc, (1ull << 32) - m_start_pc}; } std::unique_ptr<CPUDisAsm> PPUDisAsm::copy_type_erased() const { return std::make_unique<PPUDisAsm>(*this); } std::pair<PPUDisAsm::const_op, u64> PPUDisAsm::try_get_const_op_gpr_value(u32 reg, u32 pc, u32 TTL) const { if (!TTL) { // Recursion limit (Time To Live) return {}; } if (pc == umax) { // Default arg: choose pc of previous instruction if (dump_pc == 0) { // Do not underflow return {}; } pc = dump_pc - 4; } #if __cpp_using_enum >= 201907 using enum const_op; #else constexpr const_op none = const_op::none, form = const_op::form, xor_mask = const_op::xor_mask; #endif // Scan PPU executable memory backwards until unmapped or non-executable memory block is encountered for (u32 i = pc; i >= m_start_pc && 
(m_offset != vm::g_sudo_addr || vm::check_addr(i, vm::page_executable));) { const u32 opcode = *reinterpret_cast<const be_t<u32>*>(m_offset + i); const ppu_opcode_t op{ opcode }; const auto type = s_ppu_itype.decode(opcode); if (type & ppu_itype::branch || type == ppu_itype::UNK) { // TODO: Detect calls, ignore them if reg is a non-volatile register return {}; } // Get constant register value #define GET_CONST_OP_REG(var, reg, op) \ {\ /* Search for the constant value of the register*/\ const auto [const_op, value] = try_get_const_op_gpr_value(reg, i - 4, TTL - 1);\ \ if (const_op != const_op::op)\ {\ /* Cannot compute constant value if register/operation is not constant*/\ return {};\ }\ \ var = value;\ } void() /*<- Require a semicolon*/ #define GET_CONST_REG(var, reg) GET_CONST_OP_REG(var, reg, form) switch (type) { case ppu_itype::ADDI: { if (op.rd != reg) { // Destination register is not relevant to us break; } u64 reg_ra = 0; if (op.ra) { GET_CONST_REG(reg_ra, op.ra); } return { form, reg_ra + op.simm16 }; } case ppu_itype::ADDIS: { if (op.rd != reg) { break; } u64 reg_ra = 0; if (op.ra) { GET_CONST_REG(reg_ra, op.ra); } return { form, reg_ra + op.simm16 * 65536 }; } case ppu_itype::ORI: { if (op.rs == op.ra && !op.uimm16) { // NO-OP break; } if (op.ra != reg) { // Destination register is not relevant to us break; } u64 reg_rs = 0; GET_CONST_REG(reg_rs, op.rs); return { form, reg_rs | op.uimm16 }; } case ppu_itype::ORIS: { if (op.rs == op.ra && !op.uimm16) { // NO-OP break; } if (op.ra != reg) { break; } u64 reg_rs = 0; GET_CONST_REG(reg_rs, op.rs); return { form, reg_rs | (u64{op.uimm16} << 16)}; } case ppu_itype::XORIS: { if (op.ra != reg) { break; } const auto [const_op, reg_rs] = try_get_const_op_gpr_value(op.rs, i - 4, TTL - 1); if (const_op == none) { return { xor_mask, (u64{op.uimm16} << 16) }; } if (const_op != form) { // Unexpected return {}; } return { form, reg_rs ^ (u64{op.uimm16} << 16)}; } case ppu_itype::RLDICR: { if (op.ra != reg) { break; } 
u64 reg_rs = 0; GET_CONST_REG(reg_rs, op.rs); return { form, utils::rol64(reg_rs, op.sh64) & (~0ull << (op.mbe64 ^ 63)) }; } case ppu_itype::OR: { if (op.rs == op.rb && op.rs == op.ra) { // NO-OP break; } if (op.ra != reg) { break; } u64 reg_rs = 0, reg_rb = 0; GET_CONST_REG(reg_rs, op.rs); // Try to optimize if it's a register move operation if (op.rs != op.rb) { GET_CONST_REG(reg_rb, op.rb); } return { form, reg_rs | reg_rb }; } case ppu_itype::XOR: { if (op.ra != reg) { break; } if (op.rs == op.rb) { return { form, 0 }; } const auto [const_op_rs, reg_rs] = try_get_const_op_gpr_value(op.rs, i - 4, TTL - 1); const auto [const_op_rb, reg_rb] = try_get_const_op_gpr_value(op.rb, i - 4, TTL - 1); if (const_op_rs == form && const_op_rb == form) { // Normally it is not the case return { form, reg_rs ^ reg_rb }; } if (const_op_rs == form && const_op_rb == none) { return { xor_mask, reg_rs }; } else if (const_op_rb == form && const_op_rs == none) { return { xor_mask, reg_rb }; } return {}; } default: { // Ordinary test // TODO: Proper detection of destination register(s) modification (if there are any) if (op.ra == reg || op.rd == reg) { return {}; } break; } } if (i == 0) { return {}; } i -= 4; } return {}; } enum CellError : u32; void comment_constant(std::string& last_opcode, u64 value, bool print_float = false) { // Test if potentially a CELL error if ((value >> 28) == 0xf'ffff'fff8u || (value >> 28) == 0x8u) { const usz old_size = last_opcode.size(); // Comment as CELL error fmt::append(last_opcode, " #%s (0x%xh)", CellError{static_cast<u32>(value)}, value); // Test if failed to format (appended " #0x8".. 
in such case) if (last_opcode[old_size + 2] != '0') { // Success return; } // Revert and fallback last_opcode.resize(old_size); } // Comment constant formation fmt::append(last_opcode, " #0x%xh", value); if (print_float && ((value >> 31) <= 1u || (value >> 31) == 0x1'ffff'ffffu)) { const f32 float_val = std::bit_cast<f32>(static_cast<u32>(value)); if (std::isfinite(float_val)) { const usz old_size = last_opcode.size(); fmt::append(last_opcode, " (%.6gf)", float_val); if (usz pos = last_opcode.find_first_of('.', old_size); pos == umax) { // No decimal point has been inserted, force insertion last_opcode.insert(last_opcode.size() - 2, ".0"sv); } } else { fmt::append(last_opcode, " (%g)", float_val); } } } constexpr std::pair<const char*, char> get_BC_info(u32 bo, u32 bi) { std::pair<const char*, char> info{}; switch (bo) { case 0b00000: case 0b00001: { info = {"bdnzf", 'f'}; break; } case 0b00010: case 0b00011: { info = {"bdzf", 'f'}; break; } case 0b01000: case 0b01001: { info = {"bdnzt", 't'}; break; } case 0b01010: case 0b01011: { info = {"bdzt", 't'}; break; } case 0b10010: { info.first = "bdz"; break; } case 0b11010: { info = {"bdz", '-'}; break; } case 0b11011: { info = {"bdz", '+'}; break; } case 0b10000: { info.first = "bdnz"; break; } case 0b11000: { info = {"bdnz", '-'}; break; } case 0b11001: { info = {"bdnz", '+'}; break; } case 0b00100: { switch (bi % 4) { case 0x0: info.first = "bge"; break; case 0x1: info.first = "ble"; break; case 0x2: info.first = "bne"; break; case 0x3: info.first = "bns"; break; default: fmt::throw_exception("Unreachable"); } break; } case 0b00110: { info.second = '-'; switch (bi % 4) { case 0x0: info.first = "bge"; break; case 0x1: info.first = "ble"; break; case 0x2: info.first = "bne"; break; case 0x3: info.first = "bns"; break; default: fmt::throw_exception("Unreachable"); } break; } case 0b00111: { info.second = '+'; switch (bi % 4) { case 0x0: info.first = "bge"; break; case 0x1: info.first = "ble"; break; case 0x2: 
info.first = "bne"; break; case 0x3: info.first = "bns"; break; default: fmt::throw_exception("Unreachable"); } break; } case 0b01100: { switch (bi % 4) { case 0x0: info.first = "blt"; break; case 0x1: info.first = "bgt"; break; case 0x2: info.first = "beq"; break; case 0x3: info.first = "bso"; break; default: fmt::throw_exception("Unreachable"); } break; } case 0b01110: { info.second = '-'; switch (bi % 4) { case 0x0: info.first = "blt"; break; case 0x1: info.first = "bgt"; break; case 0x2: info.first = "beq"; break; case 0x3: info.first = "bso"; break; default: fmt::throw_exception("Unreachable"); } break; } case 0b01111: { info.second = '+'; switch (bi % 4) { case 0x0: info.first = "blt"; break; case 0x1: info.first = "bgt"; break; case 0x2: info.first = "beq"; break; case 0x3: info.first = "bso"; break; default: fmt::throw_exception("Unreachable"); } break; } //case 0b10100: //{ // info.first = "b"; break; //} default: break; } return info; } void PPUDisAsm::MFVSCR(ppu_opcode_t op) { DisAsm_V1("mfvscr", op.vd); } void PPUDisAsm::MTVSCR(ppu_opcode_t op) { DisAsm_V1("mtvscr", op.vb); } void PPUDisAsm::VADDCUW(ppu_opcode_t op) { DisAsm_V3("vaddcuw", op.vd, op.va, op.vb); } void PPUDisAsm::VADDFP(ppu_opcode_t op) { DisAsm_V3("vaddfp", op.vd, op.va, op.vb); } void PPUDisAsm::VADDSBS(ppu_opcode_t op) { DisAsm_V3("vaddsbs", op.vd, op.va, op.vb); } void PPUDisAsm::VADDSHS(ppu_opcode_t op) { DisAsm_V3("vaddshs", op.vd, op.va, op.vb); } void PPUDisAsm::VADDSWS(ppu_opcode_t op) { DisAsm_V3("vaddsws", op.vd, op.va, op.vb); } void PPUDisAsm::VADDUBM(ppu_opcode_t op) { DisAsm_V3("vaddubm", op.vd, op.va, op.vb); } void PPUDisAsm::VADDUBS(ppu_opcode_t op) { DisAsm_V3("vaddubs", op.vd, op.va, op.vb); } void PPUDisAsm::VADDUHM(ppu_opcode_t op) { DisAsm_V3("vadduhm", op.vd, op.va, op.vb); } void PPUDisAsm::VADDUHS(ppu_opcode_t op) { DisAsm_V3("vadduhs", op.vd, op.va, op.vb); } void PPUDisAsm::VADDUWM(ppu_opcode_t op) { DisAsm_V3("vadduwm", op.vd, op.va, op.vb); } void 
PPUDisAsm::VADDUWS(ppu_opcode_t op) { DisAsm_V3("vadduws", op.vd, op.va, op.vb); } void PPUDisAsm::VAND(ppu_opcode_t op) { DisAsm_V3("vand", op.vd, op.va, op.vb); } void PPUDisAsm::VANDC(ppu_opcode_t op) { DisAsm_V3("vandc", op.vd, op.va, op.vb); } void PPUDisAsm::VAVGSB(ppu_opcode_t op) { DisAsm_V3("vavgsb", op.vd, op.va, op.vb); } void PPUDisAsm::VAVGSH(ppu_opcode_t op) { DisAsm_V3("vavgsh", op.vd, op.va, op.vb); } void PPUDisAsm::VAVGSW(ppu_opcode_t op) { DisAsm_V3("vavgsw", op.vd, op.va, op.vb); } void PPUDisAsm::VAVGUB(ppu_opcode_t op) { DisAsm_V3("vavgub", op.vd, op.va, op.vb); } void PPUDisAsm::VAVGUH(ppu_opcode_t op) { DisAsm_V3("vavguh", op.vd, op.va, op.vb); } void PPUDisAsm::VAVGUW(ppu_opcode_t op) { DisAsm_V3("vavguw", op.vd, op.va, op.vb); } void PPUDisAsm::VCFSX(ppu_opcode_t op) { DisAsm_V2_UIMM("vcfsx", op.vd, op.vb, op.vuimm); } void PPUDisAsm::VCFUX(ppu_opcode_t op) { DisAsm_V2_UIMM("vcfux", op.vd, op.vb, op.vuimm); } void PPUDisAsm::VCMPBFP(ppu_opcode_t op) { DisAsm_V3(op.oe ? "vcmpbfp." : "vcmpbfp", op.vd, op.va, op.vb); } void PPUDisAsm::VCMPEQFP(ppu_opcode_t op) { DisAsm_V3(op.oe ? "vcmpeqfp." : "vcmpeqfp", op.vd, op.va, op.vb); } void PPUDisAsm::VCMPEQUB(ppu_opcode_t op) { DisAsm_V3(op.oe ? "vcmpequb." : "vcmpequb", op.vd, op.va, op.vb); } void PPUDisAsm::VCMPEQUH(ppu_opcode_t op) { DisAsm_V3(op.oe ? "vcmpequh." : "vcmpequh", op.vd, op.va, op.vb); } void PPUDisAsm::VCMPEQUW(ppu_opcode_t op) { DisAsm_V3(op.oe ? "vcmpequw." : "vcmpequw", op.vd, op.va, op.vb); } void PPUDisAsm::VCMPGEFP(ppu_opcode_t op) { DisAsm_V3(op.oe ? "vcmpgefp." : "vcmpgefp", op.vd, op.va, op.vb); } void PPUDisAsm::VCMPGTFP(ppu_opcode_t op) { DisAsm_V3(op.oe ? "vcmpgtfp." : "vcmpgtfp", op.vd, op.va, op.vb); } void PPUDisAsm::VCMPGTSB(ppu_opcode_t op) { DisAsm_V3(op.oe ? "vcmpgtsb." : "vcmpgtsb", op.vd, op.va, op.vb); } void PPUDisAsm::VCMPGTSH(ppu_opcode_t op) { DisAsm_V3(op.oe ? "vcmpgtsh." 
: "vcmpgtsh", op.vd, op.va, op.vb); } void PPUDisAsm::VCMPGTSW(ppu_opcode_t op) { DisAsm_V3(op.oe ? "vcmpgtsw." : "vcmpgtsw", op.vd, op.va, op.vb); } void PPUDisAsm::VCMPGTUB(ppu_opcode_t op) { DisAsm_V3(op.oe ? "vcmpgtub." : "vcmpgtub", op.vd, op.va, op.vb); } void PPUDisAsm::VCMPGTUH(ppu_opcode_t op) { DisAsm_V3(op.oe ? "vcmpgtuh." : "vcmpgtuh", op.vd, op.va, op.vb); } void PPUDisAsm::VCMPGTUW(ppu_opcode_t op) { DisAsm_V3(op.oe ? "vcmpgtuw." : "vcmpgtuw", op.vd, op.va, op.vb); } void PPUDisAsm::VCTSXS(ppu_opcode_t op) { DisAsm_V2_UIMM("vctsxs", op.vd, op.vb, op.vuimm); } void PPUDisAsm::VCTUXS(ppu_opcode_t op) { DisAsm_V2_UIMM("vctuxs", op.vd, op.vb, op.vuimm); } void PPUDisAsm::VEXPTEFP(ppu_opcode_t op) { DisAsm_V2("vexptefp", op.vd, op.vb); } void PPUDisAsm::VLOGEFP(ppu_opcode_t op) { DisAsm_V2("vlogefp", op.vd, op.vb); } void PPUDisAsm::VMADDFP(ppu_opcode_t op) { DisAsm_V4("vmaddfp", op.vd, op.va, op.vc, op.vb); } void PPUDisAsm::VMAXFP(ppu_opcode_t op) { DisAsm_V3("vmaxfp", op.vd, op.va, op.vb); } void PPUDisAsm::VMAXSB(ppu_opcode_t op) { DisAsm_V3("vmaxsb", op.vd, op.va, op.vb); } void PPUDisAsm::VMAXSH(ppu_opcode_t op) { DisAsm_V3("vmaxsh", op.vd, op.va, op.vb); } void PPUDisAsm::VMAXSW(ppu_opcode_t op) { DisAsm_V3("vmaxsw", op.vd, op.va, op.vb); } void PPUDisAsm::VMAXUB(ppu_opcode_t op) { DisAsm_V3("vmaxub", op.vd, op.va, op.vb); } void PPUDisAsm::VMAXUH(ppu_opcode_t op) { DisAsm_V3("vmaxuh", op.vd, op.va, op.vb); } void PPUDisAsm::VMAXUW(ppu_opcode_t op) { DisAsm_V3("vmaxuw", op.vd, op.va, op.vb); } void PPUDisAsm::VMHADDSHS(ppu_opcode_t op) { DisAsm_V4("vmhaddshs", op.vd, op.va, op.vb, op.vc); } void PPUDisAsm::VMHRADDSHS(ppu_opcode_t op) { DisAsm_V4("vmhraddshs", op.vd, op.va, op.vb, op.vc); } void PPUDisAsm::VMINFP(ppu_opcode_t op) { DisAsm_V3("vminfp", op.vd, op.va, op.vb); } void PPUDisAsm::VMINSB(ppu_opcode_t op) { DisAsm_V3("vminsb", op.vd, op.va, op.vb); } void PPUDisAsm::VMINSH(ppu_opcode_t op) { DisAsm_V3("vminsh", op.vd, op.va, op.vb); } void 
PPUDisAsm::VMINSW(ppu_opcode_t op) { DisAsm_V3("vminsw", op.vd, op.va, op.vb); } void PPUDisAsm::VMINUB(ppu_opcode_t op) { DisAsm_V3("vminub", op.vd, op.va, op.vb); } void PPUDisAsm::VMINUH(ppu_opcode_t op) { DisAsm_V3("vminuh", op.vd, op.va, op.vb); } void PPUDisAsm::VMINUW(ppu_opcode_t op) { DisAsm_V3("vminuw", op.vd, op.va, op.vb); } void PPUDisAsm::VMLADDUHM(ppu_opcode_t op) { DisAsm_V4("vmladduhm", op.vd, op.va, op.vb, op.vc); } void PPUDisAsm::VMRGHB(ppu_opcode_t op) { DisAsm_V3("vmrghb", op.vd, op.va, op.vb); } void PPUDisAsm::VMRGHH(ppu_opcode_t op) { DisAsm_V3("vmrghh", op.vd, op.va, op.vb); } void PPUDisAsm::VMRGHW(ppu_opcode_t op) { DisAsm_V3("vmrghw", op.vd, op.va, op.vb); } void PPUDisAsm::VMRGLB(ppu_opcode_t op) { DisAsm_V3("vmrglb", op.vd, op.va, op.vb); } void PPUDisAsm::VMRGLH(ppu_opcode_t op) { DisAsm_V3("vmrglh", op.vd, op.va, op.vb); } void PPUDisAsm::VMRGLW(ppu_opcode_t op) { DisAsm_V3("vmrglw", op.vd, op.va, op.vb); } void PPUDisAsm::VMSUMMBM(ppu_opcode_t op) { DisAsm_V4("vmsummbm", op.vd, op.va, op.vb, op.vc); } void PPUDisAsm::VMSUMSHM(ppu_opcode_t op) { DisAsm_V4("vmsumshm", op.vd, op.va, op.vb, op.vc); } void PPUDisAsm::VMSUMSHS(ppu_opcode_t op) { DisAsm_V4("vmsumshs", op.vd, op.va, op.vb, op.vc); } void PPUDisAsm::VMSUMUBM(ppu_opcode_t op) { DisAsm_V4("vmsumubm", op.vd, op.va, op.vb, op.vc); } void PPUDisAsm::VMSUMUHM(ppu_opcode_t op) { DisAsm_V4("vmsumuhm", op.vd, op.va, op.vb, op.vc); } void PPUDisAsm::VMSUMUHS(ppu_opcode_t op) { DisAsm_V4("vmsumuhs", op.vd, op.va, op.vb, op.vc); } void PPUDisAsm::VMULESB(ppu_opcode_t op) { DisAsm_V3("vmulesb", op.vd, op.va, op.vb); } void PPUDisAsm::VMULESH(ppu_opcode_t op) { DisAsm_V3("vmulesh", op.vd, op.va, op.vb); } void PPUDisAsm::VMULEUB(ppu_opcode_t op) { DisAsm_V3("vmuleub", op.vd, op.va, op.vb); } void PPUDisAsm::VMULEUH(ppu_opcode_t op) { DisAsm_V3("vmuleuh", op.vd, op.va, op.vb); } void PPUDisAsm::VMULOSB(ppu_opcode_t op) { DisAsm_V3("vmulosb", op.vd, op.va, op.vb); } void 
PPUDisAsm::VMULOSH(ppu_opcode_t op) { DisAsm_V3("vmulosh", op.vd, op.va, op.vb); } void PPUDisAsm::VMULOUB(ppu_opcode_t op) { DisAsm_V3("vmuloub", op.vd, op.va, op.vb); } void PPUDisAsm::VMULOUH(ppu_opcode_t op) { DisAsm_V3("vmulouh", op.vd, op.va, op.vb); } void PPUDisAsm::VNMSUBFP(ppu_opcode_t op) { DisAsm_V4("vnmsubfp", op.vd, op.va, op.vc, op.vb); } void PPUDisAsm::VNOR(ppu_opcode_t op) { DisAsm_V3("vnor", op.vd, op.va, op.vb); } void PPUDisAsm::VOR(ppu_opcode_t op) { DisAsm_V3("vor", op.vd, op.va, op.vb); } void PPUDisAsm::VPERM(ppu_opcode_t op) { DisAsm_V4("vperm", op.vd, op.va, op.vb, op.vc); } void PPUDisAsm::VPKPX(ppu_opcode_t op) { DisAsm_V3("vpkpx", op.vd, op.va, op.vb); } void PPUDisAsm::VPKSHSS(ppu_opcode_t op) { DisAsm_V3("vpkshss", op.vd, op.va, op.vb); } void PPUDisAsm::VPKSHUS(ppu_opcode_t op) { DisAsm_V3("vpkshus", op.vd, op.va, op.vb); } void PPUDisAsm::VPKSWSS(ppu_opcode_t op) { DisAsm_V3("vpkswss", op.vd, op.va, op.vb); } void PPUDisAsm::VPKSWUS(ppu_opcode_t op) { DisAsm_V3("vpkswus", op.vd, op.va, op.vb); } void PPUDisAsm::VPKUHUM(ppu_opcode_t op) { DisAsm_V3("vpkuhum", op.vd, op.va, op.vb); } void PPUDisAsm::VPKUHUS(ppu_opcode_t op) { DisAsm_V3("vpkuhus", op.vd, op.va, op.vb); } void PPUDisAsm::VPKUWUM(ppu_opcode_t op) { DisAsm_V3("vpkuwum", op.vd, op.va, op.vb); } void PPUDisAsm::VPKUWUS(ppu_opcode_t op) { DisAsm_V3("vpkuwus", op.vd, op.va, op.vb); } void PPUDisAsm::VREFP(ppu_opcode_t op) { DisAsm_V2("vrefp", op.vd, op.vb); } void PPUDisAsm::VRFIM(ppu_opcode_t op) { DisAsm_V2("vrfim", op.vd, op.vb); } void PPUDisAsm::VRFIN(ppu_opcode_t op) { DisAsm_V2("vrfin", op.vd, op.vb); } void PPUDisAsm::VRFIP(ppu_opcode_t op) { DisAsm_V2("vrfip", op.vd, op.vb); } void PPUDisAsm::VRFIZ(ppu_opcode_t op) { DisAsm_V2("vrfiz", op.vd, op.vb); } void PPUDisAsm::VRLB(ppu_opcode_t op) { DisAsm_V3("vrlb", op.vd, op.va, op.vb); } void PPUDisAsm::VRLH(ppu_opcode_t op) { DisAsm_V3("vrlh", op.vd, op.va, op.vb); } void PPUDisAsm::VRLW(ppu_opcode_t op) { 
DisAsm_V3("vrlw", op.vd, op.va, op.vb); } void PPUDisAsm::VRSQRTEFP(ppu_opcode_t op) { DisAsm_V2("vrsqrtefp", op.vd, op.vb); } void PPUDisAsm::VSEL(ppu_opcode_t op) { DisAsm_V4("vsel", op.vd, op.va, op.vb, op.vc); } void PPUDisAsm::VSL(ppu_opcode_t op) { DisAsm_V3("vsl", op.vd, op.va, op.vb); } void PPUDisAsm::VSLB(ppu_opcode_t op) { DisAsm_V3("vslb", op.vd, op.va, op.vb); } void PPUDisAsm::VSLDOI(ppu_opcode_t op) { DisAsm_V3_UIMM("vsldoi", op.vd, op.va, op.vb, op.vsh); } void PPUDisAsm::VSLH(ppu_opcode_t op) { DisAsm_V3("vslh", op.vd, op.va, op.vb); } void PPUDisAsm::VSLO(ppu_opcode_t op) { DisAsm_V3("vslo", op.vd, op.va, op.vb); } void PPUDisAsm::VSLW(ppu_opcode_t op) { DisAsm_V3("vslw", op.vd, op.va, op.vb); } void PPUDisAsm::VSPLTB(ppu_opcode_t op) { DisAsm_V2_UIMM("vspltb", op.vd, op.vb, op.vuimm & 0xf); } void PPUDisAsm::VSPLTH(ppu_opcode_t op) { DisAsm_V2_UIMM("vsplth", op.vd, op.vb, op.vuimm & 0x7); } void PPUDisAsm::VSPLTISB(ppu_opcode_t op) { DisAsm_V1_SIMM("vspltisb", op.vd, op.vsimm); } void PPUDisAsm::VSPLTISH(ppu_opcode_t op) { DisAsm_V1_SIMM("vspltish", op.vd, op.vsimm); } void PPUDisAsm::VSPLTISW(ppu_opcode_t op) { DisAsm_V1_SIMM("vspltisw", op.vd, op.vsimm); } void PPUDisAsm::VSPLTW(ppu_opcode_t op) { DisAsm_V2_UIMM("vspltw", op.vd, op.vb, op.vuimm & 0x3); } void PPUDisAsm::VSR(ppu_opcode_t op) { DisAsm_V3("vsr", op.vd, op.va, op.vb); } void PPUDisAsm::VSRAB(ppu_opcode_t op) { DisAsm_V3("vsrab", op.vd, op.va, op.vb); } void PPUDisAsm::VSRAH(ppu_opcode_t op) { DisAsm_V3("vsrah", op.vd, op.va, op.vb); } void PPUDisAsm::VSRAW(ppu_opcode_t op) { DisAsm_V3("vsraw", op.vd, op.va, op.vb); } void PPUDisAsm::VSRB(ppu_opcode_t op) { DisAsm_V3("vsrb", op.vd, op.va, op.vb); } void PPUDisAsm::VSRH(ppu_opcode_t op) { DisAsm_V3("vsrh", op.vd, op.va, op.vb); } void PPUDisAsm::VSRO(ppu_opcode_t op) { DisAsm_V3("vsro", op.vd, op.va, op.vb); } void PPUDisAsm::VSRW(ppu_opcode_t op) { DisAsm_V3("vsrw", op.vd, op.va, op.vb); } void PPUDisAsm::VSUBCUW(ppu_opcode_t op) { 
DisAsm_V3("vsubcuw", op.vd, op.va, op.vb); } void PPUDisAsm::VSUBFP(ppu_opcode_t op) { DisAsm_V3("vsubfp", op.vd, op.va, op.vb); } void PPUDisAsm::VSUBSBS(ppu_opcode_t op) { DisAsm_V3("vsubsbs", op.vd, op.va, op.vb); } void PPUDisAsm::VSUBSHS(ppu_opcode_t op) { DisAsm_V3("vsubshs", op.vd, op.va, op.vb); } void PPUDisAsm::VSUBSWS(ppu_opcode_t op) { DisAsm_V3("vsubsws", op.vd, op.va, op.vb); } void PPUDisAsm::VSUBUBM(ppu_opcode_t op) { DisAsm_V3("vsububm", op.vd, op.va, op.vb); } void PPUDisAsm::VSUBUBS(ppu_opcode_t op) { DisAsm_V3("vsububs", op.vd, op.va, op.vb); } void PPUDisAsm::VSUBUHM(ppu_opcode_t op) { DisAsm_V3("vsubuhm", op.vd, op.va, op.vb); } void PPUDisAsm::VSUBUHS(ppu_opcode_t op) { DisAsm_V3("vsubuhs", op.vd, op.va, op.vb); } void PPUDisAsm::VSUBUWM(ppu_opcode_t op) { DisAsm_V3("vsubuwm", op.vd, op.va, op.vb); } void PPUDisAsm::VSUBUWS(ppu_opcode_t op) { DisAsm_V3("vsubuws", op.vd, op.va, op.vb); } void PPUDisAsm::VSUMSWS(ppu_opcode_t op) { DisAsm_V3("vsumsws", op.vd, op.va, op.vb); } void PPUDisAsm::VSUM2SWS(ppu_opcode_t op) { DisAsm_V3("vsum2sws", op.vd, op.va, op.vb); } void PPUDisAsm::VSUM4SBS(ppu_opcode_t op) { DisAsm_V3("vsum4sbs", op.vd, op.va, op.vb); } void PPUDisAsm::VSUM4SHS(ppu_opcode_t op) { DisAsm_V3("vsum4shs", op.vd, op.va, op.vb); } void PPUDisAsm::VSUM4UBS(ppu_opcode_t op) { DisAsm_V3("vsum4ubs", op.vd, op.va, op.vb); } void PPUDisAsm::VUPKHPX(ppu_opcode_t op) { DisAsm_V2("vupkhpx", op.vd, op.vb); } void PPUDisAsm::VUPKHSB(ppu_opcode_t op) { DisAsm_V2("vupkhsb", op.vd, op.vb); } void PPUDisAsm::VUPKHSH(ppu_opcode_t op) { DisAsm_V2("vupkhsh", op.vd, op.vb); } void PPUDisAsm::VUPKLPX(ppu_opcode_t op) { DisAsm_V2("vupklpx", op.vd, op.vb); } void PPUDisAsm::VUPKLSB(ppu_opcode_t op) { DisAsm_V2("vupklsb", op.vd, op.vb); } void PPUDisAsm::VUPKLSH(ppu_opcode_t op) { DisAsm_V2("vupklsh", op.vd, op.vb); } void PPUDisAsm::VXOR(ppu_opcode_t op) { DisAsm_V3("vxor", op.vd, op.va, op.vb); } void PPUDisAsm::TDI(ppu_opcode_t op) { 
DisAsm_INT1_R1_IMM("tdi", op.bo, op.ra, op.simm16); } void PPUDisAsm::TWI(ppu_opcode_t op) { DisAsm_INT1_R1_IMM("twi", op.bo, op.ra, op.simm16); } void PPUDisAsm::MULLI(ppu_opcode_t op) { DisAsm_R2_IMM("mulli", op.rd, op.ra, op.simm16); } void PPUDisAsm::SUBFIC(ppu_opcode_t op) { DisAsm_R2_IMM("subfic", op.rd, op.ra, op.simm16); } void PPUDisAsm::CMPLI(ppu_opcode_t op) { DisAsm_CR1_R1_IMM(op.l10 ? "cmpldi" : "cmplwi", op.crfd, op.ra, op.uimm16); // Try to obtain the true constant value we are comparing against, comment on success // Upper 16/48 bits of it if (auto [is_xor, value] = try_get_const_xor_gpr_value(op.ra); is_xor && !(value & 0xFFFF)) { // Fixup value (merge the lower 16-bits of that value) value |= op.uimm16; if (!op.l10) { value = static_cast<u32>(value); } comment_constant(last_opcode, value); } } void PPUDisAsm::CMPI(ppu_opcode_t op) { DisAsm_CR1_R1_IMM(op.l10 ? "cmpdi" : "cmpwi", op.crfd, op.ra, op.simm16); // See CMPLI if (auto [is_xor, value] = try_get_const_xor_gpr_value(op.ra); is_xor && !(value & 0xFFFF)) { // Signed fixup value ^= s64{op.simm16}; if (!op.l10) { value = static_cast<u32>(value); } comment_constant(last_opcode, value); } } void PPUDisAsm::ADDIC(ppu_opcode_t op) { DisAsm_R2_IMM(op.main & 1 ? "addic." 
: "addic", op.rd, op.ra, op.simm16); } void PPUDisAsm::ADDI(ppu_opcode_t op) { if (op.ra == 0) { DisAsm_R1_IMM("li", op.rd, op.simm16); } else { DisAsm_R2_IMM("addi", op.rd, op.ra, op.simm16); if (auto [is_const, value] = try_get_const_gpr_value(op.ra); is_const) { // Comment constant formation comment_constant(last_opcode, value + op.simm16); } } } void PPUDisAsm::ADDIS(ppu_opcode_t op) { if (op.ra == 0) { DisAsm_R1_IMM("lis", op.rd, op.simm16); } else { DisAsm_R2_IMM("addis", op.rd, op.ra, op.simm16); if (auto [is_const, value] = try_get_const_gpr_value(op.ra); is_const) { // Comment constant formation comment_constant(last_opcode, value + op.simm16 * 65536); } } } void PPUDisAsm::BC(ppu_opcode_t op) { const u32 bo = op.bo; const u32 bi = op.bi; const s32 bd = op.ds * 4; const u32 aa = op.aa; const u32 lk = op.lk; if (m_mode == cpu_disasm_mode::compiler_elf) { fmt::append(last_opcode, "bc 0x%x, 0x%x, 0x%x, %d, %d", bo, bi, bd, aa, lk); return; } const auto [inst, sign] = get_BC_info(bo, bi); if (!inst) { fmt::append(last_opcode, "%-*s 0x%x, 0x%x, 0x%x, %d, %d", PadOp(), "bc", bo, bi, bd, aa, lk); return; } std::string final = inst; if (lk) final += 'l'; if (aa) final += 'a'; if (sign) final += sign; // Check if need to display full BI value if (sign == 't' || sign == 'f') { if (aa) { DisAsm_BI_BRANCH_A(final, bi, bd); } else { DisAsm_BI_BRANCH(final, bi, bd); } return; } if (aa) { DisAsm_CR_BRANCH_A(final, bi / 4, bd); } else { DisAsm_CR_BRANCH(final, bi / 4, bd); } } void PPUDisAsm::SC(ppu_opcode_t op) { if (op.opcode != ppu_instructions::SC(0) && op.opcode != ppu_instructions::SC(1)) { return UNK(op); } fmt::append(last_opcode, "%-*s ", PadOp(), "sc"); if (op.lev) { fmt::append(last_opcode, "%u ", op.lev); } // Try to get constant syscall index auto [is_const, index] = try_get_const_gpr_value(11); if (!is_const) { return; } switch (op.lev) { case 0: { // Lv2 syscall if (index < 1024u) { fmt::append(last_opcode, "#%s", ppu_syscall_code{index}); } return; } case 
1: { // Lv1 syscall fmt::append(last_opcode, "#lv1_syscall_%u", index); return; } default: return; } } void PPUDisAsm::B(ppu_opcode_t op) { const u32 li = op.bt24; const u32 aa = op.aa; const u32 lk = op.lk; if (m_mode == cpu_disasm_mode::compiler_elf) { fmt::append(last_opcode, "b 0x%x, %d, %d", li, aa, lk); return; } switch (lk) { case 0: switch (aa) { case 0: DisAsm_BRANCH("b", li); break; case 1: DisAsm_BRANCH_A("ba", li); break; } break; case 1: switch (aa) { case 0: DisAsm_BRANCH("bl", li); break; case 1: DisAsm_BRANCH_A("bla", li); break; } break; } } void PPUDisAsm::MCRF(ppu_opcode_t op) { DisAsm_CR2("mcrf", op.crfd, op.crfs); } void PPUDisAsm::BCLR(ppu_opcode_t op) { const u32 bo = op.bo; const u32 bi = op.bi; const u32 bh = op.bh; const u32 lk = op.lk; if (bo == 0b10100) { last_opcode += (lk ? "blrl" : "blr"); return; } const auto [inst, sign] = get_BC_info(bo, bi); if (!inst) { fmt::append(last_opcode, "bclr %d, cr%d[%s], %d, %d", bo, bi / 4, get_partial_BI_field(bi), bh, lk); return; } std::string final = std::string(inst) + (lk ? 
"lrl" : "lr"); if (sign) final += sign; // Check if need to display full BI value if (sign == 't' || sign == 'f') { DisAsm_BI_BRANCH(final, bi, bh); return; } DisAsm_CR_BRANCH_HINT(final, bi / 4, bh); } void PPUDisAsm::CRNOR(ppu_opcode_t op) { if (op.crba == op.crbb) { DisAsm_BI2("crnot", op.crbd, op.crba); return; } DisAsm_BI3("crnor", op.crbd, op.crba, op.crbb); } void PPUDisAsm::CRANDC(ppu_opcode_t op) { DisAsm_BI3("crandc", op.crbd, op.crba, op.crbb); } void PPUDisAsm::ISYNC(ppu_opcode_t) { last_opcode += "isync"; } void PPUDisAsm::CRXOR(ppu_opcode_t op) { if (op.crba == op.crbb && op.crba == op.crbd) { DisAsm_BI1("crclr", op.crbd); return; } DisAsm_BI3("crxor", op.crbd, op.crba, op.crbb); } void PPUDisAsm::CRNAND(ppu_opcode_t op) { DisAsm_BI3("crnand", op.crbd, op.crba, op.crbb); } void PPUDisAsm::CRAND(ppu_opcode_t op) { DisAsm_BI3("crand", op.crbd, op.crba, op.crbb); } void PPUDisAsm::CREQV(ppu_opcode_t op) { if (op.crba == op.crbb && op.crba == op.crbd) { DisAsm_BI1("crset", op.crbd); return; } DisAsm_BI3("creqv", op.crbd, op.crba, op.crbb); } void PPUDisAsm::CRORC(ppu_opcode_t op) { DisAsm_BI3("crorc", op.crbd, op.crba, op.crbb); } void PPUDisAsm::CROR(ppu_opcode_t op) { if (op.crba == op.crbb) { DisAsm_BI2("crmove", op.crbd, op.crba); return; } DisAsm_BI3("cror", op.crbd, op.crba, op.crbb); } void PPUDisAsm::BCCTR(ppu_opcode_t op) { const u32 bo = op.bo; const u32 bi = op.bi; const u32 bh = op.bh; const u32 lk = op.lk; if (bo == 0b10100) { last_opcode += (lk ? "bctrl" : "bctr"); return; } const auto [inst, sign] = get_BC_info(bo, bi); if (!inst || inst[1] == 'd') { // Invalid or unknown bcctr form fmt::append(last_opcode, "bcctr %d, cr%d[%s], %d, %d", bo, bi / 4, get_partial_BI_field(bi), bh, lk); return; } std::string final = inst; final += lk ? 
"ctrl"sv : "ctr"sv; if (sign) final += sign; DisAsm_CR_BRANCH_HINT(final, bi / 4, bh); } void PPUDisAsm::RLWIMI(ppu_opcode_t op) { DisAsm_R2_INT3_RC("rlwimi", op.ra, op.rs, op.sh32, op.mb32, op.me32, op.rc); } void PPUDisAsm::RLWINM(ppu_opcode_t op) { if (op.mb32 == 0 && op.sh32 == 31 - op.me32) { DisAsm_R2_INT1_RC("slwi", op.ra, op.rs, op.sh32, op.rc); } else if (op.me32 == 31 && op.sh32 == 32 - op.mb32) { DisAsm_R2_INT1_RC("srwi", op.ra, op.rs, 32 - op.sh32, op.rc); } else { DisAsm_R2_INT3_RC("rlwinm", op.ra, op.rs, op.sh32, op.mb32, op.me32, op.rc); } } void PPUDisAsm::RLWNM(ppu_opcode_t op) { DisAsm_R3_INT2_RC("rlwnm", op.ra, op.rs, op.rb, op.mb32, op.me32, op.rc); } void PPUDisAsm::ORI(ppu_opcode_t op) { if (op.rs == 0 && op.ra == 0 && op.uimm16 == 0) { last_opcode += "nop"; return; } if (op.uimm16 == 0) return DisAsm_R2("mr", op.ra, op.rs); DisAsm_R2_IMM("ori", op.ra, op.rs, op.uimm16); if (auto [is_const, value] = try_get_const_gpr_value(op.rs); is_const) { // Comment constant formation comment_constant(last_opcode, value | op.uimm16); } } void PPUDisAsm::ORIS(ppu_opcode_t op) { if (op.rs == 0 && op.ra == 0 && op.uimm16 == 0) { last_opcode += "nop"; return; } DisAsm_R2_IMM("oris", op.ra, op.rs, op.uimm16); if (auto [is_const, value] = try_get_const_gpr_value(op.rs); is_const) { // Comment constant formation comment_constant(last_opcode, value | (op.uimm16 << 16)); } } void PPUDisAsm::XORI(ppu_opcode_t op) { DisAsm_R2_IMM("xori", op.ra, op.rs, op.uimm16); if (auto [is_const, value] = try_get_const_gpr_value(op.rs); is_const) { // Comment constant formation comment_constant(last_opcode, value ^ op.uimm16); } } void PPUDisAsm::XORIS(ppu_opcode_t op) { DisAsm_R2_IMM("xoris", op.ra, op.rs, op.uimm16); if (auto [is_const, value] = try_get_const_gpr_value(op.rs); is_const) { // Comment constant formation comment_constant(last_opcode, value ^ (op.uimm16 << 16)); } } void PPUDisAsm::ANDI(ppu_opcode_t op) { DisAsm_R2_IMM("andi.", op.ra, op.rs, op.uimm16); } void 
PPUDisAsm::ANDIS(ppu_opcode_t op) { DisAsm_R2_IMM("andis.", op.ra, op.rs, op.uimm16); } void PPUDisAsm::RLDICL(ppu_opcode_t op) { const u32 sh = op.sh64; const u32 mb = op.mbe64; if (sh == 0) { DisAsm_R2_INT1_RC("clrldi", op.ra, op.rs, mb, op.rc); } else if (mb == 0) { DisAsm_R2_INT1_RC("rotldi", op.ra, op.rs, sh, op.rc); } else if (mb == 64 - sh) { DisAsm_R2_INT1_RC("srdi", op.ra, op.rs, mb, op.rc); } else { DisAsm_R2_INT2_RC("rldicl", op.ra, op.rs, sh, mb, op.rc); } } void PPUDisAsm::RLDICR(ppu_opcode_t op) { const u32 sh = op.sh64; const u32 me = op.mbe64; if (sh == 63 - me) { DisAsm_R2_INT1_RC("sldi", op.ra, op.rs, sh, op.rc); } else { DisAsm_R2_INT2_RC("rldicr", op.ra, op.rs, sh, me, op.rc); } } void PPUDisAsm::RLDIC(ppu_opcode_t op) { const u32 sh = op.sh64; const u32 mb = op.mbe64; DisAsm_R2_INT2_RC("rldic", op.ra, op.rs, sh, mb, op.rc); } void PPUDisAsm::RLDIMI(ppu_opcode_t op) { const u32 sh = op.sh64; const u32 mb = op.mbe64; DisAsm_R2_INT2_RC("rldimi", op.ra, op.rs, sh, mb, op.rc); } void PPUDisAsm::RLDCL(ppu_opcode_t op) { const u32 mb = op.mbe64; DisAsm_R3_INT2_RC("rldcl", op.ra, op.rs, op.rb, mb, 0, op.rc); } void PPUDisAsm::RLDCR(ppu_opcode_t op) { const u32 me = op.mbe64; DisAsm_R3_INT2_RC("rldcr", op.ra, op.rs, op.rb, me, 0, op.rc); } void PPUDisAsm::CMP(ppu_opcode_t op) { DisAsm_CR1_R2(op.l10 ? 
"cmpd" : "cmpw", op.crfd, op.ra, op.rb); } void PPUDisAsm::TW(ppu_opcode_t op) { DisAsm_INT1_R2("tw", op.bo, op.ra, op.rb); } void PPUDisAsm::LVSL(ppu_opcode_t op) { DisAsm_V1_R2("lvsl", op.vd, op.ra, op.rb); } void PPUDisAsm::LVEBX(ppu_opcode_t op) { DisAsm_V1_R2("lvebx", op.vd, op.ra, op.rb); } void PPUDisAsm::SUBFC(ppu_opcode_t op) { DisAsm_R3_OE_RC("subfc", op.rd, op.ra, op.rb, op.oe, op.rc); } void PPUDisAsm::ADDC(ppu_opcode_t op) { DisAsm_R3_OE_RC("addc", op.rd, op.ra, op.rb, op.oe, op.rc); } void PPUDisAsm::MULHDU(ppu_opcode_t op) { DisAsm_R3_RC("mulhdu", op.rd, op.ra, op.rb, op.rc); } void PPUDisAsm::MULHWU(ppu_opcode_t op) { DisAsm_R3_RC("mulhwu", op.rd, op.ra, op.rb, op.rc); } void PPUDisAsm::MFOCRF(ppu_opcode_t op) { if (op.l11) { const u8 crm = static_cast<u8>(op.crm); const int cr = std::countl_zero<u8>(crm); if (cr >= 8 || crm & (crm - 1)) { // Note: invalid form DisAsm_R1_IMM("mfocrf", op.rd, crm); } else { DisAsm_R1_CR1("mfocrf", op.rd, cr); } } else { DisAsm_R1("mfcr", op.rd); } } void PPUDisAsm::LWARX(ppu_opcode_t op) { DisAsm_R3("lwarx", op.rd, op.ra, op.rb); } void PPUDisAsm::LDX(ppu_opcode_t op) { DisAsm_R3("ldx", op.rd, op.ra, op.rb); } void PPUDisAsm::LWZX(ppu_opcode_t op) { DisAsm_R3("lwzx", op.rd, op.ra, op.rb); } void PPUDisAsm::SLW(ppu_opcode_t op) { DisAsm_R3_RC("slw", op.ra, op.rs, op.rb, op.rc); } void PPUDisAsm::CNTLZW(ppu_opcode_t op) { DisAsm_R2_RC("cntlzw", op.ra, op.rs, op.rc); } void PPUDisAsm::SLD(ppu_opcode_t op) { DisAsm_R3_RC("sld", op.ra, op.rs, op.rb, op.rc); } void PPUDisAsm::AND(ppu_opcode_t op) { DisAsm_R3_RC("and", op.ra, op.rs, op.rb, op.rc); } void PPUDisAsm::CMPL(ppu_opcode_t op) { DisAsm_CR1_R2(op.l10 ? 
"cmpld" : "cmplw", op.crfd, op.ra, op.rb); } void PPUDisAsm::LVSR(ppu_opcode_t op) { DisAsm_V1_R2("lvsr", op.vd, op.ra, op.rb); } void PPUDisAsm::LVEHX(ppu_opcode_t op) { DisAsm_V1_R2("lvehx", op.vd, op.ra, op.rb); } void PPUDisAsm::SUBF(ppu_opcode_t op) { DisAsm_R3_OE_RC("subf", op.rd, op.ra, op.rb, op.oe, op.rc); } void PPUDisAsm::LDUX(ppu_opcode_t op) { DisAsm_R3("ldux", op.rd, op.ra, op.rb); } void PPUDisAsm::DCBST(ppu_opcode_t op) { DisAsm_R2("dcbst", op.ra, op.rb); } void PPUDisAsm::LWZUX(ppu_opcode_t op) { DisAsm_R3("lwzux", op.rd, op.ra, op.rb); } void PPUDisAsm::CNTLZD(ppu_opcode_t op) { DisAsm_R2_RC("cntlzd", op.ra, op.rs, op.rc); } void PPUDisAsm::ANDC(ppu_opcode_t op) { DisAsm_R3_RC("andc", op.ra, op.rs, op.rb, op.rc); } void PPUDisAsm::TD(ppu_opcode_t op) { DisAsm_INT1_R2("td", op.bo, op.ra, op.rb); } void PPUDisAsm::LVEWX(ppu_opcode_t op) { DisAsm_V1_R2("lvewx", op.vd, op.ra, op.rb); } void PPUDisAsm::MULHD(ppu_opcode_t op) { DisAsm_R3_RC("mulhd", op.rd, op.ra, op.rb, op.rc); } void PPUDisAsm::MULHW(ppu_opcode_t op) { DisAsm_R3_RC("mulhw", op.rd, op.ra, op.rb, op.rc); } void PPUDisAsm::LDARX(ppu_opcode_t op) { DisAsm_R3("ldarx", op.rd, op.ra, op.rb); } void PPUDisAsm::DCBF(ppu_opcode_t op) { DisAsm_R2("dcbf", op.ra, op.rb); } void PPUDisAsm::LBZX(ppu_opcode_t op) { DisAsm_R3("lbzx", op.rd, op.ra, op.rb); } void PPUDisAsm::LVX(ppu_opcode_t op) { DisAsm_V1_R2("lvx", op.vd, op.ra, op.rb); } void PPUDisAsm::NEG(ppu_opcode_t op) { DisAsm_R2_OE_RC("neg", op.rd, op.ra, op.oe, op.rc); } void PPUDisAsm::LBZUX(ppu_opcode_t op) { DisAsm_R3("lbzux", op.rd, op.ra, op.rb); } void PPUDisAsm::NOR(ppu_opcode_t op) { if (op.rs == op.rb) { DisAsm_R2_RC("not", op.ra, op.rs, op.rc); } else { DisAsm_R3_RC("nor", op.ra, op.rs, op.rb, op.rc); } } void PPUDisAsm::STVEBX(ppu_opcode_t op) { DisAsm_V1_R2("stvebx", op.vs, op.ra, op.rb); } void PPUDisAsm::SUBFE(ppu_opcode_t op) { DisAsm_R3_OE_RC("subfe", op.rd, op.ra, op.rb, op.oe, op.rc); } void PPUDisAsm::ADDE(ppu_opcode_t op) { 
DisAsm_R3_OE_RC("adde", op.rd, op.ra, op.rb, op.oe, op.rc); } void PPUDisAsm::MTOCRF(ppu_opcode_t op) { const u8 crm = static_cast<u8>(op.crm); if (!op.l11 && crm == 0xff) { DisAsm_R1("mtcr", op.rs); return; } const int cr = std::countl_zero<u8>(crm); const auto name = op.l11 ? "mtocrf" : "mtcrf"; if (cr >= 8 || crm & (crm - 1)) { DisAsm_INT1_R1(name, crm, op.rs); } else { DisAsm_CR1_R1(name, cr, op.rs); } } void PPUDisAsm::STDX(ppu_opcode_t op) { DisAsm_R3("stdx", op.rs, op.ra, op.rb); } void PPUDisAsm::STWCX(ppu_opcode_t op) { DisAsm_R3("stwcx.", op.rs, op.ra, op.rb); } void PPUDisAsm::STWX(ppu_opcode_t op) { DisAsm_R3("stwx", op.rs, op.ra, op.rb); } void PPUDisAsm::STVEHX(ppu_opcode_t op) { DisAsm_V1_R2("stvehx", op.vs, op.ra, op.rb); } void PPUDisAsm::STDUX(ppu_opcode_t op) { DisAsm_R3("stdux", op.rs, op.ra, op.rb); } void PPUDisAsm::STWUX(ppu_opcode_t op) { DisAsm_R3("stwux", op.rs, op.ra, op.rb); } void PPUDisAsm::STVEWX(ppu_opcode_t op) { DisAsm_V1_R2("stvewx", op.vs, op.ra, op.rb); } void PPUDisAsm::SUBFZE(ppu_opcode_t op) { DisAsm_R2_OE_RC("subfze", op.rd, op.ra, op.oe, op.rc); } void PPUDisAsm::ADDZE(ppu_opcode_t op) { DisAsm_R2_OE_RC("addze", op.rd, op.ra, op.oe, op.rc); } void PPUDisAsm::STDCX(ppu_opcode_t op) { DisAsm_R3("stdcx.", op.rs, op.ra, op.rb); } void PPUDisAsm::STBX(ppu_opcode_t op) { DisAsm_R3("stbx", op.rs, op.ra, op.rb); } void PPUDisAsm::STVX(ppu_opcode_t op) { DisAsm_V1_R2("stvx", op.vd, op.ra, op.rb); } void PPUDisAsm::SUBFME(ppu_opcode_t op) { DisAsm_R2_OE_RC("subfme", op.rd, op.ra, op.oe, op.rc); } void PPUDisAsm::MULLD(ppu_opcode_t op) { DisAsm_R3_OE_RC("mulld", op.rd, op.ra, op.rb, op.oe, op.rc); } void PPUDisAsm::ADDME(ppu_opcode_t op) { DisAsm_R2_OE_RC("addme", op.rd, op.ra, op.oe, op.rc); } void PPUDisAsm::MULLW(ppu_opcode_t op) { DisAsm_R3_OE_RC("mullw", op.rd, op.ra, op.rb, op.oe, op.rc); } void PPUDisAsm::DCBTST(ppu_opcode_t op) { DisAsm_R3("dcbtst", op.ra, op.rb, op.bo); } void PPUDisAsm::STBUX(ppu_opcode_t op) { 
DisAsm_R3("stbux", op.rs, op.ra, op.rb); } void PPUDisAsm::ADD(ppu_opcode_t op) { DisAsm_R3_OE_RC("add", op.rd, op.ra, op.rb, op.oe, op.rc); } void PPUDisAsm::DCBT(ppu_opcode_t op) { DisAsm_R2("dcbt", op.ra, op.rb); } void PPUDisAsm::LHZX(ppu_opcode_t op) { DisAsm_R3("lhzx", op.rd, op.ra, op.rb); } void PPUDisAsm::EQV(ppu_opcode_t op) { DisAsm_R3_RC("eqv", op.ra, op.rs, op.rb, op.rc); } void PPUDisAsm::ECIWX(ppu_opcode_t op) { DisAsm_R3("eciwx", op.rd, op.ra, op.rb); } void PPUDisAsm::LHZUX(ppu_opcode_t op) { DisAsm_R3("lhzux", op.rd, op.ra, op.rb); } void PPUDisAsm::XOR(ppu_opcode_t op) { DisAsm_R3_RC("xor", op.ra, op.rs, op.rb, op.rc); } void PPUDisAsm::MFSPR(ppu_opcode_t op) { const u32 n = (op.spr >> 5) | ((op.spr & 0x1f) << 5); switch (n) { case 0x001: DisAsm_R1("mfxer", op.rd); break; case 0x008: DisAsm_R1("mflr", op.rd); break; case 0x009: DisAsm_R1("mfctr", op.rd); break; default: DisAsm_R1_IMM("mfspr", op.rd, n); break; } } void PPUDisAsm::LWAX(ppu_opcode_t op) { DisAsm_R3("lwax", op.rd, op.ra, op.rb); } void PPUDisAsm::DST(ppu_opcode_t op) { DisAsm_R2("dst(t)", op.ra, op.rb); } void PPUDisAsm::LHAX(ppu_opcode_t op) { DisAsm_R3("lhax", op.rd, op.ra, op.rb); } void PPUDisAsm::LVXL(ppu_opcode_t op) { DisAsm_V1_R2("lvxl", op.vd, op.ra, op.rb); } void PPUDisAsm::MFTB(ppu_opcode_t op) { const u32 n = (op.spr >> 5) | ((op.spr & 0x1f) << 5); switch (n) { case 268: DisAsm_R1("mftb", op.rd); break; case 269: DisAsm_R1("mftbu", op.rd); break; default: DisAsm_R1_IMM("mftb", op.rd, op.spr); break; } } void PPUDisAsm::LWAUX(ppu_opcode_t op) { DisAsm_R3("lwaux", op.rd, op.ra, op.rb); } void PPUDisAsm::DSTST(ppu_opcode_t op) { DisAsm_R2("dstst(t)", op.ra, op.rb); } void PPUDisAsm::LHAUX(ppu_opcode_t op) { DisAsm_R3("lhaux", op.rd, op.ra, op.rb); } void PPUDisAsm::STHX(ppu_opcode_t op) { DisAsm_R3("sthx", op.rs, op.ra, op.rb); } void PPUDisAsm::ORC(ppu_opcode_t op) { DisAsm_R3_RC("orc", op.ra, op.rs, op.rb, op.rc); } void PPUDisAsm::ECOWX(ppu_opcode_t op) { 
DisAsm_R3("ecowx", op.rs, op.ra, op.rb); } void PPUDisAsm::STHUX(ppu_opcode_t op) { DisAsm_R3("sthux", op.rs, op.ra, op.rb); } void PPUDisAsm::OR(ppu_opcode_t op) { if (op.rs == op.rb) { switch (op.opcode) { case 0x7f9ce378: last_opcode += "db8cyc"; return; case 0x7fbdeb78: last_opcode += "db10cyc"; return; case 0x7fdef378: last_opcode += "db12cyc"; return; case 0x7ffffb78: last_opcode += "db16cyc"; return; default: DisAsm_R2_RC("mr", op.ra, op.rb, op.rc); } } else { DisAsm_R3_RC("or", op.ra, op.rs, op.rb, op.rc); } } void PPUDisAsm::DIVDU(ppu_opcode_t op) { DisAsm_R3_OE_RC("divdu", op.rd, op.ra, op.rb, op.oe, op.rc); } void PPUDisAsm::DIVWU(ppu_opcode_t op) { DisAsm_R3_OE_RC("divwu", op.rd, op.ra, op.rb, op.oe, op.rc); } void PPUDisAsm::MTSPR(ppu_opcode_t op) { const u32 n = (op.spr & 0x1f) + ((op.spr >> 5) & 0x1f); switch (n) { case 0x001: DisAsm_R1("mtxer", op.rs); break; case 0x008: DisAsm_R1("mtlr", op.rs); break; case 0x009: DisAsm_R1("mtctr", op.rs); break; default: DisAsm_R1_IMM("mtspr", n, op.rs); break; } } void PPUDisAsm::DCBI(ppu_opcode_t op) { DisAsm_R2("dcbi", op.ra, op.rb); } void PPUDisAsm::NAND(ppu_opcode_t op) { DisAsm_R3_RC("nand", op.ra, op.rs, op.rb, op.rc); } void PPUDisAsm::STVXL(ppu_opcode_t op) { DisAsm_V1_R2("stvxl", op.vs, op.ra, op.rb); } void PPUDisAsm::DIVD(ppu_opcode_t op) { DisAsm_R3_OE_RC("divd", op.rd, op.ra, op.rb, op.oe, op.rc); } void PPUDisAsm::DIVW(ppu_opcode_t op) { DisAsm_R3_OE_RC("divw", op.rd, op.ra, op.rb, op.oe, op.rc); } void PPUDisAsm::LVLX(ppu_opcode_t op) { DisAsm_V1_R2("lvlx", op.vd, op.ra, op.rb); } void PPUDisAsm::LDBRX(ppu_opcode_t op) { DisAsm_R3("ldbrx", op.rd, op.ra, op.rb); } void PPUDisAsm::LSWX(ppu_opcode_t op) { DisAsm_R3("lswx", op.rd, op.ra, op.rb); } void PPUDisAsm::LWBRX(ppu_opcode_t op) { DisAsm_R3("lwbrx", op.rd, op.ra, op.rb); } void PPUDisAsm::LFSX(ppu_opcode_t op) { DisAsm_F1_R2("lfsx", op.frd, op.ra, op.rb); } void PPUDisAsm::SRW(ppu_opcode_t op) { DisAsm_R3_RC("srw", op.ra, op.rs, op.rb, op.rc); 
} void PPUDisAsm::SRD(ppu_opcode_t op) { DisAsm_R3_RC("srd", op.ra, op.rs, op.rb, op.rc); } void PPUDisAsm::LVRX(ppu_opcode_t op) { DisAsm_V1_R2("lvrx", op.vd, op.ra, op.rb); } void PPUDisAsm::LSWI(ppu_opcode_t op) { DisAsm_R2_INT1("lswi", op.rd, op.ra, op.rb); } void PPUDisAsm::LFSUX(ppu_opcode_t op) { DisAsm_F1_R2("lfsux", op.frd, op.ra, op.rb); } void PPUDisAsm::SYNC(ppu_opcode_t op) { last_opcode += (op.l10 ? "lwsync" : "sync"); } void PPUDisAsm::LFDX(ppu_opcode_t op) { DisAsm_F1_R2("lfdx", op.frd, op.ra, op.rb); } void PPUDisAsm::LFDUX(ppu_opcode_t op) { DisAsm_F1_R2("lfdux", op.frd, op.ra, op.rb); } void PPUDisAsm::STVLX(ppu_opcode_t op) { DisAsm_V1_R2("stvlx", op.vs, op.ra, op.rb); } void PPUDisAsm::STDBRX(ppu_opcode_t op) { DisAsm_R3("stdbrx", op.rs, op.ra, op.rb); } void PPUDisAsm::STSWX(ppu_opcode_t op) { DisAsm_R3("swswx", op.rs, op.ra, op.rb); } void PPUDisAsm::STWBRX(ppu_opcode_t op) { DisAsm_R3("stwbrx", op.rs, op.ra, op.rb); } void PPUDisAsm::STFSX(ppu_opcode_t op) { DisAsm_F1_R2("stfsx", op.frs, op.ra, op.rb); } void PPUDisAsm::STVRX(ppu_opcode_t op) { DisAsm_V1_R2("stvrx", op.vs, op.ra, op.rb); } void PPUDisAsm::STFSUX(ppu_opcode_t op) { DisAsm_F1_R2("stfsux", op.frs, op.ra, op.rb); } void PPUDisAsm::STSWI(ppu_opcode_t op) { DisAsm_R2_INT1("stswi", op.rd, op.ra, op.rb); } void PPUDisAsm::STFDX(ppu_opcode_t op) { DisAsm_F1_R2("stfdx", op.frs, op.ra, op.rb); } void PPUDisAsm::STFDUX(ppu_opcode_t op) { DisAsm_F1_R2("stfdux", op.frs, op.ra, op.rb); } void PPUDisAsm::LVLXL(ppu_opcode_t op) { DisAsm_V1_R2("lvlxl", op.vd, op.ra, op.rb); } void PPUDisAsm::LHBRX(ppu_opcode_t op) { DisAsm_R3("lhbrx", op.rd, op.ra, op.rb); } void PPUDisAsm::SRAW(ppu_opcode_t op) { DisAsm_R3_RC("sraw", op.ra, op.rs, op.rb, op.rc); } void PPUDisAsm::SRAD(ppu_opcode_t op) { DisAsm_R3_RC("srad", op.ra, op.rs, op.rb, op.rc); } void PPUDisAsm::LVRXL(ppu_opcode_t op) { DisAsm_V1_R2("lvrxl", op.vd, op.ra, op.rb); } void PPUDisAsm::DSS(ppu_opcode_t) { last_opcode += "dss"; } void 
PPUDisAsm::SRAWI(ppu_opcode_t op) { DisAsm_R2_INT1_RC("srawi", op.ra, op.rs, op.sh32, op.rc); } void PPUDisAsm::SRADI(ppu_opcode_t op) { DisAsm_R2_INT1_RC("sradi", op.ra, op.rs, op.sh64, op.rc); } void PPUDisAsm::EIEIO(ppu_opcode_t) { last_opcode += "eieio"; } void PPUDisAsm::STVLXL(ppu_opcode_t op) { DisAsm_V1_R2("stvlxl", op.vs, op.ra, op.rb); } void PPUDisAsm::STHBRX(ppu_opcode_t op) { DisAsm_R3("sthbrx", op.rs, op.ra, op.rb); } void PPUDisAsm::EXTSH(ppu_opcode_t op) { DisAsm_R2_RC("extsh", op.ra, op.rs, op.rc); } void PPUDisAsm::STVRXL(ppu_opcode_t op) { DisAsm_V1_R2("stvrxl", op.vs, op.ra, op.rb); } void PPUDisAsm::EXTSB(ppu_opcode_t op) { DisAsm_R2_RC("extsb", op.ra, op.rs, op.rc); } void PPUDisAsm::STFIWX(ppu_opcode_t op) { DisAsm_F1_R2("stfiwx", op.frs, op.ra, op.rb); } void PPUDisAsm::EXTSW(ppu_opcode_t op) { DisAsm_R2_RC("extsw", op.ra, op.rs, op.rc); } void PPUDisAsm::ICBI(ppu_opcode_t op) { DisAsm_R2("icbi", op.ra, op.rb); } void PPUDisAsm::DCBZ(ppu_opcode_t op) { DisAsm_R2("dcbz", op.ra, op.rb); } void PPUDisAsm::LWZ(ppu_opcode_t op) { DisAsm_R2_IMM("lwz", op.rd, op.ra, op.simm16); } void PPUDisAsm::LWZU(ppu_opcode_t op) { DisAsm_R2_IMM("lwzu", op.rd, op.ra, op.simm16); } void PPUDisAsm::LBZ(ppu_opcode_t op) { DisAsm_R2_IMM("lbz", op.rd, op.ra, op.simm16); } void PPUDisAsm::LBZU(ppu_opcode_t op) { DisAsm_R2_IMM("lbzu", op.rd, op.ra, op.simm16); } void PPUDisAsm::STW(ppu_opcode_t op) { DisAsm_R2_IMM("stw", op.rs, op.ra, op.simm16); } void PPUDisAsm::STWU(ppu_opcode_t op) { DisAsm_R2_IMM("stwu", op.rs, op.ra, op.simm16); } void PPUDisAsm::STB(ppu_opcode_t op) { DisAsm_R2_IMM("stb", op.rs, op.ra, op.simm16); } void PPUDisAsm::STBU(ppu_opcode_t op) { DisAsm_R2_IMM("stbu", op.rs, op.ra, op.simm16); } void PPUDisAsm::LHZ(ppu_opcode_t op) { DisAsm_R2_IMM("lhz", op.rs, op.ra, op.simm16); } void PPUDisAsm::LHZU(ppu_opcode_t op) { DisAsm_R2_IMM("lhzu", op.rs, op.ra, op.simm16); } void PPUDisAsm::LHA(ppu_opcode_t op) { DisAsm_R2_IMM("lha", op.rs, op.ra, 
op.simm16); } void PPUDisAsm::LHAU(ppu_opcode_t op) { DisAsm_R2_IMM("lhau", op.rs, op.ra, op.simm16); } void PPUDisAsm::STH(ppu_opcode_t op) { DisAsm_R2_IMM("sth", op.rs, op.ra, op.simm16); } void PPUDisAsm::STHU(ppu_opcode_t op) { DisAsm_R2_IMM("sthu", op.rs, op.ra, op.simm16); } void PPUDisAsm::LMW(ppu_opcode_t op) { DisAsm_R2_IMM("lmw", op.rd, op.ra, op.simm16); } void PPUDisAsm::STMW(ppu_opcode_t op) { DisAsm_R2_IMM("stmw", op.rs, op.ra, op.simm16); } void PPUDisAsm::LFS(ppu_opcode_t op) { DisAsm_F1_IMM_R1("lfs", op.frd, op.simm16, op.ra); } void PPUDisAsm::LFSU(ppu_opcode_t op) { DisAsm_F1_IMM_R1("lfsu", op.frd, op.simm16, op.ra); } void PPUDisAsm::LFD(ppu_opcode_t op) { DisAsm_F1_IMM_R1("lfd", op.frd, op.simm16, op.ra); } void PPUDisAsm::LFDU(ppu_opcode_t op) { DisAsm_F1_IMM_R1("lfdu", op.frd, op.simm16, op.ra); } void PPUDisAsm::STFS(ppu_opcode_t op) { DisAsm_F1_IMM_R1("stfs", op.frs, op.simm16, op.ra); } void PPUDisAsm::STFSU(ppu_opcode_t op) { DisAsm_F1_IMM_R1("stfsu", op.frs, op.simm16, op.ra); } void PPUDisAsm::STFD(ppu_opcode_t op) { DisAsm_F1_IMM_R1("stfd", op.frs, op.simm16, op.ra); } void PPUDisAsm::STFDU(ppu_opcode_t op) { DisAsm_F1_IMM_R1("stfdu", op.frs, op.simm16, op.ra); } void PPUDisAsm::LD(ppu_opcode_t op) { DisAsm_R2_IMM("ld", op.rd, op.ra, op.ds * 4); } void PPUDisAsm::LDU(ppu_opcode_t op) { DisAsm_R2_IMM("ldu", op.rd, op.ra, op.ds * 4); } void PPUDisAsm::LWA(ppu_opcode_t op) { DisAsm_R2_IMM("lwa", op.rd, op.ra, op.ds * 4); } void PPUDisAsm::STD(ppu_opcode_t op) { DisAsm_R2_IMM("std", op.rs, op.ra, op.ds * 4); } void PPUDisAsm::STDU(ppu_opcode_t op) { DisAsm_R2_IMM("stdu", op.rs, op.ra, op.ds * 4); } void PPUDisAsm::FDIVS(ppu_opcode_t op) { DisAsm_F3_RC("fdivs", op.frd, op.fra, op.frb, op.rc); } void PPUDisAsm::FSUBS(ppu_opcode_t op) { DisAsm_F3_RC("fsubs", op.frd, op.fra, op.frb, op.rc); } void PPUDisAsm::FADDS(ppu_opcode_t op) { DisAsm_F3_RC("fadds", op.frd, op.fra, op.frb, op.rc); } void PPUDisAsm::FSQRTS(ppu_opcode_t op) { 
DisAsm_F2_RC("fsqrts", op.frd, op.frb, op.rc); } void PPUDisAsm::FRES(ppu_opcode_t op) { DisAsm_F2_RC("fres", op.frd, op.frb, op.rc); } void PPUDisAsm::FMULS(ppu_opcode_t op) { DisAsm_F3_RC("fmuls", op.frd, op.fra, op.frc, op.rc); } void PPUDisAsm::FMADDS(ppu_opcode_t op) { DisAsm_F4_RC("fmadds", op.frd, op.fra, op.frc, op.frb, op.rc); } void PPUDisAsm::FMSUBS(ppu_opcode_t op) { DisAsm_F4_RC("fmsubs", op.frd, op.fra, op.frc, op.frb, op.rc); } void PPUDisAsm::FNMSUBS(ppu_opcode_t op) { DisAsm_F4_RC("fnmsubs", op.frd, op.fra, op.frc, op.frb, op.rc); } void PPUDisAsm::FNMADDS(ppu_opcode_t op) { DisAsm_F4_RC("fnmadds", op.frd, op.fra, op.frc, op.frb, op.rc); } void PPUDisAsm::MTFSB1(ppu_opcode_t op) { fmt::append(last_opcode, "%-*s %d", PadOp(), op.rc ? "mtfsb1." : "mtfsb1", op.crbd); } void PPUDisAsm::MCRFS(ppu_opcode_t op) { DisAsm_CR2("mcrfs", op.crfd, op.crfs); } void PPUDisAsm::MTFSB0(ppu_opcode_t op) { fmt::append(last_opcode, "%-*s %d", PadOp(), op.rc ? "mtfsb0." : "mtfsb0", op.crbd); } void PPUDisAsm::MTFSFI(ppu_opcode_t op) { fmt::append(last_opcode, "%-*s cr%d,%d,%d", PadOp(), op.rc ? "mtfsfi." : "mtfsfi", op.crfd, op.i, op.l15); } void PPUDisAsm::MFFS(ppu_opcode_t op) { DisAsm_F1_RC("mffs", op.frd, op.rc); } void PPUDisAsm::MTFSF(ppu_opcode_t op) { fmt::append(last_opcode, "%-*s %d,f%d,%d,%d", PadOp(), op.rc ? "mtfsf." 
: "mtfsf", op.rc, op.flm, op.frb, op.l6, op.l15); } void PPUDisAsm::FCMPU(ppu_opcode_t op) { DisAsm_CR1_F2("fcmpu", op.crfd, op.fra, op.frb); } void PPUDisAsm::FRSP(ppu_opcode_t op) { DisAsm_F2_RC("frsp", op.frd, op.frb, op.rc); } void PPUDisAsm::FCTIW(ppu_opcode_t op) { DisAsm_F2_RC("fctiw", op.frd, op.frb, op.rc); } void PPUDisAsm::FCTIWZ(ppu_opcode_t op) { DisAsm_F2_RC("fctiwz", op.frd, op.frb, op.rc); } void PPUDisAsm::FDIV(ppu_opcode_t op) { DisAsm_F3_RC("fdiv", op.frd, op.fra, op.frb, op.rc); } void PPUDisAsm::FSUB(ppu_opcode_t op) { DisAsm_F3_RC("fsub", op.frd, op.fra, op.frb, op.rc); } void PPUDisAsm::FADD(ppu_opcode_t op) { DisAsm_F3_RC("fadd", op.frd, op.fra, op.frb, op.rc); } void PPUDisAsm::FSQRT(ppu_opcode_t op) { DisAsm_F2_RC("fsqrt", op.frd, op.frb, op.rc); } void PPUDisAsm::FSEL(ppu_opcode_t op) { DisAsm_F4_RC("fsel", op.frd, op.fra, op.frc, op.frb, op.rc); } void PPUDisAsm::FMUL(ppu_opcode_t op) { DisAsm_F3_RC("fmul", op.frd, op.fra, op.frc, op.rc); } void PPUDisAsm::FRSQRTE(ppu_opcode_t op) { DisAsm_F2_RC("frsqrte", op.frd, op.frb, op.rc); } void PPUDisAsm::FMSUB(ppu_opcode_t op) { DisAsm_F4_RC("fmsub", op.frd, op.fra, op.frc, op.frb, op.rc); } void PPUDisAsm::FMADD(ppu_opcode_t op) { DisAsm_F4_RC("fmadd", op.frd, op.fra, op.frc, op.frb, op.rc); } void PPUDisAsm::FNMSUB(ppu_opcode_t op) { DisAsm_F4_RC("fnmsub", op.frd, op.fra, op.frc, op.frb, op.rc); } void PPUDisAsm::FNMADD(ppu_opcode_t op) { DisAsm_F4_RC("fnmadd", op.frd, op.fra, op.frc, op.frb, op.rc); } void PPUDisAsm::FCMPO(ppu_opcode_t op) { DisAsm_F3("fcmpo", op.crfd, op.fra, op.frb); } void PPUDisAsm::FNEG(ppu_opcode_t op) { DisAsm_F2_RC("fneg", op.frd, op.frb, op.rc); } void PPUDisAsm::FMR(ppu_opcode_t op) { DisAsm_F2_RC("fmr", op.frd, op.frb, op.rc); } void PPUDisAsm::FNABS(ppu_opcode_t op) { DisAsm_F2_RC("fnabs", op.frd, op.frb, op.rc); } void PPUDisAsm::FABS(ppu_opcode_t op) { DisAsm_F2_RC("fabs", op.frd, op.frb, op.rc); } void PPUDisAsm::FCTID(ppu_opcode_t op) { DisAsm_F2_RC("fctid", 
op.frd, op.frb, op.rc); } void PPUDisAsm::FCTIDZ(ppu_opcode_t op) { DisAsm_F2_RC("fctidz", op.frd, op.frb, op.rc); } void PPUDisAsm::FCFID(ppu_opcode_t op) { DisAsm_F2_RC("fcfid", op.frd, op.frb, op.rc); } extern std::vector<std::string> g_ppu_function_names; void PPUDisAsm::UNK(ppu_opcode_t) { if (u32 addr{}; g_fxo->is_init<ppu_function_manager>() && (addr = g_fxo->get<ppu_function_manager>().addr)) { // HLE function index const u32 index = (dump_pc - addr) / 8; if (dump_pc % 8 == 4 && index < ppu_function_manager::get().size()) { fmt::append(last_opcode, "Function : %s (index %u)", index < g_ppu_function_names.size() ? g_ppu_function_names[index].c_str() : "?", index); return; } } last_opcode += "?? ??"; }
52,822
C++
.cpp
2,304
20.950087
144
0.655248
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
5,190
cellCamera.cpp
RPCS3_rpcs3/rpcs3/Emu/Cell/Modules/cellCamera.cpp
#include "stdafx.h" #include "cellCamera.h" #include "Emu/System.h" #include "Emu/system_config.h" #include "Emu/Cell/PPUModule.h" #include "Emu/Cell/lv2/sys_event.h" #include "Emu/IdManager.h" #include <cmath> LOG_CHANNEL(cellCamera); template <> void fmt_class_string<CellCameraError>::format(std::string& out, u64 arg) { format_enum(out, arg, [](CellCameraError value) { switch (value) { STR_CASE(CELL_CAMERA_ERROR_ALREADY_INIT); STR_CASE(CELL_CAMERA_ERROR_NOT_INIT); STR_CASE(CELL_CAMERA_ERROR_PARAM); STR_CASE(CELL_CAMERA_ERROR_ALREADY_OPEN); STR_CASE(CELL_CAMERA_ERROR_NOT_OPEN); STR_CASE(CELL_CAMERA_ERROR_DEVICE_NOT_FOUND); STR_CASE(CELL_CAMERA_ERROR_DEVICE_DEACTIVATED); STR_CASE(CELL_CAMERA_ERROR_NOT_STARTED); STR_CASE(CELL_CAMERA_ERROR_FORMAT_UNKNOWN); STR_CASE(CELL_CAMERA_ERROR_RESOLUTION_UNKNOWN); STR_CASE(CELL_CAMERA_ERROR_BAD_FRAMERATE); STR_CASE(CELL_CAMERA_ERROR_TIMEOUT); STR_CASE(CELL_CAMERA_ERROR_BUSY); STR_CASE(CELL_CAMERA_ERROR_FATAL); STR_CASE(CELL_CAMERA_ERROR_MUTEX); } return unknown; }); } template <> void fmt_class_string<CellCameraFormat>::format(std::string& out, u64 arg) { format_enum(out, arg, [](CellCameraFormat value) { switch (value) { STR_CASE(CELL_CAMERA_FORMAT_UNKNOWN); STR_CASE(CELL_CAMERA_JPG); STR_CASE(CELL_CAMERA_RAW8); STR_CASE(CELL_CAMERA_YUV422); STR_CASE(CELL_CAMERA_RAW10); STR_CASE(CELL_CAMERA_RGBA); STR_CASE(CELL_CAMERA_YUV420); STR_CASE(CELL_CAMERA_V_Y1_U_Y0); } return unknown; }); } // Temporarily #ifndef _MSC_VER #pragma GCC diagnostic ignored "-Wunused-parameter" #endif // ************** // * Prototypes * // ************** error_code cellCameraSetAttribute(s32 dev_num, s32 attrib, u32 arg1, u32 arg2); error_code cellCameraReadEx(s32 dev_num, vm::ptr<CellCameraReadEx> read); // ************************ // * HLE helper functions * // ************************ static const char* get_camera_attr_name(s32 value) { switch (value) { case CELL_CAMERA_GAIN: return "GAIN"; case CELL_CAMERA_REDBLUEGAIN: return "REDBLUEGAIN"; case 
CELL_CAMERA_SATURATION: return "SATURATION"; case CELL_CAMERA_EXPOSURE: return "EXPOSURE"; case CELL_CAMERA_BRIGHTNESS: return "BRIGHTNESS"; case CELL_CAMERA_AEC: return "AEC"; case CELL_CAMERA_AGC: return "AGC"; case CELL_CAMERA_AWB: return "AWB"; case CELL_CAMERA_ABC: return "ABC"; case CELL_CAMERA_LED: return "LED"; case CELL_CAMERA_AUDIOGAIN: return "AUDIOGAIN"; case CELL_CAMERA_QS: return "QS"; case CELL_CAMERA_NONZEROCOEFFS: return "NONZEROCOEFFS"; case CELL_CAMERA_YUVFLAG: return "YUVFLAG"; case CELL_CAMERA_JPEGFLAG: return "JPEGFLAG"; case CELL_CAMERA_BACKLIGHTCOMP: return "BACKLIGHTCOMP"; case CELL_CAMERA_MIRRORFLAG: return "MIRRORFLAG"; case CELL_CAMERA_MEASUREDQS: return "MEASUREDQS"; case CELL_CAMERA_422FLAG: return "422FLAG"; case CELL_CAMERA_USBLOAD: return "USBLOAD"; case CELL_CAMERA_GAMMA: return "GAMMA"; case CELL_CAMERA_GREENGAIN: return "GREENGAIN"; case CELL_CAMERA_AGCLIMIT: return "AGCLIMIT"; case CELL_CAMERA_DENOISE: return "DENOISE"; case CELL_CAMERA_FRAMERATEADJUST: return "FRAMERATEADJUST"; case CELL_CAMERA_PIXELOUTLIERFILTER: return "PIXELOUTLIERFILTER"; case CELL_CAMERA_AGCLOW: return "AGCLOW"; case CELL_CAMERA_AGCHIGH: return "AGCHIGH"; case CELL_CAMERA_DEVICELOCATION: return "DEVICELOCATION"; case CELL_CAMERA_FORMATCAP: return "FORMATCAP"; case CELL_CAMERA_FORMATINDEX: return "FORMATINDEX"; case CELL_CAMERA_NUMFRAME: return "NUMFRAME"; case CELL_CAMERA_FRAMEINDEX: return "FRAMEINDEX"; case CELL_CAMERA_FRAMESIZE: return "FRAMESIZE"; case CELL_CAMERA_INTERVALTYPE: return "INTERVALTYPE"; case CELL_CAMERA_INTERVALINDEX: return "INTERVALINDEX"; case CELL_CAMERA_INTERVALVALUE: return "INTERVALVALUE"; case CELL_CAMERA_COLORMATCHING: return "COLORMATCHING"; case CELL_CAMERA_PLFREQ: return "PLFREQ"; case CELL_CAMERA_DEVICEID: return "DEVICEID"; case CELL_CAMERA_DEVICECAP: return "DEVICECAP"; case CELL_CAMERA_DEVICESPEED: return "DEVICESPEED"; case CELL_CAMERA_UVCREQCODE: return "UVCREQCODE"; case CELL_CAMERA_UVCREQDATA: return "UVCREQDATA"; case 
CELL_CAMERA_DEVICEID2: return "DEVICEID2"; case CELL_CAMERA_READMODE: return "READMODE"; case CELL_CAMERA_GAMEPID: return "GAMEPID"; case CELL_CAMERA_PBUFFER: return "PBUFFER"; case CELL_CAMERA_READFINISH: return "READFINISH"; } return nullptr; } camera_context::camera_context(utils::serial& ar) { save(ar); } void camera_context::save(utils::serial& ar) { ar(init); if (!init) { return; } GET_OR_USE_SERIALIZATION_VERSION(ar.is_writing(), cellCamera); ar(notify_data_map, start_timestamp_us, read_mode, is_streaming, is_attached, is_open, info, attr, frame_num); } static bool check_dev_num(s32 dev_num) { return dev_num == 0; } template <typename VariantOfCellCameraInfo> static error_code check_camera_info(const VariantOfCellCameraInfo& info) { // TODO: I managed to get 0x80990004 once. :thonkang: if (info.format == CELL_CAMERA_FORMAT_UNKNOWN) { return CELL_CAMERA_ERROR_FORMAT_UNKNOWN; } if (info.resolution == CELL_CAMERA_RESOLUTION_UNKNOWN) { return CELL_CAMERA_ERROR_RESOLUTION_UNKNOWN; } if (info.framerate <= 24) { return CELL_CAMERA_ERROR_BAD_FRAMERATE; } auto check_fps = [fps = info.framerate](std::initializer_list<s32> range) { return std::find(range.begin(), range.end(), fps) != range.end(); }; switch (g_cfg.io.camera_type) { case fake_camera_type::eyetoy: switch (info.format) { case CELL_CAMERA_JPG: switch (info.resolution) { case CELL_CAMERA_VGA: case CELL_CAMERA_WGA: if (!check_fps({ 25, 30 })) return CELL_CAMERA_ERROR_FORMAT_UNKNOWN; break; case CELL_CAMERA_QVGA: if (!check_fps({ 25, 30, 50, 60 })) return CELL_CAMERA_ERROR_FORMAT_UNKNOWN; break; case CELL_CAMERA_SPECIFIED_WIDTH_HEIGHT: default: // TODO return CELL_CAMERA_ERROR_FORMAT_UNKNOWN; } break; case CELL_CAMERA_RAW8: case CELL_CAMERA_YUV422: case CELL_CAMERA_RAW10: case CELL_CAMERA_RGBA: case CELL_CAMERA_YUV420: case CELL_CAMERA_V_Y1_U_Y0: default: // TODO (also check those other formats) return CELL_CAMERA_ERROR_FORMAT_UNKNOWN; } break; case fake_camera_type::eyetoy2: switch (info.format) { case 
CELL_CAMERA_JPG: case CELL_CAMERA_RAW10: return CELL_CAMERA_ERROR_FORMAT_UNKNOWN; case CELL_CAMERA_RAW8: switch (info.resolution) { case CELL_CAMERA_VGA: case CELL_CAMERA_WGA: if (!check_fps({ 25, 30, 50, 60 })) return CELL_CAMERA_ERROR_FORMAT_UNKNOWN; break; case CELL_CAMERA_QVGA: if (!check_fps({ 25, 30, 50, 60, 100, 120 })) return CELL_CAMERA_ERROR_FORMAT_UNKNOWN; break; case CELL_CAMERA_SPECIFIED_WIDTH_HEIGHT: default: // TODO return CELL_CAMERA_ERROR_FORMAT_UNKNOWN; } break; case CELL_CAMERA_YUV422: case CELL_CAMERA_RGBA: // < TODO check: they all seem to pass the same resolutions as in CELL_CAMERA_YUV422 case CELL_CAMERA_YUV420: // < case CELL_CAMERA_V_Y1_U_Y0: // < default: // < switch (info.resolution) { case CELL_CAMERA_VGA: case CELL_CAMERA_WGA: if (!check_fps({ 25, 30 })) return CELL_CAMERA_ERROR_FORMAT_UNKNOWN; break; case CELL_CAMERA_QVGA: if (!check_fps({ 25, 30, 50, 60 })) return CELL_CAMERA_ERROR_FORMAT_UNKNOWN; break; case CELL_CAMERA_SPECIFIED_WIDTH_HEIGHT: default: // TODO return CELL_CAMERA_ERROR_FORMAT_UNKNOWN; } break; } break; case fake_camera_type::uvc1_1: switch (info.format) { case CELL_CAMERA_JPG: case CELL_CAMERA_YUV422: break; case CELL_CAMERA_RAW8: case CELL_CAMERA_RAW10: case CELL_CAMERA_RGBA: case CELL_CAMERA_YUV420: case CELL_CAMERA_V_Y1_U_Y0: default: // TODO return CELL_CAMERA_ERROR_FORMAT_UNKNOWN; } break; case fake_camera_type::unknown: default: // TODO break; } return CELL_OK; } std::pair<u32, u32> get_video_resolution(const CellCameraInfoEx& info) { switch (info.resolution) { case CELL_CAMERA_VGA: return{ 640, 480 }; case CELL_CAMERA_QVGA: return { 320, 240 }; case CELL_CAMERA_WGA: return{ 640, 360 }; case CELL_CAMERA_SPECIFIED_WIDTH_HEIGHT: return{ info.width, info.height }; case CELL_CAMERA_RESOLUTION_UNKNOWN: default: return{ 0, 0 }; } } u32 get_buffer_size_by_format(s32 format, s32 width, s32 height) { double bytes_per_pixel = 0.0; switch (format) { case CELL_CAMERA_RAW8: bytes_per_pixel = 1.0; break; case 
CELL_CAMERA_YUV422: bytes_per_pixel = 2.0; break; case CELL_CAMERA_YUV420: case CELL_CAMERA_V_Y1_U_Y0: bytes_per_pixel = 1.5; break; case CELL_CAMERA_RAW10: bytes_per_pixel = 1.25; break; case CELL_CAMERA_JPG: case CELL_CAMERA_RGBA: case CELL_CAMERA_FORMAT_UNKNOWN: default: bytes_per_pixel = 4.0; break; } return ::narrow<u32>(static_cast<u64>(std::ceil(width * height * bytes_per_pixel))); } u32 get_video_buffer_size(const CellCameraInfoEx& info) { u32 width, height; std::tie(width, height) = get_video_resolution(info); return get_buffer_size_by_format(info.format, width, height); } // ************************ // * cellCamera functions * // ************************ // This represents 4 almost identical subfunctions used by the Start/Stop/Reset/Close functions error_code check_init_and_open(s32 dev_num) { if (!check_dev_num(dev_num)) { return CELL_CAMERA_ERROR_PARAM; } // TODO: Yet another CELL_CAMERA_ERROR_BUSY auto& g_camera = g_fxo->get<camera_thread>(); if (!g_camera.init) { return CELL_CAMERA_ERROR_NOT_INIT; } if (!g_camera.is_open) { return CELL_CAMERA_ERROR_NOT_OPEN; } return CELL_OK; } // This represents a recurring subfunction throughout libCamera error_code check_resolution(s32 dev_num) { // TODO: Some sort of connection check maybe? //if (error == CELL_CAMERA_ERROR_RESOLUTION_UNKNOWN) //{ // return CELL_CAMERA_ERROR_TIMEOUT; //} // TODO: Yet another CELL_CAMERA_ERROR_FATAL return CELL_OK; } // This represents a oftenly used sequence in libCamera (usually the beginning of a subfunction). // There also exist common sequences for mutex lock/unlock by the way. 
error_code check_resolution_ex(s32 dev_num) { // TODO: Yet another CELL_CAMERA_ERROR_BUSY if (!check_dev_num(dev_num)) { return CELL_CAMERA_ERROR_PARAM; } if (error_code error = check_resolution(dev_num)) { return error; } return CELL_OK; } error_code cellCameraInit() { cellCamera.todo("cellCameraInit()"); // Start camera thread auto& g_camera = g_fxo->get<camera_thread>(); std::lock_guard lock(g_camera.mutex); if (g_camera.init) { return CELL_CAMERA_ERROR_ALREADY_INIT; } if (g_cfg.io.camera == camera_handler::null) { g_camera.init = 1; return CELL_OK; } switch (g_cfg.io.camera_type) { case fake_camera_type::eyetoy: { g_camera.attr[CELL_CAMERA_SATURATION] = { 164 }; g_camera.attr[CELL_CAMERA_BRIGHTNESS] = { 96 }; g_camera.attr[CELL_CAMERA_AEC] = { 1 }; g_camera.attr[CELL_CAMERA_AGC] = { 1 }; g_camera.attr[CELL_CAMERA_AWB] = { 1 }; g_camera.attr[CELL_CAMERA_ABC] = { 0 }; g_camera.attr[CELL_CAMERA_LED] = { 1 }; g_camera.attr[CELL_CAMERA_QS] = { 0 }; g_camera.attr[CELL_CAMERA_NONZEROCOEFFS] = { 32, 32 }; g_camera.attr[CELL_CAMERA_YUVFLAG] = { 0 }; g_camera.attr[CELL_CAMERA_BACKLIGHTCOMP] = { 0 }; g_camera.attr[CELL_CAMERA_MIRRORFLAG] = { 1 }; g_camera.attr[CELL_CAMERA_422FLAG] = { 1 }; g_camera.attr[CELL_CAMERA_USBLOAD] = { 4 }; break; } case fake_camera_type::eyetoy2: { g_camera.attr[CELL_CAMERA_SATURATION] = { 64 }; g_camera.attr[CELL_CAMERA_BRIGHTNESS] = { 8 }; g_camera.attr[CELL_CAMERA_AEC] = { 1 }; g_camera.attr[CELL_CAMERA_AGC] = { 1 }; g_camera.attr[CELL_CAMERA_AWB] = { 1 }; g_camera.attr[CELL_CAMERA_LED] = { 1 }; g_camera.attr[CELL_CAMERA_BACKLIGHTCOMP] = { 0 }; g_camera.attr[CELL_CAMERA_MIRRORFLAG] = { 1 }; g_camera.attr[CELL_CAMERA_GAMMA] = { 1 }; g_camera.attr[CELL_CAMERA_AGCLIMIT] = { 4 }; g_camera.attr[CELL_CAMERA_DENOISE] = { 0 }; g_camera.attr[CELL_CAMERA_FRAMERATEADJUST] = { 0 }; g_camera.attr[CELL_CAMERA_PIXELOUTLIERFILTER] = { 1 }; g_camera.attr[CELL_CAMERA_AGCLOW] = { 48 }; g_camera.attr[CELL_CAMERA_AGCHIGH] = { 64 }; break; } case 
fake_camera_type::uvc1_1: { g_camera.attr[CELL_CAMERA_DEVICEID] = { 0x5ca, 0x18d0 }; // KBCR-S01MU g_camera.attr[CELL_CAMERA_FORMATCAP] = { CELL_CAMERA_JPG | CELL_CAMERA_YUV422 }; g_camera.attr[CELL_CAMERA_NUMFRAME] = { 1 }; // Amount of supported resolutions break; } default: cellCamera.todo("Trying to init cellCamera with un-researched camera type."); } // TODO: Some other default attributes? Need to check the actual behaviour on a real PS3. g_camera.is_attached = true; g_camera.init = 1; return CELL_OK; } error_code cellCameraEnd() { cellCamera.todo("cellCameraEnd()"); auto& g_camera = g_fxo->get<camera_thread>(); std::lock_guard lock(g_camera.mutex); if (!g_camera.init) { return CELL_CAMERA_ERROR_NOT_INIT; } // TODO: call cellCameraClose(0), ignore errors // TODO g_camera.init = 0; g_camera.reset_state(); return CELL_OK; } error_code cellCameraOpen(s32 dev_num, vm::ptr<CellCameraInfo> info) { cellCamera.notice("cellCameraOpen(dev_num=%d, info=*0x%x)", dev_num, info); if (!info) { return CELL_CAMERA_ERROR_PARAM; } auto& g_camera = g_fxo->get<camera_thread>(); if (!g_camera.init) { return CELL_CAMERA_ERROR_NOT_INIT; } if (!check_dev_num(dev_num)) { return CELL_CAMERA_ERROR_PARAM; } if (g_camera.is_open) { return CELL_CAMERA_ERROR_ALREADY_OPEN; } if (auto res = check_camera_info(*info)) { return res; } if (!g_camera.is_attached) { return CELL_CAMERA_ERROR_DEVICE_NOT_FOUND; } return CELL_OK; } error_code cellCameraOpenAsync() { UNIMPLEMENTED_FUNC(cellCamera); return CELL_OK; } error_code cellCameraOpenEx(s32 dev_num, vm::ptr<CellCameraInfoEx> info) { cellCamera.todo("cellCameraOpenEx(dev_num=%d, info=*0x%x)", dev_num, info); // This function has a very weird order of checking for errors if (!info) { return CELL_CAMERA_ERROR_PARAM; } if (g_cfg.io.camera == camera_handler::null) { return not_an_error(CELL_CAMERA_ERROR_DEVICE_NOT_FOUND); } if (auto res = cellCameraSetAttribute(dev_num, CELL_CAMERA_READMODE, info->read_mode, 0)) { return res; } if (info->read_mode == 
CELL_CAMERA_READ_DIRECT) { // Note: arg1 is the return value of previous SetAttribute if (auto res = cellCameraSetAttribute(dev_num, CELL_CAMERA_GAMEPID, 0, 0)) { return res; } } if (!check_dev_num(dev_num)) { return CELL_CAMERA_ERROR_PARAM; } auto& g_camera = g_fxo->get<camera_thread>(); if (g_camera.is_open) { return CELL_CAMERA_ERROR_ALREADY_OPEN; } if (auto res = check_camera_info(*info)) { return res; } // calls cellCameraGetAttribute(dev_num, CELL_CAMERA_PBUFFER) at some point const auto vbuf_size = get_video_buffer_size(*info); std::lock_guard lock(g_camera.mutex); // TODO: find out if the buffers are also checked for nullptr if (info->read_mode == CELL_CAMERA_READ_DIRECT) { info->pbuf[0] = vm::cast(vm::alloc(vbuf_size, vm::main)); info->pbuf[1] = vm::cast(vm::alloc(vbuf_size, vm::main)); // TODO: verify info->bytesize = vbuf_size; } else { info->buffer = vm::cast(vm::alloc(vbuf_size, vm::main)); info->bytesize = vbuf_size; } std::tie(info->width, info->height) = get_video_resolution(*info); if (!g_camera.open_camera()) { return CELL_CAMERA_ERROR_DEVICE_NOT_FOUND; } g_camera.is_open = true; g_camera.info = *info; cellCamera.notice("cellCameraOpen info: format=%s, resolution=%d, framerate=%d, bytesize=%d, width=%d, height=%d, dev_num=%d, guid=%d", info->format, info->resolution, info->framerate, info->bytesize, info->width, info->height, info->dev_num, info->guid); auto& shared_data = g_fxo->get<gem_camera_shared>(); shared_data.width = info->width > 0 ? +info->width : 640; shared_data.height = info->height > 0 ? 
+info->height : 480; shared_data.size = vbuf_size; shared_data.format = info->format; return CELL_OK; } error_code cellCameraOpenPost() { UNIMPLEMENTED_FUNC(cellCamera); return CELL_OK; } error_code cellCameraClose(s32 dev_num) { cellCamera.notice("cellCameraClose(dev_num=%d)", dev_num); if (error_code error = check_init_and_open(dev_num)) { return error; } // TODO: Yet another CELL_CAMERA_ERROR_BUSY if (dev_num != 0) { return CELL_CAMERA_ERROR_PARAM; } if (error_code error = check_resolution(dev_num)) { return error; } auto& g_camera = g_fxo->get<camera_thread>(); std::lock_guard lock(g_camera.mutex); g_camera.is_streaming = false; if (g_camera.info.buffer) { vm::dealloc(g_camera.info.buffer.addr(), vm::main); } if (g_camera.info.pbuf[0]) { vm::dealloc(g_camera.info.pbuf[0].addr(), vm::main); } if (g_camera.info.pbuf[1]) { vm::dealloc(g_camera.info.pbuf[1].addr(), vm::main); } g_camera.close_camera(); g_camera.is_open = false; return CELL_OK; } error_code cellCameraCloseAsync() { UNIMPLEMENTED_FUNC(cellCamera); return CELL_OK; } error_code cellCameraClosePost() { UNIMPLEMENTED_FUNC(cellCamera); return CELL_OK; } error_code cellCameraGetDeviceGUID(s32 dev_num, vm::ptr<u32> guid) { cellCamera.notice("cellCameraGetDeviceGUID(dev_num=%d, guid=*0x%x)", dev_num, guid); auto& g_camera = g_fxo->get<camera_thread>(); if (!g_camera.init) { return CELL_CAMERA_ERROR_NOT_INIT; } if (guid) { *guid = 0; // apparently always 0 } return CELL_OK; } error_code cellCameraGetType(s32 dev_num, vm::ptr<s32> type) { cellCamera.trace("cellCameraGetType(dev_num=%d, type=*0x%x)", dev_num, type); auto& g_camera = g_fxo->get<camera_thread>(); if (!g_camera.init) { return CELL_CAMERA_ERROR_NOT_INIT; } if (g_cfg.io.camera == camera_handler::null) { return not_an_error(CELL_CAMERA_ERROR_DEVICE_NOT_FOUND); } if (!check_dev_num(dev_num) || !type) { return CELL_CAMERA_ERROR_PARAM; } if (error_code error = check_resolution(dev_num)) { return error; } if (!g_camera.is_attached) { return 
CELL_CAMERA_ERROR_DEVICE_NOT_FOUND; } switch (g_cfg.io.camera_type.get()) { case fake_camera_type::unknown: *type = CELL_CAMERA_TYPE_UNKNOWN; break; case fake_camera_type::eyetoy: *type = CELL_CAMERA_EYETOY; break; case fake_camera_type::eyetoy2: *type = CELL_CAMERA_EYETOY2; break; case fake_camera_type::uvc1_1: *type = CELL_CAMERA_USBVIDEOCLASS; break; } return CELL_OK; } s32 cellCameraIsAvailable(s32 dev_num) { cellCamera.trace("cellCameraIsAvailable(dev_num=%d)", dev_num); if (g_cfg.io.camera == camera_handler::null) { return false; } vm::var<s32> type; if (cellCameraGetType(dev_num, type) != CELL_OK || *type == CELL_CAMERA_TYPE_UNKNOWN) { return false; } if (*type > CELL_CAMERA_TYPE_UNKNOWN || *type <= CELL_CAMERA_USBVIDEOCLASS) { // TODO: checks CELL_CAMERA_DEVICESPEED attribute } return true; } s32 cellCameraIsAttached(s32 dev_num) { cellCamera.trace("cellCameraIsAttached(dev_num=%d)", dev_num); if (g_cfg.io.camera == camera_handler::null) { return false; } auto& g_camera = g_fxo->get<camera_thread>(); if (!g_camera.init) { return false; } if (!check_dev_num(dev_num)) { return false; } vm::var<s32> type; if (cellCameraGetType(dev_num, type) != CELL_OK) { return false; } std::lock_guard lock(g_camera.mutex); bool is_attached = g_camera.is_attached; if (g_cfg.io.camera == camera_handler::fake) { // "attach" camera here // normally should be attached immediately after event queue is registered, but just to be sure if (!is_attached) { g_camera.send_attach_state(true); is_attached = g_camera.is_attached; } } return is_attached; } s32 cellCameraIsOpen(s32 dev_num) { cellCamera.notice("cellCameraIsOpen(dev_num=%d)", dev_num); if (g_cfg.io.camera == camera_handler::null) { return false; } auto& g_camera = g_fxo->get<camera_thread>(); if (!g_camera.init) { return false; } if (!check_dev_num(dev_num)) { return false; } std::lock_guard lock(g_camera.mutex); return g_camera.is_open.load(); } s32 cellCameraIsStarted(s32 dev_num) { 
cellCamera.notice("cellCameraIsStarted(dev_num=%d)", dev_num); if (g_cfg.io.camera == camera_handler::null) { return false; } auto& g_camera = g_fxo->get<camera_thread>(); if (!g_camera.init) { return false; } if (!check_dev_num(dev_num)) { return false; } std::lock_guard lock(g_camera.mutex); return g_camera.is_streaming.load(); } error_code cellCameraGetAttribute(s32 dev_num, s32 attrib, vm::ptr<u32> arg1, vm::ptr<u32> arg2) { const auto attr_name = get_camera_attr_name(attrib); cellCamera.notice("cellCameraGetAttribute(dev_num=%d, attrib=%d=%s, arg1=*0x%x, arg2=*0x%x)", dev_num, attrib, attr_name, arg1, arg2); auto& g_camera = g_fxo->get<camera_thread>(); if (!g_camera.init) { return CELL_CAMERA_ERROR_NOT_INIT; } if (!check_dev_num(dev_num)) { return CELL_CAMERA_ERROR_PARAM; } if (g_cfg.io.camera == camera_handler::null) { return not_an_error(CELL_CAMERA_ERROR_NOT_OPEN); } // actually compares <= 0x63 which is equivalent if (attrib < CELL_CAMERA_FORMATCAP && !g_camera.is_open) { return CELL_CAMERA_ERROR_NOT_OPEN; } if (!arg1) { return CELL_CAMERA_ERROR_PARAM; } if (error_code error = check_resolution(dev_num)) { return error; } std::lock_guard lock(g_camera.mutex); if (!g_camera.is_attached) { return CELL_CAMERA_ERROR_DEVICE_NOT_FOUND; } if (!attr_name) // invalid attributes don't have a name { return CELL_CAMERA_ERROR_PARAM; } if (arg1) { *arg1 = g_camera.attr[attrib].v1; } if (arg2) { *arg2 = g_camera.attr[attrib].v2; } cellCamera.todo("cellCameraGetAttribute(attr_name=%s, v1=%d, v2=%d)", attr_name, g_camera.attr[attrib].v1, g_camera.attr[attrib].v2); return CELL_OK; } error_code cellCameraSetAttribute(s32 dev_num, s32 attrib, u32 arg1, u32 arg2) { const auto attr_name = get_camera_attr_name(attrib); (attrib == CELL_CAMERA_LED ? 
cellCamera.trace : cellCamera.todo)("cellCameraSetAttribute(dev_num=%d, attrib=%d=%s, arg1=%d, arg2=%d)", dev_num, attrib, attr_name, arg1, arg2); auto& g_camera = g_fxo->get<camera_thread>(); if (!g_camera.init) { return CELL_CAMERA_ERROR_NOT_INIT; } if (!check_dev_num(dev_num)) { return CELL_CAMERA_ERROR_PARAM; } if (g_cfg.io.camera == camera_handler::null) { return not_an_error(CELL_CAMERA_ERROR_NOT_OPEN); } // actually compares <= 0x63 which is equivalent if (attrib < CELL_CAMERA_FORMATCAP && !g_camera.is_open) { return CELL_CAMERA_ERROR_NOT_OPEN; } if (error_code error = check_resolution(dev_num)) { return error; } if (!attr_name) // invalid attributes don't have a name { return CELL_CAMERA_ERROR_PARAM; } g_camera.set_attr(attrib, arg1, arg2); return CELL_OK; } error_code cellCameraResetAttribute() { UNIMPLEMENTED_FUNC(cellCamera); return CELL_OK; } error_code cellCameraGetBufferSize(s32 dev_num, vm::ptr<CellCameraInfoEx> info) { cellCamera.notice("cellCameraGetBufferSize(dev_num=%d, info=*0x%x)", dev_num, info); auto& g_camera = g_fxo->get<camera_thread>(); if (!g_camera.init) { return CELL_CAMERA_ERROR_NOT_INIT; } if (g_cfg.io.camera == camera_handler::null) { return not_an_error(CELL_CAMERA_ERROR_DEVICE_NOT_FOUND); } if (!check_dev_num(dev_num)) { return CELL_CAMERA_ERROR_PARAM; } if (g_camera.is_open) { return CELL_CAMERA_ERROR_ALREADY_OPEN; } if (!info) { return CELL_CAMERA_ERROR_PARAM; } if (auto res = check_camera_info(*info)) { return res; } if (!cellCameraIsAttached(dev_num)) { return CELL_CAMERA_ERROR_DEVICE_NOT_FOUND; } if (auto status = cellCameraSetAttribute(dev_num, CELL_CAMERA_READMODE, info->read_mode, 0)) { return status; } if (error_code error = check_resolution(dev_num)) { return error; } std::lock_guard lock(g_camera.mutex); g_camera.info = *info; info->bytesize = get_video_buffer_size(g_camera.info); cellCamera.notice("cellCameraGetBufferSize info: format=%d, resolution=%d, framerate=%d, bytesize=%d, width=%d, height=%d, dev_num=%d, 
guid=%d", info->format, info->resolution, info->framerate, info->bytesize, info->width, info->height, info->dev_num, info->guid); return not_an_error(info->bytesize); } error_code check_get_camera_info(s32 dev_num, bool is_valid_info_struct) { auto& g_camera = g_fxo->get<camera_thread>(); if (!g_camera.init) { return CELL_CAMERA_ERROR_NOT_INIT; } if (!check_dev_num(dev_num)) { return CELL_CAMERA_ERROR_PARAM; } if (g_cfg.io.camera == camera_handler::null) { return not_an_error(CELL_CAMERA_ERROR_NOT_OPEN); } if (!g_camera.is_open) { return CELL_CAMERA_ERROR_NOT_OPEN; } if (!is_valid_info_struct) { return CELL_CAMERA_ERROR_PARAM; } return CELL_OK; } error_code cellCameraGetBufferInfo(s32 dev_num, vm::ptr<CellCameraInfo> info) { cellCamera.notice("cellCameraGetBufferInfo(dev_num=%d, info=0x%x)", dev_num, info); // called by cellCameraGetBufferInfoEx if (error_code error = check_get_camera_info(dev_num, !!info)) { return error; } auto& g_camera = g_fxo->get<camera_thread>(); std::lock_guard lock(g_camera.mutex); info->format = g_camera.info.format; info->resolution = g_camera.info.resolution; info->framerate = g_camera.info.framerate; info->buffer = g_camera.info.buffer; info->bytesize = g_camera.info.bytesize; info->width = g_camera.info.width; info->height = g_camera.info.height; info->dev_num = g_camera.info.dev_num; info->guid = g_camera.info.guid; return CELL_OK; } error_code cellCameraGetBufferInfoEx(s32 dev_num, vm::ptr<CellCameraInfoEx> info) { cellCamera.notice("cellCameraGetBufferInfoEx(dev_num=%d, info=0x%x)", dev_num, info); // calls cellCameraGetBufferInfo if (error_code error = check_get_camera_info(dev_num, !!info)) { return error; } auto& g_camera = g_fxo->get<camera_thread>(); std::lock_guard lock(g_camera.mutex); *info = g_camera.info; return CELL_OK; } error_code cellCameraPrepExtensionUnit(s32 dev_num, vm::ptr<u8> guidExtensionCode) { cellCamera.todo("cellCameraPrepExtensionUnit(dev_num=%d, guidExtensionCode=0x%x)", dev_num, guidExtensionCode); if 
(!check_dev_num(dev_num) || !guidExtensionCode) { return CELL_CAMERA_ERROR_PARAM; } if (error_code error = check_resolution(dev_num)) { return error; } auto& g_camera = g_fxo->get<camera_thread>(); if (!g_camera.is_attached) { return CELL_CAMERA_ERROR_DEVICE_NOT_FOUND; } return CELL_OK; } error_code cellCameraCtrlExtensionUnit(s32 dev_num, u8 request, u16 value, u16 length, vm::ptr<u8> data) { cellCamera.todo("cellCameraCtrlExtensionUnit(dev_num=%d, request=%d, value=%d, length=%d, data=*0x%x)", dev_num, request, value, length, data); if (!check_dev_num(dev_num)) { return CELL_CAMERA_ERROR_PARAM; } auto& g_camera = g_fxo->get<camera_thread>(); if (!g_camera.is_open) { return CELL_CAMERA_ERROR_NOT_OPEN; } if (!data) { return CELL_CAMERA_ERROR_PARAM; } if (!g_camera.is_attached) { return CELL_CAMERA_ERROR_DEVICE_NOT_FOUND; } // TODO: Yet another CELL_CAMERA_ERROR_PARAM return CELL_OK; } error_code cellCameraGetExtensionUnit(s32 dev_num, u16 value, u16 length, vm::ptr<u8> data) { cellCamera.todo("cellCameraGetExtensionUnit(dev_num=%d, value=%d, length=%d, data=*0x%x)", dev_num, value, length, data); return cellCameraCtrlExtensionUnit(dev_num, GET_CUR, value, length, data); } error_code cellCameraSetExtensionUnit(s32 dev_num, u16 value, u16 length, vm::ptr<u8> data) { cellCamera.todo("cellCameraSetExtensionUnit(dev_num=%d, value=%d, length=%d, data=*0x%x)", dev_num, value, length, data); return cellCameraCtrlExtensionUnit(dev_num, SET_CUR, value, length, data); } error_code cellCameraSetContainer() { UNIMPLEMENTED_FUNC(cellCamera); return CELL_OK; } error_code cellCameraReset(s32 dev_num) { cellCamera.todo("cellCameraReset(dev_num=%d)", dev_num); if (error_code error = check_init_and_open(dev_num)) { return error; } if (error_code error = check_resolution_ex(dev_num)) { return error; } auto& g_camera = g_fxo->get<camera_thread>(); if (!g_camera.is_attached) { return CELL_CAMERA_ERROR_DEVICE_NOT_FOUND; } // TODO reset camera return CELL_OK; } error_code 
cellCameraResetAsync() { UNIMPLEMENTED_FUNC(cellCamera); return CELL_OK; } error_code cellCameraResetPost() { UNIMPLEMENTED_FUNC(cellCamera); return CELL_OK; } error_code cellCameraStart(s32 dev_num) { cellCamera.notice("cellCameraStart(dev_num=%d)", dev_num); if (error_code error = check_init_and_open(dev_num)) { return error; } if (error_code error = check_resolution_ex(dev_num)) { return error; } auto& g_camera = g_fxo->get<camera_thread>(); std::lock_guard lock(g_camera.mutex); if (!g_camera.is_attached) { return CELL_CAMERA_ERROR_DEVICE_NOT_FOUND; } if (!g_camera.start_camera()) { return CELL_CAMERA_ERROR_DEVICE_NOT_FOUND; } // TODO: Yet another CELL_CAMERA_ERROR_TIMEOUT g_camera.start_timestamp_us = get_guest_system_time(); g_camera.is_streaming = true; return CELL_OK; } error_code cellCameraStartAsync() { UNIMPLEMENTED_FUNC(cellCamera); return CELL_OK; } error_code cellCameraStartPost() { UNIMPLEMENTED_FUNC(cellCamera); return CELL_OK; } error_code cellCameraRead(s32 dev_num, vm::ptr<u32> frame_num, vm::ptr<u32> bytes_read) { cellCamera.trace("cellCameraRead(dev_num=%d, frame_num=*0x%x, bytes_read=*0x%x)", dev_num, frame_num, bytes_read); vm::ptr<CellCameraReadEx> read_ex = vm::make_var<CellCameraReadEx>({}); if (auto res = cellCameraReadEx(dev_num, read_ex)) { return res; } if (frame_num) { *frame_num = read_ex->frame; } if (bytes_read) { *bytes_read = read_ex->bytesread; } return CELL_OK; } error_code cellCameraRead2() { UNIMPLEMENTED_FUNC(cellCamera); return CELL_OK; } error_code cellCameraReadEx(s32 dev_num, vm::ptr<CellCameraReadEx> read) { cellCamera.trace("cellCameraReadEx(dev_num=%d, read=0x%x)", dev_num, read); auto& g_camera = g_fxo->get<camera_thread>(); if (!g_camera.init) { return CELL_CAMERA_ERROR_NOT_INIT; } if (g_cfg.io.camera == camera_handler::null) { return not_an_error(CELL_CAMERA_ERROR_NOT_OPEN); } if (!check_dev_num(dev_num)) { return CELL_CAMERA_ERROR_PARAM; } std::lock_guard lock(g_camera.mutex); if (!g_camera.is_open) { return 
CELL_CAMERA_ERROR_NOT_OPEN; } if (!g_camera.is_attached) { return CELL_CAMERA_ERROR_DEVICE_NOT_FOUND; } if (!g_camera.is_streaming) { return CELL_CAMERA_ERROR_NOT_STARTED; } // can call cellCameraReset() and cellCameraStop() in some cases const bool has_new_frame = g_camera.has_new_frame.exchange(false); if (g_camera.handler) { if (has_new_frame) { u32 width{}; u32 height{}; u64 frame_number{}; u64 bytes_read{}; if (!g_camera.get_camera_frame(g_camera.info.buffer.get_ptr(), width, height, frame_number, bytes_read)) { return CELL_CAMERA_ERROR_DEVICE_NOT_FOUND; } g_camera.bytes_read = ::narrow<u32>(bytes_read); cellCamera.trace("cellCameraRead: frame_number=%d, width=%d, height=%d. bytes_read=%d (passed to game: frame=%d, bytesread=%d)", frame_number, width, height, bytes_read, read ? read->frame.get() : 0, read ? read->bytesread.get() : 0); } } else { g_camera.bytes_read = g_camera.is_streaming ? get_video_buffer_size(g_camera.info) : 0; } if (has_new_frame) { g_camera.frame_timestamp_us = get_guest_system_time() - g_camera.start_timestamp_us; } if (read) // NULL returns CELL_OK { read->timestamp = g_camera.frame_timestamp_us; read->frame = g_camera.frame_num; read->bytesread = g_camera.bytes_read; auto& shared_data = g_fxo->get<gem_camera_shared>(); shared_data.frame_timestamp_us.store(read->timestamp); } return CELL_OK; } error_code cellCameraReadComplete(s32 dev_num, u32 bufnum, u32 arg2) { cellCamera.todo("cellCameraReadComplete(dev_num=%d, bufnum=%d, arg2=%d)", dev_num, bufnum, arg2); if (bufnum < 2) { auto& g_camera = g_fxo->get<camera_thread>(); std::lock_guard lock(g_camera.mutex); g_camera.pbuf_locked[bufnum] = false; } return cellCameraSetAttribute(dev_num, CELL_CAMERA_READFINISH, bufnum, arg2); } error_code cellCameraStop(s32 dev_num) { cellCamera.notice("cellCameraStop(dev_num=%d)", dev_num); if (error_code error = check_init_and_open(dev_num)) { return error; } if (error_code error = check_resolution_ex(dev_num)) { return error; } auto& g_camera = 
g_fxo->get<camera_thread>(); std::lock_guard lock(g_camera.mutex); if (!g_camera.is_attached) { return CELL_CAMERA_ERROR_DEVICE_NOT_FOUND; } if (!g_camera.is_streaming) { return CELL_CAMERA_ERROR_NOT_STARTED; } g_camera.stop_camera(); g_camera.is_streaming = false; return CELL_OK; } error_code cellCameraStopAsync() { UNIMPLEMENTED_FUNC(cellCamera); return CELL_OK; } error_code cellCameraStopPost() { UNIMPLEMENTED_FUNC(cellCamera); return CELL_OK; } error_code cellCameraSetNotifyEventQueue(u64 key) { cellCamera.notice("cellCameraSetNotifyEventQueue(key=0x%x)", key); auto& g_camera = g_fxo->get<camera_thread>(); if (!g_camera.init) { return CELL_CAMERA_ERROR_NOT_INIT; } if (g_cfg.io.camera == camera_handler::null) { return CELL_OK; } if (error_code error = check_resolution(0)) { return error; } g_camera.add_queue(key, 0, 0); return CELL_OK; } error_code cellCameraRemoveNotifyEventQueue(u64 key) { cellCamera.notice("cellCameraRemoveNotifyEventQueue(key=0x%x)", key); auto& g_camera = g_fxo->get<camera_thread>(); if (!g_camera.init) { return CELL_CAMERA_ERROR_NOT_INIT; } if (g_cfg.io.camera == camera_handler::null) { return CELL_OK; } if (error_code error = check_resolution(0)) { return error; } g_camera.remove_queue(key); return CELL_OK; } error_code cellCameraSetNotifyEventQueue2(u64 key, u64 source, u64 flag) { cellCamera.notice("cellCameraSetNotifyEventQueue2(key=0x%x, source=%d, flag=%d)", key, source, flag); auto& g_camera = g_fxo->get<camera_thread>(); if (!g_camera.init) { return CELL_CAMERA_ERROR_NOT_INIT; } if (g_cfg.io.camera == camera_handler::null) { return CELL_OK; } if (error_code error = check_resolution(0)) { return error; } g_camera.add_queue(key, source, flag); return CELL_OK; } error_code cellCameraRemoveNotifyEventQueue2(u64 key) { cellCamera.notice("cellCameraRemoveNotifyEventQueue2(key=0x%x)", key); return cellCameraRemoveNotifyEventQueue(key); } DECLARE(ppu_module_manager::cellCamera)("cellCamera", []() { REG_FUNC(cellCamera, cellCameraInit); 
REG_FUNC(cellCamera, cellCameraEnd); REG_FUNC(cellCamera, cellCameraOpen); REG_FUNC(cellCamera, cellCameraOpenAsync); REG_FUNC(cellCamera, cellCameraOpenEx); REG_FUNC(cellCamera, cellCameraOpenPost); REG_FUNC(cellCamera, cellCameraClose); REG_FUNC(cellCamera, cellCameraCloseAsync); REG_FUNC(cellCamera, cellCameraClosePost); REG_FUNC(cellCamera, cellCameraGetDeviceGUID); REG_FUNC(cellCamera, cellCameraGetType); REG_FUNC(cellCamera, cellCameraIsAvailable); REG_FUNC(cellCamera, cellCameraIsAttached); REG_FUNC(cellCamera, cellCameraIsOpen); REG_FUNC(cellCamera, cellCameraIsStarted); REG_FUNC(cellCamera, cellCameraGetAttribute); REG_FUNC(cellCamera, cellCameraSetAttribute); REG_FUNC(cellCamera, cellCameraResetAttribute); REG_FUNC(cellCamera, cellCameraGetBufferSize); REG_FUNC(cellCamera, cellCameraGetBufferInfo); REG_FUNC(cellCamera, cellCameraGetBufferInfoEx); REG_FUNC(cellCamera, cellCameraPrepExtensionUnit); REG_FUNC(cellCamera, cellCameraCtrlExtensionUnit); REG_FUNC(cellCamera, cellCameraGetExtensionUnit); REG_FUNC(cellCamera, cellCameraSetExtensionUnit); REG_FUNC(cellCamera, cellCameraSetContainer); REG_FUNC(cellCamera, cellCameraReset); REG_FUNC(cellCamera, cellCameraResetAsync); REG_FUNC(cellCamera, cellCameraResetPost); REG_FUNC(cellCamera, cellCameraStart); REG_FUNC(cellCamera, cellCameraStartAsync); REG_FUNC(cellCamera, cellCameraStartPost); REG_FUNC(cellCamera, cellCameraRead); REG_FUNC(cellCamera, cellCameraRead2); REG_FUNC(cellCamera, cellCameraReadEx); REG_FUNC(cellCamera, cellCameraReadComplete); REG_FUNC(cellCamera, cellCameraStop); REG_FUNC(cellCamera, cellCameraStopAsync); REG_FUNC(cellCamera, cellCameraStopPost); REG_FUNC(cellCamera, cellCameraSetNotifyEventQueue); REG_FUNC(cellCamera, cellCameraRemoveNotifyEventQueue); REG_FUNC(cellCamera, cellCameraSetNotifyEventQueue2); REG_FUNC(cellCamera, cellCameraRemoveNotifyEventQueue2); }); // camera_thread members void camera_context::operator()() { while (thread_ctrl::state() != thread_state::aborting && 
!Emu.IsStopped()) { const s32 fps = info.framerate; if (!fps || Emu.IsPaused() || g_cfg.io.camera == camera_handler::null) { thread_ctrl::wait_for(1000); // hack continue; } const u64 frame_start = get_guest_system_time(); // Get latest frame with CELL_CAMERA_READ_DIRECT. // With CELL_CAMERA_READ_FUNCCALL the game fetches the buffer in cellCameraRead. const u64 buffer_number = pbuf_write_index; bool send_frame_update_event = false; bool frame_update_event_sent = false; if (is_streaming) { if (read_mode.load() == CELL_CAMERA_READ_DIRECT) { std::lock_guard lock(mutex); { send_frame_update_event = info.pbuf[pbuf_write_index] && !pbuf_locked[pbuf_write_index]; } if (handler && send_frame_update_event) { u32 width{}; u32 height{}; u64 frame_number{}; u64 bytes_read{}; send_frame_update_event = get_camera_frame(info.pbuf[pbuf_write_index].get_ptr(), width, height, frame_number, bytes_read); if (send_frame_update_event) { pbuf_write_index = pbuf_next_index(); } } } else { std::lock_guard lock(mutex); send_frame_update_event = !handler || on_handler_state(handler->get_state()); } } std::unique_lock lock(mutex_notify_data_map); for (const auto& [key, evt_data] : notify_data_map) { // handle FRAME_UPDATE if (send_frame_update_event && evt_data.flag & CELL_CAMERA_EFLAG_FRAME_UPDATE) { if (auto queue = lv2_event_queue::find(key)) { u64 data2 = 0; u64 data3 = 0; if (read_mode.load() == CELL_CAMERA_READ_DIRECT) { const u64 image_data_size = static_cast<u64>(info.bytesize); const u64 camera_id = 0; data2 = image_data_size << 32 | buffer_number << 16 | camera_id; data3 = get_guest_system_time() - start_timestamp_us; // timestamp } else // CELL_CAMERA_READ_FUNCCALL, also default { data2 = 0; // device id (always 0) data3 = 0; // unused } if (CellError err = queue->send(evt_data.source, CELL_CAMERA_FRAME_UPDATE, data2, data3)) [[unlikely]] { cellCamera.warning("Failed to send frame update event (error=0x%x)", err); } frame_update_event_sent = true; } } } ++frame_num; has_new_frame = 
true; if (read_mode.load() == CELL_CAMERA_READ_DIRECT && frame_update_event_sent) { std::lock_guard lock(mutex); pbuf_locked[buffer_number] = true; } lock.unlock(); for (const u64 frame_target_time = 1000000u / fps; !Emu.IsStopped();) { const u64 time_passed = get_guest_system_time() - frame_start; if (time_passed >= frame_target_time) break; lv2_obj::wait_timeout(frame_target_time - time_passed); } } } bool camera_context::open_camera() { Emu.BlockingCallFromMainThread([this]() { handler.reset(); handler = Emu.GetCallbacks().get_camera_handler(); if (handler) { handler->open_camera(); } }); return !handler || on_handler_state(handler->get_state()); } bool camera_context::start_camera() { if (handler) { handler->set_mirrored(!!attr[CELL_CAMERA_MIRRORFLAG].v1); handler->set_frame_rate(info.framerate); handler->set_resolution(info.width, info.height); handler->set_format(info.format, info.bytesize); Emu.BlockingCallFromMainThread([this]() { handler->start_camera(); }); return on_handler_state(handler->get_state()); } return true; } bool camera_context::get_camera_frame(u8* dst, u32& width, u32& height, u64& frame_number, u64& bytes_read) { if (handler) { return on_handler_state(handler->get_image(dst, info.bytesize, width, height, frame_number, bytes_read)); } return true; } void camera_context::stop_camera() { if (handler) { Emu.BlockingCallFromMainThread([this]() { handler->stop_camera(); }); } } void camera_context::close_camera() { if (handler) { Emu.BlockingCallFromMainThread([this]() { handler->close_camera(); }); } } void camera_context::reset_state() { read_mode = CELL_CAMERA_READ_FUNCCALL; is_streaming = false; is_attached = false; is_open = false; info.framerate = 0; std::memset(&attr, 0, sizeof(attr)); handler.reset(); pbuf_write_index = 0; pbuf_locked[0] = false; pbuf_locked[1] = false; has_new_frame = false; frame_timestamp_us = 0; bytes_read = 0; if (info.buffer) { vm::dealloc(info.buffer.addr(), vm::main); } if (info.pbuf[0]) { 
vm::dealloc(info.pbuf[0].addr(), vm::main); } if (info.pbuf[1]) { vm::dealloc(info.pbuf[1].addr(), vm::main); } std::scoped_lock lock(mutex_notify_data_map); notify_data_map.clear(); } void camera_context::send_attach_state(bool attached) { std::lock_guard lock(mutex_notify_data_map); for (const auto& [key, evt_data] : notify_data_map) { if (auto queue = lv2_event_queue::find(key)) { if (CellError err = queue->send(evt_data.source, attached ? CELL_CAMERA_ATTACH : CELL_CAMERA_DETACH, 0, 0)) [[unlikely]] { cellCamera.warning("Failed to send attach event (attached=%d, error=0x%x)", attached, err); } } } // We're not expected to send any events for attaching/detaching is_attached = attached; } void camera_context::set_attr(s32 attrib, u32 arg1, u32 arg2) { switch (attrib) { case CELL_CAMERA_READMODE: { if (arg1 != CELL_CAMERA_READ_FUNCCALL && arg1 != CELL_CAMERA_READ_DIRECT) { cellCamera.warning("Unknown read mode set: %d", arg1); arg1 = CELL_CAMERA_READ_FUNCCALL; } read_mode.exchange(arg1); break; } case CELL_CAMERA_MIRRORFLAG: { if (handler) { handler->set_mirrored(!!arg1); } break; } default: break; } std::lock_guard lock(mutex); attr[attrib] = {arg1, arg2}; } void camera_context::add_queue(u64 key, u64 source, u64 flag) { std::lock_guard lock(mutex); { std::lock_guard lock_data_map(mutex_notify_data_map); notify_data_map[key] = { source, flag }; } // send ATTACH event - HACKY send_attach_state(is_attached); } void camera_context::remove_queue(u64 key) { std::lock_guard lock(mutex); { std::lock_guard lock_data_map(mutex_notify_data_map); notify_data_map.erase(key); } } u32 camera_context::pbuf_next_index() const { // The read buffer index cannot be the same as the write index return (pbuf_write_index + 1u) % 2; } bool camera_context::on_handler_state(camera_handler_base::camera_handler_state state) { switch (state) { case camera_handler_base::camera_handler_state::closed: { if (is_attached) { send_attach_state(false); } if (handler) { if (is_streaming) { 
cellCamera.warning("Camera closed or disconnected (state=%d). Trying to start camera...", static_cast<int>(state)); Emu.BlockingCallFromMainThread([&]() { handler->open_camera(); handler->start_camera(); }); } else if (is_open) { cellCamera.warning("Camera closed or disconnected (state=%d). Trying to open camera...", static_cast<int>(state)); Emu.BlockingCallFromMainThread([&]() { handler->open_camera(); }); } } return false; } case camera_handler_base::camera_handler_state::open: { if (handler && is_streaming) { cellCamera.warning("Camera handler not running (state=%d). Trying to start camera...", static_cast<int>(state)); Emu.BlockingCallFromMainThread([&]() { handler->start_camera(); }); } break; } case camera_handler_base::camera_handler_state::running: { if (!is_attached) { cellCamera.warning("Camera handler not attached. Sending attach event...", static_cast<int>(state)); send_attach_state(true); } break; } } return true; }
44,189
C++
.cpp
1,593
25.166353
176
0.71504
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
5,191
cellSpurs.cpp
RPCS3_rpcs3/rpcs3/Emu/Cell/Modules/cellSpurs.cpp
#include "stdafx.h" #include "Emu/System.h" #include "Emu/system_config.h" #include "Emu/IdManager.h" #include "Emu/Memory/vm_reservation.h" #include "Emu/Cell/PPUModule.h" #include "Emu/Cell/SPUThread.h" #include "Emu/Cell/lv2/sys_lwmutex.h" #include "Emu/Cell/lv2/sys_lwcond.h" #include "Emu/Cell/lv2/sys_spu.h" #include "Emu/Cell/lv2/sys_ppu_thread.h" #include "Emu/Cell/lv2/sys_memory.h" #include "Emu/Cell/lv2/sys_process.h" #include "Emu/Cell/lv2/sys_semaphore.h" #include "Emu/Cell/lv2/sys_event.h" #include "sysPrxForUser.h" #include "cellSpurs.h" #include "util/asm.hpp" #include "util/v128.hpp" #include "util/simd.hpp" LOG_CHANNEL(cellSpurs); template <> void fmt_class_string<CellSpursCoreError>::format(std::string& out, u64 arg) { format_enum(out, arg, [](auto error) { switch (error) { STR_CASE(CELL_SPURS_CORE_ERROR_AGAIN); STR_CASE(CELL_SPURS_CORE_ERROR_INVAL); STR_CASE(CELL_SPURS_CORE_ERROR_NOMEM); STR_CASE(CELL_SPURS_CORE_ERROR_SRCH); STR_CASE(CELL_SPURS_CORE_ERROR_PERM); STR_CASE(CELL_SPURS_CORE_ERROR_BUSY); STR_CASE(CELL_SPURS_CORE_ERROR_STAT); STR_CASE(CELL_SPURS_CORE_ERROR_ALIGN); STR_CASE(CELL_SPURS_CORE_ERROR_NULL_POINTER); } return unknown; }); } template <> void fmt_class_string<CellSpursPolicyModuleError>::format(std::string& out, u64 arg) { format_enum(out, arg, [](auto error) { switch (error) { STR_CASE(CELL_SPURS_POLICY_MODULE_ERROR_AGAIN); STR_CASE(CELL_SPURS_POLICY_MODULE_ERROR_INVAL); STR_CASE(CELL_SPURS_POLICY_MODULE_ERROR_NOSYS); STR_CASE(CELL_SPURS_POLICY_MODULE_ERROR_NOMEM); STR_CASE(CELL_SPURS_POLICY_MODULE_ERROR_SRCH); STR_CASE(CELL_SPURS_POLICY_MODULE_ERROR_NOENT); STR_CASE(CELL_SPURS_POLICY_MODULE_ERROR_NOEXEC); STR_CASE(CELL_SPURS_POLICY_MODULE_ERROR_DEADLK); STR_CASE(CELL_SPURS_POLICY_MODULE_ERROR_PERM); STR_CASE(CELL_SPURS_POLICY_MODULE_ERROR_BUSY); STR_CASE(CELL_SPURS_POLICY_MODULE_ERROR_ABORT); STR_CASE(CELL_SPURS_POLICY_MODULE_ERROR_FAULT); STR_CASE(CELL_SPURS_POLICY_MODULE_ERROR_CHILD); 
STR_CASE(CELL_SPURS_POLICY_MODULE_ERROR_STAT); STR_CASE(CELL_SPURS_POLICY_MODULE_ERROR_ALIGN); STR_CASE(CELL_SPURS_POLICY_MODULE_ERROR_NULL_POINTER); } return unknown; }); } template <> void fmt_class_string<CellSpursTaskError>::format(std::string& out, u64 arg) { format_enum(out, arg, [](auto error) { switch (error) { STR_CASE(CELL_SPURS_TASK_ERROR_AGAIN); STR_CASE(CELL_SPURS_TASK_ERROR_INVAL); STR_CASE(CELL_SPURS_TASK_ERROR_NOSYS); STR_CASE(CELL_SPURS_TASK_ERROR_NOMEM); STR_CASE(CELL_SPURS_TASK_ERROR_SRCH); STR_CASE(CELL_SPURS_TASK_ERROR_NOEXEC); STR_CASE(CELL_SPURS_TASK_ERROR_PERM); STR_CASE(CELL_SPURS_TASK_ERROR_BUSY); STR_CASE(CELL_SPURS_TASK_ERROR_FAULT); STR_CASE(CELL_SPURS_TASK_ERROR_ALIGN); STR_CASE(CELL_SPURS_TASK_ERROR_STAT); STR_CASE(CELL_SPURS_TASK_ERROR_NULL_POINTER); STR_CASE(CELL_SPURS_TASK_ERROR_FATAL); STR_CASE(CELL_SPURS_TASK_ERROR_SHUTDOWN); } return unknown; }); } template <> void fmt_class_string<CellSpursJobError>::format(std::string& out, u64 arg) { format_enum(out, arg, [](auto error) { switch (error) { STR_CASE(CELL_SPURS_JOB_ERROR_AGAIN); STR_CASE(CELL_SPURS_JOB_ERROR_INVAL); STR_CASE(CELL_SPURS_JOB_ERROR_NOSYS); STR_CASE(CELL_SPURS_JOB_ERROR_NOMEM); STR_CASE(CELL_SPURS_JOB_ERROR_SRCH); STR_CASE(CELL_SPURS_JOB_ERROR_NOENT); STR_CASE(CELL_SPURS_JOB_ERROR_NOEXEC); STR_CASE(CELL_SPURS_JOB_ERROR_DEADLK); STR_CASE(CELL_SPURS_JOB_ERROR_PERM); STR_CASE(CELL_SPURS_JOB_ERROR_BUSY); STR_CASE(CELL_SPURS_JOB_ERROR_JOB_DESCRIPTOR); STR_CASE(CELL_SPURS_JOB_ERROR_JOB_DESCRIPTOR_SIZE); STR_CASE(CELL_SPURS_JOB_ERROR_FAULT); STR_CASE(CELL_SPURS_JOB_ERROR_CHILD); STR_CASE(CELL_SPURS_JOB_ERROR_STAT); STR_CASE(CELL_SPURS_JOB_ERROR_ALIGN); STR_CASE(CELL_SPURS_JOB_ERROR_NULL_POINTER); STR_CASE(CELL_SPURS_JOB_ERROR_MEMORY_CORRUPTED); STR_CASE(CELL_SPURS_JOB_ERROR_MEMORY_SIZE); STR_CASE(CELL_SPURS_JOB_ERROR_UNKNOWN_COMMAND); STR_CASE(CELL_SPURS_JOB_ERROR_JOBLIST_ALIGNMENT); STR_CASE(CELL_SPURS_JOB_ERROR_JOB_ALIGNMENT); 
STR_CASE(CELL_SPURS_JOB_ERROR_CALL_OVERFLOW); STR_CASE(CELL_SPURS_JOB_ERROR_ABORT); STR_CASE(CELL_SPURS_JOB_ERROR_DMALIST_ELEMENT); STR_CASE(CELL_SPURS_JOB_ERROR_NUM_CACHE); STR_CASE(CELL_SPURS_JOB_ERROR_INVALID_BINARY); } return unknown; }); } template <> void fmt_class_string<SpursWorkloadState>::format(std::string& out, u64 arg) { format_enum(out, arg, [](auto error) { switch (error) { case SPURS_WKL_STATE_NON_EXISTENT: return "Non-existent"; case SPURS_WKL_STATE_PREPARING: return "Preparing"; case SPURS_WKL_STATE_RUNNABLE: return "Runnable"; case SPURS_WKL_STATE_SHUTTING_DOWN: return "In-shutdown"; case SPURS_WKL_STATE_REMOVABLE: return "Removable"; case SPURS_WKL_STATE_INVALID: break; } return unknown; }); } error_code sys_spu_image_close(ppu_thread&, vm::ptr<sys_spu_image> img); // Temporarily #ifndef _MSC_VER #pragma GCC diagnostic ignored "-Wunused-parameter" #endif //---------------------------------------------------------------------------- // Function prototypes //---------------------------------------------------------------------------- bool spursKernelEntry(spu_thread& spu); // SPURS Internals namespace _spurs { // Get the version of SDK used by this process s32 get_sdk_version(); // Check whether libprof is loaded bool is_libprof_loaded(); // Create an LV2 event queue and attach it to the SPURS instance s32 create_lv2_eq(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, vm::ptr<u32> queueId, vm::ptr<u8> port, s32 size, const sys_event_queue_attribute_t& name); // Attach an LV2 event queue to the SPURS instance s32 attach_lv2_eq(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 queue, vm::ptr<u8> port, s32 isDynamic, bool spursCreated); // Detach an LV2 event queue from the SPURS instance s32 detach_lv2_eq(vm::ptr<CellSpurs> spurs, u8 spuPort, bool spursCreated); // Wait until a workload in the SPURS instance becomes ready void handler_wait_ready(ppu_thread& ppu, vm::ptr<CellSpurs> spurs); // Entry point of the SPURS handler thread. 
This thread is responsible for starting the SPURS SPU thread group. void handler_entry(ppu_thread& ppu, vm::ptr<CellSpurs> spurs); // Create the SPURS handler thread s32 create_handler(vm::ptr<CellSpurs> spurs, u32 ppuPriority); // Invoke event handlers s32 invoke_event_handlers(ppu_thread& ppu, vm::ptr<CellSpurs::EventPortMux> eventPortMux); // Invoke workload shutdown completion callbacks s32 wakeup_shutdown_completion_waiter(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 wid); // Entry point of the SPURS event helper thread void event_helper_entry(ppu_thread& ppu, vm::ptr<CellSpurs> spurs); // Create the SPURS event helper thread s32 create_event_helper(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 ppuPriority); // Initialise the event port multiplexor structure void init_event_port_mux(vm::ptr<CellSpurs::EventPortMux> eventPortMux, u8 spuPort, u32 eventPort, u32 unknown); // Enable the system workload s32 add_default_syswkl(vm::ptr<CellSpurs> spurs, vm::cptr<u8> swlPriority, u32 swlMaxSpu, u32 swlIsPreem); // Destroy the SPURS SPU threads and thread group s32 finalize_spu(ppu_thread&, vm::ptr<CellSpurs> spurs); // Stop the event helper thread s32 stop_event_helper(ppu_thread& ppu, vm::ptr<CellSpurs> spurs); // Signal to the SPURS handler thread s32 signal_to_handler_thread(ppu_thread& ppu, vm::ptr<CellSpurs> spurs); // Join the SPURS handler thread s32 join_handler_thread(ppu_thread& ppu, vm::ptr<CellSpurs> spurs); // Initialise SPURS s32 initialize(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 revision, u32 sdkVersion, s32 nSpus, s32 spuPriority, s32 ppuPriority, u32 flags, vm::cptr<char> prefix, u32 prefixSize, u32 container, vm::cptr<u8> swlPriority, u32 swlMaxSpu, u32 swlIsPreem); } // // SPURS Core Functions // //s32 cellSpursInitialize(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, s32 nSpus, s32 spuPriority, s32 ppuPriority, b8 exitIfNoWork); //s32 cellSpursInitializeWithAttribute(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, vm::cptr<CellSpursAttribute> 
attr); //s32 cellSpursInitializeWithAttribute2(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, vm::cptr<CellSpursAttribute> attr); //s32 _cellSpursAttributeInitialize(vm::ptr<CellSpursAttribute> attr, u32 revision, u32 sdkVersion, u32 nSpus, s32 spuPriority, s32 ppuPriority, b8 exitIfNoWork); //s32 cellSpursAttributeSetMemoryContainerForSpuThread(vm::ptr<CellSpursAttribute> attr, u32 container); //s32 cellSpursAttributeSetNamePrefix(vm::ptr<CellSpursAttribute> attr, vm::cptr<char> prefix, u32 size); //s32 cellSpursAttributeEnableSpuPrintfIfAvailable(vm::ptr<CellSpursAttribute> attr); //s32 cellSpursAttributeSetSpuThreadGroupType(vm::ptr<CellSpursAttribute> attr, s32 type); //s32 cellSpursAttributeEnableSystemWorkload(vm::ptr<CellSpursAttribute> attr, vm::cptr<u8[8]> priority, u32 maxSpu, vm::cptr<b8[8]> isPreemptible); //s32 cellSpursFinalize(vm::ptr<CellSpurs> spurs); //s32 cellSpursGetSpuThreadGroupId(vm::ptr<CellSpurs> spurs, vm::ptr<u32> group); //s32 cellSpursGetNumSpuThread(vm::ptr<CellSpurs> spurs, vm::ptr<u32> nThreads); //s32 cellSpursGetSpuThreadId(vm::ptr<CellSpurs> spurs, vm::ptr<u32> thread, vm::ptr<u32> nThreads); //s32 cellSpursSetMaxContention(vm::ptr<CellSpurs> spurs, u32 wid, u32 maxContention); //s32 cellSpursSetPriorities(vm::ptr<CellSpurs> spurs, u32 wid, vm::cptr<u8> priorities); //s32 cellSpursSetPreemptionVictimHints(vm::ptr<CellSpurs> spurs, vm::cptr<b8> isPreemptible); //s32 cellSpursAttachLv2EventQueue(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 queue, vm::ptr<u8> port, s32 isDynamic); //s32 cellSpursDetachLv2EventQueue(vm::ptr<CellSpurs> spurs, u8 port); // Enable the SPU exception event handler s32 cellSpursEnableExceptionEventHandler(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, b8 flag); //s32 cellSpursSetGlobalExceptionEventHandler(vm::ptr<CellSpurs> spurs, vm::ptr<CellSpursGlobalExceptionEventHandler> eaHandler, vm::ptr<void> arg); //s32 cellSpursUnsetGlobalExceptionEventHandler(vm::ptr<CellSpurs> spurs); //s32 
cellSpursGetInfo(vm::ptr<CellSpurs> spurs, vm::ptr<CellSpursInfo> info); // // SPURS SPU GUID functions // //s32 cellSpursGetSpuGuid(); // // SPURS trace functions // namespace _spurs { // Signal SPUs to update trace status void trace_status_update(ppu_thread& ppu, vm::ptr<CellSpurs> spurs); // Initialize SPURS trace s32 trace_initialize(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, vm::ptr<CellSpursTraceInfo> buffer, u32 size, u32 mode, u32 updateStatus); // Start SPURS trace s32 trace_start(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 updateStatus); // Stop SPURS trace s32 trace_stop(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 updateStatus); } //s32 cellSpursTraceInitialize(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, vm::ptr<CellSpursTraceInfo> buffer, u32 size, u32 mode); //s32 cellSpursTraceFinalize(ppu_thread& ppu, vm::ptr<CellSpurs> spurs); //s32 cellSpursTraceStart(ppu_thread& ppu, vm::ptr<CellSpurs> spurs); //s32 cellSpursTraceStop(ppu_thread& ppu, vm::ptr<CellSpurs> spurs); // // SPURS policy module functions // namespace _spurs { // Add workload s32 add_workload(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, vm::ptr<u32> wid, vm::cptr<void> pm, u32 size, u64 data, const u8(&priorityTable)[8], u32 minContention, u32 maxContention, vm::cptr<char> nameClass, vm::cptr<char> nameInstance, vm::ptr<CellSpursShutdownCompletionEventHook> hook, vm::ptr<void> hookArg); } //s32 _cellSpursWorkloadAttributeInitialize(vm::ptr<CellSpursWorkloadAttribute> attr, u32 revision, u32 sdkVersion, vm::cptr<void> pm, u32 size, u64 data, vm::cptr<u8[8]> priority, u32 minCnt, u32 maxCnt); //s32 cellSpursWorkloadAttributeSetName(vm::ptr<CellSpursWorkloadAttribute> attr, vm::cptr<char> nameClass, vm::cptr<char> nameInstance); //s32 cellSpursWorkloadAttributeSetShutdownCompletionEventHook(vm::ptr<CellSpursWorkloadAttribute> attr, vm::ptr<CellSpursShutdownCompletionEventHook> hook, vm::ptr<void> arg); //s32 cellSpursAddWorkload(vm::ptr<CellSpurs> spurs, vm::ptr<u32> wid, vm::cptr<void> 
pm, u32 size, u64 data, vm::cptr<u8[8]> priority, u32 minCnt, u32 maxCnt); //s32 cellSpursAddWorkloadWithAttribute(vm::ptr<CellSpurs> spurs, vm::ptr<u32> wid, vm::cptr<CellSpursWorkloadAttribute> attr); //s32 cellSpursShutdownWorkload(); //s32 cellSpursWaitForWorkloadShutdown(); //s32 cellSpursRemoveWorkload(); // Activate the SPURS kernel s32 cellSpursWakeUp(ppu_thread& ppu, vm::ptr<CellSpurs> spurs); s32 cellSpursSendWorkloadSignal(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 wid); //s32 cellSpursGetWorkloadFlag(vm::ptr<CellSpurs> spurs, vm::pptr<CellSpursWorkloadFlag> flag); s32 cellSpursReadyCountStore(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 wid, u32 value); s32 cellSpursReadyCountSwap(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 wid, vm::ptr<u32> old, u32 swap); s32 cellSpursReadyCountCompareAndSwap(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 wid, vm::ptr<u32> old, u32 compare, u32 swap); s32 cellSpursReadyCountAdd(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 wid, vm::ptr<u32> old, s32 value); //s32 cellSpursGetWorkloadData(vm::ptr<CellSpurs> spurs, vm::ptr<u64> data, u32 wid); //s32 cellSpursGetWorkloadInfo(); //s32 cellSpursSetExceptionEventHandler(); //s32 cellSpursUnsetExceptionEventHandler(); s32 _cellSpursWorkloadFlagReceiver(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 wid, u32 is_set); //s32 _cellSpursWorkloadFlagReceiver2(); //error_code cellSpursRequestIdleSpu(); // // SPURS taskset functions // namespace _spurs { // Create taskset s32 create_taskset(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, vm::ptr<CellSpursTaskset> taskset, u64 args, vm::cptr<u8[8]> priority, u32 max_contention, vm::cptr<char> name, u32 size, s32 enable_clear_ls); } //s32 cellSpursCreateTasksetWithAttribute(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, vm::ptr<CellSpursTaskset> taskset, vm::ptr<CellSpursTasksetAttribute> attr); //s32 cellSpursCreateTaskset(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, vm::ptr<CellSpursTaskset> taskset, u64 args, vm::cptr<u8[8]> 
priority, u32 maxContention); //s32 cellSpursJoinTaskset(vm::ptr<CellSpursTaskset> taskset); //s32 cellSpursGetTasksetId(vm::ptr<CellSpursTaskset> taskset, vm::ptr<u32> wid); //s32 cellSpursShutdownTaskset(vm::ptr<CellSpursTaskset> taskset); //s32 cellSpursTasksetAttributeSetName(vm::ptr<CellSpursTasksetAttribute> attr, vm::cptr<char> name); //s32 cellSpursTasksetAttributeSetTasksetSize(vm::ptr<CellSpursTasksetAttribute> attr, u32 size); //s32 cellSpursTasksetAttributeEnableClearLS(vm::ptr<CellSpursTasksetAttribute> attr, s32 enable); //s32 _cellSpursTasksetAttribute2Initialize(vm::ptr<CellSpursTasksetAttribute2> attribute, u32 revision); //s32 cellSpursCreateTaskset2(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, vm::ptr<CellSpursTaskset> taskset, vm::ptr<CellSpursTasksetAttribute2> attr); //s32 cellSpursDestroyTaskset2(); //s32 cellSpursTasksetSetExceptionEventHandler(vm::ptr<CellSpursTaskset> taskset, vm::ptr<CellSpursTasksetExceptionEventHandler> handler, vm::ptr<u64> arg); //s32 cellSpursTasksetUnsetExceptionEventHandler(vm::ptr<CellSpursTaskset> taskset); // Get taskset instance from the workload ID s32 cellSpursLookUpTasksetAddress(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, vm::pptr<CellSpursTaskset> taskset, u32 id); //s32 cellSpursTasksetGetSpursAddress(vm::cptr<CellSpursTaskset> taskset, vm::ptr<u32> spurs); //s32 cellSpursGetTasksetInfo(); //s32 _cellSpursTasksetAttributeInitialize(vm::ptr<CellSpursTasksetAttribute> attribute, u32 revision, u32 sdk_version, u64 args, vm::cptr<u8> priority, u32 max_contention); // // SPURS task functions // namespace _spurs { // Create task s32 create_task(vm::ptr<CellSpursTaskset> taskset, vm::ptr<u32> task_id, vm::cptr<void> elf, vm::cptr<void> context, u32 size, vm::ptr<CellSpursTaskLsPattern> ls_pattern, vm::ptr<CellSpursTaskArgument> arg); // Start task s32 task_start(ppu_thread& ppu, vm::ptr<CellSpursTaskset> taskset, u32 taskId); } //s32 cellSpursCreateTask(ppu_thread& ppu, vm::ptr<CellSpursTaskset> taskset, 
vm::ptr<u32> taskId, vm::cptr<void> elf, vm::cptr<void> context, u32 size, vm::ptr<CellSpursTaskLsPattern> lsPattern, vm::ptr<CellSpursTaskArgument> argument); // Sends a signal to the task s32 _cellSpursSendSignal(ppu_thread& ppu, vm::ptr<CellSpursTaskset> taskset, u32 taskId); //s32 cellSpursCreateTaskWithAttribute(); //s32 cellSpursTaskExitCodeGet(); //s32 cellSpursTaskExitCodeInitialize(); //s32 cellSpursTaskExitCodeTryGet(); //s32 cellSpursTaskGetLoadableSegmentPattern(); //s32 cellSpursTaskGetReadOnlyAreaPattern(); //s32 cellSpursTaskGenerateLsPattern(); //s32 _cellSpursTaskAttributeInitialize(); //s32 cellSpursTaskAttributeSetExitCodeContainer(); //s32 _cellSpursTaskAttribute2Initialize(vm::ptr<CellSpursTaskAttribute2> attribute, u32 revision); //s32 cellSpursTaskGetContextSaveAreaSize(); //s32 cellSpursCreateTask2(); //s32 cellSpursJoinTask2(); //s32 cellSpursTryJoinTask2(); //s32 cellSpursCreateTask2WithBinInfo(); // // SPURS event flag functions // namespace _spurs { // Wait for SPURS event flag s32 event_flag_wait(ppu_thread& ppu, vm::ptr<CellSpursEventFlag> eventFlag, vm::ptr<u16> mask, u32 mode, u32 block); } //s32 _cellSpursEventFlagInitialize(vm::ptr<CellSpurs> spurs, vm::ptr<CellSpursTaskset> taskset, vm::ptr<CellSpursEventFlag> eventFlag, u32 flagClearMode, u32 flagDirection); //s32 cellSpursEventFlagClear(vm::ptr<CellSpursEventFlag> eventFlag, u16 bits); //s32 cellSpursEventFlagSet(ppu_thread& ppu, vm::ptr<CellSpursEventFlag> eventFlag, u16 bits); //s32 cellSpursEventFlagWait(ppu_thread& ppu, vm::ptr<CellSpursEventFlag> eventFlag, vm::ptr<u16> mask, u32 mode); //s32 cellSpursEventFlagTryWait(ppu_thread& ppu, vm::ptr<CellSpursEventFlag> eventFlag, vm::ptr<u16> mask, u32 mode); //s32 cellSpursEventFlagAttachLv2EventQueue(ppu_thread& ppu, vm::ptr<CellSpursEventFlag> eventFlag); //s32 cellSpursEventFlagDetachLv2EventQueue(ppu_thread& ppu, vm::ptr<CellSpursEventFlag> eventFlag); //s32 cellSpursEventFlagGetDirection(vm::ptr<CellSpursEventFlag> 
eventFlag, vm::ptr<u32> direction); //s32 cellSpursEventFlagGetClearMode(vm::ptr<CellSpursEventFlag> eventFlag, vm::ptr<u32> clear_mode); //s32 cellSpursEventFlagGetTasksetAddress(vm::ptr<CellSpursEventFlag> eventFlag, vm::pptr<CellSpursTaskset> taskset); // // SPURS lock free queue functions // //s32 _cellSpursLFQueueInitialize(vm::ptr<void> pTasksetOrSpurs, vm::ptr<CellSpursLFQueue> pQueue, vm::cptr<void> buffer, u32 size, u32 depth, u32 direction); //s32 _cellSpursLFQueuePushBody(); //s32 cellSpursLFQueueAttachLv2EventQueue(vm::ptr<CellSyncLFQueue> queue); //s32 cellSpursLFQueueDetachLv2EventQueue(vm::ptr<CellSyncLFQueue> queue); //s32 _cellSpursLFQueuePopBody(); //s32 cellSpursLFQueueGetTasksetAddress(); // // SPURS queue functions // //s32 _cellSpursQueueInitialize(); //s32 cellSpursQueuePopBody(); //s32 cellSpursQueuePushBody(); //s32 cellSpursQueueAttachLv2EventQueue(); //s32 cellSpursQueueDetachLv2EventQueue(); //s32 cellSpursQueueGetTasksetAddress(); //s32 cellSpursQueueClear(); //s32 cellSpursQueueDepth(); //s32 cellSpursQueueGetEntrySize(); //s32 cellSpursQueueSize(); //s32 cellSpursQueueGetDirection(); // // SPURS barrier functions // //s32 cellSpursBarrierInitialize(); //s32 cellSpursBarrierGetTasksetAddress(); // // SPURS semaphore functions // //s32 _cellSpursSemaphoreInitialize(); //s32 cellSpursSemaphoreGetTasksetAddress(); // // SPURS job chain functions // namespace _spurs { s32 check_job_chain_attribute(u32 sdkVer, vm::cptr<u64> jcEntry, u16 sizeJobDescr, u16 maxGrabbedJob , u64 priorities, u32 maxContention, u8 autoSpuCount, u32 tag1, u32 tag2 , u8 isFixedMemAlloc, u32 maxSizeJob, u32 initSpuCount); s32 create_job_chain(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, vm::ptr<CellSpursJobChain> jobChain, vm::cptr<u64> jobChainEntry, u16 sizeJob , u16 maxGrabbedJob, vm::cptr<u8[8]> prio, u32 maxContention, b8 autoReadyCount , u32 tag1, u32 tag2, u32 HaltOnError, vm::cptr<char> name, u32 param_13, u32 param_14); } //s32 
cellSpursCreateJobChainWithAttribute(); //s32 cellSpursCreateJobChain(); //s32 cellSpursJoinJobChain(); s32 cellSpursKickJobChain(ppu_thread& ppu, vm::ptr<CellSpursJobChain> jobChain, u8 numReadyCount); //s32 _cellSpursJobChainAttributeInitialize(); //s32 cellSpursGetJobChainId(); //s32 cellSpursJobChainSetExceptionEventHandler(); //s32 cellSpursJobChainUnsetExceptionEventHandler(); //s32 cellSpursGetJobChainInfo(); //s32 cellSpursJobChainGetSpursAddress(); //s32 cellSpursJobGuardInitialize(); //s32 cellSpursJobChainAttributeSetName(); //s32 cellSpursShutdownJobChain(); //s32 cellSpursJobChainAttributeSetHaltOnError(); //s32 cellSpursJobChainAttributeSetJobTypeMemoryCheck(); //s32 cellSpursJobGuardNotify(); //s32 cellSpursJobGuardReset(); s32 cellSpursRunJobChain(ppu_thread& ppu, vm::ptr<CellSpursJobChain> jobChain); //s32 cellSpursJobChainGetError(); //s32 cellSpursGetJobPipelineInfo(); //s32 cellSpursJobSetMaxGrab(); //s32 cellSpursJobHeaderSetJobbin2Param(); //s32 cellSpursAddUrgentCommand(); //s32 cellSpursAddUrgentCall(); //---------------------------------------------------------------------------- // SPURS utility functions //---------------------------------------------------------------------------- s32 _spurs::get_sdk_version() { const s32 version = static_cast<s32>(g_ps3_process_info.sdk_ver); return version == -1 ? 
0x485000 : version; } bool _spurs::is_libprof_loaded() { return false; } //---------------------------------------------------------------------------- // SPURS core functions //---------------------------------------------------------------------------- s32 _spurs::create_lv2_eq(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, vm::ptr<u32> queueId, vm::ptr<u8> port, s32 size, const sys_event_queue_attribute_t& attr) { if (s32 rc = sys_event_queue_create(ppu, queueId, vm::make_var(attr), SYS_EVENT_QUEUE_LOCAL, size)) { static_cast<void>(ppu.test_stopped()); return rc; } if (_spurs::attach_lv2_eq(ppu, spurs, *queueId, port, 1, true)) { sys_event_queue_destroy(ppu, *queueId, SYS_EVENT_QUEUE_DESTROY_FORCE); static_cast<void>(ppu.test_stopped()); } return CELL_OK; } s32 _spurs::attach_lv2_eq(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 queue, vm::ptr<u8> port, s32 isDynamic, bool spursCreated) { if (!spurs || !port) { return CELL_SPURS_CORE_ERROR_NULL_POINTER; } if (!spurs.aligned()) { return CELL_SPURS_CORE_ERROR_ALIGN; } if (spurs->exception) { return CELL_SPURS_CORE_ERROR_STAT; } u8 _port = 0x3f; u64 portMask = 0; if (isDynamic == 0) { _port = *port; if (_port > 0x3f) { return CELL_SPURS_CORE_ERROR_INVAL; } if (_spurs::get_sdk_version() >= 0x180000 && _port > 0xf) { return CELL_SPURS_CORE_ERROR_PERM; } } for (u32 i = isDynamic ? 
// Tail of _spurs::attach_lv2_eq (definition starts before this chunk):
// build the SPU port mask, connect the LV2 queue to all SPU threads in the group,
// and record the attached port bit.
0x10 : _port; i <= _port; i++)
{
	portMask |= 1ull << (i);
}

if (s32 res = sys_spu_thread_group_connect_event_all_threads(ppu, spurs->spuTG, queue, portMask, port))
{
	// EISCONN from lv2 means the requested port is already connected
	if (res + 0u == CELL_EISCONN)
	{
		return CELL_SPURS_CORE_ERROR_BUSY;
	}

	return res;
}

if (!spursCreated)
{
	// Remember the attached port so it can be detached later
	spurs->spuPortBits |= 1ull << *port;
}

return CELL_OK;
}

// Detach an LV2 event queue port from a SPURS instance.
// spursCreated: true when called internally during SPURS setup/teardown (skips state checks).
s32 _spurs::detach_lv2_eq(vm::ptr<CellSpurs> spurs, u8 spuPort, bool spursCreated)
{
	if (!spurs)
	{
		return CELL_SPURS_CORE_ERROR_NULL_POINTER;
	}

	if (!spurs.aligned())
	{
		return CELL_SPURS_CORE_ERROR_ALIGN;
	}

	if (!spursCreated && spurs->exception)
	{
		return CELL_SPURS_CORE_ERROR_STAT;
	}

	if (spuPort > 0x3F)
	{
		return CELL_SPURS_CORE_ERROR_INVAL;
	}

	if (!spursCreated)
	{
		// Only report ESRCH for a not-attached port on SDK >= 1.80
		if (!spurs->spuPortBits.bit_test_reset(spuPort) && _spurs::get_sdk_version() >= 0x180000)
		{
			return CELL_SPURS_CORE_ERROR_SRCH;
		}
	}

	return CELL_OK;
}

// Block the SPURS handler thread until at least one workload becomes runnable
// (or the handler is asked to exit). Runs under the SPURS lwmutex/lwcond pair.
void _spurs::handler_wait_ready(ppu_thread& ppu, vm::ptr<CellSpurs> spurs)
{
	ensure(ppu_execute<&sys_lwmutex_lock>(ppu, spurs.ptr(&CellSpurs::mutex), 0) == 0);
	static_cast<void>(ppu.test_stopped());

	while (true)
	{
		if (spurs->handlerExiting)
		{
			ensure(ppu_execute<&sys_lwmutex_unlock>(ppu, spurs.ptr(&CellSpurs::mutex)) == 0);

			return sys_ppu_thread_exit(ppu, 0);
		}

		// Find a runnable workload
		spurs->handlerDirty = 0;

		if (spurs->exception == 0u)
		{
			bool foundRunnableWorkload = false;

			// First 16 workloads: runnable state, non-zero priority, low contention nibble set
			for (u32 i = 0; i < 16; i++)
			{
				if (spurs->wklState1[i] == SPURS_WKL_STATE_RUNNABLE && std::bit_cast<u64>(spurs->wklInfo1[i].priority) != 0 && spurs->wklMaxContention[i] & 0x0F)
				{
					if (spurs->wklReadyCount1[i] || spurs->wklSignal1.load() & (0x8000u >> i) || (spurs->wklFlag.flag.load() == 0u && spurs->wklFlagReceiver == static_cast<u8>(i)))
					{
						foundRunnableWorkload = true;
						break;
					}
				}
			}

			// Second set of 16 workloads (SPURS2 instances only)
			if (spurs->flags1 & SF1_32_WORKLOADS)
			{
				for (u32 i = 0; i < 16; i++)
				{
					if (spurs->wklState2[i] == SPURS_WKL_STATE_RUNNABLE && std::bit_cast<u64>(spurs->wklInfo2[i].priority) != 0 && spurs->wklMaxContention[i] & 0xF0)
					{
						if (spurs->wklIdleSpuCountOrReadyCount2[i] || spurs->wklSignal2.load() & (0x8000u >> i) || (spurs->wklFlag.flag.load() == 0u && spurs->wklFlagReceiver == static_cast<u8>(i) + 0x10))
						{
							foundRunnableWorkload = true;
							break;
						}
					}
				}
			}

			if (foundRunnableWorkload)
			{
				break;
			}
		}

		// If we reach here it means there are no runnable workloads in this SPURS instance.
		// Wait until some workload becomes ready.
		spurs->handlerWaiting = 1;

		if (spurs->handlerDirty == 0)
		{
			ensure(ppu_execute<&sys_lwcond_wait>(ppu, spurs.ptr(&CellSpurs::cond), 0) == 0);
			static_cast<void>(ppu.test_stopped());
		}

		spurs->handlerWaiting = 0;
	}

	// If we reach here then a runnable workload was found
	ensure(ppu_execute<&sys_lwmutex_unlock>(ppu, spurs.ptr(&CellSpurs::mutex)) == 0);
	static_cast<void>(ppu.test_stopped());
}

// Entry point of the SPURS handler PPU thread:
// repeatedly starts the SPU thread group and joins it when it stops.
void _spurs::handler_entry(ppu_thread& ppu, vm::ptr<CellSpurs> spurs)
{
	if (spurs->flags & SAF_UNKNOWN_FLAG_30)
	{
		return sys_ppu_thread_exit(ppu, 0);
	}

	while (true)
	{
		if (spurs->flags1 & SF1_EXIT_IF_NO_WORK)
		{
			_spurs::handler_wait_ready(ppu, spurs);
		}

		ensure(sys_spu_thread_group_start(ppu, spurs->spuTG) == 0);

		const s32 rc = sys_spu_thread_group_join(ppu, spurs->spuTG, vm::null, vm::null);
		static_cast<void>(ppu.test_stopped());

		if (rc + 0u != CELL_EFAULT)
		{
			if (rc + 0u == CELL_ESTAT)
			{
				return sys_ppu_thread_exit(ppu, 0);
			}

			// Any result other than EFAULT/ESTAT is unexpected here
			ensure(rc + 0u == CELL_EFAULT);
		}

		if ((spurs->flags1 & SF1_EXIT_IF_NO_WORK) == 0)
		{
			ensure((spurs->handlerExiting == 1));

			return sys_ppu_thread_exit(ppu, 0);
		}
	}
}

// Create the SPURS handler PPU thread.
// NOTE(review): the actual thread creation is commented out — this HLE stub
// only returns CELL_OK; spurs->ppu0 stays at its initialized value.
s32 _spurs::create_handler(vm::ptr<CellSpurs> spurs, u32 ppuPriority)
{
	struct handler_thread : ppu_thread
	{
		using ppu_thread::ppu_thread;

		void non_task()
		{
			//BIND_FUNC(_spurs::handler_entry)(*this);
		}
	};

	// auto eht = idm::make_ptr<ppu_thread, handler_thread>(std::string(spurs->prefix, spurs->prefixSize) + "SpursHdlr0", ppuPriority, 0x4000);
	// spurs->ppu0 = eht->id;
	// eht->gpr[3] = spurs.addr();
	// eht->run();

	return CELL_OK;
}

// Invoke every handler registered on an event port multiplexor
// (head of definition — the loop body continues in the next chunk).
s32 _spurs::invoke_event_handlers(ppu_thread& ppu, vm::ptr<CellSpurs::EventPortMux> eventPortMux)
{
	if (eventPortMux->reqPending.exchange(0))
	{
		for (auto node =
// Continuation of _spurs::invoke_event_handlers:
// atomically detach the handler list and call each registered handler once.
eventPortMux->handlerList.exchange(vm::null); node; node = node->next)
		{
			node->handler(ppu, eventPortMux, node->data);
		}
	}

	return CELL_OK;
}

// Wake up the thread waiting for completion of a workload shutdown.
// Posts the workload's semaphore and/or invokes its shutdown hook.
s32 _spurs::wakeup_shutdown_completion_waiter(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 wid)
{
	if (!spurs)
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_NULL_POINTER;
	}

	if (!spurs.aligned())
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_ALIGN;
	}

	if (wid >= spurs->max_workloads())
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_INVAL;
	}

	if ((spurs->wklEnabled.load() & (0x80000000u >> wid)) == 0u)
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_SRCH;
	}

	if (spurs->wklState(wid) != SPURS_WKL_STATE_REMOVABLE)
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_STAT;
	}

	// Workloads 0-15 live in wklF1, 16-31 in wklF2
	const auto wklF = wid < CELL_SPURS_MAX_WORKLOAD ? &spurs->wklF1[wid] : &spurs->wklF2[wid & 0x0F];
	const auto wklEvent = &spurs->wklEvent(wid);

	if (wklF->hook)
	{
		// Invoke the shutdown completion hook registered for this workload
		wklF->hook(ppu, spurs, wid, wklF->hookArg);

		ensure((wklEvent->load() & 0x01));
		ensure((wklEvent->load() & 0x02));
		ensure((wklEvent->load() & 0x20) == 0);
		wklEvent->fetch_or(0x20);
	}

	s32 rc = CELL_OK;

	if (!wklF->hook || wklEvent->load() & 0x10)
	{
		ensure((wklF->x28 == 2u));
		rc = sys_semaphore_post(ppu, static_cast<u32>(wklF->sem), 1);
		static_cast<void>(ppu.test_stopped());
	}

	return rc;
}

// Entry point of the SPURS event helper PPU thread:
// receives events from the SPURS event queue and dispatches them
// (SPU exceptions, shutdown notifications, semaphore posts, port mux requests).
void _spurs::event_helper_entry(ppu_thread& ppu, vm::ptr<CellSpurs> spurs)
{
	vm::var<sys_event_t[]> events(8);
	vm::var<u32> count;

	while (true)
	{
		ensure(sys_event_queue_receive(ppu, spurs->eventQueue, vm::null, 0) == 0);
		static_cast<void>(ppu.test_stopped());

		// sys_event_queue_receive returns the event in GPR 4-7
		const u64 event_src = ppu.gpr[4];
		const u64 event_data1 = ppu.gpr[5];
		const u64 event_data2 = ppu.gpr[6];
		const u64 event_data3 = ppu.gpr[7];

		if (event_src == SYS_SPU_THREAD_EVENT_EXCEPTION_KEY)
		{
			// SPU exception: mark the instance and release all shutdown waiters
			spurs->exception = 1;

			events[0].source = event_src;
			events[0].data1 = event_data1;
			events[0].data2 = event_data2;
			events[0].data3 = event_data3;

			if (sys_event_queue_tryreceive(ppu, spurs->eventQueue, events + 1, 7, count) != CELL_OK)
			{
				continue;
			}

			// TODO: Examine LS and dump exception details

			for (u32 i = 0; i < CELL_SPURS_MAX_WORKLOAD; i++)
			{
				sys_semaphore_post(ppu, static_cast<u32>(spurs->wklF1[i].sem), 1);

				if (spurs->flags1 & SF1_32_WORKLOADS)
				{
					sys_semaphore_post(ppu, static_cast<u32>(spurs->wklF2[i].sem), 1);
				}
			}

			static_cast<void>(ppu.test_stopped());
		}
		else
		{
			const u32 data0 = event_data2 & 0x00FFFFFF;

			if (data0 == 1)
			{
				// Request to terminate the event helper thread
				return;
			}
			else if (data0 < 1)
			{
				// Workload shutdown completed: data3 carries a mask of finished workloads
				const u32 shutdownMask = static_cast<u32>(event_data3);

				for (u32 wid = 0; wid < CELL_SPURS_MAX_WORKLOAD; wid++)
				{
					if (shutdownMask & (0x80000000u >> wid))
					{
						ensure(_spurs::wakeup_shutdown_completion_waiter(ppu, spurs, wid) == 0);
					}

					if ((spurs->flags1 & SF1_32_WORKLOADS) && (shutdownMask & (0x8000 >> wid)))
					{
						ensure(_spurs::wakeup_shutdown_completion_waiter(ppu, spurs, wid + 0x10) == 0);
					}
				}
			}
			else if (data0 == 2)
			{
				ensure(sys_semaphore_post(ppu, static_cast<u32>(spurs->semPrv), 1) == 0);
				static_cast<void>(ppu.test_stopped());
			}
			else if (data0 == 3)
			{
				ensure(_spurs::invoke_event_handlers(ppu, spurs.ptr(&CellSpurs::eventPortMux)) == 0);
			}
			else
			{
				fmt::throw_exception("data0=0x%x", data0);
			}
		}
	}
}

// Create the SPURS event queue, local event port and event helper thread
// (continues in the next chunk).
s32 _spurs::create_event_helper(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 ppuPriority)
{
	if (s32 rc = _spurs::create_lv2_eq(ppu, spurs, spurs.ptr(&CellSpurs::eventQueue), spurs.ptr(&CellSpurs::spuPort), 0x2A, sys_event_queue_attribute_t{SYS_SYNC_PRIORITY, SYS_PPU_QUEUE, {"_spuPrv\0"_u64}}))
	{
		return rc;
	}

	if (sys_event_port_create(ppu, spurs.ptr(&CellSpurs::eventPort), SYS_EVENT_PORT_LOCAL, SYS_EVENT_PORT_NO_NAME))
	{
		// Roll back the queue attachment on failure
		if (_spurs::detach_lv2_eq(spurs, spurs->spuPort, true))
		{
			return CELL_SPURS_CORE_ERROR_AGAIN;
		}

		sys_event_queue_destroy(ppu, spurs->eventQueue, SYS_EVENT_QUEUE_DESTROY_FORCE);

		return CELL_SPURS_CORE_ERROR_AGAIN;
	}

	if (sys_event_port_connect_local(ppu, spurs->eventPort, spurs->eventQueue))
	{
		sys_event_port_destroy(ppu, spurs->eventPort);

		if (_spurs::detach_lv2_eq(spurs, spurs->spuPort, true))
		{
			return CELL_SPURS_CORE_ERROR_STAT;
		}

		sys_event_queue_destroy(ppu, spurs->eventQueue, SYS_EVENT_QUEUE_DESTROY_FORCE);
// Continuation of _spurs::create_event_helper.
return CELL_SPURS_CORE_ERROR_STAT;
	}

	struct event_helper_thread : ppu_thread
	{
		using ppu_thread::ppu_thread;

		void non_task()
		{
			//BIND_FUNC(_spurs::event_helper_entry)(*this);
		}
	};

	//auto eht = idm::make_ptr<ppu_thread, event_helper_thread>(std::string(spurs->prefix, spurs->prefixSize) + "SpursHdlr1", ppuPriority, 0x8000);

	//if (!eht)
	{
		// NOTE(review): with thread creation commented out above, this bare block
		// always executes — the helper is torn down and ERROR_STAT is returned
		// (matches the disabled HLE path; confirm against the intended revision).
		sys_event_port_disconnect(ppu, spurs->eventPort);
		sys_event_port_destroy(ppu, spurs->eventPort);

		if (_spurs::detach_lv2_eq(spurs, spurs->spuPort, true))
		{
			return CELL_SPURS_CORE_ERROR_STAT;
		}

		sys_event_queue_destroy(ppu, spurs->eventQueue, SYS_EVENT_QUEUE_DESTROY_FORCE);

		return CELL_SPURS_CORE_ERROR_STAT;
	}

	// eht->gpr[3] = spurs.addr();
	// eht->run();
	// spurs->ppu1 = eht->id;

	return CELL_OK;
}

// Initialise an event port multiplexor structure in guest memory
void _spurs::init_event_port_mux(vm::ptr<CellSpurs::EventPortMux> eventPortMux, u8 spuPort, u32 eventPort, u32 unknown)
{
	memset(eventPortMux.get_ptr(), 0, sizeof(CellSpurs::EventPortMux));

	eventPortMux->spuPort = spuPort;
	eventPortMux->eventPort = eventPort;
	eventPortMux->x08 = unknown;
}

// Register the default system workload (stub)
s32 _spurs::add_default_syswkl(vm::ptr<CellSpurs> spurs, vm::cptr<u8> swlPriority, u32 swlMaxSpu, u32 swlIsPreem)
{
	// TODO: Implement this
	return CELL_OK;
}

// Destroy the SPU thread group of a SPURS instance and close its SPU image
s32 _spurs::finalize_spu(ppu_thread& ppu, vm::ptr<CellSpurs> spurs)
{
	if (spurs->flags & SAF_UNKNOWN_FLAG_7 || spurs->flags & SAF_UNKNOWN_FLAG_8)
	{
		while (true)
		{
			ensure(sys_spu_thread_group_join(ppu, spurs->spuTG, vm::null, vm::null) + 0u == CELL_EFAULT);

			if (s32 rc = sys_spu_thread_group_destroy(ppu, spurs->spuTG))
			{
				// Retry while the group is still busy
				if (rc + 0u == CELL_EBUSY)
				{
					continue;
				}

				ensure(rc == CELL_OK);
			}

			break;
		}
	}
	else
	{
		if (s32 rc = sys_spu_thread_group_destroy(ppu, spurs->spuTG))
		{
			return rc;
		}
	}

	ensure(ppu_execute<&sys_spu_image_close>(ppu, spurs.ptr(&CellSpurs::spuImg)) == 0);

	return CELL_OK;
}

// Ask the event helper thread to terminate, join it, and destroy the
// SPURS event port and event queue.
s32 _spurs::stop_event_helper(ppu_thread& ppu, vm::ptr<CellSpurs> spurs)
{
	if (spurs->ppu1 == 0xFFFFFFFF)
	{
		return CELL_SPURS_CORE_ERROR_STAT;
	}

	// data0 == 1 tells event_helper_entry to return
	if (sys_event_port_send(spurs->eventPort, 0, 1, 0) != CELL_OK)
	{
		return CELL_SPURS_CORE_ERROR_STAT;
	}

	if (sys_ppu_thread_join(ppu, static_cast<u32>(spurs->ppu1), vm::var<u64>{}) != CELL_OK)
	{
		return CELL_SPURS_CORE_ERROR_STAT;
	}

	spurs->ppu1 = 0xFFFFFFFF;

	ensure(sys_event_port_disconnect(ppu, spurs->eventPort) == 0);
	ensure(sys_event_port_destroy(ppu, spurs->eventPort) == 0);
	ensure(_spurs::detach_lv2_eq(spurs, spurs->spuPort, true) == 0);
	ensure(sys_event_queue_destroy(ppu, spurs->eventQueue, SYS_EVENT_QUEUE_DESTROY_FORCE) == 0);

	return CELL_OK;
}

// Signal the handler thread's condition variable under its mutex
s32 _spurs::signal_to_handler_thread(ppu_thread& ppu, vm::ptr<CellSpurs> spurs)
{
	ensure(ppu_execute<&sys_lwmutex_lock>(ppu, spurs.ptr(&CellSpurs::mutex), 0) == 0);
	ensure(ppu_execute<&sys_lwcond_signal>(ppu, spurs.ptr(&CellSpurs::cond)) == 0);
	ensure(ppu_execute<&sys_lwmutex_unlock>(ppu, spurs.ptr(&CellSpurs::mutex)) == 0);

	return CELL_OK;
}

// Join the SPURS handler thread and invalidate its id
s32 _spurs::join_handler_thread(ppu_thread& ppu, vm::ptr<CellSpurs> spurs)
{
	if (spurs->ppu0 == 0xFFFFFFFF)
	{
		return CELL_SPURS_CORE_ERROR_STAT;
	}

	ensure(sys_ppu_thread_join(ppu, static_cast<u32>(spurs->ppu0), vm::var<u64>{}) == 0);

	spurs->ppu0 = 0xFFFFFFFF;

	return CELL_OK;
}

// Common SPURS initialisation used by all cellSpursInitialize* variants:
// validates arguments, zeroes and fills the CellSpurs context, creates
// per-workload semaphores and the SPU kernel image descriptor
// (continues in the next chunk with thread group creation).
s32 _spurs::initialize(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 revision, u32 sdkVersion, s32 nSpus, s32 spuPriority, s32 ppuPriority, u32 flags, vm::cptr<char> prefix, u32 prefixSize, u32 container, vm::cptr<u8> swlPriority, u32 swlMaxSpu, u32 swlIsPreem)
{
	vm::var<u32> sem;
	vm::var<sys_semaphore_attribute_t> semAttr;
	vm::var<char[]> spuTgName(128);
	vm::var<sys_spu_thread_group_attribute> spuTgAttr;
	vm::var<sys_spu_thread_argument> spuThArgs;
	vm::var<sys_spu_thread_attribute> spuThAttr;
	vm::var<char[]> spuThName(128);

	if (!spurs)
	{
		return CELL_SPURS_CORE_ERROR_NULL_POINTER;
	}

	if (!spurs.aligned())
	{
		return CELL_SPURS_CORE_ERROR_ALIGN;
	}

	if (prefixSize > CELL_SPURS_NAME_MAX_LENGTH)
	{
		return CELL_SPURS_CORE_ERROR_INVAL;
	}

	if (process_is_spu_lock_line_reservation_address(spurs.addr(), SYS_MEMORY_ACCESS_RIGHT_SPU_THR))
	{
		return CELL_SPURS_CORE_ERROR_PERM;
	}

	// Initialise SPURS context
	const bool isSecond = (flags & SAF_SECOND_VERSION) != 0;

	// Destroy any semaphores created so far; used on every failure path below
	auto rollback = [&]
	{
		if (spurs->semPrv)
		{
			sys_semaphore_destroy(ppu, ::narrow<u32>(+spurs->semPrv));
		}

		for (u32 i = 0; i < CELL_SPURS_MAX_WORKLOAD; i++)
		{
			if (spurs->wklF1[i].sem)
			{
				sys_semaphore_destroy(ppu, ::narrow<u32>(+spurs->wklF1[i].sem));
			}

			if (isSecond)
			{
				if (spurs->wklF2[i].sem)
				{
					sys_semaphore_destroy(ppu, ::narrow<u32>(+spurs->wklF2[i].sem));
				}
			}
		}
	};

	std::memset(spurs.get_ptr(), 0, isSecond ? CELL_SPURS_SIZE2 : CELL_SPURS_SIZE);

	spurs->revision = revision;
	spurs->sdkVersion = sdkVersion;
	spurs->ppu0 = 0xffffffffull;
	spurs->ppu1 = 0xffffffffull;
	spurs->flags = flags;
	spurs->prefixSize = static_cast<u8>(prefixSize);
	std::memcpy(spurs->prefix, prefix.get_ptr(), prefixSize);

	if (!isSecond)
	{
		spurs->wklEnabled = 0xffff;
	}

	// Initialise trace
	spurs->sysSrvTrace.store({});

	for (u32 i = 0; i < 8; i++)
	{
		spurs->sysSrvPreemptWklId[i] = -1;
	}

	// Import default system workload
	spurs->wklInfoSysSrv.addr.set(SPURS_IMG_ADDR_SYS_SRV_WORKLOAD);
	spurs->wklInfoSysSrv.size = 0x2200;
	spurs->wklInfoSysSrv.arg = 0;
	spurs->wklInfoSysSrv.uniqueId = 0xff;

	// Create semaphores for each workload
	semAttr->protocol = SYS_SYNC_PRIORITY;
	semAttr->pshared = SYS_SYNC_NOT_PROCESS_SHARED;
	semAttr->ipc_key = 0;
	semAttr->flags = 0;
	semAttr->name_u64 = "_spuWkl\0"_u64;

	for (u32 i = 0; i < CELL_SPURS_MAX_WORKLOAD; i++)
	{
		if (s32 rc = sys_semaphore_create(ppu, sem, semAttr, 0, 1))
		{
			return rollback(), rc;
		}

		spurs->wklF1[i].sem = *sem;

		if (isSecond)
		{
			if (s32 rc = sys_semaphore_create(ppu, sem, semAttr, 0, 1))
			{
				return rollback(), rc;
			}

			spurs->wklF2[i].sem = *sem;
		}
	}

	// Create semaphore
	semAttr->name_u64 = "_spuPrv\0"_u64;

	if (s32 rc = sys_semaphore_create(ppu, sem, semAttr, 0, 1))
	{
		return rollback(), rc;
	}

	spurs->semPrv = *sem;

	spurs->unk11 = -1;
	spurs->unk12 = -1;
	spurs->unk13 = 0;
	spurs->nSpus = nSpus;
	spurs->spuPriority = spuPriority;

	// Import SPURS kernel
	spurs->spuImg.type = SYS_SPU_IMAGE_TYPE_USER;
	spurs->spuImg.segs =
// Continuation of _spurs::initialize: SPU thread group creation, per-SPU thread
// initialisation, sync primitives, helper threads and final wake-up.
vm::null;
	spurs->spuImg.entry_point = isSecond ? CELL_SPURS_KERNEL2_ENTRY_ADDR : CELL_SPURS_KERNEL1_ENTRY_ADDR;
	spurs->spuImg.nsegs = 0;

	// Create a thread group for this SPURS context
	std::memcpy(spuTgName.get_ptr(), spurs->prefix, spurs->prefixSize);
	std::memcpy(spuTgName.get_ptr() + spurs->prefixSize, "CellSpursKernelGroup", 21);

	spuTgAttr->name = spuTgName;
	spuTgAttr->nsize = static_cast<u32>(std::strlen(spuTgAttr->name.get_ptr())) + 1;
	spuTgAttr->type = SYS_SPU_THREAD_GROUP_TYPE_NORMAL;

	if (spurs->flags & SAF_UNKNOWN_FLAG_0)
	{
		spuTgAttr->type = 0x0C00 | SYS_SPU_THREAD_GROUP_TYPE_SYSTEM;
	}
	else if (flags & SAF_SPU_TGT_EXCLUSIVE_NON_CONTEXT)
	{
		spuTgAttr->type = SYS_SPU_THREAD_GROUP_TYPE_EXCLUSIVE_NON_CONTEXT;
	}
	else
	{
		spuTgAttr->type = SYS_SPU_THREAD_GROUP_TYPE_NORMAL;
	}

	if (spurs->flags & SAF_SPU_MEMORY_CONTAINER_SET)
	{
		spuTgAttr->type |= SYS_SPU_THREAD_GROUP_TYPE_MEMORY_FROM_CONTAINER;
		spuTgAttr->ct = container;
	}

	// Additional group type bits selected by SPURS attribute flags
	if (flags & SAF_UNKNOWN_FLAG_7) spuTgAttr->type |= 0x0100 | SYS_SPU_THREAD_GROUP_TYPE_SYSTEM;
	if (flags & SAF_UNKNOWN_FLAG_8) spuTgAttr->type |= 0x0C00 | SYS_SPU_THREAD_GROUP_TYPE_SYSTEM;
	if (flags & SAF_UNKNOWN_FLAG_9) spuTgAttr->type |= 0x0800;
	if (flags & SAF_SYSTEM_WORKLOAD_ENABLED) spuTgAttr->type |= SYS_SPU_THREAD_GROUP_TYPE_COOPERATE_WITH_SYSTEM;

	if (s32 rc = sys_spu_thread_group_create(ppu, spurs.ptr(&CellSpurs::spuTG), nSpus, spuPriority, spuTgAttr))
	{
		ppu_execute<&sys_spu_image_close>(ppu, spurs.ptr(&CellSpurs::spuImg));

		return rollback(), rc;
	}

	// Initialise all SPUs in the SPU thread group
	std::memcpy(spuThName.get_ptr(), spurs->prefix, spurs->prefixSize);
	std::memcpy(spuThName.get_ptr() + spurs->prefixSize, "CellSpursKernel", 16);

	spuThAttr->name = spuThName;
	spuThAttr->name_len = static_cast<u32>(std::strlen(spuThName.get_ptr())) + 2;
	spuThAttr->option = SYS_SPU_THREAD_OPTION_DEC_SYNC_TB_ENABLE;

	spuThName[spuThAttr->name_len - 1] = '\0';

	for (s32 num = 0; num < nSpus; num++)
	{
		// Append the SPU index as the last name character ("CellSpursKernel0" ...)
		spuThName[spuThAttr->name_len - 2] = '0' + num;
		spuThArgs->arg1 = static_cast<u64>(num) << 32;
		spuThArgs->arg2 = spurs.addr();

		if (s32 rc = sys_spu_thread_initialize(ppu, spurs.ptr(&CellSpurs::spus, num), spurs->spuTG, num, spurs.ptr(&CellSpurs::spuImg), spuThAttr, spuThArgs))
		{
			sys_spu_thread_group_destroy(ppu, spurs->spuTG);
			ppu_execute<&sys_spu_image_close>(ppu, spurs.ptr(&CellSpurs::spuImg));

			return rollback(), rc;
		}

		// entry point cannot be initialized immediately because SPU LS will be rewritten by sys_spu_thread_group_start()
		//idm::get<named_thread<spu_thread>>(spurs->spus[num])->custom_task = [entry = spurs->spuImg.entry_point](spu_thread& spu)
		{
			// Disabled
			//spu.RegisterHleFunction(entry, spursKernelEntry);
		};
	}

	// Start the SPU printf server if required
	if (flags & SAF_SPU_PRINTF_ENABLED)
	{
		// spu_printf: attach group
		if (!g_spu_printf_agcb || g_spu_printf_agcb(ppu, spurs->spuTG) != CELL_OK)
		{
			// remove flag if failed
			spurs->flags &= ~SAF_SPU_PRINTF_ENABLED;
		}
	}

	const auto lwMutex = spurs.ptr(&CellSpurs::mutex);
	const auto lwCond = spurs.ptr(&CellSpurs::cond);

	// Create a mutex to protect access to SPURS handler thread data
	if (vm::var<sys_lwmutex_attribute_t> attr({SYS_SYNC_PRIORITY, SYS_SYNC_NOT_RECURSIVE, {"_spuPrv\0"_u64}}); s32 rc = ppu_execute<&sys_lwmutex_create>(ppu, lwMutex, +attr))
	{
		_spurs::finalize_spu(ppu, spurs);

		return rollback(), rc;
	}

	// Create condition variable to signal the SPURS handler thread
	if (vm::var<sys_lwcond_attribute_t> attr({"_spuPrv\0"_u64}); s32 rc = ppu_execute<&sys_lwcond_create>(ppu, lwCond, lwMutex, +attr))
	{
		ppu_execute<&sys_lwmutex_destroy>(ppu, lwMutex);
		_spurs::finalize_spu(ppu, spurs);

		return rollback(), rc;
	}

	spurs->flags1 = (flags & SAF_EXIT_IF_NO_WORK ? SF1_EXIT_IF_NO_WORK : 0) | (isSecond ? SF1_32_WORKLOADS : 0);
	spurs->wklFlagReceiver = 0xff;
	spurs->wklFlag.flag = -1;
	spurs->handlerDirty = 0;
	spurs->handlerWaiting = 0;
	spurs->handlerExiting = 0;
	spurs->ppuPriority = ppuPriority;

	// Create the SPURS event helper thread
	if (s32 rc = _spurs::create_event_helper(ppu, spurs, ppuPriority))
	{
		ppu_execute<&sys_lwcond_destroy>(ppu, lwCond);
		ppu_execute<&sys_lwmutex_destroy>(ppu, lwMutex);
		_spurs::finalize_spu(ppu, spurs);

		return rollback(), rc;
	}

	// Create the SPURS handler thread
	if (s32 rc = _spurs::create_handler(spurs, ppuPriority))
	{
		_spurs::stop_event_helper(ppu, spurs);
		ppu_execute<&sys_lwcond_destroy>(ppu, lwCond);
		ppu_execute<&sys_lwmutex_destroy>(ppu, lwMutex);
		_spurs::finalize_spu(ppu, spurs);

		return rollback(), rc;
	}

	// Enable SPURS exception handler
	if (s32 rc = cellSpursEnableExceptionEventHandler(ppu, spurs, true /*enable*/))
	{
		_spurs::signal_to_handler_thread(ppu, spurs);
		_spurs::join_handler_thread(ppu, spurs);
		_spurs::stop_event_helper(ppu, spurs);
		ppu_execute<&sys_lwcond_destroy>(ppu, lwCond);
		ppu_execute<&sys_lwmutex_destroy>(ppu, lwMutex);
		_spurs::finalize_spu(ppu, spurs);

		return rollback(), rc;
	}

	spurs->traceBuffer = vm::null;
	// TODO: Register libprof for user trace

	// Initialise the event port multiplexor
	_spurs::init_event_port_mux(spurs.ptr(&CellSpurs::eventPortMux), spurs->spuPort, spurs->eventPort, 3);

	// Enable the default system workload if required
	if (flags & SAF_SYSTEM_WORKLOAD_ENABLED)
	{
		ensure(_spurs::add_default_syswkl(spurs, swlPriority, swlMaxSpu, swlIsPreem) == 0);

		return CELL_OK;
	}
	else if (flags & SAF_EXIT_IF_NO_WORK)
	{
		return cellSpursWakeUp(ppu, spurs);
	}

	return CELL_OK;
}

/// Initialize SPURS
s32 cellSpursInitialize(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, s32 nSpus, s32 spuPriority, s32 ppuPriority, b8 exitIfNoWork)
{
	cellSpurs.warning("cellSpursInitialize(spurs=*0x%x, nSpus=%d, spuPriority=%d, ppuPriority=%d, exitIfNoWork=%d)", spurs, nSpus, spuPriority, ppuPriority, exitIfNoWork);

	// Forward to the common initializer with default attribute values
	return _spurs::initialize(ppu,
// Continuation of cellSpursInitialize's call into _spurs::initialize.
spurs, 0, 0, nSpus, spuPriority, ppuPriority, exitIfNoWork ? SAF_EXIT_IF_NO_WORK : SAF_NONE, vm::null, 0, 0, vm::null, 0, 0);
}

/// Initialise SPURS
s32 cellSpursInitializeWithAttribute(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, vm::cptr<CellSpursAttribute> attr)
{
	cellSpurs.warning("cellSpursInitializeWithAttribute(spurs=*0x%x, attr=*0x%x)", spurs, attr);

	if (!attr)
	{
		return CELL_SPURS_CORE_ERROR_NULL_POINTER;
	}

	if (!attr.aligned())
	{
		return CELL_SPURS_CORE_ERROR_ALIGN;
	}

	if (attr->revision > 2)
	{
		return CELL_SPURS_CORE_ERROR_INVAL;
	}

	return _spurs::initialize(
		ppu,
		spurs,
		attr->revision,
		attr->sdkVersion,
		attr->nSpus,
		attr->spuPriority,
		attr->ppuPriority,
		attr->flags | (attr->exitIfNoWork ? SAF_EXIT_IF_NO_WORK : 0),
		attr.ptr(&CellSpursAttribute::prefix, 0),
		attr->prefixSize,
		attr->container,
		attr.ptr(&CellSpursAttribute::swlPriority, 0),
		attr->swlMaxSpu,
		attr->swlIsPreem);
}

/// Initialise SPURS (second version: 32 workloads)
s32 cellSpursInitializeWithAttribute2(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, vm::cptr<CellSpursAttribute> attr)
{
	cellSpurs.warning("cellSpursInitializeWithAttribute2(spurs=*0x%x, attr=*0x%x)", spurs, attr);

	if (!attr)
	{
		return CELL_SPURS_CORE_ERROR_NULL_POINTER;
	}

	if (!attr.aligned())
	{
		return CELL_SPURS_CORE_ERROR_ALIGN;
	}

	if (attr->revision > 2)
	{
		return CELL_SPURS_CORE_ERROR_INVAL;
	}

	// Same as cellSpursInitializeWithAttribute but forces SAF_SECOND_VERSION
	return _spurs::initialize(
		ppu,
		spurs,
		attr->revision,
		attr->sdkVersion,
		attr->nSpus,
		attr->spuPriority,
		attr->ppuPriority,
		attr->flags | (attr->exitIfNoWork ? SAF_EXIT_IF_NO_WORK : 0) | SAF_SECOND_VERSION,
		attr.ptr(&CellSpursAttribute::prefix, 0),
		attr->prefixSize,
		attr->container,
		attr.ptr(&CellSpursAttribute::swlPriority, 0),
		attr->swlMaxSpu,
		attr->swlIsPreem);
}

/// Initialise SPURS attribute
s32 _cellSpursAttributeInitialize(vm::ptr<CellSpursAttribute> attr, u32 revision, u32 sdkVersion, u32 nSpus, s32 spuPriority, s32 ppuPriority, b8 exitIfNoWork)
{
	cellSpurs.warning("_cellSpursAttributeInitialize(attr=*0x%x, revision=%d, sdkVersion=0x%x, nSpus=%d, spuPriority=%d, ppuPriority=%d, exitIfNoWork=%d)", attr, revision, sdkVersion, nSpus, spuPriority, ppuPriority, exitIfNoWork);

	if (!attr)
	{
		return CELL_SPURS_CORE_ERROR_NULL_POINTER;
	}

	if (!attr.aligned())
	{
		return CELL_SPURS_CORE_ERROR_ALIGN;
	}

	memset(attr.get_ptr(), 0, sizeof(CellSpursAttribute));

	attr->revision = revision;
	attr->sdkVersion = sdkVersion;
	attr->nSpus = nSpus;
	attr->spuPriority = spuPriority;
	attr->ppuPriority = ppuPriority;
	attr->exitIfNoWork = exitIfNoWork;

	return CELL_OK;
}

/// Set memory container ID for creating the SPU thread group
s32 cellSpursAttributeSetMemoryContainerForSpuThread(vm::ptr<CellSpursAttribute> attr, u32 container)
{
	cellSpurs.warning("cellSpursAttributeSetMemoryContainerForSpuThread(attr=*0x%x, container=0x%x)", attr, container);

	if (!attr)
	{
		return CELL_SPURS_CORE_ERROR_NULL_POINTER;
	}

	if (!attr.aligned())
	{
		return CELL_SPURS_CORE_ERROR_ALIGN;
	}

	// Memory containers are incompatible with exclusive non-context groups
	if (attr->flags & SAF_SPU_TGT_EXCLUSIVE_NON_CONTEXT)
	{
		return CELL_SPURS_CORE_ERROR_STAT;
	}

	attr->container = container;
	attr->flags |= SAF_SPU_MEMORY_CONTAINER_SET;

	return CELL_OK;
}

/// Set the prefix for SPURS
s32 cellSpursAttributeSetNamePrefix(vm::ptr<CellSpursAttribute> attr, vm::cptr<char> prefix, u32 size)
{
	cellSpurs.warning("cellSpursAttributeSetNamePrefix(attr=*0x%x, prefix=%s, size=%d)", attr, prefix, size);

	if (!attr || !prefix)
	{
		return CELL_SPURS_CORE_ERROR_NULL_POINTER;
	}

	if (!attr.aligned())
	{
		return CELL_SPURS_CORE_ERROR_ALIGN;
	}

	if (size > CELL_SPURS_NAME_MAX_LENGTH)
	{
// Continuation of cellSpursAttributeSetNamePrefix.
return CELL_SPURS_CORE_ERROR_INVAL;
	}

	memcpy(attr->prefix, prefix.get_ptr(), size);
	attr->prefixSize = size;

	return CELL_OK;
}

/// Enable spu_printf()
s32 cellSpursAttributeEnableSpuPrintfIfAvailable(vm::ptr<CellSpursAttribute> attr)
{
	cellSpurs.warning("cellSpursAttributeEnableSpuPrintfIfAvailable(attr=*0x%x)", attr);

	if (!attr)
	{
		return CELL_SPURS_CORE_ERROR_NULL_POINTER;
	}

	if (!attr.aligned())
	{
		return CELL_SPURS_CORE_ERROR_ALIGN;
	}

	attr->flags |= SAF_SPU_PRINTF_ENABLED;

	return CELL_OK;
}

/// Set the type of SPU thread group
s32 cellSpursAttributeSetSpuThreadGroupType(vm::ptr<CellSpursAttribute> attr, s32 type)
{
	cellSpurs.warning("cellSpursAttributeSetSpuThreadGroupType(attr=*0x%x, type=%d)", attr, type);

	if (!attr)
	{
		return CELL_SPURS_CORE_ERROR_NULL_POINTER;
	}

	if (!attr.aligned())
	{
		return CELL_SPURS_CORE_ERROR_ALIGN;
	}

	if (type == SYS_SPU_THREAD_GROUP_TYPE_EXCLUSIVE_NON_CONTEXT)
	{
		// Exclusive non-context groups cannot use a memory container
		if (attr->flags & SAF_SPU_MEMORY_CONTAINER_SET)
		{
			return CELL_SPURS_CORE_ERROR_STAT;
		}

		attr->flags |= SAF_SPU_TGT_EXCLUSIVE_NON_CONTEXT; // set
	}
	else if (type == SYS_SPU_THREAD_GROUP_TYPE_NORMAL)
	{
		attr->flags &= ~SAF_SPU_TGT_EXCLUSIVE_NON_CONTEXT; // clear
	}
	else
	{
		return CELL_SPURS_CORE_ERROR_INVAL;
	}

	return CELL_OK;
}

/// Enable the system workload
s32 cellSpursAttributeEnableSystemWorkload(vm::ptr<CellSpursAttribute> attr, vm::cptr<u8[8]> priority, u32 maxSpu, vm::cptr<b8[8]> isPreemptible)
{
	cellSpurs.warning("cellSpursAttributeEnableSystemWorkload(attr=*0x%x, priority=*0x%x, maxSpu=%d, isPreemptible=*0x%x)", attr, priority, maxSpu, isPreemptible);

	if (!attr)
	{
		return CELL_SPURS_CORE_ERROR_NULL_POINTER;
	}

	if (!attr.aligned())
	{
		return CELL_SPURS_CORE_ERROR_ALIGN;
	}

	const u32 nSpus = attr->nSpus;

	if (!nSpus)
	{
		return CELL_SPURS_CORE_ERROR_INVAL;
	}

	// At least one SPU must have priority 1 for the system workload to be enabled
	for (u32 i = 0; i < nSpus; i++)
	{
		if ((*priority)[i] == 1)
		{
			if (!maxSpu)
			{
				return CELL_SPURS_CORE_ERROR_INVAL;
			}

			if (nSpus == 1 || attr->exitIfNoWork)
			{
				return CELL_SPURS_CORE_ERROR_PERM;
			}

			if (attr->flags & SAF_SYSTEM_WORKLOAD_ENABLED)
			{
				return CELL_SPURS_CORE_ERROR_BUSY;
			}

			attr->flags |= SAF_SYSTEM_WORKLOAD_ENABLED; // set flag

			std::memcpy(attr->swlPriority, priority.get_ptr(), 8);

			u32 isPreem = 0; // generate mask from isPreemptible values

			for (u32 j = 0; j < nSpus; j++)
			{
				if ((*isPreemptible)[j])
				{
					isPreem |= (1 << j);
				}
			}

			attr->swlMaxSpu = maxSpu;   // write max spu for system workload
			attr->swlIsPreem = isPreem; // write isPreemptible mask

			return CELL_OK;
		}
	}

	return CELL_SPURS_CORE_ERROR_INVAL;
}

/// Release resources allocated for SPURS (mostly unimplemented)
s32 cellSpursFinalize(vm::ptr<CellSpurs> spurs)
{
	cellSpurs.todo("cellSpursFinalize(spurs=*0x%x)", spurs);

	if (!spurs)
	{
		return CELL_SPURS_CORE_ERROR_NULL_POINTER;
	}

	if (!spurs.aligned())
	{
		return CELL_SPURS_CORE_ERROR_ALIGN;
	}

	if (spurs->handlerExiting)
	{
		return CELL_SPURS_CORE_ERROR_STAT;
	}

	[[maybe_unused]] u32 wklEnabled = spurs->wklEnabled.load();

	if (spurs->flags1 & SF1_32_WORKLOADS)
	{
		wklEnabled &= 0xFFFF0000;
	}

	if (spurs->flags & SAF_SYSTEM_WORKLOAD_ENABLED)
	{
	}

	// TODO: Implement the rest of this function
	return CELL_OK;
}

/// Get the SPU thread group ID
s32 cellSpursGetSpuThreadGroupId(vm::ptr<CellSpurs> spurs, vm::ptr<u32> group)
{
	cellSpurs.warning("cellSpursGetSpuThreadGroupId(spurs=*0x%x, group=*0x%x)", spurs, group);

	if (!spurs || !group)
	{
		return CELL_SPURS_CORE_ERROR_NULL_POINTER;
	}

	if (!spurs.aligned())
	{
		return CELL_SPURS_CORE_ERROR_ALIGN;
	}

	*group = spurs->spuTG;

	return CELL_OK;
}

// Get the number of SPU threads
s32 cellSpursGetNumSpuThread(vm::ptr<CellSpurs> spurs, vm::ptr<u32> nThreads)
{
	cellSpurs.warning("cellSpursGetNumSpuThread(spurs=*0x%x, nThreads=*0x%x)", spurs, nThreads);

	if (!spurs || !nThreads)
	{
		return CELL_SPURS_CORE_ERROR_NULL_POINTER;
	}

	if (!spurs.aligned())
	{
		return CELL_SPURS_CORE_ERROR_ALIGN;
	}

	*nThreads = spurs->nSpus;

	return CELL_OK;
}

/// Get SPU thread ids
s32 cellSpursGetSpuThreadId(vm::ptr<CellSpurs> spurs, vm::ptr<u32> thread, vm::ptr<u32> nThreads)
{
// Continuation of cellSpursGetSpuThreadId:
// copy up to *nThreads SPU thread ids to the caller, clamped to nSpus.
cellSpurs.warning("cellSpursGetSpuThreadId(spurs=*0x%x, thread=*0x%x, nThreads=*0x%x)", spurs, thread, nThreads);

	if (!spurs || !thread || !nThreads)
	{
		return CELL_SPURS_CORE_ERROR_NULL_POINTER;
	}

	if (!spurs.aligned())
	{
		return CELL_SPURS_CORE_ERROR_ALIGN;
	}

	const u32 count = std::min<u32>(*nThreads, spurs->nSpus);

	for (u32 i = 0; i < count; i++)
	{
		thread[i] = spurs->spus[i];
	}

	*nThreads = count;

	return CELL_OK;
}

/// Set the maximum contention for a workload
s32 cellSpursSetMaxContention(vm::ptr<CellSpurs> spurs, u32 wid, u32 maxContention)
{
	cellSpurs.warning("cellSpursSetMaxContention(spurs=*0x%x, wid=%d, maxContention=%d)", spurs, wid, maxContention);

	if (!spurs)
	{
		return CELL_SPURS_CORE_ERROR_NULL_POINTER;
	}

	if (!spurs.aligned())
	{
		return CELL_SPURS_CORE_ERROR_ALIGN;
	}

	if (wid >= spurs->max_workloads())
	{
		return CELL_SPURS_CORE_ERROR_INVAL;
	}

	if ((spurs->wklEnabled.load() & (0x80000000u >> wid)) == 0u)
	{
		return CELL_SPURS_CORE_ERROR_SRCH;
	}

	if (spurs->exception)
	{
		return CELL_SPURS_CORE_ERROR_STAT;
	}

	if (maxContention > CELL_SPURS_MAX_SPU)
	{
		maxContention = CELL_SPURS_MAX_SPU;
	}

	// Each byte stores two contention values: low nibble for workloads 0-15,
	// high nibble for workloads 16-31
	vm::atomic_op(spurs->wklMaxContention[wid % CELL_SPURS_MAX_WORKLOAD], [&](u8& value)
	{
		value &= wid < CELL_SPURS_MAX_WORKLOAD ? 0xF0 : 0x0F;
		value |= wid < CELL_SPURS_MAX_WORKLOAD ? maxContention : maxContention << 4;
	});

	return CELL_OK;
}

/// Set the priority of a workload on each SPU
s32 cellSpursSetPriorities(vm::ptr<CellSpurs> spurs, u32 wid, vm::cptr<u8[8]> priorities)
{
	cellSpurs.trace("cellSpursSetPriorities(spurs=*0x%x, wid=%d, priorities=*0x%x)", spurs, wid, priorities);

	if (!spurs)
	{
		return CELL_SPURS_CORE_ERROR_NULL_POINTER;
	}

	if (!spurs.aligned())
	{
		return CELL_SPURS_CORE_ERROR_ALIGN;
	}

	if (wid >= spurs->max_workloads())
	{
		return CELL_SPURS_CORE_ERROR_INVAL;
	}

	if ((spurs->wklEnabled.load() & (0x80000000u >> wid)) == 0u)
	{
		return CELL_SPURS_CORE_ERROR_SRCH;
	}

	if (spurs->exception)
	{
		return CELL_SPURS_CORE_ERROR_STAT;
	}

	if (spurs->flags & SAF_SYSTEM_WORKLOAD_ENABLED)
	{
		// TODO: Implement this
	}

	const u64 prio = std::bit_cast<u64>(*priorities);

	// Test if any of the value >= CELL_SPURS_MAX_PRIORITY
	if (prio & 0xf0f0f0f0f0f0f0f0)
	{
		return CELL_SPURS_CORE_ERROR_INVAL;
	}

	// Publish new priorities and tell every SPU to refresh its workload info
	vm::light_op(spurs->wklInfo(wid).prio64, [&](atomic_t<u64>& v){ v.release(prio); });
	vm::light_op(spurs->sysSrvMsgUpdateWorkload, [](atomic_t<u8>& v){ v.release(0xff); });
	vm::light_op(spurs->sysSrvMessage, [](atomic_t<u8>& v){ v.release(0xff); });

	return CELL_OK;
}

/// Set the priority of a workload for the specified SPU
s32 cellSpursSetPriority(vm::ptr<CellSpurs> spurs, u32 wid, u32 spuId, u32 priority)
{
	cellSpurs.trace("cellSpursSetPriority(spurs=*0x%x, wid=%d, spuId=%d, priority=%d)", spurs, wid, spuId, priority);

	if (!spurs)
		return CELL_SPURS_CORE_ERROR_NULL_POINTER;

	if (!spurs.aligned())
		return CELL_SPURS_CORE_ERROR_ALIGN;

	if (wid >= spurs->max_workloads())
		return CELL_SPURS_CORE_ERROR_INVAL;

	if (priority >= CELL_SPURS_MAX_PRIORITY || spuId >= spurs->nSpus)
		return CELL_SPURS_CORE_ERROR_INVAL;

	if ((spurs->wklEnabled & (0x80000000u >> wid)) == 0u)
		return CELL_SPURS_CORE_ERROR_SRCH;

	if (spurs->exception)
		return CELL_SPURS_CORE_ERROR_STAT;

	vm::light_op<true>(spurs->wklInfo(wid).priority[spuId], [&](u8& v){ atomic_storage<u8>::release(v, priority); });
// Continuation of cellSpursSetPriority: notify the target SPU of the update.
vm::light_op<true>(spurs->sysSrvMsgUpdateWorkload, [&](atomic_t<u8>& v){ v.bit_test_set(spuId); });
	vm::light_op<true>(spurs->sysSrvMessage, [&](atomic_t<u8>& v){ v.bit_test_set(spuId); });

	return CELL_OK;
}

/// Set preemption victim SPU (unimplemented)
s32 cellSpursSetPreemptionVictimHints(vm::ptr<CellSpurs> spurs, vm::cptr<b8> isPreemptible)
{
	UNIMPLEMENTED_FUNC(cellSpurs);

	return CELL_OK;
}

/// Attach an LV2 event queue to a SPURS instance
s32 cellSpursAttachLv2EventQueue(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 queue, vm::ptr<u8> port, s32 isDynamic)
{
	cellSpurs.warning("cellSpursAttachLv2EventQueue(spurs=*0x%x, queue=0x%x, port=*0x%x, isDynamic=%d)", spurs, queue, port, isDynamic);

	return _spurs::attach_lv2_eq(ppu, spurs, queue, port, isDynamic, false);
}

/// Detach an LV2 event queue from a SPURS instance
s32 cellSpursDetachLv2EventQueue(vm::ptr<CellSpurs> spurs, u8 port)
{
	cellSpurs.warning("cellSpursDetachLv2EventQueue(spurs=*0x%x, port=%d)", spurs, port);

	return _spurs::detach_lv2_eq(spurs, port, false);
}

// Enable or disable delivery of SPU exception events to the SPURS event queue
s32 cellSpursEnableExceptionEventHandler(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, b8 flag)
{
	cellSpurs.warning("cellSpursEnableExceptionEventHandler(spurs=*0x%x, flag=%d)", spurs, flag);

	if (!spurs)
	{
		return CELL_SPURS_CORE_ERROR_NULL_POINTER;
	}

	if (!spurs.aligned())
	{
		return CELL_SPURS_CORE_ERROR_ALIGN;
	}

	s32 rc = CELL_OK;

	// Only (dis)connect when the enable state actually changes
	auto oldEnableEH = spurs->enableEH.exchange(flag ? 1u : 0u);

	if (flag)
	{
		if (oldEnableEH == 0u)
		{
			rc = sys_spu_thread_group_connect_event(ppu, spurs->spuTG, spurs->eventQueue, SYS_SPU_THREAD_GROUP_EVENT_EXCEPTION);
		}
	}
	else
	{
		if (oldEnableEH == 1u)
		{
			rc = sys_spu_thread_group_disconnect_event(ppu, spurs->eventQueue, SYS_SPU_THREAD_GROUP_EVENT_EXCEPTION);
		}
	}

	return rc;
}

/// Set the global SPU exception event handler
s32 cellSpursSetGlobalExceptionEventHandler(vm::ptr<CellSpurs> spurs, vm::ptr<CellSpursGlobalExceptionEventHandler> eaHandler, vm::ptr<void> arg)
{
	cellSpurs.warning("cellSpursSetGlobalExceptionEventHandler(spurs=*0x%x, eaHandler=*0x%x, arg=*0x%x)", spurs, eaHandler, arg);

	if (!spurs || !eaHandler)
	{
		return CELL_SPURS_CORE_ERROR_NULL_POINTER;
	}

	if (!spurs.aligned())
	{
		return CELL_SPURS_CORE_ERROR_ALIGN;
	}

	if (spurs->exception)
	{
		return CELL_SPURS_CORE_ERROR_STAT;
	}

	// Claim the slot with a placeholder value (1) to detect concurrent registration
	auto handler = spurs->globalSpuExceptionHandler.compare_and_swap(0, 1);

	if (handler)
	{
		return CELL_SPURS_CORE_ERROR_BUSY;
	}

	spurs->globalSpuExceptionHandlerArgs = arg.addr();
	spurs->globalSpuExceptionHandler.exchange(eaHandler.addr());

	return CELL_OK;
}

/// Remove the global SPU exception event handler
s32 cellSpursUnsetGlobalExceptionEventHandler(vm::ptr<CellSpurs> spurs)
{
	cellSpurs.warning("cellSpursUnsetGlobalExceptionEventHandler(spurs=*0x%x)", spurs);

	if (!spurs)
		return CELL_SPURS_CORE_ERROR_NULL_POINTER;

	if (!spurs.aligned())
		return CELL_SPURS_CORE_ERROR_ALIGN;

	if (spurs->exception)
		return CELL_SPURS_CORE_ERROR_STAT;

	spurs->globalSpuExceptionHandlerArgs = 0;
	spurs->globalSpuExceptionHandler.exchange(0);

	return CELL_OK;
}

/// Get internal information of a SPURS instance
s32 cellSpursGetInfo(vm::ptr<CellSpurs> spurs, vm::ptr<CellSpursInfo> info)
{
	cellSpurs.trace("cellSpursGetInfo(spurs=*0x%x, info=*0x%x)", spurs, info);

	if (!spurs || !info)
		return CELL_SPURS_CORE_ERROR_NULL_POINTER;

	if (!spurs.aligned())
		return CELL_SPURS_CORE_ERROR_ALIGN;

	const auto flags = spurs->flags;

	info->nSpus = spurs->nSpus;
	info->spuThreadGroupPriority =
spurs->spuPriority; info->ppuThreadPriority = spurs->ppuPriority; info->exitIfNoWork = !!(flags & SAF_EXIT_IF_NO_WORK); info->spurs2 = !!(flags & SAF_SECOND_VERSION); info->spuThreadGroup = spurs->spuTG; std::memcpy(&info->spuThreads, &spurs->spus, sizeof(s32) * 8); info->spursHandlerThread0 = spurs->ppu0; info->spursHandlerThread1 = spurs->ppu1; info->traceBufferSize = spurs->traceDataSize; const auto trace_addr = vm::cast(spurs->traceBuffer.addr()); info->traceBuffer = vm::addr_t{trace_addr & ~3}; info->traceMode = trace_addr & 3; const u8 name_size = spurs->prefixSize; std::memcpy(&info->namePrefix, spurs->prefix, name_size); info->namePrefix[name_size] = '\0'; info->namePrefixLength = name_size; // TODO: Should call sys_spu_thread_group_syscall_253 for specific SPU group types info->deadlineMeetCounter = 0; info->deadlineMissCounter = 0; return CELL_OK; } //---------------------------------------------------------------------------- // SPURS SPU GUID functions //---------------------------------------------------------------------------- /// Get the SPU GUID from a .SpuGUID section s32 cellSpursGetSpuGuid(vm::cptr<void> guid, vm::ptr<u64> dst) { cellSpurs.trace("cellSpursGetSpuGuid(guid=*0x%x, dst=*0x%x)", guid, dst); if (!guid || !dst) return CELL_SPURS_CORE_ERROR_NULL_POINTER; if (!dst.aligned()) return CELL_SPURS_CORE_ERROR_ALIGN; return CELL_OK; } //---------------------------------------------------------------------------- // SPURS trace functions //---------------------------------------------------------------------------- void _spurs::trace_status_update(ppu_thread& ppu, vm::ptr<CellSpurs> spurs) { u8 init; vm::atomic_op(spurs->sysSrvTrace, [spurs, &init](CellSpurs::SrvTraceSyncVar& data) { if ((init = data.sysSrvTraceInitialised)) { data.sysSrvNotifyUpdateTraceComplete = 1; data.sysSrvMsgUpdateTrace = (1 << spurs->nSpus) - 1; } }); if (init) { vm::light_op<true>(spurs->sysSrvMessage, [&](atomic_t<u8>& v){ v.release(0xff); }); 
// Continuation of _spurs::trace_status_update: block until the SPUs acknowledge the request
		ensure(sys_semaphore_wait(ppu, static_cast<u32>(spurs->semPrv), 0) == 0);

		static_cast<void>(ppu.test_stopped());
	}
}

// Shared implementation for cellSpursTraceInitialize (optionally updating SPU trace status)
s32 _spurs::trace_initialize(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, vm::ptr<CellSpursTraceInfo> buffer, u32 size, u32 mode, u32 updateStatus)
{
	if (!spurs || !buffer)
	{
		return CELL_SPURS_CORE_ERROR_NULL_POINTER;
	}

	if (!spurs.aligned() || !buffer.aligned())
	{
		return CELL_SPURS_CORE_ERROR_ALIGN;
	}

	if (size < sizeof(CellSpursTraceInfo) || mode & ~(CELL_SPURS_TRACE_MODE_FLAG_MASK))
	{
		return CELL_SPURS_CORE_ERROR_INVAL;
	}

	// Tracing may only be initialised once until finalized
	if (spurs->traceBuffer)
	{
		return CELL_SPURS_CORE_ERROR_STAT;
	}

	// Data area is whatever follows the CellSpursTraceInfo header
	spurs->traceDataSize = size - u32{sizeof(CellSpursTraceInfo)};

	for (u32 i = 0; i < 8; i++)
	{
		buffer->spuThread[i] = spurs->spus[i];
		buffer->count[i] = 0;
	}

	buffer->spuThreadGroup = spurs->spuTG;
	buffer->numSpus = spurs->nSpus;

	// Bit 0 of the stored buffer address flags wrap-around mode
	spurs->traceBuffer.set(buffer.addr() | (mode & CELL_SPURS_TRACE_MODE_FLAG_WRAP_BUFFER ? 1 : 0));
	spurs->traceMode = mode;

	// Partition the packet area evenly between the SPUs
	u32 spuTraceDataCount = ::narrow<u32>((spurs->traceDataSize / sizeof(CellSpursTracePacket)) / spurs->nSpus);

	for (u32 i = 0, j = 8; i < 6; i++)
	{
		spurs->traceStartIndex[i] = j;
		j += spuTraceDataCount;
	}

	spurs->sysSrvTraceControl = 0;

	if (updateStatus)
	{
		_spurs::trace_status_update(ppu, spurs);
	}

	return CELL_OK;
}

/// Initialize SPURS trace
s32 cellSpursTraceInitialize(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, vm::ptr<CellSpursTraceInfo> buffer, u32 size, u32 mode)
{
	cellSpurs.warning("cellSpursTraceInitialize(spurs=*0x%x, buffer=*0x%x, size=0x%x, mode=0x%x)", spurs, buffer, size, mode);

	// libprof provides its own tracing; reject if it is loaded
	if (_spurs::is_libprof_loaded())
	{
		return CELL_SPURS_CORE_ERROR_STAT;
	}

	return _spurs::trace_initialize(ppu, spurs, buffer, size, mode, 1);
}

/// Finalize SPURS trace
s32 cellSpursTraceFinalize(ppu_thread& ppu, vm::ptr<CellSpurs> spurs)
{
	cellSpurs.warning("cellSpursTraceFinalize(spurs=*0x%x)", spurs);

	if (!spurs)
	{
		return CELL_SPURS_CORE_ERROR_NULL_POINTER;
	}

	if (!spurs.aligned())
	{
		return CELL_SPURS_CORE_ERROR_ALIGN;
	}

	if (!spurs->traceBuffer)
	{
		return CELL_SPURS_CORE_ERROR_STAT;
	}

	// Detach the buffer and propagate the new state to the SPUs
	spurs->sysSrvTraceControl = 0;
	spurs->traceMode = 0;
	spurs->traceBuffer = vm::null;
	_spurs::trace_status_update(ppu, spurs);
	return CELL_OK;
}

// Shared implementation for cellSpursTraceStart (control value 1 = start)
s32 _spurs::trace_start(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 updateStatus)
{
	if (!spurs)
	{
		return CELL_SPURS_CORE_ERROR_NULL_POINTER;
	}

	if (!spurs.aligned())
	{
		return CELL_SPURS_CORE_ERROR_ALIGN;
	}

	if (!spurs->traceBuffer)
	{
		return CELL_SPURS_CORE_ERROR_STAT;
	}

	spurs->sysSrvTraceControl = 1;

	if (updateStatus)
	{
		_spurs::trace_status_update(ppu, spurs);
	}

	return CELL_OK;
}

/// Start SPURS trace
s32 cellSpursTraceStart(ppu_thread& ppu, vm::ptr<CellSpurs> spurs)
{
	cellSpurs.warning("cellSpursTraceStart(spurs=*0x%x)", spurs);

	if (!spurs)
	{
		return CELL_SPURS_CORE_ERROR_NULL_POINTER;
	}

	if (!spurs.aligned())
	{
		return CELL_SPURS_CORE_ERROR_ALIGN;
	}

	// Synchronous mode forces an immediate status update on the SPUs
	return _spurs::trace_start(ppu, spurs, spurs->traceMode & CELL_SPURS_TRACE_MODE_FLAG_SYNCHRONOUS_START_STOP);
}

// Shared implementation for cellSpursTraceStop (control value 2 = stop)
s32 _spurs::trace_stop(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 updateStatus)
{
	if (!spurs)
	{
		return CELL_SPURS_CORE_ERROR_NULL_POINTER;
	}

	if (!spurs.aligned())
	{
		return CELL_SPURS_CORE_ERROR_ALIGN;
	}

	if (!spurs->traceBuffer)
	{
		return CELL_SPURS_CORE_ERROR_STAT;
	}

	spurs->sysSrvTraceControl = 2;

	if (updateStatus)
	{
		_spurs::trace_status_update(ppu, spurs);
	}

	return CELL_OK;
}

/// Stop SPURS trace
s32 cellSpursTraceStop(ppu_thread& ppu, vm::ptr<CellSpurs> spurs)
{
	cellSpurs.warning("cellSpursTraceStop(spurs=*0x%x)", spurs);

	if (!spurs)
	{
		return CELL_SPURS_CORE_ERROR_NULL_POINTER;
	}

	if (!spurs.aligned())
	{
		return CELL_SPURS_CORE_ERROR_ALIGN;
	}

	return _spurs::trace_stop(ppu, spurs, spurs->traceMode & CELL_SPURS_TRACE_MODE_FLAG_SYNCHRONOUS_START_STOP);
}

//----------------------------------------------------------------------------
// SPURS policy module functions
//----------------------------------------------------------------------------

/// Initialize attributes of a workload
s32
// (signature continues from the preceding '/// Initialize attributes of a workload' declaration)
_cellSpursWorkloadAttributeInitialize(ppu_thread& ppu, vm::ptr<CellSpursWorkloadAttribute> attr, u32 revision, u32 sdkVersion, vm::cptr<void> pm, u32 size, u64 data, vm::cptr<u8[8]> priority, u32 minCnt, u32 maxCnt)
{
	cellSpurs.warning("_cellSpursWorkloadAttributeInitialize(attr=*0x%x, revision=%d, sdkVersion=0x%x, pm=*0x%x, size=0x%x, data=0x%llx, priority=*0x%x, minCnt=0x%x, maxCnt=0x%x)", attr, revision, sdkVersion, pm, size, data, priority, minCnt, maxCnt);

	if (!attr)
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_NULL_POINTER;
	}

	if (!attr.aligned())
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_ALIGN;
	}

	if (!pm)
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_NULL_POINTER;
	}

	// Policy module image must be 16-byte aligned
	if (!pm.aligned(16))
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_ALIGN;
	}

	// Load packed priorities (endian-agnostic)
	const u64 prio = std::bit_cast<u64>(*priority);

	// check if some priority > 15
	if (minCnt == 0 || prio & 0xf0f0f0f0f0f0f0f0)
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_INVAL;
	}

	// Zero the whole attribute block, then fill in the provided fields
	std::memset(attr.get_ptr(), 0, sizeof(CellSpursWorkloadAttribute));
	attr->revision = revision;
	attr->sdkVersion = sdkVersion;
	attr->pm = pm;
	attr->size = size;
	attr->data = data;
	std::memcpy(attr->priority, &prio, 8);
	attr->minContention = minCnt;
	attr->maxContention = maxCnt;
	return CELL_OK;
}

/// Set the name of a workload
s32 cellSpursWorkloadAttributeSetName(ppu_thread& ppu, vm::ptr<CellSpursWorkloadAttribute> attr, vm::cptr<char> nameClass, vm::cptr<char> nameInstance)
{
	cellSpurs.warning("cellSpursWorkloadAttributeSetName(attr=*0x%x, nameClass=%s, nameInstance=%s)", attr, nameClass, nameInstance);

	if (!attr)
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_NULL_POINTER;
	}

	if (!attr.aligned())
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_ALIGN;
	}

	attr->nameClass = nameClass;
	attr->nameInstance = nameInstance;
	return CELL_OK;
}

/// Set a hook function for shutdown completion event of a workload
s32 cellSpursWorkloadAttributeSetShutdownCompletionEventHook(vm::ptr<CellSpursWorkloadAttribute> attr, vm::ptr<CellSpursShutdownCompletionEventHook> hook, vm::ptr<void> arg)
{
	cellSpurs.warning("cellSpursWorkloadAttributeSetShutdownCompletionEventHook(attr=*0x%x, hook=*0x%x, arg=*0x%x)", attr, hook, arg);

	if (!attr || !hook)
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_NULL_POINTER;
	}

	if (!attr.aligned())
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_ALIGN;
	}

	attr->hook = hook;
	attr->hookArg = arg;
	return CELL_OK;
}

// Shared implementation for cellSpursAddWorkload[WithAttribute]: claims a workload
// slot, initialises its descriptors and makes it runnable.
s32 _spurs::add_workload(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, vm::ptr<u32> wid, vm::cptr<void> pm, u32 size, u64 data, const u8(&priorityTable)[8], u32 minContention, u32 maxContention, vm::cptr<char> nameClass, vm::cptr<char> nameInstance, vm::ptr<CellSpursShutdownCompletionEventHook> hook, vm::ptr<void> hookArg)
{
	if (!spurs || !wid || !pm)
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_NULL_POINTER;
	}

	if (!spurs.aligned() || !pm.aligned(16))
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_ALIGN;
	}

	if (minContention == 0 || std::bit_cast<u64>(priorityTable) & 0xf0f0f0f0f0f0f0f0ull) // check if some priority > 15
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_INVAL;
	}

	if (spurs->exception)
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_STAT;
	}

	u32 wnum;
	const u32 wmax = spurs->flags1 & SF1_32_WORKLOADS ?
// Workload capacity depends on the 32-workload flag (16 or 32 slots)
		CELL_SPURS_MAX_WORKLOAD2 : CELL_SPURS_MAX_WORKLOAD; // TODO: check if can be changed

	// Claim the first free slot: count leading ones in the enabled mask
	vm::fetch_op(spurs->wklEnabled, [&](be_t<u32>& value)
	{
		wnum = std::countl_one<u32>(value); // found empty position

		if (wnum < wmax)
		{
			value |= (0x80000000 >> wnum); // set workload bit
		}
	});

	*wid = wnum; // store workload id

	if (wnum >= wmax)
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_AGAIN;
	}

	// Take shared locks on both 128-byte reservation lines covering the SPURS area
	auto& spurs_res = vm::reservation_acquire(spurs.addr());
	auto& spurs_res2 = vm::reservation_acquire(spurs.addr() + 0x80);

	if (!spurs_res2.fetch_op([&](u64& r)
	{
		if (r & vm::rsrv_unique_lock)
		{
			return false;
		}

		r += 1;
		return true;
	}).second)
	{
		vm::reservation_shared_lock_internal(spurs_res2);
	}

	if (!spurs_res.fetch_op([&](u64& r)
	{
		if (r & vm::rsrv_unique_lock)
		{
			return false;
		}

		r += 1;
		return true;
	}).second)
	{
		vm::reservation_shared_lock_internal(spurs_res);
	}

	u32 index = wnum & 0xf;

	if (wnum <= 15)
	{
		// Workloads 0-15 live in the first set of per-workload arrays
		ensure((spurs->wklCurrentContention[wnum] & 0xf) == 0);
		ensure((spurs->wklPendingContention[wnum] & 0xf) == 0);
		spurs->wklState1[wnum].release(SPURS_WKL_STATE_PREPARING);
		spurs->wklStatus1[wnum] = 0;
		spurs->wklEvent1[wnum].release(0);
		spurs->wklInfo1[wnum].addr = pm;
		spurs->wklInfo1[wnum].arg = data;
		spurs->wklInfo1[wnum].size = size;

		for (u32 i = 0; i < 8; i++)
		{
			spurs->wklInfo1[wnum].priority[i] = priorityTable[i];
		}

		spurs->wklH1[wnum].nameClass = nameClass;
		spurs->wklH1[wnum].nameInstance = nameInstance;
		memset(spurs->wklF1[wnum].unk0, 0, 0x20); // clear struct preserving semaphore id
		memset(&spurs->wklF1[wnum].x28, 0, 0x58);

		if (hook)
		{
			spurs->wklF1[wnum].hook = hook;
			spurs->wklF1[wnum].hookArg = hookArg;
			spurs->wklEvent1[wnum] |= 2;
		}

		if ((spurs->flags1 & SF1_32_WORKLOADS) == 0)
		{
			// Legacy (16-workload) mode also tracks idle-SPU and min-contention values
			spurs->wklIdleSpuCountOrReadyCount2[wnum] = 0;
			spurs->wklMinContention[wnum] = minContention > 8 ? 8 : minContention;
		}

		spurs->wklReadyCount1[wnum].release(0);
	}
	else
	{
		// Workloads 16-31 live in the second set of per-workload arrays
		ensure((spurs->wklCurrentContention[index] & 0xf0) == 0);
		ensure((spurs->wklPendingContention[index] & 0xf0) == 0);
		spurs->wklState2[index].release(SPURS_WKL_STATE_PREPARING);
		spurs->wklStatus2[index] = 0;
		spurs->wklEvent2[index].release(0);
		spurs->wklInfo2[index].addr = pm;
		spurs->wklInfo2[index].arg = data;
		spurs->wklInfo2[index].size = size;

		for (u32 i = 0; i < 8; i++)
		{
			spurs->wklInfo2[index].priority[i] = priorityTable[i];
		}

		spurs->wklH2[index].nameClass = nameClass;
		spurs->wklH2[index].nameInstance = nameInstance;
		memset(spurs->wklF2[index].unk0, 0, 0x20); // clear struct preserving semaphore id
		memset(&spurs->wklF2[index].x28, 0, 0x58);

		if (hook)
		{
			spurs->wklF2[index].hook = hook;
			spurs->wklF2[index].hookArg = hookArg;
			spurs->wklEvent2[index] |= 2;
		}

		spurs->wklIdleSpuCountOrReadyCount2[wnum].release(0);
	}

	// Max contention is packed as two nibbles per byte (low = 0-15, high = 16-31)
	spurs->wklMaxContention[index].atomic_op([&](u8& v)
	{
		v &= (wnum <= 15 ? 0xf0 : 0x0f);
		v |= (maxContention > 8 ? 8 : maxContention) << (wnum < CELL_SPURS_MAX_WORKLOAD ? 0 : 4);
	});

	(wnum <= 15 ?
// Clear any stale signal bit for the newly claimed workload slot
		spurs->wklSignal1 : spurs->wklSignal2).atomic_op([&](be_t<u16>& data)
	{
		data &= ~(0x8000 >> index);
	});

	// Attempt to avoid CAS
	if (spurs->wklFlagReceiver == wnum && spurs->wklFlagReceiver.compare_and_swap(wnum, 0xff))
	{
		//
	}

	// Release the shared reservation locks and wake any waiters
	spurs_res += 127;
	spurs_res2 += 127;
	spurs_res.notify_all();
	spurs_res2.notify_all();

	u32 res_wkl;
	const auto wkl = &spurs->wklInfo(wnum);

	// Assign a unique id: workloads sharing a policy module image share the same id
	vm::reservation_op(ppu, vm::unsafe_ptr_cast<spurs_wkl_state_op>(spurs.ptr(&CellSpurs::wklState1)), [&](spurs_wkl_state_op& op)
	{
		const u32 mask = op.wklMskB & ~(0x80000000u >> wnum);
		res_wkl = 0;

		for (u32 i = 0, m = 0x80000000, k = 0; i < 32; i++, m >>= 1)
		{
			if (mask & m)
			{
				const auto current = &spurs->wklInfo(i);

				if (current->addr == wkl->addr)
				{
					// if a workload with identical policy module found
					res_wkl = current->uniqueId;
					break;
				}
				else
				{
					k |= 0x80000000 >> current->uniqueId;
					res_wkl = std::countl_one<u32>(k);
				}
			}
		}

		wkl->uniqueId.release(static_cast<u8>(res_wkl));
		op.wklMskB = mask | (0x80000000u >> wnum);
		(wnum < CELL_SPURS_MAX_WORKLOAD ? op.wklState1[wnum] : op.wklState2[wnum % 16]) = SPURS_WKL_STATE_RUNNABLE;
	});

	ensure((res_wkl <= 31));

	// Notify the system service on all SPUs that the workload set changed
	vm::light_op<true>(spurs->sysSrvMsgUpdateWorkload, [](atomic_t<u8>& v){ v.release(0xff); });
	vm::light_op<true>(spurs->sysSrvMessage, [](atomic_t<u8>& v){ v.release(0xff); });
	return CELL_OK;
}

/// Add workload
s32 cellSpursAddWorkload(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, vm::ptr<u32> wid, vm::cptr<void> pm, u32 size, u64 data, vm::cptr<u8[8]> priority, u32 minCnt, u32 maxCnt)
{
	cellSpurs.trace("cellSpursAddWorkload(spurs=*0x%x, wid=*0x%x, pm=*0x%x, size=0x%x, data=0x%llx, priority=*0x%x, minCnt=0x%x, maxCnt=0x%x)", spurs, wid, pm, size, data, priority, minCnt, maxCnt);

	return _spurs::add_workload(ppu, spurs, wid, pm, size, data, *priority, minCnt, maxCnt, vm::null, vm::null, vm::null, vm::null);
}

/// Add workload
s32 cellSpursAddWorkloadWithAttribute(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, vm::ptr<u32> wid, vm::cptr<CellSpursWorkloadAttribute> attr)
{
	cellSpurs.trace("cellSpursAddWorkloadWithAttribute(spurs=*0x%x, wid=*0x%x, attr=*0x%x)", spurs, wid, attr);

	if (!attr)
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_NULL_POINTER;
	}

	if (!attr.aligned())
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_ALIGN;
	}

	// Only attribute revision 1 is supported
	if (attr->revision != 1u)
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_INVAL;
	}

	return _spurs::add_workload(ppu, spurs, wid, attr->pm, attr->size, attr->data, attr->priority, attr->minContention, attr->maxContention, attr->nameClass, attr->nameInstance, attr->hook, attr->hookArg);
}

/// Request workload shutdown
s32 cellSpursShutdownWorkload(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 wid)
{
	cellSpurs.trace("cellSpursShutdownWorkload(spurs=*0x%x, wid=0x%x)", spurs, wid);

	if (!spurs)
		return CELL_SPURS_POLICY_MODULE_ERROR_NULL_POINTER;

	if (!spurs.aligned())
		return CELL_SPURS_POLICY_MODULE_ERROR_ALIGN;

	if (wid >= spurs->max_workloads())
		return CELL_SPURS_POLICY_MODULE_ERROR_INVAL;

	if (spurs->exception)
		return CELL_SPURS_POLICY_MODULE_ERROR_STAT;

	bool send_event;
	s32 rc,
// Continuation of cellSpursShutdownWorkload's local declarations
		old_state;

	// Atomically transition the workload state; the lambda's return value decides
	// whether the reservation store is committed (false = abort with rc)
	if (!vm::reservation_op(ppu, vm::unsafe_ptr_cast<spurs_wkl_state_op>(spurs.ptr(&CellSpurs::wklState1)), [&](spurs_wkl_state_op& op)
	{
		auto& state = wid < CELL_SPURS_MAX_WORKLOAD ? op.wklState1[wid] : op.wklState2[wid % 16];

		if (state <= SPURS_WKL_STATE_PREPARING)
		{
			rc = CELL_SPURS_POLICY_MODULE_ERROR_STAT;
			return false;
		}

		if (state == SPURS_WKL_STATE_SHUTTING_DOWN || state == SPURS_WKL_STATE_REMOVABLE)
		{
			// Shutdown already requested/completed: nothing to do
			rc = CELL_OK;
			return false;
		}

		// Still-running workloads go to SHUTTING_DOWN, idle ones straight to REMOVABLE
		auto& status = wid < CELL_SPURS_MAX_WORKLOAD ? op.wklStatus1[wid] : op.wklStatus2[wid % 16];
		old_state = state = status ? SPURS_WKL_STATE_SHUTTING_DOWN : SPURS_WKL_STATE_REMOVABLE;

		if (state == SPURS_WKL_STATE_SHUTTING_DOWN)
		{
			op.sysSrvMsgUpdateWorkload = -1;
			rc = CELL_OK;
			return true;
		}

		auto& event = wid < CELL_SPURS_MAX_WORKLOAD ? op.wklEvent1[wid] : op.wklEvent2[wid % 16];
		send_event = event & 0x12 && !(event & 1);
		event |= 1;
		rc = CELL_OK;
		return true;
	}))
	{
		return rc;
	}

	if (old_state == SPURS_WKL_STATE_SHUTTING_DOWN)
	{
		// Kick the system service so SPUs observe the workload update
		vm::light_op<true>(spurs->sysSrvMessage, [&](atomic_t<u8>& v){ v.release(0xff); });
		return CELL_OK;
	}

	if (send_event && sys_event_port_send(spurs->eventPort, 0, 0, (1u << 31) >> wid))
	{
		return CELL_SPURS_CORE_ERROR_STAT;
	}

	return CELL_OK;
}

/// Wait for workload shutdown
s32 cellSpursWaitForWorkloadShutdown(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 wid)
{
	cellSpurs.trace("cellSpursWaitForWorkloadShutdown(spurs=*0x%x, wid=0x%x)", spurs, wid);

	if (!spurs)
		return CELL_SPURS_POLICY_MODULE_ERROR_NULL_POINTER;

	if (!spurs.aligned())
		return CELL_SPURS_POLICY_MODULE_ERROR_ALIGN;

	if (wid >= spurs->max_workloads())
		return CELL_SPURS_POLICY_MODULE_ERROR_INVAL;

	if (!(spurs->wklEnabled & (0x80000000u >> wid)))
		return CELL_SPURS_POLICY_MODULE_ERROR_SRCH;

	if (spurs->exception)
		return CELL_SPURS_POLICY_MODULE_ERROR_STAT;

	auto& info = spurs->wklSyncInfo(wid);

	// Register this thread as the (single) waiter: x28 must be 0, set it to 2
	// NOTE(review): exact x28 value semantics inferred from usage — confirm against firmware
	const auto [_old0, ok] = vm::fetch_op(info.x28, [](be_t<u32>& val)
	{
		if (val)
		{
			return false;
		}

		val = 2;
		return true;
	});

	if (!ok)
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_STAT;
	}

	// Mark the waiter bit in the workload event byte unless shutdown already finished
	const auto [_old1, wait_sema] = vm::fetch_op<true>(spurs->wklEvent(wid), [](u8& event)
	{
		if ((event & 1) == 0 || (event & 0x22) == 0x2)
		{
			event |= 0x10;
			return true;
		}

		return false;
	});

	if (wait_sema)
	{
		ensure(sys_semaphore_wait(ppu, static_cast<u32>(info.sem), 0) == 0);
	}

	// Reverified
	if (spurs->exception)
		return CELL_SPURS_POLICY_MODULE_ERROR_STAT;

	return CELL_OK;
}

s32 cellSpursRemoveSystemWorkloadForUtility()
{
	UNIMPLEMENTED_FUNC(cellSpurs);
	return CELL_OK;
}

/// Remove workload
s32 cellSpursRemoveWorkload(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 wid)
{
	cellSpurs.trace("cellSpursRemoveWorkload(spurs=*0x%x, wid=%u)", spurs, wid);

	if (!spurs)
		return CELL_SPURS_POLICY_MODULE_ERROR_NULL_POINTER;

	if (!spurs.aligned())
		return CELL_SPURS_POLICY_MODULE_ERROR_ALIGN;

	if (wid >= CELL_SPURS_MAX_WORKLOAD2 || (wid >= CELL_SPURS_MAX_WORKLOAD && (spurs->flags1 & SF1_32_WORKLOADS) == 0))
		return CELL_SPURS_POLICY_MODULE_ERROR_INVAL;

	if (!(spurs->wklEnabled.load() & (0x80000000u >> wid)))
		return CELL_SPURS_POLICY_MODULE_ERROR_SRCH;

	if (spurs->exception)
		return CELL_SPURS_POLICY_MODULE_ERROR_STAT;

	// Only REMOVABLE workloads may be removed
	switch (spurs->wklState(wid))
	{
	case SPURS_WKL_STATE_SHUTTING_DOWN: return CELL_SPURS_POLICY_MODULE_ERROR_BUSY;
	case SPURS_WKL_STATE_REMOVABLE: break;
	default: return CELL_SPURS_POLICY_MODULE_ERROR_STAT;
	}

	// Detach the workload flag receiver if it points at this workload
	if (spurs->wklFlagReceiver == wid)
	{
		ensure(ppu_execute<&_cellSpursWorkloadFlagReceiver>(ppu, spurs, wid, 0) == 0);
	}

	s32 rc;

	vm::reservation_op(ppu, vm::unsafe_ptr_cast<spurs_wkl_state_op>(spurs.ptr(&CellSpurs::wklState1)), [&](spurs_wkl_state_op& op)
	{
		auto& state = wid < CELL_SPURS_MAX_WORKLOAD ?
// Continuation of cellSpursRemoveWorkload's reservation op
			op.wklState1[wid] : op.wklState2[wid % 16];

		// Re-verification, does not exist on realfw
		switch (state)
		{
		case SPURS_WKL_STATE_SHUTTING_DOWN:
			rc = CELL_SPURS_POLICY_MODULE_ERROR_BUSY;
			return false;
		case SPURS_WKL_STATE_REMOVABLE:
			break;
		default:
			rc = CELL_SPURS_POLICY_MODULE_ERROR_STAT;
			return false;
		}

		// Mark the slot free again
		state = SPURS_WKL_STATE_NON_EXISTENT;
		op.wklEnabled &= ~(0x80000000u >> wid);
		op.wklMskB &= ~(0x80000000u >> wid);
		rc = CELL_OK;
		return true;
	});

	return rc;
}

// Wake the SPURS handler thread so it re-evaluates runnable workloads
s32 cellSpursWakeUp(ppu_thread& ppu, vm::ptr<CellSpurs> spurs)
{
	cellSpurs.warning("cellSpursWakeUp(spurs=*0x%x)", spurs);

	if (!spurs)
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_NULL_POINTER;
	}

	if (!spurs.aligned())
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_ALIGN;
	}

	if (spurs->exception)
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_STAT;
	}

	spurs->handlerDirty.exchange(1);

	if (spurs->handlerWaiting)
	{
		_spurs::signal_to_handler_thread(ppu, spurs);
	}

	return CELL_OK;
}

/// Send a workload signal
s32 cellSpursSendWorkloadSignal(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 wid)
{
	cellSpurs.warning("cellSpursSendWorkloadSignal(spurs=*0x%x, wid=%d)", spurs, wid);

	if (!spurs)
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_NULL_POINTER;
	}

	if (!spurs.aligned())
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_ALIGN;
	}

	if (wid >= CELL_SPURS_MAX_WORKLOAD2 || (wid >= CELL_SPURS_MAX_WORKLOAD && (spurs->flags1 & SF1_32_WORKLOADS) == 0))
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_INVAL;
	}

	if (!(spurs->wklEnabled.load() & (0x80000000u >> wid)))
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_SRCH;
	}

	if (spurs->exception)
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_STAT;
	}

	if (spurs->wklState(wid) != SPURS_WKL_STATE_RUNNABLE)
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_STAT;
	}

	// Raise the workload's bit in the appropriate 16-bit signal word
	vm::light_op<true>(wid < CELL_SPURS_MAX_WORKLOAD ? spurs->wklSignal1 : spurs->wklSignal2, [&](atomic_be_t<u16>& sig)
	{
		sig |= 0x8000 >> (wid % 16);
	});

	return CELL_OK;
}

/// Get the address of the workload flag
s32 cellSpursGetWorkloadFlag(vm::ptr<CellSpurs> spurs, vm::pptr<CellSpursWorkloadFlag> flag)
{
	cellSpurs.warning("cellSpursGetWorkloadFlag(spurs=*0x%x, flag=**0x%x)", spurs, flag);

	if (!spurs || !flag)
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_NULL_POINTER;
	}

	if (!spurs.aligned())
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_ALIGN;
	}

	*flag = spurs.ptr(&CellSpurs::wklFlag);
	return CELL_OK;
}

/// Set ready count
s32 cellSpursReadyCountStore(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 wid, u32 value)
{
	cellSpurs.trace("cellSpursReadyCountStore(spurs=*0x%x, wid=%d, value=0x%x)", spurs, wid, value);

	if (!spurs)
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_NULL_POINTER;
	}

	if (!spurs.aligned())
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_ALIGN;
	}

	// Ready count is stored in a byte
	if (wid >= spurs->max_workloads() || value > 0xffu)
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_INVAL;
	}

	if ((spurs->wklEnabled.load() & (0x80000000u >> wid)) == 0u)
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_SRCH;
	}

	if (spurs->exception || spurs->wklState(wid) != SPURS_WKL_STATE_RUNNABLE)
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_STAT;
	}

	vm::light_op<true>(spurs->readyCount(wid), [&](atomic_t<u8>& v)
	{
		v.release(static_cast<u8>(value));
	});

	return CELL_OK;
}

/// Swap ready count
s32 cellSpursReadyCountSwap(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 wid, vm::ptr<u32> old, u32 swap)
{
	cellSpurs.trace("cellSpursReadyCountSwap(spurs=*0x%x, wid=%d, old=*0x%x, swap=0x%x)", spurs, wid, old, swap);

	if (!spurs || !old)
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_NULL_POINTER;
	}

	if (!spurs.aligned())
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_ALIGN;
	}

	if (wid >= spurs->max_workloads() || swap > 0xffu)
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_INVAL;
	}

	if ((spurs->wklEnabled.load() & (0x80000000u >> wid)) == 0u)
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_SRCH;
	}

	if (spurs->exception || spurs->wklState(wid) !=
// Continuation of cellSpursReadyCountSwap's state check
		SPURS_WKL_STATE_RUNNABLE)
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_STAT;
	}

	// Atomically exchange the ready count and return the previous value
	*old = vm::light_op(spurs->readyCount(wid), [&](atomic_t<u8>& v)
	{
		return v.exchange(static_cast<u8>(swap));
	});

	return CELL_OK;
}

/// Compare and swap ready count
s32 cellSpursReadyCountCompareAndSwap(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 wid, vm::ptr<u32> old, u32 compare, u32 swap)
{
	cellSpurs.trace("cellSpursReadyCountCompareAndSwap(spurs=*0x%x, wid=%d, old=*0x%x, compare=0x%x, swap=0x%x)", spurs, wid, old, compare, swap);

	if (!spurs || !old)
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_NULL_POINTER;
	}

	if (!spurs.aligned())
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_ALIGN;
	}

	if (wid >= spurs->max_workloads() || (swap | compare) > 0xffu)
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_INVAL;
	}

	if ((spurs->wklEnabled.load() & (0x80000000u >> wid)) == 0u)
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_SRCH;
	}

	if (spurs->exception || spurs->wklState(wid) != SPURS_WKL_STATE_RUNNABLE)
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_STAT;
	}

	// compare_exchange updates 'temp' with the observed value when the CAS fails,
	// so *old always receives the pre-operation ready count
	u8 temp = static_cast<u8>(compare);

	vm::light_op(spurs->readyCount(wid), [&](atomic_t<u8>& v)
	{
		v.compare_exchange(temp, static_cast<u8>(swap));
	});

	*old = temp;
	return CELL_OK;
}

/// Increase or decrease ready count
s32 cellSpursReadyCountAdd(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 wid, vm::ptr<u32> old, s32 value)
{
	cellSpurs.trace("cellSpursReadyCountAdd(spurs=*0x%x, wid=%d, old=*0x%x, value=0x%x)", spurs, wid, old, value);

	if (!spurs || !old)
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_NULL_POINTER;
	}

	if (!spurs.aligned())
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_ALIGN;
	}

	if (wid >= spurs->max_workloads())
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_INVAL;
	}

	if ((spurs->wklEnabled.load() & (0x80000000u >> wid)) == 0u)
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_SRCH;
	}

	if (spurs->exception || spurs->wklState(wid) != SPURS_WKL_STATE_RUNNABLE)
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_STAT;
	}

	// Saturating add: clamp the result to the byte range; *old gets the previous count
	*old = vm::fetch_op(spurs->readyCount(wid), [&](u8& val)
	{
		val = static_cast<u8>(std::clamp<s32>(val + static_cast<u32>(value), 0, 255));
	});

	return CELL_OK;
}

/// Get workload's data to be passed to policy module
s32 cellSpursGetWorkloadData(vm::ptr<CellSpurs> spurs, vm::ptr<u64> data, u32 wid)
{
	cellSpurs.trace("cellSpursGetWorkloadData(spurs=*0x%x, data=*0x%x, wid=%d)", spurs, data, wid);

	if (!spurs || !data)
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_NULL_POINTER;
	}

	if (!spurs.aligned())
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_ALIGN;
	}

	if (wid >= CELL_SPURS_MAX_WORKLOAD2 || (wid >= CELL_SPURS_MAX_WORKLOAD && (spurs->flags1 & SF1_32_WORKLOADS) == 0))
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_INVAL;
	}

	if ((spurs->wklEnabled.load() & (0x80000000u >> wid)) == 0u)
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_SRCH;
	}

	if (spurs->exception)
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_STAT;
	}

	if (wid >= CELL_SPURS_MAX_WORKLOAD)
	{
		*data = spurs->wklInfo2[wid & 0x0F].arg;
	}
	else
	{
		*data = spurs->wklInfo1[wid].arg;
	}

	return CELL_OK;
}

/// Get workload information
s32 cellSpursGetWorkloadInfo(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 wid, vm::ptr<CellSpursWorkloadInfo> info)
{
	// Not implemented: logged as TODO, returns success without filling 'info'
	cellSpurs.todo("cellSpursGetWorkloadInfo(spurs=*0x%x, wid=0x%x, info=*0x%x)", spurs, wid, info);
	return CELL_OK;
}

/// Set the SPU exception event handler
s32 cellSpursSetExceptionEventHandler()
{
	UNIMPLEMENTED_FUNC(cellSpurs);
	return CELL_OK;
}

/// Disable the SPU exception event handler
s32 cellSpursUnsetExceptionEventHandler()
{
	UNIMPLEMENTED_FUNC(cellSpurs);
	return CELL_OK;
}

/// Set/unset the recipient of the workload flag
s32 _cellSpursWorkloadFlagReceiver(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 wid, u32 is_set)
{
	cellSpurs.warning("_cellSpursWorkloadFlagReceiver(spurs=*0x%x, wid=%d, is_set=%d)", spurs, wid, is_set);

	if (!spurs)
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_NULL_POINTER;
	}

	if (!spurs.aligned())
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_ALIGN;
	}

	if (wid >= spurs->max_workloads())
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_INVAL;
	}

	if
// Continuation of _cellSpursWorkloadFlagReceiver's validation
	((spurs->wklEnabled.load() & (0x80000000u >> wid)) == 0u)
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_SRCH;
	}

	if (spurs->exception)
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_STAT;
	}

	atomic_fence_acq_rel();

	// Overlay of the SPURS area exposing only the workload flag and receiver byte
	struct alignas(128) wklFlagOp
	{
		u8 uns[0x6C];
		be_t<u32> Flag; // 0x6C
		u8 uns2[0x7];
		u8 FlagReceiver; // 0x77
	};

	s32 res = CELL_OK;

	vm::reservation_op(ppu, vm::unsafe_ptr_cast<wklFlagOp>(spurs), [&](wklFlagOp& val)
	{
		// Setting requires the receiver slot to be free (0xff); clearing requires ownership
		if (is_set)
		{
			if (val.FlagReceiver != 0xff)
			{
				res = CELL_SPURS_POLICY_MODULE_ERROR_BUSY;
				return;
			}
		}
		else
		{
			if (val.FlagReceiver != wid)
			{
				res = CELL_SPURS_POLICY_MODULE_ERROR_PERM;
				return;
			}
		}

		// Reset the flag itself whenever the receiver changes
		val.Flag = -1;

		if (is_set)
		{
			if (val.FlagReceiver == 0xff)
			{
				val.FlagReceiver = static_cast<u8>(wid);
			}
		}
		else
		{
			if (val.FlagReceiver == wid)
			{
				val.FlagReceiver = 0xff;
			}
		}

		res = CELL_OK;
	});

	return res;
}

/// Set/unset the recipient of the workload flag
s32 _cellSpursWorkloadFlagReceiver2(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 wid, u32 is_set, u32 print_debug_output)
{
	cellSpurs.warning("_cellSpursWorkloadFlagReceiver2(spurs=*0x%x, wid=%d, is_set=%d, print_debug_output=%d)", spurs, wid, is_set, print_debug_output);

	if (!spurs)
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_NULL_POINTER;
	}

	if (!spurs.aligned())
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_ALIGN;
	}

	if (wid >= spurs->max_workloads())
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_INVAL;
	}

	if ((spurs->wklEnabled.load() & (0x80000000u >> wid)) == 0u)
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_SRCH;
	}

	if (spurs->exception)
	{
		return CELL_SPURS_POLICY_MODULE_ERROR_STAT;
	}

	atomic_fence_acq_rel();

	s32 res = CELL_OK;

	// Unlike _cellSpursWorkloadFlagReceiver, only the receiver byte is modified here
	vm::atomic_op<true>(spurs->wklFlagReceiver, [&](u8& FlagReceiver)
	{
		if (is_set)
		{
			if (FlagReceiver != 0xff)
			{
				res = CELL_SPURS_POLICY_MODULE_ERROR_BUSY;
				return;
			}
		}
		else
		{
			if (FlagReceiver != wid)
			{
				res = CELL_SPURS_POLICY_MODULE_ERROR_PERM;
				return;
			}
		}

		if (is_set)
		{
			if (FlagReceiver == 0xff)
			{
				FlagReceiver = static_cast<u8>(wid);
			}
		}
		else
		{
			if (FlagReceiver == wid)
			{
				FlagReceiver = 0xff;
			}
		}

		res = CELL_OK;
	});

	return res;
}

/// Request assignment of idle SPUs
s32 cellSpursRequestIdleSpu(vm::ptr<CellSpurs> spurs, u32 wid, u32 count)
{
	cellSpurs.trace("cellSpursRequestIdleSpu(spurs=*0x%x, wid=%d, count=%d)", spurs, wid, count);

	if (!spurs)
	{
		return CELL_SPURS_CORE_ERROR_NULL_POINTER;
	}

	if (!spurs.aligned())
	{
		return CELL_SPURS_CORE_ERROR_ALIGN;
	}

	// Old API: This function doesn't support 32 workloads
	if (spurs->flags1 & SF1_32_WORKLOADS)
	{
		return CELL_SPURS_CORE_ERROR_STAT;
	}

	if (wid >= CELL_SPURS_MAX_WORKLOAD || count >= CELL_SPURS_MAX_SPU)
	{
		return CELL_SPURS_CORE_ERROR_INVAL;
	}

	if ((spurs->wklEnabled.load() & (0x80000000u >> wid)) == 0u)
	{
		return CELL_SPURS_CORE_ERROR_SRCH;
	}

	if (spurs->exception)
	{
		return CELL_SPURS_CORE_ERROR_STAT;
	}

	vm::light_op<true>(spurs->wklIdleSpuCountOrReadyCount2[wid], FN(x.release(static_cast<u8>(count))));
	return CELL_OK;
}

//----------------------------------------------------------------------------
// SPURS event flag functions
//----------------------------------------------------------------------------

/// Initialize a SPURS event flag
s32 _cellSpursEventFlagInitialize(vm::ptr<CellSpurs> spurs, vm::ptr<CellSpursTaskset> taskset, vm::ptr<CellSpursEventFlag> eventFlag, u32 flagClearMode, u32 flagDirection)
{
	cellSpurs.warning("_cellSpursEventFlagInitialize(spurs=*0x%x, taskset=*0x%x, eventFlag=*0x%x, flagClearMode=%d, flagDirection=%d)", spurs, taskset, eventFlag, flagClearMode, flagDirection);

	// Either a taskset or a SPURS instance must be supplied
	if ((!taskset && !spurs) || !eventFlag)
	{
		return CELL_SPURS_TASK_ERROR_NULL_POINTER;
	}

	if (!spurs.aligned() || !taskset.aligned() || !eventFlag.aligned())
	{
		return CELL_SPURS_TASK_ERROR_ALIGN;
	}

	if (taskset && taskset->wid >= CELL_SPURS_MAX_WORKLOAD2)
	{
		return CELL_SPURS_TASK_ERROR_INVAL;
	}

	if (flagDirection > CELL_SPURS_EVENT_FLAG_LAST || flagClearMode > CELL_SPURS_EVENT_FLAG_CLEAR_LAST)
	{
		return CELL_SPURS_TASK_ERROR_INVAL;
	}

	memset(eventFlag.get_ptr(), 0, sizeof(CellSpursEventFlag));
	eventFlag->direction = flagDirection;
	eventFlag->clearMode =
// Continuation of _cellSpursEventFlagInitialize
		flagClearMode;
	eventFlag->spuPort = CELL_SPURS_EVENT_FLAG_INVALID_SPU_PORT;

	if (taskset)
	{
		eventFlag->addr = taskset.addr();
	}
	else
	{
		// Inter-workload (IWL) event flag: bound to the SPURS instance itself
		eventFlag->isIwl = 1;
		eventFlag->addr = spurs.addr();
	}

	return CELL_OK;
}

/// Reset a SPURS event flag
s32 cellSpursEventFlagClear(vm::ptr<CellSpursEventFlag> eventFlag, u16 bits)
{
	cellSpurs.warning("cellSpursEventFlagClear(eventFlag=*0x%x, bits=0x%x)", eventFlag, bits);

	if (!eventFlag)
	{
		return CELL_SPURS_TASK_ERROR_NULL_POINTER;
	}

	if (!eventFlag.aligned())
	{
		return CELL_SPURS_TASK_ERROR_ALIGN;
	}

	eventFlag->events &= ~bits;
	return CELL_OK;
}

/// Set a SPURS event flag
s32 cellSpursEventFlagSet(ppu_thread& ppu, vm::ptr<CellSpursEventFlag> eventFlag, u16 bits)
{
	cellSpurs.trace("cellSpursEventFlagSet(eventFlag=*0x%x, bits=0x%x)", eventFlag, bits);

	if (!eventFlag)
	{
		return CELL_SPURS_TASK_ERROR_NULL_POINTER;
	}

	if (!eventFlag.aligned())
	{
		return CELL_SPURS_TASK_ERROR_ALIGN;
	}

	// Only flags the PPU may set can be targeted from here
	if (auto dir = eventFlag->direction; dir != CELL_SPURS_EVENT_FLAG_SPU2PPU && dir != CELL_SPURS_EVENT_FLAG_ANY2ANY)
	{
		return CELL_SPURS_TASK_ERROR_PERM;
	}

	bool send;
	u8 ppuWaitSlot;
	u16 ppuEvents;
	u16 pendingRecv;
	u16 pendingRecvTaskEvents[16];

	// Merge the new bits and work out which PPU thread / SPU tasks become unblocked
	vm::reservation_op(ppu, vm::unsafe_ptr_cast<CellSpursEventFlag_x00>(eventFlag), [bits, &send, &ppuWaitSlot, &ppuEvents, &pendingRecv, &pendingRecvTaskEvents](CellSpursEventFlag_x00& eventFlag)
	{
		send = false;
		ppuWaitSlot = 0;
		ppuEvents = 0;
		pendingRecv = 0;

		u16 eventsToClear = 0;
		auto& ctrl = eventFlag.ctrl;

		if (eventFlag.direction == CELL_SPURS_EVENT_FLAG_ANY2ANY && ctrl.ppuWaitMask)
		{
			u16 ppuRelevantEvents = (ctrl.events | bits) & ctrl.ppuWaitMask;

			// Unblock the waiting PPU thread if either all the bits being waited by the thread have been set or
			// if the wait mode of the thread is OR and atleast one bit the thread is waiting on has been set
			if ((ctrl.ppuWaitMask & ~ppuRelevantEvents) == 0 || ((ctrl.ppuWaitSlotAndMode & 0x0F) == CELL_SPURS_EVENT_FLAG_OR && ppuRelevantEvents != 0))
			{
				ctrl.ppuPendingRecv = 1;
				ctrl.ppuWaitMask = 0;
				ppuEvents = ppuRelevantEvents;
				eventsToClear = ppuRelevantEvents;
				ppuWaitSlot = ctrl.ppuWaitSlotAndMode >> 4;
				send = true;
			}
		}

		// Scan all used SPU task wait slots (slot index i counts down, bit index j counts up)
		s32 i = CELL_SPURS_EVENT_FLAG_MAX_WAIT_SLOTS - 1;
		s32 j = 0;
		u16 relevantWaitSlots = eventFlag.spuTaskUsedWaitSlots & ~ctrl.spuTaskPendingRecv;

		while (relevantWaitSlots)
		{
			if (relevantWaitSlots & 0x0001)
			{
				u16 spuTaskRelevantEvents = (ctrl.events | bits) & eventFlag.spuTaskWaitMask[i];

				// Unblock the waiting SPU task if either all the bits being waited by the task have been set or
				// if the wait mode of the task is OR and atleast one bit the thread is waiting on has been set
				if ((eventFlag.spuTaskWaitMask[i] & ~spuTaskRelevantEvents) == 0 || (((eventFlag.spuTaskWaitMode >> j) & 0x0001) == CELL_SPURS_EVENT_FLAG_OR && spuTaskRelevantEvents != 0))
				{
					eventsToClear |= spuTaskRelevantEvents;
					pendingRecv |= 1 << j;
					pendingRecvTaskEvents[j] = spuTaskRelevantEvents;
				}
			}

			relevantWaitSlots >>= 1;
			i--;
			j++;
		}

		ctrl.events |= bits;
		ctrl.spuTaskPendingRecv |= pendingRecv;

		// If the clear flag is AUTO then clear the bits comnsumed by all tasks marked to be unblocked
		if (eventFlag.clearMode == CELL_SPURS_EVENT_FLAG_CLEAR_AUTO)
		{
			ctrl.events &= ~eventsToClear;
		}

		//eventFlagControl = ((u64)events << 48) | ((u64)spuTaskPendingRecv << 32) | ((u64)ppuWaitMask << 16) | ((u64)ppuWaitSlotAndMode << 8) | (u64)ppuPendingRecv;
	});

	if (send)
	{
		// Signal the PPU thread to be woken up
		eventFlag->pendingRecvTaskEvents[ppuWaitSlot] = ppuEvents;
		ensure(sys_event_port_send(eventFlag->eventPortId, 0, 0, 0) == 0);

		static_cast<void>(ppu.test_stopped());
	}

	if (pendingRecv)
	{
		// Signal each SPU task whose conditions have been met to be woken up
		for (s32 i = 0; i < CELL_SPURS_EVENT_FLAG_MAX_WAIT_SLOTS; i++)
		{
			if (pendingRecv & (0x8000 >> i))
			{
				eventFlag->pendingRecvTaskEvents[i] = pendingRecvTaskEvents[i];
				vm::var<vm::ptr<CellSpursTaskset>> taskset;

				if (eventFlag->isIwl)
				{
					cellSpursLookUpTasksetAddress(ppu, vm::cast(eventFlag->addr), taskset,
eventFlag->waitingTaskWklId[i]); } else { *taskset = vm::cast(eventFlag->addr); } auto rc = _cellSpursSendSignal(ppu, *taskset, eventFlag->waitingTaskId[i]); if (rc + 0u == CELL_SPURS_TASK_ERROR_INVAL || rc + 0u == CELL_SPURS_TASK_ERROR_STAT) { return CELL_SPURS_TASK_ERROR_FATAL; } ensure(rc == CELL_OK); } } } return CELL_OK; } s32 _spurs::event_flag_wait(ppu_thread& ppu, vm::ptr<CellSpursEventFlag> eventFlag, vm::ptr<u16> mask, u32 mode, u32 block) { if (!eventFlag || !mask) { return CELL_SPURS_TASK_ERROR_NULL_POINTER; } if (!eventFlag.aligned()) { return CELL_SPURS_TASK_ERROR_ALIGN; } if (mode > CELL_SPURS_EVENT_FLAG_WAIT_MODE_LAST) { return CELL_SPURS_TASK_ERROR_INVAL; } if (auto dir = eventFlag->direction; dir != CELL_SPURS_EVENT_FLAG_SPU2PPU && dir != CELL_SPURS_EVENT_FLAG_ANY2ANY) { return CELL_SPURS_TASK_ERROR_PERM; } if (block && eventFlag->spuPort == CELL_SPURS_EVENT_FLAG_INVALID_SPU_PORT) { return CELL_SPURS_TASK_ERROR_STAT; } if (eventFlag->ctrl.raw().ppuWaitMask || eventFlag->ctrl.raw().ppuPendingRecv) { return CELL_SPURS_TASK_ERROR_BUSY; } bool recv; s32 rc; u16 receivedEvents; vm::atomic_op(eventFlag->ctrl, [eventFlag, mask, mode, block, &recv, &rc, &receivedEvents](CellSpursEventFlag::ControlSyncVar& ctrl) { u16 relevantEvents = ctrl.events & *mask; if (eventFlag->direction == CELL_SPURS_EVENT_FLAG_ANY2ANY) { // Make sure the wait mask and mode specified does not conflict with that of the already waiting tasks. // Conflict scenarios: // OR vs OR - A conflict never occurs // OR vs AND - A conflict occurs if the masks for the two tasks overlap // AND vs AND - A conflict occurs if the masks for the two tasks are not the same // Determine the set of all already waiting tasks whose wait mode/mask can possibly conflict with the specified wait mode/mask. // This set is equal to 'set of all tasks waiting' - 'set of all tasks whose wait conditions have been met'. 
// If the wait mode is OR, we prune the set of all tasks that are waiting in OR mode from the set since a conflict cannot occur // with an already waiting task in OR mode. u16 relevantWaitSlots = eventFlag->spuTaskUsedWaitSlots & ~ctrl.spuTaskPendingRecv; if (mode == CELL_SPURS_EVENT_FLAG_OR) { relevantWaitSlots &= eventFlag->spuTaskWaitMode; } s32 i = CELL_SPURS_EVENT_FLAG_MAX_WAIT_SLOTS - 1; while (relevantWaitSlots) { if (relevantWaitSlots & 0x0001) { if (eventFlag->spuTaskWaitMask[i] & *mask && eventFlag->spuTaskWaitMask[i] != *mask) { rc = CELL_SPURS_TASK_ERROR_AGAIN; return; } } relevantWaitSlots >>= 1; i--; } } // There is no need to block if all bits required by the wait operation have already been set or // if the wait mode is OR and atleast one of the bits required by the wait operation has been set. if ((*mask & ~relevantEvents) == 0 || (mode == CELL_SPURS_EVENT_FLAG_OR && relevantEvents)) { // If the clear flag is AUTO then clear the bits comnsumed by this thread if (eventFlag->clearMode == CELL_SPURS_EVENT_FLAG_CLEAR_AUTO) { ctrl.events &= ~relevantEvents; } recv = false; receivedEvents = relevantEvents; } else { // If we reach here it means that the conditions for this thread have not been met. // If this is a try wait operation then do not block but return an error code. 
if (block == 0) { rc = CELL_SPURS_TASK_ERROR_BUSY; return; } ctrl.ppuWaitSlotAndMode = 0; if (eventFlag->direction == CELL_SPURS_EVENT_FLAG_ANY2ANY) { // Find an unsed wait slot s32 i = 0; u16 spuTaskUsedWaitSlots = eventFlag->spuTaskUsedWaitSlots; while (spuTaskUsedWaitSlots & 0x0001) { spuTaskUsedWaitSlots >>= 1; i++; } if (i == CELL_SPURS_EVENT_FLAG_MAX_WAIT_SLOTS) { // Event flag has no empty wait slots rc = CELL_SPURS_TASK_ERROR_BUSY; return; } // Mark the found wait slot as used by this thread ctrl.ppuWaitSlotAndMode = (CELL_SPURS_EVENT_FLAG_MAX_WAIT_SLOTS - 1 - i) << 4; } // Save the wait mask and mode for this thread ctrl.ppuWaitSlotAndMode |= mode; ctrl.ppuWaitMask = *mask; recv = true; } //eventFlagControl = ((u64)events << 48) | ((u64)spuTaskPendingRecv << 32) | ((u64)ppuWaitMask << 16) | ((u64)ppuWaitSlotAndMode << 8) | (u64)ppuPendingRecv; rc = CELL_OK; }); if (rc != CELL_OK) { return rc; } if (recv) { // Block till something happens ensure(sys_event_queue_receive(ppu, eventFlag->eventQueueId, vm::null, 0) == 0); static_cast<void>(ppu.test_stopped()); s32 i = 0; if (eventFlag->direction == CELL_SPURS_EVENT_FLAG_ANY2ANY) { i = eventFlag->ctrl.raw().ppuWaitSlotAndMode >> 4; } *mask = eventFlag->pendingRecvTaskEvents[i]; vm::atomic_op(eventFlag->ctrl, [](CellSpursEventFlag::ControlSyncVar& ctrl) { ctrl.ppuPendingRecv = 0; }); } *mask = receivedEvents; return CELL_OK; } /// Wait for SPURS event flag s32 cellSpursEventFlagWait(ppu_thread& ppu, vm::ptr<CellSpursEventFlag> eventFlag, vm::ptr<u16> mask, u32 mode) { cellSpurs.warning("cellSpursEventFlagWait(eventFlag=*0x%x, mask=*0x%x, mode=%d)", eventFlag, mask, mode); return _spurs::event_flag_wait(ppu, eventFlag, mask, mode, 1); } /// Check SPURS event flag s32 cellSpursEventFlagTryWait(ppu_thread& ppu, vm::ptr<CellSpursEventFlag> eventFlag, vm::ptr<u16> mask, u32 mode) { cellSpurs.warning("cellSpursEventFlagTryWait(eventFlag=*0x%x, mask=*0x%x, mode=0x%x)", eventFlag, mask, mode); return 
_spurs::event_flag_wait(ppu, eventFlag, mask, mode, 0);
}

/// Attach an LV2 event queue to a SPURS event flag
s32 cellSpursEventFlagAttachLv2EventQueue(ppu_thread& ppu, vm::ptr<CellSpursEventFlag> eventFlag)
{
	cellSpurs.warning("cellSpursEventFlagAttachLv2EventQueue(eventFlag=*0x%x)", eventFlag);

	if (!eventFlag)
	{
		return CELL_SPURS_TASK_ERROR_NULL_POINTER;
	}

	// NOTE(review): misalignment returns ERROR_AGAIN here (not ERROR_ALIGN) -
	// presumably mirroring firmware behaviour; confirm before changing
	if (!eventFlag.aligned())
	{
		return CELL_SPURS_TASK_ERROR_AGAIN;
	}

	if (eventFlag->direction != CELL_SPURS_EVENT_FLAG_SPU2PPU && eventFlag->direction != CELL_SPURS_EVENT_FLAG_ANY2ANY)
	{
		return CELL_SPURS_TASK_ERROR_PERM;
	}

	// Already attached
	if (eventFlag->spuPort != CELL_SPURS_EVENT_FLAG_INVALID_SPU_PORT)
	{
		return CELL_SPURS_TASK_ERROR_STAT;
	}

	// Resolve the owning SPURS instance: directly for inter-workload flags,
	// otherwise via the owning taskset
	vm::ptr<CellSpurs> spurs;
	if (eventFlag->isIwl == 1)
	{
		spurs = vm::cast(eventFlag->addr);
	}
	else
	{
		auto taskset = vm::ptr<CellSpursTaskset>::make(vm::cast(eventFlag->addr));
		spurs = taskset->spurs;
	}

	vm::var<u32> eventQueueId;
	vm::var<u8> port;

	auto failure = [](s32 rc) -> s32
	{
		// Return rc if its an error code from SPURS otherwise convert the error code to a SPURS task error code
		return (rc & 0x0FFF0000) == 0x00410000 ? rc : (0x80410900 | (rc & 0xFF));
	};

	if (s32 rc = _spurs::create_lv2_eq(ppu, spurs, eventQueueId, port, 1, sys_event_queue_attribute_t{SYS_SYNC_PRIORITY, SYS_PPU_QUEUE, {"_spuEvF\0"_u64}}))
	{
		return failure(rc);
	}

	// Commit the created queue id and SPU port number into the event flag
	auto success = [&]
	{
		eventFlag->eventQueueId = *eventQueueId;
		eventFlag->spuPort = *port;
	};

	if (eventFlag->direction == CELL_SPURS_EVENT_FLAG_ANY2ANY)
	{
		// ANY2ANY additionally needs a local event port so the PPU side can signal the queue
		vm::var<u32> eventPortId;

		s32 rc = sys_event_port_create(ppu, eventPortId, SYS_EVENT_PORT_LOCAL, 0);
		if (rc == CELL_OK)
		{
			rc = sys_event_port_connect_local(ppu, *eventPortId, *eventQueueId);
			if (rc == CELL_OK)
			{
				eventFlag->eventPortId = *eventPortId;
				return success(), CELL_OK;
			}

			sys_event_port_destroy(ppu, *eventPortId);
		}

		// Port setup failed: roll back the event queue created above
		if (_spurs::detach_lv2_eq(spurs, *port, true) == CELL_OK)
		{
			sys_event_queue_destroy(ppu, *eventQueueId, SYS_EVENT_QUEUE_DESTROY_FORCE);
		}

		return failure(rc);
	}

	return success(), CELL_OK;
}

/// Detach an LV2 event queue from SPURS event flag
s32 cellSpursEventFlagDetachLv2EventQueue(ppu_thread& ppu, vm::ptr<CellSpursEventFlag> eventFlag)
{
	cellSpurs.warning("cellSpursEventFlagDetachLv2EventQueue(eventFlag=*0x%x)", eventFlag);

	if (!eventFlag)
	{
		return CELL_SPURS_TASK_ERROR_NULL_POINTER;
	}

	// NOTE(review): ERROR_AGAIN on misalignment - same oddity as the attach path
	if (!eventFlag.aligned())
	{
		return CELL_SPURS_TASK_ERROR_AGAIN;
	}

	if (eventFlag->direction != CELL_SPURS_EVENT_FLAG_SPU2PPU && eventFlag->direction != CELL_SPURS_EVENT_FLAG_ANY2ANY)
	{
		return CELL_SPURS_TASK_ERROR_PERM;
	}

	// Not attached
	if (eventFlag->spuPort == CELL_SPURS_EVENT_FLAG_INVALID_SPU_PORT)
	{
		return CELL_SPURS_TASK_ERROR_STAT;
	}

	// Cannot detach while a PPU thread is waiting (or has a pending receive) on the flag
	if (eventFlag->ctrl.raw().ppuWaitMask || eventFlag->ctrl.raw().ppuPendingRecv)
	{
		return CELL_SPURS_TASK_ERROR_BUSY;
	}

	// Invalidate the SPU port first so further waits fail with STAT
	const u8 port = eventFlag->spuPort;
	eventFlag->spuPort = CELL_SPURS_EVENT_FLAG_INVALID_SPU_PORT;

	vm::ptr<CellSpurs> spurs;
	if (eventFlag->isIwl == 1)
	{
		spurs = vm::cast(eventFlag->addr);
	}
	else
	{
		auto taskset = vm::ptr<CellSpursTaskset>::make(vm::cast(eventFlag->addr));
		spurs = taskset->spurs;
	}

	if (eventFlag->direction == CELL_SPURS_EVENT_FLAG_ANY2ANY)
	{
		sys_event_port_disconnect(ppu, eventFlag->eventPortId);
		sys_event_port_destroy(ppu, eventFlag->eventPortId);
	}

	// NOTE(review): rc is computed but the function always returns CELL_OK -
	// detach/destroy failures are swallowed, possibly deliberately; verify against firmware
	s32 rc = _spurs::detach_lv2_eq(spurs, port, true);

	if (rc == CELL_OK)
	{
		rc = sys_event_queue_destroy(ppu, eventFlag->eventQueueId, SYS_EVENT_QUEUE_DESTROY_FORCE);
	}

	return CELL_OK;
}

/// Get send-receive direction of the SPURS event flag
s32 cellSpursEventFlagGetDirection(vm::ptr<CellSpursEventFlag> eventFlag, vm::ptr<u32> direction)
{
	cellSpurs.warning("cellSpursEventFlagGetDirection(eventFlag=*0x%x, direction=*0x%x)", eventFlag, direction);

	if (!eventFlag || !direction)
	{
		return CELL_SPURS_TASK_ERROR_NULL_POINTER;
	}

	if (!eventFlag.aligned())
	{
		return CELL_SPURS_TASK_ERROR_ALIGN;
	}

	*direction = eventFlag->direction;
	return CELL_OK;
}

/// Get clearing mode of SPURS event flag
s32 cellSpursEventFlagGetClearMode(vm::ptr<CellSpursEventFlag> eventFlag, vm::ptr<u32> clear_mode)
{
	cellSpurs.warning("cellSpursEventFlagGetClearMode(eventFlag=*0x%x, clear_mode=*0x%x)", eventFlag, clear_mode);

	if (!eventFlag || !clear_mode)
	{
		return CELL_SPURS_TASK_ERROR_NULL_POINTER;
	}

	if (!eventFlag.aligned())
	{
		return CELL_SPURS_TASK_ERROR_ALIGN;
	}

	*clear_mode = eventFlag->clearMode;
	return CELL_OK;
}

/// Get address of taskset to which the SPURS event flag belongs
s32 cellSpursEventFlagGetTasksetAddress(vm::ptr<CellSpursEventFlag> eventFlag, vm::pptr<CellSpursTaskset> taskset)
{
	cellSpurs.warning("cellSpursEventFlagGetTasksetAddress(eventFlag=*0x%x, taskset=**0x%x)", eventFlag, taskset);

	if (!eventFlag || !taskset)
	{
		return CELL_SPURS_TASK_ERROR_NULL_POINTER;
	}

	if (!eventFlag.aligned())
	{
		return CELL_SPURS_TASK_ERROR_ALIGN;
	}

	// Inter-workload (isIwl) flags have no owning taskset; report 0 in that case
	taskset->set(eventFlag->isIwl ? 0u : vm::cast(eventFlag->addr));
	return CELL_OK;
}

// Translate a cellSync error code into the SPURS task error domain (pass through non-negative results)
static inline s32 SyncErrorToSpursError(s32 res)
{
	return res < 0 ?
0x80410900 | (res & 0xff) : res;
}

s32 _cellSpursLFQueueInitialize(vm::ptr<void> pTasksetOrSpurs, vm::ptr<CellSpursLFQueue> pQueue, vm::cptr<void> buffer, u32 size, u32 depth, u32 direction)
{
	cellSpurs.todo("_cellSpursLFQueueInitialize(pTasksetOrSpurs=*0x%x, pQueue=*0x%x, buffer=*0x%x, size=0x%x, depth=0x%x, direction=%d)", pTasksetOrSpurs, pQueue, buffer, size, depth, direction);

	// Delegates to cellSync's lock-free queue implementation, translating the error domain
	return SyncErrorToSpursError(cellSyncLFQueueInitialize(pQueue, buffer, size, depth, direction, pTasksetOrSpurs));
}

// The LF-queue / queue functions below are unimplemented stubs
s32 _cellSpursLFQueuePushBody()
{
	UNIMPLEMENTED_FUNC(cellSpurs);
	return CELL_OK;
}

s32 cellSpursLFQueueAttachLv2EventQueue(vm::ptr<CellSyncLFQueue> queue)
{
	UNIMPLEMENTED_FUNC(cellSpurs);
	return CELL_OK;
}

s32 cellSpursLFQueueDetachLv2EventQueue(vm::ptr<CellSyncLFQueue> queue)
{
	UNIMPLEMENTED_FUNC(cellSpurs);
	return CELL_OK;
}

s32 _cellSpursLFQueuePopBody()
{
	UNIMPLEMENTED_FUNC(cellSpurs);
	return CELL_OK;
}

s32 cellSpursLFQueueGetTasksetAddress()
{
	UNIMPLEMENTED_FUNC(cellSpurs);
	return CELL_OK;
}

s32 _cellSpursQueueInitialize()
{
	UNIMPLEMENTED_FUNC(cellSpurs);
	return CELL_OK;
}

s32 cellSpursQueuePopBody()
{
	UNIMPLEMENTED_FUNC(cellSpurs);
	return CELL_OK;
}

s32 cellSpursQueuePushBody()
{
	UNIMPLEMENTED_FUNC(cellSpurs);
	return CELL_OK;
}

s32 cellSpursQueueAttachLv2EventQueue()
{
	UNIMPLEMENTED_FUNC(cellSpurs);
	return CELL_OK;
}

s32 cellSpursQueueDetachLv2EventQueue()
{
	UNIMPLEMENTED_FUNC(cellSpurs);
	return CELL_OK;
}

s32 cellSpursQueueGetTasksetAddress()
{
	UNIMPLEMENTED_FUNC(cellSpurs);
	return CELL_OK;
}

s32 cellSpursQueueClear()
{
	UNIMPLEMENTED_FUNC(cellSpurs);
	return CELL_OK;
}

s32 cellSpursQueueDepth()
{
	UNIMPLEMENTED_FUNC(cellSpurs);
	return CELL_OK;
}

s32 cellSpursQueueGetEntrySize()
{
	UNIMPLEMENTED_FUNC(cellSpurs);
	return CELL_OK;
}

s32 cellSpursQueueSize()
{
	UNIMPLEMENTED_FUNC(cellSpurs);
	return CELL_OK;
}

s32 cellSpursQueueGetDirection()
{
	UNIMPLEMENTED_FUNC(cellSpurs);
	return CELL_OK;
}

/// Common taskset creation: zero-initializes the taskset, then registers it as a
/// SPURS workload running the taskset policy module image.
s32 _spurs::create_taskset(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, vm::ptr<CellSpursTaskset> taskset, u64 args, vm::cptr<u8[8]> priority, u32 max_contention, vm::cptr<char> name, u32 size, s32 enable_clear_ls)
{
	if (!spurs || !taskset)
	{
		return CELL_SPURS_TASK_ERROR_NULL_POINTER;
	}

	if (!spurs.aligned() || !taskset.aligned())
	{
		return CELL_SPURS_TASK_ERROR_ALIGN;
	}

	memset(taskset.get_ptr(), 0, size);

	taskset->spurs = spurs;
	taskset->args = args;
	taskset->enable_clear_ls = enable_clear_ls > 0 ? 1 : 0;
	taskset->size = size;

	vm::var<CellSpursWorkloadAttribute> wkl_attr;
	_cellSpursWorkloadAttributeInitialize(ppu, wkl_attr, 1, SYS_PROCESS_PARAM_VERSION_330_0, vm::cptr<void>::make(SPURS_IMG_ADDR_TASKSET_PM), 0x1E40 /*pm_size*/, taskset.addr(), priority, 8, max_contention);
	// TODO: Check return code

	cellSpursWorkloadAttributeSetName(ppu, wkl_attr, vm::null, name);
	// TODO: Check return code

	// TODO: cellSpursWorkloadAttributeSetShutdownCompletionEventHook(wkl_attr, hook, taskset);
	// TODO: Check return code

	vm::var<u32> wid;
	cellSpursAddWorkloadWithAttribute(ppu, spurs, wid, wkl_attr);
	// TODO: Check return code

	taskset->wkl_flag_wait_task = 0x80;
	taskset->wid = *wid;

	// TODO: cellSpursSetExceptionEventHandler(spurs, wid, hook, taskset);
	// TODO: Check return code

	return CELL_OK;
}

s32 cellSpursCreateTasksetWithAttribute(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, vm::ptr<CellSpursTaskset> taskset, vm::ptr<CellSpursTasksetAttribute> attr)
{
	cellSpurs.warning("cellSpursCreateTasksetWithAttribute(spurs=*0x%x, taskset=*0x%x, attr=*0x%x)", spurs, taskset, attr);

	if (!attr)
	{
		return CELL_SPURS_TASK_ERROR_NULL_POINTER;
	}

	if (!attr.aligned())
	{
		return CELL_SPURS_TASK_ERROR_ALIGN;
	}

	if (attr->revision != CELL_SPURS_TASKSET_ATTRIBUTE_REVISION)
	{
		return CELL_SPURS_TASK_ERROR_INVAL;
	}

	auto rc = _spurs::create_taskset(ppu, spurs, taskset, attr->args, attr.ptr(&CellSpursTasksetAttribute::priority), attr->max_contention, attr->name, attr->taskset_size, attr->enable_clear_ls);

	if (attr->taskset_size >= sizeof(CellSpursTaskset2))
	{
		// TODO: Implement this
	}

	return rc;
}

s32 cellSpursCreateTaskset(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, vm::ptr<CellSpursTaskset> taskset, u64 args, vm::cptr<u8[8]> priority, u32 maxContention)
{
	cellSpurs.warning("cellSpursCreateTaskset(spurs=*0x%x, taskset=*0x%x, args=0x%llx, priority=*0x%x, maxContention=%d)", spurs, taskset, args, priority, maxContention);

	return _spurs::create_taskset(ppu, spurs, taskset, args, priority, maxContention, vm::null, sizeof(CellSpursTaskset), 0);
}

s32 cellSpursJoinTaskset(vm::ptr<CellSpursTaskset> taskset)
{
	cellSpurs.warning("cellSpursJoinTaskset(taskset=*0x%x)", taskset);

	UNIMPLEMENTED_FUNC(cellSpurs);
	return CELL_OK;
}

s32 cellSpursGetTasksetId(vm::ptr<CellSpursTaskset> taskset, vm::ptr<u32> wid)
{
	cellSpurs.warning("cellSpursGetTasksetId(taskset=*0x%x, wid=*0x%x)", taskset, wid);

	if (!taskset || !wid)
	{
		return CELL_SPURS_TASK_ERROR_NULL_POINTER;
	}

	if (!taskset.aligned())
	{
		return CELL_SPURS_TASK_ERROR_ALIGN;
	}

	// Does not check its validity
	*wid = taskset->wid;
	return CELL_OK;
}

s32 cellSpursShutdownTaskset(ppu_thread& ppu, vm::ptr<CellSpursTaskset> taskset)
{
	cellSpurs.warning("cellSpursShutdownTaskset(taskset=*0x%x)", taskset);

	if (!taskset)
	{
		return CELL_SPURS_TASK_ERROR_NULL_POINTER;
	}

	if (!taskset.aligned())
	{
		return CELL_SPURS_TASK_ERROR_ALIGN;
	}

	const u32 wid = taskset->wid;

	if (wid >= CELL_SPURS_MAX_WORKLOAD)
	{
		return CELL_SPURS_TASK_ERROR_INVAL;
	}

	const auto spurs = +taskset->spurs;

	// Shut down the underlying workload; map its policy-module error into the task error domain
	u32 shutdown_error = ppu_execute<&cellSpursShutdownWorkload>(ppu, spurs, wid);

	if (shutdown_error == CELL_SPURS_POLICY_MODULE_ERROR_STAT)
	{
		shutdown_error = CELL_SPURS_TASK_ERROR_STAT;
	}
	else if (shutdown_error == CELL_SPURS_POLICY_MODULE_ERROR_ALIGN || shutdown_error == CELL_SPURS_POLICY_MODULE_ERROR_SRCH)
	{
		// printf is used on fw in this case
		cellSpurs.error("cellSpursShutdownTaskset(taskset=*0x%x): Failed with %s (spurs=0x%x, wid=%d)", CellError{ shutdown_error }, spurs, wid);
	}

	return shutdown_error;
}

s32
/// Allocate a task slot in the taskset's 128-bit 'enabled' bitmap and record the
/// task's ELF, context-save area and LS usage pattern. Does not start the task.
_spurs::create_task(vm::ptr<CellSpursTaskset> taskset, vm::ptr<u32> task_id, vm::cptr<void> elf, vm::cptr<void> context, u32 size, vm::ptr<CellSpursTaskLsPattern> ls_pattern, vm::ptr<CellSpursTaskArgument> arg)
{
	if (!taskset || !elf)
	{
		return CELL_SPURS_TASK_ERROR_NULL_POINTER;
	}

	if (!elf.aligned(16))
	{
		return CELL_SPURS_TASK_ERROR_ALIGN;
	}

	// Older SDKs only required 16-byte alignment for the context save area
	if (_spurs::get_sdk_version() < 0x27FFFF)
	{
		if (!context.aligned(16))
		{
			return CELL_SPURS_TASK_ERROR_ALIGN;
		}
	}
	else
	{
		if (!context.aligned(128))
		{
			return CELL_SPURS_TASK_ERROR_ALIGN;
		}
	}

	u32 alloc_ls_blocks = 0;

	if (context)
	{
		if (size < CELL_SPURS_TASK_EXECUTION_CONTEXT_SIZE)
		{
			return CELL_SPURS_TASK_ERROR_INVAL;
		}

		// Number of 2KB local-storage blocks the context save area can hold (capped at 0x7A)
		alloc_ls_blocks = size > 0x3D400 ? 0x7A : ((size - 0x400) >> 11);
		if (ls_pattern)
		{
			v128 ls_pattern_128 = v128::from64r(ls_pattern->_u64[0], ls_pattern->_u64[1]);
			const u32 ls_blocks = utils::popcnt128(ls_pattern_128._u);

			if (ls_blocks > alloc_ls_blocks)
			{
				return CELL_SPURS_TASK_ERROR_INVAL;
			}

			v128 _0 = v128::from32(0);
			if ((ls_pattern_128 & v128::from32r(0xFC000000)) != _0)
			{
				// Prevent save/restore to SPURS management area
				return CELL_SPURS_TASK_ERROR_INVAL;
			}
		}
	}
	else
	{
		alloc_ls_blocks = 0;
	}

	// TODO: Verify the ELF header is proper and all its load segments are at address >= 0x3000

	u32 tmp_task_id;

	// Atomically claim the first clear bit in the 128-bit enabled mask as the new task id
	vm::light_op(vm::_ref<atomic_be_t<v128>>(taskset.ptr(&CellSpursTaskset::enabled).addr()), [&](atomic_be_t<v128>& ptr)
	{
		// NOTE: Realfw processes this using 4 32-bits atomic loops
		// But here its processed within a single 128-bit atomic op
		ptr.fetch_op([&](be_t<v128>& value)
		{
			auto value0 = value.value();

			if (auto pos = std::countl_one(+value0._u64[0]); pos != 64)
			{
				tmp_task_id = pos;
				value0._u64[0] |= (1ull << 63) >> pos;
				value = value0;
				return true;
			}

			if (auto pos = std::countl_one(+value0._u64[1]); pos != 64)
			{
				tmp_task_id = pos + 64;
				value0._u64[1] |= (1ull << 63) >> pos;
				value = value0;
				return true;
			}

			tmp_task_id = CELL_SPURS_MAX_TASK;
			return false;
		});
	});

	if (tmp_task_id >= CELL_SPURS_MAX_TASK)
	{
		return CELL_SPURS_TASK_ERROR_AGAIN;
	}

	// Fill in the per-task bookkeeping for the claimed slot
	taskset->task_info[tmp_task_id].elf = elf;
	taskset->task_info[tmp_task_id].context_save_storage_and_alloc_ls_blocks = (context.addr() | alloc_ls_blocks);
	taskset->task_info[tmp_task_id].args = *arg;
	if (ls_pattern)
	{
		taskset->task_info[tmp_task_id].ls_pattern = *ls_pattern;
	}

	*task_id = tmp_task_id;
	return CELL_OK;
}

/// Mark a task as pending-ready and signal/wake the taskset's workload.
s32 _spurs::task_start(ppu_thread& ppu, vm::ptr<CellSpursTaskset> taskset, u32 taskId)
{
	vm::light_op(taskset->pending_ready, [&](CellSpursTaskset::atomic_tasks_bitset& v)
	{
		v.values[taskId / 32] |= (1u << 31) >> (taskId % 32);
	});

	auto spurs = +taskset->spurs;
	ppu_execute<&cellSpursSendWorkloadSignal>(ppu, spurs, +taskset->wid);

	// NOTE(review): rc is remapped on STAT failure but never returned - the
	// function always reports CELL_OK; verify whether this matches firmware
	if (s32 rc = ppu_execute<&cellSpursWakeUp>(ppu, spurs))
	{
		if (rc + 0u == CELL_SPURS_POLICY_MODULE_ERROR_STAT)
		{
			rc = CELL_SPURS_TASK_ERROR_STAT;
		}
		else
		{
			ensure(rc == CELL_OK);
		}
	}

	return CELL_OK;
}

s32 cellSpursCreateTask(ppu_thread& ppu, vm::ptr<CellSpursTaskset> taskset, vm::ptr<u32> taskId, vm::cptr<void> elf, vm::cptr<void> context, u32 size, vm::ptr<CellSpursTaskLsPattern> lsPattern, vm::ptr<CellSpursTaskArgument> argument)
{
	cellSpurs.warning("cellSpursCreateTask(taskset=*0x%x, taskID=*0x%x, elf=*0x%x, context=*0x%x, size=0x%x, lsPattern=*0x%x, argument=*0x%x)", taskset, taskId, elf, context, size, lsPattern, argument);

	if (!taskset)
	{
		return CELL_SPURS_TASK_ERROR_NULL_POINTER;
	}

	if (!taskset.aligned())
	{
		return CELL_SPURS_TASK_ERROR_ALIGN;
	}

	// Allocate the task, then immediately start it
	auto rc = _spurs::create_task(taskset, taskId, elf, context, size, lsPattern, argument);
	if (rc != CELL_OK)
	{
		return rc;
	}

	rc = _spurs::task_start(ppu, taskset, *taskId);
	if (rc != CELL_OK)
	{
		return rc;
	}

	return CELL_OK;
}

/// Set a task's 'signalled' bit under reservation; wakes the workload if the task was waiting.
s32 _cellSpursSendSignal(ppu_thread& ppu, vm::ptr<CellSpursTaskset> taskset, u32 taskId)
{
	cellSpurs.trace("_cellSpursSendSignal(taskset=*0x%x, taskId=0x%x)", taskset, taskId);

	if (!taskset)
	{
		return CELL_SPURS_TASK_ERROR_NULL_POINTER;
	}

	if (!taskset.aligned())
	{
		return CELL_SPURS_TASK_ERROR_ALIGN;
	}

	if (taskId >= CELL_SPURS_MAX_TASK || taskset->wid >= CELL_SPURS_MAX_WORKLOAD2)
	{
		return CELL_SPURS_TASK_ERROR_INVAL;
	}

	// signal: -1 = invalid state, 0 = signalled but task not waiting, 1 = task must be woken
	int signal;
	vm::reservation_op(ppu, vm::unsafe_ptr_cast<spurs_taskset_signal_op>(taskset), [&](spurs_taskset_signal_op& op)
	{
		const u32 signalled = op.signalled[taskId / 32];
		const u32 running = op.running[taskId / 32];
		const u32 ready = op.ready[taskId / 32];
		const u32 waiting = op.waiting[taskId / 32];
		const u32 enabled = op.enabled[taskId / 32];
		const u32 pready = op.pending_ready[taskId / 32];

		const u32 mask = (1u << 31) >> (taskId % 32);

		if ((running & waiting) || (ready & pready) ||
			((signalled | waiting | pready | running | ready) & ~enabled) || !(enabled & mask))
		{
			// Error conditions:
			// 1) Cannot have a waiting bit and running bit set at the same time
			// 2) Cannot have a read bit and pending_ready bit at the same time
			// 3) Any disabled bit in enabled mask must be not set
			// 4) Specified task must be enabled
			signal = -1;
			return false;
		}

		signal = !!(~signalled & waiting & mask);
		op.signalled[taskId / 32] = signalled | mask;
		return true;
	});

	switch (signal)
	{
	case 0: break;
	case 1:
	{
		// The task was waiting on the signal: kick its workload and wake SPURS
		auto spurs = +taskset->spurs;
		ppu_execute<&cellSpursSendWorkloadSignal>(ppu, spurs, +taskset->wid);
		auto rc = ppu_execute<&cellSpursWakeUp>(ppu, spurs);

		if (rc + 0u == CELL_SPURS_POLICY_MODULE_ERROR_STAT)
		{
			return CELL_SPURS_TASK_ERROR_STAT;
		}

		return rc;
	}
	default: return CELL_SPURS_TASK_ERROR_SRCH;
	}

	return CELL_OK;
}

s32 cellSpursCreateTaskWithAttribute()
{
	UNIMPLEMENTED_FUNC(cellSpurs);
	return CELL_OK;
}

s32 cellSpursTasksetAttributeSetName(vm::ptr<CellSpursTasksetAttribute> attr, vm::cptr<char> name)
{
	cellSpurs.warning("cellSpursTasksetAttributeSetName(attr=*0x%x, name=%s)", attr, name);

	if (!attr || !name)
	{
		return CELL_SPURS_TASK_ERROR_NULL_POINTER;
	}

	if (!attr.aligned())
	{
		return CELL_SPURS_TASK_ERROR_ALIGN;
	}

	attr->name = name;
	return CELL_OK;
}

s32 cellSpursTasksetAttributeSetTasksetSize(vm::ptr<CellSpursTasksetAttribute> attr, u32 size)
{
	cellSpurs.warning("cellSpursTasksetAttributeSetTasksetSize(attr=*0x%x, size=0x%x)", attr, size);

	if (!attr)
	{
		return CELL_SPURS_TASK_ERROR_NULL_POINTER;
	}

	if (!attr.aligned())
	{
		return CELL_SPURS_TASK_ERROR_ALIGN;
	}

	// Only the two known taskset layouts are accepted
	if (size != sizeof(CellSpursTaskset) && size != sizeof(CellSpursTaskset2))
	{
		return CELL_SPURS_TASK_ERROR_INVAL;
	}

	attr->taskset_size = size;
	return CELL_OK;
}

s32 cellSpursTasksetAttributeEnableClearLS(vm::ptr<CellSpursTasksetAttribute> attr, s32 enable)
{
	cellSpurs.warning("cellSpursTasksetAttributeEnableClearLS(attr=*0x%x, enable=%d)", attr, enable);

	if (!attr)
	{
		return CELL_SPURS_TASK_ERROR_NULL_POINTER;
	}

	if (!attr.aligned())
	{
		return CELL_SPURS_TASK_ERROR_ALIGN;
	}

	attr->enable_clear_ls = enable ? 1 : 0;
	return CELL_OK;
}

/// Initialize a CellSpursTasksetAttribute2 with default values (max contention 8, all priorities 1)
s32 _cellSpursTasksetAttribute2Initialize(vm::ptr<CellSpursTasksetAttribute2> attribute, u32 revision)
{
	cellSpurs.warning("_cellSpursTasksetAttribute2Initialize(attribute=*0x%x, revision=%d)", attribute, revision);

	std::memset(attribute.get_ptr(), 0, attribute.size());
	attribute->revision = revision;
	attribute->name = vm::null;
	attribute->args = 0;

	for (s32 i = 0; i < 8; i++)
	{
		attribute->priority[i] = 1;
	}

	attribute->max_contention = 8;
	attribute->enable_clear_ls = 0;
	attribute->task_name_buffer.set(0);
	return CELL_OK;
}

// Unimplemented stubs
s32 cellSpursTaskExitCodeGet()
{
	UNIMPLEMENTED_FUNC(cellSpurs);
	return CELL_OK;
}

s32 cellSpursTaskExitCodeInitialize()
{
	UNIMPLEMENTED_FUNC(cellSpurs);
	return CELL_OK;
}

s32 cellSpursTaskExitCodeTryGet()
{
	UNIMPLEMENTED_FUNC(cellSpurs);
	return CELL_OK;
}

s32 cellSpursTaskGetLoadableSegmentPattern()
{
	UNIMPLEMENTED_FUNC(cellSpurs);
	return CELL_OK;
}

s32 cellSpursTaskGetReadOnlyAreaPattern()
{
	UNIMPLEMENTED_FUNC(cellSpurs);
	return CELL_OK;
}

s32 cellSpursTaskGenerateLsPattern()
{
	UNIMPLEMENTED_FUNC(cellSpurs);
	return CELL_OK;
}

s32 _cellSpursTaskAttributeInitialize()
{
	UNIMPLEMENTED_FUNC(cellSpurs);
	return CELL_OK;
}

s32 cellSpursTaskAttributeSetExitCodeContainer()
{
	UNIMPLEMENTED_FUNC(cellSpurs);
	return CELL_OK;
}

/// Initialize a CellSpursTaskAttribute2 with empty defaults
s32 _cellSpursTaskAttribute2Initialize(vm::ptr<CellSpursTaskAttribute2> attribute, u32 revision)
{
	cellSpurs.warning("_cellSpursTaskAttribute2Initialize(attribute=*0x%x, revision=%d)", attribute, revision);

	attribute->revision = revision;
	attribute->sizeContext = 0;
	attribute->eaContext = 0;

	for (s32 c = 0; c < 4; c++)
	{
		attribute->lsPattern._u32[c] = 0;
	}

	attribute->name = vm::null;
	return CELL_OK;
}

s32 cellSpursTaskGetContextSaveAreaSize()
{
	UNIMPLEMENTED_FUNC(cellSpurs);
	return CELL_OK;
}

s32 cellSpursCreateTaskset2(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, vm::ptr<CellSpursTaskset> taskset, vm::ptr<CellSpursTasksetAttribute2> attr)
{
	cellSpurs.warning("cellSpursCreateTaskset2(spurs=*0x%x, taskset=*0x%x, attr=*0x%x)", spurs, taskset, attr);

	// A null attribute means "use defaults"
	vm::var<CellSpursTasksetAttribute2> tmp_attr;

	if (!attr)
	{
		attr = tmp_attr;
		_cellSpursTasksetAttribute2Initialize(attr, 0);
	}

	if (s32 rc = _spurs::create_taskset(ppu, spurs, taskset, attr->args, attr.ptr(&CellSpursTasksetAttribute2::priority), attr->max_contention, attr->name, sizeof(CellSpursTaskset2), attr->enable_clear_ls))
	{
		return rc;
	}

	if (!attr->task_name_buffer.aligned())
	{
		return CELL_SPURS_TASK_ERROR_ALIGN;
	}

	// TODO: Implement rest of the function
	return CELL_OK;
}

s32 cellSpursCreateTask2()
{
	UNIMPLEMENTED_FUNC(cellSpurs);
	return CELL_OK;
}

s32 cellSpursJoinTask2()
{
	UNIMPLEMENTED_FUNC(cellSpurs);
	return CELL_OK;
}

s32 cellSpursTryJoinTask2()
{
	UNIMPLEMENTED_FUNC(cellSpurs);
	return CELL_OK;
}

s32 cellSpursDestroyTaskset2()
{
	UNIMPLEMENTED_FUNC(cellSpurs);
	return CELL_OK;
}

s32 cellSpursCreateTask2WithBinInfo()
{
	UNIMPLEMENTED_FUNC(cellSpurs);
	return CELL_OK;
}

s32 cellSpursTasksetSetExceptionEventHandler(vm::ptr<CellSpursTaskset> taskset, vm::ptr<CellSpursTasksetExceptionEventHandler> handler, vm::ptr<u64> arg)
{
	cellSpurs.warning("cellSpursTasksetSetExceptionEventHandler(taskset=*0x%x, handler=*0x%x, arg=*0x%x)", taskset, handler, arg);

	if (!taskset || !handler)
	{
		return CELL_SPURS_TASK_ERROR_NULL_POINTER;
	}

	if (!taskset.aligned())
	{
		return CELL_SPURS_TASK_ERROR_ALIGN;
	}

	if (taskset->wid >= CELL_SPURS_MAX_WORKLOAD)
	{
		return CELL_SPURS_TASK_ERROR_INVAL;
	}

	// Only one handler may be installed at a time
	if (taskset->exception_handler)
	{
		return CELL_SPURS_TASK_ERROR_BUSY;
	}

	taskset->exception_handler = handler;
	taskset->exception_handler_arg = arg;
	return CELL_OK;
}

s32 cellSpursTasksetUnsetExceptionEventHandler(vm::ptr<CellSpursTaskset> taskset)
{
	cellSpurs.warning("cellSpursTasksetUnsetExceptionEventHandler(taskset=*0x%x)", taskset);

	if (!taskset)
	{
		return CELL_SPURS_TASK_ERROR_NULL_POINTER;
	}

	if (!taskset.aligned())
	{
		return CELL_SPURS_TASK_ERROR_ALIGN;
	}

	if (taskset->wid >= CELL_SPURS_MAX_WORKLOAD)
	{
		return CELL_SPURS_TASK_ERROR_INVAL;
	}

	taskset->exception_handler.set(0);
	taskset->exception_handler_arg.set(0);
	return CELL_OK;
}

s32 cellSpursLookUpTasksetAddress(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, vm::pptr<CellSpursTaskset> taskset, u32 id)
{
	cellSpurs.warning("cellSpursLookUpTasksetAddress(spurs=*0x%x, taskset=**0x%x, id=0x%x)", spurs, taskset, id);

	if (!taskset)
	{
		return CELL_SPURS_TASK_ERROR_NULL_POINTER;
	}

	vm::var<u64> data;

	if (s32 rc = cellSpursGetWorkloadData(spurs, data, id))
	{
		// Convert policy module error code to a task error code
		return rc ^ 0x100;
	}

	*taskset = vm::cast(*data);
	return CELL_OK;
}

s32 cellSpursTasksetGetSpursAddress(vm::cptr<CellSpursTaskset> taskset, vm::ptr<u32> spurs)
{
	cellSpurs.warning("cellSpursTasksetGetSpursAddress(taskset=*0x%x, spurs=**0x%x)", taskset, spurs);

	if (!taskset || !spurs)
	{
		return CELL_SPURS_TASK_ERROR_NULL_POINTER;
	}

	if (!taskset.aligned())
	{
		return CELL_SPURS_TASK_ERROR_ALIGN;
	}

	if (taskset->wid >= CELL_SPURS_MAX_WORKLOAD)
	{
		return CELL_SPURS_TASK_ERROR_INVAL;
	}

	*spurs = vm::cast(taskset->spurs.addr());
	return CELL_OK;
}

s32 cellSpursGetTasksetInfo()
{
	UNIMPLEMENTED_FUNC(cellSpurs);
	return CELL_OK;
}

s32 _cellSpursTasksetAttributeInitialize(vm::ptr<CellSpursTasksetAttribute> attribute, u32 revision, u32 sdk_version, u64 args, vm::cptr<u8> priority, u32
max_contention)
{
	// NOTE(review): the format string says "skd_version" - typo in the log text,
	// kept as-is since it is a runtime string
	cellSpurs.warning("_cellSpursTasksetAttributeInitialize(attribute=*0x%x, revision=%d, skd_version=0x%x, args=0x%llx, priority=*0x%x, max_contention=%d)",
		attribute, revision, sdk_version, args, priority, max_contention);

	if (!attribute)
	{
		return CELL_SPURS_TASK_ERROR_NULL_POINTER;
	}

	if (!attribute.aligned())
	{
		return CELL_SPURS_TASK_ERROR_ALIGN;
	}

	// Each of the 8 per-SPU priorities must fit in 4 bits
	for (u32 i = 0; i < 8; i++)
	{
		if (priority[i] > 0xF)
		{
			return CELL_SPURS_TASK_ERROR_INVAL;
		}
	}

	std::memset(attribute.get_ptr(), 0, attribute.size());
	attribute->revision = revision;
	attribute->sdk_version = sdk_version;
	attribute->args = args;
	std::memcpy(attribute->priority, priority.get_ptr(), 8);
	attribute->taskset_size = 6400/*CellSpursTaskset::size*/;
	attribute->max_contention = max_contention;
	return CELL_OK;
}

/// Validate the full set of job chain creation parameters; returns a
/// CELL_SPURS_JOB_ERROR_* code on the first violated constraint.
s32 _spurs::check_job_chain_attribute(u32 sdkVer, vm::cptr<u64> jcEntry, u16 sizeJobDescr, u16 maxGrabbedJob
	, u64 priorities, u32 maxContention, u8 autoSpuCount, u32 tag1, u32 tag2
	, u8 isFixedMemAlloc, u32 maxSizeJob, u32 initSpuCount)
{
	if (!jcEntry)
		return CELL_SPURS_JOB_ERROR_NULL_POINTER;

	if (!jcEntry.aligned())
		return CELL_SPURS_JOB_ERROR_ALIGN;

	if (!maxGrabbedJob || maxGrabbedJob > 0x10u)
		return CELL_SPURS_JOB_ERROR_INVAL;

	// Tag 31 is not allowed from version 1.90 for some reason
	if (u32 max_tag = sdkVer < 0x19000 ? 31 : 30; tag1 > max_tag || tag2 > max_tag)
		return CELL_SPURS_JOB_ERROR_INVAL;

	// Test if any of the value >= CELL_SPURS_MAX_PRIORITY
	if (priorities & 0xf0f0f0f0f0f0f0f0)
		return CELL_SPURS_JOB_ERROR_INVAL;

	// Descriptor size must be a multiple of 128 bytes (or exactly 64)
	if (sizeJobDescr % 0x80 && sizeJobDescr != 64u)
		return CELL_SPURS_JOB_ERROR_INVAL;

	if (autoSpuCount && initSpuCount > 0xffu)
		return CELL_SPURS_JOB_ERROR_INVAL;

	if (maxSizeJob <= 0xffu || maxSizeJob > 0x400u || maxSizeJob % 0x80)
		return CELL_SPURS_JOB_ERROR_INVAL;

	return CELL_OK;
}

/// Common job chain creation: initializes the job chain structure and registers
/// it as a SPURS workload.
s32 _spurs::create_job_chain(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, vm::ptr<CellSpursJobChain> jobChain, vm::cptr<u64> jobChainEntry, u16 sizeJob
	, u16 maxGrabbedJob, vm::cptr<u8[8]> prio, u32 maxContention, b8 autoReadyCount
	, u32 tag1, u32 tag2, u32 HaltOnError, vm::cptr<char> name, u32 param_13, u32 param_14)
{
	const s32 sdkVer = _spurs::get_sdk_version();

	jobChain->spurs = spurs;
	jobChain->jmVer = sdkVer > 0x14ffff ? CELL_SPURS_JOB_REVISION_1 : CELL_SPURS_JOB_REVISION_0;

	// Real hack in firmware: special-cased behaviour for one specific title id
	jobChain->val2F = Emu.GetTitleID() == "BLJM60093" ? 1 : 0;
	jobChain->tag1 = static_cast<u8>(tag1);
	jobChain->tag2 = static_cast<u8>(tag2);
	jobChain->isHalted = false;
	jobChain->maxGrabbedJob = maxGrabbedJob;
	jobChain->pc = jobChainEntry;

	// Map policy-module errors into the job error domain
	auto as_job_error = [](s32 error) -> s32
	{
		switch (error + 0u)
		{
		case CELL_SPURS_POLICY_MODULE_ERROR_AGAIN: return CELL_SPURS_JOB_ERROR_AGAIN;
		case CELL_SPURS_POLICY_MODULE_ERROR_INVAL: return CELL_SPURS_JOB_ERROR_INVAL;
		case CELL_SPURS_POLICY_MODULE_ERROR_STAT: return CELL_SPURS_JOB_ERROR_STAT;
		default: return error;
		}
	};

	vm::var<CellSpursWorkloadAttribute> attr_wkl;
	vm::var<u32> wid;

	// TODO
	if (auto err = _cellSpursWorkloadAttributeInitialize(ppu, +attr_wkl, 1, SYS_PROCESS_PARAM_VERSION_330_0, vm::null, 0, jobChain.addr(), prio, 1, maxContention))
	{
		return as_job_error(err);
	}

	ppu_execute<&cellSpursWorkloadAttributeSetName>(ppu, +attr_wkl, +vm::make_str("JobChain"), name);

	if (auto err = ppu_execute<&cellSpursAddWorkloadWithAttribute>(ppu, spurs, +wid, +attr_wkl))
	{
		return as_job_error(err);
	}

	jobChain->cause = vm::null;
	jobChain->error = 0;
	jobChain->workloadId = *wid;
	return CELL_OK;
}

s32 cellSpursCreateJobChainWithAttribute(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, vm::ptr<CellSpursJobChain> jobChain, vm::ptr<CellSpursJobChainAttribute> attr)
{
	cellSpurs.warning("cellSpursCreateJobChainWithAttribute(spurs=*0x%x, jobChain=*0x%x, attr=*0x%x)", spurs, jobChain, attr);

	if (!attr)
		return CELL_SPURS_JOB_ERROR_NULL_POINTER;

	if (!attr.aligned())
		return CELL_SPURS_JOB_ERROR_ALIGN;

	const u64 prio = std::bit_cast<u64>(attr->priorities);

	if (auto err = _spurs::check_job_chain_attribute(attr->sdkVer, attr->jobChainEntry, attr->sizeJobDescriptor, attr->maxGrabbedJob, prio, attr->maxContention
		, attr->autoSpuCount, attr->tag1, attr->tag2, attr->isFixedMemAlloc, attr->maxSizeJobDescriptor, attr->initSpuCount))
	{
		return err;
	}

	if (!jobChain || !spurs)
		return CELL_SPURS_JOB_ERROR_NULL_POINTER;

	if (!jobChain.aligned() || !spurs.aligned())
		return CELL_SPURS_JOB_ERROR_ALIGN;
	std::memset(jobChain.get_ptr(), 0, 0x110);

	// Only allowed revisions in this function
	if (auto ver = attr->jmVer; ver != CELL_SPURS_JOB_REVISION_2 && ver != CELL_SPURS_JOB_REVISION_3)
	{
		return CELL_SPURS_JOB_ERROR_INVAL;
	}

	// Pack isFixedMemAlloc and the descriptor-size class into the val2C bitfield
	jobChain->val2C = +attr->isFixedMemAlloc << 7 | (((attr->maxSizeJobDescriptor - 0x100) / 128 & 7) << 4);

	if (auto err = _spurs::create_job_chain(ppu, spurs, jobChain, attr->jobChainEntry, attr->sizeJobDescriptor
		, attr->maxGrabbedJob, attr.ptr(&CellSpursJobChainAttribute::priorities), attr->maxContention, attr->autoSpuCount
		, attr->tag1, attr->tag2, attr->haltOnError, attr->name, 0, 0))
	{
		return err;
	}

	jobChain->initSpuCount = attr->initSpuCount;
	jobChain->jmVer = attr->jmVer;
	jobChain->sdkVer = attr->sdkVer;
	jobChain->jobMemoryCheck = +attr->jobMemoryCheck << 1;
	return CELL_OK;
}

s32 cellSpursCreateJobChain(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, vm::ptr<CellSpursJobChain> jobChain, vm::cptr<u64> jobChainEntry, u16 sizeJobDescriptor
	, u16 maxGrabbedJob, vm::cptr<u8[8]> priorities, u32 maxContention, b8 autoReadyCount, u32 tag1, u32 tag2)
{
	cellSpurs.warning("cellSpursCreateJobChain(spurs=*0x%x, jobChain=*0x%x, jobChainEntry=*0x%x, sizeJobDescriptor=0x%x"
		", maxGrabbedJob=0x%x, priorities=*0x%x, maxContention=%u, autoReadyCount=%s, tag1=%u, %u)", spurs, jobChain, jobChainEntry, sizeJobDescriptor
		, maxGrabbedJob, priorities, maxContention, autoReadyCount, tag1, tag2);

	const u64 prio = std::bit_cast<u64>(*priorities);

	// sdkVer -1: skip the version-dependent tag restriction in the checker
	if (auto err = _spurs::check_job_chain_attribute(-1, jobChainEntry, sizeJobDescriptor, maxGrabbedJob, prio, maxContention, autoReadyCount, tag1, tag2, 0, 0, 0))
	{
		return err;
	}

	std::memset(jobChain.get_ptr(), 0, 0x110);

	if (auto err = _spurs::create_job_chain(ppu, spurs, jobChain, jobChainEntry, sizeJobDescriptor, maxGrabbedJob, priorities
		, maxContention, autoReadyCount, tag1, tag2, 0, vm::null, 0, 0))
	{
		return err;
	}

	return CELL_OK;
}

s32 cellSpursJoinJobChain(ppu_thread& ppu, vm::ptr<CellSpursJobChain> jobChain)
{
	cellSpurs.trace("cellSpursJoinJobChain(jobChain=*0x%x)", jobChain);

	if (!jobChain)
		return CELL_SPURS_JOB_ERROR_NULL_POINTER;

	if (!jobChain.aligned())
		return CELL_SPURS_JOB_ERROR_ALIGN;

	const u32 wid = jobChain->workloadId;

	if (wid >= CELL_SPURS_MAX_WORKLOAD2)
		return CELL_SPURS_JOB_ERROR_INVAL;

	auto as_job_error = [](s32 error) -> s32
	{
		switch (error + 0u)
		{
		case CELL_SPURS_POLICY_MODULE_ERROR_STAT: return CELL_SPURS_JOB_ERROR_STAT;
		default: return error;
		}
	};

	// Wait for the workload to shut down, then remove it
	if (auto err = ppu_execute<&cellSpursWaitForWorkloadShutdown>(ppu, +jobChain->spurs, wid))
	{
		return as_job_error(err);
	}

	if (auto err = ppu_execute<&cellSpursRemoveWorkload>(ppu, +jobChain->spurs, wid))
	{
		// Returned as is
		return err;
	}

	// Invalidate the workload id and report the job chain's recorded error state
	jobChain->workloadId = CELL_SPURS_MAX_WORKLOAD2;
	return jobChain->error;
}

s32 cellSpursKickJobChain(ppu_thread& ppu, vm::ptr<CellSpursJobChain> jobChain, u8 numReadyCount)
{
	cellSpurs.trace("cellSpursKickJobChain(jobChain=*0x%x, numReadyCount=0x%x)", jobChain, numReadyCount);

	if (!jobChain)
		return CELL_SPURS_JOB_ERROR_NULL_POINTER;

	if (!jobChain.aligned())
		return CELL_SPURS_JOB_ERROR_ALIGN;

	const u32 wid = jobChain->workloadId;
	const auto spurs = +jobChain->spurs;

	if (wid >= CELL_SPURS_MAX_WORKLOAD2)
		return CELL_SPURS_JOB_ERROR_INVAL;

	// Kicking is only valid for revision 0/1 job chains
	if (jobChain->jmVer > CELL_SPURS_JOB_REVISION_1)
		return CELL_SPURS_JOB_ERROR_PERM;

	if (jobChain->autoReadyCount)
		ppu_execute<&cellSpursReadyCountStore>(ppu, spurs, wid, numReadyCount);
	else
		ppu_execute<&cellSpursReadyCountCompareAndSwap>(ppu, spurs, wid, +vm::var<u32>{}, 0, 1);

	const auto err = ppu_execute<&cellSpursWakeUp>(ppu, spurs);

	if (err + 0u == CELL_SPURS_POLICY_MODULE_ERROR_STAT)
	{
		return CELL_SPURS_JOB_ERROR_STAT;
	}

	return err;
}

/// Validate and fill in a CellSpursJobChainAttribute from the given parameters.
s32 _cellSpursJobChainAttributeInitialize(u32 jmRevsion, u32 sdkRevision, vm::ptr<CellSpursJobChainAttribute> attr, vm::cptr<u64> jobChainEntry, u16 sizeJobDescriptor, u16 maxGrabbedJob
	, vm::cptr<u8[8]> priorityTable, u32 maxContention, b8 autoRequestSpuCount, u32 tag1, u32 tag2, b8 isFixedMemAlloc, u32 maxSizeJobDescriptor, u32 initialRequestSpuCount)
{
	cellSpurs.trace("_cellSpursJobChainAttributeInitialize(jmRevsion=0x%x, sdkRevision=0x%x, attr=*0x%x, jobChainEntry=*0x%x, sizeJobDescriptor=0x%x, maxGrabbedJob=0x%x, priorityTable=*0x%x"
		", maxContention=%u, autoRequestSpuCount=%s, tag1=0x%x, tag2=0x%x, isFixedMemAlloc=%s, maxSizeJobDescriptor=0x%x, initialRequestSpuCount=%u)",
		jmRevsion, sdkRevision, attr, jobChainEntry, sizeJobDescriptor, maxGrabbedJob, priorityTable, maxContention, autoRequestSpuCount, tag1, tag2, isFixedMemAlloc, maxSizeJobDescriptor, initialRequestSpuCount);

	if (!attr)
		return CELL_SPURS_JOB_ERROR_NULL_POINTER;

	if (!attr.aligned())
		return CELL_SPURS_JOB_ERROR_ALIGN;

	const u64 prio = std::bit_cast<u64>(*priorityTable);

	if (auto err = _spurs::check_job_chain_attribute(sdkRevision, jobChainEntry, sizeJobDescriptor, maxGrabbedJob, prio, maxContention
		, autoRequestSpuCount, tag1, tag2, isFixedMemAlloc, maxSizeJobDescriptor, initialRequestSpuCount))
	{
		return err;
	}

	attr->jmVer = jmRevsion;
	attr->sdkVer = sdkRevision;
	attr->jobChainEntry = jobChainEntry;
	attr->sizeJobDescriptor = sizeJobDescriptor;
	attr->maxGrabbedJob = maxGrabbedJob;
	std::memcpy(&attr->priorities, &prio, 8);
	attr->maxContention = maxContention;
	attr->autoSpuCount = autoRequestSpuCount;
	attr->tag1 = tag1;
	attr->tag2 = tag2;
	attr->isFixedMemAlloc = isFixedMemAlloc;
	attr->maxSizeJobDescriptor = maxSizeJobDescriptor;
	attr->initSpuCount = initialRequestSpuCount;
	attr->haltOnError = 0;
	attr->name = vm::null;
	attr->jobMemoryCheck = false;
	return CELL_OK;
}

s32 cellSpursGetJobChainId(vm::ptr<CellSpursJobChain> jobChain, vm::ptr<u32> id)
{
	cellSpurs.trace("cellSpursGetJobChainId(jobChain=*0x%x, id=*0x%x)", jobChain, id);

	if (!jobChain || !id)
		return CELL_SPURS_JOB_ERROR_NULL_POINTER;

	if (!jobChain.aligned())
		return CELL_SPURS_JOB_ERROR_ALIGN;

	*id = jobChain->workloadId;
	return CELL_OK;
}

s32 cellSpursJobChainSetExceptionEventHandler(vm::ptr<CellSpursJobChain> jobChain,
vm::ptr<CellSpursJobChainExceptionEventHandler> handler, vm::ptr<void> arg) { cellSpurs.trace("cellSpursJobChainSetExceptionEventHandler(jobChain=*0x%x, handler=*0x%x, arg=*0x%x)", jobChain, handler, arg); if (!jobChain || !handler) return CELL_SPURS_JOB_ERROR_NULL_POINTER; if (!jobChain.aligned()) return CELL_SPURS_JOB_ERROR_ALIGN; if (jobChain->workloadId >= CELL_SPURS_MAX_WORKLOAD2) return CELL_SPURS_JOB_ERROR_INVAL; if (jobChain->exceptionEventHandler) return CELL_SPURS_JOB_ERROR_BUSY; jobChain->exceptionEventHandlerArgument = arg; jobChain->exceptionEventHandler.set(handler.addr()); return CELL_OK; } s32 cellSpursJobChainUnsetExceptionEventHandler(vm::ptr<CellSpursJobChain> jobChain) { cellSpurs.trace("cellSpursJobChainUnsetExceptionEventHandler(jobChain=*0x%x)", jobChain); if (!jobChain) return CELL_SPURS_JOB_ERROR_NULL_POINTER; if (!jobChain.aligned()) return CELL_SPURS_JOB_ERROR_ALIGN; if (jobChain->workloadId >= CELL_SPURS_MAX_WORKLOAD2) return CELL_SPURS_JOB_ERROR_INVAL; jobChain->exceptionEventHandler = vm::null; jobChain->exceptionEventHandlerArgument = vm::null; return CELL_OK; } s32 cellSpursGetJobChainInfo(ppu_thread& ppu, vm::ptr<CellSpursJobChain> jobChain, vm::ptr<CellSpursJobChainInfo> info) { cellSpurs.trace("cellSpursGetJobChainInfo(jobChain=*0x%x, info=*0x%x)", jobChain, info); if (!jobChain || !info) return CELL_SPURS_JOB_ERROR_NULL_POINTER; if (!jobChain.aligned()) return CELL_SPURS_JOB_ERROR_ALIGN; const u32 wid = jobChain->workloadId; if (wid >= CELL_SPURS_MAX_WORKLOAD2) return CELL_SPURS_JOB_ERROR_INVAL; vm::var<CellSpursWorkloadInfo> wklInfo; if (auto err = ppu_execute<&cellSpursGetWorkloadInfo>(ppu, +jobChain->spurs, wid, +wklInfo)) { if (err + 0u == CELL_SPURS_POLICY_MODULE_ERROR_SRCH) { return CELL_SPURS_JOB_ERROR_INVAL; } return err; } // Read the commands queue atomically CellSpursJobChain data; vm::peek_op(ppu, vm::unsafe_ptr_cast<CellSpursJobChain_x00>(jobChain), [&](const CellSpursJobChain_x00& jch) { std::memcpy(&data, &jch, 
sizeof(jch)); }); info->linkRegister[0] = +data.linkRegister[0]; info->linkRegister[1] = +data.linkRegister[1]; info->linkRegister[2] = +data.linkRegister[2]; std::memcpy(&info->urgentCommandSlot, &data.urgentCmds, sizeof(info->urgentCommandSlot)); info->programCounter = +data.pc; info->idWorkload = wid; info->maxSizeJobDescriptor = (data.val2C & 0x70u) * 8 + 0x100; info->isHalted.set(data.isHalted); // Boolean truncation (non-zero becomes 1) info->autoReadyCount.set(data.autoReadyCount); info->isFixedMemAlloc = !!(data.val2C & 0x80); info->name = wklInfo->nameInstance; info->statusCode = jobChain->error; info->cause = jobChain->cause; info->exceptionEventHandler = +jobChain->exceptionEventHandler; info->exceptionEventHandlerArgument = +jobChain->exceptionEventHandlerArgument; return CELL_OK; } s32 cellSpursJobChainGetSpursAddress(vm::ptr<CellSpursJobChain> jobChain, vm::pptr<CellSpurs> spurs) { cellSpurs.trace("cellSpursJobChainGetSpursAddress(jobChain=*0x%x, spurs=*0x%x)", jobChain, spurs); if (!jobChain || !spurs) return CELL_SPURS_JOB_ERROR_NULL_POINTER; if (!jobChain.aligned()) return CELL_SPURS_JOB_ERROR_ALIGN; *spurs = +jobChain->spurs; return CELL_OK; } s32 cellSpursJobGuardInitialize(vm::ptr<CellSpursJobChain> jobChain, vm::ptr<CellSpursJobGuard> jobGuard, u32 notifyCount, u8 requestSpuCount, u8 autoReset) { cellSpurs.trace("cellSpursJobGuardInitialize(jobChain=*0x%x, jobGuard=*0x%x, notifyCount=0x%x, requestSpuCount=0x%x, autoReset=0x%x)" , jobChain, jobGuard, notifyCount, requestSpuCount, autoReset); if (!jobChain || !jobGuard) return CELL_SPURS_JOB_ERROR_NULL_POINTER; if (!jobChain.aligned() || !jobGuard.aligned()) return CELL_SPURS_JOB_ERROR_ALIGN; if (jobChain->workloadId >= CELL_SPURS_MAX_WORKLOAD2) return CELL_SPURS_JOB_ERROR_INVAL; std::memset(jobGuard.get_ptr(), 0, jobGuard.size()); jobGuard->zero = 0; jobGuard->ncount0 = notifyCount; jobGuard->ncount1 = notifyCount; jobGuard->requestSpuCount = requestSpuCount; jobGuard->autoReset = autoReset; 
jobGuard->jobChain = jobChain; return CELL_OK; } s32 cellSpursJobChainAttributeSetName(vm::ptr<CellSpursJobChainAttribute> attr, vm::cptr<char> name) { cellSpurs.trace("cellSpursJobChainAttributeSetName(attr=*0x%x, name=*0x%x %s)", attr, name, name); if (!attr || !name) return CELL_SPURS_JOB_ERROR_NULL_POINTER; if (!attr.aligned()) return CELL_SPURS_JOB_ERROR_ALIGN; attr->name = name; return CELL_OK; } s32 cellSpursShutdownJobChain(ppu_thread& ppu,vm::ptr<CellSpursJobChain> jobChain) { cellSpurs.trace("cellSpursShutdownJobChain(jobChain=*0x%x)", jobChain); if (!jobChain) return CELL_SPURS_JOB_ERROR_NULL_POINTER; if (!jobChain.aligned()) return CELL_SPURS_JOB_ERROR_ALIGN; const u32 wid = jobChain->workloadId; if (wid >= CELL_SPURS_MAX_WORKLOAD2) return CELL_SPURS_JOB_ERROR_INVAL; const auto err = ppu_execute<&cellSpursShutdownWorkload>(ppu, +jobChain->spurs, wid); if (err + 0u == CELL_SPURS_POLICY_MODULE_ERROR_STAT) { return CELL_SPURS_JOB_ERROR_STAT; } return err; } s32 cellSpursJobChainAttributeSetHaltOnError(vm::ptr<CellSpursJobChainAttribute> attr) { cellSpurs.trace("cellSpursJobChainAttributeSetHaltOnError(attr=*0x%x)", attr); if (!attr) return CELL_SPURS_JOB_ERROR_NULL_POINTER; if (!attr.aligned()) return CELL_SPURS_JOB_ERROR_ALIGN; attr->haltOnError = true; return CELL_OK; } s32 cellSpursJobChainAttributeSetJobTypeMemoryCheck(vm::ptr<CellSpursJobChainAttribute> attr) { cellSpurs.trace("cellSpursJobChainAttributeSetJobTypeMemoryCheck(attr=*0x%x)", attr); if (!attr) return CELL_SPURS_JOB_ERROR_NULL_POINTER; if (!attr.aligned()) return CELL_SPURS_JOB_ERROR_ALIGN; attr->jobMemoryCheck = true; return CELL_OK; } s32 cellSpursJobGuardNotify(ppu_thread& ppu, vm::ptr<CellSpursJobGuard> jobGuard) { cellSpurs.trace("cellSpursJobGuardNotify(jobGuard=*0x%x)", jobGuard); if (!jobGuard) return CELL_SPURS_JOB_ERROR_NULL_POINTER; if (!jobGuard.aligned()) return CELL_SPURS_JOB_ERROR_ALIGN; u32 allow_jobchain_run = 0; // Affects cellSpursJobChainRun execution u32 old = 0; const 
bool ok = vm::reservation_op(ppu, vm::unsafe_ptr_cast<CellSpursJobGuard_x00>(jobGuard), [&](CellSpursJobGuard_x00& jg) { allow_jobchain_run = jg.zero; old = jg.ncount0; if (!jg.ncount0) { return false; } jg.ncount0--; return true; }); if (!ok) { return CELL_SPURS_CORE_ERROR_STAT; } if (old > 1u) { return CELL_OK; } auto jobChain = +jobGuard->jobChain; if (jobChain->jmVer <= CELL_SPURS_JOB_REVISION_1) { ppu_execute<&cellSpursKickJobChain>(ppu, jobChain, static_cast<u8>(jobGuard->requestSpuCount)); } else if (allow_jobchain_run) { ppu_execute<&cellSpursRunJobChain>(ppu, jobChain); } return CELL_OK; } s32 cellSpursJobGuardReset(vm::ptr<CellSpursJobGuard> jobGuard) { cellSpurs.trace("cellSpursJobGuardReset(jobGuard=*0x%x)", jobGuard); if (!jobGuard) return CELL_SPURS_JOB_ERROR_NULL_POINTER; if (!jobGuard.aligned()) return CELL_SPURS_JOB_ERROR_ALIGN; vm::light_op(jobGuard->ncount0, [&](atomic_be_t<u32>& ncount0) { ncount0 = jobGuard->ncount1; }); return CELL_OK; } s32 cellSpursRunJobChain(ppu_thread& ppu, vm::ptr<CellSpursJobChain> jobChain) { cellSpurs.trace("cellSpursRunJobChain(jobChain=*0x%x)", jobChain); if (!jobChain) return CELL_SPURS_JOB_ERROR_NULL_POINTER; if (!jobChain.aligned()) return CELL_SPURS_JOB_ERROR_ALIGN; const u32 wid = jobChain->workloadId; if (wid >= CELL_SPURS_MAX_WORKLOAD2) return CELL_SPURS_JOB_ERROR_INVAL; if (jobChain->jmVer <= CELL_SPURS_JOB_REVISION_1) return CELL_SPURS_JOB_ERROR_PERM; const auto spurs = +jobChain->spurs; ppu_execute<&cellSpursSendWorkloadSignal>(ppu, spurs, wid); const auto err = ppu_execute<&cellSpursWakeUp>(ppu, spurs); if (err + 0u == CELL_SPURS_POLICY_MODULE_ERROR_STAT) { return CELL_SPURS_JOB_ERROR_STAT; } return err; } s32 cellSpursJobChainGetError(vm::ptr<CellSpursJobChain> jobChain, vm::pptr<void> cause) { cellSpurs.trace("cellSpursJobChainGetError(jobChain=*0x%x, cause=*0x%x)", jobChain, cause); if (!jobChain) return CELL_SPURS_JOB_ERROR_NULL_POINTER; if (!jobChain.aligned()) return CELL_SPURS_JOB_ERROR_ALIGN; 
const s32 error = jobChain->error; *cause = (error ? jobChain->cause : vm::null); return error; } s32 cellSpursGetJobPipelineInfo() { UNIMPLEMENTED_FUNC(cellSpurs); return CELL_OK; } s32 cellSpursJobSetMaxGrab(vm::ptr<CellSpursJobChain> jobChain, u32 maxGrabbedJob) { cellSpurs.trace("cellSpursJobSetMaxGrab(jobChain=*0x%x, maxGrabbedJob=*0x%x)", jobChain, maxGrabbedJob); if (!jobChain) return CELL_SPURS_JOB_ERROR_NULL_POINTER; if (!jobChain.aligned()) return CELL_SPURS_JOB_ERROR_ALIGN; if (!maxGrabbedJob || maxGrabbedJob > 0x10u) return CELL_SPURS_JOB_ERROR_INVAL; const auto spurs = jobChain->spurs; // All of these are ERROR_STAT checks unexpectedly if (!spurs || !spurs.aligned()) return CELL_SPURS_JOB_ERROR_STAT; const u32 wid = jobChain->workloadId; if (wid >= CELL_SPURS_MAX_WORKLOAD2) return CELL_SPURS_JOB_ERROR_STAT; if ((spurs->wklEnabled & (0x80000000u >> wid)) == 0u) return CELL_SPURS_JOB_ERROR_STAT; vm::light_op(jobChain->maxGrabbedJob, [&](atomic_be_t<u16>& v) { v.release(static_cast<u16>(maxGrabbedJob)); }); return CELL_OK; } s32 cellSpursJobHeaderSetJobbin2Param() { UNIMPLEMENTED_FUNC(cellSpurs); return CELL_OK; } s32 cellSpursAddUrgentCommand(ppu_thread& ppu, vm::ptr<CellSpursJobChain> jobChain, u64 newCmd) { cellSpurs.trace("cellSpursAddUrgentCommand(jobChain=*0x%x, newCmd=0x%llx)", jobChain, newCmd); if (!jobChain) return CELL_SPURS_JOB_ERROR_NULL_POINTER; if (!jobChain.aligned()) return CELL_SPURS_JOB_ERROR_ALIGN; if (jobChain->workloadId >= CELL_SPURS_MAX_WORKLOAD2) return CELL_SPURS_JOB_ERROR_INVAL; s32 result = CELL_OK; vm::reservation_op(ppu, vm::unsafe_ptr_cast<CellSpursJobChain_x00>(jobChain), [&](CellSpursJobChain_x00& jch) { for (auto& cmd : jch.urgentCmds) { if (!cmd) { cmd = newCmd; return true; } } // Considered unlikely so unoptimized result = CELL_SPURS_JOB_ERROR_BUSY; return false; }); return result; } s32 cellSpursAddUrgentCall(ppu_thread& ppu, vm::ptr<CellSpursJobChain> jobChain, vm::ptr<u64> commandList) { 
cellSpurs.trace("cellSpursAddUrgentCall(jobChain=*0x%x, commandList=*0x%x)", jobChain, commandList); if (!commandList) return CELL_SPURS_JOB_ERROR_NULL_POINTER; if (!commandList.aligned()) return CELL_SPURS_JOB_ERROR_ALIGN; return cellSpursAddUrgentCommand(ppu, jobChain, commandList.addr() | CELL_SPURS_JOB_OPCODE_CALL); } s32 cellSpursBarrierInitialize(vm::ptr<CellSpursTaskset> taskset, vm::ptr<CellSpursBarrier> barrier, u32 total) { cellSpurs.trace("cellSpursBarrierInitialize(taskset=*0x%x, barrier=*0x%x, total=0x%x)", taskset, barrier, total); if (!taskset || !barrier) return CELL_SPURS_TASK_ERROR_NULL_POINTER; if (!taskset.aligned() || !barrier.aligned()) return CELL_SPURS_TASK_ERROR_ALIGN; if (!total || total > 128u) return CELL_SPURS_TASK_ERROR_INVAL; if (taskset->wid >= CELL_SPURS_MAX_WORKLOAD2) return CELL_SPURS_TASK_ERROR_INVAL; std::memset(barrier.get_ptr(), 0, barrier.size()); barrier->zero = 0; barrier->remained = total; barrier->taskset = taskset; return CELL_OK; } s32 cellSpursBarrierGetTasksetAddress(vm::ptr<CellSpursBarrier> barrier, vm::pptr<CellSpursTaskset> taskset) { cellSpurs.trace("cellSpursBarrierGetTasksetAddress(barrier=*0x%x, taskset=*0x%x)", barrier, taskset); if (!taskset || !barrier) return CELL_SPURS_TASK_ERROR_NULL_POINTER; if (!barrier.aligned()) return CELL_SPURS_TASK_ERROR_ALIGN; *taskset = barrier->taskset; return CELL_OK; } s32 _cellSpursSemaphoreInitialize() { UNIMPLEMENTED_FUNC(cellSpurs); return CELL_OK; } s32 cellSpursSemaphoreGetTasksetAddress() { UNIMPLEMENTED_FUNC(cellSpurs); return CELL_OK; } DECLARE(ppu_module_manager::cellSpurs)("cellSpurs", [](ppu_static_module* _this) { // Core REG_FUNC(cellSpurs, cellSpursInitialize); REG_FUNC(cellSpurs, cellSpursInitializeWithAttribute); REG_FUNC(cellSpurs, cellSpursInitializeWithAttribute2); REG_FUNC(cellSpurs, cellSpursFinalize); REG_FUNC(cellSpurs, _cellSpursAttributeInitialize); REG_FUNC(cellSpurs, cellSpursAttributeSetMemoryContainerForSpuThread); REG_FUNC(cellSpurs, 
cellSpursAttributeSetNamePrefix); REG_FUNC(cellSpurs, cellSpursAttributeEnableSpuPrintfIfAvailable); REG_FUNC(cellSpurs, cellSpursAttributeSetSpuThreadGroupType); REG_FUNC(cellSpurs, cellSpursAttributeEnableSystemWorkload); REG_FUNC(cellSpurs, cellSpursGetSpuThreadGroupId); REG_FUNC(cellSpurs, cellSpursGetNumSpuThread); REG_FUNC(cellSpurs, cellSpursGetSpuThreadId); REG_FUNC(cellSpurs, cellSpursGetInfo); REG_FUNC(cellSpurs, cellSpursSetMaxContention); REG_FUNC(cellSpurs, cellSpursSetPriorities); REG_FUNC(cellSpurs, cellSpursSetPriority); REG_FUNC(cellSpurs, cellSpursSetPreemptionVictimHints); REG_FUNC(cellSpurs, cellSpursAttachLv2EventQueue); REG_FUNC(cellSpurs, cellSpursDetachLv2EventQueue); REG_FUNC(cellSpurs, cellSpursEnableExceptionEventHandler); REG_FUNC(cellSpurs, cellSpursSetGlobalExceptionEventHandler); REG_FUNC(cellSpurs, cellSpursUnsetGlobalExceptionEventHandler); REG_FUNC(cellSpurs, cellSpursSetExceptionEventHandler); REG_FUNC(cellSpurs, cellSpursUnsetExceptionEventHandler); // Event flag REG_FUNC(cellSpurs, _cellSpursEventFlagInitialize); REG_FUNC(cellSpurs, cellSpursEventFlagAttachLv2EventQueue); REG_FUNC(cellSpurs, cellSpursEventFlagDetachLv2EventQueue); REG_FUNC(cellSpurs, cellSpursEventFlagWait); REG_FUNC(cellSpurs, cellSpursEventFlagClear); REG_FUNC(cellSpurs, cellSpursEventFlagSet); REG_FUNC(cellSpurs, cellSpursEventFlagTryWait); REG_FUNC(cellSpurs, cellSpursEventFlagGetDirection); REG_FUNC(cellSpurs, cellSpursEventFlagGetClearMode); REG_FUNC(cellSpurs, cellSpursEventFlagGetTasksetAddress); // Taskset REG_FUNC(cellSpurs, cellSpursCreateTaskset); REG_FUNC(cellSpurs, cellSpursCreateTasksetWithAttribute); REG_FUNC(cellSpurs, _cellSpursTasksetAttributeInitialize); REG_FUNC(cellSpurs, _cellSpursTasksetAttribute2Initialize); REG_FUNC(cellSpurs, cellSpursTasksetAttributeSetName); REG_FUNC(cellSpurs, cellSpursTasksetAttributeSetTasksetSize); REG_FUNC(cellSpurs, cellSpursTasksetAttributeEnableClearLS); REG_FUNC(cellSpurs, cellSpursJoinTaskset); 
REG_FUNC(cellSpurs, cellSpursGetTasksetId); REG_FUNC(cellSpurs, cellSpursShutdownTaskset); REG_FUNC(cellSpurs, cellSpursCreateTask); REG_FUNC(cellSpurs, cellSpursCreateTaskWithAttribute); REG_FUNC(cellSpurs, _cellSpursTaskAttributeInitialize); REG_FUNC(cellSpurs, _cellSpursTaskAttribute2Initialize); REG_FUNC(cellSpurs, cellSpursTaskAttributeSetExitCodeContainer); REG_FUNC(cellSpurs, cellSpursTaskExitCodeGet); REG_FUNC(cellSpurs, cellSpursTaskExitCodeInitialize); REG_FUNC(cellSpurs, cellSpursTaskExitCodeTryGet); REG_FUNC(cellSpurs, cellSpursTaskGetLoadableSegmentPattern); REG_FUNC(cellSpurs, cellSpursTaskGetReadOnlyAreaPattern); REG_FUNC(cellSpurs, cellSpursTaskGenerateLsPattern); REG_FUNC(cellSpurs, cellSpursTaskGetContextSaveAreaSize); REG_FUNC(cellSpurs, _cellSpursSendSignal); REG_FUNC(cellSpurs, cellSpursCreateTaskset2); REG_FUNC(cellSpurs, cellSpursCreateTask2); REG_FUNC(cellSpurs, cellSpursJoinTask2); REG_FUNC(cellSpurs, cellSpursTryJoinTask2); REG_FUNC(cellSpurs, cellSpursDestroyTaskset2); REG_FUNC(cellSpurs, cellSpursCreateTask2WithBinInfo); REG_FUNC(cellSpurs, cellSpursLookUpTasksetAddress); REG_FUNC(cellSpurs, cellSpursTasksetGetSpursAddress); REG_FUNC(cellSpurs, cellSpursGetTasksetInfo); REG_FUNC(cellSpurs, cellSpursTasksetSetExceptionEventHandler); REG_FUNC(cellSpurs, cellSpursTasksetUnsetExceptionEventHandler); // Job Chain REG_FUNC(cellSpurs, cellSpursCreateJobChain); REG_FUNC(cellSpurs, cellSpursCreateJobChainWithAttribute); REG_FUNC(cellSpurs, cellSpursShutdownJobChain); REG_FUNC(cellSpurs, cellSpursJoinJobChain); REG_FUNC(cellSpurs, cellSpursKickJobChain); REG_FUNC(cellSpurs, cellSpursRunJobChain); REG_FUNC(cellSpurs, cellSpursJobChainGetError); REG_FUNC(cellSpurs, _cellSpursJobChainAttributeInitialize); REG_FUNC(cellSpurs, cellSpursJobChainAttributeSetName); REG_FUNC(cellSpurs, cellSpursJobChainAttributeSetHaltOnError); REG_FUNC(cellSpurs, cellSpursJobChainAttributeSetJobTypeMemoryCheck); REG_FUNC(cellSpurs, cellSpursGetJobChainId); 
REG_FUNC(cellSpurs, cellSpursJobChainSetExceptionEventHandler); REG_FUNC(cellSpurs, cellSpursJobChainUnsetExceptionEventHandler); REG_FUNC(cellSpurs, cellSpursGetJobChainInfo); REG_FUNC(cellSpurs, cellSpursJobChainGetSpursAddress); // Job Guard REG_FUNC(cellSpurs, cellSpursJobGuardInitialize); REG_FUNC(cellSpurs, cellSpursJobGuardNotify); REG_FUNC(cellSpurs, cellSpursJobGuardReset); // LFQueue REG_FUNC(cellSpurs, _cellSpursLFQueueInitialize); REG_FUNC(cellSpurs, _cellSpursLFQueuePushBody); REG_FUNC(cellSpurs, cellSpursLFQueueAttachLv2EventQueue); REG_FUNC(cellSpurs, cellSpursLFQueueDetachLv2EventQueue); REG_FUNC(cellSpurs, _cellSpursLFQueuePopBody); REG_FUNC(cellSpurs, cellSpursLFQueueGetTasksetAddress); // Queue REG_FUNC(cellSpurs, _cellSpursQueueInitialize); REG_FUNC(cellSpurs, cellSpursQueuePopBody); REG_FUNC(cellSpurs, cellSpursQueuePushBody); REG_FUNC(cellSpurs, cellSpursQueueAttachLv2EventQueue); REG_FUNC(cellSpurs, cellSpursQueueDetachLv2EventQueue); REG_FUNC(cellSpurs, cellSpursQueueGetTasksetAddress); REG_FUNC(cellSpurs, cellSpursQueueClear); REG_FUNC(cellSpurs, cellSpursQueueDepth); REG_FUNC(cellSpurs, cellSpursQueueGetEntrySize); REG_FUNC(cellSpurs, cellSpursQueueSize); REG_FUNC(cellSpurs, cellSpursQueueGetDirection); // Workload REG_FUNC(cellSpurs, cellSpursWorkloadAttributeSetName); REG_FUNC(cellSpurs, cellSpursWorkloadAttributeSetShutdownCompletionEventHook); REG_FUNC(cellSpurs, cellSpursAddWorkloadWithAttribute); REG_FUNC(cellSpurs, cellSpursAddWorkload); REG_FUNC(cellSpurs, cellSpursShutdownWorkload); REG_FUNC(cellSpurs, cellSpursWaitForWorkloadShutdown); REG_FUNC(cellSpurs, cellSpursRemoveSystemWorkloadForUtility); REG_FUNC(cellSpurs, cellSpursRemoveWorkload); REG_FUNC(cellSpurs, cellSpursReadyCountStore); REG_FUNC(cellSpurs, cellSpursGetWorkloadFlag); REG_FUNC(cellSpurs, _cellSpursWorkloadFlagReceiver); REG_FUNC(cellSpurs, _cellSpursWorkloadAttributeInitialize); REG_FUNC(cellSpurs, cellSpursSendWorkloadSignal); REG_FUNC(cellSpurs, 
cellSpursGetWorkloadData); REG_FUNC(cellSpurs, cellSpursReadyCountAdd); REG_FUNC(cellSpurs, cellSpursReadyCountCompareAndSwap); REG_FUNC(cellSpurs, cellSpursReadyCountSwap); REG_FUNC(cellSpurs, cellSpursRequestIdleSpu); REG_FUNC(cellSpurs, cellSpursGetWorkloadInfo); REG_FUNC(cellSpurs, cellSpursGetSpuGuid); REG_FUNC(cellSpurs, _cellSpursWorkloadFlagReceiver2); REG_FUNC(cellSpurs, cellSpursGetJobPipelineInfo); REG_FUNC(cellSpurs, cellSpursJobSetMaxGrab); REG_FUNC(cellSpurs, cellSpursJobHeaderSetJobbin2Param); REG_FUNC(cellSpurs, cellSpursWakeUp); REG_FUNC(cellSpurs, cellSpursAddUrgentCommand); REG_FUNC(cellSpurs, cellSpursAddUrgentCall); REG_FUNC(cellSpurs, cellSpursBarrierInitialize); REG_FUNC(cellSpurs, cellSpursBarrierGetTasksetAddress); REG_FUNC(cellSpurs, _cellSpursSemaphoreInitialize); REG_FUNC(cellSpurs, cellSpursSemaphoreGetTasksetAddress); // Trace REG_FUNC(cellSpurs, cellSpursTraceInitialize); REG_FUNC(cellSpurs, cellSpursTraceStart); REG_FUNC(cellSpurs, cellSpursTraceStop); REG_FUNC(cellSpurs, cellSpursTraceFinalize); _this->add_init_func([](ppu_static_module*) { const auto val = g_cfg.core.spu_accurate_reservations ? MFF_PERFECT : MFF_FORCED_HLE; REINIT_FUNC(cellSpursSetPriorities).flag(val); REINIT_FUNC(cellSpursAddWorkload).flag(val); REINIT_FUNC(cellSpursAddWorkloadWithAttribute).flag(val); REINIT_FUNC(cellSpursShutdownWorkload).flag(val); REINIT_FUNC(cellSpursReadyCountStore).flag(val); REINIT_FUNC(cellSpursSetPriority).flag(val); REINIT_FUNC(cellSpursTraceInitialize).flag(val); REINIT_FUNC(cellSpursWaitForWorkloadShutdown).flag(val); REINIT_FUNC(cellSpursRequestIdleSpu).flag(val); REINIT_FUNC(cellSpursRemoveWorkload).flag(val); }); });
155,836
C++
.cpp
4,512
32.007092
321
0.726703
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
5,192
cellPhotoImport.cpp
RPCS3_rpcs3/rpcs3/Emu/Cell/Modules/cellPhotoImport.cpp
#include "stdafx.h"
#include "Emu/Cell/PPUModule.h"
#include "Emu/Cell/lv2/sys_fs.h"
#include "Emu/RSX/Overlays/overlay_media_list_dialog.h"
#include "Emu/VFS.h"
#include "Emu/System.h"
#include "Utilities/StrUtil.h"

#include "cellSysutil.h"

// HLE implementation of the cellPhotoImport utility: lets the guest pick a
// photo from /dev_hdd0/photo via an overlay dialog and copies it into a
// game-provided destination directory.
LOG_CHANNEL(cellPhotoImportUtil, "cellPhotoImport");

// Return Codes
enum CellPhotoImportError : u32
{
	CELL_PHOTO_IMPORT_ERROR_BUSY = 0x8002c701,
	CELL_PHOTO_IMPORT_ERROR_INTERNAL = 0x8002c702,
	CELL_PHOTO_IMPORT_ERROR_PARAM = 0x8002c703,
	CELL_PHOTO_IMPORT_ERROR_ACCESS_ERROR = 0x8002c704,
	CELL_PHOTO_IMPORT_ERROR_COPY = 0x8002c705,
	CELL_PHOTO_IMPORT_ERROR_INITIALIZE = 0x8002c706,
};

// Pretty-printer so CellPhotoImportError values show up by name in logs.
template<>
void fmt_class_string<CellPhotoImportError>::format(std::string& out, u64 arg)
{
	format_enum(out, arg, [](auto error)
	{
		switch (error)
		{
			STR_CASE(CELL_PHOTO_IMPORT_ERROR_BUSY);
			STR_CASE(CELL_PHOTO_IMPORT_ERROR_INTERNAL);
			STR_CASE(CELL_PHOTO_IMPORT_ERROR_PARAM);
			STR_CASE(CELL_PHOTO_IMPORT_ERROR_ACCESS_ERROR);
			STR_CASE(CELL_PHOTO_IMPORT_ERROR_COPY);
			STR_CASE(CELL_PHOTO_IMPORT_ERROR_INITIALIZE);
		}

		return unknown;
	});
}

enum CellPhotoImportVersion : u32
{
	CELL_PHOTO_IMPORT_VERSION_CURRENT = 0,
};

// Buffer-size limits from the SDK interface.
enum
{
	CELL_PHOTO_IMPORT_HDD_PATH_MAX = 1055,
	CELL_PHOTO_IMPORT_PHOTO_TITLE_MAX_LENGTH = 64,
	CELL_PHOTO_IMPORT_GAME_TITLE_MAX_SIZE = 128,
	CELL_PHOTO_IMPORT_GAME_COMMENT_MAX_SIZE = 1024
};

enum CellPhotoImportFormatType
{
	CELL_PHOTO_IMPORT_FT_UNKNOWN = 0,
	CELL_PHOTO_IMPORT_FT_JPEG,
	CELL_PHOTO_IMPORT_FT_PNG,
	CELL_PHOTO_IMPORT_FT_GIF,
	CELL_PHOTO_IMPORT_FT_BMP,
	CELL_PHOTO_IMPORT_FT_TIFF,
	CELL_PHOTO_IMPORT_FT_MPO,
};

enum CellPhotoImportTexRot
{
	CELL_PHOTO_IMPORT_TEX_ROT_0 = 0,
	CELL_PHOTO_IMPORT_TEX_ROT_90,
	CELL_PHOTO_IMPORT_TEX_ROT_180,
	CELL_PHOTO_IMPORT_TEX_ROT_270,
};

// Per-image properties reported back to the guest (big-endian guest layout).
struct CellPhotoImportFileDataSub
{
	be_t<s32> width;
	be_t<s32> height;
	be_t<CellPhotoImportFormatType> format;
	be_t<CellPhotoImportTexRot> rotate;
};

// Result record handed to the finish callback (big-endian guest layout).
struct CellPhotoImportFileData
{
	char dstFileName[CELL_FS_MAX_FS_FILE_NAME_LENGTH];
	// NOTE: title buffer is 3x the character limit, presumably to hold
	// multi-byte UTF-8 characters — TODO confirm against SDK headers.
	char photo_title[CELL_PHOTO_IMPORT_PHOTO_TITLE_MAX_LENGTH * 3];
	char game_title[CELL_PHOTO_IMPORT_GAME_TITLE_MAX_SIZE];
	char game_comment[CELL_PHOTO_IMPORT_GAME_COMMENT_MAX_SIZE];
	char padding;
	vm::bptr<CellPhotoImportFileDataSub> data_sub;
	vm::bptr<void> reserved;
};

struct CellPhotoImportSetParam
{
	be_t<u32> fileSizeMax; // maximum accepted source file size in bytes
	vm::bptr<void> reserved1;
	vm::bptr<void> reserved2;
};

using CellPhotoImportFinishCallback = void(s32 result, vm::ptr<CellPhotoImportFileData> filedata, vm::ptr<void> userdata);

// Emulator-side state for an in-flight import request (fxo singleton).
struct photo_import
{
	shared_mutex mutex;      // guards concurrent cellPhotoImport/cellPhotoImport2 entry
	bool is_busy = false;    // true while the dialog/copy sequence is pending
	CellPhotoImportSetParam param{};
	vm::ptr<CellPhotoImportFinishCallback> func_finish{};
	vm::ptr<void> userdata{};
};

// Guest-visible variables used to pass results to the finish callback.
vm::gvar<CellPhotoImportFileDataSub> g_filedata_sub;
vm::gvar<CellPhotoImportFileData> g_filedata;

// Shows the photo selection dialog and, on confirmation, copies the chosen
// file into dst_dir (a guest path inside dev_hdd0/dev_hdd1). The finish
// callback is always invoked from the sysutil callback thread, both on
// success and on cancel/failure. Caller must hold photo_import::mutex and
// must have populated param/func_finish/userdata beforehand.
error_code select_photo(std::string dst_dir)
{
	auto& pi_manager = g_fxo->get<photo_import>();

	if (!pi_manager.func_finish)
	{
		cellPhotoImportUtil.error("func_finish is null");
		return CELL_PHOTO_IMPORT_ERROR_PARAM;
	}

	// Only HDD destinations are allowed.
	if (!dst_dir.starts_with("/dev_hdd0"sv) && !dst_dir.starts_with("/dev_hdd1"sv))
	{
		cellPhotoImportUtil.error("Destination '%s' is not inside dev_hdd0 or dev_hdd1", dst_dir);
		return CELL_PHOTO_IMPORT_ERROR_ACCESS_ERROR; // TODO: is this correct?
	}

	// From here on dst_dir is the host path of the guest directory.
	dst_dir = vfs::get(dst_dir);

	if (!fs::is_dir(dst_dir))
	{
		// TODO: check if the dir is user accessible and can be written to
		cellPhotoImportUtil.error("Destination '%s' is not a directory", dst_dir);
		return CELL_PHOTO_IMPORT_ERROR_ACCESS_ERROR; // TODO: is this correct?
	}

	pi_manager.is_busy = true;

	const std::string vfs_dir_path = vfs::get("/dev_hdd0/photo");
	const std::string title = get_localized_string(localized_string_id::RSX_OVERLAYS_MEDIA_DIALOG_TITLE_PHOTO_IMPORT);

	error_code error = rsx::overlays::show_media_list_dialog(rsx::overlays::media_list_dialog::media_type::photo, vfs_dir_path, title,
		[&pi_manager, dst_dir](s32 status, utils::media_info info)
		{
			// Defer the actual work (and the guest callback) to the sysutil
			// callback thread, which provides the required ppu_thread context.
			sysutil_register_cb([&pi_manager, dst_dir, info, status](ppu_thread& ppu) -> s32
			{
				// Reset the shared result records before filling them in.
				*g_filedata_sub = {};
				*g_filedata = {};

				// Negative dialog status means the user canceled.
				u32 result = status >= 0 ? u32{CELL_OK} : u32{CELL_CANCEL};

				if (result == CELL_OK)
				{
					fs::stat_t f_info{};

					if (!fs::get_stat(info.path, f_info) || f_info.is_directory)
					{
						cellPhotoImportUtil.error("Path does not belong to a valid file: '%s'", info.path);
						result = CELL_PHOTO_IMPORT_ERROR_ACCESS_ERROR; // TODO: is this correct ?
						pi_manager.is_busy = false;
						pi_manager.func_finish(ppu, result, g_filedata, pi_manager.userdata);
						return CELL_OK;
					}

					// Enforce the size limit the game requested in SetParam.
					if (f_info.size > pi_manager.param.fileSizeMax)
					{
						cellPhotoImportUtil.error("File size is too large: %d (fileSizeMax=%d)", f_info.size, pi_manager.param.fileSizeMax);
						result = CELL_PHOTO_IMPORT_ERROR_COPY; // TODO: is this correct ?
						pi_manager.is_busy = false;
						pi_manager.func_finish(ppu, result, g_filedata, pi_manager.userdata);
						return CELL_OK;
					}

					const std::string filename = info.path.substr(info.path.find_last_of(fs::delim) + 1);
					const std::string title = info.get_metadata("title", filename); // fall back to the filename when no title tag exists
					const std::string dst_path = dst_dir + "/" + filename;
					std::string sub_type = info.sub_type;

					// Fill the guest-visible result structure.
					strcpy_trunc(g_filedata->dstFileName, filename);
					strcpy_trunc(g_filedata->photo_title, title);
					strcpy_trunc(g_filedata->game_title, Emu.GetTitle());
					strcpy_trunc(g_filedata->game_comment, ""); // TODO

					g_filedata->data_sub = g_filedata_sub;
					g_filedata->data_sub->width = info.width;
					g_filedata->data_sub->height = info.height;

					cellPhotoImportUtil.notice("Raw image data: filename='%s', title='%s', game='%s', sub_type='%s', width=%d, height=%d, orientation=%d ", filename, title, Emu.GetTitle(), sub_type, info.width, info.height, info.orientation);

					// Fallback to extension if necessary
					if (sub_type.empty())
					{
						sub_type = get_file_extension(filename);
					}

					if (!sub_type.empty())
					{
						sub_type = fmt::to_lower(sub_type);
					}

					// Map the (lower-cased) container/extension name to the SDK format enum.
					if (sub_type == "jpg" || sub_type == "jpeg")
					{
						g_filedata->data_sub->format = CELL_PHOTO_IMPORT_FT_JPEG;
					}
					else if (sub_type == "png")
					{
						g_filedata->data_sub->format = CELL_PHOTO_IMPORT_FT_PNG;
					}
					else if (sub_type == "tif" || sub_type == "tiff")
					{
						g_filedata->data_sub->format = CELL_PHOTO_IMPORT_FT_TIFF;
					}
					else if (sub_type == "bmp")
					{
						g_filedata->data_sub->format = CELL_PHOTO_IMPORT_FT_BMP;
					}
					else if (sub_type == "gif")
					{
						g_filedata->data_sub->format = CELL_PHOTO_IMPORT_FT_GIF;
					}
					else if (sub_type == "mpo")
					{
						g_filedata->data_sub->format = CELL_PHOTO_IMPORT_FT_MPO;
					}
					else
					{
						g_filedata->data_sub->format = CELL_PHOTO_IMPORT_FT_UNKNOWN;
					}

					// Translate the EXIF-style orientation to the SDK rotation enum.
					switch (info.orientation)
					{
					default:
					case CELL_SEARCH_ORIENTATION_UNKNOWN:
					case CELL_SEARCH_ORIENTATION_TOP_LEFT:
						g_filedata->data_sub->rotate = CELL_PHOTO_IMPORT_TEX_ROT_0;
						break;
					case CELL_SEARCH_ORIENTATION_TOP_RIGHT:
						g_filedata->data_sub->rotate = CELL_PHOTO_IMPORT_TEX_ROT_90;
						break;
					case CELL_SEARCH_ORIENTATION_BOTTOM_RIGHT:
						g_filedata->data_sub->rotate = CELL_PHOTO_IMPORT_TEX_ROT_180;
						break;
					case CELL_SEARCH_ORIENTATION_BOTTOM_LEFT:
						g_filedata->data_sub->rotate = CELL_PHOTO_IMPORT_TEX_ROT_270;
						break;
					}

					cellPhotoImportUtil.notice("Media list dialog: Copying '%s' to '%s'...", info.path, dst_path);

					// Do not overwrite an existing destination file.
					if (!fs::copy_file(info.path, dst_path, false))
					{
						cellPhotoImportUtil.error("Failed to copy '%s' to '%s'. Error = '%s'", info.path, dst_path, fs::g_tls_error);
						result = CELL_PHOTO_IMPORT_ERROR_COPY;
					}

					cellPhotoImportUtil.notice("Cell image data: dstFileName='%s', photo_title='%s', game_title='%s', format=%d, width=%d, height=%d, rotate=%d ", g_filedata->dstFileName, g_filedata->photo_title, g_filedata->game_title, static_cast<s32>(g_filedata->data_sub->format), g_filedata->data_sub->width, g_filedata->data_sub->height, static_cast<s32>(g_filedata->data_sub->rotate));
				}
				else
				{
					cellPhotoImportUtil.notice("Media list dialog was canceled");
				}

				// Mark the manager idle and notify the guest in all remaining paths.
				pi_manager.is_busy = false;
				pi_manager.func_finish(ppu, result, g_filedata, pi_manager.userdata);
				return CELL_OK;
			});
		});

	// If the dialog could not even be shown, undo the busy flag here
	// (otherwise the callback above is responsible for clearing it).
	if (error != CELL_OK)
	{
		pi_manager.is_busy = false;
	}

	return error;
}

// Starts a photo import (SDK variant with a memory container argument).
// Validates arguments, rejects concurrent use, stores the request in the
// photo_import singleton, and delegates to select_photo.
error_code cellPhotoImport(u32 version, vm::cptr<char> dstHddPath, vm::ptr<CellPhotoImportSetParam> param, u32 container, vm::ptr<CellPhotoImportFinishCallback> funcFinish, vm::ptr<void> userdata)
{
	cellPhotoImportUtil.todo("cellPhotoImport(version=0x%x, dstHddPath=%s, param=*0x%x, container=0x%x, funcFinish=*0x%x, userdata=*0x%x)", version, dstHddPath, param, container, funcFinish, userdata);

	if (version != CELL_PHOTO_IMPORT_VERSION_CURRENT || !funcFinish || !param || !dstHddPath)
	{
		return CELL_PHOTO_IMPORT_ERROR_PARAM;
	}

	// Container validation is not implemented yet; condition intentionally disabled.
	if (container != 0xffffffff && false) // TODO
	{
		return CELL_PHOTO_IMPORT_ERROR_PARAM;
	}

	auto& pi_manager = g_fxo->get<photo_import>();
	std::lock_guard lock(pi_manager.mutex);

	// Only one import may be active at a time.
	if (pi_manager.is_busy)
	{
		return CELL_PHOTO_IMPORT_ERROR_BUSY;
	}

	pi_manager.param = *param;
	pi_manager.func_finish = funcFinish;
	pi_manager.userdata = userdata;

	return select_photo(dstHddPath.get_ptr());
}

// Starts a photo import (SDK variant without a memory container argument).
// Otherwise identical to cellPhotoImport.
error_code cellPhotoImport2(u32 version, vm::cptr<char> dstHddPath, vm::ptr<CellPhotoImportSetParam> param, vm::ptr<CellPhotoImportFinishCallback> funcFinish, vm::ptr<void> userdata)
{
	cellPhotoImportUtil.todo("cellPhotoImport2(version=0x%x, dstHddPath=%s, param=*0x%x, funcFinish=*0x%x, userdata=*0x%x)", version, dstHddPath, param, funcFinish, userdata);

	if (version != CELL_PHOTO_IMPORT_VERSION_CURRENT || !funcFinish || !param || !dstHddPath)
	{
		return CELL_PHOTO_IMPORT_ERROR_PARAM;
	}

	auto& pi_manager = g_fxo->get<photo_import>();
	std::lock_guard lock(pi_manager.mutex);

	if (pi_manager.is_busy)
	{
		return CELL_PHOTO_IMPORT_ERROR_BUSY;
	}

	pi_manager.param = *param;
	pi_manager.func_finish = funcFinish;
	pi_manager.userdata = userdata;

	return select_photo(dstHddPath.get_ptr());
}

// Module registration: exported functions plus the hidden result variables.
DECLARE(ppu_module_manager::cellPhotoImportUtil)("cellPhotoImportUtil", []()
{
	REG_FUNC(cellPhotoImportUtil, cellPhotoImport);
	REG_FUNC(cellPhotoImportUtil, cellPhotoImport2);
	REG_VAR(cellPhotoImportUtil, g_filedata_sub).flag(MFF_HIDDEN);
	REG_VAR(cellPhotoImportUtil, g_filedata).flag(MFF_HIDDEN);
});
10,619
C++
.cpp
286
33.465035
235
0.702471
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
5,193
cellBgdl.cpp
RPCS3_rpcs3/rpcs3/Emu/Cell/Modules/cellBgdl.cpp
#include "stdafx.h"
#include "Emu/Cell/PPUModule.h"

#include "cellBgdl.h"

// Background download (BGDL) utility. All entry points are unimplemented
// stubs: they log their arguments via .todo() and report success without
// writing any output data.
LOG_CHANNEL(cellBGDL);

// Queries download info by content id. Stub: 'info' is left untouched.
error_code cellBGDLGetInfo(vm::cptr<char> content_id, vm::ptr<CellBGDLInfo> info, s32 num)
{
	cellBGDL.todo("cellBGDLGetInfo(content_id=%s, info=*0x%x, num=%d)", content_id, info, num);
	return CELL_OK;
}

// Queries download info by service id. Stub: 'info' is left untouched.
error_code cellBGDLGetInfo2(vm::cptr<char> service_id, vm::ptr<CellBGDLInfo> info, s32 num)
{
	cellBGDL.todo("cellBGDLGetInfo2(service_id=%s, info=*0x%x, num=%d)", service_id, info, num);
	return CELL_OK;
}

// Sets the background download mode. Stub: the mode is ignored.
error_code cellBGDLSetMode(CellBGDLMode mode)
{
	cellBGDL.todo("cellBGDLSetMode(mode=%d)", +mode);
	return CELL_OK;
}

// Gets the background download mode. Stub: '*mode' is not written.
error_code cellBGDLGetMode(vm::ptr<CellBGDLMode> mode)
{
	cellBGDL.todo("cellBGDLGetMode(mode=*0x%x)", mode);
	return CELL_OK;
}

// Module registration for the cellBGDLUtility HLE module.
DECLARE(ppu_module_manager::cellBGDL)("cellBGDLUtility", []()
{
	REG_FUNC(cellBGDLUtility, cellBGDLGetInfo);
	REG_FUNC(cellBGDLUtility, cellBGDLGetInfo2);
	REG_FUNC(cellBGDLUtility, cellBGDLSetMode);
	REG_FUNC(cellBGDLUtility, cellBGDLGetMode);
});
1,012
C++
.cpp
31
31.032258
93
0.76386
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
true
false
false
true
false
false
5,194
cellSysutilMisc.cpp
RPCS3_rpcs3/rpcs3/Emu/Cell/Modules/cellSysutilMisc.cpp
#include "stdafx.h"
#include "Emu/system_config.h"
#include "Emu/Cell/PPUModule.h"

#include "cellSysutil.h"

LOG_CHANNEL(cellSysutilMisc);

// Reports the license area the emulator is configured with.
// The value is read directly from the user configuration (sys.license_area)
// and returned to the guest unchanged as a CellSysutilLicenseArea value.
s32 cellSysutilGetLicenseArea()
{
	cellSysutilMisc.warning("cellSysutilGetLicenseArea()");

	// Read the configured region and log it by name for debugging.
	const CellSysutilLicenseArea area = g_cfg.sys.license_area;
	cellSysutilMisc.notice("cellSysutilGetLicenseArea(): %s", area);

	return area;
}

// Module registration for the cellSysutilMisc HLE module.
DECLARE(ppu_module_manager::cellSysutilMisc)("cellSysutilMisc", []()
{
	REG_FUNC(cellSysutilMisc, cellSysutilGetLicenseArea);
});
530
C++
.cpp
16
31.5625
73
0.805882
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
true
false
false
true
false
false
5,195
cellGameExec.cpp
RPCS3_rpcs3/rpcs3/Emu/Cell/Modules/cellGameExec.cpp
#include "stdafx.h"
#include "Emu/Cell/PPUModule.h"
#include "Emu/IdManager.h"
#include "Emu/System.h"

#include "cellGame.h"

// cellGameExec: game-to-game execution helpers. Most entry points are
// unimplemented stubs that only log their arguments; the exceptions are
// cellGameSetExitParam and cellGameGetBootGameInfo, which share state
// through the game_exec_data fxo object.
LOG_CHANNEL(cellGameExec);

struct game_exec_data
{
	atomic_t<u32> execdata = 0; // TODO: pass this to the source application after closing the current application
};

// Stores the exit parameter so cellGameGetBootGameInfo can report it later.
error_code cellGameSetExitParam(u32 execdata)
{
	cellGameExec.todo("cellGameSetExitParam(execdata=0x%x)", execdata);

	g_fxo->get<game_exec_data>().execdata = execdata;

	return CELL_OK;
}

// PlayStation Home export path. Home is defunct, so this reports "no app".
error_code cellGameGetHomeDataExportPath(vm::ptr<char> exportPath)
{
	cellGameExec.warning("cellGameGetHomeDataExportPath(exportPath=*0x%x)", exportPath);

	if (!exportPath)
	{
		return CELL_GAME_ERROR_PARAM;
	}

	// TODO: PlayStation home is defunct.

	return CELL_GAME_ERROR_NOAPP;
}

// PlayStation Home path. Stub: 'homePath' is not written.
// NOTE(review): unlike the export/import variants this returns CELL_OK,
// not CELL_GAME_ERROR_NOAPP — intent unconfirmed.
error_code cellGameGetHomePath(vm::ptr<char> homePath)
{
	cellGameExec.todo("cellGameGetHomePath(homePath=*0x%x)", homePath);

	if (!homePath)
	{
		return CELL_GAME_ERROR_PARAM;
	}

	// TODO: PlayStation home is defunct.

	return CELL_OK;
}

// PlayStation Home import path. Home is defunct, so this reports "no app".
error_code cellGameGetHomeDataImportPath(vm::ptr<char> importPath)
{
	cellGameExec.warning("cellGameGetHomeDataImportPath(importPath=*0x%x)", importPath);

	if (!importPath)
	{
		return CELL_GAME_ERROR_PARAM;
	}

	// TODO: PlayStation home is defunct.

	return CELL_GAME_ERROR_NOAPP;
}

// PlayStation Home launch option paths. Home is not supported; reports "no app".
error_code cellGameGetHomeLaunchOptionPath(vm::ptr<char> commonPath, vm::ptr<char> personalPath)
{
	cellGameExec.todo("cellGameGetHomeLaunchOptionPath(commonPath=%s, personalPath=%s)", commonPath, personalPath);

	if (!commonPath || !personalPath)
	{
		return CELL_GAME_ERROR_PARAM;
	}

	// TODO: PlayStation home is not supported atm.

	return CELL_GAME_ERROR_NOAPP;
}

// Launches another game. Stub: logs and pretends success.
error_code cellGameExecGame(u32 type, vm::ptr<char> dirName, u32 options, u32 memContainer, u32 execData, u32 userData)
{
	cellGameExec.todo("cellGameExecGame(type=0x%x, dirName=%s, options=0x%x, memContainer=0x%x, execData=0x%x, userData=0x%x)", type, dirName, options, memContainer, execData, userData);
	return CELL_OK;
}

// Deletes a game directory. Stub: nothing is deleted.
error_code cellGameDeleteGame(vm::ptr<char> dirName, u32 memContainer)
{
	cellGameExec.todo("cellGameDeleteGame(dirName=%s, memContainer=0x%x)", dirName, memContainer);
	return CELL_OK;
}

// Reports how the current application was booted: the boot source type,
// the stored exit param (if requested) and, for HDD boots, the game
// directory name copied into dirName (incl. NUL terminator).
error_code cellGameGetBootGameInfo(vm::ptr<u32> type, vm::ptr<char> dirName, vm::ptr<u32> execdata)
{
	cellGameExec.todo("cellGameGetBootGameInfo(type=*0x%x, dirName=*0x%x, execdata=*0x%x)", type, dirName, execdata);

	if (!type || !dirName) // execdata can be NULL
	{
		return CELL_GAME_ERROR_PARAM;
	}

	const u32 source_type = Emu.GetBootSourceType();

	*type = source_type;

	if (execdata)
	{
		*execdata = g_fxo->get<game_exec_data>().execdata;
	}

	if (source_type == CELL_GAME_GAMETYPE_HDD)
	{
		const std::string dir_name = Emu.GetDir();

		// Guard the fixed-size guest buffer before copying.
		if (dir_name.size() >= CELL_GAME_DIRNAME_SIZE)
		{
			return CELL_HDDGAME_ERROR_INTERNAL; // Speculative
		}

		std::memcpy(dirName.get_ptr(), dir_name.c_str(), dir_name.size() + 1);
	}

	return CELL_OK;
}

// Reports info about the game we exited from. Stub: outputs not written.
error_code cellGameGetExitGameInfo(vm::ptr<u32> status, vm::ptr<u32> type, vm::ptr<char> dirName, vm::ptr<u32> execData, vm::ptr<u32> userData)
{
	cellGameExec.todo("cellGameGetExitGameInfo(status=*0x%x, type=*0x%x, dirName=*0x%x, execData=*0x%x, userData=0x%x)", status, type, dirName, execData, userData);
	return CELL_OK;
}

// Enumerates installed games. Stub: outputs not written.
error_code cellGameGetList(u32 listBufNum, u32 unk, vm::ptr<u32> listNum, vm::ptr<u32> getListNum, u32 memContainer)
{
	cellGameExec.todo("cellGameGetList(listBufNum=0x%x, unk=0x%x, listNum=*0x%x, getListNum=*0x%x, memContainer=0x%x)", listBufNum, unk, listNum, getListNum, memContainer);
	return CELL_OK;
}

// Module registration for the cellGameExec HLE module.
DECLARE(ppu_module_manager::cellGameExec)("cellGameExec", []()
{
	REG_FUNC(cellGameExec, cellGameSetExitParam);
	REG_FUNC(cellGameExec, cellGameGetHomeDataExportPath);
	REG_FUNC(cellGameExec, cellGameGetHomePath);
	REG_FUNC(cellGameExec, cellGameGetHomeDataImportPath);
	REG_FUNC(cellGameExec, cellGameGetHomeLaunchOptionPath);
	REG_FUNC(cellGameExec, cellGameExecGame);
	REG_FUNC(cellGameExec, cellGameDeleteGame);
	REG_FUNC(cellGameExec, cellGameGetBootGameInfo);
	REG_FUNC(cellGameExec, cellGameGetExitGameInfo);
	REG_FUNC(cellGameExec, cellGameGetList);
});
4,141
C++
.cpp
113
34.59292
183
0.769597
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
5,196
cellGifDec.cpp
RPCS3_rpcs3/rpcs3/Emu/Cell/Modules/cellGifDec.cpp
#include "stdafx.h" #include "Emu/VFS.h" #include "Emu/IdManager.h" #include "Emu/Cell/PPUModule.h" // STB_IMAGE_IMPLEMENTATION is already defined in stb_image.cpp #include <stb_image.h> #include "Emu/Cell/lv2/sys_fs.h" #include "cellGifDec.h" #include "util/asm.hpp" LOG_CHANNEL(cellGifDec); // Temporarily #ifndef _MSC_VER #pragma GCC diagnostic ignored "-Wunused-parameter" #endif template <> void fmt_class_string<CellGifDecError>::format(std::string& out, u64 arg) { format_enum(out, arg, [](auto error) { switch (error) { STR_CASE(CELL_GIFDEC_ERROR_OPEN_FILE); STR_CASE(CELL_GIFDEC_ERROR_STREAM_FORMAT); STR_CASE(CELL_GIFDEC_ERROR_SEQ); STR_CASE(CELL_GIFDEC_ERROR_ARG); STR_CASE(CELL_GIFDEC_ERROR_FATAL); STR_CASE(CELL_GIFDEC_ERROR_SPU_UNSUPPORT); STR_CASE(CELL_GIFDEC_ERROR_SPU_ERROR); STR_CASE(CELL_GIFDEC_ERROR_CB_PARAM); } return unknown; }); } error_code cellGifDecCreate(vm::ptr<GifDecoder> mainHandle, vm::cptr<CellGifDecThreadInParam> threadInParam, vm::ptr<CellGifDecThreadOutParam> threadOutParam) { cellGifDec.todo("cellGifDecCreate(mainHandle=*0x%x, threadInParam=*0x%x, threadOutParam=*0x%x)", mainHandle, threadInParam, threadOutParam); if (!mainHandle) { return CELL_GIFDEC_ERROR_ARG; } *mainHandle = {}; if (!threadOutParam || !threadInParam || !threadInParam->cbCtrlMallocFunc || !threadInParam->cbCtrlFreeFunc || (threadInParam->spuThreadEnable != CELL_GIFDEC_SPU_THREAD_DISABLE && (threadInParam->spuThreadEnable != CELL_GIFDEC_SPU_THREAD_ENABLE || threadInParam->ppuThreadPriority > 3071 || threadInParam->spuThreadPriority > 255))) { return CELL_GIFDEC_ERROR_ARG; } threadOutParam->gifCodecVersion = 0x240000; return CELL_OK; } error_code cellGifDecExtCreate(vm::ptr<GifDecoder> mainHandle, vm::cptr<CellGifDecThreadInParam> threadInParam, vm::ptr<CellGifDecThreadOutParam> threadOutParam, vm::cptr<CellGifDecExtThreadInParam> extThreadInParam, vm::ptr<CellGifDecExtThreadOutParam> extThreadOutParam) { cellGifDec.todo("cellGifDecExtCreate(mainHandle=*0x%x, 
threadInParam=*0x%x, threadOutParam=*0x%x, extThreadInParam=*0x%x, extThreadOutParam=*0x%x)", mainHandle, threadInParam, threadOutParam, extThreadInParam, extThreadOutParam); if (!mainHandle) { return CELL_GIFDEC_ERROR_ARG; } *mainHandle = {}; if (!threadOutParam || !extThreadOutParam || !extThreadInParam || !threadInParam || !threadInParam->cbCtrlMallocFunc || !threadInParam->cbCtrlFreeFunc || (threadInParam->spuThreadEnable != CELL_GIFDEC_SPU_THREAD_DISABLE && threadInParam->spuThreadEnable != CELL_GIFDEC_SPU_THREAD_ENABLE)) { return CELL_GIFDEC_ERROR_ARG; } if (threadInParam->spuThreadEnable == CELL_GIFDEC_SPU_THREAD_ENABLE && !extThreadInParam->spurs) { return CELL_GIFDEC_ERROR_ARG; } if (extThreadInParam->maxContention == 0u || extThreadInParam->maxContention >= 8u) { return CELL_GIFDEC_ERROR_ARG; } for (u32 i = 0; i < 8; i++) { if (extThreadInParam->priority[i] > 15) { return CELL_GIFDEC_ERROR_ARG; } } threadOutParam->gifCodecVersion = 0x240000; return CELL_OK; } error_code cellGifDecOpen(vm::ptr<GifDecoder> mainHandle, vm::pptr<GifStream> subHandle, vm::cptr<CellGifDecSrc> src, vm::ptr<CellGifDecOpnInfo> openInfo) { cellGifDec.warning("cellGifDecOpen(mainHandle=*0x%x, subHandle=**0x%x, src=*0x%x, openInfo=*0x%x)", mainHandle, subHandle, src, openInfo); if (!mainHandle || !subHandle) { return CELL_GIFDEC_ERROR_ARG; } if (false) // TODO { return CELL_GIFDEC_ERROR_SEQ; } if (!openInfo || !src) { return CELL_GIFDEC_ERROR_ARG; } GifStream current_subHandle{}; current_subHandle.fd = 0; current_subHandle.src = *src; switch (src->srcSelect) { case CELL_GIFDEC_BUFFER: { if (!src->streamPtr || !src->streamSize) { return CELL_GIFDEC_ERROR_ARG; } current_subHandle.fileSize = src->streamSize; break; } case CELL_GIFDEC_FILE: { if (!src->fileName) { return CELL_GIFDEC_ERROR_OPEN_FILE; } // Get file descriptor and size const std::string real_path = vfs::get(src->fileName.get_ptr()); fs::file file_s(real_path); if (!file_s) { return CELL_GIFDEC_ERROR_OPEN_FILE; } if 
(src->fileOffset < 0) { return CELL_GIFDEC_ERROR_ARG; } current_subHandle.fileSize = file_s.size(); current_subHandle.fd = idm::make<lv2_fs_object, lv2_file>(src->fileName.get_ptr(), std::move(file_s), 0, 0, real_path); break; } default: { return CELL_GIFDEC_ERROR_ARG; } } subHandle->set(vm::alloc(sizeof(GifStream), vm::main)); **subHandle = current_subHandle; return CELL_OK; } error_code cellGifDecExtOpen(vm::ptr<GifDecoder> mainHandle, vm::pptr<GifStream> subHandle, vm::cptr<CellGifDecSrc> src, vm::ptr<CellGifDecOpnInfo> openInfo, vm::cptr<CellGifDecCbCtrlStrm> cbCtrlStrm) { cellGifDec.todo("cellGifDecExtOpen(mainHandle=*0x%x, subHandle=*0x%x, src=*0x%x, openInfo=*0x%x, cbCtrlStrm=*0x%x)", mainHandle, subHandle, src, openInfo, cbCtrlStrm); if (!mainHandle || !subHandle) { return CELL_GIFDEC_ERROR_ARG; } if (false) // TODO { return CELL_GIFDEC_ERROR_SEQ; } if (!openInfo || !src) { return CELL_GIFDEC_ERROR_ARG; } GifStream current_subHandle{}; current_subHandle.fd = 0; current_subHandle.src = *src; switch (src->srcSelect) { case CELL_GIFDEC_BUFFER: { if (!src->streamPtr || !src->streamSize) { return CELL_GIFDEC_ERROR_ARG; } current_subHandle.fileSize = src->streamSize; break; } case CELL_GIFDEC_FILE: { if (!src->fileName) { return CELL_GIFDEC_ERROR_OPEN_FILE; } // Get file descriptor and size const std::string real_path = vfs::get(src->fileName.get_ptr()); fs::file file_s(real_path); if (!file_s) { return CELL_GIFDEC_ERROR_OPEN_FILE; } if (src->fileOffset < 0) { return CELL_GIFDEC_ERROR_ARG; } current_subHandle.fileSize = file_s.size(); current_subHandle.fd = idm::make<lv2_fs_object, lv2_file>(src->fileName.get_ptr(), std::move(file_s), 0, 0, real_path); break; } default: { return CELL_GIFDEC_ERROR_ARG; } } return CELL_OK; } error_code cellGifDecReadHeader(vm::ptr<GifDecoder> mainHandle, vm::ptr<GifStream> subHandle, vm::ptr<CellGifDecInfo> info) { cellGifDec.warning("cellGifDecReadHeader(mainHandle=*0x%x, subHandle=*0x%x, info=*0x%x)", mainHandle, subHandle, 
info); if (!mainHandle) { return CELL_GIFDEC_ERROR_ARG; } if (false) // TODO: check main handle { return CELL_GIFDEC_ERROR_SEQ; } if (!subHandle) { return CELL_GIFDEC_ERROR_ARG; } if (false) // TODO: check sub handle { return CELL_GIFDEC_ERROR_SEQ; } if (!info) { return CELL_GIFDEC_ERROR_ARG; } const u32& fd = subHandle->fd; CellGifDecInfo& current_info = subHandle->info; // Write the header to buffer u8 buffer[13]; switch (subHandle->src.srcSelect) { case CELL_GIFDEC_BUFFER: { std::memcpy(buffer, subHandle->src.streamPtr.get_ptr(), sizeof(buffer)); break; } case CELL_GIFDEC_FILE: { auto file = idm::get<lv2_fs_object, lv2_file>(fd); file->file.seek(0); file->file.read(buffer, sizeof(buffer)); break; } default: break; // TODO } if (read_from_ptr<be_t<u32>>(buffer + 0) != 0x47494638u || (read_from_ptr<le_t<u16>>(buffer + 4) != 0x6139u && read_from_ptr<le_t<u16>>(buffer + 4) != 0x6137u)) // Error: The first 6 bytes are not a valid GIF signature { return CELL_GIFDEC_ERROR_STREAM_FORMAT; // Surprisingly there is no error code related with headerss } u8 packedField = buffer[10]; current_info.SWidth = buffer[6] + buffer[7] * 0x100; current_info.SHeight = buffer[8] + buffer[9] * 0x100; current_info.SGlobalColorTableFlag = packedField >> 7; current_info.SColorResolution = ((packedField >> 4) & 7)+1; current_info.SSortFlag = (packedField >> 3) & 1; current_info.SSizeOfGlobalColorTable = (packedField & 7)+1; current_info.SBackGroundColor = buffer[11]; current_info.SPixelAspectRatio = buffer[12]; *info = current_info; return CELL_OK; } error_code cellGifDecExtReadHeader(vm::ptr<GifDecoder> mainHandle, vm::cptr<GifStream> subHandle, vm::ptr<CellGifDecInfo> info, vm::ptr<CellGifDecExtInfo> extInfo) { cellGifDec.todo("cellGifDecExtReadHeader(mainHandle=*0x%x, subHandle=*0x%x, info=*0x%x, extInfo=*0x%x)", mainHandle, subHandle, info, extInfo); if (!mainHandle) { return CELL_GIFDEC_ERROR_ARG; } if (false) // TODO: check main handle { return CELL_GIFDEC_ERROR_SEQ; } if (!subHandle) 
{ return CELL_GIFDEC_ERROR_ARG; } if (false) // TODO: check sub handle { return CELL_GIFDEC_ERROR_SEQ; } if (!info || !extInfo) { return CELL_GIFDEC_ERROR_ARG; } return CELL_OK; } error_code cellGifDecSetParameter(vm::ptr<GifDecoder> mainHandle, vm::ptr<GifStream> subHandle, vm::cptr<CellGifDecInParam> inParam, vm::ptr<CellGifDecOutParam> outParam) { cellGifDec.warning("cellGifDecSetParameter(mainHandle=*0x%x, subHandle=*0x%x, inParam=*0x%x, outParam=*0x%x)", mainHandle, subHandle, inParam, outParam); if (!mainHandle) { return CELL_GIFDEC_ERROR_ARG; } if (false) // TODO: check main handle { return CELL_GIFDEC_ERROR_SEQ; } if (!subHandle) { return CELL_GIFDEC_ERROR_ARG; } if (false) // TODO: check sub handle { return CELL_GIFDEC_ERROR_SEQ; } if (!inParam || !outParam) { return CELL_GIFDEC_ERROR_ARG; } CellGifDecInfo& current_info = subHandle->info; CellGifDecOutParam& current_outParam = subHandle->outParam; current_outParam.outputWidthByte = (current_info.SWidth * current_info.SColorResolution * 3) / 8; current_outParam.outputWidth = current_info.SWidth; current_outParam.outputHeight = current_info.SHeight; current_outParam.outputColorSpace = inParam->colorSpace; switch (current_outParam.outputColorSpace) { case CELL_GIFDEC_RGBA: case CELL_GIFDEC_ARGB: current_outParam.outputComponents = 4; break; default: return CELL_GIFDEC_ERROR_ARG; // Not supported color space } current_outParam.outputBitDepth = 0; // Unimplemented current_outParam.useMemorySpace = 0; // Unimplemented *outParam = current_outParam; return CELL_OK; } error_code cellGifDecExtSetParameter(vm::ptr<GifDecoder> mainHandle, vm::ptr<GifStream> subHandle, vm::cptr<CellGifDecInParam> inParam, vm::ptr<CellGifDecOutParam> outParam, vm::cptr<CellGifDecExtInParam> extInParam, vm::ptr<CellGifDecExtOutParam> extOutParam) { cellGifDec.todo("cellGifDecExtSetParameter(mainHandle=*0x%x, subHandle=*0x%x, inParam=*0x%x, outParam=*0x%x, extInParam=*0x%x, extOutParam=*0x%x)", mainHandle, subHandle, inParam, outParam, 
extInParam, extOutParam); if (!mainHandle) { return CELL_GIFDEC_ERROR_ARG; } if (false) // TODO: check main handle { return CELL_GIFDEC_ERROR_SEQ; } if (!subHandle) { return CELL_GIFDEC_ERROR_ARG; } if (false) // TODO: check sub handle { return CELL_GIFDEC_ERROR_SEQ; } if (!inParam || !outParam) { return CELL_GIFDEC_ERROR_ARG; } CellGifDecInfo& current_info = subHandle->info; CellGifDecOutParam& current_outParam = subHandle->outParam; current_outParam.outputWidthByte = (current_info.SWidth * current_info.SColorResolution * 3) / 8; current_outParam.outputWidth = current_info.SWidth; current_outParam.outputHeight = current_info.SHeight; current_outParam.outputColorSpace = inParam->colorSpace; switch (current_outParam.outputColorSpace) { case CELL_GIFDEC_RGBA: case CELL_GIFDEC_ARGB: current_outParam.outputComponents = 4; break; default: return CELL_GIFDEC_ERROR_ARG; // Not supported color space } current_outParam.outputBitDepth = 0; // Unimplemented current_outParam.useMemorySpace = 0; // Unimplemented *outParam = current_outParam; if (!extInParam || extInParam->bufferMode != CELL_GIFDEC_LINE_MODE || !extOutParam) { return CELL_GIFDEC_ERROR_ARG; } return CELL_OK; } error_code cellGifDecDecodeData(vm::ptr<GifDecoder> mainHandle, vm::cptr<GifStream> subHandle, vm::ptr<u8> data, vm::cptr<CellGifDecDataCtrlParam> dataCtrlParam, vm::ptr<CellGifDecDataOutInfo> dataOutInfo) { cellGifDec.warning("cellGifDecDecodeData(mainHandle=*0x%x, subHandle=*0x%x, data=*0x%x, dataCtrlParam=*0x%x, dataOutInfo=*0x%x)", mainHandle, subHandle, data, dataCtrlParam, dataOutInfo); if (!mainHandle) { return CELL_GIFDEC_ERROR_ARG; } if (false) // TODO: check main handle { return CELL_GIFDEC_ERROR_SEQ; } if (!subHandle) { return CELL_GIFDEC_ERROR_ARG; } if (false) // TODO: check sub handle { return CELL_GIFDEC_ERROR_SEQ; } if (!dataOutInfo || !dataCtrlParam) { return CELL_GIFDEC_ERROR_ARG; } dataOutInfo->status = CELL_GIFDEC_DEC_STATUS_STOP; const u32 fd = subHandle->fd; const u64 fileSize = 
subHandle->fileSize; const CellGifDecOutParam& current_outParam = subHandle->outParam; //Copy the GIF file to a buffer std::unique_ptr<u8[]> gif(new u8[fileSize]); switch (subHandle->src.srcSelect) { case CELL_GIFDEC_BUFFER: std::memcpy(gif.get(), subHandle->src.streamPtr.get_ptr(), fileSize); break; case CELL_GIFDEC_FILE: { auto file = idm::get<lv2_fs_object, lv2_file>(fd); file->file.seek(0); file->file.read(gif.get(), fileSize); break; } default: break; // TODO } //Decode GIF file. (TODO: Is there any faster alternative? Can we do it without external libraries?) int width, height, actual_components; auto image = std::unique_ptr<unsigned char,decltype(&::free)> ( stbi_load_from_memory(gif.get(), ::narrow<int>(fileSize), &width, &height, &actual_components, 4), &::free ); if (!image) return CELL_GIFDEC_ERROR_STREAM_FORMAT; const int bytesPerLine = static_cast<int>(dataCtrlParam->outputBytesPerLine); const char nComponents = 4; uint image_size = width * height * nComponents; switch(current_outParam.outputColorSpace) { case CELL_GIFDEC_RGBA: { if (bytesPerLine > width * nComponents) // Check if we need padding { const int linesize = std::min(bytesPerLine, width * nComponents); for (int i = 0; i < height; i++) { const int dstOffset = i * bytesPerLine; const int srcOffset = width * nComponents * i; memcpy(&data[dstOffset], &image.get()[srcOffset], linesize); } } else { memcpy(data.get_ptr(), image.get(), image_size); } } break; case CELL_GIFDEC_ARGB: { if (bytesPerLine > width * nComponents) // Check if we need padding { //TODO: find out if we can't do padding without an extra copy const int linesize = std::min(bytesPerLine, width * nComponents); const auto output = std::make_unique<char[]>(linesize); for (int i = 0; i < height; i++) { const int dstOffset = i * bytesPerLine; const int srcOffset = width * nComponents * i; for (int j = 0; j < linesize; j += nComponents) { output[j + 0] = image.get()[srcOffset + j + 3]; output[j + 1] = image.get()[srcOffset + j + 0]; 
output[j + 2] = image.get()[srcOffset + j + 1]; output[j + 3] = image.get()[srcOffset + j + 2]; } std::memcpy(&data[dstOffset], output.get(), linesize); } } else { const auto img = std::make_unique<uint[]>(image_size); uint* source_current = reinterpret_cast<uint*>(image.get()); uint* dest_current = img.get(); for (uint i = 0; i < image_size / nComponents; i++) { uint val = *source_current; *dest_current = (val >> 24) | (val << 8); // set alpha (A8) as leftmost byte source_current++; dest_current++; } std::memcpy(data.get_ptr(), img.get(), image_size); } } break; default: return CELL_GIFDEC_ERROR_ARG; } dataOutInfo->status = CELL_GIFDEC_DEC_STATUS_FINISH; dataOutInfo->recordType = CELL_GIFDEC_RECORD_TYPE_IMAGE_DESC; return CELL_OK; } error_code cellGifDecExtDecodeData(vm::ptr<GifDecoder> mainHandle, vm::cptr<GifStream> subHandle, vm::ptr<u8> data, vm::cptr<CellGifDecDataCtrlParam> dataCtrlParam, vm::ptr<CellGifDecDataOutInfo> dataOutInfo, vm::cptr<CellGifDecCbCtrlDisp> cbCtrlDisp, vm::ptr<CellGifDecDispParam> dispParam) { cellGifDec.todo("cellGifDecExtDecodeData(mainHandle=*0x%x, subHandle=*0x%x, data=*0x%x, dataCtrlParam=*0x%x, dataOutInfo=*0x%x, cbCtrlDisp=*0x%x, dispParam=*0x%x)", mainHandle, subHandle, data, dataCtrlParam, dataOutInfo, cbCtrlDisp, dispParam); if (!mainHandle) { return CELL_GIFDEC_ERROR_ARG; } if (false) // TODO: check main handle { return CELL_GIFDEC_ERROR_SEQ; } if (!subHandle) { return CELL_GIFDEC_ERROR_ARG; } if (false) // TODO: check sub handle { return CELL_GIFDEC_ERROR_SEQ; } if (!dataOutInfo || !dataCtrlParam) { return CELL_GIFDEC_ERROR_ARG; } return CELL_OK; } error_code cellGifDecClose(vm::ptr<GifDecoder> mainHandle, vm::cptr<GifStream> subHandle) { cellGifDec.warning("cellGifDecClose(mainHandle=*0x%x, subHandle=*0x%x)", mainHandle, subHandle); if (!mainHandle) { return CELL_GIFDEC_ERROR_ARG; } if (false) // TODO: check main handle { return CELL_GIFDEC_ERROR_SEQ; } if (!subHandle) { return CELL_GIFDEC_ERROR_ARG; } 
idm::remove<lv2_fs_object, lv2_file>(subHandle->fd); vm::dealloc(subHandle.addr()); return CELL_OK; } error_code cellGifDecDestroy(vm::ptr<GifDecoder> mainHandle) { cellGifDec.todo("cellGifDecDestroy(mainHandle=*0x%x)", mainHandle); if (!mainHandle) { return CELL_GIFDEC_ERROR_ARG; } if (false) // TODO: check main handle { return CELL_GIFDEC_ERROR_SEQ; } return CELL_OK; } DECLARE(ppu_module_manager::cellGifDec)("cellGifDec", []() { REG_FUNC(cellGifDec, cellGifDecCreate); REG_FUNC(cellGifDec, cellGifDecExtCreate); REG_FUNC(cellGifDec, cellGifDecOpen); REG_FUNC(cellGifDec, cellGifDecReadHeader); REG_FUNC(cellGifDec, cellGifDecSetParameter); REG_FUNC(cellGifDec, cellGifDecDecodeData); REG_FUNC(cellGifDec, cellGifDecClose); REG_FUNC(cellGifDec, cellGifDecDestroy); REG_FUNC(cellGifDec, cellGifDecExtOpen); REG_FUNC(cellGifDec, cellGifDecExtReadHeader); REG_FUNC(cellGifDec, cellGifDecExtSetParameter); REG_FUNC(cellGifDec, cellGifDecExtDecodeData); });
17,882
C++
.cpp
559
29.389982
291
0.728499
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
5,197
cellSaveData.cpp
RPCS3_rpcs3/rpcs3/Emu/Cell/Modules/cellSaveData.cpp
#include "stdafx.h" #include "Emu/System.h" #include "Emu/VFS.h" #include "Emu/IdManager.h" #include "Emu/localized_string.h" #include "Emu/savestate_utils.hpp" #include "Emu/Cell/lv2/sys_fs.h" #include "Emu/Cell/lv2/sys_sync.h" #include "Emu/Cell/lv2/sys_process.h" #include "Emu/Cell/PPUModule.h" #include "Emu/Cell/Modules/cellSysutil.h" #include "Emu/Cell/Modules/cellUserInfo.h" #include "Emu/RSX/Overlays/overlay_message.h" #include "Emu/system_config.h" #include "cellSaveData.h" #include "cellMsgDialog.h" #include "Loader/PSF.h" #include "Utilities/StrUtil.h" #include "Utilities/date_time.h" #include <mutex> #include <algorithm> #include <span> #include "util/asm.hpp" LOG_CHANNEL(cellSaveData); template<> void fmt_class_string<CellSaveDataError>::format(std::string& out, u64 arg) { format_enum(out, arg, [](auto error) { switch (error) { STR_CASE(CELL_SAVEDATA_ERROR_CBRESULT); STR_CASE(CELL_SAVEDATA_ERROR_ACCESS_ERROR); STR_CASE(CELL_SAVEDATA_ERROR_INTERNAL); STR_CASE(CELL_SAVEDATA_ERROR_PARAM); STR_CASE(CELL_SAVEDATA_ERROR_NOSPACE); STR_CASE(CELL_SAVEDATA_ERROR_BROKEN); STR_CASE(CELL_SAVEDATA_ERROR_FAILURE); STR_CASE(CELL_SAVEDATA_ERROR_BUSY); STR_CASE(CELL_SAVEDATA_ERROR_NOUSER); STR_CASE(CELL_SAVEDATA_ERROR_SIZEOVER); STR_CASE(CELL_SAVEDATA_ERROR_NODATA); STR_CASE(CELL_SAVEDATA_ERROR_NOTSUPPORTED); } return unknown; }); } SaveDialogBase::~SaveDialogBase() { } std::string SaveDataEntry::date() const { return date_time::fmt_time("%c", mtime); } std::string SaveDataEntry::data_size() const { std::string metric = "KB"; u64 sz = utils::aligned_div(size, 1000); if (sz > 1000) { metric = "MB"; sz = utils::aligned_div(sz, 1000); } return fmt::format("%lu %s", sz, metric); } // cellSaveData aliases (only for cellSaveData.cpp) using PSetList = vm::ptr<CellSaveDataSetList>; using PSetBuf = vm::ptr<CellSaveDataSetBuf>; using PFuncFixed = vm::ptr<CellSaveDataFixedCallback>; using PFuncList = vm::ptr<CellSaveDataListCallback>; using PFuncStat = 
vm::ptr<CellSaveDataStatCallback>; using PFuncFile = vm::ptr<CellSaveDataFileCallback>; using PFuncDone = vm::ptr<CellSaveDataDoneCallback>; enum : u32 { SAVEDATA_OP_AUTO_SAVE = 0, SAVEDATA_OP_AUTO_LOAD = 1, SAVEDATA_OP_LIST_AUTO_SAVE = 2, SAVEDATA_OP_LIST_AUTO_LOAD = 3, SAVEDATA_OP_LIST_SAVE = 4, SAVEDATA_OP_LIST_LOAD = 5, SAVEDATA_OP_FIXED_SAVE = 6, SAVEDATA_OP_FIXED_LOAD = 7, SAVEDATA_OP_LIST_IMPORT = 9, SAVEDATA_OP_LIST_EXPORT = 10, SAVEDATA_OP_FIXED_IMPORT = 11, SAVEDATA_OP_FIXED_EXPORT = 12, SAVEDATA_OP_LIST_DELETE = 13, SAVEDATA_OP_FIXED_DELETE = 14, }; namespace { struct savedata_context { alignas(16) CellSaveDataCBResult result; alignas(16) CellSaveDataListGet listGet; alignas(16) CellSaveDataListSet listSet; alignas(16) CellSaveDataFixedSet fixedSet; alignas(16) CellSaveDataStatGet statGet; alignas(16) CellSaveDataStatSet statSet; alignas(16) CellSaveDataFileGet fileGet; alignas(16) CellSaveDataFileSet fileSet; alignas(16) CellSaveDataDoneGet doneGet; }; } vm::gvar<savedata_context> g_savedata_context; struct savedata_manager { semaphore<> mutex; atomic_t<bool> enable_overlay{false}; atomic_t<s32> last_cbresult_error_dialog{0}; // CBRESULT errors are negative }; int check_filename(std::string_view file_path, bool disallow_system_files, bool account_sfo_pfd) { if (file_path.size() >= CELL_SAVEDATA_FILENAME_SIZE) { // ****** sysutil savedata parameter error : 71 ****** return 71; } auto dotpos = file_path.find_last_of('.'); if (dotpos == umax) { // Point to end of string instead dotpos = file_path.size(); } if (file_path.empty() || dotpos > 8u || file_path.size() - dotpos > 4u) { // ****** sysutil savedata parameter error : 70 ****** return 70; } if (file_path == "."sv || (!account_sfo_pfd && (file_path == "PARAM.SFO"sv || file_path == "PARAM.PFD"sv))) { // ****** sysutil savedata parameter error : 70 ****** return 70; } char name[CELL_SAVEDATA_FILENAME_SIZE + 3]; if (dotpos) { // Copy file name std::span dst(name, dotpos + 1); strcpy_trunc(dst, file_path); 
// Allow multiple '.' even though sysutil_check_name_string does not std::replace(name, name + dotpos, '.', '-'); // Allow '_' at start even though sysutil_check_name_string does not if (name[0] == '_') { name[0] = '-'; } if (disallow_system_files && ((dotpos >= 5u && std::memcmp(name, "PARAM", 5) == 0) || (dotpos >= 4u && std::memcmp(name, "ICON", 4) == 0) || (dotpos >= 3u && std::memcmp(name, "PIC", 3) == 0) || (dotpos >= 3u && std::memcmp(name, "SND", 3) == 0))) { // ****** sysutil savedata parameter error : 70 ****** return 70; } // Check filename if (sysutil_check_name_string(name, 1, 9) == -1) { // ****** sysutil savedata parameter error : 70 ****** return 70; } } if (file_path.size() > dotpos + 1) { // Copy file extension std::span dst(name, file_path.size() - dotpos); strcpy_trunc(dst, file_path.substr(dotpos + 1)); // Allow '_' at start even though sysutil_check_name_string does not if (name[0] == '_') { name[0] = '-'; } // Check file extension if (sysutil_check_name_string(name, 1, 4) == -1) { // ****** sysutil savedata parameter error : 70 ****** return 70; } } return 0; } static std::vector<SaveDataEntry> get_save_entries(const std::string& base_dir, const std::string& prefix) { std::vector<SaveDataEntry> save_entries; if (base_dir.empty() || prefix.empty()) { return save_entries; } // get the saves matching the supplied prefix for (auto&& entry : fs::dir(base_dir)) { if (!entry.is_directory || sysutil_check_name_string(entry.name.c_str(), 1, CELL_SAVEDATA_DIRNAME_SIZE) != 0) { continue; } if (!entry.name.starts_with(prefix)) { continue; } // PSF parameters const psf::registry psf = psf::load_object(base_dir + entry.name + "/PARAM.SFO"); if (psf.empty()) { continue; } SaveDataEntry save_entry; save_entry.dirName = psf::get_string(psf, "SAVEDATA_DIRECTORY"); save_entry.listParam = psf::get_string(psf, "SAVEDATA_LIST_PARAM"); save_entry.title = psf::get_string(psf, "TITLE"); save_entry.subtitle = psf::get_string(psf, "SUB_TITLE"); save_entry.details = 
psf::get_string(psf, "DETAIL"); for (const auto& entry2 : fs::dir(base_dir + entry.name)) { if (entry2.is_directory || check_filename(vfs::unescape(entry2.name), false, true)) { continue; } save_entry.size += entry2.size; } save_entry.atime = entry.atime; save_entry.mtime = entry.mtime; save_entry.ctime = entry.ctime; if (fs::file icon{base_dir + entry.name + "/ICON0.PNG"}) save_entry.iconBuf = icon.to_vector<uchar>(); save_entry.isNew = false; save_entry.escaped = std::move(entry.name); save_entries.emplace_back(save_entry); } return save_entries; } static error_code select_and_delete(ppu_thread& ppu) { std::unique_lock hle_lock(g_fxo->get<hle_locks_t>(), std::try_to_lock); if (!hle_lock) { ppu.state += cpu_flag::again; return {}; } std::unique_lock lock(g_fxo->get<savedata_manager>().mutex, std::try_to_lock); if (!lock) { return CELL_SAVEDATA_ERROR_BUSY; } const std::string base_dir = vfs::get(fmt::format("/dev_hdd0/home/%08u/savedata/", Emu.GetUsrId())); auto save_entries = get_save_entries(base_dir, Emu.GetTitleID()); s32 selected = -1; s32 focused = -1; while (true) { // Yield before a blocking dialog is being spawned lv2_obj::sleep(ppu); // Display a blocking Save Data List asynchronously in the GUI thread. if (auto save_dialog = Emu.GetCallbacks().get_save_dialog()) { selected = save_dialog->ShowSaveDataList(save_entries, focused, SAVEDATA_OP_LIST_DELETE, vm::null, g_fxo->get<savedata_manager>().enable_overlay); } // Reschedule after a blocking dialog returns if (ppu.check_state()) { return 0; } // Abort if dialog was canceled or selection is invalid in this context if (selected < 0) { return CELL_CANCEL; } // Set focused entry for the next iteration focused = save_entries.empty() ? 
-1 : selected; // Get information from the selected entry SaveDataEntry entry = save_entries[selected]; const std::string info = entry.title + "\n" + entry.subtitle + "\n" + entry.details; // Reusable display message string std::string msg = get_localized_string(localized_string_id::CELL_SAVEDATA_DELETE_CONFIRMATION, info.c_str()); // Yield before a blocking dialog is being spawned lv2_obj::sleep(ppu); // Get user confirmation by opening a blocking dialog s32 return_code = CELL_MSGDIALOG_BUTTON_NONE; error_code res = open_msg_dialog(true, CELL_MSGDIALOG_TYPE_SE_TYPE_NORMAL | CELL_MSGDIALOG_TYPE_BUTTON_TYPE_YESNO, vm::make_str(msg), msg_dialog_source::_cellSaveData, vm::null, vm::null, vm::null, &return_code); // Reschedule after a blocking dialog returns if (ppu.check_state()) { return 0; } if (res != CELL_OK) { return CELL_SAVEDATA_ERROR_INTERNAL; } if (return_code == CELL_MSGDIALOG_BUTTON_YES) { // Remove directory const std::string path = base_dir + save_entries[selected].escaped; fs::remove_all(path); // Remove entry from the list and reset the selection save_entries.erase(save_entries.cbegin() + selected); selected = -1; // Reset the focused index if the new list is empty if (save_entries.empty()) { focused = -1; } // Update display message msg = get_localized_string(localized_string_id::CELL_SAVEDATA_DELETE_SUCCESS, info.c_str()); cellSaveData.success("%s", msg); // Yield before blocking dialog is being spawned lv2_obj::sleep(ppu); // Display success message by opening a blocking dialog (return value should be irrelevant here) res = open_msg_dialog(true, CELL_MSGDIALOG_TYPE_SE_TYPE_NORMAL | CELL_MSGDIALOG_TYPE_BUTTON_TYPE_OK, vm::make_str(msg), msg_dialog_source::_cellSaveData); // Reschedule after blocking dialog returns if (ppu.check_state()) { return 0; } } } return CELL_CANCEL; } // Displays a CellSaveDataCBResult error message. 
// Maps a guest callback result code (CellSaveDataCBResult::result) to a
// localized error message and, unless the errDialog mode suppresses it, shows
// that message in a blocking dialog. Normally ends in
// CELL_SAVEDATA_ERROR_CBRESULT; returns CELL_SAVEDATA_ERROR_PARAM for unknown
// result codes and 0 when check_state() interrupts the PPU thread.
static error_code display_callback_result_error_message(ppu_thread& ppu, const CellSaveDataCBResult& result, u32 errDialog)
{
	std::string msg;
	bool use_invalid_message = false;

	switch (result.result)
	{
	case CELL_SAVEDATA_CBRESULT_ERR_NOSPACE:
		msg = get_localized_string(localized_string_id::CELL_SAVEDATA_CB_NO_SPACE, fmt::format("%d", result.errNeedSizeKB).c_str());
		break;
	case CELL_SAVEDATA_CBRESULT_ERR_FAILURE:
		msg = get_localized_string(localized_string_id::CELL_SAVEDATA_CB_FAILURE);
		break;
	case CELL_SAVEDATA_CBRESULT_ERR_BROKEN:
		msg = get_localized_string(localized_string_id::CELL_SAVEDATA_CB_BROKEN);
		break;
	case CELL_SAVEDATA_CBRESULT_ERR_NODATA:
		msg = get_localized_string(localized_string_id::CELL_SAVEDATA_CB_NO_DATA);
		break;
	case CELL_SAVEDATA_CBRESULT_ERR_INVALID:
		// The game may supply its own message text for this result code
		if (result.invalidMsg)
			use_invalid_message = true;
		break;
	default:
		// ****** sysutil savedata parameter error : 22 ******
		return {CELL_SAVEDATA_ERROR_PARAM, "22"};
	}

	// NOREPEAT suppresses the dialog only when the same result code was shown last time
	if (errDialog == CELL_SAVEDATA_ERRDIALOG_NONE || (errDialog == CELL_SAVEDATA_ERRDIALOG_NOREPEAT && result.result == g_fxo->get<savedata_manager>().last_cbresult_error_dialog.exchange(result.result)))
	{
		// TODO: Find out if the "last error" is always tracked or only when NOREPEAT is set
		return CELL_SAVEDATA_ERROR_CBRESULT;
	}

	// Yield before a blocking dialog is being spawned
	lv2_obj::sleep(ppu);

	// Get user confirmation by opening a blocking dialog (return value should be irrelevant here)
	[[maybe_unused]] error_code res = open_msg_dialog(true, CELL_MSGDIALOG_TYPE_SE_TYPE_NORMAL | CELL_MSGDIALOG_TYPE_BUTTON_TYPE_OK, use_invalid_message ? result.invalidMsg : vm::make_str(msg), msg_dialog_source::_cellSaveData);

	// Reschedule after a blocking dialog returns
	if (ppu.check_state())
	{
		return 0;
	}

	return CELL_SAVEDATA_ERROR_CBRESULT;
}

// Builds the localized confirmation message (title, subtitle, date, size and
// details of the entry) for the delete/load/save list and fixed operations.
// Returns an empty string for any other operation.
static std::string get_confirmation_message(u32 operation, const SaveDataEntry& entry)
{
	const std::string info = fmt::format("%s\n%s\n%s\n%s\n\n%s", entry.title, entry.subtitle, entry.date(), entry.data_size(), entry.details);

	if (operation == SAVEDATA_OP_LIST_DELETE || operation == SAVEDATA_OP_FIXED_DELETE)
	{
		return get_localized_string(localized_string_id::CELL_SAVEDATA_DELETE, info.c_str());
	}
	else if (operation == SAVEDATA_OP_LIST_LOAD || operation == SAVEDATA_OP_FIXED_LOAD)
	{
		return get_localized_string(localized_string_id::CELL_SAVEDATA_LOAD, info.c_str());
	}
	else if (operation == SAVEDATA_OP_LIST_SAVE || operation == SAVEDATA_OP_FIXED_SAVE)
	{
		return get_localized_string(localized_string_id::CELL_SAVEDATA_OVERWRITE, info.c_str());
	}

	return "";
}

// Validates the shared argument set of the cellSaveData entry points before
// any work is done. Returns CELL_OK on success, or the numeric firmware
// "sysutil savedata parameter error" code identifying the offending argument
// (the codes mirror the ****** comments below).
static s32 savedata_check_args(u32 operation, u32 version, vm::cptr<char> dirName, u32 errDialog, PSetList setList, PSetBuf setBuf, PFuncList funcList, PFuncFixed funcFixed, PFuncStat funcStat, PFuncFile funcFile, u32 /*container*/, u32 unk_op_flags, vm::ptr<void> /*userdata*/, u32 userId, PFuncDone funcDone)
{
	if (version > CELL_SAVEDATA_VERSION_420)
	{
		// ****** sysutil savedata parameter error : 1 ******
		return 1;
	}

	if (errDialog > CELL_SAVEDATA_ERRDIALOG_NOREPEAT)
	{
		// ****** sysutil savedata parameter error : 5 ******
		return 5;
	}

	// Operations addressing a single directory require a valid dirName
	if (operation <= SAVEDATA_OP_AUTO_LOAD || operation == SAVEDATA_OP_FIXED_IMPORT || operation == SAVEDATA_OP_FIXED_EXPORT)
	{
		if (!dirName)
		{
			// ****** sysutil savedata parameter error : 2 ******
			return 2;
		}

		switch (sysutil_check_name_string(dirName.get_ptr(), 1, CELL_SAVEDATA_DIRNAME_SIZE))
		{
		case -1:
		{
			// ****** sysutil savedata parameter error : 3 ******
			return 3;
		}
		case -2:
		{
			// ****** sysutil savedata parameter error : 4 ******
			return 4;
		}
		case 0: break;
		default: fmt::throw_exception("Unreachable");
		}
	}

	// List-based operations require a valid setList
	if ((operation >= SAVEDATA_OP_LIST_AUTO_SAVE && operation <= SAVEDATA_OP_FIXED_LOAD) || operation == SAVEDATA_OP_LIST_IMPORT || operation == SAVEDATA_OP_LIST_EXPORT || operation == SAVEDATA_OP_LIST_DELETE || operation == SAVEDATA_OP_FIXED_DELETE)
	{
		if (!setList)
		{
			// ****** sysutil savedata parameter error : 11 ******
			return 11;
		}

		if (setList->sortType > CELL_SAVEDATA_SORTTYPE_SUBTITLE)
		{
			// ****** sysutil savedata parameter error : 12 ******
			return 12;
		}

		if (setList->sortOrder > CELL_SAVEDATA_SORTORDER_ASCENT)
		{
			// ****** sysutil savedata parameter error : 13 ******
			return 13;
		}

		if (!setList->dirNamePrefix)
		{
			// ****** sysutil savedata parameter error : 15 ******
			return 15;
		}

		// The prefix buffer must be NUL-terminated; newer SDKs also reject an empty prefix
		if (!memchr(setList->dirNamePrefix.get_ptr(), '\0', CELL_SAVEDATA_PREFIX_SIZE) || (g_ps3_process_info.sdk_ver > 0x3FFFFF && !setList->dirNamePrefix[0]))
		{
			// ****** sysutil savedata parameter error : 17 ******
			return 17;
		}

		const bool allow_asterisk = (operation == SAVEDATA_OP_LIST_DELETE); // TODO: SAVEDATA_OP_FIXED_DELETE ?

		// Validate each '|'-separated prefix, unless the whole prefix string is
		// exactly "*" and asterisks are allowed for this operation
		if (!allow_asterisk || !(setList->dirNamePrefix[0] == '*' && setList->dirNamePrefix[1] == '\0'))
		{
			char cur, buf[CELL_SAVEDATA_DIRNAME_SIZE + 1]{};

			for (s32 pos = 0, posprefix = 0; cur = setList->dirNamePrefix[pos++], true;)
			{
				if (cur == '\0' || cur == '|')
				{
					// Check prefix if not empty
					if (posprefix)
					{
						switch (sysutil_check_name_string(buf, 1, CELL_SAVEDATA_DIRNAME_SIZE))
						{
						case -1:
						{
							// ****** sysutil savedata parameter error : 16 ******
							return 16;
						}
						case -2:
						{
							// ****** sysutil savedata parameter error : 17 ******
							return 17;
						}
						case 0: break;
						default: fmt::throw_exception("Unreachable");
						}
					}

					if (cur == '\0')
					{
						break;
					}

					// Note: no need to reset buffer, only position
					posprefix = 0;
					continue;
				}

				if (posprefix == CELL_SAVEDATA_DIRNAME_SIZE)
				{
					// ****** sysutil savedata parameter error : 17 ******
					return 17;
				}

				buf[posprefix++] = cur;
			}
		}

		if (setList->reserved)
		{
			// ****** sysutil savedata parameter error : 14 ******
			return 14;
		}
	}

	if (operation >= SAVEDATA_OP_LIST_IMPORT && operation <= SAVEDATA_OP_FIXED_EXPORT)
	{
		if (!funcDone || userId > CELL_SYSUTIL_USERID_MAX)
		{
			// ****** sysutil savedata parameter error : 137 ******
			return 137;
		}

		// There are no more parameters to check for the import and export functions.
		return CELL_OK;
	}

	if (!setBuf)
	{
		// ****** sysutil savedata parameter error : 74 ******
		return 74;
	}

	if ((operation >= SAVEDATA_OP_LIST_AUTO_SAVE && operation <= SAVEDATA_OP_FIXED_LOAD) || operation == SAVEDATA_OP_LIST_DELETE || operation == SAVEDATA_OP_FIXED_DELETE)
	{
		if (setBuf->dirListMax > CELL_SAVEDATA_DIRLIST_MAX)
		{
			// ****** sysutil savedata parameter error : 8 ******
			return 8;
		}

		CHECK_SIZE(CellSaveDataDirList, 48);

		// The guest buffer must be able to hold the requested directory list
		if (setBuf->dirListMax * sizeof(CellSaveDataDirList) > setBuf->bufSize)
		{
			// ****** sysutil savedata parameter error : 7 ******
			return 7;
		}
	}

	CHECK_SIZE(CellSaveDataFileStat, 56);

	if (operation == SAVEDATA_OP_LIST_DELETE || operation == SAVEDATA_OP_FIXED_DELETE)
	{
		// Delete operations never enumerate files
		if (setBuf->fileListMax != 0u)
		{
			// ****** sysutil savedata parameter error : 9 ******
			return 9;
		}
	}
	else if (setBuf->fileListMax * sizeof(CellSaveDataFileStat) > setBuf->bufSize)
	{
		// ****** sysutil savedata parameter error : 7 ******
		return 7;
	}

	if (setBuf->bufSize && !setBuf->buf)
	{
		// ****** sysutil savedata parameter error : 6 ******
		return 6;
	}

	for (auto resv : setBuf->reserved)
	{
		if (resv)
		{
			// ****** sysutil savedata parameter error : 10 ******
			return 10;
		}
	}

	if ((operation == SAVEDATA_OP_LIST_SAVE || operation == SAVEDATA_OP_LIST_LOAD || operation == SAVEDATA_OP_LIST_DELETE) && !funcList)
	{
		// ****** sysutil savedata parameter error : 18 ******
		return 18;
	}

	if ((operation == SAVEDATA_OP_FIXED_SAVE || operation == SAVEDATA_OP_FIXED_LOAD || operation == SAVEDATA_OP_LIST_AUTO_LOAD || operation == SAVEDATA_OP_LIST_AUTO_SAVE || operation == SAVEDATA_OP_FIXED_DELETE) && !funcFixed)
	{
		// ****** sysutil savedata parameter error : 19 ******
		return 19;
	}

	// NOTE: funcStat and funcFile are not present in the delete functions. unk_op_flags is 0x2 for SAVEDATA_OP_FIXED_DELETE, but I added the redundant check anyway for clarity.
	if (operation != SAVEDATA_OP_LIST_DELETE && operation != SAVEDATA_OP_FIXED_DELETE && (!(unk_op_flags & 0x2) || operation == SAVEDATA_OP_AUTO_SAVE || operation == SAVEDATA_OP_AUTO_LOAD))
	{
		if (!funcStat)
		{
			// ****** sysutil savedata parameter error : 20 ******
			return 20;
		}

		if (!(unk_op_flags & 0x2) && !funcFile)
		{
			// ****** sysutil savedata parameter error : 18 ******
			return 18;
		}
	}

	if (userId > CELL_SYSUTIL_USERID_MAX)
	{
		// ****** sysutil savedata parameter error : 91 ******
		return 91;
	}

	return CELL_OK;
}

// Common worker behind the cellSaveData list/fixed/auto save, load and delete
// entry points. Validates arguments, serializes access to the savedata
// manager, and then drives the guest callbacks (funcList/funcFixed/funcStat/
// funcFile/funcDone) against the selected savedata directory.
static NEVER_INLINE error_code savedata_op(ppu_thread& ppu, u32 operation, u32 version, vm::cptr<char> dirName, u32 errDialog, PSetList setList, PSetBuf setBuf, PFuncList funcList, PFuncFixed funcFixed, PFuncStat funcStat, PFuncFile funcFile, u32 container, u32 unk_op_flags /*TODO*/, vm::ptr<void> userdata, u32 userId, PFuncDone funcDone)
{
	// Log the guest-provided structures if they are readable
	if (const auto& [ok, list] = setList.try_read(); ok)
		cellSaveData.notice("savedata_op(): setList = { .sortType=%d, .sortOrder=%d, .dirNamePrefix='%s' }", list.sortType, list.sortOrder, list.dirNamePrefix);

	if (const auto& [ok, buf] = setBuf.try_read(); ok)
		cellSaveData.notice("savedata_op(): setBuf = { .dirListMax=%d, .fileListMax=%d, .bufSize=%d }", buf.dirListMax, buf.fileListMax, buf.bufSize);

	if (const auto ecode = savedata_check_args(operation, version, dirName, errDialog, setList, setBuf, funcList, funcFixed, funcStat, funcFile, container, unk_op_flags, userdata, userId, funcDone))
	{
		return {CELL_SAVEDATA_ERROR_PARAM, " (error %d)", ecode};
	}

	// Take the HLE lock opportunistically; if contended, reschedule this call
	std::unique_lock hle_lock(g_fxo->get<hle_locks_t>(), std::try_to_lock);

	if (!hle_lock)
	{
		ppu.state += cpu_flag::again;
		return {};
	}

	// Only one savedata operation may run at a time
	std::unique_lock lock(g_fxo->get<savedata_manager>().mutex, std::try_to_lock);

	if (!lock)
	{
		return CELL_SAVEDATA_ERROR_BUSY;
	}

	// Simulate idle time while data is being sent to VSH
	const auto lv2_sleep = [](ppu_thread& ppu, usz sleep_time)
	{
		lv2_obj::sleep(ppu);
		lv2_obj::wait_timeout(sleep_time);
		ppu.check_state();
	};

	lv2_sleep(ppu, 500);
std::memset(g_savedata_context.get_ptr(), 0, g_savedata_context.size()); vm::ptr<CellSaveDataCBResult> result = g_savedata_context.ptr(&savedata_context::result); vm::ptr<CellSaveDataListGet> listGet = g_savedata_context.ptr(&savedata_context::listGet); vm::ptr<CellSaveDataListSet> listSet = g_savedata_context.ptr(&savedata_context::listSet); vm::ptr<CellSaveDataFixedSet> fixedSet = g_savedata_context.ptr(&savedata_context::fixedSet); vm::ptr<CellSaveDataStatGet> statGet = g_savedata_context.ptr(&savedata_context::statGet); vm::ptr<CellSaveDataStatSet> statSet = g_savedata_context.ptr(&savedata_context::statSet); vm::ptr<CellSaveDataFileGet> fileGet = g_savedata_context.ptr(&savedata_context::fileGet); vm::ptr<CellSaveDataFileSet> fileSet = g_savedata_context.ptr(&savedata_context::fileSet); vm::ptr<CellSaveDataDoneGet> doneGet = g_savedata_context.ptr(&savedata_context::doneGet); // userId(0) = CELL_SYSUTIL_USERID_CURRENT; // path of the specified user (00000001 by default) const std::string base_dir = vfs::get(fmt::format("/dev_hdd0/home/%08u/savedata/", userId ? userId : Emu.GetUsrId())); if (userId && !fs::is_dir(base_dir)) { return CELL_SAVEDATA_ERROR_NOUSER; } result->userdata = userdata; // probably should be assigned only once (allows the callback to change it) SaveDataEntry save_entry; if (setList) { std::vector<SaveDataEntry> save_entries; listGet->dirNum = 0; listGet->dirListNum = 0; listGet->dirList.set(setBuf->buf.addr()); std::memset(listGet->reserved, 0, sizeof(listGet->reserved)); std::vector<std::string> prefix_list = fmt::split(setList->dirNamePrefix.get_ptr(), {"|"}); // if prefix_list is empty game wants to check all savedata if (prefix_list.empty() && (operation == SAVEDATA_OP_LIST_LOAD || operation == SAVEDATA_OP_FIXED_LOAD)) { cellSaveData.notice("savedata_op(): dirNamePrefix is empty. Listing all entries. 
operation=%d", operation); prefix_list = {""}; } // if prefix_list only contains an asterisk the game wants to check all savedata const bool allow_asterisk = (operation == SAVEDATA_OP_LIST_DELETE); // TODO: SAVEDATA_OP_FIXED_DELETE ? if (allow_asterisk && prefix_list.size() == 1 && prefix_list.front() == "*") { cellSaveData.notice("savedata_op(): dirNamePrefix is '*'. Listing all entries starting with '%s'. operation=%d", Emu.GetTitleID(), operation); prefix_list.front() = Emu.GetTitleID(); // TODO: Let's be cautious for now and only list savedata starting with this game's ID //prefix_list.front().clear(); // List savedata of all the games of this user } // get the saves matching the supplied prefix for (auto&& entry : fs::dir(base_dir)) { if (!entry.is_directory || sysutil_check_name_string(entry.name.c_str(), 1, CELL_SAVEDATA_DIRNAME_SIZE) != 0) { continue; } for (const std::string& prefix : prefix_list) { if (entry.name.starts_with(prefix)) { // Count the amount of matches and the amount of listed directories if (!listGet->dirNum++) // total number of directories { // Clear buf exactly to bufSize only if dirNum becomes non-zero (regardless of dirListNum) std::memset(setBuf->buf.get_ptr(), 0, setBuf->bufSize); } if (listGet->dirListNum < setBuf->dirListMax) { listGet->dirListNum++; // number of directories in list // PSF parameters const psf::registry psf = psf::load_object(base_dir + entry.name + "/PARAM.SFO"); if (psf.empty()) { break; } SaveDataEntry save_entry2; save_entry2.dirName = psf::get_string(psf, "SAVEDATA_DIRECTORY"); save_entry2.listParam = psf::get_string(psf, "SAVEDATA_LIST_PARAM"); save_entry2.title = psf::get_string(psf, "TITLE"); save_entry2.subtitle = psf::get_string(psf, "SUB_TITLE"); save_entry2.details = psf::get_string(psf, "DETAIL"); for (const auto& entry2 : fs::dir(base_dir + entry.name)) { if (entry2.is_directory || check_filename(vfs::unescape(entry2.name), false, true)) { continue; } save_entry2.size += entry2.size; } 
save_entry2.atime = entry.atime; save_entry2.mtime = entry.mtime; save_entry2.ctime = entry.ctime; if (fs::file icon{base_dir + entry.name + "/ICON0.PNG"}) save_entry2.iconBuf = icon.to_vector<uchar>(); save_entry2.isNew = false; save_entry2.escaped = std::move(entry.name); save_entries.emplace_back(save_entry2); } break; } } } // Sort the entries { const u32 order = setList->sortOrder; const u32 type = setList->sortType; std::sort(save_entries.begin(), save_entries.end(), [=](const SaveDataEntry& entry1, const SaveDataEntry& entry2) { if (order == CELL_SAVEDATA_SORTORDER_DESCENT && type == CELL_SAVEDATA_SORTTYPE_MODIFIEDTIME) { return entry1.mtime >= entry2.mtime; } if (order == CELL_SAVEDATA_SORTORDER_DESCENT && type == CELL_SAVEDATA_SORTTYPE_SUBTITLE) { return entry1.subtitle >= entry2.subtitle; } if (order == CELL_SAVEDATA_SORTORDER_ASCENT && type == CELL_SAVEDATA_SORTTYPE_MODIFIEDTIME) { return entry1.mtime < entry2.mtime; } if (order == CELL_SAVEDATA_SORTORDER_ASCENT && type == CELL_SAVEDATA_SORTTYPE_SUBTITLE) { return entry1.subtitle < entry2.subtitle; } return true; }); } // Fill the listGet->dirList array auto dir_list = listGet->dirList.get_ptr(); for (const auto& entry : save_entries) { auto& dir = *dir_list++; strcpy_trunc(dir.dirName, entry.dirName); strcpy_trunc(dir.listParam, entry.listParam); } s32 selected = -1; s32 focused = -1; if (funcList) { listSet->focusPosition = CELL_SAVEDATA_FOCUSPOS_LISTHEAD; std::memset(result.get_ptr(), 0, ::offset32(&CellSaveDataCBResult::userdata)); // List Callback funcList(ppu, result, listGet, listSet); if (const s32 res = result->result; res != CELL_SAVEDATA_CBRESULT_OK_NEXT) { cellSaveData.warning("savedata_op(): funcList returned result=%d.", result->result); // if the callback has returned ok, lets return OK. // typically used at game launch when no list is actually required. 
// CELL_SAVEDATA_CBRESULT_OK_LAST_NOCONFIRM is only valid for funcFile and funcDone if (result->result == CELL_SAVEDATA_CBRESULT_OK_LAST || result->result == CELL_SAVEDATA_CBRESULT_OK_LAST_NOCONFIRM) { return CELL_OK; } return display_callback_result_error_message(ppu, *result, errDialog); } if (listSet->fixedListNum > CELL_SAVEDATA_LISTITEM_MAX) { // ****** sysutil savedata parameter error : 38 ****** return {CELL_SAVEDATA_ERROR_PARAM, "38 (fixedListNum=%d)", listSet->fixedListNum}; } if (listSet->fixedListNum && !listSet->fixedList) { // ****** sysutil savedata parameter error : 39 ****** return {CELL_SAVEDATA_ERROR_PARAM, "39"}; } else { // TODO: What happens if fixedListNum is zero? } std::set<std::string_view> selected_list; for (u32 i = 0; i < listSet->fixedListNum; i++) { switch (sysutil_check_name_string(listSet->fixedList[i].dirName, 1, CELL_SAVEDATA_DIRNAME_SIZE)) { case -1: { // ****** sysutil savedata parameter error : 40 ****** return {CELL_SAVEDATA_ERROR_PARAM, "40"}; } case -2: { if (listSet->fixedList[i].dirName[0]) // ??? 
{ // ****** sysutil savedata parameter error : 41 ****** return {CELL_SAVEDATA_ERROR_PARAM, "41"}; } break; } case 0: break; default: fmt::throw_exception("Unreachable"); } selected_list.emplace(listSet->fixedList[i].dirName); } // Clean save data list save_entries.erase(std::remove_if(save_entries.begin(), save_entries.end(), [&selected_list](const SaveDataEntry& entry) -> bool { return selected_list.count(entry.dirName) == 0; }), save_entries.end()); if (listSet->newData) { switch (listSet->newData->iconPosition) { case CELL_SAVEDATA_ICONPOS_HEAD: case CELL_SAVEDATA_ICONPOS_TAIL: break; default: { // ****** sysutil savedata parameter error : 43 ****** return {CELL_SAVEDATA_ERROR_PARAM, "43 (iconPosition=0x%x)", listSet->newData->iconPosition}; } } if (!listSet->newData->dirName) { // ****** sysutil savedata parameter error : 44 ****** return {CELL_SAVEDATA_ERROR_PARAM, "44"}; } switch (sysutil_check_name_string(listSet->newData->dirName.get_ptr(), 1, CELL_SAVEDATA_DIRNAME_SIZE)) { case -1: { // ****** sysutil savedata parameter error : 45 ****** return {CELL_SAVEDATA_ERROR_PARAM, "45"}; } case -2: { if (listSet->newData->dirName[0]) // ??? { // ****** sysutil savedata parameter error : 4 ****** return {CELL_SAVEDATA_ERROR_PARAM, "46"}; } break; } case 0: break; default: fmt::throw_exception("Unreachable"); } } switch (const u32 pos_type = listSet->focusPosition) { case CELL_SAVEDATA_FOCUSPOS_DIRNAME: { if (!listSet->focusDirName) { // ****** sysutil savedata parameter error : 35 ****** return {CELL_SAVEDATA_ERROR_PARAM, "35"}; } switch (sysutil_check_name_string(listSet->focusDirName.get_ptr(), 1, CELL_SAVEDATA_DIRNAME_SIZE)) { case -1: { // ****** sysutil savedata parameter error : 36 ****** return {CELL_SAVEDATA_ERROR_PARAM, "36"}; } case -2: { if (listSet->focusDirName[0]) // ??? 
{ // ****** sysutil savedata parameter error : 37 ****** return {CELL_SAVEDATA_ERROR_PARAM, "37"}; } break; } case 0: break; default: fmt::throw_exception("Unreachable"); } const std::string dirStr = listSet->focusDirName.get_ptr(); for (u32 i = 0; i < save_entries.size(); i++) { if (save_entries[i].dirName == dirStr) { focused = i; break; } } break; } case CELL_SAVEDATA_FOCUSPOS_LISTHEAD: { focused = save_entries.empty() ? -1 : 0; break; } case CELL_SAVEDATA_FOCUSPOS_LISTTAIL: { focused = ::size32(save_entries) - 1; break; } case CELL_SAVEDATA_FOCUSPOS_LATEST: { s64 max = smin; for (u32 i = 0; i < save_entries.size(); i++) { if (save_entries[i].mtime > max) { focused = i; max = save_entries[i].mtime; } } break; } case CELL_SAVEDATA_FOCUSPOS_OLDEST: { s64 min = smax; for (u32 i = 0; i < save_entries.size(); i++) { if (save_entries[i].mtime < min) { focused = i; min = save_entries[i].mtime; } } break; } case CELL_SAVEDATA_FOCUSPOS_NEWDATA: { if (!listSet->newData) { // ****** sysutil savedata parameter error : 34 ****** cellSaveData.error("savedata_op(): listSet->newData is null while listSet->focusPosition is NEWDATA"); return {CELL_SAVEDATA_ERROR_PARAM, "34"}; } if (listSet->newData->iconPosition == CELL_SAVEDATA_ICONPOS_TAIL) { focused = ::size32(save_entries); } else { focused = 0; } break; } default: { // ****** sysutil savedata parameter error : 34 ****** cellSaveData.error("savedata_op(): unknown listSet->focusPosition (0x%x)", pos_type); return {CELL_SAVEDATA_ERROR_PARAM, "34"}; } } } auto delete_save = [&]() { strcpy_trunc(doneGet->dirName, save_entries[selected].dirName); doneGet->hddFreeSizeKB = 40 * 1024 * 1024 - 256; // Read explanation in cellHddGameCheck doneGet->excResult = CELL_OK; std::memset(doneGet->reserved, 0, sizeof(doneGet->reserved)); const std::string old_path = base_dir + ".backup_" + save_entries[selected].escaped + "/"; const std::string del_path = base_dir + save_entries[selected].escaped + "/"; const fs::dir _dir(del_path); u64 
size_bytes = 0; for (auto&& file : _dir) { if (!file.is_directory) { size_bytes += utils::align(file.size, 1024); } } doneGet->sizeKB = ::narrow<s32>(size_bytes / 1024); if (_dir) { // Remove old backup fs::remove_all(old_path); // Remove savedata by renaming if (!vfs::host::rename(del_path, old_path, &g_mp_sys_dev_hdd0, false)) { fmt::throw_exception("Failed to move directory %s (%s)", del_path, fs::g_tls_error); } // Cleanup fs::remove_all(old_path); } else { doneGet->excResult = CELL_SAVEDATA_ERROR_NODATA; } std::memset(result.get_ptr(), 0, ::offset32(&CellSaveDataCBResult::userdata)); if (!funcDone) { // TODO: return CELL_SAVEDATA_ERROR_PARAM at the correct location fmt::throw_exception("cellSaveData: funcDone is nullptr. operation=%d", operation); } funcDone(ppu, result, doneGet); }; while (funcList) { // Yield before a blocking dialog is being spawned lv2_obj::sleep(ppu); // Display a blocking Save Data List asynchronously in the GUI thread. if (auto save_dialog = Emu.GetCallbacks().get_save_dialog()) { selected = save_dialog->ShowSaveDataList(save_entries, focused, operation, listSet, g_fxo->get<savedata_manager>().enable_overlay); } else { selected = -2; } // Reschedule after a blocking dialog returns if (ppu.check_state()) { return 0; } // Cancel selected in UI if (selected == -2) { return CELL_CANCEL; } std::string message; // UI returns -1 for new save games if (selected == -1) { message = get_localized_string(localized_string_id::CELL_SAVEDATA_SAVE_CONFIRMATION); save_entry.dirName = listSet->newData->dirName.get_ptr(); save_entry.escaped = vfs::escape(save_entry.dirName); } else { // Get information from the selected entry SaveDataEntry entry = save_entries[selected]; message = get_confirmation_message(operation, entry); } // Yield before a blocking dialog is being spawned lv2_obj::sleep(ppu); // Get user confirmation by opening a blocking dialog s32 return_code = CELL_MSGDIALOG_BUTTON_NONE; error_code res = open_msg_dialog(true, 
CELL_MSGDIALOG_TYPE_SE_TYPE_NORMAL | CELL_MSGDIALOG_TYPE_BUTTON_TYPE_YESNO, vm::make_str(message), msg_dialog_source::_cellSaveData, vm::null, vm::null, vm::null, &return_code); // Reschedule after a blocking dialog returns if (ppu.check_state()) { return 0; } if (res != CELL_OK) { return CELL_SAVEDATA_ERROR_INTERNAL; } if (return_code != CELL_MSGDIALOG_BUTTON_YES) { if (selected >= 0) { focused = selected; } continue; } if (operation == SAVEDATA_OP_LIST_DELETE) { delete_save(); if (const s32 res = result->result; res != CELL_SAVEDATA_CBRESULT_OK_NEXT) { cellSaveData.warning("savedata_op(): funcDone returned result=%d.", res); if (res == CELL_SAVEDATA_CBRESULT_OK_LAST || res == CELL_SAVEDATA_CBRESULT_OK_LAST_NOCONFIRM) { return CELL_OK; } return display_callback_result_error_message(ppu, *result, errDialog); } // CELL_SAVEDATA_CBRESULT_OK_NEXT expected save_entries.erase(save_entries.cbegin() + selected); focused = save_entries.empty() ? -1 : selected; selected = -1; continue; } break; } if (funcFixed) { lv2_sleep(ppu, 250); std::memset(result.get_ptr(), 0, ::offset32(&CellSaveDataCBResult::userdata)); // Fixed Callback funcFixed(ppu, result, listGet, fixedSet); if (const s32 res = result->result; res != CELL_SAVEDATA_CBRESULT_OK_NEXT) { cellSaveData.warning("savedata_op(): funcFixed returned result=%d.", res); // skip all following steps if OK_LAST (NOCONFIRM is not allowed) if (res == CELL_SAVEDATA_CBRESULT_OK_LAST) { return CELL_OK; } return display_callback_result_error_message(ppu, *result, errDialog); } if (!fixedSet->dirName) { // ****** sysutil savedata parameter error : 26 ****** return {CELL_SAVEDATA_ERROR_PARAM, "26"}; } switch (sysutil_check_name_string(fixedSet->dirName.get_ptr(), 1, CELL_SAVEDATA_DIRNAME_SIZE)) { case -1: { // ****** sysutil savedata parameter error : 27 ****** return {CELL_SAVEDATA_ERROR_PARAM, "27"}; } case -2: { // ****** sysutil savedata parameter error : 28 ****** return {CELL_SAVEDATA_ERROR_PARAM, "28"}; } case 0: break; 
default: fmt::throw_exception("Unreachable"); } const std::string dirStr = fixedSet->dirName.get_ptr(); for (u32 i = 0; i < save_entries.size(); i++) { if (save_entries[i].dirName == dirStr) { selected = i; break; } } switch (fixedSet->option) { case CELL_SAVEDATA_OPTION_NONE: { if (operation != SAVEDATA_OP_FIXED_SAVE && operation != SAVEDATA_OP_FIXED_LOAD && operation != SAVEDATA_OP_FIXED_DELETE) { lv2_sleep(ppu, 30000); break; } std::string message; if (selected == -1) { message = get_localized_string(localized_string_id::CELL_SAVEDATA_SAVE_CONFIRMATION); } else { // Get information from the selected entry SaveDataEntry entry = save_entries[selected]; message = get_confirmation_message(operation, entry); } // Yield before a blocking dialog is being spawned lv2_obj::sleep(ppu); // Get user confirmation by opening a blocking dialog s32 return_code = CELL_MSGDIALOG_BUTTON_NONE; error_code res = open_msg_dialog(true, CELL_MSGDIALOG_TYPE_SE_TYPE_NORMAL | CELL_MSGDIALOG_TYPE_BUTTON_TYPE_YESNO, vm::make_str(message), msg_dialog_source::_cellSaveData, vm::null, vm::null, vm::null, &return_code); // Reschedule after a blocking dialog returns if (ppu.check_state()) { return {}; } if (res != CELL_OK) { return CELL_SAVEDATA_ERROR_INTERNAL; } if (return_code != CELL_MSGDIALOG_BUTTON_YES) { return CELL_CANCEL; } break; } case CELL_SAVEDATA_OPTION_NOCONFIRM: lv2_sleep(ppu, 30000); break; default : // ****** sysutil savedata parameter error : 81 ****** return {CELL_SAVEDATA_ERROR_PARAM, "81 (option=0x%x)", fixedSet->option}; } if (selected == -1) { save_entry.dirName = dirStr; save_entry.escaped = vfs::escape(save_entry.dirName); } if (operation == SAVEDATA_OP_FIXED_DELETE) { delete_save(); if (const s32 res = result->result; res != CELL_SAVEDATA_CBRESULT_OK_NEXT) { cellSaveData.warning("savedata_op(): funcDone returned result=%d.", res); if (res == CELL_SAVEDATA_CBRESULT_OK_LAST || res == CELL_SAVEDATA_CBRESULT_OK_LAST_NOCONFIRM) { return CELL_OK; } return 
display_callback_result_error_message(ppu, *result, errDialog); } return CELL_OK; } } if (listGet->dirNum) { // Clear buf exactly to bufSize again after funcFixed/List (for funcStat) std::memset(setBuf->buf.get_ptr(), 0, setBuf->bufSize); } if (selected >= 0) { if (static_cast<u32>(selected) < save_entries.size()) { save_entry.dirName = std::move(save_entries[selected].dirName); save_entry.escaped = vfs::escape(save_entry.dirName); } else { fmt::throw_exception("Invalid savedata selected"); } } } if (dirName) { save_entry.dirName = dirName.get_ptr(); save_entry.escaped = vfs::escape(save_entry.dirName); } const std::string dir_path = base_dir + save_entry.escaped + "/"; const std::string old_path = base_dir + ".backup_" + save_entry.escaped + "/"; const std::string new_path = base_dir + ".working_" + save_entry.escaped + "/"; psf::registry psf = psf::load_object(dir_path + "PARAM.SFO"); bool has_modified = false; bool recreated = false; lv2_sleep(ppu, 250); ppu.state += cpu_flag::wait; // Check if RPCS3_BLIST section exist in PARAM.SFO // This section contains the list of files in the save ordered as they would be in BSD filesystem std::vector<std::string> blist; if (const auto it = psf.find("RPCS3_BLIST"); it != psf.cend()) blist = fmt::split(it->second.as_string(), {"/"}, false); // Get save stats { if (!funcStat) { // ****** sysutil savedata parameter error : 20 ****** return {CELL_SAVEDATA_ERROR_PARAM, "20"}; } fs::stat_t dir_info{}; if (!fs::get_stat(dir_path, dir_info)) { // funcStat is called even if the directory doesn't exist. 
} statGet->hddFreeSizeKB = 40 * 1024 * 1024 - 256; // Read explanation in cellHddGameCheck statGet->isNewData = save_entry.isNew = psf.empty(); statGet->dir.atime = save_entry.atime = dir_info.atime; statGet->dir.mtime = save_entry.mtime = dir_info.mtime; statGet->dir.ctime = save_entry.ctime = dir_info.ctime; strcpy_trunc(statGet->dir.dirName, save_entry.dirName); if (!psf.empty()) { statGet->getParam.parental_level = psf::get_integer(psf, "PARENTAL_LEVEL"); statGet->getParam.attribute = psf::get_integer(psf, "ATTRIBUTE"); // ??? strcpy_trunc(statGet->getParam.title, save_entry.title = psf::get_string(psf, "TITLE")); strcpy_trunc(statGet->getParam.subTitle, save_entry.subtitle = psf::get_string(psf, "SUB_TITLE")); strcpy_trunc(statGet->getParam.detail, save_entry.details = psf::get_string(psf, "DETAIL")); strcpy_trunc(statGet->getParam.listParam, save_entry.listParam = psf::get_string(psf, "SAVEDATA_LIST_PARAM")); } statGet->bind = 0; statGet->fileNum = 0; statGet->fileList.set(setBuf->buf.addr()); statGet->fileListNum = 0; std::memset(statGet->reserved, 0, sizeof(statGet->reserved)); if (!save_entry.isNew) { // Clear to bufSize if !isNew regardless of fileNum std::memset(setBuf->buf.get_ptr(), 0, setBuf->bufSize); } auto file_list = statGet->fileList.get_ptr(); u64 size_bytes = 0; std::vector<fs::dir_entry> files_sorted; for (auto&& entry : fs::dir(dir_path)) { entry.name = vfs::unescape(entry.name); if (!entry.is_directory) { if (check_filename(entry.name, false, false)) { continue; // system files are not included in the file list } files_sorted.push_back(entry); } } // clang-format off std::sort(files_sorted.begin(), files_sorted.end(), [&](const fs::dir_entry& a, const fs::dir_entry& b) -> bool { const auto a_it = std::find(blist.begin(), blist.end(), a.name); const auto b_it = std::find(blist.begin(), blist.end(), b.name); if (a_it == blist.end() && b_it == blist.end()) { // Order alphabetically for old saves return a.name.compare(b.name); } return a_it < 
b_it; }); // clang-format on for (auto&& entry : files_sorted) { { statGet->fileNum++; size_bytes += utils::align(entry.size, 1024); // firmware rounds this value up if (statGet->fileListNum >= setBuf->fileListMax) continue; statGet->fileListNum++; auto& file = *file_list++; file.size = entry.size; file.atime = entry.atime; file.mtime = entry.mtime; file.ctime = entry.ctime; strcpy_trunc(file.fileName, entry.name); if (entry.name == "ICON0.PNG") { file.fileType = CELL_SAVEDATA_FILETYPE_CONTENT_ICON0; } else if (entry.name == "ICON1.PAM") { file.fileType = CELL_SAVEDATA_FILETYPE_CONTENT_ICON1; } else if (entry.name == "PIC1.PNG") { file.fileType = CELL_SAVEDATA_FILETYPE_CONTENT_PIC1; } else if (entry.name == "SND0.AT3") { file.fileType = CELL_SAVEDATA_FILETYPE_CONTENT_SND0; } else if (psf::get_integer(psf, "*" + entry.name)) // let's put the list of protected files in PARAM.SFO (int param = 1 if protected) { file.fileType = CELL_SAVEDATA_FILETYPE_SECUREFILE; } else { file.fileType = CELL_SAVEDATA_FILETYPE_NORMALFILE; } } } statGet->sysSizeKB = 35; // always reported as 35 regardless of actual file sizes statGet->sizeKB = !save_entry.isNew ? 
::narrow<s32>((size_bytes / 1024) + statGet->sysSizeKB) : 0; std::memset(result.get_ptr(), 0, ::offset32(&CellSaveDataCBResult::userdata)); // Stat Callback funcStat(ppu, result, statGet, statSet); ppu.state += cpu_flag::wait; if (const s32 res = result->result; res != CELL_SAVEDATA_CBRESULT_OK_NEXT) { cellSaveData.warning("savedata_op(): funcStat returned result=%d.", res); // Skip and return without error on OK_LAST (NOCONFIRM is not allowed) if (res == CELL_SAVEDATA_CBRESULT_OK_LAST) { return CELL_OK; } return display_callback_result_error_message(ppu, *result, errDialog); } if (statSet->setParam) { if (statSet->setParam->attribute > CELL_SAVEDATA_ATTR_NODUPLICATE) { // ****** sysutil savedata parameter error : 57 ****** return {CELL_SAVEDATA_ERROR_PARAM, "57 (attribute=0x%x)", statSet->setParam->attribute}; } if (statSet->setParam->parental_level > 11) { // ****** sysutil savedata parameter error : 58 ****** return {CELL_SAVEDATA_ERROR_PARAM, "58 (sdk_ver=0x%x, parental_level=%d)", g_ps3_process_info.sdk_ver, statSet->setParam->parental_level}; } // Note: in firmware 3.70 or higher parental_level was changed to reserved2 for (usz index = 0;; index++) { // Convert to pointer to avoid UB when accessing out of range const u8 c = (+statSet->setParam->listParam)[index]; if (c == 0 || index >= (g_ps3_process_info.sdk_ver > 0x36FFFF ? 
std::size(statSet->setParam->listParam) - 1 : std::size(statSet->setParam->listParam))) { if (c) { // ****** sysutil savedata parameter error : 76 ****** return {CELL_SAVEDATA_ERROR_PARAM, "76 (listParam=0x%016x)", std::bit_cast<be_t<u64>>(statSet->setParam->listParam)}; } break; } if ((c < 'A' || c > 'Z') && (c < '0' || c > '9') && c != '-' && c != '_') { // ****** sysutil savedata parameter error : 77 ****** return {CELL_SAVEDATA_ERROR_PARAM, "77 (listParam=0x%016x)", std::bit_cast<be_t<u64>>(statSet->setParam->listParam)}; } } for (u8 resv : statSet->setParam->reserved) { if (resv) { // ****** sysutil savedata parameter error : 59 ****** return {CELL_SAVEDATA_ERROR_PARAM, "59"}; } } // Update PARAM.SFO psf::assign(psf, "ACCOUNT_ID", psf::array(16, "0000000000000000")); // ??? psf::assign(psf, "ATTRIBUTE", statSet->setParam->attribute.value()); psf::assign(psf, "CATEGORY", psf::string(4, "SD")); // ??? psf::assign(psf, "PARAMS", psf::string(1024, {})); // ??? psf::assign(psf, "PARAMS2", psf::string(12, {})); // ??? psf::assign(psf, "PARENTAL_LEVEL", statSet->setParam->parental_level.value()); psf::assign(psf, "DETAIL", psf::string(CELL_SAVEDATA_SYSP_DETAIL_SIZE, statSet->setParam->detail)); psf::assign(psf, "SAVEDATA_DIRECTORY", psf::string(CELL_SAVEDATA_DIRNAME_SIZE, save_entry.dirName)); psf::assign(psf, "SAVEDATA_LIST_PARAM", psf::string(CELL_SAVEDATA_SYSP_LPARAM_SIZE, statSet->setParam->listParam)); psf::assign(psf, "SUB_TITLE", psf::string(CELL_SAVEDATA_SYSP_SUBTITLE_SIZE, statSet->setParam->subTitle)); psf::assign(psf, "TITLE", psf::string(CELL_SAVEDATA_SYSP_TITLE_SIZE, statSet->setParam->title)); has_modified = true; } else if (save_entry.isNew) { // ****** sysutil savedata parameter error : 50 ****** return {CELL_SAVEDATA_ERROR_PARAM, "50"}; } switch (statSet->reCreateMode & CELL_SAVEDATA_RECREATE_MASK) { case CELL_SAVEDATA_RECREATE_NO: { //CELL_SAVEDATA_RECREATE_NO = overwrite and let the user know, not data is corrupt. 
//cellSaveData.error("Savedata %s considered broken", save_entry.dirName);
	//TODO: if this is a save, and it's not auto, then show a dialog
	[[fallthrough]];
}
case CELL_SAVEDATA_RECREATE_NO_NOBROKEN:
{
	break;
}
case CELL_SAVEDATA_RECREATE_YES:
case CELL_SAVEDATA_RECREATE_YES_RESET_OWNER:
{
	// Recreating savedata also requires setParam
	if (!statSet->setParam)
	{
		// ****** sysutil savedata parameter error : 50 ******
		return {CELL_SAVEDATA_ERROR_PARAM, "50"};
	}

	cellSaveData.warning("savedata_op(): Recreating savedata. (mode=%d)", statSet->reCreateMode);

	// Clear secure file info ('*'-prefixed PSF entries track per-file secure status)
	for (auto it = psf.cbegin(), end = psf.cend(); it != end;)
	{
		if (it->first[0] == '*')
			it = psf.erase(it);
		else
			it++;
	}

	// Clear order info
	blist.clear();

	// Set to not load files on next step
	recreated = true;
	has_modified = true;
	break;
}
default:
{
	// ****** sysutil savedata parameter error : 48 ******
	cellSaveData.error("savedata_op(): unknown statSet->reCreateMode (0x%x)", statSet->reCreateMode);
	return {CELL_SAVEDATA_ERROR_PARAM, "48"};
}
}
}

// Create save directory if necessary
if (!psf.empty() && save_entry.isNew && !fs::create_dir(dir_path) && fs::g_tls_error != fs::error::exist)
{
	cellSaveData.warning("savedata_op(): failed to create %s (%s)", dir_path, fs::g_tls_error);
	return CELL_SAVEDATA_ERROR_ACCESS_ERROR;
}

// Enter the loop where the save files are read/created/deleted
std::map<std::string, std::pair<s64, s64>> all_times; // name -> (atime, mtime) of files not modified by this op
std::map<std::string, fs::file> all_files;            // name -> in-memory copy of the file contents

// First, preload all files (TODO: beware of possible lag, although it should be insignificant)
for (auto&& entry : fs::dir(dir_path))
{
	if (!recreated && !entry.is_directory)
	{
		// Read file into a vector and make a memory file
		entry.name = vfs::unescape(entry.name);

		if (check_filename(entry.name, false, true))
		{
			continue;
		}

		all_times.emplace(entry.name, std::make_pair(entry.atime, entry.mtime));
		all_files.emplace(std::move(entry.name), fs::make_stream(fs::file(dir_path + entry.name).to_vector<uchar>()));
	}
}

fileGet->excSize = 0;

// show indicator for automatic save or auto load interactions if the game requests it (statSet->indicator)
const bool show_auto_indicator = operation <= SAVEDATA_OP_LIST_AUTO_LOAD && statSet && statSet->indicator && g_cfg.misc.show_autosave_autoload_hint;

if (show_auto_indicator)
{
	// Pick the localized message matching the operation type
	auto msg_text = localized_string_id::INVALID;
	if (operation == SAVEDATA_OP_AUTO_SAVE || operation == SAVEDATA_OP_LIST_AUTO_SAVE)
	{
		msg_text = localized_string_id::CELL_SAVEDATA_AUTOSAVE;
	}
	else if (operation == SAVEDATA_OP_AUTO_LOAD || operation == SAVEDATA_OP_LIST_AUTO_LOAD)
	{
		msg_text = localized_string_id::CELL_SAVEDATA_AUTOLOAD;
	}

	// Map the game's requested indicator position to an overlay pin location
	auto msg_location = rsx::overlays::message_pin_location::top_left;
	switch (statSet->indicator->dispPosition & 0x0F)
	{
	case CELL_SAVEDATA_INDICATORPOS_LOWER_RIGHT:
		msg_location = rsx::overlays::message_pin_location::bottom_right;
		break;
	case CELL_SAVEDATA_INDICATORPOS_LOWER_LEFT:
		msg_location = rsx::overlays::message_pin_location::bottom_left;
		break;
	case CELL_SAVEDATA_INDICATORPOS_UPPER_RIGHT:
		msg_location = rsx::overlays::message_pin_location::top_right;
		break;
	case CELL_SAVEDATA_INDICATORPOS_UPPER_LEFT:
	case CELL_SAVEDATA_INDICATORPOS_CENTER:
	default:
		msg_location = rsx::overlays::message_pin_location::top_left;
		break;
	}

	// TODO: Blinking variants

	// RPCS3 saves basically instantaneously so there's not much point in showing auto indicator
	// WHILE saving is in progress. Instead we show the indicator for 3 seconds to let the user
	// know when the game autosaves.
rsx::overlays::queue_message(msg_text, 3'000'000, {}, msg_location);
}

error_code savedata_result = CELL_OK;

u64 delay_save_until = 0;

// File callback loop: the game describes one file operation per iteration via funcFile
while (funcFile)
{
	lv2_sleep(ppu, 2000);

	// Reset in/out structures before each callback invocation
	std::memset(fileSet.get_ptr(), 0, fileSet.size());
	std::memset(fileGet->reserved, 0, sizeof(fileGet->reserved));
	std::memset(result.get_ptr(), 0, ::offset32(&CellSaveDataCBResult::userdata));

	funcFile(ppu, result, fileGet, fileSet);
	ppu.state += cpu_flag::wait;

	if (const s32 res = result->result; res != CELL_SAVEDATA_CBRESULT_OK_NEXT)
	{
		if (res == CELL_SAVEDATA_CBRESULT_OK_LAST || res == CELL_SAVEDATA_CBRESULT_OK_LAST_NOCONFIRM)
		{
			// TODO: display user prompt

			// Some games (Jak II [NPUA80707]) rely on this delay
			lv2_obj::sleep(ppu);
			delay_save_until = get_guest_system_time() + (has_modified ? 500'000 : 100'000);
			break;
		}

		cellSaveData.warning("savedata_op(): funcFile returned result=%d.", res);

		if (res < CELL_SAVEDATA_CBRESULT_ERR_INVALID || res > CELL_SAVEDATA_CBRESULT_OK_LAST_NOCONFIRM)
		{
			// ****** sysutil savedata parameter error : 22 ******
			savedata_result = {CELL_SAVEDATA_ERROR_PARAM, "22"};
			break;
		}

		savedata_result = {CELL_SAVEDATA_ERROR_CBRESULT, res};
		break;
	}

	// TODO: Show progress if it's not an auto load/save

	// Resolve the target file name from the requested file type
	std::string file_path;

	switch (const u32 type = fileSet->fileType)
	{
	case CELL_SAVEDATA_FILETYPE_SECUREFILE:
	case CELL_SAVEDATA_FILETYPE_NORMALFILE:
	{
		if (!fileSet->fileName)
		{
			// ****** sysutil savedata parameter error : 69 ******
			savedata_result = {CELL_SAVEDATA_ERROR_PARAM, "69"};
			break;
		}

		const char* fileName = fileSet->fileName.get_ptr();

		// The name must be NUL-terminated within CELL_SAVEDATA_FILENAME_SIZE
		if (const auto termpos = std::memchr(fileName, '\0', CELL_SAVEDATA_FILENAME_SIZE))
		{
			file_path.assign(fileName, static_cast<const char*>(termpos));
		}
		else
		{
			// ****** sysutil savedata parameter error : 71 ******
			savedata_result = {CELL_SAVEDATA_ERROR_PARAM, "71"};
			break;
		}

		if (int error = check_filename(file_path, true, false))
		{
			savedata_result = {CELL_SAVEDATA_ERROR_PARAM, "%d", error};
			break;
		}

		if (type == CELL_SAVEDATA_FILETYPE_SECUREFILE)
		{
			cellSaveData.notice("SECUREFILE: %s -> %s", file_path, fileSet->secureFileId);
		}

		break;
	}
	case CELL_SAVEDATA_FILETYPE_CONTENT_ICON0:
	{
		file_path = "ICON0.PNG";
		break;
	}
	case CELL_SAVEDATA_FILETYPE_CONTENT_ICON1:
	{
		file_path = "ICON1.PAM";
		break;
	}
	case CELL_SAVEDATA_FILETYPE_CONTENT_PIC1:
	{
		file_path = "PIC1.PNG";
		break;
	}
	case CELL_SAVEDATA_FILETYPE_CONTENT_SND0:
	{
		file_path = "SND0.AT3";
		break;
	}
	default:
	{
		// ****** sysutil savedata parameter error : 61 ******
		cellSaveData.error("savedata_op(): unknown fileSet->fileType (0x%x)", type);
		savedata_result = {CELL_SAVEDATA_ERROR_PARAM, "61"};
		break;
	}
	}

	if (savedata_result)
	{
		break;
	}

	// Helpers maintaining the file order list (blist); emptied slots are reused before appending
	// clang-format off
	auto add_to_blist = [&](const std::string& to_add)
	{
		if (std::find(blist.begin(), blist.end(), to_add) == blist.end())
		{
			if (auto it = std::find(blist.begin(), blist.end(), ""); it != blist.end())
				*it = to_add;
			else
				blist.push_back(to_add);
		}
	};

	auto del_from_blist = [&](const std::string& to_del)
	{
		if (auto it = std::find(blist.begin(), blist.end(), to_del); it != blist.end())
			*it = "";
	};
	// clang-format on

	cellSaveData.warning("savedata_op(): Fileop: file='%s', type=%d, op=%d, bufSize=%d, fileSize=%d, offset=%d", file_path, fileSet->fileType, fileSet->fileOperation, fileSet->fileBufSize, fileSet->fileSize, fileSet->fileOffset);

	if ((file_path == "."
|| file_path == "..") && fileSet->fileOperation <= CELL_SAVEDATA_FILEOP_WRITE_NOTRUNC)
	{
		savedata_result = CELL_SAVEDATA_ERROR_BROKEN;
		break;
	}

	// Perform the requested operation on the in-memory copy of the savedata
	switch (const u32 op = fileSet->fileOperation)
	{
	case CELL_SAVEDATA_FILEOP_READ:
	{
		if (fileSet->fileBufSize < fileSet->fileSize)
		{
			// ****** sysutil savedata parameter error : 72 ******
			savedata_result = {CELL_SAVEDATA_ERROR_PARAM, "72"};
			break;
		}

		if (!fileSet->fileBuf && fileSet->fileBufSize)
		{
			// ****** sysutil savedata parameter error : 73 ******
			savedata_result = {CELL_SAVEDATA_ERROR_PARAM, "73"};
			break;
		}

		const auto file = std::as_const(all_files).find(file_path);
		const u64 pos = fileSet->fileOffset;

		// Fail if the file is missing or the read offset is past its end
		if (file == all_files.cend() || file->second.size() <= pos)
		{
			cellSaveData.error("Failed to open file %s%s (size=%d, fileOffset=%d)", dir_path, file_path, file == all_files.cend() ? -1 : file->second.size(), fileSet->fileOffset);
			savedata_result = CELL_SAVEDATA_ERROR_FAILURE;
			break;
		}

		// Read from memory file to vm
		const u64 rr = lv2_file::op_read(file->second, fileSet->fileBuf, fileSet->fileSize, pos);
		fileGet->excSize = ::narrow<u32>(rr);
		break;
	}
	case CELL_SAVEDATA_FILEOP_WRITE:
	{
		if (fileSet->fileBufSize < fileSet->fileSize)
		{
			// ****** sysutil savedata parameter error : 72 ******
			savedata_result = {CELL_SAVEDATA_ERROR_PARAM, "72"};
			break;
		}

		if (!fileSet->fileBuf && fileSet->fileBufSize)
		{
			// ****** sysutil savedata parameter error : 73 ******
			savedata_result = {CELL_SAVEDATA_ERROR_PARAM, "73"};
			break;
		}

		// Create the memory file on first write
		fs::file& file = all_files[file_path];

		if (!file)
		{
			file = fs::make_stream<std::vector<uchar>>();
		}

		// Write to memory file and truncate
		const u64 sr = file.seek(fileSet->fileOffset);
		const u64 wr = lv2_file::op_write(file, fileSet->fileBuf, fileSet->fileSize);
		file.trunc(sr + wr);
		fileGet->excSize = ::narrow<u32>(wr);
		all_times.erase(file_path);
		add_to_blist(file_path);
		has_modified = true;
		break;
	}
	case CELL_SAVEDATA_FILEOP_DELETE:
	{
		// Delete memory file
		if (all_files.erase(file_path) == 0)
		{
			cellSaveData.error("Failed to delete file %s%s", dir_path, file_path);
			savedata_result = CELL_SAVEDATA_ERROR_FAILURE;
			break;
		}

		// Drop the secure-file marker, timestamps and ordering entry as well
		psf.erase("*" + file_path);
		fileGet->excSize = 0;
		all_times.erase(file_path);
		del_from_blist(file_path);
		has_modified = true;
		break;
	}
	case CELL_SAVEDATA_FILEOP_WRITE_NOTRUNC:
	{
		if (fileSet->fileBufSize < fileSet->fileSize)
		{
			// ****** sysutil savedata parameter error : 72 ******
			savedata_result = {CELL_SAVEDATA_ERROR_PARAM, "72"};
			break;
		}

		if (!fileSet->fileBuf && fileSet->fileBufSize)
		{
			// ****** sysutil savedata parameter error : 73 ******
			savedata_result = {CELL_SAVEDATA_ERROR_PARAM, "73"};
			break;
		}

		fs::file& file = all_files[file_path];

		if (!file)
		{
			file = fs::make_stream<std::vector<uchar>>();
		}

		// Write to memory file normally
		file.seek(fileSet->fileOffset);
		const u64 wr = lv2_file::op_write(file, fileSet->fileBuf, fileSet->fileSize);
		fileGet->excSize = ::narrow<u32>(wr);
		all_times.erase(file_path);
		add_to_blist(file_path);
		has_modified = true;
		break;
	}
	default:
	{
		// ****** sysutil savedata parameter error : 60 ******
		cellSaveData.error("savedata_op(): unknown fileSet->fileOperation (0x%x)", op);
		savedata_result = {CELL_SAVEDATA_ERROR_PARAM, "60"};
		break;
	}
	}

	if (savedata_result)
	{
		break;
	}

	// Record whether this file is a secure file ('*'-prefixed PSF entry)
	if (fileSet->fileOperation != CELL_SAVEDATA_FILEOP_DELETE)
	{
		psf.emplace("*" + file_path, fileSet->fileType == CELL_SAVEDATA_FILETYPE_SECUREFILE);
	}
}

// Write PARAM.SFO and savedata
if (!psf.empty() && has_modified)
{
	// First, create temporary directory
	if (fs::create_dir(new_path) || fs::g_tls_error == fs::error::exist)
	{
		fs::remove_all(new_path, false);
	}
	else
	{
		fmt::throw_exception("Failed to create directory %s (%s)", new_path, fs::g_tls_error);
	}

	// add file list per FS order to PARAM.SFO
	std::string final_blist;
	final_blist = fmt::merge(blist, "/");
	psf::assign(psf, "RPCS3_BLIST", psf::string(utils::align(::size32(final_blist) + 1, 4), final_blist));

	// Write all files in temporary directory
	auto& fsfo = all_files["PARAM.SFO"];
	fsfo =
fs::make_stream<std::vector<uchar>>();
	fsfo.write(psf::save_object(psf));

	// Flush every in-memory file into the temporary directory
	for (auto&& pair : all_files)
	{
		if (auto file = pair.second.release())
		{
			auto&& fvec = static_cast<fs::container_stream<std::vector<uchar>>&>(*file);
#ifdef _WIN32
			fs::pending_file f(new_path + vfs::escape(pair.first));
			f.file.write(fvec.obj);
			ensure(f.commit());
#else
			ensure(fs::write_file(new_path + vfs::escape(pair.first), fs::rewrite, fvec.obj));
#endif
		}
	}

	for (auto&& pair : all_times)
	{
		// Restore atime/mtime for files which have not been modified
		fs::utime(new_path + vfs::escape(pair.first), pair.second.first, pair.second.second);
	}

	// Remove old backup
	fs::remove_all(old_path);

	fs::sync();

	// Backup old savedata
	if (!vfs::host::rename(dir_path, old_path, &g_mp_sys_dev_hdd0, false))
	{
		fmt::throw_exception("Failed to move directory %s (%s)", dir_path, fs::g_tls_error);
	}

	// Commit new savedata
	if (!vfs::host::rename(new_path, dir_path, &g_mp_sys_dev_hdd0, false))
	{
		// TODO: handle the case when only commit failed at the next save load
		fmt::throw_exception("Failed to move directory %s (%s)", new_path, fs::g_tls_error);
	}

	// Remove backup again (TODO: may be changed to persistent backup implementation)
	fs::remove_all(old_path);
}

if (show_auto_indicator)
{
	// auto indicator should be hidden here if save/load throttling is added
}

if (savedata_result + 0u == CELL_SAVEDATA_ERROR_CBRESULT)
{
	return display_callback_result_error_message(ppu, *result, errDialog);
}

// Honor the post-save delay requested by the funcFile loop (some games depend on it)
if (u64 current_time = get_guest_system_time(); current_time < delay_save_until)
{
	lv2_sleep(ppu, delay_save_until - current_time);
}

return savedata_result;
}

// Query a single savedata directory for a user: fills the optional dir stat,
// system file params, bind state and size outputs when the pointers are provided
static NEVER_INLINE error_code savedata_get_list_item(vm::cptr<char> dirName, vm::ptr<CellSaveDataDirStat> dir, vm::ptr<CellSaveDataSystemFileParam> sysFileParam, vm::ptr<u32> bind, vm::ptr<u32> sizeKB, u32 userId)
{
	if (userId == CELL_SYSUTIL_USERID_CURRENT)
	{
		userId = Emu.GetUsrId();
	}
	else if (userId > CELL_USERINFO_USER_MAX)
	{
		// ****** sysutil savedata parameter error : 137 ******
		return {CELL_SAVEDATA_ERROR_PARAM, "137 (userId=0x%x)", userId};
	}

	if (!dirName)
	{
		// ****** sysutil savedata parameter error : 107 ******
		return {CELL_SAVEDATA_ERROR_PARAM, "107"};
	}

	switch (sysutil_check_name_string(dirName.get_ptr(), 1, CELL_SAVEDATA_DIRLIST_MAX))
	{
	case -1:
	{
		// ****** sysutil savedata parameter error : 108 ******
		return {CELL_SAVEDATA_ERROR_PARAM, "108"};
	}
	case -2:
	{
		// ****** sysutil savedata parameter error : 109 ******
		return {CELL_SAVEDATA_ERROR_PARAM, "109"};
	}
	case 0: break;
	default: fmt::throw_exception("Unreachable");
	}

	const std::string base_dir = fmt::format("/dev_hdd0/home/%08u/savedata/", userId);

	if (!fs::is_dir(vfs::get(base_dir)))
	{
		return CELL_SAVEDATA_ERROR_NOUSER;
	}

	const std::string save_path = vfs::get(base_dir + dirName.get_ptr() + '/');
	std::string sfo = save_path + "PARAM.SFO";

	if (!fs::is_dir(save_path) && !fs::is_file(sfo))
	{
		cellSaveData.error("cellSaveDataGetListItem(): Savedata at %s does not exist", dirName);
		return CELL_SAVEDATA_ERROR_NODATA;
	}

	const psf::registry psf = psf::load_object(sfo);

	if (sysFileParam)
	{
		// Copy the system file parameters out of PARAM.SFO
		strcpy_trunc(sysFileParam->listParam, psf::get_string(psf, "SAVEDATA_LIST_PARAM"));
		strcpy_trunc(sysFileParam->title, psf::get_string(psf, "TITLE"));
		strcpy_trunc(sysFileParam->subTitle, psf::get_string(psf, "SUB_TITLE"));
		strcpy_trunc(sysFileParam->detail, psf::get_string(psf, "DETAIL"));
	}

	if (dir)
	{
		fs::stat_t dir_info{};
		if (!fs::get_stat(save_path, dir_info))
		{
			return CELL_SAVEDATA_ERROR_INTERNAL;
		}

		// get file stats, namely directory
		strcpy_trunc(dir->dirName, std::string_view(dirName.get_ptr()));
		dir->atime = dir_info.atime;
		dir->ctime = dir_info.ctime;
		dir->mtime = dir_info.mtime;
	}

	if (sizeKB)
	{
		u32 size_kbytes = 0;

		for (const auto& entry : fs::dir(save_path))
		{
			if (entry.is_directory || check_filename(vfs::unescape(entry.name), false, false))
			{
				continue;
			}

			size_kbytes += ::narrow<u32>((entry.size + 1023) / 1024); // firmware rounds this value up
		}

		// Add a seemingly constant allocation disk space of PARAM.SFO + PARAM.PFD
		*sizeKB = size_kbytes + 35;
	}

	if (bind)
	{
		//TODO: Set bind in accordance to any problems
		*bind = 0;
	}

	return CELL_OK;
}

// Functions
// Thin HLE wrappers: log the guest arguments and forward to the common savedata_op implementation
error_code cellSaveDataListSave2(ppu_thread& ppu, u32 version, PSetList setList, PSetBuf setBuf, PFuncList funcList, PFuncStat funcStat, PFuncFile funcFile, u32 container, vm::ptr<void> userdata)
{
	cellSaveData.warning("cellSaveDataListSave2(version=%d, setList=*0x%x, setBuf=*0x%x, funcList=*0x%x, funcStat=*0x%x, funcFile=*0x%x, container=0x%x, userdata=*0x%x)", version, setList, setBuf, funcList, funcStat, funcFile, container, userdata);

	return savedata_op(ppu, SAVEDATA_OP_LIST_SAVE, version, vm::null, CELL_SAVEDATA_ERRDIALOG_ALWAYS, setList, setBuf, funcList, vm::null, funcStat, funcFile, container, 2, userdata, 0, vm::null);
}

error_code cellSaveDataListLoad2(ppu_thread& ppu, u32 version, PSetList setList, PSetBuf setBuf, PFuncList funcList, PFuncStat funcStat, PFuncFile funcFile, u32 container, vm::ptr<void> userdata)
{
	cellSaveData.warning("cellSaveDataListLoad2(version=%d, setList=*0x%x, setBuf=*0x%x, funcList=*0x%x, funcStat=*0x%x, funcFile=*0x%x, container=0x%x, userdata=*0x%x)", version, setList, setBuf, funcList, funcStat, funcFile, container, userdata);

	return savedata_op(ppu, SAVEDATA_OP_LIST_LOAD, version, vm::null, CELL_SAVEDATA_ERRDIALOG_ALWAYS, setList, setBuf, funcList, vm::null, funcStat, funcFile, container, 2, userdata, 0, vm::null);
}

error_code cellSaveDataListSave(ppu_thread& ppu, u32 version, PSetList setList, PSetBuf setBuf, PFuncList funcList, PFuncStat funcStat, PFuncFile funcFile, u32 container)
{
	cellSaveData.warning("cellSaveDataListSave(version=%d, setList=*0x%x, setBuf=*0x%x, funcList=*0x%x, funcStat=*0x%x, funcFile=*0x%x, container=0x%x)", version, setList, setBuf, funcList, funcStat, funcFile, container);

	return savedata_op(ppu, SAVEDATA_OP_LIST_SAVE, version, vm::null, CELL_SAVEDATA_ERRDIALOG_ALWAYS, setList, setBuf, funcList, vm::null, funcStat,
funcFile, container, 2, vm::null, 0, vm::null);
}

// Same as cellSaveDataListSave2 but without userdata
error_code cellSaveDataListLoad(ppu_thread& ppu, u32 version, PSetList setList, PSetBuf setBuf, PFuncList funcList, PFuncStat funcStat, PFuncFile funcFile, u32 container)
{
	cellSaveData.warning("cellSaveDataListLoad(version=%d, setList=*0x%x, setBuf=*0x%x, funcList=*0x%x, funcStat=*0x%x, funcFile=*0x%x, container=0x%x)", version, setList, setBuf, funcList, funcStat, funcFile, container);

	return savedata_op(ppu, SAVEDATA_OP_LIST_LOAD, version, vm::null, CELL_SAVEDATA_ERRDIALOG_ALWAYS, setList, setBuf, funcList, vm::null, funcStat, funcFile, container, 2, vm::null, 0, vm::null);
}

error_code cellSaveDataFixedSave2(ppu_thread& ppu, u32 version, PSetList setList, PSetBuf setBuf, PFuncFixed funcFixed, PFuncStat funcStat, PFuncFile funcFile, u32 container, vm::ptr<void> userdata)
{
	cellSaveData.warning("cellSaveDataFixedSave2(version=%d, setList=*0x%x, setBuf=*0x%x, funcFixed=*0x%x, funcStat=*0x%x, funcFile=*0x%x, container=0x%x, userdata=*0x%x)", version, setList, setBuf, funcFixed, funcStat, funcFile, container, userdata);

	return savedata_op(ppu, SAVEDATA_OP_FIXED_SAVE, version, vm::null, CELL_SAVEDATA_ERRDIALOG_ALWAYS, setList, setBuf, vm::null, funcFixed, funcStat, funcFile, container, 2, userdata, 0, vm::null);
}

error_code cellSaveDataFixedLoad2(ppu_thread& ppu, u32 version, PSetList setList, PSetBuf setBuf, PFuncFixed funcFixed, PFuncStat funcStat, PFuncFile funcFile, u32 container, vm::ptr<void> userdata)
{
	cellSaveData.warning("cellSaveDataFixedLoad2(version=%d, setList=*0x%x, setBuf=*0x%x, funcFixed=*0x%x, funcStat=*0x%x, funcFile=*0x%x, container=0x%x, userdata=*0x%x)", version, setList, setBuf, funcFixed, funcStat, funcFile, container, userdata);

	return savedata_op(ppu, SAVEDATA_OP_FIXED_LOAD, version, vm::null, CELL_SAVEDATA_ERRDIALOG_ALWAYS, setList, setBuf, vm::null, funcFixed, funcStat, funcFile, container, 2, userdata, 0, vm::null);
}

error_code cellSaveDataFixedSave(ppu_thread& ppu, u32 version, PSetList
setList, PSetBuf setBuf, PFuncFixed funcFixed, PFuncStat funcStat, PFuncFile funcFile, u32 container)
{
	cellSaveData.warning("cellSaveDataFixedSave(version=%d, setList=*0x%x, setBuf=*0x%x, funcFixed=*0x%x, funcStat=*0x%x, funcFile=*0x%x, container=0x%x)", version, setList, setBuf, funcFixed, funcStat, funcFile, container);

	return savedata_op(ppu, SAVEDATA_OP_FIXED_SAVE, version, vm::null, CELL_SAVEDATA_ERRDIALOG_ALWAYS, setList, setBuf, vm::null, funcFixed, funcStat, funcFile, container, 2, vm::null, 0, vm::null);
}

error_code cellSaveDataFixedLoad(ppu_thread& ppu, u32 version, PSetList setList, PSetBuf setBuf, PFuncFixed funcFixed, PFuncStat funcStat, PFuncFile funcFile, u32 container)
{
	cellSaveData.warning("cellSaveDataFixedLoad(version=%d, setList=*0x%x, setBuf=*0x%x, funcFixed=*0x%x, funcStat=*0x%x, funcFile=*0x%x, container=0x%x)", version, setList, setBuf, funcFixed, funcStat, funcFile, container);

	return savedata_op(ppu, SAVEDATA_OP_FIXED_LOAD, version, vm::null, CELL_SAVEDATA_ERRDIALOG_ALWAYS, setList, setBuf, vm::null, funcFixed, funcStat, funcFile, container, 2, vm::null, 0, vm::null);
}

// Auto variants target a single directory (dirName) instead of showing a list
error_code cellSaveDataAutoSave2(ppu_thread& ppu, u32 version, vm::cptr<char> dirName, u32 errDialog, PSetBuf setBuf, PFuncStat funcStat, PFuncFile funcFile, u32 container, vm::ptr<void> userdata)
{
	cellSaveData.warning("cellSaveDataAutoSave2(version=%d, dirName=%s, errDialog=%d, setBuf=*0x%x, funcStat=*0x%x, funcFile=*0x%x, container=0x%x, userdata=*0x%x)", version, dirName, errDialog, setBuf, funcStat, funcFile, container, userdata);

	return savedata_op(ppu, SAVEDATA_OP_AUTO_SAVE, version, dirName, errDialog, vm::null, setBuf, vm::null, vm::null, funcStat, funcFile, container, 2, userdata, 0, vm::null);
}

error_code cellSaveDataAutoLoad2(ppu_thread& ppu, u32 version, vm::cptr<char> dirName, u32 errDialog, PSetBuf setBuf, PFuncStat funcStat, PFuncFile funcFile, u32 container, vm::ptr<void> userdata)
{
	cellSaveData.warning("cellSaveDataAutoLoad2(version=%d, dirName=%s, errDialog=%d, setBuf=*0x%x, funcStat=*0x%x, funcFile=*0x%x, container=0x%x, userdata=*0x%x)", version, dirName, errDialog, setBuf, funcStat, funcFile, container, userdata);

	return savedata_op(ppu, SAVEDATA_OP_AUTO_LOAD, version, dirName, errDialog, vm::null, setBuf, vm::null, vm::null, funcStat, funcFile, container, 2, userdata, 0, vm::null);
}

error_code cellSaveDataAutoSave(ppu_thread& ppu, u32 version, vm::cptr<char> dirName, u32 errDialog, PSetBuf setBuf, PFuncStat funcStat, PFuncFile funcFile, u32 container)
{
	cellSaveData.warning("cellSaveDataAutoSave(version=%d, dirName=%s, errDialog=%d, setBuf=*0x%x, funcStat=*0x%x, funcFile=*0x%x, container=0x%x)", version, dirName, errDialog, setBuf, funcStat, funcFile, container);

	return savedata_op(ppu, SAVEDATA_OP_AUTO_SAVE, version, dirName, errDialog, vm::null, setBuf, vm::null, vm::null, funcStat, funcFile, container, 2, vm::null, 0, vm::null);
}

error_code cellSaveDataAutoLoad(ppu_thread& ppu, u32 version, vm::cptr<char> dirName, u32 errDialog, PSetBuf setBuf, PFuncStat funcStat, PFuncFile funcFile, u32 container)
{
	cellSaveData.warning("cellSaveDataAutoLoad(version=%d, dirName=%s, errDialog=%d, setBuf=*0x%x, funcStat=*0x%x, funcFile=*0x%x, container=0x%x)", version, dirName, errDialog, setBuf, funcStat, funcFile, container);

	return savedata_op(ppu, SAVEDATA_OP_AUTO_LOAD, version, dirName, errDialog, vm::null, setBuf, vm::null, vm::null, funcStat, funcFile, container, 2, vm::null, 0, vm::null);
}

error_code cellSaveDataListAutoSave(ppu_thread& ppu, u32 version, u32 errDialog, PSetList setList, PSetBuf setBuf, PFuncFixed funcFixed, PFuncStat funcStat, PFuncFile funcFile, u32 container, vm::ptr<void> userdata)
{
	cellSaveData.warning("cellSaveDataListAutoSave(version=%d, errDialog=%d, setList=*0x%x, setBuf=*0x%x, funcFixed=*0x%x, funcStat=*0x%x, funcFile=*0x%x, container=0x%x, userdata=*0x%x)", version, errDialog, setList, setBuf, funcFixed, funcStat, funcFile, container, userdata);

	return savedata_op(ppu,
SAVEDATA_OP_LIST_AUTO_SAVE, version, vm::null, errDialog, setList, setBuf, vm::null, funcFixed, funcStat, funcFile, container, 2, userdata, 0, vm::null);
}

error_code cellSaveDataListAutoLoad(ppu_thread& ppu, u32 version, u32 errDialog, PSetList setList, PSetBuf setBuf, PFuncFixed funcFixed, PFuncStat funcStat, PFuncFile funcFile, u32 container, vm::ptr<void> userdata)
{
	cellSaveData.warning("cellSaveDataListAutoLoad(version=%d, errDialog=%d, setList=*0x%x, setBuf=*0x%x, funcFixed=*0x%x, funcStat=*0x%x, funcFile=*0x%x, container=0x%x, userdata=*0x%x)", version, errDialog, setList, setBuf, funcFixed, funcStat, funcFile, container, userdata);

	return savedata_op(ppu, SAVEDATA_OP_LIST_AUTO_LOAD, version, vm::null, errDialog, setList, setBuf, vm::null, funcFixed, funcStat, funcFile, container, 2, userdata, 0, vm::null);
}

// Both delete entry points share the same interactive implementation
error_code cellSaveDataDelete(ppu_thread& ppu, u32 container)
{
	cellSaveData.warning("cellSaveDataDelete(container=0x%x)", container);

	return select_and_delete(ppu);
}

error_code cellSaveDataDelete2(ppu_thread& ppu, u32 container)
{
	cellSaveData.warning("cellSaveDataDelete2(container=0x%x)", container);

	return select_and_delete(ppu);
}

error_code cellSaveDataFixedDelete(ppu_thread& ppu, PSetList setList, PSetBuf setBuf, PFuncFixed funcFixed, PFuncDone funcDone, u32 container, vm::ptr<void> userdata)
{
	cellSaveData.warning("cellSaveDataFixedDelete(setList=*0x%x, setBuf=*0x%x, funcFixed=*0x%x, funcDone=*0x%x, container=0x%x, userdata=*0x%x)", setList, setBuf, funcFixed, funcDone, container, userdata);

	return savedata_op(ppu, SAVEDATA_OP_FIXED_DELETE, 0, vm::null, 1, setList, setBuf, vm::null, funcFixed, vm::null, vm::null, container, 2, userdata, 0, funcDone);
}

// cellSaveDataUser* variants pass an explicit userId through to savedata_op
error_code cellSaveDataUserListSave(ppu_thread& ppu, u32 version, u32 userId, PSetList setList, PSetBuf setBuf, PFuncList funcList, PFuncStat funcStat, PFuncFile funcFile, u32 container, vm::ptr<void> userdata)
{
	cellSaveData.error("cellSaveDataUserListSave(version=%d, userId=%d, setList=*0x%x, setBuf=*0x%x, funcList=*0x%x, funcStat=*0x%x, funcFile=*0x%x, container=0x%x, userdata=*0x%x)", version, userId, setList, setBuf, funcList, funcStat, funcFile, container, userdata);

	return savedata_op(ppu, SAVEDATA_OP_LIST_SAVE, version, vm::null, 0, setList, setBuf, funcList, vm::null, funcStat, funcFile, container, 6, userdata, userId, vm::null);
}

error_code cellSaveDataUserListLoad(ppu_thread& ppu, u32 version, u32 userId, PSetList setList, PSetBuf setBuf, PFuncList funcList, PFuncStat funcStat, PFuncFile funcFile, u32 container, vm::ptr<void> userdata)
{
	cellSaveData.error("cellSaveDataUserListLoad(version=%d, userId=%d, setList=*0x%x, setBuf=*0x%x, funcList=*0x%x, funcStat=*0x%x, funcFile=*0x%x, container=0x%x, userdata=*0x%x)", version, userId, setList, setBuf, funcList, funcStat, funcFile, container, userdata);

	return savedata_op(ppu, SAVEDATA_OP_LIST_LOAD, version, vm::null, 0, setList, setBuf, funcList, vm::null, funcStat, funcFile, container, 6, userdata, userId, vm::null);
}

error_code cellSaveDataUserFixedSave(ppu_thread& ppu, u32 version, u32 userId, PSetList setList, PSetBuf setBuf, PFuncFixed funcFixed, PFuncStat funcStat, PFuncFile funcFile, u32 container, vm::ptr<void> userdata)
{
	cellSaveData.error("cellSaveDataUserFixedSave(version=%d, userId=%d, setList=*0x%x, setBuf=*0x%x, funcFixed=*0x%x, funcStat=*0x%x, funcFile=*0x%x, container=0x%x, userdata=*0x%x)", version, userId, setList, setBuf, funcFixed, funcStat, funcFile, container, userdata);

	return savedata_op(ppu, SAVEDATA_OP_FIXED_SAVE, version, vm::null, 0, setList, setBuf, vm::null, funcFixed, funcStat, funcFile, container, 6, userdata, userId, vm::null);
}

error_code cellSaveDataUserFixedLoad(ppu_thread& ppu, u32 version, u32 userId, PSetList setList, PSetBuf setBuf, PFuncFixed funcFixed, PFuncStat funcStat, PFuncFile funcFile, u32 container, vm::ptr<void> userdata)
{
	cellSaveData.error("cellSaveDataUserFixedLoad(version=%d, userId=%d, setList=*0x%x, setBuf=*0x%x, funcFixed=*0x%x, funcStat=*0x%x, funcFile=*0x%x, container=0x%x, userdata=*0x%x)", version, userId, setList, setBuf, funcFixed, funcStat, funcFile, container, userdata);

	return savedata_op(ppu, SAVEDATA_OP_FIXED_LOAD, version, vm::null, 0, setList, setBuf, vm::null, funcFixed, funcStat, funcFile, container, 6, userdata, userId, vm::null);
}

error_code cellSaveDataUserAutoSave(ppu_thread& ppu, u32 version, u32 userId, vm::cptr<char> dirName, u32 errDialog, PSetBuf setBuf, PFuncStat funcStat, PFuncFile funcFile, u32 container, vm::ptr<void> userdata)
{
	cellSaveData.error("cellSaveDataUserAutoSave(version=%d, userId=%d, dirName=%s, errDialog=%d, setBuf=*0x%x, funcStat=*0x%x, funcFile=*0x%x, container=0x%x, userdata=*0x%x)", version, userId, dirName, errDialog, setBuf, funcStat, funcFile, container, userdata);

	return savedata_op(ppu, SAVEDATA_OP_AUTO_SAVE, version, dirName, errDialog, vm::null, setBuf, vm::null, vm::null, funcStat, funcFile, container, 6, userdata, userId, vm::null);
}

error_code cellSaveDataUserAutoLoad(ppu_thread& ppu, u32 version, u32 userId, vm::cptr<char> dirName, u32 errDialog, PSetBuf setBuf, PFuncStat funcStat, PFuncFile funcFile, u32 container, vm::ptr<void> userdata)
{
	cellSaveData.error("cellSaveDataUserAutoLoad(version=%d, userId=%d, dirName=%s, errDialog=%d, setBuf=*0x%x, funcStat=*0x%x, funcFile=*0x%x, container=0x%x, userdata=*0x%x)", version, userId, dirName, errDialog, setBuf, funcStat, funcFile, container, userdata);

	return savedata_op(ppu, SAVEDATA_OP_AUTO_LOAD, version, dirName, errDialog, vm::null, setBuf, vm::null, vm::null, funcStat, funcFile, container, 6, userdata, userId, vm::null);
}

error_code cellSaveDataUserListAutoSave(ppu_thread& ppu, u32 version, u32 userId, u32 errDialog, PSetList setList, PSetBuf setBuf, PFuncFixed funcFixed, PFuncStat funcStat, PFuncFile funcFile, u32 container, vm::ptr<void> userdata)
{
	cellSaveData.error("cellSaveDataUserListAutoSave(version=%d, userId=%d, errDialog=%d, setList=*0x%x, setBuf=*0x%x, funcFixed=*0x%x, funcStat=*0x%x, funcFile=*0x%x, container=0x%x, userdata=*0x%x)", version, userId, errDialog, setList, setBuf, funcFixed, funcStat, funcFile, container, userdata);

	return savedata_op(ppu, SAVEDATA_OP_LIST_AUTO_SAVE, version, vm::null, errDialog, setList, setBuf, vm::null, funcFixed, funcStat, funcFile, container, 6, userdata, userId, vm::null);
}

error_code cellSaveDataUserListAutoLoad(ppu_thread& ppu, u32 version, u32 userId, u32 errDialog, PSetList setList, PSetBuf setBuf, PFuncFixed funcFixed, PFuncStat funcStat, PFuncFile funcFile, u32 container, vm::ptr<void> userdata)
{
	cellSaveData.error("cellSaveDataUserListAutoLoad(version=%d, userId=%d, errDialog=%d, setList=*0x%x, setBuf=*0x%x, funcFixed=*0x%x, funcStat=*0x%x, funcFile=*0x%x, container=0x%x, userdata=*0x%x)", version, userId, errDialog, setList, setBuf, funcFixed, funcStat, funcFile, container, userdata);

	return savedata_op(ppu, SAVEDATA_OP_LIST_AUTO_LOAD, version, vm::null, errDialog, setList, setBuf, vm::null, funcFixed, funcStat, funcFile, container, 6, userdata, userId, vm::null);
}

error_code cellSaveDataUserFixedDelete(ppu_thread& ppu, u32 userId, PSetList setList, PSetBuf setBuf, PFuncFixed funcFixed, PFuncDone funcDone, u32 container, vm::ptr<void> userdata)
{
	cellSaveData.error("cellSaveDataUserFixedDelete(userId=%d, setList=*0x%x, setBuf=*0x%x, funcFixed=*0x%x, funcDone=*0x%x, container=0x%x, userdata=*0x%x)", userId, setList, setBuf, funcFixed, funcDone, container, userdata);

	return savedata_op(ppu, SAVEDATA_OP_FIXED_DELETE, 0, vm::null, 1, setList, setBuf, vm::null, funcFixed, vm::null, vm::null, container, 6, userdata, userId, funcDone);
}

// Toggle the savedata overlay flag stored in the fixed-object manager
void cellSaveDataEnableOverlay(s32 enable)
{
	cellSaveData.notice("cellSaveDataEnableOverlay(enable=%d)", enable);

	auto& manager = g_fxo->get<savedata_manager>();
	manager.enable_overlay = enable != 0;
}

// Functions (Extensions)
error_code cellSaveDataListDelete(ppu_thread& ppu, PSetList setList, PSetBuf setBuf, PFuncList funcList, PFuncDone funcDone, u32
container, vm::ptr<void> userdata)
{
	cellSaveData.warning("cellSaveDataListDelete(setList=*0x%x, setBuf=*0x%x, funcList=*0x%x, funcDone=*0x%x, container=0x%x, userdata=*0x%x)", setList, setBuf, funcList, funcDone, container, userdata);

	return savedata_op(ppu, SAVEDATA_OP_LIST_DELETE, 0, vm::null, 0, setList, setBuf, funcList, vm::null, vm::null, vm::null, container, 0x40, userdata, 0, funcDone);
}

// Temporarily
#ifndef _MSC_VER
#pragma GCC diagnostic ignored "-Wunused-parameter"
#endif

// Stub: only argument validation is performed; the actual import is not implemented yet
error_code cellSaveDataListImport(ppu_thread& ppu, PSetList setList, u32 maxSizeKB, PFuncDone funcDone, u32 container, vm::ptr<void> userdata)
{
	cellSaveData.todo("cellSaveDataListImport(setList=*0x%x, maxSizeKB=%d, funcDone=*0x%x, container=0x%x, userdata=*0x%x)", setList, maxSizeKB, funcDone, container, userdata);

	if (const auto ecode = savedata_check_args(SAVEDATA_OP_LIST_IMPORT, CELL_SAVEDATA_VERSION_OLD, vm::null, CELL_SAVEDATA_ERRDIALOG_NONE, setList, vm::null, vm::null, vm::null, vm::null, vm::null, container, 0x40, userdata, 0, funcDone))
	{
		return {CELL_SAVEDATA_ERROR_PARAM, " (error %d)", ecode};
	}

	// TODO
	return CELL_OK;
}

error_code cellSaveDataListExport(ppu_thread& ppu, PSetList setList, u32 maxSizeKB, PFuncDone funcDone, u32 container, vm::ptr<void> userdata)
{
	cellSaveData.todo("cellSaveDataListExport(setList=*0x%x, maxSizeKB=%d, funcDone=*0x%x, container=0x%x, userdata=*0x%x)", setList, maxSizeKB, funcDone, container, userdata);

	if (const auto ecode = savedata_check_args(SAVEDATA_OP_LIST_EXPORT, CELL_SAVEDATA_VERSION_OLD, vm::null, CELL_SAVEDATA_ERRDIALOG_NONE, setList, vm::null, vm::null, vm::null, vm::null, vm::null, container, 0x40, userdata, 0, funcDone))
	{
		return {CELL_SAVEDATA_ERROR_PARAM, " (error %d)", ecode};
	}

	// TODO
	return CELL_OK;
}

error_code cellSaveDataFixedImport(ppu_thread& ppu, vm::cptr<char> dirName, u32 maxSizeKB, PFuncDone funcDone, u32 container, vm::ptr<void> userdata)
{
	cellSaveData.todo("cellSaveDataFixedImport(dirName=%s, maxSizeKB=%d, funcDone=*0x%x, container=0x%x, userdata=*0x%x)", dirName, maxSizeKB, funcDone, container, userdata);

	if (const auto ecode = savedata_check_args(SAVEDATA_OP_FIXED_IMPORT, CELL_SAVEDATA_VERSION_OLD, dirName, CELL_SAVEDATA_ERRDIALOG_NONE, vm::null, vm::null, vm::null, vm::null, vm::null, vm::null, container, 0x44, userdata, 0, funcDone))
	{
		return {CELL_SAVEDATA_ERROR_PARAM, " (error %d)", ecode};
	}

	// TODO
	return CELL_OK;
}

error_code cellSaveDataFixedExport(ppu_thread& ppu, vm::cptr<char> dirName, u32 maxSizeKB, PFuncDone funcDone, u32 container, vm::ptr<void> userdata)
{
	cellSaveData.todo("cellSaveDataFixedExport(dirName=%s, maxSizeKB=%d, funcDone=*0x%x, container=0x%x, userdata=*0x%x)", dirName, maxSizeKB, funcDone, container, userdata);

	if (const auto ecode = savedata_check_args(SAVEDATA_OP_FIXED_EXPORT, CELL_SAVEDATA_VERSION_OLD, dirName, CELL_SAVEDATA_ERRDIALOG_NONE, vm::null, vm::null, vm::null, vm::null, vm::null, vm::null, container, 0x44, userdata, 0, funcDone))
	{
		return {CELL_SAVEDATA_ERROR_PARAM, " (error %d)", ecode};
	}

	// TODO
	return CELL_OK;
}

// Public entry: forwards to savedata_get_list_item with userId 0
error_code cellSaveDataGetListItem(ppu_thread& ppu, vm::cptr<char> dirName, vm::ptr<CellSaveDataDirStat> dir, vm::ptr<CellSaveDataSystemFileParam> sysFileParam, vm::ptr<u32> bind, vm::ptr<u32> sizeKB)
{
	ppu.state += cpu_flag::wait;

	cellSaveData.warning("cellSaveDataGetListItem(dirName=%s, dir=*0x%x, sysFileParam=*0x%x, bind=*0x%x, sizeKB=*0x%x)", dirName, dir, sysFileParam, bind, sizeKB);

	return savedata_get_list_item(dirName, dir, sysFileParam, bind, sizeKB, 0);
}

error_code cellSaveDataUserListDelete(ppu_thread& ppu, u32 userId, PSetList setList, PSetBuf setBuf, PFuncList funcList, PFuncDone funcDone, u32 container, vm::ptr<void> userdata)
{
	cellSaveData.error("cellSaveDataUserListDelete(userId=%d, setList=*0x%x, setBuf=*0x%x, funcList=*0x%x, funcDone=*0x%x, container=0x%x, userdata=*0x%x)", userId, setList, setBuf, funcList, funcDone, container, userdata);

	return savedata_op(ppu, SAVEDATA_OP_LIST_DELETE, 0,
vm::null, 0, setList, setBuf, funcList, vm::null, vm::null, vm::null, container, 0x40, userdata, userId, funcDone); } error_code cellSaveDataUserListImport(ppu_thread& ppu, u32 userId, PSetList setList, u32 maxSizeKB, PFuncDone funcDone, u32 container, vm::ptr<void> userdata) { cellSaveData.todo("cellSaveDataUserListImport(userId=%d, setList=*0x%x, maxSizeKB=%d, funcDone=*0x%x, container=0x%x, userdata=*0x%x)", userId, setList, maxSizeKB, funcDone, container, userdata); if (const auto ecode = savedata_check_args(SAVEDATA_OP_LIST_IMPORT, CELL_SAVEDATA_VERSION_OLD, vm::null, CELL_SAVEDATA_ERRDIALOG_NONE, setList, vm::null, vm::null, vm::null, vm::null, vm::null, container, 0x44, userdata, userId, funcDone)) { return {CELL_SAVEDATA_ERROR_PARAM, " (error %d)", ecode}; } // TODO return CELL_OK; } error_code cellSaveDataUserListExport(ppu_thread& ppu, u32 userId, PSetList setList, u32 maxSizeKB, PFuncDone funcDone, u32 container, vm::ptr<void> userdata) { cellSaveData.todo("cellSaveDataUserListExport(userId=%d, setList=*0x%x, maxSizeKB=%d, funcDone=*0x%x, container=0x%x, userdata=*0x%x)", userId, setList, maxSizeKB, funcDone, container, userdata); if (const auto ecode = savedata_check_args(SAVEDATA_OP_LIST_EXPORT, CELL_SAVEDATA_VERSION_OLD, vm::null, CELL_SAVEDATA_ERRDIALOG_NONE, setList, vm::null, vm::null, vm::null, vm::null, vm::null, container, 0x44, userdata, userId, funcDone)) { return {CELL_SAVEDATA_ERROR_PARAM, " (error %d)", ecode}; } // TODO return CELL_OK; } error_code cellSaveDataUserFixedImport(ppu_thread& ppu, u32 userId, vm::cptr<char> dirName, u32 maxSizeKB, PFuncDone funcDone, u32 container, vm::ptr<void> userdata) { cellSaveData.todo("cellSaveDataUserFixedImport(userId=%d, dirName=%s, maxSizeKB=%d, funcDone=*0x%x, container=0x%x, userdata=*0x%x)", userId, dirName, maxSizeKB, funcDone, container, userdata); if (const auto ecode = savedata_check_args(SAVEDATA_OP_FIXED_IMPORT, CELL_SAVEDATA_VERSION_OLD, dirName, CELL_SAVEDATA_ERRDIALOG_NONE, vm::null, 
vm::null, vm::null, vm::null, vm::null, vm::null, container, 0x44, userdata, userId, funcDone)) { return {CELL_SAVEDATA_ERROR_PARAM, " (error %d)", ecode}; } // TODO return CELL_OK; } error_code cellSaveDataUserFixedExport(ppu_thread& ppu, u32 userId, vm::cptr<char> dirName, u32 maxSizeKB, PFuncDone funcDone, u32 container, vm::ptr<void> userdata) { cellSaveData.todo("cellSaveDataUserFixedExport(userId=%d, dirName=%s, maxSizeKB=%d, funcDone=*0x%x, container=0x%x, userdata=*0x%x)", userId, dirName, maxSizeKB, funcDone, container, userdata); if (const auto ecode = savedata_check_args(SAVEDATA_OP_FIXED_EXPORT, CELL_SAVEDATA_VERSION_OLD, dirName, CELL_SAVEDATA_ERRDIALOG_NONE, vm::null, vm::null, vm::null, vm::null, vm::null, vm::null, container, 0x44, userdata, userId, funcDone)) { return {CELL_SAVEDATA_ERROR_PARAM, " (error %d)", ecode}; } // TODO return CELL_OK; } error_code cellSaveDataUserGetListItem(u32 userId, vm::cptr<char> dirName, vm::ptr<CellSaveDataDirStat> dir, vm::ptr<CellSaveDataSystemFileParam> sysFileParam, vm::ptr<u32> bind, vm::ptr<u32> sizeKB) { cellSaveData.warning("cellSaveDataUserGetListItem(dirName=%s, dir=*0x%x, sysFileParam=*0x%x, bind=*0x%x, sizeKB=*0x%x, userID=*0x%x)", dirName, dir, sysFileParam, bind, sizeKB, userId); return savedata_get_list_item(dirName, dir, sysFileParam, bind, sizeKB, userId); } void cellSysutil_SaveData_init() { REG_VAR(cellSysutil, g_savedata_context).flag(MFF_HIDDEN); // libsysutil functions: REG_FUNC(cellSysutil, cellSaveDataEnableOverlay); REG_FUNC(cellSysutil, cellSaveDataDelete2); REG_FUNC(cellSysutil, cellSaveDataDelete); REG_FUNC(cellSysutil, cellSaveDataUserFixedDelete); REG_FUNC(cellSysutil, cellSaveDataFixedDelete); REG_FUNC(cellSysutil, cellSaveDataUserFixedLoad); REG_FUNC(cellSysutil, cellSaveDataUserFixedSave); REG_FUNC(cellSysutil, cellSaveDataFixedLoad2); REG_FUNC(cellSysutil, cellSaveDataFixedSave2); REG_FUNC(cellSysutil, cellSaveDataFixedLoad); REG_FUNC(cellSysutil, cellSaveDataFixedSave); 
REG_FUNC(cellSysutil, cellSaveDataUserListLoad); REG_FUNC(cellSysutil, cellSaveDataUserListSave); REG_FUNC(cellSysutil, cellSaveDataListLoad2); REG_FUNC(cellSysutil, cellSaveDataListSave2); REG_FUNC(cellSysutil, cellSaveDataListLoad); REG_FUNC(cellSysutil, cellSaveDataListSave); REG_FUNC(cellSysutil, cellSaveDataUserListAutoLoad); REG_FUNC(cellSysutil, cellSaveDataUserListAutoSave); REG_FUNC(cellSysutil, cellSaveDataListAutoLoad); REG_FUNC(cellSysutil, cellSaveDataListAutoSave); REG_FUNC(cellSysutil, cellSaveDataUserAutoLoad); REG_FUNC(cellSysutil, cellSaveDataUserAutoSave); REG_FUNC(cellSysutil, cellSaveDataAutoLoad2); REG_FUNC(cellSysutil, cellSaveDataAutoSave2); REG_FUNC(cellSysutil, cellSaveDataAutoLoad); REG_FUNC(cellSysutil, cellSaveDataAutoSave); } DECLARE(ppu_module_manager::cellSaveData)("cellSaveData", []() { // libsysutil_savedata functions: REG_FUNC(cellSaveData, cellSaveDataUserGetListItem); REG_FUNC(cellSaveData, cellSaveDataGetListItem); REG_FUNC(cellSaveData, cellSaveDataUserListDelete); REG_FUNC(cellSaveData, cellSaveDataListDelete); REG_FUNC(cellSaveData, cellSaveDataUserFixedExport); REG_FUNC(cellSaveData, cellSaveDataUserFixedImport); REG_FUNC(cellSaveData, cellSaveDataUserListExport); REG_FUNC(cellSaveData, cellSaveDataUserListImport); REG_FUNC(cellSaveData, cellSaveDataFixedExport); REG_FUNC(cellSaveData, cellSaveDataFixedImport); REG_FUNC(cellSaveData, cellSaveDataListExport); REG_FUNC(cellSaveData, cellSaveDataListImport); }); DECLARE(ppu_module_manager::cellMinisSaveData)("cellMinisSaveData", []() { // libsysutil_savedata_psp functions: //REG_FUNC(cellMinisSaveData, cellMinisSaveDataDelete); // 0x6eb168b3 //REG_FUNC(cellMinisSaveData, cellMinisSaveDataListDelete); // 0xe63eb964 //REG_FUNC(cellMinisSaveData, cellMinisSaveDataFixedLoad); // 0x66515c18 //REG_FUNC(cellMinisSaveData, cellMinisSaveDataFixedSave); // 0xf3f974b8 //REG_FUNC(cellMinisSaveData, cellMinisSaveDataListLoad); // 0xba161d45 //REG_FUNC(cellMinisSaveData, 
cellMinisSaveDataListSave); // 0xa342a73f //REG_FUNC(cellMinisSaveData, cellMinisSaveDataListAutoLoad); // 0x22f2a553 //REG_FUNC(cellMinisSaveData, cellMinisSaveDataListAutoSave); // 0xa931356e //REG_FUNC(cellMinisSaveData, cellMinisSaveDataAutoLoad); // 0xfc3045d9 });
90,606
C++
.cpp
2,243
36.809184
231
0.695572
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
5,198
cellMsgDialog.cpp
RPCS3_rpcs3/rpcs3/Emu/Cell/Modules/cellMsgDialog.cpp
#include "stdafx.h" #include "Emu/System.h" #include "Emu/Cell/PPUModule.h" #include "Emu/Cell/PPUThread.h" #include "Emu/Cell/lv2/sys_sync.h" #include "Emu/Cell/timers.hpp" #include "Emu/Io/interception.h" #include "Emu/RSX/Overlays/overlay_manager.h" #include "Emu/RSX/Overlays/overlay_message_dialog.h" #include "cellSysutil.h" #include "cellMsgDialog.h" #include <thread> #include "util/init_mutex.hpp" LOG_CHANNEL(cellSysutil); template<> void fmt_class_string<CellMsgDialogError>::format(std::string& out, u64 arg) { format_enum(out, arg, [](auto error) { switch (error) { STR_CASE(CELL_MSGDIALOG_ERROR_PARAM); STR_CASE(CELL_MSGDIALOG_ERROR_DIALOG_NOT_OPENED); } return unknown; }); } template<> void fmt_class_string<msg_dialog_source>::format(std::string& out, u64 arg) { format_enum(out, arg, [](auto src) { switch (src) { case msg_dialog_source::_cellMsgDialog: return "cellMsgDialog"; case msg_dialog_source::_cellSaveData: return "cellSaveData"; case msg_dialog_source::_cellGame: return "cellGame"; case msg_dialog_source::_cellCrossController: return "cellCrossController"; case msg_dialog_source::_sceNp: return "sceNp"; case msg_dialog_source::_sceNpTrophy: return "sceNpTrophy"; case msg_dialog_source::sys_progress: return "sys_progress"; case msg_dialog_source::shader_loading: return "shader_loading"; } return unknown; }); } MsgDialogBase::~MsgDialogBase() { } struct msg_info { std::shared_ptr<MsgDialogBase> dlg; stx::init_mutex init; // Emulate fxm as if it's some sort of museum std::shared_ptr<MsgDialogBase> make() noexcept { const auto init_lock = init.init(); if (!init_lock) { return nullptr; } dlg = Emu.GetCallbacks().get_msg_dialog(); return dlg; } std::shared_ptr<MsgDialogBase> get() noexcept { const auto init_lock = init.access(); if (!init_lock) { return nullptr; } return dlg; } void remove() noexcept { const auto init_lock = init.reset(); if (!init_lock) { return; } dlg.reset(); } }; struct msg_dlg_thread_info { atomic_t<u64> wait_until = 0; void 
operator()() { while (thread_ctrl::state() != thread_state::aborting) { const u64 new_value = wait_until.load(); if (new_value == 0) { thread_ctrl::wait_on(wait_until, 0); continue; } while (get_guest_system_time() < new_value) { if (wait_until.load() != new_value) break; thread_ctrl::wait_for(10'000); } if (auto manager = g_fxo->try_get<rsx::overlays::display_manager>()) { if (auto dlg = manager->get<rsx::overlays::message_dialog>()) { if (!wait_until.compare_and_swap_test(new_value, 0)) { continue; } dlg->close(true, true); } } else if (const auto dlg = g_fxo->get<msg_info>().get()) { if (!wait_until.compare_and_swap_test(new_value, 0)) { continue; } dlg->on_close(CELL_MSGDIALOG_BUTTON_NONE); } } } static constexpr auto thread_name = "MsgDialog Close Thread"sv; }; using msg_dlg_thread = named_thread<msg_dlg_thread_info>; // forward declaration for open_msg_dialog error_code cellMsgDialogOpen2(u32 type, vm::cptr<char> msgString, vm::ptr<CellMsgDialogCallback> callback, vm::ptr<void> userData, vm::ptr<void> extParam); // wrapper to call for other hle dialogs error_code open_msg_dialog(bool is_blocking, u32 type, vm::cptr<char> msgString, msg_dialog_source source, vm::ptr<CellMsgDialogCallback> callback, vm::ptr<void> userData, vm::ptr<void> extParam, s32* return_code) { cellSysutil.notice("open_msg_dialog(is_blocking=%d, type=0x%x, msgString=%s, source=%s, callback=*0x%x, userData=*0x%x, extParam=*0x%x, return_code=*0x%x)", is_blocking, type, msgString, source, callback, userData, extParam, return_code); const MsgDialogType _type{ type }; if (return_code) { *return_code = CELL_MSGDIALOG_BUTTON_NONE; } if (auto manager = g_fxo->try_get<rsx::overlays::display_manager>()) { if (manager->get<rsx::overlays::message_dialog>()) { return CELL_SYSUTIL_ERROR_BUSY; } if (s32 ret = sysutil_send_system_cmd(CELL_SYSUTIL_DRAWING_BEGIN, 0); ret < 0) { return CellSysutilError{ret + 0u}; } const auto notify = std::make_shared<atomic_t<u32>>(0); const auto res = 
manager->create<rsx::overlays::message_dialog>()->show(is_blocking, msgString.get_ptr(), _type, source, [callback, userData, &return_code, is_blocking, notify](s32 status) { if (is_blocking && return_code) { *return_code = status; } sysutil_send_system_cmd(CELL_SYSUTIL_DRAWING_END, 0); if (callback) { sysutil_register_cb([=](ppu_thread& ppu) -> s32 { callback(ppu, status, userData); return CELL_OK; }); } if (is_blocking && notify) { *notify = 1; notify->notify_one(); } }); // Wait for on_close while (is_blocking && !Emu.IsStopped() && !*notify) { notify->wait(false, atomic_wait_timeout{1'000'000}); } return res; } const auto dlg = g_fxo->get<msg_info>().make(); if (!dlg) { return CELL_SYSUTIL_ERROR_BUSY; } if (s32 ret = sysutil_send_system_cmd(CELL_SYSUTIL_DRAWING_BEGIN, 0); ret < 0) { return CellSysutilError{ret + 0u}; } dlg->type = _type; dlg->source = source; dlg->on_close = [callback, userData, is_blocking, &return_code, wptr = std::weak_ptr<MsgDialogBase>(dlg)](s32 status) { if (is_blocking && return_code) { *return_code = status; } const auto dlg = wptr.lock(); if (dlg && dlg->state.compare_and_swap_test(MsgDialogState::Open, MsgDialogState::Close)) { sysutil_send_system_cmd(CELL_SYSUTIL_DRAWING_END, 0); if (callback) { sysutil_register_cb([=](ppu_thread& ppu) -> s32 { callback(ppu, status, userData); return CELL_OK; }); } g_fxo->get<msg_dlg_thread>().wait_until = 0; g_fxo->get<msg_info>().remove(); } input::SetIntercepted(false); }; input::SetIntercepted(true); auto& ppu = *get_current_cpu_thread(); lv2_obj::sleep(ppu); // PS3 memory must not be accessed by Main thread std::string msg_string = msgString.get_ptr(); // Run asynchronously in GUI thread Emu.CallFromMainThread([&, msg_string = std::move(msg_string)]() { dlg->Create(msg_string); lv2_obj::awake(&ppu); }); while (auto state = ppu.state.fetch_sub(cpu_flag::signal)) { if (is_stopped(state)) { return {}; } if (state & cpu_flag::signal) { break; } ppu.state.wait(state); } if (is_blocking) { while (auto 
dlg = g_fxo->get<msg_info>().get()) { if (Emu.IsStopped() || dlg->state != MsgDialogState::Open) { break; } std::this_thread::yield(); } } return CELL_OK; } void close_msg_dialog() { cellSysutil.notice("close_msg_dialog()"); if (auto manager = g_fxo->try_get<rsx::overlays::display_manager>()) { if (auto dlg = manager->get<rsx::overlays::message_dialog>()) { g_fxo->get<msg_dlg_thread>().wait_until = 0; dlg->close(false, true); // this doesn't call on_close sysutil_send_system_cmd(CELL_SYSUTIL_DRAWING_END, 0); return; } } if (const auto dlg = g_fxo->get<msg_info>().get()) { dlg->state = MsgDialogState::Close; g_fxo->get<msg_dlg_thread>().wait_until = 0; g_fxo->get<msg_info>().remove(); // this shouldn't call on_close input::SetIntercepted(false); // so we need to reenable the pads here sysutil_send_system_cmd(CELL_SYSUTIL_DRAWING_END, 0); } } void exit_game(s32/* buttonType*/, vm::ptr<void>/* userData*/) { sysutil_send_system_cmd(CELL_SYSUTIL_REQUEST_EXITGAME, 0); } error_code open_exit_dialog(const std::string& message, bool is_exit_requested, msg_dialog_source source) { cellSysutil.notice("open_exit_dialog(message=%s, is_exit_requested=%d, source=%s)", message, is_exit_requested, source); vm::bptr<CellMsgDialogCallback> callback = vm::null; if (is_exit_requested) { callback.set(g_fxo->get<ppu_function_manager>().func_addr(FIND_FUNC(exit_game))); } const error_code res = open_msg_dialog ( true, CELL_MSGDIALOG_TYPE_SE_TYPE_ERROR | CELL_MSGDIALOG_TYPE_BUTTON_TYPE_OK | CELL_MSGDIALOG_TYPE_DISABLE_CANCEL_ON, vm::make_str(message), source, callback ); if (res != CELL_OK) { // Something went wrong, exit anyway if (is_exit_requested) { sysutil_send_system_cmd(CELL_SYSUTIL_REQUEST_EXITGAME, 0); } } return CELL_OK; } error_code cellMsgDialogOpen2(u32 type, vm::cptr<char> msgString, vm::ptr<CellMsgDialogCallback> callback, vm::ptr<void> userData, vm::ptr<void> extParam) { cellSysutil.warning("cellMsgDialogOpen2(type=0x%x, msgString=%s, callback=*0x%x, userData=*0x%x, 
extParam=*0x%x)", type, msgString, callback, userData, extParam); if (!msgString || std::strlen(msgString.get_ptr()) >= CELL_MSGDIALOG_STRING_SIZE || type & -0x33f8) { return CELL_MSGDIALOG_ERROR_PARAM; } const MsgDialogType _type = {type ^ CELL_MSGDIALOG_TYPE_BG_INVISIBLE}; switch (_type.button_type.unshifted()) { case CELL_MSGDIALOG_TYPE_BUTTON_TYPE_NONE: { if (_type.default_cursor || _type.progress_bar_count > 2) { return CELL_MSGDIALOG_ERROR_PARAM; } break; } case CELL_MSGDIALOG_TYPE_BUTTON_TYPE_YESNO: { if (_type.default_cursor > 1 || _type.progress_bar_count) { return CELL_MSGDIALOG_ERROR_PARAM; } break; } case CELL_MSGDIALOG_TYPE_BUTTON_TYPE_OK: { if (_type.default_cursor || _type.progress_bar_count) { return CELL_MSGDIALOG_ERROR_PARAM; } break; } default: return CELL_MSGDIALOG_ERROR_PARAM; } if (_type.se_normal) { cellSysutil.warning("Opening message dialog with message: %s", msgString); } else { cellSysutil.error("Opening error message dialog with message: %s", msgString); } return open_msg_dialog(false, type, msgString, msg_dialog_source::_cellMsgDialog, callback, userData, extParam); } error_code cellMsgDialogOpen(u32 type, vm::cptr<char> msgString, vm::ptr<CellMsgDialogCallback> callback, vm::ptr<void> userData, vm::ptr<void> extParam) { // Note: This function needs proper implementation, solve first argument "type" conflict with MsgDialogOpen2 in cellMsgDialog.h. 
cellSysutil.todo("cellMsgDialogOpen(type=0x%x, msgString=%s, callback=*0x%x, userData=*0x%x, extParam=*0x%x)", type, msgString, callback, userData, extParam); return cellMsgDialogOpen2(type, msgString, callback, userData, extParam); } error_code cellMsgDialogOpenErrorCode(u32 errorCode, vm::ptr<CellMsgDialogCallback> callback, vm::ptr<void> userData, vm::ptr<void> extParam) { cellSysutil.warning("cellMsgDialogOpenErrorCode(errorCode=0x%x, callback=*0x%x, userData=*0x%x, extParam=*0x%x)", errorCode, callback, userData, extParam); localized_string_id string_id = localized_string_id::INVALID; switch (errorCode) { case 0x80010001: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_80010001; break; // The resource is temporarily unavailable. case 0x80010002: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_80010002; break; // Invalid argument or flag. case 0x80010003: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_80010003; break; // The feature is not yet implemented. case 0x80010004: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_80010004; break; // Memory allocation failed. case 0x80010005: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_80010005; break; // The resource with the specified identifier does not exist. case 0x80010006: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_80010006; break; // The file does not exist. case 0x80010007: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_80010007; break; // The file is in unrecognized format / The file is not a valid ELF file. case 0x80010008: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_80010008; break; // Resource deadlock is avoided. case 0x80010009: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_80010009; break; // Operation not permitted. case 0x8001000A: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_8001000A; break; // The device or resource is busy. 
case 0x8001000B: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_8001000B; break; // The operation is timed out. case 0x8001000C: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_8001000C; break; // The operation is aborted. case 0x8001000D: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_8001000D; break; // Invalid memory access. case 0x8001000F: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_8001000F; break; // State of the target thread is invalid. case 0x80010010: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_80010010; break; // Alignment is invalid. case 0x80010011: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_80010011; break; // Shortage of the kernel resources. case 0x80010012: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_80010012; break; // The file is a directory. case 0x80010013: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_80010013; break; // Operation cancelled. case 0x80010014: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_80010014; break; // Entry already exists. case 0x80010015: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_80010015; break; // Port is already connected. case 0x80010016: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_80010016; break; // Port is not connected. case 0x80010017: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_80010017; break; // Failure in authorizing SELF. Program authentication fail. case 0x80010018: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_80010018; break; // The file is not MSELF. case 0x80010019: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_80010019; break; // System version error. case 0x8001001A: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_8001001A; break; // Fatal system error occurred while authorizing SELF. SELF auth failure. case 0x8001001B: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_8001001B; break; // Math domain violation. 
case 0x8001001C: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_8001001C; break; // Math range violation. case 0x8001001D: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_8001001D; break; // Illegal multi-byte sequence in input. case 0x8001001E: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_8001001E; break; // File position error. case 0x8001001F: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_8001001F; break; // Syscall was interrupted. case 0x80010020: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_80010020; break; // File too large. case 0x80010021: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_80010021; break; // Too many links. case 0x80010022: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_80010022; break; // File table overflow. case 0x80010023: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_80010023; break; // No space left on device. case 0x80010024: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_80010024; break; // Not a TTY. case 0x80010025: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_80010025; break; // Broken pipe. case 0x80010026: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_80010026; break; // Read-only filesystem. case 0x80010027: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_80010027; break; // Illegal seek. case 0x80010028: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_80010028; break; // Arg list too long. case 0x80010029: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_80010029; break; // Access violation. case 0x8001002A: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_8001002A; break; // Invalid file descriptor. case 0x8001002B: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_8001002B; break; // Filesystem mounting failed. case 0x8001002C: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_8001002C; break; // Too many files open. 
case 0x8001002D: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_8001002D; break; // No device. case 0x8001002E: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_8001002E; break; // Not a directory. case 0x8001002F: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_8001002F; break; // No such device or IO. case 0x80010030: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_80010030; break; // Cross-device link error. case 0x80010031: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_80010031; break; // Bad Message. case 0x80010032: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_80010032; break; // In progress. case 0x80010033: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_80010033; break; // Message size error. case 0x80010034: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_80010034; break; // Name too long. case 0x80010035: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_80010035; break; // No lock. case 0x80010036: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_80010036; break; // Not empty. case 0x80010037: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_80010037; break; // Not supported. case 0x80010038: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_80010038; break; // File-system specific error. case 0x80010039: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_80010039; break; // Overflow occured. case 0x8001003A: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_8001003A; break; // Filesystem not mounted. case 0x8001003B: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_8001003B; break; // Not SData. case 0x8001003C: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_8001003C; break; // Incorrect version in sys_load_param. case 0x8001003D: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_8001003D; break; // Pointer is null. case 0x8001003E: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_8001003E; break; // Pointer is null. 
default: string_id = localized_string_id::CELL_MSG_DIALOG_ERROR_DEFAULT; break; // An error has occurred. } const std::string error = get_localized_string(string_id, fmt::format("%08x", errorCode).c_str()); return cellMsgDialogOpen2(CELL_MSGDIALOG_TYPE_SE_TYPE_ERROR | CELL_MSGDIALOG_TYPE_BUTTON_TYPE_OK, vm::make_str(error), callback, userData, extParam); } error_code cellMsgDialogClose(f32 delay) { cellSysutil.warning("cellMsgDialogClose(delay=%f)", delay); const u64 wait_until = get_guest_system_time() + static_cast<s64>(std::max<float>(delay, 0.0f) * 1000); if (auto manager = g_fxo->try_get<rsx::overlays::display_manager>()) { if (auto dlg = manager->get<rsx::overlays::message_dialog>(); dlg && dlg->source() == msg_dialog_source::_cellMsgDialog) { auto& thr = g_fxo->get<msg_dlg_thread>(); thr.wait_until = wait_until; thr.wait_until.notify_one(); return CELL_OK; } return CELL_MSGDIALOG_ERROR_DIALOG_NOT_OPENED; } const auto dlg = g_fxo->get<msg_info>().get(); if (!dlg || dlg->source != msg_dialog_source::_cellMsgDialog) { return CELL_MSGDIALOG_ERROR_DIALOG_NOT_OPENED; } auto& thr = g_fxo->get<msg_dlg_thread>(); thr.wait_until = wait_until; thr.wait_until.notify_one(); return CELL_OK; } error_code cellMsgDialogAbort() { cellSysutil.warning("cellMsgDialogAbort()"); if (auto manager = g_fxo->try_get<rsx::overlays::display_manager>()) { if (auto dlg = manager->get<rsx::overlays::message_dialog>(); dlg && dlg->source() == msg_dialog_source::_cellMsgDialog) { g_fxo->get<msg_dlg_thread>().wait_until = 0; dlg->close(false, true); // this doesn't call on_close sysutil_send_system_cmd(CELL_SYSUTIL_DRAWING_END, 0); return CELL_OK; } return CELL_OK; // Not CELL_MSGDIALOG_ERROR_DIALOG_NOT_OPENED, tested on HW. } const auto dlg = g_fxo->get<msg_info>().get(); if (!dlg || dlg->source != msg_dialog_source::_cellMsgDialog) { return CELL_OK; // Not CELL_MSGDIALOG_ERROR_DIALOG_NOT_OPENED, tested on HW. 
} if (!dlg->state.compare_and_swap_test(MsgDialogState::Open, MsgDialogState::Abort)) { return CELL_SYSUTIL_ERROR_BUSY; } g_fxo->get<msg_dlg_thread>().wait_until = 0; g_fxo->get<msg_info>().remove(); // this shouldn't call on_close input::SetIntercepted(false); // so we need to reenable the pads here sysutil_send_system_cmd(CELL_SYSUTIL_DRAWING_END, 0); return CELL_OK; } error_code cellMsgDialogOpenSimulViewWarning(vm::ptr<CellMsgDialogCallback> callback, vm::ptr<void> userData, vm::ptr<void> extParam) { cellSysutil.todo("cellMsgDialogOpenSimulViewWarning(callback=*0x%x, userData=*0x%x, extParam=*0x%x)", callback, userData, extParam); error_code ret = cellMsgDialogOpen2(CELL_MSGDIALOG_TYPE_SE_TYPE_NORMAL | CELL_MSGDIALOG_TYPE_BUTTON_TYPE_OK, vm::make_str("SimulView Warning"), callback, userData, extParam); // The dialog should ideally only be closeable by pressing ok after 3 seconds until it closes itself automatically after 5 seconds if (ret == CELL_OK) cellMsgDialogClose(5000.0f); return ret; } error_code cellMsgDialogProgressBarSetMsg(u32 progressBarIndex, vm::cptr<char> msgString) { cellSysutil.warning("cellMsgDialogProgressBarSetMsg(progressBarIndex=%d, msgString=%s)", progressBarIndex, msgString); if (!msgString || std::strlen(msgString.get_ptr()) >= CELL_MSGDIALOG_PROGRESSBAR_STRING_SIZE) { return CELL_MSGDIALOG_ERROR_PARAM; } if (auto manager = g_fxo->try_get<rsx::overlays::display_manager>()) { if (auto dlg = manager->get<rsx::overlays::message_dialog>()) { return dlg->progress_bar_set_message(progressBarIndex, msgString.get_ptr()); } } const auto dlg = g_fxo->get<msg_info>().get(); if (!dlg) { return CELL_MSGDIALOG_ERROR_DIALOG_NOT_OPENED; } if (progressBarIndex >= dlg->type.progress_bar_count) { return CELL_MSGDIALOG_ERROR_PARAM; } Emu.CallFromMainThread([=, msg = std::string{ msgString.get_ptr() }] { dlg->ProgressBarSetMsg(progressBarIndex, msg); }); return CELL_OK; } error_code cellMsgDialogProgressBarReset(u32 progressBarIndex) { 
cellSysutil.warning("cellMsgDialogProgressBarReset(progressBarIndex=%d)", progressBarIndex); if (auto manager = g_fxo->try_get<rsx::overlays::display_manager>()) { if (auto dlg = manager->get<rsx::overlays::message_dialog>()) { return dlg->progress_bar_reset(progressBarIndex); } } const auto dlg = g_fxo->get<msg_info>().get(); if (!dlg) { return CELL_MSGDIALOG_ERROR_DIALOG_NOT_OPENED; } if (progressBarIndex >= dlg->type.progress_bar_count) { return CELL_MSGDIALOG_ERROR_PARAM; } Emu.CallFromMainThread([=] { dlg->ProgressBarReset(progressBarIndex); }); return CELL_OK; } error_code cellMsgDialogProgressBarInc(u32 progressBarIndex, u32 delta) { cellSysutil.warning("cellMsgDialogProgressBarInc(progressBarIndex=%d, delta=%d)", progressBarIndex, delta); if (auto manager = g_fxo->try_get<rsx::overlays::display_manager>()) { if (auto dlg = manager->get<rsx::overlays::message_dialog>()) { return dlg->progress_bar_increment(progressBarIndex, static_cast<f32>(delta)); } } const auto dlg = g_fxo->get<msg_info>().get(); if (!dlg) { return CELL_MSGDIALOG_ERROR_DIALOG_NOT_OPENED; } if (progressBarIndex >= dlg->type.progress_bar_count) { return CELL_MSGDIALOG_ERROR_PARAM; } Emu.CallFromMainThread([=] { dlg->ProgressBarInc(progressBarIndex, delta); }); return CELL_OK; } void cellSysutil_MsgDialog_init() { REG_FUNC(cellSysutil, cellMsgDialogOpen); REG_FUNC(cellSysutil, cellMsgDialogOpen2); REG_FUNC(cellSysutil, cellMsgDialogOpenErrorCode); REG_FUNC(cellSysutil, cellMsgDialogOpenSimulViewWarning); REG_FUNC(cellSysutil, cellMsgDialogProgressBarSetMsg); REG_FUNC(cellSysutil, cellMsgDialogProgressBarReset); REG_FUNC(cellSysutil, cellMsgDialogProgressBarInc); REG_FUNC(cellSysutil, cellMsgDialogClose); REG_FUNC(cellSysutil, cellMsgDialogAbort); // Helper Function REG_HIDDEN_FUNC(exit_game); }
23,852
C++
.cpp
580
38.367241
239
0.731017
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
5,199
cellMusic.cpp
RPCS3_rpcs3/rpcs3/Emu/Cell/Modules/cellMusic.cpp
#include "stdafx.h" #include "Emu/Cell/PPUModule.h" #include "Emu/Cell/lv2/sys_lwmutex.h" #include "Emu/Cell/lv2/sys_lwcond.h" #include "Emu/Cell/lv2/sys_spu.h" #include "Emu/Io/music_handler_base.h" #include "Emu/System.h" #include "Emu/VFS.h" #include "Emu/RSX/Overlays/overlay_media_list_dialog.h" #include "cellSearch.h" #include "cellSpurs.h" #include "cellSysutil.h" #include "cellMusic.h" LOG_CHANNEL(cellMusic); template<> void fmt_class_string<CellMusicError>::format(std::string& out, u64 arg) { format_enum(out, arg, [](auto error) { switch (error) { STR_CASE(CELL_MUSIC_CANCELED); STR_CASE(CELL_MUSIC_PLAYBACK_FINISHED); STR_CASE(CELL_MUSIC_ERROR_PARAM); STR_CASE(CELL_MUSIC_ERROR_BUSY); STR_CASE(CELL_MUSIC_ERROR_NO_ACTIVE_CONTENT); STR_CASE(CELL_MUSIC_ERROR_NO_MATCH_FOUND); STR_CASE(CELL_MUSIC_ERROR_INVALID_CONTEXT); STR_CASE(CELL_MUSIC_ERROR_PLAYBACK_FAILURE); STR_CASE(CELL_MUSIC_ERROR_NO_MORE_CONTENT); STR_CASE(CELL_MUSIC_DIALOG_OPEN); STR_CASE(CELL_MUSIC_DIALOG_CLOSE); STR_CASE(CELL_MUSIC_ERROR_GENERIC); } return unknown; }); } template<> void fmt_class_string<CellMusic2Error>::format(std::string& out, u64 arg) { format_enum(out, arg, [](auto error) { switch (error) { STR_CASE(CELL_MUSIC2_CANCELED); STR_CASE(CELL_MUSIC2_PLAYBACK_FINISHED); STR_CASE(CELL_MUSIC2_ERROR_PARAM); STR_CASE(CELL_MUSIC2_ERROR_BUSY); STR_CASE(CELL_MUSIC2_ERROR_NO_ACTIVE_CONTENT); STR_CASE(CELL_MUSIC2_ERROR_NO_MATCH_FOUND); STR_CASE(CELL_MUSIC2_ERROR_INVALID_CONTEXT); STR_CASE(CELL_MUSIC2_ERROR_PLAYBACK_FAILURE); STR_CASE(CELL_MUSIC2_ERROR_NO_MORE_CONTENT); STR_CASE(CELL_MUSIC2_DIALOG_OPEN); STR_CASE(CELL_MUSIC2_DIALOG_CLOSE); STR_CASE(CELL_MUSIC2_ERROR_GENERIC); } return unknown; }); } struct music_state { shared_mutex mutex; vm::ptr<void(u32 event, vm::ptr<void> param, vm::ptr<void> userData)> func{}; vm::ptr<void> userData{}; shared_mutex mtx; std::shared_ptr<music_handler_base> handler; music_selection_context current_selection_context{}; SAVESTATE_INIT_POS(16); music_state() { 
handler = Emu.GetCallbacks().get_music_handler(); handler->set_status_callback([this](music_handler_base::player_status status) { // TODO: disabled until I find a game that uses CELL_MUSIC_EVENT_STATUS_NOTIFICATION return; if (!func) { return; } s32 result = CELL_OK; switch (status) { case music_handler_base::player_status::end_of_media: result = CELL_MUSIC_PLAYBACK_FINISHED; break; default: return; } sysutil_register_cb([this, &result](ppu_thread& ppu) -> s32 { cellMusic.notice("Sending status notification %d", result); func(ppu, CELL_MUSIC_EVENT_STATUS_NOTIFICATION, vm::addr_t(result), userData); return CELL_OK; }); }); } music_state(utils::serial& ar) : music_state() { save(ar); } void save(utils::serial& ar) { ar(func); if (!func) { return; } GET_OR_USE_SERIALIZATION_VERSION(ar.is_writing(), cellMusic); ar(userData); } // NOTE: This function only uses CELL_MUSIC enums. CELL_MUSIC2 enums are identical. error_code set_playback_command(s32 command) { switch (command) { case CELL_MUSIC_PB_CMD_STOP: handler->stop(); break; case CELL_MUSIC_PB_CMD_PAUSE: handler->pause(); break; case CELL_MUSIC_PB_CMD_PLAY: case CELL_MUSIC_PB_CMD_FASTFORWARD: case CELL_MUSIC_PB_CMD_FASTREVERSE: case CELL_MUSIC_PB_CMD_NEXT: case CELL_MUSIC_PB_CMD_PREV: { std::string path; bool no_more_tracks = false; { std::lock_guard lock(mtx); const std::vector<std::string>& playlist = current_selection_context.playlist; const u32 current_track = current_selection_context.current_track; u32 next_track = current_track; if (command == CELL_MUSIC_PB_CMD_NEXT || command == CELL_MUSIC_PB_CMD_PREV) { next_track = current_selection_context.step_track(command == CELL_MUSIC_PB_CMD_NEXT); } if (next_track < playlist.size()) { path = vfs::get(::at32(playlist, next_track)); cellMusic.notice("set_playback_command: current vfs path: '%s' (unresolved='%s')", path, ::at32(playlist, next_track)); } else { current_selection_context.current_track = current_track; no_more_tracks = true; } } if (no_more_tracks) { 
cellMusic.notice("set_playback_command: no more tracks to play"); return CELL_MUSIC_ERROR_NO_MORE_CONTENT; } switch (command) { case CELL_MUSIC_PB_CMD_FASTFORWARD: handler->fast_forward(path); break; case CELL_MUSIC_PB_CMD_FASTREVERSE: handler->fast_reverse(path); break; default: handler->play(path); break; } break; } default: break; } return CELL_OK; } }; error_code cell_music_select_contents() { auto& music = g_fxo->get<music_state>(); if (!music.func) return CELL_MUSIC_ERROR_GENERIC; const std::string vfs_dir_path = vfs::get("/dev_hdd0/music"); const std::string title = get_localized_string(localized_string_id::RSX_OVERLAYS_MEDIA_DIALOG_TITLE); error_code error = rsx::overlays::show_media_list_dialog(rsx::overlays::media_list_dialog::media_type::audio, vfs_dir_path, title, [&music](s32 status, utils::media_info info) { sysutil_register_cb([&music, info, status](ppu_thread& ppu) -> s32 { std::lock_guard lock(music.mtx); const u32 result = status >= 0 ? u32{CELL_OK} : u32{CELL_MUSIC_CANCELED}; if (result == CELL_OK) { music_selection_context context{}; context.set_playlist(info.path); // TODO: context.repeat_mode = CELL_SEARCH_REPEATMODE_NONE; // TODO: context.context_option = CELL_SEARCH_CONTEXTOPTION_NONE; music.current_selection_context = context; music.current_selection_context.create_playlist(music_selection_context::get_next_hash()); cellMusic.success("Media list dialog: selected entry '%s'", context.playlist.front()); } else { cellMusic.warning("Media list dialog was canceled"); } music.func(ppu, CELL_MUSIC_EVENT_SELECT_CONTENTS_RESULT, vm::addr_t(result), music.userData); return CELL_OK; }); }); return error; } error_code cellMusicGetSelectionContext(vm::ptr<CellMusicSelectionContext> context) { cellMusic.todo("cellMusicGetSelectionContext(context=*0x%x)", context); if (!context) return CELL_MUSIC_ERROR_PARAM; auto& music = g_fxo->get<music_state>(); std::lock_guard lock(music.mtx); if (!music.current_selection_context) { return 
CELL_MUSIC_ERROR_NO_ACTIVE_CONTENT; } *context = music.current_selection_context.get(); cellMusic.success("cellMusicGetSelectionContext: selection context = %s", music.current_selection_context.to_string()); return CELL_OK; } error_code cellMusicSetSelectionContext2(vm::ptr<CellMusicSelectionContext> context) { cellMusic.todo("cellMusicSetSelectionContext2(context=*0x%x)", context); if (!context) return CELL_MUSIC2_ERROR_PARAM; auto& music = g_fxo->get<music_state>(); if (!music.func) return CELL_MUSIC2_ERROR_GENERIC; sysutil_register_cb([context = *context, &music](ppu_thread& ppu) -> s32 { bool result = false; { std::lock_guard lock(music.mtx); result = music.current_selection_context.set(context); } const u32 status = result ? u32{CELL_OK} : u32{CELL_MUSIC2_ERROR_INVALID_CONTEXT}; if (result) cellMusic.success("cellMusicSetSelectionContext2: new selection context = %s", music.current_selection_context.to_string()); else cellMusic.todo("cellMusicSetSelectionContext2: failed. context = %s", music_selection_context::context_to_hex(context)); music.func(ppu, CELL_MUSIC2_EVENT_SET_SELECTION_CONTEXT_RESULT, vm::addr_t(status), music.userData); return CELL_OK; }); return CELL_OK; } error_code cellMusicSetVolume2(f32 level) { cellMusic.warning("cellMusicSetVolume2(level=%f)", level); level = std::clamp(level, 0.0f, 1.0f); auto& music = g_fxo->get<music_state>(); if (!music.func) return CELL_MUSIC2_ERROR_GENERIC; music.handler->set_volume(level); sysutil_register_cb([=, &music](ppu_thread& ppu) -> s32 { music.func(ppu, CELL_MUSIC2_EVENT_SET_VOLUME_RESULT, vm::addr_t(CELL_OK), music.userData); return CELL_OK; }); return CELL_OK; } error_code cellMusicGetContentsId(vm::ptr<CellSearchContentId> contents_id) { cellMusic.todo("cellMusicGetContentsId(contents_id=*0x%x)", contents_id); if (!contents_id) return CELL_MUSIC_ERROR_PARAM; // HACKY auto& music = g_fxo->get<music_state>(); std::lock_guard lock(music.mtx); if (!music.current_selection_context) { return 
CELL_MUSIC_ERROR_NO_ACTIVE_CONTENT; } return music.current_selection_context.find_content_id(contents_id); } error_code cellMusicSetSelectionContext(vm::ptr<CellMusicSelectionContext> context) { cellMusic.todo("cellMusicSetSelectionContext(context=*0x%x)", context); if (!context) return CELL_MUSIC_ERROR_PARAM; auto& music = g_fxo->get<music_state>(); if (!music.func) return CELL_MUSIC_ERROR_GENERIC; sysutil_register_cb([context = *context, &music](ppu_thread& ppu) -> s32 { bool result = false; { std::lock_guard lock(music.mtx); result = music.current_selection_context.set(context); } const u32 status = result ? u32{CELL_OK} : u32{CELL_MUSIC_ERROR_INVALID_CONTEXT}; if (result) cellMusic.success("cellMusicSetSelectionContext: new selection context = %s)", music.current_selection_context.to_string()); else cellMusic.todo("cellMusicSetSelectionContext: failed. context = %s)", music_selection_context::context_to_hex(context)); music.func(ppu, CELL_MUSIC_EVENT_SET_SELECTION_CONTEXT_RESULT, vm::addr_t(status), music.userData); return CELL_OK; }); return CELL_OK; } error_code cellMusicInitialize2SystemWorkload(s32 mode, vm::ptr<CellMusic2Callback> func, vm::ptr<void> userData, vm::ptr<CellSpurs> spurs, vm::cptr<u8> priority, vm::cptr<struct CellSpursSystemWorkloadAttribute> attr) { cellMusic.todo("cellMusicInitialize2SystemWorkload(mode=0x%x, func=*0x%x, userData=*0x%x, spurs=*0x%x, priority=*0x%x, attr=*0x%x)", mode, func, userData, spurs, priority, attr); if (mode != CELL_MUSIC2_PLAYER_MODE_NORMAL || !func || !spurs || !priority) { return CELL_MUSIC2_ERROR_PARAM; } auto& music = g_fxo->get<music_state>(); music.func = func; music.userData = userData; music.current_selection_context = {}; sysutil_register_cb([=, &music](ppu_thread& ppu) -> s32 { music.func(ppu, CELL_MUSIC2_EVENT_INITIALIZE_RESULT, vm::addr_t(CELL_OK), userData); return CELL_OK; }); return CELL_OK; } error_code cellMusicGetPlaybackStatus2(vm::ptr<s32> status) { 
cellMusic.warning("cellMusicGetPlaybackStatus2(status=*0x%x)", status); if (!status) return CELL_MUSIC2_ERROR_PARAM; const auto& music = g_fxo->get<music_state>(); *status = music.handler->get_state(); return CELL_OK; } error_code cellMusicGetContentsId2(vm::ptr<CellSearchContentId> contents_id) { cellMusic.todo("cellMusicGetContentsId2(contents_id=*0x%x)", contents_id); if (!contents_id) return CELL_MUSIC2_ERROR_PARAM; // HACKY auto& music = g_fxo->get<music_state>(); std::lock_guard lock(music.mtx); if (!music.current_selection_context) { return CELL_MUSIC2_ERROR_NO_ACTIVE_CONTENT; } return music.current_selection_context.find_content_id(contents_id); } error_code cellMusicFinalize() { cellMusic.todo("cellMusicFinalize()"); auto& music = g_fxo->get<music_state>(); if (music.func) { sysutil_register_cb([=, &music](ppu_thread& ppu) -> s32 { music.func(ppu, CELL_MUSIC_EVENT_FINALIZE_RESULT, vm::addr_t(CELL_OK), music.userData); return CELL_OK; }); } return CELL_OK; } error_code cellMusicInitializeSystemWorkload(s32 mode, u32 container, vm::ptr<CellMusicCallback> func, vm::ptr<void> userData, vm::ptr<CellSpurs> spurs, vm::cptr<u8> priority, vm::cptr<struct CellSpursSystemWorkloadAttribute> attr) { cellMusic.todo("cellMusicInitializeSystemWorkload(mode=0x%x, container=0x%x, func=*0x%x, userData=*0x%x, spurs=*0x%x, priority=*0x%x, attr=*0x%x)", mode, container, func, userData, spurs, priority, attr); if (mode != CELL_MUSIC2_PLAYER_MODE_NORMAL || !func || !spurs || !priority) { return CELL_MUSIC2_ERROR_PARAM; } auto& music = g_fxo->get<music_state>(); music.func = func; music.userData = userData; music.current_selection_context = {}; sysutil_register_cb([=, &music](ppu_thread& ppu) -> s32 { music.func(ppu, CELL_MUSIC_EVENT_INITIALIZE_RESULT, vm::addr_t(CELL_OK), userData); return CELL_OK; }); return CELL_OK; } error_code cellMusicInitialize(s32 mode, u32 container, s32 spuPriority, vm::ptr<CellMusicCallback> func, vm::ptr<void> userData) { 
cellMusic.todo("cellMusicInitialize(mode=0x%x, container=0x%x, spuPriority=0x%x, func=*0x%x, userData=*0x%x)", mode, container, spuPriority, func, userData); if (mode != CELL_MUSIC_PLAYER_MODE_NORMAL || !func || spuPriority < 16 || spuPriority > 255) { return CELL_MUSIC_ERROR_PARAM; } auto& music = g_fxo->get<music_state>(); music.func = func; music.userData = userData; music.current_selection_context = {}; sysutil_register_cb([=, &music](ppu_thread& ppu) -> s32 { music.func(ppu, CELL_MUSIC_EVENT_INITIALIZE_RESULT, vm::addr_t(CELL_OK), userData); return CELL_OK; }); return CELL_OK; } error_code cellMusicFinalize2() { cellMusic.todo("cellMusicFinalize2()"); auto& music = g_fxo->get<music_state>(); if (music.func) { sysutil_register_cb([=, &music](ppu_thread& ppu) -> s32 { music.func(ppu, CELL_MUSIC2_EVENT_FINALIZE_RESULT, vm::addr_t(CELL_OK), music.userData); return CELL_OK; }); } return CELL_OK; } error_code cellMusicGetSelectionContext2(vm::ptr<CellMusicSelectionContext> context) { cellMusic.todo("cellMusicGetSelectionContext2(context=*0x%x)", context); if (!context) return CELL_MUSIC2_ERROR_PARAM; auto& music = g_fxo->get<music_state>(); std::lock_guard lock(music.mtx); if (!music.current_selection_context) { return CELL_MUSIC2_ERROR_NO_ACTIVE_CONTENT; } *context = music.current_selection_context.get(); cellMusic.success("cellMusicGetSelectionContext2: selection context = %s", music.current_selection_context.to_string()); return CELL_OK; } error_code cellMusicGetVolume(vm::ptr<f32> level) { cellMusic.warning("cellMusicGetVolume(level=*0x%x)", level); if (!level) return CELL_MUSIC_ERROR_PARAM; const auto& music = g_fxo->get<music_state>(); *level = music.handler->get_volume(); return CELL_OK; } error_code cellMusicGetPlaybackStatus(vm::ptr<s32> status) { cellMusic.warning("cellMusicGetPlaybackStatus(status=*0x%x)", status); if (!status) return CELL_MUSIC_ERROR_PARAM; const auto& music = g_fxo->get<music_state>(); *status = music.handler->get_state(); return 
CELL_OK; } error_code cellMusicSetPlaybackCommand2(s32 command, vm::ptr<void> param) { cellMusic.todo("cellMusicSetPlaybackCommand2(command=0x%x, param=*0x%x)", command, param); if (command < CELL_MUSIC2_PB_CMD_STOP || command > CELL_MUSIC2_PB_CMD_FASTREVERSE) return CELL_MUSIC2_ERROR_PARAM; auto& music = g_fxo->get<music_state>(); if (!music.func) return CELL_MUSIC2_ERROR_GENERIC; error_code result = CELL_OK; if (!music.current_selection_context) { result = CELL_MUSIC_ERROR_GENERIC; } sysutil_register_cb([=, &music, prev_res = result](ppu_thread& ppu) -> s32 { const error_code result = prev_res ? prev_res : music.set_playback_command(command); music.func(ppu, CELL_MUSIC2_EVENT_SET_PLAYBACK_COMMAND_RESULT, vm::addr_t(+result), music.userData); return CELL_OK; }); return result; } error_code cellMusicSetPlaybackCommand(s32 command, vm::ptr<void> param) { cellMusic.todo("cellMusicSetPlaybackCommand(command=0x%x, param=*0x%x)", command, param); if (command < CELL_MUSIC_PB_CMD_STOP || command > CELL_MUSIC_PB_CMD_FASTREVERSE) return CELL_MUSIC_ERROR_PARAM; auto& music = g_fxo->get<music_state>(); if (!music.func) return CELL_MUSIC_ERROR_GENERIC; error_code result = CELL_OK; if (!music.current_selection_context) { result = CELL_MUSIC2_ERROR_GENERIC; } sysutil_register_cb([=, &music, prev_res = result](ppu_thread& ppu) -> s32 { const error_code result = prev_res ? 
prev_res : music.set_playback_command(command); music.func(ppu, CELL_MUSIC_EVENT_SET_PLAYBACK_COMMAND_RESULT, vm::addr_t(+result), music.userData); return CELL_OK; }); return result; } error_code cellMusicSelectContents2() { cellMusic.todo("cellMusicSelectContents2()"); return cell_music_select_contents(); } error_code cellMusicSelectContents(u32 container) { cellMusic.todo("cellMusicSelectContents(container=0x%x)", container); return cell_music_select_contents(); } error_code cellMusicInitialize2(s32 mode, s32 spuPriority, vm::ptr<CellMusic2Callback> func, vm::ptr<void> userData) { cellMusic.todo("cellMusicInitialize2(mode=%d, spuPriority=%d, func=*0x%x, userData=*0x%x)", mode, spuPriority, func, userData); if (mode != CELL_MUSIC_PLAYER_MODE_NORMAL || !func || spuPriority < 16 || spuPriority > 255) { return CELL_MUSIC2_ERROR_PARAM; } auto& music = g_fxo->get<music_state>(); music.func = func; music.userData = userData; music.current_selection_context = {}; sysutil_register_cb([=, &music](ppu_thread& ppu) -> s32 { music.func(ppu, CELL_MUSIC2_EVENT_INITIALIZE_RESULT, vm::addr_t(CELL_OK), userData); return CELL_OK; }); return CELL_OK; } error_code cellMusicSetVolume(f32 level) { cellMusic.warning("cellMusicSetVolume(level=%f)", level); level = std::clamp(level, 0.0f, 1.0f); auto& music = g_fxo->get<music_state>(); if (!music.func) return CELL_MUSIC_ERROR_GENERIC; music.handler->set_volume(level); sysutil_register_cb([=, &music](ppu_thread& ppu) -> s32 { music.func(ppu, CELL_MUSIC_EVENT_SET_VOLUME_RESULT, vm::addr_t(CELL_OK), music.userData); return CELL_OK; }); return CELL_OK; } error_code cellMusicGetVolume2(vm::ptr<f32> level) { cellMusic.warning("cellMusicGetVolume2(level=*0x%x)", level); if (!level) return CELL_MUSIC2_ERROR_PARAM; const auto& music = g_fxo->get<music_state>(); *level = music.handler->get_volume(); return CELL_OK; } DECLARE(ppu_module_manager::cellMusic)("cellMusicUtility", []() { REG_FUNC(cellMusicUtility, cellMusicGetSelectionContext); 
REG_FUNC(cellMusicUtility, cellMusicSetSelectionContext2); REG_FUNC(cellMusicUtility, cellMusicSetVolume2); REG_FUNC(cellMusicUtility, cellMusicGetContentsId); REG_FUNC(cellMusicUtility, cellMusicSetSelectionContext); REG_FUNC(cellMusicUtility, cellMusicInitialize2SystemWorkload); REG_FUNC(cellMusicUtility, cellMusicGetPlaybackStatus2); REG_FUNC(cellMusicUtility, cellMusicGetContentsId2); REG_FUNC(cellMusicUtility, cellMusicFinalize); REG_FUNC(cellMusicUtility, cellMusicInitializeSystemWorkload); REG_FUNC(cellMusicUtility, cellMusicInitialize); REG_FUNC(cellMusicUtility, cellMusicFinalize2); REG_FUNC(cellMusicUtility, cellMusicGetSelectionContext2); REG_FUNC(cellMusicUtility, cellMusicGetVolume); REG_FUNC(cellMusicUtility, cellMusicGetPlaybackStatus); REG_FUNC(cellMusicUtility, cellMusicSetPlaybackCommand2); REG_FUNC(cellMusicUtility, cellMusicSetPlaybackCommand); REG_FUNC(cellMusicUtility, cellMusicSelectContents2); REG_FUNC(cellMusicUtility, cellMusicSelectContents); REG_FUNC(cellMusicUtility, cellMusicInitialize2); REG_FUNC(cellMusicUtility, cellMusicSetVolume); REG_FUNC(cellMusicUtility, cellMusicGetVolume2); });
19,381
C++
.cpp
557
31.969479
231
0.736253
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
5,200
cellSysutilNpEula.cpp
RPCS3_rpcs3/rpcs3/Emu/Cell/Modules/cellSysutilNpEula.cpp
#include "stdafx.h" #include "Emu/Cell/PPUModule.h" #include "cellSysutil.h" #include "sceNp.h" #include "Emu/IdManager.h" LOG_CHANNEL(cellSysutilNpEula); enum SceNpEulaStatus { SCE_NP_EULA_UNKNOWN = 0, SCE_NP_EULA_ACCEPTED = 1, SCE_NP_EULA_ALREADY_ACCEPTED = 2, SCE_NP_EULA_REJECTED = 3, SCE_NP_EULA_ABORTED = 4, SCE_NP_EULA_ERROR = 5, }; using SceNpEulaVersion = u32; using SceNpEulaCheckEulaStatusCallback = void(SceNpEulaStatus status, u32 errorCode, SceNpEulaVersion version, vm::ptr<void> arg); struct sceNpEulaCallbacksRegistered { atomic_t<SceNpEulaStatus> status = SCE_NP_EULA_UNKNOWN; atomic_t<bool> sceNpEulaCheckEulaStatus_callback_registered = false; atomic_t<bool> sceNpEulaShowCurrentEula_callback_registered = false; }; error_code sceNpEulaCheckEulaStatus(vm::cptr<SceNpCommunicationId> communicationId, u32 arg2, u64 arg3, vm::ptr<SceNpEulaCheckEulaStatusCallback> cbFunc, vm::ptr<void> cbFuncArg) { cellSysutilNpEula.warning("sceNpEulaCheckEulaStatus(communicationId=*0x%x, arg2=0x%x, arg3=0x%x, cbFunc=*0x%x, cbFuncArg=*0x%x)", communicationId, arg2, arg3, cbFunc, cbFuncArg); if (!communicationId || !cbFunc) { return SCE_NP_EULA_ERROR_INVALID_ARGUMENT; } auto& cb_infos = g_fxo->get<sceNpEulaCallbacksRegistered>(); if (cb_infos.sceNpEulaCheckEulaStatus_callback_registered || cb_infos.sceNpEulaShowCurrentEula_callback_registered) { return SCE_NP_EULA_ERROR_ALREADY_INITIALIZED; } cb_infos.sceNpEulaCheckEulaStatus_callback_registered = true; cb_infos.status = SCE_NP_EULA_ALREADY_ACCEPTED; sysutil_register_cb([=](ppu_thread& cb_ppu) -> s32 { auto& cb_infos = g_fxo->get<sceNpEulaCallbacksRegistered>(); cbFunc(cb_ppu, cb_infos.status, CELL_OK, 1, cbFuncArg); cb_infos.sceNpEulaCheckEulaStatus_callback_registered = false; return 0; }); return CELL_OK; } error_code sceNpEulaAbort() { cellSysutilNpEula.warning("sceNpEulaAbort()"); auto& cb_infos = g_fxo->get<sceNpEulaCallbacksRegistered>(); if (!cb_infos.sceNpEulaCheckEulaStatus_callback_registered && 
!cb_infos.sceNpEulaShowCurrentEula_callback_registered) { return SCE_NP_EULA_ERROR_NOT_INITIALIZED; } // It would forcefully abort the dialog/process of getting the eula but since we don't show the dialog, just alter the status returned cb_infos.status = SCE_NP_EULA_ABORTED; return CELL_OK; } // Seen on: Resistance 3, Uncharted 2 error_code sceNpEulaShowCurrentEula(vm::cptr<SceNpCommunicationId> communicationId, u64 arg2, vm::ptr<CellSysutilCallback> cbFunc, vm::ptr<void> cbFuncArg) { cellSysutilNpEula.todo("sceNpEulaShowCurrentEula(communicationId=*0x%x, arg2=0x%x, cbFunc=*0x%x, cbFuncArg=*0x%x)", communicationId, arg2, cbFunc, cbFuncArg); if (!communicationId || !cbFunc) { return SCE_NP_EULA_ERROR_INVALID_ARGUMENT; } auto& cb_infos = g_fxo->get<sceNpEulaCallbacksRegistered>(); if (cb_infos.sceNpEulaCheckEulaStatus_callback_registered || cb_infos.sceNpEulaShowCurrentEula_callback_registered) { return SCE_NP_EULA_ERROR_ALREADY_INITIALIZED; } // Call callback (Unknown parameters) return CELL_OK; } DECLARE(ppu_module_manager::cellSysutilNpEula)("cellSysutilNpEula", []() { REG_FUNC(cellSysutilNpEula, sceNpEulaCheckEulaStatus); REG_FUNC(cellSysutilNpEula, sceNpEulaAbort); REG_FUNC(cellSysutilNpEula, sceNpEulaShowCurrentEula); });
3,354
C++
.cpp
80
39.8625
179
0.778222
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
5,201
cellAuthDialog.cpp
RPCS3_rpcs3/rpcs3/Emu/Cell/Modules/cellAuthDialog.cpp
#include "stdafx.h" #include "Emu/Cell/PPUModule.h" LOG_CHANNEL(cellSysutil); // All error codes are unknown at this point in implementation enum cellSysutilAuthDialogError : u32 { CELL_AUTHDIALOG_UNKNOWN_201 = 0x8002D201, CELL_AUTHDIALOG_ARG1_IS_ZERO = 0x8002D202, CELL_AUTHDIALOG_UNKNOWN_203 = 0x8002D203, }; template<> void fmt_class_string<cellSysutilAuthDialogError>::format(std::string& out, u64 arg) { format_enum(out, arg, [](auto error) { switch (error) { STR_CASE(CELL_AUTHDIALOG_UNKNOWN_201); STR_CASE(CELL_AUTHDIALOG_ARG1_IS_ZERO); STR_CASE(CELL_AUTHDIALOG_UNKNOWN_203); } return unknown; }); } // Decompilation suggests arg1 is s64 but the check is for == 0 instead of >= 0 error_code cellAuthDialogOpen(u64 arg1 /* arg2 */) { cellSysutil.todo("cellAuthDialogOpen(arg1=%u)", arg1); if (arg1 == 0) return CELL_AUTHDIALOG_ARG1_IS_ZERO; return CELL_OK; } error_code cellAuthDialogAbort() { cellSysutil.todo("cellAuthDialogAbort()"); // If it fails the first if condition (not init cond?) // return CELL_AUTHDIALOG_UNKNOWN_203; return CELL_OK; } error_code cellAuthDialogClose(/* arg1 */) { cellSysutil.todo("cellAuthDialogClose()"); // If it fails the first if condition (not init cond?) // return CELL_AUTHDIALOG_UNKNOWN_203; return CELL_OK; } DECLARE(ppu_module_manager::cellAuthDialogUtility)("cellAuthDialogUtility", []() { REG_FUNC(cellAuthDialogUtility, cellAuthDialogOpen); REG_FUNC(cellAuthDialogUtility, cellAuthDialogAbort); REG_FUNC(cellAuthDialogUtility, cellAuthDialogClose); });
1,558
C++
.cpp
52
27.942308
84
0.766086
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
true
false
false
true
false
false
5,202
cellSync.cpp
RPCS3_rpcs3/rpcs3/Emu/Cell/Modules/cellSync.cpp
#include "stdafx.h" #include "Emu/Cell/PPUModule.h" #include "Emu/Cell/lv2/sys_event.h" #include "Emu/Cell/lv2/sys_process.h" #include "cellSync.h" LOG_CHANNEL(cellSync); template<> void fmt_class_string<CellSyncError>::format(std::string& out, u64 arg) { format_enum(out, arg, [](auto error) { switch (error) { STR_CASE(CELL_SYNC_ERROR_AGAIN); STR_CASE(CELL_SYNC_ERROR_INVAL); STR_CASE(CELL_SYNC_ERROR_NOSYS); STR_CASE(CELL_SYNC_ERROR_NOMEM); STR_CASE(CELL_SYNC_ERROR_SRCH); STR_CASE(CELL_SYNC_ERROR_NOENT); STR_CASE(CELL_SYNC_ERROR_NOEXEC); STR_CASE(CELL_SYNC_ERROR_DEADLK); STR_CASE(CELL_SYNC_ERROR_PERM); STR_CASE(CELL_SYNC_ERROR_BUSY); STR_CASE(CELL_SYNC_ERROR_ABORT); STR_CASE(CELL_SYNC_ERROR_FAULT); STR_CASE(CELL_SYNC_ERROR_CHILD); STR_CASE(CELL_SYNC_ERROR_STAT); STR_CASE(CELL_SYNC_ERROR_ALIGN); STR_CASE(CELL_SYNC_ERROR_NULL_POINTER); STR_CASE(CELL_SYNC_ERROR_NOT_SUPPORTED_THREAD); STR_CASE(CELL_SYNC_ERROR_NO_NOTIFIER); STR_CASE(CELL_SYNC_ERROR_NO_SPU_CONTEXT_STORAGE); } return unknown; }); } error_code cellSyncMutexInitialize(vm::ptr<CellSyncMutex> mutex) { cellSync.trace("cellSyncMutexInitialize(mutex=*0x%x)", mutex); if (!mutex) [[unlikely]] { return CELL_SYNC_ERROR_NULL_POINTER; } if (!mutex.aligned()) [[unlikely]] { return CELL_SYNC_ERROR_ALIGN; } mutex->ctrl.exchange({0, 0}); return CELL_OK; } error_code cellSyncMutexLock(ppu_thread& ppu, vm::ptr<CellSyncMutex> mutex) { cellSync.trace("cellSyncMutexLock(mutex=*0x%x)", mutex); if (!mutex) [[unlikely]] { return CELL_SYNC_ERROR_NULL_POINTER; } if (!mutex.aligned()) [[unlikely]] { return CELL_SYNC_ERROR_ALIGN; } // Increase acq value and remember its old value const auto order = mutex->ctrl.atomic_op(&CellSyncMutex::Counter::lock_begin); // Wait until rel value is equal to old acq value while (mutex->ctrl.load().rel != order) { if (ppu.test_stopped()) { return 0; } } atomic_fence_acq_rel(); return CELL_OK; } error_code cellSyncMutexTryLock(vm::ptr<CellSyncMutex> mutex) { 
cellSync.trace("cellSyncMutexTryLock(mutex=*0x%x)", mutex); if (!mutex) [[unlikely]] { return CELL_SYNC_ERROR_NULL_POINTER; } if (!mutex.aligned()) [[unlikely]] { return CELL_SYNC_ERROR_ALIGN; } if (!mutex->ctrl.atomic_op(&CellSyncMutex::Counter::try_lock)) { return not_an_error(CELL_SYNC_ERROR_BUSY); } return CELL_OK; } error_code cellSyncMutexUnlock(vm::ptr<CellSyncMutex> mutex) { cellSync.trace("cellSyncMutexUnlock(mutex=*0x%x)", mutex); if (!mutex) [[unlikely]] { return CELL_SYNC_ERROR_NULL_POINTER; } if (!mutex.aligned()) [[unlikely]] { return CELL_SYNC_ERROR_ALIGN; } mutex->ctrl.atomic_op(&CellSyncMutex::Counter::unlock); return CELL_OK; } error_code cellSyncBarrierInitialize(vm::ptr<CellSyncBarrier> barrier, u16 total_count) { cellSync.trace("cellSyncBarrierInitialize(barrier=*0x%x, total_count=%d)", barrier, total_count); if (!barrier) [[unlikely]] { return CELL_SYNC_ERROR_NULL_POINTER; } if (!barrier.aligned()) [[unlikely]] { return CELL_SYNC_ERROR_ALIGN; } if (!total_count || total_count > 32767) [[unlikely]] { return CELL_SYNC_ERROR_INVAL; } // clear current value, write total_count and sync barrier->ctrl.exchange({0, total_count}); return CELL_OK; } error_code cellSyncBarrierNotify(ppu_thread& ppu, vm::ptr<CellSyncBarrier> barrier) { cellSync.trace("cellSyncBarrierNotify(barrier=*0x%x)", barrier); if (!barrier) [[unlikely]] { return CELL_SYNC_ERROR_NULL_POINTER; } if (!barrier.aligned()) [[unlikely]] { return CELL_SYNC_ERROR_ALIGN; } while (!barrier->ctrl.atomic_op(&CellSyncBarrier::try_notify)) { if (ppu.test_stopped()) { return 0; } } return CELL_OK; } error_code cellSyncBarrierTryNotify(vm::ptr<CellSyncBarrier> barrier) { cellSync.trace("cellSyncBarrierTryNotify(barrier=*0x%x)", barrier); if (!barrier) [[unlikely]] { return CELL_SYNC_ERROR_NULL_POINTER; } if (!barrier.aligned()) [[unlikely]] { return CELL_SYNC_ERROR_ALIGN; } atomic_fence_acq_rel(); if (!barrier->ctrl.atomic_op(&CellSyncBarrier::try_notify)) { return 
not_an_error(CELL_SYNC_ERROR_BUSY); } return CELL_OK; } error_code cellSyncBarrierWait(ppu_thread& ppu, vm::ptr<CellSyncBarrier> barrier) { cellSync.trace("cellSyncBarrierWait(barrier=*0x%x)", barrier); if (!barrier) [[unlikely]] { return CELL_SYNC_ERROR_NULL_POINTER; } if (!barrier.aligned()) [[unlikely]] { return CELL_SYNC_ERROR_ALIGN; } atomic_fence_acq_rel(); while (!barrier->ctrl.atomic_op(&CellSyncBarrier::try_wait)) { if (ppu.test_stopped()) { return 0; } } return CELL_OK; } error_code cellSyncBarrierTryWait(vm::ptr<CellSyncBarrier> barrier) { cellSync.trace("cellSyncBarrierTryWait(barrier=*0x%x)", barrier); if (!barrier) [[unlikely]] { return CELL_SYNC_ERROR_NULL_POINTER; } if (!barrier.aligned()) [[unlikely]] { return CELL_SYNC_ERROR_ALIGN; } atomic_fence_acq_rel(); if (!barrier->ctrl.atomic_op(&CellSyncBarrier::try_wait)) { return not_an_error(CELL_SYNC_ERROR_BUSY); } return CELL_OK; } error_code cellSyncRwmInitialize(vm::ptr<CellSyncRwm> rwm, vm::ptr<void> buffer, u32 buffer_size) { cellSync.trace("cellSyncRwmInitialize(rwm=*0x%x, buffer=*0x%x, buffer_size=0x%x)", rwm, buffer, buffer_size); if (!rwm || !buffer) [[unlikely]] { return CELL_SYNC_ERROR_NULL_POINTER; } if (!rwm.aligned() || !buffer.aligned(128)) [[unlikely]] { return CELL_SYNC_ERROR_ALIGN; } if (buffer_size % 128 || buffer_size > 0x4000) [[unlikely]] { return CELL_SYNC_ERROR_INVAL; } // clear readers and writers, write buffer_size, buffer addr and sync rwm->ctrl.store({ 0, 0 }); rwm->size = buffer_size; rwm->buffer = buffer; atomic_fence_acq_rel(); return CELL_OK; } error_code cellSyncRwmRead(ppu_thread& ppu, vm::ptr<CellSyncRwm> rwm, vm::ptr<void> buffer) { cellSync.trace("cellSyncRwmRead(rwm=*0x%x, buffer=*0x%x)", rwm, buffer); if (!rwm || !buffer) [[unlikely]] { return CELL_SYNC_ERROR_NULL_POINTER; } if (!rwm.aligned()) [[unlikely]] { return CELL_SYNC_ERROR_ALIGN; } // wait until `writers` is zero, increase `readers` while (!rwm->ctrl.atomic_op(&CellSyncRwm::try_read_begin)) { if 
(ppu.test_stopped()) { return 0; } } // copy data to buffer std::memcpy(buffer.get_ptr(), rwm->buffer.get_ptr(), rwm->size); // decrease `readers`, return error if already zero if (!rwm->ctrl.atomic_op(&CellSyncRwm::try_read_end)) { return CELL_SYNC_ERROR_ABORT; } return CELL_OK; } error_code cellSyncRwmTryRead(vm::ptr<CellSyncRwm> rwm, vm::ptr<void> buffer) { cellSync.trace("cellSyncRwmTryRead(rwm=*0x%x, buffer=*0x%x)", rwm, buffer); if (!rwm || !buffer) [[unlikely]] { return CELL_SYNC_ERROR_NULL_POINTER; } if (!rwm.aligned()) [[unlikely]] { return CELL_SYNC_ERROR_ALIGN; } // increase `readers` if `writers` is zero if (!rwm->ctrl.atomic_op(&CellSyncRwm::try_read_begin)) { return not_an_error(CELL_SYNC_ERROR_BUSY); } // copy data to buffer std::memcpy(buffer.get_ptr(), rwm->buffer.get_ptr(), rwm->size); // decrease `readers`, return error if already zero if (!rwm->ctrl.atomic_op(&CellSyncRwm::try_read_end)) { return CELL_SYNC_ERROR_ABORT; } return CELL_OK; } error_code cellSyncRwmWrite(ppu_thread& ppu, vm::ptr<CellSyncRwm> rwm, vm::cptr<void> buffer) { cellSync.trace("cellSyncRwmWrite(rwm=*0x%x, buffer=*0x%x)", rwm, buffer); if (!rwm || !buffer) [[unlikely]] { return CELL_SYNC_ERROR_NULL_POINTER; } if (!rwm.aligned()) [[unlikely]] { return CELL_SYNC_ERROR_ALIGN; } // wait until `writers` is zero, set to 1 while (!rwm->ctrl.atomic_op(&CellSyncRwm::try_write_begin)) { if (ppu.test_stopped()) { return 0; } } // wait until `readers` is zero while (rwm->ctrl.load().readers != 0) { if (ppu.test_stopped()) { return 0; } } // copy data from buffer std::memcpy(rwm->buffer.get_ptr(), buffer.get_ptr(), rwm->size); // sync and clear `readers` and `writers` rwm->ctrl.exchange({ 0, 0 }); return CELL_OK; } error_code cellSyncRwmTryWrite(vm::ptr<CellSyncRwm> rwm, vm::cptr<void> buffer) { cellSync.trace("cellSyncRwmTryWrite(rwm=*0x%x, buffer=*0x%x)", rwm, buffer); if (!rwm || !buffer) [[unlikely]] { return CELL_SYNC_ERROR_NULL_POINTER; } if (!rwm.aligned()) [[unlikely]] { return 
CELL_SYNC_ERROR_ALIGN; } // set `writers` to 1 if `readers` and `writers` are zero if (!rwm->ctrl.compare_and_swap_test({ 0, 0 }, { 0, 1 })) { return not_an_error(CELL_SYNC_ERROR_BUSY); } // copy data from buffer std::memcpy(rwm->buffer.get_ptr(), buffer.get_ptr(), rwm->size); // sync and clear `readers` and `writers` rwm->ctrl.exchange({ 0, 0 }); return CELL_OK; } error_code cellSyncQueueInitialize(vm::ptr<CellSyncQueue> queue, vm::ptr<u8> buffer, u32 size, u32 depth) { cellSync.trace("cellSyncQueueInitialize(queue=*0x%x, buffer=*0x%x, size=0x%x, depth=0x%x)", queue, buffer, size, depth); if (!queue) [[unlikely]] { return CELL_SYNC_ERROR_NULL_POINTER; } if (size && !buffer) [[unlikely]] { return CELL_SYNC_ERROR_NULL_POINTER; } if (!queue.aligned() || !buffer.aligned(16)) [[unlikely]] { return CELL_SYNC_ERROR_ALIGN; } if (!depth || size % 16) [[unlikely]] { return CELL_SYNC_ERROR_INVAL; } // clear sync var, write size, depth, buffer addr and sync queue->ctrl.store({}); queue->size = size; queue->depth = depth; queue->buffer = buffer; atomic_fence_acq_rel(); return CELL_OK; } error_code cellSyncQueuePush(ppu_thread& ppu, vm::ptr<CellSyncQueue> queue, vm::cptr<void> buffer) { cellSync.trace("cellSyncQueuePush(queue=*0x%x, buffer=*0x%x)", queue, buffer); if (!queue || !buffer) [[unlikely]] { return CELL_SYNC_ERROR_NULL_POINTER; } if (!queue.aligned()) [[unlikely]] { return CELL_SYNC_ERROR_ALIGN; } const u32 depth = queue->check_depth(); u32 position; while (!queue->ctrl.atomic_op([&](CellSyncQueue::ctrl_t& ctrl) { return CellSyncQueue::try_push_begin(ctrl, depth, &position); })) { if (ppu.test_stopped()) { return 0; } } // copy data from the buffer at the position std::memcpy(&queue->buffer[position * queue->size], buffer.get_ptr(), queue->size); queue->ctrl.atomic_op(&CellSyncQueue::push_end); return CELL_OK; } error_code cellSyncQueueTryPush(vm::ptr<CellSyncQueue> queue, vm::cptr<void> buffer) { cellSync.trace("cellSyncQueueTryPush(queue=*0x%x, buffer=*0x%x)", 
queue, buffer); if (!queue || !buffer) [[unlikely]] { return CELL_SYNC_ERROR_NULL_POINTER; } if (!queue.aligned()) [[unlikely]] { return CELL_SYNC_ERROR_ALIGN; } const u32 depth = queue->check_depth(); u32 position; while (!queue->ctrl.atomic_op([&](CellSyncQueue::ctrl_t& ctrl) { return CellSyncQueue::try_push_begin(ctrl, depth, &position); })) { return not_an_error(CELL_SYNC_ERROR_BUSY); } // copy data from the buffer at the position std::memcpy(&queue->buffer[position * queue->size], buffer.get_ptr(), queue->size); queue->ctrl.atomic_op(&CellSyncQueue::push_end); return CELL_OK; } error_code cellSyncQueuePop(ppu_thread& ppu, vm::ptr<CellSyncQueue> queue, vm::ptr<void> buffer) { cellSync.trace("cellSyncQueuePop(queue=*0x%x, buffer=*0x%x)", queue, buffer); if (!queue || !buffer) [[unlikely]] { return CELL_SYNC_ERROR_NULL_POINTER; } if (!queue.aligned()) [[unlikely]] { return CELL_SYNC_ERROR_ALIGN; } const u32 depth = queue->check_depth(); u32 position; while (!queue->ctrl.atomic_op([&](CellSyncQueue::ctrl_t& ctrl) { return CellSyncQueue::try_pop_begin(ctrl, depth, &position); })) { if (ppu.test_stopped()) { return 0; } } // copy data at the position to the buffer std::memcpy(buffer.get_ptr(), &queue->buffer[position % depth * queue->size], queue->size); queue->ctrl.atomic_op(&CellSyncQueue::pop_end); return CELL_OK; } error_code cellSyncQueueTryPop(vm::ptr<CellSyncQueue> queue, vm::ptr<void> buffer) { cellSync.trace("cellSyncQueueTryPop(queue=*0x%x, buffer=*0x%x)", queue, buffer); if (!queue || !buffer) [[unlikely]] { return CELL_SYNC_ERROR_NULL_POINTER; } if (!queue.aligned()) [[unlikely]] { return CELL_SYNC_ERROR_ALIGN; } const u32 depth = queue->check_depth(); u32 position; while (!queue->ctrl.atomic_op([&](CellSyncQueue::ctrl_t& ctrl) { return CellSyncQueue::try_pop_begin(ctrl, depth, &position); })) { return not_an_error(CELL_SYNC_ERROR_BUSY); } // copy data at the position to the buffer std::memcpy(buffer.get_ptr(), &queue->buffer[position % depth * 
queue->size], queue->size); queue->ctrl.atomic_op(&CellSyncQueue::pop_end); return CELL_OK; } error_code cellSyncQueuePeek(ppu_thread& ppu, vm::ptr<CellSyncQueue> queue, vm::ptr<void> buffer) { cellSync.trace("cellSyncQueuePeek(queue=*0x%x, buffer=*0x%x)", queue, buffer); if (!queue || !buffer) [[unlikely]] { return CELL_SYNC_ERROR_NULL_POINTER; } if (!queue.aligned()) [[unlikely]] { return CELL_SYNC_ERROR_ALIGN; } const u32 depth = queue->check_depth(); u32 position; while (!queue->ctrl.atomic_op([&](CellSyncQueue::ctrl_t& ctrl) { return CellSyncQueue::try_peek_begin(ctrl, depth, &position); })) { if (ppu.test_stopped()) { return 0; } } // copy data at the position to the buffer std::memcpy(buffer.get_ptr(), &queue->buffer[position % depth * queue->size], queue->size); queue->ctrl.atomic_op(&CellSyncQueue::pop_end); return CELL_OK; } error_code cellSyncQueueTryPeek(vm::ptr<CellSyncQueue> queue, vm::ptr<void> buffer) { cellSync.trace("cellSyncQueueTryPeek(queue=*0x%x, buffer=*0x%x)", queue, buffer); if (!queue || !buffer) [[unlikely]] { return CELL_SYNC_ERROR_NULL_POINTER; } if (!queue.aligned()) [[unlikely]] { return CELL_SYNC_ERROR_ALIGN; } const u32 depth = queue->check_depth(); u32 position; while (!queue->ctrl.atomic_op([&](CellSyncQueue::ctrl_t& ctrl) { return CellSyncQueue::try_peek_begin(ctrl, depth, &position); })) { return not_an_error(CELL_SYNC_ERROR_BUSY); } // copy data at the position to the buffer std::memcpy(buffer.get_ptr(), &queue->buffer[position % depth * queue->size], queue->size); queue->ctrl.atomic_op(&CellSyncQueue::pop_end); return CELL_OK; } error_code cellSyncQueueSize(vm::ptr<CellSyncQueue> queue) { cellSync.trace("cellSyncQueueSize(queue=*0x%x)", queue); if (!queue) [[unlikely]] { return CELL_SYNC_ERROR_NULL_POINTER; } if (!queue.aligned()) [[unlikely]] { return CELL_SYNC_ERROR_ALIGN; } queue->check_depth(); return not_an_error(queue->ctrl.load().count & 0xffffff); } error_code cellSyncQueueClear(ppu_thread& ppu, vm::ptr<CellSyncQueue> 
queue) { cellSync.trace("cellSyncQueueClear(queue=*0x%x)", queue); if (!queue) [[unlikely]] { return CELL_SYNC_ERROR_NULL_POINTER; } if (!queue.aligned()) [[unlikely]] { return CELL_SYNC_ERROR_ALIGN; } queue->check_depth(); while (!queue->ctrl.atomic_op(&CellSyncQueue::try_clear_begin_1)) { if (ppu.test_stopped()) { return 0; } } while (!queue->ctrl.atomic_op(&CellSyncQueue::try_clear_begin_2)) { if (ppu.test_stopped()) { return 0; } } queue->ctrl.store({}); return CELL_OK; } // LFQueue functions void syncLFQueueInitialize(vm::ptr<CellSyncLFQueue> queue, vm::cptr<void> buffer, u32 size, u32 depth, u32 direction, vm::ptr<void> eaSignal) { queue->m_size = size; queue->m_depth = depth; queue->m_buffer = buffer; queue->m_direction = direction; memset(queue->m_hs1, 0, sizeof(queue->m_hs1)); memset(queue->m_hs2, 0, sizeof(queue->m_hs2)); queue->m_eaSignal = eaSignal; if (direction == CELL_SYNC_QUEUE_ANY2ANY) { queue->pop1.store({}); queue->push1.store({}); queue->m_buffer.set(queue->m_buffer.addr() | 1); queue->m_bs[0] = -1; queue->m_bs[1] = -1; //m_bs[2] //m_bs[3] queue->m_v1 = -1; queue->push2.store({ 0xffff }); queue->pop2.store({ 0xffff }); } else { queue->pop1.store({ 0, 0, queue->pop1.load().m_h3, 0}); queue->push1.store({ 0, 0, queue->push1.load().m_h7, 0 }); queue->m_bs[0] = -1; // written as u32 queue->m_bs[1] = -1; queue->m_bs[2] = -1; queue->m_bs[3] = -1; queue->m_v1 = 0; queue->push2.store({}); queue->pop2.store({}); } queue->m_v2 = 0; queue->m_eq_id = 0; } error_code cellSyncLFQueueInitialize(vm::ptr<CellSyncLFQueue> queue, vm::cptr<void> buffer, u32 size, u32 depth, u32 direction, vm::ptr<void> eaSignal) { cellSync.warning("cellSyncLFQueueInitialize(queue=*0x%x, buffer=*0x%x, size=0x%x, depth=0x%x, direction=%d, eaSignal=*0x%x)", queue, buffer, size, depth, direction, eaSignal); if (!queue) [[unlikely]] { return CELL_SYNC_ERROR_NULL_POINTER; } if (size) { if (!buffer) [[unlikely]] { return CELL_SYNC_ERROR_NULL_POINTER; } if (size > 0x4000 || size % 16) 
[[unlikely]] { return CELL_SYNC_ERROR_INVAL; } } if (!depth || depth > 0x7fff || direction > 3) [[unlikely]] { return CELL_SYNC_ERROR_INVAL; } if (!queue.aligned() || !buffer.aligned(16)) [[unlikely]] { return CELL_SYNC_ERROR_ALIGN; } // get sdk version of current process s32 sdk_ver; if (s32 ret = process_get_sdk_version(process_getpid(), sdk_ver)) { return not_an_error(ret); } if (sdk_ver == -1) { sdk_ver = 0x460000; } // reserve `init` u32 old_value; while (true) { const auto old = queue->init.load(); auto init = old; if (old) { if (sdk_ver > 0x17ffff && old != 2) [[unlikely]] { return CELL_SYNC_ERROR_STAT; } old_value = old; } else { if (sdk_ver > 0x17ffff) { for (const auto& data : vm::_ref<u64[16]>(queue.addr())) { if (data) [[unlikely]] { return CELL_SYNC_ERROR_STAT; } } } init = 1; old_value = 1; } if (queue->init.compare_and_swap_test(old, init)) break; } if (old_value == 2) { if (queue->m_size != size || queue->m_depth != depth || queue->m_buffer != buffer) [[unlikely]] { return CELL_SYNC_ERROR_INVAL; } if (sdk_ver > 0x17ffff) { if (queue->m_eaSignal != eaSignal || queue->m_direction != direction) [[unlikely]] { return CELL_SYNC_ERROR_INVAL; } } atomic_fence_acq_rel(); } else { syncLFQueueInitialize(queue, buffer, size, depth, direction, eaSignal); queue->init.exchange(0); } return CELL_OK; } error_code _cellSyncLFQueueGetPushPointer(ppu_thread& ppu, vm::ptr<CellSyncLFQueue> queue, vm::ptr<s32> pointer, u32 isBlocking, u32 useEventQueue) { cellSync.warning("_cellSyncLFQueueGetPushPointer(queue=*0x%x, pointer=*0x%x, isBlocking=%d, useEventQueue=%d)", queue, pointer, isBlocking, useEventQueue); if (queue->m_direction != CELL_SYNC_QUEUE_PPU2SPU) [[unlikely]] { return CELL_SYNC_ERROR_PERM; } const s32 depth = queue->m_depth; u32 var1 = 0; while (true) { while (true) { const auto old = queue->push1.load(); auto push = old; if (var1) { push.m_h7 = 0; } if (isBlocking && useEventQueue && std::bit_cast<s32>(queue->m_bs) == -1) { return CELL_SYNC_ERROR_STAT; } s32 
var2 = static_cast<s16>(push.m_h8); s32 res; if (useEventQueue && (+push.m_h5 != var2 || push.m_h7)) { res = CELL_SYNC_ERROR_BUSY; } else { var2 -= queue->pop1.load().m_h1; if (var2 < 0) { var2 += depth * 2; } if (var2 < depth) { const s32 _pointer = static_cast<s16>(push.m_h8); *pointer = _pointer; if (_pointer + 1 >= depth * 2) { push.m_h8 = 0; } else { push.m_h8++; } res = CELL_OK; } else if (!isBlocking) { return CELL_SYNC_ERROR_AGAIN; } else if (!useEventQueue) { continue; } else { res = CELL_OK; push.m_h7 = 3; if (isBlocking != 3) { break; } } } if (queue->push1.compare_and_swap_test(old, push)) { if (!push.m_h7 || res) { return not_an_error(res); } break; } } ensure(sys_event_queue_receive(ppu, queue->m_eq_id, vm::null, 0) == CELL_OK); var1 = 1; } } error_code _cellSyncLFQueueGetPushPointer2(ppu_thread& /*ppu*/, vm::ptr<CellSyncLFQueue> queue, vm::ptr<s32> pointer, u32 isBlocking, u32 useEventQueue) { // arguments copied from _cellSyncLFQueueGetPushPointer cellSync.todo("_cellSyncLFQueueGetPushPointer2(queue=*0x%x, pointer=*0x%x, isBlocking=%d, useEventQueue=%d)", queue, pointer, isBlocking, useEventQueue); return CELL_OK; } error_code _cellSyncLFQueueCompletePushPointer(ppu_thread& ppu, vm::ptr<CellSyncLFQueue> queue, s32 pointer, vm::ptr<s32(u32 addr, u32 arg)> fpSendSignal) { cellSync.warning("_cellSyncLFQueueCompletePushPointer(queue=*0x%x, pointer=%d, fpSendSignal=*0x%x)", queue, pointer, fpSendSignal); if (queue->m_direction != CELL_SYNC_QUEUE_PPU2SPU) [[unlikely]] { return CELL_SYNC_ERROR_PERM; } const s32 depth = queue->m_depth; while (true) { const auto old = queue->push2.load(); auto push2 = old; // Loads must be in this order const auto old2 = queue->push3.load(); auto push3 = old2; s32 var1 = pointer - push3.m_h5; if (var1 < 0) { var1 += depth * 2; } s32 var2 = static_cast<s16>(queue->pop1.load().m_h4) - queue->pop1.load().m_h1; if (var2 < 0) { var2 += depth * 2; } s32 var9_ = 15 - var1; // calculate (u16)(1 slw (15 - var1)) if (var9_ & 0x30) { 
var9_ = 0; } else { var9_ = 1 << var9_; } s32 var9 = std::countl_zero<u32>(static_cast<u16>(~(var9_ | push3.m_h6))) - 16; // count leading zeros in u16 s32 var5 = push3.m_h6 | var9_; if (var9 & 0x30) { var5 = 0; } else { var5 <<= var9; } s32 var3 = push3.m_h5 + var9; if (var3 >= depth * 2) { var3 -= depth * 2; } u16 pack = push2.pack; // three packed 5-bit fields s32 var4 = ((pack >> 10) & 0x1f) - ((pack >> 5) & 0x1f); if (var4 < 0) { var4 += 0x1e; } u32 var6; if (var2 + var4 <= 15 && ((pack >> 10) & 0x1f) != (pack & 0x1f)) { s32 var8 = (pack & 0x1f) - ((pack >> 10) & 0x1f); if (var8 < 0) { var8 += 0x1e; } if (var9 > 1 && static_cast<u32>(var8) > 1) { ensure((16 - var2 <= 1)); } s32 var11 = (pack >> 10) & 0x1f; if (var11 >= 15) { var11 -= 15; } u16 var12 = (pack >> 10) & 0x1f; if (var12 == 0x1d) { var12 = 0; } else { var12 = (var12 + 1) << 10; } push2.pack = (pack & 0x83ff) | var12; var6 = queue->m_hs1[var11]; } else { var6 = -1; } push3.m_h5 = static_cast<u16>(var3); push3.m_h6 = static_cast<u16>(var5); if (queue->push2.compare_and_swap_test(old, push2)) { ensure((var2 + var4 < 16)); if (var6 != umax) { ensure((queue->push3.compare_and_swap_test(old2, push3))); ensure((fpSendSignal)); return not_an_error(fpSendSignal(ppu, vm::cast(queue->m_eaSignal.addr()), var6)); } else { pack = queue->push2.load().pack; if ((pack & 0x1f) == ((pack >> 10) & 0x1f)) { if (queue->push3.compare_and_swap_test(old2, push3)) { return CELL_OK; } } } } } } error_code _cellSyncLFQueueCompletePushPointer2(ppu_thread&, vm::ptr<CellSyncLFQueue> queue, s32 pointer, vm::ptr<s32(u32 addr, u32 arg)> fpSendSignal) { // arguments copied from _cellSyncLFQueueCompletePushPointer cellSync.todo("_cellSyncLFQueueCompletePushPointer2(queue=*0x%x, pointer=%d, fpSendSignal=*0x%x)", queue, pointer, fpSendSignal); return CELL_OK; } error_code _cellSyncLFQueuePushBody(ppu_thread& ppu, vm::ptr<CellSyncLFQueue> queue, vm::cptr<void> buffer, u32 isBlocking) { // cellSyncLFQueuePush has 1 in isBlocking param, 
cellSyncLFQueueTryPush has 0 cellSync.warning("_cellSyncLFQueuePushBody(queue=*0x%x, buffer=*0x%x, isBlocking=%d)", queue, buffer, isBlocking); if (!queue || !buffer) [[unlikely]] { return CELL_SYNC_ERROR_NULL_POINTER; } if (!queue.aligned() || !buffer.aligned(16)) [[unlikely]] { return CELL_SYNC_ERROR_ALIGN; } vm::var<s32> position; while (true) { s32 res; if (queue->m_direction != CELL_SYNC_QUEUE_ANY2ANY) { res = _cellSyncLFQueueGetPushPointer(ppu, queue, position, isBlocking, 0); } else { res = _cellSyncLFQueueGetPushPointer2(ppu, queue, position, isBlocking, 0); } if (!isBlocking || res + 0u != CELL_SYNC_ERROR_AGAIN) { if (res) return not_an_error(res); break; } if (ppu.test_stopped()) { return 0; } } const s32 depth = queue->m_depth; const s32 size = queue->m_size; const s32 pos = *position; const u32 addr = vm::cast<u64>((queue->m_buffer.addr() & ~1ull) + size * (pos >= depth ? pos - depth : pos)); std::memcpy(vm::base(addr), buffer.get_ptr(), size); if (queue->m_direction != CELL_SYNC_QUEUE_ANY2ANY) { return _cellSyncLFQueueCompletePushPointer(ppu, queue, pos, vm::null); } else { return _cellSyncLFQueueCompletePushPointer2(ppu, queue, pos, vm::null); } } error_code _cellSyncLFQueueGetPopPointer(ppu_thread& ppu, vm::ptr<CellSyncLFQueue> queue, vm::ptr<s32> pointer, u32 isBlocking, u32 arg4, u32 useEventQueue) { cellSync.warning("_cellSyncLFQueueGetPopPointer(queue=*0x%x, pointer=*0x%x, isBlocking=%d, arg4=%d, useEventQueue=%d)", queue, pointer, isBlocking, arg4, useEventQueue); if (queue->m_direction != CELL_SYNC_QUEUE_SPU2PPU) [[unlikely]] { return CELL_SYNC_ERROR_PERM; } const s32 depth = queue->m_depth; u32 var1 = 0; while (true) { while (true) { const auto old = queue->pop1.load(); auto pop = old; if (var1) { pop.m_h3 = 0; } if (isBlocking && useEventQueue && std::bit_cast<s32>(queue->m_bs) == -1) { return CELL_SYNC_ERROR_STAT; } s32 var2 = static_cast<s16>(pop.m_h4); s32 res; if (useEventQueue && (static_cast<s32>(pop.m_h1) != var2 || pop.m_h3)) { res = 
CELL_SYNC_ERROR_BUSY; } else { var2 = queue->push1.load().m_h5 - var2; if (var2 < 0) { var2 += depth * 2; } if (var2 > 0) { const s32 _pointer = static_cast<s16>(pop.m_h4); *pointer = _pointer; if (_pointer + 1 >= depth * 2) { pop.m_h4 = 0; } else { pop.m_h4++; } res = CELL_OK; } else if (!isBlocking) { return CELL_SYNC_ERROR_AGAIN; } else if (!useEventQueue) { continue; } else { res = CELL_OK; pop.m_h3 = 3; if (isBlocking != 3) { break; } } } if (queue->pop1.compare_and_swap_test(old, pop)) { if (!pop.m_h3 || res) { return not_an_error(res); } break; } } ensure((sys_event_queue_receive(ppu, queue->m_eq_id, vm::null, 0) == CELL_OK)); var1 = 1; } } error_code _cellSyncLFQueueGetPopPointer2(ppu_thread&, vm::ptr<CellSyncLFQueue> queue, vm::ptr<s32> pointer, u32 isBlocking, u32 useEventQueue) { // arguments copied from _cellSyncLFQueueGetPopPointer cellSync.todo("_cellSyncLFQueueGetPopPointer2(queue=*0x%x, pointer=*0x%x, isBlocking=%d, useEventQueue=%d)", queue, pointer, isBlocking, useEventQueue); return CELL_OK; } error_code _cellSyncLFQueueCompletePopPointer(ppu_thread& ppu, vm::ptr<CellSyncLFQueue> queue, s32 pointer, vm::ptr<s32(u32 addr, u32 arg)> fpSendSignal, u32 noQueueFull) { // arguments copied from _cellSyncLFQueueCompletePushPointer + unknown argument (noQueueFull taken from LFQueue2CompletePopPointer) cellSync.warning("_cellSyncLFQueueCompletePopPointer(queue=*0x%x, pointer=%d, fpSendSignal=*0x%x, noQueueFull=%d)", queue, pointer, fpSendSignal, noQueueFull); if (queue->m_direction != CELL_SYNC_QUEUE_SPU2PPU) [[unlikely]] { return CELL_SYNC_ERROR_PERM; } const s32 depth = queue->m_depth; while (true) { const auto old = queue->pop2.load(); auto pop2 = old; // Loads must be in this order const auto old2 = queue->pop3.load(); auto pop3 = old2; s32 var1 = pointer - pop3.m_h1; if (var1 < 0) { var1 += depth * 2; } s32 var2 = static_cast<s16>(queue->push1.load().m_h8) - queue->push1.load().m_h5; if (var2 < 0) { var2 += depth * 2; } s32 var9_ = 15 - var1; // 
calculate (u16)(1 slw (15 - var1)) if (var9_ & 0x30) { var9_ = 0; } else { var9_ = 1 << var9_; } s32 var9 = std::countl_zero<u32>(static_cast<u16>(~(var9_ | pop3.m_h2))) - 16; // count leading zeros in u16 s32 var5 = pop3.m_h2 | var9_; if (var9 & 0x30) { var5 = 0; } else { var5 <<= var9; } s32 var3 = pop3.m_h1 + var9; if (var3 >= depth * 2) { var3 -= depth * 2; } u16 pack = pop2.pack; // three packed 5-bit fields s32 var4 = ((pack >> 10) & 0x1f) - ((pack >> 5) & 0x1f); if (var4 < 0) { var4 += 0x1e; } u32 var6; if (noQueueFull || var2 + var4 > 15 || ((pack >> 10) & 0x1f) == (pack & 0x1f)) { var6 = -1; } else { s32 var8 = (pack & 0x1f) - ((pack >> 10) & 0x1f); if (var8 < 0) { var8 += 0x1e; } if (var9 > 1 && static_cast<u32>(var8) > 1) { ensure((16 - var2 <= 1)); } s32 var11 = (pack >> 10) & 0x1f; if (var11 >= 15) { var11 -= 15; } u16 var12 = (pack >> 10) & 0x1f; if (var12 == 0x1d) { var12 = 0; } else { var12 = (var12 + 1) << 10; } pop2.pack = (pack & 0x83ff) | var12; var6 = queue->m_hs2[var11]; } pop3.m_h1 = static_cast<u16>(var3); pop3.m_h2 = static_cast<u16>(var5); if (queue->pop2.compare_and_swap_test(old, pop2)) { if (var6 != umax) { ensure((queue->pop3.compare_and_swap_test(old2, pop3))); ensure((fpSendSignal)); return not_an_error(fpSendSignal(ppu, vm::cast(queue->m_eaSignal.addr()), var6)); } else { pack = queue->pop2.load().pack; if ((pack & 0x1f) == ((pack >> 10) & 0x1f)) { if (queue->pop3.compare_and_swap_test(old2, pop3)) { return CELL_OK; } } } } } } error_code _cellSyncLFQueueCompletePopPointer2(ppu_thread&, vm::ptr<CellSyncLFQueue> queue, s32 pointer, vm::ptr<s32(u32 addr, u32 arg)> fpSendSignal, u32 noQueueFull) { // arguments copied from _cellSyncLFQueueCompletePopPointer cellSync.todo("_cellSyncLFQueueCompletePopPointer2(queue=*0x%x, pointer=%d, fpSendSignal=*0x%x, noQueueFull=%d)", queue, pointer, fpSendSignal, noQueueFull); return CELL_OK; } error_code _cellSyncLFQueuePopBody(ppu_thread& ppu, vm::ptr<CellSyncLFQueue> queue, vm::ptr<void> buffer, 
u32 isBlocking) { // cellSyncLFQueuePop has 1 in isBlocking param, cellSyncLFQueueTryPop has 0 cellSync.warning("_cellSyncLFQueuePopBody(queue=*0x%x, buffer=*0x%x, isBlocking=%d)", queue, buffer, isBlocking); if (!queue || !buffer) [[unlikely]] { return CELL_SYNC_ERROR_NULL_POINTER; } if (!queue.aligned() || !buffer.aligned(16)) [[unlikely]] { return CELL_SYNC_ERROR_ALIGN; } vm::var<s32> position; while (true) { s32 res; if (queue->m_direction != CELL_SYNC_QUEUE_ANY2ANY) { res = _cellSyncLFQueueGetPopPointer(ppu, queue, position, isBlocking, 0, 0); } else { res = _cellSyncLFQueueGetPopPointer2(ppu, queue, position, isBlocking, 0); } if (!isBlocking || res + 0u != CELL_SYNC_ERROR_AGAIN) { if (res) return not_an_error(res); break; } if (ppu.test_stopped()) { return 0; } } const s32 depth = queue->m_depth; const s32 size = queue->m_size; const s32 pos = *position; const u32 addr = vm::cast<u64>((queue->m_buffer.addr() & ~1) + size * (pos >= depth ? pos - depth : pos)); std::memcpy(buffer.get_ptr(), vm::base(addr), size); if (queue->m_direction != CELL_SYNC_QUEUE_ANY2ANY) { return _cellSyncLFQueueCompletePopPointer(ppu, queue, pos, vm::null, 0); } else { return _cellSyncLFQueueCompletePopPointer2(ppu, queue, pos, vm::null, 0); } } error_code cellSyncLFQueueClear(vm::ptr<CellSyncLFQueue> queue) { cellSync.warning("cellSyncLFQueueClear(queue=*0x%x)", queue); if (!queue) [[unlikely]] { return CELL_SYNC_ERROR_NULL_POINTER; } if (!queue.aligned()) [[unlikely]] { return CELL_SYNC_ERROR_ALIGN; } while (true) { const auto old = queue->pop1.load(); auto pop = old; // Loads must be in this order const auto push = queue->push1.load(); s32 var1, var2; if (queue->m_direction != CELL_SYNC_QUEUE_ANY2ANY) { var1 = var2 = queue->pop2.load().pack; } else { var1 = push.m_h7; var2 = pop.m_h3; } if (static_cast<s16>(pop.m_h4) != +pop.m_h1 || static_cast<s16>(push.m_h8) != +push.m_h5 || ((var2 >> 10) & 0x1f) != (var2 & 0x1f) || ((var1 >> 10) & 0x1f) != (var1 & 0x1f)) { return 
// NOTE(review): this chunk begins inside a pop-pointer helper whose head lies
// outside the visible range; its tail is preserved verbatim below.
		CELL_SYNC_ERROR_BUSY;
	}

	// Copy the push-side positions into the pop descriptor (presumably to
	// resynchronize both halves of the queue header -- TODO confirm against
	// the function head, which is not visible here).
	pop.m_h1 = push.m_h5;
	pop.m_h2 = push.m_h6;
	pop.m_h3 = push.m_h7;
	pop.m_h4 = push.m_h8;

	// Publish atomically; on CAS failure another thread raced us, so retry.
	if (queue->pop1.compare_and_swap_test(old, pop))
		break;
	}

	return CELL_OK;
}

// Report the current number of entries in a lock-free queue.
// The count is derived from the pop position (pop1.m_h1) and the push
// position (push1.m_h5); pop3 is CAS'd against itself to detect concurrent
// mutation and retry, so the returned size is a consistent snapshot.
error_code cellSyncLFQueueSize(vm::ptr<CellSyncLFQueue> queue, vm::ptr<u32> size)
{
	cellSync.warning("cellSyncLFQueueSize(queue=*0x%x, size=*0x%x)", queue, size);

	if (!queue || !size) [[unlikely]]
	{
		return CELL_SYNC_ERROR_NULL_POINTER;
	}

	if (!queue.aligned()) [[unlikely]]
	{
		return CELL_SYNC_ERROR_ALIGN;
	}

	while (true)
	{
		const auto old = queue->pop3.load();

		// Loads must be in this order
		u32 var1 = queue->pop1.load().m_h1;
		u32 var2 = queue->push1.load().m_h5;

		// If pop3 is unchanged, the two loads above form a coherent snapshot.
		if (queue->pop3.compare_and_swap_test(old, old))
		{
			if (var1 <= var2)
			{
				*size = var2 - var1;
			}
			else
			{
				// Positions wrap modulo (depth * 2); unwrap the difference.
				*size = var2 - var1 + queue->m_depth * 2;
			}

			return CELL_OK;
		}
	}
}

// Return the queue's configured depth (capacity), as set at initialization.
error_code cellSyncLFQueueDepth(vm::ptr<CellSyncLFQueue> queue, vm::ptr<u32> depth)
{
	cellSync.trace("cellSyncLFQueueDepth(queue=*0x%x, depth=*0x%x)", queue, depth);

	if (!queue || !depth) [[unlikely]]
	{
		return CELL_SYNC_ERROR_NULL_POINTER;
	}

	if (!queue.aligned()) [[unlikely]]
	{
		return CELL_SYNC_ERROR_ALIGN;
	}

	*depth = queue->m_depth;

	return CELL_OK;
}

// Return the effective address of the queue's signal area (m_eaSignal).
error_code _cellSyncLFQueueGetSignalAddress(vm::cptr<CellSyncLFQueue> queue, vm::pptr<void> ppSignal)
{
	cellSync.trace("_cellSyncLFQueueGetSignalAddress(queue=*0x%x, ppSignal=**0x%x)", queue, ppSignal);

	if (!queue || !ppSignal) [[unlikely]]
	{
		return CELL_SYNC_ERROR_NULL_POINTER;
	}

	if (!queue.aligned()) [[unlikely]]
	{
		return CELL_SYNC_ERROR_ALIGN;
	}

	*ppSignal = queue->m_eaSignal;

	return CELL_OK;
}

// Return the queue's transfer direction (m_direction) chosen at creation.
error_code cellSyncLFQueueGetDirection(vm::cptr<CellSyncLFQueue> queue, vm::ptr<u32> direction)
{
	cellSync.trace("cellSyncLFQueueGetDirection(queue=*0x%x, direction=*0x%x)", queue, direction);

	if (!queue || !direction) [[unlikely]]
	{
		return CELL_SYNC_ERROR_NULL_POINTER;
	}

	if (!queue.aligned()) [[unlikely]]
	{
		return CELL_SYNC_ERROR_ALIGN;
	}

	*direction = queue->m_direction;

	return CELL_OK;
}

// Return the size of a single queue entry in bytes (m_size).
error_code cellSyncLFQueueGetEntrySize(vm::cptr<CellSyncLFQueue> queue, vm::ptr<u32> entry_size)
{
	cellSync.trace("cellSyncLFQueueGetEntrySize(queue=*0x%x, entry_size=*0x%x)", queue, entry_size);

	if (!queue || !entry_size) [[unlikely]]
	{
		return CELL_SYNC_ERROR_NULL_POINTER;
	}

	if (!queue.aligned()) [[unlikely]]
	{
		return CELL_SYNC_ERROR_ALIGN;
	}

	*entry_size = queue->m_size;

	return CELL_OK;
}

// Stub: attaching an lv2 event queue to SPUs is not implemented yet.
error_code _cellSyncLFQueueAttachLv2EventQueue(vm::ptr<u32> spus, u32 num, vm::ptr<CellSyncLFQueue> queue)
{
	cellSync.todo("_cellSyncLFQueueAttachLv2EventQueue(spus=*0x%x, num=%d, queue=*0x%x)", spus, num, queue);

	return CELL_OK;
}

// Stub: detaching an lv2 event queue from SPUs is not implemented yet.
error_code _cellSyncLFQueueDetachLv2EventQueue(vm::ptr<u32> spus, u32 num, vm::ptr<CellSyncLFQueue> queue)
{
	cellSync.todo("_cellSyncLFQueueDetachLv2EventQueue(spus=*0x%x, num=%d, queue=*0x%x)", spus, num, queue);

	return CELL_OK;
}

// HLE function registration for the cellSync module.
DECLARE(ppu_module_manager::cellSync)("cellSync", []()
{
	REG_FUNC(cellSync, cellSyncMutexInitialize);
	REG_FUNC(cellSync, cellSyncMutexLock);
	REG_FUNC(cellSync, cellSyncMutexTryLock);
	REG_FUNC(cellSync, cellSyncMutexUnlock);

	REG_FUNC(cellSync, cellSyncBarrierInitialize);
	REG_FUNC(cellSync, cellSyncBarrierNotify);
	REG_FUNC(cellSync, cellSyncBarrierTryNotify);
	REG_FUNC(cellSync, cellSyncBarrierWait);
	REG_FUNC(cellSync, cellSyncBarrierTryWait);

	REG_FUNC(cellSync, cellSyncRwmInitialize);
	REG_FUNC(cellSync, cellSyncRwmRead);
	REG_FUNC(cellSync, cellSyncRwmTryRead);
	REG_FUNC(cellSync, cellSyncRwmWrite);
	REG_FUNC(cellSync, cellSyncRwmTryWrite);

	REG_FUNC(cellSync, cellSyncQueueInitialize);
	REG_FUNC(cellSync, cellSyncQueuePush);
	REG_FUNC(cellSync, cellSyncQueueTryPush);
	REG_FUNC(cellSync, cellSyncQueuePop);
	REG_FUNC(cellSync, cellSyncQueueTryPop);
	REG_FUNC(cellSync, cellSyncQueuePeek);
	REG_FUNC(cellSync, cellSyncQueueTryPeek);
	REG_FUNC(cellSync, cellSyncQueueSize);
	REG_FUNC(cellSync, cellSyncQueueClear);

	REG_FUNC(cellSync, cellSyncLFQueueGetEntrySize);
	REG_FUNC(cellSync, cellSyncLFQueueSize);
	REG_FUNC(cellSync, cellSyncLFQueueClear);
	REG_FUNC(cellSync, _cellSyncLFQueueCompletePushPointer2);
	REG_FUNC(cellSync, _cellSyncLFQueueGetPopPointer2);
	REG_FUNC(cellSync, _cellSyncLFQueueCompletePushPointer);
	REG_FUNC(cellSync, _cellSyncLFQueueAttachLv2EventQueue);
	REG_FUNC(cellSync, _cellSyncLFQueueGetPushPointer2);
	REG_FUNC(cellSync, _cellSyncLFQueueGetPopPointer);
	REG_FUNC(cellSync, _cellSyncLFQueueCompletePopPointer2);
	REG_FUNC(cellSync, _cellSyncLFQueueDetachLv2EventQueue);
	REG_FUNC(cellSync, cellSyncLFQueueInitialize);
	REG_FUNC(cellSync, _cellSyncLFQueueGetSignalAddress);
	REG_FUNC(cellSync, _cellSyncLFQueuePushBody);
	REG_FUNC(cellSync, cellSyncLFQueueGetDirection);
	REG_FUNC(cellSync, cellSyncLFQueueDepth);
	REG_FUNC(cellSync, _cellSyncLFQueuePopBody);
	REG_FUNC(cellSync, _cellSyncLFQueueGetPushPointer);
	REG_FUNC(cellSync, _cellSyncLFQueueCompletePopPointer);
});
37,345
C++
.cpp
1,381
24.094135
176
0.678133
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
5,203
cellSheap.cpp
RPCS3_rpcs3/rpcs3/Emu/Cell/Modules/cellSheap.cpp
#include "stdafx.h"
#include "Emu/Cell/PPUModule.h"

LOG_CHANNEL(cellSheap);

// Return Codes
enum CellSheapError : u32
{
	CELL_SHEAP_ERROR_INVAL    = 0x80410302,
	CELL_SHEAP_ERROR_BUSY     = 0x8041030A,
	CELL_SHEAP_ERROR_ALIGN    = 0x80410310,
	CELL_SHEAP_ERROR_SHORTAGE = 0x80410312,
};

// Pretty-printer so CellSheapError values show up by name in the log.
template <>
void fmt_class_string<CellSheapError>::format(std::string& out, u64 arg)
{
	format_enum(out, arg, [](auto code)
	{
		switch (code)
		{
			STR_CASE(CELL_SHEAP_ERROR_INVAL);
			STR_CASE(CELL_SHEAP_ERROR_BUSY);
			STR_CASE(CELL_SHEAP_ERROR_ALIGN);
			STR_CASE(CELL_SHEAP_ERROR_SHORTAGE);
		}

		return unknown;
	});
}

// All cellSheap entry points below are unimplemented stubs: each logs an
// "unimplemented" message and reports success to the guest.

// Plain shared-heap API
error_code cellSheapInitialize() { UNIMPLEMENTED_FUNC(cellSheap); return CELL_OK; }
error_code cellSheapAllocate() { UNIMPLEMENTED_FUNC(cellSheap); return CELL_OK; }
error_code cellSheapFree() { UNIMPLEMENTED_FUNC(cellSheap); return CELL_OK; }
error_code cellSheapQueryMax() { UNIMPLEMENTED_FUNC(cellSheap); return CELL_OK; }
error_code cellSheapQueryFree() { UNIMPLEMENTED_FUNC(cellSheap); return CELL_OK; }

// Key-based shared-heap API
error_code cellKeySheapInitialize() { UNIMPLEMENTED_FUNC(cellSheap); return CELL_OK; }
error_code cellKeySheapBufferNew() { UNIMPLEMENTED_FUNC(cellSheap); return CELL_OK; }
error_code cellKeySheapBufferDelete() { UNIMPLEMENTED_FUNC(cellSheap); return CELL_OK; }
error_code cellKeySheapMutexNew() { UNIMPLEMENTED_FUNC(cellSheap); return CELL_OK; }
error_code cellKeySheapMutexDelete() { UNIMPLEMENTED_FUNC(cellSheap); return CELL_OK; }
error_code cellKeySheapBarrierNew() { UNIMPLEMENTED_FUNC(cellSheap); return CELL_OK; }
error_code cellKeySheapBarrierDelete() { UNIMPLEMENTED_FUNC(cellSheap); return CELL_OK; }
error_code cellKeySheapSemaphoreNew() { UNIMPLEMENTED_FUNC(cellSheap); return CELL_OK; }
error_code cellKeySheapSemaphoreDelete() { UNIMPLEMENTED_FUNC(cellSheap); return CELL_OK; }
error_code cellKeySheapRwmNew() { UNIMPLEMENTED_FUNC(cellSheap); return CELL_OK; }
error_code cellKeySheapRwmDelete() { UNIMPLEMENTED_FUNC(cellSheap); return CELL_OK; }
error_code cellKeySheapQueueNew() { UNIMPLEMENTED_FUNC(cellSheap); return CELL_OK; }
error_code cellKeySheapQueueDelete() { UNIMPLEMENTED_FUNC(cellSheap); return CELL_OK; }

// HLE function registration for the cellSheap module.
DECLARE(ppu_module_manager::cellSheap)("cellSheap", []()
{
	REG_FUNC(cellSheap, cellSheapInitialize);
	REG_FUNC(cellSheap, cellSheapAllocate);
	REG_FUNC(cellSheap, cellSheapFree);
	REG_FUNC(cellSheap, cellSheapQueryMax);
	REG_FUNC(cellSheap, cellSheapQueryFree);

	REG_FUNC(cellSheap, cellKeySheapInitialize);
	REG_FUNC(cellSheap, cellKeySheapBufferNew);
	REG_FUNC(cellSheap, cellKeySheapBufferDelete);

	REG_FUNC(cellSheap, cellKeySheapMutexNew);
	REG_FUNC(cellSheap, cellKeySheapMutexDelete);
	REG_FUNC(cellSheap, cellKeySheapBarrierNew);
	REG_FUNC(cellSheap, cellKeySheapBarrierDelete);
	REG_FUNC(cellSheap, cellKeySheapSemaphoreNew);
	REG_FUNC(cellSheap, cellKeySheapSemaphoreDelete);
	REG_FUNC(cellSheap, cellKeySheapRwmNew);
	REG_FUNC(cellSheap, cellKeySheapRwmDelete);
	REG_FUNC(cellSheap, cellKeySheapQueueNew);
	REG_FUNC(cellSheap, cellKeySheapQueueDelete);
});
3,089
C++
.cpp
137
20.773723
72
0.802528
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
true
false
false
true
false
false
5,204
cellSsl.cpp
RPCS3_rpcs3/rpcs3/Emu/Cell/Modules/cellSsl.cpp
#include "stdafx.h"
#include <bitset>
#include <string>

#include "cellSsl.h"

#include "Emu/Cell/PPUModule.h"
#include "Utilities/File.h"
#include "Emu/VFS.h"
#include "Emu/IdManager.h"

#include "cellRtc.h"

LOG_CHANNEL(cellSsl);

// Pretty-printer so CellSslError values show up by name in the log.
template<>
void fmt_class_string<CellSslError>::format(std::string& out, u64 arg)
{
	format_enum(out, arg, [](auto error)
	{
		switch (error)
		{
			STR_CASE(CELL_SSL_ERROR_NOT_INITIALIZED);
			STR_CASE(CELL_SSL_ERROR_ALREADY_INITIALIZED);
			STR_CASE(CELL_SSL_ERROR_INITIALIZATION_FAILED);
			STR_CASE(CELL_SSL_ERROR_NO_BUFFER);
			STR_CASE(CELL_SSL_ERROR_INVALID_CERTIFICATE);
			STR_CASE(CELL_SSL_ERROR_UNRETRIEVABLE);
			STR_CASE(CELL_SSL_ERROR_INVALID_FORMAT);
			STR_CASE(CELL_SSL_ERROR_NOT_FOUND);
			STR_CASE(CELL_SSL_ERROR_INVALID_TIME);
			STR_CASE(CELL_SSL_ERROR_INAVLID_NEGATIVE_TIME);
			STR_CASE(CELL_SSL_ERROR_INCORRECT_TIME);
			STR_CASE(CELL_SSL_ERROR_UNDEFINED_TIME_TYPE);
			STR_CASE(CELL_SSL_ERROR_NO_MEMORY);
			STR_CASE(CELL_SSL_ERROR_NO_STRING);
			STR_CASE(CELL_SSL_ERROR_UNKNOWN_LOAD_CERT);
		}

		return unknown;
	});
}

// Initialize the HLE SSL module. The guest-provided memory pool is ignored.
error_code cellSslInit(vm::ptr<void> pool, u32 poolSize)
{
	cellSsl.todo("cellSslInit(pool=*0x%x, poolSize=%d)", pool, poolSize);

	auto& manager = g_fxo->get<ssl_manager>();

	if (manager.is_init)
		return CELL_SSL_ERROR_ALREADY_INITIALIZED;

	manager.is_init = true;

	return CELL_OK;
}

// Shut down the HLE SSL module.
error_code cellSslEnd()
{
	cellSsl.todo("cellSslEnd()");

	auto& manager = g_fxo->get<ssl_manager>();

	if (!manager.is_init)
		return CELL_SSL_ERROR_NOT_INITIALIZED;

	manager.is_init = false;

	return CELL_OK;
}

error_code cellSslGetMemoryInfo()
{
	UNIMPLEMENTED_FUNC(cellSsl);
	return CELL_OK;
}

// Read the CA certificate with the given ID from the firmware cert directory.
// Returns the raw file contents, or an empty string if the file is missing.
std::string getCert(const std::string& certPath, const int certID, const bool isNormalCert)
{
	int newID = certID;

	// The 'normal' certs have some special rules for loading: a few IDs are
	// remapped onto different CAxx.cer files, and the rest are shifted by one.
	if (isNormalCert && certID >= BaltimoreCert && certID <= GTECyberTrustGlobalCert)
	{
		if (certID == BaltimoreCert)
			newID = GTECyberTrustGlobalCert;
		else if (certID == Class3G2V2Cert)
			newID = BaltimoreCert;
		else if (certID == EntrustNetCert)
			newID = ClassSSV4Cert;
		else
			newID = certID - 1;
	}

	std::string filePath = fmt::format("%sCA%02d.cer", certPath, newID);

	if (!fs::exists(filePath))
	{
		cellSsl.error("Can't find certificate file %s, do you have the PS3 firmware installed?", filePath);
		return "";
	}

	return fs::file(filePath).to_string();
}

// Load the CA certificates selected by the bits of 'flag'.
// If 'required' is non-null, only the total byte size is reported; otherwise
// the concatenated certificates are written into 'buffer' (at most 'size' bytes).
error_code cellSslCertificateLoader(u64 flag, vm::ptr<char> buffer, u32 size, vm::ptr<u32> required)
{
	cellSsl.trace("cellSslCertificateLoader(flag=%llu, buffer=*0x%x, size=%zu, required=*0x%x)", flag, buffer, size, required);

	const std::bitset<58> flagBits(flag);
	const std::string certPath = vfs::get("/dev_flash/data/cert/");

	if (required)
	{
		*required = 0;
		for (uint i = 1; i <= flagBits.size(); i++)
		{
			if (!flagBits[i-1])
				continue;
			// If we're loading cert 6 (the baltimore cert), then we need set that we're loading the 'normal' set of certs.
			*required += ::size32(getCert(certPath, i, flagBits[BaltimoreCert-1]));
		}
	}
	else
	{
		std::string final;
		for (uint i = 1; i <= flagBits.size(); i++)
		{
			if (!flagBits[i-1])
				continue;
			// If we're loading cert 6 (the baltimore cert), then we need set that we're loading the 'normal' set of certs.
			final.append(getCert(certPath, i, flagBits[BaltimoreCert-1]));
		}

		// Fix: the previous code did memset(buffer, 0, size - 1) -- which
		// underflows to ~4 GiB when size == 0 -- and then memcpy'd
		// final.size() bytes without bounding the copy by the caller's
		// buffer size. Guard the empty/null case and clamp the copy so the
		// result always fits and stays NUL-terminated.
		if (buffer && size)
		{
			usz copy_len = final.size();

			if (copy_len >= size)
			{
				copy_len = size - 1;
			}

			memset(buffer.get_ptr(), '\0', size);
			memcpy(buffer.get_ptr(), final.c_str(), copy_len);
		}
	}

	return CELL_OK;
}

// The cert accessors below only validate their arguments; the actual
// certificate parsing is not implemented yet.

error_code cellSslCertGetSerialNumber(vm::cptr<void> sslCert, vm::cpptr<u8> sboData, vm::ptr<u64> sboLength)
{
	cellSsl.todo("cellSslCertGetSerialNumber(sslCert=*0x%x, sboData=**0x%x, sboLength=*0x%x)", sslCert, sboData, sboLength);

	if (!g_fxo->get<ssl_manager>().is_init)
		return CELL_SSL_ERROR_NOT_INITIALIZED;

	if (!sslCert)
		return CELL_SSL_ERROR_INVALID_CERTIFICATE;

	if (!sboData || !sboLength)
		return CELL_SSL_ERROR_NO_BUFFER;

	return CELL_OK;
}

error_code cellSslCertGetPublicKey(vm::cptr<void> sslCert, vm::cpptr<u8> sboData, vm::ptr<u64> sboLength)
{
	cellSsl.todo("cellSslCertGetPublicKey(sslCert=*0x%x, sboData=**0x%x, sboLength=*0x%x)", sslCert, sboData, sboLength);

	if (!g_fxo->get<ssl_manager>().is_init)
		return CELL_SSL_ERROR_NOT_INITIALIZED;

	if (!sslCert)
		return CELL_SSL_ERROR_INVALID_CERTIFICATE;

	if (!sboData || !sboLength)
		return CELL_SSL_ERROR_NO_BUFFER;

	return CELL_OK;
}

error_code cellSslCertGetRsaPublicKeyModulus(vm::cptr<void> sslCert, vm::cpptr<u8> sboData, vm::ptr<u64> sboLength)
{
	cellSsl.todo("cellSslCertGetRsaPublicKeyModulus(sslCert=*0x%x, sboData=**0x%x, sboLength=*0x%x)", sslCert, sboData, sboLength);

	if (!g_fxo->get<ssl_manager>().is_init)
		return CELL_SSL_ERROR_NOT_INITIALIZED;

	if (!sslCert)
		return CELL_SSL_ERROR_INVALID_CERTIFICATE;

	if (!sboData || !sboLength)
		return CELL_SSL_ERROR_NO_BUFFER;

	return CELL_OK;
}

error_code cellSslCertGetRsaPublicKeyExponent(vm::cptr<void> sslCert, vm::cpptr<u8> sboData, vm::ptr<u64> sboLength)
{
	cellSsl.todo("cellSslCertGetRsaPublicKeyExponent(sslCert=*0x%x, sboData=**0x%x, sboLength=*0x%x)", sslCert, sboData, sboLength);

	if (!g_fxo->get<ssl_manager>().is_init)
		return CELL_SSL_ERROR_NOT_INITIALIZED;

	if (!sslCert)
		return CELL_SSL_ERROR_INVALID_CERTIFICATE;

	if (!sboData || !sboLength)
		return CELL_SSL_ERROR_NO_BUFFER;

	return CELL_OK;
}
error_code cellSslCertGetNotBefore(vm::cptr<void> sslCert, vm::ptr<CellRtcTick> begin) { cellSsl.todo("cellSslCertGetNotBefore(sslCert=*0x%x, begin=*0x%x)", sslCert, begin); if (!g_fxo->get<ssl_manager>().is_init) return CELL_SSL_ERROR_NOT_INITIALIZED; if (!sslCert) return CELL_SSL_ERROR_INVALID_CERTIFICATE; if (!begin) return CELL_SSL_ERROR_NO_BUFFER; return CELL_OK; } error_code cellSslCertGetNotAfter(vm::cptr<void> sslCert, vm::ptr<CellRtcTick> limit) { cellSsl.todo("cellSslCertGetNotAfter(sslCert=*0x%x, limit=*0x%x)", sslCert, limit); if (!g_fxo->get<ssl_manager>().is_init) return CELL_SSL_ERROR_NOT_INITIALIZED; if (!sslCert) return CELL_SSL_ERROR_INVALID_CERTIFICATE; if (!limit) return CELL_SSL_ERROR_NO_BUFFER; return CELL_OK; } error_code cellSslCertGetSubjectName(vm::cptr<void> sslCert, vm::cpptr<void> certName) { cellSsl.todo("cellSslCertGetSubjectName(sslCert=*0x%x, certName=**0x%x)", sslCert, certName); if (!g_fxo->get<ssl_manager>().is_init) return CELL_SSL_ERROR_NOT_INITIALIZED; if (!sslCert) return CELL_SSL_ERROR_INVALID_CERTIFICATE; if (!certName) return CELL_SSL_ERROR_NO_BUFFER; return CELL_OK; } error_code cellSslCertGetIssuerName(vm::cptr<void> sslCert, vm::cpptr<void> certName) { cellSsl.todo("cellSslCertGetIssuerName(sslCert=*0x%x, certName=**0x%x)", sslCert, certName); if (!g_fxo->get<ssl_manager>().is_init) return CELL_SSL_ERROR_NOT_INITIALIZED; if (!sslCert) return CELL_SSL_ERROR_INVALID_CERTIFICATE; if (!certName) return CELL_SSL_ERROR_NO_BUFFER; return CELL_OK; } error_code cellSslCertGetNameEntryCount(vm::cptr<void> certName, vm::ptr<u32> entryCount) { cellSsl.todo("cellSslCertGetNameEntryCount(certName=*0x%x, entryCount=*0x%x)", certName, entryCount); if (!g_fxo->get<ssl_manager>().is_init) return CELL_SSL_ERROR_NOT_INITIALIZED; if (!certName) return CELL_SSL_ERROR_INVALID_CERTIFICATE; if (!entryCount) return CELL_SSL_ERROR_NO_BUFFER; return CELL_OK; } error_code cellSslCertGetNameEntryInfo(vm::cptr<void> certName, u32 entryNum, 
vm::cpptr<char> oidName, vm::cpptr<u8> value, vm::ptr<u64> valueLength, s32 flag) { cellSsl.todo("cellSslCertGetNameEntryInfo(certName=*0x%x, entryNum=%d, oidName=**0x%x, value=**0x%x, valueLength=*0x%x, flag=0x%x)", certName, entryNum, oidName, value, valueLength, flag); if (!g_fxo->get<ssl_manager>().is_init) return CELL_SSL_ERROR_NOT_INITIALIZED; if (!certName) return CELL_SSL_ERROR_INVALID_CERTIFICATE; if (!oidName || !value || !valueLength) return CELL_SSL_ERROR_NO_BUFFER; return CELL_OK; } error_code cellSslCertGetMd5Fingerprint(vm::cptr<void> sslCert, vm::cptr<u8> buf, vm::cptr<u32> plen) { cellSsl.todo("cellSslCertGetMd5Fingerprint(sslCert=*0x%x, buf=*0x%x, plen=*0x%x)", sslCert, buf, plen); if (!g_fxo->get<ssl_manager>().is_init) return CELL_SSL_ERROR_NOT_INITIALIZED; if (!sslCert) return CELL_SSL_ERROR_INVALID_CERTIFICATE; if (!buf || !plen) return CELL_SSL_ERROR_NO_BUFFER; return CELL_OK; } error_code _cellSslConvertCipherId() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; } error_code _cellSslConvertSslVersion() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; } b8 _cellSslIsInitd() { UNIMPLEMENTED_FUNC(cellSsl); return false; } error_code _cellSslPemReadPrivateKey() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; } error_code _cellSslPemReadX509() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; } error_code BER_read_item() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; } error_code BIO_ctrl() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; } error_code BIO_dump() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; } error_code BIO_free() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; } error_code BIO_get_cb_arg() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; } error_code BIO_get_retry_reason() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; } error_code BIO_new_mem() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; } error_code BIO_new_socket() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; } error_code BIO_printf() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; } 
// Remaining OpenSSL-compatible stubs exported by the HLE cellSsl module.
// Each logs an "unimplemented" message and reports success to the guest.

// BIO / error-queue helpers
error_code BIO_ptr_ctrl() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code BIO_set_cb_arg() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code ERR_clear_error() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code ERR_get_error() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code ERR_error_string() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code ERR_func_error_string() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code ERR_peek_error() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code EVP_PKEY_free() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }

// R_time (RSA BSAFE style time) helpers
error_code R_time() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code R_time_cmp() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code R_time_export() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code R_time_free() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code R_time_import() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code R_time_new() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }

// Cipher / context functions
error_code SSL_CIPHER_description() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSL_CIPHER_get_bits() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSL_CIPHER_get_id() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSL_CIPHER_get_name() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSL_CIPHER_get_version() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSL_CTX_ctrl() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSL_CTX_free() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSL_CTX_new() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSL_CTX_set_app_verify_cb() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSL_CTX_set_info_cb() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSL_CTX_set_options() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSL_CTX_set_verify_mode() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSL_CTX_use_certificate() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSL_CTX_use_PrivateKey() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSL_SESSION_free() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }

// Connection-level functions
error_code SSL_alert_desc_string_long() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSL_alert_type_string_long() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSL_clear() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSL_do_handshake() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSL_free() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSL_get_current_cipher() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSL_get_error() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSL_get_rbio() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSL_get_version() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSL_new() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSL_peek() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSL_read() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSL_set_bio() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSL_set_connect_state() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSL_set_session() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSL_set_ssl_method() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSL_shutdown() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSL_state() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSL_state_string_long() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSL_version() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSL_want() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSL_write() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }

// Certificate object functions
error_code SSLCERT_check_private_key() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSLCERT_free() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSLCERT_from_binary() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSLCERT_get_basic_constraints_int() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSLCERT_get_extension() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSLCERT_get_issuer_name() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSLCERT_get_notAfter() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSLCERT_get_notBefore() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSLCERT_get_pubkey() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSLCERT_get_subject_name() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSLCERT_NAME_cmp() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSLCERT_NAME_ENTRY_get_info() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSLCERT_NAME_get_entry() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSLCERT_NAME_get_entry_count() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSLCERT_NAME_oneline() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSLCERT_OID_to_string() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSLCERT_verify() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code SSLv3_client_method() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }
error_code TLSv1_client_method() { UNIMPLEMENTED_FUNC(cellSsl); return CELL_OK; }

// HLE function registration for the cellSsl module (order preserved).
DECLARE(ppu_module_manager::cellSsl)("cellSsl", []()
{
	REG_FUNC(cellSsl, cellSslInit);
	REG_FUNC(cellSsl, cellSslEnd);
	REG_FUNC(cellSsl, cellSslGetMemoryInfo);
	REG_FUNC(cellSsl, cellSslCertificateLoader);
	REG_FUNC(cellSsl, cellSslCertGetSerialNumber);
	REG_FUNC(cellSsl, cellSslCertGetPublicKey);
	REG_FUNC(cellSsl, cellSslCertGetRsaPublicKeyModulus);
	REG_FUNC(cellSsl, cellSslCertGetRsaPublicKeyExponent);
	REG_FUNC(cellSsl, cellSslCertGetNotBefore);
	REG_FUNC(cellSsl, cellSslCertGetNotAfter);
	REG_FUNC(cellSsl, cellSslCertGetSubjectName);
	REG_FUNC(cellSsl, cellSslCertGetIssuerName);
	REG_FUNC(cellSsl, cellSslCertGetNameEntryCount);
	REG_FUNC(cellSsl, cellSslCertGetNameEntryInfo);
	REG_FUNC(cellSsl, cellSslCertGetMd5Fingerprint);
	REG_FUNC(cellSsl, _cellSslConvertCipherId);
	REG_FUNC(cellSsl, _cellSslConvertSslVersion);
	REG_FUNC(cellSsl, _cellSslIsInitd);
	REG_FUNC(cellSsl, _cellSslPemReadPrivateKey);
	REG_FUNC(cellSsl, _cellSslPemReadX509);
	REG_FUNC(cellSsl, BER_read_item);
	REG_FUNC(cellSsl, BIO_ctrl);
	REG_FUNC(cellSsl, BIO_dump);
	REG_FUNC(cellSsl, BIO_free);
	REG_FUNC(cellSsl, BIO_get_cb_arg);
	REG_FUNC(cellSsl, BIO_get_retry_reason);
	REG_FUNC(cellSsl, BIO_new_mem);
	REG_FUNC(cellSsl, BIO_new_socket);
	REG_FUNC(cellSsl, BIO_printf);
	REG_FUNC(cellSsl, BIO_ptr_ctrl);
	REG_FUNC(cellSsl, BIO_set_cb_arg);
	REG_FUNC(cellSsl, ERR_clear_error);
	REG_FUNC(cellSsl, ERR_get_error);
	REG_FUNC(cellSsl, ERR_error_string);
	REG_FUNC(cellSsl, ERR_func_error_string);
	REG_FUNC(cellSsl, ERR_peek_error);
	REG_FUNC(cellSsl, EVP_PKEY_free);
	REG_FUNC(cellSsl, R_time);
	REG_FUNC(cellSsl, R_time_cmp);
	REG_FUNC(cellSsl, R_time_export);
	REG_FUNC(cellSsl, R_time_free);
	REG_FUNC(cellSsl, R_time_import);
	REG_FUNC(cellSsl, R_time_new);
	REG_FUNC(cellSsl, SSL_CIPHER_description);
	REG_FUNC(cellSsl, SSL_CIPHER_get_bits);
	REG_FUNC(cellSsl, SSL_CIPHER_get_id);
	REG_FUNC(cellSsl, SSL_CIPHER_get_name);
	REG_FUNC(cellSsl, SSL_CIPHER_get_version);
	REG_FUNC(cellSsl, SSL_CTX_ctrl);
	REG_FUNC(cellSsl, SSL_CTX_free);
	REG_FUNC(cellSsl, SSL_CTX_new);
	REG_FUNC(cellSsl, SSL_CTX_set_app_verify_cb);
	REG_FUNC(cellSsl, SSL_CTX_set_info_cb);
	REG_FUNC(cellSsl, SSL_CTX_set_options);
	REG_FUNC(cellSsl, SSL_CTX_set_verify_mode);
	REG_FUNC(cellSsl, SSL_CTX_use_certificate);
	REG_FUNC(cellSsl, SSL_CTX_use_PrivateKey);
	REG_FUNC(cellSsl, SSL_SESSION_free);
	REG_FUNC(cellSsl, SSL_alert_desc_string_long);
	REG_FUNC(cellSsl, SSL_alert_type_string_long);
	REG_FUNC(cellSsl, SSL_clear);
	REG_FUNC(cellSsl, SSL_do_handshake);
	REG_FUNC(cellSsl, SSL_free);
	REG_FUNC(cellSsl, SSL_get_current_cipher);
	REG_FUNC(cellSsl, SSL_get_error);
	REG_FUNC(cellSsl, SSL_get_rbio);
	REG_FUNC(cellSsl, SSL_get_version);
	REG_FUNC(cellSsl, SSL_new);
	REG_FUNC(cellSsl, SSL_peek);
	REG_FUNC(cellSsl, SSL_read);
	REG_FUNC(cellSsl, SSL_set_bio);
	REG_FUNC(cellSsl, SSL_set_connect_state);
	REG_FUNC(cellSsl, SSL_set_session);
	REG_FUNC(cellSsl, SSL_set_ssl_method);
	REG_FUNC(cellSsl, SSL_shutdown);
	REG_FUNC(cellSsl, SSL_state);
	REG_FUNC(cellSsl, SSL_state_string_long);
	REG_FUNC(cellSsl, SSL_version);
	REG_FUNC(cellSsl, SSL_want);
	REG_FUNC(cellSsl, SSL_write);
	REG_FUNC(cellSsl, SSLCERT_free);
	REG_FUNC(cellSsl, SSLCERT_from_binary);
	REG_FUNC(cellSsl, SSLCERT_check_private_key);
	REG_FUNC(cellSsl, SSLCERT_get_basic_constraints_int);
	REG_FUNC(cellSsl, SSLCERT_get_extension);
	REG_FUNC(cellSsl, SSLCERT_get_issuer_name);
	REG_FUNC(cellSsl, SSLCERT_get_notAfter);
	REG_FUNC(cellSsl, SSLCERT_get_notBefore);
	REG_FUNC(cellSsl, SSLCERT_get_pubkey);
	REG_FUNC(cellSsl, SSLCERT_get_subject_name);
	REG_FUNC(cellSsl, SSLCERT_NAME_cmp);
	REG_FUNC(cellSsl, SSLCERT_NAME_ENTRY_get_info);
	REG_FUNC(cellSsl, SSLCERT_NAME_get_entry);
	REG_FUNC(cellSsl, SSLCERT_NAME_get_entry_count);
	REG_FUNC(cellSsl, SSLCERT_NAME_oneline);
	REG_FUNC(cellSsl, SSLCERT_OID_to_string);
	REG_FUNC(cellSsl, SSLCERT_verify);
	REG_FUNC(cellSsl, SSLv3_client_method);
	REG_FUNC(cellSsl, TLSv1_client_method);
});
19,308
C++
.cpp
756
23.56746
189
0.756465
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
true
false
false
5,205
libmixer.cpp
RPCS3_rpcs3/rpcs3/Emu/Cell/Modules/libmixer.cpp
#include "stdafx.h"
#include "Emu/Cell/PPUModule.h"
#include "Emu/Cell/lv2/sys_sync.h"
#include "cellAudio.h"
#include "libmixer.h"

#include <cmath>
#include <mutex>

LOG_CHANNEL(libmixer);

// Pretty-printer so CellLibmixerError values show up by name in the log.
template<>
void fmt_class_string<CellLibmixerError>::format(std::string& out, u64 arg)
{
	format_enum(out, arg, [](auto error)
	{
		switch (error)
		{
			STR_CASE(CELL_LIBMIXER_ERROR_NOT_INITIALIZED);
			STR_CASE(CELL_LIBMIXER_ERROR_INVALID_PARAMATER);
			STR_CASE(CELL_LIBMIXER_ERROR_NO_MEMORY);
			STR_CASE(CELL_LIBMIXER_ERROR_ALREADY_EXIST);
			STR_CASE(CELL_LIBMIXER_ERROR_FULL);
			STR_CASE(CELL_LIBMIXER_ERROR_NOT_EXIST);
			STR_CASE(CELL_LIBMIXER_ERROR_TYPE_MISMATCH);
			STR_CASE(CELL_LIBMIXER_ERROR_NOT_FOUND);
		}

		return unknown;
	});
}

// Global surround-mixer state: channel-strip counts, notify callback and
// the 8-channel, 256-sample interleaved mix buffer.
struct SurMixerConfig
{
	std::mutex mutex; // guards mixdata and g_ssp

	u32 audio_port;
	s32 priority;
	u32 ch_strips_1;
	u32 ch_strips_2;
	u32 ch_strips_6;
	u32 ch_strips_8;

	vm::ptr<CellSurMixerNotifyCallbackFunction> cb;
	vm::ptr<void> cb_arg;

	f32 mixdata[8 * 256]; // interleaved 8-channel float mix accumulator
	u64 mixcount;
};

// State of one simple-sound player instance.
struct SSPlayer
{
	bool m_created; // SSPlayerCreate/Remove
	bool m_connected; // AANConnect/Disconnect
	bool m_active; // SSPlayerPlay/Stop
	u32 m_channels; // 1 or 2
	u32 m_addr;
	u32 m_samples;
	u32 m_loop_start;
	u32 m_loop_mode;
	u32 m_position;
	float m_level;
	float m_speed;
	float m_x;
	float m_y;
	float m_z;
};

// TODO: use fxm
SurMixerConfig g_surmx;

std::vector<SSPlayer> g_ssp;

// Accumulate one 256-sample block into the surround mix buffer, upmixing
// mono/stereo/5.1 input to the internal 8-channel layout. The upper 16 bits
// of aan_port select the channel-strip type, the lower 16 the strip index.
s32 cellAANAddData(u32 aan_handle, u32 aan_port, u32 offset, vm::ptr<float> addr, u32 samples)
{
	libmixer.trace("cellAANAddData(aan_handle=0x%x, aan_port=0x%x, offset=0x%x, addr=*0x%x, samples=%d)", aan_handle, aan_port, offset, addr, samples);

	u32 type = aan_port >> 16;
	u32 port = aan_port & 0xffff;

	// Reject out-of-range strip indices by zeroing the type (checked below)
	switch (type)
	{
	case CELL_SURMIXER_CHSTRIP_TYPE1A:
		if (port >= g_surmx.ch_strips_1) type = 0; break;
	case CELL_SURMIXER_CHSTRIP_TYPE2A:
		if (port >= g_surmx.ch_strips_2) type = 0; break;
	case CELL_SURMIXER_CHSTRIP_TYPE6A:
		if (port >= g_surmx.ch_strips_6) type = 0; break;
	case CELL_SURMIXER_CHSTRIP_TYPE8A:
		if (port >= g_surmx.ch_strips_8) type = 0; break;
	default:
		type = 0; break;
	}

	// Only the fixed handle and a full 256-sample block at offset 0 are supported
	if (aan_handle != 0x11111111 || samples != 256 || !type || offset != 0)
	{
		libmixer.error("cellAANAddData(aan_handle=0x%x, aan_port=0x%x, offset=0x%x, addr=*0x%x, samples=%d): invalid parameters", aan_handle, aan_port, offset, addr, samples);
		return CELL_LIBMIXER_ERROR_INVALID_PARAMATER;
	}

	std::lock_guard lock(g_surmx.mutex);

	if (type == CELL_SURMIXER_CHSTRIP_TYPE1A)
	{
		// mono upmixing
		for (u32 i = 0; i < samples; i++)
		{
			const float center = addr[i];
			g_surmx.mixdata[i * 8 + 0] += center;
			g_surmx.mixdata[i * 8 + 1] += center;
		}
	}
	else if (type == CELL_SURMIXER_CHSTRIP_TYPE2A)
	{
		// stereo upmixing
		for (u32 i = 0; i < samples; i++)
		{
			const float left = addr[i * 2 + 0];
			const float right = addr[i * 2 + 1];
			g_surmx.mixdata[i * 8 + 0] += left;
			g_surmx.mixdata[i * 8 + 1] += right;
		}
	}
	else if (type == CELL_SURMIXER_CHSTRIP_TYPE6A)
	{
		// 5.1 upmixing
		for (u32 i = 0; i < samples; i++)
		{
			const float left = addr[i * 6 + 0];
			const float right = addr[i * 6 + 1];
			const float center = addr[i * 6 + 2];
			const float low_freq = addr[i * 6 + 3];
			const float rear_left = addr[i * 6 + 4];
			const float rear_right = addr[i * 6 + 5];
			g_surmx.mixdata[i * 8 + 0] += left;
			g_surmx.mixdata[i * 8 + 1] += right;
			g_surmx.mixdata[i * 8 + 2] += center;
			g_surmx.mixdata[i * 8 + 3] += low_freq;
			g_surmx.mixdata[i * 8 + 4] += rear_left;
			g_surmx.mixdata[i * 8 + 5] += rear_right;
		}
	}
	else if (type == CELL_SURMIXER_CHSTRIP_TYPE8A)
	{
		// 7.1
		for (u32 i = 0; i < samples * 8; i++)
		{
			g_surmx.mixdata[i] += addr[i];
		}
	}

	return CELL_OK;
}

// Mark a created SSPlayer as connected to the mixer.
// The receive/receivePortNo/sourcePortNo arguments are currently unused.
s32 cellAANConnect(u32 receive, u32 receivePortNo, u32 source, u32 sourcePortNo)
{
	libmixer.warning("cellAANConnect(receive=0x%x, receivePortNo=0x%x, source=0x%x, sourcePortNo=0x%x)", receive, receivePortNo, source, sourcePortNo);

	std::lock_guard lock(g_surmx.mutex);

	if (source >= g_ssp.size() || !g_ssp[source].m_created)
	{
		libmixer.error("cellAANConnect(): invalid source (%d)", source);
		return CELL_LIBMIXER_ERROR_INVALID_PARAMATER;
	}

	g_ssp[source].m_connected = true;

	return CELL_OK;
}

// Mark a created SSPlayer as disconnected from the mixer.
s32 cellAANDisconnect(u32 receive, u32 receivePortNo, u32 source, u32 sourcePortNo)
{
	libmixer.warning("cellAANDisconnect(receive=0x%x, receivePortNo=0x%x, source=0x%x, sourcePortNo=0x%x)", receive, receivePortNo, source, sourcePortNo);

	std::lock_guard lock(g_surmx.mutex);

	if (source >= g_ssp.size() || !g_ssp[source].m_created)
	{
		libmixer.error("cellAANDisconnect(): invalid source (%d)", source);
		return CELL_LIBMIXER_ERROR_INVALID_PARAMATER;
	}

	g_ssp[source].m_connected = false;

	return CELL_OK;
}

// Create a new simple-sound player (1 or 2 channels, output mode 0 only)
// and return its index in g_ssp as the handle.
// NOTE(review): removed players are never reused, so g_ssp only grows.
s32 cellSSPlayerCreate(vm::ptr<u32> handle, vm::ptr<CellSSPlayerConfig> config)
{
	libmixer.warning("cellSSPlayerCreate(handle=*0x%x, config=*0x%x)", handle, config);

	if (config->outputMode != 0u || config->channels - 1u >= 2u)
	{
		libmixer.error("cellSSPlayerCreate(config.outputMode=%d, config.channels=%d): invalid parameters", config->outputMode, config->channels);
		return CELL_LIBMIXER_ERROR_INVALID_PARAMATER;
	}

	std::lock_guard lock(g_surmx.mutex);

	SSPlayer p;
	p.m_created = true;
	p.m_connected = false;
	p.m_active = false;
	p.m_channels = config->channels;

	g_ssp.push_back(p);
	*handle = ::size32(g_ssp) - 1;
	return CELL_OK;
}

// Deactivate and mark a player slot as destroyed (the slot itself remains).
s32 cellSSPlayerRemove(u32 handle)
{
	libmixer.warning("cellSSPlayerRemove(handle=0x%x)", handle);

	std::lock_guard lock(g_surmx.mutex);

	if (handle >= g_ssp.size() || !g_ssp[handle].m_created)
	{
		libmixer.error("cellSSPlayerRemove(): SSPlayer not found (%d)", handle);
		return CELL_LIBMIXER_ERROR_INVALID_PARAMATER;
	}

	g_ssp[handle].m_active = false;
	g_ssp[handle].m_created = false;
	g_ssp[handle].m_connected = false;

	return CELL_OK;
}

// Assign wave data and common playback parameters to an existing player.
s32 cellSSPlayerSetWave(u32 handle, vm::ptr<CellSSPlayerWaveParam> waveInfo, vm::ptr<CellSSPlayerCommonParam> commonInfo)
{
	libmixer.warning("cellSSPlayerSetWave(handle=0x%x, waveInfo=*0x%x, commonInfo=*0x%x)", handle, waveInfo, commonInfo);

	std::lock_guard lock(g_surmx.mutex);

	if (handle >= g_ssp.size() || !g_ssp[handle].m_created)
	{
		libmixer.error("cellSSPlayerSetWave(): SSPlayer not found (%d)", handle);
		// NOTE(review): this function continues beyond the visible chunk.
return CELL_LIBMIXER_ERROR_INVALID_PARAMATER; } // TODO: check parameters g_ssp[handle].m_addr = waveInfo->addr; g_ssp[handle].m_samples = waveInfo->samples; g_ssp[handle].m_loop_start = waveInfo->loopStartOffset - 1; g_ssp[handle].m_loop_mode = commonInfo ? +commonInfo->loopMode : CELL_SSPLAYER_ONESHOT; g_ssp[handle].m_position = waveInfo->startOffset - 1; return CELL_OK; } s32 cellSSPlayerPlay(u32 handle, vm::ptr<CellSSPlayerRuntimeInfo> info) { libmixer.warning("cellSSPlayerPlay(handle=0x%x, info=*0x%x)", handle, info); std::lock_guard lock(g_surmx.mutex); if (handle >= g_ssp.size() || !g_ssp[handle].m_created) { libmixer.error("cellSSPlayerPlay(): SSPlayer not found (%d)", handle); return CELL_LIBMIXER_ERROR_INVALID_PARAMATER; } // TODO: check parameters g_ssp[handle].m_active = true; g_ssp[handle].m_level = info->level; g_ssp[handle].m_speed = info->speed; g_ssp[handle].m_x = info->position.x; g_ssp[handle].m_y = info->position.y; g_ssp[handle].m_z = info->position.z; return CELL_OK; } s32 cellSSPlayerStop(u32 handle, u32 mode) { libmixer.warning("cellSSPlayerStop(handle=0x%x, mode=0x%x)", handle, mode); std::lock_guard lock(g_surmx.mutex); if (handle >= g_ssp.size() || !g_ssp[handle].m_created) { libmixer.error("cellSSPlayerStop(): SSPlayer not found (%d)", handle); return CELL_LIBMIXER_ERROR_INVALID_PARAMATER; } // TODO: transition to stop state g_ssp[handle].m_active = false; return CELL_OK; } s32 cellSSPlayerSetParam(u32 handle, vm::ptr<CellSSPlayerRuntimeInfo> info) { libmixer.warning("cellSSPlayerSetParam(handle=0x%x, info=*0x%x)", handle, info); std::lock_guard lock(g_surmx.mutex); if (handle >= g_ssp.size() || !g_ssp[handle].m_created) { libmixer.error("cellSSPlayerSetParam(): SSPlayer not found (%d)", handle); return CELL_LIBMIXER_ERROR_INVALID_PARAMATER; } // TODO: check parameters g_ssp[handle].m_level = info->level; g_ssp[handle].m_speed = info->speed; g_ssp[handle].m_x = info->position.x; g_ssp[handle].m_y = info->position.y; g_ssp[handle].m_z = 
info->position.z; return CELL_OK; } s32 cellSSPlayerGetState(u32 handle) { libmixer.warning("cellSSPlayerGetState(handle=0x%x)", handle); std::lock_guard lock(g_surmx.mutex); if (handle >= g_ssp.size() || !g_ssp[handle].m_created) { libmixer.warning("cellSSPlayerGetState(): SSPlayer not found (%d)", handle); return CELL_SSPLAYER_STATE_ERROR; } if (g_ssp[handle].m_active) { return CELL_SSPLAYER_STATE_ON; } return CELL_SSPLAYER_STATE_OFF; } struct surmixer_thread : ppu_thread { using ppu_thread::ppu_thread; void non_task() { auto& g_audio = g_fxo->get<cell_audio>(); audio_port& port = g_audio.ports[g_surmx.audio_port]; while (port.state != audio_port_state::closed) { if (g_surmx.mixcount > (port.active_counter + 0)) // adding positive value (1-15): preemptive buffer filling (hack) { thread_ctrl::wait_for(1000); // hack continue; } if (port.state == audio_port_state::started) { //u64 stamp0 = get_guest_system_time(); memset(g_surmx.mixdata, 0, sizeof(g_surmx.mixdata)); if (g_surmx.cb) { g_surmx.cb(*this, g_surmx.cb_arg, static_cast<u32>(g_surmx.mixcount), 256); lv2_obj::sleep(*this); } //u64 stamp1 = get_guest_system_time(); { std::lock_guard lock(g_surmx.mutex); for (auto& p : g_ssp) if (p.m_active && p.m_created) { auto v = vm::ptrl<s16>::make(p.m_addr); // 16-bit LE audio data float left = 0.0f; float right = 0.0f; float speed = std::fabs(p.m_speed); float fpos = 0.0f; for (s32 i = 0; i < 256; i++) if (p.m_active) { u32 pos = p.m_position; s32 pos_inc = 0; if (p.m_speed > 0.0f) // select direction { pos_inc = 1; } else if (p.m_speed < 0.0f) { pos_inc = -1; } s32 shift = i - static_cast<s32>(fpos); // change playback speed (simple and rough) if (shift > 0) { // slow playback pos_inc = 0; // duplicate one sample at this time fpos += 1.0f; fpos += speed; } else if (shift < 0) { // fast playback i--; // mix two sample into one at this time fpos -= 1.0f; } else { fpos += speed; } p.m_position += pos_inc; if (p.m_channels == 1) // get mono data { left = right = v[pos] / 
32768.f * p.m_level; } else if (p.m_channels == 2) // get stereo data { left = v[pos * 2 + 0] / 32768.f * p.m_level; right = v[pos * 2 + 1] / 32768.f * p.m_level; } if (p.m_connected) // mix { // TODO: m_x, m_y, m_z ignored g_surmx.mixdata[i * 8 + 0] += left; g_surmx.mixdata[i * 8 + 1] += right; } if ((p.m_position == p.m_samples && p.m_speed > 0.0f) || (p.m_position == umax && p.m_speed < 0.0f)) // loop or stop { if (p.m_loop_mode == CELL_SSPLAYER_LOOP_ON) { p.m_position = p.m_loop_start; } else if (p.m_loop_mode == CELL_SSPLAYER_ONESHOT_CONT) { p.m_position -= pos_inc; // restore position } else // oneshot { p.m_active = false; p.m_position = p.m_loop_start; // TODO: check value } } } } } //u64 stamp2 = get_guest_system_time(); auto buf = vm::_ptr<f32>(port.addr.addr() + (g_surmx.mixcount % port.num_blocks) * port.num_channels * AUDIO_BUFFER_SAMPLES * sizeof(float)); for (auto& mixdata : g_surmx.mixdata) { // reverse byte order *buf++ = mixdata; } //u64 stamp3 = get_guest_system_time(); //ConLog.Write("Libmixer perf: start=%lld (cb=%lld, ssp=%lld, finalize=%lld)", stamp0 - m_config.start_time, stamp1 - stamp0, stamp2 - stamp1, stamp3 - stamp2); } g_surmx.mixcount++; } idm::remove<ppu_thread>(id); } }; s32 cellSurMixerCreate(vm::cptr<CellSurMixerConfig> config) { libmixer.warning("cellSurMixerCreate(config=*0x%x)", config); auto& g_audio = g_fxo->get<cell_audio>(); const auto port = g_audio.open_port(); if (!port) { return CELL_LIBMIXER_ERROR_FULL; } g_surmx.audio_port = port->number; g_surmx.priority = config->priority; g_surmx.ch_strips_1 = config->chStrips1; g_surmx.ch_strips_2 = config->chStrips2; g_surmx.ch_strips_6 = config->chStrips6; g_surmx.ch_strips_8 = config->chStrips8; port->num_channels = 8; port->num_blocks = 16; port->attr = 0; port->size = port->num_channels * port->num_blocks * AUDIO_BUFFER_SAMPLES * sizeof(float); port->level = 1.0f; port->level_set.store({ 1.0f, 0.0f }); libmixer.warning("*** audio port opened (port=%d)", g_surmx.audio_port); 
g_surmx.mixcount = 0; g_surmx.cb = vm::null; g_ssp.clear(); libmixer.warning("*** surMixer created (ch1=%d, ch2=%d, ch6=%d, ch8=%d)", config->chStrips1, config->chStrips2, config->chStrips6, config->chStrips8); //auto thread = idm::make_ptr<ppu_thread>("Surmixer Thread"); return CELL_OK; } s32 cellSurMixerGetAANHandle(vm::ptr<u32> handle) { libmixer.warning("cellSurMixerGetAANHandle(handle=*0x%x) -> %d", handle, 0x11111111); *handle = 0x11111111; return CELL_OK; } s32 cellSurMixerChStripGetAANPortNo(vm::ptr<u32> port, u32 type, u32 index) { libmixer.warning("cellSurMixerChStripGetAANPortNo(port=*0x%x, type=0x%x, index=0x%x) -> 0x%x", port, type, index, (type << 16) | index); *port = (type << 16) | index; return CELL_OK; } s32 cellSurMixerSetNotifyCallback(vm::ptr<CellSurMixerNotifyCallbackFunction> func, vm::ptr<void> arg) { libmixer.warning("cellSurMixerSetNotifyCallback(func=*0x%x, arg=*0x%x)", func, arg); if (g_surmx.cb) { fmt::throw_exception("Callback already set"); } g_surmx.cb = func; g_surmx.cb_arg = arg; return CELL_OK; } s32 cellSurMixerRemoveNotifyCallback(vm::ptr<CellSurMixerNotifyCallbackFunction> func) { libmixer.warning("cellSurMixerRemoveNotifyCallback(func=*0x%x)", func); if (g_surmx.cb != func) { fmt::throw_exception("Callback not set"); } g_surmx.cb = vm::null; return CELL_OK; } s32 cellSurMixerStart() { libmixer.warning("cellSurMixerStart()"); auto& g_audio = g_fxo->get<cell_audio>(); if (g_surmx.audio_port >= AUDIO_PORT_COUNT) { return CELL_LIBMIXER_ERROR_NOT_INITIALIZED; } g_audio.ports[g_surmx.audio_port].state.compare_and_swap(audio_port_state::opened, audio_port_state::started); return CELL_OK; } s32 cellSurMixerSetParameter(u32 param, float value) { libmixer.todo("cellSurMixerSetParameter(param=0x%x, value=%f)", param, value); return CELL_OK; } s32 cellSurMixerFinalize() { libmixer.warning("cellSurMixerFinalize()"); auto& g_audio = g_fxo->get<cell_audio>(); if (g_surmx.audio_port >= AUDIO_PORT_COUNT) { return 
CELL_LIBMIXER_ERROR_NOT_INITIALIZED; } g_audio.ports[g_surmx.audio_port].state.compare_and_swap(audio_port_state::opened, audio_port_state::closed); return CELL_OK; } s32 cellSurMixerSurBusAddData(u32 busNo, u32 offset, vm::ptr<float> addr, u32 samples) { if (busNo < 8 && samples == 256 && offset == 0) { libmixer.trace("cellSurMixerSurBusAddData(busNo=%d, offset=0x%x, addr=0x%x, samples=%d)", busNo, offset, addr, samples); } else { libmixer.todo("cellSurMixerSurBusAddData(busNo=%d, offset=0x%x, addr=0x%x, samples=%d)", busNo, offset, addr, samples); return CELL_OK; } std::lock_guard lock(g_surmx.mutex); for (u32 i = 0; i < samples; i++) { // reverse byte order and mix g_surmx.mixdata[i * 8 + busNo] += addr[i]; } return CELL_OK; } s32 cellSurMixerChStripSetParameter(u32 type, u32 index, vm::ptr<CellSurMixerChStripParam> param) { libmixer.todo("cellSurMixerChStripSetParameter(type=%d, index=%d, param=*0x%x)", type, index, param); return CELL_OK; } s32 cellSurMixerPause(u32 type) { libmixer.warning("cellSurMixerPause(type=%d)", type); auto& g_audio = g_fxo->get<cell_audio>(); if (g_surmx.audio_port >= AUDIO_PORT_COUNT) { return CELL_LIBMIXER_ERROR_NOT_INITIALIZED; } g_audio.ports[g_surmx.audio_port].state.compare_and_swap(audio_port_state::started, audio_port_state::opened); return CELL_OK; } s32 cellSurMixerGetCurrentBlockTag(vm::ptr<u64> tag) { libmixer.trace("cellSurMixerGetCurrentBlockTag(tag=*0x%x)", tag); *tag = g_surmx.mixcount; return CELL_OK; } s32 cellSurMixerGetTimestamp(u64 tag, vm::ptr<u64> stamp) { libmixer.error("cellSurMixerGetTimestamp(tag=0x%llx, stamp=*0x%x)", tag, stamp); auto& g_audio = g_fxo->get<cell_audio>(); *stamp = g_audio.m_start_time + tag * AUDIO_BUFFER_SAMPLES * 1'000'000 / g_audio.cfg.audio_sampling_rate; return CELL_OK; } void cellSurMixerBeep(u32 arg) { libmixer.todo("cellSurMixerBeep(arg=%d)", arg); return; } f32 cellSurMixerUtilGetLevelFromDB(f32 dB) { libmixer.fatal("cellSurMixerUtilGetLevelFromDB(dB=%f)", dB); return 0; } f32 
cellSurMixerUtilGetLevelFromDBIndex(s32 index) { libmixer.fatal("cellSurMixerUtilGetLevelFromDBIndex(index=%d)", index); return 0; } f32 cellSurMixerUtilNoteToRatio(u8 refNote, u8 note) { libmixer.fatal("cellSurMixerUtilNoteToRatio(refNote=%d, note=%d)", refNote, note); return 0; } DECLARE(ppu_module_manager::libmixer)("libmixer", []() { REG_FUNC(libmixer, cellAANAddData); REG_FUNC(libmixer, cellAANConnect); REG_FUNC(libmixer, cellAANDisconnect); REG_FUNC(libmixer, cellSurMixerCreate); REG_FUNC(libmixer, cellSurMixerGetAANHandle); REG_FUNC(libmixer, cellSurMixerChStripGetAANPortNo); REG_FUNC(libmixer, cellSurMixerSetNotifyCallback); REG_FUNC(libmixer, cellSurMixerRemoveNotifyCallback); REG_FUNC(libmixer, cellSurMixerStart); REG_FUNC(libmixer, cellSurMixerSetParameter); REG_FUNC(libmixer, cellSurMixerFinalize); REG_FUNC(libmixer, cellSurMixerSurBusAddData); REG_FUNC(libmixer, cellSurMixerChStripSetParameter); REG_FUNC(libmixer, cellSurMixerPause); REG_FUNC(libmixer, cellSurMixerGetCurrentBlockTag); REG_FUNC(libmixer, cellSurMixerGetTimestamp); REG_FUNC(libmixer, cellSurMixerBeep); REG_FUNC(libmixer, cellSSPlayerCreate); REG_FUNC(libmixer, cellSSPlayerRemove); REG_FUNC(libmixer, cellSSPlayerSetWave); REG_FUNC(libmixer, cellSSPlayerPlay); REG_FUNC(libmixer, cellSSPlayerStop); REG_FUNC(libmixer, cellSSPlayerSetParam); REG_FUNC(libmixer, cellSSPlayerGetState); REG_FUNC(libmixer, cellSurMixerUtilGetLevelFromDB); REG_FUNC(libmixer, cellSurMixerUtilGetLevelFromDBIndex); REG_FUNC(libmixer, cellSurMixerUtilNoteToRatio); });
18,762
C++
.cpp
578
29.257785
169
0.69085
RPCS3/rpcs3
15,204
1,895
1,021
GPL-2.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
true
false
false
true
false
false